1 /* Subroutines used for MIPS code generation.
2 Copyright (C) 1989, 1990, 1991, 1993, 1994, 1995, 1996, 1997, 1998,
3 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007
4 Free Software Foundation, Inc.
5 Contributed by A. Lichnewsky, lich@inria.inria.fr.
6 Changes by Michael Meissner, meissner@osf.org.
7 64-bit r4000 support by Ian Lance Taylor, ian@cygnus.com, and
8 Brendan Eich, brendan@microunity.com.
10 This file is part of GCC.
12 GCC is free software; you can redistribute it and/or modify
13 it under the terms of the GNU General Public License as published by
14 the Free Software Foundation; either version 3, or (at your option)
17 GCC is distributed in the hope that it will be useful,
18 but WITHOUT ANY WARRANTY; without even the implied warranty of
19 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
20 GNU General Public License for more details.
22 You should have received a copy of the GNU General Public License
23 along with GCC; see the file COPYING3. If not see
24 <http://www.gnu.org/licenses/>. */
28 #include "coretypes.h"
33 #include "hard-reg-set.h"
35 #include "insn-config.h"
36 #include "conditions.h"
37 #include "insn-attr.h"
53 #include "target-def.h"
54 #include "integrate.h"
55 #include "langhooks.h"
56 #include "cfglayout.h"
57 #include "sched-int.h"
58 #include "tree-gimple.h"
/* True if X is an unspec wrapper around a SYMBOL_REF or LABEL_REF.
   The unspec number encodes the symbol type, biased by
   UNSPEC_ADDRESS_FIRST (see UNSPEC_ADDRESS_TYPE below).  */
#define UNSPEC_ADDRESS_P(X)					\
  (GET_CODE (X) == UNSPEC					\
   && XINT (X, 1) >= UNSPEC_ADDRESS_FIRST			\
   && XINT (X, 1) < UNSPEC_ADDRESS_FIRST + NUM_SYMBOL_TYPES)
67 /* Extract the symbol or label from UNSPEC wrapper X. */
68 #define UNSPEC_ADDRESS(X) \
/* Extract the symbol type from UNSPEC wrapper X.  X must satisfy
   UNSPEC_ADDRESS_P; the type is stored as the unspec number minus
   UNSPEC_ADDRESS_FIRST.  */
#define UNSPEC_ADDRESS_TYPE(X) \
  ((enum mips_symbol_type) (XINT (X, 1) - UNSPEC_ADDRESS_FIRST))
/* The maximum distance between the top of the stack frame and the
   value $sp has when we save and restore registers.

   The value for normal-mode code must be a SMALL_OPERAND and must
   preserve the maximum stack alignment.  We therefore use a value
   of 0x7ff0 in this case.

   MIPS16e SAVE and RESTORE instructions can adjust the stack pointer by
   up to 0x7f8 bytes and can usually save or restore all the registers
   that we need to save or restore.  (Note that we can only use these
   instructions for o32, for which the stack alignment is 8 bytes.)

   We use a maximum gap of 0x100 or 0x400 for MIPS16 code when SAVE and
   RESTORE are not available.  We can then use unextended instructions
   to save and restore registers, and to allocate and deallocate the top
   part of the frame.  */
#define MIPS_MAX_FIRST_STACK_STEP					\
  (!TARGET_MIPS16 ? 0x7ff0						\
   : GENERATE_MIPS16E_SAVE_RESTORE ? 0x7f8				\
   : TARGET_64BIT ? 0x100 : 0x400)
96 /* True if INSN is a mips.md pattern or asm statement. */
97 #define USEFUL_INSN_P(INSN) \
99 && GET_CODE (PATTERN (INSN)) != USE \
100 && GET_CODE (PATTERN (INSN)) != CLOBBER \
101 && GET_CODE (PATTERN (INSN)) != ADDR_VEC \
102 && GET_CODE (PATTERN (INSN)) != ADDR_DIFF_VEC)
104 /* If INSN is a delayed branch sequence, return the first instruction
105 in the sequence, otherwise return INSN itself. */
106 #define SEQ_BEGIN(INSN) \
107 (INSN_P (INSN) && GET_CODE (PATTERN (INSN)) == SEQUENCE \
108 ? XVECEXP (PATTERN (INSN), 0, 0) \
111 /* Likewise for the last instruction in a delayed branch sequence. */
112 #define SEQ_END(INSN) \
113 (INSN_P (INSN) && GET_CODE (PATTERN (INSN)) == SEQUENCE \
114 ? XVECEXP (PATTERN (INSN), 0, XVECLEN (PATTERN (INSN), 0) - 1) \
/* Execute the following loop body with SUBINSN set to each instruction
   between SEQ_BEGIN (INSN) and SEQ_END (INSN) inclusive.  Note that
   INSN is evaluated several times per iteration, so it should be a
   simple expression with no side effects.  */
#define FOR_EACH_SUBINSN(SUBINSN, INSN)					\
  for ((SUBINSN) = SEQ_BEGIN (INSN);					\
       (SUBINSN) != NEXT_INSN (SEQ_END (INSN));			\
       (SUBINSN) = NEXT_INSN (SUBINSN))
/* True if bit BIT is set in VALUE.  The shifted constant is unsigned
   so that testing bit 31 (possible for the 32-bit "mask"/"fmask"
   register masks in struct mips_frame_info) does not left-shift into
   the sign bit of a signed int, which is undefined behavior.  */
#define BITSET_P(VALUE, BIT) (((VALUE) & (1U << (BIT))) != 0)
127 /* Classifies an address.
130 A natural register + offset address. The register satisfies
131 mips_valid_base_register_p and the offset is a const_arith_operand.
134 A LO_SUM rtx. The first operand is a valid base register and
135 the second operand is a symbolic address.
138 A signed 16-bit constant address.
141 A constant symbolic address (equivalent to CONSTANT_SYMBOLIC). */
142 enum mips_address_type {
149 /* Classifies the prototype of a builtin function. */
150 enum mips_function_type
152 MIPS_V2SF_FTYPE_V2SF,
153 MIPS_V2SF_FTYPE_V2SF_V2SF,
154 MIPS_V2SF_FTYPE_V2SF_V2SF_INT,
155 MIPS_V2SF_FTYPE_V2SF_V2SF_V2SF_V2SF,
156 MIPS_V2SF_FTYPE_SF_SF,
157 MIPS_INT_FTYPE_V2SF_V2SF,
158 MIPS_INT_FTYPE_V2SF_V2SF_V2SF_V2SF,
159 MIPS_INT_FTYPE_SF_SF,
160 MIPS_INT_FTYPE_DF_DF,
167 /* For MIPS DSP ASE */
169 MIPS_DI_FTYPE_DI_SI_SI,
170 MIPS_DI_FTYPE_DI_V2HI_V2HI,
171 MIPS_DI_FTYPE_DI_V4QI_V4QI,
173 MIPS_SI_FTYPE_PTR_SI,
177 MIPS_SI_FTYPE_V2HI_V2HI,
179 MIPS_SI_FTYPE_V4QI_V4QI,
182 MIPS_V2HI_FTYPE_SI_SI,
183 MIPS_V2HI_FTYPE_V2HI,
184 MIPS_V2HI_FTYPE_V2HI_SI,
185 MIPS_V2HI_FTYPE_V2HI_V2HI,
186 MIPS_V2HI_FTYPE_V4QI,
187 MIPS_V2HI_FTYPE_V4QI_V2HI,
189 MIPS_V4QI_FTYPE_V2HI_V2HI,
190 MIPS_V4QI_FTYPE_V4QI_SI,
191 MIPS_V4QI_FTYPE_V4QI_V4QI,
192 MIPS_VOID_FTYPE_SI_SI,
193 MIPS_VOID_FTYPE_V2HI_V2HI,
194 MIPS_VOID_FTYPE_V4QI_V4QI,
196 /* For MIPS DSP REV 2 ASE. */
197 MIPS_V4QI_FTYPE_V4QI,
198 MIPS_SI_FTYPE_SI_SI_SI,
199 MIPS_DI_FTYPE_DI_USI_USI,
201 MIPS_DI_FTYPE_USI_USI,
202 MIPS_V2HI_FTYPE_SI_SI_SI,
208 /* Specifies how a builtin function should be converted into rtl. */
209 enum mips_builtin_type
211 /* The builtin corresponds directly to an .md pattern. The return
212 value is mapped to operand 0 and the arguments are mapped to
213 operands 1 and above. */
216 /* The builtin corresponds directly to an .md pattern. There is no return
217 value and the arguments are mapped to operands 0 and above. */
218 MIPS_BUILTIN_DIRECT_NO_TARGET,
220 /* The builtin corresponds to a comparison instruction followed by
221 a mips_cond_move_tf_ps pattern. The first two arguments are the
222 values to compare and the second two arguments are the vector
223 operands for the movt.ps or movf.ps instruction (in assembly order). */
227 /* The builtin corresponds to a V2SF comparison instruction. Operand 0
228 of this instruction is the result of the comparison, which has mode
229 CCV2 or CCV4. The function arguments are mapped to operands 1 and
230 above. The function's return value is an SImode boolean that is
231 true under the following conditions:
233 MIPS_BUILTIN_CMP_ANY: one of the registers is true
234 MIPS_BUILTIN_CMP_ALL: all of the registers are true
235 MIPS_BUILTIN_CMP_LOWER: the first register is true
236 MIPS_BUILTIN_CMP_UPPER: the second register is true. */
237 MIPS_BUILTIN_CMP_ANY,
238 MIPS_BUILTIN_CMP_ALL,
239 MIPS_BUILTIN_CMP_UPPER,
240 MIPS_BUILTIN_CMP_LOWER,
242 /* As above, but the instruction only sets a single $fcc register. */
243 MIPS_BUILTIN_CMP_SINGLE,
245 /* For generating bposge32 branch instructions in MIPS32 DSP ASE. */
246 MIPS_BUILTIN_BPOSGE32
249 /* Invokes MACRO (COND) for each c.cond.fmt condition. */
250 #define MIPS_FP_CONDITIONS(MACRO) \
268 /* Enumerates the codes above as MIPS_FP_COND_<X>. */
269 #define DECLARE_MIPS_COND(X) MIPS_FP_COND_ ## X
270 enum mips_fp_condition {
271 MIPS_FP_CONDITIONS (DECLARE_MIPS_COND)
274 /* Index X provides the string representation of MIPS_FP_COND_<X>. */
275 #define STRINGIFY(X) #X
276 static const char *const mips_fp_conditions[] = {
277 MIPS_FP_CONDITIONS (STRINGIFY)
/* A function to save or restore a register.  The first argument is the
   register and the second is the stack slot to use.  Passed to
   mips_for_each_saved_reg; see mips_save_reg and mips_restore_reg.  */
typedef void (*mips_save_restore_fn) (rtx, rtx);
284 struct mips16_constant;
285 struct mips_arg_info;
286 struct mips_address_info;
287 struct mips_integer_op;
290 static bool mips_valid_base_register_p (rtx, enum machine_mode, int);
291 static bool mips_symbolic_address_p (enum mips_symbol_type, enum machine_mode);
292 static bool mips_classify_address (struct mips_address_info *, rtx,
293 enum machine_mode, int);
294 static bool mips_cannot_force_const_mem (rtx);
295 static bool mips_use_blocks_for_constant_p (enum machine_mode, rtx);
296 static int mips_symbol_insns (enum mips_symbol_type);
297 static bool mips16_unextended_reference_p (enum machine_mode mode, rtx, rtx);
298 static rtx mips_force_temporary (rtx, rtx);
299 static rtx mips_unspec_offset_high (rtx, rtx, rtx, enum mips_symbol_type);
300 static rtx mips_add_offset (rtx, rtx, HOST_WIDE_INT);
301 static unsigned int mips_build_shift (struct mips_integer_op *, HOST_WIDE_INT);
302 static unsigned int mips_build_lower (struct mips_integer_op *,
303 unsigned HOST_WIDE_INT);
304 static unsigned int mips_build_integer (struct mips_integer_op *,
305 unsigned HOST_WIDE_INT);
306 static void mips_legitimize_const_move (enum machine_mode, rtx, rtx);
307 static int m16_check_op (rtx, int, int, int);
308 static bool mips_rtx_costs (rtx, int, int, int *);
309 static int mips_address_cost (rtx);
310 static void mips_emit_compare (enum rtx_code *, rtx *, rtx *, bool);
311 static void mips_load_call_address (rtx, rtx, int);
312 static bool mips_function_ok_for_sibcall (tree, tree);
313 static void mips_block_move_straight (rtx, rtx, HOST_WIDE_INT);
314 static void mips_adjust_block_mem (rtx, HOST_WIDE_INT, rtx *, rtx *);
315 static void mips_block_move_loop (rtx, rtx, HOST_WIDE_INT);
316 static void mips_arg_info (const CUMULATIVE_ARGS *, enum machine_mode,
317 tree, int, struct mips_arg_info *);
318 static bool mips_get_unaligned_mem (rtx *, unsigned int, int, rtx *, rtx *);
319 static void mips_set_architecture (const struct mips_cpu_info *);
320 static void mips_set_tune (const struct mips_cpu_info *);
321 static bool mips_handle_option (size_t, const char *, int);
322 static struct machine_function *mips_init_machine_status (void);
323 static void print_operand_reloc (FILE *, rtx, enum mips_symbol_context,
325 static void mips_file_start (void);
326 static int mips_small_data_pattern_1 (rtx *, void *);
327 static int mips_rewrite_small_data_1 (rtx *, void *);
328 static bool mips_function_has_gp_insn (void);
329 static unsigned int mips_global_pointer (void);
330 static bool mips_save_reg_p (unsigned int);
331 static void mips_save_restore_reg (enum machine_mode, int, HOST_WIDE_INT,
332 mips_save_restore_fn);
333 static void mips_for_each_saved_reg (HOST_WIDE_INT, mips_save_restore_fn);
334 static void mips_output_cplocal (void);
335 static void mips_emit_loadgp (void);
336 static void mips_output_function_prologue (FILE *, HOST_WIDE_INT);
337 static void mips_set_frame_expr (rtx);
338 static rtx mips_frame_set (rtx, rtx);
339 static void mips_save_reg (rtx, rtx);
340 static void mips_output_function_epilogue (FILE *, HOST_WIDE_INT);
341 static void mips_restore_reg (rtx, rtx);
342 static void mips_output_mi_thunk (FILE *, tree, HOST_WIDE_INT,
343 HOST_WIDE_INT, tree);
344 static int symbolic_expression_p (rtx);
345 static section *mips_select_rtx_section (enum machine_mode, rtx,
346 unsigned HOST_WIDE_INT);
347 static section *mips_function_rodata_section (tree);
348 static bool mips_in_small_data_p (tree);
349 static bool mips_use_anchors_for_symbol_p (rtx);
350 static int mips_fpr_return_fields (tree, tree *);
351 static bool mips_return_in_msb (tree);
352 static rtx mips_return_fpr_pair (enum machine_mode mode,
353 enum machine_mode mode1, HOST_WIDE_INT,
354 enum machine_mode mode2, HOST_WIDE_INT);
355 static rtx mips16_gp_pseudo_reg (void);
356 static void mips16_fp_args (FILE *, int, int);
357 static void build_mips16_function_stub (FILE *);
358 static rtx dump_constants_1 (enum machine_mode, rtx, rtx);
359 static void dump_constants (struct mips16_constant *, rtx);
360 static int mips16_insn_length (rtx);
361 static int mips16_rewrite_pool_refs (rtx *, void *);
362 static void mips16_lay_out_constants (void);
363 static void mips_sim_reset (struct mips_sim *);
364 static void mips_sim_init (struct mips_sim *, state_t);
365 static void mips_sim_next_cycle (struct mips_sim *);
366 static void mips_sim_wait_reg (struct mips_sim *, rtx, rtx);
367 static int mips_sim_wait_regs_2 (rtx *, void *);
368 static void mips_sim_wait_regs_1 (rtx *, void *);
369 static void mips_sim_wait_regs (struct mips_sim *, rtx);
370 static void mips_sim_wait_units (struct mips_sim *, rtx);
371 static void mips_sim_wait_insn (struct mips_sim *, rtx);
372 static void mips_sim_record_set (rtx, const_rtx, void *);
373 static void mips_sim_issue_insn (struct mips_sim *, rtx);
374 static void mips_sim_issue_nop (struct mips_sim *);
375 static void mips_sim_finish_insn (struct mips_sim *, rtx);
376 static void vr4130_avoid_branch_rt_conflict (rtx);
377 static void vr4130_align_insns (void);
378 static void mips_avoid_hazard (rtx, rtx, int *, rtx *, rtx);
379 static void mips_avoid_hazards (void);
380 static void mips_reorg (void);
381 static bool mips_strict_matching_cpu_name_p (const char *, const char *);
382 static bool mips_matching_cpu_name_p (const char *, const char *);
383 static const struct mips_cpu_info *mips_parse_cpu (const char *);
384 static const struct mips_cpu_info *mips_cpu_info_from_isa (int);
385 static bool mips_return_in_memory (tree, tree);
386 static bool mips_strict_argument_naming (CUMULATIVE_ARGS *);
387 static void mips_macc_chains_record (rtx);
388 static void mips_macc_chains_reorder (rtx *, int);
389 static void vr4130_true_reg_dependence_p_1 (rtx, const_rtx, void *);
390 static bool vr4130_true_reg_dependence_p (rtx);
391 static bool vr4130_swap_insns_p (rtx, rtx);
392 static void vr4130_reorder (rtx *, int);
393 static void mips_promote_ready (rtx *, int, int);
394 static int mips_sched_reorder (FILE *, int, rtx *, int *, int);
395 static int mips_variable_issue (FILE *, int, rtx, int);
396 static int mips_adjust_cost (rtx, rtx, rtx, int);
397 static int mips_issue_rate (void);
398 static int mips_multipass_dfa_lookahead (void);
399 static void mips_init_libfuncs (void);
400 static void mips_setup_incoming_varargs (CUMULATIVE_ARGS *, enum machine_mode,
402 static tree mips_build_builtin_va_list (void);
403 static tree mips_gimplify_va_arg_expr (tree, tree, tree *, tree *);
404 static bool mips_pass_by_reference (CUMULATIVE_ARGS *, enum machine_mode mode,
406 static bool mips_callee_copies (CUMULATIVE_ARGS *, enum machine_mode mode,
408 static int mips_arg_partial_bytes (CUMULATIVE_ARGS *, enum machine_mode mode,
410 static bool mips_valid_pointer_mode (enum machine_mode);
411 static bool mips_vector_mode_supported_p (enum machine_mode);
412 static rtx mips_prepare_builtin_arg (enum insn_code, unsigned int, tree, unsigned int);
413 static rtx mips_prepare_builtin_target (enum insn_code, unsigned int, rtx);
414 static rtx mips_expand_builtin (tree, rtx, rtx, enum machine_mode, int);
415 static void mips_init_builtins (void);
416 static rtx mips_expand_builtin_direct (enum insn_code, rtx, tree, bool);
417 static rtx mips_expand_builtin_movtf (enum mips_builtin_type,
418 enum insn_code, enum mips_fp_condition,
420 static rtx mips_expand_builtin_compare (enum mips_builtin_type,
421 enum insn_code, enum mips_fp_condition,
423 static rtx mips_expand_builtin_bposge (enum mips_builtin_type, rtx);
424 static void mips_encode_section_info (tree, rtx, int);
425 static void mips_extra_live_on_entry (bitmap);
426 static int mips_comp_type_attributes (tree, tree);
427 static int mips_mode_rep_extended (enum machine_mode, enum machine_mode);
428 static bool mips_offset_within_alignment_p (rtx, HOST_WIDE_INT);
429 static void mips_output_dwarf_dtprel (FILE *, int, rtx) ATTRIBUTE_UNUSED;
431 /* Structure to be filled in by compute_frame_size with register
432 save masks, and offsets for the current function. */
434 struct mips_frame_info GTY(())
436 HOST_WIDE_INT total_size; /* # bytes that the entire frame takes up */
437 HOST_WIDE_INT var_size; /* # bytes that variables take up */
438 HOST_WIDE_INT args_size; /* # bytes that outgoing arguments take up */
439 HOST_WIDE_INT cprestore_size; /* # bytes that the .cprestore slot takes up */
440 HOST_WIDE_INT gp_reg_size; /* # bytes needed to store gp regs */
441 HOST_WIDE_INT fp_reg_size; /* # bytes needed to store fp regs */
442 unsigned int mask; /* mask of saved gp registers */
443 unsigned int fmask; /* mask of saved fp registers */
444 HOST_WIDE_INT gp_save_offset; /* offset from vfp to store gp registers */
445 HOST_WIDE_INT fp_save_offset; /* offset from vfp to store fp registers */
446 HOST_WIDE_INT gp_sp_offset; /* offset from new sp to store gp registers */
447 HOST_WIDE_INT fp_sp_offset; /* offset from new sp to store fp registers */
448 bool initialized; /* true if frame size already calculated */
449 int num_gp; /* number of gp registers saved */
450 int num_fp; /* number of fp registers saved */
453 struct machine_function GTY(()) {
454 /* Pseudo-reg holding the value of $28 in a mips16 function which
455 refers to GP relative global variables. */
456 rtx mips16_gp_pseudo_rtx;
458 /* The number of extra stack bytes taken up by register varargs.
459 This area is allocated by the callee at the very top of the frame. */
462 /* Current frame information, calculated by compute_frame_size. */
463 struct mips_frame_info frame;
465 /* The register to use as the global pointer within this function. */
466 unsigned int global_pointer;
468 /* True if mips_adjust_insn_length should ignore an instruction's
470 bool ignore_hazard_length_p;
472 /* True if the whole function is suitable for .set noreorder and
474 bool all_noreorder_p;
476 /* True if the function is known to have an instruction that needs $gp. */
479 /* True if we have emitted an instruction to initialize
480 mips16_gp_pseudo_rtx. */
481 bool initialized_mips16_gp_pseudo_p;
484 /* Information about a single argument. */
487 /* True if the argument is passed in a floating-point register, or
488 would have been if we hadn't run out of registers. */
491 /* The number of words passed in registers, rounded up. */
492 unsigned int reg_words;
494 /* For EABI, the offset of the first register from GP_ARG_FIRST or
495 FP_ARG_FIRST. For other ABIs, the offset of the first register from
496 the start of the ABI's argument structure (see the CUMULATIVE_ARGS
497 comment for details).
499 The value is MAX_ARGS_IN_REGISTERS if the argument is passed entirely
501 unsigned int reg_offset;
503 /* The number of words that must be passed on the stack, rounded up. */
504 unsigned int stack_words;
506 /* The offset from the start of the stack overflow area of the argument's
507 first stack word. Only meaningful when STACK_WORDS is nonzero. */
508 unsigned int stack_offset;
512 /* Information about an address described by mips_address_type.
518 REG is the base register and OFFSET is the constant offset.
521 REG is the register that contains the high part of the address,
522 OFFSET is the symbolic address being referenced and SYMBOL_TYPE
523 is the type of OFFSET's symbol.
526 SYMBOL_TYPE is the type of symbol being referenced. */
528 struct mips_address_info
530 enum mips_address_type type;
533 enum mips_symbol_type symbol_type;
537 /* One stage in a constant building sequence. These sequences have
541 A = A CODE[1] VALUE[1]
542 A = A CODE[2] VALUE[2]
545 where A is an accumulator, each CODE[i] is a binary rtl operation
546 and each VALUE[i] is a constant integer. */
547 struct mips_integer_op {
549 unsigned HOST_WIDE_INT value;
553 /* The largest number of operations needed to load an integer constant.
554 The worst accepted case for 64-bit constants is LUI,ORI,SLL,ORI,SLL,ORI.
555 When the lowest bit is clear, we can try, but reject a sequence with
556 an extra SLL at the end. */
557 #define MIPS_MAX_INTEGER_OPS 7
559 /* Information about a MIPS16e SAVE or RESTORE instruction. */
560 struct mips16e_save_restore_info {
561 /* The number of argument registers saved by a SAVE instruction.
562 0 for RESTORE instructions. */
565 /* Bit X is set if the instruction saves or restores GPR X. */
568 /* The total number of bytes to allocate. */
572 /* Global variables for machine-dependent things. */
574 /* Threshold for data being put into the small data/bss area, instead
575 of the normal data area. */
576 int mips_section_threshold = -1;
578 /* Count the number of .file directives, so that .loc is up to date. */
579 int num_source_filenames = 0;
581 /* Count the number of sdb related labels are generated (to find block
582 start and end boundaries). */
583 int sdb_label_count = 0;
585 /* Next label # for each statement for Silicon Graphics IRIS systems. */
588 /* Name of the file containing the current function. */
589 const char *current_function_file = "";
591 /* Number of nested .set noreorder, noat, nomacro, and volatile requests. */
597 /* The next branch instruction is a branch likely, not branch normal. */
598 int mips_branch_likely;
600 /* The operands passed to the last cmpMM expander. */
603 /* The target cpu for code generation. */
604 enum processor_type mips_arch;
605 const struct mips_cpu_info *mips_arch_info;
607 /* The target cpu for optimization and scheduling. */
608 enum processor_type mips_tune;
609 const struct mips_cpu_info *mips_tune_info;
611 /* Which instruction set architecture to use. */
614 /* Which ABI to use. */
615 int mips_abi = MIPS_ABI_DEFAULT;
617 /* Cost information to use. */
618 const struct mips_rtx_cost_data *mips_cost;
620 /* Whether we are generating mips16 hard float code. In mips16 mode
621 we always set TARGET_SOFT_FLOAT; this variable is nonzero if
622 -msoft-float was not specified by the user, which means that we
623 should arrange to call mips32 hard floating point code. */
624 int mips16_hard_float;
626 /* The architecture selected by -mipsN. */
627 static const struct mips_cpu_info *mips_isa_info;
629 /* If TRUE, we split addresses into their high and low parts in the RTL. */
630 int mips_split_addresses;
632 /* Mode used for saving/restoring general purpose registers. */
633 static enum machine_mode gpr_mode;
635 /* Array giving truth value on whether or not a given hard register
636 can support a given mode. */
637 char mips_hard_regno_mode_ok[(int)MAX_MACHINE_MODE][FIRST_PSEUDO_REGISTER];
639 /* List of all MIPS punctuation characters used by print_operand. */
640 char mips_print_operand_punct[256];
642 /* Map GCC register number to debugger register number. */
643 int mips_dbx_regno[FIRST_PSEUDO_REGISTER];
644 int mips_dwarf_regno[FIRST_PSEUDO_REGISTER];
646 /* A copy of the original flag_delayed_branch: see override_options. */
647 static int mips_flag_delayed_branch;
649 static GTY (()) int mips_output_filename_first_time = 1;
651 /* mips_split_p[X] is true if symbols of type X can be split by
652 mips_split_symbol(). */
653 bool mips_split_p[NUM_SYMBOL_TYPES];
655 /* mips_lo_relocs[X] is the relocation to use when a symbol of type X
656 appears in a LO_SUM. It can be null if such LO_SUMs aren't valid or
657 if they are matched by a special .md file pattern. */
658 static const char *mips_lo_relocs[NUM_SYMBOL_TYPES];
660 /* Likewise for HIGHs. */
661 static const char *mips_hi_relocs[NUM_SYMBOL_TYPES];
663 /* Map hard register number to register class */
664 const enum reg_class mips_regno_to_class[] =
666 LEA_REGS, LEA_REGS, M16_NA_REGS, V1_REG,
667 M16_REGS, M16_REGS, M16_REGS, M16_REGS,
668 LEA_REGS, LEA_REGS, LEA_REGS, LEA_REGS,
669 LEA_REGS, LEA_REGS, LEA_REGS, LEA_REGS,
670 M16_NA_REGS, M16_NA_REGS, LEA_REGS, LEA_REGS,
671 LEA_REGS, LEA_REGS, LEA_REGS, LEA_REGS,
672 T_REG, PIC_FN_ADDR_REG, LEA_REGS, LEA_REGS,
673 LEA_REGS, LEA_REGS, LEA_REGS, LEA_REGS,
674 FP_REGS, FP_REGS, FP_REGS, FP_REGS,
675 FP_REGS, FP_REGS, FP_REGS, FP_REGS,
676 FP_REGS, FP_REGS, FP_REGS, FP_REGS,
677 FP_REGS, FP_REGS, FP_REGS, FP_REGS,
678 FP_REGS, FP_REGS, FP_REGS, FP_REGS,
679 FP_REGS, FP_REGS, FP_REGS, FP_REGS,
680 FP_REGS, FP_REGS, FP_REGS, FP_REGS,
681 FP_REGS, FP_REGS, FP_REGS, FP_REGS,
682 MD0_REG, MD1_REG, NO_REGS, ST_REGS,
683 ST_REGS, ST_REGS, ST_REGS, ST_REGS,
684 ST_REGS, ST_REGS, ST_REGS, NO_REGS,
685 NO_REGS, ALL_REGS, ALL_REGS, NO_REGS,
686 COP0_REGS, COP0_REGS, COP0_REGS, COP0_REGS,
687 COP0_REGS, COP0_REGS, COP0_REGS, COP0_REGS,
688 COP0_REGS, COP0_REGS, COP0_REGS, COP0_REGS,
689 COP0_REGS, COP0_REGS, COP0_REGS, COP0_REGS,
690 COP0_REGS, COP0_REGS, COP0_REGS, COP0_REGS,
691 COP0_REGS, COP0_REGS, COP0_REGS, COP0_REGS,
692 COP0_REGS, COP0_REGS, COP0_REGS, COP0_REGS,
693 COP0_REGS, COP0_REGS, COP0_REGS, COP0_REGS,
694 COP2_REGS, COP2_REGS, COP2_REGS, COP2_REGS,
695 COP2_REGS, COP2_REGS, COP2_REGS, COP2_REGS,
696 COP2_REGS, COP2_REGS, COP2_REGS, COP2_REGS,
697 COP2_REGS, COP2_REGS, COP2_REGS, COP2_REGS,
698 COP2_REGS, COP2_REGS, COP2_REGS, COP2_REGS,
699 COP2_REGS, COP2_REGS, COP2_REGS, COP2_REGS,
700 COP2_REGS, COP2_REGS, COP2_REGS, COP2_REGS,
701 COP2_REGS, COP2_REGS, COP2_REGS, COP2_REGS,
702 COP3_REGS, COP3_REGS, COP3_REGS, COP3_REGS,
703 COP3_REGS, COP3_REGS, COP3_REGS, COP3_REGS,
704 COP3_REGS, COP3_REGS, COP3_REGS, COP3_REGS,
705 COP3_REGS, COP3_REGS, COP3_REGS, COP3_REGS,
706 COP3_REGS, COP3_REGS, COP3_REGS, COP3_REGS,
707 COP3_REGS, COP3_REGS, COP3_REGS, COP3_REGS,
708 COP3_REGS, COP3_REGS, COP3_REGS, COP3_REGS,
709 COP3_REGS, COP3_REGS, COP3_REGS, COP3_REGS,
710 DSP_ACC_REGS, DSP_ACC_REGS, DSP_ACC_REGS, DSP_ACC_REGS,
711 DSP_ACC_REGS, DSP_ACC_REGS, ALL_REGS, ALL_REGS,
712 ALL_REGS, ALL_REGS, ALL_REGS, ALL_REGS
715 /* Table of machine dependent attributes. */
716 const struct attribute_spec mips_attribute_table[] =
718 { "long_call", 0, 0, false, true, true, NULL },
719 { "far", 0, 0, false, true, true, NULL },
720 { "near", 0, 0, false, true, true, NULL },
721 { NULL, 0, 0, false, false, false, NULL }
724 /* A table describing all the processors gcc knows about. Names are
725 matched in the order listed. The first mention of an ISA level is
726 taken as the canonical name for that ISA.
728 To ease comparison, please keep this table in the same order as
729 gas's mips_cpu_info_table[]. Please also make sure that
730 MIPS_ISA_LEVEL_SPEC handles all -march options correctly. */
731 const struct mips_cpu_info mips_cpu_info_table[] = {
732 /* Entries for generic ISAs */
733 { "mips1", PROCESSOR_R3000, 1 },
734 { "mips2", PROCESSOR_R6000, 2 },
735 { "mips3", PROCESSOR_R4000, 3 },
736 { "mips4", PROCESSOR_R8000, 4 },
737 { "mips32", PROCESSOR_4KC, 32 },
738 { "mips32r2", PROCESSOR_M4K, 33 },
739 { "mips64", PROCESSOR_5KC, 64 },
742 { "r3000", PROCESSOR_R3000, 1 },
743 { "r2000", PROCESSOR_R3000, 1 }, /* = r3000 */
744 { "r3900", PROCESSOR_R3900, 1 },
747 { "r6000", PROCESSOR_R6000, 2 },
750 { "r4000", PROCESSOR_R4000, 3 },
751 { "vr4100", PROCESSOR_R4100, 3 },
752 { "vr4111", PROCESSOR_R4111, 3 },
753 { "vr4120", PROCESSOR_R4120, 3 },
754 { "vr4130", PROCESSOR_R4130, 3 },
755 { "vr4300", PROCESSOR_R4300, 3 },
756 { "r4400", PROCESSOR_R4000, 3 }, /* = r4000 */
757 { "r4600", PROCESSOR_R4600, 3 },
758 { "orion", PROCESSOR_R4600, 3 }, /* = r4600 */
759 { "r4650", PROCESSOR_R4650, 3 },
762 { "r8000", PROCESSOR_R8000, 4 },
763 { "vr5000", PROCESSOR_R5000, 4 },
764 { "vr5400", PROCESSOR_R5400, 4 },
765 { "vr5500", PROCESSOR_R5500, 4 },
766 { "rm7000", PROCESSOR_R7000, 4 },
767 { "rm9000", PROCESSOR_R9000, 4 },
770 { "4kc", PROCESSOR_4KC, 32 },
771 { "4km", PROCESSOR_4KC, 32 }, /* = 4kc */
772 { "4kp", PROCESSOR_4KP, 32 },
773 { "4ksc", PROCESSOR_4KC, 32 },
775 /* MIPS32 Release 2 */
776 { "m4k", PROCESSOR_M4K, 33 },
777 { "4kec", PROCESSOR_4KC, 33 },
778 { "4kem", PROCESSOR_4KC, 33 },
779 { "4kep", PROCESSOR_4KP, 33 },
780 { "4ksd", PROCESSOR_4KC, 33 },
782 { "24kc", PROCESSOR_24KC, 33 },
783 { "24kf2_1", PROCESSOR_24KF2_1, 33 },
784 { "24kf", PROCESSOR_24KF2_1, 33 },
785 { "24kf1_1", PROCESSOR_24KF1_1, 33 },
786 { "24kfx", PROCESSOR_24KF1_1, 33 },
787 { "24kx", PROCESSOR_24KF1_1, 33 },
789 { "24kec", PROCESSOR_24KC, 33 }, /* 24K with DSP */
790 { "24kef2_1", PROCESSOR_24KF2_1, 33 },
791 { "24kef", PROCESSOR_24KF2_1, 33 },
792 { "24kef1_1", PROCESSOR_24KF1_1, 33 },
793 { "24kefx", PROCESSOR_24KF1_1, 33 },
794 { "24kex", PROCESSOR_24KF1_1, 33 },
796 { "34kc", PROCESSOR_24KC, 33 }, /* 34K with MT/DSP */
797 { "34kf2_1", PROCESSOR_24KF2_1, 33 },
798 { "34kf", PROCESSOR_24KF2_1, 33 },
799 { "34kf1_1", PROCESSOR_24KF1_1, 33 },
800 { "34kfx", PROCESSOR_24KF1_1, 33 },
801 { "34kx", PROCESSOR_24KF1_1, 33 },
803 { "74kc", PROCESSOR_74KC, 33 }, /* 74K with DSPr2 */
804 { "74kf2_1", PROCESSOR_74KF2_1, 33 },
805 { "74kf", PROCESSOR_74KF2_1, 33 },
806 { "74kf1_1", PROCESSOR_74KF1_1, 33 },
807 { "74kfx", PROCESSOR_74KF1_1, 33 },
808 { "74kx", PROCESSOR_74KF1_1, 33 },
809 { "74kf3_2", PROCESSOR_74KF3_2, 33 },
812 { "5kc", PROCESSOR_5KC, 64 },
813 { "5kf", PROCESSOR_5KF, 64 },
814 { "20kc", PROCESSOR_20KC, 64 },
815 { "sb1", PROCESSOR_SB1, 64 },
816 { "sb1a", PROCESSOR_SB1A, 64 },
817 { "sr71000", PROCESSOR_SR71000, 64 },
/* Default costs.  If these are used for a processor we should look
   up the actual costs.  The fields are listed in the same order as
   they appear in struct mips_rtx_cost_data.  */
#define DEFAULT_COSTS COSTS_N_INSNS (6),  /* fp_add */		\
                      COSTS_N_INSNS (7),  /* fp_mult_sf */	\
                      COSTS_N_INSNS (8),  /* fp_mult_df */	\
                      COSTS_N_INSNS (23), /* fp_div_sf */	\
                      COSTS_N_INSNS (36), /* fp_div_df */	\
                      COSTS_N_INSNS (10), /* int_mult_si */	\
                      COSTS_N_INSNS (10), /* int_mult_di */	\
                      COSTS_N_INSNS (69), /* int_div_si */	\
                      COSTS_N_INSNS (69), /* int_div_di */	\
                      2,                  /* branch_cost */	\
                      4                   /* memory_latency */
/* Floating-point costs for -msoft-float.  Need to replace these with
   the costs of calling the appropriate libgcc functions; the large
   values below simply discourage FP-heavy code generation.  */
#define SOFT_FP_COSTS COSTS_N_INSNS (256), /* fp_add */		\
                      COSTS_N_INSNS (256), /* fp_mult_sf */	\
                      COSTS_N_INSNS (256), /* fp_mult_df */	\
                      COSTS_N_INSNS (256), /* fp_div_sf */	\
                      COSTS_N_INSNS (256)  /* fp_div_df */
845 static struct mips_rtx_cost_data const mips_rtx_cost_optimize_size =
847 COSTS_N_INSNS (1), /* fp_add */
848 COSTS_N_INSNS (1), /* fp_mult_sf */
849 COSTS_N_INSNS (1), /* fp_mult_df */
850 COSTS_N_INSNS (1), /* fp_div_sf */
851 COSTS_N_INSNS (1), /* fp_div_df */
852 COSTS_N_INSNS (1), /* int_mult_si */
853 COSTS_N_INSNS (1), /* int_mult_di */
854 COSTS_N_INSNS (1), /* int_div_si */
855 COSTS_N_INSNS (1), /* int_div_di */
857 4 /* memory_latency */
/* NOTE(review): this listing is elided -- the original line numbers embedded
   in each line are non-contiguous (e.g. 871 -> 873), so the per-processor
   entry braces, the DEFAULT_COSTS macro uses, the processor-name markers and
   several branch_cost fields are missing here.  Restore the table from the
   complete upstream file before compiling; do not trust entry boundaries as
   shown.  */
860 static struct mips_rtx_cost_data const mips_rtx_cost_data[PROCESSOR_MAX] =
863 COSTS_N_INSNS (2), /* fp_add */
864 COSTS_N_INSNS (4), /* fp_mult_sf */
865 COSTS_N_INSNS (5), /* fp_mult_df */
866 COSTS_N_INSNS (12), /* fp_div_sf */
867 COSTS_N_INSNS (19), /* fp_div_df */
868 COSTS_N_INSNS (12), /* int_mult_si */
869 COSTS_N_INSNS (12), /* int_mult_di */
870 COSTS_N_INSNS (35), /* int_div_si */
871 COSTS_N_INSNS (35), /* int_div_di */
873 4 /* memory_latency */
878 COSTS_N_INSNS (6), /* int_mult_si */
879 COSTS_N_INSNS (6), /* int_mult_di */
880 COSTS_N_INSNS (36), /* int_div_si */
881 COSTS_N_INSNS (36), /* int_div_di */
883 4 /* memory_latency */
887 COSTS_N_INSNS (36), /* int_mult_si */
888 COSTS_N_INSNS (36), /* int_mult_di */
889 COSTS_N_INSNS (37), /* int_div_si */
890 COSTS_N_INSNS (37), /* int_div_di */
892 4 /* memory_latency */
896 COSTS_N_INSNS (4), /* int_mult_si */
897 COSTS_N_INSNS (11), /* int_mult_di */
898 COSTS_N_INSNS (36), /* int_div_si */
899 COSTS_N_INSNS (68), /* int_div_di */
901 4 /* memory_latency */
904 COSTS_N_INSNS (4), /* fp_add */
905 COSTS_N_INSNS (4), /* fp_mult_sf */
906 COSTS_N_INSNS (5), /* fp_mult_df */
907 COSTS_N_INSNS (17), /* fp_div_sf */
908 COSTS_N_INSNS (32), /* fp_div_df */
909 COSTS_N_INSNS (4), /* int_mult_si */
910 COSTS_N_INSNS (11), /* int_mult_di */
911 COSTS_N_INSNS (36), /* int_div_si */
912 COSTS_N_INSNS (68), /* int_div_di */
914 4 /* memory_latency */
917 COSTS_N_INSNS (4), /* fp_add */
918 COSTS_N_INSNS (4), /* fp_mult_sf */
919 COSTS_N_INSNS (5), /* fp_mult_df */
920 COSTS_N_INSNS (17), /* fp_div_sf */
921 COSTS_N_INSNS (32), /* fp_div_df */
922 COSTS_N_INSNS (4), /* int_mult_si */
923 COSTS_N_INSNS (7), /* int_mult_di */
924 COSTS_N_INSNS (42), /* int_div_si */
925 COSTS_N_INSNS (72), /* int_div_di */
927 4 /* memory_latency */
931 COSTS_N_INSNS (5), /* int_mult_si */
932 COSTS_N_INSNS (5), /* int_mult_di */
933 COSTS_N_INSNS (41), /* int_div_si */
934 COSTS_N_INSNS (41), /* int_div_di */
936 4 /* memory_latency */
939 COSTS_N_INSNS (8), /* fp_add */
940 COSTS_N_INSNS (8), /* fp_mult_sf */
941 COSTS_N_INSNS (10), /* fp_mult_df */
942 COSTS_N_INSNS (34), /* fp_div_sf */
943 COSTS_N_INSNS (64), /* fp_div_df */
944 COSTS_N_INSNS (5), /* int_mult_si */
945 COSTS_N_INSNS (5), /* int_mult_di */
946 COSTS_N_INSNS (41), /* int_div_si */
947 COSTS_N_INSNS (41), /* int_div_di */
949 4 /* memory_latency */
952 COSTS_N_INSNS (4), /* fp_add */
953 COSTS_N_INSNS (4), /* fp_mult_sf */
954 COSTS_N_INSNS (5), /* fp_mult_df */
955 COSTS_N_INSNS (17), /* fp_div_sf */
956 COSTS_N_INSNS (32), /* fp_div_df */
957 COSTS_N_INSNS (5), /* int_mult_si */
958 COSTS_N_INSNS (5), /* int_mult_di */
959 COSTS_N_INSNS (41), /* int_div_si */
960 COSTS_N_INSNS (41), /* int_div_di */
962 4 /* memory_latency */
966 COSTS_N_INSNS (5), /* int_mult_si */
967 COSTS_N_INSNS (5), /* int_mult_di */
968 COSTS_N_INSNS (41), /* int_div_si */
969 COSTS_N_INSNS (41), /* int_div_di */
971 4 /* memory_latency */
974 COSTS_N_INSNS (8), /* fp_add */
975 COSTS_N_INSNS (8), /* fp_mult_sf */
976 COSTS_N_INSNS (10), /* fp_mult_df */
977 COSTS_N_INSNS (34), /* fp_div_sf */
978 COSTS_N_INSNS (64), /* fp_div_df */
979 COSTS_N_INSNS (5), /* int_mult_si */
980 COSTS_N_INSNS (5), /* int_mult_di */
981 COSTS_N_INSNS (41), /* int_div_si */
982 COSTS_N_INSNS (41), /* int_div_di */
984 4 /* memory_latency */
987 COSTS_N_INSNS (4), /* fp_add */
988 COSTS_N_INSNS (4), /* fp_mult_sf */
989 COSTS_N_INSNS (5), /* fp_mult_df */
990 COSTS_N_INSNS (17), /* fp_div_sf */
991 COSTS_N_INSNS (32), /* fp_div_df */
992 COSTS_N_INSNS (5), /* int_mult_si */
993 COSTS_N_INSNS (5), /* int_mult_di */
994 COSTS_N_INSNS (41), /* int_div_si */
995 COSTS_N_INSNS (41), /* int_div_di */
997 4 /* memory_latency */
1000 COSTS_N_INSNS (6), /* fp_add */
1001 COSTS_N_INSNS (6), /* fp_mult_sf */
1002 COSTS_N_INSNS (7), /* fp_mult_df */
1003 COSTS_N_INSNS (25), /* fp_div_sf */
1004 COSTS_N_INSNS (48), /* fp_div_df */
1005 COSTS_N_INSNS (5), /* int_mult_si */
1006 COSTS_N_INSNS (5), /* int_mult_di */
1007 COSTS_N_INSNS (41), /* int_div_si */
1008 COSTS_N_INSNS (41), /* int_div_di */
1009 1, /* branch_cost */
1010 4 /* memory_latency */
1016 COSTS_N_INSNS (2), /* fp_add */
1017 COSTS_N_INSNS (4), /* fp_mult_sf */
1018 COSTS_N_INSNS (5), /* fp_mult_df */
1019 COSTS_N_INSNS (12), /* fp_div_sf */
1020 COSTS_N_INSNS (19), /* fp_div_df */
1021 COSTS_N_INSNS (2), /* int_mult_si */
1022 COSTS_N_INSNS (2), /* int_mult_di */
1023 COSTS_N_INSNS (35), /* int_div_si */
1024 COSTS_N_INSNS (35), /* int_div_di */
1025 1, /* branch_cost */
1026 4 /* memory_latency */
1029 COSTS_N_INSNS (3), /* fp_add */
1030 COSTS_N_INSNS (5), /* fp_mult_sf */
1031 COSTS_N_INSNS (6), /* fp_mult_df */
1032 COSTS_N_INSNS (15), /* fp_div_sf */
1033 COSTS_N_INSNS (16), /* fp_div_df */
1034 COSTS_N_INSNS (17), /* int_mult_si */
1035 COSTS_N_INSNS (17), /* int_mult_di */
1036 COSTS_N_INSNS (38), /* int_div_si */
1037 COSTS_N_INSNS (38), /* int_div_di */
1038 2, /* branch_cost */
1039 6 /* memory_latency */
1042 COSTS_N_INSNS (6), /* fp_add */
1043 COSTS_N_INSNS (7), /* fp_mult_sf */
1044 COSTS_N_INSNS (8), /* fp_mult_df */
1045 COSTS_N_INSNS (23), /* fp_div_sf */
1046 COSTS_N_INSNS (36), /* fp_div_df */
1047 COSTS_N_INSNS (10), /* int_mult_si */
1048 COSTS_N_INSNS (10), /* int_mult_di */
1049 COSTS_N_INSNS (69), /* int_div_si */
1050 COSTS_N_INSNS (69), /* int_div_di */
1051 2, /* branch_cost */
1052 6 /* memory_latency */
1064 /* The only costs that appear to be updated here are
1065 integer multiplication. */
1067 COSTS_N_INSNS (4), /* int_mult_si */
1068 COSTS_N_INSNS (6), /* int_mult_di */
1069 COSTS_N_INSNS (69), /* int_div_si */
1070 COSTS_N_INSNS (69), /* int_div_di */
1071 1, /* branch_cost */
1072 4 /* memory_latency */
1084 COSTS_N_INSNS (6), /* fp_add */
1085 COSTS_N_INSNS (4), /* fp_mult_sf */
1086 COSTS_N_INSNS (5), /* fp_mult_df */
1087 COSTS_N_INSNS (23), /* fp_div_sf */
1088 COSTS_N_INSNS (36), /* fp_div_df */
1089 COSTS_N_INSNS (5), /* int_mult_si */
1090 COSTS_N_INSNS (5), /* int_mult_di */
1091 COSTS_N_INSNS (36), /* int_div_si */
1092 COSTS_N_INSNS (36), /* int_div_di */
1093 1, /* branch_cost */
1094 4 /* memory_latency */
1097 COSTS_N_INSNS (6), /* fp_add */
1098 COSTS_N_INSNS (5), /* fp_mult_sf */
1099 COSTS_N_INSNS (6), /* fp_mult_df */
1100 COSTS_N_INSNS (30), /* fp_div_sf */
1101 COSTS_N_INSNS (59), /* fp_div_df */
1102 COSTS_N_INSNS (3), /* int_mult_si */
1103 COSTS_N_INSNS (4), /* int_mult_di */
1104 COSTS_N_INSNS (42), /* int_div_si */
1105 COSTS_N_INSNS (74), /* int_div_di */
1106 1, /* branch_cost */
1107 4 /* memory_latency */
1110 COSTS_N_INSNS (6), /* fp_add */
1111 COSTS_N_INSNS (5), /* fp_mult_sf */
1112 COSTS_N_INSNS (6), /* fp_mult_df */
1113 COSTS_N_INSNS (30), /* fp_div_sf */
1114 COSTS_N_INSNS (59), /* fp_div_df */
1115 COSTS_N_INSNS (5), /* int_mult_si */
1116 COSTS_N_INSNS (9), /* int_mult_di */
1117 COSTS_N_INSNS (42), /* int_div_si */
1118 COSTS_N_INSNS (74), /* int_div_di */
1119 1, /* branch_cost */
1120 4 /* memory_latency */
1123 /* The only costs that are changed here are
1124 integer multiplication. */
1125 COSTS_N_INSNS (6), /* fp_add */
1126 COSTS_N_INSNS (7), /* fp_mult_sf */
1127 COSTS_N_INSNS (8), /* fp_mult_df */
1128 COSTS_N_INSNS (23), /* fp_div_sf */
1129 COSTS_N_INSNS (36), /* fp_div_df */
1130 COSTS_N_INSNS (5), /* int_mult_si */
1131 COSTS_N_INSNS (9), /* int_mult_di */
1132 COSTS_N_INSNS (69), /* int_div_si */
1133 COSTS_N_INSNS (69), /* int_div_di */
1134 1, /* branch_cost */
1135 4 /* memory_latency */
1141 /* The only costs that are changed here are
1142 integer multiplication. */
1143 COSTS_N_INSNS (6), /* fp_add */
1144 COSTS_N_INSNS (7), /* fp_mult_sf */
1145 COSTS_N_INSNS (8), /* fp_mult_df */
1146 COSTS_N_INSNS (23), /* fp_div_sf */
1147 COSTS_N_INSNS (36), /* fp_div_df */
1148 COSTS_N_INSNS (3), /* int_mult_si */
1149 COSTS_N_INSNS (8), /* int_mult_di */
1150 COSTS_N_INSNS (69), /* int_div_si */
1151 COSTS_N_INSNS (69), /* int_div_di */
1152 1, /* branch_cost */
1153 4 /* memory_latency */
1156 /* These costs are the same as the SB-1A below. */
1157 COSTS_N_INSNS (4), /* fp_add */
1158 COSTS_N_INSNS (4), /* fp_mult_sf */
1159 COSTS_N_INSNS (4), /* fp_mult_df */
1160 COSTS_N_INSNS (24), /* fp_div_sf */
1161 COSTS_N_INSNS (32), /* fp_div_df */
1162 COSTS_N_INSNS (3), /* int_mult_si */
1163 COSTS_N_INSNS (4), /* int_mult_di */
1164 COSTS_N_INSNS (36), /* int_div_si */
1165 COSTS_N_INSNS (68), /* int_div_di */
1166 1, /* branch_cost */
1167 4 /* memory_latency */
1170 /* These costs are the same as the SB-1 above. */
1171 COSTS_N_INSNS (4), /* fp_add */
1172 COSTS_N_INSNS (4), /* fp_mult_sf */
1173 COSTS_N_INSNS (4), /* fp_mult_df */
1174 COSTS_N_INSNS (24), /* fp_div_sf */
1175 COSTS_N_INSNS (32), /* fp_div_df */
1176 COSTS_N_INSNS (3), /* int_mult_si */
1177 COSTS_N_INSNS (4), /* int_mult_di */
1178 COSTS_N_INSNS (36), /* int_div_si */
1179 COSTS_N_INSNS (68), /* int_div_di */
1180 1, /* branch_cost */
1181 4 /* memory_latency */
/* NOTE(review): the array's closing "};" (and possibly trailing entries) are
   also elided from this listing.  */
1188 /* If a MIPS16e SAVE or RESTORE instruction saves or restores register
1189 mips16e_s2_s8_regs[X], it must also save the registers in indexes
1190 X + 1 onwards. Likewise mips16e_a0_a3_regs. */
1191 static const unsigned char mips16e_s2_s8_regs[] = {
1192 30, 23, 22, 21, 20, 19, 18
/* NOTE(review): closing "};" of mips16e_s2_s8_regs (line 1193) and the whole
   initializer of mips16e_a0_a3_regs (lines 1195-1196) are elided from this
   listing -- restore from the complete file.  */
1194 static const unsigned char mips16e_a0_a3_regs[] = {
1198 /* A list of the registers that can be saved by the MIPS16e SAVE instruction,
1199 ordered from the uppermost in memory to the lowest in memory. */
1200 static const unsigned char mips16e_save_restore_regs[] = {
1201 31, 30, 23, 22, 21, 20, 19, 18, 17, 16, 7, 6, 5, 4
/* NOTE(review): closing "};" elided here as well.  */
/* Target-hook initialization table: each pair of #undef/#define overrides a
   default hook from target-def.h; TARGET_INITIALIZER below collects them all
   into the global `targetm' structure.
   NOTE(review): this listing is elided -- the embedded line numbers jump
   (e.g. 1206 -> 1209, 1348 -> 1351), so at least the #endif matching the
   #ifndef at 1205, the continuation lines of TARGET_DEFAULT_TARGET_FLAGS
   (1240, 1245), and the #endif matching #ifdef HAVE_AS_DTPRELWORD are
   missing.  Restore from the complete file before compiling.  */
1204 /* Nonzero if -march should decide the default value of MASK_SOFT_FLOAT. */
1205 #ifndef MIPS_MARCH_CONTROLS_SOFT_FLOAT
1206 #define MIPS_MARCH_CONTROLS_SOFT_FLOAT 0
1209 /* Initialize the GCC target structure. */
1210 #undef TARGET_ASM_ALIGNED_HI_OP
1211 #define TARGET_ASM_ALIGNED_HI_OP "\t.half\t"
1212 #undef TARGET_ASM_ALIGNED_SI_OP
1213 #define TARGET_ASM_ALIGNED_SI_OP "\t.word\t"
1214 #undef TARGET_ASM_ALIGNED_DI_OP
1215 #define TARGET_ASM_ALIGNED_DI_OP "\t.dword\t"
1217 #undef TARGET_ASM_FUNCTION_PROLOGUE
1218 #define TARGET_ASM_FUNCTION_PROLOGUE mips_output_function_prologue
1219 #undef TARGET_ASM_FUNCTION_EPILOGUE
1220 #define TARGET_ASM_FUNCTION_EPILOGUE mips_output_function_epilogue
1221 #undef TARGET_ASM_SELECT_RTX_SECTION
1222 #define TARGET_ASM_SELECT_RTX_SECTION mips_select_rtx_section
1223 #undef TARGET_ASM_FUNCTION_RODATA_SECTION
1224 #define TARGET_ASM_FUNCTION_RODATA_SECTION mips_function_rodata_section
1226 #undef TARGET_SCHED_REORDER
1227 #define TARGET_SCHED_REORDER mips_sched_reorder
1228 #undef TARGET_SCHED_VARIABLE_ISSUE
1229 #define TARGET_SCHED_VARIABLE_ISSUE mips_variable_issue
1230 #undef TARGET_SCHED_ADJUST_COST
1231 #define TARGET_SCHED_ADJUST_COST mips_adjust_cost
1232 #undef TARGET_SCHED_ISSUE_RATE
1233 #define TARGET_SCHED_ISSUE_RATE mips_issue_rate
1234 #undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD
1235 #define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD \
1236 mips_multipass_dfa_lookahead
1238 #undef TARGET_DEFAULT_TARGET_FLAGS
1239 #define TARGET_DEFAULT_TARGET_FLAGS \
1241 | TARGET_CPU_DEFAULT \
1242 | TARGET_ENDIAN_DEFAULT \
1243 | TARGET_FP_EXCEPTIONS_DEFAULT \
1244 | MASK_CHECK_ZERO_DIV \
1246 #undef TARGET_HANDLE_OPTION
1247 #define TARGET_HANDLE_OPTION mips_handle_option
1249 #undef TARGET_FUNCTION_OK_FOR_SIBCALL
1250 #define TARGET_FUNCTION_OK_FOR_SIBCALL mips_function_ok_for_sibcall
1252 #undef TARGET_VALID_POINTER_MODE
1253 #define TARGET_VALID_POINTER_MODE mips_valid_pointer_mode
1254 #undef TARGET_RTX_COSTS
1255 #define TARGET_RTX_COSTS mips_rtx_costs
1256 #undef TARGET_ADDRESS_COST
1257 #define TARGET_ADDRESS_COST mips_address_cost
1259 #undef TARGET_IN_SMALL_DATA_P
1260 #define TARGET_IN_SMALL_DATA_P mips_in_small_data_p
1262 #undef TARGET_MACHINE_DEPENDENT_REORG
1263 #define TARGET_MACHINE_DEPENDENT_REORG mips_reorg
1265 #undef TARGET_ASM_FILE_START
1266 #define TARGET_ASM_FILE_START mips_file_start
1267 #undef TARGET_ASM_FILE_START_FILE_DIRECTIVE
1268 #define TARGET_ASM_FILE_START_FILE_DIRECTIVE true
1270 #undef TARGET_INIT_LIBFUNCS
1271 #define TARGET_INIT_LIBFUNCS mips_init_libfuncs
1273 #undef TARGET_BUILD_BUILTIN_VA_LIST
1274 #define TARGET_BUILD_BUILTIN_VA_LIST mips_build_builtin_va_list
1275 #undef TARGET_GIMPLIFY_VA_ARG_EXPR
1276 #define TARGET_GIMPLIFY_VA_ARG_EXPR mips_gimplify_va_arg_expr
1278 #undef TARGET_PROMOTE_FUNCTION_ARGS
1279 #define TARGET_PROMOTE_FUNCTION_ARGS hook_bool_tree_true
1280 #undef TARGET_PROMOTE_FUNCTION_RETURN
1281 #define TARGET_PROMOTE_FUNCTION_RETURN hook_bool_tree_true
1282 #undef TARGET_PROMOTE_PROTOTYPES
1283 #define TARGET_PROMOTE_PROTOTYPES hook_bool_tree_true
1285 #undef TARGET_RETURN_IN_MEMORY
1286 #define TARGET_RETURN_IN_MEMORY mips_return_in_memory
1287 #undef TARGET_RETURN_IN_MSB
1288 #define TARGET_RETURN_IN_MSB mips_return_in_msb
1290 #undef TARGET_ASM_OUTPUT_MI_THUNK
1291 #define TARGET_ASM_OUTPUT_MI_THUNK mips_output_mi_thunk
1292 #undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
1293 #define TARGET_ASM_CAN_OUTPUT_MI_THUNK hook_bool_tree_hwi_hwi_tree_true
1295 #undef TARGET_SETUP_INCOMING_VARARGS
1296 #define TARGET_SETUP_INCOMING_VARARGS mips_setup_incoming_varargs
1297 #undef TARGET_STRICT_ARGUMENT_NAMING
1298 #define TARGET_STRICT_ARGUMENT_NAMING mips_strict_argument_naming
1299 #undef TARGET_MUST_PASS_IN_STACK
1300 #define TARGET_MUST_PASS_IN_STACK must_pass_in_stack_var_size
1301 #undef TARGET_PASS_BY_REFERENCE
1302 #define TARGET_PASS_BY_REFERENCE mips_pass_by_reference
1303 #undef TARGET_CALLEE_COPIES
1304 #define TARGET_CALLEE_COPIES mips_callee_copies
1305 #undef TARGET_ARG_PARTIAL_BYTES
1306 #define TARGET_ARG_PARTIAL_BYTES mips_arg_partial_bytes
1308 #undef TARGET_MODE_REP_EXTENDED
1309 #define TARGET_MODE_REP_EXTENDED mips_mode_rep_extended
1311 #undef TARGET_VECTOR_MODE_SUPPORTED_P
1312 #define TARGET_VECTOR_MODE_SUPPORTED_P mips_vector_mode_supported_p
1314 #undef TARGET_INIT_BUILTINS
1315 #define TARGET_INIT_BUILTINS mips_init_builtins
1316 #undef TARGET_EXPAND_BUILTIN
1317 #define TARGET_EXPAND_BUILTIN mips_expand_builtin
1319 #undef TARGET_HAVE_TLS
1320 #define TARGET_HAVE_TLS HAVE_AS_TLS
1322 #undef TARGET_CANNOT_FORCE_CONST_MEM
1323 #define TARGET_CANNOT_FORCE_CONST_MEM mips_cannot_force_const_mem
1325 #undef TARGET_ENCODE_SECTION_INFO
1326 #define TARGET_ENCODE_SECTION_INFO mips_encode_section_info
1328 #undef TARGET_ATTRIBUTE_TABLE
1329 #define TARGET_ATTRIBUTE_TABLE mips_attribute_table
1331 #undef TARGET_EXTRA_LIVE_ON_ENTRY
1332 #define TARGET_EXTRA_LIVE_ON_ENTRY mips_extra_live_on_entry
1334 #undef TARGET_MIN_ANCHOR_OFFSET
1335 #define TARGET_MIN_ANCHOR_OFFSET -32768
1336 #undef TARGET_MAX_ANCHOR_OFFSET
1337 #define TARGET_MAX_ANCHOR_OFFSET 32767
1338 #undef TARGET_USE_BLOCKS_FOR_CONSTANT_P
1339 #define TARGET_USE_BLOCKS_FOR_CONSTANT_P mips_use_blocks_for_constant_p
1340 #undef TARGET_USE_ANCHORS_FOR_SYMBOL_P
1341 #define TARGET_USE_ANCHORS_FOR_SYMBOL_P mips_use_anchors_for_symbol_p
1343 #undef TARGET_COMP_TYPE_ATTRIBUTES
1344 #define TARGET_COMP_TYPE_ATTRIBUTES mips_comp_type_attributes
1346 #ifdef HAVE_AS_DTPRELWORD
1347 #undef TARGET_ASM_OUTPUT_DWARF_DTPREL
1348 #define TARGET_ASM_OUTPUT_DWARF_DTPREL mips_output_dwarf_dtprel
1351 struct gcc_target targetm = TARGET_INITIALIZER;
1354 /* Predicates to test for presence of "near" and "far"/"long_call"
1355 attributes on the given TYPE. */
1358 mips_near_type_p (tree type)
1360 return lookup_attribute ("near", TYPE_ATTRIBUTES (type)) != NULL;
1364 mips_far_type_p (tree type)
1366 return (lookup_attribute ("long_call", TYPE_ATTRIBUTES (type)) != NULL
1367 || lookup_attribute ("far", TYPE_ATTRIBUTES (type)) != NULL);
1371 /* Return 0 if the attributes for two types are incompatible, 1 if they
1372 are compatible, and 2 if they are nearly compatible (which causes a
1373 warning to be generated). */
1376 mips_comp_type_attributes (tree type1, tree type2)
1378 /* Check for mismatch of non-default calling convention. */
1379 if (TREE_CODE (type1) != FUNCTION_TYPE)
1382 /* Disallow mixed near/far attributes. */
1383 if (mips_far_type_p (type1) && mips_near_type_p (type2))
1385 if (mips_near_type_p (type1) && mips_far_type_p (type2))
1391 /* If X is a PLUS of a CONST_INT, return the two terms in *BASE_PTR
1392 and *OFFSET_PTR. Return X in *BASE_PTR and 0 in *OFFSET_PTR otherwise. */
1395 mips_split_plus (rtx x, rtx *base_ptr, HOST_WIDE_INT *offset_ptr)
1397 if (GET_CODE (x) == PLUS && GET_CODE (XEXP (x, 1)) == CONST_INT)
1399 *base_ptr = XEXP (x, 0);
1400 *offset_ptr = INTVAL (XEXP (x, 1));
1409 /* Return true if SYMBOL_REF X is associated with a global symbol
1410 (in the STB_GLOBAL sense). */
1413 mips_global_symbol_p (rtx x)
1417 decl = SYMBOL_REF_DECL (x);
1419 return !SYMBOL_REF_LOCAL_P (x);
1421 /* Weakref symbols are not TREE_PUBLIC, but their targets are global
1422 or weak symbols. Relocations in the object file will be against
1423 the target symbol, so it's that symbol's binding that matters here. */
1424 return DECL_P (decl) && (TREE_PUBLIC (decl) || DECL_WEAK (decl));
1427 /* Return true if SYMBOL_REF X binds locally. */
1430 mips_symbol_binds_local_p (rtx x)
1432 return (SYMBOL_REF_DECL (x)
1433 ? targetm.binds_local_p (SYMBOL_REF_DECL (x))
1434 : SYMBOL_REF_LOCAL_P (x));
/* NOTE(review): this listing is elided -- the embedded line numbers jump
   (e.g. 1441 -> 1444, 1457 -> 1460), so the opening brace, several guard
   conditions (the test that yields SYMBOL_GOT_DISP at 1444, the mips16
   jump-table test around 1447-1448, the SYMBOL_TLS return around 1458, the
   "if (TARGET_MIPS16)" guard before 1463, and part of the abicalls condition
   at 1477-1478) are missing.  Restore from the complete file; the control
   flow cannot be reconstructed from what is shown.  */
1437 /* Return the method that should be used to access SYMBOL_REF or
1438 LABEL_REF X in context CONTEXT. */
1440 static enum mips_symbol_type
1441 mips_classify_symbol (rtx x, enum mips_symbol_context context)
1444 return SYMBOL_GOT_DISP;
1446 if (GET_CODE (x) == LABEL_REF)
1449 return SYMBOL_PC_RELATIVE;
1450 if (TARGET_ABICALLS && !TARGET_ABSOLUTE_ABICALLS)
1451 return SYMBOL_GOT_PAGE_OFST;
1452 return SYMBOL_ABSOLUTE;
1455 gcc_assert (GET_CODE (x) == SYMBOL_REF);
1457 if (SYMBOL_REF_TLS_MODEL (x))
1460 if (CONSTANT_POOL_ADDRESS_P (x))
1463 return SYMBOL_PC_RELATIVE;
1465 if (!TARGET_EMBEDDED_DATA
1466 && GET_MODE_SIZE (get_pool_mode (x)) <= mips_section_threshold)
1467 return SYMBOL_GP_RELATIVE;
1470 /* Do not use small-data accesses for weak symbols; they may end up
1472 if (SYMBOL_REF_SMALL_P (x)
1473 && !SYMBOL_REF_WEAK (x))
1474 return SYMBOL_GP_RELATIVE;
1476 /* Don't use GOT accesses for locally-binding symbols when -mno-shared
1479 && !(TARGET_ABSOLUTE_ABICALLS && mips_symbol_binds_local_p (x)))
1481 /* There are three cases to consider:
1483 - o32 PIC (either with or without explicit relocs)
1484 - n32/n64 PIC without explicit relocs
1485 - n32/n64 PIC with explicit relocs
1487 In the first case, both local and global accesses will use an
1488 R_MIPS_GOT16 relocation. We must correctly predict which of
1489 the two semantics (local or global) the assembler and linker
1490 will apply. The choice depends on the symbol's binding rather
1491 than its visibility.
1493 In the second case, the assembler will not use R_MIPS_GOT16
1494 relocations, but it chooses between local and global accesses
1495 in the same way as for o32 PIC.
1497 In the third case we have more freedom since both forms of
1498 access will work for any kind of symbol. However, there seems
1499 little point in doing things differently. */
1500 if (mips_global_symbol_p (x))
1501 return SYMBOL_GOT_DISP;
1503 return SYMBOL_GOT_PAGE_OFST;
1506 if (TARGET_MIPS16 && context != SYMBOL_CONTEXT_CALL)
1507 return SYMBOL_FORCE_TO_MEM;
1508 return SYMBOL_ABSOLUTE;
1511 /* Return true if OFFSET is within the range [0, ALIGN), where ALIGN
1512 is the alignment (in bytes) of SYMBOL_REF X. */
1515 mips_offset_within_alignment_p (rtx x, HOST_WIDE_INT offset)
1517 /* If for some reason we can't get the alignment for the
1518 symbol, initializing this to one means we will only accept
1520 HOST_WIDE_INT align = 1;
1523 /* Get the alignment of the symbol we're referring to. */
1524 t = SYMBOL_REF_DECL (x);
1526 align = DECL_ALIGN_UNIT (t);
1528 return offset >= 0 && offset < align;
/* NOTE(review): this listing is elided -- the embedded line numbers jump
   (e.g. 1536 -> 1540, 1544 -> 1546, 1549 -> 1555), so the return type line,
   braces, the "return false"/"return true" statements, the HI/LO REL case
   labels before 1603, and the function's closing code after 1613 are
   missing.  Restore from the complete file before compiling.  */
1531 /* Return true if X is a symbolic constant that can be used in context
1532 CONTEXT. If it is, store the type of the symbol in *SYMBOL_TYPE. */
1535 mips_symbolic_constant_p (rtx x, enum mips_symbol_context context,
1536 enum mips_symbol_type *symbol_type)
1540 split_const (x, &x, &offset);
1541 if (UNSPEC_ADDRESS_P (x))
1543 *symbol_type = UNSPEC_ADDRESS_TYPE (x);
1544 x = UNSPEC_ADDRESS (x);
1546 else if (GET_CODE (x) == SYMBOL_REF || GET_CODE (x) == LABEL_REF)
1548 *symbol_type = mips_classify_symbol (x, context);
1549 if (*symbol_type == SYMBOL_TLS)
1555 if (offset == const0_rtx)
1558 /* Check whether a nonzero offset is valid for the underlying
1560 switch (*symbol_type)
1562 case SYMBOL_ABSOLUTE:
1563 case SYMBOL_FORCE_TO_MEM:
1564 case SYMBOL_64_HIGH:
1567 /* If the target has 64-bit pointers and the object file only
1568 supports 32-bit symbols, the values of those symbols will be
1569 sign-extended. In this case we can't allow an arbitrary offset
1570 in case the 32-bit value X + OFFSET has a different sign from X. */
1571 if (Pmode == DImode && !ABI_HAS_64BIT_SYMBOLS)
1572 return offset_within_block_p (x, INTVAL (offset));
1574 /* In other cases the relocations can handle any offset. */
1577 case SYMBOL_PC_RELATIVE:
1578 /* Allow constant pool references to be converted to LABEL+CONSTANT.
1579 In this case, we no longer have access to the underlying constant,
1580 but the original symbol-based access was known to be valid. */
1581 if (GET_CODE (x) == LABEL_REF)
1586 case SYMBOL_GP_RELATIVE:
1587 /* Make sure that the offset refers to something within the
1588 same object block. This should guarantee that the final
1589 PC- or GP-relative offset is within the 16-bit limit. */
1590 return offset_within_block_p (x, INTVAL (offset));
1592 case SYMBOL_GOT_PAGE_OFST:
1593 case SYMBOL_GOTOFF_PAGE:
1594 /* If the symbol is global, the GOT entry will contain the symbol's
1595 address, and we will apply a 16-bit offset after loading it.
1596 If the symbol is local, the linker should provide enough local
1597 GOT entries for a 16-bit offset, but larger offsets may lead
1599 return SMALL_INT (offset);
1603 /* There is no carry between the HI and LO REL relocations, so the
1604 offset is only valid if we know it won't lead to such a carry. */
1605 return mips_offset_within_alignment_p (x, INTVAL (offset));
1607 case SYMBOL_GOT_DISP:
1608 case SYMBOL_GOTOFF_DISP:
1609 case SYMBOL_GOTOFF_CALL:
1610 case SYMBOL_GOTOFF_LOADGP:
1613 case SYMBOL_GOTTPREL:
1622 /* This function is used to implement REG_MODE_OK_FOR_BASE_P. */
1625 mips_regno_mode_ok_for_base_p (int regno, enum machine_mode mode, int strict)
1627 if (!HARD_REGISTER_NUM_P (regno))
1631 regno = reg_renumber[regno];
1634 /* These fake registers will be eliminated to either the stack or
1635 hard frame pointer, both of which are usually valid base registers.
1636 Reload deals with the cases where the eliminated form isn't valid. */
1637 if (regno == ARG_POINTER_REGNUM || regno == FRAME_POINTER_REGNUM)
1640 /* In mips16 mode, the stack pointer can only address word and doubleword
1641 values, nothing smaller. There are two problems here:
1643 (a) Instantiating virtual registers can introduce new uses of the
1644 stack pointer. If these virtual registers are valid addresses,
1645 the stack pointer should be too.
1647 (b) Most uses of the stack pointer are not made explicit until
1648 FRAME_POINTER_REGNUM and ARG_POINTER_REGNUM have been eliminated.
1649 We don't know until that stage whether we'll be eliminating to the
1650 stack pointer (which needs the restriction) or the hard frame
1651 pointer (which doesn't).
1653 All in all, it seems more consistent to only enforce this restriction
1654 during and after reload. */
1655 if (TARGET_MIPS16 && regno == STACK_POINTER_REGNUM)
1656 return !strict || GET_MODE_SIZE (mode) == 4 || GET_MODE_SIZE (mode) == 8;
1658 return TARGET_MIPS16 ? M16_REG_P (regno) : GP_REG_P (regno);
1662 /* Return true if X is a valid base register for the given mode.
1663 Allow only hard registers if STRICT. */
1666 mips_valid_base_register_p (rtx x, enum machine_mode mode, int strict)
1668 if (!strict && GET_CODE (x) == SUBREG)
1672 && mips_regno_mode_ok_for_base_p (REGNO (x), mode, strict));
/* NOTE(review): this listing is elided -- the embedded line numbers jump
   (e.g. 1686 -> 1689, 1693 -> 1696), so the return type line, braces, the
   "return true"/"return false" statements for most cases, and the default
   case/closing code after 1712 are missing.  Restore from the complete
   file; do not infer case outcomes from adjacency here.  */
1676 /* Return true if symbols of type SYMBOL_TYPE can directly address a value
1677 with mode MODE. This is used for both symbolic and LO_SUM addresses. */
1680 mips_symbolic_address_p (enum mips_symbol_type symbol_type,
1681 enum machine_mode mode)
1683 switch (symbol_type)
1685 case SYMBOL_ABSOLUTE:
1686 case SYMBOL_GP_RELATIVE:
1689 case SYMBOL_PC_RELATIVE:
1690 /* PC-relative addressing is only available for lw and ld. */
1691 return GET_MODE_SIZE (mode) == 4 || GET_MODE_SIZE (mode) == 8;
1693 case SYMBOL_GOT_PAGE_OFST:
1696 case SYMBOL_FORCE_TO_MEM:
1697 case SYMBOL_GOT_DISP:
1698 /* The address will have to be loaded from the constant pool
1699 or GOT before it is used in an address. */
1702 case SYMBOL_GOTOFF_PAGE:
1703 case SYMBOL_GOTOFF_DISP:
1704 case SYMBOL_GOTOFF_CALL:
1705 case SYMBOL_GOTOFF_LOADGP:
1710 case SYMBOL_GOTTPREL:
1712 case SYMBOL_64_HIGH:
/* NOTE(review): this listing is elided -- the embedded line numbers jump
   (e.g. 1730 -> 1734, 1760 -> 1765), so the case labels (REG, PLUS, LO_SUM,
   CONST_INT, CONST/LABEL_REF/SYMBOL_REF), the "info->reg = x;" assignment
   around 1735, the &info->symbol_type arguments at 1752/1767, and the
   default case are missing.  Restore from the complete file.  */
1722 /* Return true if X is a valid address for machine mode MODE. If it is,
1723 fill in INFO appropriately. STRICT is true if we should only accept
1724 hard base registers. */
1727 mips_classify_address (struct mips_address_info *info, rtx x,
1728 enum machine_mode mode, int strict)
1730 switch (GET_CODE (x))
1734 info->type = ADDRESS_REG;
1736 info->offset = const0_rtx;
1737 return mips_valid_base_register_p (info->reg, mode, strict);
1740 info->type = ADDRESS_REG;
1741 info->reg = XEXP (x, 0);
1742 info->offset = XEXP (x, 1);
1743 return (mips_valid_base_register_p (info->reg, mode, strict)
1744 && const_arith_operand (info->offset, VOIDmode));
1747 info->type = ADDRESS_LO_SUM;
1748 info->reg = XEXP (x, 0);
1749 info->offset = XEXP (x, 1);
1750 return (mips_valid_base_register_p (info->reg, mode, strict)
1751 && mips_symbolic_constant_p (info->offset, SYMBOL_CONTEXT_MEM,
1753 && mips_symbolic_address_p (info->symbol_type, mode)
1754 && mips_lo_relocs[info->symbol_type] != 0);
1757 /* Small-integer addresses don't occur very often, but they
1758 are legitimate if $0 is a valid base register. */
1759 info->type = ADDRESS_CONST_INT;
1760 return !TARGET_MIPS16 && SMALL_INT (x);
1765 info->type = ADDRESS_SYMBOLIC;
1766 return (mips_symbolic_constant_p (x, SYMBOL_CONTEXT_MEM,
1768 && mips_symbolic_address_p (info->symbol_type, mode)
1769 && !mips_split_p[info->symbol_type]);
1776 /* Return true if X is a thread-local symbol. */
1779 mips_tls_operand_p (rtx x)
1781 return GET_CODE (x) == SYMBOL_REF && SYMBOL_REF_TLS_MODEL (x) != 0;
1784 /* Return true if X can not be forced into a constant pool. */
1787 mips_tls_symbol_ref_1 (rtx *x, void *data ATTRIBUTE_UNUSED)
1789 return mips_tls_operand_p (*x);
/* NOTE(review): this listing is elided -- the embedded line numbers jump
   (1795 -> 1801, 1810 -> 1813, 1814 -> 1818), so the return type line, the
   declarations of `base' and `offset', any guard wrapping the CONST_INT
   check, and the "return true"/"return false" statements are missing.
   Restore from the complete file; the guard structure cannot be inferred
   from what is shown.  */
1792 /* Return true if X can not be forced into a constant pool. */
1795 mips_cannot_force_const_mem (rtx x)
1801 /* As an optimization, reject constants that mips_legitimize_move
1804 Suppose we have a multi-instruction sequence that loads constant C
1805 into register R. If R does not get allocated a hard register, and
1806 R is used in an operand that allows both registers and memory
1807 references, reload will consider forcing C into memory and using
1808 one of the instruction's memory alternatives. Returning false
1809 here will force it to use an input reload instead. */
1810 if (GET_CODE (x) == CONST_INT)
1813 split_const (x, &base, &offset);
1814 if (symbolic_operand (base, VOIDmode) && SMALL_INT (offset))
1818 if (TARGET_HAVE_TLS && for_each_rtx (&x, &mips_tls_symbol_ref_1, 0))
1824 /* Implement TARGET_USE_BLOCKS_FOR_CONSTANT_P. MIPS16 uses per-function
1825 constant pools, but normal-mode code doesn't need to. */
1828 mips_use_blocks_for_constant_p (enum machine_mode mode ATTRIBUTE_UNUSED,
1829 rtx x ATTRIBUTE_UNUSED)
1831 return !TARGET_MIPS16;
/* NOTE(review): this listing is elided -- the embedded line numbers jump
   (e.g. 1840 -> 1844, 1858 -> 1862, 1899 -> 1905), so the return type line,
   the opening switch, several case return statements (GP_RELATIVE,
   PC_RELATIVE, FORCE_TO_MEM, GOT cases), parts of the instruction-sequence
   comments, and the default/SYMBOL_TLS handling are missing.  Restore from
   the complete file; do not infer case costs from adjacency.  */
1834 /* Return the number of instructions needed to load a symbol of the
1835 given type into a register. If valid in an address, the same number
1836 of instructions are needed for loads and stores. Treat extended
1837 mips16 instructions as two instructions. */
1840 mips_symbol_insns (enum mips_symbol_type type)
1844 case SYMBOL_ABSOLUTE:
1845 /* When using 64-bit symbols, we need 5 preparatory instructions,
1848 lui $at,%highest(symbol)
1849 daddiu $at,$at,%higher(symbol)
1851 daddiu $at,$at,%hi(symbol)
1854 The final address is then $at + %lo(symbol). With 32-bit
1855 symbols we just need a preparatory lui. */
1856 return (ABI_HAS_64BIT_SYMBOLS ? 6 : 2);
1858 case SYMBOL_GP_RELATIVE:
1862 case SYMBOL_PC_RELATIVE:
1863 /* This case is for mips16 only. Assume we'll need an
1864 extended instruction. */
1867 case SYMBOL_FORCE_TO_MEM:
1868 /* The constant must be loaded from the constant pool. */
1871 case SYMBOL_GOT_PAGE_OFST:
1872 case SYMBOL_GOT_DISP:
1873 /* Unless -funit-at-a-time is in effect, we can't be sure whether
1874 the local/global classification is accurate. See override_options
1877 The worst cases are:
1879 (1) For local symbols when generating o32 or o64 code. The assembler
1885 ...and the final address will be $at + %lo(symbol).
1887 (2) For global symbols when -mxgot. The assembler will use:
1889 lui $at,%got_hi(symbol)
1892 ...and the final address will be $at + %got_lo(symbol). */
1895 case SYMBOL_GOTOFF_PAGE:
1896 case SYMBOL_GOTOFF_DISP:
1897 case SYMBOL_GOTOFF_CALL:
1898 case SYMBOL_GOTOFF_LOADGP:
1899 case SYMBOL_64_HIGH:
1905 case SYMBOL_GOTTPREL:
1907 /* Check whether the offset is a 16- or 32-bit value. */
1908 return mips_split_p[type] ? 2 : 1;
1911 /* We don't treat a bare TLS symbol as a constant. */
1917 /* Return true if X is a legitimate $sp-based address for mode MDOE. */
1920 mips_stack_address_p (rtx x, enum machine_mode mode)
1922 struct mips_address_info addr;
1924 return (mips_classify_address (&addr, x, mode, false)
1925 && addr.type == ADDRESS_REG
1926 && addr.reg == stack_pointer_rtx);
/* NOTE(review): this listing is elided -- the embedded line numbers jump
   (1930 -> 1933, 1939 -> 1942, 1944 -> 1946), so part of the header comment,
   the return type line, the opening brace, the first condition of the `if'
   whose continuation appears at 1942, and the trailing "return false" are
   missing.  Restore from the complete file before compiling.  */
1929 /* Return true if a value at OFFSET bytes from BASE can be accessed
1930 using an unextended mips16 instruction. MODE is the mode of the
1933 Usually the offset in an unextended instruction is a 5-bit field.
1934 The offset is unsigned and shifted left once for HIs, twice
1935 for SIs, and so on. An exception is SImode accesses off the
1936 stack pointer, which have an 8-bit immediate field. */
1939 mips16_unextended_reference_p (enum machine_mode mode, rtx base, rtx offset)
1942 && GET_CODE (offset) == CONST_INT
1943 && INTVAL (offset) >= 0
1944 && (INTVAL (offset) & (GET_MODE_SIZE (mode) - 1)) == 0)
1946 if (GET_MODE_SIZE (mode) == 4 && base == stack_pointer_rtx)
1947 return INTVAL (offset) < 256 * GET_MODE_SIZE (mode);
1948 return INTVAL (offset) < 32 * GET_MODE_SIZE (mode);
/* NOTE(review): this listing is elided -- the embedded line numbers jump
   (1960 -> 1962, 1972 -> 1977, 1977 -> 1981), so the return type line, the
   `factor' declaration, the BLKmode branch body, the switch over addr.type
   with its ADDRESS_REG case, and the trailing "return 0" are missing.
   Restore from the complete file.  */
1954 /* Return the number of instructions needed to load or store a value
1955 of mode MODE at X. Return 0 if X isn't valid for MODE.
1957 For mips16 code, count extended instructions as two instructions. */
1960 mips_address_insns (rtx x, enum machine_mode mode)
1962 struct mips_address_info addr;
1965 if (mode == BLKmode)
1966 /* BLKmode is used for single unaligned loads and stores. */
1969 /* Each word of a multi-word value will be accessed individually. */
1970 factor = (GET_MODE_SIZE (mode) + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
1972 if (mips_classify_address (&addr, x, mode, false))
1977 && !mips16_unextended_reference_p (mode, addr.reg, addr.offset))
1981 case ADDRESS_LO_SUM:
1982 return (TARGET_MIPS16 ? factor * 2 : factor);
1984 case ADDRESS_CONST_INT:
1987 case ADDRESS_SYMBOLIC:
1988 return factor * mips_symbol_insns (addr.symbol_type);
/* NOTE(review): this listing is elided -- the embedded line numbers jump
   (1997 -> 1999, 2003 -> 2007, 2043 -> 2046), so the return type line, the
   `offset' declaration, the case labels (HIGH, CONST_INT, CONST_DOUBLE/
   CONST_VECTOR, CONST, SYMBOL_REF/LABEL_REF), intermediate returns, and the
   final "return 0"/closing brace are missing.  Restore from the complete
   file; note the function is recursive (call at 2046).  */
1994 /* Likewise for constant X. */
1997 mips_const_insns (rtx x)
1999 struct mips_integer_op codes[MIPS_MAX_INTEGER_OPS];
2000 enum mips_symbol_type symbol_type;
2003 switch (GET_CODE (x))
2007 || !mips_symbolic_constant_p (XEXP (x, 0), SYMBOL_CONTEXT_LEA,
2009 || !mips_split_p[symbol_type])
2016 /* Unsigned 8-bit constants can be loaded using an unextended
2017 LI instruction. Unsigned 16-bit constants can be loaded
2018 using an extended LI. Negative constants must be loaded
2019 using LI and then negated. */
2020 return (INTVAL (x) >= 0 && INTVAL (x) < 256 ? 1
2021 : SMALL_OPERAND_UNSIGNED (INTVAL (x)) ? 2
2022 : INTVAL (x) > -256 && INTVAL (x) < 0 ? 2
2023 : SMALL_OPERAND_UNSIGNED (-INTVAL (x)) ? 3
2026 return mips_build_integer (codes, INTVAL (x));
2030 return (!TARGET_MIPS16 && x == CONST0_RTX (GET_MODE (x)) ? 1 : 0);
2036 /* See if we can refer to X directly. */
2037 if (mips_symbolic_constant_p (x, SYMBOL_CONTEXT_LEA, &symbol_type))
2038 return mips_symbol_insns (symbol_type);
2040 /* Otherwise try splitting the constant into a base and offset.
2041 16-bit offsets can be added using an extra addiu. Larger offsets
2042 must be calculated separately and then added to the base. */
2043 split_const (x, &x, &offset);
2046 int n = mips_const_insns (x);
2049 if (SMALL_INT (offset))
2052 return n + 1 + mips_build_integer (codes, INTVAL (offset));
2059 return mips_symbol_insns (mips_classify_symbol (x, SYMBOL_CONTEXT_LEA));
2067 /* Return the number of instructions needed for memory reference X.
2068 Count extended mips16 instructions as two instructions. */
2071 mips_fetch_insns (rtx x)
2073 gcc_assert (MEM_P (x));
2074 return mips_address_insns (XEXP (x, 0), GET_MODE (x));
2078 /* Return the number of instructions needed for an integer division. */
2081 mips_idiv_insns (void)
2086 if (TARGET_CHECK_ZERO_DIV)
2088 if (GENERATE_DIVIDE_TRAPS)
2094 if (TARGET_FIX_R4000 || TARGET_FIX_R4400)
2099 /* This function is used to implement GO_IF_LEGITIMATE_ADDRESS. It
2100 returns a nonzero value if X is a legitimate address for a memory
2101 operand of the indicated MODE. STRICT is nonzero if this function
2102 is called during reload. */
2105 mips_legitimate_address_p (enum machine_mode mode, rtx x, int strict)
2107 struct mips_address_info addr;
2109 return mips_classify_address (&addr, x, mode, strict);
2113 /* Copy VALUE to a register and return that register. If new pseudos
2114 are allowed, copy it into a new register, otherwise use DEST. */
2117 mips_force_temporary (rtx dest, rtx value)
2119 if (can_create_pseudo_p ())
2120 return force_reg (Pmode, value);
/* No new pseudos (e.g. during reload): reuse DEST.  copy_rtx keeps the
   original DEST rtx unshared.  */
2123 emit_move_insn (copy_rtx (dest), value);
2129 /* Return a LO_SUM expression for ADDR. TEMP is as for mips_force_temporary
2130 and is used to load the high part into a register. */
2133 mips_split_symbol (rtx temp, rtx addr)
/* Non-MIPS16 case (condition elided in this extract): materialize
   HIGH(addr) in a register.  */
2138 high = mips_force_temporary (temp, gen_rtx_HIGH (Pmode, copy_rtx (addr)));
/* MIPS16 cases: use $gp as the high part, loading it into TEMP when new
   pseudos are not available, otherwise via the cached gp pseudo.  */
2139 else if (!can_create_pseudo_p ())
2141 emit_insn (gen_load_const_gp (copy_rtx (temp)));
2145 high = mips16_gp_pseudo_reg ();
2146 return gen_rtx_LO_SUM (Pmode, high, addr);
2150 /* Return an UNSPEC address with underlying address ADDRESS and symbol
2151 type SYMBOL_TYPE. */
2154 mips_unspec_address (rtx address, enum mips_symbol_type symbol_type)
/* Wrap only the symbolic base in the UNSPEC; re-add any constant offset
   outside it so the result is (const (plus (unspec ...) offset)).  */
2158 split_const (address, &base, &offset);
2159 base = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, base),
2160 UNSPEC_ADDRESS_FIRST + symbol_type);
2161 if (offset != const0_rtx)
2162 base = gen_rtx_PLUS (Pmode, base, offset);
2163 return gen_rtx_CONST (Pmode, base);
2167 /* If mips_unspec_address (ADDR, SYMBOL_TYPE) is a 32-bit value, add the
2168 high part to BASE and return the result. Just return BASE otherwise.
2169 TEMP is available as a temporary register if needed.
2171 The returned expression can be used as the first operand to a LO_SUM. */
2174 mips_unspec_offset_high (rtx temp, rtx base, rtx addr,
2175 enum mips_symbol_type symbol_type)
2177 if (mips_split_p[symbol_type])
/* Force HIGH(unspec) into a register, then add BASE; the caller pairs
   the result with the matching LO_SUM.  */
2179 addr = gen_rtx_HIGH (Pmode, mips_unspec_address (addr, symbol_type));
2180 addr = mips_force_temporary (temp, addr);
2181 return mips_force_temporary (temp, gen_rtx_PLUS (Pmode, addr, base));
2187 /* Return a legitimate address for REG + OFFSET. TEMP is as for
2188 mips_force_temporary; it is only needed when OFFSET is not a
2192 mips_add_offset (rtx temp, rtx reg, HOST_WIDE_INT offset)
2194 if (!SMALL_OPERAND (offset))
/* MIPS16 branch (condition elided in this extract): */
2199 /* Load the full offset into a register so that we can use
2200 an unextended instruction for the address itself. */
2201 high = GEN_INT (offset);
2206 /* Leave OFFSET as a 16-bit offset and put the excess in HIGH. */
2207 high = GEN_INT (CONST_HIGH_PART (offset));
2208 offset = CONST_LOW_PART (offset);
/* Fold HIGH into REG; the residual OFFSET then fits an addressing mode.  */
2210 high = mips_force_temporary (temp, high);
2211 reg = mips_force_temporary (temp, gen_rtx_PLUS (Pmode, high, reg));
2213 return plus_constant (reg, offset);
2216 /* Emit a call to __tls_get_addr. SYM is the TLS symbol we are
2217 referencing, and TYPE is the symbol type to use (either global
2218 dynamic or local dynamic). V0 is an RTX for the return value
2219 location. The entire insn sequence is returned. */
2221 static GTY(()) rtx mips_tls_symbol;
2224 mips_call_tls_get_addr (rtx sym, enum mips_symbol_type type, rtx v0)
2226 rtx insn, loc, tga, a0;
2228 a0 = gen_rtx_REG (Pmode, GP_ARG_FIRST);
/* Lazily create the libfunc symbol; cached across calls (GC-rooted).  */
2230 if (!mips_tls_symbol)
2231 mips_tls_symbol = init_one_libfunc ("__tls_get_addr");
2233 loc = mips_unspec_address (sym, type);
/* Pass the GOT-relative TLS descriptor address in $a0 and call TGA.  */
2237 emit_insn (gen_rtx_SET (Pmode, a0,
2238 gen_rtx_LO_SUM (Pmode, pic_offset_table_rtx, loc)));
2239 tga = gen_rtx_MEM (Pmode, mips_tls_symbol);
2240 insn = emit_call_insn (gen_call_value (v0, tga, const0_rtx, const0_rtx));
/* Mark the call const/pure so it can be CSEd, and record the register
   uses so dataflow keeps A0/V0 live across the call.  */
2241 CONST_OR_PURE_CALL_P (insn) = 1;
2242 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), v0);
2243 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), a0);
2244 insn = get_insns ();
2251 /* Generate the code to access LOC, a thread local SYMBOL_REF. The
2252 return value will be a valid address and move_operand (either a REG
2256 mips_legitimize_tls_address (rtx loc)
2258 rtx dest, insn, v0, v1, tmp1, tmp2, eqv;
2259 enum tls_model model;
2261 v0 = gen_rtx_REG (Pmode, GP_RETURN);
2262 v1 = gen_rtx_REG (Pmode, GP_RETURN + 1);
2264 model = SYMBOL_REF_TLS_MODEL (loc);
2265 /* Only TARGET_ABICALLS code can have more than one module; other
2266 code must be static and should not use a GOT. All TLS models
2267 reduce to local exec in this situation. */
2268 if (!TARGET_ABICALLS)
2269 model = TLS_MODEL_LOCAL_EXEC;
2273 case TLS_MODEL_GLOBAL_DYNAMIC:
/* GD: one __tls_get_addr call per symbol; result in V0.  */
2274 insn = mips_call_tls_get_addr (loc, SYMBOL_TLSGD, v0);
2275 dest = gen_reg_rtx (Pmode);
2276 emit_libcall_block (insn, dest, v0, loc);
2279 case TLS_MODEL_LOCAL_DYNAMIC:
/* LD: one __tls_get_addr call per module, then a DTPREL offset.  */
2280 insn = mips_call_tls_get_addr (loc, SYMBOL_TLSLDM, v0);
2281 tmp1 = gen_reg_rtx (Pmode);
2283 /* Attach a unique REG_EQUIV, to allow the RTL optimizers to
2284 share the LDM result with other LD model accesses. */
2285 eqv = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, const0_rtx),
2287 emit_libcall_block (insn, tmp1, v0, eqv);
2289 tmp2 = mips_unspec_offset_high (NULL, tmp1, loc, SYMBOL_DTPREL);
2290 dest = gen_rtx_LO_SUM (Pmode, tmp2,
2291 mips_unspec_address (loc, SYMBOL_DTPREL));
2294 case TLS_MODEL_INITIAL_EXEC:
/* IE: load the TP-relative offset from the GOT, add the thread
   pointer (rdhwr result lands in V1).  */
2295 tmp1 = gen_reg_rtx (Pmode);
2296 tmp2 = mips_unspec_address (loc, SYMBOL_GOTTPREL);
2297 if (Pmode == DImode)
2299 emit_insn (gen_tls_get_tp_di (v1));
2300 emit_insn (gen_load_gotdi (tmp1, pic_offset_table_rtx, tmp2));
2304 emit_insn (gen_tls_get_tp_si (v1));
2305 emit_insn (gen_load_gotsi (tmp1, pic_offset_table_rtx, tmp2));
2307 dest = gen_reg_rtx (Pmode);
2308 emit_insn (gen_add3_insn (dest, tmp1, v1));
2311 case TLS_MODEL_LOCAL_EXEC:
/* LE: thread pointer plus a link-time TPREL offset; no GOT access.  */
2312 if (Pmode == DImode)
2313 emit_insn (gen_tls_get_tp_di (v1));
2315 emit_insn (gen_tls_get_tp_si (v1));
2317 tmp1 = mips_unspec_offset_high (NULL, v1, loc, SYMBOL_TPREL);
2318 dest = gen_rtx_LO_SUM (Pmode, tmp1,
2319 mips_unspec_address (loc, SYMBOL_TPREL));
2329 /* This function is used to implement LEGITIMIZE_ADDRESS. If *XLOC can
2330 be legitimized in a way that the generic machinery might not expect,
2331 put the new address in *XLOC and return true. MODE is the mode of
2332 the memory being accessed. */
2335 mips_legitimize_address (rtx *xloc, enum machine_mode mode)
2337 enum mips_symbol_type symbol_type;
/* TLS symbols need their model-specific access sequence.  */
2339 if (mips_tls_operand_p (*xloc))
2341 *xloc = mips_legitimize_tls_address (*xloc);
2345 /* See if the address can split into a high part and a LO_SUM. */
2346 if (mips_symbolic_constant_p (*xloc, SYMBOL_CONTEXT_MEM, &symbol_type)
2347 && mips_symbolic_address_p (symbol_type, mode)
2348 && mips_split_p[symbol_type])
2350 *xloc = mips_split_symbol (0, *xloc);
2354 if (GET_CODE (*xloc) == PLUS && GET_CODE (XEXP (*xloc, 1)) == CONST_INT)
2356 /* Handle REG + CONSTANT using mips_add_offset. */
2359 reg = XEXP (*xloc, 0);
2360 if (!mips_valid_base_register_p (reg, mode, 0))
2361 reg = copy_to_mode_reg (Pmode, reg);
2362 *xloc = mips_add_offset (0, reg, INTVAL (XEXP (*xloc, 1)));
2370 /* Subroutine of mips_build_integer (with the same interface).
2371 Assume that the final action in the sequence should be a left shift. */
2374 mips_build_shift (struct mips_integer_op *codes, HOST_WIDE_INT value)
2376 unsigned int i, shift;
2378 /* Shift VALUE right until its lowest bit is set. Shift arithmetically
2379 since signed numbers are easier to load than unsigned ones. */
2381 while ((value & 1) == 0)
2382 value /= 2, shift++;
/* Load the reduced value, then append the compensating left shift.  */
2384 i = mips_build_integer (codes, value);
2385 codes[i].code = ASHIFT;
2386 codes[i].value = shift;
2391 /* As for mips_build_shift, but assume that the final action will be
2392 an IOR or PLUS operation. */
2395 mips_build_lower (struct mips_integer_op *codes, unsigned HOST_WIDE_INT value)
2397 unsigned HOST_WIDE_INT high;
2400 high = value & ~(unsigned HOST_WIDE_INT) 0xffff;
2401 if (!LUI_OPERAND (high) && (value & 0x18000) == 0x18000)
2403 /* The constant is too complex to load with a simple lui/ori pair
2404 so our goal is to clear as many trailing zeros as possible.
2405 In this case, we know bit 16 is set and that the low 16 bits
2406 form a negative number. If we subtract that number from VALUE,
2407 we will clear at least the lowest 17 bits, maybe more. */
2408 i = mips_build_integer (codes, CONST_HIGH_PART (value));
2409 codes[i].code = PLUS;
2410 codes[i].value = CONST_LOW_PART (value);
/* Otherwise: load the upper bits, then OR in the low 16.  */
2414 i = mips_build_integer (codes, high);
2415 codes[i].code = IOR;
2416 codes[i].value = value & 0xffff;
2422 /* Fill CODES with a sequence of rtl operations to load VALUE.
2423 Return the number of operations needed. */
2426 mips_build_integer (struct mips_integer_op *codes,
2427 unsigned HOST_WIDE_INT value)
2429 if (SMALL_OPERAND (value)
2430 || SMALL_OPERAND_UNSIGNED (value)
2431 || LUI_OPERAND (value))
2433 /* The value can be loaded with a single instruction. */
2434 codes[0].code = UNKNOWN;
2435 codes[0].value = value;
2438 else if ((value & 1) != 0 || LUI_OPERAND (CONST_HIGH_PART (value)))
2440 /* Either the constant is a simple LUI/ORI combination or its
2441 lowest bit is set. We don't want to shift in this case. */
2442 return mips_build_lower (codes, value);
2444 else if ((value & 0xffff) == 0)
2446 /* The constant will need at least three actions. The lowest
2447 16 bits are clear, so the final action will be a shift. */
2448 return mips_build_shift (codes, value);
2452 /* The final action could be a shift, add or inclusive OR.
2453 Rather than use a complex condition to select the best
2454 approach, try both mips_build_shift and mips_build_lower
2455 and pick the one that gives the shortest sequence.
2456 Note that this case is only used once per constant. */
2457 struct mips_integer_op alt_codes[MIPS_MAX_INTEGER_OPS];
2458 unsigned int cost, alt_cost;
2460 cost = mips_build_shift (codes, value);
2461 alt_cost = mips_build_lower (alt_codes, value);
2462 if (alt_cost < cost)
/* The lower-based sequence won; copy it over the shift-based one.  */
2464 memcpy (codes, alt_codes, alt_cost * sizeof (codes[0]));
2472 /* Load VALUE into DEST, using TEMP as a temporary register if need be. */
2475 mips_move_integer (rtx dest, rtx temp, unsigned HOST_WIDE_INT value)
2477 struct mips_integer_op codes[MIPS_MAX_INTEGER_OPS];
2478 enum machine_mode mode;
2479 unsigned int i, cost;
2482 mode = GET_MODE (dest);
2483 cost = mips_build_integer (codes, value);
2485 /* Apply each binary operation to X. Invariant: X is a legitimate
2486 source operand for a SET pattern. */
2487 x = GEN_INT (codes[0].value);
2488 for (i = 1; i < cost; i++)
/* Without new pseudos, stage each intermediate through TEMP.  */
2490 if (!can_create_pseudo_p ())
2492 emit_insn (gen_rtx_SET (VOIDmode, temp, x));
2496 x = force_reg (mode, x);
2497 x = gen_rtx_fmt_ee (codes[i].code, mode, x, GEN_INT (codes[i].value));
/* Final move of the fully-built expression into DEST.  */
2500 emit_insn (gen_rtx_SET (VOIDmode, dest, x));
2504 /* Subroutine of mips_legitimize_move. Move constant SRC into register
2505 DEST given that SRC satisfies immediate_operand but doesn't satisfy
2509 mips_legitimize_const_move (enum machine_mode mode, rtx dest, rtx src)
2513 /* Split moves of big integers into smaller pieces. */
2514 if (splittable_const_int_operand (src, mode))
2516 mips_move_integer (dest, dest, INTVAL (src));
2520 /* Split moves of symbolic constants into high/low pairs. */
2521 if (splittable_symbolic_operand (src, mode))
2523 emit_insn (gen_rtx_SET (VOIDmode, dest, mips_split_symbol (dest, src)));
/* TLS symbols get their model-specific sequence.  */
2527 if (mips_tls_operand_p (src))
2529 emit_move_insn (dest, mips_legitimize_tls_address (src));
2533 /* If we have (const (plus symbol offset)), load the symbol first
2534 and then add in the offset. This is usually better than forcing
2535 the constant into memory, at least in non-mips16 code. */
2536 split_const (src, &base, &offset);
2538 && offset != const0_rtx
2539 && (can_create_pseudo_p () || SMALL_INT (offset)))
2541 base = mips_force_temporary (dest, base);
2542 emit_move_insn (dest, mips_add_offset (0, base, INTVAL (offset)));
/* Last resort: spill the constant to the constant pool.  */
2546 src = force_const_mem (mode, src);
2548 /* When using explicit relocs, constant pool references are sometimes
2549 not legitimate addresses. */
2550 if (!memory_operand (src, VOIDmode))
2551 src = replace_equiv_address (src, mips_split_symbol (dest, XEXP (src, 0)));
2552 emit_move_insn (dest, src);
2556 /* If (set DEST SRC) is not a valid instruction, emit an equivalent
2557 sequence that is valid. */
2560 mips_legitimize_move (enum machine_mode mode, rtx dest, rtx src)
/* MIPS moves need at least one register operand; force SRC if neither
   side qualifies.  */
2562 if (!register_operand (dest, mode) && !reg_or_0_operand (src, mode))
2564 emit_move_insn (dest, force_reg (mode, src));
2568 /* Check for individual, fully-reloaded mflo and mfhi instructions. */
2569 if (GET_MODE_SIZE (mode) <= UNITS_PER_WORD
2570 && REG_P (src) && MD_REG_P (REGNO (src))
2571 && REG_P (dest) && GP_REG_P (REGNO (dest)))
/* mfhilo patterns mention both HI and LO, so supply the sibling reg.  */
2573 int other_regno = REGNO (src) == HI_REGNUM ? LO_REGNUM : HI_REGNUM;
2574 if (GET_MODE_SIZE (mode) <= 4)
2575 emit_insn (gen_mfhilo_si (gen_rtx_REG (SImode, REGNO (dest)),
2576 gen_rtx_REG (SImode, REGNO (src)),
2577 gen_rtx_REG (SImode, other_regno)));
2579 emit_insn (gen_mfhilo_di (gen_rtx_REG (DImode, REGNO (dest)),
2580 gen_rtx_REG (DImode, REGNO (src)),
2581 gen_rtx_REG (DImode, other_regno)));
2585 /* We need to deal with constants that would be legitimate
2586 immediate_operands but not legitimate move_operands. */
2587 if (CONSTANT_P (src) && !move_operand (src, mode))
2589 mips_legitimize_const_move (mode, dest, src);
/* Record the original constant so later passes still see DEST == SRC.  */
2590 set_unique_reg_note (get_last_insn (), REG_EQUAL, copy_rtx (src));
2596 /* We need a lot of little routines to check constant values on the
2597 mips16. These are used to figure out how long the instruction will
2598 be. It would be much better to do this using constraints, but
2599 there aren't nearly enough letters available. */
/* Return nonzero if OP is a CONST_INT in [LOW, HIGH] whose value has
   none of the bits in MASK set (MASK == 0 means no alignment check).  */
2602 m16_check_op (rtx op, int low, int high, int mask)
2604 return (GET_CODE (op) == CONST_INT
2605 && INTVAL (op) >= low
2606 && INTVAL (op) <= high
2607 && (INTVAL (op) & mask) == 0);
/* The following family of predicates all delegate to m16_check_op.
   Naming scheme: [n]{s,u}imm<bits>_<align> = (negated) signed/unsigned
   immediate of <bits> bits, required to be a multiple of <align>.  */
2611 m16_uimm3_b (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
2613 return m16_check_op (op, 0x1, 0x8, 0);
2617 m16_simm4_1 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
2619 return m16_check_op (op, - 0x8, 0x7, 0);
2623 m16_nsimm4_1 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
2625 return m16_check_op (op, - 0x7, 0x8, 0);
2629 m16_simm5_1 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
2631 return m16_check_op (op, - 0x10, 0xf, 0);
2635 m16_nsimm5_1 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
2637 return m16_check_op (op, - 0xf, 0x10, 0);
2641 m16_uimm5_4 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
2643 return m16_check_op (op, (- 0x10) << 2, 0xf << 2, 3);
2647 m16_nuimm5_4 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
2649 return m16_check_op (op, (- 0xf) << 2, 0x10 << 2, 3);
2653 m16_simm8_1 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
2655 return m16_check_op (op, - 0x80, 0x7f, 0);
2659 m16_nsimm8_1 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
2661 return m16_check_op (op, - 0x7f, 0x80, 0);
2665 m16_uimm8_1 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
2667 return m16_check_op (op, 0x0, 0xff, 0);
2671 m16_nuimm8_1 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
2673 return m16_check_op (op, - 0xff, 0x0, 0);
2677 m16_uimm8_m1_1 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
2679 return m16_check_op (op, - 0x1, 0xfe, 0);
2683 m16_uimm8_4 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
2685 return m16_check_op (op, 0x0, 0xff << 2, 3);
2689 m16_nuimm8_4 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
2691 return m16_check_op (op, (- 0xff) << 2, 0x0, 3);
2695 m16_simm8_8 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
2697 return m16_check_op (op, (- 0x80) << 3, 0x7f << 3, 7);
2701 m16_nsimm8_8 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
2703 return m16_check_op (op, (- 0x7f) << 3, 0x80 << 3, 7);
2706 /* Return true if ADDR matches the pattern for the lwxs load scaled indexed
2707 address instruction. */
2710 mips_lwxs_address_p (rtx addr)
/* NOTE(review): the ISA-availability check and the return statements are
   elided in this extract; the visible test matches (plus (mult reg 4) reg). */
2713 && GET_CODE (addr) == PLUS
2714 && REG_P (XEXP (addr, 1)))
2716 rtx offset = XEXP (addr, 0);
2717 if (GET_CODE (offset) == MULT
2718 && REG_P (XEXP (offset, 0))
2719 && GET_CODE (XEXP (offset, 1)) == CONST_INT
2720 && INTVAL (XEXP (offset, 1)) == 4)
/* Implements TARGET_RTX_COSTS: estimate the cost of X appearing inside
   OUTER_CODE, writing the answer to *TOTAL.  NOTE(review): many case
   labels and return statements are elided in this extract; comments
   below describe only the visible fragments.  */
2727 mips_rtx_costs (rtx x, int code, int outer_code, int *total)
2729 enum machine_mode mode = GET_MODE (x);
2730 bool float_mode_p = FLOAT_MODE_P (mode);
2737 /* A number between 1 and 8 inclusive is efficient for a shift.
2738 Otherwise, we will need an extended instruction. */
2739 if ((outer_code) == ASHIFT || (outer_code) == ASHIFTRT
2740 || (outer_code) == LSHIFTRT)
2742 if (INTVAL (x) >= 1 && INTVAL (x) <= 8)
2745 *total = COSTS_N_INSNS (1);
2749 /* We can use cmpi for an xor with an unsigned 16-bit value. */
2750 if ((outer_code) == XOR
2751 && INTVAL (x) >= 0 && INTVAL (x) < 0x10000)
2757 /* We may be able to use slt or sltu for a comparison with a
2758 signed 16-bit value. (The boundary conditions aren't quite
2759 right, but this is just a heuristic anyhow.) */
2760 if (((outer_code) == LT || (outer_code) == LE
2761 || (outer_code) == GE || (outer_code) == GT
2762 || (outer_code) == LTU || (outer_code) == LEU
2763 || (outer_code) == GEU || (outer_code) == GTU)
2764 && INTVAL (x) >= -0x8000 && INTVAL (x) < 0x8000)
2770 /* Equality comparisons with 0 are cheap. */
2771 if (((outer_code) == EQ || (outer_code) == NE)
2778 /* Constants in the range 0...255 can be loaded with an unextended
2779 instruction. They are therefore as cheap as a register move.
2781 Given the choice between "li R1,0...255" and "move R1,R2"
2782 (where R2 is a known constant), it is usually better to use "li",
2783 since we do not want to unnecessarily extend the lifetime
2785 if (outer_code == SET
2787 && INTVAL (x) < 256)
2795 /* These can be used anywhere. */
2800 /* Otherwise fall through to the handling below because
2801 we'll need to construct the constant. */
2807 if (LEGITIMATE_CONSTANT_P (x))
2809 *total = COSTS_N_INSNS (1);
2814 /* The value will need to be fetched from the constant pool. */
2815 *total = CONSTANT_POOL_COST;
/* MEM: cost is driven by the number of address insns.  */
2821 /* If the address is legitimate, return the number of
2822 instructions it needs. */
2823 rtx addr = XEXP (x, 0);
2824 int n = mips_address_insns (addr, GET_MODE (x));
2827 *total = COSTS_N_INSNS (n + 1);
2830 /* Check for scaled indexed address. */
2831 if (mips_lwxs_address_p (addr))
2833 *total = COSTS_N_INSNS (2);
2836 /* Otherwise use the default handling. */
2841 *total = COSTS_N_INSNS (6);
2845 *total = COSTS_N_INSNS ((mode == DImode && !TARGET_64BIT) ? 2 : 1);
/* Double-word operations on 32-bit targets are split, hence pricier.  */
2851 if (mode == DImode && !TARGET_64BIT)
2853 *total = COSTS_N_INSNS (2);
2861 if (mode == DImode && !TARGET_64BIT)
2863 *total = COSTS_N_INSNS ((GET_CODE (XEXP (x, 1)) == CONST_INT)
2871 *total = COSTS_N_INSNS (1);
2873 *total = COSTS_N_INSNS (4);
2877 *total = COSTS_N_INSNS (1);
/* Arithmetic: use the tuned per-CPU cost tables.  */
2884 *total = mips_cost->fp_add;
2888 else if (mode == DImode && !TARGET_64BIT)
2890 *total = COSTS_N_INSNS (4);
2896 if (mode == DImode && !TARGET_64BIT)
2898 *total = COSTS_N_INSNS (4);
2905 *total = mips_cost->fp_mult_sf;
2907 else if (mode == DFmode)
2908 *total = mips_cost->fp_mult_df;
2910 else if (mode == SImode)
2911 *total = mips_cost->int_mult_si;
2914 *total = mips_cost->int_mult_di;
2923 *total = mips_cost->fp_div_sf;
2925 *total = mips_cost->fp_div_df;
2934 *total = mips_cost->int_div_di;
2936 *total = mips_cost->int_div_si;
2941 /* A sign extend from SImode to DImode in 64-bit mode is often
2942 zero instructions, because the result can often be used
2943 directly by another instruction; we'll call it one. */
2944 if (TARGET_64BIT && mode == DImode
2945 && GET_MODE (XEXP (x, 0)) == SImode)
2946 *total = COSTS_N_INSNS (1);
2948 *total = COSTS_N_INSNS (2);
2952 if (TARGET_64BIT && mode == DImode
2953 && GET_MODE (XEXP (x, 0)) == SImode)
2954 *total = COSTS_N_INSNS (2);
2956 *total = COSTS_N_INSNS (1);
2960 case UNSIGNED_FLOAT:
2963 case FLOAT_TRUNCATE:
2965 *total = mips_cost->fp_add;
2973 /* Provide the costs of an addressing mode that contains ADDR.
2974 If ADDR is not a valid address, its cost is irrelevant. */
2977 mips_address_cost (rtx addr)
/* Cost == instruction count; SImode is representative for all modes.  */
2979 return mips_address_insns (addr, SImode);
2982 /* Return one word of double-word value OP, taking into account the fixed
2983 endianness of certain registers. HIGH_P is true to select the high part,
2984 false to select the low part. */
2987 mips_subword (rtx op, int high_p)
2990 enum machine_mode mode;
2992 mode = GET_MODE (op);
2993 if (mode == VOIDmode)
/* Choose the byte offset of the requested word for memory operands.  */
2996 if (TARGET_BIG_ENDIAN ? !high_p : high_p)
2997 byte = UNITS_PER_WORD;
/* Paired FPRs always keep the high word in the odd register,
   regardless of memory endianness.  */
3001 if (FP_REG_RTX_P (op))
3002 return gen_rtx_REG (word_mode, high_p ? REGNO (op) + 1 : REGNO (op));
3005 return mips_rewrite_small_data (adjust_address (op, word_mode, byte));
3007 return simplify_gen_subreg (word_mode, op, mode, byte);
3011 /* Return true if a 64-bit move from SRC to DEST should be split into two. */
3014 mips_split_64bit_move_p (rtx dest, rtx src)
3019 /* FP->FP moves can be done in a single instruction. */
3020 if (FP_REG_RTX_P (src) && FP_REG_RTX_P (dest))
3023 /* Check for floating-point loads and stores. They can be done using
3024 ldc1 and sdc1 on MIPS II and above. */
3027 if (FP_REG_RTX_P (dest) && MEM_P (src))
3029 if (FP_REG_RTX_P (src) && MEM_P (dest))
3036 /* Split a 64-bit move from SRC to DEST assuming that
3037 mips_split_64bit_move_p holds.
3039 Moves into and out of FPRs cause some difficulty here. Such moves
3040 will always be DFmode, since paired FPRs are not allowed to store
3041 DImode values. The most natural representation would be two separate
3042 32-bit moves, such as:
3044 (set (reg:SI $f0) (mem:SI ...))
3045 (set (reg:SI $f1) (mem:SI ...))
3047 However, the second insn is invalid because odd-numbered FPRs are
3048 not allowed to store independent values. Use the patterns load_df_low,
3049 load_df_high and store_df_high instead. */
3052 mips_split_64bit_move (rtx dest, rtx src)
3054 if (FP_REG_RTX_P (dest))
3056 /* Loading an FPR from memory or from GPRs. */
/* mthc1 path (condition elided in this extract): low word via
   load_df_low, high word via mthc1.  */
3059 dest = gen_lowpart (DFmode, dest);
3060 emit_insn (gen_load_df_low (dest, mips_subword (src, 0)));
3061 emit_insn (gen_mthc1 (dest, mips_subword (src, 1),
3066 emit_insn (gen_load_df_low (copy_rtx (dest),
3067 mips_subword (src, 0)));
3068 emit_insn (gen_load_df_high (dest, mips_subword (src, 1),
3072 else if (FP_REG_RTX_P (src))
3074 /* Storing an FPR into memory or GPRs. */
3077 src = gen_lowpart (DFmode, src);
3078 emit_move_insn (mips_subword (dest, 0), mips_subword (src, 0));
3079 emit_insn (gen_mfhc1 (mips_subword (dest, 1), src));
3083 emit_move_insn (mips_subword (dest, 0), mips_subword (src, 0));
3084 emit_insn (gen_store_df_high (mips_subword (dest, 1), src));
3089 /* The operation can be split into two normal moves. Decide in
3090 which order to do them. */
3093 low_dest = mips_subword (dest, 0);
3094 if (REG_P (low_dest)
3095 && reg_overlap_mentioned_p (low_dest, src))
/* Writing the low destination first would clobber a source word;
   move the high halves first instead.  */
3097 emit_move_insn (mips_subword (dest, 1), mips_subword (src, 1));
3098 emit_move_insn (low_dest, mips_subword (src, 0));
3102 emit_move_insn (low_dest, mips_subword (src, 0));
3103 emit_move_insn (mips_subword (dest, 1), mips_subword (src, 1));
3108 /* Return the appropriate instructions to move SRC into DEST. Assume
3109 that SRC is operand 1 and DEST is operand 0. */
3112 mips_output_move (rtx dest, rtx src)
3114 enum rtx_code dest_code, src_code;
3117 dest_code = GET_CODE (dest);
3118 src_code = GET_CODE (src);
3119 dbl_p = (GET_MODE_SIZE (GET_MODE (dest)) == 8);
/* Split moves are handled elsewhere; reaching here they are an error
   (the abort/gcc_unreachable line is elided in this extract).  */
3121 if (dbl_p && mips_split_64bit_move_p (dest, src))
/* Source is a GPR or constant zero ($0 can be substituted via %z1).  */
3124 if ((src_code == REG && GP_REG_P (REGNO (src)))
3125 || (!TARGET_MIPS16 && src == CONST0_RTX (GET_MODE (dest))))
3127 if (dest_code == REG)
3129 if (GP_REG_P (REGNO (dest)))
3130 return "move\t%0,%z1";
3132 if (MD_REG_P (REGNO (dest)))
3135 if (DSP_ACC_REG_P (REGNO (dest)))
/* Patch the accumulator suffix (e.g. "hi"/"lo") into the template;
   note RETVAL is static, so the returned string is reused in place.  */
3137 static char retval[] = "mt__\t%z1,%q0";
3138 retval[2] = reg_names[REGNO (dest)][4];
3139 retval[3] = reg_names[REGNO (dest)][5];
3143 if (FP_REG_P (REGNO (dest)))
3144 return (dbl_p ? "dmtc1\t%z1,%0" : "mtc1\t%z1,%0");
3146 if (ALL_COP_REG_P (REGNO (dest)))
3148 static char retval[] = "dmtc_\t%z1,%0";
3150 retval[4] = COPNUM_AS_CHAR_FROM_REGNUM (REGNO (dest));
/* Skip the leading 'd' for the 32-bit form.  */
3151 return (dbl_p ? retval : retval + 1);
3154 if (dest_code == MEM)
3155 return (dbl_p ? "sd\t%z1,%0" : "sw\t%z1,%0");
/* Destination is a GPR.  */
3157 if (dest_code == REG && GP_REG_P (REGNO (dest)))
3159 if (src_code == REG)
3161 if (DSP_ACC_REG_P (REGNO (src)))
3163 static char retval[] = "mf__\t%0,%q1";
3164 retval[2] = reg_names[REGNO (src)][4];
3165 retval[3] = reg_names[REGNO (src)][5];
3169 if (ST_REG_P (REGNO (src)) && ISA_HAS_8CC)
3170 return "lui\t%0,0x3f80\n\tmovf\t%0,%.,%1";
3172 if (FP_REG_P (REGNO (src)))
3173 return (dbl_p ? "dmfc1\t%0,%1" : "mfc1\t%0,%1");
3175 if (ALL_COP_REG_P (REGNO (src)))
3177 static char retval[] = "dmfc_\t%0,%1";
3179 retval[4] = COPNUM_AS_CHAR_FROM_REGNUM (REGNO (src));
3180 return (dbl_p ? retval : retval + 1);
3184 if (src_code == MEM)
3185 return (dbl_p ? "ld\t%0,%1" : "lw\t%0,%1");
3187 if (src_code == CONST_INT)
3189 /* Don't use the X format, because that will give out of
3190 range numbers for 64-bit hosts and 32-bit targets. */
3192 return "li\t%0,%1\t\t\t# %X1";
3194 if (INTVAL (src) >= 0 && INTVAL (src) <= 0xffff)
3197 if (INTVAL (src) < 0 && INTVAL (src) >= -0xffff)
3201 if (src_code == HIGH)
3202 return "lui\t%0,%h1";
3204 if (CONST_GP_P (src))
3205 return "move\t%0,%1";
3207 if (symbolic_operand (src, VOIDmode))
3208 return (dbl_p ? "dla\t%0,%1" : "la\t%0,%1");
/* Source is an FPR.  */
3210 if (src_code == REG && FP_REG_P (REGNO (src)))
3212 if (dest_code == REG && FP_REG_P (REGNO (dest)))
3214 if (GET_MODE (dest) == V2SFmode)
3215 return "mov.ps\t%0,%1";
3217 return (dbl_p ? "mov.d\t%0,%1" : "mov.s\t%0,%1");
3220 if (dest_code == MEM)
3221 return (dbl_p ? "sdc1\t%1,%0" : "swc1\t%1,%0");
3223 if (dest_code == REG && FP_REG_P (REGNO (dest)))
3225 if (src_code == MEM)
3226 return (dbl_p ? "ldc1\t%0,%1" : "lwc1\t%0,%1");
/* Coprocessor loads/stores: patch width ('d'/'w') and cop number.  */
3228 if (dest_code == REG && ALL_COP_REG_P (REGNO (dest)) && src_code == MEM)
3230 static char retval[] = "l_c_\t%0,%1";
3232 retval[1] = (dbl_p ? 'd' : 'w');
3233 retval[3] = COPNUM_AS_CHAR_FROM_REGNUM (REGNO (dest));
3236 if (dest_code == MEM && src_code == REG && ALL_COP_REG_P (REGNO (src)))
3238 static char retval[] = "s_c_\t%1,%0";
3240 retval[1] = (dbl_p ? 'd' : 'w');
3241 retval[3] = COPNUM_AS_CHAR_FROM_REGNUM (REGNO (src));
3247 /* Restore $gp from its save slot. Valid only when using o32 or
3251 mips_restore_gp (void)
3255 gcc_assert (TARGET_ABICALLS && TARGET_OLDABI);
/* The save slot lives just above the outgoing argument area, addressed
   off whichever of $fp/$sp is the frame base.  */
3257 address = mips_add_offset (pic_offset_table_rtx,
3258 frame_pointer_needed
3259 ? hard_frame_pointer_rtx
3260 : stack_pointer_rtx,
3261 current_function_outgoing_args_size);
3262 slot = gen_rtx_MEM (Pmode, address);
3264 emit_move_insn (pic_offset_table_rtx, slot);
/* Without explicit relocs the scheduler cannot see the $gp dependence,
   so fence it with a blockage.  */
3265 if (!TARGET_EXPLICIT_RELOCS)
3266 emit_insn (gen_blockage ());
3269 /* Emit an instruction of the form (set TARGET (CODE OP0 OP1)). */
3272 mips_emit_binary (enum rtx_code code, rtx target, rtx op0, rtx op1)
/* The operation inherits TARGET's mode.  */
3274 emit_insn (gen_rtx_SET (VOIDmode, target,
3275 gen_rtx_fmt_ee (code, GET_MODE (target), op0, op1)));
3278 /* Return true if CMP1 is a suitable second operand for relational
3279 operator CODE. See also the *sCC patterns in mips.md. */
3282 mips_relational_operand_ok_p (enum rtx_code code, rtx cmp1)
/* NOTE(review): the case labels pairing each test with its codes are
   elided in this extract.  */
3288 return reg_or_0_operand (cmp1, VOIDmode);
3292 return !TARGET_MIPS16 && cmp1 == const1_rtx;
3296 return arith_operand (cmp1, VOIDmode);
3299 return sle_operand (cmp1, VOIDmode);
3302 return sleu_operand (cmp1, VOIDmode);
3309 /* Canonicalize LE or LEU comparisons into LT comparisons when
3310 possible to avoid extra instructions or inverting the
3314 mips_canonicalize_comparison (enum rtx_code *code, rtx *cmp1,
3315 enum machine_mode mode)
3317 HOST_WIDE_INT original, plus_one;
3319 if (GET_CODE (*cmp1) != CONST_INT)
/* x <= N  ==>  x < N+1, but only if N+1 does not wrap in MODE.  */
3322 original = INTVAL (*cmp1);
3323 plus_one = trunc_int_for_mode ((unsigned HOST_WIDE_INT) original + 1, mode);
3328 if (original < plus_one)
3331 *cmp1 = force_reg (mode, GEN_INT (plus_one));
3340 *cmp1 = force_reg (mode, GEN_INT (plus_one));
3353 /* Compare CMP0 and CMP1 using relational operator CODE and store the
3354 result in TARGET. CMP0 and TARGET are register_operands that have
3355 the same integer mode. If INVERT_PTR is nonnull, it's OK to set
3356 TARGET to the inverse of the result and flip *INVERT_PTR instead. */
3359 mips_emit_int_relational (enum rtx_code code, bool *invert_ptr,
3360 rtx target, rtx cmp0, rtx cmp1)
3362 /* First see if there is a MIPS instruction that can do this operation
3363 with CMP1 in its current form. If not, try to canonicalize the
3364 comparison to LT. If that fails, try doing the same for the
3365 inverse operation. If that also fails, force CMP1 into a register
3367 if (mips_relational_operand_ok_p (code, cmp1))
3368 mips_emit_binary (code, target, cmp0, cmp1);
3369 else if (mips_canonicalize_comparison (&code, &cmp1, GET_MODE (target)))
3370 mips_emit_binary (code, target, cmp0, cmp1);
3373 enum rtx_code inv_code = reverse_condition (code);
3374 if (!mips_relational_operand_ok_p (inv_code, cmp1))
/* Neither direction works with CMP1 as-is; register it and retry.  */
3376 cmp1 = force_reg (GET_MODE (cmp0), cmp1);
3377 mips_emit_int_relational (code, invert_ptr, target, cmp0, cmp1);
3379 else if (invert_ptr == 0)
/* Caller cannot accept an inverted result: compute the inverse into
   a scratch and XOR with 1 to flip it.  */
3381 rtx inv_target = gen_reg_rtx (GET_MODE (target));
3382 mips_emit_binary (inv_code, inv_target, cmp0, cmp1);
3383 mips_emit_binary (XOR, target, inv_target, const1_rtx);
3387 *invert_ptr = !*invert_ptr;
3388 mips_emit_binary (inv_code, target, cmp0, cmp1);
3393 /* Return a register that is zero iff CMP0 and CMP1 are equal.
3394 The register will have the same mode as CMP0. */
3397 mips_zero_if_equal (rtx cmp0, rtx cmp1)
3399 if (cmp1 == const0_rtx)
/* Prefer XOR with a small unsigned immediate (xori); otherwise SUBU,
   whose result is likewise zero exactly when the operands are equal.  */
3402 if (uns_arith_operand (cmp1, VOIDmode))
3403 return expand_binop (GET_MODE (cmp0), xor_optab,
3404 cmp0, cmp1, 0, 0, OPTAB_DIRECT);
3406 return expand_binop (GET_MODE (cmp0), sub_optab,
3407 cmp0, cmp1, 0, 0, OPTAB_DIRECT);
3410 /* Convert *CODE into a code that can be used in a floating-point
3411 scc instruction (c.<cond>.<fmt>). Return true if the values of
3412 the condition code registers will be inverted, with 0 indicating
3413 that the condition holds. */
3416 mips_reverse_fp_cond_p (enum rtx_code *code)
/* NOTE(review): the switch/case labels and returns are elided in this
   extract; the visible line reverses conditions c.<cond>.<fmt> cannot
   express directly, honoring unordered operands.  */
3423 *code = reverse_condition_maybe_unordered (*code);
3431 /* Convert a comparison into something that can be used in a branch or
3432 conditional move. cmp_operands[0] and cmp_operands[1] are the values
3433 being compared and *CODE is the code used to compare them.
3435 Update *CODE, *OP0 and *OP1 so that they describe the final comparison.
3436 If NEED_EQ_NE_P, then only EQ/NE comparisons against zero are possible,
3437 otherwise any standard branch condition can be used. The standard branch
3440 - EQ/NE between two registers.
3441 - any comparison between a register and zero. */
3444 mips_emit_compare (enum rtx_code *code, rtx *op0, rtx *op1, bool need_eq_ne_p)
3446 if (GET_MODE_CLASS (GET_MODE (cmp_operands[0])) == MODE_INT)
/* Comparison against zero: usable directly by the branch.  */
3448 if (!need_eq_ne_p && cmp_operands[1] == const0_rtx)
3450 *op0 = cmp_operands[0];
3451 *op1 = cmp_operands[1];
3453 else if (*code == EQ || *code == NE)
/* EQ/NE: reduce to a compare-with-zero of (op0 - op1)/(op0 ^ op1).  */
3457 *op0 = mips_zero_if_equal (cmp_operands[0], cmp_operands[1]);
3462 *op0 = cmp_operands[0];
3463 *op1 = force_reg (GET_MODE (*op0), cmp_operands[1]);
3468 /* The comparison needs a separate scc instruction. Store the
3469 result of the scc in *OP0 and compare it against zero. */
3470 bool invert = false;
3471 *op0 = gen_reg_rtx (GET_MODE (cmp_operands[0]));
3473 mips_emit_int_relational (*code, &invert, *op0,
3474 cmp_operands[0], cmp_operands[1]);
3475 *code = (invert ? EQ : NE);
3480 enum rtx_code cmp_code;
3482 /* Floating-point tests use a separate c.cond.fmt comparison to
3483 set a condition code register. The branch or conditional move
3484 will then compare that register against zero.
3486 Set CMP_CODE to the code of the comparison instruction and
3487 *CODE to the code that the branch or move should use. */
3489 *code = mips_reverse_fp_cond_p (&cmp_code) ? EQ : NE;
3491 ? gen_reg_rtx (CCmode)
3492 : gen_rtx_REG (CCmode, FPSW_REGNUM));
3494 mips_emit_binary (cmp_code, *op0, cmp_operands[0], cmp_operands[1]);
3498 /* Try comparing cmp_operands[0] and cmp_operands[1] using rtl code CODE.
3499 Store the result in TARGET and return true if successful.
3501 On 64-bit targets, TARGET may be wider than cmp_operands[0]. */
3504 mips_emit_scc (enum rtx_code code, rtx target)
/* Only integer comparisons are handled here.  */
3506 if (GET_MODE_CLASS (GET_MODE (cmp_operands[0])) != MODE_INT)
3509 target = gen_lowpart (GET_MODE (cmp_operands[0]), target);
3510 if (code == EQ || code == NE)
/* (x == y)  ==>  (zero_if_equal(x, y) <op> 0).  */
3512 rtx zie = mips_zero_if_equal (cmp_operands[0], cmp_operands[1]);
3513 mips_emit_binary (code, target, zie, const0_rtx);
3516 mips_emit_int_relational (code, 0, target,
3517 cmp_operands[0], cmp_operands[1]);
3521 /* Emit the common code for doing conditional branches.
3522 operand[0] is the label to jump to.
3523 The comparison operands are saved away by cmp{si,di,sf,df}. */
3526 gen_conditional_branch (rtx *operands, enum rtx_code code)
3528 rtx op0, op1, condition;
3530 mips_emit_compare (&code, &op0, &op1, TARGET_MIPS16);
3531 condition = gen_rtx_fmt_ee (code, VOIDmode, op0, op1);
3532 emit_jump_insn (gen_condjump (condition, operands[0]));
3537 (set temp (COND:CCV2 CMP_OP0 CMP_OP1))
3538 (set DEST (unspec [TRUE_SRC FALSE_SRC temp] UNSPEC_MOVE_TF_PS)) */
3541 mips_expand_vcondv2sf (rtx dest, rtx true_src, rtx false_src,
3542 enum rtx_code cond, rtx cmp_op0, rtx cmp_op1)
3547 reversed_p = mips_reverse_fp_cond_p (&cond);
3548 cmp_result = gen_reg_rtx (CCV2mode);
3549 emit_insn (gen_scc_ps (cmp_result,
3550 gen_rtx_fmt_ee (cond, VOIDmode, cmp_op0, cmp_op1)));
3552 emit_insn (gen_mips_cond_move_tf_ps (dest, false_src, true_src,
3555 emit_insn (gen_mips_cond_move_tf_ps (dest, true_src, false_src,
3559 /* Emit the common code for conditional moves. OPERANDS is the array
3560 of operands passed to the conditional move define_expand. */
3563 gen_conditional_move (rtx *operands)
3568 code = GET_CODE (operands[1]);
3569 mips_emit_compare (&code, &op0, &op1, true);
3570 emit_insn (gen_rtx_SET (VOIDmode, operands[0],
3571 gen_rtx_IF_THEN_ELSE (GET_MODE (operands[0]),
3572 gen_rtx_fmt_ee (code,
3575 operands[2], operands[3])));
3578 /* Emit a conditional trap. OPERANDS is the array of operands passed to
3579 the conditional_trap expander. */
3582 mips_gen_conditional_trap (rtx *operands)
3585 enum rtx_code cmp_code = GET_CODE (operands[0]);
3586 enum machine_mode mode = GET_MODE (cmp_operands[0]);
3588 /* MIPS conditional trap machine instructions don't have GT or LE
3589 flavors, so we must invert the comparison and convert to LT and
3590 GE, respectively. */
3593 case GT: cmp_code = LT; break;
3594 case LE: cmp_code = GE; break;
3595 case GTU: cmp_code = LTU; break;
3596 case LEU: cmp_code = GEU; break;
3599 if (cmp_code == GET_CODE (operands[0]))
3601 op0 = cmp_operands[0];
3602 op1 = cmp_operands[1];
3606 op0 = cmp_operands[1];
3607 op1 = cmp_operands[0];
3609 op0 = force_reg (mode, op0);
3610 if (!arith_operand (op1, mode))
3611 op1 = force_reg (mode, op1);
3613 emit_insn (gen_rtx_TRAP_IF (VOIDmode,
3614 gen_rtx_fmt_ee (cmp_code, mode, op0, op1),
3618 /* Return true if calls to X can use R_MIPS_CALL* relocations. */
3621 mips_ok_for_lazy_binding_p (rtx x)
3623 return (TARGET_USE_GOT
3624 && GET_CODE (x) == SYMBOL_REF
3625 && !mips_symbol_binds_local_p (x));
3628 /* Load function address ADDR into register DEST. SIBCALL_P is true
3629 if the address is needed for a sibling call. */
3632 mips_load_call_address (rtx dest, rtx addr, int sibcall_p)
3634 /* If we're generating PIC, and this call is to a global function,
3635 try to allow its address to be resolved lazily. This isn't
3636 possible if TARGET_CALL_SAVED_GP since the value of $gp on entry
3637 to the stub would be our caller's gp, not ours. */
3638 if (TARGET_EXPLICIT_RELOCS
3639 && !(sibcall_p && TARGET_CALL_SAVED_GP)
3640 && mips_ok_for_lazy_binding_p (addr))
3642 rtx high, lo_sum_symbol;
3644 high = mips_unspec_offset_high (dest, pic_offset_table_rtx,
3645 addr, SYMBOL_GOTOFF_CALL);
3646 lo_sum_symbol = mips_unspec_address (addr, SYMBOL_GOTOFF_CALL);
3647 if (Pmode == SImode)
3648 emit_insn (gen_load_callsi (dest, high, lo_sum_symbol));
3650 emit_insn (gen_load_calldi (dest, high, lo_sum_symbol));
3653 emit_move_insn (dest, addr);
3657 /* Expand a call or call_value instruction. RESULT is where the
3658 result will go (null for calls), ADDR is the address of the
3659 function, ARGS_SIZE is the size of the arguments and AUX is
3660 the value passed to us by mips_function_arg. SIBCALL_P is true
3661 if we are expanding a sibling call, false if we're expanding
3665 mips_expand_call (rtx result, rtx addr, rtx args_size, rtx aux, int sibcall_p)
3667 rtx orig_addr, pattern, insn;
3670 if (!call_insn_operand (addr, VOIDmode))
3672 addr = gen_reg_rtx (Pmode);
3673 mips_load_call_address (addr, orig_addr, sibcall_p);
3676 if (mips16_hard_float
3677 && build_mips16_call_stub (result, addr, args_size,
3678 aux == 0 ? 0 : (int) GET_MODE (aux)))
3682 pattern = (sibcall_p
3683 ? gen_sibcall_internal (addr, args_size)
3684 : gen_call_internal (addr, args_size));
3685 else if (GET_CODE (result) == PARALLEL && XVECLEN (result, 0) == 2)
3689 reg1 = XEXP (XVECEXP (result, 0, 0), 0);
3690 reg2 = XEXP (XVECEXP (result, 0, 1), 0);
3693 ? gen_sibcall_value_multiple_internal (reg1, addr, args_size, reg2)
3694 : gen_call_value_multiple_internal (reg1, addr, args_size, reg2));
3697 pattern = (sibcall_p
3698 ? gen_sibcall_value_internal (result, addr, args_size)
3699 : gen_call_value_internal (result, addr, args_size));
3701 insn = emit_call_insn (pattern);
3703 /* Lazy-binding stubs require $gp to be valid on entry. */
3704 if (mips_ok_for_lazy_binding_p (orig_addr))
3705 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), pic_offset_table_rtx);
3709 /* We can handle any sibcall when TARGET_SIBCALLS is true. */
3712 mips_function_ok_for_sibcall (tree decl ATTRIBUTE_UNUSED,
3713 tree exp ATTRIBUTE_UNUSED)
3715 return TARGET_SIBCALLS;
3718 /* Emit code to move general operand SRC into condition-code
3719 register DEST. SCRATCH is a scratch TFmode float register.
3726 where FP1 and FP2 are single-precision float registers
3727 taken from SCRATCH. */
3730 mips_emit_fcc_reload (rtx dest, rtx src, rtx scratch)
3734 /* Change the source to SFmode. */
3736 src = adjust_address (src, SFmode, 0);
3737 else if (REG_P (src) || GET_CODE (src) == SUBREG)
3738 src = gen_rtx_REG (SFmode, true_regnum (src));
3740 fp1 = gen_rtx_REG (SFmode, REGNO (scratch));
3741 fp2 = gen_rtx_REG (SFmode, REGNO (scratch) + MAX_FPRS_PER_FMT);
3743 emit_move_insn (copy_rtx (fp1), src);
3744 emit_move_insn (copy_rtx (fp2), CONST0_RTX (SFmode));
3745 emit_insn (gen_slt_sf (dest, fp2, fp1));
3748 /* Emit code to change the current function's return address to
3749 ADDRESS. SCRATCH is available as a scratch register, if needed.
3750 ADDRESS and SCRATCH are both word-mode GPRs. */
3753 mips_set_return_address (rtx address, rtx scratch)
3757 compute_frame_size (get_frame_size ());
3758 gcc_assert ((cfun->machine->frame.mask >> 31) & 1);
3759 slot_address = mips_add_offset (scratch, stack_pointer_rtx,
3760 cfun->machine->frame.gp_sp_offset);
3762 emit_move_insn (gen_rtx_MEM (GET_MODE (address), slot_address), address);
3765 /* Emit straight-line code to move LENGTH bytes from SRC to DEST.
3766 Assume that the areas do not overlap. */
3769 mips_block_move_straight (rtx dest, rtx src, HOST_WIDE_INT length)
3771 HOST_WIDE_INT offset, delta;
3772 unsigned HOST_WIDE_INT bits;
3774 enum machine_mode mode;
3777 /* Work out how many bits to move at a time. If both operands have
3778 half-word alignment, it is usually better to move in half words.
3779 For instance, lh/lh/sh/sh is usually better than lwl/lwr/swl/swr
3780 and lw/lw/sw/sw is usually better than ldl/ldr/sdl/sdr.
3781 Otherwise move word-sized chunks. */
3782 if (MEM_ALIGN (src) == BITS_PER_WORD / 2
3783 && MEM_ALIGN (dest) == BITS_PER_WORD / 2)
3784 bits = BITS_PER_WORD / 2;
3786 bits = BITS_PER_WORD;
3788 mode = mode_for_size (bits, MODE_INT, 0);
3789 delta = bits / BITS_PER_UNIT;
3791 /* Allocate a buffer for the temporary registers. */
3792 regs = alloca (sizeof (rtx) * length / delta);
3794 /* Load as many BITS-sized chunks as possible. Use a normal load if
3795 the source has enough alignment, otherwise use left/right pairs. */
3796 for (offset = 0, i = 0; offset + delta <= length; offset += delta, i++)
3798 regs[i] = gen_reg_rtx (mode);
3799 if (MEM_ALIGN (src) >= bits)
3800 emit_move_insn (regs[i], adjust_address (src, mode, offset));
3803 rtx part = adjust_address (src, BLKmode, offset);
3804 if (!mips_expand_unaligned_load (regs[i], part, bits, 0))
3809 /* Copy the chunks to the destination. */
3810 for (offset = 0, i = 0; offset + delta <= length; offset += delta, i++)
3811 if (MEM_ALIGN (dest) >= bits)
3812 emit_move_insn (adjust_address (dest, mode, offset), regs[i]);
3815 rtx part = adjust_address (dest, BLKmode, offset);
3816 if (!mips_expand_unaligned_store (part, regs[i], bits, 0))
3820 /* Mop up any left-over bytes. */
3821 if (offset < length)
3823 src = adjust_address (src, BLKmode, offset);
3824 dest = adjust_address (dest, BLKmode, offset);
3825 move_by_pieces (dest, src, length - offset,
3826 MIN (MEM_ALIGN (src), MEM_ALIGN (dest)), 0);
3830 #define MAX_MOVE_REGS 4
3831 #define MAX_MOVE_BYTES (MAX_MOVE_REGS * UNITS_PER_WORD)
3834 /* Helper function for doing a loop-based block operation on memory
3835 reference MEM. Each iteration of the loop will operate on LENGTH
3838 Create a new base register for use within the loop and point it to
3839 the start of MEM. Create a new memory reference that uses this
3840 register. Store them in *LOOP_REG and *LOOP_MEM respectively. */
3843 mips_adjust_block_mem (rtx mem, HOST_WIDE_INT length,
3844 rtx *loop_reg, rtx *loop_mem)
3846 *loop_reg = copy_addr_to_reg (XEXP (mem, 0));
3848 /* Although the new mem does not refer to a known location,
3849 it does keep up to LENGTH bytes of alignment. */
3850 *loop_mem = change_address (mem, BLKmode, *loop_reg);
3851 set_mem_align (*loop_mem, MIN (MEM_ALIGN (mem), length * BITS_PER_UNIT));
3855 /* Move LENGTH bytes from SRC to DEST using a loop that moves MAX_MOVE_BYTES
3856 per iteration. LENGTH must be at least MAX_MOVE_BYTES. Assume that the
3857 memory regions do not overlap. */
3860 mips_block_move_loop (rtx dest, rtx src, HOST_WIDE_INT length)
3862 rtx label, src_reg, dest_reg, final_src;
3863 HOST_WIDE_INT leftover;
3865 leftover = length % MAX_MOVE_BYTES;
3868 /* Create registers and memory references for use within the loop. */
3869 mips_adjust_block_mem (src, MAX_MOVE_BYTES, &src_reg, &src);
3870 mips_adjust_block_mem (dest, MAX_MOVE_BYTES, &dest_reg, &dest);
3872 /* Calculate the value that SRC_REG should have after the last iteration
3874 final_src = expand_simple_binop (Pmode, PLUS, src_reg, GEN_INT (length),
3877 /* Emit the start of the loop. */
3878 label = gen_label_rtx ();
3881 /* Emit the loop body. */
3882 mips_block_move_straight (dest, src, MAX_MOVE_BYTES);
3884 /* Move on to the next block. */
3885 emit_move_insn (src_reg, plus_constant (src_reg, MAX_MOVE_BYTES));
3886 emit_move_insn (dest_reg, plus_constant (dest_reg, MAX_MOVE_BYTES));
3888 /* Emit the loop condition. */
3889 if (Pmode == DImode)
3890 emit_insn (gen_cmpdi (src_reg, final_src));
3892 emit_insn (gen_cmpsi (src_reg, final_src));
3893 emit_jump_insn (gen_bne (label));
3895 /* Mop up any left-over bytes. */
3897 mips_block_move_straight (dest, src, leftover);
3901 /* Expand a loop of synci insns for the address range [BEGIN, END). */
3904 mips_expand_synci_loop (rtx begin, rtx end)
3906 rtx inc, label, cmp, cmp_result;
3908 /* Load INC with the cache line size (rdhwr INC,$1). */
3909 inc = gen_reg_rtx (SImode);
3910 emit_insn (gen_rdhwr (inc, const1_rtx));
3912 /* Loop back to here. */
3913 label = gen_label_rtx ();
3916 emit_insn (gen_synci (begin));
3918 cmp = gen_reg_rtx (Pmode);
3919 mips_emit_binary (GTU, cmp, begin, end);
3921 mips_emit_binary (PLUS, begin, begin, inc);
3923 cmp_result = gen_rtx_EQ (VOIDmode, cmp, const0_rtx);
3924 emit_jump_insn (gen_condjump (cmp_result, label));
3927 /* Expand a movmemsi instruction. */
3930 mips_expand_block_move (rtx dest, rtx src, rtx length)
3932 if (GET_CODE (length) == CONST_INT)
3934 if (INTVAL (length) <= 2 * MAX_MOVE_BYTES)
3936 mips_block_move_straight (dest, src, INTVAL (length));
3941 mips_block_move_loop (dest, src, INTVAL (length));
3948 /* Argument support functions. */
3950 /* Initialize CUMULATIVE_ARGS for a function. */
3953 init_cumulative_args (CUMULATIVE_ARGS *cum, tree fntype,
3954 rtx libname ATTRIBUTE_UNUSED)
3956 static CUMULATIVE_ARGS zero_cum;
3957 tree param, next_param;
3960 cum->prototype = (fntype && TYPE_ARG_TYPES (fntype));
3962 /* Determine if this function has variable arguments. This is
3963 indicated by the last argument being 'void_type_mode' if there
3964 are no variable arguments. The standard MIPS calling sequence
3965 passes all arguments in the general purpose registers in this case. */
3967 for (param = fntype ? TYPE_ARG_TYPES (fntype) : 0;
3968 param != 0; param = next_param)
3970 next_param = TREE_CHAIN (param);
3971 if (next_param == 0 && TREE_VALUE (param) != void_type_node)
3972 cum->gp_reg_found = 1;
3977 /* Fill INFO with information about a single argument. CUM is the
3978 cumulative state for earlier arguments. MODE is the mode of this
3979 argument and TYPE is its type (if known). NAMED is true if this
3980 is a named (fixed) argument rather than a variable one. */
/* NOTE(review): this extract has lost the structural lines that select
   between the three fpr_p computations below -- presumably a
   switch/if over mips_abi (EABI, o32/o64, n32/n64) -- plus braces and
   the second operand of the reg_offset conditional.  Only comments
   were added; confirm structure against the full source.  */
3983 mips_arg_info (const CUMULATIVE_ARGS *cum, enum machine_mode mode,
3984 tree type, int named, struct mips_arg_info *info)
3986 bool doubleword_aligned_p;
3987 unsigned int num_bytes, num_words, max_regs;
3989 /* Work out the size of the argument. */
3990 num_bytes = type ? int_size_in_bytes (type) : GET_MODE_SIZE (mode);
3991 num_words = (num_bytes + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
3993 /* Decide whether it should go in a floating-point register, assuming
3994 one is free. Later code checks for availability.
3996 The checks against UNITS_PER_FPVALUE handle the soft-float and
3997 single-float cases. */
/* Case 1 (EABI, to be confirmed): */
4001 /* The EABI conventions have traditionally been defined in terms
4002 of TYPE_MODE, regardless of the actual type. */
4003 info->fpr_p = ((GET_MODE_CLASS (mode) == MODE_FLOAT
4004 || GET_MODE_CLASS (mode) == MODE_VECTOR_FLOAT)
4005 && GET_MODE_SIZE (mode) <= UNITS_PER_FPVALUE);
/* Case 2 (o32/o64, to be confirmed): */
4010 /* Only leading floating-point scalars are passed in
4011 floating-point registers. We also handle vector floats the same
4012 say, which is OK because they are not covered by the standard ABI. */
4013 info->fpr_p = (!cum->gp_reg_found
4014 && cum->arg_number < 2
4015 && (type == 0 || SCALAR_FLOAT_TYPE_P (type)
4016 || VECTOR_FLOAT_TYPE_P (type))
4017 && (GET_MODE_CLASS (mode) == MODE_FLOAT
4018 || GET_MODE_CLASS (mode) == MODE_VECTOR_FLOAT)
4019 && GET_MODE_SIZE (mode) <= UNITS_PER_FPVALUE);
/* Case 3 (n32/n64, to be confirmed): */
4024 /* Scalar and complex floating-point types are passed in
4025 floating-point registers. */
4026 info->fpr_p = (named
4027 && (type == 0 || FLOAT_TYPE_P (type))
4028 && (GET_MODE_CLASS (mode) == MODE_FLOAT
4029 || GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT
4030 || GET_MODE_CLASS (mode) == MODE_VECTOR_FLOAT)
4031 && GET_MODE_UNIT_SIZE (mode) <= UNITS_PER_FPVALUE);
4033 /* ??? According to the ABI documentation, the real and imaginary
4034 parts of complex floats should be passed in individual registers.
4035 The real and imaginary parts of stack arguments are supposed
4036 to be contiguous and there should be an extra word of padding
4039 This has two problems. First, it makes it impossible to use a
4040 single "void *" va_list type, since register and stack arguments
4041 are passed differently. (At the time of writing, MIPSpro cannot
4042 handle complex float varargs correctly.) Second, it's unclear
4043 what should happen when there is only one register free.
4045 For now, we assume that named complex floats should go into FPRs
4046 if there are two FPRs free, otherwise they should be passed in the
4047 same way as a struct containing two floats. */
/* NOTE(review): the leading condition of this "if" is elided.  */
4049 && GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT
4050 && GET_MODE_UNIT_SIZE (mode) < UNITS_PER_FPVALUE)
4052 if (cum->num_gprs >= MAX_ARGS_IN_REGISTERS - 1)
4053 info->fpr_p = false;
4063 /* See whether the argument has doubleword alignment. */
4064 doubleword_aligned_p = FUNCTION_ARG_BOUNDARY (mode, type) > BITS_PER_WORD;
4066 /* Set REG_OFFSET to the register count we're interested in.
4067 The EABI allocates the floating-point registers separately,
4068 but the other ABIs allocate them like integer registers. */
/* NOTE(review): the two arms of this conditional (FPR count vs GPR
   count) are elided.  */
4069 info->reg_offset = (mips_abi == ABI_EABI && info->fpr_p
4073 /* Advance to an even register if the argument is doubleword-aligned. */
4074 if (doubleword_aligned_p)
4075 info->reg_offset += info->reg_offset & 1;
4077 /* Work out the offset of a stack argument. */
4078 info->stack_offset = cum->stack_words;
4079 if (doubleword_aligned_p)
4080 info->stack_offset += info->stack_offset & 1;
4082 max_regs = MAX_ARGS_IN_REGISTERS - info->reg_offset;
4084 /* Partition the argument between registers and stack. */
4085 info->reg_words = MIN (num_words, max_regs);
4086 info->stack_words = num_words - info->reg_words;
4090 /* INFO describes an argument that is passed in a single-register value.
4091 Return the register it uses, assuming that FPRs are available if
4095 mips_arg_regno (const struct mips_arg_info *info, bool hard_float_p)
4097 if (!info->fpr_p || !hard_float_p)
4098 return GP_ARG_FIRST + info->reg_offset;
4099 else if (mips_abi == ABI_32 && TARGET_DOUBLE_FLOAT && info->reg_offset > 0)
4100 /* In o32, the second argument is always passed in $f14
4101 for TARGET_DOUBLE_FLOAT, regardless of whether the
4102 first argument was a word or doubleword. */
4103 return FP_ARG_FIRST + 2;
4105 return FP_ARG_FIRST + info->reg_offset;
4108 /* Implement FUNCTION_ARG_ADVANCE. */
4111 function_arg_advance (CUMULATIVE_ARGS *cum, enum machine_mode mode,
4112 tree type, int named)
4114 struct mips_arg_info info;
4116 mips_arg_info (cum, mode, type, named, &info);
4119 cum->gp_reg_found = true;
4121 /* See the comment above the cumulative args structure in mips.h
4122 for an explanation of what this code does. It assumes the O32
4123 ABI, which passes at most 2 arguments in float registers. */
4124 if (cum->arg_number < 2 && info.fpr_p)
4125 cum->fp_code += (mode == SFmode ? 1 : 2) << (cum->arg_number * 2);
4127 if (mips_abi != ABI_EABI || !info.fpr_p)
4128 cum->num_gprs = info.reg_offset + info.reg_words;
4129 else if (info.reg_words > 0)
4130 cum->num_fprs += MAX_FPRS_PER_FMT;
4132 if (info.stack_words > 0)
4133 cum->stack_words = info.stack_offset + info.stack_words;
4138 /* Implement FUNCTION_ARG. */
/* NOTE(review): this extract is missing many structural lines (return
   type, braces, the full condition guarding the n32/n64 RECORD_TYPE
   special case, loop-local declarations such as 'ret', 'field', 'reg',
   'i', and several else-arms).  Only comments were added; confirm
   structure against the full source.  */
4141 function_arg (const CUMULATIVE_ARGS *cum, enum machine_mode mode,
4142 tree type, int named)
4144 struct mips_arg_info info;
4146 /* We will be called with a mode of VOIDmode after the last argument
4147 has been seen. Whatever we return will be passed to the call
4148 insn. If we need a mips16 fp_code, return a REG with the code
4149 stored as the mode. */
4150 if (mode == VOIDmode)
4152 if (TARGET_MIPS16 && cum->fp_code != 0)
4153 return gen_rtx_REG ((enum machine_mode) cum->fp_code, 0);
4159 mips_arg_info (cum, mode, type, named, &info);
4161 /* Return straight away if the whole argument is passed on the stack. */
4162 if (info.reg_offset == MAX_ARGS_IN_REGISTERS)
/* NOTE(review): the head of this condition (presumably a type != 0 /
   newabi test) is elided.  */
4166 && TREE_CODE (type) == RECORD_TYPE
4168 && TYPE_SIZE_UNIT (type)
4169 && host_integerp (TYPE_SIZE_UNIT (type), 1)
4172 /* The Irix 6 n32/n64 ABIs say that if any 64-bit chunk of the
4173 structure contains a double in its entirety, then that 64-bit
4174 chunk is passed in a floating point register. */
4177 /* First check to see if there is any such field. */
4178 for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field))
4179 if (TREE_CODE (field) == FIELD_DECL
4180 && TREE_CODE (TREE_TYPE (field)) == REAL_TYPE
4181 && TYPE_PRECISION (TREE_TYPE (field)) == BITS_PER_WORD
4182 && host_integerp (bit_position (field), 0)
4183 && int_bit_position (field) % BITS_PER_WORD == 0)
4188 /* Now handle the special case by returning a PARALLEL
4189 indicating where each 64-bit chunk goes. INFO.REG_WORDS
4190 chunks are passed in registers. */
4192 HOST_WIDE_INT bitpos;
4195 /* assign_parms checks the mode of ENTRY_PARM, so we must
4196 use the actual mode here. */
4197 ret = gen_rtx_PARALLEL (mode, rtvec_alloc (info.reg_words));
4200 field = TYPE_FIELDS (type);
/* One EXPR_LIST entry per 64-bit chunk of the record.  */
4201 for (i = 0; i < info.reg_words; i++)
/* Skip to the field (if any) covering the current chunk.  */
4205 for (; field; field = TREE_CHAIN (field))
4206 if (TREE_CODE (field) == FIELD_DECL
4207 && int_bit_position (field) >= bitpos)
/* NOTE(review): the head of this condition ('if (field' presumably)
   is elided.  */
4211 && int_bit_position (field) == bitpos
4212 && TREE_CODE (TREE_TYPE (field)) == REAL_TYPE
4213 && !TARGET_SOFT_FLOAT
4214 && TYPE_PRECISION (TREE_TYPE (field)) == BITS_PER_WORD)
4215 reg = gen_rtx_REG (DFmode, FP_ARG_FIRST + info.reg_offset + i);
4217 reg = gen_rtx_REG (DImode, GP_ARG_FIRST + info.reg_offset + i);
4220 = gen_rtx_EXPR_LIST (VOIDmode, reg,
4221 GEN_INT (bitpos / BITS_PER_UNIT));
4223 bitpos += BITS_PER_WORD;
4229 /* Handle the n32/n64 conventions for passing complex floating-point
4230 arguments in FPR pairs. The real part goes in the lower register
4231 and the imaginary part goes in the upper register. */
/* NOTE(review): the head of this condition (info.fpr_p / newabi test)
   is elided.  */
4234 && GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT)
4237 enum machine_mode inner;
4240 inner = GET_MODE_INNER (mode);
4241 reg = FP_ARG_FIRST + info.reg_offset;
4242 if (info.reg_words * UNITS_PER_WORD == GET_MODE_SIZE (inner))
4244 /* Real part in registers, imaginary part on stack. */
4245 gcc_assert (info.stack_words == info.reg_words);
4246 return gen_rtx_REG (inner, reg);
/* Both parts fit in registers: build a two-entry PARALLEL.  */
4250 gcc_assert (info.stack_words == 0);
4251 real = gen_rtx_EXPR_LIST (VOIDmode,
4252 gen_rtx_REG (inner, reg),
4254 imag = gen_rtx_EXPR_LIST (VOIDmode,
4256 reg + info.reg_words / 2),
4257 GEN_INT (GET_MODE_SIZE (inner)));
4258 return gen_rtx_PARALLEL (mode, gen_rtvec (2, real, imag));
/* Default: a single register chosen by mips_arg_regno.  */
4262 return gen_rtx_REG (mode, mips_arg_regno (&info, TARGET_HARD_FLOAT));
4266 /* Implement TARGET_ARG_PARTIAL_BYTES. */
4269 mips_arg_partial_bytes (CUMULATIVE_ARGS *cum,
4270 enum machine_mode mode, tree type, bool named)
4272 struct mips_arg_info info;
4274 mips_arg_info (cum, mode, type, named, &info);
4275 return info.stack_words > 0 ? info.reg_words * UNITS_PER_WORD : 0;
4279 /* Implement FUNCTION_ARG_BOUNDARY. Every parameter gets at least
4280 PARM_BOUNDARY bits of alignment, but will be given anything up
4281 to STACK_BOUNDARY bits if the type requires it. */
4284 function_arg_boundary (enum machine_mode mode, tree type)
4286 unsigned int alignment;
4288 alignment = type ? TYPE_ALIGN (type) : GET_MODE_ALIGNMENT (mode);
4289 if (alignment < PARM_BOUNDARY)
4290 alignment = PARM_BOUNDARY;
4291 if (alignment > STACK_BOUNDARY)
4292 alignment = STACK_BOUNDARY;
4296 /* Return true if FUNCTION_ARG_PADDING (MODE, TYPE) should return
4297 upward rather than downward. In other words, return true if the
4298 first byte of the stack slot has useful data, false if the last
4302 mips_pad_arg_upward (enum machine_mode mode, tree type)
4304 /* On little-endian targets, the first byte of every stack argument
4305 is passed in the first byte of the stack slot. */
4306 if (!BYTES_BIG_ENDIAN)
4309 /* Otherwise, integral types are padded downward: the last byte of a
4310 stack argument is passed in the last byte of the stack slot. */
4312 ? INTEGRAL_TYPE_P (type) || POINTER_TYPE_P (type)
4313 : GET_MODE_CLASS (mode) == MODE_INT)
4316 /* Big-endian o64 pads floating-point arguments downward. */
4317 if (mips_abi == ABI_O64)
4318 if (type != 0 ? FLOAT_TYPE_P (type) : GET_MODE_CLASS (mode) == MODE_FLOAT)
4321 /* Other types are padded upward for o32, o64, n32 and n64. */
4322 if (mips_abi != ABI_EABI)
4325 /* Arguments smaller than a stack slot are padded downward. */
4326 if (mode != BLKmode)
4327 return (GET_MODE_BITSIZE (mode) >= PARM_BOUNDARY);
4329 return (int_size_in_bytes (type) >= (PARM_BOUNDARY / BITS_PER_UNIT));
4333 /* Likewise BLOCK_REG_PADDING (MODE, TYPE, ...). Return !BYTES_BIG_ENDIAN
4334 if the least significant byte of the register has useful data. Return
4335 the opposite if the most significant byte does. */
4338 mips_pad_reg_upward (enum machine_mode mode, tree type)
4340 /* No shifting is required for floating-point arguments. */
4341 if (type != 0 ? FLOAT_TYPE_P (type) : GET_MODE_CLASS (mode) == MODE_FLOAT)
4342 return !BYTES_BIG_ENDIAN;
4344 /* Otherwise, apply the same padding to register arguments as we do
4345 to stack arguments. */
4346 return mips_pad_arg_upward (mode, type);
/* NOTE(review): this extract is missing structural lines -- the
   trailing parameter (presumably 'int no_rtl' with an early return),
   the 'else' arm of the fp_saved conditional, the 'if (gp_saved > 0)'
   and 'if (fp_saved > 0)' guards with their braces, and local decls
   such as 'rtx ptr, mem;', 'int off, i;'.  Only comments were added;
   confirm against the full source.  */
4350 mips_setup_incoming_varargs (CUMULATIVE_ARGS *cum, enum machine_mode mode,
4351 tree type, int *pretend_size ATTRIBUTE_UNUSED,
4354 CUMULATIVE_ARGS local_cum;
4355 int gp_saved, fp_saved;
4357 /* The caller has advanced CUM up to, but not beyond, the last named
4358 argument. Advance a local copy of CUM past the last "real" named
4359 argument, to find out how many registers are left over. */
4362 FUNCTION_ARG_ADVANCE (local_cum, mode, type, 1);
4364 /* Found out how many registers we need to save. */
4365 gp_saved = MAX_ARGS_IN_REGISTERS - local_cum.num_gprs;
4366 fp_saved = (EABI_FLOAT_VARARGS_P
4367 ? MAX_ARGS_IN_REGISTERS - local_cum.num_fprs
/* Save the leftover GPRs as one BLKmode block below the incoming
   argument pointer.  */
4376 ptr = plus_constant (virtual_incoming_args_rtx,
4377 REG_PARM_STACK_SPACE (cfun->decl)
4378 - gp_saved * UNITS_PER_WORD);
4379 mem = gen_rtx_MEM (BLKmode, ptr);
4380 set_mem_alias_set (mem, get_varargs_alias_set ());
4382 move_block_from_reg (local_cum.num_gprs + GP_ARG_FIRST,
4387 /* We can't use move_block_from_reg, because it will use
4389 enum machine_mode mode;
4392 /* Set OFF to the offset from virtual_incoming_args_rtx of
4393 the first float register. The FP save area lies below
4394 the integer one, and is aligned to UNITS_PER_FPVALUE bytes. */
4395 off = -gp_saved * UNITS_PER_WORD;
4396 off &= ~(UNITS_PER_FPVALUE - 1);
4397 off -= fp_saved * UNITS_PER_FPREG;
4399 mode = TARGET_SINGLE_FLOAT ? SFmode : DFmode;
/* Store each leftover FPR individually into the FP save area.  */
4401 for (i = local_cum.num_fprs; i < MAX_ARGS_IN_REGISTERS;
4402 i += MAX_FPRS_PER_FMT)
4406 ptr = plus_constant (virtual_incoming_args_rtx, off);
4407 mem = gen_rtx_MEM (mode, ptr);
4408 set_mem_alias_set (mem, get_varargs_alias_set ());
4409 emit_move_insn (mem, gen_rtx_REG (mode, FP_ARG_FIRST + i));
4410 off += UNITS_PER_HWFPVALUE;
/* Record the total varargs save-area size for the prologue.  */
4414 if (REG_PARM_STACK_SPACE (cfun->decl) == 0)
4415 cfun->machine->varargs_size = (gp_saved * UNITS_PER_WORD
4416 + fp_saved * UNITS_PER_FPREG);
4419 /* Create the va_list data type.
4420 We keep 3 pointers, and two offsets.
4421 Two pointers are to the overflow area, which starts at the CFA.
4422 One of these is constant, for addressing into the GPR save area below it.
4423 The other is advanced up the stack through the overflow region.
4424 The third pointer is to the GPR save area. Since the FPR save area
4425 is just below it, we can address FPR slots off this pointer.
4426 We also keep two one-byte offsets, which are to be subtracted from the
4427 constant pointers to yield addresses in the GPR and FPR save areas.
4428 These are downcounted as float or non-float arguments are used,
4429 and when they get to zero, the argument must be obtained from the
4431 If !EABI_FLOAT_VARARGS_P, then no FPR save area exists, and a single
4432 pointer is enough. It's started at the GPR save area, and is
4434 Note that the GPR save area is not constant size, due to optimization
4435 in the prologue. Hence, we can't use a design with two pointers
4436 and two offsets, although we could have designed this with two pointers
4437 and three offsets. */
/* NOTE(review): this extract is missing the return type line
   (presumably 'static tree'), braces, the declarations of the local
   'index' and 'array' trees, and the trailing 'return record;'.
   Only comments were added; confirm against the full source.  */
4440 mips_build_builtin_va_list (void)
4442 if (EABI_FLOAT_VARARGS_P)
4444 tree f_ovfl, f_gtop, f_ftop, f_goff, f_foff, f_res, record;
/* Build the 5-field EABI va_list record described in the comment
   above this function.  */
4447 record = (*lang_hooks.types.make_type) (RECORD_TYPE);
4449 f_ovfl = build_decl (FIELD_DECL, get_identifier ("__overflow_argptr"),
4451 f_gtop = build_decl (FIELD_DECL, get_identifier ("__gpr_top"),
4453 f_ftop = build_decl (FIELD_DECL, get_identifier ("__fpr_top"),
4455 f_goff = build_decl (FIELD_DECL, get_identifier ("__gpr_offset"),
4456 unsigned_char_type_node);
4457 f_foff = build_decl (FIELD_DECL, get_identifier ("__fpr_offset"),
4458 unsigned_char_type_node);
4459 /* Explicitly pad to the size of a pointer, so that -Wpadded won't
4460 warn on every user file. */
4461 index = build_int_cst (NULL_TREE, GET_MODE_SIZE (ptr_mode) - 2 - 1);
4462 array = build_array_type (unsigned_char_type_node,
4463 build_index_type (index));
4464 f_res = build_decl (FIELD_DECL, get_identifier ("__reserved"), array);
4466 DECL_FIELD_CONTEXT (f_ovfl) = record;
4467 DECL_FIELD_CONTEXT (f_gtop) = record;
4468 DECL_FIELD_CONTEXT (f_ftop) = record;
4469 DECL_FIELD_CONTEXT (f_goff) = record;
4470 DECL_FIELD_CONTEXT (f_foff) = record;
4471 DECL_FIELD_CONTEXT (f_res) = record;
/* Chain the fields in declaration order.  */
4473 TYPE_FIELDS (record) = f_ovfl;
4474 TREE_CHAIN (f_ovfl) = f_gtop;
4475 TREE_CHAIN (f_gtop) = f_ftop;
4476 TREE_CHAIN (f_ftop) = f_goff;
4477 TREE_CHAIN (f_goff) = f_foff;
4478 TREE_CHAIN (f_foff) = f_res;
4480 layout_type (record);
/* NOTE(review): 'return record;' is elided here.  */
4483 else if (TARGET_IRIX && TARGET_IRIX6)
4484 /* On IRIX 6, this type is 'char *'. */
4485 return build_pointer_type (char_type_node);
4487 /* Otherwise, we use 'void *'. */
4488 return ptr_type_node;
4491 /* Implement va_start. */
/* NOTE(review): this extract is missing structural lines (braces, the
   third build3 argument NULL_TREE on the COMPONENT_REF lines, some
   decls such as 'tree t' and 'int fpr_offset'), and line 4506 below
   contains mojibake: '¤t_function_args_info' is almost certainly
   '&current_function_args_info' ('&curren' mangled via an HTML
   entity).  Left byte-identical; only comments were added.  */
4494 mips_va_start (tree valist, rtx nextarg)
4496 if (EABI_FLOAT_VARARGS_P)
4498 const CUMULATIVE_ARGS *cum;
4499 tree f_ovfl, f_gtop, f_ftop, f_goff, f_foff;
4500 tree ovfl, gtop, ftop, goff, foff;
4502 int gpr_save_area_size;
4503 int fpr_save_area_size;
4506 cum = ¤t_function_args_info;
/* Sizes of the register save areas laid out by
   mips_setup_incoming_varargs.  */
4508 = (MAX_ARGS_IN_REGISTERS - cum->num_gprs) * UNITS_PER_WORD;
4510 = (MAX_ARGS_IN_REGISTERS - cum->num_fprs) * UNITS_PER_FPREG;
/* Walk the five fields of the EABI va_list record in order.  */
4512 f_ovfl = TYPE_FIELDS (va_list_type_node);
4513 f_gtop = TREE_CHAIN (f_ovfl);
4514 f_ftop = TREE_CHAIN (f_gtop);
4515 f_goff = TREE_CHAIN (f_ftop);
4516 f_foff = TREE_CHAIN (f_goff);
4518 ovfl = build3 (COMPONENT_REF, TREE_TYPE (f_ovfl), valist, f_ovfl,
4520 gtop = build3 (COMPONENT_REF, TREE_TYPE (f_gtop), valist, f_gtop,
4522 ftop = build3 (COMPONENT_REF, TREE_TYPE (f_ftop), valist, f_ftop,
4524 goff = build3 (COMPONENT_REF, TREE_TYPE (f_goff), valist, f_goff,
4526 foff = build3 (COMPONENT_REF, TREE_TYPE (f_foff), valist, f_foff,
4529 /* Emit code to initialize OVFL, which points to the next varargs
4530 stack argument. CUM->STACK_WORDS gives the number of stack
4531 words used by named arguments. */
4532 t = make_tree (TREE_TYPE (ovfl), virtual_incoming_args_rtx);
4533 if (cum->stack_words > 0)
4534 t = build2 (POINTER_PLUS_EXPR, TREE_TYPE (ovfl), t,
4535 size_int (cum->stack_words * UNITS_PER_WORD));
4536 t = build2 (GIMPLE_MODIFY_STMT, TREE_TYPE (ovfl), ovfl, t);
4537 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
4539 /* Emit code to initialize GTOP, the top of the GPR save area. */
4540 t = make_tree (TREE_TYPE (gtop), virtual_incoming_args_rtx);
4541 t = build2 (GIMPLE_MODIFY_STMT, TREE_TYPE (gtop), gtop, t);
4542 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
4544 /* Emit code to initialize FTOP, the top of the FPR save area.
4545 This address is gpr_save_area_bytes below GTOP, rounded
4546 down to the next fp-aligned boundary. */
4547 t = make_tree (TREE_TYPE (ftop), virtual_incoming_args_rtx);
4548 fpr_offset = gpr_save_area_size + UNITS_PER_FPVALUE - 1;
4549 fpr_offset &= ~(UNITS_PER_FPVALUE - 1);
/* NOTE(review): presumably guarded by 'if (fpr_offset)' -- the guard
   line is elided.  */
4551 t = build2 (POINTER_PLUS_EXPR, TREE_TYPE (ftop), t,
4552 size_int (-fpr_offset));
4553 t = build2 (GIMPLE_MODIFY_STMT, TREE_TYPE (ftop), ftop, t);
4554 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
4556 /* Emit code to initialize GOFF, the offset from GTOP of the
4557 next GPR argument. */
4558 t = build2 (GIMPLE_MODIFY_STMT, TREE_TYPE (goff), goff,
4559 build_int_cst (NULL_TREE, gpr_save_area_size));
4560 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
4562 /* Likewise emit code to initialize FOFF, the offset from FTOP
4563 of the next FPR argument. */
4564 t = build2 (GIMPLE_MODIFY_STMT, TREE_TYPE (foff), foff,
4565 build_int_cst (NULL_TREE, fpr_save_area_size));
4566 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
/* Non-EABI: the standard single-pointer va_list, started just past
   the register save area.  */
4570 nextarg = plus_constant (nextarg, -cfun->machine->varargs_size);
4571 std_expand_builtin_va_start (valist, nextarg);
4575 /* Implement va_arg. */
4578 mips_gimplify_va_arg_expr (tree valist, tree type, tree *pre_p, tree *post_p)
4580 HOST_WIDE_INT size, rsize;
4584 indirect = pass_by_reference (NULL, TYPE_MODE (type), type, 0);
4587 type = build_pointer_type (type);
4589 size = int_size_in_bytes (type);
4590 rsize = (size + UNITS_PER_WORD - 1) & -UNITS_PER_WORD;
4592 if (mips_abi != ABI_EABI || !EABI_FLOAT_VARARGS_P)
4593 addr = std_gimplify_va_arg_expr (valist, type, pre_p, post_p);
4596 /* Not a simple merged stack. */
4598 tree f_ovfl, f_gtop, f_ftop, f_goff, f_foff;
4599 tree ovfl, top, off, align;
4600 HOST_WIDE_INT osize;
4603 f_ovfl = TYPE_FIELDS (va_list_type_node);
4604 f_gtop = TREE_CHAIN (f_ovfl);
4605 f_ftop = TREE_CHAIN (f_gtop);
4606 f_goff = TREE_CHAIN (f_ftop);
4607 f_foff = TREE_CHAIN (f_goff);
4609 /* We maintain separate pointers and offsets for floating-point
4610 and integer arguments, but we need similar code in both cases.
4613 TOP be the top of the register save area;
4614 OFF be the offset from TOP of the next register;
4615 ADDR_RTX be the address of the argument;
4616 RSIZE be the number of bytes used to store the argument
4617 when it's in the register save area;
4618 OSIZE be the number of bytes used to store it when it's
4619 in the stack overflow area; and
4620 PADDING be (BYTES_BIG_ENDIAN ? OSIZE - RSIZE : 0)
4622 The code we want is:
4624 1: off &= -rsize; // round down
4627 4: addr_rtx = top - off;
4632 9: ovfl += ((intptr_t) ovfl + osize - 1) & -osize;
4633 10: addr_rtx = ovfl + PADDING;
4637 [1] and [9] can sometimes be optimized away. */
4639 ovfl = build3 (COMPONENT_REF, TREE_TYPE (f_ovfl), valist, f_ovfl,
4642 if (GET_MODE_CLASS (TYPE_MODE (type)) == MODE_FLOAT
4643 && GET_MODE_SIZE (TYPE_MODE (type)) <= UNITS_PER_FPVALUE)
4645 top = build3 (COMPONENT_REF, TREE_TYPE (f_ftop), valist, f_ftop,
4647 off = build3 (COMPONENT_REF, TREE_TYPE (f_foff), valist, f_foff,
4650 /* When floating-point registers are saved to the stack,
4651 each one will take up UNITS_PER_HWFPVALUE bytes, regardless
4652 of the float's precision. */
4653 rsize = UNITS_PER_HWFPVALUE;
4655 /* Overflow arguments are padded to UNITS_PER_WORD bytes
4656 (= PARM_BOUNDARY bits). This can be different from RSIZE
4659 (1) On 32-bit targets when TYPE is a structure such as:
4661 struct s { float f; };
4663 Such structures are passed in paired FPRs, so RSIZE
4664 will be 8 bytes. However, the structure only takes
4665 up 4 bytes of memory, so OSIZE will only be 4.
4667 (2) In combinations such as -mgp64 -msingle-float
4668 -fshort-double. Doubles passed in registers
4669 will then take up 4 (UNITS_PER_HWFPVALUE) bytes,
4670 but those passed on the stack take up
4671 UNITS_PER_WORD bytes. */
4672 osize = MAX (GET_MODE_SIZE (TYPE_MODE (type)), UNITS_PER_WORD);
4676 top = build3 (COMPONENT_REF, TREE_TYPE (f_gtop), valist, f_gtop,
4678 off = build3 (COMPONENT_REF, TREE_TYPE (f_goff), valist, f_goff,
4680 if (rsize > UNITS_PER_WORD)
4682 /* [1] Emit code for: off &= -rsize. */
4683 t = build2 (BIT_AND_EXPR, TREE_TYPE (off), off,
4684 build_int_cst (NULL_TREE, -rsize));
4685 t = build2 (GIMPLE_MODIFY_STMT, TREE_TYPE (off), off, t);
4686 gimplify_and_add (t, pre_p);
4691 /* [2] Emit code to branch if off == 0. */
4692 t = build2 (NE_EXPR, boolean_type_node, off,
4693 build_int_cst (TREE_TYPE (off), 0));
4694 addr = build3 (COND_EXPR, ptr_type_node, t, NULL_TREE, NULL_TREE);
4696 /* [5] Emit code for: off -= rsize. We do this as a form of
4697 post-increment not available to C. Also widen for the
4698 coming pointer arithmetic. */
4699 t = fold_convert (TREE_TYPE (off), build_int_cst (NULL_TREE, rsize));
4700 t = build2 (POSTDECREMENT_EXPR, TREE_TYPE (off), off, t);
4701 t = fold_convert (sizetype, t);
4702 t = fold_build1 (NEGATE_EXPR, sizetype, t);
4704 /* [4] Emit code for: addr_rtx = top - off. On big endian machines,
4705 the argument has RSIZE - SIZE bytes of leading padding. */
4706 t = build2 (POINTER_PLUS_EXPR, TREE_TYPE (top), top, t);
4707 if (BYTES_BIG_ENDIAN && rsize > size)
4709 u = size_int (rsize - size);
4710 t = build2 (POINTER_PLUS_EXPR, TREE_TYPE (t), t, u);
4712 COND_EXPR_THEN (addr) = t;
4714 if (osize > UNITS_PER_WORD)
4716 /* [9] Emit: ovfl += ((intptr_t) ovfl + osize - 1) & -osize. */
4717 u = size_int (osize - 1);
4718 t = build2 (POINTER_PLUS_EXPR, TREE_TYPE (ovfl), ovfl, u);
4719 t = fold_convert (sizetype, t);
4720 u = size_int (-osize);
4721 t = build2 (BIT_AND_EXPR, sizetype, t, u);
4722 t = fold_convert (TREE_TYPE (ovfl), t);
4723 align = build2 (GIMPLE_MODIFY_STMT, TREE_TYPE (ovfl), ovfl, t);
4728 /* [10, 11]. Emit code to store ovfl in addr_rtx, then
4729 post-increment ovfl by osize. On big-endian machines,
4730 the argument has OSIZE - SIZE bytes of leading padding. */
4731 u = fold_convert (TREE_TYPE (ovfl),
4732 build_int_cst (NULL_TREE, osize));
4733 t = build2 (POSTINCREMENT_EXPR, TREE_TYPE (ovfl), ovfl, u);
4734 if (BYTES_BIG_ENDIAN && osize > size)
4736 u = size_int (osize - size);
4737 t = build2 (POINTER_PLUS_EXPR, TREE_TYPE (t), t, u);
4740 /* String [9] and [10,11] together. */
4742 t = build2 (COMPOUND_EXPR, TREE_TYPE (t), align, t);
4743 COND_EXPR_ELSE (addr) = t;
4745 addr = fold_convert (build_pointer_type (type), addr);
4746 addr = build_va_arg_indirect_ref (addr);
4750 addr = build_va_arg_indirect_ref (addr);
4755 /* Return true if it is possible to use left/right accesses for a
4756 bitfield of WIDTH bits starting BITPOS bits into *OP. When
4757 returning true, update *OP, *LEFT and *RIGHT as follows:
4759 *OP is a BLKmode reference to the whole field.
4761 *LEFT is a QImode reference to the first byte if big endian or
4762 the last byte if little endian. This address can be used in the
4763 left-side instructions (lwl, swl, ldl, sdl).
4765 *RIGHT is a QImode reference to the opposite end of the field and
4766 can be used in the patterning right-side instruction. */
4769 mips_get_unaligned_mem (rtx *op, unsigned int width, int bitpos,
4770 rtx *left, rtx *right)
/* The early checks below reject cases the lwl/lwr (and ldl/ldr) pair
   cannot or should not handle; the elided lines return false.  */
4774 /* Check that the operand really is a MEM. Not all the extv and
4775 extzv predicates are checked. */
4779 /* Check that the size is valid. */
/* Only full 32-bit accesses, or 64-bit accesses on 64-bit targets,
   map onto the left/right instruction pairs.  */
4780 if (width != 32 && (!TARGET_64BIT || width != 64))
4783 /* We can only access byte-aligned values. Since we are always passed
4784 a reference to the first byte of the field, it is not necessary to
4785 do anything with BITPOS after this check. */
4786 if (bitpos % BITS_PER_UNIT != 0)
4789 /* Reject aligned bitfields: we want to use a normal load or store
4790 instead of a left/right pair. */
4791 if (MEM_ALIGN (*op) >= width)
4794 /* Adjust *OP to refer to the whole field. This also has the effect
4795 of legitimizing *OP's address for BLKmode, possibly simplifying it. */
4796 *op = adjust_address (*op, BLKmode, 0);
4797 set_mem_size (*op, GEN_INT (width / BITS_PER_UNIT));
4799 /* Get references to both ends of the field. We deliberately don't
4800 use the original QImode *OP for FIRST since the new BLKmode one
4801 might have a simpler address. */
4802 first = adjust_address (*op, QImode, 0);
4803 last = adjust_address (*op, QImode, width / BITS_PER_UNIT - 1);
4805 /* Allocate to LEFT and RIGHT according to endianness. LEFT should
4806 be the upper word and RIGHT the lower word. */
4807 if (TARGET_BIG_ENDIAN)
4808 *left = first, *right = last;
4810 *left = last, *right = first;
4816 /* Try to emit the equivalent of (set DEST (zero_extract SRC WIDTH BITPOS)).
4817 Return true on success. We only handle cases where zero_extract is
4818 equivalent to sign_extract. */
/* Expands an unaligned WIDTH-bit load from SRC into DEST using the
   lwl/lwr (SImode) or ldl/ldr (DImode) instruction pairs.  Returns
   false when mips_get_unaligned_mem rejects the access.  */
4821 mips_expand_unaligned_load (rtx dest, rtx src, unsigned int width, int bitpos)
4823 rtx left, right, temp;
4825 /* If TARGET_64BIT, the destination of a 32-bit load will be a
4826 paradoxical word_mode subreg. This is the only case in which
4827 we allow the destination to be larger than the source. */
4828 if (GET_CODE (dest) == SUBREG
4829 && GET_MODE (dest) == DImode
4830 && SUBREG_BYTE (dest) == 0
4831 && GET_MODE (SUBREG_REG (dest)) == SImode)
4832 dest = SUBREG_REG (dest);
4834 /* After the above adjustment, the destination must be the same
4835 width as the source. */
4836 if (GET_MODE_BITSIZE (GET_MODE (dest)) != width)
4839 if (!mips_get_unaligned_mem (&src, width, bitpos, &left, &right))
/* Load via a fresh temporary so that the left-half load does not
   clobber DEST before the right-half load completes it.  */
4842 temp = gen_reg_rtx (GET_MODE (dest));
4843 if (GET_MODE (dest) == DImode)
4845 emit_insn (gen_mov_ldl (temp, src, left));
4846 emit_insn (gen_mov_ldr (dest, copy_rtx (src), right, temp));
4850 emit_insn (gen_mov_lwl (temp, src, left));
4851 emit_insn (gen_mov_lwr (dest, copy_rtx (src), right, temp));
4857 /* Try to expand (set (zero_extract DEST WIDTH BITPOS) SRC). Return
/* Expands an unaligned WIDTH-bit store of SRC into DEST using the
   swl/swr or sdl/sdr pairs; the mode is chosen from WIDTH.  Fails
   (returning false, on an elided line) when mips_get_unaligned_mem
   rejects the access.  */
4861 mips_expand_unaligned_store (rtx dest, rtx src, unsigned int width, int bitpos)
4864 enum machine_mode mode;
4866 if (!mips_get_unaligned_mem (&dest, width, bitpos, &left, &right))
/* Narrow SRC to an integer mode of exactly WIDTH bits.  */
4869 mode = mode_for_size (width, MODE_INT, 0);
4870 src = gen_lowpart (mode, src);
4874 emit_insn (gen_mov_sdl (dest, src, left));
4875 emit_insn (gen_mov_sdr (copy_rtx (dest), copy_rtx (src), right));
4879 emit_insn (gen_mov_swl (dest, src, left));
4880 emit_insn (gen_mov_swr (copy_rtx (dest), copy_rtx (src), right));
4885 /* Return true if X is a MEM with the same size as MODE. */
/* Uses the MEM_SIZE recorded on X; returns false when no size is
   recorded (SIZE is null) or when it differs from MODE's size.
   NOTE(review): the MEM check itself is on a line elided here.  */
4888 mips_mem_fits_mode_p (enum machine_mode mode, rtx x)
4895 size = MEM_SIZE (x);
4896 return size && INTVAL (size) == GET_MODE_SIZE (mode);
4899 /* Return true if (zero_extract OP SIZE POSITION) can be used as the
4900 source of an "ext" instruction or the destination of an "ins"
4901 instruction. OP must be a register operand and the following
4902 conditions must hold:
4904 0 <= POSITION < GET_MODE_BITSIZE (GET_MODE (op))
4905 0 < SIZE <= GET_MODE_BITSIZE (GET_MODE (op))
4906 0 < POSITION + SIZE <= GET_MODE_BITSIZE (GET_MODE (op))
4908 Also reject lengths equal to a word as they are better handled
4909 by the move patterns. */
4912 mips_use_ins_ext_p (rtx op, rtx size, rtx position)
4914 HOST_WIDE_INT len, pos;
/* ext/ins exist only on ISAs with those instructions, operate on
   registers, and cannot span more than a machine word.  */
4916 if (!ISA_HAS_EXT_INS
4917 || !register_operand (op, VOIDmode)
4918 || GET_MODE_BITSIZE (GET_MODE (op)) > BITS_PER_WORD)
/* SIZE and POSITION are assumed to be CONST_INTs here — TODO confirm
   against the .md predicates of the callers.  */
4921 len = INTVAL (size);
4922 pos = INTVAL (position);
/* Enforce the range conditions from the header comment; note len must
   be strictly less than the operand width (word-sized extracts are
   rejected in favor of plain moves).  */
4924 if (len <= 0 || len >= GET_MODE_BITSIZE (GET_MODE (op))
4925 || pos < 0 || pos + len > GET_MODE_BITSIZE (GET_MODE (op)))
4931 /* Set up globals to generate code for the ISA or processor
4932 described by INFO. */
/* Records INFO as the selected architecture: caches the info struct
   and copies out its processor and ISA identifiers.  The guard for a
   null INFO, if any, is on an elided line.  */
4935 mips_set_architecture (const struct mips_cpu_info *info)
4939 mips_arch_info = info;
4940 mips_arch = info->cpu;
4941 mips_isa = info->isa;
4946 /* Likewise for tuning. */
/* Records INFO as the processor to tune for (mips_tune_info/mips_tune),
   mirroring mips_set_architecture but without touching the ISA.  */
4949 mips_set_tune (const struct mips_cpu_info *info)
4953 mips_tune_info = info;
4954 mips_tune = info->cpu;
4958 /* Implement TARGET_HANDLE_OPTION. */
/* Per-option hook: CODE identifies the option, ARG its string argument.
   Handles -mabi=<name>, architecture/ISA selection and -mno-flush-func.
   NOTE(review): the switch skeleton and several case labels/assignments
   (e.g. the ABI_32/ABI_O64/ABI_N32/ABI_64 stores) are elided in this
   view; only the string comparisons and a few bodies are visible.  */
4961 mips_handle_option (size_t code, const char *arg, int value ATTRIBUTE_UNUSED)
/* -mabi= argument parsing: map the ABI name to the mips_abi enum.  */
4966 if (strcmp (arg, "32") == 0)
4968 else if (strcmp (arg, "o64") == 0)
4970 else if (strcmp (arg, "n32") == 0)
4972 else if (strcmp (arg, "64") == 0)
4974 else if (strcmp (arg, "eabi") == 0)
4975 mips_abi = ABI_EABI;
/* Architecture name: accept it iff mips_parse_cpu recognizes it.  */
4982 return mips_parse_cpu (arg) != 0;
/* ISA level option: prepend "mips" to form a CPU name (e.g. "32r2"
   becomes "mips32r2") and look that up.  */
4985 mips_isa_info = mips_parse_cpu (ACONCAT (("mips", arg, NULL)));
4986 return mips_isa_info != 0;
4988 case OPT_mno_flush_func:
4989 mips_cache_flush_func = NULL;
4997 /* Set up the threshold for data to go into the small data area, instead
4998 of the normal data area, and detect any conflicts in the switches. */
/* Implementation of OVERRIDE_OPTIONS: validates the combination of
   -march/-mtune/-mabi/-mgp*/-mfp* and friends, derives defaulted
   target flags, and initializes per-target tables (debug register
   maps, hard_regno_mode_ok, relocation operator strings).
   NOTE(review): many guarding if/else lines and braces are elided in
   this view; comments below describe only the visible statements.  */
5001 override_options (void)
5003 int i, start, regno;
5004 enum machine_mode mode;
5006 #ifdef SUBTARGET_OVERRIDE_OPTIONS
5007 SUBTARGET_OVERRIDE_OPTIONS;
/* -G threshold: explicit -G wins, otherwise the port default.  */
5010 mips_section_threshold = g_switch_set ? g_switch_value : MIPS_DEFAULT_GVALUE;
5012 /* The following code determines the architecture and register size.
5013 Similar code was added to GAS 2.14 (see tc-mips.c:md_after_parse_args()).
5014 The GAS and GCC code should be kept in sync as much as possible. */
5016 if (mips_arch_string != 0)
5017 mips_set_architecture (mips_parse_cpu (mips_arch_string))
5019 if (mips_isa_info != 0)
/* An ISA-level option (-mipsN) either supplies the architecture or
   must agree with an explicit -march.  */
5021 if (mips_arch_info == 0)
5022 mips_set_architecture (mips_isa_info);
5023 else if (mips_arch_info->isa != mips_isa_info->isa)
5024 error ("-%s conflicts with the other architecture options, "
5025 "which specify a %s processor",
5026 mips_isa_info->name,
5027 mips_cpu_info_from_isa (mips_arch_info->isa)->name);
/* No architecture chosen at all: fall back to the configure-time
   default CPU string, else the default ISA.  */
5030 if (mips_arch_info == 0)
5032 #ifdef MIPS_CPU_STRING_DEFAULT
5033 mips_set_architecture (mips_parse_cpu (MIPS_CPU_STRING_DEFAULT));
5035 mips_set_architecture (mips_cpu_info_from_isa (MIPS_ISA_DEFAULT));
5039 if (ABI_NEEDS_64BIT_REGS && !ISA_HAS_64BIT_REGS)
5040 error ("-march=%s is not compatible with the selected ABI",
5041 mips_arch_info->name);
5043 /* Optimize for mips_arch, unless -mtune selects a different processor. */
5044 if (mips_tune_string != 0)
5045 mips_set_tune (mips_parse_cpu (mips_tune_string));
5047 if (mips_tune_info == 0)
5048 mips_set_tune (mips_arch_info);
5050 /* Set cost structure for the processor. */
/* The size-optimizing cost table vs. the per-CPU table; the guard
   (presumably optimize_size) is on an elided line — TODO confirm.  */
5052 mips_cost = &mips_rtx_cost_optimize_size;
5054 mips_cost = &mips_rtx_cost_data[mips_tune];
5056 /* If the user hasn't specified a branch cost, use the processor's
5058 if (mips_branch_cost == 0)
5059 mips_branch_cost = mips_cost->branch_cost;
5061 if ((target_flags_explicit & MASK_64BIT) != 0)
5063 /* The user specified the size of the integer registers. Make sure
5064 it agrees with the ABI and ISA. */
5065 if (TARGET_64BIT && !ISA_HAS_64BIT_REGS)
5066 error ("-mgp64 used with a 32-bit processor");
5067 else if (!TARGET_64BIT && ABI_NEEDS_64BIT_REGS)
5068 error ("-mgp32 used with a 64-bit ABI");
5069 else if (TARGET_64BIT && ABI_NEEDS_32BIT_REGS)
5070 error ("-mgp64 used with a 32-bit ABI");
5074 /* Infer the integer register size from the ABI and processor.
5075 Restrict ourselves to 32-bit registers if that's all the
5076 processor has, or if the ABI cannot handle 64-bit registers. */
5077 if (ABI_NEEDS_32BIT_REGS || !ISA_HAS_64BIT_REGS)
5078 target_flags &= ~MASK_64BIT;
5080 target_flags |= MASK_64BIT;
5083 if ((target_flags_explicit & MASK_FLOAT64) != 0)
5085 /* Really, -mfp32 and -mfp64 are ornamental options. There's
5086 only one right answer here. */
5087 if (TARGET_64BIT && TARGET_DOUBLE_FLOAT && !TARGET_FLOAT64)
5088 error ("unsupported combination: %s", "-mgp64 -mfp32 -mdouble-float");
5089 else if (!TARGET_64BIT && TARGET_FLOAT64
5090 && !(ISA_HAS_MXHC1 && mips_abi == ABI_32))
5091 error ("-mgp32 and -mfp64 can only be combined if the target"
5092 " supports the mfhc1 and mthc1 instructions");
5093 else if (TARGET_SINGLE_FLOAT && TARGET_FLOAT64)
5094 error ("unsupported combination: %s", "-mfp64 -msingle-float");
5098 /* -msingle-float selects 32-bit float registers. Otherwise the
5099 float registers should be the same size as the integer ones. */
5100 if (TARGET_64BIT && TARGET_DOUBLE_FLOAT)
5101 target_flags |= MASK_FLOAT64;
5103 target_flags &= ~MASK_FLOAT64;
5106 /* End of code shared with GAS. */
/* Default the size of "long" from the ABI unless -mlong32/-mlong64
   was given explicitly.  */
5108 if ((target_flags_explicit & MASK_LONG64) == 0)
5110 if ((mips_abi == ABI_EABI && TARGET_64BIT) || mips_abi == ABI_64)
5111 target_flags |= MASK_LONG64;
5113 target_flags &= ~MASK_LONG64;
5116 if (MIPS_MARCH_CONTROLS_SOFT_FLOAT
5117 && (target_flags_explicit & MASK_SOFT_FLOAT) == 0)
5119 /* For some configurations, it is useful to have -march control
5120 the default setting of MASK_SOFT_FLOAT. */
5121 switch ((int) mips_arch)
/* These VR-series processors have no FPU: default to soft float.  */
5123 case PROCESSOR_R4100:
5124 case PROCESSOR_R4111:
5125 case PROCESSOR_R4120:
5126 case PROCESSOR_R4130:
5127 target_flags |= MASK_SOFT_FLOAT;
5131 target_flags &= ~MASK_SOFT_FLOAT;
5137 flag_pcc_struct_return = 0;
5139 if ((target_flags_explicit & MASK_BRANCHLIKELY) == 0)
5141 /* If neither -mbranch-likely nor -mno-branch-likely was given
5142 on the command line, set MASK_BRANCHLIKELY based on the target
5145 By default, we enable use of Branch Likely instructions on
5146 all architectures which support them with the following
5147 exceptions: when creating MIPS32 or MIPS64 code, and when
5148 tuning for architectures where their use tends to hurt
5151 The MIPS32 and MIPS64 architecture specifications say "Software
5152 is strongly encouraged to avoid use of Branch Likely
5153 instructions, as they will be removed from a future revision
5154 of the [MIPS32 and MIPS64] architecture." Therefore, we do not
5155 issue those instructions unless instructed to do so by
5157 if (ISA_HAS_BRANCHLIKELY
5158 && !(ISA_MIPS32 || ISA_MIPS32R2 || ISA_MIPS64)
5159 && !(TUNE_MIPS5500 || TUNE_SB1))
5160 target_flags |= MASK_BRANCHLIKELY;
5162 target_flags &= ~MASK_BRANCHLIKELY;
5164 if (TARGET_BRANCHLIKELY && !ISA_HAS_BRANCHLIKELY)
5165 warning (0, "generation of Branch Likely instructions enabled, but not supported by architecture");
5167 /* The effect of -mabicalls isn't defined for the EABI. */
5168 if (mips_abi == ABI_EABI && TARGET_ABICALLS)
5170 error ("unsupported combination: %s", "-mabicalls -mabi=eabi");
5171 target_flags &= ~MASK_ABICALLS;
5174 if (TARGET_ABICALLS)
5176 /* We need to set flag_pic for executables as well as DSOs
5177 because we may reference symbols that are not defined in
5178 the final executable. (MIPS does not use things like
5179 copy relocs, for example.)
5181 Also, there is a body of code that uses __PIC__ to distinguish
5182 between -mabicalls and -mno-abicalls code. */
5184 if (mips_section_threshold > 0)
5185 warning (0, "%<-G%> is incompatible with %<-mabicalls%>");
5188 if (TARGET_VXWORKS_RTP && mips_section_threshold > 0)
5189 warning (0, "-G and -mrtp are incompatible");
5191 /* mips_split_addresses is a half-way house between explicit
5192 relocations and the traditional assembler macros. It can
5193 split absolute 32-bit symbolic constants into a high/lo_sum
5194 pair but uses macros for other sorts of access.
5196 Like explicit relocation support for REL targets, it relies
5197 on GNU extensions in the assembler and the linker.
5199 Although this code should work for -O0, it has traditionally
5200 been treated as an optimization. */
5201 if (!TARGET_MIPS16 && TARGET_SPLIT_ADDRESSES
5202 && optimize && !flag_pic
5203 && !ABI_HAS_64BIT_SYMBOLS)
5204 mips_split_addresses = 1;
5206 mips_split_addresses = 0;
5208 /* -mvr4130-align is a "speed over size" optimization: it usually produces
5209 faster code, but at the expense of more nops. Enable it at -O3 and
5211 if (optimize > 2 && (target_flags_explicit & MASK_VR4130_ALIGN) == 0)
5212 target_flags |= MASK_VR4130_ALIGN;
5214 /* When compiling for the mips16, we cannot use floating point. We
5215 record the original hard float value in mips16_hard_float. */
5218 if (TARGET_SOFT_FLOAT)
5219 mips16_hard_float = 0;
5221 mips16_hard_float = 1;
/* Force soft float for MIPS16 regardless of the original setting.  */
5222 target_flags |= MASK_SOFT_FLOAT;
5224 /* Don't run the scheduler before reload, since it tends to
5225 increase register pressure. */
5226 flag_schedule_insns = 0;
5228 /* Don't do hot/cold partitioning. The constant layout code expects
5229 the whole function to be in a single section. */
5230 flag_reorder_blocks_and_partition = 0;
5232 /* Silently disable -mexplicit-relocs since it doesn't apply
5233 to mips16 code. Even so, it would overly pedantic to warn
5234 about "-mips16 -mexplicit-relocs", especially given that
5235 we use a %gprel() operator. */
5236 target_flags &= ~MASK_EXPLICIT_RELOCS;
5239 /* When using explicit relocs, we call dbr_schedule from within
5241 if (TARGET_EXPLICIT_RELOCS)
/* Remember the user's delayed-branch setting and suppress the
   generic pass; the backend reinstates it itself.  */
5243 mips_flag_delayed_branch = flag_delayed_branch;
5244 flag_delayed_branch = 0;
5247 #ifdef MIPS_TFMODE_FORMAT
5248 REAL_MODE_FORMAT (TFmode) = &MIPS_TFMODE_FORMAT;
5251 /* Make sure that the user didn't turn off paired single support when
5252 MIPS-3D support is requested. */
5253 if (TARGET_MIPS3D && (target_flags_explicit & MASK_PAIRED_SINGLE_FLOAT)
5254 && !TARGET_PAIRED_SINGLE_FLOAT)
5255 error ("-mips3d requires -mpaired-single");
5257 /* If TARGET_MIPS3D, enable MASK_PAIRED_SINGLE_FLOAT. */
5259 target_flags |= MASK_PAIRED_SINGLE_FLOAT;
5261 /* Make sure that when TARGET_PAIRED_SINGLE_FLOAT is true, TARGET_FLOAT64
5262 and TARGET_HARD_FLOAT are both true. */
5263 if (TARGET_PAIRED_SINGLE_FLOAT && !(TARGET_FLOAT64 && TARGET_HARD_FLOAT))
5264 error ("-mips3d/-mpaired-single must be used with -mfp64 -mhard-float");
5266 /* Make sure that the ISA supports TARGET_PAIRED_SINGLE_FLOAT when it is
5268 if (TARGET_PAIRED_SINGLE_FLOAT && !ISA_MIPS64)
5269 error ("-mips3d/-mpaired-single must be used with -mips64");
5271 /* If TARGET_DSPR2, enable MASK_DSP. */
5273 target_flags |= MASK_DSP;
5275 if (TARGET_MIPS16 && TARGET_DSP)
5276 error ("-mips16 and -mdsp cannot be used together");
/* Register the punctuation characters that PRINT_OPERAND accepts
   (see the PRINT_OPERAND comment later in this file).  */
5278 mips_print_operand_punct['?'] = 1;
5279 mips_print_operand_punct['#'] = 1;
5280 mips_print_operand_punct['/'] = 1;
5281 mips_print_operand_punct['&'] = 1;
5282 mips_print_operand_punct['!'] = 1;
5283 mips_print_operand_punct['*'] = 1;
5284 mips_print_operand_punct['@'] = 1;
5285 mips_print_operand_punct['.'] = 1;
5286 mips_print_operand_punct['('] = 1;
5287 mips_print_operand_punct[')'] = 1;
5288 mips_print_operand_punct['['] = 1;
5289 mips_print_operand_punct[']'] = 1;
5290 mips_print_operand_punct['<'] = 1;
5291 mips_print_operand_punct['>'] = 1;
5292 mips_print_operand_punct['{'] = 1;
5293 mips_print_operand_punct['}'] = 1;
5294 mips_print_operand_punct['^'] = 1;
5295 mips_print_operand_punct['$'] = 1;
5296 mips_print_operand_punct['+'] = 1;
5297 mips_print_operand_punct['~'] = 1;
5299 /* Set up array to map GCC register number to debug register number.
5300 Ignore the special purpose register numbers. */
5302 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
5304 mips_dbx_regno[i] = INVALID_REGNUM;
5305 if (GP_REG_P (i) || FP_REG_P (i) || ALL_COP_REG_P (i))
5306 mips_dwarf_regno[i] = i;
5308 mips_dwarf_regno[i] = INVALID_REGNUM;
5311 start = GP_DBX_FIRST - GP_REG_FIRST;
5312 for (i = GP_REG_FIRST; i <= GP_REG_LAST; i++)
5313 mips_dbx_regno[i] = i + start;
5315 start = FP_DBX_FIRST - FP_REG_FIRST;
5316 for (i = FP_REG_FIRST; i <= FP_REG_LAST; i++)
5317 mips_dbx_regno[i] = i + start;
5319 /* HI and LO debug registers use big-endian ordering. */
5320 mips_dbx_regno[HI_REGNUM] = MD_DBX_FIRST + 0;
5321 mips_dbx_regno[LO_REGNUM] = MD_DBX_FIRST + 1;
5322 mips_dwarf_regno[HI_REGNUM] = MD_REG_FIRST + 0;
5323 mips_dwarf_regno[LO_REGNUM] = MD_REG_FIRST + 1;
/* DSP accumulator pairs: DWARF ordering depends on endianness
   (exactly one of TARGET_LITTLE_ENDIAN / TARGET_BIG_ENDIAN is 1).  */
5324 for (i = DSP_ACC_REG_FIRST; i <= DSP_ACC_REG_LAST; i += 2)
5326 mips_dwarf_regno[i + TARGET_LITTLE_ENDIAN] = i;
5327 mips_dwarf_regno[i + TARGET_BIG_ENDIAN] = i + 1;
5330 /* Set up array giving whether a given register can hold a given mode. */
5332 for (mode = VOIDmode;
5333 mode != MAX_MACHINE_MODE;
5334 mode = (enum machine_mode) ((int)mode + 1))
5336 register int size = GET_MODE_SIZE (mode);
5337 register enum mode_class class = GET_MODE_CLASS (mode);
5339 for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
/* Condition-code vector modes must start on a suitably aligned
   status register.  */
5343 if (mode == CCV2mode)
5346 && (regno - ST_REG_FIRST) % 2 == 0);
5348 else if (mode == CCV4mode)
5351 && (regno - ST_REG_FIRST) % 4 == 0);
5353 else if (mode == CCmode)
5356 temp = (regno == FPSW_REGNUM);
5358 temp = (ST_REG_P (regno) || GP_REG_P (regno)
5359 || FP_REG_P (regno));
/* Multi-word values in GPRs must start on an even register.  */
5362 else if (GP_REG_P (regno))
5363 temp = ((regno & 1) == 0 || size <= UNITS_PER_WORD);
5365 else if (FP_REG_P (regno))
5366 temp = ((((regno % MAX_FPRS_PER_FMT) == 0)
5367 || (MIN_FPRS_PER_FMT == 1
5368 && size <= UNITS_PER_FPREG))
5369 && (((class == MODE_FLOAT || class == MODE_COMPLEX_FLOAT
5370 || class == MODE_VECTOR_FLOAT)
5371 && size <= UNITS_PER_FPVALUE)
5372 /* Allow integer modes that fit into a single
5373 register. We need to put integers into FPRs
5374 when using instructions like cvt and trunc.
5375 We can't allow sizes smaller than a word,
5376 the FPU has no appropriate load/store
5377 instructions for those. */
5378 || (class == MODE_INT
5379 && size >= MIN_UNITS_PER_WORD
5380 && size <= UNITS_PER_FPREG)
5381 /* Allow TFmode for CCmode reloads. */
5382 || (ISA_HAS_8CC && mode == TFmode)));
5384 else if (ACC_REG_P (regno))
5385 temp = (INTEGRAL_MODE_P (mode)
5386 && size <= UNITS_PER_WORD * 2
5387 && (size <= UNITS_PER_WORD
5388 || regno == MD_REG_FIRST
5389 || (DSP_ACC_REG_P (regno)
5390 && ((regno - DSP_ACC_REG_FIRST) & 1) == 0)));
5392 else if (ALL_COP_REG_P (regno))
5393 temp = (class == MODE_INT && size <= UNITS_PER_WORD);
5397 mips_hard_regno_mode_ok[(int)mode][regno] = temp;
5401 /* Save GPR registers in word_mode sized hunks. word_mode hasn't been
5402 initialized yet, so we can't use that here. */
5403 gpr_mode = TARGET_64BIT ? DImode : SImode;
5405 /* Provide default values for align_* for 64-bit targets. */
5406 if (TARGET_64BIT && !TARGET_MIPS16)
5408 if (align_loops == 0)
5410 if (align_jumps == 0)
5412 if (align_functions == 0)
5413 align_functions = 8;
5416 /* Function to allocate machine-dependent function status. */
5417 init_machine_status = &mips_init_machine_status;
/* Set up the relocation operator strings used when splitting
   symbolic addresses into HIGH/LO_SUM pairs.  */
5419 if (ABI_HAS_64BIT_SYMBOLS)
5421 if (TARGET_EXPLICIT_RELOCS)
5423 mips_split_p[SYMBOL_64_HIGH] = true;
5424 mips_hi_relocs[SYMBOL_64_HIGH] = "%highest(";
5425 mips_lo_relocs[SYMBOL_64_HIGH] = "%higher(";
5427 mips_split_p[SYMBOL_64_MID] = true;
5428 mips_hi_relocs[SYMBOL_64_MID] = "%higher(";
5429 mips_lo_relocs[SYMBOL_64_MID] = "%hi(";
5431 mips_split_p[SYMBOL_64_LOW] = true;
5432 mips_hi_relocs[SYMBOL_64_LOW] = "%hi(";
5433 mips_lo_relocs[SYMBOL_64_LOW] = "%lo(";
5435 mips_split_p[SYMBOL_ABSOLUTE] = true;
5436 mips_lo_relocs[SYMBOL_ABSOLUTE] = "%lo(";
5441 if (TARGET_EXPLICIT_RELOCS || mips_split_addresses)
5443 mips_split_p[SYMBOL_ABSOLUTE] = true;
5444 mips_hi_relocs[SYMBOL_ABSOLUTE] = "%hi(";
5445 mips_lo_relocs[SYMBOL_ABSOLUTE] = "%lo(";
5451 /* The high part is provided by a pseudo copy of $gp. */
5452 mips_split_p[SYMBOL_GP_RELATIVE] = true;
5453 mips_lo_relocs[SYMBOL_GP_RELATIVE] = "%gprel(";
5456 if (TARGET_EXPLICIT_RELOCS)
5458 /* Small data constants are kept whole until after reload,
5459 then lowered by mips_rewrite_small_data. */
5460 mips_lo_relocs[SYMBOL_GP_RELATIVE] = "%gp_rel(";
5462 mips_split_p[SYMBOL_GOT_PAGE_OFST] = true;
5465 mips_lo_relocs[SYMBOL_GOTOFF_PAGE] = "%got_page(";
5466 mips_lo_relocs[SYMBOL_GOT_PAGE_OFST] = "%got_ofst(";
5470 mips_lo_relocs[SYMBOL_GOTOFF_PAGE] = "%got(";
5471 mips_lo_relocs[SYMBOL_GOT_PAGE_OFST] = "%lo(";
5476 /* The HIGH and LO_SUM are matched by special .md patterns. */
5477 mips_split_p[SYMBOL_GOT_DISP] = true;
5479 mips_split_p[SYMBOL_GOTOFF_DISP] = true;
5480 mips_hi_relocs[SYMBOL_GOTOFF_DISP] = "%got_hi(";
5481 mips_lo_relocs[SYMBOL_GOTOFF_DISP] = "%got_lo(";
5483 mips_split_p[SYMBOL_GOTOFF_CALL] = true;
5484 mips_hi_relocs[SYMBOL_GOTOFF_CALL] = "%call_hi(";
5485 mips_lo_relocs[SYMBOL_GOTOFF_CALL] = "%call_lo(";
5490 mips_lo_relocs[SYMBOL_GOTOFF_DISP] = "%got_disp(";
5492 mips_lo_relocs[SYMBOL_GOTOFF_DISP] = "%got(";
5493 mips_lo_relocs[SYMBOL_GOTOFF_CALL] = "%call16(";
5499 mips_split_p[SYMBOL_GOTOFF_LOADGP] = true;
5500 mips_hi_relocs[SYMBOL_GOTOFF_LOADGP] = "%hi(%neg(%gp_rel(";
5501 mips_lo_relocs[SYMBOL_GOTOFF_LOADGP] = "%lo(%neg(%gp_rel(";
5504 /* Thread-local relocation operators. */
5505 mips_lo_relocs[SYMBOL_TLSGD] = "%tlsgd(";
5506 mips_lo_relocs[SYMBOL_TLSLDM] = "%tlsldm(";
5507 mips_split_p[SYMBOL_DTPREL] = 1;
5508 mips_hi_relocs[SYMBOL_DTPREL] = "%dtprel_hi(";
5509 mips_lo_relocs[SYMBOL_DTPREL] = "%dtprel_lo(";
5510 mips_lo_relocs[SYMBOL_GOTTPREL] = "%gottprel(";
5511 mips_split_p[SYMBOL_TPREL] = 1;
5512 mips_hi_relocs[SYMBOL_TPREL] = "%tprel_hi(";
5513 mips_lo_relocs[SYMBOL_TPREL] = "%tprel_lo(";
5515 mips_lo_relocs[SYMBOL_HALF] = "%half(";
5517 /* We don't have a thread pointer access instruction on MIPS16, or
5518 appropriate TLS relocations. */
5520 targetm.have_tls = false;
5522 /* Default to working around R4000 errata only if the processor
5523 was selected explicitly. */
5524 if ((target_flags_explicit & MASK_FIX_R4000) == 0
5525 && mips_matching_cpu_name_p (mips_arch_info->name, "r4000"))
5526 target_flags |= MASK_FIX_R4000;
5528 /* Default to working around R4400 errata only if the processor
5529 was selected explicitly. */
5530 if ((target_flags_explicit & MASK_FIX_R4400) == 0
5531 && mips_matching_cpu_name_p (mips_arch_info->name, "r4400"))
5532 target_flags |= MASK_FIX_R4400;
5535 /* Swap the register information for registers I and I + 1, which
5536 currently have the wrong endianness. Note that the registers'
5537 fixedness and call-clobberedness might have been set on the
/* Exchanges fixedness, call-clobberedness and names between registers
   I and I + 1.  Helper for mips_conditional_register_usage, which
   calls this for HI/LO and DSP accumulator pairs on little-endian
   targets.  The declarations of the tmpi/tmps temporaries are on
   elided lines.  */
5541 mips_swap_registers (unsigned int i)
/* Local swap helpers; comma expressions keep each swap a single
   statement.  */
5546 #define SWAP_INT(X, Y) (tmpi = (X), (X) = (Y), (Y) = tmpi)
5547 #define SWAP_STRING(X, Y) (tmps = (X), (X) = (Y), (Y) = tmps)
5549 SWAP_INT (fixed_regs[i], fixed_regs[i + 1]);
5550 SWAP_INT (call_used_regs[i], call_used_regs[i + 1]);
5551 SWAP_INT (call_really_used_regs[i], call_really_used_regs[i + 1]);
5552 SWAP_STRING (reg_names[i], reg_names[i + 1]);
5558 /* Implement CONDITIONAL_REGISTER_USAGE. */
/* Adjusts the fixed/call-used register tables for the selected target:
   hides the FPU and status registers when they are unusable, restricts
   registers under MIPS16, marks extra FPRs caller-saved for the 64-bit
   ABIs, and fixes accumulator ordering for little-endian targets.
   NOTE(review): the guards for several groups (e.g. the !TARGET_DSP
   case before the first loop) are on elided lines.  */
5561 mips_conditional_register_usage (void)
/* DSP accumulators are unavailable here: make them fixed.  */
5567 for (regno = DSP_ACC_REG_FIRST; regno <= DSP_ACC_REG_LAST; regno++)
5568 fixed_regs[regno] = call_used_regs[regno] = 1;
5570 if (!TARGET_HARD_FLOAT)
/* Soft float: neither FPRs nor FP status registers are usable.  */
5574 for (regno = FP_REG_FIRST; regno <= FP_REG_LAST; regno++)
5575 fixed_regs[regno] = call_used_regs[regno] = 1;
5576 for (regno = ST_REG_FIRST; regno <= ST_REG_LAST; regno++)
5577 fixed_regs[regno] = call_used_regs[regno] = 1;
5579 else if (! ISA_HAS_8CC)
5583 /* We only have a single condition code register. We
5584 implement this by hiding all the condition code registers,
5585 and generating RTL that refers directly to ST_REG_FIRST. */
5586 for (regno = ST_REG_FIRST; regno <= ST_REG_LAST; regno++)
5587 fixed_regs[regno] = call_used_regs[regno] = 1;
5589 /* In mips16 mode, we permit the $t temporary registers to be used
5590 for reload. We prohibit the unused $s registers, since they
5591 are caller saved, and saving them via a mips16 register would
5592 probably waste more time than just reloading the value. */
/* $18-$23, $26-$27 (kernel regs) and $30 are unusable in MIPS16.  */
5595 fixed_regs[18] = call_used_regs[18] = 1;
5596 fixed_regs[19] = call_used_regs[19] = 1;
5597 fixed_regs[20] = call_used_regs[20] = 1;
5598 fixed_regs[21] = call_used_regs[21] = 1;
5599 fixed_regs[22] = call_used_regs[22] = 1;
5600 fixed_regs[23] = call_used_regs[23] = 1;
5601 fixed_regs[26] = call_used_regs[26] = 1;
5602 fixed_regs[27] = call_used_regs[27] = 1;
5603 fixed_regs[30] = call_used_regs[30] = 1;
5605 /* fp20-23 are now caller saved. */
5606 if (mips_abi == ABI_64)
5609 for (regno = FP_REG_FIRST + 20; regno < FP_REG_FIRST + 24; regno++)
5610 call_really_used_regs[regno] = call_used_regs[regno] = 1;
5612 /* Odd registers from fp21 to fp31 are now caller saved. */
5613 if (mips_abi == ABI_N32)
5616 for (regno = FP_REG_FIRST + 21; regno <= FP_REG_FIRST + 31; regno+=2)
5617 call_really_used_regs[regno] = call_used_regs[regno] = 1;
5619 /* Make sure that double-register accumulator values are correctly
5620 ordered for the current endianness. */
5621 if (TARGET_LITTLE_ENDIAN)
5624 mips_swap_registers (MD_REG_FIRST);
5625 for (regno = DSP_ACC_REG_FIRST; regno <= DSP_ACC_REG_LAST; regno += 2)
5626 mips_swap_registers (regno);
5630 /* Allocate a chunk of memory for per-function machine-dependent data. */
/* Installed as init_machine_status in override_options; returns a
   zero-initialized, garbage-collected machine_function.  */
5631 static struct machine_function *
5632 mips_init_machine_status (void)
5634 return ((struct machine_function *)
5635 ggc_alloc_cleared (sizeof (struct machine_function)));
5638 /* On the mips16, we want to allocate $24 (T_REG) before other
5639 registers for instructions for which it is possible. This helps
5640 avoid shuffling registers around in order to set up for an xor,
5641 encouraging the compiler to use a cmp instead. */
/* Implements ORDER_REGS_FOR_LOCAL_ALLOC: default is the identity
   ordering; under MIPS16 (the guard is on an elided line), $24 and $0
   are swapped so $24 is tried first.  */
5644 mips_order_regs_for_local_alloc (void)
/* Start from the identity allocation order.  */
5648 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
5649 reg_alloc_order[i] = i;
5653 /* It really doesn't matter where we put register 0, since it is
5654 a fixed register anyhow. */
5655 reg_alloc_order[0] = 24;
5656 reg_alloc_order[24] = 0;
5661 /* The MIPS debug format wants all automatic variables and arguments
5662 to be in terms of the virtual frame pointer (stack pointer before
5663 any adjustment in the function), while the MIPS 3.0 linker wants
5664 the frame pointer to be the stack pointer after the initial
5665 adjustment. So, we do the adjustment here. The arg pointer (which
5666 is eliminated) points to the virtual frame pointer, while the frame
5667 pointer (which may be eliminated) points to the stack pointer after
5668 the initial adjustments. */
/* Translates the debugger-visible OFFSET for address ADDR from the
   virtual-frame-pointer convention to the post-adjustment convention.
   NOTE(review): the return statement(s) are on elided lines.  */
5671 mips_debugger_offset (rtx addr, HOST_WIDE_INT offset)
/* Split ADDR into base register plus constant term.  */
5673 rtx offset2 = const0_rtx;
5674 rtx reg = eliminate_constant_term (addr, &offset2);
/* The extracted constant term overrides a zero incoming OFFSET; the
   guard (presumably offset == 0) is on an elided line.  */
5677 offset = INTVAL (offset2);
5679 if (reg == stack_pointer_rtx || reg == frame_pointer_rtx
5680 || reg == hard_frame_pointer_rtx)
/* Use the cached frame size if the frame layout has already been
   computed, otherwise compute it now.  */
5682 HOST_WIDE_INT frame_size = (!cfun->machine->frame.initialized)
5683 ? compute_frame_size (get_frame_size ())
5684 : cfun->machine->frame.total_size;
5686 /* MIPS16 frame is smaller */
5687 if (frame_pointer_needed && TARGET_MIPS16)
5688 frame_size -= cfun->machine->frame.args_size;
5690 offset = offset - frame_size;
5693 /* sdbout_parms does not want this to crash for unrecognized cases. */
5695 else if (reg != arg_pointer_rtx)
5696 fatal_insn ("mips_debugger_offset called with non stack/frame/arg pointer",
5703 /* Implement the PRINT_OPERAND macro. The MIPS-specific operand codes are:
5705 'X' OP is CONST_INT, prints 32 bits in hexadecimal format = "0x%08x",
5706 'x' OP is CONST_INT, prints 16 bits in hexadecimal format = "0x%04x",
5707 'h' OP is HIGH, prints %hi(X),
5708 'd' output integer constant in decimal,
5709 'z' if the operand is 0, use $0 instead of normal operand.
5710 'D' print second part of double-word register or memory operand.
5711 'L' print low-order register of double-word register operand.
5712 'M' print high-order register of double-word register operand.
5713 'C' print part of opcode for a branch condition.
5714 'F' print part of opcode for a floating-point branch condition.
5715 'N' print part of opcode for a branch condition, inverted.
5716 'W' print part of opcode for a floating-point branch condition, inverted.
5717 'T' print 'f' for (eq:CC ...), 't' for (ne:CC ...),
5718 'z' for (eq:?I ...), 'n' for (ne:?I ...).
5719 't' like 'T', but with the EQ/NE cases reversed
5720 'Y' for a CONST_INT X, print mips_fp_conditions[X]
5721 'Z' print the operand and a comma for ISA_HAS_8CC, otherwise print nothing
5722 'R' print the reloc associated with LO_SUM
5723 'q' print DSP accumulator registers
5725 The punctuation characters are:
5727 '(' Turn on .set noreorder
5728 ')' Turn on .set reorder
5729 '[' Turn on .set noat
5731 '<' Turn on .set nomacro
5732 '>' Turn on .set macro
5733 '{' Turn on .set volatile (not GAS)
5734 '}' Turn on .set novolatile (not GAS)
5735 '&' Turn on .set noreorder if filling delay slots
5736 '*' Turn on both .set noreorder and .set nomacro if filling delay slots
5737 '!' Turn on .set nomacro if filling delay slots
5738 '#' Print nop if in a .set noreorder section.
5739 '/' Like '#', but does nothing within a delayed branch sequence
5740 '?' Print 'l' if we are to use a branch likely instead of normal branch.
5741 '@' Print the name of the assembler temporary register (at or $1).
5742 '.' Print the name of the register with a hard-wired zero (zero or $0).
5743 '^' Print the name of the pic call-through register (t9 or $25).
5744 '$' Print the name of the stack pointer register (sp or $29).
5745 '+' Print the name of the gp register (usually gp or $28).
5746 '~' Output a branch alignment to LABEL_ALIGN(NULL). */
/* Implement PRINT_OPERAND (see the operand-code table in the comment
   above).  FILE is the assembler stream, OP the rtx operand, LETTER
   the (possibly punctuation) operand code.  NOTE(review): this
   extract has lost the return type, most braces, the punctuation
   `switch (letter)` and its case labels, and several case labels in
   the condition-code switches — read alongside the full file.  */
5749 print_operand (FILE *file, rtx op, int letter)
5751 register enum rtx_code code;
/* Punctuation codes take no operand; handle them first and return.  */
5753 if (PRINT_OPERAND_PUNCT_VALID_P (letter))
/* '?': emit "l" to turn a branch into its branch-likely form.  */
5758 if (mips_branch_likely)
/* '@': the assembler temporary ($1/at).  */
5763 fputs (reg_names [GP_REG_FIRST + 1], file);
/* '^': the PIC call-through register ($25/t9).  */
5767 fputs (reg_names [PIC_FUNCTION_ADDR_REGNUM], file);
/* '.': the hard-wired zero register ($0).  */
5771 fputs (reg_names [GP_REG_FIRST + 0], file);
/* '$': the stack pointer.  */
5775 fputs (reg_names[STACK_POINTER_REGNUM], file);
/* '+': the global pointer.  */
5779 fputs (reg_names[PIC_OFFSET_TABLE_REGNUM], file);
/* '&': .set noreorder if filling delay slots.  The set_* counters
   below track nesting of the .set directives.  */
5783 if (final_sequence != 0 && set_noreorder++ == 0)
5784 fputs (".set\tnoreorder\n\t", file);
/* '*': both noreorder and nomacro if filling delay slots.  */
5788 if (final_sequence != 0)
5790 if (set_noreorder++ == 0)
5791 fputs (".set\tnoreorder\n\t", file);
5793 if (set_nomacro++ == 0)
5794 fputs (".set\tnomacro\n\t", file);
/* '!': nomacro if filling delay slots.  */
5799 if (final_sequence != 0 && set_nomacro++ == 0)
5800 fputs ("\n\t.set\tnomacro", file);
/* '#': nop when inside a noreorder section.  */
5804 if (set_noreorder != 0)
5805 fputs ("\n\tnop", file);
5809 /* Print an extra newline so that the delayed insn is separated
5810 from the following ones. This looks neater and is consistent
5811 with non-nop delayed sequences. */
5812 if (set_noreorder != 0 && final_sequence == 0)
5813 fputs ("\n\tnop\n", file);
/* '(': open a .set noreorder region.  */
5817 if (set_noreorder++ == 0)
5818 fputs (".set\tnoreorder\n\t", file);
/* ')': close the matching noreorder region.  */
5822 if (set_noreorder == 0)
5823 error ("internal error: %%) found without a %%( in assembler pattern");
5825 else if (--set_noreorder == 0)
5826 fputs ("\n\t.set\treorder", file);
/* '[' / ']': noat region.  */
5831 if (set_noat++ == 0)
5832 fputs (".set\tnoat\n\t", file);
5837 error ("internal error: %%] found without a %%[ in assembler pattern");
5838 else if (--set_noat == 0)
5839 fputs ("\n\t.set\tat", file);
/* '<' / '>': nomacro region.  */
5844 if (set_nomacro++ == 0)
5845 fputs (".set\tnomacro\n\t", file);
5849 if (set_nomacro == 0)
5850 error ("internal error: %%> found without a %%< in assembler pattern");
5851 else if (--set_nomacro == 0)
5852 fputs ("\n\t.set\tmacro", file);
/* '{' / '}': volatile region (commented form; not understood by GAS).  */
5857 if (set_volatile++ == 0)
5858 fputs ("#.set\tvolatile\n\t", file);
5862 if (set_volatile == 0)
5863 error ("internal error: %%} found without a %%{ in assembler pattern");
5864 else if (--set_volatile == 0)
5865 fputs ("\n\t#.set\tnovolatile", file);
/* '~': output a branch alignment if label alignment is enabled.  */
5871 if (align_labels_log > 0)
5872 ASM_OUTPUT_ALIGN (file, align_labels_log);
5877 error ("PRINT_OPERAND: unknown punctuation '%c'", letter);
/* Non-punctuation codes require an operand.  */
5886 error ("PRINT_OPERAND null pointer");
5890 code = GET_CODE (op);
/* 'C': branch-condition mnemonic fragment.  */
5895 case EQ: fputs ("eq", file); break;
5896 case NE: fputs ("ne", file); break;
5897 case GT: fputs ("gt", file); break;
5898 case GE: fputs ("ge", file); break;
5899 case LT: fputs ("lt", file); break;
5900 case LE: fputs ("le", file); break;
5901 case GTU: fputs ("gtu", file); break;
5902 case GEU: fputs ("geu", file); break;
5903 case LTU: fputs ("ltu", file); break;
5904 case LEU: fputs ("leu", file); break;
5906 fatal_insn ("PRINT_OPERAND, invalid insn for %%C", op);
/* 'N': same as 'C' but inverted.  */
5909 else if (letter == 'N')
5912 case EQ: fputs ("ne", file); break;
5913 case NE: fputs ("eq", file); break;
5914 case GT: fputs ("le", file); break;
5915 case GE: fputs ("lt", file); break;
5916 case LT: fputs ("ge", file); break;
5917 case LE: fputs ("gt", file); break;
5918 case GTU: fputs ("leu", file); break;
5919 case GEU: fputs ("ltu", file); break;
5920 case LTU: fputs ("geu", file); break;
5921 case LEU: fputs ("gtu", file); break;
5923 fatal_insn ("PRINT_OPERAND, invalid insn for %%N", op);
/* 'F': floating-point branch-condition fragment.  */
5926 else if (letter == 'F')
5929 case EQ: fputs ("c1f", file); break;
5930 case NE: fputs ("c1t", file); break;
5932 fatal_insn ("PRINT_OPERAND, invalid insn for %%F", op);
/* 'W': inverted floating-point branch condition.  */
5935 else if (letter == 'W')
5938 case EQ: fputs ("c1t", file); break;
5939 case NE: fputs ("c1f", file); break;
5941 fatal_insn ("PRINT_OPERAND, invalid insn for %%W", op);
/* 'h': %hi relocation for a HIGH operand.  */
5944 else if (letter == 'h')
5946 if (GET_CODE (op) == HIGH)
5949 print_operand_reloc (file, op, SYMBOL_CONTEXT_LEA, mips_hi_relocs);
/* 'R': the low-part relocation associated with a LO_SUM.  */
5952 else if (letter == 'R')
5953 print_operand_reloc (file, op, SYMBOL_CONTEXT_LEA, mips_lo_relocs);
/* 'Y': index into the FP condition table.  */
5955 else if (letter == 'Y')
5957 if (GET_CODE (op) == CONST_INT
5958 && ((unsigned HOST_WIDE_INT) INTVAL (op)
5959 < ARRAY_SIZE (mips_fp_conditions)))
5960 fputs (mips_fp_conditions[INTVAL (op)], file);
5962 output_operand_lossage ("invalid %%Y value");
/* 'Z': print the operand plus a comma for ISA_HAS_8CC targets.  */
5965 else if (letter == 'Z')
5969 print_operand (file, op, 0);
/* 'q': DSP accumulator register name ($ac0-$ac3).  */
5974 else if (letter == 'q')
5979 fatal_insn ("PRINT_OPERAND, invalid insn for %%q", op);
5981 regnum = REGNO (op);
5982 if (MD_REG_P (regnum))
5983 fprintf (file, "$ac0");
5984 else if (DSP_ACC_REG_P (regnum))
5985 fprintf (file, "$ac%c", reg_names[regnum][3]);
5987 fatal_insn ("PRINT_OPERAND, invalid insn for %%q", op);
/* Plain register (or subreg) operand, including 'D'/'L'/'M' word
   selection for double-word values.  */
5990 else if (code == REG || code == SUBREG)
5992 register int regnum;
5995 regnum = REGNO (op);
5997 regnum = true_regnum (op);
/* 'M' wants the high-order and 'L' the low-order word, so on the
   "wrong" endianness select the other register of the pair.
   NOTE(review): the `regnum++` adjustment line is missing from this
   extract.  */
5999 if ((letter == 'M' && ! WORDS_BIG_ENDIAN)
6000 || (letter == 'L' && WORDS_BIG_ENDIAN)
6004 fprintf (file, "%s", reg_names[regnum]);
/* Memory operand; 'D' addresses the second word.  */
6007 else if (code == MEM)
6010 output_address (plus_constant (XEXP (op, 0), 4));
6012 output_address (XEXP (op, 0));
/* Constant formats: 'x' low 16 bits hex, 'X' hex, 'd' decimal.  */
6015 else if (letter == 'x' && GET_CODE (op) == CONST_INT)
6016 fprintf (file, HOST_WIDE_INT_PRINT_HEX, 0xffff & INTVAL(op));
6018 else if (letter == 'X' && GET_CODE(op) == CONST_INT)
6019 fprintf (file, HOST_WIDE_INT_PRINT_HEX, INTVAL (op));
6021 else if (letter == 'd' && GET_CODE(op) == CONST_INT)
6022 fprintf (file, HOST_WIDE_INT_PRINT_DEC, (INTVAL(op)));
/* 'z': use $0 for a zero operand.  */
6024 else if (letter == 'z' && op == CONST0_RTX (GET_MODE (op)))
6025 fputs (reg_names[GP_REG_FIRST], file);
6027 else if (letter == 'd' || letter == 'x' || letter == 'X')
6028 output_operand_lossage ("invalid use of %%d, %%x, or %%X");
/* 'T'/'t': pick one of "z"/"f"/"n"/"t" from the comparison code,
   CCmode-ness, and whether the sense is reversed.  */
6030 else if (letter == 'T' || letter == 't')
6032 int truth = (code == NE) == (letter == 'T');
6033 fputc ("zfnt"[truth * 2 + (GET_MODE (op) == CCmode)], file);
6036 else if (CONST_GP_P (op))
6037 fputs (reg_names[GLOBAL_POINTER_REGNUM], file);
/* Fallback: print the operand as an address constant.  */
6040 output_addr_const (file, op);
6044 /* Print symbolic operand OP, which is part of a HIGH or LO_SUM
6045 in context CONTEXT. RELOCS is the array of relocations to use. */
/* Emits e.g. "%hi(sym+off)" to FILE: the relocation prefix, the
   (possibly UNSPEC-unwrapped) symbol, then closing parentheses.
   NOTE(review): declarations of P, BASE and OFFSET and the loop body
   that emits a ')' for each '(' in the prefix are missing from this
   extract.  */
6048 print_operand_reloc (FILE *file, rtx op, enum mips_symbol_context context,
6049 const char **relocs)
6051 enum mips_symbol_type symbol_type;
/* Reject operands we have no relocation operator for.  */
6055 if (!mips_symbolic_constant_p (op, context, &symbol_type)
6056 || relocs[symbol_type] == 0)
6057 fatal_insn ("PRINT_OPERAND, invalid operand for relocation", op);
6059 /* If OP uses an UNSPEC address, we want to print the inner symbol. */
6060 split_const (op, &base, &offset);
6061 if (UNSPEC_ADDRESS_P (base))
6062 op = plus_constant (UNSPEC_ADDRESS (base), INTVAL (offset));
6064 fputs (relocs[symbol_type], file);
6065 output_addr_const (file, op);
/* Balance every '(' that the relocation prefix opened.  */
6066 for (p = relocs[symbol_type]; *p != 0; p++)
6071 /* Output address operand X to FILE. */
/* Implements PRINT_OPERAND_ADDRESS by classifying X and emitting the
   assembler syntax for each address form.  NOTE(review): the switch
   statement, the ADDRESS_REG case label, and the trailing
   gcc_unreachable are missing from this extract.  */
6074 print_operand_address (FILE *file, rtx x)
6076 struct mips_address_info addr;
6078 if (mips_classify_address (&addr, x, word_mode, true))
/* Register + constant offset: "offset(reg)".  */
6082 print_operand (file, addr.offset, 0);
6083 fprintf (file, "(%s)", reg_names[REGNO (addr.reg)]);
/* LO_SUM: "%lo(sym)(reg)" via the low-part relocations.  */
6086 case ADDRESS_LO_SUM:
6087 print_operand_reloc (file, addr.offset, SYMBOL_CONTEXT_MEM,
6089 fprintf (file, "(%s)", reg_names[REGNO (addr.reg)]);
/* Absolute constant: offset from $0.  */
6092 case ADDRESS_CONST_INT:
6093 output_addr_const (file, x);
6094 fprintf (file, "(%s)", reg_names[0]);
/* Bare symbolic address.  */
6097 case ADDRESS_SYMBOLIC:
6098 output_addr_const (file, x);
6104 /* When using assembler macros, keep track of all of small-data externs
6105 so that mips_file_end can emit the appropriate declarations for them.
6107 In most cases it would be safe (though pointless) to emit .externs
6108 for other symbols too. One exception is when an object is within
6109 the -G limit but declared by the user to be in a section other
6110 than .sbss or .sdata. */
/* Implements ASM_OUTPUT_EXTERNAL.  DECL is the external declaration,
   NAME its assembler name.  */
6113 mips_output_external (FILE *file, tree decl, const char *name)
6115 default_elf_asm_output_external (file, decl, name);
6117 /* We output the name if and only if TREE_SYMBOL_REFERENCED is
6118 set in order to avoid putting out names that are never really
6120 if (TREE_SYMBOL_REFERENCED (DECL_ASSEMBLER_NAME (decl)))
/* Small-data externs need an explicit .extern with a size so the
   assembler places references in the gp-relative area.  */
6122 if (!TARGET_EXPLICIT_RELOCS && mips_in_small_data_p (decl))
6124 fputs ("\t.extern\t", file);
6125 assemble_name (file, name);
6126 fprintf (file, ", " HOST_WIDE_INT_PRINT_DEC "\n",
6127 int_size_in_bytes (TREE_TYPE (decl)));
6129 else if (TARGET_IRIX
6130 && mips_abi == ABI_32
6131 && TREE_CODE (decl) == FUNCTION_DECL)
6133 /* In IRIX 5 or IRIX 6 for the O32 ABI, we must output a
6134 `.global name .text' directive for every used but
6135 undefined function. If we don't, the linker may perform
6136 an optimization (skipping over the insns that set $gp)
6137 when it is unsafe. */
6138 fputs ("\t.globl ", file);
6139 assemble_name (file, name);
6140 fputs (" .text\n", file);
6145 /* Emit a new filename to a stream. If we are smuggling stabs, try to
6146 put out a MIPS ECOFF file and a stab. */
/* STREAM is the assembler output; NAME is the new source file name.
   Emits a numbered ".file N" directive unless the debug-format
   handlers own the directive.  */
6149 mips_output_filename (FILE *stream, const char *name)
6152 /* If we are emitting DWARF-2, let dwarf2out handle the ".file"
6154 if (write_symbols == DWARF2_DEBUG)
/* First call: record the file and emit the first .file directive.  */
6156 else if (mips_output_filename_first_time)
6158 mips_output_filename_first_time = 0;
6159 num_source_filenames += 1;
6160 current_function_file = name;
6161 fprintf (stream, "\t.file\t%d ", num_source_filenames);
6162 output_quoted_string (stream, name);
6163 putc ('\n', stream);
6166 /* If we are emitting stabs, let dbxout.c handle this (except for
6167 the mips_output_filename_first_time case). */
6168 else if (write_symbols == DBX_DEBUG)
/* Only re-emit when the file actually changed.  */
6171 else if (name != current_function_file
6172 && strcmp (name, current_function_file) != 0)
6174 num_source_filenames += 1;
6175 current_function_file = name;
6176 fprintf (stream, "\t.file\t%d ", num_source_filenames);
6177 output_quoted_string (stream, name);
6178 putc ('\n', stream);
6182 /* Output an ASCII string, in a space-saving way. PREFIX is the string
6183 that should be written before the opening quote, such as "\t.ascii\t"
6184 for real string data or "\t# " for a comment. */
/* Emits LEN bytes of STRING_PARAM as quoted .ascii data, escaping
   quotes/backslashes and non-printable bytes, and breaking long
   lines.  NOTE(review): the declarations of I and CUR_POS and several
   branches of the per-character escaping logic are missing from this
   extract.  */
6187 mips_output_ascii (FILE *stream, const char *string_param, size_t len,
6192 register const unsigned char *string =
6193 (const unsigned char *)string_param;
6195 fprintf (stream, "%s\"", prefix);
6196 for (i = 0; i < len; i++)
6198 register int c = string[i];
/* Quote and backslash must themselves be backslash-escaped.  */
6202 if (c == '\\' || c == '\"')
6204 putc ('\\', stream);
/* Non-printable bytes are emitted as octal escapes.  */
6212 fprintf (stream, "\\%03o", c);
/* Break overly long lines, restarting with the same prefix.  */
6216 if (cur_pos > 72 && i+1 < len)
6219 fprintf (stream, "\"\n%s\"", prefix);
6222 fprintf (stream, "\"\n");
6225 /* Implement TARGET_ASM_FILE_START. */
6228 mips_file_start (void)
6230 default_file_start ();
/* The .mdebug section is only wanted on non-IRIX targets; the guard
   condition is missing from this extract.  */
6234 /* Generate a special section to describe the ABI switches used to
6235 produce the resultant binary. This used to be done by the assembler
6236 setting bits in the ELF header's flags field, but we have run out of
6237 bits. GDB needs this information in order to be able to correctly
6238 debug these binaries. See the function mips_gdbarch_init() in
6239 gdb/mips-tdep.c. This is unnecessary for the IRIX 5/6 ABIs and
6240 causes unnecessary IRIX 6 ld warnings. */
6241 const char * abi_string = NULL;
6245 case ABI_32: abi_string = "abi32"; break;
6246 case ABI_N32: abi_string = "abiN32"; break;
6247 case ABI_64: abi_string = "abi64"; break;
6248 case ABI_O64: abi_string = "abiO64"; break;
6249 case ABI_EABI: abi_string = TARGET_64BIT ? "eabi64" : "eabi32"; break;
6253 /* Note - we use fprintf directly rather than calling switch_to_section
6254 because in this way we can avoid creating an allocated section. We
6255 do not want this section to take up any space in the running
6257 fprintf (asm_out_file, "\t.section .mdebug.%s\n", abi_string);
6259 /* There is no ELF header flag to distinguish long32 forms of the
6260 EABI from long64 forms. Emit a special section to help tools
6261 such as GDB. Do the same for o64, which is sometimes used with
6263 if (mips_abi == ABI_EABI || mips_abi == ABI_O64)
6264 fprintf (asm_out_file, "\t.section .gcc_compiled_long%d\n",
6265 TARGET_LONG64 ? 64 : 32);
6267 /* Restore the default section. */
6268 fprintf (asm_out_file, "\t.previous\n");
6270 #ifdef HAVE_AS_GNU_ATTRIBUTE
/* Record the FP ABI: 1 = hard double, 2 = hard single, 3 = soft.  */
6271 fprintf (asm_out_file, "\t.gnu_attribute 4, %d\n",
6272 TARGET_HARD_FLOAT_ABI ? (TARGET_DOUBLE_FLOAT ? 1 : 2) : 3);
6276 /* Generate the pseudo ops that System V.4 wants. */
6277 if (TARGET_ABICALLS)
6278 fprintf (asm_out_file, "\t.abicalls\n");
6281 fprintf (asm_out_file, "\t.set\tmips16\n");
6283 if (flag_verbose_asm)
6284 fprintf (asm_out_file, "\n%s -G value = %d, Arch = %s, ISA = %d\n",
6286 mips_section_threshold, mips_arch_info->name, mips_isa);
6289 #ifdef BSS_SECTION_ASM_OP
6290 /* Implement ASM_OUTPUT_ALIGNED_BSS. This differs from the default only
6291 in the use of sbss. */
/* STREAM is the assembler output; DECL/NAME identify the variable;
   SIZE is its size in bytes; ALIGN is the alignment in bits.  */
6294 mips_output_aligned_bss (FILE *stream, tree decl, const char *name,
6295 unsigned HOST_WIDE_INT size, int align)
6297 extern tree last_assemble_variable_decl;
/* Small objects go to .sbss so they are addressable off $gp.  */
6299 if (mips_in_small_data_p (decl))
6300 switch_to_section (get_named_section (NULL, ".sbss", 0));
6302 switch_to_section (bss_section);
6303 ASM_OUTPUT_ALIGN (stream, floor_log2 (align / BITS_PER_UNIT));
6304 last_assemble_variable_decl = decl;
6305 ASM_DECLARE_OBJECT_NAME (stream, name, decl);
/* Reserve at least one byte so the label refers to real storage.  */
6306 ASM_OUTPUT_SKIP (stream, size != 0 ? size : 1);
6310 /* Implement ASM_OUTPUT_ALIGNED_DECL_COMMON. This is usually the same as the
6311 elfos.h version, but we also need to handle -muninit-const-in-rodata. */
/* STREAM/DECL/NAME/SIZE as for the standard hook; the alignment
   parameter line is missing from this extract.  */
6314 mips_output_aligned_decl_common (FILE *stream, tree decl, const char *name,
6315 unsigned HOST_WIDE_INT size,
6318 /* If the target wants uninitialized const declarations in
6319 .rdata then don't put them in .comm. */
6320 if (TARGET_EMBEDDED_DATA && TARGET_UNINIT_CONST_IN_RODATA
6321 && TREE_CODE (decl) == VAR_DECL && TREE_READONLY (decl)
6322 && (DECL_INITIAL (decl) == 0 || DECL_INITIAL (decl) == error_mark_node))
6324 if (TREE_PUBLIC (decl) && DECL_NAME (decl))
6325 targetm.asm_out.globalize_label (stream, name);
/* Emit the object as zero-filled .space in the read-only section.  */
6327 switch_to_section (readonly_data_section);
6328 ASM_OUTPUT_ALIGN (stream, floor_log2 (align / BITS_PER_UNIT));
6329 mips_declare_object (stream, name, "",
6330 ":\n\t.space\t" HOST_WIDE_INT_PRINT_UNSIGNED "\n",
/* Otherwise fall back to an ordinary .comm directive.  */
6334 mips_declare_common_object (stream, name, "\n\t.comm\t",
6338 /* Declare a common object of SIZE bytes using asm directive INIT_STRING.
6339 NAME is the name of the object and ALIGN is the required alignment
6340 in bytes. TAKES_ALIGNMENT_P is true if the directive takes a third
6341 alignment argument. */
6344 mips_declare_common_object (FILE *stream, const char *name,
6345 const char *init_string,
6346 unsigned HOST_WIDE_INT size,
6347 unsigned int align, bool takes_alignment_p)
/* Without an alignment argument, fold the alignment into the size by
   rounding SIZE up to a multiple of ALIGN bytes.
   NOTE(review): ALIGN is documented as bytes but is divided by
   BITS_PER_UNIT below, i.e. treated as bits — confirm against the
   full file.  */
6349 if (!takes_alignment_p)
6351 size += (align / BITS_PER_UNIT) - 1;
6352 size -= size % (align / BITS_PER_UNIT);
6353 mips_declare_object (stream, name, init_string,
6354 "," HOST_WIDE_INT_PRINT_UNSIGNED "\n", size);
/* Otherwise pass the alignment straight through to the directive.  */
6357 mips_declare_object (stream, name, init_string,
6358 "," HOST_WIDE_INT_PRINT_UNSIGNED ",%u\n",
6359 size, align / BITS_PER_UNIT);
6362 /* Emit either a label, .comm, or .lcomm directive. When using assembler
6363 macros, mark the symbol as written so that mips_file_end won't emit an
6364 .extern for it. STREAM is the output file, NAME is the name of the
6365 symbol, INIT_STRING is the string that should be written before the
6366 symbol and FINAL_STRING is the string that should be written after it.
6367 FINAL_STRING is a printf() format that consumes the remaining arguments. */
6370 mips_declare_object (FILE *stream, const char *name, const char *init_string,
6371 const char *final_string, ...)
/* "%s%s%s" would terminate at a NUL in NAME, so emit the pieces
   separately: prefix, symbol, then the formatted suffix.  */
6375 fputs (init_string, stream);
6376 assemble_name (stream, name);
6377 va_start (ap, final_string);
6378 vfprintf (stream, final_string, ap);
/* NOTE(review): the matching va_end call is missing from this
   extract — confirm it exists in the full file.  */
6381 if (!TARGET_EXPLICIT_RELOCS)
/* Mark the identifier written so mips_file_end skips its .extern.  */
6383 tree name_tree = get_identifier (name);
6384 TREE_ASM_WRITTEN (name_tree) = 1;
6388 #ifdef ASM_OUTPUT_SIZE_DIRECTIVE
6389 extern int size_directive_output;
6391 /* Implement ASM_DECLARE_OBJECT_NAME. This is like most of the standard ELF
6392 definitions except that it uses mips_declare_object() to emit the label. */
6395 mips_declare_object_name (FILE *stream, const char *name,
6396 tree decl ATTRIBUTE_UNUSED)
6398 #ifdef ASM_OUTPUT_TYPE_DIRECTIVE
6399 ASM_OUTPUT_TYPE_DIRECTIVE (stream, name, "object");
/* Emit a .size directive when the size is known at this point.  */
6402 size_directive_output = 0;
6403 if (!flag_inhibit_size_directive && DECL_SIZE (decl))
6407 size_directive_output = 1;
6408 size = int_size_in_bytes (TREE_TYPE (decl));
6409 ASM_OUTPUT_SIZE_DIRECTIVE (stream, name, size);
/* Finally the label itself, via mips_declare_object so the symbol is
   marked as written.  */
6412 mips_declare_object (stream, name, "", ":\n");
6415 /* Implement ASM_FINISH_DECLARE_OBJECT. This is generic ELF stuff. */
/* Emit a late .size directive for a tentative (common) definition
   whose size only became known after the initial declaration.  */
6418 mips_finish_declare_object (FILE *stream, tree decl, int top_level, int at_end)
6422 name = XSTR (XEXP (DECL_RTL (decl), 0), 0);
6423 if (!flag_inhibit_size_directive
6424 && DECL_SIZE (decl) != 0
6425 && !at_end && top_level
6426 && DECL_INITIAL (decl) == error_mark_node
6427 && !size_directive_output)
6431 size_directive_output = 1;
6432 size = int_size_in_bytes (TREE_TYPE (decl));
6433 ASM_OUTPUT_SIZE_DIRECTIVE (stream, name, size);
6438 /* Return true if X in context CONTEXT is a small data address that can
6439 be rewritten as a LO_SUM. */
6442 mips_rewrite_small_data_p (rtx x, enum mips_symbol_context context)
6444 enum mips_symbol_type symbol_type;
6446 return (TARGET_EXPLICIT_RELOCS
6447 && mips_symbolic_constant_p (x, context, &symbol_type)
6448 && symbol_type == SYMBOL_GP_RELATIVE);
6452 /* A for_each_rtx callback for mips_small_data_pattern_p. DATA is the
6453 containing MEM, or null if none. */
/* Returns nonzero to report a match; -1 from the LO_SUM case tells
   for_each_rtx to skip that subtree (it is already rewritten).
   NOTE(review): the return type and the early-return for LO_SUM and
   for non-MEM bodies are missing from this extract.  */
6456 mips_small_data_pattern_1 (rtx *loc, void *data)
6458 enum mips_symbol_context context;
6460 if (GET_CODE (*loc) == LO_SUM)
/* For a MEM, scan the address with the MEM itself as DATA so the
   address is classified in a memory context.  */
6465 if (for_each_rtx (&XEXP (*loc, 0), mips_small_data_pattern_1, *loc))
6470 context = data ? SYMBOL_CONTEXT_MEM : SYMBOL_CONTEXT_LEA;
6471 return mips_rewrite_small_data_p (*loc, context);
6474 /* Return true if OP refers to small data symbols directly, not through
6478 mips_small_data_pattern_p (rtx op)
6480 return for_each_rtx (&op, mips_small_data_pattern_1, 0);
6483 /* A for_each_rtx callback, used by mips_rewrite_small_data.
6484 DATA is the containing MEM, or null if none. */
/* Rewrites matching small-data references in place to
   LO_SUM ($gp, sym).  NOTE(review): the return type, the MEM guard
   around the recursive scan, and the final return values are missing
   from this extract.  */
6487 mips_rewrite_small_data_1 (rtx *loc, void *data)
6489 enum mips_symbol_context context;
/* Recurse into a MEM's address with the MEM as context.  */
6493 for_each_rtx (&XEXP (*loc, 0), mips_rewrite_small_data_1, *loc);
6497 context = data ? SYMBOL_CONTEXT_MEM : SYMBOL_CONTEXT_LEA;
6498 if (mips_rewrite_small_data_p (*loc, context))
6499 *loc = gen_rtx_LO_SUM (Pmode, pic_offset_table_rtx, *loc);
/* Don't descend into an (old or newly created) LO_SUM.  */
6501 if (GET_CODE (*loc) == LO_SUM)
6507 /* If possible, rewrite OP so that it refers to small data using
6508 explicit relocations. */
/* Works on a copy so the original pattern is left untouched.
   NOTE(review): the return type (rtx) and the final `return op;` are
   missing from this extract.  */
6511 mips_rewrite_small_data (rtx op)
6513 op = copy_insn (op);
6514 for_each_rtx (&op, mips_rewrite_small_data_1, 0);
6518 /* Return true if the current function has an insn that implicitly
6522 mips_function_has_gp_insn (void)
6524 /* Don't bother rechecking if we found one last time. */
6525 if (!cfun->machine->has_gp_insn_p)
/* Scan the whole function (including pending sequences) for an insn
   that either uses the GOT or matches a small-data pattern.  */
6529 push_topmost_sequence ();
6530 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
/* Skip USE/CLOBBER notes; only real insns can touch $gp.
   NOTE(review): the INSN_P test on this condition is missing from
   this extract.  */
6532 && GET_CODE (PATTERN (insn)) != USE
6533 && GET_CODE (PATTERN (insn)) != CLOBBER
6534 && (get_attr_got (insn) != GOT_UNSET
6535 || small_data_pattern (PATTERN (insn), VOIDmode)))
6537 pop_topmost_sequence ();
/* Cache the result for subsequent queries.  */
6539 cfun->machine->has_gp_insn_p = (insn != 0);
6541 return cfun->machine->has_gp_insn_p;
6545 /* Return the register that should be used as the global pointer
6546 within this function. Return 0 if the function doesn't need
6547 a global pointer. */
6550 mips_global_pointer (void)
6554 /* $gp is always available unless we're using a GOT. */
6555 if (!TARGET_USE_GOT)
6556 return GLOBAL_POINTER_REGNUM;
6558 /* We must always provide $gp when it is used implicitly. */
6559 if (!TARGET_EXPLICIT_RELOCS)
6560 return GLOBAL_POINTER_REGNUM;
6562 /* FUNCTION_PROFILER includes a jal macro, so we need to give it
6564 if (current_function_profile)
6565 return GLOBAL_POINTER_REGNUM;
6567 /* If the function has a nonlocal goto, $gp must hold the correct
6568 global pointer for the target function. */
6569 if (current_function_has_nonlocal_goto)
6570 return GLOBAL_POINTER_REGNUM;
6572 /* If the gp is never referenced, there's no need to initialize it.
6573 Note that reload can sometimes introduce constant pool references
6574 into a function that otherwise didn't need them. For example,
6575 suppose we have an instruction like:
6577 (set (reg:DF R1) (float:DF (reg:SI R2)))
6579 If R2 turns out to be constant such as 1, the instruction may have a
6580 REG_EQUAL note saying that R1 == 1.0. Reload then has the option of
6581 using this constant if R2 doesn't get allocated to a register.
6583 In cases like these, reload will have added the constant to the pool
6584 but no instruction will yet refer to it. */
/* Return 0 here — no global pointer needed.  NOTE(review): the
   `return 0;` line itself is missing from this extract.  */
6585 if (!df_regs_ever_live_p (GLOBAL_POINTER_REGNUM)
6586 && !current_function_uses_const_pool
6587 && !mips_function_has_gp_insn ())
6590 /* We need a global pointer, but perhaps we can use a call-clobbered
6591 register instead of $gp. */
/* In leaf functions, any unused call-clobbered register (other than
   $25, which calls need) can hold the GP value more cheaply.  */
6592 if (TARGET_CALL_SAVED_GP && current_function_is_leaf)
6593 for (regno = GP_REG_FIRST; regno <= GP_REG_LAST; regno++)
6594 if (!df_regs_ever_live_p (regno)
6595 && call_used_regs[regno]
6596 && !fixed_regs[regno]
6597 && regno != PIC_FUNCTION_ADDR_REGNUM)
6600 return GLOBAL_POINTER_REGNUM;
6604 /* Return true if the function return value MODE will get returned in a
6605 floating-point register. */
6608 mips_return_mode_in_fpr_p (enum machine_mode mode)
6610 return ((GET_MODE_CLASS (mode) == MODE_FLOAT
6611 || GET_MODE_CLASS (mode) == MODE_VECTOR_FLOAT
6612 || GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT)
6613 && GET_MODE_UNIT_SIZE (mode) <= UNITS_PER_HWFPVALUE);
6616 /* Return a two-character string representing a function floating-point
6617 return mode, used to name MIPS16 function stubs. */
/* NOTE(review): the return type, the SFmode branch, the returned
   suffix strings for each mode, and the unreachable default are all
   missing from this extract — only the mode dispatch skeleton
   survives.  */
6620 mips16_call_stub_mode_suffix (enum machine_mode mode)
6624 else if (mode == DFmode)
6626 else if (mode == SCmode)
6628 else if (mode == DCmode)
6630 else if (mode == V2SFmode)
6636 /* Return true if the current function returns its value in a floating-point
6637 register in MIPS16 mode. */
/* True only for MIPS16 hard-float, non-aggregate returns whose mode
   is FPR-returnable per mips_return_mode_in_fpr_p.  */
6640 mips16_cfun_returns_in_fpr_p (void)
6642 tree return_type = DECL_RESULT (current_function_decl);
6643 return (mips16_hard_float
6644 && !aggregate_value_p (return_type, current_function_decl)
6645 && mips_return_mode_in_fpr_p (DECL_MODE (return_type)));
6649 /* Return true if the current function must save REGNO. */
6652 mips_save_reg_p (unsigned int regno)
6654 /* We only need to save $gp if TARGET_CALL_SAVED_GP and only then
6655 if we have not chosen a call-clobbered substitute. */
6656 if (regno == GLOBAL_POINTER_REGNUM)
6657 return TARGET_CALL_SAVED_GP && cfun->machine->global_pointer == regno;
6659 /* Check call-saved registers. */
6660 if (df_regs_ever_live_p (regno) && !call_used_regs[regno])
6663 /* Save both registers in an FPR pair if either one is used. This is
6664 needed for the case when MIN_FPRS_PER_FMT == 1, which allows the odd
6665 register to be used without the even register. */
6666 if (FP_REG_P (regno)
6667 && MAX_FPRS_PER_FMT == 2
6668 && df_regs_ever_live_p (regno + 1)
6669 && !call_used_regs[regno + 1])
6672 /* We need to save the old frame pointer before setting up a new one. */
6673 if (regno == HARD_FRAME_POINTER_REGNUM && frame_pointer_needed)
6676 /* We need to save the incoming return address if it is ever clobbered
6677 within the function. */
6678 if (regno == GP_REG_FIRST + 31 && df_regs_ever_live_p (regno))
/* The remaining cases are MIPS16-specific.  NOTE(review): the
   TARGET_MIPS16 guard and the `return true;`/`return false;` lines
   between the conditions are missing from this extract.  */
6683 /* $18 is a special case in mips16 code. It may be used to call
6684 a function which returns a floating point value, but it is
6685 marked in call_used_regs. */
6686 if (regno == GP_REG_FIRST + 18 && df_regs_ever_live_p (regno))
6689 /* $31 is also a special case. It will be used to copy a return
6690 value into the floating point registers if the return value is
6692 if (regno == GP_REG_FIRST + 31
6693 && mips16_cfun_returns_in_fpr_p ())
/* Return the index of the lowest X in the range [0, SIZE) for which
   bit REGS[X] is set in MASK.  Return SIZE if there is no such X.  */

static unsigned int
mips16e_find_first_register (unsigned int mask, const unsigned char *regs,
			     unsigned int size)
{
  unsigned int index;

  /* Linear scan; falling off the end leaves INDEX == SIZE, which is
     exactly the documented "not found" value.  */
  for (index = 0; index < size; index++)
    if (BITSET_P (mask, regs[index]))
      break;

  return index;
}
6716 /* *MASK_PTR is a mask of general purpose registers and *GP_REG_SIZE_PTR
6717 is the number of bytes that they occupy. If *MASK_PTR contains REGS[X]
6718 for some X in [0, SIZE), adjust *MASK_PTR and *GP_REG_SIZE_PTR so that
6719 the same is true for all indexes (X, SIZE). */
/* In other words: once the first saved register in the MIPS16e
   SAVE/RESTORE range is found, force-save every later register in the
   range too, accounting for the extra bytes.  */
6722 mips16e_mask_registers (unsigned int *mask_ptr, const unsigned char *regs,
6723 unsigned int size, HOST_WIDE_INT *gp_reg_size_ptr)
/* Find the first register already in the mask...  */
6727 i = mips16e_find_first_register (*mask_ptr, regs, size);
/* ...then add every subsequent one that isn't yet included.  */
6728 for (i++; i < size; i++)
6729 if (!BITSET_P (*mask_ptr, regs[i]))
6731 *gp_reg_size_ptr += GET_MODE_SIZE (gpr_mode);
6732 *mask_ptr |= 1 << regs[i];
6736 /* Return the bytes needed to compute the frame pointer from the current
6737 stack pointer. SIZE is the size (in bytes) of the local variables.
6739 MIPS stack frames look like:
6741 Before call After call
6742 high +-----------------------+ +-----------------------+
6744 | caller's temps. | | caller's temps. |
6746 +-----------------------+ +-----------------------+
6748 | arguments on stack. | | arguments on stack. |
6750 +-----------------------+ +-----------------------+
6751 | 4 words to save | | 4 words to save |
6752 | arguments passed | | arguments passed |
6753 | in registers, even | | in registers, even |
6754 | if not passed. | | if not passed. |
6755 SP->+-----------------------+ VFP->+-----------------------+
6756 (VFP = SP+fp_sp_offset) | |\
6757 | fp register save | | fp_reg_size
6759 SP+gp_sp_offset->+-----------------------+
6761 | | gp register save | | gp_reg_size
6762 gp_reg_rounded | | |/
6763 | +-----------------------+
6764 \| alignment padding |
6765 +-----------------------+
6767 | local variables | | var_size
6769 +-----------------------+
6771 | alloca allocations |
6773 +-----------------------+
6775 cprestore_size | | GP save for V.4 abi |
6777 +-----------------------+
6779 | arguments on stack | |
6781 +-----------------------+ |
6782 | 4 words to save | | args_size
6783 | arguments passed | |
6784 | in registers, even | |
6785 | if not passed. | |
6786 low | (TARGET_OLDABI only) |/
6787 memory SP->+-----------------------+
6792 compute_frame_size (HOST_WIDE_INT size)
6795 HOST_WIDE_INT total_size; /* # bytes that the entire frame takes up */
6796 HOST_WIDE_INT var_size; /* # bytes that variables take up */
6797 HOST_WIDE_INT args_size; /* # bytes that outgoing arguments take up */
6798 HOST_WIDE_INT cprestore_size; /* # bytes that the cprestore slot takes up */
6799 HOST_WIDE_INT gp_reg_rounded; /* # bytes needed to store gp after rounding */
6800 HOST_WIDE_INT gp_reg_size; /* # bytes needed to store gp regs */
6801 HOST_WIDE_INT fp_reg_size; /* # bytes needed to store fp regs */
6802 unsigned int mask; /* mask of saved gp registers */
6803 unsigned int fmask; /* mask of saved fp registers */
6805 cfun->machine->global_pointer = mips_global_pointer ();
6811 var_size = MIPS_STACK_ALIGN (size);
6812 args_size = current_function_outgoing_args_size;
6813 cprestore_size = MIPS_STACK_ALIGN (STARTING_FRAME_OFFSET) - args_size;
6815 /* The space set aside by STARTING_FRAME_OFFSET isn't needed in leaf
6816 functions. If the function has local variables, we're committed
6817 to allocating it anyway. Otherwise reclaim it here. */
6818 if (var_size == 0 && current_function_is_leaf)
6819 cprestore_size = args_size = 0;
6821 /* The MIPS 3.0 linker does not like functions that dynamically
6822 allocate the stack and have 0 for STACK_DYNAMIC_OFFSET, since it
6823 looks like we are trying to create a second frame pointer to the
6824 function, so allocate some stack space to make it happy. */
6826 if (args_size == 0 && current_function_calls_alloca)
6827 args_size = 4 * UNITS_PER_WORD;
6829 total_size = var_size + args_size + cprestore_size;
6831 /* Calculate space needed for gp registers. */
6832 for (regno = GP_REG_FIRST; regno <= GP_REG_LAST; regno++)
6833 if (mips_save_reg_p (regno))
6835 gp_reg_size += GET_MODE_SIZE (gpr_mode);
6836 mask |= 1 << (regno - GP_REG_FIRST);
6839 /* We need to restore these for the handler. */
6840 if (current_function_calls_eh_return)
6845 regno = EH_RETURN_DATA_REGNO (i);
6846 if (regno == INVALID_REGNUM)
6848 gp_reg_size += GET_MODE_SIZE (gpr_mode);
6849 mask |= 1 << (regno - GP_REG_FIRST);
6853 /* The MIPS16e SAVE and RESTORE instructions have two ranges of registers:
6854 $a3-$a0 and $s2-$s8. If we save one register in the range, we must
6855 save all later registers too. */
6856 if (GENERATE_MIPS16E_SAVE_RESTORE)
6858 mips16e_mask_registers (&mask, mips16e_s2_s8_regs,
6859 ARRAY_SIZE (mips16e_s2_s8_regs), &gp_reg_size);
6860 mips16e_mask_registers (&mask, mips16e_a0_a3_regs,
6861 ARRAY_SIZE (mips16e_a0_a3_regs), &gp_reg_size);
6864 /* This loop must iterate over the same space as its companion in
6865 mips_for_each_saved_reg. */
6866 for (regno = (FP_REG_LAST - MAX_FPRS_PER_FMT + 1);
6867 regno >= FP_REG_FIRST;
6868 regno -= MAX_FPRS_PER_FMT)
6870 if (mips_save_reg_p (regno))
6872 fp_reg_size += MAX_FPRS_PER_FMT * UNITS_PER_FPREG;
6873 fmask |= ((1 << MAX_FPRS_PER_FMT) - 1) << (regno - FP_REG_FIRST);
6877 gp_reg_rounded = MIPS_STACK_ALIGN (gp_reg_size);
6878 total_size += gp_reg_rounded + MIPS_STACK_ALIGN (fp_reg_size);
6880 /* Add in the space required for saving incoming register arguments. */
6881 total_size += current_function_pretend_args_size;
6882 total_size += MIPS_STACK_ALIGN (cfun->machine->varargs_size);
6884 /* Save other computed information. */
6885 cfun->machine->frame.total_size = total_size;
6886 cfun->machine->frame.var_size = var_size;
6887 cfun->machine->frame.args_size = args_size;
6888 cfun->machine->frame.cprestore_size = cprestore_size;
6889 cfun->machine->frame.gp_reg_size = gp_reg_size;
6890 cfun->machine->frame.fp_reg_size = fp_reg_size;
6891 cfun->machine->frame.mask = mask;
6892 cfun->machine->frame.fmask = fmask;
6893 cfun->machine->frame.initialized = reload_completed;
6894 cfun->machine->frame.num_gp = gp_reg_size / UNITS_PER_WORD;
6895 cfun->machine->frame.num_fp = (fp_reg_size
6896 / (MAX_FPRS_PER_FMT * UNITS_PER_FPREG));
6900 HOST_WIDE_INT offset;
6902 if (GENERATE_MIPS16E_SAVE_RESTORE)
6903 /* MIPS16e SAVE and RESTORE instructions require the GP save area
6904 to be aligned at the high end with any padding at the low end.
6905 It is only safe to use this calculation for o32, where we never
6906 have pretend arguments, and where any varargs will be saved in
6907 the caller-allocated area rather than at the top of the frame. */
6908 offset = (total_size - GET_MODE_SIZE (gpr_mode));
6910 offset = (args_size + cprestore_size + var_size
6911 + gp_reg_size - GET_MODE_SIZE (gpr_mode));
6912 cfun->machine->frame.gp_sp_offset = offset;
6913 cfun->machine->frame.gp_save_offset = offset - total_size;
6917 cfun->machine->frame.gp_sp_offset = 0;
6918 cfun->machine->frame.gp_save_offset = 0;
6923 HOST_WIDE_INT offset;
6925 offset = (args_size + cprestore_size + var_size
6926 + gp_reg_rounded + fp_reg_size
6927 - MAX_FPRS_PER_FMT * UNITS_PER_FPREG);
6928 cfun->machine->frame.fp_sp_offset = offset;
6929 cfun->machine->frame.fp_save_offset = offset - total_size;
6933 cfun->machine->frame.fp_sp_offset = 0;
6934 cfun->machine->frame.fp_save_offset = 0;
6937 /* Ok, we're done. */
6941 /* Implement INITIAL_ELIMINATION_OFFSET. FROM is either the frame
6942 pointer or argument pointer. TO is either the stack pointer or
6943 hard frame pointer. */
6946 mips_initial_elimination_offset (int from, int to)
6948 HOST_WIDE_INT offset;
6950 compute_frame_size (get_frame_size ());
6952 /* Set OFFSET to the offset from the stack pointer. */
6955 case FRAME_POINTER_REGNUM:
6959 case ARG_POINTER_REGNUM:
6960 offset = (cfun->machine->frame.total_size
6961 - current_function_pretend_args_size);
6968 if (TARGET_MIPS16 && to == HARD_FRAME_POINTER_REGNUM)
6969 offset -= cfun->machine->frame.args_size;
6974 /* Implement RETURN_ADDR_RTX. Note, we do not support moving
6975 back to a previous frame. */
6977 mips_return_addr (int count, rtx frame ATTRIBUTE_UNUSED)
6982 return get_hard_reg_initial_val (Pmode, GP_REG_FIRST + 31);
6985 /* Use FN to save or restore register REGNO. MODE is the register's
6986 mode and OFFSET is the offset of its save slot from the current
6990 mips_save_restore_reg (enum machine_mode mode, int regno,
6991 HOST_WIDE_INT offset, mips_save_restore_fn fn)
6995 mem = gen_frame_mem (mode, plus_constant (stack_pointer_rtx, offset));
6997 fn (gen_rtx_REG (mode, regno), mem);
7001 /* Call FN for each register that is saved by the current function.
7002 SP_OFFSET is the offset of the current stack pointer from the start
7006 mips_for_each_saved_reg (HOST_WIDE_INT sp_offset, mips_save_restore_fn fn)
7008 enum machine_mode fpr_mode;
7009 HOST_WIDE_INT offset;
7012 /* Save registers starting from high to low. The debuggers prefer at least
7013 the return register be stored at func+4, and also it allows us not to
7014 need a nop in the epilogue if at least one register is reloaded in
7015 addition to return address. */
7016 offset = cfun->machine->frame.gp_sp_offset - sp_offset;
7017 for (regno = GP_REG_LAST; regno >= GP_REG_FIRST; regno--)
7018 if (BITSET_P (cfun->machine->frame.mask, regno - GP_REG_FIRST))
7020 mips_save_restore_reg (gpr_mode, regno, offset, fn);
7021 offset -= GET_MODE_SIZE (gpr_mode);
7024 /* This loop must iterate over the same space as its companion in
7025 compute_frame_size. */
7026 offset = cfun->machine->frame.fp_sp_offset - sp_offset;
7027 fpr_mode = (TARGET_SINGLE_FLOAT ? SFmode : DFmode);
7028 for (regno = (FP_REG_LAST - MAX_FPRS_PER_FMT + 1);
7029 regno >= FP_REG_FIRST;
7030 regno -= MAX_FPRS_PER_FMT)
7031 if (BITSET_P (cfun->machine->frame.fmask, regno - FP_REG_FIRST))
7033 mips_save_restore_reg (fpr_mode, regno, offset, fn);
7034 offset -= GET_MODE_SIZE (fpr_mode);
7038 /* If we're generating n32 or n64 abicalls, and the current function
7039 does not use $28 as its global pointer, emit a cplocal directive.
7040 Use pic_offset_table_rtx as the argument to the directive. */
7043 mips_output_cplocal (void)
7045 if (!TARGET_EXPLICIT_RELOCS
7046 && cfun->machine->global_pointer > 0
7047 && cfun->machine->global_pointer != GLOBAL_POINTER_REGNUM)
7048 output_asm_insn (".cplocal %+", 0);
7051 /* Return the style of GP load sequence that is being used for the
7052 current function. */
7054 enum mips_loadgp_style
7055 mips_current_loadgp_style (void)
7057 if (!TARGET_USE_GOT || cfun->machine->global_pointer == 0)
7063 if (TARGET_ABSOLUTE_ABICALLS)
7064 return LOADGP_ABSOLUTE;
7066 return TARGET_NEWABI ? LOADGP_NEWABI : LOADGP_OLDABI;
7069 /* The __gnu_local_gp symbol, used by LOADGP_ABSOLUTE sequences.
   Created lazily by mips_emit_loadgp, which also marks it
   SYMBOL_FLAG_LOCAL.  GTY(()) keeps the cached rtx alive across
   garbage collections.  */
7071 static GTY(()) rtx mips_gnu_local_gp;
7073 /* If we're generating n32 or n64 abicalls, emit instructions
7074 to set up the global pointer. */
7077 mips_emit_loadgp (void)
7079 rtx addr, offset, incoming_address, base, index;
7081 switch (mips_current_loadgp_style ())
7083 case LOADGP_ABSOLUTE:
7084 if (mips_gnu_local_gp == NULL)
7086 mips_gnu_local_gp = gen_rtx_SYMBOL_REF (Pmode, "__gnu_local_gp");
7087 SYMBOL_REF_FLAGS (mips_gnu_local_gp) |= SYMBOL_FLAG_LOCAL;
7089 emit_insn (gen_loadgp_absolute (mips_gnu_local_gp));
7093 addr = XEXP (DECL_RTL (current_function_decl), 0);
7094 offset = mips_unspec_address (addr, SYMBOL_GOTOFF_LOADGP);
7095 incoming_address = gen_rtx_REG (Pmode, PIC_FUNCTION_ADDR_REGNUM);
7096 emit_insn (gen_loadgp_newabi (offset, incoming_address));
7097 if (!TARGET_EXPLICIT_RELOCS)
7098 emit_insn (gen_loadgp_blockage ());
7102 base = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (VXWORKS_GOTT_BASE));
7103 index = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (VXWORKS_GOTT_INDEX));
7104 emit_insn (gen_loadgp_rtp (base, index));
7105 if (!TARGET_EXPLICIT_RELOCS)
7106 emit_insn (gen_loadgp_blockage ());
7114 /* Set up the stack and frame (if desired) for the function. */
7117 mips_output_function_prologue (FILE *file, HOST_WIDE_INT size ATTRIBUTE_UNUSED)
7120 HOST_WIDE_INT tsize = cfun->machine->frame.total_size;
7122 #ifdef SDB_DEBUGGING_INFO
7123 if (debug_info_level != DINFO_LEVEL_TERSE && write_symbols == SDB_DEBUG)
7124 SDB_OUTPUT_SOURCE_LINE (file, DECL_SOURCE_LINE (current_function_decl));
7127 /* In mips16 mode, we may need to generate a 32 bit to handle
7128 floating point arguments. The linker will arrange for any 32-bit
7129 functions to call this stub, which will then jump to the 16-bit
7131 if (mips16_hard_float
7132 && current_function_args_info.fp_code != 0)
7133 build_mips16_function_stub (file);
7135 if (!FUNCTION_NAME_ALREADY_DECLARED)
7137 /* Get the function name the same way that toplev.c does before calling
7138 assemble_start_function. This is needed so that the name used here
7139 exactly matches the name used in ASM_DECLARE_FUNCTION_NAME. */
7140 fnname = XSTR (XEXP (DECL_RTL (current_function_decl), 0), 0);
7142 if (!flag_inhibit_size_directive)
7144 fputs ("\t.ent\t", file);
7145 assemble_name (file, fnname);
7149 assemble_name (file, fnname);
7150 fputs (":\n", file);
7153 /* Stop mips_file_end from treating this function as external. */
7154 if (TARGET_IRIX && mips_abi == ABI_32)
7155 TREE_ASM_WRITTEN (DECL_NAME (cfun->decl)) = 1;
7157 if (!flag_inhibit_size_directive)
7159 /* .frame FRAMEREG, FRAMESIZE, RETREG */
7161 "\t.frame\t%s," HOST_WIDE_INT_PRINT_DEC ",%s\t\t"
7162 "# vars= " HOST_WIDE_INT_PRINT_DEC ", regs= %d/%d"
7163 ", args= " HOST_WIDE_INT_PRINT_DEC
7164 ", gp= " HOST_WIDE_INT_PRINT_DEC "\n",
7165 (reg_names[(frame_pointer_needed)
7166 ? HARD_FRAME_POINTER_REGNUM : STACK_POINTER_REGNUM]),
7167 ((frame_pointer_needed && TARGET_MIPS16)
7168 ? tsize - cfun->machine->frame.args_size
7170 reg_names[GP_REG_FIRST + 31],
7171 cfun->machine->frame.var_size,
7172 cfun->machine->frame.num_gp,
7173 cfun->machine->frame.num_fp,
7174 cfun->machine->frame.args_size,
7175 cfun->machine->frame.cprestore_size);
7177 /* .mask MASK, GPOFFSET; .fmask FPOFFSET */
7178 fprintf (file, "\t.mask\t0x%08x," HOST_WIDE_INT_PRINT_DEC "\n",
7179 cfun->machine->frame.mask,
7180 cfun->machine->frame.gp_save_offset);
7181 fprintf (file, "\t.fmask\t0x%08x," HOST_WIDE_INT_PRINT_DEC "\n",
7182 cfun->machine->frame.fmask,
7183 cfun->machine->frame.fp_save_offset);
7186 OLD_SP == *FRAMEREG + FRAMESIZE => can find old_sp from nominated FP reg.
7187 HIGHEST_GP_SAVED == *FRAMEREG + FRAMESIZE + GPOFFSET => can find saved regs. */
7190 if (mips_current_loadgp_style () == LOADGP_OLDABI)
7192 /* Handle the initialization of $gp for SVR4 PIC. */
7193 if (!cfun->machine->all_noreorder_p)
7194 output_asm_insn ("%(.cpload\t%^%)", 0);
7196 output_asm_insn ("%(.cpload\t%^\n\t%<", 0);
7198 else if (cfun->machine->all_noreorder_p)
7199 output_asm_insn ("%(%<", 0);
7201 /* Tell the assembler which register we're using as the global
7202 pointer. This is needed for thunks, since they can use either
7203 explicit relocs or assembler macros. */
7204 mips_output_cplocal ();
7207 /* Make the last instruction frame related and note that it performs
7208 the operation described by FRAME_PATTERN. */
7211 mips_set_frame_expr (rtx frame_pattern)
7215 insn = get_last_insn ();
7216 RTX_FRAME_RELATED_P (insn) = 1;
7217 REG_NOTES (insn) = alloc_EXPR_LIST (REG_FRAME_RELATED_EXPR,
7223 /* Return a frame-related rtx that stores REG at MEM.
7224 REG must be a single register. */
7227 mips_frame_set (rtx mem, rtx reg)
7231 /* If we're saving the return address register and the dwarf return
7232 address column differs from the hard register number, adjust the
7233 note reg to refer to the former. */
7234 if (REGNO (reg) == GP_REG_FIRST + 31
7235 && DWARF_FRAME_RETURN_COLUMN != GP_REG_FIRST + 31)
7236 reg = gen_rtx_REG (GET_MODE (reg), DWARF_FRAME_RETURN_COLUMN);
7238 set = gen_rtx_SET (VOIDmode, mem, reg);
7239 RTX_FRAME_RELATED_P (set) = 1;
7245 /* Save register REG to MEM. Make the instruction frame-related. */
7248 mips_save_reg (rtx reg, rtx mem)
7250 if (GET_MODE (reg) == DFmode && !TARGET_FLOAT64)
7254 if (mips_split_64bit_move_p (mem, reg))
7255 mips_split_64bit_move (mem, reg);
7257 emit_move_insn (mem, reg);
7259 x1 = mips_frame_set (mips_subword (mem, 0), mips_subword (reg, 0));
7260 x2 = mips_frame_set (mips_subword (mem, 1), mips_subword (reg, 1));
7261 mips_set_frame_expr (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, x1, x2)));
7266 && REGNO (reg) != GP_REG_FIRST + 31
7267 && !M16_REG_P (REGNO (reg)))
7269 /* Save a non-mips16 register by moving it through a temporary.
7270 We don't need to do this for $31 since there's a special
7271 instruction for it. */
7272 emit_move_insn (MIPS_PROLOGUE_TEMP (GET_MODE (reg)), reg);
7273 emit_move_insn (mem, MIPS_PROLOGUE_TEMP (GET_MODE (reg)));
7276 emit_move_insn (mem, reg);
7278 mips_set_frame_expr (mips_frame_set (mem, reg));
7282 /* Return a move between register REGNO and memory location SP + OFFSET.
7283 Make the move a load if RESTORE_P, otherwise make it a frame-related
7287 mips16e_save_restore_reg (bool restore_p, HOST_WIDE_INT offset,
7292 mem = gen_frame_mem (SImode, plus_constant (stack_pointer_rtx, offset));
7293 reg = gen_rtx_REG (SImode, regno);
7295 ? gen_rtx_SET (VOIDmode, reg, mem)
7296 : mips_frame_set (mem, reg));
7299 /* Return RTL for a MIPS16e SAVE or RESTORE instruction; RESTORE_P says which.
7300 The instruction must:
7302 - Allocate or deallocate SIZE bytes in total; SIZE is known
7305 - Save or restore as many registers in *MASK_PTR as possible.
7306 The instruction saves the first registers at the top of the
7307 allocated area, with the other registers below it.
7309 - Save NARGS argument registers above the allocated area.
7311 (NARGS is always zero if RESTORE_P.)
7313 The SAVE and RESTORE instructions cannot save and restore all general
7314 registers, so there may be some registers left over for the caller to
7315 handle. Destructively modify *MASK_PTR so that it contains the registers
7316 that still need to be saved or restored. The caller can save these
7317 registers in the memory immediately below *OFFSET_PTR, which is a
7318 byte offset from the bottom of the allocated stack area. */
7321 mips16e_build_save_restore (bool restore_p, unsigned int *mask_ptr,
7322 HOST_WIDE_INT *offset_ptr, unsigned int nargs,
7326 HOST_WIDE_INT offset, top_offset;
7327 unsigned int i, regno;
7330 gcc_assert (cfun->machine->frame.fp_reg_size == 0);
7332 /* Calculate the number of elements in the PARALLEL. We need one element
7333 for the stack adjustment, one for each argument register save, and one
7334 for each additional register move. */
7336 for (i = 0; i < ARRAY_SIZE (mips16e_save_restore_regs); i++)
7337 if (BITSET_P (*mask_ptr, mips16e_save_restore_regs[i]))
7340 /* Create the final PARALLEL. */
7341 pattern = gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (n));
7344 /* Add the stack pointer adjustment. */
7345 set = gen_rtx_SET (VOIDmode, stack_pointer_rtx,
7346 plus_constant (stack_pointer_rtx,
7347 restore_p ? size : -size));
7348 RTX_FRAME_RELATED_P (set) = 1;
7349 XVECEXP (pattern, 0, n++) = set;
7351 /* Stack offsets in the PARALLEL are relative to the old stack pointer. */
7352 top_offset = restore_p ? size : 0;
7354 /* Save the arguments. */
7355 for (i = 0; i < nargs; i++)
7357 offset = top_offset + i * GET_MODE_SIZE (gpr_mode);
7358 set = mips16e_save_restore_reg (restore_p, offset, GP_ARG_FIRST + i);
7359 XVECEXP (pattern, 0, n++) = set;
7362 /* Then fill in the other register moves. */
7363 offset = top_offset;
7364 for (i = 0; i < ARRAY_SIZE (mips16e_save_restore_regs); i++)
7366 regno = mips16e_save_restore_regs[i];
7367 if (BITSET_P (*mask_ptr, regno))
7369 offset -= UNITS_PER_WORD;
7370 set = mips16e_save_restore_reg (restore_p, offset, regno);
7371 XVECEXP (pattern, 0, n++) = set;
7372 *mask_ptr &= ~(1 << regno);
7376 /* Tell the caller what offset it should use for the remaining registers. */
7377 *offset_ptr = size + (offset - top_offset) + size;
7379 gcc_assert (n == XVECLEN (pattern, 0));
7384 /* PATTERN is a PARALLEL whose first element adds ADJUST to the stack
7385 pointer. Return true if PATTERN matches the kind of instruction
7386 generated by mips16e_build_save_restore. If INFO is nonnull,
7387 initialize it when returning true. */
7390 mips16e_save_restore_pattern_p (rtx pattern, HOST_WIDE_INT adjust,
7391 struct mips16e_save_restore_info *info)
7393 unsigned int i, nargs, mask;
7394 HOST_WIDE_INT top_offset, save_offset, offset, extra;
7395 rtx set, reg, mem, base;
7398 if (!GENERATE_MIPS16E_SAVE_RESTORE)
7401 /* Stack offsets in the PARALLEL are relative to the old stack pointer. */
7402 top_offset = adjust > 0 ? adjust : 0;
7404 /* Interpret all other members of the PARALLEL. */
7405 save_offset = top_offset - GET_MODE_SIZE (gpr_mode);
7409 for (n = 1; n < XVECLEN (pattern, 0); n++)
7411 /* Check that we have a SET. */
7412 set = XVECEXP (pattern, 0, n);
7413 if (GET_CODE (set) != SET)
7416 /* Check that the SET is a load (if restoring) or a store
7418 mem = adjust > 0 ? SET_SRC (set) : SET_DEST (set);
7422 /* Check that the address is the sum of the stack pointer and a
7423 possibly-zero constant offset. */
7424 mips_split_plus (XEXP (mem, 0), &base, &offset);
7425 if (base != stack_pointer_rtx)
7428 /* Check that SET's other operand is a register. */
7429 reg = adjust > 0 ? SET_DEST (set) : SET_SRC (set);
7433 /* Check for argument saves. */
7434 if (offset == top_offset + nargs * GET_MODE_SIZE (gpr_mode)
7435 && REGNO (reg) == GP_ARG_FIRST + nargs)
7437 else if (offset == save_offset)
7439 while (mips16e_save_restore_regs[i++] != REGNO (reg))
7440 if (i == ARRAY_SIZE (mips16e_save_restore_regs))
7443 mask |= 1 << REGNO (reg);
7444 save_offset -= GET_MODE_SIZE (gpr_mode);
7450 /* Check that the restrictions on register ranges are met. */
7452 mips16e_mask_registers (&mask, mips16e_s2_s8_regs,
7453 ARRAY_SIZE (mips16e_s2_s8_regs), &extra);
7454 mips16e_mask_registers (&mask, mips16e_a0_a3_regs,
7455 ARRAY_SIZE (mips16e_a0_a3_regs), &extra);
7459 /* Make sure that the topmost argument register is not saved twice.
7460 The checks above ensure that the same is then true for the other
7461 argument registers. */
7462 if (nargs > 0 && BITSET_P (mask, GP_ARG_FIRST + nargs - 1))
7465 /* Pass back information, if requested. */
7468 info->nargs = nargs;
7470 info->size = (adjust > 0 ? adjust : -adjust);
7476 /* Add a MIPS16e SAVE or RESTORE register-range argument to string S
7477 for the register range [MIN_REG, MAX_REG]. Return a pointer to
7478 the null terminator. */
7481 mips16e_add_register_range (char *s, unsigned int min_reg,
7482 unsigned int max_reg)
7484 if (min_reg != max_reg)
7485 s += sprintf (s, ",%s-%s", reg_names[min_reg], reg_names[max_reg]);
7487 s += sprintf (s, ",%s", reg_names[min_reg]);
7491 /* Return the assembly instruction for a MIPS16e SAVE or RESTORE instruction.
7492 PATTERN and ADJUST are as for mips16e_save_restore_pattern_p. */
7495 mips16e_output_save_restore (rtx pattern, HOST_WIDE_INT adjust)
7497 static char buffer[300];
7499 struct mips16e_save_restore_info info;
7500 unsigned int i, end;
7503 /* Parse the pattern. */
7504 if (!mips16e_save_restore_pattern_p (pattern, adjust, &info))
7507 /* Add the mnemonic. */
7508 s = strcpy (buffer, adjust > 0 ? "restore\t" : "save\t");
7511 /* Save the arguments. */
7513 s += sprintf (s, "%s-%s,", reg_names[GP_ARG_FIRST],
7514 reg_names[GP_ARG_FIRST + info.nargs - 1]);
7515 else if (info.nargs == 1)
7516 s += sprintf (s, "%s,", reg_names[GP_ARG_FIRST]);
7518 /* Emit the amount of stack space to allocate or deallocate. */
7519 s += sprintf (s, "%d", (int) info.size);
7521 /* Save or restore $16. */
7522 if (BITSET_P (info.mask, 16))
7523 s += sprintf (s, ",%s", reg_names[GP_REG_FIRST + 16]);
7525 /* Save or restore $17. */
7526 if (BITSET_P (info.mask, 17))
7527 s += sprintf (s, ",%s", reg_names[GP_REG_FIRST + 17]);
7529 /* Save or restore registers in the range $s2...$s8, which
7530 mips16e_s2_s8_regs lists in decreasing order. Note that this
7531 is a software register range; the hardware registers are not
7532 numbered consecutively. */
7533 end = ARRAY_SIZE (mips16e_s2_s8_regs);
7534 i = mips16e_find_first_register (info.mask, mips16e_s2_s8_regs, end);
7536 s = mips16e_add_register_range (s, mips16e_s2_s8_regs[end - 1],
7537 mips16e_s2_s8_regs[i]);
7539 /* Save or restore registers in the range $a0...$a3. */
7540 end = ARRAY_SIZE (mips16e_a0_a3_regs);
7541 i = mips16e_find_first_register (info.mask, mips16e_a0_a3_regs, end);
7543 s = mips16e_add_register_range (s, mips16e_a0_a3_regs[i],
7544 mips16e_a0_a3_regs[end - 1]);
7546 /* Save or restore $31. */
7547 if (BITSET_P (info.mask, 31))
7548 s += sprintf (s, ",%s", reg_names[GP_REG_FIRST + 31]);
7553 /* Return a simplified form of X using the register values in REG_VALUES.
7554 REG_VALUES[R] is the last value assigned to hard register R, or null
7555 if R has not been modified.
7557 This function is rather limited, but is good enough for our purposes. */
7560 mips16e_collect_propagate_value (rtx x, rtx *reg_values)
7564 x = avoid_constant_pool_reference (x);
7568 x0 = mips16e_collect_propagate_value (XEXP (x, 0), reg_values);
7569 return simplify_gen_unary (GET_CODE (x), GET_MODE (x),
7570 x0, GET_MODE (XEXP (x, 0)));
7573 if (ARITHMETIC_P (x))
7575 x0 = mips16e_collect_propagate_value (XEXP (x, 0), reg_values);
7576 x1 = mips16e_collect_propagate_value (XEXP (x, 1), reg_values);
7577 return simplify_gen_binary (GET_CODE (x), GET_MODE (x), x0, x1);
7581 && reg_values[REGNO (x)]
7582 && !rtx_unstable_p (reg_values[REGNO (x)]))
7583 return reg_values[REGNO (x)];
7588 /* Return true if (set DEST SRC) stores an argument register into its
7589 caller-allocated save slot, storing the number of that argument
7590 register in *REGNO_PTR if so. REG_VALUES is as for
7591 mips16e_collect_propagate_value. */
7594 mips16e_collect_argument_save_p (rtx dest, rtx src, rtx *reg_values,
7595 unsigned int *regno_ptr)
7597 unsigned int argno, regno;
7598 HOST_WIDE_INT offset, required_offset;
7601 /* Check that this is a word-mode store. */
7602 if (!MEM_P (dest) || !REG_P (src) || GET_MODE (dest) != word_mode)
7605 /* Check that the register being saved is an unmodified argument
7607 regno = REGNO (src);
7608 if (regno < GP_ARG_FIRST || regno > GP_ARG_LAST || reg_values[regno])
7610 argno = regno - GP_ARG_FIRST;
7612 /* Check whether the address is an appropriate stack pointer or
7613 frame pointer access. The frame pointer is offset from the
7614 stack pointer by the size of the outgoing arguments. */
7615 addr = mips16e_collect_propagate_value (XEXP (dest, 0), reg_values);
7616 mips_split_plus (addr, &base, &offset);
7617 required_offset = cfun->machine->frame.total_size + argno * UNITS_PER_WORD;
7618 if (base == hard_frame_pointer_rtx)
7619 required_offset -= cfun->machine->frame.args_size;
7620 else if (base != stack_pointer_rtx)
7622 if (offset != required_offset)
7629 /* A subroutine of mips_expand_prologue, called only when generating
7630 MIPS16e SAVE instructions. Search the start of the function for any
7631 instructions that save argument registers into their caller-allocated
7632 save slots. Delete such instructions and return a value N such that
7633 saving [GP_ARG_FIRST, GP_ARG_FIRST + N) would make all the deleted
7634 instructions redundant. */
7637 mips16e_collect_argument_saves (void)
7639 rtx reg_values[FIRST_PSEUDO_REGISTER];
7640 rtx insn, next, set, dest, src;
7641 unsigned int nargs, regno;
7643 push_topmost_sequence ();
7645 memset (reg_values, 0, sizeof (reg_values));
7646 for (insn = get_insns (); insn; insn = next)
7648 next = NEXT_INSN (insn);
7655 set = PATTERN (insn);
7656 if (GET_CODE (set) != SET)
7659 dest = SET_DEST (set);
7660 src = SET_SRC (set);
7661 if (mips16e_collect_argument_save_p (dest, src, reg_values, ®no))
7663 if (!BITSET_P (cfun->machine->frame.mask, regno))
7666 nargs = MAX (nargs, (regno - GP_ARG_FIRST) + 1);
7669 else if (REG_P (dest) && GET_MODE (dest) == word_mode)
7670 reg_values[REGNO (dest)]
7671 = mips16e_collect_propagate_value (src, reg_values);
7675 pop_topmost_sequence ();
7680 /* Expand the prologue into a bunch of separate insns. */
7683 mips_expand_prologue (void)
7689 if (cfun->machine->global_pointer > 0)
7690 SET_REGNO (pic_offset_table_rtx, cfun->machine->global_pointer);
7692 size = compute_frame_size (get_frame_size ());
7694 /* Save the registers. Allocate up to MIPS_MAX_FIRST_STACK_STEP
7695 bytes beforehand; this is enough to cover the register save area
7696 without going out of range. */
7697 if ((cfun->machine->frame.mask | cfun->machine->frame.fmask) != 0)
7699 HOST_WIDE_INT step1;
7701 step1 = MIN (size, MIPS_MAX_FIRST_STACK_STEP);
7703 if (GENERATE_MIPS16E_SAVE_RESTORE)
7705 HOST_WIDE_INT offset;
7706 unsigned int mask, regno;
7708 /* Try to merge argument stores into the save instruction. */
7709 nargs = mips16e_collect_argument_saves ();
7711 /* Build the save instruction. */
7712 mask = cfun->machine->frame.mask;
7713 insn = mips16e_build_save_restore (false, &mask, &offset,
7715 RTX_FRAME_RELATED_P (emit_insn (insn)) = 1;
7718 /* Check if we need to save other registers. */
7719 for (regno = GP_REG_FIRST; regno < GP_REG_LAST; regno++)
7720 if (BITSET_P (mask, regno - GP_REG_FIRST))
7722 offset -= GET_MODE_SIZE (gpr_mode);
7723 mips_save_restore_reg (gpr_mode, regno, offset, mips_save_reg);
7728 insn = gen_add3_insn (stack_pointer_rtx,
7731 RTX_FRAME_RELATED_P (emit_insn (insn)) = 1;
7733 mips_for_each_saved_reg (size, mips_save_reg);
7737 /* Allocate the rest of the frame. */
7740 if (SMALL_OPERAND (-size))
7741 RTX_FRAME_RELATED_P (emit_insn (gen_add3_insn (stack_pointer_rtx,
7743 GEN_INT (-size)))) = 1;
7746 emit_move_insn (MIPS_PROLOGUE_TEMP (Pmode), GEN_INT (size));
7749 /* There are no instructions to add or subtract registers
7750 from the stack pointer, so use the frame pointer as a
7751 temporary. We should always be using a frame pointer
7752 in this case anyway. */
7753 gcc_assert (frame_pointer_needed);
7754 emit_move_insn (hard_frame_pointer_rtx, stack_pointer_rtx);
7755 emit_insn (gen_sub3_insn (hard_frame_pointer_rtx,
7756 hard_frame_pointer_rtx,
7757 MIPS_PROLOGUE_TEMP (Pmode)));
7758 emit_move_insn (stack_pointer_rtx, hard_frame_pointer_rtx);
7761 emit_insn (gen_sub3_insn (stack_pointer_rtx,
7763 MIPS_PROLOGUE_TEMP (Pmode)));
7765 /* Describe the combined effect of the previous instructions. */
7767 (gen_rtx_SET (VOIDmode, stack_pointer_rtx,
7768 plus_constant (stack_pointer_rtx, -size)));
7772 /* Set up the frame pointer, if we're using one. In mips16 code,
7773 we point the frame pointer ahead of the outgoing argument area.
7774 This should allow more variables & incoming arguments to be
7775 accessed with unextended instructions. */
7776 if (frame_pointer_needed)
7778 if (TARGET_MIPS16 && cfun->machine->frame.args_size != 0)
7780 rtx offset = GEN_INT (cfun->machine->frame.args_size);
7781 if (SMALL_OPERAND (cfun->machine->frame.args_size))
7783 (emit_insn (gen_add3_insn (hard_frame_pointer_rtx,
7788 emit_move_insn (MIPS_PROLOGUE_TEMP (Pmode), offset);
7789 emit_move_insn (hard_frame_pointer_rtx, stack_pointer_rtx);
7790 emit_insn (gen_add3_insn (hard_frame_pointer_rtx,
7791 hard_frame_pointer_rtx,
7792 MIPS_PROLOGUE_TEMP (Pmode)));
7794 (gen_rtx_SET (VOIDmode, hard_frame_pointer_rtx,
7795 plus_constant (stack_pointer_rtx,
7796 cfun->machine->frame.args_size)));
7800 RTX_FRAME_RELATED_P (emit_move_insn (hard_frame_pointer_rtx,
7801 stack_pointer_rtx)) = 1;
7804 mips_emit_loadgp ();
7806 /* If generating o32/o64 abicalls, save $gp on the stack. */
7807 if (TARGET_ABICALLS && TARGET_OLDABI && !current_function_is_leaf)
7808 emit_insn (gen_cprestore (GEN_INT (current_function_outgoing_args_size)));
7810 /* If we are profiling, make sure no instructions are scheduled before
7811 the call to mcount. */
7813 if (current_function_profile)
7814 emit_insn (gen_blockage ());
7817 /* Do any necessary cleanup after a function to restore stack, frame,
   and regs.  (The continuation of this comment was lost in
   extraction.)  */
/* RA_MASK: the bit for $31 (the return-address register) within
   cfun->machine->frame.mask; BITMASK_HIGH is 1 << 31.  */
7820 #define RA_MASK BITMASK_HIGH /* 1 << 31 */
7823 mips_output_function_epilogue (FILE *file ATTRIBUTE_UNUSED,
7824 HOST_WIDE_INT size ATTRIBUTE_UNUSED)
7826 /* Reinstate the normal $gp. */
7827 SET_REGNO (pic_offset_table_rtx, GLOBAL_POINTER_REGNUM);
7828 mips_output_cplocal ();
7830 if (cfun->machine->all_noreorder_p)
7832 /* Avoid using %>%) since it adds excess whitespace. */
7833 output_asm_insn (".set\tmacro", 0);
7834 output_asm_insn (".set\treorder", 0);
7835 set_noreorder = set_nomacro = 0;
7838 if (!FUNCTION_NAME_ALREADY_DECLARED && !flag_inhibit_size_directive)
7842 /* Get the function name the same way that toplev.c does before calling
7843 assemble_start_function. This is needed so that the name used here
7844 exactly matches the name used in ASM_DECLARE_FUNCTION_NAME. */
7845 fnname = XSTR (XEXP (DECL_RTL (current_function_decl), 0), 0);
7846 fputs ("\t.end\t", file);
7847 assemble_name (file, fnname);
7852 /* Emit instructions to restore register REG from slot MEM. */
7855 mips_restore_reg (rtx reg, rtx mem)
7857 /* There's no mips16 instruction to load $31 directly. Load into
7858 $7 instead and adjust the return insn appropriately. */
7859 if (TARGET_MIPS16 && REGNO (reg) == GP_REG_FIRST + 31)
7860 reg = gen_rtx_REG (GET_MODE (reg), 7);
7862 if (TARGET_MIPS16 && !M16_REG_P (REGNO (reg)))
7864 /* Can't restore directly; move through a temporary. */
7865 emit_move_insn (MIPS_EPILOGUE_TEMP (GET_MODE (reg)), mem);
7866 emit_move_insn (reg, MIPS_EPILOGUE_TEMP (GET_MODE (reg)));
7869 emit_move_insn (reg, mem);
7873 /* Expand the epilogue into a bunch of separate insns. SIBCALL_P is true
7874 if this epilogue precedes a sibling call, false if it is for a normal
7875 "epilogue" pattern. */
7878 mips_expand_epilogue (int sibcall_p)
7880 HOST_WIDE_INT step1, step2;
7883 if (!sibcall_p && mips_can_use_return_insn ())
7885 emit_jump_insn (gen_return ());
7889 /* In mips16 mode, if the return value should go into a floating-point
7890 register, we need to call a helper routine to copy it over. */
7891 if (mips16_cfun_returns_in_fpr_p ())
7900 enum machine_mode return_mode;
7902 return_type = DECL_RESULT (current_function_decl);
7903 return_mode = DECL_MODE (return_type);
7905 name = ACONCAT (("__mips16_ret_",
7906 mips16_call_stub_mode_suffix (return_mode),
7908 id = get_identifier (name);
7909 func = gen_rtx_SYMBOL_REF (Pmode, IDENTIFIER_POINTER (id));
7910 retval = gen_rtx_REG (return_mode, GP_RETURN);
7911 call = gen_call_value_internal (retval, func, const0_rtx);
7912 insn = emit_call_insn (call);
7913 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), retval);
7916 /* Split the frame into two. STEP1 is the amount of stack we should
7917 deallocate before restoring the registers. STEP2 is the amount we
7918 should deallocate afterwards.
7920 Start off by assuming that no registers need to be restored. */
7921 step1 = cfun->machine->frame.total_size;
7924 /* Work out which register holds the frame address. Account for the
7925 frame pointer offset used by mips16 code. */
7926 if (!frame_pointer_needed)
7927 base = stack_pointer_rtx;
7930 base = hard_frame_pointer_rtx;
7932 step1 -= cfun->machine->frame.args_size;
7935 /* If we need to restore registers, deallocate as much stack as
7936 possible in the second step without going out of range. */
7937 if ((cfun->machine->frame.mask | cfun->machine->frame.fmask) != 0)
7939 step2 = MIN (step1, MIPS_MAX_FIRST_STACK_STEP);
7943 /* Set TARGET to BASE + STEP1. */
7949 /* Get an rtx for STEP1 that we can add to BASE. */
7950 adjust = GEN_INT (step1);
7951 if (!SMALL_OPERAND (step1))
7953 emit_move_insn (MIPS_EPILOGUE_TEMP (Pmode), adjust);
7954 adjust = MIPS_EPILOGUE_TEMP (Pmode);
7957 /* Normal mode code can copy the result straight into $sp. */
7959 target = stack_pointer_rtx;
7961 emit_insn (gen_add3_insn (target, base, adjust));
7964 /* Copy TARGET into the stack pointer. */
7965 if (target != stack_pointer_rtx)
7966 emit_move_insn (stack_pointer_rtx, target);
7968 /* If we're using addressing macros, $gp is implicitly used by all
7969 SYMBOL_REFs. We must emit a blockage insn before restoring $gp
7971 if (TARGET_CALL_SAVED_GP && !TARGET_EXPLICIT_RELOCS)
7972 emit_insn (gen_blockage ());
7974 if (GENERATE_MIPS16E_SAVE_RESTORE && cfun->machine->frame.mask != 0)
7976 unsigned int regno, mask;
7977 HOST_WIDE_INT offset;
7980 /* Generate the restore instruction. */
7981 mask = cfun->machine->frame.mask;
7982 restore = mips16e_build_save_restore (true, &mask, &offset, 0, step2);
7984 /* Restore any other registers manually. */
7985 for (regno = GP_REG_FIRST; regno < GP_REG_LAST; regno++)
7986 if (BITSET_P (mask, regno - GP_REG_FIRST))
7988 offset -= GET_MODE_SIZE (gpr_mode);
7989 mips_save_restore_reg (gpr_mode, regno, offset, mips_restore_reg);
7992 /* Restore the remaining registers and deallocate the final bit
7994 emit_insn (restore);
7998 /* Restore the registers. */
7999 mips_for_each_saved_reg (cfun->machine->frame.total_size - step2,
8002 /* Deallocate the final bit of the frame. */
8004 emit_insn (gen_add3_insn (stack_pointer_rtx,
8009 /* Add in the __builtin_eh_return stack adjustment. We need to
8010 use a temporary in mips16 code. */
8011 if (current_function_calls_eh_return)
8015 emit_move_insn (MIPS_EPILOGUE_TEMP (Pmode), stack_pointer_rtx);
8016 emit_insn (gen_add3_insn (MIPS_EPILOGUE_TEMP (Pmode),
8017 MIPS_EPILOGUE_TEMP (Pmode),
8018 EH_RETURN_STACKADJ_RTX));
8019 emit_move_insn (stack_pointer_rtx, MIPS_EPILOGUE_TEMP (Pmode));
8022 emit_insn (gen_add3_insn (stack_pointer_rtx,
8024 EH_RETURN_STACKADJ_RTX));
8029 /* When generating MIPS16 code, the normal mips_for_each_saved_reg
8030 path will restore the return address into $7 rather than $31. */
8032 && !GENERATE_MIPS16E_SAVE_RESTORE
8033 && (cfun->machine->frame.mask & RA_MASK) != 0)
8034 emit_jump_insn (gen_return_internal (gen_rtx_REG (Pmode,
8035 GP_REG_FIRST + 7)));
8037 emit_jump_insn (gen_return_internal (gen_rtx_REG (Pmode,
8038 GP_REG_FIRST + 31)));
8042 /* Return nonzero if this function is known to have a null epilogue.
8043 This allows the optimizer to omit jumps to jumps if no stack
/* NOTE(review): this listing is a sampled excerpt; the lines between the
   numbered source lines (e.g. the "return 0;" bodies of the guards below)
   are missing from view.  */
/* Nonzero only after reload, when $31 is not live, profiling is off,
   no mips16 FPR return-copy is needed, and the frame size is zero.  */
8047 mips_can_use_return_insn (void)
8049 if (! reload_completed)
8052 if (df_regs_ever_live_p (31) || current_function_profile)
8055 /* In mips16 mode, a function that returns a floating point value
8056 needs to arrange to copy the return value into the floating point
8058 if (mips16_cfun_returns_in_fpr_p ())
/* Use the cached frame size when available; otherwise compute it.  */
8061 if (cfun->machine->frame.initialized)
8062 return cfun->machine->frame.total_size == 0;
8064 return compute_frame_size (get_frame_size ()) == 0;
8067 /* Implement TARGET_ASM_OUTPUT_MI_THUNK. Generate rtl rather than asm text
8068 in order to avoid duplicating too much logic from elsewhere. */
/* NOTE(review): sampled excerpt — several original lines between the
   numbered lines are missing (return type, braces, else-arms).  */
8071 mips_output_mi_thunk (FILE *file, tree thunk_fndecl ATTRIBUTE_UNUSED,
8072 HOST_WIDE_INT delta, HOST_WIDE_INT vcall_offset,
/* Emits a thunk that adjusts the incoming "this" pointer by DELTA (and,
   if VCALL_OFFSET != 0, by a vtable-loaded offset) then tail-calls
   FUNCTION.  Written as RTL and run through final () by hand.  */
8075 rtx this, temp1, temp2, insn, fnaddr;
8077 /* Pretend to be a post-reload pass while generating rtl. */
8078 reload_completed = 1;
8080 /* Mark the end of the (empty) prologue. */
8081 emit_note (NOTE_INSN_PROLOGUE_END);
8083 /* Pick a global pointer. Use a call-clobbered register if
8084 TARGET_CALL_SAVED_GP, so that we can use a sibcall. */
8087 cfun->machine->global_pointer =
8088 TARGET_CALL_SAVED_GP ? 15 : GLOBAL_POINTER_REGNUM;
8090 SET_REGNO (pic_offset_table_rtx, cfun->machine->global_pointer);
8094 /* Set up the global pointer for n32 or n64 abicalls. If
8095 LOADGP_ABSOLUTE then the thunk does not use the gp and there is
8096 no need to load it.*/
8097 if (mips_current_loadgp_style () != LOADGP_ABSOLUTE
8098 || !targetm.binds_local_p (function))
8099 mips_emit_loadgp ();
8101 /* We need two temporary registers in some cases. */
8102 temp1 = gen_rtx_REG (Pmode, 2);
8103 temp2 = gen_rtx_REG (Pmode, 3);
8105 /* Find out which register contains the "this" pointer. */
/* If the return value is passed by invisible reference, "this" is the
   second GP argument register rather than the first.  */
8106 if (aggregate_value_p (TREE_TYPE (TREE_TYPE (function)), function))
8107 this = gen_rtx_REG (Pmode, GP_ARG_FIRST + 1);
8109 this = gen_rtx_REG (Pmode, GP_ARG_FIRST);
8111 /* Add DELTA to THIS. */
8114 rtx offset = GEN_INT (delta);
/* Large deltas must be loaded into a temporary first, since the add
   pattern only accepts SMALL_OPERAND immediates.  */
8115 if (!SMALL_OPERAND (delta))
8117 emit_move_insn (temp1, offset);
8120 emit_insn (gen_add3_insn (this, this, offset));
8123 /* If needed, add *(*THIS + VCALL_OFFSET) to THIS. */
8124 if (vcall_offset != 0)
8128 /* Set TEMP1 to *THIS. */
8129 emit_move_insn (temp1, gen_rtx_MEM (Pmode, this));
8131 /* Set ADDR to a legitimate address for *THIS + VCALL_OFFSET. */
8132 addr = mips_add_offset (temp2, temp1, vcall_offset);
8134 /* Load the offset and add it to THIS. */
8135 emit_move_insn (temp1, gen_rtx_MEM (Pmode, addr));
8136 emit_insn (gen_add3_insn (this, this, temp1));
8139 /* Jump to the target function. Use a sibcall if direct jumps are
8140 allowed, otherwise load the address into a register first. */
8141 fnaddr = XEXP (DECL_RTL (function), 0);
8142 if (TARGET_MIPS16 || TARGET_USE_GOT || SYMBOL_REF_LONG_CALL_P (fnaddr))
8144 /* This is messy. gas treats "la $25,foo" as part of a call
8145 sequence and may allow a global "foo" to be lazily bound.
8146 The general move patterns therefore reject this combination.
8148 In this context, lazy binding would actually be OK
8149 for TARGET_CALL_CLOBBERED_GP, but it's still wrong for
8150 TARGET_CALL_SAVED_GP; see mips_load_call_address.
8151 We must therefore load the address via a temporary
8152 register if mips_dangerous_for_la25_p.
8154 If we jump to the temporary register rather than $25, the assembler
8155 can use the move insn to fill the jump's delay slot. */
8156 if (TARGET_USE_PIC_FN_ADDR_REG
8157 && !mips_dangerous_for_la25_p (fnaddr))
8158 temp1 = gen_rtx_REG (Pmode, PIC_FUNCTION_ADDR_REGNUM);
8159 mips_load_call_address (temp1, fnaddr, true);
8161 if (TARGET_USE_PIC_FN_ADDR_REG
8162 && REGNO (temp1) != PIC_FUNCTION_ADDR_REGNUM)
8163 emit_move_insn (gen_rtx_REG (Pmode, PIC_FUNCTION_ADDR_REGNUM), temp1);
8164 emit_jump_insn (gen_indirect_jump (temp1));
/* Direct jump allowed: emit a real sibcall.  */
8168 insn = emit_call_insn (gen_sibcall_internal (fnaddr, const0_rtx));
8169 SIBLING_CALL_P (insn) = 1;
8172 /* Run just enough of rest_of_compilation. This sequence was
8173 "borrowed" from alpha.c. */
8174 insn = get_insns ();
8175 insn_locators_alloc ();
8176 split_all_insns_noflow ();
8178 mips16_lay_out_constants ();
8179 shorten_branches (insn);
8180 final_start_function (insn, file, 1);
8181 final (insn, file, 1);
8182 final_end_function ();
8184 /* Clean up the vars set above. Note that final_end_function resets
8185 the global pointer for us. */
8186 reload_completed = 0;
8189 /* Returns nonzero if X contains a SYMBOL_REF. */
/* Recursive walk: looks through CONST wrappers and (judging by the
   second recursion below, whose guard line is missing from this
   excerpt — TODO confirm) unary operators, and both operands of
   arithmetic codes.  */
8192 symbolic_expression_p (rtx x)
8194 if (GET_CODE (x) == SYMBOL_REF)
8197 if (GET_CODE (x) == CONST)
8198 return symbolic_expression_p (XEXP (x, 0));
/* NOTE(review): the condition guarding this recursion falls in lines
   missing from the excerpt.  */
8201 return symbolic_expression_p (XEXP (x, 0));
8203 if (ARITHMETIC_P (x))
8204 return (symbolic_expression_p (XEXP (x, 0))
8205 || symbolic_expression_p (XEXP (x, 1)));
8210 /* Choose the section to use for the constant rtx expression X that has
/* Implements the RTX-section target hook: mips16 constants live in the
   function's own section (for PC-relative loads); embedded targets use
   read-only mergeable sections; hosted targets prefer small data.  */
8214 mips_select_rtx_section (enum machine_mode mode, rtx x,
8215 unsigned HOST_WIDE_INT align)
8219 /* In mips16 mode, the constant table always goes in the same section
8220 as the function, so that constants can be loaded using PC relative
8222 return function_section (current_function_decl);
8224 else if (TARGET_EMBEDDED_DATA)
8226 /* For embedded applications, always put constants in read-only data,
8227 in order to reduce RAM usage. */
8228 return mergeable_constant_section (mode, align, 0);
8232 /* For hosted applications, always put constants in small data if
8233 possible, as this gives the best performance. */
8234 /* ??? Consider using mergeable small data sections. */
8236 if (GET_MODE_SIZE (mode) <= (unsigned) mips_section_threshold
8237 && mips_section_threshold > 0)
8238 return get_named_section (NULL, ".sdata", 0);
/* PIC symbolic constants need relocation; .data.rel.ro keeps them
   writable at load time but read-only afterwards.  */
8239 else if (flag_pic && symbolic_expression_p (x))
8240 return get_named_section (NULL, ".data.rel.ro", 3);
8242 return mergeable_constant_section (mode, align, 0);
8246 /* Implement TARGET_ASM_FUNCTION_RODATA_SECTION.
8248 The complication here is that, with the combination TARGET_ABICALLS
8249 && !TARGET_GPWORD, jump tables will use absolute addresses, and should
8250 therefore not be included in the read-only part of a DSO. Handle such
8251 cases by selecting a normal data section instead of a read-only one.
8252 The logic apes that in default_function_rodata_section. */
8255 mips_function_rodata_section (tree decl)
8257 if (!TARGET_ABICALLS || TARGET_GPWORD)
8258 return default_function_rodata_section (decl);
/* Mirror the function's own section name into a writable data section:
   ".gnu.linkonce.t.foo" -> linkonce data, ".text.foo" -> ".data.foo".  */
8260 if (decl && DECL_SECTION_NAME (decl))
8262 const char *name = TREE_STRING_POINTER (DECL_SECTION_NAME (decl));
8263 if (DECL_ONE_ONLY (decl) && strncmp (name, ".gnu.linkonce.t.", 16) == 0)
8265 char *rname = ASTRDUP (name);
/* NOTE(review): the edit of RNAME (replacing the ".t." component) falls
   in lines missing from this excerpt.  */
8267 return get_section (rname, SECTION_LINKONCE | SECTION_WRITE, decl);
8269 else if (flag_function_sections && flag_data_sections
8270 && strncmp (name, ".text.", 6) == 0)
8272 char *rname = ASTRDUP (name);
/* Overwrite "text" with "data" in the copied section name.  */
8273 memcpy (rname + 1, "data", 4);
8274 return get_section (rname, SECTION_WRITE, decl);
8277 return data_section;
8280 /* Implement TARGET_IN_SMALL_DATA_P. This function controls whether
8281 locally-defined objects go in a small data section. It also controls
8282 the setting of the SYMBOL_REF_SMALL_P flag, which in turn helps
8283 mips_classify_symbol decide when to use %gp_rel(...)($gp) accesses. */
8286 mips_in_small_data_p (tree decl)
/* Strings and functions never go in small data.  */
8290 if (TREE_CODE (decl) == STRING_CST || TREE_CODE (decl) == FUNCTION_DECL)
8293 /* We don't yet generate small-data references for -mabicalls or
8294 VxWorks RTP code. See the related -G handling in override_options. */
8295 if (TARGET_ABICALLS || TARGET_VXWORKS_RTP)
/* Variables with an explicit section attribute: honour the section.  */
8298 if (TREE_CODE (decl) == VAR_DECL && DECL_SECTION_NAME (decl) != 0)
8302 /* Reject anything that isn't in a known small-data section. */
8303 name = TREE_STRING_POINTER (DECL_SECTION_NAME (decl));
8304 if (strcmp (name, ".sdata") != 0 && strcmp (name, ".sbss") != 0)
8307 /* If a symbol is defined externally, the assembler will use the
8308 usual -G rules when deciding how to implement macros. */
8309 if (TARGET_EXPLICIT_RELOCS || !DECL_EXTERNAL (decl))
8312 else if (TARGET_EMBEDDED_DATA)
8314 /* Don't put constants into the small data section: we want them
8315 to be in ROM rather than RAM. */
8316 if (TREE_CODE (decl) != VAR_DECL)
8319 if (TREE_READONLY (decl)
8320 && !TREE_SIDE_EFFECTS (decl)
8321 && (!DECL_INITIAL (decl) || TREE_CONSTANT (DECL_INITIAL (decl))))
/* Finally, size-gate against the -G threshold; unknown (<= 0) sizes
   are rejected.  */
8325 size = int_size_in_bytes (TREE_TYPE (decl));
8326 return (size > 0 && size <= mips_section_threshold);
8329 /* Implement TARGET_USE_ANCHORS_FOR_SYMBOL_P. We don't want to use
8330 anchors for small data: the GP register acts as an anchor in that
8331 case. We also don't want to use them for PC-relative accesses,
8332 where the PC acts as an anchor. */
8335 mips_use_anchors_for_symbol_p (rtx symbol)
8337 switch (mips_classify_symbol (symbol, SYMBOL_CONTEXT_MEM))
/* NOTE(review): the return statements of this switch fall in lines
   missing from the excerpt; visibly these two cases are singled out.  */
8339 case SYMBOL_PC_RELATIVE:
8340 case SYMBOL_GP_RELATIVE:
8348 /* See whether VALTYPE is a record whose fields should be returned in
8349 floating-point registers. If so, return the number of fields and
8350 list them in FIELDS (which should have two elements). Return 0
8353 For n32 & n64, a structure with one or two fields is returned in
8354 floating-point registers as long as every field has a floating-point
8358 mips_fpr_return_fields (tree valtype, tree *fields)
8366 if (TREE_CODE (valtype) != RECORD_TYPE)
/* Walk the record's members, counting FIELD_DECLs with REAL_TYPE.
   The rejection paths (non-float field, more than two fields) fall in
   lines missing from this excerpt.  */
8370 for (field = TYPE_FIELDS (valtype); field != 0; field = TREE_CHAIN (field))
8372 if (TREE_CODE (field) != FIELD_DECL)
8375 if (TREE_CODE (TREE_TYPE (field)) != REAL_TYPE)
8381 fields[i++] = field;
8387 /* Implement TARGET_RETURN_IN_MSB. For n32 & n64, we should return
8388 a value in the most significant part of $2/$3 if:
8390 - the target is big-endian;
8392 - the value has a structure or union type (we generalize this to
8393 cover aggregates from other languages too); and
8395 - the structure is not returned in floating-point registers. */
8398 mips_return_in_msb (tree valtype)
8402 return (TARGET_NEWABI
8403 && TARGET_BIG_ENDIAN
8404 && AGGREGATE_TYPE_P (valtype)
8405 && mips_fpr_return_fields (valtype, fields) == 0);
8409 /* Return a composite value in a pair of floating-point registers.
8410 MODE1 and OFFSET1 are the mode and byte offset for the first value,
8411 likewise MODE2 and OFFSET2 for the second. MODE is the mode of the
8414 For n32 & n64, $f0 always holds the first value and $f2 the second.
8415 Otherwise the values are packed together as closely as possible. */
8418 mips_return_fpr_pair (enum machine_mode mode,
8419 enum machine_mode mode1, HOST_WIDE_INT offset1,
8420 enum machine_mode mode2, HOST_WIDE_INT offset2)
/* Register stride between the two return FPRs: 2 for NewABI ($f0/$f2),
   otherwise one FP "format" worth of registers.  */
8424 inc = (TARGET_NEWABI ? 2 : MAX_FPRS_PER_FMT);
8425 return gen_rtx_PARALLEL
8428 gen_rtx_EXPR_LIST (VOIDmode,
8429 gen_rtx_REG (mode1, FP_RETURN),
8431 gen_rtx_EXPR_LIST (VOIDmode,
8432 gen_rtx_REG (mode2, FP_RETURN + inc),
8433 GEN_INT (offset2))));
8438 /* Implement FUNCTION_VALUE and LIBCALL_VALUE. For normal calls,
8439 VALTYPE is the return type and MODE is VOIDmode. For libcalls,
8440 VALTYPE is null and MODE is the mode of the return value. */
/* NOTE(review): sampled excerpt — guards and else-arms between the
   numbered lines are missing from view.  */
8443 mips_function_value (tree valtype, tree func ATTRIBUTE_UNUSED,
8444 enum machine_mode mode)
8451 mode = TYPE_MODE (valtype);
8452 unsignedp = TYPE_UNSIGNED (valtype);
8454 /* Since we define TARGET_PROMOTE_FUNCTION_RETURN that returns
8455 true, we must promote the mode just as PROMOTE_MODE does. */
8456 mode = promote_mode (valtype, mode, &unsignedp, 1);
8458 /* Handle structures whose fields are returned in $f0/$f2. */
8459 switch (mips_fpr_return_fields (valtype, fields))
/* One float field -> $f0; two -> the $f0/$f2 pair.  */
8462 return gen_rtx_REG (mode, FP_RETURN);
8465 return mips_return_fpr_pair (mode,
8466 TYPE_MODE (TREE_TYPE (fields[0])),
8467 int_byte_position (fields[0]),
8468 TYPE_MODE (TREE_TYPE (fields[1])),
8469 int_byte_position (fields[1]));
8472 /* If a value is passed in the most significant part of a register, see
8473 whether we have to round the mode up to a whole number of words. */
8474 if (mips_return_in_msb (valtype))
8476 HOST_WIDE_INT size = int_size_in_bytes (valtype);
8477 if (size % UNITS_PER_WORD != 0)
8479 size += UNITS_PER_WORD - size % UNITS_PER_WORD;
8480 mode = mode_for_size (size * BITS_PER_UNIT, MODE_INT, 0);
8484 /* For EABI, the class of return register depends entirely on MODE.
8485 For example, "struct { some_type x; }" and "union { some_type x; }"
8486 are returned in the same way as a bare "some_type" would be.
8487 Other ABIs only use FPRs for scalar, complex or vector types. */
8488 if (mips_abi != ABI_EABI && !FLOAT_TYPE_P (valtype))
8489 return gen_rtx_REG (mode, GP_RETURN);
8494 /* Handle long doubles for n32 & n64. */
8496 return mips_return_fpr_pair (mode,
8498 DImode, GET_MODE_SIZE (mode) / 2);
8500 if (mips_return_mode_in_fpr_p (mode))
8502 if (GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT)
8503 return mips_return_fpr_pair (mode,
8504 GET_MODE_INNER (mode), 0,
8505 GET_MODE_INNER (mode),
8506 GET_MODE_SIZE (mode) / 2);
8508 return gen_rtx_REG (mode, FP_RETURN);
/* Fallback: everything else comes back in the GP return register.  */
8512 return gen_rtx_REG (mode, GP_RETURN);
8515 /* Return nonzero when an argument must be passed by reference. */
8518 mips_pass_by_reference (CUMULATIVE_ARGS *cum ATTRIBUTE_UNUSED,
8519 enum machine_mode mode, tree type,
8520 bool named ATTRIBUTE_UNUSED)
/* EABI: anything wider than a word (or of unknown size) goes by
   reference, except DImode/DFmode which are handled specially.  */
8522 if (mips_abi == ABI_EABI)
8526 /* ??? How should SCmode be handled? */
8527 if (mode == DImode || mode == DFmode)
8530 size = type ? int_size_in_bytes (type) : GET_MODE_SIZE (mode);
8531 return size == -1 || size > UNITS_PER_WORD;
8535 /* If we have a variable-sized parameter, we have no choice. */
8536 return targetm.calls.must_pass_in_stack (mode, type);
/* Implement TARGET_CALLEE_COPIES: under EABI the callee is responsible
   for copying named by-reference arguments.  */
8541 mips_callee_copies (CUMULATIVE_ARGS *cum ATTRIBUTE_UNUSED,
8542 enum machine_mode mode ATTRIBUTE_UNUSED,
8543 tree type ATTRIBUTE_UNUSED, bool named)
8545 return mips_abi == ABI_EABI && named;
8548 /* Return true if registers of class CLASS cannot change from mode FROM
8552 mips_cannot_change_mode_class (enum machine_mode from,
8553 enum machine_mode to, enum reg_class class)
/* Case 1: crossing the single-word/multi-word boundary.  */
8555 if (MIN (GET_MODE_SIZE (from), GET_MODE_SIZE (to)) <= UNITS_PER_WORD
8556 && MAX (GET_MODE_SIZE (from), GET_MODE_SIZE (to)) > UNITS_PER_WORD)
8558 if (TARGET_BIG_ENDIAN)
8560 /* When a multi-word value is stored in paired floating-point
8561 registers, the first register always holds the low word.
8562 We therefore can't allow FPRs to change between single-word
8563 and multi-word modes. */
8564 if (MAX_FPRS_PER_FMT > 1 && reg_classes_intersect_p (FP_REGS, class))
8569 /* gcc assumes that each word of a multiword register can be accessed
8570 individually using SUBREGs. This is not true for floating-point
8571 registers if they are bigger than a word. */
8572 if (UNITS_PER_FPREG > UNITS_PER_WORD
8573 && GET_MODE_SIZE (from) > UNITS_PER_WORD
8574 && GET_MODE_SIZE (to) < UNITS_PER_FPREG
8575 && reg_classes_intersect_p (FP_REGS, class))
8578 /* Loading a 32-bit value into a 64-bit floating-point register
8579 will not sign-extend the value, despite what LOAD_EXTEND_OP says.
8580 We can't allow 64-bit float registers to change from SImode to
/* NOTE(review): part of this third condition falls in lines missing
   from the excerpt.  */
8585 && GET_MODE_SIZE (to) >= UNITS_PER_WORD
8586 && reg_classes_intersect_p (FP_REGS, class))
8592 /* Return true if X should not be moved directly into register $25.
8593 We need this because many versions of GAS will treat "la $25,foo" as
8594 part of a call sequence and so allow a global "foo" to be lazily bound. */
8597 mips_dangerous_for_la25_p (rtx x)
/* Only global symbols loaded via assembler macros are at risk;
   explicit relocs spell out the access and are safe.  */
8599 return (!TARGET_EXPLICIT_RELOCS
8601 && GET_CODE (x) == SYMBOL_REF
8602 && mips_global_symbol_p (x));
8605 /* Implement PREFERRED_RELOAD_CLASS. */
8608 mips_preferred_reload_class (rtx x, enum reg_class class)
/* Keep la25-dangerous symbols out of $25 by preferring LEA_REGS.  */
8610 if (mips_dangerous_for_la25_p (x) && reg_class_subset_p (LEA_REGS, class))
/* Float values prefer FPRs when hard float is on; otherwise narrow to
   general (or mips16) registers.  The returned-class lines fall between
   the numbered lines missing from this excerpt.  */
8613 if (TARGET_HARD_FLOAT
8614 && FLOAT_MODE_P (GET_MODE (x))
8615 && reg_class_subset_p (FP_REGS, class))
8618 if (reg_class_subset_p (GR_REGS, class))
8621 if (TARGET_MIPS16 && reg_class_subset_p (M16_REGS, class))
8627 /* This function returns the register class required for a secondary
8628 register when copying between one of the registers in CLASS, and X,
8629 using MODE. If IN_P is nonzero, the copy is going from X to the
8630 register, otherwise the register is the source. A return value of
8631 NO_REGS means that no secondary register is required. */
8634 mips_secondary_reload_class (enum reg_class class,
8635 enum machine_mode mode, rtx x, int in_p)
8637 enum reg_class gr_regs = TARGET_MIPS16 ? M16_REGS : GR_REGS;
/* Resolve X to a hard register number where possible so the class
   tests below can use it.  */
8641 if (REG_P (x)|| GET_CODE (x) == SUBREG)
8642 regno = true_regnum (x);
8644 gp_reg_p = TARGET_MIPS16 ? M16_REG_P (regno) : GP_REG_P (regno);
/* Never reload an la25-dangerous symbol directly into $25.  */
8646 if (mips_dangerous_for_la25_p (x))
8649 if (TEST_HARD_REG_BIT (reg_class_contents[(int) class], 25))
8653 /* Copying from HI or LO to anywhere other than a general register
8654 requires a general register.
8655 This rule applies to both the original HI/LO pair and the new
8656 DSP accumulators. */
8657 if (reg_class_subset_p (class, ACC_REGS))
8659 if (TARGET_MIPS16 && in_p)
8661 /* We can't really copy to HI or LO at all in mips16 mode. */
8664 return gp_reg_p ? NO_REGS : gr_regs;
8666 if (ACC_REG_P (regno))
8668 if (TARGET_MIPS16 && ! in_p)
8670 /* We can't really copy to HI or LO at all in mips16 mode. */
8673 return class == gr_regs ? NO_REGS : gr_regs;
8676 /* We can only copy a value to a condition code register from a
8677 floating point register, and even then we require a scratch
8678 floating point register. We can only copy a value out of a
8679 condition code register into a general register. */
8680 if (class == ST_REGS)
8684 return gp_reg_p ? NO_REGS : gr_regs;
8686 if (ST_REG_P (regno))
8690 return class == gr_regs ? NO_REGS : gr_regs;
8693 if (class == FP_REGS)
8697 /* In this case we can use lwc1, swc1, ldc1 or sdc1. */
8700 else if (CONSTANT_P (x) && GET_MODE_CLASS (mode) == MODE_FLOAT)
8702 /* We can use the l.s and l.d macros to load floating-point
8703 constants. ??? For l.s, we could probably get better
8704 code by returning GR_REGS here. */
8707 else if (gp_reg_p || x == CONST0_RTX (mode))
8709 /* In this case we can use mtc1, mfc1, dmtc1 or dmfc1. */
8712 else if (FP_REG_P (regno))
8714 /* In this case we can use mov.s or mov.d. */
8719 /* Otherwise, we need to reload through an integer register. */
8724 /* In mips16 mode, going between memory and anything but M16_REGS
8725 requires an M16_REG. */
/* NOTE(review): the memory-operand guards around these two class tests
   fall in lines missing from this excerpt.  */
8728 if (class != M16_REGS && class != M16_NA_REGS)
8736 if (class == M16_REGS || class == M16_NA_REGS)
8745 /* Implement CLASS_MAX_NREGS.
8747 - UNITS_PER_FPREG controls the number of registers needed by FP_REGS.
8749 - ST_REGS are always hold CCmode values, and CCmode values are
8750 considered to be 4 bytes wide.
8752 All other register classes are covered by UNITS_PER_WORD. Note that
8753 this is true even for unions of integer and float registers when the
8754 latter are smaller than the former. The only supported combination
8755 in which case this occurs is -mgp64 -msingle-float, which has 64-bit
8756 words but 32-bit float registers. A word-based calculation is correct
8757 in that case since -msingle-float disallows multi-FPR values. */
8760 mips_class_max_nregs (enum reg_class class ATTRIBUTE_UNUSED,
8761 enum machine_mode mode)
/* All three cases are ceiling divisions of the mode size by the
   relevant register width.  */
8763 if (class == ST_REGS)
8764 return (GET_MODE_SIZE (mode) + 3) / 4;
8765 else if (class == FP_REGS)
8766 return (GET_MODE_SIZE (mode) + UNITS_PER_FPREG - 1) / UNITS_PER_FPREG;
8768 return (GET_MODE_SIZE (mode) + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
/* Pointers are SImode, plus DImode on 64-bit targets.  */
8772 mips_valid_pointer_mode (enum machine_mode mode)
8774 return (mode == SImode || (TARGET_64BIT && mode == DImode));
8777 /* Target hook for vector_mode_supported_p. */
8780 mips_vector_mode_supported_p (enum machine_mode mode)
/* NOTE(review): the mode switch cases fall in lines missing from this
   excerpt; visibly one case depends on paired-single support.  */
8785 return TARGET_PAIRED_SINGLE_FLOAT;
8796 /* If we can access small data directly (using gp-relative relocation
8797 operators) return the small data pointer, otherwise return null.
8799 For each mips16 function which refers to GP relative symbols, we
8800 use a pseudo register, initialized at the start of the function, to
8801 hold the $gp value. */
8804 mips16_gp_pseudo_reg (void)
/* Lazily create the per-function pseudo on first use.  */
8806 if (cfun->machine->mips16_gp_pseudo_rtx == NULL_RTX)
8807 cfun->machine->mips16_gp_pseudo_rtx = gen_reg_rtx (Pmode);
8809 /* Don't initialize the pseudo register if we are being called from
8810 the tree optimizers' cost-calculation routines. */
8811 if (!cfun->machine->initialized_mips16_gp_pseudo_p
8812 && current_ir_type () != IR_GIMPLE)
8816 /* We want to initialize this to a value which gcc will believe
8818 insn = gen_load_const_gp (cfun->machine->mips16_gp_pseudo_rtx);
8820 push_topmost_sequence ();
8821 /* We need to emit the initialization after the FUNCTION_BEG
8822 note, so that it will be integrated. */
8823 for (scan = get_insns (); scan != NULL_RTX; scan = NEXT_INSN (scan))
8825 && NOTE_KIND (scan) == NOTE_INSN_FUNCTION_BEG)
/* Fall back to the very first insn if no FUNCTION_BEG note exists.  */
8827 if (scan == NULL_RTX)
8828 scan = get_insns ();
8829 insn = emit_insn_after (insn, scan);
8830 pop_topmost_sequence ();
/* Mark done so the initialization is emitted only once.  */
8832 cfun->machine->initialized_mips16_gp_pseudo_p = true;
8835 return cfun->machine->mips16_gp_pseudo_rtx;
8838 /* Write out code to move floating point arguments in or out of
8839 general registers. Output the instructions to FILE. FP_CODE is
8840 the code describing which arguments are present (see the comment at
8841 the definition of CUMULATIVE_ARGS in mips.h). FROM_FP_P is nonzero if
8842 we are copying from the floating point registers. */
8845 mips16_fp_args (FILE *file, int fp_code, int from_fp_p)
8850 CUMULATIVE_ARGS cum;
8852 /* This code only works for the original 32-bit ABI and the O64 ABI. */
8853 gcc_assert (TARGET_OLDABI);
8860 init_cumulative_args (&cum, NULL, NULL);
/* FP_CODE packs one 2-bit field per FP argument; walk them in order.
   The mode selection for each field value (1 = float, 2 = double) falls
   partly in lines missing from this excerpt.  */
8862 for (f = (unsigned int) fp_code; f != 0; f >>= 2)
8864 enum machine_mode mode;
8865 struct mips_arg_info info;
8869 else if ((f & 3) == 2)
8874 mips_arg_info (&cum, mode, NULL, true, &info);
8875 gparg = mips_arg_regno (&info, false);
8876 fparg = mips_arg_regno (&info, true);
/* Single-precision / 64-bit targets need one move; otherwise a
   doubleword is split across two GPRs, with the pairing order decided
   by endianness (or mfhc1/mthc1 when the ISA has it).  */
8879 fprintf (file, "\t%s\t%s,%s\n", s,
8880 reg_names[gparg], reg_names[fparg]);
8881 else if (TARGET_64BIT)
8882 fprintf (file, "\td%s\t%s,%s\n", s,
8883 reg_names[gparg], reg_names[fparg]);
8884 else if (ISA_HAS_MXHC1)
8885 /* -mips32r2 -mfp64 */
8886 fprintf (file, "\t%s\t%s,%s\n\t%s\t%s,%s\n",
8888 reg_names[gparg + (WORDS_BIG_ENDIAN ? 1 : 0)],
8890 from_fp_p ? "mfhc1" : "mthc1",
8891 reg_names[gparg + (WORDS_BIG_ENDIAN ? 0 : 1)],
8893 else if (TARGET_BIG_ENDIAN)
8894 fprintf (file, "\t%s\t%s,%s\n\t%s\t%s,%s\n", s,
8895 reg_names[gparg], reg_names[fparg + 1], s,
8896 reg_names[gparg + 1], reg_names[fparg]);
8898 fprintf (file, "\t%s\t%s,%s\n\t%s\t%s,%s\n", s,
8899 reg_names[gparg], reg_names[fparg], s,
8900 reg_names[gparg + 1], reg_names[fparg + 1]);
/* Advance the cumulative-args state past this argument.  */
8902 function_arg_advance (&cum, mode, NULL, true);
8906 /* Build a mips16 function stub. This is used for functions which
8907 take arguments in the floating point registers. It is 32-bit code
8908 that moves the floating point args into the general registers, and
8909 then jumps to the 16-bit code. */
8912 build_mips16_function_stub (FILE *file)
8915 char *secname, *stubname;
8916 tree stubid, stubdecl;
/* Derive the stub's section (".mips16.fn.NAME") and symbol
   ("__fn_stub_NAME") from the current function's assembler name.  */
8920 fnname = XSTR (XEXP (DECL_RTL (current_function_decl), 0), 0);
8921 secname = (char *) alloca (strlen (fnname) + 20);
8922 sprintf (secname, ".mips16.fn.%s", fnname);
8923 stubname = (char *) alloca (strlen (fnname) + 20);
8924 sprintf (stubname, "__fn_stub_%s", fnname);
8925 stubid = get_identifier (stubname);
8926 stubdecl = build_decl (FUNCTION_DECL, stubid,
8927 build_function_type (void_type_node, NULL_TREE));
8928 DECL_SECTION_NAME (stubdecl) = build_string (strlen (secname), secname);
8929 DECL_RESULT (stubdecl) = build_decl (RESULT_DECL, NULL_TREE, void_type_node);
/* Emit a human-readable comment listing the FP argument types.  */
8931 fprintf (file, "\t# Stub function for %s (", current_function_name ());
8933 for (f = (unsigned int) current_function_args_info.fp_code; f != 0; f >>= 2)
8935 fprintf (file, "%s%s",
8936 need_comma ? ", " : "",
8937 (f & 3) == 1 ? "float" : "double");
8940 fprintf (file, ")\n");
8942 fprintf (file, "\t.set\tnomips16\n");
8943 switch_to_section (function_section (stubdecl));
8944 ASM_OUTPUT_ALIGN (file, floor_log2 (FUNCTION_BOUNDARY / BITS_PER_UNIT));
8946 /* ??? If FUNCTION_NAME_ALREADY_DECLARED is defined, then we are
8947 within a .ent, and we cannot emit another .ent. */
8948 if (!FUNCTION_NAME_ALREADY_DECLARED)
8950 fputs ("\t.ent\t", file);
8951 assemble_name (file, stubname);
8955 assemble_name (file, stubname);
8956 fputs (":\n", file);
8958 /* We don't want the assembler to insert any nops here. */
8959 fprintf (file, "\t.set\tnoreorder\n");
/* Move FP args into GPRs (from_fp_p == 1), then jump to the mips16
   body via $1 ($at).  */
8961 mips16_fp_args (file, current_function_args_info.fp_code, 1);
8963 fprintf (asm_out_file, "\t.set\tnoat\n");
8964 fprintf (asm_out_file, "\tla\t%s,", reg_names[GP_REG_FIRST + 1]);
8965 assemble_name (file, fnname);
8966 fprintf (file, "\n");
8967 fprintf (asm_out_file, "\tjr\t%s\n", reg_names[GP_REG_FIRST + 1]);
8968 fprintf (asm_out_file, "\t.set\tat\n");
8970 /* Unfortunately, we can't fill the jump delay slot. We can't fill
8971 with one of the mfc1 instructions, because the result is not
8972 available for one instruction, so if the very first instruction
8973 in the function refers to the register, it will see the wrong
8975 fprintf (file, "\tnop\n");
8977 fprintf (file, "\t.set\treorder\n");
8979 if (!FUNCTION_NAME_ALREADY_DECLARED)
8981 fputs ("\t.end\t", file);
8982 assemble_name (file, stubname);
/* Restore mips16 mode and return to the real function's section.  */
8986 fprintf (file, "\t.set\tmips16\n");
8988 switch_to_section (function_section (current_function_decl));
8991 /* We keep a list of functions for which we have already built stubs
8992 in build_mips16_call_stub. */
/* NOTE(review): the remaining members of struct mips16_stub (visibly a
   singly-linked list node; presumably a name and FP info — confirm in
   the full source) fall in lines missing from this excerpt.  */
8996 struct mips16_stub *next;
/* Head of the list of already-built call stubs.  */
9001 static struct mips16_stub *mips16_stubs;
9003 /* Emit code to return a double value from a mips16 stub. GPREG is the
9004 first GP reg to use, FPREG is the first FP reg to use. */
9007 mips16_fpret_double (int gpreg, int fpreg)
/* 64-bit GPRs: one dmfc1 suffices.  (The guard for this branch falls
   in a line missing from this excerpt.)  */
9010 fprintf (asm_out_file, "\tdmfc1\t%s,%s\n",
9011 reg_names[gpreg], reg_names[fpreg]);
9012 else if (TARGET_FLOAT64)
/* 32-bit GPRs with 64-bit FPRs: move low half with mfc1 and high half
   with mfhc1, ordered by endianness.  */
9014 fprintf (asm_out_file, "\tmfc1\t%s,%s\n",
9015 reg_names[gpreg + WORDS_BIG_ENDIAN],
9017 fprintf (asm_out_file, "\tmfhc1\t%s,%s\n",
9018 reg_names[gpreg + !WORDS_BIG_ENDIAN],
/* Paired 32-bit FPRs: two mfc1s, register pairing by endianness.  */
9023 if (TARGET_BIG_ENDIAN)
9025 fprintf (asm_out_file, "\tmfc1\t%s,%s\n",
9026 reg_names[gpreg + 0],
9027 reg_names[fpreg + 1]);
9028 fprintf (asm_out_file, "\tmfc1\t%s,%s\n",
9029 reg_names[gpreg + 1],
9030 reg_names[fpreg + 0]);
9034 fprintf (asm_out_file, "\tmfc1\t%s,%s\n",
9035 reg_names[gpreg + 0],
9036 reg_names[fpreg + 0]);
9037 fprintf (asm_out_file, "\tmfc1\t%s,%s\n",
9038 reg_names[gpreg + 1],
9039 reg_names[fpreg + 1]);
9044 /* Build a call stub for a mips16 call. A stub is needed if we are
9045 passing any floating point values which should go into the floating
9046 point registers. If we are, and the call turns out to be to a
9047 32-bit function, the stub will be used to move the values into the
9048 floating point registers before calling the 32-bit function. The
9049 linker will magically adjust the function call to either the 16-bit
9050 function or the 32-bit stub, depending upon where the function call
9051 is actually defined.
9053 Similarly, we need a stub if the return value might come back in a
9054 floating point register.
9056 RETVAL is the location of the return value, or null if this is
9057 a call rather than a call_value. FN is the address of the
9058 function and ARG_SIZE is the size of the arguments. FP_CODE
9059 is the code built by function_arg. This function returns a nonzero
9060 value if it builds the call instruction itself. */
9063 build_mips16_call_stub (rtx retval, rtx fn, rtx arg_size, int fp_code)
9067 char *secname, *stubname;
9068 struct mips16_stub *l;
9069 tree stubid, stubdecl;
9073 /* We don't need to do anything if we aren't in mips16 mode, or if
9074 we were invoked with the -msoft-float option. */
9075 if (!mips16_hard_float)
9078 /* Figure out whether the value might come back in a floating point
9081 fpret = mips_return_mode_in_fpr_p (GET_MODE (retval));
9083 /* We don't need to do anything if there were no floating point
9084 arguments and the value will not be returned in a floating point
9086 if (fp_code == 0 && ! fpret)
9089 /* We don't need to do anything if this is a call to a special
9090 mips16 support function. */
9091 if (GET_CODE (fn) == SYMBOL_REF
9092 && strncmp (XSTR (fn, 0), "__mips16_", 9) == 0)
9095 /* This code will only work for o32 and o64 abis. The other ABI's
9096 require more sophisticated support. */
9097 gcc_assert (TARGET_OLDABI);
9099 /* If we're calling via a function pointer, then we must always call
9100 via a stub. There are magic stubs provided in libgcc.a for each
9101 of the required cases. Each of them expects the function address
9102 to arrive in register $2. */
9104 if (GET_CODE (fn) != SYMBOL_REF)
9110 /* ??? If this code is modified to support other ABI's, we need
9111 to handle PARALLEL return values here. */
9114 sprintf (buf, "__mips16_call_stub_%s_%d",
9115 mips16_call_stub_mode_suffix (GET_MODE (retval)),
9118 sprintf (buf, "__mips16_call_stub_%d",
9121 id = get_identifier (buf);
9122 stub_fn = gen_rtx_SYMBOL_REF (Pmode, IDENTIFIER_POINTER (id));
9124 emit_move_insn (gen_rtx_REG (Pmode, 2), fn);
9126 if (retval == NULL_RTX)
9127 insn = gen_call_internal (stub_fn, arg_size);
9129 insn = gen_call_value_internal (retval, stub_fn, arg_size);
9130 insn = emit_call_insn (insn);
9132 /* Put the register usage information on the CALL. */
9133 CALL_INSN_FUNCTION_USAGE (insn) =
9134 gen_rtx_EXPR_LIST (VOIDmode,
9135 gen_rtx_USE (VOIDmode, gen_rtx_REG (Pmode, 2)),
9136 CALL_INSN_FUNCTION_USAGE (insn));
9138 /* If we are handling a floating point return value, we need to
9139 save $18 in the function prologue. Putting a note on the
9140 call will mean that df_regs_ever_live_p ($18) will be true if the
9141 call is not eliminated, and we can check that in the prologue
9144 CALL_INSN_FUNCTION_USAGE (insn) =
9145 gen_rtx_EXPR_LIST (VOIDmode,
9146 gen_rtx_USE (VOIDmode,
9147 gen_rtx_REG (word_mode, 18)),
9148 CALL_INSN_FUNCTION_USAGE (insn));
9150 /* Return 1 to tell the caller that we've generated the call
9155 /* We know the function we are going to call. If we have already
9156 built a stub, we don't need to do anything further. */
9158 fnname = XSTR (fn, 0);
9159 for (l = mips16_stubs; l != NULL; l = l->next)
9160 if (strcmp (l->name, fnname) == 0)
9165 /* Build a special purpose stub. When the linker sees a
9166 function call in mips16 code, it will check where the target
9167 is defined. If the target is a 32-bit call, the linker will
9168 search for the section defined here. It can tell which
9169 symbol this section is associated with by looking at the
9170 relocation information (the name is unreliable, since this
9171 might be a static function). If such a section is found, the
9172 linker will redirect the call to the start of the magic
9175 If the function does not return a floating point value, the
9176 special stub section is named
9179 If the function does return a floating point value, the stub
9181 .mips16.call.fp.FNNAME
9184 secname = (char *) alloca (strlen (fnname) + 40);
9185 sprintf (secname, ".mips16.call.%s%s",
9188 stubname = (char *) alloca (strlen (fnname) + 20);
9189 sprintf (stubname, "__call_stub_%s%s",
9192 stubid = get_identifier (stubname);
9193 stubdecl = build_decl (FUNCTION_DECL, stubid,
9194 build_function_type (void_type_node, NULL_TREE));
9195 DECL_SECTION_NAME (stubdecl) = build_string (strlen (secname), secname);
9196 DECL_RESULT (stubdecl) = build_decl (RESULT_DECL, NULL_TREE, void_type_node);
9198 fprintf (asm_out_file, "\t# Stub function to call %s%s (",
9200 ? (GET_MODE (retval) == SFmode ? "float " : "double ")
9204 for (f = (unsigned int) fp_code; f != 0; f >>= 2)
9206 fprintf (asm_out_file, "%s%s",
9207 need_comma ? ", " : "",
9208 (f & 3) == 1 ? "float" : "double");
9211 fprintf (asm_out_file, ")\n");
9213 fprintf (asm_out_file, "\t.set\tnomips16\n");
9214 assemble_start_function (stubdecl, stubname);
9216 if (!FUNCTION_NAME_ALREADY_DECLARED)
9218 fputs ("\t.ent\t", asm_out_file);
9219 assemble_name (asm_out_file, stubname);
9220 fputs ("\n", asm_out_file);
9222 assemble_name (asm_out_file, stubname);
9223 fputs (":\n", asm_out_file);
9226 /* We build the stub code by hand. That's the only way we can
9227 do it, since we can't generate 32-bit code during a 16-bit
9230 /* We don't want the assembler to insert any nops here. */
9231 fprintf (asm_out_file, "\t.set\tnoreorder\n");
9233 mips16_fp_args (asm_out_file, fp_code, 0);
9237 fprintf (asm_out_file, "\t.set\tnoat\n");
9238 fprintf (asm_out_file, "\tla\t%s,%s\n", reg_names[GP_REG_FIRST + 1],
9240 fprintf (asm_out_file, "\tjr\t%s\n", reg_names[GP_REG_FIRST + 1]);
9241 fprintf (asm_out_file, "\t.set\tat\n");
9242 /* Unfortunately, we can't fill the jump delay slot. We
9243 can't fill with one of the mtc1 instructions, because the
9244 result is not available for one instruction, so if the
9245 very first instruction in the function refers to the
9246 register, it will see the wrong value. */
9247 fprintf (asm_out_file, "\tnop\n");
9251 fprintf (asm_out_file, "\tmove\t%s,%s\n",
9252 reg_names[GP_REG_FIRST + 18], reg_names[GP_REG_FIRST + 31]);
9253 fprintf (asm_out_file, "\tjal\t%s\n", fnname);
9254 /* As above, we can't fill the delay slot. */
9255 fprintf (asm_out_file, "\tnop\n");
9256 if (GET_MODE (retval) == SFmode)
9257 fprintf (asm_out_file, "\tmfc1\t%s,%s\n",
9258 reg_names[GP_REG_FIRST + 2], reg_names[FP_REG_FIRST + 0]);
9259 else if (GET_MODE (retval) == SCmode)
9261 fprintf (asm_out_file, "\tmfc1\t%s,%s\n",
9262 reg_names[GP_REG_FIRST + 2],
9263 reg_names[FP_REG_FIRST + 0]);
9264 fprintf (asm_out_file, "\tmfc1\t%s,%s\n",
9265 reg_names[GP_REG_FIRST + 3],
9266 reg_names[FP_REG_FIRST + MAX_FPRS_PER_FMT]);
9268 else if (GET_MODE (retval) == DFmode
9269 || GET_MODE (retval) == V2SFmode)
9271 mips16_fpret_double (GP_REG_FIRST + 2, FP_REG_FIRST + 0);
9273 else if (GET_MODE (retval) == DCmode)
9275 mips16_fpret_double (GP_REG_FIRST + 2,
9277 mips16_fpret_double (GP_REG_FIRST + 4,
9278 FP_REG_FIRST + MAX_FPRS_PER_FMT);
9282 if (TARGET_BIG_ENDIAN)
9284 fprintf (asm_out_file, "\tmfc1\t%s,%s\n",
9285 reg_names[GP_REG_FIRST + 2],
9286 reg_names[FP_REG_FIRST + 1]);
9287 fprintf (asm_out_file, "\tmfc1\t%s,%s\n",
9288 reg_names[GP_REG_FIRST + 3],
9289 reg_names[FP_REG_FIRST + 0]);
9293 fprintf (asm_out_file, "\tmfc1\t%s,%s\n",
9294 reg_names[GP_REG_FIRST + 2],
9295 reg_names[FP_REG_FIRST + 0]);
9296 fprintf (asm_out_file, "\tmfc1\t%s,%s\n",
9297 reg_names[GP_REG_FIRST + 3],
9298 reg_names[FP_REG_FIRST + 1]);
9301 fprintf (asm_out_file, "\tj\t%s\n", reg_names[GP_REG_FIRST + 18]);
9302 /* As above, we can't fill the delay slot. */
9303 fprintf (asm_out_file, "\tnop\n");
9306 fprintf (asm_out_file, "\t.set\treorder\n");
9308 #ifdef ASM_DECLARE_FUNCTION_SIZE
9309 ASM_DECLARE_FUNCTION_SIZE (asm_out_file, stubname, stubdecl);
9312 if (!FUNCTION_NAME_ALREADY_DECLARED)
9314 fputs ("\t.end\t", asm_out_file);
9315 assemble_name (asm_out_file, stubname);
9316 fputs ("\n", asm_out_file);
9319 fprintf (asm_out_file, "\t.set\tmips16\n");
9321 /* Record this stub. */
9322 l = (struct mips16_stub *) xmalloc (sizeof *l);
9323 l->name = xstrdup (fnname);
9325 l->next = mips16_stubs;
9329 /* If we expect a floating point return value, but we've built a
9330 stub which does not expect one, then we're in trouble. We can't
9331 use the existing stub, because it won't handle the floating point
9332 value. We can't build a new stub, because the linker won't know
9333 which stub to use for the various calls in this object file.
9334 Fortunately, this case is illegal, since it means that a function
9335 was declared in two different ways in a single compilation. */
9336 if (fpret && ! l->fpret)
9337 error ("cannot handle inconsistent calls to %qs", fnname);
9339 /* If we are calling a stub which handles a floating point return
9340 value, we need to arrange to save $18 in the prologue. We do
9341 this by marking the function call as using the register. The
9342 prologue will later see that it is used, and emit code to save
9349 if (retval == NULL_RTX)
9350 insn = gen_call_internal (fn, arg_size);
9352 insn = gen_call_value_internal (retval, fn, arg_size);
9353 insn = emit_call_insn (insn);
9355 CALL_INSN_FUNCTION_USAGE (insn) =
9356 gen_rtx_EXPR_LIST (VOIDmode,
9357 gen_rtx_USE (VOIDmode, gen_rtx_REG (word_mode, 18)),
9358 CALL_INSN_FUNCTION_USAGE (insn));
9360 /* Return 1 to tell the caller that we've generated the call
9365 /* Return 0 to let the caller generate the call insn. */
9369 /* An entry in the mips16 constant pool. VALUE is the pool constant,
9370 MODE is its mode, and LABEL is the CODE_LABEL associated with it. */
9372 struct mips16_constant {
/* Next entry in the pool's singly-linked list; add_constant keeps the
   list sorted by increasing mode size.  */
9373 struct mips16_constant *next;
/* Mode of the constant, used when emitting it into the pool.
   NOTE(review): the VALUE and LABEL fields described in the block
   comment above are not visible in this excerpt -- confirm against
   the full source.  */
9376 enum machine_mode mode;
9379 /* Information about an incomplete mips16 constant pool. FIRST is the
9380 first constant, HIGHEST_ADDRESS is the highest address that the first
9381 byte of the pool can have, and INSN_ADDRESS is the current instruction
address (presumably in bytes from the start of the function -- the
rest of this sentence is missing from the excerpt; confirm).  */
9384 struct mips16_constant_pool {
/* Head of the sorted list of pool entries; null while the pool is empty.  */
9385 struct mips16_constant *first;
/* Highest address the first byte of the pool can have and still be
   reachable from every instruction that references it.  */
9386 int highest_address;
9390 /* Add constant VALUE to POOL and return its label. MODE is the
9391 value's mode (used for CONST_INTs, etc.). */
9394 add_constant (struct mips16_constant_pool *pool,
9395 rtx value, enum machine_mode mode)
9397 struct mips16_constant **p, *c;
9398 bool first_of_size_p;
9400 /* See whether the constant is already in the pool. If so, return the
9401 existing label, otherwise leave P pointing to the place where the
9402 constant should be added.
9404 Keep the pool sorted in increasing order of mode size so that we can
9405 reduce the number of alignments needed. */
9406 first_of_size_p = true;
9407 for (p = &pool->first; *p != 0; p = &(*p)->next)
/* Deduplicate: an existing entry with the same mode and value is reused.  */
9409 if (mode == (*p)->mode && rtx_equal_p (value, (*p)->value))
/* Stop once entries become larger than MODE; P is the insertion point.  */
9411 if (GET_MODE_SIZE (mode) < GET_MODE_SIZE ((*p)->mode))
9413 if (GET_MODE_SIZE (mode) == GET_MODE_SIZE ((*p)->mode))
9414 first_of_size_p = false;
9417 /* In the worst case, the constant needed by the earliest instruction
9418 will end up at the end of the pool. The entire pool must then be
9419 accessible from that instruction.
9421 When adding the first constant, set the pool's highest address to
9422 the address of the first out-of-range byte. Adjust this address
9423 downwards each time a new constant is added. */
9424 if (pool->first == 0)
9425 /* For pc-relative lw, addiu and daddiu instructions, the base PC value
9426 is the address of the instruction with the lowest two bits clear.
9427 The base PC value for ld has the lowest three bits clear. Assume
9428 the worst case here. */
9429 pool->highest_address = pool->insn_address - (UNITS_PER_WORD - 2) + 0x8000;
9430 pool->highest_address -= GET_MODE_SIZE (mode);
9431 if (first_of_size_p)
9432 /* Take into account the worst possible padding due to alignment. */
9433 pool->highest_address -= GET_MODE_SIZE (mode) - 1;
9435 /* Create a new entry. */
/* xmalloc (libiberty) aborts on allocation failure, so no NULL check
   is needed here.  */
9436 c = (struct mips16_constant *) xmalloc (sizeof *c);
9439 c->label = gen_label_rtx ();
9446 /* Output constant VALUE after instruction INSN and return the last
9447 instruction emitted. MODE is the mode of the constant. */
9450 dump_constants_1 (enum machine_mode mode, rtx value, rtx insn)
9452 switch (GET_MODE_CLASS (mode))
/* Integer constants: emit a consttable_int of the mode's byte size.  */
9456 rtx size = GEN_INT (GET_MODE_SIZE (mode));
9457 return emit_insn_after (gen_consttable_int (value, size), insn);
/* Floating-point constants: emit a consttable_float.  */
9461 return emit_insn_after (gen_consttable_float (value), insn);
9463 case MODE_VECTOR_FLOAT:
9464 case MODE_VECTOR_INT:
/* Vectors: recurse on each element, chaining the emitted insns.  */
9467 for (i = 0; i < CONST_VECTOR_NUNITS (value); i++)
9468 insn = dump_constants_1 (GET_MODE_INNER (mode),
9469 CONST_VECTOR_ELT (value, i), insn);
9479 /* Dump out the constants in CONSTANTS after INSN. */
9482 dump_constants (struct mips16_constant *constants, rtx insn)
9484 struct mips16_constant *c, *next;
/* The list is sorted by increasing mode size (see add_constant), so
   alignment only ever needs to increase as we walk it.  */
9488 for (c = constants; c != NULL; c = next)
9490 /* If necessary, increase the alignment of PC. */
9491 if (align < GET_MODE_SIZE (c->mode))
9493 int align_log = floor_log2 (GET_MODE_SIZE (c->mode));
9494 insn = emit_insn_after (gen_align (GEN_INT (align_log)), insn);
9496 align = GET_MODE_SIZE (c->mode);
/* Emit the entry's label, then the constant itself.  */
9498 insn = emit_label_after (c->label, insn);
9499 insn = dump_constants_1 (c->mode, c->value, insn);
/* Terminate the pool so that execution cannot fall into it.  */
9505 emit_barrier_after (insn);
9508 /* Return the length of instruction INSN, in bytes. */
9511 mips16_insn_length (rtx insn)
9515 rtx body = PATTERN (insn);
/* Jump tables: the length is the element size times the element count.
   ADDR_DIFF_VECs keep their entries in operand 1, not operand 0.  */
9516 if (GET_CODE (body) == ADDR_VEC)
9517 return GET_MODE_SIZE (GET_MODE (body)) * XVECLEN (body, 0);
9518 if (GET_CODE (body) == ADDR_DIFF_VEC)
9519 return GET_MODE_SIZE (GET_MODE (body)) * XVECLEN (body, 1);
/* Everything else: use the length computed from the .md attributes.  */
9521 return get_attr_length (insn);
9524 /* Rewrite *X so that constant pool references refer to the constant's
9525 label instead. DATA points to the constant pool structure.
A for_each_rtx callback. */
9528 mips16_rewrite_pool_refs (rtx *x, void *data)
9530 struct mips16_constant_pool *pool = data;
/* Only SYMBOL_REFs that address the function's constant pool are
   rewritten; add_constant deduplicates, so repeated references to the
   same constant share one label.  */
9531 if (GET_CODE (*x) == SYMBOL_REF && CONSTANT_POOL_ADDRESS_P (*x))
9532 *x = gen_rtx_LABEL_REF (Pmode, add_constant (pool,
9533 get_pool_constant (*x),
9534 get_pool_mode (*x)));
9538 /* Build MIPS16 constant pools: walk the insn chain, redirect constant
pool references to local labels, and emit the pool contents inline
before they go out of range of the pc-relative load instructions. */
9541 mips16_lay_out_constants (void)
9543 struct mips16_constant_pool pool;
9547 memset (&pool, 0, sizeof (pool));
9548 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
9550 /* Rewrite constant pool references in INSN. */
9552 for_each_rtx (&PATTERN (insn), mips16_rewrite_pool_refs, &pool);
/* Track the running byte address of the current instruction.  */
9554 pool.insn_address += mips16_insn_length (insn);
9556 if (pool.first != NULL)
9558 /* If there are no natural barriers between the first user of
9559 the pool and the highest acceptable address, we'll need to
9560 create a new instruction to jump around the constant pool.
9561 In the worst case, this instruction will be 4 bytes long.
9563 If it's too late to do this transformation after INSN,
9564 do it immediately before INSN. */
9565 if (barrier == 0 && pool.insn_address + 4 > pool.highest_address)
9569 label = gen_label_rtx ()
9571 jump = emit_jump_insn_before (gen_jump (label), insn);
9572 JUMP_LABEL (jump) = label;
9573 LABEL_NUSES (label) = 1;
9574 barrier = emit_barrier_after (jump);
9576 emit_label_after (label, barrier);
/* Account for the 4-byte jump we just inserted.  */
9577 pool.insn_address += 4;
9580 /* See whether the constant pool is now out of range of the first
9581 user. If so, output the constants after the previous barrier.
9582 Note that any instructions between BARRIER and INSN (inclusive)
9583 will use negative offsets to refer to the pool. */
9584 if (pool.insn_address > pool.highest_address)
9586 dump_constants (pool.first, barrier);
/* Remember existing barriers so we can dump the pool after them
   instead of synthesizing a jump.  */
9590 else if (BARRIER_P (insn))
/* Flush any constants still pending at the end of the function.  */
9594 dump_constants (pool.first, get_last_insn ());
9597 /* A temporary variable used by for_each_rtx callbacks, etc.: the
instruction currently being processed by the mips_sim_* routines. */
9598 static rtx mips_sim_insn;
9600 /* A structure representing the state of the processor pipeline.
9601 Used by the mips_sim_* family of functions.
NOTE(review): the "struct mips_sim {" opener and some field
declarations (e.g. TIME, the LAST_SET member struct, DFA_STATE) are
missing from this excerpt -- confirm against the full source. */
9603 /* The maximum number of instructions that can be issued in a cycle.
9604 (Caches mips_issue_rate.) */
9605 unsigned int issue_rate;
9607 /* The current simulation time. */
9610 /* How many more instructions can be issued in the current cycle. */
9611 unsigned int insns_left;
9613 /* LAST_SET[X].INSN is the last instruction to set register X.
9614 LAST_SET[X].TIME is the time at which that instruction was issued.
9615 INSN is null if no instruction has yet set register X. */
9619 } last_set[FIRST_PSEUDO_REGISTER];
9621 /* The pipeline's current DFA state. */
9625 /* Reset STATE to the initial simulation state: a full issue window,
no recorded register writes, and a freshly reset DFA state. */
9628 mips_sim_reset (struct mips_sim *state)
9631 state->insns_left = state->issue_rate;
9632 memset (&state->last_set, 0, sizeof (state->last_set));
9633 state_reset (state->dfa_state);
9636 /* Initialize STATE before its first use. DFA_STATE points to an
9637 allocated but uninitialized DFA state; STATE takes a reference to
it (no copy) and mips_sim_reset initializes its contents. */
9640 mips_sim_init (struct mips_sim *state, state_t dfa_state)
9642 state->issue_rate = mips_issue_rate ();
9643 state->dfa_state = dfa_state;
9644 mips_sim_reset (state);
9647 /* Advance STATE by one clock cycle: refill the issue window and step
the DFA with a null insn (a cycle advance). */
9650 mips_sim_next_cycle (struct mips_sim *state)
9653 state->insns_left = state->issue_rate;
9654 state_transition (state->dfa_state, 0);
9657 /* Advance simulation state STATE until instruction INSN can read
register REG (the rest of this comment is missing from the excerpt). */
9661 mips_sim_wait_reg (struct mips_sim *state, rtx insn, rtx reg)
/* Check every hard register that REG occupies in its mode.  */
9665 for (i = 0; i < HARD_REGNO_NREGS (REGNO (reg), GET_MODE (reg)); i++)
9666 if (state->last_set[REGNO (reg) + i].insn != 0)
/* The value is ready at the producer's issue time plus the
   producer->consumer latency from the DFA description.  */
9670 t = state->last_set[REGNO (reg) + i].time;
9671 t += insn_latency (state->last_set[REGNO (reg) + i].insn, insn);
9672 while (state->time < t)
9673 mips_sim_next_cycle (state);
9677 /* A for_each_rtx callback. If *X is a register, advance simulation state
9678 DATA until mips_sim_insn can read the register's value. */
9681 mips_sim_wait_regs_2 (rtx *x, void *data)
/* DATA is really a struct mips_sim *; mips_sim_insn is the module-level
   "current instruction" set by mips_sim_wait_regs.  */
9684 mips_sim_wait_reg (data, mips_sim_insn, *x);
9688 /* Call mips_sim_wait_regs_2 (R, DATA) for each register R mentioned in *X.
A note_uses callback; simply recurses over the expression. */
9691 mips_sim_wait_regs_1 (rtx *x, void *data)
9693 for_each_rtx (x, mips_sim_wait_regs_2, data);
9696 /* Advance simulation state STATE until all of INSN's register
9697 dependencies are satisfied. */
9700 mips_sim_wait_regs (struct mips_sim *state, rtx insn)
/* Stash INSN in the module-level variable because the note_uses
   callback chain has no extra argument slot for it.  */
9702 mips_sim_insn = insn;
9703 note_uses (&PATTERN (insn), mips_sim_wait_regs_1, state);
9706 /* Advance simulation state STATE until the units required by
9707 instruction INSN are available. */
9710 mips_sim_wait_units (struct mips_sim *state, rtx insn)
9714 tmp_state = alloca (state_size ());
/* Keep advancing while either the issue window is exhausted or a trial
   transition on a scratch copy of the DFA state shows INSN would stall
   (state_transition >= 0 means it cannot issue this cycle).  The copy
   is needed because state_transition mutates its argument.  */
9715 while (state->insns_left == 0
9716 || (memcpy (tmp_state, state->dfa_state, state_size ()),
9717 state_transition (tmp_state, insn) >= 0))
9718 mips_sim_next_cycle (state);
9721 /* Advance simulation state STATE until INSN is ready to issue:
first satisfy register dependencies, then functional-unit hazards. */
9724 mips_sim_wait_insn (struct mips_sim *state, rtx insn)
9726 mips_sim_wait_regs (state, insn);
9727 mips_sim_wait_units (state, insn);
9730 /* mips_sim_insn has just set X. Update the LAST_SET array
9731 in simulation state DATA. A note_stores callback. */
9734 mips_sim_record_set (rtx x, const_rtx pat ATTRIBUTE_UNUSED, void *data)
9736 struct mips_sim *state;
/* Record the write for every hard register X occupies.  */
9741 for (i = 0; i < HARD_REGNO_NREGS (REGNO (x), GET_MODE (x)); i++)
9743 state->last_set[REGNO (x) + i].insn = mips_sim_insn;
9744 state->last_set[REGNO (x) + i].time = state->time;
9748 /* Issue instruction INSN in scheduler state STATE. Assume that INSN
9749 can issue immediately (i.e., that mips_sim_wait_insn has already
been called). */
9753 mips_sim_issue_insn (struct mips_sim *state, rtx insn)
9755 state_transition (state->dfa_state, insn);
9756 state->insns_left--;
/* Record INSN's register writes via the note_stores callback.  */
9758 mips_sim_insn = insn;
9759 note_stores (PATTERN (insn), mips_sim_record_set, state);
9762 /* Simulate issuing a NOP in state STATE. A nop consumes an issue
slot but has no register or unit effects. */
9765 mips_sim_issue_nop (struct mips_sim *state)
9767 if (state->insns_left == 0)
9768 mips_sim_next_cycle (state);
9769 state->insns_left--;
9772 /* Update simulation state STATE so that it's ready to accept the instruction
9773 after INSN. INSN should be part of the main rtl chain, not a member of a
SEQUENCE (the rest of this comment is missing from the excerpt). */
9777 mips_sim_finish_insn (struct mips_sim *state, rtx insn)
9779 /* If INSN is a jump with an implicit delay slot, simulate a nop. */
9781 mips_sim_issue_nop (state);
9783 switch (GET_CODE (SEQ_BEGIN (insn)))
9787 /* We can't predict the processor state after a call or label. */
9788 mips_sim_reset (state);
9792 /* The delay slots of branch likely instructions are only executed
9793 when the branch is taken. Therefore, if the caller has simulated
9794 the delay slot instruction, STATE does not really reflect the state
9795 of the pipeline for the instruction after the delay slot. Also,
9796 branch likely instructions tend to incur a penalty when not taken,
9797 so there will probably be an extra delay between the branch and
9798 the instruction after the delay slot. */
9799 if (INSN_ANNULLED_BRANCH_P (SEQ_BEGIN (insn)))
9800 mips_sim_reset (state);
9808 /* The VR4130 pipeline issues aligned pairs of instructions together,
9809 but it stalls the second instruction if it depends on the first.
9810 In order to cut down the amount of logic required, this dependence
9811 check is not based on a full instruction decode. Instead, any non-SPECIAL
9812 instruction is assumed to modify the register specified by bits 20-16
9813 (which is usually the "rt" field).
9815 In beq, beql, bne and bnel instructions, the rt field is actually an
9816 input, so we can end up with a false dependence between the branch
9817 and its delay slot. If this situation occurs in instruction INSN,
9818 try to avoid it by swapping rs and rt. */
9821 vr4130_avoid_branch_rt_conflict (rtx insn)
9825 first = SEQ_BEGIN (insn);
9826 second = SEQ_END (insn);
/* Look for a conditional branch (set pc (if_then_else ...)) followed by
   a non-jump delay-slot instruction.  */
9828 && NONJUMP_INSN_P (second)
9829 && GET_CODE (PATTERN (first)) == SET
9830 && GET_CODE (SET_DEST (PATTERN (first))) == PC
9831 && GET_CODE (SET_SRC (PATTERN (first))) == IF_THEN_ELSE)
9833 /* Check for the right kind of condition. */
9834 rtx cond = XEXP (SET_SRC (PATTERN (first)), 0);
/* EQ and NE are commutative, so swapping the operands below preserves
   the branch's meaning.  */
9835 if ((GET_CODE (cond) == EQ || GET_CODE (cond) == NE)
9836 && REG_P (XEXP (cond, 0))
9837 && REG_P (XEXP (cond, 1))
9838 && reg_referenced_p (XEXP (cond, 1), PATTERN (second))
9839 && !reg_referenced_p (XEXP (cond, 0), PATTERN (second)))
9841 /* SECOND mentions the rt register but not the rs register. */
9842 rtx tmp = XEXP (cond, 0);
9843 XEXP (cond, 0) = XEXP (cond, 1);
9844 XEXP (cond, 1) = tmp;
9849 /* Implement -mvr4130-align. Go through each basic block and simulate the
9850 processor pipeline. If we find that a pair of instructions could execute
9851 in parallel, and the first of those instructions is not 8-byte aligned,
9852 insert a nop to make it aligned. */
9855 vr4130_align_insns (void)
9857 struct mips_sim state;
9858 rtx insn, subinsn, last, last2, next;
9863 /* LAST is the last instruction before INSN to have a nonzero length.
9864 LAST2 is the last such instruction before LAST. */
9868 /* ALIGNED_P is true if INSN is known to be at an aligned address. */
9871 mips_sim_init (&state, alloca (state_size ()));
9872 for (insn = get_insns (); insn != 0; insn = next)
9874 unsigned int length;
9876 next = NEXT_INSN (insn);
9878 /* See the comment above vr4130_avoid_branch_rt_conflict for details.
9879 This isn't really related to the alignment pass, but we do it on
9880 the fly to avoid a separate instruction walk. */
9881 vr4130_avoid_branch_rt_conflict (insn);
9883 if (USEFUL_INSN_P (insn))
9884 FOR_EACH_SUBINSN (subinsn, insn)
9886 mips_sim_wait_insn (&state, subinsn);
9888 /* If we want this instruction to issue in parallel with the
9889 previous one, make sure that the previous instruction is
9890 aligned. There are several reasons why this isn't worthwhile
9891 when the second instruction is a call:
9893 - Calls are less likely to be performance critical,
9894 - There's a good chance that the delay slot can execute
9895 in parallel with the call.
9896 - The return address would then be unaligned.
9898 In general, if we're going to insert a nop between instructions
9899 X and Y, it's better to insert it immediately after X. That
9900 way, if the nop makes Y aligned, it will also align any labels
between X and Y. */
9902 if (state.insns_left != state.issue_rate
9903 && !CALL_P (subinsn))
9905 if (subinsn == SEQ_BEGIN (insn) && aligned_p)
9907 /* SUBINSN is the first instruction in INSN and INSN is
9908 aligned. We want to align the previous instruction
9909 instead, so insert a nop between LAST2 and LAST.
9911 Note that LAST could be either a single instruction
9912 or a branch with a delay slot. In the latter case,
9913 LAST, like INSN, is already aligned, but the delay
9914 slot must have some extra delay that stops it from
9915 issuing at the same time as the branch. We therefore
9916 insert a nop before the branch in order to align its
delay slot. */
9918 emit_insn_after (gen_nop (), last2);
9921 else if (subinsn != SEQ_BEGIN (insn) && !aligned_p)
9923 /* SUBINSN is the delay slot of INSN, but INSN is
9924 currently unaligned. Insert a nop between
9925 LAST and INSN to align it. */
9926 emit_insn_after (gen_nop (), last);
9930 mips_sim_issue_insn (&state, subinsn);
9932 mips_sim_finish_insn (&state, insn);
9934 /* Update LAST, LAST2 and ALIGNED_P for the next instruction. */
9935 length = get_attr_length (insn);
9938 /* If the instruction is an asm statement or multi-instruction
9939 mips.md pattern, the length is only an estimate. Insert an
9940 8 byte alignment after it so that the following instructions
9941 can be handled correctly. */
9942 if (NONJUMP_INSN_P (SEQ_BEGIN (insn))
9943 && (recog_memoized (insn) < 0 || length >= 8))
9945 next = emit_insn_after (gen_align (GEN_INT (3)), insn);
9946 next = NEXT_INSN (next);
9947 mips_sim_next_cycle (&state);
/* A 4-byte (mod 8) instruction flips the alignment parity.  */
9950 else if (length & 4)
9951 aligned_p = !aligned_p;
9956 /* See whether INSN is an aligned label. */
9957 if (LABEL_P (insn) && label_to_alignment (insn) >= 3)
9963 /* Subroutine of mips_reorg. If there is a hazard between INSN
9964 and a previous instruction, avoid it by inserting nops after
instruction AFTER.
9967 *DELAYED_REG and *HILO_DELAY describe the hazards that apply at
9968 this point. If *DELAYED_REG is non-null, INSN must wait a cycle
9969 before using the value of that register. *HILO_DELAY counts the
9970 number of instructions since the last hilo hazard (that is,
9971 the number of instructions since the last mflo or mfhi).
9973 After inserting nops for INSN, update *DELAYED_REG and *HILO_DELAY
9974 for the next instruction.
9976 LO_REG is an rtx for the LO register, used in dependence checking. */
9979 mips_avoid_hazard (rtx after, rtx insn, int *hilo_delay,
9980 rtx *delayed_reg, rtx lo_reg)
9988 pattern = PATTERN (insn);
9990 /* Do not put the whole function in .set noreorder if it contains
9991 an asm statement. We don't know whether there will be hazards
9992 between the asm statement and the gcc-generated code. */
9993 if (GET_CODE (pattern) == ASM_INPUT || asm_noperands (pattern) >= 0)
9994 cfun->machine->all_noreorder_p = false;
9996 /* Ignore zero-length instructions (barriers and the like). */
9997 ninsns = get_attr_length (insn) / 4;
10001 /* Work out how many nops are needed. Note that we only care about
10002 registers that are explicitly mentioned in the instruction's pattern.
10003 It doesn't matter that calls use the argument registers or that they
10004 clobber hi and lo. */
10005 if (*hilo_delay < 2 && reg_set_p (lo_reg, pattern))
10006 nops = 2 - *hilo_delay;
10007 else if (*delayed_reg != 0 && reg_referenced_p (*delayed_reg, pattern))
10012 /* Insert the nops between this instruction and the previous one.
10013 Each new nop takes us further from the last hilo hazard. */
10014 *hilo_delay += nops;
10016 emit_insn_after (gen_hazard_nop (), after);
10018 /* Set up the state for the next instruction. */
10019 *hilo_delay += ninsns;
10021 if (INSN_CODE (insn) >= 0)
10022 switch (get_attr_hazard (insn))
/* HAZARD_DELAY case: the destination of this insn cannot be read by
   the immediately following instruction.  */
10032 set = single_set (insn);
10033 gcc_assert (set != 0);
10034 *delayed_reg = SET_DEST (set);
10040 /* Go through the instruction stream and insert nops where necessary.
10041 See if the whole function can then be put into .set noreorder &
.set nomacro (the rest of this comment is missing from the excerpt). */
10045 mips_avoid_hazards (void)
10047 rtx insn, last_insn, lo_reg, delayed_reg;
10050 /* Force all instructions to be split into their final form. */
10051 split_all_insns_noflow ();
10053 /* Recalculate instruction lengths without taking nops into account. */
10054 cfun->machine->ignore_hazard_length_p = true;
10055 shorten_branches (get_insns ());
/* Start optimistic; the checks below and mips_avoid_hazard clear this
   flag whenever .set noreorder would be unsafe.  */
10057 cfun->machine->all_noreorder_p = true;
10059 /* Profiled functions can't be all noreorder because the profiler
10060 support uses assembler macros. */
10061 if (current_function_profile)
10062 cfun->machine->all_noreorder_p = false;
10064 /* Code compiled with -mfix-vr4120 can't be all noreorder because
10065 we rely on the assembler to work around some errata. */
10066 if (TARGET_FIX_VR4120)
10067 cfun->machine->all_noreorder_p = false;
10069 /* The same is true for -mfix-vr4130 if we might generate mflo or
10070 mfhi instructions. Note that we avoid using mflo and mfhi if
10071 the VR4130 macc and dmacc instructions are available instead;
10072 see the *mfhilo_{si,di}_macc patterns. */
10073 if (TARGET_FIX_VR4130 && !ISA_HAS_MACCHI)
10074 cfun->machine->all_noreorder_p = false;
10079 lo_reg = gen_rtx_REG (SImode, LO_REGNUM);
10081 for (insn = get_insns (); insn != 0; insn = NEXT_INSN (insn))
/* For a SEQUENCE (branch plus delay slot), check each member insn
   individually; otherwise check the insn itself.  */
10084 if (GET_CODE (PATTERN (insn)) == SEQUENCE)
10085 for (i = 0; i < XVECLEN (PATTERN (insn), 0); i++)
10086 mips_avoid_hazard (last_insn, XVECEXP (PATTERN (insn), 0, i),
10087 &hilo_delay, &delayed_reg, lo_reg);
10089 mips_avoid_hazard (last_insn, insn, &hilo_delay,
10090 &delayed_reg, lo_reg);
10097 /* Implement TARGET_MACHINE_DEPENDENT_REORG.
NOTE(review): the function header and the TARGET_MIPS16 test that
selects the first branch are missing from this excerpt -- confirm
against the full source. */
10103 mips16_lay_out_constants ();
10104 else if (TARGET_EXPLICIT_RELOCS)
/* Run delayed-branch scheduling before hazard avoidance so that the
   inserted nops account for the final insn order.  */
10106 if (mips_flag_delayed_branch)
10107 dbr_schedule (get_insns ());
10108 mips_avoid_hazards ();
10109 if (TUNE_MIPS4130 && TARGET_VR4130_ALIGN)
10110 vr4130_align_insns ();
10114 /* This function does three things:
10116 - Register the special divsi3 and modsi3 functions if -mfix-vr4120.
10117 - Register the mips16 hardware floating point stubs.
10118 - Register the gofast functions if selected using --enable-gofast. */
10120 #include "config/gofast.h"
10123 mips_init_libfuncs (void)
10125 if (TARGET_FIX_VR4120)
/* VR4120 errata: route 32-bit division through fixed library routines.  */
10127 set_optab_libfunc (sdiv_optab, SImode, "__vr4120_divsi3");
10128 set_optab_libfunc (smod_optab, SImode, "__vr4120_modsi3");
10131 if (mips16_hard_float)
/* Single-precision arithmetic, comparison and conversion stubs that
   let MIPS16 code reach the hardware FPU.  */
10133 set_optab_libfunc (add_optab, SFmode, "__mips16_addsf3");
10134 set_optab_libfunc (sub_optab, SFmode, "__mips16_subsf3");
10135 set_optab_libfunc (smul_optab, SFmode, "__mips16_mulsf3");
10136 set_optab_libfunc (sdiv_optab, SFmode, "__mips16_divsf3");
10138 set_optab_libfunc (eq_optab, SFmode, "__mips16_eqsf2");
10139 set_optab_libfunc (ne_optab, SFmode, "__mips16_nesf2");
10140 set_optab_libfunc (gt_optab, SFmode, "__mips16_gtsf2");
10141 set_optab_libfunc (ge_optab, SFmode, "__mips16_gesf2");
10142 set_optab_libfunc (lt_optab, SFmode, "__mips16_ltsf2");
10143 set_optab_libfunc (le_optab, SFmode, "__mips16_lesf2");
10144 set_optab_libfunc (unord_optab, SFmode, "__mips16_unordsf2");
10146 set_conv_libfunc (sfix_optab, SImode, SFmode, "__mips16_fix_truncsfsi");
10147 set_conv_libfunc (sfloat_optab, SFmode, SImode, "__mips16_floatsisf");
10148 set_conv_libfunc (ufloat_optab, SFmode, SImode, "__mips16_floatunsisf");
10150 if (TARGET_DOUBLE_FLOAT)
/* Double-precision counterparts, only when the FPU supports them.  */
10152 set_optab_libfunc (add_optab, DFmode, "__mips16_adddf3");
10153 set_optab_libfunc (sub_optab, DFmode, "__mips16_subdf3");
10154 set_optab_libfunc (smul_optab, DFmode, "__mips16_muldf3");
10155 set_optab_libfunc (sdiv_optab, DFmode, "__mips16_divdf3");
10157 set_optab_libfunc (eq_optab, DFmode, "__mips16_eqdf2");
10158 set_optab_libfunc (ne_optab, DFmode, "__mips16_nedf2");
10159 set_optab_libfunc (gt_optab, DFmode, "__mips16_gtdf2");
10160 set_optab_libfunc (ge_optab, DFmode, "__mips16_gedf2");
10161 set_optab_libfunc (lt_optab, DFmode, "__mips16_ltdf2");
10162 set_optab_libfunc (le_optab, DFmode, "__mips16_ledf2");
10163 set_optab_libfunc (unord_optab, DFmode, "__mips16_unorddf2");
10165 set_conv_libfunc (sext_optab, DFmode, SFmode, "__mips16_extendsfdf2");
10166 set_conv_libfunc (trunc_optab, SFmode, DFmode, "__mips16_truncdfsf2");
10168 set_conv_libfunc (sfix_optab, SImode, DFmode, "__mips16_fix_truncdfsi");
10169 set_conv_libfunc (sfloat_optab, DFmode, SImode, "__mips16_floatsidf");
10170 set_conv_libfunc (ufloat_optab, DFmode, SImode, "__mips16_floatunsidf");
/* No-op unless GCC was configured with --enable-gofast.  */
10174 gofast_maybe_init_libfuncs ();
10177 /* Return a number assessing the cost of moving a register in class
10178 FROM to class TO. The classes are expressed using the enumeration
10179 values such as `GENERAL_REGS'. A value of 2 is the default; other
10180 values are interpreted relative to that.
10182 It is not required that the cost always equal 2 when FROM is the
10183 same as TO; on some machines it is expensive to move between
10184 registers if they are not general registers.
10186 If reload sees an insn consisting of a single `set' between two
10187 hard registers, and if `REGISTER_MOVE_COST' applied to their
10188 classes returns a value of 2, reload does not check to ensure that
10189 the constraints of the insn are met. Setting a cost of other than
10190 2 will allow reload to verify that the constraints are met. You
10191 should do this if the `movM' pattern's constraints do not allow
such loads and stores (end of sentence missing from this excerpt).
10194 ??? We make the cost of moving from HI/LO into general
10195 registers the same as for one of moving general registers to
10196 HI/LO for TARGET_MIPS16 in order to prevent allocating a
10197 pseudo to HI/LO. This might hurt optimizations though, it
10198 isn't clear if it is wise. And it might not work in all cases. We
10199 could solve the DImode LO reg problem by using a multiply, just
10200 like reload_{in,out}si. We could solve the SImode/HImode HI reg
10201 problem by using divide instructions. divu puts the remainder in
10202 the HI reg, so doing a divide by -1 will move the value in the HI
10203 reg for all values except -1. We could handle that case by using a
10204 signed divide, e.g. -1 / 2 (or maybe 1 / -2?). We'd have to emit
10205 a compare/branch to test the input value to see which instruction
10206 we need to use. This gets pretty messy, but it is feasible. */
10209 mips_register_move_cost (enum machine_mode mode ATTRIBUTE_UNUSED,
10210 enum reg_class to, enum reg_class from)
/* NOTE(review): the return statements between these conditions are
   missing from this excerpt; the visible code only shows the class
   dispatch structure.  */
10212 if (from == M16_REGS && reg_class_subset_p (to, GENERAL_REGS))
10214 else if (from == M16_NA_REGS && reg_class_subset_p (to, GENERAL_REGS))
10216 else if (reg_class_subset_p (from, GENERAL_REGS))
10218 if (to == M16_REGS)
10220 else if (to == M16_NA_REGS)
10222 else if (reg_class_subset_p (to, GENERAL_REGS))
10229 else if (to == FP_REGS)
10231 else if (reg_class_subset_p (to, ACC_REGS))
10238 else if (reg_class_subset_p (to, ALL_COP_REGS))
10243 else if (from == FP_REGS)
10245 if (reg_class_subset_p (to, GENERAL_REGS))
10247 else if (to == FP_REGS)
10249 else if (to == ST_REGS)
10252 else if (reg_class_subset_p (from, ACC_REGS))
10254 if (reg_class_subset_p (to, GENERAL_REGS))
10262 else if (from == ST_REGS && reg_class_subset_p (to, GENERAL_REGS))
10264 else if (reg_class_subset_p (from, ALL_COP_REGS))
10270 ??? What cases are these? Shouldn't we return 2 here? */
10275 /* Return the length of INSN. LENGTH is the initial length computed by
10276 attributes in the machine-description file. */
10279 mips_adjust_insn_length (rtx insn, int length)
10281 /* An unconditional jump has an unfilled delay slot if it is not part
10282 of a sequence. A conditional jump normally has a delay slot, but
10283 does not on MIPS16. */
10284 if (CALL_P (insn) || (TARGET_MIPS16 ? simplejump_p (insn) : JUMP_P (insn)))
10287 /* See how many nops might be needed to avoid hardware hazards. */
/* Skip this when ignore_hazard_length_p: mips_avoid_hazards wants raw
   lengths, before hazard nops are accounted for.  */
10288 if (!cfun->machine->ignore_hazard_length_p && INSN_CODE (insn) >= 0)
10289 switch (get_attr_hazard (insn))
10303 /* All MIPS16 instructions are a measly two bytes. */
10311 /* Return an asm sequence to start a noat block and load the address
10312 of a label into $1. The caller is expected to close the noat block
with the matching %] directive. */
10315 mips_output_load_label (void)
10317 if (TARGET_EXPLICIT_RELOCS)
/* GOT-based label address: page + offset pair, 32-bit form...  */
10321 return "%[lw\t%@,%%got_page(%0)(%+)\n\taddiu\t%@,%@,%%got_ofst(%0)";
/* ...and 64-bit form.  */
10324 return "%[ld\t%@,%%got_page(%0)(%+)\n\tdaddiu\t%@,%@,%%got_ofst(%0)";
/* Non-explicit-relocs GOT access; %# emits a nop in the load delay
   slot on ISAs that need one.  */
10327 if (ISA_HAS_LOAD_DELAY)
10328 return "%[lw\t%@,%%got(%0)(%+)%#\n\taddiu\t%@,%@,%%lo(%0)";
10329 return "%[lw\t%@,%%got(%0)(%+)\n\taddiu\t%@,%@,%%lo(%0)";
/* Non-PIC: let the assembler expand (d)la into the right sequence.  */
10333 if (Pmode == DImode)
10334 return "%[dla\t%@,%0";
10336 return "%[la\t%@,%0";
10340 /* Return the assembly code for INSN, which has the operands given by
10341 OPERANDS, and which branches to OPERANDS[1] if some condition is true.
10342 BRANCH_IF_TRUE is the asm template that should be used if OPERANDS[1]
10343 is in range of a direct branch. BRANCH_IF_FALSE is an inverted
10344 version of BRANCH_IF_TRUE. */
10347 mips_output_conditional_branch (rtx insn, rtx *operands,
10348 const char *branch_if_true,
10349 const char *branch_if_false)
10351 unsigned int length;
10352 rtx taken, not_taken;
10354 length = get_attr_length (insn);
/* NOTE(review): the test on LENGTH that guards this short-branch fast
   path is missing from this excerpt.  */
10357 /* Just a simple conditional branch. */
10358 mips_branch_likely = (final_sequence && INSN_ANNULLED_BRANCH_P (insn));
10359 return branch_if_true;
10362 /* Generate a reversed branch around a direct jump. This fallback does
10363 not use branch-likely instructions. */
10364 mips_branch_likely = false;
10365 not_taken = gen_label_rtx ();
10366 taken = operands[1];
10368 /* Generate the reversed branch to NOT_TAKEN. */
10369 operands[1] = not_taken;
10370 output_asm_insn (branch_if_false, operands);
10372 /* If INSN has a delay slot, we must provide delay slots for both the
10373 branch to NOT_TAKEN and the conditional jump. We must also ensure
10374 that INSN's delay slot is executed in the appropriate cases. */
10375 if (final_sequence)
10377 /* This first delay slot will always be executed, so use INSN's
10378 delay slot if is not annulled. */
10379 if (!INSN_ANNULLED_BRANCH_P (insn))
10381 final_scan_insn (XVECEXP (final_sequence, 0, 1),
10382 asm_out_file, optimize, 1, NULL);
/* Mark the delay-slot insn as already emitted so final does not
   output it a second time.  */
10383 INSN_DELETED_P (XVECEXP (final_sequence, 0, 1)) = 1;
10386 output_asm_insn ("nop", 0);
10387 fprintf (asm_out_file, "\n");
10390 /* Output the unconditional branch to TAKEN. */
/* Either a direct "j", or a load of the target address followed by an
   indirect "jr" (NOTE(review): the condition choosing between the two
   forms is missing from this excerpt).  "%/" prints the delay-slot
   annotation, "%]" closes the noat block opened by "%[".  */
10392 output_asm_insn ("j\t%0%/", &taken);
10395 output_asm_insn (mips_output_load_label (), &taken);
10396 output_asm_insn ("jr\t%@%]%/", 0);
10399 /* Now deal with its delay slot; see above. */
10400 if (final_sequence)
10402 /* This delay slot will only be executed if the branch is taken.
10403 Use INSN's delay slot if is annulled. */
10404 if (INSN_ANNULLED_BRANCH_P (insn))
10406 final_scan_insn (XVECEXP (final_sequence, 0, 1),
10407 asm_out_file, optimize, 1, NULL);
10408 INSN_DELETED_P (XVECEXP (final_sequence, 0, 1)) = 1;
10411 output_asm_insn ("nop", 0);
10412 fprintf (asm_out_file, "\n");
10415 /* Output NOT_TAKEN. */
10416 (*targetm.asm_out.internal_label) (asm_out_file, "L",
10417 CODE_LABEL_NUMBER (not_taken));
10421 /* Return the assembly code for INSN, which branches to OPERANDS[1]
10422 if some ordered condition is true. The condition is given by
10423 OPERANDS[0] if !INVERTED_P, otherwise it is the inverse of
10424 OPERANDS[0]. OPERANDS[2] is the comparison's first operand;
10425 its second is always zero. */
10428 mips_output_order_conditional_branch (rtx insn, rtx *operands, bool inverted_p)
10430 const char *branch[2];
10432 /* Make BRANCH[1] branch to OPERANDS[1] when the condition is true.
10433 Make BRANCH[0] branch on the inverse condition. */
/* NOTE(review): the case labels of this switch are missing from the
   excerpt; each group below handles one family of comparison codes.  */
10434 switch (GET_CODE (operands[0]))
10436 /* These cases are equivalent to comparisons against zero. */
10438 inverted_p = !inverted_p;
10439 /* Fall through. */
10441 branch[!inverted_p] = MIPS_BRANCH ("bne", "%2,%.,%1");
10442 branch[inverted_p] = MIPS_BRANCH ("beq", "%2,%.,%1");
10445 /* These cases are always true or always false. */
10447 inverted_p = !inverted_p;
10448 /* Fall through. */
/* beq $0,$0 is an unconditional branch; bne $0,$0 never branches.  */
10450 branch[!inverted_p] = MIPS_BRANCH ("beq", "%.,%.,%1");
10451 branch[inverted_p] = MIPS_BRANCH ("bne", "%.,%.,%1");
/* Generic case: use the b<cond>z instruction chosen by the %C0/%N0
   operand modifiers.  */
10455 branch[!inverted_p] = MIPS_BRANCH ("b%C0z", "%2,%1");
10456 branch[inverted_p] = MIPS_BRANCH ("b%N0z", "%2,%1");
10459 return mips_output_conditional_branch (insn, operands, branch[1], branch[0]);
10462 /* Used to output div or ddiv instruction DIVISION, which has the operands
10463 given by OPERANDS. Add in a divide-by-zero check if needed.
10465 When working around R4000 and R4400 errata, we need to make sure that
10466 the division is not immediately followed by a shift[1][2]. We also
10467 need to stop the division from being put into a branch delay slot[3].
10468 The easiest way to avoid both problems is to add a nop after the
10469 division. When a divide-by-zero check is needed, this nop can be
10470 used to fill the branch delay slot.
10472 [1] If a double-word or a variable shift executes immediately
10473 after starting an integer division, the shift may give an
10474 incorrect result. See quotations of errata #16 and #28 from
10475 "MIPS R4000PC/SC Errata, Processor Revision 2.2 and 3.0"
10476 in mips.md for details.
10478 [2] A similar bug to [1] exists for all revisions of the
10479 R4000 and the R4400 when run in an MC configuration.
10480 From "MIPS R4000MC Errata, Processor Revision 2.2 and 3.0":
10482 "19. In this following sequence:
10484 ddiv (or ddivu or div or divu)
10485 dsll32 (or dsrl32, dsra32)
10487 if an MPT stall occurs, while the divide is slipping the cpu
10488 pipeline, then the following double shift would end up with an
10491 Workaround: The compiler needs to avoid generating any
10492 sequence with divide followed by extended double shift."
10494 This erratum is also present in "MIPS R4400MC Errata, Processor
10495 Revision 1.0" and "MIPS R4400MC Errata, Processor Revision 2.0
10496 & 3.0" as errata #10 and #4, respectively.
10498 [3] From "MIPS R4000PC/SC Errata, Processor Revision 2.2 and 3.0"
10499 (also valid for MIPS R4000MC processors):
10501 "52. R4000SC: This bug does not apply for the R4000PC.
10503 There are two flavors of this bug:
10505 1) If the instruction just after divide takes an RF exception
10506 (tlb-refill, tlb-invalid) and gets an instruction cache
10507 miss (both primary and secondary) and the line which is
10508 currently in secondary cache at this index had the first
10509 data word, where the bits 5..2 are set, then R4000 would
10510 get a wrong result for the div.
10515 ------------------- # end-of page. -tlb-refill
10520 ------------------- # end-of page. -tlb-invalid
10523 2) If the divide is in the taken branch delay slot, where the
10524 target takes RF exception and gets an I-cache miss for the
10525 exception vector or where I-cache miss occurs for the
10526 target address, under the above mentioned scenarios, the
10527 div would get wrong results.
10530 j r2 # to next page mapped or unmapped
10531 div r8,r9 # this bug would be there as long
10532 # as there is an ICache miss and
10533 nop # the "data pattern" is present
10536 beq r0, r0, NextPage # to Next page
10540 This bug is present for div, divu, ddiv, and ddivu
10543 Workaround: For item 1), OS could make sure that the next page
10544 after the divide instruction is also mapped. For item 2), the
10545 compiler could make sure that the divide instruction is not in
10546 the branch delay slot."
10548 These processors have PRId values of 0x00004220 and 0x00004300 for
10549 the R4000 and 0x00004400, 0x00004500 and 0x00004600 for the R4400. */
/* NOTE(review): the declaration/initialization of `s` (presumably set
   from DIVISION) and the function's final return are missing from this
   excerpt.  The pattern throughout is: output the current `s`, then
   replace `s` with the remainder of the sequence to output later.  */
10552 mips_output_division (const char *division, rtx *operands)
/* R4000/R4400 errata workaround: emit the division now so a nop can
   follow it (see [1]-[3] above).  */
10557 if (TARGET_FIX_R4000 || TARGET_FIX_R4400)
10559 output_asm_insn (s, operands);
10562 if (TARGET_CHECK_ZERO_DIV)
/* MIPS16 has no conditional trap, so branch around a "break 7".  */
10566 output_asm_insn (s, operands);
10567 s = "bnez\t%2,1f\n\tbreak\t7\n1:";
10569 else if (GENERATE_DIVIDE_TRAPS)
/* Use a conditional trap with code 7 (divide by zero).  */
10571 output_asm_insn (s, operands);
10572 s = "teq\t%2,%.,7";
/* Fallback: explicit compare-and-branch around "break 7"; "%(...%)"
   brackets a noreorder block.  */
10576 output_asm_insn ("%(bne\t%2,%.,1f", operands);
10577 output_asm_insn (s, operands);
10578 s = "break\t7%)\n1:";
10584 /* Return true if GIVEN is the same as CANONICAL, or if it is CANONICAL
10585 with a final "000" replaced by "k". Ignore case.
10587 Note: this function is shared between GCC and GAS. */
10590 mips_strict_matching_cpu_name_p (const char *canonical, const char *given)
10592 while (*given != 0 && TOLOWER (*given) == TOLOWER (*canonical))
10593 given++, canonical++;
10595 return ((*given == 0 && *canonical == 0)
10596 || (strcmp (canonical, "000") == 0 && strcasecmp (given, "k") == 0));
10600 /* Return true if GIVEN matches CANONICAL, where GIVEN is a user-supplied
10601 CPU name. We've traditionally allowed a lot of variation here.
10603 Note: this function is shared between GCC and GAS. */
10606 mips_matching_cpu_name_p (const char *canonical, const char *given)
10608 /* First see if the name matches exactly, or with a final "000"
10609 turned into "k". */
/* NOTE(review): the "return true" for this hit, the pointer increments
   after each prefix test below, and the early "return false" are
   missing from this excerpt.  */
10610 if (mips_strict_matching_cpu_name_p (canonical, given))
10613 /* If not, try comparing based on numerical designation alone.
10614 See if GIVEN is an unadorned number, or 'r' followed by a number. */
10615 if (TOLOWER (*given) == 'r')
10617 if (!ISDIGIT (*given))
10620 /* Skip over some well-known prefixes in the canonical name,
10621 hoping to find a number there too. */
10622 if (TOLOWER (canonical[0]) == 'v' && TOLOWER (canonical[1]) == 'r')
10624 else if (TOLOWER (canonical[0]) == 'r' && TOLOWER (canonical[1]) == 'm')
10626 else if (TOLOWER (canonical[0]) == 'r')
/* Compare the numeric remainders with the same strict rules.  */
10629 return mips_strict_matching_cpu_name_p (canonical, given);
10633 /* Return the mips_cpu_info entry for the processor or ISA given
10634 by CPU_STRING. Return null if the string isn't recognized.
10636 A similar function exists in GAS. */
10638 static const struct mips_cpu_info *
10639 mips_parse_cpu (const char *cpu_string)
10641 const struct mips_cpu_info *p;
/* NOTE(review): the declaration of `s`, the upper-case test inside the
   loop, and the fall-back "return 0" are missing from this excerpt.  */
10644 /* In the past, we allowed upper-case CPU names, but it doesn't
10645 work well with the multilib machinery. */
10646 for (s = cpu_string; *s != 0; s++)
10649 warning (0, "the cpu name must be lower case");
10653 /* 'from-abi' selects the most compatible architecture for the given
10654 ABI: MIPS I for 32-bit ABIs and MIPS III for 64-bit ABIs. For the
10655 EABIs, we have to decide whether we're using the 32-bit or 64-bit
10656 version. Look first at the -mgp options, if given, otherwise base
10657 the choice on MASK_64BIT in TARGET_DEFAULT. */
10658 if (strcasecmp (cpu_string, "from-abi") == 0)
10659 return mips_cpu_info_from_isa (ABI_NEEDS_32BIT_REGS ? 1
10660 : ABI_NEEDS_64BIT_REGS ? 3
10661 : (TARGET_64BIT ? 3 : 1));
10663 /* 'default' has traditionally been a no-op. Probably not very useful. */
10664 if (strcasecmp (cpu_string, "default") == 0)
/* Finally, look the name up in the CPU table.  */
10667 for (p = mips_cpu_info_table; p->name != 0; p++)
10668 if (mips_matching_cpu_name_p (p->name, cpu_string))
10675 /* Return the processor associated with the given ISA level, or null
10676 if the ISA isn't valid. */
10678 static const struct mips_cpu_info *
10679 mips_cpu_info_from_isa (int isa)
10681 const struct mips_cpu_info *p;
/* Linear scan of the CPU table.  NOTE(review): the comparison against
   ISA and the return statements are missing from this excerpt.  */
10683 for (p = mips_cpu_info_table; p->name != 0; p++)
10690 /* Implement HARD_REGNO_NREGS. The size of FP registers is controlled
10691 by UNITS_PER_FPREG. The size of FP status registers is always 4, because
10692 they only hold condition code modes, and CCmode is always considered to
10693 be 4 bytes wide. All other registers are word sized. */
10696 mips_hard_regno_nregs (int regno, enum machine_mode mode)
10698 if (ST_REG_P (regno))
10699 return ((GET_MODE_SIZE (mode) + 3) / 4);
10700 else if (! FP_REG_P (regno))
10701 return ((GET_MODE_SIZE (mode) + UNITS_PER_WORD - 1) / UNITS_PER_WORD);
10703 return ((GET_MODE_SIZE (mode) + UNITS_PER_FPREG - 1) / UNITS_PER_FPREG);
10706 /* Implement TARGET_RETURN_IN_MEMORY. Under the old (i.e., 32 and O64 ABIs)
10707 all BLKmode objects are returned in memory. Under the new (N32 and
10708 64-bit MIPS ABIs) small structures are returned in a register.
10709 Objects with varying size must still be returned in memory, of
10713 mips_return_in_memory (tree type, tree fndecl ATTRIBUTE_UNUSED)
/* NOTE(review): the old-ABI guard that selects this first return is
   missing from this excerpt.  Old ABIs: any BLKmode value goes in
   memory.  */
10716 return (TYPE_MODE (type) == BLKmode);
/* New ABIs: return in memory only if the value is larger than two
   words or has variable size (int_size_in_bytes == -1).  */
10718 return ((int_size_in_bytes (type) > (2 * UNITS_PER_WORD))
10719 || (int_size_in_bytes (type) == -1));
10723 mips_strict_argument_naming (CUMULATIVE_ARGS *ca ATTRIBUTE_UNUSED)
10725 return !TARGET_OLDABI;
10728 /* Return true if INSN is a multiply-add or multiply-subtract
10729 instruction and PREV assigns to the accumulator operand. */
10732 mips_linked_madd_p (rtx prev, rtx insn)
/* NOTE(review): the null check on X, the extraction of the SET source,
   and the return statements are missing from this excerpt.  */
10736 x = single_set (insn);
/* acc + (a * b): PREV must set the accumulator addend.  */
10739 ...
10742 if (GET_CODE (x) == PLUS
10743 && GET_CODE (XEXP (x, 0)) == MULT
10744 && reg_set_p (XEXP (x, 1), prev))
/* acc - (a * b): PREV must set the minuend.  */
10747 if (GET_CODE (x) == MINUS
10748 && GET_CODE (XEXP (x, 1)) == MULT
10749 && reg_set_p (XEXP (x, 0), prev))
10755 /* Used by TUNE_MACC_CHAINS to record the last scheduled instruction
10756 that may clobber hi or lo. */
/* Updated in mips_macc_chains_record and reset to 0 in
   mips_sched_reorder.  */
10758 static rtx mips_macc_chains_last_hilo;
10760 /* A TUNE_MACC_CHAINS helper function. Record that instruction INSN has
10761 been scheduled, updating mips_macc_chains_last_hilo appropriately. */
10764 mips_macc_chains_record (rtx insn)
10766 if (get_attr_may_clobber_hilo (insn))
10767 mips_macc_chains_last_hilo = insn;
10770 /* A TUNE_MACC_CHAINS helper function. Search ready queue READY, which
10771 has NREADY elements, looking for a multiply-add or multiply-subtract
10772 instruction that is cumulative with mips_macc_chains_last_hilo.
10773 If there is one, promote it ahead of anything else that might
10774 clobber hi or lo. */
10777 mips_macc_chains_reorder (rtx *ready, int nready)
/* NOTE(review): the declarations of i and j and the loop-exit
   break/return after the promotion are missing from this excerpt.  */
10781 if (mips_macc_chains_last_hilo != 0)
10782 for (i = nready - 1; i >= 0; i--)
10783 if (mips_linked_madd_p (mips_macc_chains_last_hilo, ready[i]))
/* Found a linked madd/msub at index i; move it ahead of the first
   later entry that may clobber hi/lo.  */
10785 for (j = nready - 1; j > i; j--)
10786 if (recog_memoized (ready[j]) >= 0
10787 && get_attr_may_clobber_hilo (ready[j]))
10789 mips_promote_ready (ready, i, j);
10796 /* The last instruction to be scheduled. */
/* Used by the VR4130 pairing heuristics below; set in
   mips_variable_issue and reset to 0 in mips_sched_reorder.  */
10798 static rtx vr4130_last_insn;
10800 /* A note_stores callback used by vr4130_true_reg_dependence_p. DATA
10801 points to an rtx that is initially an instruction. Nullify the rtx
10802 if the instruction uses the value of register X. */
10805 vr4130_true_reg_dependence_p_1 (rtx x, const_rtx pat ATTRIBUTE_UNUSED, void *data)
10807 rtx *insn_ptr = data;
/* NOTE(review): the leading conjuncts of this condition (presumably a
   null test on *insn_ptr and a REG_P test on X) and the nulling
   assignment are missing from this excerpt.  */
10810 && reg_referenced_p (x, PATTERN (*insn_ptr)))
10814 /* Return true if there is true register dependence between vr4130_last_insn
10818 vr4130_true_reg_dependence_p (rtx insn)
/* Walk every store in vr4130_last_insn; the callback nullifies INSN if
   a stored register is referenced by it.  NOTE(review): the final
   return (presumably testing whether INSN was nullified) is missing
   from this excerpt.  */
10820 note_stores (PATTERN (vr4130_last_insn),
10821 vr4130_true_reg_dependence_p_1, &insn);
10825 /* A TUNE_MIPS4130 helper function. Given that INSN1 is at the head of
10826 the ready queue and that INSN2 is the instruction after it, return
10827 true if it is worth promoting INSN2 ahead of INSN1. Look for cases
10828 in which INSN1 and INSN2 can probably issue in parallel, but for
10829 which (INSN2, INSN1) should be less sensitive to instruction
10830 alignment than (INSN1, INSN2). See 4130.md for more details. */
10833 vr4130_swap_insns_p (rtx insn1, rtx insn2)
/* NOTE(review): the declaration of `dep` and the function's return
   statements are missing from this excerpt.  */
10837 /* Check for the following case:
10839 1) there is some other instruction X with an anti dependence on INSN1;
10840 2) X has a higher priority than INSN2; and
10841 3) X is an arithmetic instruction (and thus has no unit restrictions).
10843 If INSN1 is the last instruction blocking X, it would better to
10844 choose (INSN1, X) over (INSN2, INSN1). */
10845 FOR_EACH_DEP_LINK (dep, INSN_FORW_DEPS (insn1))
10846 if (DEP_LINK_KIND (dep) == REG_DEP_ANTI
10847 && INSN_PRIORITY (DEP_LINK_CON (dep)) > INSN_PRIORITY (insn2)
10848 && recog_memoized (DEP_LINK_CON (dep)) >= 0
10849 && get_attr_vr4130_class (DEP_LINK_CON (dep)) == VR4130_CLASS_ALU)
10852 if (vr4130_last_insn != 0
10853 && recog_memoized (insn1) >= 0
10854 && recog_memoized (insn2) >= 0)
10856 /* See whether INSN1 and INSN2 use different execution units,
10857 or if they are both ALU-type instructions. If so, they can
10858 probably execute in parallel. */
10859 enum attr_vr4130_class class1 = get_attr_vr4130_class (insn1);
10860 enum attr_vr4130_class class2 = get_attr_vr4130_class (insn2);
10861 if (class1 != class2 || class1 == VR4130_CLASS_ALU)
10863 /* If only one of the instructions has a dependence on
10864 vr4130_last_insn, prefer to schedule the other one first. */
10865 bool dep1 = vr4130_true_reg_dependence_p (insn1);
10866 bool dep2 = vr4130_true_reg_dependence_p (insn2);
10870 /* Prefer to schedule INSN2 ahead of INSN1 if vr4130_last_insn
10871 is not an ALU-type instruction and if INSN1 uses the same
10872 execution unit. (Note that if this condition holds, we already
10873 know that INSN2 uses a different execution unit.) */
10874 if (class1 != VR4130_CLASS_ALU
10875 && recog_memoized (vr4130_last_insn) >= 0
10876 && class1 == get_attr_vr4130_class (vr4130_last_insn))
10883 /* A TUNE_MIPS4130 helper function. (READY, NREADY) describes a ready
10884 queue with at least two instructions. Swap the first two if
10885 vr4130_swap_insns_p says that it could be worthwhile. */
10888 vr4130_reorder (rtx *ready, int nready)
10890 if (vr4130_swap_insns_p (ready[nready - 1], ready[nready - 2]))
10891 mips_promote_ready (ready, nready - 2, nready - 1);
10894 /* Remove the instruction at index LOWER from ready queue READY and
10895 reinsert it in front of the instruction at index HIGHER. LOWER must
10899 mips_promote_ready (rtx *ready, int lower, int higher)
/* NOTE(review): the declarations of i and new_head are missing from
   this excerpt.  Save the promoted insn, shift (LOWER, HIGHER] down by
   one slot, then drop the saved insn into slot HIGHER.  */
10904 new_head = ready[lower];
10905 for (i = lower; i < higher; i++)
10906 ready[i] = ready[i + 1];
10907 ready[i] = new_head;
10910 /* Implement TARGET_SCHED_REORDER. */
10913 mips_sched_reorder (FILE *file ATTRIBUTE_UNUSED, int verbose ATTRIBUTE_UNUSED,
10914 rtx *ready, int *nreadyp, int cycle)
/* Macc-chain promotion runs before register allocation only.
   NOTE(review): a guard (presumably "if (cycle == 0)") before each
   reset below is missing from this excerpt.  */
10916 if (!reload_completed && TUNE_MACC_CHAINS)
10919 mips_macc_chains_last_hilo = 0;
10921 mips_macc_chains_reorder (ready, *nreadyp);
/* VR4130 pairing runs after reload, unless -mvr4130-align handles
   alignment instead.  */
10923 if (reload_completed && TUNE_MIPS4130 && !TARGET_VR4130_ALIGN)
10926 vr4130_last_insn = 0;
10928 vr4130_reorder (ready, *nreadyp);
10930 return mips_issue_rate ();
10933 /* Implement TARGET_SCHED_VARIABLE_ISSUE. */
10936 mips_variable_issue (FILE *file ATTRIBUTE_UNUSED, int verbose ATTRIBUTE_UNUSED,
10937 rtx insn, int more)
/* NOTE(review): the case labels and return statements of this switch
   are missing from this excerpt.  */
10939 switch (GET_CODE (PATTERN (insn)))
10943 /* Don't count USEs and CLOBBERs against the issue rate. */
/* Default case: record the insn for the tuning heuristics above.  */
10948 if (!reload_completed && TUNE_MACC_CHAINS)
10949 mips_macc_chains_record (insn);
10950 vr4130_last_insn = insn;
10956 /* Implement TARGET_SCHED_ADJUST_COST. We assume that anti and output
10957 dependencies have no cost, except on the 20Kc where output-dependence
10958 is treated like input-dependence. */
10961 mips_adjust_cost (rtx insn ATTRIBUTE_UNUSED, rtx link,
10962 rtx dep ATTRIBUTE_UNUSED, int cost)
/* NOTE(review): the 20Kc conjunct and the return statements are
   missing from this excerpt.  Output dependence on the 20Kc keeps the
   full cost; other non-true dependencies (REG_NOTE_KIND != 0) cost
   nothing.  */
10964 if (REG_NOTE_KIND (link) == REG_DEP_OUTPUT
10967 if (REG_NOTE_KIND (link) != 0)
10972 /* Return the number of instructions that can be issued per cycle. */
10975 mips_issue_rate (void)
/* NOTE(review): the switch header on mips_tune and the per-group
   return values (and default) are missing from this excerpt.  */
10979 case PROCESSOR_74KC:
10980 case PROCESSOR_74KF2_1:
10981 case PROCESSOR_74KF1_1:
10982 case PROCESSOR_74KF3_2:
10983 /* The 74k is not strictly quad-issue cpu, but can be seen as one
10984 by the scheduler. It can issue 1 ALU, 1 AGEN and 2 FPU insns,
10985 but in reality only a maximum of 3 insns can be issued as the
10986 floating point load/stores also require a slot in the AGEN pipe. */
10989 case PROCESSOR_20KC:
10990 case PROCESSOR_R4130:
10991 case PROCESSOR_R5400:
10992 case PROCESSOR_R5500:
10993 case PROCESSOR_R7000:
10994 case PROCESSOR_R9000:
10997 case PROCESSOR_SB1:
10998 case PROCESSOR_SB1A:
10999 /* This is actually 4, but we get better performance if we claim 3.
11000 This is partly because of unwanted speculative code motion with the
11001 larger number, and partly because in most common cases we can't
11002 reach the theoretical max of 4. */
11010 /* Implements TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD. This should
11011 be as wide as the scheduling freedom in the DFA. */
11014 mips_multipass_dfa_lookahead (void)
/* NOTE(review): the return statement is missing from this excerpt.  */
11016 /* Can schedule up to 4 of the 6 function units in any one cycle. */
11023 /* Implements a store data bypass check. We need this because the cprestore
11024 pattern is type store, but defined using an UNSPEC. This UNSPEC causes the
11025 default routine to abort. We just return false for that case. */
11026 /* ??? Should try to give a better result here than assuming false. */
11029 mips_store_data_bypass_p (rtx out_insn, rtx in_insn)
/* NOTE(review): the "return false" for the UNSPEC_VOLATILE case is
   missing from this excerpt.  */
11031 if (GET_CODE (PATTERN (in_insn)) == UNSPEC_VOLATILE)
/* Note the inversion: the bypass applies when the generic helper says
   the store data is NOT produced by OUT_INSN.  */
11034 return ! store_data_bypass_p (out_insn, in_insn)
11037 /* Given that we have an rtx of the form (prefetch ... WRITE LOCALITY),
11038 return the first operand of the associated "pref" or "prefx" insn. */
11041 mips_prefetch_cookie (rtx write, rtx locality)
11043 /* store_streamed / load_streamed. */
11044 if (INTVAL (locality) <= 0)
11045 return GEN_INT (INTVAL (write) + 4);
11047 /* store / load. */
/* NOTE(review): the return for the plain store/load case is missing
   from this excerpt.  */
11048 if (INTVAL (locality) <= 2)
11051 /* store_retained / load_retained. */
11052 return GEN_INT (INTVAL (write) + 6);
11055 /* MIPS builtin function support. */
/* Descriptor for one MIPS builtin function; the tables below
   (mips_bdesc, sb1_bdesc, dsp_bdesc) are arrays of these.
   NOTE(review): the declarations of the name field and of the
   target-flags field are missing from this excerpt.  */
11057 struct builtin_description
11059 /* The code of the main .md file instruction. See mips_builtin_type
11060 for more information. */
11061 enum insn_code icode;
11063 /* The floating-point comparison code to use with ICODE, if any. */
11064 enum mips_fp_condition cond;
11066 /* The name of the builtin function. */
11069 /* Specifies how the function should be expanded. */
11070 enum mips_builtin_type builtin_type;
11072 /* The function's prototype. */
11073 enum mips_function_type function_type;
11075 /* The target flags required for this function. */
/* Helper macros that expand to builtin_description initializers.  Each
   takes the .md instruction suffix plus the prototype/flag fields.
   NOTE(review): some continuation lines of the multi-line macros are
   missing from this excerpt; do not edit the macro bodies without the
   complete original.  */
11079 /* Define a MIPS_BUILTIN_DIRECT function for instruction CODE_FOR_mips_<INSN>.
11080 FUNCTION_TYPE and TARGET_FLAGS are builtin_description fields. */
11081 #define DIRECT_BUILTIN(INSN, FUNCTION_TYPE, TARGET_FLAGS) \
11082 { CODE_FOR_mips_ ## INSN, 0, "__builtin_mips_" #INSN, \
11083 MIPS_BUILTIN_DIRECT, FUNCTION_TYPE, TARGET_FLAGS }
11085 /* Define __builtin_mips_<INSN>_<COND>_{s,d}, both of which require
11087 #define CMP_SCALAR_BUILTINS(INSN, COND, TARGET_FLAGS) \
11088 { CODE_FOR_mips_ ## INSN ## _cond_s, MIPS_FP_COND_ ## COND, \
11089 "__builtin_mips_" #INSN "_" #COND "_s", \
11090 MIPS_BUILTIN_CMP_SINGLE, MIPS_INT_FTYPE_SF_SF, TARGET_FLAGS }, \
11091 { CODE_FOR_mips_ ## INSN ## _cond_d, MIPS_FP_COND_ ## COND, \
11092 "__builtin_mips_" #INSN "_" #COND "_d", \
11093 MIPS_BUILTIN_CMP_SINGLE, MIPS_INT_FTYPE_DF_DF, TARGET_FLAGS }
11095 /* Define __builtin_mips_{any,all,upper,lower}_<INSN>_<COND>_ps.
11096 The lower and upper forms require TARGET_FLAGS while the any and all
11097 forms require MASK_MIPS3D. */
11098 #define CMP_PS_BUILTINS(INSN, COND, TARGET_FLAGS) \
11099 { CODE_FOR_mips_ ## INSN ## _cond_ps, MIPS_FP_COND_ ## COND, \
11100 "__builtin_mips_any_" #INSN "_" #COND "_ps", \
11101 MIPS_BUILTIN_CMP_ANY, MIPS_INT_FTYPE_V2SF_V2SF, MASK_MIPS3D }, \
11102 { CODE_FOR_mips_ ## INSN ## _cond_ps, MIPS_FP_COND_ ## COND, \
11103 "__builtin_mips_all_" #INSN "_" #COND "_ps", \
11104 MIPS_BUILTIN_CMP_ALL, MIPS_INT_FTYPE_V2SF_V2SF, MASK_MIPS3D }, \
11105 { CODE_FOR_mips_ ## INSN ## _cond_ps, MIPS_FP_COND_ ## COND, \
11106 "__builtin_mips_lower_" #INSN "_" #COND "_ps", \
11107 MIPS_BUILTIN_CMP_LOWER, MIPS_INT_FTYPE_V2SF_V2SF, TARGET_FLAGS }, \
11108 { CODE_FOR_mips_ ## INSN ## _cond_ps, MIPS_FP_COND_ ## COND, \
11109 "__builtin_mips_upper_" #INSN "_" #COND "_ps", \
11110 MIPS_BUILTIN_CMP_UPPER, MIPS_INT_FTYPE_V2SF_V2SF, TARGET_FLAGS }
11112 /* Define __builtin_mips_{any,all}_<INSN>_<COND>_4s. The functions
11113 require MASK_MIPS3D. */
11114 #define CMP_4S_BUILTINS(INSN, COND) \
11115 { CODE_FOR_mips_ ## INSN ## _cond_4s, MIPS_FP_COND_ ## COND, \
11116 "__builtin_mips_any_" #INSN "_" #COND "_4s", \
11117 MIPS_BUILTIN_CMP_ANY, MIPS_INT_FTYPE_V2SF_V2SF_V2SF_V2SF, \
11119 { CODE_FOR_mips_ ## INSN ## _cond_4s, MIPS_FP_COND_ ## COND, \
11120 "__builtin_mips_all_" #INSN "_" #COND "_4s", \
11121 MIPS_BUILTIN_CMP_ALL, MIPS_INT_FTYPE_V2SF_V2SF_V2SF_V2SF, \
11124 /* Define __builtin_mips_mov{t,f}_<INSN>_<COND>_ps. The comparison
11125 instruction requires TARGET_FLAGS. */
11126 #define MOVTF_BUILTINS(INSN, COND, TARGET_FLAGS) \
11127 { CODE_FOR_mips_ ## INSN ## _cond_ps, MIPS_FP_COND_ ## COND, \
11128 "__builtin_mips_movt_" #INSN "_" #COND "_ps", \
11129 MIPS_BUILTIN_MOVT, MIPS_V2SF_FTYPE_V2SF_V2SF_V2SF_V2SF, \
11131 { CODE_FOR_mips_ ## INSN ## _cond_ps, MIPS_FP_COND_ ## COND, \
11132 "__builtin_mips_movf_" #INSN "_" #COND "_ps", \
11133 MIPS_BUILTIN_MOVF, MIPS_V2SF_FTYPE_V2SF_V2SF_V2SF_V2SF, \
11136 /* Define all the builtins related to c.cond.fmt condition COND. */
11137 #define CMP_BUILTINS(COND) \
11138 MOVTF_BUILTINS (c, COND, MASK_PAIRED_SINGLE_FLOAT), \
11139 MOVTF_BUILTINS (cabs, COND, MASK_MIPS3D), \
11140 CMP_SCALAR_BUILTINS (cabs, COND, MASK_MIPS3D), \
11141 CMP_PS_BUILTINS (c, COND, MASK_PAIRED_SINGLE_FLOAT), \
11142 CMP_PS_BUILTINS (cabs, COND, MASK_MIPS3D), \
11143 CMP_4S_BUILTINS (c, COND), \
11144 CMP_4S_BUILTINS (cabs, COND)
/* Paired-single (MASK_PAIRED_SINGLE_FLOAT) and MIPS-3D (MASK_MIPS3D)
   builtin descriptions.  */
11146 static const struct builtin_description mips_bdesc[] =
11148 DIRECT_BUILTIN (pll_ps, MIPS_V2SF_FTYPE_V2SF_V2SF, MASK_PAIRED_SINGLE_FLOAT),
11149 DIRECT_BUILTIN (pul_ps, MIPS_V2SF_FTYPE_V2SF_V2SF, MASK_PAIRED_SINGLE_FLOAT),
11150 DIRECT_BUILTIN (plu_ps, MIPS_V2SF_FTYPE_V2SF_V2SF, MASK_PAIRED_SINGLE_FLOAT),
11151 DIRECT_BUILTIN (puu_ps, MIPS_V2SF_FTYPE_V2SF_V2SF, MASK_PAIRED_SINGLE_FLOAT),
11152 DIRECT_BUILTIN (cvt_ps_s, MIPS_V2SF_FTYPE_SF_SF, MASK_PAIRED_SINGLE_FLOAT),
11153 DIRECT_BUILTIN (cvt_s_pl, MIPS_SF_FTYPE_V2SF, MASK_PAIRED_SINGLE_FLOAT),
11154 DIRECT_BUILTIN (cvt_s_pu, MIPS_SF_FTYPE_V2SF, MASK_PAIRED_SINGLE_FLOAT),
11155 DIRECT_BUILTIN (abs_ps, MIPS_V2SF_FTYPE_V2SF, MASK_PAIRED_SINGLE_FLOAT),
11157 DIRECT_BUILTIN (alnv_ps, MIPS_V2SF_FTYPE_V2SF_V2SF_INT,
11158 MASK_PAIRED_SINGLE_FLOAT),
11159 DIRECT_BUILTIN (addr_ps, MIPS_V2SF_FTYPE_V2SF_V2SF, MASK_MIPS3D),
11160 DIRECT_BUILTIN (mulr_ps, MIPS_V2SF_FTYPE_V2SF_V2SF, MASK_MIPS3D),
11161 DIRECT_BUILTIN (cvt_pw_ps, MIPS_V2SF_FTYPE_V2SF, MASK_MIPS3D),
11162 DIRECT_BUILTIN (cvt_ps_pw, MIPS_V2SF_FTYPE_V2SF, MASK_MIPS3D),
/* MIPS-3D reciprocal and reciprocal-square-root step instructions, in
   scalar single, scalar double and paired-single forms.  */
11164 DIRECT_BUILTIN (recip1_s, MIPS_SF_FTYPE_SF, MASK_MIPS3D),
11165 DIRECT_BUILTIN (recip1_d, MIPS_DF_FTYPE_DF, MASK_MIPS3D),
11166 DIRECT_BUILTIN (recip1_ps, MIPS_V2SF_FTYPE_V2SF, MASK_MIPS3D),
11167 DIRECT_BUILTIN (recip2_s, MIPS_SF_FTYPE_SF_SF, MASK_MIPS3D),
11168 DIRECT_BUILTIN (recip2_d, MIPS_DF_FTYPE_DF_DF, MASK_MIPS3D),
11169 DIRECT_BUILTIN (recip2_ps, MIPS_V2SF_FTYPE_V2SF_V2SF, MASK_MIPS3D),
11171 DIRECT_BUILTIN (rsqrt1_s, MIPS_SF_FTYPE_SF, MASK_MIPS3D),
11172 DIRECT_BUILTIN (rsqrt1_d, MIPS_DF_FTYPE_DF, MASK_MIPS3D),
11173 DIRECT_BUILTIN (rsqrt1_ps, MIPS_V2SF_FTYPE_V2SF, MASK_MIPS3D),
11174 DIRECT_BUILTIN (rsqrt2_s, MIPS_SF_FTYPE_SF_SF, MASK_MIPS3D),
11175 DIRECT_BUILTIN (rsqrt2_d, MIPS_DF_FTYPE_DF_DF, MASK_MIPS3D),
11176 DIRECT_BUILTIN (rsqrt2_ps, MIPS_V2SF_FTYPE_V2SF_V2SF, MASK_MIPS3D),
/* Expand the comparison/conditional-move builtins for every
   c.cond.fmt condition.  */
11178 MIPS_FP_CONDITIONS (CMP_BUILTINS)
11181 /* Builtin functions for the SB-1 processor. */
/* DIRECT_BUILTIN expects an insn named CODE_FOR_mips_<INSN>; map the
   SB-1 sqrt builtin onto the generic V2SF sqrt pattern.  */
11183 #define CODE_FOR_mips_sqrt_ps CODE_FOR_sqrtv2sf2
11185 static const struct builtin_description sb1_bdesc[] =
11187 DIRECT_BUILTIN (sqrt_ps, MIPS_V2SF_FTYPE_V2SF, MASK_PAIRED_SINGLE_FLOAT)
11190 /* Builtin functions for DSP ASE. */
/* These DSP builtins map directly onto generic named vector patterns
   rather than mips_-prefixed ones.  */
11192 #define CODE_FOR_mips_addq_ph CODE_FOR_addv2hi3
11193 #define CODE_FOR_mips_addu_qb CODE_FOR_addv4qi3
11194 #define CODE_FOR_mips_subq_ph CODE_FOR_subv2hi3
11195 #define CODE_FOR_mips_subu_qb CODE_FOR_subv4qi3
11196 #define CODE_FOR_mips_mul_ph CODE_FOR_mulv2hi3
11198 /* Define a MIPS_BUILTIN_DIRECT_NO_TARGET function for instruction
11199 CODE_FOR_mips_<INSN>. FUNCTION_TYPE and TARGET_FLAGS are
11200 builtin_description fields. */
11201 #define DIRECT_NO_TARGET_BUILTIN(INSN, FUNCTION_TYPE, TARGET_FLAGS) \
11202 { CODE_FOR_mips_ ## INSN, 0, "__builtin_mips_" #INSN, \
11203 MIPS_BUILTIN_DIRECT_NO_TARGET, FUNCTION_TYPE, TARGET_FLAGS }
11205 /* Define __builtin_mips_bposge<VALUE>. <VALUE> is 32 for the MIPS32 DSP
11206 branch instruction. TARGET_FLAGS is a builtin_description field. */
11207 #define BPOSGE_BUILTIN(VALUE, TARGET_FLAGS) \
11208 { CODE_FOR_mips_bposge, 0, "__builtin_mips_bposge" #VALUE, \
11209 MIPS_BUILTIN_BPOSGE ## VALUE, MIPS_SI_FTYPE_VOID, TARGET_FLAGS }
11211 static const struct builtin_description dsp_bdesc[] =
11213 DIRECT_BUILTIN (addq_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, MASK_DSP),
11214 DIRECT_BUILTIN (addq_s_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, MASK_DSP),
11215 DIRECT_BUILTIN (addq_s_w, MIPS_SI_FTYPE_SI_SI, MASK_DSP),
11216 DIRECT_BUILTIN (addu_qb, MIPS_V4QI_FTYPE_V4QI_V4QI, MASK_DSP),
11217 DIRECT_BUILTIN (addu_s_qb, MIPS_V4QI_FTYPE_V4QI_V4QI, MASK_DSP),
11218 DIRECT_BUILTIN (subq_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, MASK_DSP),
11219 DIRECT_BUILTIN (subq_s_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, MASK_DSP),
11220 DIRECT_BUILTIN (subq_s_w, MIPS_SI_FTYPE_SI_SI, MASK_DSP),
11221 DIRECT_BUILTIN (subu_qb, MIPS_V4QI_FTYPE_V4QI_V4QI, MASK_DSP),
11222 DIRECT_BUILTIN (subu_s_qb, MIPS_V4QI_FTYPE_V4QI_V4QI, MASK_DSP),
11223 DIRECT_BUILTIN (addsc, MIPS_SI_FTYPE_SI_SI, MASK_DSP),
11224 DIRECT_BUILTIN (addwc, MIPS_SI_FTYPE_SI_SI, MASK_DSP),
11225 DIRECT_BUILTIN (modsub, MIPS_SI_FTYPE_SI_SI, MASK_DSP),
11226 DIRECT_BUILTIN (raddu_w_qb, MIPS_SI_FTYPE_V4QI, MASK_DSP),
11227 DIRECT_BUILTIN (absq_s_ph, MIPS_V2HI_FTYPE_V2HI, MASK_DSP),
11228 DIRECT_BUILTIN (absq_s_w, MIPS_SI_FTYPE_SI, MASK_DSP),
11229 DIRECT_BUILTIN (precrq_qb_ph, MIPS_V4QI_FTYPE_V2HI_V2HI, MASK_DSP),
11230 DIRECT_BUILTIN (precrq_ph_w, MIPS_V2HI_FTYPE_SI_SI, MASK_DSP),
11231 DIRECT_BUILTIN (precrq_rs_ph_w, MIPS_V2HI_FTYPE_SI_SI, MASK_DSP),
11232 DIRECT_BUILTIN (precrqu_s_qb_ph, MIPS_V4QI_FTYPE_V2HI_V2HI, MASK_DSP),
11233 DIRECT_BUILTIN (preceq_w_phl, MIPS_SI_FTYPE_V2HI, MASK_DSP),
11234 DIRECT_BUILTIN (preceq_w_phr, MIPS_SI_FTYPE_V2HI, MASK_DSP),
11235 DIRECT_BUILTIN (precequ_ph_qbl, MIPS_V2HI_FTYPE_V4QI, MASK_DSP),
11236 DIRECT_BUILTIN (precequ_ph_qbr, MIPS_V2HI_FTYPE_V4QI, MASK_DSP),
11237 DIRECT_BUILTIN (precequ_ph_qbla, MIPS_V2HI_FTYPE_V4QI, MASK_DSP),
11238 DIRECT_BUILTIN (precequ_ph_qbra, MIPS_V2HI_FTYPE_V4QI, MASK_DSP),
11239 DIRECT_BUILTIN (preceu_ph_qbl, MIPS_V2HI_FTYPE_V4QI, MASK_DSP),
11240 DIRECT_BUILTIN (preceu_ph_qbr, MIPS_V2HI_FTYPE_V4QI, MASK_DSP),
11241 DIRECT_BUILTIN (preceu_ph_qbla, MIPS_V2HI_FTYPE_V4QI, MASK_DSP),
11242 DIRECT_BUILTIN (preceu_ph_qbra, MIPS_V2HI_FTYPE_V4QI, MASK_DSP),
11243 DIRECT_BUILTIN (shll_qb, MIPS_V4QI_FTYPE_V4QI_SI, MASK_DSP),
11244 DIRECT_BUILTIN (shll_ph, MIPS_V2HI_FTYPE_V2HI_SI, MASK_DSP),
11245 DIRECT_BUILTIN (shll_s_ph, MIPS_V2HI_FTYPE_V2HI_SI, MASK_DSP),
11246 DIRECT_BUILTIN (shll_s_w, MIPS_SI_FTYPE_SI_SI, MASK_DSP),
11247 DIRECT_BUILTIN (shrl_qb, MIPS_V4QI_FTYPE_V4QI_SI, MASK_DSP),
11248 DIRECT_BUILTIN (shra_ph, MIPS_V2HI_FTYPE_V2HI_SI, MASK_DSP),
11249 DIRECT_BUILTIN (shra_r_ph, MIPS_V2HI_FTYPE_V2HI_SI, MASK_DSP),
11250 DIRECT_BUILTIN (shra_r_w, MIPS_SI_FTYPE_SI_SI, MASK_DSP),
11251 DIRECT_BUILTIN (muleu_s_ph_qbl, MIPS_V2HI_FTYPE_V4QI_V2HI, MASK_DSP),
11252 DIRECT_BUILTIN (muleu_s_ph_qbr, MIPS_V2HI_FTYPE_V4QI_V2HI, MASK_DSP),
11253 DIRECT_BUILTIN (mulq_rs_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, MASK_DSP),
11254 DIRECT_BUILTIN (muleq_s_w_phl, MIPS_SI_FTYPE_V2HI_V2HI, MASK_DSP),
11255 DIRECT_BUILTIN (muleq_s_w_phr, MIPS_SI_FTYPE_V2HI_V2HI, MASK_DSP),
11256 DIRECT_BUILTIN (bitrev, MIPS_SI_FTYPE_SI, MASK_DSP),
11257 DIRECT_BUILTIN (insv, MIPS_SI_FTYPE_SI_SI, MASK_DSP),
11258 DIRECT_BUILTIN (repl_qb, MIPS_V4QI_FTYPE_SI, MASK_DSP),
11259 DIRECT_BUILTIN (repl_ph, MIPS_V2HI_FTYPE_SI, MASK_DSP),
11260 DIRECT_NO_TARGET_BUILTIN (cmpu_eq_qb, MIPS_VOID_FTYPE_V4QI_V4QI, MASK_DSP),
11261 DIRECT_NO_TARGET_BUILTIN (cmpu_lt_qb, MIPS_VOID_FTYPE_V4QI_V4QI, MASK_DSP),
11262 DIRECT_NO_TARGET_BUILTIN (cmpu_le_qb, MIPS_VOID_FTYPE_V4QI_V4QI, MASK_DSP),
11263 DIRECT_BUILTIN (cmpgu_eq_qb, MIPS_SI_FTYPE_V4QI_V4QI, MASK_DSP),
11264 DIRECT_BUILTIN (cmpgu_lt_qb, MIPS_SI_FTYPE_V4QI_V4QI, MASK_DSP),
11265 DIRECT_BUILTIN (cmpgu_le_qb, MIPS_SI_FTYPE_V4QI_V4QI, MASK_DSP),
11266 DIRECT_NO_TARGET_BUILTIN (cmp_eq_ph, MIPS_VOID_FTYPE_V2HI_V2HI, MASK_DSP),
11267 DIRECT_NO_TARGET_BUILTIN (cmp_lt_ph, MIPS_VOID_FTYPE_V2HI_V2HI, MASK_DSP),
11268 DIRECT_NO_TARGET_BUILTIN (cmp_le_ph, MIPS_VOID_FTYPE_V2HI_V2HI, MASK_DSP),
11269 DIRECT_BUILTIN (pick_qb, MIPS_V4QI_FTYPE_V4QI_V4QI, MASK_DSP),
11270 DIRECT_BUILTIN (pick_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, MASK_DSP),
11271 DIRECT_BUILTIN (packrl_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, MASK_DSP),
11272 DIRECT_NO_TARGET_BUILTIN (wrdsp, MIPS_VOID_FTYPE_SI_SI, MASK_DSP),
11273 DIRECT_BUILTIN (rddsp, MIPS_SI_FTYPE_SI, MASK_DSP),
11274 DIRECT_BUILTIN (lbux, MIPS_SI_FTYPE_PTR_SI, MASK_DSP),
11275 DIRECT_BUILTIN (lhx, MIPS_SI_FTYPE_PTR_SI, MASK_DSP),
11276 DIRECT_BUILTIN (lwx, MIPS_SI_FTYPE_PTR_SI, MASK_DSP),
11277 BPOSGE_BUILTIN (32, MASK_DSP),
11279 /* The following are for the MIPS DSP ASE REV 2. */
11280 DIRECT_BUILTIN (absq_s_qb, MIPS_V4QI_FTYPE_V4QI, MASK_DSPR2),
11281 DIRECT_BUILTIN (addu_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, MASK_DSPR2),
11282 DIRECT_BUILTIN (addu_s_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, MASK_DSPR2),
11283 DIRECT_BUILTIN (adduh_qb, MIPS_V4QI_FTYPE_V4QI_V4QI, MASK_DSPR2),
11284 DIRECT_BUILTIN (adduh_r_qb, MIPS_V4QI_FTYPE_V4QI_V4QI, MASK_DSPR2),
11285 DIRECT_BUILTIN (append, MIPS_SI_FTYPE_SI_SI_SI, MASK_DSPR2),
11286 DIRECT_BUILTIN (balign, MIPS_SI_FTYPE_SI_SI_SI, MASK_DSPR2),
11287 DIRECT_BUILTIN (cmpgdu_eq_qb, MIPS_SI_FTYPE_V4QI_V4QI, MASK_DSPR2),
11288 DIRECT_BUILTIN (cmpgdu_lt_qb, MIPS_SI_FTYPE_V4QI_V4QI, MASK_DSPR2),
11289 DIRECT_BUILTIN (cmpgdu_le_qb, MIPS_SI_FTYPE_V4QI_V4QI, MASK_DSPR2),
11290 DIRECT_BUILTIN (mul_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, MASK_DSPR2),
11291 DIRECT_BUILTIN (mul_s_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, MASK_DSPR2),
11292 DIRECT_BUILTIN (mulq_rs_w, MIPS_SI_FTYPE_SI_SI, MASK_DSPR2),
11293 DIRECT_BUILTIN (mulq_s_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, MASK_DSPR2),
11294 DIRECT_BUILTIN (mulq_s_w, MIPS_SI_FTYPE_SI_SI, MASK_DSPR2),
11295 DIRECT_BUILTIN (precr_qb_ph, MIPS_V4QI_FTYPE_V2HI_V2HI, MASK_DSPR2),
11296 DIRECT_BUILTIN (precr_sra_ph_w, MIPS_V2HI_FTYPE_SI_SI_SI, MASK_DSPR2),
11297 DIRECT_BUILTIN (precr_sra_r_ph_w, MIPS_V2HI_FTYPE_SI_SI_SI, MASK_DSPR2),
11298 DIRECT_BUILTIN (prepend, MIPS_SI_FTYPE_SI_SI_SI, MASK_DSPR2),
11299 DIRECT_BUILTIN (shra_qb, MIPS_V4QI_FTYPE_V4QI_SI, MASK_DSPR2),
11300 DIRECT_BUILTIN (shra_r_qb, MIPS_V4QI_FTYPE_V4QI_SI, MASK_DSPR2),
11301 DIRECT_BUILTIN (shrl_ph, MIPS_V2HI_FTYPE_V2HI_SI, MASK_DSPR2),
11302 DIRECT_BUILTIN (subu_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, MASK_DSPR2),
11303 DIRECT_BUILTIN (subu_s_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, MASK_DSPR2),
11304 DIRECT_BUILTIN (subuh_qb, MIPS_V4QI_FTYPE_V4QI_V4QI, MASK_DSPR2),
11305 DIRECT_BUILTIN (subuh_r_qb, MIPS_V4QI_FTYPE_V4QI_V4QI, MASK_DSPR2),
11306 DIRECT_BUILTIN (addqh_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, MASK_DSPR2),
11307 DIRECT_BUILTIN (addqh_r_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, MASK_DSPR2),
11308 DIRECT_BUILTIN (addqh_w, MIPS_SI_FTYPE_SI_SI, MASK_DSPR2),
11309 DIRECT_BUILTIN (addqh_r_w, MIPS_SI_FTYPE_SI_SI, MASK_DSPR2),
11310 DIRECT_BUILTIN (subqh_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, MASK_DSPR2),
11311 DIRECT_BUILTIN (subqh_r_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, MASK_DSPR2),
11312 DIRECT_BUILTIN (subqh_w, MIPS_SI_FTYPE_SI_SI, MASK_DSPR2),
11313 DIRECT_BUILTIN (subqh_r_w, MIPS_SI_FTYPE_SI_SI, MASK_DSPR2)
/* DSP builtin functions whose signatures involve a 64-bit (DImode)
   accumulator value.  They are kept in a separate table from dsp_bdesc;
   the table name indicates they are registered only for 32-bit targets
   (see the matching bdesc_arrays entry).  First group: DSP ASE rev 1;
   the REV 2 entries follow the comment below.  */
11316 static const struct builtin_description dsp_32only_bdesc[] =
11318   DIRECT_BUILTIN (dpau_h_qbl, MIPS_DI_FTYPE_DI_V4QI_V4QI, MASK_DSP),
11319   DIRECT_BUILTIN (dpau_h_qbr, MIPS_DI_FTYPE_DI_V4QI_V4QI, MASK_DSP),
11320   DIRECT_BUILTIN (dpsu_h_qbl, MIPS_DI_FTYPE_DI_V4QI_V4QI, MASK_DSP),
11321   DIRECT_BUILTIN (dpsu_h_qbr, MIPS_DI_FTYPE_DI_V4QI_V4QI, MASK_DSP),
11322   DIRECT_BUILTIN (dpaq_s_w_ph, MIPS_DI_FTYPE_DI_V2HI_V2HI, MASK_DSP),
11323   DIRECT_BUILTIN (dpsq_s_w_ph, MIPS_DI_FTYPE_DI_V2HI_V2HI, MASK_DSP),
11324   DIRECT_BUILTIN (mulsaq_s_w_ph, MIPS_DI_FTYPE_DI_V2HI_V2HI, MASK_DSP),
11325   DIRECT_BUILTIN (dpaq_sa_l_w, MIPS_DI_FTYPE_DI_SI_SI, MASK_DSP),
11326   DIRECT_BUILTIN (dpsq_sa_l_w, MIPS_DI_FTYPE_DI_SI_SI, MASK_DSP),
11327   DIRECT_BUILTIN (maq_s_w_phl, MIPS_DI_FTYPE_DI_V2HI_V2HI, MASK_DSP),
11328   DIRECT_BUILTIN (maq_s_w_phr, MIPS_DI_FTYPE_DI_V2HI_V2HI, MASK_DSP),
11329   DIRECT_BUILTIN (maq_sa_w_phl, MIPS_DI_FTYPE_DI_V2HI_V2HI, MASK_DSP),
11330   DIRECT_BUILTIN (maq_sa_w_phr, MIPS_DI_FTYPE_DI_V2HI_V2HI, MASK_DSP),
11331   DIRECT_BUILTIN (extr_w, MIPS_SI_FTYPE_DI_SI, MASK_DSP),
11332   DIRECT_BUILTIN (extr_r_w, MIPS_SI_FTYPE_DI_SI, MASK_DSP),
11333   DIRECT_BUILTIN (extr_rs_w, MIPS_SI_FTYPE_DI_SI, MASK_DSP),
11334   DIRECT_BUILTIN (extr_s_h, MIPS_SI_FTYPE_DI_SI, MASK_DSP),
11335   DIRECT_BUILTIN (extp, MIPS_SI_FTYPE_DI_SI, MASK_DSP),
11336   DIRECT_BUILTIN (extpdp, MIPS_SI_FTYPE_DI_SI, MASK_DSP),
11337   DIRECT_BUILTIN (shilo, MIPS_DI_FTYPE_DI_SI, MASK_DSP),
11338   DIRECT_BUILTIN (mthlip, MIPS_DI_FTYPE_DI_SI, MASK_DSP),
11340 /* The following are for the MIPS DSP ASE REV 2. */
11341   DIRECT_BUILTIN (dpa_w_ph, MIPS_DI_FTYPE_DI_V2HI_V2HI, MASK_DSPR2),
11342   DIRECT_BUILTIN (dps_w_ph, MIPS_DI_FTYPE_DI_V2HI_V2HI, MASK_DSPR2),
11343   DIRECT_BUILTIN (madd, MIPS_DI_FTYPE_DI_SI_SI, MASK_DSPR2),
11344   DIRECT_BUILTIN (maddu, MIPS_DI_FTYPE_DI_USI_USI, MASK_DSPR2),
11345   DIRECT_BUILTIN (msub, MIPS_DI_FTYPE_DI_SI_SI, MASK_DSPR2),
11346   DIRECT_BUILTIN (msubu, MIPS_DI_FTYPE_DI_USI_USI, MASK_DSPR2),
11347   DIRECT_BUILTIN (mulsa_w_ph, MIPS_DI_FTYPE_DI_V2HI_V2HI, MASK_DSPR2),
11348   DIRECT_BUILTIN (mult, MIPS_DI_FTYPE_SI_SI, MASK_DSPR2),
11349   DIRECT_BUILTIN (multu, MIPS_DI_FTYPE_USI_USI, MASK_DSPR2),
11350   DIRECT_BUILTIN (dpax_w_ph, MIPS_DI_FTYPE_DI_V2HI_V2HI, MASK_DSPR2),
11351   DIRECT_BUILTIN (dpsx_w_ph, MIPS_DI_FTYPE_DI_V2HI_V2HI, MASK_DSPR2),
11352   DIRECT_BUILTIN (dpaqx_s_w_ph, MIPS_DI_FTYPE_DI_V2HI_V2HI, MASK_DSPR2),
11353   DIRECT_BUILTIN (dpaqx_sa_w_ph, MIPS_DI_FTYPE_DI_V2HI_V2HI, MASK_DSPR2),
11354   DIRECT_BUILTIN (dpsqx_s_w_ph, MIPS_DI_FTYPE_DI_V2HI_V2HI, MASK_DSPR2),
11355   DIRECT_BUILTIN (dpsqx_sa_w_ph, MIPS_DI_FTYPE_DI_V2HI_V2HI, MASK_DSPR2)
11358 /* This helps provide a mapping from builtin function codes to the
         bdesc arrays above.  Each entry describes one builtin table, the
         processor that supports it, and the target flags that disable it.
         NOTE(review): the "struct bdesc_map" header line and the `size'
         member declaration are not visible in this extract -- confirm
         against the full file.  */
11363   /* The builtin function table that this entry describes. */
11364   const struct builtin_description *bdesc;
11366   /* The number of entries in the builtin function table. */
11369   /* The target processor that supports these builtin functions.
11370      PROCESSOR_MAX means we enable them for all processors. */
11371   enum processor_type proc;
11373   /* If the target has these flags, this builtin function table
11374      will not be supported. */
11375   int unsupported_target_flags;
/* The complete list of builtin tables.  mips_init_builtins registers
   the tables in this order, and mips_expand_builtin walks them in the
   same order to map a function code back to its table, so entries here
   must not be reordered independently of those two functions.
   NOTE(review): the unsupported-target-flags field of the final entry
   is cut off in this extract -- presumably MASK_64BIT, matching the
   "32only" table name; verify against the full file.  */
11378 static const struct bdesc_map bdesc_arrays[] =
11380   { mips_bdesc, ARRAY_SIZE (mips_bdesc), PROCESSOR_MAX, 0 },
11381   { sb1_bdesc, ARRAY_SIZE (sb1_bdesc), PROCESSOR_SB1, 0 },
11382   { dsp_bdesc, ARRAY_SIZE (dsp_bdesc), PROCESSOR_MAX, 0 },
11383   { dsp_32only_bdesc, ARRAY_SIZE (dsp_32only_bdesc), PROCESSOR_MAX,
11387 /* Take the argument ARGNUM of the arglist of EXP and convert it into a form
11388 suitable for input operand OP of instruction ICODE. Return the value. */
11391 mips_prepare_builtin_arg (enum insn_code icode,
11392 unsigned int op, tree exp, unsigned int argnum)
11395 enum machine_mode mode;
11397 value = expand_normal (CALL_EXPR_ARG (exp, argnum));
11398 mode = insn_data[icode].operand[op].mode;
11399 if (!insn_data[icode].operand[op].predicate (value, mode))
11401 value = copy_to_mode_reg (mode, value);
11402 /* Check the predicate again. */
11403 if (!insn_data[icode].operand[op].predicate (value, mode))
11405 error ("invalid argument to builtin function");
11413 /* Return an rtx suitable for output operand OP of instruction ICODE.
11414 If TARGET is non-null, try to use it where possible. */
11417 mips_prepare_builtin_target (enum insn_code icode, unsigned int op, rtx target)
11419 enum machine_mode mode;
11421 mode = insn_data[icode].operand[op].mode;
11422 if (target == 0 || !insn_data[icode].operand[op].predicate (target, mode))
11423 target = gen_reg_rtx (mode);
11428 /* Expand builtin functions. This is called from TARGET_EXPAND_BUILTIN.
         EXP is the CALL_EXPR; TARGET, if nonnull, suggests a place for the
         result.  SUBTARGET, MODE and IGNORE are unused here.
         NOTE(review): several structural lines (braces, the fcode
         adjustment between tables, the `switch (type)' header and its
         default case) are missing from this extract.  */
11431 mips_expand_builtin (tree exp, rtx target, rtx subtarget ATTRIBUTE_UNUSED,
11432 enum machine_mode mode ATTRIBUTE_UNUSED,
11433 int ignore ATTRIBUTE_UNUSED)
11435 enum insn_code icode;
11436 enum mips_builtin_type type;
11438 unsigned int fcode;
11439 const struct builtin_description *bdesc;
11440 const struct bdesc_map *m;
11442 fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
11443 fcode = DECL_FUNCTION_CODE (fndecl);
/* Function codes are allocated consecutively across all bdesc arrays,
   in bdesc_arrays order; find the table this code falls into.  */
11446 for (m = bdesc_arrays; m < &bdesc_arrays[ARRAY_SIZE (bdesc_arrays)]; m++)
11448 if (fcode < m->size)
11451 icode = bdesc[fcode].icode;
11452 type = bdesc[fcode].builtin_type;
/* Dispatch on the builtin's expansion strategy.  */
11462 case MIPS_BUILTIN_DIRECT:
11463 return mips_expand_builtin_direct (icode, target, exp, true);
11465 case MIPS_BUILTIN_DIRECT_NO_TARGET:
11466 return mips_expand_builtin_direct (icode, target, exp, false);
11468 case MIPS_BUILTIN_MOVT:
11469 case MIPS_BUILTIN_MOVF:
11470 return mips_expand_builtin_movtf (type, icode, bdesc[fcode].cond,
11473 case MIPS_BUILTIN_CMP_ANY:
11474 case MIPS_BUILTIN_CMP_ALL:
11475 case MIPS_BUILTIN_CMP_UPPER:
11476 case MIPS_BUILTIN_CMP_LOWER:
11477 case MIPS_BUILTIN_CMP_SINGLE:
11478 return mips_expand_builtin_compare (type, icode, bdesc[fcode].cond,
11481 case MIPS_BUILTIN_BPOSGE32:
11482 return mips_expand_builtin_bposge (type, target);
11489 /* Init builtin functions. This is called from TARGET_INIT_BUILTIN.
         Builds the function-type nodes used by the builtin tables, then
         registers every supported builtin via add_builtin_function.  */
11492 mips_init_builtins (void)
11494 const struct builtin_description *d;
11495 const struct bdesc_map *m;
11496 tree types[(int) MIPS_MAX_FTYPE_MAX];
11497 tree V2SF_type_node;
11498 tree V2HI_type_node;
11499 tree V4QI_type_node;
11500 unsigned int offset;
11502 /* We have only builtins for -mpaired-single, -mips3d and -mdsp. */
11503 if (!TARGET_PAIRED_SINGLE_FLOAT && !TARGET_DSP)
/* Function types used by the paired-single (and -mips3d) builtins.  */
11506 if (TARGET_PAIRED_SINGLE_FLOAT)
11508 V2SF_type_node = build_vector_type_for_mode (float_type_node, V2SFmode);
11510 types[MIPS_V2SF_FTYPE_V2SF]
11511 = build_function_type_list (V2SF_type_node, V2SF_type_node, NULL_TREE);
11513 types[MIPS_V2SF_FTYPE_V2SF_V2SF]
11514 = build_function_type_list (V2SF_type_node,
11515 V2SF_type_node, V2SF_type_node, NULL_TREE);
11517 types[MIPS_V2SF_FTYPE_V2SF_V2SF_INT]
11518 = build_function_type_list (V2SF_type_node,
11519 V2SF_type_node, V2SF_type_node,
11520 integer_type_node, NULL_TREE);
11522 types[MIPS_V2SF_FTYPE_V2SF_V2SF_V2SF_V2SF]
11523 = build_function_type_list (V2SF_type_node,
11524 V2SF_type_node, V2SF_type_node,
11525 V2SF_type_node, V2SF_type_node, NULL_TREE);
11527 types[MIPS_V2SF_FTYPE_SF_SF]
11528 = build_function_type_list (V2SF_type_node,
11529 float_type_node, float_type_node, NULL_TREE);
11531 types[MIPS_INT_FTYPE_V2SF_V2SF]
11532 = build_function_type_list (integer_type_node,
11533 V2SF_type_node, V2SF_type_node, NULL_TREE);
11535 types[MIPS_INT_FTYPE_V2SF_V2SF_V2SF_V2SF]
11536 = build_function_type_list (integer_type_node,
11537 V2SF_type_node, V2SF_type_node,
11538 V2SF_type_node, V2SF_type_node, NULL_TREE);
11540 types[MIPS_INT_FTYPE_SF_SF]
11541 = build_function_type_list (integer_type_node,
11542 float_type_node, float_type_node, NULL_TREE);
11544 types[MIPS_INT_FTYPE_DF_DF]
11545 = build_function_type_list (integer_type_node,
11546 double_type_node, double_type_node, NULL_TREE);
11548 types[MIPS_SF_FTYPE_V2SF]
11549 = build_function_type_list (float_type_node, V2SF_type_node, NULL_TREE);
11551 types[MIPS_SF_FTYPE_SF]
11552 = build_function_type_list (float_type_node,
11553 float_type_node, NULL_TREE);
11555 types[MIPS_SF_FTYPE_SF_SF]
11556 = build_function_type_list (float_type_node,
11557 float_type_node, float_type_node, NULL_TREE);
11559 types[MIPS_DF_FTYPE_DF]
11560 = build_function_type_list (double_type_node,
11561 double_type_node, NULL_TREE);
11563 types[MIPS_DF_FTYPE_DF_DF]
11564 = build_function_type_list (double_type_node,
11565 double_type_node, double_type_node, NULL_TREE);
/* Function types used by the DSP builtins.  V2HI and V4QI are the
   packed 16-bit and 8-bit SIMD vector types.
   NOTE(review): the enclosing `if (TARGET_DSP)' guard and many closing
   NULL_TREE lines of these calls are missing from this extract.  */
11570 V2HI_type_node = build_vector_type_for_mode (intHI_type_node, V2HImode);
11571 V4QI_type_node = build_vector_type_for_mode (intQI_type_node, V4QImode);
11573 types[MIPS_V2HI_FTYPE_V2HI_V2HI]
11574 = build_function_type_list (V2HI_type_node,
11575 V2HI_type_node, V2HI_type_node,
11578 types[MIPS_SI_FTYPE_SI_SI]
11579 = build_function_type_list (intSI_type_node,
11580 intSI_type_node, intSI_type_node,
11583 types[MIPS_V4QI_FTYPE_V4QI_V4QI]
11584 = build_function_type_list (V4QI_type_node,
11585 V4QI_type_node, V4QI_type_node,
11588 types[MIPS_SI_FTYPE_V4QI]
11589 = build_function_type_list (intSI_type_node,
11593 types[MIPS_V2HI_FTYPE_V2HI]
11594 = build_function_type_list (V2HI_type_node,
11598 types[MIPS_SI_FTYPE_SI]
11599 = build_function_type_list (intSI_type_node,
11603 types[MIPS_V4QI_FTYPE_V2HI_V2HI]
11604 = build_function_type_list (V4QI_type_node,
11605 V2HI_type_node, V2HI_type_node,
11608 types[MIPS_V2HI_FTYPE_SI_SI]
11609 = build_function_type_list (V2HI_type_node,
11610 intSI_type_node, intSI_type_node,
11613 types[MIPS_SI_FTYPE_V2HI]
11614 = build_function_type_list (intSI_type_node,
11618 types[MIPS_V2HI_FTYPE_V4QI]
11619 = build_function_type_list (V2HI_type_node,
11623 types[MIPS_V4QI_FTYPE_V4QI_SI]
11624 = build_function_type_list (V4QI_type_node,
11625 V4QI_type_node, intSI_type_node,
11628 types[MIPS_V2HI_FTYPE_V2HI_SI]
11629 = build_function_type_list (V2HI_type_node,
11630 V2HI_type_node, intSI_type_node,
11633 types[MIPS_V2HI_FTYPE_V4QI_V2HI]
11634 = build_function_type_list (V2HI_type_node,
11635 V4QI_type_node, V2HI_type_node,
11638 types[MIPS_SI_FTYPE_V2HI_V2HI]
11639 = build_function_type_list (intSI_type_node,
11640 V2HI_type_node, V2HI_type_node,
11643 types[MIPS_DI_FTYPE_DI_V4QI_V4QI]
11644 = build_function_type_list (intDI_type_node,
11645 intDI_type_node, V4QI_type_node, V4QI_type_node,
11648 types[MIPS_DI_FTYPE_DI_V2HI_V2HI]
11649 = build_function_type_list (intDI_type_node,
11650 intDI_type_node, V2HI_type_node, V2HI_type_node,
11653 types[MIPS_DI_FTYPE_DI_SI_SI]
11654 = build_function_type_list (intDI_type_node,
11655 intDI_type_node, intSI_type_node, intSI_type_node,
11658 types[MIPS_V4QI_FTYPE_SI]
11659 = build_function_type_list (V4QI_type_node,
11663 types[MIPS_V2HI_FTYPE_SI]
11664 = build_function_type_list (V2HI_type_node,
11668 types[MIPS_VOID_FTYPE_V4QI_V4QI]
11669 = build_function_type_list (void_type_node,
11670 V4QI_type_node, V4QI_type_node,
11673 types[MIPS_SI_FTYPE_V4QI_V4QI]
11674 = build_function_type_list (intSI_type_node,
11675 V4QI_type_node, V4QI_type_node,
11678 types[MIPS_VOID_FTYPE_V2HI_V2HI]
11679 = build_function_type_list (void_type_node,
11680 V2HI_type_node, V2HI_type_node,
11683 types[MIPS_SI_FTYPE_DI_SI]
11684 = build_function_type_list (intSI_type_node,
11685 intDI_type_node, intSI_type_node,
11688 types[MIPS_DI_FTYPE_DI_SI]
11689 = build_function_type_list (intDI_type_node,
11690 intDI_type_node, intSI_type_node,
11693 types[MIPS_VOID_FTYPE_SI_SI]
11694 = build_function_type_list (void_type_node,
11695 intSI_type_node, intSI_type_node,
11698 types[MIPS_SI_FTYPE_PTR_SI]
11699 = build_function_type_list (intSI_type_node,
11700 ptr_type_node, intSI_type_node,
11703 types[MIPS_SI_FTYPE_VOID]
11704 = build_function_type (intSI_type_node, void_list_node);
/* Additional function types used by the DSP ASE REV 2 builtins;
   presumably guarded by a TARGET_DSPR2 check not visible here.  */
11708 types[MIPS_V4QI_FTYPE_V4QI]
11709 = build_function_type_list (V4QI_type_node,
11713 types[MIPS_SI_FTYPE_SI_SI_SI]
11714 = build_function_type_list (intSI_type_node,
11715 intSI_type_node, intSI_type_node,
11716 intSI_type_node, NULL_TREE);
11718 types[MIPS_DI_FTYPE_DI_USI_USI]
11719 = build_function_type_list (intDI_type_node,
11721 unsigned_intSI_type_node,
11722 unsigned_intSI_type_node, NULL_TREE);
11724 types[MIPS_DI_FTYPE_SI_SI]
11725 = build_function_type_list (intDI_type_node,
11726 intSI_type_node, intSI_type_node,
11729 types[MIPS_DI_FTYPE_USI_USI]
11730 = build_function_type_list (intDI_type_node,
11731 unsigned_intSI_type_node,
11732 unsigned_intSI_type_node, NULL_TREE);
11734 types[MIPS_V2HI_FTYPE_SI_SI_SI]
11735 = build_function_type_list (V2HI_type_node,
11736 intSI_type_node, intSI_type_node,
11737 intSI_type_node, NULL_TREE);
11742 /* Iterate through all of the bdesc arrays, initializing all of the
11743    builtin functions.  A table is registered when its processor matches
11744    (or is PROCESSOR_MAX) and none of its unsupported flags are set; an
11745    individual builtin is registered when all of its required target
11746    flags are set.  The function code passed to add_builtin_function is
11747    the entry's index plus the cumulative OFFSET of earlier tables, which
11748    mips_expand_builtin reverses by walking bdesc_arrays in order.  */
11746 for (m = bdesc_arrays; m < &bdesc_arrays[ARRAY_SIZE (bdesc_arrays)]; m++)
11748 if ((m->proc == PROCESSOR_MAX || (m->proc == mips_arch))
11749 && (m->unsupported_target_flags & target_flags) == 0)
11750 for (d = m->bdesc; d < &m->bdesc[m->size]; d++)
11751 if ((d->target_flags & target_flags) == d->target_flags)
11752 add_builtin_function (d->name, types[d->function_type],
11753 d - m->bdesc + offset,
11754 BUILT_IN_MD, NULL, NULL);
11759 /* Expand a MIPS_BUILTIN_DIRECT function. ICODE is the code of the
11760 .md pattern and CALL is the function expr with arguments. TARGET,
11761 if nonnull, suggests a good place to put the result.
11762 HAS_TARGET indicates the function must return something. */
11765 mips_expand_builtin_direct (enum insn_code icode, rtx target, tree exp,
11768 rtx ops[MAX_RECOG_OPERANDS];
11774 /* We save target to ops[0]. */
11775 ops[0] = mips_prepare_builtin_target (icode, 0, target);
11779 /* We need to test if the arglist is not zero. Some instructions have extra
11780 clobber registers. */
11781 for (; i < insn_data[icode].n_operands && i <= call_expr_nargs (exp); i++, j++)
11782 ops[i] = mips_prepare_builtin_arg (icode, i, exp, j);
11787 emit_insn (GEN_FCN (icode) (ops[0], ops[1]));
11791 emit_insn (GEN_FCN (icode) (ops[0], ops[1], ops[2]));
11795 emit_insn (GEN_FCN (icode) (ops[0], ops[1], ops[2], ops[3]));
11799 gcc_unreachable ();
11804 /* Expand a __builtin_mips_movt_*_ps() or __builtin_mips_movf_*_ps()
11805 function (TYPE says which). EXP is the tree for the function
11806 function, ICODE is the instruction that should be used to compare
11807 the first two arguments, and COND is the condition it should test.
11808 TARGET, if nonnull, suggests a good place to put the result. */
11811 mips_expand_builtin_movtf (enum mips_builtin_type type,
11812 enum insn_code icode, enum mips_fp_condition cond,
11813 rtx target, tree exp)
11815 rtx cmp_result, op0, op1;
11817 cmp_result = mips_prepare_builtin_target (icode, 0, 0);
11818 op0 = mips_prepare_builtin_arg (icode, 1, exp, 0);
11819 op1 = mips_prepare_builtin_arg (icode, 2, exp, 1);
11820 emit_insn (GEN_FCN (icode) (cmp_result, op0, op1, GEN_INT (cond)));
11822 icode = CODE_FOR_mips_cond_move_tf_ps;
11823 target = mips_prepare_builtin_target (icode, 0, target);
11824 if (type == MIPS_BUILTIN_MOVT)
11826 op1 = mips_prepare_builtin_arg (icode, 2, exp, 2);
11827 op0 = mips_prepare_builtin_arg (icode, 1, exp, 3);
11831 op0 = mips_prepare_builtin_arg (icode, 1, exp, 2);
11832 op1 = mips_prepare_builtin_arg (icode, 2, exp, 3);
11834 emit_insn (gen_mips_cond_move_tf_ps (target, op0, op1, cmp_result));
11838 /* Move VALUE_IF_TRUE into TARGET if CONDITION is true; move VALUE_IF_FALSE
11839 into TARGET otherwise. Return TARGET. */
11842 mips_builtin_branch_and_move (rtx condition, rtx target,
11843 rtx value_if_true, rtx value_if_false)
11845 rtx true_label, done_label;
11847 true_label = gen_label_rtx ();
11848 done_label = gen_label_rtx ();
11850 /* First assume that CONDITION is false. */
11851 emit_move_insn (target, value_if_false);
11853 /* Branch to TRUE_LABEL if CONDITION is true and DONE_LABEL otherwise. */
11854 emit_jump_insn (gen_condjump (condition, true_label));
11855 emit_jump_insn (gen_jump (done_label));
11858 /* Fix TARGET if CONDITION is true. */
11859 emit_label (true_label);
11860 emit_move_insn (target, value_if_true);
11862 emit_label (done_label);
11866 /* Expand a comparison builtin of type BUILTIN_TYPE. ICODE is the code
11867 of the comparison instruction and COND is the condition it should test.
11868 EXP is the function call and arguments and TARGET, if nonnull,
11869 suggests a good place to put the boolean result. */
11872 mips_expand_builtin_compare (enum mips_builtin_type builtin_type,
11873 enum insn_code icode, enum mips_fp_condition cond,
11874 rtx target, tree exp)
11876 rtx offset, condition, cmp_result, ops[MAX_RECOG_OPERANDS];
11880 if (target == 0 || GET_MODE (target) != SImode)
11881 target = gen_reg_rtx (SImode);
11883 /* Prepare the operands to the comparison. */
11884 cmp_result = mips_prepare_builtin_target (icode, 0, 0);
11885 for (i = 1; i < insn_data[icode].n_operands - 1; i++, j++)
11886 ops[i] = mips_prepare_builtin_arg (icode, i, exp, j);
11888 switch (insn_data[icode].n_operands)
11891 emit_insn (GEN_FCN (icode) (cmp_result, ops[1], ops[2], GEN_INT (cond)));
11895 emit_insn (GEN_FCN (icode) (cmp_result, ops[1], ops[2],
11896 ops[3], ops[4], GEN_INT (cond)));
11900 gcc_unreachable ();
11903 /* If the comparison sets more than one register, we define the result
11904 to be 0 if all registers are false and -1 if all registers are true.
11905 The value of the complete result is indeterminate otherwise. */
11906 switch (builtin_type)
11908 case MIPS_BUILTIN_CMP_ALL:
11909 condition = gen_rtx_NE (VOIDmode, cmp_result, constm1_rtx);
11910 return mips_builtin_branch_and_move (condition, target,
11911 const0_rtx, const1_rtx);
11913 case MIPS_BUILTIN_CMP_UPPER:
11914 case MIPS_BUILTIN_CMP_LOWER:
11915 offset = GEN_INT (builtin_type == MIPS_BUILTIN_CMP_UPPER);
11916 condition = gen_single_cc (cmp_result, offset);
11917 return mips_builtin_branch_and_move (condition, target,
11918 const1_rtx, const0_rtx);
11921 condition = gen_rtx_NE (VOIDmode, cmp_result, const0_rtx);
11922 return mips_builtin_branch_and_move (condition, target,
11923 const1_rtx, const0_rtx);
11927 /* Expand a bposge builtin of type BUILTIN_TYPE. TARGET, if nonnull,
11928 suggests a good place to put the boolean result. */
11931 mips_expand_builtin_bposge (enum mips_builtin_type builtin_type, rtx target)
11933 rtx condition, cmp_result;
11936 if (target == 0 || GET_MODE (target) != SImode)
11937 target = gen_reg_rtx (SImode);
11939 cmp_result = gen_rtx_REG (CCDSPmode, CCDSP_PO_REGNUM);
11941 if (builtin_type == MIPS_BUILTIN_BPOSGE32)
11946 condition = gen_rtx_GE (VOIDmode, cmp_result, GEN_INT (cmp_value));
11947 return mips_builtin_branch_and_move (condition, target,
11948 const1_rtx, const0_rtx);
11951 /* Set SYMBOL_REF_FLAGS for the SYMBOL_REF inside RTL, which belongs to DECL.
11952 FIRST is true if this is the first time handling this decl. */
11955 mips_encode_section_info (tree decl, rtx rtl, int first)
11957 default_encode_section_info (decl, rtl, first);
11959 if (TREE_CODE (decl) == FUNCTION_DECL)
11961 rtx symbol = XEXP (rtl, 0);
11963 if ((TARGET_LONG_CALLS && !mips_near_type_p (TREE_TYPE (decl)))
11964 || mips_far_type_p (TREE_TYPE (decl)))
11965 SYMBOL_REF_FLAGS (symbol) |= SYMBOL_FLAG_LONG_CALL;
11969 /* Implement TARGET_EXTRA_LIVE_ON_ENTRY. Some code models use the incoming
11970 value of PIC_FUNCTION_ADDR_REGNUM to set up the global pointer. */
11973 mips_extra_live_on_entry (bitmap regs)
11975 if (TARGET_USE_GOT && !TARGET_ABSOLUTE_ABICALLS)
11976 bitmap_set_bit (regs, PIC_FUNCTION_ADDR_REGNUM);
11979 /* SImode values are represented as sign-extended to DImode. */
11982 mips_mode_rep_extended (enum machine_mode mode, enum machine_mode mode_rep)
11984 if (TARGET_64BIT && mode == SImode && mode_rep == DImode)
11985 return SIGN_EXTEND;
11990 /* MIPS implementation of TARGET_ASM_OUTPUT_DWARF_DTPREL. */
11993 mips_output_dwarf_dtprel (FILE *file, int size, rtx x)
11998 fputs ("\t.dtprelword\t", file);
12002 fputs ("\t.dtpreldword\t", file);
12006 gcc_unreachable ();
12008 output_addr_const (file, x);
12009 fputs ("+0x8000", file);
12012 #include "gt-mips.h"