OTI_vec_widen_umult_lo,
OTI_vec_widen_smult_hi,
OTI_vec_widen_smult_lo,
+ /* Widening shift left.
+ The high/low part of the resulting vector is returned. */
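+ (A sketch of the semantics, assuming a V8QI input widened to
+ HImode elements: the "hi" forms widen and shift the high four
+ elements, roughly res[i] = (unsigned short) a[i + 4] << b for
+ the unsigned case, giving a V4HI result; the "lo" forms cover
+ the low four. Which half is "high" follows the target's
+ endianness, as with the widening multiplies above.) */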
+ OTI_vec_widen_ushiftl_hi,
+ OTI_vec_widen_ushiftl_lo,
+ OTI_vec_widen_sshiftl_hi,
+ OTI_vec_widen_sshiftl_lo,
/* Extract and widen the high/low part of a vector of signed or
floating point elements. */
OTI_vec_unpacks_hi,
#define vec_widen_umult_lo_optab (&optab_table[OTI_vec_widen_umult_lo])
#define vec_widen_smult_hi_optab (&optab_table[OTI_vec_widen_smult_hi])
#define vec_widen_smult_lo_optab (&optab_table[OTI_vec_widen_smult_lo])
+#define vec_widen_ushiftl_hi_optab (&optab_table[OTI_vec_widen_ushiftl_hi])
+#define vec_widen_ushiftl_lo_optab (&optab_table[OTI_vec_widen_ushiftl_lo])
+#define vec_widen_sshiftl_hi_optab (&optab_table[OTI_vec_widen_sshiftl_hi])
+#define vec_widen_sshiftl_lo_optab (&optab_table[OTI_vec_widen_sshiftl_lo])
#define vec_unpacks_hi_optab (&optab_table[OTI_vec_unpacks_hi])
#define vec_unpacks_lo_optab (&optab_table[OTI_vec_unpacks_lo])
#define vec_unpacku_hi_optab (&optab_table[OTI_vec_unpacku_hi])
COI_vec_load_lanes,
COI_vec_store_lanes,
+ /* Vector conditional operations. */
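+ (Kept in the convert optab table rather than the direct table
+ so they can be indexed by two modes: the mode of the operands
+ being selected and the mode of the operands being compared,
+ which need not be the same.) */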
+ COI_vcond,
+ COI_vcondu,
+
COI_MAX
};
#define satfractuns_optab (&convert_optab_table[COI_satfractuns])
#define vec_load_lanes_optab (&convert_optab_table[COI_vec_load_lanes])
#define vec_store_lanes_optab (&convert_optab_table[COI_vec_store_lanes])
+#define vcond_optab (&convert_optab_table[COI_vcond])
+#define vcondu_optab (&convert_optab_table[COI_vcondu])
/* Contains the optab used for each rtx code. */
extern optab code_to_optab[NUM_RTX_CODE + 1];
DOI_reload_in,
DOI_reload_out,
- /* Vector conditional operations. */
- DOI_vcond,
- DOI_vcondu,
-
/* Block move operation. */
DOI_movmem,
/* Atomic clear with release semantics. */
DOI_sync_lock_release,
+ /* Atomic operations with memory model parameters. */
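+ (Each maps to an "atomic_<op><mode>" named pattern whose final
+ operand is the memory model, e.g. MEMMODEL_SEQ_CST, carried
+ over from the corresponding __atomic_* built-in call.) */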
+ DOI_atomic_exchange,
+ DOI_atomic_compare_and_swap,
+ DOI_atomic_load,
+ DOI_atomic_store,
+ DOI_atomic_add_fetch,
+ DOI_atomic_sub_fetch,
+ DOI_atomic_and_fetch,
+ DOI_atomic_nand_fetch,
+ DOI_atomic_xor_fetch,
+ DOI_atomic_or_fetch,
+ DOI_atomic_fetch_add,
+ DOI_atomic_fetch_sub,
+ DOI_atomic_fetch_and,
+ DOI_atomic_fetch_nand,
+ DOI_atomic_fetch_xor,
+ DOI_atomic_fetch_or,
+ DOI_atomic_add,
+ DOI_atomic_sub,
+ DOI_atomic_and,
+ DOI_atomic_nand,
+ DOI_atomic_xor,
+ DOI_atomic_or,
+ DOI_atomic_always_lock_free,
+ DOI_atomic_is_lock_free,
+ DOI_atomic_thread_fence,
+ DOI_atomic_signal_fence,
+
+ /* Vector permutation. */
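+ (vec_perm selects each output element from the concatenation
+ of two input vectors using a third, element-index selector,
+ roughly res[i] = sel[i] < n ? v0[sel[i]] : v1[sel[i] - n];
+ vec_perm_const is the form used when the selector is known at
+ compile time.) */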
+ DOI_vec_perm,
+ DOI_vec_perm_const,
+
DOI_MAX
};
#endif
#define reload_in_optab (&direct_optab_table[(int) DOI_reload_in])
#define reload_out_optab (&direct_optab_table[(int) DOI_reload_out])
-#define vcond_optab (&direct_optab_table[(int) DOI_vcond])
-#define vcondu_optab (&direct_optab_table[(int) DOI_vcondu])
#define movmem_optab (&direct_optab_table[(int) DOI_movmem])
#define setmem_optab (&direct_optab_table[(int) DOI_setmem])
#define cmpstr_optab (&direct_optab_table[(int) DOI_cmpstr])
(&direct_optab_table[(int) DOI_sync_lock_test_and_set])
#define sync_lock_release_optab \
(&direct_optab_table[(int) DOI_sync_lock_release])
+
+#define atomic_exchange_optab \
+ (&direct_optab_table[(int) DOI_atomic_exchange])
+#define atomic_compare_and_swap_optab \
+ (&direct_optab_table[(int) DOI_atomic_compare_and_swap])
+#define atomic_load_optab \
+ (&direct_optab_table[(int) DOI_atomic_load])
+#define atomic_store_optab \
+ (&direct_optab_table[(int) DOI_atomic_store])
+#define atomic_add_fetch_optab \
+ (&direct_optab_table[(int) DOI_atomic_add_fetch])
+#define atomic_sub_fetch_optab \
+ (&direct_optab_table[(int) DOI_atomic_sub_fetch])
+#define atomic_and_fetch_optab \
+ (&direct_optab_table[(int) DOI_atomic_and_fetch])
+#define atomic_nand_fetch_optab \
+ (&direct_optab_table[(int) DOI_atomic_nand_fetch])
+#define atomic_xor_fetch_optab \
+ (&direct_optab_table[(int) DOI_atomic_xor_fetch])
+#define atomic_or_fetch_optab \
+ (&direct_optab_table[(int) DOI_atomic_or_fetch])
+#define atomic_fetch_add_optab \
+ (&direct_optab_table[(int) DOI_atomic_fetch_add])
+#define atomic_fetch_sub_optab \
+ (&direct_optab_table[(int) DOI_atomic_fetch_sub])
+#define atomic_fetch_and_optab \
+ (&direct_optab_table[(int) DOI_atomic_fetch_and])
+#define atomic_fetch_nand_optab \
+ (&direct_optab_table[(int) DOI_atomic_fetch_nand])
+#define atomic_fetch_xor_optab \
+ (&direct_optab_table[(int) DOI_atomic_fetch_xor])
+#define atomic_fetch_or_optab \
+ (&direct_optab_table[(int) DOI_atomic_fetch_or])
+#define atomic_add_optab \
+ (&direct_optab_table[(int) DOI_atomic_add])
+#define atomic_sub_optab \
+ (&direct_optab_table[(int) DOI_atomic_sub])
+#define atomic_and_optab \
+ (&direct_optab_table[(int) DOI_atomic_and])
+#define atomic_nand_optab \
+ (&direct_optab_table[(int) DOI_atomic_nand])
+#define atomic_xor_optab \
+ (&direct_optab_table[(int) DOI_atomic_xor])
+#define atomic_or_optab \
+ (&direct_optab_table[(int) DOI_atomic_or])
+#define atomic_always_lock_free_optab \
+ (&direct_optab_table[(int) DOI_atomic_always_lock_free])
+#define atomic_is_lock_free_optab \
+ (&direct_optab_table[(int) DOI_atomic_is_lock_free])
+#define atomic_thread_fence_optab \
+ (&direct_optab_table[(int) DOI_atomic_thread_fence])
+#define atomic_signal_fence_optab \
+ (&direct_optab_table[(int) DOI_atomic_signal_fence])
+
+#define vec_perm_optab (&direct_optab_table[(int) DOI_vec_perm])
+#define vec_perm_const_optab (&direct_optab_table[(int) DOI_vec_perm_const])
\f
/* Target-dependent globals. */
struct target_optabs {
/* Return the insn_code for a FLOAT_EXPR. */
enum insn_code can_float_p (enum machine_mode, enum machine_mode, int);
+/* Return true if there is an inline compare and swap pattern. */
+extern bool can_compare_and_swap_p (enum machine_mode);
+
+/* Generate code for a compare and swap. */
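+/* (A sketch of the interface: the two rtx * arguments return the
+ boolean success result and the value originally loaded from
+ memory; the two memory models give the success and failure
+ orderings, as in __atomic_compare_exchange. This reading is
+ inferred from the declaration below, not spelled out in this
+ hunk.) */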
+extern bool expand_atomic_compare_and_swap (rtx *, rtx *, rtx, rtx, rtx, bool,
+ enum memmodel, enum memmodel);
+
+/* Check whether an operation represented by the code CODE is a
+ convert operation that is supported by the target platform in
+ vector form. */
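+/* (On success, one output parameter is expected to receive a
+ standard tree code for the conversion or, where no such code
+ applies, the other a target builtin decl to call instead; this
+ reading of the two outputs is an assumption, not spelled out in
+ this hunk.) */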
+extern bool supportable_convert_operation (enum tree_code, tree, tree, tree *,
+ enum tree_code *);
+
/* Generate code for a FIX_EXPR. */
extern void expand_fix (rtx, rtx, int);
extern rtx expand_widening_mult (enum machine_mode, rtx, rtx, rtx, int, optab);
/* Return true if target supports vector operations for COND_EXPR. */
-bool expand_vec_cond_expr_p (tree, enum machine_mode);
+bool expand_vec_cond_expr_p (tree, tree);
/* Generate code for VEC_COND_EXPR. */
extern rtx expand_vec_cond_expr (tree, tree, tree, tree, rtx);
/* Generate code for VEC_LSHIFT_EXPR and VEC_RSHIFT_EXPR. */
extern rtx expand_vec_shift_expr (sepops, rtx);
+/* Return true if target supports vector operations for VEC_PERM_EXPR. */
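+/* (The bool is taken to select variable rather than constant
+ permutations, with the byte array giving a constant selector in
+ element indices, possibly NULL when no particular selector is
+ meant; these argument roles are an assumption based on the
+ declaration below.) */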
+extern bool can_vec_perm_p (enum machine_mode, bool, const unsigned char *);
+
+/* Return true if the target can implement operation CODE using
+ VEC_PERM_EXPR. */
+extern bool can_vec_perm_for_code_p (enum tree_code, enum machine_mode, rtx *);
+
+/* Generate code for VEC_PERM_EXPR. */
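+/* (Arguments are taken to be the vector mode, the two input
+ vectors, the selector, and a target rtx for the result; this
+ ordering is an assumption based on the declaration below.) */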
+extern rtx expand_vec_perm (enum machine_mode, rtx, rtx, rtx, rtx);
+
/* Return the insn used to implement mode MODE of OP, or CODE_FOR_nothing
if the target does not have such an insn. */