(define_mode_iterator SSEMODE248 [V8HI V4SI V2DI])
(define_mode_iterator SSEMODE1248 [V16QI V8HI V4SI V2DI])
(define_mode_iterator SSEMODEF4 [SF DF V4SF V2DF])
+;; 256-bit float vector modes iterated by the FMA4 "...4256" patterns
+(define_mode_iterator FMA4MODEF4 [V8SF V4DF])
(define_mode_iterator SSEMODEF2P [V4SF V2DF])
(define_mode_iterator AVX256MODEF2P [V8SF V4DF])
(define_mode_iterator SSEMODE4S [V4SF V4SI])
(define_mode_iterator SSEMODE2D [V2DF V2DI])
;; Modes handled by integer vcond pattern
+(define_mode_iterator SSEMODE124C8 [V16QI V8HI V4SI
+ (V2DI "TARGET_SSE4_2")])
+
;; Mapping from float mode to required SSE level
(define_mode_attr sse [(SF "sse") (DF "sse2") (V4SF "sse") (V2DF "sse2")])
;; Mapping from integer vector mode to mnemonic suffix
(define_mode_attr ssevecsize [(V16QI "b") (V8HI "w") (V4SI "d") (V2DI "q")])
-;; Mapping of the sse5 suffix
-(define_mode_attr ssemodesuffixf4 [(SF "ss") (DF "sd")
- (V4SF "ps") (V2DF "pd")])
+;; Mapping of the fma4 packed suffix for the 256-bit FMA4 modes
+(define_mode_attr fma4modesuffixf4 [(V8SF "ps") (V4DF "pd")])
(define_mode_attr ssemodesuffixf2s [(SF "ss") (DF "sd")
 (V4SF "ss") (V2DF "sd")])
+
+;; Mapping of the ss/sd/ps/pd mnemonic suffix for scalar and packed
+;; 128-bit float modes
+(define_mode_attr ssemodesuffixf4 [(SF "ss") (DF "sd")
+ (V4SF "ps") (V2DF "pd")])
+
(define_mode_attr ssemodesuffixf2c [(V4SF "s") (V2DF "d")])
-;; Mapping of the max integer size for sse5 rotate immediate constraint
+;; Mapping of the max integer size for xop rotate immediate constraint
(define_mode_attr sserotatemax [(V16QI "7") (V8HI "15") (V4SI "31") (V2DI "63")])
;; Mapping of vector modes back to the scalar modes
(define_mode_attr avxmodesuffixp
 [(V2DF "pd") (V4SI "si") (V4SF "ps") (V8SF "ps") (V8SI "si")
 (V4DF "pd")])
-(define_mode_attr avxmodesuffixs
- [(V16QI "b") (V8HI "w") (V4SI "d")])
(define_mode_attr avxmodesuffix
 [(V16QI "") (V32QI "256") (V4SI "") (V4SF "") (V2DF "")
 (V8SI "256") (V8SF "256") (V4DF "256")])
&& !(MEM_P (operands[0]) && MEM_P (operands[1]))"
"vmovup<avxmodesuffixf2c>\t{%1, %0|%0, %1}"
[(set_attr "type" "ssemov")
+ (set_attr "movu" "1")
(set_attr "prefix" "vex")
(set_attr "mode" "<MODE>")])
&& !(MEM_P (operands[0]) && MEM_P (operands[1]))"
"movup<ssemodesuffixf2c>\t{%1, %0|%0, %1}"
[(set_attr "type" "ssemov")
+ (set_attr "movu" "1")
(set_attr "mode" "<MODE>")])
(define_insn "avx_movdqu<avxmodesuffix>"
"TARGET_AVX && !(MEM_P (operands[0]) && MEM_P (operands[1]))"
"vmovdqu\t{%1, %0|%0, %1}"
[(set_attr "type" "ssemov")
+ (set_attr "movu" "1")
(set_attr "prefix" "vex")
(set_attr "mode" "<avxvecmode>")])
"TARGET_SSE2 && !(MEM_P (operands[0]) && MEM_P (operands[1]))"
"movdqu\t{%1, %0|%0, %1}"
[(set_attr "type" "ssemov")
+ (set_attr "movu" "1")
(set_attr "prefix_data16" "1")
(set_attr "mode" "TI")])
UNSPEC_MOVNT))]
"TARGET_SSE2"
"movntdq\t{%1, %0|%0, %1}"
- [(set_attr "type" "ssecvt")
+ [(set_attr "type" "ssemov")
(set_attr "prefix_data16" "1")
(set_attr "mode" "TI")])
UNSPEC_MOVNT))]
"TARGET_SSE2"
"movnti\t{%1, %0|%0, %1}"
- [(set_attr "type" "ssecvt")
+ [(set_attr "type" "ssemov")
+ (set_attr "prefix_data16" "0")
(set_attr "mode" "V2DF")])
(define_insn "avx_lddqu<avxmodesuffix>"
"TARGET_AVX"
"vlddqu\t{%1, %0|%0, %1}"
[(set_attr "type" "ssecvt")
+ (set_attr "movu" "1")
(set_attr "prefix" "vex")
(set_attr "mode" "<avxvecmode>")])
UNSPEC_LDDQU))]
"TARGET_SSE3"
"lddqu\t{%1, %0|%0, %1}"
- [(set_attr "type" "ssecvt")
+ [(set_attr "type" "ssemov")
+ (set_attr "movu" "1")
+ (set_attr "prefix_data16" "0")
(set_attr "prefix_rep" "1")
(set_attr "mode" "TI")])
{
ix86_fixup_binary_operands_no_copy (DIV, V8SFmode, operands);
- if (TARGET_SSE_MATH && TARGET_RECIP && !optimize_size
+ if (TARGET_SSE_MATH && TARGET_RECIP && !optimize_insn_for_size_p ()
&& flag_finite_math_only && !flag_trapping_math
&& flag_unsafe_math_optimizations)
{
"TARGET_SSE"
"%vrcpps\t{%1, %0|%0, %1}"
[(set_attr "type" "sse")
+ (set_attr "atom_sse_attr" "rcp")
(set_attr "prefix" "maybe_vex")
(set_attr "mode" "V4SF")])
"TARGET_SSE"
"rcpss\t{%1, %0|%0, %1}"
[(set_attr "type" "sse")
+ (set_attr "atom_sse_attr" "rcp")
(set_attr "mode" "SF")])
(define_expand "sqrtv8sf2"
(sqrt:V8SF (match_operand:V8SF 1 "nonimmediate_operand" "")))]
"TARGET_AVX"
{
- if (TARGET_SSE_MATH && TARGET_RECIP && !optimize_size
+ if (TARGET_SSE_MATH && TARGET_RECIP && !optimize_insn_for_size_p ()
&& flag_finite_math_only && !flag_trapping_math
&& flag_unsafe_math_optimizations)
{
"TARGET_SSE"
"%vsqrtps\t{%1, %0|%0, %1}"
[(set_attr "type" "sse")
+ (set_attr "atom_sse_attr" "sqrt")
(set_attr "prefix" "maybe_vex")
(set_attr "mode" "V4SF")])
"SSE_VEC_FLOAT_MODE_P (<MODE>mode)"
"sqrts<ssemodesuffixf2c>\t{%1, %0|%0, %1}"
[(set_attr "type" "sse")
+ (set_attr "atom_sse_attr" "sqrt")
(set_attr "mode" "<ssescalarmode>")])
(define_expand "rsqrtv8sf2"
(match_operand:AVXMODEF2P 2 "nonimmediate_operand" "xm")))]
"AVX_VEC_FLOAT_MODE_P (<MODE>mode) && flag_finite_math_only
&& ix86_binary_operator_ok (<CODE>, <MODE>mode, operands)"
- "v<maxminfprefix>p<ssemodesuffixf2c>\t{%2, %1, %0|%0, %1, %2}"
+ "v<maxminfprefix>p<avxmodesuffixf2c>\t{%2, %1, %0|%0, %1, %2}"
[(set_attr "type" "sseadd")
(set_attr "prefix" "vex")
(set_attr "mode" "<MODE>")])
(const_int 1)))]
"SSE_VEC_FLOAT_MODE_P (<MODE>mode)"
"<maxminfprefix>s<ssemodesuffixf2c>\t{%2, %0|%0, %2}"
- [(set_attr "type" "sse")
+ [(set_attr "type" "sseadd")
(set_attr "mode" "<ssescalarmode>")])
;; These versions of the min/max patterns implement exactly the operations
(match_operand:V8SF 1 "register_operand" "x")
(match_operand:V8SF 2 "nonimmediate_operand" "xm"))
(minus:V8SF (match_dup 1) (match_dup 2))
- (const_int 66)))]
+ (const_int 170)))]
"TARGET_AVX"
"vaddsubps\t{%2, %1, %0|%0, %1, %2}"
[(set_attr "type" "sseadd")
(match_operand:V4DF 1 "register_operand" "x")
(match_operand:V4DF 2 "nonimmediate_operand" "xm"))
(minus:V4DF (match_dup 1) (match_dup 2))
- (const_int 6)))]
+ (const_int 10)))]
"TARGET_AVX"
"vaddsubpd\t{%2, %1, %0|%0, %1, %2}"
[(set_attr "type" "sseadd")
(match_operand:V4SF 1 "register_operand" "x")
(match_operand:V4SF 2 "nonimmediate_operand" "xm"))
(minus:V4SF (match_dup 1) (match_dup 2))
- (const_int 6)))]
+ (const_int 10)))]
"TARGET_AVX"
"vaddsubps\t{%2, %1, %0|%0, %1, %2}"
[(set_attr "type" "sseadd")
(match_operand:V4SF 1 "register_operand" "0")
(match_operand:V4SF 2 "nonimmediate_operand" "xm"))
(minus:V4SF (match_dup 1) (match_dup 2))
- (const_int 6)))]
+ (const_int 10)))]
"TARGET_SSE3"
"addsubps\t{%2, %0|%0, %2}"
[(set_attr "type" "sseadd")
"TARGET_SSE3"
"addsubpd\t{%2, %0|%0, %2}"
[(set_attr "type" "sseadd")
+ (set_attr "atom_unit" "complex")
(set_attr "mode" "V2DF")])
(define_insn "avx_h<plusminus_insn>v4df3"
"TARGET_SSE3"
"h<plusminus_mnemonic>ps\t{%2, %0|%0, %2}"
[(set_attr "type" "sseadd")
+ (set_attr "atom_unit" "complex")
(set_attr "prefix_rep" "1")
(set_attr "mode" "V4SF")])
"TARGET_AVX"
"vcmpp<avxmodesuffixf2c>\t{%3, %2, %1, %0|%0, %1, %2, %3}"
[(set_attr "type" "ssecmp")
+ (set_attr "length_immediate" "1")
(set_attr "prefix" "vex")
(set_attr "mode" "<MODE>")])
"TARGET_AVX"
"vcmps<ssemodesuffixf2c>\t{%3, %2, %1, %0|%0, %1, %2, %3}"
[(set_attr "type" "ssecmp")
+ (set_attr "length_immediate" "1")
(set_attr "prefix" "vex")
(set_attr "mode" "<ssescalarmode>")])
"vcmp%D3p<avxmodesuffixf2c>\t{%2, %1, %0|%0, %1, %2}"
[(set_attr "type" "ssecmp")
(set_attr "prefix" "vex")
+ (set_attr "length_immediate" "1")
(set_attr "mode" "<avxvecmode>")])
(define_insn "<sse>_maskcmp<mode>3"
(match_operator:SSEMODEF4 3 "sse_comparison_operator"
[(match_operand:SSEMODEF4 1 "register_operand" "0")
(match_operand:SSEMODEF4 2 "nonimmediate_operand" "xm")]))]
- "(SSE_FLOAT_MODE_P (<MODE>mode) || SSE_VEC_FLOAT_MODE_P (<MODE>mode))
- && !TARGET_SSE5"
+ "!TARGET_XOP
+ && (SSE_FLOAT_MODE_P (<MODE>mode) || SSE_VEC_FLOAT_MODE_P (<MODE>mode))"
"cmp%D3<ssemodesuffixf4>\t{%2, %0|%0, %2}"
[(set_attr "type" "ssecmp")
+ (set_attr "length_immediate" "1")
(set_attr "mode" "<MODE>")])
(define_insn "<sse>_vmmaskcmp<mode>3"
(match_operand:SSEMODEF2P 2 "nonimmediate_operand" "xm")])
(match_dup 1)
(const_int 1)))]
- "SSE_VEC_FLOAT_MODE_P (<MODE>mode) && !TARGET_SSE5"
+ "SSE_VEC_FLOAT_MODE_P (<MODE>mode)"
"cmp%D3s<ssemodesuffixf2c>\t{%2, %0|%0, %2}"
[(set_attr "type" "ssecmp")
+ (set_attr "length_immediate" "1")
(set_attr "mode" "<ssescalarmode>")])
(define_insn "<sse>_comi"
"%vcomis<ssemodefsuffix>\t{%1, %0|%0, %1}"
[(set_attr "type" "ssecomi")
(set_attr "prefix" "maybe_vex")
+ (set_attr "prefix_rep" "0")
+ (set (attr "prefix_data16")
+ (if_then_else (eq_attr "mode" "DF")
+ (const_string "1")
+ (const_string "0")))
(set_attr "mode" "<MODE>")])
(define_insn "<sse>_ucomi"
"%vucomis<ssemodefsuffix>\t{%1, %0|%0, %1}"
[(set_attr "type" "ssecomi")
(set_attr "prefix" "maybe_vex")
+ (set_attr "prefix_rep" "0")
+ (set (attr "prefix_data16")
+ (if_then_else (eq_attr "mode" "DF")
+ (const_string "1")
+ (const_string "0")))
(set_attr "mode" "<MODE>")])
(define_expand "vcond<mode>"
(match_operand:SSEMODEF2P 2 "general_operand" "")))]
"SSE_VEC_FLOAT_MODE_P (<MODE>mode)"
{
- if (ix86_expand_fp_vcond (operands))
- DONE;
- else
- FAIL;
+ bool ok = ix86_expand_fp_vcond (operands);
+ gcc_assert (ok);
+ DONE;
})
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
[(set_attr "type" "sselog")
(set_attr "mode" "<MODE>")])
+;; Expand copysign as three logical operations on the sign-bit mask:
+;;   op4 = op1 & ~mask   (magnitude bits of op1)
+;;   op5 = op2 & mask    (sign bits of op2)
+;;   op0 = op4 | op5
+;; Operand 3 (the mask) and operands 4/5 (fresh temporaries) are
+;; generated in the C block, not matched from the caller.
+(define_expand "copysign<mode>3"
+ [(set (match_dup 4)
+ (and:SSEMODEF2P
+ (not:SSEMODEF2P (match_dup 3))
+ (match_operand:SSEMODEF2P 1 "nonimmediate_operand" "")))
+ (set (match_dup 5)
+ (and:SSEMODEF2P (match_dup 3)
+ (match_operand:SSEMODEF2P 2 "nonimmediate_operand" "")))
+ (set (match_operand:SSEMODEF2P 0 "register_operand" "")
+ (ior:SSEMODEF2P (match_dup 4) (match_dup 5)))]
+ "SSE_VEC_FLOAT_MODE_P (<MODE>mode)"
+{
+ operands[3] = ix86_build_signbit_mask (<ssescalarmode>mode, 1, 0);
+
+ operands[4] = gen_reg_rtx (<MODE>mode);
+ operands[5] = gen_reg_rtx (<MODE>mode);
+})
+
;; Also define scalar versions. These are used for abs, neg, and
;; conditional move. Using subregs into vector modes causes register
;; allocation lossage. These patterns do not allow memory operands
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;;
-;; SSE5 floating point multiply/accumulate instructions This includes the
+;; FMA4 floating point multiply/accumulate instructions.  This includes the
;; scalar version of the instructions as well as the vector
;;
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;; (set (reg2) (mult (reg1) (mem (addr2))))
;; (set (reg3) (plus (reg2) (mem (addr3))))
-(define_insn "sse5_fmadd<mode>4"
- [(set (match_operand:SSEMODEF4 0 "register_operand" "=x,x,x,x")
+;; 256-bit FMA4 fused multiply-add: op0 = op1 * op2 + op3.
+;; Each constraint alternative allows one memory operand; the
+;; two-memory case passes ix86_fma4_valid_op_p here and is later
+;; reloaded by the define_split that follows.
+(define_insn "fma4_fmadd<mode>4256"
+ [(set (match_operand:FMA4MODEF4 0 "register_operand" "=x,x,x")
+ (plus:FMA4MODEF4
+ (mult:FMA4MODEF4
+ (match_operand:FMA4MODEF4 1 "nonimmediate_operand" "x,x,xm")
+ (match_operand:FMA4MODEF4 2 "nonimmediate_operand" "x,xm,x"))
+ (match_operand:FMA4MODEF4 3 "nonimmediate_operand" "xm,x,x")))]
+ "TARGET_FMA4
+ && ix86_fma4_valid_op_p (operands, insn, 4, true, 2, true)"
+ "vfmadd<fma4modesuffixf4>\t{%3, %2, %1, %0|%0, %1, %2, %3}"
+ [(set_attr "type" "ssemuladd")
+ (set_attr "mode" "<MODE>")])
+
+;; Split fmadd with two memory operands into a load and the fmadd.
+;; Fires only when the two-memory form was accepted but the
+;; one-memory form is not valid, and op0 does not appear in any
+;; input operand so it can serve as the load temporary.
+(define_split
+ [(set (match_operand:FMA4MODEF4 0 "register_operand" "")
+ (plus:FMA4MODEF4
+ (mult:FMA4MODEF4
+ (match_operand:FMA4MODEF4 1 "nonimmediate_operand" "")
+ (match_operand:FMA4MODEF4 2 "nonimmediate_operand" ""))
+ (match_operand:FMA4MODEF4 3 "nonimmediate_operand" "")))]
+ "TARGET_FMA4
+ && !ix86_fma4_valid_op_p (operands, insn, 4, true, 1, true)
+ && ix86_fma4_valid_op_p (operands, insn, 4, true, 2, true)
+ && !reg_mentioned_p (operands[0], operands[1])
+ && !reg_mentioned_p (operands[0], operands[2])
+ && !reg_mentioned_p (operands[0], operands[3])"
+ [(const_int 0)]
+{
+ ix86_expand_fma4_multiple_memory (operands, 4, <MODE>mode);
+ emit_insn (gen_fma4_fmadd<mode>4256 (operands[0], operands[1],
+ operands[2], operands[3]));
+ DONE;
+})
+
+;; Floating multiply and subtract
+;; 256-bit FMA4: op0 = op1 * op2 - op3.
+;; Allow two memory operands the same as fmadd (the two-memory case
+;; is reloaded by the define_split that follows).
+(define_insn "fma4_fmsub<mode>4256"
+ [(set (match_operand:FMA4MODEF4 0 "register_operand" "=x,x,x")
+ (minus:FMA4MODEF4
+ (mult:FMA4MODEF4
+ (match_operand:FMA4MODEF4 1 "nonimmediate_operand" "x,x,xm")
+ (match_operand:FMA4MODEF4 2 "nonimmediate_operand" "x,xm,x"))
+ (match_operand:FMA4MODEF4 3 "nonimmediate_operand" "xm,x,x")))]
+ "TARGET_FMA4
+ && ix86_fma4_valid_op_p (operands, insn, 4, true, 2, true)"
+ "vfmsub<fma4modesuffixf4>\t{%3, %2, %1, %0|%0, %1, %2, %3}"
+ [(set_attr "type" "ssemuladd")
+ (set_attr "mode" "<MODE>")])
+
+;; Split fmsub with two memory operands into a load and the fmsub.
+;; Same preconditions as the fmadd split: two-memory form matched,
+;; one-memory form invalid, op0 free to hold the reloaded operand.
+(define_split
+ [(set (match_operand:FMA4MODEF4 0 "register_operand" "")
+ (minus:FMA4MODEF4
+ (mult:FMA4MODEF4
+ (match_operand:FMA4MODEF4 1 "nonimmediate_operand" "")
+ (match_operand:FMA4MODEF4 2 "nonimmediate_operand" ""))
+ (match_operand:FMA4MODEF4 3 "nonimmediate_operand" "")))]
+ "TARGET_FMA4
+ && !ix86_fma4_valid_op_p (operands, insn, 4, true, 1, true)
+ && ix86_fma4_valid_op_p (operands, insn, 4, true, 2, true)
+ && !reg_mentioned_p (operands[0], operands[1])
+ && !reg_mentioned_p (operands[0], operands[2])
+ && !reg_mentioned_p (operands[0], operands[3])"
+ [(const_int 0)]
+{
+ ix86_expand_fma4_multiple_memory (operands, 4, <MODE>mode);
+ emit_insn (gen_fma4_fmsub<mode>4256 (operands[0], operands[1],
+ operands[2], operands[3]));
+ DONE;
+})
+
+;; Floating point negative multiply and add
+;; 256-bit FMA4: op0 = op3 - op1 * op2.
+;; Rewrite (- (a * b) + c) into the canonical form: c - (a * b)
+;; Note operands are out of order to simplify call to ix86_fma4_valid_op_p
+;; Allow two memory operands to help in optimizing.
+(define_insn "fma4_fnmadd<mode>4256"
+ [(set (match_operand:FMA4MODEF4 0 "register_operand" "=x,x,x")
+ (minus:FMA4MODEF4
+ (match_operand:FMA4MODEF4 3 "nonimmediate_operand" "xm,x,x")
+ (mult:FMA4MODEF4
+ (match_operand:FMA4MODEF4 1 "nonimmediate_operand" "x,x,xm")
+ (match_operand:FMA4MODEF4 2 "nonimmediate_operand" "x,xm,x"))))]
+ "TARGET_FMA4
+ && ix86_fma4_valid_op_p (operands, insn, 4, true, 2, true)"
+ "vfnmadd<fma4modesuffixf4>\t{%3, %2, %1, %0|%0, %1, %2, %3}"
+ [(set_attr "type" "ssemuladd")
+ (set_attr "mode" "<MODE>")])
+
+;; Split fnmadd with two memory operands into a load and the fnmadd.
+;; Same preconditions as the fmadd split, with the operands in the
+;; canonical c - (a * b) order used by the insn above.
+(define_split
+ [(set (match_operand:FMA4MODEF4 0 "register_operand" "")
+ (minus:FMA4MODEF4
+ (match_operand:FMA4MODEF4 3 "nonimmediate_operand" "")
+ (mult:FMA4MODEF4
+ (match_operand:FMA4MODEF4 1 "nonimmediate_operand" "")
+ (match_operand:FMA4MODEF4 2 "nonimmediate_operand" ""))))]
+ "TARGET_FMA4
+ && !ix86_fma4_valid_op_p (operands, insn, 4, true, 1, true)
+ && ix86_fma4_valid_op_p (operands, insn, 4, true, 2, true)
+ && !reg_mentioned_p (operands[0], operands[1])
+ && !reg_mentioned_p (operands[0], operands[2])
+ && !reg_mentioned_p (operands[0], operands[3])"
+ [(const_int 0)]
+{
+ ix86_expand_fma4_multiple_memory (operands, 4, <MODE>mode);
+ emit_insn (gen_fma4_fnmadd<mode>4256 (operands[0], operands[1],
+ operands[2], operands[3]));
+ DONE;
+})
+
+;; Floating point negative multiply and subtract
+;; 256-bit FMA4: op0 = (-op1) * op2 - op3.
+;; Rewrite (- (a * b) - c) into the canonical form: ((-a) * b) - c
+;; Allow 2 memory operands to help with optimization.  Note op1 sits
+;; under the neg and is register-only in both alternatives.
+(define_insn "fma4_fnmsub<mode>4256"
+ [(set (match_operand:FMA4MODEF4 0 "register_operand" "=x,x")
+ (minus:FMA4MODEF4
+ (mult:FMA4MODEF4
+ (neg:FMA4MODEF4
+ (match_operand:FMA4MODEF4 1 "nonimmediate_operand" "x,x"))
+ (match_operand:FMA4MODEF4 2 "nonimmediate_operand" "x,xm"))
+ (match_operand:FMA4MODEF4 3 "nonimmediate_operand" "xm,x")))]
+ "TARGET_FMA4
+ && ix86_fma4_valid_op_p (operands, insn, 4, true, 2, false)"
+ "vfnmsub<fma4modesuffixf4>\t{%3, %2, %1, %0|%0, %1, %2, %3}"
+ [(set_attr "type" "ssemuladd")
+ (set_attr "mode" "<MODE>")])
+
+;; Split fnmsub with two memory operands into a load and the fnmsub.
+;; Same preconditions as the other splits; note the final argument to
+;; ix86_fma4_valid_op_p is false here, matching the insn above.
+(define_split
+ [(set (match_operand:FMA4MODEF4 0 "register_operand" "")
+ (minus:FMA4MODEF4
+ (mult:FMA4MODEF4
+ (neg:FMA4MODEF4
+ (match_operand:FMA4MODEF4 1 "nonimmediate_operand" ""))
+ (match_operand:FMA4MODEF4 2 "nonimmediate_operand" ""))
+ (match_operand:FMA4MODEF4 3 "nonimmediate_operand" "")))]
+ "TARGET_FMA4
+ && !ix86_fma4_valid_op_p (operands, insn, 4, true, 1, false)
+ && ix86_fma4_valid_op_p (operands, insn, 4, true, 2, false)
+ && !reg_mentioned_p (operands[0], operands[1])
+ && !reg_mentioned_p (operands[0], operands[2])
+ && !reg_mentioned_p (operands[0], operands[3])"
+ [(const_int 0)]
+{
+ ix86_expand_fma4_multiple_memory (operands, 4, <MODE>mode);
+ emit_insn (gen_fma4_fnmsub<mode>4256 (operands[0], operands[1],
+ operands[2], operands[3]));
+ DONE;
+})
+
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+(define_insn "fma4_fmadd<mode>4"
+ [(set (match_operand:SSEMODEF4 0 "register_operand" "=x,x,x")
(plus:SSEMODEF4
(mult:SSEMODEF4
- (match_operand:SSEMODEF4 1 "nonimmediate_operand" "%0,0,x,xm")
- (match_operand:SSEMODEF4 2 "nonimmediate_operand" "x,xm,xm,x"))
- (match_operand:SSEMODEF4 3 "nonimmediate_operand" "xm,x,0,0")))]
- "TARGET_SSE5 && TARGET_FUSED_MADD
- && ix86_sse5_valid_op_p (operands, insn, 4, true, 2, true)"
- "fmadd<ssemodesuffixf4>\t{%3, %2, %1, %0|%0, %1, %2, %3}"
+ (match_operand:SSEMODEF4 1 "nonimmediate_operand" "x,x,xm")
+ (match_operand:SSEMODEF4 2 "nonimmediate_operand" "x,xm,x"))
+ (match_operand:SSEMODEF4 3 "nonimmediate_operand" "xm,x,x")))]
+ "TARGET_FMA4
+ && ix86_fma4_valid_op_p (operands, insn, 4, true, 2, true)"
+ "vfmadd<ssemodesuffixf4>\t{%3, %2, %1, %0|%0, %1, %2, %3}"
[(set_attr "type" "ssemuladd")
(set_attr "mode" "<MODE>")])
(match_operand:SSEMODEF4 1 "nonimmediate_operand" "")
(match_operand:SSEMODEF4 2 "nonimmediate_operand" ""))
(match_operand:SSEMODEF4 3 "nonimmediate_operand" "")))]
- "TARGET_SSE5
- && !ix86_sse5_valid_op_p (operands, insn, 4, true, 1, true)
- && ix86_sse5_valid_op_p (operands, insn, 4, true, 2, true)
+ "TARGET_FMA4
+ && !ix86_fma4_valid_op_p (operands, insn, 4, true, 1, true)
+ && ix86_fma4_valid_op_p (operands, insn, 4, true, 2, true)
&& !reg_mentioned_p (operands[0], operands[1])
&& !reg_mentioned_p (operands[0], operands[2])
&& !reg_mentioned_p (operands[0], operands[3])"
[(const_int 0)]
{
- ix86_expand_sse5_multiple_memory (operands, 4, <MODE>mode);
- emit_insn (gen_sse5_fmadd<mode>4 (operands[0], operands[1],
+ ix86_expand_fma4_multiple_memory (operands, 4, <MODE>mode);
+ emit_insn (gen_fma4_fmadd<mode>4 (operands[0], operands[1],
operands[2], operands[3]));
DONE;
})
;; For the scalar operations, use operand1 for the upper words that aren't
;; modified, so restrict the forms that are generated.
;; Scalar version of fmadd
-(define_insn "sse5_vmfmadd<mode>4"
+(define_insn "fma4_vmfmadd<mode>4"
[(set (match_operand:SSEMODEF2P 0 "register_operand" "=x,x")
(vec_merge:SSEMODEF2P
(plus:SSEMODEF2P
(mult:SSEMODEF2P
- (match_operand:SSEMODEF2P 1 "nonimmediate_operand" "0,0")
+ (match_operand:SSEMODEF2P 1 "nonimmediate_operand" "x,x")
(match_operand:SSEMODEF2P 2 "nonimmediate_operand" "x,xm"))
(match_operand:SSEMODEF2P 3 "nonimmediate_operand" "xm,x"))
- (match_dup 1)
+ (match_dup 0)
(const_int 1)))]
- "TARGET_SSE5 && TARGET_FUSED_MADD
- && ix86_sse5_valid_op_p (operands, insn, 4, true, 1, true)"
- "fmadd<ssemodesuffixf2s>\t{%3, %2, %1, %0|%0, %1, %2, %3}"
+ "TARGET_FMA4
+ && ix86_fma4_valid_op_p (operands, insn, 4, true, 1, true)"
+ "vfmadd<ssemodesuffixf2s>\t{%3, %2, %1, %0|%0, %1, %2, %3}"
[(set_attr "type" "ssemuladd")
(set_attr "mode" "<MODE>")])
;; Floating multiply and subtract
;; Allow two memory operands the same as fmadd
-(define_insn "sse5_fmsub<mode>4"
- [(set (match_operand:SSEMODEF4 0 "register_operand" "=x,x,x,x")
+(define_insn "fma4_fmsub<mode>4"
+ [(set (match_operand:SSEMODEF4 0 "register_operand" "=x,x,x")
(minus:SSEMODEF4
(mult:SSEMODEF4
- (match_operand:SSEMODEF4 1 "nonimmediate_operand" "%0,0,x,xm")
- (match_operand:SSEMODEF4 2 "nonimmediate_operand" "x,xm,xm,x"))
- (match_operand:SSEMODEF4 3 "nonimmediate_operand" "xm,x,0,0")))]
- "TARGET_SSE5 && TARGET_FUSED_MADD
- && ix86_sse5_valid_op_p (operands, insn, 4, true, 2, true)"
- "fmsub<ssemodesuffixf4>\t{%3, %2, %1, %0|%0, %1, %2, %3}"
+ (match_operand:SSEMODEF4 1 "nonimmediate_operand" "x,x,xm")
+ (match_operand:SSEMODEF4 2 "nonimmediate_operand" "x,xm,x"))
+ (match_operand:SSEMODEF4 3 "nonimmediate_operand" "xm,x,x")))]
+ "TARGET_FMA4
+ && ix86_fma4_valid_op_p (operands, insn, 4, true, 2, true)"
+ "vfmsub<ssemodesuffixf4>\t{%3, %2, %1, %0|%0, %1, %2, %3}"
[(set_attr "type" "ssemuladd")
(set_attr "mode" "<MODE>")])
(match_operand:SSEMODEF4 1 "nonimmediate_operand" "")
(match_operand:SSEMODEF4 2 "nonimmediate_operand" ""))
(match_operand:SSEMODEF4 3 "nonimmediate_operand" "")))]
- "TARGET_SSE5
- && !ix86_sse5_valid_op_p (operands, insn, 4, true, 1, true)
- && ix86_sse5_valid_op_p (operands, insn, 4, true, 2, true)
+ "TARGET_FMA4
+ && !ix86_fma4_valid_op_p (operands, insn, 4, true, 1, true)
+ && ix86_fma4_valid_op_p (operands, insn, 4, true, 2, true)
&& !reg_mentioned_p (operands[0], operands[1])
&& !reg_mentioned_p (operands[0], operands[2])
&& !reg_mentioned_p (operands[0], operands[3])"
[(const_int 0)]
{
- ix86_expand_sse5_multiple_memory (operands, 4, <MODE>mode);
- emit_insn (gen_sse5_fmsub<mode>4 (operands[0], operands[1],
+ ix86_expand_fma4_multiple_memory (operands, 4, <MODE>mode);
+ emit_insn (gen_fma4_fmsub<mode>4 (operands[0], operands[1],
operands[2], operands[3]));
DONE;
})
;; For the scalar operations, use operand1 for the upper words that aren't
;; modified, so restrict the forms that are generated.
;; Scalar version of fmsub
-(define_insn "sse5_vmfmsub<mode>4"
+(define_insn "fma4_vmfmsub<mode>4"
[(set (match_operand:SSEMODEF2P 0 "register_operand" "=x,x")
(vec_merge:SSEMODEF2P
(minus:SSEMODEF2P
(mult:SSEMODEF2P
- (match_operand:SSEMODEF2P 1 "nonimmediate_operand" "0,0")
+ (match_operand:SSEMODEF2P 1 "nonimmediate_operand" "x,x")
(match_operand:SSEMODEF2P 2 "nonimmediate_operand" "x,xm"))
(match_operand:SSEMODEF2P 3 "nonimmediate_operand" "xm,x"))
- (match_dup 1)
+ (match_dup 0)
(const_int 1)))]
- "TARGET_SSE5 && TARGET_FUSED_MADD
- && ix86_sse5_valid_op_p (operands, insn, 4, true, 1, false)"
- "fmsub<ssemodesuffixf2s>\t{%3, %2, %1, %0|%0, %1, %2, %3}"
+ "TARGET_FMA4
+ && ix86_fma4_valid_op_p (operands, insn, 4, true, 1, false)"
+ "vfmsub<ssemodesuffixf2s>\t{%3, %2, %1, %0|%0, %1, %2, %3}"
[(set_attr "type" "ssemuladd")
(set_attr "mode" "<MODE>")])
;; Floating point negative multiply and add
;; Rewrite (- (a * b) + c) into the canonical form: c - (a * b)
-;; Note operands are out of order to simplify call to ix86_sse5_valid_p
+;; Note operands are out of order to simplify call to ix86_fma4_valid_op_p
;; Allow two memory operands to help in optimizing.
-(define_insn "sse5_fnmadd<mode>4"
- [(set (match_operand:SSEMODEF4 0 "register_operand" "=x,x,x,x")
+(define_insn "fma4_fnmadd<mode>4"
+ [(set (match_operand:SSEMODEF4 0 "register_operand" "=x,x,x")
(minus:SSEMODEF4
- (match_operand:SSEMODEF4 3 "nonimmediate_operand" "xm,x,0,0")
+ (match_operand:SSEMODEF4 3 "nonimmediate_operand" "xm,x,x")
(mult:SSEMODEF4
- (match_operand:SSEMODEF4 1 "nonimmediate_operand" "%0,0,x,xm")
- (match_operand:SSEMODEF4 2 "nonimmediate_operand" "x,xm,xm,x"))))]
- "TARGET_SSE5 && TARGET_FUSED_MADD
- && ix86_sse5_valid_op_p (operands, insn, 4, true, 2, true)"
- "fnmadd<ssemodesuffixf4>\t{%3, %2, %1, %0|%0, %1, %2, %3}"
+ (match_operand:SSEMODEF4 1 "nonimmediate_operand" "x,x,xm")
+ (match_operand:SSEMODEF4 2 "nonimmediate_operand" "x,xm,x"))))]
+ "TARGET_FMA4
+ && ix86_fma4_valid_op_p (operands, insn, 4, true, 2, true)"
+ "vfnmadd<ssemodesuffixf4>\t{%3, %2, %1, %0|%0, %1, %2, %3}"
[(set_attr "type" "ssemuladd")
(set_attr "mode" "<MODE>")])
(mult:SSEMODEF4
(match_operand:SSEMODEF4 1 "nonimmediate_operand" "")
(match_operand:SSEMODEF4 2 "nonimmediate_operand" ""))))]
- "TARGET_SSE5
- && !ix86_sse5_valid_op_p (operands, insn, 4, true, 1, true)
- && ix86_sse5_valid_op_p (operands, insn, 4, true, 2, true)
+ "TARGET_FMA4
+ && !ix86_fma4_valid_op_p (operands, insn, 4, true, 1, true)
+ && ix86_fma4_valid_op_p (operands, insn, 4, true, 2, true)
&& !reg_mentioned_p (operands[0], operands[1])
&& !reg_mentioned_p (operands[0], operands[2])
&& !reg_mentioned_p (operands[0], operands[3])"
[(const_int 0)]
{
- ix86_expand_sse5_multiple_memory (operands, 4, <MODE>mode);
- emit_insn (gen_sse5_fnmadd<mode>4 (operands[0], operands[1],
+ ix86_expand_fma4_multiple_memory (operands, 4, <MODE>mode);
+ emit_insn (gen_fma4_fnmadd<mode>4 (operands[0], operands[1],
operands[2], operands[3]));
DONE;
})
;; For the scalar operations, use operand1 for the upper words that aren't
;; modified, so restrict the forms that are generated.
;; Scalar version of fnmadd
-(define_insn "sse5_vmfnmadd<mode>4"
+(define_insn "fma4_vmfnmadd<mode>4"
[(set (match_operand:SSEMODEF2P 0 "register_operand" "=x,x")
(vec_merge:SSEMODEF2P
(minus:SSEMODEF2P
(match_operand:SSEMODEF2P 3 "nonimmediate_operand" "xm,x")
(mult:SSEMODEF2P
- (match_operand:SSEMODEF2P 1 "nonimmediate_operand" "0,0")
+ (match_operand:SSEMODEF2P 1 "nonimmediate_operand" "x,x")
(match_operand:SSEMODEF2P 2 "nonimmediate_operand" "x,xm")))
- (match_dup 1)
+ (match_dup 0)
(const_int 1)))]
- "TARGET_SSE5 && TARGET_FUSED_MADD
- && ix86_sse5_valid_op_p (operands, insn, 4, true, 1, true)"
- "fnmadd<ssemodesuffixf2s>\t{%3, %2, %1, %0|%0, %1, %2, %3}"
+ "TARGET_FMA4
+ && ix86_fma4_valid_op_p (operands, insn, 4, true, 1, true)"
+ "vfnmadd<ssemodesuffixf2s>\t{%3, %2, %1, %0|%0, %1, %2, %3}"
[(set_attr "type" "ssemuladd")
(set_attr "mode" "<MODE>")])
;; Floating point negative multiply and subtract
;; Rewrite (- (a * b) - c) into the canonical form: ((-a) * b) - c
;; Allow 2 memory operands to help with optimization
-(define_insn "sse5_fnmsub<mode>4"
+(define_insn "fma4_fnmsub<mode>4"
[(set (match_operand:SSEMODEF4 0 "register_operand" "=x,x")
(minus:SSEMODEF4
(mult:SSEMODEF4
(neg:SSEMODEF4
- (match_operand:SSEMODEF4 1 "nonimmediate_operand" "0,0"))
+ (match_operand:SSEMODEF4 1 "nonimmediate_operand" "x,x"))
(match_operand:SSEMODEF4 2 "nonimmediate_operand" "x,xm"))
(match_operand:SSEMODEF4 3 "nonimmediate_operand" "xm,x")))]
- "TARGET_SSE5 && TARGET_FUSED_MADD
- && ix86_sse5_valid_op_p (operands, insn, 4, true, 2, false)"
- "fnmsub<ssemodesuffixf4>\t{%3, %2, %1, %0|%0, %1, %2, %3}"
+ "TARGET_FMA4
+ && ix86_fma4_valid_op_p (operands, insn, 4, true, 2, false)"
+ "vfnmsub<ssemodesuffixf4>\t{%3, %2, %1, %0|%0, %1, %2, %3}"
[(set_attr "type" "ssemuladd")
(set_attr "mode" "<MODE>")])
(match_operand:SSEMODEF4 1 "nonimmediate_operand" ""))
(match_operand:SSEMODEF4 2 "nonimmediate_operand" ""))
(match_operand:SSEMODEF4 3 "nonimmediate_operand" "")))]
- "TARGET_SSE5
- && !ix86_sse5_valid_op_p (operands, insn, 4, true, 1, false)
- && ix86_sse5_valid_op_p (operands, insn, 4, true, 2, false)
+ "TARGET_FMA4
+ && !ix86_fma4_valid_op_p (operands, insn, 4, true, 1, false)
+ && ix86_fma4_valid_op_p (operands, insn, 4, true, 2, false)
&& !reg_mentioned_p (operands[0], operands[1])
&& !reg_mentioned_p (operands[0], operands[2])
&& !reg_mentioned_p (operands[0], operands[3])"
[(const_int 0)]
{
- ix86_expand_sse5_multiple_memory (operands, 4, <MODE>mode);
- emit_insn (gen_sse5_fnmsub<mode>4 (operands[0], operands[1],
+ ix86_expand_fma4_multiple_memory (operands, 4, <MODE>mode);
+ emit_insn (gen_fma4_fnmsub<mode>4 (operands[0], operands[1],
operands[2], operands[3]));
DONE;
})
;; For the scalar operations, use operand1 for the upper words that aren't
;; modified, so restrict the forms that are generated.
;; Scalar version of fnmsub
-(define_insn "sse5_vmfnmsub<mode>4"
+(define_insn "fma4_vmfnmsub<mode>4"
[(set (match_operand:SSEMODEF2P 0 "register_operand" "=x,x")
(vec_merge:SSEMODEF2P
(minus:SSEMODEF2P
(mult:SSEMODEF2P
(neg:SSEMODEF2P
- (match_operand:SSEMODEF2P 1 "nonimmediate_operand" "0,0"))
+ (match_operand:SSEMODEF2P 1 "nonimmediate_operand" "x,x"))
(match_operand:SSEMODEF2P 2 "nonimmediate_operand" "x,xm"))
(match_operand:SSEMODEF2P 3 "nonimmediate_operand" "xm,x"))
- (match_dup 1)
+ (match_dup 0)
(const_int 1)))]
- "TARGET_SSE5 && TARGET_FUSED_MADD
- && ix86_sse5_valid_op_p (operands, insn, 4, true, 2, false)"
- "fnmsub<ssemodesuffixf2s>\t{%3, %2, %1, %0|%0, %1, %2, %3}"
+ "TARGET_FMA4
+ && ix86_fma4_valid_op_p (operands, insn, 4, true, 2, false)"
+ "vfnmsub<ssemodesuffixf2s>\t{%3, %2, %1, %0|%0, %1, %2, %3}"
[(set_attr "type" "ssemuladd")
(set_attr "mode" "<MODE>")])
-;; The same instructions using an UNSPEC to allow the intrinsic to be used
-;; even if the user used -mno-fused-madd
-;; Parallel instructions. During instruction generation, just default
-;; to registers, and let combine later build the appropriate instruction.
-(define_expand "sse5i_fmadd<mode>4"
- [(set (match_operand:SSEMODEF2P 0 "register_operand" "")
- (unspec:SSEMODEF2P
- [(plus:SSEMODEF2P
- (mult:SSEMODEF2P
- (match_operand:SSEMODEF2P 1 "register_operand" "")
- (match_operand:SSEMODEF2P 2 "register_operand" ""))
- (match_operand:SSEMODEF2P 3 "register_operand" ""))]
- UNSPEC_SSE5_INTRINSIC))]
- "TARGET_SSE5"
-{
- /* If we have -mfused-madd, emit the normal insn rather than the UNSPEC */
- if (TARGET_FUSED_MADD)
- {
- emit_insn (gen_sse5_fmadd<mode>4 (operands[0], operands[1],
- operands[2], operands[3]));
- DONE;
- }
-})
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+
+;; UNSPEC_FMA4_INTRINSIC form of the 256-bit fmadd, used by the FMA4
+;; intrinsics (op0 = op1 * op2 + op3).  Only one memory operand is
+;; accepted, so no two-memory split is needed for this variant.
+(define_insn "fma4i_fmadd<mode>4256"
+ [(set (match_operand:FMA4MODEF4 0 "register_operand" "=x,x")
+ (unspec:FMA4MODEF4
+ [(plus:FMA4MODEF4
+ (mult:FMA4MODEF4
+ (match_operand:FMA4MODEF4 1 "nonimmediate_operand" "x,x")
+ (match_operand:FMA4MODEF4 2 "nonimmediate_operand" "x,xm"))
+ (match_operand:FMA4MODEF4 3 "nonimmediate_operand" "xm,x"))]
+ UNSPEC_FMA4_INTRINSIC))]
+ "TARGET_FMA4 && ix86_fma4_valid_op_p (operands, insn, 4, true, 1, true)"
+ "vfmadd<fma4modesuffixf4>\t{%3, %2, %1, %0|%0, %1, %2, %3}"
+ [(set_attr "type" "ssemuladd")
+ (set_attr "mode" "<MODE>")])
-(define_insn "*sse5i_fmadd<mode>4"
- [(set (match_operand:SSEMODEF2P 0 "register_operand" "=x,x,x,x")
- (unspec:SSEMODEF2P
- [(plus:SSEMODEF2P
- (mult:SSEMODEF2P
- (match_operand:SSEMODEF2P 1 "nonimmediate_operand" "%0,0,x,xm")
- (match_operand:SSEMODEF2P 2 "nonimmediate_operand" "x,xm,xm,x"))
- (match_operand:SSEMODEF2P 3 "nonimmediate_operand" "xm,x,0,0"))]
- UNSPEC_SSE5_INTRINSIC))]
- "TARGET_SSE5 && ix86_sse5_valid_op_p (operands, insn, 4, true, 1, true)"
- "fmadd<ssemodesuffixf4>\t{%3, %2, %1, %0|%0, %1, %2, %3}"
+;; UNSPEC_FMA4_INTRINSIC form of the 256-bit fmsub
+;; (op0 = op1 * op2 - op3); one memory operand allowed.
+(define_insn "fma4i_fmsub<mode>4256"
+ [(set (match_operand:FMA4MODEF4 0 "register_operand" "=x,x")
+ (unspec:FMA4MODEF4
+ [(minus:FMA4MODEF4
+ (mult:FMA4MODEF4
+ (match_operand:FMA4MODEF4 1 "nonimmediate_operand" "x,x")
+ (match_operand:FMA4MODEF4 2 "nonimmediate_operand" "x,xm"))
+ (match_operand:FMA4MODEF4 3 "nonimmediate_operand" "xm,x"))]
+ UNSPEC_FMA4_INTRINSIC))]
+ "TARGET_FMA4 && ix86_fma4_valid_op_p (operands, insn, 4, true, 1, true)"
+ "vfmsub<fma4modesuffixf4>\t{%3, %2, %1, %0|%0, %1, %2, %3}"
[(set_attr "type" "ssemuladd")
(set_attr "mode" "<MODE>")])
-(define_expand "sse5i_fmsub<mode>4"
- [(set (match_operand:SSEMODEF2P 0 "register_operand" "")
- (unspec:SSEMODEF2P
- [(minus:SSEMODEF2P
- (mult:SSEMODEF2P
- (match_operand:SSEMODEF2P 1 "register_operand" "")
- (match_operand:SSEMODEF2P 2 "register_operand" ""))
- (match_operand:SSEMODEF2P 3 "register_operand" ""))]
- UNSPEC_SSE5_INTRINSIC))]
- "TARGET_SSE5"
-{
- /* If we have -mfused-madd, emit the normal insn rather than the UNSPEC */
- if (TARGET_FUSED_MADD)
- {
- emit_insn (gen_sse5_fmsub<mode>4 (operands[0], operands[1],
- operands[2], operands[3]));
- DONE;
- }
-})
+;; UNSPEC_FMA4_INTRINSIC form of the 256-bit fnmadd in canonical
+;; order (op0 = op3 - op1 * op2); one memory operand allowed.
+(define_insn "fma4i_fnmadd<mode>4256"
+ [(set (match_operand:FMA4MODEF4 0 "register_operand" "=x,x")
+ (unspec:FMA4MODEF4
+ [(minus:FMA4MODEF4
+ (match_operand:FMA4MODEF4 3 "nonimmediate_operand" "xm,x")
+ (mult:FMA4MODEF4
+ (match_operand:FMA4MODEF4 1 "nonimmediate_operand" "x,x")
+ (match_operand:FMA4MODEF4 2 "nonimmediate_operand" "x,xm")))]
+ UNSPEC_FMA4_INTRINSIC))]
+ "TARGET_FMA4 && ix86_fma4_valid_op_p (operands, insn, 4, true, 1, true)"
+ "vfnmadd<fma4modesuffixf4>\t{%3, %2, %1, %0|%0, %1, %2, %3}"
+ [(set_attr "type" "ssemuladd")
+ (set_attr "mode" "<MODE>")])
-(define_insn "*sse5i_fmsub<mode>4"
- [(set (match_operand:SSEMODEF2P 0 "register_operand" "=x,x,x,x")
- (unspec:SSEMODEF2P
- [(minus:SSEMODEF2P
- (mult:SSEMODEF2P
- (match_operand:SSEMODEF2P 1 "nonimmediate_operand" "%0,0,x,xm")
- (match_operand:SSEMODEF2P 2 "nonimmediate_operand" "x,xm,xm,x"))
- (match_operand:SSEMODEF2P 3 "nonimmediate_operand" "xm,x,0,0"))]
- UNSPEC_SSE5_INTRINSIC))]
- "TARGET_SSE5 && ix86_sse5_valid_op_p (operands, insn, 4, true, 1, true)"
- "fmsub<ssemodesuffixf4>\t{%3, %2, %1, %0|%0, %1, %2, %3}"
+(define_insn "fma4i_fnmsub<mode>4256"
+ [(set (match_operand:FMA4MODEF4 0 "register_operand" "=x,x")
+ (unspec:FMA4MODEF4
+ [(minus:FMA4MODEF4
+ (mult:FMA4MODEF4
+ (neg:FMA4MODEF4
+ (match_operand:FMA4MODEF4 1 "nonimmediate_operand" "x,x"))
+ (match_operand:FMA4MODEF4 2 "nonimmediate_operand" "x,xm"))
+ (match_operand:FMA4MODEF4 3 "nonimmediate_operand" "xm,x"))]
+ UNSPEC_FMA4_INTRINSIC))]
+ "TARGET_FMA4 && ix86_fma4_valid_op_p (operands, insn, 4, true, 1, false)"
+ "vfnmsub<fma4modesuffixf4>\t{%3, %2, %1, %0|%0, %1, %2, %3}"
[(set_attr "type" "ssemuladd")
(set_attr "mode" "<MODE>")])
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;; 128-bit packed (V4SF/V2DF) FMA4 intrinsic patterns; same shapes as the
+;; 256-bit FMA4MODEF4 versions above, using the SSEMODEF2P iterator.
-;; Rewrite (- (a * b) + c) into the canonical form: c - (a * b)
-;; Note operands are out of order to simplify call to ix86_sse5_valid_p
-(define_expand "sse5i_fnmadd<mode>4"
- [(set (match_operand:SSEMODEF2P 0 "register_operand" "")
+(define_insn "fma4i_fmadd<mode>4"
+ [(set (match_operand:SSEMODEF2P 0 "register_operand" "=x,x")
(unspec:SSEMODEF2P
- [(minus:SSEMODEF2P
- (match_operand:SSEMODEF2P 3 "register_operand" "")
+ [(plus:SSEMODEF2P
(mult:SSEMODEF2P
- (match_operand:SSEMODEF2P 1 "register_operand" "")
- (match_operand:SSEMODEF2P 2 "register_operand" "")))]
- UNSPEC_SSE5_INTRINSIC))]
- "TARGET_SSE5"
-{
- /* If we have -mfused-madd, emit the normal insn rather than the UNSPEC */
- if (TARGET_FUSED_MADD)
- {
- emit_insn (gen_sse5_fnmadd<mode>4 (operands[0], operands[1],
- operands[2], operands[3]));
- DONE;
- }
-})
+ (match_operand:SSEMODEF2P 1 "nonimmediate_operand" "x,x")
+ (match_operand:SSEMODEF2P 2 "nonimmediate_operand" "x,xm"))
+ (match_operand:SSEMODEF2P 3 "nonimmediate_operand" "xm,x"))]
+ UNSPEC_FMA4_INTRINSIC))]
+ "TARGET_FMA4 && ix86_fma4_valid_op_p (operands, insn, 4, true, 1, true)"
+ "vfmadd<ssemodesuffixf4>\t{%3, %2, %1, %0|%0, %1, %2, %3}"
+ [(set_attr "type" "ssemuladd")
+ (set_attr "mode" "<MODE>")])
-(define_insn "*sse5i_fnmadd<mode>4"
- [(set (match_operand:SSEMODEF2P 0 "register_operand" "=x,x,x,x")
+;; fmsub: op1 * op2 - op3.
+(define_insn "fma4i_fmsub<mode>4"
+ [(set (match_operand:SSEMODEF2P 0 "register_operand" "=x,x")
(unspec:SSEMODEF2P
[(minus:SSEMODEF2P
- (match_operand:SSEMODEF2P 3 "nonimmediate_operand" "xm,x,0,0")
(mult:SSEMODEF2P
- (match_operand:SSEMODEF2P 1 "nonimmediate_operand" "%0,0,x,xm")
- (match_operand:SSEMODEF2P 2 "nonimmediate_operand" "x,xm,xm,x")))]
- UNSPEC_SSE5_INTRINSIC))]
- "TARGET_SSE5 && ix86_sse5_valid_op_p (operands, insn, 4, true, 1, true)"
- "fnmadd<ssemodesuffixf4>\t{%3, %2, %1, %0|%0, %1, %2, %3}"
+ (match_operand:SSEMODEF2P 1 "nonimmediate_operand" "x,x")
+ (match_operand:SSEMODEF2P 2 "nonimmediate_operand" "x,xm"))
+ (match_operand:SSEMODEF2P 3 "nonimmediate_operand" "xm,x"))]
+ UNSPEC_FMA4_INTRINSIC))]
+ "TARGET_FMA4 && ix86_fma4_valid_op_p (operands, insn, 4, true, 1, true)"
+ "vfmsub<ssemodesuffixf4>\t{%3, %2, %1, %0|%0, %1, %2, %3}"
[(set_attr "type" "ssemuladd")
(set_attr "mode" "<MODE>")])
-;; Rewrite (- (a * b) - c) into the canonical form: ((-a) * b) - c
-(define_expand "sse5i_fnmsub<mode>4"
- [(set (match_operand:SSEMODEF2P 0 "register_operand" "")
+;; fnmadd: op3 - op1 * op2.
+(define_insn "fma4i_fnmadd<mode>4"
+ [(set (match_operand:SSEMODEF2P 0 "register_operand" "=x,x")
(unspec:SSEMODEF2P
[(minus:SSEMODEF2P
+ (match_operand:SSEMODEF2P 3 "nonimmediate_operand" "xm,x")
(mult:SSEMODEF2P
- (neg:SSEMODEF2P
- (match_operand:SSEMODEF2P 1 "register_operand" ""))
- (match_operand:SSEMODEF2P 2 "register_operand" ""))
- (match_operand:SSEMODEF2P 3 "register_operand" ""))]
- UNSPEC_SSE5_INTRINSIC))]
- "TARGET_SSE5"
-{
- /* If we have -mfused-madd, emit the normal insn rather than the UNSPEC */
- if (TARGET_FUSED_MADD)
- {
- emit_insn (gen_sse5_fnmsub<mode>4 (operands[0], operands[1],
- operands[2], operands[3]));
- DONE;
- }
-})
+ (match_operand:SSEMODEF2P 1 "nonimmediate_operand" "x,x")
+ (match_operand:SSEMODEF2P 2 "nonimmediate_operand" "x,xm")))]
+ UNSPEC_FMA4_INTRINSIC))]
+ "TARGET_FMA4 && ix86_fma4_valid_op_p (operands, insn, 4, true, 1, true)"
+ "vfnmadd<ssemodesuffixf4>\t{%3, %2, %1, %0|%0, %1, %2, %3}"
+ [(set_attr "type" "ssemuladd")
+ (set_attr "mode" "<MODE>")])
-(define_insn "*sse5i_fnmsub<mode>4"
- [(set (match_operand:SSEMODEF2P 0 "register_operand" "=x,x,x,x")
+;; fnmsub: (-op1) * op2 - op3; the negation rides on op1, hence the final
+;; "false" (non-commutative) flag to ix86_fma4_valid_op_p.
+(define_insn "fma4i_fnmsub<mode>4"
+ [(set (match_operand:SSEMODEF2P 0 "register_operand" "=x,x")
(unspec:SSEMODEF2P
[(minus:SSEMODEF2P
(mult:SSEMODEF2P
(neg:SSEMODEF2P
- (match_operand:SSEMODEF2P 1 "nonimmediate_operand" "0,0,x,xm"))
- (match_operand:SSEMODEF2P 2 "nonimmediate_operand" "x,xm,xm,x"))
- (match_operand:SSEMODEF2P 3 "nonimmediate_operand" "xm,x,0,0"))]
- UNSPEC_SSE5_INTRINSIC))]
- "TARGET_SSE5 && ix86_sse5_valid_op_p (operands, insn, 4, true, 1, false)"
- "fnmsub<ssemodesuffixf4>\t{%3, %2, %1, %0|%0, %1, %2, %3}"
+ (match_operand:SSEMODEF2P 1 "nonimmediate_operand" "x,x"))
+ (match_operand:SSEMODEF2P 2 "nonimmediate_operand" "x,xm"))
+ (match_operand:SSEMODEF2P 3 "nonimmediate_operand" "xm,x"))]
+ UNSPEC_FMA4_INTRINSIC))]
+ "TARGET_FMA4 && ix86_fma4_valid_op_p (operands, insn, 4, true, 1, false)"
+ "vfnmsub<ssemodesuffixf4>\t{%3, %2, %1, %0|%0, %1, %2, %3}"
[(set_attr "type" "ssemuladd")
(set_attr "mode" "<MODE>")])
-;; Scalar instructions
-(define_expand "sse5i_vmfmadd<mode>4"
- [(set (match_operand:SSEMODEF2P 0 "register_operand" "")
- (unspec:SSEMODEF2P
- [(vec_merge:SSEMODEF2P
- (plus:SSEMODEF2P
- (mult:SSEMODEF2P
- (match_operand:SSEMODEF2P 1 "register_operand" "")
- (match_operand:SSEMODEF2P 2 "register_operand" ""))
- (match_operand:SSEMODEF2P 3 "register_operand" ""))
- (match_dup 1)
- (const_int 0))]
- UNSPEC_SSE5_INTRINSIC))]
- "TARGET_SSE5"
-{
- /* If we have -mfused-madd, emit the normal insn rather than the UNSPEC */
- if (TARGET_FUSED_MADD)
- {
- emit_insn (gen_sse5_vmfmadd<mode>4 (operands[0], operands[1],
- operands[2], operands[3]));
- DONE;
- }
-})
-
;; For the scalar operations, use operand1 for the upper words that aren't
;; modified, so restrict the forms that are accepted.
-(define_insn "*sse5i_vmfmadd<mode>4"
+;; (const_int 1) selects element 0 of the arithmetic result; the remaining
+;; elements come from the vec_merge's second input.  NOTE(review): the
+;; comment above says the upper words come from operand 1, yet the merge
+;; below uses (match_dup 0), and operand 1's constraint is now "x,x" rather
+;; than "0" -- confirm this matches VFMADDSS/VFMADDSD, which copy the upper
+;; bits from the first source operand.
+(define_insn "fma4i_vmfmadd<mode>4"
[(set (match_operand:SSEMODEF2P 0 "register_operand" "=x,x")
(unspec:SSEMODEF2P
[(vec_merge:SSEMODEF2P
(plus:SSEMODEF2P
(mult:SSEMODEF2P
- (match_operand:SSEMODEF2P 1 "register_operand" "0,0")
+ (match_operand:SSEMODEF2P 1 "register_operand" "x,x")
(match_operand:SSEMODEF2P 2 "nonimmediate_operand" "x,xm"))
(match_operand:SSEMODEF2P 3 "nonimmediate_operand" "xm,x"))
(match_dup 0)
- (const_int 0))]
- UNSPEC_SSE5_INTRINSIC))]
- "TARGET_SSE5 && ix86_sse5_valid_op_p (operands, insn, 4, true, 1, false)"
- "fmadd<ssemodesuffixf2s>\t{%3, %2, %1, %0|%0, %1, %2, %3}"
+ (const_int 1))]
+ UNSPEC_FMA4_INTRINSIC))]
+ "TARGET_FMA4 && ix86_fma4_valid_op_p (operands, insn, 4, true, 1, false)"
+ "vfmadd<ssemodesuffixf2s>\t{%3, %2, %1, %0|%0, %1, %2, %3}"
[(set_attr "type" "ssemuladd")
(set_attr "mode" "<ssescalarmode>")])
-(define_expand "sse5i_vmfmsub<mode>4"
- [(set (match_operand:SSEMODEF2P 0 "register_operand" "")
- (unspec:SSEMODEF2P
- [(vec_merge:SSEMODEF2P
- (minus:SSEMODEF2P
- (mult:SSEMODEF2P
- (match_operand:SSEMODEF2P 1 "register_operand" "")
- (match_operand:SSEMODEF2P 2 "register_operand" ""))
- (match_operand:SSEMODEF2P 3 "register_operand" ""))
- (match_dup 0)
- (const_int 1))]
- UNSPEC_SSE5_INTRINSIC))]
- "TARGET_SSE5"
-{
- /* If we have -mfused-madd, emit the normal insn rather than the UNSPEC */
- if (TARGET_FUSED_MADD)
- {
- emit_insn (gen_sse5_vmfmsub<mode>4 (operands[0], operands[1],
- operands[2], operands[3]));
- DONE;
- }
-})
-
-(define_insn "*sse5i_vmfmsub<mode>4"
+;; Scalar fmsub: low element = op1 * op2 - op3.
+(define_insn "fma4i_vmfmsub<mode>4"
[(set (match_operand:SSEMODEF2P 0 "register_operand" "=x,x")
(unspec:SSEMODEF2P
[(vec_merge:SSEMODEF2P
(minus:SSEMODEF2P
(mult:SSEMODEF2P
- (match_operand:SSEMODEF2P 1 "register_operand" "0,0")
+ (match_operand:SSEMODEF2P 1 "register_operand" "x,x")
(match_operand:SSEMODEF2P 2 "nonimmediate_operand" "x,xm"))
(match_operand:SSEMODEF2P 3 "nonimmediate_operand" "xm,x"))
- (match_dup 1)
+ (match_dup 0)
(const_int 1))]
- UNSPEC_SSE5_INTRINSIC))]
- "TARGET_SSE5 && ix86_sse5_valid_op_p (operands, insn, 4, true, 1, false)"
- "fmsub<ssemodesuffixf2s>\t{%3, %2, %1, %0|%0, %1, %2, %3}"
+ UNSPEC_FMA4_INTRINSIC))]
+ "TARGET_FMA4 && ix86_fma4_valid_op_p (operands, insn, 4, true, 1, false)"
+ "vfmsub<ssemodesuffixf2s>\t{%3, %2, %1, %0|%0, %1, %2, %3}"
[(set_attr "type" "ssemuladd")
(set_attr "mode" "<ssescalarmode>")])
-;; Note operands are out of order to simplify call to ix86_sse5_valid_p
-(define_expand "sse5i_vmfnmadd<mode>4"
- [(set (match_operand:SSEMODEF2P 0 "register_operand" "")
- (unspec:SSEMODEF2P
- [(vec_merge:SSEMODEF2P
- (minus:SSEMODEF2P
- (match_operand:SSEMODEF2P 3 "register_operand" "")
- (mult:SSEMODEF2P
- (match_operand:SSEMODEF2P 1 "register_operand" "")
- (match_operand:SSEMODEF2P 2 "register_operand" "")))
- (match_dup 1)
- (const_int 1))]
- UNSPEC_SSE5_INTRINSIC))]
- "TARGET_SSE5"
-{
- /* If we have -mfused-madd, emit the normal insn rather than the UNSPEC */
- if (TARGET_FUSED_MADD)
- {
- emit_insn (gen_sse5_vmfnmadd<mode>4 (operands[0], operands[1],
- operands[2], operands[3]));
- DONE;
- }
-})
-
-(define_insn "*sse5i_vmfnmadd<mode>4"
+;; Scalar fnmadd: low element = op3 - op1 * op2.
+(define_insn "fma4i_vmfnmadd<mode>4"
[(set (match_operand:SSEMODEF2P 0 "register_operand" "=x,x")
(unspec:SSEMODEF2P
[(vec_merge:SSEMODEF2P
(minus:SSEMODEF2P
(match_operand:SSEMODEF2P 3 "nonimmediate_operand" "xm,x")
(mult:SSEMODEF2P
- (match_operand:SSEMODEF2P 1 "nonimmediate_operand" "%0,0")
+ (match_operand:SSEMODEF2P 1 "nonimmediate_operand" "x,x")
(match_operand:SSEMODEF2P 2 "nonimmediate_operand" "x,xm")))
- (match_dup 1)
+ (match_dup 0)
(const_int 1))]
- UNSPEC_SSE5_INTRINSIC))]
- "TARGET_SSE5 && ix86_sse5_valid_op_p (operands, insn, 4, true, 1, true)"
- "fnmadd<ssemodesuffixf2s>\t{%3, %2, %1, %0|%0, %1, %2, %3}"
+ UNSPEC_FMA4_INTRINSIC))]
+ "TARGET_FMA4 && ix86_fma4_valid_op_p (operands, insn, 4, true, 1, true)"
+ "vfnmadd<ssemodesuffixf2s>\t{%3, %2, %1, %0|%0, %1, %2, %3}"
[(set_attr "type" "ssemuladd")
(set_attr "mode" "<ssescalarmode>")])
-(define_expand "sse5i_vmfnmsub<mode>4"
- [(set (match_operand:SSEMODEF2P 0 "register_operand" "")
- (unspec:SSEMODEF2P
- [(vec_merge:SSEMODEF2P
- (minus:SSEMODEF2P
- (mult:SSEMODEF2P
- (neg:SSEMODEF2P
- (match_operand:SSEMODEF2P 1 "register_operand" ""))
- (match_operand:SSEMODEF2P 2 "register_operand" ""))
- (match_operand:SSEMODEF2P 3 "register_operand" ""))
- (match_dup 1)
- (const_int 1))]
- UNSPEC_SSE5_INTRINSIC))]
- "TARGET_SSE5"
-{
- /* If we have -mfused-madd, emit the normal insn rather than the UNSPEC */
- if (TARGET_FUSED_MADD)
- {
- emit_insn (gen_sse5_vmfnmsub<mode>4 (operands[0], operands[1],
- operands[2], operands[3]));
- DONE;
- }
-})
-
-(define_insn "*sse5i_vmfnmsub<mode>4"
+;; Scalar fnmsub: low element = (-op1) * op2 - op3; non-commutative in
+;; op1/op2 (final "false" flag) since op1 carries the negation.
+(define_insn "fma4i_vmfnmsub<mode>4"
[(set (match_operand:SSEMODEF2P 0 "register_operand" "=x,x")
(unspec:SSEMODEF2P
[(vec_merge:SSEMODEF2P
(minus:SSEMODEF2P
(mult:SSEMODEF2P
(neg:SSEMODEF2P
- (match_operand:SSEMODEF2P 1 "register_operand" "0,0"))
+ (match_operand:SSEMODEF2P 1 "register_operand" "x,x"))
(match_operand:SSEMODEF2P 2 "nonimmediate_operand" "x,xm"))
(match_operand:SSEMODEF2P 3 "nonimmediate_operand" "xm,x"))
- (match_dup 1)
+ (match_dup 0)
(const_int 1))]
- UNSPEC_SSE5_INTRINSIC))]
- "TARGET_SSE5 && ix86_sse5_valid_op_p (operands, insn, 4, true, 1, false)"
- "fnmsub<ssemodesuffixf2s>\t{%3, %2, %1, %0|%0, %1, %2, %3}"
+ UNSPEC_FMA4_INTRINSIC))]
+ "TARGET_FMA4 && ix86_fma4_valid_op_p (operands, insn, 4, true, 1, false)"
+ "vfnmsub<ssemodesuffixf2s>\t{%3, %2, %1, %0|%0, %1, %2, %3}"
[(set_attr "type" "ssemuladd")
(set_attr "mode" "<ssescalarmode>")])
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;;
+;; FMA4 Parallel floating point multiply addsub and subadd operations
+;;
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+
+;; vec_merge takes element i from its first (here, the plus) input when
+;; mask bit i is set.  Masks 170/10/2 (0b1010...) therefore add into the
+;; odd elements and subtract into the even ones (vfmaddsub); masks 85/5/1
+;; (0b0101...) are the inverse selection (vfmsubadd).
+(define_insn "fma4_fmaddsubv8sf4"
+ [(set (match_operand:V8SF 0 "register_operand" "=x,x")
+ (vec_merge:V8SF
+ (plus:V8SF
+ (mult:V8SF
+ (match_operand:V8SF 1 "nonimmediate_operand" "x,x")
+ (match_operand:V8SF 2 "nonimmediate_operand" "x,xm"))
+ (match_operand:V8SF 3 "nonimmediate_operand" "xm,x"))
+ (minus:V8SF
+ (mult:V8SF
+ (match_dup 1)
+ (match_dup 2))
+ (match_dup 3))
+ (const_int 170)))]
+ "TARGET_FMA4
+ && ix86_fma4_valid_op_p (operands, insn, 4, true, 2, true)"
+ "vfmaddsubps\t{%3, %2, %1, %0|%0, %1, %2, %3}"
+ [(set_attr "type" "ssemuladd")
+ (set_attr "mode" "V8SF")])
+
+(define_insn "fma4_fmaddsubv4df4"
+ [(set (match_operand:V4DF 0 "register_operand" "=x,x")
+ (vec_merge:V4DF
+ (plus:V4DF
+ (mult:V4DF
+ (match_operand:V4DF 1 "nonimmediate_operand" "x,x")
+ (match_operand:V4DF 2 "nonimmediate_operand" "x,xm"))
+ (match_operand:V4DF 3 "nonimmediate_operand" "xm,x"))
+ (minus:V4DF
+ (mult:V4DF
+ (match_dup 1)
+ (match_dup 2))
+ (match_dup 3))
+ (const_int 10)))]
+ "TARGET_FMA4
+ && ix86_fma4_valid_op_p (operands, insn, 4, true, 2, true)"
+ "vfmaddsubpd\t{%3, %2, %1, %0|%0, %1, %2, %3}"
+ [(set_attr "type" "ssemuladd")
+ (set_attr "mode" "V4DF")])
+
+(define_insn "fma4_fmaddsubv4sf4"
+ [(set (match_operand:V4SF 0 "register_operand" "=x,x")
+ (vec_merge:V4SF
+ (plus:V4SF
+ (mult:V4SF
+ (match_operand:V4SF 1 "nonimmediate_operand" "x,x")
+ (match_operand:V4SF 2 "nonimmediate_operand" "x,xm"))
+ (match_operand:V4SF 3 "nonimmediate_operand" "xm,x"))
+ (minus:V4SF
+ (mult:V4SF
+ (match_dup 1)
+ (match_dup 2))
+ (match_dup 3))
+ (const_int 10)))]
+ "TARGET_FMA4
+ && ix86_fma4_valid_op_p (operands, insn, 4, true, 2, true)"
+ "vfmaddsubps\t{%3, %2, %1, %0|%0, %1, %2, %3}"
+ [(set_attr "type" "ssemuladd")
+ (set_attr "mode" "V4SF")])
+
+(define_insn "fma4_fmaddsubv2df4"
+ [(set (match_operand:V2DF 0 "register_operand" "=x,x")
+ (vec_merge:V2DF
+ (plus:V2DF
+ (mult:V2DF
+ (match_operand:V2DF 1 "nonimmediate_operand" "x,x")
+ (match_operand:V2DF 2 "nonimmediate_operand" "x,xm"))
+ (match_operand:V2DF 3 "nonimmediate_operand" "xm,x"))
+ (minus:V2DF
+ (mult:V2DF
+ (match_dup 1)
+ (match_dup 2))
+ (match_dup 3))
+ (const_int 2)))]
+ "TARGET_FMA4
+ && ix86_fma4_valid_op_p (operands, insn, 4, true, 2, true)"
+ "vfmaddsubpd\t{%3, %2, %1, %0|%0, %1, %2, %3}"
+ [(set_attr "type" "ssemuladd")
+ (set_attr "mode" "V2DF")])
+
+;; fmsubadd: subtract in the odd elements, add in the even ones.
+(define_insn "fma4_fmsubaddv8sf4"
+ [(set (match_operand:V8SF 0 "register_operand" "=x,x")
+ (vec_merge:V8SF
+ (plus:V8SF
+ (mult:V8SF
+ (match_operand:V8SF 1 "nonimmediate_operand" "x,x")
+ (match_operand:V8SF 2 "nonimmediate_operand" "x,xm"))
+ (match_operand:V8SF 3 "nonimmediate_operand" "xm,x"))
+ (minus:V8SF
+ (mult:V8SF
+ (match_dup 1)
+ (match_dup 2))
+ (match_dup 3))
+ (const_int 85)))]
+ "TARGET_FMA4
+ && ix86_fma4_valid_op_p (operands, insn, 4, true, 2, true)"
+ "vfmsubaddps\t{%3, %2, %1, %0|%0, %1, %2, %3}"
+ [(set_attr "type" "ssemuladd")
+ (set_attr "mode" "V8SF")])
+
+(define_insn "fma4_fmsubaddv4df4"
+ [(set (match_operand:V4DF 0 "register_operand" "=x,x")
+ (vec_merge:V4DF
+ (plus:V4DF
+ (mult:V4DF
+ (match_operand:V4DF 1 "nonimmediate_operand" "x,x")
+ (match_operand:V4DF 2 "nonimmediate_operand" "x,xm"))
+ (match_operand:V4DF 3 "nonimmediate_operand" "xm,x"))
+ (minus:V4DF
+ (mult:V4DF
+ (match_dup 1)
+ (match_dup 2))
+ (match_dup 3))
+ (const_int 5)))]
+ "TARGET_FMA4
+ && ix86_fma4_valid_op_p (operands, insn, 4, true, 2, true)"
+ "vfmsubaddpd\t{%3, %2, %1, %0|%0, %1, %2, %3}"
+ [(set_attr "type" "ssemuladd")
+ (set_attr "mode" "V4DF")])
+
+(define_insn "fma4_fmsubaddv4sf4"
+ [(set (match_operand:V4SF 0 "register_operand" "=x,x")
+ (vec_merge:V4SF
+ (plus:V4SF
+ (mult:V4SF
+ (match_operand:V4SF 1 "nonimmediate_operand" "x,x")
+ (match_operand:V4SF 2 "nonimmediate_operand" "x,xm"))
+ (match_operand:V4SF 3 "nonimmediate_operand" "xm,x"))
+ (minus:V4SF
+ (mult:V4SF
+ (match_dup 1)
+ (match_dup 2))
+ (match_dup 3))
+ (const_int 5)))]
+ "TARGET_FMA4
+ && ix86_fma4_valid_op_p (operands, insn, 4, true, 2, true)"
+ "vfmsubaddps\t{%3, %2, %1, %0|%0, %1, %2, %3}"
+ [(set_attr "type" "ssemuladd")
+ (set_attr "mode" "V4SF")])
+
+(define_insn "fma4_fmsubaddv2df4"
+ [(set (match_operand:V2DF 0 "register_operand" "=x,x")
+ (vec_merge:V2DF
+ (plus:V2DF
+ (mult:V2DF
+ (match_operand:V2DF 1 "nonimmediate_operand" "x,x")
+ (match_operand:V2DF 2 "nonimmediate_operand" "x,xm"))
+ (match_operand:V2DF 3 "nonimmediate_operand" "xm,x"))
+ (minus:V2DF
+ (mult:V2DF
+ (match_dup 1)
+ (match_dup 2))
+ (match_dup 3))
+ (const_int 1)))]
+ "TARGET_FMA4
+ && ix86_fma4_valid_op_p (operands, insn, 4, true, 2, true)"
+ "vfmsubaddpd\t{%3, %2, %1, %0|%0, %1, %2, %3}"
+ [(set_attr "type" "ssemuladd")
+ (set_attr "mode" "V2DF")])
+
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+
+;; UNSPEC-wrapped (intrinsic) copies of the fmaddsub/fmsubadd patterns
+;; above, same vec_merge masks -- presumably so the explicit intrinsics
+;; are kept apart from combiner-generated forms; confirm against the
+;; builtin expanders in i386.c.
+(define_insn "fma4i_fmaddsubv8sf4"
+ [(set (match_operand:V8SF 0 "register_operand" "=x,x")
+ (unspec:V8SF
+ [(vec_merge:V8SF
+ (plus:V8SF
+ (mult:V8SF
+ (match_operand:V8SF 1 "nonimmediate_operand" "x,x")
+ (match_operand:V8SF 2 "nonimmediate_operand" "x,xm"))
+ (match_operand:V8SF 3 "nonimmediate_operand" "xm,x"))
+ (minus:V8SF
+ (mult:V8SF
+ (match_dup 1)
+ (match_dup 2))
+ (match_dup 3))
+ (const_int 170))]
+ UNSPEC_FMA4_INTRINSIC))]
+ "TARGET_FMA4
+ && ix86_fma4_valid_op_p (operands, insn, 4, true, 2, true)"
+ "vfmaddsubps\t{%3, %2, %1, %0|%0, %1, %2, %3}"
+ [(set_attr "type" "ssemuladd")
+ (set_attr "mode" "V8SF")])
+
+(define_insn "fma4i_fmaddsubv4df4"
+ [(set (match_operand:V4DF 0 "register_operand" "=x,x")
+ (unspec:V4DF
+ [(vec_merge:V4DF
+ (plus:V4DF
+ (mult:V4DF
+ (match_operand:V4DF 1 "nonimmediate_operand" "x,x")
+ (match_operand:V4DF 2 "nonimmediate_operand" "x,xm"))
+ (match_operand:V4DF 3 "nonimmediate_operand" "xm,x"))
+ (minus:V4DF
+ (mult:V4DF
+ (match_dup 1)
+ (match_dup 2))
+ (match_dup 3))
+ (const_int 10))]
+ UNSPEC_FMA4_INTRINSIC))]
+ "TARGET_FMA4
+ && ix86_fma4_valid_op_p (operands, insn, 4, true, 2, true)"
+ "vfmaddsubpd\t{%3, %2, %1, %0|%0, %1, %2, %3}"
+ [(set_attr "type" "ssemuladd")
+ (set_attr "mode" "V4DF")])
+
+(define_insn "fma4i_fmaddsubv4sf4"
+ [(set (match_operand:V4SF 0 "register_operand" "=x,x")
+ (unspec:V4SF
+ [(vec_merge:V4SF
+ (plus:V4SF
+ (mult:V4SF
+ (match_operand:V4SF 1 "nonimmediate_operand" "x,x")
+ (match_operand:V4SF 2 "nonimmediate_operand" "x,xm"))
+ (match_operand:V4SF 3 "nonimmediate_operand" "xm,x"))
+ (minus:V4SF
+ (mult:V4SF
+ (match_dup 1)
+ (match_dup 2))
+ (match_dup 3))
+ (const_int 10))]
+ UNSPEC_FMA4_INTRINSIC))]
+ "TARGET_FMA4
+ && ix86_fma4_valid_op_p (operands, insn, 4, true, 2, true)"
+ "vfmaddsubps\t{%3, %2, %1, %0|%0, %1, %2, %3}"
+ [(set_attr "type" "ssemuladd")
+ (set_attr "mode" "V4SF")])
+
+(define_insn "fma4i_fmaddsubv2df4"
+ [(set (match_operand:V2DF 0 "register_operand" "=x,x")
+ (unspec:V2DF
+ [(vec_merge:V2DF
+ (plus:V2DF
+ (mult:V2DF
+ (match_operand:V2DF 1 "nonimmediate_operand" "x,x")
+ (match_operand:V2DF 2 "nonimmediate_operand" "x,xm"))
+ (match_operand:V2DF 3 "nonimmediate_operand" "xm,x"))
+ (minus:V2DF
+ (mult:V2DF
+ (match_dup 1)
+ (match_dup 2))
+ (match_dup 3))
+ (const_int 2))]
+ UNSPEC_FMA4_INTRINSIC))]
+ "TARGET_FMA4
+ && ix86_fma4_valid_op_p (operands, insn, 4, true, 2, true)"
+ "vfmaddsubpd\t{%3, %2, %1, %0|%0, %1, %2, %3}"
+ [(set_attr "type" "ssemuladd")
+ (set_attr "mode" "V2DF")])
+
+(define_insn "fma4i_fmsubaddv8sf4"
+ [(set (match_operand:V8SF 0 "register_operand" "=x,x")
+ (unspec:V8SF
+ [(vec_merge:V8SF
+ (plus:V8SF
+ (mult:V8SF
+ (match_operand:V8SF 1 "nonimmediate_operand" "x,x")
+ (match_operand:V8SF 2 "nonimmediate_operand" "x,xm"))
+ (match_operand:V8SF 3 "nonimmediate_operand" "xm,x"))
+ (minus:V8SF
+ (mult:V8SF
+ (match_dup 1)
+ (match_dup 2))
+ (match_dup 3))
+ (const_int 85))]
+ UNSPEC_FMA4_INTRINSIC))]
+ "TARGET_FMA4
+ && ix86_fma4_valid_op_p (operands, insn, 4, true, 2, true)"
+ "vfmsubaddps\t{%3, %2, %1, %0|%0, %1, %2, %3}"
+ [(set_attr "type" "ssemuladd")
+ (set_attr "mode" "V8SF")])
+
+(define_insn "fma4i_fmsubaddv4df4"
+ [(set (match_operand:V4DF 0 "register_operand" "=x,x")
+ (unspec:V4DF
+ [(vec_merge:V4DF
+ (plus:V4DF
+ (mult:V4DF
+ (match_operand:V4DF 1 "nonimmediate_operand" "x,x")
+ (match_operand:V4DF 2 "nonimmediate_operand" "x,xm"))
+ (match_operand:V4DF 3 "nonimmediate_operand" "xm,x"))
+ (minus:V4DF
+ (mult:V4DF
+ (match_dup 1)
+ (match_dup 2))
+ (match_dup 3))
+ (const_int 5))]
+ UNSPEC_FMA4_INTRINSIC))]
+ "TARGET_FMA4
+ && ix86_fma4_valid_op_p (operands, insn, 4, true, 2, true)"
+ "vfmsubaddpd\t{%3, %2, %1, %0|%0, %1, %2, %3}"
+ [(set_attr "type" "ssemuladd")
+ (set_attr "mode" "V4DF")])
+
+(define_insn "fma4i_fmsubaddv4sf4"
+ [(set (match_operand:V4SF 0 "register_operand" "=x,x")
+ (unspec:V4SF
+ [(vec_merge:V4SF
+ (plus:V4SF
+ (mult:V4SF
+ (match_operand:V4SF 1 "nonimmediate_operand" "x,x")
+ (match_operand:V4SF 2 "nonimmediate_operand" "x,xm"))
+ (match_operand:V4SF 3 "nonimmediate_operand" "xm,x"))
+ (minus:V4SF
+ (mult:V4SF
+ (match_dup 1)
+ (match_dup 2))
+ (match_dup 3))
+ (const_int 5))]
+ UNSPEC_FMA4_INTRINSIC))]
+ "TARGET_FMA4
+ && ix86_fma4_valid_op_p (operands, insn, 4, true, 2, true)"
+ "vfmsubaddps\t{%3, %2, %1, %0|%0, %1, %2, %3}"
+ [(set_attr "type" "ssemuladd")
+ (set_attr "mode" "V4SF")])
+
+(define_insn "fma4i_fmsubaddv2df4"
+ [(set (match_operand:V2DF 0 "register_operand" "=x,x")
+ (unspec:V2DF
+ [(vec_merge:V2DF
+ (plus:V2DF
+ (mult:V2DF
+ (match_operand:V2DF 1 "nonimmediate_operand" "x,x")
+ (match_operand:V2DF 2 "nonimmediate_operand" "x,xm"))
+ (match_operand:V2DF 3 "nonimmediate_operand" "xm,x"))
+ (minus:V2DF
+ (mult:V2DF
+ (match_dup 1)
+ (match_dup 2))
+ (match_dup 3))
+ (const_int 1))]
+ UNSPEC_FMA4_INTRINSIC))]
+ "TARGET_FMA4
+ && ix86_fma4_valid_op_p (operands, insn, 4, true, 2, true)"
+ "vfmsubaddpd\t{%3, %2, %1, %0|%0, %1, %2, %3}"
+ [(set_attr "type" "ssemuladd")
+ (set_attr "mode" "V2DF")])
+
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;;
;; Parallel single-precision floating point conversion operations
;;
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
"cvttps2pi\t{%1, %0|%0, %1}"
[(set_attr "type" "ssecvt")
(set_attr "unit" "mmx")
+ (set_attr "prefix_rep" "0")
(set_attr "mode" "SF")])
(define_insn "*avx_cvtsi2ss"
"TARGET_AVX && TARGET_64BIT"
"vcvtsi2ssq\t{%2, %1, %0|%0, %1, %2}"
[(set_attr "type" "sseicvt")
+ (set_attr "length_vex" "4")
(set_attr "prefix" "vex")
(set_attr "mode" "SF")])
"TARGET_SSE && TARGET_64BIT"
"cvtsi2ssq\t{%2, %0|%0, %2}"
[(set_attr "type" "sseicvt")
+ (set_attr "prefix_rex" "1")
(set_attr "athlon_decode" "vector,double")
(set_attr "amdfam10_decode" "vector,double")
(set_attr "mode" "SF")])
[(set_attr "type" "ssecvt")
(set_attr "mode" "V4SF")])
+;; Unsigned V4SI -> V4SF conversion.  cvtdq2ps converts as *signed*, so an
+;; input with bit 31 set comes out 2**32 too small (and negative); compare
+;; the converted result against zero (operand 3), mask a vector of 2**32
+;; (operand 4) by that comparison, and add it back in to correct.
+(define_expand "sse2_cvtudq2ps"
+ [(set (match_dup 5)
+ (float:V4SF (match_operand:V4SI 1 "nonimmediate_operand" "")))
+ (set (match_dup 6)
+ (lt:V4SF (match_dup 5) (match_dup 3)))
+ (set (match_dup 7)
+ (and:V4SF (match_dup 6) (match_dup 4)))
+ (set (match_operand:V4SF 0 "register_operand" "")
+ (plus:V4SF (match_dup 5) (match_dup 7)))]
+ "TARGET_SSE2"
+{
+ REAL_VALUE_TYPE TWO32r;
+ rtx x;
+ int i;
+
+ real_ldexp (&TWO32r, &dconst1, 32);
+ x = const_double_from_real_value (TWO32r, SFmode);
+
+ operands[3] = force_reg (V4SFmode, CONST0_RTX (V4SFmode));
+ operands[4] = force_reg (V4SFmode, ix86_build_const_vector (SFmode, 1, x));
+
+ for (i = 5; i < 8; i++)
+ operands[i] = gen_reg_rtx (V4SFmode);
+})
+
(define_insn "avx_cvtps2dq<avxmodesuffix>"
[(set (match_operand:AVXMODEDCVTPS2DQ 0 "register_operand" "=x")
(unspec:AVXMODEDCVTPS2DQ
"cvttps2dq\t{%1, %0|%0, %1}"
[(set_attr "type" "ssecvt")
(set_attr "prefix_rep" "1")
+ (set_attr "prefix_data16" "0")
(set_attr "mode" "TI")])
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
"cvtpi2pd\t{%1, %0|%0, %1}"
[(set_attr "type" "ssecvt")
(set_attr "unit" "mmx,*")
+ (set_attr "prefix_data16" "1,*")
(set_attr "mode" "V2DF")])
(define_insn "sse2_cvtpd2pi"
"TARGET_AVX && TARGET_64BIT"
"vcvtsi2sdq\t{%2, %1, %0|%0, %1, %2}"
[(set_attr "type" "sseicvt")
+ (set_attr "length_vex" "4")
(set_attr "prefix" "vex")
(set_attr "mode" "DF")])
"TARGET_SSE2 && TARGET_64BIT"
"cvtsi2sdq\t{%2, %0|%0, %2}"
[(set_attr "type" "sseicvt")
+ (set_attr "prefix_rex" "1")
(set_attr "mode" "DF")
(set_attr "athlon_decode" "double,direct")
(set_attr "amdfam10_decode" "vector,double")])
: \"cvtpd2dq\t{%1, %0|%0, %1}\";"
[(set_attr "type" "ssecvt")
(set_attr "prefix_rep" "1")
+ (set_attr "prefix_data16" "0")
(set_attr "prefix" "maybe_vex")
(set_attr "mode" "TI")
(set_attr "amdfam10_decode" "double")])
"* return TARGET_AVX ? \"vcvttpd2dq{x}\t{%1, %0|%0, %1}\"
: \"cvttpd2dq\t{%1, %0|%0, %1}\";"
[(set_attr "type" "ssecvt")
- (set_attr "prefix_rep" "1")
(set_attr "prefix" "maybe_vex")
(set_attr "mode" "TI")
(set_attr "amdfam10_decode" "double")])
[(set_attr "type" "ssecvt")
(set_attr "prefix" "maybe_vex")
(set_attr "mode" "V2DF")
+ (set_attr "prefix_data16" "0")
(set_attr "amdfam10_decode" "direct")])
(define_expand "vec_unpacks_hi_v4sf"
DONE;
})
-(define_expand "vec_unpacks_float_hi_v4si"
- [(set (match_dup 2)
+;; NOTE(review): this expand appears textually identical to the removed
+;; lines above and below -- presumably a whitespace-only change in the
+;; original patch.
+(define_expand "vec_unpacks_float_hi_v4si"
+ [(set (match_dup 2)
+ (vec_select:V4SI
+ (match_operand:V4SI 1 "nonimmediate_operand" "")
+ (parallel [(const_int 2)
+ (const_int 3)
+ (const_int 2)
+ (const_int 3)])))
+ (set (match_operand:V2DF 0 "register_operand" "")
+ (float:V2DF
+ (vec_select:V2SI
+ (match_dup 2)
+ (parallel [(const_int 0) (const_int 1)]))))]
+ "TARGET_SSE2"
+ "operands[2] = gen_reg_rtx (V4SImode);")
+
+(define_expand "vec_unpacks_float_lo_v4si"
+ [(set (match_operand:V2DF 0 "register_operand" "")
+ (float:V2DF
+ (vec_select:V2SI
+ (match_operand:V4SI 1 "nonimmediate_operand" "")
+ (parallel [(const_int 0) (const_int 1)]))))]
+ "TARGET_SSE2")
+
+;; Unsigned variants: the hardware int->double conversion is signed, so an
+;; element with bit 31 set converts 2**32 too low; steps after the float
+;; compare the result against zero (operand 3) and add a masked vector of
+;; 2**32 (operand 4) to correct it.
+(define_expand "vec_unpacku_float_hi_v4si"
+ [(set (match_dup 5)
(vec_select:V4SI
(match_operand:V4SI 1 "nonimmediate_operand" "")
(parallel [(const_int 2)
(const_int 3)
(const_int 2)
(const_int 3)])))
- (set (match_operand:V2DF 0 "register_operand" "")
+ (set (match_dup 6)
(float:V2DF
(vec_select:V2SI
- (match_dup 2)
- (parallel [(const_int 0) (const_int 1)]))))]
+ (match_dup 5)
+ (parallel [(const_int 0) (const_int 1)]))))
+ (set (match_dup 7)
+ (lt:V2DF (match_dup 6) (match_dup 3)))
+ (set (match_dup 8)
+ (and:V2DF (match_dup 7) (match_dup 4)))
+ (set (match_operand:V2DF 0 "register_operand" "")
+ (plus:V2DF (match_dup 6) (match_dup 8)))]
"TARGET_SSE2"
{
- operands[2] = gen_reg_rtx (V4SImode);
+ REAL_VALUE_TYPE TWO32r;
+ rtx x;
+ int i;
+
+ real_ldexp (&TWO32r, &dconst1, 32);
+ x = const_double_from_real_value (TWO32r, DFmode);
+
+ operands[3] = force_reg (V2DFmode, CONST0_RTX (V2DFmode));
+ operands[4] = force_reg (V2DFmode, ix86_build_const_vector (DFmode, 1, x));
+
+ operands[5] = gen_reg_rtx (V4SImode);
+
+ for (i = 6; i < 9; i++)
+ operands[i] = gen_reg_rtx (V2DFmode);
})
-(define_expand "vec_unpacks_float_lo_v4si"
- [(set (match_operand:V2DF 0 "register_operand" "")
+(define_expand "vec_unpacku_float_lo_v4si"
+ [(set (match_dup 5)
(float:V2DF
(vec_select:V2SI
(match_operand:V4SI 1 "nonimmediate_operand" "")
- (parallel [(const_int 0) (const_int 1)]))))]
- "TARGET_SSE2")
+ (parallel [(const_int 0) (const_int 1)]))))
+ (set (match_dup 6)
+ (lt:V2DF (match_dup 5) (match_dup 3)))
+ (set (match_dup 7)
+ (and:V2DF (match_dup 6) (match_dup 4)))
+ (set (match_operand:V2DF 0 "register_operand" "")
+ (plus:V2DF (match_dup 5) (match_dup 7)))]
+ "TARGET_SSE2"
+{
+ REAL_VALUE_TYPE TWO32r;
+ rtx x;
+ int i;
+
+ real_ldexp (&TWO32r, &dconst1, 32);
+ x = const_double_from_real_value (TWO32r, DFmode);
+
+ operands[3] = force_reg (V2DFmode, CONST0_RTX (V2DFmode));
+ operands[4] = force_reg (V2DFmode, ix86_build_const_vector (DFmode, 1, x));
+
+ for (i = 5; i < 8; i++)
+ operands[i] = gen_reg_rtx (V2DFmode);
+})
(define_expand "vec_pack_trunc_v2df"
[(match_operand:V4SF 0 "register_operand" "")
return "vshufps\t{%3, %2, %1, %0|%0, %1, %2, %3}";
}
[(set_attr "type" "sselog")
+ (set_attr "length_immediate" "1")
(set_attr "prefix" "vex")
(set_attr "mode" "V8SF")])
return "vshufps\t{%3, %2, %1, %0|%0, %1, %2, %3}";
}
[(set_attr "type" "sselog")
+ (set_attr "length_immediate" "1")
(set_attr "prefix" "vex")
(set_attr "mode" "V4SF")])
return "shufps\t{%3, %2, %0|%0, %2, %3}";
}
[(set_attr "type" "sselog")
+ (set_attr "length_immediate" "1")
(set_attr "mode" "V4SF")])
(define_insn "sse_storehps"
vmovlps\t{%2, %1, %0|%0, %1, %2}
vmovlps\t{%2, %0|%0, %2}"
[(set_attr "type" "sselog,ssemov,ssemov")
+ (set_attr "length_immediate" "1,*,*")
(set_attr "prefix" "vex")
(set_attr "mode" "V4SF,V2SF,V2SF")])
movlps\t{%2, %0|%0, %2}
movlps\t{%2, %0|%0, %2}"
[(set_attr "type" "sselog,ssemov,ssemov")
+ (set_attr "length_immediate" "1,*,*")
(set_attr "mode" "V4SF,V2SF,V2SF")])
(define_insn "*avx_movss"
"TARGET_AVX"
"vshufps\t{$0, %1, %1, %0|%0, %1, %1, 0}"
[(set_attr "type" "sselog1")
+ (set_attr "length_immediate" "1")
(set_attr "prefix" "vex")
(set_attr "mode" "V4SF")])
"TARGET_SSE"
"shufps\t{$0, %0, %0|%0, %0, 0}"
[(set_attr "type" "sselog1")
+ (set_attr "length_immediate" "1")
(set_attr "mode" "V4SF")])
(define_insn "*vec_concatv2sf_avx"
punpckldq\t{%2, %0|%0, %2}
movd\t{%1, %0|%0, %1}"
[(set_attr "type" "sselog,sselog,ssemov,mmxcvt,mmxmov")
+ (set_attr "length_immediate" "*,1,*,*,*")
+ (set_attr "prefix_extra" "*,1,*,*,*")
(set (attr "prefix")
(if_then_else (eq_attr "alternative" "3,4")
(const_string "orig")
punpckldq\t{%2, %0|%0, %2}
movd\t{%1, %0|%0, %1}"
[(set_attr "type" "sselog,sselog,ssemov,mmxcvt,mmxmov")
+ (set_attr "prefix_data16" "*,1,*,*,*")
(set_attr "prefix_extra" "*,1,*,*,*")
+ (set_attr "length_immediate" "*,1,*,*,*")
(set_attr "mode" "V4SF,V4SF,SF,DI,DI")])
;; ??? In theory we can match memory for the MMX alternative, but allowing
return "vinsertps\t{%3, %2, %1, %0|%0, %1, %2, %3}";
}
[(set_attr "type" "sselog")
+ (set_attr "prefix_extra" "1")
+ (set_attr "length_immediate" "1")
(set_attr "prefix" "vex")
(set_attr "mode" "V4SF")])
return "insertps\t{%3, %2, %0|%0, %2, %3}";
}
[(set_attr "type" "sselog")
+ (set_attr "prefix_data16" "1")
(set_attr "prefix_extra" "1")
+ (set_attr "length_immediate" "1")
(set_attr "mode" "V4SF")])
(define_insn "*avx_insertps"
"vinsertps\t{%3, %2, %1, %0|%0, %1, %2, %3}";
[(set_attr "type" "sselog")
(set_attr "prefix" "vex")
+ (set_attr "prefix_extra" "1")
+ (set_attr "length_immediate" "1")
(set_attr "mode" "V4SF")])
(define_insn "sse4_1_insertps"
"TARGET_SSE4_1"
"insertps\t{%3, %2, %0|%0, %2, %3}";
[(set_attr "type" "sselog")
+ (set_attr "prefix_data16" "1")
(set_attr "prefix_extra" "1")
+ (set_attr "length_immediate" "1")
(set_attr "mode" "V4SF")])
(define_split
"TARGET_AVX"
"vextractf128\t{$0x0, %1, %0|%0, %1, 0x0}"
[(set_attr "type" "sselog")
+ (set_attr "prefix_extra" "1")
+ (set_attr "length_immediate" "1")
(set_attr "memory" "none,store")
(set_attr "prefix" "vex")
(set_attr "mode" "V8SF")])
"TARGET_AVX"
"vextractf128\t{$0x1, %1, %0|%0, %1, 0x1}"
[(set_attr "type" "sselog")
+ (set_attr "prefix_extra" "1")
+ (set_attr "length_immediate" "1")
(set_attr "memory" "none,store")
(set_attr "prefix" "vex")
(set_attr "mode" "V8SF")])
"TARGET_AVX"
"vextractf128\t{$0x1, %1, %0|%0, %1, 0x1}"
[(set_attr "type" "sselog")
+ (set_attr "prefix_extra" "1")
+ (set_attr "length_immediate" "1")
(set_attr "memory" "none,store")
(set_attr "prefix" "vex")
(set_attr "mode" "V8SF")])
"TARGET_AVX"
"vextractf128\t{$0x1, %1, %0|%0, %1, 0x1}"
[(set_attr "type" "sselog")
+ (set_attr "prefix_extra" "1")
+ (set_attr "length_immediate" "1")
(set_attr "memory" "none,store")
(set_attr "prefix" "vex")
(set_attr "mode" "V8SF")])
"TARGET_AVX"
"vextractf128\t{$0x1, %1, %0|%0, %1, 0x1}"
[(set_attr "type" "sselog")
+ (set_attr "prefix_extra" "1")
+ (set_attr "length_immediate" "1")
(set_attr "memory" "none,store")
(set_attr "prefix" "vex")
(set_attr "mode" "V8SF")])
"TARGET_AVX"
"vextractf128\t{$0x1, %1, %0|%0, %1, 0x1}"
[(set_attr "type" "sselog")
+ (set_attr "prefix_extra" "1")
+ (set_attr "length_immediate" "1")
(set_attr "memory" "none,store")
(set_attr "prefix" "vex")
(set_attr "mode" "V8SF")])
"TARGET_AVX"
"vextractf128\t{$0x1, %1, %0|%0, %1, 0x1}"
[(set_attr "type" "sselog")
+ (set_attr "prefix_extra" "1")
+ (set_attr "length_immediate" "1")
(set_attr "memory" "none,store")
(set_attr "prefix" "vex")
(set_attr "mode" "V8SF")])
"TARGET_AVX"
"vextractf128\t{$0x1, %1, %0|%0, %1, 0x1}"
[(set_attr "type" "sselog")
+ (set_attr "prefix_extra" "1")
+ (set_attr "length_immediate" "1")
(set_attr "memory" "none,store")
(set_attr "prefix" "vex")
(set_attr "mode" "V8SF")])
"TARGET_SSE4_1"
"%vextractps\t{%2, %1, %0|%0, %1, %2}"
[(set_attr "type" "sselog")
+ (set_attr "prefix_data16" "1")
(set_attr "prefix_extra" "1")
+ (set_attr "length_immediate" "1")
(set_attr "prefix" "maybe_vex")
(set_attr "mode" "V4SF")])
movlpd\t{%H1, %0|%0, %H1}
movhpd\t{%1, %0|%0, %1}"
[(set_attr "type" "sselog,ssemov,ssemov")
+ (set_attr "prefix_data16" "*,1,1")
(set_attr "mode" "V2DF,V1DF,V1DF")])
(define_insn "avx_movddup256"
movhpd\t{%2, %0|%0, %2}
movlpd\t{%2, %H0|%H0, %2}"
[(set_attr "type" "sselog,ssemov,ssemov")
+ (set_attr "prefix_data16" "*,1,1")
(set_attr "mode" "V2DF,V1DF,V1DF")])
(define_expand "avx_shufpd256"
return "vshufpd\t{%3, %2, %1, %0|%0, %1, %2, %3}";
}
[(set_attr "type" "sselog")
+ (set_attr "length_immediate" "1")
(set_attr "prefix" "vex")
(set_attr "mode" "V4DF")])
return "vshufpd\t{%3, %2, %1, %0|%0, %1, %2, %3}";
}
[(set_attr "type" "sselog")
+ (set_attr "length_immediate" "1")
(set_attr "prefix" "vex")
(set_attr "mode" "V2DF")])
return "shufpd\t{%3, %2, %0|%0, %2, %3}";
}
[(set_attr "type" "sselog")
+ (set_attr "length_immediate" "1")
(set_attr "mode" "V2DF")])
;; Avoid combining registers from different units in a single alternative,
#
#"
[(set_attr "type" "ssemov,sselog1,ssemov,fmov,imov")
+ (set_attr "prefix_data16" "1,*,*,*,*")
(set_attr "mode" "V1DF,V2DF,DF,DF,DF")])
(define_split
#
#"
[(set_attr "type" "ssemov,ssemov,ssemov,fmov,imov")
+ (set_attr "prefix_data16" "1,*,*,*,*")
(set_attr "prefix" "maybe_vex")
(set_attr "mode" "V1DF,DF,DF,DF,DF")])
#
#"
[(set_attr "type" "ssemov,sselog,sselog,ssemov,fmov,imov")
+ (set_attr "prefix_data16" "1,*,*,*,*,*")
+ (set_attr "length_immediate" "*,*,1,*,*,*")
(set_attr "mode" "V1DF,V2DF,V2DF,DF,DF,DF")])
(define_split
#
#"
[(set_attr "type" "ssemov,ssemov,ssemov,sselog,ssemov,ssemov,fmov,imov")
+ (set_attr "prefix_data16" "*,1,*,*,1,*,*,*")
+ (set_attr "length_immediate" "*,*,*,1,*,*,*,*")
(set_attr "mode" "DF,V1DF,V1DF,V2DF,V1DF,DF,DF,DF")])
(define_split
movhps\t{%H1, %0|%0, %H1}
movhps\t{%1, %H0|%H0, %1}"
[(set_attr "type" "ssemov,ssemov,ssemov,sselog,ssemov,ssemov")
+ (set_attr "prefix_data16" "*,1,1,*,*,*")
+ (set_attr "length_immediate" "*,*,*,1,*,*")
(set_attr "mode" "DF,V1DF,V1DF,V2DF,V1DF,V1DF")])
(define_insn "*vec_dupv2df_sse3"
movlhps\t{%2, %0|%0, %2}
movhps\t{%2, %0|%0, %2}"
[(set_attr "type" "sselog,ssemov,ssemov,ssemov,ssemov")
+ (set_attr "prefix_data16" "*,1,*,*,*")
(set_attr "mode" "V2DF,V1DF,DF,V4SF,V2SF")])
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
(mult:V16QI (match_operand:V16QI 1 "register_operand" "")
(match_operand:V16QI 2 "register_operand" "")))]
"TARGET_SSE2
- && !(reload_completed || reload_in_progress)"
+ && can_create_pseudo_p ()"
"#"
"&& 1"
[(const_int 0)]
{
- rtx t[12], op0, op[3];
+ rtx t[12];
int i;
- if (TARGET_SSE5)
- {
- /* On SSE5, we can take advantage of the pperm instruction to pack and
- unpack the bytes. Unpack data such that we've got a source byte in
- each low byte of each word. We don't care what goes into the high
- byte, so put 0 there. */
- for (i = 0; i < 6; ++i)
- t[i] = gen_reg_rtx (V8HImode);
-
- for (i = 0; i < 2; i++)
- {
- op[0] = t[i];
- op[1] = operands[i+1];
- ix86_expand_sse5_unpack (op, true, true); /* high bytes */
-
- op[0] = t[i+2];
- ix86_expand_sse5_unpack (op, true, false); /* low bytes */
- }
-
- /* Multiply words. */
- emit_insn (gen_mulv8hi3 (t[4], t[0], t[1])); /* high bytes */
- emit_insn (gen_mulv8hi3 (t[5], t[2], t[3])); /* low bytes */
-
- /* Pack the low byte of each word back into a single xmm */
- op[0] = operands[0];
- op[1] = t[5];
- op[2] = t[4];
- ix86_expand_sse5_pack (op);
- DONE;
- }
-
for (i = 0; i < 12; ++i)
t[i] = gen_reg_rtx (V16QImode);
emit_insn (gen_sse2_punpckhbw (t[10], t[9], t[8])); /* ........ACEGIKMO */
emit_insn (gen_sse2_punpcklbw (t[11], t[9], t[8])); /* ........BDFHJLNP */
- op0 = operands[0];
- emit_insn (gen_sse2_punpcklbw (op0, t[11], t[10])); /* ABCDEFGHIJKLMNOP */
+ emit_insn (gen_sse2_punpcklbw (operands[0], t[11], t[10])); /* ABCDEFGHIJKLMNOP */
DONE;
})
"TARGET_AVX && ix86_binary_operator_ok (MULT, V4SImode, operands)"
"vpmuldq\t{%2, %1, %0|%0, %1, %2}"
[(set_attr "type" "sseimul")
+ (set_attr "prefix_extra" "1")
(set_attr "prefix" "vex")
(set_attr "mode" "TI")])
"TARGET_SSE2 && ix86_binary_operator_ok (MULT, V8HImode, operands)"
"pmaddwd\t{%2, %0|%0, %2}"
[(set_attr "type" "sseiadd")
+ (set_attr "atom_unit" "simul")
(set_attr "prefix_data16" "1")
(set_attr "mode" "TI")])
(match_operand:V4SI 2 "register_operand" "")))]
"TARGET_SSE2"
{
- if (TARGET_SSE4_1 || TARGET_SSE5)
+ if (TARGET_SSE4_1 || TARGET_XOP)
ix86_fixup_binary_operands_no_copy (MULT, V4SImode, operands);
})
"TARGET_AVX && ix86_binary_operator_ok (MULT, V4SImode, operands)"
"vpmulld\t{%2, %1, %0|%0, %1, %2}"
[(set_attr "type" "sseimul")
+ (set_attr "prefix_extra" "1")
(set_attr "prefix" "vex")
(set_attr "mode" "TI")])
(set_attr "prefix_extra" "1")
(set_attr "mode" "TI")])
-;; We don't have a straight 32-bit parallel multiply on SSE5, so fake it with a
-;; multiply/add. In general, we expect the define_split to occur before
-;; register allocation, so we have to handle the corner case where the target
-;; is the same as one of the inputs.
-(define_insn_and_split "*sse5_mulv4si3"
- [(set (match_operand:V4SI 0 "register_operand" "=&x")
- (mult:V4SI (match_operand:V4SI 1 "register_operand" "%x")
- (match_operand:V4SI 2 "nonimmediate_operand" "xm")))]
- "TARGET_SSE5"
- "#"
- "&& (reload_completed
- || (!reg_mentioned_p (operands[0], operands[1])
- && !reg_mentioned_p (operands[0], operands[2])))"
- [(set (match_dup 0)
- (match_dup 3))
- (set (match_dup 0)
- (plus:V4SI (mult:V4SI (match_dup 1)
- (match_dup 2))
- (match_dup 0)))]
-{
- operands[3] = CONST0_RTX (V4SImode);
-}
- [(set_attr "type" "ssemuladd")
- (set_attr "mode" "TI")])
-
(define_insn_and_split "*sse2_mulv4si3"
[(set (match_operand:V4SI 0 "register_operand" "")
(mult:V4SI (match_operand:V4SI 1 "register_operand" "")
(match_operand:V4SI 2 "register_operand" "")))]
- "TARGET_SSE2 && !TARGET_SSE4_1 && !TARGET_SSE5
- && !(reload_completed || reload_in_progress)"
+ "TARGET_SSE2 && !TARGET_SSE4_1 && !TARGET_XOP
+ && can_create_pseudo_p ()"
"#"
"&& 1"
[(const_int 0)]
(mult:V2DI (match_operand:V2DI 1 "register_operand" "")
(match_operand:V2DI 2 "register_operand" "")))]
"TARGET_SSE2
- && !(reload_completed || reload_in_progress)"
+ && can_create_pseudo_p ()"
"#"
"&& 1"
[(const_int 0)]
rtx t1, t2, t3, t4, t5, t6, thirtytwo;
rtx op0, op1, op2;
- if (TARGET_SSE5)
+ if (TARGET_XOP)
{
/* op1: A,B,C,D, op2: E,F,G,H */
op0 = operands[0];
emit_move_insn (t2, CONST0_RTX (V4SImode));
/* t3: (B*E),(A*F),(D*G),(C*H) */
- emit_insn (gen_sse5_pmacsdd (t3, t1, op2, t2));
+ emit_insn (gen_xop_pmacsdd (t3, t1, op2, t2));
/* t4: (B*E)+(A*F), (D*G)+(C*H) */
- emit_insn (gen_sse5_phadddq (t4, t3));
+ emit_insn (gen_xop_phadddq (t4, t3));
/* t5: ((B*E)+(A*F))<<32, ((D*G)+(C*H))<<32 */
emit_insn (gen_ashlv2di3 (t5, t4, GEN_INT (32)));
/* op0: (((B*E)+(A*F))<<32)+(B*F), (((D*G)+(C*H))<<32)+(D*H) */
- emit_insn (gen_sse5_pmacsdql (op0, op1, op2, t5));
+ emit_insn (gen_xop_pmacsdql (op0, op1, op2, t5));
DONE;
}
[(match_operand:V2DI 0 "register_operand" "")
(match_operand:V4SI 1 "register_operand" "")
(match_operand:V4SI 2 "register_operand" "")]
- "TARGET_SSE5"
+ "TARGET_XOP"
{
rtx t1, t2;
GEN_INT (2),
GEN_INT (1),
GEN_INT (3)));
- emit_insn (gen_sse5_mulv2div2di3_high (operands[0], t1, t2));
+ emit_insn (gen_xop_mulv2div2di3_high (operands[0], t1, t2));
DONE;
})
[(match_operand:V2DI 0 "register_operand" "")
(match_operand:V4SI 1 "register_operand" "")
(match_operand:V4SI 2 "register_operand" "")]
- "TARGET_SSE5"
+ "TARGET_XOP"
{
rtx t1, t2;
GEN_INT (2),
GEN_INT (1),
GEN_INT (3)));
- emit_insn (gen_sse5_mulv2div2di3_low (operands[0], t1, t2));
- DONE;
+ emit_insn (gen_xop_mulv2div2di3_low (operands[0], t1, t2));
DONE;
})
"vpsra<ssevecsize>\t{%2, %1, %0|%0, %1, %2}"
[(set_attr "type" "sseishft")
(set_attr "prefix" "vex")
+ (set (attr "length_immediate")
+ (if_then_else (match_operand 2 "const_int_operand" "")
+ (const_string "1")
+ (const_string "0")))
(set_attr "mode" "TI")])
(define_insn "ashr<mode>3"
"psra<ssevecsize>\t{%2, %0|%0, %2}"
[(set_attr "type" "sseishft")
(set_attr "prefix_data16" "1")
+ (set (attr "length_immediate")
+ (if_then_else (match_operand 2 "const_int_operand" "")
+ (const_string "1")
+ (const_string "0")))
(set_attr "mode" "TI")])
(define_insn "*avx_lshr<mode>3"
"vpsrl<ssevecsize>\t{%2, %1, %0|%0, %1, %2}"
[(set_attr "type" "sseishft")
(set_attr "prefix" "vex")
+ (set (attr "length_immediate")
+ (if_then_else (match_operand 2 "const_int_operand" "")
+ (const_string "1")
+ (const_string "0")))
(set_attr "mode" "TI")])
(define_insn "lshr<mode>3"
"psrl<ssevecsize>\t{%2, %0|%0, %2}"
[(set_attr "type" "sseishft")
(set_attr "prefix_data16" "1")
+ (set (attr "length_immediate")
+ (if_then_else (match_operand 2 "const_int_operand" "")
+ (const_string "1")
+ (const_string "0")))
(set_attr "mode" "TI")])
(define_insn "*avx_ashl<mode>3"
"vpsll<ssevecsize>\t{%2, %1, %0|%0, %1, %2}"
[(set_attr "type" "sseishft")
(set_attr "prefix" "vex")
+ (set (attr "length_immediate")
+ (if_then_else (match_operand 2 "const_int_operand" "")
+ (const_string "1")
+ (const_string "0")))
(set_attr "mode" "TI")])
(define_insn "ashl<mode>3"
"psll<ssevecsize>\t{%2, %0|%0, %2}"
[(set_attr "type" "sseishft")
(set_attr "prefix_data16" "1")
+ (set (attr "length_immediate")
+ (if_then_else (match_operand 2 "const_int_operand" "")
+ (const_string "1")
+ (const_string "0")))
(set_attr "mode" "TI")])
(define_expand "vec_shl_<mode>"
"TARGET_AVX && ix86_binary_operator_ok (<CODE>, <MODE>mode, operands)"
"vp<maxminiprefix><ssevecsize>\t{%2, %1, %0|%0, %1, %2}"
[(set_attr "type" "sseiadd")
+ (set (attr "prefix_extra")
+ (if_then_else
+ (ne (symbol_ref "<MODE>mode != ((<CODE> == SMAX || <CODE> == SMIN) ? V8HImode : V16QImode)")
+ (const_int 0))
+ (const_string "1")
+ (const_string "0")))
(set_attr "prefix" "vex")
(set_attr "mode" "TI")])
(eq:SSEMODE124
(match_operand:SSEMODE124 1 "nonimmediate_operand" "")
(match_operand:SSEMODE124 2 "nonimmediate_operand" "")))]
- "TARGET_SSE2 && !TARGET_SSE5"
+ "TARGET_SSE2 && !TARGET_XOP"
"ix86_fixup_binary_operands_no_copy (EQ, <MODE>mode, operands);")
(define_insn "*avx_eq<mode>3"
"TARGET_AVX && ix86_binary_operator_ok (EQ, <MODE>mode, operands)"
"vpcmpeq<ssevecsize>\t{%2, %1, %0|%0, %1, %2}"
[(set_attr "type" "ssecmp")
+ (set (attr "prefix_extra")
+ (if_then_else (match_operand:V2DI 0 "" "")
+ (const_string "1")
+ (const_string "*")))
(set_attr "prefix" "vex")
(set_attr "mode" "TI")])
(eq:SSEMODE124
(match_operand:SSEMODE124 1 "nonimmediate_operand" "%0")
(match_operand:SSEMODE124 2 "nonimmediate_operand" "xm")))]
- "TARGET_SSE2 && !TARGET_SSE5
+ "TARGET_SSE2 && !TARGET_XOP
&& ix86_binary_operator_ok (EQ, <MODE>mode, operands)"
"pcmpeq<ssevecsize>\t{%2, %0|%0, %2}"
[(set_attr "type" "ssecmp")
"TARGET_AVX"
"vpcmpgt<ssevecsize>\t{%2, %1, %0|%0, %1, %2}"
[(set_attr "type" "ssecmp")
+ (set (attr "prefix_extra")
+ (if_then_else (match_operand:V2DI 0 "" "")
+ (const_string "1")
+ (const_string "*")))
(set_attr "prefix" "vex")
(set_attr "mode" "TI")])
(gt:SSEMODE124
(match_operand:SSEMODE124 1 "register_operand" "0")
(match_operand:SSEMODE124 2 "nonimmediate_operand" "xm")))]
- "TARGET_SSE2 && !TARGET_SSE5"
+ "TARGET_SSE2 && !TARGET_XOP"
"pcmpgt<ssevecsize>\t{%2, %0|%0, %2}"
[(set_attr "type" "ssecmp")
(set_attr "prefix_data16" "1")
"TARGET_SSE4_2"
"pcmpgtq\t{%2, %0|%0, %2}"
[(set_attr "type" "ssecmp")
+ (set_attr "prefix_extra" "1")
(set_attr "mode" "TI")])
+;; Signed integer vector conditional move (vcond).  The iterator is
+;; narrowed from SSEMODEI to SSEMODE124C8 (V16QI/V8HI/V4SI, plus V2DI
+;; only when TARGET_SSE4_2 supplies pcmpgtq — see the iterator
+;; definition above).  Presumably because the narrowed iterator admits
+;; only modes the expander can always handle, the FAIL fallback is
+;; replaced by a gcc_assert on ix86_expand_int_vcond's result.
(define_expand "vcond<mode>"
- [(set (match_operand:SSEMODEI 0 "register_operand" "")
- (if_then_else:SSEMODEI
+ [(set (match_operand:SSEMODE124C8 0 "register_operand" "")
+ (if_then_else:SSEMODE124C8
 (match_operator 3 ""
- [(match_operand:SSEMODEI 4 "nonimmediate_operand" "")
- (match_operand:SSEMODEI 5 "nonimmediate_operand" "")])
- (match_operand:SSEMODEI 1 "general_operand" "")
- (match_operand:SSEMODEI 2 "general_operand" "")))]
+ [(match_operand:SSEMODE124C8 4 "nonimmediate_operand" "")
+ (match_operand:SSEMODE124C8 5 "nonimmediate_operand" "")])
+ (match_operand:SSEMODE124C8 1 "general_operand" "")
+ (match_operand:SSEMODE124C8 2 "general_operand" "")))]
 "TARGET_SSE2"
{
- if (ix86_expand_int_vcond (operands))
- DONE;
- else
- FAIL;
+ bool ok = ix86_expand_int_vcond (operands);
+ gcc_assert (ok);
+ DONE;
})
+;; Unsigned integer vector conditional move (vcondu).  Same mode
+;; restriction as the signed expander: SSEMODE124C8 limits V2DI to
+;; TARGET_SSE4_2, and with the remaining modes ix86_expand_int_vcond
+;; is expected to always succeed, hence FAIL becomes gcc_assert.
(define_expand "vcondu<mode>"
- [(set (match_operand:SSEMODEI 0 "register_operand" "")
- (if_then_else:SSEMODEI
+ [(set (match_operand:SSEMODE124C8 0 "register_operand" "")
+ (if_then_else:SSEMODE124C8
 (match_operator 3 ""
- [(match_operand:SSEMODEI 4 "nonimmediate_operand" "")
- (match_operand:SSEMODEI 5 "nonimmediate_operand" "")])
- (match_operand:SSEMODEI 1 "general_operand" "")
- (match_operand:SSEMODEI 2 "general_operand" "")))]
+ [(match_operand:SSEMODE124C8 4 "nonimmediate_operand" "")
+ (match_operand:SSEMODE124C8 5 "nonimmediate_operand" "")])
+ (match_operand:SSEMODE124C8 1 "general_operand" "")
+ (match_operand:SSEMODE124C8 2 "general_operand" "")))]
 "TARGET_SSE2"
{
- if (ix86_expand_int_vcond (operands))
- DONE;
- else
- FAIL;
+ bool ok = ix86_expand_int_vcond (operands);
+ gcc_assert (ok);
+ DONE;
})
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
{
rtx op1, op2, h1, l1, h2, l2, h3, l3;
- if (TARGET_SSE5)
- {
- ix86_expand_sse5_pack (operands);
- DONE;
- }
-
op1 = gen_lowpart (V16QImode, operands[1]);
op2 = gen_lowpart (V16QImode, operands[2]);
h1 = gen_reg_rtx (V16QImode);
{
rtx op1, op2, h1, l1, h2, l2;
- if (TARGET_SSE5)
- {
- ix86_expand_sse5_pack (operands);
- DONE;
- }
-
op1 = gen_lowpart (V8HImode, operands[1]);
op2 = gen_lowpart (V8HImode, operands[2]);
h1 = gen_reg_rtx (V8HImode);
{
rtx op1, op2, h1, l1;
- if (TARGET_SSE5)
- {
- ix86_expand_sse5_pack (operands);
- DONE;
- }
-
op1 = gen_lowpart (V4SImode, operands[1]);
op2 = gen_lowpart (V4SImode, operands[2]);
h1 = gen_reg_rtx (V4SImode);
(set_attr "prefix_data16" "1")
(set_attr "mode" "TI")])
-(define_insn "*avx_pinsr<avxmodesuffixs>"
+(define_insn "*avx_pinsr<ssevecsize>"
[(set (match_operand:SSEMODE124 0 "register_operand" "=x")
(vec_merge:SSEMODE124
(vec_duplicate:SSEMODE124
"TARGET_AVX"
{
operands[3] = GEN_INT (exact_log2 (INTVAL (operands[3])));
- return "vpinsr<avxmodesuffixs>\t{%3, %k2, %1, %0|%0, %1, %k2, %3}";
+ return "vpinsr<ssevecsize>\t{%3, %k2, %1, %0|%0, %1, %k2, %3}";
}
[(set_attr "type" "sselog")
+ (set (attr "prefix_extra")
+ (if_then_else (match_operand:V8HI 0 "register_operand" "")
+ (const_string "0")
+ (const_string "1")))
+ (set_attr "length_immediate" "1")
(set_attr "prefix" "vex")
(set_attr "mode" "TI")])
}
[(set_attr "type" "sselog")
(set_attr "prefix_extra" "1")
+ (set_attr "length_immediate" "1")
(set_attr "mode" "TI")])
(define_insn "*sse2_pinsrw"
}
[(set_attr "type" "sselog")
(set_attr "prefix_data16" "1")
+ (set_attr "length_immediate" "1")
(set_attr "mode" "TI")])
;; It must come before sse2_loadld since it is preferred.
}
[(set_attr "type" "sselog")
(set_attr "prefix_extra" "1")
+ (set_attr "length_immediate" "1")
(set_attr "mode" "TI")])
(define_insn "*avx_pinsrq"
return "vpinsrq\t{%3, %2, %1, %0|%0, %1, %2, %3}";
}
[(set_attr "type" "sselog")
+ (set_attr "prefix_extra" "1")
+ (set_attr "length_immediate" "1")
(set_attr "prefix" "vex")
(set_attr "mode" "TI")])
return "pinsrq\t{%3, %2, %0|%0, %2, %3}";
}
[(set_attr "type" "sselog")
+ (set_attr "prefix_rex" "1")
(set_attr "prefix_extra" "1")
+ (set_attr "length_immediate" "1")
(set_attr "mode" "TI")])
(define_insn "*sse4_1_pextrb"
"%vpextrb\t{%2, %1, %0|%0, %1, %2}"
[(set_attr "type" "sselog")
(set_attr "prefix_extra" "1")
+ (set_attr "length_immediate" "1")
(set_attr "prefix" "maybe_vex")
(set_attr "mode" "TI")])
"%vpextrb\t{%2, %1, %0|%0, %1, %2}"
[(set_attr "type" "sselog")
(set_attr "prefix_extra" "1")
+ (set_attr "length_immediate" "1")
(set_attr "prefix" "maybe_vex")
(set_attr "mode" "TI")])
"%vpextrw\t{%2, %1, %0|%0, %1, %2}"
[(set_attr "type" "sselog")
(set_attr "prefix_data16" "1")
+ (set_attr "length_immediate" "1")
(set_attr "prefix" "maybe_vex")
(set_attr "mode" "TI")])
"%vpextrw\t{%2, %1, %0|%0, %1, %2}"
[(set_attr "type" "sselog")
(set_attr "prefix_extra" "1")
+ (set_attr "length_immediate" "1")
(set_attr "prefix" "maybe_vex")
(set_attr "mode" "TI")])
"%vpextrd\t{%2, %1, %0|%0, %1, %2}"
[(set_attr "type" "sselog")
(set_attr "prefix_extra" "1")
+ (set_attr "length_immediate" "1")
(set_attr "prefix" "maybe_vex")
(set_attr "mode" "TI")])
"TARGET_SSE4_1 && TARGET_64BIT"
"%vpextrq\t{%2, %1, %0|%0, %1, %2}"
[(set_attr "type" "sselog")
+ (set_attr "prefix_rex" "1")
(set_attr "prefix_extra" "1")
+ (set_attr "length_immediate" "1")
(set_attr "prefix" "maybe_vex")
(set_attr "mode" "TI")])
}
[(set_attr "type" "sselog1")
(set_attr "prefix_data16" "1")
- (set_attr "prefix" "vex")
+ (set_attr "prefix" "maybe_vex")
+ (set_attr "length_immediate" "1")
(set_attr "mode" "TI")])
(define_expand "sse2_pshuflw"
return "%vpshuflw\t{%2, %1, %0|%0, %1, %2}";
}
[(set_attr "type" "sselog")
+ (set_attr "prefix_data16" "0")
(set_attr "prefix_rep" "1")
(set_attr "prefix" "maybe_vex")
+ (set_attr "length_immediate" "1")
(set_attr "mode" "TI")])
(define_expand "sse2_pshufhw"
}
[(set_attr "type" "sselog")
(set_attr "prefix_rep" "1")
+ (set_attr "prefix_data16" "0")
(set_attr "prefix" "maybe_vex")
+ (set_attr "length_immediate" "1")
(set_attr "mode" "TI")])
(define_expand "sse2_loadd"
vmovq\t{%H1, %0|%0, %H1}
vmov{q}\t{%H1, %0|%0, %H1}"
[(set_attr "type" "ssemov,sseishft,ssemov,imov")
+ (set_attr "length_immediate" "*,1,*,*")
(set_attr "memory" "*,none,*,*")
(set_attr "prefix" "vex")
(set_attr "mode" "V2SF,TI,TI,DI")])
movq\t{%H1, %0|%0, %H1}
mov{q}\t{%H1, %0|%0, %H1}"
[(set_attr "type" "ssemov,sseishft,ssemov,imov")
+ (set_attr "length_immediate" "*,1,*,*")
+ (set_attr "atom_unit" "*,sishuf,*,*")
(set_attr "memory" "*,none,*,*")
(set_attr "mode" "V2SF,TI,TI,DI")])
vpsrldq\t{$8, %1, %0|%0, %1, 8}
vmovq\t{%H1, %0|%0, %H1}"
[(set_attr "type" "ssemov,sseishft,ssemov")
+ (set_attr "length_immediate" "*,1,*")
(set_attr "memory" "*,none,*")
(set_attr "prefix" "vex")
(set_attr "mode" "V2SF,TI,TI")])
psrldq\t{$8, %0|%0, 8}
movq\t{%H1, %0|%0, %H1}"
[(set_attr "type" "ssemov,sseishft,ssemov")
+ (set_attr "length_immediate" "*,1,*")
+ (set_attr "atom_unit" "*,sishuf,*")
(set_attr "memory" "*,none,*")
(set_attr "mode" "V2SF,TI,TI")])
shufps\t{$0, %0, %0|%0, %0, 0}"
[(set_attr "type" "sselog1")
(set_attr "prefix" "maybe_vex,orig")
+ (set_attr "length_immediate" "1")
(set_attr "mode" "TI,V4SF")])
(define_insn "*vec_dupv2di_avx"
punpckldq\t{%2, %0|%0, %2}
movd\t{%1, %0|%0, %1}"
[(set_attr "type" "sselog,sselog,ssemov,mmxcvt,mmxmov")
+ (set_attr "prefix_extra" "1,*,*,*,*")
+ (set_attr "length_immediate" "1,*,*,*,*")
(set (attr "prefix")
(if_then_else (eq_attr "alternative" "3,4")
(const_string "orig")
movd\t{%1, %0|%0, %1}"
[(set_attr "type" "sselog,sselog,ssemov,mmxcvt,mmxmov")
(set_attr "prefix_extra" "1,*,*,*,*")
+ (set_attr "length_immediate" "1,*,*,*,*")
(set_attr "mode" "TI,TI,TI,DI,DI")])
;; ??? In theory we can match memory for the MMX alternative, but allowing
vpunpcklqdq\t{%2, %1, %0|%0, %1, %2}
vmovhps\t{%2, %1, %0|%0, %1, %2}"
[(set_attr "type" "sselog,ssemov,ssemov,ssemov,sselog,ssemov")
+ (set_attr "prefix_extra" "1,*,*,*,*,*")
+ (set_attr "length_immediate" "1,*,*,*,*,*")
(set (attr "prefix")
(if_then_else (eq_attr "alternative" "3")
(const_string "orig")
movlhps\t{%2, %0|%0, %2}
movhps\t{%2, %0|%0, %2}"
[(set_attr "type" "sselog,ssemov,ssemov,ssemov,sselog,ssemov,ssemov")
+ (set_attr "prefix_rex" "1,*,1,*,*,*,*")
(set_attr "prefix_extra" "1,*,*,*,*,*,*")
+ (set_attr "length_immediate" "1,*,*,*,*,*,*")
(set_attr "mode" "TI,TI,TI,TI,TI,V4SF,V2SF")])
(define_insn "*vec_concatv2di_rex64_sse"
movlhps\t{%2, %0|%0, %2}
movhps\t{%2, %0|%0, %2}"
[(set_attr "type" "ssemov,ssemov,ssemov,sselog,ssemov,ssemov")
+ (set_attr "prefix_rex" "*,1,*,*,*,*")
(set_attr "mode" "TI,TI,TI,TI,V4SF,V2SF")])
(define_expand "vec_unpacku_hi_v16qi"
{
if (TARGET_SSE4_1)
ix86_expand_sse4_unpack (operands, true, true);
- else if (TARGET_SSE5)
- ix86_expand_sse5_unpack (operands, true, true);
else
ix86_expand_sse_unpack (operands, true, true);
DONE;
{
if (TARGET_SSE4_1)
ix86_expand_sse4_unpack (operands, false, true);
- else if (TARGET_SSE5)
- ix86_expand_sse5_unpack (operands, false, true);
else
ix86_expand_sse_unpack (operands, false, true);
DONE;
{
if (TARGET_SSE4_1)
ix86_expand_sse4_unpack (operands, true, false);
- else if (TARGET_SSE5)
- ix86_expand_sse5_unpack (operands, true, false);
else
ix86_expand_sse_unpack (operands, true, false);
DONE;
{
if (TARGET_SSE4_1)
ix86_expand_sse4_unpack (operands, false, false);
- else if (TARGET_SSE5)
- ix86_expand_sse5_unpack (operands, false, false);
else
ix86_expand_sse_unpack (operands, false, false);
DONE;
{
if (TARGET_SSE4_1)
ix86_expand_sse4_unpack (operands, true, true);
- else if (TARGET_SSE5)
- ix86_expand_sse5_unpack (operands, true, true);
else
ix86_expand_sse_unpack (operands, true, true);
DONE;
{
if (TARGET_SSE4_1)
ix86_expand_sse4_unpack (operands, false, true);
- else if (TARGET_SSE5)
- ix86_expand_sse5_unpack (operands, false, true);
else
ix86_expand_sse_unpack (operands, false, true);
DONE;
{
if (TARGET_SSE4_1)
ix86_expand_sse4_unpack (operands, true, false);
- else if (TARGET_SSE5)
- ix86_expand_sse5_unpack (operands, true, false);
else
ix86_expand_sse_unpack (operands, true, false);
DONE;
{
if (TARGET_SSE4_1)
ix86_expand_sse4_unpack (operands, false, false);
- else if (TARGET_SSE5)
- ix86_expand_sse5_unpack (operands, false, false);
else
ix86_expand_sse_unpack (operands, false, false);
DONE;
{
if (TARGET_SSE4_1)
ix86_expand_sse4_unpack (operands, true, true);
- else if (TARGET_SSE5)
- ix86_expand_sse5_unpack (operands, true, true);
else
ix86_expand_sse_unpack (operands, true, true);
DONE;
{
if (TARGET_SSE4_1)
ix86_expand_sse4_unpack (operands, false, true);
- else if (TARGET_SSE5)
- ix86_expand_sse5_unpack (operands, false, true);
else
ix86_expand_sse_unpack (operands, false, true);
DONE;
{
if (TARGET_SSE4_1)
ix86_expand_sse4_unpack (operands, true, false);
- else if (TARGET_SSE5)
- ix86_expand_sse5_unpack (operands, true, false);
else
ix86_expand_sse_unpack (operands, true, false);
DONE;
{
if (TARGET_SSE4_1)
ix86_expand_sse4_unpack (operands, false, false);
- else if (TARGET_SSE5)
- ix86_expand_sse5_unpack (operands, false, false);
else
ix86_expand_sse_unpack (operands, false, false);
DONE;
"TARGET_SSE2"
"psadbw\t{%2, %0|%0, %2}"
[(set_attr "type" "sseiadd")
+ (set_attr "atom_unit" "simul")
(set_attr "prefix_data16" "1")
(set_attr "mode" "TI")])
UNSPEC_MOVMSK))]
"SSE_VEC_FLOAT_MODE_P (<MODE>mode)"
"%vmovmskp<ssemodesuffixf2c>\t{%1, %0|%0, %1}"
- [(set_attr "type" "ssecvt")
+ [(set_attr "type" "ssemov")
(set_attr "prefix" "maybe_vex")
(set_attr "mode" "<MODE>")])
UNSPEC_MOVMSK))]
"TARGET_SSE2"
"%vpmovmskb\t{%1, %0|%0, %1}"
- [(set_attr "type" "ssecvt")
+ [(set_attr "type" "ssemov")
(set_attr "prefix_data16" "1")
(set_attr "prefix" "maybe_vex")
(set_attr "mode" "SI")])
"TARGET_SSE2 && !TARGET_64BIT"
;; @@@ check ordering of operands in intel/nonintel syntax
"%vmaskmovdqu\t{%2, %1|%1, %2}"
- [(set_attr "type" "ssecvt")
+ [(set_attr "type" "ssemov")
(set_attr "prefix_data16" "1")
+ ;; The implicit %rdi operand confuses default length_vex computation.
+ (set_attr "length_vex" "3")
(set_attr "prefix" "maybe_vex")
(set_attr "mode" "TI")])
"TARGET_SSE2 && TARGET_64BIT"
;; @@@ check ordering of operands in intel/nonintel syntax
"%vmaskmovdqu\t{%2, %1|%1, %2}"
- [(set_attr "type" "ssecvt")
+ [(set_attr "type" "ssemov")
(set_attr "prefix_data16" "1")
+ ;; The implicit %rdi operand confuses default length_vex computation.
+ (set (attr "length_vex")
+ (symbol_ref ("REGNO (operands[2]) >= FIRST_REX_SSE_REG ? 3 + 1 : 2 + 1")))
(set_attr "prefix" "maybe_vex")
(set_attr "mode" "TI")])
"TARGET_SSE"
"%vldmxcsr\t%0"
[(set_attr "type" "sse")
+ (set_attr "atom_sse_attr" "mxcsr")
(set_attr "prefix" "maybe_vex")
(set_attr "memory" "load")])
"TARGET_SSE"
"%vstmxcsr\t%0"
[(set_attr "type" "sse")
+ (set_attr "atom_sse_attr" "mxcsr")
(set_attr "prefix" "maybe_vex")
(set_attr "memory" "store")])
"TARGET_SSE || TARGET_3DNOW_A"
"sfence"
[(set_attr "type" "sse")
+ (set_attr "length_address" "0")
+ (set_attr "atom_sse_attr" "fence")
(set_attr "memory" "unknown")])
(define_insn "sse2_clflush"
"TARGET_SSE2"
"clflush\t%a0"
[(set_attr "type" "sse")
+ (set_attr "atom_sse_attr" "fence")
(set_attr "memory" "unknown")])
(define_expand "sse2_mfence"
"TARGET_64BIT || TARGET_SSE2"
"mfence"
[(set_attr "type" "sse")
+ (set_attr "length_address" "0")
+ (set_attr "atom_sse_attr" "fence")
(set_attr "memory" "unknown")])
(define_expand "sse2_lfence"
"TARGET_SSE2"
"lfence"
[(set_attr "type" "sse")
+ (set_attr "length_address" "0")
+ (set_attr "atom_sse_attr" "lfence")
(set_attr "memory" "unknown")])
(define_insn "sse3_mwait"
"TARGET_AVX"
"vphaddw\t{%2, %1, %0|%0, %1, %2}"
[(set_attr "type" "sseiadd")
+ (set_attr "prefix_extra" "1")
(set_attr "prefix" "vex")
(set_attr "mode" "TI")])
"TARGET_SSSE3"
"phaddw\t{%2, %0|%0, %2}"
[(set_attr "type" "sseiadd")
+ (set_attr "atom_unit" "complex")
(set_attr "prefix_data16" "1")
(set_attr "prefix_extra" "1")
(set_attr "mode" "TI")])
"TARGET_SSSE3"
"phaddw\t{%2, %0|%0, %2}"
[(set_attr "type" "sseiadd")
+ (set_attr "atom_unit" "complex")
(set_attr "prefix_extra" "1")
+ (set (attr "prefix_rex") (symbol_ref "x86_extended_reg_mentioned_p (insn)"))
(set_attr "mode" "DI")])
(define_insn "*avx_phadddv4si3"
"TARGET_AVX"
"vphaddd\t{%2, %1, %0|%0, %1, %2}"
[(set_attr "type" "sseiadd")
+ (set_attr "prefix_extra" "1")
(set_attr "prefix" "vex")
(set_attr "mode" "TI")])
"TARGET_SSSE3"
"phaddd\t{%2, %0|%0, %2}"
[(set_attr "type" "sseiadd")
+ (set_attr "atom_unit" "complex")
(set_attr "prefix_data16" "1")
(set_attr "prefix_extra" "1")
(set_attr "mode" "TI")])
"TARGET_SSSE3"
"phaddd\t{%2, %0|%0, %2}"
[(set_attr "type" "sseiadd")
+ (set_attr "atom_unit" "complex")
(set_attr "prefix_extra" "1")
+ (set (attr "prefix_rex") (symbol_ref "x86_extended_reg_mentioned_p (insn)"))
(set_attr "mode" "DI")])
(define_insn "*avx_phaddswv8hi3"
"TARGET_AVX"
"vphaddsw\t{%2, %1, %0|%0, %1, %2}"
[(set_attr "type" "sseiadd")
+ (set_attr "prefix_extra" "1")
(set_attr "prefix" "vex")
(set_attr "mode" "TI")])
"TARGET_SSSE3"
"phaddsw\t{%2, %0|%0, %2}"
[(set_attr "type" "sseiadd")
+ (set_attr "atom_unit" "complex")
(set_attr "prefix_data16" "1")
(set_attr "prefix_extra" "1")
(set_attr "mode" "TI")])
"TARGET_SSSE3"
"phaddsw\t{%2, %0|%0, %2}"
[(set_attr "type" "sseiadd")
+ (set_attr "atom_unit" "complex")
(set_attr "prefix_extra" "1")
+ (set (attr "prefix_rex") (symbol_ref "x86_extended_reg_mentioned_p (insn)"))
(set_attr "mode" "DI")])
(define_insn "*avx_phsubwv8hi3"
"TARGET_AVX"
"vphsubw\t{%2, %1, %0|%0, %1, %2}"
[(set_attr "type" "sseiadd")
+ (set_attr "prefix_extra" "1")
(set_attr "prefix" "vex")
(set_attr "mode" "TI")])
"TARGET_SSSE3"
"phsubw\t{%2, %0|%0, %2}"
[(set_attr "type" "sseiadd")
+ (set_attr "atom_unit" "complex")
(set_attr "prefix_data16" "1")
(set_attr "prefix_extra" "1")
(set_attr "mode" "TI")])
"TARGET_SSSE3"
"phsubw\t{%2, %0|%0, %2}"
[(set_attr "type" "sseiadd")
+ (set_attr "atom_unit" "complex")
(set_attr "prefix_extra" "1")
+ (set (attr "prefix_rex") (symbol_ref "x86_extended_reg_mentioned_p (insn)"))
(set_attr "mode" "DI")])
(define_insn "*avx_phsubdv4si3"
"TARGET_AVX"
"vphsubd\t{%2, %1, %0|%0, %1, %2}"
[(set_attr "type" "sseiadd")
+ (set_attr "prefix_extra" "1")
(set_attr "prefix" "vex")
(set_attr "mode" "TI")])
"TARGET_SSSE3"
"phsubd\t{%2, %0|%0, %2}"
[(set_attr "type" "sseiadd")
+ (set_attr "atom_unit" "complex")
(set_attr "prefix_data16" "1")
(set_attr "prefix_extra" "1")
(set_attr "mode" "TI")])
"TARGET_SSSE3"
"phsubd\t{%2, %0|%0, %2}"
[(set_attr "type" "sseiadd")
+ (set_attr "atom_unit" "complex")
(set_attr "prefix_extra" "1")
+ (set (attr "prefix_rex") (symbol_ref "x86_extended_reg_mentioned_p (insn)"))
(set_attr "mode" "DI")])
(define_insn "*avx_phsubswv8hi3"
"TARGET_AVX"
"vphsubsw\t{%2, %1, %0|%0, %1, %2}"
[(set_attr "type" "sseiadd")
+ (set_attr "prefix_extra" "1")
(set_attr "prefix" "vex")
(set_attr "mode" "TI")])
"TARGET_SSSE3"
"phsubsw\t{%2, %0|%0, %2}"
[(set_attr "type" "sseiadd")
+ (set_attr "atom_unit" "complex")
(set_attr "prefix_data16" "1")
(set_attr "prefix_extra" "1")
(set_attr "mode" "TI")])
"TARGET_SSSE3"
"phsubsw\t{%2, %0|%0, %2}"
[(set_attr "type" "sseiadd")
+ (set_attr "atom_unit" "complex")
(set_attr "prefix_extra" "1")
+ (set (attr "prefix_rex") (symbol_ref "x86_extended_reg_mentioned_p (insn)"))
(set_attr "mode" "DI")])
(define_insn "*avx_pmaddubsw128"
"TARGET_AVX"
"vpmaddubsw\t{%2, %1, %0|%0, %1, %2}"
[(set_attr "type" "sseiadd")
+ (set_attr "prefix_extra" "1")
(set_attr "prefix" "vex")
(set_attr "mode" "TI")])
"TARGET_SSSE3"
"pmaddubsw\t{%2, %0|%0, %2}"
[(set_attr "type" "sseiadd")
+ (set_attr "atom_unit" "simul")
(set_attr "prefix_data16" "1")
(set_attr "prefix_extra" "1")
(set_attr "mode" "TI")])
"TARGET_SSSE3"
"pmaddubsw\t{%2, %0|%0, %2}"
[(set_attr "type" "sseiadd")
+ (set_attr "atom_unit" "simul")
(set_attr "prefix_extra" "1")
+ (set (attr "prefix_rex") (symbol_ref "x86_extended_reg_mentioned_p (insn)"))
(set_attr "mode" "DI")])
(define_expand "ssse3_pmulhrswv8hi3"
"TARGET_AVX && ix86_binary_operator_ok (MULT, V8HImode, operands)"
"vpmulhrsw\t{%2, %1, %0|%0, %1, %2}"
[(set_attr "type" "sseimul")
+ (set_attr "prefix_extra" "1")
(set_attr "prefix" "vex")
(set_attr "mode" "TI")])
"pmulhrsw\t{%2, %0|%0, %2}"
[(set_attr "type" "sseimul")
(set_attr "prefix_extra" "1")
+ (set (attr "prefix_rex") (symbol_ref "x86_extended_reg_mentioned_p (insn)"))
(set_attr "mode" "DI")])
(define_insn "*avx_pshufbv16qi3"
"TARGET_AVX"
"vpshufb\t{%2, %1, %0|%0, %1, %2}";
[(set_attr "type" "sselog1")
+ (set_attr "prefix_extra" "1")
(set_attr "prefix" "vex")
(set_attr "mode" "TI")])
"pshufb\t{%2, %0|%0, %2}";
[(set_attr "type" "sselog1")
(set_attr "prefix_extra" "1")
+ (set (attr "prefix_rex") (symbol_ref "x86_extended_reg_mentioned_p (insn)"))
(set_attr "mode" "DI")])
(define_insn "*avx_psign<mode>3"
"TARGET_AVX"
"vpsign<ssevecsize>\t{%2, %1, %0|%0, %1, %2}";
[(set_attr "type" "sselog1")
+ (set_attr "prefix_extra" "1")
(set_attr "prefix" "vex")
(set_attr "mode" "TI")])
"psign<mmxvecsize>\t{%2, %0|%0, %2}";
[(set_attr "type" "sselog1")
(set_attr "prefix_extra" "1")
+ (set (attr "prefix_rex") (symbol_ref "x86_extended_reg_mentioned_p (insn)"))
(set_attr "mode" "DI")])
(define_insn "*avx_palignrti"
return "vpalignr\t{%3, %2, %1, %0|%0, %1, %2, %3}";
}
[(set_attr "type" "sseishft")
+ (set_attr "prefix_extra" "1")
+ (set_attr "length_immediate" "1")
(set_attr "prefix" "vex")
(set_attr "mode" "TI")])
return "palignr\t{%3, %2, %0|%0, %2, %3}";
}
[(set_attr "type" "sseishft")
+ (set_attr "atom_unit" "sishuf")
(set_attr "prefix_data16" "1")
(set_attr "prefix_extra" "1")
+ (set_attr "length_immediate" "1")
(set_attr "mode" "TI")])
(define_insn "ssse3_palignrdi"
return "palignr\t{%3, %2, %0|%0, %2, %3}";
}
[(set_attr "type" "sseishft")
+ (set_attr "atom_unit" "sishuf")
(set_attr "prefix_extra" "1")
+ (set_attr "length_immediate" "1")
+ (set (attr "prefix_rex") (symbol_ref "x86_extended_reg_mentioned_p (insn)"))
(set_attr "mode" "DI")])
(define_insn "abs<mode>2"
"TARGET_SSSE3"
"pabs<mmxvecsize>\t{%1, %0|%0, %1}";
[(set_attr "type" "sselog1")
+ (set_attr "prefix_rep" "0")
(set_attr "prefix_extra" "1")
+ (set (attr "prefix_rex") (symbol_ref "x86_extended_reg_mentioned_p (insn)"))
(set_attr "mode" "DI")])
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
"extrq\t{%3, %2, %0|%0, %2, %3}"
[(set_attr "type" "sse")
(set_attr "prefix_data16" "1")
+ (set_attr "length_immediate" "2")
(set_attr "mode" "TI")])
(define_insn "sse4a_extrq"
"TARGET_SSE4A"
"insertq\t{%4, %3, %2, %0|%0, %2, %3, %4}"
[(set_attr "type" "sseins")
+ (set_attr "prefix_data16" "0")
(set_attr "prefix_rep" "1")
+ (set_attr "length_immediate" "2")
(set_attr "mode" "TI")])
(define_insn "sse4a_insertq"
"TARGET_SSE4A"
"insertq\t{%2, %0|%0, %2}"
[(set_attr "type" "sseins")
+ (set_attr "prefix_data16" "0")
(set_attr "prefix_rep" "1")
(set_attr "mode" "TI")])
"TARGET_AVX"
"vblendp<avxmodesuffixf2c>\t{%3, %2, %1, %0|%0, %1, %2, %3}"
[(set_attr "type" "ssemov")
+ (set_attr "prefix_extra" "1")
+ (set_attr "length_immediate" "1")
(set_attr "prefix" "vex")
(set_attr "mode" "<avxvecmode>")])
"TARGET_AVX"
"vblendvp<avxmodesuffixf2c>\t{%3, %2, %1, %0|%0, %1, %2, %3}"
[(set_attr "type" "ssemov")
+ (set_attr "prefix_extra" "1")
+ (set_attr "length_immediate" "1")
(set_attr "prefix" "vex")
(set_attr "mode" "<avxvecmode>")])
"TARGET_SSE4_1"
"blendp<ssemodesuffixf2c>\t{%3, %2, %0|%0, %2, %3}"
[(set_attr "type" "ssemov")
+ (set_attr "prefix_data16" "1")
(set_attr "prefix_extra" "1")
+ (set_attr "length_immediate" "1")
(set_attr "mode" "<MODE>")])
(define_insn "sse4_1_blendvp<ssemodesuffixf2c>"
"TARGET_SSE4_1"
"blendvp<ssemodesuffixf2c>\t{%3, %2, %0|%0, %2, %3}"
[(set_attr "type" "ssemov")
+ (set_attr "prefix_data16" "1")
(set_attr "prefix_extra" "1")
(set_attr "mode" "<MODE>")])
"vdpp<avxmodesuffixf2c>\t{%3, %2, %1, %0|%0, %1, %2, %3}"
[(set_attr "type" "ssemul")
(set_attr "prefix" "vex")
+ (set_attr "prefix_extra" "1")
+ (set_attr "length_immediate" "1")
(set_attr "mode" "<avxvecmode>")])
(define_insn "sse4_1_dpp<ssemodesuffixf2c>"
"TARGET_SSE4_1"
"dpp<ssemodesuffixf2c>\t{%3, %2, %0|%0, %2, %3}"
[(set_attr "type" "ssemul")
+ (set_attr "prefix_data16" "1")
(set_attr "prefix_extra" "1")
+ (set_attr "length_immediate" "1")
(set_attr "mode" "<MODE>")])
(define_insn "sse4_1_movntdqa"
UNSPEC_MOVNTDQA))]
"TARGET_SSE4_1"
"%vmovntdqa\t{%1, %0|%0, %1}"
- [(set_attr "type" "ssecvt")
+ [(set_attr "type" "ssemov")
(set_attr "prefix_extra" "1")
(set_attr "prefix" "maybe_vex")
(set_attr "mode" "TI")])
"vmpsadbw\t{%3, %2, %1, %0|%0, %1, %2, %3}"
[(set_attr "type" "sselog1")
(set_attr "prefix" "vex")
+ (set_attr "prefix_extra" "1")
+ (set_attr "length_immediate" "1")
(set_attr "mode" "TI")])
(define_insn "sse4_1_mpsadbw"
"mpsadbw\t{%3, %2, %0|%0, %2, %3}"
[(set_attr "type" "sselog1")
(set_attr "prefix_extra" "1")
+ (set_attr "length_immediate" "1")
(set_attr "mode" "TI")])
(define_insn "*avx_packusdw"
"TARGET_AVX"
"vpackusdw\t{%2, %1, %0|%0, %1, %2}"
[(set_attr "type" "sselog")
+ (set_attr "prefix_extra" "1")
(set_attr "prefix" "vex")
(set_attr "mode" "TI")])
"TARGET_AVX"
"vpblendvb\t{%3, %2, %1, %0|%0, %1, %2, %3}"
[(set_attr "type" "ssemov")
+ (set_attr "prefix_extra" "1")
+ (set_attr "length_immediate" "1")
(set_attr "prefix" "vex")
(set_attr "mode" "TI")])
"vpblendw\t{%3, %2, %1, %0|%0, %1, %2, %3}"
[(set_attr "type" "ssemov")
(set_attr "prefix" "vex")
+ (set_attr "prefix_extra" "1")
+ (set_attr "length_immediate" "1")
(set_attr "mode" "TI")])
(define_insn "sse4_1_pblendw"
"pblendw\t{%3, %2, %0|%0, %2, %3}"
[(set_attr "type" "ssemov")
(set_attr "prefix_extra" "1")
+ (set_attr "length_immediate" "1")
(set_attr "mode" "TI")])
(define_insn "sse4_1_phminposuw"
"TARGET_AVX"
"vtestp<avxmodesuffixf2c>\t{%1, %0|%0, %1}"
[(set_attr "type" "ssecomi")
+ (set_attr "prefix_extra" "1")
(set_attr "prefix" "vex")
(set_attr "mode" "<MODE>")])
"TARGET_AVX"
"vptest\t{%1, %0|%0, %1}"
[(set_attr "type" "ssecomi")
+ (set_attr "prefix_extra" "1")
(set_attr "prefix" "vex")
(set_attr "mode" "OI")])
"TARGET_AVX"
"vroundp<avxmodesuffixf2c>\t{%2, %1, %0|%0, %1, %2}"
[(set_attr "type" "ssecvt")
+ (set_attr "prefix_extra" "1")
+ (set_attr "length_immediate" "1")
(set_attr "prefix" "vex")
(set_attr "mode" "<MODE>")])
"TARGET_ROUND"
"%vroundp<ssemodesuffixf2c>\t{%2, %1, %0|%0, %1, %2}"
[(set_attr "type" "ssecvt")
+ (set_attr "prefix_data16" "1")
(set_attr "prefix_extra" "1")
+ (set_attr "length_immediate" "1")
(set_attr "prefix" "maybe_vex")
(set_attr "mode" "<MODE>")])
"TARGET_AVX"
"vrounds<ssemodesuffixf2c>\t{%3, %2, %1, %0|%0, %1, %2, %3}"
[(set_attr "type" "ssecvt")
+ (set_attr "prefix_extra" "1")
+ (set_attr "length_immediate" "1")
(set_attr "prefix" "vex")
(set_attr "mode" "<MODE>")])
"TARGET_ROUND"
"rounds<ssemodesuffixf2c>\t{%3, %2, %0|%0, %2, %3}"
[(set_attr "type" "ssecvt")
+ (set_attr "prefix_data16" "1")
(set_attr "prefix_extra" "1")
+ (set_attr "length_immediate" "1")
(set_attr "mode" "<MODE>")])
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
(match_dup 6)]
UNSPEC_PCMPESTR))]
"TARGET_SSE4_2
- && !(reload_completed || reload_in_progress)"
+ && can_create_pseudo_p ()"
"#"
"&& 1"
[(const_int 0)]
[(set_attr "type" "sselog")
(set_attr "prefix_data16" "1")
(set_attr "prefix_extra" "1")
+ (set_attr "length_immediate" "1")
(set_attr "memory" "none,load")
(set_attr "mode" "TI")])
(set_attr "prefix_data16" "1")
(set_attr "prefix_extra" "1")
(set_attr "prefix" "maybe_vex")
+ (set_attr "length_immediate" "1")
(set_attr "memory" "none,load")
(set_attr "mode" "TI")])
[(set_attr "type" "sselog")
(set_attr "prefix_data16" "1")
(set_attr "prefix_extra" "1")
+ (set_attr "length_immediate" "1")
(set_attr "prefix" "maybe_vex")
(set_attr "memory" "none,load")
(set_attr "mode" "TI")])
[(set_attr "type" "sselog")
(set_attr "prefix_data16" "1")
(set_attr "prefix_extra" "1")
+ (set_attr "length_immediate" "1")
(set_attr "memory" "none,load,none,load")
(set_attr "prefix" "maybe_vex")
(set_attr "mode" "TI")])
(match_dup 4)]
UNSPEC_PCMPISTR))]
"TARGET_SSE4_2
- && !(reload_completed || reload_in_progress)"
+ && can_create_pseudo_p ()"
"#"
"&& 1"
[(const_int 0)]
[(set_attr "type" "sselog")
(set_attr "prefix_data16" "1")
(set_attr "prefix_extra" "1")
+ (set_attr "length_immediate" "1")
(set_attr "memory" "none,load")
(set_attr "mode" "TI")])
[(set_attr "type" "sselog")
(set_attr "prefix_data16" "1")
(set_attr "prefix_extra" "1")
+ (set_attr "length_immediate" "1")
(set_attr "prefix" "maybe_vex")
(set_attr "memory" "none,load")
(set_attr "mode" "TI")])
[(set_attr "type" "sselog")
(set_attr "prefix_data16" "1")
(set_attr "prefix_extra" "1")
+ (set_attr "length_immediate" "1")
(set_attr "prefix" "maybe_vex")
(set_attr "memory" "none,load")
(set_attr "mode" "TI")])
[(set_attr "type" "sselog")
(set_attr "prefix_data16" "1")
(set_attr "prefix_extra" "1")
+ (set_attr "length_immediate" "1")
(set_attr "memory" "none,load,none,load")
(set_attr "prefix" "maybe_vex")
(set_attr "mode" "TI")])
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;;
-;; SSE5 instructions
+;; XOP instructions
;;
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
-;; SSE5 parallel integer multiply/add instructions.
+;; XOP parallel integer multiply/add instructions.
;; Note the instruction does not allow the value being added to be a memory
;; operation. However by pretending via the nonimmediate_operand predicate
;; that it does and splitting it later allows the following to be recognized:
;; a[i] = b[i] * c[i] + d[i];
-(define_insn "sse5_pmacsww"
- [(set (match_operand:V8HI 0 "register_operand" "=x,x,x")
+;; vpmacsww: multiply packed signed words, add the third source (V8HI).
+;; XOP encodes four non-destructive operands, so the accumulator (operand 3)
+;; is a free register ("x,x") instead of SSE5's destructive "0" tie, and only
+;; two alternatives are needed; the second template swaps the commutative
+;; ("%") multiplicands so a memory operand may appear on either side.
+(define_insn "xop_pmacsww"
+ [(set (match_operand:V8HI 0 "register_operand" "=x,x")
(plus:V8HI
(mult:V8HI
- (match_operand:V8HI 1 "nonimmediate_operand" "%x,x,xm")
- (match_operand:V8HI 2 "nonimmediate_operand" "x,xm,x"))
- (match_operand:V8HI 3 "register_operand" "0,0,0")))]
- "TARGET_SSE5 && ix86_sse5_valid_op_p (operands, insn, 4, false, 2, true)"
+ (match_operand:V8HI 1 "nonimmediate_operand" "%x,m")
+ (match_operand:V8HI 2 "nonimmediate_operand" "xm,x"))
+ (match_operand:V8HI 3 "register_operand" "x,x")))]
+ "TARGET_XOP && ix86_fma4_valid_op_p (operands, insn, 4, false, 2, true)"
"@
- pmacsww\t{%3, %2, %1, %0|%0, %1, %2, %3}
- pmacsww\t{%3, %2, %1, %0|%0, %1, %2, %3}
- pmacsww\t{%3, %1, %2, %0|%0, %2, %1, %3}"
+ vpmacsww\t{%3, %2, %1, %0|%0, %1, %2, %3}
+ vpmacsww\t{%3, %1, %2, %0|%0, %2, %1, %3}"
[(set_attr "type" "ssemuladd")
(set_attr "mode" "TI")])
(mult:V8HI (match_operand:V8HI 1 "nonimmediate_operand" "")
(match_operand:V8HI 2 "nonimmediate_operand" ""))
(match_operand:V8HI 3 "nonimmediate_operand" "")))]
- "TARGET_SSE5
- && !ix86_sse5_valid_op_p (operands, insn, 4, false, 1, true)
- && ix86_sse5_valid_op_p (operands, insn, 4, false, 2, true)
+ "TARGET_XOP
+ && !ix86_fma4_valid_op_p (operands, insn, 4, false, 1, true)
+ && ix86_fma4_valid_op_p (operands, insn, 4, false, 2, true)
&& !reg_mentioned_p (operands[0], operands[1])
&& !reg_mentioned_p (operands[0], operands[2])
&& !reg_mentioned_p (operands[0], operands[3])"
[(const_int 0)]
{
- ix86_expand_sse5_multiple_memory (operands, 4, V8HImode);
- emit_insn (gen_sse5_pmacsww (operands[0], operands[1], operands[2],
- operands[3]));
+ ix86_expand_fma4_multiple_memory (operands, 4, V8HImode);
+ emit_insn (gen_xop_pmacsww (operands[0], operands[1], operands[2],
+ operands[3]));
DONE;
})
-(define_insn "sse5_pmacssww"
- [(set (match_operand:V8HI 0 "register_operand" "=x,x,x")
+;; vpmacssww: as vpmacsww but with signed-saturating add (ss_plus).
+;; Four-operand XOP form; accumulator no longer tied to the destination.
+(define_insn "xop_pmacssww"
+ [(set (match_operand:V8HI 0 "register_operand" "=x,x")
(ss_plus:V8HI
- (mult:V8HI (match_operand:V8HI 1 "nonimmediate_operand" "%x,x,m")
- (match_operand:V8HI 2 "nonimmediate_operand" "x,m,x"))
- (match_operand:V8HI 3 "register_operand" "0,0,0")))]
- "TARGET_SSE5 && ix86_sse5_valid_op_p (operands, insn, 4, false, 1, true)"
+ (mult:V8HI (match_operand:V8HI 1 "nonimmediate_operand" "%x,m")
+ (match_operand:V8HI 2 "nonimmediate_operand" "xm,x"))
+ (match_operand:V8HI 3 "register_operand" "x,x")))]
+ "TARGET_XOP && ix86_fma4_valid_op_p (operands, insn, 4, false, 1, true)"
"@
- pmacssww\t{%3, %2, %1, %0|%0, %1, %2, %3}
- pmacssww\t{%3, %2, %1, %0|%0, %1, %2, %3}
- pmacssww\t{%3, %1, %2, %0|%0, %2, %1, %3}"
+ vpmacssww\t{%3, %2, %1, %0|%0, %1, %2, %3}
+ vpmacssww\t{%3, %1, %2, %0|%0, %2, %1, %3}"
[(set_attr "type" "ssemuladd")
(set_attr "mode" "TI")])
;; operation. However by pretending via the nonimmediate_operand predicate
;; that it does and splitting it later allows the following to be recognized:
;; a[i] = b[i] * c[i] + d[i];
-(define_insn "sse5_pmacsdd"
- [(set (match_operand:V4SI 0 "register_operand" "=x,x,x")
+;; vpmacsdd: multiply packed signed dwords, add the third source (V4SI).
+;; Same 4-operand XOP rewrite as vpmacsww above.
+(define_insn "xop_pmacsdd"
+ [(set (match_operand:V4SI 0 "register_operand" "=x,x")
(plus:V4SI
(mult:V4SI
- (match_operand:V4SI 1 "nonimmediate_operand" "%x,x,m")
- (match_operand:V4SI 2 "nonimmediate_operand" "x,m,x"))
- (match_operand:V4SI 3 "register_operand" "0,0,0")))]
- "TARGET_SSE5 && ix86_sse5_valid_op_p (operands, insn, 4, false, 2, true)"
+ (match_operand:V4SI 1 "nonimmediate_operand" "%x,m")
+ (match_operand:V4SI 2 "nonimmediate_operand" "xm,x"))
+ (match_operand:V4SI 3 "register_operand" "x,x")))]
+ "TARGET_XOP && ix86_fma4_valid_op_p (operands, insn, 4, false, 2, true)"
"@
- pmacsdd\t{%3, %2, %1, %0|%0, %1, %2, %3}
- pmacsdd\t{%3, %2, %1, %0|%0, %1, %2, %3}
- pmacsdd\t{%3, %1, %2, %0|%0, %2, %1, %3}"
+ vpmacsdd\t{%3, %2, %1, %0|%0, %1, %2, %3}
+ vpmacsdd\t{%3, %1, %2, %0|%0, %2, %1, %3}"
[(set_attr "type" "ssemuladd")
(set_attr "mode" "TI")])
(mult:V4SI (match_operand:V4SI 1 "nonimmediate_operand" "")
(match_operand:V4SI 2 "nonimmediate_operand" ""))
(match_operand:V4SI 3 "nonimmediate_operand" "")))]
- "TARGET_SSE5
- && !ix86_sse5_valid_op_p (operands, insn, 4, false, 1, true)
- && ix86_sse5_valid_op_p (operands, insn, 4, false, 2, true)
+ "TARGET_XOP
+ && !ix86_fma4_valid_op_p (operands, insn, 4, false, 1, true)
+ && ix86_fma4_valid_op_p (operands, insn, 4, false, 2, true)
&& !reg_mentioned_p (operands[0], operands[1])
&& !reg_mentioned_p (operands[0], operands[2])
&& !reg_mentioned_p (operands[0], operands[3])"
[(const_int 0)]
{
- ix86_expand_sse5_multiple_memory (operands, 4, V4SImode);
- emit_insn (gen_sse5_pmacsdd (operands[0], operands[1], operands[2],
- operands[3]));
+ ix86_expand_fma4_multiple_memory (operands, 4, V4SImode);
+ emit_insn (gen_xop_pmacsdd (operands[0], operands[1], operands[2],
+ operands[3]));
DONE;
})
-(define_insn "sse5_pmacssdd"
- [(set (match_operand:V4SI 0 "register_operand" "=x,x,x")
+;; vpmacssdd: dword multiply-accumulate with signed-saturating add.
+(define_insn "xop_pmacssdd"
+ [(set (match_operand:V4SI 0 "register_operand" "=x,x")
(ss_plus:V4SI
- (mult:V4SI (match_operand:V4SI 1 "nonimmediate_operand" "%x,x,m")
- (match_operand:V4SI 2 "nonimmediate_operand" "x,m,x"))
- (match_operand:V4SI 3 "register_operand" "0,0,0")))]
- "TARGET_SSE5 && ix86_sse5_valid_op_p (operands, insn, 4, false, 1, true)"
+ (mult:V4SI (match_operand:V4SI 1 "nonimmediate_operand" "%x,m")
+ (match_operand:V4SI 2 "nonimmediate_operand" "xm,x"))
+ (match_operand:V4SI 3 "register_operand" "x,x")))]
+ "TARGET_XOP && ix86_fma4_valid_op_p (operands, insn, 4, false, 1, true)"
"@
- pmacssdd\t{%3, %2, %1, %0|%0, %1, %2, %3}
- pmacssdd\t{%3, %2, %1, %0|%0, %1, %2, %3}
- pmacssdd\t{%3, %1, %2, %0|%0, %2, %1, %3}"
+ vpmacssdd\t{%3, %2, %1, %0|%0, %1, %2, %3}
+ vpmacssdd\t{%3, %1, %2, %0|%0, %2, %1, %3}"
[(set_attr "type" "ssemuladd")
(set_attr "mode" "TI")])
-(define_insn "sse5_pmacssdql"
- [(set (match_operand:V2DI 0 "register_operand" "=x,x,x")
+;; vpmacssdql: multiply the odd (low-result) dword lanes 1,3 widened to
+;; V2DI, then saturating-add the third source.
+;; NOTE(review): operand 2 is a bare vec_select with no sign_extend
+;; wrapper, unlike the symmetric xop_pmacssdqh pattern below -- confirm
+;; this asymmetry is intentional and matches the vpmacssdql semantics.
+(define_insn "xop_pmacssdql"
+ [(set (match_operand:V2DI 0 "register_operand" "=x,x")
(ss_plus:V2DI
(mult:V2DI
(sign_extend:V2DI
(vec_select:V2SI
- (match_operand:V4SI 1 "nonimmediate_operand" "%x,x,m")
+ (match_operand:V4SI 1 "nonimmediate_operand" "%x,m")
(parallel [(const_int 1)
(const_int 3)])))
- (vec_select:V2SI
- (match_operand:V4SI 2 "nonimmediate_operand" "x,m,x")
- (parallel [(const_int 1)
- (const_int 3)])))
- (match_operand:V2DI 3 "register_operand" "0,0,0")))]
- "TARGET_SSE5 && ix86_sse5_valid_op_p (operands, insn, 4, false, 1, true)"
+ (vec_select:V2SI
+ (match_operand:V4SI 2 "nonimmediate_operand" "xm,x")
+ (parallel [(const_int 1)
+ (const_int 3)])))
+ (match_operand:V2DI 3 "register_operand" "x,x")))]
+ "TARGET_XOP && ix86_fma4_valid_op_p (operands, insn, 4, false, 1, true)"
"@
- pmacssdql\t{%3, %2, %1, %0|%0, %1, %2, %3}
- pmacssdql\t{%3, %2, %1, %0|%0, %1, %2, %3}
- pmacssdql\t{%3, %1, %2, %0|%0, %2, %1, %3}"
+ vpmacssdql\t{%3, %2, %1, %0|%0, %1, %2, %3}
+ vpmacssdql\t{%3, %1, %2, %0|%0, %2, %1, %3}"
[(set_attr "type" "ssemuladd")
(set_attr "mode" "TI")])
-(define_insn "sse5_pmacssdqh"
- [(set (match_operand:V2DI 0 "register_operand" "=x,x,x")
+;; vpmacssdqh: sign-extend and multiply the even dword lanes 0,2 of both
+;; sources, saturating-add the V2DI accumulator (operand 3).
+(define_insn "xop_pmacssdqh"
+ [(set (match_operand:V2DI 0 "register_operand" "=x,x")
(ss_plus:V2DI
(mult:V2DI
(sign_extend:V2DI
(vec_select:V2SI
- (match_operand:V4SI 1 "nonimmediate_operand" "%x,x,m")
+ (match_operand:V4SI 1 "nonimmediate_operand" "%x,m")
(parallel [(const_int 0)
(const_int 2)])))
(sign_extend:V2DI
(vec_select:V2SI
- (match_operand:V4SI 2 "nonimmediate_operand" "x,m,x")
+ (match_operand:V4SI 2 "nonimmediate_operand" "xm,x")
(parallel [(const_int 0)
(const_int 2)]))))
- (match_operand:V2DI 3 "register_operand" "0,0,0")))]
- "TARGET_SSE5 && ix86_sse5_valid_op_p (operands, insn, 4, false, 1, true)"
+ (match_operand:V2DI 3 "register_operand" "x,x")))]
+ "TARGET_XOP && ix86_fma4_valid_op_p (operands, insn, 4, false, 1, true)"
"@
- pmacssdqh\t{%3, %2, %1, %0|%0, %1, %2, %3}
- pmacssdqh\t{%3, %2, %1, %0|%0, %1, %2, %3}
- pmacssdqh\t{%3, %1, %2, %0|%0, %2, %1, %3}"
+ vpmacssdqh\t{%3, %2, %1, %0|%0, %1, %2, %3}
+ vpmacssdqh\t{%3, %1, %2, %0|%0, %2, %1, %3}"
[(set_attr "type" "ssemuladd")
(set_attr "mode" "TI")])
-(define_insn "sse5_pmacsdql"
- [(set (match_operand:V2DI 0 "register_operand" "=x,x,x")
+;; vpmacsdql: sign-extend and multiply dword lanes 1,3 of both sources,
+;; add (non-saturating) the V2DI accumulator.
+(define_insn "xop_pmacsdql"
+ [(set (match_operand:V2DI 0 "register_operand" "=x,x")
(plus:V2DI
(mult:V2DI
(sign_extend:V2DI
(vec_select:V2SI
- (match_operand:V4SI 1 "nonimmediate_operand" "%x,x,m")
+ (match_operand:V4SI 1 "nonimmediate_operand" "%x,m")
(parallel [(const_int 1)
(const_int 3)])))
(sign_extend:V2DI
(vec_select:V2SI
- (match_operand:V4SI 2 "nonimmediate_operand" "x,m,x")
+ (match_operand:V4SI 2 "nonimmediate_operand" "xm,x")
(parallel [(const_int 1)
(const_int 3)]))))
- (match_operand:V2DI 3 "register_operand" "0,0,0")))]
- "TARGET_SSE5 && ix86_sse5_valid_op_p (operands, insn, 4, false, 1, true)"
+ (match_operand:V2DI 3 "register_operand" "x,x")))]
+ "TARGET_XOP && ix86_fma4_valid_op_p (operands, insn, 4, false, 1, true)"
"@
- pmacsdql\t{%3, %2, %1, %0|%0, %1, %2, %3}
- pmacsdql\t{%3, %2, %1, %0|%0, %1, %2, %3}
- pmacsdql\t{%3, %1, %2, %0|%0, %2, %1, %3}"
+ vpmacsdql\t{%3, %2, %1, %0|%0, %1, %2, %3}
+ vpmacsdql\t{%3, %1, %2, %0|%0, %2, %1, %3}"
[(set_attr "type" "ssemuladd")
(set_attr "mode" "TI")])
-(define_insn_and_split "*sse5_pmacsdql_mem"
- [(set (match_operand:V2DI 0 "register_operand" "=&x,&x,&x")
+(define_insn_and_split "*xop_pmacsdql_mem"
+ [(set (match_operand:V2DI 0 "register_operand" "=&x,&x")
(plus:V2DI
(mult:V2DI
(sign_extend:V2DI
(vec_select:V2SI
- (match_operand:V4SI 1 "nonimmediate_operand" "%x,x,m")
+ (match_operand:V4SI 1 "nonimmediate_operand" "%x,m")
(parallel [(const_int 1)
(const_int 3)])))
(sign_extend:V2DI
(vec_select:V2SI
- (match_operand:V4SI 2 "nonimmediate_operand" "x,m,x")
+ (match_operand:V4SI 2 "nonimmediate_operand" "xm,x")
(parallel [(const_int 1)
(const_int 3)]))))
- (match_operand:V2DI 3 "memory_operand" "m,m,m")))]
- "TARGET_SSE5 && ix86_sse5_valid_op_p (operands, insn, 4, false, -1, true)"
+ (match_operand:V2DI 3 "memory_operand" "m,m")))]
+ "TARGET_XOP && ix86_fma4_valid_op_p (operands, insn, 4, false, -1, true)"
"#"
"&& (reload_completed
|| (!reg_mentioned_p (operands[0], operands[1])
(const_int 3)]))))
(match_dup 0)))])
-;; We don't have a straight 32-bit parallel multiply and extend on SSE5, so
+;; We don't have a straight 32-bit parallel multiply and extend on XOP, so
;; fake it with a multiply/add. In general, we expect the define_split to
;; occur before register allocation, so we have to handle the corner case where
;; the target is the same as operands 1/2
-(define_insn_and_split "sse5_mulv2div2di3_low"
+(define_insn_and_split "xop_mulv2div2di3_low"
[(set (match_operand:V2DI 0 "register_operand" "=&x")
(mult:V2DI
(sign_extend:V2DI
(match_operand:V4SI 2 "nonimmediate_operand" "xm")
(parallel [(const_int 1)
(const_int 3)])))))]
- "TARGET_SSE5"
+ "TARGET_XOP"
"#"
"&& (reload_completed
|| (!reg_mentioned_p (operands[0], operands[1])
[(set_attr "type" "ssemuladd")
(set_attr "mode" "TI")])
-(define_insn "sse5_pmacsdqh"
- [(set (match_operand:V2DI 0 "register_operand" "=x,x,x")
+;; vpmacsdqh: sign-extend and multiply dword lanes 0,2 of both sources,
+;; add (non-saturating) the V2DI accumulator.
+(define_insn "xop_pmacsdqh"
+ [(set (match_operand:V2DI 0 "register_operand" "=x,x")
(plus:V2DI
(mult:V2DI
(sign_extend:V2DI
(vec_select:V2SI
- (match_operand:V4SI 1 "nonimmediate_operand" "%x,x,m")
+ (match_operand:V4SI 1 "nonimmediate_operand" "%x,m")
(parallel [(const_int 0)
(const_int 2)])))
(sign_extend:V2DI
(vec_select:V2SI
- (match_operand:V4SI 2 "nonimmediate_operand" "x,m,x")
+ (match_operand:V4SI 2 "nonimmediate_operand" "xm,x")
(parallel [(const_int 0)
(const_int 2)]))))
- (match_operand:V2DI 3 "register_operand" "0,0,0")))]
- "TARGET_SSE5 && ix86_sse5_valid_op_p (operands, insn, 4, false, 1, true)"
+ (match_operand:V2DI 3 "register_operand" "x,x")))]
+ "TARGET_XOP && ix86_fma4_valid_op_p (operands, insn, 4, false, 1, true)"
"@
- pmacsdqh\t{%3, %2, %1, %0|%0, %1, %2, %3}
- pmacsdqh\t{%3, %2, %1, %0|%0, %1, %2, %3}
- pmacsdqh\t{%3, %1, %2, %0|%0, %2, %1, %3}"
+ vpmacsdqh\t{%3, %2, %1, %0|%0, %1, %2, %3}
+ vpmacsdqh\t{%3, %1, %2, %0|%0, %2, %1, %3}"
[(set_attr "type" "ssemuladd")
(set_attr "mode" "TI")])
-(define_insn_and_split "*sse5_pmacsdqh_mem"
- [(set (match_operand:V2DI 0 "register_operand" "=&x,&x,&x")
+(define_insn_and_split "*xop_pmacsdqh_mem"
+ [(set (match_operand:V2DI 0 "register_operand" "=&x,&x")
(plus:V2DI
(mult:V2DI
(sign_extend:V2DI
(vec_select:V2SI
- (match_operand:V4SI 1 "nonimmediate_operand" "%x,x,m")
+ (match_operand:V4SI 1 "nonimmediate_operand" "%x,m")
(parallel [(const_int 0)
(const_int 2)])))
(sign_extend:V2DI
(vec_select:V2SI
- (match_operand:V4SI 2 "nonimmediate_operand" "x,m,x")
+ (match_operand:V4SI 2 "nonimmediate_operand" "xm,x")
(parallel [(const_int 0)
(const_int 2)]))))
- (match_operand:V2DI 3 "memory_operand" "m,m,m")))]
- "TARGET_SSE5 && ix86_sse5_valid_op_p (operands, insn, 4, false, -1, true)"
+ (match_operand:V2DI 3 "memory_operand" "m,m")))]
+ "TARGET_XOP && ix86_fma4_valid_op_p (operands, insn, 4, false, -1, true)"
"#"
"&& (reload_completed
|| (!reg_mentioned_p (operands[0], operands[1])
(const_int 2)]))))
(match_dup 0)))])
-;; We don't have a straight 32-bit parallel multiply and extend on SSE5, so
+;; We don't have a straight 32-bit parallel multiply and extend on XOP, so
;; fake it with a multiply/add. In general, we expect the define_split to
;; occur before register allocation, so we have to handle the corner case where
;; the target is the same as either operands[1] or operands[2]
-(define_insn_and_split "sse5_mulv2div2di3_high"
+(define_insn_and_split "xop_mulv2div2di3_high"
[(set (match_operand:V2DI 0 "register_operand" "=&x")
(mult:V2DI
(sign_extend:V2DI
(match_operand:V4SI 2 "nonimmediate_operand" "xm")
(parallel [(const_int 0)
(const_int 2)])))))]
- "TARGET_SSE5"
+ "TARGET_XOP"
"#"
"&& (reload_completed
|| (!reg_mentioned_p (operands[0], operands[1])
[(set_attr "type" "ssemuladd")
(set_attr "mode" "TI")])
-;; SSE5 parallel integer multiply/add instructions for the intrinisics
-(define_insn "sse5_pmacsswd"
- [(set (match_operand:V4SI 0 "register_operand" "=x,x,x")
+;; XOP parallel integer multiply/add instructions for the intrinsics
+;; vpmacsswd: sign-extend and multiply the odd word lanes 1,3,5,7 of both
+;; V8HI sources, saturating-add the V4SI accumulator (operand 3).
+(define_insn "xop_pmacsswd"
+ [(set (match_operand:V4SI 0 "register_operand" "=x,x")
(ss_plus:V4SI
(mult:V4SI
(sign_extend:V4SI
(vec_select:V4HI
- (match_operand:V8HI 1 "nonimmediate_operand" "%x,x,m")
+ (match_operand:V8HI 1 "nonimmediate_operand" "%x,m")
(parallel [(const_int 1)
(const_int 3)
(const_int 5)
(const_int 7)])))
(sign_extend:V4SI
(vec_select:V4HI
- (match_operand:V8HI 2 "nonimmediate_operand" "x,m,x")
+ (match_operand:V8HI 2 "nonimmediate_operand" "xm,x")
(parallel [(const_int 1)
(const_int 3)
(const_int 5)
(const_int 7)]))))
- (match_operand:V4SI 3 "register_operand" "0,0,0")))]
- "TARGET_SSE5 && ix86_sse5_valid_op_p (operands, insn, 4, false, 1, true)"
+ (match_operand:V4SI 3 "register_operand" "x,x")))]
+ "TARGET_XOP && ix86_fma4_valid_op_p (operands, insn, 4, false, 1, true)"
"@
- pmacsswd\t{%3, %2, %1, %0|%0, %1, %2, %3}
- pmacsswd\t{%3, %2, %1, %0|%0, %1, %2, %3}
- pmacsswd\t{%3, %1, %2, %0|%0, %2, %1, %3}"
+ vpmacsswd\t{%3, %2, %1, %0|%0, %1, %2, %3}
+ vpmacsswd\t{%3, %1, %2, %0|%0, %2, %1, %3}"
[(set_attr "type" "ssemuladd")
(set_attr "mode" "TI")])
-(define_insn "sse5_pmacswd"
- [(set (match_operand:V4SI 0 "register_operand" "=x,x,x")
+;; vpmacswd: as vpmacsswd but with a non-saturating (plus) accumulate.
+(define_insn "xop_pmacswd"
+ [(set (match_operand:V4SI 0 "register_operand" "=x,x")
(plus:V4SI
(mult:V4SI
(sign_extend:V4SI
(vec_select:V4HI
- (match_operand:V8HI 1 "nonimmediate_operand" "%x,x,m")
+ (match_operand:V8HI 1 "nonimmediate_operand" "%x,m")
(parallel [(const_int 1)
(const_int 3)
(const_int 5)
(const_int 7)])))
(sign_extend:V4SI
(vec_select:V4HI
- (match_operand:V8HI 2 "nonimmediate_operand" "x,m,x")
+ (match_operand:V8HI 2 "nonimmediate_operand" "xm,x")
(parallel [(const_int 1)
(const_int 3)
(const_int 5)
(const_int 7)]))))
- (match_operand:V4SI 3 "register_operand" "0,0,0")))]
- "TARGET_SSE5 && ix86_sse5_valid_op_p (operands, insn, 4, false, 1, true)"
+ (match_operand:V4SI 3 "register_operand" "x,x")))]
+ "TARGET_XOP && ix86_fma4_valid_op_p (operands, insn, 4, false, 1, true)"
"@
- pmacswd\t{%3, %2, %1, %0|%0, %1, %2, %3}
- pmacswd\t{%3, %2, %1, %0|%0, %1, %2, %3}
- pmacswd\t{%3, %1, %2, %0|%0, %2, %1, %3}"
+ vpmacswd\t{%3, %2, %1, %0|%0, %1, %2, %3}
+ vpmacswd\t{%3, %1, %2, %0|%0, %2, %1, %3}"
[(set_attr "type" "ssemuladd")
(set_attr "mode" "TI")])
-(define_insn "sse5_pmadcsswd"
- [(set (match_operand:V4SI 0 "register_operand" "=x,x,x")
+(define_insn "xop_pmadcsswd"
+ [(set (match_operand:V4SI 0 "register_operand" "=x,x")
(ss_plus:V4SI
(plus:V4SI
(mult:V4SI
(sign_extend:V4SI
(vec_select:V4HI
- (match_operand:V8HI 1 "nonimmediate_operand" "%x,x,m")
+ (match_operand:V8HI 1 "nonimmediate_operand" "%x,m")
(parallel [(const_int 0)
(const_int 2)
(const_int 4)
(const_int 6)])))
(sign_extend:V4SI
(vec_select:V4HI
- (match_operand:V8HI 2 "nonimmediate_operand" "x,m,x")
+ (match_operand:V8HI 2 "nonimmediate_operand" "xm,x")
(parallel [(const_int 0)
(const_int 2)
(const_int 4)
(const_int 3)
(const_int 5)
(const_int 7)])))))
- (match_operand:V4SI 3 "register_operand" "0,0,0")))]
- "TARGET_SSE5 && ix86_sse5_valid_op_p (operands, insn, 4, false, 1, true)"
+ (match_operand:V4SI 3 "register_operand" "x,x")))]
+ "TARGET_XOP && ix86_fma4_valid_op_p (operands, insn, 4, false, 1, true)"
"@
- pmadcsswd\t{%3, %2, %1, %0|%0, %1, %2, %3}
- pmadcsswd\t{%3, %2, %1, %0|%0, %1, %2, %3}
- pmadcsswd\t{%3, %1, %2, %0|%0, %2, %1, %3}"
+ vpmadcsswd\t{%3, %2, %1, %0|%0, %1, %2, %3}
+ vpmadcsswd\t{%3, %1, %2, %0|%0, %2, %1, %3}"
[(set_attr "type" "ssemuladd")
(set_attr "mode" "TI")])
-(define_insn "sse5_pmadcswd"
- [(set (match_operand:V4SI 0 "register_operand" "=x,x,x")
+(define_insn "xop_pmadcswd"
+ [(set (match_operand:V4SI 0 "register_operand" "=x,x")
(plus:V4SI
(plus:V4SI
(mult:V4SI
(sign_extend:V4SI
(vec_select:V4HI
- (match_operand:V8HI 1 "nonimmediate_operand" "%x,x,m")
+ (match_operand:V8HI 1 "nonimmediate_operand" "%x,m")
(parallel [(const_int 0)
(const_int 2)
(const_int 4)
(const_int 6)])))
(sign_extend:V4SI
(vec_select:V4HI
- (match_operand:V8HI 2 "nonimmediate_operand" "x,m,x")
+ (match_operand:V8HI 2 "nonimmediate_operand" "xm,x")
(parallel [(const_int 0)
(const_int 2)
(const_int 4)
(const_int 3)
(const_int 5)
(const_int 7)])))))
- (match_operand:V4SI 3 "register_operand" "0,0,0")))]
- "TARGET_SSE5 && ix86_sse5_valid_op_p (operands, insn, 4, false, 1, true)"
+ (match_operand:V4SI 3 "register_operand" "x,x")))]
+ "TARGET_XOP && ix86_fma4_valid_op_p (operands, insn, 4, false, 1, true)"
"@
- pmadcswd\t{%3, %2, %1, %0|%0, %1, %2, %3}
- pmadcswd\t{%3, %2, %1, %0|%0, %1, %2, %3}
- pmadcswd\t{%3, %1, %2, %0|%0, %2, %1, %3}"
+ vpmadcswd\t{%3, %2, %1, %0|%0, %1, %2, %3}
+ vpmadcswd\t{%3, %1, %2, %0|%0, %2, %1, %3}"
[(set_attr "type" "ssemuladd")
(set_attr "mode" "TI")])
-;; SSE5 parallel XMM conditional moves
-(define_insn "sse5_pcmov_<mode>"
- [(set (match_operand:SSEMODE 0 "register_operand" "=x,x,x,x")
+;; XOP parallel XMM conditional moves
+;; vpcmov selects between operands 1 and 2 under control of operand 3
+;; (presumably bit-wise per the vpcmov instruction -- see AMD XOP docs).
+;; Non-destructive 4-operand form: no "0"-tied alternatives remain, so a
+;; single template covers all three register/memory alternatives.
+(define_insn "xop_pcmov_<mode>"
+ [(set (match_operand:SSEMODE 0 "register_operand" "=x,x,x")
(if_then_else:SSEMODE
- (match_operand:SSEMODE 3 "nonimmediate_operand" "0,0,xm,x")
- (match_operand:SSEMODE 1 "vector_move_operand" "x,xm,0,0")
- (match_operand:SSEMODE 2 "vector_move_operand" "xm,x,x,xm")))]
- "TARGET_SSE5 && ix86_sse5_valid_op_p (operands, insn, 4, true, 1, false)"
- "@
- pcmov\t{%3, %2, %1, %0|%0, %1, %2, %3}
- pcmov\t{%3, %2, %1, %0|%0, %1, %2, %3}
- pcmov\t{%3, %2, %1, %0|%0, %1, %2, %3}
- pcmov\t{%3, %2, %1, %0|%0, %1, %2, %3}"
+ (match_operand:SSEMODE 3 "nonimmediate_operand" "x,x,m")
+ (match_operand:SSEMODE 1 "vector_move_operand" "x,m,x")
+ (match_operand:SSEMODE 2 "vector_move_operand" "xm,x,x")))]
+ "TARGET_XOP && ix86_fma4_valid_op_p (operands, insn, 4, true, 1, false)"
+ "vpcmov\t{%3, %2, %1, %0|%0, %1, %2, %3}"
+ [(set_attr "type" "sse4arg")])
+
+;; 256-bit variant of vpcmov over the AVX256MODE iterator; identical
+;; operand constraints and template to the 128-bit pattern above.
+(define_insn "xop_pcmov_<mode>256"
+ [(set (match_operand:AVX256MODE 0 "register_operand" "=x,x,x")
+ (if_then_else:AVX256MODE
+ (match_operand:AVX256MODE 3 "nonimmediate_operand" "x,x,m")
+ (match_operand:AVX256MODE 1 "vector_move_operand" "x,m,x")
+ (match_operand:AVX256MODE 2 "vector_move_operand" "xm,x,x")))]
+ "TARGET_XOP && ix86_fma4_valid_op_p (operands, insn, 4, true, 1, false)"
+ "vpcmov\t{%3, %2, %1, %0|%0, %1, %2, %3}"
[(set_attr "type" "sse4arg")])
-;; SSE5 horizontal add/subtract instructions
-(define_insn "sse5_phaddbw"
+;; XOP horizontal add/subtract instructions
+(define_insn "xop_phaddbw"
[(set (match_operand:V8HI 0 "register_operand" "=x")
(plus:V8HI
(sign_extend:V8HI
(const_int 11)
(const_int 13)
(const_int 15)])))))]
- "TARGET_SSE5"
- "phaddbw\t{%1, %0|%0, %1}"
+ "TARGET_XOP"
+ "vphaddbw\t{%1, %0|%0, %1}"
[(set_attr "type" "sseiadd1")])
-(define_insn "sse5_phaddbd"
+(define_insn "xop_phaddbd"
[(set (match_operand:V4SI 0 "register_operand" "=x")
(plus:V4SI
(plus:V4SI
(const_int 7)
(const_int 11)
(const_int 15)]))))))]
- "TARGET_SSE5"
- "phaddbd\t{%1, %0|%0, %1}"
+ "TARGET_XOP"
+ "vphaddbd\t{%1, %0|%0, %1}"
[(set_attr "type" "sseiadd1")])
-(define_insn "sse5_phaddbq"
+(define_insn "xop_phaddbq"
[(set (match_operand:V2DI 0 "register_operand" "=x")
(plus:V2DI
(plus:V2DI
(match_dup 1)
(parallel [(const_int 11)
(const_int 15)])))))))]
- "TARGET_SSE5"
- "phaddbq\t{%1, %0|%0, %1}"
+ "TARGET_XOP"
+ "vphaddbq\t{%1, %0|%0, %1}"
[(set_attr "type" "sseiadd1")])
-(define_insn "sse5_phaddwd"
+(define_insn "xop_phaddwd"
[(set (match_operand:V4SI 0 "register_operand" "=x")
(plus:V4SI
(sign_extend:V4SI
(const_int 3)
(const_int 5)
(const_int 7)])))))]
- "TARGET_SSE5"
- "phaddwd\t{%1, %0|%0, %1}"
+ "TARGET_XOP"
+ "vphaddwd\t{%1, %0|%0, %1}"
[(set_attr "type" "sseiadd1")])
-(define_insn "sse5_phaddwq"
+(define_insn "xop_phaddwq"
[(set (match_operand:V2DI 0 "register_operand" "=x")
(plus:V2DI
(plus:V2DI
(match_dup 1)
(parallel [(const_int 3)
(const_int 7)]))))))]
- "TARGET_SSE5"
- "phaddwq\t{%1, %0|%0, %1}"
+ "TARGET_XOP"
+ "vphaddwq\t{%1, %0|%0, %1}"
[(set_attr "type" "sseiadd1")])
-(define_insn "sse5_phadddq"
+(define_insn "xop_phadddq"
[(set (match_operand:V2DI 0 "register_operand" "=x")
(plus:V2DI
(sign_extend:V2DI
(match_dup 1)
(parallel [(const_int 1)
(const_int 3)])))))]
- "TARGET_SSE5"
- "phadddq\t{%1, %0|%0, %1}"
+ "TARGET_XOP"
+ "vphadddq\t{%1, %0|%0, %1}"
[(set_attr "type" "sseiadd1")])
-(define_insn "sse5_phaddubw"
+(define_insn "xop_phaddubw"
[(set (match_operand:V8HI 0 "register_operand" "=x")
(plus:V8HI
(zero_extend:V8HI
(const_int 11)
(const_int 13)
(const_int 15)])))))]
- "TARGET_SSE5"
- "phaddubw\t{%1, %0|%0, %1}"
+ "TARGET_XOP"
+ "vphaddubw\t{%1, %0|%0, %1}"
[(set_attr "type" "sseiadd1")])
-(define_insn "sse5_phaddubd"
+(define_insn "xop_phaddubd"
[(set (match_operand:V4SI 0 "register_operand" "=x")
(plus:V4SI
(plus:V4SI
(const_int 7)
(const_int 11)
(const_int 15)]))))))]
- "TARGET_SSE5"
- "phaddubd\t{%1, %0|%0, %1}"
+ "TARGET_XOP"
+ "vphaddubd\t{%1, %0|%0, %1}"
[(set_attr "type" "sseiadd1")])
-(define_insn "sse5_phaddubq"
+(define_insn "xop_phaddubq"
[(set (match_operand:V2DI 0 "register_operand" "=x")
(plus:V2DI
(plus:V2DI
(match_dup 1)
(parallel [(const_int 11)
(const_int 15)])))))))]
- "TARGET_SSE5"
- "phaddubq\t{%1, %0|%0, %1}"
+ "TARGET_XOP"
+ "vphaddubq\t{%1, %0|%0, %1}"
[(set_attr "type" "sseiadd1")])
-(define_insn "sse5_phadduwd"
+(define_insn "xop_phadduwd"
[(set (match_operand:V4SI 0 "register_operand" "=x")
(plus:V4SI
(zero_extend:V4SI
(const_int 3)
(const_int 5)
(const_int 7)])))))]
- "TARGET_SSE5"
- "phadduwd\t{%1, %0|%0, %1}"
+ "TARGET_XOP"
+ "vphadduwd\t{%1, %0|%0, %1}"
[(set_attr "type" "sseiadd1")])
-(define_insn "sse5_phadduwq"
+(define_insn "xop_phadduwq"
[(set (match_operand:V2DI 0 "register_operand" "=x")
(plus:V2DI
(plus:V2DI
(match_dup 1)
(parallel [(const_int 3)
(const_int 7)]))))))]
- "TARGET_SSE5"
- "phadduwq\t{%1, %0|%0, %1}"
+ "TARGET_XOP"
+ "vphadduwq\t{%1, %0|%0, %1}"
[(set_attr "type" "sseiadd1")])
-(define_insn "sse5_phaddudq"
+(define_insn "xop_phaddudq"
[(set (match_operand:V2DI 0 "register_operand" "=x")
(plus:V2DI
(zero_extend:V2DI
(match_dup 1)
(parallel [(const_int 1)
(const_int 3)])))))]
- "TARGET_SSE5"
- "phaddudq\t{%1, %0|%0, %1}"
+ "TARGET_XOP"
+ "vphaddudq\t{%1, %0|%0, %1}"
[(set_attr "type" "sseiadd1")])
-(define_insn "sse5_phsubbw"
+(define_insn "xop_phsubbw"
[(set (match_operand:V8HI 0 "register_operand" "=x")
(minus:V8HI
(sign_extend:V8HI
(const_int 11)
(const_int 13)
(const_int 15)])))))]
- "TARGET_SSE5"
- "phsubbw\t{%1, %0|%0, %1}"
+ "TARGET_XOP"
+ "vphsubbw\t{%1, %0|%0, %1}"
[(set_attr "type" "sseiadd1")])
-(define_insn "sse5_phsubwd"
+(define_insn "xop_phsubwd"
[(set (match_operand:V4SI 0 "register_operand" "=x")
(minus:V4SI
(sign_extend:V4SI
(const_int 3)
(const_int 5)
(const_int 7)])))))]
- "TARGET_SSE5"
- "phsubwd\t{%1, %0|%0, %1}"
+ "TARGET_XOP"
+ "vphsubwd\t{%1, %0|%0, %1}"
[(set_attr "type" "sseiadd1")])
-(define_insn "sse5_phsubdq"
+(define_insn "xop_phsubdq"
[(set (match_operand:V2DI 0 "register_operand" "=x")
(minus:V2DI
(sign_extend:V2DI
(match_dup 1)
(parallel [(const_int 1)
(const_int 3)])))))]
- "TARGET_SSE5"
- "phsubdq\t{%1, %0|%0, %1}"
+ "TARGET_XOP"
+ "vphsubdq\t{%1, %0|%0, %1}"
[(set_attr "type" "sseiadd1")])
-;; SSE5 permute instructions
-(define_insn "sse5_pperm"
- [(set (match_operand:V16QI 0 "register_operand" "=x,x,x,x")
+;; XOP permute instructions
+(define_insn "xop_pperm"
+ [(set (match_operand:V16QI 0 "register_operand" "=x,x,x")
(unspec:V16QI
- [(match_operand:V16QI 1 "nonimmediate_operand" "0,0,x,xm")
- (match_operand:V16QI 2 "nonimmediate_operand" "x,xm,xm,x")
- (match_operand:V16QI 3 "nonimmediate_operand" "xm,x,0,0")]
- UNSPEC_SSE5_PERMUTE))]
- "TARGET_SSE5 && ix86_sse5_valid_op_p (operands, insn, 4, true, 1, false)"
- "pperm\t{%3, %2, %1, %0|%0, %1, %2, %3}"
+ [(match_operand:V16QI 1 "nonimmediate_operand" "x,x,m")
+ (match_operand:V16QI 2 "nonimmediate_operand" "x,m,x")
+ (match_operand:V16QI 3 "nonimmediate_operand" "xm,x,x")]
+ UNSPEC_XOP_PERMUTE))]
+ "TARGET_XOP && ix86_fma4_valid_op_p (operands, insn, 4, true, 1, false)"
+ "vpperm\t{%3, %2, %1, %0|%0, %1, %2, %3}"
[(set_attr "type" "sse4arg")
(set_attr "mode" "TI")])
-;; The following are for the various unpack insns which doesn't need the first
-;; source operand, so we can just use the output operand for the first operand.
-;; This allows either of the other two operands to be a memory operand. We
-;; can't just use the first operand as an argument to the normal pperm because
-;; then an output only argument, suddenly becomes an input operand.
-(define_insn "sse5_pperm_zero_v16qi_v8hi"
- [(set (match_operand:V8HI 0 "register_operand" "=x,x")
- (zero_extend:V8HI
- (vec_select:V8QI
- (match_operand:V16QI 1 "nonimmediate_operand" "xm,x")
- (match_operand 2 "" "")))) ;; parallel with const_int's
- (use (match_operand:V16QI 3 "nonimmediate_operand" "x,xm"))]
- "TARGET_SSE5
- && (register_operand (operands[1], V16QImode)
- || register_operand (operands[2], V16QImode))"
- "pperm\t{%3, %1, %0, %0|%0, %0, %1, %3}"
- [(set_attr "type" "sseadd")
- (set_attr "mode" "TI")])
-
-(define_insn "sse5_pperm_sign_v16qi_v8hi"
- [(set (match_operand:V8HI 0 "register_operand" "=x,x")
- (sign_extend:V8HI
- (vec_select:V8QI
- (match_operand:V16QI 1 "nonimmediate_operand" "xm,x")
- (match_operand 2 "" "")))) ;; parallel with const_int's
- (use (match_operand:V16QI 3 "nonimmediate_operand" "x,xm"))]
- "TARGET_SSE5
- && (register_operand (operands[1], V16QImode)
- || register_operand (operands[2], V16QImode))"
- "pperm\t{%3, %1, %0, %0|%0, %0, %1, %3}"
- [(set_attr "type" "sseadd")
- (set_attr "mode" "TI")])
-
-(define_insn "sse5_pperm_zero_v8hi_v4si"
- [(set (match_operand:V4SI 0 "register_operand" "=x,x")
- (zero_extend:V4SI
- (vec_select:V4HI
- (match_operand:V8HI 1 "nonimmediate_operand" "xm,x")
- (match_operand 2 "" "")))) ;; parallel with const_int's
- (use (match_operand:V16QI 3 "nonimmediate_operand" "x,xm"))]
- "TARGET_SSE5
- && (register_operand (operands[1], V8HImode)
- || register_operand (operands[2], V16QImode))"
- "pperm\t{%3, %1, %0, %0|%0, %0, %1, %3}"
- [(set_attr "type" "sseadd")
- (set_attr "mode" "TI")])
-
-(define_insn "sse5_pperm_sign_v8hi_v4si"
- [(set (match_operand:V4SI 0 "register_operand" "=x,x")
- (sign_extend:V4SI
- (vec_select:V4HI
- (match_operand:V8HI 1 "nonimmediate_operand" "xm,x")
- (match_operand 2 "" "")))) ;; parallel with const_int's
- (use (match_operand:V16QI 3 "nonimmediate_operand" "x,xm"))]
- "TARGET_SSE5
- && (register_operand (operands[1], V8HImode)
- || register_operand (operands[2], V16QImode))"
- "pperm\t{%3, %1, %0, %0|%0, %0, %1, %3}"
- [(set_attr "type" "sseadd")
- (set_attr "mode" "TI")])
-
-(define_insn "sse5_pperm_zero_v4si_v2di"
- [(set (match_operand:V2DI 0 "register_operand" "=x,x")
- (zero_extend:V2DI
- (vec_select:V2SI
- (match_operand:V4SI 1 "nonimmediate_operand" "xm,x")
- (match_operand 2 "" "")))) ;; parallel with const_int's
- (use (match_operand:V16QI 3 "nonimmediate_operand" "x,xm"))]
- "TARGET_SSE5
- && (register_operand (operands[1], V4SImode)
- || register_operand (operands[2], V16QImode))"
- "pperm\t{%3, %1, %0, %0|%0, %0, %1, %3}"
- [(set_attr "type" "sseadd")
- (set_attr "mode" "TI")])
-
-(define_insn "sse5_pperm_sign_v4si_v2di"
- [(set (match_operand:V2DI 0 "register_operand" "=x,x")
- (sign_extend:V2DI
- (vec_select:V2SI
- (match_operand:V4SI 1 "nonimmediate_operand" "xm,x")
- (match_operand 2 "" "")))) ;; parallel with const_int's
- (use (match_operand:V16QI 3 "nonimmediate_operand" "x,xm"))]
- "TARGET_SSE5
- && (register_operand (operands[1], V4SImode)
- || register_operand (operands[2], V16QImode))"
- "pperm\t{%3, %1, %0, %0|%0, %0, %1, %3}"
- [(set_attr "type" "sseadd")
- (set_attr "mode" "TI")])
-
-;; SSE5 pack instructions that combine two vectors into a smaller vector
-(define_insn "sse5_pperm_pack_v2di_v4si"
- [(set (match_operand:V4SI 0 "register_operand" "=x,x,x,x")
+;; XOP pack instructions that combine two vectors into a smaller vector
+(define_insn "xop_pperm_pack_v2di_v4si"
+ [(set (match_operand:V4SI 0 "register_operand" "=x,x,x")
(vec_concat:V4SI
(truncate:V2SI
- (match_operand:V2DI 1 "nonimmediate_operand" "0,0,x,xm"))
+ (match_operand:V2DI 1 "nonimmediate_operand" "x,x,m"))
(truncate:V2SI
- (match_operand:V2DI 2 "nonimmediate_operand" "x,xm,xm,x"))))
- (use (match_operand:V16QI 3 "nonimmediate_operand" "xm,x,0,0"))]
- "TARGET_SSE5 && ix86_sse5_valid_op_p (operands, insn, 4, true, 1, false)"
- "pperm\t{%3, %2, %1, %0|%0, %1, %2, %3}"
+ (match_operand:V2DI 2 "nonimmediate_operand" "x,m,x"))))
+ (use (match_operand:V16QI 3 "nonimmediate_operand" "xm,x,x"))]
+ "TARGET_XOP && ix86_fma4_valid_op_p (operands, insn, 4, true, 1, false)"
+ "vpperm\t{%3, %2, %1, %0|%0, %1, %2, %3}"
[(set_attr "type" "sse4arg")
(set_attr "mode" "TI")])
+;; Pack two V4SI vectors into one V8HI; operand 3 is the vpperm byte
+;; selector, referenced via USE (it does not appear in the RTL semantics).
-(define_insn "sse5_pperm_pack_v4si_v8hi"
- [(set (match_operand:V8HI 0 "register_operand" "=x,x,x,x")
+(define_insn "xop_pperm_pack_v4si_v8hi"
+ [(set (match_operand:V8HI 0 "register_operand" "=x,x,x")
(vec_concat:V8HI
(truncate:V4HI
- (match_operand:V4SI 1 "nonimmediate_operand" "0,0,x,xm"))
+ (match_operand:V4SI 1 "nonimmediate_operand" "x,x,m"))
(truncate:V4HI
- (match_operand:V4SI 2 "nonimmediate_operand" "x,xm,xm,x"))))
- (use (match_operand:V16QI 3 "nonimmediate_operand" "xm,x,0,0"))]
- "TARGET_SSE5 && ix86_sse5_valid_op_p (operands, insn, 4, true, 1, false)"
- "pperm\t{%3, %2, %1, %0|%0, %1, %2, %3}"
+ (match_operand:V4SI 2 "nonimmediate_operand" "x,m,x"))))
+ (use (match_operand:V16QI 3 "nonimmediate_operand" "xm,x,x"))]
+ "TARGET_XOP && ix86_fma4_valid_op_p (operands, insn, 4, true, 1, false)"
+ "vpperm\t{%3, %2, %1, %0|%0, %1, %2, %3}"
[(set_attr "type" "sse4arg")
(set_attr "mode" "TI")])
+;; Pack two V8HI vectors into one V16QI; operand 3 is the vpperm byte
+;; selector, referenced via USE.
-(define_insn "sse5_pperm_pack_v8hi_v16qi"
- [(set (match_operand:V16QI 0 "register_operand" "=x,x,x,x")
+(define_insn "xop_pperm_pack_v8hi_v16qi"
+ [(set (match_operand:V16QI 0 "register_operand" "=x,x,x")
(vec_concat:V16QI
(truncate:V8QI
- (match_operand:V8HI 1 "nonimmediate_operand" "0,0,x,xm"))
+ (match_operand:V8HI 1 "nonimmediate_operand" "x,x,m"))
(truncate:V8QI
- (match_operand:V8HI 2 "nonimmediate_operand" "x,xm,xm,x"))))
- (use (match_operand:V16QI 3 "nonimmediate_operand" "xm,x,0,0"))]
- "TARGET_SSE5 && ix86_sse5_valid_op_p (operands, insn, 4, true, 1, false)"
- "pperm\t{%3, %2, %1, %0|%0, %1, %2, %3}"
+ (match_operand:V8HI 2 "nonimmediate_operand" "x,m,x"))))
+ (use (match_operand:V16QI 3 "nonimmediate_operand" "xm,x,x"))]
+ "TARGET_XOP && ix86_fma4_valid_op_p (operands, insn, 4, true, 1, false)"
+ "vpperm\t{%3, %2, %1, %0|%0, %1, %2, %3}"
[(set_attr "type" "sse4arg")
(set_attr "mode" "TI")])
-;; Floating point permutation (permps, permpd)
-(define_insn "sse5_perm<mode>"
- [(set (match_operand:SSEMODEF2P 0 "register_operand" "=x,x,x,x")
- (unspec:SSEMODEF2P
- [(match_operand:SSEMODEF2P 1 "nonimmediate_operand" "0,0,x,xm")
- (match_operand:SSEMODEF2P 2 "nonimmediate_operand" "x,xm,xm,x")
- (match_operand:V16QI 3 "nonimmediate_operand" "xm,x,0,0")]
- UNSPEC_SSE5_PERMUTE))]
- "TARGET_SSE5 && ix86_sse5_valid_op_p (operands, insn, 4, true, 1, false)"
- "perm<ssemodesuffixf4>\t{%3, %2, %1, %0|%0, %1, %2, %3}"
- [(set_attr "type" "sse4arg")
- (set_attr "mode" "<MODE>")])
-
-;; SSE5 packed rotate instructions
+;; XOP packed rotate instructions
+;; The expanders below splat a scalar rotate count into a vector register
+;; when the count is not a valid immediate for vprot.
(define_expand "rotl<mode>3"
[(set (match_operand:SSEMODE1248 0 "register_operand" "")
(rotate:SSEMODE1248
(match_operand:SSEMODE1248 1 "nonimmediate_operand" "")
(match_operand:SI 2 "general_operand")))]
- "TARGET_SSE5"
+ "TARGET_XOP"
{
/* If we were given a scalar, convert it to parallel */
if (! const_0_to_<sserotatemax>_operand (operands[2], SImode))
RTVEC_ELT (vs, i) = op2;
emit_insn (gen_vec_init<mode> (reg, par));
- emit_insn (gen_sse5_vrotl<mode>3 (operands[0], operands[1], reg));
+ emit_insn (gen_xop_vrotl<mode>3 (operands[0], operands[1], reg));
DONE;
}
})
(rotatert:SSEMODE1248
(match_operand:SSEMODE1248 1 "nonimmediate_operand" "")
(match_operand:SI 2 "general_operand")))]
- "TARGET_SSE5"
+ "TARGET_XOP"
{
/* If we were given a scalar, convert it to parallel */
if (! const_0_to_<sserotatemax>_operand (operands[2], SImode))
emit_insn (gen_vec_init<mode> (reg, par));
emit_insn (gen_neg<mode>2 (neg, reg));
- emit_insn (gen_sse5_vrotl<mode>3 (operands[0], operands[1], neg));
+ emit_insn (gen_xop_vrotl<mode>3 (operands[0], operands[1], neg));
DONE;
}
})
+;; Rotate left by an immediate count.
-(define_insn "sse5_rotl<mode>3"
+(define_insn "xop_rotl<mode>3"
[(set (match_operand:SSEMODE1248 0 "register_operand" "=x")
(rotate:SSEMODE1248
(match_operand:SSEMODE1248 1 "nonimmediate_operand" "xm")
(match_operand:SI 2 "const_0_to_<sserotatemax>_operand" "n")))]
- "TARGET_SSE5"
- "prot<ssevecsize>\t{%2, %1, %0|%0, %1, %2}"
+ "TARGET_XOP"
+ "vprot<ssevecsize>\t{%2, %1, %0|%0, %1, %2}"
[(set_attr "type" "sseishft")
+ (set_attr "length_immediate" "1")
(set_attr "mode" "TI")])
+;; Rotate right by an immediate count, emitted as a rotate left by
+;; (element width in bits - count); see operands[3] below.
-(define_insn "sse5_rotr<mode>3"
+(define_insn "xop_rotr<mode>3"
[(set (match_operand:SSEMODE1248 0 "register_operand" "=x")
(rotatert:SSEMODE1248
(match_operand:SSEMODE1248 1 "nonimmediate_operand" "xm")
(match_operand:SI 2 "const_0_to_<sserotatemax>_operand" "n")))]
- "TARGET_SSE5"
+ "TARGET_XOP"
{
operands[3] = GEN_INT ((<ssescalarnum> * 8) - INTVAL (operands[2]));
- return \"prot<ssevecsize>\t{%3, %1, %0|%0, %1, %3}\";
+ return \"vprot<ssevecsize>\t{%3, %1, %0|%0, %1, %3}\";
}
[(set_attr "type" "sseishft")
+ (set_attr "length_immediate" "1")
(set_attr "mode" "TI")])
+;; Per-element variable rotate right: negate the counts and use the
+;; vrotl insn (vprot rotates right for negative counts).
(define_expand "vrotr<mode>3"
[(match_operand:SSEMODE1248 0 "register_operand" "")
(match_operand:SSEMODE1248 1 "register_operand" "")
(match_operand:SSEMODE1248 2 "register_operand" "")]
- "TARGET_SSE5"
+ "TARGET_XOP"
{
rtx reg = gen_reg_rtx (<MODE>mode);
emit_insn (gen_neg<mode>2 (reg, operands[2]));
- emit_insn (gen_sse5_vrotl<mode>3 (operands[0], operands[1], reg));
+ emit_insn (gen_xop_vrotl<mode>3 (operands[0], operands[1], reg));
DONE;
})
[(match_operand:SSEMODE1248 0 "register_operand" "")
(match_operand:SSEMODE1248 1 "register_operand" "")
(match_operand:SSEMODE1248 2 "register_operand" "")]
- "TARGET_SSE5"
+ "TARGET_XOP"
{
- emit_insn (gen_sse5_vrotl<mode>3 (operands[0], operands[1], operands[2]));
+ emit_insn (gen_xop_vrotl<mode>3 (operands[0], operands[1], operands[2]));
DONE;
})
+;; vprot with a variable count vector: elements whose count is negative
+;; rotate right, expressed as an if_then_else on the sign of operand 2.
-(define_insn "sse5_vrotl<mode>3"
+(define_insn "xop_vrotl<mode>3"
[(set (match_operand:SSEMODE1248 0 "register_operand" "=x,x")
(if_then_else:SSEMODE1248
(ge:SSEMODE1248
- (match_operand:SSEMODE1248 2 "nonimmediate_operand" "xm,x")
+ (match_operand:SSEMODE1248 2 "nonimmediate_operand" "x,m")
(const_int 0))
(rotate:SSEMODE1248
- (match_operand:SSEMODE1248 1 "nonimmediate_operand" "x,xm")
+ (match_operand:SSEMODE1248 1 "nonimmediate_operand" "xm,x")
(match_dup 2))
(rotatert:SSEMODE1248
(match_dup 1)
(neg:SSEMODE1248 (match_dup 2)))))]
- "TARGET_SSE5 && ix86_sse5_valid_op_p (operands, insn, 3, true, 1, false)"
- "prot<ssevecsize>\t{%2, %1, %0|%0, %1, %2}"
+ "TARGET_XOP && ix86_fma4_valid_op_p (operands, insn, 3, true, 1, false)"
+ "vprot<ssevecsize>\t{%2, %1, %0|%0, %1, %2}"
[(set_attr "type" "sseishft")
+ (set_attr "prefix_data16" "0")
+ (set_attr "prefix_extra" "2")
(set_attr "mode" "TI")])
-;; SSE5 packed shift instructions.
+;; XOP packed shift instructions.
;; FIXME: add V2DI back in
+;; Right shifts are implemented as left shifts by the negated count
+;; (vpshl/vpsha shift right for negative counts).
(define_expand "vlshr<mode>3"
[(match_operand:SSEMODE124 0 "register_operand" "")
(match_operand:SSEMODE124 1 "register_operand" "")
(match_operand:SSEMODE124 2 "register_operand" "")]
- "TARGET_SSE5"
+ "TARGET_XOP"
{
rtx neg = gen_reg_rtx (<MODE>mode);
emit_insn (gen_neg<mode>2 (neg, operands[2]));
- emit_insn (gen_sse5_lshl<mode>3 (operands[0], operands[1], neg));
+ emit_insn (gen_xop_lshl<mode>3 (operands[0], operands[1], neg));
DONE;
})
[(match_operand:SSEMODE124 0 "register_operand" "")
(match_operand:SSEMODE124 1 "register_operand" "")
(match_operand:SSEMODE124 2 "register_operand" "")]
- "TARGET_SSE5"
+ "TARGET_XOP"
{
rtx neg = gen_reg_rtx (<MODE>mode);
emit_insn (gen_neg<mode>2 (neg, operands[2]));
- emit_insn (gen_sse5_ashl<mode>3 (operands[0], operands[1], neg));
+ emit_insn (gen_xop_ashl<mode>3 (operands[0], operands[1], neg));
DONE;
})
[(match_operand:SSEMODE124 0 "register_operand" "")
(match_operand:SSEMODE124 1 "register_operand" "")
(match_operand:SSEMODE124 2 "register_operand" "")]
- "TARGET_SSE5"
+ "TARGET_XOP"
{
- emit_insn (gen_sse5_ashl<mode>3 (operands[0], operands[1], operands[2]));
+ emit_insn (gen_xop_ashl<mode>3 (operands[0], operands[1], operands[2]));
DONE;
})
+;; vpsha: per-element variable shift; negative counts shift right
+;; arithmetically (ashiftrt arm of the if_then_else).
-(define_insn "sse5_ashl<mode>3"
+(define_insn "xop_ashl<mode>3"
[(set (match_operand:SSEMODE1248 0 "register_operand" "=x,x")
(if_then_else:SSEMODE1248
(ge:SSEMODE1248
- (match_operand:SSEMODE1248 2 "nonimmediate_operand" "xm,x")
+ (match_operand:SSEMODE1248 2 "nonimmediate_operand" "x,m")
(const_int 0))
(ashift:SSEMODE1248
- (match_operand:SSEMODE1248 1 "nonimmediate_operand" "x,xm")
+ (match_operand:SSEMODE1248 1 "nonimmediate_operand" "xm,x")
(match_dup 2))
(ashiftrt:SSEMODE1248
(match_dup 1)
(neg:SSEMODE1248 (match_dup 2)))))]
- "TARGET_SSE5 && ix86_sse5_valid_op_p (operands, insn, 3, true, 1, false)"
- "psha<ssevecsize>\t{%2, %1, %0|%0, %1, %2}"
+ "TARGET_XOP && ix86_fma4_valid_op_p (operands, insn, 3, true, 1, false)"
+ "vpsha<ssevecsize>\t{%2, %1, %0|%0, %1, %2}"
[(set_attr "type" "sseishft")
+ (set_attr "prefix_data16" "0")
+ (set_attr "prefix_extra" "2")
(set_attr "mode" "TI")])
+;; vpshl: per-element variable shift; negative counts shift right
+;; logically (lshiftrt arm of the if_then_else).
-(define_insn "sse5_lshl<mode>3"
+(define_insn "xop_lshl<mode>3"
[(set (match_operand:SSEMODE1248 0 "register_operand" "=x,x")
(if_then_else:SSEMODE1248
(ge:SSEMODE1248
- (match_operand:SSEMODE1248 2 "nonimmediate_operand" "xm,x")
+ (match_operand:SSEMODE1248 2 "nonimmediate_operand" "x,m")
(const_int 0))
(ashift:SSEMODE1248
- (match_operand:SSEMODE1248 1 "nonimmediate_operand" "x,xm")
+ (match_operand:SSEMODE1248 1 "nonimmediate_operand" "xm,x")
(match_dup 2))
(lshiftrt:SSEMODE1248
(match_dup 1)
(neg:SSEMODE1248 (match_dup 2)))))]
- "TARGET_SSE5 && ix86_sse5_valid_op_p (operands, insn, 3, true, 1, false)"
- "pshl<ssevecsize>\t{%2, %1, %0|%0, %1, %2}"
+ "TARGET_XOP && ix86_fma4_valid_op_p (operands, insn, 3, true, 1, false)"
+ "vpshl<ssevecsize>\t{%2, %1, %0|%0, %1, %2}"
[(set_attr "type" "sseishft")
+ (set_attr "prefix_data16" "0")
+ (set_attr "prefix_extra" "2")
(set_attr "mode" "TI")])
-;; SSE2 doesn't have some shift varients, so define versions for SSE5
+;; SSE2 doesn't have some shift variants, so define versions for XOP
(define_expand "ashlv16qi3"
[(match_operand:V16QI 0 "register_operand" "")
(match_operand:V16QI 1 "register_operand" "")
(match_operand:SI 2 "nonmemory_operand" "")]
- "TARGET_SSE5"
+ "TARGET_XOP"
{
rtvec vs = rtvec_alloc (16);
rtx par = gen_rtx_PARALLEL (V16QImode, vs);
RTVEC_ELT (vs, i) = operands[2];
emit_insn (gen_vec_initv16qi (reg, par));
- emit_insn (gen_sse5_ashlv16qi3 (operands[0], operands[1], reg));
+ emit_insn (gen_xop_ashlv16qi3 (operands[0], operands[1], reg));
DONE;
})
[(match_operand:V16QI 0 "register_operand" "")
(match_operand:V16QI 1 "register_operand" "")
(match_operand:SI 2 "nonmemory_operand" "")]
- "TARGET_SSE5"
+ "TARGET_XOP"
{
rtvec vs = rtvec_alloc (16);
rtx par = gen_rtx_PARALLEL (V16QImode, vs);
RTVEC_ELT (vs, i) = operands[2];
emit_insn (gen_vec_initv16qi (reg, par));
- emit_insn (gen_sse5_lshlv16qi3 (operands[0], operands[1], reg));
+ emit_insn (gen_xop_lshlv16qi3 (operands[0], operands[1], reg));
DONE;
})
[(match_operand:V16QI 0 "register_operand" "")
(match_operand:V16QI 1 "register_operand" "")
(match_operand:SI 2 "nonmemory_operand" "")]
- "TARGET_SSE5"
+ "TARGET_XOP"
{
rtvec vs = rtvec_alloc (16);
rtx par = gen_rtx_PARALLEL (V16QImode, vs);
rtx reg = gen_reg_rtx (V16QImode);
int i;
- rtx ele = ((GET_CODE (operands[2]) == CONST_INT)
+ rtx ele = ((CONST_INT_P (operands[2]))
? GEN_INT (- INTVAL (operands[2]))
: operands[2]);
emit_insn (gen_vec_initv16qi (reg, par));
- if (GET_CODE (operands[2]) != CONST_INT)
+ if (!CONST_INT_P (operands[2]))
{
rtx neg = gen_reg_rtx (V16QImode);
emit_insn (gen_negv16qi2 (neg, reg));
- emit_insn (gen_sse5_ashlv16qi3 (operands[0], operands[1], neg));
+ emit_insn (gen_xop_ashlv16qi3 (operands[0], operands[1], neg));
}
else
- emit_insn (gen_sse5_ashlv16qi3 (operands[0], operands[1], reg));
+ emit_insn (gen_xop_ashlv16qi3 (operands[0], operands[1], reg));
DONE;
})
[(match_operand:V2DI 0 "register_operand" "")
(match_operand:V2DI 1 "register_operand" "")
(match_operand:DI 2 "nonmemory_operand" "")]
- "TARGET_SSE5"
+ "TARGET_XOP"
{
rtvec vs = rtvec_alloc (2);
rtx par = gen_rtx_PARALLEL (V2DImode, vs);
rtx reg = gen_reg_rtx (V2DImode);
rtx ele;
- if (GET_CODE (operands[2]) == CONST_INT)
+ if (CONST_INT_P (operands[2]))
ele = GEN_INT (- INTVAL (operands[2]));
else if (GET_MODE (operands[2]) != DImode)
{
RTVEC_ELT (vs, 0) = ele;
RTVEC_ELT (vs, 1) = ele;
emit_insn (gen_vec_initv2di (reg, par));
- emit_insn (gen_sse5_ashlv2di3 (operands[0], operands[1], reg));
+ emit_insn (gen_xop_ashlv2di3 (operands[0], operands[1], reg));
DONE;
})
-;; SSE5 FRCZ support
+;; XOP FRCZ support
;; parallel insns
-(define_insn "sse5_frcz<mode>2"
+(define_insn "xop_frcz<mode>2"
[(set (match_operand:SSEMODEF2P 0 "register_operand" "=x")
(unspec:SSEMODEF2P
[(match_operand:SSEMODEF2P 1 "nonimmediate_operand" "xm")]
UNSPEC_FRCZ))]
- "TARGET_SSE5"
- "frcz<ssemodesuffixf4>\t{%1, %0|%0, %1}"
+ "TARGET_XOP"
+ "vfrcz<ssemodesuffixf4>\t{%1, %0|%0, %1}"
[(set_attr "type" "ssecvt1")
- (set_attr "prefix_extra" "1")
(set_attr "mode" "<MODE>")])
;; scalar insns
-(define_insn "sse5_vmfrcz<mode>2"
+(define_insn "xop_vmfrcz<mode>2"
[(set (match_operand:SSEMODEF2P 0 "register_operand" "=x")
(vec_merge:SSEMODEF2P
(unspec:SSEMODEF2P
UNSPEC_FRCZ)
(match_operand:SSEMODEF2P 1 "register_operand" "0")
(const_int 1)))]
- "TARGET_SSE5"
- "frcz<ssemodesuffixf2s>\t{%2, %0|%0, %2}"
+ "TARGET_XOP"
+ "vfrcz<ssemodesuffixf2s>\t{%2, %0|%0, %2}"
[(set_attr "type" "ssecvt1")
- (set_attr "prefix_extra" "1")
- (set_attr "mode" "<MODE>")])
-
-(define_insn "sse5_cvtph2ps"
- [(set (match_operand:V4SF 0 "register_operand" "=x")
- (unspec:V4SF [(match_operand:V4HI 1 "nonimmediate_operand" "xm")]
- UNSPEC_CVTPH2PS))]
- "TARGET_SSE5"
- "cvtph2ps\t{%1, %0|%0, %1}"
- [(set_attr "type" "ssecvt")
- (set_attr "mode" "V4SF")])
-
-(define_insn "sse5_cvtps2ph"
- [(set (match_operand:V4HI 0 "nonimmediate_operand" "=xm")
- (unspec:V4HI [(match_operand:V4SF 1 "register_operand" "x")]
- UNSPEC_CVTPS2PH))]
- "TARGET_SSE5"
- "cvtps2ph\t{%1, %0|%0, %1}"
- [(set_attr "type" "ssecvt")
- (set_attr "mode" "V4SF")])
-
-;; Scalar versions of the com instructions that use vector types that are
-;; called from the intrinsics. Unlike the the other s{s,d} instructions, the
-;; com instructions fill in 0's in the upper bits instead of leaving them
-;; unmodified, so we use const_vector of 0 instead of match_dup.
-(define_expand "sse5_vmmaskcmp<mode>3"
- [(set (match_operand:SSEMODEF2P 0 "register_operand" "")
- (vec_merge:SSEMODEF2P
- (match_operator:SSEMODEF2P 1 "sse5_comparison_float_operator"
- [(match_operand:SSEMODEF2P 2 "register_operand" "")
- (match_operand:SSEMODEF2P 3 "nonimmediate_operand" "")])
- (match_dup 4)
- (const_int 1)))]
- "TARGET_SSE5"
-{
- operands[4] = CONST0_RTX (<MODE>mode);
-})
-
-(define_insn "*sse5_vmmaskcmp<mode>3"
- [(set (match_operand:SSEMODEF2P 0 "register_operand" "=x")
- (vec_merge:SSEMODEF2P
- (match_operator:SSEMODEF2P 1 "sse5_comparison_float_operator"
- [(match_operand:SSEMODEF2P 2 "register_operand" "x")
- (match_operand:SSEMODEF2P 3 "nonimmediate_operand" "xm")])
- (match_operand:SSEMODEF2P 4 "")
- (const_int 1)))]
- "TARGET_SSE5"
- "com%Y1<ssemodesuffixf2s>\t{%3, %2, %0|%0, %2, %3}"
- [(set_attr "type" "sse4arg")
- (set_attr "mode" "<ssescalarmode>")])
-
-;; We don't have a comparison operator that always returns true/false, so
-;; handle comfalse and comtrue specially.
-(define_insn "sse5_com_tf<mode>3"
- [(set (match_operand:SSEMODEF2P 0 "register_operand" "=x")
- (unspec:SSEMODEF2P
- [(match_operand:SSEMODEF2P 1 "register_operand" "x")
- (match_operand:SSEMODEF2P 2 "nonimmediate_operand" "xm")
- (match_operand:SI 3 "const_int_operand" "n")]
- UNSPEC_SSE5_TRUEFALSE))]
- "TARGET_SSE5"
-{
- const char *ret = NULL;
-
- switch (INTVAL (operands[3]))
- {
- case COM_FALSE_S:
- ret = \"comfalses<ssemodesuffixf2c>\t{%2, %1, %0|%0, %1, %2}\";
- break;
-
- case COM_FALSE_P:
- ret = \"comfalsep<ssemodesuffixf2c>\t{%2, %1, %0|%0, %1, %2}\";
- break;
-
- case COM_TRUE_S:
- ret = \"comfalses<ssemodesuffixf2c>\t{%2, %1, %0|%0, %1, %2}\";
- break;
-
- case COM_TRUE_P:
- ret = \"comfalsep<ssemodesuffixf2c>\t{%2, %1, %0|%0, %1, %2}\";
- break;
-
- default:
- gcc_unreachable ();
- }
-
- return ret;
-}
- [(set_attr "type" "ssecmp")
(set_attr "mode" "<MODE>")])
-(define_insn "sse5_maskcmp<mode>3"
- [(set (match_operand:SSEMODEF2P 0 "register_operand" "=x")
- (match_operator:SSEMODEF2P 1 "sse5_comparison_float_operator"
- [(match_operand:SSEMODEF2P 2 "register_operand" "x")
- (match_operand:SSEMODEF2P 3 "nonimmediate_operand" "xm")]))]
- "TARGET_SSE5"
- "com%Y1<ssemodesuffixf4>\t{%3, %2, %0|%0, %2, %3}"
- [(set_attr "type" "ssecmp")
+;; 256-bit (AVX-width) vfrcz; FMA4MODEF4 covers V8SF and V4DF.
+(define_insn "xop_frcz<mode>2256"
+ [(set (match_operand:FMA4MODEF4 0 "register_operand" "=x")
+ (unspec:FMA4MODEF4
+ [(match_operand:FMA4MODEF4 1 "nonimmediate_operand" "xm")]
+ UNSPEC_FRCZ))]
+ "TARGET_XOP"
+ "vfrcz<fma4modesuffixf4>\t{%1, %0|%0, %1}"
+ [(set_attr "type" "ssecvt1")
(set_attr "mode" "<MODE>")])
+;; Integer vpcom comparisons.  NOTE(review): %Y1 presumably prints the
+;; comparison predicate as the pcom condition suffix -- confirm against
+;; ix86_print_operand.  Also note the type attr asymmetry below
+;; (sse4arg here vs ssecmp on the unsigned variant) -- confirm intended.
-(define_insn "sse5_maskcmp<mode>3"
+(define_insn "xop_maskcmp<mode>3"
[(set (match_operand:SSEMODE1248 0 "register_operand" "=x")
(match_operator:SSEMODE1248 1 "ix86_comparison_int_operator"
[(match_operand:SSEMODE1248 2 "register_operand" "x")
(match_operand:SSEMODE1248 3 "nonimmediate_operand" "xm")]))]
- "TARGET_SSE5"
- "pcom%Y1<ssevecsize>\t{%3, %2, %0|%0, %2, %3}"
+ "TARGET_XOP"
+ "vpcom%Y1<ssevecsize>\t{%3, %2, %0|%0, %2, %3}"
[(set_attr "type" "sse4arg")
+ (set_attr "prefix_data16" "0")
+ (set_attr "prefix_rep" "0")
+ (set_attr "prefix_extra" "2")
+ (set_attr "length_immediate" "1")
(set_attr "mode" "TI")])
-(define_insn "sse5_maskcmp_uns<mode>3"
+(define_insn "xop_maskcmp_uns<mode>3"
[(set (match_operand:SSEMODE1248 0 "register_operand" "=x")
(match_operator:SSEMODE1248 1 "ix86_comparison_uns_operator"
[(match_operand:SSEMODE1248 2 "register_operand" "x")
(match_operand:SSEMODE1248 3 "nonimmediate_operand" "xm")]))]
- "TARGET_SSE5"
- "pcom%Y1u<ssevecsize>\t{%3, %2, %0|%0, %2, %3}"
+ "TARGET_XOP"
+ "vpcom%Y1u<ssevecsize>\t{%3, %2, %0|%0, %2, %3}"
[(set_attr "type" "ssecmp")
+ (set_attr "prefix_data16" "0")
+ (set_attr "prefix_rep" "0")
+ (set_attr "prefix_extra" "2")
+ (set_attr "length_immediate" "1")
(set_attr "mode" "TI")])
;; Version of pcom*u* that is called from the intrinsics that allows pcomequ*
;; and pcomneu* not to be converted to the signed ones in case somebody needs
;; the exact instruction generated for the intrinsic.
-(define_insn "sse5_maskcmp_uns2<mode>3"
+(define_insn "xop_maskcmp_uns2<mode>3"
[(set (match_operand:SSEMODE1248 0 "register_operand" "=x")
(unspec:SSEMODE1248
[(match_operator:SSEMODE1248 1 "ix86_comparison_uns_operator"
[(match_operand:SSEMODE1248 2 "register_operand" "x")
(match_operand:SSEMODE1248 3 "nonimmediate_operand" "xm")])]
- UNSPEC_SSE5_UNSIGNED_CMP))]
- "TARGET_SSE5"
- "pcom%Y1u<ssevecsize>\t{%3, %2, %0|%0, %2, %3}"
+ UNSPEC_XOP_UNSIGNED_CMP))]
+ "TARGET_XOP"
+ "vpcom%Y1u<ssevecsize>\t{%3, %2, %0|%0, %2, %3}"
[(set_attr "type" "ssecmp")
+ (set_attr "prefix_data16" "0")
+ (set_attr "prefix_extra" "2")
+ (set_attr "length_immediate" "1")
(set_attr "mode" "TI")])
;; Pcomtrue and pcomfalse support. These are useless instructions, but are
;; being added here to be complete.
-(define_insn "sse5_pcom_tf<mode>3"
+(define_insn "xop_pcom_tf<mode>3"
[(set (match_operand:SSEMODE1248 0 "register_operand" "=x")
(unspec:SSEMODE1248
[(match_operand:SSEMODE1248 1 "register_operand" "x")
(match_operand:SSEMODE1248 2 "nonimmediate_operand" "xm")
(match_operand:SI 3 "const_int_operand" "n")]
- UNSPEC_SSE5_TRUEFALSE))]
- "TARGET_SSE5"
+ UNSPEC_XOP_TRUEFALSE))]
+ "TARGET_XOP"
{
return ((INTVAL (operands[3]) != 0)
- ? "pcomtrue<ssevecsize>\t{%2, %1, %0|%0, %1, %2}"
- : "pcomfalse<ssevecsize>\t{%2, %1, %0|%0, %1, %2}");
+ ? "vpcomtrue<ssevecsize>\t{%2, %1, %0|%0, %1, %2}"
+ : "vpcomfalse<ssevecsize>\t{%2, %1, %0|%0, %1, %2}");
}
[(set_attr "type" "ssecmp")
+ (set_attr "prefix_data16" "0")
+ (set_attr "prefix_extra" "2")
+ (set_attr "length_immediate" "1")
(set_attr "mode" "TI")])
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;; The AVX/AES/PCLMUL changes below only add encoding attributes
+;; (prefix_extra, length_immediate, modrm) -- no pattern semantics change.
(define_insn "*avx_aesenc"
[(set (match_operand:V2DI 0 "register_operand" "=x")
(unspec:V2DI [(match_operand:V2DI 1 "register_operand" "x")
"TARGET_AES && TARGET_AVX"
"vaesenc\t{%2, %1, %0|%0, %1, %2}"
[(set_attr "type" "sselog1")
+ (set_attr "prefix_extra" "1")
(set_attr "prefix" "vex")
(set_attr "mode" "TI")])
"TARGET_AES && TARGET_AVX"
"vaesenclast\t{%2, %1, %0|%0, %1, %2}"
[(set_attr "type" "sselog1")
+ (set_attr "prefix_extra" "1")
(set_attr "prefix" "vex")
(set_attr "mode" "TI")])
"TARGET_AES && TARGET_AVX"
"vaesdec\t{%2, %1, %0|%0, %1, %2}"
[(set_attr "type" "sselog1")
+ (set_attr "prefix_extra" "1")
(set_attr "prefix" "vex")
(set_attr "mode" "TI")])
"TARGET_AES && TARGET_AVX"
"vaesdeclast\t{%2, %1, %0|%0, %1, %2}"
[(set_attr "type" "sselog1")
+ (set_attr "prefix_extra" "1")
(set_attr "prefix" "vex")
(set_attr "mode" "TI")])
"%vaeskeygenassist\t{%2, %1, %0|%0, %1, %2}"
[(set_attr "type" "sselog1")
(set_attr "prefix_extra" "1")
+ (set_attr "length_immediate" "1")
(set_attr "prefix" "maybe_vex")
(set_attr "mode" "TI")])
"TARGET_PCLMUL && TARGET_AVX"
"vpclmulqdq\t{%3, %2, %1, %0|%0, %1, %2, %3}"
[(set_attr "type" "sselog1")
+ (set_attr "prefix_extra" "1")
+ (set_attr "length_immediate" "1")
(set_attr "prefix" "vex")
(set_attr "mode" "TI")])
"pclmulqdq\t{%3, %2, %0|%0, %2, %3}"
[(set_attr "type" "sselog1")
(set_attr "prefix_extra" "1")
+ (set_attr "length_immediate" "1")
(set_attr "mode" "TI")])
(define_expand "avx_vzeroall"
"TARGET_AVX"
"vzeroall"
[(set_attr "type" "sse")
+ (set_attr "modrm" "0")
(set_attr "memory" "none")
(set_attr "prefix" "vex")
(set_attr "mode" "OI")])
"TARGET_AVX && !TARGET_64BIT"
"vzeroupper"
[(set_attr "type" "sse")
+ (set_attr "modrm" "0")
(set_attr "memory" "none")
(set_attr "prefix" "vex")
(set_attr "mode" "OI")])
"TARGET_AVX && TARGET_64BIT"
"vzeroupper"
[(set_attr "type" "sse")
+ (set_attr "modrm" "0")
(set_attr "memory" "none")
(set_attr "prefix" "vex")
(set_attr "mode" "OI")])
"TARGET_AVX"
"vpermilp<avxmodesuffixf2c>\t{%2, %1, %0|%0, %1, %2}"
[(set_attr "type" "sselog")
+ (set_attr "prefix_extra" "1")
+ (set_attr "length_immediate" "1")
(set_attr "prefix" "vex")
(set_attr "mode" "<MODE>")])
"TARGET_AVX"
"vpermilp<avxmodesuffixf2c>\t{%2, %1, %0|%0, %1, %2}"
[(set_attr "type" "sselog")
+ (set_attr "prefix_extra" "1")
(set_attr "prefix" "vex")
(set_attr "mode" "<MODE>")])
"TARGET_AVX"
"vperm2f128\t{%3, %2, %1, %0|%0, %1, %2, %3}"
[(set_attr "type" "sselog")
+ (set_attr "prefix_extra" "1")
+ (set_attr "length_immediate" "1")
(set_attr "prefix" "vex")
(set_attr "mode" "V8SF")])
"TARGET_AVX"
"vbroadcasts<avxmodesuffixf2c>\t{%1, %0|%0, %1}"
[(set_attr "type" "ssemov")
+ (set_attr "prefix_extra" "1")
(set_attr "prefix" "vex")
(set_attr "mode" "<avxscalarmode>")])
"TARGET_AVX"
"vbroadcastss\t{%1, %0|%0, %1}"
[(set_attr "type" "ssemov")
+ (set_attr "prefix_extra" "1")
(set_attr "prefix" "vex")
(set_attr "mode" "SF")])
"TARGET_AVX"
"vbroadcastf128\t{%1, %0|%0, %1}"
[(set_attr "type" "ssemov")
+ (set_attr "prefix_extra" "1")
(set_attr "prefix" "vex")
(set_attr "mode" "V4SF")])
"TARGET_AVX"
"vinsertf128\t{$0x0, %2, %1, %0|%0, %1, %2, 0x0}"
[(set_attr "type" "sselog")
+ (set_attr "prefix_extra" "1")
+ (set_attr "length_immediate" "1")
(set_attr "prefix" "vex")
(set_attr "mode" "V8SF")])
"TARGET_AVX"
"vinsertf128\t{$0x1, %2, %1, %0|%0, %1, %2, 0x1}"
[(set_attr "type" "sselog")
+ (set_attr "prefix_extra" "1")
+ (set_attr "length_immediate" "1")
(set_attr "prefix" "vex")
(set_attr "mode" "V8SF")])
"TARGET_AVX"
"vinsertf128\t{$0x0, %2, %1, %0|%0, %1, %2, 0x0}"
[(set_attr "type" "sselog")
+ (set_attr "prefix_extra" "1")
+ (set_attr "length_immediate" "1")
(set_attr "prefix" "vex")
(set_attr "mode" "V8SF")])
"TARGET_AVX"
"vinsertf128\t{$0x1, %2, %1, %0|%0, %1, %2, 0x1}"
[(set_attr "type" "sselog")
+ (set_attr "prefix_extra" "1")
+ (set_attr "length_immediate" "1")
(set_attr "prefix" "vex")
(set_attr "mode" "V8SF")])
"TARGET_AVX"
"vinsertf128\t{$0x0, %2, %1, %0|%0, %1, %2, 0x0}"
[(set_attr "type" "sselog")
+ (set_attr "prefix_extra" "1")
+ (set_attr "length_immediate" "1")
(set_attr "prefix" "vex")
(set_attr "mode" "V8SF")])
"TARGET_AVX"
"vinsertf128\t{$0x1, %2, %1, %0|%0, %1, %2, 0x1}"
[(set_attr "type" "sselog")
+ (set_attr "prefix_extra" "1")
+ (set_attr "length_immediate" "1")
(set_attr "prefix" "vex")
(set_attr "mode" "V8SF")])
"TARGET_AVX"
"vinsertf128\t{$0x0, %2, %1, %0|%0, %1, %2, 0x0}"
[(set_attr "type" "sselog")
+ (set_attr "prefix_extra" "1")
+ (set_attr "length_immediate" "1")
(set_attr "prefix" "vex")
(set_attr "mode" "V8SF")])
"TARGET_AVX"
"vinsertf128\t{$0x1, %2, %1, %0|%0, %1, %2, 0x1}"
[(set_attr "type" "sselog")
+ (set_attr "prefix_extra" "1")
+ (set_attr "length_immediate" "1")
(set_attr "prefix" "vex")
(set_attr "mode" "V8SF")])
"TARGET_AVX"
"vmaskmovp<avxmodesuffixf2c>\t{%1, %2, %0|%0, %2, %1}"
[(set_attr "type" "sselog1")
+ (set_attr "prefix_extra" "1")
(set_attr "prefix" "vex")
(set_attr "mode" "<MODE>")])
"TARGET_AVX"
"vmaskmovp<avxmodesuffixf2c>\t{%2, %1, %0|%0, %1, %2}"
[(set_attr "type" "sselog1")
+ (set_attr "prefix_extra" "1")
(set_attr "prefix" "vex")
(set_attr "mode" "<MODE>")])
}
}
[(set_attr "type" "sselog,ssemov")
+ (set_attr "prefix_extra" "1,*")
+ (set_attr "length_immediate" "1,*")
(set_attr "prefix" "vex")
(set_attr "mode" "<avxvecmode>")])