;; along with GCC; see the file COPYING3. If not see
;; <http://www.gnu.org/licenses/>.
-;; All vector modes including V1TImode, used in move patterns.
+;; All vector modes including V?TImode, used in move patterns.
(define_mode_iterator V16
[(V32QI "TARGET_AVX") V16QI
(V16HI "TARGET_AVX") V8HI
(V8SI "TARGET_AVX") V4SI
(V4DI "TARGET_AVX") V2DI
- V1TI
+ (V2TI "TARGET_AVX") V1TI
(V8SF "TARGET_AVX") V4SF
(V4DF "TARGET_AVX") V2DF])
(V8SI "TARGET_AVX") V4SI
(V4DI "TARGET_AVX") V2DI])
+;; All vector integer modes, extended to 256 bits when AVX2 is enabled.
+(define_mode_iterator VI_AVX2
+ [(V32QI "TARGET_AVX2") V16QI
+ (V16HI "TARGET_AVX2") V8HI
+ (V8SI "TARGET_AVX2") V4SI
+ (V4DI "TARGET_AVX2") V2DI])
+
;; All QImode vector integer modes
(define_mode_iterator VI1
[(V32QI "TARGET_AVX") V16QI])
(define_mode_iterator VI8
[(V4DI "TARGET_AVX") V2DI])
+;; All QImode vector integer modes, 256-bit with AVX2.
+(define_mode_iterator VI1_AVX2
+ [(V32QI "TARGET_AVX2") V16QI])
+
+;; All HImode vector integer modes, 256-bit with AVX2.
+(define_mode_iterator VI2_AVX2
+ [(V16HI "TARGET_AVX2") V8HI])
+
+;; All SImode vector integer modes, 256-bit with AVX2.
+(define_mode_iterator VI4_AVX2
+ [(V8SI "TARGET_AVX2") V4SI])
+
+;; All DImode vector integer modes, 256-bit with AVX2.
+(define_mode_iterator VI8_AVX2
+ [(V4DI "TARGET_AVX2") V2DI])
+
+;; ??? We should probably use TImode instead.
+(define_mode_iterator VIMAX_AVX2
+ [(V2TI "TARGET_AVX2") V1TI])
+
+;; ??? This should probably be dropped in favor of VIMAX_AVX2.
+(define_mode_iterator SSESCALARMODE
+ [(V2TI "TARGET_AVX2") TI])
+
+;; QImode and HImode vector integer modes, 256-bit with AVX2.
+(define_mode_iterator VI12_AVX2
+ [(V32QI "TARGET_AVX2") V16QI
+ (V16HI "TARGET_AVX2") V8HI])
+
+;; HImode and SImode vector integer modes, 256-bit with AVX2.
+(define_mode_iterator VI24_AVX2
+ [(V16HI "TARGET_AVX2") V8HI
+ (V8SI "TARGET_AVX2") V4SI])
+
+;; QImode, HImode and SImode vector integer modes, 256-bit with AVX2.
+(define_mode_iterator VI124_AVX2
+ [(V32QI "TARGET_AVX2") V16QI
+ (V16HI "TARGET_AVX2") V8HI
+ (V8SI "TARGET_AVX2") V4SI])
+
+;; HImode, SImode and DImode vector integer modes, 256-bit with AVX2.
+(define_mode_iterator VI248_AVX2
+ [(V16HI "TARGET_AVX2") V8HI
+ (V8SI "TARGET_AVX2") V4SI
+ (V4DI "TARGET_AVX2") V2DI])
+
+;; SImode and DImode vector integer modes, 256-bit with AVX2.
+(define_mode_iterator VI48_AVX2
+ [(V8SI "TARGET_AVX2") V4SI
+ (V4DI "TARGET_AVX2") V2DI])
+
+;; Vector float modes plus, when AVX2 is enabled, the same-sized
+;; SImode/DImode integer vector modes.
+(define_mode_iterator V48_AVX2
+ [V4SF V2DF
+ V8SF V4DF
+ (V4SI "TARGET_AVX2") (V2DI "TARGET_AVX2")
+ (V8SI "TARGET_AVX2") (V4DI "TARGET_AVX2")])
+
+;; Mapping of modes to the "sse2"/"avx2" pattern-name prefix.
+(define_mode_attr sse2_avx2
+ [(V16QI "sse2") (V32QI "avx2")
+ (V8HI "sse2") (V16HI "avx2")
+ (V4SI "sse2") (V8SI "avx2")
+ (V2DI "sse2") (V4DI "avx2")
+ (V1TI "sse2") (V2TI "avx2")])
+
+;; Mapping of modes to the "ssse3"/"avx2" pattern-name prefix.
+(define_mode_attr ssse3_avx2
+ [(V16QI "ssse3") (V32QI "avx2")
+ (V8HI "ssse3") (V16HI "avx2")
+ (V4SI "ssse3") (V8SI "avx2")
+ (V2DI "ssse3") (V4DI "avx2")
+ (TI "ssse3") (V2TI "avx2")])
+
+;; Mapping of modes to the "sse4_1"/"avx2" pattern-name prefix.
+(define_mode_attr sse4_1_avx2
+ [(V16QI "sse4_1") (V32QI "avx2")
+ (V8HI "sse4_1") (V16HI "avx2")
+ (V4SI "sse4_1") (V8SI "avx2")
+ (V2DI "sse4_1") (V4DI "avx2")])
+
+;; Mapping of modes to the "avx"/"avx2" pattern-name prefix.
+(define_mode_attr avx_avx2
+ [(V4SF "avx") (V2DF "avx")
+ (V8SF "avx") (V4DF "avx")
+ (V4SI "avx2") (V2DI "avx2")
+ (V8SI "avx2") (V4DI "avx2")])
+
+;; Mapping of modes to the "vec"/"avx2" pattern-name prefix.
+(define_mode_attr vec_avx2
+ [(V16QI "vec") (V32QI "avx2")
+ (V8HI "vec") (V16HI "avx2")
+ (V4SI "vec") (V8SI "avx2")
+ (V2DI "vec") (V4DI "avx2")])
+
+;; Mapping of an HImode vector mode to the mode with double-sized elements.
+(define_mode_attr ssedoublemode
+ [(V16HI "V16SI") (V8HI "V8SI")])
+
+;; Mapping of a DImode vector mode to the QImode vector of the same size.
+(define_mode_attr ssebytemode
+ [(V4DI "V32QI") (V2DI "V16QI")])
+
;; All 128bit vector integer modes
(define_mode_iterator VI_128 [V16QI V8HI V4SI V2DI])
+;; All 256bit vector integer modes
+(define_mode_iterator VI_256 [V32QI V16HI V8SI V4DI])
+
;; Random 128bit vector integer mode combinations
(define_mode_iterator VI12_128 [V16QI V8HI])
(define_mode_iterator VI14_128 [V16QI V4SI])
(define_mode_iterator VI124_128 [V16QI V8HI V4SI])
+(define_mode_iterator VI128_128 [V16QI V8HI V2DI])
(define_mode_iterator VI24_128 [V8HI V4SI])
(define_mode_iterator VI248_128 [V8HI V4SI V2DI])
+(define_mode_iterator VI48_128 [V4SI V2DI])
+
+;; Random 256bit vector integer mode combinations
+(define_mode_iterator VI124_256 [V32QI V16HI V8SI])
+(define_mode_iterator VI48_256 [V8SI V4DI])
;; Int-float size matches
(define_mode_iterator VI4F_128 [V4SI V4SF])
(V16QI "") (V8HI "") (V4SI "") (V2DI "")
(V8SF "256") (V4DF "256")
(V4SF "") (V2DF "")])
-
+
;; SSE instruction mode
(define_mode_attr sseinsnmode
- [(V32QI "OI") (V16HI "OI") (V8SI "OI") (V4DI "OI")
+ [(V32QI "OI") (V16HI "OI") (V8SI "OI") (V4DI "OI") (V2TI "OI")
(V16QI "TI") (V8HI "TI") (V4SI "TI") (V2DI "TI") (V1TI "TI")
(V8SF "V8SF") (V4DF "V4DF")
- (V4SF "V4SF") (V2DF "V2DF")])
+ (V4SF "V4SF") (V2DF "V2DF")
+ (TI "TI")])
;; Mapping of vector float modes to an integer mode of the same size
(define_mode_attr sseintvecmode
[(V8SF "V8SI") (V4DF "V4DI")
- (V4SF "V4SI") (V2DF "V2DI")])
+ (V4SF "V4SI") (V2DF "V2DI")
+ (V8SI "V8SI") (V4DI "V4DI")
+ (V4SI "V4SI") (V2DI "V2DI")
+ (V16HI "V16HI") (V8HI "V8HI")
+ (V32QI "V32QI") (V16QI "V16QI")])
+
+(define_mode_attr sseintvecmodelower
+ [(V8SF "v8si") (V4DF "v4di")
+ (V4SF "v4si") (V2DF "v2di")
+ (V8SI "v8si") (V4DI "v4di")
+ (V4SI "v4si") (V2DI "v2di")
+ (V16HI "v16hi") (V8HI "v8hi")
+ (V32QI "v32qi") (V16QI "v16qi")])
;; Mapping of vector modes to a vector mode of double size
(define_mode_attr ssedoublevecmode
(V8SF "8") (V4DF "4")
(V4SF "4") (V2DF "2")])
+;; SSE prefix for integer vector modes
+(define_mode_attr sseintprefix
+ [(V2DI "p") (V2DF "")
+ (V4DI "p") (V4DF "")
+ (V4SI "p") (V4SF "")
+ (V8SI "p") (V8SF "")])
+
;; SSE scalar suffix for vector modes
(define_mode_attr ssescalarmodesuffix
- [(V8SF "ss") (V4DF "sd")
+ [(SF "ss") (DF "sd")
+ (V8SF "ss") (V4DF "sd")
(V4SF "ss") (V2DF "sd")
(V8SI "ss") (V4DI "sd")
(V4SI "d")])
;; Pack/unpack vector modes
(define_mode_attr sseunpackmode
- [(V16QI "V8HI") (V8HI "V4SI") (V4SI "V2DI")])
+ [(V16QI "V8HI") (V8HI "V4SI") (V4SI "V2DI")
+ (V32QI "V16HI") (V16HI "V8SI") (V8SI "V4DI")])
(define_mode_attr ssepackmode
- [(V8HI "V16QI") (V4SI "V8HI") (V2DI "V4SI")])
+ [(V8HI "V16QI") (V4SI "V8HI") (V2DI "V4SI")
+ (V16HI "V32QI") (V8SI "V16HI") (V4DI "V8SI")])
;; Mapping of the max integer size for xop rotate immediate constraint
(define_mode_attr sserotatemax
[(V16QI "7") (V8HI "15") (V4SI "31") (V2DI "63")])
+;; Mapping of mode to cast intrinsic name
+(define_mode_attr castmode [(V8SI "si") (V8SF "ps") (V4DF "pd")])
+
;; Instruction suffix for sign and zero extensions.
(define_code_attr extsuffix [(sign_extend "sx") (zero_extend "zx")])
-
+;; i128 for integer vectors and TARGET_AVX2, f128 otherwise.
+(define_mode_attr i128
+ [(V8SF "f128") (V4DF "f128") (V32QI "%~128") (V16HI "%~128")
+ (V8SI "%~128") (V4DI "%~128")])
;; Mix-n-match
(define_mode_iterator AVX256MODE2P [V8SI V8SF V4DF])
-(define_mode_iterator AVX256MODE24P [V8SI V8SF V4DI V4DF])
(define_mode_iterator FMAMODE [SF DF V4SF V2DF V8SF V4DF])
+;; Mapping of immediate bits for blend instructions
+(define_mode_attr blendbits
+ [(V8SF "255") (V4SF "15") (V4DF "15") (V2DF "3")])
+
;; Patterns whose name begins with "sse{,2,3}_" are invoked by intrinsics.
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
case 1:
case 2:
switch (get_attr_mode (insn))
- {
+ {
case MODE_V8SF:
case MODE_V4SF:
if (TARGET_AVX
[(set_attr "type" "sselog1,ssemov,ssemov")
(set_attr "prefix" "maybe_vex")
(set (attr "mode")
- (cond [(ne (symbol_ref "TARGET_AVX") (const_int 0))
- (const_string "<sseinsnmode>")
- (ior (ior
- (ne (symbol_ref "optimize_function_for_size_p (cfun)")
- (const_int 0))
- (eq (symbol_ref "TARGET_SSE2") (const_int 0)))
+ (cond [(match_test "TARGET_AVX")
+ (const_string "<sseinsnmode>")
+ (ior (ior (match_test "optimize_function_for_size_p (cfun)")
+ (not (match_test "TARGET_SSE2")))
(and (eq_attr "alternative" "2")
- (ne (symbol_ref "TARGET_SSE_TYPELESS_STORES")
- (const_int 0))))
+ (match_test "TARGET_SSE_TYPELESS_STORES")))
(const_string "V4SF")
(eq (const_string "<MODE>mode") (const_string "V4SFmode"))
(const_string "V4SF")
/* The DImode arrived in a pair of integral registers (e.g. %edx:%eax).
Assemble the 64-bit DImode value in an xmm register. */
emit_insn (gen_sse2_loadld (operands[0], CONST0_RTX (V4SImode),
- gen_rtx_SUBREG (SImode, operands[1], 0)));
+ gen_rtx_SUBREG (SImode, operands[1], 0)));
emit_insn (gen_sse2_loadld (operands[2], CONST0_RTX (V4SImode),
gen_rtx_SUBREG (SImode, operands[1], 4)));
emit_insn (gen_vec_interleave_lowv4si (operands[0], operands[0],
- operands[2]));
+ operands[2]));
}
else if (memory_operand (operands[1], DImode))
emit_insn (gen_vec_concatv2di (gen_lowpart (V2DImode, operands[0]),
- operands[1], const0_rtx));
+ operands[1], const0_rtx));
else
gcc_unreachable ();
})
(set_attr "movu" "1")
(set (attr "prefix_data16")
(if_then_else
- (ne (symbol_ref "TARGET_AVX") (const_int 0))
+ (match_test "TARGET_AVX")
(const_string "*")
(const_string "1")))
(set_attr "prefix" "maybe_vex")
(set_attr "movu" "1")
(set (attr "prefix_data16")
(if_then_else
- (ne (symbol_ref "TARGET_AVX") (const_int 0))
+ (match_test "TARGET_AVX")
(const_string "*")
(const_string "0")))
(set (attr "prefix_rep")
(if_then_else
- (ne (symbol_ref "TARGET_AVX") (const_int 0))
+ (match_test "TARGET_AVX")
(const_string "*")
(const_string "1")))
(set_attr "prefix" "maybe_vex")
[(set_attr "type" "ssecvt")
(set (attr "prefix_data16")
(if_then_else
- (ne (symbol_ref "TARGET_AVX") (const_int 0))
+ (match_test "TARGET_AVX")
(const_string "*")
(const_string "1")))
(set_attr "prefix" "maybe_vex")
(use (match_operand:VF 2 "nonimmediate_operand" "xm,0, xm,x"))]
"TARGET_SSE"
"#"
- "reload_completed"
+ "&& reload_completed"
[(const_int 0)]
{
enum rtx_code absneg_op;
{
ix86_fixup_binary_operands_no_copy (DIV, <MODE>mode, operands);
- if (TARGET_SSE_MATH && TARGET_RECIP && !optimize_insn_for_size_p ()
+ if (TARGET_SSE_MATH
+ && TARGET_RECIP_VEC_DIV
+ && !optimize_insn_for_size_p ()
&& flag_finite_math_only && !flag_trapping_math
&& flag_unsafe_math_optimizations)
{
(sqrt:VF1 (match_operand:VF1 1 "nonimmediate_operand" "")))]
"TARGET_SSE"
{
- if (TARGET_SSE_MATH && TARGET_RECIP && !optimize_insn_for_size_p ()
+ if (TARGET_SSE_MATH
+ && TARGET_RECIP_VEC_SQRT
+ && !optimize_insn_for_size_p ()
&& flag_finite_math_only && !flag_trapping_math
&& flag_unsafe_math_optimizations)
{
emit_insn (gen_sse3_haddv4sf3 (operands[0], tmp, tmp));
}
else
- ix86_expand_reduc_v4sf (gen_addv4sf3, operands[0], operands[1]);
+ ix86_expand_reduc (gen_addv4sf3, operands[0], operands[1]);
DONE;
})
+;; Modes handled by reduc_sm{in,ax}* patterns.
+(define_mode_iterator REDUC_SMINMAX_MODE
+ [(V32QI "TARGET_AVX2") (V16HI "TARGET_AVX2")
+ (V8SI "TARGET_AVX2") (V4DI "TARGET_AVX2")
+ (V8SF "TARGET_AVX") (V4DF "TARGET_AVX")
+ (V4SF "TARGET_SSE")])
+
+;; Signed min/max reduction, expanded via ix86_expand_reduc.
+(define_expand "reduc_<code>_<mode>"
+ [(smaxmin:REDUC_SMINMAX_MODE
+ (match_operand:REDUC_SMINMAX_MODE 0 "register_operand" "")
+ (match_operand:REDUC_SMINMAX_MODE 1 "register_operand" ""))]
+ ""
+{
+ ix86_expand_reduc (gen_<code><mode>3, operands[0], operands[1]);
+ DONE;
+})
-(define_expand "reduc_smax_v4sf"
- [(match_operand:V4SF 0 "register_operand" "")
- (match_operand:V4SF 1 "register_operand" "")]
- "TARGET_SSE"
+;; Unsigned min/max reduction for 256-bit integer vectors (AVX2).
+(define_expand "reduc_<code>_<mode>"
+ [(umaxmin:VI_256
+ (match_operand:VI_256 0 "register_operand" "")
+ (match_operand:VI_256 1 "register_operand" ""))]
+ "TARGET_AVX2"
{
- ix86_expand_reduc_v4sf (gen_smaxv4sf3, operands[0], operands[1]);
+ ix86_expand_reduc (gen_<code><mode>3, operands[0], operands[1]);
DONE;
})
-(define_expand "reduc_smin_v4sf"
- [(match_operand:V4SF 0 "register_operand" "")
- (match_operand:V4SF 1 "register_operand" "")]
- "TARGET_SSE"
+;; Unsigned V8HI min reduction (SSE4.1).
+(define_expand "reduc_umin_v8hi"
+ [(umin:V8HI
+ (match_operand:V8HI 0 "register_operand" "")
+ (match_operand:V8HI 1 "register_operand" ""))]
+ "TARGET_SSE4_1"
{
- ix86_expand_reduc_v4sf (gen_sminv4sf3, operands[0], operands[1]);
+ ix86_expand_reduc (gen_uminv8hi3, operands[0], operands[1]);
DONE;
})
(const_string "0")))
(set_attr "mode" "<MODE>")])
-(define_expand "vcond<mode>"
- [(set (match_operand:VF 0 "register_operand" "")
- (if_then_else:VF
- (match_operator 3 ""
- [(match_operand:VF 4 "nonimmediate_operand" "")
- (match_operand:VF 5 "nonimmediate_operand" "")])
- (match_operand:VF 1 "general_operand" "")
- (match_operand:VF 2 "general_operand" "")))]
- "TARGET_SSE"
+;; Float vcond: the data mode (V_256) and the comparison mode (VF_256)
+;; may differ but must have the same number of elements.
+(define_expand "vcond<V_256:mode><VF_256:mode>"
+ [(set (match_operand:V_256 0 "register_operand" "")
+ (if_then_else:V_256
+ (match_operator 3 ""
+ [(match_operand:VF_256 4 "nonimmediate_operand" "")
+ (match_operand:VF_256 5 "nonimmediate_operand" "")])
+ (match_operand:V_256 1 "general_operand" "")
+ (match_operand:V_256 2 "general_operand" "")))]
+ "TARGET_AVX
+ && (GET_MODE_NUNITS (<V_256:MODE>mode)
+ == GET_MODE_NUNITS (<VF_256:MODE>mode))"
+{
+ bool ok = ix86_expand_fp_vcond (operands);
+ gcc_assert (ok);
+ DONE;
+})
+
+(define_expand "vcond<V_128:mode><VF_128:mode>"
+ [(set (match_operand:V_128 0 "register_operand" "")
+ (if_then_else:V_128
+ (match_operator 3 ""
+ [(match_operand:VF_128 4 "nonimmediate_operand" "")
+ (match_operand:VF_128 5 "nonimmediate_operand" "")])
+ (match_operand:V_128 1 "general_operand" "")
+ (match_operand:V_128 2 "general_operand" "")))]
+ "TARGET_SSE
+ && (GET_MODE_NUNITS (<V_128:MODE>mode)
+ == GET_MODE_NUNITS (<VF_128:MODE>mode))"
{
bool ok = ix86_expand_fp_vcond (operands);
gcc_assert (ok);
operands[4] = CONST0_RTX (<MODE>mode);
})
+;; Scalar FMA intrinsic patterns: the FMA result is merged into element 0
+;; of the destination (vec_merge mask 1); the remaining elements are taken
+;; from operand 0 via match_dup.
+(define_expand "fmai_vmfmadd_<mode>"
+ [(set (match_operand:VF_128 0 "register_operand")
+ (vec_merge:VF_128
+ (fma:VF_128
+ (match_operand:VF_128 1 "nonimmediate_operand")
+ (match_operand:VF_128 2 "nonimmediate_operand")
+ (match_operand:VF_128 3 "nonimmediate_operand"))
+ (match_dup 0)
+ (const_int 1)))]
+ "TARGET_FMA")
+
+;; op1 * op2 + op3 in element 0.
+(define_insn "*fmai_fmadd_<mode>"
+ [(set (match_operand:VF_128 0 "register_operand" "=x,x,x")
+ (vec_merge:VF_128
+ (fma:VF_128
+ (match_operand:VF_128 1 "nonimmediate_operand" "%0, 0,x")
+ (match_operand:VF_128 2 "nonimmediate_operand" "xm, x,xm")
+ (match_operand:VF_128 3 "nonimmediate_operand" " x,xm,0"))
+ (match_dup 0)
+ (const_int 1)))]
+ "TARGET_FMA"
+ "@
+ vfmadd132<ssescalarmodesuffix>\t{%2, %3, %0|%0, %3, %2}
+ vfmadd213<ssescalarmodesuffix>\t{%3, %2, %0|%0, %2, %3}
+ vfmadd231<ssescalarmodesuffix>\t{%2, %1, %0|%0, %1, %2}"
+ [(set_attr "type" "ssemuladd")
+ (set_attr "mode" "<MODE>")])
+
+;; op1 * op2 - op3 in element 0.
+(define_insn "*fmai_fmsub_<mode>"
+ [(set (match_operand:VF_128 0 "register_operand" "=x,x,x")
+ (vec_merge:VF_128
+ (fma:VF_128
+ (match_operand:VF_128 1 "nonimmediate_operand" "%0, 0,x")
+ (match_operand:VF_128 2 "nonimmediate_operand" "xm, x,xm")
+ (neg:VF_128
+ (match_operand:VF_128 3 "nonimmediate_operand" " x,xm,0")))
+ (match_dup 0)
+ (const_int 1)))]
+ "TARGET_FMA"
+ "@
+ vfmsub132<ssescalarmodesuffix>\t{%2, %3, %0|%0, %3, %2}
+ vfmsub213<ssescalarmodesuffix>\t{%3, %2, %0|%0, %2, %3}
+ vfmsub231<ssescalarmodesuffix>\t{%2, %1, %0|%0, %1, %2}"
+ [(set_attr "type" "ssemuladd")
+ (set_attr "mode" "<MODE>")])
+
+;; -(op1 * op2) + op3 in element 0.
+(define_insn "*fmai_fnmadd_<mode>"
+ [(set (match_operand:VF_128 0 "register_operand" "=x,x,x")
+ (vec_merge:VF_128
+ (fma:VF_128
+ (neg:VF_128
+ (match_operand:VF_128 1 "nonimmediate_operand" "%0, 0,x"))
+ (match_operand:VF_128 2 "nonimmediate_operand" "xm, x,xm")
+ (match_operand:VF_128 3 "nonimmediate_operand" " x,xm,0"))
+ (match_dup 0)
+ (const_int 1)))]
+ "TARGET_FMA"
+ "@
+ vfnmadd132<ssescalarmodesuffix>\t{%2, %3, %0|%0, %3, %2}
+ vfnmadd213<ssescalarmodesuffix>\t{%3, %2, %0|%0, %2, %3}
+ vfnmadd231<ssescalarmodesuffix>\t{%2, %1, %0|%0, %1, %2}"
+ [(set_attr "type" "ssemuladd")
+ (set_attr "mode" "<MODE>")])
+
+;; -(op1 * op2) - op3 in element 0.
+(define_insn "*fmai_fnmsub_<mode>"
+ [(set (match_operand:VF_128 0 "register_operand" "=x,x,x")
+ (vec_merge:VF_128
+ (fma:VF_128
+ (neg:VF_128
+ (match_operand:VF_128 1 "nonimmediate_operand" "%0, 0,x"))
+ (match_operand:VF_128 2 "nonimmediate_operand" "xm, x,xm")
+ (neg:VF_128
+ (match_operand:VF_128 3 "nonimmediate_operand" " x,xm,0")))
+ (match_dup 0)
+ (const_int 1)))]
+ "TARGET_FMA"
+ "@
+ vfnmsub132<ssescalarmodesuffix>\t{%2, %3, %0|%0, %3, %2}
+ vfnmsub213<ssescalarmodesuffix>\t{%3, %2, %0|%0, %2, %3}
+ vfnmsub231<ssescalarmodesuffix>\t{%2, %1, %0|%0, %1, %2}"
+ [(set_attr "type" "ssemuladd")
+ (set_attr "mode" "<MODE>")])
+
+
(define_insn "*fma4i_vmfmadd_<mode>"
[(set (match_operand:VF_128 0 "register_operand" "=x,x")
(vec_merge:VF_128
"TARGET_FMA"
"@
vfmadd132<ssemodesuffix>\t{%2, %3, %0|%0, %3, %2}
- vfmadd312<ssemodesuffix>\t{%3, %2, %0|%0, %2, %3}
+ vfmadd213<ssemodesuffix>\t{%3, %2, %0|%0, %2, %3}
vfmadd231<ssemodesuffix>\t{%2, %1, %0|%0, %1, %2}"
[(set_attr "type" "ssemuladd")
(set_attr "mode" "<MODE>")])
"TARGET_FMA"
"@
vfmsub132<ssemodesuffix>\t{%2, %3, %0|%0, %3, %2}
- vfmsub312<ssemodesuffix>\t{%3, %2, %0|%0, %2, %3}
+ vfmsub213<ssemodesuffix>\t{%3, %2, %0|%0, %2, %3}
vfmsub231<ssemodesuffix>\t{%2, %1, %0|%0, %1, %2}"
[(set_attr "type" "ssemuladd")
(set_attr "mode" "<MODE>")])
"TARGET_FMA"
"@
vfnmadd132<ssemodesuffix>\t{%2, %3, %0|%0, %3, %2}
- vfnmadd312<ssemodesuffix>\t{%3, %2, %0|%0, %2, %3}
+ vfnmadd213<ssemodesuffix>\t{%3, %2, %0|%0, %2, %3}
vfnmadd231<ssemodesuffix>\t{%2, %1, %0|%0, %1, %2}"
[(set_attr "type" "ssemuladd")
(set_attr "mode" "<MODE>")])
"TARGET_FMA"
"@
vfnmsub132<ssemodesuffix>\t{%2, %3, %0|%0, %3, %2}
- vfnmsub312<ssemodesuffix>\t{%3, %2, %0|%0, %2, %3}
+ vfnmsub213<ssemodesuffix>\t{%3, %2, %0|%0, %2, %3}
vfnmsub231<ssemodesuffix>\t{%2, %1, %0|%0, %1, %2}"
[(set_attr "type" "ssemuladd")
(set_attr "mode" "<MODE>")])
(set_attr "prefix" "maybe_vex")
(set_attr "mode" "DI")])
-(define_insn "avx_cvtdq2ps256"
- [(set (match_operand:V8SF 0 "register_operand" "=x")
- (float:V8SF (match_operand:V8SI 1 "nonimmediate_operand" "xm")))]
- "TARGET_AVX"
- "vcvtdq2ps\t{%1, %0|%0, %1}"
- [(set_attr "type" "ssecvt")
- (set_attr "prefix" "vex")
- (set_attr "mode" "V8SF")])
-
-(define_insn "sse2_cvtdq2ps"
- [(set (match_operand:V4SF 0 "register_operand" "=x")
- (float:V4SF (match_operand:V4SI 1 "nonimmediate_operand" "xm")))]
+(define_insn "float<sseintvecmodelower><mode>2"
+ [(set (match_operand:VF1 0 "register_operand" "=x")
+ (float:VF1
+ (match_operand:<sseintvecmode> 1 "nonimmediate_operand" "xm")))]
"TARGET_SSE2"
"%vcvtdq2ps\t{%1, %0|%0, %1}"
[(set_attr "type" "ssecvt")
(set_attr "prefix" "maybe_vex")
- (set_attr "mode" "V4SF")])
+ (set_attr "mode" "<sseinsnmode>")])
-(define_expand "sse2_cvtudq2ps"
- [(set (match_dup 5)
- (float:V4SF (match_operand:V4SI 1 "nonimmediate_operand" "")))
- (set (match_dup 6)
- (lt:V4SF (match_dup 5) (match_dup 3)))
- (set (match_dup 7)
- (and:V4SF (match_dup 6) (match_dup 4)))
- (set (match_operand:V4SF 0 "register_operand" "")
- (plus:V4SF (match_dup 5) (match_dup 7)))]
- "TARGET_SSE2"
+;; Unsigned integer vector -> float vector conversion; the 256-bit
+;; (V8SF) case additionally requires AVX2.
+(define_expand "floatuns<sseintvecmodelower><mode>2"
+ [(match_operand:VF1 0 "register_operand" "")
+ (match_operand:<sseintvecmode> 1 "register_operand" "")]
+ "TARGET_SSE2 && (<MODE>mode == V4SFmode || TARGET_AVX2)"
{
- REAL_VALUE_TYPE TWO32r;
- rtx x;
- int i;
-
- real_ldexp (&TWO32r, &dconst1, 32);
- x = const_double_from_real_value (TWO32r, SFmode);
-
- operands[3] = force_reg (V4SFmode, CONST0_RTX (V4SFmode));
- operands[4] = force_reg (V4SFmode,
- ix86_build_const_vector (V4SFmode, 1, x));
-
- for (i = 5; i < 8; i++)
- operands[i] = gen_reg_rtx (V4SFmode);
+ ix86_expand_vector_convert_uns_vsivsf (operands[0], operands[1]);
+ DONE;
})
(define_insn "avx_cvtps2dq256"
[(set_attr "type" "ssecvt")
(set (attr "prefix_data16")
(if_then_else
- (ne (symbol_ref "TARGET_AVX") (const_int 0))
+ (match_test "TARGET_AVX")
(const_string "*")
(const_string "1")))
(set_attr "prefix" "maybe_vex")
(set_attr "mode" "TI")])
-(define_insn "avx_cvttps2dq256"
+(define_insn "fix_truncv8sfv8si2"
[(set (match_operand:V8SI 0 "register_operand" "=x")
(fix:V8SI (match_operand:V8SF 1 "nonimmediate_operand" "xm")))]
"TARGET_AVX"
(set_attr "prefix" "vex")
(set_attr "mode" "OI")])
-(define_insn "sse2_cvttps2dq"
+(define_insn "fix_truncv4sfv4si2"
[(set (match_operand:V4SI 0 "register_operand" "=x")
(fix:V4SI (match_operand:V4SF 1 "nonimmediate_operand" "xm")))]
"TARGET_SSE2"
[(set_attr "type" "ssecvt")
(set (attr "prefix_rep")
(if_then_else
- (ne (symbol_ref "TARGET_AVX") (const_int 0))
+ (match_test "TARGET_AVX")
(const_string "*")
(const_string "1")))
(set (attr "prefix_data16")
(if_then_else
- (ne (symbol_ref "TARGET_AVX") (const_int 0))
+ (match_test "TARGET_AVX")
(const_string "*")
(const_string "0")))
(set_attr "prefix_data16" "0")
(set_attr "prefix" "maybe_vex")
(set_attr "mode" "TI")])
+;; Unsigned truncation: adjust the input so a signed fix_trunc is valid
+;; (ix86_expand_adjust_ufix_to_sfix_si also returns an XOR correction
+;; mask in tmp[2]), truncate, then apply the correction with XOR.
+(define_expand "fixuns_trunc<mode><sseintvecmodelower>2"
+ [(match_operand:<sseintvecmode> 0 "register_operand" "")
+ (match_operand:VF1 1 "register_operand" "")]
+ "TARGET_SSE2"
+{
+ rtx tmp[3];
+ tmp[0] = ix86_expand_adjust_ufix_to_sfix_si (operands[1], &tmp[2]);
+ tmp[1] = gen_reg_rtx (<sseintvecmode>mode);
+ emit_insn (gen_fix_trunc<mode><sseintvecmodelower>2 (tmp[1], tmp[0]));
+ emit_insn (gen_xor<sseintvecmodelower>3 (operands[0], tmp[1], tmp[2]));
+ DONE;
+})
+
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;;
;; Parallel double-precision floating point conversion operations
(set_attr "prefix" "maybe_vex")
(set_attr "mode" "DI")])
-(define_insn "avx_cvtdq2pd256"
+(define_insn "floatv4siv4df2"
[(set (match_operand:V4DF 0 "register_operand" "=x")
(float:V4DF (match_operand:V4SI 1 "nonimmediate_operand" "xm")))]
"TARGET_AVX"
(set_attr "prefix" "vex")
(set_attr "mode" "V4DF")])
-(define_insn "*avx_cvtdq2pd256_2"
+(define_insn "avx_cvtdq2pd256_2"
[(set (match_operand:V4DF 0 "register_operand" "=x")
(float:V4DF
(vec_select:V4SI
(match_operand:V4SI 1 "nonimmediate_operand" "xm")
(parallel [(const_int 0) (const_int 1)]))))]
"TARGET_SSE2"
- "%vcvtdq2pd\t{%1, %0|%0, %1}"
+ "%vcvtdq2pd\t{%1, %0|%0, %q1}"
[(set_attr "type" "ssecvt")
(set_attr "prefix" "maybe_vex")
(set_attr "mode" "V2DF")])
(set_attr "prefix" "vex")
(set_attr "mode" "OI")])
+;; cvtpd2dq whose V4SI result is zero-extended into a V8SI (high half zero).
+(define_expand "avx_cvtpd2dq256_2"
+ [(set (match_operand:V8SI 0 "register_operand" "")
+ (vec_concat:V8SI
+ (unspec:V4SI [(match_operand:V4DF 1 "nonimmediate_operand" "")]
+ UNSPEC_FIX_NOTRUNC)
+ (match_dup 2)))]
+ "TARGET_AVX"
+ "operands[2] = CONST0_RTX (V4SImode);")
+
+(define_insn "*avx_cvtpd2dq256_2"
+ [(set (match_operand:V8SI 0 "register_operand" "=x")
+ (vec_concat:V8SI
+ (unspec:V4SI [(match_operand:V4DF 1 "nonimmediate_operand" "xm")]
+ UNSPEC_FIX_NOTRUNC)
+ (match_operand:V4SI 2 "const0_operand" "")))]
+ "TARGET_AVX"
+ "vcvtpd2dq{y}\t{%1, %x0|%x0, %1}"
+ [(set_attr "type" "ssecvt")
+ (set_attr "prefix" "vex")
+ (set_attr "mode" "OI")])
+
(define_expand "sse2_cvtpd2dq"
[(set (match_operand:V4SI 0 "register_operand" "")
(vec_concat:V4SI
(set_attr "athlon_decode" "vector")
(set_attr "bdver1_decode" "double")])
-(define_insn "avx_cvttpd2dq256"
+(define_insn "fix_truncv4dfv4si2"
[(set (match_operand:V4SI 0 "register_operand" "=x")
(fix:V4SI (match_operand:V4DF 1 "nonimmediate_operand" "xm")))]
"TARGET_AVX"
(set_attr "prefix" "vex")
(set_attr "mode" "OI")])
+;; Truncating cvttpd2dq whose V4SI result is zero-extended into a V8SI.
+(define_expand "avx_cvttpd2dq256_2"
+ [(set (match_operand:V8SI 0 "register_operand" "")
+ (vec_concat:V8SI
+ (fix:V4SI (match_operand:V4DF 1 "nonimmediate_operand" ""))
+ (match_dup 2)))]
+ "TARGET_AVX"
+ "operands[2] = CONST0_RTX (V4SImode);")
+
+(define_insn "*avx_cvttpd2dq256_2"
+ [(set (match_operand:V8SI 0 "register_operand" "=x")
+ (vec_concat:V8SI
+ (fix:V4SI (match_operand:V4DF 1 "nonimmediate_operand" "xm"))
+ (match_operand:V4SI 2 "const0_operand" "")))]
+ "TARGET_AVX"
+ "vcvttpd2dq{y}\t{%1, %x0|%x0, %1}"
+ [(set_attr "type" "ssecvt")
+ (set_attr "prefix" "vex")
+ (set_attr "mode" "OI")])
+
+
(define_expand "sse2_cvttpd2dq"
[(set (match_operand:V4SI 0 "register_operand" "")
(vec_concat:V4SI
(match_operand:V4SF 1 "nonimmediate_operand" "xm")
(parallel [(const_int 0) (const_int 1)]))))]
"TARGET_SSE2"
- "%vcvtps2pd\t{%1, %0|%0, %1}"
+ "%vcvtps2pd\t{%1, %0|%0, %q1}"
[(set_attr "type" "ssecvt")
(set_attr "amdfam10_decode" "direct")
(set_attr "athlon_decode" "double")
(const_int 2) (const_int 3)]))))]
"TARGET_AVX")
-(define_expand "vec_unpacks_float_hi_v8hi"
- [(match_operand:V4SF 0 "register_operand" "")
- (match_operand:V8HI 1 "register_operand" "")]
+(define_mode_attr sseunpackfltmode
+ [(V8HI "V4SF") (V4SI "V2DF") (V16HI "V8SF") (V8SI "V4DF")])
+
+(define_expand "vec_unpacks_float_hi_<mode>"
+ [(match_operand:<sseunpackfltmode> 0 "register_operand" "")
+ (match_operand:VI2_AVX2 1 "register_operand" "")]
"TARGET_SSE2"
{
- rtx tmp = gen_reg_rtx (V4SImode);
+ rtx tmp = gen_reg_rtx (<sseunpackmode>mode);
- emit_insn (gen_vec_unpacks_hi_v8hi (tmp, operands[1]));
- emit_insn (gen_sse2_cvtdq2ps (operands[0], tmp));
+ emit_insn (gen_vec_unpacks_hi_<mode> (tmp, operands[1]));
+ emit_insn (gen_rtx_SET (VOIDmode, operands[0],
+ gen_rtx_FLOAT (<sseunpackfltmode>mode, tmp)));
DONE;
})
-(define_expand "vec_unpacks_float_lo_v8hi"
- [(match_operand:V4SF 0 "register_operand" "")
- (match_operand:V8HI 1 "register_operand" "")]
+(define_expand "vec_unpacks_float_lo_<mode>"
+ [(match_operand:<sseunpackfltmode> 0 "register_operand" "")
+ (match_operand:VI2_AVX2 1 "register_operand" "")]
"TARGET_SSE2"
{
- rtx tmp = gen_reg_rtx (V4SImode);
+ rtx tmp = gen_reg_rtx (<sseunpackmode>mode);
- emit_insn (gen_vec_unpacks_lo_v8hi (tmp, operands[1]));
- emit_insn (gen_sse2_cvtdq2ps (operands[0], tmp));
+ emit_insn (gen_vec_unpacks_lo_<mode> (tmp, operands[1]));
+ emit_insn (gen_rtx_SET (VOIDmode, operands[0],
+ gen_rtx_FLOAT (<sseunpackfltmode>mode, tmp)));
DONE;
})
-(define_expand "vec_unpacku_float_hi_v8hi"
- [(match_operand:V4SF 0 "register_operand" "")
- (match_operand:V8HI 1 "register_operand" "")]
+(define_expand "vec_unpacku_float_hi_<mode>"
+ [(match_operand:<sseunpackfltmode> 0 "register_operand" "")
+ (match_operand:VI2_AVX2 1 "register_operand" "")]
"TARGET_SSE2"
{
- rtx tmp = gen_reg_rtx (V4SImode);
+ rtx tmp = gen_reg_rtx (<sseunpackmode>mode);
- emit_insn (gen_vec_unpacku_hi_v8hi (tmp, operands[1]));
- emit_insn (gen_sse2_cvtdq2ps (operands[0], tmp));
+ emit_insn (gen_vec_unpacku_hi_<mode> (tmp, operands[1]));
+ emit_insn (gen_rtx_SET (VOIDmode, operands[0],
+ gen_rtx_FLOAT (<sseunpackfltmode>mode, tmp)));
DONE;
})
-(define_expand "vec_unpacku_float_lo_v8hi"
- [(match_operand:V4SF 0 "register_operand" "")
- (match_operand:V8HI 1 "register_operand" "")]
+(define_expand "vec_unpacku_float_lo_<mode>"
+ [(match_operand:<sseunpackfltmode> 0 "register_operand" "")
+ (match_operand:VI2_AVX2 1 "register_operand" "")]
"TARGET_SSE2"
{
- rtx tmp = gen_reg_rtx (V4SImode);
+ rtx tmp = gen_reg_rtx (<sseunpackmode>mode);
- emit_insn (gen_vec_unpacku_lo_v8hi (tmp, operands[1]));
- emit_insn (gen_sse2_cvtdq2ps (operands[0], tmp));
+ emit_insn (gen_vec_unpacku_lo_<mode> (tmp, operands[1]));
+ emit_insn (gen_rtx_SET (VOIDmode, operands[0],
+ gen_rtx_FLOAT (<sseunpackfltmode>mode, tmp)));
DONE;
})
(parallel [(const_int 2) (const_int 3)
(const_int 2) (const_int 3)])))
(set (match_operand:V2DF 0 "register_operand" "")
- (float:V2DF
+ (float:V2DF
(vec_select:V2SI
(match_dup 2)
(parallel [(const_int 0) (const_int 1)]))))]
(parallel [(const_int 4) (const_int 5)
(const_int 6) (const_int 7)])))
(set (match_operand:V4DF 0 "register_operand" "")
- (float:V4DF
+ (float:V4DF
(match_dup 2)))]
"TARGET_AVX"
"operands[2] = gen_reg_rtx (V4SImode);")
(parallel [(const_int 2) (const_int 3)
(const_int 2) (const_int 3)])))
(set (match_dup 6)
- (float:V2DF
+ (float:V2DF
(vec_select:V2SI
(match_dup 5)
(parallel [(const_int 0) (const_int 1)]))))
ix86_build_const_vector (V2DFmode, 1, x));
operands[5] = gen_reg_rtx (V4SImode);
-
+
for (i = 6; i < 9; i++)
operands[i] = gen_reg_rtx (V2DFmode);
})
operands[i] = gen_reg_rtx (V2DFmode);
})
+;; Unsigned V8SI (high half) -> V4DF: convert as signed, then add 2**32
+;; (TWO32r) to every lane the signed conversion left negative, selected
+;; by the LT-against-zero mask.
+(define_expand "vec_unpacku_float_hi_v8si"
+ [(match_operand:V4DF 0 "register_operand" "")
+ (match_operand:V8SI 1 "register_operand" "")]
+ "TARGET_AVX"
+{
+ REAL_VALUE_TYPE TWO32r;
+ rtx x, tmp[6];
+ int i;
+
+ real_ldexp (&TWO32r, &dconst1, 32);
+ x = const_double_from_real_value (TWO32r, DFmode);
+
+ tmp[0] = force_reg (V4DFmode, CONST0_RTX (V4DFmode));
+ tmp[1] = force_reg (V4DFmode, ix86_build_const_vector (V4DFmode, 1, x));
+ tmp[5] = gen_reg_rtx (V4SImode);
+
+ for (i = 2; i < 5; i++)
+ tmp[i] = gen_reg_rtx (V4DFmode);
+ emit_insn (gen_vec_extract_hi_v8si (tmp[5], operands[1]));
+ emit_insn (gen_floatv4siv4df2 (tmp[2], tmp[5]));
+ emit_insn (gen_rtx_SET (VOIDmode, tmp[3],
+ gen_rtx_LT (V4DFmode, tmp[2], tmp[0])));
+ emit_insn (gen_andv4df3 (tmp[4], tmp[3], tmp[1]))
+ emit_insn (gen_addv4df3 (operands[0], tmp[2], tmp[4]));
+ DONE;
+})
+
+;; Likewise for the low half of the V8SI input, which avx_cvtdq2pd256_2
+;; converts directly.
+(define_expand "vec_unpacku_float_lo_v8si"
+ [(match_operand:V4DF 0 "register_operand" "")
+ (match_operand:V8SI 1 "nonimmediate_operand" "")]
+ "TARGET_AVX"
+{
+ REAL_VALUE_TYPE TWO32r;
+ rtx x, tmp[5];
+ int i;
+
+ real_ldexp (&TWO32r, &dconst1, 32);
+ x = const_double_from_real_value (TWO32r, DFmode);
+
+ tmp[0] = force_reg (V4DFmode, CONST0_RTX (V4DFmode));
+ tmp[1] = force_reg (V4DFmode, ix86_build_const_vector (V4DFmode, 1, x));
+
+ for (i = 2; i < 5; i++)
+ tmp[i] = gen_reg_rtx (V4DFmode);
+ emit_insn (gen_avx_cvtdq2pd256_2 (tmp[2], operands[1]));
+ emit_insn (gen_rtx_SET (VOIDmode, tmp[3],
+ gen_rtx_LT (V4DFmode, tmp[2], tmp[0])));
+ emit_insn (gen_andv4df3 (tmp[4], tmp[3], tmp[1]));
+ emit_insn (gen_addv4df3 (operands[0], tmp[2], tmp[4]));
+ DONE;
+})
+
+
(define_expand "vec_pack_trunc_v4df"
[(set (match_dup 3)
(float_truncate:V4SF
DONE;
})
+;; Pack two V4DF fix-truncations into one V8SI: each cvttpd2dq256_2
+;; leaves its V4SI result in the low half of a V8SI, and vperm2f128 with
+;; selector 0x20 concatenates the two low 128-bit halves.
+(define_expand "vec_pack_sfix_trunc_v4df"
+ [(match_operand:V8SI 0 "register_operand" "")
+ (match_operand:V4DF 1 "nonimmediate_operand" "")
+ (match_operand:V4DF 2 "nonimmediate_operand" "")]
+ "TARGET_AVX"
+{
+ rtx r1, r2;
+
+ r1 = gen_reg_rtx (V8SImode);
+ r2 = gen_reg_rtx (V8SImode);
+
+ emit_insn (gen_avx_cvttpd2dq256_2 (r1, operands[1]));
+ emit_insn (gen_avx_cvttpd2dq256_2 (r2, operands[2]));
+ emit_insn (gen_avx_vperm2f128v8si3 (operands[0], r1, r2, GEN_INT (0x20)));
+ DONE;
+})
+
+
(define_expand "vec_pack_sfix_trunc_v2df"
[(match_operand:V4SI 0 "register_operand" "")
(match_operand:V2DF 1 "nonimmediate_operand" "")
emit_insn (gen_sse2_cvttpd2dq (r1, operands[1]));
emit_insn (gen_sse2_cvttpd2dq (r2, operands[2]));
emit_insn (gen_vec_interleave_lowv2di (gen_lowpart (V2DImode, operands[0]),
- gen_lowpart (V2DImode, r1),
- gen_lowpart (V2DImode, r2)));
+ gen_lowpart (V2DImode, r1),
+ gen_lowpart (V2DImode, r2)));
+ DONE;
+})
+
+;; Mode of the packed result of two VF2 fix_trunc conversions.
+(define_mode_attr ssepackfltmode
+ [(V4DF "V8SI") (V2DF "V4SI")])
+
+;; Unsigned pack: adjust both inputs for signed truncation (the helper
+;; also returns an XOR correction mask for each), do the signed pack,
+;; then merge the two masks with an even/odd extract and XOR them into
+;; the packed result.  Without AVX2 the V8SI merge is done in V8SF.
+(define_expand "vec_pack_ufix_trunc_<mode>"
+ [(match_operand:<ssepackfltmode> 0 "register_operand" "")
+ (match_operand:VF2 1 "register_operand" "")
+ (match_operand:VF2 2 "register_operand" "")]
+ "TARGET_SSE2"
+{
+ rtx tmp[7];
+ tmp[0] = ix86_expand_adjust_ufix_to_sfix_si (operands[1], &tmp[2]);
+ tmp[1] = ix86_expand_adjust_ufix_to_sfix_si (operands[2], &tmp[3]);
+ tmp[4] = gen_reg_rtx (<ssepackfltmode>mode);
+ emit_insn (gen_vec_pack_sfix_trunc_<mode> (tmp[4], tmp[0], tmp[1]));
+ if (<ssepackfltmode>mode == V4SImode || TARGET_AVX2)
+ {
+ tmp[5] = gen_reg_rtx (<ssepackfltmode>mode);
+ ix86_expand_vec_extract_even_odd (tmp[5], tmp[2], tmp[3], 0);
+ }
+ else
+ {
+ tmp[5] = gen_reg_rtx (V8SFmode);
+ ix86_expand_vec_extract_even_odd (tmp[5], gen_lowpart (V8SFmode, tmp[2]),
+ gen_lowpart (V8SFmode, tmp[3]), 0);
+ tmp[5] = gen_lowpart (V8SImode, tmp[5]);
+ }
+ tmp[6] = expand_simple_binop (<ssepackfltmode>mode, XOR, tmp[4], tmp[5],
+ operands[0], 0, OPTAB_DIRECT);
+ if (tmp[6] != operands[0])
+ emit_move_insn (operands[0], tmp[6]);
+ DONE;
+})
+
+;; Non-truncating variant: pack via avx_cvtpd2dq256_2 (cvtpd2dq) and
+;; combine the two low halves with vperm2f128 selector 0x20.
+(define_expand "vec_pack_sfix_v4df"
+ [(match_operand:V8SI 0 "register_operand" "")
+ (match_operand:V4DF 1 "nonimmediate_operand" "")
+ (match_operand:V4DF 2 "nonimmediate_operand" "")]
+ "TARGET_AVX"
+{
+ rtx r1, r2;
+
+ r1 = gen_reg_rtx (V8SImode);
+ r2 = gen_reg_rtx (V8SImode);
+
+ emit_insn (gen_avx_cvtpd2dq256_2 (r1, operands[1]));
+ emit_insn (gen_avx_cvtpd2dq256_2 (r2, operands[2]));
+ emit_insn (gen_avx_vperm2f128v8si3 (operands[0], r1, r2, GEN_INT (0x20)));
+ DONE;
+})
emit_insn (gen_sse2_cvtpd2dq (r1, operands[1]));
emit_insn (gen_sse2_cvtpd2dq (r2, operands[2]));
emit_insn (gen_vec_interleave_lowv2di (gen_lowpart (V2DImode, operands[0]),
- gen_lowpart (V2DImode, r1),
- gen_lowpart (V2DImode, r2)));
+ gen_lowpart (V2DImode, r1),
+ gen_lowpart (V2DImode, r2)));
DONE;
})
"TARGET_SSE"
{
rtx dst = ix86_fixup_binary_operands (UNKNOWN, V4SFmode, operands);
-
+
emit_insn (gen_sse_movhlps (dst, operands[1], operands[2]));
/* Fix up the destination if needed. */
movlps\t{%H2, %0|%0, %H2}
vmovlps\t{%H2, %1, %0|%0, %1, %H2}
%vmovhps\t{%2, %0|%0, %2}"
- [(set_attr "isa" "noavx,avx,noavx,avx,base")
+ [(set_attr "isa" "noavx,avx,noavx,avx,*")
(set_attr "type" "ssemov")
(set_attr "prefix" "orig,vex,orig,vex,maybe_vex")
(set_attr "mode" "V4SF,V4SF,V2SF,V2SF,V2SF")])
"TARGET_SSE"
{
rtx dst = ix86_fixup_binary_operands (UNKNOWN, V4SFmode, operands);
-
+
emit_insn (gen_sse_movlhps (dst, operands[1], operands[2]));
/* Fix up the destination if needed. */
movhps\t{%2, %0|%0, %2}
vmovhps\t{%2, %1, %0|%0, %1, %2}
%vmovlps\t{%2, %H0|%H0, %2}"
- [(set_attr "isa" "noavx,avx,noavx,avx,base")
+ [(set_attr "isa" "noavx,avx,noavx,avx,*")
(set_attr "type" "ssemov")
(set_attr "prefix" "orig,vex,orig,vex,maybe_vex")
(set_attr "mode" "V4SF,V4SF,V2SF,V2SF,V2SF")])
"TARGET_SSE"
{
rtx dst = ix86_fixup_binary_operands (UNKNOWN, V4SFmode, operands);
-
+
emit_insn (gen_sse_loadhps (dst, operands[1], operands[2]));
/* Fix up the destination if needed. */
movlhps\t{%2, %0|%0, %2}
vmovlhps\t{%2, %1, %0|%0, %1, %2}
%vmovlps\t{%2, %H0|%H0, %2}"
- [(set_attr "isa" "noavx,avx,noavx,avx,base")
+ [(set_attr "isa" "noavx,avx,noavx,avx,*")
(set_attr "type" "ssemov")
(set_attr "prefix" "orig,vex,orig,vex,maybe_vex")
(set_attr "mode" "V2SF,V2SF,V4SF,V4SF,V2SF")])
"TARGET_SSE"
{
rtx dst = ix86_fixup_binary_operands (UNKNOWN, V4SFmode, operands);
-
+
emit_insn (gen_sse_loadlps (dst, operands[1], operands[2]));
/* Fix up the destination if needed. */
movlps\t{%2, %0|%0, %2}
vmovlps\t{%2, %1, %0|%0, %1, %2}
%vmovlps\t{%2, %0|%0, %2}"
- [(set_attr "isa" "noavx,avx,noavx,avx,base")
+ [(set_attr "isa" "noavx,avx,noavx,avx,*")
(set_attr "type" "sselog,sselog,ssemov,ssemov,ssemov")
(set_attr "length_immediate" "1,1,*,*,*")
(set_attr "prefix" "orig,vex,orig,vex,maybe_vex")
(set_attr "prefix" "orig,vex")
(set_attr "mode" "SF")])
-(define_expand "vec_dupv4sf"
- [(set (match_operand:V4SF 0 "register_operand" "")
- (vec_duplicate:V4SF
- (match_operand:SF 1 "nonimmediate_operand" "")))]
- "TARGET_SSE"
-{
- if (!TARGET_AVX)
- operands[1] = force_reg (V4SFmode, operands[1]);
-})
+;; AVX2 register-source vbroadcastss: replicate element 0 of a V4SF
+;; register into every element of a single-float vector (VF1 covers
+;; both the 128-bit and 256-bit destinations).
+(define_insn "avx2_vec_dup<mode>"
+ [(set (match_operand:VF1 0 "register_operand" "=x")
+ (vec_duplicate:VF1
+ (vec_select:SF
+ (match_operand:V4SF 1 "register_operand" "x")
+ (parallel [(const_int 0)]))))]
+ "TARGET_AVX2"
+ "vbroadcastss\t{%1, %0|%0, %1}"
+ [(set_attr "type" "sselog1")
+ (set_attr "prefix" "vex")
+ (set_attr "mode" "<MODE>")])
+;; Fold the former AVX-only (*vec_dupv4sf_avx) and SSE-only
+;; (*vec_dupv4sf) broadcast patterns into one define_insn, with
+;; per-alternative enabling via the "isa" attribute: alternatives
+;; 0/1 require AVX (vshufps / vbroadcastss), alternative 2 is the
+;; legacy shufps form with a matched "0" input.
-(define_insn "*vec_dupv4sf_avx"
- [(set (match_operand:V4SF 0 "register_operand" "=x,x")
+(define_insn "vec_dupv4sf"
+ [(set (match_operand:V4SF 0 "register_operand" "=x,x,x")
(vec_duplicate:V4SF
- (match_operand:SF 1 "nonimmediate_operand" "x,m")))]
- "TARGET_AVX"
+ (match_operand:SF 1 "nonimmediate_operand" "x,m,0")))]
+ "TARGET_SSE"
"@
vshufps\t{$0, %1, %1, %0|%0, %1, %1, 0}
- vbroadcastss\t{%1, %0|%0, %1}"
- [(set_attr "type" "sselog1,ssemov")
- (set_attr "length_immediate" "1,0")
- (set_attr "prefix_extra" "0,1")
- (set_attr "prefix" "vex")
- (set_attr "mode" "V4SF")])
-
-(define_insn "*vec_dupv4sf"
- [(set (match_operand:V4SF 0 "register_operand" "=x")
- (vec_duplicate:V4SF
- (match_operand:SF 1 "register_operand" "0")))]
- "TARGET_SSE"
- "shufps\t{$0, %0, %0|%0, %0, 0}"
- [(set_attr "type" "sselog1")
- (set_attr "length_immediate" "1")
+ vbroadcastss\t{%1, %0|%0, %1}
+ shufps\t{$0, %0, %0|%0, %0, 0}"
+ [(set_attr "isa" "avx,avx,noavx")
+ (set_attr "type" "sselog1,ssemov,sselog1")
+ (set_attr "length_immediate" "1,0,1")
+ (set_attr "prefix_extra" "0,1,*")
+ (set_attr "prefix" "vex,vex,orig")
(set_attr "mode" "V4SF")])
;; Although insertps takes register source, we prefer
%vmovss\t{%1, %0|%0, %1}
punpckldq\t{%2, %0|%0, %2}
movd\t{%1, %0|%0, %1}"
- [(set_attr "isa" "noavx,avx,noavx,avx,base,base,base")
+ [(set_attr "isa" "noavx,avx,noavx,avx,*,*,*")
(set_attr "type" "sselog,sselog,sselog,sselog,ssemov,mmxcvt,mmxmov")
(set_attr "prefix_data16" "*,*,1,*,*,*,*")
(set_attr "prefix_extra" "*,*,1,1,*,*,*")
;; Avoid combining registers from different units in a single alternative,
;; see comment above inline_secondary_memory_needed function in i386.c
-(define_insn "*vec_set<mode>_0_sse4_1"
+(define_insn "vec_set<mode>_0"
[(set (match_operand:VI4F_128 0 "nonimmediate_operand"
- "=x,x,x ,x,x,x ,x ,m,m,m")
+ "=x,x,x ,x,x,x,x ,x ,m,m ,m")
(vec_merge:VI4F_128
(vec_duplicate:VI4F_128
(match_operand:<ssescalarmode> 2 "general_operand"
- " x,m,*r,x,x,*rm,*rm,x,*r,fF"))
+ " x,m,*r,m,x,x,*rm,*rm,x,fF,*r"))
(match_operand:VI4F_128 1 "vector_move_operand"
- " C,C,C ,0,x,0 ,x ,0,0 ,0")
+ " C,C,C ,C,0,x,0 ,x ,0,0 ,0")
(const_int 1)))]
- "TARGET_SSE4_1"
+ "TARGET_SSE"
"@
%vinsertps\t{$0xe, %d2, %0|%0, %d2, 0xe}
%vmov<ssescalarmodesuffix>\t{%2, %0|%0, %2}
%vmovd\t{%2, %0|%0, %2}
movss\t{%2, %0|%0, %2}
+ movss\t{%2, %0|%0, %2}
vmovss\t{%2, %1, %0|%0, %1, %2}
pinsrd\t{$0, %2, %0|%0, %2, 0}
vpinsrd\t{$0, %2, %1, %0|%0, %1, %2, 0}
#
#
#"
- [(set_attr "isa" "base,base,base,noavx,avx,noavx,avx,base,base,base")
- (set_attr "type" "sselog,ssemov,ssemov,ssemov,ssemov,sselog,sselog,*,*,*")
- (set_attr "prefix_extra" "*,*,*,*,*,1,1,*,*,*")
- (set_attr "length_immediate" "*,*,*,*,*,1,1,*,*,*")
- (set_attr "prefix" "maybe_vex,maybe_vex,maybe_vex,orig,vex,orig,vex,*,*,*")
- (set_attr "mode" "SF,<ssescalarmode>,SI,SF,SF,TI,TI,*,*,*")])
-
-;; Avoid combining registers from different units in a single alternative,
-;; see comment above inline_secondary_memory_needed function in i386.c
-(define_insn "*vec_set<mode>_0_sse2"
- [(set (match_operand:VI4F_128 0 "nonimmediate_operand"
- "=x,x ,x,m,m ,m")
- (vec_merge:VI4F_128
- (vec_duplicate:VI4F_128
- (match_operand:<ssescalarmode> 2 "general_operand"
- " m,*r,x,x,*r,fF"))
- (match_operand:VI4F_128 1 "vector_move_operand"
- " C, C,0,0,0 ,0")
- (const_int 1)))]
- "TARGET_SSE2"
- "@
- mov<ssescalarmodesuffix>\t{%2, %0|%0, %2}
- movd\t{%2, %0|%0, %2}
- movss\t{%2, %0|%0, %2}
- #
- #
- #"
- [(set_attr "type" "ssemov")
- (set_attr "mode" "<ssescalarmode>,SI,SF,*,*,*")])
-
-;; Avoid combining registers from different units in a single alternative,
-;; see comment above inline_secondary_memory_needed function in i386.c
-(define_insn "vec_set<mode>_0"
- [(set (match_operand:VI4F_128 0 "nonimmediate_operand"
- "=x,x,m,m ,m")
- (vec_merge:VI4F_128
- (vec_duplicate:VI4F_128
- (match_operand:<ssescalarmode> 2 "general_operand"
- " m,x,x,*r,fF"))
- (match_operand:VI4F_128 1 "vector_move_operand"
- " C,0,0,0 ,0")
- (const_int 1)))]
- "TARGET_SSE"
- "@
- movss\t{%2, %0|%0, %2}
- movss\t{%2, %0|%0, %2}
- #
- #
- #"
- [(set_attr "type" "ssemov")
- (set_attr "mode" "SF,SF,*,*,*")])
+ [(set_attr "isa" "sse4,sse2,sse2,noavx,noavx,avx,sse4_noavx,avx,*,*,*")
+ (set (attr "type")
+ (cond [(eq_attr "alternative" "0,6,7")
+ (const_string "sselog")
+ (eq_attr "alternative" "9")
+ (const_string "fmov")
+ (eq_attr "alternative" "10")
+ (const_string "imov")
+ ]
+ (const_string "ssemov")))
+ (set_attr "prefix_extra" "*,*,*,*,*,*,1,1,*,*,*")
+ (set_attr "length_immediate" "*,*,*,*,*,*,1,1,*,*,*")
+ (set_attr "prefix" "maybe_vex,maybe_vex,maybe_vex,orig,orig,vex,orig,vex,*,*,*")
+ (set_attr "mode" "SF,<ssescalarmode>,SI,SF,SF,SF,TI,TI,*,*,*")])
;; A subset is vec_setv4sf.
(define_insn "*vec_setv4sf_sse4_1"
})
(define_expand "vec_set<mode>"
- [(match_operand:V_128 0 "register_operand" "")
+ [(match_operand:V 0 "register_operand" "")
(match_operand:<ssescalarmode> 1 "register_operand" "")
(match_operand 2 "const_int_operand" "")]
"TARGET_SSE"
DONE;
})
-(define_expand "avx_vextractf128<mode>"
+;; extractps of element 1..3 of a V4SF.  Alternative 0 emits the real
+;; extractps (GPR or memory destination).  Alternatives 1/2 match an
+;; SSE-register destination, which extractps cannot encode; those are
+;; split after reload into a shufps (elements 1 and 3) or an
+;; unpckhps (element 2) that moves the wanted element to slot 0.
+(define_insn_and_split "*sse4_1_extractps"
+ [(set (match_operand:SF 0 "nonimmediate_operand" "=rm,x,x")
+ (vec_select:SF
+ (match_operand:V4SF 1 "register_operand" "x,0,x")
+ (parallel [(match_operand:SI 2 "const_0_to_3_operand" "n,n,n")])))]
+ "TARGET_SSE4_1"
+ "@
+ %vextractps\t{%2, %1, %0|%0, %1, %2}
+ #
+ #"
+ "&& reload_completed && SSE_REG_P (operands[0])"
+ [(const_int 0)]
+{
+ rtx dest = gen_rtx_REG (V4SFmode, REGNO (operands[0]));
+ switch (INTVAL (operands[2]))
+ {
+ case 1:
+ case 3:
+ emit_insn (gen_sse_shufps_v4sf (dest, operands[1], operands[1],
+ operands[2], operands[2],
+ GEN_INT (INTVAL (operands[2]) + 4),
+ GEN_INT (INTVAL (operands[2]) + 4)));
+ break;
+ case 2:
+ emit_insn (gen_vec_interleave_highv4sf (dest, operands[1], operands[1]));
+ break;
+ default:
+ /* 0 should be handled by the *vec_extractv4sf_0 pattern above. */
+ gcc_unreachable ();
+ }
+ DONE;
+}
+ [(set_attr "isa" "*,noavx,avx")
+ (set_attr "type" "sselog,*,*")
+ (set_attr "prefix_data16" "1,*,*")
+ (set_attr "prefix_extra" "1,*,*")
+ (set_attr "length_immediate" "1,*,*")
+ (set_attr "prefix" "maybe_vex,*,*")
+ (set_attr "mode" "V4SF,*,*")])
+
+;; Extract one SF element from a V4SF in memory: no instruction is
+;; emitted at all; after reload this splits into a plain SFmode load
+;; from the element's byte offset (element index * 4).
+(define_insn_and_split "*vec_extract_v4sf_mem"
+ [(set (match_operand:SF 0 "register_operand" "=x,*r,f")
+ (vec_select:SF
+ (match_operand:V4SF 1 "memory_operand" "o,o,o")
+ (parallel [(match_operand 2 "const_0_to_3_operand" "n,n,n")])))]
+ "TARGET_SSE"
+ "#"
+ "&& reload_completed"
+ [(const_int 0)]
+{
+ int i = INTVAL (operands[2]);
+
+ emit_move_insn (operands[0], adjust_address (operands[1], SFmode, i*4));
+ DONE;
+})
+
+(define_expand "avx_vextractf128<mode>"
[(match_operand:<ssehalfvecmode> 0 "nonimmediate_operand" "")
(match_operand:V_256 1 "register_operand" "")
(match_operand:SI 2 "const_0_to_1_operand" "")]
(vec_select:<ssehalfvecmode>
(match_operand:VI8F_256 1 "nonimmediate_operand" "xm,x")
(parallel [(const_int 0) (const_int 1)])))]
- "TARGET_AVX"
+ "TARGET_AVX && !(MEM_P (operands[0]) && MEM_P (operands[1]))"
"#"
"&& reload_completed"
[(const_int 0)]
(match_operand:VI8F_256 1 "register_operand" "x,x")
(parallel [(const_int 2) (const_int 3)])))]
"TARGET_AVX"
- "vextractf128\t{$0x1, %1, %0|%0, %1, 0x1}"
+ "vextract<i128>\t{$0x1, %1, %0|%0, %1, 0x1}"
[(set_attr "type" "sselog")
(set_attr "prefix_extra" "1")
(set_attr "length_immediate" "1")
(set_attr "memory" "none,store")
(set_attr "prefix" "vex")
- (set_attr "mode" "V8SF")])
+ (set_attr "mode" "<sseinsnmode>")])
(define_insn_and_split "vec_extract_lo_<mode>"
[(set (match_operand:<ssehalfvecmode> 0 "nonimmediate_operand" "=x,m")
(match_operand:VI4F_256 1 "nonimmediate_operand" "xm,x")
(parallel [(const_int 0) (const_int 1)
(const_int 2) (const_int 3)])))]
- "TARGET_AVX"
+ "TARGET_AVX && !(MEM_P (operands[0]) && MEM_P (operands[1]))"
"#"
"&& reload_completed"
[(const_int 0)]
(parallel [(const_int 4) (const_int 5)
(const_int 6) (const_int 7)])))]
"TARGET_AVX"
- "vextractf128\t{$0x1, %1, %0|%0, %1, 0x1}"
+ "vextract<i128>\t{$0x1, %1, %0|%0, %1, 0x1}"
[(set_attr "type" "sselog")
(set_attr "prefix_extra" "1")
(set_attr "length_immediate" "1")
(set_attr "memory" "none,store")
(set_attr "prefix" "vex")
- (set_attr "mode" "V8SF")])
+ (set_attr "mode" "<sseinsnmode>")])
(define_insn_and_split "vec_extract_lo_v16hi"
[(set (match_operand:V8HI 0 "nonimmediate_operand" "=x,m")
(const_int 2) (const_int 3)
(const_int 4) (const_int 5)
(const_int 6) (const_int 7)])))]
- "TARGET_AVX"
+ "TARGET_AVX && !(MEM_P (operands[0]) && MEM_P (operands[1]))"
"#"
"&& reload_completed"
[(const_int 0)]
(const_int 12) (const_int 13)
(const_int 14) (const_int 15)])))]
"TARGET_AVX"
- "vextractf128\t{$0x1, %1, %0|%0, %1, 0x1}"
+ "vextract%~128\t{$0x1, %1, %0|%0, %1, 0x1}"
[(set_attr "type" "sselog")
(set_attr "prefix_extra" "1")
(set_attr "length_immediate" "1")
(set_attr "memory" "none,store")
(set_attr "prefix" "vex")
- (set_attr "mode" "V8SF")])
+ (set_attr "mode" "OI")])
(define_insn_and_split "vec_extract_lo_v32qi"
[(set (match_operand:V16QI 0 "nonimmediate_operand" "=x,m")
(const_int 10) (const_int 11)
(const_int 12) (const_int 13)
(const_int 14) (const_int 15)])))]
- "TARGET_AVX"
+ "TARGET_AVX && !(MEM_P (operands[0]) && MEM_P (operands[1]))"
"#"
"&& reload_completed"
[(const_int 0)]
(const_int 28) (const_int 29)
(const_int 30) (const_int 31)])))]
"TARGET_AVX"
- "vextractf128\t{$0x1, %1, %0|%0, %1, 0x1}"
+ "vextract%~128\t{$0x1, %1, %0|%0, %1, 0x1}"
[(set_attr "type" "sselog")
(set_attr "prefix_extra" "1")
(set_attr "length_immediate" "1")
(set_attr "memory" "none,store")
(set_attr "prefix" "vex")
- (set_attr "mode" "V8SF")])
-
-(define_insn "*sse4_1_extractps"
- [(set (match_operand:SF 0 "nonimmediate_operand" "=rm")
- (vec_select:SF
- (match_operand:V4SF 1 "register_operand" "x")
- (parallel [(match_operand:SI 2 "const_0_to_3_operand" "n")])))]
- "TARGET_SSE4_1"
- "%vextractps\t{%2, %1, %0|%0, %1, %2}"
- [(set_attr "type" "sselog")
- (set_attr "prefix_data16" "1")
- (set_attr "prefix_extra" "1")
- (set_attr "length_immediate" "1")
- (set_attr "prefix" "maybe_vex")
- (set_attr "mode" "V4SF")])
-
-(define_insn_and_split "*vec_extract_v4sf_mem"
- [(set (match_operand:SF 0 "register_operand" "=x*rf")
- (vec_select:SF
- (match_operand:V4SF 1 "memory_operand" "o")
- (parallel [(match_operand 2 "const_0_to_3_operand" "n")])))]
- ""
- "#"
- "reload_completed"
- [(const_int 0)]
-{
- int i = INTVAL (operands[2]);
-
- emit_move_insn (operands[0], adjust_address (operands[1], SFmode, i*4));
- DONE;
-})
+ (set_attr "mode" "OI")])
;; Modes handled by vec_extract patterns.
(define_mode_iterator VEC_EXTRACT_MODE
- [V16QI V8HI V4SI V2DI
+ [(V32QI "TARGET_AVX") V16QI
+ (V16HI "TARGET_AVX") V8HI
+ (V8SI "TARGET_AVX") V4SI
+ (V4DI "TARGET_AVX") V2DI
(V8SF "TARGET_AVX") V4SF
(V4DF "TARGET_AVX") V2DF])
operands[2] = force_reg (V2DFmode, operands[2]);
})
-(define_insn "*sse3_interleave_highv2df"
+(define_insn "*vec_interleave_highv2df"
[(set (match_operand:V2DF 0 "nonimmediate_operand" "=x,x,x,x,x,m")
(vec_select:V2DF
(vec_concat:V4DF
(match_operand:V2DF 2 "nonimmediate_operand" " x,x,1,0,x,0"))
(parallel [(const_int 1)
(const_int 3)])))]
- "TARGET_SSE3 && ix86_vec_interleave_v2df_operator_ok (operands, 1)"
+ "TARGET_SSE2 && ix86_vec_interleave_v2df_operator_ok (operands, 1)"
"@
unpckhpd\t{%2, %0|%0, %2}
vunpckhpd\t{%2, %1, %0|%0, %1, %2}
movlpd\t{%H1, %0|%0, %H1}
vmovlpd\t{%H1, %2, %0|%0, %2, %H1}
%vmovhpd\t{%1, %0|%0, %1}"
- [(set_attr "isa" "noavx,avx,base,noavx,avx,base")
+ [(set_attr "isa" "noavx,avx,sse3,noavx,avx,*")
(set_attr "type" "sselog,sselog,sselog,ssemov,ssemov,ssemov")
(set_attr "prefix_data16" "*,*,*,1,*,1")
(set_attr "prefix" "orig,vex,maybe_vex,orig,vex,maybe_vex")
(set_attr "mode" "V2DF,V2DF,V2DF,V1DF,V1DF,V1DF")])
-(define_insn "*sse2_interleave_highv2df"
- [(set (match_operand:V2DF 0 "nonimmediate_operand" "=x,x,m")
- (vec_select:V2DF
- (vec_concat:V4DF
- (match_operand:V2DF 1 "nonimmediate_operand" " 0,o,x")
- (match_operand:V2DF 2 "nonimmediate_operand" " x,0,0"))
- (parallel [(const_int 1)
- (const_int 3)])))]
- "TARGET_SSE2 && ix86_vec_interleave_v2df_operator_ok (operands, 1)"
- "@
- unpckhpd\t{%2, %0|%0, %2}
- movlpd\t{%H1, %0|%0, %H1}
- movhpd\t{%1, %0|%0, %1}"
- [(set_attr "type" "sselog,ssemov,ssemov")
- (set_attr "prefix_data16" "*,1,1")
- (set_attr "mode" "V2DF,V1DF,V1DF")])
-
;; Recall that the 256-bit unpck insns only shuffle within their lanes.
(define_expand "avx_movddup256"
[(set (match_operand:V4DF 0 "register_operand" "")
[(set (match_operand:V4DF 0 "register_operand" "=x,x")
(vec_select:V4DF
(vec_concat:V8DF
- (match_operand:V4DF 1 "nonimmediate_operand" "xm,x")
- (match_operand:V4DF 2 "nonimmediate_operand" " 1,xm"))
+ (match_operand:V4DF 1 "nonimmediate_operand" " x,m")
+ (match_operand:V4DF 2 "nonimmediate_operand" "xm,1"))
(parallel [(const_int 0) (const_int 4)
(const_int 2) (const_int 6)])))]
- "TARGET_AVX
- && (!MEM_P (operands[1]) || rtx_equal_p (operands[1], operands[2]))"
+ "TARGET_AVX"
"@
- vmovddup\t{%1, %0|%0, %1}
- vunpcklpd\t{%2, %1, %0|%0, %1, %2}"
+ vunpcklpd\t{%2, %1, %0|%0, %1, %2}
+ vmovddup\t{%1, %0|%0, %1}"
[(set_attr "type" "sselog")
(set_attr "prefix" "vex")
(set_attr "mode" "V4DF")])
(match_dup 3)
(match_dup 4))
(parallel [(const_int 0) (const_int 1)
- (const_int 4) (const_int 5)])))]
+ (const_int 4) (const_int 5)])))]
"TARGET_AVX"
{
operands[3] = gen_reg_rtx (V4DFmode);
operands[1] = force_reg (V2DFmode, operands[1]);
})
-(define_insn "*sse3_interleave_lowv2df"
+(define_insn "*vec_interleave_lowv2df"
[(set (match_operand:V2DF 0 "nonimmediate_operand" "=x,x,x,x,x,o")
(vec_select:V2DF
(vec_concat:V4DF
(match_operand:V2DF 2 "nonimmediate_operand" " x,x,1,m,m,x"))
(parallel [(const_int 0)
(const_int 2)])))]
- "TARGET_SSE3 && ix86_vec_interleave_v2df_operator_ok (operands, 0)"
+ "TARGET_SSE2 && ix86_vec_interleave_v2df_operator_ok (operands, 0)"
"@
unpcklpd\t{%2, %0|%0, %2}
vunpcklpd\t{%2, %1, %0|%0, %1, %2}
movhpd\t{%2, %0|%0, %2}
vmovhpd\t{%2, %1, %0|%0, %1, %2}
%vmovlpd\t{%2, %H0|%H0, %2}"
- [(set_attr "isa" "noavx,avx,base,noavx,avx,base")
+ [(set_attr "isa" "noavx,avx,sse3,noavx,avx,*")
(set_attr "type" "sselog,sselog,sselog,ssemov,ssemov,ssemov")
(set_attr "prefix_data16" "*,*,*,1,*,1")
(set_attr "prefix" "orig,vex,maybe_vex,orig,vex,maybe_vex")
(set_attr "mode" "V2DF,V2DF,V2DF,V1DF,V1DF,V1DF")])
-(define_insn "*sse2_interleave_lowv2df"
- [(set (match_operand:V2DF 0 "nonimmediate_operand" "=x,x,o")
- (vec_select:V2DF
- (vec_concat:V4DF
- (match_operand:V2DF 1 "nonimmediate_operand" " 0,0,0")
- (match_operand:V2DF 2 "nonimmediate_operand" " x,m,x"))
- (parallel [(const_int 0)
- (const_int 2)])))]
- "TARGET_SSE2 && ix86_vec_interleave_v2df_operator_ok (operands, 0)"
- "@
- unpcklpd\t{%2, %0|%0, %2}
- movhpd\t{%2, %0|%0, %2}
- movlpd\t{%2, %H0|%H0, %2}"
- [(set_attr "type" "sselog,ssemov,ssemov")
- (set_attr "prefix_data16" "*,1,1")
- (set_attr "mode" "V2DF,V1DF,V1DF")])
-
(define_split
[(set (match_operand:V2DF 0 "memory_operand" "")
(vec_select:V2DF
DONE;
})
-;; Modes handled by vec_extract_even/odd pattern.
-(define_mode_iterator VEC_EXTRACT_EVENODD_MODE
- [(V16QI "TARGET_SSE2")
- (V8HI "TARGET_SSE2")
- (V4SI "TARGET_SSE2")
- (V2DI "TARGET_SSE2")
- (V8SF "TARGET_AVX") V4SF
- (V4DF "TARGET_AVX") (V2DF "TARGET_SSE2")])
-
-(define_expand "vec_extract_even<mode>"
- [(match_operand:VEC_EXTRACT_EVENODD_MODE 0 "register_operand" "")
- (match_operand:VEC_EXTRACT_EVENODD_MODE 1 "register_operand" "")
- (match_operand:VEC_EXTRACT_EVENODD_MODE 2 "register_operand" "")]
- "TARGET_SSE"
-{
- ix86_expand_vec_extract_even_odd (operands[0], operands[1], operands[2], 0);
- DONE;
-})
-
-(define_expand "vec_extract_odd<mode>"
- [(match_operand:VEC_EXTRACT_EVENODD_MODE 0 "register_operand" "")
- (match_operand:VEC_EXTRACT_EVENODD_MODE 1 "register_operand" "")
- (match_operand:VEC_EXTRACT_EVENODD_MODE 2 "register_operand" "")]
- "TARGET_SSE"
-{
- ix86_expand_vec_extract_even_odd (operands[0], operands[1], operands[2], 1);
- DONE;
-})
-
;; punpcklqdq and punpckhqdq are shorter than shufpd.
+;; 256-bit vpunpckhqdq.  Note the in-lane semantics: the selection
+;; (1 5 3 7) interleaves the high quadword of each 128-bit lane of
+;; the two sources, not the high half of the full 256-bit vectors.
+(define_insn "avx2_interleave_highv4di"
+ [(set (match_operand:V4DI 0 "register_operand" "=x")
+ (vec_select:V4DI
+ (vec_concat:V8DI
+ (match_operand:V4DI 1 "register_operand" "x")
+ (match_operand:V4DI 2 "nonimmediate_operand" "xm"))
+ (parallel [(const_int 1)
+ (const_int 5)
+ (const_int 3)
+ (const_int 7)])))]
+ "TARGET_AVX2"
+ "vpunpckhqdq\t{%2, %1, %0|%0, %1, %2}"
+ [(set_attr "type" "sselog")
+ (set_attr "prefix" "vex")
+ (set_attr "mode" "OI")])
(define_insn "vec_interleave_highv2di"
[(set (match_operand:V2DI 0 "register_operand" "=x,x")
(set_attr "prefix" "orig,vex")
(set_attr "mode" "TI")])
+;; 256-bit vpunpcklqdq.  As with the high form, this is in-lane: the
+;; selection (0 4 2 6) interleaves the low quadword of each 128-bit
+;; lane of the two sources.
+(define_insn "avx2_interleave_lowv4di"
+ [(set (match_operand:V4DI 0 "register_operand" "=x")
+ (vec_select:V4DI
+ (vec_concat:V8DI
+ (match_operand:V4DI 1 "register_operand" "x")
+ (match_operand:V4DI 2 "nonimmediate_operand" "xm"))
+ (parallel [(const_int 0)
+ (const_int 4)
+ (const_int 2)
+ (const_int 6)])))]
+ "TARGET_AVX2"
+ "vpunpcklqdq\t{%2, %1, %0|%0, %1, %2}"
+ [(set_attr "type" "sselog")
+ (set_attr "prefix" "vex")
+ (set_attr "mode" "OI")])
+
(define_insn "vec_interleave_lowv2di"
[(set (match_operand:V2DI 0 "register_operand" "=x,x")
(vec_select:V2DI
#
#
#"
- [(set_attr "isa" "base,noavx,avx,base,base,base")
+ [(set_attr "isa" "*,noavx,avx,*,*,*")
(set_attr "type" "ssemov,sselog1,sselog1,ssemov,fmov,imov")
(set (attr "prefix_data16")
(if_then_else
(and (eq_attr "alternative" "0")
- (eq (symbol_ref "TARGET_AVX") (const_int 0)))
+ (not (match_test "TARGET_AVX")))
(const_string "1")
(const_string "*")))
(set_attr "prefix" "maybe_vex,orig,vex,*,*,*")
[(set (match_dup 0) (match_dup 1))]
"operands[1] = adjust_address (operands[1], DFmode, 8);")
+;; Extract the high DF element of a V2DF on SSE1-only targets,
+;; reusing single-float moves (movhps/movhlps/movlps) since no
+;; double-precision move instructions exist without SSE2.
+(define_insn "*vec_extractv2df_1_sse"
+ [(set (match_operand:DF 0 "nonimmediate_operand" "=m,x,x")
+ (vec_select:DF
+ (match_operand:V2DF 1 "nonimmediate_operand" "x,x,o")
+ (parallel [(const_int 1)])))]
+ "!TARGET_SSE2 && TARGET_SSE
+ && !(MEM_P (operands[0]) && MEM_P (operands[1]))"
+ "@
+ movhps\t{%1, %0|%0, %1}
+ movhlps\t{%1, %0|%0, %1}
+ movlps\t{%H1, %0|%0, %H1}"
+ [(set_attr "type" "ssemov")
+ (set_attr "mode" "V2SF,V4SF,V2SF")])
+
;; Avoid combining registers from different units in a single alternative,
;; see comment above inline_secondary_memory_needed function in i386.c
(define_insn "sse2_storelpd"
DONE;
})
+;; Extract the low DF element of a V2DF on SSE1-only targets; again
+;; expressed with single-float moves (movlps/movaps) because SSE1 has
+;; no double-precision moves.
+(define_insn "*vec_extractv2df_0_sse"
+ [(set (match_operand:DF 0 "nonimmediate_operand" "=m,x,x")
+ (vec_select:DF
+ (match_operand:V2DF 1 "nonimmediate_operand" "x,x,m")
+ (parallel [(const_int 0)])))]
+ "!TARGET_SSE2 && TARGET_SSE
+ && !(MEM_P (operands[0]) && MEM_P (operands[1]))"
+ "@
+ movlps\t{%1, %0|%0, %1}
+ movaps\t{%1, %0|%0, %1}
+ movlps\t{%1, %0|%0, %1}"
+ [(set_attr "type" "ssemov")
+ (set_attr "mode" "V2SF,V4SF,V2SF")])
+
(define_expand "sse2_loadhpd_exp"
[(set (match_operand:V2DF 0 "nonimmediate_operand" "")
(vec_concat:V2DF
"TARGET_SSE2"
{
rtx dst = ix86_fixup_binary_operands (UNKNOWN, V2DFmode, operands);
-
+
emit_insn (gen_sse2_loadhpd (dst, operands[1], operands[2]));
/* Fix up the destination if needed. */
#
#
#"
- [(set_attr "isa" "noavx,avx,noavx,avx,base,base,base")
+ [(set_attr "isa" "noavx,avx,noavx,avx,*,*,*")
(set_attr "type" "ssemov,ssemov,sselog,sselog,ssemov,fmov,imov")
(set_attr "prefix_data16" "1,*,*,*,*,*,*")
(set_attr "prefix" "orig,vex,orig,vex,*,*,*")
"TARGET_SSE2"
{
rtx dst = ix86_fixup_binary_operands (UNKNOWN, V2DFmode, operands);
-
+
emit_insn (gen_sse2_loadlpd (dst, operands[1], operands[2]));
/* Fix up the destination if needed. */
#
#
#"
- [(set_attr "isa" "base,noavx,avx,noavx,avx,noavx,noavx,avx,base,base,base")
- (set_attr "type" "ssemov,ssemov,ssemov,ssemov,ssemov,sselog,ssemov,ssemov,ssemov,fmov,imov")
+ [(set_attr "isa" "*,noavx,avx,noavx,avx,noavx,noavx,avx,*,*,*")
+ (set (attr "type")
+ (cond [(eq_attr "alternative" "5")
+ (const_string "sselog")
+ (eq_attr "alternative" "9")
+ (const_string "fmov")
+ (eq_attr "alternative" "10")
+ (const_string "imov")
+ ]
+ (const_string "ssemov")))
(set_attr "prefix_data16" "*,1,*,*,*,*,1,*,*,*,*")
(set_attr "length_immediate" "*,*,*,*,*,1,*,*,*,*,*")
(set_attr "prefix" "maybe_vex,orig,vex,orig,vex,orig,orig,vex,*,*,*")
[(set (match_dup 0) (match_dup 1))]
"operands[0] = adjust_address (operands[0], DFmode, 8);")
-;; Not sure these two are ever used, but it doesn't hurt to have
-;; them. -aoliva
-(define_insn "*vec_extractv2df_1_sse"
- [(set (match_operand:DF 0 "nonimmediate_operand" "=m,x,x")
- (vec_select:DF
- (match_operand:V2DF 1 "nonimmediate_operand" "x,x,o")
- (parallel [(const_int 1)])))]
- "!TARGET_SSE2 && TARGET_SSE
- && !(MEM_P (operands[0]) && MEM_P (operands[1]))"
- "@
- movhps\t{%1, %0|%0, %1}
- movhlps\t{%1, %0|%0, %1}
- movlps\t{%H1, %0|%0, %H1}"
- [(set_attr "type" "ssemov")
- (set_attr "mode" "V2SF,V4SF,V2SF")])
-
-(define_insn "*vec_extractv2df_0_sse"
- [(set (match_operand:DF 0 "nonimmediate_operand" "=m,x,x")
- (vec_select:DF
- (match_operand:V2DF 1 "nonimmediate_operand" "x,x,m")
- (parallel [(const_int 0)])))]
- "!TARGET_SSE2 && TARGET_SSE
- && !(MEM_P (operands[0]) && MEM_P (operands[1]))"
- "@
- movlps\t{%1, %0|%0, %1}
- movaps\t{%1, %0|%0, %1}
- movlps\t{%1, %0|%0, %1}"
- [(set_attr "type" "ssemov")
- (set_attr "mode" "V2SF,V4SF,V2SF")])
-
(define_insn "sse2_movsd"
[(set (match_operand:V2DF 0 "nonimmediate_operand" "=x,x,x,x,m,x,x,x,o")
(vec_merge:V2DF
movhps\t{%H1, %0|%0, %H1}
vmovhps\t{%H1, %2, %0|%0, %2, %H1}
%vmovhps\t{%1, %H0|%H0, %1}"
- [(set_attr "isa" "noavx,avx,noavx,avx,base,noavx,noavx,avx,base")
- (set_attr "type" "ssemov,ssemov,ssemov,ssemov,ssemov,sselog,ssemov,ssemov,ssemov")
+ [(set_attr "isa" "noavx,avx,noavx,avx,*,noavx,noavx,avx,*")
+ (set (attr "type")
+ (if_then_else
+ (eq_attr "alternative" "5")
+ (const_string "sselog")
+ (const_string "ssemov")))
(set (attr "prefix_data16")
(if_then_else
(and (eq_attr "alternative" "2,4")
- (eq (symbol_ref "TARGET_AVX") (const_int 0)))
+ (not (match_test "TARGET_AVX")))
(const_string "1")
(const_string "*")))
(set_attr "length_immediate" "*,*,*,*,*,1,*,*,*")
(set_attr "prefix" "orig,vex,orig,vex,maybe_vex,orig,orig,vex,maybe_vex")
(set_attr "mode" "DF,DF,V1DF,V1DF,V1DF,V2DF,V1DF,V1DF,V1DF")])
-(define_insn "*vec_dupv2df_sse3"
- [(set (match_operand:V2DF 0 "register_operand" "=x")
- (vec_duplicate:V2DF
- (match_operand:DF 1 "nonimmediate_operand" "xm")))]
- "TARGET_SSE3"
- "%vmovddup\t{%1, %0|%0, %1}"
- [(set_attr "type" "sselog1")
- (set_attr "prefix" "maybe_vex")
- (set_attr "mode" "DF")])
-
(define_insn "vec_dupv2df"
- [(set (match_operand:V2DF 0 "register_operand" "=x")
+ [(set (match_operand:V2DF 0 "register_operand" "=x,x")
(vec_duplicate:V2DF
- (match_operand:DF 1 "register_operand" "0")))]
+ (match_operand:DF 1 "nonimmediate_operand" " 0,xm")))]
"TARGET_SSE2"
- "unpcklpd\t%0, %0"
- [(set_attr "type" "sselog1")
+ "@
+ unpcklpd\t%0, %0
+ %vmovddup\t{%1, %0|%0, %1}"
+ [(set_attr "isa" "noavx,sse3")
+ (set_attr "type" "sselog1")
+ (set_attr "prefix" "orig,maybe_vex")
(set_attr "mode" "V2DF")])
-(define_insn "*vec_concatv2df_sse3"
- [(set (match_operand:V2DF 0 "register_operand" "=x")
- (vec_concat:V2DF
- (match_operand:DF 1 "nonimmediate_operand" "xm")
- (match_dup 1)))]
- "TARGET_SSE3"
- "%vmovddup\t{%1, %0|%0, %1}"
- [(set_attr "type" "sselog1")
- (set_attr "prefix" "maybe_vex")
- (set_attr "mode" "DF")])
-
(define_insn "*vec_concatv2df"
- [(set (match_operand:V2DF 0 "register_operand" "=Y2,x,Y2,x,Y2,x,x")
+ [(set (match_operand:V2DF 0 "register_operand" "=x,x,x,x,x,x,x,x")
(vec_concat:V2DF
- (match_operand:DF 1 "nonimmediate_operand" " 0 ,x,0 ,x,m ,0,0")
- (match_operand:DF 2 "vector_move_operand" " Y2,x,m ,m,C ,x,m")))]
+ (match_operand:DF 1 "nonimmediate_operand" " 0,x,m,0,x,m,0,0")
+ (match_operand:DF 2 "vector_move_operand" " x,x,1,m,m,C,x,m")))]
"TARGET_SSE"
"@
unpcklpd\t{%2, %0|%0, %2}
vunpcklpd\t{%2, %1, %0|%0, %1, %2}
+ %vmovddup\t{%1, %0|%0, %1}
movhpd\t{%2, %0|%0, %2}
vmovhpd\t{%2, %1, %0|%0, %1, %2}
%vmovsd\t{%1, %0|%0, %1}
movlhps\t{%2, %0|%0, %2}
movhps\t{%2, %0|%0, %2}"
- [(set_attr "isa" "noavx,avx,noavx,avx,base,noavx,noavx")
- (set_attr "type" "sselog,sselog,ssemov,ssemov,ssemov,ssemov,ssemov")
- (set_attr "prefix_data16" "*,*,1,*,*,*,*")
- (set_attr "prefix" "orig,vex,orig,vex,maybe_vex,orig,orig")
- (set_attr "mode" "V2DF,V2DF,V1DF,V1DF,DF,V4SF,V2SF")])
+ [(set_attr "isa" "sse2_noavx,avx,sse3,sse2_noavx,avx,sse2,noavx,noavx")
+ (set (attr "type")
+ (if_then_else
+ (eq_attr "alternative" "0,1,2")
+ (const_string "sselog")
+ (const_string "ssemov")))
+ (set_attr "prefix_data16" "*,*,*,1,*,*,*,*")
+ (set_attr "prefix" "orig,vex,maybe_vex,orig,vex,maybe_vex,orig,orig")
+ (set_attr "mode" "V2DF,V2DF,DF,V1DF,V1DF,DF,V4SF,V2SF")])
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;;
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
(define_expand "neg<mode>2"
- [(set (match_operand:VI_128 0 "register_operand" "")
- (minus:VI_128
+ [(set (match_operand:VI_AVX2 0 "register_operand" "")
+ (minus:VI_AVX2
(match_dup 2)
- (match_operand:VI_128 1 "nonimmediate_operand" "")))]
+ (match_operand:VI_AVX2 1 "nonimmediate_operand" "")))]
"TARGET_SSE2"
"operands[2] = force_reg (<MODE>mode, CONST0_RTX (<MODE>mode));")
(define_expand "<plusminus_insn><mode>3"
- [(set (match_operand:VI_128 0 "register_operand" "")
- (plusminus:VI_128
- (match_operand:VI_128 1 "nonimmediate_operand" "")
- (match_operand:VI_128 2 "nonimmediate_operand" "")))]
+ [(set (match_operand:VI_AVX2 0 "register_operand" "")
+ (plusminus:VI_AVX2
+ (match_operand:VI_AVX2 1 "nonimmediate_operand" "")
+ (match_operand:VI_AVX2 2 "nonimmediate_operand" "")))]
"TARGET_SSE2"
"ix86_fixup_binary_operands_no_copy (<CODE>, <MODE>mode, operands);")
(define_insn "*<plusminus_insn><mode>3"
- [(set (match_operand:VI_128 0 "register_operand" "=x,x")
- (plusminus:VI_128
- (match_operand:VI_128 1 "nonimmediate_operand" "<comm>0,x")
- (match_operand:VI_128 2 "nonimmediate_operand" "xm,xm")))]
+ [(set (match_operand:VI_AVX2 0 "register_operand" "=x,x")
+ (plusminus:VI_AVX2
+ (match_operand:VI_AVX2 1 "nonimmediate_operand" "<comm>0,x")
+ (match_operand:VI_AVX2 2 "nonimmediate_operand" "xm,xm")))]
"TARGET_SSE2 && ix86_binary_operator_ok (<CODE>, <MODE>mode, operands)"
"@
p<plusminus_mnemonic><ssemodesuffix>\t{%2, %0|%0, %2}
(set_attr "type" "sseiadd")
(set_attr "prefix_data16" "1,*")
(set_attr "prefix" "orig,vex")
- (set_attr "mode" "TI")])
+ (set_attr "mode" "<sseinsnmode>")])
-(define_expand "sse2_<plusminus_insn><mode>3"
- [(set (match_operand:VI12_128 0 "register_operand" "")
- (sat_plusminus:VI12_128
- (match_operand:VI12_128 1 "nonimmediate_operand" "")
- (match_operand:VI12_128 2 "nonimmediate_operand" "")))]
+(define_expand "<sse2_avx2>_<plusminus_insn><mode>3"
+ [(set (match_operand:VI12_AVX2 0 "register_operand" "")
+ (sat_plusminus:VI12_AVX2
+ (match_operand:VI12_AVX2 1 "nonimmediate_operand" "")
+ (match_operand:VI12_AVX2 2 "nonimmediate_operand" "")))]
"TARGET_SSE2"
"ix86_fixup_binary_operands_no_copy (<CODE>, <MODE>mode, operands);")
-(define_insn "*sse2_<plusminus_insn><mode>3"
- [(set (match_operand:VI12_128 0 "register_operand" "=x,x")
- (sat_plusminus:VI12_128
- (match_operand:VI12_128 1 "nonimmediate_operand" "<comm>0,x")
- (match_operand:VI12_128 2 "nonimmediate_operand" "xm,xm")))]
+(define_insn "*<sse2_avx2>_<plusminus_insn><mode>3"
+ [(set (match_operand:VI12_AVX2 0 "register_operand" "=x,x")
+ (sat_plusminus:VI12_AVX2
+ (match_operand:VI12_AVX2 1 "nonimmediate_operand" "<comm>0,x")
+ (match_operand:VI12_AVX2 2 "nonimmediate_operand" "xm,xm")))]
"TARGET_SSE2 && ix86_binary_operator_ok (<CODE>, <MODE>mode, operands)"
"@
p<plusminus_mnemonic><ssemodesuffix>\t{%2, %0|%0, %2}
(set_attr "prefix" "orig,vex")
(set_attr "mode" "TI")])
-(define_insn_and_split "mulv16qi3"
- [(set (match_operand:V16QI 0 "register_operand" "")
- (mult:V16QI (match_operand:V16QI 1 "register_operand" "")
- (match_operand:V16QI 2 "register_operand" "")))]
+(define_insn_and_split "mul<mode>3"
+ [(set (match_operand:VI1_AVX2 0 "register_operand" "")
+ (mult:VI1_AVX2 (match_operand:VI1_AVX2 1 "register_operand" "")
+ (match_operand:VI1_AVX2 2 "register_operand" "")))]
"TARGET_SSE2
&& can_create_pseudo_p ()"
"#"
{
rtx t[6];
int i;
+ enum machine_mode mulmode = <sseunpackmode>mode;
for (i = 0; i < 6; ++i)
- t[i] = gen_reg_rtx (V16QImode);
+ t[i] = gen_reg_rtx (<MODE>mode);
/* Unpack data such that we've got a source byte in each low byte of
each word. We don't care what goes into the high byte of each word.
Rather than trying to get zero in there, most convenient is to let
it be a copy of the low byte. */
- emit_insn (gen_vec_interleave_highv16qi (t[0], operands[1], operands[1]));
- emit_insn (gen_vec_interleave_highv16qi (t[1], operands[2], operands[2]));
- emit_insn (gen_vec_interleave_lowv16qi (t[2], operands[1], operands[1]));
- emit_insn (gen_vec_interleave_lowv16qi (t[3], operands[2], operands[2]));
+ emit_insn (gen_<vec_avx2>_interleave_high<mode> (t[0], operands[1],
+ operands[1]));
+ emit_insn (gen_<vec_avx2>_interleave_high<mode> (t[1], operands[2],
+ operands[2]));
+ emit_insn (gen_<vec_avx2>_interleave_low<mode> (t[2], operands[1],
+ operands[1]));
+ emit_insn (gen_<vec_avx2>_interleave_low<mode> (t[3], operands[2],
+ operands[2]));
/* Multiply words. The end-of-line annotations here give a picture of what
the output of that instruction looks like. Dot means don't care; the
letters are the bytes of the result with A being the most significant. */
- emit_insn (gen_mulv8hi3 (gen_lowpart (V8HImode, t[4]), /* .A.B.C.D.E.F.G.H */
- gen_lowpart (V8HImode, t[0]),
- gen_lowpart (V8HImode, t[1])));
- emit_insn (gen_mulv8hi3 (gen_lowpart (V8HImode, t[5]), /* .I.J.K.L.M.N.O.P */
- gen_lowpart (V8HImode, t[2]),
- gen_lowpart (V8HImode, t[3])));
+ emit_insn (gen_rtx_SET (VOIDmode, gen_lowpart (mulmode, t[4]),
+ gen_rtx_MULT (mulmode, /* .A.B.C.D.E.F.G.H */
+ gen_lowpart (mulmode, t[0]),
+ gen_lowpart (mulmode, t[1]))));
+ emit_insn (gen_rtx_SET (VOIDmode, gen_lowpart (mulmode, t[5]),
+ gen_rtx_MULT (mulmode, /* .I.J.K.L.M.N.O.P */
+ gen_lowpart (mulmode, t[2]),
+ gen_lowpart (mulmode, t[3]))));
/* Extract the even bytes and merge them back together. */
ix86_expand_vec_extract_even_odd (operands[0], t[5], t[4], 0);
+
+ set_unique_reg_note (get_last_insn (), REG_EQUAL,
+ gen_rtx_MULT (<MODE>mode, operands[1], operands[2]));
DONE;
})
-(define_expand "mulv8hi3"
- [(set (match_operand:V8HI 0 "register_operand" "")
- (mult:V8HI (match_operand:V8HI 1 "nonimmediate_operand" "")
- (match_operand:V8HI 2 "nonimmediate_operand" "")))]
+;; Element-wise 16-bit multiply (pmullw/vpmullw); VI2_AVX2 extends the
+;; standard mul<mode>3 name to V16HI under TARGET_AVX2.
+(define_expand "mul<mode>3"
+ [(set (match_operand:VI2_AVX2 0 "register_operand" "")
+ (mult:VI2_AVX2 (match_operand:VI2_AVX2 1 "nonimmediate_operand" "")
+ (match_operand:VI2_AVX2 2 "nonimmediate_operand" "")))]
"TARGET_SSE2"
- "ix86_fixup_binary_operands_no_copy (MULT, V8HImode, operands);")
+ "ix86_fixup_binary_operands_no_copy (MULT, <MODE>mode, operands);")
-(define_insn "*mulv8hi3"
- [(set (match_operand:V8HI 0 "register_operand" "=x,x")
- (mult:V8HI (match_operand:V8HI 1 "nonimmediate_operand" "%0,x")
- (match_operand:V8HI 2 "nonimmediate_operand" "xm,xm")))]
- "TARGET_SSE2 && ix86_binary_operator_ok (MULT, V8HImode, operands)"
+(define_insn "*mul<mode>3"
+ [(set (match_operand:VI2_AVX2 0 "register_operand" "=x,x")
+ (mult:VI2_AVX2 (match_operand:VI2_AVX2 1 "nonimmediate_operand" "%0,x")
+ (match_operand:VI2_AVX2 2 "nonimmediate_operand" "xm,xm")))]
+ "TARGET_SSE2 && ix86_binary_operator_ok (MULT, <MODE>mode, operands)"
"@
pmullw\t{%2, %0|%0, %2}
vpmullw\t{%2, %1, %0|%0, %1, %2}"
(set_attr "type" "sseimul")
(set_attr "prefix_data16" "1,*")
(set_attr "prefix" "orig,vex")
- (set_attr "mode" "TI")])
+ (set_attr "mode" "<sseinsnmode>")])
-(define_expand "<s>mulv8hi3_highpart"
- [(set (match_operand:V8HI 0 "register_operand" "")
- (truncate:V8HI
- (lshiftrt:V8SI
- (mult:V8SI
- (any_extend:V8SI
- (match_operand:V8HI 1 "nonimmediate_operand" ""))
- (any_extend:V8SI
- (match_operand:V8HI 2 "nonimmediate_operand" "")))
- (const_int 16))))]
+;; High half of the widened 16-bit multiply (pmulhw/pmulhuw).  The RTL
+;; widens via any_extend to <ssedoublemode>, multiplies, shifts right by
+;; 16, then truncates back to the element width.
+(define_expand "<s>mul<mode>3_highpart"
+ [(set (match_operand:VI2_AVX2 0 "register_operand" "")
+ (truncate:VI2_AVX2
+ (lshiftrt:<ssedoublemode>
+ (mult:<ssedoublemode>
+ (any_extend:<ssedoublemode>
+ (match_operand:VI2_AVX2 1 "nonimmediate_operand" ""))
+ (any_extend:<ssedoublemode>
+ (match_operand:VI2_AVX2 2 "nonimmediate_operand" "")))
+ (const_int 16))))]
"TARGET_SSE2"
- "ix86_fixup_binary_operands_no_copy (MULT, V8HImode, operands);")
+ "ix86_fixup_binary_operands_no_copy (MULT, <MODE>mode, operands);")
-(define_insn "*<s>mulv8hi3_highpart"
- [(set (match_operand:V8HI 0 "register_operand" "=x,x")
- (truncate:V8HI
- (lshiftrt:V8SI
- (mult:V8SI
- (any_extend:V8SI
- (match_operand:V8HI 1 "nonimmediate_operand" "%0,x"))
- (any_extend:V8SI
- (match_operand:V8HI 2 "nonimmediate_operand" "xm,xm")))
+(define_insn "*<s>mul<mode>3_highpart"
+ [(set (match_operand:VI2_AVX2 0 "register_operand" "=x,x")
+ (truncate:VI2_AVX2
+ (lshiftrt:<ssedoublemode>
+ (mult:<ssedoublemode>
+ (any_extend:<ssedoublemode>
+ (match_operand:VI2_AVX2 1 "nonimmediate_operand" "%0,x"))
+ (any_extend:<ssedoublemode>
+ (match_operand:VI2_AVX2 2 "nonimmediate_operand" "xm,xm")))
(const_int 16))))]
- "TARGET_SSE2 && ix86_binary_operator_ok (MULT, V8HImode, operands)"
+ "TARGET_SSE2 && ix86_binary_operator_ok (MULT, <MODE>mode, operands)"
"@
pmulh<u>w\t{%2, %0|%0, %2}
vpmulh<u>w\t{%2, %1, %0|%0, %1, %2}"
(set_attr "type" "sseimul")
(set_attr "prefix_data16" "1,*")
(set_attr "prefix" "orig,vex")
- (set_attr "mode" "TI")])
+ (set_attr "mode" "<sseinsnmode>")])
+
+;; vpmuludq: unsigned widening multiply of the even 32-bit elements
+;; (indices 0,2,4,6) of each 256-bit operand, giving four 64-bit products.
+(define_expand "avx2_umulv4siv4di3"
+ [(set (match_operand:V4DI 0 "register_operand" "")
+ (mult:V4DI
+ (zero_extend:V4DI
+ (vec_select:V4SI
+ (match_operand:V8SI 1 "nonimmediate_operand" "")
+ (parallel [(const_int 0) (const_int 2)
+ (const_int 4) (const_int 6)])))
+ (zero_extend:V4DI
+ (vec_select:V4SI
+ (match_operand:V8SI 2 "nonimmediate_operand" "")
+ (parallel [(const_int 0) (const_int 2)
+ (const_int 4) (const_int 6)])))))]
+ "TARGET_AVX2"
+ "ix86_fixup_binary_operands_no_copy (MULT, V8SImode, operands);")
+
+;; Matcher for the avx2_umulv4siv4di3 expander above (vpmuludq).
+;; Renamed from "*avx_umulv4siv4di3" for consistency with that expander
+;; and with the signed twin "*avx2_mulv4siv4di3"; the '*' prefix means no
+;; gen_* function is emitted, so the rename affects no callers.
+(define_insn "*avx2_umulv4siv4di3"
+ [(set (match_operand:V4DI 0 "register_operand" "=x")
+ (mult:V4DI
+ (zero_extend:V4DI
+ (vec_select:V4SI
+ (match_operand:V8SI 1 "nonimmediate_operand" "%x")
+ (parallel [(const_int 0) (const_int 2)
+ (const_int 4) (const_int 6)])))
+ (zero_extend:V4DI
+ (vec_select:V4SI
+ (match_operand:V8SI 2 "nonimmediate_operand" "xm")
+ (parallel [(const_int 0) (const_int 2)
+ (const_int 4) (const_int 6)])))))]
+ "TARGET_AVX2 && ix86_binary_operator_ok (MULT, V8SImode, operands)"
+ "vpmuludq\t{%2, %1, %0|%0, %1, %2}"
+ [(set_attr "type" "sseimul")
+ (set_attr "prefix" "vex")
+ (set_attr "mode" "OI")])
(define_expand "sse2_umulv2siv2di3"
[(set (match_operand:V2DI 0 "register_operand" "")
(set_attr "prefix" "orig,vex")
(set_attr "mode" "TI")])
+;; vpmuldq: signed widening multiply of the even 32-bit elements
+;; (indices 0,2,4,6) of each 256-bit operand, giving four 64-bit products.
+(define_expand "avx2_mulv4siv4di3"
+ [(set (match_operand:V4DI 0 "register_operand" "")
+ (mult:V4DI
+ (sign_extend:V4DI
+ (vec_select:V4SI
+ (match_operand:V8SI 1 "nonimmediate_operand" "")
+ (parallel [(const_int 0) (const_int 2)
+ (const_int 4) (const_int 6)])))
+ (sign_extend:V4DI
+ (vec_select:V4SI
+ (match_operand:V8SI 2 "nonimmediate_operand" "")
+ (parallel [(const_int 0) (const_int 2)
+ (const_int 4) (const_int 6)])))))]
+ "TARGET_AVX2"
+ "ix86_fixup_binary_operands_no_copy (MULT, V8SImode, operands);")
+
+;; Matcher for avx2_mulv4siv4di3 (vpmuldq).  The stray
+;; (set_attr "isa" "avx") was a copy-paste leftover from the two-alternative
+;; SSE4.1 pattern: this pattern has a single alternative already gated by
+;; TARGET_AVX2, and its vpmuludq twin carries no isa attribute either.
+(define_insn "*avx2_mulv4siv4di3"
+ [(set (match_operand:V4DI 0 "register_operand" "=x")
+ (mult:V4DI
+ (sign_extend:V4DI
+ (vec_select:V4SI
+ (match_operand:V8SI 1 "nonimmediate_operand" "x")
+ (parallel [(const_int 0) (const_int 2)
+ (const_int 4) (const_int 6)])))
+ (sign_extend:V4DI
+ (vec_select:V4SI
+ (match_operand:V8SI 2 "nonimmediate_operand" "xm")
+ (parallel [(const_int 0) (const_int 2)
+ (const_int 4) (const_int 6)])))))]
+ "TARGET_AVX2 && ix86_binary_operator_ok (MULT, V8SImode, operands)"
+ "vpmuldq\t{%2, %1, %0|%0, %1, %2}"
+ [(set_attr "type" "sseimul")
+ (set_attr "prefix_extra" "1")
+ (set_attr "prefix" "vex")
+ (set_attr "mode" "OI")])
+
+
(define_expand "sse4_1_mulv2siv2di3"
[(set (match_operand:V2DI 0 "register_operand" "")
(mult:V2DI
(set_attr "prefix" "orig,vex")
(set_attr "mode" "TI")])
-(define_expand "sse2_pmaddwd"
- [(set (match_operand:V4SI 0 "register_operand" "")
- (plus:V4SI
- (mult:V4SI
- (sign_extend:V4SI
- (vec_select:V4HI
- (match_operand:V8HI 1 "nonimmediate_operand" "")
+;; vpmaddwd: multiply signed 16-bit pairs and add adjacent products,
+;; yielding eight 32-bit sums.  Modeled as (even*even) + (odd*odd) over
+;; the sixteen word elements of each 256-bit operand.
+(define_expand "avx2_pmaddwd"
+ [(set (match_operand:V8SI 0 "register_operand" "")
+ (plus:V8SI
+ (mult:V8SI
+ (sign_extend:V8SI
+ (vec_select:V8HI
+ (match_operand:V16HI 1 "nonimmediate_operand" "")
(parallel [(const_int 0)
(const_int 2)
(const_int 4)
- (const_int 6)])))
- (sign_extend:V4SI
- (vec_select:V4HI
- (match_operand:V8HI 2 "nonimmediate_operand" "")
+ (const_int 6)
+ (const_int 8)
+ (const_int 10)
+ (const_int 12)
+ (const_int 14)])))
+ (sign_extend:V8SI
+ (vec_select:V8HI
+ (match_operand:V16HI 2 "nonimmediate_operand" "")
+ (parallel [(const_int 0)
+ (const_int 2)
+ (const_int 4)
+ (const_int 6)
+ (const_int 8)
+ (const_int 10)
+ (const_int 12)
+ (const_int 14)]))))
+ (mult:V8SI
+ (sign_extend:V8SI
+ (vec_select:V8HI (match_dup 1)
+ (parallel [(const_int 1)
+ (const_int 3)
+ (const_int 5)
+ (const_int 7)
+ (const_int 9)
+ (const_int 11)
+ (const_int 13)
+ (const_int 15)])))
+ (sign_extend:V8SI
+ (vec_select:V8HI (match_dup 2)
+ (parallel [(const_int 1)
+ (const_int 3)
+ (const_int 5)
+ (const_int 7)
+ (const_int 9)
+ (const_int 11)
+ (const_int 13)
+ (const_int 15)]))))))]
+ "TARGET_AVX2"
+ "ix86_fixup_binary_operands_no_copy (MULT, V16HImode, operands);")
+
+(define_expand "sse2_pmaddwd"
+ [(set (match_operand:V4SI 0 "register_operand" "")
+ (plus:V4SI
+ (mult:V4SI
+ (sign_extend:V4SI
+ (vec_select:V4HI
+ (match_operand:V8HI 1 "nonimmediate_operand" "")
+ (parallel [(const_int 0)
+ (const_int 2)
+ (const_int 4)
+ (const_int 6)])))
+ (sign_extend:V4SI
+ (vec_select:V4HI
+ (match_operand:V8HI 2 "nonimmediate_operand" "")
(parallel [(const_int 0)
(const_int 2)
(const_int 4)
"TARGET_SSE2"
"ix86_fixup_binary_operands_no_copy (MULT, V8HImode, operands);")
+;; Matcher for avx2_pmaddwd: same (even*even)+(odd*odd) RTL as the
+;; expander, emitted as a single vpmaddwd.  Operand 1 is commutative (%x).
+(define_insn "*avx2_pmaddwd"
+ [(set (match_operand:V8SI 0 "register_operand" "=x")
+ (plus:V8SI
+ (mult:V8SI
+ (sign_extend:V8SI
+ (vec_select:V8HI
+ (match_operand:V16HI 1 "nonimmediate_operand" "%x")
+ (parallel [(const_int 0)
+ (const_int 2)
+ (const_int 4)
+ (const_int 6)
+ (const_int 8)
+ (const_int 10)
+ (const_int 12)
+ (const_int 14)])))
+ (sign_extend:V8SI
+ (vec_select:V8HI
+ (match_operand:V16HI 2 "nonimmediate_operand" "xm")
+ (parallel [(const_int 0)
+ (const_int 2)
+ (const_int 4)
+ (const_int 6)
+ (const_int 8)
+ (const_int 10)
+ (const_int 12)
+ (const_int 14)]))))
+ (mult:V8SI
+ (sign_extend:V8SI
+ (vec_select:V8HI (match_dup 1)
+ (parallel [(const_int 1)
+ (const_int 3)
+ (const_int 5)
+ (const_int 7)
+ (const_int 9)
+ (const_int 11)
+ (const_int 13)
+ (const_int 15)])))
+ (sign_extend:V8SI
+ (vec_select:V8HI (match_dup 2)
+ (parallel [(const_int 1)
+ (const_int 3)
+ (const_int 5)
+ (const_int 7)
+ (const_int 9)
+ (const_int 11)
+ (const_int 13)
+ (const_int 15)]))))))]
+ "TARGET_AVX2 && ix86_binary_operator_ok (MULT, V16HImode, operands)"
+ "vpmaddwd\t{%2, %1, %0|%0, %1, %2}"
+ [(set_attr "type" "sseiadd")
+ (set_attr "prefix" "vex")
+ (set_attr "mode" "OI")])
+
(define_insn "*sse2_pmaddwd"
[(set (match_operand:V4SI 0 "register_operand" "=x,x")
(plus:V4SI
(set_attr "prefix" "orig,vex")
(set_attr "mode" "TI")])
-(define_expand "mulv4si3"
- [(set (match_operand:V4SI 0 "register_operand" "")
- (mult:V4SI (match_operand:V4SI 1 "register_operand" "")
- (match_operand:V4SI 2 "register_operand" "")))]
+(define_expand "mul<mode>3"
+ [(set (match_operand:VI4_AVX2 0 "register_operand" "")
+ (mult:VI4_AVX2 (match_operand:VI4_AVX2 1 "register_operand" "")
+ (match_operand:VI4_AVX2 2 "register_operand" "")))]
"TARGET_SSE2"
{
if (TARGET_SSE4_1 || TARGET_AVX)
- ix86_fixup_binary_operands_no_copy (MULT, V4SImode, operands);
+ ix86_fixup_binary_operands_no_copy (MULT, <MODE>mode, operands);
})
-(define_insn "*sse4_1_mulv4si3"
- [(set (match_operand:V4SI 0 "register_operand" "=x,x")
- (mult:V4SI (match_operand:V4SI 1 "nonimmediate_operand" "%0,x")
- (match_operand:V4SI 2 "nonimmediate_operand" "xm,xm")))]
- "TARGET_SSE4_1 && ix86_binary_operator_ok (MULT, V4SImode, operands)"
+(define_insn "*<sse4_1_avx2>_mul<mode>3"
+ [(set (match_operand:VI4_AVX2 0 "register_operand" "=x,x")
+ (mult:VI4_AVX2 (match_operand:VI4_AVX2 1 "nonimmediate_operand" "%0,x")
+ (match_operand:VI4_AVX2 2 "nonimmediate_operand" "xm,xm")))]
+ "TARGET_SSE4_1 && ix86_binary_operator_ok (MULT, <MODE>mode, operands)"
"@
pmulld\t{%2, %0|%0, %2}
vpmulld\t{%2, %1, %0|%0, %1, %2}"
(set_attr "type" "sseimul")
(set_attr "prefix_extra" "1")
(set_attr "prefix" "orig,vex")
- (set_attr "mode" "TI")])
+ (set_attr "mode" "<sseinsnmode>")])
(define_insn_and_split "*sse2_mulv4si3"
[(set (match_operand:V4SI 0 "register_operand" "")
/* Merge the parts back together. */
emit_insn (gen_vec_interleave_lowv4si (op0, t5, t6));
+
+ set_unique_reg_note (get_last_insn (), REG_EQUAL,
+ gen_rtx_MULT (V4SImode, operands[1], operands[2]));
DONE;
})
-(define_insn_and_split "mulv2di3"
- [(set (match_operand:V2DI 0 "register_operand" "")
- (mult:V2DI (match_operand:V2DI 1 "register_operand" "")
- (match_operand:V2DI 2 "register_operand" "")))]
+(define_insn_and_split "mul<mode>3"
+ [(set (match_operand:VI8_AVX2 0 "register_operand" "")
+ (mult:VI8_AVX2 (match_operand:VI8_AVX2 1 "register_operand" "")
+ (match_operand:VI8_AVX2 2 "register_operand" "")))]
"TARGET_SSE2
&& can_create_pseudo_p ()"
"#"
op1 = operands[1];
op2 = operands[2];
- if (TARGET_XOP)
+ if (TARGET_XOP && <MODE>mode == V2DImode)
{
/* op1: A,B,C,D, op2: E,F,G,H */
op1 = gen_lowpart (V4SImode, op1);
}
else
{
- t1 = gen_reg_rtx (V2DImode);
- t2 = gen_reg_rtx (V2DImode);
- t3 = gen_reg_rtx (V2DImode);
- t4 = gen_reg_rtx (V2DImode);
- t5 = gen_reg_rtx (V2DImode);
- t6 = gen_reg_rtx (V2DImode);
+ t1 = gen_reg_rtx (<MODE>mode);
+ t2 = gen_reg_rtx (<MODE>mode);
+ t3 = gen_reg_rtx (<MODE>mode);
+ t4 = gen_reg_rtx (<MODE>mode);
+ t5 = gen_reg_rtx (<MODE>mode);
+ t6 = gen_reg_rtx (<MODE>mode);
thirtytwo = GEN_INT (32);
/* Multiply low parts. */
- emit_insn (gen_sse2_umulv2siv2di3 (t1, gen_lowpart (V4SImode, op1),
- gen_lowpart (V4SImode, op2)));
+ emit_insn (gen_<sse2_avx2>_umulv<ssescalarnum>si<mode>3
+ (t1, gen_lowpart (<ssepackmode>mode, op1),
+ gen_lowpart (<ssepackmode>mode, op2)));
- /* Shift input vectors left 32 bits so we can multiply high parts. */
- emit_insn (gen_lshrv2di3 (t2, op1, thirtytwo));
- emit_insn (gen_lshrv2di3 (t3, op2, thirtytwo));
+ /* Shift input vectors right 32 bits so we can multiply high parts. */
+ emit_insn (gen_lshr<mode>3 (t2, op1, thirtytwo));
+ emit_insn (gen_lshr<mode>3 (t3, op2, thirtytwo));
/* Multiply high parts by low parts. */
- emit_insn (gen_sse2_umulv2siv2di3 (t4, gen_lowpart (V4SImode, op1),
- gen_lowpart (V4SImode, t3)));
- emit_insn (gen_sse2_umulv2siv2di3 (t5, gen_lowpart (V4SImode, op2),
- gen_lowpart (V4SImode, t2)));
+ emit_insn (gen_<sse2_avx2>_umulv<ssescalarnum>si<mode>3
+ (t4, gen_lowpart (<ssepackmode>mode, op1),
+ gen_lowpart (<ssepackmode>mode, t3)));
+ emit_insn (gen_<sse2_avx2>_umulv<ssescalarnum>si<mode>3
+ (t5, gen_lowpart (<ssepackmode>mode, op2),
+ gen_lowpart (<ssepackmode>mode, t2)));
/* Shift them back. */
- emit_insn (gen_ashlv2di3 (t4, t4, thirtytwo));
- emit_insn (gen_ashlv2di3 (t5, t5, thirtytwo));
+ emit_insn (gen_ashl<mode>3 (t4, t4, thirtytwo));
+ emit_insn (gen_ashl<mode>3 (t5, t5, thirtytwo));
/* Add the three parts together. */
- emit_insn (gen_addv2di3 (t6, t1, t4));
- emit_insn (gen_addv2di3 (op0, t6, t5));
+ emit_insn (gen_add<mode>3 (t6, t1, t4));
+ emit_insn (gen_add<mode>3 (op0, t6, t5));
}
+
+ set_unique_reg_note (get_last_insn (), REG_EQUAL,
+ gen_rtx_MULT (<MODE>mode, operands[1], operands[2]));
DONE;
})
-(define_expand "vec_widen_smult_hi_v8hi"
- [(match_operand:V4SI 0 "register_operand" "")
- (match_operand:V8HI 1 "register_operand" "")
- (match_operand:V8HI 2 "register_operand" "")]
+;; Widening multiply, high input halves: build the full products from the
+;; low (mul<mode>3) and high (<s>mul<mode>3_highpart) result halves and
+;; interleave the upper elements.  any_extend merges the old signed and
+;; unsigned variants into one pattern.
+(define_expand "vec_widen_<s>mult_hi_<mode>"
+ [(match_operand:<sseunpackmode> 0 "register_operand" "")
+ (any_extend:<sseunpackmode>
+ (match_operand:VI2_AVX2 1 "register_operand" ""))
+ (match_operand:VI2_AVX2 2 "register_operand" "")]
"TARGET_SSE2"
{
rtx op1, op2, t1, t2, dest;
op1 = operands[1];
op2 = operands[2];
- t1 = gen_reg_rtx (V8HImode);
- t2 = gen_reg_rtx (V8HImode);
- dest = gen_lowpart (V8HImode, operands[0]);
+ t1 = gen_reg_rtx (<MODE>mode);
+ t2 = gen_reg_rtx (<MODE>mode);
+ dest = gen_lowpart (<MODE>mode, operands[0]);
- emit_insn (gen_mulv8hi3 (t1, op1, op2));
- emit_insn (gen_smulv8hi3_highpart (t2, op1, op2));
- emit_insn (gen_vec_interleave_highv8hi (dest, t1, t2));
+ emit_insn (gen_mul<mode>3 (t1, op1, op2));
+ emit_insn (gen_<s>mul<mode>3_highpart (t2, op1, op2));
+ emit_insn (gen_vec_interleave_high<mode> (dest, t1, t2));
DONE;
})
-(define_expand "vec_widen_smult_lo_v8hi"
- [(match_operand:V4SI 0 "register_operand" "")
- (match_operand:V8HI 1 "register_operand" "")
- (match_operand:V8HI 2 "register_operand" "")]
+;; Widening multiply, low input halves: same low/high-half construction as
+;; the _hi variant above, but interleaving the lower elements.
+(define_expand "vec_widen_<s>mult_lo_<mode>"
+ [(match_operand:<sseunpackmode> 0 "register_operand" "")
+ (any_extend:<sseunpackmode>
+ (match_operand:VI2_AVX2 1 "register_operand" ""))
+ (match_operand:VI2_AVX2 2 "register_operand" "")]
"TARGET_SSE2"
{
rtx op1, op2, t1, t2, dest;
op1 = operands[1];
op2 = operands[2];
- t1 = gen_reg_rtx (V8HImode);
- t2 = gen_reg_rtx (V8HImode);
- dest = gen_lowpart (V8HImode, operands[0]);
+ t1 = gen_reg_rtx (<MODE>mode);
+ t2 = gen_reg_rtx (<MODE>mode);
+ dest = gen_lowpart (<MODE>mode, operands[0]);
- emit_insn (gen_mulv8hi3 (t1, op1, op2));
- emit_insn (gen_smulv8hi3_highpart (t2, op1, op2));
- emit_insn (gen_vec_interleave_lowv8hi (dest, t1, t2));
+ emit_insn (gen_mul<mode>3 (t1, op1, op2));
+ emit_insn (gen_<s>mul<mode>3_highpart (t2, op1, op2));
+ emit_insn (gen_vec_interleave_low<mode> (dest, t1, t2));
DONE;
})
-(define_expand "vec_widen_umult_hi_v8hi"
- [(match_operand:V4SI 0 "register_operand" "")
- (match_operand:V8HI 1 "register_operand" "")
- (match_operand:V8HI 2 "register_operand" "")]
- "TARGET_SSE2"
+;; V8SI widening multiply, high halves: permute 64-bit lanes then pshufd
+;; so the upper four elements land on the even positions that
+;; vpmuldq/vpmuludq (avx2_<u>mulv4siv4di3) read.
+(define_expand "vec_widen_<s>mult_hi_v8si"
+ [(match_operand:V4DI 0 "register_operand" "")
+ (any_extend:V4DI (match_operand:V8SI 1 "nonimmediate_operand" ""))
+ (match_operand:V8SI 2 "nonimmediate_operand" "")]
+ "TARGET_AVX2"
{
- rtx op1, op2, t1, t2, dest;
-
- op1 = operands[1];
- op2 = operands[2];
- t1 = gen_reg_rtx (V8HImode);
- t2 = gen_reg_rtx (V8HImode);
- dest = gen_lowpart (V8HImode, operands[0]);
+ rtx t1, t2, t3, t4;
- emit_insn (gen_mulv8hi3 (t1, op1, op2));
- emit_insn (gen_umulv8hi3_highpart (t2, op1, op2));
- emit_insn (gen_vec_interleave_highv8hi (dest, t1, t2));
+ t1 = gen_reg_rtx (V4DImode);
+ t2 = gen_reg_rtx (V4DImode);
+ t3 = gen_reg_rtx (V8SImode);
+ t4 = gen_reg_rtx (V8SImode);
+ emit_insn (gen_avx2_permv4di_1 (t1, gen_lowpart (V4DImode, operands[1]),
+ const0_rtx, const2_rtx,
+ const1_rtx, GEN_INT (3)));
+ emit_insn (gen_avx2_permv4di_1 (t2, gen_lowpart (V4DImode, operands[2]),
+ const0_rtx, const2_rtx,
+ const1_rtx, GEN_INT (3)));
+ emit_insn (gen_avx2_pshufdv3 (t3, gen_lowpart (V8SImode, t1),
+ GEN_INT (2 + (2 << 2) + (3 << 4) + (3 << 6))));
+ emit_insn (gen_avx2_pshufdv3 (t4, gen_lowpart (V8SImode, t2),
+ GEN_INT (2 + (2 << 2) + (3 << 4) + (3 << 6))));
+ emit_insn (gen_avx2_<u>mulv4siv4di3 (operands[0], t3, t4));
DONE;
})
-(define_expand "vec_widen_umult_lo_v8hi"
- [(match_operand:V4SI 0 "register_operand" "")
- (match_operand:V8HI 1 "register_operand" "")
- (match_operand:V8HI 2 "register_operand" "")]
- "TARGET_SSE2"
+;; V8SI widening multiply, low halves: same lane permute as the _hi
+;; variant, but the pshufd selector (0,0,1,1) routes the lower four
+;; elements to the even positions read by avx2_<u>mulv4siv4di3.
+(define_expand "vec_widen_<s>mult_lo_v8si"
+ [(match_operand:V4DI 0 "register_operand" "")
+ (any_extend:V4DI (match_operand:V8SI 1 "nonimmediate_operand" ""))
+ (match_operand:V8SI 2 "nonimmediate_operand" "")]
+ "TARGET_AVX2"
{
- rtx op1, op2, t1, t2, dest;
-
- op1 = operands[1];
- op2 = operands[2];
- t1 = gen_reg_rtx (V8HImode);
- t2 = gen_reg_rtx (V8HImode);
- dest = gen_lowpart (V8HImode, operands[0]);
+ rtx t1, t2, t3, t4;
- emit_insn (gen_mulv8hi3 (t1, op1, op2));
- emit_insn (gen_umulv8hi3_highpart (t2, op1, op2));
- emit_insn (gen_vec_interleave_lowv8hi (dest, t1, t2));
+ t1 = gen_reg_rtx (V4DImode);
+ t2 = gen_reg_rtx (V4DImode);
+ t3 = gen_reg_rtx (V8SImode);
+ t4 = gen_reg_rtx (V8SImode);
+ emit_insn (gen_avx2_permv4di_1 (t1, gen_lowpart (V4DImode, operands[1]),
+ const0_rtx, const2_rtx,
+ const1_rtx, GEN_INT (3)));
+ emit_insn (gen_avx2_permv4di_1 (t2, gen_lowpart (V4DImode, operands[2]),
+ const0_rtx, const2_rtx,
+ const1_rtx, GEN_INT (3)));
+ emit_insn (gen_avx2_pshufdv3 (t3, gen_lowpart (V8SImode, t1),
+ GEN_INT (0 + (0 << 2) + (1 << 4) + (1 << 6))));
+ emit_insn (gen_avx2_pshufdv3 (t4, gen_lowpart (V8SImode, t2),
+ GEN_INT (0 + (0 << 2) + (1 << 4) + (1 << 6))));
+ emit_insn (gen_avx2_<u>mulv4siv4di3 (operands[0], t3, t4));
DONE;
})
[(match_operand:V2DI 0 "register_operand" "")
(match_operand:V4SI 1 "register_operand" "")
(match_operand:V4SI 2 "register_operand" "")]
- "TARGET_XOP"
+ "TARGET_SSE4_1"
{
- rtx t1, t2;
+ rtx op1, op2, t1, t2;
+ op1 = operands[1];
+ op2 = operands[2];
t1 = gen_reg_rtx (V4SImode);
t2 = gen_reg_rtx (V4SImode);
- emit_insn (gen_sse2_pshufd_1 (t1, operands[1],
- GEN_INT (0),
- GEN_INT (2),
- GEN_INT (1),
- GEN_INT (3)));
- emit_insn (gen_sse2_pshufd_1 (t2, operands[2],
- GEN_INT (0),
- GEN_INT (2),
- GEN_INT (1),
- GEN_INT (3)));
- emit_insn (gen_xop_mulv2div2di3_high (operands[0], t1, t2));
+ if (TARGET_XOP)
+ {
+ emit_insn (gen_sse2_pshufd_1 (t1, op1, GEN_INT (0), GEN_INT (2),
+ GEN_INT (1), GEN_INT (3)));
+ emit_insn (gen_sse2_pshufd_1 (t2, op2, GEN_INT (0), GEN_INT (2),
+ GEN_INT (1), GEN_INT (3)));
+ emit_insn (gen_xop_mulv2div2di3_high (operands[0], t1, t2));
+ DONE;
+ }
+
+ emit_insn (gen_vec_interleave_highv4si (t1, op1, op1));
+ emit_insn (gen_vec_interleave_highv4si (t2, op2, op2));
+ emit_insn (gen_sse4_1_mulv2siv2di3 (operands[0], t1, t2));
DONE;
})
[(match_operand:V2DI 0 "register_operand" "")
(match_operand:V4SI 1 "register_operand" "")
(match_operand:V4SI 2 "register_operand" "")]
- "TARGET_XOP"
+ "TARGET_SSE4_1"
{
- rtx t1, t2;
+ rtx op1, op2, t1, t2;
+ op1 = operands[1];
+ op2 = operands[2];
t1 = gen_reg_rtx (V4SImode);
t2 = gen_reg_rtx (V4SImode);
- emit_insn (gen_sse2_pshufd_1 (t1, operands[1],
- GEN_INT (0),
- GEN_INT (2),
- GEN_INT (1),
- GEN_INT (3)));
- emit_insn (gen_sse2_pshufd_1 (t2, operands[2],
- GEN_INT (0),
- GEN_INT (2),
- GEN_INT (1),
- GEN_INT (3)));
- emit_insn (gen_xop_mulv2div2di3_low (operands[0], t1, t2));
+ if (TARGET_XOP)
+ {
+ emit_insn (gen_sse2_pshufd_1 (t1, op1, GEN_INT (0), GEN_INT (2),
+ GEN_INT (1), GEN_INT (3)));
+ emit_insn (gen_sse2_pshufd_1 (t2, op2, GEN_INT (0), GEN_INT (2),
+ GEN_INT (1), GEN_INT (3)));
+ emit_insn (gen_xop_mulv2div2di3_low (operands[0], t1, t2));
+ DONE;
+ }
+
+ emit_insn (gen_vec_interleave_lowv4si (t1, op1, op1));
+ emit_insn (gen_vec_interleave_lowv4si (t2, op2, op2));
+ emit_insn (gen_sse4_1_mulv2siv2di3 (operands[0], t1, t2));
DONE;
})
DONE;
})
-(define_expand "sdot_prodv8hi"
- [(match_operand:V4SI 0 "register_operand" "")
- (match_operand:V8HI 1 "register_operand" "")
- (match_operand:V8HI 2 "register_operand" "")
- (match_operand:V4SI 3 "register_operand" "")]
+;; Signed dot product of 16-bit elements: pmaddwd produces the pairwise
+;; product sums, then the accumulator (operand 3) is added in.  The plus
+;; is emitted as raw RTL so one pattern covers both vector widths.
+(define_expand "sdot_prod<mode>"
+ [(match_operand:<sseunpackmode> 0 "register_operand" "")
+ (match_operand:VI2_AVX2 1 "register_operand" "")
+ (match_operand:VI2_AVX2 2 "register_operand" "")
+ (match_operand:<sseunpackmode> 3 "register_operand" "")]
"TARGET_SSE2"
{
- rtx t = gen_reg_rtx (V4SImode);
- emit_insn (gen_sse2_pmaddwd (t, operands[1], operands[2]));
- emit_insn (gen_addv4si3 (operands[0], operands[3], t));
+ rtx t = gen_reg_rtx (<sseunpackmode>mode);
+ emit_insn (gen_<sse2_avx2>_pmaddwd (t, operands[1], operands[2]));
+ emit_insn (gen_rtx_SET (VOIDmode, operands[0],
+ gen_rtx_PLUS (<sseunpackmode>mode,
+ operands[3], t)));
DONE;
})
-(define_expand "udot_prodv4si"
+;; ISA prefix for the 32->64-bit widening multiply expanders used below:
+;; zero_extend -> sse2 (pmuludq), sign_extend -> sse4_1 (pmuldq).
+(define_code_attr sse2_sse4_1
+ [(zero_extend "sse2") (sign_extend "sse4_1")])
+
+(define_expand "<s>dot_prodv4si"
[(match_operand:V2DI 0 "register_operand" "")
- (match_operand:V4SI 1 "register_operand" "")
+ (any_extend:V2DI (match_operand:V4SI 1 "register_operand" ""))
(match_operand:V4SI 2 "register_operand" "")
(match_operand:V2DI 3 "register_operand" "")]
- "TARGET_SSE2"
+ "<CODE> == ZERO_EXTEND ? TARGET_SSE2 : TARGET_SSE4_1"
{
rtx t1, t2, t3, t4;
t1 = gen_reg_rtx (V2DImode);
- emit_insn (gen_sse2_umulv2siv2di3 (t1, operands[1], operands[2]));
+ emit_insn (gen_<sse2_sse4_1>_<u>mulv2siv2di3 (t1, operands[1], operands[2]));
emit_insn (gen_addv2di3 (t1, t1, operands[3]));
t2 = gen_reg_rtx (V4SImode);
GEN_INT (32)));
t4 = gen_reg_rtx (V2DImode);
- emit_insn (gen_sse2_umulv2siv2di3 (t4, t2, t3));
+ emit_insn (gen_<sse2_sse4_1>_<u>mulv2siv2di3 (t4, t2, t3));
emit_insn (gen_addv2di3 (operands[0], t1, t4));
DONE;
})
+;; 32-bit dot product on V8SI: widening-multiply the even elements
+;; (t1 += acc), shift each 128-bit lane right by 32 so the odd elements
+;; move to the even slots, multiply those (t4), and sum.
+(define_expand "<s>dot_prodv8si"
+ [(match_operand:V4DI 0 "register_operand" "")
+ (any_extend:V4DI (match_operand:V8SI 1 "register_operand" ""))
+ (match_operand:V8SI 2 "register_operand" "")
+ (match_operand:V4DI 3 "register_operand" "")]
+ "TARGET_AVX2"
+{
+ rtx t1, t2, t3, t4;
+
+ t1 = gen_reg_rtx (V4DImode);
+ emit_insn (gen_avx2_<u>mulv4siv4di3 (t1, operands[1], operands[2]));
+ emit_insn (gen_addv4di3 (t1, t1, operands[3]));
+
+ t2 = gen_reg_rtx (V8SImode);
+ t3 = gen_reg_rtx (V8SImode);
+ emit_insn (gen_avx2_lshrv2ti3 (gen_lowpart (V2TImode, t2),
+ gen_lowpart (V2TImode, operands[1]),
+ GEN_INT (32)));
+ emit_insn (gen_avx2_lshrv2ti3 (gen_lowpart (V2TImode, t3),
+ gen_lowpart (V2TImode, operands[2]),
+ GEN_INT (32)));
+
+ t4 = gen_reg_rtx (V4DImode);
+ emit_insn (gen_avx2_<u>mulv4siv4di3 (t4, t2, t3));
+
+ emit_insn (gen_addv4di3 (operands[0], t1, t4));
+ DONE;
+})
+
(define_insn "ashr<mode>3"
- [(set (match_operand:VI24_128 0 "register_operand" "=x,x")
- (ashiftrt:VI24_128
- (match_operand:VI24_128 1 "register_operand" "0,x")
+ [(set (match_operand:VI24_AVX2 0 "register_operand" "=x,x")
+ (ashiftrt:VI24_AVX2
+ (match_operand:VI24_AVX2 1 "register_operand" "0,x")
(match_operand:SI 2 "nonmemory_operand" "xN,xN")))]
"TARGET_SSE2"
"@
(const_string "0")))
(set_attr "prefix_data16" "1,*")
(set_attr "prefix" "orig,vex")
- (set_attr "mode" "TI")])
-
-(define_insn "lshr<mode>3"
- [(set (match_operand:VI248_128 0 "register_operand" "=x,x")
- (lshiftrt:VI248_128
- (match_operand:VI248_128 1 "register_operand" "0,x")
- (match_operand:SI 2 "nonmemory_operand" "xN,xN")))]
- "TARGET_SSE2"
- "@
- psrl<ssemodesuffix>\t{%2, %0|%0, %2}
- vpsrl<ssemodesuffix>\t{%2, %1, %0|%0, %1, %2}"
- [(set_attr "isa" "noavx,avx")
- (set_attr "type" "sseishft")
- (set (attr "length_immediate")
- (if_then_else (match_operand 2 "const_int_operand" "")
- (const_string "1")
- (const_string "0")))
- (set_attr "prefix_data16" "1,*")
- (set_attr "prefix" "orig,vex")
- (set_attr "mode" "TI")])
+ (set_attr "mode" "<sseinsnmode>")])
-(define_insn "ashl<mode>3"
- [(set (match_operand:VI248_128 0 "register_operand" "=x,x")
- (ashift:VI248_128
- (match_operand:VI248_128 1 "register_operand" "0,x")
+(define_insn "<shift_insn><mode>3"
+ [(set (match_operand:VI248_AVX2 0 "register_operand" "=x,x")
+ (any_lshift:VI248_AVX2
+ (match_operand:VI248_AVX2 1 "register_operand" "0,x")
(match_operand:SI 2 "nonmemory_operand" "xN,xN")))]
"TARGET_SSE2"
"@
- psll<ssemodesuffix>\t{%2, %0|%0, %2}
- vpsll<ssemodesuffix>\t{%2, %1, %0|%0, %1, %2}"
+ p<vshift><ssemodesuffix>\t{%2, %0|%0, %2}
+ vp<vshift><ssemodesuffix>\t{%2, %1, %0|%0, %1, %2}"
[(set_attr "isa" "noavx,avx")
(set_attr "type" "sseishft")
(set (attr "length_immediate")
(const_string "0")))
(set_attr "prefix_data16" "1,*")
(set_attr "prefix" "orig,vex")
- (set_attr "mode" "TI")])
+ (set_attr "mode" "<sseinsnmode>")])
(define_expand "vec_shl_<mode>"
[(set (match_operand:VI_128 0 "register_operand" "")
- (ashift:V1TI
+ (ashift:V1TI
(match_operand:VI_128 1 "register_operand" "")
(match_operand:SI 2 "const_0_to_255_mul_8_operand" "")))]
"TARGET_SSE2"
operands[1] = gen_lowpart (V1TImode, operands[1]);
})
-(define_insn "sse2_ashlv1ti3"
- [(set (match_operand:V1TI 0 "register_operand" "=x,x")
- (ashift:V1TI
- (match_operand:V1TI 1 "register_operand" "0,x")
+(define_insn "<sse2_avx2>_ashl<mode>3"
+ [(set (match_operand:VIMAX_AVX2 0 "register_operand" "=x,x")
+ (ashift:VIMAX_AVX2
+ (match_operand:VIMAX_AVX2 1 "register_operand" "0,x")
(match_operand:SI 2 "const_0_to_255_mul_8_operand" "n,n")))]
"TARGET_SSE2"
{
(set_attr "length_immediate" "1")
(set_attr "prefix_data16" "1,*")
(set_attr "prefix" "orig,vex")
- (set_attr "mode" "TI")])
+ (set_attr "mode" "<sseinsnmode>")])
(define_expand "vec_shr_<mode>"
[(set (match_operand:VI_128 0 "register_operand" "")
- (lshiftrt:V1TI
+ (lshiftrt:V1TI
(match_operand:VI_128 1 "register_operand" "")
(match_operand:SI 2 "const_0_to_255_mul_8_operand" "")))]
"TARGET_SSE2"
operands[1] = gen_lowpart (V1TImode, operands[1]);
})
-(define_insn "sse2_lshrv1ti3"
- [(set (match_operand:V1TI 0 "register_operand" "=x,x")
- (lshiftrt:V1TI
- (match_operand:V1TI 1 "register_operand" "0,x")
+(define_insn "<sse2_avx2>_lshr<mode>3"
+ [(set (match_operand:VIMAX_AVX2 0 "register_operand" "=x,x")
+ (lshiftrt:VIMAX_AVX2
+ (match_operand:VIMAX_AVX2 1 "register_operand" "0,x")
(match_operand:SI 2 "const_0_to_255_mul_8_operand" "n,n")))]
"TARGET_SSE2"
{
(set_attr "atom_unit" "sishuf")
(set_attr "prefix_data16" "1,*")
(set_attr "prefix" "orig,vex")
- (set_attr "mode" "TI")])
+ (set_attr "mode" "<sseinsnmode>")])
+
+
+;; 256-bit integer max/min (signed and unsigned) for byte/word/dword
+;; elements; AVX2 provides direct instructions, so this just canonicalizes
+;; operands for the insn below.
+(define_expand "<code><mode>3"
+ [(set (match_operand:VI124_256 0 "register_operand" "")
+ (maxmin:VI124_256
+ (match_operand:VI124_256 1 "nonimmediate_operand" "")
+ (match_operand:VI124_256 2 "nonimmediate_operand" "")))]
+ "TARGET_AVX2"
+ "ix86_fixup_binary_operands_no_copy (<CODE>, <MODE>mode, operands);")
+
+;; vpmax*/vpmin* matcher for the 256-bit expander above; commutative
+;; (%x) with one VEX-encoded alternative.
+(define_insn "*avx2_<code><mode>3"
+ [(set (match_operand:VI124_256 0 "register_operand" "=x")
+ (maxmin:VI124_256
+ (match_operand:VI124_256 1 "nonimmediate_operand" "%x")
+ (match_operand:VI124_256 2 "nonimmediate_operand" "xm")))]
+ "TARGET_AVX2 && ix86_binary_operator_ok (<CODE>, <MODE>mode, operands)"
+ "vp<maxmin_int><ssemodesuffix>\t{%2, %1, %0|%0, %1, %2}"
+ [(set_attr "type" "sseiadd")
+ (set_attr "prefix_extra" "1")
+ (set_attr "prefix" "vex")
+ (set_attr "mode" "OI")])
+
+;; 64-bit element max/min has no direct instruction here; lower it to a
+;; vector compare + blend via ix86_expand_int_vcond.  Max keeps operand
+;; order, min swaps; unsigned codes use GTU instead of GT.
+(define_expand "<code><mode>3"
+ [(set (match_operand:VI8_AVX2 0 "register_operand" "")
+ (maxmin:VI8_AVX2
+ (match_operand:VI8_AVX2 1 "register_operand" "")
+ (match_operand:VI8_AVX2 2 "register_operand" "")))]
+ "TARGET_SSE4_2"
+{
+ enum rtx_code code;
+ rtx xops[6];
+ bool ok;
+
+ xops[0] = operands[0];
+
+ if (<CODE> == SMAX || <CODE> == UMAX)
+ {
+ xops[1] = operands[1];
+ xops[2] = operands[2];
+ }
+ else
+ {
+ xops[1] = operands[2];
+ xops[2] = operands[1];
+ }
+
+ code = (<CODE> == UMAX || <CODE> == UMIN) ? GTU : GT;
+
+ xops[3] = gen_rtx_fmt_ee (code, VOIDmode, operands[1], operands[2]);
+ xops[4] = operands[1];
+ xops[5] = operands[2];
+
+ ok = ix86_expand_int_vcond (xops);
+ gcc_assert (ok);
+ DONE;
+})
+
+;; 128-bit signed max/min.  With SSE4.1 (or for V8HI, which SSE2 handles
+;; directly) just canonicalize operands; otherwise synthesize via a GT
+;; compare + blend through ix86_expand_int_vcond (min swaps operands).
+(define_expand "<code><mode>3"
+ [(set (match_operand:VI124_128 0 "register_operand" "")
+ (smaxmin:VI124_128
+ (match_operand:VI124_128 1 "nonimmediate_operand" "")
+ (match_operand:VI124_128 2 "nonimmediate_operand" "")))]
+ "TARGET_SSE2"
+{
+ if (TARGET_SSE4_1 || <MODE>mode == V8HImode)
+ ix86_fixup_binary_operands_no_copy (<CODE>, <MODE>mode, operands);
+ else
+ {
+ rtx xops[6];
+ bool ok;
+
+ xops[0] = operands[0];
+ operands[1] = force_reg (<MODE>mode, operands[1]);
+ operands[2] = force_reg (<MODE>mode, operands[2]);
+
+ if (<CODE> == SMAX)
+ {
+ xops[1] = operands[1];
+ xops[2] = operands[2];
+ }
+ else
+ {
+ xops[1] = operands[2];
+ xops[2] = operands[1];
+ }
+
+ xops[3] = gen_rtx_GT (VOIDmode, operands[1], operands[2]);
+ xops[4] = operands[1];
+ xops[5] = operands[2];
+
+ ok = ix86_expand_int_vcond (xops);
+ gcc_assert (ok);
+ DONE;
+ }
+})
(define_insn "*sse4_1_<code><mode>3"
[(set (match_operand:VI14_128 0 "register_operand" "=x,x")
(set_attr "prefix" "orig,vex")
(set_attr "mode" "TI")])
-(define_expand "smax<mode>3"
- [(set (match_operand:VI14_128 0 "register_operand" "")
- (smax:VI14_128 (match_operand:VI14_128 1 "register_operand" "")
- (match_operand:VI14_128 2 "register_operand" "")))]
+;; 128-bit unsigned max/min.  SSE4.1 (or V16QI, which SSE2 handles
+;; directly) canonicalizes; UMAX on V8HI is synthesized as
+;; x + ussub(y, x) (saturating subtract then add); everything else goes
+;; through a GTU compare + blend via ix86_expand_int_vcond.
+(define_expand "<code><mode>3"
+ [(set (match_operand:VI124_128 0 "register_operand" "")
+ (umaxmin:VI124_128
+ (match_operand:VI124_128 1 "nonimmediate_operand" "")
+ (match_operand:VI124_128 2 "nonimmediate_operand" "")))]
"TARGET_SSE2"
{
- if (TARGET_SSE4_1)
- ix86_fixup_binary_operands_no_copy (SMAX, <MODE>mode, operands);
- else
+ if (TARGET_SSE4_1 || <MODE>mode == V16QImode)
+ ix86_fixup_binary_operands_no_copy (<CODE>, <MODE>mode, operands);
+ else if (<CODE> == UMAX && <MODE>mode == V8HImode)
{
- rtx xops[6];
- bool ok;
-
- xops[0] = operands[0];
- xops[1] = operands[1];
- xops[2] = operands[2];
- xops[3] = gen_rtx_GT (VOIDmode, operands[1], operands[2]);
- xops[4] = operands[1];
- xops[5] = operands[2];
- ok = ix86_expand_int_vcond (xops);
- gcc_assert (ok);
+ rtx op0 = operands[0], op2 = operands[2], op3 = op0;
+ operands[1] = force_reg (<MODE>mode, operands[1]);
+ if (rtx_equal_p (op3, op2))
+ op3 = gen_reg_rtx (V8HImode);
+ emit_insn (gen_sse2_ussubv8hi3 (op3, operands[1], op2));
+ emit_insn (gen_addv8hi3 (op0, op3, op2));
DONE;
}
-})
-
-(define_expand "smin<mode>3"
- [(set (match_operand:VI14_128 0 "register_operand" "")
- (smin:VI14_128 (match_operand:VI14_128 1 "register_operand" "")
- (match_operand:VI14_128 2 "register_operand" "")))]
- "TARGET_SSE2"
-{
- if (TARGET_SSE4_1)
- ix86_fixup_binary_operands_no_copy (SMIN, <MODE>mode, operands);
else
{
rtx xops[6];
bool ok;
+ operands[1] = force_reg (<MODE>mode, operands[1]);
+ operands[2] = force_reg (<MODE>mode, operands[2]);
+
xops[0] = operands[0];
- xops[1] = operands[2];
- xops[2] = operands[1];
- xops[3] = gen_rtx_GT (VOIDmode, operands[1], operands[2]);
+
+ if (<CODE> == UMAX)
+ {
+ xops[1] = operands[1];
+ xops[2] = operands[2];
+ }
+ else
+ {
+ xops[1] = operands[2];
+ xops[2] = operands[1];
+ }
+
+ xops[3] = gen_rtx_GTU (VOIDmode, operands[1], operands[2]);
xops[4] = operands[1];
xops[5] = operands[2];
+
ok = ix86_expand_int_vcond (xops);
gcc_assert (ok);
DONE;
}
})
-(define_expand "<code>v8hi3"
- [(set (match_operand:V8HI 0 "register_operand" "")
- (smaxmin:V8HI
- (match_operand:V8HI 1 "nonimmediate_operand" "")
- (match_operand:V8HI 2 "nonimmediate_operand" "")))]
- "TARGET_SSE2"
- "ix86_fixup_binary_operands_no_copy (<CODE>, V8HImode, operands);")
-
-(define_expand "smaxv2di3"
- [(set (match_operand:V2DI 0 "register_operand" "")
- (smax:V2DI (match_operand:V2DI 1 "register_operand" "")
- (match_operand:V2DI 2 "register_operand" "")))]
- "TARGET_SSE4_2"
-{
- rtx xops[6];
- bool ok;
-
- xops[0] = operands[0];
- xops[1] = operands[1];
- xops[2] = operands[2];
- xops[3] = gen_rtx_GT (VOIDmode, operands[1], operands[2]);
- xops[4] = operands[1];
- xops[5] = operands[2];
- ok = ix86_expand_int_vcond (xops);
- gcc_assert (ok);
- DONE;
-})
-
-(define_expand "sminv2di3"
- [(set (match_operand:V2DI 0 "register_operand" "")
- (smin:V2DI (match_operand:V2DI 1 "register_operand" "")
- (match_operand:V2DI 2 "register_operand" "")))]
- "TARGET_SSE4_2"
-{
- rtx xops[6];
- bool ok;
-
- xops[0] = operands[0];
- xops[1] = operands[2];
- xops[2] = operands[1];
- xops[3] = gen_rtx_GT (VOIDmode, operands[1], operands[2]);
- xops[4] = operands[1];
- xops[5] = operands[2];
- ok = ix86_expand_int_vcond (xops);
- gcc_assert (ok);
- DONE;
-})
-
(define_insn "*sse4_1_<code><mode>3"
[(set (match_operand:VI24_128 0 "register_operand" "=x,x")
(umaxmin:VI24_128
(set_attr "prefix" "orig,vex")
(set_attr "mode" "TI")])
-(define_expand "<code>v16qi3"
- [(set (match_operand:V16QI 0 "register_operand" "")
- (umaxmin:V16QI
- (match_operand:V16QI 1 "nonimmediate_operand" "")
- (match_operand:V16QI 2 "nonimmediate_operand" "")))]
- "TARGET_SSE2"
- "ix86_fixup_binary_operands_no_copy (<CODE>, V16QImode, operands);")
-
-(define_expand "umaxv8hi3"
- [(set (match_operand:V8HI 0 "register_operand" "")
- (umax:V8HI (match_operand:V8HI 1 "register_operand" "")
- (match_operand:V8HI 2 "nonimmediate_operand" "")))]
- "TARGET_SSE2"
-{
- if (TARGET_SSE4_1)
- ix86_fixup_binary_operands_no_copy (UMAX, V8HImode, operands);
- else
- {
- rtx op0 = operands[0], op2 = operands[2], op3 = op0;
- if (rtx_equal_p (op3, op2))
- op3 = gen_reg_rtx (V8HImode);
- emit_insn (gen_sse2_ussubv8hi3 (op3, operands[1], op2));
- emit_insn (gen_addv8hi3 (op0, op3, op2));
- DONE;
- }
-})
-
-(define_expand "umaxv4si3"
- [(set (match_operand:V4SI 0 "register_operand" "")
- (umax:V4SI (match_operand:V4SI 1 "register_operand" "")
- (match_operand:V4SI 2 "register_operand" "")))]
- "TARGET_SSE2"
-{
- if (TARGET_SSE4_1)
- ix86_fixup_binary_operands_no_copy (UMAX, V4SImode, operands);
- else
- {
- rtx xops[6];
- bool ok;
-
- xops[0] = operands[0];
- xops[1] = operands[1];
- xops[2] = operands[2];
- xops[3] = gen_rtx_GTU (VOIDmode, operands[1], operands[2]);
- xops[4] = operands[1];
- xops[5] = operands[2];
- ok = ix86_expand_int_vcond (xops);
- gcc_assert (ok);
- DONE;
- }
-})
-
-(define_expand "umin<mode>3"
- [(set (match_operand:VI24_128 0 "register_operand" "")
- (umin:VI24_128 (match_operand:VI24_128 1 "register_operand" "")
- (match_operand:VI24_128 2 "register_operand" "")))]
- "TARGET_SSE2"
-{
- if (TARGET_SSE4_1)
- ix86_fixup_binary_operands_no_copy (UMIN, <MODE>mode, operands);
- else
- {
- rtx xops[6];
- bool ok;
-
- xops[0] = operands[0];
- xops[1] = operands[2];
- xops[2] = operands[1];
- xops[3] = gen_rtx_GTU (VOIDmode, operands[1], operands[2]);
- xops[4] = operands[1];
- xops[5] = operands[2];
- ok = ix86_expand_int_vcond (xops);
- gcc_assert (ok);
- DONE;
- }
-})
-
-(define_expand "umaxv2di3"
- [(set (match_operand:V2DI 0 "register_operand" "")
- (umax:V2DI (match_operand:V2DI 1 "register_operand" "")
- (match_operand:V2DI 2 "register_operand" "")))]
- "TARGET_SSE4_2"
-{
- rtx xops[6];
- bool ok;
-
- xops[0] = operands[0];
- xops[1] = operands[1];
- xops[2] = operands[2];
- xops[3] = gen_rtx_GTU (VOIDmode, operands[1], operands[2]);
- xops[4] = operands[1];
- xops[5] = operands[2];
- ok = ix86_expand_int_vcond (xops);
- gcc_assert (ok);
- DONE;
-})
-
-(define_expand "uminv2di3"
- [(set (match_operand:V2DI 0 "register_operand" "")
- (umin:V2DI (match_operand:V2DI 1 "register_operand" "")
- (match_operand:V2DI 2 "register_operand" "")))]
- "TARGET_SSE4_2"
-{
- rtx xops[6];
- bool ok;
-
- xops[0] = operands[0];
- xops[1] = operands[2];
- xops[2] = operands[1];
- xops[3] = gen_rtx_GTU (VOIDmode, operands[1], operands[2]);
- xops[4] = operands[1];
- xops[5] = operands[2];
- ok = ix86_expand_int_vcond (xops);
- gcc_assert (ok);
- DONE;
-})
-
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;;
;; Parallel integral comparisons
;;
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;; 256-bit integer equality comparison.  EQ is commutative, so the
+;; expander lets the fixup routine canonicalize the operands before
+;; matching the insn below.
+(define_expand "avx2_eq<mode>3"
+ [(set (match_operand:VI_256 0 "register_operand" "")
+ (eq:VI_256
+ (match_operand:VI_256 1 "nonimmediate_operand" "")
+ (match_operand:VI_256 2 "nonimmediate_operand" "")))]
+ "TARGET_AVX2"
+ "ix86_fixup_binary_operands_no_copy (EQ, <MODE>mode, operands);")
+
+;; vpcmpeq{b,w,d,q}; the "%x" constraint marks operand 1 commutative.
+(define_insn "*avx2_eq<mode>3"
+ [(set (match_operand:VI_256 0 "register_operand" "=x")
+ (eq:VI_256
+ (match_operand:VI_256 1 "nonimmediate_operand" "%x")
+ (match_operand:VI_256 2 "nonimmediate_operand" "xm")))]
+ "TARGET_AVX2 && ix86_binary_operator_ok (EQ, <MODE>mode, operands)"
+ "vpcmpeq<ssemodesuffix>\t{%2, %1, %0|%0, %1, %2}"
+ [(set_attr "type" "ssecmp")
+ (set_attr "prefix_extra" "1")
+ (set_attr "prefix" "vex")
+ (set_attr "mode" "OI")])
+
(define_insn "*sse4_1_eqv2di3"
[(set (match_operand:V2DI 0 "register_operand" "=x,x")
(eq:V2DI
(set_attr "prefix" "orig,vex")
(set_attr "mode" "TI")])
+;; 256-bit signed greater-than (vpcmpgt{b,w,d,q}).  Unlike EQ this is
+;; not commutative, so operand 1 is register-only.
+(define_insn "avx2_gt<mode>3"
+ [(set (match_operand:VI_256 0 "register_operand" "=x")
+ (gt:VI_256
+ (match_operand:VI_256 1 "register_operand" "x")
+ (match_operand:VI_256 2 "nonimmediate_operand" "xm")))]
+ "TARGET_AVX2"
+ "vpcmpgt<ssemodesuffix>\t{%2, %1, %0|%0, %1, %2}"
+ [(set_attr "type" "ssecmp")
+ (set_attr "prefix_extra" "1")
+ (set_attr "prefix" "vex")
+ (set_attr "mode" "OI")])
+
(define_insn "sse2_gt<mode>3"
[(set (match_operand:VI124_128 0 "register_operand" "=x,x")
(gt:VI124_128
(set_attr "prefix" "orig,vex")
(set_attr "mode" "TI")])
-(define_expand "vcond<mode>"
- [(set (match_operand:VI124_128 0 "register_operand" "")
- (if_then_else:VI124_128
- (match_operator 3 ""
- [(match_operand:VI124_128 4 "nonimmediate_operand" "")
- (match_operand:VI124_128 5 "nonimmediate_operand" "")])
- (match_operand:VI124_128 1 "general_operand" "")
- (match_operand:VI124_128 2 "general_operand" "")))]
- "TARGET_SSE2"
+;; 256-bit vector conditional (signed compare).  The data mode V_256
+;; and the comparison mode VI_256 only need to agree in element count,
+;; hence the GET_MODE_NUNITS guard on the cross-product iterator.
+(define_expand "vcond<V_256:mode><VI_256:mode>"
+ [(set (match_operand:V_256 0 "register_operand" "")
+ (if_then_else:V_256
+ (match_operator 3 ""
+ [(match_operand:VI_256 4 "nonimmediate_operand" "")
+ (match_operand:VI_256 5 "nonimmediate_operand" "")])
+ (match_operand:V_256 1 "general_operand" "")
+ (match_operand:V_256 2 "general_operand" "")))]
+ "TARGET_AVX2
+ && (GET_MODE_NUNITS (<V_256:MODE>mode)
+ == GET_MODE_NUNITS (<VI_256:MODE>mode))"
{
bool ok = ix86_expand_int_vcond (operands);
gcc_assert (ok);
DONE;
})
-(define_expand "vcondv2di"
- [(set (match_operand:V2DI 0 "register_operand" "")
- (if_then_else:V2DI
- (match_operator 3 ""
- [(match_operand:V2DI 4 "nonimmediate_operand" "")
- (match_operand:V2DI 5 "nonimmediate_operand" "")])
- (match_operand:V2DI 1 "general_operand" "")
- (match_operand:V2DI 2 "general_operand" "")))]
+;; 128-bit vector conditional (signed compare) for QI/HI/SI element
+;; comparisons; the data mode only has to match in element count.
+(define_expand "vcond<V_128:mode><VI124_128:mode>"
+ [(set (match_operand:V_128 0 "register_operand" "")
+ (if_then_else:V_128
+ (match_operator 3 ""
+ [(match_operand:VI124_128 4 "nonimmediate_operand" "")
+ (match_operand:VI124_128 5 "nonimmediate_operand" "")])
+ (match_operand:V_128 1 "general_operand" "")
+ (match_operand:V_128 2 "general_operand" "")))]
+ "TARGET_SSE2
+ && (GET_MODE_NUNITS (<V_128:MODE>mode)
+ == GET_MODE_NUNITS (<VI124_128:MODE>mode))"
+{
+ bool ok = ix86_expand_int_vcond (operands);
+ gcc_assert (ok);
+ DONE;
+})
+
+;; DI-element comparisons are split out: signed V2DI compare needs
+;; SSE4.2 (pcmpgtq).  VI8F_128 covers both V2DI and V2DF data.
+(define_expand "vcond<VI8F_128:mode>v2di"
+ [(set (match_operand:VI8F_128 0 "register_operand" "")
+ (if_then_else:VI8F_128
+ (match_operator 3 ""
+ [(match_operand:V2DI 4 "nonimmediate_operand" "")
+ (match_operand:V2DI 5 "nonimmediate_operand" "")])
+ (match_operand:VI8F_128 1 "general_operand" "")
+ (match_operand:VI8F_128 2 "general_operand" "")))]
"TARGET_SSE4_2"
{
bool ok = ix86_expand_int_vcond (operands);
DONE;
})
-(define_expand "vcondu<mode>"
- [(set (match_operand:VI124_128 0 "register_operand" "")
- (if_then_else:VI124_128
- (match_operator 3 ""
- [(match_operand:VI124_128 4 "nonimmediate_operand" "")
- (match_operand:VI124_128 5 "nonimmediate_operand" "")])
- (match_operand:VI124_128 1 "general_operand" "")
- (match_operand:VI124_128 2 "general_operand" "")))]
- "TARGET_SSE2"
+;; 256-bit vector conditional, unsigned compare variant; same
+;; element-count pairing rule as vcond above.
+(define_expand "vcondu<V_256:mode><VI_256:mode>"
+ [(set (match_operand:V_256 0 "register_operand" "")
+ (if_then_else:V_256
+ (match_operator 3 ""
+ [(match_operand:VI_256 4 "nonimmediate_operand" "")
+ (match_operand:VI_256 5 "nonimmediate_operand" "")])
+ (match_operand:V_256 1 "general_operand" "")
+ (match_operand:V_256 2 "general_operand" "")))]
+ "TARGET_AVX2
+ && (GET_MODE_NUNITS (<V_256:MODE>mode)
+ == GET_MODE_NUNITS (<VI_256:MODE>mode))"
{
bool ok = ix86_expand_int_vcond (operands);
gcc_assert (ok);
DONE;
})
-(define_expand "vconduv2di"
- [(set (match_operand:V2DI 0 "register_operand" "")
- (if_then_else:V2DI
- (match_operator 3 ""
- [(match_operand:V2DI 4 "nonimmediate_operand" "")
- (match_operand:V2DI 5 "nonimmediate_operand" "")])
- (match_operand:V2DI 1 "general_operand" "")
- (match_operand:V2DI 2 "general_operand" "")))]
+;; 128-bit vector conditional, unsigned compare variant.
+(define_expand "vcondu<V_128:mode><VI124_128:mode>"
+ [(set (match_operand:V_128 0 "register_operand" "")
+ (if_then_else:V_128
+ (match_operator 3 ""
+ [(match_operand:VI124_128 4 "nonimmediate_operand" "")
+ (match_operand:VI124_128 5 "nonimmediate_operand" "")])
+ (match_operand:V_128 1 "general_operand" "")
+ (match_operand:V_128 2 "general_operand" "")))]
+ "TARGET_SSE2
+ && (GET_MODE_NUNITS (<V_128:MODE>mode)
+ == GET_MODE_NUNITS (<VI124_128:MODE>mode))"
+{
+ bool ok = ix86_expand_int_vcond (operands);
+ gcc_assert (ok);
+ DONE;
+})
+
+;; Unsigned V2DI comparisons likewise require SSE4.2.
+(define_expand "vcondu<VI8F_128:mode>v2di"
+ [(set (match_operand:VI8F_128 0 "register_operand" "")
+ (if_then_else:VI8F_128
+ (match_operator 3 ""
+ [(match_operand:V2DI 4 "nonimmediate_operand" "")
+ (match_operand:V2DI 5 "nonimmediate_operand" "")])
+ (match_operand:VI8F_128 1 "general_operand" "")
+ (match_operand:VI8F_128 2 "general_operand" "")))]
"TARGET_SSE4_2"
{
bool ok = ix86_expand_int_vcond (operands);
DONE;
})
+;; Modes for which a variable permute (selector in a register) can be
+;; expanded; 256-bit modes require AVX2.
+(define_mode_iterator VEC_PERM_AVX2
+ [V16QI V8HI V4SI V2DI V4SF V2DF
+ (V32QI "TARGET_AVX2") (V16HI "TARGET_AVX2")
+ (V8SI "TARGET_AVX2") (V4DI "TARGET_AVX2")
+ (V8SF "TARGET_AVX2") (V4DF "TARGET_AVX2")])
+
+;; Standard-named variable permute: op0 = select elements of op1/op2
+;; per the integer index vector op3.  Needs at least one ISA with a
+;; byte shuffle (SSSE3 pshufb, AVX vpermil*, or XOP vpperm).
+(define_expand "vec_perm<mode>"
+ [(match_operand:VEC_PERM_AVX2 0 "register_operand" "")
+ (match_operand:VEC_PERM_AVX2 1 "register_operand" "")
+ (match_operand:VEC_PERM_AVX2 2 "register_operand" "")
+ (match_operand:<sseintvecmode> 3 "register_operand" "")]
+ "TARGET_SSSE3 || TARGET_AVX || TARGET_XOP"
+{
+ ix86_expand_vec_perm (operands);
+ DONE;
+})
+
+;; Modes for which constant-selector permutes may be attempted.
+(define_mode_iterator VEC_PERM_CONST
+ [(V4SF "TARGET_SSE") (V4SI "TARGET_SSE")
+ (V2DF "TARGET_SSE") (V2DI "TARGET_SSE")
+ (V16QI "TARGET_SSE2") (V8HI "TARGET_SSE2")
+ (V8SF "TARGET_AVX") (V4DF "TARGET_AVX")
+ (V8SI "TARGET_AVX") (V4DI "TARGET_AVX")
+ (V32QI "TARGET_AVX2") (V16HI "TARGET_AVX2")])
+
+;; Standard-named constant permute.  FAIL (rather than assert) when no
+;; instruction sequence can implement the requested selector, letting
+;; the middle end fall back to a generic expansion.
+(define_expand "vec_perm_const<mode>"
+ [(match_operand:VEC_PERM_CONST 0 "register_operand" "")
+ (match_operand:VEC_PERM_CONST 1 "register_operand" "")
+ (match_operand:VEC_PERM_CONST 2 "register_operand" "")
+ (match_operand:<sseintvecmode> 3 "" "")]
+ ""
+{
+ if (ix86_expand_vec_perm_const (operands))
+ DONE;
+ else
+ FAIL;
+})
+
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;;
;; Parallel bitwise logical operations
operands[2] = force_reg (<MODE>mode, gen_rtx_CONST_VECTOR (<MODE>mode, v));
})
-(define_expand "sse2_andnot<mode>3"
- [(set (match_operand:VI_128 0 "register_operand" "")
- (and:VI_128
- (not:VI_128 (match_operand:VI_128 1 "register_operand" ""))
- (match_operand:VI_128 2 "nonimmediate_operand" "")))]
+;; ANDN: op0 = ~op1 & op2, generalized from the 128-bit SSE2 pattern
+;; to 256-bit integer modes; the <sse2_avx2> attribute selects the
+;; builtin name prefix per mode.
+(define_expand "<sse2_avx2>_andnot<mode>3"
+ [(set (match_operand:VI_AVX2 0 "register_operand" "")
+ (and:VI_AVX2
+ (not:VI_AVX2 (match_operand:VI_AVX2 1 "register_operand" ""))
+ (match_operand:VI_AVX2 2 "nonimmediate_operand" "")))]
"TARGET_SSE2")
(define_insn "*andnot<mode>3"
{
static char buf[32];
const char *ops;
- const char *tmp
- = (get_attr_mode (insn) == MODE_TI) ? "pandn" : "andnps";
+ const char *tmp;
+
+ /* Pick the mnemonic from the insn's mode attribute: integer vector
+ modes use pandn, float-typed modes the andnps form.  Each ISA
+ assert pairs with the mode the attribute can produce.  */
+ switch (get_attr_mode (insn))
+ {
+ case MODE_OI:
+ gcc_assert (TARGET_AVX2);
+ /* FALLTHRU */
+ case MODE_TI:
+ gcc_assert (TARGET_SSE2);
+
+ tmp = "pandn";
+ break;
+
+ case MODE_V8SF:
+ gcc_assert (TARGET_AVX);
+ /* FALLTHRU */
+ case MODE_V4SF:
+ gcc_assert (TARGET_SSE);
+
+ tmp = "andnps";
+ break;
+
+ default:
+ gcc_unreachable ();
+ }
switch (which_alternative)
{
(const_string "*")))
(set_attr "prefix" "orig,vex")
(set (attr "mode")
- (cond [(ne (symbol_ref "GET_MODE_SIZE (<MODE>mode) > 128") (const_int 0))
+ (cond [(and (not (match_test "TARGET_AVX2"))
+ (match_test "GET_MODE_SIZE (<MODE>mode) > 16"))
(const_string "V8SF")
- (ne (symbol_ref "TARGET_SSE2") (const_int 0))
- (const_string "TI")
+ (not (match_test "TARGET_SSE2"))
+ (const_string "V4SF")
]
- (const_string "V4SF")))])
+ (const_string "<sseinsnmode>")))])
(define_expand "<code><mode>3"
[(set (match_operand:VI 0 "register_operand" "")
{
static char buf[32];
const char *ops;
- const char *tmp
- = (get_attr_mode (insn) == MODE_TI) ? "p<logic>" : "<logic>ps";
+ const char *tmp;
+
+ /* Same mnemonic selection scheme as the andnot output template:
+ integer modes get the p<logic> form, float modes <logic>ps.  */
+ switch (get_attr_mode (insn))
+ {
+ case MODE_OI:
+ gcc_assert (TARGET_AVX2);
+ /* FALLTHRU */
+ case MODE_TI:
+ gcc_assert (TARGET_SSE2);
+
+ tmp = "p<logic>";
+ break;
+
+ case MODE_V8SF:
+ gcc_assert (TARGET_AVX);
+ /* FALLTHRU */
+ case MODE_V4SF:
+ gcc_assert (TARGET_SSE);
+
+ tmp = "<logic>ps";
+ break;
+
+ default:
+ gcc_unreachable ();
+ }
switch (which_alternative)
{
(const_string "*")))
(set_attr "prefix" "orig,vex")
(set (attr "mode")
- (cond [(ne (symbol_ref "GET_MODE_SIZE (<MODE>mode) > 128") (const_int 0))
+ (cond [(and (not (match_test "TARGET_AVX2"))
+ (match_test "GET_MODE_SIZE (<MODE>mode) > 16"))
(const_string "V8SF")
- (ne (symbol_ref "TARGET_SSE2") (const_int 0))
- (const_string "TI")
+ (not (match_test "TARGET_SSE2"))
+ (const_string "V4SF")
]
- (const_string "V4SF")))])
+ (const_string "<sseinsnmode>")))])
(define_insn "*andnottf3"
[(set (match_operand:TF 0 "register_operand" "=x,x")
+;; Standard-named truncating pack: two VI248 vectors are viewed as one
+;; vector of half-width elements (ssepackmode); widened to the 256-bit
+;; modes via VI248_AVX2.
(define_expand "vec_pack_trunc_<mode>"
[(match_operand:<ssepackmode> 0 "register_operand" "")
- (match_operand:VI248_128 1 "register_operand" "")
- (match_operand:VI248_128 2 "register_operand" "")]
+ (match_operand:VI248_AVX2 1 "register_operand" "")
+ (match_operand:VI248_AVX2 2 "register_operand" "")]
"TARGET_SSE2"
{
rtx op1 = gen_lowpart (<ssepackmode>mode, operands[1]);
DONE;
})
-(define_insn "sse2_packsswb"
- [(set (match_operand:V16QI 0 "register_operand" "=x,x")
- (vec_concat:V16QI
- (ss_truncate:V8QI
- (match_operand:V8HI 1 "register_operand" "0,x"))
- (ss_truncate:V8QI
- (match_operand:V8HI 2 "nonimmediate_operand" "xm,xm"))))]
+;; Signed-saturating pack to bytes.  NOTE(review): for the 256-bit
+;; form, vpacksswb packs within each 128-bit lane, which this plain
+;; vec_concat RTL does not express — confirm users account for the
+;; lane-wise element order.
+(define_insn "<sse2_avx2>_packsswb"
+ [(set (match_operand:VI1_AVX2 0 "register_operand" "=x,x")
+ (vec_concat:VI1_AVX2
+ (ss_truncate:<ssehalfvecmode>
+ (match_operand:<sseunpackmode> 1 "register_operand" "0,x"))
+ (ss_truncate:<ssehalfvecmode>
+ (match_operand:<sseunpackmode> 2 "nonimmediate_operand" "xm,xm"))))]
"TARGET_SSE2"
"@
packsswb\t{%2, %0|%0, %2}
(set_attr "type" "sselog")
(set_attr "prefix_data16" "1,*")
(set_attr "prefix" "orig,vex")
- (set_attr "mode" "TI")])
+ (set_attr "mode" "<sseinsnmode>")])
-(define_insn "sse2_packssdw"
- [(set (match_operand:V8HI 0 "register_operand" "=x,x")
- (vec_concat:V8HI
- (ss_truncate:V4HI
- (match_operand:V4SI 1 "register_operand" "0,x"))
- (ss_truncate:V4HI
- (match_operand:V4SI 2 "nonimmediate_operand" "xm,xm"))))]
+;; Signed-saturating pack to words; same lane-wise caveat as packsswb
+;; for the 256-bit form.
+(define_insn "<sse2_avx2>_packssdw"
+ [(set (match_operand:VI2_AVX2 0 "register_operand" "=x,x")
+ (vec_concat:VI2_AVX2
+ (ss_truncate:<ssehalfvecmode>
+ (match_operand:<sseunpackmode> 1 "register_operand" "0,x"))
+ (ss_truncate:<ssehalfvecmode>
+ (match_operand:<sseunpackmode> 2 "nonimmediate_operand" "xm,xm"))))]
"TARGET_SSE2"
"@
packssdw\t{%2, %0|%0, %2}
(set_attr "type" "sselog")
(set_attr "prefix_data16" "1,*")
(set_attr "prefix" "orig,vex")
- (set_attr "mode" "TI")])
+ (set_attr "mode" "<sseinsnmode>")])
-(define_insn "sse2_packuswb"
- [(set (match_operand:V16QI 0 "register_operand" "=x,x")
- (vec_concat:V16QI
- (us_truncate:V8QI
- (match_operand:V8HI 1 "register_operand" "0,x"))
- (us_truncate:V8QI
- (match_operand:V8HI 2 "nonimmediate_operand" "xm,xm"))))]
+;; Unsigned-saturating pack to bytes; same lane-wise caveat as
+;; packsswb for the 256-bit form.
+(define_insn "<sse2_avx2>_packuswb"
+ [(set (match_operand:VI1_AVX2 0 "register_operand" "=x,x")
+ (vec_concat:VI1_AVX2
+ (us_truncate:<ssehalfvecmode>
+ (match_operand:<sseunpackmode> 1 "register_operand" "0,x"))
+ (us_truncate:<ssehalfvecmode>
+ (match_operand:<sseunpackmode> 2 "nonimmediate_operand" "xm,xm"))))]
"TARGET_SSE2"
"@
packuswb\t{%2, %0|%0, %2}
(set_attr "type" "sselog")
(set_attr "prefix_data16" "1,*")
(set_attr "prefix" "orig,vex")
- (set_attr "mode" "TI")])
+ (set_attr "mode" "<sseinsnmode>")])
+
+;; vpunpckhbw: interleaves the high bytes of each 128-bit lane of the
+;; two sources (indices 8-15 with 40-47, and 24-31 with 56-63) — a
+;; per-lane unpack, not a full 256-bit interleave.
+(define_insn "avx2_interleave_highv32qi"
+ [(set (match_operand:V32QI 0 "register_operand" "=x")
+ (vec_select:V32QI
+ (vec_concat:V64QI
+ (match_operand:V32QI 1 "register_operand" "x")
+ (match_operand:V32QI 2 "nonimmediate_operand" "xm"))
+ (parallel [(const_int 8) (const_int 40)
+ (const_int 9) (const_int 41)
+ (const_int 10) (const_int 42)
+ (const_int 11) (const_int 43)
+ (const_int 12) (const_int 44)
+ (const_int 13) (const_int 45)
+ (const_int 14) (const_int 46)
+ (const_int 15) (const_int 47)
+ (const_int 24) (const_int 56)
+ (const_int 25) (const_int 57)
+ (const_int 26) (const_int 58)
+ (const_int 27) (const_int 59)
+ (const_int 28) (const_int 60)
+ (const_int 29) (const_int 61)
+ (const_int 30) (const_int 62)
+ (const_int 31) (const_int 63)])))]
+ "TARGET_AVX2"
+ "vpunpckhbw\t{%2, %1, %0|%0, %1, %2}"
+ [(set_attr "type" "sselog")
+ (set_attr "prefix" "vex")
+ (set_attr "mode" "OI")])
(define_insn "vec_interleave_highv16qi"
[(set (match_operand:V16QI 0 "register_operand" "=x,x")
(set_attr "prefix" "orig,vex")
(set_attr "mode" "TI")])
+;; vpunpcklbw: interleaves the low bytes of each 128-bit lane (indices
+;; 0-7 with 32-39 and 16-23 with 48-55); per-lane, like the high form.
+(define_insn "avx2_interleave_lowv32qi"
+ [(set (match_operand:V32QI 0 "register_operand" "=x")
+ (vec_select:V32QI
+ (vec_concat:V64QI
+ (match_operand:V32QI 1 "register_operand" "x")
+ (match_operand:V32QI 2 "nonimmediate_operand" "xm"))
+ (parallel [(const_int 0) (const_int 32)
+ (const_int 1) (const_int 33)
+ (const_int 2) (const_int 34)
+ (const_int 3) (const_int 35)
+ (const_int 4) (const_int 36)
+ (const_int 5) (const_int 37)
+ (const_int 6) (const_int 38)
+ (const_int 7) (const_int 39)
+ (const_int 16) (const_int 48)
+ (const_int 17) (const_int 49)
+ (const_int 18) (const_int 50)
+ (const_int 19) (const_int 51)
+ (const_int 20) (const_int 52)
+ (const_int 21) (const_int 53)
+ (const_int 22) (const_int 54)
+ (const_int 23) (const_int 55)])))]
+ "TARGET_AVX2"
+ "vpunpcklbw\t{%2, %1, %0|%0, %1, %2}"
+ [(set_attr "type" "sselog")
+ (set_attr "prefix" "vex")
+ (set_attr "mode" "OI")])
+
(define_insn "vec_interleave_lowv16qi"
[(set (match_operand:V16QI 0 "register_operand" "=x,x")
(vec_select:V16QI
(set_attr "prefix" "orig,vex")
(set_attr "mode" "TI")])
+;; vpunpckhwd: per-lane interleave of the high words of each 128-bit
+;; lane of the two sources.
+(define_insn "avx2_interleave_highv16hi"
+ [(set (match_operand:V16HI 0 "register_operand" "=x")
+ (vec_select:V16HI
+ (vec_concat:V32HI
+ (match_operand:V16HI 1 "register_operand" "x")
+ (match_operand:V16HI 2 "nonimmediate_operand" "xm"))
+ (parallel [(const_int 4) (const_int 20)
+ (const_int 5) (const_int 21)
+ (const_int 6) (const_int 22)
+ (const_int 7) (const_int 23)
+ (const_int 12) (const_int 28)
+ (const_int 13) (const_int 29)
+ (const_int 14) (const_int 30)
+ (const_int 15) (const_int 31)])))]
+ "TARGET_AVX2"
+ "vpunpckhwd\t{%2, %1, %0|%0, %1, %2}"
+ [(set_attr "type" "sselog")
+ (set_attr "prefix" "vex")
+ (set_attr "mode" "OI")])
+
(define_insn "vec_interleave_highv8hi"
[(set (match_operand:V8HI 0 "register_operand" "=x,x")
(vec_select:V8HI
(set_attr "prefix" "orig,vex")
(set_attr "mode" "TI")])
+;; vpunpcklwd: per-lane interleave of the low words of each 128-bit
+;; lane of the two sources.
+(define_insn "avx2_interleave_lowv16hi"
+ [(set (match_operand:V16HI 0 "register_operand" "=x")
+ (vec_select:V16HI
+ (vec_concat:V32HI
+ (match_operand:V16HI 1 "register_operand" "x")
+ (match_operand:V16HI 2 "nonimmediate_operand" "xm"))
+ (parallel [(const_int 0) (const_int 16)
+ (const_int 1) (const_int 17)
+ (const_int 2) (const_int 18)
+ (const_int 3) (const_int 19)
+ (const_int 8) (const_int 24)
+ (const_int 9) (const_int 25)
+ (const_int 10) (const_int 26)
+ (const_int 11) (const_int 27)])))]
+ "TARGET_AVX2"
+ "vpunpcklwd\t{%2, %1, %0|%0, %1, %2}"
+ [(set_attr "type" "sselog")
+ (set_attr "prefix" "vex")
+ (set_attr "mode" "OI")])
+
(define_insn "vec_interleave_lowv8hi"
[(set (match_operand:V8HI 0 "register_operand" "=x,x")
(vec_select:V8HI
(set_attr "prefix" "orig,vex")
(set_attr "mode" "TI")])
+;; vpunpckhdq: per-lane interleave of the high dwords of each 128-bit
+;; lane of the two sources.
+(define_insn "avx2_interleave_highv8si"
+ [(set (match_operand:V8SI 0 "register_operand" "=x")
+ (vec_select:V8SI
+ (vec_concat:V16SI
+ (match_operand:V8SI 1 "register_operand" "x")
+ (match_operand:V8SI 2 "nonimmediate_operand" "xm"))
+ (parallel [(const_int 2) (const_int 10)
+ (const_int 3) (const_int 11)
+ (const_int 6) (const_int 14)
+ (const_int 7) (const_int 15)])))]
+ "TARGET_AVX2"
+ "vpunpckhdq\t{%2, %1, %0|%0, %1, %2}"
+ [(set_attr "type" "sselog")
+ (set_attr "prefix" "vex")
+ (set_attr "mode" "OI")])
+
(define_insn "vec_interleave_highv4si"
[(set (match_operand:V4SI 0 "register_operand" "=x,x")
(vec_select:V4SI
(set_attr "prefix" "orig,vex")
(set_attr "mode" "TI")])
+;; vpunpckldq: per-lane interleave of the low dwords of each 128-bit
+;; lane of the two sources.
+(define_insn "avx2_interleave_lowv8si"
+ [(set (match_operand:V8SI 0 "register_operand" "=x")
+ (vec_select:V8SI
+ (vec_concat:V16SI
+ (match_operand:V8SI 1 "register_operand" "x")
+ (match_operand:V8SI 2 "nonimmediate_operand" "xm"))
+ (parallel [(const_int 0) (const_int 8)
+ (const_int 1) (const_int 9)
+ (const_int 4) (const_int 12)
+ (const_int 5) (const_int 13)])))]
+ "TARGET_AVX2"
+ "vpunpckldq\t{%2, %1, %0|%0, %1, %2}"
+ [(set_attr "type" "sselog")
+ (set_attr "prefix" "vex")
+ (set_attr "mode" "OI")])
+
(define_insn "vec_interleave_lowv4si"
[(set (match_operand:V4SI 0 "register_operand" "=x,x")
(vec_select:V4SI
(set_attr "prefix" "orig,vex")
(set_attr "mode" "TI")])
+;; The AVX2 unpack insns only interleave within 128-bit lanes.  The
+;; true 256-bit interleave is built from both in-lane unpacks followed
+;; by avx2_permv2ti (vperm2i128) picking lane pairs: selector
+;; 1 | (3 << 4) takes the high lane of each temporary.
+(define_expand "vec_interleave_high<mode>"
+ [(match_operand:VI_256 0 "register_operand" "=x")
+ (match_operand:VI_256 1 "register_operand" "x")
+ (match_operand:VI_256 2 "nonimmediate_operand" "xm")]
+ "TARGET_AVX2"
+{
+ rtx t1 = gen_reg_rtx (<MODE>mode);
+ rtx t2 = gen_reg_rtx (<MODE>mode);
+ emit_insn (gen_avx2_interleave_low<mode> (t1, operands[1], operands[2]));
+ emit_insn (gen_avx2_interleave_high<mode> (t2, operands[1], operands[2]));
+ emit_insn (gen_avx2_permv2ti (gen_lowpart (V4DImode, operands[0]),
+ gen_lowpart (V4DImode, t1),
+ gen_lowpart (V4DImode, t2), GEN_INT (1 + (3 << 4))));
+ DONE;
+})
+
+;; As above, with selector 0 | (2 << 4) taking the low lane of each
+;; temporary.
+(define_expand "vec_interleave_low<mode>"
+ [(match_operand:VI_256 0 "register_operand" "=x")
+ (match_operand:VI_256 1 "register_operand" "x")
+ (match_operand:VI_256 2 "nonimmediate_operand" "xm")]
+ "TARGET_AVX2"
+{
+ rtx t1 = gen_reg_rtx (<MODE>mode);
+ rtx t2 = gen_reg_rtx (<MODE>mode);
+ emit_insn (gen_avx2_interleave_low<mode> (t1, operands[1], operands[2]));
+ emit_insn (gen_avx2_interleave_high<mode> (t2, operands[1], operands[2]));
+ emit_insn (gen_avx2_permv2ti (gen_lowpart (V4DImode, operands[0]),
+ gen_lowpart (V4DImode, t1),
+ gen_lowpart (V4DImode, t2), GEN_INT (0 + (2 << 4))));
+ DONE;
+})
+
;; Modes handled by pinsr patterns.
(define_mode_iterator PINSR_MODE
[(V16QI "TARGET_SSE4_1") V8HI
{
case 0:
if (GET_MODE_SIZE (<ssescalarmode>mode) < GET_MODE_SIZE (SImode))
- return "pinsr<ssemodesuffix>\t{%3, %k2, %0|%0, %k2, %3}";
+ return "pinsr<ssemodesuffix>\t{%3, %k2, %0|%0, %k2, %3}";
/* FALLTHRU */
case 1:
return "pinsr<ssemodesuffix>\t{%3, %2, %0|%0, %2, %3}";
case 2:
if (GET_MODE_SIZE (<ssescalarmode>mode) < GET_MODE_SIZE (SImode))
- return "vpinsr<ssemodesuffix>\t{%3, %k2, %1, %0|%0, %1, %k2, %3}";
+ return "vpinsr<ssemodesuffix>\t{%3, %k2, %1, %0|%0, %1, %k2, %3}";
/* FALLTHRU */
case 3:
return "vpinsr<ssemodesuffix>\t{%3, %2, %1, %0|%0, %1, %2, %3}";
(set_attr "type" "sselog")
(set (attr "prefix_rex")
(if_then_else
- (and (eq (symbol_ref "TARGET_AVX") (const_int 0))
+ (and (not (match_test "TARGET_AVX"))
(eq (const_string "<MODE>mode") (const_string "V2DImode")))
(const_string "1")
(const_string "*")))
(set (attr "prefix_data16")
(if_then_else
- (and (eq (symbol_ref "TARGET_AVX") (const_int 0))
+ (and (not (match_test "TARGET_AVX"))
(eq (const_string "<MODE>mode") (const_string "V8HImode")))
(const_string "1")
(const_string "*")))
(set (attr "prefix_extra")
(if_then_else
- (and (eq (symbol_ref "TARGET_AVX") (const_int 0))
+ (and (not (match_test "TARGET_AVX"))
(eq (const_string "<MODE>mode") (const_string "V8HImode")))
(const_string "*")
(const_string "1")))
(set_attr "prefix" "maybe_vex")
(set_attr "mode" "TI")])
-;; It must come before *vec_extractv2di_1_sse since it is preferred.
+;; It must come before *vec_extractv2di_1_rex64 since it is preferred.
(define_insn "*sse4_1_pextrq"
[(set (match_operand:DI 0 "nonimmediate_operand" "=rm")
(vec_select:DI
(set_attr "prefix" "maybe_vex")
(set_attr "mode" "TI")])
+;; 256-bit vpshufd applies the same four 2-bit selectors to both
+;; 128-bit lanes.  The expander turns the 8-bit immediate into
+;; explicit element indices, offset by +4 for the second lane.
+(define_expand "avx2_pshufdv3"
+ [(match_operand:V8SI 0 "register_operand" "")
+ (match_operand:V8SI 1 "nonimmediate_operand" "")
+ (match_operand:SI 2 "const_0_to_255_operand" "")]
+ "TARGET_AVX2"
+{
+ int mask = INTVAL (operands[2]);
+ emit_insn (gen_avx2_pshufd_1 (operands[0], operands[1],
+ GEN_INT ((mask >> 0) & 3),
+ GEN_INT ((mask >> 2) & 3),
+ GEN_INT ((mask >> 4) & 3),
+ GEN_INT ((mask >> 6) & 3),
+ GEN_INT (((mask >> 0) & 3) + 4),
+ GEN_INT (((mask >> 2) & 3) + 4),
+ GEN_INT (((mask >> 4) & 3) + 4),
+ GEN_INT (((mask >> 6) & 3) + 4)));
+ DONE;
+})
+
+;; Matches only when the two lanes use the same pattern (the +4
+;; condition); the output routine re-encodes the 8-bit immediate.
+(define_insn "avx2_pshufd_1"
+ [(set (match_operand:V8SI 0 "register_operand" "=x")
+ (vec_select:V8SI
+ (match_operand:V8SI 1 "nonimmediate_operand" "xm")
+ (parallel [(match_operand 2 "const_0_to_3_operand" "")
+ (match_operand 3 "const_0_to_3_operand" "")
+ (match_operand 4 "const_0_to_3_operand" "")
+ (match_operand 5 "const_0_to_3_operand" "")
+ (match_operand 6 "const_4_to_7_operand" "")
+ (match_operand 7 "const_4_to_7_operand" "")
+ (match_operand 8 "const_4_to_7_operand" "")
+ (match_operand 9 "const_4_to_7_operand" "")])))]
+ "TARGET_AVX2
+ && INTVAL (operands[2]) + 4 == INTVAL (operands[6])
+ && INTVAL (operands[3]) + 4 == INTVAL (operands[7])
+ && INTVAL (operands[4]) + 4 == INTVAL (operands[8])
+ && INTVAL (operands[5]) + 4 == INTVAL (operands[9])"
+{
+ int mask = 0;
+ mask |= INTVAL (operands[2]) << 0;
+ mask |= INTVAL (operands[3]) << 2;
+ mask |= INTVAL (operands[4]) << 4;
+ mask |= INTVAL (operands[5]) << 6;
+ operands[2] = GEN_INT (mask);
+
+ return "vpshufd\t{%2, %1, %0|%0, %1, %2}";
+}
+ [(set_attr "type" "sselog1")
+ (set_attr "prefix" "vex")
+ (set_attr "length_immediate" "1")
+ (set_attr "mode" "OI")])
+
(define_expand "sse2_pshufd"
[(match_operand:V4SI 0 "register_operand" "")
(match_operand:V4SI 1 "nonimmediate_operand" "")
(set_attr "length_immediate" "1")
(set_attr "mode" "TI")])
+;; 256-bit vpshuflw shuffles the low quadword of each 128-bit lane
+;; and copies the high quadword through.  Second-lane indices are the
+;; first-lane selectors offset by +8.
+(define_expand "avx2_pshuflwv3"
+ [(match_operand:V16HI 0 "register_operand" "")
+ (match_operand:V16HI 1 "nonimmediate_operand" "")
+ (match_operand:SI 2 "const_0_to_255_operand" "")]
+ "TARGET_AVX2"
+{
+ int mask = INTVAL (operands[2]);
+ emit_insn (gen_avx2_pshuflw_1 (operands[0], operands[1],
+ GEN_INT ((mask >> 0) & 3),
+ GEN_INT ((mask >> 2) & 3),
+ GEN_INT ((mask >> 4) & 3),
+ GEN_INT ((mask >> 6) & 3),
+ GEN_INT (((mask >> 0) & 3) + 8),
+ GEN_INT (((mask >> 2) & 3) + 8),
+ GEN_INT (((mask >> 4) & 3) + 8),
+ GEN_INT (((mask >> 6) & 3) + 8)));
+ DONE;
+})
+
+;; Fixed const_ints 4-7 and 12-15 pin the pass-through high quadword
+;; of each lane; the +8 condition forces identical lane patterns.
+(define_insn "avx2_pshuflw_1"
+ [(set (match_operand:V16HI 0 "register_operand" "=x")
+ (vec_select:V16HI
+ (match_operand:V16HI 1 "nonimmediate_operand" "xm")
+ (parallel [(match_operand 2 "const_0_to_3_operand" "")
+ (match_operand 3 "const_0_to_3_operand" "")
+ (match_operand 4 "const_0_to_3_operand" "")
+ (match_operand 5 "const_0_to_3_operand" "")
+ (const_int 4)
+ (const_int 5)
+ (const_int 6)
+ (const_int 7)
+ (match_operand 6 "const_8_to_11_operand" "")
+ (match_operand 7 "const_8_to_11_operand" "")
+ (match_operand 8 "const_8_to_11_operand" "")
+ (match_operand 9 "const_8_to_11_operand" "")
+ (const_int 12)
+ (const_int 13)
+ (const_int 14)
+ (const_int 15)])))]
+ "TARGET_AVX2
+ && INTVAL (operands[2]) + 8 == INTVAL (operands[6])
+ && INTVAL (operands[3]) + 8 == INTVAL (operands[7])
+ && INTVAL (operands[4]) + 8 == INTVAL (operands[8])
+ && INTVAL (operands[5]) + 8 == INTVAL (operands[9])"
+{
+ int mask = 0;
+ mask |= INTVAL (operands[2]) << 0;
+ mask |= INTVAL (operands[3]) << 2;
+ mask |= INTVAL (operands[4]) << 4;
+ mask |= INTVAL (operands[5]) << 6;
+ operands[2] = GEN_INT (mask);
+
+ return "vpshuflw\t{%2, %1, %0|%0, %1, %2}";
+}
+ [(set_attr "type" "sselog")
+ (set_attr "prefix" "vex")
+ (set_attr "length_immediate" "1")
+ (set_attr "mode" "OI")])
+
(define_expand "sse2_pshuflw"
[(match_operand:V8HI 0 "register_operand" "")
(match_operand:V8HI 1 "nonimmediate_operand" "")
(set_attr "length_immediate" "1")
(set_attr "mode" "TI")])
+;; 256-bit vpshufhw shuffles the high quadword of each 128-bit lane.
+;; Selectors are absolute element numbers, so lane 0 uses 4-7 (+4)
+;; and lane 1 uses 12-15 (+12).
+(define_expand "avx2_pshufhwv3"
+ [(match_operand:V16HI 0 "register_operand" "")
+ (match_operand:V16HI 1 "nonimmediate_operand" "")
+ (match_operand:SI 2 "const_0_to_255_operand" "")]
+ "TARGET_AVX2"
+{
+ int mask = INTVAL (operands[2]);
+ emit_insn (gen_avx2_pshufhw_1 (operands[0], operands[1],
+ GEN_INT (((mask >> 0) & 3) + 4),
+ GEN_INT (((mask >> 2) & 3) + 4),
+ GEN_INT (((mask >> 4) & 3) + 4),
+ GEN_INT (((mask >> 6) & 3) + 4),
+ GEN_INT (((mask >> 0) & 3) + 12),
+ GEN_INT (((mask >> 2) & 3) + 12),
+ GEN_INT (((mask >> 4) & 3) + 12),
+ GEN_INT (((mask >> 6) & 3) + 12)));
+ DONE;
+})
+
+;; Low quadwords pass through (const_ints 0-3 and 8-11).  Since the
+;; operands carry 4-biased selectors, the immediate is rebuilt with
+;; a -4 correction below.
+(define_insn "avx2_pshufhw_1"
+ [(set (match_operand:V16HI 0 "register_operand" "=x")
+ (vec_select:V16HI
+ (match_operand:V16HI 1 "nonimmediate_operand" "xm")
+ (parallel [(const_int 0)
+ (const_int 1)
+ (const_int 2)
+ (const_int 3)
+ (match_operand 2 "const_4_to_7_operand" "")
+ (match_operand 3 "const_4_to_7_operand" "")
+ (match_operand 4 "const_4_to_7_operand" "")
+ (match_operand 5 "const_4_to_7_operand" "")
+ (const_int 8)
+ (const_int 9)
+ (const_int 10)
+ (const_int 11)
+ (match_operand 6 "const_12_to_15_operand" "")
+ (match_operand 7 "const_12_to_15_operand" "")
+ (match_operand 8 "const_12_to_15_operand" "")
+ (match_operand 9 "const_12_to_15_operand" "")])))]
+ "TARGET_AVX2
+ && INTVAL (operands[2]) + 8 == INTVAL (operands[6])
+ && INTVAL (operands[3]) + 8 == INTVAL (operands[7])
+ && INTVAL (operands[4]) + 8 == INTVAL (operands[8])
+ && INTVAL (operands[5]) + 8 == INTVAL (operands[9])"
+{
+ int mask = 0;
+ mask |= (INTVAL (operands[2]) - 4) << 0;
+ mask |= (INTVAL (operands[3]) - 4) << 2;
+ mask |= (INTVAL (operands[4]) - 4) << 4;
+ mask |= (INTVAL (operands[5]) - 4) << 6;
+ operands[2] = GEN_INT (mask);
+
+ return "vpshufhw\t{%2, %1, %0|%0, %1, %2}";
+}
+ [(set_attr "type" "sselog")
+ (set_attr "prefix" "vex")
+ (set_attr "length_immediate" "1")
+ (set_attr "mode" "OI")])
+
(define_expand "sse2_pshufhw"
[(match_operand:V8HI 0 "register_operand" "")
(match_operand:V8HI 1 "nonimmediate_operand" "")
"operands[2] = CONST0_RTX (V4SImode);")
+;; Load a 32-bit scalar into element 0 of a V4SI; operand 1 supplies
+;; the upper elements ("C" = zero vector).  The isa attribute change
+;; replaces the removed "base" value with per-alternative ISA gating.
(define_insn "sse2_loadld"
- [(set (match_operand:V4SI 0 "register_operand" "=Y2,Yi,x,x,x")
+ [(set (match_operand:V4SI 0 "register_operand" "=x,Yi,x,x,x")
(vec_merge:V4SI
(vec_duplicate:V4SI
- (match_operand:SI 2 "nonimmediate_operand" "m ,r ,m,x,x"))
- (match_operand:V4SI 1 "reg_or_0_operand" "C ,C ,C,0,x")
+ (match_operand:SI 2 "nonimmediate_operand" "m ,r ,m,x,x"))
+ (match_operand:V4SI 1 "reg_or_0_operand" "C ,C ,C,0,x")
(const_int 1)))]
"TARGET_SSE"
"@
movss\t{%2, %0|%0, %2}
movss\t{%2, %0|%0, %2}
vmovss\t{%2, %1, %0|%0, %1, %2}"
- [(set_attr "isa" "base,base,noavx,noavx,avx")
+ [(set_attr "isa" "sse2,*,noavx,noavx,avx")
(set_attr "type" "ssemov")
(set_attr "prefix" "maybe_vex,maybe_vex,orig,orig,vex")
(set_attr "mode" "TI,TI,V4SF,SF,SF")])
vpsrldq\t{$8, %1, %0|%0, %1, 8}
%vmovq\t{%H1, %0|%0, %H1}
mov{q}\t{%H1, %0|%0, %H1}"
- [(set_attr "isa" "base,noavx,avx,base,base")
+ [(set_attr "isa" "*,noavx,avx,*,*")
(set_attr "type" "ssemov,sseishft1,sseishft1,ssemov,imov")
(set_attr "length_immediate" "*,1,1,*,*")
(set_attr "memory" "*,none,none,*,*")
(set_attr "prefix" "maybe_vex,orig,vex,maybe_vex,orig")
(set_attr "mode" "V2SF,TI,TI,TI,DI")])
-(define_insn "*vec_extractv2di_1_sse2"
- [(set (match_operand:DI 0 "nonimmediate_operand" "=m,x,x,x")
+(define_insn "*vec_extractv2di_1"
+ [(set (match_operand:DI 0 "nonimmediate_operand" "=m,x,x,x,x,x")
(vec_select:DI
- (match_operand:V2DI 1 "nonimmediate_operand" " x,0,x,o")
+ (match_operand:V2DI 1 "nonimmediate_operand" " x,0,x,o,x,o")
(parallel [(const_int 1)])))]
- "!TARGET_64BIT
- && TARGET_SSE2 && !(MEM_P (operands[0]) && MEM_P (operands[1]))"
+ "!TARGET_64BIT && TARGET_SSE
+ && !(MEM_P (operands[0]) && MEM_P (operands[1]))"
"@
%vmovhps\t{%1, %0|%0, %1}
psrldq\t{$8, %0|%0, 8}
vpsrldq\t{$8, %1, %0|%0, %1, 8}
- %vmovq\t{%H1, %0|%0, %H1}"
- [(set_attr "isa" "base,noavx,avx,base")
- (set_attr "type" "ssemov,sseishft1,sseishft1,ssemov")
- (set_attr "length_immediate" "*,1,1,*")
- (set_attr "memory" "*,none,none,*")
- (set_attr "prefix" "maybe_vex,orig,vex,maybe_vex")
- (set_attr "mode" "V2SF,TI,TI,TI")])
-
-;; Not sure this is ever used, but it doesn't hurt to have it. -aoliva
-(define_insn "*vec_extractv2di_1_sse"
- [(set (match_operand:DI 0 "nonimmediate_operand" "=m,x,x")
- (vec_select:DI
- (match_operand:V2DI 1 "nonimmediate_operand" " x,x,o")
- (parallel [(const_int 1)])))]
- "!TARGET_SSE2 && TARGET_SSE
- && !(MEM_P (operands[0]) && MEM_P (operands[1]))"
- "@
- movhps\t{%1, %0|%0, %1}
+ %vmovq\t{%H1, %0|%0, %H1}
movhlps\t{%1, %0|%0, %1}
movlps\t{%H1, %0|%0, %H1}"
- [(set_attr "type" "ssemov")
- (set_attr "mode" "V2SF,V4SF,V2SF")])
-
-(define_insn "*vec_dupv4si_avx"
- [(set (match_operand:V4SI 0 "register_operand" "=x,x")
+ [(set_attr "isa" "*,sse2_noavx,avx,sse2,noavx,noavx")
+ (set_attr "type" "ssemov,sseishft1,sseishft1,ssemov,ssemov,ssemov")
+ (set_attr "length_immediate" "*,1,1,*,*,*")
+ (set_attr "memory" "*,none,none,*,*,*")
+ (set_attr "prefix" "maybe_vex,orig,vex,maybe_vex,orig,orig")
+ (set_attr "mode" "V2SF,TI,TI,TI,V4SF,V2SF")])
+
+;; Broadcast a scalar SI to all four V4SI elements.  The matcher's
+;; memory alternative (vbroadcastss) is AVX-only, so without AVX the
+;; operand is forced into a register first.
+(define_expand "vec_dupv4si"
+ [(set (match_operand:V4SI 0 "register_operand" "")
(vec_duplicate:V4SI
- (match_operand:SI 1 "nonimmediate_operand" " x,m")))]
- "TARGET_AVX"
- "@
- vpshufd\t{$0, %1, %0|%0, %1, 0}
- vbroadcastss\t{%1, %0|%0, %1}"
- [(set_attr "type" "sselog1,ssemov")
- (set_attr "length_immediate" "1,0")
- (set_attr "prefix_extra" "0,1")
- (set_attr "prefix" "vex")
- (set_attr "mode" "TI,V4SF")])
+ (match_operand:SI 1 "nonimmediate_operand" "")))]
+ "TARGET_SSE"
+{
+ if (!TARGET_AVX)
+ operands[1] = force_reg (V4SImode, operands[1]);
+})
+;; V4SI broadcast matcher, three alternatives: pshufd $0 (SSE2),
+;; vbroadcastss from memory (AVX), shufps $0 (pre-AVX fallback with
+;; matching input/output register).
(define_insn "*vec_dupv4si"
- [(set (match_operand:V4SI 0 "register_operand" "=Y2,x")
+ [(set (match_operand:V4SI 0 "register_operand" "=x,x,x")
(vec_duplicate:V4SI
- (match_operand:SI 1 "register_operand" " Y2,0")))]
+ (match_operand:SI 1 "nonimmediate_operand" " x,m,0")))]
 "TARGET_SSE"
 "@
- pshufd\t{$0, %1, %0|%0, %1, 0}
+ %vpshufd\t{$0, %1, %0|%0, %1, 0}
+ vbroadcastss\t{%1, %0|%0, %1}
 shufps\t{$0, %0, %0|%0, %0, 0}"
- [(set_attr "type" "sselog1")
- (set_attr "length_immediate" "1")
- (set_attr "mode" "TI,V4SF")])
-
-(define_insn "*vec_dupv2di_sse3"
- [(set (match_operand:V2DI 0 "register_operand" "=x,x,x")
+ [(set_attr "isa" "sse2,avx,noavx")
+ (set_attr "type" "sselog1,ssemov,sselog1")
+ (set_attr "length_immediate" "1,0,1")
+ (set_attr "prefix_extra" "0,1,*")
+ (set_attr "prefix" "maybe_vex,vex,orig")
+ (set_attr "mode" "TI,V4SF,V4SF")])
+
+;; Broadcast a scalar DI to both V2DI elements.  Only the AVX/SSE3
+;; alternatives of the matcher accept memory, so without AVX the
+;; operand is forced into a register first.
+(define_expand "vec_dupv2di"
+ [(set (match_operand:V2DI 0 "register_operand" "")
(vec_duplicate:V2DI
- (match_operand:DI 1 "nonimmediate_operand" " 0,x,m")))]
- "TARGET_SSE3"
- "@
- punpcklqdq\t%0, %0
- vpunpcklqdq\t{%d1, %0|%0, %d1}
- %vmovddup\t{%1, %0|%0, %1}"
- [(set_attr "isa" "noavx,avx,base")
- (set_attr "type" "sselog1")
- (set_attr "prefix" "orig,vex,maybe_vex")
- (set_attr "mode" "TI,TI,DF")])
+ (match_operand:DI 1 "nonimmediate_operand" "")))]
+ "TARGET_SSE"
+{
+ if (!TARGET_AVX)
+ operands[1] = force_reg (V2DImode, operands[1]);
+})
+;; V2DI broadcast matcher, merging the former *vec_dupv2di_sse3
+;; pattern: punpcklqdq (SSE2, no AVX), vpunpcklqdq (AVX),
+;; movddup from memory (SSE3), movlhps (pre-SSE2 fallback).
(define_insn "*vec_dupv2di"
- [(set (match_operand:V2DI 0 "register_operand" "=Y2,x")
+ [(set (match_operand:V2DI 0 "register_operand" "=x,x,x,x")
(vec_duplicate:V2DI
- (match_operand:DI 1 "register_operand" " 0 ,0")))]
+ (match_operand:DI 1 "nonimmediate_operand" " 0,x,m,0")))]
 "TARGET_SSE"
 "@
 punpcklqdq\t%0, %0
+ vpunpcklqdq\t{%d1, %0|%0, %d1}
+ %vmovddup\t{%1, %0|%0, %1}
 movlhps\t%0, %0"
- [(set_attr "type" "sselog1,ssemov")
- (set_attr "mode" "TI,V4SF")])
+ [(set_attr "isa" "sse2_noavx,avx,sse3,noavx")
+ (set_attr "type" "sselog1,sselog1,sselog1,ssemov")
+ (set_attr "prefix" "orig,vex,maybe_vex,orig")
+ (set_attr "mode" "TI,TI,DF,V4SF")])
(define_insn "*vec_concatv2si_sse4_1"
[(set (match_operand:V2SI 0 "register_operand" "=x, x,x,x, x, *y,*y")
(match_operand:SI 2 "vector_move_operand" "rm,rm,x,x, C,*ym, C")))]
"TARGET_SSE4_1"
"@
- pinsrd\t{$0x1, %2, %0|%0, %2, 0x1}
- vpinsrd\t{$0x1, %2, %1, %0|%0, %1, %2, 0x1}
+ pinsrd\t{$1, %2, %0|%0, %2, 1}
+ vpinsrd\t{$1, %2, %1, %0|%0, %1, %2, 1}
punpckldq\t{%2, %0|%0, %2}
vpunpckldq\t{%2, %1, %0|%0, %1, %2}
%vmovd\t{%1, %0|%0, %1}
punpckldq\t{%2, %0|%0, %2}
movd\t{%1, %0|%0, %1}"
- [(set_attr "isa" "noavx,avx,noavx,avx,base,base,base")
+ [(set_attr "isa" "noavx,avx,noavx,avx,*,*,*")
(set_attr "type" "sselog,sselog,sselog,sselog,ssemov,mmxcvt,mmxmov")
(set_attr "prefix_extra" "1,1,*,*,*,*,*")
(set_attr "length_immediate" "1,1,*,*,*,*,*")
(set_attr "mode" "V4SF,V4SF,DI,DI")])
(define_insn "*vec_concatv4si"
- [(set (match_operand:V4SI 0 "register_operand" "=Y2,x,x,x,x")
+ [(set (match_operand:V4SI 0 "register_operand" "=x,x,x,x,x")
(vec_concat:V4SI
- (match_operand:V2SI 1 "register_operand" " 0 ,x,0,0,x")
- (match_operand:V2SI 2 "nonimmediate_operand" " Y2,x,x,m,m")))]
+ (match_operand:V2SI 1 "register_operand" " 0,x,0,0,x")
+ (match_operand:V2SI 2 "nonimmediate_operand" " x,x,x,m,m")))]
"TARGET_SSE"
"@
punpcklqdq\t{%2, %0|%0, %2}
movlhps\t{%2, %0|%0, %2}
movhps\t{%2, %0|%0, %2}
vmovhps\t{%2, %1, %0|%0, %1, %2}"
- [(set_attr "isa" "noavx,avx,noavx,noavx,avx")
+ [(set_attr "isa" "sse2_noavx,avx,noavx,noavx,avx")
(set_attr "type" "sselog,sselog,ssemov,ssemov,ssemov")
(set_attr "prefix" "orig,vex,orig,orig,vex")
(set_attr "mode" "TI,TI,V4SF,V2SF,V2SF")])
;; movd instead of movq is required to handle broken assemblers.
-(define_insn "*vec_concatv2di_rex64_sse4_1"
+(define_insn "*vec_concatv2di_rex64"
[(set (match_operand:V2DI 0 "register_operand"
- "=x, x, x,Yi,!x,x,x,x,x")
+ "=x,x ,x ,Yi,!x,x,x,x,x")
(vec_concat:V2DI
(match_operand:DI 1 "nonimmediate_operand"
- " 0, x,xm,r ,*y,0,x,0,x")
+ " 0,x ,xm,r ,*y,0,x,0,x")
(match_operand:DI 2 "vector_move_operand"
- "rm,rm, C,C ,C ,x,x,m,m")))]
- "TARGET_64BIT && TARGET_SSE4_1"
+ "rm,rm,C ,C ,C ,x,x,m,m")))]
+ "TARGET_64BIT"
"@
- pinsrq\t{$0x1, %2, %0|%0, %2, 0x1}
- vpinsrq\t{$0x1, %2, %1, %0|%0, %1, %2, 0x1}
+ pinsrq\t{$1, %2, %0|%0, %2, 1}
+ vpinsrq\t{$1, %2, %1, %0|%0, %1, %2, 1}
%vmovq\t{%1, %0|%0, %1}
%vmovd\t{%1, %0|%0, %1}
movq2dq\t{%1, %0|%0, %1}
vpunpcklqdq\t{%2, %1, %0|%0, %1, %2}
movhps\t{%2, %0|%0, %2}
vmovhps\t{%2, %1, %0|%0, %1, %2}"
- [(set_attr "isa" "noavx,avx,base,base,base,noavx,avx,noavx,avx")
- (set_attr "type" "sselog,sselog,ssemov,ssemov,ssemov,sselog,sselog,ssemov,ssemov")
+ [(set_attr "isa" "sse4_noavx,avx,*,*,*,noavx,avx,noavx,avx")
+ (set (attr "type")
+ (if_then_else
+ (eq_attr "alternative" "0,1,5,6")
+ (const_string "sselog")
+ (const_string "ssemov")))
(set (attr "prefix_rex")
(if_then_else
(and (eq_attr "alternative" "0,3")
- (eq (symbol_ref "TARGET_AVX") (const_int 0)))
+ (not (match_test "TARGET_AVX")))
(const_string "1")
(const_string "*")))
(set_attr "prefix_extra" "1,1,*,*,*,*,*,*,*")
(set_attr "prefix" "orig,vex,maybe_vex,maybe_vex,orig,orig,vex,orig,vex")
(set_attr "mode" "TI,TI,TI,TI,TI,TI,TI,V2SF,V2SF")])
-;; movd instead of movq is required to handle broken assemblers.
-(define_insn "*vec_concatv2di_rex64_sse"
- [(set (match_operand:V2DI 0 "register_operand" "=Y2,Yi,!Y2,Y2,x,x")
- (vec_concat:V2DI
- (match_operand:DI 1 "nonimmediate_operand" "Y2m,r ,*y ,0 ,0,0")
- (match_operand:DI 2 "vector_move_operand" " C ,C ,C ,Y2,x,m")))]
- "TARGET_64BIT && TARGET_SSE"
- "@
- movq\t{%1, %0|%0, %1}
- movd\t{%1, %0|%0, %1}
- movq2dq\t{%1, %0|%0, %1}
- punpcklqdq\t{%2, %0|%0, %2}
- movlhps\t{%2, %0|%0, %2}
- movhps\t{%2, %0|%0, %2}"
- [(set_attr "type" "ssemov,ssemov,ssemov,sselog,ssemov,ssemov")
- (set_attr "prefix_rex" "*,1,*,*,*,*")
- (set_attr "mode" "TI,TI,TI,TI,V4SF,V2SF")])
-
(define_insn "vec_concatv2di"
- [(set (match_operand:V2DI 0 "register_operand" "=Y2,?Y2,Y2,x,x,x,x")
+ [(set (match_operand:V2DI 0 "register_operand" "=x,?x,x,x,x,x,x")
(vec_concat:V2DI
- (match_operand:DI 1 "nonimmediate_operand" "Y2m,*y , 0,x,0,0,x")
- (match_operand:DI 2 "vector_move_operand" " C , C ,Y2,x,x,m,m")))]
+ (match_operand:DI 1 "nonimmediate_operand" "xm,*y,0,x,0,0,x")
+ (match_operand:DI 2 "vector_move_operand" " C, C,x,x,x,m,m")))]
"!TARGET_64BIT && TARGET_SSE"
"@
%vmovq\t{%1, %0|%0, %1}
movlhps\t{%2, %0|%0, %2}
movhps\t{%2, %0|%0, %2}
vmovhps\t{%2, %1, %0|%0, %1, %2}"
- [(set_attr "isa" "base,base,noavx,avx,noavx,noavx,avx")
+ [(set_attr "isa" "sse2,sse2,sse2_noavx,avx,noavx,noavx,avx")
(set_attr "type" "ssemov,ssemov,sselog,sselog,ssemov,ssemov,ssemov")
(set_attr "prefix" "maybe_vex,orig,orig,vex,orig,orig,vex")
(set_attr "mode" "TI,TI,TI,TI,V4SF,V2SF,V2SF")])
+;; Sign-extend the low half of operand 1 into the wider vector mode;
+;; the iterator is widened to VI124_AVX2 so 256-bit modes expand too.
(define_expand "vec_unpacks_lo_<mode>"
 [(match_operand:<sseunpackmode> 0 "register_operand" "")
- (match_operand:VI124_128 1 "register_operand" "")]
+ (match_operand:VI124_AVX2 1 "register_operand" "")]
 "TARGET_SSE2"
 "ix86_expand_sse_unpack (operands, false, false); DONE;")
+;; Sign-extend the high half of operand 1 into the wider vector mode;
+;; the iterator is widened to VI124_AVX2 so 256-bit modes expand too.
(define_expand "vec_unpacks_hi_<mode>"
 [(match_operand:<sseunpackmode> 0 "register_operand" "")
- (match_operand:VI124_128 1 "register_operand" "")]
+ (match_operand:VI124_AVX2 1 "register_operand" "")]
 "TARGET_SSE2"
 "ix86_expand_sse_unpack (operands, false, true); DONE;")
+;; Zero-extend the low half of operand 1 into the wider vector mode;
+;; the iterator is widened to VI124_AVX2 so 256-bit modes expand too.
(define_expand "vec_unpacku_lo_<mode>"
 [(match_operand:<sseunpackmode> 0 "register_operand" "")
- (match_operand:VI124_128 1 "register_operand" "")]
+ (match_operand:VI124_AVX2 1 "register_operand" "")]
 "TARGET_SSE2"
 "ix86_expand_sse_unpack (operands, true, false); DONE;")
+;; Zero-extend the high half of operand 1 into the wider vector mode;
+;; the iterator is widened to VI124_AVX2 so 256-bit modes expand too.
(define_expand "vec_unpacku_hi_<mode>"
 [(match_operand:<sseunpackmode> 0 "register_operand" "")
- (match_operand:VI124_128 1 "register_operand" "")]
+ (match_operand:VI124_AVX2 1 "register_operand" "")]
 "TARGET_SSE2"
 "ix86_expand_sse_unpack (operands, true, true); DONE;")
;;
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;; vpavgb expander: rounding average avg(a,b) = (a + b + 1) >> 1,
+;; modelled in the zero-extended V32HI domain so the +1 cannot
+;; overflow.  NOTE(review): the const_vector carries mode V32QI inside
+;; a V32HI plus — this mirrors the existing sse2_uavgv16qi3 pattern's
+;; quirk (element count matches); confirm it is intentional.
+(define_expand "avx2_uavgv32qi3"
+ [(set (match_operand:V32QI 0 "register_operand" "")
+ (truncate:V32QI
+ (lshiftrt:V32HI
+ (plus:V32HI
+ (plus:V32HI
+ (zero_extend:V32HI
+ (match_operand:V32QI 1 "nonimmediate_operand" ""))
+ (zero_extend:V32HI
+ (match_operand:V32QI 2 "nonimmediate_operand" "")))
+ (const_vector:V32QI [(const_int 1) (const_int 1)
+ (const_int 1) (const_int 1)
+ (const_int 1) (const_int 1)
+ (const_int 1) (const_int 1)
+ (const_int 1) (const_int 1)
+ (const_int 1) (const_int 1)
+ (const_int 1) (const_int 1)
+ (const_int 1) (const_int 1)
+ (const_int 1) (const_int 1)
+ (const_int 1) (const_int 1)
+ (const_int 1) (const_int 1)
+ (const_int 1) (const_int 1)
+ (const_int 1) (const_int 1)
+ (const_int 1) (const_int 1)
+ (const_int 1) (const_int 1)
+ (const_int 1) (const_int 1)]))
+ (const_int 1))))]
+ "TARGET_AVX2"
+ "ix86_fixup_binary_operands_no_copy (PLUS, V32QImode, operands);")
+
(define_expand "sse2_uavgv16qi3"
[(set (match_operand:V16QI 0 "register_operand" "")
(truncate:V16QI
"TARGET_SSE2"
"ix86_fixup_binary_operands_no_copy (PLUS, V16QImode, operands);")
+;; vpavgb matcher.  The "%x" constraint marks operand 1 commutative so
+;; either source may end up as the memory operand.
+(define_insn "*avx2_uavgv32qi3"
+ [(set (match_operand:V32QI 0 "register_operand" "=x")
+ (truncate:V32QI
+ (lshiftrt:V32HI
+ (plus:V32HI
+ (plus:V32HI
+ (zero_extend:V32HI
+ (match_operand:V32QI 1 "nonimmediate_operand" "%x"))
+ (zero_extend:V32HI
+ (match_operand:V32QI 2 "nonimmediate_operand" "xm")))
+ (const_vector:V32QI [(const_int 1) (const_int 1)
+ (const_int 1) (const_int 1)
+ (const_int 1) (const_int 1)
+ (const_int 1) (const_int 1)
+ (const_int 1) (const_int 1)
+ (const_int 1) (const_int 1)
+ (const_int 1) (const_int 1)
+ (const_int 1) (const_int 1)
+ (const_int 1) (const_int 1)
+ (const_int 1) (const_int 1)
+ (const_int 1) (const_int 1)
+ (const_int 1) (const_int 1)
+ (const_int 1) (const_int 1)
+ (const_int 1) (const_int 1)
+ (const_int 1) (const_int 1)
+ (const_int 1) (const_int 1)]))
+ (const_int 1))))]
+ "TARGET_AVX2 && ix86_binary_operator_ok (PLUS, V32QImode, operands)"
+ "vpavgb\t{%2, %1, %0|%0, %1, %2}"
+ [(set_attr "type" "sseiadd")
+ (set_attr "prefix" "vex")
+ (set_attr "mode" "OI")])
+
(define_insn "*sse2_uavgv16qi3"
[(set (match_operand:V16QI 0 "register_operand" "=x,x")
(truncate:V16QI
(set_attr "prefix" "orig,vex")
(set_attr "mode" "TI")])
+;; vpavgw expander: rounding average avg(a,b) = (a + b + 1) >> 1,
+;; modelled in the zero-extended V16SI domain.  NOTE(review): the
+;; const_vector carries mode V16HI inside a V16SI plus, matching the
+;; sse2_uavgv8hi3 pattern's quirk; confirm it is intentional.
+(define_expand "avx2_uavgv16hi3"
+ [(set (match_operand:V16HI 0 "register_operand" "")
+ (truncate:V16HI
+ (lshiftrt:V16SI
+ (plus:V16SI
+ (plus:V16SI
+ (zero_extend:V16SI
+ (match_operand:V16HI 1 "nonimmediate_operand" ""))
+ (zero_extend:V16SI
+ (match_operand:V16HI 2 "nonimmediate_operand" "")))
+ (const_vector:V16HI [(const_int 1) (const_int 1)
+ (const_int 1) (const_int 1)
+ (const_int 1) (const_int 1)
+ (const_int 1) (const_int 1)
+ (const_int 1) (const_int 1)
+ (const_int 1) (const_int 1)
+ (const_int 1) (const_int 1)
+ (const_int 1) (const_int 1)]))
+ (const_int 1))))]
+ "TARGET_AVX2"
+ "ix86_fixup_binary_operands_no_copy (PLUS, V16HImode, operands);")
+
(define_expand "sse2_uavgv8hi3"
[(set (match_operand:V8HI 0 "register_operand" "")
(truncate:V8HI
(const_int 1) (const_int 1)
(const_int 1) (const_int 1)]))
(const_int 1))))]
- "TARGET_SSE2"
- "ix86_fixup_binary_operands_no_copy (PLUS, V8HImode, operands);")
+ "TARGET_SSE2"
+ "ix86_fixup_binary_operands_no_copy (PLUS, V8HImode, operands);")
+
+;; vpavgw matcher.  The "%x" constraint marks operand 1 commutative so
+;; either source may end up as the memory operand.
+(define_insn "*avx2_uavgv16hi3"
+ [(set (match_operand:V16HI 0 "register_operand" "=x")
+ (truncate:V16HI
+ (lshiftrt:V16SI
+ (plus:V16SI
+ (plus:V16SI
+ (zero_extend:V16SI
+ (match_operand:V16HI 1 "nonimmediate_operand" "%x"))
+ (zero_extend:V16SI
+ (match_operand:V16HI 2 "nonimmediate_operand" "xm")))
+ (const_vector:V16HI [(const_int 1) (const_int 1)
+ (const_int 1) (const_int 1)
+ (const_int 1) (const_int 1)
+ (const_int 1) (const_int 1)
+ (const_int 1) (const_int 1)
+ (const_int 1) (const_int 1)
+ (const_int 1) (const_int 1)
+ (const_int 1) (const_int 1)]))
+ (const_int 1))))]
+ "TARGET_AVX2 && ix86_binary_operator_ok (PLUS, V16HImode, operands)"
+ "vpavgw\t{%2, %1, %0|%0, %1, %2}"
+ [(set_attr "type" "sseiadd")
+ (set_attr "prefix" "vex")
+ (set_attr "mode" "OI")])
(define_insn "*sse2_uavgv8hi3"
[(set (match_operand:V8HI 0 "register_operand" "=x,x")
;; The correct representation for this is absolutely enormous, and
;; surely not generally useful.
-(define_insn "sse2_psadbw"
- [(set (match_operand:V2DI 0 "register_operand" "=x,x")
- (unspec:V2DI [(match_operand:V16QI 1 "register_operand" "0,x")
- (match_operand:V16QI 2 "nonimmediate_operand" "xm,xm")]
- UNSPEC_PSADBW))]
+(define_insn "<sse2_avx2>_psadbw"
+ [(set (match_operand:VI8_AVX2 0 "register_operand" "=x,x")
+ (unspec:VI8_AVX2 [(match_operand:<ssebytemode> 1 "register_operand" "0,x")
+ (match_operand:<ssebytemode> 2 "nonimmediate_operand" "xm,xm")]
+ UNSPEC_PSADBW))]
"TARGET_SSE2"
"@
psadbw\t{%2, %0|%0, %2}
(set_attr "atom_unit" "simul")
(set_attr "prefix_data16" "1,*")
(set_attr "prefix" "orig,vex")
- (set_attr "mode" "TI")])
+ (set_attr "mode" "<sseinsnmode>")])
(define_insn "<sse>_movmsk<ssemodesuffix><avxsizesuffix>"
[(set (match_operand:SI 0 "register_operand" "=r")
(set_attr "prefix" "maybe_vex")
(set_attr "mode" "<MODE>")])
+;; vpmovmskb: collect the sign bit of each of the 32 bytes of
+;; operand 1 into the low 32 bits of a general register (unspec,
+;; like the existing sse2_pmovmskb pattern).
+(define_insn "avx2_pmovmskb"
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (unspec:SI [(match_operand:V32QI 1 "register_operand" "x")]
+ UNSPEC_MOVMSK))]
+ "TARGET_AVX2"
+ "vpmovmskb\t{%1, %0|%0, %1}"
+ [(set_attr "type" "ssemov")
+ (set_attr "prefix" "vex")
+ (set_attr "mode" "DI")])
+
(define_insn "sse2_pmovmskb"
[(set (match_operand:SI 0 "register_operand" "=r")
(unspec:SI [(match_operand:V16QI 1 "register_operand" "x")]
;;
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;; vphaddw: pairwise add of adjacent halfwords of operands 1 and 2.
+;; NOTE(review): per the ISA, vphaddw operates within 128-bit lanes
+;; (sums of a0..a7 then b0..b7 in the low lane, a8..a15 then b8..b15
+;; in the high lane); this RTL places all 8 sums of operand 1 before
+;; all 8 sums of operand 2 — verify the element order against the
+;; hardware before anything beyond the builtin relies on it.
+(define_insn "avx2_phaddwv16hi3"
+ [(set (match_operand:V16HI 0 "register_operand" "=x")
+ (vec_concat:V16HI
+ (vec_concat:V8HI
+ (vec_concat:V4HI
+ (vec_concat:V2HI
+ (plus:HI
+ (vec_select:HI
+ (match_operand:V16HI 1 "register_operand" "x")
+ (parallel [(const_int 0)]))
+ (vec_select:HI (match_dup 1) (parallel [(const_int 1)])))
+ (plus:HI
+ (vec_select:HI (match_dup 1) (parallel [(const_int 2)]))
+ (vec_select:HI (match_dup 1) (parallel [(const_int 3)]))))
+ (vec_concat:V2HI
+ (plus:HI
+ (vec_select:HI (match_dup 1) (parallel [(const_int 4)]))
+ (vec_select:HI (match_dup 1) (parallel [(const_int 5)])))
+ (plus:HI
+ (vec_select:HI (match_dup 1) (parallel [(const_int 6)]))
+ (vec_select:HI (match_dup 1) (parallel [(const_int 7)])))))
+ (vec_concat:V4HI
+ (vec_concat:V2HI
+ (plus:HI
+ (vec_select:HI (match_dup 1) (parallel [(const_int 8)]))
+ (vec_select:HI (match_dup 1) (parallel [(const_int 9)])))
+ (plus:HI
+ (vec_select:HI (match_dup 1) (parallel [(const_int 10)]))
+ (vec_select:HI (match_dup 1) (parallel [(const_int 11)]))))
+ (vec_concat:V2HI
+ (plus:HI
+ (vec_select:HI (match_dup 1) (parallel [(const_int 12)]))
+ (vec_select:HI (match_dup 1) (parallel [(const_int 13)])))
+ (plus:HI
+ (vec_select:HI (match_dup 1) (parallel [(const_int 14)]))
+ (vec_select:HI (match_dup 1) (parallel [(const_int 15)]))))))
+ (vec_concat:V8HI
+ (vec_concat:V4HI
+ (vec_concat:V2HI
+ (plus:HI
+ (vec_select:HI
+ (match_operand:V16HI 2 "nonimmediate_operand" "xm")
+ (parallel [(const_int 0)]))
+ (vec_select:HI (match_dup 2) (parallel [(const_int 1)])))
+ (plus:HI
+ (vec_select:HI (match_dup 2) (parallel [(const_int 2)]))
+ (vec_select:HI (match_dup 2) (parallel [(const_int 3)]))))
+ (vec_concat:V2HI
+ (plus:HI
+ (vec_select:HI (match_dup 2) (parallel [(const_int 4)]))
+ (vec_select:HI (match_dup 2) (parallel [(const_int 5)])))
+ (plus:HI
+ (vec_select:HI (match_dup 2) (parallel [(const_int 6)]))
+ (vec_select:HI (match_dup 2) (parallel [(const_int 7)])))))
+ (vec_concat:V4HI
+ (vec_concat:V2HI
+ (plus:HI
+ (vec_select:HI (match_dup 2) (parallel [(const_int 8)]))
+ (vec_select:HI (match_dup 2) (parallel [(const_int 9)])))
+ (plus:HI
+ (vec_select:HI (match_dup 2) (parallel [(const_int 10)]))
+ (vec_select:HI (match_dup 2) (parallel [(const_int 11)]))))
+ (vec_concat:V2HI
+ (plus:HI
+ (vec_select:HI (match_dup 2) (parallel [(const_int 12)]))
+ (vec_select:HI (match_dup 2) (parallel [(const_int 13)])))
+ (plus:HI
+ (vec_select:HI (match_dup 2) (parallel [(const_int 14)]))
+ (vec_select:HI (match_dup 2) (parallel [(const_int 15)]))))))))]
+ "TARGET_AVX2"
+ "vphaddw\t{%2, %1, %0|%0, %1, %2}"
+ [(set_attr "type" "sseiadd")
+ (set_attr "prefix_extra" "1")
+ (set_attr "prefix" "vex")
+ (set_attr "mode" "OI")])
+
(define_insn "ssse3_phaddwv8hi3"
[(set (match_operand:V8HI 0 "register_operand" "=x,x")
(vec_concat:V8HI
(set (attr "prefix_rex") (symbol_ref "x86_extended_reg_mentioned_p (insn)"))
(set_attr "mode" "DI")])
+;; vphaddd: pairwise add of adjacent doublewords of operands 1 and 2.
+;; NOTE(review): per the ISA, vphaddd operates within 128-bit lanes;
+;; this RTL places all 4 sums of operand 1 before all 4 sums of
+;; operand 2 — verify the element order against the hardware before
+;; anything beyond the builtin relies on it.
+(define_insn "avx2_phadddv8si3"
+ [(set (match_operand:V8SI 0 "register_operand" "=x")
+ (vec_concat:V8SI
+ (vec_concat:V4SI
+ (vec_concat:V2SI
+ (plus:SI
+ (vec_select:SI
+ (match_operand:V8SI 1 "register_operand" "x")
+ (parallel [(const_int 0)]))
+ (vec_select:SI (match_dup 1) (parallel [(const_int 1)])))
+ (plus:SI
+ (vec_select:SI (match_dup 1) (parallel [(const_int 2)]))
+ (vec_select:SI (match_dup 1) (parallel [(const_int 3)]))))
+ (vec_concat:V2SI
+ (plus:SI
+ (vec_select:SI (match_dup 1) (parallel [(const_int 4)]))
+ (vec_select:SI (match_dup 1) (parallel [(const_int 5)])))
+ (plus:SI
+ (vec_select:SI (match_dup 1) (parallel [(const_int 6)]))
+ (vec_select:SI (match_dup 1) (parallel [(const_int 7)])))))
+ (vec_concat:V4SI
+ (vec_concat:V2SI
+ (plus:SI
+ (vec_select:SI
+ (match_operand:V8SI 2 "nonimmediate_operand" "xm")
+ (parallel [(const_int 0)]))
+ (vec_select:SI (match_dup 2) (parallel [(const_int 1)])))
+ (plus:SI
+ (vec_select:SI (match_dup 2) (parallel [(const_int 2)]))
+ (vec_select:SI (match_dup 2) (parallel [(const_int 3)]))))
+ (vec_concat:V2SI
+ (plus:SI
+ (vec_select:SI (match_dup 2) (parallel [(const_int 4)]))
+ (vec_select:SI (match_dup 2) (parallel [(const_int 5)])))
+ (plus:SI
+ (vec_select:SI (match_dup 2) (parallel [(const_int 6)]))
+ (vec_select:SI (match_dup 2) (parallel [(const_int 7)])))))))]
+ "TARGET_AVX2"
+ "vphaddd\t{%2, %1, %0|%0, %1, %2}"
+ [(set_attr "type" "sseiadd")
+ (set_attr "prefix_extra" "1")
+ (set_attr "prefix" "vex")
+ (set_attr "mode" "OI")])
+
(define_insn "ssse3_phadddv4si3"
[(set (match_operand:V4SI 0 "register_operand" "=x,x")
(vec_concat:V4SI
(set (attr "prefix_rex") (symbol_ref "x86_extended_reg_mentioned_p (insn)"))
(set_attr "mode" "DI")])
+;; vphaddsw: pairwise signed-saturating add (ss_plus) of adjacent
+;; halfwords of operands 1 and 2.  NOTE(review): per the ISA,
+;; vphaddsw operates within 128-bit lanes; this RTL places all sums
+;; of operand 1 before all sums of operand 2 — verify the element
+;; order against the hardware before anything beyond the builtin
+;; relies on it.
+(define_insn "avx2_phaddswv16hi3"
+ [(set (match_operand:V16HI 0 "register_operand" "=x")
+ (vec_concat:V16HI
+ (vec_concat:V8HI
+ (vec_concat:V4HI
+ (vec_concat:V2HI
+ (ss_plus:HI
+ (vec_select:HI
+ (match_operand:V16HI 1 "register_operand" "x")
+ (parallel [(const_int 0)]))
+ (vec_select:HI (match_dup 1) (parallel [(const_int 1)])))
+ (ss_plus:HI
+ (vec_select:HI (match_dup 1) (parallel [(const_int 2)]))
+ (vec_select:HI (match_dup 1) (parallel [(const_int 3)]))))
+ (vec_concat:V2HI
+ (ss_plus:HI
+ (vec_select:HI (match_dup 1) (parallel [(const_int 4)]))
+ (vec_select:HI (match_dup 1) (parallel [(const_int 5)])))
+ (ss_plus:HI
+ (vec_select:HI (match_dup 1) (parallel [(const_int 6)]))
+ (vec_select:HI (match_dup 1) (parallel [(const_int 7)])))))
+ (vec_concat:V4HI
+ (vec_concat:V2HI
+ (ss_plus:HI
+ (vec_select:HI (match_dup 1) (parallel [(const_int 8)]))
+ (vec_select:HI (match_dup 1) (parallel [(const_int 9)])))
+ (ss_plus:HI
+ (vec_select:HI (match_dup 1) (parallel [(const_int 10)]))
+ (vec_select:HI (match_dup 1) (parallel [(const_int 11)]))))
+ (vec_concat:V2HI
+ (ss_plus:HI
+ (vec_select:HI (match_dup 1) (parallel [(const_int 12)]))
+ (vec_select:HI (match_dup 1) (parallel [(const_int 13)])))
+ (ss_plus:HI
+ (vec_select:HI (match_dup 1) (parallel [(const_int 14)]))
+ (vec_select:HI (match_dup 1) (parallel [(const_int 15)]))))))
+ (vec_concat:V8HI
+ (vec_concat:V4HI
+ (vec_concat:V2HI
+ (ss_plus:HI
+ (vec_select:HI
+ (match_operand:V16HI 2 "nonimmediate_operand" "xm")
+ (parallel [(const_int 0)]))
+ (vec_select:HI (match_dup 2) (parallel [(const_int 1)])))
+ (ss_plus:HI
+ (vec_select:HI (match_dup 2) (parallel [(const_int 2)]))
+ (vec_select:HI (match_dup 2) (parallel [(const_int 3)]))))
+ (vec_concat:V2HI
+ (ss_plus:HI
+ (vec_select:HI (match_dup 2) (parallel [(const_int 4)]))
+ (vec_select:HI (match_dup 2) (parallel [(const_int 5)])))
+ (ss_plus:HI
+ (vec_select:HI (match_dup 2) (parallel [(const_int 6)]))
+ (vec_select:HI (match_dup 2) (parallel [(const_int 7)])))))
+ (vec_concat:V4HI
+ (vec_concat:V2HI
+ (ss_plus:HI
+ (vec_select:HI (match_dup 2) (parallel [(const_int 8)]))
+ (vec_select:HI (match_dup 2) (parallel [(const_int 9)])))
+ (ss_plus:HI
+ (vec_select:HI (match_dup 2) (parallel [(const_int 10)]))
+ (vec_select:HI (match_dup 2) (parallel [(const_int 11)]))))
+ (vec_concat:V2HI
+ (ss_plus:HI
+ (vec_select:HI (match_dup 2) (parallel [(const_int 12)]))
+ (vec_select:HI (match_dup 2) (parallel [(const_int 13)])))
+ (ss_plus:HI
+ (vec_select:HI (match_dup 2) (parallel [(const_int 14)]))
+ (vec_select:HI (match_dup 2) (parallel [(const_int 15)]))))))))]
+ "TARGET_AVX2"
+ "vphaddsw\t{%2, %1, %0|%0, %1, %2}"
+ [(set_attr "type" "sseiadd")
+ (set_attr "prefix_extra" "1")
+ (set_attr "prefix" "vex")
+ (set_attr "mode" "OI")])
+
(define_insn "ssse3_phaddswv8hi3"
[(set (match_operand:V8HI 0 "register_operand" "=x,x")
(vec_concat:V8HI
(set (attr "prefix_rex") (symbol_ref "x86_extended_reg_mentioned_p (insn)"))
(set_attr "mode" "DI")])
+;; vphsubw: pairwise subtract (even minus odd halfword) of operands 1
+;; and 2.  NOTE(review): per the ISA, vphsubw operates within 128-bit
+;; lanes; this RTL places all differences of operand 1 before all
+;; differences of operand 2 — verify the element order against the
+;; hardware before anything beyond the builtin relies on it.
+(define_insn "avx2_phsubwv16hi3"
+ [(set (match_operand:V16HI 0 "register_operand" "=x")
+ (vec_concat:V16HI
+ (vec_concat:V8HI
+ (vec_concat:V4HI
+ (vec_concat:V2HI
+ (minus:HI
+ (vec_select:HI
+ (match_operand:V16HI 1 "register_operand" "x")
+ (parallel [(const_int 0)]))
+ (vec_select:HI (match_dup 1) (parallel [(const_int 1)])))
+ (minus:HI
+ (vec_select:HI (match_dup 1) (parallel [(const_int 2)]))
+ (vec_select:HI (match_dup 1) (parallel [(const_int 3)]))))
+ (vec_concat:V2HI
+ (minus:HI
+ (vec_select:HI (match_dup 1) (parallel [(const_int 4)]))
+ (vec_select:HI (match_dup 1) (parallel [(const_int 5)])))
+ (minus:HI
+ (vec_select:HI (match_dup 1) (parallel [(const_int 6)]))
+ (vec_select:HI (match_dup 1) (parallel [(const_int 7)])))))
+ (vec_concat:V4HI
+ (vec_concat:V2HI
+ (minus:HI
+ (vec_select:HI (match_dup 1) (parallel [(const_int 8)]))
+ (vec_select:HI (match_dup 1) (parallel [(const_int 9)])))
+ (minus:HI
+ (vec_select:HI (match_dup 1) (parallel [(const_int 10)]))
+ (vec_select:HI (match_dup 1) (parallel [(const_int 11)]))))
+ (vec_concat:V2HI
+ (minus:HI
+ (vec_select:HI (match_dup 1) (parallel [(const_int 12)]))
+ (vec_select:HI (match_dup 1) (parallel [(const_int 13)])))
+ (minus:HI
+ (vec_select:HI (match_dup 1) (parallel [(const_int 14)]))
+ (vec_select:HI (match_dup 1) (parallel [(const_int 15)]))))))
+ (vec_concat:V8HI
+ (vec_concat:V4HI
+ (vec_concat:V2HI
+ (minus:HI
+ (vec_select:HI
+ (match_operand:V16HI 2 "nonimmediate_operand" "xm")
+ (parallel [(const_int 0)]))
+ (vec_select:HI (match_dup 2) (parallel [(const_int 1)])))
+ (minus:HI
+ (vec_select:HI (match_dup 2) (parallel [(const_int 2)]))
+ (vec_select:HI (match_dup 2) (parallel [(const_int 3)]))))
+ (vec_concat:V2HI
+ (minus:HI
+ (vec_select:HI (match_dup 2) (parallel [(const_int 4)]))
+ (vec_select:HI (match_dup 2) (parallel [(const_int 5)])))
+ (minus:HI
+ (vec_select:HI (match_dup 2) (parallel [(const_int 6)]))
+ (vec_select:HI (match_dup 2) (parallel [(const_int 7)])))))
+ (vec_concat:V4HI
+ (vec_concat:V2HI
+ (minus:HI
+ (vec_select:HI (match_dup 2) (parallel [(const_int 8)]))
+ (vec_select:HI (match_dup 2) (parallel [(const_int 9)])))
+ (minus:HI
+ (vec_select:HI (match_dup 2) (parallel [(const_int 10)]))
+ (vec_select:HI (match_dup 2) (parallel [(const_int 11)]))))
+ (vec_concat:V2HI
+ (minus:HI
+ (vec_select:HI (match_dup 2) (parallel [(const_int 12)]))
+ (vec_select:HI (match_dup 2) (parallel [(const_int 13)])))
+ (minus:HI
+ (vec_select:HI (match_dup 2) (parallel [(const_int 14)]))
+ (vec_select:HI (match_dup 2) (parallel [(const_int 15)]))))))))]
+ "TARGET_AVX2"
+ "vphsubw\t{%2, %1, %0|%0, %1, %2}"
+ [(set_attr "type" "sseiadd")
+ (set_attr "prefix_extra" "1")
+ (set_attr "prefix" "vex")
+ (set_attr "mode" "OI")])
+
(define_insn "ssse3_phsubwv8hi3"
[(set (match_operand:V8HI 0 "register_operand" "=x,x")
(vec_concat:V8HI
(set (attr "prefix_rex") (symbol_ref "x86_extended_reg_mentioned_p (insn)"))
(set_attr "mode" "DI")])
+;; vphsubd: pairwise subtract (even minus odd doubleword) of operands
+;; 1 and 2.  NOTE(review): per the ISA, vphsubd operates within
+;; 128-bit lanes; this RTL places all differences of operand 1 before
+;; all differences of operand 2 — verify the element order against
+;; the hardware before anything beyond the builtin relies on it.
+(define_insn "avx2_phsubdv8si3"
+ [(set (match_operand:V8SI 0 "register_operand" "=x")
+ (vec_concat:V8SI
+ (vec_concat:V4SI
+ (vec_concat:V2SI
+ (minus:SI
+ (vec_select:SI
+ (match_operand:V8SI 1 "register_operand" "x")
+ (parallel [(const_int 0)]))
+ (vec_select:SI (match_dup 1) (parallel [(const_int 1)])))
+ (minus:SI
+ (vec_select:SI (match_dup 1) (parallel [(const_int 2)]))
+ (vec_select:SI (match_dup 1) (parallel [(const_int 3)]))))
+ (vec_concat:V2SI
+ (minus:SI
+ (vec_select:SI (match_dup 1) (parallel [(const_int 4)]))
+ (vec_select:SI (match_dup 1) (parallel [(const_int 5)])))
+ (minus:SI
+ (vec_select:SI (match_dup 1) (parallel [(const_int 6)]))
+ (vec_select:SI (match_dup 1) (parallel [(const_int 7)])))))
+ (vec_concat:V4SI
+ (vec_concat:V2SI
+ (minus:SI
+ (vec_select:SI
+ (match_operand:V8SI 2 "nonimmediate_operand" "xm")
+ (parallel [(const_int 0)]))
+ (vec_select:SI (match_dup 2) (parallel [(const_int 1)])))
+ (minus:SI
+ (vec_select:SI (match_dup 2) (parallel [(const_int 2)]))
+ (vec_select:SI (match_dup 2) (parallel [(const_int 3)]))))
+ (vec_concat:V2SI
+ (minus:SI
+ (vec_select:SI (match_dup 2) (parallel [(const_int 4)]))
+ (vec_select:SI (match_dup 2) (parallel [(const_int 5)])))
+ (minus:SI
+ (vec_select:SI (match_dup 2) (parallel [(const_int 6)]))
+ (vec_select:SI (match_dup 2) (parallel [(const_int 7)])))))))]
+ "TARGET_AVX2"
+ "vphsubd\t{%2, %1, %0|%0, %1, %2}"
+ [(set_attr "type" "sseiadd")
+ (set_attr "prefix_extra" "1")
+ (set_attr "prefix" "vex")
+ (set_attr "mode" "OI")])
+
(define_insn "ssse3_phsubdv4si3"
[(set (match_operand:V4SI 0 "register_operand" "=x,x")
(vec_concat:V4SI
"@
phsubd\t{%2, %0|%0, %2}
vphsubd\t{%2, %1, %0|%0, %1, %2}"
-
+
[(set_attr "isa" "noavx,avx")
(set_attr "type" "sseiadd")
(set_attr "atom_unit" "complex")
(set (attr "prefix_rex") (symbol_ref "x86_extended_reg_mentioned_p (insn)"))
(set_attr "mode" "DI")])
+;; vphsubsw: pairwise signed-saturating subtract (ss_minus) of
+;; adjacent halfwords of operands 1 and 2.  NOTE(review): per the ISA,
+;; vphsubsw operates within 128-bit lanes; this RTL places all
+;; differences of operand 1 before all differences of operand 2 —
+;; verify the element order against the hardware before anything
+;; beyond the builtin relies on it.
+(define_insn "avx2_phsubswv16hi3"
+ [(set (match_operand:V16HI 0 "register_operand" "=x")
+ (vec_concat:V16HI
+ (vec_concat:V8HI
+ (vec_concat:V4HI
+ (vec_concat:V2HI
+ (ss_minus:HI
+ (vec_select:HI
+ (match_operand:V16HI 1 "register_operand" "x")
+ (parallel [(const_int 0)]))
+ (vec_select:HI (match_dup 1) (parallel [(const_int 1)])))
+ (ss_minus:HI
+ (vec_select:HI (match_dup 1) (parallel [(const_int 2)]))
+ (vec_select:HI (match_dup 1) (parallel [(const_int 3)]))))
+ (vec_concat:V2HI
+ (ss_minus:HI
+ (vec_select:HI (match_dup 1) (parallel [(const_int 4)]))
+ (vec_select:HI (match_dup 1) (parallel [(const_int 5)])))
+ (ss_minus:HI
+ (vec_select:HI (match_dup 1) (parallel [(const_int 6)]))
+ (vec_select:HI (match_dup 1) (parallel [(const_int 7)])))))
+ (vec_concat:V4HI
+ (vec_concat:V2HI
+ (ss_minus:HI
+ (vec_select:HI (match_dup 1) (parallel [(const_int 8)]))
+ (vec_select:HI (match_dup 1) (parallel [(const_int 9)])))
+ (ss_minus:HI
+ (vec_select:HI (match_dup 1) (parallel [(const_int 10)]))
+ (vec_select:HI (match_dup 1) (parallel [(const_int 11)]))))
+ (vec_concat:V2HI
+ (ss_minus:HI
+ (vec_select:HI (match_dup 1) (parallel [(const_int 12)]))
+ (vec_select:HI (match_dup 1) (parallel [(const_int 13)])))
+ (ss_minus:HI
+ (vec_select:HI (match_dup 1) (parallel [(const_int 14)]))
+ (vec_select:HI (match_dup 1) (parallel [(const_int 15)]))))))
+ (vec_concat:V8HI
+ (vec_concat:V4HI
+ (vec_concat:V2HI
+ (ss_minus:HI
+ (vec_select:HI
+ (match_operand:V16HI 2 "nonimmediate_operand" "xm")
+ (parallel [(const_int 0)]))
+ (vec_select:HI (match_dup 2) (parallel [(const_int 1)])))
+ (ss_minus:HI
+ (vec_select:HI (match_dup 2) (parallel [(const_int 2)]))
+ (vec_select:HI (match_dup 2) (parallel [(const_int 3)]))))
+ (vec_concat:V2HI
+ (ss_minus:HI
+ (vec_select:HI (match_dup 2) (parallel [(const_int 4)]))
+ (vec_select:HI (match_dup 2) (parallel [(const_int 5)])))
+ (ss_minus:HI
+ (vec_select:HI (match_dup 2) (parallel [(const_int 6)]))
+ (vec_select:HI (match_dup 2) (parallel [(const_int 7)])))))
+ (vec_concat:V4HI
+ (vec_concat:V2HI
+ (ss_minus:HI
+ (vec_select:HI (match_dup 2) (parallel [(const_int 8)]))
+ (vec_select:HI (match_dup 2) (parallel [(const_int 9)])))
+ (ss_minus:HI
+ (vec_select:HI (match_dup 2) (parallel [(const_int 10)]))
+ (vec_select:HI (match_dup 2) (parallel [(const_int 11)]))))
+ (vec_concat:V2HI
+ (ss_minus:HI
+ (vec_select:HI (match_dup 2) (parallel [(const_int 12)]))
+ (vec_select:HI (match_dup 2) (parallel [(const_int 13)])))
+ (ss_minus:HI
+ (vec_select:HI (match_dup 2) (parallel [(const_int 14)]))
+ (vec_select:HI (match_dup 2) (parallel [(const_int 15)]))))))))]
+ "TARGET_AVX2"
+ "vphsubsw\t{%2, %1, %0|%0, %1, %2}"
+ [(set_attr "type" "sseiadd")
+ (set_attr "prefix_extra" "1")
+ (set_attr "prefix" "vex")
+ (set_attr "mode" "OI")])
+
(define_insn "ssse3_phsubswv8hi3"
[(set (match_operand:V8HI 0 "register_operand" "=x,x")
(vec_concat:V8HI
(set (attr "prefix_rex") (symbol_ref "x86_extended_reg_mentioned_p (insn)"))
(set_attr "mode" "DI")])
+;; AVX2 vpmaddubsw: widen the even/odd bytes of operand 1 (unsigned)
+;; and operand 2 (signed), multiply element-wise, and add each pair of
+;; adjacent word products with signed saturation (ss_plus), producing
+;; 16 signed words.  Operand 1 must be a register; only operand 2 may
+;; be a memory operand.
+(define_insn "avx2_pmaddubsw256"
+ [(set (match_operand:V16HI 0 "register_operand" "=x")
+ (ss_plus:V16HI
+ (mult:V16HI
+ (zero_extend:V16HI
+ (vec_select:V16QI
+ (match_operand:V32QI 1 "register_operand" "x")
+ (parallel [(const_int 0)
+ (const_int 2)
+ (const_int 4)
+ (const_int 6)
+ (const_int 8)
+ (const_int 10)
+ (const_int 12)
+ (const_int 14)
+ (const_int 16)
+ (const_int 18)
+ (const_int 20)
+ (const_int 22)
+ (const_int 24)
+ (const_int 26)
+ (const_int 28)
+ (const_int 30)])))
+ (sign_extend:V16HI
+ (vec_select:V16QI
+ (match_operand:V32QI 2 "nonimmediate_operand" "xm")
+ (parallel [(const_int 0)
+ (const_int 2)
+ (const_int 4)
+ (const_int 6)
+ (const_int 8)
+ (const_int 10)
+ (const_int 12)
+ (const_int 14)
+ (const_int 16)
+ (const_int 18)
+ (const_int 20)
+ (const_int 22)
+ (const_int 24)
+ (const_int 26)
+ (const_int 28)
+ (const_int 30)]))))
+ (mult:V16HI
+ (zero_extend:V16HI
+ (vec_select:V16QI (match_dup 1)
+ (parallel [(const_int 1)
+ (const_int 3)
+ (const_int 5)
+ (const_int 7)
+ (const_int 9)
+ (const_int 11)
+ (const_int 13)
+ (const_int 15)
+ (const_int 17)
+ (const_int 19)
+ (const_int 21)
+ (const_int 23)
+ (const_int 25)
+ (const_int 27)
+ (const_int 29)
+ (const_int 31)])))
+ (sign_extend:V16HI
+ (vec_select:V16QI (match_dup 2)
+ (parallel [(const_int 1)
+ (const_int 3)
+ (const_int 5)
+ (const_int 7)
+ (const_int 9)
+ (const_int 11)
+ (const_int 13)
+ (const_int 15)
+ (const_int 17)
+ (const_int 19)
+ (const_int 21)
+ (const_int 23)
+ (const_int 25)
+ (const_int 27)
+ (const_int 29)
+ (const_int 31)]))))))]
+ "TARGET_AVX2"
+ "vpmaddubsw\t{%2, %1, %0|%0, %1, %2}"
+ [(set_attr "type" "sseiadd")
+ (set_attr "prefix_extra" "1")
+ (set_attr "prefix" "vex")
+ (set_attr "mode" "OI")])
+
(define_insn "ssse3_pmaddubsw128"
[(set (match_operand:V8HI 0 "register_operand" "=x,x")
(ss_plus:V8HI
(set (attr "prefix_rex") (symbol_ref "x86_extended_reg_mentioned_p (insn)"))
(set_attr "mode" "DI")])
+;; AVX2 vpmulhrsw expander.  The rounded high multiply is modeled as
+;; (((a * b) >> 14) + 1) >> 1, truncated back to V16HI.  The expander
+;; only canonicalizes the commutative MULT operands via
+;; ix86_fixup_binary_operands_no_copy; the matcher below emits the insn.
+(define_expand "avx2_umulhrswv16hi3"
+ [(set (match_operand:V16HI 0 "register_operand" "")
+ (truncate:V16HI
+ (lshiftrt:V16SI
+ (plus:V16SI
+ (lshiftrt:V16SI
+ (mult:V16SI
+ (sign_extend:V16SI
+ (match_operand:V16HI 1 "nonimmediate_operand" ""))
+ (sign_extend:V16SI
+ (match_operand:V16HI 2 "nonimmediate_operand" "")))
+ (const_int 14))
+ (const_vector:V16HI [(const_int 1) (const_int 1)
+ (const_int 1) (const_int 1)
+ (const_int 1) (const_int 1)
+ (const_int 1) (const_int 1)
+ (const_int 1) (const_int 1)
+ (const_int 1) (const_int 1)
+ (const_int 1) (const_int 1)
+ (const_int 1) (const_int 1)]))
+ (const_int 1))))]
+ "TARGET_AVX2"
+ "ix86_fixup_binary_operands_no_copy (MULT, V16HImode, operands);")
+
+;; Matcher for the expander above.  "%x" on operand 1 lets the RA
+;; commute the operands so the memory operand, if any, lands in
+;; operand 2; ix86_binary_operator_ok rejects two memory operands.
+(define_insn "*avx2_umulhrswv16hi3"
+ [(set (match_operand:V16HI 0 "register_operand" "=x")
+ (truncate:V16HI
+ (lshiftrt:V16SI
+ (plus:V16SI
+ (lshiftrt:V16SI
+ (mult:V16SI
+ (sign_extend:V16SI
+ (match_operand:V16HI 1 "nonimmediate_operand" "%x"))
+ (sign_extend:V16SI
+ (match_operand:V16HI 2 "nonimmediate_operand" "xm")))
+ (const_int 14))
+ (const_vector:V16HI [(const_int 1) (const_int 1)
+ (const_int 1) (const_int 1)
+ (const_int 1) (const_int 1)
+ (const_int 1) (const_int 1)
+ (const_int 1) (const_int 1)
+ (const_int 1) (const_int 1)
+ (const_int 1) (const_int 1)
+ (const_int 1) (const_int 1)]))
+ (const_int 1))))]
+ "TARGET_AVX2 && ix86_binary_operator_ok (MULT, V16HImode, operands)"
+ "vpmulhrsw\t{%2, %1, %0|%0, %1, %2}"
+ [(set_attr "type" "sseimul")
+ (set_attr "prefix_extra" "1")
+ (set_attr "prefix" "vex")
+ (set_attr "mode" "OI")])
+
(define_expand "ssse3_pmulhrswv8hi3"
[(set (match_operand:V8HI 0 "register_operand" "")
(truncate:V8HI
(set (attr "prefix_rex") (symbol_ref "x86_extended_reg_mentioned_p (insn)"))
(set_attr "mode" "DI")])
-(define_insn "ssse3_pshufbv16qi3"
- [(set (match_operand:V16QI 0 "register_operand" "=x,x")
- (unspec:V16QI [(match_operand:V16QI 1 "register_operand" "0,x")
- (match_operand:V16QI 2 "nonimmediate_operand" "xm,xm")]
- UNSPEC_PSHUFB))]
+(define_insn "<ssse3_avx2>_pshufb<mode>3"
+ [(set (match_operand:VI1_AVX2 0 "register_operand" "=x,x")
+ (unspec:VI1_AVX2 [(match_operand:VI1_AVX2 1 "register_operand" "0,x")
+ (match_operand:VI1_AVX2 2 "nonimmediate_operand" "xm,xm")]
+ UNSPEC_PSHUFB))]
"TARGET_SSSE3"
"@
pshufb\t{%2, %0|%0, %2}
(set_attr "prefix_data16" "1,*")
(set_attr "prefix_extra" "1")
(set_attr "prefix" "orig,vex")
- (set_attr "mode" "TI")])
+ (set_attr "mode" "<sseinsnmode>")])
(define_insn "ssse3_pshufbv8qi3"
[(set (match_operand:V8QI 0 "register_operand" "=y")
(set (attr "prefix_rex") (symbol_ref "x86_extended_reg_mentioned_p (insn)"))
(set_attr "mode" "DI")])
-(define_insn "ssse3_psign<mode>3"
- [(set (match_operand:VI124_128 0 "register_operand" "=x,x")
- (unspec:VI124_128
- [(match_operand:VI124_128 1 "register_operand" "0,x")
- (match_operand:VI124_128 2 "nonimmediate_operand" "xm,xm")]
+(define_insn "<ssse3_avx2>_psign<mode>3"
+ [(set (match_operand:VI124_AVX2 0 "register_operand" "=x,x")
+ (unspec:VI124_AVX2
+ [(match_operand:VI124_AVX2 1 "register_operand" "0,x")
+ (match_operand:VI124_AVX2 2 "nonimmediate_operand" "xm,xm")]
UNSPEC_PSIGN))]
"TARGET_SSSE3"
"@
(set_attr "prefix_data16" "1,*")
(set_attr "prefix_extra" "1")
(set_attr "prefix" "orig,vex")
- (set_attr "mode" "TI")])
+ (set_attr "mode" "<sseinsnmode>")])
(define_insn "ssse3_psign<mode>3"
[(set (match_operand:MMXMODEI 0 "register_operand" "=y")
(set (attr "prefix_rex") (symbol_ref "x86_extended_reg_mentioned_p (insn)"))
(set_attr "mode" "DI")])
-(define_insn "ssse3_palignrti"
- [(set (match_operand:TI 0 "register_operand" "=x,x")
- (unspec:TI [(match_operand:TI 1 "register_operand" "0,x")
- (match_operand:TI 2 "nonimmediate_operand" "xm,xm")
- (match_operand:SI 3 "const_0_to_255_mul_8_operand" "n,n")]
- UNSPEC_PALIGNR))]
+(define_insn "<ssse3_avx2>_palignr<mode>"
+ [(set (match_operand:SSESCALARMODE 0 "register_operand" "=x,x")
+ (unspec:SSESCALARMODE [(match_operand:SSESCALARMODE 1 "register_operand" "0,x")
+ (match_operand:SSESCALARMODE 2 "nonimmediate_operand" "xm,xm")
+ (match_operand:SI 3 "const_0_to_255_mul_8_operand" "n,n")]
+ UNSPEC_PALIGNR))]
"TARGET_SSSE3"
{
operands[3] = GEN_INT (INTVAL (operands[3]) / 8);
(set_attr "prefix_extra" "1")
(set_attr "length_immediate" "1")
(set_attr "prefix" "orig,vex")
- (set_attr "mode" "TI")])
+ (set_attr "mode" "<sseinsnmode>")])
(define_insn "ssse3_palignrdi"
[(set (match_operand:DI 0 "register_operand" "=y")
(set_attr "mode" "DI")])
+;; Integer absolute value: SSSE3 pabs{b,w,d}, extended to the 256-bit
+;; AVX2 forms by widening the iterator to VI124_AVX2; the mode attr
+;; <sseinsnmode> picks TI vs OI per mode.
(define_insn "abs<mode>2"
- [(set (match_operand:VI124_128 0 "register_operand" "=x")
- (abs:VI124_128
- (match_operand:VI124_128 1 "nonimmediate_operand" "xm")))]
+ [(set (match_operand:VI124_AVX2 0 "register_operand" "=x")
+ (abs:VI124_AVX2
+ (match_operand:VI124_AVX2 1 "nonimmediate_operand" "xm")))]
 "TARGET_SSSE3"
 "%vpabs<ssemodesuffix>\t{%1, %0|%0, %1}"
 [(set_attr "type" "sselog1")
 (set_attr "prefix_data16" "1")
 (set_attr "prefix_extra" "1")
 (set_attr "prefix" "maybe_vex")
- (set_attr "mode" "TI")])
+ (set_attr "mode" "<sseinsnmode>")])
(define_insn "abs<mode>2"
[(set (match_operand:MMXMODEI 0 "register_operand" "=y")
[(set (match_operand:MODEF 0 "memory_operand" "=m")
(unspec:MODEF
[(match_operand:MODEF 1 "register_operand" "x")]
- UNSPEC_MOVNT))]
+ UNSPEC_MOVNT))]
"TARGET_SSE4A"
"movnt<ssemodesuffix>\t{%1, %0|%0, %1}"
[(set_attr "type" "ssemov")
(define_insn "sse4a_extrqi"
[(set (match_operand:V2DI 0 "register_operand" "=x")
- (unspec:V2DI [(match_operand:V2DI 1 "register_operand" "0")
- (match_operand 2 "const_int_operand" "")
- (match_operand 3 "const_int_operand" "")]
- UNSPEC_EXTRQI))]
+ (unspec:V2DI [(match_operand:V2DI 1 "register_operand" "0")
+ (match_operand 2 "const_0_to_255_operand" "")
+ (match_operand 3 "const_0_to_255_operand" "")]
+ UNSPEC_EXTRQI))]
"TARGET_SSE4A"
"extrq\t{%3, %2, %0|%0, %2, %3}"
[(set_attr "type" "sse")
(define_insn "sse4a_extrq"
[(set (match_operand:V2DI 0 "register_operand" "=x")
- (unspec:V2DI [(match_operand:V2DI 1 "register_operand" "0")
- (match_operand:V16QI 2 "register_operand" "x")]
- UNSPEC_EXTRQ))]
+ (unspec:V2DI [(match_operand:V2DI 1 "register_operand" "0")
+ (match_operand:V16QI 2 "register_operand" "x")]
+ UNSPEC_EXTRQ))]
"TARGET_SSE4A"
"extrq\t{%2, %0|%0, %2}"
[(set_attr "type" "sse")
(define_insn "sse4a_insertqi"
[(set (match_operand:V2DI 0 "register_operand" "=x")
- (unspec:V2DI [(match_operand:V2DI 1 "register_operand" "0")
- (match_operand:V2DI 2 "register_operand" "x")
- (match_operand 3 "const_int_operand" "")
- (match_operand 4 "const_int_operand" "")]
- UNSPEC_INSERTQI))]
+ (unspec:V2DI [(match_operand:V2DI 1 "register_operand" "0")
+ (match_operand:V2DI 2 "register_operand" "x")
+ (match_operand 3 "const_0_to_255_operand" "")
+ (match_operand 4 "const_0_to_255_operand" "")]
+ UNSPEC_INSERTQI))]
"TARGET_SSE4A"
"insertq\t{%4, %3, %2, %0|%0, %2, %3, %4}"
[(set_attr "type" "sseins")
(define_insn "sse4a_insertq"
[(set (match_operand:V2DI 0 "register_operand" "=x")
- (unspec:V2DI [(match_operand:V2DI 1 "register_operand" "0")
- (match_operand:V2DI 2 "register_operand" "x")]
- UNSPEC_INSERTQ))]
+ (unspec:V2DI [(match_operand:V2DI 1 "register_operand" "0")
+ (match_operand:V2DI 2 "register_operand" "x")]
+ UNSPEC_INSERTQ))]
"TARGET_SSE4A"
"insertq\t{%2, %0|%0, %2}"
[(set_attr "type" "sseins")
(vec_merge:VF
(match_operand:VF 2 "nonimmediate_operand" "xm,xm")
(match_operand:VF 1 "register_operand" "0,x")
- (match_operand:SI 3 "const_int_operand" "")))]
- "TARGET_SSE4_1
- && IN_RANGE (INTVAL (operands[3]), 0, (1 << GET_MODE_NUNITS (<MODE>mode))-1)"
+ (match_operand:SI 3 "const_0_to_<blendbits>_operand" "")))]
+ "TARGET_SSE4_1"
"@
blend<ssemodesuffix>\t{%3, %2, %0|%0, %2, %3}
vblend<ssemodesuffix>\t{%3, %2, %1, %0|%0, %1, %2, %3}"
(set_attr "prefix" "orig,vex")
(set_attr "mode" "<MODE>")])
+;; Non-temporal aligned load (movntdqa / AVX2 vmovntdqa), widened to
+;; cover the 256-bit V4DI form via VI8_AVX2.  Modeled as an unspec
+;; so the non-temporal hint is not optimized away.
-(define_insn "sse4_1_movntdqa"
- [(set (match_operand:V2DI 0 "register_operand" "=x")
- (unspec:V2DI [(match_operand:V2DI 1 "memory_operand" "m")]
+(define_insn "<sse4_1_avx2>_movntdqa"
+ [(set (match_operand:VI8_AVX2 0 "register_operand" "=x")
+ (unspec:VI8_AVX2 [(match_operand:VI8_AVX2 1 "memory_operand" "m")]
 UNSPEC_MOVNTDQA))]
 "TARGET_SSE4_1"
 "%vmovntdqa\t{%1, %0|%0, %1}"
 [(set_attr "type" "ssemov")
 (set_attr "prefix_extra" "1")
 (set_attr "prefix" "maybe_vex")
- (set_attr "mode" "TI")])
+ (set_attr "mode" "<sseinsnmode>")])
-(define_insn "sse4_1_mpsadbw"
- [(set (match_operand:V16QI 0 "register_operand" "=x,x")
- (unspec:V16QI [(match_operand:V16QI 1 "register_operand" "0,x")
- (match_operand:V16QI 2 "nonimmediate_operand" "xm,xm")
- (match_operand:SI 3 "const_0_to_255_operand" "n,n")]
- UNSPEC_MPSADBW))]
+(define_insn "<sse4_1_avx2>_mpsadbw"
+ [(set (match_operand:VI1_AVX2 0 "register_operand" "=x,x")
+ (unspec:VI1_AVX2 [(match_operand:VI1_AVX2 1 "register_operand" "0,x")
+ (match_operand:VI1_AVX2 2 "nonimmediate_operand" "xm,xm")
+ (match_operand:SI 3 "const_0_to_255_operand" "n,n")]
+ UNSPEC_MPSADBW))]
"TARGET_SSE4_1"
"@
mpsadbw\t{%3, %2, %0|%0, %2, %3}
(set_attr "length_immediate" "1")
(set_attr "prefix_extra" "1")
(set_attr "prefix" "orig,vex")
- (set_attr "mode" "TI")])
+ (set_attr "mode" "<sseinsnmode>")])
+
+;; AVX2 vpackusdw: pack signed dwords to words with unsigned saturation.
+;; NOTE(review): this RTL places all 8 words from operand 1 before all 8
+;; from operand 2, while the hardware interleaves the two sources per
+;; 128-bit lane -- confirm the element order against the SDM.
+(define_insn "avx2_packusdw"
+ [(set (match_operand:V16HI 0 "register_operand" "=x")
+ (vec_concat:V16HI
+ (us_truncate:V8HI
+ (match_operand:V8SI 1 "register_operand" "x"))
+ (us_truncate:V8HI
+ (match_operand:V8SI 2 "nonimmediate_operand" "xm"))))]
+ "TARGET_AVX2"
+ "vpackusdw\t{%2, %1, %0|%0, %1, %2}"
+ [(set_attr "type" "sselog")
+ (set_attr "prefix_extra" "1")
+ (set_attr "prefix" "vex")
+ (set_attr "mode" "OI")])
(define_insn "sse4_1_packusdw"
[(set (match_operand:V8HI 0 "register_operand" "=x,x")
(set_attr "prefix" "orig,vex")
(set_attr "mode" "TI")])
-(define_insn "sse4_1_pblendvb"
- [(set (match_operand:V16QI 0 "reg_not_xmm0_operand" "=x,x")
- (unspec:V16QI
- [(match_operand:V16QI 1 "reg_not_xmm0_operand_maybe_avx" "0,x")
- (match_operand:V16QI 2 "nonimm_not_xmm0_operand_maybe_avx" "xm,xm")
- (match_operand:V16QI 3 "register_operand" "Yz,x")]
+(define_insn "<sse4_1_avx2>_pblendvb"
+ [(set (match_operand:VI1_AVX2 0 "reg_not_xmm0_operand" "=x,x")
+ (unspec:VI1_AVX2
+ [(match_operand:VI1_AVX2 1 "reg_not_xmm0_operand_maybe_avx" "0,x")
+ (match_operand:VI1_AVX2 2 "nonimm_not_xmm0_operand_maybe_avx" "xm,xm")
+ (match_operand:VI1_AVX2 3 "register_operand" "Yz,x")]
UNSPEC_BLENDV))]
"TARGET_SSE4_1"
"@
(set_attr "prefix_extra" "1")
(set_attr "length_immediate" "*,1")
(set_attr "prefix" "orig,vex")
- (set_attr "mode" "TI")])
+ (set_attr "mode" "<sseinsnmode>")])
(define_insn "sse4_1_pblendw"
[(set (match_operand:V8HI 0 "register_operand" "=x,x")
(set_attr "prefix" "orig,vex")
(set_attr "mode" "TI")])
+;; The vpblendw builtin takes an 8-bit immediate, but the V16HI
+;; vec_merge needs one mask bit per element, so duplicate the byte
+;; into both halves of a 16-bit mask (presumably mirroring the
+;; hardware's per-128-bit-lane use of imm8 -- confirm against the SDM).
+(define_expand "avx2_pblendw"
+ [(set (match_operand:V16HI 0 "register_operand" "")
+ (vec_merge:V16HI
+ (match_operand:V16HI 2 "nonimmediate_operand" "")
+ (match_operand:V16HI 1 "register_operand" "")
+ (match_operand:SI 3 "const_0_to_255_operand" "")))]
+ "TARGET_AVX2"
+{
+ HOST_WIDE_INT val = INTVAL (operands[3]) & 0xff;
+ operands[3] = GEN_INT (val << 8 | val);
+})
+
+;; Matcher for the widened 16-bit mask (avx2_pblendw_operand checks the
+;; duplicated form); the output routine masks the immediate back to its
+;; low 8 bits, since vpblendw only encodes imm8.
+(define_insn "*avx2_pblendw"
+ [(set (match_operand:V16HI 0 "register_operand" "=x")
+ (vec_merge:V16HI
+ (match_operand:V16HI 2 "nonimmediate_operand" "xm")
+ (match_operand:V16HI 1 "register_operand" "x")
+ (match_operand:SI 3 "avx2_pblendw_operand" "n")))]
+ "TARGET_AVX2"
+{
+ operands[3] = GEN_INT (INTVAL (operands[3]) & 0xff);
+ return "vpblendw\t{%3, %2, %1, %0|%0, %1, %2, %3}";
+}
+ [(set_attr "type" "ssemov")
+ (set_attr "prefix_extra" "1")
+ (set_attr "length_immediate" "1")
+ (set_attr "prefix" "vex")
+ (set_attr "mode" "OI")])
+
+;; AVX2 vpblendd: dword blend selected by imm8; one mask bit per dword,
+;; so the immediate needs no widening (at most 8 elements in VI4_AVX2).
+(define_insn "avx2_pblendd<mode>"
+ [(set (match_operand:VI4_AVX2 0 "register_operand" "=x")
+ (vec_merge:VI4_AVX2
+ (match_operand:VI4_AVX2 2 "nonimmediate_operand" "xm")
+ (match_operand:VI4_AVX2 1 "register_operand" "x")
+ (match_operand:SI 3 "const_0_to_255_operand" "n")))]
+ "TARGET_AVX2"
+ "vpblendd\t{%3, %2, %1, %0|%0, %1, %2, %3}"
+ [(set_attr "type" "ssemov")
+ (set_attr "prefix_extra" "1")
+ (set_attr "length_immediate" "1")
+ (set_attr "prefix" "vex")
+ (set_attr "mode" "<sseinsnmode>")])
+
(define_insn "sse4_1_phminposuw"
[(set (match_operand:V8HI 0 "register_operand" "=x")
(unspec:V8HI [(match_operand:V8HI 1 "nonimmediate_operand" "xm")]
(set_attr "prefix" "maybe_vex")
(set_attr "mode" "TI")])
+;; AVX2 vpmov{sx,zx}bw: sign/zero extend all 16 bytes of a full 128-bit
+;; source to 16 words; the whole operand is consumed, so no vec_select
+;; or operand-size modifier is needed.
+(define_insn "avx2_<code>v16qiv16hi2"
+ [(set (match_operand:V16HI 0 "register_operand" "=x")
+ (any_extend:V16HI
+ (match_operand:V16QI 1 "nonimmediate_operand" "xm")))]
+ "TARGET_AVX2"
+ "vpmov<extsuffix>bw\t{%1, %0|%0, %1}"
+ [(set_attr "type" "ssemov")
+ (set_attr "prefix_extra" "1")
+ (set_attr "prefix" "vex")
+ (set_attr "mode" "OI")])
+
(define_insn "sse4_1_<code>v8qiv8hi2"
[(set (match_operand:V8HI 0 "register_operand" "=x")
(any_extend:V8HI
(const_int 6)
(const_int 7)]))))]
"TARGET_SSE4_1"
- "%vpmov<extsuffix>bw\t{%1, %0|%0, %1}"
+ "%vpmov<extsuffix>bw\t{%1, %0|%0, %q1}"
[(set_attr "type" "ssemov")
(set_attr "prefix_extra" "1")
(set_attr "prefix" "maybe_vex")
(set_attr "mode" "TI")])
+;; AVX2 vpmov{sx,zx}bd: extend the low 8 bytes to 8 dwords.  Only 8 of
+;; the 16 source bytes are read, so %q prints a DImode (8-byte) memory
+;; reference for the Intel-syntax operand.
+(define_insn "avx2_<code>v8qiv8si2"
+ [(set (match_operand:V8SI 0 "register_operand" "=x")
+ (any_extend:V8SI
+ (vec_select:V8QI
+ (match_operand:V16QI 1 "nonimmediate_operand" "xm")
+ (parallel [(const_int 0)
+ (const_int 1)
+ (const_int 2)
+ (const_int 3)
+ (const_int 4)
+ (const_int 5)
+ (const_int 6)
+ (const_int 7)]))))]
+ "TARGET_AVX2"
+ "vpmov<extsuffix>bd\t{%1, %0|%0, %q1}"
+ [(set_attr "type" "ssemov")
+ (set_attr "prefix_extra" "1")
+ (set_attr "prefix" "vex")
+ (set_attr "mode" "OI")])
+
(define_insn "sse4_1_<code>v4qiv4si2"
[(set (match_operand:V4SI 0 "register_operand" "=x")
(any_extend:V4SI
(const_int 2)
(const_int 3)]))))]
"TARGET_SSE4_1"
- "%vpmov<extsuffix>bd\t{%1, %0|%0, %1}"
+ "%vpmov<extsuffix>bd\t{%1, %0|%0, %k1}"
[(set_attr "type" "ssemov")
(set_attr "prefix_extra" "1")
(set_attr "prefix" "maybe_vex")
(set_attr "mode" "TI")])
+;; AVX2 vpmov{sx,zx}wd: extend all 8 words of the 128-bit source to
+;; 8 dwords; full operand consumed, no size modifier needed.
+(define_insn "avx2_<code>v8hiv8si2"
+ [(set (match_operand:V8SI 0 "register_operand" "=x")
+ (any_extend:V8SI
+ (match_operand:V8HI 1 "nonimmediate_operand" "xm")))]
+ "TARGET_AVX2"
+ "vpmov<extsuffix>wd\t{%1, %0|%0, %1}"
+ [(set_attr "type" "ssemov")
+ (set_attr "prefix_extra" "1")
+ (set_attr "prefix" "vex")
+ (set_attr "mode" "OI")])
+
(define_insn "sse4_1_<code>v4hiv4si2"
[(set (match_operand:V4SI 0 "register_operand" "=x")
(any_extend:V4SI
(const_int 2)
(const_int 3)]))))]
"TARGET_SSE4_1"
- "%vpmov<extsuffix>wd\t{%1, %0|%0, %1}"
+ "%vpmov<extsuffix>wd\t{%1, %0|%0, %q1}"
[(set_attr "type" "ssemov")
(set_attr "prefix_extra" "1")
(set_attr "prefix" "maybe_vex")
(set_attr "mode" "TI")])
+;; AVX2 vpmov{sx,zx}bq: extend the low 4 bytes to 4 qwords.  Only 4
+;; source bytes are read, so %k prints an SImode (4-byte) memory
+;; reference for the Intel-syntax operand.
+(define_insn "avx2_<code>v4qiv4di2"
+ [(set (match_operand:V4DI 0 "register_operand" "=x")
+ (any_extend:V4DI
+ (vec_select:V4QI
+ (match_operand:V16QI 1 "nonimmediate_operand" "xm")
+ (parallel [(const_int 0)
+ (const_int 1)
+ (const_int 2)
+ (const_int 3)]))))]
+ "TARGET_AVX2"
+ "vpmov<extsuffix>bq\t{%1, %0|%0, %k1}"
+ [(set_attr "type" "ssemov")
+ (set_attr "prefix_extra" "1")
+ (set_attr "prefix" "vex")
+ (set_attr "mode" "OI")])
+
(define_insn "sse4_1_<code>v2qiv2di2"
[(set (match_operand:V2DI 0 "register_operand" "=x")
(any_extend:V2DI
(parallel [(const_int 0)
(const_int 1)]))))]
"TARGET_SSE4_1"
- "%vpmov<extsuffix>bq\t{%1, %0|%0, %1}"
+ "%vpmov<extsuffix>bq\t{%1, %0|%0, %w1}"
[(set_attr "type" "ssemov")
(set_attr "prefix_extra" "1")
(set_attr "prefix" "maybe_vex")
(set_attr "mode" "TI")])
+;; AVX2 vpmov{sx,zx}wq: extend the low 4 words to 4 qwords; only 8 of
+;; the 16 source bytes are read, hence the %q (DImode) size modifier.
+(define_insn "avx2_<code>v4hiv4di2"
+ [(set (match_operand:V4DI 0 "register_operand" "=x")
+ (any_extend:V4DI
+ (vec_select:V4HI
+ (match_operand:V8HI 1 "nonimmediate_operand" "xm")
+ (parallel [(const_int 0)
+ (const_int 1)
+ (const_int 2)
+ (const_int 3)]))))]
+ "TARGET_AVX2"
+ "vpmov<extsuffix>wq\t{%1, %0|%0, %q1}"
+ [(set_attr "type" "ssemov")
+ (set_attr "prefix_extra" "1")
+ (set_attr "prefix" "vex")
+ (set_attr "mode" "OI")])
+
(define_insn "sse4_1_<code>v2hiv2di2"
[(set (match_operand:V2DI 0 "register_operand" "=x")
(any_extend:V2DI
(parallel [(const_int 0)
(const_int 1)]))))]
"TARGET_SSE4_1"
- "%vpmov<extsuffix>wq\t{%1, %0|%0, %1}"
+ "%vpmov<extsuffix>wq\t{%1, %0|%0, %k1}"
[(set_attr "type" "ssemov")
(set_attr "prefix_extra" "1")
(set_attr "prefix" "maybe_vex")
(set_attr "mode" "TI")])
+;; AVX2 vpmov{sx,zx}dq: extend all 4 dwords of the 128-bit source to
+;; 4 qwords; the full operand is consumed, so no size modifier needed.
+(define_insn "avx2_<code>v4siv4di2"
+ [(set (match_operand:V4DI 0 "register_operand" "=x")
+ (any_extend:V4DI
+ (match_operand:V4SI 1 "nonimmediate_operand" "xm")))]
+ "TARGET_AVX2"
+ "vpmov<extsuffix>dq\t{%1, %0|%0, %1}"
+ [(set_attr "type" "ssemov")
+ (set_attr "prefix_extra" "1")
+ (set_attr "prefix" "vex")
+ (set_attr "mode" "OI")])
+
(define_insn "sse4_1_<code>v2siv2di2"
[(set (match_operand:V2DI 0 "register_operand" "=x")
(any_extend:V2DI
(parallel [(const_int 0)
(const_int 1)]))))]
"TARGET_SSE4_1"
- "%vpmov<extsuffix>dq\t{%1, %0|%0, %1}"
+ "%vpmov<extsuffix>dq\t{%1, %0|%0, %q1}"
[(set_attr "type" "ssemov")
(set_attr "prefix_extra" "1")
(set_attr "prefix" "maybe_vex")
[(set_attr "type" "ssecvt")
(set (attr "prefix_data16")
(if_then_else
- (ne (symbol_ref "TARGET_AVX") (const_int 0))
+ (match_test "TARGET_AVX")
(const_string "*")
(const_string "1")))
(set_attr "prefix_extra" "1")
(set_attr "prefix" "maybe_vex")
(set_attr "mode" "<MODE>")])
+;; roundps + cvttps2dq fusion: round operand 1 per the imm8 rounding
+;; mode in operand 2, then truncating-convert the result to a signed
+;; integer vector.  Pure expander: emits two insns and DONEs.
+(define_expand "<sse4_1>_round<ssemodesuffix>_sfix<avxsizesuffix>"
+ [(match_operand:<sseintvecmode> 0 "register_operand" "")
+ (match_operand:VF1 1 "nonimmediate_operand" "")
+ (match_operand:SI 2 "const_0_to_15_operand" "")]
+ "TARGET_ROUND"
+{
+ rtx tmp = gen_reg_rtx (<MODE>mode);
+
+ emit_insn
+ (gen_<sse4_1>_round<ssemodesuffix><avxsizesuffix> (tmp, operands[1],
+ operands[2]));
+ emit_insn
+ (gen_fix_trunc<mode><sseintvecmodelower>2 (operands[0], tmp));
+ DONE;
+})
+
+;; roundpd on two double-vector operands (same imm8 rounding mode in
+;; operand 3), then pack both truncating-converted results into one
+;; signed-int vector via vec_pack_sfix_trunc.
+(define_expand "<sse4_1>_round<ssemodesuffix>_vec_pack_sfix<avxsizesuffix>"
+ [(match_operand:<ssepackfltmode> 0 "register_operand" "")
+ (match_operand:VF2 1 "nonimmediate_operand" "")
+ (match_operand:VF2 2 "nonimmediate_operand" "")
+ (match_operand:SI 3 "const_0_to_15_operand" "")]
+ "TARGET_ROUND"
+{
+ rtx tmp0, tmp1;
+
+ tmp0 = gen_reg_rtx (<MODE>mode);
+ tmp1 = gen_reg_rtx (<MODE>mode);
+
+ emit_insn
+ (gen_<sse4_1>_round<ssemodesuffix><avxsizesuffix> (tmp0, operands[1],
+ operands[3]));
+ emit_insn
+ (gen_<sse4_1>_round<ssemodesuffix><avxsizesuffix> (tmp1, operands[2],
+ operands[3]));
+ emit_insn
+ (gen_vec_pack_sfix_trunc_<mode> (operands[0], tmp0, tmp1));
+ DONE;
+})
+
(define_insn "sse4_1_round<ssescalarmodesuffix>"
[(set (match_operand:VF_128 0 "register_operand" "=x,x")
(vec_merge:VF_128
(set_attr "prefix" "orig,vex")
(set_attr "mode" "<MODE>")])
+;; Vector round-half-away-from-zero: compute
+;;   trunc (x + copysign (nextafter (0.5, 0.0), x)).
+;; Adding the predecessor of 0.5 (0.5 - 2^(-p-1)) instead of 0.5 itself
+;; avoids bumping values just below 0.5 up to 1.0.  Guarded by
+;; !flag_trapping_math, presumably because the extra add can raise FP
+;; exceptions that exact rounding would not.
+(define_expand "round<mode>2"
+ [(set (match_dup 4)
+ (plus:VF
+ (match_operand:VF 1 "register_operand" "")
+ (match_dup 3)))
+ (set (match_operand:VF 0 "register_operand" "")
+ (unspec:VF
+ [(match_dup 4) (match_dup 5)]
+ UNSPEC_ROUND))]
+ "TARGET_ROUND && !flag_trapping_math"
+{
+ enum machine_mode scalar_mode;
+ const struct real_format *fmt;
+ REAL_VALUE_TYPE pred_half, half_minus_pred_half;
+ rtx half, vec_half;
+
+ scalar_mode = GET_MODE_INNER (<MODE>mode);
+
+ /* load nextafter (0.5, 0.0) */
+ fmt = REAL_MODE_FORMAT (scalar_mode);
+ real_2expN (&half_minus_pred_half, -(fmt->p) - 1, scalar_mode);
+ REAL_ARITHMETIC (pred_half, MINUS_EXPR, dconsthalf, half_minus_pred_half);
+ half = const_double_from_real_value (pred_half, scalar_mode);
+
+ vec_half = ix86_build_const_vector (<MODE>mode, true, half);
+ vec_half = force_reg (<MODE>mode, vec_half);
+
+ operands[3] = gen_reg_rtx (<MODE>mode);
+ emit_insn (gen_copysign<mode>3 (operands[3], vec_half, operands[1]));
+
+ operands[4] = gen_reg_rtx (<MODE>mode);
+ operands[5] = GEN_INT (ROUND_TRUNC);
+})
+
+;; round<mode>2 followed by a truncating convert to a signed integer
+;; vector (lround-style composite).
+(define_expand "round<mode>2_sfix"
+ [(match_operand:<sseintvecmode> 0 "register_operand" "")
+ (match_operand:VF1 1 "register_operand" "")]
+ "TARGET_ROUND && !flag_trapping_math"
+{
+ rtx tmp = gen_reg_rtx (<MODE>mode);
+
+ emit_insn (gen_round<mode>2 (tmp, operands[1]));
+
+ emit_insn
+ (gen_fix_trunc<mode><sseintvecmodelower>2 (operands[0], tmp));
+ DONE;
+})
+
+;; round<mode>2 on both double-vector operands, then pack the two
+;; truncating-converted results into a single signed-int vector.
+(define_expand "round<mode>2_vec_pack_sfix"
+ [(match_operand:<ssepackfltmode> 0 "register_operand" "")
+ (match_operand:VF2 1 "register_operand" "")
+ (match_operand:VF2 2 "register_operand" "")]
+ "TARGET_ROUND && !flag_trapping_math"
+{
+ rtx tmp0, tmp1;
+
+ tmp0 = gen_reg_rtx (<MODE>mode);
+ tmp1 = gen_reg_rtx (<MODE>mode);
+
+ emit_insn (gen_round<mode>2 (tmp0, operands[1]));
+ emit_insn (gen_round<mode>2 (tmp1, operands[2]));
+
+ emit_insn
+ (gen_vec_pack_sfix_trunc_<mode> (operands[0], tmp0, tmp1));
+ DONE;
+})
+
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;;
;; Intel SSE4.2 string/text processing instructions
operands[2], operands[3],
operands[4], operands[5],
operands[6]));
+ if (!(flags || ecx || xmm0))
+ emit_note (NOTE_INSN_DELETED);
+
DONE;
}
[(set_attr "type" "sselog")
emit_insn (gen_sse4_2_pcmpistr_cconly (NULL, NULL,
operands[2], operands[3],
operands[4]));
+ if (!(flags || ecx || xmm0))
+ emit_note (NOTE_INSN_DELETED);
+
DONE;
}
[(set_attr "type" "sselog")
;; do not allow the value being added to be a memory operation.
(define_insn "xop_pmacsww"
[(set (match_operand:V8HI 0 "register_operand" "=x")
- (plus:V8HI
+ (plus:V8HI
(mult:V8HI
(match_operand:V8HI 1 "nonimmediate_operand" "%x")
(match_operand:V8HI 2 "nonimmediate_operand" "xm"))
(define_insn "xop_pmacssww"
[(set (match_operand:V8HI 0 "register_operand" "=x")
- (ss_plus:V8HI
+ (ss_plus:V8HI
(mult:V8HI (match_operand:V8HI 1 "nonimmediate_operand" "%x")
(match_operand:V8HI 2 "nonimmediate_operand" "xm"))
(match_operand:V8HI 3 "nonimmediate_operand" "x")))]
(define_insn "xop_pmacsdd"
[(set (match_operand:V4SI 0 "register_operand" "=x")
- (plus:V4SI
+ (plus:V4SI
(mult:V4SI
(match_operand:V4SI 1 "nonimmediate_operand" "%x")
(match_operand:V4SI 2 "nonimmediate_operand" "xm"))
(define_insn "xop_pmacssdd"
[(set (match_operand:V4SI 0 "register_operand" "=x")
- (ss_plus:V4SI
+ (ss_plus:V4SI
(mult:V4SI (match_operand:V4SI 1 "nonimmediate_operand" "%x")
(match_operand:V4SI 2 "nonimmediate_operand" "xm"))
(match_operand:V4SI 3 "nonimmediate_operand" "x")))]
[(set (match_operand:V 0 "register_operand" "=x,x")
(if_then_else:V
(match_operand:V 3 "nonimmediate_operand" "x,m")
- (match_operand:V 1 "vector_move_operand" "x,x")
- (match_operand:V 2 "vector_move_operand" "xm,x")))]
+ (match_operand:V 1 "register_operand" "x,x")
+ (match_operand:V 2 "nonimmediate_operand" "xm,x")))]
"TARGET_XOP"
"vpcmov\t{%3, %2, %1, %0|%0, %1, %2, %3}"
[(set_attr "type" "sse4arg")])
int i;
if (GET_MODE (op2) != <ssescalarmode>mode)
- {
+ {
op2 = gen_reg_rtx (<ssescalarmode>mode);
convert_move (op2, operands[2], false);
}
int i;
if (GET_MODE (op2) != <ssescalarmode>mode)
- {
+ {
op2 = gen_reg_rtx (<ssescalarmode>mode);
convert_move (op2, operands[2], false);
}
(set_attr "mode" "TI")])
;; XOP packed shift instructions.
-;; FIXME: add V2DI back in
+;; Variable logical right shift for byte/word vectors: XOP only.
+;; XOP has no right-shift form, so negate the per-element count and
+;; use the variable left-logical shift (xop_shl) instead.
(define_expand "vlshr<mode>3"
- [(match_operand:VI124_128 0 "register_operand" "")
- (match_operand:VI124_128 1 "register_operand" "")
- (match_operand:VI124_128 2 "register_operand" "")]
+ [(set (match_operand:VI12_128 0 "register_operand" "")
+ (lshiftrt:VI12_128
+ (match_operand:VI12_128 1 "register_operand" "")
+ (match_operand:VI12_128 2 "nonimmediate_operand" "")))]
 "TARGET_XOP"
{
 rtx neg = gen_reg_rtx (<MODE>mode);
 emit_insn (gen_neg<mode>2 (neg, operands[2]));
- emit_insn (gen_xop_lshl<mode>3 (operands[0], operands[1], neg));
+ emit_insn (gen_xop_shl<mode>3 (operands[0], operands[1], neg));
 DONE;
})
+;; Variable logical right shift for dword/qword vectors.  With AVX2 the
+;; pattern is left as-is (matched by a variable-shift insn pattern
+;; elsewhere); without it the XOP fallback negates the count and uses
+;; the variable left-logical shift.
+(define_expand "vlshr<mode>3"
+ [(set (match_operand:VI48_128 0 "register_operand" "")
+ (lshiftrt:VI48_128
+ (match_operand:VI48_128 1 "register_operand" "")
+ (match_operand:VI48_128 2 "nonimmediate_operand" "")))]
+ "TARGET_AVX2 || TARGET_XOP"
+{
+ if (!TARGET_AVX2)
+ {
+ rtx neg = gen_reg_rtx (<MODE>mode);
+ emit_insn (gen_neg<mode>2 (neg, operands[2]));
+ emit_insn (gen_xop_shl<mode>3 (operands[0], operands[1], neg));
+ DONE;
+ }
+})
+
+;; 256-bit variable logical right shift: AVX2 only, no C body needed.
+(define_expand "vlshr<mode>3"
+ [(set (match_operand:VI48_256 0 "register_operand" "")
+ (lshiftrt:VI48_256
+ (match_operand:VI48_256 1 "register_operand" "")
+ (match_operand:VI48_256 2 "nonimmediate_operand" "")))]
+ "TARGET_AVX2")
+
+;; Variable arithmetic right shift (bytes/words/qwords): XOP only.
+;; Negate the per-element count and use the variable arithmetic left
+;; shift (xop_sha), which shifts right for negative counts.
(define_expand "vashr<mode>3"
- [(match_operand:VI124_128 0 "register_operand" "")
- (match_operand:VI124_128 1 "register_operand" "")
- (match_operand:VI124_128 2 "register_operand" "")]
+ [(set (match_operand:VI128_128 0 "register_operand" "")
+ (ashiftrt:VI128_128
+ (match_operand:VI128_128 1 "register_operand" "")
+ (match_operand:VI128_128 2 "nonimmediate_operand" "")))]
 "TARGET_XOP"
{
 rtx neg = gen_reg_rtx (<MODE>mode);
 emit_insn (gen_neg<mode>2 (neg, operands[2]));
- emit_insn (gen_xop_ashl<mode>3 (operands[0], operands[1], neg));
+ emit_insn (gen_xop_sha<mode>3 (operands[0], operands[1], neg));
 DONE;
})
+;; Variable arithmetic right shift of dwords: direct pattern for AVX2,
+;; negate-and-sha fallback for XOP.
+(define_expand "vashrv4si3"
+ [(set (match_operand:V4SI 0 "register_operand" "")
+ (ashiftrt:V4SI (match_operand:V4SI 1 "register_operand" "")
+ (match_operand:V4SI 2 "nonimmediate_operand" "")))]
+ "TARGET_AVX2 || TARGET_XOP"
+{
+ if (!TARGET_AVX2)
+ {
+ rtx neg = gen_reg_rtx (V4SImode);
+ emit_insn (gen_negv4si2 (neg, operands[2]));
+ emit_insn (gen_xop_shav4si3 (operands[0], operands[1], neg));
+ DONE;
+ }
+})
+
+;; 256-bit variable arithmetic right shift of dwords: AVX2 only.
+(define_expand "vashrv8si3"
+ [(set (match_operand:V8SI 0 "register_operand" "")
+ (ashiftrt:V8SI (match_operand:V8SI 1 "register_operand" "")
+ (match_operand:V8SI 2 "nonimmediate_operand" "")))]
+ "TARGET_AVX2")
+
+;; Variable left shift for byte/word vectors: XOP only; positive counts
+;; shift left with xop_sha, so no count adjustment is needed.
(define_expand "vashl<mode>3"
- [(match_operand:VI124_128 0 "register_operand" "")
- (match_operand:VI124_128 1 "register_operand" "")
- (match_operand:VI124_128 2 "register_operand" "")]
+ [(set (match_operand:VI12_128 0 "register_operand" "")
+ (ashift:VI12_128
+ (match_operand:VI12_128 1 "register_operand" "")
+ (match_operand:VI12_128 2 "nonimmediate_operand" "")))]
 "TARGET_XOP"
{
- emit_insn (gen_xop_ashl<mode>3 (operands[0], operands[1], operands[2]));
+ emit_insn (gen_xop_sha<mode>3 (operands[0], operands[1], operands[2]));
 DONE;
})
-(define_insn "xop_ashl<mode>3"
+;; Variable left shift for dword/qword vectors: direct pattern for AVX2;
+;; for XOP the count must be in a register (xop_sha takes no memory
+;; count operand), hence the force_reg.
+(define_expand "vashl<mode>3"
+ [(set (match_operand:VI48_128 0 "register_operand" "")
+ (ashift:VI48_128
+ (match_operand:VI48_128 1 "register_operand" "")
+ (match_operand:VI48_128 2 "nonimmediate_operand" "")))]
+ "TARGET_AVX2 || TARGET_XOP"
+{
+ if (!TARGET_AVX2)
+ {
+ operands[2] = force_reg (<MODE>mode, operands[2]);
+ emit_insn (gen_xop_sha<mode>3 (operands[0], operands[1], operands[2]));
+ DONE;
+ }
+})
+
+;; 256-bit variable left shift: AVX2 only, no C body needed.
+(define_expand "vashl<mode>3"
+ [(set (match_operand:VI48_256 0 "register_operand" "")
+ (ashift:VI48_256
+ (match_operand:VI48_256 1 "register_operand" "")
+ (match_operand:VI48_256 2 "nonimmediate_operand" "")))]
+ "TARGET_AVX2")
+
+(define_insn "xop_sha<mode>3"
[(set (match_operand:VI_128 0 "register_operand" "=x,x")
(if_then_else:VI_128
(ge:VI_128
(set_attr "prefix_extra" "2")
(set_attr "mode" "TI")])
-(define_insn "xop_lshl<mode>3"
+(define_insn "xop_shl<mode>3"
[(set (match_operand:VI_128 0 "register_operand" "=x,x")
(if_then_else:VI_128
(ge:VI_128
(set_attr "prefix_extra" "2")
(set_attr "mode" "TI")])
-;; SSE2 doesn't have some shift varients, so define versions for XOP
+;; SSE2 doesn't have some shift variants, so define versions for XOP
+;; Byte left shift by a scalar count (SSE2 has no psllb): broadcast the
+;; count into a V16QI register and use XOP's per-element variable shift
+;; (xop_sha; arithmetic vs logical is irrelevant for left shifts).
(define_expand "ashlv16qi3"
- [(match_operand:V16QI 0 "register_operand" "")
- (match_operand:V16QI 1 "register_operand" "")
- (match_operand:SI 2 "nonmemory_operand" "")]
+ [(set (match_operand:V16QI 0 "register_operand" "")
+ (ashift:V16QI
+ (match_operand:V16QI 1 "register_operand" "")
+ (match_operand:SI 2 "nonmemory_operand" "")))]
 "TARGET_XOP"
{
- rtvec vs = rtvec_alloc (16);
- rtx par = gen_rtx_PARALLEL (V16QImode, vs);
 rtx reg = gen_reg_rtx (V16QImode);
+ rtx par;
 int i;
- for (i = 0; i < 16; i++)
- RTVEC_ELT (vs, i) = operands[2];
-
- emit_insn (gen_vec_initv16qi (reg, par));
- emit_insn (gen_xop_ashlv16qi3 (operands[0], operands[1], reg));
- DONE;
-})
-(define_expand "lshlv16qi3"
- [(match_operand:V16QI 0 "register_operand" "")
- (match_operand:V16QI 1 "register_operand" "")
- (match_operand:SI 2 "nonmemory_operand" "")]
- "TARGET_XOP"
-{
- rtvec vs = rtvec_alloc (16);
- rtx par = gen_rtx_PARALLEL (V16QImode, vs);
- rtx reg = gen_reg_rtx (V16QImode);
- int i;
+ par = gen_rtx_PARALLEL (V16QImode, rtvec_alloc (16));
 for (i = 0; i < 16; i++)
- RTVEC_ELT (vs, i) = operands[2];
+ XVECEXP (par, 0, i) = operands[2];
 emit_insn (gen_vec_initv16qi (reg, par)));
- emit_insn (gen_xop_lshlv16qi3 (operands[0], operands[1], reg));
+ emit_insn (gen_xop_shav16qi3 (operands[0], operands[1], reg));
 DONE;
})
+;; Byte right shifts by a scalar count, both logical and arithmetic
+;; (<shift_insn> over any_shiftrt; SSE2 has no psrlb/psrab).  Negate the
+;; count (at expand time for constants, with negv16qi2 otherwise),
+;; broadcast it into a V16QI register, then use XOP's variable left
+;; shift: xop_shl for LSHIFTRT, xop_sha for ASHIFTRT.
-(define_expand "ashrv16qi3"
- [(match_operand:V16QI 0 "register_operand" "")
- (match_operand:V16QI 1 "register_operand" "")
- (match_operand:SI 2 "nonmemory_operand" "")]
+(define_expand "<shift_insn>v16qi3"
+ [(set (match_operand:V16QI 0 "register_operand" "")
+ (any_shiftrt:V16QI
+ (match_operand:V16QI 1 "register_operand" "")
+ (match_operand:SI 2 "nonmemory_operand" "")))]
 "TARGET_XOP"
{
- rtvec vs = rtvec_alloc (16);
- rtx par = gen_rtx_PARALLEL (V16QImode, vs);
 rtx reg = gen_reg_rtx (V16QImode);
+ rtx par;
+ bool negate = false;
+ rtx (*shift_insn)(rtx, rtx, rtx);
 int i;
- rtx ele = ((CONST_INT_P (operands[2]))
- ? GEN_INT (- INTVAL (operands[2]))
- : operands[2]);
+ if (CONST_INT_P (operands[2]))
+ operands[2] = GEN_INT (-INTVAL (operands[2]));
+ else
+ negate = true;
+
+ par = gen_rtx_PARALLEL (V16QImode, rtvec_alloc (16));
 for (i = 0; i < 16; i++)
- RTVEC_ELT (vs, i) = ele;
+ XVECEXP (par, 0, i) = operands[2];
 emit_insn (gen_vec_initv16qi (reg, par));
- if (!CONST_INT_P (operands[2]))
- {
- rtx neg = gen_reg_rtx (V16QImode);
- emit_insn (gen_negv16qi2 (neg, reg));
- emit_insn (gen_xop_ashlv16qi3 (operands[0], operands[1], neg));
- }
+ if (negate)
+ emit_insn (gen_negv16qi2 (reg, reg));
+
+ if (<CODE> == LSHIFTRT)
+ shift_insn = gen_xop_shlv16qi3;
 else
- emit_insn (gen_xop_ashlv16qi3 (operands[0], operands[1], reg));
+ shift_insn = gen_xop_shav16qi3;
+ emit_insn (shift_insn (operands[0], operands[1], reg));
 DONE;
})
+;; V2DI arithmetic right shift on XOP: same negated-count technique as
+;; the V16QI expanders above, using the DImode variable shift.
(define_expand "ashrv2di3"
- [(match_operand:V2DI 0 "register_operand" "")
- (match_operand:V2DI 1 "register_operand" "")
- (match_operand:DI 2 "nonmemory_operand" "")]
+ [(set (match_operand:V2DI 0 "register_operand" "")
+ (ashiftrt:V2DI
+ (match_operand:V2DI 1 "register_operand" "")
+ (match_operand:DI 2 "nonmemory_operand" "")))]
"TARGET_XOP"
{
- rtvec vs = rtvec_alloc (2);
- rtx par = gen_rtx_PARALLEL (V2DImode, vs);
rtx reg = gen_reg_rtx (V2DImode);
- rtx ele;
+ rtx par;
+ bool negate = false;
+ int i;
if (CONST_INT_P (operands[2]))
- ele = GEN_INT (- INTVAL (operands[2]));
- else if (GET_MODE (operands[2]) != DImode)
- {
- rtx move = gen_reg_rtx (DImode);
- ele = gen_reg_rtx (DImode);
- convert_move (move, operands[2], false);
- emit_insn (gen_negdi2 (ele, move));
- }
+ operands[2] = GEN_INT (-INTVAL (operands[2]));
else
- {
- ele = gen_reg_rtx (DImode);
- emit_insn (gen_negdi2 (ele, operands[2]));
- }
+ negate = true;
+
+;; Splat the count into both lanes, then negate as a vector if the
+;; count was not a compile-time constant.
+ par = gen_rtx_PARALLEL (V2DImode, rtvec_alloc (2));
+ for (i = 0; i < 2; i++)
+ XVECEXP (par, 0, i) = operands[2];
- RTVEC_ELT (vs, 0) = ele;
- RTVEC_ELT (vs, 1) = ele;
emit_insn (gen_vec_initv2di (reg, par));
- emit_insn (gen_xop_ashlv2di3 (operands[0], operands[1], reg));
+
+ if (negate)
+ emit_insn (gen_negv2di2 (reg, reg))
+
+ emit_insn (gen_xop_shav2di3 (operands[0], operands[1], reg));
DONE;
})
(set_attr "prefix" "vex")
(set_attr "mode" "OI")])
+;; Map a vector mode to the 128-bit vector mode that supplies the
+;; broadcast source element for vpbroadcast below.
+(define_mode_attr AVXTOSSEMODE
+ [(V4DI "V2DI") (V2DI "V2DI")
+ (V8SI "V4SI") (V4SI "V4SI")
+ (V16HI "V8HI") (V8HI "V8HI")
+ (V32QI "V16QI") (V16QI "V16QI")])
+
+;; Broadcast element 0 of the 128-bit source (register or memory) into
+;; every element of the integer-vector destination.
+(define_insn "avx2_pbroadcast<mode>"
+ [(set (match_operand:VI 0 "register_operand" "=x")
+ (vec_duplicate:VI
+ (vec_select:<ssescalarmode>
+ (match_operand:<AVXTOSSEMODE> 1 "nonimmediate_operand" "xm")
+ (parallel [(const_int 0)]))))]
+ "TARGET_AVX2"
+ "vpbroadcast<ssemodesuffix>\t{%1, %0|%0, %1}"
+ [(set_attr "type" "ssemov")
+ (set_attr "prefix_extra" "1")
+ (set_attr "prefix" "vex")
+ (set_attr "mode" "<sseinsnmode>")])
+
+;; vpermd: full-width variable permute of 32-bit elements.  Operand 1
+;; supplies the per-element indices; operand 2 is the data source and
+;; may be in memory.
+(define_insn "avx2_permvarv8si"
+ [(set (match_operand:V8SI 0 "register_operand" "=x")
+ (unspec:V8SI
+ [(match_operand:V8SI 1 "register_operand" "x")
+ (match_operand:V8SI 2 "nonimmediate_operand" "xm")]
+ UNSPEC_VPERMSI))]
+ "TARGET_AVX2"
+ "vpermd\t{%2, %1, %0|%0, %1, %2}"
+ [(set_attr "type" "sselog")
+ (set_attr "prefix" "vex")
+ (set_attr "mode" "OI")])
+
+;; vpermpd: permute the four DF lanes of the source according to 2-bit
+;; fields of the immediate.
+(define_insn "avx2_permv4df"
+ [(set (match_operand:V4DF 0 "register_operand" "=x")
+ (unspec:V4DF
+;; vpermpd accepts ymm/m256 as its source, as the "xm" constraint says,
+;; so the predicate must be nonimmediate_operand (register_operand would
+;; contradict the constraint and reject the memory form before reload).
+ [(match_operand:V4DF 1 "nonimmediate_operand" "xm")
+ (match_operand:SI 2 "const_0_to_255_operand" "n")]
+ UNSPEC_VPERMDF))]
+ "TARGET_AVX2"
+ "vpermpd\t{%2, %1, %0|%0, %1, %2}"
+ [(set_attr "type" "sselog")
+ (set_attr "prefix_extra" "1")
+ (set_attr "prefix" "vex")
+ (set_attr "mode" "OI")])
+
+;; vpermps: variable permute of 32-bit float elements; operand 1 holds
+;; the indices, operand 2 the data source (may be memory).
+;; NOTE(review): the index operand is modeled here with mode V8SF even
+;; though vpermps consumes integer indices -- confirm against the
+;; builtin declaration in i386.c.
+(define_insn "avx2_permvarv8sf"
+ [(set (match_operand:V8SF 0 "register_operand" "=x")
+ (unspec:V8SF
+ [(match_operand:V8SF 1 "register_operand" "x")
+ (match_operand:V8SF 2 "nonimmediate_operand" "xm")]
+ UNSPEC_VPERMSF))]
+ "TARGET_AVX2"
+ "vpermps\t{%2, %1, %0|%0, %1, %2}"
+ [(set_attr "type" "sselog")
+ (set_attr "prefix" "vex")
+ (set_attr "mode" "OI")])
+
+;; Expand the vpermq builtin by decoding the imm8 into four 2-bit lane
+;; selectors so the insn below can expose the permute as a vec_select.
+(define_expand "avx2_permv4di"
+ [(match_operand:V4DI 0 "register_operand" "")
+ (match_operand:V4DI 1 "nonimmediate_operand" "")
+ (match_operand:SI 2 "const_0_to_255_operand" "")]
+ "TARGET_AVX2"
+{
+ int mask = INTVAL (operands[2]);
+ emit_insn (gen_avx2_permv4di_1 (operands[0], operands[1],
+ GEN_INT ((mask >> 0) & 3),
+ GEN_INT ((mask >> 2) & 3),
+ GEN_INT ((mask >> 4) & 3),
+ GEN_INT ((mask >> 6) & 3)));
+ DONE;
+})
+
+;; vpermq expressed as a vec_select with explicit lane selectors; the
+;; output routine packs them back into the imm8.  operands[2] is reused
+;; as scratch to hold the reassembled immediate for the template.
+(define_insn "avx2_permv4di_1"
+ [(set (match_operand:V4DI 0 "register_operand" "=x")
+ (vec_select:V4DI
+ (match_operand:V4DI 1 "nonimmediate_operand" "xm")
+ (parallel [(match_operand 2 "const_0_to_3_operand" "")
+ (match_operand 3 "const_0_to_3_operand" "")
+ (match_operand 4 "const_0_to_3_operand" "")
+ (match_operand 5 "const_0_to_3_operand" "")])))]
+ "TARGET_AVX2"
+{
+ int mask = 0;
+ mask |= INTVAL (operands[2]) << 0;
+ mask |= INTVAL (operands[3]) << 2;
+ mask |= INTVAL (operands[4]) << 4;
+ mask |= INTVAL (operands[5]) << 6;
+ operands[2] = GEN_INT (mask);
+ return "vpermq\t{%2, %1, %0|%0, %1, %2}";
+}
+ [(set_attr "type" "sselog")
+ (set_attr "prefix" "vex")
+ (set_attr "mode" "OI")])
+
+;; vperm2i128: select each 128-bit half of the destination from the
+;; halves of the two sources according to the immediate.
+(define_insn "avx2_permv2ti"
+ [(set (match_operand:V4DI 0 "register_operand" "=x")
+ (unspec:V4DI
+ [(match_operand:V4DI 1 "register_operand" "x")
+ (match_operand:V4DI 2 "nonimmediate_operand" "xm")
+ (match_operand:SI 3 "const_0_to_255_operand" "n")]
+ UNSPEC_VPERMTI))]
+ "TARGET_AVX2"
+ "vperm2i128\t{%3, %2, %1, %0|%0, %1, %2, %3}"
+ [(set_attr "type" "sselog")
+ (set_attr "prefix" "vex")
+ (set_attr "mode" "OI")])
+
+;; Broadcast the low DF element of an xmm register to all four lanes.
+;; Gated on TARGET_AVX2: the register-source form of vbroadcastsd is an
+;; AVX2 addition (AVX only has the memory-source form).
+(define_insn "avx2_vec_dupv4df"
+ [(set (match_operand:V4DF 0 "register_operand" "=x")
+ (vec_duplicate:V4DF
+ (vec_select:DF
+ (match_operand:V2DF 1 "register_operand" "x")
+ (parallel [(const_int 0)]))))]
+ "TARGET_AVX2"
+ "vbroadcastsd\t{%1, %0|%0, %1}"
+ [(set_attr "type" "sselog1")
+ (set_attr "prefix" "vex")
+ (set_attr "mode" "V4DF")])
+
+;; Modes handled by AVX vec_dup patterns.
+(define_mode_iterator AVX_VEC_DUP_MODE
+ [V8SI V8SF V4DI V4DF])
+
(define_insn "vec_dup<mode>"
- [(set (match_operand:AVX256MODE24P 0 "register_operand" "=x,x")
- (vec_duplicate:AVX256MODE24P
+ [(set (match_operand:AVX_VEC_DUP_MODE 0 "register_operand" "=x,x")
+ (vec_duplicate:AVX_VEC_DUP_MODE
(match_operand:<ssescalarmode> 1 "nonimmediate_operand" "m,?x")))]
"TARGET_AVX"
"@
(set_attr "prefix" "vex")
(set_attr "mode" "V8SF")])
+;; Load 128 bits from memory and duplicate them into both halves of a
+;; 256-bit integer vector.
+(define_insn "avx2_vbroadcasti128_<mode>"
+ [(set (match_operand:VI_256 0 "register_operand" "=x")
+ (vec_concat:VI_256
+ (match_operand:<ssehalfvecmode> 1 "memory_operand" "m")
+ (match_dup 1)))]
+ "TARGET_AVX2"
+ "vbroadcasti128\t{%1, %0|%0, %1}"
+ [(set_attr "type" "ssemov")
+ (set_attr "prefix_extra" "1")
+ (set_attr "prefix" "vex")
+ (set_attr "mode" "OI")])
+
+;; After reload, split a 256-bit duplicate from a scalar register into a
+;; 128-bit duplicate followed by a concat of that half with itself;
+;; operands[2] is the low half of the destination register.
(define_split
- [(set (match_operand:AVX256MODE24P 0 "register_operand" "")
- (vec_duplicate:AVX256MODE24P
+ [(set (match_operand:AVX_VEC_DUP_MODE 0 "register_operand" "")
+ (vec_duplicate:AVX_VEC_DUP_MODE
(match_operand:<ssescalarmode> 1 "register_operand" "")))]
"TARGET_AVX && reload_completed"
- [(set (match_dup 2) (vec_duplicate:<ssehalfvecmode> (match_dup 1)))
- (set (match_dup 0) (vec_concat:AVX256MODE24P (match_dup 2) (match_dup 2)))]
+ [(set (match_dup 2)
+ (vec_duplicate:<ssehalfvecmode> (match_dup 1)))
+ (set (match_dup 0)
+ (vec_concat:AVX_VEC_DUP_MODE (match_dup 2) (match_dup 2)))]
"operands[2] = gen_rtx_REG (<ssehalfvecmode>mode, REGNO (operands[0]));")
(define_insn "avx_vbroadcastf128_<mode>"
(match_dup 1)))]
"TARGET_AVX"
"@
- vbroadcastf128\t{%1, %0|%0, %1}
- vinsertf128\t{$1, %1, %0, %0|%0, %0, %1, 1}
- vperm2f128\t{$0, %t1, %t1, %0|%0, %t1, %t1, 0}"
+ vbroadcast<i128>\t{%1, %0|%0, %1}
+ vinsert<i128>\t{$1, %1, %0, %0|%0, %0, %1, 1}
+ vperm2<i128>\t{$0, %t1, %t1, %0|%0, %t1, %t1, 0}"
[(set_attr "type" "ssemov,sselog1,sselog1")
(set_attr "prefix_extra" "1")
(set_attr "length_immediate" "0,1,1")
(set_attr "prefix" "vex")
- (set_attr "mode" "V4SF,V8SF,V8SF")])
+ (set_attr "mode" "<sseinsnmode>")])
;; Recognize broadcast as a vec_select as produced by builtin_vec_perm.
;; If it so happens that the input is in memory, use vbroadcast.
}
operands[1] = adjust_address_nv (op1, <ssescalarmode>mode,
- elt * GET_MODE_SIZE (<ssescalarmode>mode));
+ elt * GET_MODE_SIZE (<ssescalarmode>mode));
})
(define_expand "avx_vpermil<mode>"
(match_operand:SI 3 "const_0_to_255_operand" "n")]
UNSPEC_VPERMIL2F128))]
"TARGET_AVX"
- "vperm2f128\t{%3, %2, %1, %0|%0, %1, %2, %3}"
+ "vperm2<i128>\t{%3, %2, %1, %0|%0, %1, %2, %3}"
[(set_attr "type" "sselog")
(set_attr "prefix_extra" "1")
(set_attr "length_immediate" "1")
(set_attr "prefix" "vex")
- (set_attr "mode" "V8SF")])
+ (set_attr "mode" "<sseinsnmode>")])
(define_insn "*avx_vperm2f128<mode>_nozero"
[(set (match_operand:AVX256MODE2P 0 "register_operand" "=x")
&& avx_vperm2f128_parallel (operands[3], <MODE>mode)"
{
int mask = avx_vperm2f128_parallel (operands[3], <MODE>mode) - 1;
+ if (mask == 0x12)
+ return "vinsert<i128>\t{$0, %x2, %1, %0|%0, %1, %x2, 0}";
+ if (mask == 0x20)
+ return "vinsert<i128>\t{$1, %x2, %1, %0|%0, %1, %x2, 1}";
operands[3] = GEN_INT (mask);
- return "vperm2f128\t{%3, %2, %1, %0|%0, %1, %2, %3}";
+ return "vperm2<i128>\t{%3, %2, %1, %0|%0, %1, %2, %3}";
}
[(set_attr "type" "sselog")
(set_attr "prefix_extra" "1")
(set_attr "length_immediate" "1")
(set_attr "prefix" "vex")
- (set_attr "mode" "V8SF")])
+ (set_attr "mode" "<sseinsnmode>")])
(define_expand "avx_vinsertf128<mode>"
[(match_operand:V_256 0 "register_operand" "")
DONE;
})
+;; Replace the low 128 bits of a V4DI with operand 2, keeping the high
+;; half of operand 1.
+(define_insn "avx2_vec_set_lo_v4di"
+ [(set (match_operand:V4DI 0 "register_operand" "=x")
+ (vec_concat:V4DI
+ (match_operand:V2DI 2 "nonimmediate_operand" "xm")
+ (vec_select:V2DI
+ (match_operand:V4DI 1 "register_operand" "x")
+ (parallel [(const_int 2) (const_int 3)]))))]
+ "TARGET_AVX2"
+ "vinserti128\t{$0x0, %2, %1, %0|%0, %1, %2, 0x0}"
+ [(set_attr "type" "sselog")
+ (set_attr "prefix_extra" "1")
+ (set_attr "length_immediate" "1")
+ (set_attr "prefix" "vex")
+ (set_attr "mode" "OI")])
+
+;; Replace the high 128 bits of a V4DI with operand 2, keeping the low
+;; half of operand 1.
+(define_insn "avx2_vec_set_hi_v4di"
+ [(set (match_operand:V4DI 0 "register_operand" "=x")
+ (vec_concat:V4DI
+ (vec_select:V2DI
+ (match_operand:V4DI 1 "register_operand" "x")
+ (parallel [(const_int 0) (const_int 1)]))
+ (match_operand:V2DI 2 "nonimmediate_operand" "xm")))]
+ "TARGET_AVX2"
+ "vinserti128\t{$0x1, %2, %1, %0|%0, %1, %2, 0x1}"
+ [(set_attr "type" "sselog")
+ (set_attr "prefix_extra" "1")
+ (set_attr "length_immediate" "1")
+ (set_attr "prefix" "vex")
+ (set_attr "mode" "OI")])
+
(define_insn "vec_set_lo_<mode>"
[(set (match_operand:VI8F_256 0 "register_operand" "=x")
(vec_concat:VI8F_256
(match_operand:VI8F_256 1 "register_operand" "x")
(parallel [(const_int 2) (const_int 3)]))))]
"TARGET_AVX"
- "vinsertf128\t{$0x0, %2, %1, %0|%0, %1, %2, 0x0}"
+ "vinsert<i128>\t{$0x0, %2, %1, %0|%0, %1, %2, 0x0}"
[(set_attr "type" "sselog")
(set_attr "prefix_extra" "1")
(set_attr "length_immediate" "1")
(set_attr "prefix" "vex")
- (set_attr "mode" "V8SF")])
+ (set_attr "mode" "<sseinsnmode>")])
(define_insn "vec_set_hi_<mode>"
[(set (match_operand:VI8F_256 0 "register_operand" "=x")
(parallel [(const_int 0) (const_int 1)]))
(match_operand:<ssehalfvecmode> 2 "nonimmediate_operand" "xm")))]
"TARGET_AVX"
- "vinsertf128\t{$0x1, %2, %1, %0|%0, %1, %2, 0x1}"
+ "vinsert<i128>\t{$0x1, %2, %1, %0|%0, %1, %2, 0x1}"
[(set_attr "type" "sselog")
(set_attr "prefix_extra" "1")
(set_attr "length_immediate" "1")
(set_attr "prefix" "vex")
- (set_attr "mode" "V8SF")])
+ (set_attr "mode" "<sseinsnmode>")])
(define_insn "vec_set_lo_<mode>"
[(set (match_operand:VI4F_256 0 "register_operand" "=x")
(parallel [(const_int 4) (const_int 5)
(const_int 6) (const_int 7)]))))]
"TARGET_AVX"
- "vinsertf128\t{$0x0, %2, %1, %0|%0, %1, %2, 0x0}"
+ "vinsert<i128>\t{$0x0, %2, %1, %0|%0, %1, %2, 0x0}"
[(set_attr "type" "sselog")
(set_attr "prefix_extra" "1")
(set_attr "length_immediate" "1")
(set_attr "prefix" "vex")
- (set_attr "mode" "V8SF")])
+ (set_attr "mode" "<sseinsnmode>")])
(define_insn "vec_set_hi_<mode>"
[(set (match_operand:VI4F_256 0 "register_operand" "=x")
(const_int 2) (const_int 3)]))
(match_operand:<ssehalfvecmode> 2 "nonimmediate_operand" "xm")))]
"TARGET_AVX"
- "vinsertf128\t{$0x1, %2, %1, %0|%0, %1, %2, 0x1}"
+ "vinsert<i128>\t{$0x1, %2, %1, %0|%0, %1, %2, 0x1}"
[(set_attr "type" "sselog")
(set_attr "prefix_extra" "1")
(set_attr "length_immediate" "1")
(set_attr "prefix" "vex")
- (set_attr "mode" "V8SF")])
+ (set_attr "mode" "<sseinsnmode>")])
(define_insn "vec_set_lo_v16hi"
[(set (match_operand:V16HI 0 "register_operand" "=x")
(const_int 12) (const_int 13)
(const_int 14) (const_int 15)]))))]
"TARGET_AVX"
- "vinsertf128\t{$0x0, %2, %1, %0|%0, %1, %2, 0x0}"
+ "vinsert%~128\t{$0x0, %2, %1, %0|%0, %1, %2, 0x0}"
[(set_attr "type" "sselog")
(set_attr "prefix_extra" "1")
(set_attr "length_immediate" "1")
(set_attr "prefix" "vex")
- (set_attr "mode" "V8SF")])
+ (set_attr "mode" "OI")])
(define_insn "vec_set_hi_v16hi"
[(set (match_operand:V16HI 0 "register_operand" "=x")
(const_int 6) (const_int 7)]))
(match_operand:V8HI 2 "nonimmediate_operand" "xm")))]
"TARGET_AVX"
- "vinsertf128\t{$0x1, %2, %1, %0|%0, %1, %2, 0x1}"
+ "vinsert%~128\t{$0x1, %2, %1, %0|%0, %1, %2, 0x1}"
[(set_attr "type" "sselog")
(set_attr "prefix_extra" "1")
(set_attr "length_immediate" "1")
(set_attr "prefix" "vex")
- (set_attr "mode" "V8SF")])
+ (set_attr "mode" "OI")])
(define_insn "vec_set_lo_v32qi"
[(set (match_operand:V32QI 0 "register_operand" "=x")
(const_int 28) (const_int 29)
(const_int 30) (const_int 31)]))))]
"TARGET_AVX"
- "vinsertf128\t{$0x0, %2, %1, %0|%0, %1, %2, 0x0}"
+ "vinsert%~128\t{$0x0, %2, %1, %0|%0, %1, %2, 0x0}"
[(set_attr "type" "sselog")
(set_attr "prefix_extra" "1")
(set_attr "length_immediate" "1")
(set_attr "prefix" "vex")
- (set_attr "mode" "V8SF")])
+ (set_attr "mode" "OI")])
(define_insn "vec_set_hi_v32qi"
[(set (match_operand:V32QI 0 "register_operand" "=x")
(const_int 14) (const_int 15)]))
(match_operand:V16QI 2 "nonimmediate_operand" "xm")))]
"TARGET_AVX"
- "vinsertf128\t{$0x1, %2, %1, %0|%0, %1, %2, 0x1}"
+ "vinsert%~128\t{$0x1, %2, %1, %0|%0, %1, %2, 0x1}"
[(set_attr "type" "sselog")
(set_attr "prefix_extra" "1")
(set_attr "length_immediate" "1")
(set_attr "prefix" "vex")
- (set_attr "mode" "V8SF")])
-
-(define_expand "avx_maskload<ssemodesuffix><avxsizesuffix>"
- [(set (match_operand:VF 0 "register_operand" "")
- (unspec:VF
- [(match_operand:<sseintvecmode> 2 "register_operand" "")
- (match_operand:VF 1 "memory_operand" "")
- (match_dup 0)]
- UNSPEC_MASKMOV))]
- "TARGET_AVX")
+ (set_attr "mode" "OI")])
-(define_expand "avx_maskstore<ssemodesuffix><avxsizesuffix>"
- [(set (match_operand:VF 0 "memory_operand" "")
- (unspec:VF
- [(match_operand:<sseintvecmode> 1 "register_operand" "")
- (match_operand:VF 2 "register_operand" "")
- (match_dup 0)]
+;; Masked vector load, now a single define_insn covering both the AVX
+;; float modes and (via V48_AVX2) the AVX2 32/64-bit integer modes.
+;; Operand 2 is the element mask, operand 1 the memory source.
+(define_insn "<avx_avx2>_maskload<ssemodesuffix><avxsizesuffix>"
+ [(set (match_operand:V48_AVX2 0 "register_operand" "=x")
+ (unspec:V48_AVX2
+ [(match_operand:<sseintvecmode> 2 "register_operand" "x")
+ (match_operand:V48_AVX2 1 "memory_operand" "m")]
UNSPEC_MASKMOV))]
- "TARGET_AVX")
+ "TARGET_AVX"
+ "v<sseintprefix>maskmov<ssemodesuffix>\t{%1, %2, %0|%0, %2, %1}"
+ [(set_attr "type" "sselog1")
+ (set_attr "prefix_extra" "1")
+ (set_attr "prefix" "vex")
+ (set_attr "mode" "<sseinsnmode>")])
-(define_insn "*avx_maskmov<ssemodesuffix><avxsizesuffix>"
- [(set (match_operand:VF 0 "nonimmediate_operand" "=x,m")
- (unspec:VF
- [(match_operand:<sseintvecmode> 1 "register_operand" "x,x")
- (match_operand:VF 2 "nonimmediate_operand" "m,x")
+;; Masked vector store, the companion of the maskload insn above.
+;; (match_dup 0) records that unmasked destination elements keep their
+;; old memory contents.
+(define_insn "<avx_avx2>_maskstore<ssemodesuffix><avxsizesuffix>"
+ [(set (match_operand:V48_AVX2 0 "memory_operand" "=m")
+ (unspec:V48_AVX2
+ [(match_operand:<sseintvecmode> 1 "register_operand" "x")
+ (match_operand:V48_AVX2 2 "register_operand" "x")
(match_dup 0)]
UNSPEC_MASKMOV))]
- "TARGET_AVX
- && (REG_P (operands[0]) == MEM_P (operands[2]))"
- "vmaskmov<ssemodesuffix>\t{%2, %1, %0|%0, %1, %2}"
+ "TARGET_AVX"
+ "v<sseintprefix>maskmov<ssemodesuffix>\t{%2, %1, %0|%0, %1, %2}"
[(set_attr "type" "sselog1")
(set_attr "prefix_extra" "1")
(set_attr "prefix" "vex")
- (set_attr "mode" "<MODE>")])
+ (set_attr "mode" "<sseinsnmode>")])
-(define_insn_and_split "avx_<ssemodesuffix><avxsizesuffix>_<ssemodesuffix>"
+(define_insn_and_split "avx_<castmode><avxsizesuffix>_<castmode>"
[(set (match_operand:AVX256MODE2P 0 "nonimmediate_operand" "=x,m")
(unspec:AVX256MODE2P
[(match_operand:<ssehalfvecmode> 1 "nonimmediate_operand" "xm,x")]
rtx op1 = operands[1];
if (REG_P (op0))
op0 = gen_rtx_REG (<ssehalfvecmode>mode, REGNO (op0));
- else
+ else
op1 = gen_rtx_REG (<MODE>mode, REGNO (op1));
emit_move_insn (op0, op1);
DONE;
DONE;
})
-(define_insn "*vec_concat<mode>_avx"
+;; vextracti128 builtin: dispatch on the immediate to the existing
+;; low/high 128-bit extract patterns.
+(define_expand "avx2_extracti128"
+ [(match_operand:V2DI 0 "nonimmediate_operand" "")
+ (match_operand:V4DI 1 "register_operand" "")
+ (match_operand:SI 2 "const_0_to_1_operand" "")]
+ "TARGET_AVX2"
+{
+ rtx (*insn)(rtx, rtx);
+
+ switch (INTVAL (operands[2]))
+ {
+ case 0:
+ insn = gen_vec_extract_lo_v4di;
+ break;
+ case 1:
+ insn = gen_vec_extract_hi_v4di;
+ break;
+ default:
+ gcc_unreachable ();
+ }
+
+ emit_insn (insn (operands[0], operands[1]));
+ DONE;
+})
+
+;; vinserti128 builtin: dispatch on the immediate to the vec_set_lo/hi
+;; patterns defined above.
+(define_expand "avx2_inserti128"
+ [(match_operand:V4DI 0 "register_operand" "")
+ (match_operand:V4DI 1 "register_operand" "")
+ (match_operand:V2DI 2 "nonimmediate_operand" "")
+ (match_operand:SI 3 "const_0_to_1_operand" "")]
+ "TARGET_AVX2"
+{
+ rtx (*insn)(rtx, rtx, rtx);
+
+ switch (INTVAL (operands[3]))
+ {
+ case 0:
+ insn = gen_avx2_vec_set_lo_v4di;
+ break;
+ case 1:
+ insn = gen_avx2_vec_set_hi_v4di;
+ break;
+ default:
+ gcc_unreachable ();
+ }
+
+ emit_insn (insn (operands[0], operands[1], operands[2]));
+ DONE;
+})
+
+;; Per-element variable arithmetic right shift (vpsravd).  Only 32-bit
+;; element modes: AVX2 provides no 64-bit arithmetic variable shift.
+(define_insn "avx2_ashrv<mode>"
+ [(set (match_operand:VI4_AVX2 0 "register_operand" "=x")
+ (ashiftrt:VI4_AVX2
+ (match_operand:VI4_AVX2 1 "register_operand" "x")
+ (match_operand:VI4_AVX2 2 "nonimmediate_operand" "xm")))]
+ "TARGET_AVX2"
+ "vpsravd\t{%2, %1, %0|%0, %1, %2}"
+ [(set_attr "type" "sseishft")
+ (set_attr "prefix" "vex")
+ (set_attr "mode" "<sseinsnmode>")])
+
+;; Per-element variable left / logical-right shifts (vpsllv*, vpsrlv*)
+;; for 32- and 64-bit element modes.
+(define_insn "avx2_<shift_insn>v<mode>"
+ [(set (match_operand:VI48_AVX2 0 "register_operand" "=x")
+ (any_lshift:VI48_AVX2
+ (match_operand:VI48_AVX2 1 "register_operand" "x")
+ (match_operand:VI48_AVX2 2 "nonimmediate_operand" "xm")))]
+ "TARGET_AVX2"
+ "vp<vshift>v<ssemodesuffix>\t{%2, %1, %0|%0, %1, %2}"
+ [(set_attr "type" "sseishft")
+ (set_attr "prefix" "vex")
+ (set_attr "mode" "<sseinsnmode>")])
+
+(define_insn "avx_vec_concat<mode>"
[(set (match_operand:V_256 0 "register_operand" "=x,x")
(vec_concat:V_256
(match_operand:<ssehalfvecmode> 1 "register_operand" "x,x")
switch (which_alternative)
{
case 0:
- return "vinsertf128\t{$0x1, %2, %t1, %0|%0, %t1, %2, 0x1}";
+ return "vinsert<i128>\t{$0x1, %2, %t1, %0|%0, %t1, %2, 0x1}";
case 1:
switch (get_attr_mode (insn))
- {
+ {
case MODE_V8SF:
return "vmovaps\t{%1, %x0|%x0, %1}";
case MODE_V4DF:
[(set (match_operand:V8HI 0 "register_operand" "")
(vec_concat:V8HI
(unspec:V4HI [(match_operand:V4SF 1 "register_operand" "")
- (match_operand:SI 2 "immediate_operand" "")]
+ (match_operand:SI 2 "const_0_to_255_operand" "")]
UNSPEC_VCVTPS2PH)
(match_dup 3)))]
"TARGET_F16C"
[(set (match_operand:V8HI 0 "register_operand" "=x")
(vec_concat:V8HI
(unspec:V4HI [(match_operand:V4SF 1 "register_operand" "x")
- (match_operand:SI 2 "immediate_operand" "N")]
+ (match_operand:SI 2 "const_0_to_255_operand" "N")]
UNSPEC_VCVTPS2PH)
(match_operand:V4HI 3 "const0_operand" "")))]
"TARGET_F16C"
(define_insn "*vcvtps2ph_store"
[(set (match_operand:V4HI 0 "memory_operand" "=m")
(unspec:V4HI [(match_operand:V4SF 1 "register_operand" "x")
- (match_operand:SI 2 "immediate_operand" "N")]
+ (match_operand:SI 2 "const_0_to_255_operand" "N")]
UNSPEC_VCVTPS2PH))]
"TARGET_F16C"
"vcvtps2ph\t{%2, %1, %0|%0, %1, %2}"
(define_insn "vcvtps2ph256"
[(set (match_operand:V8HI 0 "nonimmediate_operand" "=xm")
(unspec:V8HI [(match_operand:V8SF 1 "register_operand" "x")
- (match_operand:SI 2 "immediate_operand" "N")]
+ (match_operand:SI 2 "const_0_to_255_operand" "N")]
UNSPEC_VCVTPS2PH))]
"TARGET_F16C"
"vcvtps2ph\t{%2, %1, %0|%0, %1, %2}"
[(set_attr "type" "ssecvt")
(set_attr "prefix" "vex")
(set_attr "mode" "V8SF")])
+
+;; For gather* insn patterns
+(define_mode_iterator VEC_GATHER_MODE
+ [V2DI V2DF V4DI V4DF V4SI V4SF V8SI V8SF])
+;; Index-vector mode when 32-bit (SImode) indices are used.
+(define_mode_attr VEC_GATHER_IDXSI
+ [(V2DI "V4SI") (V2DF "V4SI")
+ (V4DI "V4SI") (V4DF "V4SI")
+ (V4SI "V4SI") (V4SF "V4SI")
+ (V8SI "V8SI") (V8SF "V8SI")])
+;; Index-vector mode when 64-bit (DImode) indices are used.
+(define_mode_attr VEC_GATHER_IDXDI
+ [(V2DI "V2DI") (V2DF "V2DI")
+ (V4DI "V4DI") (V4DF "V4DI")
+ (V4SI "V2DI") (V4SF "V2DI")
+ (V8SI "V4DI") (V8SF "V4DI")])
+;; Source/mask mode for qword-index gathers: half-width for 32-bit data
+;; with 64-bit indices (fewer data elements than index elements).
+(define_mode_attr VEC_GATHER_SRCDI
+ [(V2DI "V2DI") (V2DF "V2DF")
+ (V4DI "V4DI") (V4DF "V4DF")
+ (V4SI "V4SI") (V4SF "V4SF")
+ (V8SI "V4SI") (V8SF "V4SF")])
+
+;; Gather with SImode indices.  Builds the VSIB address unspec for
+;; match_par_dup 7 from the base, index vector and scale.
+(define_expand "avx2_gathersi<mode>"
+ [(parallel [(set (match_operand:VEC_GATHER_MODE 0 "register_operand" "")
+ (unspec:VEC_GATHER_MODE
+ [(match_operand:VEC_GATHER_MODE 1 "register_operand" "")
+ (mem:<ssescalarmode>
+ (match_par_dup 7
+ [(match_operand 2 "vsib_address_operand" "")
+ (match_operand:<VEC_GATHER_IDXSI>
+ 3 "register_operand" "")
+;; Fixed typo: the predicate string had a stray trailing space
+;; ("const1248_operand ").
+ (match_operand:SI 5 "const1248_operand" "")]))
+ (mem:BLK (scratch))
+ (match_operand:VEC_GATHER_MODE 4 "register_operand" "")]
+ UNSPEC_GATHER))
+ (clobber (match_scratch:VEC_GATHER_MODE 6 ""))])]
+ "TARGET_AVX2"
+{
+ operands[7]
+ = gen_rtx_UNSPEC (Pmode, gen_rtvec (3, operands[2], operands[3],
+ operands[5]), UNSPEC_VSIBADDR);
+})
+
+;; Dword-index gather insn.  Operand 1 is an earlyclobber scratch tied
+;; to the mask input (constraint "1" on operand 5): the instruction
+;; clobbers the mask register.
+(define_insn "*avx2_gathersi<mode>"
+ [(set (match_operand:VEC_GATHER_MODE 0 "register_operand" "=&x")
+ (unspec:VEC_GATHER_MODE
+ [(match_operand:VEC_GATHER_MODE 2 "register_operand" "0")
+ (match_operator:<ssescalarmode> 7 "vsib_mem_operator"
+ [(unspec:P
+ [(match_operand:P 3 "vsib_address_operand" "p")
+ (match_operand:<VEC_GATHER_IDXSI> 4 "register_operand" "x")
+ (match_operand:SI 6 "const1248_operand" "n")]
+ UNSPEC_VSIBADDR)])
+ (mem:BLK (scratch))
+ (match_operand:VEC_GATHER_MODE 5 "register_operand" "1")]
+ UNSPEC_GATHER))
+ (clobber (match_scratch:VEC_GATHER_MODE 1 "=&x"))]
+ "TARGET_AVX2"
+ "v<sseintprefix>gatherd<ssemodesuffix>\t{%1, %7, %0|%0, %7, %1}"
+ [(set_attr "type" "ssemov")
+ (set_attr "prefix" "vex")
+ (set_attr "mode" "<sseinsnmode>")])
+
+;; Variant matched when the merge input has been optimized away; (pc)
+;; stands in as a harmless placeholder for the missing operand.
+(define_insn "*avx2_gathersi<mode>_2"
+ [(set (match_operand:VEC_GATHER_MODE 0 "register_operand" "=&x")
+ (unspec:VEC_GATHER_MODE
+ [(pc)
+ (match_operator:<ssescalarmode> 6 "vsib_mem_operator"
+ [(unspec:P
+ [(match_operand:P 2 "vsib_address_operand" "p")
+ (match_operand:<VEC_GATHER_IDXSI> 3 "register_operand" "x")
+ (match_operand:SI 5 "const1248_operand" "n")]
+ UNSPEC_VSIBADDR)])
+ (mem:BLK (scratch))
+ (match_operand:VEC_GATHER_MODE 4 "register_operand" "1")]
+ UNSPEC_GATHER))
+ (clobber (match_scratch:VEC_GATHER_MODE 1 "=&x"))]
+ "TARGET_AVX2"
+ "v<sseintprefix>gatherd<ssemodesuffix>\t{%1, %6, %0|%0, %6, %1}"
+ [(set_attr "type" "ssemov")
+ (set_attr "prefix" "vex")
+ (set_attr "mode" "<sseinsnmode>")])
+
+;; Gather with DImode indices; source and mask use VEC_GATHER_SRCDI
+;; (half-width for 32-bit element modes).
+(define_expand "avx2_gatherdi<mode>"
+ [(parallel [(set (match_operand:VEC_GATHER_MODE 0 "register_operand" "")
+ (unspec:VEC_GATHER_MODE
+ [(match_operand:<VEC_GATHER_SRCDI> 1 "register_operand" "")
+ (mem:<ssescalarmode>
+ (match_par_dup 7
+ [(match_operand 2 "vsib_address_operand" "")
+ (match_operand:<VEC_GATHER_IDXDI>
+ 3 "register_operand" "")
+;; Fixed typo: the predicate string had a stray trailing space
+;; ("const1248_operand ").
+ (match_operand:SI 5 "const1248_operand" "")]))
+ (mem:BLK (scratch))
+ (match_operand:<VEC_GATHER_SRCDI>
+ 4 "register_operand" "")]
+ UNSPEC_GATHER))
+ (clobber (match_scratch:VEC_GATHER_MODE 6 ""))])]
+ "TARGET_AVX2"
+{
+ operands[7]
+ = gen_rtx_UNSPEC (Pmode, gen_rtvec (3, operands[2], operands[3],
+ operands[5]), UNSPEC_VSIBADDR);
+})
+
+;; Qword-index gather insn; as in the dword version, operand 1 is an
+;; earlyclobber scratch tied to the mask, which the insn clobbers.
+(define_insn "*avx2_gatherdi<mode>"
+ [(set (match_operand:VEC_GATHER_MODE 0 "register_operand" "=&x")
+ (unspec:VEC_GATHER_MODE
+ [(match_operand:<VEC_GATHER_SRCDI> 2 "register_operand" "0")
+ (match_operator:<ssescalarmode> 7 "vsib_mem_operator"
+ [(unspec:P
+ [(match_operand:P 3 "vsib_address_operand" "p")
+ (match_operand:<VEC_GATHER_IDXDI> 4 "register_operand" "x")
+ (match_operand:SI 6 "const1248_operand" "n")]
+ UNSPEC_VSIBADDR)])
+ (mem:BLK (scratch))
+ (match_operand:<VEC_GATHER_SRCDI> 5 "register_operand" "1")]
+ UNSPEC_GATHER))
+ (clobber (match_scratch:VEC_GATHER_MODE 1 "=&x"))]
+ "TARGET_AVX2"
+ "v<sseintprefix>gatherq<ssemodesuffix>\t{%5, %7, %2|%2, %7, %5}"
+ [(set_attr "type" "ssemov")
+ (set_attr "prefix" "vex")
+ (set_attr "mode" "<sseinsnmode>")])
+
+;; Qword-index gather without a merge input ((pc) placeholder).  When
+;; the data mode is narrower than the full destination mode (32-bit
+;; elements with 64-bit indices), emit the xmm form of the destination
+;; via %x0.
+(define_insn "*avx2_gatherdi<mode>_2"
+ [(set (match_operand:VEC_GATHER_MODE 0 "register_operand" "=&x")
+ (unspec:VEC_GATHER_MODE
+ [(pc)
+ (match_operator:<ssescalarmode> 6 "vsib_mem_operator"
+ [(unspec:P
+ [(match_operand:P 2 "vsib_address_operand" "p")
+ (match_operand:<VEC_GATHER_IDXDI> 3 "register_operand" "x")
+ (match_operand:SI 5 "const1248_operand" "n")]
+ UNSPEC_VSIBADDR)])
+ (mem:BLK (scratch))
+ (match_operand:<VEC_GATHER_SRCDI> 4 "register_operand" "1")]
+ UNSPEC_GATHER))
+ (clobber (match_scratch:VEC_GATHER_MODE 1 "=&x"))]
+ "TARGET_AVX2"
+{
+ if (<MODE>mode != <VEC_GATHER_SRCDI>mode)
+ return "v<sseintprefix>gatherq<ssemodesuffix>\t{%4, %6, %x0|%x0, %6, %4}";
+ return "v<sseintprefix>gatherq<ssemodesuffix>\t{%4, %6, %0|%0, %6, %4}";
+}
+ [(set_attr "type" "ssemov")
+ (set_attr "prefix" "vex")
+ (set_attr "mode" "<sseinsnmode>")])