X-Git-Url: http://git.sourceforge.jp/view?a=blobdiff_plain;f=gcc%2Fconfig%2Fi386%2Fsse.md;h=e9f6c3da8fbbe9c6af5a539103527ccfd8c57092;hb=33541f98a3ec41f15aa971e3fe350c546b1f1117;hp=cea13cbf88c9a6ebcc031f54d1d5cbad051cffa5;hpb=908dc1fce67d23202ee6adbd708dee239a6c9b62;p=pf3gnuchains%2Fgcc-fork.git
diff --git a/gcc/config/i386/sse.md b/gcc/config/i386/sse.md
index cea13cbf88c..e9f6c3da8fb 100644
--- a/gcc/config/i386/sse.md
+++ b/gcc/config/i386/sse.md
@@ -18,23 +18,53 @@
;; along with GCC; see the file COPYING3. If not see
;; .
+;; All vector modes including V1TImode, used in move patterns.
+(define_mode_iterator V16
+ [(V32QI "TARGET_AVX") V16QI
+ (V16HI "TARGET_AVX") V8HI
+ (V8SI "TARGET_AVX") V4SI
+ (V4DI "TARGET_AVX") V2DI
+ V1TI
+ (V8SF "TARGET_AVX") V4SF
+ (V4DF "TARGET_AVX") V2DF])
+
+;; All vector modes
+(define_mode_iterator V
+ [(V32QI "TARGET_AVX") V16QI
+ (V16HI "TARGET_AVX") V8HI
+ (V8SI "TARGET_AVX") V4SI
+ (V4DI "TARGET_AVX") V2DI
+ (V8SF "TARGET_AVX") V4SF
+ (V4DF "TARGET_AVX") (V2DF "TARGET_SSE2")])
+
+;; All 128bit vector modes
+(define_mode_iterator V_128
+ [V16QI V8HI V4SI V2DI V4SF (V2DF "TARGET_SSE2")])
+
+;; All 256bit vector modes
+(define_mode_iterator V_256
+ [V32QI V16HI V8SI V4DI V8SF V4DF])
;; All vector float modes
(define_mode_iterator VF
- [(V4SF "TARGET_SSE") (V2DF "TARGET_SSE2")
- (V8SF "TARGET_AVX") (V4DF "TARGET_AVX")])
+ [(V8SF "TARGET_AVX") V4SF
+ (V4DF "TARGET_AVX") (V2DF "TARGET_SSE2")])
;; All SFmode vector float modes
(define_mode_iterator VF1
- [(V4SF "TARGET_SSE") (V8SF "TARGET_AVX")])
+ [(V8SF "TARGET_AVX") V4SF])
;; All DFmode vector float modes
(define_mode_iterator VF2
- [(V2DF "TARGET_SSE2") (V4DF "TARGET_AVX")])
+ [(V4DF "TARGET_AVX") V2DF])
;; All 128bit vector float modes
(define_mode_iterator VF_128
- [(V4SF "TARGET_SSE") (V2DF "TARGET_SSE2")])
+ [V4SF (V2DF "TARGET_SSE2")])
+
+;; All 256bit vector float modes
+(define_mode_iterator VF_256
+ [V8SF V4DF])
;; All vector integer modes
(define_mode_iterator VI
@@ -43,6 +73,14 @@
(V8SI "TARGET_AVX") V4SI
(V4DI "TARGET_AVX") V2DI])
+;; All QImode vector integer modes
+(define_mode_iterator VI1
+ [(V32QI "TARGET_AVX") V16QI])
+
+;; All DImode vector integer modes
+(define_mode_iterator VI8
+ [(V4DI "TARGET_AVX") V2DI])
+
;; All 128bit vector integer modes
(define_mode_iterator VI_128 [V16QI V8HI V4SI V2DI])
@@ -53,128 +91,105 @@
(define_mode_iterator VI24_128 [V8HI V4SI])
(define_mode_iterator VI248_128 [V8HI V4SI V2DI])
+;; Int-float size matches
+(define_mode_iterator VI4F_128 [V4SI V4SF])
+(define_mode_iterator VI8F_128 [V2DI V2DF])
+(define_mode_iterator VI4F_256 [V8SI V8SF])
+(define_mode_iterator VI8F_256 [V4DI V4DF])
-;; Instruction suffix for sign and zero extensions.
-(define_code_attr extsuffix [(sign_extend "sx") (zero_extend "zx")])
+;; Mapping from float mode to required SSE level
+(define_mode_attr sse
+ [(SF "sse") (DF "sse2")
+ (V4SF "sse") (V2DF "sse2")
+ (V8SF "avx") (V4DF "avx")])
-;; All 16-byte vector modes handled by SSE
-(define_mode_iterator SSEMODE [V16QI V8HI V4SI V2DI V4SF V2DF])
-(define_mode_iterator SSEMODE16 [V16QI V8HI V4SI V2DI V1TI V4SF V2DF])
+(define_mode_attr sse2
+ [(V16QI "sse2") (V32QI "avx")
+ (V2DI "sse2") (V4DI "avx")])
-;; All 32-byte vector modes handled by AVX
-(define_mode_iterator AVX256MODE [V32QI V16HI V8SI V4DI V8SF V4DF])
+(define_mode_attr sse3
+ [(V16QI "sse3") (V32QI "avx")])
-;; All QI vector modes handled by AVX
-(define_mode_iterator AVXMODEQI [V32QI V16QI])
+(define_mode_attr sse4_1
+ [(V4SF "sse4_1") (V2DF "sse4_1")
+ (V8SF "avx") (V4DF "avx")])
-;; All DI vector modes handled by AVX
-(define_mode_iterator AVXMODEDI [V4DI V2DI])
+(define_mode_attr avxsizesuffix
+ [(V32QI "256") (V16HI "256") (V8SI "256") (V4DI "256")
+ (V16QI "") (V8HI "") (V4SI "") (V2DI "")
+ (V8SF "256") (V4DF "256")
+ (V4SF "") (V2DF "")])
-;; All vector modes handled by AVX
-(define_mode_iterator AVXMODE
- [V16QI V8HI V4SI V2DI V4SF V2DF V32QI V16HI V8SI V4DI V8SF V4DF])
-(define_mode_iterator AVXMODE16
- [V16QI V8HI V4SI V2DI V1TI V4SF V2DF V32QI V16HI V8SI V4DI V8SF V4DF])
+;; SSE instruction mode
+(define_mode_attr sseinsnmode
+ [(V32QI "OI") (V16HI "OI") (V8SI "OI") (V4DI "OI")
+ (V16QI "TI") (V8HI "TI") (V4SI "TI") (V2DI "TI") (V1TI "TI")
+ (V8SF "V8SF") (V4DF "V4DF")
+ (V4SF "V4SF") (V2DF "V2DF")])
-;; Mix-n-match
-(define_mode_iterator SSEMODE124 [V16QI V8HI V4SI])
-(define_mode_iterator SSEMODE1248 [V16QI V8HI V4SI V2DI])
-(define_mode_iterator SSEMODEF2P [V4SF V2DF])
+;; Mapping of vector float modes to an integer mode of the same size
+(define_mode_attr sseintvecmode
+ [(V8SF "V8SI") (V4DF "V4DI")
+ (V4SF "V4SI") (V2DF "V2DI")])
-(define_mode_iterator AVX256MODEF2P [V8SF V4DF])
-(define_mode_iterator AVX256MODE2P [V8SI V8SF V4DF])
-(define_mode_iterator AVX256MODE24P [V8SI V8SF V4DI V4DF])
-(define_mode_iterator AVX256MODE4P [V4DI V4DF])
-(define_mode_iterator AVX256MODE8P [V8SI V8SF])
-(define_mode_iterator AVXMODEF2P [V4SF V2DF V8SF V4DF])
-(define_mode_iterator AVXMODEFDP [V2DF V4DF])
-(define_mode_iterator AVXMODEFSP [V4SF V8SF])
+;; Mapping of vector modes to a vector mode of double size
+(define_mode_attr ssedoublevecmode
+ [(V32QI "V64QI") (V16HI "V32HI") (V8SI "V16SI") (V4DI "V8DI")
+ (V16QI "V32QI") (V8HI "V16HI") (V4SI "V8SI") (V2DI "V4DI")
+ (V8SF "V16SF") (V4DF "V8DF")
+ (V4SF "V8SF") (V2DF "V4DF")])
+
+;; Mapping of vector modes to a vector mode of half size
+(define_mode_attr ssehalfvecmode
+ [(V32QI "V16QI") (V16HI "V8HI") (V8SI "V4SI") (V4DI "V2DI")
+ (V16QI "V8QI") (V8HI "V4HI") (V4SI "V2SI")
+ (V8SF "V4SF") (V4DF "V2DF")
+ (V4SF "V2SF")])
-(define_mode_iterator FMAMODE [SF DF V4SF V2DF V8SF V4DF])
+;; Mapping of vector modes back to the scalar modes
+(define_mode_attr ssescalarmode
+ [(V32QI "QI") (V16HI "HI") (V8SI "SI") (V4DI "DI")
+ (V16QI "QI") (V8HI "HI") (V4SI "SI") (V2DI "DI")
+ (V8SF "SF") (V4DF "DF")
+ (V4SF "SF") (V2DF "DF")])
-;; Int-float size matches
-(define_mode_iterator SSEMODE4S [V4SF V4SI])
-(define_mode_iterator SSEMODE2D [V2DF V2DI])
+;; Number of scalar elements in each vector type
+(define_mode_attr ssescalarnum
+ [(V32QI "32") (V16HI "16") (V8SI "8") (V4DI "4")
+ (V16QI "16") (V8HI "8") (V4SI "4") (V2DI "2")
+ (V8SF "8") (V4DF "4")
+ (V4SF "4") (V2DF "2")])
-;; Modes handled by vec_extract_even/odd pattern.
-(define_mode_iterator SSEMODE_EO
- [(V4SF "TARGET_SSE")
- (V2DF "TARGET_SSE2")
- (V2DI "TARGET_SSE2") (V4SI "TARGET_SSE2")
- (V8HI "TARGET_SSE2") (V16QI "TARGET_SSE2")
- (V4DF "TARGET_AVX") (V8SF "TARGET_AVX")])
+;; SSE scalar suffix for vector modes
+(define_mode_attr ssescalarmodesuffix
+ [(V8SF "ss") (V4DF "sd")
+ (V4SF "ss") (V2DF "sd")
+ (V8SI "ss") (V4DI "sd")
+ (V4SI "d")])
-;; Modes handled by storent patterns.
-(define_mode_iterator STORENT_MODE
- [(SF "TARGET_SSE4A") (DF "TARGET_SSE4A")
- (SI "TARGET_SSE2") (V2DI "TARGET_SSE2") (V2DF "TARGET_SSE2")
- (V4SF "TARGET_SSE")
- (V4DF "TARGET_AVX") (V8SF "TARGET_AVX")])
+;; Pack/unpack vector modes
+(define_mode_attr sseunpackmode
+ [(V16QI "V8HI") (V8HI "V4SI") (V4SI "V2DI")])
-;; Modes handled by vector extract patterns.
-(define_mode_iterator VEC_EXTRACT_MODE
- [(V2DI "TARGET_SSE") (V4SI "TARGET_SSE")
- (V8HI "TARGET_SSE") (V16QI "TARGET_SSE")
- (V2DF "TARGET_SSE") (V4SF "TARGET_SSE")
- (V4DF "TARGET_AVX") (V8SF "TARGET_AVX")])
+(define_mode_attr ssepackmode
+ [(V8HI "V16QI") (V4SI "V8HI") (V2DI "V4SI")])
-;; Mapping from float mode to required SSE level
-(define_mode_attr sse
- [(SF "sse") (DF "sse2")
- (V4SF "sse") (V2DF "sse2")
- (V8SF "avx") (V4DF "avx")])
+;; Mapping of the max integer size for xop rotate immediate constraint
+(define_mode_attr sserotatemax
+ [(V16QI "7") (V8HI "15") (V4SI "31") (V2DI "63")])
-;; Mapping from integer vector mode to mnemonic suffix
-(define_mode_attr ssevecsize [(V16QI "b") (V8HI "w") (V4SI "d") (V2DI "q")])
+;; Mapping of mode to cast intrinsic name
+(define_mode_attr castmode [(V8SI "si") (V8SF "ps") (V4DF "pd")])
-;; Mapping of the insn mnemonic suffix
-(define_mode_attr ssemodesuffix
- [(SF "ss") (DF "sd") (V4SF "ps") (V2DF "pd") (V8SF "ps") (V4DF "pd")
- (V8SI "ps") (V4DI "pd")])
-(define_mode_attr ssescalarmodesuffix
- [(SF "ss") (DF "sd") (V4SF "ss") (V2DF "sd") (V8SF "ss") (V8SI "ss")
- (V4DF "sd") (V4SI "d") (V4DI "sd")])
+;; Instruction suffix for sign and zero extensions.
+(define_code_attr extsuffix [(sign_extend "sx") (zero_extend "zx")])
-;; Mapping of the max integer size for xop rotate immediate constraint
-(define_mode_attr sserotatemax [(V16QI "7") (V8HI "15") (V4SI "31") (V2DI "63")])
-;; Mapping of vector modes back to the scalar modes
-(define_mode_attr ssescalarmode [(V4SF "SF") (V2DF "DF")
- (V16QI "QI") (V8HI "HI")
- (V4SI "SI") (V2DI "DI")])
-;; Mapping of vector modes to a vector mode of double size
-(define_mode_attr ssedoublesizemode
- [(V2DF "V4DF") (V2DI "V4DI") (V4SF "V8SF") (V4SI "V8SI")
- (V8HI "V16HI") (V16QI "V32QI")
- (V4DF "V8DF") (V8SF "V16SF")
- (V4DI "V8DI") (V8SI "V16SI") (V16HI "V32HI") (V32QI "V64QI")])
+;; Mix-n-match
+(define_mode_iterator AVX256MODE2P [V8SI V8SF V4DF])
-;; Number of scalar elements in each vector type
-(define_mode_attr ssescalarnum
- [(V4SF "4") (V2DF "2") (V16QI "16") (V8HI "8") (V4SI "4") (V2DI "2")
- (V8SF "8") (V4DF "4") (V32QI "32") (V16HI "16") (V8SI "8") (V4DI "4")])
-
-;; Mapping for AVX
-(define_mode_attr avxvecmode
- [(V16QI "TI") (V8HI "TI") (V4SI "TI") (V2DI "TI") (V1TI "TI")
- (V4SF "V4SF") (V8SF "V8SF") (V2DF "V2DF") (V4DF "V4DF")
- (V32QI "OI") (V16HI "OI") (V8SI "OI") (V4DI "OI")])
-(define_mode_attr avxhalfvecmode
- [(V32QI "V16QI") (V16HI "V8HI") (V8SI "V4SI") (V4DI "V2DI")
- (V8SF "V4SF") (V4DF "V2DF")
- (V16QI "V8QI") (V8HI "V4HI") (V4SI "V2SI") (V4SF "V2SF")])
-(define_mode_attr avxscalarmode
- [(V16QI "QI") (V8HI "HI") (V4SI "SI") (V2DI "DI") (V4SF "SF") (V2DF "DF")
- (V32QI "QI") (V16HI "HI") (V8SI "SI") (V4DI "DI") (V8SF "SF") (V4DF "DF")])
-(define_mode_attr avxpermvecmode
- [(V2DF "V2DI") (V4SF "V4SI") (V4DF "V4DI") (V8SF "V8SI")])
-(define_mode_attr avxmodesuffixp
- [(V2DF "pd") (V4SI "si") (V4SF "ps") (V8SF "ps") (V8SI "si")
- (V4DF "pd")])
-(define_mode_attr avxmodesuffix
- [(V16QI "") (V32QI "256") (V4SI "") (V4SF "") (V2DF "")
- (V8SI "256") (V8SF "256") (V4DF "256")])
+(define_mode_iterator FMAMODE [SF DF V4SF V2DF V8SF V4DF])
;; Mapping of immediate bits for blend instructions
(define_mode_attr blendbits
@@ -188,19 +203,22 @@
;;
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;; All of these patterns are enabled for SSE1 as well as SSE2.
+;; This is essential for maintaining stable calling conventions.
+
(define_expand "mov"
- [(set (match_operand:AVX256MODE 0 "nonimmediate_operand" "")
- (match_operand:AVX256MODE 1 "nonimmediate_operand" ""))]
- "TARGET_AVX"
+ [(set (match_operand:V16 0 "nonimmediate_operand" "")
+ (match_operand:V16 1 "nonimmediate_operand" ""))]
+ "TARGET_SSE"
{
ix86_expand_vector_move (mode, operands);
DONE;
})
-(define_insn "*avx_mov_internal"
- [(set (match_operand:AVXMODE16 0 "nonimmediate_operand" "=x,x ,m")
- (match_operand:AVXMODE16 1 "nonimmediate_or_sse_const_operand" "C ,xm,x"))]
- "TARGET_AVX
+(define_insn "*mov_internal"
+ [(set (match_operand:V16 0 "nonimmediate_operand" "=x,x ,m")
+ (match_operand:V16 1 "nonimmediate_or_sse_const_operand" "C ,xm,x"))]
+ "TARGET_SSE
&& (register_operand (operands[0], mode)
|| register_operand (operands[1], mode))"
{
@@ -214,85 +232,51 @@
{
case MODE_V8SF:
case MODE_V4SF:
- if (misaligned_operand (operands[0], mode)
- || misaligned_operand (operands[1], mode))
+ if (TARGET_AVX
+ && (misaligned_operand (operands[0], mode)
+ || misaligned_operand (operands[1], mode)))
return "vmovups\t{%1, %0|%0, %1}";
else
- return "vmovaps\t{%1, %0|%0, %1}";
+ return "%vmovaps\t{%1, %0|%0, %1}";
+
case MODE_V4DF:
case MODE_V2DF:
- if (misaligned_operand (operands[0], mode)
- || misaligned_operand (operands[1], mode))
+ if (TARGET_AVX
+ && (misaligned_operand (operands[0], mode)
+ || misaligned_operand (operands[1], mode)))
return "vmovupd\t{%1, %0|%0, %1}";
else if (TARGET_SSE_PACKED_SINGLE_INSN_OPTIMAL)
- return "vmovaps\t{%1, %0|%0, %1}";
+ return "%vmovaps\t{%1, %0|%0, %1}";
else
- return "vmovapd\t{%1, %0|%0, %1}";
- default:
- if (misaligned_operand (operands[0], mode)
- || misaligned_operand (operands[1], mode))
+ return "%vmovapd\t{%1, %0|%0, %1}";
+
+ case MODE_OI:
+ case MODE_TI:
+ if (TARGET_AVX
+ && (misaligned_operand (operands[0], mode)
+ || misaligned_operand (operands[1], mode)))
return "vmovdqu\t{%1, %0|%0, %1}";
else if (TARGET_SSE_PACKED_SINGLE_INSN_OPTIMAL)
- return "vmovaps\t{%1, %0|%0, %1}";
+ return "%vmovaps\t{%1, %0|%0, %1}";
else
- return "vmovdqa\t{%1, %0|%0, %1}";
- }
- default:
- gcc_unreachable ();
- }
-}
- [(set_attr "type" "sselog1,ssemov,ssemov")
- (set_attr "prefix" "vex")
- (set_attr "mode" "")])
-
-;; All of these patterns are enabled for SSE1 as well as SSE2.
-;; This is essential for maintaining stable calling conventions.
-
-(define_expand "mov"
- [(set (match_operand:SSEMODE16 0 "nonimmediate_operand" "")
- (match_operand:SSEMODE16 1 "nonimmediate_operand" ""))]
- "TARGET_SSE"
-{
- ix86_expand_vector_move (mode, operands);
- DONE;
-})
+ return "%vmovdqa\t{%1, %0|%0, %1}";
-(define_insn "*mov_internal"
- [(set (match_operand:SSEMODE16 0 "nonimmediate_operand" "=x,x ,m")
- (match_operand:SSEMODE16 1 "nonimmediate_or_sse_const_operand" "C ,xm,x"))]
- "TARGET_SSE
- && (register_operand (operands[0], mode)
- || register_operand (operands[1], mode))"
-{
- switch (which_alternative)
- {
- case 0:
- return standard_sse_constant_opcode (insn, operands[1]);
- case 1:
- case 2:
- switch (get_attr_mode (insn))
- {
- case MODE_V4SF:
- return "movaps\t{%1, %0|%0, %1}";
- case MODE_V2DF:
- if (TARGET_SSE_PACKED_SINGLE_INSN_OPTIMAL)
- return "movaps\t{%1, %0|%0, %1}";
- else
- return "movapd\t{%1, %0|%0, %1}";
default:
- if (TARGET_SSE_PACKED_SINGLE_INSN_OPTIMAL)
- return "movaps\t{%1, %0|%0, %1}";
- else
- return "movdqa\t{%1, %0|%0, %1}";
+ gcc_unreachable ();
}
default:
gcc_unreachable ();
}
}
[(set_attr "type" "sselog1,ssemov,ssemov")
+ (set_attr "prefix" "maybe_vex")
(set (attr "mode")
- (cond [(ior (ior (ne (symbol_ref "optimize_function_for_size_p (cfun)") (const_int 0))
- (eq (symbol_ref "TARGET_SSE2") (const_int 0)))
+ (cond [(ne (symbol_ref "TARGET_AVX") (const_int 0))
+ (const_string "")
+ (ior (ior
+ (ne (symbol_ref "optimize_function_for_size_p (cfun)")
+ (const_int 0))
+ (eq (symbol_ref "TARGET_SSE2") (const_int 0)))
(and (eq_attr "alternative" "2")
(ne (symbol_ref "TARGET_SSE_TYPELESS_STORES")
(const_int 0))))
@@ -304,6 +288,19 @@
]
(const_string "TI")))])
+(define_insn "sse2_movq128"
+ [(set (match_operand:V2DI 0 "register_operand" "=x")
+ (vec_concat:V2DI
+ (vec_select:DI
+ (match_operand:V2DI 1 "nonimmediate_operand" "xm")
+ (parallel [(const_int 0)]))
+ (const_int 0)))]
+ "TARGET_SSE2"
+ "%vmovq\t{%1, %0|%0, %1}"
+ [(set_attr "type" "ssemov")
+ (set_attr "prefix" "maybe_vex")
+ (set_attr "mode" "TI")])
+
;; Move a DI from a 32-bit register pair (e.g. %edx:%eax) to an xmm.
;; We'd rather avoid this entirely; if the 32-bit reg pair was loaded
;; from memory, we'd prefer to load the memory directly into the %xmm
@@ -366,15 +363,7 @@
})
(define_expand "push1"
- [(match_operand:AVX256MODE 0 "register_operand" "")]
- "TARGET_AVX"
-{
- ix86_expand_push (mode, operands[0]);
- DONE;
-})
-
-(define_expand "push1"
- [(match_operand:SSEMODE16 0 "register_operand" "")]
+ [(match_operand:V16 0 "register_operand" "")]
"TARGET_SSE"
{
ix86_expand_push (mode, operands[0]);
@@ -382,168 +371,83 @@
})
(define_expand "movmisalign"
- [(set (match_operand:AVX256MODE 0 "nonimmediate_operand" "")
- (match_operand:AVX256MODE 1 "nonimmediate_operand" ""))]
- "TARGET_AVX"
-{
- ix86_expand_vector_move_misalign (mode, operands);
- DONE;
-})
-
-(define_expand "movmisalign"
- [(set (match_operand:SSEMODE16 0 "nonimmediate_operand" "")
- (match_operand:SSEMODE16 1 "nonimmediate_operand" ""))]
+ [(set (match_operand:V16 0 "nonimmediate_operand" "")
+ (match_operand:V16 1 "nonimmediate_operand" ""))]
"TARGET_SSE"
{
ix86_expand_vector_move_misalign (mode, operands);
DONE;
})
-(define_expand "avx_movu"
- [(set (match_operand:AVXMODEF2P 0 "nonimmediate_operand" "")
- (unspec:AVXMODEF2P
- [(match_operand:AVXMODEF2P 1 "nonimmediate_operand" "")]
+(define_expand "_movu"
+ [(set (match_operand:VF 0 "nonimmediate_operand" "")
+ (unspec:VF
+ [(match_operand:VF 1 "nonimmediate_operand" "")]
UNSPEC_MOVU))]
- "AVX_VEC_FLOAT_MODE_P (mode)"
+ "TARGET_SSE"
{
if (MEM_P (operands[0]) && MEM_P (operands[1]))
operands[1] = force_reg (mode, operands[1]);
})
-(define_insn "*avx_movu"
- [(set (match_operand:AVXMODEF2P 0 "nonimmediate_operand" "=x,m")
- (unspec:AVXMODEF2P
- [(match_operand:AVXMODEF2P 1 "nonimmediate_operand" "xm,x")]
+(define_insn "*_movu"
+ [(set (match_operand:VF 0 "nonimmediate_operand" "=x,m")
+ (unspec:VF
+ [(match_operand:VF 1 "nonimmediate_operand" "xm,x")]
UNSPEC_MOVU))]
- "AVX_VEC_FLOAT_MODE_P (mode)
- && !(MEM_P (operands[0]) && MEM_P (operands[1]))"
- "vmovu\t{%1, %0|%0, %1}"
+ "TARGET_SSE && !(MEM_P (operands[0]) && MEM_P (operands[1]))"
+ "%vmovu\t{%1, %0|%0, %1}"
[(set_attr "type" "ssemov")
(set_attr "movu" "1")
- (set_attr "prefix" "vex")
- (set_attr "mode" "")])
-
-(define_insn "sse2_movq128"
- [(set (match_operand:V2DI 0 "register_operand" "=x")
- (vec_concat:V2DI
- (vec_select:DI
- (match_operand:V2DI 1 "nonimmediate_operand" "xm")
- (parallel [(const_int 0)]))
- (const_int 0)))]
- "TARGET_SSE2"
- "%vmovq\t{%1, %0|%0, %1}"
- [(set_attr "type" "ssemov")
(set_attr "prefix" "maybe_vex")
- (set_attr "mode" "TI")])
-
-(define_expand "_movu"
- [(set (match_operand:SSEMODEF2P 0 "nonimmediate_operand" "")
- (unspec:SSEMODEF2P
- [(match_operand:SSEMODEF2P 1 "nonimmediate_operand" "")]
- UNSPEC_MOVU))]
- "SSE_VEC_FLOAT_MODE_P (mode)"
-{
- if (MEM_P (operands[0]) && MEM_P (operands[1]))
- operands[1] = force_reg (mode, operands[1]);
-})
-
-(define_insn "*_movu"
- [(set (match_operand:SSEMODEF2P 0 "nonimmediate_operand" "=x,m")
- (unspec:SSEMODEF2P
- [(match_operand:SSEMODEF2P 1 "nonimmediate_operand" "xm,x")]
- UNSPEC_MOVU))]
- "SSE_VEC_FLOAT_MODE_P (mode)
- && !(MEM_P (operands[0]) && MEM_P (operands[1]))"
- "movu\t{%1, %0|%0, %1}"
- [(set_attr "type" "ssemov")
- (set_attr "movu" "1")
(set_attr "mode" "")])
-(define_expand "avx_movdqu"
- [(set (match_operand:AVXMODEQI 0 "nonimmediate_operand" "")
- (unspec:AVXMODEQI
- [(match_operand:AVXMODEQI 1 "nonimmediate_operand" "")]
- UNSPEC_MOVU))]
- "TARGET_AVX"
-{
- if (MEM_P (operands[0]) && MEM_P (operands[1]))
- operands[1] = force_reg (mode, operands[1]);
-})
-
-(define_insn "*avx_movdqu"
- [(set (match_operand:AVXMODEQI 0 "nonimmediate_operand" "=x,m")
- (unspec:AVXMODEQI
- [(match_operand:AVXMODEQI 1 "nonimmediate_operand" "xm,x")]
- UNSPEC_MOVU))]
- "TARGET_AVX && !(MEM_P (operands[0]) && MEM_P (operands[1]))"
- "vmovdqu\t{%1, %0|%0, %1}"
- [(set_attr "type" "ssemov")
- (set_attr "movu" "1")
- (set_attr "prefix" "vex")
- (set_attr "mode" "")])
-
-(define_expand "sse2_movdqu"
- [(set (match_operand:V16QI 0 "nonimmediate_operand" "")
- (unspec:V16QI [(match_operand:V16QI 1 "nonimmediate_operand" "")]
- UNSPEC_MOVU))]
+(define_expand "_movdqu"
+ [(set (match_operand:VI1 0 "nonimmediate_operand" "")
+ (unspec:VI1 [(match_operand:VI1 1 "nonimmediate_operand" "")]
+ UNSPEC_MOVU))]
"TARGET_SSE2"
{
if (MEM_P (operands[0]) && MEM_P (operands[1]))
- operands[1] = force_reg (V16QImode, operands[1]);
+ operands[1] = force_reg (mode, operands[1]);
})
-(define_insn "*sse2_movdqu"
- [(set (match_operand:V16QI 0 "nonimmediate_operand" "=x,m")
- (unspec:V16QI [(match_operand:V16QI 1 "nonimmediate_operand" "xm,x")]
- UNSPEC_MOVU))]
+(define_insn "*_movdqu"
+ [(set (match_operand:VI1 0 "nonimmediate_operand" "=x,m")
+ (unspec:VI1 [(match_operand:VI1 1 "nonimmediate_operand" "xm,x")]
+ UNSPEC_MOVU))]
"TARGET_SSE2 && !(MEM_P (operands[0]) && MEM_P (operands[1]))"
- "movdqu\t{%1, %0|%0, %1}"
+ "%vmovdqu\t{%1, %0|%0, %1}"
[(set_attr "type" "ssemov")
(set_attr "movu" "1")
- (set_attr "prefix_data16" "1")
- (set_attr "mode" "TI")])
-
-(define_insn "avx_movnt"
- [(set (match_operand:AVXMODEF2P 0 "memory_operand" "=m")
- (unspec:AVXMODEF2P
- [(match_operand:AVXMODEF2P 1 "register_operand" "x")]
- UNSPEC_MOVNT))]
- "AVX_VEC_FLOAT_MODE_P (mode)"
- "vmovnt\t{%1, %0|%0, %1}"
- [(set_attr "type" "ssemov")
- (set_attr "prefix" "vex")
- (set_attr "mode" "")])
-
-(define_insn "_movnt"
- [(set (match_operand:SSEMODEF2P 0 "memory_operand" "=m")
- (unspec:SSEMODEF2P
- [(match_operand:SSEMODEF2P 1 "register_operand" "x")]
- UNSPEC_MOVNT))]
- "SSE_VEC_FLOAT_MODE_P (mode)"
- "movnt\t{%1, %0|%0, %1}"
- [(set_attr "type" "ssemov")
- (set_attr "mode" "")])
-
-(define_insn "avx_movnt"
- [(set (match_operand:AVXMODEDI 0 "memory_operand" "=m")
- (unspec:AVXMODEDI
- [(match_operand:AVXMODEDI 1 "register_operand" "x")]
- UNSPEC_MOVNT))]
- "TARGET_AVX"
- "vmovntdq\t{%1, %0|%0, %1}"
- [(set_attr "type" "ssecvt")
- (set_attr "prefix" "vex")
- (set_attr "mode" "")])
+ (set (attr "prefix_data16")
+ (if_then_else
+ (ne (symbol_ref "TARGET_AVX") (const_int 0))
+ (const_string "*")
+ (const_string "1")))
+ (set_attr "prefix" "maybe_vex")
+ (set_attr "mode" "")])
-(define_insn "sse2_movntv2di"
- [(set (match_operand:V2DI 0 "memory_operand" "=m")
- (unspec:V2DI [(match_operand:V2DI 1 "register_operand" "x")]
- UNSPEC_MOVNT))]
- "TARGET_SSE2"
- "movntdq\t{%1, %0|%0, %1}"
+(define_insn "_lddqu"
+ [(set (match_operand:VI1 0 "register_operand" "=x")
+ (unspec:VI1 [(match_operand:VI1 1 "memory_operand" "m")]
+ UNSPEC_LDDQU))]
+ "TARGET_SSE3"
+ "%vlddqu\t{%1, %0|%0, %1}"
[(set_attr "type" "ssemov")
- (set_attr "prefix_data16" "1")
- (set_attr "mode" "TI")])
+ (set_attr "movu" "1")
+ (set (attr "prefix_data16")
+ (if_then_else
+ (ne (symbol_ref "TARGET_AVX") (const_int 0))
+ (const_string "*")
+ (const_string "0")))
+ (set (attr "prefix_rep")
+ (if_then_else
+ (ne (symbol_ref "TARGET_AVX") (const_int 0))
+ (const_string "*")
+ (const_string "1")))
+ (set_attr "prefix" "maybe_vex")
+ (set_attr "mode" "")])
(define_insn "sse2_movntsi"
[(set (match_operand:SI 0 "memory_operand" "=m")
@@ -555,39 +459,48 @@
(set_attr "prefix_data16" "0")
(set_attr "mode" "V2DF")])
-(define_insn "avx_lddqu"
- [(set (match_operand:AVXMODEQI 0 "register_operand" "=x")
- (unspec:AVXMODEQI
- [(match_operand:AVXMODEQI 1 "memory_operand" "m")]
- UNSPEC_LDDQU))]
- "TARGET_AVX"
- "vlddqu\t{%1, %0|%0, %1}"
- [(set_attr "type" "ssecvt")
- (set_attr "movu" "1")
- (set_attr "prefix" "vex")
- (set_attr "mode" "")])
-
-(define_insn "sse3_lddqu"
- [(set (match_operand:V16QI 0 "register_operand" "=x")
- (unspec:V16QI [(match_operand:V16QI 1 "memory_operand" "m")]
- UNSPEC_LDDQU))]
- "TARGET_SSE3"
- "lddqu\t{%1, %0|%0, %1}"
+(define_insn "_movnt"
+ [(set (match_operand:VF 0 "memory_operand" "=m")
+ (unspec:VF [(match_operand:VF 1 "register_operand" "x")]
+ UNSPEC_MOVNT))]
+ "TARGET_SSE"
+ "%vmovnt\t{%1, %0|%0, %1}"
[(set_attr "type" "ssemov")
- (set_attr "movu" "1")
- (set_attr "prefix_data16" "0")
- (set_attr "prefix_rep" "1")
- (set_attr "mode" "TI")])
+ (set_attr "prefix" "maybe_vex")
+ (set_attr "mode" "")])
+
+(define_insn "_movnt"
+ [(set (match_operand:VI8 0 "memory_operand" "=m")
+ (unspec:VI8 [(match_operand:VI8 1 "register_operand" "x")]
+ UNSPEC_MOVNT))]
+ "TARGET_SSE2"
+ "%vmovntdq\t{%1, %0|%0, %1}"
+ [(set_attr "type" "ssecvt")
+ (set (attr "prefix_data16")
+ (if_then_else
+ (ne (symbol_ref "TARGET_AVX") (const_int 0))
+ (const_string "*")
+ (const_string "1")))
+ (set_attr "prefix" "maybe_vex")
+ (set_attr "mode" "")])
; Expand patterns for non-temporal stores. At the moment, only those
; that directly map to insns are defined; it would be possible to
; define patterns for other modes that would expand to several insns.
+;; Modes handled by storent patterns.
+(define_mode_iterator STORENT_MODE
+ [(SI "TARGET_SSE2") (SF "TARGET_SSE4A") (DF "TARGET_SSE4A")
+ (V2DI "TARGET_SSE2")
+ (V8SF "TARGET_AVX") V4SF
+ (V4DF "TARGET_AVX") (V2DF "TARGET_SSE2")])
+
(define_expand "storent"
[(set (match_operand:STORENT_MODE 0 "memory_operand" "")
(unspec:STORENT_MODE
[(match_operand:STORENT_MODE 1 "register_operand" "")]
- UNSPEC_MOVNT))])
+ UNSPEC_MOVNT))]
+ "TARGET_SSE")
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;;
@@ -599,7 +512,7 @@
[(set (match_operand:VF 0 "register_operand" "")
(absneg:VF
(match_operand:VF 1 "register_operand" "")))]
- ""
+ "TARGET_SSE"
"ix86_expand_fp_absneg_operator (, mode, operands); DONE;")
(define_insn_and_split "*absneg2"
@@ -607,7 +520,7 @@
(match_operator:VF 3 "absneg_operator"
[(match_operand:VF 1 "nonimmediate_operand" "0, xm,x, m")]))
(use (match_operand:VF 2 "nonimmediate_operand" "xm,0, xm,x"))]
- ""
+ "TARGET_SSE"
"#"
"reload_completed"
[(const_int 0)]
@@ -645,7 +558,7 @@
(plusminus:VF
(match_operand:VF 1 "nonimmediate_operand" "")
(match_operand:VF 2 "nonimmediate_operand" "")))]
- ""
+ "TARGET_SSE"
"ix86_fixup_binary_operands_no_copy (, mode, operands);")
(define_insn "*3"
@@ -653,7 +566,7 @@
(plusminus:VF
(match_operand:VF 1 "nonimmediate_operand" "0,x")
(match_operand:VF 2 "nonimmediate_operand" "xm,xm")))]
- "ix86_binary_operator_ok (, mode, operands)"
+ "TARGET_SSE && ix86_binary_operator_ok (, mode, operands)"
"@
\t{%2, %0|%0, %2}
v\t{%2, %1, %0|%0, %1, %2}"
@@ -670,7 +583,7 @@
(match_operand:VF_128 2 "nonimmediate_operand" "xm,xm"))
(match_dup 1)
(const_int 1)))]
- ""
+ "TARGET_SSE"
"@
\t{%2, %0|%0, %2}
v\t{%2, %1, %0|%0, %1, %2}"
@@ -684,7 +597,7 @@
(mult:VF
(match_operand:VF 1 "nonimmediate_operand" "")
(match_operand:VF 2 "nonimmediate_operand" "")))]
- ""
+ "TARGET_SSE"
"ix86_fixup_binary_operands_no_copy (MULT, mode, operands);")
(define_insn "*mul3"
@@ -692,7 +605,7 @@
(mult:VF
(match_operand:VF 1 "nonimmediate_operand" "%0,x")
(match_operand:VF 2 "nonimmediate_operand" "xm,xm")))]
- "ix86_binary_operator_ok (MULT, mode, operands)"
+ "TARGET_SSE && ix86_binary_operator_ok (MULT, mode, operands)"
"@
mul\t{%2, %0|%0, %2}
vmul\t{%2, %1, %0|%0, %1, %2}"
@@ -709,7 +622,7 @@
(match_operand:VF_128 2 "nonimmediate_operand" "xm,xm"))
(match_dup 1)
(const_int 1)))]
- ""
+ "TARGET_SSE"
"@
mul\t{%2, %0|%0, %2}
vmul\t{%2, %1, %0|%0, %1, %2}"
@@ -722,14 +635,14 @@
[(set (match_operand:VF2 0 "register_operand" "")
(div:VF2 (match_operand:VF2 1 "register_operand" "")
(match_operand:VF2 2 "nonimmediate_operand" "")))]
- ""
+ "TARGET_SSE2"
"ix86_fixup_binary_operands_no_copy (DIV, mode, operands);")
(define_expand "div3"
[(set (match_operand:VF1 0 "register_operand" "")
(div:VF1 (match_operand:VF1 1 "register_operand" "")
(match_operand:VF1 2 "nonimmediate_operand" "")))]
- ""
+ "TARGET_SSE"
{
ix86_fixup_binary_operands_no_copy (DIV, mode, operands);
@@ -747,7 +660,7 @@
(div:VF
(match_operand:VF 1 "register_operand" "0,x")
(match_operand:VF 2 "nonimmediate_operand" "xm,xm")))]
- ""
+ "TARGET_SSE"
"@
div\t{%2, %0|%0, %2}
vdiv\t{%2, %1, %0|%0, %1, %2}"
@@ -764,7 +677,7 @@
(match_operand:VF_128 2 "nonimmediate_operand" "xm,xm"))
(match_dup 1)
(const_int 1)))]
- ""
+ "TARGET_SSE"
"@
div\t{%2, %0|%0, %2}
vdiv\t{%2, %1, %0|%0, %1, %2}"
@@ -777,7 +690,7 @@
[(set (match_operand:VF1 0 "register_operand" "=x")
(unspec:VF1
[(match_operand:VF1 1 "nonimmediate_operand" "xm")] UNSPEC_RCP))]
- ""
+ "TARGET_SSE"
"%vrcpps\t{%1, %0|%0, %1}"
[(set_attr "type" "sse")
(set_attr "atom_sse_attr" "rcp")
@@ -803,12 +716,13 @@
(define_expand "sqrt2"
[(set (match_operand:VF2 0 "register_operand" "")
- (sqrt:VF2 (match_operand:VF2 1 "nonimmediate_operand" "")))])
+ (sqrt:VF2 (match_operand:VF2 1 "nonimmediate_operand" "")))]
+ "TARGET_SSE2")
(define_expand "sqrt2"
[(set (match_operand:VF1 0 "register_operand" "")
(sqrt:VF1 (match_operand:VF1 1 "nonimmediate_operand" "")))]
- ""
+ "TARGET_SSE"
{
if (TARGET_SSE_MATH && TARGET_RECIP && !optimize_insn_for_size_p ()
&& flag_finite_math_only && !flag_trapping_math
@@ -822,7 +736,7 @@
(define_insn "_sqrt2"
[(set (match_operand:VF 0 "register_operand" "=x")
(sqrt:VF (match_operand:VF 1 "nonimmediate_operand" "xm")))]
- ""
+ "TARGET_SSE"
"%vsqrt\t{%1, %0|%0, %1}"
[(set_attr "type" "sse")
(set_attr "atom_sse_attr" "sqrt")
@@ -836,7 +750,7 @@
(match_operand:VF_128 1 "nonimmediate_operand" "xm,xm"))
(match_operand:VF_128 2 "register_operand" "0,x")
(const_int 1)))]
- ""
+ "TARGET_SSE"
"@
sqrt\t{%1, %0|%0, %1}
vsqrt\t{%1, %2, %0|%0, %2, %1}"
@@ -860,7 +774,7 @@
[(set (match_operand:VF1 0 "register_operand" "=x")
(unspec:VF1
[(match_operand:VF1 1 "nonimmediate_operand" "xm")] UNSPEC_RSQRT))]
- ""
+ "TARGET_SSE"
"%vrsqrtps\t{%1, %0|%0, %1}"
[(set_attr "type" "sse")
(set_attr "prefix" "maybe_vex")
@@ -891,7 +805,7 @@
(smaxmin:VF
(match_operand:VF 1 "nonimmediate_operand" "")
(match_operand:VF 2 "nonimmediate_operand" "")))]
- ""
+ "TARGET_SSE"
{
if (!flag_finite_math_only)
operands[1] = force_reg (mode, operands[1]);
@@ -903,7 +817,7 @@
(smaxmin:VF
(match_operand:VF 1 "nonimmediate_operand" "%0,x")
(match_operand:VF 2 "nonimmediate_operand" "xm,xm")))]
- "flag_finite_math_only
+ "TARGET_SSE && flag_finite_math_only
&& ix86_binary_operator_ok (, mode, operands)"
"@
\t{%2, %0|%0, %2}
@@ -918,7 +832,7 @@
(smaxmin:VF
(match_operand:VF 1 "register_operand" "0,x")
(match_operand:VF 2 "nonimmediate_operand" "xm,xm")))]
- "!flag_finite_math_only"
+ "TARGET_SSE && !flag_finite_math_only"
"@
\t{%2, %0|%0, %2}
v\t{%2, %1, %0|%0, %1, %2}"
@@ -935,7 +849,7 @@
(match_operand:VF_128 2 "nonimmediate_operand" "xm,xm"))
(match_dup 1)
(const_int 1)))]
- ""
+ "TARGET_SSE"
"@
\t{%2, %0|%0, %2}
v\t{%2, %1, %0|%0, %1, %2}"
@@ -956,10 +870,10 @@
[(match_operand:VF 1 "register_operand" "0,x")
(match_operand:VF 2 "nonimmediate_operand" "xm,xm")]
UNSPEC_IEEE_MIN))]
- ""
+ "TARGET_SSE"
"@
- vmin\t{%2, %1, %0|%0, %1, %2}
- min\t{%2, %0|%0, %2}"
+ min\t{%2, %0|%0, %2}
+ vmin\t{%2, %1, %0|%0, %1, %2}"
[(set_attr "isa" "noavx,avx")
(set_attr "type" "sseadd")
(set_attr "prefix" "orig,vex")
@@ -971,10 +885,10 @@
[(match_operand:VF 1 "register_operand" "0,x")
(match_operand:VF 2 "nonimmediate_operand" "xm,xm")]
UNSPEC_IEEE_MAX))]
- ""
+ "TARGET_SSE"
"@
- vmax\t{%2, %1, %0|%0, %1, %2}
- max\t{%2, %0|%0, %2}"
+ max\t{%2, %0|%0, %2}
+ vmax\t{%2, %1, %0|%0, %1, %2}"
[(set_attr "isa" "noavx,avx")
(set_attr "type" "sseadd")
(set_attr "prefix" "orig,vex")
@@ -1276,12 +1190,28 @@
(set_attr "prefix" "vex")
(set_attr "mode" "")])
+(define_insn "*_maskcmp3_comm"
+ [(set (match_operand:VF 0 "register_operand" "=x,x")
+ (match_operator:VF 3 "sse_comparison_operator"
+ [(match_operand:VF 1 "register_operand" "%0,x")
+ (match_operand:VF 2 "nonimmediate_operand" "xm,xm")]))]
+ "TARGET_SSE
+ && GET_RTX_CLASS (GET_CODE (operands[3])) == RTX_COMM_COMPARE"
+ "@
+ cmp%D3\t{%2, %0|%0, %2}
+ vcmp%D3\t{%2, %1, %0|%0, %1, %2}"
+ [(set_attr "isa" "noavx,avx")
+ (set_attr "type" "ssecmp")
+ (set_attr "length_immediate" "1")
+ (set_attr "prefix" "orig,vex")
+ (set_attr "mode" "")])
+
(define_insn "_maskcmp3"
[(set (match_operand:VF 0 "register_operand" "=x,x")
(match_operator:VF 3 "sse_comparison_operator"
[(match_operand:VF 1 "register_operand" "0,x")
(match_operand:VF 2 "nonimmediate_operand" "xm,xm")]))]
- ""
+ "TARGET_SSE"
"@
cmp%D3\t{%2, %0|%0, %2}
vcmp%D3\t{%2, %1, %0|%0, %1, %2}"
@@ -1299,7 +1229,7 @@
(match_operand:VF_128 2 "nonimmediate_operand" "xm,xm")])
(match_dup 1)
(const_int 1)))]
- ""
+ "TARGET_SSE"
"@
cmp%D3\t{%2, %0|%0, %2}
vcmp%D3\t{%2, %1, %0|%0, %1, %2}"
@@ -1319,7 +1249,7 @@
(match_operand: 1 "nonimmediate_operand" "xm")
(parallel [(const_int 0)]))))]
"SSE_FLOAT_MODE_P (mode)"
- "%vcomis\t{%1, %0|%0, %1}"
+ "%vcomi\t{%1, %0|%0, %1}"
[(set_attr "type" "ssecomi")
(set_attr "prefix" "maybe_vex")
(set_attr "prefix_rep" "0")
@@ -1339,7 +1269,7 @@
(match_operand: 1 "nonimmediate_operand" "xm")
(parallel [(const_int 0)]))))]
"SSE_FLOAT_MODE_P (mode)"
- "%vucomis\t{%1, %0|%0, %1}"
+ "%vucomi\t{%1, %0|%0, %1}"
[(set_attr "type" "ssecomi")
(set_attr "prefix" "maybe_vex")
(set_attr "prefix_rep" "0")
@@ -1357,7 +1287,7 @@
(match_operand:VF 5 "nonimmediate_operand" "")])
(match_operand:VF 1 "general_operand" "")
(match_operand:VF 2 "general_operand" "")))]
- ""
+ "TARGET_SSE"
{
bool ok = ix86_expand_fp_vcond (operands);
gcc_assert (ok);
@@ -1376,7 +1306,7 @@
(not:VF
(match_operand:VF 1 "register_operand" "0,x"))
(match_operand:VF 2 "nonimmediate_operand" "xm,xm")))]
- ""
+ "TARGET_SSE"
{
static char buf[32];
const char *insn;
@@ -1408,7 +1338,7 @@
(any_logic:VF
(match_operand:VF 1 "nonimmediate_operand" "")
(match_operand:VF 2 "nonimmediate_operand" "")))]
- ""
+ "TARGET_SSE"
"ix86_fixup_binary_operands_no_copy (, mode, operands);")
(define_insn "*3"
@@ -1416,7 +1346,7 @@
(any_logic:VF
(match_operand:VF 1 "nonimmediate_operand" "%0,x")
(match_operand:VF 2 "nonimmediate_operand" "xm,xm")))]
- "ix86_binary_operator_ok (, mode, operands)"
+ "TARGET_SSE && ix86_binary_operator_ok (, mode, operands)"
{
static char buf[32];
const char *insn;
@@ -1453,7 +1383,7 @@
(match_operand:VF 2 "nonimmediate_operand" "")))
(set (match_operand:VF 0 "register_operand" "")
(ior:VF (match_dup 4) (match_dup 5)))]
- ""
+ "TARGET_SSE"
{
operands[3] = ix86_build_signbit_mask (mode, 1, 0);
@@ -1477,15 +1407,15 @@
static char buf[32];
const char *insn;
const char *suffix
- = TARGET_SSE_PACKED_SINGLE_INSN_OPTIMAL ? "s" : "";
+ = TARGET_SSE_PACKED_SINGLE_INSN_OPTIMAL ? "ps" : "";
switch (which_alternative)
{
case 0:
- insn = "andnp%s\t{%%2, %%0|%%0, %%2}";
+ insn = "andn%s\t{%%2, %%0|%%0, %%2}";
break;
case 1:
- insn = "vandnp%s\t{%%2, %%1, %%0|%%0, %%1, %%2}";
+ insn = "vandn%s\t{%%2, %%1, %%0|%%0, %%1, %%2}";
break;
default:
gcc_unreachable ();
@@ -1509,15 +1439,15 @@
static char buf[32];
const char *insn;
const char *suffix
- = TARGET_SSE_PACKED_SINGLE_INSN_OPTIMAL ? "s" : "";
+ = TARGET_SSE_PACKED_SINGLE_INSN_OPTIMAL ? "ps" : "";
switch (which_alternative)
{
case 0:
- insn = "p%s\t{%%2, %%0|%%0, %%2}";
+ insn = "%s\t{%%2, %%0|%%0, %%2}";
break;
case 1:
- insn = "vp%s\t{%%2, %%1, %%0|%%0, %%1, %%2}";
+ insn = "v%s\t{%%2, %%1, %%0|%%0, %%1, %%2}";
break;
default:
gcc_unreachable ();
@@ -1563,8 +1493,7 @@
(match_operand:FMAMODE 1 "nonimmediate_operand")
(match_operand:FMAMODE 2 "nonimmediate_operand")
(match_operand:FMAMODE 3 "nonimmediate_operand")))]
- "(TARGET_FMA || TARGET_FMA4) && TARGET_SSE_MATH"
- "")
+ "(TARGET_FMA || TARGET_FMA4) && TARGET_SSE_MATH")
(define_expand "fms4"
[(set (match_operand:FMAMODE 0 "register_operand")
@@ -1572,8 +1501,7 @@
(match_operand:FMAMODE 1 "nonimmediate_operand")
(match_operand:FMAMODE 2 "nonimmediate_operand")
(neg:FMAMODE (match_operand:FMAMODE 3 "nonimmediate_operand"))))]
- "(TARGET_FMA || TARGET_FMA4) && TARGET_SSE_MATH"
- "")
+ "(TARGET_FMA || TARGET_FMA4) && TARGET_SSE_MATH")
(define_expand "fnma4"
[(set (match_operand:FMAMODE 0 "register_operand")
@@ -1581,8 +1509,7 @@
(neg:FMAMODE (match_operand:FMAMODE 1 "nonimmediate_operand"))
(match_operand:FMAMODE 2 "nonimmediate_operand")
(match_operand:FMAMODE 3 "nonimmediate_operand")))]
- "(TARGET_FMA || TARGET_FMA4) && TARGET_SSE_MATH"
- "")
+ "(TARGET_FMA || TARGET_FMA4) && TARGET_SSE_MATH")
(define_expand "fnms4"
[(set (match_operand:FMAMODE 0 "register_operand")
@@ -1590,8 +1517,7 @@
(neg:FMAMODE (match_operand:FMAMODE 1 "nonimmediate_operand"))
(match_operand:FMAMODE 2 "nonimmediate_operand")
(neg:FMAMODE (match_operand:FMAMODE 3 "nonimmediate_operand"))))]
- "(TARGET_FMA || TARGET_FMA4) && TARGET_SSE_MATH"
- "")
+ "(TARGET_FMA || TARGET_FMA4) && TARGET_SSE_MATH")
;; The builtin for fma4intrin.h is not constrained by SSE math enabled.
(define_expand "fma4i_fmadd_"
@@ -1600,8 +1526,7 @@
(match_operand:FMAMODE 1 "nonimmediate_operand")
(match_operand:FMAMODE 2 "nonimmediate_operand")
(match_operand:FMAMODE 3 "nonimmediate_operand")))]
- "TARGET_FMA || TARGET_FMA4"
- "")
+ "TARGET_FMA || TARGET_FMA4")
(define_insn "*fma4i_fmadd_"
[(set (match_operand:FMAMODE 0 "register_operand" "=x,x")
@@ -1655,12 +1580,12 @@
;; entire destination register, with the high-order elements zeroed.
(define_expand "fma4i_vmfmadd_"
- [(set (match_operand:SSEMODEF2P 0 "register_operand")
- (vec_merge:SSEMODEF2P
- (fma:SSEMODEF2P
- (match_operand:SSEMODEF2P 1 "nonimmediate_operand")
- (match_operand:SSEMODEF2P 2 "nonimmediate_operand")
- (match_operand:SSEMODEF2P 3 "nonimmediate_operand"))
+ [(set (match_operand:VF_128 0 "register_operand")
+ (vec_merge:VF_128
+ (fma:VF_128
+ (match_operand:VF_128 1 "nonimmediate_operand")
+ (match_operand:VF_128 2 "nonimmediate_operand")
+ (match_operand:VF_128 3 "nonimmediate_operand"))
(match_dup 4)
(const_int 1)))]
"TARGET_FMA4"
@@ -1669,13 +1594,13 @@
})
(define_insn "*fma4i_vmfmadd_"
- [(set (match_operand:SSEMODEF2P 0 "register_operand" "=x,x")
- (vec_merge:SSEMODEF2P
- (fma:SSEMODEF2P
- (match_operand:SSEMODEF2P 1 "nonimmediate_operand" "%x,x")
- (match_operand:SSEMODEF2P 2 "nonimmediate_operand" " x,m")
- (match_operand:SSEMODEF2P 3 "nonimmediate_operand" "xm,x"))
- (match_operand:SSEMODEF2P 4 "const0_operand" "")
+ [(set (match_operand:VF_128 0 "register_operand" "=x,x")
+ (vec_merge:VF_128
+ (fma:VF_128
+ (match_operand:VF_128 1 "nonimmediate_operand" "%x,x")
+ (match_operand:VF_128 2 "nonimmediate_operand" " x,m")
+ (match_operand:VF_128 3 "nonimmediate_operand" "xm,x"))
+ (match_operand:VF_128 4 "const0_operand" "")
(const_int 1)))]
"TARGET_FMA4"
"vfmadd\t{%3, %2, %1, %0|%0, %1, %2, %3}"
@@ -1683,14 +1608,14 @@
(set_attr "mode" "")])
(define_insn "*fma4i_vmfmsub_"
- [(set (match_operand:SSEMODEF2P 0 "register_operand" "=x,x")
- (vec_merge:SSEMODEF2P
- (fma:SSEMODEF2P
- (match_operand:SSEMODEF2P 1 "nonimmediate_operand" "%x,x")
- (match_operand:SSEMODEF2P 2 "nonimmediate_operand" " x,m")
- (neg:SSEMODEF2P
- (match_operand:SSEMODEF2P 3 "nonimmediate_operand" "xm,x")))
- (match_operand:SSEMODEF2P 4 "const0_operand" "")
+ [(set (match_operand:VF_128 0 "register_operand" "=x,x")
+ (vec_merge:VF_128
+ (fma:VF_128
+ (match_operand:VF_128 1 "nonimmediate_operand" "%x,x")
+ (match_operand:VF_128 2 "nonimmediate_operand" " x,m")
+ (neg:VF_128
+ (match_operand:VF_128 3 "nonimmediate_operand" "xm,x")))
+ (match_operand:VF_128 4 "const0_operand" "")
(const_int 1)))]
"TARGET_FMA4"
"vfmsub\t{%3, %2, %1, %0|%0, %1, %2, %3}"
@@ -1698,14 +1623,14 @@
(set_attr "mode" "")])
(define_insn "*fma4i_vmfnmadd_"
- [(set (match_operand:SSEMODEF2P 0 "register_operand" "=x,x")
- (vec_merge:SSEMODEF2P
- (fma:SSEMODEF2P
- (neg:SSEMODEF2P
- (match_operand:SSEMODEF2P 1 "nonimmediate_operand" "%x,x"))
- (match_operand:SSEMODEF2P 2 "nonimmediate_operand" " x,m")
- (match_operand:SSEMODEF2P 3 "nonimmediate_operand" "xm,x"))
- (match_operand:SSEMODEF2P 4 "const0_operand" "")
+ [(set (match_operand:VF_128 0 "register_operand" "=x,x")
+ (vec_merge:VF_128
+ (fma:VF_128
+ (neg:VF_128
+ (match_operand:VF_128 1 "nonimmediate_operand" "%x,x"))
+ (match_operand:VF_128 2 "nonimmediate_operand" " x,m")
+ (match_operand:VF_128 3 "nonimmediate_operand" "xm,x"))
+ (match_operand:VF_128 4 "const0_operand" "")
(const_int 1)))]
"TARGET_FMA4"
"vfnmadd\t{%3, %2, %1, %0|%0, %1, %2, %3}"
@@ -1713,15 +1638,15 @@
(set_attr "mode" "")])
(define_insn "*fma4i_vmfnmsub_"
- [(set (match_operand:SSEMODEF2P 0 "register_operand" "=x,x")
- (vec_merge:SSEMODEF2P
- (fma:SSEMODEF2P
- (neg:SSEMODEF2P
- (match_operand:SSEMODEF2P 1 "nonimmediate_operand" "%x,x"))
- (match_operand:SSEMODEF2P 2 "nonimmediate_operand" " x,m")
- (neg:SSEMODEF2P
- (match_operand:SSEMODEF2P 3 "nonimmediate_operand" "xm,x")))
- (match_operand:SSEMODEF2P 4 "const0_operand" "")
+ [(set (match_operand:VF_128 0 "register_operand" "=x,x")
+ (vec_merge:VF_128
+ (fma:VF_128
+ (neg:VF_128
+ (match_operand:VF_128 1 "nonimmediate_operand" "%x,x"))
+ (match_operand:VF_128 2 "nonimmediate_operand" " x,m")
+ (neg:VF_128
+ (match_operand:VF_128 3 "nonimmediate_operand" "xm,x")))
+ (match_operand:VF_128 4 "const0_operand" "")
(const_int 1)))]
"TARGET_FMA4"
"vfnmsub\t{%3, %2, %1, %0|%0, %1, %2, %3}"
@@ -1744,37 +1669,36 @@
;; But this doesn't seem useful in practice.
(define_expand "fmaddsub_"
- [(set (match_operand:AVXMODEF2P 0 "register_operand")
- (unspec:AVXMODEF2P
- [(match_operand:AVXMODEF2P 1 "nonimmediate_operand")
- (match_operand:AVXMODEF2P 2 "nonimmediate_operand")
- (match_operand:AVXMODEF2P 3 "nonimmediate_operand")]
+ [(set (match_operand:VF 0 "register_operand")
+ (unspec:VF
+ [(match_operand:VF 1 "nonimmediate_operand")
+ (match_operand:VF 2 "nonimmediate_operand")
+ (match_operand:VF 3 "nonimmediate_operand")]
UNSPEC_FMADDSUB))]
- "TARGET_FMA || TARGET_FMA4"
- "")
+ "TARGET_FMA || TARGET_FMA4")
(define_insn "*fma4_fmaddsub_"
- [(set (match_operand:AVXMODEF2P 0 "register_operand" "=x,x")
- (unspec:AVXMODEF2P
- [(match_operand:AVXMODEF2P 1 "nonimmediate_operand" "%x,x")
- (match_operand:AVXMODEF2P 2 "nonimmediate_operand" " x,m")
- (match_operand:AVXMODEF2P 3 "nonimmediate_operand" "xm,x")]
+ [(set (match_operand:VF 0 "register_operand" "=x,x")
+ (unspec:VF
+ [(match_operand:VF 1 "nonimmediate_operand" "%x,x")
+ (match_operand:VF 2 "nonimmediate_operand" " x,m")
+ (match_operand:VF 3 "nonimmediate_operand" "xm,x")]
UNSPEC_FMADDSUB))]
"TARGET_FMA4"
- "vfmaddsubps\t{%3, %2, %1, %0|%0, %1, %2, %3}"
+ "vfmaddsub\t{%3, %2, %1, %0|%0, %1, %2, %3}"
[(set_attr "type" "ssemuladd")
(set_attr "mode" "")])
(define_insn "*fma4_fmsubadd_"
- [(set (match_operand:AVXMODEF2P 0 "register_operand" "=x,x")
- (unspec:AVXMODEF2P
- [(match_operand:AVXMODEF2P 1 "nonimmediate_operand" "%x,x")
- (match_operand:AVXMODEF2P 2 "nonimmediate_operand" " x,m")
- (neg:AVXMODEF2P
- (match_operand:AVXMODEF2P 3 "nonimmediate_operand" "xm,x"))]
+ [(set (match_operand:VF 0 "register_operand" "=x,x")
+ (unspec:VF
+ [(match_operand:VF 1 "nonimmediate_operand" "%x,x")
+ (match_operand:VF 2 "nonimmediate_operand" " x,m")
+ (neg:VF
+ (match_operand:VF 3 "nonimmediate_operand" "xm,x"))]
UNSPEC_FMADDSUB))]
"TARGET_FMA4"
- "vfmsubaddps\t{%3, %2, %1, %0|%0, %1, %2, %3}"
+ "vfmsubadd\t{%3, %2, %1, %0|%0, %1, %2, %3}"
[(set_attr "type" "ssemuladd")
(set_attr "mode" "")])
@@ -1793,7 +1717,7 @@
"TARGET_FMA"
"@
vfmadd132\t{%2, %3, %0|%0, %3, %2}
- vfmadd312\t{%3, %2, %0|%0, %2, %3}
+ vfmadd213\t{%3, %2, %0|%0, %2, %3}
vfmadd231\t{%2, %1, %0|%0, %1, %2}"
[(set_attr "type" "ssemuladd")
(set_attr "mode" "")])
@@ -1808,12 +1732,12 @@
"TARGET_FMA"
"@
vfmsub132\t{%2, %3, %0|%0, %3, %2}
- vfmsub312\t{%3, %2, %0|%0, %2, %3}
+ vfmsub213\t{%3, %2, %0|%0, %2, %3}
vfmsub231\t{%2, %1, %0|%0, %1, %2}"
[(set_attr "type" "ssemuladd")
(set_attr "mode" "")])
-(define_insn "*fma_fmadd_"
+(define_insn "*fma_fnmadd_"
[(set (match_operand:FMAMODE 0 "register_operand" "=x,x,x")
(fma:FMAMODE
(neg:FMAMODE
@@ -1823,12 +1747,12 @@
"TARGET_FMA"
"@
vfnmadd132\t{%2, %3, %0|%0, %3, %2}
- vfnmadd312\t{%3, %2, %0|%0, %2, %3}
+ vfnmadd213\t{%3, %2, %0|%0, %2, %3}
vfnmadd231\t{%2, %1, %0|%0, %1, %2}"
[(set_attr "type" "ssemuladd")
(set_attr "mode" "")])
-(define_insn "*fma_fmsub_"
+(define_insn "*fma_fnmsub_"
[(set (match_operand:FMAMODE 0 "register_operand" "=x,x,x")
(fma:FMAMODE
(neg:FMAMODE
@@ -1839,17 +1763,17 @@
"TARGET_FMA"
"@
vfnmsub132\t{%2, %3, %0|%0, %3, %2}
- vfnmsub312\t{%3, %2, %0|%0, %2, %3}
+ vfnmsub213\t{%3, %2, %0|%0, %2, %3}
vfnmsub231\t{%2, %1, %0|%0, %1, %2}"
[(set_attr "type" "ssemuladd")
(set_attr "mode" "")])
(define_insn "*fma_fmaddsub_"
- [(set (match_operand:AVXMODEF2P 0 "register_operand" "=x,x,x")
- (unspec:AVXMODEF2P
- [(match_operand:AVXMODEF2P 1 "nonimmediate_operand" "%0, 0,x")
- (match_operand:AVXMODEF2P 2 "nonimmediate_operand" "xm, x,xm")
- (match_operand:AVXMODEF2P 3 "nonimmediate_operand" " x,xm,0")]
+ [(set (match_operand:VF 0 "register_operand" "=x,x,x")
+ (unspec:VF
+ [(match_operand:VF 1 "nonimmediate_operand" "%0, 0,x")
+ (match_operand:VF 2 "nonimmediate_operand" "xm, x,xm")
+ (match_operand:VF 3 "nonimmediate_operand" " x,xm,0")]
UNSPEC_FMADDSUB))]
"TARGET_FMA"
"@
@@ -1860,12 +1784,12 @@
(set_attr "mode" "")])
(define_insn "*fma_fmsubadd_"
- [(set (match_operand:AVXMODEF2P 0 "register_operand" "=x,x,x")
- (unspec:AVXMODEF2P
- [(match_operand:AVXMODEF2P 1 "nonimmediate_operand" "%0, 0,x")
- (match_operand:AVXMODEF2P 2 "nonimmediate_operand" "xm, x,xm")
- (neg:AVXMODEF2P
- (match_operand:AVXMODEF2P 3 "nonimmediate_operand" " x,xm,0"))]
+ [(set (match_operand:VF 0 "register_operand" "=x,x,x")
+ (unspec:VF
+ [(match_operand:VF 1 "nonimmediate_operand" "%0, 0,x")
+ (match_operand:VF 2 "nonimmediate_operand" "xm, x,xm")
+ (neg:VF
+ (match_operand:VF 3 "nonimmediate_operand" " x,xm,0"))]
UNSPEC_FMADDSUB))]
"TARGET_FMA"
"@
@@ -2722,7 +2646,7 @@
ix86_build_const_vector (V2DFmode, 1, x));
operands[5] = gen_reg_rtx (V4SImode);
-
+
for (i = 6; i < 9; i++)
operands[i] = gen_reg_rtx (V2DFmode);
})
@@ -2847,7 +2771,7 @@
"TARGET_SSE"
{
rtx dst = ix86_fixup_binary_operands (UNKNOWN, V4SFmode, operands);
-
+
emit_insn (gen_sse_movhlps (dst, operands[1], operands[2]));
/* Fix up the destination if needed. */
@@ -2874,7 +2798,7 @@
movlps\t{%H2, %0|%0, %H2}
vmovlps\t{%H2, %1, %0|%0, %1, %H2}
%vmovhps\t{%2, %0|%0, %2}"
- [(set_attr "isa" "noavx,avx,noavx,avx,base")
+ [(set_attr "isa" "noavx,avx,noavx,avx,*")
(set_attr "type" "ssemov")
(set_attr "prefix" "orig,vex,orig,vex,maybe_vex")
(set_attr "mode" "V4SF,V4SF,V2SF,V2SF,V2SF")])
@@ -2892,7 +2816,7 @@
"TARGET_SSE"
{
rtx dst = ix86_fixup_binary_operands (UNKNOWN, V4SFmode, operands);
-
+
emit_insn (gen_sse_movlhps (dst, operands[1], operands[2]));
/* Fix up the destination if needed. */
@@ -2919,7 +2843,7 @@
movhps\t{%2, %0|%0, %2}
vmovhps\t{%2, %1, %0|%0, %1, %2}
%vmovlps\t{%2, %H0|%H0, %2}"
- [(set_attr "isa" "noavx,avx,noavx,avx,base")
+ [(set_attr "isa" "noavx,avx,noavx,avx,*")
(set_attr "type" "ssemov")
(set_attr "prefix" "orig,vex,orig,vex,maybe_vex")
(set_attr "mode" "V4SF,V4SF,V2SF,V2SF,V2SF")])
@@ -3200,11 +3124,11 @@
})
(define_insn "sse_shufps_"
- [(set (match_operand:SSEMODE4S 0 "register_operand" "=x,x")
- (vec_select:SSEMODE4S
- (vec_concat:
- (match_operand:SSEMODE4S 1 "register_operand" "0,x")
- (match_operand:SSEMODE4S 2 "nonimmediate_operand" "xm,xm"))
+ [(set (match_operand:VI4F_128 0 "register_operand" "=x,x")
+ (vec_select:VI4F_128
+ (vec_concat:
+ (match_operand:VI4F_128 1 "register_operand" "0,x")
+ (match_operand:VI4F_128 2 "nonimmediate_operand" "xm,xm"))
(parallel [(match_operand 3 "const_0_to_3_operand" "")
(match_operand 4 "const_0_to_3_operand" "")
(match_operand 5 "const_4_to_7_operand" "")
@@ -3258,7 +3182,7 @@
"TARGET_SSE"
{
rtx dst = ix86_fixup_binary_operands (UNKNOWN, V4SFmode, operands);
-
+
emit_insn (gen_sse_loadhps (dst, operands[1], operands[2]));
/* Fix up the destination if needed. */
@@ -3282,7 +3206,7 @@
movlhps\t{%2, %0|%0, %2}
vmovlhps\t{%2, %1, %0|%0, %1, %2}
%vmovlps\t{%2, %H0|%H0, %2}"
- [(set_attr "isa" "noavx,avx,noavx,avx,base")
+ [(set_attr "isa" "noavx,avx,noavx,avx,*")
(set_attr "type" "ssemov")
(set_attr "prefix" "orig,vex,orig,vex,maybe_vex")
(set_attr "mode" "V2SF,V2SF,V4SF,V4SF,V2SF")])
@@ -3311,7 +3235,7 @@
"TARGET_SSE"
{
rtx dst = ix86_fixup_binary_operands (UNKNOWN, V4SFmode, operands);
-
+
emit_insn (gen_sse_loadlps (dst, operands[1], operands[2]));
/* Fix up the destination if needed. */
@@ -3335,7 +3259,7 @@
movlps\t{%2, %0|%0, %2}
vmovlps\t{%2, %1, %0|%0, %1, %2}
%vmovlps\t{%2, %0|%0, %2}"
- [(set_attr "isa" "noavx,avx,noavx,avx,base")
+ [(set_attr "isa" "noavx,avx,noavx,avx,*")
(set_attr "type" "sselog,sselog,ssemov,ssemov,ssemov")
(set_attr "length_immediate" "1,1,*,*,*")
(set_attr "prefix" "orig,vex,orig,vex,maybe_vex")
@@ -3363,7 +3287,7 @@
"TARGET_SSE"
{
if (!TARGET_AVX)
- operands[1] = force_reg (V4SFmode, operands[1]);
+ operands[1] = force_reg (SFmode, operands[1]);
})
(define_insn "*vec_dupv4sf_avx"
@@ -3406,7 +3330,7 @@
%vmovss\t{%1, %0|%0, %1}
punpckldq\t{%2, %0|%0, %2}
movd\t{%1, %0|%0, %1}"
- [(set_attr "isa" "noavx,avx,noavx,avx,base,base,base")
+ [(set_attr "isa" "noavx,avx,noavx,avx,*,*,*")
(set_attr "type" "sselog,sselog,sselog,sselog,ssemov,mmxcvt,mmxmov")
(set_attr "prefix_data16" "*,*,1,*,*,*,*")
(set_attr "prefix_extra" "*,*,1,1,*,*,*")
@@ -3431,7 +3355,7 @@
[(set_attr "type" "sselog,ssemov,mmxcvt,mmxmov")
(set_attr "mode" "V4SF,SF,DI,DI")])
-(define_insn "*vec_concatv4sf_sse"
+(define_insn "*vec_concatv4sf"
[(set (match_operand:V4SF 0 "register_operand" "=x,x,x,x")
(vec_concat:V4SF
(match_operand:V2SF 1 "register_operand" " 0,x,0,x")
@@ -3448,7 +3372,7 @@
(set_attr "mode" "V4SF,V4SF,V2SF,V2SF")])
(define_expand "vec_init"
- [(match_operand:SSEMODE 0 "register_operand" "")
+ [(match_operand:V_128 0 "register_operand" "")
(match_operand 1 "" "")]
"TARGET_SSE"
{
@@ -3458,79 +3382,43 @@
;; Avoid combining registers from different units in a single alternative,
;; see comment above inline_secondary_memory_needed function in i386.c
-(define_insn "*vec_set_0_sse4_1"
- [(set (match_operand:SSEMODE4S 0 "nonimmediate_operand"
- "=x,x,x ,x,x,x ,x ,m,m,m")
- (vec_merge:SSEMODE4S
- (vec_duplicate:SSEMODE4S
+(define_insn "vec_set_0"
+ [(set (match_operand:VI4F_128 0 "nonimmediate_operand"
+ "=Y4,Y2,Y2,x,x,x,Y4 ,x ,m,m ,m")
+ (vec_merge:VI4F_128
+ (vec_duplicate:VI4F_128
(match_operand: 2 "general_operand"
- " x,m,*r,x,x,*rm,*rm,x,*r,fF"))
- (match_operand:SSEMODE4S 1 "vector_move_operand"
- " C,C,C ,0,x,0 ,x ,0,0 ,0")
+ " Y4,m ,*r,m,x,x,*rm,*rm,x,fF,*r"))
+ (match_operand:VI4F_128 1 "vector_move_operand"
+ " C ,C ,C ,C,0,x,0 ,x ,0,0 ,0")
(const_int 1)))]
- "TARGET_SSE4_1"
+ "TARGET_SSE"
"@
%vinsertps\t{$0xe, %d2, %0|%0, %d2, 0xe}
%vmov\t{%2, %0|%0, %2}
%vmovd\t{%2, %0|%0, %2}
movss\t{%2, %0|%0, %2}
+ movss\t{%2, %0|%0, %2}
vmovss\t{%2, %1, %0|%0, %1, %2}
pinsrd\t{$0, %2, %0|%0, %2, 0}
vpinsrd\t{$0, %2, %1, %0|%0, %1, %2, 0}
#
#
#"
- [(set_attr "isa" "base,base,base,noavx,avx,noavx,avx,base,base,base")
- (set_attr "type" "sselog,ssemov,ssemov,ssemov,ssemov,sselog,sselog,*,*,*")
- (set_attr "prefix_extra" "*,*,*,*,*,1,1,*,*,*")
- (set_attr "length_immediate" "*,*,*,*,*,1,1,*,*,*")
- (set_attr "prefix" "maybe_vex,maybe_vex,maybe_vex,orig,vex,orig,vex,*,*,*")
- (set_attr "mode" "SF,,SI,SF,SF,TI,TI,*,*,*")])
-
-;; Avoid combining registers from different units in a single alternative,
-;; see comment above inline_secondary_memory_needed function in i386.c
-(define_insn "*vec_set_0_sse2"
- [(set (match_operand:SSEMODE4S 0 "nonimmediate_operand"
- "=x,x ,x,m,m ,m")
- (vec_merge:SSEMODE4S
- (vec_duplicate:SSEMODE4S
- (match_operand: 2 "general_operand"
- " m,*r,x,x,*r,fF"))
- (match_operand:SSEMODE4S 1 "vector_move_operand"
- " C, C,0,0,0 ,0")
- (const_int 1)))]
- "TARGET_SSE2"
- "@
- mov