1 /* { dg-do run { target { powerpc*-*-* && vmx_hw } } } */
2 /* { dg-do compile { target { powerpc*-*-* && { ! vmx_hw } } } } */
3 /* { dg-require-effective-target powerpc_altivec_ok } */
4 /* { dg-options "-maltivec -mabi=altivec -O2" } */
6 /* Check that "easy" AltiVec constants are correctly synthesized. */
/* Called by the check_* helpers on a mismatch; terminates the test.  */
8 extern void abort (void);
/* 128-bit generic vector types: 16 bytes, 8 halfwords, 4 words.
   NOTE(review): the element types are unsigned even though the qi/hi/si
   mode names usually denote signed elements -- presumably deliberate so
   the hex constant patterns below (0xFBFB etc.) compare cleanly.  */
10 typedef __attribute__ ((vector_size (16))) unsigned char v16qi;
11 typedef __attribute__ ((vector_size (16))) unsigned short v8hi;
12 typedef __attribute__ ((vector_size (16))) unsigned int v4si;
/* Scratch byte buffer filled by the vspltis? emulators; 16-byte aligned
   so it matches vector alignment when memcmp'd against a vector.  */
14 char w[16] __attribute__((aligned(16)));
17 /* Emulate the vspltis? instructions on a 16-byte array of chars. */
/* Emulate vspltisb: splat the byte VAL into all 16 byte elements of V.
   NOTE(review): this listing is elided here (original lines 20-21 and
   23-24 are missing) -- the braces, the `int i;` declaration, and the
   loop body are not visible in this chunk; do not infer more than the
   16-iteration byte loop shown.  */
19 void vspltisb (char *v, int val)
22   for (i = 0; i < 16; i++)
/* Emulate vspltish on a byte array: for each of the 8 halfword slots
   (stride 2), store the sign-extension byte (VAL >> 7, i.e. 0 or -1
   for an 8-bit VAL) in the high byte and VAL itself in the low byte --
   big-endian halfword layout.
   NOTE(review): braces and the `int i;` declaration are elided from
   this listing (original lines 27-28, 31-32 missing).  */
26 void vspltish (char *v, int val)
29   for (i = 0; i < 16; i += 2)
30     v[i] = val >> 7, v[i + 1] = val;
/* Emulate vspltisw on a byte array: for each of the 4 word slots
   (stride 4), fill the three high bytes with the sign-extension byte
   (VAL >> 7) and the lowest-addressed... rather the last byte with VAL --
   big-endian word layout.
   NOTE(review): braces and the `int i;` declaration are elided from
   this listing (original lines 34-35, 38-39 missing).  */
33 void vspltisw (char *v, int val)
36   for (i = 0; i < 16; i += 4)
37     v[i] = v[i + 1] = v[i + 2] = val >> 7, v[i + 3] = val;
41 /* Use three different check functions for each mode-instruction pair.
42 The callers have no typecasting and no addressable vectors, to make
43 the test more robust. */
/* One checker per vector mode: compare the 16 bytes of the vector V1
   (received by value, forcing the compiler to materialize the constant)
   against the emulated pattern in V2.  noinline keeps the constant
   synthesis in the caller from being folded against the comparison.
   NOTE(review): the listing elides the braces and the statement guarded
   by the memcmp test (original lines 46, 48-50, ... missing) --
   presumably `abort ();` given the extern above; confirm against the
   full file.  NOTE(review): no declaration of memcmp is visible in
   this chunk.  */
45 void __attribute__ ((noinline)) check_v16qi (v16qi v1, char *v2)
47   if (memcmp (&v1, v2, 16))
51 void __attribute__ ((noinline)) check_v8hi (v8hi v1, char *v2)
53   if (memcmp (&v1, v2, 16))
57 void __attribute__ ((noinline)) check_v4si (v4si v1, char *v2)
59   if (memcmp (&v1, v2, 16))
/* v16qi test cases.  Each function builds a byte-vector constant that
   the compiler should synthesize with a single vspltis? (optionally
   followed by a vaddu?m with itself for the *_addself cases, which use
   twice the 5-bit immediate: 30 = 2*15, -24 = 2*-12).  The *_neg cases
   use negative immediates; for halfword/word patterns viewed as bytes
   the sign extension shows up as leading -1 (0xFF) bytes.
   NOTE(review): the listing elides each function's braces and its calls
   (presumably vspltis?(w, imm) then check_v16qi (v, w)); only the
   expected constant is visible here.  */
66 void v16qi_vspltisb ()
68   v16qi v = { 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15 };
73 void v16qi_vspltisb_neg ()
75   v16qi v = { -5, -5, -5, -5, -5, -5, -5, -5, -5, -5, -5, -5, -5, -5, -5, -5 };
80 void v16qi_vspltisb_addself ()
82   v16qi v = { 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30 };
87 void v16qi_vspltisb_neg_addself ()
89   v16qi v = { -24, -24, -24, -24, -24, -24, -24, -24, -24, -24, -24, -24, -24, -24, -24, -24 };
/* Halfword splats observed through byte elements: {sign byte, value byte}
   pairs, big-endian within each halfword.  */
94 void v16qi_vspltish ()
96   v16qi v = { 0, 15, 0, 15, 0, 15, 0, 15, 0, 15, 0, 15, 0, 15, 0, 15 };
101 void v16qi_vspltish_addself ()
103   v16qi v = { 0, 30, 0, 30, 0, 30, 0, 30, 0, 30, 0, 30, 0, 30, 0, 30 };
108 void v16qi_vspltish_neg ()
110   v16qi v = { -1, -5, -1, -5, -1, -5, -1, -5, -1, -5, -1, -5, -1, -5, -1, -5 };
/* Word splats observed through byte elements: three sign bytes then the
   value byte, big-endian within each word.  */
115 void v16qi_vspltisw ()
117   v16qi v = { 0, 0, 0, 15, 0, 0, 0, 15, 0, 0, 0, 15, 0, 0, 0, 15 };
122 void v16qi_vspltisw_addself ()
124   v16qi v = { 0, 0, 0, 30, 0, 0, 0, 30, 0, 0, 0, 30, 0, 0, 0, 30 };
129 void v16qi_vspltisw_neg ()
131   v16qi v = { -1, -1, -1, -5, -1, -1, -1, -5, -1, -1, -1, -5, -1, -1, -1, -5 };
/* v8hi test cases: the same splat patterns viewed as 8 halfwords.
   A byte splat of 0x0F appears as 0x0F0F per halfword (0xFBFB for -5);
   a halfword splat is the value itself; a word splat alternates the
   sign halfword (0 or -1) with the value halfword.
   NOTE(review): the listing elides each function's braces and its calls
   (presumably vspltis?(w, imm) then check_v8hi (v, w)); only the
   expected constant is visible here.  */
139 void v8hi_vspltisb ()
141   v8hi v = { 0x0F0F, 0x0F0F, 0x0F0F, 0x0F0F, 0x0F0F, 0x0F0F, 0x0F0F, 0x0F0F };
146 void v8hi_vspltisb_addself ()
148   v8hi v = { 0x1E1E, 0x1E1E, 0x1E1E, 0x1E1E, 0x1E1E, 0x1E1E, 0x1E1E, 0x1E1E };
153 void v8hi_vspltisb_neg ()
155   v8hi v = { 0xFBFB, 0xFBFB, 0xFBFB, 0xFBFB, 0xFBFB, 0xFBFB, 0xFBFB, 0xFBFB };
160 void v8hi_vspltish ()
162   v8hi v = { 15, 15, 15, 15, 15, 15, 15, 15 };
167 void v8hi_vspltish_neg ()
169   v8hi v = { -5, -5, -5, -5, -5, -5, -5, -5 };
174 void v8hi_vspltish_addself ()
176   v8hi v = { 30, 30, 30, 30, 30, 30, 30, 30 };
181 void v8hi_vspltish_neg_addself ()
183   v8hi v = { -24, -24, -24, -24, -24, -24, -24, -24 };
188 void v8hi_vspltisw ()
190   v8hi v = { 0, 15, 0, 15, 0, 15, 0, 15 };
195 void v8hi_vspltisw_addself ()
197   v8hi v = { 0, 30, 0, 30, 0, 30, 0, 30 };
202 void v8hi_vspltisw_neg ()
204   v8hi v = { -1, -5, -1, -5, -1, -5, -1, -5 };
/* v4si test cases: the same splat patterns viewed as 4 words.
   A byte splat of 0x0F appears as 0x0F0F0F0F per word (0xFBFBFBFB for
   -5); a halfword splat as 0x000F000F (0xFFFBFFFB for -5); a word splat
   is the value itself.
   NOTE(review): the listing elides each function's braces and its calls
   (presumably vspltis?(w, imm) then check_v4si (v, w)); only the
   expected constant is visible here.  */
211 void v4si_vspltisb ()
213   v4si v = { 0x0F0F0F0F, 0x0F0F0F0F, 0x0F0F0F0F, 0x0F0F0F0F };
218 void v4si_vspltisb_addself ()
220   v4si v = { 0x1E1E1E1E, 0x1E1E1E1E, 0x1E1E1E1E, 0x1E1E1E1E };
225 void v4si_vspltisb_neg ()
227   v4si v = { 0xFBFBFBFB, 0xFBFBFBFB, 0xFBFBFBFB, 0xFBFBFBFB };
232 void v4si_vspltish ()
234   v4si v = { 0x000F000F, 0x000F000F, 0x000F000F, 0x000F000F };
239 void v4si_vspltish_addself ()
241   v4si v = { 0x001E001E, 0x001E001E, 0x001E001E, 0x001E001E };
246 void v4si_vspltish_neg ()
248   v4si v = { 0xFFFBFFFB, 0xFFFBFFFB, 0xFFFBFFFB, 0xFFFBFFFB };
253 void v4si_vspltisw ()
255   v4si v = { 15, 15, 15, 15 };
260 void v4si_vspltisw_neg ()
262   v4si v = { -5, -5, -5, -5 };
267 void v4si_vspltisw_addself ()
269   v4si v = { 30, 30, 30, 30 };
274 void v4si_vspltisw_neg_addself ()
276   v4si v = { -24, -24, -24, -24 };
/* Test driver: run every case in sequence.
   NOTE(review): the function header is elided from this listing
   (presumably `int main ()` around original line 283-284), along with
   several calls (original lines 285, 289, 292, 295-296, 299, 303,
   306-307, 310, 313) -- e.g. v16qi_vspltisb () and v8hi_vspltish ()
   are among the missing lines.  Do not treat the visible list as the
   complete set of invocations.  */
286   v16qi_vspltisb_neg ();
287   v16qi_vspltisb_addself ();
288   v16qi_vspltisb_neg_addself ();
290   v16qi_vspltish_addself ();
291   v16qi_vspltish_neg ();
293   v16qi_vspltisw_addself ();
294   v16qi_vspltisw_neg ();
297   v8hi_vspltisb_addself ();
298   v8hi_vspltisb_neg ();
300   v8hi_vspltish_neg ();
301   v8hi_vspltish_addself ();
302   v8hi_vspltish_neg_addself ();
304   v8hi_vspltisw_addself ();
305   v8hi_vspltisw_neg ();
308   v4si_vspltisb_addself ();
309   v4si_vspltisb_neg ();
311   v4si_vspltish_addself ();
312   v4si_vspltish_neg ();
314   v4si_vspltisw_neg ();
315   v4si_vspltisw_addself ();
316   v4si_vspltisw_neg_addself ();