/* This use is out of pattern use, if LHS has other uses that are
pattern uses, we should mark the stmt itself, and not the pattern
stmt. */
- FOR_EACH_IMM_USE_FAST (use_p, imm_iter, lhs)
- {
- if (is_gimple_debug (USE_STMT (use_p)))
- continue;
- use_stmt = USE_STMT (use_p);
+ if (TREE_CODE (lhs) == SSA_NAME)
+ FOR_EACH_IMM_USE_FAST (use_p, imm_iter, lhs)
+ {
+ if (is_gimple_debug (USE_STMT (use_p)))
+ continue;
+ use_stmt = USE_STMT (use_p);
- if (vinfo_for_stmt (use_stmt)
- && STMT_VINFO_IN_PATTERN_P (vinfo_for_stmt (use_stmt)))
- {
- found = true;
- break;
- }
- }
+ if (vinfo_for_stmt (use_stmt)
+ && STMT_VINFO_IN_PATTERN_P (vinfo_for_stmt (use_stmt)))
+ {
+ found = true;
+ break;
+ }
+ }
}
if (!found)
if (vect_print_dump_info (REPORT_DETAILS))
fprintf (vect_dump, "Create vector_cst. nunits = %d", nunits);
- vec_cst = build_vector_from_val (vector_type, op);
+ vec_cst = build_vector_from_val (vector_type,
+ fold_convert (TREE_TYPE (vector_type),
+ op));
return vect_init_vector (stmt, vec_cst, vector_type, NULL);
}
return new_stmt;
}
-
/* Check if STMT performs a conversion operation, that can be vectorized.
If VEC_STMT is also passed, vectorize the STMT: create a vectorized
stmt to replace it, put it in VEC_STMT, and insert it at BSI.
tree vectype_out, vectype_in;
int ncopies, j;
tree rhs_type;
- tree builtin_decl;
enum { NARROW, NONE, WIDEN } modifier;
int i;
VEC(tree,heap) *vec_oprnds0 = NULL;
/* Supportable by target? */
if ((modifier == NONE
- && !targetm.vectorize.builtin_conversion (code, vectype_out, vectype_in))
+ && !supportable_convert_operation (code, vectype_out, vectype_in, &decl1, &code1))
|| (modifier == WIDEN
&& !supportable_widening_operation (code, stmt,
vectype_out, vectype_in,
else
vect_get_vec_defs_for_stmt_copy (dt, &vec_oprnds0, NULL);
- builtin_decl =
- targetm.vectorize.builtin_conversion (code,
- vectype_out, vectype_in);
FOR_EACH_VEC_ELT (tree, vec_oprnds0, i, vop0)
- {
- /* Arguments are ready. create the new vector stmt. */
- new_stmt = gimple_build_call (builtin_decl, 1, vop0);
- new_temp = make_ssa_name (vec_dest, new_stmt);
- gimple_call_set_lhs (new_stmt, new_temp);
- vect_finish_stmt_generation (stmt, new_stmt, gsi);
- if (slp_node)
- VEC_quick_push (gimple, SLP_TREE_VEC_STMTS (slp_node), new_stmt);
- }
+ {
+ /* Arguments are ready, create the new vector stmt. */
+ if (code1 == CALL_EXPR)
+ {
+ new_stmt = gimple_build_call (decl1, 1, vop0);
+ new_temp = make_ssa_name (vec_dest, new_stmt);
+ gimple_call_set_lhs (new_stmt, new_temp);
+ }
+ else
+ {
+ gcc_assert (TREE_CODE_LENGTH (code) == unary_op);
+ new_stmt = gimple_build_assign_with_ops (code, vec_dest, vop0,
+ NULL);
+ new_temp = make_ssa_name (vec_dest, new_stmt);
+ gimple_assign_set_lhs (new_stmt, new_temp);
+ }
+
+ vect_finish_stmt_generation (stmt, new_stmt, gsi);
+ if (slp_node)
+ VEC_quick_push (gimple, SLP_TREE_VEC_STMTS (slp_node), new_stmt);
+ }
if (j == 0)
STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
!= GET_MODE_SIZE (TYPE_MODE (vectype_in)))))
return false;
+ /* We do not handle bit-precision changes. */
+ if ((CONVERT_EXPR_CODE_P (code)
+ || code == VIEW_CONVERT_EXPR)
+ && INTEGRAL_TYPE_P (TREE_TYPE (scalar_dest))
+ && ((TYPE_PRECISION (TREE_TYPE (scalar_dest))
+ != GET_MODE_PRECISION (TYPE_MODE (TREE_TYPE (scalar_dest))))
+ || ((TYPE_PRECISION (TREE_TYPE (op))
+ != GET_MODE_PRECISION (TYPE_MODE (TREE_TYPE (op))))))
+ /* But a conversion that does not change the bit-pattern is ok. */
+ && !((TYPE_PRECISION (TREE_TYPE (scalar_dest))
+ > TYPE_PRECISION (TREE_TYPE (op)))
+ && TYPE_UNSIGNED (TREE_TYPE (op))))
+ {
+ if (vect_print_dump_info (REPORT_DETAILS))
+ fprintf (vect_dump, "type conversion to/from bit-precision "
+ "unsupported.");
+ return false;
+ }
+
if (!vec_stmt) /* transformation not required. */
{
STMT_VINFO_TYPE (stmt_info) = assignment_vec_info_type;
int nunits_in;
int nunits_out;
tree vectype_out;
+ tree op1_vectype;
int ncopies;
int j, i;
VEC (tree, heap) *vec_oprnds0 = NULL, *vec_oprnds1 = NULL;
scalar_dest = gimple_assign_lhs (stmt);
vectype_out = STMT_VINFO_VECTYPE (stmt_info);
+ if (TYPE_PRECISION (TREE_TYPE (scalar_dest))
+ != GET_MODE_PRECISION (TYPE_MODE (TREE_TYPE (scalar_dest))))
+ {
+ if (vect_print_dump_info (REPORT_DETAILS))
+ fprintf (vect_dump, "bit-precision shifts not supported.");
+ return false;
+ }
op0 = gimple_assign_rhs1 (stmt);
if (!vect_is_simple_use_1 (op0, loop_vinfo, bb_vinfo,
return false;
op1 = gimple_assign_rhs2 (stmt);
- if (!vect_is_simple_use (op1, loop_vinfo, bb_vinfo, &def_stmt, &def, &dt[1]))
+ if (!vect_is_simple_use_1 (op1, loop_vinfo, bb_vinfo, &def_stmt, &def,
+ &dt[1], &op1_vectype))
{
if (vect_print_dump_info (REPORT_DETAILS))
fprintf (vect_dump, "use not simple.");
optab = optab_for_tree_code (code, vectype, optab_vector);
if (vect_print_dump_info (REPORT_DETAILS))
fprintf (vect_dump, "vector/vector shift/rotate found.");
+ if (!op1_vectype)
+ op1_vectype = get_same_sized_vectype (TREE_TYPE (op1), vectype_out);
+ if (op1_vectype == NULL_TREE
+ || TYPE_MODE (op1_vectype) != TYPE_MODE (vectype))
+ {
+ if (vect_print_dump_info (REPORT_DETAILS))
+ fprintf (vect_dump, "unusable type for last operand in"
+ " vector/vector shift/rotate.");
+ return false;
+ }
}
/* See if the machine has a vector shifted by scalar insn and if not
then see if it has a vector shifted by vector insn. */
/* Unlike the other binary operators, shifts/rotates have
the rhs being int, instead of the same type as the lhs,
so make sure the scalar is the right type if we are
- dealing with vectors of short/char. */
+ dealing with vectors of long long/long/short/char. */
if (dt[1] == vect_constant_def)
op1 = fold_convert (TREE_TYPE (vectype), op1);
+ else if (!useless_type_conversion_p (TREE_TYPE (vectype),
+ TREE_TYPE (op1)))
+ {
+ if (slp_node
+ && TYPE_MODE (TREE_TYPE (vectype))
+ != TYPE_MODE (TREE_TYPE (op1)))
+ {
+ if (vect_print_dump_info (REPORT_DETAILS))
+ fprintf (vect_dump, "unusable type for last operand in"
+ " vector/vector shift/rotate.");
+ return false;
+ }
+ if (vec_stmt && !slp_node)
+ {
+ op1 = fold_convert (TREE_TYPE (vectype), op1);
+ op1 = vect_init_vector (stmt, op1,
+ TREE_TYPE (vectype), NULL);
+ }
+ }
}
}
}
scalar_dest = gimple_assign_lhs (stmt);
vectype_out = STMT_VINFO_VECTYPE (stmt_info);
+ /* Most operations cannot handle bit-precision types without extra
+ truncations. */
+ if ((TYPE_PRECISION (TREE_TYPE (scalar_dest))
+ != GET_MODE_PRECISION (TYPE_MODE (TREE_TYPE (scalar_dest))))
+ /* Exception are bitwise binary operations. */
+ && code != BIT_IOR_EXPR
+ && code != BIT_XOR_EXPR
+ && code != BIT_AND_EXPR)
+ {
+ if (vect_print_dump_info (REPORT_DETAILS))
+ fprintf (vect_dump, "bit-precision arithmetic not supported.");
+ return false;
+ }
+
op0 = gimple_assign_rhs1 (stmt);
if (!vect_is_simple_use_1 (op0, loop_vinfo, bb_vinfo,
&def_stmt, &def, &dt[0], &vectype))
if (! ((INTEGRAL_TYPE_P (TREE_TYPE (scalar_dest))
&& INTEGRAL_TYPE_P (TREE_TYPE (op0)))
|| (SCALAR_FLOAT_TYPE_P (TREE_TYPE (scalar_dest))
- && SCALAR_FLOAT_TYPE_P (TREE_TYPE (op0))
- && CONVERT_EXPR_CODE_P (code))))
+ && SCALAR_FLOAT_TYPE_P (TREE_TYPE (op0)))))
return false;
+
+ if (INTEGRAL_TYPE_P (TREE_TYPE (scalar_dest))
+ && ((TYPE_PRECISION (TREE_TYPE (scalar_dest))
+ != GET_MODE_PRECISION (TYPE_MODE (TREE_TYPE (scalar_dest))))
+ || ((TYPE_PRECISION (TREE_TYPE (op0))
+ != GET_MODE_PRECISION (TYPE_MODE (TREE_TYPE (op0)))))))
+ {
+ if (vect_print_dump_info (REPORT_DETAILS))
+ fprintf (vect_dump, "type demotion to/from bit-precision unsupported.");
+ return false;
+ }
+
if (!vect_is_simple_use_1 (op0, loop_vinfo, bb_vinfo,
&def_stmt, &def, &dt[0], &vectype_in))
{
VEC (tree, heap) *vec_oprnds0 = NULL, *vec_oprnds1 = NULL;
VEC (tree, heap) *vec_dsts = NULL, *interm_types = NULL, *tmp_vec_dsts = NULL;
bb_vec_info bb_vinfo = STMT_VINFO_BB_VINFO (stmt_info);
+ unsigned int k;
if (!STMT_VINFO_RELEVANT_P (stmt_info) && !bb_vinfo)
return false;
code = gimple_assign_rhs_code (stmt);
if (!CONVERT_EXPR_CODE_P (code)
- && code != WIDEN_MULT_EXPR)
+ && code != WIDEN_MULT_EXPR
+ && code != WIDEN_LSHIFT_EXPR)
return false;
scalar_dest = gimple_assign_lhs (stmt);
&& SCALAR_FLOAT_TYPE_P (TREE_TYPE (op0))
&& CONVERT_EXPR_CODE_P (code))))
return false;
+
+ if (INTEGRAL_TYPE_P (TREE_TYPE (scalar_dest))
+ && ((TYPE_PRECISION (TREE_TYPE (scalar_dest))
+ != GET_MODE_PRECISION (TYPE_MODE (TREE_TYPE (scalar_dest))))
+ || ((TYPE_PRECISION (TREE_TYPE (op0))
+ != GET_MODE_PRECISION (TYPE_MODE (TREE_TYPE (op0)))))))
+ {
+ if (vect_print_dump_info (REPORT_DETAILS))
+ fprintf (vect_dump, "type promotion to/from bit-precision "
+ "unsupported.");
+ return false;
+ }
+
if (!vect_is_simple_use_1 (op0, loop_vinfo, bb_vinfo,
&def_stmt, &def, &dt[0], &vectype_in))
{
bool ok;
op1 = gimple_assign_rhs2 (stmt);
- if (code == WIDEN_MULT_EXPR)
+ if (code == WIDEN_MULT_EXPR || code == WIDEN_LSHIFT_EXPR)
{
/* For WIDEN_MULT_EXPR, if OP0 is a constant, use the type of
OP1. */
fprintf (vect_dump, "transform type promotion operation. ncopies = %d.",
ncopies);
- if (code == WIDEN_MULT_EXPR)
+ if (code == WIDEN_MULT_EXPR || code == WIDEN_LSHIFT_EXPR)
{
if (CONSTANT_CLASS_P (op0))
op0 = fold_convert (TREE_TYPE (op1), op0);
if (op_type == binary_op)
vec_oprnds1 = VEC_alloc (tree, heap, 1);
}
+ else if (code == WIDEN_LSHIFT_EXPR)
+ vec_oprnds1 = VEC_alloc (tree, heap, slp_node->vec_stmts_size);
/* In case the vectorization factor (VF) is bigger than the number
of elements that we can fit in a vectype (nunits), we have to generate
if (j == 0)
{
if (slp_node)
- vect_get_slp_defs (op0, op1, slp_node, &vec_oprnds0,
- &vec_oprnds1, -1);
- else
+ {
+ if (code == WIDEN_LSHIFT_EXPR)
+ {
+ vec_oprnd1 = op1;
+ /* Store vec_oprnd1 for every vector stmt to be created
+ for SLP_NODE. We check during the analysis that all
+ the shift arguments are the same. */
+ for (k = 0; k < slp_node->vec_stmts_size - 1; k++)
+ VEC_quick_push (tree, vec_oprnds1, vec_oprnd1);
+
+ vect_get_slp_defs (op0, NULL_TREE, slp_node, &vec_oprnds0, NULL,
+ -1);
+ }
+ else
+ vect_get_slp_defs (op0, op1, slp_node, &vec_oprnds0,
+ &vec_oprnds1, -1);
+ }
+ else
{
vec_oprnd0 = vect_get_vec_def_for_operand (op0, stmt, NULL);
VEC_quick_push (tree, vec_oprnds0, vec_oprnd0);
if (op_type == binary_op)
{
- vec_oprnd1 = vect_get_vec_def_for_operand (op1, stmt, NULL);
+ if (code == WIDEN_LSHIFT_EXPR)
+ vec_oprnd1 = op1;
+ else
+ vec_oprnd1 = vect_get_vec_def_for_operand (op1, stmt, NULL);
VEC_quick_push (tree, vec_oprnds1, vec_oprnd1);
}
}
VEC_replace (tree, vec_oprnds0, 0, vec_oprnd0);
if (op_type == binary_op)
{
- vec_oprnd1 = vect_get_vec_def_for_stmt_copy (dt[1], vec_oprnd1);
+ if (code == WIDEN_LSHIFT_EXPR)
+ vec_oprnd1 = op1;
+ else
+ vec_oprnd1 = vect_get_vec_def_for_stmt_copy (dt[1], vec_oprnd1);
VEC_replace (tree, vec_oprnds1, 0, vec_oprnd1);
}
}
return false;
scalar_dest = gimple_assign_lhs (stmt);
+ if (TREE_CODE (scalar_dest) == VIEW_CONVERT_EXPR
+ && is_pattern_stmt_p (stmt_info))
+ scalar_dest = TREE_OPERAND (scalar_dest, 0);
if (TREE_CODE (scalar_dest) != ARRAY_REF
&& TREE_CODE (scalar_dest) != INDIRECT_REF
&& TREE_CODE (scalar_dest) != COMPONENT_REF
return false;
}
- /* The scalar rhs type needs to be trivially convertible to the vector
- component type. This should always be the case. */
elem_type = TREE_TYPE (vectype);
- if (!useless_type_conversion_p (elem_type, TREE_TYPE (op)))
- {
- if (vect_print_dump_info (REPORT_DETAILS))
- fprintf (vect_dump, "??? operands of different types");
- return false;
- }
-
vec_mode = TYPE_MODE (vectype);
+
/* FORNOW. In some cases can vectorize even if data-type not supported
(e.g. - array initialization with 0). */
if (optab_handler (mov_optab, vec_mode) == CODE_FOR_nothing)
}
/* Given a vector type VECTYPE returns a builtin DECL to be used
- for vector permutation and stores a mask into *MASK that implements
- reversal of the vector elements. If that is impossible to do
- returns NULL (and *MASK is unchanged). */
+ for vector permutation and returns the mask that implements
+ reversal of the vector elements. If that is impossible to do,
+ returns NULL. */
static tree
-perm_mask_for_reverse (tree vectype, tree *mask)
+perm_mask_for_reverse (tree vectype)
{
- tree builtin_decl;
- tree mask_element_type, mask_type;
- tree mask_vec = NULL;
- int i;
- int nunits;
- if (!targetm.vectorize.builtin_vec_perm)
- return NULL;
-
- builtin_decl = targetm.vectorize.builtin_vec_perm (vectype,
- &mask_element_type);
- if (!builtin_decl || !mask_element_type)
- return NULL;
+ tree mask_elt_type, mask_type, mask_vec;
+ int i, nunits;
+ unsigned char *sel;
- mask_type = get_vectype_for_scalar_type (mask_element_type);
nunits = TYPE_VECTOR_SUBPARTS (vectype);
- if (!mask_type
- || TYPE_VECTOR_SUBPARTS (vectype) != TYPE_VECTOR_SUBPARTS (mask_type))
+ sel = XALLOCAVEC (unsigned char, nunits);
+
+ for (i = 0; i < nunits; ++i)
+ sel[i] = nunits - 1 - i;
+
+ if (!can_vec_perm_p (TYPE_MODE (vectype), false, sel))
return NULL;
+ mask_elt_type
+ = lang_hooks.types.type_for_size
+ (TREE_INT_CST_LOW (TYPE_SIZE (TREE_TYPE (vectype))), 1);
+ mask_type = get_vectype_for_scalar_type (mask_elt_type);
+
+ mask_vec = NULL;
for (i = 0; i < nunits; i++)
- mask_vec = tree_cons (NULL, build_int_cst (mask_element_type, i), mask_vec);
+ mask_vec = tree_cons (NULL, build_int_cst (mask_elt_type, i), mask_vec);
mask_vec = build_vector (mask_type, mask_vec);
- if (!targetm.vectorize.builtin_vec_perm_ok (vectype, mask_vec))
- return NULL;
- if (mask)
- *mask = mask_vec;
- return builtin_decl;
+ return mask_vec;
}
/* Given a vector variable X, that was generated for the scalar LHS of
reverse_vec_elements (tree x, gimple stmt, gimple_stmt_iterator *gsi)
{
tree vectype = TREE_TYPE (x);
- tree mask_vec, builtin_decl;
- tree perm_dest, data_ref;
+ tree mask_vec, perm_dest, data_ref;
gimple perm_stmt;
- builtin_decl = perm_mask_for_reverse (vectype, &mask_vec);
+ mask_vec = perm_mask_for_reverse (vectype);
perm_dest = vect_create_destination_var (gimple_assign_lhs (stmt), vectype);
/* Generate the permute statement. */
- perm_stmt = gimple_build_call (builtin_decl, 3, x, x, mask_vec);
- if (!useless_type_conversion_p (vectype,
- TREE_TYPE (TREE_TYPE (builtin_decl))))
- {
- tree tem = create_tmp_reg (TREE_TYPE (TREE_TYPE (builtin_decl)), NULL);
- tem = make_ssa_name (tem, perm_stmt);
- gimple_call_set_lhs (perm_stmt, tem);
- vect_finish_stmt_generation (stmt, perm_stmt, gsi);
- perm_stmt = gimple_build_assign (NULL_TREE,
- build1 (VIEW_CONVERT_EXPR,
- vectype, tem));
- }
+ perm_stmt = gimple_build_assign_with_ops3 (VEC_PERM_EXPR, perm_dest,
+ x, x, mask_vec);
data_ref = make_ssa_name (perm_dest, perm_stmt);
gimple_set_lhs (perm_stmt, data_ref);
vect_finish_stmt_generation (stmt, perm_stmt, gsi);
bool strided_load = false;
bool load_lanes_p = false;
gimple first_stmt;
- tree scalar_type;
bool inv_p;
bool negative;
bool compute_in_loop = false;
return false;
}
- scalar_type = TREE_TYPE (DR_REF (dr));
+ elem_type = TREE_TYPE (vectype);
mode = TYPE_MODE (vectype);
/* FORNOW. In some cases can vectorize even if data-type not supported
return false;
}
- /* The vector component type needs to be trivially convertible to the
- scalar lhs. This should always be the case. */
- elem_type = TREE_TYPE (vectype);
- if (!useless_type_conversion_p (TREE_TYPE (scalar_dest), elem_type))
- {
- if (vect_print_dump_info (REPORT_DETAILS))
- fprintf (vect_dump, "??? operands of different types");
- return false;
- }
-
/* Check if the load is a part of an interleaving chain. */
if (STMT_VINFO_STRIDED_ACCESS (stmt_info))
{
fprintf (vect_dump, "negative step but alignment required.");
return false;
}
- if (!perm_mask_for_reverse (vectype, NULL))
+ if (!perm_mask_for_reverse (vectype))
{
if (vect_print_dump_info (REPORT_DETAILS))
fprintf (vect_dump, "negative step and reversing not supported.");
if (strided_load)
{
first_stmt = GROUP_FIRST_ELEMENT (stmt_info);
+ if (slp
+ && !SLP_INSTANCE_LOAD_PERMUTATION (slp_node_instance)
+ && first_stmt != VEC_index (gimple, SLP_TREE_SCALAR_STMTS (slp_node), 0))
+ first_stmt = VEC_index (gimple, SLP_TREE_SCALAR_STMTS (slp_node), 0);
+
/* Check if the chain of loads is already vectorized. */
if (STMT_VINFO_VEC_STMT (vinfo_for_stmt (first_stmt)))
{
msq = new_temp;
bump = size_binop (MULT_EXPR, vs_minus_1,
- TYPE_SIZE_UNIT (scalar_type));
+ TYPE_SIZE_UNIT (elem_type));
ptr = bump_vector_ptr (dataref_ptr, NULL, gsi, stmt, bump);
new_stmt = gimple_build_assign_with_ops
(BIT_AND_EXPR, NULL_TREE, ptr,
/* 4. Handle invariant-load. */
if (inv_p && !bb_vinfo)
{
- tree vec_inv;
+ tree tem, vec_inv;
gimple_stmt_iterator gsi2 = *gsi;
gcc_assert (!strided_load);
gsi_next (&gsi2);
- vec_inv = build_vector_from_val (vectype, scalar_dest);
+ tem = scalar_dest;
+ if (!useless_type_conversion_p (TREE_TYPE (vectype),
+ TREE_TYPE (tem)))
+ {
+ tem = fold_convert (TREE_TYPE (vectype), tem);
+ tem = force_gimple_operand_gsi (&gsi2, tem, true,
+ NULL_TREE, true,
+ GSI_SAME_STMT);
+ }
+ vec_inv = build_vector_from_val (vectype, tem);
new_temp = vect_init_vector (stmt, vec_inv,
vectype, &gsi2);
new_stmt = SSA_NAME_DEF_STMT (new_temp);
Returns whether a COND can be vectorized. Checks whether
condition operands are supportable using vec_is_simple_use. */
-bool
+static bool
vect_is_simple_cond (tree cond, loop_vec_info loop_vinfo, tree *comp_vectype)
{
tree lhs, rhs;
if (nbytes < TYPE_ALIGN_UNIT (scalar_type))
return NULL_TREE;
- /* If we'd build a vector type of elements whose mode precision doesn't
- match their types precision we'll get mismatched types on vector
- extracts via BIT_FIELD_REFs. This effectively means we disable
- vectorization of bool and/or enum types in some languages. */
+ /* For vector types of elements whose mode precision doesn't
+ match their type's precision we use an element type of mode
+ precision. The vectorization routines will have to make sure
+ they support the proper result truncation/extension. */
if (INTEGRAL_TYPE_P (scalar_type)
&& GET_MODE_BITSIZE (inner_mode) != TYPE_PRECISION (scalar_type))
- return NULL_TREE;
+ scalar_type = build_nonstandard_integer_type (GET_MODE_BITSIZE (inner_mode),
+ TYPE_UNSIGNED (scalar_type));
if (GET_MODE_CLASS (inner_mode) != MODE_INT
&& GET_MODE_CLASS (inner_mode) != MODE_FLOAT)
}
break;
+ case WIDEN_LSHIFT_EXPR:
+ if (BYTES_BIG_ENDIAN)
+ {
+ c1 = VEC_WIDEN_LSHIFT_HI_EXPR;
+ c2 = VEC_WIDEN_LSHIFT_LO_EXPR;
+ }
+ else
+ {
+ c2 = VEC_WIDEN_LSHIFT_HI_EXPR;
+ c1 = VEC_WIDEN_LSHIFT_LO_EXPR;
+ }
+ break;
+
CASE_CONVERT:
if (BYTES_BIG_ENDIAN)
{