X-Git-Url: http://git.sourceforge.jp/view?a=blobdiff_plain;f=gcc%2Ftree-vect-data-refs.c;h=2d8142e84608b796b53a8f739367195ccefa26cd;hb=fe10f73e500e04b2006cdcfd1349e93dfdcebd0e;hp=e6f03813a402528a5de5553f916df3809a3f98b4;hpb=1965a2418178355ae64b5dab96a0eb822e300105;p=pf3gnuchains%2Fgcc-fork.git

diff --git a/gcc/tree-vect-data-refs.c b/gcc/tree-vect-data-refs.c
index e6f03813a40..2d8142e8460 100644
--- a/gcc/tree-vect-data-refs.c
+++ b/gcc/tree-vect-data-refs.c
@@ -1141,11 +1141,7 @@ vector_alignment_reachable_p (struct data_reference *dr)
   if (!known_alignment_for_access_p (dr))
     {
       tree type = (TREE_TYPE (DR_REF (dr)));
-      tree ba = DR_BASE_OBJECT (dr);
-      bool is_packed = false;
-
-      if (ba)
-        is_packed = contains_packed_reference (ba);
+      bool is_packed = contains_packed_reference (DR_REF (dr));
 
       if (compare_tree_int (TYPE_SIZE (type), TYPE_ALIGN (type)) > 0)
         is_packed = true;
@@ -2319,7 +2315,7 @@ vect_analyze_data_ref_access (struct data_reference *dr)
   stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
   loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
   struct loop *loop = NULL;
-  HOST_WIDE_INT dr_step = TREE_INT_CST_LOW (step);
+  HOST_WIDE_INT dr_step;
 
   if (loop_vinfo)
     loop = LOOP_VINFO_LOOP (loop_vinfo);
@@ -2332,6 +2328,7 @@ vect_analyze_data_ref_access (struct data_reference *dr)
     }
 
   /* Allow invariant loads in loops.  */
+  dr_step = TREE_INT_CST_LOW (step);
   if (loop_vinfo && dr_step == 0)
     {
       GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt)) = NULL;
@@ -2871,10 +2868,6 @@ vect_analyze_data_refs (loop_vec_info loop_vinfo,
           return false;
         }
 
-      base = unshare_expr (DR_BASE_ADDRESS (dr));
-      offset = unshare_expr (DR_OFFSET (dr));
-      init = unshare_expr (DR_INIT (dr));
-
       if (stmt_can_throw_internal (stmt))
         {
           if (vect_print_dump_info (REPORT_UNVECTORIZED_LOCATIONS))
@@ -2896,6 +2889,32 @@ vect_analyze_data_refs (loop_vec_info loop_vinfo,
           return false;
         }
 
+      if (TREE_CODE (DR_REF (dr)) == COMPONENT_REF
+          && DECL_BIT_FIELD (TREE_OPERAND (DR_REF (dr), 1)))
+        {
+          if (vect_print_dump_info (REPORT_UNVECTORIZED_LOCATIONS))
+            {
+              fprintf (vect_dump, "not vectorized: statement is bitfield "
+                       "access ");
+              print_gimple_stmt (vect_dump, stmt, 0, TDF_SLIM);
+            }
+
+          if (bb_vinfo)
+            {
+              STMT_VINFO_VECTORIZABLE (stmt_info) = false;
+              stop_bb_analysis = true;
+              continue;
+            }
+
+          if (gather)
+            free_data_ref (dr);
+          return false;
+        }
+
+      base = unshare_expr (DR_BASE_ADDRESS (dr));
+      offset = unshare_expr (DR_OFFSET (dr));
+      init = unshare_expr (DR_INIT (dr));
+
       if (is_gimple_call (stmt))
         {
           if (vect_print_dump_info (REPORT_UNVECTORIZED_LOCATIONS))
@@ -3794,15 +3813,13 @@ vect_create_destination_var (tree scalar_dest, tree vectype)
 
 /* Function vect_strided_store_supported.
 
-   Returns TRUE is INTERLEAVE_HIGH and INTERLEAVE_LOW operations are supported,
-   and FALSE otherwise.  */
+   Returns TRUE if interleave high and interleave low permutations
+   are supported, and FALSE otherwise.  */
 
 bool
 vect_strided_store_supported (tree vectype, unsigned HOST_WIDE_INT count)
 {
-  enum machine_mode mode;
-
-  mode = TYPE_MODE (vectype);
+  enum machine_mode mode = TYPE_MODE (vectype);
 
   /* vect_permute_store_chain requires the group size to be a power of two.  */
   if (exact_log2 (count) == -1)
@@ -3813,7 +3830,7 @@ vect_strided_store_supported (tree vectype, unsigned HOST_WIDE_INT count)
       return false;
     }
 
-  /* Check that the operation is supported.  */
+  /* Check that the permutation is supported.  */
   if (VECTOR_MODE_P (mode))
     {
       unsigned int i, nelt = GET_MODE_NUNITS (mode);
       unsigned char *sel = XALLOCAVEC (unsigned char, nelt);
@@ -3923,11 +3940,9 @@ vect_permute_store_chain (VEC(tree,heap) *dr_chain,
   tree vectype = STMT_VINFO_VECTYPE (vinfo_for_stmt (stmt));
   tree perm_mask_low, perm_mask_high;
   unsigned int i, n;
-  unsigned int j, nelt = GET_MODE_NUNITS (TYPE_MODE (vectype));
+  unsigned int j, nelt = TYPE_VECTOR_SUBPARTS (vectype);
   unsigned char *sel = XALLOCAVEC (unsigned char, nelt);
 
-  gcc_assert (vect_strided_store_supported (vectype, length));
-
   *result_chain = VEC_copy (tree, heap, dr_chain);
 
   for (i = 0, n = nelt / 2; i < n; i++)
     {
@@ -3936,9 +3951,12 @@ vect_permute_store_chain (VEC(tree,heap) *dr_chain,
       sel[i * 2] = i;
       sel[i * 2 + 1] = i + nelt;
     }
   perm_mask_high = vect_gen_perm_mask (vectype, sel);
+  gcc_assert (perm_mask_high != NULL);
+
   for (i = 0; i < nelt; i++)
     sel[i] += nelt / 2;
   perm_mask_low = vect_gen_perm_mask (vectype, sel);
+  gcc_assert (perm_mask_low != NULL);
 
   for (i = 0, n = exact_log2 (length); i < n; i++)
     {
@@ -4246,16 +4264,13 @@ vect_setup_realignment (gimple stmt, gimple_stmt_iterator *gsi,
 
 /* Function vect_strided_load_supported.
 
-   Returns TRUE is EXTRACT_EVEN and EXTRACT_ODD operations are supported,
+   Returns TRUE if even and odd permutations are supported,
    and FALSE otherwise.  */
 
 bool
 vect_strided_load_supported (tree vectype, unsigned HOST_WIDE_INT count)
 {
-  optab ee_optab, eo_optab;
-  enum machine_mode mode;
-
-  mode = TYPE_MODE (vectype);
+  enum machine_mode mode = TYPE_MODE (vectype);
 
   /* vect_permute_load_chain requires the group size to be a power of two.  */
   if (exact_log2 (count) == -1)
@@ -4266,18 +4281,22 @@ vect_strided_load_supported (tree vectype, unsigned HOST_WIDE_INT count)
       return false;
     }
 
-  ee_optab = optab_for_tree_code (VEC_EXTRACT_EVEN_EXPR,
-                                  vectype, optab_default);
-  eo_optab = optab_for_tree_code (VEC_EXTRACT_ODD_EXPR,
-                                  vectype, optab_default);
-  if (ee_optab && eo_optab
-      && optab_handler (ee_optab, mode) != CODE_FOR_nothing
-      && optab_handler (eo_optab, mode) != CODE_FOR_nothing)
-    return true;
+  /* Check that the permutation is supported.  */
+  if (VECTOR_MODE_P (mode))
+    {
+      unsigned int i, nelt = GET_MODE_NUNITS (mode);
+      unsigned char *sel = XALLOCAVEC (unsigned char, nelt);
 
-  if (can_vec_perm_for_code_p (VEC_EXTRACT_EVEN_EXPR, mode, NULL)
-      && can_vec_perm_for_code_p (VEC_EXTRACT_ODD_EXPR, mode, NULL))
-    return true;
+      for (i = 0; i < nelt; i++)
+        sel[i] = i * 2;
+      if (can_vec_perm_p (mode, false, sel))
+        {
+          for (i = 0; i < nelt; i++)
+            sel[i] = i * 2 + 1;
+          if (can_vec_perm_p (mode, false, sel))
+            return true;
+        }
+    }
 
   if (vect_print_dump_info (REPORT_DETAILS))
     fprintf (vect_dump, "extract even/odd not supported by target");
@@ -4379,17 +4398,28 @@ vect_permute_load_chain (VEC(tree,heap) *dr_chain,
                          VEC(tree,heap) **result_chain)
 {
   tree perm_dest, data_ref, first_vect, second_vect;
+  tree perm_mask_even, perm_mask_odd;
   gimple perm_stmt;
   tree vectype = STMT_VINFO_VECTYPE (vinfo_for_stmt (stmt));
-  int i;
-  unsigned int j;
-
-  gcc_assert (vect_strided_load_supported (vectype, length));
+  unsigned int i, j, log_length = exact_log2 (length);
+  unsigned nelt = TYPE_VECTOR_SUBPARTS (vectype);
+  unsigned char *sel = XALLOCAVEC (unsigned char, nelt);
 
   *result_chain = VEC_copy (tree, heap, dr_chain);
-  for (i = 0; i < exact_log2 (length); i++)
+
+  for (i = 0; i < nelt; ++i)
+    sel[i] = i * 2;
+  perm_mask_even = vect_gen_perm_mask (vectype, sel);
+  gcc_assert (perm_mask_even != NULL);
+
+  for (i = 0; i < nelt; ++i)
+    sel[i] = i * 2 + 1;
+  perm_mask_odd = vect_gen_perm_mask (vectype, sel);
+  gcc_assert (perm_mask_odd != NULL);
+
+  for (i = 0; i < log_length; i++)
     {
-      for (j = 0; j < length; j +=2)
+      for (j = 0; j < length; j += 2)
         {
           first_vect = VEC_index (tree, dr_chain, j);
           second_vect = VEC_index (tree, dr_chain, j+1);
@@ -4399,9 +4429,9 @@ vect_permute_load_chain (VEC(tree,heap) *dr_chain,
           DECL_GIMPLE_REG_P (perm_dest) = 1;
           add_referenced_var (perm_dest);
 
-          perm_stmt = gimple_build_assign_with_ops (VEC_EXTRACT_EVEN_EXPR,
-                                                    perm_dest, first_vect,
-                                                    second_vect);
+          perm_stmt = gimple_build_assign_with_ops3 (VEC_PERM_EXPR, perm_dest,
+                                                     first_vect, second_vect,
+                                                     perm_mask_even);
 
           data_ref = make_ssa_name (perm_dest, perm_stmt);
           gimple_assign_set_lhs (perm_stmt, data_ref);
@@ -4415,9 +4445,10 @@ vect_permute_load_chain (VEC(tree,heap) *dr_chain,
           DECL_GIMPLE_REG_P (perm_dest) = 1;
           add_referenced_var (perm_dest);
 
-          perm_stmt = gimple_build_assign_with_ops (VEC_EXTRACT_ODD_EXPR,
-                                                    perm_dest, first_vect,
-                                                    second_vect);
+          perm_stmt = gimple_build_assign_with_ops3 (VEC_PERM_EXPR, perm_dest,
+                                                     first_vect, second_vect,
+                                                     perm_mask_odd);
+
           data_ref = make_ssa_name (perm_dest, perm_stmt);
           gimple_assign_set_lhs (perm_stmt, data_ref);
           vect_finish_stmt_generation (stmt, perm_stmt, gsi);
@@ -4659,12 +4690,7 @@ vect_supportable_dr_alignment (struct data_reference *dr,
           return dr_explicit_realign_optimized;
         }
       if (!known_alignment_for_access_p (dr))
-        {
-          tree ba = DR_BASE_OBJECT (dr);
-
-          if (ba)
-            is_packed = contains_packed_reference (ba);
-        }
+        is_packed = contains_packed_reference (DR_REF (dr));
 
       if (targetm.vectorize.
             support_vector_misalignment (mode, type,
@@ -4678,12 +4704,7 @@ vect_supportable_dr_alignment (struct data_reference *dr,
       tree type = (TREE_TYPE (DR_REF (dr)));
 
      if (!known_alignment_for_access_p (dr))
-        {
-          tree ba = DR_BASE_OBJECT (dr);
-
-          if (ba)
-            is_packed = contains_packed_reference (ba);
-        }
+        is_packed = contains_packed_reference (DR_REF (dr));
 
       if (targetm.vectorize.
             support_vector_misalignment (mode, type,
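
The common thread of the patch is that interleave-high/low and extract-even/odd are no longer separate tree codes with their own optabs; instead the vectorizer fills a selector array and asks the target, through can_vec_perm_p / vect_gen_perm_mask, whether that generic permutation is supported, then emits a VEC_PERM_EXPR with the resulting mask. The following standalone C sketch is not GCC code: it only prints the selector patterns built by the loops in vect_strided_store_supported, vect_permute_store_chain, vect_strided_load_supported and vect_permute_load_chain. The element count NELT and the print_sel helper are illustrative assumptions; in the compiler the element count comes from GET_MODE_NUNITS (mode) or TYPE_VECTOR_SUBPARTS (vectype).

/* Standalone illustration (not GCC internals) of the permutation selectors
   used by the patch above.  NELT is an assumed example vector width.  */
#include <stdio.h>

#define NELT 4

static void
print_sel (const char *name, const unsigned char *sel)
{
  unsigned int i;

  printf ("%-14s:", name);
  for (i = 0; i < NELT; i++)
    printf (" %u", (unsigned int) sel[i]);
  printf ("\n");
}

int
main (void)
{
  unsigned char sel[NELT];
  unsigned int i;

  /* Store side: the selector assigned to perm_mask_high picks element i of
     the first input and element i of the second input alternately, exactly
     as in vect_strided_store_supported / vect_permute_store_chain.  */
  for (i = 0; i < NELT / 2; i++)
    {
      sel[i * 2] = i;
      sel[i * 2 + 1] = i + NELT;
    }
  print_sel ("perm_mask_high", sel);   /* 0 4 1 5 for NELT == 4 */

  /* perm_mask_low reuses the same selector shifted by NELT / 2, mirroring
     the patch's "sel[i] += nelt / 2".  */
  for (i = 0; i < NELT; i++)
    sel[i] += NELT / 2;
  print_sel ("perm_mask_low", sel);    /* 2 6 3 7 */

  /* Load side: even and odd element extraction, as built in
     vect_strided_load_supported / vect_permute_load_chain.  */
  for (i = 0; i < NELT; i++)
    sel[i] = i * 2;
  print_sel ("perm_mask_even", sel);   /* 0 2 4 6 */

  for (i = 0; i < NELT; i++)
    sel[i] = i * 2 + 1;
  print_sel ("perm_mask_odd", sel);    /* 1 3 5 7 */

  return 0;
}

In a VEC_PERM_EXPR mask, indices 0 .. NELT-1 select from the first input vector and NELT .. 2*NELT-1 from the second, so for example the even selector above keeps elements 0 and 2 of each input, which is what VEC_EXTRACT_EVEN_EXPR used to express before this change.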