X-Git-Url: http://git.sourceforge.jp/view?p=pf3gnuchains%2Fgcc-fork.git;a=blobdiff_plain;f=gcc%2Ffortran%2Ftrans-intrinsic.c;h=2bc628d40f29e5c26fc480d697d6ad8e7a0cd9d0;hp=2937734b9cffa2a1738819e2a0c229c2a261bfd2;hb=9a457ae7ab889f27e72e3ded595fb5e4c1140d21;hpb=808656b4f963e7b773c378c561dbced38b52274f diff --git a/gcc/fortran/trans-intrinsic.c b/gcc/fortran/trans-intrinsic.c index 2937734b9cf..2bc628d40f2 100644 --- a/gcc/fortran/trans-intrinsic.c +++ b/gcc/fortran/trans-intrinsic.c @@ -1,5 +1,5 @@ /* Intrinsic translation - Copyright (C) 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010 + Copyright (C) 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011 Free Software Foundation, Inc. Contributed by Paul Brook and Steven Bosscher @@ -139,7 +139,7 @@ static tree builtin_decl_for_precision (enum built_in_function base_built_in, int precision) { - int i = END_BUILTINS; + enum built_in_function i = END_BUILTINS; gfc_intrinsic_map_t *m; for (m = gfc_intrinsic_map; m->double_built_in != base_built_in ; m++) @@ -158,7 +158,7 @@ builtin_decl_for_precision (enum built_in_function base_built_in, return m->real16_decl; } - return (i == END_BUILTINS ? NULL_TREE : built_in_decls[i]); + return (i == END_BUILTINS ? NULL_TREE : builtin_decl_explicit (i)); } @@ -331,7 +331,8 @@ gfc_conv_intrinsic_conversion (gfc_se * se, gfc_expr * expr) tree artype; artype = TREE_TYPE (TREE_TYPE (args[0])); - args[0] = fold_build1 (REALPART_EXPR, artype, args[0]); + args[0] = fold_build1_loc (input_location, REALPART_EXPR, artype, + args[0]); } se->expr = convert (type, args[0]); @@ -357,11 +358,12 @@ build_fixbound_expr (stmtblock_t * pblock, tree arg, tree type, int up) intval = gfc_evaluate_now (intval, pblock); tmp = convert (argtype, intval); - cond = fold_build2 (up ? GE_EXPR : LE_EXPR, boolean_type_node, tmp, arg); + cond = fold_build2_loc (input_location, up ? GE_EXPR : LE_EXPR, + boolean_type_node, tmp, arg); - tmp = fold_build2 (up ? PLUS_EXPR : MINUS_EXPR, type, intval, - build_int_cst (type, 1)); - tmp = fold_build3 (COND_EXPR, type, cond, intval, tmp); + tmp = fold_build2_loc (input_location, up ? PLUS_EXPR : MINUS_EXPR, type, + intval, build_int_cst (type, 1)); + tmp = fold_build3_loc (input_location, COND_EXPR, type, cond, intval, tmp); return tmp; } @@ -424,7 +426,7 @@ build_fix_expr (stmtblock_t * pblock, tree arg, tree type, break; case RND_TRUNC: - return fold_build1 (FIX_TRUNC_EXPR, type, arg); + return fold_build1_loc (input_location, FIX_TRUNC_EXPR, type, arg); break; default: @@ -456,7 +458,7 @@ gfc_conv_intrinsic_aint (gfc_se * se, gfc_expr * expr, enum rounding_mode op) int kind; kind = expr->ts.kind; - nargs = gfc_intrinsic_argument_list_length (expr); + nargs = gfc_intrinsic_argument_list_length (expr); decl = NULL_TREE; /* We have builtin functions for some cases. 
*/ @@ -496,17 +498,21 @@ gfc_conv_intrinsic_aint (gfc_se * se, gfc_expr * expr, enum rounding_mode op) n = gfc_validate_kind (BT_INTEGER, kind, false); mpfr_set_z (huge, gfc_integer_kinds[n].huge, GFC_RND_MODE); tmp = gfc_conv_mpfr_to_tree (huge, kind, 0); - cond = fold_build2 (LT_EXPR, boolean_type_node, arg[0], tmp); + cond = fold_build2_loc (input_location, LT_EXPR, boolean_type_node, arg[0], + tmp); mpfr_neg (huge, huge, GFC_RND_MODE); tmp = gfc_conv_mpfr_to_tree (huge, kind, 0); - tmp = fold_build2 (GT_EXPR, boolean_type_node, arg[0], tmp); - cond = fold_build2 (TRUTH_AND_EXPR, boolean_type_node, cond, tmp); + tmp = fold_build2_loc (input_location, GT_EXPR, boolean_type_node, arg[0], + tmp); + cond = fold_build2_loc (input_location, TRUTH_AND_EXPR, boolean_type_node, + cond, tmp); itype = gfc_get_int_type (kind); tmp = build_fix_expr (&se->pre, arg[0], itype, op); tmp = convert (type, tmp); - se->expr = fold_build3 (COND_EXPR, type, cond, tmp, arg[0]); + se->expr = fold_build3_loc (input_location, COND_EXPR, type, cond, tmp, + arg[0]); mpfr_clear (huge); } @@ -544,7 +550,8 @@ gfc_conv_intrinsic_int (gfc_se * se, gfc_expr * expr, enum rounding_mode op) tree artype; artype = TREE_TYPE (TREE_TYPE (args[0])); - args[0] = fold_build1 (REALPART_EXPR, artype, args[0]); + args[0] = fold_build1_loc (input_location, REALPART_EXPR, artype, + args[0]); } se->expr = build_fix_expr (&se->pre, args[0], type, op); @@ -560,7 +567,8 @@ gfc_conv_intrinsic_imagpart (gfc_se * se, gfc_expr * expr) tree arg; gfc_conv_intrinsic_function_args (se, expr, &arg, 1); - se->expr = fold_build1 (IMAGPART_EXPR, TREE_TYPE (TREE_TYPE (arg)), arg); + se->expr = fold_build1_loc (input_location, IMAGPART_EXPR, + TREE_TYPE (TREE_TYPE (arg)), arg); } @@ -572,7 +580,7 @@ gfc_conv_intrinsic_conjg (gfc_se * se, gfc_expr * expr) tree arg; gfc_conv_intrinsic_function_args (se, expr, &arg, 1); - se->expr = fold_build1 (CONJ_EXPR, TREE_TYPE (arg), arg); + se->expr = fold_build1_loc (input_location, CONJ_EXPR, TREE_TYPE (arg), arg); } @@ -605,7 +613,7 @@ void gfc_build_intrinsic_lib_fndecls (void) { gfc_intrinsic_map_t *m; - tree quad_decls[(int) END_BUILTINS]; + tree quad_decls[END_BUILTINS + 1]; if (gfc_real16_is_float128) { @@ -613,37 +621,38 @@ gfc_build_intrinsic_lib_fndecls (void) C99-like library functions. For now, we only handle __float128 q-suffixed functions. 
*/ - tree tmp, func_0, func_1, func_2, func_cabs, func_frexp; + tree type, complex_type, func_1, func_2, func_cabs, func_frexp; tree func_lround, func_llround, func_scalbn, func_cpow; - memset (quad_decls, 0, sizeof(tree) * (int) END_BUILTINS); + memset (quad_decls, 0, sizeof(tree) * (END_BUILTINS + 1)); - /* type (*) (void) */ - func_0 = build_function_type (float128_type_node, void_list_node); + type = float128_type_node; + complex_type = complex_float128_type_node; /* type (*) (type) */ - tmp = tree_cons (NULL_TREE, float128_type_node, void_list_node); - func_1 = build_function_type (float128_type_node, tmp); + func_1 = build_function_type_list (type, type, NULL_TREE); /* long (*) (type) */ - func_lround = build_function_type (long_integer_type_node, tmp); + func_lround = build_function_type_list (long_integer_type_node, + type, NULL_TREE); /* long long (*) (type) */ - func_llround = build_function_type (long_long_integer_type_node, tmp); + func_llround = build_function_type_list (long_long_integer_type_node, + type, NULL_TREE); /* type (*) (type, type) */ - tmp = tree_cons (NULL_TREE, float128_type_node, tmp); - func_2 = build_function_type (float128_type_node, tmp); + func_2 = build_function_type_list (type, type, type, NULL_TREE); /* type (*) (type, &int) */ - tmp = tree_cons (NULL_TREE, float128_type_node, void_list_node); - tmp = tree_cons (NULL_TREE, build_pointer_type (integer_type_node), tmp); - func_frexp = build_function_type (float128_type_node, tmp); + func_frexp + = build_function_type_list (type, + type, + build_pointer_type (integer_type_node), + NULL_TREE); /* type (*) (type, int) */ - tmp = tree_cons (NULL_TREE, float128_type_node, void_list_node); - tmp = tree_cons (NULL_TREE, integer_type_node, tmp); - func_scalbn = build_function_type (float128_type_node, tmp); + func_scalbn = build_function_type_list (type, + type, integer_type_node, NULL_TREE); /* type (*) (complex type) */ - tmp = tree_cons (NULL_TREE, complex_float128_type_node, void_list_node); - func_cabs = build_function_type (float128_type_node, tmp); + func_cabs = build_function_type_list (type, complex_type, NULL_TREE); /* complex type (*) (complex type, complex type) */ - tmp = tree_cons (NULL_TREE, complex_float128_type_node, tmp); - func_cpow = build_function_type (complex_float128_type_node, tmp); + func_cpow + = build_function_type_list (complex_type, + complex_type, complex_type, NULL_TREE); #define DEFINE_MATH_BUILTIN(ID, NAME, ARGTYPE) #define DEFINE_MATH_BUILTIN_C(ID, NAME, ARGTYPE) @@ -670,26 +679,28 @@ gfc_build_intrinsic_lib_fndecls (void) m->id != GFC_ISYM_NONE || m->double_built_in != END_BUILTINS; m++) { if (m->float_built_in != END_BUILTINS) - m->real4_decl = built_in_decls[m->float_built_in]; + m->real4_decl = builtin_decl_explicit (m->float_built_in); if (m->complex_float_built_in != END_BUILTINS) - m->complex4_decl = built_in_decls[m->complex_float_built_in]; + m->complex4_decl = builtin_decl_explicit (m->complex_float_built_in); if (m->double_built_in != END_BUILTINS) - m->real8_decl = built_in_decls[m->double_built_in]; + m->real8_decl = builtin_decl_explicit (m->double_built_in); if (m->complex_double_built_in != END_BUILTINS) - m->complex8_decl = built_in_decls[m->complex_double_built_in]; + m->complex8_decl = builtin_decl_explicit (m->complex_double_built_in); /* If real(kind=10) exists, it is always long double. 
*/ if (m->long_double_built_in != END_BUILTINS) - m->real10_decl = built_in_decls[m->long_double_built_in]; + m->real10_decl = builtin_decl_explicit (m->long_double_built_in); if (m->complex_long_double_built_in != END_BUILTINS) - m->complex10_decl = built_in_decls[m->complex_long_double_built_in]; + m->complex10_decl + = builtin_decl_explicit (m->complex_long_double_built_in); if (!gfc_real16_is_float128) { if (m->long_double_built_in != END_BUILTINS) - m->real16_decl = built_in_decls[m->long_double_built_in]; + m->real16_decl = builtin_decl_explicit (m->long_double_built_in); if (m->complex_long_double_built_in != END_BUILTINS) - m->complex16_decl = built_in_decls[m->complex_long_double_built_in]; + m->complex16_decl + = builtin_decl_explicit (m->complex_long_double_built_in); } else if (quad_decls[m->double_built_in] != NULL_TREE) { @@ -702,7 +713,6 @@ gfc_build_intrinsic_lib_fndecls (void) { /* Same thing for the complex ones. */ m->complex16_decl = quad_decls[m->double_built_in]; - m->real16_decl = quad_decls[m->double_built_in]; } } } @@ -714,7 +724,7 @@ static tree gfc_get_intrinsic_lib_fndecl (gfc_intrinsic_map_t * m, gfc_expr * expr) { tree type; - tree argtypes; + VEC(tree,gc) *argtypes; tree fndecl; gfc_actual_arglist *actual; tree *pdecl; @@ -795,14 +805,13 @@ gfc_get_intrinsic_lib_fndecl (gfc_intrinsic_map_t * m, gfc_expr * expr) ts->kind); } - argtypes = NULL_TREE; + argtypes = NULL; for (actual = expr->value.function.actual; actual; actual = actual->next) { type = gfc_typenode_for_spec (&actual->expr->ts); - argtypes = gfc_chainon_list (argtypes, type); + VEC_safe_push (tree, gc, argtypes, type); } - argtypes = chainon (argtypes, void_list_node); - type = build_function_type (gfc_typenode_for_spec (ts), argtypes); + type = build_function_type_vec (gfc_typenode_for_spec (ts), argtypes); fndecl = build_decl (input_location, FUNCTION_DECL, get_identifier (name), type); @@ -876,7 +885,7 @@ gfc_trans_same_strlen_check (const char* intr_name, locus* where, return; /* Compare the two string lengths. */ - cond = fold_build2 (NE_EXPR, boolean_type_node, a, b); + cond = fold_build2_loc (input_location, NE_EXPR, boolean_type_node, a, b); /* Output the runtime-check. */ name = gfc_build_cstring_const (intr_name); @@ -913,6 +922,399 @@ gfc_conv_intrinsic_exponent (gfc_se *se, gfc_expr *expr) se->expr = fold_convert (type, res); } + +/* Convert the last ref of a scalar coarray from an AR_ELEMENT to an + AR_FULL, suitable for the scalarizer. */ + +static gfc_ss * +walk_coarray (gfc_expr *e) +{ + gfc_ss *ss; + + gcc_assert (gfc_get_corank (e) > 0); + + ss = gfc_walk_expr (e); + + /* Fix scalar coarray. */ + if (ss == gfc_ss_terminator) + { + gfc_ref *ref; + + ref = e->ref; + while (ref) + { + if (ref->type == REF_ARRAY + && ref->u.ar.codimen > 0) + break; + + ref = ref->next; + } + + gcc_assert (ref != NULL); + if (ref->u.ar.type == AR_ELEMENT) + ref->u.ar.type = AR_SECTION; + ss = gfc_reverse_ss (gfc_walk_array_ref (ss, e, ref)); + } + + return ss; +} + + +static void +trans_this_image (gfc_se * se, gfc_expr *expr) +{ + stmtblock_t loop; + tree type, desc, dim_arg, cond, tmp, m, loop_var, exit_label, min_var, + lbound, ubound, extent, ml; + gfc_se argse; + gfc_ss *ss; + int rank, corank; + + /* The case -fcoarray=single is handled elsewhere. */ + gcc_assert (gfc_option.coarray != GFC_FCOARRAY_SINGLE); + + gfc_init_coarray_decl (false); + + /* Argument-free version: THIS_IMAGE(). 
*/ + if (expr->value.function.actual->expr == NULL) + { + se->expr = fold_convert (gfc_get_int_type (gfc_default_integer_kind), + gfort_gvar_caf_this_image); + return; + } + + /* Coarray-argument version: THIS_IMAGE(coarray [, dim]). */ + + type = gfc_get_int_type (gfc_default_integer_kind); + corank = gfc_get_corank (expr->value.function.actual->expr); + rank = expr->value.function.actual->expr->rank; + + /* Obtain the descriptor of the COARRAY. */ + gfc_init_se (&argse, NULL); + ss = walk_coarray (expr->value.function.actual->expr); + gcc_assert (ss != gfc_ss_terminator); + argse.want_coarray = 1; + gfc_conv_expr_descriptor (&argse, expr->value.function.actual->expr, ss); + gfc_add_block_to_block (&se->pre, &argse.pre); + gfc_add_block_to_block (&se->post, &argse.post); + desc = argse.expr; + + if (se->ss) + { + /* Create an implicit second parameter from the loop variable. */ + gcc_assert (!expr->value.function.actual->next->expr); + gcc_assert (corank > 0); + gcc_assert (se->loop->dimen == 1); + gcc_assert (se->ss->info->expr == expr); + + dim_arg = se->loop->loopvar[0]; + dim_arg = fold_build2_loc (input_location, PLUS_EXPR, + gfc_array_index_type, dim_arg, + build_int_cst (TREE_TYPE (dim_arg), 1)); + gfc_advance_se_ss_chain (se); + } + else + { + /* Use the passed DIM= argument. */ + gcc_assert (expr->value.function.actual->next->expr); + gfc_init_se (&argse, NULL); + gfc_conv_expr_type (&argse, expr->value.function.actual->next->expr, + gfc_array_index_type); + gfc_add_block_to_block (&se->pre, &argse.pre); + dim_arg = argse.expr; + + if (INTEGER_CST_P (dim_arg)) + { + int hi, co_dim; + + hi = TREE_INT_CST_HIGH (dim_arg); + co_dim = TREE_INT_CST_LOW (dim_arg); + if (hi || co_dim < 1 + || co_dim > GFC_TYPE_ARRAY_CORANK (TREE_TYPE (desc))) + gfc_error ("'dim' argument of %s intrinsic at %L is not a valid " + "dimension index", expr->value.function.isym->name, + &expr->where); + } + else if (gfc_option.rtcheck & GFC_RTCHECK_BOUNDS) + { + dim_arg = gfc_evaluate_now (dim_arg, &se->pre); + cond = fold_build2_loc (input_location, LT_EXPR, boolean_type_node, + dim_arg, + build_int_cst (TREE_TYPE (dim_arg), 1)); + tmp = gfc_rank_cst[GFC_TYPE_ARRAY_CORANK (TREE_TYPE (desc))]; + tmp = fold_build2_loc (input_location, GT_EXPR, boolean_type_node, + dim_arg, tmp); + cond = fold_build2_loc (input_location, TRUTH_ORIF_EXPR, + boolean_type_node, cond, tmp); + gfc_trans_runtime_check (true, false, cond, &se->pre, &expr->where, + gfc_msg_fault); + } + } + + /* Used algorithm; cf. Fortran 2008, C.10. Note, due to the scalarizer, + one always has a dim_arg argument. + + m = this_image() - 1 + if (corank == 1) + { + sub(1) = m + lcobound(corank) + return; + } + i = rank + min_var = min (rank + corank - 2, rank + dim_arg - 1) + for (;;) + { + extent = gfc_extent(i) + ml = m + m = m/extent + if (i >= min_var) + goto exit_label + i++ + } + exit_label: + sub(dim_arg) = (dim_arg < corank) ? ml - m*extent + lcobound(dim_arg) + : m + lcobound(corank) + */ + + /* this_image () - 1. */ + tmp = fold_convert (type, gfort_gvar_caf_this_image); + tmp = fold_build2_loc (input_location, MINUS_EXPR, type, tmp, + build_int_cst (type, 1)); + if (corank == 1) + { + /* sub(1) = m + lcobound(corank). 
*/ + lbound = gfc_conv_descriptor_lbound_get (desc, + build_int_cst (TREE_TYPE (gfc_array_index_type), + corank+rank-1)); + lbound = fold_convert (type, lbound); + tmp = fold_build2_loc (input_location, PLUS_EXPR, type, tmp, lbound); + + se->expr = tmp; + return; + } + + m = gfc_create_var (type, NULL); + ml = gfc_create_var (type, NULL); + loop_var = gfc_create_var (integer_type_node, NULL); + min_var = gfc_create_var (integer_type_node, NULL); + + /* m = this_image () - 1. */ + gfc_add_modify (&se->pre, m, tmp); + + /* min_var = min (rank + corank-2, rank + dim_arg - 1). */ + tmp = fold_build2_loc (input_location, PLUS_EXPR, integer_type_node, + fold_convert (integer_type_node, dim_arg), + build_int_cst (integer_type_node, rank - 1)); + tmp = fold_build2_loc (input_location, MIN_EXPR, integer_type_node, + build_int_cst (integer_type_node, rank + corank - 2), + tmp); + gfc_add_modify (&se->pre, min_var, tmp); + + /* i = rank. */ + tmp = build_int_cst (integer_type_node, rank); + gfc_add_modify (&se->pre, loop_var, tmp); + + exit_label = gfc_build_label_decl (NULL_TREE); + TREE_USED (exit_label) = 1; + + /* Loop body. */ + gfc_init_block (&loop); + + /* ml = m. */ + gfc_add_modify (&loop, ml, m); + + /* extent = ... */ + lbound = gfc_conv_descriptor_lbound_get (desc, loop_var); + ubound = gfc_conv_descriptor_ubound_get (desc, loop_var); + extent = gfc_conv_array_extent_dim (lbound, ubound, NULL); + extent = fold_convert (type, extent); + + /* m = m/extent. */ + gfc_add_modify (&loop, m, + fold_build2_loc (input_location, TRUNC_DIV_EXPR, type, + m, extent)); + + /* Exit condition: if (i >= min_var) goto exit_label. */ + cond = fold_build2_loc (input_location, GE_EXPR, boolean_type_node, loop_var, + min_var); + tmp = build1_v (GOTO_EXPR, exit_label); + tmp = fold_build3_loc (input_location, COND_EXPR, void_type_node, cond, tmp, + build_empty_stmt (input_location)); + gfc_add_expr_to_block (&loop, tmp); + + /* Increment loop variable: i++. */ + gfc_add_modify (&loop, loop_var, + fold_build2_loc (input_location, PLUS_EXPR, integer_type_node, + loop_var, + build_int_cst (integer_type_node, 1))); + + /* Making the loop... actually loop! */ + tmp = gfc_finish_block (&loop); + tmp = build1_v (LOOP_EXPR, tmp); + gfc_add_expr_to_block (&se->pre, tmp); + + /* The exit label. */ + tmp = build1_v (LABEL_EXPR, exit_label); + gfc_add_expr_to_block (&se->pre, tmp); + + /* sub(co_dim) = (co_dim < corank) ? 
ml - m*extent + lcobound(dim_arg) + : m + lcobound(corank) */ + + cond = fold_build2_loc (input_location, LT_EXPR, boolean_type_node, dim_arg, + build_int_cst (TREE_TYPE (dim_arg), corank)); + + lbound = gfc_conv_descriptor_lbound_get (desc, + fold_build2_loc (input_location, PLUS_EXPR, + gfc_array_index_type, dim_arg, + build_int_cst (TREE_TYPE (dim_arg), rank-1))); + lbound = fold_convert (type, lbound); + + tmp = fold_build2_loc (input_location, MINUS_EXPR, type, ml, + fold_build2_loc (input_location, MULT_EXPR, type, + m, extent)); + tmp = fold_build2_loc (input_location, PLUS_EXPR, type, tmp, lbound); + + se->expr = fold_build3_loc (input_location, COND_EXPR, type, cond, tmp, + fold_build2_loc (input_location, PLUS_EXPR, type, + m, lbound)); +} + + +static void +trans_image_index (gfc_se * se, gfc_expr *expr) +{ + tree num_images, cond, coindex, type, lbound, ubound, desc, subdesc, + tmp, invalid_bound; + gfc_se argse, subse; + gfc_ss *ss, *subss; + int rank, corank, codim; + + type = gfc_get_int_type (gfc_default_integer_kind); + corank = gfc_get_corank (expr->value.function.actual->expr); + rank = expr->value.function.actual->expr->rank; + + /* Obtain the descriptor of the COARRAY. */ + gfc_init_se (&argse, NULL); + ss = walk_coarray (expr->value.function.actual->expr); + gcc_assert (ss != gfc_ss_terminator); + argse.want_coarray = 1; + gfc_conv_expr_descriptor (&argse, expr->value.function.actual->expr, ss); + gfc_add_block_to_block (&se->pre, &argse.pre); + gfc_add_block_to_block (&se->post, &argse.post); + desc = argse.expr; + + /* Obtain a handle to the SUB argument. */ + gfc_init_se (&subse, NULL); + subss = gfc_walk_expr (expr->value.function.actual->next->expr); + gcc_assert (subss != gfc_ss_terminator); + gfc_conv_expr_descriptor (&subse, expr->value.function.actual->next->expr, + subss); + gfc_add_block_to_block (&se->pre, &subse.pre); + gfc_add_block_to_block (&se->post, &subse.post); + subdesc = build_fold_indirect_ref_loc (input_location, + gfc_conv_descriptor_data_get (subse.expr)); + + /* Fortran 2008 does not require that the values remain in the cobounds, + thus we need explicitly check this - and return 0 if they are exceeded. */ + + lbound = gfc_conv_descriptor_lbound_get (desc, gfc_rank_cst[rank+corank-1]); + tmp = gfc_build_array_ref (subdesc, gfc_rank_cst[corank-1], NULL); + invalid_bound = fold_build2_loc (input_location, LT_EXPR, boolean_type_node, + fold_convert (gfc_array_index_type, tmp), + lbound); + + for (codim = corank + rank - 2; codim >= rank; codim--) + { + lbound = gfc_conv_descriptor_lbound_get (desc, gfc_rank_cst[codim]); + ubound = gfc_conv_descriptor_ubound_get (desc, gfc_rank_cst[codim]); + tmp = gfc_build_array_ref (subdesc, gfc_rank_cst[codim-rank], NULL); + cond = fold_build2_loc (input_location, LT_EXPR, boolean_type_node, + fold_convert (gfc_array_index_type, tmp), + lbound); + invalid_bound = fold_build2_loc (input_location, TRUTH_OR_EXPR, + boolean_type_node, invalid_bound, cond); + cond = fold_build2_loc (input_location, GT_EXPR, boolean_type_node, + fold_convert (gfc_array_index_type, tmp), + ubound); + invalid_bound = fold_build2_loc (input_location, TRUTH_OR_EXPR, + boolean_type_node, invalid_bound, cond); + } + + invalid_bound = gfc_unlikely (invalid_bound); + + + /* See Fortran 2008, C.10 for the following algorithm. */ + + /* coindex = sub(corank) - lcobound(n). 
*/ + coindex = fold_convert (gfc_array_index_type, + gfc_build_array_ref (subdesc, gfc_rank_cst[corank-1], + NULL)); + lbound = gfc_conv_descriptor_lbound_get (desc, gfc_rank_cst[rank+corank-1]); + coindex = fold_build2_loc (input_location, MINUS_EXPR, gfc_array_index_type, + fold_convert (gfc_array_index_type, coindex), + lbound); + + for (codim = corank + rank - 2; codim >= rank; codim--) + { + tree extent, ubound; + + /* coindex = coindex*extent(codim) + sub(codim) - lcobound(codim). */ + lbound = gfc_conv_descriptor_lbound_get (desc, gfc_rank_cst[codim]); + ubound = gfc_conv_descriptor_ubound_get (desc, gfc_rank_cst[codim]); + extent = gfc_conv_array_extent_dim (lbound, ubound, NULL); + + /* coindex *= extent. */ + coindex = fold_build2_loc (input_location, MULT_EXPR, + gfc_array_index_type, coindex, extent); + + /* coindex += sub(codim). */ + tmp = gfc_build_array_ref (subdesc, gfc_rank_cst[codim-rank], NULL); + coindex = fold_build2_loc (input_location, PLUS_EXPR, + gfc_array_index_type, coindex, + fold_convert (gfc_array_index_type, tmp)); + + /* coindex -= lbound(codim). */ + lbound = gfc_conv_descriptor_lbound_get (desc, gfc_rank_cst[codim]); + coindex = fold_build2_loc (input_location, MINUS_EXPR, + gfc_array_index_type, coindex, lbound); + } + + coindex = fold_build2_loc (input_location, PLUS_EXPR, type, + fold_convert(type, coindex), + build_int_cst (type, 1)); + + /* Return 0 if "coindex" exceeds num_images(). */ + + if (gfc_option.coarray == GFC_FCOARRAY_SINGLE) + num_images = build_int_cst (type, 1); + else + { + gfc_init_coarray_decl (false); + num_images = fold_convert (type, gfort_gvar_caf_num_images); + } + + tmp = gfc_create_var (type, NULL); + gfc_add_modify (&se->pre, tmp, coindex); + + cond = fold_build2_loc (input_location, GT_EXPR, boolean_type_node, tmp, + num_images); + cond = fold_build2_loc (input_location, TRUTH_OR_EXPR, boolean_type_node, + cond, + fold_convert (boolean_type_node, invalid_bound)); + se->expr = fold_build3_loc (input_location, COND_EXPR, type, cond, + build_int_cst (type, 0), tmp); +} + + +static void +trans_num_images (gfc_se * se) +{ + gfc_init_coarray_decl (false); + se->expr = fold_convert (gfc_get_int_type (gfc_default_integer_kind), + gfort_gvar_caf_num_images); +} + + /* Evaluate a single upper or lower bound. */ /* TODO: bound intrinsic generates way too much unnecessary code. */ @@ -940,23 +1342,25 @@ gfc_conv_intrinsic_bound (gfc_se * se, gfc_expr * expr, int upper) /* Create an implicit second parameter from the loop variable. */ gcc_assert (!arg2->expr); gcc_assert (se->loop->dimen == 1); - gcc_assert (se->ss->expr == expr); + gcc_assert (se->ss->info->expr == expr); gfc_advance_se_ss_chain (se); bound = se->loop->loopvar[0]; - bound = fold_build2 (MINUS_EXPR, gfc_array_index_type, bound, - se->loop->from[0]); + bound = fold_build2_loc (input_location, MINUS_EXPR, + gfc_array_index_type, bound, + se->loop->from[0]); } else { /* use the passed argument. */ - gcc_assert (arg->next->expr); + gcc_assert (arg2->expr); gfc_init_se (&argse, NULL); - gfc_conv_expr_type (&argse, arg->next->expr, gfc_array_index_type); + gfc_conv_expr_type (&argse, arg2->expr, gfc_array_index_type); gfc_add_block_to_block (&se->pre, &argse.pre); bound = argse.expr; /* Convert from one based to zero based. 
*/ - bound = fold_build2 (MINUS_EXPR, gfc_array_index_type, bound, - gfc_index_one_node); + bound = fold_build2_loc (input_location, MINUS_EXPR, + gfc_array_index_type, bound, + gfc_index_one_node); } /* TODO: don't re-evaluate the descriptor on each iteration. */ @@ -986,11 +1390,13 @@ gfc_conv_intrinsic_bound (gfc_se * se, gfc_expr * expr, int upper) if (gfc_option.rtcheck & GFC_RTCHECK_BOUNDS) { bound = gfc_evaluate_now (bound, &se->pre); - cond = fold_build2 (LT_EXPR, boolean_type_node, - bound, build_int_cst (TREE_TYPE (bound), 0)); + cond = fold_build2_loc (input_location, LT_EXPR, boolean_type_node, + bound, build_int_cst (TREE_TYPE (bound), 0)); tmp = gfc_rank_cst[GFC_TYPE_ARRAY_RANK (TREE_TYPE (desc))]; - tmp = fold_build2 (GE_EXPR, boolean_type_node, bound, tmp); - cond = fold_build2 (TRUTH_ORIF_EXPR, boolean_type_node, cond, tmp); + tmp = fold_build2_loc (input_location, GE_EXPR, boolean_type_node, + bound, tmp); + cond = fold_build2_loc (input_location, TRUTH_ORIF_EXPR, + boolean_type_node, cond, tmp); gfc_trans_runtime_check (true, false, cond, &se->pre, &expr->where, gfc_msg_fault); } @@ -1026,53 +1432,63 @@ gfc_conv_intrinsic_bound (gfc_se * se, gfc_expr * expr, int upper) { tree stride = gfc_conv_descriptor_stride_get (desc, bound); - cond1 = fold_build2 (GE_EXPR, boolean_type_node, ubound, lbound); - - cond3 = fold_build2 (GE_EXPR, boolean_type_node, stride, - gfc_index_zero_node); - cond3 = fold_build2 (TRUTH_AND_EXPR, boolean_type_node, cond3, cond1); - - cond4 = fold_build2 (LT_EXPR, boolean_type_node, stride, - gfc_index_zero_node); + cond1 = fold_build2_loc (input_location, GE_EXPR, boolean_type_node, + ubound, lbound); + cond3 = fold_build2_loc (input_location, GE_EXPR, boolean_type_node, + stride, gfc_index_zero_node); + cond3 = fold_build2_loc (input_location, TRUTH_AND_EXPR, + boolean_type_node, cond3, cond1); + cond4 = fold_build2_loc (input_location, LT_EXPR, boolean_type_node, + stride, gfc_index_zero_node); if (upper) { tree cond5; - cond = fold_build2 (TRUTH_OR_EXPR, boolean_type_node, cond3, cond4); - - cond5 = fold_build2 (EQ_EXPR, boolean_type_node, gfc_index_one_node, lbound); - cond5 = fold_build2 (TRUTH_AND_EXPR, boolean_type_node, cond4, cond5); - - cond = fold_build2 (TRUTH_OR_EXPR, boolean_type_node, cond, cond5); - - se->expr = fold_build3 (COND_EXPR, gfc_array_index_type, cond, - ubound, gfc_index_zero_node); + cond = fold_build2_loc (input_location, TRUTH_OR_EXPR, + boolean_type_node, cond3, cond4); + cond5 = fold_build2_loc (input_location, EQ_EXPR, boolean_type_node, + gfc_index_one_node, lbound); + cond5 = fold_build2_loc (input_location, TRUTH_AND_EXPR, + boolean_type_node, cond4, cond5); + + cond = fold_build2_loc (input_location, TRUTH_OR_EXPR, + boolean_type_node, cond, cond5); + + se->expr = fold_build3_loc (input_location, COND_EXPR, + gfc_array_index_type, cond, + ubound, gfc_index_zero_node); } else { if (as->type == AS_ASSUMED_SIZE) - cond = fold_build2 (EQ_EXPR, boolean_type_node, bound, - build_int_cst (TREE_TYPE (bound), - arg->expr->rank - 1)); + cond = fold_build2_loc (input_location, EQ_EXPR, boolean_type_node, + bound, build_int_cst (TREE_TYPE (bound), + arg->expr->rank - 1)); else cond = boolean_false_node; - cond1 = fold_build2 (TRUTH_OR_EXPR, boolean_type_node, cond3, cond4); - cond = fold_build2 (TRUTH_OR_EXPR, boolean_type_node, cond, cond1); + cond1 = fold_build2_loc (input_location, TRUTH_OR_EXPR, + boolean_type_node, cond3, cond4); + cond = fold_build2_loc (input_location, TRUTH_OR_EXPR, + boolean_type_node, cond, 
cond1); - se->expr = fold_build3 (COND_EXPR, gfc_array_index_type, cond, - lbound, gfc_index_one_node); + se->expr = fold_build3_loc (input_location, COND_EXPR, + gfc_array_index_type, cond, + lbound, gfc_index_one_node); } } else { if (upper) { - size = fold_build2 (MINUS_EXPR, gfc_array_index_type, ubound, lbound); - se->expr = fold_build2 (PLUS_EXPR, gfc_array_index_type, size, + size = fold_build2_loc (input_location, MINUS_EXPR, + gfc_array_index_type, ubound, lbound); + se->expr = fold_build2_loc (input_location, PLUS_EXPR, + gfc_array_index_type, size, gfc_index_one_node); - se->expr = fold_build2 (MAX_EXPR, gfc_array_index_type, se->expr, - gfc_index_zero_node); + se->expr = fold_build2_loc (input_location, MAX_EXPR, + gfc_array_index_type, se->expr, + gfc_index_zero_node); } else se->expr = gfc_index_one_node; @@ -1084,6 +1500,168 @@ gfc_conv_intrinsic_bound (gfc_se * se, gfc_expr * expr, int upper) static void +conv_intrinsic_cobound (gfc_se * se, gfc_expr * expr) +{ + gfc_actual_arglist *arg; + gfc_actual_arglist *arg2; + gfc_se argse; + gfc_ss *ss; + tree bound, resbound, resbound2, desc, cond, tmp; + tree type; + int corank; + + gcc_assert (expr->value.function.isym->id == GFC_ISYM_LCOBOUND + || expr->value.function.isym->id == GFC_ISYM_UCOBOUND + || expr->value.function.isym->id == GFC_ISYM_THIS_IMAGE); + + arg = expr->value.function.actual; + arg2 = arg->next; + + gcc_assert (arg->expr->expr_type == EXPR_VARIABLE); + corank = gfc_get_corank (arg->expr); + + ss = walk_coarray (arg->expr); + gcc_assert (ss != gfc_ss_terminator); + gfc_init_se (&argse, NULL); + argse.want_coarray = 1; + + gfc_conv_expr_descriptor (&argse, arg->expr, ss); + gfc_add_block_to_block (&se->pre, &argse.pre); + gfc_add_block_to_block (&se->post, &argse.post); + desc = argse.expr; + + if (se->ss) + { + /* Create an implicit second parameter from the loop variable. */ + gcc_assert (!arg2->expr); + gcc_assert (corank > 0); + gcc_assert (se->loop->dimen == 1); + gcc_assert (se->ss->info->expr == expr); + + bound = se->loop->loopvar[0]; + bound = fold_build2_loc (input_location, PLUS_EXPR, gfc_array_index_type, + bound, gfc_rank_cst[arg->expr->rank]); + gfc_advance_se_ss_chain (se); + } + else + { + /* use the passed argument. */ + gcc_assert (arg2->expr); + gfc_init_se (&argse, NULL); + gfc_conv_expr_type (&argse, arg2->expr, gfc_array_index_type); + gfc_add_block_to_block (&se->pre, &argse.pre); + bound = argse.expr; + + if (INTEGER_CST_P (bound)) + { + int hi, low; + + hi = TREE_INT_CST_HIGH (bound); + low = TREE_INT_CST_LOW (bound); + if (hi || low < 1 || low > GFC_TYPE_ARRAY_CORANK (TREE_TYPE (desc))) + gfc_error ("'dim' argument of %s intrinsic at %L is not a valid " + "dimension index", expr->value.function.isym->name, + &expr->where); + } + else if (gfc_option.rtcheck & GFC_RTCHECK_BOUNDS) + { + bound = gfc_evaluate_now (bound, &se->pre); + cond = fold_build2_loc (input_location, LT_EXPR, boolean_type_node, + bound, build_int_cst (TREE_TYPE (bound), 1)); + tmp = gfc_rank_cst[GFC_TYPE_ARRAY_CORANK (TREE_TYPE (desc))]; + tmp = fold_build2_loc (input_location, GT_EXPR, boolean_type_node, + bound, tmp); + cond = fold_build2_loc (input_location, TRUTH_ORIF_EXPR, + boolean_type_node, cond, tmp); + gfc_trans_runtime_check (true, false, cond, &se->pre, &expr->where, + gfc_msg_fault); + } + + + /* Substract 1 to get to zero based and add dimensions. 
*/ + switch (arg->expr->rank) + { + case 0: + bound = fold_build2_loc (input_location, MINUS_EXPR, + gfc_array_index_type, bound, + gfc_index_one_node); + case 1: + break; + default: + bound = fold_build2_loc (input_location, PLUS_EXPR, + gfc_array_index_type, bound, + gfc_rank_cst[arg->expr->rank - 1]); + } + } + + resbound = gfc_conv_descriptor_lbound_get (desc, bound); + + /* Handle UCOBOUND with special handling of the last codimension. */ + if (expr->value.function.isym->id == GFC_ISYM_UCOBOUND) + { + /* Last codimension: For -fcoarray=single just return + the lcobound - otherwise add + ceiling (real (num_images ()) / real (size)) - 1 + = (num_images () + size - 1) / size - 1 + = (num_images - 1) / size(), + where size is the product of the extent of all but the last + codimension. */ + + if (gfc_option.coarray != GFC_FCOARRAY_SINGLE && corank > 1) + { + tree cosize; + + gfc_init_coarray_decl (false); + cosize = gfc_conv_descriptor_cosize (desc, arg->expr->rank, corank); + + tmp = fold_build2_loc (input_location, MINUS_EXPR, + gfc_array_index_type, + fold_convert (gfc_array_index_type, + gfort_gvar_caf_num_images), + build_int_cst (gfc_array_index_type, 1)); + tmp = fold_build2_loc (input_location, TRUNC_DIV_EXPR, + gfc_array_index_type, tmp, + fold_convert (gfc_array_index_type, cosize)); + resbound = fold_build2_loc (input_location, PLUS_EXPR, + gfc_array_index_type, resbound, tmp); + } + else if (gfc_option.coarray != GFC_FCOARRAY_SINGLE) + { + /* ubound = lbound + num_images() - 1. */ + gfc_init_coarray_decl (false); + tmp = fold_build2_loc (input_location, MINUS_EXPR, + gfc_array_index_type, + fold_convert (gfc_array_index_type, + gfort_gvar_caf_num_images), + build_int_cst (gfc_array_index_type, 1)); + resbound = fold_build2_loc (input_location, PLUS_EXPR, + gfc_array_index_type, resbound, tmp); + } + + if (corank > 1) + { + cond = fold_build2_loc (input_location, EQ_EXPR, boolean_type_node, + bound, + build_int_cst (TREE_TYPE (bound), + arg->expr->rank + corank - 1)); + + resbound2 = gfc_conv_descriptor_ubound_get (desc, bound); + se->expr = fold_build3_loc (input_location, COND_EXPR, + gfc_array_index_type, cond, + resbound, resbound2); + } + else + se->expr = resbound; + } + else + se->expr = resbound; + + type = gfc_typenode_for_spec (&expr->ts); + se->expr = convert (type, se->expr); +} + + +static void gfc_conv_intrinsic_abs (gfc_se * se, gfc_expr * expr) { tree arg, cabs; @@ -1094,7 +1672,8 @@ gfc_conv_intrinsic_abs (gfc_se * se, gfc_expr * expr) { case BT_INTEGER: case BT_REAL: - se->expr = fold_build1 (ABS_EXPR, TREE_TYPE (arg), arg); + se->expr = fold_build1_loc (input_location, ABS_EXPR, TREE_TYPE (arg), + arg); break; case BT_COMPLEX: @@ -1129,14 +1708,14 @@ gfc_conv_intrinsic_cmplx (gfc_se * se, gfc_expr * expr, int both) imag = convert (TREE_TYPE (type), args[1]); else if (TREE_CODE (TREE_TYPE (args[0])) == COMPLEX_TYPE) { - imag = fold_build1 (IMAGPART_EXPR, TREE_TYPE (TREE_TYPE (args[0])), - args[0]); + imag = fold_build1_loc (input_location, IMAGPART_EXPR, + TREE_TYPE (TREE_TYPE (args[0])), args[0]); imag = convert (TREE_TYPE (type), imag); } else imag = build_real_from_int_cst (TREE_TYPE (type), integer_zero_node); - se->expr = fold_build2 (COMPLEX_EXPR, type, real, imag); + se->expr = fold_build2_loc (input_location, COMPLEX_EXPR, type, real, imag); } /* Remainder function MOD(A, P) = A - INT(A / P) * P @@ -1165,9 +1744,11 @@ gfc_conv_intrinsic_mod (gfc_se * se, gfc_expr * expr, int modulo) type = TREE_TYPE (args[0]); if (modulo) - se->expr = fold_build2 
(FLOOR_MOD_EXPR, type, args[0], args[1]); + se->expr = fold_build2_loc (input_location, FLOOR_MOD_EXPR, type, + args[0], args[1]); else - se->expr = fold_build2 (TRUNC_MOD_EXPR, type, args[0], args[1]); + se->expr = fold_build2_loc (input_location, TRUNC_MOD_EXPR, type, + args[0], args[1]); break; case BT_REAL: @@ -1202,21 +1783,26 @@ gfc_conv_intrinsic_mod (gfc_se * se, gfc_expr * expr, int modulo) { tree zero = gfc_build_const (type, integer_zero_node); tmp = gfc_evaluate_now (se->expr, &se->pre); - test = fold_build2 (LT_EXPR, boolean_type_node, args[0], zero); - test2 = fold_build2 (LT_EXPR, boolean_type_node, args[1], zero); - test2 = fold_build2 (TRUTH_XOR_EXPR, boolean_type_node, test, test2); - test = fold_build2 (NE_EXPR, boolean_type_node, tmp, zero); - test = fold_build2 (TRUTH_AND_EXPR, boolean_type_node, test, test2); + test = fold_build2_loc (input_location, LT_EXPR, boolean_type_node, + args[0], zero); + test2 = fold_build2_loc (input_location, LT_EXPR, boolean_type_node, + args[1], zero); + test2 = fold_build2_loc (input_location, TRUTH_XOR_EXPR, + boolean_type_node, test, test2); + test = fold_build2_loc (input_location, NE_EXPR, boolean_type_node, + tmp, zero); + test = fold_build2_loc (input_location, TRUTH_AND_EXPR, + boolean_type_node, test, test2); test = gfc_evaluate_now (test, &se->pre); - se->expr = fold_build3 (COND_EXPR, type, test, - fold_build2 (PLUS_EXPR, type, tmp, args[1]), - tmp); + se->expr = fold_build3_loc (input_location, COND_EXPR, type, test, + fold_build2_loc (input_location, PLUS_EXPR, + type, tmp, args[1]), tmp); return; } /* If we do not have a built_in fmod, the calculation is going to have to be done longhand. */ - tmp = fold_build2 (RDIV_EXPR, type, args[0], args[1]); + tmp = fold_build2_loc (input_location, RDIV_EXPR, type, args[0], args[1]); /* Test if the value is too large to handle sensibly. 
*/ gfc_set_model_kind (expr->ts.kind); @@ -1230,12 +1816,15 @@ gfc_conv_intrinsic_mod (gfc_se * se, gfc_expr * expr, int modulo) } mpfr_set_z (huge, gfc_integer_kinds[n].huge, GFC_RND_MODE); test = gfc_conv_mpfr_to_tree (huge, expr->ts.kind, 0); - test2 = fold_build2 (LT_EXPR, boolean_type_node, tmp, test); + test2 = fold_build2_loc (input_location, LT_EXPR, boolean_type_node, + tmp, test); mpfr_neg (huge, huge, GFC_RND_MODE); test = gfc_conv_mpfr_to_tree (huge, expr->ts.kind, 0); - test = fold_build2 (GT_EXPR, boolean_type_node, tmp, test); - test2 = fold_build2 (TRUTH_AND_EXPR, boolean_type_node, test, test2); + test = fold_build2_loc (input_location, GT_EXPR, boolean_type_node, tmp, + test); + test2 = fold_build2_loc (input_location, TRUTH_AND_EXPR, + boolean_type_node, test, test2); itype = gfc_get_int_type (ikind); if (modulo) @@ -1243,9 +1832,11 @@ gfc_conv_intrinsic_mod (gfc_se * se, gfc_expr * expr, int modulo) else tmp = build_fix_expr (&se->pre, tmp, itype, RND_TRUNC); tmp = convert (type, tmp); - tmp = fold_build3 (COND_EXPR, type, test2, tmp, args[0]); - tmp = fold_build2 (MULT_EXPR, type, tmp, args[1]); - se->expr = fold_build2 (MINUS_EXPR, type, args[0], tmp); + tmp = fold_build3_loc (input_location, COND_EXPR, type, test2, tmp, + args[0]); + tmp = fold_build2_loc (input_location, MULT_EXPR, type, tmp, args[1]); + se->expr = fold_build2_loc (input_location, MINUS_EXPR, type, args[0], + tmp); mpfr_clear (huge); break; @@ -1254,6 +1845,62 @@ gfc_conv_intrinsic_mod (gfc_se * se, gfc_expr * expr, int modulo) } } +/* DSHIFTL(I,J,S) = (I << S) | (J >> (BITSIZE(J) - S)) + DSHIFTR(I,J,S) = (I << (BITSIZE(I) - S)) | (J >> S) + where the right shifts are logical (i.e. 0's are shifted in). + Because SHIFT_EXPR's want shifts strictly smaller than the integral + type width, we have to special-case both S == 0 and S == BITSIZE(J): + DSHIFTL(I,J,0) = I + DSHIFTL(I,J,BITSIZE) = J + DSHIFTR(I,J,0) = J + DSHIFTR(I,J,BITSIZE) = I. */ + +static void +gfc_conv_intrinsic_dshift (gfc_se * se, gfc_expr * expr, bool dshiftl) +{ + tree type, utype, stype, arg1, arg2, shift, res, left, right; + tree args[3], cond, tmp; + int bitsize; + + gfc_conv_intrinsic_function_args (se, expr, args, 3); + + gcc_assert (TREE_TYPE (args[0]) == TREE_TYPE (args[1])); + type = TREE_TYPE (args[0]); + bitsize = TYPE_PRECISION (type); + utype = unsigned_type_for (type); + stype = TREE_TYPE (args[2]); + + arg1 = gfc_evaluate_now (args[0], &se->pre); + arg2 = gfc_evaluate_now (args[1], &se->pre); + shift = gfc_evaluate_now (args[2], &se->pre); + + /* The generic case. */ + tmp = fold_build2_loc (input_location, MINUS_EXPR, stype, + build_int_cst (stype, bitsize), shift); + left = fold_build2_loc (input_location, LSHIFT_EXPR, type, + arg1, dshiftl ? shift : tmp); + + right = fold_build2_loc (input_location, RSHIFT_EXPR, utype, + fold_convert (utype, arg2), dshiftl ? tmp : shift); + right = fold_convert (type, right); + + res = fold_build2_loc (input_location, BIT_IOR_EXPR, type, left, right); + + /* Special cases. */ + cond = fold_build2_loc (input_location, EQ_EXPR, boolean_type_node, shift, + build_int_cst (stype, 0)); + res = fold_build3_loc (input_location, COND_EXPR, type, cond, + dshiftl ? arg1 : arg2, res); + + cond = fold_build2_loc (input_location, EQ_EXPR, boolean_type_node, shift, + build_int_cst (stype, bitsize)); + res = fold_build3_loc (input_location, COND_EXPR, type, cond, + dshiftl ? arg2 : arg1, res); + + se->expr = res; +} + + /* Positive difference DIM (x, y) = ((x - y) < 0) ? 0 : x - y. 
*/ static void @@ -1268,12 +1915,12 @@ gfc_conv_intrinsic_dim (gfc_se * se, gfc_expr * expr) gfc_conv_intrinsic_function_args (se, expr, args, 2); type = TREE_TYPE (args[0]); - val = fold_build2 (MINUS_EXPR, type, args[0], args[1]); + val = fold_build2_loc (input_location, MINUS_EXPR, type, args[0], args[1]); val = gfc_evaluate_now (val, &se->pre); zero = gfc_build_const (type, integer_zero_node); - tmp = fold_build2 (LE_EXPR, boolean_type_node, val, zero); - se->expr = fold_build3 (COND_EXPR, type, tmp, zero, val); + tmp = fold_build2_loc (input_location, LE_EXPR, boolean_type_node, val, zero); + se->expr = fold_build3_loc (input_location, COND_EXPR, type, tmp, zero, val); } @@ -1305,10 +1952,14 @@ gfc_conv_intrinsic_sign (gfc_se * se, gfc_expr * expr) { tree cond, zero; zero = build_real_from_int_cst (TREE_TYPE (args[1]), integer_zero_node); - cond = fold_build2 (EQ_EXPR, boolean_type_node, args[1], zero); - se->expr = fold_build3 (COND_EXPR, TREE_TYPE (args[0]), cond, - build_call_expr (abs, 1, args[0]), - build_call_expr (tmp, 2, args[0], args[1])); + cond = fold_build2_loc (input_location, EQ_EXPR, boolean_type_node, + args[1], zero); + se->expr = fold_build3_loc (input_location, COND_EXPR, + TREE_TYPE (args[0]), cond, + build_call_expr_loc (input_location, abs, 1, + args[0]), + build_call_expr_loc (input_location, tmp, 2, + args[0], args[1])); } else se->expr = build_call_expr_loc (input_location, tmp, 2, @@ -1325,16 +1976,16 @@ gfc_conv_intrinsic_sign (gfc_se * se, gfc_expr * expr) /* Construct (A ^ B) >> 31, which generates a bit mask of all zeros if the signs of A and B are the same, and of all ones if they differ. */ - tmp = fold_build2 (BIT_XOR_EXPR, type, args[0], args[1]); - tmp = fold_build2 (RSHIFT_EXPR, type, tmp, - build_int_cst (type, TYPE_PRECISION (type) - 1)); + tmp = fold_build2_loc (input_location, BIT_XOR_EXPR, type, args[0], args[1]); + tmp = fold_build2_loc (input_location, RSHIFT_EXPR, type, tmp, + build_int_cst (type, TYPE_PRECISION (type) - 1)); tmp = gfc_evaluate_now (tmp, &se->pre); /* Construct (A + tmp) ^ tmp, which is A if tmp is zero, and -A if tmp] is all ones (i.e. -1). 
*/ - se->expr = fold_build2 (BIT_XOR_EXPR, type, - fold_build2 (PLUS_EXPR, type, args[0], tmp), - tmp); + se->expr = fold_build2_loc (input_location, BIT_XOR_EXPR, type, + fold_build2_loc (input_location, PLUS_EXPR, + type, args[0], tmp), tmp); } @@ -1366,7 +2017,8 @@ gfc_conv_intrinsic_dprod (gfc_se * se, gfc_expr * expr) type = gfc_typenode_for_spec (&expr->ts); args[0] = convert (type, args[0]); args[1] = convert (type, args[1]); - se->expr = fold_build2 (MULT_EXPR, type, args[0], args[1]); + se->expr = fold_build2_loc (input_location, MULT_EXPR, type, args[0], + args[1]); } @@ -1386,10 +2038,10 @@ gfc_conv_intrinsic_char (gfc_se * se, gfc_expr * expr) type = gfc_get_char_type (expr->ts.kind); var = gfc_create_var (type, "char"); - arg[0] = fold_build1 (NOP_EXPR, type, arg[0]); + arg[0] = fold_build1_loc (input_location, NOP_EXPR, type, arg[0]); gfc_add_modify (&se->pre, var, arg[0]); se->expr = gfc_build_addr_expr (build_pointer_type (type), var); - se->string_length = integer_one_node; + se->string_length = build_int_cst (gfc_charlen_type_node, 1); } @@ -1408,7 +2060,7 @@ gfc_conv_intrinsic_ctime (gfc_se * se, gfc_expr * expr) args = XALLOCAVEC (tree, num_args); var = gfc_create_var (pchar_type_node, "pstr"); - len = gfc_create_var (gfc_get_int_type (8), "len"); + len = gfc_create_var (gfc_charlen_type_node, "len"); gfc_conv_intrinsic_function_args (se, expr, &args[2], num_args - 2); args[0] = gfc_build_addr_expr (NULL_TREE, var); @@ -1421,8 +2073,8 @@ gfc_conv_intrinsic_ctime (gfc_se * se, gfc_expr * expr) gfc_add_expr_to_block (&se->pre, tmp); /* Free the temporary afterwards, if necessary. */ - cond = fold_build2 (GT_EXPR, boolean_type_node, - len, build_int_cst (TREE_TYPE (len), 0)); + cond = fold_build2_loc (input_location, GT_EXPR, boolean_type_node, + len, build_int_cst (TREE_TYPE (len), 0)); tmp = gfc_call_free (var); tmp = build3_v (COND_EXPR, cond, tmp, build_empty_stmt (input_location)); gfc_add_expr_to_block (&se->post, tmp); @@ -1460,8 +2112,8 @@ gfc_conv_intrinsic_fdate (gfc_se * se, gfc_expr * expr) gfc_add_expr_to_block (&se->pre, tmp); /* Free the temporary afterwards, if necessary. */ - cond = fold_build2 (GT_EXPR, boolean_type_node, - len, build_int_cst (TREE_TYPE (len), 0)); + cond = fold_build2_loc (input_location, GT_EXPR, boolean_type_node, + len, build_int_cst (TREE_TYPE (len), 0)); tmp = gfc_call_free (var); tmp = build3_v (COND_EXPR, cond, tmp, build_empty_stmt (input_location)); gfc_add_expr_to_block (&se->post, tmp); @@ -1501,8 +2153,8 @@ gfc_conv_intrinsic_ttynam (gfc_se * se, gfc_expr * expr) gfc_add_expr_to_block (&se->pre, tmp); /* Free the temporary afterwards, if necessary. 
*/ - cond = fold_build2 (GT_EXPR, boolean_type_node, - len, build_int_cst (TREE_TYPE (len), 0)); + cond = fold_build2_loc (input_location, GT_EXPR, boolean_type_node, + len, build_int_cst (TREE_TYPE (len), 0)); tmp = gfc_call_free (var); tmp = build3_v (COND_EXPR, cond, tmp, build_empty_stmt (input_location)); gfc_add_expr_to_block (&se->post, tmp); @@ -1579,7 +2231,8 @@ gfc_conv_intrinsic_minmax (gfc_se * se, gfc_expr * expr, enum tree_code op) thencase = build2_v (MODIFY_EXPR, mvar, convert (type, val)); - tmp = fold_build2 (op, boolean_type_node, convert (type, val), mvar); + tmp = fold_build2_loc (input_location, op, boolean_type_node, + convert (type, val), mvar); /* FIXME: When the IEEE_ARITHMETIC module is implemented, the call to __builtin_isnan might be made dependent on that module being loaded, @@ -1587,9 +2240,11 @@ gfc_conv_intrinsic_minmax (gfc_se * se, gfc_expr * expr, enum tree_code op) if (FLOAT_TYPE_P (TREE_TYPE (mvar))) { isnan = build_call_expr_loc (input_location, - built_in_decls[BUILT_IN_ISNAN], 1, mvar); - tmp = fold_build2 (TRUTH_OR_EXPR, boolean_type_node, tmp, - fold_convert (boolean_type_node, isnan)); + builtin_decl_explicit (BUILT_IN_ISNAN), + 1, mvar); + tmp = fold_build2_loc (input_location, TRUTH_OR_EXPR, + boolean_type_node, tmp, + fold_convert (boolean_type_node, isnan)); } tmp = build3_v (COND_EXPR, tmp, thencase, build_empty_stmt (input_location)); @@ -1623,8 +2278,8 @@ gfc_conv_intrinsic_minmax_char (gfc_se * se, gfc_expr * expr, int op) args[0] = gfc_build_addr_expr (NULL_TREE, len); var = gfc_create_var (gfc_get_pchar_type (expr->ts.kind), "pstr"); args[1] = gfc_build_addr_expr (ppvoid_type_node, var); - args[2] = build_int_cst (NULL_TREE, op); - args[3] = build_int_cst (NULL_TREE, nargs / 2); + args[2] = build_int_cst (integer_type_node, op); + args[3] = build_int_cst (integer_type_node, nargs / 2); if (expr->ts.kind == 1) function = gfor_fndecl_string_minmax; @@ -1641,8 +2296,8 @@ gfc_conv_intrinsic_minmax_char (gfc_se * se, gfc_expr * expr, int op) gfc_add_expr_to_block (&se->pre, tmp); /* Free the temporary afterwards, if necessary. */ - cond = fold_build2 (GT_EXPR, boolean_type_node, - len, build_int_cst (TREE_TYPE (len), 0)); + cond = fold_build2_loc (input_location, GT_EXPR, boolean_type_node, + len, build_int_cst (TREE_TYPE (len), 0)); tmp = gfc_call_free (var); tmp = build3_v (COND_EXPR, cond, tmp, build_empty_stmt (input_location)); gfc_add_expr_to_block (&se->post, tmp); @@ -1691,7 +2346,7 @@ gfc_conv_intrinsic_funcall (gfc_se * se, gfc_expr * expr) gfc_symbol *sym; VEC(tree,gc) *append_args; - gcc_assert (!se->ss || se->ss->expr == expr); + gcc_assert (!se->ss || se->ss->info->expr == expr); if (se->ss) gcc_assert (expr->rank > 0); @@ -1748,7 +2403,7 @@ gfc_conv_intrinsic_funcall (gfc_se * se, gfc_expr * expr) gfc_conv_procedure_call (se, sym, expr->value.function.actual, expr, append_args); - gfc_free (sym); + gfc_free_symbol (sym); } /* ANY and ALL intrinsics. ANY->op == NE_EXPR, ALL->op == EQ_EXPR. 
@@ -1840,8 +2495,8 @@ gfc_conv_intrinsic_anyall (gfc_se * se, gfc_expr * expr, enum tree_code op) gfc_conv_expr_val (&arrayse, actual->expr); gfc_add_block_to_block (&body, &arrayse.pre); - tmp = fold_build2 (op, boolean_type_node, arrayse.expr, - build_int_cst (TREE_TYPE (arrayse.expr), 0)); + tmp = fold_build2_loc (input_location, op, boolean_type_node, arrayse.expr, + build_int_cst (TREE_TYPE (arrayse.expr), 0)); tmp = build3_v (COND_EXPR, tmp, found, build_empty_stmt (input_location)); gfc_add_expr_to_block (&body, tmp); gfc_add_block_to_block (&body, &arrayse.post); @@ -1901,8 +2556,8 @@ gfc_conv_intrinsic_count (gfc_se * se, gfc_expr * expr) /* Generate the loop body. */ gfc_start_scalarized_body (&loop, &body); - tmp = fold_build2 (PLUS_EXPR, TREE_TYPE (resvar), - resvar, build_int_cst (TREE_TYPE (resvar), 1)); + tmp = fold_build2_loc (input_location, PLUS_EXPR, TREE_TYPE (resvar), + resvar, build_int_cst (TREE_TYPE (resvar), 1)); tmp = build2_v (MODIFY_EXPR, resvar, tmp); gfc_init_se (&arrayse, NULL); @@ -1925,6 +2580,20 @@ gfc_conv_intrinsic_count (gfc_se * se, gfc_expr * expr) se->expr = resvar; } + +/* Update given gfc_se to have ss component pointing to the nested gfc_ss + struct and return the corresponding loopinfo. */ + +static gfc_loopinfo * +enter_nested_loop (gfc_se *se) +{ + se->ss = se->ss->nested_ss; + gcc_assert (se->ss == se->ss->loop->ss); + + return se->ss->loop; +} + + /* Inline implementation of the sum and product intrinsics. */ static void gfc_conv_intrinsic_arith (gfc_se * se, gfc_expr * expr, enum tree_code op, @@ -1936,20 +2605,23 @@ gfc_conv_intrinsic_arith (gfc_se * se, gfc_expr * expr, enum tree_code op, stmtblock_t body; stmtblock_t block; tree tmp; - gfc_loopinfo loop; - gfc_actual_arglist *actual; - gfc_ss *arrayss; - gfc_ss *maskss; + gfc_loopinfo loop, *ploop; + gfc_actual_arglist *arg_array, *arg_mask; + gfc_ss *arrayss = NULL; + gfc_ss *maskss = NULL; gfc_se arrayse; gfc_se maskse; + gfc_se *parent_se; gfc_expr *arrayexpr; gfc_expr *maskexpr; - if (se->ss) + if (expr->rank > 0) { - gfc_conv_intrinsic_funcall (se, expr); - return; + gcc_assert (gfc_inline_intrinsic_function_p (expr)); + parent_se = se; } + else + parent_se = NULL; type = gfc_typenode_for_spec (&expr->ts); /* Initialize the result. */ @@ -1963,62 +2635,79 @@ gfc_conv_intrinsic_arith (gfc_se * se, gfc_expr * expr, enum tree_code op, gfc_build_const (type, integer_one_node)); tmp = gfc_build_const (type, integer_zero_node); } - else if (op == PLUS_EXPR) + else if (op == PLUS_EXPR || op == BIT_IOR_EXPR || op == BIT_XOR_EXPR) tmp = gfc_build_const (type, integer_zero_node); else if (op == NE_EXPR) /* PARITY. */ tmp = convert (type, boolean_false_node); + else if (op == BIT_AND_EXPR) + tmp = gfc_build_const (type, fold_build1_loc (input_location, NEGATE_EXPR, + type, integer_one_node)); else tmp = gfc_build_const (type, integer_one_node); gfc_add_modify (&se->pre, resvar, tmp); - /* Walk the arguments. */ - actual = expr->value.function.actual; - arrayexpr = actual->expr; - arrayss = gfc_walk_expr (arrayexpr); - gcc_assert (arrayss != gfc_ss_terminator); + arg_array = expr->value.function.actual; + + arrayexpr = arg_array->expr; if (op == NE_EXPR || norm2) /* PARITY and NORM2. 
*/ maskexpr = NULL; else { - actual = actual->next->next; - gcc_assert (actual); - maskexpr = actual->expr; + arg_mask = arg_array->next->next; + gcc_assert (arg_mask != NULL); + maskexpr = arg_mask->expr; } - if (maskexpr && maskexpr->rank != 0) + if (expr->rank == 0) { - maskss = gfc_walk_expr (maskexpr); - gcc_assert (maskss != gfc_ss_terminator); + /* Walk the arguments. */ + arrayss = gfc_walk_expr (arrayexpr); + gcc_assert (arrayss != gfc_ss_terminator); + + if (maskexpr && maskexpr->rank > 0) + { + maskss = gfc_walk_expr (maskexpr); + gcc_assert (maskss != gfc_ss_terminator); + } + else + maskss = NULL; + + /* Initialize the scalarizer. */ + gfc_init_loopinfo (&loop); + gfc_add_ss_to_loop (&loop, arrayss); + if (maskexpr && maskexpr->rank > 0) + gfc_add_ss_to_loop (&loop, maskss); + + /* Initialize the loop. */ + gfc_conv_ss_startstride (&loop); + gfc_conv_loop_setup (&loop, &expr->where); + + gfc_mark_ss_chain_used (arrayss, 1); + if (maskexpr && maskexpr->rank > 0) + gfc_mark_ss_chain_used (maskss, 1); + + ploop = &loop; } else - maskss = NULL; + /* All the work has been done in the parent loops. */ + ploop = enter_nested_loop (se); - /* Initialize the scalarizer. */ - gfc_init_loopinfo (&loop); - gfc_add_ss_to_loop (&loop, arrayss); - if (maskss) - gfc_add_ss_to_loop (&loop, maskss); + gcc_assert (ploop); - /* Initialize the loop. */ - gfc_conv_ss_startstride (&loop); - gfc_conv_loop_setup (&loop, &expr->where); - - gfc_mark_ss_chain_used (arrayss, 1); - if (maskss) - gfc_mark_ss_chain_used (maskss, 1); /* Generate the loop body. */ - gfc_start_scalarized_body (&loop, &body); + gfc_start_scalarized_body (ploop, &body); /* If we have a mask, only add this element if the mask is set. */ - if (maskss) + if (maskexpr && maskexpr->rank > 0) { - gfc_init_se (&maskse, NULL); - gfc_copy_loopinfo_to_se (&maskse, &loop); - maskse.ss = maskss; + gfc_init_se (&maskse, parent_se); + gfc_copy_loopinfo_to_se (&maskse, ploop); + if (expr->rank == 0) + maskse.ss = maskss; gfc_conv_expr_val (&maskse, maskexpr); gfc_add_block_to_block (&body, &maskse.pre); @@ -2028,9 +2717,10 @@ gfc_conv_intrinsic_arith (gfc_se * se, gfc_expr * expr, enum tree_code op, gfc_init_block (&block); /* Do the actual summation/product. 
*/ - gfc_init_se (&arrayse, NULL); - gfc_copy_loopinfo_to_se (&arrayse, &loop); - arrayse.ss = arrayss; + gfc_init_se (&arrayse, parent_se); + gfc_copy_loopinfo_to_se (&arrayse, ploop); + if (expr->rank == 0) + arrayse.ss = arrayss; gfc_conv_expr_val (&arrayse, arrayexpr); gfc_add_block_to_block (&block, &arrayse.pre); @@ -2058,49 +2748,54 @@ gfc_conv_intrinsic_arith (gfc_se * se, gfc_expr * expr, enum tree_code op, absX = gfc_create_var (type, "absX"); gfc_add_modify (&ifblock1, absX, - fold_build1 (ABS_EXPR, type, arrayse.expr)); + fold_build1_loc (input_location, ABS_EXPR, type, + arrayse.expr)); val = gfc_create_var (type, "val"); gfc_add_expr_to_block (&ifblock1, val); gfc_init_block (&ifblock2); gfc_add_modify (&ifblock2, val, - fold_build2 (RDIV_EXPR, type, scale, absX)); - res1 = fold_build2 (MULT_EXPR, type, val, val); - res1 = fold_build2 (MULT_EXPR, type, resvar, res1); - res1 = fold_build2 (PLUS_EXPR, type, res1, - gfc_build_const (type, integer_one_node)); + fold_build2_loc (input_location, RDIV_EXPR, type, scale, + absX)); + res1 = fold_build2_loc (input_location, MULT_EXPR, type, val, val); + res1 = fold_build2_loc (input_location, MULT_EXPR, type, resvar, res1); + res1 = fold_build2_loc (input_location, PLUS_EXPR, type, res1, + gfc_build_const (type, integer_one_node)); gfc_add_modify (&ifblock2, resvar, res1); gfc_add_modify (&ifblock2, scale, absX); res1 = gfc_finish_block (&ifblock2); gfc_init_block (&ifblock3); gfc_add_modify (&ifblock3, val, - fold_build2 (RDIV_EXPR, type, absX, scale)); - res2 = fold_build2 (MULT_EXPR, type, val, val); - res2 = fold_build2 (PLUS_EXPR, type, resvar, res2); + fold_build2_loc (input_location, RDIV_EXPR, type, absX, + scale)); + res2 = fold_build2_loc (input_location, MULT_EXPR, type, val, val); + res2 = fold_build2_loc (input_location, PLUS_EXPR, type, resvar, res2); gfc_add_modify (&ifblock3, resvar, res2); res2 = gfc_finish_block (&ifblock3); - cond = fold_build2 (GT_EXPR, boolean_type_node, absX, scale); + cond = fold_build2_loc (input_location, GT_EXPR, boolean_type_node, + absX, scale); tmp = build3_v (COND_EXPR, cond, res1, res2); gfc_add_expr_to_block (&ifblock1, tmp); tmp = gfc_finish_block (&ifblock1); - cond = fold_build2 (NE_EXPR, boolean_type_node, arrayse.expr, - gfc_build_const (type, integer_zero_node)); + cond = fold_build2_loc (input_location, NE_EXPR, boolean_type_node, + arrayse.expr, + gfc_build_const (type, integer_zero_node)); tmp = build3_v (COND_EXPR, cond, tmp, build_empty_stmt (input_location)); gfc_add_expr_to_block (&block, tmp); } else { - tmp = fold_build2 (op, type, resvar, arrayse.expr); + tmp = fold_build2_loc (input_location, op, type, resvar, arrayse.expr); gfc_add_modify (&block, resvar, tmp); } gfc_add_block_to_block (&block, &arrayse.post); - if (maskss) + if (maskexpr && maskexpr->rank > 0) { /* We enclose the above in if (mask) {...} . */ @@ -2112,30 +2807,43 @@ gfc_conv_intrinsic_arith (gfc_se * se, gfc_expr * expr, enum tree_code op, tmp = gfc_finish_block (&block); gfc_add_expr_to_block (&body, tmp); - gfc_trans_scalarizing_loops (&loop, &body); + gfc_trans_scalarizing_loops (ploop, &body); /* For a scalar mask, enclose the loop in an if statement. 
*/ - if (maskexpr && maskss == NULL) + if (maskexpr && maskexpr->rank == 0) { - gfc_init_se (&maskse, NULL); - gfc_conv_expr_val (&maskse, maskexpr); gfc_init_block (&block); - gfc_add_block_to_block (&block, &loop.pre); - gfc_add_block_to_block (&block, &loop.post); + gfc_add_block_to_block (&block, &ploop->pre); + gfc_add_block_to_block (&block, &ploop->post); tmp = gfc_finish_block (&block); - tmp = build3_v (COND_EXPR, maskse.expr, tmp, - build_empty_stmt (input_location)); + if (expr->rank > 0) + { + tmp = build3_v (COND_EXPR, se->ss->info->data.scalar.value, tmp, + build_empty_stmt (input_location)); + gfc_advance_se_ss_chain (se); + } + else + { + gcc_assert (expr->rank == 0); + gfc_init_se (&maskse, NULL); + gfc_conv_expr_val (&maskse, maskexpr); + tmp = build3_v (COND_EXPR, maskse.expr, tmp, + build_empty_stmt (input_location)); + } + gfc_add_expr_to_block (&block, tmp); gfc_add_block_to_block (&se->pre, &block); + gcc_assert (se->post.head == NULL); } else { - gfc_add_block_to_block (&se->pre, &loop.pre); - gfc_add_block_to_block (&se->pre, &loop.post); + gfc_add_block_to_block (&se->pre, &ploop->pre); + gfc_add_block_to_block (&se->pre, &ploop->post); } - gfc_cleanup_loop (&loop); + if (expr->rank == 0) + gfc_cleanup_loop (ploop); if (norm2) { @@ -2144,7 +2852,7 @@ gfc_conv_intrinsic_arith (gfc_se * se, gfc_expr * expr, enum tree_code op, sqrt = gfc_builtin_decl_for_float_kind (BUILT_IN_SQRT, expr->ts.kind); resvar = build_call_expr_loc (input_location, sqrt, 1, resvar); - resvar = fold_build2 (MULT_EXPR, type, scale, resvar); + resvar = fold_build2_loc (input_location, MULT_EXPR, type, scale, resvar); } se->expr = resvar; @@ -2212,7 +2920,8 @@ gfc_conv_intrinsic_dot_product (gfc_se * se, gfc_expr * expr) arrayse1.ss = arrayss1; gfc_conv_expr_val (&arrayse1, arrayexpr1); if (expr->ts.type == BT_COMPLEX) - arrayse1.expr = fold_build1 (CONJ_EXPR, type, arrayse1.expr); + arrayse1.expr = fold_build1_loc (input_location, CONJ_EXPR, type, + arrayse1.expr); gfc_add_block_to_block (&block, &arrayse1.pre); /* Make the tree expression for array2. */ @@ -2225,13 +2934,15 @@ gfc_conv_intrinsic_dot_product (gfc_se * se, gfc_expr * expr) /* Do the actual product and sum. 
*/ if (expr->ts.type == BT_LOGICAL) { - tmp = fold_build2 (TRUTH_AND_EXPR, type, arrayse1.expr, arrayse2.expr); - tmp = fold_build2 (TRUTH_OR_EXPR, type, resvar, tmp); + tmp = fold_build2_loc (input_location, TRUTH_AND_EXPR, type, + arrayse1.expr, arrayse2.expr); + tmp = fold_build2_loc (input_location, TRUTH_OR_EXPR, type, resvar, tmp); } else { - tmp = fold_build2 (MULT_EXPR, type, arrayse1.expr, arrayse2.expr); - tmp = fold_build2 (PLUS_EXPR, type, resvar, tmp); + tmp = fold_build2_loc (input_location, MULT_EXPR, type, arrayse1.expr, + arrayse2.expr); + tmp = fold_build2_loc (input_location, PLUS_EXPR, type, resvar, tmp); } gfc_add_modify (&block, resvar, tmp); @@ -2374,29 +3085,22 @@ gfc_conv_intrinsic_minmaxloc (gfc_se * se, gfc_expr * expr, enum tree_code op) { nonempty = gfc_conv_mpz_to_tree (asize, gfc_index_integer_kind); mpz_clear (asize); - nonempty = fold_build2 (GT_EXPR, boolean_type_node, nonempty, - gfc_index_zero_node); + nonempty = fold_build2_loc (input_location, GT_EXPR, + boolean_type_node, nonempty, + gfc_index_zero_node); } maskss = NULL; } limit = gfc_create_var (gfc_typenode_for_spec (&arrayexpr->ts), "limit"); - n = gfc_validate_kind (arrayexpr->ts.type, arrayexpr->ts.kind, false); switch (arrayexpr->ts.type) { case BT_REAL: - if (HONOR_INFINITIES (DECL_MODE (limit))) - { - REAL_VALUE_TYPE real; - real_inf (&real); - tmp = build_real (TREE_TYPE (limit), real); - } - else - tmp = gfc_conv_mpfr_to_tree (gfc_real_kinds[n].huge, - arrayexpr->ts.kind, 0); + tmp = gfc_build_inf_or_huge (TREE_TYPE (limit), arrayexpr->ts.kind); break; case BT_INTEGER: + n = gfc_validate_kind (arrayexpr->ts.type, arrayexpr->ts.kind, false); tmp = gfc_conv_mpz_to_tree (gfc_integer_kinds[n].huge, arrayexpr->ts.kind); break; @@ -2410,10 +3114,10 @@ gfc_conv_intrinsic_minmaxloc (gfc_se * se, gfc_expr * expr, enum tree_code op) -HUGE for BT_REAL and (-HUGE - 1) for BT_INTEGER; the most positive possible value is HUGE in both cases. */ if (op == GT_EXPR) - tmp = fold_build1 (NEGATE_EXPR, TREE_TYPE (tmp), tmp); + tmp = fold_build1_loc (input_location, NEGATE_EXPR, TREE_TYPE (tmp), tmp); if (op == GT_EXPR && expr->ts.type == BT_INTEGER) - tmp = fold_build2 (MINUS_EXPR, TREE_TYPE (tmp), tmp, - build_int_cst (type, 1)); + tmp = fold_build2_loc (input_location, MINUS_EXPR, TREE_TYPE (tmp), tmp, + build_int_cst (type, 1)); gfc_add_modify (&se->pre, limit, tmp); @@ -2425,12 +3129,29 @@ gfc_conv_intrinsic_minmaxloc (gfc_se * se, gfc_expr * expr, enum tree_code op) /* Initialize the loop. */ gfc_conv_ss_startstride (&loop); + + /* The code generated can have more than one loop in sequence (see the + comment at the function header). This doesn't work well with the + scalarizer, which changes arrays' offset when the scalarization loops + are generated (see gfc_trans_preloop_setup). Fortunately, {min,max}loc + are currently inlined in the scalar case only (for which loop is of rank + one). As there is no dependency to care about in that case, there is no + temporary, so that we can use the scalarizer temporary code to handle + multiple loops. Thus, we set temp_dim here, we call gfc_mark_ss_chain_used + with flag=3 later, and we use gfc_trans_scalarized_loop_boundary even later + to restore offset. + TODO: this prevents inlining of rank > 0 minmaxloc calls, so this + should eventually go away. 
We could either create two loops properly, + or find another way to save/restore the array offsets between the two + loops (without conflicting with temporary management), or use a single + loop minmaxloc implementation. See PR 31067. */ + loop.temp_dim = loop.dimen; gfc_conv_loop_setup (&loop, &expr->where); gcc_assert (loop.dimen == 1); if (nonempty == NULL && maskss == NULL && loop.from[0] && loop.to[0]) - nonempty = fold_build2 (LE_EXPR, boolean_type_node, loop.from[0], - loop.to[0]); + nonempty = fold_build2_loc (input_location, LE_EXPR, boolean_type_node, + loop.from[0], loop.to[0]); lab1 = NULL; lab2 = NULL; @@ -2441,9 +3162,10 @@ gfc_conv_intrinsic_minmaxloc (gfc_se * se, gfc_expr * expr, enum tree_code op) the inner loop. */ if (nonempty != NULL && !HONOR_NANS (DECL_MODE (limit))) gfc_add_modify (&loop.pre, pos, - fold_build3 (COND_EXPR, gfc_array_index_type, - nonempty, gfc_index_one_node, - gfc_index_zero_node)); + fold_build3_loc (input_location, COND_EXPR, + gfc_array_index_type, + nonempty, gfc_index_one_node, + gfc_index_zero_node)); else { gfc_add_modify (&loop.pre, pos, gfc_index_zero_node); @@ -2453,9 +3175,17 @@ gfc_conv_intrinsic_minmaxloc (gfc_se * se, gfc_expr * expr, enum tree_code op) TREE_USED (lab2) = 1; } - gfc_mark_ss_chain_used (arrayss, 1); + /* An offset must be added to the loop + counter to obtain the required position. */ + gcc_assert (loop.from[0]); + + tmp = fold_build2_loc (input_location, MINUS_EXPR, gfc_array_index_type, + gfc_index_one_node, loop.from[0]); + gfc_add_modify (&loop.pre, offset, tmp); + + gfc_mark_ss_chain_used (arrayss, lab1 ? 3 : 1); if (maskss) - gfc_mark_ss_chain_used (maskss, 1); + gfc_mark_ss_chain_used (maskss, lab1 ? 3 : 1); /* Generate the loop body. */ gfc_start_scalarized_body (&loop, &body); @@ -2486,35 +3216,25 @@ gfc_conv_intrinsic_minmaxloc (gfc_se * se, gfc_expr * expr, enum tree_code op) /* Assign the value to the limit... */ gfc_add_modify (&ifblock, limit, arrayse.expr); - /* Remember where we are. An offset must be added to the loop - counter to obtain the required position. */ - if (loop.from[0]) - tmp = fold_build2 (MINUS_EXPR, gfc_array_index_type, - gfc_index_one_node, loop.from[0]); - else - tmp = gfc_index_one_node; - - gfc_add_modify (&block, offset, tmp); - if (nonempty == NULL && HONOR_NANS (DECL_MODE (limit))) { stmtblock_t ifblock2; tree ifbody2; gfc_start_block (&ifblock2); - tmp = fold_build2 (PLUS_EXPR, TREE_TYPE (pos), - loop.loopvar[0], offset); + tmp = fold_build2_loc (input_location, PLUS_EXPR, TREE_TYPE (pos), + loop.loopvar[0], offset); gfc_add_modify (&ifblock2, pos, tmp); ifbody2 = gfc_finish_block (&ifblock2); - cond = fold_build2 (EQ_EXPR, boolean_type_node, pos, - gfc_index_zero_node); + cond = fold_build2_loc (input_location, EQ_EXPR, boolean_type_node, pos, + gfc_index_zero_node); tmp = build3_v (COND_EXPR, cond, ifbody2, build_empty_stmt (input_location)); gfc_add_expr_to_block (&block, tmp); } - tmp = fold_build2 (PLUS_EXPR, TREE_TYPE (pos), - loop.loopvar[0], offset); + tmp = fold_build2_loc (input_location, PLUS_EXPR, TREE_TYPE (pos), + loop.loopvar[0], offset); gfc_add_modify (&ifblock, pos, tmp); if (lab1) @@ -2525,10 +3245,12 @@ gfc_conv_intrinsic_minmaxloc (gfc_se * se, gfc_expr * expr, enum tree_code op) if (!lab1 || HONOR_NANS (DECL_MODE (limit))) { if (lab1) - cond = fold_build2 (op == GT_EXPR ? GE_EXPR : LE_EXPR, - boolean_type_node, arrayse.expr, limit); + cond = fold_build2_loc (input_location, + op == GT_EXPR ? 
GE_EXPR : LE_EXPR, + boolean_type_node, arrayse.expr, limit); else - cond = fold_build2 (op, boolean_type_node, arrayse.expr, limit); + cond = fold_build2_loc (input_location, op, boolean_type_node, + arrayse.expr, limit); ifbody = build3_v (COND_EXPR, cond, ifbody, build_empty_stmt (input_location)); @@ -2549,7 +3271,7 @@ gfc_conv_intrinsic_minmaxloc (gfc_se * se, gfc_expr * expr, enum tree_code op) if (lab1) { - gfc_trans_scalarized_loop_end (&loop, 0, &body); + gfc_trans_scalarized_loop_boundary (&loop, &body); if (HONOR_NANS (DECL_MODE (limit))) { @@ -2564,7 +3286,6 @@ gfc_conv_intrinsic_minmaxloc (gfc_se * se, gfc_expr * expr, enum tree_code op) gfc_add_expr_to_block (&loop.code[0], build1_v (GOTO_EXPR, lab2)); gfc_add_expr_to_block (&loop.code[0], build1_v (LABEL_EXPR, lab1)); - gfc_start_block (&body); /* If we have a mask, only check this element if the mask is set. */ if (maskss) @@ -2593,23 +3314,14 @@ gfc_conv_intrinsic_minmaxloc (gfc_se * se, gfc_expr * expr, enum tree_code op) /* Assign the value to the limit... */ gfc_add_modify (&ifblock, limit, arrayse.expr); - /* Remember where we are. An offset must be added to the loop - counter to obtain the required position. */ - if (loop.from[0]) - tmp = fold_build2 (MINUS_EXPR, gfc_array_index_type, - gfc_index_one_node, loop.from[0]); - else - tmp = gfc_index_one_node; - - gfc_add_modify (&block, offset, tmp); - - tmp = fold_build2 (PLUS_EXPR, TREE_TYPE (pos), - loop.loopvar[0], offset); + tmp = fold_build2_loc (input_location, PLUS_EXPR, TREE_TYPE (pos), + loop.loopvar[0], offset); gfc_add_modify (&ifblock, pos, tmp); ifbody = gfc_finish_block (&ifblock); - cond = fold_build2 (op, boolean_type_node, arrayse.expr, limit); + cond = fold_build2_loc (input_location, op, boolean_type_node, + arrayse.expr, limit); tmp = build3_v (COND_EXPR, cond, ifbody, build_empty_stmt (input_location)); @@ -2829,14 +3541,15 @@ gfc_conv_intrinsic_minmaxval (gfc_se * se, gfc_expr * expr, enum tree_code op) possible value is HUGE in both cases. */ if (op == GT_EXPR) { - tmp = fold_build1 (NEGATE_EXPR, TREE_TYPE (tmp), tmp); + tmp = fold_build1_loc (input_location, NEGATE_EXPR, TREE_TYPE (tmp), tmp); if (huge_cst) - huge_cst = fold_build1 (NEGATE_EXPR, TREE_TYPE (huge_cst), huge_cst); + huge_cst = fold_build1_loc (input_location, NEGATE_EXPR, + TREE_TYPE (huge_cst), huge_cst); } if (op == GT_EXPR && expr->ts.type == BT_INTEGER) - tmp = fold_build2 (MINUS_EXPR, TREE_TYPE (tmp), - tmp, build_int_cst (type, 1)); + tmp = fold_build2_loc (input_location, MINUS_EXPR, TREE_TYPE (tmp), + tmp, build_int_cst (type, 1)); gfc_add_modify (&se->pre, limit, tmp); @@ -2862,8 +3575,9 @@ gfc_conv_intrinsic_minmaxval (gfc_se * se, gfc_expr * expr, enum tree_code op) { nonempty = gfc_conv_mpz_to_tree (asize, gfc_index_integer_kind); mpz_clear (asize); - nonempty = fold_build2 (GT_EXPR, boolean_type_node, nonempty, - gfc_index_zero_node); + nonempty = fold_build2_loc (input_location, GT_EXPR, + boolean_type_node, nonempty, + gfc_index_zero_node); } maskss = NULL; } @@ -2876,12 +3590,28 @@ gfc_conv_intrinsic_minmaxval (gfc_se * se, gfc_expr * expr, enum tree_code op) /* Initialize the loop. */ gfc_conv_ss_startstride (&loop); + + /* The code generated can have more than one loop in sequence (see the + comment at the function header). This doesn't work well with the + scalarizer, which changes arrays' offset when the scalarization loops + are generated (see gfc_trans_preloop_setup). Fortunately, {min,max}val + are currently inlined in the scalar case only. 
As there is no dependency + to care about in that case, there is no temporary, so that we can use the + scalarizer temporary code to handle multiple loops. Thus, we set temp_dim + here, we call gfc_mark_ss_chain_used with flag=3 later, and we use + gfc_trans_scalarized_loop_boundary even later to restore offset. + TODO: this prevents inlining of rank > 0 minmaxval calls, so this + should eventually go away. We could either create two loops properly, + or find another way to save/restore the array offsets between the two + loops (without conflicting with temporary management), or use a single + loop minmaxval implementation. See PR 31067. */ + loop.temp_dim = loop.dimen; gfc_conv_loop_setup (&loop, &expr->where); if (nonempty == NULL && maskss == NULL && loop.dimen == 1 && loop.from[0] && loop.to[0]) - nonempty = fold_build2 (LE_EXPR, boolean_type_node, loop.from[0], - loop.to[0]); + nonempty = fold_build2_loc (input_location, LE_EXPR, boolean_type_node, + loop.from[0], loop.to[0]); nonempty_var = NULL; if (nonempty == NULL && (HONOR_INFINITIES (DECL_MODE (limit)) @@ -2907,9 +3637,9 @@ gfc_conv_intrinsic_minmaxval (gfc_se * se, gfc_expr * expr, enum tree_code op) } } - gfc_mark_ss_chain_used (arrayss, 1); + gfc_mark_ss_chain_used (arrayss, lab ? 3 : 1); if (maskss) - gfc_mark_ss_chain_used (maskss, 1); + gfc_mark_ss_chain_used (maskss, lab ? 3 : 1); /* Generate the loop body. */ gfc_start_scalarized_body (&loop, &body); @@ -2941,8 +3671,8 @@ gfc_conv_intrinsic_minmaxval (gfc_se * se, gfc_expr * expr, enum tree_code op) if (HONOR_NANS (DECL_MODE (limit))) { - tmp = fold_build2 (op == GT_EXPR ? GE_EXPR : LE_EXPR, - boolean_type_node, arrayse.expr, limit); + tmp = fold_build2_loc (input_location, op == GT_EXPR ? GE_EXPR : LE_EXPR, + boolean_type_node, arrayse.expr, limit); if (lab) ifbody = build1_v (GOTO_EXPR, lab); else @@ -2964,7 +3694,8 @@ gfc_conv_intrinsic_minmaxval (gfc_se * se, gfc_expr * expr, enum tree_code op) signed zeros. */ if (HONOR_SIGNED_ZEROS (DECL_MODE (limit))) { - tmp = fold_build2 (op, boolean_type_node, arrayse.expr, limit); + tmp = fold_build2_loc (input_location, op, boolean_type_node, + arrayse.expr, limit); ifbody = build2_v (MODIFY_EXPR, limit, arrayse.expr); tmp = build3_v (COND_EXPR, tmp, ifbody, build_empty_stmt (input_location)); @@ -2972,8 +3703,9 @@ gfc_conv_intrinsic_minmaxval (gfc_se * se, gfc_expr * expr, enum tree_code op) } else { - tmp = fold_build2 (op == GT_EXPR ? MAX_EXPR : MIN_EXPR, - type, arrayse.expr, limit); + tmp = fold_build2_loc (input_location, + op == GT_EXPR ? MAX_EXPR : MIN_EXPR, + type, arrayse.expr, limit); gfc_add_modify (&block2, limit, tmp); } } @@ -2987,15 +3719,17 @@ gfc_conv_intrinsic_minmaxval (gfc_se * se, gfc_expr * expr, enum tree_code op) if (HONOR_NANS (DECL_MODE (limit)) || HONOR_SIGNED_ZEROS (DECL_MODE (limit))) { - tmp = fold_build2 (op, boolean_type_node, arrayse.expr, limit); + tmp = fold_build2_loc (input_location, op, boolean_type_node, + arrayse.expr, limit); ifbody = build2_v (MODIFY_EXPR, limit, arrayse.expr); ifbody = build3_v (COND_EXPR, tmp, ifbody, build_empty_stmt (input_location)); } else { - tmp = fold_build2 (op == GT_EXPR ? MAX_EXPR : MIN_EXPR, - type, arrayse.expr, limit); + tmp = fold_build2_loc (input_location, + op == GT_EXPR ? 
MAX_EXPR : MIN_EXPR, + type, arrayse.expr, limit); ifbody = build2_v (MODIFY_EXPR, limit, tmp); } tmp = build3_v (COND_EXPR, fast, ifbody, elsebody); @@ -3015,14 +3749,13 @@ gfc_conv_intrinsic_minmaxval (gfc_se * se, gfc_expr * expr, enum tree_code op) if (lab) { - gfc_trans_scalarized_loop_end (&loop, 0, &body); + gfc_trans_scalarized_loop_boundary (&loop, &body); - tmp = fold_build3 (COND_EXPR, type, nonempty, nan_cst, huge_cst); + tmp = fold_build3_loc (input_location, COND_EXPR, type, nonempty, + nan_cst, huge_cst); gfc_add_modify (&loop.code[0], limit, tmp); gfc_add_expr_to_block (&loop.code[0], build1_v (LABEL_EXPR, lab)); - gfc_start_block (&body); - /* If we have a mask, only add this element if the mask is set. */ if (maskss) { @@ -3049,7 +3782,8 @@ gfc_conv_intrinsic_minmaxval (gfc_se * se, gfc_expr * expr, enum tree_code op) if (HONOR_NANS (DECL_MODE (limit)) || HONOR_SIGNED_ZEROS (DECL_MODE (limit))) { - tmp = fold_build2 (op, boolean_type_node, arrayse.expr, limit); + tmp = fold_build2_loc (input_location, op, boolean_type_node, + arrayse.expr, limit); ifbody = build2_v (MODIFY_EXPR, limit, arrayse.expr); tmp = build3_v (COND_EXPR, tmp, ifbody, build_empty_stmt (input_location)); @@ -3057,8 +3791,9 @@ gfc_conv_intrinsic_minmaxval (gfc_se * se, gfc_expr * expr, enum tree_code op) } else { - tmp = fold_build2 (op == GT_EXPR ? MAX_EXPR : MIN_EXPR, - type, arrayse.expr, limit); + tmp = fold_build2_loc (input_location, + op == GT_EXPR ? MAX_EXPR : MIN_EXPR, + type, arrayse.expr, limit); gfc_add_modify (&block, limit, tmp); } @@ -3078,7 +3813,8 @@ gfc_conv_intrinsic_minmaxval (gfc_se * se, gfc_expr * expr, enum tree_code op) if (fast) { - tmp = fold_build3 (COND_EXPR, type, nonempty, nan_cst, huge_cst); + tmp = fold_build3_loc (input_location, COND_EXPR, type, nonempty, + nan_cst, huge_cst); ifbody = build2_v (MODIFY_EXPR, limit, tmp); tmp = build3_v (COND_EXPR, fast, build_empty_stmt (input_location), ifbody); @@ -3086,7 +3822,8 @@ gfc_conv_intrinsic_minmaxval (gfc_se * se, gfc_expr * expr, enum tree_code op) } else if (HONOR_INFINITIES (DECL_MODE (limit)) && !lab) { - tmp = fold_build3 (COND_EXPR, type, nonempty, limit, huge_cst); + tmp = fold_build3_loc (input_location, COND_EXPR, type, nonempty, limit, + huge_cst); gfc_add_modify (&loop.pre, limit, tmp); } @@ -3132,14 +3869,42 @@ gfc_conv_intrinsic_btest (gfc_se * se, gfc_expr * expr) gfc_conv_intrinsic_function_args (se, expr, args, 2); type = TREE_TYPE (args[0]); - tmp = fold_build2 (LSHIFT_EXPR, type, build_int_cst (type, 1), args[1]); - tmp = fold_build2 (BIT_AND_EXPR, type, args[0], tmp); - tmp = fold_build2 (NE_EXPR, boolean_type_node, tmp, - build_int_cst (type, 0)); + tmp = fold_build2_loc (input_location, LSHIFT_EXPR, type, + build_int_cst (type, 1), args[1]); + tmp = fold_build2_loc (input_location, BIT_AND_EXPR, type, args[0], tmp); + tmp = fold_build2_loc (input_location, NE_EXPR, boolean_type_node, tmp, + build_int_cst (type, 0)); type = gfc_typenode_for_spec (&expr->ts); se->expr = convert (type, tmp); } + +/* Generate code for BGE, BGT, BLE and BLT intrinsics. */ +static void +gfc_conv_intrinsic_bitcomp (gfc_se * se, gfc_expr * expr, enum tree_code op) +{ + tree args[2]; + + gfc_conv_intrinsic_function_args (se, expr, args, 2); + + /* Convert both arguments to the unsigned type of the same size. */ + args[0] = fold_convert (unsigned_type_for (TREE_TYPE (args[0])), args[0]); + args[1] = fold_convert (unsigned_type_for (TREE_TYPE (args[1])), args[1]); + + /* If they have unequal type size, convert to the larger one. 
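Taken together, the lowering of BGE and friends amounts to the following C sketch for a 4-byte and an 8-byte operand (fixed-width types and the function name are assumptions for illustration): each side is reinterpreted as unsigned, the narrower one is zero-extended to the wider width, and an ordinary unsigned comparison is emitted.

#include <stdint.h>

/* BGE (I, J): unsigned view of both operands, widened to the larger
   precision, then compared as unsigned.  */
static int
bge_sketch (int32_t i, int64_t j)
{
  uint64_t ui = (uint32_t) i;   /* reinterpret, then zero-extend */
  uint64_t uj = (uint64_t) j;
  return ui >= uj;
}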
*/ + if (TYPE_PRECISION (TREE_TYPE (args[0])) + > TYPE_PRECISION (TREE_TYPE (args[1]))) + args[1] = fold_convert (TREE_TYPE (args[0]), args[1]); + else if (TYPE_PRECISION (TREE_TYPE (args[1])) + > TYPE_PRECISION (TREE_TYPE (args[0]))) + args[0] = fold_convert (TREE_TYPE (args[1]), args[0]); + + /* Now, we compare them. */ + se->expr = fold_build2_loc (input_location, op, boolean_type_node, + args[0], args[1]); +} + + /* Generate code to perform the specified operation. */ static void gfc_conv_intrinsic_bitop (gfc_se * se, gfc_expr * expr, enum tree_code op) @@ -3147,7 +3912,8 @@ gfc_conv_intrinsic_bitop (gfc_se * se, gfc_expr * expr, enum tree_code op) tree args[2]; gfc_conv_intrinsic_function_args (se, expr, args, 2); - se->expr = fold_build2 (op, TREE_TYPE (args[0]), args[0], args[1]); + se->expr = fold_build2_loc (input_location, op, TREE_TYPE (args[0]), + args[0], args[1]); } /* Bitwise not. */ @@ -3157,7 +3923,8 @@ gfc_conv_intrinsic_not (gfc_se * se, gfc_expr * expr) tree arg; gfc_conv_intrinsic_function_args (se, expr, &arg, 1); - se->expr = fold_build1 (BIT_NOT_EXPR, TREE_TYPE (arg), arg); + se->expr = fold_build1_loc (input_location, BIT_NOT_EXPR, + TREE_TYPE (arg), arg); } /* Set or clear a single bit. */ @@ -3172,15 +3939,16 @@ gfc_conv_intrinsic_singlebitop (gfc_se * se, gfc_expr * expr, int set) gfc_conv_intrinsic_function_args (se, expr, args, 2); type = TREE_TYPE (args[0]); - tmp = fold_build2 (LSHIFT_EXPR, type, build_int_cst (type, 1), args[1]); + tmp = fold_build2_loc (input_location, LSHIFT_EXPR, type, + build_int_cst (type, 1), args[1]); if (set) op = BIT_IOR_EXPR; else { op = BIT_AND_EXPR; - tmp = fold_build1 (BIT_NOT_EXPR, type, tmp); + tmp = fold_build1_loc (input_location, BIT_NOT_EXPR, type, tmp); } - se->expr = fold_build2 (op, type, args[0], tmp); + se->expr = fold_build2_loc (input_location, op, type, args[0], tmp); } /* Extract a sequence of bits. @@ -3197,25 +3965,47 @@ gfc_conv_intrinsic_ibits (gfc_se * se, gfc_expr * expr) type = TREE_TYPE (args[0]); mask = build_int_cst (type, -1); - mask = fold_build2 (LSHIFT_EXPR, type, mask, args[2]); - mask = fold_build1 (BIT_NOT_EXPR, type, mask); + mask = fold_build2_loc (input_location, LSHIFT_EXPR, type, mask, args[2]); + mask = fold_build1_loc (input_location, BIT_NOT_EXPR, type, mask); - tmp = fold_build2 (RSHIFT_EXPR, type, args[0], args[1]); + tmp = fold_build2_loc (input_location, RSHIFT_EXPR, type, args[0], args[1]); - se->expr = fold_build2 (BIT_AND_EXPR, type, tmp, mask); + se->expr = fold_build2_loc (input_location, BIT_AND_EXPR, type, tmp, mask); } -/* RSHIFT (I, SHIFT) = I >> SHIFT - LSHIFT (I, SHIFT) = I << SHIFT */ static void -gfc_conv_intrinsic_rlshift (gfc_se * se, gfc_expr * expr, int right_shift) +gfc_conv_intrinsic_shift (gfc_se * se, gfc_expr * expr, bool right_shift, + bool arithmetic) { - tree args[2]; + tree args[2], type, num_bits, cond; gfc_conv_intrinsic_function_args (se, expr, args, 2); - se->expr = fold_build2 (right_shift ? RSHIFT_EXPR : LSHIFT_EXPR, - TREE_TYPE (args[0]), args[0], args[1]); + args[0] = gfc_evaluate_now (args[0], &se->pre); + args[1] = gfc_evaluate_now (args[1], &se->pre); + type = TREE_TYPE (args[0]); + + if (!arithmetic) + args[0] = fold_convert (unsigned_type_for (type), args[0]); + else + gcc_assert (right_shift); + + se->expr = fold_build2_loc (input_location, + right_shift ? 
RSHIFT_EXPR : LSHIFT_EXPR, + TREE_TYPE (args[0]), args[0], args[1]); + + if (!arithmetic) + se->expr = fold_convert (type, se->expr); + + /* The Fortran standard allows shift widths <= BIT_SIZE(I), whereas + gcc requires a shift width < BIT_SIZE(I), so we have to catch this + special case. */ + num_bits = build_int_cst (TREE_TYPE (args[1]), TYPE_PRECISION (type)); + cond = fold_build2_loc (input_location, GE_EXPR, boolean_type_node, + args[1], num_bits); + + se->expr = fold_build3_loc (input_location, COND_EXPR, type, cond, + build_int_cst (type, 0), se->expr); } /* ISHFT (I, SHIFT) = (abs (shift) >= BIT_SIZE (i)) @@ -3236,34 +4026,39 @@ gfc_conv_intrinsic_ishft (gfc_se * se, gfc_expr * expr) tree rshift; gfc_conv_intrinsic_function_args (se, expr, args, 2); + + args[0] = gfc_evaluate_now (args[0], &se->pre); + args[1] = gfc_evaluate_now (args[1], &se->pre); + type = TREE_TYPE (args[0]); utype = unsigned_type_for (type); - width = fold_build1 (ABS_EXPR, TREE_TYPE (args[1]), args[1]); + width = fold_build1_loc (input_location, ABS_EXPR, TREE_TYPE (args[1]), + args[1]); /* Left shift if positive. */ - lshift = fold_build2 (LSHIFT_EXPR, type, args[0], width); + lshift = fold_build2_loc (input_location, LSHIFT_EXPR, type, args[0], width); /* Right shift if negative. We convert to an unsigned type because we want a logical shift. The standard doesn't define the case of shifting negative numbers, and we try to be compatible with other compilers, most notably g77, here. */ - rshift = fold_convert (type, fold_build2 (RSHIFT_EXPR, utype, - convert (utype, args[0]), width)); + rshift = fold_convert (type, fold_build2_loc (input_location, RSHIFT_EXPR, + utype, convert (utype, args[0]), width)); - tmp = fold_build2 (GE_EXPR, boolean_type_node, args[1], - build_int_cst (TREE_TYPE (args[1]), 0)); - tmp = fold_build3 (COND_EXPR, type, tmp, lshift, rshift); + tmp = fold_build2_loc (input_location, GE_EXPR, boolean_type_node, args[1], + build_int_cst (TREE_TYPE (args[1]), 0)); + tmp = fold_build3_loc (input_location, COND_EXPR, type, tmp, lshift, rshift); /* The Fortran standard allows shift widths <= BIT_SIZE(I), whereas gcc requires a shift width < BIT_SIZE(I), so we have to catch this special case. */ num_bits = build_int_cst (TREE_TYPE (args[1]), TYPE_PRECISION (type)); - cond = fold_build2 (GE_EXPR, boolean_type_node, width, num_bits); - - se->expr = fold_build3 (COND_EXPR, type, cond, - build_int_cst (type, 0), tmp); + cond = fold_build2_loc (input_location, GE_EXPR, boolean_type_node, width, + num_bits); + se->expr = fold_build3_loc (input_location, COND_EXPR, type, cond, + build_int_cst (type, 0), tmp); } @@ -3321,7 +4116,7 @@ gfc_conv_intrinsic_ishftc (gfc_se * se, gfc_expr * expr) gcc_unreachable (); } se->expr = build_call_expr_loc (input_location, - tmp, 3, args[0], args[1], args[2]); + tmp, 3, args[0], args[1], args[2]); /* Convert the result back to the original type, if we extended the first argument's width above. */ if (expr->ts.kind < 4) @@ -3331,22 +4126,31 @@ gfc_conv_intrinsic_ishftc (gfc_se * se, gfc_expr * expr) } type = TREE_TYPE (args[0]); + /* Evaluate arguments only once. */ + args[0] = gfc_evaluate_now (args[0], &se->pre); + args[1] = gfc_evaluate_now (args[1], &se->pre); + /* Rotate left if positive. */ - lrot = fold_build2 (LROTATE_EXPR, type, args[0], args[1]); + lrot = fold_build2_loc (input_location, LROTATE_EXPR, type, args[0], args[1]); /* Rotate right if negative. 
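The full-width ISHFTC case built here selects between a left and a right rotation at run time and passes the value through unchanged for a zero shift.  A C sketch of the equivalent 32-bit computation, assuming |shift| < 32 so the C shifts stay defined (names illustrative):

#include <stdint.h>

/* ISHFTC (I, SHIFT) without a SIZE argument: rotate left for positive
   SHIFT, rotate right by -SHIFT for negative, identity for zero.  */
static uint32_t
ishftc_sketch (uint32_t i, int shift)
{
  if (shift == 0)
    return i;                                   /* do nothing if shift == 0 */
  if (shift > 0)
    return (i << shift) | (i >> (32 - shift));  /* rotate left */
  shift = -shift;
  return (i >> shift) | (i << (32 - shift));    /* rotate right */
}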
*/ - tmp = fold_build1 (NEGATE_EXPR, TREE_TYPE (args[1]), args[1]); - rrot = fold_build2 (RROTATE_EXPR, type, args[0], tmp); + tmp = fold_build1_loc (input_location, NEGATE_EXPR, TREE_TYPE (args[1]), + args[1]); + rrot = fold_build2_loc (input_location, RROTATE_EXPR, type, args[0], tmp); zero = build_int_cst (TREE_TYPE (args[1]), 0); - tmp = fold_build2 (GT_EXPR, boolean_type_node, args[1], zero); - rrot = fold_build3 (COND_EXPR, type, tmp, lrot, rrot); + tmp = fold_build2_loc (input_location, GT_EXPR, boolean_type_node, args[1], + zero); + rrot = fold_build3_loc (input_location, COND_EXPR, type, tmp, lrot, rrot); /* Do nothing if shift == 0. */ - tmp = fold_build2 (EQ_EXPR, boolean_type_node, args[1], zero); - se->expr = fold_build3 (COND_EXPR, type, tmp, args[0], rrot); + tmp = fold_build2_loc (input_location, EQ_EXPR, boolean_type_node, args[1], + zero); + se->expr = fold_build3_loc (input_location, COND_EXPR, type, tmp, args[0], + rrot); } + /* LEADZ (i) = (i == 0) ? BIT_SIZE (i) : __builtin_clz(i) - (BIT_SIZE('int') - BIT_SIZE(i)) @@ -3377,23 +4181,23 @@ gfc_conv_intrinsic_leadz (gfc_se * se, gfc_expr * expr) if (argsize <= INT_TYPE_SIZE) { arg_type = unsigned_type_node; - func = built_in_decls[BUILT_IN_CLZ]; + func = builtin_decl_explicit (BUILT_IN_CLZ); } else if (argsize <= LONG_TYPE_SIZE) { arg_type = long_unsigned_type_node; - func = built_in_decls[BUILT_IN_CLZL]; + func = builtin_decl_explicit (BUILT_IN_CLZL); } else if (argsize <= LONG_LONG_TYPE_SIZE) { arg_type = long_long_unsigned_type_node; - func = built_in_decls[BUILT_IN_CLZLL]; + func = builtin_decl_explicit (BUILT_IN_CLZLL); } else { - gcc_assert (argsize == 128); + gcc_assert (argsize == 2 * LONG_LONG_TYPE_SIZE); arg_type = gfc_build_uint_type (argsize); - func = gfor_fndecl_clz128; + func = NULL_TREE; } /* Convert the actual argument twice: first, to the unsigned type of the @@ -3401,22 +4205,74 @@ function. But the return type is of the default INTEGER kind. */ arg = fold_convert (gfc_build_uint_type (argsize), arg); arg = fold_convert (arg_type, arg); + arg = gfc_evaluate_now (arg, &se->pre); result_type = gfc_get_int_type (gfc_default_integer_kind); /* Compute LEADZ for the case i .ne. 0. */ - s = TYPE_PRECISION (arg_type) - argsize; - tmp = fold_convert (result_type, build_call_expr (func, 1, arg)); - leadz = fold_build2 (MINUS_EXPR, result_type, - tmp, build_int_cst (result_type, s)); + if (func) + { + s = TYPE_PRECISION (arg_type) - argsize; + tmp = fold_convert (result_type, + build_call_expr_loc (input_location, func, + 1, arg)); + leadz = fold_build2_loc (input_location, MINUS_EXPR, result_type, + tmp, build_int_cst (result_type, s)); + } + else + { + /* We end up here if the argument type is larger than 'long long'. + We generate this code: + + if ((x & (ULL_MAX << ULL_SIZE)) != 0) + return clzll ((unsigned long long) (x >> ULL_SIZE)); + else + return ULL_SIZE + clzll ((unsigned long long) x); + where ULL_MAX is the largest value that an unsigned long long can hold + (0xFFFFFFFFFFFFFFFF for a 64-bit long long type), and ULL_SIZE + is the bit-size of the long long type (64 in this example).
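A compilable version of that sketch, using GCC's unsigned __int128 to stand in for the 16-byte kind (an assumption for illustration; the i == 0 case is caught by the separate BIT_SIZE conditional):

/* LEADZ for an argument wider than long long: count in the high half
   when it is nonzero, otherwise add 64 and count in the low half.  */
static int
leadz_sketch (unsigned __int128 x)
{
  unsigned long long hi = (unsigned long long) (x >> 64);

  if (hi != 0)
    return __builtin_clzll (hi);
  return 64 + __builtin_clzll ((unsigned long long) x);
}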
*/ + tree ullsize, ullmax, tmp1, tmp2, btmp; + + ullsize = build_int_cst (result_type, LONG_LONG_TYPE_SIZE); + ullmax = fold_build1_loc (input_location, BIT_NOT_EXPR, + long_long_unsigned_type_node, + build_int_cst (long_long_unsigned_type_node, + 0)); + + cond = fold_build2_loc (input_location, LSHIFT_EXPR, arg_type, + fold_convert (arg_type, ullmax), ullsize); + cond = fold_build2_loc (input_location, BIT_AND_EXPR, arg_type, + arg, cond); + cond = fold_build2_loc (input_location, NE_EXPR, boolean_type_node, + cond, build_int_cst (arg_type, 0)); + + tmp1 = fold_build2_loc (input_location, RSHIFT_EXPR, arg_type, + arg, ullsize); + tmp1 = fold_convert (long_long_unsigned_type_node, tmp1); + btmp = builtin_decl_explicit (BUILT_IN_CLZLL); + tmp1 = fold_convert (result_type, + build_call_expr_loc (input_location, btmp, 1, tmp1)); + + tmp2 = fold_convert (long_long_unsigned_type_node, arg); + btmp = builtin_decl_explicit (BUILT_IN_CLZLL); + tmp2 = fold_convert (result_type, + build_call_expr_loc (input_location, btmp, 1, tmp2)); + tmp2 = fold_build2_loc (input_location, PLUS_EXPR, result_type, + tmp2, ullsize); + + leadz = fold_build3_loc (input_location, COND_EXPR, result_type, + cond, tmp1, tmp2); + } /* Build BIT_SIZE. */ bit_size = build_int_cst (result_type, argsize); - cond = fold_build2 (EQ_EXPR, boolean_type_node, - arg, build_int_cst (arg_type, 0)); - se->expr = fold_build3 (COND_EXPR, result_type, cond, bit_size, leadz); + cond = fold_build2_loc (input_location, EQ_EXPR, boolean_type_node, + arg, build_int_cst (arg_type, 0)); + se->expr = fold_build3_loc (input_location, COND_EXPR, result_type, cond, + bit_size, leadz); } + /* TRAILZ(i) = (i == 0) ? BIT_SIZE (i) : __builtin_ctz(i) The conditional expression is necessary because the result of TRAILZ(0) @@ -3442,23 +4298,23 @@ gfc_conv_intrinsic_trailz (gfc_se * se, gfc_expr *expr) if (argsize <= INT_TYPE_SIZE) { arg_type = unsigned_type_node; - func = built_in_decls[BUILT_IN_CTZ]; + func = builtin_decl_explicit (BUILT_IN_CTZ); } else if (argsize <= LONG_TYPE_SIZE) { arg_type = long_unsigned_type_node; - func = built_in_decls[BUILT_IN_CTZL]; + func = builtin_decl_explicit (BUILT_IN_CTZL); } else if (argsize <= LONG_LONG_TYPE_SIZE) { arg_type = long_long_unsigned_type_node; - func = built_in_decls[BUILT_IN_CTZLL]; + func = builtin_decl_explicit (BUILT_IN_CTZLL); } else { - gcc_assert (argsize == 128); + gcc_assert (argsize == 2 * LONG_LONG_TYPE_SIZE); arg_type = gfc_build_uint_type (argsize); - func = gfor_fndecl_ctz128; + func = NULL_TREE; } /* Convert the actual argument twice: first, to the unsigned type of the @@ -3466,18 +4322,63 @@ gfc_conv_intrinsic_trailz (gfc_se * se, gfc_expr *expr) function. But the return type is of the default INTEGER kind. */ arg = fold_convert (gfc_build_uint_type (argsize), arg); arg = fold_convert (arg_type, arg); + arg = gfc_evaluate_now (arg, &se->pre); result_type = gfc_get_int_type (gfc_default_integer_kind); /* Compute TRAILZ for the case i .ne. 0. */ - trailz = fold_convert (result_type, build_call_expr_loc (input_location, - func, 1, arg)); + if (func) + trailz = fold_convert (result_type, build_call_expr_loc (input_location, + func, 1, arg)); + else + { + /* We end up here if the argument type is larger than 'long long'. 
+ We generate this code: + + if ((x & ULL_MAX) == 0) + return ULL_SIZE + ctzll ((unsigned long long) (x >> ULL_SIZE)); + else + return ctzll ((unsigned long long) x); + + where ULL_MAX is the largest value that an unsigned long long can hold + (0xFFFFFFFFFFFFFFFF for a 64-bit long long type), and ULL_SIZE + is the bit-size of the long long type (64 in this example). */ + tree ullsize, ullmax, tmp1, tmp2, btmp; + + ullsize = build_int_cst (result_type, LONG_LONG_TYPE_SIZE); + ullmax = fold_build1_loc (input_location, BIT_NOT_EXPR, + long_long_unsigned_type_node, + build_int_cst (long_long_unsigned_type_node, 0)); + + cond = fold_build2_loc (input_location, BIT_AND_EXPR, arg_type, arg, + fold_convert (arg_type, ullmax)); + cond = fold_build2_loc (input_location, EQ_EXPR, boolean_type_node, cond, + build_int_cst (arg_type, 0)); + + tmp1 = fold_build2_loc (input_location, RSHIFT_EXPR, arg_type, + arg, ullsize); + tmp1 = fold_convert (long_long_unsigned_type_node, tmp1); + btmp = builtin_decl_explicit (BUILT_IN_CTZLL); + tmp1 = fold_convert (result_type, + build_call_expr_loc (input_location, btmp, 1, tmp1)); + tmp1 = fold_build2_loc (input_location, PLUS_EXPR, result_type, + tmp1, ullsize); + + tmp2 = fold_convert (long_long_unsigned_type_node, arg); + btmp = builtin_decl_explicit (BUILT_IN_CTZLL); + tmp2 = fold_convert (result_type, + build_call_expr_loc (input_location, btmp, 1, tmp2)); + + trailz = fold_build3_loc (input_location, COND_EXPR, result_type, + cond, tmp1, tmp2); + } /* Build BIT_SIZE. */ bit_size = build_int_cst (result_type, argsize); - cond = fold_build2 (EQ_EXPR, boolean_type_node, - arg, build_int_cst (arg_type, 0)); - se->expr = fold_build3 (COND_EXPR, result_type, cond, bit_size, trailz); + cond = fold_build2_loc (input_location, EQ_EXPR, boolean_type_node, + arg, build_int_cst (arg_type, 0)); + se->expr = fold_build3_loc (input_location, COND_EXPR, result_type, cond, + bit_size, trailz); } /* Using __builtin_popcount for POPCNT and __builtin_parity for POPPAR; @@ -3501,17 +4402,23 @@ gfc_conv_intrinsic_popcnt_poppar (gfc_se * se, gfc_expr *expr, int parity) if (argsize <= INT_TYPE_SIZE) { arg_type = unsigned_type_node; - func = built_in_decls[parity ? BUILT_IN_PARITY : BUILT_IN_POPCOUNT]; + func = builtin_decl_explicit (parity + ? BUILT_IN_PARITY + : BUILT_IN_POPCOUNT); } else if (argsize <= LONG_TYPE_SIZE) { arg_type = long_unsigned_type_node; - func = built_in_decls[parity ? BUILT_IN_PARITYL : BUILT_IN_POPCOUNTL]; + func = builtin_decl_explicit (parity + ? BUILT_IN_PARITYL + : BUILT_IN_POPCOUNTL); } else if (argsize <= LONG_LONG_TYPE_SIZE) { arg_type = long_long_unsigned_type_node; - func = built_in_decls[parity ? BUILT_IN_PARITYLL : BUILT_IN_POPCOUNTLL]; + func = builtin_decl_explicit (parity + ? BUILT_IN_PARITYLL + : BUILT_IN_POPCOUNTLL); } else { @@ -3524,7 +4431,9 @@ as 'long long'. */ gcc_assert (argsize == 2 * LONG_LONG_TYPE_SIZE); - func = built_in_decls[parity ? BUILT_IN_PARITYLL : BUILT_IN_POPCOUNTLL]; + func = builtin_decl_explicit (parity + ? BUILT_IN_PARITYLL + : BUILT_IN_POPCOUNTLL); /* Convert it to an integer, and store into a variable.
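The split-and-combine emitted below for the 16-byte case is equivalent to the following C sketch (unsigned __int128 used as a stand-in for the wide kind; names illustrative): the two long long halves are counted separately, then added for POPCNT or XORed for POPPAR.

/* POPCNT of a 16-byte argument is the sum of the halves' counts;
   POPPAR is the XOR of the halves' parities.  */
static int
popcnt_sketch (unsigned __int128 x)
{
  return __builtin_popcountll ((unsigned long long) x)
         + __builtin_popcountll ((unsigned long long) (x >> 64));
}

static int
poppar_sketch (unsigned __int128 x)
{
  return __builtin_parityll ((unsigned long long) x)
         ^ __builtin_parityll ((unsigned long long) (x >> 64));
}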
*/ + utype = gfc_build_uint_type (argsize); @@ -3536,17 +4445,19 @@ gfc_conv_intrinsic_popcnt_poppar (gfc_se * se, gfc_expr *expr, int parity) fold_convert (long_long_unsigned_type_node, arg)); - arg2 = fold_build2 (RSHIFT_EXPR, utype, arg, - build_int_cst (utype, LONG_LONG_TYPE_SIZE)); + arg2 = fold_build2_loc (input_location, RSHIFT_EXPR, utype, arg, + build_int_cst (utype, LONG_LONG_TYPE_SIZE)); call2 = build_call_expr_loc (input_location, func, 1, fold_convert (long_long_unsigned_type_node, arg2)); /* Combine the results. */ if (parity) - se->expr = fold_build2 (BIT_XOR_EXPR, result_type, call1, call2); + se->expr = fold_build2_loc (input_location, BIT_XOR_EXPR, result_type, + call1, call2); else - se->expr = fold_build2 (PLUS_EXPR, result_type, call1, call2); + se->expr = fold_build2_loc (input_location, PLUS_EXPR, result_type, + call1, call2); return; } @@ -3617,7 +4528,7 @@ conv_generic_with_optional_char_arg (gfc_se* se, gfc_expr* expr, sym = gfc_get_symbol_for_expr (expr); gfc_conv_procedure_call (se, sym, expr->value.function.actual, expr, append_args); - gfc_free (sym); + free (sym); } @@ -3641,7 +4552,7 @@ gfc_conv_intrinsic_len (gfc_se * se, gfc_expr * expr) switch (arg->expr_type) { case EXPR_CONSTANT: - len = build_int_cst (NULL_TREE, arg->value.character.length); + len = build_int_cst (gfc_charlen_type_node, arg->value.character.length); break; case EXPR_ARRAY: @@ -3757,7 +4668,7 @@ gfc_conv_intrinsic_ichar (gfc_se * se, gfc_expr * expr) gfc_conv_intrinsic_function_args (se, expr, args, 2); gcc_assert (POINTER_TYPE_P (TREE_TYPE (args[1]))); pchartype = gfc_get_pchar_type (expr->value.function.actual->expr->ts.kind); - args[1] = fold_build1 (NOP_EXPR, pchartype, args[1]); + args[1] = fold_build1_loc (input_location, NOP_EXPR, pchartype, args[1]); type = gfc_typenode_for_spec (&expr->ts); se->expr = build_fold_indirect_ref_loc (input_location, @@ -3775,7 +4686,8 @@ gfc_conv_intrinsic_isnan (gfc_se * se, gfc_expr * expr) gfc_conv_intrinsic_function_args (se, expr, &arg, 1); se->expr = build_call_expr_loc (input_location, - built_in_decls[BUILT_IN_ISNAN], 1, arg); + builtin_decl_explicit (BUILT_IN_ISNAN), + 1, arg); STRIP_TYPE_NOPS (se->expr); se->expr = fold_convert (gfc_typenode_for_spec (&expr->ts), se->expr); } @@ -3790,8 +4702,9 @@ gfc_conv_has_intvalue (gfc_se * se, gfc_expr * expr, const int value) tree arg; gfc_conv_intrinsic_function_args (se, expr, &arg, 1); - se->expr = fold_build2 (EQ_EXPR, gfc_typenode_for_spec (&expr->ts), - arg, build_int_cst (TREE_TYPE (arg), value)); + se->expr = fold_build2_loc (input_location, EQ_EXPR, + gfc_typenode_for_spec (&expr->ts), + arg, build_int_cst (TREE_TYPE (arg), value)); } @@ -3830,13 +4743,91 @@ gfc_conv_intrinsic_merge (gfc_se * se, gfc_expr * expr) fsource = args[3]; mask = args[4]; - gfc_trans_same_strlen_check ("MERGE intrinsic", &expr->where, len, len2, - &se->pre); - se->string_length = len; + gfc_trans_same_strlen_check ("MERGE intrinsic", &expr->where, len, len2, + &se->pre); + se->string_length = len; + } + type = TREE_TYPE (tsource); + se->expr = fold_build3_loc (input_location, COND_EXPR, type, mask, tsource, + fold_convert (type, fsource)); +} + + +/* MERGE_BITS (I, J, MASK) = (I & MASK) | (J & (~MASK)).
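A one-line C sketch of that identity for 32-bit operands (the fixed width and the function name are illustrative assumptions):

#include <stdint.h>

/* MERGE_BITS: take bits of I where MASK is 1 and bits of J where MASK
   is 0.  MASK is evaluated once, since it is used twice.  */
static uint32_t
merge_bits_sketch (uint32_t i, uint32_t j, uint32_t mask)
{
  return (i & mask) | (j & ~mask);
}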
*/ + +static void +gfc_conv_intrinsic_merge_bits (gfc_se * se, gfc_expr * expr) +{ + tree args[3], mask, type; + + gfc_conv_intrinsic_function_args (se, expr, args, 3); + mask = gfc_evaluate_now (args[2], &se->pre); + + type = TREE_TYPE (args[0]); + gcc_assert (TREE_TYPE (args[1]) == type); + gcc_assert (TREE_TYPE (mask) == type); + + args[0] = fold_build2_loc (input_location, BIT_AND_EXPR, type, args[0], mask); + args[1] = fold_build2_loc (input_location, BIT_AND_EXPR, type, args[1], + fold_build1_loc (input_location, BIT_NOT_EXPR, + type, mask)); + se->expr = fold_build2_loc (input_location, BIT_IOR_EXPR, type, + args[0], args[1]); +} + + +/* MASKL(n) = n == 0 ? 0 : (~0) << (BIT_SIZE - n) + MASKR(n) = n == BIT_SIZE ? ~0 : ~((~0) << n) */ + +static void +gfc_conv_intrinsic_mask (gfc_se * se, gfc_expr * expr, int left) +{ + tree arg, allones, type, utype, res, cond, bitsize; + int i; + + gfc_conv_intrinsic_function_args (se, expr, &arg, 1); + arg = gfc_evaluate_now (arg, &se->pre); + + type = gfc_get_int_type (expr->ts.kind); + utype = unsigned_type_for (type); + + i = gfc_validate_kind (BT_INTEGER, expr->ts.kind, false); + bitsize = build_int_cst (TREE_TYPE (arg), gfc_integer_kinds[i].bit_size); + + allones = fold_build1_loc (input_location, BIT_NOT_EXPR, utype, + build_int_cst (utype, 0)); + + if (left) + { + /* Left-justified mask. */ + res = fold_build2_loc (input_location, MINUS_EXPR, TREE_TYPE (arg), + bitsize, arg); + res = fold_build2_loc (input_location, LSHIFT_EXPR, utype, allones, + fold_convert (utype, res)); + + /* Special case arg == 0, because SHIFT_EXPR wants a shift strictly + smaller than type width. */ + cond = fold_build2_loc (input_location, EQ_EXPR, boolean_type_node, arg, + build_int_cst (TREE_TYPE (arg), 0)); + res = fold_build3_loc (input_location, COND_EXPR, utype, cond, + build_int_cst (utype, 0), res); } - type = TREE_TYPE (tsource); - se->expr = fold_build3 (COND_EXPR, type, mask, tsource, - fold_convert (type, fsource)); + else + { + /* Right-justified mask. */ + res = fold_build2_loc (input_location, LSHIFT_EXPR, utype, allones, + fold_convert (utype, arg)); + res = fold_build1_loc (input_location, BIT_NOT_EXPR, utype, res); + + /* Special case arg == bit_size, because SHIFT_EXPR wants a shift + strictly smaller than type width.
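Both guards exist because a shift by the full type width is undefined, in C just as for SHIFT_EXPR.  A sketch of the two mask formulas for 32-bit operands, with the special cases split out (fixed width and names are illustrative assumptions):

#include <stdint.h>

/* MASKL (n): the n leftmost bits set; MASKR (n): the n rightmost bits
   set.  n == 0 resp. n == 32 are handled separately because shifting a
   32-bit value by 32 is undefined.  */
static uint32_t
maskl_sketch (int n)
{
  return n == 0 ? 0u : ~0u << (32 - n);
}

static uint32_t
maskr_sketch (int n)
{
  return n == 32 ? ~0u : ~(~0u << n);
}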
*/ + cond = fold_build2_loc (input_location, EQ_EXPR, boolean_type_node, + arg, bitsize); + res = fold_build3_loc (input_location, COND_EXPR, utype, + cond, allones, res); + } + + se->expr = fold_convert (type, res); } @@ -3869,12 +4860,12 @@ gfc_conv_intrinsic_nearest (gfc_se * se, gfc_expr * expr) nextafter = gfc_builtin_decl_for_float_kind (BUILT_IN_NEXTAFTER, expr->ts.kind); copysign = gfc_builtin_decl_for_float_kind (BUILT_IN_COPYSIGN, expr->ts.kind); - huge_val = gfc_builtin_decl_for_float_kind (BUILT_IN_HUGE_VAL, expr->ts.kind); type = gfc_typenode_for_spec (&expr->ts); gfc_conv_intrinsic_function_args (se, expr, args, 2); - tmp = build_call_expr_loc (input_location, copysign, 2, - build_call_expr_loc (input_location, huge_val, 0), + + huge_val = gfc_build_inf_or_huge (type, expr->ts.kind); + tmp = build_call_expr_loc (input_location, copysign, 2, huge_val, fold_convert (type, args[1])); se->expr = build_call_expr_loc (input_location, nextafter, 2, fold_convert (type, args[0]), tmp); @@ -3908,8 +4899,8 @@ gfc_conv_intrinsic_spacing (gfc_se * se, gfc_expr * expr) stmtblock_t block; k = gfc_validate_kind (BT_REAL, expr->ts.kind, false); - prec = build_int_cst (NULL_TREE, gfc_real_kinds[k].digits); - emin = build_int_cst (NULL_TREE, gfc_real_kinds[k].min_exponent - 1); + prec = build_int_cst (integer_type_node, gfc_real_kinds[k].digits); + emin = build_int_cst (integer_type_node, gfc_real_kinds[k].min_exponent - 1); tiny = gfc_conv_mpfr_to_tree (gfc_real_kinds[k].tiny, expr->ts.kind, 0); frexp = gfc_builtin_decl_for_float_kind (BUILT_IN_FREXP, expr->ts.kind); @@ -3929,17 +4920,18 @@ gfc_conv_intrinsic_spacing (gfc_se * se, gfc_expr * expr) gfc_build_addr_expr (NULL_TREE, e)); gfc_add_expr_to_block (&block, tmp); - tmp = fold_build2 (MINUS_EXPR, integer_type_node, e, prec); - gfc_add_modify (&block, e, fold_build2 (MAX_EXPR, integer_type_node, - tmp, emin)); + tmp = fold_build2_loc (input_location, MINUS_EXPR, integer_type_node, e, + prec); + gfc_add_modify (&block, e, fold_build2_loc (input_location, MAX_EXPR, + integer_type_node, tmp, emin)); tmp = build_call_expr_loc (input_location, scalbn, 2, build_real_from_int_cst (type, integer_one_node), e); gfc_add_modify (&block, res, tmp); /* Finish by building the IF statement. 
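Putting the pieces together, the SPACING lowering assembled above is equivalent to this C sketch for double precision, using the same frexp/scalbn calls as the generated code; the <float.h> constants stand in for the kind's digits, minimum exponent, and tiny value (an assumption for illustration):

#include <float.h>
#include <math.h>

/* SPACING (X) = 2**max (e - digits, emin), with SPACING (0) = TINY.  */
static double
spacing_sketch (double x)
{
  int e;

  if (x == 0.0)
    return DBL_MIN;             /* res = tiny */
  frexp (x, &e);                /* x = f * 2**e with 0.5 <= |f| < 1 */
  e -= DBL_MANT_DIG;            /* e = e - prec */
  if (e < DBL_MIN_EXP - 1)
    e = DBL_MIN_EXP - 1;        /* e = max (e, emin) */
  return scalbn (1.0, e);       /* res = 2**e */
}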
*/ - cond = fold_build2 (EQ_EXPR, boolean_type_node, arg, - build_real_from_int_cst (type, integer_zero_node)); + cond = fold_build2_loc (input_location, EQ_EXPR, boolean_type_node, arg, + build_real_from_int_cst (type, integer_zero_node)); tmp = build3_v (COND_EXPR, cond, build2_v (MODIFY_EXPR, res, tiny), gfc_finish_block (&block)); @@ -3990,14 +4982,14 @@ gfc_conv_intrinsic_rrspacing (gfc_se * se, gfc_expr * expr) gfc_build_addr_expr (NULL_TREE, e)); gfc_add_expr_to_block (&block, tmp); - tmp = fold_build2 (MINUS_EXPR, integer_type_node, - build_int_cst (NULL_TREE, prec), e); + tmp = fold_build2_loc (input_location, MINUS_EXPR, integer_type_node, + build_int_cst (integer_type_node, prec), e); tmp = build_call_expr_loc (input_location, scalbn, 2, x, tmp); gfc_add_modify (&block, x, tmp); stmt = gfc_finish_block (&block); - cond = fold_build2 (NE_EXPR, boolean_type_node, x, - build_real_from_int_cst (type, integer_zero_node)); + cond = fold_build2_loc (input_location, NE_EXPR, boolean_type_node, x, + build_real_from_int_cst (type, integer_zero_node)); tmp = build3_v (COND_EXPR, cond, stmt, build_empty_stmt (input_location)); gfc_add_expr_to_block (&se->pre, tmp); @@ -4059,6 +5051,9 @@ gfc_conv_intrinsic_size (gfc_se * se, gfc_expr * expr) gfc_init_se (&argse, NULL); actual = expr->value.function.actual; + if (actual->expr->ts.type == BT_CLASS) + gfc_add_class_array_ref (actual->expr); + ss = gfc_walk_expr (actual->expr); gcc_assert (ss != gfc_ss_terminator); argse.want_pointer = 1; @@ -4098,17 +5093,18 @@ gfc_conv_intrinsic_size (gfc_se * se, gfc_expr * expr) argse.data_not_needed = 1; gfc_conv_expr (&argse, actual->expr); gfc_add_block_to_block (&se->pre, &argse.pre); - tmp = fold_build2 (NE_EXPR, boolean_type_node, - argse.expr, null_pointer_node); + tmp = fold_build2_loc (input_location, NE_EXPR, boolean_type_node, + argse.expr, null_pointer_node); tmp = gfc_evaluate_now (tmp, &se->pre); - se->expr = fold_build3 (COND_EXPR, pvoid_type_node, - tmp, fncall1, fncall0); + se->expr = fold_build3_loc (input_location, COND_EXPR, + pvoid_type_node, tmp, fncall1, fncall0); } else { se->expr = NULL_TREE; - argse.expr = fold_build2 (MINUS_EXPR, gfc_array_index_type, - argse.expr, gfc_index_one_node); + argse.expr = fold_build2_loc (input_location, MINUS_EXPR, + gfc_array_index_type, + argse.expr, gfc_index_one_node); } } else if (expr->value.function.actual->expr->rank == 1) @@ -4127,12 +5123,14 @@ gfc_conv_intrinsic_size (gfc_se * se, gfc_expr * expr) arg1); ubound = gfc_conv_descriptor_ubound_get (arg1, argse.expr); lbound = gfc_conv_descriptor_lbound_get (arg1, argse.expr); - se->expr = fold_build2 (MINUS_EXPR, gfc_array_index_type, - ubound, lbound); - se->expr = fold_build2 (PLUS_EXPR, gfc_array_index_type, se->expr, - gfc_index_one_node); - se->expr = fold_build2 (MAX_EXPR, gfc_array_index_type, se->expr, - gfc_index_zero_node); + se->expr = fold_build2_loc (input_location, MINUS_EXPR, + gfc_array_index_type, ubound, lbound); + se->expr = fold_build2_loc (input_location, PLUS_EXPR, + gfc_array_index_type, + se->expr, gfc_index_one_node); + se->expr = fold_build2_loc (input_location, MAX_EXPR, + gfc_array_index_type, se->expr, + gfc_index_zero_node); } type = gfc_typenode_for_spec (&expr->ts); @@ -4153,8 +5151,9 @@ size_of_string_in_bytes (int kind, tree string_length) bytesize = build_int_cst (gfc_array_index_type, gfc_character_kinds[i].bit_size / 8); - return fold_build2 (MULT_EXPR, gfc_array_index_type, bytesize, - fold_convert (gfc_array_index_type, string_length)); + return 
fold_build2_loc (input_location, MULT_EXPR, gfc_array_index_type, + bytesize, + fold_convert (gfc_array_index_type, string_length)); } @@ -4179,7 +5178,7 @@ gfc_conv_intrinsic_sizeof (gfc_se *se, gfc_expr *expr) if (ss == gfc_ss_terminator) { if (arg->ts.type == BT_CLASS) - gfc_add_component_ref (arg, "$data"); + gfc_add_data_component (arg); gfc_conv_expr_reference (&argse, arg); @@ -4215,12 +5214,12 @@ gfc_conv_intrinsic_sizeof (gfc_se *se, gfc_expr *expr) idx = gfc_rank_cst[n]; lower = gfc_conv_descriptor_lbound_get (argse.expr, idx); upper = gfc_conv_descriptor_ubound_get (argse.expr, idx); - tmp = fold_build2 (MINUS_EXPR, gfc_array_index_type, - upper, lower); - tmp = fold_build2 (PLUS_EXPR, gfc_array_index_type, - tmp, gfc_index_one_node); - tmp = fold_build2 (MULT_EXPR, gfc_array_index_type, - tmp, source_bytes); + tmp = fold_build2_loc (input_location, MINUS_EXPR, + gfc_array_index_type, upper, lower); + tmp = fold_build2_loc (input_location, PLUS_EXPR, + gfc_array_index_type, tmp, gfc_index_one_node); + tmp = fold_build2_loc (input_location, MULT_EXPR, + gfc_array_index_type, tmp, source_bytes); gfc_add_modify (&argse.pre, source_bytes, tmp); } se->expr = source_bytes; @@ -4250,8 +5249,8 @@ gfc_conv_intrinsic_storage_size (gfc_se *se, gfc_expr *expr) { if (arg->ts.type == BT_CLASS) { - gfc_add_component_ref (arg, "$vptr"); - gfc_add_component_ref (arg, "$size"); + gfc_add_vptr_component (arg); + gfc_add_size_component (arg); gfc_conv_expr (&argse, arg); tmp = fold_convert (result_type, argse.expr); goto done; @@ -4275,7 +5274,8 @@ gfc_conv_intrinsic_storage_size (gfc_se *se, gfc_expr *expr) tmp = fold_convert (result_type, size_in_bytes (type)); done: - se->expr = fold_build2 (MULT_EXPR, result_type, tmp, eight.expr); + se->expr = fold_build2_loc (input_location, MULT_EXPR, result_type, tmp, + eight.expr); gfc_add_block_to_block (&se->pre, &argse.pre); } @@ -4293,8 +5293,9 @@ gfc_conv_intrinsic_strcmp (gfc_se * se, gfc_expr * expr, enum tree_code op) = gfc_build_compare_string (args[0], args[1], args[2], args[3], expr->value.function.actual->expr->ts.kind, op); - se->expr = fold_build2 (op, gfc_typenode_for_spec (&expr->ts), se->expr, - build_int_cst (TREE_TYPE (se->expr), 0)); + se->expr = fold_build2_loc (input_location, op, + gfc_typenode_for_spec (&expr->ts), se->expr, + build_int_cst (TREE_TYPE (se->expr), 0)); } /* Generate a call to the adjustl/adjustr library function. */ @@ -4357,14 +5358,14 @@ gfc_conv_intrinsic_transfer (gfc_se * se, gfc_expr * expr) gfc_actual_arglist *arg; gfc_se argse; gfc_ss *ss; - gfc_ss_info *info; + gfc_array_info *info; stmtblock_t block; int n; bool scalar_mold; info = NULL; if (se->loop) - info = &se->ss->data.info; + info = &se->ss->info->data.array; /* Convert SOURCE. The output from this stage is:- source_bytes = length of the source in bytes @@ -4432,7 +5433,8 @@ gfc_conv_intrinsic_transfer (gfc_se * se, gfc_expr * expr) /* Clean up if it was repacked. 
*/ gfc_init_block (&block); tmp = gfc_conv_array_data (argse.expr); - tmp = fold_build2 (NE_EXPR, boolean_type_node, source, tmp); + tmp = fold_build2_loc (input_location, NE_EXPR, boolean_type_node, + source, tmp); tmp = build3_v (COND_EXPR, tmp, stmt, build_empty_stmt (input_location)); gfc_add_expr_to_block (&block, tmp); @@ -4458,13 +5460,14 @@ gfc_conv_intrinsic_transfer (gfc_se * se, gfc_expr * expr) gfc_add_modify (&argse.pre, source_bytes, tmp); lower = gfc_conv_descriptor_lbound_get (argse.expr, idx); upper = gfc_conv_descriptor_ubound_get (argse.expr, idx); - tmp = fold_build2 (MINUS_EXPR, gfc_array_index_type, - upper, lower); + tmp = fold_build2_loc (input_location, MINUS_EXPR, + gfc_array_index_type, upper, lower); gfc_add_modify (&argse.pre, extent, tmp); - tmp = fold_build2 (PLUS_EXPR, gfc_array_index_type, - extent, gfc_index_one_node); - tmp = fold_build2 (MULT_EXPR, gfc_array_index_type, - tmp, source_bytes); + tmp = fold_build2_loc (input_location, PLUS_EXPR, + gfc_array_index_type, extent, + gfc_index_one_node); + tmp = fold_build2_loc (input_location, MULT_EXPR, + gfc_array_index_type, tmp, source_bytes); } } @@ -4542,15 +5545,16 @@ gfc_conv_intrinsic_transfer (gfc_se * se, gfc_expr * expr) size_bytes = gfc_create_var (gfc_array_index_type, NULL); if (tmp != NULL_TREE) - tmp = fold_build2 (MULT_EXPR, gfc_array_index_type, - tmp, dest_word_len); + tmp = fold_build2_loc (input_location, MULT_EXPR, gfc_array_index_type, + tmp, dest_word_len); else tmp = source_bytes; gfc_add_modify (&se->pre, size_bytes, tmp); gfc_add_modify (&se->pre, size_words, - fold_build2 (CEIL_DIV_EXPR, gfc_array_index_type, - size_bytes, dest_word_len)); + fold_build2_loc (input_location, CEIL_DIV_EXPR, + gfc_array_index_type, + size_bytes, dest_word_len)); /* Evaluate the bounds of the result. If the loop range exists, we have to check if it is too large. 
If so, we modify loop->to be consistent @@ -4559,25 +5563,26 @@ gfc_conv_intrinsic_transfer (gfc_se * se, gfc_expr * expr) n = se->loop->order[0]; if (se->loop->to[n] != NULL_TREE) { - tmp = fold_build2 (MINUS_EXPR, gfc_array_index_type, - se->loop->to[n], se->loop->from[n]); - tmp = fold_build2 (PLUS_EXPR, gfc_array_index_type, - tmp, gfc_index_one_node); - tmp = fold_build2 (MIN_EXPR, gfc_array_index_type, + tmp = fold_build2_loc (input_location, MINUS_EXPR, gfc_array_index_type, + se->loop->to[n], se->loop->from[n]); + tmp = fold_build2_loc (input_location, PLUS_EXPR, gfc_array_index_type, + tmp, gfc_index_one_node); + tmp = fold_build2_loc (input_location, MIN_EXPR, gfc_array_index_type, tmp, size_words); gfc_add_modify (&se->pre, size_words, tmp); gfc_add_modify (&se->pre, size_bytes, - fold_build2 (MULT_EXPR, gfc_array_index_type, - size_words, dest_word_len)); - upper = fold_build2 (PLUS_EXPR, gfc_array_index_type, - size_words, se->loop->from[n]); - upper = fold_build2 (MINUS_EXPR, gfc_array_index_type, - upper, gfc_index_one_node); + fold_build2_loc (input_location, MULT_EXPR, + gfc_array_index_type, + size_words, dest_word_len)); + upper = fold_build2_loc (input_location, PLUS_EXPR, gfc_array_index_type, + size_words, se->loop->from[n]); + upper = fold_build2_loc (input_location, MINUS_EXPR, gfc_array_index_type, + upper, gfc_index_one_node); } else { - upper = fold_build2 (MINUS_EXPR, gfc_array_index_type, - size_words, gfc_index_one_node); + upper = fold_build2_loc (input_location, MINUS_EXPR, gfc_array_index_type, + size_words, gfc_index_one_node); se->loop->from[n] = gfc_index_zero_node; } @@ -4585,9 +5590,8 @@ gfc_conv_intrinsic_transfer (gfc_se * se, gfc_expr * expr) /* Build a destination descriptor, using the pointer, source, as the data field. */ - gfc_trans_create_temp_array (&se->pre, &se->post, se->loop, - info, mold_type, NULL_TREE, false, true, false, - &expr->where); + gfc_trans_create_temp_array (&se->pre, &se->post, se->ss, mold_type, + NULL_TREE, false, true, false, &expr->where); /* Cast the pointer to the result. */ tmp = gfc_conv_descriptor_data_get (info->descriptor); @@ -4595,26 +5599,27 @@ gfc_conv_intrinsic_transfer (gfc_se * se, gfc_expr * expr) /* Use memcpy to do the transfer. */ tmp = build_call_expr_loc (input_location, - built_in_decls[BUILT_IN_MEMCPY], + builtin_decl_explicit (BUILT_IN_MEMCPY), 3, tmp, fold_convert (pvoid_type_node, source), - fold_build2 (MIN_EXPR, gfc_array_index_type, - size_bytes, source_bytes)); + fold_build2_loc (input_location, MIN_EXPR, + gfc_array_index_type, + size_bytes, source_bytes)); gfc_add_expr_to_block (&se->pre, tmp); se->expr = info->descriptor; if (expr->ts.type == BT_CHARACTER) - se->string_length = dest_word_len; + se->string_length = fold_convert (gfc_charlen_type_node, dest_word_len); return; /* Deal with scalar results. 
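The scalar path that follows boils down to a bounded memcpy into a temporary of the result type; a C sketch with a double reinterpreted as an 8-byte integer (the concrete types are an assumption for illustration):

#include <string.h>

/* Scalar TRANSFER: copy min (result size, source size) bytes of the
   source representation into a zeroed temporary of the result type.  */
static long long
transfer_sketch (double source)
{
  long long result = 0;
  size_t extent = sizeof (result) < sizeof (source)
                  ? sizeof (result) : sizeof (source);

  memcpy (&result, &source, extent);
  return result;
}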
*/ scalar_transfer: - extent = fold_build2 (MIN_EXPR, gfc_array_index_type, - dest_word_len, source_bytes); - extent = fold_build2 (MAX_EXPR, gfc_array_index_type, - extent, gfc_index_zero_node); + extent = fold_build2_loc (input_location, MIN_EXPR, gfc_array_index_type, + dest_word_len, source_bytes); + extent = fold_build2_loc (input_location, MAX_EXPR, gfc_array_index_type, + extent, gfc_index_zero_node); if (expr->ts.type == BT_CHARACTER) { @@ -4639,7 +5644,7 @@ scalar_transfer: gfc_add_modify (&block, tmpdecl, fold_convert (TREE_TYPE (ptr), tmp)); tmp = build_call_expr_loc (input_location, - built_in_decls[BUILT_IN_MEMCPY], 3, + builtin_decl_explicit (BUILT_IN_MEMCPY), 3, fold_convert (pvoid_type_node, tmpdecl), fold_convert (pvoid_type_node, ptr), extent); @@ -4647,8 +5652,8 @@ scalar_transfer: indirect = gfc_finish_block (&block); /* Wrap it up with the condition. */ - tmp = fold_build2 (LE_EXPR, boolean_type_node, - dest_word_len, source_bytes); + tmp = fold_build2_loc (input_location, LE_EXPR, boolean_type_node, + dest_word_len, source_bytes); tmp = build3_v (COND_EXPR, tmp, direct, indirect); gfc_add_expr_to_block (&se->pre, tmp); @@ -4664,7 +5669,7 @@ scalar_transfer: /* Use memcpy to do the transfer. */ tmp = gfc_build_addr_expr (NULL_TREE, tmpdecl); tmp = build_call_expr_loc (input_location, - built_in_decls[BUILT_IN_MEMCPY], 3, + builtin_decl_explicit (BUILT_IN_MEMCPY), 3, fold_convert (pvoid_type_node, tmp), fold_convert (pvoid_type_node, ptr), extent); @@ -4688,14 +5693,24 @@ gfc_conv_allocated (gfc_se *se, gfc_expr *expr) gfc_init_se (&arg1se, NULL); arg1 = expr->value.function.actual; + + if (arg1->expr->ts.type == BT_CLASS) + { + /* Make sure that class array expressions have both a _data + component reference and an array reference.... */ + if (CLASS_DATA (arg1->expr)->attr.dimension) + gfc_add_class_array_ref (arg1->expr); + /* .... whilst scalars only need the _data component. */ + else + gfc_add_data_component (arg1->expr); + } + ss1 = gfc_walk_expr (arg1->expr); if (ss1 == gfc_ss_terminator) { /* Allocatable scalar. */ arg1se.want_pointer = 1; - if (arg1->expr->ts.type == BT_CLASS) - gfc_add_component_ref (arg1->expr, "$data"); gfc_conv_expr (&arg1se, arg1->expr); tmp = arg1se.expr; } @@ -4707,8 +5722,8 @@ gfc_conv_allocated (gfc_se *se, gfc_expr *expr) tmp = gfc_conv_descriptor_data_get (arg1se.expr); } - tmp = fold_build2 (NE_EXPR, boolean_type_node, - tmp, fold_convert (TREE_TYPE (tmp), null_pointer_node)); + tmp = fold_build2_loc (input_location, NE_EXPR, boolean_type_node, tmp, + fold_convert (TREE_TYPE (tmp), null_pointer_node)); se->expr = convert (gfc_typenode_for_spec (&expr->ts), tmp); } @@ -4736,7 +5751,7 @@ gfc_conv_associated (gfc_se *se, gfc_expr *expr) gfc_init_se (&arg2se, NULL); arg1 = expr->value.function.actual; if (arg1->expr->ts.type == BT_CLASS) - gfc_add_component_ref (arg1->expr, "$data"); + gfc_add_data_component (arg1->expr); arg2 = arg1->next; ss1 = gfc_walk_expr (arg1->expr); @@ -4758,22 +5773,23 @@ gfc_conv_associated (gfc_se *se, gfc_expr *expr) } gfc_add_block_to_block (&se->pre, &arg1se.pre); gfc_add_block_to_block (&se->post, &arg1se.post); - tmp = fold_build2 (NE_EXPR, boolean_type_node, tmp2, - fold_convert (TREE_TYPE (tmp2), null_pointer_node)); + tmp = fold_build2_loc (input_location, NE_EXPR, boolean_type_node, tmp2, + fold_convert (TREE_TYPE (tmp2), null_pointer_node)); se->expr = tmp; } else { /* An optional target. 
*/ if (arg2->expr->ts.type == BT_CLASS) - gfc_add_component_ref (arg2->expr, "$data"); + gfc_add_data_component (arg2->expr); ss2 = gfc_walk_expr (arg2->expr); nonzero_charlen = NULL_TREE; if (arg1->expr->ts.type == BT_CHARACTER) - nonzero_charlen = fold_build2 (NE_EXPR, boolean_type_node, - arg1->expr->ts.u.cl->backend_decl, - integer_zero_node); + nonzero_charlen = fold_build2_loc (input_location, NE_EXPR, + boolean_type_node, + arg1->expr->ts.u.cl->backend_decl, + integer_zero_node); if (ss1 == gfc_ss_terminator) { @@ -4785,12 +5801,12 @@ gfc_conv_associated (gfc_se *se, gfc_expr *expr) gfc_conv_expr (&arg2se, arg2->expr); gfc_add_block_to_block (&se->pre, &arg1se.pre); gfc_add_block_to_block (&se->post, &arg1se.post); - tmp = fold_build2 (EQ_EXPR, boolean_type_node, - arg1se.expr, arg2se.expr); - tmp2 = fold_build2 (NE_EXPR, boolean_type_node, - arg1se.expr, null_pointer_node); - se->expr = fold_build2 (TRUTH_AND_EXPR, boolean_type_node, - tmp, tmp2); + tmp = fold_build2_loc (input_location, EQ_EXPR, boolean_type_node, + arg1se.expr, arg2se.expr); + tmp2 = fold_build2_loc (input_location, NE_EXPR, boolean_type_node, + arg1se.expr, null_pointer_node); + se->expr = fold_build2_loc (input_location, TRUTH_AND_EXPR, + boolean_type_node, tmp, tmp2); } else { @@ -4800,8 +5816,9 @@ gfc_conv_associated (gfc_se *se, gfc_expr *expr) gfc_conv_expr_lhs (&arg1se, arg1->expr); tmp = gfc_conv_descriptor_stride_get (arg1se.expr, gfc_rank_cst[arg1->expr->rank - 1]); - nonzero_arraylen = fold_build2 (NE_EXPR, boolean_type_node, tmp, - build_int_cst (TREE_TYPE (tmp), 0)); + nonzero_arraylen = fold_build2_loc (input_location, NE_EXPR, + boolean_type_node, tmp, + build_int_cst (TREE_TYPE (tmp), 0)); /* A pointer to an array, call library function _gfor_associated. */ gcc_assert (ss2 != gfc_ss_terminator); @@ -4816,15 +5833,17 @@ gfc_conv_associated (gfc_se *se, gfc_expr *expr) gfor_fndecl_associated, 2, arg1se.expr, arg2se.expr); se->expr = convert (boolean_type_node, se->expr); - se->expr = fold_build2 (TRUTH_AND_EXPR, boolean_type_node, - se->expr, nonzero_arraylen); + se->expr = fold_build2_loc (input_location, TRUTH_AND_EXPR, + boolean_type_node, se->expr, + nonzero_arraylen); } /* If target is present zero character length pointers cannot be associated. 
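For scalar arguments the test built above reduces to an address comparison guarded against disassociated pointers; a C sketch (illustrative only):

#include <stddef.h>

/* ASSOCIATED (POINTER, TARGET), scalar case: POINTER must be associated
   and point at TARGET.  Character arguments additionally require a
   nonzero length; array targets go through _gfortran_associated.  */
static int
associated_sketch (const void *pointer, const void *target)
{
  return pointer != NULL && pointer == target;
}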
       /* If target is present zero character length pointers cannot
          be associated.  */
       if (nonzero_charlen != NULL_TREE)
-        se->expr = fold_build2 (TRUTH_AND_EXPR, boolean_type_node,
-                                se->expr, nonzero_charlen);
+        se->expr = fold_build2_loc (input_location, TRUTH_AND_EXPR,
+                                    boolean_type_node,
+                                    se->expr, nonzero_charlen);
     }
 
   se->expr = convert (gfc_typenode_for_spec (&expr->ts), se->expr);
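[Reviewer sketch -- illustration only, not part of the patch.]  For the
scalar POINTER/TARGET case, gfc_conv_associated above builds exactly the
conjunction "addresses compare equal AND pointer is non-null", optionally
AND-ed with a non-zero character length.  A plain C analogue; the charlen
encoding below is hypothetical, purely for this sketch:

#include <stdio.h>

/* ASSOCIATED (POINTER, TARGET) for scalars.  charlen < 0 means "not a
   character object" in this sketch's encoding.  */
static int associated_with (const void *pointer, const void *target,
                            long charlen)
{
  int result = pointer == target && pointer != NULL;
  if (charlen >= 0)              /* the nonzero_charlen condition */
    result = result && charlen != 0;
  return result;
}

int main (void)
{
  int t = 42;
  printf ("%d\n", associated_with (&t, &t, -1));   /* 1 */
  printf ("%d\n", associated_with (NULL, &t, -1)); /* 0: null pointer */
  printf ("%d\n", associated_with (&t, &t, 0));    /* 0: zero length */
  return 0;
}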
@@ -4849,8 +5868,8 @@ gfc_conv_same_type_as (gfc_se *se, gfc_expr *expr)
 
   if (a->ts.type == BT_CLASS)
     {
-      gfc_add_component_ref (a, "$vptr");
-      gfc_add_component_ref (a, "$hash");
+      gfc_add_vptr_component (a);
+      gfc_add_hash_component (a);
     }
   else if (a->ts.type == BT_DERIVED)
     a = gfc_get_int_expr (gfc_default_integer_kind, NULL,
@@ -4858,8 +5877,8 @@ gfc_conv_same_type_as (gfc_se *se, gfc_expr *expr)
 
   if (b->ts.type == BT_CLASS)
     {
-      gfc_add_component_ref (b, "$vptr");
-      gfc_add_component_ref (b, "$hash");
+      gfc_add_vptr_component (b);
+      gfc_add_hash_component (b);
     }
   else if (b->ts.type == BT_DERIVED)
     b = gfc_get_int_expr (gfc_default_integer_kind, NULL,
@@ -4868,8 +5887,8 @@ gfc_conv_same_type_as (gfc_se *se, gfc_expr *expr)
   gfc_conv_expr (&se1, a);
   gfc_conv_expr (&se2, b);
 
-  tmp = fold_build2 (EQ_EXPR, boolean_type_node,
-                     se1.expr, fold_convert (TREE_TYPE (se1.expr), se2.expr));
+  tmp = fold_build2_loc (input_location, EQ_EXPR, boolean_type_node,
+                         se1.expr, fold_convert (TREE_TYPE (se1.expr), se2.expr));
   se->expr = convert (gfc_typenode_for_spec (&expr->ts), tmp);
 }
@@ -4909,7 +5928,7 @@ gfc_conv_intrinsic_si_kind (gfc_se *se, gfc_expr *expr)
 }
 
 
-/* Generate code for SELECTED_REAL_KIND (P, R) intrinsic function.  */
+/* Generate code for SELECTED_REAL_KIND (P, R, RADIX) intrinsic function.  */
 
 static void
 gfc_conv_intrinsic_sr_kind (gfc_se *se, gfc_expr *expr)
@@ -4994,8 +6013,8 @@ gfc_conv_intrinsic_trim (gfc_se * se, gfc_expr * expr)
   gfc_add_expr_to_block (&se->pre, tmp);
 
   /* Free the temporary afterwards, if necessary.  */
-  cond = fold_build2 (GT_EXPR, boolean_type_node,
-                      len, build_int_cst (TREE_TYPE (len), 0));
+  cond = fold_build2_loc (input_location, GT_EXPR, boolean_type_node,
+                          len, build_int_cst (TREE_TYPE (len), 0));
   tmp = gfc_call_free (var);
   tmp = build3_v (COND_EXPR, cond, tmp, build_empty_stmt (input_location));
   gfc_add_expr_to_block (&se->post, tmp);
@@ -5028,20 +6047,20 @@ gfc_conv_intrinsic_repeat (gfc_se * se, gfc_expr * expr)
   ncopies_type = TREE_TYPE (ncopies);
 
   /* Check that NCOPIES is not negative.  */
-  cond = fold_build2 (LT_EXPR, boolean_type_node, ncopies,
-                      build_int_cst (ncopies_type, 0));
+  cond = fold_build2_loc (input_location, LT_EXPR, boolean_type_node, ncopies,
+                          build_int_cst (ncopies_type, 0));
   gfc_trans_runtime_check (true, false, cond, &se->pre, &expr->where,
                            "Argument NCOPIES of REPEAT intrinsic is negative "
-                           "(its value is %lld)",
+                           "(its value is %ld)",
                            fold_convert (long_integer_type_node, ncopies));
 
   /* If the source length is zero, any non negative value of NCOPIES
      is valid, and nothing happens.  */
   n = gfc_create_var (ncopies_type, "ncopies");
-  cond = fold_build2 (EQ_EXPR, boolean_type_node, slen,
-                      build_int_cst (size_type_node, 0));
-  tmp = fold_build3 (COND_EXPR, ncopies_type, cond,
-                     build_int_cst (ncopies_type, 0), ncopies);
+  cond = fold_build2_loc (input_location, EQ_EXPR, boolean_type_node, slen,
+                          build_int_cst (size_type_node, 0));
+  tmp = fold_build3_loc (input_location, COND_EXPR, ncopies_type, cond,
+                         build_int_cst (ncopies_type, 0), ncopies);
   gfc_add_modify (&se->pre, n, tmp);
   ncopies = n;
 
@@ -5051,24 +6070,24 @@ gfc_conv_intrinsic_repeat (gfc_se * se, gfc_expr * expr)
      case to avoid the division by zero.  */
   i = gfc_validate_kind (BT_INTEGER, gfc_charlen_int_kind, false);
   max = gfc_conv_mpz_to_tree (gfc_integer_kinds[i].huge,
                               gfc_charlen_int_kind);
-  max = fold_build2 (TRUNC_DIV_EXPR, size_type_node,
-                     fold_convert (size_type_node, max), slen);
+  max = fold_build2_loc (input_location, TRUNC_DIV_EXPR, size_type_node,
+                         fold_convert (size_type_node, max), slen);
 
   largest = TYPE_PRECISION (size_type_node) > TYPE_PRECISION (ncopies_type)
             ? size_type_node : ncopies_type;
-  cond = fold_build2 (GT_EXPR, boolean_type_node,
-                      fold_convert (largest, ncopies),
-                      fold_convert (largest, max));
-  tmp = fold_build2 (EQ_EXPR, boolean_type_node, slen,
-                     build_int_cst (size_type_node, 0));
-  cond = fold_build3 (COND_EXPR, boolean_type_node, tmp, boolean_false_node,
-                      cond);
+  cond = fold_build2_loc (input_location, GT_EXPR, boolean_type_node,
+                          fold_convert (largest, ncopies),
+                          fold_convert (largest, max));
+  tmp = fold_build2_loc (input_location, EQ_EXPR, boolean_type_node, slen,
+                         build_int_cst (size_type_node, 0));
+  cond = fold_build3_loc (input_location, COND_EXPR, boolean_type_node, tmp,
+                          boolean_false_node, cond);
   gfc_trans_runtime_check (true, false, cond, &se->pre, &expr->where,
                            "Argument NCOPIES of REPEAT intrinsic is too large");
 
   /* Compute the destination length.  */
-  dlen = fold_build2 (MULT_EXPR, gfc_charlen_type_node,
-                      fold_convert (gfc_charlen_type_node, slen),
-                      fold_convert (gfc_charlen_type_node, ncopies));
+  dlen = fold_build2_loc (input_location, MULT_EXPR, gfc_charlen_type_node,
+                          fold_convert (gfc_charlen_type_node, slen),
+                          fold_convert (gfc_charlen_type_node, ncopies));
   type = gfc_get_character_type (expr->ts.kind, expr->ts.u.cl);
   dest = gfc_conv_string_tmp (se, build_pointer_type (type), dlen);
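[Reviewer sketch -- illustration only, not part of the patch.]  The three
REPEAT argument checks above, as straight C: negative NCOPIES is a runtime
error, a zero-length source forces the copy count to zero (and sidesteps
the division), and NCOPIES may not exceed huge/slen or the result length
would overflow.  SIZE_MAX stands in for gfc_integer_kinds[i].huge:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/* Validate NCOPIES and return the effective copy count.  */
static size_t repeat_checks (size_t slen, long ncopies)
{
  if (ncopies < 0)
    {
      fprintf (stderr, "Argument NCOPIES of REPEAT intrinsic is negative "
               "(its value is %ld)\n", ncopies);
      abort ();
    }
  /* Zero-length source: any non-negative NCOPIES is valid and the
     result is empty; this also avoids dividing by zero below.  */
  if (slen == 0)
    return 0;
  if ((size_t) ncopies > SIZE_MAX / slen)
    {
      fprintf (stderr,
               "Argument NCOPIES of REPEAT intrinsic is too large\n");
      abort ();
    }
  return (size_t) ncopies;
}

int main (void)
{
  printf ("%zu\n", repeat_checks (2, 3));  /* 3 */
  printf ("%zu\n", repeat_checks (0, 5));  /* 0 */
  return 0;
}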
@@ -5084,31 +6103,34 @@ gfc_conv_intrinsic_repeat (gfc_se * se, gfc_expr * expr)
   gfc_start_block (&body);
 
   /* Exit the loop if count >= ncopies.  */
-  cond = fold_build2 (GE_EXPR, boolean_type_node, count, ncopies);
+  cond = fold_build2_loc (input_location, GE_EXPR, boolean_type_node, count,
+                          ncopies);
   tmp = build1_v (GOTO_EXPR, exit_label);
   TREE_USED (exit_label) = 1;
-  tmp = fold_build3 (COND_EXPR, void_type_node, cond, tmp,
-                     build_empty_stmt (input_location));
+  tmp = fold_build3_loc (input_location, COND_EXPR, void_type_node, cond, tmp,
+                         build_empty_stmt (input_location));
   gfc_add_expr_to_block (&body, tmp);
 
   /* Call memmove (dest + (i*slen*size), src, slen*size).  */
-  tmp = fold_build2 (MULT_EXPR, gfc_charlen_type_node,
-                     fold_convert (gfc_charlen_type_node, slen),
-                     fold_convert (gfc_charlen_type_node, count));
-  tmp = fold_build2 (MULT_EXPR, gfc_charlen_type_node,
-                     tmp, fold_convert (gfc_charlen_type_node, size));
-  tmp = fold_build2 (POINTER_PLUS_EXPR, pvoid_type_node,
-                     fold_convert (pvoid_type_node, dest),
-                     fold_convert (sizetype, tmp));
+  tmp = fold_build2_loc (input_location, MULT_EXPR, gfc_charlen_type_node,
+                         fold_convert (gfc_charlen_type_node, slen),
+                         fold_convert (gfc_charlen_type_node, count));
+  tmp = fold_build2_loc (input_location, MULT_EXPR, gfc_charlen_type_node,
+                         tmp, fold_convert (gfc_charlen_type_node, size));
+  tmp = fold_build_pointer_plus_loc (input_location,
+                                     fold_convert (pvoid_type_node, dest), tmp);
   tmp = build_call_expr_loc (input_location,
-                             built_in_decls[BUILT_IN_MEMMOVE], 3, tmp, src,
-                             fold_build2 (MULT_EXPR, size_type_node, slen,
-                                          fold_convert (size_type_node, size)));
+                             builtin_decl_explicit (BUILT_IN_MEMMOVE),
+                             3, tmp, src,
+                             fold_build2_loc (input_location, MULT_EXPR,
                                              size_type_node, slen,
+                                              fold_convert (size_type_node,
+                                                            size)));
   gfc_add_expr_to_block (&body, tmp);
 
   /* Increment count.  */
-  tmp = fold_build2 (PLUS_EXPR, ncopies_type,
-                     count, build_int_cst (TREE_TYPE (count), 1));
+  tmp = fold_build2_loc (input_location, PLUS_EXPR, ncopies_type,
+                         count, build_int_cst (TREE_TYPE (count), 1));
   gfc_add_modify (&body, count, tmp);
 
   /* Build the loop.  */
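[Reviewer sketch -- illustration only, not part of the patch.]  The loop
just built, as straight C.  Each iteration memmoves one copy of the source
to dest + count*slen*size; size is the character kind's byte width, taken
as 1 here (default character kind):

#include <stdio.h>
#include <string.h>

/* dest must provide slen * ncopies bytes.  */
static void repeat_copy (char *dest, const char *src, size_t slen,
                         size_t ncopies)
{
  size_t count;
  for (count = 0; count < ncopies; count++)   /* exit when count >= ncopies */
    memmove (dest + count * slen, src, slen);
}

int main (void)
{
  char buf[7];
  repeat_copy (buf, "ab", 2, 3);
  buf[6] = '\0';
  puts (buf);   /* prints "ababab" */
  return 0;
}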
@@ -5191,7 +6213,7 @@ gfc_conv_intrinsic_function (gfc_se * se, gfc_expr * expr)
 
   name = &expr->value.function.name[2];
 
-  if (expr->rank > 0 && !expr->inline_noncopying_intrinsic)
+  if (expr->rank > 0)
     {
       lib = gfc_is_intrinsic_libcall (expr);
       if (lib != 0)
@@ -5337,6 +6359,22 @@ gfc_conv_intrinsic_function (gfc_se * se, gfc_expr * expr)
       gfc_conv_intrinsic_btest (se, expr);
       break;
 
+    case GFC_ISYM_BGE:
+      gfc_conv_intrinsic_bitcomp (se, expr, GE_EXPR);
+      break;
+
+    case GFC_ISYM_BGT:
+      gfc_conv_intrinsic_bitcomp (se, expr, GT_EXPR);
+      break;
+
+    case GFC_ISYM_BLE:
+      gfc_conv_intrinsic_bitcomp (se, expr, LE_EXPR);
+      break;
+
+    case GFC_ISYM_BLT:
+      gfc_conv_intrinsic_bitcomp (se, expr, LT_EXPR);
+      break;
+
     case GFC_ISYM_ACHAR:
     case GFC_ISYM_CHAR:
       gfc_conv_intrinsic_char (se, expr);
@@ -5414,6 +6452,14 @@ gfc_conv_intrinsic_function (gfc_se * se, gfc_expr * expr)
       gfc_conv_intrinsic_dprod (se, expr);
       break;
 
+    case GFC_ISYM_DSHIFTL:
+      gfc_conv_intrinsic_dshift (se, expr, true);
+      break;
+
+    case GFC_ISYM_DSHIFTR:
+      gfc_conv_intrinsic_dshift (se, expr, false);
+      break;
+
     case GFC_ISYM_FDATE:
       gfc_conv_intrinsic_fdate (se, expr);
       break;
@@ -5422,10 +6468,18 @@ gfc_conv_intrinsic_function (gfc_se * se, gfc_expr * expr)
       gfc_conv_intrinsic_fraction (se, expr);
       break;
 
+    case GFC_ISYM_IALL:
+      gfc_conv_intrinsic_arith (se, expr, BIT_AND_EXPR, false);
+      break;
+
     case GFC_ISYM_IAND:
      gfc_conv_intrinsic_bitop (se, expr, BIT_AND_EXPR);
      break;
 
+    case GFC_ISYM_IANY:
+      gfc_conv_intrinsic_arith (se, expr, BIT_IOR_EXPR, false);
+      break;
+
     case GFC_ISYM_IBCLR:
       gfc_conv_intrinsic_singlebitop (se, expr, 0);
       break;
@@ -5468,6 +6522,10 @@ gfc_conv_intrinsic_function (gfc_se * se, gfc_expr * expr)
       gfc_conv_intrinsic_bitop (se, expr, BIT_IOR_EXPR);
       break;
 
+    case GFC_ISYM_IPARITY:
+      gfc_conv_intrinsic_arith (se, expr, BIT_XOR_EXPR, false);
+      break;
+
     case GFC_ISYM_IS_IOSTAT_END:
       gfc_conv_has_intvalue (se, expr, LIBERROR_END);
       break;
@@ -5481,11 +6539,23 @@ gfc_conv_intrinsic_function (gfc_se * se, gfc_expr * expr)
       break;
 
     case GFC_ISYM_LSHIFT:
-      gfc_conv_intrinsic_rlshift (se, expr, 0);
+      gfc_conv_intrinsic_shift (se, expr, false, false);
       break;
 
     case GFC_ISYM_RSHIFT:
-      gfc_conv_intrinsic_rlshift (se, expr, 1);
+      gfc_conv_intrinsic_shift (se, expr, true, true);
+      break;
+
+    case GFC_ISYM_SHIFTA:
+      gfc_conv_intrinsic_shift (se, expr, true, true);
+      break;
+
+    case GFC_ISYM_SHIFTL:
+      gfc_conv_intrinsic_shift (se, expr, false, false);
+      break;
+
+    case GFC_ISYM_SHIFTR:
+      gfc_conv_intrinsic_shift (se, expr, true, false);
       break;
 
     case GFC_ISYM_ISHFT:
@@ -5516,14 +6586,14 @@ gfc_conv_intrinsic_function (gfc_se * se, gfc_expr * expr)
       gfc_conv_intrinsic_bound (se, expr, 0);
       break;
 
+    case GFC_ISYM_LCOBOUND:
+      conv_intrinsic_cobound (se, expr);
+      break;
+
     case GFC_ISYM_TRANSPOSE:
-      if (se->ss && se->ss->useflags)
-        {
-          gfc_conv_tmp_array_ref (se);
-          gfc_advance_se_ss_chain (se);
-        }
-      else
-        gfc_conv_array_transpose (se, expr->value.function.actual->expr);
+      /* The scalarizer has already been set up for reversed dimension
+         access order; now we just get the argument value normally.  */
+      gfc_conv_expr (se, expr->value.function.actual->expr);
       break;
 
     case GFC_ISYM_LEN:
@@ -5550,6 +6620,14 @@ gfc_conv_intrinsic_function (gfc_se * se, gfc_expr * expr)
       gfc_conv_intrinsic_strcmp (se, expr, LT_EXPR);
       break;
 
+    case GFC_ISYM_MASKL:
+      gfc_conv_intrinsic_mask (se, expr, 1);
+      break;
+
+    case GFC_ISYM_MASKR:
+      gfc_conv_intrinsic_mask (se, expr, 0);
+      break;
+
     case GFC_ISYM_MAX:
       if (expr->ts.type == BT_CHARACTER)
         gfc_conv_intrinsic_minmax_char (se, expr, 1);
@@ -5569,6 +6647,10 @@ gfc_conv_intrinsic_function (gfc_se * se, gfc_expr * expr)
       gfc_conv_intrinsic_merge (se, expr);
       break;
 
+    case GFC_ISYM_MERGE_BITS:
+      gfc_conv_intrinsic_merge_bits (se, expr);
+      break;
+
     case GFC_ISYM_MIN:
       if (expr->ts.type == BT_CHARACTER)
         gfc_conv_intrinsic_minmax_char (se, expr, -1);
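[Reviewer sketch -- illustration only, not part of the patch.]  Reading the
calls above, the two boolean arguments of gfc_conv_intrinsic_shift appear to
select right-vs-left and arithmetic-vs-logical: SHIFTL gets a zero-filling
left shift, SHIFTR a zero-filling right shift, and SHIFTA (like the legacy
RSHIFT) a sign-propagating right shift.  The distinction in C:

#include <stdint.h>
#include <stdio.h>

/* SHIFTA/RSHIFT: arithmetic right shift, the sign bit is replicated.
   (Right-shifting a negative signed value is implementation-defined in
   ISO C, but sign-propagating on the usual two's-complement targets.)  */
static int32_t shifta (int32_t i, int shift)
{
  return i >> shift;
}

/* SHIFTR: logical right shift, vacated bits are zero-filled.  */
static int32_t shiftr (int32_t i, int shift)
{
  return (int32_t) ((uint32_t) i >> shift);
}

int main (void)
{
  printf ("%d\n", shifta (-16, 2));   /* -4 */
  printf ("%d\n", shiftr (-16, 2));   /* 1073741820 */
  return 0;
}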
@@ -5650,12 +6732,9 @@ gfc_conv_intrinsic_function (gfc_se * se, gfc_expr * expr)
       break;
 
     case GFC_ISYM_TRANSFER:
-      if (se->ss && se->ss->useflags)
-        {
-          /* Access the previously obtained result.  */
-          gfc_conv_tmp_array_ref (se);
-          gfc_advance_se_ss_chain (se);
-        }
+      if (se->ss && se->ss->info->useflags)
+        /* Access the previously obtained result.  */
+        gfc_conv_tmp_array_ref (se);
       else
         gfc_conv_intrinsic_transfer (se, expr);
       break;
@@ -5668,6 +6747,10 @@ gfc_conv_intrinsic_function (gfc_se * se, gfc_expr * expr)
       gfc_conv_intrinsic_bound (se, expr, 1);
       break;
 
+    case GFC_ISYM_UCOBOUND:
+      conv_intrinsic_cobound (se, expr);
+      break;
+
     case GFC_ISYM_XOR:
       gfc_conv_intrinsic_bitop (se, expr, BIT_XOR_EXPR);
       break;
@@ -5676,6 +6759,23 @@ gfc_conv_intrinsic_function (gfc_se * se, gfc_expr * expr)
       gfc_conv_intrinsic_loc (se, expr);
       break;
 
+    case GFC_ISYM_THIS_IMAGE:
+      /* For num_images() == 1, handle as LCOBOUND.  */
+      if (expr->value.function.actual->expr
+          && gfc_option.coarray == GFC_FCOARRAY_SINGLE)
+        conv_intrinsic_cobound (se, expr);
+      else
+        trans_this_image (se, expr);
+      break;
+
+    case GFC_ISYM_IMAGE_INDEX:
+      trans_image_index (se, expr);
+      break;
+
+    case GFC_ISYM_NUM_IMAGES:
+      trans_num_images (se);
+      break;
+
     case GFC_ISYM_ACCESS:
     case GFC_ISYM_CHDIR:
     case GFC_ISYM_CHMOD:
@@ -5736,16 +6836,190 @@ gfc_conv_intrinsic_function (gfc_se * se, gfc_expr * expr)
 }
 
 
+static gfc_ss *
+walk_inline_intrinsic_transpose (gfc_ss *ss, gfc_expr *expr)
+{
+  gfc_ss *arg_ss, *tmp_ss;
+  gfc_actual_arglist *arg;
+
+  arg = expr->value.function.actual;
+
+  gcc_assert (arg->expr);
+
+  arg_ss = gfc_walk_subexpr (gfc_ss_terminator, arg->expr);
+  gcc_assert (arg_ss != gfc_ss_terminator);
+
+  for (tmp_ss = arg_ss; ; tmp_ss = tmp_ss->next)
+    {
+      if (tmp_ss->info->type != GFC_SS_SCALAR
+          && tmp_ss->info->type != GFC_SS_REFERENCE)
+        {
+          int tmp_dim;
+
+          gcc_assert (tmp_ss->dimen == 2);
+
+          /* We just invert dimensions.  */
+          tmp_dim = tmp_ss->dim[0];
+          tmp_ss->dim[0] = tmp_ss->dim[1];
+          tmp_ss->dim[1] = tmp_dim;
+        }
+
+      /* Stop when tmp_ss points to the last valid element of the chain...  */
+      if (tmp_ss->next == gfc_ss_terminator)
+        break;
+    }
+
+  /* ... so that we can attach the rest of the chain to it.  */
+  tmp_ss->next = ss;
+
+  return arg_ss;
+}
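[Reviewer sketch -- illustration only, not part of the patch.]
walk_inline_intrinsic_transpose never moves data: swapping dim[0] and
dim[1] makes the scalarizer read element (j,i) of the argument wherever
element (i,j) of the result is wanted.  The same trick with plain C
indexing, no temporary:

#include <stdio.h>

int main (void)
{
  int a[2][3] = { { 1, 2, 3 }, { 4, 5, 6 } };
  int i, j;

  /* Print transpose(a), a 3x2 result, by visiting a with the two
     index roles swapped.  */
  for (i = 0; i < 3; i++)
    {
      for (j = 0; j < 2; j++)
        printf ("%d ", a[j][i]);   /* element (i,j) of the transpose */
      printf ("\n");               /* prints: 1 4 / 2 5 / 3 6 */
    }
  return 0;
}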
+
+
+/* Move the given dimension of the given gfc_ss list to a nested gfc_ss list.
+   This has the side effect of reversing the nested list, so there is no
+   need to call gfc_reverse_ss on it (the given list is assumed not to be
+   reversed yet).  */
+
+static gfc_ss *
+nest_loop_dimension (gfc_ss *ss, int dim)
+{
+  int ss_dim, i;
+  gfc_ss *new_ss, *prev_ss = gfc_ss_terminator;
+  gfc_loopinfo *new_loop;
+
+  gcc_assert (ss != gfc_ss_terminator);
+
+  for (; ss != gfc_ss_terminator; ss = ss->next)
+    {
+      new_ss = gfc_get_ss ();
+      new_ss->next = prev_ss;
+      new_ss->parent = ss;
+      new_ss->info = ss->info;
+      new_ss->info->refcount++;
+      if (ss->dimen != 0)
+        {
+          gcc_assert (ss->info->type != GFC_SS_SCALAR
+                      && ss->info->type != GFC_SS_REFERENCE);
+
+          new_ss->dimen = 1;
+          new_ss->dim[0] = ss->dim[dim];
+
+          gcc_assert (dim < ss->dimen);
+
+          ss_dim = --ss->dimen;
+          for (i = dim; i < ss_dim; i++)
+            ss->dim[i] = ss->dim[i + 1];
+
+          ss->dim[ss_dim] = 0;
+        }
+      prev_ss = new_ss;
+
+      if (ss->nested_ss)
+        {
+          ss->nested_ss->parent = new_ss;
+          new_ss->nested_ss = ss->nested_ss;
+        }
+      ss->nested_ss = new_ss;
+    }
+
+  new_loop = gfc_get_loopinfo ();
+  gfc_init_loopinfo (new_loop);
+
+  gcc_assert (prev_ss != NULL);
+  gcc_assert (prev_ss != gfc_ss_terminator);
+  gfc_add_ss_to_loop (new_loop, prev_ss);
+  return new_ss->parent;
+}
+
+
+/* Create the gfc_ss list for the SUM/PRODUCT arguments when the function
+   is to be inlined.  */
+
+static gfc_ss *
+walk_inline_intrinsic_arith (gfc_ss *ss, gfc_expr *expr)
+{
+  gfc_ss *tmp_ss, *tail, *array_ss;
+  gfc_actual_arglist *arg1, *arg2, *arg3;
+  int sum_dim;
+  bool scalar_mask = false;
+
+  /* The rank of the result will be determined later.  */
+  arg1 = expr->value.function.actual;
+  arg2 = arg1->next;
+  arg3 = arg2->next;
+  gcc_assert (arg3 != NULL);
+
+  if (expr->rank == 0)
+    return ss;
+
+  tmp_ss = gfc_ss_terminator;
+
+  if (arg3->expr)
+    {
+      gfc_ss *mask_ss;
+
+      mask_ss = gfc_walk_subexpr (tmp_ss, arg3->expr);
+      if (mask_ss == tmp_ss)
+        scalar_mask = 1;
+
+      tmp_ss = mask_ss;
+    }
+
+  array_ss = gfc_walk_subexpr (tmp_ss, arg1->expr);
+  gcc_assert (array_ss != tmp_ss);
+
+  /* Odd thing: If the mask is scalar, it is used by the frontend after
+     the array (to make an if around the nested loop).  Thus it shall
+     be after array_ss once the gfc_ss list is reversed.  */
+  if (scalar_mask)
+    tmp_ss = gfc_get_scalar_ss (array_ss, arg3->expr);
+  else
+    tmp_ss = array_ss;
+
+  /* "Hide" the dimension on which we will sum in the first arg's
+     scalarization chain.  */
+  sum_dim = mpz_get_si (arg2->expr->value.integer) - 1;
+  tail = nest_loop_dimension (tmp_ss, sum_dim);
+  tail->next = ss;
+
+  return tmp_ss;
+}
+
+
+static gfc_ss *
+walk_inline_intrinsic_function (gfc_ss * ss, gfc_expr * expr)
+{
+
+  switch (expr->value.function.isym->id)
+    {
+    case GFC_ISYM_PRODUCT:
+    case GFC_ISYM_SUM:
+      return walk_inline_intrinsic_arith (ss, expr);
+
+    case GFC_ISYM_TRANSPOSE:
+      return walk_inline_intrinsic_transpose (ss, expr);
+
+    default:
+      gcc_unreachable ();
+    }
+  gcc_unreachable ();
+}
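[Reviewer sketch -- illustration only, not part of the patch.]  The effect
of nest_loop_dimension for an inlined SUM (a, dim=1) over a 2x3 array: the
summed dimension is "hidden" as an inner, nested loop, while the surviving
dimension stays in the outer scalarizer loop:

#include <stdio.h>

int main (void)
{
  int a[2][3] = { { 1, 2, 3 }, { 4, 5, 6 } };
  int sum[3];
  int i, j;

  for (j = 0; j < 3; j++)        /* outer scalarizer loop: result dim */
    {
      sum[j] = 0;
      for (i = 0; i < 2; i++)    /* nested loop: the hidden dim=1 */
        sum[j] += a[i][j];
      printf ("%d ", sum[j]);    /* prints: 5 7 9 */
    }
  printf ("\n");
  return 0;
}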
 
 
 /* This generates code to execute before entering the
    scalarization loop.  Currently does nothing.  */
 
 void
 gfc_add_intrinsic_ss_code (gfc_loopinfo * loop ATTRIBUTE_UNUSED, gfc_ss * ss)
 {
-  switch (ss->expr->value.function.isym->id)
+  switch (ss->info->expr->value.function.isym->id)
     {
     case GFC_ISYM_UBOUND:
     case GFC_ISYM_LBOUND:
+    case GFC_ISYM_UCOBOUND:
+    case GFC_ISYM_LCOBOUND:
+    case GFC_ISYM_THIS_IMAGE:
       break;
 
     default:
@@ -5754,25 +7028,20 @@ gfc_add_intrinsic_ss_code (gfc_loopinfo * loop ATTRIBUTE_UNUSED, gfc_ss * ss)
 }
 
 
-/* UBOUND and LBOUND intrinsics with one parameter are expanded into code
-   inside the scalarization loop.  */
+/* The LBOUND, LCOBOUND, UBOUND and UCOBOUND intrinsics with one parameter
+   are expanded into code inside the scalarization loop.  */
 
 static gfc_ss *
 gfc_walk_intrinsic_bound (gfc_ss * ss, gfc_expr * expr)
 {
-  gfc_ss *newss;
+  if (expr->value.function.actual->expr->ts.type == BT_CLASS)
+    gfc_add_class_array_ref (expr->value.function.actual->expr);
 
   /* The two argument version returns a scalar.  */
   if (expr->value.function.actual->next->expr)
     return ss;
 
-  newss = gfc_get_ss ();
-  newss->type = GFC_SS_INTRINSIC;
-  newss->expr = expr;
-  newss->next = ss;
-  newss->data.info.dimen = 1;
-
-  return newss;
+  return gfc_get_array_ss (ss, expr, 1, GFC_SS_INTRINSIC);
 }
 
 
@@ -5781,17 +7050,43 @@ gfc_walk_intrinsic_bound (gfc_ss * ss, gfc_expr * expr)
 
 static gfc_ss *
 gfc_walk_intrinsic_libfunc (gfc_ss * ss, gfc_expr * expr)
 {
-  gfc_ss *newss;
-
   gcc_assert (expr->rank > 0);
+  return gfc_get_array_ss (ss, expr, expr->rank, GFC_SS_FUNCTION);
+}
+
+
+/* Return whether the function call expression EXPR will be expanded
+   inline by gfc_conv_intrinsic_function.  */
+
+bool
+gfc_inline_intrinsic_function_p (gfc_expr *expr)
+{
+  gfc_actual_arglist *args;
+
+  if (!expr->value.function.isym)
+    return false;
+
+  switch (expr->value.function.isym->id)
+    {
+    case GFC_ISYM_PRODUCT:
+    case GFC_ISYM_SUM:
+      /* Disable inline expansion if code size matters.  */
+      if (optimize_size)
+        return false;
 
-  newss = gfc_get_ss ();
-  newss->type = GFC_SS_FUNCTION;
-  newss->expr = expr;
-  newss->next = ss;
-  newss->data.info.dimen = expr->rank;
+      args = expr->value.function.actual;
+      /* We need to be able to subset the SUM argument at compile-time.  */
+      if (args->next->expr && args->next->expr->expr_type != EXPR_CONSTANT)
+        return false;
 
-  return newss;
+      return true;
+
+    case GFC_ISYM_TRANSPOSE:
+      return true;
+
+    default:
+      return false;
+    }
 }
 
 
@@ -5805,12 +7100,18 @@ gfc_is_intrinsic_libcall (gfc_expr * expr)
   gcc_assert (expr->expr_type == EXPR_FUNCTION && expr->value.function.isym);
   gcc_assert (expr->rank > 0);
 
+  if (gfc_inline_intrinsic_function_p (expr))
+    return 0;
+
   switch (expr->value.function.isym->id)
     {
     case GFC_ISYM_ALL:
     case GFC_ISYM_ANY:
     case GFC_ISYM_COUNT:
     case GFC_ISYM_JN2:
+    case GFC_ISYM_IANY:
+    case GFC_ISYM_IALL:
+    case GFC_ISYM_IPARITY:
     case GFC_ISYM_MATMUL:
     case GFC_ISYM_MAXLOC:
     case GFC_ISYM_MAXVAL:
@@ -5822,7 +7123,6 @@ gfc_is_intrinsic_libcall (gfc_expr * expr)
     case GFC_ISYM_SUM:
     case GFC_ISYM_SHAPE:
     case GFC_ISYM_SPREAD:
-    case GFC_ISYM_TRANSPOSE:
     case GFC_ISYM_YN2:
       /* Ignore absent optional parameters.  */
       return 1;
@@ -5848,11 +7148,15 @@ gfc_walk_intrinsic_function (gfc_ss * ss, gfc_expr * expr,
   gcc_assert (isym);
 
   if (isym->elemental)
-    return gfc_walk_elemental_function_args (ss, expr->value.function.actual, GFC_SS_SCALAR);
+    return gfc_walk_elemental_function_args (ss, expr->value.function.actual,
+                                             GFC_SS_SCALAR);
 
   if (expr->rank == 0)
     return ss;
 
+  if (gfc_inline_intrinsic_function_p (expr))
+    return walk_inline_intrinsic_function (ss, expr);
+
   if (gfc_is_intrinsic_libcall (expr))
     return gfc_walk_intrinsic_libfunc (ss, expr);
 
@@ -5860,7 +7164,10 @@ gfc_walk_intrinsic_function (gfc_ss * ss, gfc_expr * expr,
   switch (isym->id)
     {
     case GFC_ISYM_LBOUND:
+    case GFC_ISYM_LCOBOUND:
     case GFC_ISYM_UBOUND:
+    case GFC_ISYM_UCOBOUND:
+    case GFC_ISYM_THIS_IMAGE:
       return gfc_walk_intrinsic_bound (ss, expr);
 
     case GFC_ISYM_TRANSFER:
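[Reviewer sketch -- illustration only, not part of the patch.]  The hunk
that follows replaces the old gfc_conv_intrinsic_move_alloc with
per-intrinsic helpers behind a new gfc_conv_intrinsic_subroutine
dispatcher.  ATOMIC_DEFINE and ATOMIC_REF simply become converted
assignments -- sufficient for the single-image case; no fences or atomic
read-modify-write machinery is emitted.  In outline:

#include <stdint.h>
#include <stdio.h>

typedef int32_t atom_t;   /* stand-in for integer(atomic_int_kind) */

/* ATOMIC_DEFINE (ATOM, VALUE): store VALUE converted to ATOM's type.  */
static void atomic_def (atom_t *atom, long value)
{
  *atom = (atom_t) value;
}

/* ATOMIC_REF (VALUE, ATOM): load ATOM converted to VALUE's type.  */
static void atomic_ref (long *value, const atom_t *atom)
{
  *value = (long) *atom;
}

int main (void)
{
  atom_t atom = 0;
  long v;
  atomic_def (&atom, 7);
  atomic_ref (&v, &atom);
  printf ("%ld\n", v);   /* 7 */
  return 0;
}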
@@ -5875,41 +7182,221 @@ gfc_walk_intrinsic_function (gfc_ss * ss, gfc_expr * expr,
 }
 
 
-tree
-gfc_conv_intrinsic_move_alloc (gfc_code *code)
+static tree
+conv_intrinsic_atomic_def (gfc_code *code)
 {
-  if (code->ext.actual->expr->rank == 0)
-    {
-      /* Scalar arguments: Generate pointer assignments.  */
-      gfc_expr *from, *to;
-      stmtblock_t block;
-      tree tmp;
+  gfc_se atom, value;
+  stmtblock_t block;
 
-      from = code->ext.actual->expr;
-      to = code->ext.actual->next->expr;
+  gfc_init_se (&atom, NULL);
+  gfc_init_se (&value, NULL);
+  gfc_conv_expr (&atom, code->ext.actual->expr);
+  gfc_conv_expr (&value, code->ext.actual->next->expr);
 
-      gfc_start_block (&block);
+  gfc_init_block (&block);
+  gfc_add_modify (&block, atom.expr,
+                  fold_convert (TREE_TYPE (atom.expr), value.expr));
+  return gfc_finish_block (&block);
+}
+
+
+static tree
+conv_intrinsic_atomic_ref (gfc_code *code)
+{
+  gfc_se atom, value;
+  stmtblock_t block;
+
+  gfc_init_se (&atom, NULL);
+  gfc_init_se (&value, NULL);
+  gfc_conv_expr (&value, code->ext.actual->expr);
+  gfc_conv_expr (&atom, code->ext.actual->next->expr);
+
+  gfc_init_block (&block);
+  gfc_add_modify (&block, value.expr,
+                  fold_convert (TREE_TYPE (value.expr), atom.expr));
+  return gfc_finish_block (&block);
+}
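[Reviewer sketch -- illustration only, not part of the patch.]
conv_intrinsic_move_alloc, which follows, implements MOVE_ALLOC as a
pointer move rather than a copy: deallocate TO, transfer FROM's data
pointer (and the _vptr for CLASS objects), then nullify FROM.  Stripped
to its essentials, with a simplified, hypothetical descriptor:

#include <stdio.h>
#include <stdlib.h>

/* gfortran's real descriptor also carries bounds, strides and an
   element size; only ownership of data matters here.  */
struct descriptor { void *data; };

/* MOVE_ALLOC (FROM, TO): no payload is copied, only ownership moves.  */
static void move_alloc (struct descriptor *from, struct descriptor *to)
{
  free (to->data);         /* deallocate "to" */
  to->data = from->data;   /* move the pointer */
  from->data = NULL;       /* "from" is now deallocated */
}

int main (void)
{
  struct descriptor from = { malloc (32) }, to = { NULL };
  move_alloc (&from, &to);
  printf ("%d %d\n", from.data == NULL, to.data != NULL);   /* 1 1 */
  free (to.data);
  return 0;
}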
+
+
+static tree
+conv_intrinsic_move_alloc (gfc_code *code)
+{
+  stmtblock_t block;
+  gfc_expr *from_expr, *to_expr;
+  gfc_expr *to_expr2, *from_expr2 = NULL;
+  gfc_se from_se, to_se;
+  gfc_ss *from_ss, *to_ss;
+  tree tmp;
+
+  gfc_start_block (&block);
 
-      if (to->ts.type == BT_CLASS)
-        tmp = gfc_trans_class_assign (to, from, EXEC_POINTER_ASSIGN);
+  from_expr = code->ext.actual->expr;
+  to_expr = code->ext.actual->next->expr;
+
+  gfc_init_se (&from_se, NULL);
+  gfc_init_se (&to_se, NULL);
+
+  if (from_expr->rank == 0)
+    {
+      gcc_assert (from_expr->ts.type != BT_CLASS
+                  || to_expr->ts.type == BT_CLASS);
+      if (from_expr->ts.type != BT_CLASS)
+        from_expr2 = from_expr;
       else
-        tmp = gfc_trans_pointer_assignment (to, from);
-      gfc_add_expr_to_block (&block, tmp);
+        {
+          from_expr2 = gfc_copy_expr (from_expr);
+          gfc_add_data_component (from_expr2);
+        }
 
-      if (from->ts.type == BT_CLASS)
-        tmp = gfc_trans_class_assign (from, gfc_get_null_expr (NULL),
-                                      EXEC_POINTER_ASSIGN);
+      if (to_expr->ts.type != BT_CLASS)
+        to_expr2 = to_expr;
       else
-        tmp = gfc_trans_pointer_assignment (from,
-                                            gfc_get_null_expr (NULL));
+        {
+          to_expr2 = gfc_copy_expr (to_expr);
+          gfc_add_data_component (to_expr2);
+        }
+
+      from_se.want_pointer = 1;
+      to_se.want_pointer = 1;
+      gfc_conv_expr (&from_se, from_expr2);
+      gfc_conv_expr (&to_se, to_expr2);
+      gfc_add_block_to_block (&block, &from_se.pre);
+      gfc_add_block_to_block (&block, &to_se.pre);
+
+      /* Deallocate "to".  */
+      tmp = gfc_deallocate_scalar_with_status (to_se.expr, NULL_TREE, true,
+                                               to_expr2, to_expr->ts);
       gfc_add_expr_to_block (&block, tmp);
 
+      /* Assign (_data) pointers.  */
+      gfc_add_modify_loc (input_location, &block, to_se.expr,
+                          fold_convert (TREE_TYPE (to_se.expr), from_se.expr));
+
+      /* Set "from" to NULL.  */
+      gfc_add_modify_loc (input_location, &block, from_se.expr,
+                          fold_convert (TREE_TYPE (from_se.expr), null_pointer_node));
+
+      gfc_add_block_to_block (&block, &from_se.post);
+      gfc_add_block_to_block (&block, &to_se.post);
+
+      /* Set _vptr.  */
+      if (to_expr->ts.type == BT_CLASS)
+        {
+          gfc_free_expr (to_expr2);
+          gfc_init_se (&to_se, NULL);
+          to_se.want_pointer = 1;
+          gfc_add_vptr_component (to_expr);
+          gfc_conv_expr (&to_se, to_expr);
+
+          if (from_expr->ts.type == BT_CLASS)
+            {
+              gfc_free_expr (from_expr2);
+              gfc_init_se (&from_se, NULL);
+              from_se.want_pointer = 1;
+              gfc_add_vptr_component (from_expr);
+              gfc_conv_expr (&from_se, from_expr);
+              tmp = from_se.expr;
+            }
+          else
+            {
+              gfc_symbol *vtab;
+              vtab = gfc_find_derived_vtab (from_expr->ts.u.derived);
+              gcc_assert (vtab);
+              tmp = gfc_build_addr_expr (NULL_TREE, gfc_get_symbol_decl (vtab));
+            }
+
+          gfc_add_modify_loc (input_location, &block, to_se.expr,
+                              fold_convert (TREE_TYPE (to_se.expr), tmp));
+        }
+
       return gfc_finish_block (&block);
     }
-  else
-    /* Array arguments: Generate library code.  */
-    return gfc_trans_call (code, false, NULL_TREE, NULL_TREE, false);
+
+  /* Update _vptr component.  */
+  if (to_expr->ts.type == BT_CLASS)
+    {
+      to_se.want_pointer = 1;
+      to_expr2 = gfc_copy_expr (to_expr);
+      gfc_add_vptr_component (to_expr2);
+      gfc_conv_expr (&to_se, to_expr2);
+
+      if (from_expr->ts.type == BT_CLASS)
+        {
+          from_se.want_pointer = 1;
+          from_expr2 = gfc_copy_expr (from_expr);
+          gfc_add_vptr_component (from_expr2);
+          gfc_conv_expr (&from_se, from_expr2);
+          tmp = from_se.expr;
+        }
+      else
+        {
+          gfc_symbol *vtab;
+          vtab = gfc_find_derived_vtab (from_expr->ts.u.derived);
+          gcc_assert (vtab);
+          tmp = gfc_build_addr_expr (NULL_TREE, gfc_get_symbol_decl (vtab));
+        }
+
+      gfc_add_modify_loc (input_location, &block, to_se.expr,
+                          fold_convert (TREE_TYPE (to_se.expr), tmp));
+      gfc_free_expr (to_expr2);
+      gfc_init_se (&to_se, NULL);
+
+      if (from_expr->ts.type == BT_CLASS)
+        {
+          gfc_free_expr (from_expr2);
+          gfc_init_se (&from_se, NULL);
+        }
+    }
+
+  /* Deallocate "to".  */
+  to_ss = gfc_walk_expr (to_expr);
+  from_ss = gfc_walk_expr (from_expr);
+  gfc_conv_expr_descriptor (&to_se, to_expr, to_ss);
+  gfc_conv_expr_descriptor (&from_se, from_expr, from_ss);
+
+  tmp = gfc_conv_descriptor_data_get (to_se.expr);
+  tmp = gfc_deallocate_with_status (tmp, NULL_TREE, true, to_expr);
+  gfc_add_expr_to_block (&block, tmp);
+
+  /* Move the pointer and update the array descriptor data.  */
+  gfc_add_modify_loc (input_location, &block, to_se.expr, from_se.expr);
+
+  /* Set "from" to NULL.  */
+  tmp = gfc_conv_descriptor_data_get (from_se.expr);
+  gfc_add_modify_loc (input_location, &block, tmp,
+                      fold_convert (TREE_TYPE (tmp), null_pointer_node));
+
+  return gfc_finish_block (&block);
 }
 
 
+tree
+gfc_conv_intrinsic_subroutine (gfc_code *code)
+{
+  tree res;
+
+  gcc_assert (code->resolved_isym);
+
+  switch (code->resolved_isym->id)
+    {
+    case GFC_ISYM_MOVE_ALLOC:
+      res = conv_intrinsic_move_alloc (code);
+      break;
+
+    case GFC_ISYM_ATOMIC_DEF:
+      res = conv_intrinsic_atomic_def (code);
+      break;
+
+    case GFC_ISYM_ATOMIC_REF:
+      res = conv_intrinsic_atomic_ref (code);
+      break;
+
+    default:
+      res = NULL_TREE;
+      break;
+    }
+
+  return res;
+}
+
 #include "gt-fortran-trans-intrinsic.h"