1 /* Array translation routines
2 Copyright (C) 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010,
4 Free Software Foundation, Inc.
5 Contributed by Paul Brook <paul@nowt.org>
6 and Steven Bosscher <s.bosscher@student.tudelft.nl>
8 This file is part of GCC.
10 GCC is free software; you can redistribute it and/or modify it under
11 the terms of the GNU General Public License as published by the Free
12 Software Foundation; either version 3, or (at your option) any later
15 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
16 WARRANTY; without even the implied warranty of MERCHANTABILITY or
17 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
20 You should have received a copy of the GNU General Public License
21 along with GCC; see the file COPYING3. If not see
22 <http://www.gnu.org/licenses/>. */
24 /* trans-array.c-- Various array related code, including scalarization,
25 allocation, initialization and other support routines. */
27 /* How the scalarizer works.
28 In gfortran, array expressions use the same core routines as scalar
30 First, a Scalarization State (SS) chain is built. This is done by walking
31 the expression tree, and building a linear list of the terms in the
32 expression. As the tree is walked, scalar subexpressions are translated.
34 The scalarization parameters are stored in a gfc_loopinfo structure.
35 First the start and stride of each term is calculated by
36 gfc_conv_ss_startstride. During this process the expressions for the array
37 descriptors and data pointers are also translated.
39 If the expression is an assignment, we must then resolve any dependencies.
40 In Fortran all the RHS values of an assignment must be evaluated before
41 any assignments take place. This can require a temporary array to store the
42 values. We also require a temporary when we are passing array expressions
43 or vector subscripts as procedure parameters.
45 Array sections are passed without copying to a temporary. These use the
46 scalarizer to determine the shape of the section. The flag
47 loop->array_parameter tells the scalarizer that the actual values and loop
48 variables will not be required.
50 The function gfc_conv_loop_setup generates the scalarization setup code.
51 It determines the range of the scalarizing loop variables. If a temporary
52 is required, this is created and initialized. Code for scalar expressions
53 taken outside the loop is also generated at this time. Next the offset and
54 scaling required to translate from loop variables to array indices for each
57 A call to gfc_start_scalarized_body marks the start of the scalarized
58 expression. This creates a scope and declares the loop variables. Before
59 calling this gfc_make_ss_chain_used must be used to indicate which terms
60 will be used inside this loop.
62 The scalar gfc_conv_* functions are then used to build the main body of the
63 scalarization loop. Scalarization loop variables and precalculated scalar
64 values are automatically substituted. Note that gfc_advance_se_ss_chain
65 must be used, rather than changing the se->ss directly.
67 For assignment expressions requiring a temporary two sub loops are
68 generated. The first stores the result of the expression in the temporary,
69 the second copies it to the result. A call to
70 gfc_trans_scalarized_loop_boundary marks the end of the main loop code and
71 the start of the copying loop. The temporary may be less than full rank.
73 Finally gfc_trans_scalarizing_loops is called to generate the implicit do
74 loops. The loops are added to the pre chain of the loopinfo. The post
75 chain may still contain cleanup code.
77 After the loop code has been added into its parent scope gfc_cleanup_loop
78 is called to free all the SS allocated by the scalarizer. */
82 #include "coretypes.h"
85 #include "diagnostic-core.h" /* For internal_error/fatal_error. */
88 #include "constructor.h"
90 #include "trans-stmt.h"
91 #include "trans-types.h"
92 #include "trans-array.h"
93 #include "trans-const.h"
94 #include "dependency.h"
96 static bool gfc_get_array_constructor_size (mpz_t *, gfc_constructor_base);
98 /* The contents of this structure aren't actually used, just the address. */
99 static gfc_ss gfc_ss_terminator_var;
/* Sentinel marking the end of every SS chain; it is compared by address
   only (see the loops below testing "ss != gfc_ss_terminator").  */
100 gfc_ss * const gfc_ss_terminator = &gfc_ss_terminator_var;
/* Return the type of the data pointer stored in array descriptor DESC.
   NOTE(review): the return-type line and braces of this definition are
   not visible in this extract.  */
104 gfc_array_dataptr_type (tree desc)
106 return (GFC_TYPE_ARRAY_DATAPTR_TYPE (TREE_TYPE (desc)));
110 /* Build expressions to access the members of an array descriptor.
111 It's surprisingly easy to mess up here, so never access
112 an array descriptor by "brute force", always use these
113 functions. This also avoids problems if we change the format
114 of an array descriptor.
116 To understand these magic numbers, look at the comments
117 before gfc_build_array_type() in trans-types.c.
119 The code within these defines should be the only code which knows the format
120 of an array descriptor.
122 Any code just needing to obtain the bounds of an array should use
123 gfc_conv_array_* rather than the following functions, as these will return
124 known constant values, and work with arrays which do not have descriptors.
126 Don't forget to #undef these! */
/* Indices of the fields within the array descriptor RECORD_TYPE.
   DATA_FIELD == 0 is asserted at each use site below.  */
129 #define OFFSET_FIELD 1
130 #define DTYPE_FIELD 2
131 #define DIMENSION_FIELD 3
132 #define CAF_TOKEN_FIELD 4
/* Indices of the sub-fields within one element of the per-dimension
   array of the descriptor.  */
134 #define STRIDE_SUBFIELD 0
135 #define LBOUND_SUBFIELD 1
136 #define UBOUND_SUBFIELD 2
138 /* This provides READ-ONLY access to the data field. The field itself
139 doesn't have the proper type. */
/* Build a read of the data field of descriptor DESC, converted to the
   proper data-pointer type (the raw field does not have that type).
   NOTE(review): declarations, braces and the final return are missing
   from this extract.  */
142 gfc_conv_descriptor_data_get (tree desc)
146 type = TREE_TYPE (desc);
147 gcc_assert (GFC_DESCRIPTOR_TYPE_P (type));
/* DATA_FIELD == 0, so the data member is the first field in the chain.  */
149 field = TYPE_FIELDS (type);
150 gcc_assert (DATA_FIELD == 0);
152 t = fold_build3_loc (input_location, COMPONENT_REF, TREE_TYPE (field), desc,
154 t = fold_convert (GFC_TYPE_ARRAY_DATAPTR_TYPE (type), t);
159 /* This provides WRITE access to the data field.
163 This function gets called through the macro
164 gfc_conv_descriptor_data_set.  */
/* Append to BLOCK an assignment of VALUE (converted to the field's type)
   into the data field of descriptor DESC.  */
168 gfc_conv_descriptor_data_set (stmtblock_t *block, tree desc, tree value)
172 type = TREE_TYPE (desc);
173 gcc_assert (GFC_DESCRIPTOR_TYPE_P (type));
/* The data member is asserted to be the first descriptor field.  */
175 field = TYPE_FIELDS (type);
176 gcc_assert (DATA_FIELD == 0);
178 t = fold_build3_loc (input_location, COMPONENT_REF, TREE_TYPE (field), desc,
180 gfc_add_modify (block, t, fold_convert (TREE_TYPE (field), value));
184 /* This provides address access to the data field. This should only be
185 used by array allocation, passing this on to the runtime. */
/* Return the address of the data field of descriptor DESC; per the
   comment above, intended only for array allocation / runtime calls.  */
188 gfc_conv_descriptor_data_addr (tree desc)
192 type = TREE_TYPE (desc);
193 gcc_assert (GFC_DESCRIPTOR_TYPE_P (type));
195 field = TYPE_FIELDS (type);
196 gcc_assert (DATA_FIELD == 0);
198 t = fold_build3_loc (input_location, COMPONENT_REF, TREE_TYPE (field), desc,
200 return gfc_build_addr_expr (NULL_TREE, t);
/* Build an lvalue COMPONENT_REF for the offset field of descriptor DESC
   (field index OFFSET_FIELD, of type gfc_array_index_type).  */
204 gfc_conv_descriptor_offset (tree desc)
209 type = TREE_TYPE (desc);
210 gcc_assert (GFC_DESCRIPTOR_TYPE_P (type));
212 field = gfc_advance_chain (TYPE_FIELDS (type), OFFSET_FIELD);
213 gcc_assert (field != NULL_TREE && TREE_TYPE (field) == gfc_array_index_type);
215 return fold_build3_loc (input_location, COMPONENT_REF, TREE_TYPE (field),
216 desc, field, NULL_TREE);
/* Read accessor: return the offset field of DESC.  */
220 gfc_conv_descriptor_offset_get (tree desc)
222 return gfc_conv_descriptor_offset (desc);
/* Write accessor: append to BLOCK a store of VALUE (converted) into the
   offset field of DESC.  */
226 gfc_conv_descriptor_offset_set (stmtblock_t *block, tree desc,
229 tree t = gfc_conv_descriptor_offset (desc);
230 gfc_add_modify (block, t, fold_convert (TREE_TYPE (t), value));
/* Build an lvalue COMPONENT_REF for the dtype field of descriptor DESC
   (field index DTYPE_FIELD).  */
235 gfc_conv_descriptor_dtype (tree desc)
240 type = TREE_TYPE (desc);
241 gcc_assert (GFC_DESCRIPTOR_TYPE_P (type));
243 field = gfc_advance_chain (TYPE_FIELDS (type), DTYPE_FIELD);
244 gcc_assert (field != NULL_TREE && TREE_TYPE (field) == gfc_array_index_type);
246 return fold_build3_loc (input_location, COMPONENT_REF, TREE_TYPE (field),
247 desc, field, NULL_TREE);
/* Build a reference to element DIM of the per-dimension array field of
   descriptor DESC (an ARRAY_TYPE of RECORD_TYPE entries holding
   stride/lbound/ubound).  NOTE(review): the return of TMP is not
   visible in this extract.  */
251 gfc_conv_descriptor_dimension (tree desc, tree dim)
257 type = TREE_TYPE (desc);
258 gcc_assert (GFC_DESCRIPTOR_TYPE_P (type));
260 field = gfc_advance_chain (TYPE_FIELDS (type), DIMENSION_FIELD);
261 gcc_assert (field != NULL_TREE
262 && TREE_CODE (TREE_TYPE (field)) == ARRAY_TYPE
263 && TREE_CODE (TREE_TYPE (TREE_TYPE (field))) == RECORD_TYPE);
265 tmp = fold_build3_loc (input_location, COMPONENT_REF, TREE_TYPE (field),
266 desc, field, NULL_TREE);
267 tmp = gfc_build_array_ref (tmp, dim, NULL);
/* Build a reference to the coarray token field of descriptor DESC.
   Only valid for allocatable coarrays under -fcoarray=lib, as the
   asserts below enforce.  */
273 gfc_conv_descriptor_token (tree desc)
278 type = TREE_TYPE (desc);
279 gcc_assert (GFC_DESCRIPTOR_TYPE_P (type));
280 gcc_assert (GFC_TYPE_ARRAY_AKIND (type) == GFC_ARRAY_ALLOCATABLE);
281 gcc_assert (gfc_option.coarray == GFC_FCOARRAY_LIB);
282 field = gfc_advance_chain (TYPE_FIELDS (type), CAF_TOKEN_FIELD);
283 gcc_assert (field != NULL_TREE && TREE_TYPE (field) == prvoid_type_node);
285 return fold_build3_loc (input_location, COMPONENT_REF, TREE_TYPE (field),
286 desc, field, NULL_TREE);
/* Build an lvalue for the stride sub-field of dimension DIM of
   descriptor DESC.  NOTE(review): the return of TMP is not visible in
   this extract.  */
291 gfc_conv_descriptor_stride (tree desc, tree dim)
296 tmp = gfc_conv_descriptor_dimension (desc, dim);
297 field = TYPE_FIELDS (TREE_TYPE (tmp));
298 field = gfc_advance_chain (field, STRIDE_SUBFIELD);
299 gcc_assert (field != NULL_TREE && TREE_TYPE (field) == gfc_array_index_type);
301 tmp = fold_build3_loc (input_location, COMPONENT_REF, TREE_TYPE (field),
302 tmp, field, NULL_TREE);
/* Read accessor for the stride of dimension DIM of DESC.  For array
   kinds known to be contiguous (allocatable, contiguous assumed-shape,
   contiguous pointer) the stride of dimension 0 is a compile-time
   constant one, so fold it here instead of reading the field.  */
307 gfc_conv_descriptor_stride_get (tree desc, tree dim)
309 tree type = TREE_TYPE (desc);
310 gcc_assert (GFC_DESCRIPTOR_TYPE_P (type));
311 if (integer_zerop (dim)
312 && (GFC_TYPE_ARRAY_AKIND (type) == GFC_ARRAY_ALLOCATABLE
313 ||GFC_TYPE_ARRAY_AKIND (type) == GFC_ARRAY_ASSUMED_SHAPE_CONT
314 ||GFC_TYPE_ARRAY_AKIND (type) == GFC_ARRAY_POINTER_CONT))
315 return gfc_index_one_node;
317 return gfc_conv_descriptor_stride (desc, dim);
/* Write accessor: append to BLOCK a store of VALUE (converted) into the
   stride of dimension DIM of DESC.  */
321 gfc_conv_descriptor_stride_set (stmtblock_t *block, tree desc,
322 tree dim, tree value)
324 tree t = gfc_conv_descriptor_stride (desc, dim);
325 gfc_add_modify (block, t, fold_convert (TREE_TYPE (t), value));
/* Build an lvalue for the lower-bound sub-field of dimension DIM of
   descriptor DESC.  NOTE(review): the return of TMP is not visible in
   this extract.  */
329 gfc_conv_descriptor_lbound (tree desc, tree dim)
334 tmp = gfc_conv_descriptor_dimension (desc, dim);
335 field = TYPE_FIELDS (TREE_TYPE (tmp));
336 field = gfc_advance_chain (field, LBOUND_SUBFIELD);
337 gcc_assert (field != NULL_TREE && TREE_TYPE (field) == gfc_array_index_type);
339 tmp = fold_build3_loc (input_location, COMPONENT_REF, TREE_TYPE (field),
340 tmp, field, NULL_TREE);
/* Read accessor for the lower bound of dimension DIM of DESC.  */
345 gfc_conv_descriptor_lbound_get (tree desc, tree dim)
347 return gfc_conv_descriptor_lbound (desc, dim);
/* Write accessor: append to BLOCK a store of VALUE (converted) into the
   lower bound of dimension DIM of DESC.  */
351 gfc_conv_descriptor_lbound_set (stmtblock_t *block, tree desc,
352 tree dim, tree value)
354 tree t = gfc_conv_descriptor_lbound (desc, dim);
355 gfc_add_modify (block, t, fold_convert (TREE_TYPE (t), value));
/* Build an lvalue for the upper-bound sub-field of dimension DIM of
   descriptor DESC.  NOTE(review): the return of TMP is not visible in
   this extract.  */
359 gfc_conv_descriptor_ubound (tree desc, tree dim)
364 tmp = gfc_conv_descriptor_dimension (desc, dim);
365 field = TYPE_FIELDS (TREE_TYPE (tmp));
366 field = gfc_advance_chain (field, UBOUND_SUBFIELD);
367 gcc_assert (field != NULL_TREE && TREE_TYPE (field) == gfc_array_index_type);
369 tmp = fold_build3_loc (input_location, COMPONENT_REF, TREE_TYPE (field),
370 tmp, field, NULL_TREE);
/* Read accessor for the upper bound of dimension DIM of DESC.  */
375 gfc_conv_descriptor_ubound_get (tree desc, tree dim)
377 return gfc_conv_descriptor_ubound (desc, dim);
/* Write accessor: append to BLOCK a store of VALUE (converted) into the
   upper bound of dimension DIM of DESC.  */
381 gfc_conv_descriptor_ubound_set (stmtblock_t *block, tree desc,
382 tree dim, tree value)
384 tree t = gfc_conv_descriptor_ubound (desc, dim);
385 gfc_add_modify (block, t, fold_convert (TREE_TYPE (t), value));
388 /* Build a null array descriptor constructor. */
/* Build a CONSTRUCTOR for descriptor type TYPE whose data pointer is
   NULL; all other fields are left unset.  Used to statically
   initialize pointer/allocatable array descriptors.  */
391 gfc_build_null_descriptor (tree type)
396 gcc_assert (GFC_DESCRIPTOR_TYPE_P (type));
397 gcc_assert (DATA_FIELD == 0);
398 field = TYPE_FIELDS (type);
400 /* Set a NULL data pointer. */
401 tmp = build_constructor_single (type, field, null_pointer_node);
402 TREE_CONSTANT (tmp) = 1;
403 /* All other fields are ignored. */
409 /* Modify a descriptor such that the lbound of a given dimension is the value
410 specified. This also updates ubound and offset accordingly. */
/* Shift the lower bound of dimension DIM of descriptor DESC to
   NEW_LBOUND, emitting code into BLOCK.  The upper bound is shifted by
   the same difference, and the offset is adjusted so that element
   addresses are unchanged.  NOTE(review): some operand lines of the
   fold_build2 calls are missing from this extract; the visible shape is
   diff = new - old, ubound += diff, offset -= diff * stride.  */
413 gfc_conv_shift_descriptor_lbound (stmtblock_t* block, tree desc,
414 int dim, tree new_lbound)
416 tree offs, ubound, lbound, stride;
417 tree diff, offs_diff;
419 new_lbound = fold_convert (gfc_array_index_type, new_lbound);
421 offs = gfc_conv_descriptor_offset_get (desc);
422 lbound = gfc_conv_descriptor_lbound_get (desc, gfc_rank_cst[dim]);
423 ubound = gfc_conv_descriptor_ubound_get (desc, gfc_rank_cst[dim]);
424 stride = gfc_conv_descriptor_stride_get (desc, gfc_rank_cst[dim]);
426 /* Get difference (new - old) by which to shift stuff. */
427 diff = fold_build2_loc (input_location, MINUS_EXPR, gfc_array_index_type,
430 /* Shift ubound and offset accordingly. This has to be done before
431 updating the lbound, as they depend on the lbound expression! */
432 ubound = fold_build2_loc (input_location, PLUS_EXPR, gfc_array_index_type,
434 gfc_conv_descriptor_ubound_set (block, desc, gfc_rank_cst[dim], ubound);
435 offs_diff = fold_build2_loc (input_location, MULT_EXPR, gfc_array_index_type,
437 offs = fold_build2_loc (input_location, MINUS_EXPR, gfc_array_index_type,
439 gfc_conv_descriptor_offset_set (block, desc, offs);
441 /* Finally set lbound to value we want. */
442 gfc_conv_descriptor_lbound_set (block, desc, gfc_rank_cst[dim], new_lbound);
446 /* Cleanup those #defines. */
/* NOTE(review): the matching #undefs for DATA_FIELD, OFFSET_FIELD and
   DTYPE_FIELD are not visible in this extract -- confirm against the
   full file.  */
451 #undef DIMENSION_FIELD
452 #undef CAF_TOKEN_FIELD
453 #undef STRIDE_SUBFIELD
454 #undef LBOUND_SUBFIELD
455 #undef UBOUND_SUBFIELD
458 /* Mark a SS chain as used. Flags specifies in which loops the SS is used.
459 flags & 1 = Main loop body.
460 flags & 2 = temp copy loop. */
/* Set the useflags of every SS in the chain up to (but excluding) the
   terminator.  FLAGS bits: 1 = main loop body, 2 = temp copy loop (see
   the comment above).  */
463 gfc_mark_ss_chain_used (gfc_ss * ss, unsigned flags)
465 for (; ss != gfc_ss_terminator; ss = ss->next)
466 ss->info->useflags = flags;
470 /* Free a gfc_ss chain. */
/* Free every SS in the chain up to the terminator.  NOTE(review): the
   loop body that saves ss->next and frees each node is missing from
   this extract.  */
473 gfc_free_ss_chain (gfc_ss * ss)
477 while (ss != gfc_ss_terminator)
479 gcc_assert (ss != NULL);
/* Release SS_INFO once its reference count reaches zero.
   NOTE(review): the refcount decrement/early return and the final free
   are missing from this extract.  */
488 free_ss_info (gfc_ss_info *ss_info)
491 if (ss_info->refcount > 0)
494 gcc_assert (ss_info->refcount == 0);
/* Free a single SS.  For section-type SS this first frees the SS chains
   of any vector subscripts, then drops the reference to the shared
   ss_info.  NOTE(review): the switch cases and the final free of SS
   itself are missing from this extract.  */
502 gfc_free_ss (gfc_ss * ss)
504 gfc_ss_info *ss_info;
509 switch (ss_info->type)
512 for (n = 0; n < ss->dimen; n++)
514 if (ss_info->data.array.subscript[ss->dim[n]])
515 gfc_free_ss_chain (ss_info->data.array.subscript[ss->dim[n]]);
523 free_ss_info (ss_info);
528 /* Creates and initializes an array type gfc_ss struct. */
/* Create an array-type SS for EXPR with DIMEN dimensions and SS type
   TYPE, chained in front of NEXT.  NOTE(review): the allocation of the
   gfc_ss node, the dim[] initialization inside the loop, and the return
   are missing from this extract.  */
531 gfc_get_array_ss (gfc_ss *next, gfc_expr *expr, int dimen, gfc_ss_type type)
534 gfc_ss_info *ss_info;
537 ss_info = gfc_get_ss_info ();
539 ss_info->type = type;
540 ss_info->expr = expr;
546 for (i = 0; i < ss->dimen; i++)
553 /* Creates and initializes a temporary type gfc_ss struct. */
/* Create a GFC_SS_TEMP SS describing a temporary of element type TYPE,
   character length STRING_LENGTH and DIMEN dimensions; the new SS is
   terminated (next == gfc_ss_terminator).  NOTE(review): the allocation
   of the gfc_ss node, the loop body and the return are missing from
   this extract.  */
556 gfc_get_temp_ss (tree type, tree string_length, int dimen)
559 gfc_ss_info *ss_info;
562 ss_info = gfc_get_ss_info ();
564 ss_info->type = GFC_SS_TEMP;
565 ss_info->string_length = string_length;
566 ss_info->data.temp.type = type;
570 ss->next = gfc_ss_terminator;
572 for (i = 0; i < ss->dimen; i++)
579 /* Creates and initializes a scalar type gfc_ss struct. */
/* Create a GFC_SS_SCALAR SS for EXPR, chained in front of NEXT.
   NOTE(review): the allocation of the gfc_ss node and the return are
   missing from this extract.  */
582 gfc_get_scalar_ss (gfc_ss *next, gfc_expr *expr)
585 gfc_ss_info *ss_info;
587 ss_info = gfc_get_ss_info ();
589 ss_info->type = GFC_SS_SCALAR;
590 ss_info->expr = expr;
600 /* Free all the SS associated with a loop. */
/* Free all the SS attached to LOOP, remove LOOP from its parent's
   nested-loop list, and recursively clean up any nested loops that were
   not freed yet.  NOTE(review): the freeing inside the while loop and
   the unlink/free statements are missing from this extract.  */
603 gfc_cleanup_loop (gfc_loopinfo * loop)
605 gfc_loopinfo *loop_next, **ploop;
610 while (ss != gfc_ss_terminator)
612 gcc_assert (ss != NULL);
613 next = ss->loop_chain;
618 /* Remove reference to self in the parent loop. */
620 for (ploop = &loop->parent->nested; *ploop; ploop = &(*ploop)->next)
627 /* Free non-freed nested loops. */
628 for (loop = loop->nested; loop; loop = loop_next)
630 loop_next = loop->next;
631 gfc_cleanup_loop (loop);
/* Associate every SS in the chain with LOOP.  For section SS, recurse
   into the SS chains of any vector subscripts.  Scalar, reference and
   temp SS are special-cased (NOTE(review): the statement executed for
   them, and the assignment of LOOP itself, are missing from this
   extract).  */
638 set_ss_loop (gfc_ss *ss, gfc_loopinfo *loop)
642 for (; ss != gfc_ss_terminator; ss = ss->next)
646 if (ss->info->type == GFC_SS_SCALAR
647 || ss->info->type == GFC_SS_REFERENCE
648 || ss->info->type == GFC_SS_TEMP)
651 for (n = 0; n < GFC_MAX_DIMENSIONS; n++)
652 if (ss->info->data.array.subscript[n] != NULL)
653 set_ss_loop (ss->info->data.array.subscript[n], loop);
658 /* Associate a SS chain with a loop. */
/* Attach the SS chain HEAD to LOOP.  Each SS is linked into the loop's
   loop_chain, and any loop owned by a nested SS is registered (once) in
   LOOP's nested list with its parent pointer set.  NOTE(review):
   several statements, including the final loop->ss assignment, are
   missing from this extract.  */
661 gfc_add_ss_to_loop (gfc_loopinfo * loop, gfc_ss * head)
664 gfc_loopinfo *nested_loop;
666 if (head == gfc_ss_terminator)
669 set_ss_loop (head, loop);
672 for (; ss && ss != gfc_ss_terminator; ss = ss->next)
676 nested_loop = ss->nested_ss->loop;
678 /* More than one ss can belong to the same loop. Hence, we add the
679 loop to the chain only if it is different from the previously
680 added one, to avoid duplicate nested loops. */
681 if (nested_loop != loop->nested)
683 gcc_assert (nested_loop->parent == NULL);
684 nested_loop->parent = loop;
686 gcc_assert (nested_loop->next == NULL);
687 nested_loop->next = loop->nested;
688 loop->nested = nested_loop;
691 gcc_assert (nested_loop->parent == loop);
694 if (ss->next == gfc_ss_terminator)
695 ss->loop_chain = loop->ss;
697 ss->loop_chain = ss->next;
699 gcc_assert (ss == gfc_ss_terminator);
704 /* Generate an initializer for a static pointer or allocatable array. */
/* Give the static pointer/allocatable array SYM a DECL_INITIAL that
   zeroes the descriptor's data pointer (see gfc_build_null_descriptor).  */
707 gfc_trans_static_array_pointer (gfc_symbol * sym)
711 gcc_assert (TREE_STATIC (sym->backend_decl));
712 /* Just zero the data member. */
713 type = TREE_TYPE (sym->backend_decl);
714 DECL_INITIAL (sym->backend_decl) = gfc_build_null_descriptor (type);
718 /* If the bounds of SE's loop have not yet been set, see if they can be
719 determined from array spec AS, which is the array spec of a called
720 function. MAPPING maps the callee's dummy arguments to the values
721 that the caller is passing. Add any initialization and finalization
/* For each loop dimension whose upper bound is still unknown, derive it
   from the explicit-shape array spec AS of a called function: the loop
   extent becomes (upper - lower), with the bound expressions mapped
   from the callee's dummies via MAPPING.  Only AS_EXPLICIT specs are
   handled.  NOTE(review): several declarations and increments of DIM
   are missing from this extract.  */
725 gfc_set_loop_bounds_from_array_spec (gfc_interface_mapping * mapping,
726 gfc_se * se, gfc_array_spec * as)
728 int n, dim, total_dim;
737 if (!as || as->type != AS_EXPLICIT)
740 for (ss = se->ss; ss; ss = ss->parent)
742 total_dim += ss->loop->dimen;
743 for (n = 0; n < ss->loop->dimen; n++)
745 /* The bound is known, nothing to do. */
746 if (ss->loop->to[n] != NULL_TREE)
750 gcc_assert (dim < as->rank);
751 gcc_assert (ss->loop->dimen <= as->rank);
753 /* Evaluate the lower bound. */
754 gfc_init_se (&tmpse, NULL);
755 gfc_apply_interface_mapping (mapping, &tmpse, as->lower[dim]);
756 gfc_add_block_to_block (&se->pre, &tmpse.pre);
757 gfc_add_block_to_block (&se->post, &tmpse.post);
758 lower = fold_convert (gfc_array_index_type, tmpse.expr);
760 /* ...and the upper bound. */
761 gfc_init_se (&tmpse, NULL);
762 gfc_apply_interface_mapping (mapping, &tmpse, as->upper[dim]);
763 gfc_add_block_to_block (&se->pre, &tmpse.pre);
764 gfc_add_block_to_block (&se->post, &tmpse.post);
765 upper = fold_convert (gfc_array_index_type, tmpse.expr);
767 /* Set the upper bound of the loop to UPPER - LOWER. */
768 tmp = fold_build2_loc (input_location, MINUS_EXPR,
769 gfc_array_index_type, upper, lower);
770 tmp = gfc_evaluate_now (tmp, &se->pre);
771 ss->loop->to[n] = tmp;
775 gcc_assert (total_dim == as->rank);
779 /* Generate code to allocate an array temporary, or create a variable to
780 hold the data. If size is NULL, zero the descriptor so that the
781 callee will allocate the array. If DEALLOC is true, also generate code to
782 free the array afterwards.
784 If INITIAL is not NULL, it is packed using internal_pack and the result used
785 as data instead of allocating a fresh, uninitialized area of memory.
787 Initialization code is added to PRE and finalization code to POST.
788 DYNAMIC is true if the caller may want to extend the array later
789 using realloc. This prevents us from putting the array on the stack. */
/* Allocate (or stack-create) the storage for a temporary array
   described by INFO, as documented in the comment above: NULL/zero SIZE
   means callee-allocated (just NULL the data pointer); otherwise the
   data is either a stack variable (when !DYNAMIC and no INITIAL and the
   size permits), a malloc'd block, or -- when INITIAL is given -- the
   result of internal_pack, copied manually if packing returned the
   original data.  Cleanup (free) code goes to POST when DEALLOC.
   NOTE(review): declarations, braces and some operand lines are missing
   from this extract.  */
792 gfc_trans_allocate_array_storage (stmtblock_t * pre, stmtblock_t * post,
793 gfc_array_info * info, tree size, tree nelem,
794 tree initial, bool dynamic, bool dealloc)
800 desc = info->descriptor;
801 info->offset = gfc_index_zero_node;
802 if (size == NULL_TREE || integer_zerop (size))
804 /* A callee allocated array. */
805 gfc_conv_descriptor_data_set (pre, desc, null_pointer_node);
810 /* Allocate the temporary. */
811 onstack = !dynamic && initial == NULL_TREE
812 && (gfc_option.flag_stack_arrays
813 || gfc_can_put_var_on_stack (size));
817 /* Make a temporary variable to hold the data. */
818 tmp = fold_build2_loc (input_location, MINUS_EXPR, TREE_TYPE (nelem),
819 nelem, gfc_index_one_node);
820 tmp = gfc_evaluate_now (tmp, pre);
821 tmp = build_range_type (gfc_array_index_type, gfc_index_zero_node,
823 tmp = build_array_type (gfc_get_element_type (TREE_TYPE (desc)),
825 tmp = gfc_create_var (tmp, "A");
826 /* If we're here only because of -fstack-arrays we have to
827 emit a DECL_EXPR to make the gimplifier emit alloca calls. */
828 if (!gfc_can_put_var_on_stack (size))
829 gfc_add_expr_to_block (pre,
830 fold_build1_loc (input_location,
831 DECL_EXPR, TREE_TYPE (tmp),
833 tmp = gfc_build_addr_expr (NULL_TREE, tmp);
834 gfc_conv_descriptor_data_set (pre, desc, tmp);
838 /* Allocate memory to hold the data or call internal_pack. */
839 if (initial == NULL_TREE)
841 tmp = gfc_call_malloc (pre, NULL, size);
842 tmp = gfc_evaluate_now (tmp, pre);
849 stmtblock_t do_copying;
851 tmp = TREE_TYPE (initial); /* Pointer to descriptor. */
852 gcc_assert (TREE_CODE (tmp) == POINTER_TYPE);
853 tmp = TREE_TYPE (tmp); /* The descriptor itself. */
854 tmp = gfc_get_element_type (tmp);
855 gcc_assert (tmp == gfc_get_element_type (TREE_TYPE (desc)));
856 packed = gfc_create_var (build_pointer_type (tmp), "data");
858 tmp = build_call_expr_loc (input_location,
859 gfor_fndecl_in_pack, 1, initial);
860 tmp = fold_convert (TREE_TYPE (packed), tmp);
861 gfc_add_modify (pre, packed, tmp);
863 tmp = build_fold_indirect_ref_loc (input_location,
865 source_data = gfc_conv_descriptor_data_get (tmp);
867 /* internal_pack may return source->data without any allocation
868 or copying if it is already packed. If that's the case, we
869 need to allocate and copy manually. */
871 gfc_start_block (&do_copying);
872 tmp = gfc_call_malloc (&do_copying, NULL, size);
873 tmp = fold_convert (TREE_TYPE (packed), tmp);
874 gfc_add_modify (&do_copying, packed, tmp);
875 tmp = gfc_build_memcpy_call (packed, source_data, size);
876 gfc_add_expr_to_block (&do_copying, tmp);
878 was_packed = fold_build2_loc (input_location, EQ_EXPR,
879 boolean_type_node, packed,
881 tmp = gfc_finish_block (&do_copying);
882 tmp = build3_v (COND_EXPR, was_packed, tmp,
883 build_empty_stmt (input_location));
884 gfc_add_expr_to_block (pre, tmp);
886 tmp = fold_convert (pvoid_type_node, packed);
889 gfc_conv_descriptor_data_set (pre, desc, tmp);
892 info->data = gfc_conv_descriptor_data_get (desc);
894 /* The offset is zero because we create temporaries with a zero
896 gfc_conv_descriptor_offset_set (pre, desc, gfc_index_zero_node);
898 if (dealloc && !onstack)
900 /* Free the temporary. */
901 tmp = gfc_conv_descriptor_data_get (desc);
902 tmp = gfc_call_free (fold_convert (pvoid_type_node, tmp));
903 gfc_add_expr_to_block (post, tmp);
908 /* Get the scalarizer array dimension corresponding to actual array dimension
911 For example, if SS represents the array ref a(1,:,:,1), it is a
912 bidimensional scalarizer array, and the result would be 0 for ARRAY_DIM=1,
913 and 1 for ARRAY_DIM=2.
914 If SS represents transpose(a(:,1,1,:)), it is again a bidimensional
915 scalarizer array, and the result would be 1 for ARRAY_DIM=0 and 0 for
917 If SS represents sum(a(:,:,:,1), dim=1), it is a 2+1-dimensional scalarizer
918 array. If called on the inner ss, the result would be respectively 0,1,2 for
919 ARRAY_DIM=0,1,2. If called on the outer ss, the result would be 0,1
920 for ARRAY_DIM=1,2. */
/* Map actual array dimension ARRAY_DIM to the corresponding scalarizer
   dimension of SS, walking the parent chain (see the worked examples in
   the comment above).  NOTE(review): the initialization and increment
   of array_ref_dim are missing from this extract.  */
923 get_scalarizer_dim_for_array_dim (gfc_ss *ss, int array_dim)
930 for (; ss; ss = ss->parent)
931 for (n = 0; n < ss->dimen; n++)
932 if (ss->dim[n] < array_dim)
935 return array_ref_dim;
/* Follow the nested_ss links of SS down to the innermost SS.
   NOTE(review): the loop body and the return are missing from this
   extract.  */
940 innermost_ss (gfc_ss *ss)
942 while (ss->nested_ss != NULL)
950 /* Get the array reference dimension corresponding to the given loop dimension.
951 It is different from the true array dimension given by the dim array in
952 the case of a partial array reference (i.e. a(:,:,1,:) for example)
953 It is different from the loop dimension in the case of a transposed array.
/* Translate loop dimension LOOP_DIM into the corresponding array
   reference dimension, delegating to the innermost SS (see the comment
   above for when the two differ).  */
957 get_array_ref_dim_for_loop_dim (gfc_ss *ss, int loop_dim)
959 return get_scalarizer_dim_for_array_dim (innermost_ss (ss),
964 /* Generate code to create and initialize the descriptor for a temporary
965 array. This is used for both temporaries needed by the scalarizer, and
966 functions returning arrays. Adjusts the loop variables to be
967 zero-based, and calculates the loop bounds for callee allocated arrays.
968 Allocate the array unless it's callee allocated (we have a callee
969 allocated array if 'callee_alloc' is true, or if loop->to[n] is
970 NULL_TREE for any n). Also fills in the descriptor, data and offset
971 fields of info if known. Returns the size of the array, or NULL for a
972 callee allocated array.
974 PRE, POST, INITIAL, DYNAMIC and DEALLOC are as for
975 gfc_trans_allocate_array_storage. */
/* Create and initialize the descriptor for a temporary array (see the
   large comment above): normalize each loop to a zero lower bound,
   build a packed descriptor type, fill in dtype, bounds and strides,
   accumulate the element count into SIZE (zeroed via OR_EXPR when any
   extent is non-positive), then hand off to
   gfc_trans_allocate_array_storage.  NOTE(review): declarations,
   braces, else-branches and the final return are missing from this
   extract.  */
978 gfc_trans_create_temp_array (stmtblock_t * pre, stmtblock_t * post, gfc_ss * ss,
979 tree eltype, tree initial, bool dynamic,
980 bool dealloc, bool callee_alloc, locus * where)
984 gfc_array_info *info;
985 tree from[GFC_MAX_DIMENSIONS], to[GFC_MAX_DIMENSIONS];
996 memset (from, 0, sizeof (from));
997 memset (to, 0, sizeof (to));
999 info = &ss->info->data.array;
1001 gcc_assert (ss->dimen > 0);
1002 gcc_assert (ss->loop->dimen == ss->dimen);
1004 if (gfc_option.warn_array_temp && where)
1005 gfc_warning ("Creating array temporary at %L", where);
1007 /* Set the lower bound to zero. */
1008 for (s = ss; s; s = s->parent)
1012 total_dim += loop->dimen;
1013 for (n = 0; n < loop->dimen; n++)
1017 /* Callee allocated arrays may not have a known bound yet. */
1019 loop->to[n] = gfc_evaluate_now (
1020 fold_build2_loc (input_location, MINUS_EXPR,
1021 gfc_array_index_type,
1022 loop->to[n], loop->from[n]),
1024 loop->from[n] = gfc_index_zero_node;
1026 /* We have just changed the loop bounds, we must clear the
1027 corresponding specloop, so that delta calculation is not skipped
1028 later in gfc_set_delta. */
1029 loop->specloop[n] = NULL;
1031 /* We are constructing the temporary's descriptor based on the loop
1032 dimensions. As the dimensions may be accessed in arbitrary order
1033 (think of transpose) the size taken from the n'th loop may not map
1034 to the n'th dimension of the array. We need to reconstruct loop
1035 infos in the right order before using it to set the descriptor
1037 tmp_dim = get_scalarizer_dim_for_array_dim (ss, dim);
1038 from[tmp_dim] = loop->from[n];
1039 to[tmp_dim] = loop->to[n];
1041 info->delta[dim] = gfc_index_zero_node;
1042 info->start[dim] = gfc_index_zero_node;
1043 info->end[dim] = gfc_index_zero_node;
1044 info->stride[dim] = gfc_index_one_node;
1048 /* Initialize the descriptor. */
1050 gfc_get_array_type_bounds (eltype, total_dim, 0, from, to, 1,
1051 GFC_ARRAY_UNKNOWN, true);
1052 desc = gfc_create_var (type, "atmp");
1053 GFC_DECL_PACKED_ARRAY (desc) = 1;
1055 info->descriptor = desc;
1056 size = gfc_index_one_node;
1058 /* Fill in the array dtype. */
1059 tmp = gfc_conv_descriptor_dtype (desc);
1060 gfc_add_modify (pre, tmp, gfc_get_dtype (TREE_TYPE (desc)));
1063 Fill in the bounds and stride. This is a packed array, so:
1066 for (n = 0; n < rank; n++)
1069 delta = ubound[n] + 1 - lbound[n];
1070 size = size * delta;
1072 size = size * sizeof(element);
1075 or_expr = NULL_TREE;
1077 /* If there is at least one null loop->to[n], it is a callee allocated
1079 for (n = 0; n < total_dim; n++)
1080 if (to[n] == NULL_TREE)
1086 if (size == NULL_TREE)
1087 for (s = ss; s; s = s->parent)
1088 for (n = 0; n < s->loop->dimen; n++)
1090 dim = get_scalarizer_dim_for_array_dim (ss, s->dim[n]);
1092 /* For a callee allocated array express the loop bounds in terms
1093 of the descriptor fields. */
1094 tmp = fold_build2_loc (input_location,
1095 MINUS_EXPR, gfc_array_index_type,
1096 gfc_conv_descriptor_ubound_get (desc, gfc_rank_cst[dim]),
1097 gfc_conv_descriptor_lbound_get (desc, gfc_rank_cst[dim]));
1098 s->loop->to[n] = tmp;
1102 for (n = 0; n < total_dim; n++)
1104 /* Store the stride and bound components in the descriptor. */
1105 gfc_conv_descriptor_stride_set (pre, desc, gfc_rank_cst[n], size);
1107 gfc_conv_descriptor_lbound_set (pre, desc, gfc_rank_cst[n],
1108 gfc_index_zero_node);
1110 gfc_conv_descriptor_ubound_set (pre, desc, gfc_rank_cst[n], to[n]);
1112 tmp = fold_build2_loc (input_location, PLUS_EXPR,
1113 gfc_array_index_type,
1114 to[n], gfc_index_one_node);
1116 /* Check whether the size for this dimension is negative. */
1117 cond = fold_build2_loc (input_location, LE_EXPR, boolean_type_node,
1118 tmp, gfc_index_zero_node);
1119 cond = gfc_evaluate_now (cond, pre);
1124 or_expr = fold_build2_loc (input_location, TRUTH_OR_EXPR,
1125 boolean_type_node, or_expr, cond);
1127 size = fold_build2_loc (input_location, MULT_EXPR,
1128 gfc_array_index_type, size, tmp);
1129 size = gfc_evaluate_now (size, pre);
1133 /* Get the size of the array. */
1134 if (size && !callee_alloc)
1136 /* If or_expr is true, then the extent in at least one
1137 dimension is zero and the size is set to zero. */
1138 size = fold_build3_loc (input_location, COND_EXPR, gfc_array_index_type,
1139 or_expr, gfc_index_zero_node, size);
1142 size = fold_build2_loc (input_location, MULT_EXPR, gfc_array_index_type,
1144 fold_convert (gfc_array_index_type,
1145 TYPE_SIZE_UNIT (gfc_get_element_type (type))));
1153 gfc_trans_allocate_array_storage (pre, post, info, size, nelem, initial,
1159 if (ss->dimen > ss->loop->temp_dim)
1160 ss->loop->temp_dim = ss->dimen;
1166 /* Return the number of iterations in a loop that starts at START,
1167 ends at END, and has step STEP. */
/* Build the trip count of a loop running from START to END with step
   STEP: max((end - start) floor-div step + 1, 0), converted to
   gfc_array_index_type.  */
1170 gfc_get_iteration_count (tree start, tree end, tree step)
1175 type = TREE_TYPE (step);
1176 tmp = fold_build2_loc (input_location, MINUS_EXPR, type, end, start);
1177 tmp = fold_build2_loc (input_location, FLOOR_DIV_EXPR, type, tmp, step);
1178 tmp = fold_build2_loc (input_location, PLUS_EXPR, type, tmp,
1179 build_int_cst (type, 1));
/* Clamp to zero for loops that never execute.  */
1180 tmp = fold_build2_loc (input_location, MAX_EXPR, type, tmp,
1181 build_int_cst (type, 0));
1182 return fold_convert (gfc_array_index_type, tmp);
1186 /* Extend the data in array DESC by EXTRA elements. */
/* Grow the rank-1 array DESC by EXTRA elements: bump its upper bound,
   compute the new byte size from (ubound + 1) * element size, and
   realloc the data pointer.  A no-op when EXTRA is the constant zero.
   NOTE(review): declarations and one operand of the first PLUS_EXPR are
   missing from this extract.  */
1189 gfc_grow_array (stmtblock_t * pblock, tree desc, tree extra)
1196 if (integer_zerop (extra))
1199 ubound = gfc_conv_descriptor_ubound_get (desc, gfc_rank_cst[0]);
1201 /* Add EXTRA to the upper bound. */
1202 tmp = fold_build2_loc (input_location, PLUS_EXPR, gfc_array_index_type,
1204 gfc_conv_descriptor_ubound_set (pblock, desc, gfc_rank_cst[0], tmp);
1206 /* Get the value of the current data pointer. */
1207 arg0 = gfc_conv_descriptor_data_get (desc);
1209 /* Calculate the new array size. */
1210 size = TYPE_SIZE_UNIT (gfc_get_element_type (TREE_TYPE (desc)));
1211 tmp = fold_build2_loc (input_location, PLUS_EXPR, gfc_array_index_type,
1212 ubound, gfc_index_one_node);
1213 arg1 = fold_build2_loc (input_location, MULT_EXPR, size_type_node,
1214 fold_convert (size_type_node, tmp),
1215 fold_convert (size_type_node, size));
1217 /* Call the realloc() function. */
1218 tmp = gfc_call_realloc (pblock, arg0, arg1);
1219 gfc_conv_descriptor_data_set (pblock, desc, tmp);
1223 /* Return true if the bounds of iterator I can only be determined
/* Return true if any of iterator I's start, end or step expressions is
   not a compile-time constant.  */
1227 gfc_iterator_has_dynamic_bounds (gfc_iterator * i)
1229 return (i->start->expr_type != EXPR_CONSTANT
1230 || i->end->expr_type != EXPR_CONSTANT
1231 || i->step->expr_type != EXPR_CONSTANT);
1235 /* Split the size of constructor element EXPR into the sum of two terms,
1236 one of which can be determined at compile time and one of which must
1237 be calculated at run time. Set *SIZE to the former and return true
1238 if the latter might be nonzero. */
/* Split the size of constructor element EXPR into a compile-time part
   (*SIZE) and a run-time part; the return value is true when the
   run-time part might be nonzero.  Nested array constructors recurse;
   other ranked expressions contribute 0 statically; scalars count as 1.
   NOTE(review): the return statements of the last two branches are
   missing from this extract.  */
1241 gfc_get_array_constructor_element_size (mpz_t * size, gfc_expr * expr)
1243 if (expr->expr_type == EXPR_ARRAY)
1244 return gfc_get_array_constructor_size (size, expr->value.constructor);
1245 else if (expr->rank > 0)
1247 /* Calculate everything at run time. */
1248 mpz_set_ui (*size, 0);
1253 /* A single element. */
1254 mpz_set_ui (*size, 1);
1260 /* Like gfc_get_array_constructor_element_size, but applied to the whole
1261 of array constructor C. */
1264 gfc_get_array_constructor_size (mpz_t * size, gfc_constructor_base base)
1272 mpz_set_ui (*size, 0);
/* Walk every element; accumulate the static size in *SIZE and OR the
   "might have a dynamic part" flag into DYNAMIC.  */
1277 for (c = gfc_constructor_first (base); c; c = gfc_constructor_next (c))
/* An iterator with non-constant bounds makes the whole term dynamic.  */
1280 if (i && gfc_iterator_has_dynamic_bounds (i))
1284 dynamic |= gfc_get_array_constructor_element_size (&len, c->expr);
1287 /* Multiply the static part of the element size by the
1288 number of iterations. */
/* Iteration count = floor ((end - start) / step) + 1.  */
1289 mpz_sub (val, i->end->value.integer, i->start->value.integer);
1290 mpz_fdiv_q (val, val, i->step->value.integer);
1291 mpz_add_ui (val, val, 1);
1292 if (mpz_sgn (val) > 0)
1293 mpz_mul (len, len, val);
/* Zero or negative trip count: the iterator contributes nothing.  */
1295 mpz_set_ui (len, 0);
1297 mpz_add (*size, *size, len);
1306 /* Make sure offset is a variable.  Copies *POFFSET into *OFFSETVAR and
   makes *POFFSET refer to that variable so it can later be modified.  */
1309 gfc_put_offset_into_var (stmtblock_t * pblock, tree * poffset,
1312 /* We should have already created the offset variable. We cannot
1313 create it here because we may be in an inner scope. */
1314 gcc_assert (*offsetvar != NULL_TREE);
1315 gfc_add_modify (pblock, *offsetvar, *poffset);
1316 *poffset = *offsetvar;
/* Mark the variable used so it is kept and later pushdecl'd.  */
1317 TREE_USED (*offsetvar) = 1;
1321 /* Variables needed for bounds-checking. */
/* True until the first character length in a constructor is recorded.  */
1322 static bool first_len;
/* Holds the length of the first character element, for comparison
   against subsequent elements under -fcheck=bounds.  */
1323 static tree first_len_val;
/* True when the constructor had an explicit character typespec, in which
   case per-element length checking is suppressed.  */
1324 static bool typespec_chararray_ctor;
/* Store one element EXPR of an array constructor into the temporary DESC
   at position OFFSET, handling character assignment and the optional
   run-time check that all character elements have equal length.  */
1327 gfc_trans_array_ctor_element (stmtblock_t * pblock, tree desc,
1328 tree offset, gfc_se * se, gfc_expr * expr)
1332 gfc_conv_expr (se, expr);
1334 /* Store the value. */
1335 tmp = build_fold_indirect_ref_loc (input_location,
1336 gfc_conv_descriptor_data_get (desc));
1337 tmp = gfc_build_array_ref (tmp, offset, NULL);
1339 if (expr->ts.type == BT_CHARACTER)
1341 int i = gfc_validate_kind (BT_CHARACTER, expr->ts.kind, false);
/* Convert the element size in bytes to a length in characters by
   dividing out the character kind's byte width.  */
1344 esize = size_in_bytes (gfc_get_element_type (TREE_TYPE (desc)));
1345 esize = fold_convert (gfc_charlen_type_node, esize);
1346 esize = fold_build2_loc (input_location, TRUNC_DIV_EXPR,
1347 gfc_charlen_type_node, esize,
1348 build_int_cst (gfc_charlen_type_node,
1349 gfc_character_kinds[i].bit_size / 8));
1351 gfc_conv_string_parameter (se);
1352 if (POINTER_TYPE_P (TREE_TYPE (tmp)))
1354 /* The temporary is an array of pointers. */
1355 se->expr = fold_convert (TREE_TYPE (tmp), se->expr);
1356 gfc_add_modify (&se->pre, tmp, se->expr);
1360 /* The temporary is an array of string values. */
1361 tmp = gfc_build_addr_expr (gfc_get_pchar_type (expr->ts.kind), tmp);
1362 /* We know the temporary and the value will be the same length,
1363 so can use memcpy. */
1364 gfc_trans_string_copy (&se->pre, esize, tmp, expr->ts.kind,
1365 se->string_length, se->expr, expr->ts.kind);
/* With -fcheck=bounds (and no typespec), verify the lengths agree.  */
1367 if ((gfc_option.rtcheck & GFC_RTCHECK_BOUNDS) && !typespec_chararray_ctor)
/* Record the first element's length the first time through.  */
1371 gfc_add_modify (&se->pre, first_len_val,
1377 /* Verify that all constructor elements are of the same
1379 tree cond = fold_build2_loc (input_location, NE_EXPR,
1380 boolean_type_node, first_len_val,
1382 gfc_trans_runtime_check
1383 (true, false, cond, &se->pre, &expr->where,
1384 "Different CHARACTER lengths (%ld/%ld) in array constructor",
1385 fold_convert (long_integer_type_node, first_len_val),
1386 fold_convert (long_integer_type_node, se->string_length));
1392 /* TODO: Should the frontend already have done this conversion? */
1393 se->expr = fold_convert (TREE_TYPE (tmp), se->expr);
1394 gfc_add_modify (&se->pre, tmp, se->expr);
1397 gfc_add_block_to_block (pblock, &se->pre);
1398 gfc_add_block_to_block (pblock, &se->post);
1402 /* Add the contents of an array to the constructor. DYNAMIC is as for
1403 gfc_trans_array_constructor_value. */
1406 gfc_trans_array_constructor_subarray (stmtblock_t * pblock,
1407 tree type ATTRIBUTE_UNUSED,
1408 tree desc, gfc_expr * expr,
1409 tree * poffset, tree * offsetvar,
1420 /* We need this to be a variable so we can increment it. */
1421 gfc_put_offset_into_var (pblock, poffset, offsetvar);
1423 gfc_init_se (&se, NULL);
1425 /* Walk the array expression. */
1426 ss = gfc_walk_expr (expr);
1427 gcc_assert (ss != gfc_ss_terminator);
1429 /* Initialize the scalarizer. */
1430 gfc_init_loopinfo (&loop);
1431 gfc_add_ss_to_loop (&loop, ss);
1433 /* Initialize the loop. */
1434 gfc_conv_ss_startstride (&loop);
1435 gfc_conv_loop_setup (&loop, &expr->where);
1437 /* Make sure the constructed array has room for the new data. */
1440 /* Set SIZE to the total number of elements in the subarray. */
1441 size = gfc_index_one_node;
1442 for (n = 0; n < loop.dimen; n++)
1444 tmp = gfc_get_iteration_count (loop.from[n], loop.to[n],
1445 gfc_index_one_node);
1446 size = fold_build2_loc (input_location, MULT_EXPR,
1447 gfc_array_index_type, size, tmp);
1450 /* Grow the constructed array by SIZE elements. */
1451 gfc_grow_array (&loop.pre, desc, size);
1454 /* Make the loop body. */
1455 gfc_mark_ss_chain_used (ss, 1);
1456 gfc_start_scalarized_body (&loop, &body);
1457 gfc_copy_loopinfo_to_se (&se, &loop);
/* Store each scalarized element at the current offset.  */
1460 gfc_trans_array_ctor_element (&body, desc, *poffset, &se, expr);
1461 gcc_assert (se.ss == gfc_ss_terminator);
1463 /* Increment the offset. */
1464 tmp = fold_build2_loc (input_location, PLUS_EXPR, gfc_array_index_type,
1465 *poffset, gfc_index_one_node);
1466 gfc_add_modify (&body, *poffset, tmp);
1468 /* Finish the loop. */
1469 gfc_trans_scalarizing_loops (&loop, &body);
1470 gfc_add_block_to_block (&loop.pre, &loop.post);
1471 tmp = gfc_finish_block (&loop.pre);
1472 gfc_add_expr_to_block (pblock, tmp);
1474 gfc_cleanup_loop (&loop);
1478 /* Assign the values to the elements of an array constructor. DYNAMIC
1479 is true if descriptor DESC only contains enough data for the static
1480 size calculated by gfc_get_array_constructor_size. When true, memory
1481 for the dynamic parts must be allocated using realloc. */
1484 gfc_trans_array_constructor_value (stmtblock_t * pblock, tree type,
1485 tree desc, gfc_constructor_base base,
1486 tree * poffset, tree * offsetvar,
1495 tree shadow_loopvar = NULL_TREE;
1496 gfc_saved_var saved_loopvar;
/* Process each constructor element in turn: nested constructors recurse,
   array elements use the subarray path, scalars are stored directly
   (runs of scalar constants are batched into a memcpy), and iterators
   build an implied-do loop around their body.  */
1499 for (c = gfc_constructor_first (base); c; c = gfc_constructor_next (c))
1501 /* If this is an iterator or an array, the offset must be a variable. */
1502 if ((c->iterator || c->expr->rank > 0) && INTEGER_CST_P (*poffset))
1503 gfc_put_offset_into_var (pblock, poffset, offsetvar);
1505 /* Shadowing the iterator avoids changing its value and saves us from
1506 keeping track of it. Further, it makes sure that there's always a
1507 backend-decl for the symbol, even if there wasn't one before,
1508 e.g. in the case of an iterator that appears in a specification
1509 expression in an interface mapping. */
1512 gfc_symbol *sym = c->iterator->var->symtree->n.sym;
1513 tree type = gfc_typenode_for_spec (&sym->ts);
1515 shadow_loopvar = gfc_create_var (type, "shadow_loopvar");
1516 gfc_shadow_sym (sym, shadow_loopvar, &saved_loopvar);
1519 gfc_start_block (&body);
1521 if (c->expr->expr_type == EXPR_ARRAY)
1523 /* Array constructors can be nested. */
1524 gfc_trans_array_constructor_value (&body, type, desc,
1525 c->expr->value.constructor,
1526 poffset, offsetvar, dynamic);
1528 else if (c->expr->rank > 0)
1530 gfc_trans_array_constructor_subarray (&body, type, desc, c->expr,
1531 poffset, offsetvar, dynamic);
1535 /* This code really upsets the gimplifier so don't bother for now. */
1542 while (p && !(p->iterator || p->expr->expr_type != EXPR_CONSTANT))
1544 p = gfc_constructor_next (p);
1549 /* Scalar values. */
1550 gfc_init_se (&se, NULL);
1551 gfc_trans_array_ctor_element (&body, desc, *poffset,
1554 *poffset = fold_build2_loc (input_location, PLUS_EXPR,
1555 gfc_array_index_type,
1556 *poffset, gfc_index_one_node);
1560 /* Collect multiple scalar constants into a constructor. */
1561 VEC(constructor_elt,gc) *v = NULL;
1565 HOST_WIDE_INT idx = 0;
1568 /* Count the number of consecutive scalar constants. */
1569 while (p && !(p->iterator
1570 || p->expr->expr_type != EXPR_CONSTANT))
1572 gfc_init_se (&se, NULL);
1573 gfc_conv_constant (&se, p->expr);
1575 if (c->expr->ts.type != BT_CHARACTER)
1576 se.expr = fold_convert (type, se.expr);
1577 /* For constant character array constructors we build
1578 an array of pointers. */
1579 else if (POINTER_TYPE_P (type))
1580 se.expr = gfc_build_addr_expr
1581 (gfc_get_pchar_type (p->expr->ts.kind),
1584 CONSTRUCTOR_APPEND_ELT (v,
1585 build_int_cst (gfc_array_index_type,
1589 p = gfc_constructor_next (p);
1592 bound = size_int (n - 1);
1593 /* Create an array type to hold them. */
1594 tmptype = build_range_type (gfc_array_index_type,
1595 gfc_index_zero_node, bound);
1596 tmptype = build_array_type (type, tmptype);
1598 init = build_constructor (tmptype, v);
1599 TREE_CONSTANT (init) = 1;
1600 TREE_STATIC (init) = 1;
1601 /* Create a static variable to hold the data. */
1602 tmp = gfc_create_var (tmptype, "data");
1603 TREE_STATIC (tmp) = 1;
1604 TREE_CONSTANT (tmp) = 1;
1605 TREE_READONLY (tmp) = 1;
1606 DECL_INITIAL (tmp) = init;
1609 /* Use BUILTIN_MEMCPY to assign the values. */
1610 tmp = gfc_conv_descriptor_data_get (desc);
1611 tmp = build_fold_indirect_ref_loc (input_location,
1613 tmp = gfc_build_array_ref (tmp, *poffset, NULL);
1614 tmp = gfc_build_addr_expr (NULL_TREE, tmp);
1615 init = gfc_build_addr_expr (NULL_TREE, init);
1617 size = TREE_INT_CST_LOW (TYPE_SIZE_UNIT (type));
1618 bound = build_int_cst (size_type_node, n * size);
1619 tmp = build_call_expr_loc (input_location,
1620 builtin_decl_explicit (BUILT_IN_MEMCPY),
1621 3, tmp, init, bound);
1622 gfc_add_expr_to_block (&body, tmp);
/* Advance the offset past the N elements just copied.  */
1624 *poffset = fold_build2_loc (input_location, PLUS_EXPR,
1625 gfc_array_index_type, *poffset,
1626 build_int_cst (gfc_array_index_type, n));
1628 if (!INTEGER_CST_P (*poffset))
1630 gfc_add_modify (&body, *offsetvar, *poffset);
1631 *poffset = *offsetvar;
1635 /* The frontend should already have done any expansions
1639 /* Pass the code as is. */
1640 tmp = gfc_finish_block (&body);
1641 gfc_add_expr_to_block (pblock, tmp);
1645 /* Build the implied do-loop. */
1646 stmtblock_t implied_do_block;
1654 loopbody = gfc_finish_block (&body);
1656 /* Create a new block that holds the implied-do loop. A temporary
1657 loop-variable is used. */
1658 gfc_start_block(&implied_do_block);
1660 /* Initialize the loop. */
1661 gfc_init_se (&se, NULL);
1662 gfc_conv_expr_val (&se, c->iterator->start);
1663 gfc_add_block_to_block (&implied_do_block, &se.pre);
1664 gfc_add_modify (&implied_do_block, shadow_loopvar, se.expr);
/* Evaluate END and STEP once, before the loop, so the body sees
   stable values.  */
1666 gfc_init_se (&se, NULL);
1667 gfc_conv_expr_val (&se, c->iterator->end);
1668 gfc_add_block_to_block (&implied_do_block, &se.pre);
1669 end = gfc_evaluate_now (se.expr, &implied_do_block);
1671 gfc_init_se (&se, NULL);
1672 gfc_conv_expr_val (&se, c->iterator->step);
1673 gfc_add_block_to_block (&implied_do_block, &se.pre);
1674 step = gfc_evaluate_now (se.expr, &implied_do_block);
1676 /* If this array expands dynamically, and the number of iterations
1677 is not constant, we won't have allocated space for the static
1678 part of C->EXPR's size. Do that now. */
1679 if (dynamic && gfc_iterator_has_dynamic_bounds (c->iterator))
1681 /* Get the number of iterations. */
1682 tmp = gfc_get_iteration_count (shadow_loopvar, end, step);
1684 /* Get the static part of C->EXPR's size. */
1685 gfc_get_array_constructor_element_size (&size, c->expr);
1686 tmp2 = gfc_conv_mpz_to_tree (size, gfc_index_integer_kind);
1688 /* Grow the array by TMP * TMP2 elements. */
1689 tmp = fold_build2_loc (input_location, MULT_EXPR,
1690 gfc_array_index_type, tmp, tmp2);
1691 gfc_grow_array (&implied_do_block, desc, tmp);
1694 /* Generate the loop body. */
1695 exit_label = gfc_build_label_decl (NULL_TREE);
1696 gfc_start_block (&body);
1698 /* Generate the exit condition. Depending on the sign of
1699 the step variable we have to generate the correct
/* For positive STEP exit when loopvar > end; for negative STEP exit
   when loopvar < end.  */
1701 tmp = fold_build2_loc (input_location, GT_EXPR, boolean_type_node,
1702 step, build_int_cst (TREE_TYPE (step), 0));
1703 cond = fold_build3_loc (input_location, COND_EXPR,
1704 boolean_type_node, tmp,
1705 fold_build2_loc (input_location, GT_EXPR,
1706 boolean_type_node, shadow_loopvar, end),
1707 fold_build2_loc (input_location, LT_EXPR,
1708 boolean_type_node, shadow_loopvar, end));
1709 tmp = build1_v (GOTO_EXPR, exit_label);
1710 TREE_USED (exit_label) = 1;
1711 tmp = build3_v (COND_EXPR, cond, tmp,
1712 build_empty_stmt (input_location));
1713 gfc_add_expr_to_block (&body, tmp);
1715 /* The main loop body. */
1716 gfc_add_expr_to_block (&body, loopbody);
1718 /* Increase loop variable by step. */
1719 tmp = fold_build2_loc (input_location, PLUS_EXPR,
1720 TREE_TYPE (shadow_loopvar), shadow_loopvar,
1722 gfc_add_modify (&body, shadow_loopvar, tmp);
1724 /* Finish the loop. */
1725 tmp = gfc_finish_block (&body);
1726 tmp = build1_v (LOOP_EXPR, tmp);
1727 gfc_add_expr_to_block (&implied_do_block, tmp);
1729 /* Add the exit label. */
1730 tmp = build1_v (LABEL_EXPR, exit_label);
1731 gfc_add_expr_to_block (&implied_do_block, tmp);
1733 /* Finish the implied-do loop. */
1734 tmp = gfc_finish_block(&implied_do_block);
1735 gfc_add_expr_to_block(pblock, tmp);
/* Restore the shadowed iterator symbol to its previous decl.  */
1737 gfc_restore_sym (c->iterator->var->symtree->n.sym, &saved_loopvar);
1744 /* A catch-all to obtain the string length for anything that is not a
1745 substring of non-constant length, a constant, array or variable. */
1748 get_array_ctor_all_strlen (stmtblock_t *block, gfc_expr *e, tree *len)
1753 /* Don't bother if we already know the length is a constant. */
1754 if (*len && INTEGER_CST_P (*len))
/* A constant typespec length can be used directly without
   evaluating E.  */
1757 if (!e->ref && e->ts.u.cl && e->ts.u.cl->length
1758 && e->ts.u.cl->length->expr_type == EXPR_CONSTANT)
1761 gfc_conv_const_charlen (e->ts.u.cl);
1762 *len = e->ts.u.cl->backend_decl;
1766 /* Otherwise, be brutal even if inefficient. */
1767 ss = gfc_walk_expr (e);
1768 gfc_init_se (&se, NULL);
1770 /* No function call, in case of side effects. */
1771 se.no_function_call = 1;
/* Scalar expressions convert directly; array expressions go through
   the descriptor so the string length becomes available.  */
1772 if (ss == gfc_ss_terminator)
1773 gfc_conv_expr (&se, e);
1775 gfc_conv_expr_descriptor (&se, e, ss);
1777 /* Fix the value. */
1778 *len = gfc_evaluate_now (se.string_length, &se.pre);
1780 gfc_add_block_to_block (block, &se.pre);
1781 gfc_add_block_to_block (block, &se.post);
/* Cache the computed length on the charlen for later reuse.  */
1783 e->ts.u.cl->backend_decl = *len;
1788 /* Figure out the string length of a variable reference expression.
1789 Used by get_array_ctor_strlen. */
1792 get_array_ctor_var_strlen (stmtblock_t *block, gfc_expr * expr, tree * len)
1798 /* Don't bother if we already know the length is a constant. */
1799 if (*len && INTEGER_CST_P (*len))
/* Start from the symbol's typespec and refine it while walking the
   reference chain.  */
1802 ts = &expr->symtree->n.sym->ts;
1803 for (ref = expr->ref; ref; ref = ref->next)
1808 /* Array references don't change the string length. */
1812 /* Use the length of the component. */
1813 ts = &ref->u.c.component->ts;
/* Substring with non-constant bounds: fall back to full evaluation.  */
1817 if (ref->u.ss.start->expr_type != EXPR_CONSTANT
1818 || ref->u.ss.end->expr_type != EXPR_CONSTANT)
1820 /* Note that this might evaluate expr. */
1821 get_array_ctor_all_strlen (block, expr, len);
/* Constant substring: length = end - start + 1.  */
1824 mpz_init_set_ui (char_len, 1);
1825 mpz_add (char_len, char_len, ref->u.ss.end->value.integer);
1826 mpz_sub (char_len, char_len, ref->u.ss.start->value.integer);
1827 *len = gfc_conv_mpz_to_tree (char_len, gfc_default_integer_kind);
1828 *len = convert (gfc_charlen_type_node, *len);
1829 mpz_clear (char_len);
/* No substring reference: use the typespec's backend charlen decl.  */
1837 *len = ts->u.cl->backend_decl;
1841 /* Figure out the string length of a character array constructor.
1842 If len is NULL, don't calculate the length; this happens for recursive calls
1843 when a sub-array-constructor is an element but not at the first position,
1844 so when we're not interested in the length.
1845 Returns TRUE if all elements are character constants. */
1848 get_array_ctor_strlen (stmtblock_t *block, gfc_constructor_base base, tree * len)
/* An empty constructor has length zero and is trivially constant.  */
1855 if (gfc_constructor_first (base) == NULL)
1858 *len = build_int_cstu (gfc_charlen_type_node, 0);
1862 /* Loop over all constructor elements to find out is_const, but in len we
1863 want to store the length of the first, not the last, element. We can
1864 of course exit the loop as soon as is_const is found to be false. */
1865 for (c = gfc_constructor_first (base);
1866 c && is_const; c = gfc_constructor_next (c))
1868 switch (c->expr->expr_type)
/* A character constant: take its length if none recorded yet.  */
1871 if (len && !(*len && INTEGER_CST_P (*len)))
1872 *len = build_int_cstu (gfc_charlen_type_node,
1873 c->expr->value.character.length);
/* A nested constructor: recurse; non-constant contents propagate up.  */
1877 if (!get_array_ctor_strlen (block, c->expr->value.constructor, len))
1884 get_array_ctor_var_strlen (block, c->expr, len);
1890 get_array_ctor_all_strlen (block, c->expr, len);
1894 /* After the first iteration, we don't want the length modified. */
1901 /* Check whether the array constructor C consists entirely of constant
1902 elements, and if so returns the number of those elements, otherwise
1903 return zero. Note, an empty or NULL array constructor returns zero. */
1905 unsigned HOST_WIDE_INT
1906 gfc_constant_array_constructor_p (gfc_constructor_base base)
1908 unsigned HOST_WIDE_INT nelem = 0;
1910 gfc_constructor *c = gfc_constructor_first (base);
/* Any iterator, array-valued element or non-constant expression
   disqualifies the constructor.  */
1914 || c->expr->rank > 0
1915 || c->expr->expr_type != EXPR_CONSTANT)
1917 c = gfc_constructor_next (c);
1924 /* Given EXPR, the constant array constructor specified by an EXPR_ARRAY,
1925 and the tree type of its elements, TYPE, return a static constant
1926 variable that is compile-time initialized. */
1929 gfc_build_constant_array_constructor (gfc_expr * expr, tree type)
1931 tree tmptype, init, tmp;
1932 HOST_WIDE_INT nelem;
1937 VEC(constructor_elt,gc) *v = NULL;
1939 /* First traverse the constructor list, converting the constants
1940 to tree to build an initializer. */
1942 c = gfc_constructor_first (expr->value.constructor);
1945 gfc_init_se (&se, NULL);
1946 gfc_conv_constant (&se, c->expr);
/* Character elements are stored as pointers to the string data;
   everything else is converted to the element type.  */
1947 if (c->expr->ts.type != BT_CHARACTER)
1948 se.expr = fold_convert (type, se.expr);
1949 else if (POINTER_TYPE_P (type))
1950 se.expr = gfc_build_addr_expr (gfc_get_pchar_type (c->expr->ts.kind),
1952 CONSTRUCTOR_APPEND_ELT (v, build_int_cst (gfc_array_index_type, nelem),
1954 c = gfc_constructor_next (c);
1958 /* Next determine the tree type for the array. We use the gfortran
1959 front-end's gfc_get_nodesc_array_type in order to create a suitable
1960 GFC_ARRAY_TYPE_P that may be used by the scalarizer. */
1962 memset (&as, 0, sizeof (gfc_array_spec));
1964 as.rank = expr->rank;
1965 as.type = AS_EXPLICIT;
/* Rank-1 case: bounds 0 .. nelem-1.  */
1968 as.lower[0] = gfc_get_int_expr (gfc_default_integer_kind, NULL, 0);
1969 as.upper[0] = gfc_get_int_expr (gfc_default_integer_kind,
/* Multidimensional case: bounds per dimension from expr->shape.  */
1973 for (i = 0; i < expr->rank; i++)
1975 int tmp = (int) mpz_get_si (expr->shape[i]);
1976 as.lower[i] = gfc_get_int_expr (gfc_default_integer_kind, NULL, 0);
1977 as.upper[i] = gfc_get_int_expr (gfc_default_integer_kind,
1981 tmptype = gfc_get_nodesc_array_type (type, &as, PACKED_STATIC, true);
1983 /* as is not needed anymore. */
1984 for (i = 0; i < as.rank + as.corank; i++)
1986 gfc_free_expr (as.lower[i]);
1987 gfc_free_expr (as.upper[i]);
1990 init = build_constructor (tmptype, v);
1992 TREE_CONSTANT (init) = 1;
1993 TREE_STATIC (init) = 1;
/* Emit the data as a static read-only variable "A".  */
1995 tmp = gfc_create_var (tmptype, "A");
1996 TREE_STATIC (tmp) = 1;
1997 TREE_CONSTANT (tmp) = 1;
1998 TREE_READONLY (tmp) = 1;
1999 DECL_INITIAL (tmp) = init;
2005 /* Translate a constant EXPR_ARRAY array constructor for the scalarizer.
2006 This mostly initializes the scalarizer state info structure with the
2007 appropriate values to directly use the array created by the function
2008 gfc_build_constant_array_constructor. */
2011 trans_constant_array_constructor (gfc_ss * ss, tree type)
2013 gfc_array_info *info;
2017 tmp = gfc_build_constant_array_constructor (ss->info->expr, type);
2019 info = &ss->info->data.array;
/* Point the SS directly at the static variable: zero offset, unit
   strides, zero-based bounds.  */
2021 info->descriptor = tmp;
2022 info->data = gfc_build_addr_expr (NULL_TREE, tmp);
2023 info->offset = gfc_index_zero_node;
2025 for (i = 0; i < ss->dimen; i++)
2027 info->delta[i] = gfc_index_zero_node;
2028 info->start[i] = gfc_index_zero_node;
2029 info->end[i] = gfc_index_zero_node;
2030 info->stride[i] = gfc_index_one_node;
/* Return the total rank of LOOP, summing the dimensions of the loop and
   all of its parents.  */
2036 get_rank (gfc_loopinfo *loop)
2041 for (; loop; loop = loop->parent)
2042 rank += loop->dimen;
2048 /* Helper routine of gfc_trans_array_constructor to determine if the
2049 bounds of the loop specified by LOOP are constant and simple enough
2050 to use with trans_constant_array_constructor. Returns the
2051 iteration count of the loop if suitable, and NULL_TREE otherwise. */
2054 constant_array_constructor_loop_size (gfc_loopinfo * l)
2057 tree size = gfc_index_one_node;
2061 total_dim = get_rank (l);
2063 for (loop = l; loop; loop = loop->parent)
2065 for (i = 0; i < loop->dimen; i++)
2067 /* If the bounds aren't constant, return NULL_TREE. */
2068 if (!INTEGER_CST_P (loop->from[i]) || !INTEGER_CST_P (loop->to[i]))
2070 if (!integer_zerop (loop->from[i]))
2072 /* Only allow nonzero "from" in one-dimensional arrays. */
/* Non-zero lower bound: count is to - from + 1.  */
2075 tmp = fold_build2_loc (input_location, MINUS_EXPR,
2076 gfc_array_index_type,
2077 loop->to[i], loop->from[i]);
/* Accumulate the product of per-dimension iteration counts.  */
2081 tmp = fold_build2_loc (input_location, PLUS_EXPR,
2082 gfc_array_index_type, tmp, gfc_index_one_node);
2083 size = fold_build2_loc (input_location, MULT_EXPR,
2084 gfc_array_index_type, size, tmp);
/* Return a pointer to the loop upper-bound tree corresponding to array
   dimension ARRAY_DIM of ARRAY, searching this SS and its parents.  */
2093 get_loop_upper_bound_for_array (gfc_ss *array, int array_dim)
2098 gcc_assert (array->nested_ss == NULL);
2100 for (ss = array; ss; ss = ss->parent)
2101 for (n = 0; n < ss->loop->dimen; n++)
2102 if (array_dim == get_array_ref_dim_for_loop_dim (ss, n))
2103 return &(ss->loop->to[n]);
/* Walk up the parent chain and return the outermost loop enclosing
   LOOP.  */
2109 static gfc_loopinfo *
2110 outermost_loop (gfc_loopinfo * loop)
2112 while (loop->parent != NULL)
2113 loop = loop->parent;
2119 /* Array constructors are handled by constructing a temporary, then using that
2120 within the scalarization loop. This is not optimal, but seems by far the
2124 trans_array_constructor (gfc_ss * ss, locus * where)
2126 gfc_constructor_base c;
2134 bool old_first_len, old_typespec_chararray_ctor;
2135 tree old_first_len_val;
2136 gfc_loopinfo *loop, *outer_loop;
2137 gfc_ss_info *ss_info;
2141 /* Save the old values for nested checking. */
2142 old_first_len = first_len;
2143 old_first_len_val = first_len_val;
2144 old_typespec_chararray_ctor = typespec_chararray_ctor;
2147 outer_loop = outermost_loop (loop);
2149 expr = ss_info->expr;
2151 /* Do bounds-checking here and in gfc_trans_array_ctor_element only if no
2152 typespec was given for the array constructor. */
2153 typespec_chararray_ctor = (expr->ts.u.cl
2154 && expr->ts.u.cl->length_from_typespec);
2156 if ((gfc_option.rtcheck & GFC_RTCHECK_BOUNDS)
2157 && expr->ts.type == BT_CHARACTER && !typespec_chararray_ctor)
/* Create the variable that records the first element's length for the
   equal-length run-time check.  */
2159 first_len_val = gfc_create_var (gfc_charlen_type_node, "len");
2163 gcc_assert (ss->dimen == ss->loop->dimen);
2165 c = expr->value.constructor;
2166 if (expr->ts.type == BT_CHARACTER)
2170 /* get_array_ctor_strlen walks the elements of the constructor, if a
2171 typespec was given, we already know the string length and want the one
2173 if (typespec_chararray_ctor && expr->ts.u.cl->length
2174 && expr->ts.u.cl->length->expr_type != EXPR_CONSTANT)
2178 const_string = false;
2179 gfc_init_se (&length_se, NULL);
2180 gfc_conv_expr_type (&length_se, expr->ts.u.cl->length,
2181 gfc_charlen_type_node);
2182 ss_info->string_length = length_se.expr;
2183 gfc_add_block_to_block (&outer_loop->pre, &length_se.pre);
2184 gfc_add_block_to_block (&outer_loop->post, &length_se.post);
2187 const_string = get_array_ctor_strlen (&outer_loop->pre, c,
2188 &ss_info->string_length);
2190 /* Complex character array constructors should have been taken care of
2191 and not end up here. */
2192 gcc_assert (ss_info->string_length);
2194 expr->ts.u.cl->backend_decl = ss_info->string_length;
2196 type = gfc_get_character_type_len (expr->ts.kind, ss_info->string_length);
2198 type = build_pointer_type (type);
2201 type = gfc_typenode_for_spec (&expr->ts);
2203 /* See if the constructor determines the loop bounds. */
2206 loop_ubound0 = get_loop_upper_bound_for_array (ss, 0);
2208 if (expr->shape && get_rank (loop) > 1 && *loop_ubound0 == NULL_TREE)
2210 /* We have a multidimensional parameter. */
/* Take the loop bounds from the known shape: 0 .. shape-1.  */
2211 for (s = ss; s; s = s->parent)
2214 for (n = 0; n < s->loop->dimen; n++)
2216 s->loop->from[n] = gfc_index_zero_node;
2217 s->loop->to[n] = gfc_conv_mpz_to_tree (expr->shape[s->dim[n]],
2218 gfc_index_integer_kind);
2219 s->loop->to[n] = fold_build2_loc (input_location, MINUS_EXPR,
2220 gfc_array_index_type,
2222 gfc_index_one_node);
2227 if (*loop_ubound0 == NULL_TREE)
2231 /* We should have a 1-dimensional, zero-based loop. */
2232 gcc_assert (loop->parent == NULL && loop->nested == NULL);
2233 gcc_assert (loop->dimen == 1);
2234 gcc_assert (integer_zerop (loop->from[0]));
2236 /* Split the constructor size into a static part and a dynamic part.
2237 Allocate the static size up-front and record whether the dynamic
2238 size might be nonzero. */
2240 dynamic = gfc_get_array_constructor_size (&size, c);
2241 mpz_sub_ui (size, size, 1);
2242 loop->to[0] = gfc_conv_mpz_to_tree (size, gfc_index_integer_kind);
2246 /* Special case constant array constructors. */
2249 unsigned HOST_WIDE_INT nelem = gfc_constant_array_constructor_p (c);
2252 tree size = constant_array_constructor_loop_size (loop);
/* Only use the static-data fast path when the loop's iteration count
   matches the constructor's element count exactly.  */
2253 if (size && compare_tree_int (size, nelem) == 0)
2255 trans_constant_array_constructor (ss, type);
2261 if (TREE_CODE (*loop_ubound0) == VAR_DECL)
2264 gfc_trans_create_temp_array (&outer_loop->pre, &outer_loop->post, ss, type,
2265 NULL_TREE, dynamic, true, false, where);
2267 desc = ss_info->data.array.descriptor;
2268 offset = gfc_index_zero_node;
2269 offsetvar = gfc_create_var_np (gfc_array_index_type, "offset");
2270 TREE_NO_WARNING (offsetvar) = 1;
2271 TREE_USED (offsetvar) = 0;
2272 gfc_trans_array_constructor_value (&outer_loop->pre, type, desc, c,
2273 &offset, &offsetvar, dynamic);
2275 /* If the array grows dynamically, the upper bound of the loop variable
2276 is determined by the array's final upper bound. */
/* Final upper bound = final offset - 1.  */
2279 tmp = fold_build2_loc (input_location, MINUS_EXPR,
2280 gfc_array_index_type,
2281 offsetvar, gfc_index_one_node);
2282 tmp = gfc_evaluate_now (tmp, &outer_loop->pre);
2283 gfc_conv_descriptor_ubound_set (&loop->pre, desc, gfc_rank_cst[0], tmp);
2284 if (*loop_ubound0 && TREE_CODE (*loop_ubound0) == VAR_DECL)
2285 gfc_add_modify (&outer_loop->pre, *loop_ubound0, tmp);
2287 *loop_ubound0 = tmp;
2290 if (TREE_USED (offsetvar))
2291 pushdecl (offsetvar);
2293 gcc_assert (INTEGER_CST_P (offset));
2296 /* Disable bound checking for now because it's probably broken. */
2297 if (gfc_option.rtcheck & GFC_RTCHECK_BOUNDS)
2304 /* Restore old values of globals. */
2305 first_len = old_first_len;
2306 first_len_val = old_first_len_val;
2307 typespec_chararray_ctor = old_typespec_chararray_ctor;
2311 /* INFO describes a GFC_SS_SECTION in loop LOOP, and this function is
2312 called after evaluating all of INFO's vector dimensions. Go through
2313 each such vector dimension and see if we can now fill in any missing
2317 set_vector_loop_bounds (gfc_ss * ss)
2319 gfc_loopinfo *loop, *outer_loop;
2320 gfc_array_info *info;
2328 outer_loop = outermost_loop (ss->loop);
2330 info = &ss->info->data.array;
2332 for (; ss; ss = ss->parent)
2336 for (n = 0; n < loop->dimen; n++)
/* Skip non-vector dimensions and those whose bound is already set.  */
2339 if (info->ref->u.ar.dimen_type[dim] != DIMEN_VECTOR
2340 || loop->to[n] != NULL)
2343 /* Loop variable N indexes vector dimension DIM, and we don't
2344 yet know the upper bound of loop variable N. Set it to the
2345 difference between the vector's upper and lower bounds. */
2346 gcc_assert (loop->from[n] == gfc_index_zero_node);
2347 gcc_assert (info->subscript[dim]
2348 && info->subscript[dim]->info->type == GFC_SS_VECTOR);
2350 gfc_init_se (&se, NULL);
2351 desc = info->subscript[dim]->info->data.array.descriptor;
2352 zero = gfc_rank_cst[0];
/* Bound = vector ubound - vector lbound (zero-based loop).  */
2353 tmp = fold_build2_loc (input_location, MINUS_EXPR,
2354 gfc_array_index_type,
2355 gfc_conv_descriptor_ubound_get (desc, zero),
2356 gfc_conv_descriptor_lbound_get (desc, zero));
2357 tmp = gfc_evaluate_now (tmp, &outer_loop->pre);
2364 /* Add the pre and post chains for all the scalar expressions in a SS chain
2365 to loop. This is called after the loop parameters have been calculated,
2366 but before the actual scalarizing loops. */
2369 gfc_add_loop_ss_code (gfc_loopinfo * loop, gfc_ss * ss, bool subscript,
2372 gfc_loopinfo *nested_loop, *outer_loop;
2374 gfc_ss_info *ss_info;
2375 gfc_array_info *info;
2377 bool skip_nested = false;
2380 outer_loop = outermost_loop (loop);
2382 /* TODO: This can generate bad code if there are ordering dependencies,
2383 e.g., a callee allocated function and an unknown size constructor. */
2384 gcc_assert (ss != NULL);
2386 for (; ss != gfc_ss_terminator; ss = ss->loop_chain)
2390 /* Cross loop arrays are handled from within the most nested loop. */
2391 if (ss->nested_ss != NULL)
2395 expr = ss_info->expr;
2396 info = &ss_info->data.array;
/* Dispatch on the kind of scalarization-state entry.  */
2398 switch (ss_info->type)
2401 /* Scalar expression. Evaluate this now. This includes elemental
2402 dimension indices, but not array section bounds. */
2403 gfc_init_se (&se, NULL);
2404 gfc_conv_expr (&se, expr);
2405 gfc_add_block_to_block (&outer_loop->pre, &se.pre);
2407 if (expr->ts.type != BT_CHARACTER)
2409 /* Move the evaluation of scalar expressions outside the
2410 scalarization loop, except for WHERE assignments. */
2412 se.expr = convert(gfc_array_index_type, se.expr);
2413 if (!ss_info->where)
2414 se.expr = gfc_evaluate_now (se.expr, &outer_loop->pre);
2415 gfc_add_block_to_block (&outer_loop->pre, &se.post);
2418 gfc_add_block_to_block (&outer_loop->post, &se.post);
2420 ss_info->data.scalar.value = se.expr;
2421 ss_info->string_length = se.string_length;
2424 case GFC_SS_REFERENCE:
2425 /* Scalar argument to elemental procedure. */
2426 gfc_init_se (&se, NULL);
2427 if (ss_info->data.scalar.can_be_null_ref)
2429 /* If the actual argument can be absent (in other words, it can
2430 be a NULL reference), don't try to evaluate it; pass instead
2431 the reference directly. */
2432 gfc_conv_expr_reference (&se, expr);
2436 /* Otherwise, evaluate the argument outside the loop and pass
2437 a reference to the value. */
2438 gfc_conv_expr (&se, expr);
2440 gfc_add_block_to_block (&outer_loop->pre, &se.pre);
2441 gfc_add_block_to_block (&outer_loop->post, &se.post);
2442 if (gfc_is_class_scalar_expr (expr))
2443 /* This is necessary because the dynamic type will always be
2444 larger than the declared type. In consequence, assigning
2445 the value to a temporary could segfault.
2446 OOP-TODO: see if this is generally correct or is the value
2447 has to be written to an allocated temporary, whose address
2448 is passed via ss_info. */
2449 ss_info->data.scalar.value = se.expr;
2451 ss_info->data.scalar.value = gfc_evaluate_now (se.expr,
2454 ss_info->string_length = se.string_length;
2457 case GFC_SS_SECTION:
2458 /* Add the expressions for scalar and vector subscripts. */
2459 for (n = 0; n < GFC_MAX_DIMENSIONS; n++)
2460 if (info->subscript[n])
2462 gfc_add_loop_ss_code (loop, info->subscript[n], true, where);
2463 /* The recursive call will have taken care of the nested loops.
2464 No need to do it twice. */
2468 set_vector_loop_bounds (ss);
2472 /* Get the vector's descriptor and store it in SS. */
2473 gfc_init_se (&se, NULL);
2474 gfc_conv_expr_descriptor (&se, expr, gfc_walk_expr (expr));
2475 gfc_add_block_to_block (&outer_loop->pre, &se.pre);
2476 gfc_add_block_to_block (&outer_loop->post, &se.post);
2477 info->descriptor = se.expr;
2480 case GFC_SS_INTRINSIC:
2481 gfc_add_intrinsic_ss_code (loop, ss);
2484 case GFC_SS_FUNCTION:
2485 /* Array function return value. We call the function and save its
2486 result in a temporary for use inside the loop. */
2487 gfc_init_se (&se, NULL);
2490 gfc_conv_expr (&se, expr);
2491 gfc_add_block_to_block (&outer_loop->pre, &se.pre);
2492 gfc_add_block_to_block (&outer_loop->post, &se.post);
2493 ss_info->string_length = se.string_length;
2496 case GFC_SS_CONSTRUCTOR:
/* Evaluate a non-constant typespec character length before
   translating the constructor itself.  */
2497 if (expr->ts.type == BT_CHARACTER
2498 && ss_info->string_length == NULL
2500 && expr->ts.u.cl->length)
2502 gfc_init_se (&se, NULL);
2503 gfc_conv_expr_type (&se, expr->ts.u.cl->length,
2504 gfc_charlen_type_node);
2505 ss_info->string_length = se.expr;
2506 gfc_add_block_to_block (&outer_loop->pre, &se.pre);
2507 gfc_add_block_to_block (&outer_loop->post, &se.post);
2509 trans_array_constructor (ss, where);
2513 case GFC_SS_COMPONENT:
2514 /* Do nothing. These are handled elsewhere. */
/* Finally recurse into any nested loops, unless a recursive call
   above already processed them.  */
2523 for (nested_loop = loop->nested; nested_loop;
2524 nested_loop = nested_loop->next)
2525 gfc_add_loop_ss_code (nested_loop, nested_loop->ss, subscript, where);
2529 /* Translate expressions for the descriptor and data pointer of a SS. */
/* NOTE(review): this is a line-numbered listing with gaps (2530-2532,
   2534-2535, 2538-2540, 2551-2553, ...).  The return type, the declarations
   of 'se'/'tmp', and what is presumably an 'if (base)' guard around the
   data-pointer/offset section are elided -- confirm against the full file.  */
2533 gfc_conv_ss_descriptor (stmtblock_t * block, gfc_ss * ss, int base)
2536 gfc_ss_info *ss_info;
2537 gfc_array_info *info;
2541 info = &ss_info->data.array;
2543 /* Get the descriptor for the array to be scalarized.  Only variables can
     be scalarized this way (asserted below); the converted descriptor and
     string length are cached in the SS for later loop-body use.  */
2544 gcc_assert (ss_info->expr->expr_type == EXPR_VARIABLE);
2545 gfc_init_se (&se, NULL);
2546 se.descriptor_only = 1;
2547 gfc_conv_expr_lhs (&se, ss_info->expr);
2548 gfc_add_block_to_block (block, &se.pre);
2549 info->descriptor = se.expr;
2550 ss_info->string_length = se.string_length;
2554 /* Also the data pointer. */
2555 tmp = gfc_conv_array_data (se.expr);
2556 /* If this is a variable or address of a variable we use it directly.
2557 Otherwise we must evaluate it now to avoid breaking dependency
2558 analysis by pulling the expressions for elemental array indices
2561 || (TREE_CODE (tmp) == ADDR_EXPR
2562 && DECL_P (TREE_OPERAND (tmp, 0)))))
2563 tmp = gfc_evaluate_now (tmp, block);
2566 tmp = gfc_conv_array_offset (se.expr);
2567 info->offset = gfc_evaluate_now (tmp, block);
2569 /* Make absolutely sure that the saved_offset is indeed saved
2570 so that the variable is still accessible after the loops
2572 info->saved_offset = info->offset;
2577 /* Initialize a gfc_loopinfo structure. */
/* NOTE(review): numbered listing with gaps; lines 2590-2591 (presumably the
   'loop->order[n] = n;' initialisation implied by the comment below) and the
   function's return type are elided -- verify against the full file.  */
2580 gfc_init_loopinfo (gfc_loopinfo * loop)
2584 memset (loop, 0, sizeof (gfc_loopinfo))
2585 gfc_init_block (&loop->pre);
2586 gfc_init_block (&loop->post);
2588 /* Initially scalarize in order and default to no loop reversal. */
2589 for (n = 0; n < GFC_MAX_DIMENSIONS; n++)
2592 loop->reverse[n] = GFC_INHIBIT_REVERSE;
2595 loop->ss = gfc_ss_terminator;
2599 /* Copies the loop variable info to a gfc_se structure. Does not copy the SS
/* NOTE(review): the rest of the header comment and the whole function body
   (lines 2600-2608) are missing from this listing.  */
2603 gfc_copy_loopinfo_to_se (gfc_se * se, gfc_loopinfo * loop)
2609 /* Return an expression for the data pointer of an array. */
/* NOTE(review): lines 2613-2615, 2618, 2620-2622 and 2625-2627 are elided,
   including the branch taken when the descriptorless array is already a
   POINTER_TYPE (presumably just 'return descriptor;') -- confirm.  */
2612 gfc_conv_array_data (tree descriptor)
2616 type = TREE_TYPE (descriptor);
2617 if (GFC_ARRAY_TYPE_P (type))
2619 if (TREE_CODE (type) == POINTER_TYPE)
2623 /* Descriptorless arrays: the data pointer is the address of the
     array object itself. */
2624 return gfc_build_addr_expr (NULL_TREE, descriptor);
2628 return gfc_conv_descriptor_data_get (descriptor);
2632 /* Return an expression for the base offset of an array. */
/* Descriptorless (GFC_ARRAY_TYPE_P) arrays keep the offset in the type;
   descriptor-based arrays read it from the descriptor field.
   NOTE(review): numbered listing; lines 2636-2638 and 2642 are elided.  */
2635 gfc_conv_array_offset (tree descriptor)
2639 type = TREE_TYPE (descriptor);
2640 if (GFC_ARRAY_TYPE_P (type))
2641 return GFC_TYPE_ARRAY_OFFSET (type);
2643 return gfc_conv_descriptor_offset_get (descriptor);
2647 /* Get an expression for the array stride. */
/* Prefer the compile-time stride stored in a descriptorless array type;
   fall back to reading the stride field of the runtime descriptor.
   NOTE(review): lines 2651-2654, 2656 and 2660-2661 are elided (likely the
   declarations and the 'return tmp;').  */
2650 gfc_conv_array_stride (tree descriptor, int dim)
2655 type = TREE_TYPE (descriptor);
2657 /* For descriptorless arrays use the array size. */
2658 tmp = GFC_TYPE_ARRAY_STRIDE (type, dim);
2659 if (tmp != NULL_TREE)
2662 tmp = gfc_conv_descriptor_stride_get (descriptor, gfc_rank_cst[dim]);
2667 /* Like gfc_conv_array_stride, but for the lower bound. */
/* NOTE(review): lines 2671-2674, 2676 and 2679-2680 are elided (likely the
   declarations and the 'return tmp;' for the compile-time-known case).  */
2670 gfc_conv_array_lbound (tree descriptor, int dim)
2675 type = TREE_TYPE (descriptor);
2677 tmp = GFC_TYPE_ARRAY_LBOUND (type, dim);
2678 if (tmp != NULL_TREE)
2681 tmp = gfc_conv_descriptor_lbound_get (descriptor, gfc_rank_cst[dim]);
2686 /* Like gfc_conv_array_stride, but for the upper bound. */
/* NOTE(review): lines 2690-2693, 2695, 2698-2699 and 2704 are elided.  */
2689 gfc_conv_array_ubound (tree descriptor, int dim)
2694 type = TREE_TYPE (descriptor);
2696 tmp = GFC_TYPE_ARRAY_UBOUND (type, dim);
2697 if (tmp != NULL_TREE)
2700 /* This should only ever happen when passing an assumed shape array
2701 as an actual parameter. The value will never be used.  A dummy zero
     is returned for descriptorless arrays with no known upper bound. */
2702 if (GFC_ARRAY_TYPE_P (TREE_TYPE (descriptor)))
2703 return gfc_index_zero_node;
2705 tmp = gfc_conv_descriptor_ubound_get (descriptor, gfc_rank_cst[dim]);
2710 /* Generate code to perform an array index bound check. */
/* Emits a runtime check that INDEX lies within dimension N's bounds of the
   array behind SS; the upper-bound check is skipped when CHECK_UPPER is
   false (e.g. the last dimension of an assumed-size array).  No-op unless
   -fcheck=bounds is enabled.
   NOTE(review): numbered listing with gaps -- the return type, the early
   'return index;', the 'if (check_upper)'/'else' structure around the two
   message variants, and the final 'free (msg)'/'return' lines are elided.  */
2713 trans_array_bound_check (gfc_se * se, gfc_ss *ss, tree index, int n,
2714 locus * where, bool check_upper)
2717 tree tmp_lo, tmp_up;
2720 const char * name = NULL;
2722 if (!(gfc_option.rtcheck & GFC_RTCHECK_BOUNDS))
2725 descriptor = ss->info->data.array.descriptor;
2727 index = gfc_evaluate_now (index, &se->pre);
2729 /* We find a name for the error message. */
2730 name = ss->info->expr->symtree->n.sym->name;
2731 gcc_assert (name != NULL);
2733 if (TREE_CODE (descriptor) == VAR_DECL)
2734 name = IDENTIFIER_POINTER (DECL_NAME (descriptor));
2736 /* If upper bound is present, include both bounds in the error message. */
2739 tmp_lo = gfc_conv_array_lbound (descriptor, n);
2740 tmp_up = gfc_conv_array_ubound (descriptor, n);
2743 asprintf (&msg, "Index '%%ld' of dimension %d of array '%s' "
2744 "outside of expected range (%%ld:%%ld)", n+1, name);
2746 asprintf (&msg, "Index '%%ld' of dimension %d "
2747 "outside of expected range (%%ld:%%ld)", n+1);
2749 fault = fold_build2_loc (input_location, LT_EXPR, boolean_type_node,
2751 gfc_trans_runtime_check (true, false, fault, &se->pre, where, msg,
2752 fold_convert (long_integer_type_node, index),
2753 fold_convert (long_integer_type_node, tmp_lo),
2754 fold_convert (long_integer_type_node, tmp_up));
2755 fault = fold_build2_loc (input_location, GT_EXPR, boolean_type_node,
2757 gfc_trans_runtime_check (true, false, fault, &se->pre, where, msg,
2758 fold_convert (long_integer_type_node, index),
2759 fold_convert (long_integer_type_node, tmp_lo),
2760 fold_convert (long_integer_type_node, tmp_up));
2765 tmp_lo = gfc_conv_array_lbound (descriptor, n);
2768 asprintf (&msg, "Index '%%ld' of dimension %d of array '%s' "
2769 "below lower bound of %%ld", n+1, name);
2771 asprintf (&msg, "Index '%%ld' of dimension %d "
2772 "below lower bound of %%ld", n+1);
2774 fault = fold_build2_loc (input_location, LT_EXPR, boolean_type_node,
2776 gfc_trans_runtime_check (true, false, fault, &se->pre, where, msg,
2777 fold_convert (long_integer_type_node, index),
2778 fold_convert (long_integer_type_node, tmp_lo));
2786 /* Return the offset for an index. Performs bound checking for elemental
2787 dimensions. Single element references are processed separately.
2788 DIM is the array dimension, I is the loop dimension. */
/* NOTE(review): numbered listing with gaps -- the switch's case labels for
   the elemental/vector/range cases, several 'break's, the closing braces,
   and the final 'return index;' are elided.  The visible structure is a
   switch over ar->dimen_type[dim] when AR is non-NULL, with a fallback path
   for temporaries / component arrays when AR is NULL.  */
2791 conv_array_index_offset (gfc_se * se, gfc_ss * ss, int dim, int i,
2792 gfc_array_ref * ar, tree stride)
2794 gfc_array_info *info;
2799 info = &ss->info->data.array;
2801 /* Get the index into the array for this dimension. */
2804 gcc_assert (ar->type != AR_ELEMENT);
2805 switch (ar->dimen_type[dim])
2807 case DIMEN_THIS_IMAGE:
2811 /* Elemental dimension: the scalar subscript was translated outside
     the loop and cached in the subscript SS. */
2812 gcc_assert (info->subscript[dim]
2813 && info->subscript[dim]->info->type == GFC_SS_SCALAR);
2814 /* We've already translated this value outside the loop. */
2815 index = info->subscript[dim]->info->data.scalar.value;
2817 index = trans_array_bound_check (se, ss, index, dim, &ar->where,
2818 ar->as->type != AS_ASSUMED_SIZE
2819 || dim < ar->dimen - 1);
2823 gcc_assert (info && se->loop);
2824 gcc_assert (info->subscript[dim]
2825 && info->subscript[dim]->info->type == GFC_SS_VECTOR);
2826 desc = info->subscript[dim]->info->data.array.descriptor;
2828 /* Get a zero-based index into the vector. */
2829 index = fold_build2_loc (input_location, MINUS_EXPR,
2830 gfc_array_index_type,
2831 se->loop->loopvar[i], se->loop->from[i]);
2833 /* Multiply the index by the stride. */
2834 index = fold_build2_loc (input_location, MULT_EXPR,
2835 gfc_array_index_type,
2836 index, gfc_conv_array_stride (desc, 0));
2838 /* Read the vector to get an index into info->descriptor. */
2839 data = build_fold_indirect_ref_loc (input_location,
2840 gfc_conv_array_data (desc));
2841 index = gfc_build_array_ref (data, index, NULL);
2842 index = gfc_evaluate_now (index, &se->pre);
2843 index = fold_convert (gfc_array_index_type, index);
2845 /* Do any bounds checking on the final info->descriptor index. */
2846 index = trans_array_bound_check (se, ss, index, dim, &ar->where,
2847 ar->as->type != AS_ASSUMED_SIZE
2848 || dim < ar->dimen - 1);
2852 /* Scalarized dimension. */
2853 gcc_assert (info && se->loop);
2855 /* Multiply the loop variable by the stride and delta. */
2856 index = se->loop->loopvar[i];
2857 if (!integer_onep (info->stride[dim]))
2858 index = fold_build2_loc (input_location, MULT_EXPR,
2859 gfc_array_index_type, index,
2861 if (!integer_zerop (info->delta[dim]))
2862 index = fold_build2_loc (input_location, PLUS_EXPR,
2863 gfc_array_index_type, index,
2873 /* Temporary array or derived type component (no array ref given). */
2874 gcc_assert (se->loop);
2875 index = se->loop->loopvar[se->loop->order[i]];
2877 /* Pointer functions can have stride[0] different from unity.
2878 Use the stride returned by the function call and stored in
2879 the descriptor for the temporary. */
2880 if (se->ss && se->ss->info->type == GFC_SS_FUNCTION
2881 && se->ss->info->expr
2882 && se->ss->info->expr->symtree
2883 && se->ss->info->expr->symtree->n.sym->result
2884 && se->ss->info->expr->symtree->n.sym->result->attr.pointer)
2885 stride = gfc_conv_descriptor_stride_get (info->descriptor,
2888 if (!integer_zerop (info->delta[dim]))
2889 index = fold_build2_loc (input_location, PLUS_EXPR,
2890 gfc_array_index_type, index, info->delta[dim]);
2893 /* Multiply by the stride. */
2894 if (!integer_onep (stride))
2895 index = fold_build2_loc (input_location, MULT_EXPR, gfc_array_index_type,
2902 /* Build a scalarized array reference using the vptr 'size'. */
/* Handles CLASS (polymorphic) array elements: since the dynamic type's size
   is only known at runtime, the element address is computed as
   base + index * vptr->_size instead of ordinary array indexing.
   Returns early (presumably false -- elided) for non-CLASS expressions.
   NOTE(review): numbered listing with gaps; declarations, several closing
   braces, the 'class_ref = ref;' bookkeeping in the ref walk, and the final
   'return true;' are elided.  */
2905 build_class_array_ref (gfc_se *se, tree base, tree index)
2912 gfc_expr *expr = se->ss->info->expr;
2917 if (expr == NULL || expr->ts.type != BT_CLASS)
2920 if (expr->symtree && expr->symtree->n.sym->ts.type == BT_CLASS)
2921 ts = &expr->symtree->n.sym->ts;
2926 for (ref = expr->ref; ref; ref = ref->next)
2928 if (ref->type == REF_COMPONENT
2929 && ref->u.c.component->ts.type == BT_CLASS
2930 && ref->next && ref->next->type == REF_COMPONENT
2931 && strcmp (ref->next->u.c.component->name, "_data") == 0
2933 && ref->next->next->type == REF_ARRAY
2934 && ref->next->next->u.ar.type != AR_ELEMENT)
2936 ts = &ref->u.c.component->ts;
2945 if (class_ref == NULL)
2946 decl = expr->symtree->n.sym->backend_decl;
2949 /* Remove everything after the last class reference, convert the
2950 expression and then recover its tailend once more. */
2952 ref = class_ref->next;
2953 class_ref->next = NULL;
2954 gfc_init_se (&tmpse, NULL);
2955 gfc_conv_expr (&tmpse, expr);
2957 class_ref->next = ref;
2960 size = gfc_vtable_size_get (decl);
2962 /* Build the address of the element. */
2963 type = TREE_TYPE (TREE_TYPE (base));
2964 size = fold_convert (TREE_TYPE (index), size);
2965 offset = fold_build2_loc (input_location, MULT_EXPR,
2966 gfc_array_index_type,
2968 tmp = gfc_build_addr_expr (pvoid_type_node, base);
2969 tmp = fold_build_pointer_plus_loc (input_location, tmp, offset);
2970 tmp = fold_convert (build_pointer_type (type), tmp);
2972 /* Return the element in the se expression. */
2973 se->expr = build_fold_indirect_ref_loc (input_location, tmp);
2978 /* Build a scalarized reference to an array. */
/* Inside a scalarization loop: computes the element index for the current
   loop iteration (dimension order[0] plus the pre-accumulated offset for
   the other dimensions) and stores the element reference in se->expr.
   NOTE(review): gaps elide the declarations and, presumably, the loop or
   index handling for AR == NULL temporaries -- confirm in the full file.  */
2981 gfc_conv_scalarized_array_ref (gfc_se * se, gfc_array_ref * ar)
2983 gfc_array_info *info;
2984 tree decl = NULL_TREE;
2992 expr = ss->info->expr;
2993 info = &ss->info->data.array;
2995 n = se->loop->order[0];
2999 index = conv_array_index_offset (se, ss, ss->dim[n], n, ar, info->stride0);
3000 /* Add the offset for this dimension to the stored offset for all other
3002 if (!integer_zerop (info->offset))
3003 index = fold_build2_loc (input_location, PLUS_EXPR, gfc_array_index_type,
3004 index, info->offset);
3006 if (expr && is_subref_array (expr))
3007 decl = expr->symtree->n.sym->backend_decl;
3009 tmp = build_fold_indirect_ref_loc (input_location, info->data);
3011 /* Use the vptr 'size' field to access a class the element of a class
3013 if (build_class_array_ref (se, tmp, index))
3016 se->expr = gfc_build_array_ref (tmp, index, decl);
3020 /* Translate access of temporary array. */
/* Convenience wrapper: scalarized reference into the loop temporary (no
   gfc_array_ref needed), then advance the SE's SS chain.  */
3023 gfc_conv_tmp_array_ref (gfc_se * se)
3025 se->string_length = se->ss->info->string_length;
3026 gfc_conv_scalarized_array_ref (se, NULL);
3027 gfc_advance_se_ss_chain (se);
3030 /* Add T to the offset pair *OFFSET, *CST_OFFSET. */
/* Constant terms are folded into *CST_OFFSET at compile time; everything
   else is chained onto *OFFSET with PLUS_EXPR.  NOTE(review): the 'else'
   introducing the non-constant branch and an apparent '*offset = t;' for
   the first non-constant term (lines 3037-3038, 3042-3046) are elided.  */
3033 add_to_offset (tree *cst_offset, tree *offset, tree t)
3035 if (TREE_CODE (t) == INTEGER_CST)
3036 *cst_offset = int_const_binop (PLUS_EXPR, *cst_offset, t);
3039 if (!integer_zerop (*offset))
3040 *offset = fold_build2_loc (input_location, PLUS_EXPR,
3041 gfc_array_index_type, *offset, t);
3047 /* Build an array reference. se->expr already holds the array descriptor.
3048 This should be either a variable, indirect variable reference or component
3049 reference. For arrays which do not have a descriptor, se->expr will be
3051 a(i, j, k) = base[offset + i * stride[0] + j * stride[1] + k * stride[2]]*/
/* NOTE(review): numbered listing with many elided lines -- declarations,
   the coarray 'se->want_coarray'-style early-return block around 3066-3080,
   several braces, and parts of the bounds-check message plumbing.  The
   visible flow: handle coarray descriptor unwrapping, delegate AR_ELEMENT
   vs. scalarized references, then accumulate per-dimension offsets (with
   optional -fcheck=bounds checks) and index the data pointer.  */
3054 gfc_conv_array_ref (gfc_se * se, gfc_array_ref * ar, gfc_symbol * sym,
3058 tree offset, cst_offset;
3066 gcc_assert (ar->codimen);
3068 if (GFC_DESCRIPTOR_TYPE_P (TREE_TYPE (se->expr)))
3069 se->expr = build_fold_indirect_ref (gfc_conv_array_data (se->expr));
3072 if (GFC_ARRAY_TYPE_P (TREE_TYPE (se->expr))
3073 && TREE_CODE (TREE_TYPE (se->expr)) == POINTER_TYPE)
3074 se->expr = build_fold_indirect_ref_loc (input_location, se->expr);
3076 /* Use the actual tree type and not the wrapped coarray. */
3077 if (!se->want_pointer)
3078 se->expr = fold_convert (TYPE_MAIN_VARIANT (TREE_TYPE (se->expr)),
3085 /* Handle scalarized references separately. */
3086 if (ar->type != AR_ELEMENT)
3088 gfc_conv_scalarized_array_ref (se, ar);
3089 gfc_advance_se_ss_chain (se);
3093 cst_offset = offset = gfc_index_zero_node;
3094 add_to_offset (&cst_offset, &offset, gfc_conv_array_offset (se->expr));
3096 /* Calculate the offsets from all the dimensions. Make sure to associate
3097 the final offset so that we form a chain of loop invariant summands. */
3098 for (n = ar->dimen - 1; n >= 0; n--)
3100 /* Calculate the index for this dimension. */
3101 gfc_init_se (&indexse, se);
3102 gfc_conv_expr_type (&indexse, ar->start[n], gfc_array_index_type);
3103 gfc_add_block_to_block (&se->pre, &indexse.pre);
3105 if (gfc_option.rtcheck & GFC_RTCHECK_BOUNDS)
3107 /* Check array bounds. */
3111 /* Evaluate the indexse.expr only once. */
3112 indexse.expr = save_expr (indexse.expr);
3115 tmp = gfc_conv_array_lbound (se->expr, n);
3116 if (sym->attr.temporary)
3118 gfc_init_se (&tmpse, se);
3119 gfc_conv_expr_type (&tmpse, ar->as->lower[n],
3120 gfc_array_index_type);
3121 gfc_add_block_to_block (&se->pre, &tmpse.pre);
3125 cond = fold_build2_loc (input_location, LT_EXPR, boolean_type_node,
3127 asprintf (&msg, "Index '%%ld' of dimension %d of array '%s' "
3128 "below lower bound of %%ld", n+1, sym->name);
3129 gfc_trans_runtime_check (true, false, cond, &se->pre, where, msg,
3130 fold_convert (long_integer_type_node,
3132 fold_convert (long_integer_type_node, tmp));
3135 /* Upper bound, but not for the last dimension of assumed-size
3137 if (n < ar->dimen - 1 || ar->as->type != AS_ASSUMED_SIZE)
3139 tmp = gfc_conv_array_ubound (se->expr, n);
3140 if (sym->attr.temporary)
3142 gfc_init_se (&tmpse, se);
3143 gfc_conv_expr_type (&tmpse, ar->as->upper[n],
3144 gfc_array_index_type);
3145 gfc_add_block_to_block (&se->pre, &tmpse.pre);
3149 cond = fold_build2_loc (input_location, GT_EXPR,
3150 boolean_type_node, indexse.expr, tmp);
3151 asprintf (&msg, "Index '%%ld' of dimension %d of array '%s' "
3152 "above upper bound of %%ld", n+1, sym->name);
3153 gfc_trans_runtime_check (true, false, cond, &se->pre, where, msg,
3154 fold_convert (long_integer_type_node,
3156 fold_convert (long_integer_type_node, tmp));
3161 /* Multiply the index by the stride. */
3162 stride = gfc_conv_array_stride (se->expr, n);
3163 tmp = fold_build2_loc (input_location, MULT_EXPR, gfc_array_index_type,
3164 indexse.expr, stride);
3166 /* And add it to the total. */
3167 add_to_offset (&cst_offset, &offset, tmp);
3170 if (!integer_zerop (cst_offset))
3171 offset = fold_build2_loc (input_location, PLUS_EXPR,
3172 gfc_array_index_type, offset, cst_offset);
3174 /* Access the calculated element. */
3175 tmp = gfc_conv_array_data (se->expr);
3176 tmp = build_fold_indirect_ref (tmp);
3177 se->expr = gfc_build_array_ref (tmp, offset, sym->backend_decl);
3181 /* Add the offset corresponding to array's ARRAY_DIM dimension and loop's
3182 LOOP_DIM dimension (if any) to array's offset. */
/* Computes the per-dimension index contribution via
   conv_array_index_offset, folds it into info->offset, and forces the
   result into a temporary so it survives across loop bodies.
   NOTE(review): declarations and the opening brace are elided.  */
3185 add_array_offset (stmtblock_t *pblock, gfc_loopinfo *loop, gfc_ss *ss,
3186 gfc_array_ref *ar, int array_dim, int loop_dim)
3189 gfc_array_info *info;
3192 info = &ss->info->data.array;
3194 gfc_init_se (&se, NULL);
3196 se.expr = info->descriptor;
3197 stride = gfc_conv_array_stride (info->descriptor, array_dim);
3198 index = conv_array_index_offset (&se, ss, array_dim, loop_dim, ar, stride);
3199 gfc_add_block_to_block (pblock, &se.pre);
3201 info->offset = fold_build2_loc (input_location, PLUS_EXPR,
3202 gfc_array_index_type,
3203 info->offset, index);
3204 info->offset = gfc_evaluate_now (info->offset, pblock);
3208 /* Generate the code to be executed immediately before entering a
3209 scalarization loop. */
/* For each SS active in this loop (per ss_info->useflags & FLAG): at the
   innermost dimension, precompute the stride and the offsets of elemental
   dimensions; at outer dimensions, add the offset of the previous loop
   dimension.  Handles nested loops by looking at the parent loop when DIM
   is this loop's outermost dimension.
   NOTE(review): numbered listing with gaps -- declarations, 'continue'
   statements after the type filter, the branch structure around 3247-3270,
   and several closing braces are elided.  */
3212 gfc_trans_preloop_setup (gfc_loopinfo * loop, int dim, int flag,
3213 stmtblock_t * pblock)
3216 gfc_ss_info *ss_info;
3217 gfc_array_info *info;
3218 gfc_ss_type ss_type;
3220 gfc_loopinfo *ploop;
3224 /* This code will be executed before entering the scalarization loop
3225 for this dimension. */
3226 for (ss = loop->ss; ss != gfc_ss_terminator; ss = ss->loop_chain)
3230 if ((ss_info->useflags & flag) == 0)
3233 ss_type = ss_info->type;
3234 if (ss_type != GFC_SS_SECTION
3235 && ss_type != GFC_SS_FUNCTION
3236 && ss_type != GFC_SS_CONSTRUCTOR
3237 && ss_type != GFC_SS_COMPONENT)
3240 info = &ss_info->data.array;
3242 gcc_assert (dim < ss->dimen);
3243 gcc_assert (ss->dimen == loop->dimen);
3246 ar = &info->ref->u.ar;
3250 if (dim == loop->dimen - 1 && loop->parent != NULL)
3252 /* If we are in the outermost dimension of this loop, the previous
3253 dimension shall be in the parent loop. */
3254 gcc_assert (ss->parent != NULL);
3257 ploop = loop->parent;
3259 /* ss and ss->parent are about the same array. */
3260 gcc_assert (ss_info == pss->info);
3268 if (dim == loop->dimen - 1)
3273 /* For the time being, there is no loop reordering. */
3274 gcc_assert (i == ploop->order[i]);
3275 i = ploop->order[i];
3277 if (dim == loop->dimen - 1 && loop->parent == NULL)
3279 stride = gfc_conv_array_stride (info->descriptor,
3280 innermost_ss (ss)->dim[i]);
3282 /* Calculate the stride of the innermost loop. Hopefully this will
3283 allow the backend optimizers to do their stuff more effectively.
3285 info->stride0 = gfc_evaluate_now (stride, pblock);
3287 /* For the outermost loop calculate the offset due to any
3288 elemental dimensions. It will have been initialized with the
3289 base offset of the array. */
3292 for (i = 0; i < ar->dimen; i++)
3294 if (ar->dimen_type[i] != DIMEN_ELEMENT)
3297 add_array_offset (pblock, loop, ss, ar, i, /* unused */ -1);
3302 /* Add the offset for the previous loop dimension. */
3303 add_array_offset (pblock, ploop, ss, ar, pss->dim[i], i);
3305 /* Remember this offset for the second loop. */
3306 if (dim == loop->temp_dim - 1 && loop->parent == NULL)
3307 info->saved_offset = info->offset;
3312 /* Start a scalarized expression. Creates a scope and declares loop
/* Iterates dimensions from outermost to innermost (per loop->order),
   opening a statement block and creating an index variable "S" for each,
   then starts PBODY for the innermost loop body.
   NOTE(review): the rest of the header comment, the declaration list, and
   the lines computing 'flags' from 'dim < loop->temp_dim' (3334-3336) are
   elided from this listing.  */
3316 gfc_start_scalarized_body (gfc_loopinfo * loop, stmtblock_t * pbody)
3322 gcc_assert (!loop->array_parameter);
3324 for (dim = loop->dimen - 1; dim >= 0; dim--)
3326 n = loop->order[dim];
3328 gfc_start_block (&loop->code[n]);
3330 /* Create the loop variable. */
3331 loop->loopvar[n] = gfc_create_var (gfc_array_index_type, "S");
3333 if (dim < loop->temp_dim)
3337 /* Calculate values that will be constant within this loop. */
3338 gfc_trans_preloop_setup (loop, dim, flags, &loop->code[n]);
3340 gfc_start_block (pbody);
3344 /* Generates the actual loop code for a scalarization loop. */
/* Two code paths: inside an OpenMP WORKSHARE with scalarizer worksharing,
   the outermost loop becomes an OMP_FOR; otherwise an ordinary LOOP_EXPR
   with goto-based exit is built, optionally iterating in reverse when the
   dependency analysis set GFC_REVERSE_SET and no temporary is used.
   NOTE(review): numbered listing with gaps -- declarations, the 'else'
   joining the two paths, the from/to swap completion around 3419-3423, and
   several braces are elided.  */
3347 gfc_trans_scalarized_loop_end (gfc_loopinfo * loop, int n,
3348 stmtblock_t * pbody)
3359 if ((ompws_flags & (OMPWS_WORKSHARE_FLAG | OMPWS_SCALARIZER_WS))
3360 == (OMPWS_WORKSHARE_FLAG | OMPWS_SCALARIZER_WS)
3361 && n == loop->dimen - 1)
3363 /* We create an OMP_FOR construct for the outermost scalarized loop. */
3364 init = make_tree_vec (1);
3365 cond = make_tree_vec (1);
3366 incr = make_tree_vec (1);
3368 /* Cycle statement is implemented with a goto. Exit statement must not
3369 be present for this loop. */
3370 exit_label = gfc_build_label_decl (NULL_TREE);
3371 TREE_USED (exit_label) = 1;
3373 /* Label for cycle statements (if needed). */
3374 tmp = build1_v (LABEL_EXPR, exit_label);
3375 gfc_add_expr_to_block (pbody, tmp);
3377 stmt = make_node (OMP_FOR);
3379 TREE_TYPE (stmt) = void_type_node;
3380 OMP_FOR_BODY (stmt) = loopbody = gfc_finish_block (pbody);
3382 OMP_FOR_CLAUSES (stmt) = build_omp_clause (input_location,
3383 OMP_CLAUSE_SCHEDULE);
3384 OMP_CLAUSE_SCHEDULE_KIND (OMP_FOR_CLAUSES (stmt))
3385 = OMP_CLAUSE_SCHEDULE_STATIC;
3386 if (ompws_flags & OMPWS_NOWAIT)
3387 OMP_CLAUSE_CHAIN (OMP_FOR_CLAUSES (stmt))
3388 = build_omp_clause (input_location, OMP_CLAUSE_NOWAIT);
3390 /* Initialize the loopvar. */
3391 TREE_VEC_ELT (init, 0) = build2_v (MODIFY_EXPR, loop->loopvar[n],
3393 OMP_FOR_INIT (stmt) = init;
3394 /* The exit condition. */
3395 TREE_VEC_ELT (cond, 0) = build2_loc (input_location, LE_EXPR,
3397 loop->loopvar[n], loop->to[n]);
3398 SET_EXPR_LOCATION (TREE_VEC_ELT (cond, 0), input_location);
3399 OMP_FOR_COND (stmt) = cond;
3400 /* Increment the loopvar. */
3401 tmp = build2_loc (input_location, PLUS_EXPR, gfc_array_index_type,
3402 loop->loopvar[n], gfc_index_one_node);
3403 TREE_VEC_ELT (incr, 0) = fold_build2_loc (input_location, MODIFY_EXPR,
3404 void_type_node, loop->loopvar[n], tmp);
3405 OMP_FOR_INCR (stmt) = incr;
3407 ompws_flags &= ~OMPWS_CURR_SINGLEUNIT;
3408 gfc_add_expr_to_block (&loop->code[n], stmt);
3412 bool reverse_loop = (loop->reverse[n] == GFC_REVERSE_SET)
3413 && (loop->temp_ss == NULL);
3415 loopbody = gfc_finish_block (pbody);
3419 tmp = loop->from[n];
3420 loop->from[n] = loop->to[n];
3424 /* Initialize the loopvar. */
3425 if (loop->loopvar[n] != loop->from[n])
3426 gfc_add_modify (&loop->code[n], loop->loopvar[n], loop->from[n]);
3428 exit_label = gfc_build_label_decl (NULL_TREE);
3430 /* Generate the loop body. */
3431 gfc_init_block (&block);
3433 /* The exit condition. */
3434 cond = fold_build2_loc (input_location, reverse_loop ? LT_EXPR : GT_EXPR,
3435 boolean_type_node, loop->loopvar[n], loop->to[n]);
3436 tmp = build1_v (GOTO_EXPR, exit_label);
3437 TREE_USED (exit_label) = 1;
3438 tmp = build3_v (COND_EXPR, cond, tmp, build_empty_stmt (input_location));
3439 gfc_add_expr_to_block (&block, tmp);
3441 /* The main body. */
3442 gfc_add_expr_to_block (&block, loopbody);
3444 /* Increment the loopvar. */
3445 tmp = fold_build2_loc (input_location,
3446 reverse_loop ? MINUS_EXPR : PLUS_EXPR,
3447 gfc_array_index_type, loop->loopvar[n],
3448 gfc_index_one_node);
3450 gfc_add_modify (&block, loop->loopvar[n], tmp);
3452 /* Build the loop. */
3453 tmp = gfc_finish_block (&block);
3454 tmp = build1_v (LOOP_EXPR, tmp);
3455 gfc_add_expr_to_block (&loop->code[n], tmp);
3457 /* Add the exit label. */
3458 tmp = build1_v (LABEL_EXPR, exit_label);
3459 gfc_add_expr_to_block (&loop->code[n], tmp);
3465 /* Finishes and generates the loops for a scalarized expression. */
/* Closes all dimensions' loops from innermost outward, appends the result
   to loop->pre, and resets the per-SS useflags for top-level SS entries.
   NOTE(review): declarations and the 'pblock = body;' initialisation
   (lines 3469-3476) are elided from this listing.  */
3468 gfc_trans_scalarizing_loops (gfc_loopinfo * loop, stmtblock_t * body)
3473 stmtblock_t *pblock;
3477 /* Generate the loops. */
3478 for (dim = 0; dim < loop->dimen; dim++)
3480 n = loop->order[dim];
3481 gfc_trans_scalarized_loop_end (loop, n, pblock);
3482 loop->loopvar[n] = NULL_TREE;
3483 pblock = &loop->code[n];
3486 tmp = gfc_finish_block (pblock);
3487 gfc_add_expr_to_block (&loop->pre, tmp);
3489 /* Clear all the used flags. */
3490 for (ss = loop->ss; ss != gfc_ss_terminator; ss = ss->loop_chain)
3491 if (ss->parent == NULL)
3492 ss->info->useflags = 0;
3496 /* Finish the main body of a scalarized expression, and start the secondary
/* Used when a temporary splits the scalarization into two passes: closes
   the loops covered by the temporary, restores each affected SS's saved
   offset, reopens the inner loops with fresh "Q" index variables, and
   starts BODY for the copy-back code.  Flag value 2 marks SS entries that
   participate in the second loop.
   NOTE(review): rest of the header comment, declarations, 'continue's after
   the filters, and the loop-variable setup around 3546-3550 are elided.  */
3500 gfc_trans_scalarized_loop_boundary (gfc_loopinfo * loop, stmtblock_t * body)
3504 stmtblock_t *pblock;
3508 /* We finish as many loops as are used by the temporary. */
3509 for (dim = 0; dim < loop->temp_dim - 1; dim++)
3511 n = loop->order[dim];
3512 gfc_trans_scalarized_loop_end (loop, n, pblock);
3513 loop->loopvar[n] = NULL_TREE;
3514 pblock = &loop->code[n];
3517 /* We don't want to finish the outermost loop entirely. */
3518 n = loop->order[loop->temp_dim - 1];
3519 gfc_trans_scalarized_loop_end (loop, n, pblock);
3521 /* Restore the initial offsets. */
3522 for (ss = loop->ss; ss != gfc_ss_terminator; ss = ss->loop_chain)
3524 gfc_ss_type ss_type;
3525 gfc_ss_info *ss_info;
3529 if ((ss_info->useflags & 2) == 0)
3532 ss_type = ss_info->type;
3533 if (ss_type != GFC_SS_SECTION
3534 && ss_type != GFC_SS_FUNCTION
3535 && ss_type != GFC_SS_CONSTRUCTOR
3536 && ss_type != GFC_SS_COMPONENT)
3539 ss_info->data.array.offset = ss_info->data.array.saved_offset;
3542 /* Restart all the inner loops we just finished. */
3543 for (dim = loop->temp_dim - 2; dim >= 0; dim--)
3545 n = loop->order[dim];
3547 gfc_start_block (&loop->code[n]);
3549 loop->loopvar[n] = gfc_create_var (gfc_array_index_type, "Q");
3551 gfc_trans_preloop_setup (loop, dim, 2, &loop->code[n]);
3554 /* Start a block for the secondary copying code. */
3555 gfc_start_block (body);
3559 /* Precalculate (either lower or upper) bound of an array section.
3560 BLOCK: Block in which the (pre)calculation code will go.
3561 BOUNDS[DIM]: Where the bound value will be stored once evaluated.
3562 VALUES[DIM]: Specified bound (NULL <=> unspecified).
3563 DESC: Array descriptor from which the bound will be picked if unspecified
3564 (either lower or upper bound according to LBOUND). */
/* NOTE(review): gaps elide the return type, the 'if (input_val)' guard
   visible only through its comment at 3577, the assignment of se.expr to
   *output, and the 'else'.  The result is always forced into a temporary
   via gfc_evaluate_now.  */
3567 evaluate_bound (stmtblock_t *block, tree *bounds, gfc_expr ** values,
3568 tree desc, int dim, bool lbound)
3571 gfc_expr * input_val = values[dim];
3572 tree *output = &bounds[dim];
3577 /* Specified section bound. */
3578 gfc_init_se (&se, NULL);
3579 gfc_conv_expr_type (&se, input_val, gfc_array_index_type);
3580 gfc_add_block_to_block (block, &se.pre);
3585 /* No specific bound specified so use the bound of the array. */
3586 *output = lbound ? gfc_conv_array_lbound (desc, dim) :
3587 gfc_conv_array_ubound (desc, dim);
3589 *output = gfc_evaluate_now (*output, block);
3593 /* Calculate the lower bound of an array section. */
/* Fills info->start/end/stride for dimension DIM of a GFC_SS_SECTION.
   Vector subscripts get a fixed zero-based start with unit stride; range
   subscripts evaluate the user's start/end/stride expressions (defaulting
   the stride to one when absent).
   NOTE(review): gaps elide the return type/declarations, the 'return'
   closing the vector case, and the 'if (stride == NULL)'/'else' around
   lines 3633-3636.  */
3596 gfc_conv_section_startstride (gfc_loopinfo * loop, gfc_ss * ss, int dim)
3598 gfc_expr *stride = NULL;
3601 gfc_array_info *info;
3604 gcc_assert (ss->info->type == GFC_SS_SECTION);
3606 info = &ss->info->data.array;
3607 ar = &info->ref->u.ar;
3609 if (ar->dimen_type[dim] == DIMEN_VECTOR)
3611 /* We use a zero-based index to access the vector. */
3612 info->start[dim] = gfc_index_zero_node;
3613 info->end[dim] = NULL;
3614 info->stride[dim] = gfc_index_one_node;
3618 gcc_assert (ar->dimen_type[dim] == DIMEN_RANGE
3619 || ar->dimen_type[dim] == DIMEN_THIS_IMAGE);
3620 desc = info->descriptor;
3621 stride = ar->stride[dim];
3623 /* Calculate the start of the range. For vector subscripts this will
3624 be the range of the vector. */
3625 evaluate_bound (&loop->pre, info->start, ar->start, desc, dim, true);
3627 /* Similarly calculate the end. Although this is not used in the
3628 scalarizer, it is needed when checking bounds and where the end
3629 is an expression with side-effects. */
3630 evaluate_bound (&loop->pre, info->end, ar->end, desc, dim, false);
3632 /* Calculate the stride. */
3634 info->stride[dim] = gfc_index_one_node;
3637 gfc_init_se (&se, NULL);
3638 gfc_conv_expr_type (&se, stride, gfc_array_index_type);
3639 gfc_add_block_to_block (&loop->pre, &se.pre);
3640 info->stride[dim] = gfc_evaluate_now (se.expr, &loop->pre);
3645 /* Calculates the range start and stride for a SS chain. Also gets the
3646 descriptor and data pointer. The range of vector subscripts is the size
3647 of the vector. Array bounds are also checked. */
3650 gfc_conv_ss_startstride (gfc_loopinfo * loop)
3658 /* Determine the rank of the loop. */
3659 for (ss = loop->ss; ss != gfc_ss_terminator; ss = ss->loop_chain)
3661 switch (ss->info->type)
3663 case GFC_SS_SECTION:
3664 case GFC_SS_CONSTRUCTOR:
3665 case GFC_SS_FUNCTION:
3666 case GFC_SS_COMPONENT:
3667 loop->dimen = ss->dimen;
3670 /* As usual, lbound and ubound are exceptions!. */
3671 case GFC_SS_INTRINSIC:
3672 switch (ss->info->expr->value.function.isym->id)
3674 case GFC_ISYM_LBOUND:
3675 case GFC_ISYM_UBOUND:
3676 case GFC_ISYM_LCOBOUND:
3677 case GFC_ISYM_UCOBOUND:
3678 case GFC_ISYM_THIS_IMAGE:
3679 loop->dimen = ss->dimen;
3691 /* We should have determined the rank of the expression by now. If
3692 not, that's bad news. */
3696 /* Loop over all the SS in the chain. */
3697 for (ss = loop->ss; ss != gfc_ss_terminator; ss = ss->loop_chain)
3699 gfc_ss_info *ss_info;
3700 gfc_array_info *info;
3704 expr = ss_info->expr;
3705 info = &ss_info->data.array;
3707 if (expr && expr->shape && !info->shape)
3708 info->shape = expr->shape;
3710 switch (ss_info->type)
3712 case GFC_SS_SECTION:
3713 /* Get the descriptor for the array. If it is a cross loops array,
3714 we got the descriptor already in the outermost loop. */
3715 if (ss->parent == NULL)
3716 gfc_conv_ss_descriptor (&loop->pre, ss, !loop->array_parameter);
3718 for (n = 0; n < ss->dimen; n++)
3719 gfc_conv_section_startstride (loop, ss, ss->dim[n]);
3722 case GFC_SS_INTRINSIC:
3723 switch (expr->value.function.isym->id)
3725 /* Fall through to supply start and stride. */
3726 case GFC_ISYM_LBOUND:
3727 case GFC_ISYM_UBOUND:
3728 case GFC_ISYM_LCOBOUND:
3729 case GFC_ISYM_UCOBOUND:
3730 case GFC_ISYM_THIS_IMAGE:
3737 case GFC_SS_CONSTRUCTOR:
3738 case GFC_SS_FUNCTION:
3739 for (n = 0; n < ss->dimen; n++)
3741 int dim = ss->dim[n];
3743 info->start[dim] = gfc_index_zero_node;
3744 info->end[dim] = gfc_index_zero_node;
3745 info->stride[dim] = gfc_index_one_node;
3754 /* The rest is just runtime bound checking. */
3755 if (gfc_option.rtcheck & GFC_RTCHECK_BOUNDS)
3758 tree lbound, ubound;
3760 tree size[GFC_MAX_DIMENSIONS];
3761 tree stride_pos, stride_neg, non_zerosized, tmp2, tmp3;
3762 gfc_array_info *info;
3766 gfc_start_block (&block);
3768 for (n = 0; n < loop->dimen; n++)
3769 size[n] = NULL_TREE;
3771 for (ss = loop->ss; ss != gfc_ss_terminator; ss = ss->loop_chain)
3774 gfc_ss_info *ss_info;
3777 const char *expr_name;
3780 if (ss_info->type != GFC_SS_SECTION)
3783 /* Catch allocatable lhs in f2003. */
3784 if (gfc_option.flag_realloc_lhs && ss->is_alloc_lhs)
3787 expr = ss_info->expr;
3788 expr_loc = &expr->where;
3789 expr_name = expr->symtree->name;
3791 gfc_start_block (&inner);
3793 /* TODO: range checking for mapped dimensions. */
3794 info = &ss_info->data.array;
3796 /* This code only checks ranges. Elemental and vector
3797 dimensions are checked later. */
3798 for (n = 0; n < loop->dimen; n++)
3803 if (info->ref->u.ar.dimen_type[dim] != DIMEN_RANGE)
3806 if (dim == info->ref->u.ar.dimen - 1
3807 && info->ref->u.ar.as->type == AS_ASSUMED_SIZE)
3808 check_upper = false;
3812 /* Zero stride is not allowed. */
3813 tmp = fold_build2_loc (input_location, EQ_EXPR, boolean_type_node,
3814 info->stride[dim], gfc_index_zero_node);
3815 asprintf (&msg, "Zero stride is not allowed, for dimension %d "
3816 "of array '%s'", dim + 1, expr_name);
3817 gfc_trans_runtime_check (true, false, tmp, &inner,
3821 desc = info->descriptor;
3823 /* This is the run-time equivalent of resolve.c's
3824 check_dimension(). The logical is more readable there
3825 than it is here, with all the trees. */
3826 lbound = gfc_conv_array_lbound (desc, dim);
3827 end = info->end[dim];
3829 ubound = gfc_conv_array_ubound (desc, dim);
3833 /* non_zerosized is true when the selected range is not
3835 stride_pos = fold_build2_loc (input_location, GT_EXPR,
3836 boolean_type_node, info->stride[dim],
3837 gfc_index_zero_node);
3838 tmp = fold_build2_loc (input_location, LE_EXPR, boolean_type_node,
3839 info->start[dim], end);
3840 stride_pos = fold_build2_loc (input_location, TRUTH_AND_EXPR,
3841 boolean_type_node, stride_pos, tmp);
3843 stride_neg = fold_build2_loc (input_location, LT_EXPR,
3845 info->stride[dim], gfc_index_zero_node);
3846 tmp = fold_build2_loc (input_location, GE_EXPR, boolean_type_node,
3847 info->start[dim], end);
3848 stride_neg = fold_build2_loc (input_location, TRUTH_AND_EXPR,
3851 non_zerosized = fold_build2_loc (input_location, TRUTH_OR_EXPR,
3853 stride_pos, stride_neg);
3855 /* Check the start of the range against the lower and upper
3856 bounds of the array, if the range is not empty.
3857 If upper bound is present, include both bounds in the
3861 tmp = fold_build2_loc (input_location, LT_EXPR,
3863 info->start[dim], lbound);
3864 tmp = fold_build2_loc (input_location, TRUTH_AND_EXPR,
3866 non_zerosized, tmp);
3867 tmp2 = fold_build2_loc (input_location, GT_EXPR,
3869 info->start[dim], ubound);
3870 tmp2 = fold_build2_loc (input_location, TRUTH_AND_EXPR,
3872 non_zerosized, tmp2);
3873 asprintf (&msg, "Index '%%ld' of dimension %d of array '%s' "
3874 "outside of expected range (%%ld:%%ld)",
3875 dim + 1, expr_name);
3876 gfc_trans_runtime_check (true, false, tmp, &inner,
3878 fold_convert (long_integer_type_node, info->start[dim]),
3879 fold_convert (long_integer_type_node, lbound),
3880 fold_convert (long_integer_type_node, ubound));
3881 gfc_trans_runtime_check (true, false, tmp2, &inner,
3883 fold_convert (long_integer_type_node, info->start[dim]),
3884 fold_convert (long_integer_type_node, lbound),
3885 fold_convert (long_integer_type_node, ubound));
3890 tmp = fold_build2_loc (input_location, LT_EXPR,
3892 info->start[dim], lbound);
3893 tmp = fold_build2_loc (input_location, TRUTH_AND_EXPR,
3894 boolean_type_node, non_zerosized, tmp);
3895 asprintf (&msg, "Index '%%ld' of dimension %d of array '%s' "
3896 "below lower bound of %%ld",
3897 dim + 1, expr_name);
3898 gfc_trans_runtime_check (true, false, tmp, &inner,
3900 fold_convert (long_integer_type_node, info->start[dim]),
3901 fold_convert (long_integer_type_node, lbound));
3905 /* Compute the last element of the range, which is not
3906 necessarily "end" (think 0:5:3, which doesn't contain 5)
3907 and check it against both lower and upper bounds. */
3909 tmp = fold_build2_loc (input_location, MINUS_EXPR,
3910 gfc_array_index_type, end,
3912 tmp = fold_build2_loc (input_location, TRUNC_MOD_EXPR,
3913 gfc_array_index_type, tmp,
3915 tmp = fold_build2_loc (input_location, MINUS_EXPR,
3916 gfc_array_index_type, end, tmp);
3917 tmp2 = fold_build2_loc (input_location, LT_EXPR,
3918 boolean_type_node, tmp, lbound);
3919 tmp2 = fold_build2_loc (input_location, TRUTH_AND_EXPR,
3920 boolean_type_node, non_zerosized, tmp2);
3923 tmp3 = fold_build2_loc (input_location, GT_EXPR,
3924 boolean_type_node, tmp, ubound);
3925 tmp3 = fold_build2_loc (input_location, TRUTH_AND_EXPR,
3926 boolean_type_node, non_zerosized, tmp3);
3927 asprintf (&msg, "Index '%%ld' of dimension %d of array '%s' "
3928 "outside of expected range (%%ld:%%ld)",
3929 dim + 1, expr_name);
3930 gfc_trans_runtime_check (true, false, tmp2, &inner,
3932 fold_convert (long_integer_type_node, tmp),
3933 fold_convert (long_integer_type_node, ubound),
3934 fold_convert (long_integer_type_node, lbound));
3935 gfc_trans_runtime_check (true, false, tmp3, &inner,
3937 fold_convert (long_integer_type_node, tmp),
3938 fold_convert (long_integer_type_node, ubound),
3939 fold_convert (long_integer_type_node, lbound));
3944 asprintf (&msg, "Index '%%ld' of dimension %d of array '%s' "
3945 "below lower bound of %%ld",
3946 dim + 1, expr_name);
3947 gfc_trans_runtime_check (true, false, tmp2, &inner,
3949 fold_convert (long_integer_type_node, tmp),
3950 fold_convert (long_integer_type_node, lbound));
3954 /* Check the section sizes match. */
3955 tmp = fold_build2_loc (input_location, MINUS_EXPR,
3956 gfc_array_index_type, end,
3958 tmp = fold_build2_loc (input_location, FLOOR_DIV_EXPR,
3959 gfc_array_index_type, tmp,
3961 tmp = fold_build2_loc (input_location, PLUS_EXPR,
3962 gfc_array_index_type,
3963 gfc_index_one_node, tmp);
3964 tmp = fold_build2_loc (input_location, MAX_EXPR,
3965 gfc_array_index_type, tmp,
3966 build_int_cst (gfc_array_index_type, 0));
3967 /* We remember the size of the first section, and check all the
3968 others against this. */
3971 tmp3 = fold_build2_loc (input_location, NE_EXPR,
3972 boolean_type_node, tmp, size[n]);
3973 asprintf (&msg, "Array bound mismatch for dimension %d "
3974 "of array '%s' (%%ld/%%ld)",
3975 dim + 1, expr_name);
3977 gfc_trans_runtime_check (true, false, tmp3, &inner,
3979 fold_convert (long_integer_type_node, tmp),
3980 fold_convert (long_integer_type_node, size[n]));
3985 size[n] = gfc_evaluate_now (tmp, &inner);
3988 tmp = gfc_finish_block (&inner);
3990 /* For optional arguments, only check bounds if the argument is
3992 if (expr->symtree->n.sym->attr.optional
3993 || expr->symtree->n.sym->attr.not_always_present)
3994 tmp = build3_v (COND_EXPR,
3995 gfc_conv_expr_present (expr->symtree->n.sym),
3996 tmp, build_empty_stmt (input_location));
3998 gfc_add_expr_to_block (&block, tmp);
4002 tmp = gfc_finish_block (&block);
4003 gfc_add_expr_to_block (&loop->pre, tmp);
4006 for (loop = loop->nested; loop; loop = loop->next)
4007 gfc_conv_ss_startstride (loop);
4010 /* Return true if both symbols could refer to the same data object. Does
4011 not take account of aliasing due to equivalence statements. */
/* The *_pointer / *_target flags are passed in (rather than read from the
 symbols) so that callers walking component references can OR in the
 attributes accumulated along the reference chain. */
4014 symbols_could_alias (gfc_symbol *lsym, gfc_symbol *rsym, bool lsym_pointer,
4015 bool lsym_target, bool rsym_pointer, bool rsym_target)
4017 /* Aliasing isn't possible if the symbols have different base types. */
4018 if (gfc_compare_types (&lsym->ts, &rsym->ts) == 0)
4021 /* Pointers can point to other pointers and target objects. */
4023 if ((lsym_pointer && (rsym_pointer || rsym_target))
4024 || (rsym_pointer && (lsym_pointer || lsym_target)))
4027 /* Special case: Argument association, cf. F90 12.4.1.6, F2003 12.4.1.7
4028 and F2008 12.5.2.13 items 3b and 4b. The pointer case (a) is already
4030 if (lsym_target && rsym_target
/* Two TARGETs may overlap via argument association when either is a
 non-contiguous dummy that is scalar or assumed-shape. */
4031 && ((lsym->attr.dummy && !lsym->attr.contiguous
4032 && (!lsym->attr.dimension || lsym->as->type == AS_ASSUMED_SHAPE))
4033 || (rsym->attr.dummy && !rsym->attr.contiguous
4034 && (!rsym->attr.dimension
4035 || rsym->as->type == AS_ASSUMED_SHAPE))))
4042 /* Return true if the two SS could be aliased, i.e. both point to the same data
4044 /* TODO: resolve aliases based on frontend expressions. */
4047 gfc_could_be_alias (gfc_ss * lss, gfc_ss * rss)
4051 gfc_expr *lexpr, *rexpr;
4054 bool lsym_pointer, lsym_target, rsym_pointer, rsym_target;
4056 lexpr = lss->info->expr;
4057 rexpr = rss->info->expr;
4059 lsym = lexpr->symtree->n.sym;
4060 rsym = rexpr->symtree->n.sym;
4062 lsym_pointer = lsym->attr.pointer;
4063 lsym_target = lsym->attr.target;
4064 rsym_pointer = rsym->attr.pointer;
4065 rsym_target = rsym->attr.target;
4067 if (symbols_could_alias (lsym, rsym, lsym_pointer, lsym_target,
4068 rsym_pointer, rsym_target))
4071 if (rsym->ts.type != BT_DERIVED && rsym->ts.type != BT_CLASS
4072 && lsym->ts.type != BT_DERIVED && lsym->ts.type != BT_CLASS)
4075 /* For derived types we must check all the component types. We can ignore
4076 array references as these will have the same base type as the previous
4078 for (lref = lexpr->ref; lref != lss->info->data.array.ref; lref = lref->next)
4080 if (lref->type != REF_COMPONENT)
4083 lsym_pointer = lsym_pointer || lref->u.c.sym->attr.pointer;
4084 lsym_target = lsym_target || lref->u.c.sym->attr.target;
4086 if (symbols_could_alias (lref->u.c.sym, rsym, lsym_pointer, lsym_target,
4087 rsym_pointer, rsym_target))
4090 if ((lsym_pointer && (rsym_pointer || rsym_target))
4091 || (rsym_pointer && (lsym_pointer || lsym_target)))
4093 if (gfc_compare_types (&lref->u.c.component->ts,
4098 for (rref = rexpr->ref; rref != rss->info->data.array.ref;
4101 if (rref->type != REF_COMPONENT)
4104 rsym_pointer = rsym_pointer || rref->u.c.sym->attr.pointer;
4105 rsym_target = lsym_target || rref->u.c.sym->attr.target;
4107 if (symbols_could_alias (lref->u.c.sym, rref->u.c.sym,
4108 lsym_pointer, lsym_target,
4109 rsym_pointer, rsym_target))
4112 if ((lsym_pointer && (rsym_pointer || rsym_target))
4113 || (rsym_pointer && (lsym_pointer || lsym_target)))
4115 if (gfc_compare_types (&lref->u.c.component->ts,
4116 &rref->u.c.sym->ts))
4118 if (gfc_compare_types (&lref->u.c.sym->ts,
4119 &rref->u.c.component->ts))
4121 if (gfc_compare_types (&lref->u.c.component->ts,
4122 &rref->u.c.component->ts))
4128 lsym_pointer = lsym->attr.pointer;
4129 lsym_target = lsym->attr.target;
4130 lsym_pointer = lsym->attr.pointer;
4131 lsym_target = lsym->attr.target;
4133 for (rref = rexpr->ref; rref != rss->info->data.array.ref; rref = rref->next)
4135 if (rref->type != REF_COMPONENT)
4138 rsym_pointer = rsym_pointer || rref->u.c.sym->attr.pointer;
4139 rsym_target = lsym_target || rref->u.c.sym->attr.target;
4141 if (symbols_could_alias (rref->u.c.sym, lsym,
4142 lsym_pointer, lsym_target,
4143 rsym_pointer, rsym_target))
4146 if ((lsym_pointer && (rsym_pointer || rsym_target))
4147 || (rsym_pointer && (lsym_pointer || lsym_target)))
4149 if (gfc_compare_types (&lsym->ts, &rref->u.c.component->ts))
4158 /* Resolve array data dependencies. Creates a temporary if required. */
4159 /* TODO: Calc dependencies with gfc_expr rather than gfc_ss, and move to
4163 gfc_conv_resolve_dependencies (gfc_loopinfo * loop, gfc_ss * dest,
4169 gfc_expr *dest_expr;
4174 loop->temp_ss = NULL;
4175 dest_expr = dest->info->expr;
/* Scan every section term on the rhs chain for a possible overlap with
 the destination. */
4177 for (ss = rss; ss != gfc_ss_terminator; ss = ss->next)
4179 if (ss->info->type != GFC_SS_SECTION)
4182 ss_expr = ss->info->expr;
4184 if (dest_expr->symtree->n.sym != ss_expr->symtree->n.sym)
/* Different base symbols: only aliasing (pointer/target/derived
 components) or EQUIVALENCE can make them overlap. */
4186 if (gfc_could_be_alias (dest, ss)
4187 || gfc_are_equivalenced_arrays (dest_expr, ss_expr))
4195 lref = dest_expr->ref;
4196 rref = ss_expr->ref;
/* Same symbol on both sides: let the dependency analyzer decide. */
4198 nDepend = gfc_dep_resolver (lref, rref, &loop->reverse[0]);
4203 for (i = 0; i < dest->dimen; i++)
4204 for (j = 0; j < ss->dimen; j++)
4206 && dest->dim[i] == ss->dim[j])
4208 /* If we don't access array elements in the same order,
4209 there is a dependency. */
4214 /* TODO : loop shifting. */
4217 /* Mark the dimensions for LOOP SHIFTING */
4218 for (n = 0; n < loop->dimen; n++)
4220 int dim = dest->data.info.dim[n];
4222 if (lref->u.ar.dimen_type[dim] == DIMEN_VECTOR)
4224 else if (! gfc_is_same_range (&lref->u.ar,
4225 &rref->u.ar, dim, 0))
4229 /* Put all the dimensions with dependencies in the
4232 for (n = 0; n < loop->dimen; n++)
4234 gcc_assert (loop->order[n] == n);
4236 loop->order[dim++] = n;
4238 for (n = 0; n < loop->dimen; n++)
4241 loop->order[dim++] = n;
4244 gcc_assert (dim == loop->dimen);
/* A dependency was found: evaluate the rhs into a temporary of the
 destination's element type before assigning. */
4255 tree base_type = gfc_typenode_for_spec (&dest_expr->ts);
4256 if (GFC_ARRAY_TYPE_P (base_type)
4257 || GFC_DESCRIPTOR_TYPE_P (base_type))
4258 base_type = gfc_get_element_type (base_type);
4259 loop->temp_ss = gfc_get_temp_ss (base_type, dest->info->string_length,
4261 gfc_add_ss_to_loop (loop, loop->temp_ss);
4264 loop->temp_ss = NULL;
4268 /* Browse through each array's information from the scalarizer and set the loop
4269 bounds according to the "best" one (per dimension), i.e. the one which
4270 provides the most information (constant bounds, shape, etc). */
4273 set_loop_bounds (gfc_loopinfo *loop)
4275 int n, dim, spec_dim;
4276 gfc_array_info *info;
4277 gfc_array_info *specinfo;
4281 bool dynamic[GFC_MAX_DIMENSIONS];
4285 loopspec = loop->specloop;
/* Pick one "specifier" SS per loop dimension. */
4288 for (n = 0; n < loop->dimen; n++)
4292 /* We use one SS term, and use that to determine the bounds of the
4293 loop for this dimension. We try to pick the simplest term. */
4294 for (ss = loop->ss; ss != gfc_ss_terminator; ss = ss->loop_chain)
4296 gfc_ss_type ss_type;
/* Scalar-like terms carry no loop-bound information. */
4298 ss_type = ss->info->type;
4299 if (ss_type == GFC_SS_SCALAR
4300 || ss_type == GFC_SS_TEMP
4301 || ss_type == GFC_SS_REFERENCE)
4304 info = &ss->info->data.array;
4307 if (loopspec[n] != NULL)
4309 specinfo = &loopspec[n]->info->data.array;
4310 spec_dim = loopspec[n]->dim[n];
4314 /* Silence unitialized warnings. */
4321 gcc_assert (info->shape[dim]);
4322 /* The frontend has worked out the size for us. */
4325 || !integer_zerop (specinfo->start[spec_dim]))
4326 /* Prefer zero-based descriptors if possible. */
4331 if (ss_type == GFC_SS_CONSTRUCTOR)
4333 gfc_constructor_base base;
4334 /* An unknown size constructor will always be rank one.
4335 Higher rank constructors will either have known shape,
4336 or still be wrapped in a call to reshape. */
4337 gcc_assert (loop->dimen == 1);
4339 /* Always prefer to use the constructor bounds if the size
4340 can be determined at compile time. Prefer not to otherwise,
4341 since the general case involves realloc, and it's better to
4342 avoid that overhead if possible. */
4343 base = ss->info->expr->value.constructor;
4344 dynamic[n] = gfc_get_array_constructor_size (&i, base);
4345 if (!dynamic[n] || !loopspec[n])
4350 /* TODO: Pick the best bound if we have a choice between a
4351 function and something else. */
4352 if (ss_type == GFC_SS_FUNCTION)
4358 /* Avoid using an allocatable lhs in an assignment, since
4359 there might be a reallocation coming. */
4360 if (loopspec[n] && ss->is_alloc_lhs)
4363 if (ss_type != GFC_SS_SECTION)
4368 /* Criteria for choosing a loop specifier (most important first):
4369 doesn't need realloc
4375 else if ((loopspec[n]->info->type == GFC_SS_CONSTRUCTOR && dynamic[n])
4376 || n >= loop->dimen)
4378 else if (integer_onep (info->stride[dim])
4379 && !integer_onep (specinfo->stride[spec_dim]))
4381 else if (INTEGER_CST_P (info->stride[dim])
4382 && !INTEGER_CST_P (specinfo->stride[spec_dim]))
4384 else if (INTEGER_CST_P (info->start[dim])
4385 && !INTEGER_CST_P (specinfo->start[spec_dim]))
4387 /* We don't work out the upper bound.
4388 else if (INTEGER_CST_P (info->finish[n])
4389 && ! INTEGER_CST_P (specinfo->finish[n]))
4390 loopspec[n] = ss; */
4393 /* We should have found the scalarization loop specifier. If not,
4395 gcc_assert (loopspec[n]);
4397 info = &loopspec[n]->info->data.array;
4398 dim = loopspec[n]->dim[n];
4400 /* Set the extents of this range. */
4401 cshape = info->shape;
/* With a frontend-known shape and constant start/stride, compute the
 upper bound at compile time. */
4402 if (cshape && INTEGER_CST_P (info->start[dim])
4403 && INTEGER_CST_P (info->stride[dim]))
4405 loop->from[n] = info->start[dim];
4406 mpz_set (i, cshape[get_array_ref_dim_for_loop_dim (loopspec[n], n)]);
4407 mpz_sub_ui (i, i, 1);
4408 /* To = from + (size - 1) * stride. */
4409 tmp = gfc_conv_mpz_to_tree (i, gfc_index_integer_kind);
4410 if (!integer_onep (info->stride[dim]))
4411 tmp = fold_build2_loc (input_location, MULT_EXPR,
4412 gfc_array_index_type, tmp,
4414 loop->to[n] = fold_build2_loc (input_location, PLUS_EXPR,
4415 gfc_array_index_type,
4416 loop->from[n], tmp);
4420 loop->from[n] = info->start[dim];
4421 switch (loopspec[n]->info->type)
4423 case GFC_SS_CONSTRUCTOR:
4424 /* The upper bound is calculated when we expand the
4426 gcc_assert (loop->to[n] == NULL_TREE);
4429 case GFC_SS_SECTION:
4430 /* Use the end expression if it exists and is not constant,
4431 so that it is only evaluated once. */
4432 loop->to[n] = info->end[dim];
4435 case GFC_SS_FUNCTION:
4436 /* The loop bound will be set when we generate the call. */
4437 gcc_assert (loop->to[n] == NULL_TREE);
4445 /* Transform everything so we have a simple incrementing variable. */
4446 if (integer_onep (info->stride[dim]))
4447 info->delta[dim] = gfc_index_zero_node;
4450 /* Set the delta for this section. */
4451 info->delta[dim] = gfc_evaluate_now (loop->from[n], &loop->pre);
4452 /* Number of iterations is (end - start + step) / step.
4453 with start = 0, this simplifies to
4455 for (i = 0; i<=last; i++){...}; */
4456 tmp = fold_build2_loc (input_location, MINUS_EXPR,
4457 gfc_array_index_type, loop->to[n],
4459 tmp = fold_build2_loc (input_location, FLOOR_DIV_EXPR,
4460 gfc_array_index_type, tmp, info->stride[dim]);
/* Clamp to -1 so an empty range yields a zero-trip loop (0..-1). */
4461 tmp = fold_build2_loc (input_location, MAX_EXPR, gfc_array_index_type,
4462 tmp, build_int_cst (gfc_array_index_type, -1));
4463 loop->to[n] = gfc_evaluate_now (tmp, &loop->pre);
4464 /* Make the loop variable start at 0. */
4465 loop->from[n] = gfc_index_zero_node;
/* Recurse into nested loops so every level gets its bounds set. */
4470 for (loop = loop->nested; loop; loop = loop->next)
4471 set_loop_bounds (loop);
4475 /* Initialize the scalarization loop. Creates the loop variables. Determines
4476 the range of the loop variables. Creates a temporary if required.
4477 Also generates code for scalar expressions which have been
4478 moved outside the loop. */
4481 gfc_conv_loop_setup (gfc_loopinfo * loop, locus * where)
4486 set_loop_bounds (loop);
4488 /* Add all the scalar code that can be taken out of the loops.
4489 This may include calculating the loop bounds, so do it before
4490 allocating the temporary. */
4491 gfc_add_loop_ss_code (loop, loop->ss, false, where);
4493 tmp_ss = loop->temp_ss;
4494 /* If we want a temporary then create it. */
4497 gfc_ss_info *tmp_ss_info;
4499 tmp_ss_info = tmp_ss->info;
4500 gcc_assert (tmp_ss_info->type == GFC_SS_TEMP);
4501 gcc_assert (loop->parent == NULL);
4503 /* Make absolutely sure that this is a complete type. */
4504 if (tmp_ss_info->string_length)
4505 tmp_ss_info->data.temp.type
4506 = gfc_get_character_type_len_for_eltype
4507 (TREE_TYPE (tmp_ss_info->data.temp.type),
4508 tmp_ss_info->string_length);
4510 tmp = tmp_ss_info->data.temp.type;
/* Once allocated, the temporary is treated like any other array
 section: wipe the temp info and retag the SS. */
4511 memset (&tmp_ss_info->data.array, 0, sizeof (gfc_array_info));
4512 tmp_ss_info->type = GFC_SS_SECTION;
4514 gcc_assert (tmp_ss->dimen != 0);
4516 gfc_trans_create_temp_array (&loop->pre, &loop->post, tmp_ss, tmp,
4517 NULL_TREE, false, true, false, where);
4520 /* For array parameters we don't have loop variables, so don't calculate the
4522 if (!loop->array_parameter)
4523 gfc_set_delta (loop);
4527 /* Calculates how to transform from loop variables to array indices for each
4528 array: once loop bounds are chosen, sets the difference (DELTA field) between
4529 loop bounds and array reference bounds, for each array info. */
4532 gfc_set_delta (gfc_loopinfo *loop)
4534 gfc_ss *ss, **loopspec;
4535 gfc_array_info *info;
4539 loopspec = loop->specloop;
4541 /* Calculate the translation from loop variables to array indices. */
4542 for (ss = loop->ss; ss != gfc_ss_terminator; ss = ss->loop_chain)
4544 gfc_ss_type ss_type;
/* Only terms with per-dimension array info need a delta. */
4546 ss_type = ss->info->type;
4547 if (ss_type != GFC_SS_SECTION
4548 && ss_type != GFC_SS_COMPONENT
4549 && ss_type != GFC_SS_CONSTRUCTOR)
4552 info = &ss->info->data.array;
4554 for (n = 0; n < ss->dimen; n++)
4556 /* If we are specifying the range the delta is already set. */
4557 if (loopspec[n] != ss)
4561 /* Calculate the offset relative to the loop variable.
4562 First multiply by the stride. */
4563 tmp = loop->from[n];
4564 if (!integer_onep (info->stride[dim]))
4565 tmp = fold_build2_loc (input_location, MULT_EXPR,
4566 gfc_array_index_type,
4567 tmp, info->stride[dim]);
4569 /* Then subtract this from our starting value. */
4570 tmp = fold_build2_loc (input_location, MINUS_EXPR,
4571 gfc_array_index_type,
4572 info->start[dim], tmp);
4574 info->delta[dim] = gfc_evaluate_now (tmp, &loop->pre);
/* Recurse into nested loops. */
4579 for (loop = loop->nested; loop; loop = loop->next)
4580 gfc_set_delta (loop);
4584 /* Calculate the size of a given array dimension from the bounds. This
4585 is simply (ubound - lbound + 1) if this expression is positive
4586 or 0 if it is negative (pick either one if it is zero). Optionally
4587 (if or_expr is present) OR the (expression != 0) condition to it. */
4590 gfc_conv_array_extent_dim (tree lbound, tree ubound, tree* or_expr)
4595 /* Calculate (ubound - lbound + 1). */
4596 res = fold_build2_loc (input_location, MINUS_EXPR, gfc_array_index_type,
4598 res = fold_build2_loc (input_location, PLUS_EXPR, gfc_array_index_type, res,
4599 gfc_index_one_node);
4601 /* Check whether the size for this dimension is negative. */
4602 cond = fold_build2_loc (input_location, LE_EXPR, boolean_type_node, res,
4603 gfc_index_zero_node);
/* Clamp negative extents to zero (empty dimension). */
4604 res = fold_build3_loc (input_location, COND_EXPR, gfc_array_index_type, cond,
4605 gfc_index_zero_node, res);
4607 /* Build OR expression. */
4609 *or_expr = fold_build2_loc (input_location, TRUTH_OR_EXPR,
4610 boolean_type_node, *or_expr, cond);
4616 /* For an array descriptor, get the total number of elements. This is just
4617 the product of the extents along from_dim to to_dim. */
/* Note: the range is half-open, [from_dim, to_dim). */
4620 gfc_conv_descriptor_size_1 (tree desc, int from_dim, int to_dim)
4625 res = gfc_index_one_node;
4627 for (dim = from_dim; dim < to_dim; ++dim)
4633 lbound = gfc_conv_descriptor_lbound_get (desc, gfc_rank_cst[dim]);
4634 ubound = gfc_conv_descriptor_ubound_get (desc, gfc_rank_cst[dim]);
4636 extent = gfc_conv_array_extent_dim (lbound, ubound, NULL);
4637 res = fold_build2_loc (input_location, MULT_EXPR, gfc_array_index_type,
4645 /* Full size of an array. */
/* Product of the extents of all RANK dimensions of DESC. */
4648 gfc_conv_descriptor_size (tree desc, int rank)
4650 return gfc_conv_descriptor_size_1 (desc, 0, rank);
4654 /* Size of a coarray for all dimensions but the last. */
/* Product over the codimensions [rank, rank+corank-1); the final
 codimension is excluded, matching its assumed extent. */
4657 gfc_conv_descriptor_cosize (tree desc, int rank, int corank)
4659 return gfc_conv_descriptor_size_1 (desc, rank, rank + corank - 1);
4663 /* Fills in an array descriptor, and returns the size of the array.
4664 The size will be a simple_val, ie a variable or a constant. Also
4665 calculates the offset of the base. The pointer argument overflow,
4666 which should be of integer type, will increase in value if overflow
4667 occurs during the size calculation. Returns the size of the array.
4671 for (n = 0; n < rank; n++)
4673 a.lbound[n] = specified_lower_bound;
4674 offset = offset + a.lbond[n] * stride;
4676 a.ubound[n] = specified_upper_bound;
4677 a.stride[n] = stride;
4678 size = size >= 0 ? ubound + size : 0; //size = ubound + 1 - lbound
4679 overflow += size == 0 ? 0: (MAX/size < stride ? 1: 0);
4680 stride = stride * size;
4682 for (n = rank; n < rank+corank; n++)
4683 (Set lcobound/ucobound as above.)
4684 element_size = sizeof (array element);
4687 stride = (size_t) stride;
4688 overflow += element_size == 0 ? 0: (MAX/element_size < stride ? 1: 0);
4689 stride = stride * element_size;
4695 gfc_array_init_size (tree descriptor, int rank, int corank, tree * poffset,
4696 gfc_expr ** lower, gfc_expr ** upper, stmtblock_t * pblock,
4697 stmtblock_t * descriptor_block, tree * overflow,
4711 stmtblock_t thenblock;
4712 stmtblock_t elseblock;
4717 type = TREE_TYPE (descriptor);
4719 stride = gfc_index_one_node;
4720 offset = gfc_index_zero_node;
4722 /* Set the dtype. */
4723 tmp = gfc_conv_descriptor_dtype (descriptor);
4724 gfc_add_modify (descriptor_block, tmp, gfc_get_dtype (TREE_TYPE (descriptor)));
/* or_expr accumulates "any dimension is empty"; used at the end to
 force a zero allocation size. */
4726 or_expr = boolean_false_node;
4728 for (n = 0; n < rank; n++)
4733 /* We have 3 possibilities for determining the size of the array:
4734 lower == NULL => lbound = 1, ubound = upper[n]
4735 upper[n] = NULL => lbound = 1, ubound = lower[n]
4736 upper[n] != NULL => lbound = lower[n], ubound = upper[n] */
4739 /* Set lower bound. */
4740 gfc_init_se (&se, NULL);
4742 se.expr = gfc_index_one_node;
4745 gcc_assert (lower[n]);
4748 gfc_conv_expr_type (&se, lower[n], gfc_array_index_type);
4749 gfc_add_block_to_block (pblock, &se.pre);
4753 se.expr = gfc_index_one_node;
4757 gfc_conv_descriptor_lbound_set (descriptor_block, descriptor,
4758 gfc_rank_cst[n], se.expr);
4759 conv_lbound = se.expr;
4761 /* Work out the offset for this component. */
4762 tmp = fold_build2_loc (input_location, MULT_EXPR, gfc_array_index_type,
4764 offset = fold_build2_loc (input_location, MINUS_EXPR,
4765 gfc_array_index_type, offset, tmp);
4767 /* Set upper bound. */
4768 gfc_init_se (&se, NULL);
4769 gcc_assert (ubound);
4770 gfc_conv_expr_type (&se, ubound, gfc_array_index_type);
4771 gfc_add_block_to_block (pblock, &se.pre);
4773 gfc_conv_descriptor_ubound_set (descriptor_block, descriptor,
4774 gfc_rank_cst[n], se.expr);
4775 conv_ubound = se.expr;
4777 /* Store the stride. */
4778 gfc_conv_descriptor_stride_set (descriptor_block, descriptor,
4779 gfc_rank_cst[n], stride);
4781 /* Calculate size and check whether extent is negative. */
4782 size = gfc_conv_array_extent_dim (conv_lbound, conv_ubound, &or_expr);
4783 size = gfc_evaluate_now (size, pblock);
4785 /* Check whether multiplying the stride by the number of
4786 elements in this dimension would overflow. We must also check
4787 whether the current dimension has zero size in order to avoid
4790 tmp = fold_build2_loc (input_location, TRUNC_DIV_EXPR,
4791 gfc_array_index_type,
4792 fold_convert (gfc_array_index_type,
4793 TYPE_MAX_VALUE (gfc_array_index_type)),
4795 cond = gfc_unlikely (fold_build2_loc (input_location, LT_EXPR,
4796 boolean_type_node, tmp, stride));
4797 tmp = fold_build3_loc (input_location, COND_EXPR, integer_type_node, cond,
4798 integer_one_node, integer_zero_node);
/* A zero-sized dimension cannot overflow (and would divide by zero
 above), so the overflow contribution is forced to 0 in that case. */
4799 cond = gfc_unlikely (fold_build2_loc (input_location, EQ_EXPR,
4800 boolean_type_node, size,
4801 gfc_index_zero_node));
4802 tmp = fold_build3_loc (input_location, COND_EXPR, integer_type_node, cond,
4803 integer_zero_node, tmp);
4804 tmp = fold_build2_loc (input_location, PLUS_EXPR, integer_type_node,
4806 *overflow = gfc_evaluate_now (tmp, pblock);
4808 /* Multiply the stride by the number of elements in this dimension. */
4809 stride = fold_build2_loc (input_location, MULT_EXPR,
4810 gfc_array_index_type, stride, size);
4811 stride = gfc_evaluate_now (stride, pblock);
/* Codimensions: only bounds are stored; they do not contribute to the
 allocation size. */
4814 for (n = rank; n < rank + corank; n++)
4818 /* Set lower bound. */
4819 gfc_init_se (&se, NULL);
4820 if (lower == NULL || lower[n] == NULL)
4822 gcc_assert (n == rank + corank - 1);
4823 se.expr = gfc_index_one_node;
4827 if (ubound || n == rank + corank - 1)
4829 gfc_conv_expr_type (&se, lower[n], gfc_array_index_type);
4830 gfc_add_block_to_block (pblock, &se.pre);
4834 se.expr = gfc_index_one_node;
4838 gfc_conv_descriptor_lbound_set (descriptor_block, descriptor,
4839 gfc_rank_cst[n], se.expr);
4841 if (n < rank + corank - 1)
4843 gfc_init_se (&se, NULL);
4844 gcc_assert (ubound);
4845 gfc_conv_expr_type (&se, ubound, gfc_array_index_type);
4846 gfc_add_block_to_block (pblock, &se.pre);
4847 gfc_conv_descriptor_ubound_set (descriptor_block, descriptor,
4848 gfc_rank_cst[n], se.expr);
4852 /* The stride is the number of elements in the array, so multiply by the
4853 size of an element to get the total size. Obviously, if there ia a
4854 SOURCE expression (expr3) we must use its element size. */
4857 if (expr3->ts.type == BT_CLASS)
/* For CLASS SOURCE= the element size lives in the vtab; fetch it via
 the _vptr->_size component. */
4860 gfc_expr *sz = gfc_copy_expr (expr3);
4861 gfc_add_vptr_component (sz);
4862 gfc_add_size_component (sz);
4863 gfc_init_se (&se_sz, NULL);
4864 gfc_conv_expr (&se_sz, sz);
4870 tmp = gfc_typenode_for_spec (&expr3->ts);
4871 tmp = TYPE_SIZE_UNIT (tmp);
4875 tmp = TYPE_SIZE_UNIT (gfc_get_element_type (type));
4877 /* Convert to size_t. */
4878 element_size = fold_convert (size_type_node, tmp);
4881 return element_size;
4883 stride = fold_convert (size_type_node, stride);
4885 /* First check for overflow. Since an array of type character can
4886 have zero element_size, we must check for that before
4888 tmp = fold_build2_loc (input_location, TRUNC_DIV_EXPR,
4890 TYPE_MAX_VALUE (size_type_node), element_size);
4891 cond = gfc_unlikely (fold_build2_loc (input_location, LT_EXPR,
4892 boolean_type_node, tmp, stride));
4893 tmp = fold_build3_loc (input_location, COND_EXPR, integer_type_node, cond,
4894 integer_one_node, integer_zero_node);
4895 cond = gfc_unlikely (fold_build2_loc (input_location, EQ_EXPR,
4896 boolean_type_node, element_size,
4897 build_int_cst (size_type_node, 0)));
4898 tmp = fold_build3_loc (input_location, COND_EXPR, integer_type_node, cond,
4899 integer_zero_node, tmp);
4900 tmp = fold_build2_loc (input_location, PLUS_EXPR, integer_type_node,
4902 *overflow = gfc_evaluate_now (tmp, pblock);
4904 size = fold_build2_loc (input_location, MULT_EXPR, size_type_node,
4905 stride, element_size);
4907 if (poffset != NULL)
4909 offset = gfc_evaluate_now (offset, pblock);
/* Constant or_expr: size is known statically (zero or the computed
 product); otherwise emit a runtime select. */
4913 if (integer_zerop (or_expr))
4915 if (integer_onep (or_expr))
4916 return build_int_cst (size_type_node, 0);
4918 var = gfc_create_var (TREE_TYPE (size), "size");
4919 gfc_start_block (&thenblock);
4920 gfc_add_modify (&thenblock, var, build_int_cst (size_type_node, 0));
4921 thencase = gfc_finish_block (&thenblock);
4923 gfc_start_block (&elseblock);
4924 gfc_add_modify (&elseblock, var, size);
4925 elsecase = gfc_finish_block (&elseblock);
4927 tmp = gfc_evaluate_now (or_expr, pblock);
4928 tmp = build3_v (COND_EXPR, tmp, thencase, elsecase);
4929 gfc_add_expr_to_block (pblock, tmp);
4935 /* Initializes the descriptor and generates a call to _gfor_allocate. Does
4936 the work for an ALLOCATE statement. */
4940 gfc_array_allocate (gfc_se * se, gfc_expr * expr, tree status, tree errmsg,
4941 tree errlen, tree label_finish, gfc_expr *expr3)
4945 tree offset = NULL_TREE;
4946 tree token = NULL_TREE;
4949 tree error = NULL_TREE;
4950 tree overflow; /* Boolean storing whether size calculation overflows. */
4951 tree var_overflow = NULL_TREE;
4953 tree set_descriptor;
4954 stmtblock_t set_descriptor_block;
4955 stmtblock_t elseblock;
4958 gfc_ref *ref, *prev_ref = NULL;
4959 bool allocatable, coarray, dimension;
4963 /* Find the last reference in the chain. */
4964 while (ref && ref->next != NULL)
4966 gcc_assert (ref->type != REF_ARRAY || ref->u.ar.type == AR_ELEMENT
4967 || (ref->u.ar.dimen == 0 && ref->u.ar.codimen > 0));
4972 if (ref == NULL || ref->type != REF_ARRAY)
/* Attributes come from the base symbol, or from the last component
 reference when allocating a derived-type component. */
4977 allocatable = expr->symtree->n.sym->attr.allocatable;
4978 coarray = expr->symtree->n.sym->attr.codimension;
4979 dimension = expr->symtree->n.sym->attr.dimension;
4983 allocatable = prev_ref->u.c.component->attr.allocatable;
4984 coarray = prev_ref->u.c.component->attr.codimension;
4985 dimension = prev_ref->u.c.component->attr.dimension;
4989 gcc_assert (coarray);
4991 /* Figure out the size of the array. */
4992 switch (ref->u.ar.type)
4998 upper = ref->u.ar.start;
5004 lower = ref->u.ar.start;
5005 upper = ref->u.ar.end;
5009 gcc_assert (ref->u.ar.as->type == AS_EXPLICIT);
5011 lower = ref->u.ar.as->lower;
5012 upper = ref->u.ar.as->upper;
5020 overflow = integer_zero_node;
/* Descriptor stores are collected separately so they can be applied
 only after a successful allocation (see set_descriptor below). */
5022 gfc_init_block (&set_descriptor_block);
5023 size = gfc_array_init_size (se->expr, ref->u.ar.as->rank,
5024 ref->u.ar.as->corank, &offset, lower, upper,
5025 &se->pre, &set_descriptor_block, &overflow,
5031 var_overflow = gfc_create_var (integer_type_node, "overflow");
5032 gfc_add_modify (&se->pre, var_overflow, overflow);
5034 /* Generate the block of code handling overflow. */
5035 msg = gfc_build_addr_expr (pchar_type_node,
5036 gfc_build_localized_cstring_const
5037 ("Integer overflow when calculating the amount of "
5038 "memory to allocate"));
5039 error = build_call_expr_loc (input_location, gfor_fndecl_runtime_error,
/* With STAT=, overflow sets the status instead of aborting. */
5043 if (status != NULL_TREE)
5045 tree status_type = TREE_TYPE (status);
5046 stmtblock_t set_status_block;
5048 gfc_start_block (&set_status_block);
5049 gfc_add_modify (&set_status_block, status,
5050 build_int_cst (status_type, LIBERROR_ALLOCATION));
5051 error = gfc_finish_block (&set_status_block);
5054 gfc_start_block (&elseblock);
5056 /* Allocate memory to store the data. */
5057 pointer = gfc_conv_descriptor_data_get (se->expr);
5058 STRIP_NOPS (pointer);
5060 if (coarray && gfc_option.coarray == GFC_FCOARRAY_LIB)
5061 token = gfc_build_addr_expr (NULL_TREE,
5062 gfc_conv_descriptor_token (se->expr));
5064 /* The allocatable variant takes the old pointer as first argument. */
5066 gfc_allocate_allocatable (&elseblock, pointer, size, token,
5067 status, errmsg, errlen, label_finish, expr)
5069 gfc_allocate_using_malloc (&elseblock, pointer, size, status);
5073 cond = gfc_unlikely (fold_build2_loc (input_location, NE_EXPR,
5074 boolean_type_node, var_overflow, integer_zero_node));
5075 tmp = fold_build3_loc (input_location, COND_EXPR, void_type_node, cond,
5076 error, gfc_finish_block (&elseblock));
5079 tmp = gfc_finish_block (&elseblock);
5081 gfc_add_expr_to_block (&se->pre, tmp);
5083 if (expr->ts.type == BT_CLASS && expr3)
5085 tmp = build_int_cst (unsigned_char_type_node, 0);
5086 /* For class objects we need to nullify the memory in case they have
5087 allocatable components; the reason is that _copy, which is used for
5088 initialization, first frees the destination. */
5089 tmp = build_call_expr_loc (input_location,
5090 builtin_decl_explicit (BUILT_IN_MEMSET),
5091 3, pointer, tmp, size);
5092 gfc_add_expr_to_block (&se->pre, tmp);
5095 /* Update the array descriptors. */
5097 gfc_conv_descriptor_offset_set (&set_descriptor_block, se->expr, offset);
5099 set_descriptor = gfc_finish_block (&set_descriptor_block);
/* With STAT=, only update the descriptor if the allocation succeeded
 (status == 0); a failed ALLOCATE must leave it untouched. */
5100 if (status != NULL_TREE)
5102 cond = fold_build2_loc (input_location, EQ_EXPR,
5103 boolean_type_node, status,
5104 build_int_cst (TREE_TYPE (status), 0));
5105 gfc_add_expr_to_block (&se->pre,
5106 fold_build3_loc (input_location, COND_EXPR, void_type_node,
5107 gfc_likely (cond), set_descriptor,
5108 build_empty_stmt (input_location)));
5111 gfc_add_expr_to_block (&se->pre, set_descriptor);
5113 if ((expr->ts.type == BT_DERIVED)
5114 && expr->ts.u.derived->attr.alloc_comp)
5116 tmp = gfc_nullify_alloc_comp (expr->ts.u.derived, se->expr,
5117 ref->u.ar.as->rank);
5118 gfc_add_expr_to_block (&se->pre, tmp);
5125 /* Deallocate an array variable. Also used when an allocated variable goes
   out of scope.  Builds and returns a statement block that calls the
   runtime deallocation support and then clears the descriptor's data
   pointer.  DESCRIPTOR is the array descriptor; PSTAT, ERRMSG, ERRLEN
   and LABEL_FINISH implement the STAT=/ERRMSG= machinery; EXPR is the
   front-end expression being deallocated.
   NOTE(review): this listing is elided -- statements between the
   numbered lines are not shown.  */
5130 gfc_array_deallocate (tree descriptor, tree pstat, tree errmsg, tree errlen,
5131 tree label_finish, gfc_expr* expr)
5136 bool coarray = gfc_is_coarray (expr);
5138 gfc_start_block (&block);
5140 /* Get a pointer to the data. */
5141 var = gfc_conv_descriptor_data_get (descriptor);
5144 /* Parameter is the address of the data component.  For coarrays the
   whole descriptor is passed instead of just the data pointer. */
5145 tmp = gfc_deallocate_with_status (coarray ? descriptor : var, pstat, errmsg,
5146 errlen, label_finish, false, expr, coarray);
5147 gfc_add_expr_to_block (&block, tmp);
5149 /* Zero the data pointer; only for coarrays an error can occur and then
5150 the allocation status may not be changed. */
5151 tmp = fold_build2_loc (input_location, MODIFY_EXPR, void_type_node,
5152 var, build_int_cst (TREE_TYPE (var), 0));
5153 if (pstat != NULL_TREE && coarray && gfc_option.coarray == GFC_FCOARRAY_LIB)
   /* For -fcoarray=lib only null the pointer when the reported status is
   zero, i.e. the deallocation actually succeeded. */
5156 tree stat = build_fold_indirect_ref_loc (input_location, pstat);
5158 cond = fold_build2_loc (input_location, EQ_EXPR, boolean_type_node,
5159 stat, build_int_cst (TREE_TYPE (stat), 0));
5160 tmp = fold_build3_loc (input_location, COND_EXPR, void_type_node,
5161 cond, tmp, build_empty_stmt (input_location));
5164 gfc_add_expr_to_block (&block, tmp);
5166 return gfc_finish_block (&block);
5170 /* Create an array constructor from an initialization expression.
5171 We assume the frontend already did any expansions and conversions. */
   /* TYPE is the backend array type; EXPR is the (already simplified)
   initializer.  Returns a CONSTRUCTOR tree suitable for a static
   initializer (or a null descriptor for the NULL case, per line 5294).
   NOTE(review): this listing is elided -- some statements between the
   numbered lines are not shown.  */
5174 gfc_conv_array_initializer (tree type, gfc_expr * expr)
5180 unsigned HOST_WIDE_INT lo;
5182 VEC(constructor_elt,gc) *v = NULL;
   /* Substitute a PARAMETER variable by its constant value up front. */
5184 if (expr->expr_type == EXPR_VARIABLE
5185 && expr->symtree->n.sym->attr.flavor == FL_PARAMETER
5186 && expr->symtree->n.sym->value)
5187 expr = expr->symtree->n.sym->value;
5189 switch (expr->expr_type)
5192 case EXPR_STRUCTURE:
5193 /* A single scalar or derived type value. Create an array with all
5194 elements equal to that value. */
5195 gfc_init_se (&se, NULL);
5197 if (expr->expr_type == EXPR_CONSTANT)
5198 gfc_conv_constant (&se, expr);
5200 gfc_conv_structure (&se, expr, 1);
   /* Element count comes from the array type's index domain. */
5202 tmp = TYPE_MAX_VALUE (TYPE_DOMAIN (type));
5203 gcc_assert (tmp && INTEGER_CST_P (tmp));
5204 hi = TREE_INT_CST_HIGH (tmp);
5205 lo = TREE_INT_CST_LOW (tmp);
5209 /* This will probably eat buckets of memory for large arrays. */
5210 while (hi != 0 || lo != 0)
5212 CONSTRUCTOR_APPEND_ELT (v, NULL_TREE, se.expr);
5220 /* Create a vector of all the elements. */
5221 for (c = gfc_constructor_first (expr->value.constructor);
5222 c; c = gfc_constructor_next (c))
5226 /* Problems occur when we get something like
5227 integer :: a(lots) = (/(i, i=1, lots)/) */
5228 gfc_fatal_error ("The number of elements in the array constructor "
5229 "at %L requires an increase of the allowed %d "
5230 "upper limit. See -fmax-array-constructor "
5231 "option", &expr->where,
5232 gfc_option.flag_max_array_constructor);
   /* A nonzero offset selects the element index within the array. */
5235 if (mpz_cmp_si (c->offset, 0) != 0)
5236 index = gfc_conv_mpz_to_tree (c->offset, gfc_index_integer_kind);
   /* Repeat counts > 1 become a RANGE_EXPR [offset, offset+repeat-1]. */
5240 if (mpz_cmp_si (c->repeat, 1) > 0)
5246 mpz_add (maxval, c->offset, c->repeat);
5247 mpz_sub_ui (maxval, maxval, 1);
5248 tmp2 = gfc_conv_mpz_to_tree (maxval, gfc_index_integer_kind);
5249 if (mpz_cmp_si (c->offset, 0) != 0)
5251 mpz_add_ui (maxval, c->offset, 1);
5252 tmp1 = gfc_conv_mpz_to_tree (maxval, gfc_index_integer_kind);
5255 tmp1 = gfc_conv_mpz_to_tree (c->offset, gfc_index_integer_kind);
5257 range = fold_build2 (RANGE_EXPR, gfc_array_index_type, tmp1, tmp2);
   /* Translate the element value itself. */
5263 gfc_init_se (&se, NULL);
5264 switch (c->expr->expr_type)
5267 gfc_conv_constant (&se, c->expr);
5270 case EXPR_STRUCTURE:
5271 gfc_conv_structure (&se, c->expr, 1);
5275 /* Catch those occasional beasts that do not simplify
5276 for one reason or another, assuming that if they are
5277 standard defying the frontend will catch them. */
5278 gfc_conv_expr (&se, c->expr);
   /* Append once under a plain index, or once under the range. */
5282 if (range == NULL_TREE)
5283 CONSTRUCTOR_APPEND_ELT (v, index, se.expr);
5286 if (index != NULL_TREE)
5287 CONSTRUCTOR_APPEND_ELT (v, index, se.expr);
5288 CONSTRUCTOR_APPEND_ELT (v, range, se.expr);
5294 return gfc_build_null_descriptor (type);
5300 /* Create a constructor from the list of elements. */
5301 tmp = build_constructor (type, v);
5302 TREE_CONSTANT (tmp) = 1;
5307 /* Generate code to evaluate non-constant coarray cobounds. */
   /* TYPE is SYM's backend array type; the evaluated cobound expressions
   are appended to PBLOCK and assigned to the type's saved lbound/ubound
   decls for the codimensions (dims as->rank .. as->rank+as->corank-1).
   Bounds that are already INTEGER_CSTs need no runtime evaluation.  */
5310 gfc_trans_array_cobounds (tree type, stmtblock_t * pblock,
5311 const gfc_symbol *sym)
5321 for (dim = as->rank; dim < as->rank + as->corank; dim++)
5323 /* Evaluate non-constant array bound expressions. */
5324 lbound = GFC_TYPE_ARRAY_LBOUND (type, dim);
5325 if (as->lower[dim] && !INTEGER_CST_P (lbound))
5327 gfc_init_se (&se, NULL);
5328 gfc_conv_expr_type (&se, as->lower[dim], gfc_array_index_type);
5329 gfc_add_block_to_block (pblock, &se.pre);
5330 gfc_add_modify (pblock, lbound, se.expr);
5332 ubound = GFC_TYPE_ARRAY_UBOUND (type, dim);
5333 if (as->upper[dim] && !INTEGER_CST_P (ubound))
5335 gfc_init_se (&se, NULL);
5336 gfc_conv_expr_type (&se, as->upper[dim], gfc_array_index_type);
5337 gfc_add_block_to_block (pblock, &se.pre);
5338 gfc_add_modify (pblock, ubound, se.expr);
5344 /* Generate code to evaluate non-constant array bounds. Sets *poffset and
5345 returns the size (in elements) of the array. */
   /* TYPE is SYM's backend array type; generated statements go into
   PBLOCK.  For each dimension the lbound/ubound decls are filled in
   (when not compile-time constants), the running offset is updated as
   offset -= lbound * stride, and the next stride (or total size, for
   the last dimension) is computed and clamped to zero for negative
   extents.  NOTE(review): elided listing -- some statements between the
   numbered lines are not shown.  */
5348 gfc_trans_array_bounds (tree type, gfc_symbol * sym, tree * poffset,
5349 stmtblock_t * pblock)
5364 size = gfc_index_one_node;
5365 offset = gfc_index_zero_node;
5366 for (dim = 0; dim < as->rank; dim++)
5368 /* Evaluate non-constant array bound expressions. */
5369 lbound = GFC_TYPE_ARRAY_LBOUND (type, dim);
5370 if (as->lower[dim] && !INTEGER_CST_P (lbound))
5372 gfc_init_se (&se, NULL);
5373 gfc_conv_expr_type (&se, as->lower[dim], gfc_array_index_type);
5374 gfc_add_block_to_block (pblock, &se.pre);
5375 gfc_add_modify (pblock, lbound, se.expr);
5377 ubound = GFC_TYPE_ARRAY_UBOUND (type, dim);
5378 if (as->upper[dim] && !INTEGER_CST_P (ubound))
5380 gfc_init_se (&se, NULL);
5381 gfc_conv_expr_type (&se, as->upper[dim], gfc_array_index_type);
5382 gfc_add_block_to_block (pblock, &se.pre);
5383 gfc_add_modify (pblock, ubound, se.expr);
5385 /* The offset of this dimension. offset = offset - lbound * stride. */
5386 tmp = fold_build2_loc (input_location, MULT_EXPR, gfc_array_index_type,
5388 offset = fold_build2_loc (input_location, MINUS_EXPR, gfc_array_index_type,
5391 /* The size of this dimension, and the stride of the next. */
5392 if (dim + 1 < as->rank)
5393 stride = GFC_TYPE_ARRAY_STRIDE (type, dim + 1);
5395 stride = GFC_TYPE_ARRAY_SIZE (type);
5397 if (ubound != NULL_TREE && !(stride && INTEGER_CST_P (stride)))
5399 /* Calculate stride = size * (ubound + 1 - lbound). */
5400 tmp = fold_build2_loc (input_location, MINUS_EXPR,
5401 gfc_array_index_type,
5402 gfc_index_one_node, lbound);
5403 tmp = fold_build2_loc (input_location, PLUS_EXPR,
5404 gfc_array_index_type, ubound, tmp);
5405 tmp = fold_build2_loc (input_location, MULT_EXPR,
5406 gfc_array_index_type, size, tmp);
5408 gfc_add_modify (pblock, stride, tmp);
5410 stride = gfc_evaluate_now (tmp, pblock);
5412 /* Make sure that negative size arrays are translated
5413 to being zero size. */
5414 tmp = fold_build2_loc (input_location, GE_EXPR, boolean_type_node,
5415 stride, gfc_index_zero_node);
5416 tmp = fold_build3_loc (input_location, COND_EXPR,
5417 gfc_array_index_type, tmp,
5418 stride, gfc_index_zero_node);
5419 gfc_add_modify (pblock, stride, tmp);
   /* Also evaluate any non-constant cobounds and VLA type sizes. */
5425 gfc_trans_array_cobounds (type, pblock, sym);
5426 gfc_trans_vla_type_sizes (sym, pblock);
5433 /* Generate code to initialize/allocate an array variable. */
   /* DECL is the backend decl of the automatic array SYM; init and
   cleanup code is registered on BLOCK.  Stack arrays get a DECL_EXPR
   (or a separate VAR_DECL under -fstack-arrays); otherwise the data is
   malloc'd on entry and freed on exit.  Cray pointees get no storage.
   NOTE(review): elided listing -- some statements between the numbered
   lines are not shown.  */
5436 gfc_trans_auto_array_allocation (tree decl, gfc_symbol * sym,
5437 gfc_wrapped_block * block)
5441 tree tmp = NULL_TREE;
   /* Pointer/allocatable arrays are handled elsewhere. */
5448 gcc_assert (!(sym->attr.pointer || sym->attr.allocatable));
5450 /* Do nothing for USEd variables. */
5451 if (sym->attr.use_assoc)
5454 type = TREE_TYPE (decl);
5455 gcc_assert (GFC_ARRAY_TYPE_P (type));
5456 onstack = TREE_CODE (type) != POINTER_TYPE;
5458 gfc_init_block (&init);
5460 /* Evaluate character string length. */
5461 if (sym->ts.type == BT_CHARACTER
5462 && onstack && !INTEGER_CST_P (sym->ts.u.cl->backend_decl))
5464 gfc_conv_string_length (sym->ts.u.cl, NULL, &init);
5466 gfc_trans_vla_type_sizes (sym, &init);
5468 /* Emit a DECL_EXPR for this variable, which will cause the
5469 gimplifier to allocate storage, and all that good stuff. */
5470 tmp = fold_build1_loc (input_location, DECL_EXPR, TREE_TYPE (decl), decl);
5471 gfc_add_expr_to_block (&init, tmp);
5476 gfc_add_init_cleanup (block, gfc_finish_block (&init), NULL_TREE);
   /* Heap case: DECL is a pointer; strip one level to the array type. */
5480 type = TREE_TYPE (type);
5482 gcc_assert (!sym->attr.use_assoc);
5483 gcc_assert (!TREE_STATIC (decl));
5484 gcc_assert (!sym->module);
5486 if (sym->ts.type == BT_CHARACTER
5487 && !INTEGER_CST_P (sym->ts.u.cl->backend_decl))
5488 gfc_conv_string_length (sym->ts.u.cl, NULL, &init);
5490 size = gfc_trans_array_bounds (type, sym, &offset, &init);
5492 /* Don't actually allocate space for Cray Pointees. */
5493 if (sym->attr.cray_pointee)
5495 if (TREE_CODE (GFC_TYPE_ARRAY_OFFSET (type)) == VAR_DECL)
5496 gfc_add_modify (&init, GFC_TYPE_ARRAY_OFFSET (type), offset);
5498 gfc_add_init_cleanup (block, gfc_finish_block (&init), NULL_TREE);
5502 if (gfc_option.flag_stack_arrays)
5504 gcc_assert (TREE_CODE (TREE_TYPE (decl)) == POINTER_TYPE);
5505 space = build_decl (sym->declared_at.lb->location,
5506 VAR_DECL, create_tmp_var_name ("A"),
5507 TREE_TYPE (TREE_TYPE (decl)));
5508 gfc_trans_vla_type_sizes (sym, &init);
5512 /* The size is the number of elements in the array, so multiply by the
5513 size of an element to get the total size. */
5514 tmp = TYPE_SIZE_UNIT (gfc_get_element_type (type));
5515 size = fold_build2_loc (input_location, MULT_EXPR, gfc_array_index_type,
5516 size, fold_convert (gfc_array_index_type, tmp));
5518 /* Allocate memory to hold the data. */
5519 tmp = gfc_call_malloc (&init, TREE_TYPE (decl), size);
5520 gfc_add_modify (&init, decl, tmp);
5522 /* Free the temporary. */
5523 tmp = gfc_call_free (convert (pvoid_type_node, decl));
5527 /* Set offset of the array. */
5528 if (TREE_CODE (GFC_TYPE_ARRAY_OFFSET (type)) == VAR_DECL)
5529 gfc_add_modify (&init, GFC_TYPE_ARRAY_OFFSET (type), offset);
5531 /* Automatic arrays should not have initializers. */
5532 gcc_assert (!sym->value);
5534 inittree = gfc_finish_block (&init);
5541 /* Don't create new scope, emit the DECL_EXPR in exactly the scope
5542 where also space is located. */
5543 gfc_init_block (&init);
5544 tmp = fold_build1_loc (input_location, DECL_EXPR,
5545 TREE_TYPE (space), space);
5546 gfc_add_expr_to_block (&init, tmp);
   /* Point DECL at the stack-allocated storage. */
5547 addr = fold_build1_loc (sym->declared_at.lb->location,
5548 ADDR_EXPR, TREE_TYPE (decl), space);
5549 gfc_add_modify (&init, decl, addr);
5550 gfc_add_init_cleanup (block, gfc_finish_block (&init), NULL_TREE);
5553 gfc_add_init_cleanup (block, inittree, tmp);
5557 /* Generate entry and exit code for g77 calling convention arrays. */
   /* SYM is a dummy array passed without a descriptor (bare data
   pointer, g77 style).  Evaluates string length and bounds into an
   init block registered on BLOCK; guarded by PRESENT() for optional
   dummies.  No cleanup is needed (NULL_TREE cleanup).  */
5560 gfc_trans_g77_array (gfc_symbol * sym, gfc_wrapped_block * block)
5570 gfc_save_backend_locus (&loc);
5571 gfc_set_backend_locus (&sym->declared_at);
5573 /* Descriptor type. */
5574 parm = sym->backend_decl;
5575 type = TREE_TYPE (parm);
5576 gcc_assert (GFC_ARRAY_TYPE_P (type));
5578 gfc_start_block (&init);
5580 if (sym->ts.type == BT_CHARACTER
5581 && TREE_CODE (sym->ts.u.cl->backend_decl) == VAR_DECL)
5582 gfc_conv_string_length (sym->ts.u.cl, NULL, &init);
5584 /* Evaluate the bounds of the array. */
5585 gfc_trans_array_bounds (type, sym, &offset, &init);
5587 /* Set the offset. */
5588 if (TREE_CODE (GFC_TYPE_ARRAY_OFFSET (type)) == VAR_DECL)
5589 gfc_add_modify (&init, GFC_TYPE_ARRAY_OFFSET (type), offset);
5591 /* Set the pointer itself if we aren't using the parameter directly. */
5592 if (TREE_CODE (parm) != PARM_DECL)
5594 tmp = convert (TREE_TYPE (parm), GFC_DECL_SAVED_DESCRIPTOR (parm));
5595 gfc_add_modify (&init, parm, tmp);
5597 stmt = gfc_finish_block (&init);
5599 gfc_restore_backend_locus (&loc);
5601 /* Add the initialization code to the start of the function. */
   /* For optional/entry-master dummies, only run it when present. */
5603 if (sym->attr.optional || sym->attr.not_always_present)
5605 tmp = gfc_conv_expr_present (sym);
5606 stmt = build3_v (COND_EXPR, tmp, stmt, build_empty_stmt (input_location));
5609 gfc_add_init_cleanup (block, stmt, NULL_TREE);
5613 /* Modify the descriptor of an array parameter so that it has the
5614 correct lower bound. Also move the upper bound accordingly.
5615 If the array is not packed, it will be copied into a temporary.
5616 For each dimension we set the new lower and upper bounds. Then we copy the
5617 stride and calculate the offset for this dimension. We also work out
5618 what the stride of a packed array would be, and see if the two match.
5619 If the array needs repacking, we set the stride to the values we just
5620 calculated, recalculate the offset and copy the array data.
5621 Code is also added to copy the data back at the end of the function.
   SYM is the dummy array symbol, TMPDESC its local (non-descriptor)
   decl; init/cleanup statements are registered on BLOCK.
   NOTE(review): elided listing -- some statements between the numbered
   lines are not shown.  */
5625 gfc_trans_dummy_array_bias (gfc_symbol * sym, tree tmpdesc,
5626 gfc_wrapped_block * block)
5633 tree stmtInit, stmtCleanup;
5640 tree stride, stride2;
5650 /* Do nothing for pointer and allocatable arrays. */
5651 if (sym->attr.pointer || sym->attr.allocatable)
   /* Descriptorless dummies take the g77 path instead. */
5654 if (sym->attr.dummy && gfc_is_nodesc_array (sym))
5656 gfc_trans_g77_array (sym, block);
5660 gfc_save_backend_locus (&loc);
5661 gfc_set_backend_locus (&sym->declared_at);
5663 /* Descriptor type. */
5664 type = TREE_TYPE (tmpdesc);
5665 gcc_assert (GFC_ARRAY_TYPE_P (type));
5666 dumdesc = GFC_DECL_SAVED_DESCRIPTOR (tmpdesc);
5667 dumdesc = build_fold_indirect_ref_loc (input_location, dumdesc);
5668 gfc_start_block (&init);
5670 if (sym->ts.type == BT_CHARACTER
5671 && TREE_CODE (sym->ts.u.cl->backend_decl) == VAR_DECL)
5672 gfc_conv_string_length (sym->ts.u.cl, NULL, &init);
   /* Runtime extent checking only for explicit-shape with -fcheck=bounds. */
5674 checkparm = (sym->as->type == AS_EXPLICIT
5675 && (gfc_option.rtcheck & GFC_RTCHECK_BOUNDS));
5677 no_repack = !(GFC_DECL_PACKED_ARRAY (tmpdesc)
5678 || GFC_DECL_PARTIAL_PACKED_ARRAY (tmpdesc));
5680 if (GFC_DECL_PARTIAL_PACKED_ARRAY (tmpdesc))
5682 /* For non-constant shape arrays we only check if the first dimension
5683 is contiguous. Repacking higher dimensions wouldn't gain us
5684 anything as we still don't know the array stride. */
5685 partial = gfc_create_var (boolean_type_node, "partial");
5686 TREE_USED (partial) = 1;
5687 tmp = gfc_conv_descriptor_stride_get (dumdesc, gfc_rank_cst[0]);
5688 tmp = fold_build2_loc (input_location, EQ_EXPR, boolean_type_node, tmp,
5689 gfc_index_one_node);
5690 gfc_add_modify (&init, partial, tmp);
5693 partial = NULL_TREE;
5695 /* The naming of stmt_unpacked and stmt_packed may be counter-intuitive
5696 here, however I think it does the right thing. */
5699 /* Set the first stride. */
5700 stride = gfc_conv_descriptor_stride_get (dumdesc, gfc_rank_cst[0]);
5701 stride = gfc_evaluate_now (stride, &init);
   /* A zero incoming stride means "assume contiguous": use 1. */
5703 tmp = fold_build2_loc (input_location, EQ_EXPR, boolean_type_node,
5704 stride, gfc_index_zero_node);
5705 tmp = fold_build3_loc (input_location, COND_EXPR, gfc_array_index_type,
5706 tmp, gfc_index_one_node, stride);
5707 stride = GFC_TYPE_ARRAY_STRIDE (type, 0);
5708 gfc_add_modify (&init, stride, tmp);
5710 /* Allow the user to disable array repacking. */
5711 stmt_unpacked = NULL_TREE;
5715 gcc_assert (integer_onep (GFC_TYPE_ARRAY_STRIDE (type, 0)));
5716 /* A library call to repack the array if necessary. */
5717 tmp = GFC_DECL_SAVED_DESCRIPTOR (tmpdesc);
5718 stmt_unpacked = build_call_expr_loc (input_location,
5719 gfor_fndecl_in_pack, 1, tmp);
5721 stride = gfc_index_one_node;
5723 if (gfc_option.warn_array_temp)
5724 gfc_warning ("Creating array temporary at %L", &loc);
5727 /* This is for the case where the array data is used directly without
5728 calling the repack function. */
5729 if (no_repack || partial != NULL_TREE)
5730 stmt_packed = gfc_conv_descriptor_data_get (dumdesc);
5732 stmt_packed = NULL_TREE;
5734 /* Assign the data pointer. */
5735 if (stmt_packed != NULL_TREE && stmt_unpacked != NULL_TREE)
5737 /* Don't repack unknown shape arrays when the first stride is 1. */
5738 tmp = fold_build3_loc (input_location, COND_EXPR, TREE_TYPE (stmt_packed),
5739 partial, stmt_packed, stmt_unpacked);
5742 tmp = stmt_packed != NULL_TREE ? stmt_packed : stmt_unpacked;
5743 gfc_add_modify (&init, tmpdesc, fold_convert (type, tmp));
5745 offset = gfc_index_zero_node;
5746 size = gfc_index_one_node;
5748 /* Evaluate the bounds of the array. */
5749 for (n = 0; n < sym->as->rank; n++)
5751 if (checkparm || !sym->as->upper[n])
5753 /* Get the bounds of the actual parameter. */
5754 dubound = gfc_conv_descriptor_ubound_get (dumdesc, gfc_rank_cst[n]);
5755 dlbound = gfc_conv_descriptor_lbound_get (dumdesc, gfc_rank_cst[n]);
5759 dubound = NULL_TREE;
5760 dlbound = NULL_TREE;
5763 lbound = GFC_TYPE_ARRAY_LBOUND (type, n);
5764 if (!INTEGER_CST_P (lbound))
5766 gfc_init_se (&se, NULL);
5767 gfc_conv_expr_type (&se, sym->as->lower[n],
5768 gfc_array_index_type);
5769 gfc_add_block_to_block (&init, &se.pre);
5770 gfc_add_modify (&init, lbound, se.expr);
5773 ubound = GFC_TYPE_ARRAY_UBOUND (type, n);
5774 /* Set the desired upper bound. */
5775 if (sym->as->upper[n])
5777 /* We know what we want the upper bound to be. */
5778 if (!INTEGER_CST_P (ubound))
5780 gfc_init_se (&se, NULL);
5781 gfc_conv_expr_type (&se, sym->as->upper[n],
5782 gfc_array_index_type);
5783 gfc_add_block_to_block (&init, &se.pre);
5784 gfc_add_modify (&init, ubound, se.expr);
5787 /* Check the sizes match. */
5790 /* Check (ubound(a) - lbound(a) == ubound(b) - lbound(b)). */
5794 temp = fold_build2_loc (input_location, MINUS_EXPR,
5795 gfc_array_index_type, ubound, lbound);
5796 temp = fold_build2_loc (input_location, PLUS_EXPR,
5797 gfc_array_index_type,
5798 gfc_index_one_node, temp);
5799 stride2 = fold_build2_loc (input_location, MINUS_EXPR,
5800 gfc_array_index_type, dubound,
5802 stride2 = fold_build2_loc (input_location, PLUS_EXPR,
5803 gfc_array_index_type,
5804 gfc_index_one_node, stride2);
5805 tmp = fold_build2_loc (input_location, NE_EXPR,
5806 gfc_array_index_type, temp, stride2);
   /* %%ld survives asprintf as %ld for the runtime check's printf. */
5807 asprintf (&msg, "Dimension %d of array '%s' has extent "
5808 "%%ld instead of %%ld", n+1, sym->name);
5810 gfc_trans_runtime_check (true, false, tmp, &init, &loc, msg,
5811 fold_convert (long_integer_type_node, temp),
5812 fold_convert (long_integer_type_node, stride2));
5819 /* For assumed shape arrays move the upper bound by the same amount
5820 as the lower bound. */
5821 tmp = fold_build2_loc (input_location, MINUS_EXPR,
5822 gfc_array_index_type, dubound, dlbound);
5823 tmp = fold_build2_loc (input_location, PLUS_EXPR,
5824 gfc_array_index_type, tmp, lbound);
5825 gfc_add_modify (&init, ubound, tmp);
5827 /* The offset of this dimension. offset = offset - lbound * stride. */
5828 tmp = fold_build2_loc (input_location, MULT_EXPR, gfc_array_index_type,
5830 offset = fold_build2_loc (input_location, MINUS_EXPR,
5831 gfc_array_index_type, offset, tmp);
5833 /* The size of this dimension, and the stride of the next. */
5834 if (n + 1 < sym->as->rank)
5836 stride = GFC_TYPE_ARRAY_STRIDE (type, n + 1);
5838 if (no_repack || partial != NULL_TREE)
5840 gfc_conv_descriptor_stride_get (dumdesc, gfc_rank_cst[n+1]);
5842 /* Figure out the stride if not a known constant. */
5843 if (!INTEGER_CST_P (stride))
5846 stmt_packed = NULL_TREE;
5849 /* Calculate stride = size * (ubound + 1 - lbound). */
5850 tmp = fold_build2_loc (input_location, MINUS_EXPR,
5851 gfc_array_index_type,
5852 gfc_index_one_node, lbound);
5853 tmp = fold_build2_loc (input_location, PLUS_EXPR,
5854 gfc_array_index_type, ubound, tmp);
5855 size = fold_build2_loc (input_location, MULT_EXPR,
5856 gfc_array_index_type, size, tmp);
5860 /* Assign the stride. */
5861 if (stmt_packed != NULL_TREE && stmt_unpacked != NULL_TREE)
5862 tmp = fold_build3_loc (input_location, COND_EXPR,
5863 gfc_array_index_type, partial,
5864 stmt_unpacked, stmt_packed);
5866 tmp = (stmt_packed != NULL_TREE) ? stmt_packed : stmt_unpacked;
5867 gfc_add_modify (&init, stride, tmp);
   /* Last dimension: compute the total array size instead. */
5872 stride = GFC_TYPE_ARRAY_SIZE (type);
5874 if (stride && !INTEGER_CST_P (stride))
5876 /* Calculate size = stride * (ubound + 1 - lbound). */
5877 tmp = fold_build2_loc (input_location, MINUS_EXPR,
5878 gfc_array_index_type,
5879 gfc_index_one_node, lbound);
5880 tmp = fold_build2_loc (input_location, PLUS_EXPR,
5881 gfc_array_index_type,
5883 tmp = fold_build2_loc (input_location, MULT_EXPR,
5884 gfc_array_index_type,
5885 GFC_TYPE_ARRAY_STRIDE (type, n), tmp);
5886 gfc_add_modify (&init, stride, tmp);
5891 gfc_trans_array_cobounds (type, &init, sym);
5893 /* Set the offset. */
5894 if (TREE_CODE (GFC_TYPE_ARRAY_OFFSET (type)) == VAR_DECL)
5895 gfc_add_modify (&init, GFC_TYPE_ARRAY_OFFSET (type), offset);
5897 gfc_trans_vla_type_sizes (sym, &init);
5899 stmtInit = gfc_finish_block (&init);
5901 /* Only do the entry/initialization code if the arg is present. */
5902 dumdesc = GFC_DECL_SAVED_DESCRIPTOR (tmpdesc);
5903 optional_arg = (sym->attr.optional
5904 || (sym->ns->proc_name->attr.entry_master
5905 && sym->attr.dummy));
5908 tmp = gfc_conv_expr_present (sym);
5909 stmtInit = build3_v (COND_EXPR, tmp, stmtInit,
5910 build_empty_stmt (input_location));
5915 stmtCleanup = NULL_TREE;
5918 stmtblock_t cleanup;
5919 gfc_start_block (&cleanup);
5921 if (sym->attr.intent != INTENT_IN)
5923 /* Copy the data back. */
5924 tmp = build_call_expr_loc (input_location,
5925 gfor_fndecl_in_unpack, 2, dumdesc, tmpdesc);
5926 gfc_add_expr_to_block (&cleanup, tmp);
5929 /* Free the temporary. */
5930 tmp = gfc_call_free (tmpdesc);
5931 gfc_add_expr_to_block (&cleanup, tmp);
5933 stmtCleanup = gfc_finish_block (&cleanup);
5935 /* Only do the cleanup if the array was repacked. */
5936 tmp = build_fold_indirect_ref_loc (input_location, dumdesc);
5937 tmp = gfc_conv_descriptor_data_get (tmp);
5938 tmp = fold_build2_loc (input_location, NE_EXPR, boolean_type_node,
5940 stmtCleanup = build3_v (COND_EXPR, tmp, stmtCleanup,
5941 build_empty_stmt (input_location));
5945 tmp = gfc_conv_expr_present (sym);
5946 stmtCleanup = build3_v (COND_EXPR, tmp, stmtCleanup,
5947 build_empty_stmt (input_location));
5951 /* We don't need to free any memory allocated by internal_pack as it will
5952 be freed at the end of the function by pop_context. */
5953 gfc_add_init_cleanup (block, stmtInit, stmtCleanup);
5955 gfc_restore_backend_locus (&loc);
5959 /* Calculate the overall offset, including subreferences. */
   /* Sets PARM's descriptor data pointer to DESC's data plus OFFSET,
   then, when SUBREF is set, walks EXPR's trailing references
   (components, substrings, element array refs) to address the actual
   subobject; e.g. my_integer => my_type(:)%integer_component.
   NOTE(review): elided listing -- some statements between the numbered
   lines are not shown.  */
5961 gfc_get_dataptr_offset (stmtblock_t *block, tree parm, tree desc, tree offset,
5962 bool subref, gfc_expr *expr)
5972 /* If offset is NULL and this is not a subreferenced array, there is
5974 if (offset == NULL_TREE)
5977 offset = gfc_index_zero_node;
   /* Index the raw data by OFFSET to get the base element. */
5982 tmp = gfc_conv_array_data (desc);
5983 tmp = build_fold_indirect_ref_loc (input_location,
5985 tmp = gfc_build_array_ref (tmp, offset, NULL);
5987 /* Offset the data pointer for pointer assignments from arrays with
5988 subreferences; e.g. my_integer => my_type(:)%integer_component. */
5991 /* Go past the array reference. */
5992 for (ref = expr->ref; ref; ref = ref->next)
5993 if (ref->type == REF_ARRAY &&
5994 ref->u.ar.type != AR_ELEMENT)
6000 /* Calculate the offset for each subsequent subreference. */
6001 for (; ref; ref = ref->next)
   /* Component reference: build a COMPONENT_REF on the field decl. */
6006 field = ref->u.c.component->backend_decl;
6007 gcc_assert (field && TREE_CODE (field) == FIELD_DECL);
6008 tmp = fold_build3_loc (input_location, COMPONENT_REF,
6010 tmp, field, NULL_TREE);
   /* Substring reference: index by the (character) start position. */
6014 gcc_assert (TREE_CODE (TREE_TYPE (tmp)) == ARRAY_TYPE);
6015 gfc_init_se (&start, NULL);
6016 gfc_conv_expr_type (&start, ref->u.ss.start, gfc_charlen_type_node);
6017 gfc_add_block_to_block (block, &start.pre);
6018 tmp = gfc_build_array_ref (tmp, start.expr, NULL);
   /* Element array reference: linearize the indices manually. */
6022 gcc_assert (TREE_CODE (TREE_TYPE (tmp)) == ARRAY_TYPE
6023 && ref->u.ar.type == AR_ELEMENT);
6025 /* TODO - Add bounds checking. */
6026 stride = gfc_index_one_node;
6027 index = gfc_index_zero_node;
6028 for (n = 0; n < ref->u.ar.dimen; n++)
6033 /* Update the index: index += (start[n] - lower[n]) * stride. */
6034 gfc_init_se (&start, NULL);
6035 gfc_conv_expr_type (&start, ref->u.ar.start[n], gfc_array_index_type);
6036 itmp = gfc_evaluate_now (start.expr, block);
6037 gfc_init_se (&start, NULL);
6038 gfc_conv_expr_type (&start, ref->u.ar.as->lower[n], gfc_array_index_type);
6039 jtmp = gfc_evaluate_now (start.expr, block);
6040 itmp = fold_build2_loc (input_location, MINUS_EXPR,
6041 gfc_array_index_type, itmp, jtmp);
6042 itmp = fold_build2_loc (input_location, MULT_EXPR,
6043 gfc_array_index_type, itmp, stride);
6044 index = fold_build2_loc (input_location, PLUS_EXPR,
6045 gfc_array_index_type, itmp, index);
6046 index = gfc_evaluate_now (index, block);
6048 /* Update the stride: stride *= (upper[n] + 1 - lower[n]). */
6049 gfc_init_se (&start, NULL);
6050 gfc_conv_expr_type (&start, ref->u.ar.as->upper[n], gfc_array_index_type);
6051 itmp = fold_build2_loc (input_location, MINUS_EXPR,
6052 gfc_array_index_type, start.expr,
6054 itmp = fold_build2_loc (input_location, PLUS_EXPR,
6055 gfc_array_index_type,
6056 gfc_index_one_node, itmp);
6057 stride = fold_build2_loc (input_location, MULT_EXPR,
6058 gfc_array_index_type, stride, itmp);
6059 stride = gfc_evaluate_now (stride, block);
6062 /* Apply the index to obtain the array element. */
6063 tmp = gfc_build_array_ref (tmp, index, NULL);
6073 /* Set the target data pointer. */
6074 offset = gfc_build_addr_expr (gfc_array_dataptr_type (desc), tmp);
6075 gfc_conv_descriptor_data_set (block, parm, offset);
6079 /* gfc_conv_expr_descriptor needs the string length of an expression
6080 so that the size of the temporary can be obtained. This is done
6081 by adding up the string lengths of all the elements in the
6082 expression. Functions with non-constant string lengths have their
6083 string lengths mapped onto the actual arguments using the
6084 interface mapping machinery in trans-expr.c. */
   /* Evaluation code is emitted into SE->pre/SE->post; the result is
   stored in expr->ts.u.cl->backend_decl.  Recurses into operator
   operands (concatenation adds the operand lengths).
   NOTE(review): elided listing -- some statements between the numbered
   lines are not shown.  */
6086 get_array_charlen (gfc_expr *expr, gfc_se *se)
6088 gfc_interface_mapping mapping;
6089 gfc_formal_arglist *formal;
6090 gfc_actual_arglist *arg;
   /* Constant length: just translate it once and return. */
6093 if (expr->ts.u.cl->length
6094 && gfc_is_constant_expr (expr->ts.u.cl->length))
6096 if (!expr->ts.u.cl->backend_decl)
6097 gfc_conv_string_length (expr->ts.u.cl, expr, &se->pre);
6101 switch (expr->expr_type)
6104 get_array_charlen (expr->value.op.op1, se);
6106 /* For parentheses the expression ts.u.cl is identical. */
6107 if (expr->value.op.op == INTRINSIC_PARENTHESES)
6110 expr->ts.u.cl->backend_decl =
6111 gfc_create_var (gfc_charlen_type_node, "sln");
6113 if (expr->value.op.op2)
6115 get_array_charlen (expr->value.op.op2, se);
6117 gcc_assert (expr->value.op.op == INTRINSIC_CONCAT);
6119 /* Add the string lengths and assign them to the expression
6120 string length backend declaration. */
6121 gfc_add_modify (&se->pre, expr->ts.u.cl->backend_decl,
6122 fold_build2_loc (input_location, PLUS_EXPR,
6123 gfc_charlen_type_node,
6124 expr->value.op.op1->ts.u.cl->backend_decl,
6125 expr->value.op.op2->ts.u.cl->backend_decl));
   /* Unary operator: the length is that of the single operand. */
6128 gfc_add_modify (&se->pre, expr->ts.u.cl->backend_decl,
6129 expr->value.op.op1->ts.u.cl->backend_decl);
   /* Function case: constant or statement-function lengths translate
   directly; otherwise map dummy args onto actual args below. */
6133 if (expr->value.function.esym == NULL
6134 || expr->ts.u.cl->length->expr_type == EXPR_CONSTANT)
6136 gfc_conv_string_length (expr->ts.u.cl, expr, &se->pre);
6140 /* Map expressions involving the dummy arguments onto the actual
6141 argument expressions. */
6142 gfc_init_interface_mapping (&mapping);
6143 formal = expr->symtree->n.sym->formal;
6144 arg = expr->value.function.actual;
6146 /* Set se = NULL in the calls to the interface mapping, to suppress any
6148 for (; arg != NULL; arg = arg->next, formal = formal ? formal->next : NULL)
6153 gfc_add_interface_mapping (&mapping, formal->sym, NULL, arg->expr);
6156 gfc_init_se (&tse, NULL);
6158 /* Build the expression for the character length and convert it. */
6159 gfc_apply_interface_mapping (&mapping, &tse, expr->ts.u.cl->length);
6161 gfc_add_block_to_block (&se->pre, &tse.pre);
6162 gfc_add_block_to_block (&se->post, &tse.post);
6163 tse.expr = fold_convert (gfc_charlen_type_node, tse.expr);
   /* Clamp negative lengths to zero. */
6164 tse.expr = fold_build2_loc (input_location, MAX_EXPR,
6165 gfc_charlen_type_node, tse.expr,
6166 build_int_cst (gfc_charlen_type_node, 0));
6167 expr->ts.u.cl->backend_decl = tse.expr;
6168 gfc_free_interface_mapping (&mapping);
6172 gfc_conv_string_length (expr->ts.u.cl, expr, &se->pre);
6178 /* Helper function to check dimensions.  Scans the scalarizer dimension
   mapping and reports whether any SS->dim[n] differs from n, i.e.
   whether the dimensions are permuted (transposed).  NOTE(review):
   the return statements fall outside this elided listing -- presumably
   true on a mismatch, false otherwise; confirm against full source.  */
6180 transposed_dims (gfc_ss *ss)
6184 for (n = 0; n < ss->dimen; n++)
6185 if (ss->dim[n] != n)
6190 /* Convert an array for passing as an actual argument. Expressions and
6191 vector subscripts are evaluated and stored in a temporary, which is then
6192 passed. For whole arrays the descriptor is passed. For array sections
6193 a modified copy of the descriptor is passed, but using the original data.
6195 This function is also used for array pointer assignments, and there
6198 - se->want_pointer && !se->direct_byref
6199 EXPR is an actual argument. On exit, se->expr contains a
6200 pointer to the array descriptor.
6202 - !se->want_pointer && !se->direct_byref
6203 EXPR is an actual argument to an intrinsic function or the
6204 left-hand side of a pointer assignment. On exit, se->expr
6205 contains the descriptor for EXPR.
6207 - !se->want_pointer && se->direct_byref
6208 EXPR is the right-hand side of a pointer assignment and
6209 se->expr is the descriptor for the previously-evaluated
6210 left-hand side. The function creates an assignment from
6214 The se->force_tmp flag disables the non-copying descriptor optimization
6215 that is used for transpose. It may be used in cases where there is an
6216 alias between the transpose argument and another argument in the same
6220 gfc_conv_expr_descriptor (gfc_se * se, gfc_expr * expr, gfc_ss * ss)
6222 gfc_ss_type ss_type;
6223 gfc_ss_info *ss_info;
6225 gfc_array_info *info;
6234 bool subref_array_target = false;
6235 gfc_expr *arg, *ss_expr;
6237 gcc_assert (ss != NULL);
6238 gcc_assert (ss != gfc_ss_terminator);
6241 ss_type = ss_info->type;
6242 ss_expr = ss_info->expr;
6244 /* Special case things we know we can pass easily. */
6245 switch (expr->expr_type)
6248 /* If we have a linear array section, we can pass it directly.
6249 Otherwise we need to copy it into a temporary. */
6251 gcc_assert (ss_type == GFC_SS_SECTION);
6252 gcc_assert (ss_expr == expr);
6253 info = &ss_info->data.array;
6255 /* Get the descriptor for the array. */
6256 gfc_conv_ss_descriptor (&se->pre, ss, 0);
6257 desc = info->descriptor;
/* A subref target for a pointer assignment keeps the original data
   pointer, so it suppresses the temporary that gfc_ref_needs_temporary_p
   would otherwise demand.  */
6259 subref_array_target = se->direct_byref && is_subref_array (expr);
6260 need_tmp = gfc_ref_needs_temporary_p (expr->ref)
6261 && !subref_array_target;
6268 else if (GFC_ARRAY_TYPE_P (TREE_TYPE (desc)))
6270 /* Create a new descriptor if the array doesn't have one. */
6273 else if (info->ref->u.ar.type == AR_FULL)
6275 else if (se->direct_byref)
6278 full = gfc_full_array_ref_p (info->ref, NULL);
6280 if (full && !transposed_dims (ss))
6282 if (se->direct_byref && !se->byref_noassign)
6284 /* Copy the descriptor for pointer assignments. */
6285 gfc_add_modify (&se->pre, se->expr, desc)
6287 /* Add any offsets from subreferences. */
6288 gfc_get_dataptr_offset (&se->pre, se->expr, desc, NULL_TREE,
6289 subref_array_target, expr);
6291 else if (se->want_pointer)
6293 /* We pass full arrays directly. This means that pointers and
6294 allocatable arrays should also work. */
6295 se->expr = gfc_build_addr_expr (NULL_TREE, desc);
6302 if (expr->ts.type == BT_CHARACTER)
6303 se->string_length = gfc_get_expr_charlen (expr);
6311 /* We don't need to copy data in some cases. */
6312 arg = gfc_get_noncopying_intrinsic_argument (expr);
6315 /* This is a call to transpose... */
6316 gcc_assert (expr->value.function.isym->id == GFC_ISYM_TRANSPOSE);
6317 /* ... which has already been handled by the scalarizer, so
6318 that we just need to get its argument's descriptor. */
6319 gfc_conv_expr_descriptor (se, expr->value.function.actual->expr, ss);
6323 /* A transformational function return value will be a temporary
6324 array descriptor. We still need to go through the scalarizer
6325 to create the descriptor. Elemental functions are handled as
6326 arbitrary expressions, i.e. copy to a temporary. */
6328 if (se->direct_byref)
6330 gcc_assert (ss_type == GFC_SS_FUNCTION && ss_expr == expr);
6332 /* For pointer assignments pass the descriptor directly. */
6336 gcc_assert (se->ss == ss);
6337 se->expr = gfc_build_addr_expr (NULL_TREE, se->expr);
6338 gfc_conv_expr (se, expr);
6342 if (ss_expr != expr || ss_type != GFC_SS_FUNCTION)
6344 if (ss_expr != expr)
6345 /* Elemental function. */
6346 gcc_assert ((expr->value.function.esym != NULL
6347 && expr->value.function.esym->attr.elemental)
6348 || (expr->value.function.isym != NULL
6349 && expr->value.function.isym->elemental)
6350 || gfc_inline_intrinsic_function_p (expr));
6352 gcc_assert (ss_type == GFC_SS_INTRINSIC);
6355 if (expr->ts.type == BT_CHARACTER
6356 && expr->ts.u.cl->length->expr_type != EXPR_CONSTANT)
6357 get_array_charlen (expr, se);
6363 /* Transformational function. */
6364 info = &ss_info->data.array;
6370 /* Constant array constructors don't need a temporary. */
6371 if (ss_type == GFC_SS_CONSTRUCTOR
6372 && expr->ts.type != BT_CHARACTER
6373 && gfc_constant_array_constructor_p (expr->value.constructor))
6376 info = &ss_info->data.array;
6386 /* Something complicated. Copy it into a temporary. */
6392 /* If we are creating a temporary, we don't need to bother about aliases
6397 gfc_init_loopinfo (&loop);
6399 /* Associate the SS with the loop. */
6400 gfc_add_ss_to_loop (&loop, ss);
6402 /* Tell the scalarizer not to bother creating loop variables, etc. */
6404 loop.array_parameter = 1;
6406 /* The right-hand side of a pointer assignment mustn't use a temporary. */
6407 gcc_assert (!se->direct_byref);
6409 /* Setup the scalarizing loops and bounds. */
6410 gfc_conv_ss_startstride (&loop);
6414 if (expr->ts.type == BT_CHARACTER && !expr->ts.u.cl->backend_decl)
6415 get_array_charlen (expr, se);
6417 /* Tell the scalarizer to make a temporary. */
6418 loop.temp_ss = gfc_get_temp_ss (gfc_typenode_for_spec (&expr->ts),
6419 ((expr->ts.type == BT_CHARACTER)
6420 ? expr->ts.u.cl->backend_decl
6424 se->string_length = loop.temp_ss->info->string_length;
6425 gcc_assert (loop.temp_ss->dimen == loop.dimen);
6426 gfc_add_ss_to_loop (&loop, loop.temp_ss);
6429 gfc_conv_loop_setup (&loop, & expr->where);
6433 /* Copy into a temporary and pass that. We don't need to copy the data
6434 back because expressions and vector subscripts must be INTENT_IN. */
6435 /* TODO: Optimize passing function return values. */
6439 /* Start the copying loops. */
6440 gfc_mark_ss_chain_used (loop.temp_ss, 1);
6441 gfc_mark_ss_chain_used (ss, 1);
6442 gfc_start_scalarized_body (&loop, &block);
6444 /* Copy each data element. */
6445 gfc_init_se (&lse, NULL);
6446 gfc_copy_loopinfo_to_se (&lse, &loop);
6447 gfc_init_se (&rse, NULL);
6448 gfc_copy_loopinfo_to_se (&rse, &loop);
6450 lse.ss = loop.temp_ss;
6453 gfc_conv_scalarized_array_ref (&lse, NULL);
6454 if (expr->ts.type == BT_CHARACTER)
6456 gfc_conv_expr (&rse, expr);
6457 if (POINTER_TYPE_P (TREE_TYPE (rse.expr)))
6458 rse.expr = build_fold_indirect_ref_loc (input_location,
6462 gfc_conv_expr_val (&rse, expr);
6464 gfc_add_block_to_block (&block, &rse.pre);
6465 gfc_add_block_to_block (&block, &lse.pre);
6467 lse.string_length = rse.string_length;
6468 tmp = gfc_trans_scalar_assign (&lse, &rse, expr->ts, true,
6469 expr->expr_type == EXPR_VARIABLE
6470 || expr->expr_type == EXPR_ARRAY, true);
6471 gfc_add_expr_to_block (&block, tmp);
6473 /* Finish the copying loops. */
6474 gfc_trans_scalarizing_loops (&loop, &block);
6476 desc = loop.temp_ss->info->data.array.descriptor;
6478 else if (expr->expr_type == EXPR_FUNCTION && !transposed_dims (ss))
6480 desc = info->descriptor;
6481 se->string_length = ss_info->string_length;
6485 /* We pass sections without copying to a temporary. Make a new
6486 descriptor and point it at the section we want. The loop variable
6487 limits will be the limits of the section.
6488 A function may decide to repack the array to speed up access, but
6489 we're not bothered about that here. */
6490 int dim, ndim, codim;
6498 ndim = info->ref ? info->ref->u.ar.dimen : ss->dimen;
6500 if (se->want_coarray)
6502 gfc_array_ref *ar = &info->ref->u.ar;
6504 codim = gfc_get_corank (expr);
6505 for (n = 0; n < codim - 1; n++)
6507 /* Make sure we are not lost somehow. */
6508 gcc_assert (ar->dimen_type[n + ndim] == DIMEN_THIS_IMAGE);
6510 /* Make sure the call to gfc_conv_section_startstride won't
6511 generate unnecessary code to calculate stride. */
6512 gcc_assert (ar->stride[n + ndim] == NULL);
6514 gfc_conv_section_startstride (&loop, ss, n + ndim);
6515 loop.from[n + loop.dimen] = info->start[n + ndim];
6516 loop.to[n + loop.dimen] = info->end[n + ndim];
/* The last codimension only needs its lower bound evaluated.  */
6519 gcc_assert (n == codim - 1);
6520 evaluate_bound (&loop.pre, info->start, ar->start,
6521 info->descriptor, n + ndim, true);
6522 loop.from[n + loop.dimen] = info->start[n + ndim];
6527 /* Set the string_length for a character array. */
6528 if (expr->ts.type == BT_CHARACTER)
6529 se->string_length = gfc_get_expr_charlen (expr);
6531 desc = info->descriptor;
6532 if (se->direct_byref && !se->byref_noassign)
6534 /* For pointer assignments we fill in the destination. */
6536 parmtype = TREE_TYPE (parm);
6540 /* Otherwise make a new one. */
6541 parmtype = gfc_get_element_type (TREE_TYPE (desc));
6542 parmtype = gfc_get_array_type_bounds (parmtype, loop.dimen, codim,
6543 loop.from, loop.to, 0,
6544 GFC_ARRAY_UNKNOWN, false);
6545 parm = gfc_create_var (parmtype, "parm");
6548 offset = gfc_index_zero_node;
6550 /* The following can be somewhat confusing. We have two
6551 descriptors, a new one and the original array.
6552 {parm, parmtype, dim} refer to the new one.
6553 {desc, type, n, loop} refer to the original, which may be
6554 a descriptorless array.
6555 The bounds of the scalarization are the bounds of the section.
6556 We don't have to worry about numeric overflows when calculating
6557 the offsets because all elements are within the array data. */
6559 /* Set the dtype. */
6560 tmp = gfc_conv_descriptor_dtype (parm);
6561 gfc_add_modify (&loop.pre, tmp, gfc_get_dtype (parmtype));
6563 /* Set offset for assignments to pointer only to zero if it is not
6565 if (se->direct_byref
6566 && info->ref && info->ref->u.ar.type != AR_FULL)
6567 base = gfc_index_zero_node;
6568 else if (GFC_ARRAY_TYPE_P (TREE_TYPE (desc)))
6569 base = gfc_evaluate_now (gfc_conv_array_offset (desc), &loop.pre);
/* Walk the array reference dimensions, accumulating the data offset and
   filling in the bounds/strides of the new descriptor PARM.  */
6573 for (n = 0; n < ndim; n++)
6575 stride = gfc_conv_array_stride (desc, n);
6577 /* Work out the offset. */
6579 && info->ref->u.ar.dimen_type[n] == DIMEN_ELEMENT)
6581 gcc_assert (info->subscript[n]
6582 && info->subscript[n]->info->type == GFC_SS_SCALAR);
6583 start = info->subscript[n]->info->data.scalar.value;
6587 /* Evaluate and remember the start of the section. */
6588 start = info->start[n];
6589 stride = gfc_evaluate_now (stride, &loop.pre);
/* offset += (start - lbound) * stride.  */
6592 tmp = gfc_conv_array_lbound (desc, n);
6593 tmp = fold_build2_loc (input_location, MINUS_EXPR, TREE_TYPE (tmp),
6595 tmp = fold_build2_loc (input_location, MULT_EXPR, TREE_TYPE (tmp),
6597 offset = fold_build2_loc (input_location, PLUS_EXPR, TREE_TYPE (tmp),
6601 && info->ref->u.ar.dimen_type[n] == DIMEN_ELEMENT)
6603 /* For elemental dimensions, we only need the offset. */
6607 /* Vector subscripts need copying and are handled elsewhere. */
6609 gcc_assert (info->ref->u.ar.dimen_type[n] == DIMEN_RANGE);
6611 /* look for the corresponding scalarizer dimension: dim. */
6612 for (dim = 0; dim < ndim; dim++)
6613 if (ss->dim[dim] == n)
6616 /* loop exited early: the DIM being looked for has been found. */
6617 gcc_assert (dim < ndim);
6619 /* Set the new lower bound. */
6620 from = loop.from[dim];
6623 /* If we have an array section or are assigning make sure that
6624 the lower bound is 1. References to the full
6625 array should otherwise keep the original bounds. */
6627 || info->ref->u.ar.type != AR_FULL)
6628 && !integer_onep (from))
6630 tmp = fold_build2_loc (input_location, MINUS_EXPR,
6631 gfc_array_index_type, gfc_index_one_node,
6633 to = fold_build2_loc (input_location, PLUS_EXPR,
6634 gfc_array_index_type, to, tmp);
6635 from = gfc_index_one_node;
6637 gfc_conv_descriptor_lbound_set (&loop.pre, parm,
6638 gfc_rank_cst[dim], from);
6640 /* Set the new upper bound. */
6641 gfc_conv_descriptor_ubound_set (&loop.pre, parm,
6642 gfc_rank_cst[dim], to);
6644 /* Multiply the stride by the section stride to get the
6646 stride = fold_build2_loc (input_location, MULT_EXPR,
6647 gfc_array_index_type,
6648 stride, info->stride[n]);
6650 if (se->direct_byref
6652 && info->ref->u.ar.type != AR_FULL)
6654 base = fold_build2_loc (input_location, MINUS_EXPR,
6655 TREE_TYPE (base), base, stride);
6657 else if (GFC_ARRAY_TYPE_P (TREE_TYPE (desc)))
6659 tmp = gfc_conv_array_lbound (desc, n);
6660 tmp = fold_build2_loc (input_location, MINUS_EXPR,
6661 TREE_TYPE (base), tmp, loop.from[dim]);
6662 tmp = fold_build2_loc (input_location, MULT_EXPR,
6663 TREE_TYPE (base), tmp,
6664 gfc_conv_array_stride (desc, n));
6665 base = fold_build2_loc (input_location, PLUS_EXPR,
6666 TREE_TYPE (base), tmp, base);
6669 /* Store the new stride. */
6670 gfc_conv_descriptor_stride_set (&loop.pre, parm,
6671 gfc_rank_cst[dim], stride);
/* Fill in the codimension bounds; the last codimension gets no
   upper bound.  */
6674 for (n = loop.dimen; n < loop.dimen + codim; n++)
6676 from = loop.from[n];
6678 gfc_conv_descriptor_lbound_set (&loop.pre, parm,
6679 gfc_rank_cst[n], from);
6680 if (n < loop.dimen + codim - 1)
6681 gfc_conv_descriptor_ubound_set (&loop.pre, parm,
6682 gfc_rank_cst[n], to);
6685 if (se->data_not_needed)
6686 gfc_conv_descriptor_data_set (&loop.pre, parm,
6687 gfc_index_zero_node);
6689 /* Point the data pointer at the 1st element in the section. */
6690 gfc_get_dataptr_offset (&loop.pre, parm, desc, offset,
6691 subref_array_target, expr);
6693 if ((se->direct_byref || GFC_ARRAY_TYPE_P (TREE_TYPE (desc)))
6694 && !se->data_not_needed)
6696 /* Set the offset. */
6697 gfc_conv_descriptor_offset_set (&loop.pre, parm, base);
6701 /* Only the callee knows what the correct offset is, so just set
6703 gfc_conv_descriptor_offset_set (&loop.pre, parm, gfc_index_zero_node);
6708 if (!se->direct_byref || se->byref_noassign)
6710 /* Get a pointer to the new descriptor. */
6711 if (se->want_pointer)
6712 se->expr = gfc_build_addr_expr (NULL_TREE, desc);
6717 gfc_add_block_to_block (&se->pre, &loop.pre);
6718 gfc_add_block_to_block (&se->post, &loop.post);
6720 /* Cleanup the scalarizer. */
6721 gfc_cleanup_loop (&loop);
6724 /* Helper function for gfc_conv_array_parameter if array size needs to be
6728 array_parameter_size (tree desc, gfc_expr *expr, tree *size)
/* Descriptorless arrays carry their size in the type; multi-rank
   descriptors call the library size0 routine.  */
6731 if (GFC_ARRAY_TYPE_P (TREE_TYPE (desc)))
6732 *size = GFC_TYPE_ARRAY_SIZE (TREE_TYPE (desc));
6733 else if (expr->rank > 1)
6734 *size = build_call_expr_loc (input_location,
6735 gfor_fndecl_size0, 1,
6736 gfc_build_addr_expr (NULL, desc));
/* Rank-1 descriptor: size = MAX (ubound - lbound + 1, 0), then scaled
   to bytes by the element size below.  */
6739 tree ubound = gfc_conv_descriptor_ubound_get (desc, gfc_index_zero_node);
6740 tree lbound = gfc_conv_descriptor_lbound_get (desc, gfc_index_zero_node);
6742 *size = fold_build2_loc (input_location, MINUS_EXPR,
6743 gfc_array_index_type, ubound, lbound);
6744 *size = fold_build2_loc (input_location, PLUS_EXPR, gfc_array_index_type,
6745 *size, gfc_index_one_node);
6746 *size = fold_build2_loc (input_location, MAX_EXPR, gfc_array_index_type,
6747 *size, gfc_index_zero_node);
6749 elem = TYPE_SIZE_UNIT (gfc_get_element_type (TREE_TYPE (desc)));
6750 *size = fold_build2_loc (input_location, MULT_EXPR, gfc_array_index_type,
6751 *size, fold_convert (gfc_array_index_type, elem));
6754 /* Convert an array for passing as an actual parameter. */
6755 /* TODO: Optimize passing g77 arrays. */
6758 gfc_conv_array_parameter (gfc_se * se, gfc_expr * expr, gfc_ss * ss, bool g77,
6759 const gfc_symbol *fsym, const char *proc_name,
6764 tree tmp = NULL_TREE;
6766 tree parent = DECL_CONTEXT (current_function_decl);
6767 bool full_array_var;
6768 bool this_array_result;
6771 bool array_constructor;
6772 bool good_allocatable;
6773 bool ultimate_ptr_comp;
6774 bool ultimate_alloc_comp;
6779 ultimate_ptr_comp = false;
6780 ultimate_alloc_comp = false;
/* Walk the reference chain to record whether the ultimate component
   referenced is a pointer or an allocatable.  */
6782 for (ref = expr->ref; ref; ref = ref->next)
6784 if (ref->next == NULL)
6787 if (ref->type == REF_COMPONENT)
6789 ultimate_ptr_comp = ref->u.c.component->attr.pointer;
6790 ultimate_alloc_comp = ref->u.c.component->attr.allocatable;
6794 full_array_var = false;
6797 if (expr->expr_type == EXPR_VARIABLE && ref && !ultimate_ptr_comp)
6798 full_array_var = gfc_full_array_ref_p (ref, &contiguous);
6800 sym = full_array_var ? expr->symtree->n.sym : NULL;
6802 /* The symbol should have an array specification. */
6803 gcc_assert (!sym || sym->as || ref->u.ar.as);
6805 if (expr->expr_type == EXPR_ARRAY && expr->ts.type == BT_CHARACTER)
6807 get_array_ctor_strlen (&se->pre, expr->value.constructor, &tmp);
6808 expr->ts.u.cl->backend_decl = tmp;
6809 se->string_length = tmp;
6812 /* Is this the result of the enclosing procedure? */
6813 this_array_result = (full_array_var && sym->attr.flavor == FL_PROCEDURE);
6814 if (this_array_result
6815 && (sym->backend_decl != current_function_decl)
6816 && (sym->backend_decl != parent))
6817 this_array_result = false;
6819 /* Passing address of the array if it is not pointer or assumed-shape. */
6820 if (full_array_var && g77 && !this_array_result)
6822 tmp = gfc_get_symbol_decl (sym);
6824 if (sym->ts.type == BT_CHARACTER)
6825 se->string_length = sym->ts.u.cl->backend_decl;
6827 if (sym->ts.type == BT_DERIVED || sym->ts.type == BT_CLASS)
6829 gfc_conv_expr_descriptor (se, expr, ss);
6830 se->expr = gfc_conv_array_data (se->expr);
6834 if (!sym->attr.pointer
6836 && sym->as->type != AS_ASSUMED_SHAPE
6837 && !sym->attr.allocatable)
6839 /* Some variables are declared directly, others are declared as
6840 pointers and allocated on the heap. */
6841 if (sym->attr.dummy || POINTER_TYPE_P (TREE_TYPE (tmp)))
6844 se->expr = gfc_build_addr_expr (NULL_TREE, tmp);
6846 array_parameter_size (tmp, expr, size);
6850 if (sym->attr.allocatable)
6852 if (sym->attr.dummy || sym->attr.result)
6854 gfc_conv_expr_descriptor (se, expr, ss);
6858 array_parameter_size (tmp, expr, size);
6859 se->expr = gfc_conv_array_data (tmp);
6864 /* A convenient reduction in scope. */
6865 contiguous = g77 && !this_array_result && contiguous;
6867 /* There is no need to pack and unpack the array, if it is contiguous
6868 and not a deferred- or assumed-shape array, or if it is simply
6870 no_pack = ((sym && sym->as
6871 && !sym->attr.pointer
6872 && sym->as->type != AS_DEFERRED
6873 && sym->as->type != AS_ASSUMED_SHAPE)
6875 (ref && ref->u.ar.as
6876 && ref->u.ar.as->type != AS_DEFERRED
6877 && ref->u.ar.as->type != AS_ASSUMED_SHAPE)
6879 gfc_is_simply_contiguous (expr, false));
6881 no_pack = contiguous && no_pack;
6883 /* Array constructors are always contiguous and do not need packing. */
6884 array_constructor = g77 && !this_array_result && expr->expr_type == EXPR_ARRAY;
6886 /* Same is true of contiguous sections from allocatable variables. */
6887 good_allocatable = contiguous
6889 && expr->symtree->n.sym->attr.allocatable;
6891 /* Or ultimate allocatable components. */
6892 ultimate_alloc_comp = contiguous && ultimate_alloc_comp;
/* In all of these cases the data can be passed as-is: just hand over
   the data pointer from the descriptor.  */
6894 if (no_pack || array_constructor || good_allocatable || ultimate_alloc_comp)
6896 gfc_conv_expr_descriptor (se, expr, ss);
6897 if (expr->ts.type == BT_CHARACTER)
6898 se->string_length = expr->ts.u.cl->backend_decl;
6900 array_parameter_size (se->expr, expr, size);
6901 se->expr = gfc_conv_array_data (se->expr);
6905 if (this_array_result)
6907 /* Result of the enclosing function. */
6908 gfc_conv_expr_descriptor (se, expr, ss);
6910 array_parameter_size (se->expr, expr, size);
6911 se->expr = gfc_build_addr_expr (NULL_TREE, se->expr);
6913 if (g77 && TREE_TYPE (TREE_TYPE (se->expr)) != NULL_TREE
6914 && GFC_DESCRIPTOR_TYPE_P (TREE_TYPE (TREE_TYPE (se->expr))))
6915 se->expr = gfc_conv_array_data (build_fold_indirect_ref_loc (input_location,
6922 /* Every other type of array. */
6923 se->want_pointer = 1;
6924 gfc_conv_expr_descriptor (se, expr, ss);
6926 array_parameter_size (build_fold_indirect_ref_loc (input_location,
6931 /* Deallocate the allocatable components of structures that are
6933 if ((expr->ts.type == BT_DERIVED || expr->ts.type == BT_CLASS)
6934 && expr->ts.u.derived->attr.alloc_comp
6935 && expr->expr_type != EXPR_VARIABLE)
6937 tmp = build_fold_indirect_ref_loc (input_location, se->expr);
6938 tmp = gfc_deallocate_alloc_comp (expr->ts.u.derived, tmp, expr->rank);
6940 /* The components shall be deallocated before their containing entity. */
6941 gfc_prepend_expr_to_block (&se->post, tmp);
/* Pack the data through the library's internal_pack when passing by
   reference to an implicit interface (g77) or to a CONTIGUOUS dummy
   that cannot be proven contiguous at compile time.  */
6944 if (g77 || (fsym && fsym->attr.contiguous
6945 && !gfc_is_simply_contiguous (expr, false)))
6947 tree origptr = NULL_TREE;
6951 /* For contiguous arrays, save the original value of the descriptor. */
6954 origptr = gfc_create_var (pvoid_type_node, "origptr");
6955 tmp = build_fold_indirect_ref_loc (input_location, desc);
6956 tmp = gfc_conv_array_data (tmp);
6957 tmp = fold_build2_loc (input_location, MODIFY_EXPR,
6958 TREE_TYPE (origptr), origptr,
6959 fold_convert (TREE_TYPE (origptr), tmp));
6960 gfc_add_expr_to_block (&se->pre, tmp);
6963 /* Repack the array. */
6964 if (gfc_option.warn_array_temp)
6967 gfc_warning ("Creating array temporary at %L for argument '%s'",
6968 &expr->where, fsym->name);
6970 gfc_warning ("Creating array temporary at %L", &expr->where);
6973 ptr = build_call_expr_loc (input_location,
6974 gfor_fndecl_in_pack, 1, desc);
/* An absent OPTIONAL argument must stay a null pointer rather than
   being packed.  */
6976 if (fsym && fsym->attr.optional && sym && sym->attr.optional)
6978 tmp = gfc_conv_expr_present (sym);
6979 ptr = build3_loc (input_location, COND_EXPR, TREE_TYPE (se->expr),
6980 tmp, fold_convert (TREE_TYPE (se->expr), ptr),
6981 fold_convert (TREE_TYPE (se->expr), null_pointer_node));
6984 ptr = gfc_evaluate_now (ptr, &se->pre);
6986 /* Use the packed data for the actual argument, except for contiguous arrays,
6987 where the descriptor's data component is set. */
6992 tmp = build_fold_indirect_ref_loc (input_location, desc);
6993 gfc_conv_descriptor_data_set (&se->pre, tmp, ptr);
6996 if (gfc_option.rtcheck & GFC_RTCHECK_ARRAY_TEMPS)
7000 if (fsym && proc_name)
7001 asprintf (&msg, "An array temporary was created for argument "
7002 "'%s' of procedure '%s'", fsym->name, proc_name);
7004 asprintf (&msg, "An array temporary was created");
7006 tmp = build_fold_indirect_ref_loc (input_location,
7008 tmp = gfc_conv_array_data (tmp);
7009 tmp = fold_build2_loc (input_location, NE_EXPR, boolean_type_node,
7010 fold_convert (TREE_TYPE (tmp), ptr), tmp);
7012 if (fsym && fsym->attr.optional && sym && sym->attr.optional)
7013 tmp = fold_build2_loc (input_location, TRUTH_AND_EXPR,
7015 gfc_conv_expr_present (sym), tmp);
7017 gfc_trans_runtime_check (false, true, tmp, &se->pre,
/* Build the post-call cleanup: unpack (copy back) unless INTENT(IN),
   then free the packed temporary.  */
7022 gfc_start_block (&block);
7024 /* Copy the data back. */
7025 if (fsym == NULL || fsym->attr.intent != INTENT_IN)
7027 tmp = build_call_expr_loc (input_location,
7028 gfor_fndecl_in_unpack, 2, desc, ptr);
7029 gfc_add_expr_to_block (&block, tmp);
7032 /* Free the temporary. */
7033 tmp = gfc_call_free (convert (pvoid_type_node, ptr));
7034 gfc_add_expr_to_block (&block, tmp);
7036 stmt = gfc_finish_block (&block);
7038 gfc_init_block (&block);
7039 /* Only if it was repacked. This code needs to be executed before the
7040 loop cleanup code. */
7041 tmp = build_fold_indirect_ref_loc (input_location,
7043 tmp = gfc_conv_array_data (tmp);
7044 tmp = fold_build2_loc (input_location, NE_EXPR, boolean_type_node,
7045 fold_convert (TREE_TYPE (tmp), ptr), tmp);
7047 if (fsym && fsym->attr.optional && sym && sym->attr.optional)
7048 tmp = fold_build2_loc (input_location, TRUTH_AND_EXPR,
7050 gfc_conv_expr_present (sym), tmp);
7052 tmp = build3_v (COND_EXPR, tmp, stmt, build_empty_stmt (input_location));
7054 gfc_add_expr_to_block (&block, tmp);
7055 gfc_add_block_to_block (&block, &se->post);
7057 gfc_init_block (&se->post);
7059 /* Reset the descriptor pointer. */
7062 tmp = build_fold_indirect_ref_loc (input_location, desc);
7063 gfc_conv_descriptor_data_set (&se->post, tmp, origptr);
7066 gfc_add_block_to_block (&se->post, &block);
7071 /* Generate code to deallocate an array, if it is allocated. */
7074 gfc_trans_dealloc_allocated (tree descriptor, bool coarray)
7080 gfc_start_block (&block);
7082 var = gfc_conv_descriptor_data_get (descriptor);
7085 /* Call array_deallocate with an int * present in the second argument.
7086 Although it is ignored here, its presence ensures that arrays that
7087 are already deallocated are ignored. */
7088 tmp = gfc_deallocate_with_status (coarray ? descriptor : var, NULL_TREE,
7089 NULL_TREE, NULL_TREE, NULL_TREE, true,
7091 gfc_add_expr_to_block (&block, tmp);
7093 /* Zero the data pointer. */
7094 tmp = fold_build2_loc (input_location, MODIFY_EXPR, void_type_node,
7095 var, build_int_cst (TREE_TYPE (var), 0));
7096 gfc_add_expr_to_block (&block, tmp);
7098 return gfc_finish_block (&block);
7102 /* This helper function calculates the size in words of a full array.
   NOTE(review): the value computed below is an element count:
   (ubound - lbound + 1) of the last dimension times that dimension's
   stride, i.e. the total number of elements of the full array.  */
7105 get_full_array_size (stmtblock_t *block, tree decl, int rank)
7110 idx = gfc_rank_cst[rank - 1];
7111 nelems = gfc_conv_descriptor_ubound_get (decl, idx);
7112 tmp = gfc_conv_descriptor_lbound_get (decl, idx);
7113 tmp = fold_build2_loc (input_location, MINUS_EXPR, gfc_array_index_type,
7115 tmp = fold_build2_loc (input_location, PLUS_EXPR, gfc_array_index_type,
7116 tmp, gfc_index_one_node);
7117 tmp = gfc_evaluate_now (tmp, block);
7119 nelems = gfc_conv_descriptor_stride_get (decl, idx);
7120 tmp = fold_build2_loc (input_location, MULT_EXPR, gfc_array_index_type,
7122 return gfc_evaluate_now (tmp, block);
7126 /* Allocate dest to the same size as src, and copy src -> dest.
7127 If no_malloc is set, only the copy is done. */
7130 duplicate_allocatable (tree dest, tree src, tree type, int rank,
7140 /* If the source is null, set the destination to null. Then,
7141 allocate memory to the destination. */
7142 gfc_init_block (&block);
/* Scalar (rank 0) case: DEST is a bare pointer, not a descriptor.  */
7146 tmp = null_pointer_node;
7147 tmp = fold_build2_loc (input_location, MODIFY_EXPR, type, dest, tmp);
7148 gfc_add_expr_to_block (&block, tmp);
7149 null_data = gfc_finish_block (&block);
7151 gfc_init_block (&block);
7152 size = TYPE_SIZE_UNIT (TREE_TYPE (type));
7155 tmp = gfc_call_malloc (&block, type, size);
7156 tmp = fold_build2_loc (input_location, MODIFY_EXPR, void_type_node,
7157 dest, fold_convert (type, tmp));
7158 gfc_add_expr_to_block (&block, tmp);
7161 tmp = builtin_decl_explicit (BUILT_IN_MEMCPY);
7162 tmp = build_call_expr_loc (input_location, tmp, 3,
/* Array case: null the descriptor's data pointer when SRC is null.  */
7167 gfc_conv_descriptor_data_set (&block, dest, null_pointer_node);
7168 null_data = gfc_finish_block (&block);
7170 gfc_init_block (&block);
/* Byte size = number of elements * element size.  */
7171 nelems = get_full_array_size (&block, src, rank);
7172 tmp = fold_convert (gfc_array_index_type,
7173 TYPE_SIZE_UNIT (gfc_get_element_type (type)));
7174 size = fold_build2_loc (input_location, MULT_EXPR, gfc_array_index_type,
7178 tmp = TREE_TYPE (gfc_conv_descriptor_data_get (src));
7179 tmp = gfc_call_malloc (&block, tmp, size);
7180 gfc_conv_descriptor_data_set (&block, dest, tmp);
7183 /* We know the temporary and the value will be the same length,
7184 so can use memcpy. */
7185 tmp = builtin_decl_explicit (BUILT_IN_MEMCPY);
7186 tmp = build_call_expr_loc (input_location,
7187 tmp, 3, gfc_conv_descriptor_data_get (dest),
7188 gfc_conv_descriptor_data_get (src), size);
7191 gfc_add_expr_to_block (&block, tmp);
7192 tmp = gfc_finish_block (&block);
7194 /* Null the destination if the source is null; otherwise do
7195 the allocate and copy. */
7199 null_cond = gfc_conv_descriptor_data_get (src);
7201 null_cond = convert (pvoid_type_node, null_cond);
7202 null_cond = fold_build2_loc (input_location, NE_EXPR, boolean_type_node,
7203 null_cond, null_pointer_node);
7204 return build3_v (COND_EXPR, null_cond, tmp, null_data);
7208 /* Allocate dest to the same size as src, and copy data src -> dest.
   Thin public wrapper: allocate-and-copy (no_malloc == false).  */
7211 gfc_duplicate_allocatable (tree dest, tree src, tree type, int rank)
7213 return duplicate_allocatable (dest, src, type, rank, false);
7217 /* Copy data src -> dest.
   Thin public wrapper: copy only, no allocation (no_malloc == true).  */
7220 gfc_copy_allocatable_data (tree dest, tree src, tree type, int rank)
7222 return duplicate_allocatable (dest, src, type, rank, true);
7226 /* Recursively traverse an object of derived type, generating code to
7227 deallocate, nullify or copy allocatable components. This is the workhorse
7228 function for the functions named in this enum. */
/* Purpose codes passed to structure_alloc_comps.  */
7230 enum {DEALLOCATE_ALLOC_COMP = 1, NULLIFY_ALLOC_COMP, COPY_ALLOC_COMP,
7231 COPY_ONLY_ALLOC_COMP};
7234 structure_alloc_comps (gfc_symbol * der_type, tree decl,
7235 tree dest, int rank, int purpose)
7239 stmtblock_t fnblock;
7240 stmtblock_t loopbody;
7251 tree null_cond = NULL_TREE;
7253 gfc_init_block (&fnblock);
7255 decl_type = TREE_TYPE (decl);
7257 if ((POINTER_TYPE_P (decl_type) && rank != 0)
7258 || (TREE_CODE (decl_type) == REFERENCE_TYPE && rank == 0))
7260 decl = build_fold_indirect_ref_loc (input_location,
7263 /* Just in case in gets dereferenced. */
7264 decl_type = TREE_TYPE (decl);
7266 /* If this an array of derived types with allocatable components
7267 build a loop and recursively call this function. */
7268 if (TREE_CODE (decl_type) == ARRAY_TYPE
7269 || GFC_DESCRIPTOR_TYPE_P (decl_type))
7271 tmp = gfc_conv_array_data (decl);
7272 var = build_fold_indirect_ref_loc (input_location,
7275 /* Get the number of elements - 1 and set the counter. */
7276 if (GFC_DESCRIPTOR_TYPE_P (decl_type))
7278 /* Use the descriptor for an allocatable array. Since this
7279 is a full array reference, we only need the descriptor
7280 information from dimension = rank. */
7281 tmp = get_full_array_size (&fnblock, decl, rank);
7282 tmp = fold_build2_loc (input_location, MINUS_EXPR,
7283 gfc_array_index_type, tmp,
7284 gfc_index_one_node);
7286 null_cond = gfc_conv_descriptor_data_get (decl);
7287 null_cond = fold_build2_loc (input_location, NE_EXPR,
7288 boolean_type_node, null_cond,
7289 build_int_cst (TREE_TYPE (null_cond), 0));
7293 /* Otherwise use the TYPE_DOMAIN information. */
7294 tmp = array_type_nelts (decl_type);
7295 tmp = fold_convert (gfc_array_index_type, tmp);
7298 /* Remember that this is, in fact, the no. of elements - 1. */
7299 nelems = gfc_evaluate_now (tmp, &fnblock);
7300 index = gfc_create_var (gfc_array_index_type, "S");
7302 /* Build the body of the loop. */
7303 gfc_init_block (&loopbody);
7305 vref = gfc_build_array_ref (var, index, NULL);
7307 if (purpose == COPY_ALLOC_COMP)
7309 if (GFC_DESCRIPTOR_TYPE_P (TREE_TYPE (dest)))
7311 tmp = gfc_duplicate_allocatable (dest, decl, decl_type, rank);
7312 gfc_add_expr_to_block (&fnblock, tmp);
7314 tmp = build_fold_indirect_ref_loc (input_location,
7315 gfc_conv_array_data (dest));
7316 dref = gfc_build_array_ref (tmp, index, NULL);
7317 tmp = structure_alloc_comps (der_type, vref, dref, rank, purpose);
7319 else if (purpose == COPY_ONLY_ALLOC_COMP)
7321 tmp = build_fold_indirect_ref_loc (input_location,
7322 gfc_conv_array_data (dest));
7323 dref = gfc_build_array_ref (tmp, index, NULL);
7324 tmp = structure_alloc_comps (der_type, vref, dref, rank,
7328 tmp = structure_alloc_comps (der_type, vref, NULL_TREE, rank, purpose);
7330 gfc_add_expr_to_block (&loopbody, tmp);
7332 /* Build the loop and return. */
7333 gfc_init_loopinfo (&loop);
7335 loop.from[0] = gfc_index_zero_node;
7336 loop.loopvar[0] = index;
7337 loop.to[0] = nelems;
7338 gfc_trans_scalarizing_loops (&loop, &loopbody);
7339 gfc_add_block_to_block (&fnblock, &loop.pre);
7341 tmp = gfc_finish_block (&fnblock);
7342 if (null_cond != NULL_TREE)
7343 tmp = build3_v (COND_EXPR, null_cond, tmp,
7344 build_empty_stmt (input_location));
7349 /* Otherwise, act on the components or recursively call self to
7350 act on a chain of components. */
7351 for (c = der_type->components; c; c = c->next)
7353 bool cmp_has_alloc_comps = (c->ts.type == BT_DERIVED
7354 || c->ts.type == BT_CLASS)
7355 && c->ts.u.derived->attr.alloc_comp;
7356 cdecl = c->backend_decl;
7357 ctype = TREE_TYPE (cdecl);
7361 case DEALLOCATE_ALLOC_COMP:
7362 if (cmp_has_alloc_comps && !c->attr.pointer)
7364 /* Do not deallocate the components of ultimate pointer
7366 comp = fold_build3_loc (input_location, COMPONENT_REF, ctype,
7367 decl, cdecl, NULL_TREE);
7368 rank = c->as ? c->as->rank : 0;
7369 tmp = structure_alloc_comps (c->ts.u.derived, comp, NULL_TREE,
7371 gfc_add_expr_to_block (&fnblock, tmp);
7374 if (c->attr.allocatable
7375 && (c->attr.dimension || c->attr.codimension))
7377 comp = fold_build3_loc (input_location, COMPONENT_REF, ctype,
7378 decl, cdecl, NULL_TREE);
7379 tmp = gfc_trans_dealloc_allocated (comp, c->attr.codimension);
7380 gfc_add_expr_to_block (&fnblock, tmp);
7382 else if (c->attr.allocatable)
7384 /* Allocatable scalar components. */
7385 comp = fold_build3_loc (input_location, COMPONENT_REF, ctype,
7386 decl, cdecl, NULL_TREE);
7388 tmp = gfc_deallocate_scalar_with_status (comp, NULL, true, NULL,
7390 gfc_add_expr_to_block (&fnblock, tmp);
7392 tmp = fold_build2_loc (input_location, MODIFY_EXPR,
7393 void_type_node, comp,
7394 build_int_cst (TREE_TYPE (comp), 0));
7395 gfc_add_expr_to_block (&fnblock, tmp);
7397 else if (c->ts.type == BT_CLASS && CLASS_DATA (c)->attr.allocatable)
7399 /* Allocatable CLASS components. */
7400 comp = fold_build3_loc (input_location, COMPONENT_REF, ctype,
7401 decl, cdecl, NULL_TREE);
7403 /* Add reference to '_data' component. */
7404 tmp = CLASS_DATA (c)->backend_decl;
7405 comp = fold_build3_loc (input_location, COMPONENT_REF,
7406 TREE_TYPE (tmp), comp, tmp, NULL_TREE);
7408 if (GFC_DESCRIPTOR_TYPE_P(TREE_TYPE (comp)))
7409 tmp = gfc_trans_dealloc_allocated (comp,
7410 CLASS_DATA (c)->attr.codimension);
7413 tmp = gfc_deallocate_scalar_with_status (comp, NULL, true, NULL,
7414 CLASS_DATA (c)->ts);
7415 gfc_add_expr_to_block (&fnblock, tmp);
7417 tmp = fold_build2_loc (input_location, MODIFY_EXPR,
7418 void_type_node, comp,
7419 build_int_cst (TREE_TYPE (comp), 0));
7421 gfc_add_expr_to_block (&fnblock, tmp);
7425 case NULLIFY_ALLOC_COMP:
7426 if (c->attr.pointer)
7428 else if (c->attr.allocatable
7429 && (c->attr.dimension|| c->attr.codimension))
7431 comp = fold_build3_loc (input_location, COMPONENT_REF, ctype,
7432 decl, cdecl, NULL_TREE);
7433 gfc_conv_descriptor_data_set (&fnblock, comp, null_pointer_node);
7435 else if (c->attr.allocatable)
7437 /* Allocatable scalar components. */
7438 comp = fold_build3_loc (input_location, COMPONENT_REF, ctype,
7439 decl, cdecl, NULL_TREE);
7440 tmp = fold_build2_loc (input_location, MODIFY_EXPR,
7441 void_type_node, comp,
7442 build_int_cst (TREE_TYPE (comp), 0));
7443 gfc_add_expr_to_block (&fnblock, tmp);
7445 else if (c->ts.type == BT_CLASS && CLASS_DATA (c)->attr.allocatable)
7447 /* Allocatable CLASS components. */
7448 comp = fold_build3_loc (input_location, COMPONENT_REF, ctype,
7449 decl, cdecl, NULL_TREE);
7450 /* Add reference to '_data' component. */
7451 tmp = CLASS_DATA (c)->backend_decl;
7452 comp = fold_build3_loc (input_location, COMPONENT_REF,
7453 TREE_TYPE (tmp), comp, tmp, NULL_TREE);
7454 if (GFC_DESCRIPTOR_TYPE_P(TREE_TYPE (comp)))
7455 gfc_conv_descriptor_data_set (&fnblock, comp, null_pointer_node);
7458 tmp = fold_build2_loc (input_location, MODIFY_EXPR,
7459 void_type_node, comp,
7460 build_int_cst (TREE_TYPE (comp), 0));
7461 gfc_add_expr_to_block (&fnblock, tmp);
7464 else if (cmp_has_alloc_comps)
7466 comp = fold_build3_loc (input_location, COMPONENT_REF, ctype,
7467 decl, cdecl, NULL_TREE);
7468 rank = c->as ? c->as->rank : 0;
7469 tmp = structure_alloc_comps (c->ts.u.derived, comp, NULL_TREE,
7471 gfc_add_expr_to_block (&fnblock, tmp);
7475 case COPY_ALLOC_COMP:
7476 if (c->attr.pointer)
7479 /* We need source and destination components. */
7480 comp = fold_build3_loc (input_location, COMPONENT_REF, ctype, decl,
7482 dcmp = fold_build3_loc (input_location, COMPONENT_REF, ctype, dest,
7484 dcmp = fold_convert (TREE_TYPE (comp), dcmp);
7486 if (c->attr.allocatable && !cmp_has_alloc_comps)
7488 rank = c->as ? c->as->rank : 0;
7489 tmp = gfc_duplicate_allocatable (dcmp, comp, ctype, rank);
7490 gfc_add_expr_to_block (&fnblock, tmp);
7493 if (cmp_has_alloc_comps)
7495 rank = c->as ? c->as->rank : 0;
7496 tmp = fold_convert (TREE_TYPE (dcmp), comp);
7497 gfc_add_modify (&fnblock, dcmp, tmp);
7498 tmp = structure_alloc_comps (c->ts.u.derived, comp, dcmp,
7500 gfc_add_expr_to_block (&fnblock, tmp);
7510 return gfc_finish_block (&fnblock);
7513 /* Recursively traverse an object of derived type, generating code to
7514 nullify allocatable components. */
/* Thin wrapper: dispatches to structure_alloc_comps in NULLIFY_ALLOC_COMP
   mode.  No destination object is involved, so NULL_TREE is passed for the
   "dest" argument.  DER_TYPE is the derived-type symbol, DECL the backend
   decl of the object, RANK its array rank (0 for scalars).  */
7517 gfc_nullify_alloc_comp (gfc_symbol * der_type, tree decl, int rank)
7519 return structure_alloc_comps (der_type, decl, NULL_TREE, rank,
7520 NULLIFY_ALLOC_COMP);
7524 /* Recursively traverse an object of derived type, generating code to
7525 deallocate allocatable components. */
/* Thin wrapper: dispatches to structure_alloc_comps in DEALLOCATE_ALLOC_COMP
   mode; NULL_TREE for "dest" since deallocation needs no destination.  */
7528 gfc_deallocate_alloc_comp (gfc_symbol * der_type, tree decl, int rank)
7530 return structure_alloc_comps (der_type, decl, NULL_TREE, rank,
7531 DEALLOCATE_ALLOC_COMP);
7535 /* Recursively traverse an object of derived type, generating code to
7536 copy it and its allocatable components. */
/* Thin wrapper: COPY_ALLOC_COMP mode copies DECL into DEST, deep-copying
   allocatable components along the way.  */
7539 gfc_copy_alloc_comp (gfc_symbol * der_type, tree decl, tree dest, int rank)
7541 return structure_alloc_comps (der_type, decl, dest, rank, COPY_ALLOC_COMP);
7545 /* Recursively traverse an object of derived type, generating code to
7546 copy only its allocatable components. */
/* Thin wrapper: COPY_ONLY_ALLOC_COMP mode copies just the allocatable
   components of DECL into DEST, not the object itself.  */
7549 gfc_copy_only_alloc_comp (gfc_symbol * der_type, tree decl, tree dest, int rank)
7551 return structure_alloc_comps (der_type, decl, dest, rank, COPY_ONLY_ALLOC_COMP);
7555 /* Returns the value of LBOUND for an expression. This could be broken out
7556 from gfc_conv_intrinsic_bound but this seemed to be simpler. This is
7557 called by gfc_alloc_allocatable_for_assignment. */
/* Three cases are handled below:
   (1) DESC is a full array descriptor: build a runtime COND_EXPR selecting
       between the descriptor's lbound and one;
   (2) EXPR is a conversion function: recurse on the conversion argument;
   (3) EXPR is a variable with a full-array component ref: take the compile
       time lbound from the array type.
   Otherwise the result defaults to one.
   NOTE(review): several source lines are elided in this view (e.g. the
   second operand of the GE_EXPR at 7574 and the constant at 7584), so the
   exact conditions cannot be fully confirmed from here.  */
7559 get_std_lbound (gfc_expr *expr, tree desc, int dim, bool assumed_size)
7564 tree cond, cond1, cond3, cond4;
7568 if (GFC_DESCRIPTOR_TYPE_P (TREE_TYPE (desc)))
/* Descriptor case: fetch bound/stride triplet for dimension DIM.  */
7570 tmp = gfc_rank_cst[dim];
7571 lbound = gfc_conv_descriptor_lbound_get (desc, tmp);
7572 ubound = gfc_conv_descriptor_ubound_get (desc, tmp);
7573 stride = gfc_conv_descriptor_stride_get (desc, tmp);
7574 cond1 = fold_build2_loc (input_location, GE_EXPR, boolean_type_node,
/* cond3: non-negative stride combined (AND) with cond1.  */
7576 cond3 = fold_build2_loc (input_location, GE_EXPR, boolean_type_node,
7577 stride, gfc_index_zero_node);
7578 cond3 = fold_build2_loc (input_location, TRUTH_AND_EXPR,
7579 boolean_type_node, cond3, cond1);
/* cond4: negative stride.  */
7580 cond4 = fold_build2_loc (input_location, LT_EXPR, boolean_type_node,
7581 stride, gfc_index_zero_node);
7583 cond = fold_build2_loc (input_location, EQ_EXPR, boolean_type_node,
7584 tmp, build_int_cst (gfc_array_index_type,
7587 cond = boolean_false_node;
/* Use the descriptor lbound when any of the conditions holds, else 1.  */
7589 cond1 = fold_build2_loc (input_location, TRUTH_OR_EXPR,
7590 boolean_type_node, cond3, cond4);
7591 cond = fold_build2_loc (input_location, TRUTH_OR_EXPR,
7592 boolean_type_node, cond, cond1);
7594 return fold_build3_loc (input_location, COND_EXPR,
7595 gfc_array_index_type, cond,
7596 lbound, gfc_index_one_node);
7599 if (expr->expr_type == EXPR_FUNCTION)
7601 /* A conversion function, so use the argument. */
7602 gcc_assert (expr->value.function.isym
7603 && expr->value.function.isym->conversion)&#59;
7604 expr = expr->value.function.actual->expr;
7607 if (expr->expr_type == EXPR_VARIABLE)
/* Variable case: walk the ref chain looking for a component whose array
   spec applies, and read the static lbound off the array type.  */
7609 tmp = TREE_TYPE (expr->symtree->n.sym->backend_decl);
7610 for (ref = expr->ref; ref; ref = ref->next)
7612 if (ref->type == REF_COMPONENT
7613 && ref->u.c.component->as
7615 && ref->next->u.ar.type == AR_FULL)
7616 tmp = TREE_TYPE (ref->u.c.component->backend_decl);
7618 return GFC_TYPE_ARRAY_LBOUND(tmp, dim);
/* Fallback: lbound of 1 per Fortran default.  */
7621 return gfc_index_one_node;
7625 /* Returns true if an expression represents an lhs that can be reallocated
/* Predicate used by the (re)allocate-on-assignment machinery (F2003 7.4.1.3).
   True for (a) a full-array reference to an allocatable variable, or (b) a
   full-array reference to an allocatable component of a derived/class
   object.  NOTE(review): the early checks (e.g. for a NULL ref chain) are
   elided in this view — the visible logic starts at the allocatable
   variable test.  */
7629 gfc_is_reallocatable_lhs (gfc_expr *expr)
7636 /* An allocatable variable. */
7637 if (expr->symtree->n.sym->attr.allocatable
7639 && expr->ref->type == REF_ARRAY
7640 && expr->ref->u.ar.type == AR_FULL)
7643 /* All that can be left are allocatable components. */
/* Bail out unless the base symbol is a derived/class type that actually
   carries allocatable components.  */
7644 if ((expr->symtree->n.sym->ts.type != BT_DERIVED
7645 && expr->symtree->n.sym->ts.type != BT_CLASS)
7646 || !expr->symtree->n.sym->ts.u.derived->attr.alloc_comp)
7649 /* Find a component ref followed by an array reference. */
7650 for (ref = expr->ref; ref; ref = ref->next)
7652 && ref->type == REF_COMPONENT
7653 && ref->next->type == REF_ARRAY
7654 && !ref->next->next)
7660 /* Return true if valid reallocatable lhs. */
7661 if (ref->u.c.component->attr.allocatable
7662 && ref->next->u.ar.type == AR_FULL)
7669 /* Allocate the lhs of an assignment to an allocatable array, otherwise
/* Implements F2003 automatic (re)allocation on intrinsic assignment.
   Generates a block that: checks whether the lhs is unallocated or has a
   different shape than the rhs; if so, recomputes bounds/strides/offset in
   the lhs descriptor and then malloc's (if unallocated) or realloc's (if
   size differs) the data, keeping the scalarizer's cached variables in
   sync.  Returns the finished statement block.
   NOTE(review): many lines are elided in this view (declarations, some
   operands, closing braces); comments below describe only what is
   visible.  */
7673 gfc_alloc_allocatable_for_assignment (gfc_loopinfo *loop,
7677 stmtblock_t realloc_block;
7678 stmtblock_t alloc_block;
7682 gfc_array_info *linfo;
7702 gfc_array_spec * as;
7704 /* x = f(...) with x allocatable. In this case, expr1 is the rhs.
7705 Find the lhs expression in the loop chain and set expr1 and
7706 expr2 accordingly. */
7707 if (expr1->expr_type == EXPR_FUNCTION && expr2 == NULL)
7710 /* Find the ss for the lhs. */
7712 for (; lss && lss != gfc_ss_terminator; lss = lss->loop_chain)
7713 if (lss->info->expr && lss->info->expr->expr_type == EXPR_VARIABLE)
7715 if (lss == gfc_ss_terminator)
7717 expr1 = lss->info->expr;
7720 /* Bail out if this is not a valid allocate on assignment. */
7721 if (!gfc_is_reallocatable_lhs (expr1)
7722 || (expr2 && !expr2->rank))
7725 /* Find the ss for the lhs. */
7727 for (; lss && lss != gfc_ss_terminator; lss = lss->loop_chain)
7728 if (lss->info->expr == expr1)
7731 if (lss == gfc_ss_terminator)
7734 linfo = &lss->info->data.array;
7736 /* Find an ss for the rhs. For operator expressions, we see the
7737 ss's for the operands. Any one of these will do. */
7739 for (; rss && rss != gfc_ss_terminator; rss = rss->loop_chain)
7740 if (rss->info->expr != expr1 && rss != loop->temp_ss)
7743 if (expr2 && rss == gfc_ss_terminator)
7746 gfc_start_block (&fblock);
7748 /* Since the lhs is allocatable, this must be a descriptor type.
7749 Get the data and array size. */
7750 desc = linfo->descriptor;
7751 gcc_assert (GFC_DESCRIPTOR_TYPE_P (TREE_TYPE (desc)));
7752 array1 = gfc_conv_descriptor_data_get (desc);
7754 /* 7.4.1.3 "If variable is an allocated allocatable variable, it is
7755 deallocated if expr is an array of different shape or any of the
7756 corresponding length type parameter values of variable and expr
7757 differ." This assures F95 compatibility. */
7758 jump_label1 = gfc_build_label_decl (NULL_TREE);
7759 jump_label2 = gfc_build_label_decl (NULL_TREE);
7761 /* Allocate if data is NULL. */
7762 cond = fold_build2_loc (input_location, EQ_EXPR, boolean_type_node,
7763 array1, build_int_cst (TREE_TYPE (array1), 0));
7764 tmp = build3_v (COND_EXPR, cond,
7765 build1_v (GOTO_EXPR, jump_label1),
7766 build_empty_stmt (input_location));
7767 gfc_add_expr_to_block (&fblock, tmp);
7769 /* Get arrayspec if expr is a full array. */
7770 if (expr2 && expr2->expr_type == EXPR_FUNCTION
7771 && expr2->value.function.isym
7772 && expr2->value.function.isym->conversion)
7774 /* For conversion functions, take the arg. */
7775 gfc_expr *arg = expr2->value.function.actual->expr;
7776 as = gfc_get_full_arrayspec_from_expr (arg);
7779 as = gfc_get_full_arrayspec_from_expr (expr2);
7783 /* If the lhs shape is not the same as the rhs jump to setting the
7784 bounds and doing the reallocation....... */
7785 for (n = 0; n < expr1->rank; n++)
7787 /* Check the shape. */
7788 lbound = gfc_conv_descriptor_lbound_get (desc, gfc_rank_cst[n]);
7789 ubound = gfc_conv_descriptor_ubound_get (desc, gfc_rank_cst[n]);
/* Extent of the rhs in this dimension: (to - from) + 1.  */
7790 tmp = fold_build2_loc (input_location, MINUS_EXPR,
7791 gfc_array_index_type,
7792 loop->to[n], loop->from[n]);
7793 tmp = fold_build2_loc (input_location, PLUS_EXPR,
7794 gfc_array_index_type,
7796 tmp = fold_build2_loc (input_location, MINUS_EXPR,
7797 gfc_array_index_type,
7799 cond = fold_build2_loc (input_location, NE_EXPR,
7801 tmp, gfc_index_zero_node);
7802 tmp = build3_v (COND_EXPR, cond,
7803 build1_v (GOTO_EXPR, jump_label1),
7804 build_empty_stmt (input_location));
7805 gfc_add_expr_to_block (&fblock, tmp);
7808 /* ....else jump past the (re)alloc code. */
7809 tmp = build1_v (GOTO_EXPR, jump_label2);
7810 gfc_add_expr_to_block (&fblock, tmp);
7812 /* Add the label to start automatic (re)allocation. */
7813 tmp = build1_v (LABEL_EXPR, jump_label1);
7814 gfc_add_expr_to_block (&fblock, tmp);
7816 size1 = gfc_conv_descriptor_size (desc, expr1->rank);
7818 /* Get the rhs size. Fix both sizes. */
7820 desc2 = rss->info->data.array.descriptor;
7823 size2 = gfc_index_one_node;
/* size2 = product over all dimensions of (to - from + 1).  */
7824 for (n = 0; n < expr2->rank; n++)
7826 tmp = fold_build2_loc (input_location, MINUS_EXPR,
7827 gfc_array_index_type,
7828 loop->to[n], loop->from[n]);
7829 tmp = fold_build2_loc (input_location, PLUS_EXPR,
7830 gfc_array_index_type,
7831 tmp, gfc_index_one_node);
7832 size2 = fold_build2_loc (input_location, MULT_EXPR,
7833 gfc_array_index_type,
7837 size1 = gfc_evaluate_now (size1, &fblock);
7838 size2 = gfc_evaluate_now (size2, &fblock);
/* Remember whether the element counts differ; used later to decide
   whether realloc is actually needed.  */
7840 cond = fold_build2_loc (input_location, NE_EXPR, boolean_type_node,
7842 neq_size = gfc_evaluate_now (cond, &fblock);
7845 /* Now modify the lhs descriptor and the associated scalarizer
7846 variables. F2003 7.4.1.3: "If variable is or becomes an
7847 unallocated allocatable variable, then it is allocated with each
7848 deferred type parameter equal to the corresponding type parameters
7849 of expr , with the shape of expr , and with each lower bound equal
7850 to the corresponding element of LBOUND(expr)."
7851 Reuse size1 to keep a dimension-by-dimension track of the
7852 stride of the new array. */
7853 size1 = gfc_index_one_node;
7854 offset = gfc_index_zero_node;
7856 for (n = 0; n < expr2->rank; n++)
7858 tmp = fold_build2_loc (input_location, MINUS_EXPR,
7859 gfc_array_index_type,
7860 loop->to[n], loop->from[n]);
7861 tmp = fold_build2_loc (input_location, PLUS_EXPR,
7862 gfc_array_index_type,
7863 tmp, gfc_index_one_node);
7865 lbound = gfc_index_one_node;
/* When a full-array rhs spec exists, take its standard lbound and derive
   ubound from it (elided lines presumably add the extent).  */
7870 lbd = get_std_lbound (expr2, desc2, n,
7871 as->type == AS_ASSUMED_SIZE);
7872 ubound = fold_build2_loc (input_location,
7874 gfc_array_index_type,
7876 ubound = fold_build2_loc (input_location,
7878 gfc_array_index_type,
/* Write the new bounds and running stride into the lhs descriptor.  */
7883 gfc_conv_descriptor_lbound_set (&fblock, desc,
7886 gfc_conv_descriptor_ubound_set (&fblock, desc,
7889 gfc_conv_descriptor_stride_set (&fblock, desc,
7892 lbound = gfc_conv_descriptor_lbound_get (desc,
/* Accumulate offset -= lbound * stride, and size1 *= extent.  */
7894 tmp2 = fold_build2_loc (input_location, MULT_EXPR,
7895 gfc_array_index_type,
7897 offset = fold_build2_loc (input_location, MINUS_EXPR,
7898 gfc_array_index_type,
7900 size1 = fold_build2_loc (input_location, MULT_EXPR,
7901 gfc_array_index_type,
7905 /* Set the lhs descriptor and scalarizer offsets. For rank > 1,
7906 the array offset is saved and the info.offset is used for a
7907 running offset. Use the saved_offset instead. */
7908 tmp = gfc_conv_descriptor_offset (desc);
7909 gfc_add_modify (&fblock, tmp, offset);
7910 if (linfo->saved_offset
7911 && TREE_CODE (linfo->saved_offset) == VAR_DECL)
7912 gfc_add_modify (&fblock, linfo->saved_offset, tmp);
7914 /* Now set the deltas for the lhs. */
7915 for (n = 0; n < expr1->rank; n++)
7917 tmp = gfc_conv_descriptor_lbound_get (desc, gfc_rank_cst[n]);
7919 tmp = fold_build2_loc (input_location, MINUS_EXPR,
7920 gfc_array_index_type, tmp,
7922 if (linfo->delta[dim]
7923 && TREE_CODE (linfo->delta[dim]) == VAR_DECL)
7924 gfc_add_modify (&fblock, linfo->delta[dim], tmp);
7927 /* Get the new lhs size in bytes. */
7928 if (expr1->ts.type == BT_CHARACTER && expr1->ts.deferred)
/* Deferred-length character: propagate the rhs length into the lhs
   length variable.  */
7930 tmp = expr2->ts.u.cl->backend_decl;
7931 gcc_assert (expr1->ts.u.cl->backend_decl);
7932 tmp = fold_convert (TREE_TYPE (expr1->ts.u.cl->backend_decl), tmp);
7933 gfc_add_modify (&fblock, expr1->ts.u.cl->backend_decl, tmp);
7935 else if (expr1->ts.type == BT_CHARACTER && expr1->ts.u.cl->backend_decl)
7937 tmp = TYPE_SIZE_UNIT (TREE_TYPE (gfc_typenode_for_spec (&expr1->ts)));
7938 tmp = fold_build2_loc (input_location, MULT_EXPR,
7939 gfc_array_index_type, tmp,
7940 expr1->ts.u.cl->backend_decl);
7943 tmp = TYPE_SIZE_UNIT (gfc_typenode_for_spec (&expr1->ts));
7944 tmp = fold_convert (gfc_array_index_type, tmp);
/* Total byte size = element count * element size.  */
7945 size2 = fold_build2_loc (input_location, MULT_EXPR,
7946 gfc_array_index_type,
7948 size2 = fold_convert (size_type_node, size2);
7949 size2 = gfc_evaluate_now (size2, &fblock);
7951 /* Realloc expression. Note that the scalarizer uses desc.data
7952 in the array reference - (*desc.data)[<element>]. */
7953 gfc_init_block (&realloc_block);
7954 tmp = build_call_expr_loc (input_location,
7955 builtin_decl_explicit (BUILT_IN_REALLOC), 2,
7956 fold_convert (pvoid_type_node, array1),
7958 gfc_conv_descriptor_data_set (&realloc_block,
7960 realloc_expr = gfc_finish_block (&realloc_block);
7962 /* Only reallocate if sizes are different. */
7963 tmp = build3_v (COND_EXPR, neq_size, realloc_expr,
7964 build_empty_stmt (input_location));
7968 /* Malloc expression. */
7969 gfc_init_block (&alloc_block);
7970 tmp = build_call_expr_loc (input_location,
7971 builtin_decl_explicit (BUILT_IN_MALLOC),
7973 gfc_conv_descriptor_data_set (&alloc_block,
/* Freshly malloc'ed descriptor also needs its dtype initialized.  */
7975 tmp = gfc_conv_descriptor_dtype (desc);
7976 gfc_add_modify (&alloc_block, tmp, gfc_get_dtype (TREE_TYPE (desc)));
7977 alloc_expr = gfc_finish_block (&alloc_block);
7979 /* Malloc if not allocated; realloc otherwise. */
7980 tmp = build_int_cst (TREE_TYPE (array1), 0);
7981 cond = fold_build2_loc (input_location, EQ_EXPR,
7984 tmp = build3_v (COND_EXPR, cond, alloc_expr, realloc_expr);
7985 gfc_add_expr_to_block (&fblock, tmp);
7987 /* Make sure that the scalarizer data pointer is updated. */
7989 && TREE_CODE (linfo->data) == VAR_DECL)
7991 tmp = gfc_conv_descriptor_data_get (desc);
7992 gfc_add_modify (&fblock, linfo->data, tmp);
7995 /* Add the exit label. */
7996 tmp = build1_v (LABEL_EXPR, jump_label2);
7997 gfc_add_expr_to_block (&fblock, tmp);
7999 return gfc_finish_block (&fblock);
8003 /* NULLIFY an allocatable/pointer array on function entry, free it on exit.
8004 Do likewise, recursively if necessary, with the allocatable components of
/* Emits the entry (init) and exit (cleanup) code for a deferred-shape array
   symbol into BLOCK: nullifies the data pointer or allocatable components on
   entry, and deallocates on scope exit unless the symbol is SAVEd, a dummy,
   use-associated, or a function result.  */
8008 gfc_trans_deferred_array (gfc_symbol * sym, gfc_wrapped_block * block)
8014 stmtblock_t cleanup;
8017 bool sym_has_alloc_comp;
8019 sym_has_alloc_comp = (sym->ts.type == BT_DERIVED
8020 || sym->ts.type == BT_CLASS)
8021 && sym->ts.u.derived->attr.alloc_comp;
8023 /* Make sure the frontend gets these right. */
8024 if (!(sym->attr.pointer || sym->attr.allocatable || sym_has_alloc_comp))
8025 fatal_error ("Possible front-end bug: Deferred array size without pointer, "
8026 "allocatable attribute or derived type without allocatable "
8029 gfc_save_backend_locus (&loc);
8030 gfc_set_backend_locus (&sym->declared_at);
8031 gfc_init_block (&init);
8033 gcc_assert (TREE_CODE (sym->backend_decl) == VAR_DECL
8034 || TREE_CODE (sym->backend_decl) == PARM_DECL);
8036 if (sym->ts.type == BT_CHARACTER
8037 && !INTEGER_CST_P (sym->ts.u.cl->backend_decl))
/* Non-constant character length: evaluate it once on entry.  */
8039 gfc_conv_string_length (sym->ts.u.cl, NULL, &init);
8040 gfc_trans_vla_type_sizes (sym, &init);
8043 /* Dummy, use associated and result variables don't need anything special. */
8044 if (sym->attr.dummy || sym->attr.use_assoc || sym->attr.result)
/* Early exit: only the string-length init (if any) is emitted.  */
8046 gfc_add_init_cleanup (block, gfc_finish_block (&init), NULL_TREE);
8047 gfc_restore_backend_locus (&loc);
8051 descriptor = sym->backend_decl;
8053 /* Although static, derived types with default initializers and
8054 allocatable components must not be nulled wholesale; instead they
8055 are treated component by component. */
8056 if (TREE_STATIC (descriptor) && !sym_has_alloc_comp)
8058 /* SAVEd variables are not freed on exit. */
8059 gfc_trans_static_array_pointer (sym);
8061 gfc_add_init_cleanup (block, gfc_finish_block (&init), NULL_TREE);
8062 gfc_restore_backend_locus (&loc);
8066 /* Get the descriptor type. */
8067 type = TREE_TYPE (sym->backend_decl);
8069 if (sym_has_alloc_comp && !(sym->attr.pointer || sym->attr.allocatable))
8072 && !(TREE_STATIC (sym->backend_decl) && sym->attr.is_main_program))
8074 if (sym->value == NULL
8075 || !gfc_has_default_initializer (sym->ts.u.derived))
/* No default initializer: nullify allocatable components recursively.  */
8077 rank = sym->as ? sym->as->rank : 0;
8078 tmp = gfc_nullify_alloc_comp (sym->ts.u.derived,
8080 gfc_add_expr_to_block (&init, tmp);
/* Otherwise apply the default initializer instead.  */
8083 gfc_init_default_dt (sym, &init, false);
8086 else if (!GFC_DESCRIPTOR_TYPE_P (type))
8088 /* If the backend_decl is not a descriptor, we must have a pointer
8090 descriptor = build_fold_indirect_ref_loc (input_location,
8092 type = TREE_TYPE (descriptor);
8095 /* NULLIFY the data pointer. */
8096 if (GFC_DESCRIPTOR_TYPE_P (type) && !sym->attr.save)
8097 gfc_conv_descriptor_data_set (&init, descriptor, null_pointer_node);
8099 gfc_restore_backend_locus (&loc);
8100 gfc_init_block (&cleanup);
8102 /* Allocatable arrays need to be freed when they go out of scope.
8103 The allocatable components of pointers must not be touched. */
8104 if (sym_has_alloc_comp && !(sym->attr.function || sym->attr.result)
8105 && !sym->attr.pointer && !sym->attr.save)
8108 rank = sym->as ? sym->as->rank : 0;
8109 tmp = gfc_deallocate_alloc_comp (sym->ts.u.derived, descriptor, rank);
8110 gfc_add_expr_to_block (&cleanup, tmp);
8113 if (sym->attr.allocatable && (sym->attr.dimension || sym->attr.codimension)
8114 && !sym->attr.save && !sym->attr.result)
8116 tmp = gfc_trans_dealloc_allocated (sym->backend_decl,
8117 sym->attr.codimension);
8118 gfc_add_expr_to_block (&cleanup, tmp);
8121 gfc_add_init_cleanup (block, gfc_finish_block (&init),
8122 gfc_finish_block (&cleanup));
8125 /************ Expression Walking Functions ******************/
8127 /* Walk a variable reference.
8129 Possible extension - multiple component subscripts.
8130 x(:,:) = foo%a(:)%b(:)
8132 forall (i=..., j=...)
8133 x(i,j) = foo%a(j)%b(i)
8135 This adds a fair amount of complexity because you need to deal with more
8136 than one ref. Maybe handle in a similar manner to vector subscripts.
8137 Maybe not worth the effort. */
/* Scans the ref chain for the first non-elemental array reference and hands
   off to gfc_walk_array_ref from there; a purely scalar reference falls
   through (elided here, presumably returning SS unchanged).  */
8141 gfc_walk_variable_expr (gfc_ss * ss, gfc_expr * expr)
8145 for (ref = expr->ref; ref; ref = ref->next)
8146 if (ref->type == REF_ARRAY && ref->u.ar.type != AR_ELEMENT)
8149 return gfc_walk_array_ref (ss, expr, ref);
/* Builds SS chain entries for the array (and substring) references in a
   ref chain, starting at REF.  Full arrays are normalized to explicit
   section form (array -> array(:,:)); sections get per-dimension subscript
   SS entries (scalar subscripts, ranges, and vector subscripts).
   NOTE(review): the switch/case labels for AR_ELEMENT/AR_FULL/AR_SECTION
   and the DIMEN_* cases are partly elided in this view.  */
8154 gfc_walk_array_ref (gfc_ss * ss, gfc_expr * expr, gfc_ref * ref)
8160 for (; ref; ref = ref->next)
8162 if (ref->type == REF_SUBSTRING)
/* Substring bounds are scalar subexpressions; chain them as scalar SS.  */
8164 ss = gfc_get_scalar_ss (ss, ref->u.ss.start);
8165 ss = gfc_get_scalar_ss (ss, ref->u.ss.end);
8168 /* We're only interested in array sections from now on. */
8169 if (ref->type != REF_ARRAY)
/* Elemental reference: each subscript is a scalar SS.  */
8177 for (n = ar->dimen - 1; n >= 0; n--)
8178 ss = gfc_get_scalar_ss (ss, ar->start[n]);
/* Full-array reference: create a section SS covering the whole rank.  */
8182 newss = gfc_get_array_ss (ss, expr, ar->as->rank, GFC_SS_SECTION);
8183 newss->info->data.array.ref = ref;
8185 /* Make sure array is the same as array(:,:), this way
8186 we don't need to special case all the time. */
8187 ar->dimen = ar->as->rank;
8188 for (n = 0; n < ar->dimen; n++)
8190 ar->dimen_type[n] = DIMEN_RANGE;
8192 gcc_assert (ar->start[n] == NULL);
8193 gcc_assert (ar->end[n] == NULL);
8194 gcc_assert (ar->stride[n] == NULL);
/* Section reference: rank 0 here; the dims are filled in below.  */
8200 newss = gfc_get_array_ss (ss, expr, 0, GFC_SS_SECTION);
8201 newss->info->data.array.ref = ref;
8203 /* We add SS chains for all the subscripts in the section. */
8204 for (n = 0; n < ar->dimen; n++)
8208 switch (ar->dimen_type[n])
8211 /* Add SS for elemental (scalar) subscripts. */
8212 gcc_assert (ar->start[n]);
8213 indexss = gfc_get_scalar_ss (gfc_ss_terminator, ar->start[n]);
8214 indexss->loop_chain = gfc_ss_terminator;
8215 newss->info->data.array.subscript[n] = indexss;
8219 /* We don't add anything for sections, just remember this
8220 dimension for later. */
8221 newss->dim[newss->dimen] = n;
8226 /* Create a GFC_SS_VECTOR index in which we can store
8227 the vector's descriptor. */
8228 indexss = gfc_get_array_ss (gfc_ss_terminator, ar->start[n],
8230 indexss->loop_chain = gfc_ss_terminator;
8231 newss->info->data.array.subscript[n] = indexss;
8232 newss->dim[newss->dimen] = n;
8237 /* We should know what sort of section it is by now. */
8241 /* We should have at least one non-elemental dimension,
8242 unless we are creating a descriptor for a (scalar) coarray. */
8243 gcc_assert (newss->dimen > 0
8244 || newss->info->data.array.ref->u.ar.as->corank > 0);
8249 /* We should know what sort of section it is by now. */
8258 /* Walk an expression operator. If only one operand of a binary expression is
8259 scalar, we must also add the scalar term to the SS chain. */
/* Walks both operands; possible outcomes:
   - both scalar: return the chain unchanged (caller handles it);
   - both array: both sub-chains are already linked in;
   - mixed: insert a scalar SS for the scalar operand so the scalarizer
     sees every term.  The chain is built in reverse order.  */
8262 gfc_walk_op_expr (gfc_ss * ss, gfc_expr * expr)
8267 head = gfc_walk_subexpr (ss, expr->value.op.op1);
8268 if (expr->value.op.op2 == NULL)
8271 head2 = gfc_walk_subexpr (head, expr->value.op.op2);
8273 /* All operands are scalar. Pass back and let the caller deal with it. */
8277 /* All operands require scalarization. */
8278 if (head != ss && (expr->value.op.op2 == NULL || head2 != head))
8281 /* One of the operands needs scalarization, the other is scalar.
8282 Create a gfc_ss for the scalar expression. */
8285 /* First operand is scalar. We build the chain in reverse order, so
8286 add the scalar SS after the second operand. */
8288 while (head && head->next != ss)
8290 /* Check we haven't somehow broken the chain. */
8292 head->next = gfc_get_scalar_ss (ss, expr->value.op.op1);
8294 else /* head2 == head */
8296 gcc_assert (head2 == head);
8297 /* Second operand is scalar. */
8298 head2 = gfc_get_scalar_ss (head2, expr->value.op.op2);
8305 /* Reverse a SS chain. */
/* Iteratively reverses the singly-linked SS chain terminated by
   gfc_ss_terminator and returns the new head.  NOTE(review): the lines
   performing the actual pointer swap are elided in this view.  */
8308 gfc_reverse_ss (gfc_ss * ss)
8313 gcc_assert (ss != NULL);
8315 head = gfc_ss_terminator;
8316 while (ss != gfc_ss_terminator)
8319 /* Check we didn't somehow break the chain. */
8320 gcc_assert (next != NULL);
8330 /* Walk the arguments of an elemental function.
8331 PROC_EXPR is used to check whether an argument is permitted to be absent. If
8332 it is NULL, we don't do the check and the argument is assumed to be present.
/* Builds one SS per actual argument.  Array arguments are walked normally;
   scalar arguments get a scalar SS of the given TYPE (GFC_SS_SCALAR or
   GFC_SS_REFERENCE).  An optional dummy paired with an optional actual
   symbol (no refs) is flagged can_be_null_ref so the trans phase guards
   the dereference.  If every argument turns out scalar the chain is freed
   and discarded.  */
8336 gfc_walk_elemental_function_args (gfc_ss * ss, gfc_actual_arglist *arg,
8337 gfc_expr *proc_expr, gfc_ss_type type)
8339 gfc_formal_arglist *dummy_arg;
8345 head = gfc_ss_terminator;
8352 /* Normal procedure case. */
8353 dummy_arg = proc_expr->symtree->n.sym->formal;
8355 /* Typebound procedure case. */
8356 for (ref = proc_expr->ref; ref; ref = ref->next)
/* Walk to the proc-pointer component to find the formal arg list.  */
8358 if (ref->type == REF_COMPONENT
8359 && ref->u.c.component->attr.proc_pointer
8360 && ref->u.c.component->ts.interface)
8361 dummy_arg = ref->u.c.component->ts.interface->formal;
8370 for (; arg; arg = arg->next)
8375 newss = gfc_walk_subexpr (head, arg->expr);
8378 /* Scalar argument. */
8379 gcc_assert (type == GFC_SS_SCALAR || type == GFC_SS_REFERENCE);
8380 newss = gfc_get_scalar_ss (head, arg->expr);
8381 newss->info->type = type;
8383 if (dummy_arg != NULL
8384 && dummy_arg->sym->attr.optional
8385 && arg->expr->symtree
8386 && arg->expr->symtree->n.sym->attr.optional
8387 && arg->expr->ref == NULL)
8388 newss->info->data.scalar.can_be_null_ref = true;
/* Find the tail of the sub-chain just added.  */
8397 while (tail->next != gfc_ss_terminator)
/* Advance the formal argument in step with the actual one.  */
8401 if (dummy_arg != NULL)
8402 dummy_arg = dummy_arg->next;
8407 /* If all the arguments are scalar we don't need the argument SS. */
8408 gfc_free_ss_chain (head);
8413 /* Add it onto the existing chain. */
8419 /* Walk a function call. Scalar functions are passed back, and taken out of
8420 scalarization loops. For elemental functions we walk their arguments.
8421 The result of functions returning arrays is stored in a temporary outside
8422 the loop, so that the function is only called once. Hence we do not need
8423 to walk their arguments. */
8426 gfc_walk_function_expr (gfc_ss * ss, gfc_expr * expr)
8428 gfc_intrinsic_sym *isym;
8430 gfc_component *comp = NULL;
8432 isym = expr->value.function.isym;
8434 /* Handle intrinsic functions separately. */
8436 return gfc_walk_intrinsic_function (ss, expr, isym);
/* Resolve the called symbol: explicit interface if present, else the
   symtree symbol.  */
8438 sym = expr->value.function.esym;
8440 sym = expr->symtree->n.sym;
8442 /* A function that returns arrays. */
8443 gfc_is_proc_ptr_comp (expr, &comp);
8444 if ((!comp && gfc_return_by_reference (sym) && sym->result->attr.dimension)
8445 || (comp && comp->attr.dimension))
8446 return gfc_get_array_ss (ss, expr, expr->rank, GFC_SS_FUNCTION);
8448 /* Walk the parameters of an elemental function. For now we always pass
8450 if (sym->attr.elemental || (comp && comp->attr.elemental))
8451 return gfc_walk_elemental_function_args (ss, expr->value.function.actual,
8452 expr, GFC_SS_REFERENCE);
8454 /* Scalar functions are OK as these are evaluated outside the scalarization
8455 loop. Pass back and let the caller deal with it. */
8460 /* An array temporary is constructed for array constructors. */
/* Wraps the constructor in a GFC_SS_CONSTRUCTOR entry of the expression's
   rank; the temporary itself is built later by the scalarizer.  */
8463 gfc_walk_array_constructor (gfc_ss * ss, gfc_expr * expr)
8465 return gfc_get_array_ss (ss, expr, expr->rank, GFC_SS_CONSTRUCTOR);
8469 /* Walk an expression. Add walked expressions to the head of the SS chain.
8470 A wholly scalar expression will not be added. */
/* Central dispatch on expr_type.  Constants, structures and substrings are
   passed back unchanged (scalar); anything else is routed to the matching
   specialized walker.  NOTE(review): the case labels themselves are partly
   elided in this view.  */
8473 gfc_walk_subexpr (gfc_ss * ss, gfc_expr * expr)
8477 switch (expr->expr_type)
8480 head = gfc_walk_variable_expr (ss, expr);
8484 head = gfc_walk_op_expr (ss, expr);
8488 head = gfc_walk_function_expr (ss, expr);
8493 case EXPR_STRUCTURE:
8494 /* Pass back and let the caller deal with it. */
8498 head = gfc_walk_array_constructor (ss, expr);
8501 case EXPR_SUBSTRING:
8502 /* Pass back and let the caller deal with it. */
8506 internal_error ("bad expression type during walk (%d)",
8513 /* Entry point for expression walking.
8514 A return value equal to the passed chain means this is
8515 a scalar expression. It is up to the caller to take whatever action is
8516 necessary to translate these. */
/* Walks EXPR from the terminator sentinel, then reverses the chain so it
   reads in evaluation order.  Returning gfc_ss_terminator unchanged means
   EXPR is wholly scalar.  */
8519 gfc_walk_expr (gfc_expr * expr)
8523 res = gfc_walk_subexpr (gfc_ss_terminator, expr);
8524 return gfc_reverse_ss (res);