1 /* Lowering pass for OpenMP directives. Converts OpenMP directives
2 into explicit calls to the runtime library (libgomp) and data
3 marshalling to implement data sharing and copying clauses.
4 Contributed by Diego Novillo <dnovillo@redhat.com>
6 Copyright (C) 2005, 2006, 2007, 2008, 2009, 2010, 2011
7 Free Software Foundation, Inc.
9 This file is part of GCC.
11 GCC is free software; you can redistribute it and/or modify it under
12 the terms of the GNU General Public License as published by the Free
13 Software Foundation; either version 3, or (at your option) any later
16 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
17 WARRANTY; without even the implied warranty of MERCHANTABILITY or
18 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
21 You should have received a copy of the GNU General Public License
22 along with GCC; see the file COPYING3. If not see
23 <http://www.gnu.org/licenses/>. */
27 #include "coretypes.h"
32 #include "tree-iterator.h"
33 #include "tree-inline.h"
34 #include "langhooks.h"
35 #include "diagnostic-core.h"
36 #include "tree-flow.h"
41 #include "tree-pass.h"
44 #include "splay-tree.h"
49 /* Lowering of OpenMP parallel and workshare constructs proceeds in two
50 phases. The first phase scans the function looking for OMP statements
51 and then for variables that must be replaced to satisfy data sharing
52 clauses. The second phase expands code for the constructs, as well as
53 re-gimplifying things when variables have been replaced with complex
56 Final code generation is done by pass_expand_omp. The flowgraph is
57 scanned for parallel regions which are then moved to a new
58 function, to be invoked by the thread library. */
60 /* Context structure. Used to store information about each parallel
61 directive in the code. */
63 typedef struct omp_context
65 /* This field must be at the beginning, as we do "inheritance": Some
66 callback functions for tree-inline.c (e.g., omp_copy_decl)
67 receive a copy_body_data pointer that is up-casted to an
68 omp_context pointer. */
71 /* The tree of contexts corresponding to the encountered constructs. */
72 struct omp_context *outer;
75 /* Map variables to fields in a structure that allows communication
76 between sending and receiving threads. */
82 /* These are used just by task contexts, if task firstprivate fn is
83 needed. srecord_type is used to communicate from the thread
84 that encountered the task construct to task firstprivate fn,
85 record_type is allocated by GOMP_task, initialized by task firstprivate
86 fn and passed to the task body fn. */
87 splay_tree sfield_map;
90 /* A chain of variables to add to the top-level block surrounding the
91 construct. In the case of a parallel, this is in the child function. */
94 /* What to do with variables with implicitly determined sharing
96 enum omp_clause_default_kind default_kind;
98 /* Nesting depth of this context. Used to beautify error messages re
99 invalid gotos. The outermost ctx is depth 1, with depth 0 being
100 reserved for the main body of the function. */
103 /* True if this parallel directive is nested within another. */
108 struct omp_for_data_loop
110 tree v, n1, n2, step;
111 enum tree_code cond_code;
114 /* A structure describing the main elements of a parallel loop. */
118 struct omp_for_data_loop loop;
123 bool have_nowait, have_ordered;
124 enum omp_clause_schedule_kind sched_kind;
125 struct omp_for_data_loop *loops;
129 static splay_tree all_contexts;
130 static int taskreg_nesting_level;
131 struct omp_region *root_omp_region;
132 static bitmap task_shared_vars;
134 static void scan_omp (gimple_seq, omp_context *);
135 static tree scan_omp_1_op (tree *, int *, void *);
137 #define WALK_SUBSTMTS \
141 case GIMPLE_EH_FILTER: \
142 /* The sub-statements for these should be walked. */ \
143 *handled_ops_p = false; \
146 /* Convenience function for calling scan_omp_1_op on tree operands. */
149 scan_omp_op (tree *tp, omp_context *ctx)
151 struct walk_stmt_info wi;
153 memset (&wi, 0, sizeof (wi));
155 wi.want_locations = true;
157 return walk_tree (tp, scan_omp_1_op, &wi, NULL);
160 static void lower_omp (gimple_seq, omp_context *);
161 static tree lookup_decl_in_outer_ctx (tree, omp_context *);
162 static tree maybe_lookup_decl_in_outer_ctx (tree, omp_context *);
164 /* Find an OpenMP clause of type KIND within CLAUSES. */
167 find_omp_clause (tree clauses, enum omp_clause_code kind)
169 for (; clauses ; clauses = OMP_CLAUSE_CHAIN (clauses))
170 if (OMP_CLAUSE_CODE (clauses) == kind)
176 /* Return true if CTX is for an omp parallel. */
179 is_parallel_ctx (omp_context *ctx)
181 return gimple_code (ctx->stmt) == GIMPLE_OMP_PARALLEL;
185 /* Return true if CTX is for an omp task. */
188 is_task_ctx (omp_context *ctx)
190 return gimple_code (ctx->stmt) == GIMPLE_OMP_TASK;
194 /* Return true if CTX is for an omp parallel or omp task. */
197 is_taskreg_ctx (omp_context *ctx)
199 return gimple_code (ctx->stmt) == GIMPLE_OMP_PARALLEL
200 || gimple_code (ctx->stmt) == GIMPLE_OMP_TASK;
204 /* Return true if REGION is a combined parallel+workshare region. */
207 is_combined_parallel (struct omp_region *region)
209 return region->is_combined_parallel;
213 /* Extract the header elements of parallel loop FOR_STMT and store
217 extract_omp_for_data (gimple for_stmt, struct omp_for_data *fd,
218 struct omp_for_data_loop *loops)
220 tree t, var, *collapse_iter, *collapse_count;
221 tree count = NULL_TREE, iter_type = long_integer_type_node;
222 struct omp_for_data_loop *loop;
224 struct omp_for_data_loop dummy_loop;
225 location_t loc = gimple_location (for_stmt);
227 fd->for_stmt = for_stmt;
229 fd->collapse = gimple_omp_for_collapse (for_stmt);
230 if (fd->collapse > 1)
233 fd->loops = &fd->loop;
235 fd->have_nowait = fd->have_ordered = false;
236 fd->sched_kind = OMP_CLAUSE_SCHEDULE_STATIC;
237 fd->chunk_size = NULL_TREE;
238 collapse_iter = NULL;
239 collapse_count = NULL;
241 for (t = gimple_omp_for_clauses (for_stmt); t ; t = OMP_CLAUSE_CHAIN (t))
242 switch (OMP_CLAUSE_CODE (t))
244 case OMP_CLAUSE_NOWAIT:
245 fd->have_nowait = true;
247 case OMP_CLAUSE_ORDERED:
248 fd->have_ordered = true;
250 case OMP_CLAUSE_SCHEDULE:
251 fd->sched_kind = OMP_CLAUSE_SCHEDULE_KIND (t);
252 fd->chunk_size = OMP_CLAUSE_SCHEDULE_CHUNK_EXPR (t);
254 case OMP_CLAUSE_COLLAPSE:
255 if (fd->collapse > 1)
257 collapse_iter = &OMP_CLAUSE_COLLAPSE_ITERVAR (t);
258 collapse_count = &OMP_CLAUSE_COLLAPSE_COUNT (t);
264 /* FIXME: for now map schedule(auto) to schedule(static).
265 There should be analysis to determine whether all iterations
266 are approximately the same amount of work (then schedule(static)
267 is best) or if it varies (then schedule(dynamic,N) is better). */
268 if (fd->sched_kind == OMP_CLAUSE_SCHEDULE_AUTO)
270 fd->sched_kind = OMP_CLAUSE_SCHEDULE_STATIC;
271 gcc_assert (fd->chunk_size == NULL);
273 gcc_assert (fd->collapse == 1 || collapse_iter != NULL);
274 if (fd->sched_kind == OMP_CLAUSE_SCHEDULE_RUNTIME)
275 gcc_assert (fd->chunk_size == NULL);
276 else if (fd->chunk_size == NULL)
278 /* We only need to compute a default chunk size for ordered
279 static loops and dynamic loops. */
280 if (fd->sched_kind != OMP_CLAUSE_SCHEDULE_STATIC
283 fd->chunk_size = (fd->sched_kind == OMP_CLAUSE_SCHEDULE_STATIC)
284 ? integer_zero_node : integer_one_node;
287 for (i = 0; i < fd->collapse; i++)
289 if (fd->collapse == 1)
291 else if (loops != NULL)
297 loop->v = gimple_omp_for_index (for_stmt, i);
298 gcc_assert (SSA_VAR_P (loop->v));
299 gcc_assert (TREE_CODE (TREE_TYPE (loop->v)) == INTEGER_TYPE
300 || TREE_CODE (TREE_TYPE (loop->v)) == POINTER_TYPE);
301 var = TREE_CODE (loop->v) == SSA_NAME ? SSA_NAME_VAR (loop->v) : loop->v;
302 loop->n1 = gimple_omp_for_initial (for_stmt, i);
304 loop->cond_code = gimple_omp_for_cond (for_stmt, i);
305 loop->n2 = gimple_omp_for_final (for_stmt, i);
306 switch (loop->cond_code)
312 if (POINTER_TYPE_P (TREE_TYPE (loop->n2)))
313 loop->n2 = fold_build_pointer_plus_hwi_loc (loc, loop->n2, 1);
315 loop->n2 = fold_build2_loc (loc,
316 PLUS_EXPR, TREE_TYPE (loop->n2), loop->n2,
317 build_int_cst (TREE_TYPE (loop->n2), 1));
318 loop->cond_code = LT_EXPR;
321 if (POINTER_TYPE_P (TREE_TYPE (loop->n2)))
322 loop->n2 = fold_build_pointer_plus_hwi_loc (loc, loop->n2, -1);
324 loop->n2 = fold_build2_loc (loc,
325 MINUS_EXPR, TREE_TYPE (loop->n2), loop->n2,
326 build_int_cst (TREE_TYPE (loop->n2), 1));
327 loop->cond_code = GT_EXPR;
333 t = gimple_omp_for_incr (for_stmt, i);
334 gcc_assert (TREE_OPERAND (t, 0) == var);
335 switch (TREE_CODE (t))
338 case POINTER_PLUS_EXPR:
339 loop->step = TREE_OPERAND (t, 1);
342 loop->step = TREE_OPERAND (t, 1);
343 loop->step = fold_build1_loc (loc,
344 NEGATE_EXPR, TREE_TYPE (loop->step),
351 if (iter_type != long_long_unsigned_type_node)
353 if (POINTER_TYPE_P (TREE_TYPE (loop->v)))
354 iter_type = long_long_unsigned_type_node;
355 else if (TYPE_UNSIGNED (TREE_TYPE (loop->v))
356 && TYPE_PRECISION (TREE_TYPE (loop->v))
357 >= TYPE_PRECISION (iter_type))
361 if (loop->cond_code == LT_EXPR)
362 n = fold_build2_loc (loc,
363 PLUS_EXPR, TREE_TYPE (loop->v),
364 loop->n2, loop->step);
367 if (TREE_CODE (n) != INTEGER_CST
368 || tree_int_cst_lt (TYPE_MAX_VALUE (iter_type), n))
369 iter_type = long_long_unsigned_type_node;
371 else if (TYPE_PRECISION (TREE_TYPE (loop->v))
372 > TYPE_PRECISION (iter_type))
376 if (loop->cond_code == LT_EXPR)
379 n2 = fold_build2_loc (loc,
380 PLUS_EXPR, TREE_TYPE (loop->v),
381 loop->n2, loop->step);
385 n1 = fold_build2_loc (loc,
386 MINUS_EXPR, TREE_TYPE (loop->v),
387 loop->n2, loop->step);
390 if (TREE_CODE (n1) != INTEGER_CST
391 || TREE_CODE (n2) != INTEGER_CST
392 || !tree_int_cst_lt (TYPE_MIN_VALUE (iter_type), n1)
393 || !tree_int_cst_lt (n2, TYPE_MAX_VALUE (iter_type)))
394 iter_type = long_long_unsigned_type_node;
398 if (collapse_count && *collapse_count == NULL)
400 if ((i == 0 || count != NULL_TREE)
401 && TREE_CODE (TREE_TYPE (loop->v)) == INTEGER_TYPE
402 && TREE_CONSTANT (loop->n1)
403 && TREE_CONSTANT (loop->n2)
404 && TREE_CODE (loop->step) == INTEGER_CST)
406 tree itype = TREE_TYPE (loop->v);
408 if (POINTER_TYPE_P (itype))
410 = lang_hooks.types.type_for_size (TYPE_PRECISION (itype), 0);
411 t = build_int_cst (itype, (loop->cond_code == LT_EXPR ? -1 : 1));
412 t = fold_build2_loc (loc,
414 fold_convert_loc (loc, itype, loop->step), t);
415 t = fold_build2_loc (loc, PLUS_EXPR, itype, t,
416 fold_convert_loc (loc, itype, loop->n2));
417 t = fold_build2_loc (loc, MINUS_EXPR, itype, t,
418 fold_convert_loc (loc, itype, loop->n1));
419 if (TYPE_UNSIGNED (itype) && loop->cond_code == GT_EXPR)
420 t = fold_build2_loc (loc, TRUNC_DIV_EXPR, itype,
421 fold_build1_loc (loc, NEGATE_EXPR, itype, t),
422 fold_build1_loc (loc, NEGATE_EXPR, itype,
423 fold_convert_loc (loc, itype,
426 t = fold_build2_loc (loc, TRUNC_DIV_EXPR, itype, t,
427 fold_convert_loc (loc, itype, loop->step));
428 t = fold_convert_loc (loc, long_long_unsigned_type_node, t);
429 if (count != NULL_TREE)
430 count = fold_build2_loc (loc,
431 MULT_EXPR, long_long_unsigned_type_node,
435 if (TREE_CODE (count) != INTEGER_CST)
445 if (!tree_int_cst_lt (count, TYPE_MAX_VALUE (long_integer_type_node)))
446 iter_type = long_long_unsigned_type_node;
448 iter_type = long_integer_type_node;
450 else if (collapse_iter && *collapse_iter != NULL)
451 iter_type = TREE_TYPE (*collapse_iter);
452 fd->iter_type = iter_type;
453 if (collapse_iter && *collapse_iter == NULL)
454 *collapse_iter = create_tmp_var (iter_type, ".iter");
455 if (collapse_count && *collapse_count == NULL)
458 *collapse_count = fold_convert_loc (loc, iter_type, count);
460 *collapse_count = create_tmp_var (iter_type, ".count");
463 if (fd->collapse > 1)
465 fd->loop.v = *collapse_iter;
466 fd->loop.n1 = build_int_cst (TREE_TYPE (fd->loop.v), 0);
467 fd->loop.n2 = *collapse_count;
468 fd->loop.step = build_int_cst (TREE_TYPE (fd->loop.v), 1);
469 fd->loop.cond_code = LT_EXPR;
474 /* Given two blocks PAR_ENTRY_BB and WS_ENTRY_BB such that WS_ENTRY_BB
475 is the immediate dominator of PAR_ENTRY_BB, return true if there
476 are no data dependencies that would prevent expanding the parallel
477 directive at PAR_ENTRY_BB as a combined parallel+workshare region.
479 When expanding a combined parallel+workshare region, the call to
480 the child function may need additional arguments in the case of
481 GIMPLE_OMP_FOR regions. In some cases, these arguments are
482 computed out of variables passed in from the parent to the child
483 via 'struct .omp_data_s'. For instance:
485 #pragma omp parallel for schedule (guided, i * 4)
490 # BLOCK 2 (PAR_ENTRY_BB)
492 #pragma omp parallel [child fn: bar.omp_fn.0 ( ..., D.1598)
494 # BLOCK 3 (WS_ENTRY_BB)
495 .omp_data_i = &.omp_data_o;
496 D.1667 = .omp_data_i->i;
498 #pragma omp for schedule (guided, D.1598)
500 When we outline the parallel region, the call to the child function
501 'bar.omp_fn.0' will need the value D.1598 in its argument list, but
502 that value is computed *after* the call site. So, in principle we
503 cannot do the transformation.
505 To see whether the code in WS_ENTRY_BB blocks the combined
506 parallel+workshare call, we collect all the variables used in the
507 GIMPLE_OMP_FOR header check whether they appear on the LHS of any
508 statement in WS_ENTRY_BB. If so, then we cannot emit the combined
511 FIXME. If we had the SSA form built at this point, we could merely
512 hoist the code in block 3 into block 2 and be done with it. But at
513 this point we don't have dataflow information and though we could
514 hack something up here, it is really not worth the aggravation. */
517 workshare_safe_to_combine_p (basic_block ws_entry_bb)
519 struct omp_for_data fd;
520 gimple ws_stmt = last_stmt (ws_entry_bb);
522 if (gimple_code (ws_stmt) == GIMPLE_OMP_SECTIONS)
525 gcc_assert (gimple_code (ws_stmt) == GIMPLE_OMP_FOR);
527 extract_omp_for_data (ws_stmt, &fd, NULL);
529 if (fd.collapse > 1 && TREE_CODE (fd.loop.n2) != INTEGER_CST)
531 if (fd.iter_type != long_integer_type_node)
534 /* FIXME. We give up too easily here. If any of these arguments
535 are not constants, they will likely involve variables that have
536 been mapped into fields of .omp_data_s for sharing with the child
537 function. With appropriate data flow, it would be possible to
539 if (!is_gimple_min_invariant (fd.loop.n1)
540 || !is_gimple_min_invariant (fd.loop.n2)
541 || !is_gimple_min_invariant (fd.loop.step)
542 || (fd.chunk_size && !is_gimple_min_invariant (fd.chunk_size)))
549 /* Collect additional arguments needed to emit a combined
550 parallel+workshare call. WS_STMT is the workshare directive being
553 static VEC(tree,gc) *
554 get_ws_args_for (gimple ws_stmt)
557 location_t loc = gimple_location (ws_stmt);
558 VEC(tree,gc) *ws_args;
560 if (gimple_code (ws_stmt) == GIMPLE_OMP_FOR)
562 struct omp_for_data fd;
564 extract_omp_for_data (ws_stmt, &fd, NULL);
566 ws_args = VEC_alloc (tree, gc, 3 + (fd.chunk_size != 0));
568 t = fold_convert_loc (loc, long_integer_type_node, fd.loop.n1);
569 VEC_quick_push (tree, ws_args, t);
571 t = fold_convert_loc (loc, long_integer_type_node, fd.loop.n2);
572 VEC_quick_push (tree, ws_args, t);
574 t = fold_convert_loc (loc, long_integer_type_node, fd.loop.step);
575 VEC_quick_push (tree, ws_args, t);
579 t = fold_convert_loc (loc, long_integer_type_node, fd.chunk_size);
580 VEC_quick_push (tree, ws_args, t);
585 else if (gimple_code (ws_stmt) == GIMPLE_OMP_SECTIONS)
587 /* Number of sections is equal to the number of edges from the
588 GIMPLE_OMP_SECTIONS_SWITCH statement, except for the one to
589 the exit of the sections region. */
590 basic_block bb = single_succ (gimple_bb (ws_stmt));
591 t = build_int_cst (unsigned_type_node, EDGE_COUNT (bb->succs) - 1);
592 ws_args = VEC_alloc (tree, gc, 1);
593 VEC_quick_push (tree, ws_args, t);
601 /* Discover whether REGION is a combined parallel+workshare region. */
604 determine_parallel_type (struct omp_region *region)
606 basic_block par_entry_bb, par_exit_bb;
607 basic_block ws_entry_bb, ws_exit_bb;
609 if (region == NULL || region->inner == NULL
610 || region->exit == NULL || region->inner->exit == NULL
611 || region->inner->cont == NULL)
614 /* We only support parallel+for and parallel+sections. */
615 if (region->type != GIMPLE_OMP_PARALLEL
616 || (region->inner->type != GIMPLE_OMP_FOR
617 && region->inner->type != GIMPLE_OMP_SECTIONS))
620 /* Check for perfect nesting PAR_ENTRY_BB -> WS_ENTRY_BB and
621 WS_EXIT_BB -> PAR_EXIT_BB. */
622 par_entry_bb = region->entry;
623 par_exit_bb = region->exit;
624 ws_entry_bb = region->inner->entry;
625 ws_exit_bb = region->inner->exit;
627 if (single_succ (par_entry_bb) == ws_entry_bb
628 && single_succ (ws_exit_bb) == par_exit_bb
629 && workshare_safe_to_combine_p (ws_entry_bb)
630 && (gimple_omp_parallel_combined_p (last_stmt (par_entry_bb))
631 || (last_and_only_stmt (ws_entry_bb)
632 && last_and_only_stmt (par_exit_bb))))
634 gimple ws_stmt = last_stmt (ws_entry_bb);
636 if (region->inner->type == GIMPLE_OMP_FOR)
638 /* If this is a combined parallel loop, we need to determine
639 whether or not to use the combined library calls. There
640 are two cases where we do not apply the transformation:
641 static loops and any kind of ordered loop. In the first
642 case, we already open code the loop so there is no need
643 to do anything else. In the latter case, the combined
644 parallel loop call would still need extra synchronization
645 to implement ordered semantics, so there would not be any
646 gain in using the combined call. */
647 tree clauses = gimple_omp_for_clauses (ws_stmt);
648 tree c = find_omp_clause (clauses, OMP_CLAUSE_SCHEDULE);
650 || OMP_CLAUSE_SCHEDULE_KIND (c) == OMP_CLAUSE_SCHEDULE_STATIC
651 || find_omp_clause (clauses, OMP_CLAUSE_ORDERED))
653 region->is_combined_parallel = false;
654 region->inner->is_combined_parallel = false;
659 region->is_combined_parallel = true;
660 region->inner->is_combined_parallel = true;
661 region->ws_args = get_ws_args_for (ws_stmt);
666 /* Return true if EXPR is variable sized. */
669 is_variable_sized (const_tree expr)
671 return !TREE_CONSTANT (TYPE_SIZE_UNIT (TREE_TYPE (expr)));
674 /* Return true if DECL is a reference type. */
677 is_reference (tree decl)
679 return lang_hooks.decls.omp_privatize_by_reference (decl);
682 /* Lookup variables in the decl or field splay trees. The "maybe" form
683 allows for the variable form to not have been entered, otherwise we
684 assert that the variable must have been entered. */
687 lookup_decl (tree var, omp_context *ctx)
690 n = (tree *) pointer_map_contains (ctx->cb.decl_map, var);
695 maybe_lookup_decl (const_tree var, omp_context *ctx)
698 n = (tree *) pointer_map_contains (ctx->cb.decl_map, var);
699 return n ? *n : NULL_TREE;
703 lookup_field (tree var, omp_context *ctx)
706 n = splay_tree_lookup (ctx->field_map, (splay_tree_key) var);
707 return (tree) n->value;
711 lookup_sfield (tree var, omp_context *ctx)
714 n = splay_tree_lookup (ctx->sfield_map
715 ? ctx->sfield_map : ctx->field_map,
716 (splay_tree_key) var);
717 return (tree) n->value;
721 maybe_lookup_field (tree var, omp_context *ctx)
724 n = splay_tree_lookup (ctx->field_map, (splay_tree_key) var);
725 return n ? (tree) n->value : NULL_TREE;
728 /* Return true if DECL should be copied by pointer. SHARED_CTX is
729 the parallel context if DECL is to be shared. */
732 use_pointer_for_field (tree decl, omp_context *shared_ctx)
734 if (AGGREGATE_TYPE_P (TREE_TYPE (decl)))
737 /* We can only use copy-in/copy-out semantics for shared variables
738 when we know the value is not accessible from an outer scope. */
741 /* ??? Trivially accessible from anywhere. But why would we even
742 be passing an address in this case? Should we simply assert
743 this to be false, or should we have a cleanup pass that removes
744 these from the list of mappings? */
745 if (TREE_STATIC (decl) || DECL_EXTERNAL (decl))
748 /* For variables with DECL_HAS_VALUE_EXPR_P set, we cannot tell
749 without analyzing the expression whether or not its location
750 is accessible to anyone else. In the case of nested parallel
751 regions it certainly may be. */
752 if (TREE_CODE (decl) != RESULT_DECL && DECL_HAS_VALUE_EXPR_P (decl))
755 /* Do not use copy-in/copy-out for variables that have their
757 if (TREE_ADDRESSABLE (decl))
760 /* Disallow copy-in/out in nested parallel if
761 decl is shared in outer parallel, otherwise
762 each thread could store the shared variable
763 in its own copy-in location, making the
764 variable no longer really shared. */
765 if (!TREE_READONLY (decl) && shared_ctx->is_nested)
769 for (up = shared_ctx->outer; up; up = up->outer)
770 if (is_taskreg_ctx (up) && maybe_lookup_decl (decl, up))
777 for (c = gimple_omp_taskreg_clauses (up->stmt);
778 c; c = OMP_CLAUSE_CHAIN (c))
779 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_SHARED
780 && OMP_CLAUSE_DECL (c) == decl)
784 goto maybe_mark_addressable_and_ret;
788 /* For tasks avoid using copy-in/out, unless they are readonly
789 (in which case just copy-in is used). As tasks can be
790 deferred or executed in different thread, when GOMP_task
791 returns, the task hasn't necessarily terminated. */
792 if (!TREE_READONLY (decl) && is_task_ctx (shared_ctx))
795 maybe_mark_addressable_and_ret:
796 outer = maybe_lookup_decl_in_outer_ctx (decl, shared_ctx);
797 if (is_gimple_reg (outer))
799 /* Taking address of OUTER in lower_send_shared_vars
800 might need regimplification of everything that uses the
802 if (!task_shared_vars)
803 task_shared_vars = BITMAP_ALLOC (NULL);
804 bitmap_set_bit (task_shared_vars, DECL_UID (outer));
805 TREE_ADDRESSABLE (outer) = 1;
814 /* Create a new VAR_DECL and copy information from VAR to it. */
817 copy_var_decl (tree var, tree name, tree type)
819 tree copy = build_decl (DECL_SOURCE_LOCATION (var), VAR_DECL, name, type);
821 TREE_ADDRESSABLE (copy) = TREE_ADDRESSABLE (var);
822 TREE_THIS_VOLATILE (copy) = TREE_THIS_VOLATILE (var);
823 DECL_GIMPLE_REG_P (copy) = DECL_GIMPLE_REG_P (var);
824 DECL_ARTIFICIAL (copy) = DECL_ARTIFICIAL (var);
825 DECL_IGNORED_P (copy) = DECL_IGNORED_P (var);
826 DECL_CONTEXT (copy) = DECL_CONTEXT (var);
827 TREE_USED (copy) = 1;
828 DECL_SEEN_IN_BIND_EXPR_P (copy) = 1;
833 /* Construct a new automatic decl similar to VAR. */
836 omp_copy_decl_2 (tree var, tree name, tree type, omp_context *ctx)
838 tree copy = copy_var_decl (var, name, type);
840 DECL_CONTEXT (copy) = current_function_decl;
841 DECL_CHAIN (copy) = ctx->block_vars;
842 ctx->block_vars = copy;
848 omp_copy_decl_1 (tree var, omp_context *ctx)
850 return omp_copy_decl_2 (var, DECL_NAME (var), TREE_TYPE (var), ctx);
853 /* Build tree nodes to access the field for VAR on the receiver side. */
856 build_receiver_ref (tree var, bool by_ref, omp_context *ctx)
858 tree x, field = lookup_field (var, ctx);
860 /* If the receiver record type was remapped in the child function,
861 remap the field into the new record type. */
862 x = maybe_lookup_field (field, ctx);
866 x = build_simple_mem_ref (ctx->receiver_decl);
867 x = build3 (COMPONENT_REF, TREE_TYPE (field), x, field, NULL);
869 x = build_simple_mem_ref (x);
874 /* Build tree nodes to access VAR in the scope outer to CTX. In the case
875 of a parallel, this is a component reference; for workshare constructs
876 this is some variable. */
879 build_outer_var_ref (tree var, omp_context *ctx)
883 if (is_global_var (maybe_lookup_decl_in_outer_ctx (var, ctx)))
885 else if (is_variable_sized (var))
887 x = TREE_OPERAND (DECL_VALUE_EXPR (var), 0);
888 x = build_outer_var_ref (x, ctx);
889 x = build_simple_mem_ref (x);
891 else if (is_taskreg_ctx (ctx))
893 bool by_ref = use_pointer_for_field (var, NULL);
894 x = build_receiver_ref (var, by_ref, ctx);
897 x = lookup_decl (var, ctx->outer);
898 else if (is_reference (var))
899 /* This can happen with orphaned constructs. If var is reference, it is
900 possible it is shared and as such valid. */
905 if (is_reference (var))
906 x = build_simple_mem_ref (x);
911 /* Build tree nodes to access the field for VAR on the sender side. */
914 build_sender_ref (tree var, omp_context *ctx)
916 tree field = lookup_sfield (var, ctx);
917 return build3 (COMPONENT_REF, TREE_TYPE (field),
918 ctx->sender_decl, field, NULL);
921 /* Add a new field for VAR inside the structure CTX->SENDER_DECL. */
924 install_var_field (tree var, bool by_ref, int mask, omp_context *ctx)
926 tree field, type, sfield = NULL_TREE;
928 gcc_assert ((mask & 1) == 0
929 || !splay_tree_lookup (ctx->field_map, (splay_tree_key) var));
930 gcc_assert ((mask & 2) == 0 || !ctx->sfield_map
931 || !splay_tree_lookup (ctx->sfield_map, (splay_tree_key) var));
933 type = TREE_TYPE (var);
935 type = build_pointer_type (type);
936 else if ((mask & 3) == 1 && is_reference (var))
937 type = TREE_TYPE (type);
939 field = build_decl (DECL_SOURCE_LOCATION (var),
940 FIELD_DECL, DECL_NAME (var), type);
942 /* Remember what variable this field was created for. This does have a
943 side effect of making dwarf2out ignore this member, so for helpful
944 debugging we clear it later in delete_omp_context. */
945 DECL_ABSTRACT_ORIGIN (field) = var;
946 if (type == TREE_TYPE (var))
948 DECL_ALIGN (field) = DECL_ALIGN (var);
949 DECL_USER_ALIGN (field) = DECL_USER_ALIGN (var);
950 TREE_THIS_VOLATILE (field) = TREE_THIS_VOLATILE (var);
953 DECL_ALIGN (field) = TYPE_ALIGN (type);
957 insert_field_into_struct (ctx->record_type, field);
958 if (ctx->srecord_type)
960 sfield = build_decl (DECL_SOURCE_LOCATION (var),
961 FIELD_DECL, DECL_NAME (var), type);
962 DECL_ABSTRACT_ORIGIN (sfield) = var;
963 DECL_ALIGN (sfield) = DECL_ALIGN (field);
964 DECL_USER_ALIGN (sfield) = DECL_USER_ALIGN (field);
965 TREE_THIS_VOLATILE (sfield) = TREE_THIS_VOLATILE (field);
966 insert_field_into_struct (ctx->srecord_type, sfield);
971 if (ctx->srecord_type == NULL_TREE)
975 ctx->srecord_type = lang_hooks.types.make_type (RECORD_TYPE);
976 ctx->sfield_map = splay_tree_new (splay_tree_compare_pointers, 0, 0);
977 for (t = TYPE_FIELDS (ctx->record_type); t ; t = TREE_CHAIN (t))
979 sfield = build_decl (DECL_SOURCE_LOCATION (var),
980 FIELD_DECL, DECL_NAME (t), TREE_TYPE (t));
981 DECL_ABSTRACT_ORIGIN (sfield) = DECL_ABSTRACT_ORIGIN (t);
982 insert_field_into_struct (ctx->srecord_type, sfield);
983 splay_tree_insert (ctx->sfield_map,
984 (splay_tree_key) DECL_ABSTRACT_ORIGIN (t),
985 (splay_tree_value) sfield);
989 insert_field_into_struct ((mask & 1) ? ctx->record_type
990 : ctx->srecord_type, field);
994 splay_tree_insert (ctx->field_map, (splay_tree_key) var,
995 (splay_tree_value) field);
996 if ((mask & 2) && ctx->sfield_map)
997 splay_tree_insert (ctx->sfield_map, (splay_tree_key) var,
998 (splay_tree_value) sfield);
1002 install_var_local (tree var, omp_context *ctx)
1004 tree new_var = omp_copy_decl_1 (var, ctx);
1005 insert_decl_map (&ctx->cb, var, new_var);
1009 /* Adjust the replacement for DECL in CTX for the new context. This means
1010 copying the DECL_VALUE_EXPR, and fixing up the type. */
1013 fixup_remapped_decl (tree decl, omp_context *ctx, bool private_debug)
1015 tree new_decl, size;
1017 new_decl = lookup_decl (decl, ctx);
1019 TREE_TYPE (new_decl) = remap_type (TREE_TYPE (decl), &ctx->cb);
1021 if ((!TREE_CONSTANT (DECL_SIZE (new_decl)) || private_debug)
1022 && DECL_HAS_VALUE_EXPR_P (decl))
1024 tree ve = DECL_VALUE_EXPR (decl);
1025 walk_tree (&ve, copy_tree_body_r, &ctx->cb, NULL);
1026 SET_DECL_VALUE_EXPR (new_decl, ve);
1027 DECL_HAS_VALUE_EXPR_P (new_decl) = 1;
1030 if (!TREE_CONSTANT (DECL_SIZE (new_decl)))
1032 size = remap_decl (DECL_SIZE (decl), &ctx->cb);
1033 if (size == error_mark_node)
1034 size = TYPE_SIZE (TREE_TYPE (new_decl));
1035 DECL_SIZE (new_decl) = size;
1037 size = remap_decl (DECL_SIZE_UNIT (decl), &ctx->cb);
1038 if (size == error_mark_node)
1039 size = TYPE_SIZE_UNIT (TREE_TYPE (new_decl));
1040 DECL_SIZE_UNIT (new_decl) = size;
1044 /* The callback for remap_decl. Search all containing contexts for a
1045 mapping of the variable; this avoids having to duplicate the splay
1046 tree ahead of time. We know a mapping doesn't already exist in the
1047 given context. Create new mappings to implement default semantics. */
1050 omp_copy_decl (tree var, copy_body_data *cb)
1052 omp_context *ctx = (omp_context *) cb;
1055 if (TREE_CODE (var) == LABEL_DECL)
1057 new_var = create_artificial_label (DECL_SOURCE_LOCATION (var));
1058 DECL_CONTEXT (new_var) = current_function_decl;
1059 insert_decl_map (&ctx->cb, var, new_var);
1063 while (!is_taskreg_ctx (ctx))
1068 new_var = maybe_lookup_decl (var, ctx);
1073 if (is_global_var (var) || decl_function_context (var) != ctx->cb.src_fn)
1076 return error_mark_node;
1080 /* Return the parallel region associated with STMT. */
1082 /* Debugging dumps for parallel regions. */
1083 void dump_omp_region (FILE *, struct omp_region *, int);
1084 void debug_omp_region (struct omp_region *);
1085 void debug_all_omp_regions (void);
1087 /* Dump the parallel region tree rooted at REGION. */
1090 dump_omp_region (FILE *file, struct omp_region *region, int indent)
1092 fprintf (file, "%*sbb %d: %s\n", indent, "", region->entry->index,
1093 gimple_code_name[region->type]);
1096 dump_omp_region (file, region->inner, indent + 4);
1100 fprintf (file, "%*sbb %d: GIMPLE_OMP_CONTINUE\n", indent, "",
1101 region->cont->index);
1105 fprintf (file, "%*sbb %d: GIMPLE_OMP_RETURN\n", indent, "",
1106 region->exit->index);
1108 fprintf (file, "%*s[no exit marker]\n", indent, "");
1111 dump_omp_region (file, region->next, indent);
1115 debug_omp_region (struct omp_region *region)
1117 dump_omp_region (stderr, region, 0);
1121 debug_all_omp_regions (void)
1123 dump_omp_region (stderr, root_omp_region, 0);
1127 /* Create a new parallel region starting at STMT inside region PARENT. */
1130 new_omp_region (basic_block bb, enum gimple_code type,
1131 struct omp_region *parent)
1133 struct omp_region *region = XCNEW (struct omp_region);
1135 region->outer = parent;
1137 region->type = type;
1141 /* This is a nested region. Add it to the list of inner
1142 regions in PARENT. */
1143 region->next = parent->inner;
1144 parent->inner = region;
1148 /* This is a toplevel region. Add it to the list of toplevel
1149 regions in ROOT_OMP_REGION. */
1150 region->next = root_omp_region;
1151 root_omp_region = region;
1157 /* Release the memory associated with the region tree rooted at REGION. */
1160 free_omp_region_1 (struct omp_region *region)
1162 struct omp_region *i, *n;
1164 for (i = region->inner; i ; i = n)
1167 free_omp_region_1 (i);
1173 /* Release the memory for the entire omp region tree. */
1176 free_omp_regions (void)
1178 struct omp_region *r, *n;
1179 for (r = root_omp_region; r ; r = n)
1182 free_omp_region_1 (r);
1184 root_omp_region = NULL;
1188 /* Create a new context, with OUTER_CTX being the surrounding context. */
1190 static omp_context *
1191 new_omp_context (gimple stmt, omp_context *outer_ctx)
1193 omp_context *ctx = XCNEW (omp_context);
1195 splay_tree_insert (all_contexts, (splay_tree_key) stmt,
1196 (splay_tree_value) ctx);
1201 ctx->outer = outer_ctx;
1202 ctx->cb = outer_ctx->cb;
1203 ctx->cb.block = NULL;
1204 ctx->depth = outer_ctx->depth + 1;
1208 ctx->cb.src_fn = current_function_decl;
1209 ctx->cb.dst_fn = current_function_decl;
1210 ctx->cb.src_node = cgraph_get_node (current_function_decl);
1211 gcc_checking_assert (ctx->cb.src_node);
1212 ctx->cb.dst_node = ctx->cb.src_node;
1213 ctx->cb.src_cfun = cfun;
1214 ctx->cb.copy_decl = omp_copy_decl;
1215 ctx->cb.eh_lp_nr = 0;
1216 ctx->cb.transform_call_graph_edges = CB_CGE_MOVE;
1220 ctx->cb.decl_map = pointer_map_create ();
1225 static gimple_seq maybe_catch_exception (gimple_seq);
1227 /* Finalize task copyfn. */
1230 finalize_task_copyfn (gimple task_stmt)
1232 struct function *child_cfun;
1233 tree child_fn, old_fn;
1234 gimple_seq seq, new_seq;
1237 child_fn = gimple_omp_task_copy_fn (task_stmt);
1238 if (child_fn == NULL_TREE)
1241 child_cfun = DECL_STRUCT_FUNCTION (child_fn);
1243 /* Inform the callgraph about the new function. */
1244 DECL_STRUCT_FUNCTION (child_fn)->curr_properties
1245 = cfun->curr_properties;
1247 old_fn = current_function_decl;
1248 push_cfun (child_cfun);
1249 current_function_decl = child_fn;
1250 bind = gimplify_body (&DECL_SAVED_TREE (child_fn), child_fn, false);
1251 seq = gimple_seq_alloc ();
1252 gimple_seq_add_stmt (&seq, bind);
1253 new_seq = maybe_catch_exception (seq);
1256 bind = gimple_build_bind (NULL, new_seq, NULL);
1257 seq = gimple_seq_alloc ();
1258 gimple_seq_add_stmt (&seq, bind);
1260 gimple_set_body (child_fn, seq);
1262 current_function_decl = old_fn;
1264 cgraph_add_new_function (child_fn, false);
1267 /* Destroy a omp_context data structures. Called through the splay tree
1268 value delete callback. */
1271 delete_omp_context (splay_tree_value value)
1273 omp_context *ctx = (omp_context *) value;
1275 pointer_map_destroy (ctx->cb.decl_map);
1278 splay_tree_delete (ctx->field_map);
1279 if (ctx->sfield_map)
1280 splay_tree_delete (ctx->sfield_map);
1282 /* We hijacked DECL_ABSTRACT_ORIGIN earlier. We need to clear it before
1283 it produces corrupt debug information. */
1284 if (ctx->record_type)
1287 for (t = TYPE_FIELDS (ctx->record_type); t ; t = DECL_CHAIN (t))
1288 DECL_ABSTRACT_ORIGIN (t) = NULL;
1290 if (ctx->srecord_type)
1293 for (t = TYPE_FIELDS (ctx->srecord_type); t ; t = DECL_CHAIN (t))
1294 DECL_ABSTRACT_ORIGIN (t) = NULL;
1297 if (is_task_ctx (ctx))
1298 finalize_task_copyfn (ctx->stmt);
1303 /* Fix up RECEIVER_DECL with a type that has been remapped to the child
1307 fixup_child_record_type (omp_context *ctx)
1309 tree f, type = ctx->record_type;
1311 /* ??? It isn't sufficient to just call remap_type here, because
1312 variably_modified_type_p doesn't work the way we expect for
1313 record types. Testing each field for whether it needs remapping
1314 and creating a new record by hand works, however. */
1315 for (f = TYPE_FIELDS (type); f ; f = DECL_CHAIN (f))
1316 if (variably_modified_type_p (TREE_TYPE (f), ctx->cb.src_fn))
1320 tree name, new_fields = NULL;
1322 type = lang_hooks.types.make_type (RECORD_TYPE);
1323 name = DECL_NAME (TYPE_NAME (ctx->record_type));
1324 name = build_decl (DECL_SOURCE_LOCATION (ctx->receiver_decl),
1325 TYPE_DECL, name, type);
1326 TYPE_NAME (type) = name;
1328 for (f = TYPE_FIELDS (ctx->record_type); f ; f = DECL_CHAIN (f))
1330 tree new_f = copy_node (f);
1331 DECL_CONTEXT (new_f) = type;
1332 TREE_TYPE (new_f) = remap_type (TREE_TYPE (f), &ctx->cb);
1333 DECL_CHAIN (new_f) = new_fields;
1334 walk_tree (&DECL_SIZE (new_f), copy_tree_body_r, &ctx->cb, NULL);
1335 walk_tree (&DECL_SIZE_UNIT (new_f), copy_tree_body_r,
1337 walk_tree (&DECL_FIELD_OFFSET (new_f), copy_tree_body_r,
1341 /* Arrange to be able to look up the receiver field
1342 given the sender field. */
1343 splay_tree_insert (ctx->field_map, (splay_tree_key) f,
1344 (splay_tree_value) new_f);
1346 TYPE_FIELDS (type) = nreverse (new_fields);
1350 TREE_TYPE (ctx->receiver_decl) = build_pointer_type (type);
1353 /* Instantiate decls as necessary in CTX to satisfy the data sharing
1354 specified by CLAUSES. */
1357 scan_sharing_clauses (tree clauses, omp_context *ctx)
1360 bool scan_array_reductions = false;
1362 for (c = clauses; c; c = OMP_CLAUSE_CHAIN (c))
1366 switch (OMP_CLAUSE_CODE (c))
1368 case OMP_CLAUSE_PRIVATE:
1369 decl = OMP_CLAUSE_DECL (c);
1370 if (OMP_CLAUSE_PRIVATE_OUTER_REF (c))
1372 else if (!is_variable_sized (decl))
1373 install_var_local (decl, ctx);
1376 case OMP_CLAUSE_SHARED:
1377 gcc_assert (is_taskreg_ctx (ctx));
1378 decl = OMP_CLAUSE_DECL (c);
1379 gcc_assert (!COMPLETE_TYPE_P (TREE_TYPE (decl))
1380 || !is_variable_sized (decl));
1381 /* Global variables don't need to be copied,
1382 the receiver side will use them directly. */
1383 if (is_global_var (maybe_lookup_decl_in_outer_ctx (decl, ctx)))
1385 by_ref = use_pointer_for_field (decl, ctx);
1386 if (! TREE_READONLY (decl)
1387 || TREE_ADDRESSABLE (decl)
1389 || is_reference (decl))
1391 install_var_field (decl, by_ref, 3, ctx);
1392 install_var_local (decl, ctx);
1395 /* We don't need to copy const scalar vars back. */
1396 OMP_CLAUSE_SET_CODE (c, OMP_CLAUSE_FIRSTPRIVATE);
1399 case OMP_CLAUSE_LASTPRIVATE:
1400 /* Let the corresponding firstprivate clause create
1402 if (OMP_CLAUSE_LASTPRIVATE_FIRSTPRIVATE (c))
1406 case OMP_CLAUSE_FIRSTPRIVATE:
1407 case OMP_CLAUSE_REDUCTION:
1408 decl = OMP_CLAUSE_DECL (c);
1410 if (is_variable_sized (decl))
1412 if (is_task_ctx (ctx))
1413 install_var_field (decl, false, 1, ctx);
1416 else if (is_taskreg_ctx (ctx))
1419 = is_global_var (maybe_lookup_decl_in_outer_ctx (decl, ctx));
1420 by_ref = use_pointer_for_field (decl, NULL);
1422 if (is_task_ctx (ctx)
1423 && (global || by_ref || is_reference (decl)))
1425 install_var_field (decl, false, 1, ctx);
1427 install_var_field (decl, by_ref, 2, ctx);
1430 install_var_field (decl, by_ref, 3, ctx);
1432 install_var_local (decl, ctx);
1435 case OMP_CLAUSE_COPYPRIVATE:
1436 case OMP_CLAUSE_COPYIN:
1437 decl = OMP_CLAUSE_DECL (c);
1438 by_ref = use_pointer_for_field (decl, NULL);
1439 install_var_field (decl, by_ref, 3, ctx);
1442 case OMP_CLAUSE_DEFAULT:
1443 ctx->default_kind = OMP_CLAUSE_DEFAULT_KIND (c);
1447 case OMP_CLAUSE_NUM_THREADS:
1448 case OMP_CLAUSE_SCHEDULE:
1450 scan_omp_op (&OMP_CLAUSE_OPERAND (c, 0), ctx->outer);
1453 case OMP_CLAUSE_NOWAIT:
1454 case OMP_CLAUSE_ORDERED:
1455 case OMP_CLAUSE_COLLAPSE:
1456 case OMP_CLAUSE_UNTIED:
1464 for (c = clauses; c; c = OMP_CLAUSE_CHAIN (c))
1466 switch (OMP_CLAUSE_CODE (c))
1468 case OMP_CLAUSE_LASTPRIVATE:
1469 /* Let the corresponding firstprivate clause create
1471 if (OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c))
1472 scan_array_reductions = true;
1473 if (OMP_CLAUSE_LASTPRIVATE_FIRSTPRIVATE (c))
1477 case OMP_CLAUSE_PRIVATE:
1478 case OMP_CLAUSE_FIRSTPRIVATE:
1479 case OMP_CLAUSE_REDUCTION:
1480 decl = OMP_CLAUSE_DECL (c);
1481 if (is_variable_sized (decl))
1482 install_var_local (decl, ctx);
1483 fixup_remapped_decl (decl, ctx,
1484 OMP_CLAUSE_CODE (c) == OMP_CLAUSE_PRIVATE
1485 && OMP_CLAUSE_PRIVATE_DEBUG (c));
1486 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_REDUCTION
1487 && OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
1488 scan_array_reductions = true;
1491 case OMP_CLAUSE_SHARED:
1492 decl = OMP_CLAUSE_DECL (c);
1493 if (! is_global_var (maybe_lookup_decl_in_outer_ctx (decl, ctx)))
1494 fixup_remapped_decl (decl, ctx, false);
1497 case OMP_CLAUSE_COPYPRIVATE:
1498 case OMP_CLAUSE_COPYIN:
1499 case OMP_CLAUSE_DEFAULT:
1501 case OMP_CLAUSE_NUM_THREADS:
1502 case OMP_CLAUSE_SCHEDULE:
1503 case OMP_CLAUSE_NOWAIT:
1504 case OMP_CLAUSE_ORDERED:
1505 case OMP_CLAUSE_COLLAPSE:
1506 case OMP_CLAUSE_UNTIED:
1514 if (scan_array_reductions)
1515 for (c = clauses; c; c = OMP_CLAUSE_CHAIN (c))
1516 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_REDUCTION
1517 && OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
1519 scan_omp (OMP_CLAUSE_REDUCTION_GIMPLE_INIT (c), ctx);
1520 scan_omp (OMP_CLAUSE_REDUCTION_GIMPLE_MERGE (c), ctx);
1522 else if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LASTPRIVATE
1523 && OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c))
1524 scan_omp (OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c), ctx);
1527 /* Create a new name for omp child function. Returns an identifier. */
1529 static GTY(()) unsigned int tmp_ompfn_id_num;
1532 create_omp_child_function_name (bool task_copy)
1534 return (clone_function_name (current_function_decl,
1535 task_copy ? "_omp_cpyfn" : "_omp_fn"));
1538 /* Build a decl for the omp child function. It'll not contain a body
1539 yet, just the bare decl. */
1542 create_omp_child_function (omp_context *ctx, bool task_copy)
1544 tree decl, type, name, t;
1546 name = create_omp_child_function_name (task_copy);
1548 type = build_function_type_list (void_type_node, ptr_type_node,
1549 ptr_type_node, NULL_TREE);
1551 type = build_function_type_list (void_type_node, ptr_type_node, NULL_TREE);
1553 decl = build_decl (gimple_location (ctx->stmt),
1554 FUNCTION_DECL, name, type);
1557 ctx->cb.dst_fn = decl;
1559 gimple_omp_task_set_copy_fn (ctx->stmt, decl);
1561 TREE_STATIC (decl) = 1;
1562 TREE_USED (decl) = 1;
1563 DECL_ARTIFICIAL (decl) = 1;
1564 DECL_NAMELESS (decl) = 1;
1565 DECL_IGNORED_P (decl) = 0;
1566 TREE_PUBLIC (decl) = 0;
1567 DECL_UNINLINABLE (decl) = 1;
1568 DECL_EXTERNAL (decl) = 0;
1569 DECL_CONTEXT (decl) = NULL_TREE;
1570 DECL_INITIAL (decl) = make_node (BLOCK);
1572 t = build_decl (DECL_SOURCE_LOCATION (decl),
1573 RESULT_DECL, NULL_TREE, void_type_node);
1574 DECL_ARTIFICIAL (t) = 1;
1575 DECL_IGNORED_P (t) = 1;
1576 DECL_CONTEXT (t) = decl;
1577 DECL_RESULT (decl) = t;
1579 t = build_decl (DECL_SOURCE_LOCATION (decl),
1580 PARM_DECL, get_identifier (".omp_data_i"), ptr_type_node);
1581 DECL_ARTIFICIAL (t) = 1;
1582 DECL_NAMELESS (t) = 1;
1583 DECL_ARG_TYPE (t) = ptr_type_node;
1584 DECL_CONTEXT (t) = current_function_decl;
1586 DECL_ARGUMENTS (decl) = t;
1588 ctx->receiver_decl = t;
1591 t = build_decl (DECL_SOURCE_LOCATION (decl),
1592 PARM_DECL, get_identifier (".omp_data_o"),
1594 DECL_ARTIFICIAL (t) = 1;
1595 DECL_NAMELESS (t) = 1;
1596 DECL_ARG_TYPE (t) = ptr_type_node;
1597 DECL_CONTEXT (t) = current_function_decl;
1599 TREE_ADDRESSABLE (t) = 1;
1600 DECL_CHAIN (t) = DECL_ARGUMENTS (decl);
1601 DECL_ARGUMENTS (decl) = t;
1604 /* Allocate memory for the function structure. The call to
1605 allocate_struct_function clobbers CFUN, so we need to restore
1607 push_struct_function (decl);
1608 cfun->function_end_locus = gimple_location (ctx->stmt);
1613 /* Scan an OpenMP parallel directive. */
1616 scan_omp_parallel (gimple_stmt_iterator *gsi, omp_context *outer_ctx)
1620 gimple stmt = gsi_stmt (*gsi);
1622 /* Ignore parallel directives with empty bodies, unless there
1623 are copyin clauses. */
1625 && empty_body_p (gimple_omp_body (stmt))
1626 && find_omp_clause (gimple_omp_parallel_clauses (stmt),
1627 OMP_CLAUSE_COPYIN) == NULL)
1629 gsi_replace (gsi, gimple_build_nop (), false);
1633 ctx = new_omp_context (stmt, outer_ctx);
1634 if (taskreg_nesting_level > 1)
1635 ctx->is_nested = true;
1636 ctx->field_map = splay_tree_new (splay_tree_compare_pointers, 0, 0);
1637 ctx->default_kind = OMP_CLAUSE_DEFAULT_SHARED;
1638 ctx->record_type = lang_hooks.types.make_type (RECORD_TYPE);
1639 name = create_tmp_var_name (".omp_data_s");
1640 name = build_decl (gimple_location (stmt),
1641 TYPE_DECL, name, ctx->record_type);
1642 DECL_ARTIFICIAL (name) = 1;
1643 DECL_NAMELESS (name) = 1;
1644 TYPE_NAME (ctx->record_type) = name;
1645 create_omp_child_function (ctx, false);
1646 gimple_omp_parallel_set_child_fn (stmt, ctx->cb.dst_fn);
1648 scan_sharing_clauses (gimple_omp_parallel_clauses (stmt), ctx);
1649 scan_omp (gimple_omp_body (stmt), ctx);
1651 if (TYPE_FIELDS (ctx->record_type) == NULL)
1652 ctx->record_type = ctx->receiver_decl = NULL;
1655 layout_type (ctx->record_type);
1656 fixup_child_record_type (ctx);
1660 /* Scan an OpenMP task directive. */
1663 scan_omp_task (gimple_stmt_iterator *gsi, omp_context *outer_ctx)
1667 gimple stmt = gsi_stmt (*gsi);
1668 location_t loc = gimple_location (stmt);
1670 /* Ignore task directives with empty bodies. */
1672 && empty_body_p (gimple_omp_body (stmt)))
1674 gsi_replace (gsi, gimple_build_nop (), false);
1678 ctx = new_omp_context (stmt, outer_ctx);
1679 if (taskreg_nesting_level > 1)
1680 ctx->is_nested = true;
1681 ctx->field_map = splay_tree_new (splay_tree_compare_pointers, 0, 0);
1682 ctx->default_kind = OMP_CLAUSE_DEFAULT_SHARED;
1683 ctx->record_type = lang_hooks.types.make_type (RECORD_TYPE);
1684 name = create_tmp_var_name (".omp_data_s");
1685 name = build_decl (gimple_location (stmt),
1686 TYPE_DECL, name, ctx->record_type);
1687 DECL_ARTIFICIAL (name) = 1;
1688 DECL_NAMELESS (name) = 1;
1689 TYPE_NAME (ctx->record_type) = name;
1690 create_omp_child_function (ctx, false);
1691 gimple_omp_task_set_child_fn (stmt, ctx->cb.dst_fn);
1693 scan_sharing_clauses (gimple_omp_task_clauses (stmt), ctx);
1695 if (ctx->srecord_type)
1697 name = create_tmp_var_name (".omp_data_a");
1698 name = build_decl (gimple_location (stmt),
1699 TYPE_DECL, name, ctx->srecord_type);
1700 DECL_ARTIFICIAL (name) = 1;
1701 DECL_NAMELESS (name) = 1;
1702 TYPE_NAME (ctx->srecord_type) = name;
1703 create_omp_child_function (ctx, true);
1706 scan_omp (gimple_omp_body (stmt), ctx);
1708 if (TYPE_FIELDS (ctx->record_type) == NULL)
1710 ctx->record_type = ctx->receiver_decl = NULL;
1711 t = build_int_cst (long_integer_type_node, 0);
1712 gimple_omp_task_set_arg_size (stmt, t);
1713 t = build_int_cst (long_integer_type_node, 1);
1714 gimple_omp_task_set_arg_align (stmt, t);
1718 tree *p, vla_fields = NULL_TREE, *q = &vla_fields;
1719 /* Move VLA fields to the end. */
1720 p = &TYPE_FIELDS (ctx->record_type);
1722 if (!TYPE_SIZE_UNIT (TREE_TYPE (*p))
1723 || ! TREE_CONSTANT (TYPE_SIZE_UNIT (TREE_TYPE (*p))))
1726 *p = TREE_CHAIN (*p);
1727 TREE_CHAIN (*q) = NULL_TREE;
1728 q = &TREE_CHAIN (*q);
1731 p = &DECL_CHAIN (*p);
1733 layout_type (ctx->record_type);
1734 fixup_child_record_type (ctx);
1735 if (ctx->srecord_type)
1736 layout_type (ctx->srecord_type);
1737 t = fold_convert_loc (loc, long_integer_type_node,
1738 TYPE_SIZE_UNIT (ctx->record_type));
1739 gimple_omp_task_set_arg_size (stmt, t);
1740 t = build_int_cst (long_integer_type_node,
1741 TYPE_ALIGN_UNIT (ctx->record_type));
1742 gimple_omp_task_set_arg_align (stmt, t);
1747 /* Scan an OpenMP loop directive. */
1750 scan_omp_for (gimple stmt, omp_context *outer_ctx)
1755 ctx = new_omp_context (stmt, outer_ctx);
1757 scan_sharing_clauses (gimple_omp_for_clauses (stmt), ctx);
1759 scan_omp (gimple_omp_for_pre_body (stmt), ctx);
1760 for (i = 0; i < gimple_omp_for_collapse (stmt); i++)
1762 scan_omp_op (gimple_omp_for_index_ptr (stmt, i), ctx);
1763 scan_omp_op (gimple_omp_for_initial_ptr (stmt, i), ctx);
1764 scan_omp_op (gimple_omp_for_final_ptr (stmt, i), ctx);
1765 scan_omp_op (gimple_omp_for_incr_ptr (stmt, i), ctx);
1767 scan_omp (gimple_omp_body (stmt), ctx);
1770 /* Scan an OpenMP sections directive. */
1773 scan_omp_sections (gimple stmt, omp_context *outer_ctx)
1777 ctx = new_omp_context (stmt, outer_ctx);
1778 scan_sharing_clauses (gimple_omp_sections_clauses (stmt), ctx);
1779 scan_omp (gimple_omp_body (stmt), ctx);
1782 /* Scan an OpenMP single directive. */
1785 scan_omp_single (gimple stmt, omp_context *outer_ctx)
1790 ctx = new_omp_context (stmt, outer_ctx);
1791 ctx->field_map = splay_tree_new (splay_tree_compare_pointers, 0, 0);
1792 ctx->record_type = lang_hooks.types.make_type (RECORD_TYPE);
1793 name = create_tmp_var_name (".omp_copy_s");
1794 name = build_decl (gimple_location (stmt),
1795 TYPE_DECL, name, ctx->record_type);
1796 TYPE_NAME (ctx->record_type) = name;
1798 scan_sharing_clauses (gimple_omp_single_clauses (stmt), ctx);
1799 scan_omp (gimple_omp_body (stmt), ctx);
1801 if (TYPE_FIELDS (ctx->record_type) == NULL)
1802 ctx->record_type = NULL;
1804 layout_type (ctx->record_type);
1808 /* Check OpenMP nesting restrictions. */
1810 check_omp_nesting_restrictions (gimple stmt, omp_context *ctx)
1812 switch (gimple_code (stmt))
1814 case GIMPLE_OMP_FOR:
1815 case GIMPLE_OMP_SECTIONS:
1816 case GIMPLE_OMP_SINGLE:
1818 for (; ctx != NULL; ctx = ctx->outer)
1819 switch (gimple_code (ctx->stmt))
1821 case GIMPLE_OMP_FOR:
1822 case GIMPLE_OMP_SECTIONS:
1823 case GIMPLE_OMP_SINGLE:
1824 case GIMPLE_OMP_ORDERED:
1825 case GIMPLE_OMP_MASTER:
1826 case GIMPLE_OMP_TASK:
1827 if (is_gimple_call (stmt))
1829 warning (0, "barrier region may not be closely nested inside "
1830 "of work-sharing, critical, ordered, master or "
1831 "explicit task region");
1834 warning (0, "work-sharing region may not be closely nested inside "
1835 "of work-sharing, critical, ordered, master or explicit "
1838 case GIMPLE_OMP_PARALLEL:
1844 case GIMPLE_OMP_MASTER:
1845 for (; ctx != NULL; ctx = ctx->outer)
1846 switch (gimple_code (ctx->stmt))
1848 case GIMPLE_OMP_FOR:
1849 case GIMPLE_OMP_SECTIONS:
1850 case GIMPLE_OMP_SINGLE:
1851 case GIMPLE_OMP_TASK:
1852 warning (0, "master region may not be closely nested inside "
1853 "of work-sharing or explicit task region");
1855 case GIMPLE_OMP_PARALLEL:
1861 case GIMPLE_OMP_ORDERED:
1862 for (; ctx != NULL; ctx = ctx->outer)
1863 switch (gimple_code (ctx->stmt))
1865 case GIMPLE_OMP_CRITICAL:
1866 case GIMPLE_OMP_TASK:
1867 warning (0, "ordered region may not be closely nested inside "
1868 "of critical or explicit task region");
1870 case GIMPLE_OMP_FOR:
1871 if (find_omp_clause (gimple_omp_for_clauses (ctx->stmt),
1872 OMP_CLAUSE_ORDERED) == NULL)
1873 warning (0, "ordered region must be closely nested inside "
1874 "a loop region with an ordered clause");
1876 case GIMPLE_OMP_PARALLEL:
1882 case GIMPLE_OMP_CRITICAL:
1883 for (; ctx != NULL; ctx = ctx->outer)
1884 if (gimple_code (ctx->stmt) == GIMPLE_OMP_CRITICAL
1885 && (gimple_omp_critical_name (stmt)
1886 == gimple_omp_critical_name (ctx->stmt)))
1888 warning (0, "critical region may not be nested inside a critical "
1889 "region with the same name");
1899 /* Helper function scan_omp.
1901 Callback for walk_tree or operators in walk_gimple_stmt used to
1902 scan for OpenMP directives in TP. */
1905 scan_omp_1_op (tree *tp, int *walk_subtrees, void *data)
1907 struct walk_stmt_info *wi = (struct walk_stmt_info *) data;
1908 omp_context *ctx = (omp_context *) wi->info;
1911 switch (TREE_CODE (t))
1918 *tp = remap_decl (t, &ctx->cb);
1922 if (ctx && TYPE_P (t))
1923 *tp = remap_type (t, &ctx->cb);
1924 else if (!DECL_P (t))
1929 tree tem = remap_type (TREE_TYPE (t), &ctx->cb);
1930 if (tem != TREE_TYPE (t))
1932 if (TREE_CODE (t) == INTEGER_CST)
1933 *tp = build_int_cst_wide (tem,
1934 TREE_INT_CST_LOW (t),
1935 TREE_INT_CST_HIGH (t));
1937 TREE_TYPE (t) = tem;
1948 /* Helper function for scan_omp.
1950 Callback for walk_gimple_stmt used to scan for OpenMP directives in
1951 the current statement in GSI. */
1954 scan_omp_1_stmt (gimple_stmt_iterator *gsi, bool *handled_ops_p,
1955 struct walk_stmt_info *wi)
1957 gimple stmt = gsi_stmt (*gsi);
1958 omp_context *ctx = (omp_context *) wi->info;
1960 if (gimple_has_location (stmt))
1961 input_location = gimple_location (stmt);
1963 /* Check the OpenMP nesting restrictions. */
1966 if (is_gimple_omp (stmt))
1967 check_omp_nesting_restrictions (stmt, ctx);
1968 else if (is_gimple_call (stmt))
1970 tree fndecl = gimple_call_fndecl (stmt);
1971 if (fndecl && DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_NORMAL
1972 && DECL_FUNCTION_CODE (fndecl) == BUILT_IN_GOMP_BARRIER)
1973 check_omp_nesting_restrictions (stmt, ctx);
1977 *handled_ops_p = true;
1979 switch (gimple_code (stmt))
1981 case GIMPLE_OMP_PARALLEL:
1982 taskreg_nesting_level++;
1983 scan_omp_parallel (gsi, ctx);
1984 taskreg_nesting_level--;
1987 case GIMPLE_OMP_TASK:
1988 taskreg_nesting_level++;
1989 scan_omp_task (gsi, ctx);
1990 taskreg_nesting_level--;
1993 case GIMPLE_OMP_FOR:
1994 scan_omp_for (stmt, ctx);
1997 case GIMPLE_OMP_SECTIONS:
1998 scan_omp_sections (stmt, ctx);
2001 case GIMPLE_OMP_SINGLE:
2002 scan_omp_single (stmt, ctx);
2005 case GIMPLE_OMP_SECTION:
2006 case GIMPLE_OMP_MASTER:
2007 case GIMPLE_OMP_ORDERED:
2008 case GIMPLE_OMP_CRITICAL:
2009 ctx = new_omp_context (stmt, ctx);
2010 scan_omp (gimple_omp_body (stmt), ctx);
2017 *handled_ops_p = false;
2019 for (var = gimple_bind_vars (stmt); var ; var = DECL_CHAIN (var))
2020 insert_decl_map (&ctx->cb, var, var);
2024 *handled_ops_p = false;
2032 /* Scan all the statements starting at the current statement. CTX
2033 contains context information about the OpenMP directives and
2034 clauses found during the scan. */
2037 scan_omp (gimple_seq body, omp_context *ctx)
2039 location_t saved_location;
2040 struct walk_stmt_info wi;
2042 memset (&wi, 0, sizeof (wi));
2044 wi.want_locations = true;
2046 saved_location = input_location;
2047 walk_gimple_seq (body, scan_omp_1_stmt, scan_omp_1_op, &wi);
2048 input_location = saved_location;
2051 /* Re-gimplification and code generation routines. */
2053 /* Build a call to GOMP_barrier. */
2056 build_omp_barrier (void)
2058 return build_call_expr (built_in_decls[BUILT_IN_GOMP_BARRIER], 0);
2061 /* If a context was created for STMT when it was scanned, return it. */
2063 static omp_context *
2064 maybe_lookup_ctx (gimple stmt)
2067 n = splay_tree_lookup (all_contexts, (splay_tree_key) stmt);
2068 return n ? (omp_context *) n->value : NULL;
2072 /* Find the mapping for DECL in CTX or the immediately enclosing
2073 context that has a mapping for DECL.
2075 If CTX is a nested parallel directive, we may have to use the decl
2076 mappings created in CTX's parent context. Suppose that we have the
2077 following parallel nesting (variable UIDs showed for clarity):
2080 #omp parallel shared(iD.1562) -> outer parallel
2081 iD.1562 = iD.1562 + 1;
2083 #omp parallel shared (iD.1562) -> inner parallel
2084 iD.1562 = iD.1562 - 1;
2086 Each parallel structure will create a distinct .omp_data_s structure
2087 for copying iD.1562 in/out of the directive:
2089 outer parallel .omp_data_s.1.i -> iD.1562
2090 inner parallel .omp_data_s.2.i -> iD.1562
2092 A shared variable mapping will produce a copy-out operation before
2093 the parallel directive and a copy-in operation after it. So, in
2094 this case we would have:
2097 .omp_data_o.1.i = iD.1562;
2098 #omp parallel shared(iD.1562) -> outer parallel
2099 .omp_data_i.1 = &.omp_data_o.1
2100 .omp_data_i.1->i = .omp_data_i.1->i + 1;
2102 .omp_data_o.2.i = iD.1562; -> **
2103 #omp parallel shared(iD.1562) -> inner parallel
2104 .omp_data_i.2 = &.omp_data_o.2
2105 .omp_data_i.2->i = .omp_data_i.2->i - 1;
2108 ** This is a problem. The symbol iD.1562 cannot be referenced
2109 inside the body of the outer parallel region. But since we are
2110 emitting this copy operation while expanding the inner parallel
2111 directive, we need to access the CTX structure of the outer
2112 parallel directive to get the correct mapping:
2114 .omp_data_o.2.i = .omp_data_i.1->i
2116 Since there may be other workshare or parallel directives enclosing
2117 the parallel directive, it may be necessary to walk up the context
2118 parent chain. This is not a problem in general because nested
2119 parallelism happens only rarely. */
2122 lookup_decl_in_outer_ctx (tree decl, omp_context *ctx)
2127 for (up = ctx->outer, t = NULL; up && t == NULL; up = up->outer)
2128 t = maybe_lookup_decl (decl, up);
2130 gcc_assert (!ctx->is_nested || t || is_global_var (decl));
2132 return t ? t : decl;
2136 /* Similar to lookup_decl_in_outer_ctx, but return DECL if not found
2137 in outer contexts. */
2140 maybe_lookup_decl_in_outer_ctx (tree decl, omp_context *ctx)
2145 for (up = ctx->outer, t = NULL; up && t == NULL; up = up->outer)
2146 t = maybe_lookup_decl (decl, up);
2148 return t ? t : decl;
2152 /* Construct the initialization value for reduction CLAUSE. */
2155 omp_reduction_init (tree clause, tree type)
2157 location_t loc = OMP_CLAUSE_LOCATION (clause);
2158 switch (OMP_CLAUSE_REDUCTION_CODE (clause))
2165 case TRUTH_ORIF_EXPR:
2166 case TRUTH_XOR_EXPR:
2168 return build_zero_cst (type);
2171 case TRUTH_AND_EXPR:
2172 case TRUTH_ANDIF_EXPR:
2174 return fold_convert_loc (loc, type, integer_one_node);
2177 return fold_convert_loc (loc, type, integer_minus_one_node);
2180 if (SCALAR_FLOAT_TYPE_P (type))
2182 REAL_VALUE_TYPE max, min;
2183 if (HONOR_INFINITIES (TYPE_MODE (type)))
2186 real_arithmetic (&min, NEGATE_EXPR, &max, NULL);
2189 real_maxval (&min, 1, TYPE_MODE (type));
2190 return build_real (type, min);
2194 gcc_assert (INTEGRAL_TYPE_P (type));
2195 return TYPE_MIN_VALUE (type);
2199 if (SCALAR_FLOAT_TYPE_P (type))
2201 REAL_VALUE_TYPE max;
2202 if (HONOR_INFINITIES (TYPE_MODE (type)))
2205 real_maxval (&max, 0, TYPE_MODE (type));
2206 return build_real (type, max);
2210 gcc_assert (INTEGRAL_TYPE_P (type));
2211 return TYPE_MAX_VALUE (type);
2219 /* Generate code to implement the input clauses, FIRSTPRIVATE and COPYIN,
2220 from the receiver (aka child) side and initializers for REFERENCE_TYPE
2221 private variables. Initialization statements go in ILIST, while calls
2222 to destructors go in DLIST. */
2225 lower_rec_input_clauses (tree clauses, gimple_seq *ilist, gimple_seq *dlist,
2228 gimple_stmt_iterator diter;
2229 tree c, dtor, copyin_seq, x, ptr;
2230 bool copyin_by_ref = false;
2231 bool lastprivate_firstprivate = false;
2234 *dlist = gimple_seq_alloc ();
2235 diter = gsi_start (*dlist);
2238 /* Do all the fixed sized types in the first pass, and the variable sized
2239 types in the second pass. This makes sure that the scalar arguments to
2240 the variable sized types are processed before we use them in the
2241 variable sized operations. */
2242 for (pass = 0; pass < 2; ++pass)
2244 for (c = clauses; c ; c = OMP_CLAUSE_CHAIN (c))
2246 enum omp_clause_code c_kind = OMP_CLAUSE_CODE (c);
2249 location_t clause_loc = OMP_CLAUSE_LOCATION (c);
2253 case OMP_CLAUSE_PRIVATE:
2254 if (OMP_CLAUSE_PRIVATE_DEBUG (c))
2257 case OMP_CLAUSE_SHARED:
2258 if (maybe_lookup_decl (OMP_CLAUSE_DECL (c), ctx) == NULL)
2260 gcc_assert (is_global_var (OMP_CLAUSE_DECL (c)));
2263 case OMP_CLAUSE_FIRSTPRIVATE:
2264 case OMP_CLAUSE_COPYIN:
2265 case OMP_CLAUSE_REDUCTION:
2267 case OMP_CLAUSE_LASTPRIVATE:
2268 if (OMP_CLAUSE_LASTPRIVATE_FIRSTPRIVATE (c))
2270 lastprivate_firstprivate = true;
2279 new_var = var = OMP_CLAUSE_DECL (c);
2280 if (c_kind != OMP_CLAUSE_COPYIN)
2281 new_var = lookup_decl (var, ctx);
2283 if (c_kind == OMP_CLAUSE_SHARED || c_kind == OMP_CLAUSE_COPYIN)
2288 else if (is_variable_sized (var))
2290 /* For variable sized types, we need to allocate the
2291 actual storage here. Call alloca and store the
2292 result in the pointer decl that we created elsewhere. */
2296 if (c_kind != OMP_CLAUSE_FIRSTPRIVATE || !is_task_ctx (ctx))
2301 ptr = DECL_VALUE_EXPR (new_var);
2302 gcc_assert (TREE_CODE (ptr) == INDIRECT_REF);
2303 ptr = TREE_OPERAND (ptr, 0);
2304 gcc_assert (DECL_P (ptr));
2305 x = TYPE_SIZE_UNIT (TREE_TYPE (new_var));
2307 /* void *tmp = __builtin_alloca (x); */
2309 = gimple_build_call (built_in_decls[BUILT_IN_ALLOCA], 1, x);
2310 tmp = create_tmp_var_raw (ptr_type_node, NULL);
2311 gimple_add_tmp_var (tmp);
2312 gimple_call_set_lhs (stmt, tmp);
2314 gimple_seq_add_stmt (ilist, stmt);
2316 x = fold_convert_loc (clause_loc, TREE_TYPE (ptr), tmp);
2317 gimplify_assign (ptr, x, ilist);
2320 else if (is_reference (var))
2322 /* For references that are being privatized for Fortran,
2323 allocate new backing storage for the new pointer
2324 variable. This allows us to avoid changing all the
2325 code that expects a pointer to something that expects
2326 a direct variable. Note that this doesn't apply to
2327 C++, since reference types are disallowed in data
2328 sharing clauses there, except for NRV optimized
2333 x = TYPE_SIZE_UNIT (TREE_TYPE (TREE_TYPE (new_var)));
2334 if (c_kind == OMP_CLAUSE_FIRSTPRIVATE && is_task_ctx (ctx))
2336 x = build_receiver_ref (var, false, ctx);
2337 x = build_fold_addr_expr_loc (clause_loc, x);
2339 else if (TREE_CONSTANT (x))
2341 const char *name = NULL;
2342 if (DECL_NAME (var))
2343 name = IDENTIFIER_POINTER (DECL_NAME (new_var));
2345 x = create_tmp_var_raw (TREE_TYPE (TREE_TYPE (new_var)),
2347 gimple_add_tmp_var (x);
2348 TREE_ADDRESSABLE (x) = 1;
2349 x = build_fold_addr_expr_loc (clause_loc, x);
2353 x = build_call_expr_loc (clause_loc,
2354 built_in_decls[BUILT_IN_ALLOCA], 1, x);
2357 x = fold_convert_loc (clause_loc, TREE_TYPE (new_var), x);
2358 gimplify_assign (new_var, x, ilist);
2360 new_var = build_simple_mem_ref_loc (clause_loc, new_var);
2362 else if (c_kind == OMP_CLAUSE_REDUCTION
2363 && OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
2371 switch (OMP_CLAUSE_CODE (c))
2373 case OMP_CLAUSE_SHARED:
2374 /* Shared global vars are just accessed directly. */
2375 if (is_global_var (new_var))
2377 /* Set up the DECL_VALUE_EXPR for shared variables now. This
2378 needs to be delayed until after fixup_child_record_type so
2379 that we get the correct type during the dereference. */
2380 by_ref = use_pointer_for_field (var, ctx);
2381 x = build_receiver_ref (var, by_ref, ctx);
2382 SET_DECL_VALUE_EXPR (new_var, x);
2383 DECL_HAS_VALUE_EXPR_P (new_var) = 1;
2385 /* ??? If VAR is not passed by reference, and the variable
2386 hasn't been initialized yet, then we'll get a warning for
2387 the store into the omp_data_s structure. Ideally, we'd be
2388 able to notice this and not store anything at all, but
2389 we're generating code too early. Suppress the warning. */
2391 TREE_NO_WARNING (var) = 1;
2394 case OMP_CLAUSE_LASTPRIVATE:
2395 if (OMP_CLAUSE_LASTPRIVATE_FIRSTPRIVATE (c))
2399 case OMP_CLAUSE_PRIVATE:
2400 if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_PRIVATE)
2401 x = build_outer_var_ref (var, ctx);
2402 else if (OMP_CLAUSE_PRIVATE_OUTER_REF (c))
2404 if (is_task_ctx (ctx))
2405 x = build_receiver_ref (var, false, ctx);
2407 x = build_outer_var_ref (var, ctx);
2411 x = lang_hooks.decls.omp_clause_default_ctor (c, new_var, x);
2413 gimplify_and_add (x, ilist);
2417 x = lang_hooks.decls.omp_clause_dtor (c, new_var);
2420 gimple_seq tseq = NULL;
2423 gimplify_stmt (&dtor, &tseq);
2424 gsi_insert_seq_before (&diter, tseq, GSI_SAME_STMT);
2428 case OMP_CLAUSE_FIRSTPRIVATE:
2429 if (is_task_ctx (ctx))
2431 if (is_reference (var) || is_variable_sized (var))
2433 else if (is_global_var (maybe_lookup_decl_in_outer_ctx (var,
2435 || use_pointer_for_field (var, NULL))
2437 x = build_receiver_ref (var, false, ctx);
2438 SET_DECL_VALUE_EXPR (new_var, x);
2439 DECL_HAS_VALUE_EXPR_P (new_var) = 1;
2443 x = build_outer_var_ref (var, ctx);
2444 x = lang_hooks.decls.omp_clause_copy_ctor (c, new_var, x);
2445 gimplify_and_add (x, ilist);
2449 case OMP_CLAUSE_COPYIN:
2450 by_ref = use_pointer_for_field (var, NULL);
2451 x = build_receiver_ref (var, by_ref, ctx);
2452 x = lang_hooks.decls.omp_clause_assign_op (c, new_var, x);
2453 append_to_statement_list (x, &copyin_seq);
2454 copyin_by_ref |= by_ref;
2457 case OMP_CLAUSE_REDUCTION:
2458 if (OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
2460 tree placeholder = OMP_CLAUSE_REDUCTION_PLACEHOLDER (c);
2461 x = build_outer_var_ref (var, ctx);
2463 if (is_reference (var))
2464 x = build_fold_addr_expr_loc (clause_loc, x);
2465 SET_DECL_VALUE_EXPR (placeholder, x);
2466 DECL_HAS_VALUE_EXPR_P (placeholder) = 1;
2467 lower_omp (OMP_CLAUSE_REDUCTION_GIMPLE_INIT (c), ctx);
2468 gimple_seq_add_seq (ilist,
2469 OMP_CLAUSE_REDUCTION_GIMPLE_INIT (c));
2470 OMP_CLAUSE_REDUCTION_GIMPLE_INIT (c) = NULL;
2471 DECL_HAS_VALUE_EXPR_P (placeholder) = 0;
2475 x = omp_reduction_init (c, TREE_TYPE (new_var));
2476 gcc_assert (TREE_CODE (TREE_TYPE (new_var)) != ARRAY_TYPE);
2477 gimplify_assign (new_var, x, ilist);
2487 /* The copyin sequence is not to be executed by the main thread, since
2488 that would result in self-copies. Perhaps harmless for scalars,
2489 but it certainly is visible to C++ operator=. */
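/* In pseudocode, the guard built below is

     if (__builtin_omp_get_thread_num () != 0)
       copyin_seq;

   so only the non-master threads perform the copies.  */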
2492 x = build_call_expr (built_in_decls[BUILT_IN_OMP_GET_THREAD_NUM], 0);
2493 x = build2 (NE_EXPR, boolean_type_node, x,
2494 build_int_cst (TREE_TYPE (x), 0));
2495 x = build3 (COND_EXPR, void_type_node, x, copyin_seq, NULL);
2496 gimplify_and_add (x, ilist);
2499 /* If any copyin variable is passed by reference, we must ensure the
2500 master thread doesn't modify it before it is copied over in all
2501 threads. Similarly for variables in both firstprivate and
2502 lastprivate clauses we need to ensure the lastprivate copying
2503 happens after firstprivate copying in all threads. */
2504 if (copyin_by_ref || lastprivate_firstprivate)
2505 gimplify_and_add (build_omp_barrier (), ilist);
2509 /* Generate code to implement the LASTPRIVATE clauses. This is used for
2510 both parallel and workshare constructs. PREDICATE may be NULL if it's always true. */
2514 lower_lastprivate_clauses (tree clauses, tree predicate, gimple_seq *stmt_list,
2517 tree x, c, label = NULL;
2518 bool par_clauses = false;
2520 /* Early exit if there are no lastprivate clauses. */
2521 clauses = find_omp_clause (clauses, OMP_CLAUSE_LASTPRIVATE);
2522 if (clauses == NULL)
2524 /* If this was a workshare clause, see if it had been combined
2525 with its parallel. In that case, look for the clauses on the
2526 parallel statement itself. */
2527 if (is_parallel_ctx (ctx))
2531 if (ctx == NULL || !is_parallel_ctx (ctx))
2534 clauses = find_omp_clause (gimple_omp_parallel_clauses (ctx->stmt),
2535 OMP_CLAUSE_LASTPRIVATE);
2536 if (clauses == NULL)
2544 tree label_true, arm1, arm2;
2546 label = create_artificial_label (UNKNOWN_LOCATION);
2547 label_true = create_artificial_label (UNKNOWN_LOCATION);
2548 arm1 = TREE_OPERAND (predicate, 0);
2549 arm2 = TREE_OPERAND (predicate, 1);
2550 gimplify_expr (&arm1, stmt_list, NULL, is_gimple_val, fb_rvalue);
2551 gimplify_expr (&arm2, stmt_list, NULL, is_gimple_val, fb_rvalue);
2552 stmt = gimple_build_cond (TREE_CODE (predicate), arm1, arm2,
2554 gimple_seq_add_stmt (stmt_list, stmt);
2555 gimple_seq_add_stmt (stmt_list, gimple_build_label (label_true));
2558 for (c = clauses; c ;)
2561 location_t clause_loc = OMP_CLAUSE_LOCATION (c);
2563 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LASTPRIVATE)
2565 var = OMP_CLAUSE_DECL (c);
2566 new_var = lookup_decl (var, ctx);
2568 if (OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c))
2570 lower_omp (OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c), ctx);
2571 gimple_seq_add_seq (stmt_list,
2572 OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c));
2574 OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c) = NULL;
2576 x = build_outer_var_ref (var, ctx);
2577 if (is_reference (var))
2578 new_var = build_simple_mem_ref_loc (clause_loc, new_var);
2579 x = lang_hooks.decls.omp_clause_assign_op (c, x, new_var);
2580 gimplify_and_add (x, stmt_list);
2582 c = OMP_CLAUSE_CHAIN (c);
2583 if (c == NULL && !par_clauses)
2585 /* If this was a workshare clause, see if it had been combined
2586 with its parallel. In that case, continue looking for the
2587 clauses also on the parallel statement itself. */
2588 if (is_parallel_ctx (ctx))
2592 if (ctx == NULL || !is_parallel_ctx (ctx))
2595 c = find_omp_clause (gimple_omp_parallel_clauses (ctx->stmt),
2596 OMP_CLAUSE_LASTPRIVATE);
2602 gimple_seq_add_stmt (stmt_list, gimple_build_label (label));
2606 /* Generate code to implement the REDUCTION clauses. */
2609 lower_reduction_clauses (tree clauses, gimple_seq *stmt_seqp, omp_context *ctx)
2611 gimple_seq sub_seq = NULL;
2616 /* First see if there is exactly one reduction clause. Use OMP_ATOMIC
2617 update in that case, otherwise use a lock. */
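/* A sketch of the two strategies, matching the code below: a single
   scalar reduction is merged as

     #pragma omp atomic
       outer_var = outer_var OP private_var;

   while multiple reductions (and array reductions) are merged
   between GOMP_atomic_start () and GOMP_atomic_end () calls.  */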
2618 for (c = clauses; c && count < 2; c = OMP_CLAUSE_CHAIN (c))
2619 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_REDUCTION)
2621 if (OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
2623 /* Never use OMP_ATOMIC for array reductions. */
2633 for (c = clauses; c ; c = OMP_CLAUSE_CHAIN (c))
2635 tree var, ref, new_var;
2636 enum tree_code code;
2637 location_t clause_loc = OMP_CLAUSE_LOCATION (c);
2639 if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_REDUCTION)
2642 var = OMP_CLAUSE_DECL (c);
2643 new_var = lookup_decl (var, ctx);
2644 if (is_reference (var))
2645 new_var = build_simple_mem_ref_loc (clause_loc, new_var);
2646 ref = build_outer_var_ref (var, ctx);
2647 code = OMP_CLAUSE_REDUCTION_CODE (c);
2649 /* reduction(-:var) sums up the partial results, so it acts
2650 identically to reduction(+:var). */
2651 if (code == MINUS_EXPR)
2656 tree addr = build_fold_addr_expr_loc (clause_loc, ref);
2658 addr = save_expr (addr);
2659 ref = build1 (INDIRECT_REF, TREE_TYPE (TREE_TYPE (addr)), addr);
2660 x = fold_build2_loc (clause_loc, code, TREE_TYPE (ref), ref, new_var);
2661 x = build2 (OMP_ATOMIC, void_type_node, addr, x);
2662 gimplify_and_add (x, stmt_seqp);
2666 if (OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
2668 tree placeholder = OMP_CLAUSE_REDUCTION_PLACEHOLDER (c);
2670 if (is_reference (var))
2671 ref = build_fold_addr_expr_loc (clause_loc, ref);
2672 SET_DECL_VALUE_EXPR (placeholder, ref);
2673 DECL_HAS_VALUE_EXPR_P (placeholder) = 1;
2674 lower_omp (OMP_CLAUSE_REDUCTION_GIMPLE_MERGE (c), ctx);
2675 gimple_seq_add_seq (&sub_seq, OMP_CLAUSE_REDUCTION_GIMPLE_MERGE (c));
2676 OMP_CLAUSE_REDUCTION_GIMPLE_MERGE (c) = NULL;
2677 OMP_CLAUSE_REDUCTION_PLACEHOLDER (c) = NULL;
2681 x = build2 (code, TREE_TYPE (ref), ref, new_var);
2682 ref = build_outer_var_ref (var, ctx);
2683 gimplify_assign (ref, x, &sub_seq);
2687 stmt = gimple_build_call (built_in_decls[BUILT_IN_GOMP_ATOMIC_START], 0);
2688 gimple_seq_add_stmt (stmt_seqp, stmt);
2690 gimple_seq_add_seq (stmt_seqp, sub_seq);
2692 stmt = gimple_build_call (built_in_decls[BUILT_IN_GOMP_ATOMIC_END], 0);
2693 gimple_seq_add_stmt (stmt_seqp, stmt);
2697 /* Generate code to implement the COPYPRIVATE clauses. */
2700 lower_copyprivate_clauses (tree clauses, gimple_seq *slist, gimple_seq *rlist,
2705 for (c = clauses; c ; c = OMP_CLAUSE_CHAIN (c))
2707 tree var, new_var, ref, x;
2709 location_t clause_loc = OMP_CLAUSE_LOCATION (c);
2711 if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_COPYPRIVATE)
2714 var = OMP_CLAUSE_DECL (c);
2715 by_ref = use_pointer_for_field (var, NULL);
2717 ref = build_sender_ref (var, ctx);
2718 x = new_var = lookup_decl_in_outer_ctx (var, ctx);
2721 x = build_fold_addr_expr_loc (clause_loc, new_var);
2722 x = fold_convert_loc (clause_loc, TREE_TYPE (ref), x);
2724 gimplify_assign (ref, x, slist);
2726 ref = build_receiver_ref (var, false, ctx);
2729 ref = fold_convert_loc (clause_loc,
2730 build_pointer_type (TREE_TYPE (new_var)),
2732 ref = build_fold_indirect_ref_loc (clause_loc, ref);
2734 if (is_reference (var))
2736 ref = fold_convert_loc (clause_loc, TREE_TYPE (new_var), ref);
2737 ref = build_simple_mem_ref_loc (clause_loc, ref);
2738 new_var = build_simple_mem_ref_loc (clause_loc, new_var);
2740 x = lang_hooks.decls.omp_clause_assign_op (c, new_var, ref);
2741 gimplify_and_add (x, rlist);
2746 /* Generate code to implement the clauses, FIRSTPRIVATE, COPYIN, LASTPRIVATE,
2747 and REDUCTION from the sender (aka parent) side. */
2750 lower_send_clauses (tree clauses, gimple_seq *ilist, gimple_seq *olist,
2755 for (c = clauses; c ; c = OMP_CLAUSE_CHAIN (c))
2757 tree val, ref, x, var;
2758 bool by_ref, do_in = false, do_out = false;
2759 location_t clause_loc = OMP_CLAUSE_LOCATION (c);
2761 switch (OMP_CLAUSE_CODE (c))
2763 case OMP_CLAUSE_PRIVATE:
2764 if (OMP_CLAUSE_PRIVATE_OUTER_REF (c))
2767 case OMP_CLAUSE_FIRSTPRIVATE:
2768 case OMP_CLAUSE_COPYIN:
2769 case OMP_CLAUSE_LASTPRIVATE:
2770 case OMP_CLAUSE_REDUCTION:
2776 val = OMP_CLAUSE_DECL (c);
2777 var = lookup_decl_in_outer_ctx (val, ctx);
2779 if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_COPYIN
2780 && is_global_var (var))
2782 if (is_variable_sized (val))
2784 by_ref = use_pointer_for_field (val, NULL);
2786 switch (OMP_CLAUSE_CODE (c))
2788 case OMP_CLAUSE_PRIVATE:
2789 case OMP_CLAUSE_FIRSTPRIVATE:
2790 case OMP_CLAUSE_COPYIN:
2794 case OMP_CLAUSE_LASTPRIVATE:
2795 if (by_ref || is_reference (val))
2797 if (OMP_CLAUSE_LASTPRIVATE_FIRSTPRIVATE (c))
2804 if (lang_hooks.decls.omp_private_outer_ref (val))
2809 case OMP_CLAUSE_REDUCTION:
2811 do_out = !(by_ref || is_reference (val));
2820 ref = build_sender_ref (val, ctx);
2821 x = by_ref ? build_fold_addr_expr_loc (clause_loc, var) : var;
2822 gimplify_assign (ref, x, ilist);
2823 if (is_task_ctx (ctx))
2824 DECL_ABSTRACT_ORIGIN (TREE_OPERAND (ref, 1)) = NULL;
2829 ref = build_sender_ref (val, ctx);
2830 gimplify_assign (var, ref, olist);
2835 /* Generate code to implement SHARED from the sender (aka parent)
2836 side. This is trickier, since GIMPLE_OMP_PARALLEL_CLAUSES doesn't
2837 list things that got automatically shared. */
2840 lower_send_shared_vars (gimple_seq *ilist, gimple_seq *olist, omp_context *ctx)
2842 tree var, ovar, nvar, f, x, record_type;
2844 if (ctx->record_type == NULL)
2847 record_type = ctx->srecord_type ? ctx->srecord_type : ctx->record_type;
2848 for (f = TYPE_FIELDS (record_type); f ; f = DECL_CHAIN (f))
2850 ovar = DECL_ABSTRACT_ORIGIN (f);
2851 nvar = maybe_lookup_decl (ovar, ctx);
2852 if (!nvar || !DECL_HAS_VALUE_EXPR_P (nvar))
2855 /* If CTX is a nested parallel directive, find the immediately
2856 enclosing parallel or workshare construct that contains a
2857 mapping for OVAR. */
2858 var = lookup_decl_in_outer_ctx (ovar, ctx);
2860 if (use_pointer_for_field (ovar, ctx))
2862 x = build_sender_ref (ovar, ctx);
2863 var = build_fold_addr_expr (var);
2864 gimplify_assign (x, var, ilist);
2868 x = build_sender_ref (ovar, ctx);
2869 gimplify_assign (x, var, ilist);
2871 if (!TREE_READONLY (var)
2872 /* We don't need to receive a new reference to a result
2873 or parm decl. In fact we may not store to it as we will
2874 invalidate any pending RSO and generate wrong gimple during inlining. */
2876 && !((TREE_CODE (var) == RESULT_DECL
2877 || TREE_CODE (var) == PARM_DECL)
2878 && DECL_BY_REFERENCE (var)))
2880 x = build_sender_ref (ovar, ctx);
2881 gimplify_assign (var, x, olist);
2888 /* A convenience function to build an empty GIMPLE_COND with just the condition. */
2892 gimple_build_cond_empty (tree cond)
2894 enum tree_code pred_code;
2897 gimple_cond_get_ops_from_tree (cond, &pred_code, &lhs, &rhs);
2898 return gimple_build_cond (pred_code, lhs, rhs, NULL_TREE, NULL_TREE);
2902 /* Build the function calls to GOMP_parallel_start etc to actually
2903 generate the parallel operation. REGION is the parallel region
2904 being expanded. BB is the block where the code is to be inserted. WS_ARGS
2905 will be set if this is a call to a combined parallel+workshare
2906 construct, it contains the list of additional arguments needed by
2907 the workshare construct. */
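/* In outline, the sequence emitted below is (a sketch that elides
   the IF and NUM_THREADS clause handling):

     GOMP_parallel_start (child_fn, &.omp_data_o, num_threads, ws_args...);
     child_fn (&.omp_data_o);
     GOMP_parallel_end ();

   with a combined GOMP_parallel_loop_*_start or
   GOMP_parallel_sections_start entry point where applicable.  */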
2910 expand_parallel_call (struct omp_region *region, basic_block bb,
2911 gimple entry_stmt, VEC(tree,gc) *ws_args)
2913 tree t, t1, t2, val, cond, c, clauses;
2914 gimple_stmt_iterator gsi;
2917 location_t clause_loc;
2920 clauses = gimple_omp_parallel_clauses (entry_stmt);
2922 /* Determine what flavor of GOMP_parallel_start we will be emitting. */
2924 start_ix = BUILT_IN_GOMP_PARALLEL_START;
2925 if (is_combined_parallel (region))
2927 switch (region->inner->type)
2929 case GIMPLE_OMP_FOR:
2930 gcc_assert (region->inner->sched_kind != OMP_CLAUSE_SCHEDULE_AUTO);
2931 start_ix = BUILT_IN_GOMP_PARALLEL_LOOP_STATIC_START
2932 + (region->inner->sched_kind
2933 == OMP_CLAUSE_SCHEDULE_RUNTIME
2934 ? 3 : region->inner->sched_kind);
2936 case GIMPLE_OMP_SECTIONS:
2937 start_ix = BUILT_IN_GOMP_PARALLEL_SECTIONS_START;
2944 /* By default, the value of NUM_THREADS is zero (selected at run time)
2945 and there is no conditional. */
2947 val = build_int_cst (unsigned_type_node, 0);
2949 c = find_omp_clause (clauses, OMP_CLAUSE_IF);
2951 cond = OMP_CLAUSE_IF_EXPR (c);
2953 c = find_omp_clause (clauses, OMP_CLAUSE_NUM_THREADS);
2956 val = OMP_CLAUSE_NUM_THREADS_EXPR (c);
2957 clause_loc = OMP_CLAUSE_LOCATION (c);
2960 clause_loc = gimple_location (entry_stmt);
2962 /* Ensure 'val' is of the correct type. */
2963 val = fold_convert_loc (clause_loc, unsigned_type_node, val);
2965 /* If we found the clause 'if (cond)', build either
2966 (cond != 0) or (cond ? val : 1u). */
2969 gimple_stmt_iterator gsi;
2971 cond = gimple_boolify (cond);
2973 if (integer_zerop (val))
2974 val = fold_build2_loc (clause_loc,
2975 EQ_EXPR, unsigned_type_node, cond,
2976 build_int_cst (TREE_TYPE (cond), 0));
2979 basic_block cond_bb, then_bb, else_bb;
2980 edge e, e_then, e_else;
2981 tree tmp_then, tmp_else, tmp_join, tmp_var;
2983 tmp_var = create_tmp_var (TREE_TYPE (val), NULL);
2984 if (gimple_in_ssa_p (cfun))
2986 tmp_then = make_ssa_name (tmp_var, NULL);
2987 tmp_else = make_ssa_name (tmp_var, NULL);
2988 tmp_join = make_ssa_name (tmp_var, NULL);
2997 e = split_block (bb, NULL);
3002 then_bb = create_empty_bb (cond_bb);
3003 else_bb = create_empty_bb (then_bb);
3004 set_immediate_dominator (CDI_DOMINATORS, then_bb, cond_bb);
3005 set_immediate_dominator (CDI_DOMINATORS, else_bb, cond_bb);
3007 stmt = gimple_build_cond_empty (cond);
3008 gsi = gsi_start_bb (cond_bb);
3009 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
3011 gsi = gsi_start_bb (then_bb);
3012 stmt = gimple_build_assign (tmp_then, val);
3013 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
3015 gsi = gsi_start_bb (else_bb);
3016 stmt = gimple_build_assign
3017 (tmp_else, build_int_cst (unsigned_type_node, 1));
3018 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
3020 make_edge (cond_bb, then_bb, EDGE_TRUE_VALUE);
3021 make_edge (cond_bb, else_bb, EDGE_FALSE_VALUE);
3022 e_then = make_edge (then_bb, bb, EDGE_FALLTHRU);
3023 e_else = make_edge (else_bb, bb, EDGE_FALLTHRU);
3025 if (gimple_in_ssa_p (cfun))
3027 gimple phi = create_phi_node (tmp_join, bb);
3028 SSA_NAME_DEF_STMT (tmp_join) = phi;
3029 add_phi_arg (phi, tmp_then, e_then, UNKNOWN_LOCATION);
3030 add_phi_arg (phi, tmp_else, e_else, UNKNOWN_LOCATION);
3036 gsi = gsi_start_bb (bb);
3037 val = force_gimple_operand_gsi (&gsi, val, true, NULL_TREE,
3038 false, GSI_CONTINUE_LINKING);
3041 gsi = gsi_last_bb (bb);
3042 t = gimple_omp_parallel_data_arg (entry_stmt);
3044 t1 = null_pointer_node;
3046 t1 = build_fold_addr_expr (t);
3047 t2 = build_fold_addr_expr (gimple_omp_parallel_child_fn (entry_stmt));
3049 args = VEC_alloc (tree, gc, 3 + VEC_length (tree, ws_args));
3050 VEC_quick_push (tree, args, t2);
3051 VEC_quick_push (tree, args, t1);
3052 VEC_quick_push (tree, args, val);
3053 VEC_splice (tree, args, ws_args);
3055 t = build_call_expr_loc_vec (UNKNOWN_LOCATION,
3056 built_in_decls[start_ix], args);
3058 force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
3059 false, GSI_CONTINUE_LINKING);
3061 t = gimple_omp_parallel_data_arg (entry_stmt);
3063 t = null_pointer_node;
3065 t = build_fold_addr_expr (t);
3066 t = build_call_expr_loc (gimple_location (entry_stmt),
3067 gimple_omp_parallel_child_fn (entry_stmt), 1, t);
3068 force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
3069 false, GSI_CONTINUE_LINKING);
3071 t = build_call_expr_loc (gimple_location (entry_stmt),
3072 built_in_decls[BUILT_IN_GOMP_PARALLEL_END], 0);
3073 force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
3074 false, GSI_CONTINUE_LINKING);
3078 /* Build the function call to GOMP_task to actually
3079 generate the task operation. BB is the block where the code is to be inserted. */
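/* In outline, the call emitted below is

     GOMP_task (child_fn, &.omp_data_o, copy_fn,
		arg_size, arg_align, if_cond, flags);

   where the data pointer and copy_fn may be NULL.  */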
3082 expand_task_call (basic_block bb, gimple entry_stmt)
3084 tree t, t1, t2, t3, flags, cond, c, clauses;
3085 gimple_stmt_iterator gsi;
3086 location_t loc = gimple_location (entry_stmt);
3088 clauses = gimple_omp_task_clauses (entry_stmt);
3090 c = find_omp_clause (clauses, OMP_CLAUSE_IF);
3092 cond = gimple_boolify (OMP_CLAUSE_IF_EXPR (c));
3094 cond = boolean_true_node;
3096 c = find_omp_clause (clauses, OMP_CLAUSE_UNTIED);
3097 flags = build_int_cst (unsigned_type_node, (c ? 1 : 0));
3099 gsi = gsi_last_bb (bb);
3100 t = gimple_omp_task_data_arg (entry_stmt);
3102 t2 = null_pointer_node;
3104 t2 = build_fold_addr_expr_loc (loc, t);
3105 t1 = build_fold_addr_expr_loc (loc, gimple_omp_task_child_fn (entry_stmt));
3106 t = gimple_omp_task_copy_fn (entry_stmt);
3108 t3 = null_pointer_node;
3110 t3 = build_fold_addr_expr_loc (loc, t);
3112 t = build_call_expr (built_in_decls[BUILT_IN_GOMP_TASK], 7, t1, t2, t3,
3113 gimple_omp_task_arg_size (entry_stmt),
3114 gimple_omp_task_arg_align (entry_stmt), cond, flags);
3116 force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
3117 false, GSI_CONTINUE_LINKING);
3121 /* If exceptions are enabled, wrap the statements in BODY in a MUST_NOT_THROW
3122 catch handler and return it. This prevents programs from violating the
3123 structured block semantics with throws. */
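/* Schematically, the wrapping is

     try { BODY } catch (...) { <must-not-throw handler> }

   where the handler is the language's eh_protect_cleanup_actions
   hook if it provides one, and __builtin_trap otherwise.  */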
3126 maybe_catch_exception (gimple_seq body)
3131 if (!flag_exceptions)
3134 if (lang_hooks.eh_protect_cleanup_actions != NULL)
3135 decl = lang_hooks.eh_protect_cleanup_actions ();
3137 decl = built_in_decls[BUILT_IN_TRAP];
3139 g = gimple_build_eh_must_not_throw (decl);
3140 g = gimple_build_try (body, gimple_seq_alloc_with_stmt (g),
3143 return gimple_seq_alloc_with_stmt (g);
3146 /* Chain all the DECLs in V by their TREE_CHAIN fields. */
3149 vec2chain (VEC(tree,gc) *v)
3151 tree chain = NULL_TREE, t;
3154 FOR_EACH_VEC_ELT_REVERSE (tree, v, ix, t)
3156 DECL_CHAIN (t) = chain;
3164 /* Remove barriers in REGION->EXIT's block. Note that this is only
3165 valid for GIMPLE_OMP_PARALLEL regions. Since the end of a parallel region
3166 is an implicit barrier, any workshare inside the GIMPLE_OMP_PARALLEL that
3167 left a barrier at the end of the GIMPLE_OMP_PARALLEL region can now be removed. */
3171 remove_exit_barrier (struct omp_region *region)
3173 gimple_stmt_iterator gsi;
3174 basic_block exit_bb;
3178 int any_addressable_vars = -1;
3180 exit_bb = region->exit;
3182 /* If the parallel region doesn't return, we don't have REGION->EXIT
3187 /* The last insn in the block will be the parallel's GIMPLE_OMP_RETURN. The
3188 workshare's GIMPLE_OMP_RETURN will be in a preceding block. The kinds of
3189 statements that can appear in between are extremely limited -- no
3190 memory operations at all. Here, we allow nothing at all, so the
3191 only thing we allow to precede this GIMPLE_OMP_RETURN is a label. */
3192 gsi = gsi_last_bb (exit_bb);
3193 gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_RETURN);
3195 if (!gsi_end_p (gsi) && gimple_code (gsi_stmt (gsi)) != GIMPLE_LABEL)
3198 FOR_EACH_EDGE (e, ei, exit_bb->preds)
3200 gsi = gsi_last_bb (e->src);
3201 if (gsi_end_p (gsi))
3203 stmt = gsi_stmt (gsi);
3204 if (gimple_code (stmt) == GIMPLE_OMP_RETURN
3205 && !gimple_omp_return_nowait_p (stmt))
3207 /* OpenMP 3.0 tasks unfortunately prevent this optimization
3208 in many cases. If there could be tasks queued, the barrier
3209 might be needed to let the tasks run before some local
3210 variable of the parallel that the task uses as shared
3211 runs out of scope. The task can be spawned either
3212 from within current function (this would be easy to check)
3213 or from some function it calls and gets passed an address
3214 of such a variable. */
3215 if (any_addressable_vars < 0)
3217 gimple parallel_stmt = last_stmt (region->entry);
3218 tree child_fun = gimple_omp_parallel_child_fn (parallel_stmt);
3219 tree local_decls, block, decl;
3222 any_addressable_vars = 0;
3223 FOR_EACH_LOCAL_DECL (DECL_STRUCT_FUNCTION (child_fun), ix, decl)
3224 if (TREE_ADDRESSABLE (decl))
3226 any_addressable_vars = 1;
3229 for (block = gimple_block (stmt);
3230 !any_addressable_vars
3232 && TREE_CODE (block) == BLOCK;
3233 block = BLOCK_SUPERCONTEXT (block))
3235 for (local_decls = BLOCK_VARS (block);
3237 local_decls = DECL_CHAIN (local_decls))
3238 if (TREE_ADDRESSABLE (local_decls))
3240 any_addressable_vars = 1;
3243 if (block == gimple_block (parallel_stmt))
3247 if (!any_addressable_vars)
3248 gimple_omp_return_set_nowait (stmt);
3254 remove_exit_barriers (struct omp_region *region)
3256 if (region->type == GIMPLE_OMP_PARALLEL)
3257 remove_exit_barrier (region);
3261 region = region->inner;
3262 remove_exit_barriers (region);
3263 while (region->next)
3265 region = region->next;
3266 remove_exit_barriers (region);
3271 /* Optimize omp_get_thread_num () and omp_get_num_threads ()
3272 calls. These can't be declared as const functions, but
3273 within one parallel body they are constant, so they can be
3274 transformed there into __builtin_omp_get_{thread_num,num_threads} ()
3275 which are declared const. Similarly for task body, except
3276 that in untied task omp_get_thread_num () can change at any task
3277 scheduling point. */
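/* For example, a call

     omp_get_thread_num ()

   inside a parallel body (or a tied task body) is redirected below to
   the const __builtin_omp_get_thread_num (), which can then be CSEd
   by later passes.  */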
3280 optimize_omp_library_calls (gimple entry_stmt)
3283 gimple_stmt_iterator gsi;
3285 = DECL_ASSEMBLER_NAME (built_in_decls [BUILT_IN_OMP_GET_THREAD_NUM]);
3287 = DECL_ASSEMBLER_NAME (built_in_decls [BUILT_IN_OMP_GET_NUM_THREADS]);
3288 bool untied_task = (gimple_code (entry_stmt) == GIMPLE_OMP_TASK
3289 && find_omp_clause (gimple_omp_task_clauses (entry_stmt),
3290 OMP_CLAUSE_UNTIED) != NULL);
3293 for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
3295 gimple call = gsi_stmt (gsi);
3298 if (is_gimple_call (call)
3299 && (decl = gimple_call_fndecl (call))
3300 && DECL_EXTERNAL (decl)
3301 && TREE_PUBLIC (decl)
3302 && DECL_INITIAL (decl) == NULL)
3306 if (DECL_NAME (decl) == thr_num_id)
3308 /* In #pragma omp task untied omp_get_thread_num () can change
3309 during the execution of the task region. */
3312 built_in = built_in_decls [BUILT_IN_OMP_GET_THREAD_NUM];
3314 else if (DECL_NAME (decl) == num_thr_id)
3315 built_in = built_in_decls [BUILT_IN_OMP_GET_NUM_THREADS];
3319 if (DECL_ASSEMBLER_NAME (decl) != DECL_ASSEMBLER_NAME (built_in)
3320 || gimple_call_num_args (call) != 0)
3323 if (flag_exceptions && !TREE_NOTHROW (decl))
3326 if (TREE_CODE (TREE_TYPE (decl)) != FUNCTION_TYPE
3327 || !types_compatible_p (TREE_TYPE (TREE_TYPE (decl)),
3328 TREE_TYPE (TREE_TYPE (built_in))))
3331 gimple_call_set_fndecl (call, built_in);
3336 /* Expand the OpenMP parallel or task directive starting at REGION. */
3339 expand_omp_taskreg (struct omp_region *region)
3341 basic_block entry_bb, exit_bb, new_bb;
3342 struct function *child_cfun;
3343 tree child_fn, block, t;
3345 gimple_stmt_iterator gsi;
3346 gimple entry_stmt, stmt;
3348 VEC(tree,gc) *ws_args;
3350 entry_stmt = last_stmt (region->entry);
3351 child_fn = gimple_omp_taskreg_child_fn (entry_stmt);
3352 child_cfun = DECL_STRUCT_FUNCTION (child_fn);
3353 /* If this function has already been instrumented, make sure
3354 the child function isn't instrumented again. */
3355 child_cfun->after_tree_profile = cfun->after_tree_profile;
3357 entry_bb = region->entry;
3358 exit_bb = region->exit;
3360 if (is_combined_parallel (region))
3361 ws_args = region->ws_args;
3365 if (child_cfun->cfg)
3367 /* Due to inlining, it may happen that we have already outlined
3368 the region, in which case all we need to do is make the
3369 sub-graph unreachable and emit the parallel call. */
3370 edge entry_succ_e, exit_succ_e;
3371 gimple_stmt_iterator gsi;
3373 entry_succ_e = single_succ_edge (entry_bb);
3375 gsi = gsi_last_bb (entry_bb);
3376 gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_PARALLEL
3377 || gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_TASK);
3378 gsi_remove (&gsi, true);
3383 exit_succ_e = single_succ_edge (exit_bb);
3384 make_edge (new_bb, exit_succ_e->dest, EDGE_FALLTHRU);
3386 remove_edge_and_dominated_blocks (entry_succ_e);
3390 unsigned srcidx, dstidx, num;
3392 /* If the parallel region needs data sent from the parent
3393 function, then the very first statement (except possible
3394 tree profile counter updates) of the parallel body
3395 is a copy assignment .OMP_DATA_I = &.OMP_DATA_O. Since
3396 &.OMP_DATA_O is passed as an argument to the child function,
3397 we need to replace it with the argument as seen by the child
3400 In most cases, this will end up being the identity assignment
3401 .OMP_DATA_I = .OMP_DATA_I. However, if the parallel body had
3402 a function call that has been inlined, the original PARM_DECL
3403 .OMP_DATA_I may have been converted into a different local
3404 variable, in which case we need to keep the assignment. */
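/* (Sketch of the rewrite described above: the statement

     .OMP_DATA_I = &.OMP_DATA_O;

   becomes the identity assignment .OMP_DATA_I = .OMP_DATA_I and is
   simply removed below when its LHS is the child's argument decl.)  */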
3405 if (gimple_omp_taskreg_data_arg (entry_stmt))
3407 basic_block entry_succ_bb = single_succ (entry_bb);
3408 gimple_stmt_iterator gsi;
3410 gimple parcopy_stmt = NULL;
3412 for (gsi = gsi_start_bb (entry_succ_bb); ; gsi_next (&gsi))
3416 gcc_assert (!gsi_end_p (gsi));
3417 stmt = gsi_stmt (gsi);
3418 if (gimple_code (stmt) != GIMPLE_ASSIGN)
3421 if (gimple_num_ops (stmt) == 2)
3423 tree arg = gimple_assign_rhs1 (stmt);
3425 /* We ignore the subcode because we're
3426 effectively doing a STRIP_NOPS. */
3428 if (TREE_CODE (arg) == ADDR_EXPR
3429 && TREE_OPERAND (arg, 0)
3430 == gimple_omp_taskreg_data_arg (entry_stmt))
3432 parcopy_stmt = stmt;
3438 gcc_assert (parcopy_stmt != NULL);
3439 arg = DECL_ARGUMENTS (child_fn);
3441 if (!gimple_in_ssa_p (cfun))
3443 if (gimple_assign_lhs (parcopy_stmt) == arg)
3444 gsi_remove (&gsi, true);
3447 /* ?? Is setting the subcode really necessary ?? */
3448 gimple_omp_set_subcode (parcopy_stmt, TREE_CODE (arg));
3449 gimple_assign_set_rhs1 (parcopy_stmt, arg);
3454 /* If we are in ssa form, we must load the value from the default
3455 definition of the argument. That should not be defined now,
3456 since the argument is not used uninitialized. */
3457 gcc_assert (gimple_default_def (cfun, arg) == NULL);
3458 narg = make_ssa_name (arg, gimple_build_nop ());
3459 set_default_def (arg, narg);
3460 /* ?? Is setting the subcode really necessary ?? */
3461 gimple_omp_set_subcode (parcopy_stmt, TREE_CODE (narg));
3462 gimple_assign_set_rhs1 (parcopy_stmt, narg);
3463 update_stmt (parcopy_stmt);
3467 /* Declare local variables needed in CHILD_CFUN. */
3468 block = DECL_INITIAL (child_fn);
3469 BLOCK_VARS (block) = vec2chain (child_cfun->local_decls);
3470 /* The gimplifier could record temporaries in parallel/task block
3471 rather than in the containing function's local_decls chain,
3472 which would mean cgraph missed finalizing them. Do it now. */
3473 for (t = BLOCK_VARS (block); t; t = DECL_CHAIN (t))
3474 if (TREE_CODE (t) == VAR_DECL
3476 && !DECL_EXTERNAL (t))
3477 varpool_finalize_decl (t);
3478 DECL_SAVED_TREE (child_fn) = NULL;
3479 gimple_set_body (child_fn, bb_seq (single_succ (entry_bb)));
3480 TREE_USED (block) = 1;
3482 /* Reset DECL_CONTEXT on function arguments. */
3483 for (t = DECL_ARGUMENTS (child_fn); t; t = DECL_CHAIN (t))
3484 DECL_CONTEXT (t) = child_fn;
3486 /* Split ENTRY_BB at GIMPLE_OMP_PARALLEL or GIMPLE_OMP_TASK,
3487 so that it can be moved to the child function. */
3488 gsi = gsi_last_bb (entry_bb);
3489 stmt = gsi_stmt (gsi);
3490 gcc_assert (stmt && (gimple_code (stmt) == GIMPLE_OMP_PARALLEL
3491 || gimple_code (stmt) == GIMPLE_OMP_TASK));
3492 gsi_remove (&gsi, true);
3493 e = split_block (entry_bb, stmt);
3495 single_succ_edge (entry_bb)->flags = EDGE_FALLTHRU;
3497 /* Convert GIMPLE_OMP_RETURN into a RETURN_EXPR. */
3500 gsi = gsi_last_bb (exit_bb);
3501 gcc_assert (!gsi_end_p (gsi)
3502 && gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_RETURN);
3503 stmt = gimple_build_return (NULL);
3504 gsi_insert_after (&gsi, stmt, GSI_SAME_STMT);
3505 gsi_remove (&gsi, true);
3508 /* Move the parallel region into CHILD_CFUN. */
3510 if (gimple_in_ssa_p (cfun))
3512 push_cfun (child_cfun);
3513 init_tree_ssa (child_cfun);
3514 init_ssa_operands ();
3515 cfun->gimple_df->in_ssa_p = true;
3520 block = gimple_block (entry_stmt);
3522 new_bb = move_sese_region_to_fn (child_cfun, entry_bb, exit_bb, block);
3524 single_succ_edge (new_bb)->flags = EDGE_FALLTHRU;
3526 /* Remove non-local VAR_DECLs from child_cfun->local_decls list. */
3527 num = VEC_length (tree, child_cfun->local_decls);
3528 for (srcidx = 0, dstidx = 0; srcidx < num; srcidx++)
3530 t = VEC_index (tree, child_cfun->local_decls, srcidx);
3531 if (DECL_CONTEXT (t) == cfun->decl)
3533 if (srcidx != dstidx)
3534 VEC_replace (tree, child_cfun->local_decls, dstidx, t);
3538 VEC_truncate (tree, child_cfun->local_decls, dstidx);
3540 /* Inform the callgraph about the new function. */
3541 DECL_STRUCT_FUNCTION (child_fn)->curr_properties
3542 = cfun->curr_properties;
3543 cgraph_add_new_function (child_fn, true);
3545 /* Fix the callgraph edges for child_cfun. Those for cfun will be
3546 fixed in a following pass. */
3547 push_cfun (child_cfun);
3548 save_current = current_function_decl;
3549 current_function_decl = child_fn;
3551 optimize_omp_library_calls (entry_stmt);
3552 rebuild_cgraph_edges ();
3554 /* Some EH regions might become dead, see PR34608. If
3555 pass_cleanup_cfg isn't the first pass to happen with the
3556 new child, these dead EH edges might cause problems.
3557 Clean them up now. */
3558 if (flag_exceptions)
3561 bool changed = false;
3564 changed |= gimple_purge_dead_eh_edges (bb);
3566 cleanup_tree_cfg ();
3568 if (gimple_in_ssa_p (cfun))
3569 update_ssa (TODO_update_ssa);
3570 current_function_decl = save_current;
3574 /* Emit a library call to launch the children threads. */
3575 if (gimple_code (entry_stmt) == GIMPLE_OMP_PARALLEL)
3576 expand_parallel_call (region, new_bb, entry_stmt, ws_args);
3578 expand_task_call (new_bb, entry_stmt);
3579 update_ssa (TODO_update_ssa_only_virtuals);
3583 /* A subroutine of expand_omp_for. Generate code for a parallel
3584 loop with any schedule. Given parameters:
3586 for (V = N1; V cond N2; V += STEP) BODY;
3588 where COND is "<" or ">", we generate pseudocode
3590 more = GOMP_loop_foo_start (N1, N2, STEP, CHUNK, &istart0, &iend0);
3591 if (more) goto L0; else goto L3;
3598 if (V cond iend) goto L1; else goto L2;
3600 if (GOMP_loop_foo_next (&istart0, &iend0)) goto L0; else goto L3;
3603 If this is a combined omp parallel loop, instead of the call to
3604 GOMP_loop_foo_start, we call GOMP_loop_foo_next.
3606 For collapsed loops, given parameters:
3608 for (V1 = N11; V1 cond1 N12; V1 += STEP1)
3609 for (V2 = N21; V2 cond2 N22; V2 += STEP2)
3610 for (V3 = N31; V3 cond3 N32; V3 += STEP3)
3613 we generate pseudocode
3619 count3 = (adj + N32 - N31) / STEP3;
3624 count2 = (adj + N22 - N21) / STEP2;
3629 count1 = (adj + N12 - N11) / STEP1;
3630 count = count1 * count2 * count3;
3631 more = GOMP_loop_foo_start (0, count, 1, CHUNK, &istart0, &iend0);
3632 if (more) goto L0; else goto L3;
3636 V3 = N31 + (T % count3) * STEP3;
3638 V2 = N21 + (T % count2) * STEP2;
3640 V1 = N11 + T * STEP1;
3645 if (V < iend) goto L10; else goto L2;
3648 if (V3 cond3 N32) goto L1; else goto L11;
3652 if (V2 cond2 N22) goto L1; else goto L12;
3658 if (GOMP_loop_foo_next (&istart0, &iend0)) goto L0; else goto L3;
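/* (An informal check of the index recovery above: with collapse(3)
   and counts count1 = 4, count2 = 3, count3 = 2, a logical iteration
   T in [0, 24) yields the V3 index as T % 2, the V2 index as
   (T / 2) % 3 and the V1 index as T / 6, matching the successive
   mod/div steps.)  */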
3664 expand_omp_for_generic (struct omp_region *region,
3665 struct omp_for_data *fd,
3666 enum built_in_function start_fn,
3667 enum built_in_function next_fn)
3669 tree type, istart0, iend0, iend;
3670 tree t, vmain, vback, bias = NULL_TREE;
3671 basic_block entry_bb, cont_bb, exit_bb, l0_bb, l1_bb, collapse_bb;
3672 basic_block l2_bb = NULL, l3_bb = NULL;
3673 gimple_stmt_iterator gsi;
3675 bool in_combined_parallel = is_combined_parallel (region);
3676 bool broken_loop = region->cont == NULL;
3678 tree *counts = NULL;
3681 gcc_assert (!broken_loop || !in_combined_parallel);
3682 gcc_assert (fd->iter_type == long_integer_type_node
3683 || !in_combined_parallel);
3685 type = TREE_TYPE (fd->loop.v);
3686 istart0 = create_tmp_var (fd->iter_type, ".istart0");
3687 iend0 = create_tmp_var (fd->iter_type, ".iend0");
3688 TREE_ADDRESSABLE (istart0) = 1;
3689 TREE_ADDRESSABLE (iend0) = 1;
3690 if (gimple_in_ssa_p (cfun))
3692 add_referenced_var (istart0);
3693 add_referenced_var (iend0);
3696 /* See if we need to bias by LLONG_MIN. */
3697 if (fd->iter_type == long_long_unsigned_type_node
3698 && TREE_CODE (type) == INTEGER_TYPE
3699 && !TYPE_UNSIGNED (type))
3703 if (fd->loop.cond_code == LT_EXPR)
3706 n2 = fold_build2 (PLUS_EXPR, type, fd->loop.n2, fd->loop.step);
3710 n1 = fold_build2 (MINUS_EXPR, type, fd->loop.n2, fd->loop.step);
3713 if (TREE_CODE (n1) != INTEGER_CST
3714 || TREE_CODE (n2) != INTEGER_CST
3715 || ((tree_int_cst_sgn (n1) < 0) ^ (tree_int_cst_sgn (n2) < 0)))
3716 bias = fold_convert (fd->iter_type, TYPE_MIN_VALUE (type));
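/* (Informal note: the bias is the usual trick of adding
   TYPE_MIN_VALUE, i.e. 0x8000...0, to both bounds; this maps the
   signed iteration range monotonically onto the unsigned long long
   range expected by the GOMP_loop_ull_* functions.)  */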
3719 entry_bb = region->entry;
3720 cont_bb = region->cont;
3722 gcc_assert (EDGE_COUNT (entry_bb->succs) == 2);
3723 gcc_assert (broken_loop
3724 || BRANCH_EDGE (entry_bb)->dest == FALLTHRU_EDGE (cont_bb)->dest);
3725 l0_bb = split_edge (FALLTHRU_EDGE (entry_bb));
3726 l1_bb = single_succ (l0_bb);
3729 l2_bb = create_empty_bb (cont_bb);
3730 gcc_assert (BRANCH_EDGE (cont_bb)->dest == l1_bb);
3731 gcc_assert (EDGE_COUNT (cont_bb->succs) == 2);
3735 l3_bb = BRANCH_EDGE (entry_bb)->dest;
3736 exit_bb = region->exit;
3738 gsi = gsi_last_bb (entry_bb);
3740 gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_FOR);
3741 if (fd->collapse > 1)
3743 /* Collapsed loops need work for expansion in SSA form. */
3744 gcc_assert (!gimple_in_ssa_p (cfun));
3745 counts = (tree *) alloca (fd->collapse * sizeof (tree));
3746 for (i = 0; i < fd->collapse; i++)
3748 tree itype = TREE_TYPE (fd->loops[i].v);
3750 if (POINTER_TYPE_P (itype))
3751 itype = lang_hooks.types.type_for_size (TYPE_PRECISION (itype), 0);
3752 t = build_int_cst (itype, (fd->loops[i].cond_code == LT_EXPR
3754 t = fold_build2 (PLUS_EXPR, itype,
3755 fold_convert (itype, fd->loops[i].step), t);
3756 t = fold_build2 (PLUS_EXPR, itype, t,
3757 fold_convert (itype, fd->loops[i].n2));
3758 t = fold_build2 (MINUS_EXPR, itype, t,
3759 fold_convert (itype, fd->loops[i].n1));
3760 if (TYPE_UNSIGNED (itype) && fd->loops[i].cond_code == GT_EXPR)
3761 t = fold_build2 (TRUNC_DIV_EXPR, itype,
3762 fold_build1 (NEGATE_EXPR, itype, t),
3763 fold_build1 (NEGATE_EXPR, itype,
3764 fold_convert (itype,
3765 fd->loops[i].step)));
3767 t = fold_build2 (TRUNC_DIV_EXPR, itype, t,
3768 fold_convert (itype, fd->loops[i].step));
3769 t = fold_convert (type, t);
3770 if (TREE_CODE (t) == INTEGER_CST)
3774 counts[i] = create_tmp_var (type, ".count");
3775 t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE,
3776 true, GSI_SAME_STMT);
3777 stmt = gimple_build_assign (counts[i], t);
3778 gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
3780 if (SSA_VAR_P (fd->loop.n2))
3786 t = fold_build2 (MULT_EXPR, type, fd->loop.n2, counts[i]);
3787 t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE,
3788 true, GSI_SAME_STMT);
3790 stmt = gimple_build_assign (fd->loop.n2, t);
3791 gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
3795 if (in_combined_parallel)
3797 /* In a combined parallel loop, emit a call to
3798 GOMP_loop_foo_next. */
3799 t = build_call_expr (built_in_decls[next_fn], 2,
3800 build_fold_addr_expr (istart0),
3801 build_fold_addr_expr (iend0));
3805 tree t0, t1, t2, t3, t4;
3806 /* If this is not a combined parallel loop, emit a call to
3807 GOMP_loop_foo_start in ENTRY_BB. */
3808 t4 = build_fold_addr_expr (iend0);
3809 t3 = build_fold_addr_expr (istart0);
3810 t2 = fold_convert (fd->iter_type, fd->loop.step);
3811 if (POINTER_TYPE_P (type)
3812 && TYPE_PRECISION (type) != TYPE_PRECISION (fd->iter_type))
3814 /* Avoid casting pointers to an integer of a different size. */
3816 = lang_hooks.types.type_for_size (TYPE_PRECISION (type), 0);
3817 t1 = fold_convert (fd->iter_type, fold_convert (itype, fd->loop.n2));
3818 t0 = fold_convert (fd->iter_type, fold_convert (itype, fd->loop.n1));
3822 t1 = fold_convert (fd->iter_type, fd->loop.n2);
3823 t0 = fold_convert (fd->iter_type, fd->loop.n1);
3827 t1 = fold_build2 (PLUS_EXPR, fd->iter_type, t1, bias);
3828 t0 = fold_build2 (PLUS_EXPR, fd->iter_type, t0, bias);
3830 if (fd->iter_type == long_integer_type_node)
3834 t = fold_convert (fd->iter_type, fd->chunk_size);
3835 t = build_call_expr (built_in_decls[start_fn], 6,
3836 t0, t1, t2, t, t3, t4);
3839 t = build_call_expr (built_in_decls[start_fn], 5,
3840 t0, t1, t2, t3, t4);
3847 /* The GOMP_loop_ull_*start functions have an additional boolean
3848 argument, true for < loops and false for > loops.
3849 In Fortran, the C bool type can be different from
3850 boolean_type_node. */
3851 c_bool_type = TREE_TYPE (TREE_TYPE (built_in_decls[start_fn]));
3852 t5 = build_int_cst (c_bool_type,
3853 fd->loop.cond_code == LT_EXPR ? 1 : 0);
3856 t = fold_convert (fd->iter_type, fd->chunk_size);
3857 t = build_call_expr (built_in_decls[start_fn], 7,
3858 t5, t0, t1, t2, t, t3, t4);
3861 t = build_call_expr (built_in_decls[start_fn], 6,
3862 t5, t0, t1, t2, t3, t4);
3865 if (TREE_TYPE (t) != boolean_type_node)
3866 t = fold_build2 (NE_EXPR, boolean_type_node,
3867 t, build_int_cst (TREE_TYPE (t), 0));
3868 t = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
3869 true, GSI_SAME_STMT);
3870 gsi_insert_after (&gsi, gimple_build_cond_empty (t), GSI_SAME_STMT);
3872 /* Remove the GIMPLE_OMP_FOR statement. */
3873 gsi_remove (&gsi, true);
3875 /* Iteration setup for sequential loop goes in L0_BB. */
3876 gsi = gsi_start_bb (l0_bb);
3879 t = fold_build2 (MINUS_EXPR, fd->iter_type, t, bias);
3880 if (POINTER_TYPE_P (type))
3881 t = fold_convert (lang_hooks.types.type_for_size (TYPE_PRECISION (type),
3883 t = fold_convert (type, t);
3884 t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE,
3885 false, GSI_CONTINUE_LINKING);
3886 stmt = gimple_build_assign (fd->loop.v, t);
3887 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
3891 t = fold_build2 (MINUS_EXPR, fd->iter_type, t, bias);
3892 if (POINTER_TYPE_P (type))
3893 t = fold_convert (lang_hooks.types.type_for_size (TYPE_PRECISION (type),
3895 t = fold_convert (type, t);
3896 iend = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
3897 false, GSI_CONTINUE_LINKING);
3898 if (fd->collapse > 1)
3900 tree tem = create_tmp_var (type, ".tem");
3902 stmt = gimple_build_assign (tem, fd->loop.v);
3903 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
3904 for (i = fd->collapse - 1; i >= 0; i--)
3906 tree vtype = TREE_TYPE (fd->loops[i].v), itype;
3908 if (POINTER_TYPE_P (vtype))
3909 itype = lang_hooks.types.type_for_size (TYPE_PRECISION (vtype), 0);
3910 t = fold_build2 (TRUNC_MOD_EXPR, type, tem, counts[i]);
3911 t = fold_convert (itype, t);
3912 t = fold_build2 (MULT_EXPR, itype, t,
3913 fold_convert (itype, fd->loops[i].step));
3914 if (POINTER_TYPE_P (vtype))
3915 t = fold_build_pointer_plus (fd->loops[i].n1, t);
3917 t = fold_build2 (PLUS_EXPR, itype, fd->loops[i].n1, t);
3918 t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE,
3919 false, GSI_CONTINUE_LINKING);
3920 stmt = gimple_build_assign (fd->loops[i].v, t);
3921 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
3924 t = fold_build2 (TRUNC_DIV_EXPR, type, tem, counts[i]);
3925 t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE,
3926 false, GSI_CONTINUE_LINKING);
3927 stmt = gimple_build_assign (tem, t);
3928 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
3935 /* Code to control the increment and predicate for the sequential
3936 loop goes in the CONT_BB. */
3937 gsi = gsi_last_bb (cont_bb);
3938 stmt = gsi_stmt (gsi);
3939 gcc_assert (gimple_code (stmt) == GIMPLE_OMP_CONTINUE);
3940 vmain = gimple_omp_continue_control_use (stmt);
3941 vback = gimple_omp_continue_control_def (stmt);
3943 if (POINTER_TYPE_P (type))
3944 t = fold_build_pointer_plus (vmain, fd->loop.step);
3946 t = fold_build2 (PLUS_EXPR, type, vmain, fd->loop.step);
3947 t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE,
3948 true, GSI_SAME_STMT);
3949 stmt = gimple_build_assign (vback, t);
3950 gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
3952 t = build2 (fd->loop.cond_code, boolean_type_node, vback, iend);
3953 stmt = gimple_build_cond_empty (t);
3954 gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
3956 /* Remove GIMPLE_OMP_CONTINUE. */
3957 gsi_remove (&gsi, true);
3959 if (fd->collapse > 1)
3961 basic_block last_bb, bb;
3964 for (i = fd->collapse - 1; i >= 0; i--)
3966 tree vtype = TREE_TYPE (fd->loops[i].v);
3968 bb = create_empty_bb (last_bb);
3969 gsi = gsi_start_bb (bb);
3971 if (i < fd->collapse - 1)
3973 e = make_edge (last_bb, bb, EDGE_FALSE_VALUE);
3974 e->probability = REG_BR_PROB_BASE / 8;
3976 t = fd->loops[i + 1].n1;
3977 t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE,
3978 false, GSI_CONTINUE_LINKING);
3979 stmt = gimple_build_assign (fd->loops[i + 1].v, t);
3980 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
3985 set_immediate_dominator (CDI_DOMINATORS, bb, last_bb);
3987 if (POINTER_TYPE_P (vtype))
3988 t = fold_build_pointer_plus (fd->loops[i].v, fd->loops[i].step);
3990 t = fold_build2 (PLUS_EXPR, vtype, fd->loops[i].v,
3992 t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE,
3993 false, GSI_CONTINUE_LINKING);
3994 stmt = gimple_build_assign (fd->loops[i].v, t);
3995 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
3999 t = fd->loops[i].n2;
4000 t = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
4001 false, GSI_CONTINUE_LINKING);
4002 t = fold_build2 (fd->loops[i].cond_code, boolean_type_node,
4004 stmt = gimple_build_cond_empty (t);
4005 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
4006 e = make_edge (bb, l1_bb, EDGE_TRUE_VALUE);
4007 e->probability = REG_BR_PROB_BASE * 7 / 8;
4010 make_edge (bb, l1_bb, EDGE_FALLTHRU);
4015 /* Emit code to get the next parallel iteration in L2_BB. */
4016 gsi = gsi_start_bb (l2_bb);
4018 t = build_call_expr (built_in_decls[next_fn], 2,
4019 build_fold_addr_expr (istart0),
4020 build_fold_addr_expr (iend0));
4021 t = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
4022 false, GSI_CONTINUE_LINKING);
4023 if (TREE_TYPE (t) != boolean_type_node)
4024 t = fold_build2 (NE_EXPR, boolean_type_node,
4025 t, build_int_cst (TREE_TYPE (t), 0));
4026 stmt = gimple_build_cond_empty (t);
4027 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
4030 /* Add the loop cleanup function. */
4031 gsi = gsi_last_bb (exit_bb);
4032 if (gimple_omp_return_nowait_p (gsi_stmt (gsi)))
4033 t = built_in_decls[BUILT_IN_GOMP_LOOP_END_NOWAIT];
4035 t = built_in_decls[BUILT_IN_GOMP_LOOP_END];
4036 stmt = gimple_build_call (t, 0);
4037 gsi_insert_after (&gsi, stmt, GSI_SAME_STMT);
4038 gsi_remove (&gsi, true);
4040 /* Connect the new blocks. */
4041 find_edge (entry_bb, l0_bb)->flags = EDGE_TRUE_VALUE;
4042 find_edge (entry_bb, l3_bb)->flags = EDGE_FALSE_VALUE;
4048 e = find_edge (cont_bb, l3_bb);
4049 ne = make_edge (l2_bb, l3_bb, EDGE_FALSE_VALUE);
4051 phis = phi_nodes (l3_bb);
4052 for (gsi = gsi_start (phis); !gsi_end_p (gsi); gsi_next (&gsi))
4054 gimple phi = gsi_stmt (gsi);
4055 SET_USE (PHI_ARG_DEF_PTR_FROM_EDGE (phi, ne),
4056 PHI_ARG_DEF_FROM_EDGE (phi, e));
4060 make_edge (cont_bb, l2_bb, EDGE_FALSE_VALUE);
4061 if (fd->collapse > 1)
4063 e = find_edge (cont_bb, l1_bb);
4065 e = make_edge (cont_bb, collapse_bb, EDGE_TRUE_VALUE);
4069 e = find_edge (cont_bb, l1_bb);
4070 e->flags = EDGE_TRUE_VALUE;
4072 e->probability = REG_BR_PROB_BASE * 7 / 8;
4073 find_edge (cont_bb, l2_bb)->probability = REG_BR_PROB_BASE / 8;
4074 make_edge (l2_bb, l0_bb, EDGE_TRUE_VALUE);
4076 set_immediate_dominator (CDI_DOMINATORS, l2_bb,
4077 recompute_dominator (CDI_DOMINATORS, l2_bb));
4078 set_immediate_dominator (CDI_DOMINATORS, l3_bb,
4079 recompute_dominator (CDI_DOMINATORS, l3_bb));
4080 set_immediate_dominator (CDI_DOMINATORS, l0_bb,
4081 recompute_dominator (CDI_DOMINATORS, l0_bb));
4082 set_immediate_dominator (CDI_DOMINATORS, l1_bb,
4083 recompute_dominator (CDI_DOMINATORS, l1_bb));
4088 /* A subroutine of expand_omp_for. Generate code for a parallel
4089 loop with static schedule and no specified chunk size. Given
4092 for (V = N1; V cond N2; V += STEP) BODY;
4094 where COND is "<" or ">", we generate pseudocode
4100 if ((__typeof (V)) -1 > 0 && cond is >)
4101 n = -(adj + N2 - N1) / -STEP;
4103 n = (adj + N2 - N1) / STEP;
4106 if (threadid < tt) goto L3; else goto L4;
4111 s0 = q * threadid + tt;
4114 if (s0 >= e0) goto L2; else goto L0;
4120 if (V cond e) goto L1;
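/* (A worked example of the partitioning above: with n = 10
   iterations and nthreads = 4, q = 2 and tt = 2, so threads 0 and 1
   execute three iterations each, [0,3) and [3,6), while threads 2
   and 3 execute two each, [6,8) and [8,10).)  */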
4125 expand_omp_for_static_nochunk (struct omp_region *region,
4126 struct omp_for_data *fd)
4128 tree n, q, s0, e0, e, t, tt, nthreads, threadid;
4129 tree type, itype, vmain, vback;
4130 basic_block entry_bb, second_bb, third_bb, exit_bb, seq_start_bb;
4131 basic_block body_bb, cont_bb;
4133 gimple_stmt_iterator gsi;
4137 itype = type = TREE_TYPE (fd->loop.v);
4138 if (POINTER_TYPE_P (type))
4139 itype = lang_hooks.types.type_for_size (TYPE_PRECISION (type), 0);
4141 entry_bb = region->entry;
4142 cont_bb = region->cont;
4143 gcc_assert (EDGE_COUNT (entry_bb->succs) == 2);
4144 gcc_assert (BRANCH_EDGE (entry_bb)->dest == FALLTHRU_EDGE (cont_bb)->dest);
4145 seq_start_bb = split_edge (FALLTHRU_EDGE (entry_bb));
4146 body_bb = single_succ (seq_start_bb);
4147 gcc_assert (BRANCH_EDGE (cont_bb)->dest == body_bb);
4148 gcc_assert (EDGE_COUNT (cont_bb->succs) == 2);
4149 fin_bb = FALLTHRU_EDGE (cont_bb)->dest;
4150 exit_bb = region->exit;
4152 /* Iteration space partitioning goes in ENTRY_BB. */
4153 gsi = gsi_last_bb (entry_bb);
4154 gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_FOR);
4156 t = build_call_expr (built_in_decls[BUILT_IN_OMP_GET_NUM_THREADS], 0);
4157 t = fold_convert (itype, t);
4158 nthreads = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
4159 true, GSI_SAME_STMT);
4161 t = build_call_expr (built_in_decls[BUILT_IN_OMP_GET_THREAD_NUM], 0);
4162 t = fold_convert (itype, t);
4163 threadid = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
4164 true, GSI_SAME_STMT);
4167 = force_gimple_operand_gsi (&gsi, fold_convert (type, fd->loop.n1),
4168 true, NULL_TREE, true, GSI_SAME_STMT);
4170 = force_gimple_operand_gsi (&gsi, fold_convert (itype, fd->loop.n2),
4171 true, NULL_TREE, true, GSI_SAME_STMT);
4173 = force_gimple_operand_gsi (&gsi, fold_convert (itype, fd->loop.step),
4174 true, NULL_TREE, true, GSI_SAME_STMT);
4176 t = build_int_cst (itype, (fd->loop.cond_code == LT_EXPR ? -1 : 1));
4177 t = fold_build2 (PLUS_EXPR, itype, fd->loop.step, t);
4178 t = fold_build2 (PLUS_EXPR, itype, t, fd->loop.n2);
4179 t = fold_build2 (MINUS_EXPR, itype, t, fold_convert (itype, fd->loop.n1));
4180 if (TYPE_UNSIGNED (itype) && fd->loop.cond_code == GT_EXPR)
4181 t = fold_build2 (TRUNC_DIV_EXPR, itype,
4182 fold_build1 (NEGATE_EXPR, itype, t),
4183 fold_build1 (NEGATE_EXPR, itype, fd->loop.step));
4185 t = fold_build2 (TRUNC_DIV_EXPR, itype, t, fd->loop.step);
4186 t = fold_convert (itype, t);
4187 n = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE, true, GSI_SAME_STMT);
4189 q = create_tmp_var (itype, "q");
4190 t = fold_build2 (TRUNC_DIV_EXPR, itype, n, nthreads);
4191 t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE, true, GSI_SAME_STMT);
4192 gsi_insert_before (&gsi, gimple_build_assign (q, t), GSI_SAME_STMT);
4194 tt = create_tmp_var (itype, "tt");
4195 t = fold_build2 (TRUNC_MOD_EXPR, itype, n, nthreads);
4196 t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE, true, GSI_SAME_STMT);
4197 gsi_insert_before (&gsi, gimple_build_assign (tt, t), GSI_SAME_STMT);
4199 t = build2 (LT_EXPR, boolean_type_node, threadid, tt);
4200 stmt = gimple_build_cond_empty (t);
4201 gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
4203 second_bb = split_block (entry_bb, stmt)->dest;
4204 gsi = gsi_last_bb (second_bb);
4205 gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_FOR);
4207 gsi_insert_before (&gsi, gimple_build_assign (tt, build_int_cst (itype, 0)),
4209 stmt = gimple_build_assign_with_ops (PLUS_EXPR, q, q,
4210 build_int_cst (itype, 1));
4211 gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
4213 third_bb = split_block (second_bb, stmt)->dest;
4214 gsi = gsi_last_bb (third_bb);
4215 gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_FOR);
4217 t = build2 (MULT_EXPR, itype, q, threadid);
4218 t = build2 (PLUS_EXPR, itype, t, tt);
4219 s0 = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE, true, GSI_SAME_STMT);
4221 t = fold_build2 (PLUS_EXPR, itype, s0, q);
4222 e0 = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE, true, GSI_SAME_STMT);
4224 t = build2 (GE_EXPR, boolean_type_node, s0, e0);
4225 gsi_insert_before (&gsi, gimple_build_cond_empty (t), GSI_SAME_STMT);
4227 /* Remove the GIMPLE_OMP_FOR statement. */
4228 gsi_remove (&gsi, true);
4230 /* Setup code for sequential iteration goes in SEQ_START_BB. */
4231 gsi = gsi_start_bb (seq_start_bb);
4233 t = fold_convert (itype, s0);
4234 t = fold_build2 (MULT_EXPR, itype, t, fd->loop.step);
4235 if (POINTER_TYPE_P (type))
4236 t = fold_build_pointer_plus (fd->loop.n1, t);
4238 t = fold_build2 (PLUS_EXPR, type, t, fd->loop.n1);
4239 t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE,
4240 false, GSI_CONTINUE_LINKING);
4241 stmt = gimple_build_assign (fd->loop.v, t);
4242 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
4244 t = fold_convert (itype, e0);
4245 t = fold_build2 (MULT_EXPR, itype, t, fd->loop.step);
4246 if (POINTER_TYPE_P (type))
4247 t = fold_build_pointer_plus (fd->loop.n1, t);
4249 t = fold_build2 (PLUS_EXPR, type, t, fd->loop.n1);
4250 e = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
4251 false, GSI_CONTINUE_LINKING);
4253 /* The code controlling the sequential loop replaces the
4254 GIMPLE_OMP_CONTINUE. */
4255 gsi = gsi_last_bb (cont_bb);
4256 stmt = gsi_stmt (gsi);
4257 gcc_assert (gimple_code (stmt) == GIMPLE_OMP_CONTINUE);
4258 vmain = gimple_omp_continue_control_use (stmt);
4259 vback = gimple_omp_continue_control_def (stmt);
4261 if (POINTER_TYPE_P (type))
4262 t = fold_build_pointer_plus (vmain, fd->loop.step);
4264 t = fold_build2 (PLUS_EXPR, type, vmain, fd->loop.step);
4265 t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE,
4266 true, GSI_SAME_STMT);
4267 stmt = gimple_build_assign (vback, t);
4268 gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
4270 t = build2 (fd->loop.cond_code, boolean_type_node, vback, e);
4271 gsi_insert_before (&gsi, gimple_build_cond_empty (t), GSI_SAME_STMT);
4273 /* Remove the GIMPLE_OMP_CONTINUE statement. */
4274 gsi_remove (&gsi, true);
  /* Replace the GIMPLE_OMP_RETURN with a barrier, or nothing.  */
  gsi = gsi_last_bb (exit_bb);
  if (!gimple_omp_return_nowait_p (gsi_stmt (gsi)))
    force_gimple_operand_gsi (&gsi, build_omp_barrier (), false, NULL_TREE,
			      false, GSI_SAME_STMT);
  gsi_remove (&gsi, true);

  /* Connect all the blocks.  */
  ep = make_edge (entry_bb, third_bb, EDGE_FALSE_VALUE);
  ep->probability = REG_BR_PROB_BASE / 4 * 3;
  ep = find_edge (entry_bb, second_bb);
  ep->flags = EDGE_TRUE_VALUE;
  ep->probability = REG_BR_PROB_BASE / 4;
  find_edge (third_bb, seq_start_bb)->flags = EDGE_FALSE_VALUE;
  find_edge (third_bb, fin_bb)->flags = EDGE_TRUE_VALUE;

  find_edge (cont_bb, body_bb)->flags = EDGE_TRUE_VALUE;
  find_edge (cont_bb, fin_bb)->flags = EDGE_FALSE_VALUE;

  set_immediate_dominator (CDI_DOMINATORS, second_bb, entry_bb);
  set_immediate_dominator (CDI_DOMINATORS, third_bb, entry_bb);
  set_immediate_dominator (CDI_DOMINATORS, seq_start_bb, third_bb);
  set_immediate_dominator (CDI_DOMINATORS, body_bb,
			   recompute_dominator (CDI_DOMINATORS, body_bb));
  set_immediate_dominator (CDI_DOMINATORS, fin_bb,
			   recompute_dominator (CDI_DOMINATORS, fin_bb));
}
/* A subroutine of expand_omp_for.  Generate code for a parallel
   loop with static schedule and a specified chunk size.  Given
   parameters:

	for (V = N1; V cond N2; V += STEP) BODY;

   where COND is "<" or ">", we generate pseudocode

	if (cond is <)
	  adj = STEP - 1;
	else
	  adj = STEP + 1;
	if ((__typeof (V)) -1 > 0 && cond is >)
	  n = -(adj + N2 - N1) / -STEP;
	else
	  n = (adj + N2 - N1) / STEP;
	trip = 0;
	V = threadid * CHUNK * STEP + N1;  -- this extra definition of V is
					      here so that V is defined
					      if the loop is not entered
    L0:
	s0 = (trip * nthreads + threadid) * CHUNK;
	e0 = min (s0 + CHUNK, n);
	if (s0 < n) goto L1; else goto L4;
    L1:
	V = s0 * STEP + N1;
	e = e0 * STEP + N1;
    L2:
	BODY;
	V += STEP;
	if (V cond e) goto L2; else goto L3;
    L3:
	trip += 1;
	goto L0;
    L4:
*/

static void
expand_omp_for_static_chunk (struct omp_region *region, struct omp_for_data *fd)
{
  tree n, s0, e0, e, t;
  tree trip_var, trip_init, trip_main, trip_back, nthreads, threadid;
  tree type, itype, v_main, v_back, v_extra;
  basic_block entry_bb, exit_bb, body_bb, seq_start_bb, iter_part_bb;
  basic_block trip_update_bb, cont_bb, fin_bb;
  gimple_stmt_iterator si;
  gimple stmt;
  edge se;

  itype = type = TREE_TYPE (fd->loop.v);
  if (POINTER_TYPE_P (type))
    itype = lang_hooks.types.type_for_size (TYPE_PRECISION (type), 0);

  entry_bb = region->entry;
  se = split_block (entry_bb, last_stmt (entry_bb));
  entry_bb = se->src;
  iter_part_bb = se->dest;
  cont_bb = region->cont;
  gcc_assert (EDGE_COUNT (iter_part_bb->succs) == 2);
  gcc_assert (BRANCH_EDGE (iter_part_bb)->dest
	      == FALLTHRU_EDGE (cont_bb)->dest);
  seq_start_bb = split_edge (FALLTHRU_EDGE (iter_part_bb));
  body_bb = single_succ (seq_start_bb);
  gcc_assert (BRANCH_EDGE (cont_bb)->dest == body_bb);
  gcc_assert (EDGE_COUNT (cont_bb->succs) == 2);
  fin_bb = FALLTHRU_EDGE (cont_bb)->dest;
  trip_update_bb = split_edge (FALLTHRU_EDGE (cont_bb));
  exit_bb = region->exit;
  /* Trip and adjustment setup goes in ENTRY_BB.  */
  si = gsi_last_bb (entry_bb);
  gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_FOR);

  t = build_call_expr (built_in_decls[BUILT_IN_OMP_GET_NUM_THREADS], 0);
  t = fold_convert (itype, t);
  nthreads = force_gimple_operand_gsi (&si, t, true, NULL_TREE,
				       true, GSI_SAME_STMT);

  t = build_call_expr (built_in_decls[BUILT_IN_OMP_GET_THREAD_NUM], 0);
  t = fold_convert (itype, t);
  threadid = force_gimple_operand_gsi (&si, t, true, NULL_TREE,
				       true, GSI_SAME_STMT);

  fd->loop.n1
    = force_gimple_operand_gsi (&si, fold_convert (type, fd->loop.n1),
				true, NULL_TREE, true, GSI_SAME_STMT);
  fd->loop.n2
    = force_gimple_operand_gsi (&si, fold_convert (itype, fd->loop.n2),
				true, NULL_TREE, true, GSI_SAME_STMT);
  fd->loop.step
    = force_gimple_operand_gsi (&si, fold_convert (itype, fd->loop.step),
				true, NULL_TREE, true, GSI_SAME_STMT);
  fd->chunk_size
    = force_gimple_operand_gsi (&si, fold_convert (itype, fd->chunk_size),
				true, NULL_TREE, true, GSI_SAME_STMT);
  t = build_int_cst (itype, (fd->loop.cond_code == LT_EXPR ? -1 : 1));
  t = fold_build2 (PLUS_EXPR, itype, fd->loop.step, t);
  t = fold_build2 (PLUS_EXPR, itype, t, fd->loop.n2);
  t = fold_build2 (MINUS_EXPR, itype, t, fold_convert (itype, fd->loop.n1));
  if (TYPE_UNSIGNED (itype) && fd->loop.cond_code == GT_EXPR)
    t = fold_build2 (TRUNC_DIV_EXPR, itype,
		     fold_build1 (NEGATE_EXPR, itype, t),
		     fold_build1 (NEGATE_EXPR, itype, fd->loop.step));
  else
    t = fold_build2 (TRUNC_DIV_EXPR, itype, t, fd->loop.step);
  t = fold_convert (itype, t);
  n = force_gimple_operand_gsi (&si, t, true, NULL_TREE,
				true, GSI_SAME_STMT);
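  /* The computation above rounds the trip count up: for
     "for (V = 0; V < 10; V += 3)" it yields
     n = (3 - 1 + 10 - 0) / 3 = 4, matching the iterations
     V = 0, 3, 6, 9.  */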
  trip_var = create_tmp_var (itype, ".trip");
  if (gimple_in_ssa_p (cfun))
    {
      add_referenced_var (trip_var);
      trip_init = make_ssa_name (trip_var, NULL);
      trip_main = make_ssa_name (trip_var, NULL);
      trip_back = make_ssa_name (trip_var, NULL);
    }
  else
    {
      trip_init = trip_var;
      trip_main = trip_var;
      trip_back = trip_var;
    }

  stmt = gimple_build_assign (trip_init, build_int_cst (itype, 0));
  gsi_insert_before (&si, stmt, GSI_SAME_STMT);

  t = fold_build2 (MULT_EXPR, itype, threadid, fd->chunk_size);
  t = fold_build2 (MULT_EXPR, itype, t, fd->loop.step);
  if (POINTER_TYPE_P (type))
    t = fold_build_pointer_plus (fd->loop.n1, t);
  else
    t = fold_build2 (PLUS_EXPR, type, t, fd->loop.n1);
  v_extra = force_gimple_operand_gsi (&si, t, true, NULL_TREE,
				      true, GSI_SAME_STMT);
  /* Remove the GIMPLE_OMP_FOR.  */
  gsi_remove (&si, true);

  /* Iteration space partitioning goes in ITER_PART_BB.  */
  si = gsi_last_bb (iter_part_bb);

  t = fold_build2 (MULT_EXPR, itype, trip_main, nthreads);
  t = fold_build2 (PLUS_EXPR, itype, t, threadid);
  t = fold_build2 (MULT_EXPR, itype, t, fd->chunk_size);
  s0 = force_gimple_operand_gsi (&si, t, true, NULL_TREE,
				 false, GSI_CONTINUE_LINKING);

  t = fold_build2 (PLUS_EXPR, itype, s0, fd->chunk_size);
  t = fold_build2 (MIN_EXPR, itype, t, n);
  e0 = force_gimple_operand_gsi (&si, t, true, NULL_TREE,
				 false, GSI_CONTINUE_LINKING);

  t = build2 (LT_EXPR, boolean_type_node, s0, n);
  gsi_insert_after (&si, gimple_build_cond_empty (t), GSI_CONTINUE_LINKING);
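  /* E.g., with nthreads = 4 and CHUNK = 8, thread 2 works on
     [16, min (24, n)) during trip 0, then on [48, min (56, n)) during
     trip 1, and leaves the loop once s0 reaches n.  */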
  /* Setup code for sequential iteration goes in SEQ_START_BB.  */
  si = gsi_start_bb (seq_start_bb);

  t = fold_convert (itype, s0);
  t = fold_build2 (MULT_EXPR, itype, t, fd->loop.step);
  if (POINTER_TYPE_P (type))
    t = fold_build_pointer_plus (fd->loop.n1, t);
  else
    t = fold_build2 (PLUS_EXPR, type, t, fd->loop.n1);
  t = force_gimple_operand_gsi (&si, t, false, NULL_TREE,
				false, GSI_CONTINUE_LINKING);
  stmt = gimple_build_assign (fd->loop.v, t);
  gsi_insert_after (&si, stmt, GSI_CONTINUE_LINKING);

  t = fold_convert (itype, e0);
  t = fold_build2 (MULT_EXPR, itype, t, fd->loop.step);
  if (POINTER_TYPE_P (type))
    t = fold_build_pointer_plus (fd->loop.n1, t);
  else
    t = fold_build2 (PLUS_EXPR, type, t, fd->loop.n1);
  e = force_gimple_operand_gsi (&si, t, true, NULL_TREE,
				false, GSI_CONTINUE_LINKING);
  /* The code controlling the sequential loop goes in CONT_BB,
     replacing the GIMPLE_OMP_CONTINUE.  */
  si = gsi_last_bb (cont_bb);
  stmt = gsi_stmt (si);
  gcc_assert (gimple_code (stmt) == GIMPLE_OMP_CONTINUE);
  v_main = gimple_omp_continue_control_use (stmt);
  v_back = gimple_omp_continue_control_def (stmt);

  if (POINTER_TYPE_P (type))
    t = fold_build_pointer_plus (v_main, fd->loop.step);
  else
    t = fold_build2 (PLUS_EXPR, type, v_main, fd->loop.step);
  stmt = gimple_build_assign (v_back, t);
  gsi_insert_before (&si, stmt, GSI_SAME_STMT);

  t = build2 (fd->loop.cond_code, boolean_type_node, v_back, e);
  gsi_insert_before (&si, gimple_build_cond_empty (t), GSI_SAME_STMT);

  /* Remove GIMPLE_OMP_CONTINUE.  */
  gsi_remove (&si, true);

  /* Trip update code goes into TRIP_UPDATE_BB.  */
  si = gsi_start_bb (trip_update_bb);

  t = build_int_cst (itype, 1);
  t = build2 (PLUS_EXPR, itype, trip_main, t);
  stmt = gimple_build_assign (trip_back, t);
  gsi_insert_after (&si, stmt, GSI_CONTINUE_LINKING);

  /* Replace the GIMPLE_OMP_RETURN with a barrier, or nothing.  */
  si = gsi_last_bb (exit_bb);
  if (!gimple_omp_return_nowait_p (gsi_stmt (si)))
    force_gimple_operand_gsi (&si, build_omp_barrier (), false, NULL_TREE,
			      false, GSI_SAME_STMT);
  gsi_remove (&si, true);

  /* Connect the new blocks.  */
  find_edge (iter_part_bb, seq_start_bb)->flags = EDGE_TRUE_VALUE;
  find_edge (iter_part_bb, fin_bb)->flags = EDGE_FALSE_VALUE;

  find_edge (cont_bb, body_bb)->flags = EDGE_TRUE_VALUE;
  find_edge (cont_bb, trip_update_bb)->flags = EDGE_FALSE_VALUE;

  redirect_edge_and_branch (single_succ_edge (trip_update_bb), iter_part_bb);
  if (gimple_in_ssa_p (cfun))
    {
      gimple_stmt_iterator psi;
      gimple phi;
      edge re, ene;
      edge_var_map_vector head;
      edge_var_map *vm;
      size_t i;

      /* When we redirect the edge from trip_update_bb to iter_part_bb, we
	 remove arguments of the phi nodes in fin_bb.  We need to create
	 appropriate phi nodes in iter_part_bb instead.  */
      se = single_pred_edge (fin_bb);
      re = single_succ_edge (trip_update_bb);
      head = redirect_edge_var_map_vector (re);
      ene = single_succ_edge (entry_bb);

      psi = gsi_start_phis (fin_bb);
      for (i = 0; !gsi_end_p (psi) && VEC_iterate (edge_var_map, head, i, vm);
	   gsi_next (&psi), ++i)
	{
	  gimple nphi;
	  source_location locus;

	  phi = gsi_stmt (psi);
	  t = gimple_phi_result (phi);
	  gcc_assert (t == redirect_edge_var_map_result (vm));
	  nphi = create_phi_node (t, iter_part_bb);
	  SSA_NAME_DEF_STMT (t) = nphi;

	  t = PHI_ARG_DEF_FROM_EDGE (phi, se);
	  locus = gimple_phi_arg_location_from_edge (phi, se);

	  /* A special case -- fd->loop.v is not yet computed in
	     iter_part_bb, we need to use v_extra instead.  */
	  if (t == fd->loop.v)
	    t = v_extra;
	  add_phi_arg (nphi, t, ene, locus);
	  locus = redirect_edge_var_map_location (vm);
	  add_phi_arg (nphi, redirect_edge_var_map_def (vm), re, locus);
	}
      gcc_assert (gsi_end_p (psi) && i == VEC_length (edge_var_map, head));
      redirect_edge_var_map_clear (re);
      while (1)
	{
	  psi = gsi_start_phis (fin_bb);
	  if (gsi_end_p (psi))
	    break;
	  remove_phi_node (&psi, false);
	}

      /* Make phi node for trip.  */
      phi = create_phi_node (trip_main, iter_part_bb);
      SSA_NAME_DEF_STMT (trip_main) = phi;
      add_phi_arg (phi, trip_back, single_succ_edge (trip_update_bb),
		   UNKNOWN_LOCATION);
      add_phi_arg (phi, trip_init, single_succ_edge (entry_bb),
		   UNKNOWN_LOCATION);
    }
  set_immediate_dominator (CDI_DOMINATORS, trip_update_bb, cont_bb);
  set_immediate_dominator (CDI_DOMINATORS, iter_part_bb,
			   recompute_dominator (CDI_DOMINATORS, iter_part_bb));
  set_immediate_dominator (CDI_DOMINATORS, fin_bb,
			   recompute_dominator (CDI_DOMINATORS, fin_bb));
  set_immediate_dominator (CDI_DOMINATORS, seq_start_bb,
			   recompute_dominator (CDI_DOMINATORS, seq_start_bb));
  set_immediate_dominator (CDI_DOMINATORS, body_bb,
			   recompute_dominator (CDI_DOMINATORS, body_bb));
}
/* Expand the OpenMP loop defined by REGION.  */

static void
expand_omp_for (struct omp_region *region)
{
  struct omp_for_data fd;
  struct omp_for_data_loop *loops;

  loops
    = (struct omp_for_data_loop *)
      alloca (gimple_omp_for_collapse (last_stmt (region->entry))
	      * sizeof (struct omp_for_data_loop));
  extract_omp_for_data (last_stmt (region->entry), &fd, loops);
  region->sched_kind = fd.sched_kind;

  gcc_assert (EDGE_COUNT (region->entry->succs) == 2);
  BRANCH_EDGE (region->entry)->flags &= ~EDGE_ABNORMAL;
  FALLTHRU_EDGE (region->entry)->flags &= ~EDGE_ABNORMAL;
  if (region->cont)
    {
      gcc_assert (EDGE_COUNT (region->cont->succs) == 2);
      BRANCH_EDGE (region->cont)->flags &= ~EDGE_ABNORMAL;
      FALLTHRU_EDGE (region->cont)->flags &= ~EDGE_ABNORMAL;
    }

  if (fd.sched_kind == OMP_CLAUSE_SCHEDULE_STATIC
      && !fd.have_ordered
      && fd.collapse == 1
      && region->cont != NULL)
    {
      if (fd.chunk_size == NULL)
	expand_omp_for_static_nochunk (region, &fd);
      else
	expand_omp_for_static_chunk (region, &fd);
    }
  else
    {
      int fn_index, start_ix, next_ix;

      gcc_assert (fd.sched_kind != OMP_CLAUSE_SCHEDULE_AUTO);
      fn_index = (fd.sched_kind == OMP_CLAUSE_SCHEDULE_RUNTIME)
		 ? 3 : fd.sched_kind;
      fn_index += fd.have_ordered * 4;
      start_ix = BUILT_IN_GOMP_LOOP_STATIC_START + fn_index;
      next_ix = BUILT_IN_GOMP_LOOP_STATIC_NEXT + fn_index;
      if (fd.iter_type == long_long_unsigned_type_node)
	{
	  start_ix += BUILT_IN_GOMP_LOOP_ULL_STATIC_START
		      - BUILT_IN_GOMP_LOOP_STATIC_START;
	  next_ix += BUILT_IN_GOMP_LOOP_ULL_STATIC_NEXT
		     - BUILT_IN_GOMP_LOOP_STATIC_NEXT;
	}
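      /* The GOMP_loop_*_start/next builtins are laid out consecutively
	 per schedule kind, so the arithmetic above picks the libgomp
	 entry points directly: e.g. schedule(dynamic) gives fn_index 1
	 and thus GOMP_loop_dynamic_start/next, and an ordered clause
	 adds 4 to reach the GOMP_loop_ordered_* variants.  */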
      expand_omp_for_generic (region, &fd, (enum built_in_function) start_ix,
			      (enum built_in_function) next_ix);
    }

  update_ssa (TODO_update_ssa_only_virtuals);
}
/* Expand code for an OpenMP sections directive.  In pseudo code, we generate

	v = GOMP_sections_start (n);
    L0:
	switch (v)
	  {
	  case 0:
	    goto L2;
	  case 1:
	    section 1;
	    goto L1;
	  case 2:
	    ...
	  case n:
	    ...
	  default:
	    abort ();
	  }
    L1:
	v = GOMP_sections_next ();
	goto L0;
    L2:
	reduction;

    If this is a combined parallel sections, replace the call to
    GOMP_sections_start with call to GOMP_sections_next.  */
static void
expand_omp_sections (struct omp_region *region)
{
  tree t, u, vin = NULL, vmain, vnext, l2;
  VEC (tree,heap) *label_vec;
  unsigned len;
  basic_block entry_bb, l0_bb, l1_bb, l2_bb, default_bb;
  gimple_stmt_iterator si, switch_si;
  gimple sections_stmt, stmt, cont;
  edge_iterator ei;
  edge e;
  struct omp_region *inner;
  unsigned i, casei;
  bool exit_reachable = region->cont != NULL;

  gcc_assert (exit_reachable == (region->exit != NULL));
  entry_bb = region->entry;
  l0_bb = single_succ (entry_bb);
  l1_bb = region->cont;
  l2_bb = region->exit;
  if (exit_reachable)
    {
      if (single_pred_p (l2_bb) && single_pred (l2_bb) == l0_bb)
	l2 = gimple_block_label (l2_bb);
      else
	{
	  /* This can happen if there are reductions.  */
	  len = EDGE_COUNT (l0_bb->succs);
	  gcc_assert (len > 0);
	  e = EDGE_SUCC (l0_bb, len - 1);
	  si = gsi_last_bb (e->dest);
	  l2 = NULL_TREE;
	  if (gsi_end_p (si)
	      || gimple_code (gsi_stmt (si)) != GIMPLE_OMP_SECTION)
	    l2 = gimple_block_label (e->dest);
	  else
	    FOR_EACH_EDGE (e, ei, l0_bb->succs)
	      {
		si = gsi_last_bb (e->dest);
		if (gsi_end_p (si)
		    || gimple_code (gsi_stmt (si)) != GIMPLE_OMP_SECTION)
		  {
		    l2 = gimple_block_label (e->dest);
		    break;
		  }
	      }
	}
      default_bb = create_empty_bb (l1_bb->prev_bb);
    }
  else
    {
      default_bb = create_empty_bb (l0_bb);
      l2 = gimple_block_label (default_bb);
    }
  /* We will build a switch() with enough cases for all the
     GIMPLE_OMP_SECTION regions, a '0' case to handle the end of more work
     and a default case to abort if something goes wrong.  */
  len = EDGE_COUNT (l0_bb->succs);

  /* Use VEC_quick_push on label_vec throughout, since we know the size
     in advance.  */
  label_vec = VEC_alloc (tree, heap, len);
  /* The call to GOMP_sections_start goes in ENTRY_BB, replacing the
     GIMPLE_OMP_SECTIONS statement.  */
  si = gsi_last_bb (entry_bb);
  sections_stmt = gsi_stmt (si);
  gcc_assert (gimple_code (sections_stmt) == GIMPLE_OMP_SECTIONS);
  vin = gimple_omp_sections_control (sections_stmt);
  if (!is_combined_parallel (region))
    {
      /* If we are not inside a combined parallel+sections region,
	 call GOMP_sections_start.  */
      t = build_int_cst (unsigned_type_node,
			 exit_reachable ? len - 1 : len);
      u = built_in_decls[BUILT_IN_GOMP_SECTIONS_START];
      stmt = gimple_build_call (u, 1, t);
    }
  else
    {
      /* Otherwise, call GOMP_sections_next.  */
      u = built_in_decls[BUILT_IN_GOMP_SECTIONS_NEXT];
      stmt = gimple_build_call (u, 0);
    }
  gimple_call_set_lhs (stmt, vin);
  gsi_insert_after (&si, stmt, GSI_SAME_STMT);
  gsi_remove (&si, true);
  /* The switch() statement replacing GIMPLE_OMP_SECTIONS_SWITCH goes in
     L0_BB.  */
  switch_si = gsi_last_bb (l0_bb);
  gcc_assert (gimple_code (gsi_stmt (switch_si)) == GIMPLE_OMP_SECTIONS_SWITCH);
  if (exit_reachable)
    {
      cont = last_stmt (l1_bb);
      gcc_assert (gimple_code (cont) == GIMPLE_OMP_CONTINUE);
      vmain = gimple_omp_continue_control_use (cont);
      vnext = gimple_omp_continue_control_def (cont);
    }
  else
    {
      vmain = vin;
      vnext = NULL_TREE;
    }

  i = 0;
  if (exit_reachable)
    {
      t = build_case_label (build_int_cst (unsigned_type_node, 0), NULL, l2);
      VEC_quick_push (tree, label_vec, t);
      i++;
    }
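  /* Case 0 (pushed above when the exit is reachable) is the "no more
     work" value returned by GOMP_sections_next; it routes threads to
     the exit label L2.  */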
  /* Convert each GIMPLE_OMP_SECTION into a CASE_LABEL_EXPR.  */
  for (inner = region->inner, casei = 1;
       inner;
       inner = inner->next, i++, casei++)
    {
      basic_block s_entry_bb, s_exit_bb;

      /* Skip optional reduction region.  */
      if (inner->type == GIMPLE_OMP_ATOMIC_LOAD)
	{
	  --i;
	  --casei;
	  continue;
	}

      s_entry_bb = inner->entry;
      s_exit_bb = inner->exit;

      t = gimple_block_label (s_entry_bb);
      u = build_int_cst (unsigned_type_node, casei);
      u = build_case_label (u, NULL, t);
      VEC_quick_push (tree, label_vec, u);

      si = gsi_last_bb (s_entry_bb);
      gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_SECTION);
      gcc_assert (i < len || gimple_omp_section_last_p (gsi_stmt (si)));
      gsi_remove (&si, true);
      single_succ_edge (s_entry_bb)->flags = EDGE_FALLTHRU;

      if (s_exit_bb == NULL)
	continue;

      si = gsi_last_bb (s_exit_bb);
      gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_RETURN);
      gsi_remove (&si, true);

      single_succ_edge (s_exit_bb)->flags = EDGE_FALLTHRU;
    }
  /* Error handling code goes in DEFAULT_BB.  */
  t = gimple_block_label (default_bb);
  u = build_case_label (NULL, NULL, t);
  make_edge (l0_bb, default_bb, 0);

  stmt = gimple_build_switch_vec (vmain, u, label_vec);
  gsi_insert_after (&switch_si, stmt, GSI_SAME_STMT);
  gsi_remove (&switch_si, true);
  VEC_free (tree, heap, label_vec);

  si = gsi_start_bb (default_bb);
  stmt = gimple_build_call (built_in_decls[BUILT_IN_TRAP], 0);
  gsi_insert_after (&si, stmt, GSI_CONTINUE_LINKING);
  if (exit_reachable)
    {
      /* Code to get the next section goes in L1_BB.  */
      si = gsi_last_bb (l1_bb);
      gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_CONTINUE);

      stmt = gimple_build_call (built_in_decls[BUILT_IN_GOMP_SECTIONS_NEXT], 0);
      gimple_call_set_lhs (stmt, vnext);
      gsi_insert_after (&si, stmt, GSI_SAME_STMT);
      gsi_remove (&si, true);

      single_succ_edge (l1_bb)->flags = EDGE_FALLTHRU;

      /* Cleanup function replaces GIMPLE_OMP_RETURN in EXIT_BB.  */
      si = gsi_last_bb (l2_bb);
      if (gimple_omp_return_nowait_p (gsi_stmt (si)))
	t = built_in_decls[BUILT_IN_GOMP_SECTIONS_END_NOWAIT];
      else
	t = built_in_decls[BUILT_IN_GOMP_SECTIONS_END];
      stmt = gimple_build_call (t, 0);
      gsi_insert_after (&si, stmt, GSI_SAME_STMT);
      gsi_remove (&si, true);
    }

  set_immediate_dominator (CDI_DOMINATORS, default_bb, l0_bb);
}
/* Expand code for an OpenMP single directive.  We've already expanded
   much of the code, here we simply place the GOMP_barrier call.  */

static void
expand_omp_single (struct omp_region *region)
{
  basic_block entry_bb, exit_bb;
  gimple_stmt_iterator si;
  bool need_barrier = false;

  entry_bb = region->entry;
  exit_bb = region->exit;

  si = gsi_last_bb (entry_bb);
  /* The terminal barrier at the end of a GOMP_single_copy sequence cannot
     be removed.  We need to ensure that the thread that entered the single
     does not exit before the data is copied out by the other threads.  */
  if (find_omp_clause (gimple_omp_single_clauses (gsi_stmt (si)),
		       OMP_CLAUSE_COPYPRIVATE))
    need_barrier = true;
  gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_SINGLE);
  gsi_remove (&si, true);
  single_succ_edge (entry_bb)->flags = EDGE_FALLTHRU;

  si = gsi_last_bb (exit_bb);
  if (!gimple_omp_return_nowait_p (gsi_stmt (si)) || need_barrier)
    force_gimple_operand_gsi (&si, build_omp_barrier (), false, NULL_TREE,
			      false, GSI_SAME_STMT);
  gsi_remove (&si, true);
  single_succ_edge (exit_bb)->flags = EDGE_FALLTHRU;
}
/* Generic expansion for OpenMP synchronization directives: master,
   ordered and critical.  All we need to do here is remove the entry
   and exit markers for REGION.  */

static void
expand_omp_synch (struct omp_region *region)
{
  basic_block entry_bb, exit_bb;
  gimple_stmt_iterator si;

  entry_bb = region->entry;
  exit_bb = region->exit;

  si = gsi_last_bb (entry_bb);
  gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_SINGLE
	      || gimple_code (gsi_stmt (si)) == GIMPLE_OMP_MASTER
	      || gimple_code (gsi_stmt (si)) == GIMPLE_OMP_ORDERED
	      || gimple_code (gsi_stmt (si)) == GIMPLE_OMP_CRITICAL);
  gsi_remove (&si, true);
  single_succ_edge (entry_bb)->flags = EDGE_FALLTHRU;

  if (exit_bb)
    {
      si = gsi_last_bb (exit_bb);
      gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_RETURN);
      gsi_remove (&si, true);
      single_succ_edge (exit_bb)->flags = EDGE_FALLTHRU;
    }
}
/* A subroutine of expand_omp_atomic.  Attempt to implement the atomic
   operation as a __sync_fetch_and_op builtin.  INDEX is log2 of the
   size of the data type, and thus usable to find the index of the builtin
   decl.  Returns false if the expression is not of the proper form.  */

static bool
expand_omp_atomic_fetch_op (basic_block load_bb,
			    tree addr, tree loaded_val,
			    tree stored_val, int index)
{
  enum built_in_function base;
  tree decl, itype, call;
  direct_optab optab;
  tree rhs;
  basic_block store_bb = single_succ (load_bb);
  gimple_stmt_iterator gsi;
  gimple stmt;
  location_t loc;

  /* We expect to find the following sequences:

   load_bb:
       GIMPLE_OMP_ATOMIC_LOAD (tmp, mem)

   store_bb:
       val = tmp OP something; (or: something OP tmp)
       GIMPLE_OMP_STORE (val)

  ???FIXME: Allow a more flexible sequence.
  Perhaps use data flow to pick the statements.

  */
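  /* For instance, "#pragma omp atomic  x += 5;" is lowered to such a
     load/store pair and, for a supported integer type, ends up as a
     single __sync_fetch_and_add call below.  */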
  gsi = gsi_after_labels (store_bb);
  stmt = gsi_stmt (gsi);
  loc = gimple_location (stmt);
  if (!is_gimple_assign (stmt))
    return false;
  gsi_next (&gsi);
  if (gimple_code (gsi_stmt (gsi)) != GIMPLE_OMP_ATOMIC_STORE)
    return false;

  if (!operand_equal_p (gimple_assign_lhs (stmt), stored_val, 0))
    return false;

  /* Check for one of the supported fetch-op operations.  */
  switch (gimple_assign_rhs_code (stmt))
    {
    case PLUS_EXPR:
    case POINTER_PLUS_EXPR:
      base = BUILT_IN_SYNC_FETCH_AND_ADD_N;
      optab = sync_add_optab;
      break;
    case MINUS_EXPR:
      base = BUILT_IN_SYNC_FETCH_AND_SUB_N;
      optab = sync_add_optab;
      break;
    case BIT_AND_EXPR:
      base = BUILT_IN_SYNC_FETCH_AND_AND_N;
      optab = sync_and_optab;
      break;
    case BIT_IOR_EXPR:
      base = BUILT_IN_SYNC_FETCH_AND_OR_N;
      optab = sync_ior_optab;
      break;
    case BIT_XOR_EXPR:
      base = BUILT_IN_SYNC_FETCH_AND_XOR_N;
      optab = sync_xor_optab;
      break;
    default:
      return false;
    }
  /* Make sure the expression is of the proper form.  */
  if (operand_equal_p (gimple_assign_rhs1 (stmt), loaded_val, 0))
    rhs = gimple_assign_rhs2 (stmt);
  else if (commutative_tree_code (gimple_assign_rhs_code (stmt))
	   && operand_equal_p (gimple_assign_rhs2 (stmt), loaded_val, 0))
    rhs = gimple_assign_rhs1 (stmt);
  else
    return false;

  decl = built_in_decls[base + index + 1];
  if (decl == NULL_TREE)
    return false;
  itype = TREE_TYPE (TREE_TYPE (decl));

  if (direct_optab_handler (optab, TYPE_MODE (itype)) == CODE_FOR_nothing)
    return false;

  gsi = gsi_last_bb (load_bb);
  gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_ATOMIC_LOAD);
  call = build_call_expr_loc (loc,
			      decl, 2, addr,
			      fold_convert_loc (loc, itype, rhs));
  call = fold_convert_loc (loc, void_type_node, call);
  force_gimple_operand_gsi (&gsi, call, true, NULL_TREE, true, GSI_SAME_STMT);
  gsi_remove (&gsi, true);

  gsi = gsi_last_bb (store_bb);
  gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_ATOMIC_STORE);
  gsi_remove (&gsi, true);
  gsi = gsi_last_bb (store_bb);
  gsi_remove (&gsi, true);

  if (gimple_in_ssa_p (cfun))
    update_ssa (TODO_update_ssa_no_phi);

  return true;
}
/* A subroutine of expand_omp_atomic.  Implement the atomic operation as:

      oldval = *addr;
      repeat:
	newval = rhs;	 // with oldval replacing *addr in rhs
	oldval = __sync_val_compare_and_swap (addr, oldval, newval);
	if (oldval != newval)
	  goto repeat;

   INDEX is log2 of the size of the data type, and thus usable to find the
   index of the builtin decl.  */

static bool
expand_omp_atomic_pipeline (basic_block load_bb, basic_block store_bb,
			    tree addr, tree loaded_val, tree stored_val,
			    int index)
{
  tree loadedi, storedi, initial, new_storedi, old_vali;
  tree type, itype, cmpxchg, iaddr;
  gimple_stmt_iterator si;
  basic_block loop_header = single_succ (load_bb);
  gimple phi, stmt;
  edge e;

  cmpxchg = built_in_decls[BUILT_IN_SYNC_VAL_COMPARE_AND_SWAP_N + index + 1];
  if (cmpxchg == NULL_TREE)
    return false;
  type = TYPE_MAIN_VARIANT (TREE_TYPE (TREE_TYPE (addr)));
  itype = TREE_TYPE (TREE_TYPE (cmpxchg));

  if (direct_optab_handler (sync_compare_and_swap_optab, TYPE_MODE (itype))
      == CODE_FOR_nothing)
    return false;
  /* Load the initial value, replacing the GIMPLE_OMP_ATOMIC_LOAD.  */
  si = gsi_last_bb (load_bb);
  gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_ATOMIC_LOAD);

  /* For floating-point values, we'll need to view-convert them to integers
     so that we can perform the atomic compare and swap.  Simplify the
     following code by always setting up the "i"ntegral variables.  */
  if (!INTEGRAL_TYPE_P (type) && !POINTER_TYPE_P (type))
    {
      tree iaddr_val;

      iaddr = create_tmp_var (build_pointer_type_for_mode (itype, ptr_mode,
							   true), NULL);
      iaddr_val
	= force_gimple_operand_gsi (&si,
				    fold_convert (TREE_TYPE (iaddr), addr),
				    false, NULL_TREE, true, GSI_SAME_STMT);
      stmt = gimple_build_assign (iaddr, iaddr_val);
      gsi_insert_before (&si, stmt, GSI_SAME_STMT);
      loadedi = create_tmp_var (itype, NULL);
      if (gimple_in_ssa_p (cfun))
	{
	  add_referenced_var (iaddr);
	  add_referenced_var (loadedi);
	  loadedi = make_ssa_name (loadedi, NULL);
	}
    }
  else
    {
      iaddr = addr;
      loadedi = loaded_val;
    }

  initial
    = force_gimple_operand_gsi (&si,
				build2 (MEM_REF, TREE_TYPE (TREE_TYPE (iaddr)),
					iaddr,
					build_int_cst (TREE_TYPE (iaddr), 0)),
				true, NULL_TREE, true, GSI_SAME_STMT);
  /* Move the value to the LOADEDI temporary.  */
  if (gimple_in_ssa_p (cfun))
    {
      gcc_assert (gimple_seq_empty_p (phi_nodes (loop_header)));
      phi = create_phi_node (loadedi, loop_header);
      SSA_NAME_DEF_STMT (loadedi) = phi;
      SET_USE (PHI_ARG_DEF_PTR_FROM_EDGE (phi, single_succ_edge (load_bb)),
	       initial);
    }
  else
    gsi_insert_before (&si,
		       gimple_build_assign (loadedi, initial),
		       GSI_SAME_STMT);
  if (loadedi != loaded_val)
    {
      gimple_stmt_iterator gsi2;
      tree x;

      x = build1 (VIEW_CONVERT_EXPR, type, loadedi);
      gsi2 = gsi_start_bb (loop_header);
      if (gimple_in_ssa_p (cfun))
	{
	  gimple stmt;
	  x = force_gimple_operand_gsi (&gsi2, x, true, NULL_TREE,
					true, GSI_SAME_STMT);
	  stmt = gimple_build_assign (loaded_val, x);
	  gsi_insert_before (&gsi2, stmt, GSI_SAME_STMT);
	}
      else
	{
	  x = build2 (MODIFY_EXPR, TREE_TYPE (loaded_val), loaded_val, x);
	  force_gimple_operand_gsi (&gsi2, x, true, NULL_TREE,
				    true, GSI_SAME_STMT);
	}
    }
  gsi_remove (&si, true);
  si = gsi_last_bb (store_bb);
  gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_ATOMIC_STORE);

  if (iaddr == addr)
    storedi = stored_val;
  else
    storedi =
      force_gimple_operand_gsi (&si,
				build1 (VIEW_CONVERT_EXPR, itype,
					stored_val), true, NULL_TREE, true,
				GSI_SAME_STMT);

  /* Build the compare&swap statement.  */
  new_storedi = build_call_expr (cmpxchg, 3, iaddr, loadedi, storedi);
  new_storedi = force_gimple_operand_gsi (&si,
					  fold_convert (TREE_TYPE (loadedi),
							new_storedi),
					  true, NULL_TREE,
					  true, GSI_SAME_STMT);

  if (gimple_in_ssa_p (cfun))
    old_vali = loadedi;
  else
    {
      old_vali = create_tmp_var (TREE_TYPE (loadedi), NULL);
      if (gimple_in_ssa_p (cfun))
	add_referenced_var (old_vali);
      stmt = gimple_build_assign (old_vali, loadedi);
      gsi_insert_before (&si, stmt, GSI_SAME_STMT);

      stmt = gimple_build_assign (loadedi, new_storedi);
      gsi_insert_before (&si, stmt, GSI_SAME_STMT);
    }
  /* Note that we always perform the comparison as an integer, even for
     floating point.  This allows the atomic operation to properly
     succeed even with NaNs and -0.0.  */
  stmt = gimple_build_cond_empty
	   (build2 (NE_EXPR, boolean_type_node,
		    new_storedi, old_vali));
  gsi_insert_before (&si, stmt, GSI_SAME_STMT);

  /* Update cfg.  */
  e = single_succ_edge (store_bb);
  e->flags &= ~EDGE_FALLTHRU;
  e->flags |= EDGE_FALSE_VALUE;

  e = make_edge (store_bb, loop_header, EDGE_TRUE_VALUE);

  /* Copy the new value to loadedi (we already did that before the condition
     if we are not in SSA).  */
  if (gimple_in_ssa_p (cfun))
    {
      phi = gimple_seq_first_stmt (phi_nodes (loop_header));
      SET_USE (PHI_ARG_DEF_PTR_FROM_EDGE (phi, e), new_storedi);
    }

  /* Remove GIMPLE_OMP_ATOMIC_STORE.  */
  gsi_remove (&si, true);

  if (gimple_in_ssa_p (cfun))
    update_ssa (TODO_update_ssa_no_phi);

  return true;
}
/* A subroutine of expand_omp_atomic.  Implement the atomic operation as:

		      GOMP_atomic_start ();
		      *addr = rhs;
		      GOMP_atomic_end ();

   The result is not globally atomic, but works so long as all parallel
   references are within #pragma omp atomic directives.  According to
   responses received from omp@openmp.org, appears to be within spec.
   Which makes sense, since that's how several other compilers handle
   this situation as well.
   LOADED_VAL and ADDR are the operands of GIMPLE_OMP_ATOMIC_LOAD we're
   expanding.  STORED_VAL is the operand of the matching
   GIMPLE_OMP_ATOMIC_STORE.

   We replace
   GIMPLE_OMP_ATOMIC_LOAD (loaded_val, addr) with
   loaded_val = *addr;

   and GIMPLE_OMP_ATOMIC_STORE (stored_val) with
   *addr = stored_val;
*/

static void
expand_omp_atomic_mutex (basic_block load_bb, basic_block store_bb,
			 tree addr, tree loaded_val, tree stored_val)
{
  gimple_stmt_iterator si;
  gimple stmt;
  tree t;
  si = gsi_last_bb (load_bb);
  gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_ATOMIC_LOAD);

  t = built_in_decls[BUILT_IN_GOMP_ATOMIC_START];
  t = build_call_expr (t, 0);
  force_gimple_operand_gsi (&si, t, true, NULL_TREE, true, GSI_SAME_STMT);

  stmt = gimple_build_assign (loaded_val, build_simple_mem_ref (addr));
  gsi_insert_before (&si, stmt, GSI_SAME_STMT);
  gsi_remove (&si, true);

  si = gsi_last_bb (store_bb);
  gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_ATOMIC_STORE);

  stmt = gimple_build_assign (build_simple_mem_ref (unshare_expr (addr)),
			      stored_val);
  gsi_insert_before (&si, stmt, GSI_SAME_STMT);

  t = built_in_decls[BUILT_IN_GOMP_ATOMIC_END];
  t = build_call_expr (t, 0);
  force_gimple_operand_gsi (&si, t, true, NULL_TREE, true, GSI_SAME_STMT);
  gsi_remove (&si, true);

  if (gimple_in_ssa_p (cfun))
    update_ssa (TODO_update_ssa_no_phi);
}
/* Expand a GIMPLE_OMP_ATOMIC statement.  We try to expand
   using expand_omp_atomic_fetch_op.  If it failed, we try to
   call expand_omp_atomic_pipeline, and if it fails too, the
   ultimate fallback is wrapping the operation in a mutex
   (expand_omp_atomic_mutex).  REGION is the atomic region built
   by build_omp_regions_1().  */

static void
expand_omp_atomic (struct omp_region *region)
{
  basic_block load_bb = region->entry, store_bb = region->exit;
  gimple load = last_stmt (load_bb), store = last_stmt (store_bb);
  tree loaded_val = gimple_omp_atomic_load_lhs (load);
  tree addr = gimple_omp_atomic_load_rhs (load);
  tree stored_val = gimple_omp_atomic_store_val (store);
  tree type = TYPE_MAIN_VARIANT (TREE_TYPE (TREE_TYPE (addr)));
  HOST_WIDE_INT index;

  /* Make sure the type is one of the supported sizes.  */
  index = tree_low_cst (TYPE_SIZE_UNIT (type), 1);
  index = exact_log2 (index);
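  /* E.g. a 4-byte int gives index = exact_log2 (4) = 2, which the
     helpers above use to select the "_4" variant of the __sync builtins
     via built_in_decls[base + index + 1].  */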
  if (index >= 0 && index <= 4)
    {
      unsigned int align = TYPE_ALIGN_UNIT (type);

      /* __sync builtins require strict data alignment.  */
      if (exact_log2 (align) >= index)
	{
	  /* When possible, use specialized atomic update functions.  */
	  if ((INTEGRAL_TYPE_P (type) || POINTER_TYPE_P (type))
	      && store_bb == single_succ (load_bb))
	    {
	      if (expand_omp_atomic_fetch_op (load_bb, addr,
					      loaded_val, stored_val, index))
		return;
	    }

	  /* If we don't have specialized __sync builtins, try and implement
	     as a compare and swap loop.  */
	  if (expand_omp_atomic_pipeline (load_bb, store_bb, addr,
					  loaded_val, stored_val, index))
	    return;
	}
    }

  /* The ultimate fallback is wrapping the operation in a mutex.  */
  expand_omp_atomic_mutex (load_bb, store_bb, addr, loaded_val, stored_val);
}
/* Expand the parallel region tree rooted at REGION.  Expansion
   proceeds in depth-first order.  Innermost regions are expanded
   first.  This way, parallel regions that require a new function to
   be created (e.g., GIMPLE_OMP_PARALLEL) can be expanded without having any
   internal dependencies in their body.  */

static void
expand_omp (struct omp_region *region)
{
  while (region)
    {
      location_t saved_location;

      /* First, determine whether this is a combined parallel+workshare
	 region.  */
      if (region->type == GIMPLE_OMP_PARALLEL)
	determine_parallel_type (region);

      if (region->inner)
	expand_omp (region->inner);

      saved_location = input_location;
      if (gimple_has_location (last_stmt (region->entry)))
	input_location = gimple_location (last_stmt (region->entry));

      switch (region->type)
	{
	case GIMPLE_OMP_PARALLEL:
	case GIMPLE_OMP_TASK:
	  expand_omp_taskreg (region);
	  break;

	case GIMPLE_OMP_FOR:
	  expand_omp_for (region);
	  break;

	case GIMPLE_OMP_SECTIONS:
	  expand_omp_sections (region);
	  break;

	case GIMPLE_OMP_SECTION:
	  /* Individual omp sections are handled together with their
	     parent GIMPLE_OMP_SECTIONS region.  */
	  break;

	case GIMPLE_OMP_SINGLE:
	  expand_omp_single (region);
	  break;

	case GIMPLE_OMP_MASTER:
	case GIMPLE_OMP_ORDERED:
	case GIMPLE_OMP_CRITICAL:
	  expand_omp_synch (region);
	  break;

	case GIMPLE_OMP_ATOMIC_LOAD:
	  expand_omp_atomic (region);
	  break;

	default:
	  gcc_unreachable ();
	}

      input_location = saved_location;
      region = region->next;
    }
}
/* Helper for build_omp_regions.  Scan the dominator tree starting at
   block BB.  PARENT is the region that contains BB.  If SINGLE_TREE is
   true, the function ends once a single tree is built (otherwise, whole
   forest of OMP constructs may be built).  */

static void
build_omp_regions_1 (basic_block bb, struct omp_region *parent,
		     bool single_tree)
{
  gimple_stmt_iterator gsi;
  gimple stmt;
  basic_block son;

  gsi = gsi_last_bb (bb);
  if (!gsi_end_p (gsi) && is_gimple_omp (gsi_stmt (gsi)))
    {
      struct omp_region *region;
      enum gimple_code code;

      stmt = gsi_stmt (gsi);
      code = gimple_code (stmt);
      if (code == GIMPLE_OMP_RETURN)
	{
	  /* STMT is the return point out of region PARENT.  Mark it
	     as the exit point and make PARENT the immediately
	     enclosing region.  */
	  gcc_assert (parent);
	  region = parent;
	  region->exit = bb;
	  parent = parent->outer;
	}
      else if (code == GIMPLE_OMP_ATOMIC_STORE)
	{
	  /* GIMPLE_OMP_ATOMIC_STORE is analogous to
	     GIMPLE_OMP_RETURN, but matches with
	     GIMPLE_OMP_ATOMIC_LOAD.  */
	  gcc_assert (parent);
	  gcc_assert (parent->type == GIMPLE_OMP_ATOMIC_LOAD);
	  region = parent;
	  region->exit = bb;
	  parent = parent->outer;
	}
      else if (code == GIMPLE_OMP_CONTINUE)
	{
	  gcc_assert (parent);
	  parent->cont = bb;
	}
      else if (code == GIMPLE_OMP_SECTIONS_SWITCH)
	{
	  /* GIMPLE_OMP_SECTIONS_SWITCH is part of
	     GIMPLE_OMP_SECTIONS, and we do nothing for it.  */
	  ;
	}
      else
	{
	  /* Otherwise, this directive becomes the parent for a new
	     region.  */
	  region = new_omp_region (bb, code, parent);
	  parent = region;
	}
    }

  if (single_tree && !parent)
    return;

  for (son = first_dom_son (CDI_DOMINATORS, bb);
       son;
       son = next_dom_son (CDI_DOMINATORS, son))
    build_omp_regions_1 (son, parent, single_tree);
}
/* Builds the tree of OMP regions rooted at ROOT, storing it to
   root_omp_region.  */

static void
build_omp_regions_root (basic_block root)
{
  gcc_assert (root_omp_region == NULL);
  build_omp_regions_1 (root, NULL, true);
  gcc_assert (root_omp_region != NULL);
}

/* Expands omp construct (and its subconstructs) starting in HEAD.  */

void
omp_expand_local (basic_block head)
{
  build_omp_regions_root (head);
  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      fprintf (dump_file, "\nOMP region tree\n\n");
      dump_omp_region (dump_file, root_omp_region, 0);
      fprintf (dump_file, "\n");
    }

  remove_exit_barriers (root_omp_region);
  expand_omp (root_omp_region);

  free_omp_regions ();
}
/* Scan the CFG and build a tree of OMP regions.  Return the root of
   the OMP region tree.  */

static void
build_omp_regions (void)
{
  gcc_assert (root_omp_region == NULL);
  calculate_dominance_info (CDI_DOMINATORS);
  build_omp_regions_1 (ENTRY_BLOCK_PTR, NULL, false);
}

/* Main entry point for expanding OMP-GIMPLE into runtime calls.  */

static unsigned int
execute_expand_omp (void)
{
  build_omp_regions ();

  if (!root_omp_region)
    return 0;

  if (dump_file)
    {
      fprintf (dump_file, "\nOMP region tree\n\n");
      dump_omp_region (dump_file, root_omp_region, 0);
      fprintf (dump_file, "\n");
    }

  remove_exit_barriers (root_omp_region);

  expand_omp (root_omp_region);

  cleanup_tree_cfg ();

  free_omp_regions ();

  return 0;
}
/* OMP expansion -- the default pass, run before creation of SSA form.  */

static bool
gate_expand_omp (void)
{
  return (flag_openmp != 0 && !seen_error ());
}

struct gimple_opt_pass pass_expand_omp =
{
 {
  GIMPLE_PASS,
  "ompexp",				/* name */
  gate_expand_omp,			/* gate */
  execute_expand_omp,			/* execute */
  NULL,					/* sub */
  NULL,					/* next */
  0,					/* static_pass_number */
  TV_NONE,				/* tv_id */
  PROP_gimple_any,			/* properties_required */
  0,					/* properties_provided */
  0,					/* properties_destroyed */
  0,					/* todo_flags_start */
  0					/* todo_flags_finish */
 }
};
/* Routines to lower OpenMP directives into OMP-GIMPLE.  */

/* Lower the OpenMP sections directive in the current statement in GSI_P.
   CTX is the enclosing OMP context for the current statement.  */

static void
lower_omp_sections (gimple_stmt_iterator *gsi_p, omp_context *ctx)
{
  tree block, control;
  gimple_stmt_iterator tgsi;
  unsigned i, len;
  gimple stmt, new_stmt, bind, t;
  gimple_seq ilist, dlist, olist, new_body, body;
  struct gimplify_ctx gctx;

  stmt = gsi_stmt (*gsi_p);

  push_gimplify_context (&gctx);

  dlist = NULL;
  ilist = NULL;
  lower_rec_input_clauses (gimple_omp_sections_clauses (stmt),
			   &ilist, &dlist, ctx);

  tgsi = gsi_start (gimple_omp_body (stmt));
  for (len = 0; !gsi_end_p (tgsi); len++, gsi_next (&tgsi))
    continue;

  tgsi = gsi_start (gimple_omp_body (stmt));
  body = NULL;
  for (i = 0; i < len; i++, gsi_next (&tgsi))
    {
      omp_context *sctx;
      gimple sec_start;

      sec_start = gsi_stmt (tgsi);
      sctx = maybe_lookup_ctx (sec_start);
      gcc_assert (sctx);

      gimple_seq_add_stmt (&body, sec_start);

      lower_omp (gimple_omp_body (sec_start), sctx);
      gimple_seq_add_seq (&body, gimple_omp_body (sec_start));
      gimple_omp_set_body (sec_start, NULL);

      if (i == len - 1)
	{
	  gimple_seq l = NULL;
	  lower_lastprivate_clauses (gimple_omp_sections_clauses (stmt), NULL,
				     &l, ctx);
	  gimple_seq_add_seq (&body, l);
	  gimple_omp_section_set_last (sec_start);
	}

      gimple_seq_add_stmt (&body, gimple_build_omp_return (false));
    }
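  /* Each section body above now ends with its own GIMPLE_OMP_RETURN
     marker; build_omp_regions_1 later uses those markers to delimit
     the individual GIMPLE_OMP_SECTION regions.  */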
  block = make_node (BLOCK);
  bind = gimple_build_bind (NULL, body, block);

  olist = NULL;
  lower_reduction_clauses (gimple_omp_sections_clauses (stmt), &olist, ctx);

  block = make_node (BLOCK);
  new_stmt = gimple_build_bind (NULL, NULL, block);

  pop_gimplify_context (new_stmt);
  gimple_bind_append_vars (new_stmt, ctx->block_vars);
  BLOCK_VARS (block) = gimple_bind_vars (bind);
  if (BLOCK_VARS (block))
    TREE_USED (block) = 1;

  new_body = NULL;
  gimple_seq_add_seq (&new_body, ilist);
  gimple_seq_add_stmt (&new_body, stmt);
  gimple_seq_add_stmt (&new_body, gimple_build_omp_sections_switch ());
  gimple_seq_add_stmt (&new_body, bind);

  control = create_tmp_var (unsigned_type_node, ".section");
  t = gimple_build_omp_continue (control, control);
  gimple_omp_sections_set_control (stmt, control);
  gimple_seq_add_stmt (&new_body, t);

  gimple_seq_add_seq (&new_body, olist);
  gimple_seq_add_seq (&new_body, dlist);

  new_body = maybe_catch_exception (new_body);

  t = gimple_build_omp_return
	(!!find_omp_clause (gimple_omp_sections_clauses (stmt),
			    OMP_CLAUSE_NOWAIT));
  gimple_seq_add_stmt (&new_body, t);

  gimple_bind_set_body (new_stmt, new_body);
  gimple_omp_set_body (stmt, NULL);

  gsi_replace (gsi_p, new_stmt, true);
}
/* A subroutine of lower_omp_single.  Expand the simple form of
   a GIMPLE_OMP_SINGLE, without a copyprivate clause:

	if (GOMP_single_start ())
	  BODY;
	[ GOMP_barrier (); ]	-> unless 'nowait' is present.

  FIXME.  It may be better to delay expanding the logic of this until
  pass_expand_omp.  The expanded logic may make the job more difficult
  to a synchronization analysis pass.  */

static void
lower_omp_single_simple (gimple single_stmt, gimple_seq *pre_p)
{
  location_t loc = gimple_location (single_stmt);
  tree tlabel = create_artificial_label (loc);
  tree flabel = create_artificial_label (loc);
  gimple call, cond;
  tree lhs, decl;

  decl = built_in_decls[BUILT_IN_GOMP_SINGLE_START];
  lhs = create_tmp_var (TREE_TYPE (TREE_TYPE (decl)), NULL);
  call = gimple_build_call (decl, 0);
  gimple_call_set_lhs (call, lhs);
  gimple_seq_add_stmt (pre_p, call);

  cond = gimple_build_cond (EQ_EXPR, lhs,
			    fold_convert_loc (loc, TREE_TYPE (lhs),
					      boolean_true_node),
			    tlabel, flabel);
  gimple_seq_add_stmt (pre_p, cond);
  gimple_seq_add_stmt (pre_p, gimple_build_label (tlabel));
  gimple_seq_add_seq (pre_p, gimple_omp_body (single_stmt));
  gimple_seq_add_stmt (pre_p, gimple_build_label (flabel));
}
/* A subroutine of lower_omp_single.  Expand the simple form of
   a GIMPLE_OMP_SINGLE, with a copyprivate clause:

	#pragma omp single copyprivate (a, b, c)

   Create a new structure to hold copies of 'a', 'b' and 'c' and emit:

      {
	if ((copyout_p = GOMP_single_copy_start ()) == NULL)
	  {
	    BODY;
	    copyout.a = a;
	    copyout.b = b;
	    copyout.c = c;
	    GOMP_single_copy_end (&copyout);
	  }
	else
	  {
	    a = copyout_p->a;
	    b = copyout_p->b;
	    c = copyout_p->c;
	  }
	GOMP_barrier ();
      }

  FIXME.  It may be better to delay expanding the logic of this until
  pass_expand_omp.  The expanded logic may make the job more difficult
  to a synchronization analysis pass.  */

static void
lower_omp_single_copy (gimple single_stmt, gimple_seq *pre_p, omp_context *ctx)
{
  tree ptr_type, t, l0, l1, l2;
  gimple_seq copyin_seq;
  location_t loc = gimple_location (single_stmt);

  ctx->sender_decl = create_tmp_var (ctx->record_type, ".omp_copy_o");

  ptr_type = build_pointer_type (ctx->record_type);
  ctx->receiver_decl = create_tmp_var (ptr_type, ".omp_copy_i");

  l0 = create_artificial_label (loc);
  l1 = create_artificial_label (loc);
  l2 = create_artificial_label (loc);

  t = build_call_expr_loc (loc, built_in_decls[BUILT_IN_GOMP_SINGLE_COPY_START], 0);
  t = fold_convert_loc (loc, ptr_type, t);
  gimplify_assign (ctx->receiver_decl, t, pre_p);

  t = build2 (EQ_EXPR, boolean_type_node, ctx->receiver_decl,
	      build_int_cst (ptr_type, 0));
  t = build3 (COND_EXPR, void_type_node, t,
	      build_and_jump (&l0), build_and_jump (&l1));
  gimplify_and_add (t, pre_p);

  gimple_seq_add_stmt (pre_p, gimple_build_label (l0));

  gimple_seq_add_seq (pre_p, gimple_omp_body (single_stmt));

  copyin_seq = NULL;
  lower_copyprivate_clauses (gimple_omp_single_clauses (single_stmt), pre_p,
			     &copyin_seq, ctx);

  t = build_fold_addr_expr_loc (loc, ctx->sender_decl);
  t = build_call_expr_loc (loc, built_in_decls[BUILT_IN_GOMP_SINGLE_COPY_END],
			   1, t);
  gimplify_and_add (t, pre_p);

  t = build_and_jump (&l2);
  gimplify_and_add (t, pre_p);

  gimple_seq_add_stmt (pre_p, gimple_build_label (l1));

  gimple_seq_add_seq (pre_p, copyin_seq);

  gimple_seq_add_stmt (pre_p, gimple_build_label (l2));
}
/* Expand code for an OpenMP single directive.  */

static void
lower_omp_single (gimple_stmt_iterator *gsi_p, omp_context *ctx)
{
  tree block;
  gimple t, bind, single_stmt = gsi_stmt (*gsi_p);
  gimple_seq bind_body, dlist;
  struct gimplify_ctx gctx;

  push_gimplify_context (&gctx);

  bind_body = NULL;
  lower_rec_input_clauses (gimple_omp_single_clauses (single_stmt),
			   &bind_body, &dlist, ctx);
  lower_omp (gimple_omp_body (single_stmt), ctx);

  gimple_seq_add_stmt (&bind_body, single_stmt);

  if (ctx->record_type)
    lower_omp_single_copy (single_stmt, &bind_body, ctx);
  else
    lower_omp_single_simple (single_stmt, &bind_body);

  gimple_omp_set_body (single_stmt, NULL);

  gimple_seq_add_seq (&bind_body, dlist);

  bind_body = maybe_catch_exception (bind_body);

  t = gimple_build_omp_return
	(!!find_omp_clause (gimple_omp_single_clauses (single_stmt),
			    OMP_CLAUSE_NOWAIT));
  gimple_seq_add_stmt (&bind_body, t);

  block = make_node (BLOCK);
  bind = gimple_build_bind (NULL, bind_body, block);

  pop_gimplify_context (bind);

  gimple_bind_append_vars (bind, ctx->block_vars);
  BLOCK_VARS (block) = ctx->block_vars;
  gsi_replace (gsi_p, bind, true);
  if (BLOCK_VARS (block))
    TREE_USED (block) = 1;
}
/* Expand code for an OpenMP master directive.  */

static void
lower_omp_master (gimple_stmt_iterator *gsi_p, omp_context *ctx)
{
  tree block, lab = NULL, x;
  gimple stmt = gsi_stmt (*gsi_p), bind;
  location_t loc = gimple_location (stmt);
  gimple_seq tseq;
  struct gimplify_ctx gctx;

  push_gimplify_context (&gctx);

  block = make_node (BLOCK);
  bind = gimple_build_bind (NULL, gimple_seq_alloc_with_stmt (stmt),
			    block);

  x = build_call_expr_loc (loc, built_in_decls[BUILT_IN_OMP_GET_THREAD_NUM], 0);
  x = build2 (EQ_EXPR, boolean_type_node, x, integer_zero_node);
  x = build3 (COND_EXPR, void_type_node, x, NULL, build_and_jump (&lab));
  tseq = NULL;
  gimplify_and_add (x, &tseq);
  gimple_bind_add_seq (bind, tseq);

  lower_omp (gimple_omp_body (stmt), ctx);
  gimple_omp_set_body (stmt, maybe_catch_exception (gimple_omp_body (stmt)));
  gimple_bind_add_seq (bind, gimple_omp_body (stmt));
  gimple_omp_set_body (stmt, NULL);

  gimple_bind_add_stmt (bind, gimple_build_label (lab));

  gimple_bind_add_stmt (bind, gimple_build_omp_return (true));

  pop_gimplify_context (bind);

  gimple_bind_append_vars (bind, ctx->block_vars);
  BLOCK_VARS (block) = ctx->block_vars;
  gsi_replace (gsi_p, bind, true);
}
/* Expand code for an OpenMP ordered directive.  */

static void
lower_omp_ordered (gimple_stmt_iterator *gsi_p, omp_context *ctx)
{
  tree block;
  gimple stmt = gsi_stmt (*gsi_p), bind, x;
  struct gimplify_ctx gctx;

  push_gimplify_context (&gctx);

  block = make_node (BLOCK);
  bind = gimple_build_bind (NULL, gimple_seq_alloc_with_stmt (stmt),
			    block);

  x = gimple_build_call (built_in_decls[BUILT_IN_GOMP_ORDERED_START], 0);
  gimple_bind_add_stmt (bind, x);

  lower_omp (gimple_omp_body (stmt), ctx);
  gimple_omp_set_body (stmt, maybe_catch_exception (gimple_omp_body (stmt)));
  gimple_bind_add_seq (bind, gimple_omp_body (stmt));
  gimple_omp_set_body (stmt, NULL);

  x = gimple_build_call (built_in_decls[BUILT_IN_GOMP_ORDERED_END], 0);
  gimple_bind_add_stmt (bind, x);

  gimple_bind_add_stmt (bind, gimple_build_omp_return (true));

  pop_gimplify_context (bind);

  gimple_bind_append_vars (bind, ctx->block_vars);
  BLOCK_VARS (block) = gimple_bind_vars (bind);
  gsi_replace (gsi_p, bind, true);
}
/* Gimplify a GIMPLE_OMP_CRITICAL statement.  This is a relatively simple
   substitution of a couple of function calls.  But in the NAMED case,
   requires that languages coordinate a symbol name.  It is therefore
   best put here in common code.  */

static GTY((param1_is (tree), param2_is (tree)))
  splay_tree critical_name_mutexes;
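/* Each distinct name in a "#pragma omp critical (name)" maps to a
   common, public mutex symbol (".gomp_critical_user_<name>" below), so
   identically named critical regions in different translation units
   end up sharing one lock.  */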
static void
lower_omp_critical (gimple_stmt_iterator *gsi_p, omp_context *ctx)
{
  tree block;
  tree name, lock, unlock;
  gimple stmt = gsi_stmt (*gsi_p), bind;
  location_t loc = gimple_location (stmt);
  gimple_seq tbody;
  struct gimplify_ctx gctx;

  name = gimple_omp_critical_name (stmt);
  if (name)
    {
      tree decl;
      splay_tree_node n;

      if (!critical_name_mutexes)
	critical_name_mutexes
	  = splay_tree_new_ggc (splay_tree_compare_pointers,
				ggc_alloc_splay_tree_tree_node_tree_node_splay_tree_s,
				ggc_alloc_splay_tree_tree_node_tree_node_splay_tree_node_s);

      n = splay_tree_lookup (critical_name_mutexes, (splay_tree_key) name);
      if (n == NULL)
	{
	  char *new_str;

	  decl = create_tmp_var_raw (ptr_type_node, NULL);

	  new_str = ACONCAT ((".gomp_critical_user_",
			      IDENTIFIER_POINTER (name), NULL));
	  DECL_NAME (decl) = get_identifier (new_str);
	  TREE_PUBLIC (decl) = 1;
	  TREE_STATIC (decl) = 1;
	  DECL_COMMON (decl) = 1;
	  DECL_ARTIFICIAL (decl) = 1;
	  DECL_IGNORED_P (decl) = 1;
	  varpool_finalize_decl (decl);

	  splay_tree_insert (critical_name_mutexes, (splay_tree_key) name,
			     (splay_tree_value) decl);
	}
      else
	decl = (tree) n->value;

      lock = built_in_decls[BUILT_IN_GOMP_CRITICAL_NAME_START];
      lock = build_call_expr_loc (loc, lock, 1, build_fold_addr_expr_loc (loc, decl));

      unlock = built_in_decls[BUILT_IN_GOMP_CRITICAL_NAME_END];
      unlock = build_call_expr_loc (loc, unlock, 1,
				    build_fold_addr_expr_loc (loc, decl));
    }
  else
    {
      lock = built_in_decls[BUILT_IN_GOMP_CRITICAL_START];
      lock = build_call_expr_loc (loc, lock, 0);

      unlock = built_in_decls[BUILT_IN_GOMP_CRITICAL_END];
      unlock = build_call_expr_loc (loc, unlock, 0);
    }

  push_gimplify_context (&gctx);

  block = make_node (BLOCK);
  bind = gimple_build_bind (NULL, gimple_seq_alloc_with_stmt (stmt), block);

  tbody = gimple_bind_body (bind);
  gimplify_and_add (lock, &tbody);
  gimple_bind_set_body (bind, tbody);

  lower_omp (gimple_omp_body (stmt), ctx);
  gimple_omp_set_body (stmt, maybe_catch_exception (gimple_omp_body (stmt)));
  gimple_bind_add_seq (bind, gimple_omp_body (stmt));
  gimple_omp_set_body (stmt, NULL);

  tbody = gimple_bind_body (bind);
  gimplify_and_add (unlock, &tbody);
  gimple_bind_set_body (bind, tbody);

  gimple_bind_add_stmt (bind, gimple_build_omp_return (true));

  pop_gimplify_context (bind);
  gimple_bind_append_vars (bind, ctx->block_vars);
  BLOCK_VARS (block) = gimple_bind_vars (bind);
  gsi_replace (gsi_p, bind, true);
}
/* A subroutine of lower_omp_for.  Generate code to emit the predicate
   for a lastprivate clause.  Given a loop control predicate of (V
   cond N2), we gate the clause on (!(V cond N2)).  The lowered form
   is appended to *DLIST, iterator initialization is appended to
   *BODY_P.  */

static void
lower_omp_for_lastprivate (struct omp_for_data *fd, gimple_seq *body_p,
			   gimple_seq *dlist, struct omp_context *ctx)
{
  tree clauses, cond, vinit;
  enum tree_code cond_code;
  gimple_seq stmts;

  cond_code = fd->loop.cond_code;
  cond_code = cond_code == LT_EXPR ? GE_EXPR : LE_EXPR;

  /* When possible, use a strict equality expression.  This can let VRP
     type optimizations deduce the value and remove a copy.  */
  if (host_integerp (fd->loop.step, 0))
    {
      HOST_WIDE_INT step = TREE_INT_CST_LOW (fd->loop.step);
      if (step == 1 || step == -1)
	cond_code = EQ_EXPR;
    }
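  /* For example, for "for (V = 0; V < 10; V++)" the control variable
     finishes at exactly 10, so the lastprivate predicate can be the
     cheaper V == 10 instead of V >= 10.  */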
  cond = build2 (cond_code, boolean_type_node, fd->loop.v, fd->loop.n2);

  clauses = gimple_omp_for_clauses (fd->for_stmt);
  stmts = NULL;
  lower_lastprivate_clauses (clauses, cond, &stmts, ctx);
  if (!gimple_seq_empty_p (stmts))
    {
      gimple_seq_add_seq (&stmts, *dlist);
      *dlist = stmts;

      /* Optimize: v = 0; is usually cheaper than v = some_other_constant.  */
      vinit = fd->loop.n1;
      if (cond_code == EQ_EXPR
	  && host_integerp (fd->loop.n2, 0)
	  && ! integer_zerop (fd->loop.n2))
	vinit = build_int_cst (TREE_TYPE (fd->loop.v), 0);

      /* Initialize the iterator variable, so that threads that don't execute
	 any iterations don't execute the lastprivate clauses by accident.  */
      gimplify_assign (fd->loop.v, vinit, body_p);
    }
}
/* Lower code for an OpenMP loop directive.  */

static void
lower_omp_for (gimple_stmt_iterator *gsi_p, omp_context *ctx)
{
  tree *rhs_p, block;
  struct omp_for_data fd;
  gimple stmt = gsi_stmt (*gsi_p), new_stmt;
  gimple_seq omp_for_body, body, dlist;
  size_t i;
  struct gimplify_ctx gctx;

  push_gimplify_context (&gctx);

  lower_omp (gimple_omp_for_pre_body (stmt), ctx);
  lower_omp (gimple_omp_body (stmt), ctx);

  block = make_node (BLOCK);
  new_stmt = gimple_build_bind (NULL, NULL, block);

  /* Move declaration of temporaries in the loop body before we make
     it go away.  */
  omp_for_body = gimple_omp_body (stmt);
  if (!gimple_seq_empty_p (omp_for_body)
      && gimple_code (gimple_seq_first_stmt (omp_for_body)) == GIMPLE_BIND)
    {
      tree vars = gimple_bind_vars (gimple_seq_first_stmt (omp_for_body));
      gimple_bind_append_vars (new_stmt, vars);
    }

  /* The pre-body and input clauses go before the lowered GIMPLE_OMP_FOR.  */
  dlist = NULL;
  body = NULL;
  lower_rec_input_clauses (gimple_omp_for_clauses (stmt), &body, &dlist, ctx);
  gimple_seq_add_seq (&body, gimple_omp_for_pre_body (stmt));
  /* Lower the header expressions.  At this point, we can assume that
     the header is of the form:

	#pragma omp for (V = VAL1; V {<|>|<=|>=} VAL2; V = V [+-] VAL3)

     We just need to make sure that VAL1, VAL2 and VAL3 are lowered
     using the .omp_data_s mapping, if needed.  */
  for (i = 0; i < gimple_omp_for_collapse (stmt); i++)
    {
      rhs_p = gimple_omp_for_initial_ptr (stmt, i);
      if (!is_gimple_min_invariant (*rhs_p))
	*rhs_p = get_formal_tmp_var (*rhs_p, &body);

      rhs_p = gimple_omp_for_final_ptr (stmt, i);
      if (!is_gimple_min_invariant (*rhs_p))
	*rhs_p = get_formal_tmp_var (*rhs_p, &body);

      rhs_p = &TREE_OPERAND (gimple_omp_for_incr (stmt, i), 1);
      if (!is_gimple_min_invariant (*rhs_p))
	*rhs_p = get_formal_tmp_var (*rhs_p, &body);
    }
  /* Once lowered, extract the bounds and clauses.  */
  extract_omp_for_data (stmt, &fd, NULL);

  lower_omp_for_lastprivate (&fd, &body, &dlist, ctx);

  gimple_seq_add_stmt (&body, stmt);
  gimple_seq_add_seq (&body, gimple_omp_body (stmt));

  gimple_seq_add_stmt (&body, gimple_build_omp_continue (fd.loop.v,
							 fd.loop.v));

  /* After the loop, add exit clauses.  */
  lower_reduction_clauses (gimple_omp_for_clauses (stmt), &body, ctx);
  gimple_seq_add_seq (&body, dlist);

  body = maybe_catch_exception (body);

  /* Region exit marker goes at the end of the loop body.  */
  gimple_seq_add_stmt (&body, gimple_build_omp_return (fd.have_nowait));

  pop_gimplify_context (new_stmt);

  gimple_bind_append_vars (new_stmt, ctx->block_vars);
  BLOCK_VARS (block) = gimple_bind_vars (new_stmt);
  if (BLOCK_VARS (block))
    TREE_USED (block) = 1;

  gimple_bind_set_body (new_stmt, body);
  gimple_omp_set_body (stmt, NULL);
  gimple_omp_for_set_pre_body (stmt, NULL);
  gsi_replace (gsi_p, new_stmt, true);
}
/* Callback for walk_stmts.  Check if the current statement only contains
   GIMPLE_OMP_FOR or GIMPLE_OMP_SECTIONS.  */
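/* The counter at *INFO starts at 0; the first workshare statement seen
   sets it to 1, and anything else (or a second workshare) forces it to
   -1, meaning the body is not a combinable parallel+workshare.  */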
static tree
check_combined_parallel (gimple_stmt_iterator *gsi_p,
			 bool *handled_ops_p,
			 struct walk_stmt_info *wi)
{
  int *info = (int *) wi->info;
  gimple stmt = gsi_stmt (*gsi_p);

  *handled_ops_p = true;
  switch (gimple_code (stmt))
    {
    WALK_SUBSTMTS;

    case GIMPLE_OMP_FOR:
    case GIMPLE_OMP_SECTIONS:
      *info = *info == 0 ? 1 : -1;
      break;
    default:
      *info = -1;
      break;
    }
  return NULL;
}
struct omp_taskcopy_context
{
  /* This field must be at the beginning, as we do "inheritance": Some
     callback functions for tree-inline.c (e.g., omp_copy_decl)
     receive a copy_body_data pointer that is up-casted to an
     omp_context pointer.  */
  copy_body_data cb;
  omp_context *ctx;
};

static tree
task_copyfn_copy_decl (tree var, copy_body_data *cb)
{
  struct omp_taskcopy_context *tcctx = (struct omp_taskcopy_context *) cb;

  if (splay_tree_lookup (tcctx->ctx->sfield_map, (splay_tree_key) var))
    return create_tmp_var (TREE_TYPE (var), NULL);

  return var;
}
static tree
task_copyfn_remap_type (struct omp_taskcopy_context *tcctx, tree orig_type)
{
  tree name, new_fields = NULL, type, f;

  type = lang_hooks.types.make_type (RECORD_TYPE);
  name = DECL_NAME (TYPE_NAME (orig_type));
  name = build_decl (gimple_location (tcctx->ctx->stmt),
		     TYPE_DECL, name, type);
  TYPE_NAME (type) = name;

  for (f = TYPE_FIELDS (orig_type); f ; f = TREE_CHAIN (f))
    {
      tree new_f = copy_node (f);
      DECL_CONTEXT (new_f) = type;
      TREE_TYPE (new_f) = remap_type (TREE_TYPE (f), &tcctx->cb);
      TREE_CHAIN (new_f) = new_fields;
      walk_tree (&DECL_SIZE (new_f), copy_tree_body_r, &tcctx->cb, NULL);
      walk_tree (&DECL_SIZE_UNIT (new_f), copy_tree_body_r, &tcctx->cb, NULL);
      walk_tree (&DECL_FIELD_OFFSET (new_f), copy_tree_body_r,
		 &tcctx->cb, NULL);
      new_fields = new_f;
      *pointer_map_insert (tcctx->cb.decl_map, f) = new_f;
    }
  TYPE_FIELDS (type) = nreverse (new_fields);
  layout_type (type);
  return type;
}
/* Create task copyfn.  */

static void
create_task_copyfn (gimple task_stmt, omp_context *ctx)
{
  struct function *child_cfun;
  tree child_fn, t, c, src, dst, f, sf, arg, sarg, decl;
  tree record_type, srecord_type, bind, list;
  bool record_needs_remap = false, srecord_needs_remap = false;
  splay_tree_node n;
  struct omp_taskcopy_context tcctx;
  struct gimplify_ctx gctx;
  location_t loc = gimple_location (task_stmt);

  child_fn = gimple_omp_task_copy_fn (task_stmt);
  child_cfun = DECL_STRUCT_FUNCTION (child_fn);
  gcc_assert (child_cfun->cfg == NULL);
  DECL_SAVED_TREE (child_fn) = alloc_stmt_list ();

  /* Reset DECL_CONTEXT on function arguments.  */
  for (t = DECL_ARGUMENTS (child_fn); t; t = DECL_CHAIN (t))
    DECL_CONTEXT (t) = child_fn;

  /* Populate the function.  */
  push_gimplify_context (&gctx);
  current_function_decl = child_fn;

  bind = build3 (BIND_EXPR, void_type_node, NULL, NULL, NULL);
  TREE_SIDE_EFFECTS (bind) = 1;
  list = NULL;
  DECL_SAVED_TREE (child_fn) = bind;
  DECL_SOURCE_LOCATION (child_fn) = gimple_location (task_stmt);

  /* Remap src and dst argument types if needed.  */
  record_type = ctx->record_type;
  srecord_type = ctx->srecord_type;
  for (f = TYPE_FIELDS (record_type); f ; f = DECL_CHAIN (f))
    if (variably_modified_type_p (TREE_TYPE (f), ctx->cb.src_fn))
      {
	record_needs_remap = true;
	break;
      }
  for (f = TYPE_FIELDS (srecord_type); f ; f = DECL_CHAIN (f))
    if (variably_modified_type_p (TREE_TYPE (f), ctx->cb.src_fn))
      {
	srecord_needs_remap = true;
	break;
      }
  if (record_needs_remap || srecord_needs_remap)
    {
      memset (&tcctx, '\0', sizeof (tcctx));
      tcctx.cb.src_fn = ctx->cb.src_fn;
      tcctx.cb.dst_fn = child_fn;
      tcctx.cb.src_node = cgraph_get_node (tcctx.cb.src_fn);
      gcc_checking_assert (tcctx.cb.src_node);
      tcctx.cb.dst_node = tcctx.cb.src_node;
      tcctx.cb.src_cfun = ctx->cb.src_cfun;
      tcctx.cb.copy_decl = task_copyfn_copy_decl;
      tcctx.cb.eh_lp_nr = 0;
      tcctx.cb.transform_call_graph_edges = CB_CGE_MOVE;
      tcctx.cb.decl_map = pointer_map_create ();
      tcctx.ctx = ctx;

      if (record_needs_remap)
	record_type = task_copyfn_remap_type (&tcctx, record_type);
      if (srecord_needs_remap)
	srecord_type = task_copyfn_remap_type (&tcctx, srecord_type);
    }
  else
    tcctx.cb.decl_map = NULL;

  push_cfun (child_cfun);

  arg = DECL_ARGUMENTS (child_fn);
  TREE_TYPE (arg) = build_pointer_type (record_type);
  sarg = DECL_CHAIN (arg);
  TREE_TYPE (sarg) = build_pointer_type (srecord_type);

  /* First pass: initialize temporaries used in record_type and srecord_type
     sizes and field offsets.  */
  if (tcctx.cb.decl_map)
    for (c = gimple_omp_task_clauses (task_stmt); c; c = OMP_CLAUSE_CHAIN (c))
      if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_FIRSTPRIVATE)
	{
	  tree *p;

	  decl = OMP_CLAUSE_DECL (c);
	  p = (tree *) pointer_map_contains (tcctx.cb.decl_map, decl);
	  if (p == NULL)
	    continue;
	  n = splay_tree_lookup (ctx->sfield_map, (splay_tree_key) decl);
	  sf = (tree) n->value;
	  sf = *(tree *) pointer_map_contains (tcctx.cb.decl_map, sf);
	  src = build_simple_mem_ref_loc (loc, sarg);
	  src = build3 (COMPONENT_REF, TREE_TYPE (sf), src, sf, NULL);
	  t = build2 (MODIFY_EXPR, TREE_TYPE (*p), *p, src);
	  append_to_statement_list (t, &list);
	}

  /* Second pass: copy shared var pointers and copy construct non-VLA
     firstprivate vars.  */
  for (c = gimple_omp_task_clauses (task_stmt); c; c = OMP_CLAUSE_CHAIN (c))
    switch (OMP_CLAUSE_CODE (c))
      {
      case OMP_CLAUSE_SHARED:
	decl = OMP_CLAUSE_DECL (c);
	n = splay_tree_lookup (ctx->field_map, (splay_tree_key) decl);
	if (n == NULL)
	  break;
	f = (tree) n->value;
	if (tcctx.cb.decl_map)
	  f = *(tree *) pointer_map_contains (tcctx.cb.decl_map, f);
	n = splay_tree_lookup (ctx->sfield_map, (splay_tree_key) decl);
	sf = (tree) n->value;
	if (tcctx.cb.decl_map)
	  sf = *(tree *) pointer_map_contains (tcctx.cb.decl_map, sf);
	src = build_simple_mem_ref_loc (loc, sarg);
	src = build3 (COMPONENT_REF, TREE_TYPE (sf), src, sf, NULL);
	dst = build_simple_mem_ref_loc (loc, arg);
	dst = build3 (COMPONENT_REF, TREE_TYPE (f), dst, f, NULL);
	t = build2 (MODIFY_EXPR, TREE_TYPE (dst), dst, src);
	append_to_statement_list (t, &list);
	break;
      case OMP_CLAUSE_FIRSTPRIVATE:
	decl = OMP_CLAUSE_DECL (c);
	if (is_variable_sized (decl))
	  break;
	n = splay_tree_lookup (ctx->field_map, (splay_tree_key) decl);
	if (n == NULL)
	  break;
	f = (tree) n->value;
	if (tcctx.cb.decl_map)
	  f = *(tree *) pointer_map_contains (tcctx.cb.decl_map, f);
	n = splay_tree_lookup (ctx->sfield_map, (splay_tree_key) decl);
	if (n != NULL)
	  {
	    sf = (tree) n->value;
	    if (tcctx.cb.decl_map)
	      sf = *(tree *) pointer_map_contains (tcctx.cb.decl_map, sf);
	    src = build_simple_mem_ref_loc (loc, sarg);
	    src = build3 (COMPONENT_REF, TREE_TYPE (sf), src, sf, NULL);
	    if (use_pointer_for_field (decl, NULL) || is_reference (decl))
	      src = build_simple_mem_ref_loc (loc, src);
	  }
	else
	  src = decl;
	dst = build_simple_mem_ref_loc (loc, arg);
	dst = build3 (COMPONENT_REF, TREE_TYPE (f), dst, f, NULL);
	t = lang_hooks.decls.omp_clause_copy_ctor (c, dst, src);
	append_to_statement_list (t, &list);
	break;
      case OMP_CLAUSE_PRIVATE:
	if (! OMP_CLAUSE_PRIVATE_OUTER_REF (c))
	  break;
	decl = OMP_CLAUSE_DECL (c);
	n = splay_tree_lookup (ctx->field_map, (splay_tree_key) decl);
	f = (tree) n->value;
	if (tcctx.cb.decl_map)
	  f = *(tree *) pointer_map_contains (tcctx.cb.decl_map, f);
	n = splay_tree_lookup (ctx->sfield_map, (splay_tree_key) decl);
	if (n != NULL)
	  {
	    sf = (tree) n->value;
	    if (tcctx.cb.decl_map)
	      sf = *(tree *) pointer_map_contains (tcctx.cb.decl_map, sf);
	    src = build_simple_mem_ref_loc (loc, sarg);
	    src = build3 (COMPONENT_REF, TREE_TYPE (sf), src, sf, NULL);
	    if (use_pointer_for_field (decl, NULL))
	      src = build_simple_mem_ref_loc (loc, src);
	  }
	else
	  src = decl;
	dst = build_simple_mem_ref_loc (loc, arg);
	dst = build3 (COMPONENT_REF, TREE_TYPE (f), dst, f, NULL);
	t = build2 (MODIFY_EXPR, TREE_TYPE (dst), dst, src);
	append_to_statement_list (t, &list);
	break;
      default:
	break;
      }

  /* Last pass: handle VLA firstprivates.  Copy the data through the
     source pointer, then store the address of the new copy into the
     corresponding pointer field of the destination record.  */
  if (tcctx.cb.decl_map)
    for (c = gimple_omp_task_clauses (task_stmt); c; c = OMP_CLAUSE_CHAIN (c))
      if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_FIRSTPRIVATE)
	{
	  tree ind, ptr, df;

	  decl = OMP_CLAUSE_DECL (c);
	  if (!is_variable_sized (decl))
	    continue;
	  n = splay_tree_lookup (ctx->field_map, (splay_tree_key) decl);
	  if (n == NULL)
	    continue;
	  f = (tree) n->value;
	  f = *(tree *) pointer_map_contains (tcctx.cb.decl_map, f);
	  gcc_assert (DECL_HAS_VALUE_EXPR_P (decl));
	  ind = DECL_VALUE_EXPR (decl);
	  gcc_assert (TREE_CODE (ind) == INDIRECT_REF);
	  gcc_assert (DECL_P (TREE_OPERAND (ind, 0)));
	  n = splay_tree_lookup (ctx->sfield_map,
				 (splay_tree_key) TREE_OPERAND (ind, 0));
	  sf = (tree) n->value;
	  sf = *(tree *) pointer_map_contains (tcctx.cb.decl_map, sf);
	  src = build_simple_mem_ref_loc (loc, sarg);
	  src = build3 (COMPONENT_REF, TREE_TYPE (sf), src, sf, NULL);
	  src = build_simple_mem_ref_loc (loc, src);
	  dst = build_simple_mem_ref_loc (loc, arg);
	  dst = build3 (COMPONENT_REF, TREE_TYPE (f), dst, f, NULL);
	  t = lang_hooks.decls.omp_clause_copy_ctor (c, dst, src);
	  append_to_statement_list (t, &list);
	  n = splay_tree_lookup (ctx->field_map,
				 (splay_tree_key) TREE_OPERAND (ind, 0));
	  df = (tree) n->value;
	  df = *(tree *) pointer_map_contains (tcctx.cb.decl_map, df);
	  ptr = build_simple_mem_ref_loc (loc, arg);
	  ptr = build3 (COMPONENT_REF, TREE_TYPE (df), ptr, df, NULL);
	  t = build2 (MODIFY_EXPR, TREE_TYPE (ptr), ptr,
		      build_fold_addr_expr_loc (loc, dst));
	  append_to_statement_list (t, &list);
	}

  t = build1 (RETURN_EXPR, void_type_node, NULL);
  append_to_statement_list (t, &list);

  if (tcctx.cb.decl_map)
    pointer_map_destroy (tcctx.cb.decl_map);
  pop_gimplify_context (NULL);
  BIND_EXPR_BODY (bind) = list;
  pop_cfun ();
  current_function_decl = ctx->cb.src_fn;
}
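
/* Conceptually (a simplified sketch, not the exact GIMPLE), for

       #pragma omp task shared (s) firstprivate (x)

   the copy function generated above behaves like

       void copyfn (struct record *dst, struct srecord *src)
       {
	 dst->s = src->s;		(propagate the shared var pointer)
	 dst->x = src->x;		(copy construct the firstprivate)
       }

   using the language hook's copy constructor where the clause requires
   it.  */
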
/* Lower the OpenMP parallel or task directive in the current statement
   in GSI_P.  CTX holds context information for the directive.  */
static void
lower_omp_taskreg (gimple_stmt_iterator *gsi_p, omp_context *ctx)
{
  tree clauses;
  tree child_fn, t;
  gimple stmt = gsi_stmt (*gsi_p);
  gimple par_bind, bind;
  gimple_seq par_body, olist, ilist, par_olist, par_ilist, new_body;
  struct gimplify_ctx gctx;
  location_t loc = gimple_location (stmt);

  clauses = gimple_omp_taskreg_clauses (stmt);
  par_bind = gimple_seq_first_stmt (gimple_omp_body (stmt));
  par_body = gimple_bind_body (par_bind);
  child_fn = ctx->cb.dst_fn;
  if (gimple_code (stmt) == GIMPLE_OMP_PARALLEL
      && !gimple_omp_parallel_combined_p (stmt))
    {
      struct walk_stmt_info wi;
      int ws_num = 0;

      memset (&wi, 0, sizeof (wi));
      wi.info = &ws_num;
      wi.val_only = true;
      walk_gimple_seq (par_body, check_combined_parallel, NULL, &wi);
      if (ws_num == 1)
	gimple_omp_parallel_set_combined_p (stmt, true);
    }
  if (ctx->srecord_type)
    create_task_copyfn (stmt, ctx);

  push_gimplify_context (&gctx);

  par_olist = NULL;
  par_ilist = NULL;
  lower_rec_input_clauses (clauses, &par_ilist, &par_olist, ctx);
  lower_omp (par_body, ctx);
  if (gimple_code (stmt) == GIMPLE_OMP_PARALLEL)
    lower_reduction_clauses (clauses, &par_olist, ctx);

  /* Declare all the variables created by mapping and the variables
     declared in the scope of the parallel body.  */
  record_vars_into (ctx->block_vars, child_fn);
  record_vars_into (gimple_bind_vars (par_bind), child_fn);

  if (ctx->record_type)
    {
      ctx->sender_decl
	= create_tmp_var (ctx->srecord_type ? ctx->srecord_type
			  : ctx->record_type, ".omp_data_o");
      DECL_NAMELESS (ctx->sender_decl) = 1;
      TREE_ADDRESSABLE (ctx->sender_decl) = 1;
      gimple_omp_taskreg_set_data_arg (stmt, ctx->sender_decl);
    }

  olist = NULL;
  ilist = NULL;
  lower_send_clauses (clauses, &ilist, &olist, ctx);
  lower_send_shared_vars (&ilist, &olist, ctx);

  /* Once all the expansions are done, sequence all the different
     fragments inside gimple_omp_body.  */

  new_body = NULL;

  if (ctx->record_type)
    {
      t = build_fold_addr_expr_loc (loc, ctx->sender_decl);
      /* fixup_child_record_type might have changed receiver_decl's type.  */
      t = fold_convert_loc (loc, TREE_TYPE (ctx->receiver_decl), t);
      gimple_seq_add_stmt (&new_body,
			   gimple_build_assign (ctx->receiver_decl, t));
    }

  gimple_seq_add_seq (&new_body, par_ilist);
  gimple_seq_add_seq (&new_body, par_body);
  gimple_seq_add_seq (&new_body, par_olist);
  new_body = maybe_catch_exception (new_body);
  gimple_seq_add_stmt (&new_body, gimple_build_omp_return (false));
  gimple_omp_set_body (stmt, new_body);

  bind = gimple_build_bind (NULL, NULL, gimple_bind_block (par_bind));
  gimple_bind_add_stmt (bind, stmt);
  if (ilist || olist)
    {
      gimple_seq_add_stmt (&ilist, bind);
      gimple_seq_add_seq (&ilist, olist);
      bind = gimple_build_bind (NULL, ilist, NULL);
    }

  gsi_replace (gsi_p, bind, true);

  pop_gimplify_context (NULL);
}
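
/* After lowering, a parallel directive has roughly this shape (a
   simplified sketch, not the exact GIMPLE):

       .omp_data_o.i = i;		(sender side, lower_send_clauses)
       #pragma omp parallel
	 .omp_data_i = &.omp_data_o;	(receiver_decl assignment above)
	 ... lowered body ...
       #pragma omp return

   Outlining the body into the child function and emitting the calls
   into libgomp happen later, in pass_expand_omp.  */
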
/* Callback for lower_omp_1.  Return non-NULL if *tp needs to be
   regimplified.  If DATA is non-NULL, lower_omp_1 is outside
   of OpenMP context, but with task_shared_vars set.  */
static tree
lower_omp_regimplify_p (tree *tp, int *walk_subtrees,
			void *data)
{
  tree t = *tp;

  /* Any variable with DECL_VALUE_EXPR needs to be regimplified.  */
  if (TREE_CODE (t) == VAR_DECL && data == NULL && DECL_HAS_VALUE_EXPR_P (t))
    return t;

  if (task_shared_vars
      && DECL_P (t)
      && bitmap_bit_p (task_shared_vars, DECL_UID (t)))
    return t;

  /* If a global variable has been privatized, TREE_CONSTANT on
     ADDR_EXPR might be wrong.  */
  if (data == NULL && TREE_CODE (t) == ADDR_EXPR)
    recompute_tree_invariant_for_addr_expr (t);

  *walk_subtrees = !TYPE_P (t) && !DECL_P (t);
  return NULL_TREE;
}
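
/* For instance (a sketch): if a file-scope variable G is privatized or
   shared for a task, uses of G in the body may have been rewritten via
   DECL_VALUE_EXPR into references through the received data block, so
   the containing statement, and TREE_CONSTANT on any ADDR_EXPR of G,
   must be recomputed; hence the regimplification triggered above.  */
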
static void
lower_omp_1 (gimple_stmt_iterator *gsi_p, omp_context *ctx)
{
  gimple stmt = gsi_stmt (*gsi_p);
  struct walk_stmt_info wi;

  if (gimple_has_location (stmt))
    input_location = gimple_location (stmt);

  if (task_shared_vars)
    memset (&wi, '\0', sizeof (wi));

  /* If we have issued syntax errors, avoid doing any heavy lifting.
     Just replace the OpenMP directives with a NOP to avoid
     confusing RTL expansion.  */
  if (seen_error () && is_gimple_omp (stmt))
    {
      gsi_replace (gsi_p, gimple_build_nop (), true);
      return;
    }

  switch (gimple_code (stmt))
    {
    case GIMPLE_COND:
      if ((ctx || task_shared_vars)
	  && (walk_tree (gimple_cond_lhs_ptr (stmt), lower_omp_regimplify_p,
			 ctx ? NULL : &wi, NULL)
	      || walk_tree (gimple_cond_rhs_ptr (stmt), lower_omp_regimplify_p,
			    ctx ? NULL : &wi, NULL)))
	gimple_regimplify_operands (stmt, gsi_p);
      break;
    case GIMPLE_CATCH:
      lower_omp (gimple_catch_handler (stmt), ctx);
      break;
    case GIMPLE_EH_FILTER:
      lower_omp (gimple_eh_filter_failure (stmt), ctx);
      break;
    case GIMPLE_TRY:
      lower_omp (gimple_try_eval (stmt), ctx);
      lower_omp (gimple_try_cleanup (stmt), ctx);
      break;
    case GIMPLE_BIND:
      lower_omp (gimple_bind_body (stmt), ctx);
      break;
    case GIMPLE_OMP_PARALLEL:
    case GIMPLE_OMP_TASK:
      ctx = maybe_lookup_ctx (stmt);
      lower_omp_taskreg (gsi_p, ctx);
      break;
    case GIMPLE_OMP_FOR:
      ctx = maybe_lookup_ctx (stmt);
      gcc_assert (ctx);
      lower_omp_for (gsi_p, ctx);
      break;
    case GIMPLE_OMP_SECTIONS:
      ctx = maybe_lookup_ctx (stmt);
      gcc_assert (ctx);
      lower_omp_sections (gsi_p, ctx);
      break;
    case GIMPLE_OMP_SINGLE:
      ctx = maybe_lookup_ctx (stmt);
      gcc_assert (ctx);
      lower_omp_single (gsi_p, ctx);
      break;
    case GIMPLE_OMP_MASTER:
      ctx = maybe_lookup_ctx (stmt);
      gcc_assert (ctx);
      lower_omp_master (gsi_p, ctx);
      break;
    case GIMPLE_OMP_ORDERED:
      ctx = maybe_lookup_ctx (stmt);
      gcc_assert (ctx);
      lower_omp_ordered (gsi_p, ctx);
      break;
    case GIMPLE_OMP_CRITICAL:
      ctx = maybe_lookup_ctx (stmt);
      gcc_assert (ctx);
      lower_omp_critical (gsi_p, ctx);
      break;
    case GIMPLE_OMP_ATOMIC_LOAD:
      if ((ctx || task_shared_vars)
	  && walk_tree (gimple_omp_atomic_load_rhs_ptr (stmt),
			lower_omp_regimplify_p, ctx ? NULL : &wi, NULL))
	gimple_regimplify_operands (stmt, gsi_p);
      break;
    default:
      if ((ctx || task_shared_vars)
	  && walk_gimple_op (stmt, lower_omp_regimplify_p,
			     ctx ? NULL : &wi))
	gimple_regimplify_operands (stmt, gsi_p);
      break;
    }
}

static void
lower_omp (gimple_seq body, omp_context *ctx)
{
  location_t saved_location = input_location;
  gimple_stmt_iterator gsi;

  for (gsi = gsi_start (body); !gsi_end_p (gsi); gsi_next (&gsi))
    lower_omp_1 (&gsi, ctx);
  input_location = saved_location;
}

/* Main entry point.  */

static unsigned int
execute_lower_omp (void)
{
  gimple_seq body;

  /* This pass always runs, to provide PROP_gimple_lomp.
     But there is nothing to do unless -fopenmp is given.  */
  if (flag_openmp == 0)
    return 0;

  all_contexts = splay_tree_new (splay_tree_compare_pointers, 0,
				 delete_omp_context);

  body = gimple_body (current_function_decl);
  scan_omp (body, NULL);
  gcc_assert (taskreg_nesting_level == 0);

  if (all_contexts->root)
    {
      struct gimplify_ctx gctx;

      if (task_shared_vars)
	push_gimplify_context (&gctx);
      lower_omp (body, NULL);
      if (task_shared_vars)
	pop_gimplify_context (NULL);
    }

  if (all_contexts)
    {
      splay_tree_delete (all_contexts);
      all_contexts = NULL;
    }
  BITMAP_FREE (task_shared_vars);
  return 0;
}

struct gimple_opt_pass pass_lower_omp =
{
 {
  GIMPLE_PASS,
  "omplower",				/* name */
  NULL,					/* gate */
  execute_lower_omp,			/* execute */
  NULL,					/* sub */
  NULL,					/* next */
  0,					/* static_pass_number */
  TV_NONE,				/* tv_id */
  PROP_gimple_any,			/* properties_required */
  PROP_gimple_lomp,			/* properties_provided */
  0,					/* properties_destroyed */
  0,					/* todo_flags_start */
  0					/* todo_flags_finish */
 }
};

/* The following is a utility to diagnose OpenMP structured block violations.
   It is not part of the "omplower" pass, as that's invoked too late.  It
   should be invoked by the respective front ends after gimplification.  */
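
/* For example (a sketch of user code, not of the IL):

       goto lab;		(invalid entry)
     #pragma omp parallel
       {
       lab:
	 ...;
	 goto out;		(invalid exit)
       }
     out:;

   Both branches cross the boundary of the structured block and must be
   diagnosed.  */
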
static splay_tree all_labels;

/* Check for mismatched contexts and generate an error if needed.  Return
   true if an error is detected.  */
static bool
diagnose_sb_0 (gimple_stmt_iterator *gsi_p,
	       gimple branch_ctx, gimple label_ctx)
{
  if (label_ctx == branch_ctx)
    return false;

  /*
     Previously we kept track of the label's entire context in diagnose_sb_[12]
     so we could traverse it and issue a correct "exit" or "enter" error
     message upon a structured block violation.

     We built the context by building a list with tree_cons'ing, but there is
     no easy counterpart in gimple tuples.  It seems like far too much work
     for issuing exit/enter error messages.  If someone really misses the
     distinct error message... patches welcome.
   */

#if 0
  /* Try to avoid confusing the user by producing an error message
     with correct "exit" or "enter" verbiage.  We prefer "exit"
     unless we can show that LABEL_CTX is nested within BRANCH_CTX.  */
  bool exit_p = true;
  if (branch_ctx == NULL)
    exit_p = false;
  else
    while (label_ctx)
      {
	if (TREE_VALUE (label_ctx) == branch_ctx)
	  {
	    exit_p = false;
	    break;
	  }
	label_ctx = TREE_CHAIN (label_ctx);
      }
  if (exit_p)
    error ("invalid exit from OpenMP structured block");
  else
    error ("invalid entry to OpenMP structured block");
#endif

  /* If it's obvious we have an invalid entry, be specific about the error.  */
  if (branch_ctx == NULL)
    error ("invalid entry to OpenMP structured block");
  else
    /* Otherwise, be vague and lazy, but efficient.  */
    error ("invalid branch to/from an OpenMP structured block");

  gsi_replace (gsi_p, gimple_build_nop (), false);
  return true;
}

/* Pass 1: Create a minimal tree of OpenMP structured blocks, and record
   where each label is found.  */
static tree
diagnose_sb_1 (gimple_stmt_iterator *gsi_p, bool *handled_ops_p,
	       struct walk_stmt_info *wi)
{
  gimple context = (gimple) wi->info;
  gimple inner_context;
  gimple stmt = gsi_stmt (*gsi_p);

  *handled_ops_p = true;

  switch (gimple_code (stmt))
    {
    WALK_SUBSTMTS;

    case GIMPLE_OMP_PARALLEL:
    case GIMPLE_OMP_TASK:
    case GIMPLE_OMP_SECTIONS:
    case GIMPLE_OMP_SINGLE:
    case GIMPLE_OMP_SECTION:
    case GIMPLE_OMP_MASTER:
    case GIMPLE_OMP_ORDERED:
    case GIMPLE_OMP_CRITICAL:
      /* The minimal context here is just the current OMP construct.  */
      inner_context = stmt;
      wi->info = inner_context;
      walk_gimple_seq (gimple_omp_body (stmt), diagnose_sb_1, NULL, wi);
      wi->info = context;
      break;
    case GIMPLE_OMP_FOR:
      inner_context = stmt;
      wi->info = inner_context;
      /* gimple_omp_for_{index,initial,final} are all DECLs; no need to
	 walk them.  */
      walk_gimple_seq (gimple_omp_for_pre_body (stmt),
		       diagnose_sb_1, NULL, wi);
      walk_gimple_seq (gimple_omp_body (stmt), diagnose_sb_1, NULL, wi);
      wi->info = context;
      break;
    case GIMPLE_LABEL:
      splay_tree_insert (all_labels, (splay_tree_key) gimple_label_label (stmt),
			 (splay_tree_value) context);
      break;
    default:
      break;
    }

  return NULL_TREE;
}

/* Pass 2: Check each branch and see if its context differs from that of
   the destination label's context.  */
static tree
diagnose_sb_2 (gimple_stmt_iterator *gsi_p, bool *handled_ops_p,
	       struct walk_stmt_info *wi)
{
  gimple context = (gimple) wi->info;
  splay_tree_node n;
  gimple stmt = gsi_stmt (*gsi_p);

  *handled_ops_p = true;

  switch (gimple_code (stmt))
    {
    WALK_SUBSTMTS;

    case GIMPLE_OMP_PARALLEL:
    case GIMPLE_OMP_TASK:
    case GIMPLE_OMP_SECTIONS:
    case GIMPLE_OMP_SINGLE:
    case GIMPLE_OMP_SECTION:
    case GIMPLE_OMP_MASTER:
    case GIMPLE_OMP_ORDERED:
    case GIMPLE_OMP_CRITICAL:
      wi->info = stmt;
      walk_gimple_seq (gimple_omp_body (stmt), diagnose_sb_2, NULL, wi);
      wi->info = context;
      break;
    case GIMPLE_OMP_FOR:
      wi->info = stmt;
      /* gimple_omp_for_{index,initial,final} are all DECLs; no need to
	 walk them.  */
      walk_gimple_seq (gimple_omp_for_pre_body (stmt),
		       diagnose_sb_2, NULL, wi);
      walk_gimple_seq (gimple_omp_body (stmt), diagnose_sb_2, NULL, wi);
      wi->info = context;
      break;
    case GIMPLE_COND:
      {
	tree lab = gimple_cond_true_label (stmt);
	if (lab)
	  {
	    n = splay_tree_lookup (all_labels, (splay_tree_key) lab);
	    diagnose_sb_0 (gsi_p, context, n ? (gimple) n->value : NULL);
	  }
	lab = gimple_cond_false_label (stmt);
	if (lab)
	  {
	    n = splay_tree_lookup (all_labels, (splay_tree_key) lab);
	    diagnose_sb_0 (gsi_p, context, n ? (gimple) n->value : NULL);
	  }
      }
      break;
    case GIMPLE_GOTO:
      {
	tree lab = gimple_goto_dest (stmt);
	if (TREE_CODE (lab) != LABEL_DECL)
	  break;
	n = splay_tree_lookup (all_labels, (splay_tree_key) lab);
	diagnose_sb_0 (gsi_p, context, n ? (gimple) n->value : NULL);
      }
      break;
    case GIMPLE_SWITCH:
      {
	unsigned int i;
	for (i = 0; i < gimple_switch_num_labels (stmt); ++i)
	  {
	    tree lab = CASE_LABEL (gimple_switch_label (stmt, i));
	    n = splay_tree_lookup (all_labels, (splay_tree_key) lab);
	    if (n && diagnose_sb_0 (gsi_p, context, (gimple) n->value))
	      break;
	  }
      }
      break;
    case GIMPLE_RETURN:
      diagnose_sb_0 (gsi_p, context, NULL);
      break;
    default:
      break;
    }

  return NULL_TREE;
}

static unsigned int
diagnose_omp_structured_block_errors (void)
{
  struct walk_stmt_info wi;
  gimple_seq body = gimple_body (current_function_decl);

  all_labels = splay_tree_new (splay_tree_compare_pointers, 0, 0);

  memset (&wi, 0, sizeof (wi));
  walk_gimple_seq (body, diagnose_sb_1, NULL, &wi);

  memset (&wi, 0, sizeof (wi));
  wi.want_locations = true;
  walk_gimple_seq (body, diagnose_sb_2, NULL, &wi);

  splay_tree_delete (all_labels);
  all_labels = NULL;

  return 0;
}

static bool
gate_diagnose_omp_blocks (void)
{
  return flag_openmp != 0;
}

struct gimple_opt_pass pass_diagnose_omp_blocks =
{
 {
  GIMPLE_PASS,
  "*diagnose_omp_blocks",		/* name */
  gate_diagnose_omp_blocks,		/* gate */
  diagnose_omp_structured_block_errors,	/* execute */
  NULL,					/* sub */
  NULL,					/* next */
  0,					/* static_pass_number */
  TV_NONE,				/* tv_id */
  PROP_gimple_any,			/* properties_required */
  0,					/* properties_provided */
  0,					/* properties_destroyed */
  0,					/* todo_flags_start */
  0,					/* todo_flags_finish */
 }
};
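
/* Pull in the declarations generated for this file by gengtype, for the
   benefit of the garbage collector.  */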
#include "gt-omp-low.h"