/* Lowering pass for OpenMP directives.  Converts OpenMP directives
   into explicit calls to the runtime library (libgomp) and data
   marshalling to implement data sharing and copying clauses.
   Contributed by Diego Novillo <dnovillo@redhat.com>

   Copyright (C) 2005, 2006, 2007, 2008 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "tree.h"
#include "rtl.h"
#include "tree-gimple.h"
#include "tree-inline.h"
#include "langhooks.h"
#include "diagnostic.h"
#include "tree-flow.h"
#include "timevar.h"
#include "flags.h"
#include "function.h"
#include "expr.h"
#include "toplev.h"
#include "tree-pass.h"
#include "ggc.h"
#include "except.h"
#include "splay-tree.h"
#include "optabs.h"
#include "cpplib.h"
/* Lowering of OpenMP parallel and workshare constructs proceeds in two
   phases.  The first phase scans the function looking for OMP statements
   and then for variables that must be replaced to satisfy data sharing
   clauses.  The second phase expands code for the constructs, as well as
   re-gimplifying things when variables have been replaced with complex
   expressions.

   Final code generation is done by pass_expand_omp.  The flowgraph is
   scanned for parallel regions which are then moved to a new
   function, to be invoked by the thread library.  */
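
/* For illustration, a sketch of what this pass produces: a directive
   such as

	#pragma omp parallel shared (i)
	  i = i + 1;

   becomes an outlined child function plus explicit libgomp calls
   roughly like the following (the .omp_data and child function names
   below are hypothetical; they are invented by this pass at compile
   time):

	.omp_data_o.i = i;
	GOMP_parallel_start (foo.omp_fn.0, &.omp_data_o, 0);
	foo.omp_fn.0 (&.omp_data_o);
	GOMP_parallel_end ();
	i = .omp_data_o.i;  */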
/* Context structure.  Used to store information about each parallel
   directive in the code.  */

typedef struct omp_context
{
  /* This field must be at the beginning, as we do "inheritance": Some
     callback functions for tree-inline.c (e.g., omp_copy_decl)
     receive a copy_body_data pointer that is up-casted to an
     omp_context pointer.  */
  copy_body_data cb;

  /* The tree of contexts corresponding to the encountered constructs.  */
  struct omp_context *outer;
  tree stmt;

  /* Map variables to fields in a structure that allows communication
     between sending and receiving threads.  */
  splay_tree field_map;
  tree record_type;
  tree sender_decl;
  tree receiver_decl;

  /* A chain of variables to add to the top-level block surrounding the
     construct.  In the case of a parallel, this is in the child function.  */
  tree block_vars;

  /* What to do with variables with implicitly determined sharing
     attributes.  */
  enum omp_clause_default_kind default_kind;

  /* Nesting depth of this context.  Used to beautify error messages re
     invalid gotos.  The outermost ctx is depth 1, with depth 0 being
     reserved for the main body of the function.  */
  int depth;

  /* True if this parallel directive is nested within another.  */
  bool is_nested;
} omp_context;
/* A structure describing the main elements of a parallel loop.  */

struct omp_for_data
{
  tree v, n1, n2, step, chunk_size, for_stmt;
  enum tree_code cond_code;
  tree pre;
  bool have_nowait, have_ordered;
  enum omp_clause_schedule_kind sched_kind;
};
static splay_tree all_contexts;
static int parallel_nesting_level;
struct omp_region *root_omp_region;

static void scan_omp (tree *, omp_context *);
static void lower_omp (tree *, omp_context *);
static tree lookup_decl_in_outer_ctx (tree, omp_context *);
static tree maybe_lookup_decl_in_outer_ctx (tree, omp_context *);
/* Find an OpenMP clause of type KIND within CLAUSES.  */

static tree
find_omp_clause (tree clauses, enum tree_code kind)
{
  for (; clauses ; clauses = OMP_CLAUSE_CHAIN (clauses))
    if (OMP_CLAUSE_CODE (clauses) == kind)
      return clauses;

  return NULL_TREE;
}
/* Return true if CTX is for an omp parallel.  */

static inline bool
is_parallel_ctx (omp_context *ctx)
{
  return TREE_CODE (ctx->stmt) == OMP_PARALLEL;
}
/* Return true if REGION is a combined parallel+workshare region.  */

static inline bool
is_combined_parallel (struct omp_region *region)
{
  return region->is_combined_parallel;
}
/* Extract the header elements of parallel loop FOR_STMT and store
   them into *FD.  */

static void
extract_omp_for_data (tree for_stmt, struct omp_for_data *fd)
{
  tree t, var;

  fd->for_stmt = for_stmt;
  fd->pre = NULL;

  t = OMP_FOR_INIT (for_stmt);
  gcc_assert (TREE_CODE (t) == GIMPLE_MODIFY_STMT);
  fd->v = GIMPLE_STMT_OPERAND (t, 0);
  gcc_assert (SSA_VAR_P (fd->v));
  gcc_assert (TREE_CODE (TREE_TYPE (fd->v)) == INTEGER_TYPE);
  var = TREE_CODE (fd->v) == SSA_NAME ? SSA_NAME_VAR (fd->v) : fd->v;
  fd->n1 = GIMPLE_STMT_OPERAND (t, 1);

  t = OMP_FOR_COND (for_stmt);
  fd->cond_code = TREE_CODE (t);
  gcc_assert (TREE_OPERAND (t, 0) == var);
  fd->n2 = TREE_OPERAND (t, 1);
  switch (fd->cond_code)
    {
    case LT_EXPR:
    case GT_EXPR:
      break;
    case LE_EXPR:
      fd->n2 = fold_build2 (PLUS_EXPR, TREE_TYPE (fd->n2), fd->n2,
                            build_int_cst (TREE_TYPE (fd->n2), 1));
      fd->cond_code = LT_EXPR;
      break;
    case GE_EXPR:
      fd->n2 = fold_build2 (MINUS_EXPR, TREE_TYPE (fd->n2), fd->n2,
                            build_int_cst (TREE_TYPE (fd->n2), 1));
      fd->cond_code = GT_EXPR;
      break;
    default:
      gcc_unreachable ();
    }

  t = OMP_FOR_INCR (fd->for_stmt);
  gcc_assert (TREE_CODE (t) == GIMPLE_MODIFY_STMT);
  gcc_assert (GIMPLE_STMT_OPERAND (t, 0) == var);
  t = GIMPLE_STMT_OPERAND (t, 1);
  gcc_assert (TREE_OPERAND (t, 0) == var);
  switch (TREE_CODE (t))
    {
    case PLUS_EXPR:
      fd->step = TREE_OPERAND (t, 1);
      break;
    case MINUS_EXPR:
      fd->step = TREE_OPERAND (t, 1);
      fd->step = fold_build1 (NEGATE_EXPR, TREE_TYPE (fd->step), fd->step);
      break;
    default:
      gcc_unreachable ();
    }

  fd->have_nowait = fd->have_ordered = false;
  fd->sched_kind = OMP_CLAUSE_SCHEDULE_STATIC;
  fd->chunk_size = NULL_TREE;

  for (t = OMP_FOR_CLAUSES (for_stmt); t ; t = OMP_CLAUSE_CHAIN (t))
    switch (OMP_CLAUSE_CODE (t))
      {
      case OMP_CLAUSE_NOWAIT:
        fd->have_nowait = true;
        break;
      case OMP_CLAUSE_ORDERED:
        fd->have_ordered = true;
        break;
      case OMP_CLAUSE_SCHEDULE:
        fd->sched_kind = OMP_CLAUSE_SCHEDULE_KIND (t);
        fd->chunk_size = OMP_CLAUSE_SCHEDULE_CHUNK_EXPR (t);
        break;
      default:
        break;
      }

  if (fd->sched_kind == OMP_CLAUSE_SCHEDULE_RUNTIME)
    gcc_assert (fd->chunk_size == NULL);
  else if (fd->chunk_size == NULL)
    {
      /* We only need to compute a default chunk size for ordered
         static loops and dynamic loops.  */
      if (fd->sched_kind != OMP_CLAUSE_SCHEDULE_STATIC || fd->have_ordered)
        fd->chunk_size = (fd->sched_kind == OMP_CLAUSE_SCHEDULE_STATIC)
                         ? integer_zero_node : integer_one_node;
    }
}
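
/* A sketch of the canonicalization above: for a source loop written as

	for (i = 0; i <= N; i++)

   extract_omp_for_data records n1 = 0, n2 = N + 1, cond_code = LT_EXPR
   and step = 1, so the later expansion code only ever has to handle
   strict < and > loops.  */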
/* Given two blocks PAR_ENTRY_BB and WS_ENTRY_BB such that WS_ENTRY_BB
   is the immediate dominator of PAR_ENTRY_BB, return true if there
   are no data dependencies that would prevent expanding the parallel
   directive at PAR_ENTRY_BB as a combined parallel+workshare region.

   When expanding a combined parallel+workshare region, the call to
   the child function may need additional arguments in the case of
   OMP_FOR regions.  In some cases, these arguments are computed out
   of variables passed in from the parent to the child via 'struct
   .omp_data_s'.  For instance:

	#pragma omp parallel for schedule (guided, i * 4)
	for (j ...)

   Is lowered into:

	# BLOCK 2 (PAR_ENTRY_BB)
	.omp_data_o.i = i;
	#pragma omp parallel [child fn: bar.omp_fn.0 ( ..., D.1598)

	# BLOCK 3 (WS_ENTRY_BB)
	.omp_data_i = &.omp_data_o;
	D.1667 = .omp_data_i->i;
	D.1598 = D.1667 * 4;
	#pragma omp for schedule (guided, D.1598)

   When we outline the parallel region, the call to the child function
   'bar.omp_fn.0' will need the value D.1598 in its argument list, but
   that value is computed *after* the call site.  So, in principle we
   cannot do the transformation.

   To see whether the code in WS_ENTRY_BB blocks the combined
   parallel+workshare call, we collect all the variables used in the
   OMP_FOR header and check whether they appear on the LHS of any
   statement in WS_ENTRY_BB.  If so, then we cannot emit the combined
   call.

   FIXME.  If we had the SSA form built at this point, we could merely
   hoist the code in block 3 into block 2 and be done with it.  But at
   this point we don't have dataflow information and though we could
   hack something up here, it is really not worth the aggravation.  */

static bool
workshare_safe_to_combine_p (basic_block par_entry_bb, basic_block ws_entry_bb)
{
  struct omp_for_data fd;
  tree par_stmt, ws_stmt;

  par_stmt = last_stmt (par_entry_bb);
  ws_stmt = last_stmt (ws_entry_bb);

  if (TREE_CODE (ws_stmt) == OMP_SECTIONS)
    return true;

  gcc_assert (TREE_CODE (ws_stmt) == OMP_FOR);

  extract_omp_for_data (ws_stmt, &fd);

  /* FIXME.  We give up too easily here.  If any of these arguments
     are not constants, they will likely involve variables that have
     been mapped into fields of .omp_data_s for sharing with the child
     function.  With appropriate data flow, it would be possible to
     see through this.  */
  if (!is_gimple_min_invariant (fd.n1)
      || !is_gimple_min_invariant (fd.n2)
      || !is_gimple_min_invariant (fd.step)
      || (fd.chunk_size && !is_gimple_min_invariant (fd.chunk_size)))
    return false;

  return true;
}
/* Collect additional arguments needed to emit a combined
   parallel+workshare call.  WS_STMT is the workshare directive being
   expanded.  */

static tree
get_ws_args_for (tree ws_stmt)
{
  tree t;

  if (TREE_CODE (ws_stmt) == OMP_FOR)
    {
      struct omp_for_data fd;
      tree ws_args;

      extract_omp_for_data (ws_stmt, &fd);

      ws_args = NULL_TREE;
      if (fd.chunk_size)
        {
          t = fold_convert (long_integer_type_node, fd.chunk_size);
          ws_args = tree_cons (NULL, t, ws_args);
        }

      t = fold_convert (long_integer_type_node, fd.step);
      ws_args = tree_cons (NULL, t, ws_args);

      t = fold_convert (long_integer_type_node, fd.n2);
      ws_args = tree_cons (NULL, t, ws_args);

      t = fold_convert (long_integer_type_node, fd.n1);
      ws_args = tree_cons (NULL, t, ws_args);

      return ws_args;
    }
  else if (TREE_CODE (ws_stmt) == OMP_SECTIONS)
    {
      /* Number of sections is equal to the number of edges from the
         OMP_SECTIONS_SWITCH statement, except for the one to the exit
         of the sections region.  */
      basic_block bb = single_succ (bb_for_stmt (ws_stmt));
      t = build_int_cst (unsigned_type_node, EDGE_COUNT (bb->succs) - 1);
      t = tree_cons (NULL, t, NULL);
      return t;
    }

  gcc_unreachable ();
}
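
/* Because the list above is built by consing the chunk size first and
   the lower bound last, a combined call for, say,

	#pragma omp parallel for schedule (dynamic, 4)
	for (i = 0; i < n; i++)

   receives the extra arguments in the order n1, n2, step, chunk_size,
   i.e. (0, n, 1, 4), matching the trailing parameters of the
   GOMP_parallel_loop_*_start entry points in libgomp (this example
   assumes N is invariant, as workshare_safe_to_combine_p requires).  */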
/* Discover whether REGION is a combined parallel+workshare region.  */

static void
determine_parallel_type (struct omp_region *region)
{
  basic_block par_entry_bb, par_exit_bb;
  basic_block ws_entry_bb, ws_exit_bb;

  if (region == NULL || region->inner == NULL
      || region->exit == NULL || region->inner->exit == NULL
      || region->inner->cont == NULL)
    return;

  /* We only support parallel+for and parallel+sections.  */
  if (region->type != OMP_PARALLEL
      || (region->inner->type != OMP_FOR
          && region->inner->type != OMP_SECTIONS))
    return;

  /* Check for perfect nesting PAR_ENTRY_BB -> WS_ENTRY_BB and
     WS_EXIT_BB -> PAR_EXIT_BB.  */
  par_entry_bb = region->entry;
  par_exit_bb = region->exit;
  ws_entry_bb = region->inner->entry;
  ws_exit_bb = region->inner->exit;

  if (single_succ (par_entry_bb) == ws_entry_bb
      && single_succ (ws_exit_bb) == par_exit_bb
      && workshare_safe_to_combine_p (par_entry_bb, ws_entry_bb)
      && (OMP_PARALLEL_COMBINED (last_stmt (par_entry_bb))
          || (last_and_only_stmt (ws_entry_bb)
              && last_and_only_stmt (par_exit_bb))))
    {
      tree ws_stmt = last_stmt (ws_entry_bb);

      if (region->inner->type == OMP_FOR)
        {
          /* If this is a combined parallel loop, we need to determine
             whether or not to use the combined library calls.  There
             are two cases where we do not apply the transformation:
             static loops and any kind of ordered loop.  In the first
             case, we already open code the loop so there is no need
             to do anything else.  In the latter case, the combined
             parallel loop call would still need extra synchronization
             to implement ordered semantics, so there would not be any
             gain in using the combined call.  */
          tree clauses = OMP_FOR_CLAUSES (ws_stmt);
          tree c = find_omp_clause (clauses, OMP_CLAUSE_SCHEDULE);
          if (c == NULL
              || OMP_CLAUSE_SCHEDULE_KIND (c) == OMP_CLAUSE_SCHEDULE_STATIC
              || find_omp_clause (clauses, OMP_CLAUSE_ORDERED))
            {
              region->is_combined_parallel = false;
              region->inner->is_combined_parallel = false;
              return;
            }
        }

      region->is_combined_parallel = true;
      region->inner->is_combined_parallel = true;
      region->ws_args = get_ws_args_for (ws_stmt);
    }
}
/* Return true if EXPR is variable sized.  */

static inline bool
is_variable_sized (const_tree expr)
{
  return !TREE_CONSTANT (TYPE_SIZE_UNIT (TREE_TYPE (expr)));
}
/* Return true if DECL is a reference type.  */

static inline bool
is_reference (tree decl)
{
  return lang_hooks.decls.omp_privatize_by_reference (decl);
}
/* Lookup variables in the decl or field splay trees.  The "maybe" form
   allows for the variable form to not have been entered, otherwise we
   assert that the variable must have been entered.  */

static inline tree
lookup_decl (tree var, omp_context *ctx)
{
  tree *n;
  n = (tree *) pointer_map_contains (ctx->cb.decl_map, var);
  return *n;
}

static inline tree
maybe_lookup_decl (const_tree var, omp_context *ctx)
{
  tree *n;
  n = (tree *) pointer_map_contains (ctx->cb.decl_map, var);
  return n ? *n : NULL_TREE;
}

static inline tree
lookup_field (tree var, omp_context *ctx)
{
  splay_tree_node n;
  n = splay_tree_lookup (ctx->field_map, (splay_tree_key) var);
  return (tree) n->value;
}

static inline tree
maybe_lookup_field (tree var, omp_context *ctx)
{
  splay_tree_node n;
  n = splay_tree_lookup (ctx->field_map, (splay_tree_key) var);
  return n ? (tree) n->value : NULL_TREE;
}
/* Return true if DECL should be copied by pointer.  SHARED_CTX is
   the parallel context if DECL is to be shared.  */

static bool
use_pointer_for_field (const_tree decl, omp_context *shared_ctx)
{
  if (AGGREGATE_TYPE_P (TREE_TYPE (decl)))
    return true;

  /* We can only use copy-in/copy-out semantics for shared variables
     when we know the value is not accessible from an outer scope.  */
  if (shared_ctx)
    {
      /* ??? Trivially accessible from anywhere.  But why would we even
         be passing an address in this case?  Should we simply assert
         this to be false, or should we have a cleanup pass that removes
         these from the list of mappings?  */
      if (TREE_STATIC (decl) || DECL_EXTERNAL (decl))
        return true;

      /* For variables with DECL_HAS_VALUE_EXPR_P set, we cannot tell
         without analyzing the expression whether or not its location
         is accessible to anyone else.  In the case of nested parallel
         regions it certainly may be.  */
      if (TREE_CODE (decl) != RESULT_DECL && DECL_HAS_VALUE_EXPR_P (decl))
        return true;

      /* Do not use copy-in/copy-out for variables that have their
         address taken.  */
      if (TREE_ADDRESSABLE (decl))
        return true;

      /* Disallow copy-in/out in nested parallel if
         decl is shared in outer parallel, otherwise
         each thread could store the shared variable
         in its own copy-in location, making the
         variable no longer really shared.  */
      if (!TREE_READONLY (decl) && shared_ctx->is_nested)
        {
          omp_context *up;

          for (up = shared_ctx->outer; up; up = up->outer)
            if (maybe_lookup_decl (decl, up))
              break;

          if (up && is_parallel_ctx (up))
            {
              tree c;

              for (c = OMP_PARALLEL_CLAUSES (up->stmt);
                   c; c = OMP_CLAUSE_CHAIN (c))
                if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_SHARED
                    && OMP_CLAUSE_DECL (c) == decl)
                  break;

              if (c)
                return true;
            }
        }
    }

  return false;
}
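
/* For example, given

	int a;  static int b;  int c[10];

   a 'shared' clause maps A by value (copy-in/copy-out through the
   .omp_data_s field), while B and C are mapped by pointer: B because
   its location is trivially visible outside the region, C because it
   is an aggregate.  */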
/* Create a new VAR_DECL and copy information from VAR to it.  */

tree
copy_var_decl (tree var, tree name, tree type)
{
  tree copy = build_decl (VAR_DECL, name, type);

  TREE_ADDRESSABLE (copy) = TREE_ADDRESSABLE (var);
  TREE_THIS_VOLATILE (copy) = TREE_THIS_VOLATILE (var);
  DECL_GIMPLE_REG_P (copy) = DECL_GIMPLE_REG_P (var);
  DECL_NO_TBAA_P (copy) = DECL_NO_TBAA_P (var);
  DECL_ARTIFICIAL (copy) = DECL_ARTIFICIAL (var);
  DECL_IGNORED_P (copy) = DECL_IGNORED_P (var);
  DECL_CONTEXT (copy) = DECL_CONTEXT (var);
  DECL_SOURCE_LOCATION (copy) = DECL_SOURCE_LOCATION (var);
  TREE_USED (copy) = 1;
  DECL_SEEN_IN_BIND_EXPR_P (copy) = 1;

  return copy;
}
/* Construct a new automatic decl similar to VAR.  */

static tree
omp_copy_decl_2 (tree var, tree name, tree type, omp_context *ctx)
{
  tree copy = copy_var_decl (var, name, type);

  DECL_CONTEXT (copy) = current_function_decl;
  TREE_CHAIN (copy) = ctx->block_vars;
  ctx->block_vars = copy;

  return copy;
}

static tree
omp_copy_decl_1 (tree var, omp_context *ctx)
{
  return omp_copy_decl_2 (var, DECL_NAME (var), TREE_TYPE (var), ctx);
}
/* Build tree nodes to access the field for VAR on the receiver side.  */

static tree
build_receiver_ref (tree var, bool by_ref, omp_context *ctx)
{
  tree x, field = lookup_field (var, ctx);

  /* If the receiver record type was remapped in the child function,
     remap the field into the new record type.  */
  x = maybe_lookup_field (field, ctx);
  if (x != NULL)
    field = x;

  x = build_fold_indirect_ref (ctx->receiver_decl);
  x = build3 (COMPONENT_REF, TREE_TYPE (field), x, field, NULL);
  if (by_ref)
    x = build_fold_indirect_ref (x);

  return x;
}
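
/* The reference built here is (*.omp_data_i).field, with one more
   dereference when BY_REF.  That is, for a by-value field of a shared
   scalar I, the child function accesses it as

	x = (*.omp_data_i).i;

   where .omp_data_i is CTX->RECEIVER_DECL, the child function's
   incoming pointer parameter created in create_omp_child_function.  */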
/* Build tree nodes to access VAR in the scope outer to CTX.  In the case
   of a parallel, this is a component reference; for workshare constructs
   this is some variable.  */

static tree
build_outer_var_ref (tree var, omp_context *ctx)
{
  tree x;

  if (is_global_var (maybe_lookup_decl_in_outer_ctx (var, ctx)))
    x = var;
  else if (is_variable_sized (var))
    {
      x = TREE_OPERAND (DECL_VALUE_EXPR (var), 0);
      x = build_outer_var_ref (x, ctx);
      x = build_fold_indirect_ref (x);
    }
  else if (is_parallel_ctx (ctx))
    {
      bool by_ref = use_pointer_for_field (var, NULL);
      x = build_receiver_ref (var, by_ref, ctx);
    }
  else if (ctx->outer)
    x = lookup_decl (var, ctx->outer);
  else if (is_reference (var))
    /* This can happen with orphaned constructs.  If var is reference, it is
       possible it is shared and as such valid.  */
    x = var;
  else
    gcc_unreachable ();

  if (is_reference (var))
    x = build_fold_indirect_ref (x);

  return x;
}
/* Build tree nodes to access the field for VAR on the sender side.  */

static tree
build_sender_ref (tree var, omp_context *ctx)
{
  tree field = lookup_field (var, ctx);
  return build3 (COMPONENT_REF, TREE_TYPE (field),
                 ctx->sender_decl, field, NULL);
}
/* Add a new field for VAR inside the structure CTX->SENDER_DECL.  */

static void
install_var_field (tree var, bool by_ref, omp_context *ctx)
{
  tree field, type;

  gcc_assert (!splay_tree_lookup (ctx->field_map, (splay_tree_key) var));

  type = TREE_TYPE (var);
  if (by_ref)
    type = build_pointer_type (type);

  field = build_decl (FIELD_DECL, DECL_NAME (var), type);

  /* Remember what variable this field was created for.  This does have a
     side effect of making dwarf2out ignore this member, so for helpful
     debugging we clear it later in delete_omp_context.  */
  DECL_ABSTRACT_ORIGIN (field) = var;

  insert_field_into_struct (ctx->record_type, field);

  splay_tree_insert (ctx->field_map, (splay_tree_key) var,
                     (splay_tree_value) field);
}
static tree
install_var_local (tree var, omp_context *ctx)
{
  tree new_var = omp_copy_decl_1 (var, ctx);
  insert_decl_map (&ctx->cb, var, new_var);
  return new_var;
}
/* Adjust the replacement for DECL in CTX for the new context.  This means
   copying the DECL_VALUE_EXPR, and fixing up the type.  */

static void
fixup_remapped_decl (tree decl, omp_context *ctx, bool private_debug)
{
  tree new_decl, size;

  new_decl = lookup_decl (decl, ctx);

  TREE_TYPE (new_decl) = remap_type (TREE_TYPE (decl), &ctx->cb);

  if ((!TREE_CONSTANT (DECL_SIZE (new_decl)) || private_debug)
      && DECL_HAS_VALUE_EXPR_P (decl))
    {
      tree ve = DECL_VALUE_EXPR (decl);
      walk_tree (&ve, copy_body_r, &ctx->cb, NULL);
      SET_DECL_VALUE_EXPR (new_decl, ve);
      DECL_HAS_VALUE_EXPR_P (new_decl) = 1;
    }

  if (!TREE_CONSTANT (DECL_SIZE (new_decl)))
    {
      size = remap_decl (DECL_SIZE (decl), &ctx->cb);
      if (size == error_mark_node)
        size = TYPE_SIZE (TREE_TYPE (new_decl));
      DECL_SIZE (new_decl) = size;

      size = remap_decl (DECL_SIZE_UNIT (decl), &ctx->cb);
      if (size == error_mark_node)
        size = TYPE_SIZE_UNIT (TREE_TYPE (new_decl));
      DECL_SIZE_UNIT (new_decl) = size;
    }
}
/* The callback for remap_decl.  Search all containing contexts for a
   mapping of the variable; this avoids having to duplicate the splay
   tree ahead of time.  We know a mapping doesn't already exist in the
   given context.  Create new mappings to implement default semantics.  */

static tree
omp_copy_decl (tree var, copy_body_data *cb)
{
  omp_context *ctx = (omp_context *) cb;
  tree new_var;

  if (TREE_CODE (var) == LABEL_DECL)
    {
      new_var = create_artificial_label ();
      DECL_CONTEXT (new_var) = current_function_decl;
      insert_decl_map (&ctx->cb, var, new_var);
      return new_var;
    }

  while (!is_parallel_ctx (ctx))
    {
      ctx = ctx->outer;
      if (ctx == NULL)
        return var;
      new_var = maybe_lookup_decl (var, ctx);
      if (new_var)
        return new_var;
    }

  if (is_global_var (var) || decl_function_context (var) != ctx->cb.src_fn)
    return var;

  return error_mark_node;
}
/* Debugging dumps for parallel regions.  */

void dump_omp_region (FILE *, struct omp_region *, int);
void debug_omp_region (struct omp_region *);
void debug_all_omp_regions (void);

/* Dump the parallel region tree rooted at REGION.  */

void
dump_omp_region (FILE *file, struct omp_region *region, int indent)
{
  fprintf (file, "%*sbb %d: %s\n", indent, "", region->entry->index,
           tree_code_name[region->type]);

  if (region->inner)
    dump_omp_region (file, region->inner, indent + 4);

  if (region->cont)
    fprintf (file, "%*sbb %d: OMP_CONTINUE\n", indent, "",
             region->cont->index);

  if (region->exit)
    fprintf (file, "%*sbb %d: OMP_RETURN\n", indent, "",
             region->exit->index);
  else
    fprintf (file, "%*s[no exit marker]\n", indent, "");

  if (region->next)
    dump_omp_region (file, region->next, indent);
}

void
debug_omp_region (struct omp_region *region)
{
  dump_omp_region (stderr, region, 0);
}

void
debug_all_omp_regions (void)
{
  dump_omp_region (stderr, root_omp_region, 0);
}
/* Create a new parallel region starting at STMT inside region PARENT.  */

struct omp_region *
new_omp_region (basic_block bb, enum tree_code type, struct omp_region *parent)
{
  struct omp_region *region = xcalloc (1, sizeof (*region));

  region->outer = parent;
  region->entry = bb;
  region->type = type;

  if (parent)
    {
      /* This is a nested region.  Add it to the list of inner
         regions in PARENT.  */
      region->next = parent->inner;
      parent->inner = region;
    }
  else
    {
      /* This is a toplevel region.  Add it to the list of toplevel
         regions in ROOT_OMP_REGION.  */
      region->next = root_omp_region;
      root_omp_region = region;
    }

  return region;
}
/* Release the memory associated with the region tree rooted at REGION.  */

static void
free_omp_region_1 (struct omp_region *region)
{
  struct omp_region *i, *n;

  for (i = region->inner; i ; i = n)
    {
      n = i->next;
      free_omp_region_1 (i);
    }

  free (region);
}

/* Release the memory for the entire omp region tree.  */

void
free_omp_regions (void)
{
  struct omp_region *r, *n;
  for (r = root_omp_region; r ; r = n)
    {
      n = r->next;
      free_omp_region_1 (r);
    }
  root_omp_region = NULL;
}
/* Create a new context, with OUTER_CTX being the surrounding context.  */

static omp_context *
new_omp_context (tree stmt, omp_context *outer_ctx)
{
  omp_context *ctx = XCNEW (omp_context);

  splay_tree_insert (all_contexts, (splay_tree_key) stmt,
                     (splay_tree_value) ctx);
  ctx->stmt = stmt;

  if (outer_ctx)
    {
      ctx->outer = outer_ctx;
      ctx->cb = outer_ctx->cb;
      ctx->cb.block = NULL;
      ctx->depth = outer_ctx->depth + 1;
    }
  else
    {
      ctx->cb.src_fn = current_function_decl;
      ctx->cb.dst_fn = current_function_decl;
      ctx->cb.src_node = cgraph_node (current_function_decl);
      ctx->cb.dst_node = ctx->cb.src_node;
      ctx->cb.src_cfun = cfun;
      ctx->cb.copy_decl = omp_copy_decl;
      ctx->cb.eh_region = -1;
      ctx->cb.transform_call_graph_edges = CB_CGE_MOVE;
      ctx->depth = 1;
    }

  ctx->cb.decl_map = pointer_map_create ();

  return ctx;
}
/* Destroy an omp_context data structure.  Called through the splay tree
   value delete callback.  */

static void
delete_omp_context (splay_tree_value value)
{
  omp_context *ctx = (omp_context *) value;

  pointer_map_destroy (ctx->cb.decl_map);

  if (ctx->field_map)
    splay_tree_delete (ctx->field_map);

  /* We hijacked DECL_ABSTRACT_ORIGIN earlier.  We need to clear it before
     it produces corrupt debug information.  */
  if (ctx->record_type)
    {
      tree t;
      for (t = TYPE_FIELDS (ctx->record_type); t ; t = TREE_CHAIN (t))
        DECL_ABSTRACT_ORIGIN (t) = NULL;
    }

  XDELETE (ctx);
}
/* Fix up RECEIVER_DECL with a type that has been remapped to the child
   context.  */

static void
fixup_child_record_type (omp_context *ctx)
{
  tree f, type = ctx->record_type;

  /* ??? It isn't sufficient to just call remap_type here, because
     variably_modified_type_p doesn't work the way we expect for
     record types.  Testing each field for whether it needs remapping
     and creating a new record by hand works, however.  */
  for (f = TYPE_FIELDS (type); f ; f = TREE_CHAIN (f))
    if (variably_modified_type_p (TREE_TYPE (f), ctx->cb.src_fn))
      break;
  if (f)
    {
      tree name, new_fields = NULL;

      type = lang_hooks.types.make_type (RECORD_TYPE);
      name = DECL_NAME (TYPE_NAME (ctx->record_type));
      name = build_decl (TYPE_DECL, name, type);
      TYPE_NAME (type) = name;

      for (f = TYPE_FIELDS (ctx->record_type); f ; f = TREE_CHAIN (f))
        {
          tree new_f = copy_node (f);
          DECL_CONTEXT (new_f) = type;
          TREE_TYPE (new_f) = remap_type (TREE_TYPE (f), &ctx->cb);
          TREE_CHAIN (new_f) = new_fields;
          new_fields = new_f;

          /* Arrange to be able to look up the receiver field
             given the sender field.  */
          splay_tree_insert (ctx->field_map, (splay_tree_key) f,
                             (splay_tree_value) new_f);
        }
      TYPE_FIELDS (type) = nreverse (new_fields);
      layout_type (type);
    }

  TREE_TYPE (ctx->receiver_decl) = build_pointer_type (type);
}
/* Instantiate decls as necessary in CTX to satisfy the data sharing
   specified by CLAUSES.  */

static void
scan_sharing_clauses (tree clauses, omp_context *ctx)
{
  tree c, decl;
  bool scan_array_reductions = false;

  for (c = clauses; c; c = OMP_CLAUSE_CHAIN (c))
    {
      bool by_ref;

      switch (OMP_CLAUSE_CODE (c))
        {
        case OMP_CLAUSE_PRIVATE:
          decl = OMP_CLAUSE_DECL (c);
          if (!is_variable_sized (decl))
            install_var_local (decl, ctx);
          break;

        case OMP_CLAUSE_SHARED:
          gcc_assert (is_parallel_ctx (ctx));
          decl = OMP_CLAUSE_DECL (c);
          gcc_assert (!COMPLETE_TYPE_P (TREE_TYPE (decl))
                      || !is_variable_sized (decl));
          by_ref = use_pointer_for_field (decl, ctx);
          /* Global variables don't need to be copied,
             the receiver side will use them directly.  */
          if (is_global_var (maybe_lookup_decl_in_outer_ctx (decl, ctx)))
            break;
          if (! TREE_READONLY (decl)
              || TREE_ADDRESSABLE (decl)
              || by_ref
              || is_reference (decl))
            {
              install_var_field (decl, by_ref, ctx);
              install_var_local (decl, ctx);
              break;
            }
          /* We don't need to copy const scalar vars back.  */
          OMP_CLAUSE_SET_CODE (c, OMP_CLAUSE_FIRSTPRIVATE);
          goto do_private;

        case OMP_CLAUSE_LASTPRIVATE:
          /* Let the corresponding firstprivate clause create
             the variable.  */
          if (OMP_CLAUSE_LASTPRIVATE_FIRSTPRIVATE (c))
            break;
          /* FALLTHRU */

        case OMP_CLAUSE_FIRSTPRIVATE:
        case OMP_CLAUSE_REDUCTION:
          decl = OMP_CLAUSE_DECL (c);
        do_private:
          if (is_variable_sized (decl))
            install_var_local (decl, ctx);
          else if (is_parallel_ctx (ctx)
                   && ! is_global_var (maybe_lookup_decl_in_outer_ctx (decl,
                                                                       ctx)))
            {
              by_ref = use_pointer_for_field (decl, NULL);
              install_var_field (decl, by_ref, ctx);
            }
          install_var_local (decl, ctx);
          break;

        case OMP_CLAUSE_COPYPRIVATE:
          if (ctx->outer)
            scan_omp (&OMP_CLAUSE_DECL (c), ctx->outer);
          /* FALLTHRU */

        case OMP_CLAUSE_COPYIN:
          decl = OMP_CLAUSE_DECL (c);
          by_ref = use_pointer_for_field (decl, NULL);
          install_var_field (decl, by_ref, ctx);
          break;

        case OMP_CLAUSE_DEFAULT:
          ctx->default_kind = OMP_CLAUSE_DEFAULT_KIND (c);
          break;

        case OMP_CLAUSE_IF:
        case OMP_CLAUSE_NUM_THREADS:
        case OMP_CLAUSE_SCHEDULE:
          if (ctx->outer)
            scan_omp (&OMP_CLAUSE_OPERAND (c, 0), ctx->outer);
          break;

        case OMP_CLAUSE_NOWAIT:
        case OMP_CLAUSE_ORDERED:
          break;

        default:
          gcc_unreachable ();
        }
    }

  for (c = clauses; c; c = OMP_CLAUSE_CHAIN (c))
    {
      switch (OMP_CLAUSE_CODE (c))
        {
        case OMP_CLAUSE_LASTPRIVATE:
          /* Let the corresponding firstprivate clause create
             the variable.  */
          if (OMP_CLAUSE_LASTPRIVATE_FIRSTPRIVATE (c))
            break;
          /* FALLTHRU */

        case OMP_CLAUSE_PRIVATE:
        case OMP_CLAUSE_FIRSTPRIVATE:
        case OMP_CLAUSE_REDUCTION:
          decl = OMP_CLAUSE_DECL (c);
          if (is_variable_sized (decl))
            install_var_local (decl, ctx);
          fixup_remapped_decl (decl, ctx,
                               OMP_CLAUSE_CODE (c) == OMP_CLAUSE_PRIVATE
                               && OMP_CLAUSE_PRIVATE_DEBUG (c));
          if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_REDUCTION
              && OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
            scan_array_reductions = true;
          break;

        case OMP_CLAUSE_SHARED:
          decl = OMP_CLAUSE_DECL (c);
          if (! is_global_var (maybe_lookup_decl_in_outer_ctx (decl, ctx)))
            fixup_remapped_decl (decl, ctx, false);
          break;

        case OMP_CLAUSE_COPYPRIVATE:
        case OMP_CLAUSE_COPYIN:
        case OMP_CLAUSE_DEFAULT:
        case OMP_CLAUSE_IF:
        case OMP_CLAUSE_NUM_THREADS:
        case OMP_CLAUSE_SCHEDULE:
        case OMP_CLAUSE_NOWAIT:
        case OMP_CLAUSE_ORDERED:
          break;

        default:
          gcc_unreachable ();
        }
    }

  if (scan_array_reductions)
    for (c = clauses; c; c = OMP_CLAUSE_CHAIN (c))
      if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_REDUCTION
          && OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
        {
          scan_omp (&OMP_CLAUSE_REDUCTION_INIT (c), ctx);
          scan_omp (&OMP_CLAUSE_REDUCTION_MERGE (c), ctx);
        }
}
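
/* As an example of the two passes above: for

	#pragma omp parallel shared (a) firstprivate (b) private (c)

   the first pass creates .omp_data_s fields for A and B plus local
   replacement decls for A, B and C in the child context, and the
   second pass fixes up the remapped decls (types, DECL_VALUE_EXPRs)
   once all replacements exist.  */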
/* Create a new name for omp child function.  Returns an identifier.  */

static GTY(()) unsigned int tmp_ompfn_id_num;

static tree
create_omp_child_function_name (void)
{
  tree name = DECL_ASSEMBLER_NAME (current_function_decl);
  size_t len = IDENTIFIER_LENGTH (name);
  char *tmp_name, *prefix;

  prefix = alloca (len + sizeof ("_omp_fn"));
  memcpy (prefix, IDENTIFIER_POINTER (name), len);
  strcpy (prefix + len, "_omp_fn");
#ifndef NO_DOT_IN_LABEL
  prefix[len] = '.';
#elif !defined NO_DOLLAR_IN_LABEL
  prefix[len] = '$';
#endif
  ASM_FORMAT_PRIVATE_NAME (tmp_name, prefix, tmp_ompfn_id_num++);
  return get_identifier (tmp_name);
}
/* Build a decl for the omp child function.  It'll not contain a body
   yet, just the bare decl.  */

static void
create_omp_child_function (omp_context *ctx)
{
  tree decl, type, name, t;

  name = create_omp_child_function_name ();
  type = build_function_type_list (void_type_node, ptr_type_node, NULL_TREE);

  decl = build_decl (FUNCTION_DECL, name, type);
  decl = lang_hooks.decls.pushdecl (decl);

  ctx->cb.dst_fn = decl;

  TREE_STATIC (decl) = 1;
  TREE_USED (decl) = 1;
  DECL_ARTIFICIAL (decl) = 1;
  DECL_IGNORED_P (decl) = 0;
  TREE_PUBLIC (decl) = 0;
  DECL_UNINLINABLE (decl) = 1;
  DECL_EXTERNAL (decl) = 0;
  DECL_CONTEXT (decl) = NULL_TREE;
  DECL_INITIAL (decl) = make_node (BLOCK);

  t = build_decl (RESULT_DECL, NULL_TREE, void_type_node);
  DECL_ARTIFICIAL (t) = 1;
  DECL_IGNORED_P (t) = 1;
  DECL_RESULT (decl) = t;

  t = build_decl (PARM_DECL, get_identifier (".omp_data_i"), ptr_type_node);
  DECL_ARTIFICIAL (t) = 1;
  DECL_ARG_TYPE (t) = ptr_type_node;
  DECL_CONTEXT (t) = current_function_decl;
  TREE_USED (t) = 1;
  DECL_ARGUMENTS (decl) = t;
  ctx->receiver_decl = t;

  /* Allocate memory for the function structure.  The call to
     allocate_struct_function clobbers CFUN, so we need to restore
     it afterward.  */
  push_struct_function (decl);
  DECL_SOURCE_LOCATION (decl) = EXPR_LOCATION (ctx->stmt);
  cfun->function_end_locus = EXPR_LOCATION (ctx->stmt);
  pop_cfun ();
}
/* Scan an OpenMP parallel directive.  */

static void
scan_omp_parallel (tree *stmt_p, omp_context *outer_ctx)
{
  omp_context *ctx;
  tree name;

  /* Ignore parallel directives with empty bodies, unless there
     are copyin clauses.  */
  if (optimize > 0
      && empty_body_p (OMP_PARALLEL_BODY (*stmt_p))
      && find_omp_clause (OMP_CLAUSES (*stmt_p), OMP_CLAUSE_COPYIN) == NULL)
    {
      *stmt_p = build_empty_stmt ();
      return;
    }

  ctx = new_omp_context (*stmt_p, outer_ctx);
  if (parallel_nesting_level > 1)
    ctx->is_nested = true;
  ctx->field_map = splay_tree_new (splay_tree_compare_pointers, 0, 0);
  ctx->default_kind = OMP_CLAUSE_DEFAULT_SHARED;
  ctx->record_type = lang_hooks.types.make_type (RECORD_TYPE);
  name = create_tmp_var_name (".omp_data_s");
  name = build_decl (TYPE_DECL, name, ctx->record_type);
  TYPE_NAME (ctx->record_type) = name;
  create_omp_child_function (ctx);
  OMP_PARALLEL_FN (*stmt_p) = ctx->cb.dst_fn;

  scan_sharing_clauses (OMP_PARALLEL_CLAUSES (*stmt_p), ctx);
  scan_omp (&OMP_PARALLEL_BODY (*stmt_p), ctx);

  if (TYPE_FIELDS (ctx->record_type) == NULL)
    ctx->record_type = ctx->receiver_decl = NULL;
  else
    {
      layout_type (ctx->record_type);
      fixup_child_record_type (ctx);
    }
}
/* Scan an OpenMP loop directive.  */

static void
scan_omp_for (tree *stmt_p, omp_context *outer_ctx)
{
  omp_context *ctx;
  tree stmt;

  stmt = *stmt_p;
  ctx = new_omp_context (stmt, outer_ctx);

  scan_sharing_clauses (OMP_FOR_CLAUSES (stmt), ctx);

  scan_omp (&OMP_FOR_PRE_BODY (stmt), ctx);
  scan_omp (&OMP_FOR_INIT (stmt), ctx);
  scan_omp (&OMP_FOR_COND (stmt), ctx);
  scan_omp (&OMP_FOR_INCR (stmt), ctx);
  scan_omp (&OMP_FOR_BODY (stmt), ctx);
}
/* Scan an OpenMP sections directive.  */

static void
scan_omp_sections (tree *stmt_p, omp_context *outer_ctx)
{
  tree stmt;
  omp_context *ctx;

  stmt = *stmt_p;
  ctx = new_omp_context (stmt, outer_ctx);
  scan_sharing_clauses (OMP_SECTIONS_CLAUSES (stmt), ctx);
  scan_omp (&OMP_SECTIONS_BODY (stmt), ctx);
}
/* Scan an OpenMP single directive.  */

static void
scan_omp_single (tree *stmt_p, omp_context *outer_ctx)
{
  tree stmt = *stmt_p;
  omp_context *ctx;
  tree name;

  ctx = new_omp_context (stmt, outer_ctx);
  ctx->field_map = splay_tree_new (splay_tree_compare_pointers, 0, 0);
  ctx->record_type = lang_hooks.types.make_type (RECORD_TYPE);
  name = create_tmp_var_name (".omp_copy_s");
  name = build_decl (TYPE_DECL, name, ctx->record_type);
  TYPE_NAME (ctx->record_type) = name;

  scan_sharing_clauses (OMP_SINGLE_CLAUSES (stmt), ctx);
  scan_omp (&OMP_SINGLE_BODY (stmt), ctx);

  if (TYPE_FIELDS (ctx->record_type) == NULL)
    ctx->record_type = NULL;
  else
    layout_type (ctx->record_type);
}
/* Check OpenMP nesting restrictions.  */
static void
check_omp_nesting_restrictions (tree t, omp_context *ctx)
{
  switch (TREE_CODE (t))
    {
    case OMP_FOR:
    case OMP_SECTIONS:
    case OMP_SINGLE:
      for (; ctx != NULL; ctx = ctx->outer)
        switch (TREE_CODE (ctx->stmt))
          {
          case OMP_FOR:
          case OMP_SECTIONS:
          case OMP_SINGLE:
          case OMP_ORDERED:
          case OMP_MASTER:
          case OMP_CRITICAL:
            warning (0, "work-sharing region may not be closely nested inside "
                        "of work-sharing, critical, ordered or master region");
            return;
          case OMP_PARALLEL:
            return;
          default:
            break;
          }
      break;
    case OMP_MASTER:
      for (; ctx != NULL; ctx = ctx->outer)
        switch (TREE_CODE (ctx->stmt))
          {
          case OMP_FOR:
          case OMP_SECTIONS:
          case OMP_SINGLE:
            warning (0, "master region may not be closely nested inside "
                        "of work-sharing region");
            return;
          case OMP_PARALLEL:
            return;
          default:
            break;
          }
      break;
    case OMP_ORDERED:
      for (; ctx != NULL; ctx = ctx->outer)
        switch (TREE_CODE (ctx->stmt))
          {
          case OMP_CRITICAL:
            warning (0, "ordered region may not be closely nested inside "
                        "of critical region");
            return;
          case OMP_FOR:
            if (find_omp_clause (OMP_CLAUSES (ctx->stmt),
                                 OMP_CLAUSE_ORDERED) == NULL)
              warning (0, "ordered region must be closely nested inside "
                          "a loop region with an ordered clause");
            return;
          case OMP_PARALLEL:
            return;
          default:
            break;
          }
      break;
    case OMP_CRITICAL:
      for (; ctx != NULL; ctx = ctx->outer)
        if (TREE_CODE (ctx->stmt) == OMP_CRITICAL
            && OMP_CRITICAL_NAME (t) == OMP_CRITICAL_NAME (ctx->stmt))
          {
            warning (0, "critical region may not be nested inside a critical "
                        "region with the same name");
            return;
          }
      break;
    default:
      break;
    }
}
/* Callback for walk_stmts used to scan for OpenMP directives at TP.  */

static tree
scan_omp_1 (tree *tp, int *walk_subtrees, void *data)
{
  struct walk_stmt_info *wi = data;
  omp_context *ctx = wi->info;
  tree t = *tp;

  if (EXPR_HAS_LOCATION (t))
    input_location = EXPR_LOCATION (t);

  /* Check the OpenMP nesting restrictions.  */
  if (OMP_DIRECTIVE_P (t) && ctx != NULL)
    check_omp_nesting_restrictions (t, ctx);

  *walk_subtrees = 0;
  switch (TREE_CODE (t))
    {
    case OMP_PARALLEL:
      parallel_nesting_level++;
      scan_omp_parallel (tp, ctx);
      parallel_nesting_level--;
      break;

    case OMP_FOR:
      scan_omp_for (tp, ctx);
      break;

    case OMP_SECTIONS:
      scan_omp_sections (tp, ctx);
      break;

    case OMP_SINGLE:
      scan_omp_single (tp, ctx);
      break;

    case OMP_SECTION:
    case OMP_MASTER:
    case OMP_ORDERED:
    case OMP_CRITICAL:
      ctx = new_omp_context (*tp, ctx);
      scan_omp (&OMP_BODY (*tp), ctx);
      break;

    case BIND_EXPR:
      {
        tree var;
        *walk_subtrees = 1;

        for (var = BIND_EXPR_VARS (t); var ; var = TREE_CHAIN (var))
          insert_decl_map (&ctx->cb, var, var);
      }
      break;

    case VAR_DECL:
    case PARM_DECL:
    case LABEL_DECL:
    case RESULT_DECL:
      if (ctx)
        *tp = remap_decl (t, &ctx->cb);
      break;

    default:
      if (ctx && TYPE_P (t))
        *tp = remap_type (t, &ctx->cb);
      else if (!DECL_P (t))
        *walk_subtrees = 1;
      break;
    }

  return NULL_TREE;
}
/* Scan all the statements starting at STMT_P.  CTX contains context
   information about the OpenMP directives and clauses found during
   the scan.  */

static void
scan_omp (tree *stmt_p, omp_context *ctx)
{
  location_t saved_location;
  struct walk_stmt_info wi;

  memset (&wi, 0, sizeof (wi));
  wi.callback = scan_omp_1;
  wi.info = ctx;
  wi.want_bind_expr = (ctx != NULL);
  wi.want_locations = true;

  saved_location = input_location;
  walk_stmts (&wi, stmt_p);
  input_location = saved_location;
}
/* Re-gimplification and code generation routines.  */

/* Build a call to GOMP_barrier.  */

static tree
build_omp_barrier (void)
{
  return build_call_expr (built_in_decls[BUILT_IN_GOMP_BARRIER], 0);
}
/* If a context was created for STMT when it was scanned, return it.  */

static omp_context *
maybe_lookup_ctx (tree stmt)
{
  splay_tree_node n;
  n = splay_tree_lookup (all_contexts, (splay_tree_key) stmt);
  return n ? (omp_context *) n->value : NULL;
}
/* Find the mapping for DECL in CTX or the immediately enclosing
   context that has a mapping for DECL.

   If CTX is a nested parallel directive, we may have to use the decl
   mappings created in CTX's parent context.  Suppose that we have the
   following parallel nesting (variable UIDs showed for clarity):

	iD.1562 = 0;
	#omp parallel shared(iD.1562)		-> outer parallel
	  iD.1562 = iD.1562 + 1;

	  #omp parallel shared (iD.1562)	-> inner parallel
	     iD.1562 = iD.1562 - 1;

   Each parallel structure will create a distinct .omp_data_s structure
   for copying iD.1562 in/out of the directive:

	outer parallel		.omp_data_s.1.i -> iD.1562
	inner parallel		.omp_data_s.2.i -> iD.1562

   A shared variable mapping will produce a copy-out operation before
   the parallel directive and a copy-in operation after it.  So, in
   this case we would have:

	iD.1562 = 0;
	.omp_data_o.1.i = iD.1562;
	#omp parallel shared(iD.1562)		-> outer parallel
	  .omp_data_i.1 = &.omp_data_o.1
	  .omp_data_i.1->i = .omp_data_i.1->i + 1;

	  .omp_data_o.2.i = iD.1562;		-> **
	  #omp parallel shared(iD.1562)		-> inner parallel
	    .omp_data_i.2 = &.omp_data_o.2
	    .omp_data_i.2->i = .omp_data_i.2->i - 1;

   ** This is a problem.  The symbol iD.1562 cannot be referenced
   inside the body of the outer parallel region.  But since we are
   emitting this copy operation while expanding the inner parallel
   directive, we need to access the CTX structure of the outer
   parallel directive to get the correct mapping:

	.omp_data_o.2.i = .omp_data_i.1->i

   Since there may be other workshare or parallel directives enclosing
   the parallel directive, it may be necessary to walk up the context
   parent chain.  This is not a problem in general because nested
   parallelism happens only rarely.  */

static tree
lookup_decl_in_outer_ctx (tree decl, omp_context *ctx)
{
  tree t;
  omp_context *up;

  for (up = ctx->outer, t = NULL; up && t == NULL; up = up->outer)
    t = maybe_lookup_decl (decl, up);

  gcc_assert (!ctx->is_nested || t || is_global_var (decl));

  return t ? t : decl;
}
/* Similar to lookup_decl_in_outer_ctx, but return DECL if not found
   in outer contexts.  */

static tree
maybe_lookup_decl_in_outer_ctx (tree decl, omp_context *ctx)
{
  tree t;
  omp_context *up;

  for (up = ctx->outer, t = NULL; up && t == NULL; up = up->outer)
    t = maybe_lookup_decl (decl, up);

  return t ? t : decl;
}
/* Construct the initialization value for reduction CLAUSE.  */

tree
omp_reduction_init (tree clause, tree type)
{
  switch (OMP_CLAUSE_REDUCTION_CODE (clause))
    {
    case PLUS_EXPR:
    case MINUS_EXPR:
    case BIT_IOR_EXPR:
    case BIT_XOR_EXPR:
    case TRUTH_OR_EXPR:
    case TRUTH_ORIF_EXPR:
    case TRUTH_XOR_EXPR:
    case NE_EXPR:
      return fold_convert (type, integer_zero_node);

    case MULT_EXPR:
    case TRUTH_AND_EXPR:
    case TRUTH_ANDIF_EXPR:
    case EQ_EXPR:
      return fold_convert (type, integer_one_node);

    case BIT_AND_EXPR:
      return fold_convert (type, integer_minus_one_node);

    case MAX_EXPR:
      if (SCALAR_FLOAT_TYPE_P (type))
        {
          REAL_VALUE_TYPE max, min;
          if (HONOR_INFINITIES (TYPE_MODE (type)))
            {
              real_inf (&max);
              real_arithmetic (&min, NEGATE_EXPR, &max, NULL);
            }
          else
            real_maxval (&min, 1, TYPE_MODE (type));
          return build_real (type, min);
        }
      else
        {
          gcc_assert (INTEGRAL_TYPE_P (type));
          return TYPE_MIN_VALUE (type);
        }

    case MIN_EXPR:
      if (SCALAR_FLOAT_TYPE_P (type))
        {
          REAL_VALUE_TYPE max;
          if (HONOR_INFINITIES (TYPE_MODE (type)))
            real_inf (&max);
          else
            real_maxval (&max, 0, TYPE_MODE (type));
          return build_real (type, max);
        }
      else
        {
          gcc_assert (INTEGRAL_TYPE_P (type));
          return TYPE_MAX_VALUE (type);
        }

    default:
      gcc_unreachable ();
    }
}
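
/* The initializers chosen above are the identity values of the
   reduction operators:

	+, -, |, ^, ||		0
	*, &&			1
	&			~0 (all ones)
	min			+Inf, or TYPE_MAX_VALUE for integers
	max			-Inf, or TYPE_MIN_VALUE for integers

   so each thread's private accumulator starts out neutral with respect
   to the final combining step.  */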
/* Generate code to implement the input clauses, FIRSTPRIVATE and COPYIN,
   from the receiver (aka child) side and initializers for REFERENCE_TYPE
   private variables.  Initialization statements go in ILIST, while calls
   to destructors go in DLIST.  */

static void
lower_rec_input_clauses (tree clauses, tree *ilist, tree *dlist,
                         omp_context *ctx)
{
  tree_stmt_iterator diter;
  tree c, dtor, copyin_seq, x, ptr;
  bool copyin_by_ref = false;
  bool lastprivate_firstprivate = false;
  int pass;

  *dlist = alloc_stmt_list ();
  diter = tsi_start (*dlist);
  copyin_seq = NULL;

  /* Do all the fixed sized types in the first pass, and the variable sized
     types in the second pass.  This makes sure that the scalar arguments to
     the variable sized types are processed before we use them in the
     variable sized operations.  */
  for (pass = 0; pass < 2; ++pass)
    {
      for (c = clauses; c ; c = OMP_CLAUSE_CHAIN (c))
        {
          enum omp_clause_code c_kind = OMP_CLAUSE_CODE (c);
          tree var, new_var;
          bool by_ref;

          switch (c_kind)
            {
            case OMP_CLAUSE_PRIVATE:
              if (OMP_CLAUSE_PRIVATE_DEBUG (c))
                continue;
              break;
            case OMP_CLAUSE_SHARED:
              if (maybe_lookup_decl (OMP_CLAUSE_DECL (c), ctx) == NULL)
                {
                  gcc_assert (is_global_var (OMP_CLAUSE_DECL (c)));
                  continue;
                }
            case OMP_CLAUSE_FIRSTPRIVATE:
            case OMP_CLAUSE_COPYIN:
            case OMP_CLAUSE_REDUCTION:
              break;
            case OMP_CLAUSE_LASTPRIVATE:
              if (OMP_CLAUSE_LASTPRIVATE_FIRSTPRIVATE (c))
                {
                  lastprivate_firstprivate = true;
                  if (pass != 0)
                    continue;
                }
              break;
            default:
              continue;
            }

          new_var = var = OMP_CLAUSE_DECL (c);
          if (c_kind != OMP_CLAUSE_COPYIN)
            new_var = lookup_decl (var, ctx);

          if (c_kind == OMP_CLAUSE_SHARED || c_kind == OMP_CLAUSE_COPYIN)
            {
              if (pass != 0)
                continue;
            }
          else if (is_variable_sized (var))
            {
              /* For variable sized types, we need to allocate the
                 actual storage here.  Call alloca and store the
                 result in the pointer decl that we created elsewhere.  */
              if (pass == 0)
                continue;

              ptr = DECL_VALUE_EXPR (new_var);
              gcc_assert (TREE_CODE (ptr) == INDIRECT_REF);
              ptr = TREE_OPERAND (ptr, 0);
              gcc_assert (DECL_P (ptr));

              x = TYPE_SIZE_UNIT (TREE_TYPE (new_var));
              x = build_call_expr (built_in_decls[BUILT_IN_ALLOCA], 1, x);
              x = fold_convert (TREE_TYPE (ptr), x);
              x = build_gimple_modify_stmt (ptr, x);
              gimplify_and_add (x, ilist);
            }
          else if (is_reference (var))
            {
              /* For references that are being privatized for Fortran,
                 allocate new backing storage for the new pointer
                 variable.  This allows us to avoid changing all the
                 code that expects a pointer to something that expects
                 a direct variable.  Note that this doesn't apply to
                 C++, since reference types are disallowed in data
                 sharing clauses there, except for NRV optimized
                 return values.  */
              if (pass == 0)
                continue;

              x = TYPE_SIZE_UNIT (TREE_TYPE (TREE_TYPE (new_var)));
              if (TREE_CONSTANT (x))
                {
                  const char *name = NULL;
                  if (DECL_NAME (var))
                    name = IDENTIFIER_POINTER (DECL_NAME (new_var));

                  x = create_tmp_var_raw (TREE_TYPE (TREE_TYPE (new_var)),
                                          name);
                  gimple_add_tmp_var (x);
                  x = build_fold_addr_expr_with_type (x, TREE_TYPE (new_var));
                }
              else
                {
                  x = build_call_expr (built_in_decls[BUILT_IN_ALLOCA], 1, x);
                  x = fold_convert (TREE_TYPE (new_var), x);
                }

              x = build_gimple_modify_stmt (new_var, x);
              gimplify_and_add (x, ilist);

              new_var = build_fold_indirect_ref (new_var);
            }
          else if (c_kind == OMP_CLAUSE_REDUCTION
                   && OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
            {
              if (pass == 0)
                continue;
            }
          else if (pass != 0)
            continue;

          switch (OMP_CLAUSE_CODE (c))
            {
            case OMP_CLAUSE_SHARED:
              /* Shared global vars are just accessed directly.  */
              if (is_global_var (new_var))
                break;
              /* Set up the DECL_VALUE_EXPR for shared variables now.  This
                 needs to be delayed until after fixup_child_record_type so
                 that we get the correct type during the dereference.  */
              by_ref = use_pointer_for_field (var, ctx);
              x = build_receiver_ref (var, by_ref, ctx);
              SET_DECL_VALUE_EXPR (new_var, x);
              DECL_HAS_VALUE_EXPR_P (new_var) = 1;

              /* ??? If VAR is not passed by reference, and the variable
                 hasn't been initialized yet, then we'll get a warning for
                 the store into the omp_data_s structure.  Ideally, we'd be
                 able to notice this and not store anything at all, but
                 we're generating code too early.  Suppress the warning.  */
              if (!by_ref)
                TREE_NO_WARNING (var) = 1;
              break;

            case OMP_CLAUSE_LASTPRIVATE:
              if (OMP_CLAUSE_LASTPRIVATE_FIRSTPRIVATE (c))
                break;
              /* FALLTHRU */

            case OMP_CLAUSE_PRIVATE:
              x = lang_hooks.decls.omp_clause_default_ctor (c, new_var);
              if (x)
                gimplify_and_add (x, ilist);
              /* FALLTHRU */

            do_dtor:
              x = lang_hooks.decls.omp_clause_dtor (c, new_var);
              if (x)
                {
                  dtor = x;
                  gimplify_stmt (&dtor);
                  tsi_link_before (&diter, dtor, TSI_SAME_STMT);
                }
              break;

            case OMP_CLAUSE_FIRSTPRIVATE:
              x = build_outer_var_ref (var, ctx);
              x = lang_hooks.decls.omp_clause_copy_ctor (c, new_var, x);
              gimplify_and_add (x, ilist);
              goto do_dtor;
              break;

            case OMP_CLAUSE_COPYIN:
              by_ref = use_pointer_for_field (var, NULL);
              x = build_receiver_ref (var, by_ref, ctx);
              x = lang_hooks.decls.omp_clause_assign_op (c, new_var, x);
              append_to_statement_list (x, &copyin_seq);
              copyin_by_ref |= by_ref;
              break;

            case OMP_CLAUSE_REDUCTION:
              if (OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
                {
                  gimplify_and_add (OMP_CLAUSE_REDUCTION_INIT (c), ilist);
                  OMP_CLAUSE_REDUCTION_INIT (c) = NULL;
                }
              else
                {
                  x = omp_reduction_init (c, TREE_TYPE (new_var));
                  gcc_assert (TREE_CODE (TREE_TYPE (new_var)) != ARRAY_TYPE);
                  x = build_gimple_modify_stmt (new_var, x);
                  gimplify_and_add (x, ilist);
                }
              break;

            default:
              gcc_unreachable ();
            }
        }
    }

  /* The copyin sequence is not to be executed by the main thread, since
     that would result in self-copies.  Perhaps not visible to scalars,
     but it certainly is to C++ operator=.  */
  if (copyin_seq)
    {
      x = build_call_expr (built_in_decls[BUILT_IN_OMP_GET_THREAD_NUM], 0);
      x = build2 (NE_EXPR, boolean_type_node, x,
                  build_int_cst (TREE_TYPE (x), 0));
      x = build3 (COND_EXPR, void_type_node, x, copyin_seq, NULL);
      gimplify_and_add (x, ilist);
    }

  /* If any copyin variable is passed by reference, we must ensure the
     master thread doesn't modify it before it is copied over in all
     threads.  Similarly for variables in both firstprivate and
     lastprivate clauses we need to ensure the lastprivate copying
     happens after firstprivate copying in all threads.  */
  if (copyin_by_ref || lastprivate_firstprivate)
    gimplify_and_add (build_omp_barrier (), ilist);
}
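
/* For instance, a sketch of the receiver-side code built here for

	#pragma omp parallel firstprivate (x) copyin (t)

   (temporary and field names are hypothetical):

	x.1 = .omp_data_i->x;			   firstprivate copy ctor
	if (__builtin_omp_get_thread_num () != 0)
	  t = .omp_data_i->t;			   copyin, guarded so the
						   master skips the self-copy  */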
/* Generate code to implement the LASTPRIVATE clauses.  This is used for
   both parallel and workshare constructs.  PREDICATE may be NULL if it's
   always true.  */

static void
lower_lastprivate_clauses (tree clauses, tree predicate, tree *stmt_list,
                           omp_context *ctx)
{
  tree sub_list, x, c;

  /* Early exit if there are no lastprivate clauses.  */
  clauses = find_omp_clause (clauses, OMP_CLAUSE_LASTPRIVATE);
  if (clauses == NULL)
    {
      /* If this was a workshare clause, see if it had been combined
         with its parallel.  In that case, look for the clauses on the
         parallel statement itself.  */
      if (is_parallel_ctx (ctx))
        return;

      ctx = ctx->outer;
      if (ctx == NULL || !is_parallel_ctx (ctx))
        return;

      clauses = find_omp_clause (OMP_PARALLEL_CLAUSES (ctx->stmt),
                                 OMP_CLAUSE_LASTPRIVATE);
      if (clauses == NULL)
        return;
    }

  sub_list = alloc_stmt_list ();

  for (c = clauses; c ; c = OMP_CLAUSE_CHAIN (c))
    {
      tree var, new_var;

      if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_LASTPRIVATE)
        continue;

      var = OMP_CLAUSE_DECL (c);
      new_var = lookup_decl (var, ctx);

      x = build_outer_var_ref (var, ctx);
      if (is_reference (var))
        new_var = build_fold_indirect_ref (new_var);
      x = lang_hooks.decls.omp_clause_assign_op (c, x, new_var);
      append_to_statement_list (x, &sub_list);
    }

  if (predicate)
    x = build3 (COND_EXPR, void_type_node, predicate, sub_list, NULL);
  else
    x = sub_list;

  gimplify_and_add (x, stmt_list);
}
/* Generate code to implement the REDUCTION clauses.  */

static void
lower_reduction_clauses (tree clauses, tree *stmt_list, omp_context *ctx)
{
  tree sub_list = NULL, x, c;
  int count = 0;

  /* First see if there is exactly one reduction clause.  Use OMP_ATOMIC
     update in that case, otherwise use a lock.  */
  for (c = clauses; c && count < 2; c = OMP_CLAUSE_CHAIN (c))
    if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_REDUCTION)
      {
        if (OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
          {
            /* Never use OMP_ATOMIC for array reductions.  */
            count = -1;
            break;
          }
        count++;
      }

  if (count == 0)
    return;

  for (c = clauses; c ; c = OMP_CLAUSE_CHAIN (c))
    {
      tree var, ref, new_var;
      enum tree_code code;

      if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_REDUCTION)
        continue;

      var = OMP_CLAUSE_DECL (c);
      new_var = lookup_decl (var, ctx);
      if (is_reference (var))
        new_var = build_fold_indirect_ref (new_var);
      ref = build_outer_var_ref (var, ctx);
      code = OMP_CLAUSE_REDUCTION_CODE (c);

      /* reduction(-:var) sums up the partial results, so it acts
         identically to reduction(+:var).  */
      if (code == MINUS_EXPR)
        code = PLUS_EXPR;

      if (count == 1)
        {
          tree addr = build_fold_addr_expr (ref);

          addr = save_expr (addr);
          ref = build1 (INDIRECT_REF, TREE_TYPE (TREE_TYPE (addr)), addr);
          x = fold_build2 (code, TREE_TYPE (ref), ref, new_var);
          x = build2 (OMP_ATOMIC, void_type_node, addr, x);
          gimplify_and_add (x, stmt_list);
          return;
        }

      if (OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
        {
          tree placeholder = OMP_CLAUSE_REDUCTION_PLACEHOLDER (c);

          if (is_reference (var))
            ref = build_fold_addr_expr (ref);
          SET_DECL_VALUE_EXPR (placeholder, ref);
          DECL_HAS_VALUE_EXPR_P (placeholder) = 1;
          gimplify_and_add (OMP_CLAUSE_REDUCTION_MERGE (c), &sub_list);
          OMP_CLAUSE_REDUCTION_MERGE (c) = NULL;
          OMP_CLAUSE_REDUCTION_PLACEHOLDER (c) = NULL;
        }
      else
        {
          x = build2 (code, TREE_TYPE (ref), ref, new_var);
          ref = build_outer_var_ref (var, ctx);
          x = build_gimple_modify_stmt (ref, x);
          append_to_statement_list (x, &sub_list);
        }
    }

  x = build_call_expr (built_in_decls[BUILT_IN_GOMP_ATOMIC_START], 0);
  gimplify_and_add (x, stmt_list);

  gimplify_and_add (sub_list, stmt_list);

  x = build_call_expr (built_in_decls[BUILT_IN_GOMP_ATOMIC_END], 0);
  gimplify_and_add (x, stmt_list);
}
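
/* So for a single scalar clause such as reduction (+:s) the combining
   step is one atomic update, roughly

	#pragma omp atomic
	*&s_outer = *&s_outer + s_private;

   while two or more clauses (or an array reduction) are folded together
   under the global lock:

	GOMP_atomic_start ();
	s1_outer = s1_outer + s1_private;
	s2_outer = s2_outer * s2_private;
	GOMP_atomic_end ();  */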
/* Generate code to implement the COPYPRIVATE clauses.  */

static void
lower_copyprivate_clauses (tree clauses, tree *slist, tree *rlist,
                           omp_context *ctx)
{
  tree c;

  for (c = clauses; c ; c = OMP_CLAUSE_CHAIN (c))
    {
      tree var, ref, x;
      bool by_ref;

      if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_COPYPRIVATE)
        continue;

      var = OMP_CLAUSE_DECL (c);
      by_ref = use_pointer_for_field (var, NULL);

      ref = build_sender_ref (var, ctx);
      x = lookup_decl_in_outer_ctx (var, ctx);
      x = by_ref ? build_fold_addr_expr (x) : x;
      x = build_gimple_modify_stmt (ref, x);
      gimplify_and_add (x, slist);

      ref = build_receiver_ref (var, by_ref, ctx);
      if (is_reference (var))
        {
          ref = build_fold_indirect_ref (ref);
          var = build_fold_indirect_ref (var);
        }
      x = lang_hooks.decls.omp_clause_assign_op (c, var, ref);
      gimplify_and_add (x, rlist);
    }
}
/* Generate code to implement the clauses, FIRSTPRIVATE, COPYIN, LASTPRIVATE,
   and REDUCTION from the sender (aka parent) side.  */

static void
lower_send_clauses (tree clauses, tree *ilist, tree *olist, omp_context *ctx)
{
  tree c;

  for (c = clauses; c ; c = OMP_CLAUSE_CHAIN (c))
    {
      tree val, ref, x, var;
      bool by_ref, do_in = false, do_out = false;

      switch (OMP_CLAUSE_CODE (c))
        {
        case OMP_CLAUSE_FIRSTPRIVATE:
        case OMP_CLAUSE_COPYIN:
        case OMP_CLAUSE_LASTPRIVATE:
        case OMP_CLAUSE_REDUCTION:
          break;
        default:
          continue;
        }

      val = OMP_CLAUSE_DECL (c);
      var = lookup_decl_in_outer_ctx (val, ctx);

      if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_COPYIN
          && is_global_var (var))
        continue;
      if (is_variable_sized (val))
        continue;
      by_ref = use_pointer_for_field (val, NULL);

      switch (OMP_CLAUSE_CODE (c))
        {
        case OMP_CLAUSE_FIRSTPRIVATE:
        case OMP_CLAUSE_COPYIN:
          do_in = true;
          break;

        case OMP_CLAUSE_LASTPRIVATE:
          if (by_ref || is_reference (val))
            {
              if (OMP_CLAUSE_LASTPRIVATE_FIRSTPRIVATE (c))
                continue;
              do_in = true;
            }
          else
            do_out = true;
          break;

        case OMP_CLAUSE_REDUCTION:
          do_in = true;
          do_out = !(by_ref || is_reference (val));
          break;

        default:
          gcc_unreachable ();
        }

      if (do_in)
        {
          ref = build_sender_ref (val, ctx);
          x = by_ref ? build_fold_addr_expr (var) : var;
          x = build_gimple_modify_stmt (ref, x);
          gimplify_and_add (x, ilist);
        }

      if (do_out)
        {
          ref = build_sender_ref (val, ctx);
          x = build_gimple_modify_stmt (var, ref);
          gimplify_and_add (x, olist);
        }
    }
}
/* Generate code to implement SHARED from the sender (aka parent) side.
   This is trickier, since OMP_PARALLEL_CLAUSES doesn't list things that
   got automatically shared.  */

static void
lower_send_shared_vars (tree *ilist, tree *olist, omp_context *ctx)
{
  tree var, ovar, nvar, f, x;

  if (ctx->record_type == NULL)
    return;

  for (f = TYPE_FIELDS (ctx->record_type); f ; f = TREE_CHAIN (f))
    {
      ovar = DECL_ABSTRACT_ORIGIN (f);
      nvar = maybe_lookup_decl (ovar, ctx);
      if (!nvar || !DECL_HAS_VALUE_EXPR_P (nvar))
        continue;

      /* If CTX is a nested parallel directive, find the immediately
         enclosing parallel or workshare construct that contains a
         mapping for OVAR.  */
      var = lookup_decl_in_outer_ctx (ovar, ctx);

      if (use_pointer_for_field (ovar, ctx))
        {
          x = build_sender_ref (ovar, ctx);
          var = build_fold_addr_expr (var);
          x = build_gimple_modify_stmt (x, var);
          gimplify_and_add (x, ilist);
        }
      else
        {
          x = build_sender_ref (ovar, ctx);
          x = build_gimple_modify_stmt (x, var);
          gimplify_and_add (x, ilist);

          x = build_sender_ref (ovar, ctx);
          x = build_gimple_modify_stmt (var, x);
          gimplify_and_add (x, olist);
        }
    }
}
/* Build the function calls to GOMP_parallel_start etc to actually
   generate the parallel operation.  REGION is the parallel region
   being expanded.  BB is the block where to insert the code.  WS_ARGS
   will be set if this is a call to a combined parallel+workshare
   construct, it contains the list of additional arguments needed by
   the workshare construct.  */

static void
expand_parallel_call (struct omp_region *region, basic_block bb,
                      tree entry_stmt, tree ws_args)
{
  tree t, t1, t2, val, cond, c, clauses;
  block_stmt_iterator si;
  int start_ix;

  clauses = OMP_PARALLEL_CLAUSES (entry_stmt);

  /* Determine what flavor of GOMP_parallel_start we will be
     emitting.  */
  start_ix = BUILT_IN_GOMP_PARALLEL_START;
  if (is_combined_parallel (region))
    {
      switch (region->inner->type)
        {
        case OMP_FOR:
          start_ix = BUILT_IN_GOMP_PARALLEL_LOOP_STATIC_START
                     + region->inner->sched_kind;
          break;
        case OMP_SECTIONS:
          start_ix = BUILT_IN_GOMP_PARALLEL_SECTIONS_START;
          break;
        default:
          gcc_unreachable ();
        }
    }

  /* By default, the value of NUM_THREADS is zero (selected at run time)
     and there is no conditional.  */
  cond = NULL_TREE;
  val = build_int_cst (unsigned_type_node, 0);

  c = find_omp_clause (clauses, OMP_CLAUSE_IF);
  if (c)
    cond = OMP_CLAUSE_IF_EXPR (c);

  c = find_omp_clause (clauses, OMP_CLAUSE_NUM_THREADS);
  if (c)
    val = OMP_CLAUSE_NUM_THREADS_EXPR (c);

  /* Ensure 'val' is of the correct type.  */
  val = fold_convert (unsigned_type_node, val);

  /* If we found the clause 'if (cond)', build either
     (cond != 0) or (cond ? val : 1u).  */
  if (cond)
    {
      block_stmt_iterator si;

      cond = gimple_boolify (cond);

      if (integer_zerop (val))
        val = fold_build2 (EQ_EXPR, unsigned_type_node, cond,
                           build_int_cst (TREE_TYPE (cond), 0));
      else
        {
          basic_block cond_bb, then_bb, else_bb;
          edge e, e_then, e_else;
          tree t, tmp_then, tmp_else, tmp_join, tmp_var;

          tmp_var = create_tmp_var (TREE_TYPE (val), NULL);
          if (gimple_in_ssa_p (cfun))
            {
              tmp_then = make_ssa_name (tmp_var, NULL_TREE);
              tmp_else = make_ssa_name (tmp_var, NULL_TREE);
              tmp_join = make_ssa_name (tmp_var, NULL_TREE);
            }
          else
            {
              tmp_then = tmp_var;
              tmp_else = tmp_var;
              tmp_join = tmp_var;
            }

          e = split_block (bb, NULL);
          cond_bb = e->src;
          bb = e->dest;
          remove_edge (e);

          then_bb = create_empty_bb (cond_bb);
          else_bb = create_empty_bb (then_bb);
          set_immediate_dominator (CDI_DOMINATORS, then_bb, cond_bb);
          set_immediate_dominator (CDI_DOMINATORS, else_bb, cond_bb);

          t = build3 (COND_EXPR, void_type_node,
                      cond, NULL_TREE, NULL_TREE);

          si = bsi_start (cond_bb);
          bsi_insert_after (&si, t, BSI_CONTINUE_LINKING);

          si = bsi_start (then_bb);
          t = build_gimple_modify_stmt (tmp_then, val);
          if (gimple_in_ssa_p (cfun))
            SSA_NAME_DEF_STMT (tmp_then) = t;
          bsi_insert_after (&si, t, BSI_CONTINUE_LINKING);

          si = bsi_start (else_bb);
          t = build_gimple_modify_stmt (tmp_else,
                                        build_int_cst (unsigned_type_node, 1));
          if (gimple_in_ssa_p (cfun))
            SSA_NAME_DEF_STMT (tmp_else) = t;
          bsi_insert_after (&si, t, BSI_CONTINUE_LINKING);

          make_edge (cond_bb, then_bb, EDGE_TRUE_VALUE);
          make_edge (cond_bb, else_bb, EDGE_FALSE_VALUE);
          e_then = make_edge (then_bb, bb, EDGE_FALLTHRU);
          e_else = make_edge (else_bb, bb, EDGE_FALLTHRU);

          if (gimple_in_ssa_p (cfun))
            {
              tree phi = create_phi_node (tmp_join, bb);
              SSA_NAME_DEF_STMT (tmp_join) = phi;
              add_phi_arg (phi, tmp_then, e_then);
              add_phi_arg (phi, tmp_else, e_else);
            }

          val = tmp_join;
        }

      si = bsi_start (bb);
      val = force_gimple_operand_bsi (&si, val, true, NULL_TREE,
                                      false, BSI_CONTINUE_LINKING);
    }

  si = bsi_last (bb);
  t = OMP_PARALLEL_DATA_ARG (entry_stmt);
  if (t == NULL)
    t1 = null_pointer_node;
  else
    t1 = build_fold_addr_expr (t);
  t2 = build_fold_addr_expr (OMP_PARALLEL_FN (entry_stmt));

  if (ws_args)
    {
      tree args = tree_cons (NULL, t2,
                             tree_cons (NULL, t1,
                                        tree_cons (NULL, val, ws_args)));
      t = build_function_call_expr (built_in_decls[start_ix], args);
    }
  else
    t = build_call_expr (built_in_decls[start_ix], 3, t2, t1, val);

  force_gimple_operand_bsi (&si, t, true, NULL_TREE,
                            false, BSI_CONTINUE_LINKING);

  t = OMP_PARALLEL_DATA_ARG (entry_stmt);
  if (t == NULL)
    t = null_pointer_node;
  else
    t = build_fold_addr_expr (t);
  t = build_call_expr (OMP_PARALLEL_FN (entry_stmt), 1, t);
  force_gimple_operand_bsi (&si, t, true, NULL_TREE,
                            false, BSI_CONTINUE_LINKING);

  t = build_call_expr (built_in_decls[BUILT_IN_GOMP_PARALLEL_END], 0);
  force_gimple_operand_bsi (&si, t, true, NULL_TREE,
                            false, BSI_CONTINUE_LINKING);
}
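
/* The call sequence emitted above, in the default (non-combined) case,
   is

	GOMP_parallel_start (child_fn, &.omp_data_o, val);
	child_fn (&.omp_data_o);
	GOMP_parallel_end ();

   where VAL is 0 ("let the runtime decide"), the NUM_THREADS
   expression, or the (cond ? val : 1u) value computed from the IF
   clause; the master thread executes the body itself via the direct
   call to the child function.  */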
2350 /* If exceptions are enabled, wrap *STMT_P in a MUST_NOT_THROW catch
2351 handler. This prevents programs from violating the structured
2352 block semantics with throws. */
2355 maybe_catch_exception (tree *stmt_p)
2359 if (!flag_exceptions)
2362 if (lang_protect_cleanup_actions)
2363 t = lang_protect_cleanup_actions ();
2364 else
2365 t = build_call_expr (built_in_decls[BUILT_IN_TRAP], 0);
2366 f = build2 (EH_FILTER_EXPR, void_type_node, NULL, NULL);
2367 EH_FILTER_MUST_NOT_THROW (f) = 1;
2368 gimplify_and_add (t, &EH_FILTER_FAILURE (f));
2370 t = build2 (TRY_CATCH_EXPR, void_type_node, *stmt_p, NULL);
2371 append_to_statement_list (f, &TREE_OPERAND (t, 1));
2373 *stmt_p = NULL;
2374 append_to_statement_list (t, stmt_p);
2375 }
2377 /* Chain all the DECLs in LIST by their TREE_CHAIN fields. */
2379 static tree
2380 list2chain (tree list)
2381 {
2382 tree t;
2384 for (t = list; t; t = TREE_CHAIN (t))
2385 {
2386 tree var = TREE_VALUE (t);
2387 if (TREE_CHAIN (t))
2388 TREE_CHAIN (var) = TREE_VALUE (TREE_CHAIN (t));
2389 else
2390 TREE_CHAIN (var) = NULL_TREE;
2391 }
2393 return list ? TREE_VALUE (list) : NULL_TREE;
2394 }
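/* Illustrative example (hypothetical input): for a TREE_LIST whose
   TREE_VALUEs are the VAR_DECLs a, b and c, the loop above rewires
   a -> b -> c -> NULL_TREE through their TREE_CHAIN fields and returns
   'a', which is the chained shape BLOCK_VARS expects.  */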
2397 /* Remove barriers in REGION->EXIT's block. Note that this is only
2398 valid for OMP_PARALLEL regions. Since the end of a parallel region
2399 is an implicit barrier, any workshare inside the OMP_PARALLEL that
2400 left a barrier at the end of the OMP_PARALLEL region can now be
2401 removed. */
2403 static void
2404 remove_exit_barrier (struct omp_region *region)
2405 {
2406 block_stmt_iterator si;
2407 basic_block exit_bb;
2408 edge_iterator ei;
2409 edge e;
2410 tree t;
2412 exit_bb = region->exit;
2414 /* If the parallel region doesn't return, we don't have REGION->EXIT
2415 at all. */
2416 if (! exit_bb)
2417 return;
2419 /* The last insn in the block will be the parallel's OMP_RETURN. The
2420 workshare's OMP_RETURN will be in a preceding block. The kinds of
2421 statements that can appear in between are extremely limited -- no
2422 memory operations at all. Here, we allow nothing at all, so the
2423 only thing we allow to precede this OMP_RETURN is a label. */
2424 si = bsi_last (exit_bb);
2425 gcc_assert (TREE_CODE (bsi_stmt (si)) == OMP_RETURN);
2426 bsi_prev (&si);
2427 if (!bsi_end_p (si) && TREE_CODE (bsi_stmt (si)) != LABEL_EXPR)
2428 return;
2430 FOR_EACH_EDGE (e, ei, exit_bb->preds)
2431 {
2432 si = bsi_last (e->src);
2433 if (bsi_end_p (si))
2434 continue;
2435 t = bsi_stmt (si);
2436 if (TREE_CODE (t) == OMP_RETURN)
2437 OMP_RETURN_NOWAIT (t) = 1;
2438 }
2439 }
2441 static void
2442 remove_exit_barriers (struct omp_region *region)
2444 if (region->type == OMP_PARALLEL)
2445 remove_exit_barrier (region);
2447 if (region->inner)
2448 {
2449 region = region->inner;
2450 remove_exit_barriers (region);
2451 while (region->next)
2453 region = region->next;
2454 remove_exit_barriers (region);
2455 }
2456 }
2457 }
2459 /* Optimize omp_get_thread_num () and omp_get_num_threads ()
2460 calls. These can't be declared as const functions, but
2461 within one parallel body they are constant, so they can be
2462 transformed there into __builtin_omp_get_{thread_num,num_threads} ()
2463 which are declared const. */
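/* Illustrative example (assumed, not from the original sources): inside a
   parallel body,

       tid = omp_get_thread_num ();

   is rewritten to

       tid = __builtin_omp_get_thread_num ();

   and, the builtin being const, later passes may CSE or hoist it.  */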
2465 static void
2466 optimize_omp_library_calls (void)
2467 {
2468 basic_block bb;
2469 block_stmt_iterator bsi;
2470 tree thr_num_id
2471 = DECL_ASSEMBLER_NAME (built_in_decls [BUILT_IN_OMP_GET_THREAD_NUM]);
2472 tree num_thr_id
2473 = DECL_ASSEMBLER_NAME (built_in_decls [BUILT_IN_OMP_GET_NUM_THREADS]);
2475 FOR_EACH_BB (bb)
2476 for (bsi = bsi_start (bb); !bsi_end_p (bsi); bsi_next (&bsi))
2477 {
2478 tree stmt = bsi_stmt (bsi);
2479 tree call = get_call_expr_in (stmt);
2480 tree decl;
2482 if (call
2483 && (decl = get_callee_fndecl (call))
2484 && DECL_EXTERNAL (decl)
2485 && TREE_PUBLIC (decl)
2486 && DECL_INITIAL (decl) == NULL)
2487 {
2488 tree built_in;
2490 if (DECL_NAME (decl) == thr_num_id)
2491 built_in = built_in_decls [BUILT_IN_OMP_GET_THREAD_NUM];
2492 else if (DECL_NAME (decl) == num_thr_id)
2493 built_in = built_in_decls [BUILT_IN_OMP_GET_NUM_THREADS];
2494 else
2495 continue;
2497 if (DECL_ASSEMBLER_NAME (decl) != DECL_ASSEMBLER_NAME (built_in)
2498 || call_expr_nargs (call) != 0)
2499 continue;
2501 if (flag_exceptions && !TREE_NOTHROW (decl))
2502 continue;
2504 if (TREE_CODE (TREE_TYPE (decl)) != FUNCTION_TYPE
2505 || TYPE_MAIN_VARIANT (TREE_TYPE (TREE_TYPE (decl)))
2506 != TYPE_MAIN_VARIANT (TREE_TYPE (TREE_TYPE (built_in))))
2507 continue;
2509 CALL_EXPR_FN (call) = build_fold_addr_expr (built_in);
2510 }
2511 }
2512 }
2514 /* Expand the OpenMP parallel directive starting at REGION. */
2516 static void
2517 expand_omp_parallel (struct omp_region *region)
2518 {
2519 basic_block entry_bb, exit_bb, new_bb;
2520 struct function *child_cfun;
2521 tree child_fn, block, t, ws_args;
2522 block_stmt_iterator si;
2523 tree entry_stmt;
2524 edge e;
2526 entry_stmt = last_stmt (region->entry);
2527 child_fn = OMP_PARALLEL_FN (entry_stmt);
2528 child_cfun = DECL_STRUCT_FUNCTION (child_fn);
2529 /* If this function has already been instrumented, make sure
2530 the child function isn't instrumented again. */
2531 child_cfun->after_tree_profile = cfun->after_tree_profile;
2533 entry_bb = region->entry;
2534 exit_bb = region->exit;
2536 if (is_combined_parallel (region))
2537 ws_args = region->ws_args;
2539 ws_args = NULL_TREE;
2541 if (child_cfun->cfg)
2542 {
2543 /* Due to inlining, it may happen that we have already outlined
2544 the region, in which case all we need to do is make the
2545 sub-graph unreachable and emit the parallel call. */
2546 edge entry_succ_e, exit_succ_e;
2547 block_stmt_iterator si;
2549 entry_succ_e = single_succ_edge (entry_bb);
2551 si = bsi_last (entry_bb);
2552 gcc_assert (TREE_CODE (bsi_stmt (si)) == OMP_PARALLEL);
2553 bsi_remove (&si, true);
2555 new_bb = entry_bb;
2556 if (exit_bb)
2557 {
2558 exit_succ_e = single_succ_edge (exit_bb);
2559 make_edge (new_bb, exit_succ_e->dest, EDGE_FALLTHRU);
2560 }
2561 remove_edge_and_dominated_blocks (entry_succ_e);
2562 }
2563 else
2564 {
2565 /* If the parallel region needs data sent from the parent
2566 function, then the very first statement (except possible
2567 tree profile counter updates) of the parallel body
2568 is a copy assignment .OMP_DATA_I = &.OMP_DATA_O. Since
2569 &.OMP_DATA_O is passed as an argument to the child function,
2570 we need to replace it with the argument as seen by the child
2573 In most cases, this will end up being the identity assignment
2574 .OMP_DATA_I = .OMP_DATA_I. However, if the parallel body had
2575 a function call that has been inlined, the original PARM_DECL
2576 .OMP_DATA_I may have been converted into a different local
2577 variable. In which case, we need to keep the assignment. */
2578 if (OMP_PARALLEL_DATA_ARG (entry_stmt))
2579 {
2580 basic_block entry_succ_bb = single_succ (entry_bb);
2581 block_stmt_iterator si;
2582 tree parcopy_stmt = NULL_TREE, arg, narg;
2584 for (si = bsi_start (entry_succ_bb); ; bsi_next (&si))
2585 {
2586 tree stmt;
2588 gcc_assert (!bsi_end_p (si));
2589 stmt = bsi_stmt (si);
2590 if (TREE_CODE (stmt) != GIMPLE_MODIFY_STMT)
2591 continue;
2593 arg = GIMPLE_STMT_OPERAND (stmt, 1);
2594 STRIP_NOPS (arg);
2595 if (TREE_CODE (arg) == ADDR_EXPR
2596 && TREE_OPERAND (arg, 0)
2597 == OMP_PARALLEL_DATA_ARG (entry_stmt))
2598 {
2599 parcopy_stmt = stmt;
2600 break;
2601 }
2602 }
2604 gcc_assert (parcopy_stmt != NULL_TREE);
2605 arg = DECL_ARGUMENTS (child_fn);
2607 if (!gimple_in_ssa_p (cfun))
2608 {
2609 if (GIMPLE_STMT_OPERAND (parcopy_stmt, 0) == arg)
2610 bsi_remove (&si, true);
2611 else
2612 GIMPLE_STMT_OPERAND (parcopy_stmt, 1) = arg;
2613 }
2614 else
2615 {
2616 /* If we are in ssa form, we must load the value from the default
2617 definition of the argument. That should not be defined now,
2618 since the argument is not used uninitialized. */
2619 gcc_assert (gimple_default_def (cfun, arg) == NULL);
2620 narg = make_ssa_name (arg, build_empty_stmt ());
2621 set_default_def (arg, narg);
2622 GIMPLE_STMT_OPERAND (parcopy_stmt, 1) = narg;
2623 update_stmt (parcopy_stmt);
2624 }
2625 }
2627 /* Declare local variables needed in CHILD_CFUN. */
2628 block = DECL_INITIAL (child_fn);
2629 BLOCK_VARS (block) = list2chain (child_cfun->local_decls);
2630 DECL_SAVED_TREE (child_fn) = bb_stmt_list (single_succ (entry_bb));
2632 /* Reset DECL_CONTEXT on function arguments. */
2633 for (t = DECL_ARGUMENTS (child_fn); t; t = TREE_CHAIN (t))
2634 DECL_CONTEXT (t) = child_fn;
2636 /* Split ENTRY_BB at OMP_PARALLEL so that it can be moved to the
2637 child function. */
2638 si = bsi_last (entry_bb);
2639 t = bsi_stmt (si);
2640 gcc_assert (t && TREE_CODE (t) == OMP_PARALLEL);
2641 bsi_remove (&si, true);
2642 e = split_block (entry_bb, t);
2643 entry_bb = e->dest;
2644 single_succ_edge (entry_bb)->flags = EDGE_FALLTHRU;
2646 /* Convert OMP_RETURN into a RETURN_EXPR. */
2647 if (exit_bb)
2648 {
2649 si = bsi_last (exit_bb);
2650 gcc_assert (!bsi_end_p (si)
2651 && TREE_CODE (bsi_stmt (si)) == OMP_RETURN);
2652 t = build1 (RETURN_EXPR, void_type_node, NULL);
2653 bsi_insert_after (&si, t, BSI_SAME_STMT);
2654 bsi_remove (&si, true);
2655 }
2657 /* Move the parallel region into CHILD_CFUN. */
2659 if (gimple_in_ssa_p (cfun))
2660 {
2661 push_cfun (child_cfun);
2662 init_tree_ssa (child_cfun);
2663 init_ssa_operands ();
2664 cfun->gimple_df->in_ssa_p = true;
2665 pop_cfun ();
2666 }
2667 new_bb = move_sese_region_to_fn (child_cfun, entry_bb, exit_bb);
2668 if (exit_bb)
2669 single_succ_edge (new_bb)->flags = EDGE_FALLTHRU;
2671 /* Inform the callgraph about the new function. */
2672 DECL_STRUCT_FUNCTION (child_fn)->curr_properties
2673 = cfun->curr_properties;
2674 cgraph_add_new_function (child_fn, true);
2676 /* Fix the callgraph edges for child_cfun. Those for cfun will be
2677 fixed in a following pass. */
2678 push_cfun (child_cfun);
2679 if (optimize)
2680 optimize_omp_library_calls ();
2681 rebuild_cgraph_edges ();
2683 /* Some EH regions might become dead, see PR34608. If
2684 pass_cleanup_cfg isn't the first pass to happen with the
2685 new child, these dead EH edges might cause problems.
2686 Clean them up now. */
2687 if (flag_exceptions)
2688 {
2689 basic_block bb;
2690 tree save_current = current_function_decl;
2691 bool changed = false;
2693 current_function_decl = child_fn;
2694 FOR_EACH_BB (bb)
2695 changed |= tree_purge_dead_eh_edges (bb);
2696 if (changed)
2697 cleanup_tree_cfg ();
2698 current_function_decl = save_current;
2699 }
2700 pop_cfun ();
2703 /* Emit a library call to launch the children threads. */
2704 expand_parallel_call (region, new_bb, entry_stmt, ws_args);
2705 update_ssa (TODO_update_ssa_only_virtuals);
2706 }
2709 /* A subroutine of expand_omp_for. Generate code for a parallel
2710 loop with any schedule. Given parameters:
2712 for (V = N1; V cond N2; V += STEP) BODY;
2714 where COND is "<" or ">", we generate pseudocode
2716 more = GOMP_loop_foo_start (N1, N2, STEP, CHUNK, &istart0, &iend0);
2717 if (more) goto L0; else goto L3;
2718 L0:
2719 V = istart0;
2720 iend = iend0;
2721 L1:
2722 BODY;
2723 V += STEP;
2724 if (V cond iend) goto L1; else goto L2;
2725 L2:
2726 if (GOMP_loop_foo_next (&istart0, &iend0)) goto L0; else goto L3;
2727 L3:
2729 If this is a combined omp parallel loop, instead of the call to
2730 GOMP_loop_foo_start, we call GOMP_loop_foo_next. */
2732 static void
2733 expand_omp_for_generic (struct omp_region *region,
2734 struct omp_for_data *fd,
2735 enum built_in_function start_fn,
2736 enum built_in_function next_fn)
2737 {
2738 tree type, istart0, iend0, iend, phi;
2739 tree t, vmain, vback;
2740 basic_block entry_bb, cont_bb, exit_bb, l0_bb, l1_bb;
2741 basic_block l2_bb = NULL, l3_bb = NULL;
2742 block_stmt_iterator si;
2743 bool in_combined_parallel = is_combined_parallel (region);
2744 bool broken_loop = region->cont == NULL;
2745 edge e, ne;
2747 gcc_assert (!broken_loop || !in_combined_parallel);
2749 type = TREE_TYPE (fd->v);
2751 istart0 = create_tmp_var (long_integer_type_node, ".istart0");
2752 iend0 = create_tmp_var (long_integer_type_node, ".iend0");
2753 TREE_ADDRESSABLE (istart0) = 1;
2754 TREE_ADDRESSABLE (iend0) = 1;
2755 if (gimple_in_ssa_p (cfun))
2756 {
2757 add_referenced_var (istart0);
2758 add_referenced_var (iend0);
2759 }
2761 entry_bb = region->entry;
2762 cont_bb = region->cont;
2763 gcc_assert (EDGE_COUNT (entry_bb->succs) == 2);
2764 gcc_assert (broken_loop
2765 || BRANCH_EDGE (entry_bb)->dest == FALLTHRU_EDGE (cont_bb)->dest);
2766 l0_bb = split_edge (FALLTHRU_EDGE (entry_bb));
2767 l1_bb = single_succ (l0_bb);
2768 if (!broken_loop)
2769 {
2770 l2_bb = create_empty_bb (cont_bb);
2771 gcc_assert (BRANCH_EDGE (cont_bb)->dest == l1_bb);
2772 gcc_assert (EDGE_COUNT (cont_bb->succs) == 2);
2773 }
2774 else
2775 l2_bb = NULL;
2776 l3_bb = BRANCH_EDGE (entry_bb)->dest;
2777 exit_bb = region->exit;
2779 si = bsi_last (entry_bb);
2780 gcc_assert (TREE_CODE (bsi_stmt (si)) == OMP_FOR);
2781 if (in_combined_parallel)
2782 {
2783 /* In a combined parallel loop, emit a call to
2784 GOMP_loop_foo_next. */
2785 t = build_call_expr (built_in_decls[next_fn], 2,
2786 build_fold_addr_expr (istart0),
2787 build_fold_addr_expr (iend0));
2788 }
2789 else
2790 {
2791 tree t0, t1, t2, t3, t4;
2792 /* If this is not a combined parallel loop, emit a call to
2793 GOMP_loop_foo_start in ENTRY_BB. */
2794 t4 = build_fold_addr_expr (iend0);
2795 t3 = build_fold_addr_expr (istart0);
2796 t2 = fold_convert (long_integer_type_node, fd->step);
2797 t1 = fold_convert (long_integer_type_node, fd->n2);
2798 t0 = fold_convert (long_integer_type_node, fd->n1);
2799 if (fd->chunk_size)
2800 {
2801 t = fold_convert (long_integer_type_node, fd->chunk_size);
2802 t = build_call_expr (built_in_decls[start_fn], 6,
2803 t0, t1, t2, t, t3, t4);
2804 }
2805 else
2806 t = build_call_expr (built_in_decls[start_fn], 5,
2807 t0, t1, t2, t3, t4);
2808 }
2809 t = force_gimple_operand_bsi (&si, t, true, NULL_TREE,
2810 true, BSI_SAME_STMT);
2811 t = build3 (COND_EXPR, void_type_node, t, NULL_TREE, NULL_TREE);
2812 bsi_insert_after (&si, t, BSI_SAME_STMT);
2814 /* Remove the OMP_FOR statement. */
2815 bsi_remove (&si, true);
2817 /* Iteration setup for sequential loop goes in L0_BB. */
2818 si = bsi_start (l0_bb);
2819 t = fold_convert (type, istart0);
2820 t = force_gimple_operand_bsi (&si, t, false, NULL_TREE,
2821 false, BSI_CONTINUE_LINKING);
2822 t = build_gimple_modify_stmt (fd->v, t);
2823 bsi_insert_after (&si, t, BSI_CONTINUE_LINKING);
2824 if (gimple_in_ssa_p (cfun))
2825 SSA_NAME_DEF_STMT (fd->v) = t;
2827 t = fold_convert (type, iend0);
2828 iend = force_gimple_operand_bsi (&si, t, true, NULL_TREE,
2829 false, BSI_CONTINUE_LINKING);
2831 if (!broken_loop)
2832 {
2833 /* Code to control the increment and predicate for the sequential
2834 loop goes in the CONT_BB. */
2835 si = bsi_last (cont_bb);
2836 t = bsi_stmt (si);
2837 gcc_assert (TREE_CODE (t) == OMP_CONTINUE);
2838 vmain = TREE_OPERAND (t, 1);
2839 vback = TREE_OPERAND (t, 0);
2841 t = fold_build2 (PLUS_EXPR, type, vmain, fd->step);
2842 t = force_gimple_operand_bsi (&si, t, false, NULL_TREE,
2843 true, BSI_SAME_STMT);
2844 t = build_gimple_modify_stmt (vback, t);
2845 bsi_insert_before (&si, t, BSI_SAME_STMT);
2846 if (gimple_in_ssa_p (cfun))
2847 SSA_NAME_DEF_STMT (vback) = t;
2849 t = build2 (fd->cond_code, boolean_type_node, vback, iend);
2850 t = build3 (COND_EXPR, void_type_node, t, NULL_TREE, NULL_TREE);
2851 bsi_insert_before (&si, t, BSI_SAME_STMT);
2853 /* Remove OMP_CONTINUE. */
2854 bsi_remove (&si, true);
2856 /* Emit code to get the next parallel iteration in L2_BB. */
2857 si = bsi_start (l2_bb);
2859 t = build_call_expr (built_in_decls[next_fn], 2,
2860 build_fold_addr_expr (istart0),
2861 build_fold_addr_expr (iend0));
2862 t = force_gimple_operand_bsi (&si, t, true, NULL_TREE,
2863 false, BSI_CONTINUE_LINKING);
2864 t = build3 (COND_EXPR, void_type_node, t, NULL_TREE, NULL_TREE);
2865 bsi_insert_after (&si, t, BSI_CONTINUE_LINKING);
2866 }
2868 /* Add the loop cleanup function. */
2869 si = bsi_last (exit_bb);
2870 if (OMP_RETURN_NOWAIT (bsi_stmt (si)))
2871 t = built_in_decls[BUILT_IN_GOMP_LOOP_END_NOWAIT];
2872 else
2873 t = built_in_decls[BUILT_IN_GOMP_LOOP_END];
2874 t = build_call_expr (t, 0);
2875 bsi_insert_after (&si, t, BSI_SAME_STMT);
2876 bsi_remove (&si, true);
2878 /* Connect the new blocks. */
2879 find_edge (entry_bb, l0_bb)->flags = EDGE_TRUE_VALUE;
2880 find_edge (entry_bb, l3_bb)->flags = EDGE_FALSE_VALUE;
2882 if (!broken_loop)
2883 {
2884 e = find_edge (cont_bb, l3_bb);
2885 ne = make_edge (l2_bb, l3_bb, EDGE_FALSE_VALUE);
2887 for (phi = phi_nodes (l3_bb); phi; phi = PHI_CHAIN (phi))
2888 SET_USE (PHI_ARG_DEF_PTR_FROM_EDGE (phi, ne),
2889 PHI_ARG_DEF_FROM_EDGE (phi, e));
2890 remove_edge (e);
2892 find_edge (cont_bb, l1_bb)->flags = EDGE_TRUE_VALUE;
2893 make_edge (cont_bb, l2_bb, EDGE_FALSE_VALUE);
2894 make_edge (l2_bb, l0_bb, EDGE_TRUE_VALUE);
2896 set_immediate_dominator (CDI_DOMINATORS, l2_bb,
2897 recompute_dominator (CDI_DOMINATORS, l2_bb));
2898 set_immediate_dominator (CDI_DOMINATORS, l3_bb,
2899 recompute_dominator (CDI_DOMINATORS, l3_bb));
2900 set_immediate_dominator (CDI_DOMINATORS, l0_bb,
2901 recompute_dominator (CDI_DOMINATORS, l0_bb));
2902 set_immediate_dominator (CDI_DOMINATORS, l1_bb,
2903 recompute_dominator (CDI_DOMINATORS, l1_bb));
2904 }
2905 }
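/* A concrete (illustrative) instance, assumed for exposition: for

       #pragma omp for schedule (dynamic, 4)
       for (i = 0; i < n; i++) BODY;

   START_FN and NEXT_FN are the GOMP_loop_dynamic_start/_next builtins,
   and the control flow generated above behaves like

       if (GOMP_loop_dynamic_start (0, n, 1, 4, &istart0, &iend0))
         do
           for (i = istart0; i < iend0; i++) BODY;
         while (GOMP_loop_dynamic_next (&istart0, &iend0));
       GOMP_loop_end ();

   with each runtime call handing the thread a chunk of four iterations.  */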
2908 /* A subroutine of expand_omp_for. Generate code for a parallel
2909 loop with static schedule and no specified chunk size. Given
2912 for (V = N1; V cond N2; V += STEP) BODY;
2914 where COND is "<" or ">", we generate pseudocode
2916 if (cond is <)
2917 adj = STEP - 1;
2918 else
2919 adj = STEP + 1;
2920 n = (adj + N2 - N1) / STEP;
2921 q = n / nthreads;
2922 q += (q * nthreads != n);
2923 s0 = q * threadid;
2924 e0 = min(s0 + q, n);
2925 V = s0 * STEP + N1;
2926 if (s0 >= e0) goto L2; else goto L0;
2927 L0:
2928 e = e0 * STEP + N1;
2929 L1:
2930 BODY;
2931 V += STEP;
2932 if (V cond e) goto L1;
2933 L2:
2934 */
2936 static void
2937 expand_omp_for_static_nochunk (struct omp_region *region,
2938 struct omp_for_data *fd)
2939 {
2940 tree n, q, s0, e0, e, t, nthreads, threadid;
2941 tree type, vmain, vback;
2942 basic_block entry_bb, exit_bb, seq_start_bb, body_bb, cont_bb;
2943 basic_block fin_bb;
2944 block_stmt_iterator si;
2946 type = TREE_TYPE (fd->v);
2948 entry_bb = region->entry;
2949 cont_bb = region->cont;
2950 gcc_assert (EDGE_COUNT (entry_bb->succs) == 2);
2951 gcc_assert (BRANCH_EDGE (entry_bb)->dest == FALLTHRU_EDGE (cont_bb)->dest);
2952 seq_start_bb = split_edge (FALLTHRU_EDGE (entry_bb));
2953 body_bb = single_succ (seq_start_bb);
2954 gcc_assert (BRANCH_EDGE (cont_bb)->dest == body_bb);
2955 gcc_assert (EDGE_COUNT (cont_bb->succs) == 2);
2956 fin_bb = FALLTHRU_EDGE (cont_bb)->dest;
2957 exit_bb = region->exit;
2959 /* Iteration space partitioning goes in ENTRY_BB. */
2960 si = bsi_last (entry_bb);
2961 gcc_assert (TREE_CODE (bsi_stmt (si)) == OMP_FOR);
2963 t = build_call_expr (built_in_decls[BUILT_IN_OMP_GET_NUM_THREADS], 0);
2964 t = fold_convert (type, t);
2965 nthreads = force_gimple_operand_bsi (&si, t, true, NULL_TREE,
2966 true, BSI_SAME_STMT);
2968 t = build_call_expr (built_in_decls[BUILT_IN_OMP_GET_THREAD_NUM], 0);
2969 t = fold_convert (type, t);
2970 threadid = force_gimple_operand_bsi (&si, t, true, NULL_TREE,
2971 true, BSI_SAME_STMT);
2973 fd->n1 = force_gimple_operand_bsi (&si,
2974 fold_convert (type, fd->n1),
2975 true, NULL_TREE,
2976 true, BSI_SAME_STMT);
2978 fd->n2 = force_gimple_operand_bsi (&si,
2979 fold_convert (type, fd->n2),
2980 true, NULL_TREE,
2981 true, BSI_SAME_STMT);
2983 fd->step = force_gimple_operand_bsi (&si,
2984 fold_convert (type, fd->step),
2985 true, NULL_TREE,
2986 true, BSI_SAME_STMT);
2988 t = build_int_cst (type, (fd->cond_code == LT_EXPR ? -1 : 1));
2989 t = fold_build2 (PLUS_EXPR, type, fd->step, t);
2990 t = fold_build2 (PLUS_EXPR, type, t, fd->n2);
2991 t = fold_build2 (MINUS_EXPR, type, t, fd->n1);
2992 t = fold_build2 (TRUNC_DIV_EXPR, type, t, fd->step);
2993 t = fold_convert (type, t);
2994 n = force_gimple_operand_bsi (&si, t, true, NULL_TREE, true, BSI_SAME_STMT);
2996 t = fold_build2 (TRUNC_DIV_EXPR, type, n, nthreads);
2997 q = force_gimple_operand_bsi (&si, t, true, NULL_TREE, true, BSI_SAME_STMT);
2999 t = fold_build2 (MULT_EXPR, type, q, nthreads);
3000 t = fold_build2 (NE_EXPR, type, t, n);
3001 t = fold_build2 (PLUS_EXPR, type, q, t);
3002 q = force_gimple_operand_bsi (&si, t, true, NULL_TREE, true, BSI_SAME_STMT);
3004 t = build2 (MULT_EXPR, type, q, threadid);
3005 s0 = force_gimple_operand_bsi (&si, t, true, NULL_TREE, true, BSI_SAME_STMT);
3007 t = fold_build2 (PLUS_EXPR, type, s0, q);
3008 t = fold_build2 (MIN_EXPR, type, t, n);
3009 e0 = force_gimple_operand_bsi (&si, t, true, NULL_TREE, true, BSI_SAME_STMT);
3011 t = build2 (GE_EXPR, boolean_type_node, s0, e0);
3012 t = build3 (COND_EXPR, void_type_node, t, NULL_TREE, NULL_TREE);
3013 bsi_insert_before (&si, t, BSI_SAME_STMT);
3015 /* Remove the OMP_FOR statement. */
3016 bsi_remove (&si, true);
3018 /* Setup code for sequential iteration goes in SEQ_START_BB. */
3019 si = bsi_start (seq_start_bb);
3021 t = fold_convert (type, s0);
3022 t = fold_build2 (MULT_EXPR, type, t, fd->step);
3023 t = fold_build2 (PLUS_EXPR, type, t, fd->n1);
3024 t = force_gimple_operand_bsi (&si, t, false, NULL_TREE,
3025 false, BSI_CONTINUE_LINKING);
3026 t = build_gimple_modify_stmt (fd->v, t);
3027 bsi_insert_after (&si, t, BSI_CONTINUE_LINKING);
3028 if (gimple_in_ssa_p (cfun))
3029 SSA_NAME_DEF_STMT (fd->v) = t;
3031 t = fold_convert (type, e0);
3032 t = fold_build2 (MULT_EXPR, type, t, fd->step);
3033 t = fold_build2 (PLUS_EXPR, type, t, fd->n1);
3034 e = force_gimple_operand_bsi (&si, t, true, NULL_TREE,
3035 false, BSI_CONTINUE_LINKING);
3037 /* The code controlling the sequential loop replaces the OMP_CONTINUE. */
3038 si = bsi_last (cont_bb);
3039 t = bsi_stmt (si);
3040 gcc_assert (TREE_CODE (t) == OMP_CONTINUE);
3041 vmain = TREE_OPERAND (t, 1);
3042 vback = TREE_OPERAND (t, 0);
3044 t = fold_build2 (PLUS_EXPR, type, vmain, fd->step);
3045 t = force_gimple_operand_bsi (&si, t, false, NULL_TREE,
3046 true, BSI_SAME_STMT);
3047 t = build_gimple_modify_stmt (vback, t);
3048 bsi_insert_before (&si, t, BSI_SAME_STMT);
3049 if (gimple_in_ssa_p (cfun))
3050 SSA_NAME_DEF_STMT (vback) = t;
3052 t = build2 (fd->cond_code, boolean_type_node, vback, e);
3053 t = build3 (COND_EXPR, void_type_node, t, NULL_TREE, NULL_TREE);
3054 bsi_insert_before (&si, t, BSI_SAME_STMT);
3056 /* Remove the OMP_CONTINUE statement. */
3057 bsi_remove (&si, true);
3059 /* Replace the OMP_RETURN with a barrier, or nothing. */
3060 si = bsi_last (exit_bb);
3061 if (!OMP_RETURN_NOWAIT (bsi_stmt (si)))
3062 force_gimple_operand_bsi (&si, build_omp_barrier (), false, NULL_TREE,
3063 false, BSI_SAME_STMT);
3064 bsi_remove (&si, true);
3066 /* Connect all the blocks. */
3067 find_edge (entry_bb, seq_start_bb)->flags = EDGE_FALSE_VALUE;
3068 find_edge (entry_bb, fin_bb)->flags = EDGE_TRUE_VALUE;
3070 find_edge (cont_bb, body_bb)->flags = EDGE_TRUE_VALUE;
3071 find_edge (cont_bb, fin_bb)->flags = EDGE_FALSE_VALUE;
3073 set_immediate_dominator (CDI_DOMINATORS, seq_start_bb, entry_bb);
3074 set_immediate_dominator (CDI_DOMINATORS, body_bb,
3075 recompute_dominator (CDI_DOMINATORS, body_bb));
3076 set_immediate_dominator (CDI_DOMINATORS, fin_bb,
3077 recompute_dominator (CDI_DOMINATORS, fin_bb));
3078 }
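/* Worked example (illustrative, with assumed values): for N1 = 0,
   N2 = 103, STEP = 1, COND "<" and nthreads = 4, we compute n = 103 and
   q = 103 / 4 = 25; since 25 * 4 != 103, q is bumped to 26.  Thread TID
   then runs from s0 = 26 * TID to e0 = min (s0 + 26, 103), so the four
   threads get 26, 26, 26 and 25 iterations, the last range clipped by
   the min.  */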
3081 /* A subroutine of expand_omp_for. Generate code for a parallel
3082 loop with static schedule and a specified chunk size. Given
3085 for (V = N1; V cond N2; V += STEP) BODY;
3087 where COND is "<" or ">", we generate pseudocode
3089 if (cond is <)
3090 adj = STEP - 1;
3091 else
3092 adj = STEP + 1;
3093 n = (adj + N2 - N1) / STEP;
3094 trip = 0;
3095 V = threadid * CHUNK * STEP + N1; -- this extra definition of V is
3096 here so that V is defined
3097 if the loop is not entered
3098 L0:
3099 s0 = (trip * nthreads + threadid) * CHUNK;
3100 e0 = min(s0 + CHUNK, n);
3101 if (s0 < n) goto L1; else goto L4;
3102 L1:
3103 V = s0 * STEP + N1;
3104 e = e0 * STEP + N1;
3105 L2:
3106 BODY;
3107 V += STEP;
3108 if (V cond e) goto L2; else goto L3;
3109 L3:
3110 trip += 1;
3111 goto L0;
3112 L4:
3113 */
3115 static void
3116 expand_omp_for_static_chunk (struct omp_region *region, struct omp_for_data *fd)
3117 {
3118 tree n, s0, e0, e, t, phi, nphi, args;
3119 tree trip_var, trip_init, trip_main, trip_back, nthreads, threadid;
3120 tree type, cont, v_main, v_back, v_extra;
3121 basic_block entry_bb, exit_bb, body_bb, seq_start_bb, iter_part_bb;
3122 basic_block trip_update_bb, cont_bb, fin_bb;
3123 block_stmt_iterator si;
3124 edge se, re, ene;
3126 type = TREE_TYPE (fd->v);
3128 entry_bb = region->entry;
3129 se = split_block (entry_bb, last_stmt (entry_bb));
3130 entry_bb = se->src;
3131 iter_part_bb = se->dest;
3132 cont_bb = region->cont;
3133 gcc_assert (EDGE_COUNT (iter_part_bb->succs) == 2);
3134 gcc_assert (BRANCH_EDGE (iter_part_bb)->dest
3135 == FALLTHRU_EDGE (cont_bb)->dest);
3136 seq_start_bb = split_edge (FALLTHRU_EDGE (iter_part_bb));
3137 body_bb = single_succ (seq_start_bb);
3138 gcc_assert (BRANCH_EDGE (cont_bb)->dest == body_bb);
3139 gcc_assert (EDGE_COUNT (cont_bb->succs) == 2);
3140 fin_bb = FALLTHRU_EDGE (cont_bb)->dest;
3141 trip_update_bb = split_edge (FALLTHRU_EDGE (cont_bb));
3142 exit_bb = region->exit;
3144 /* Trip and adjustment setup goes in ENTRY_BB. */
3145 si = bsi_last (entry_bb);
3146 gcc_assert (TREE_CODE (bsi_stmt (si)) == OMP_FOR);
3148 t = build_call_expr (built_in_decls[BUILT_IN_OMP_GET_NUM_THREADS], 0);
3149 t = fold_convert (type, t);
3150 nthreads = force_gimple_operand_bsi (&si, t, true, NULL_TREE,
3151 true, BSI_SAME_STMT);
3153 t = build_call_expr (built_in_decls[BUILT_IN_OMP_GET_THREAD_NUM], 0);
3154 t = fold_convert (type, t);
3155 threadid = force_gimple_operand_bsi (&si, t, true, NULL_TREE,
3156 true, BSI_SAME_STMT);
3158 fd->n1 = force_gimple_operand_bsi (&si, fold_convert (type, fd->n1),
3159 true, NULL_TREE,
3160 true, BSI_SAME_STMT);
3161 fd->n2 = force_gimple_operand_bsi (&si, fold_convert (type, fd->n2),
3162 true, NULL_TREE,
3163 true, BSI_SAME_STMT);
3164 fd->step = force_gimple_operand_bsi (&si, fold_convert (type, fd->step),
3165 true, NULL_TREE,
3166 true, BSI_SAME_STMT);
3167 fd->chunk_size
3168 = force_gimple_operand_bsi (&si, fold_convert (type,
3169 fd->chunk_size),
3170 true, NULL_TREE,
3171 true, BSI_SAME_STMT);
3173 t = build_int_cst (type, (fd->cond_code == LT_EXPR ? -1 : 1));
3174 t = fold_build2 (PLUS_EXPR, type, fd->step, t);
3175 t = fold_build2 (PLUS_EXPR, type, t, fd->n2);
3176 t = fold_build2 (MINUS_EXPR, type, t, fd->n1);
3177 t = fold_build2 (TRUNC_DIV_EXPR, type, t, fd->step);
3178 t = fold_convert (type, t);
3179 n = force_gimple_operand_bsi (&si, t, true, NULL_TREE,
3180 true, BSI_SAME_STMT);
3182 trip_var = create_tmp_var (type, ".trip");
3183 if (gimple_in_ssa_p (cfun))
3184 {
3185 add_referenced_var (trip_var);
3186 trip_init = make_ssa_name (trip_var, NULL_TREE);
3187 trip_main = make_ssa_name (trip_var, NULL_TREE);
3188 trip_back = make_ssa_name (trip_var, NULL_TREE);
3189 }
3190 else
3191 {
3192 trip_init = trip_var;
3193 trip_main = trip_var;
3194 trip_back = trip_var;
3195 }
3197 t = build_gimple_modify_stmt (trip_init, build_int_cst (type, 0));
3198 bsi_insert_before (&si, t, BSI_SAME_STMT);
3199 if (gimple_in_ssa_p (cfun))
3200 SSA_NAME_DEF_STMT (trip_init) = t;
3202 t = fold_build2 (MULT_EXPR, type, threadid, fd->chunk_size);
3203 t = fold_build2 (MULT_EXPR, type, t, fd->step);
3204 t = fold_build2 (PLUS_EXPR, type, t, fd->n1);
3205 v_extra = force_gimple_operand_bsi (&si, t, true, NULL_TREE,
3206 true, BSI_SAME_STMT);
3208 /* Remove the OMP_FOR. */
3209 bsi_remove (&si, true);
3211 /* Iteration space partitioning goes in ITER_PART_BB. */
3212 si = bsi_last (iter_part_bb);
3214 t = fold_build2 (MULT_EXPR, type, trip_main, nthreads);
3215 t = fold_build2 (PLUS_EXPR, type, t, threadid);
3216 t = fold_build2 (MULT_EXPR, type, t, fd->chunk_size);
3217 s0 = force_gimple_operand_bsi (&si, t, true, NULL_TREE,
3218 false, BSI_CONTINUE_LINKING);
3220 t = fold_build2 (PLUS_EXPR, type, s0, fd->chunk_size);
3221 t = fold_build2 (MIN_EXPR, type, t, n);
3222 e0 = force_gimple_operand_bsi (&si, t, true, NULL_TREE,
3223 false, BSI_CONTINUE_LINKING);
3225 t = build2 (LT_EXPR, boolean_type_node, s0, n);
3226 t = build3 (COND_EXPR, void_type_node, t, NULL_TREE, NULL_TREE);
3227 bsi_insert_after (&si, t, BSI_CONTINUE_LINKING);
3229 /* Setup code for sequential iteration goes in SEQ_START_BB. */
3230 si = bsi_start (seq_start_bb);
3232 t = fold_convert (type, s0);
3233 t = fold_build2 (MULT_EXPR, type, t, fd->step);
3234 t = fold_build2 (PLUS_EXPR, type, t, fd->n1);
3235 t = force_gimple_operand_bsi (&si, t, false, NULL_TREE,
3236 false, BSI_CONTINUE_LINKING);
3237 t = build_gimple_modify_stmt (fd->v, t);
3238 bsi_insert_after (&si, t, BSI_CONTINUE_LINKING);
3239 if (gimple_in_ssa_p (cfun))
3240 SSA_NAME_DEF_STMT (fd->v) = t;
3242 t = fold_convert (type, e0);
3243 t = fold_build2 (MULT_EXPR, type, t, fd->step);
3244 t = fold_build2 (PLUS_EXPR, type, t, fd->n1);
3245 e = force_gimple_operand_bsi (&si, t, true, NULL_TREE,
3246 false, BSI_CONTINUE_LINKING);
3248 /* The code controlling the sequential loop goes in CONT_BB,
3249 replacing the OMP_CONTINUE. */
3250 si = bsi_last (cont_bb);
3251 cont = bsi_stmt (si);
3252 gcc_assert (TREE_CODE (cont) == OMP_CONTINUE);
3253 v_main = TREE_OPERAND (cont, 1);
3254 v_back = TREE_OPERAND (cont, 0);
3256 t = build2 (PLUS_EXPR, type, v_main, fd->step);
3257 t = build_gimple_modify_stmt (v_back, t);
3258 bsi_insert_before (&si, t, BSI_SAME_STMT);
3259 if (gimple_in_ssa_p (cfun))
3260 SSA_NAME_DEF_STMT (v_back) = t;
3262 t = build2 (fd->cond_code, boolean_type_node, v_back, e);
3263 t = build3 (COND_EXPR, void_type_node, t, NULL_TREE, NULL_TREE);
3264 bsi_insert_before (&si, t, BSI_SAME_STMT);
3266 /* Remove OMP_CONTINUE. */
3267 bsi_remove (&si, true);
3269 /* Trip update code goes into TRIP_UPDATE_BB. */
3270 si = bsi_start (trip_update_bb);
3272 t = build_int_cst (type, 1);
3273 t = build2 (PLUS_EXPR, type, trip_main, t);
3274 t = build_gimple_modify_stmt (trip_back, t);
3275 bsi_insert_after (&si, t, BSI_CONTINUE_LINKING);
3276 if (gimple_in_ssa_p (cfun))
3277 SSA_NAME_DEF_STMT (trip_back) = t;
3279 /* Replace the OMP_RETURN with a barrier, or nothing. */
3280 si = bsi_last (exit_bb);
3281 if (!OMP_RETURN_NOWAIT (bsi_stmt (si)))
3282 force_gimple_operand_bsi (&si, build_omp_barrier (), false, NULL_TREE,
3283 false, BSI_SAME_STMT);
3284 bsi_remove (&si, true);
3286 /* Connect the new blocks. */
3287 find_edge (iter_part_bb, seq_start_bb)->flags = EDGE_TRUE_VALUE;
3288 find_edge (iter_part_bb, fin_bb)->flags = EDGE_FALSE_VALUE;
3290 find_edge (cont_bb, body_bb)->flags = EDGE_TRUE_VALUE;
3291 find_edge (cont_bb, trip_update_bb)->flags = EDGE_FALSE_VALUE;
3293 redirect_edge_and_branch (single_succ_edge (trip_update_bb), iter_part_bb);
3295 if (gimple_in_ssa_p (cfun))
3296 {
3297 /* When we redirect the edge from trip_update_bb to iter_part_bb, we
3298 remove arguments of the phi nodes in fin_bb. We need to create
3299 appropriate phi nodes in iter_part_bb instead. */
3300 se = single_pred_edge (fin_bb);
3301 re = single_succ_edge (trip_update_bb);
3302 ene = single_succ_edge (entry_bb);
3304 args = PENDING_STMT (re);
3305 PENDING_STMT (re) = NULL_TREE;
3306 for (phi = phi_nodes (fin_bb);
3307 phi;
3308 phi = PHI_CHAIN (phi), args = TREE_CHAIN (args))
3309 {
3310 t = PHI_RESULT (phi);
3311 gcc_assert (t == TREE_PURPOSE (args));
3312 nphi = create_phi_node (t, iter_part_bb);
3313 SSA_NAME_DEF_STMT (t) = nphi;
3315 t = PHI_ARG_DEF_FROM_EDGE (phi, se);
3316 /* A special case -- fd->v is not yet computed in iter_part_bb, we
3317 need to use v_extra instead. */
3318 if (t == fd->v)
3319 t = v_extra;
3320 add_phi_arg (nphi, t, ene);
3321 add_phi_arg (nphi, TREE_VALUE (args), re);
3322 }
3323 gcc_assert (!phi && !args);
3324 while ((phi = phi_nodes (fin_bb)) != NULL_TREE)
3325 remove_phi_node (phi, NULL_TREE, false);
3327 /* Make phi node for trip. */
3328 phi = create_phi_node (trip_main, iter_part_bb);
3329 SSA_NAME_DEF_STMT (trip_main) = phi;
3330 add_phi_arg (phi, trip_back, single_succ_edge (trip_update_bb));
3331 add_phi_arg (phi, trip_init, single_succ_edge (entry_bb));
3332 }
3334 set_immediate_dominator (CDI_DOMINATORS, trip_update_bb, cont_bb);
3335 set_immediate_dominator (CDI_DOMINATORS, iter_part_bb,
3336 recompute_dominator (CDI_DOMINATORS, iter_part_bb));
3337 set_immediate_dominator (CDI_DOMINATORS, fin_bb,
3338 recompute_dominator (CDI_DOMINATORS, fin_bb));
3339 set_immediate_dominator (CDI_DOMINATORS, seq_start_bb,
3340 recompute_dominator (CDI_DOMINATORS, seq_start_bb));
3341 set_immediate_dominator (CDI_DOMINATORS, body_bb,
3342 recompute_dominator (CDI_DOMINATORS, body_bb));
3343 }
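/* Worked example (illustrative, with assumed values): for CHUNK = 8,
   nthreads = 4 and n = 100, thread TID executes the chunks starting at
   s0 = (trip * 4 + TID) * 8 for trip = 0, 1, 2, ...  Thread 1 thus
   covers [8,16), [40,48) and [72,80); thread 0's chunk [96,104) is
   clipped to [96,100) by the min, and a thread stops as soon as
   s0 >= n.  */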
3346 /* Expand the OpenMP loop defined by REGION. */
3348 static void
3349 expand_omp_for (struct omp_region *region)
3350 {
3351 struct omp_for_data fd;
3353 extract_omp_for_data (last_stmt (region->entry), &fd);
3354 region->sched_kind = fd.sched_kind;
3356 gcc_assert (EDGE_COUNT (region->entry->succs) == 2);
3357 BRANCH_EDGE (region->entry)->flags &= ~EDGE_ABNORMAL;
3358 FALLTHRU_EDGE (region->entry)->flags &= ~EDGE_ABNORMAL;
3359 if (region->cont)
3360 {
3361 gcc_assert (EDGE_COUNT (region->cont->succs) == 2);
3362 BRANCH_EDGE (region->cont)->flags &= ~EDGE_ABNORMAL;
3363 FALLTHRU_EDGE (region->cont)->flags &= ~EDGE_ABNORMAL;
3364 }
3366 if (fd.sched_kind == OMP_CLAUSE_SCHEDULE_STATIC
3367 && !fd.have_ordered
3368 && region->cont != NULL)
3369 {
3370 if (fd.chunk_size == NULL)
3371 expand_omp_for_static_nochunk (region, &fd);
3372 else
3373 expand_omp_for_static_chunk (region, &fd);
3374 }
3375 else
3376 {
3377 int fn_index = fd.sched_kind + fd.have_ordered * 4;
3378 int start_ix = BUILT_IN_GOMP_LOOP_STATIC_START + fn_index;
3379 int next_ix = BUILT_IN_GOMP_LOOP_STATIC_NEXT + fn_index;
3380 expand_omp_for_generic (region, &fd, start_ix, next_ix);
3383 update_ssa (TODO_update_ssa_only_virtuals);
3384 }
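/* Note on the index arithmetic above (illustrative; it assumes the GOMP
   loop builtins are declared in schedule order -- static, dynamic,
   guided, runtime -- with the ordered variants four entries later):
   under that layout, "schedule (dynamic) ordered" yields
   fn_index = 1 + 4 = 5, selecting GOMP_loop_ordered_dynamic_start and
   the matching _next builtin.  */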
3387 /* Expand code for an OpenMP sections directive. In pseudo code, we generate
3389 v = GOMP_sections_start (n);
3390 L0:
3391 switch (v)
3392 {
3393 case 0:
3394 goto L2;
3395 case 1:
3396 section 1;
3397 goto L1;
3398 case 2:
3399 ...
3400 case n:
3401 ...
3402 default:
3403 abort ();
3404 }
3405 L1:
3406 v = GOMP_sections_next ();
3407 goto L0;
3408 L2:
3409 reduction;
3411 If this is a combined parallel sections, replace the call to
3412 GOMP_sections_start with a call to GOMP_sections_next. */
3414 static void
3415 expand_omp_sections (struct omp_region *region)
3416 {
3417 tree label_vec, l1, l2, t, u, sections_stmt, vin, vmain, vnext, cont;
3418 unsigned i, casei, len;
3419 basic_block entry_bb, l0_bb, l1_bb, l2_bb, default_bb;
3420 block_stmt_iterator si;
3421 struct omp_region *inner;
3422 bool exit_reachable = region->cont != NULL;
3424 gcc_assert (exit_reachable == (region->exit != NULL));
3425 entry_bb = region->entry;
3426 l0_bb = single_succ (entry_bb);
3427 l1_bb = region->cont;
3428 l2_bb = region->exit;
3429 if (exit_reachable)
3430 {
3431 gcc_assert (single_pred (l2_bb) == l0_bb);
3432 default_bb = create_empty_bb (l1_bb->prev_bb);
3433 l1 = tree_block_label (l1_bb);
3434 l2 = tree_block_label (l2_bb);
3435 }
3436 else
3437 {
3438 default_bb = create_empty_bb (l0_bb);
3439 l1 = NULL_TREE;
3440 l2 = tree_block_label (default_bb);
3441 }
3443 /* We will build a switch() with enough cases for all the
3444 OMP_SECTION regions, a '0' case to handle the end of more work
3445 and a default case to abort if something goes wrong. */
3446 len = EDGE_COUNT (l0_bb->succs);
3447 label_vec = make_tree_vec (len + 1);
3449 /* The call to GOMP_sections_start goes in ENTRY_BB, replacing the
3450 OMP_SECTIONS statement. */
3451 si = bsi_last (entry_bb);
3452 sections_stmt = bsi_stmt (si);
3453 gcc_assert (TREE_CODE (sections_stmt) == OMP_SECTIONS);
3454 vin = OMP_SECTIONS_CONTROL (sections_stmt);
3455 if (!is_combined_parallel (region))
3456 {
3457 /* If we are not inside a combined parallel+sections region,
3458 call GOMP_sections_start. */
3459 t = build_int_cst (unsigned_type_node,
3460 exit_reachable ? len - 1 : len);
3461 u = built_in_decls[BUILT_IN_GOMP_SECTIONS_START];
3462 t = build_call_expr (u, 1, t);
3463 }
3464 else
3465 {
3466 /* Otherwise, call GOMP_sections_next. */
3467 u = built_in_decls[BUILT_IN_GOMP_SECTIONS_NEXT];
3468 t = build_call_expr (u, 0);
3469 }
3470 t = build_gimple_modify_stmt (vin, t);
3471 bsi_insert_after (&si, t, BSI_SAME_STMT);
3472 if (gimple_in_ssa_p (cfun))
3473 SSA_NAME_DEF_STMT (vin) = t;
3474 bsi_remove (&si, true);
3476 /* The switch() statement replacing OMP_SECTIONS_SWITCH goes in L0_BB. */
3477 si = bsi_last (l0_bb);
3478 gcc_assert (TREE_CODE (bsi_stmt (si)) == OMP_SECTIONS_SWITCH);
3479 if (exit_reachable)
3480 {
3481 cont = last_stmt (l1_bb);
3482 gcc_assert (TREE_CODE (cont) == OMP_CONTINUE);
3483 vmain = TREE_OPERAND (cont, 1);
3484 vnext = TREE_OPERAND (cont, 0);
3485 }
3486 else
3487 {
3488 vmain = vin;
3489 vnext = NULL_TREE;
3490 }
3492 t = build3 (SWITCH_EXPR, void_type_node, vmain, NULL, label_vec);
3493 bsi_insert_after (&si, t, BSI_SAME_STMT);
3494 bsi_remove (&si, true);
3496 i = 0;
3497 if (exit_reachable)
3498 {
3499 t = build3 (CASE_LABEL_EXPR, void_type_node,
3500 build_int_cst (unsigned_type_node, 0), NULL, l2);
3501 TREE_VEC_ELT (label_vec, 0) = t;
3502 i++;
3503 }
3505 /* Convert each OMP_SECTION into a CASE_LABEL_EXPR. */
3506 for (inner = region->inner, casei = 1;
3507 inner;
3508 inner = inner->next, i++, casei++)
3509 {
3510 basic_block s_entry_bb, s_exit_bb;
3512 s_entry_bb = inner->entry;
3513 s_exit_bb = inner->exit;
3515 t = tree_block_label (s_entry_bb);
3516 u = build_int_cst (unsigned_type_node, casei);
3517 u = build3 (CASE_LABEL_EXPR, void_type_node, u, NULL, t);
3518 TREE_VEC_ELT (label_vec, i) = u;
3520 si = bsi_last (s_entry_bb);
3521 gcc_assert (TREE_CODE (bsi_stmt (si)) == OMP_SECTION);
3522 gcc_assert (i < len || OMP_SECTION_LAST (bsi_stmt (si)));
3523 bsi_remove (&si, true);
3524 single_succ_edge (s_entry_bb)->flags = EDGE_FALLTHRU;
3526 if (s_exit_bb == NULL)
3527 continue;
3529 si = bsi_last (s_exit_bb);
3530 gcc_assert (TREE_CODE (bsi_stmt (si)) == OMP_RETURN);
3531 bsi_remove (&si, true);
3533 single_succ_edge (s_exit_bb)->flags = EDGE_FALLTHRU;
3534 }
3536 /* Error handling code goes in DEFAULT_BB. */
3537 t = tree_block_label (default_bb);
3538 u = build3 (CASE_LABEL_EXPR, void_type_node, NULL, NULL, t);
3539 TREE_VEC_ELT (label_vec, len) = u;
3540 make_edge (l0_bb, default_bb, 0);
3542 si = bsi_start (default_bb);
3543 t = build_call_expr (built_in_decls[BUILT_IN_TRAP], 0);
3544 bsi_insert_after (&si, t, BSI_CONTINUE_LINKING);
3546 if (exit_reachable)
3547 {
3548 /* Code to get the next section goes in L1_BB. */
3549 si = bsi_last (l1_bb);
3550 gcc_assert (TREE_CODE (bsi_stmt (si)) == OMP_CONTINUE);
3552 t = build_call_expr (built_in_decls[BUILT_IN_GOMP_SECTIONS_NEXT], 0);
3553 t = build_gimple_modify_stmt (vnext, t);
3554 bsi_insert_after (&si, t, BSI_SAME_STMT);
3555 if (gimple_in_ssa_p (cfun))
3556 SSA_NAME_DEF_STMT (vnext) = t;
3557 bsi_remove (&si, true);
3559 single_succ_edge (l1_bb)->flags = EDGE_FALLTHRU;
3561 /* Cleanup function replaces OMP_RETURN in EXIT_BB. */
3562 si = bsi_last (l2_bb);
3563 if (OMP_RETURN_NOWAIT (bsi_stmt (si)))
3564 t = built_in_decls[BUILT_IN_GOMP_SECTIONS_END_NOWAIT];
3566 t = built_in_decls[BUILT_IN_GOMP_SECTIONS_END];
3567 t = build_call_expr (t, 0);
3568 bsi_insert_after (&si, t, BSI_SAME_STMT);
3569 bsi_remove (&si, true);
3570 }
3572 set_immediate_dominator (CDI_DOMINATORS, default_bb, l0_bb);
3576 /* Expand code for an OpenMP single directive. We've already expanded
3577 much of the code, here we simply place the GOMP_barrier call. */
3579 static void
3580 expand_omp_single (struct omp_region *region)
3581 {
3582 basic_block entry_bb, exit_bb;
3583 block_stmt_iterator si;
3584 bool need_barrier = false;
3586 entry_bb = region->entry;
3587 exit_bb = region->exit;
3589 si = bsi_last (entry_bb);
3590 /* The terminal barrier at the end of a GOMP_single_copy sequence cannot
3591 be removed. We need to ensure that the thread that entered the single
3592 does not exit before the data is copied out by the other threads. */
3593 if (find_omp_clause (OMP_SINGLE_CLAUSES (bsi_stmt (si)),
3594 OMP_CLAUSE_COPYPRIVATE))
3595 need_barrier = true;
3596 gcc_assert (TREE_CODE (bsi_stmt (si)) == OMP_SINGLE);
3597 bsi_remove (&si, true);
3598 single_succ_edge (entry_bb)->flags = EDGE_FALLTHRU;
3600 si = bsi_last (exit_bb);
3601 if (!OMP_RETURN_NOWAIT (bsi_stmt (si)) || need_barrier)
3602 force_gimple_operand_bsi (&si, build_omp_barrier (), false, NULL_TREE,
3603 false, BSI_SAME_STMT);
3604 bsi_remove (&si, true);
3605 single_succ_edge (exit_bb)->flags = EDGE_FALLTHRU;
3606 }
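/* E.g. (illustrative): for

       #pragma omp single copyprivate (x)

   the barrier above is kept even when OMP_RETURN_NOWAIT is set, because
   the other threads still copy the broadcast value of 'x' after the
   single body finishes; dropping the barrier could let the producing
   thread reuse that storage first.  */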
3609 /* Generic expansion for OpenMP synchronization directives: master,
3610 ordered and critical. All we need to do here is remove the entry
3611 and exit markers for REGION. */
3613 static void
3614 expand_omp_synch (struct omp_region *region)
3615 {
3616 basic_block entry_bb, exit_bb;
3617 block_stmt_iterator si;
3619 entry_bb = region->entry;
3620 exit_bb = region->exit;
3622 si = bsi_last (entry_bb);
3623 gcc_assert (TREE_CODE (bsi_stmt (si)) == OMP_SINGLE
3624 || TREE_CODE (bsi_stmt (si)) == OMP_MASTER
3625 || TREE_CODE (bsi_stmt (si)) == OMP_ORDERED
3626 || TREE_CODE (bsi_stmt (si)) == OMP_CRITICAL);
3627 bsi_remove (&si, true);
3628 single_succ_edge (entry_bb)->flags = EDGE_FALLTHRU;
3630 if (exit_bb)
3631 {
3632 si = bsi_last (exit_bb);
3633 gcc_assert (TREE_CODE (bsi_stmt (si)) == OMP_RETURN);
3634 bsi_remove (&si, true);
3635 single_succ_edge (exit_bb)->flags = EDGE_FALLTHRU;
3636 }
3637 }
3639 /* A subroutine of expand_omp_atomic. Attempt to implement the atomic
3640 operation as a __sync_fetch_and_op builtin. INDEX is log2 of the
3641 size of the data type, and thus usable to find the index of the builtin
3642 decl. Returns false if the expression is not of the proper form. */
3644 static bool
3645 expand_omp_atomic_fetch_op (basic_block load_bb,
3646 tree addr, tree loaded_val,
3647 tree stored_val, int index)
3649 enum built_in_function base;
3650 tree decl, itype, call;
3651 enum insn_code *optab;
3653 basic_block store_bb = single_succ (load_bb);
3654 block_stmt_iterator bsi;
3655 tree stmt, rhs;
3657 /* We expect to find the following sequences:
3659 load_bb:
3660 OMP_ATOMIC_LOAD (tmp, mem)
3662 store_bb:
3663 val = tmp OP something; (or: something OP tmp)
3664 OMP_STORE (val)
3666 ???FIXME: Allow a more flexible sequence.
3667 Perhaps use data flow to pick the statements.
3669 */
3671 bsi = bsi_after_labels (store_bb);
3672 stmt = bsi_stmt (bsi);
3673 if (TREE_CODE (stmt) != GIMPLE_MODIFY_STMT)
3674 return false;
3675 bsi_next (&bsi);
3676 if (TREE_CODE (bsi_stmt (bsi)) != OMP_ATOMIC_STORE)
3677 return false;
3679 if (!operand_equal_p (GIMPLE_STMT_OPERAND (stmt, 0), stored_val, 0))
3680 return false;
3682 rhs = GIMPLE_STMT_OPERAND (stmt, 1);
3684 /* Check for one of the supported fetch-op operations. */
3685 switch (TREE_CODE (rhs))
3686 {
3687 case PLUS_EXPR:
3688 case POINTER_PLUS_EXPR:
3689 base = BUILT_IN_FETCH_AND_ADD_N;
3690 optab = sync_add_optab;
3691 break;
3692 case MINUS_EXPR:
3693 base = BUILT_IN_FETCH_AND_SUB_N;
3694 optab = sync_add_optab;
3695 break;
3696 case BIT_AND_EXPR:
3697 base = BUILT_IN_FETCH_AND_AND_N;
3698 optab = sync_and_optab;
3699 break;
3700 case BIT_IOR_EXPR:
3701 base = BUILT_IN_FETCH_AND_OR_N;
3702 optab = sync_ior_optab;
3703 break;
3704 case BIT_XOR_EXPR:
3705 base = BUILT_IN_FETCH_AND_XOR_N;
3706 optab = sync_xor_optab;
3707 break;
3708 default:
3709 return false;
3710 }
3711 /* Make sure the expression is of the proper form. */
3712 if (operand_equal_p (TREE_OPERAND (rhs, 0), loaded_val, 0))
3713 rhs = TREE_OPERAND (rhs, 1);
3714 else if (commutative_tree_code (TREE_CODE (rhs))
3715 && operand_equal_p (TREE_OPERAND (rhs, 1), loaded_val, 0))
3716 rhs = TREE_OPERAND (rhs, 0);
3717 else
3718 return false;
3720 decl = built_in_decls[base + index + 1];
3721 itype = TREE_TYPE (TREE_TYPE (decl));
3723 if (optab[TYPE_MODE (itype)] == CODE_FOR_nothing)
3724 return false;
3726 bsi = bsi_last (load_bb);
3727 gcc_assert (TREE_CODE (bsi_stmt (bsi)) == OMP_ATOMIC_LOAD);
3728 call = build_call_expr (decl, 2, addr, fold_convert (itype, rhs));
3729 force_gimple_operand_bsi (&bsi, call, true, NULL_TREE, true, BSI_SAME_STMT);
3730 bsi_remove (&bsi, true);
3732 bsi = bsi_last (store_bb);
3733 gcc_assert (TREE_CODE (bsi_stmt (bsi)) == OMP_ATOMIC_STORE);
3734 bsi_remove (&bsi, true);
3735 bsi = bsi_last (store_bb);
3736 bsi_remove (&bsi, true);
3738 if (gimple_in_ssa_p (cfun))
3739 update_ssa (TODO_update_ssa_no_phi);
3741 return true;
3742 }
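/* Illustrative example (assumed, not from the original sources): for

       #pragma omp atomic
       x += 5;

   with a 4-byte int 'x' (so INDEX == 2), the load/store pair collapses
   into the single call

       __sync_fetch_and_add_4 (&x, 5);

   the "_4" variant being built_in_decls[base + index + 1].  */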
3744 /* A subroutine of expand_omp_atomic. Implement the atomic operation as:
3746 oldval = *addr;
3747 repeat:
3748 newval = rhs; // with oldval replacing *addr in rhs
3749 oldval = __sync_val_compare_and_swap (addr, oldval, newval);
3750 if (oldval != newval)
3751 goto repeat;
3753 INDEX is log2 of the size of the data type, and thus usable to find the
3754 index of the builtin decl. */
3756 static bool
3757 expand_omp_atomic_pipeline (basic_block load_bb, basic_block store_bb,
3758 tree addr, tree loaded_val, tree stored_val,
3759 int index)
3760 {
3761 tree loadedi, storedi, initial, new_storedi, old_vali;
3762 tree type, itype, cmpxchg, iaddr;
3763 block_stmt_iterator bsi;
3764 basic_block loop_header = single_succ (load_bb);
3765 tree phi, x;
3766 edge e;
3768 cmpxchg = built_in_decls[BUILT_IN_VAL_COMPARE_AND_SWAP_N + index + 1];
3769 type = TYPE_MAIN_VARIANT (TREE_TYPE (TREE_TYPE (addr)));
3770 itype = TREE_TYPE (TREE_TYPE (cmpxchg));
3772 if (sync_compare_and_swap[TYPE_MODE (itype)] == CODE_FOR_nothing)
3773 return false;
3775 /* Load the initial value, replacing the OMP_ATOMIC_LOAD. */
3776 bsi = bsi_last (load_bb);
3777 gcc_assert (TREE_CODE (bsi_stmt (bsi)) == OMP_ATOMIC_LOAD);
3778 /* For floating-point values, we'll need to view-convert them to integers
3779 so that we can perform the atomic compare and swap. Simplify the
3780 following code by always setting up the "i"ntegral variables. */
3781 if (!INTEGRAL_TYPE_P (type) && !POINTER_TYPE_P (type))
3782 {
3783 iaddr = create_tmp_var (build_pointer_type (itype), NULL);
3784 x = build_gimple_modify_stmt (iaddr,
3785 fold_convert (TREE_TYPE (iaddr), addr));
3786 force_gimple_operand_bsi (&bsi, x, true, NULL_TREE,
3787 true, BSI_SAME_STMT);
3788 DECL_NO_TBAA_P (iaddr) = 1;
3789 DECL_POINTER_ALIAS_SET (iaddr) = 0;
3790 loadedi = create_tmp_var (itype, NULL);
3791 if (gimple_in_ssa_p (cfun))
3792 {
3793 add_referenced_var (iaddr);
3794 add_referenced_var (loadedi);
3795 loadedi = make_ssa_name (loadedi, NULL);
3796 }
3797 }
3798 else
3799 {
3800 iaddr = addr;
3801 loadedi = loaded_val;
3802 }
3803 initial = force_gimple_operand_bsi (&bsi, build_fold_indirect_ref (iaddr),
3804 true, NULL_TREE, true, BSI_SAME_STMT);
3806 /* Move the value to the LOADEDI temporary. */
3807 if (gimple_in_ssa_p (cfun))
3808 {
3809 gcc_assert (phi_nodes (loop_header) == NULL_TREE);
3810 phi = create_phi_node (loadedi, loop_header);
3811 SSA_NAME_DEF_STMT (loadedi) = phi;
3812 SET_USE (PHI_ARG_DEF_PTR_FROM_EDGE (phi, single_succ_edge (load_bb)),
3813 initial);
3814 }
3815 else
3816 bsi_insert_before (&bsi,
3817 build_gimple_modify_stmt (loadedi, initial),
3818 BSI_SAME_STMT);
3819 if (loadedi != loaded_val)
3820 {
3821 block_stmt_iterator bsi2;
3823 x = build1 (VIEW_CONVERT_EXPR, type, loadedi);
3824 bsi2 = bsi_start (loop_header);
3825 if (gimple_in_ssa_p (cfun))
3826 {
3827 x = force_gimple_operand_bsi (&bsi2, x, true, NULL_TREE,
3828 true, BSI_SAME_STMT);
3829 x = build_gimple_modify_stmt (loaded_val, x);
3830 bsi_insert_before (&bsi2, x, BSI_SAME_STMT);
3831 SSA_NAME_DEF_STMT (loaded_val) = x;
3832 }
3833 else
3834 {
3835 x = build_gimple_modify_stmt (loaded_val, x);
3836 force_gimple_operand_bsi (&bsi2, x, true, NULL_TREE,
3837 true, BSI_SAME_STMT);
3838 }
3839 }
3840 bsi_remove (&bsi, true);
3842 bsi = bsi_last (store_bb);
3843 gcc_assert (TREE_CODE (bsi_stmt (bsi)) == OMP_ATOMIC_STORE);
3845 if (INTEGRAL_TYPE_P (type) || POINTER_TYPE_P (type))
3846 storedi = stored_val;
3847 else
3848 storedi =
3849 force_gimple_operand_bsi (&bsi,
3850 build1 (VIEW_CONVERT_EXPR, itype,
3851 stored_val), true, NULL_TREE, true,
3852 BSI_SAME_STMT);
3854 /* Build the compare&swap statement. */
3855 new_storedi = build_call_expr (cmpxchg, 3, iaddr, loadedi, storedi);
3856 new_storedi = force_gimple_operand_bsi (&bsi,
3857 fold_convert (itype, new_storedi),
3858 true, NULL_TREE,
3859 true, BSI_SAME_STMT);
3861 if (gimple_in_ssa_p (cfun))
3862 old_vali = loadedi;
3863 else
3864 {
3865 old_vali = create_tmp_var (itype, NULL);
3866 if (gimple_in_ssa_p (cfun))
3867 add_referenced_var (old_vali);
3868 x = build_gimple_modify_stmt (old_vali, loadedi);
3869 force_gimple_operand_bsi (&bsi, x, true, NULL_TREE,
3870 true, BSI_SAME_STMT);
3872 x = build_gimple_modify_stmt (loadedi, new_storedi);
3873 force_gimple_operand_bsi (&bsi, x, true, NULL_TREE,
3874 true, BSI_SAME_STMT);
3875 }
3877 /* Note that we always perform the comparison as an integer, even for
3878 floating point. This allows the atomic operation to properly
3879 succeed even with NaNs and -0.0. */
3880 x = build2 (NE_EXPR, boolean_type_node, new_storedi, old_vali);
3881 x = build3 (COND_EXPR, void_type_node, x, NULL_TREE, NULL_TREE);
3882 bsi_insert_before (&bsi, x, BSI_SAME_STMT);
3885 e = single_succ_edge (store_bb);
3886 e->flags &= ~EDGE_FALLTHRU;
3887 e->flags |= EDGE_FALSE_VALUE;
3889 e = make_edge (store_bb, loop_header, EDGE_TRUE_VALUE);
3891 /* Copy the new value to loadedi (we already did that before the condition
3892 if we are not in SSA). */
3893 if (gimple_in_ssa_p (cfun))
3894 {
3895 phi = phi_nodes (loop_header);
3896 SET_USE (PHI_ARG_DEF_PTR_FROM_EDGE (phi, e), new_storedi);
3897 }
3899 /* Remove OMP_ATOMIC_STORE. */
3900 bsi_remove (&bsi, true);
3902 if (gimple_in_ssa_p (cfun))
3903 update_ssa (TODO_update_ssa_no_phi);
3905 return true;
3906 }
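/* Illustrative example (assumed): for

       #pragma omp atomic
       d *= 2.0;

   on a double 'd' there is no fetch-and-op builtin, so the loop built
   above keeps the value as an 8-byte integer image, computes the new
   value through VIEW_CONVERT_EXPRs, and retries

       __sync_val_compare_and_swap_8 (iaddr, loadedi, storedi)

   until the returned bits equal the ones loaded.  Comparing as an
   integer is what lets NaNs and -0.0 terminate the loop.  */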
3908 /* A subroutine of expand_omp_atomic. Implement the atomic operation as:
3910 GOMP_atomic_start ();
3911 *addr = rhs;
3912 GOMP_atomic_end ();
3914 The result is not globally atomic, but works so long as all parallel
3915 references are within #pragma omp atomic directives. According to
3916 responses received from omp@openmp.org, this appears to be within spec.
3917 Which makes sense, since that's how several other compilers handle
3918 this situation as well.
3919 LOADED_VAL and ADDR are the operands of OMP_ATOMIC_LOAD we're expanding.
3920 STORED_VAL is the operand of the matching OMP_ATOMIC_STORE.
3922 We replace
3923 OMP_ATOMIC_LOAD (loaded_val, addr) with
3924 loaded_val = *addr;
3926 and replace
3927 OMP_ATOMIC_STORE (stored_val) with
3928 *addr = stored_val;
3929 */
3931 static void
3932 expand_omp_atomic_mutex (basic_block load_bb, basic_block store_bb,
3933 tree addr, tree loaded_val, tree stored_val)
3934 {
3935 block_stmt_iterator bsi;
3936 tree t;
3938 bsi = bsi_last (load_bb);
3939 gcc_assert (TREE_CODE (bsi_stmt (bsi)) == OMP_ATOMIC_LOAD);
3941 t = built_in_decls[BUILT_IN_GOMP_ATOMIC_START];
3942 t = build_function_call_expr (t, 0);
3943 force_gimple_operand_bsi (&bsi, t, true, NULL_TREE, true, BSI_SAME_STMT);
3945 t = build_gimple_modify_stmt (loaded_val, build_fold_indirect_ref (addr));
3946 if (gimple_in_ssa_p (cfun))
3947 SSA_NAME_DEF_STMT (loaded_val) = t;
3948 bsi_insert_before (&bsi, t, BSI_SAME_STMT);
3949 bsi_remove (&bsi, true);
3951 bsi = bsi_last (store_bb);
3952 gcc_assert (TREE_CODE (bsi_stmt (bsi)) == OMP_ATOMIC_STORE);
3954 t = build_gimple_modify_stmt (build_fold_indirect_ref (unshare_expr (addr)),
3955 stored_val);
3956 bsi_insert_before (&bsi, t, BSI_SAME_STMT);
3958 t = built_in_decls[BUILT_IN_GOMP_ATOMIC_END];
3959 t = build_function_call_expr (t, 0);
3960 force_gimple_operand_bsi (&bsi, t, true, NULL_TREE, true, BSI_SAME_STMT);
3961 bsi_remove (&bsi, true);
3963 if (gimple_in_ssa_p (cfun))
3964 update_ssa (TODO_update_ssa_no_phi);
3965 }
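/* Illustrative example (assumed): on a target with no compare-and-swap
   for the access size (say, a 16-byte long double), the two statements
   become

       GOMP_atomic_start ();
       loaded_val = *addr;
       *addr = stored_val;
       GOMP_atomic_end ();

   serializing every such update through one global lock in libgomp.  */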
3968 /* Expand an OMP_ATOMIC statement. We try to expand
3969 using expand_omp_atomic_fetch_op. If it failed, we try to
3970 call expand_omp_atomic_pipeline, and if it fails too, the
3971 ultimate fallback is wrapping the operation in a mutex
3972 (expand_omp_atomic_mutex). REGION is the atomic region built
3973 by build_omp_regions_1(). */
3975 static void
3976 expand_omp_atomic (struct omp_region *region)
3977 {
3978 basic_block load_bb = region->entry, store_bb = region->exit;
3979 tree load = last_stmt (load_bb), store = last_stmt (store_bb);
3980 tree loaded_val = TREE_OPERAND (load, 0);
3981 tree addr = TREE_OPERAND (load, 1);
3982 tree stored_val = TREE_OPERAND (store, 0);
3983 tree type = TYPE_MAIN_VARIANT (TREE_TYPE (TREE_TYPE (addr)));
3984 HOST_WIDE_INT index;
3986 /* Make sure the type is one of the supported sizes. */
3987 index = tree_low_cst (TYPE_SIZE_UNIT (type), 1);
3988 index = exact_log2 (index);
3989 if (index >= 0 && index <= 4)
3990 {
3991 unsigned int align = TYPE_ALIGN_UNIT (type);
3993 /* __sync builtins require strict data alignment. */
3994 if (exact_log2 (align) >= index)
3995 {
3996 /* When possible, use specialized atomic update functions. */
3997 if ((INTEGRAL_TYPE_P (type) || POINTER_TYPE_P (type))
3998 && store_bb == single_succ (load_bb))
3999 {
4000 if (expand_omp_atomic_fetch_op (load_bb, addr,
4001 loaded_val, stored_val, index))
4002 return;
4003 }
4005 /* If we don't have specialized __sync builtins, try and implement
4006 as a compare and swap loop. */
4007 if (expand_omp_atomic_pipeline (load_bb, store_bb, addr,
4008 loaded_val, stored_val, index))
4009 return;
4010 }
4011 }
4013 /* The ultimate fallback is wrapping the operation in a mutex. */
4014 expand_omp_atomic_mutex (load_bb, store_bb, addr, loaded_val, stored_val);
4015 }
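/* Dispatch summary (illustrative): an aligned 4-byte "x++" takes the
   fetch-op path; an 8-byte double update falls through to the
   compare-and-swap pipeline; anything with exact_log2 (align) < index,
   or a size outside the 1..16 byte range, ends up in the mutex
   fallback.  */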
4018 /* Expand the parallel region tree rooted at REGION. Expansion
4019 proceeds in depth-first order. Innermost regions are expanded
4020 first. This way, parallel regions that require a new function to
4021 be created (e.g., OMP_PARALLEL) can be expanded without having any
4022 internal dependencies in their body. */
4024 static void
4025 expand_omp (struct omp_region *region)
4026 {
4027 while (region)
4028 {
4029 /* First, determine whether this is a combined parallel+workshare
4030 region. */
4031 if (region->type == OMP_PARALLEL)
4032 determine_parallel_type (region);
4034 if (region->inner)
4035 expand_omp (region->inner);
4037 switch (region->type)
4038 {
4039 case OMP_PARALLEL:
4040 expand_omp_parallel (region);
4041 break;
4043 case OMP_FOR:
4044 expand_omp_for (region);
4045 break;
4047 case OMP_SECTIONS:
4048 expand_omp_sections (region);
4049 break;
4051 case OMP_SECTION:
4052 /* Individual omp sections are handled together with their
4053 parent OMP_SECTIONS region. */
4054 break;
4056 case OMP_SINGLE:
4057 expand_omp_single (region);
4058 break;
4060 case OMP_MASTER:
4061 case OMP_ORDERED:
4062 case OMP_CRITICAL:
4063 expand_omp_synch (region);
4064 break;
4066 case OMP_ATOMIC_LOAD:
4067 expand_omp_atomic (region);
4068 break;
4070 default:
4071 gcc_unreachable ();
4072 }
4075 region = region->next;
4076 }
4077 }
4080 /* Helper for build_omp_regions. Scan the dominator tree starting at
4081 block BB. PARENT is the region that contains BB. If SINGLE_TREE is
4082 true, the function ends once a single tree is built (otherwise, whole
4083 forest of OMP constructs may be built). */
4085 static void
4086 build_omp_regions_1 (basic_block bb, struct omp_region *parent,
4087 bool single_tree)
4088 {
4089 block_stmt_iterator si;
4090 tree stmt;
4091 basic_block son;
4093 si = bsi_last (bb);
4094 if (!bsi_end_p (si) && OMP_DIRECTIVE_P (bsi_stmt (si)))
4095 {
4096 struct omp_region *region;
4097 enum tree_code code;
4099 stmt = bsi_stmt (si);
4100 code = TREE_CODE (stmt);
4101 if (code == OMP_RETURN)
4102 {
4103 /* STMT is the return point out of region PARENT. Mark it
4104 as the exit point and make PARENT the immediately
4105 enclosing region. */
4106 gcc_assert (parent);
4107 region = parent;
4108 region->exit = bb;
4109 parent = parent->outer;
4110 }
4111 else if (code == OMP_ATOMIC_STORE)
4112 {
4113 /* OMP_ATOMIC_STORE is analogous to OMP_RETURN, but matches with
4114 OMP_ATOMIC_LOAD. */
4115 gcc_assert (parent);
4116 gcc_assert (parent->type == OMP_ATOMIC_LOAD);
4117 region = parent;
4118 region->exit = bb;
4119 parent = parent->outer;
4120 }
4122 else if (code == OMP_CONTINUE)
4123 {
4124 gcc_assert (parent);
4125 parent->cont = bb;
4126 }
4127 else if (code == OMP_SECTIONS_SWITCH)
4129 /* OMP_SECTIONS_SWITCH is part of OMP_SECTIONS, and we do nothing for
4130 it. */ ;
4131 }
4132 else
4133 {
4134 /* Otherwise, this directive becomes the parent for a new
4135 region. */
4136 region = new_omp_region (bb, code, parent);
4137 parent = region;
4138 }
4139 }
4141 if (single_tree && !parent)
4142 return;
4144 for (son = first_dom_son (CDI_DOMINATORS, bb);
4145 son;
4146 son = next_dom_son (CDI_DOMINATORS, son))
4147 build_omp_regions_1 (son, parent, single_tree);
4148 }
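/* For illustration: a body containing

       #pragma omp parallel
       {
         #pragma omp for ...
       }

   yields a root OMP_PARALLEL region whose 'inner' field holds the
   OMP_FOR region; each OMP_RETURN encountered on the way back marks the
   matching region's exit and pops PARENT to the enclosing region.  */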
4150 /* Builds the tree of OMP regions rooted at ROOT, storing it to
4151 root_omp_region. */
4153 static void
4154 build_omp_regions_root (basic_block root)
4155 {
4156 gcc_assert (root_omp_region == NULL);
4157 build_omp_regions_1 (root, NULL, true);
4158 gcc_assert (root_omp_region != NULL);
4161 /* Expands omp construct (and its subconstructs) starting in HEAD. */
4163 void
4164 omp_expand_local (basic_block head)
4165 {
4166 build_omp_regions_root (head);
4167 if (dump_file && (dump_flags & TDF_DETAILS))
4168 {
4169 fprintf (dump_file, "\nOMP region tree\n\n");
4170 dump_omp_region (dump_file, root_omp_region, 0);
4171 fprintf (dump_file, "\n");
4172 }
4174 remove_exit_barriers (root_omp_region);
4175 expand_omp (root_omp_region);
4177 free_omp_regions ();
4180 /* Scan the CFG and build a tree of OMP regions. Return the root of
4181 the OMP region tree. */
4183 static void
4184 build_omp_regions (void)
4185 {
4186 gcc_assert (root_omp_region == NULL);
4187 calculate_dominance_info (CDI_DOMINATORS);
4188 build_omp_regions_1 (ENTRY_BLOCK_PTR, NULL, false);
4192 /* Main entry point for expanding OMP-GIMPLE into runtime calls. */
4194 static unsigned int
4195 execute_expand_omp (void)
4196 {
4197 build_omp_regions ();
4199 if (!root_omp_region)
4200 return 0;
4202 if (dump_file)
4203 {
4204 fprintf (dump_file, "\nOMP region tree\n\n");
4205 dump_omp_region (dump_file, root_omp_region, 0);
4206 fprintf (dump_file, "\n");
4207 }
4209 remove_exit_barriers (root_omp_region);
4211 expand_omp (root_omp_region);
4213 cleanup_tree_cfg ();
4215 free_omp_regions ();
4217 return 0;
4218 }
4220 /* OMP expansion in SSA form. For testing purposes only. */
4222 static bool
4223 gate_expand_omp_ssa (void)
4224 {
4225 return flag_openmp_ssa && flag_openmp != 0 && errorcount == 0;
4226 }
4228 struct gimple_opt_pass pass_expand_omp_ssa =
4229 {
4230 {
4231 GIMPLE_PASS,
4232 "ompexpssa", /* name */
4233 gate_expand_omp_ssa, /* gate */
4234 execute_expand_omp, /* execute */
4235 NULL, /* sub */
4236 NULL, /* next */
4237 0, /* static_pass_number */
4238 0, /* tv_id */
4239 PROP_gimple_any, /* properties_required */
4240 PROP_gimple_lomp, /* properties_provided */
4241 0, /* properties_destroyed */
4242 0, /* todo_flags_start */
4243 TODO_dump_func /* todo_flags_finish */
4244 }
4245 };
4247 /* OMP expansion -- the default pass, run before creation of SSA form. */
4249 static bool
4250 gate_expand_omp (void)
4251 {
4252 return ((!flag_openmp_ssa || !optimize)
4253 && flag_openmp != 0 && errorcount == 0);
4254 }
4256 struct gimple_opt_pass pass_expand_omp =
4257 {
4258 {
4259 GIMPLE_PASS,
4260 "ompexp", /* name */
4261 gate_expand_omp, /* gate */
4262 execute_expand_omp, /* execute */
4263 NULL, /* sub */
4264 NULL, /* next */
4265 0, /* static_pass_number */
4266 0, /* tv_id */
4267 PROP_gimple_any, /* properties_required */
4268 PROP_gimple_lomp, /* properties_provided */
4269 0, /* properties_destroyed */
4270 0, /* todo_flags_start */
4271 TODO_dump_func /* todo_flags_finish */
4272 }
4273 };
4275 /* Routines to lower OpenMP directives into OMP-GIMPLE. */
4277 /* Lower the OpenMP sections directive in *STMT_P. */
4279 static void
4280 lower_omp_sections (tree *stmt_p, omp_context *ctx)
4281 {
4282 tree new_stmt, stmt, body, bind, block, ilist, olist, new_body, control;
4283 tree t, dlist;
4284 tree_stmt_iterator tsi;
4285 unsigned i, len;
4287 stmt = *stmt_p;
4289 push_gimplify_context ();
4291 dlist = NULL;
4292 ilist = NULL;
4293 lower_rec_input_clauses (OMP_SECTIONS_CLAUSES (stmt), &ilist, &dlist, ctx);
4295 tsi = tsi_start (OMP_SECTIONS_BODY (stmt));
4296 for (len = 0; !tsi_end_p (tsi); len++, tsi_next (&tsi))
4297 continue;
4299 tsi = tsi_start (OMP_SECTIONS_BODY (stmt));
4300 body = alloc_stmt_list ();
4301 for (i = 0; i < len; i++, tsi_next (&tsi))
4302 {
4303 omp_context *sctx;
4304 tree sec_start, sec_end;
4306 sec_start = tsi_stmt (tsi);
4307 sctx = maybe_lookup_ctx (sec_start);
4308 gcc_assert (sctx);
4310 append_to_statement_list (sec_start, &body);
4312 lower_omp (&OMP_SECTION_BODY (sec_start), sctx);
4313 append_to_statement_list (OMP_SECTION_BODY (sec_start), &body);
4314 OMP_SECTION_BODY (sec_start) = NULL;
4316 if (i == len - 1)
4317 {
4318 tree l = alloc_stmt_list ();
4319 lower_lastprivate_clauses (OMP_SECTIONS_CLAUSES (stmt), NULL,
4320 &l, ctx);
4321 append_to_statement_list (l, &body);
4322 OMP_SECTION_LAST (sec_start) = 1;
4323 }
4325 sec_end = make_node (OMP_RETURN);
4326 append_to_statement_list (sec_end, &body);
4327 }
4329 block = make_node (BLOCK);
4330 bind = build3 (BIND_EXPR, void_type_node, NULL, body, block);
4332 olist = NULL_TREE;
4333 lower_reduction_clauses (OMP_SECTIONS_CLAUSES (stmt), &olist, ctx);
4335 pop_gimplify_context (NULL_TREE);
4336 record_vars_into (ctx->block_vars, ctx->cb.dst_fn);
4338 new_stmt = build3 (BIND_EXPR, void_type_node, NULL, NULL, NULL);
4339 TREE_SIDE_EFFECTS (new_stmt) = 1;
4341 new_body = alloc_stmt_list ();
4342 append_to_statement_list (ilist, &new_body);
4343 append_to_statement_list (stmt, &new_body);
4344 append_to_statement_list (make_node (OMP_SECTIONS_SWITCH), &new_body);
4345 append_to_statement_list (bind, &new_body);
4347 control = create_tmp_var (unsigned_type_node, ".section");
4348 t = build2 (OMP_CONTINUE, void_type_node, control, control);
4349 OMP_SECTIONS_CONTROL (stmt) = control;
4350 append_to_statement_list (t, &new_body);
4352 append_to_statement_list (olist, &new_body);
4353 append_to_statement_list (dlist, &new_body);
4355 maybe_catch_exception (&new_body);
4357 t = make_node (OMP_RETURN);
4358 OMP_RETURN_NOWAIT (t) = !!find_omp_clause (OMP_SECTIONS_CLAUSES (stmt),
4359 OMP_CLAUSE_NOWAIT);
4360 append_to_statement_list (t, &new_body);
4362 BIND_EXPR_BODY (new_stmt) = new_body;
4363 OMP_SECTIONS_BODY (stmt) = NULL;
4364 *stmt_p = new_stmt;
4365 }
4369 /* A subroutine of lower_omp_single. Expand the simple form of
4370 an OMP_SINGLE, without a copyprivate clause:
4372 if (GOMP_single_start ())
4373 BODY;
4374 [ GOMP_barrier (); ] -> unless 'nowait' is present.
4376 FIXME. It may be better to delay expanding the logic of this until
4377 pass_expand_omp. The expanded logic may make the job more difficult
4378 to a synchronization analysis pass. */
4380 static void
4381 lower_omp_single_simple (tree single_stmt, tree *pre_p)
4382 {
4383 tree t;
4385 t = build_call_expr (built_in_decls[BUILT_IN_GOMP_SINGLE_START], 0);
4386 t = build3 (COND_EXPR, void_type_node, t,
4387 OMP_SINGLE_BODY (single_stmt), NULL);
4388 gimplify_and_add (t, pre_p);
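/* Example (editorial, not from the original sources): with this lowering,

       #pragma omp single nowait
         x = init ();

   becomes, roughly,

       if (GOMP_single_start ())
         x = init ();

   GOMP_single_start returns true in exactly one of the arriving threads.
   The barrier for the non-nowait case is not emitted here; it comes from
   the OMP_RETURN marker that lower_omp_single appends afterwards. */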
4392 /* A subroutine of lower_omp_single. Expand the simple form of
4393 an OMP_SINGLE, with a copyprivate clause:
4395 #pragma omp single copyprivate (a, b, c)
4397 Create a new structure to hold copies of 'a', 'b' and 'c' and emit:
4399 {
4400 if ((copyout_p = GOMP_single_copy_start ()) == NULL)
4401 {
4402 BODY;
4403 copyout.a = a;
4404 copyout.b = b;
4405 copyout.c = c;
4406 GOMP_single_copy_end (&copyout);
4407 }
4408 else
4409 {
4410 a = copyout_p->a;
4411 b = copyout_p->b;
4412 c = copyout_p->c;
4413 }
4414 GOMP_barrier ();
4415 }
4417 FIXME. It may be better to delay expanding the logic of this until
4418 pass_expand_omp. The expanded logic may make the job more difficult
4419 for a synchronization analysis pass. */
4422 lower_omp_single_copy (tree single_stmt, tree *pre_p, omp_context *ctx)
4424 tree ptr_type, t, l0, l1, l2, copyin_seq;
4426 ctx->sender_decl = create_tmp_var (ctx->record_type, ".omp_copy_o");
4428 ptr_type = build_pointer_type (ctx->record_type);
4429 ctx->receiver_decl = create_tmp_var (ptr_type, ".omp_copy_i");
4431 l0 = create_artificial_label ();
4432 l1 = create_artificial_label ();
4433 l2 = create_artificial_label ();
4435 t = build_call_expr (built_in_decls[BUILT_IN_GOMP_SINGLE_COPY_START], 0);
4436 t = fold_convert (ptr_type, t);
4437 t = build_gimple_modify_stmt (ctx->receiver_decl, t);
4438 gimplify_and_add (t, pre_p);
4440 t = build2 (EQ_EXPR, boolean_type_node, ctx->receiver_decl,
4441 build_int_cst (ptr_type, 0));
4442 t = build3 (COND_EXPR, void_type_node, t,
4443 build_and_jump (&l0), build_and_jump (&l1));
4444 gimplify_and_add (t, pre_p);
4446 t = build1 (LABEL_EXPR, void_type_node, l0);
4447 gimplify_and_add (t, pre_p);
4449 append_to_statement_list (OMP_SINGLE_BODY (single_stmt), pre_p);
4451 copyin_seq = NULL;
4452 lower_copyprivate_clauses (OMP_SINGLE_CLAUSES (single_stmt), pre_p,
4453 &copyin_seq, ctx);
4455 t = build_fold_addr_expr (ctx->sender_decl);
4456 t = build_call_expr (built_in_decls[BUILT_IN_GOMP_SINGLE_COPY_END], 1, t);
4457 gimplify_and_add (t, pre_p);
4459 t = build_and_jump (&l2);
4460 gimplify_and_add (t, pre_p);
4462 t = build1 (LABEL_EXPR, void_type_node, l1);
4463 gimplify_and_add (t, pre_p);
4465 append_to_statement_list (copyin_seq, pre_p);
4467 t = build1 (LABEL_EXPR, void_type_node, l2);
4468 gimplify_and_add (t, pre_p);
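/* Editorial note: the three labels linearize the if/else in the comment
   above. L0 is the branch of the thread that won GOMP_single_copy_start
   (it runs BODY, fills .omp_copy_o, and publishes its address via
   GOMP_single_copy_end); L1 receives copyin_seq, the statements generated
   by lower_copyprivate_clauses that read the record back through
   .omp_copy_i; L2 is where both paths rejoin. */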
4472 /* Expand code for an OpenMP single directive. */
4475 lower_omp_single (tree *stmt_p, omp_context *ctx)
4477 tree t, bind, block, single_stmt = *stmt_p, dlist;
4479 push_gimplify_context ();
4481 block = make_node (BLOCK);
4482 *stmt_p = bind = build3 (BIND_EXPR, void_type_node, NULL, NULL, block);
4483 TREE_SIDE_EFFECTS (bind) = 1;
4485 lower_rec_input_clauses (OMP_SINGLE_CLAUSES (single_stmt),
4486 &BIND_EXPR_BODY (bind), &dlist, ctx);
4487 lower_omp (&OMP_SINGLE_BODY (single_stmt), ctx);
4489 append_to_statement_list (single_stmt, &BIND_EXPR_BODY (bind));
4491 if (ctx->record_type)
4492 lower_omp_single_copy (single_stmt, &BIND_EXPR_BODY (bind), ctx);
4493 else
4494 lower_omp_single_simple (single_stmt, &BIND_EXPR_BODY (bind));
4496 OMP_SINGLE_BODY (single_stmt) = NULL;
4498 append_to_statement_list (dlist, &BIND_EXPR_BODY (bind));
4500 maybe_catch_exception (&BIND_EXPR_BODY (bind));
4502 t = make_node (OMP_RETURN);
4503 OMP_RETURN_NOWAIT (t) = !!find_omp_clause (OMP_SINGLE_CLAUSES (single_stmt),
4504 OMP_CLAUSE_NOWAIT);
4505 append_to_statement_list (t, &BIND_EXPR_BODY (bind));
4507 pop_gimplify_context (bind);
4509 BIND_EXPR_VARS (bind) = chainon (BIND_EXPR_VARS (bind), ctx->block_vars);
4510 BLOCK_VARS (block) = BIND_EXPR_VARS (bind);
4514 /* Expand code for an OpenMP master directive. */
4517 lower_omp_master (tree *stmt_p, omp_context *ctx)
4519 tree bind, block, stmt = *stmt_p, lab = NULL, x;
4521 push_gimplify_context ();
4523 block = make_node (BLOCK);
4524 *stmt_p = bind = build3 (BIND_EXPR, void_type_node, NULL, NULL, block);
4525 TREE_SIDE_EFFECTS (bind) = 1;
4527 append_to_statement_list (stmt, &BIND_EXPR_BODY (bind));
4529 x = build_call_expr (built_in_decls[BUILT_IN_OMP_GET_THREAD_NUM], 0);
4530 x = build2 (EQ_EXPR, boolean_type_node, x, integer_zero_node);
4531 x = build3 (COND_EXPR, void_type_node, x, NULL, build_and_jump (&lab));
4532 gimplify_and_add (x, &BIND_EXPR_BODY (bind));
4534 lower_omp (&OMP_MASTER_BODY (stmt), ctx);
4535 maybe_catch_exception (&OMP_MASTER_BODY (stmt));
4536 append_to_statement_list (OMP_MASTER_BODY (stmt), &BIND_EXPR_BODY (bind));
4537 OMP_MASTER_BODY (stmt) = NULL;
4539 x = build1 (LABEL_EXPR, void_type_node, lab);
4540 gimplify_and_add (x, &BIND_EXPR_BODY (bind));
4542 x = make_node (OMP_RETURN);
4543 OMP_RETURN_NOWAIT (x) = 1;
4544 append_to_statement_list (x, &BIND_EXPR_BODY (bind));
4546 pop_gimplify_context (bind);
4548 BIND_EXPR_VARS (bind) = chainon (BIND_EXPR_VARS (bind), ctx->block_vars);
4549 BLOCK_VARS (block) = BIND_EXPR_VARS (bind);
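/* Illustration (an editorial sketch, not from the original sources): given

       #pragma omp master
         x = compute ();

   the code built above amounts to

       if (omp_get_thread_num () == 0)
         x = compute ();
     lab:
       OMP_RETURN [nowait]

   i.e. non-master threads jump straight to LAB, and no barrier is implied
   at the end of the region (OMP_RETURN_NOWAIT is always set). */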
4553 /* Expand code for an OpenMP ordered directive. */
4556 lower_omp_ordered (tree *stmt_p, omp_context *ctx)
4558 tree bind, block, stmt = *stmt_p, x;
4560 push_gimplify_context ();
4562 block = make_node (BLOCK);
4563 *stmt_p = bind = build3 (BIND_EXPR, void_type_node, NULL, NULL, block);
4564 TREE_SIDE_EFFECTS (bind) = 1;
4566 append_to_statement_list (stmt, &BIND_EXPR_BODY (bind));
4568 x = build_call_expr (built_in_decls[BUILT_IN_GOMP_ORDERED_START], 0);
4569 gimplify_and_add (x, &BIND_EXPR_BODY (bind));
4571 lower_omp (&OMP_ORDERED_BODY (stmt), ctx);
4572 maybe_catch_exception (&OMP_ORDERED_BODY (stmt));
4573 append_to_statement_list (OMP_ORDERED_BODY (stmt), &BIND_EXPR_BODY (bind));
4574 OMP_ORDERED_BODY (stmt) = NULL;
4576 x = build_call_expr (built_in_decls[BUILT_IN_GOMP_ORDERED_END], 0);
4577 gimplify_and_add (x, &BIND_EXPR_BODY (bind));
4579 x = make_node (OMP_RETURN);
4580 OMP_RETURN_NOWAIT (x) = 1;
4581 append_to_statement_list (x, &BIND_EXPR_BODY (bind));
4583 pop_gimplify_context (bind);
4585 BIND_EXPR_VARS (bind) = chainon (BIND_EXPR_VARS (bind), ctx->block_vars);
4586 BLOCK_VARS (block) = BIND_EXPR_VARS (bind);
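/* Illustration (an editorial sketch): the body of an ordered region is
   simply bracketed by the runtime calls,

       GOMP_ordered_start ();
       BODY;
       GOMP_ordered_end ();
       OMP_RETURN [nowait]

   with the actual iteration sequencing enforced inside libgomp. */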
4590 /* Gimplify an OMP_CRITICAL statement. This is a relatively simple
4591 substitution of a couple of function calls. But in the NAMED case,
4592 it requires that the languages coordinate a symbol name. It is therefore
4593 best put here in common code. */
4595 static GTY((param1_is (tree), param2_is (tree)))
4596 splay_tree critical_name_mutexes;
4599 lower_omp_critical (tree *stmt_p, omp_context *ctx)
4601 tree bind, block, stmt = *stmt_p;
4602 tree t, lock, unlock, name;
4604 name = OMP_CRITICAL_NAME (stmt);
4610 if (!critical_name_mutexes)
4611 critical_name_mutexes
4612 = splay_tree_new_ggc (splay_tree_compare_pointers);
4614 n = splay_tree_lookup (critical_name_mutexes, (splay_tree_key) name);
4619 decl = create_tmp_var_raw (ptr_type_node, NULL);
4621 new_str = ACONCAT ((".gomp_critical_user_",
4622 IDENTIFIER_POINTER (name), NULL));
4623 DECL_NAME (decl) = get_identifier (new_str);
4624 TREE_PUBLIC (decl) = 1;
4625 TREE_STATIC (decl) = 1;
4626 DECL_COMMON (decl) = 1;
4627 DECL_ARTIFICIAL (decl) = 1;
4628 DECL_IGNORED_P (decl) = 1;
4629 varpool_finalize_decl (decl);
4631 splay_tree_insert (critical_name_mutexes, (splay_tree_key) name,
4632 (splay_tree_value) decl);
4635 decl = (tree) n->value;
4637 lock = built_in_decls[BUILT_IN_GOMP_CRITICAL_NAME_START];
4638 lock = build_call_expr (lock, 1, build_fold_addr_expr (decl));
4640 unlock = built_in_decls[BUILT_IN_GOMP_CRITICAL_NAME_END];
4641 unlock = build_call_expr (unlock, 1, build_fold_addr_expr (decl));
4645 lock = built_in_decls[BUILT_IN_GOMP_CRITICAL_START];
4646 lock = build_call_expr (lock, 0);
4648 unlock = built_in_decls[BUILT_IN_GOMP_CRITICAL_END];
4649 unlock = build_call_expr (unlock, 0);
4652 push_gimplify_context ();
4654 block = make_node (BLOCK);
4655 *stmt_p = bind = build3 (BIND_EXPR, void_type_node, NULL, NULL, block);
4656 TREE_SIDE_EFFECTS (bind) = 1;
4658 append_to_statement_list (stmt, &BIND_EXPR_BODY (bind));
4660 gimplify_and_add (lock, &BIND_EXPR_BODY (bind));
4662 lower_omp (&OMP_CRITICAL_BODY (stmt), ctx);
4663 maybe_catch_exception (&OMP_CRITICAL_BODY (stmt));
4664 append_to_statement_list (OMP_CRITICAL_BODY (stmt), &BIND_EXPR_BODY (bind));
4665 OMP_CRITICAL_BODY (stmt) = NULL;
4667 gimplify_and_add (unlock, &BIND_EXPR_BODY (bind));
4669 t = make_node (OMP_RETURN);
4670 OMP_RETURN_NOWAIT (t) = 1;
4671 append_to_statement_list (t, &BIND_EXPR_BODY (bind));
4673 pop_gimplify_context (bind);
4674 BIND_EXPR_VARS (bind) = chainon (BIND_EXPR_VARS (bind), ctx->block_vars);
4675 BLOCK_VARS (block) = BIND_EXPR_VARS (bind);
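/* Illustration (an editorial sketch): an unnamed `#pragma omp critical'
   lowers to

       GOMP_critical_start ();  BODY;  GOMP_critical_end ();

   while a named one, `#pragma omp critical (foo)', references a mutex
   decl:

       GOMP_critical_name_start (&.gomp_critical_user_foo);
       BODY;
       GOMP_critical_name_end (&.gomp_critical_user_foo);

   The mutex is made TREE_PUBLIC and DECL_COMMON precisely so that every
   translation unit using the name `foo' links against the same lock. */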
4679 /* A subroutine of lower_omp_for. Generate code to emit the predicate
4680 for a lastprivate clause. Given a loop control predicate of (V
4681 cond N2), we gate the clause on (!(V cond N2)). The lowered form
4682 is appended to *DLIST; the iterator initialization is appended to
4683 *BODY_P. */
4686 lower_omp_for_lastprivate (struct omp_for_data *fd, tree *body_p,
4687 tree *dlist, struct omp_context *ctx)
4689 tree clauses, cond, stmts, vinit, t;
4690 enum tree_code cond_code;
4692 cond_code = fd->cond_code;
4693 cond_code = cond_code == LT_EXPR ? GE_EXPR : LE_EXPR;
4695 /* When possible, use a strict equality expression. This lets VRP-style
4696 optimizations deduce the value and remove a copy. */
4697 if (host_integerp (fd->step, 0))
4699 HOST_WIDE_INT step = TREE_INT_CST_LOW (fd->step);
4700 if (step == 1 || step == -1)
4701 cond_code = EQ_EXPR;
4704 cond = build2 (cond_code, boolean_type_node, fd->v, fd->n2);
4706 clauses = OMP_FOR_CLAUSES (fd->for_stmt);
4708 lower_lastprivate_clauses (clauses, cond, &stmts, ctx);
4711 append_to_statement_list (stmts, dlist);
4713 /* Optimize: v = 0; is usually cheaper than v = some_other_constant. */
4714 vinit = fd->n1;
4715 if (cond_code == EQ_EXPR
4716 && host_integerp (fd->n2, 0)
4717 && ! integer_zerop (fd->n2))
4718 vinit = build_int_cst (TREE_TYPE (fd->v), 0);
4720 /* Initialize the iterator variable, so that threads that don't execute
4721 any iterations don't execute the lastprivate clauses by accident. */
4722 t = build_gimple_modify_stmt (fd->v, vinit);
4723 gimplify_and_add (t, body_p);
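/* Worked example (editorial, not from the original sources): for

       #pragma omp for lastprivate (x)
       for (i = 0; i < n; i++)
         x = f (i);

   cond_code starts as LT_EXPR and is flipped to GE_EXPR; with the
   unit-step special case above it is strengthened to EQ_EXPR, so each
   thread ends its chunk with, roughly,

       if (i == n)
         x_orig = x_priv;    // lastprivate copy-out, appended to *DLIST

   and I is pre-initialized (to N1, or to 0 when that cannot satisfy the
   predicate) so that a thread receiving no iterations never runs the
   copy-out by accident. */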
4728 /* Lower code for an OpenMP loop directive. */
4731 lower_omp_for (tree *stmt_p, omp_context *ctx)
4733 tree t, stmt, ilist, dlist, new_stmt, *body_p, *rhs_p;
4734 struct omp_for_data fd;
4738 push_gimplify_context ();
4740 lower_omp (&OMP_FOR_PRE_BODY (stmt), ctx);
4741 lower_omp (&OMP_FOR_BODY (stmt), ctx);
4743 /* Move declaration of temporaries in the loop body before we make
4745 if (TREE_CODE (OMP_FOR_BODY (stmt)) == BIND_EXPR)
4746 record_vars_into (BIND_EXPR_VARS (OMP_FOR_BODY (stmt)), ctx->cb.dst_fn);
4748 new_stmt = build3 (BIND_EXPR, void_type_node, NULL, NULL, NULL);
4749 TREE_SIDE_EFFECTS (new_stmt) = 1;
4750 body_p = &BIND_EXPR_BODY (new_stmt);
4752 /* The pre-body and input clauses go before the lowered OMP_FOR. */
4755 append_to_statement_list (OMP_FOR_PRE_BODY (stmt), body_p);
4756 lower_rec_input_clauses (OMP_FOR_CLAUSES (stmt), body_p, &dlist, ctx);
4758 /* Lower the header expressions. At this point, we can assume that
4759 the header is of the form:
4761 #pragma omp for (V = VAL1; V {<|>|<=|>=} VAL2; V = V [+-] VAL3)
4763 We just need to make sure that VAL1, VAL2 and VAL3 are lowered
4764 using the .omp_data_s mapping, if needed. */
4765 rhs_p = &GIMPLE_STMT_OPERAND (OMP_FOR_INIT (stmt), 1);
4766 if (!is_gimple_min_invariant (*rhs_p))
4767 *rhs_p = get_formal_tmp_var (*rhs_p, body_p);
4769 rhs_p = &TREE_OPERAND (OMP_FOR_COND (stmt), 1);
4770 if (!is_gimple_min_invariant (*rhs_p))
4771 *rhs_p = get_formal_tmp_var (*rhs_p, body_p);
4773 rhs_p = &TREE_OPERAND (GIMPLE_STMT_OPERAND (OMP_FOR_INCR (stmt), 1), 1);
4774 if (!is_gimple_min_invariant (*rhs_p))
4775 *rhs_p = get_formal_tmp_var (*rhs_p, body_p);
4777 /* Once lowered, extract the bounds and clauses. */
4778 extract_omp_for_data (stmt, &fd);
4780 lower_omp_for_lastprivate (&fd, body_p, &dlist, ctx);
4782 append_to_statement_list (stmt, body_p);
4784 append_to_statement_list (OMP_FOR_BODY (stmt), body_p);
4786 t = build2 (OMP_CONTINUE, void_type_node, fd.v, fd.v);
4787 append_to_statement_list (t, body_p);
4789 /* After the loop, add exit clauses. */
4790 lower_reduction_clauses (OMP_FOR_CLAUSES (stmt), body_p, ctx);
4791 append_to_statement_list (dlist, body_p);
4793 maybe_catch_exception (body_p);
4795 /* Region exit marker goes at the end of the loop body. */
4796 t = make_node (OMP_RETURN);
4797 OMP_RETURN_NOWAIT (t) = fd.have_nowait;
4798 append_to_statement_list (t, body_p);
4800 pop_gimplify_context (NULL_TREE);
4801 record_vars_into (ctx->block_vars, ctx->cb.dst_fn);
4803 OMP_FOR_BODY (stmt) = NULL_TREE;
4804 OMP_FOR_PRE_BODY (stmt) = NULL_TREE;
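/* Illustration (an editorial sketch): after this routine an OMP_FOR
   region is laid out as

       <pre-body>  <ilist>  <vinit guard from lower_omp_for_lastprivate>
       OMP_FOR (V = N1; V cond N2; V = V [+-] STEP)
         BODY
       OMP_CONTINUE (V, V)
       <reductions>  <dlist>
       OMP_RETURN [nowait if fd.have_nowait]

   pass_expand_omp later replaces OMP_FOR / OMP_CONTINUE / OMP_RETURN with
   explicit GOMP_loop_* runtime calls chosen by the schedule kind. */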
4808 /* Callback for walk_stmts. Check if *TP only contains OMP_FOR
4809 or OMP_SECTIONS. */
4812 check_combined_parallel (tree *tp, int *walk_subtrees, void *data)
4814 struct walk_stmt_info *wi = data;
4815 int *info = wi->info;
4818 switch (TREE_CODE (*tp))
4819 {
4820 case OMP_FOR:
4821 case OMP_SECTIONS:
4822 *info = *info == 0 ? 1 : -1;
4831 /* Lower the OpenMP parallel directive in *STMT_P. CTX holds context
4832 information for the directive. */
4835 lower_omp_parallel (tree *stmt_p, omp_context *ctx)
4837 tree clauses, par_bind, par_body, new_body, bind;
4838 tree olist, ilist, par_olist, par_ilist;
4839 tree stmt, child_fn, t;
4843 clauses = OMP_PARALLEL_CLAUSES (stmt);
4844 par_bind = OMP_PARALLEL_BODY (stmt);
4845 par_body = BIND_EXPR_BODY (par_bind);
4846 child_fn = ctx->cb.dst_fn;
4847 if (!OMP_PARALLEL_COMBINED (stmt))
4849 struct walk_stmt_info wi;
4850 int ws_num = 0;
4852 memset (&wi, 0, sizeof (wi));
4853 wi.callback = check_combined_parallel;
4854 wi.info = &ws_num;
4855 wi.val_only = true;
4856 walk_stmts (&wi, &par_bind);
4857 if (ws_num == 1)
4858 OMP_PARALLEL_COMBINED (stmt) = 1;
4861 push_gimplify_context ();
4863 par_olist = NULL_TREE;
4864 par_ilist = NULL_TREE;
4865 lower_rec_input_clauses (clauses, &par_ilist, &par_olist, ctx);
4866 lower_omp (&par_body, ctx);
4867 lower_reduction_clauses (clauses, &par_olist, ctx);
4869 /* Declare all the variables created by mapping and the variables
4870 declared in the scope of the parallel body. */
4871 record_vars_into (ctx->block_vars, child_fn);
4872 record_vars_into (BIND_EXPR_VARS (par_bind), child_fn);
4874 if (ctx->record_type)
4876 ctx->sender_decl = create_tmp_var (ctx->record_type, ".omp_data_o");
4877 OMP_PARALLEL_DATA_ARG (stmt) = ctx->sender_decl;
4882 lower_send_clauses (clauses, &ilist, &olist, ctx);
4883 lower_send_shared_vars (&ilist, &olist, ctx);
4885 /* Once all the expansions are done, sequence all the different
4886 fragments inside OMP_PARALLEL_BODY. */
4887 bind = build3 (BIND_EXPR, void_type_node, NULL, NULL, NULL);
4888 append_to_statement_list (ilist, &BIND_EXPR_BODY (bind));
4890 new_body = alloc_stmt_list ();
4892 if (ctx->record_type)
4894 t = build_fold_addr_expr (ctx->sender_decl);
4895 /* fixup_child_record_type might have changed receiver_decl's type. */
4896 t = fold_convert (TREE_TYPE (ctx->receiver_decl), t);
4897 t = build_gimple_modify_stmt (ctx->receiver_decl, t);
4898 append_to_statement_list (t, &new_body);
4901 append_to_statement_list (par_ilist, &new_body);
4902 append_to_statement_list (par_body, &new_body);
4903 append_to_statement_list (par_olist, &new_body);
4904 maybe_catch_exception (&new_body);
4905 t = make_node (OMP_RETURN);
4906 append_to_statement_list (t, &new_body);
4907 OMP_PARALLEL_BODY (stmt) = new_body;
4909 append_to_statement_list (stmt, &BIND_EXPR_BODY (bind));
4910 append_to_statement_list (olist, &BIND_EXPR_BODY (bind));
4914 pop_gimplify_context (NULL_TREE);
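/* Illustration (an editorial sketch): for `#pragma omp parallel' with
   data clauses, the result is roughly

       .omp_data_o.a = a;                  <- ilist, sender side
       OMP_PARALLEL [child fn, data arg .omp_data_o]
         .omp_data_i = &.omp_data_o;       <- receiver fixup built above
         <par_ilist>  BODY  <par_olist>
         OMP_RETURN
       a = .omp_data_o.a;                  <- olist, e.g. copy-back for
                                              lastprivate/reduction

   pass_expand_omp then outlines the body into CHILD_FN and emits the
   GOMP_parallel_start / GOMP_parallel_end pair around a call to it. */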
4917 /* Callback for lower_omp_1. Return non-NULL if *tp needs to be
4918 regimplified. */
4921 lower_omp_2 (tree *tp, int *walk_subtrees, void *data ATTRIBUTE_UNUSED)
4925 /* Any variable with DECL_VALUE_EXPR needs to be regimplified. */
4926 if (TREE_CODE (t) == VAR_DECL && DECL_HAS_VALUE_EXPR_P (t))
4929 /* If a global variable has been privatized, TREE_CONSTANT on
4930 ADDR_EXPR might be wrong. */
4931 if (TREE_CODE (t) == ADDR_EXPR)
4932 recompute_tree_invariant_for_addr_expr (t);
4934 *walk_subtrees = !TYPE_P (t) && !DECL_P (t);
4939 lower_omp_1 (tree *tp, omp_context *ctx, tree_stmt_iterator *tsi)
4946 if (EXPR_HAS_LOCATION (t))
4947 input_location = EXPR_LOCATION (t);
4949 /* If we have issued syntax errors, avoid doing any heavy lifting.
4950 Just replace the OpenMP directives with a NOP to avoid
4951 confusing RTL expansion. */
4952 if (errorcount && OMP_DIRECTIVE_P (t))
4954 *tp = build_empty_stmt ();
4958 switch (TREE_CODE (t))
4960 case STATEMENT_LIST:
4962 tree_stmt_iterator i;
4963 for (i = tsi_start (t); !tsi_end_p (i); tsi_next (&i))
4964 lower_omp_1 (tsi_stmt_ptr (i), ctx, &i);
4969 lower_omp_1 (&COND_EXPR_THEN (t), ctx, NULL);
4970 lower_omp_1 (&COND_EXPR_ELSE (t), ctx, NULL);
4971 if (ctx
4972 && walk_tree (&COND_EXPR_COND (t), lower_omp_2, ctx, NULL))
4975 gimplify_expr (&COND_EXPR_COND (t), &pre, NULL,
4976 is_gimple_condexpr, fb_rvalue);
4980 tsi_link_before (tsi, pre, TSI_SAME_STMT);
4983 append_to_statement_list (t, &pre);
4990 lower_omp_1 (&CATCH_BODY (t), ctx, NULL);
4992 case EH_FILTER_EXPR:
4993 lower_omp_1 (&EH_FILTER_FAILURE (t), ctx, NULL);
4995 case TRY_CATCH_EXPR:
4996 case TRY_FINALLY_EXPR:
4997 lower_omp_1 (&TREE_OPERAND (t, 0), ctx, NULL);
4998 lower_omp_1 (&TREE_OPERAND (t, 1), ctx, NULL);
5001 lower_omp_1 (&BIND_EXPR_BODY (t), ctx, NULL);
5004 lower_omp_1 (&TREE_OPERAND (t, 0), ctx, NULL);
5008 ctx = maybe_lookup_ctx (t);
5009 lower_omp_parallel (tp, ctx);
5012 ctx = maybe_lookup_ctx (t);
5014 lower_omp_for (tp, ctx);
5017 ctx = maybe_lookup_ctx (t);
5019 lower_omp_sections (tp, ctx);
5022 ctx = maybe_lookup_ctx (t);
5024 lower_omp_single (tp, ctx);
5027 ctx = maybe_lookup_ctx (t);
5029 lower_omp_master (tp, ctx);
5032 ctx = maybe_lookup_ctx (t);
5034 lower_omp_ordered (tp, ctx);
5037 ctx = maybe_lookup_ctx (t);
5039 lower_omp_critical (tp, ctx);
5043 if (ctx && walk_tree (tp, lower_omp_2, ctx, NULL))
5045 /* The gimplifier doesn't gimplify CALL_EXPR_STATIC_CHAIN.
5046 Handle that here. */
5047 tree call = get_call_expr_in (t);
5048 if (call
5049 && CALL_EXPR_STATIC_CHAIN (call)
5050 && walk_tree (&CALL_EXPR_STATIC_CHAIN (call), lower_omp_2,
5051 ctx, NULL))
5054 gimplify_expr (&CALL_EXPR_STATIC_CHAIN (call), &pre, NULL,
5055 is_gimple_val, fb_rvalue);
5059 tsi_link_before (tsi, pre, TSI_SAME_STMT);
5062 append_to_statement_list (t, &pre);
5063 lower_omp_1 (&pre, ctx, NULL);
5075 gimplify_expr (tp, &pre, NULL, is_gimple_stmt, fb_none);
5077 tsi_link_before (tsi, pre, TSI_SAME_STMT);
5085 lower_omp (tree *stmt_p, omp_context *ctx)
5087 lower_omp_1 (stmt_p, ctx, NULL);
5090 /* Main entry point. */
5093 execute_lower_omp (void)
5095 all_contexts = splay_tree_new (splay_tree_compare_pointers, 0,
5096 delete_omp_context);
5098 scan_omp (&DECL_SAVED_TREE (current_function_decl), NULL);
5099 gcc_assert (parallel_nesting_level == 0);
5101 if (all_contexts->root)
5102 lower_omp (&DECL_SAVED_TREE (current_function_decl), NULL);
5106 splay_tree_delete (all_contexts);
5107 all_contexts = NULL;
5113 gate_lower_omp (void)
5115 return flag_openmp != 0;
5118 struct gimple_opt_pass pass_lower_omp =
5122 "omplower", /* name */
5123 gate_lower_omp, /* gate */
5124 execute_lower_omp, /* execute */
5127 0, /* static_pass_number */
5129 PROP_gimple_any, /* properties_required */
5130 PROP_gimple_lomp, /* properties_provided */
5131 0, /* properties_destroyed */
5132 0, /* todo_flags_start */
5133 TODO_dump_func /* todo_flags_finish */
5137 /* The following is a utility to diagnose OpenMP structured block violations.
5138 It is not part of the "omplower" pass, as that's invoked too late. It
5139 should be invoked by the respective front ends after gimplification. */
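/* Example (editorial, not from the original sources) of the violations
   diagnosed here:

       #pragma omp parallel
       {
         goto out;   // error: invalid exit from OpenMP structured block
       }
     out:;

   and a branch from outside into a directive's body is the matching
   "invalid entry" case. A switch whose case labels live in a different
   context is checked the same way; a computed goto (a non-LABEL_DECL
   destination) is skipped by diagnose_sb_2 below. */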
5141 static splay_tree all_labels;
5143 /* Check for mismatched contexts and generate an error if needed. Return
5144 true if an error is detected. */
5147 diagnose_sb_0 (tree *stmt_p, tree branch_ctx, tree label_ctx)
5151 if ((label_ctx ? TREE_VALUE (label_ctx) : NULL) == branch_ctx)
5152 return false;
5154 /* Try to avoid confusing the user by producing an error message
5155 with the correct "exit" or "enter" verbiage. We prefer "exit"
5156 unless we can show that LABEL_CTX is nested within BRANCH_CTX. */
5157 if (branch_ctx == NULL)
5163 if (TREE_VALUE (label_ctx) == branch_ctx)
5168 label_ctx = TREE_CHAIN (label_ctx);
5173 error ("invalid exit from OpenMP structured block");
5175 error ("invalid entry to OpenMP structured block");
5177 *stmt_p = build_empty_stmt ();
5181 /* Pass 1: Create a minimal tree of OpenMP structured blocks, and record
5182 where in the tree each label is found. */
5185 diagnose_sb_1 (tree *tp, int *walk_subtrees, void *data)
5187 struct walk_stmt_info *wi = data;
5188 tree context = (tree) wi->info;
5193 switch (TREE_CODE (t))
5198 walk_tree (&OMP_CLAUSES (t), diagnose_sb_1, wi, NULL);
5204 /* The minimal context here is just a tree of statements. */
5205 inner_context = tree_cons (NULL, t, context);
5206 wi->info = inner_context;
5207 walk_stmts (wi, &OMP_BODY (t));
5212 walk_tree (&OMP_FOR_CLAUSES (t), diagnose_sb_1, wi, NULL);
5213 inner_context = tree_cons (NULL, t, context);
5214 wi->info = inner_context;
5215 walk_tree (&OMP_FOR_INIT (t), diagnose_sb_1, wi, NULL);
5216 walk_tree (&OMP_FOR_COND (t), diagnose_sb_1, wi, NULL);
5217 walk_tree (&OMP_FOR_INCR (t), diagnose_sb_1, wi, NULL);
5218 walk_stmts (wi, &OMP_FOR_PRE_BODY (t));
5219 walk_stmts (wi, &OMP_FOR_BODY (t));
5224 splay_tree_insert (all_labels, (splay_tree_key) LABEL_EXPR_LABEL (t),
5225 (splay_tree_value) context);
5235 /* Pass 2: Check each branch and see if its context differs from that of
5236 the destination label's context. */
5239 diagnose_sb_2 (tree *tp, int *walk_subtrees, void *data)
5241 struct walk_stmt_info *wi = data;
5242 tree context = (tree) wi->info;
5247 switch (TREE_CODE (t))
5252 walk_tree (&OMP_CLAUSES (t), diagnose_sb_2, wi, NULL);
5259 walk_stmts (wi, &OMP_BODY (t));
5264 walk_tree (&OMP_FOR_CLAUSES (t), diagnose_sb_2, wi, NULL);
5266 walk_tree (&OMP_FOR_INIT (t), diagnose_sb_2, wi, NULL);
5267 walk_tree (&OMP_FOR_COND (t), diagnose_sb_2, wi, NULL);
5268 walk_tree (&OMP_FOR_INCR (t), diagnose_sb_2, wi, NULL);
5269 walk_stmts (wi, &OMP_FOR_PRE_BODY (t));
5270 walk_stmts (wi, &OMP_FOR_BODY (t));
5276 tree lab = GOTO_DESTINATION (t);
5277 if (TREE_CODE (lab) != LABEL_DECL)
5280 n = splay_tree_lookup (all_labels, (splay_tree_key) lab);
5281 diagnose_sb_0 (tp, context, n ? (tree) n->value : NULL_TREE);
5287 tree vec = SWITCH_LABELS (t);
5288 int i, len = TREE_VEC_LENGTH (vec);
5289 for (i = 0; i < len; ++i)
5291 tree lab = CASE_LABEL (TREE_VEC_ELT (vec, i));
5292 n = splay_tree_lookup (all_labels, (splay_tree_key) lab);
5293 if (diagnose_sb_0 (tp, context, (tree) n->value))
5300 diagnose_sb_0 (tp, context, NULL_TREE);
5311 diagnose_omp_structured_block_errors (tree fndecl)
5313 tree save_current = current_function_decl;
5314 struct walk_stmt_info wi;
5316 current_function_decl = fndecl;
5318 all_labels = splay_tree_new (splay_tree_compare_pointers, 0, 0);
5320 memset (&wi, 0, sizeof (wi));
5321 wi.callback = diagnose_sb_1;
5322 walk_stmts (&wi, &DECL_SAVED_TREE (fndecl));
5324 memset (&wi, 0, sizeof (wi));
5325 wi.callback = diagnose_sb_2;
5326 wi.want_locations = true;
5327 wi.want_return_expr = true;
5328 walk_stmts (&wi, &DECL_SAVED_TREE (fndecl));
5330 splay_tree_delete (all_labels);
5333 current_function_decl = save_current;
5336 #include "gt-omp-low.h"