+/* Initialize an already allocated struct gomp_work_share.
+ This shouldn't touch the next_alloc field. */
+
+void
+gomp_init_work_share (struct gomp_work_share *ws, bool ordered,
+ unsigned nthreads)
+{
+ gomp_mutex_init (&ws->lock);
+ /* Ordered bookkeeping is only needed when the work share belongs
+ to a construct with an ordered clause; that is expected to be
+ the uncommon case, hence the __builtin_expect hint. */
+ if (__builtin_expect (ordered, 0))
+ {
+ /* Number of team-id slots that fit into the trailing
+ inline_ordered_team_ids array without growing the
+ struct gomp_work_share allocation. */
+#define INLINE_ORDERED_TEAM_IDS_CNT \
+ ((sizeof (struct gomp_work_share) \
+ - offsetof (struct gomp_work_share, inline_ordered_team_ids)) \
+ / sizeof (((struct gomp_work_share *) 0)->inline_ordered_team_ids[0]))
+
+ /* Use the inline storage when the team fits; otherwise fall back
+ to a heap allocation (gomp_malloc presumably aborts rather than
+ returns NULL on failure — no error check here). */
+ if (nthreads > INLINE_ORDERED_TEAM_IDS_CNT)
+ ws->ordered_team_ids
+ = gomp_malloc (nthreads * sizeof (*ws->ordered_team_ids));
+ else
+ ws->ordered_team_ids = ws->inline_ordered_team_ids;
+ /* Zero the whole array, whichever storage was chosen. */
+ memset (ws->ordered_team_ids, '\0',
+ nthreads * sizeof (*ws->ordered_team_ids));
+ ws->ordered_num_used = 0;
+ /* -1 appears to be the "no owner" sentinel for ordered_owner. */
+ ws->ordered_owner = -1;
+ ws->ordered_cur = 0;
+ }
+ else
+ ws->ordered_team_ids = NULL;
+ /* Per the comment above the function, next_alloc is deliberately
+ left untouched; only the next_ws ptrlock is (re)initialized. */
+ gomp_ptrlock_init (&ws->next_ws, NULL);
+ ws->threads_completed = 0;
+}