// -*- C++ -*-

// Copyright (C) 2007 Free Software Foundation, Inc.
//
// This file is part of the GNU ISO C++ Library.  This library is free
// software; you can redistribute it and/or modify it under the terms
// of the GNU General Public License as published by the Free Software
// Foundation; either version 2, or (at your option) any later
// version.

// This library is distributed in the hope that it will be useful, but
// WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
// General Public License for more details.

// You should have received a copy of the GNU General Public License
// along with this library; see the file COPYING.  If not, write to
// the Free Software Foundation, 59 Temple Place - Suite 330, Boston,
// MA 02111-1307, USA.

// As a special exception, you may use this file as part of a free
// software library without restriction.  Specifically, if other files
// instantiate templates or use macros or inline functions from this
// file, or you compile this file and link it with other files to
// produce an executable, this file does not by itself cause the
// resulting executable to be covered by the GNU General Public
// License.  This exception does not however invalidate any other
// reasons why the executable file might be covered by the GNU General
// Public License.

/** @file parallel/multiway_mergesort.h
 *  @brief Parallel multiway merge sort.
 *  This file is a GNU parallel extension to the Standard C++ Library.
 */

// Written by Johannes Singler.

#ifndef _GLIBCXX_PARALLEL_MERGESORT_H
#define _GLIBCXX_PARALLEL_MERGESORT_H 1

#include <vector>

#include <parallel/basic_iterator.h>
#include <bits/stl_algo.h>
#include <parallel/parallel.h>
#include <parallel/multiway_merge.h>

namespace __gnu_parallel
{

/** @brief Subsequence description. */
template<typename _DifferenceTp>
  struct Piece
  {
    typedef _DifferenceTp difference_type;

    /** @brief Beginning of the subsequence. */
    difference_type begin;

    /** @brief End of the subsequence. */
    difference_type end;
  };

/** @brief Data accessed by all threads.
  *
  *  PMWMS = parallel multiway mergesort */
template<typename RandomAccessIterator>
  struct PMWMSSortingData
  {
    typedef std::iterator_traits<RandomAccessIterator> traits_type;
    typedef typename traits_type::value_type value_type;
    typedef typename traits_type::difference_type difference_type;

    /** @brief Number of threads involved. */
    thread_index_t num_threads;

    /** @brief Input begin. */
    RandomAccessIterator source;

    /** @brief Start indices, per thread. */
    difference_type* starts;

    /** @brief Temporary arrays for each thread.
     *
     *  Indirection allows using the temporary storage in different
     *  ways, without code duplication.
     *  @see _GLIBCXX_MULTIWAY_MERGESORT_COPY_LAST */
    value_type** temporaries;

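    // With _GLIBCXX_MULTIWAY_MERGESORT_COPY_LAST set, each chunk is sorted
    // in place in the input and merged into temporary storage, which is
    // copied back at the end; otherwise each chunk is first copied into
    // temporary storage, sorted there, and merged directly into the input.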
#if _GLIBCXX_MULTIWAY_MERGESORT_COPY_LAST
    /** @brief Storage in which to sort. */
    RandomAccessIterator* sorting_places;

    /** @brief Storage into which to merge. */
    value_type** merging_places;
#else
    /** @brief Storage in which to sort. */
    value_type** sorting_places;

    /** @brief Storage into which to merge. */
    RandomAccessIterator* merging_places;
#endif
    /** @brief Samples. */
    value_type* samples;

    /** @brief Offsets to add to the found positions. */
    difference_type* offsets;

    /** @brief Pieces of data to merge @c [thread][sequence] */
    std::vector<Piece<difference_type> >* pieces;

    /** @brief Stable sorting desired. */
    bool stable;
  };

/**
  *  @brief Select samples from a sequence.
  *  @param sd Pointer to algorithm data. Result will be placed in
  *  @c sd->samples.
  *  @param num_samples Number of samples to select.
  */
template<typename RandomAccessIterator, typename _DifferenceTp>
  inline void
  determine_samples(PMWMSSortingData<RandomAccessIterator>* sd,
                    _DifferenceTp& num_samples)
  {
    typedef std::iterator_traits<RandomAccessIterator> traits_type;
    typedef typename traits_type::value_type value_type;
    typedef _DifferenceTp difference_type;

    thread_index_t iam = omp_get_thread_num();

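    // Each thread contributes sort_mwms_oversampling * num_threads - 1
    // samples, so sd->samples ends up holding num_samples * num_threads
    // entries in total.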
    num_samples =
        Settings::sort_mwms_oversampling * sd->num_threads - 1;

    difference_type* es = new difference_type[num_samples + 2];

    equally_split(sd->starts[iam + 1] - sd->starts[iam],
                  num_samples + 1, es);

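    // es[] now holds num_samples + 2 (roughly equally spaced) offsets into
    // this thread's part of the input; construct a copy of the element at
    // each interior offset into the shared sample array via placement new.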
    for (difference_type i = 0; i < num_samples; i++)
      new(&(sd->samples[iam * num_samples + i])) value_type(
          sd->source[sd->starts[iam] + es[i + 1]]);

    delete[] es;
  }

/** @brief PMWMS code executed by each thread.
  *  @param sd Pointer to algorithm data.
  *  @param comp Comparator.
  */
template<typename RandomAccessIterator, typename Comparator>
  inline void
  parallel_sort_mwms_pu(PMWMSSortingData<RandomAccessIterator>* sd,
                        Comparator& comp)
  {
    typedef std::iterator_traits<RandomAccessIterator> traits_type;
    typedef typename traits_type::value_type value_type;
    typedef typename traits_type::difference_type difference_type;

    thread_index_t iam = omp_get_thread_num();

    // Length of this thread's chunk, before merging.
    difference_type length_local = sd->starts[iam + 1] - sd->starts[iam];

#if _GLIBCXX_MULTIWAY_MERGESORT_COPY_LAST
    typedef RandomAccessIterator SortingPlacesIterator;

    // Sort in input storage.
    sd->sorting_places[iam] = sd->source + sd->starts[iam];
#else
    typedef value_type* SortingPlacesIterator;

    // Sort in temporary storage, leave space for sentinel.
    sd->sorting_places[iam] = sd->temporaries[iam] =
        static_cast<value_type*>(
        ::operator new(sizeof(value_type) * (length_local + 1)));

    // Copy there.
    std::uninitialized_copy(sd->source + sd->starts[iam],
                            sd->source + sd->starts[iam] + length_local,
                            sd->sorting_places[iam]);
#endif

    // Sort locally.
    if (sd->stable)
      __gnu_sequential::stable_sort(sd->sorting_places[iam],
                                    sd->sorting_places[iam] + length_local,
                                    comp);
    else
      __gnu_sequential::sort(sd->sorting_places[iam],
                             sd->sorting_places[iam] + length_local,
                             comp);

    // Invariant: locally sorted subsequence in sd->sorting_places[iam],
    // sd->sorting_places[iam] + length_local.

    if (Settings::sort_splitting == Settings::SAMPLING)
      {
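        // Sampling-based splitting: every thread draws samples from its
        // part of the input, one thread sorts the combined sample array,
        // and thread iam then uses lower_bound with the global samples
        // num_samples * iam and num_samples * (iam + 1) to delimit the
        // slice of every locally sorted chunk that it will merge.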
        difference_type num_samples;
        determine_samples(sd, num_samples);

#       pragma omp barrier

#       pragma omp single
        __gnu_sequential::sort(sd->samples,
                               sd->samples + (num_samples * sd->num_threads),
                               comp);

#       pragma omp barrier

        for (int s = 0; s < sd->num_threads; s++)
          {
            // For each sequence.
            if (num_samples * iam > 0)
              sd->pieces[iam][s].begin =
                  std::lower_bound(sd->sorting_places[s],
                      sd->sorting_places[s]
                          + (sd->starts[s + 1] - sd->starts[s]),
                      sd->samples[num_samples * iam],
                      comp)
                  - sd->sorting_places[s];
            else
              // Absolute beginning.
              sd->pieces[iam][s].begin = 0;

            if ((num_samples * (iam + 1)) < (num_samples * sd->num_threads))
              sd->pieces[iam][s].end =
                  std::lower_bound(sd->sorting_places[s],
                      sd->sorting_places[s]
                          + (sd->starts[s + 1] - sd->starts[s]),
                      sd->samples[num_samples * (iam + 1)],
                      comp)
                  - sd->sorting_places[s];
            else
              // Absolute end.
              sd->pieces[iam][s].end = sd->starts[s + 1] - sd->starts[s];
          }
      }
    else if (Settings::sort_splitting == Settings::EXACT)
      {
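        // Exact splitting: multiseq_partition finds, for the global rank
        // sd->starts[iam + 1], a consistent split position in every locally
        // sorted chunk, so each thread merges exactly the elements that end
        // up in its part of the output.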
#       pragma omp barrier

        std::vector<std::pair<SortingPlacesIterator, SortingPlacesIterator> >
            seqs(sd->num_threads);
        for (int s = 0; s < sd->num_threads; s++)
          seqs[s] = std::make_pair(sd->sorting_places[s],
                                   sd->sorting_places[s]
                                       + (sd->starts[s + 1] - sd->starts[s]));

        std::vector<SortingPlacesIterator> offsets(sd->num_threads);

        // if not last thread
        if (iam < sd->num_threads - 1)
          multiseq_partition(seqs.begin(), seqs.end(),
                             sd->starts[iam + 1], offsets.begin(), comp);

        for (int seq = 0; seq < sd->num_threads; seq++)
          {
            // for each sequence
            if (iam < (sd->num_threads - 1))
              sd->pieces[iam][seq].end = offsets[seq] - seqs[seq].first;
            else
              // very end of this sequence
              sd->pieces[iam][seq].end =
                  sd->starts[seq + 1] - sd->starts[seq];
          }

#       pragma omp barrier

        for (int seq = 0; seq < sd->num_threads; seq++)
          {
            // For each sequence.
            if (iam > 0)
              sd->pieces[iam][seq].begin = sd->pieces[iam - 1][seq].end;
            else
              // Absolute beginning.
              sd->pieces[iam][seq].begin = 0;
          }
      }

    // Offset from target begin, length after merging.
    difference_type offset = 0, length_am = 0;
    for (int s = 0; s < sd->num_threads; s++)
      {
        length_am += sd->pieces[iam][s].end - sd->pieces[iam][s].begin;
        offset += sd->pieces[iam][s].begin;
      }

#if _GLIBCXX_MULTIWAY_MERGESORT_COPY_LAST
    // Merge into temporary storage; constructing the elements in
    // uninitialized memory is not possible, since there is no multiway_merge
    // variant that uses placement new instead of the assignment operator.
    // XXX incorrect (de)construction
    sd->merging_places[iam] = sd->temporaries[iam] =
        static_cast<value_type*>(
        ::operator new(sizeof(value_type) * length_am));
#else
    // Merge directly to target.
    sd->merging_places[iam] = sd->source + offset;
#endif
    std::vector<std::pair<SortingPlacesIterator, SortingPlacesIterator> >
        seqs(sd->num_threads);

    for (int s = 0; s < sd->num_threads; s++)
      {
        seqs[s] =
            std::make_pair(sd->sorting_places[s] + sd->pieces[iam][s].begin,
                           sd->sorting_places[s] + sd->pieces[iam][s].end);
      }

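    // Merge this thread's pieces from all locally sorted chunks; the merge
    // itself runs sequentially within the thread.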
    multiway_merge(seqs.begin(), seqs.end(), sd->merging_places[iam], comp,
                   length_am, sd->stable, false, sequential_tag());

#   pragma omp barrier

#if _GLIBCXX_MULTIWAY_MERGESORT_COPY_LAST
    // Write back.
    std::copy(sd->merging_places[iam],
              sd->merging_places[iam] + length_am,
              sd->source + offset);
#endif

    delete[] sd->temporaries[iam];
  }

/** @brief PMWMS main call.
  *  @param begin Begin iterator of sequence.
  *  @param end End iterator of sequence.
  *  @param comp Comparator.
  *  @param n Length of sequence.
  *  @param num_threads Number of threads to use.
  *  @param stable Stable sorting.
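  *
  *  A minimal usage sketch (hypothetical, assuming an OpenMP-enabled build;
  *  get_data() is a placeholder for obtaining the input):
  *  @code
  *    std::vector<int> v = get_data();
  *    __gnu_parallel::parallel_sort_mwms(v.begin(), v.end(),
  *                                       std::less<int>(), v.size(),
  *                                       omp_get_max_threads(), false);
  *  @endcode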
  */
template<typename RandomAccessIterator, typename Comparator>
  inline void
  parallel_sort_mwms(RandomAccessIterator begin, RandomAccessIterator end,
                     Comparator comp,
                     typename std::iterator_traits<RandomAccessIterator>
                        ::difference_type n,
                     int num_threads,
                     bool stable)
  {
    _GLIBCXX_CALL(n)

    typedef std::iterator_traits<RandomAccessIterator> traits_type;
    typedef typename traits_type::value_type value_type;
    typedef typename traits_type::difference_type difference_type;

    if (n <= 1)
      return;

    // at least one element per thread
    if (num_threads > n)
      num_threads = static_cast<thread_index_t>(n);

    // shared variables
    PMWMSSortingData<RandomAccessIterator> sd;
    difference_type* starts;

#   pragma omp parallel num_threads(num_threads)
      {
        num_threads = omp_get_num_threads();  // no more threads than requested

#       pragma omp single
          {
            sd.num_threads = num_threads;
            sd.source = begin;
            sd.temporaries = new value_type*[num_threads];

#if _GLIBCXX_MULTIWAY_MERGESORT_COPY_LAST
            sd.sorting_places = new RandomAccessIterator[num_threads];
            sd.merging_places = new value_type*[num_threads];
#else
            sd.sorting_places = new value_type*[num_threads];
            sd.merging_places = new RandomAccessIterator[num_threads];
#endif

            if (Settings::sort_splitting == Settings::SAMPLING)
              {
                unsigned int size =
                    (Settings::sort_mwms_oversampling * num_threads - 1)
                        * num_threads;
                sd.samples = static_cast<value_type*>(
                    ::operator new(size * sizeof(value_type)));
              }
            else
              sd.samples = NULL;

            sd.offsets = new difference_type[num_threads - 1];
            sd.pieces = new std::vector<Piece<difference_type> >[num_threads];
            for (int s = 0; s < num_threads; s++)
              sd.pieces[s].resize(num_threads);
            starts = sd.starts = new difference_type[num_threads + 1];
            sd.stable = stable;

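            // Split the input into num_threads chunks of nearly equal size;
            // the first (n % num_threads) threads receive one extra element.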
            difference_type chunk_length = n / num_threads;
            difference_type split = n % num_threads;
            difference_type pos = 0;
            for (int i = 0; i < num_threads; i++)
              {
                starts[i] = pos;
                pos += (i < split) ? (chunk_length + 1) : chunk_length;
              }
            starts[num_threads] = pos;
          }

        // Now sort in parallel.
        parallel_sort_mwms_pu(&sd, comp);
      } // parallel

    delete[] starts;
    delete[] sd.temporaries;
    delete[] sd.sorting_places;
    delete[] sd.merging_places;

    if (Settings::sort_splitting == Settings::SAMPLING)
      delete[] sd.samples;

    delete[] sd.offsets;
    delete[] sd.pieces;
  }
} // namespace __gnu_parallel

#endif