author     Tom Tromey <tom@tromey.com>   2021-06-13 12:46:28 -0600
committer  Tom Tromey <tom@tromey.com>   2022-04-12 09:31:16 -0600
commit     f4565e4c99e768d3bcf2998979528569a65d8417
tree       51d6e54a3ba0a495cdedd2977d30a748920ea3cd /gdbsupport
parent     82d734f7a3b6f08813a9ad6272aa026779c88975
Return vector of results from parallel_for_each
This changes gdb::parallel_for_each to return a vector of the results.  However, if the passed-in function returns void, the return type remains 'void'.  This functionality is used later to parallelize the new indexer.
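For illustration only, here is a sketch of what a caller of the new interface might look like.  The function parallel_sum and the use of std::accumulate are hypothetical and not part of this patch:

    #include <numeric>
    #include <vector>
    #include "gdbsupport/parallel-for.h"

    /* Hypothetical caller: sum VALS in parallel.  Each worker returns a
       partial sum for its subrange; the new parallel_for_each collects
       the partial sums into a vector whose size depends on how many
       threads were actually used.  */
    long
    parallel_sum (std::vector<int> &vals)
    {
      std::vector<long> partials
        = gdb::parallel_for_each (1000, vals.begin (), vals.end (),
                                  [] (std::vector<int>::iterator start,
                                      std::vector<int>::iterator stop)
                                  {
                                    long sum = 0;
                                    for (auto it = start; it != stop; ++it)
                                      sum += *it;
                                    return sum;
                                  });
      return std::accumulate (partials.begin (), partials.end (), 0L);
    }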
Diffstat (limited to 'gdbsupport')
-rw-r--r--  gdbsupport/parallel-for.h  142
-rw-r--r--  gdbsupport/thread-pool.cc    6
-rw-r--r--  gdbsupport/thread-pool.h    23
3 files changed, 141 insertions(+), 30 deletions(-)
diff --git a/gdbsupport/parallel-for.h b/gdbsupport/parallel-for.h
index 811ffd61bd..44303abb71 100644
--- a/gdbsupport/parallel-for.h
+++ b/gdbsupport/parallel-for.h
@@ -21,11 +21,98 @@
#define GDBSUPPORT_PARALLEL_FOR_H
#include <algorithm>
+#include <type_traits>
#include "gdbsupport/thread-pool.h"
namespace gdb
{
+namespace detail
+{
+
+/* This is a helper class that is used to accumulate results for
+ parallel_for. There is a specialization for 'void', below. */
+template<typename T>
+struct par_for_accumulator
+{
+public:
+
+ explicit par_for_accumulator (size_t n_threads)
+ : m_futures (n_threads)
+ {
+ }
+
+ /* The result type that is accumulated. */
+ typedef std::vector<T> result_type;
+
+ /* Post the Ith task to a background thread, and store a future for
+ later. */
+ void post (size_t i, std::function<T ()> task)
+ {
+ m_futures[i]
+ = gdb::thread_pool::g_thread_pool->post_task (std::move (task));
+ }
+
+ /* Invoke TASK in the current thread, then compute all the results
+ from all background tasks and put them into a result vector,
+ which is returned. */
+ result_type finish (gdb::function_view<T ()> task)
+ {
+ result_type result (m_futures.size () + 1);
+
+ result.back () = task ();
+
+ for (size_t i = 0; i < m_futures.size (); ++i)
+ result[i] = m_futures[i].get ();
+
+ return result;
+ }
+
+private:
+
+ /* A vector of futures coming from the tasks run in the
+ background. */
+ std::vector<std::future<T>> m_futures;
+};
+
+/* See the generic template. */
+template<>
+struct par_for_accumulator<void>
+{
+public:
+
+ explicit par_for_accumulator (size_t n_threads)
+ : m_futures (n_threads)
+ {
+ }
+
+ /* This specialization does not compute results. */
+ typedef void result_type;
+
+ void post (size_t i, std::function<void ()> task)
+ {
+ m_futures[i]
+ = gdb::thread_pool::g_thread_pool->post_task (std::move (task));
+ }
+
+ result_type finish (gdb::function_view<void ()> task)
+ {
+ task ();
+
+ for (auto &future : m_futures)
+ {
+ /* Use 'get' and not 'wait', to propagate any exception. */
+ future.get ();
+ }
+ }
+
+private:
+
+ std::vector<std::future<void>> m_futures;
+};
+
+}
+
/* A very simple "parallel for". This splits the range of iterators
into subranges, and then passes each subrange to the callback. The
work may or may not be done in separate threads.
@@ -36,21 +123,25 @@ namespace gdb
The parameter N says how batching ought to be done -- there will be
at least N elements processed per thread. Setting N to 0 is not
- allowed. */
+ allowed.
+
+ If the function returns a non-void type, then a vector of the
+ results is returned. The size of the resulting vector depends on
+ the number of threads that were used. */
template<class RandomIt, class RangeFunction>
-void
+typename gdb::detail::par_for_accumulator<
+ std::result_of_t<RangeFunction (RandomIt, RandomIt)>
+ >::result_type
parallel_for_each (unsigned n, RandomIt first, RandomIt last,
RangeFunction callback)
{
- /* So we can use a local array below. */
- const size_t local_max = 16;
- size_t n_threads = std::min (thread_pool::g_thread_pool->thread_count (),
- local_max);
- size_t n_actual_threads = 0;
- std::future<void> futures[local_max];
+ typedef typename std::result_of_t<RangeFunction (RandomIt, RandomIt)>
+ result_type;
+ size_t n_threads = thread_pool::g_thread_pool->thread_count ();
size_t n_elements = last - first;
+ size_t elts_per_thread = 0;
if (n_threads > 1)
{
/* Require that there should be at least N elements in a
@@ -58,26 +149,27 @@ parallel_for_each (unsigned n, RandomIt first, RandomIt last,
gdb_assert (n > 0);
if (n_elements / n_threads < n)
n_threads = std::max (n_elements / n, (size_t) 1);
- size_t elts_per_thread = n_elements / n_threads;
- n_actual_threads = n_threads - 1;
- for (int i = 0; i < n_actual_threads; ++i)
- {
- RandomIt end = first + elts_per_thread;
- auto task = [=] ()
- {
- callback (first, end);
- };
-
- futures[i] = gdb::thread_pool::g_thread_pool->post_task (task);
- first = end;
- }
+ elts_per_thread = n_elements / n_threads;
}
- /* Process all the remaining elements in the main thread. */
- callback (first, last);
+ size_t count = n_threads == 0 ? 0 : n_threads - 1;
+ gdb::detail::par_for_accumulator<result_type> results (count);
- for (int i = 0; i < n_actual_threads; ++i)
- futures[i].wait ();
+ for (int i = 0; i < count; ++i)
+ {
+ RandomIt end = first + elts_per_thread;
+ results.post (i, [=] ()
+ {
+ return callback (first, end);
+ });
+ first = end;
+ }
+
+ /* Process all the remaining elements in the main thread. */
+ return results.finish ([=] ()
+ {
+ return callback (first, last);
+ });
}
}
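The 'void' specialization keeps existing callers working: when the callback returns nothing, par_for_accumulator<void> is selected and the whole call is typed void, so no result vector is built.  A hypothetical sketch (scale_all is not part of the patch):

    #include <vector>
    #include "gdbsupport/parallel-for.h"

    /* Hypothetical void path: the callback returns nothing, so
       parallel_for_each itself returns void and the futures are only
       used to wait for (and propagate exceptions from) the workers.  */
    void
    scale_all (std::vector<double> &vals)
    {
      gdb::parallel_for_each (1, vals.begin (), vals.end (),
                              [] (std::vector<double>::iterator start,
                                  std::vector<double>::iterator stop)
                              {
                                for (auto it = start; it != stop; ++it)
                                  *it *= 2.0;
                              });
    }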
diff --git a/gdbsupport/thread-pool.cc b/gdbsupport/thread-pool.cc
index 7d446952cc..efd8b79971 100644
--- a/gdbsupport/thread-pool.cc
+++ b/gdbsupport/thread-pool.cc
@@ -134,11 +134,10 @@ thread_pool::set_thread_count (size_t num_threads)
#endif /* CXX_STD_THREAD */
}
-std::future<void>
-thread_pool::post_task (std::function<void ()> &&func)
+void
+thread_pool::do_post_task (std::packaged_task<void ()> &&func)
{
std::packaged_task<void ()> t (std::move (func));
- std::future<void> f = t.get_future ();
#if CXX_STD_THREAD
if (m_thread_count != 0)
@@ -153,7 +152,6 @@ thread_pool::post_task (std::function<void ()> &&func)
/* Just execute it now. */
t ();
}
- return f;
}
#if CXX_STD_THREAD
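The zero-thread fallback still works because the caller already holds the future before do_post_task runs the packaged_task in place.  A standalone sketch of that mechanism using plain std::packaged_task (not GDB code):

    #include <future>

    /* Mimics the no-worker-threads path: the future is obtained from
       the packaged_task first, then the task is simply run in the
       current thread.  The future still becomes ready, and any
       exception thrown by the task is stored and rethrown by get ().  */
    int
    run_inline ()
    {
      std::packaged_task<int ()> task ([] () { return 7; });
      std::future<int> result = task.get_future ();
      task ();          /* "Just execute it now."  */
      return result.get ();
    }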
diff --git a/gdbsupport/thread-pool.h b/gdbsupport/thread-pool.h
index 2672e4d739..3243346771 100644
--- a/gdbsupport/thread-pool.h
+++ b/gdbsupport/thread-pool.h
@@ -64,7 +64,24 @@ public:
/* Post a task to the thread pool. A future is returned, which can
be used to wait for the result. */
- std::future<void> post_task (std::function<void ()> &&func);
+ std::future<void> post_task (std::function<void ()> &&func)
+ {
+ std::packaged_task<void ()> task (std::move (func));
+ std::future<void> result = task.get_future ();
+ do_post_task (std::packaged_task<void ()> (std::move (task)));
+ return result;
+ }
+
+ /* Post a task to the thread pool. A future is returned, which can
+ be used to wait for the result. */
+ template<typename T>
+ std::future<T> post_task (std::function<T ()> &&func)
+ {
+ std::packaged_task<T ()> task (std::move (func));
+ std::future<T> result = task.get_future ();
+ do_post_task (std::packaged_task<void ()> (std::move (task)));
+ return result;
+ }
private:
@@ -74,6 +91,10 @@ private:
/* The callback for each worker thread. */
void thread_function ();
+ /* Post a task to the thread pool. A future is returned, which can
+ be used to wait for the result. */
+ void do_post_task (std::packaged_task<void ()> &&func);
+
/* The current thread count. */
size_t m_thread_count = 0;
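Finally, a hypothetical direct use of the new templated post_task overload; T is deduced from the std::function parameter, so a raw lambda has to be wrapped first (post_and_wait is not part of the patch):

    #include <functional>
    #include <future>
    #include "gdbsupport/thread-pool.h"

    /* Hypothetical direct use of the typed overload: the task's int
       result travels back through std::future<int>, and get () also
       rethrows any exception the task threw.  */
    int
    post_and_wait ()
    {
      std::function<int ()> work = [] () { return 6 * 7; };
      std::future<int> f
        = gdb::thread_pool::g_thread_pool->post_task (std::move (work));
      return f.get ();
    }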