<!DOCTYPE html>
    <html>
    <head>
        <meta http-equiv="Content-type" content="text/html;charset=UTF-8">
        <style>
/*--------------------------------------------------------------------------------------------- * Copyright (c) Microsoft Corporation. All rights reserved. * Licensed under the MIT License. See License.txt in the project root for license information. *--------------------------------------------------------------------------------------------*/ body { font-family: "Segoe WPC", "Segoe UI", "SFUIText-Light", "HelveticaNeue-Light", sans-serif, "Droid Sans Fallback"; font-size: 14px; padding: 0 12px; line-height: 22px; word-wrap: break-word; } body.scrollBeyondLastLine { margin-bottom: calc(100vh - 22px); } body.showEditorSelection .code-line { position: relative; } body.showEditorSelection .code-active-line:before, body.showEditorSelection .code-line:hover:before { content: ""; display: block; position: absolute; top: 0; left: -12px; height: 100%; } body.showEditorSelection li.code-active-line:before, body.showEditorSelection li.code-line:hover:before { left: -30px; } .vscode-light.showEditorSelection .code-active-line:before { border-left: 3px solid rgba(0, 0, 0, 0.15); } .vscode-light.showEditorSelection .code-line:hover:before { border-left: 3px solid rgba(0, 0, 0, 0.40); } .vscode-dark.showEditorSelection .code-active-line:before { border-left: 3px solid rgba(255, 255, 255, 0.4); } .vscode-dark.showEditorSelection .code-line:hover:before { border-left: 3px solid rgba(255, 255, 255, 0.60); } .vscode-high-contrast.showEditorSelection .code-active-line:before { border-left: 3px solid rgba(255, 160, 0, 0.7); } .vscode-high-contrast.showEditorSelection .code-line:hover:before { border-left: 3px solid rgba(255, 160, 0, 1); } img { max-width: 100%; max-height: 100%; } a { color: #4080D0; text-decoration: none; } a:focus, input:focus, select:focus, textarea:focus { outline: 1px solid -webkit-focus-ring-color; outline-offset: -1px; } hr { border: 0; height: 2px; border-bottom: 2px solid; } h1 { padding-bottom: 0.3em; line-height: 1.2; border-bottom-width: 1px; 
border-bottom-style: solid; } h1, h2, h3 { font-weight: normal; } h1 code, h2 code, h3 code, h4 code, h5 code, h6 code { font-size: inherit; line-height: auto; } a:hover { color: #4080D0; text-decoration: underline; } table { border-collapse: collapse; } table > thead > tr > th { text-align: left; border-bottom: 1px solid; } table > thead > tr > th, table > thead > tr > td, table > tbody > tr > th, table > tbody > tr > td { padding: 5px 10px; } table > tbody > tr + tr > td { border-top: 1px solid; } blockquote { margin: 0 7px 0 5px; padding: 0 16px 0 10px; border-left: 5px solid; } code { font-family: Menlo, Monaco, Consolas, "Droid Sans Mono", "Courier New", monospace, "Droid Sans Fallback"; font-size: 14px; line-height: 19px; } body.wordWrap pre { white-space: pre-wrap; } .mac code { font-size: 12px; line-height: 18px; } code > div { padding: 16px; border-radius: 3px; overflow: auto; } /** Theming */ .vscode-light { color: rgb(30, 30, 30); } .vscode-dark { color: #DDD; } .vscode-high-contrast { color: white; } .vscode-light code { color: #A31515; } .vscode-dark code { color: #D7BA7D; } .vscode-light code > div { background-color: rgba(220, 220, 220, 0.4); } .vscode-dark code > div { background-color: rgba(10, 10, 10, 0.4); } .vscode-high-contrast code > div { background-color: rgb(0, 0, 0); } .vscode-high-contrast h1 { border-color: rgb(0, 0, 0); } .vscode-light table > thead > tr > th { border-color: rgba(0, 0, 0, 0.69); } .vscode-dark table > thead > tr > th { border-color: rgba(255, 255, 255, 0.69); } .vscode-light h1, .vscode-light hr, .vscode-light table > tbody > tr + tr > td { border-color: rgba(0, 0, 0, 0.18); } .vscode-dark h1, .vscode-dark hr, .vscode-dark table > tbody > tr + tr > td { border-color: rgba(255, 255, 255, 0.18); } .vscode-light blockquote, .vscode-dark blockquote { background: rgba(127, 127, 127, 0.1); border-color: rgba(0, 122, 204, 0.5); } .vscode-high-contrast blockquote { background: transparent; border-color: #fff; }
</style>
<style>
/* Tomorrow Theme */ /* http://jmblog.github.com/color-themes-for-google-code-highlightjs */ /* Original theme - https://github.com/chriskempson/tomorrow-theme */ /* Tomorrow Comment */ .hljs-comment, .hljs-quote { color: #8e908c; } /* Tomorrow Red */ .hljs-variable, .hljs-template-variable, .hljs-tag, .hljs-name, .hljs-selector-id, .hljs-selector-class, .hljs-regexp, .hljs-deletion { color: #c82829; } /* Tomorrow Orange */ .hljs-number, .hljs-built_in, .hljs-builtin-name, .hljs-literal, .hljs-type, .hljs-params, .hljs-meta, .hljs-link { color: #f5871f; } /* Tomorrow Yellow */ .hljs-attribute { color: #eab700; } /* Tomorrow Green */ .hljs-string, .hljs-symbol, .hljs-bullet, .hljs-addition { color: #718c00; } /* Tomorrow Blue */ .hljs-title, .hljs-section { color: #4271ae; } /* Tomorrow Purple */ .hljs-keyword, .hljs-selector-tag { color: #8959a8; } .hljs { display: block; overflow-x: auto; color: #4d4d4c; padding: 0.5em; } .hljs-emphasis { font-style: italic; } .hljs-strong { font-weight: bold; }
</style>
<style>
ul.contains-task-list { padding-left: 0; } ul ul.contains-task-list { padding-left: 40px; } .task-list-item { list-style-type: none; } .task-list-item-checkbox { vertical-align: middle; }
</style>
        <style>
            body {
                font-family: -apple-system, BlinkMacSystemFont, 'Segoe WPC', 'Segoe UI', 'HelveticaNeue-Light', 'Ubuntu', 'Droid Sans', sans-serif;
                font-size: 14px;
                line-height: 1.6;
            }
        </style>
    </head>
    <body>
        <table>
<thead>
<tr>
<th>Document Number:</th>
<th>P0975R0</th>
</tr>
</thead>
<tbody>
<tr>
<td>Date:</td>
<td>2018-03-10</td>
</tr>
<tr>
<td>Audience:</td>
<td>Evolution, SG1, SG14, LEWG, LWG</td>
</tr>
<tr>
<td>Revises:</td>
<td>none</td>
</tr>
<tr>
<td>Reply to:</td>
<td>Gor Nishanov (gorn@microsoft.com)</td>
</tr>
</tbody>
</table>
<h1 id="impact-of-coroutines-on-current-and-upcoming-library-facilities">Impact of coroutines on current and upcoming library facilities</h1>
<!-- TOC depthFrom:2 -->
<ul>
<li><a href="#1-overview">1. Overview</a></li>
<li><a href="#2-standard-coroutine-types">2. Standard coroutine types</a></li>
<li><a href="#3-existing-library-facilities">3. Existing library facilities</a></li>
<li><a href="#4-incoming-papers-with-generator-interactions">4. Incoming papers with generator interactions</a>
<ul>
<li><a href="#41-ranges">4.1. Ranges</a></li>
</ul>
</li>
<li><a href="#5-brief-tutorial-on-awaitables-from-a-library-perspective">5. Brief tutorial on awaitables from a library perspective</a></li>
<li><a href="#6-incoming-papers-with-async-interactions">6. Incoming papers with async interactions</a>
<ul>
<li><a href="#61-executors">6.1. Executors</a></li>
<li><a href="#62-concurrency-ts">6.2. Concurrency TS</a>
<ul>
<li><a href="#621-future">6.2.1. future</a></li>
<li><a href="#622-when_all">6.2.2. when_all</a></li>
<li><a href="#623-when_any">6.2.3. when_any</a></li>
</ul>
</li>
<li><a href="#63-other-future-like-types">6.3. Other future-like types</a></li>
<li><a href="#64-parallelism-ts">6.4. Parallelism TS</a>
<ul>
<li><a href="#641-task-blocks">6.4.1. Task Blocks</a></li>
</ul>
</li>
<li><a href="#65-stdexpected">6.5. std::expected</a></li>
<li><a href="#66-unified-call-syntax">6.6. Unified Call Syntax</a></li>
<li><a href="#67-revised-latches-and-barriers-for-c20">6.7. Revised Latches and Barriers for C++20</a></li>
<li><a href="#68-parallel-algorithms">6.8. Parallel algorithms</a></li>
<li><a href="#69-concurrent-queues">6.9. Concurrent Queues</a></li>
<li><a href="#610-rcu--hazard-pointers">6.10. RCU &amp; Hazard pointers</a></li>
<li><a href="#611-networking-technical-specification">6.11. Networking Technical Specification</a>
<ul>
<li><a href="#6111-async-api-simple-form">6.11.1. async API: simple form</a></li>
<li><a href="#6112-async-api-adding-an-executor">6.11.2. async API: adding an executor</a></li>
<li><a href="#6113-async-api-altering-default-allocator">6.11.3. async API: altering default allocator</a></li>
</ul>
</li>
</ul>
</li>
<li><a href="#7-conclusion">7. Conclusion</a></li>
<li><a href="#8-acknowledgement">8. Acknowledgement</a></li>
<li><a href="#9-references">9. References</a></li>
<li><a href="#10-appendix">10. Appendix</a>
<ul>
<li><a href="#101-full-listing-of-hand-crafted-state-machine-matching-the-example-in-networking-technical-specification-section-of-the-paper">10.1. Full listing of hand-crafted state machine matching the example in &quot;Networking Technical Specification&quot; section of the paper.</a></li>
</ul>
</li>
</ul>
<!-- /TOC -->
<h2 id="1-overview">1. Overview</h2>
<p>A coroutine is a generalization of a function that, in addition to the usual control flow operations such as call and return, can suspend execution of itself and yield control back to the caller with an ability to resume execution at a later time. In C++, coroutines were explicitly designed to efficiently and succinctly support the following use patterns:</p>
<ul>
<li>asynchronous tasks, where <code>co_await &lt;expr&gt;</code> suspends a coroutine while waiting for the result of an expression and the coroutine is resumed once the result is available</li>
<li>generators, where <code>co_yield &lt;expr&gt;</code> suspends the coroutine yielding the result of the expression to the consumer and the coroutine is resumed once the consumer asks for the next value</li>
<li>asynchronous streams, which can be thought of as an asynchronous version of a generator where both <code>co_await</code> and <code>co_yield</code> can be used.</li>
</ul>
<p>Unlike most other languages that support coroutines, C++ coroutines are open and not tied to any particular runtime or generator type and allow libraries to imbue coroutines with meaning, whereas the compiler is responsible solely for efficient transformation of a function to a state machine that is the foundation of the coroutine.</p>
<p>While openness of coroutines allows easy integration into existing concurrency runtimes, we do not yet have great out of the box coroutine experience for C++20 as we do not have standard coroutine types, like task or generator and we do not have coroutine bindings with any upcoming concurrency and networking facilities in papers in flight.</p>
<p>This document describes what is missing, what is the impact of coroutines on existing library facilities (almost none) and what is the impact of coroutines on papers in progress (lots).</p>
<h2 id="2-standard-coroutine-types">2. Standard coroutine types</h2>
<p>There are two proposals en route for Rapperswil that will bring the first two coroutine types to the library: generators and zero-overhead tasks. There will also be a paper on zero-overhead adapters to consume Networking TS *_async APIs. <!-- and executors APIs. --></p>
<p>This is the minimal level of library support that allows C++20 to have great out of the box experience.</p>
<p>For async code, users should be able to write efficient and pretty async networking code:</p>
<pre class="hljs"><code><div>task&lt;&gt; session(io_context &amp;io, ip::tcp::socket s, <span class="hljs-keyword">size_t</span> block_size) {
  <span class="hljs-built_in">std</span>::<span class="hljs-built_in">vector</span>&lt;<span class="hljs-keyword">char</span>&gt; buf(block_size);
  <span class="hljs-keyword">for</span> (;;) {
    <span class="hljs-keyword">auto</span> n = co_await s.async_read_some( buffer(buf.data(),
                                                block_size));
    n = co_await async_write(s, buffer(buf.data(), n));
  }
}
</div></code></pre>
<p>Users will be able to write lazily generated views with generator coroutines:</p>
<pre class="hljs"><code><div><span class="hljs-keyword">template</span> &lt;<span class="hljs-keyword">typename</span> T&gt; <span class="hljs-comment">// note, no upper bound, goes to "infinity"</span>
generator&lt;T&gt; squares() {
  <span class="hljs-keyword">for</span> (T i = {}; ; ++i)
    co_yield i * i;
  }
</div></code></pre>
<p>and then use them with ranges and standard library algorithms:</p>
<pre class="hljs"><code><div><span class="hljs-keyword">int</span> sum = accumulate(squares&lt;<span class="hljs-keyword">int</span>&gt;()
                   | view::transform([](<span class="hljs-keyword">int</span> i){ <span class="hljs-keyword">return</span> i*i; })
                   | view::take(<span class="hljs-number">10</span>), <span class="hljs-number">0</span>);
</div></code></pre>
<p>With regard to asynchronous streams, it is an area of active research [RxMarbles, RxCpp] and we have no
plans to offer any standard coroutine types supporting this use case for C++20.</p>
<!--
Rapperswil `task` type is similar to the `task` type of cppcoro coroutine abstractions library [cppcoro].

Rapperswil `generator` type is elaboration on generators shown here and here.
-->
<h2 id="3-existing-library-facilities">3. Existing library facilities</h2>
<p>There is minimal impact on existing library types. One of the potential types that may interact with coroutines is <code>std::future</code>, however, in its current form, it does not have an ability to provide an asynchronous notification when the <code>future</code> is completed and therefore cannot be efficiently <code>co_await</code>-ed upon. We have several options to consider:</p>
<ol>
<li>If <code>then</code> makes it to <code>std::future</code> by C++20, we can make <code>std::future</code> completely coroutine aware. We would likely need to give guidance when heavy-weight <code>std::future</code> needs to be used with coroutines vs. when to use zero-overhead <code>task</code> type.</li>
<li>If <code>then</code> does not make it to <code>std::future</code> in C++20, we cannot make <code>co_await</code> await on the future,
but we can provide bindings allowing coroutines to author <code>std::future</code> (as in coroutine returns <code>std::future</code> to represent the eventual result produced by the coroutine).</li>
<li>We can also decide that <code>std::future</code> will not interact with coroutines at all.</li>
</ol>
<p>The approach chosen for the <code>std::future</code> will apply to <code>std::shared_future</code> as well.</p>
<!--
ooo Allocators
While coroutines the language feature does not depend on allocators in any way, coroutine types, such as `task` or `generator` are designed with allocator support in mind. The exact details are in the Rapperswil bound `task` and `generator` papers. -->
<h2 id="4-incoming-papers-with-generator-interactions">4. Incoming papers with generator interactions</h2>
<h3 id="41-ranges">4.1. Ranges</h3>
<p>Zero-overhead generators are unavoidably move-only. However, <a href="#RangesTS">Ranges TS</a>, at the moment, requires copyability for view composition. We would need to relax the copyable requirement, at minimum for input views, or possibly for <em>all</em> view categories.</p>
<p>Note that <a href="#Range-v3">Range-v3</a> currently implements a “compromise” solution that allows input views to be move-only and requires copy for forward views.</p>
<!--
View composition combined with the ease of making views via generators is amazingly expressive and it would be a shame not to have it in C++20. -->
<h2 id="5-brief-tutorial-on-awaitables-from-a-library-perspective">5. Brief tutorial on awaitables from a library perspective</h2>
<p>Before we proceed further, it would be valuable to briefly look at an important coroutine-related concept, the awaitable.</p>
<pre class="hljs"><code><div><span class="hljs-keyword">template</span> &lt;<span class="hljs-keyword">typename</span> A&gt;
concept SimpleAwaitable = requires(A a, <span class="hljs-built_in">std</span>::coroutine_handle&lt;&gt; h) {
    { a.await_ready() } -&gt; <span class="hljs-keyword">bool</span>;
    a.await_suspend(h);
    a.await_resume();
};
</div></code></pre>
<p>An <em>await-expression</em>, i.e. <code>co_await &lt;expr&gt;</code> requires  <em>&lt;expr&gt;</em> to be an awaitable (or to have an overload of <code>operator co_await</code> that when applied to <em>&lt;expr&gt;</em> will return an awaitable).</p>
<p>Let's look at some simple awaitable examples.</p>
<p>The simplest awaitable possible is <code>suspend_never</code> that acts as a noop when awaited upon:</p>
<pre class="hljs"><code><div><span class="hljs-keyword">struct</span> suspend_never {
  <span class="hljs-function"><span class="hljs-keyword">constexpr</span> <span class="hljs-keyword">bool</span> <span class="hljs-title">await_ready</span><span class="hljs-params">()</span> <span class="hljs-keyword">const</span> </span>{ <span class="hljs-keyword">return</span> <span class="hljs-literal">true</span>; }
  <span class="hljs-function"><span class="hljs-keyword">constexpr</span> <span class="hljs-keyword">void</span> <span class="hljs-title">await_suspend</span><span class="hljs-params">(coroutine_handle&lt;&gt;)</span> <span class="hljs-keyword">const</span> </span>{} <span class="hljs-comment">// never called due to awaitable being always ready</span>
  <span class="hljs-function"><span class="hljs-keyword">constexpr</span> <span class="hljs-keyword">void</span> <span class="hljs-title">await_resume</span><span class="hljs-params">()</span> <span class="hljs-keyword">const</span> </span>{}
};
</div></code></pre>
<!--
Example 1: Creating a wrapper for callback based API
```c++
// This is some existing API we would like to provide awaitable interface to
template <typename F>
void api_xyz(Param p, F callback); // callback expects a value of type R

// This is an awaitable interace to the API above
auto xyz(Param p) {
  struct [[nodiscard]] Awaiter {
    Param p;  // Not using variant<Param,R> to keep
    R result; // the example simpler

    bool await_ready() { return false; } // always go to await_suspend
    void await_suspend(coroutine_handle<> h) {
      api_xyz(p, [this, h](R r) mutable {
        result = move(r); // captures the result in awaitable
        h.resume();       // resumes the coroutine
      });
    }
    auto await_resume() { return move(result); } // unpacks the result
  };
  return Awaiter{p};
}
```

Now we can write `auto r = co_await xyz(p);`. This statement will suspend the coroutine, calls the underlying `api_xyz`, and, when the API reports the result back via its callback, the lambda created in `await_suspend` resumes the coroutine and the result provided will be returned as the result of the *await-expressions*.
-->
<p>And here is a slightly more complicated awaitable that suspends and enqueues a coroutine for later resumption if some resource is not available:</p>
<pre class="hljs"><code><div><span class="hljs-keyword">struct</span> get_resource {
  resource_manager&amp; rm;
  optional&lt;resource_manager::value_type&gt; result;
  <span class="hljs-function"><span class="hljs-keyword">bool</span> <span class="hljs-title">await_ready</span><span class="hljs-params">()</span> </span>{
    result = rm.try_get_resource();
    <span class="hljs-keyword">return</span> result.has_value(); 
  }
  <span class="hljs-function"><span class="hljs-keyword">void</span> <span class="hljs-title">await_suspend</span><span class="hljs-params">(coroutine_handle&lt;&gt; h)</span> </span>{
    rm.enqueue(h, result);
  }
  <span class="hljs-function"><span class="hljs-keyword">auto</span> <span class="hljs-title">await_resume</span><span class="hljs-params">()</span> </span>{ <span class="hljs-keyword">return</span> move(result.value()); }
};
</div></code></pre>
<p>With this awaitable we can write <code>auto r = co_await get_resource{rm};</code>. If a resource is available, <code>await_ready</code> returns <code>true</code>,
and proceeds to unpack the result in <code>await_resume</code>; otherwise, the coroutine is suspended and enqueued into a queue of coroutines waiting for a resource. When a resource becomes available, the resource_manager will pick a coroutine from the queue, update the optional and resume the coroutine.</p>
<p>An important property of how <em>await-expression</em> is specified by the Coroutines TS is that once <code>await_ready()</code> is called on an awaitable, related calls to <code>await_suspend()</code> and <code>await_resume()</code> will be called on the same awaitable object. This allows using an awaitable as a convenient store for the per-operation context required by the operation that the <em>awaitable</em> represents. If a library performs transformations on an <em>awaitable</em> and directly invokes <code>await_xxx</code> functions, it should preserve the aforementioned property.</p>
<p>Hopefully this section serves as a useful reminder of what coroutine awaitables are.</p>
<h2 id="6-incoming-papers-with-async-interactions">6. Incoming papers with async interactions</h2>
<p>Overall, we need to evaluate papers that provide asynchronous interfaces via callbacks or by returning future-like objects and provide awaitable interfaces. We also need to evaluate papers that offer blocking APIs, where blocking is performed by the library facility and decide whether a non-blocking awaitable version of the API is desired.</p>
<p>Ideally, <!-- In an ideal world, --> we would use <!--transformations similar to those offered by-->  CompletionToken transformations of Networking TS that can automatically convert from callback based APIs into awaitable or future based APIs. However, at the moment, CompletionToken transformations are not zero-overhead and while this might get resolved before C++20, this paper describes plan B, where we offer APIs with hand-crafted awaitables.</p>
<!-- which is what to do, if we don't come up with automatic, zero-overhead, callback into awaitable transformation traits. -->
<p>We also need to develop guidance for recommended naming for asynchronous/awaitable vs. blocking functions.</p>
<p>At the moment we have somewhat inconsistent naming coming from different papers. For example:</p>
<ul>
<li><code>non_blocking_push</code> vs <code>push</code> (blocking) [Concurrent Queues]</li>
<li><code>async_read</code> vs <code>read</code> (blocking) [Networking TS]</li>
<li><code>arrive</code> (non-blocking) vs <code>arrive_and_wait</code> (blocking) [Revised Latches and Barriers]</li>
<li><code>synchronize_rcu</code> and <code>rcu_barrier</code> (both blocking). How should we call non blocking versions? [RCU &amp; Hazard]</li>
</ul>
<!-- this belongs in executor section
Assuming that executors in some form enter the language, we would also need a guidance in the best way to express it in the API. For example:

```c++
co_await non_blocking_pop(v); // default executor for the queue
co_await execute_on(e, non_blocking_pop(v)); // on executor e
co_await e @ non_blocking_pop(v)); // on executor e, @ some binary op
co_await non_blocking_pop(v, e); // on executor e, embedded in the API
```
 -->
<p>Another aspect to consider is how to express that the user prefers to get an error reported as <code>expected&lt;T, error_code&gt;</code> as opposed to a throw. It can take several possible shapes:</p>
<pre class="hljs"><code><div>co_await s.async_read(buf); <span class="hljs-comment">// will throw on failure or return T</span>

co_await s.async_read(buf).as_expected(); <span class="hljs-comment">// await will return expected&lt;T,error_code&gt;</span>
<span class="hljs-function">co_await <span class="hljs-title">as_expected</span><span class="hljs-params">(s.async_read(buf)</span>)</span>;  <span class="hljs-comment">// same, without UFC</span>

co_await s.async_read(buf, ec); <span class="hljs-comment">// will return some default value and error will be reported in out parameter ec</span>
</div></code></pre>
<p>With library facilities sometimes needing to abandon the callbacks they accepted in their APIs due to shutdown or other considerations, we need to decide on a policy for how that will be expressed:</p>
<ol>
<li>awaitables are completed with errc::cancelled or a cancelled exception</li>
<li>awaitables are abandoned and the destructor of the awaitable is responsible for cancelling the coroutine.</li>
<li>a mixture of both approaches (as is the case in Networking TS today)</li>
</ol>
<p>And finally, given that we may be adding a lot of awaitables to the library APIs, we need to find an appropriate consistent wording to describe them and provide it as guidance to paper authors.</p>
<p>Note that not all blocking APIs require an awaitable. If the underlying facility is not providing completion notification, burning a thread to fulfill the awaitable requirements is probably overkill.</p>
<p>Do not do the following!!!</p>
<pre class="hljs"><code><div><span class="hljs-comment">// desired usage:</span>
task&lt;&gt; DoSomething(mutex&amp; m) {
   unique_lock&lt;mutex&gt; lock = co_await lock_or_suspend{m};
   <span class="hljs-comment">// ...</span>
}

<span class="hljs-comment">// implementation</span>
<span class="hljs-keyword">struct</span> lock_or_suspend {
   unique_lock&lt;mutex&gt; lock;
   lock_or_suspend(mutex &amp; mut)
      : lock(mut, try_to_lock) {}

   <span class="hljs-function"><span class="hljs-keyword">bool</span> <span class="hljs-title">await_ready</span><span class="hljs-params">()</span> <span class="hljs-keyword">const</span> </span>{ <span class="hljs-keyword">return</span> lock.owns_lock(); }
   <span class="hljs-function"><span class="hljs-keyword">void</span> <span class="hljs-title">await_suspend</span><span class="hljs-params">(coroutine_handle&lt;&gt; h)</span> </span>{
      <span class="hljs-built_in">std</span>::<span class="hljs-function">thread <span class="hljs-title">t</span><span class="hljs-params">([<span class="hljs-keyword">this</span>, h]{ lock.lock()</span></span>; h(); });
      t.detach();
   }
   unique_lock&lt;mutex&gt; await_resume() { <span class="hljs-keyword">return</span> move(lock);}
};
</div></code></pre>
<h3 id="61-executors">6.1. Executors</h3>
<p>Executors expose six flavors of APIs that take callbacks and initiate execution:</p>
<table>
<thead>
<tr>
<th>Name</th>
<th>Cardinality</th>
<th>Directionality</th>
</tr>
</thead>
<tbody>
<tr>
<td>execute</td>
<td>single</td>
<td>oneway</td>
</tr>
<tr>
<td>twoway_execute</td>
<td>single</td>
<td>twoway</td>
</tr>
<tr>
<td>then_execute</td>
<td>single</td>
<td>then</td>
</tr>
<tr>
<td>bulk_execute</td>
<td>bulk</td>
<td>oneway</td>
</tr>
<tr>
<td>bulk_twoway_execute</td>
<td>bulk</td>
<td>twoway</td>
</tr>
<tr>
<td>bulk_then_execute</td>
<td>bulk</td>
<td>then</td>
</tr>
</tbody>
</table>
<p>We will focus on the first three. Whatever decision is made about the non-bulk execution functions will also be applied to the <code>bulk_</code> versions.</p>
<table>
<thead>
<tr>
<th>API</th>
<th>return type</th>
<th>meaning</th>
</tr>
</thead>
<tbody>
<tr>
<td>x.execute(f)</td>
<td>void</td>
<td>execute f on executor x</td>
</tr>
<tr>
<td>x.twoway_execute(f)</td>
<td>Future&lt;R&gt;</td>
<td>execute f on executor x and return result as Future</td>
</tr>
<tr>
<td>x.then_execute(f, fut)</td>
<td>Future&lt;R&gt;</td>
<td>execute f on executor x after Future <code>fut</code> completes</td>
</tr>
</tbody>
</table>
<p>During executor design discussions the need for splitting the execution functions into void returning OneWay and Future returning TwoWay was questioned:</p>
<table>
<thead>
<tr>
<th>Question</th>
<th>Answer</th>
</tr>
</thead>
<tbody>
<tr>
<td>Why not TwoWay always?</td>
<td>one-way execution functions avoid the cost of a future object</td>
</tr>
<tr>
<td>Why not OneWay always?</td>
<td>Two-way execution functions allow executors to participate directly in synchronization rather than require inefficient synchronization out-of-band.</td>
</tr>
</tbody>
</table>
<p>With coroutines, the distinction between OneWay executors and TwoWay is not necessarily relevant. Unless the underlying API that the executor abstracts away always has to return a heavy-weight future, two-way APIs can simply return an awaitable, which is a simple struct with possibly a single pointer/reference member referring to the executor. A TwoWay API returning an awaitable is as efficient as a OneWay one; therefore they can be collapsed into one.</p>
<p>The proposed mapping between OneWay executor APIs and their awaitable counterparts could look like this:</p>
<table>
<thead>
<tr>
<th>API</th>
<th>expressed as</th>
<th>meaning</th>
</tr>
</thead>
<tbody>
<tr>
<td>x.execute(f)</td>
<td>co_await execute_on(x)</td>
<td>resume coroutine on executor x</td>
</tr>
<tr>
<td>x.twoway_execute(f)</td>
<td>co_await execute_on(x)</td>
<td>resume coroutine on executor x (same as OneWay)</td>
</tr>
<tr>
<td>x.then_execute(f, fut)</td>
<td>co_await execute_on(x, fut)</td>
<td>resume coroutine on executor x after awaitable <code>fut</code> completes</td>
</tr>
</tbody>
</table>
<p>There could be alternative spellings for ThenExecute if SG1/LEWG so desires. Here are a few suggestions (some are not necessarily good):</p>
<pre class="hljs"><code><div><span class="hljs-function">co_await <span class="hljs-title">execute_on</span><span class="hljs-params">(x, async_read()</span>)</span>; <span class="hljs-comment">// basic form</span>
<span class="hljs-function">co_await <span class="hljs-title">async_read</span><span class="hljs-params">()</span>.<span class="hljs-title">execute_on</span><span class="hljs-params">(x)</span></span>;  <span class="hljs-comment">// UFC flavored form</span>
<span class="hljs-function">co_await <span class="hljs-title">async_read</span><span class="hljs-params">()</span>.<span class="hljs-title">via</span><span class="hljs-params">(x)</span></span>;         <span class="hljs-comment">// SemiFuture inspired form</span>
co_await x &lt;&lt; async_read();           <span class="hljs-comment">// iostream inspired form</span>
</div></code></pre>
<p>Also, we encourage authors of Executors paper to explore whether futures in executor APIs can be replaced with awaitables.</p>
<p>Here is a complete implementation of <code>execute_on(x)</code>:</p>
<pre class="hljs"><code><div><span class="hljs-keyword">template</span> &lt;OneWayExecutor E&gt;
<span class="hljs-function"><span class="hljs-keyword">auto</span> <span class="hljs-title">execute_on</span><span class="hljs-params">(E e)</span> </span>{
  <span class="hljs-keyword">struct</span> Awaitable {
    E e;
    <span class="hljs-function"><span class="hljs-keyword">constexpr</span> <span class="hljs-keyword">bool</span> <span class="hljs-title">await_ready</span><span class="hljs-params">()</span> <span class="hljs-keyword">const</span> </span>{ <span class="hljs-keyword">return</span> <span class="hljs-literal">false</span>; }
    <span class="hljs-function"><span class="hljs-keyword">void</span> <span class="hljs-title">await_suspend</span><span class="hljs-params">(coroutine_handle&lt;&gt; h)</span> </span>{ e.execute(h); }
    <span class="hljs-function"><span class="hljs-keyword">constexpr</span> <span class="hljs-keyword">void</span> <span class="hljs-title">await_resume</span><span class="hljs-params">()</span> <span class="hljs-keyword">const</span> </span>{}
  };
  <span class="hljs-keyword">return</span> Awaitable{e};
}
</div></code></pre>
<p>and one possible implementation of <code>execute_on(x, a)</code>:</p>
<pre class="hljs"><code><div><span class="hljs-keyword">template</span> &lt;OneWayExecutor E, Awaitable A&gt;
<span class="hljs-function"><span class="hljs-keyword">auto</span> <span class="hljs-title">execute_on</span><span class="hljs-params">(E e, A&amp;&amp; a)</span> </span>{ <span class="hljs-comment">// deduces to task&lt;result-of-awaiting-on-a&gt;</span>
  <span class="hljs-keyword">auto</span> result = co_await forward&lt;A&gt;(a);
  <span class="hljs-function">co_await <span class="hljs-title">execute_on</span><span class="hljs-params">(e)</span></span>;
  co_return result;
}
</div></code></pre>
<h3 id="62-concurrency-ts">6.2. Concurrency TS</h3>
<p>Concurrency TS [P0159] covers three areas:</p>
<ol>
<li>atomic_shared_ptr and friends (voted in C++20, no coroutine interaction)</li>
<li>latches and barriers (covered in later discussion about p0666r1)</li>
<li>std::future improvement</li>
</ol>
<p>In this section we will focus on the third part and throughout this section <code>future</code> and <code>shared_future</code> will be referring to <code>std::experimental::future</code> and <code>std::experimental::shared_future</code> types of the TS and not to <code>std::future</code>/<code>std::shared_future</code> of the C++ standard.</p>
<h4 id="621-future">6.2.1. future</h4>
<p>While the interface of the <code>future</code> described in the Concurrency TS allows graceful interactions with coroutines, we need to evaluate what are the use cases for the <code>future</code> that are not already covered by coroutines using zero-overhead <code>task</code> type and whether having two coroutine types with pretty names would create confusion among the users.</p>
<p>The approach chosen for the <code>future</code> will apply to <code>shared_future</code> as well.</p>
<h4 id="622-whenall">6.2.2. when_all</h4>
<p>Concurrency TS offers two <code>when_all</code> functions that convert a Sequence of <code>future</code>s or <code>shared_future</code>s into a <code>future</code> of a sequence of completed <code>future</code>s.</p>
<p>It can handle futures with heterogeneous result types:</p>
<pre class="hljs"><code><div>future&lt;<span class="hljs-keyword">void</span>&gt; x;
shared_future&lt;<span class="hljs-keyword">int</span>&gt; y;
<span class="hljs-comment">// when_all(x,y) =&gt; future&lt;tuple&lt;future&lt;void&gt;, shared_future&lt;int&gt;&gt;&gt;</span>
</div></code></pre>
<p>and with homogeneous result types:</p>
<pre class="hljs"><code><div><span class="hljs-built_in">std</span>::<span class="hljs-built_in">list</span>&lt;future&lt;<span class="hljs-keyword">int</span>&gt;&gt; <span class="hljs-built_in">list</span>;
<span class="hljs-comment">// when_all(list.begin(), list.end()) =&gt; future&lt;vector&lt;future&lt;int&gt;&gt;&gt;</span>
</div></code></pre>
<p>We recommend that <code>when_all</code> be modified to accept a sequence of arbitrary awaitables and return an awaitable of result type <code>tuple</code> or some container type populated with <code>expected&lt;T,exception_ptr&gt;</code> that hold the result of awaiting on a particular awaitable.</p>
<p>This design pushes the decision about how to deal with exceptions onto the user.</p>
<p>An alternative design could return an awaitable of sequence of values themselves and exceptions could be reported in a form of an <code>exception_list</code>. SG1 is actively exploring what would be the best way to deal with exceptions which occur concurrently [<a href="#P0797R1">P0797R1</a>].</p>
<p>The algorithm <code>when_all</code> is useful when composing awaitable types with standard library
algorithms or ranges. For example:</p>
<pre class="hljs"><code><div><span class="hljs-keyword">int</span> x = accumulate( co_await when_all(x.async_read(), y.async_read())
                    | reduce_exceptions(), <span class="hljs-number">0</span>);
</div></code></pre>
<p>where <code>reduce_exceptions</code> view adapter will convert <code>expected&lt;T,E&gt;</code>s into <code>T</code>s applying appropriate exception reduction policy if some <code>expected</code>s store failures.</p>
<h4 id="623-whenany">6.2.3. when_any</h4>
<p>Concurrency TS also offers the <code>when_any</code> algorithm that is similar to <code>when_all</code>, with the exception that it will complete its future as soon as at least one of the input futures has completed.</p>
<p>Note, that <code>when_any</code> behavior is sub-optimal today due to absence of cancellation.</p>
<pre class="hljs"><code><div><span class="hljs-built_in">std</span>::<span class="hljs-built_in">list</span>&lt;future&lt;<span class="hljs-keyword">int</span>&gt;&gt; <span class="hljs-built_in">list</span>;
<span class="hljs-built_in">list</span>.push_back(<span class="hljs-built_in">std</span>::async([]{})); <span class="hljs-comment">// will block in destructor</span>
<span class="hljs-built_in">list</span>.push_back(async_read()); <span class="hljs-comment">// will not block in destructor</span>
<span class="hljs-comment">// when_any(list.begin(), list.end()) =&gt; future&lt;pair&lt;size_t,vector&lt;future&lt;int&gt;&gt;&gt;&gt;</span>
<span class="hljs-comment">// when_any in this case will act like when_all</span>
</div></code></pre>
<p>However, we foresee that <a href="#NetworkingTS">Networking TS</a> may evolve along the lines suggested in [<a href="#Networking-and-threadpools">Networking &amp; Threadpools</a>] and will provide hierarchical lifetime and cancellation domains, called at the moment, <code>tp_context</code>. With that infrastructure in place, we can generalize <code>when_any</code> and make it behave reasonably in the presence of awaitables and futures with RAII semantics.</p>
<p><code>when_any</code> would have to accept a reference to a cancellation domain <code>tp_context</code> and a sequence of arbitrary awaitables. As soon as at least one awaitable has completed (either successfully or with an error), <code>when_any</code> will initiate cancellation and will wait until <strong>all</strong> awaitables have completed. The return value will be an awaitable of a pair of an index and a container of <code>expected&lt;T, exception_ptr&gt;</code>. The index will indicate which of the awaitables has completed first. (With respect to the decision whether to represent the failures using <code>expected</code> or <code>exception_list</code>, the behavior of <code>when_any</code> should match <code>when_all</code>.)</p>
<p>We can also foresee a desire to have <code>when_any_successful</code> or something along these lines where it will await until either all awaitables have completed with failure or at least one completed successfully. With this algorithm we can easily write something like this:</p>
<pre class="hljs"><code><div>tp_context&amp; tp; <span class="hljs-comment">// cancellation domain</span>
<span class="hljs-keyword">auto</span> t = co_await when_any_successful(tp, tcp::connect(tp, addr1), 
                                          tcp::connect(tp, addr2));
<span class="hljs-comment">// pick whichever connection was successful and use it</span>
</div></code></pre>
<h3 id="63-other-future-like-types">6.3. Other future-like types</h3>
<p>We have several proposals looking into possible evolution of the future:</p>
<ul>
<li>Back to the std2::future Part II [P0701R2]</li>
<li>A strawman Future API [P0904R0]</li>
<li>Towards a Good Future	[P0676R0]</li>
</ul>
<p>Though all of the futures presented in those papers can gracefully interact with coroutines, we would like to understand what scenarios not already covered by coroutines + awaitables they are intended for and whether there is a need to have a heavy-weight future type in C++ at all.</p>
<h3 id="64-parallelism-ts">6.4. Parallelism TS</h3>
<h4 id="641-task-blocks">6.4.1. Task Blocks</h4>
<p>We encourage authors to evaluate whether it is possible to use coroutines to get close to the original cilk vision.
For example, a famous cilk fibonacci example:</p>
<pre class="hljs"><code><div><span class="hljs-function"><span class="hljs-keyword">int</span> <span class="hljs-title">fib</span><span class="hljs-params">(<span class="hljs-keyword">int</span> n)</span>
</span>{
    <span class="hljs-keyword">if</span> (n &lt; <span class="hljs-number">2</span>)
        <span class="hljs-keyword">return</span> n;
    <span class="hljs-keyword">int</span> x = cilk_spawn fib(n<span class="hljs-number">-1</span>); <span class="hljs-comment">// continuation stealing</span>
    <span class="hljs-keyword">int</span> y = fib(n<span class="hljs-number">-2</span>);
    cilk_sync;
    <span class="hljs-keyword">return</span> x + y;
}
</div></code></pre>
<p>can be expressed with coroutines as:</p>
<pre class="hljs"><code><div>cilk_task&lt;<span class="hljs-keyword">int</span>&gt; fib(<span class="hljs-keyword">int</span> n)
{
    <span class="hljs-keyword">if</span> (n &lt; <span class="hljs-number">2</span>)
        <span class="hljs-keyword">return</span> n;
    <span class="hljs-keyword">auto</span> x = co_await cilk_spawn(fib(n<span class="hljs-number">-1</span>)); <span class="hljs-comment">// continuation stealing</span>
    <span class="hljs-keyword">auto</span> y = fib(n<span class="hljs-number">-2</span>);
    <span class="hljs-function">co_await <span class="hljs-title">cilk_sync</span><span class="hljs-params">(x,y)</span></span>;
    <span class="hljs-keyword">return</span> x.get() + y.get();
}
</div></code></pre>
<p>If <code>spawn/sync</code> is the preferable way to express fork/join computations,
is there a need for a separate <code>task_block</code> facility?</p>
<h3 id="65-stdexpected">6.5. std::expected</h3>
<p>While it is possible to add coroutine bindings to allow coroutines to author or consume objects of <code>expected&lt;T,E&gt;</code> type, we are not advocating doing so for C++20.</p>
<p>However, expected offers a convenient way to modify awaitables that return values and report errors via exceptions into awaitables that return expected&lt;T,E&gt; where the error type E is dependent on the operation.
If the underlying API that the awaitable wraps uses <code>error_code</code> as the reporting mechanism, E would be <code>error_code</code>. Otherwise, it will be an <code>exception_ptr</code>.</p>
<pre class="hljs"><code><div>co_await s.async_read(buf); <span class="hljs-comment">// will throw on failure or return T</span>
co_await s.async_read(buf).as_expected(); <span class="hljs-comment">// await will return expected&lt;T,error_code&gt;</span>
</div></code></pre>
<p>A suggested bikeshed name for the awaitable modifier is <code>as_expected</code>.</p>
<h3 id="66-unified-call-syntax">6.6. Unified Call Syntax</h3>
<p>It is impractical for every awaitable to include modifiers, such as <code>on_executor</code>, <code>via</code> or <code>as_expected</code>. Free functions <code>via</code>, <code>on_executor</code> or <code>as_expected</code> can transform arbitrary awaitables to alter their behavior (unless a particular awaitable can be implemented more efficiently with an awaitable specific implementation, in the latter case, the technique similar to swap can be used).</p>
<p>However the .modifier() syntax is compelling when composing modifiers on awaitable. We have a great hope that some version of Unified Call Syntax [N4474] will be available in C++20 to keep modifier syntax elegant and unobtrusive.</p>
<h3 id="67-revised-latches-and-barriers-for-c20">6.7. Revised Latches and Barriers for C++20</h3>
<p>The author of &quot;P0666r1: Revised latches and barriers&quot; proposes to extend the <code>latch</code> and <code>barrier</code> classes with non-blocking future based APIs <code>arrive</code> for some unspecified future type.</p>
<p>We encourage the author to explore an alternative where <code>arrive</code> will be returning an awaitable. The benefit of an awaitable is that it is a simple struct returned by value, has no overhead, can be awaited upon and can be trivially converted into any future type that has coroutine binding via a simple adapter:</p>
<pre class="hljs"><code><div><span class="hljs-keyword">template</span> &lt;<span class="hljs-keyword">typename</span> T, Awaitable&lt;T&gt; A&gt;
SomeFuture&lt;T&gt; as_some_future(A &amp;&amp; a) {
   co_return co_await <span class="hljs-built_in">std</span>::forward&lt;A&gt;(a);
}
</div></code></pre>
<h3 id="68-parallel-algorithms">6.8. Parallel algorithms</h3>
<p>Invoking Algorithms Asynchronously paper [p0361r1] correctly argues
that fork/join parallelism of C++17 parallel algorithms imposes an implicit barrier onto the parallel execution flow that impedes parallel efficiency and efficient resource utilization.</p>
<p>The paper proposes to add a new execution policy to the parallel algorithms that will alter their return value from <code>void</code> to some future_type.</p>
<p>As with the case of latches and barriers paper we recommend returning an awaitable instead of the future.</p>
<h3 id="69-concurrent-queues">6.9. Concurrent Queues</h3>
<p>Currently, &quot;P0260r1: Concurrent Queues&quot; paper proposes queues that have blocking and non-blocking APIs for pushing and popping. Non-blocking APIs have form:</p>
<pre class="hljs"><code><div>queue_op_status <span class="hljs-built_in">queue</span>::nonblocking_push(<span class="hljs-keyword">const</span> Element&amp;);
</div></code></pre>
<p>If the operation would block, return <code>queue_op_status::busy</code>. Otherwise, if the queue is full, return <code>queue_op_status::full</code>. Otherwise, push the Element onto the queue. Return <code>queue_op_status::success</code>.</p>
<p>If authors of P0260r1 decide that awaitable version is desirable, they would need to add:</p>
<blockquote>
<p><em>Awaitable&lt;void&gt;</em> <code>queue::async_push(const Element&amp;);</code></p>
</blockquote>
<p>where the return type is some awaitable type that will result in suspension of a coroutine if the queue is full or not available for immediate push. Coroutine should be resumed when the element is enqueued. Eventual result of the awaitable should be of type <code>void</code>.</p>
<h3 id="610-rcu-hazard-pointers">6.10. RCU &amp; Hazard pointers</h3>
<p>RCU and hazard pointers paper [p0566r4] exposes two free functions that perform blocking operations:</p>
<pre class="hljs"><code><div><span class="hljs-function"><span class="hljs-keyword">void</span> <span class="hljs-title">synchronize_rcu</span><span class="hljs-params">()</span> <span class="hljs-keyword">noexcept</span></span>;
<span class="hljs-function"><span class="hljs-keyword">void</span> <span class="hljs-title">rcu_barrier</span><span class="hljs-params">()</span> <span class="hljs-keyword">noexcept</span></span>;
</div></code></pre>
<p>We would like to ask authors, if it would make sense to provide an awaitable version of those APIs, for example:</p>
<pre class="hljs"><code><div><span class="hljs-function">SomeAwaitable <span class="hljs-title">async_synchronize_rcu</span><span class="hljs-params">()</span> <span class="hljs-keyword">noexcept</span></span>;
<span class="hljs-function">SomeAwaitable <span class="hljs-title">async_rcu_barrier</span><span class="hljs-params">()</span> <span class="hljs-keyword">noexcept</span></span>;
</div></code></pre>
<p>that would suspend the coroutine until it is safe to proceed and resume it afterwards.</p>
<h3 id="611-networking-technical-specification">6.11. Networking Technical Specification</h3>
<p>Networking TS [<a href="#NetworkingTS">N4711</a>] is the proposal that would benefit the most from adding coroutine support. Asynchronous programming offered by Networking TS is simplified dramatically when combined with coroutines.</p>
<p>The following short code fragment:</p>
<pre class="hljs"><code><div>task&lt;&gt; session(tcp::socket s, <span class="hljs-keyword">size_t</span> block_size) {
  <span class="hljs-built_in">std</span>::<span class="hljs-built_in">vector</span>&lt;<span class="hljs-keyword">char</span>&gt; buf(block_size);
  <span class="hljs-keyword">for</span> (;;) {
    <span class="hljs-keyword">size_t</span> n = co_await async_read_some(s, buffer(buf.data(), block_size));
    <span class="hljs-function">co_await <span class="hljs-title">async_write</span><span class="hljs-params">(s, buffer(buf.data()</span>, n))</span>;
  }
}

task&lt;&gt; server(io_context&amp; io, tcp::endpoint <span class="hljs-keyword">const</span>&amp; endpoint, <span class="hljs-keyword">size_t</span> block_size) {
    tcp::<span class="hljs-function">acceptor <span class="hljs-title">acceptor</span><span class="hljs-params">(io, endpoint)</span></span>;
    acceptor.listen();
    <span class="hljs-keyword">for</span> (;;)
        spawn(io, session(co_await async_accept(acceptor), block_size));
}
</div></code></pre>
<p>replaces a hand-crafted state machine 100 lines long that you would have to write otherwise (see <a href="#10-appendix">Appendix</a> for the full listing as it is too long to insert into the main body of this paper).</p>
<p>Because providing coroutine bindings is of critical importance to Networking TS, we will offer a dedicated paper on this subject. This section offers a brief overview.</p>
<h4 id="6111-async-api-simple-form">6.11.1. async API: simple form</h4>
<p>Here is an example of a typical async API exposed by networking TS:</p>
<pre class="hljs"><code><div>s.async_write(buffer(buf, size),
              [](error_code <span class="hljs-keyword">const</span>&amp; ec, <span class="hljs-keyword">size_t</span> n) { handle_write(ec, n); });
</div></code></pre>
<p>Awaitable flavor of the API above would look like:</p>
<pre class="hljs"><code><div><span class="hljs-keyword">size_t</span> n = co_await s.async_write(buffer(buf, size));
</div></code></pre>
<p>Where a system_error exception will be thrown if operation completes with error. If immediate error handling is desired, <code>as_expected()</code> modifier can be used to alter this behavior:</p>
<pre class="hljs"><code><div>expected&lt;<span class="hljs-keyword">size_t</span>, error_code&gt; result = co_await s.async_write(buffer(buf, size)).as_expected();
<span class="hljs-keyword">if</span> (!result.has_value()) {
  <span class="hljs-comment">// deal with the error</span>
}
</div></code></pre>
<h4 id="6112-async-api-adding-an-executor">6.11.2. async API: adding an executor</h4>
<p>If a user desires to alter the executor on which completion of the asynchronous operation will be invoked, it can use <code>bind_executor</code> wrapper around the lambda passed as a last parameter to an async API:</p>
<pre class="hljs"><code><div>s.async_write(buffer(buf, size),
            bind_executor(strand,
              [](error_code <span class="hljs-keyword">const</span>&amp; ec, <span class="hljs-keyword">size_t</span> n) { handle_write(ec, n); }));
</div></code></pre>
<p>In a coroutine, <code>via</code> awaitable modifier can be used to achieve the same purpose:</p>
<pre class="hljs"><code><div><span class="hljs-keyword">size_t</span> n = co_await s.async_write(buffer(buf, size)).via(strand);
</div></code></pre>
<h4 id="6113-async-api-altering-default-allocator">6.11.3. async API: altering default allocator</h4>
<p>Networking TS allocates a small per-operation context for every asynchronous operation. The user can supply an allocator that would replace the default allocator used by the networking TS implementation. While Networking TS does not offer a simple wrapping helper (like <code>bind_executor</code>) that allows pairing a callable with a matching allocator, such a helper is simple to write, and <code>make_custom_alloc_handler</code> in the example below is the described wrapper:</p>
<pre class="hljs"><code><div>s.async_write(buffer(buf, size),
              make_custom_alloc_handler(alloc,
                  [](error_code <span class="hljs-keyword">const</span>&amp; ec, <span class="hljs-keyword">size_t</span> n) { handle_write(ec, n); }));
</div></code></pre>
<p>When Networking TS is modified to use awaitables, it can utilize the storage available in the awaitable and may not need any dynamic allocations. However if user desires to control where per operation context goes, it can use <code>using_allocator()</code> modifier as shown below:</p>
<pre class="hljs"><code><div><span class="hljs-keyword">size_t</span> n = co_await s.async_write(buffer(buf, size)).using_allocator(alloc);
</div></code></pre>
<h2 id="7-conclusion">7. Conclusion</h2>
<p>Coroutine TS allows library authors to offer users elegant and efficient access to their facilities when used with coroutines.
Given the guidance that big language features with broad library impact need to land early, we recommend to merge Coroutines TS
into the working paper as soon as possible to give library authors a stable foundation to work on.</p>
<h2 id="8-acknowledgement">8. Acknowledgement</h2>
<p>Great thanks to Casey Carter for review and suggestions.</p>
<h2 id="9-references">9. References</h2>
<p><a id="Executors"></a>
[Executors]: <a href="https://wg21.link/P0761R2">P0761R2: Executors Design Document</a></p>
<p><a id="NetworkingTS"></a>
[NetworkingTS]: <a href="https://wg21.link/N4711">N4711: C++ Extensions for Networking</a></p>
<p><a id="ConcurrentQueues"></a>
[ConcurrentQueues]: <a href="https://wg21.link/p0260r1">p0260r1: Concurrent Queues</a></p>
<p><a id="AsyncParallel"></a>
[AsyncParallel]: <a href="https://wg21.link/p0361r1">p0361r1: Invoking Algorithms Asynchronously</a></p>
<p><a id="Latches-and-Barriers"></a>
[Latches-and-Barriers]: <a href="https://wg21.link/p0666r1">p0666r1: Revised Latches and Barriers for C++20</a></p>
<p><a id="ConcurrencyTS"></a>
[ConcurrencyTS]: <a href="https://wg21.link/p0159r0">P0159: Concurrency TS1</a></p>
<p><a id="RCU-HazardPtr"></a>
[RCU-HazardPtr]: <a href="https://wg21.link/p0566r4">p0566r4: Concurrent Data Structures Hazard Pointer and Read-Copy-Update (RCU)</a></p>
<p><a id="Networking-and-Threadpools"></a>
[Networking-and-Threadpools]: <a href="https://wg21.link/p0399r0">p0399r0: Networking TS &amp; Threadpools</a></p>
<p><a id="CoroAlloc"></a>
[CoroAlloc]: <a href="https://godbolt.org/g/QjRTEt">Godbolt: Coroutines using allocator example</a></p>
<p><a id="Range-v3"></a>
[Range-v3]: <a href="https://github.com/ericniebler/range-v3">Range-v3 on Github</a></p>
<p><a id="RangesTS"></a>
[RangesTS]: <a href="https://wg21.link/N4685">N4685: C++ Extensions for Ranges</a></p>
<p><a id="UFC"></a>
[UFC]: <a href="https://wg21.link/N4474">N4474: Unified Call Syntax</a></p>
<p><a id="RxMarbles"></a>
[RxMarbles]: <a href="http://rxmarbles.com">Asynchronous stream composition</a></p>
<p><a id="RxCpp"></a>
[RxCpp]: <a href="https://github.com/Reactive-Extensions/RxCpp">Reactive Extensions for C++</a></p>
<p><a id="P0701R2"></a>
[P0701R2]: <a href="https://wg21.link/p0701r2">p0701r2: Back to the std2::future Part II</a></p>
<p><a id="P0904R0"></a>
[P0904R0]: <a href="https://wg21.link/p0904r0">P0904r0: A strawman Future API</a></p>
<p><a id="P0676R0"></a>
[P0676R0]: <a href="https://wg21.link/p0676r0">P0676r0: Towards a Good Future</a></p>
<p><a id="P0797R1"></a>
[P0797R1]: <a href="https://wg21.link/P0797R1">P0797R1: Handling Concurrent Exceptions with Executors</a></p>
<h2 id="10-appendix">10. Appendix</h2>
<h3 id="101-full-listing-of-hand-crafted-state-machine-matching-the-example-in-networking-technical-specification-section-of-the-paper">10.1. Full listing of hand-crafted state machine matching the example in &quot;Networking Technical Specification&quot; section of the paper.</h3>
<p>Compare the following short fragment:</p>
<pre class="hljs"><code><div>task&lt;&gt; session(tcp::socket s, <span class="hljs-keyword">size_t</span> block_size) {
  <span class="hljs-built_in">std</span>::<span class="hljs-built_in">vector</span>&lt;<span class="hljs-keyword">char</span>&gt; buf(block_size);
  <span class="hljs-keyword">for</span> (;;) {
    <span class="hljs-keyword">size_t</span> n = co_await async_read_some(s, buffer(buf.data(), block_size));
    <span class="hljs-function">co_await <span class="hljs-title">async_write</span><span class="hljs-params">(s, buffer(buf.data()</span>, n))</span>;
  }
}

task&lt;&gt; server(io_context&amp; io, tcp::endpoint <span class="hljs-keyword">const</span>&amp; endpoint, <span class="hljs-keyword">size_t</span> block_size) {
    tcp::<span class="hljs-function">acceptor <span class="hljs-title">acceptor</span><span class="hljs-params">(io, endpoint)</span></span>;
    acceptor.listen();
    <span class="hljs-keyword">for</span> (;;)
        spawn(io, session(co_await async_accept(acceptor), block_size));
}
</div></code></pre>
<p>and the equivalent code without coroutines:</p>
<pre class="hljs"><code><div><span class="hljs-keyword">struct</span> session {
  session(io_context&amp; io, ip::tcp::socket s, <span class="hljs-keyword">size_t</span> block_size)
      : io_context_(io), socket_(<span class="hljs-built_in">std</span>::move(s)), block_size_(block_size), buf_(block_size),
        read_data_length_(<span class="hljs-number">0</span>) {}

  <span class="hljs-function"><span class="hljs-keyword">void</span> <span class="hljs-title">start</span><span class="hljs-params">()</span> </span>{
    <span class="hljs-built_in">std</span>::error_code set_option_err;
    <span class="hljs-keyword">if</span> (!set_option_err) {
      socket_.async_read_some(buffer(buf_.data(), block_size_),
                              [<span class="hljs-keyword">this</span>](<span class="hljs-keyword">auto</span> ec, <span class="hljs-keyword">auto</span> n) { handle_read(ec, n); });
      <span class="hljs-keyword">return</span>;
    }
    net::post(io_context_, [<span class="hljs-keyword">this</span>] { destroy(<span class="hljs-keyword">this</span>); });
  }

  <span class="hljs-function"><span class="hljs-keyword">void</span> <span class="hljs-title">handle_read</span><span class="hljs-params">(<span class="hljs-keyword">const</span> <span class="hljs-built_in">std</span>::error_code&amp; err, <span class="hljs-keyword">size_t</span> length)</span> </span>{
    <span class="hljs-keyword">if</span> (!err) {
      read_data_length_ = length;
      async_write(socket_, buffer(buf_.data(), read_data_length_),
                  [<span class="hljs-keyword">this</span>](<span class="hljs-keyword">auto</span> ec, <span class="hljs-keyword">auto</span>) { handle_write(ec); });
      <span class="hljs-keyword">return</span>;
    }
    net::post(io_context_, [<span class="hljs-keyword">this</span>] { destroy(<span class="hljs-keyword">this</span>); });
  }

  <span class="hljs-function"><span class="hljs-keyword">void</span> <span class="hljs-title">handle_write</span><span class="hljs-params">(<span class="hljs-keyword">const</span> <span class="hljs-built_in">std</span>::error_code&amp; err)</span> </span>{
    <span class="hljs-keyword">if</span> (!err) {
      socket_.async_read_some(buffer(buf_.data(), block_size_),
                              [<span class="hljs-keyword">this</span>](<span class="hljs-keyword">auto</span> ec, <span class="hljs-keyword">auto</span> n) { handle_read(ec, n); });
      <span class="hljs-keyword">return</span>;
    }
    net::post(io_context_, [<span class="hljs-keyword">this</span>] { destroy(<span class="hljs-keyword">this</span>); });
  }

  <span class="hljs-function"><span class="hljs-keyword">static</span> <span class="hljs-keyword">void</span> <span class="hljs-title">destroy</span><span class="hljs-params">(session* s)</span> </span>{ <span class="hljs-keyword">delete</span> s; }

<span class="hljs-keyword">private</span>:
  net::io_context&amp; io_context_;
  net::ip::tcp::socket socket_;
  <span class="hljs-keyword">size_t</span> block_size_;
  <span class="hljs-built_in">std</span>::<span class="hljs-built_in">vector</span>&lt;<span class="hljs-keyword">char</span>&gt; buf_;
  <span class="hljs-keyword">size_t</span> read_data_length_;
};

<span class="hljs-keyword">struct</span> server {
  server(io_context&amp; io, <span class="hljs-keyword">const</span> ip::tcp::endpoint&amp; endpoint, <span class="hljs-keyword">size_t</span> block_size)
      : io_context_(io), acceptor_(io, endpoint), block_size_(block_size) {
    acceptor_.listen();
    start_accept();
  }

  <span class="hljs-function"><span class="hljs-keyword">void</span> <span class="hljs-title">start_accept</span><span class="hljs-params">()</span> </span>{
    acceptor_.async_accept([<span class="hljs-keyword">this</span>](<span class="hljs-keyword">auto</span> ec, <span class="hljs-keyword">auto</span> s) { handle_accept(ec, <span class="hljs-built_in">std</span>::move(s)); });
  }

  <span class="hljs-function"><span class="hljs-keyword">void</span> <span class="hljs-title">handle_accept</span><span class="hljs-params">(<span class="hljs-built_in">std</span>::error_code err, ip::tcp::socket s)</span> </span>{
    <span class="hljs-keyword">if</span> (!err) {
      session* new_session = <span class="hljs-keyword">new</span> session(io_context_, <span class="hljs-built_in">std</span>::move(s), block_size_);
      new_session-&gt;start();
    }
    start_accept();
  }

<span class="hljs-keyword">private</span>:
  io_context&amp; io_context_;
  ip::tcp::acceptor acceptor_;
  <span class="hljs-keyword">size_t</span> block_size_;
};
</div></code></pre>

    </body>
    </html>