author     Garrett D'Amore <garrett@damore.org>   2024-12-09 13:33:11 -0800
committer  Garrett D'Amore <garrett@damore.org>   2024-12-22 21:31:28 -0800
commit     c8ce57d668d73d92a071fa86f81e07ca403d8672 (patch)
tree       ad7e602e43fefa64067fdac5fcd23987a50c3a90 /src/sp/transport/inproc
parent     013bb69c6be2f0a4572f4200de05e664692b6704 (diff)
aio: introduce nni_aio_defer
This will replace nni_aio_schedule, and it also takes care of finishing the task when needed. Because it does so without dropping the lock, it is more efficient and race free. This change also converts some subsystems to the new function.
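
Condensed, the conversion made at each call site in this diff is the following; the queue, lock, and cancel callback names are taken from the send path in the hunks below, and the comment on the new form reflects only what the commit message says about nni_aio_defer:

// Old form: nni_aio_schedule returns an error code, and the caller has to
// report it with nni_aio_finish_error after dropping the lock.
nni_mtx_lock(&queue->lock);
if ((rv = nni_aio_schedule(aio, inproc_queue_cancel, queue)) != 0) {
	nni_mtx_unlock(&queue->lock);
	nni_aio_finish_error(aio, rv);
	return;
}

// New form: nni_aio_defer returns false when the operation cannot be
// deferred and, per the commit message, finishes the aio itself while the
// lock is still held, so the caller only unlocks and returns.
nni_mtx_lock(&queue->lock);
if (!nni_aio_defer(aio, inproc_queue_cancel, queue)) {
	nni_mtx_unlock(&queue->lock);
	return;
}

The same substitution is applied to the receive, connect, and accept paths below.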
Diffstat (limited to 'src/sp/transport/inproc')
-rw-r--r--  src/sp/transport/inproc/inproc.c | 16
1 file changed, 4 insertions, 12 deletions
diff --git a/src/sp/transport/inproc/inproc.c b/src/sp/transport/inproc/inproc.c
index 90317499..fcf75566 100644
--- a/src/sp/transport/inproc/inproc.c
+++ b/src/sp/transport/inproc/inproc.c
@@ -193,7 +193,6 @@ inproc_pipe_send(void *arg, nni_aio *aio)
{
inproc_pipe *pipe = arg;
inproc_queue *queue = pipe->send_queue;
- int rv;
if (nni_aio_begin(aio) != 0) {
// No way to give the message back to the protocol, so
@@ -204,9 +203,8 @@ inproc_pipe_send(void *arg, nni_aio *aio)
}
nni_mtx_lock(&queue->lock);
- if ((rv = nni_aio_schedule(aio, inproc_queue_cancel, queue)) != 0) {
+ if (!nni_aio_defer(aio, inproc_queue_cancel, queue)) {
nni_mtx_unlock(&queue->lock);
- nni_aio_finish_error(aio, rv);
return;
}
nni_aio_list_append(&queue->writers, aio);
@@ -219,16 +217,14 @@ inproc_pipe_recv(void *arg, nni_aio *aio)
{
inproc_pipe *pipe = arg;
inproc_queue *queue = pipe->recv_queue;
- int rv;
if (nni_aio_begin(aio) != 0) {
return;
}
nni_mtx_lock(&queue->lock);
- if ((rv = nni_aio_schedule(aio, inproc_queue_cancel, queue)) != 0) {
+ if (!nni_aio_defer(aio, inproc_queue_cancel, queue)) {
nni_mtx_unlock(&queue->lock);
- nni_aio_finish_error(aio, rv);
return;
}
nni_aio_list_append(&queue->readers, aio);
@@ -463,7 +459,6 @@ inproc_ep_connect(void *arg, nni_aio *aio)
{
inproc_ep *ep = arg;
inproc_ep *server;
- int rv;
if (nni_aio_begin(aio) != 0) {
return;
@@ -486,9 +481,8 @@ inproc_ep_connect(void *arg, nni_aio *aio)
// We don't have to worry about the case where a zero timeout
// on connect was specified, as there is no option to specify
// that in the upper API.
- if ((rv = nni_aio_schedule(aio, inproc_ep_cancel, ep)) != 0) {
+ if (!nni_aio_defer(aio, inproc_ep_cancel, ep)) {
nni_mtx_unlock(&nni_inproc.mx);
- nni_aio_finish_error(aio, rv);
return;
}
@@ -523,7 +517,6 @@ static void
inproc_ep_accept(void *arg, nni_aio *aio)
{
inproc_ep *ep = arg;
- int rv;
if (nni_aio_begin(aio) != 0) {
return;
@@ -533,9 +526,8 @@ inproc_ep_accept(void *arg, nni_aio *aio)
// We need not worry about the case where a non-blocking
// accept was tried -- there is no API to do such a thing.
- if ((rv = nni_aio_schedule(aio, inproc_ep_cancel, ep)) != 0) {
+ if (!nni_aio_defer(aio, inproc_ep_cancel, ep)) {
nni_mtx_unlock(&nni_inproc.mx);
- nni_aio_finish_error(aio, rv);
return;
}
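
For readers unfamiliar with the aio layer, the sketch below illustrates the contract the converted call sites rely on. It is an illustration only, not NNG's implementation: example_defer_contract, aio_cannot_proceed, record_cancel_callback, the nni_aio_cancel_fn typedef name, and the NNG_ECANCELED error choice are placeholders or assumptions made here; only the general shape (finish the aio in place while the caller's lock is held, or record the cancel callback and return true) follows from the commit message and the diff above.

// Illustration only -- NOT the real nni_aio_defer. Helper names and the
// error code are placeholders; see the note above.
static bool
example_defer_contract(nni_aio *aio, nni_aio_cancel_fn fn, void *arg)
{
	if (aio_cannot_proceed(aio)) {
		// Finish the aio right here, while the caller's lock is
		// still held, instead of returning an error code for the
		// caller to report via nni_aio_finish_error.
		nni_aio_finish_error(aio, NNG_ECANCELED);
		return (false);
	}
	// Otherwise remember how to cancel the deferred operation later.
	record_cancel_callback(aio, fn, arg);
	return (true);
}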