author     Garrett D'Amore <garrett@damore.org>  2018-04-18 20:38:00 -0700
committer  Garrett D'Amore <garrett@damore.org>  2018-04-20 07:34:16 -0700
commit     5902d02ad0a056a146231568f1293ffbcd59f61c (patch)
tree       be38584c02d703ec2322ab941d4d723c752fe187 /src/protocol/reqrep0/req.c
parent     40542e7af0f5003d7ad67876ea580a59174031ca (diff)
download   nng-5902d02ad0a056a146231568f1293ffbcd59f61c.tar.gz
           nng-5902d02ad0a056a146231568f1293ffbcd59f61c.tar.bz2
           nng-5902d02ad0a056a146231568f1293ffbcd59f61c.zip
fixes #346 nng_recv() sometimes acts on null `msg` pointer
This closes a fundamental flaw in the way aio structures were handled. In particular, aio expiration could race ahead and fire before the aio was properly registered by the provider. This ultimately led to the possibility of duplicate completions on the same aio.

The solution involved breaking up nni_aio_start into two functions. nni_aio_begin (which can be run outside of external locks) simply validates that nni_aio_fini() has not been called, and clears certain fields in the aio to make it ready for use by the provider. nni_aio_schedule does the work to register the aio with the expiration thread, and should only be called when the aio is actually scheduled for asynchronous completion. nni_aio_schedule_verify does the same thing, but returns NNG_ETIMEDOUT if the aio has a zero-length timeout.

This change has a small negative performance impact. We have plans to rectify that by converting nni_aio_begin to use a lockless flag for the aio->a_fini bit.

While we were here, we fixed some error paths in the POSIX subsystem, which would have returned incorrect error codes, and we made some optimizations in the message queues to reduce conditionals while holding locks in the hot code path.
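For illustration, here is a minimal sketch of the begin/schedule pattern described above, loosely modeled on the req0_ctx_recv() change in the diff below. The provider struct and the provider_recv()/provider_cancel() names are hypothetical, and the cancel-callback signature is an assumption (it has varied across nng revisions); the nni_aio_begin(), nni_aio_schedule_verify(), nni_aio_finish(), nni_aio_finish_error(), and nni_mtx_* calls are the ones used in the patch.

#include "core/nng_impl.h" // internal header used by in-tree protocols (assumed)

// Hypothetical provider state: a lock, a readiness flag, and a parked aio.
typedef struct provider {
	nni_mtx  mtx;
	bool     ready;
	nni_aio *waiting_aio;
} provider;

// Cancel callback; the exact signature used here is an assumption.
static void
provider_cancel(nni_aio *aio, void *arg, int rv)
{
	provider *p = arg;

	nni_mtx_lock(&p->mtx);
	if (p->waiting_aio == aio) {
		p->waiting_aio = NULL;
		nni_aio_finish_error(aio, rv);
	}
	nni_mtx_unlock(&p->mtx);
}

static void
provider_recv(provider *p, nni_aio *aio)
{
	int rv;

	// Step 1: validate the aio outside of any provider locks.
	if (nni_aio_begin(aio) != 0) {
		return; // aio is being torn down; do nothing
	}
	nni_mtx_lock(&p->mtx);
	if (p->ready) {
		// Synchronous completion: the aio was never registered with
		// the expiration thread, so expiry cannot race with us.
		nni_mtx_unlock(&p->mtx);
		nni_aio_finish(aio, 0, 0);
		return;
	}
	// Step 2: only now, under the lock and just before parking the aio,
	// register it for expiration.  A zero-length timeout fails here
	// with NNG_ETIMEDOUT instead of being parked.
	if ((rv = nni_aio_schedule_verify(aio, provider_cancel, p)) != 0) {
		nni_mtx_unlock(&p->mtx);
		nni_aio_finish_error(aio, rv);
		return;
	}
	p->waiting_aio = aio;
	nni_mtx_unlock(&p->mtx);
}

The point, per the message above, is that registration with the expiration thread now happens only on the path that actually parks the aio, so an expiration can no longer fire for an aio the provider has not finished setting up.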
Diffstat (limited to 'src/protocol/reqrep0/req.c')
-rw-r--r--  src/protocol/reqrep0/req.c  32
1 file changed, 23 insertions(+), 9 deletions(-)
diff --git a/src/protocol/reqrep0/req.c b/src/protocol/reqrep0/req.c
index 8149ce08..3ecc8604 100644
--- a/src/protocol/reqrep0/req.c
+++ b/src/protocol/reqrep0/req.c
@@ -617,11 +617,10 @@ req0_ctx_recv(void *arg, nni_aio *aio)
req0_sock *s = ctx->sock;
nni_msg * msg;
- nni_mtx_lock(&s->mtx);
- if (nni_aio_start(aio, req0_ctx_cancel_recv, ctx) != 0) {
- nni_mtx_unlock(&s->mtx);
+ if (nni_aio_begin(aio) != 0) {
return;
}
+ nni_mtx_lock(&s->mtx);
if (s->closed) {
nni_mtx_unlock(&s->mtx);
nni_aio_finish_error(aio, NNG_ECLOSED);
@@ -638,6 +637,13 @@ req0_ctx_recv(void *arg, nni_aio *aio)
}
if ((msg = ctx->repmsg) == NULL) {
+ int rv;
+ rv = nni_aio_schedule_verify(aio, req0_ctx_cancel_recv, ctx);
+ if (rv != 0) {
+ nni_mtx_unlock(&s->mtx);
+ nni_aio_finish_error(aio, rv);
+ return;
+ }
ctx->raio = aio;
nni_mtx_unlock(&s->mtx);
return;
@@ -697,14 +703,11 @@ req0_ctx_send(void *arg, nni_aio *aio)
uint64_t id;
int rv;
- nni_mtx_lock(&s->mtx);
- // Even though we always complete synchronously, this guards against
- // restarting a request that was stopped.
- if (nni_aio_start(aio, req0_ctx_cancel_send, ctx) != 0) {
- nni_mtx_unlock(&s->mtx);
+ if (nni_aio_begin(aio) != 0) {
return;
}
- // Sending a new requst cancels the old one, including any
+ nni_mtx_lock(&s->mtx);
+ // Sending a new request cancels the old one, including any
// outstanding reply.
if (ctx->raio != NULL) {
nni_aio_finish_error(ctx->raio, NNG_ECANCELED);
@@ -735,6 +738,15 @@ req0_ctx_send(void *arg, nni_aio *aio)
nni_aio_finish_error(aio, rv);
return;
}
+ // If no pipes are ready, and the request was a poll (no background
+ // schedule), then fail it. Should be NNG_TIMEDOUT.
+ rv = nni_aio_schedule_verify(aio, req0_ctx_cancel_send, ctx);
+ if ((rv != 0) && (nni_list_empty(&s->readypipes))) {
+ nni_idhash_remove(s->reqids, id);
+ nni_mtx_unlock(&s->mtx);
+ nni_aio_finish_error(aio, rv);
+ return;
+ }
ctx->reqlen = nni_msg_len(msg);
ctx->reqmsg = msg;
ctx->saio = aio;
@@ -743,6 +755,8 @@ req0_ctx_send(void *arg, nni_aio *aio)
// Stick us on the sendq list.
nni_list_append(&s->sendq, ctx);
+ // Note that this will be synchronous if the readypipes list was
+ // not empty.
req0_run_sendq(s, NULL);
nni_mtx_unlock(&s->mtx);
}
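
As a usage-level illustration of the "poll" behavior noted in the send path above, here is a hedged sketch using the public API. That a plain socket send in this revision routes through req0_ctx_send() as shown, and therefore returns NNG_ETIMEDOUT when no pipes are ready, is an assumption about this code path.

#include <nng/nng.h>
#include <nng/protocol/reqrep0/req.h>
#include <stdio.h>

int
main(void)
{
	nng_socket s;
	char       msg[] = "ping";
	int        rv;

	if ((rv = nng_req0_open(&s)) != 0) {
		return (1);
	}
	// A zero send timeout makes the request a "poll": with no ready
	// pipes, the schedule step fails the aio immediately instead of
	// parking it, so the expected result is NNG_ETIMEDOUT.
	nng_setopt_ms(s, NNG_OPT_SENDTIMEO, 0);
	rv = nng_send(s, msg, sizeof(msg), 0);
	printf("send: %s\n", nng_strerror(rv));
	nng_close(s);
	return (0);
}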