path: root/src/sp/protocol
author     Garrett D'Amore <garrett@damore.org>  2021-01-01 11:30:03 -0800
committer  Garrett D'Amore <garrett@damore.org>  2021-01-01 12:46:17 -0800
commit     ed542ac45e00c9b2faa0b41f3c00de6e291e5678 (patch)
tree       673924ff077d468e6756529c2c204698d3faa47c /src/sp/protocol
parent     1413b2421a82cd9b9cde178d44fb60c7893176b0 (diff)
fixes #1345 Restructure the source tree
This is not quite complete, but it sets the stage for other protocols (such as zmq or mqtt) to be added to the project.
Diffstat (limited to 'src/sp/protocol')
-rw-r--r--  src/sp/protocol/CMakeLists.txt           |  20
-rw-r--r--  src/sp/protocol/bus0/CMakeLists.txt      |  18
-rw-r--r--  src/sp/protocol/bus0/bug1247_test.c      |  35
-rw-r--r--  src/sp/protocol/bus0/bus.c               | 466
-rw-r--r--  src/sp/protocol/pair0/CMakeLists.txt     |  16
-rw-r--r--  src/sp/protocol/pair0/pair.c             | 305
-rw-r--r--  src/sp/protocol/pair1/CMakeLists.txt     |  20
-rw-r--r--  src/sp/protocol/pair1/pair.c             | 540
-rw-r--r--  src/sp/protocol/pair1/pair1_poly.c       | 535
-rw-r--r--  src/sp/protocol/pair1/pair1_poly_test.c  | 370
-rw-r--r--  src/sp/protocol/pair1/pair1_test.c       | 433
-rw-r--r--  src/sp/protocol/pipeline0/CMakeLists.txt |  23
-rw-r--r--  src/sp/protocol/pipeline0/pull.c         | 325
-rw-r--r--  src/sp/protocol/pipeline0/pull_test.c    | 264
-rw-r--r--  src/sp/protocol/pipeline0/push.c         | 442
-rw-r--r--  src/sp/protocol/pipeline0/push_test.c    | 525
-rw-r--r--  src/sp/protocol/pubsub0/CMakeLists.txt   |  24
-rw-r--r--  src/sp/protocol/pubsub0/pub.c            | 383
-rw-r--r--  src/sp/protocol/pubsub0/pub_test.c       | 331
-rw-r--r--  src/sp/protocol/pubsub0/sub.c            | 755
-rw-r--r--  src/sp/protocol/pubsub0/sub_test.c       | 624
-rw-r--r--  src/sp/protocol/pubsub0/xsub.c           | 211
-rw-r--r--  src/sp/protocol/pubsub0/xsub_test.c      | 376
-rw-r--r--  src/sp/protocol/reqrep0/CMakeLists.txt   |  25
-rw-r--r--  src/sp/protocol/reqrep0/rep.c            | 705
-rw-r--r--  src/sp/protocol/reqrep0/rep_test.c       | 669
-rw-r--r--  src/sp/protocol/reqrep0/req.c            | 869
-rw-r--r--  src/sp/protocol/reqrep0/req_test.c       | 968
-rw-r--r--  src/sp/protocol/reqrep0/xrep.c           | 432
-rw-r--r--  src/sp/protocol/reqrep0/xrep_test.c      | 434
-rw-r--r--  src/sp/protocol/reqrep0/xreq.c           | 319
-rw-r--r--  src/sp/protocol/reqrep0/xreq_test.c      | 367
-rw-r--r--  src/sp/protocol/survey0/CMakeLists.txt   |  25
-rw-r--r--  src/sp/protocol/survey0/respond.c        | 693
-rw-r--r--  src/sp/protocol/survey0/respond_test.c   | 586
-rw-r--r--  src/sp/protocol/survey0/survey.c         | 663
-rw-r--r--  src/sp/protocol/survey0/survey_test.c    | 626
-rw-r--r--  src/sp/protocol/survey0/xrespond.c       | 417
-rw-r--r--  src/sp/protocol/survey0/xrespond_test.c  | 436
-rw-r--r--  src/sp/protocol/survey0/xsurvey.c        | 379
-rw-r--r--  src/sp/protocol/survey0/xsurvey_test.c   | 399
41 files changed, 16053 insertions, 0 deletions
diff --git a/src/sp/protocol/CMakeLists.txt b/src/sp/protocol/CMakeLists.txt
new file mode 100644
index 00000000..fd480523
--- /dev/null
+++ b/src/sp/protocol/CMakeLists.txt
@@ -0,0 +1,20 @@
+#
+# Copyright 2020 Staysail Systems, Inc. <info@staysail.tech>
+#
+# This software is supplied under the terms of the MIT License, a
+# copy of which should be located in the distribution where this
+# file was obtained (LICENSE.txt). A copy of the license may also be
+# found online at https://opensource.org/licenses/MIT.
+#
+
+# Protocols.
+nng_directory(protocol)
+
+add_subdirectory(bus0)
+add_subdirectory(pair0)
+add_subdirectory(pair1)
+add_subdirectory(pipeline0)
+add_subdirectory(pubsub0)
+add_subdirectory(reqrep0)
+add_subdirectory(survey0)
+
diff --git a/src/sp/protocol/bus0/CMakeLists.txt b/src/sp/protocol/bus0/CMakeLists.txt
new file mode 100644
index 00000000..01c0b05b
--- /dev/null
+++ b/src/sp/protocol/bus0/CMakeLists.txt
@@ -0,0 +1,18 @@
+#
+# Copyright 2020 Staysail Systems, Inc. <info@staysail.tech>
+# Copyright 2018 Capitar IT Group BV <info@capitar.com>
+#
+# This software is supplied under the terms of the MIT License, a
+# copy of which should be located in the distribution where this
+# file was obtained (LICENSE.txt). A copy of the license may also be
+# found online at https://opensource.org/licenses/MIT.
+#
+
+# Bus protocol
+nng_directory(bus0)
+
+nng_sources_if(NNG_PROTO_BUS0 bus.c)
+nng_headers_if(NNG_PROTO_BUS0 nng/protocol/bus0/bus.h)
+nng_defines_if(NNG_PROTO_BUS0 NNG_HAVE_BUS0)
+
+nng_test(bug1247_test)
\ No newline at end of file
diff --git a/src/sp/protocol/bus0/bug1247_test.c b/src/sp/protocol/bus0/bug1247_test.c
new file mode 100644
index 00000000..bbc6958b
--- /dev/null
+++ b/src/sp/protocol/bus0/bug1247_test.c
@@ -0,0 +1,35 @@
+//
+// Copyright 2020 Staysail Systems, Inc. <info@staysail.tech>
+//
+// This software is supplied under the terms of the MIT License, a
+// copy of which should be located in the distribution where this
+// file was obtained (LICENSE.txt). A copy of the license may also be
+// found online at https://opensource.org/licenses/MIT.
+//
+
+#include <nuts.h>
+
+#include <nng/protocol/bus0/bus.h>
+
+void
+test_bug1247(void)
+{
+ nng_socket bus1, bus2;
+ char * addr;
+
+ NUTS_ADDR(addr, "tcp");
+
+ NUTS_PASS(nng_bus0_open(&bus1));
+ NUTS_PASS(nng_bus0_open(&bus2));
+
+ NUTS_PASS(nng_listen(bus1, addr, NULL, 0));
+ NUTS_FAIL(nng_listen(bus2, addr, NULL, 0), NNG_EADDRINUSE);
+
+ NUTS_PASS(nng_close(bus2));
+ NUTS_PASS(nng_close(bus1));
+}
+
+TEST_LIST = {
+ { "bug1247", test_bug1247 },
+ { NULL, NULL },
+};
diff --git a/src/sp/protocol/bus0/bus.c b/src/sp/protocol/bus0/bus.c
new file mode 100644
index 00000000..9a610ac6
--- /dev/null
+++ b/src/sp/protocol/bus0/bus.c
@@ -0,0 +1,466 @@
+//
+// Copyright 2020 Staysail Systems, Inc. <info@staysail.tech>
+// Copyright 2018 Capitar IT Group BV <info@capitar.com>
+//
+// This software is supplied under the terms of the MIT License, a
+// copy of which should be located in the distribution where this
+// file was obtained (LICENSE.txt). A copy of the license may also be
+// found online at https://opensource.org/licenses/MIT.
+//
+
+#include <stdbool.h>
+#include <stdlib.h>
+
+#include "core/nng_impl.h"
+#include "nng/protocol/bus0/bus.h"
+
+// Bus protocol. In the BUS protocol, each peer sends a message to all of
+// its peers. However, bus protocols do not "forward" (absent a device).
+// So in order for each participant to receive the message, each sender
+// must be connected to every other node in the network (full mesh).
+
+#ifndef NNI_PROTO_BUS_V0
+#define NNI_PROTO_BUS_V0 NNI_PROTO(7, 0)
+#endif
+
+typedef struct bus0_pipe bus0_pipe;
+typedef struct bus0_sock bus0_sock;
+
+static void bus0_sock_getq(bus0_sock *);
+static void bus0_sock_send(void *, nni_aio *);
+static void bus0_sock_recv(void *, nni_aio *);
+
+static void bus0_pipe_getq(bus0_pipe *);
+static void bus0_pipe_recv(bus0_pipe *);
+
+static void bus0_sock_getq_cb(void *);
+static void bus0_sock_getq_cb_raw(void *);
+static void bus0_pipe_getq_cb(void *);
+static void bus0_pipe_send_cb(void *);
+static void bus0_pipe_recv_cb(void *);
+static void bus0_pipe_putq_cb(void *);
+
+// bus0_sock is our per-socket protocol private structure.
+struct bus0_sock {
+ nni_aio * aio_getq;
+ nni_list pipes;
+ nni_mtx mtx;
+ nni_msgq *uwq;
+ nni_msgq *urq;
+ bool raw;
+};
+
+// bus0_pipe is our per-pipe protocol private structure.
+struct bus0_pipe {
+ nni_pipe * npipe;
+ bus0_sock * psock;
+ nni_msgq * sendq;
+ nni_list_node node;
+ nni_aio * aio_getq;
+ nni_aio * aio_recv;
+ nni_aio * aio_send;
+ nni_aio * aio_putq;
+ nni_mtx mtx;
+};
+
+static void
+bus0_sock_fini(void *arg)
+{
+ bus0_sock *s = arg;
+
+ nni_aio_free(s->aio_getq);
+ nni_mtx_fini(&s->mtx);
+}
+
+static int
+bus0_sock_init(void *arg, nni_sock *nsock)
+{
+ bus0_sock *s = arg;
+ int rv;
+
+ NNI_LIST_INIT(&s->pipes, bus0_pipe, node);
+ nni_mtx_init(&s->mtx);
+ if ((rv = nni_aio_alloc(&s->aio_getq, bus0_sock_getq_cb, s)) != 0) {
+ bus0_sock_fini(s);
+ return (rv);
+ }
+ s->uwq = nni_sock_sendq(nsock);
+ s->urq = nni_sock_recvq(nsock);
+ s->raw = false;
+
+ return (0);
+}
+
+static int
+bus0_sock_init_raw(void *arg, nni_sock *nsock)
+{
+ bus0_sock *s = arg;
+ int rv;
+
+ NNI_LIST_INIT(&s->pipes, bus0_pipe, node);
+ nni_mtx_init(&s->mtx);
+ if ((rv = nni_aio_alloc(&s->aio_getq, bus0_sock_getq_cb_raw, s)) !=
+ 0) {
+ bus0_sock_fini(s);
+ return (rv);
+ }
+ s->uwq = nni_sock_sendq(nsock);
+ s->urq = nni_sock_recvq(nsock);
+ s->raw = true;
+
+ return (0);
+}
+
+static void
+bus0_sock_open(void *arg)
+{
+ bus0_sock *s = arg;
+
+ bus0_sock_getq(s);
+}
+
+static void
+bus0_sock_close(void *arg)
+{
+ bus0_sock *s = arg;
+
+ nni_aio_close(s->aio_getq);
+}
+
+static void
+bus0_pipe_stop(void *arg)
+{
+ bus0_pipe *p = arg;
+
+ nni_aio_stop(p->aio_getq);
+ nni_aio_stop(p->aio_send);
+ nni_aio_stop(p->aio_recv);
+ nni_aio_stop(p->aio_putq);
+}
+
+static void
+bus0_pipe_fini(void *arg)
+{
+ bus0_pipe *p = arg;
+
+ nni_aio_free(p->aio_getq);
+ nni_aio_free(p->aio_send);
+ nni_aio_free(p->aio_recv);
+ nni_aio_free(p->aio_putq);
+ nni_msgq_fini(p->sendq);
+ nni_mtx_fini(&p->mtx);
+}
+
+static int
+bus0_pipe_init(void *arg, nni_pipe *npipe, void *s)
+{
+ bus0_pipe *p = arg;
+ int rv;
+
+ NNI_LIST_NODE_INIT(&p->node);
+ nni_mtx_init(&p->mtx);
+ if (((rv = nni_msgq_init(&p->sendq, 16)) != 0) ||
+ ((rv = nni_aio_alloc(&p->aio_getq, bus0_pipe_getq_cb, p)) != 0) ||
+ ((rv = nni_aio_alloc(&p->aio_send, bus0_pipe_send_cb, p)) != 0) ||
+ ((rv = nni_aio_alloc(&p->aio_recv, bus0_pipe_recv_cb, p)) != 0) ||
+ ((rv = nni_aio_alloc(&p->aio_putq, bus0_pipe_putq_cb, p)) != 0)) {
+ bus0_pipe_fini(p);
+ return (rv);
+ }
+
+ p->npipe = npipe;
+ p->psock = s;
+ return (0);
+}
+
+static int
+bus0_pipe_start(void *arg)
+{
+ bus0_pipe *p = arg;
+ bus0_sock *s = p->psock;
+
+ if (nni_pipe_peer(p->npipe) != NNI_PROTO_BUS_V0) {
+ // Peer protocol mismatch.
+ return (NNG_EPROTO);
+ }
+
+ nni_mtx_lock(&s->mtx);
+ nni_list_append(&s->pipes, p);
+ nni_mtx_unlock(&s->mtx);
+
+ bus0_pipe_recv(p);
+ bus0_pipe_getq(p);
+
+ return (0);
+}
+
+static void
+bus0_pipe_close(void *arg)
+{
+ bus0_pipe *p = arg;
+ bus0_sock *s = p->psock;
+
+ nni_aio_close(p->aio_getq);
+ nni_aio_close(p->aio_send);
+ nni_aio_close(p->aio_recv);
+ nni_aio_close(p->aio_putq);
+ nni_msgq_close(p->sendq);
+
+ nni_mtx_lock(&s->mtx);
+ if (nni_list_active(&s->pipes, p)) {
+ nni_list_remove(&s->pipes, p);
+ }
+ nni_mtx_unlock(&s->mtx);
+}
+
+static void
+bus0_pipe_getq_cb(void *arg)
+{
+ bus0_pipe *p = arg;
+
+ if (nni_aio_result(p->aio_getq) != 0) {
+ // closed?
+ nni_pipe_close(p->npipe);
+ return;
+ }
+ nni_aio_set_msg(p->aio_send, nni_aio_get_msg(p->aio_getq));
+ nni_aio_set_msg(p->aio_getq, NULL);
+
+ nni_pipe_send(p->npipe, p->aio_send);
+}
+
+static void
+bus0_pipe_send_cb(void *arg)
+{
+ bus0_pipe *p = arg;
+
+ if (nni_aio_result(p->aio_send) != 0) {
+ // closed?
+ nni_msg_free(nni_aio_get_msg(p->aio_send));
+ nni_aio_set_msg(p->aio_send, NULL);
+ nni_pipe_close(p->npipe);
+ return;
+ }
+
+ bus0_pipe_getq(p);
+}
+
+static void
+bus0_pipe_recv_cb(void *arg)
+{
+ bus0_pipe *p = arg;
+ bus0_sock *s = p->psock;
+ nni_msg * msg;
+
+ if (nni_aio_result(p->aio_recv) != 0) {
+ nni_pipe_close(p->npipe);
+ return;
+ }
+ msg = nni_aio_get_msg(p->aio_recv);
+
+ if (s->raw) {
+ nni_msg_header_append_u32(msg, nni_pipe_id(p->npipe));
+ }
+
+ nni_msg_set_pipe(msg, nni_pipe_id(p->npipe));
+ nni_aio_set_msg(p->aio_putq, msg);
+ nni_aio_set_msg(p->aio_recv, NULL);
+ nni_msgq_aio_put(s->urq, p->aio_putq);
+}
+
+static void
+bus0_pipe_putq_cb(void *arg)
+{
+ bus0_pipe *p = arg;
+
+ if (nni_aio_result(p->aio_putq) != 0) {
+ nni_msg_free(nni_aio_get_msg(p->aio_putq));
+ nni_aio_set_msg(p->aio_putq, NULL);
+ nni_pipe_close(p->npipe);
+ return;
+ }
+
+ // Wait for another recv.
+ bus0_pipe_recv(p);
+}
+
+static void
+bus0_sock_getq_cb(void *arg)
+{
+ bus0_sock *s = arg;
+ bus0_pipe *p;
+ bus0_pipe *lastp;
+ nni_msg * msg;
+ nni_msg * dup;
+
+ if (nni_aio_result(s->aio_getq) != 0) {
+ return;
+ }
+
+ msg = nni_aio_get_msg(s->aio_getq);
+
+ // We ignore any headers present for cooked mode.
+ nni_msg_header_clear(msg);
+
+ nni_mtx_lock(&s->mtx);
+ lastp = nni_list_last(&s->pipes);
+ NNI_LIST_FOREACH (&s->pipes, p) {
+ if (p != lastp) {
+ if (nni_msg_dup(&dup, msg) != 0) {
+ continue;
+ }
+ } else {
+ dup = msg;
+ msg = NULL;
+ }
+ if (nni_msgq_tryput(p->sendq, dup) != 0) {
+ nni_msg_free(dup);
+ }
+ }
+ nni_mtx_unlock(&s->mtx);
+ nni_msg_free(msg);
+
+ bus0_sock_getq(s);
+}
+
+static void
+bus0_sock_getq_cb_raw(void *arg)
+{
+ bus0_sock *s = arg;
+ bus0_pipe *p;
+ nni_msg * msg;
+ uint32_t sender;
+
+ if (nni_aio_result(s->aio_getq) != 0) {
+ return;
+ }
+
+ msg = nni_aio_get_msg(s->aio_getq);
+
+ // The header being present indicates that the message
+ // was received locally and we are rebroadcasting (most
+ // likely a device is doing this). In that case, grab the
+ // pipe ID from the header so we can exclude that sender.
+ if (nni_msg_header_len(msg) >= 4) {
+ sender = nni_msg_header_trim_u32(msg);
+ } else {
+ sender = 0;
+ }
+
+ nni_mtx_lock(&s->mtx);
+ NNI_LIST_FOREACH (&s->pipes, p) {
+ if (nni_pipe_id(p->npipe) == sender) {
+ continue;
+ }
+ nni_msg_clone(msg);
+ if (nni_msgq_tryput(p->sendq, msg) != 0) {
+ nni_msg_free(msg);
+ }
+ }
+ nni_mtx_unlock(&s->mtx);
+ nni_msg_free(msg);
+
+ bus0_sock_getq(s);
+}
+
+static void
+bus0_sock_getq(bus0_sock *s)
+{
+ nni_msgq_aio_get(s->uwq, s->aio_getq);
+}
+
+static void
+bus0_pipe_getq(bus0_pipe *p)
+{
+ nni_msgq_aio_get(p->sendq, p->aio_getq);
+}
+
+static void
+bus0_pipe_recv(bus0_pipe *p)
+{
+ nni_pipe_recv(p->npipe, p->aio_recv);
+}
+
+static void
+bus0_sock_send(void *arg, nni_aio *aio)
+{
+ bus0_sock *s = arg;
+
+ nni_msgq_aio_put(s->uwq, aio);
+}
+
+static void
+bus0_sock_recv(void *arg, nni_aio *aio)
+{
+ bus0_sock *s = arg;
+
+ nni_msgq_aio_get(s->urq, aio);
+}
+
+static nni_proto_pipe_ops bus0_pipe_ops = {
+ .pipe_size = sizeof(bus0_pipe),
+ .pipe_init = bus0_pipe_init,
+ .pipe_fini = bus0_pipe_fini,
+ .pipe_start = bus0_pipe_start,
+ .pipe_close = bus0_pipe_close,
+ .pipe_stop = bus0_pipe_stop,
+};
+
+static nni_option bus0_sock_options[] = {
+ // terminate list
+ {
+ .o_name = NULL,
+ },
+};
+
+static nni_proto_sock_ops bus0_sock_ops = {
+ .sock_size = sizeof(bus0_sock),
+ .sock_init = bus0_sock_init,
+ .sock_fini = bus0_sock_fini,
+ .sock_open = bus0_sock_open,
+ .sock_close = bus0_sock_close,
+ .sock_send = bus0_sock_send,
+ .sock_recv = bus0_sock_recv,
+ .sock_options = bus0_sock_options,
+};
+
+static nni_proto_sock_ops bus0_sock_ops_raw = {
+ .sock_size = sizeof(bus0_sock),
+ .sock_init = bus0_sock_init_raw,
+ .sock_fini = bus0_sock_fini,
+ .sock_open = bus0_sock_open,
+ .sock_close = bus0_sock_close,
+ .sock_send = bus0_sock_send,
+ .sock_recv = bus0_sock_recv,
+ .sock_options = bus0_sock_options,
+};
+
+static nni_proto bus0_proto = {
+ .proto_version = NNI_PROTOCOL_VERSION,
+ .proto_self = { NNI_PROTO_BUS_V0, "bus" },
+ .proto_peer = { NNI_PROTO_BUS_V0, "bus" },
+ .proto_flags = NNI_PROTO_FLAG_SNDRCV,
+ .proto_sock_ops = &bus0_sock_ops,
+ .proto_pipe_ops = &bus0_pipe_ops,
+};
+
+static nni_proto bus0_proto_raw = {
+ .proto_version = NNI_PROTOCOL_VERSION,
+ .proto_self = { NNI_PROTO_BUS_V0, "bus" },
+ .proto_peer = { NNI_PROTO_BUS_V0, "bus" },
+ .proto_flags = NNI_PROTO_FLAG_SNDRCV | NNI_PROTO_FLAG_RAW,
+ .proto_sock_ops = &bus0_sock_ops_raw,
+ .proto_pipe_ops = &bus0_pipe_ops,
+};
+
+int
+nng_bus0_open(nng_socket *sidp)
+{
+ return (nni_proto_open(sidp, &bus0_proto));
+}
+
+int
+nng_bus0_open_raw(nng_socket *sidp)
+{
+ return (nni_proto_open(sidp, &bus0_proto_raw));
+}
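
For orientation, here is a minimal sketch (not part of this commit; the inproc addresses and omitted error checks are illustrative) of how the bus protocol implemented above is driven through the public API: every node must be wired to every other node, and a send on one socket reaches all connected peers.

// Sketch: a three-node full-mesh bus using the public nng API.
#include <nng/nng.h>
#include <nng/protocol/bus0/bus.h>

static void
bus_mesh_sketch(void)
{
	nng_socket a, b, c;

	nng_bus0_open(&a);
	nng_bus0_open(&b);
	nng_bus0_open(&c);

	// Full mesh: a<->b, a<->c, b<->c. Bus does not forward, so every
	// pair of participants needs a direct connection.
	nng_listen(a, "inproc://bus-a", NULL, 0);
	nng_listen(b, "inproc://bus-b", NULL, 0);
	nng_dial(b, "inproc://bus-a", NULL, 0);
	nng_dial(c, "inproc://bus-a", NULL, 0);
	nng_dial(c, "inproc://bus-b", NULL, 0);

	// A message sent on any socket is delivered to all connected peers.
	nng_send(a, "hello", 6, 0);

	nng_close(a);
	nng_close(b);
	nng_close(c);
}
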
diff --git a/src/sp/protocol/pair0/CMakeLists.txt b/src/sp/protocol/pair0/CMakeLists.txt
new file mode 100644
index 00000000..b12583ab
--- /dev/null
+++ b/src/sp/protocol/pair0/CMakeLists.txt
@@ -0,0 +1,16 @@
+#
+# Copyright 2020 Staysail Systems, Inc. <info@staysail.tech>
+# Copyright 2018 Capitar IT Group BV <info@capitar.com>
+#
+# This software is supplied under the terms of the MIT License, a
+# copy of which should be located in the distribution where this
+# file was obtained (LICENSE.txt). A copy of the license may also be
+# found online at https://opensource.org/licenses/MIT.
+#
+
+# PAIRv0 protocol
+nng_directory(pair0)
+
+nng_sources_if(NNG_PROTO_PAIR0 pair.c)
+nng_headers_if(NNG_PROTO_PAIR0 nng/protocol/pair0/pair.h)
+nng_defines_if(NNG_PROTO_PAIR0 NNG_HAVE_PAIR0)
\ No newline at end of file
diff --git a/src/sp/protocol/pair0/pair.c b/src/sp/protocol/pair0/pair.c
new file mode 100644
index 00000000..41f88c7c
--- /dev/null
+++ b/src/sp/protocol/pair0/pair.c
@@ -0,0 +1,305 @@
+//
+// Copyright 2020 Staysail Systems, Inc. <info@staysail.tech>
+// Copyright 2018 Capitar IT Group BV <info@capitar.com>
+//
+// This software is supplied under the terms of the MIT License, a
+// copy of which should be located in the distribution where this
+// file was obtained (LICENSE.txt). A copy of the license may also be
+// found online at https://opensource.org/licenses/MIT.
+//
+
+#include <stdlib.h>
+
+#include "core/nng_impl.h"
+#include "nng/protocol/pair0/pair.h"
+
+// Pair protocol. The PAIR protocol is a simple 1:1 messaging pattern.
+// While a peer is connected to the server, all other peer connection
+// attempts are discarded.
+
+#ifndef NNI_PROTO_PAIR_V0
+#define NNI_PROTO_PAIR_V0 NNI_PROTO(1, 0)
+#endif
+
+typedef struct pair0_pipe pair0_pipe;
+typedef struct pair0_sock pair0_sock;
+
+static void pair0_send_cb(void *);
+static void pair0_recv_cb(void *);
+static void pair0_getq_cb(void *);
+static void pair0_putq_cb(void *);
+static void pair0_pipe_fini(void *);
+
+// pair0_sock is our per-socket protocol private structure.
+struct pair0_sock {
+ pair0_pipe *ppipe;
+ nni_msgq * uwq;
+ nni_msgq * urq;
+ nni_mtx mtx;
+};
+
+// A pair0_pipe is our per-pipe protocol private structure. We keep
+// one of these even though in theory we'd only have a single underlying
+// pipe. The separate data structure keeps this protocol consistent with
+// other protocols that do manage multiple pipes.
+struct pair0_pipe {
+ nni_pipe * npipe;
+ pair0_sock *psock;
+ nni_aio aio_send;
+ nni_aio aio_recv;
+ nni_aio aio_getq;
+ nni_aio aio_putq;
+};
+
+static int
+pair0_sock_init(void *arg, nni_sock *nsock)
+{
+ pair0_sock *s = arg;
+
+ nni_mtx_init(&s->mtx);
+ s->ppipe = NULL;
+ s->uwq = nni_sock_sendq(nsock);
+ s->urq = nni_sock_recvq(nsock);
+ return (0);
+}
+
+static void
+pair0_sock_fini(void *arg)
+{
+ pair0_sock *s = arg;
+
+ nni_mtx_fini(&s->mtx);
+}
+
+static void
+pair0_pipe_stop(void *arg)
+{
+ pair0_pipe *p = arg;
+
+ nni_aio_stop(&p->aio_send);
+ nni_aio_stop(&p->aio_recv);
+ nni_aio_stop(&p->aio_putq);
+ nni_aio_stop(&p->aio_getq);
+}
+
+static void
+pair0_pipe_fini(void *arg)
+{
+ pair0_pipe *p = arg;
+
+ nni_aio_fini(&p->aio_send);
+ nni_aio_fini(&p->aio_recv);
+ nni_aio_fini(&p->aio_putq);
+ nni_aio_fini(&p->aio_getq);
+}
+
+static int
+pair0_pipe_init(void *arg, nni_pipe *npipe, void *psock)
+{
+ pair0_pipe *p = arg;
+
+ nni_aio_init(&p->aio_send, pair0_send_cb, p);
+ nni_aio_init(&p->aio_recv, pair0_recv_cb, p);
+ nni_aio_init(&p->aio_getq, pair0_getq_cb, p);
+ nni_aio_init(&p->aio_putq, pair0_putq_cb, p);
+
+ p->npipe = npipe;
+ p->psock = psock;
+ return (0);
+}
+
+static int
+pair0_pipe_start(void *arg)
+{
+ pair0_pipe *p = arg;
+ pair0_sock *s = p->psock;
+
+ if (nni_pipe_peer(p->npipe) != NNI_PROTO_PAIR_V0) {
+ // Peer protocol mismatch.
+ return (NNG_EPROTO);
+ }
+
+ nni_mtx_lock(&s->mtx);
+ if (s->ppipe != NULL) {
+ nni_mtx_unlock(&s->mtx);
+ return (NNG_EBUSY); // Already have a peer, denied.
+ }
+ s->ppipe = p;
+ nni_mtx_unlock(&s->mtx);
+
+ // Schedule a getq on the upper write queue, and a read from the pipe.
+ // Each of these also sets up another hold on the pipe itself.
+ nni_msgq_aio_get(s->uwq, &p->aio_getq);
+ nni_pipe_recv(p->npipe, &p->aio_recv);
+
+ return (0);
+}
+
+static void
+pair0_pipe_close(void *arg)
+{
+ pair0_pipe *p = arg;
+ pair0_sock *s = p->psock;
+
+ nni_aio_close(&p->aio_send);
+ nni_aio_close(&p->aio_recv);
+ nni_aio_close(&p->aio_putq);
+ nni_aio_close(&p->aio_getq);
+
+ nni_mtx_lock(&s->mtx);
+ if (s->ppipe == p) {
+ s->ppipe = NULL;
+ }
+ nni_mtx_unlock(&s->mtx);
+}
+
+static void
+pair0_recv_cb(void *arg)
+{
+ pair0_pipe *p = arg;
+ pair0_sock *s = p->psock;
+ nni_msg * msg;
+
+ if (nni_aio_result(&p->aio_recv) != 0) {
+ nni_pipe_close(p->npipe);
+ return;
+ }
+
+ msg = nni_aio_get_msg(&p->aio_recv);
+ nni_aio_set_msg(&p->aio_putq, msg);
+ nni_aio_set_msg(&p->aio_recv, NULL);
+
+ nni_msg_set_pipe(msg, nni_pipe_id(p->npipe));
+ nni_msgq_aio_put(s->urq, &p->aio_putq);
+}
+
+static void
+pair0_putq_cb(void *arg)
+{
+ pair0_pipe *p = arg;
+
+ if (nni_aio_result(&p->aio_putq) != 0) {
+ nni_msg_free(nni_aio_get_msg(&p->aio_putq));
+ nni_aio_set_msg(&p->aio_putq, NULL);
+ nni_pipe_close(p->npipe);
+ return;
+ }
+ nni_pipe_recv(p->npipe, &p->aio_recv);
+}
+
+static void
+pair0_getq_cb(void *arg)
+{
+ pair0_pipe *p = arg;
+
+ if (nni_aio_result(&p->aio_getq) != 0) {
+ nni_pipe_close(p->npipe);
+ return;
+ }
+
+ nni_aio_set_msg(&p->aio_send, nni_aio_get_msg(&p->aio_getq));
+ nni_aio_set_msg(&p->aio_getq, NULL);
+ nni_pipe_send(p->npipe, &p->aio_send);
+}
+
+static void
+pair0_send_cb(void *arg)
+{
+ pair0_pipe *p = arg;
+ pair0_sock *s = p->psock;
+
+ if (nni_aio_result(&p->aio_send) != 0) {
+ nni_msg_free(nni_aio_get_msg(&p->aio_send));
+ nni_aio_set_msg(&p->aio_send, NULL);
+ nni_pipe_close(p->npipe);
+ return;
+ }
+
+ nni_msgq_aio_get(s->uwq, &p->aio_getq);
+}
+
+static void
+pair0_sock_open(void *arg)
+{
+ NNI_ARG_UNUSED(arg);
+}
+
+static void
+pair0_sock_close(void *arg)
+{
+ NNI_ARG_UNUSED(arg);
+}
+
+static void
+pair0_sock_send(void *arg, nni_aio *aio)
+{
+ pair0_sock *s = arg;
+
+ nni_msgq_aio_put(s->uwq, aio);
+}
+
+static void
+pair0_sock_recv(void *arg, nni_aio *aio)
+{
+ pair0_sock *s = arg;
+
+ nni_msgq_aio_get(s->urq, aio);
+}
+
+static nni_proto_pipe_ops pair0_pipe_ops = {
+ .pipe_size = sizeof(pair0_pipe),
+ .pipe_init = pair0_pipe_init,
+ .pipe_fini = pair0_pipe_fini,
+ .pipe_start = pair0_pipe_start,
+ .pipe_close = pair0_pipe_close,
+ .pipe_stop = pair0_pipe_stop,
+};
+
+static nni_option pair0_sock_options[] = {
+ // terminate list
+ {
+ .o_name = NULL,
+ }
+};
+
+static nni_proto_sock_ops pair0_sock_ops = {
+ .sock_size = sizeof(pair0_sock),
+ .sock_init = pair0_sock_init,
+ .sock_fini = pair0_sock_fini,
+ .sock_open = pair0_sock_open,
+ .sock_close = pair0_sock_close,
+ .sock_send = pair0_sock_send,
+ .sock_recv = pair0_sock_recv,
+ .sock_options = pair0_sock_options,
+};
+
+// Legacy protocol (v0)
+static nni_proto pair0_proto = {
+ .proto_version = NNI_PROTOCOL_VERSION,
+ .proto_self = { NNI_PROTO_PAIR_V0, "pair" },
+ .proto_peer = { NNI_PROTO_PAIR_V0, "pair" },
+ .proto_flags = NNI_PROTO_FLAG_SNDRCV,
+ .proto_sock_ops = &pair0_sock_ops,
+ .proto_pipe_ops = &pair0_pipe_ops,
+};
+
+static nni_proto pair0_proto_raw = {
+ .proto_version = NNI_PROTOCOL_VERSION,
+ .proto_self = { NNI_PROTO_PAIR_V0, "pair" },
+ .proto_peer = { NNI_PROTO_PAIR_V0, "pair" },
+ .proto_flags = NNI_PROTO_FLAG_SNDRCV | NNI_PROTO_FLAG_RAW,
+ .proto_sock_ops = &pair0_sock_ops,
+ .proto_pipe_ops = &pair0_pipe_ops,
+};
+
+int
+nng_pair0_open(nng_socket *sidp)
+{
+ return (nni_proto_open(sidp, &pair0_proto));
+}
+
+int
+nng_pair0_open_raw(nng_socket *sidp)
+{
+ return (nni_proto_open(sidp, &pair0_proto_raw));
+}
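
As an illustrative aside (assumed inproc address, error checks omitted), the 1:1 behavior enforced by pair0_pipe_start above looks like this from the application's point of view: the first pairing carries traffic, while any additional peer is rejected at pipe start with NNG_EBUSY and simply never exchanges messages.

// Sketch: pair0 is strictly one-to-one.
#include <string.h>
#include <nng/nng.h>
#include <nng/protocol/pair0/pair.h>

static void
pair0_sketch(void)
{
	nng_socket srv, cli;
	char       buf[16];
	size_t     len = sizeof(buf);

	nng_pair0_open(&srv);
	nng_pair0_open(&cli);
	nng_listen(srv, "inproc://pair0-demo", NULL, 0);
	nng_dial(cli, "inproc://pair0-demo", NULL, 0);

	// Messages flow both ways on the single pairing.
	nng_send(cli, "ping", strlen("ping") + 1, 0);
	nng_recv(srv, buf, &len, 0); // buf now holds "ping"

	nng_close(cli);
	nng_close(srv);
}
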
diff --git a/src/sp/protocol/pair1/CMakeLists.txt b/src/sp/protocol/pair1/CMakeLists.txt
new file mode 100644
index 00000000..12e12607
--- /dev/null
+++ b/src/sp/protocol/pair1/CMakeLists.txt
@@ -0,0 +1,20 @@
+#
+# Copyright 2019 Staysail Systems, Inc. <info@staysail.tech>
+# Copyright 2018 Capitar IT Group BV <info@capitar.com>
+#
+# This software is supplied under the terms of the MIT License, a
+# copy of which should be located in the distribution where this
+# file was obtained (LICENSE.txt). A copy of the license may also be
+# found online at https://opensource.org/licenses/MIT.
+#
+
+# PAIRv1 protocol
+nng_directory(pair1)
+
+# XXX: break pair1_poly into an ifdef.
+nng_sources_if(NNG_PROTO_PAIR1 pair.c pair1_poly.c)
+nng_headers_if(NNG_PROTO_PAIR1 nng/protocol/pair1/pair.h)
+nng_defines_if(NNG_PROTO_PAIR1 NNG_HAVE_PAIR1)
+
+nng_test(pair1_test)
+nng_test(pair1_poly_test)
\ No newline at end of file
diff --git a/src/sp/protocol/pair1/pair.c b/src/sp/protocol/pair1/pair.c
new file mode 100644
index 00000000..ba497c42
--- /dev/null
+++ b/src/sp/protocol/pair1/pair.c
@@ -0,0 +1,540 @@
+//
+// Copyright 2020 Staysail Systems, Inc. <info@staysail.tech>
+// Copyright 2018 Capitar IT Group BV <info@capitar.com>
+//
+// This software is supplied under the terms of the MIT License, a
+// copy of which should be located in the distribution where this
+// file was obtained (LICENSE.txt). A copy of the license may also be
+// found online at https://opensource.org/licenses/MIT.
+//
+
+#include <stdlib.h>
+
+#include "core/nng_impl.h"
+#include "nng/protocol/pair1/pair.h"
+
+// Pair protocol. The PAIRv1 protocol is a simple 1:1 messaging pattern.
+
+#ifdef NNG_ENABLE_STATS
+#define BUMP_STAT(x) nni_stat_inc(x, 1)
+#else
+#define BUMP_STAT(x)
+#endif
+
+typedef struct pair1_pipe pair1_pipe;
+typedef struct pair1_sock pair1_sock;
+
+static void pair1_pipe_send_cb(void *);
+static void pair1_pipe_recv_cb(void *);
+static void pair1_pipe_get_cb(void *);
+static void pair1_pipe_put_cb(void *);
+static void pair1_pipe_fini(void *);
+
+// pair1_sock is our per-socket protocol private structure.
+struct pair1_sock {
+ nni_msgq * uwq;
+ nni_msgq * urq;
+ nni_sock * sock;
+ bool raw;
+ pair1_pipe * p;
+ nni_atomic_int ttl;
+ nni_mtx mtx;
+#ifdef NNG_ENABLE_STATS
+ nni_stat_item stat_poly;
+ nni_stat_item stat_raw;
+ nni_stat_item stat_reject_mismatch;
+ nni_stat_item stat_reject_already;
+ nni_stat_item stat_ttl_drop;
+ nni_stat_item stat_rx_malformed;
+ nni_stat_item stat_tx_malformed;
+ nni_stat_item stat_tx_drop;
+#endif
+#ifdef NNG_TEST_LIB
+ bool inject_header;
+#endif
+};
+
+// pair1_pipe is our per-pipe protocol private structure.
+struct pair1_pipe {
+ nni_pipe * pipe;
+ pair1_sock *pair;
+ nni_aio aio_send;
+ nni_aio aio_recv;
+ nni_aio aio_get;
+ nni_aio aio_put;
+};
+
+static void
+pair1_sock_fini(void *arg)
+{
+ pair1_sock *s = arg;
+
+ nni_mtx_fini(&s->mtx);
+}
+
+#ifdef NNG_ENABLE_STATS
+static void
+pair1_add_sock_stat(
+ pair1_sock *s, nni_stat_item *item, const nni_stat_info *info)
+{
+ nni_stat_init(item, info);
+ nni_sock_add_stat(s->sock, item);
+}
+#endif
+
+static int
+pair1_sock_init_impl(void *arg, nni_sock *sock, bool raw)
+{
+ pair1_sock *s = arg;
+
+ // Raw mode uses this.
+ nni_mtx_init(&s->mtx);
+ s->sock = sock;
+
+#ifdef NNG_ENABLE_STATS
+ static const nni_stat_info poly_info = {
+ .si_name = "poly",
+ .si_desc = "polyamorous mode?",
+ .si_type = NNG_STAT_BOOLEAN,
+ };
+ static const nni_stat_info raw_info = {
+ .si_name = "raw",
+ .si_desc = "raw mode?",
+ .si_type = NNG_STAT_BOOLEAN,
+ };
+ static const nni_stat_info mismatch_info = {
+ .si_name = "mismatch",
+ .si_desc = "pipes rejected (protocol mismatch)",
+ .si_type = NNG_STAT_COUNTER,
+ .si_atomic = true,
+ };
+ static const nni_stat_info already_info = {
+ .si_name = "already",
+ .si_desc = "pipes rejected (already connected)",
+ .si_type = NNG_STAT_COUNTER,
+ .si_atomic = true,
+ };
+ static const nni_stat_info ttl_drop_info = {
+ .si_name = "ttl_drop",
+ .si_desc = "messages dropped due to too many hops",
+ .si_type = NNG_STAT_COUNTER,
+ .si_unit = NNG_UNIT_MESSAGES,
+ .si_atomic = true,
+ };
+ static const nni_stat_info tx_drop_info = {
+ .si_name = "tx_drop",
+ .si_desc = "messages dropped undeliverable",
+ .si_type = NNG_STAT_COUNTER,
+ .si_unit = NNG_UNIT_MESSAGES,
+ .si_atomic = true,
+ };
+ static const nni_stat_info rx_malformed_info = {
+ .si_name = "rx_malformed",
+ .si_desc = "malformed messages received",
+ .si_type = NNG_STAT_COUNTER,
+ .si_unit = NNG_UNIT_MESSAGES,
+ .si_atomic = true,
+ };
+ static const nni_stat_info tx_malformed_info = {
+ .si_name = "tx_malformed",
+ .si_desc = "malformed messages not sent",
+ .si_type = NNG_STAT_COUNTER,
+ .si_unit = NNG_UNIT_MESSAGES,
+ .si_atomic = true,
+ };
+
+ pair1_add_sock_stat(s, &s->stat_poly, &poly_info);
+ pair1_add_sock_stat(s, &s->stat_raw, &raw_info);
+ pair1_add_sock_stat(s, &s->stat_reject_mismatch, &mismatch_info);
+ pair1_add_sock_stat(s, &s->stat_reject_already, &already_info);
+ pair1_add_sock_stat(s, &s->stat_ttl_drop, &ttl_drop_info);
+ pair1_add_sock_stat(s, &s->stat_tx_drop, &tx_drop_info);
+ pair1_add_sock_stat(s, &s->stat_rx_malformed, &rx_malformed_info);
+
+ if (raw) {
+ // This stat only makes sense in raw mode.
+ pair1_add_sock_stat(
+ s, &s->stat_tx_malformed, &tx_malformed_info);
+ }
+
+ nni_stat_set_bool(&s->stat_raw, raw);
+ nni_stat_set_bool(&s->stat_poly, false);
+#endif
+
+ s->raw = raw;
+ s->uwq = nni_sock_sendq(sock);
+ s->urq = nni_sock_recvq(sock);
+ nni_atomic_init(&s->ttl);
+ nni_atomic_set(&s->ttl, 8);
+
+ return (0);
+}
+
+static int
+pair1_sock_init(void *arg, nni_sock *sock)
+{
+ return (pair1_sock_init_impl(arg, sock, false));
+}
+
+static int
+pair1_sock_init_raw(void *arg, nni_sock *sock)
+{
+ return (pair1_sock_init_impl(arg, sock, true));
+}
+
+static void
+pair1_pipe_stop(void *arg)
+{
+ pair1_pipe *p = arg;
+ pair1_sock *s = p->pair;
+
+ nni_mtx_lock(&s->mtx);
+ if (s->p == p) {
+ s->p = NULL;
+ }
+ nni_mtx_unlock(&s->mtx);
+ nni_aio_stop(&p->aio_send);
+ nni_aio_stop(&p->aio_recv);
+ nni_aio_stop(&p->aio_put);
+ nni_aio_stop(&p->aio_get);
+}
+
+static void
+pair1_pipe_fini(void *arg)
+{
+ pair1_pipe *p = arg;
+
+ nni_aio_fini(&p->aio_send);
+ nni_aio_fini(&p->aio_recv);
+ nni_aio_fini(&p->aio_put);
+ nni_aio_fini(&p->aio_get);
+}
+
+static int
+pair1_pipe_init(void *arg, nni_pipe *pipe, void *pair)
+{
+ pair1_pipe *p = arg;
+
+ nni_aio_init(&p->aio_send, pair1_pipe_send_cb, p);
+ nni_aio_init(&p->aio_recv, pair1_pipe_recv_cb, p);
+ nni_aio_init(&p->aio_get, pair1_pipe_get_cb, p);
+ nni_aio_init(&p->aio_put, pair1_pipe_put_cb, p);
+
+ p->pipe = pipe;
+ p->pair = pair;
+
+ return (0);
+}
+
+static int
+pair1_pipe_start(void *arg)
+{
+ pair1_pipe *p = arg;
+ pair1_sock *s = p->pair;
+
+ if (nni_pipe_peer(p->pipe) != NNG_PAIR1_PEER) {
+ BUMP_STAT(&s->stat_reject_mismatch);
+ // Peer protocol mismatch.
+ return (NNG_EPROTO);
+ }
+
+ nni_mtx_lock(&s->mtx);
+ if (s->p != NULL) {
+ nni_mtx_unlock(&s->mtx);
+ BUMP_STAT(&s->stat_reject_already);
+ return (NNG_EBUSY);
+ }
+ s->p = p;
+ nni_mtx_unlock(&s->mtx);
+
+ // Schedule a get.
+ nni_msgq_aio_get(s->uwq, &p->aio_get);
+
+ // And the pipe read of course.
+ nni_pipe_recv(p->pipe, &p->aio_recv);
+
+ return (0);
+}
+
+static void
+pair1_pipe_close(void *arg)
+{
+ pair1_pipe *p = arg;
+
+ nni_aio_close(&p->aio_send);
+ nni_aio_close(&p->aio_recv);
+ nni_aio_close(&p->aio_put);
+ nni_aio_close(&p->aio_get);
+}
+
+static void
+pair1_pipe_recv_cb(void *arg)
+{
+ pair1_pipe *p = arg;
+ pair1_sock *s = p->pair;
+ nni_msg * msg;
+ uint32_t hdr;
+ nni_pipe * pipe = p->pipe;
+ size_t len;
+
+ if (nni_aio_result(&p->aio_recv) != 0) {
+ nni_pipe_close(p->pipe);
+ return;
+ }
+
+ msg = nni_aio_get_msg(&p->aio_recv);
+ nni_aio_set_msg(&p->aio_recv, NULL);
+
+ // Store the pipe ID.
+ nni_msg_set_pipe(msg, nni_pipe_id(p->pipe));
+
+ // If the message is missing the hop count header, scrap it.
+ if ((nni_msg_len(msg) < sizeof(uint32_t)) ||
+ ((hdr = nni_msg_trim_u32(msg)) > 0xff)) {
+ BUMP_STAT(&s->stat_rx_malformed);
+ nni_msg_free(msg);
+ nni_pipe_close(pipe);
+ return;
+ }
+
+ len = nni_msg_len(msg);
+
+ // If we bounced too many times, discard the message, but
+ // keep getting more.
+ if ((int) hdr > nni_atomic_get(&s->ttl)) {
+ BUMP_STAT(&s->stat_ttl_drop);
+ nni_msg_free(msg);
+ nni_pipe_recv(pipe, &p->aio_recv);
+ return;
+ }
+
+ // Store the hop count in the header.
+ nni_msg_header_append_u32(msg, hdr);
+
+ // Send the message up.
+ nni_aio_set_msg(&p->aio_put, msg);
+ nni_sock_bump_rx(s->sock, len);
+ nni_msgq_aio_put(s->urq, &p->aio_put);
+}
+
+static void
+pair1_pipe_put_cb(void *arg)
+{
+ pair1_pipe *p = arg;
+
+ if (nni_aio_result(&p->aio_put) != 0) {
+ nni_msg_free(nni_aio_get_msg(&p->aio_put));
+ nni_aio_set_msg(&p->aio_put, NULL);
+ nni_pipe_close(p->pipe);
+ return;
+ }
+ nni_pipe_recv(p->pipe, &p->aio_recv);
+}
+
+static void
+pair1_pipe_get_cb(void *arg)
+{
+ pair1_pipe *p = arg;
+ pair1_sock *s = p->pair;
+ nni_msg * msg;
+ uint32_t hops;
+
+ if (nni_aio_result(&p->aio_get) != 0) {
+ nni_pipe_close(p->pipe);
+ return;
+ }
+
+ msg = nni_aio_get_msg(&p->aio_get);
+ nni_aio_set_msg(&p->aio_get, NULL);
+
+ // Raw mode messages have the header already formed, with a hop count.
+ // Cooked mode messages have no header so we have to add one.
+ if (s->raw) {
+ if ((nni_msg_header_len(msg) != sizeof(uint32_t)) ||
+ ((hops = nni_msg_header_trim_u32(msg)) > 254)) {
+ BUMP_STAT(&s->stat_tx_malformed);
+ nni_msg_free(msg);
+ nni_msgq_aio_get(s->uwq, &p->aio_get);
+ return;
+ }
+#ifdef NNG_TEST_LIB
+ } else if (s->inject_header) {
+ nni_aio_set_msg(&p->aio_send, msg);
+ nni_pipe_send(p->pipe, &p->aio_send);
+ return;
+#endif
+ } else {
+ // Strip off any previously existing header, such as when
+ // replying to messages.
+ nni_msg_header_clear(msg);
+ hops = 0;
+ }
+
+ hops++;
+
+ // Insert the hops header.
+ nni_msg_header_append_u32(msg, hops);
+
+ nni_aio_set_msg(&p->aio_send, msg);
+ nni_pipe_send(p->pipe, &p->aio_send);
+}
+
+static void
+pair1_pipe_send_cb(void *arg)
+{
+ pair1_pipe *p = arg;
+ pair1_sock *s = p->pair;
+
+ if (nni_aio_result(&p->aio_send) != 0) {
+ nni_msg_free(nni_aio_get_msg(&p->aio_send));
+ nni_aio_set_msg(&p->aio_send, NULL);
+ nni_pipe_close(p->pipe);
+ return;
+ }
+
+ nni_msgq_aio_get(s->uwq, &p->aio_get);
+}
+
+static void
+pair1_sock_open(void *arg)
+{
+ NNI_ARG_UNUSED(arg);
+}
+
+static void
+pair1_sock_close(void *arg)
+{
+ NNI_ARG_UNUSED(arg);
+}
+
+static int
+pair1_sock_set_max_ttl(void *arg, const void *buf, size_t sz, nni_opt_type t)
+{
+ pair1_sock *s = arg;
+ int rv;
+ int ttl;
+
+ if ((rv = nni_copyin_int(&ttl, buf, sz, 1, NNI_MAX_MAX_TTL, t)) == 0) {
+ nni_atomic_set(&s->ttl, ttl);
+ }
+
+ return (rv);
+}
+
+static int
+pair1_sock_get_max_ttl(void *arg, void *buf, size_t *szp, nni_opt_type t)
+{
+ pair1_sock *s = arg;
+ return (nni_copyout_int(nni_atomic_get(&s->ttl), buf, szp, t));
+}
+
+#ifdef NNG_TEST_LIB
+static int
+pair1_set_test_inject_header(void *arg, const void *buf, size_t sz, nni_type t)
+{
+ pair1_sock *s = arg;
+ int rv;
+ nni_mtx_lock(&s->mtx);
+ rv = nni_copyin_bool(&s->inject_header, buf, sz, t);
+ nni_mtx_unlock(&s->mtx);
+ return (rv);
+}
+#endif
+
+static void
+pair1_sock_send(void *arg, nni_aio *aio)
+{
+ pair1_sock *s = arg;
+
+ nni_sock_bump_tx(s->sock, nni_msg_len(nni_aio_get_msg(aio)));
+ nni_msgq_aio_put(s->uwq, aio);
+}
+
+static void
+pair1_sock_recv(void *arg, nni_aio *aio)
+{
+ pair1_sock *s = arg;
+
+ nni_msgq_aio_get(s->urq, aio);
+}
+
+static nni_proto_pipe_ops pair1_pipe_ops = {
+ .pipe_size = sizeof(pair1_pipe),
+ .pipe_init = pair1_pipe_init,
+ .pipe_fini = pair1_pipe_fini,
+ .pipe_start = pair1_pipe_start,
+ .pipe_close = pair1_pipe_close,
+ .pipe_stop = pair1_pipe_stop,
+};
+
+static nni_option pair1_sock_options[] = {
+ {
+ .o_name = NNG_OPT_MAXTTL,
+ .o_get = pair1_sock_get_max_ttl,
+ .o_set = pair1_sock_set_max_ttl,
+ },
+#ifdef NNG_TEST_LIB
+ {
+ // Test-only option to pass the header through unmolested. This
+ // allows us to inject bad header contents.
+ .o_name = "pair1_test_inject_header",
+ .o_set = pair1_set_test_inject_header,
+ },
+#endif
+ // terminate list
+ {
+ .o_name = NULL,
+ },
+};
+
+static nni_proto_sock_ops pair1_sock_ops = {
+ .sock_size = sizeof(pair1_sock),
+ .sock_init = pair1_sock_init,
+ .sock_fini = pair1_sock_fini,
+ .sock_open = pair1_sock_open,
+ .sock_close = pair1_sock_close,
+ .sock_recv = pair1_sock_recv,
+ .sock_send = pair1_sock_send,
+ .sock_options = pair1_sock_options,
+};
+
+static nni_proto pair1_proto = {
+ .proto_version = NNI_PROTOCOL_VERSION,
+ .proto_self = { NNG_PAIR1_SELF, NNG_PAIR1_SELF_NAME },
+ .proto_peer = { NNG_PAIR1_PEER, NNG_PAIR1_PEER_NAME },
+ .proto_flags = NNI_PROTO_FLAG_SNDRCV,
+ .proto_sock_ops = &pair1_sock_ops,
+ .proto_pipe_ops = &pair1_pipe_ops,
+};
+
+int
+nng_pair1_open(nng_socket *sock)
+{
+ return (nni_proto_open(sock, &pair1_proto));
+}
+
+static nni_proto_sock_ops pair1_sock_ops_raw = {
+ .sock_size = sizeof(pair1_sock),
+ .sock_init = pair1_sock_init_raw,
+ .sock_fini = pair1_sock_fini,
+ .sock_open = pair1_sock_open,
+ .sock_close = pair1_sock_close,
+ .sock_recv = pair1_sock_recv,
+ .sock_send = pair1_sock_send,
+ .sock_options = pair1_sock_options,
+};
+
+static nni_proto pair1_proto_raw = {
+ .proto_version = NNI_PROTOCOL_VERSION,
+ .proto_self = { NNG_PAIR1_SELF, NNG_PAIR1_SELF_NAME },
+ .proto_peer = { NNG_PAIR1_PEER, NNG_PAIR1_PEER_NAME },
+ .proto_flags = NNI_PROTO_FLAG_SNDRCV | NNI_PROTO_FLAG_RAW,
+ .proto_sock_ops = &pair1_sock_ops_raw,
+ .proto_pipe_ops = &pair1_pipe_ops,
+};
+
+int
+nng_pair1_open_raw(nng_socket *sock)
+{
+ return (nni_proto_open(sock, &pair1_proto_raw));
+}
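
For context, a short sketch (using the classic nng_setopt/nng_getopt calls that the tests in this commit also use) of the hop-limit option backed by pair1_sock_set_max_ttl above; setting 0 or an out-of-range value fails with NNG_EINVAL, as the tests below exercise.

// Sketch: capping the pair1 hop count via NNG_OPT_MAXTTL.
#include <nng/nng.h>
#include <nng/protocol/pair1/pair.h>

static void
pair1_ttl_sketch(void)
{
	nng_socket s;
	int        ttl = 0;

	nng_pair1_open(&s);

	// Messages whose hop count exceeds this limit are silently dropped
	// by pair1_pipe_recv_cb; the default limit is 8.
	nng_setopt_int(s, NNG_OPT_MAXTTL, 4);
	nng_getopt_int(s, NNG_OPT_MAXTTL, &ttl); // ttl == 4

	nng_close(s);
}
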
diff --git a/src/sp/protocol/pair1/pair1_poly.c b/src/sp/protocol/pair1/pair1_poly.c
new file mode 100644
index 00000000..6c16745c
--- /dev/null
+++ b/src/sp/protocol/pair1/pair1_poly.c
@@ -0,0 +1,535 @@
+//
+// Copyright 2020 Staysail Systems, Inc. <info@staysail.tech>
+// Copyright 2018 Capitar IT Group BV <info@capitar.com>
+//
+// This software is supplied under the terms of the MIT License, a
+// copy of which should be located in the distribution where this
+// file was obtained (LICENSE.txt). A copy of the license may also be
+// found online at https://opensource.org/licenses/MIT.
+//
+
+#include <stdlib.h>
+
+#include "core/nng_impl.h"
+#include "nng/protocol/pair1/pair.h"
+
+// Pair1 polyamorous mode. The PAIRv1 protocol is normally a simple 1:1
+// messaging pattern, but this mode offers the ability to use a best-effort
+// multicast type of communication. There are limitations, however.
+// Most notably this does not interact well with nng_device type
+// proxies, and there is no support for raw mode.
+
+// THIS FEATURE IS DEPRECATED. We discourage use in new applications.
+
+#ifdef NNG_ENABLE_STATS
+#define BUMP_STAT(x) nni_stat_inc(x, 1)
+#else
+#define BUMP_STAT(x)
+#endif
+
+typedef struct pair1poly_pipe pair1poly_pipe;
+typedef struct pair1poly_sock pair1poly_sock;
+
+static void pair1poly_sock_get_cb(void *);
+static void pair1poly_pipe_send_cb(void *);
+static void pair1poly_pipe_recv_cb(void *);
+static void pair1poly_pipe_get_cb(void *);
+static void pair1poly_pipe_put_cb(void *);
+static void pair1poly_pipe_fini(void *);
+
+// pair1poly_sock is our per-socket protocol private structure.
+struct pair1poly_sock {
+ nni_msgq * uwq;
+ nni_msgq * urq;
+ nni_sock * sock;
+ nni_atomic_int ttl;
+ nni_mtx mtx;
+ nni_id_map pipes;
+ nni_list plist;
+ bool started;
+ nni_aio aio_get;
+ nni_stat_item stat_poly;
+ nni_stat_item stat_raw;
+ nni_stat_item stat_reject_mismatch;
+ nni_stat_item stat_reject_already;
+ nni_stat_item stat_ttl_drop;
+ nni_stat_item stat_rx_malformed;
+ nni_stat_item stat_tx_malformed;
+ nni_stat_item stat_tx_drop;
+};
+
+// pair1poly_pipe is our per-pipe protocol private structure.
+struct pair1poly_pipe {
+ nni_pipe * pipe;
+ pair1poly_sock *pair;
+ nni_msgq * send_queue;
+ nni_aio aio_send;
+ nni_aio aio_recv;
+ nni_aio aio_get;
+ nni_aio aio_put;
+ nni_list_node node;
+};
+
+static void
+pair1poly_sock_fini(void *arg)
+{
+ pair1poly_sock *s = arg;
+
+ nni_aio_fini(&s->aio_get);
+ nni_id_map_fini(&s->pipes);
+ nni_mtx_fini(&s->mtx);
+}
+
+#ifdef NNG_ENABLE_STATS
+static void
+pair1_add_sock_stat(
+ pair1poly_sock *s, nni_stat_item *item, const nni_stat_info *info)
+{
+ nni_stat_init(item, info);
+ nni_sock_add_stat(s->sock, item);
+}
+#endif
+
+static int
+pair1poly_sock_init(void *arg, nni_sock *sock)
+{
+ pair1poly_sock *s = arg;
+
+ nni_id_map_init(&s->pipes, 0, 0, false);
+ NNI_LIST_INIT(&s->plist, pair1poly_pipe, node);
+ s->sock = sock;
+
+ // Raw mode uses this.
+ nni_mtx_init(&s->mtx);
+
+ nni_aio_init(&s->aio_get, pair1poly_sock_get_cb, s);
+
+#ifdef NNG_ENABLE_STATS
+ static const nni_stat_info poly_info = {
+ .si_name = "poly",
+ .si_desc = "polyamorous mode?",
+ .si_type = NNG_STAT_BOOLEAN,
+ };
+ static const nni_stat_info raw_info = {
+ .si_name = "raw",
+ .si_desc = "raw mode?",
+ .si_type = NNG_STAT_BOOLEAN,
+ };
+ static const nni_stat_info mismatch_info = {
+ .si_name = "mismatch",
+ .si_desc = "pipes rejected (protocol mismatch)",
+ .si_type = NNG_STAT_COUNTER,
+ .si_atomic = true,
+ };
+ static const nni_stat_info already_info = {
+ .si_name = "already",
+ .si_desc = "pipes rejected (already connected)",
+ .si_type = NNG_STAT_COUNTER,
+ .si_atomic = true,
+ };
+ static const nni_stat_info ttl_drop_info = {
+ .si_name = "ttl_drop",
+ .si_desc = "messages dropped due to too many hops",
+ .si_type = NNG_STAT_COUNTER,
+ .si_unit = NNG_UNIT_MESSAGES,
+ .si_atomic = true,
+ };
+ static const nni_stat_info tx_drop_info = {
+ .si_name = "tx_drop",
+ .si_desc = "messages dropped undeliverable",
+ .si_type = NNG_STAT_COUNTER,
+ .si_unit = NNG_UNIT_MESSAGES,
+ .si_atomic = true,
+ };
+ static const nni_stat_info rx_malformed_info = {
+ .si_name = "rx_malformed",
+ .si_desc = "malformed messages received",
+ .si_type = NNG_STAT_COUNTER,
+ .si_unit = NNG_UNIT_MESSAGES,
+ .si_atomic = true,
+ };
+ static const nni_stat_info tx_malformed_info = {
+ .si_name = "tx_malformed",
+ .si_desc = "malformed messages not sent",
+ .si_type = NNG_STAT_COUNTER,
+ .si_unit = NNG_UNIT_MESSAGES,
+ .si_atomic = true,
+ };
+
+ pair1_add_sock_stat(s, &s->stat_poly, &poly_info);
+ pair1_add_sock_stat(s, &s->stat_raw, &raw_info);
+ pair1_add_sock_stat(s, &s->stat_reject_mismatch, &mismatch_info);
+ pair1_add_sock_stat(s, &s->stat_reject_already, &already_info);
+ pair1_add_sock_stat(s, &s->stat_ttl_drop, &ttl_drop_info);
+ pair1_add_sock_stat(s, &s->stat_tx_drop, &tx_drop_info);
+ pair1_add_sock_stat(s, &s->stat_rx_malformed, &rx_malformed_info);
+ pair1_add_sock_stat(s, &s->stat_tx_malformed, &tx_malformed_info);
+
+ nni_stat_set_bool(&s->stat_raw, false);
+ nni_stat_set_bool(&s->stat_poly, true);
+#endif
+
+ s->uwq = nni_sock_sendq(sock);
+ s->urq = nni_sock_recvq(sock);
+ nni_atomic_init(&s->ttl);
+ nni_atomic_set(&s->ttl, 8);
+
+ return (0);
+}
+
+static void
+pair1poly_pipe_stop(void *arg)
+{
+ pair1poly_pipe *p = arg;
+
+ nni_aio_stop(&p->aio_send);
+ nni_aio_stop(&p->aio_recv);
+ nni_aio_stop(&p->aio_put);
+ nni_aio_stop(&p->aio_get);
+}
+
+static void
+pair1poly_pipe_fini(void *arg)
+{
+ pair1poly_pipe *p = arg;
+
+ nni_aio_fini(&p->aio_send);
+ nni_aio_fini(&p->aio_recv);
+ nni_aio_fini(&p->aio_put);
+ nni_aio_fini(&p->aio_get);
+ nni_msgq_fini(p->send_queue);
+}
+
+static int
+pair1poly_pipe_init(void *arg, nni_pipe *pipe, void *pair)
+{
+ pair1poly_pipe *p = arg;
+ int rv;
+
+ nni_aio_init(&p->aio_send, pair1poly_pipe_send_cb, p);
+ nni_aio_init(&p->aio_recv, pair1poly_pipe_recv_cb, p);
+ nni_aio_init(&p->aio_get, pair1poly_pipe_get_cb, p);
+ nni_aio_init(&p->aio_put, pair1poly_pipe_put_cb, p);
+
+ if ((rv = nni_msgq_init(&p->send_queue, 2)) != 0) {
+ pair1poly_pipe_fini(p);
+ return (rv);
+ }
+
+ p->pipe = pipe;
+ p->pair = pair;
+
+ return (0);
+}
+
+static int
+pair1poly_pipe_start(void *arg)
+{
+ pair1poly_pipe *p = arg;
+ pair1poly_sock *s = p->pair;
+ uint32_t id;
+ int rv;
+
+ nni_mtx_lock(&s->mtx);
+ if (nni_pipe_peer(p->pipe) != NNG_PAIR1_PEER) {
+ nni_mtx_unlock(&s->mtx);
+ BUMP_STAT(&s->stat_reject_mismatch);
+ // Peer protocol mismatch.
+ return (NNG_EPROTO);
+ }
+
+ id = nni_pipe_id(p->pipe);
+ if ((rv = nni_id_set(&s->pipes, id, p)) != 0) {
+ nni_mtx_unlock(&s->mtx);
+ return (rv);
+ }
+ if (!s->started) {
+ nni_msgq_aio_get(s->uwq, &s->aio_get);
+ }
+ nni_list_append(&s->plist, p);
+ s->started = true;
+ nni_mtx_unlock(&s->mtx);
+
+ // Schedule a get. In polyamorous mode we get on the per-pipe
+ // send_queue, as the socket distributes to us. In monogamous mode
+ // we bypass and get from the upper write queue directly (saving a
+ // set of context switches).
+ nni_msgq_aio_get(p->send_queue, &p->aio_get);
+
+ // And the pipe read of course.
+ nni_pipe_recv(p->pipe, &p->aio_recv);
+
+ return (0);
+}
+
+static void
+pair1poly_pipe_close(void *arg)
+{
+ pair1poly_pipe *p = arg;
+ pair1poly_sock *s = p->pair;
+
+ nni_aio_close(&p->aio_send);
+ nni_aio_close(&p->aio_recv);
+ nni_aio_close(&p->aio_put);
+ nni_aio_close(&p->aio_get);
+
+ nni_mtx_lock(&s->mtx);
+ nni_id_remove(&s->pipes, nni_pipe_id(p->pipe));
+ nni_list_node_remove(&p->node);
+ nni_mtx_unlock(&s->mtx);
+
+ nni_msgq_close(p->send_queue);
+}
+
+static void
+pair1poly_pipe_recv_cb(void *arg)
+{
+ pair1poly_pipe *p = arg;
+ pair1poly_sock *s = p->pair;
+ nni_msg * msg;
+ uint32_t hdr;
+ nni_pipe * pipe = p->pipe;
+ size_t len;
+
+ if (nni_aio_result(&p->aio_recv) != 0) {
+ nni_pipe_close(p->pipe);
+ return;
+ }
+
+ msg = nni_aio_get_msg(&p->aio_recv);
+ nni_aio_set_msg(&p->aio_recv, NULL);
+
+ // Store the pipe ID.
+ nni_msg_set_pipe(msg, nni_pipe_id(p->pipe));
+
+ // If the message is missing the hop count header, scrap it.
+ if ((nni_msg_len(msg) < sizeof(uint32_t)) ||
+ ((hdr = nni_msg_trim_u32(msg)) > 0xff)) {
+ BUMP_STAT(&s->stat_rx_malformed);
+ nni_msg_free(msg);
+ nni_pipe_close(pipe);
+ return;
+ }
+
+ len = nni_msg_len(msg);
+
+ // If we bounced too many times, discard the message, but
+ // keep getting more.
+ if ((int) hdr > nni_atomic_get(&s->ttl)) {
+ BUMP_STAT(&s->stat_ttl_drop);
+ nni_msg_free(msg);
+ nni_pipe_recv(pipe, &p->aio_recv);
+ return;
+ }
+
+ // Store the hop count in the header.
+ nni_msg_header_append_u32(msg, hdr);
+
+ // Send the message up.
+ nni_aio_set_msg(&p->aio_put, msg);
+ nni_sock_bump_rx(s->sock, len);
+ nni_msgq_aio_put(s->urq, &p->aio_put);
+}
+
+static void
+pair1poly_sock_get_cb(void *arg)
+{
+ pair1poly_pipe *p;
+ pair1poly_sock *s = arg;
+ nni_msg * msg;
+ uint32_t id;
+
+ if (nni_aio_result(&s->aio_get) != 0) {
+ // Socket closing...
+ return;
+ }
+
+ msg = nni_aio_get_msg(&s->aio_get);
+ nni_aio_set_msg(&s->aio_get, NULL);
+
+ p = NULL;
+ nni_mtx_lock(&s->mtx);
+ // If no pipe was requested, we look for any connected peer.
+ if (((id = nni_msg_get_pipe(msg)) == 0) &&
+ (!nni_list_empty(&s->plist))) {
+ p = nni_list_first(&s->plist);
+ } else {
+ p = nni_id_get(&s->pipes, id);
+ }
+
+ // Try a non-blocking send. If this fails we just discard the
+ // message. We have to do this to avoid head-of-line blocking
+ // for messages sent to other pipes. Note that there is some
+ // buffering in the send_queue.
+ if ((p == NULL) || nni_msgq_tryput(p->send_queue, msg) != 0) {
+ BUMP_STAT(&s->stat_tx_drop);
+ nni_msg_free(msg);
+ }
+
+ nni_mtx_unlock(&s->mtx);
+ nni_msgq_aio_get(s->uwq, &s->aio_get);
+}
+
+static void
+pair1poly_pipe_put_cb(void *arg)
+{
+ pair1poly_pipe *p = arg;
+
+ if (nni_aio_result(&p->aio_put) != 0) {
+ nni_msg_free(nni_aio_get_msg(&p->aio_put));
+ nni_aio_set_msg(&p->aio_put, NULL);
+ nni_pipe_close(p->pipe);
+ return;
+ }
+ nni_pipe_recv(p->pipe, &p->aio_recv);
+}
+
+static void
+pair1poly_pipe_get_cb(void *arg)
+{
+ pair1poly_pipe *p = arg;
+ nni_msg * msg;
+
+ if (nni_aio_result(&p->aio_get) != 0) {
+ nni_pipe_close(p->pipe);
+ return;
+ }
+
+ msg = nni_aio_get_msg(&p->aio_get);
+ nni_aio_set_msg(&p->aio_get, NULL);
+
+ // Cooked mode messages have no header so we have to add one.
+ // Strip off any previously existing header, such as when
+ // replying to messages.
+ nni_msg_header_clear(msg);
+
+ // Insert the hops header.
+ nni_msg_header_append_u32(msg, 1);
+
+ nni_aio_set_msg(&p->aio_send, msg);
+ nni_pipe_send(p->pipe, &p->aio_send);
+}
+
+static void
+pair1poly_pipe_send_cb(void *arg)
+{
+ pair1poly_pipe *p = arg;
+
+ if (nni_aio_result(&p->aio_send) != 0) {
+ nni_msg_free(nni_aio_get_msg(&p->aio_send));
+ nni_aio_set_msg(&p->aio_send, NULL);
+ nni_pipe_close(p->pipe);
+ return;
+ }
+
+ nni_msgq_aio_get(p->send_queue, &p->aio_get);
+}
+
+static void
+pair1poly_sock_open(void *arg)
+{
+ NNI_ARG_UNUSED(arg);
+}
+
+static void
+pair1poly_sock_close(void *arg)
+{
+ pair1poly_sock *s = arg;
+ nni_aio_close(&s->aio_get);
+}
+
+static int
+pair1poly_set_max_ttl(void *arg, const void *buf, size_t sz, nni_opt_type t)
+{
+ pair1poly_sock *s = arg;
+ int rv;
+ int ttl;
+
+ if ((rv = nni_copyin_int(&ttl, buf, sz, 1, NNI_MAX_MAX_TTL, t)) == 0) {
+ nni_atomic_set(&s->ttl, ttl);
+ }
+
+ return (rv);
+}
+
+static int
+pair1poly_get_max_ttl(void *arg, void *buf, size_t *szp, nni_opt_type t)
+{
+ pair1poly_sock *s = arg;
+ return (nni_copyout_int(nni_atomic_get(&s->ttl), buf, szp, t));
+}
+
+static int
+pair1poly_get_poly(void *arg, void *buf, size_t *szp, nni_opt_type t)
+{
+ NNI_ARG_UNUSED(arg);
+ return (nni_copyout_bool(true, buf, szp, t));
+}
+
+static void
+pair1poly_sock_send(void *arg, nni_aio *aio)
+{
+ pair1poly_sock *s = arg;
+
+ nni_sock_bump_tx(s->sock, nni_msg_len(nni_aio_get_msg(aio)));
+ nni_msgq_aio_put(s->uwq, aio);
+}
+
+static void
+pair1poly_sock_recv(void *arg, nni_aio *aio)
+{
+ pair1poly_sock *s = arg;
+
+ nni_msgq_aio_get(s->urq, aio);
+}
+
+static nni_proto_pipe_ops pair1poly_pipe_ops = {
+ .pipe_size = sizeof(pair1poly_pipe),
+ .pipe_init = pair1poly_pipe_init,
+ .pipe_fini = pair1poly_pipe_fini,
+ .pipe_start = pair1poly_pipe_start,
+ .pipe_close = pair1poly_pipe_close,
+ .pipe_stop = pair1poly_pipe_stop,
+};
+
+static nni_option pair1poly_sock_options[] = {
+ {
+ .o_name = NNG_OPT_MAXTTL,
+ .o_get = pair1poly_get_max_ttl,
+ .o_set = pair1poly_set_max_ttl,
+ },
+ {
+ .o_name = NNG_OPT_PAIR1_POLY,
+ .o_get = pair1poly_get_poly,
+ },
+ // terminate list
+ {
+ .o_name = NULL,
+ },
+};
+
+static nni_proto_sock_ops pair1poly_sock_ops = {
+ .sock_size = sizeof(pair1poly_sock),
+ .sock_init = pair1poly_sock_init,
+ .sock_fini = pair1poly_sock_fini,
+ .sock_open = pair1poly_sock_open,
+ .sock_close = pair1poly_sock_close,
+ .sock_recv = pair1poly_sock_recv,
+ .sock_send = pair1poly_sock_send,
+ .sock_options = pair1poly_sock_options,
+};
+
+static nni_proto pair1poly_proto = {
+ .proto_version = NNI_PROTOCOL_VERSION,
+ .proto_self = { NNG_PAIR1_SELF, NNG_PAIR1_SELF_NAME },
+ .proto_peer = { NNG_PAIR1_PEER, NNG_PAIR1_PEER_NAME },
+ .proto_flags = NNI_PROTO_FLAG_SNDRCV,
+ .proto_sock_ops = &pair1poly_sock_ops,
+ .proto_pipe_ops = &pair1poly_pipe_ops,
+};
+
+int
+nng_pair1_open_poly(nng_socket *sock)
+{
+ return (nni_proto_open(sock, &pair1poly_proto));
+}
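
As a usage sketch (the pipe handle here would normally come from nng_msg_get_pipe on a previously received message, as the tests below do), the pipe-addressed send path that pair1poly_sock_get_cb implements looks like this from the application side:

// Sketch: addressing a specific peer in pair1 polyamorous mode.
#include <nng/nng.h>
#include <nng/protocol/pair1/pair.h>

static int
poly_reply_sketch(nng_socket poly, nng_pipe peer)
{
	nng_msg *msg;
	int      rv;

	if ((rv = nng_msg_alloc(&msg, 0)) != 0) {
		return (rv);
	}
	nng_msg_append(msg, "hi", 3);

	// With a pipe set, the socket routes the message to that peer;
	// with no pipe set it falls back to any connected peer.
	nng_msg_set_pipe(msg, peer);

	if ((rv = nng_sendmsg(poly, msg, 0)) != 0) {
		nng_msg_free(msg); // on failure we still own the message
	}
	return (rv);
}
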
diff --git a/src/sp/protocol/pair1/pair1_poly_test.c b/src/sp/protocol/pair1/pair1_poly_test.c
new file mode 100644
index 00000000..f26f7809
--- /dev/null
+++ b/src/sp/protocol/pair1/pair1_poly_test.c
@@ -0,0 +1,370 @@
+//
+// Copyright 2020 Staysail Systems, Inc. <info@staysail.tech>
+// Copyright 2017 Capitar IT Group BV <info@capitar.com>
+//
+// This software is supplied under the terms of the MIT License, a
+// copy of which should be located in the distribution where this
+// file was obtained (LICENSE.txt). A copy of the license may also be
+// found online at https://opensource.org/licenses/MIT.
+//
+
+#include <nuts.h>
+
+#define SECOND 1000
+
+#define APPEND_STR(m, s) NUTS_PASS(nng_msg_append(m, s, strlen(s)))
+#define CHECK_STR(m, s) \
+ NUTS_TRUE(nng_msg_len(m) == strlen(s)); \
+ NUTS_TRUE(memcmp(nng_msg_body(m), s, strlen(s)) == 0)
+
+static void
+test_poly_identity(void)
+{
+ nng_socket s;
+ int p;
+ char * n;
+
+ NUTS_PASS(nng_pair1_open_poly(&s));
+ NUTS_PASS(nng_socket_get_int(s, NNG_OPT_PROTO, &p));
+ NUTS_TRUE(p == NUTS_PROTO(1u, 1u)); // 17
+ NUTS_PASS(nng_socket_get_int(s, NNG_OPT_PEER, &p));
+ NUTS_TRUE(p == NUTS_PROTO(1u, 1u)); // 17
+ NUTS_PASS(nng_socket_get_string(s, NNG_OPT_PROTONAME, &n));
+ NUTS_MATCH(n, "pair1");
+ nng_strfree(n);
+ NUTS_PASS(nng_socket_get_string(s, NNG_OPT_PEERNAME, &n));
+ NUTS_MATCH(n, "pair1");
+ nng_strfree(n);
+ NUTS_CLOSE(s);
+}
+
+void
+test_poly_best_effort(void)
+{
+ nng_socket s1;
+ nng_socket c1;
+ nng_msg * msg;
+
+ NUTS_PASS(nng_pair1_open_poly(&s1));
+ NUTS_PASS(nng_pair1_open(&c1));
+
+ NUTS_PASS(nng_setopt_int(s1, NNG_OPT_RECVBUF, 1));
+ NUTS_PASS(nng_setopt_int(s1, NNG_OPT_SENDBUF, 1));
+ NUTS_PASS(nng_setopt_int(c1, NNG_OPT_RECVBUF, 1));
+ NUTS_PASS(nng_setopt_ms(s1, NNG_OPT_SENDTIMEO, SECOND));
+
+ NUTS_MARRY(s1, c1);
+
+ for (int i = 0; i < 10; i++) {
+ NUTS_PASS(nng_msg_alloc(&msg, 0));
+ NUTS_PASS(nng_sendmsg(s1, msg, 0));
+ }
+
+ NUTS_CLOSE(s1);
+ NUTS_CLOSE(c1);
+}
+
+void
+test_poly_cooked(void)
+{
+ nng_socket s1;
+ nng_socket c1;
+ nng_socket c2;
+ nng_msg * msg;
+ bool v;
+ nng_pipe p1;
+ nng_pipe p2;
+
+ NUTS_PASS(nng_pair1_open_poly(&s1));
+ NUTS_PASS(nng_pair1_open(&c1));
+ NUTS_PASS(nng_pair1_open(&c2));
+ NUTS_PASS(nng_setopt_ms(s1, NNG_OPT_SENDTIMEO, SECOND));
+ NUTS_PASS(nng_setopt_ms(c1, NNG_OPT_SENDTIMEO, SECOND));
+ NUTS_PASS(nng_setopt_ms(c2, NNG_OPT_SENDTIMEO, SECOND));
+ NUTS_PASS(nng_setopt_ms(s1, NNG_OPT_RECVTIMEO, SECOND / 10));
+ NUTS_PASS(nng_setopt_ms(c1, NNG_OPT_RECVTIMEO, SECOND / 10));
+ NUTS_PASS(nng_setopt_ms(c2, NNG_OPT_RECVTIMEO, SECOND / 10));
+
+ NUTS_PASS(nng_getopt_bool(s1, NNG_OPT_PAIR1_POLY, &v));
+ NUTS_TRUE(v);
+
+ NUTS_MARRY(s1, c1);
+ NUTS_MARRY(s1, c2);
+
+ NUTS_PASS(nng_msg_alloc(&msg, 0));
+ APPEND_STR(msg, "ONE");
+ NUTS_PASS(nng_sendmsg(c1, msg, 0));
+ NUTS_PASS(nng_recvmsg(s1, &msg, 0));
+ CHECK_STR(msg, "ONE");
+ p1 = nng_msg_get_pipe(msg);
+ NUTS_TRUE(nng_pipe_id(p1) > 0);
+ nng_msg_free(msg);
+
+ NUTS_PASS(nng_msg_alloc(&msg, 0));
+ APPEND_STR(msg, "TWO");
+ NUTS_PASS(nng_sendmsg(c2, msg, 0));
+ NUTS_PASS(nng_recvmsg(s1, &msg, 0));
+ CHECK_STR(msg, "TWO");
+ p2 = nng_msg_get_pipe(msg);
+ NUTS_TRUE(nng_pipe_id(p2) > 0);
+ nng_msg_free(msg);
+
+ NUTS_TRUE(nng_pipe_id(p1) != nng_pipe_id(p2));
+
+ NUTS_PASS(nng_msg_alloc(&msg, 0));
+
+ nng_msg_set_pipe(msg, p1);
+ APPEND_STR(msg, "UNO");
+ NUTS_PASS(nng_sendmsg(s1, msg, 0));
+ NUTS_PASS(nng_recvmsg(c1, &msg, 0));
+ CHECK_STR(msg, "UNO");
+ nng_msg_free(msg);
+
+ NUTS_PASS(nng_msg_alloc(&msg, 0));
+ nng_msg_set_pipe(msg, p2);
+ APPEND_STR(msg, "DOS");
+ NUTS_PASS(nng_sendmsg(s1, msg, 0));
+ NUTS_PASS(nng_recvmsg(c2, &msg, 0));
+ CHECK_STR(msg, "DOS");
+ nng_msg_free(msg);
+
+ NUTS_PASS(nng_close(c1));
+
+ NUTS_PASS(nng_msg_alloc(&msg, 0));
+ nng_msg_set_pipe(msg, p1);
+ APPEND_STR(msg, "EIN");
+ NUTS_PASS(nng_sendmsg(s1, msg, 0));
+ NUTS_FAIL(nng_recvmsg(c2, &msg, 0), NNG_ETIMEDOUT);
+
+ NUTS_CLOSE(s1);
+ NUTS_CLOSE(c2);
+}
+
+void
+test_poly_default(void)
+{
+ nng_socket s1;
+ nng_socket c1;
+ nng_socket c2;
+ nng_msg * msg;
+
+ NUTS_PASS(nng_pair1_open_poly(&s1));
+ NUTS_PASS(nng_pair1_open(&c1));
+ NUTS_PASS(nng_pair1_open(&c2));
+ NUTS_PASS(nng_setopt_ms(s1, NNG_OPT_SENDTIMEO, SECOND));
+ NUTS_PASS(nng_setopt_ms(c1, NNG_OPT_SENDTIMEO, SECOND));
+ NUTS_PASS(nng_setopt_ms(c2, NNG_OPT_SENDTIMEO, SECOND));
+
+ NUTS_MARRY(s1, c1);
+ NUTS_MARRY(s1, c2);
+
+ // This assumes poly picks the first suitor. Applications
+ // should not make the same assumption.
+ NUTS_PASS(nng_msg_alloc(&msg, 0));
+ APPEND_STR(msg, "YES");
+ NUTS_PASS(nng_sendmsg(s1, msg, 0));
+ NUTS_PASS(nng_recvmsg(c1, &msg, 0));
+ CHECK_STR(msg, "YES");
+ nng_msg_free(msg);
+
+ NUTS_CLOSE(c1);
+ NUTS_SLEEP(10);
+
+ // Verify that the other pipe is chosen as the next suitor.
+ NUTS_PASS(nng_msg_alloc(&msg, 0));
+ APPEND_STR(msg, "AGAIN");
+ NUTS_PASS(nng_sendmsg(s1, msg, 0));
+ NUTS_PASS(nng_recvmsg(c2, &msg, 0));
+ CHECK_STR(msg, "AGAIN");
+ nng_msg_free(msg);
+
+ NUTS_CLOSE(s1);
+ NUTS_CLOSE(c2);
+}
+
+void
+test_poly_close_abort(void)
+{
+ nng_socket s;
+ nng_socket c;
+
+ NUTS_PASS(nng_pair1_open_poly(&s));
+ NUTS_PASS(nng_pair1_open(&c));
+ NUTS_PASS(nng_setopt_ms(s, NNG_OPT_RECVTIMEO, 100));
+ NUTS_PASS(nng_setopt_ms(s, NNG_OPT_SENDTIMEO, 200));
+ NUTS_PASS(nng_setopt_int(s, NNG_OPT_RECVBUF, 1));
+ NUTS_PASS(nng_setopt_int(c, NNG_OPT_SENDBUF, 20));
+
+ NUTS_MARRY(c, s);
+
+ for (int i = 0; i < 20; i++) {
+ NUTS_SEND(c, "TEST");
+ }
+ NUTS_SLEEP(50);
+
+ NUTS_CLOSE(s);
+ NUTS_CLOSE(c);
+}
+
+void
+test_poly_recv_no_header(void)
+{
+ nng_socket s;
+ nng_socket c;
+ nng_msg * m;
+
+ NUTS_PASS(nng_pair1_open_poly(&s));
+ NUTS_PASS(nng_pair1_open(&c));
+ NUTS_PASS(nng_setopt_bool(c, "pair1_test_inject_header", true));
+ NUTS_PASS(nng_setopt_ms(s, NNG_OPT_RECVTIMEO, 100));
+ NUTS_PASS(nng_setopt_ms(s, NNG_OPT_SENDTIMEO, 200));
+
+ NUTS_MARRY(c, s);
+
+ NUTS_PASS(nng_msg_alloc(&m, 0));
+ NUTS_PASS(nng_sendmsg(c, m, 0));
+ NUTS_FAIL(nng_recvmsg(s, &m, 0), NNG_ETIMEDOUT);
+
+ NUTS_CLOSE(c);
+ NUTS_CLOSE(s);
+}
+
+void
+test_poly_recv_garbage(void)
+{
+ nng_socket s;
+ nng_socket c;
+ nng_msg * m;
+
+ NUTS_PASS(nng_pair1_open_poly(&s));
+ NUTS_PASS(nng_pair1_open(&c));
+ NUTS_PASS(nng_setopt_bool(c, "pair1_test_inject_header", true));
+ NUTS_PASS(nng_setopt_ms(s, NNG_OPT_RECVTIMEO, 100));
+ NUTS_PASS(nng_setopt_ms(s, NNG_OPT_SENDTIMEO, 200));
+
+ NUTS_MARRY(c, s);
+
+ // ridiculous hop count
+ NUTS_PASS(nng_msg_alloc(&m, 0));
+ NUTS_PASS(nng_msg_append_u32(m, 0x1000));
+ NUTS_PASS(nng_sendmsg(c, m, 0));
+ NUTS_FAIL(nng_recvmsg(s, &m, 0), NNG_ETIMEDOUT);
+
+ NUTS_CLOSE(c);
+ NUTS_CLOSE(s);
+}
+
+void
+test_poly_ttl(void)
+{
+ nng_socket s1;
+ nng_socket c1;
+ nng_msg * msg;
+ uint32_t val;
+ int ttl;
+
+ NUTS_PASS(nng_pair1_open_poly(&s1));
+ NUTS_PASS(nng_pair1_open_raw(&c1));
+ NUTS_PASS(nng_setopt_ms(s1, NNG_OPT_RECVTIMEO, SECOND / 5));
+ NUTS_PASS(nng_setopt_ms(c1, NNG_OPT_RECVTIMEO, SECOND / 5));
+
+ // cannot set insane TTLs
+ NUTS_FAIL(nng_setopt_int(s1, NNG_OPT_MAXTTL, 0), NNG_EINVAL);
+ NUTS_FAIL(nng_setopt_int(s1, NNG_OPT_MAXTTL, 1000), NNG_EINVAL);
+ ttl = 8;
+ NUTS_FAIL(nng_setopt(s1, NNG_OPT_MAXTTL, &ttl, 1), NNG_EINVAL);
+ NUTS_FAIL(nng_setopt_bool(s1, NNG_OPT_MAXTTL, true), NNG_EBADTYPE);
+
+ NUTS_MARRY(s1, c1);
+
+ // Let's check enforcement of TTL
+ NUTS_PASS(nng_setopt_int(s1, NNG_OPT_MAXTTL, 4));
+ NUTS_PASS(nng_getopt_int(s1, NNG_OPT_MAXTTL, &ttl));
+ NUTS_TRUE(ttl == 4);
+
+ // Bad TTL bounces
+ NUTS_PASS(nng_msg_alloc(&msg, 0));
+ NUTS_PASS(nng_msg_header_append_u32(msg, 4));
+ NUTS_PASS(nng_sendmsg(c1, msg, 0));
+ NUTS_FAIL(nng_recvmsg(s1, &msg, 0), NNG_ETIMEDOUT);
+
+ // Good TTL passes
+ NUTS_PASS(nng_msg_alloc(&msg, 0));
+ NUTS_PASS(nng_msg_append_u32(msg, 0xFEEDFACE));
+ NUTS_PASS(nng_msg_header_append_u32(msg, 3));
+ NUTS_PASS(nng_sendmsg(c1, msg, 0));
+ NUTS_PASS(nng_recvmsg(s1, &msg, 0));
+ NUTS_PASS(nng_msg_trim_u32(msg, &val));
+ NUTS_TRUE(val == 0xFEEDFACE);
+ NUTS_PASS(nng_msg_header_trim_u32(msg, &val));
+ NUTS_TRUE(val == 4);
+ nng_msg_free(msg);
+
+ // Large TTL passes
+ NUTS_PASS(nng_setopt_int(s1, NNG_OPT_MAXTTL, 15));
+ NUTS_PASS(nng_msg_alloc(&msg, 0));
+ NUTS_PASS(nng_msg_append_u32(msg, 1234));
+ NUTS_PASS(nng_msg_header_append_u32(msg, 14));
+ NUTS_PASS(nng_sendmsg(c1, msg, 0));
+ NUTS_PASS(nng_recvmsg(s1, &msg, 0));
+ NUTS_PASS(nng_msg_trim_u32(msg, &val));
+ NUTS_TRUE(val == 1234);
+ NUTS_PASS(nng_msg_header_trim_u32(msg, &val));
+ NUTS_TRUE(val == 15);
+ nng_msg_free(msg);
+
+ // Max TTL fails
+ NUTS_PASS(nng_setopt_int(s1, NNG_OPT_MAXTTL, 15));
+ NUTS_PASS(nng_msg_alloc(&msg, 0));
+ NUTS_PASS(nng_msg_header_append_u32(msg, 15));
+ NUTS_PASS(nng_sendmsg(c1, msg, 0));
+ NUTS_FAIL(nng_recvmsg(s1, &msg, 0), NNG_ETIMEDOUT);
+
+ NUTS_CLOSE(s1);
+ NUTS_CLOSE(c1);
+}
+
+void
+test_poly_validate_peer(void)
+{
+ nng_socket s1, s2;
+ nng_stat * stats;
+ nng_stat * reject;
+ char * addr;
+
+ NUTS_ADDR(addr, "inproc");
+
+ NUTS_PASS(nng_pair1_open_poly(&s1));
+ NUTS_PASS(nng_pair0_open(&s2));
+
+ NUTS_PASS(nng_listen(s1, addr, NULL, 0));
+ NUTS_PASS(nng_dial(s2, addr, NULL, NNG_FLAG_NONBLOCK));
+
+ NUTS_SLEEP(100);
+ NUTS_PASS(nng_stats_get(&stats));
+
+ NUTS_TRUE(stats != NULL);
+ NUTS_TRUE((reject = nng_stat_find_socket(stats, s1)) != NULL);
+ NUTS_TRUE((reject = nng_stat_find(reject, "reject")) != NULL);
+
+ NUTS_TRUE(nng_stat_type(reject) == NNG_STAT_COUNTER);
+ NUTS_TRUE(nng_stat_value(reject) > 0);
+
+ NUTS_CLOSE(s1);
+ NUTS_CLOSE(s2);
+ nng_stats_free(stats);
+}
+
+TEST_LIST = {
+ { "pair1 poly identity", test_poly_identity },
+ { "pair1 poly best effort", test_poly_best_effort },
+ { "pair1 poly cooked", test_poly_cooked },
+ { "pair1 poly default", test_poly_default },
+ { "pair1 poly recv no header", test_poly_recv_no_header },
+ { "pair1 poly recv garbage", test_poly_recv_garbage },
+ { "pair1 poly ttl", test_poly_ttl },
+ { "pair1 poly close abort", test_poly_close_abort },
+ { "pair1 poly validate peer", test_poly_validate_peer },
+
+ { NULL, NULL },
+};
diff --git a/src/sp/protocol/pair1/pair1_test.c b/src/sp/protocol/pair1/pair1_test.c
new file mode 100644
index 00000000..881c4ac8
--- /dev/null
+++ b/src/sp/protocol/pair1/pair1_test.c
@@ -0,0 +1,433 @@
+//
+// Copyright 2020 Staysail Systems, Inc. <info@staysail.tech>
+// Copyright 2017 Capitar IT Group BV <info@capitar.com>
+//
+// This software is supplied under the terms of the MIT License, a
+// copy of which should be located in the distribution where this
+// file was obtained (LICENSE.txt). A copy of the license may also be
+// found online at https://opensource.org/licenses/MIT.
+//
+
+#include <nuts.h>
+
+#define SECOND 1000
+
+#define APPEND_STR(m, s) NUTS_TRUE(nng_msg_append(m, s, strlen(s)) == 0)
+#define CHECK_STR(m, s) \
+ NUTS_TRUE(nng_msg_len(m) == strlen(s)); \
+ NUTS_TRUE(memcmp(nng_msg_body(m), s, strlen(s)) == 0)
+
+static void
+test_mono_identity(void)
+{
+ nng_socket s;
+ int p;
+ char * n;
+
+ NUTS_PASS(nng_pair1_open(&s));
+ NUTS_PASS(nng_socket_get_int(s, NNG_OPT_PROTO, &p));
+	NUTS_TRUE(p == NUTS_PROTO(1u, 1u)); // 17
+	NUTS_PASS(nng_socket_get_int(s, NNG_OPT_PEER, &p));
+	NUTS_TRUE(p == NUTS_PROTO(1u, 1u)); // 17
+ NUTS_PASS(nng_socket_get_string(s, NNG_OPT_PROTONAME, &n));
+ NUTS_MATCH(n, "pair1");
+ nng_strfree(n);
+ NUTS_PASS(nng_socket_get_string(s, NNG_OPT_PEERNAME, &n));
+ NUTS_MATCH(n, "pair1");
+ nng_strfree(n);
+ NUTS_CLOSE(s);
+}
+
+void
+test_mono_cooked(void)
+{
+ nng_socket s1;
+ nng_socket c1;
+ nng_msg * msg;
+
+ NUTS_PASS(nng_pair1_open(&s1));
+ NUTS_PASS(nng_pair1_open(&c1));
+ NUTS_PASS(nuts_marry(s1, c1));
+
+ NUTS_PASS(nng_msg_alloc(&msg, 0));
+ NUTS_PASS(nng_msg_append(msg, "ALPHA", strlen("ALPHA") + 1));
+ NUTS_PASS(nng_sendmsg(c1, msg, 0));
+ NUTS_PASS(nng_recvmsg(s1, &msg, 0));
+ NUTS_TRUE(nng_msg_len(msg) == strlen("ALPHA") + 1);
+ NUTS_MATCH(nng_msg_body(msg), "ALPHA");
+ nng_msg_free(msg);
+
+ NUTS_PASS(nng_msg_alloc(&msg, 0));
+ NUTS_PASS(nng_msg_append(msg, "BETA", strlen("BETA") + 1));
+ NUTS_PASS(nng_sendmsg(s1, msg, 0));
+ NUTS_PASS(nng_recvmsg(c1, &msg, 0));
+ NUTS_TRUE(nng_msg_len(msg) == strlen("BETA") + 1);
+ NUTS_MATCH(nng_msg_body(msg), "BETA");
+
+ nng_msg_free(msg);
+ NUTS_CLOSE(c1);
+ NUTS_CLOSE(s1);
+}
+
+void
+test_mono_faithful(void)
+{
+ nng_socket s1;
+ nng_socket c1;
+ nng_socket c2;
+ nng_msg * msg;
+ const char *addr = "inproc://pair1_mono_faithful";
+
+ NUTS_PASS(nng_pair1_open(&s1));
+ NUTS_PASS(nng_pair1_open(&c1));
+ NUTS_PASS(nng_pair1_open(&c2));
+ NUTS_PASS(nng_setopt_ms(s1, NNG_OPT_RECVTIMEO, SECOND / 4));
+ NUTS_PASS(nng_setopt_ms(c1, NNG_OPT_SENDTIMEO, SECOND));
+ NUTS_PASS(nng_setopt_ms(c2, NNG_OPT_SENDTIMEO, SECOND));
+ NUTS_PASS(nng_setopt_int(c2, NNG_OPT_SENDBUF, 2));
+
+ NUTS_PASS(nng_listen(s1, addr, NULL, 0));
+ NUTS_MARRY(s1, c1);
+ NUTS_PASS(nng_dial(c2, addr, NULL, 0));
+
+ NUTS_SLEEP(100);
+
+ NUTS_PASS(nng_msg_alloc(&msg, 0));
+ APPEND_STR(msg, "ONE");
+ NUTS_PASS(nng_sendmsg(c1, msg, 0));
+ NUTS_PASS(nng_recvmsg(s1, &msg, 0));
+ CHECK_STR(msg, "ONE");
+ nng_msg_free(msg);
+
+ NUTS_PASS(nng_msg_alloc(&msg, 0));
+ APPEND_STR(msg, "TWO");
+ NUTS_PASS(nng_sendmsg(c2, msg, 0));
+ NUTS_FAIL(nng_recvmsg(s1, &msg, 0), NNG_ETIMEDOUT);
+
+ NUTS_CLOSE(s1);
+ NUTS_CLOSE(c1);
+ NUTS_CLOSE(c2);
+}
+
+void
+test_mono_back_pressure(void)
+{
+ nng_socket s1;
+ nng_socket c1;
+ int i;
+ int rv;
+ nng_msg * msg;
+ nng_duration to = 100;
+
+ NUTS_PASS(nng_pair1_open(&s1));
+ NUTS_PASS(nng_pair1_open(&c1));
+ NUTS_PASS(nng_setopt_int(s1, NNG_OPT_RECVBUF, 1));
+ NUTS_PASS(nng_setopt_int(s1, NNG_OPT_SENDBUF, 1));
+ NUTS_PASS(nng_setopt_int(c1, NNG_OPT_RECVBUF, 1));
+ NUTS_PASS(nng_setopt_ms(s1, NNG_OPT_SENDTIMEO, to));
+
+ NUTS_MARRY(s1, c1);
+
+	// Some buffering is allowed, but the buffer size is just 1, so
+	// sends should begin to time out after only a couple of messages.
+ for (i = 0, rv = 0; i < 10; i++) {
+ NUTS_PASS(nng_msg_alloc(&msg, 0));
+ if ((rv = nng_sendmsg(s1, msg, 0)) != 0) {
+ nng_msg_free(msg);
+ break;
+ }
+ }
+ NUTS_FAIL(rv, NNG_ETIMEDOUT);
+ NUTS_TRUE(i < 10);
+ NUTS_CLOSE(s1);
+ NUTS_CLOSE(c1);
+}
+
+void
+test_mono_raw_exchange(void)
+{
+ nng_socket s1;
+ nng_socket c1;
+
+ nng_msg *msg;
+ uint32_t hops;
+
+ NUTS_PASS(nng_pair1_open_raw(&s1));
+ NUTS_PASS(nng_pair1_open_raw(&c1));
+
+ NUTS_PASS(nng_setopt_ms(s1, NNG_OPT_RECVTIMEO, SECOND));
+ NUTS_PASS(nng_setopt_ms(c1, NNG_OPT_RECVTIMEO, SECOND));
+ NUTS_MARRY(s1, c1);
+
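+	// In raw mode the application manages the pair1 header itself: a
+	// 32-bit word whose low bits carry the hop count, which is bumped
+	// as the message traverses each pipe (so a count of 1 arrives as 2).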
+ nng_pipe p = NNG_PIPE_INITIALIZER;
+ NUTS_PASS(nng_msg_alloc(&msg, 0));
+ APPEND_STR(msg, "GAMMA");
+ NUTS_PASS(nng_msg_header_append_u32(msg, 1));
+ NUTS_TRUE(nng_msg_header_len(msg) == sizeof(uint32_t));
+ NUTS_PASS(nng_sendmsg(c1, msg, 0));
+ NUTS_PASS(nng_recvmsg(s1, &msg, 0));
+ p = nng_msg_get_pipe(msg);
+ NUTS_TRUE(nng_pipe_id(p) > 0);
+
+ CHECK_STR(msg, "GAMMA");
+ NUTS_TRUE(nng_msg_header_len(msg) == sizeof(uint32_t));
+ NUTS_PASS(nng_msg_header_trim_u32(msg, &hops));
+ NUTS_TRUE(hops == 2);
+ nng_msg_free(msg);
+
+ NUTS_PASS(nng_msg_alloc(&msg, 0));
+ APPEND_STR(msg, "EPSILON");
+ NUTS_PASS(nng_msg_header_append_u32(msg, 1));
+ NUTS_PASS(nng_sendmsg(s1, msg, 0));
+ NUTS_PASS(nng_recvmsg(c1, &msg, 0));
+ CHECK_STR(msg, "EPSILON");
+ NUTS_TRUE(nng_msg_header_len(msg) == sizeof(uint32_t));
+ NUTS_PASS(nng_msg_header_trim_u32(msg, &hops));
+ p = nng_msg_get_pipe(msg);
+ NUTS_TRUE(nng_pipe_id(p) > 0);
+
+ NUTS_TRUE(hops == 2);
+ nng_msg_free(msg);
+
+ NUTS_CLOSE(s1);
+ NUTS_CLOSE(c1);
+}
+
+void
+test_mono_raw_header(void)
+{
+ nng_socket s1;
+ nng_socket c1;
+ nng_msg * msg;
+ uint32_t v;
+
+ NUTS_PASS(nng_pair1_open_raw(&s1));
+ NUTS_PASS(nng_pair1_open_raw(&c1));
+
+ NUTS_PASS(nng_setopt_ms(s1, NNG_OPT_RECVTIMEO, SECOND / 5));
+ NUTS_PASS(nng_setopt_ms(c1, NNG_OPT_RECVTIMEO, SECOND / 5));
+ NUTS_MARRY(s1, c1);
+
+ // Missing bits in the header
+ NUTS_PASS(nng_msg_alloc(&msg, 0));
+ NUTS_PASS(nng_sendmsg(c1, msg, 0));
+ NUTS_FAIL(nng_recvmsg(s1, &msg, 0), NNG_ETIMEDOUT);
+
+ // Valid header works
+ NUTS_PASS(nng_msg_alloc(&msg, 0));
+ NUTS_PASS(nng_msg_append_u32(msg, 0xFEEDFACE));
+ NUTS_PASS(nng_msg_header_append_u32(msg, 1));
+ NUTS_PASS(nng_sendmsg(c1, msg, 0));
+ NUTS_PASS(nng_recvmsg(s1, &msg, 0));
+ NUTS_PASS(nng_msg_trim_u32(msg, &v));
+ NUTS_TRUE(v == 0xFEEDFACE);
+ nng_msg_free(msg);
+
+ // Header with reserved bits set dropped
+ NUTS_PASS(nng_msg_alloc(&msg, 0));
+ NUTS_PASS(nng_msg_header_append_u32(msg, 0xDEAD0000));
+ NUTS_PASS(nng_sendmsg(c1, msg, 0));
+ NUTS_FAIL(nng_recvmsg(s1, &msg, 0), NNG_ETIMEDOUT);
+
+ // Header with no chance to add another hop gets dropped
+ NUTS_PASS(nng_msg_alloc(&msg, 0));
+ NUTS_PASS(nng_msg_header_append_u32(msg, 0xff));
+ NUTS_PASS(nng_sendmsg(c1, msg, 0));
+ NUTS_FAIL(nng_recvmsg(s1, &msg, 0), NNG_ETIMEDOUT);
+
+ // With the same bits clear it works
+ NUTS_PASS(nng_msg_alloc(&msg, 0));
+ NUTS_PASS(nng_msg_append_u32(msg, 0xFEEDFACE));
+ NUTS_PASS(nng_msg_header_append_u32(msg, 1));
+ NUTS_PASS(nng_sendmsg(c1, msg, 0));
+ NUTS_PASS(nng_recvmsg(s1, &msg, 0));
+ NUTS_PASS(nng_msg_trim_u32(msg, &v));
+ NUTS_TRUE(v == 0xFEEDFACE);
+ nng_msg_free(msg);
+
+ NUTS_CLOSE(s1);
+ NUTS_CLOSE(c1);
+}
+
+void
+test_pair1_raw(void)
+{
+ nng_socket s1;
+ bool raw;
+
+ NUTS_PASS(nng_pair1_open(&s1));
+ NUTS_PASS(nng_getopt_bool(s1, NNG_OPT_RAW, &raw));
+ NUTS_TRUE(raw == false);
+ NUTS_FAIL(nng_setopt_bool(s1, NNG_OPT_RAW, true), NNG_EREADONLY);
+ NUTS_PASS(nng_close(s1));
+
+ NUTS_PASS(nng_pair1_open_raw(&s1));
+ NUTS_PASS(nng_getopt_bool(s1, NNG_OPT_RAW, &raw));
+ NUTS_TRUE(raw == true);
+ NUTS_FAIL(nng_setopt_bool(s1, NNG_OPT_RAW, false), NNG_EREADONLY);
+ NUTS_PASS(nng_close(s1));
+}
+
+void
+test_pair1_ttl(void)
+{
+ nng_socket s1;
+ nng_socket c1;
+ nng_msg * msg;
+ uint32_t val;
+ int ttl;
+
+ NUTS_PASS(nng_pair1_open_raw(&s1));
+ NUTS_PASS(nng_pair1_open_raw(&c1));
+ NUTS_PASS(nng_setopt_ms(s1, NNG_OPT_RECVTIMEO, SECOND / 5));
+ NUTS_PASS(nng_setopt_ms(c1, NNG_OPT_RECVTIMEO, SECOND / 5));
+
+ // cannot set insane TTLs
+ NUTS_FAIL(nng_setopt_int(s1, NNG_OPT_MAXTTL, 0), NNG_EINVAL);
+ NUTS_FAIL(nng_setopt_int(s1, NNG_OPT_MAXTTL, 1000), NNG_EINVAL);
+ ttl = 8;
+ NUTS_FAIL(nng_setopt(s1, NNG_OPT_MAXTTL, &ttl, 1), NNG_EINVAL);
+ NUTS_FAIL(nng_setopt_bool(s1, NNG_OPT_MAXTTL, true), NNG_EBADTYPE);
+
+ NUTS_MARRY(s1, c1);
+
+ // Let's check enforcement of TTL
+ NUTS_PASS(nng_setopt_int(s1, NNG_OPT_MAXTTL, 4));
+ NUTS_PASS(nng_getopt_int(s1, NNG_OPT_MAXTTL, &ttl));
+ NUTS_TRUE(ttl == 4);
+
+ // Bad TTL bounces
+ NUTS_PASS(nng_msg_alloc(&msg, 0));
+ NUTS_PASS(nng_msg_header_append_u32(msg, 4));
+ NUTS_PASS(nng_sendmsg(c1, msg, 0));
+ NUTS_FAIL(nng_recvmsg(s1, &msg, 0), NNG_ETIMEDOUT);
+
+ // Good TTL passes
+ NUTS_PASS(nng_msg_alloc(&msg, 0));
+ NUTS_PASS(nng_msg_append_u32(msg, 0xFEEDFACE));
+ NUTS_PASS(nng_msg_header_append_u32(msg, 3));
+ NUTS_PASS(nng_sendmsg(c1, msg, 0));
+ NUTS_PASS(nng_recvmsg(s1, &msg, 0));
+ NUTS_PASS(nng_msg_trim_u32(msg, &val));
+ NUTS_TRUE(val == 0xFEEDFACE);
+ NUTS_PASS(nng_msg_header_trim_u32(msg, &val));
+ NUTS_TRUE(val == 4);
+ nng_msg_free(msg);
+
+ // Large TTL passes
+ NUTS_PASS(nng_setopt_int(s1, NNG_OPT_MAXTTL, 15));
+ NUTS_PASS(nng_msg_alloc(&msg, 0));
+ NUTS_PASS(nng_msg_append_u32(msg, 1234));
+ NUTS_PASS(nng_msg_header_append_u32(msg, 14));
+ NUTS_PASS(nng_sendmsg(c1, msg, 0));
+ NUTS_PASS(nng_recvmsg(s1, &msg, 0));
+ NUTS_PASS(nng_msg_trim_u32(msg, &val));
+ NUTS_TRUE(val == 1234);
+ NUTS_PASS(nng_msg_header_trim_u32(msg, &val));
+ NUTS_TRUE(val == 15);
+ nng_msg_free(msg);
+
+ // Max TTL fails
+ NUTS_PASS(nng_setopt_int(s1, NNG_OPT_MAXTTL, 15));
+ NUTS_PASS(nng_msg_alloc(&msg, 0));
+ NUTS_PASS(nng_msg_header_append_u32(msg, 15));
+ NUTS_PASS(nng_sendmsg(c1, msg, 0));
+ NUTS_FAIL(nng_recvmsg(s1, &msg, 0), NNG_ETIMEDOUT);
+
+ NUTS_CLOSE(s1);
+ NUTS_CLOSE(c1);
+}
+
+void
+test_pair1_validate_peer(void)
+{
+ nng_socket s1, s2;
+ nng_stat * stats;
+ nng_stat * reject;
+ char * addr;
+
+ NUTS_ADDR(addr, "inproc");
+ NUTS_PASS(nng_pair1_open(&s1));
+ NUTS_PASS(nng_pair0_open(&s2));
+
+ NUTS_PASS(nng_listen(s1, addr, NULL, 0));
+ NUTS_PASS(nng_dial(s2, addr, NULL, NNG_FLAG_NONBLOCK));
+
+ NUTS_SLEEP(100);
+ NUTS_PASS(nng_stats_get(&stats));
+
+ NUTS_TRUE(stats != NULL);
+ NUTS_TRUE((reject = nng_stat_find_socket(stats, s1)) != NULL);
+ NUTS_TRUE((reject = nng_stat_find(reject, "reject")) != NULL);
+
+ NUTS_TRUE(nng_stat_type(reject) == NNG_STAT_COUNTER);
+ NUTS_TRUE(nng_stat_value(reject) > 0);
+
+ NUTS_CLOSE(s1);
+ NUTS_CLOSE(s2);
+ nng_stats_free(stats);
+}
+
+void
+test_pair1_recv_no_header(void)
+{
+ nng_socket s;
+ nng_socket c;
+ nng_msg * m;
+
+ NUTS_PASS(nng_pair1_open(&s));
+ NUTS_PASS(nng_pair1_open(&c));
+ NUTS_PASS(nng_setopt_bool(c, "pair1_test_inject_header", true));
+ NUTS_PASS(nng_setopt_ms(s, NNG_OPT_RECVTIMEO, 100));
+ NUTS_PASS(nng_setopt_ms(s, NNG_OPT_SENDTIMEO, 200));
+
+ NUTS_MARRY(c, s);
+
+ NUTS_PASS(nng_msg_alloc(&m, 0));
+ NUTS_PASS(nng_sendmsg(c, m, 0));
+ NUTS_FAIL(nng_recvmsg(s, &m, 0), NNG_ETIMEDOUT);
+
+ NUTS_CLOSE(c);
+ NUTS_CLOSE(s);
+}
+
+void
+test_pair1_recv_garbage(void)
+{
+ nng_socket s;
+ nng_socket c;
+ nng_msg * m;
+
+ NUTS_PASS(nng_pair1_open(&s));
+ NUTS_PASS(nng_pair1_open(&c));
+ NUTS_PASS(nng_setopt_bool(c, "pair1_test_inject_header", true));
+ NUTS_PASS(nng_setopt_ms(s, NNG_OPT_RECVTIMEO, 100));
+ NUTS_PASS(nng_setopt_ms(s, NNG_OPT_SENDTIMEO, 200));
+
+ NUTS_MARRY(c, s);
+
+ // ridiculous hop count
+ NUTS_PASS(nng_msg_alloc(&m, 0));
+ NUTS_PASS(nng_msg_append_u32(m, 0x1000));
+ NUTS_PASS(nng_sendmsg(c, m, 0));
+ NUTS_FAIL(nng_recvmsg(s, &m, 0), NNG_ETIMEDOUT);
+
+ NUTS_CLOSE(c);
+ NUTS_CLOSE(s);
+}
+
+NUTS_TESTS = {
+ { "pair1 mono identity", test_mono_identity },
+ { "pair1 mono cooked", test_mono_cooked },
+ { "pair1 mono faithful", test_mono_faithful },
+ { "pair1 mono back pressure", test_mono_back_pressure },
+ { "pair1 mono raw exchange", test_mono_raw_exchange },
+ { "pair1 mono raw header", test_mono_raw_header },
+ { "pair1 raw", test_pair1_raw },
+ { "pair1 ttl", test_pair1_ttl },
+ { "pair1 validate peer", test_pair1_validate_peer },
+ { "pair1 recv no header", test_pair1_recv_no_header },
+ { "pair1 recv garbage", test_pair1_recv_garbage },
+
+ { NULL, NULL },
+};
diff --git a/src/sp/protocol/pipeline0/CMakeLists.txt b/src/sp/protocol/pipeline0/CMakeLists.txt
new file mode 100644
index 00000000..8a10eab7
--- /dev/null
+++ b/src/sp/protocol/pipeline0/CMakeLists.txt
@@ -0,0 +1,23 @@
+#
+# Copyright 2019 Staysail Systems, Inc. <info@staysail.tech>
+# Copyright 2018 Capitar IT Group BV <info@capitar.com>
+#
+# This software is supplied under the terms of the MIT License, a
+# copy of which should be located in the distribution where this
+# file was obtained (LICENSE.txt). A copy of the license may also be
+# found online at https://opensource.org/licenses/MIT.
+#
+
+# Pipeline protocol
+nng_directory(pipeline0)
+
+nng_sources_if(NNG_PROTO_PUSH0 push.c)
+nng_headers_if(NNG_PROTO_PUSH0 nng/protocol/pipeline0/push.h)
+nng_defines_if(NNG_PROTO_PUSH0 NNG_HAVE_PUSH0)
+
+nng_sources_if(NNG_PROTO_PULL0 pull.c)
+nng_headers_if(NNG_PROTO_PULL0 nng/protocol/pipeline0/pull.h)
+nng_defines_if(NNG_PROTO_PULL0 NNG_HAVE_PULL0)
+
+nng_test(pull_test)
+nng_test(push_test)
diff --git a/src/sp/protocol/pipeline0/pull.c b/src/sp/protocol/pipeline0/pull.c
new file mode 100644
index 00000000..616b0817
--- /dev/null
+++ b/src/sp/protocol/pipeline0/pull.c
@@ -0,0 +1,325 @@
+//
+// Copyright 2020 Staysail Systems, Inc. <info@staysail.tech>
+// Copyright 2018 Capitar IT Group BV <info@capitar.com>
+//
+// This software is supplied under the terms of the MIT License, a
+// copy of which should be located in the distribution where this
+// file was obtained (LICENSE.txt). A copy of the license may also be
+// found online at https://opensource.org/licenses/MIT.
+//
+
+#include <stdlib.h>
+
+#include "core/nng_impl.h"
+#include "nng/protocol/pipeline0/pull.h"
+
+// Pull protocol. The PULL protocol is the "read" side of a pipeline.
+
+#ifndef NNI_PROTO_PULL_V0
+#define NNI_PROTO_PULL_V0 NNI_PROTO(5, 1)
+#endif
+
+#ifndef NNI_PROTO_PUSH_V0
+#define NNI_PROTO_PUSH_V0 NNI_PROTO(5, 0)
+#endif
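+
+// NNI_PROTO(major, minor) packs the SP protocol number as (major * 16) +
+// minor, so PULL is 81 (5.1) and its PUSH peer is 80 (5.0), matching the
+// values checked by the identity tests for these sockets.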
+
+typedef struct pull0_pipe pull0_pipe;
+typedef struct pull0_sock pull0_sock;
+
+static void pull0_recv_cb(void *);
+
+// pull0_sock is our per-socket protocol private structure.
+struct pull0_sock {
+ bool raw;
+ nni_list pl; // pipe list (pipes with data ready)
+ nni_list rq; // recv queue (aio list)
+ nni_mtx m;
+ nni_pollable readable;
+};
+
+// pull0_pipe is our per-pipe protocol private structure.
+struct pull0_pipe {
+ nni_pipe * p;
+ pull0_sock * s;
+ nni_msg * m;
+ nni_aio aio;
+ bool closed;
+ nni_list_node node;
+};
+
+static int
+pull0_sock_init(void *arg, nni_sock *sock)
+{
+ pull0_sock *s = arg;
+ NNI_ARG_UNUSED(sock);
+
+ nni_aio_list_init(&s->rq);
+ NNI_LIST_INIT(&s->pl, pull0_pipe, node);
+ nni_mtx_init(&s->m);
+ nni_pollable_init(&s->readable);
+ return (0);
+}
+
+static void
+pull0_sock_fini(void *arg)
+{
+ pull0_sock *s = arg;
+ nni_mtx_fini(&s->m);
+ nni_pollable_fini(&s->readable);
+}
+
+static void
+pull0_pipe_stop(void *arg)
+{
+ pull0_pipe *p = arg;
+
+ nni_aio_stop(&p->aio);
+}
+
+static void
+pull0_pipe_fini(void *arg)
+{
+ pull0_pipe *p = arg;
+
+ nni_aio_fini(&p->aio);
+ if (p->m) {
+ nni_msg_free(p->m);
+ }
+}
+
+static int
+pull0_pipe_init(void *arg, nni_pipe *pipe, void *s)
+{
+ pull0_pipe *p = arg;
+
+ nni_aio_init(&p->aio, pull0_recv_cb, p);
+ p->p = pipe;
+ p->s = s;
+ return (0);
+}
+
+static int
+pull0_pipe_start(void *arg)
+{
+ pull0_pipe *p = arg;
+
+ if (nni_pipe_peer(p->p) != NNI_PROTO_PUSH_V0) {
+ // Peer protocol mismatch.
+ return (NNG_EPROTO);
+ }
+
+ // Start the pending receive...
+ nni_pipe_recv(p->p, &p->aio);
+
+ return (0);
+}
+
+static void
+pull0_pipe_close(void *arg)
+{
+ pull0_pipe *p = arg;
+ pull0_sock *s = p->s;
+
+ nni_mtx_lock(&s->m);
+ p->closed = true;
+ if (nni_list_node_active(&p->node)) {
+ nni_list_node_remove(&p->node);
+ if (nni_list_empty(&s->pl)) {
+ nni_pollable_clear(&s->readable);
+ }
+ }
+ nni_mtx_unlock(&s->m);
+
+ nni_aio_close(&p->aio);
+}
+
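+// Receive completion: if a receiver is already waiting, hand the message
+// straight to it and re-arm the pipe; otherwise hold the message on the
+// pipe, add the pipe to the ready list, and (if the list was empty) raise
+// the socket's readable pollable.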
+static void
+pull0_recv_cb(void *arg)
+{
+ pull0_pipe *p = arg;
+ pull0_sock *s = p->s;
+ nni_aio * ap = &p->aio;
+ nni_aio * as;
+ nni_msg * m;
+
+ if (nni_aio_result(ap) != 0) {
+ // Failed to get a message, probably the pipe is closed.
+ nni_pipe_close(p->p);
+ return;
+ }
+
+ // Got a message... start the put to send it up to the application.
+ m = nni_aio_get_msg(ap);
+ nni_aio_set_msg(ap, NULL);
+ nni_msg_set_pipe(m, nni_pipe_id(p->p));
+
+ nni_mtx_lock(&s->m);
+ if (p->closed) {
+ nni_mtx_unlock(&s->m);
+ nni_msg_free(m);
+ return;
+ }
+ if (nni_list_empty(&s->rq)) {
+ nni_list_append(&s->pl, p);
+ if (nni_list_first(&s->pl) == p) {
+ nni_pollable_raise(&s->readable);
+ }
+ p->m = m;
+ nni_mtx_unlock(&s->m);
+ return;
+ }
+ nni_pipe_recv(p->p, ap);
+ as = nni_list_first(&s->rq);
+ nni_aio_list_remove(as);
+ nni_mtx_unlock(&s->m);
+ nni_aio_set_msg(as, m);
+ nni_aio_finish_sync(as, 0, nni_msg_len(m));
+}
+
+static void
+pull0_sock_open(void *arg)
+{
+ NNI_ARG_UNUSED(arg);
+}
+
+static void
+pull0_sock_close(void *arg)
+{
+ pull0_sock *s = arg;
+ nni_aio * a;
+ nni_mtx_lock(&s->m);
+ while ((a = nni_list_first(&s->rq)) != NULL) {
+ nni_aio_list_remove(a);
+ nni_aio_finish_error(a, NNG_ECLOSED);
+ }
+ // NB: The common socket framework closes pipes before this.
+ nni_mtx_unlock(&s->m);
+}
+
+static void
+pull0_sock_send(void *arg, nni_aio *aio)
+{
+ NNI_ARG_UNUSED(arg);
+ nni_aio_finish_error(aio, NNG_ENOTSUP);
+}
+
+static void
+pull0_cancel(nni_aio *aio, void *arg, int rv)
+{
+ pull0_sock *s = arg;
+ nni_mtx_lock(&s->m);
+ if (nni_aio_list_active(aio)) {
+ nni_aio_list_remove(aio);
+ nni_aio_finish_error(aio, rv);
+ }
+ nni_mtx_unlock(&s->m);
+}
+
+static void
+pull0_sock_recv(void *arg, nni_aio *aio)
+{
+ pull0_sock *s = arg;
+ pull0_pipe *p;
+
+ if (nni_aio_begin(aio) != 0) {
+ return;
+ }
+
+ nni_mtx_lock(&s->m);
+ if ((p = nni_list_first(&s->pl)) == NULL) {
+
+ int rv;
+ if ((rv = nni_aio_schedule(aio, pull0_cancel, s)) != 0) {
+ nni_mtx_unlock(&s->m);
+ nni_aio_finish_error(aio, rv);
+ return;
+ }
+
+ nni_aio_list_append(&s->rq, aio);
+ nni_mtx_unlock(&s->m);
+ return;
+ }
+
+ nni_list_remove(&s->pl, p);
+ if (nni_list_empty(&s->pl)) {
+ nni_pollable_clear(&s->readable);
+ }
+ nni_aio_finish_msg(aio, p->m);
+ p->m = NULL;
+ nni_pipe_recv(p->p, &p->aio);
+ nni_mtx_unlock(&s->m);
+}
+
+static int
+pull0_sock_get_recv_fd(void *arg, void *buf, size_t *szp, nni_opt_type t)
+{
+ pull0_sock *s = arg;
+ int rv;
+ int fd;
+
+ if ((rv = nni_pollable_getfd(&s->readable, &fd)) != 0) {
+ return (rv);
+ }
+ return (nni_copyout_int(fd, buf, szp, t));
+}
+
+static nni_option pull0_sock_options[] = {
+ {
+ .o_name = NNG_OPT_RECVFD,
+ .o_get = pull0_sock_get_recv_fd,
+ },
+ // terminate list
+ {
+ .o_name = NULL,
+ },
+};
+
+static nni_proto_pipe_ops pull0_pipe_ops = {
+ .pipe_size = sizeof(pull0_pipe),
+ .pipe_init = pull0_pipe_init,
+ .pipe_fini = pull0_pipe_fini,
+ .pipe_start = pull0_pipe_start,
+ .pipe_close = pull0_pipe_close,
+ .pipe_stop = pull0_pipe_stop,
+};
+
+static nni_proto_sock_ops pull0_sock_ops = {
+ .sock_size = sizeof(pull0_sock),
+ .sock_init = pull0_sock_init,
+ .sock_fini = pull0_sock_fini,
+ .sock_open = pull0_sock_open,
+ .sock_close = pull0_sock_close,
+ .sock_send = pull0_sock_send,
+ .sock_recv = pull0_sock_recv,
+ .sock_options = pull0_sock_options,
+};
+
+static nni_proto pull0_proto = {
+ .proto_version = NNI_PROTOCOL_VERSION,
+ .proto_self = { NNI_PROTO_PULL_V0, "pull" },
+ .proto_peer = { NNI_PROTO_PUSH_V0, "push" },
+ .proto_flags = NNI_PROTO_FLAG_RCV,
+ .proto_pipe_ops = &pull0_pipe_ops,
+ .proto_sock_ops = &pull0_sock_ops,
+};
+
+static nni_proto pull0_proto_raw = {
+ .proto_version = NNI_PROTOCOL_VERSION,
+ .proto_self = { NNI_PROTO_PULL_V0, "pull" },
+ .proto_peer = { NNI_PROTO_PUSH_V0, "push" },
+ .proto_flags = NNI_PROTO_FLAG_RCV | NNI_PROTO_FLAG_RAW,
+ .proto_pipe_ops = &pull0_pipe_ops,
+ .proto_sock_ops = &pull0_sock_ops,
+};
+
+int
+nng_pull0_open(nng_socket *s)
+{
+ return (nni_proto_open(s, &pull0_proto));
+}
+
+int
+nng_pull0_open_raw(nng_socket *s)
+{
+ return (nni_proto_open(s, &pull0_proto_raw));
+}
diff --git a/src/sp/protocol/pipeline0/pull_test.c b/src/sp/protocol/pipeline0/pull_test.c
new file mode 100644
index 00000000..25066093
--- /dev/null
+++ b/src/sp/protocol/pipeline0/pull_test.c
@@ -0,0 +1,264 @@
+//
+// Copyright 2020 Staysail Systems, Inc. <info@staysail.tech>
+//
+// This software is supplied under the terms of the MIT License, a
+// copy of which should be located in the distribution where this
+// file was obtained (LICENSE.txt). A copy of the license may also be
+// found online at https://opensource.org/licenses/MIT.
+//
+
+#include <nuts.h>
+
+static void
+test_pull_identity(void)
+{
+ nng_socket s;
+ int p;
+ char * n;
+
+ NUTS_PASS(nng_pull0_open(&s));
+ NUTS_PASS(nng_socket_get_int(s, NNG_OPT_PROTO, &p));
+ NUTS_TRUE(p == NUTS_PROTO(5u, 1u)); // 81
+ NUTS_PASS(nng_socket_get_int(s, NNG_OPT_PEER, &p));
+ NUTS_TRUE(p == NUTS_PROTO(5u, 0u)); // 80
+ NUTS_PASS(nng_socket_get_string(s, NNG_OPT_PROTONAME, &n));
+ NUTS_MATCH(n, "pull");
+ nng_strfree(n);
+ NUTS_PASS(nng_socket_get_string(s, NNG_OPT_PEERNAME, &n));
+ NUTS_MATCH(n, "push");
+ nng_strfree(n);
+ NUTS_CLOSE(s);
+}
+
+static void
+test_pull_cannot_send(void)
+{
+ nng_socket s;
+
+ NUTS_PASS(nng_pull0_open(&s));
+ NUTS_FAIL(nng_send(s, "", 0, 0), NNG_ENOTSUP);
+ NUTS_CLOSE(s);
+}
+
+static void
+test_pull_no_context(void)
+{
+ nng_socket s;
+ nng_ctx ctx;
+
+ NUTS_PASS(nng_pull0_open(&s));
+ NUTS_FAIL(nng_ctx_open(&ctx, s), NNG_ENOTSUP);
+ NUTS_CLOSE(s);
+}
+
+static void
+test_pull_not_writeable(void)
+{
+ int fd;
+ nng_socket s;
+
+ NUTS_PASS(nng_pull0_open(&s));
+ NUTS_FAIL(nng_socket_get_int(s, NNG_OPT_SENDFD, &fd), NNG_ENOTSUP);
+ NUTS_CLOSE(s);
+}
+
+static void
+test_pull_poll_readable(void)
+{
+ int fd;
+ nng_socket pull;
+ nng_socket push;
+
+ NUTS_PASS(nng_pull0_open(&pull));
+ NUTS_PASS(nng_push0_open(&push));
+ NUTS_PASS(nng_socket_set_ms(pull, NNG_OPT_RECVTIMEO, 1000));
+ NUTS_PASS(nng_socket_set_ms(push, NNG_OPT_SENDTIMEO, 1000));
+ NUTS_PASS(nng_socket_get_int(pull, NNG_OPT_RECVFD, &fd));
+ NUTS_TRUE(fd >= 0);
+
+ // Not readable if not connected!
+ NUTS_TRUE(nuts_poll_fd(fd) == false);
+
+ // Even after connect (no message yet)
+ NUTS_MARRY(pull, push);
+ NUTS_TRUE(nuts_poll_fd(fd) == false);
+
+	// But once we send a message, it becomes readable.
+ NUTS_SEND(push, "abc");
+ NUTS_SLEEP(100);
+ NUTS_TRUE(nuts_poll_fd(fd));
+
+ // and receiving makes it no longer ready
+ NUTS_RECV(pull, "abc");
+ NUTS_TRUE(nuts_poll_fd(fd) == false);
+
+ NUTS_CLOSE(pull);
+ NUTS_CLOSE(push);
+}
+
+static void
+test_pull_close_pending(void)
+{
+ int fd;
+ nng_socket pull;
+ nng_socket push;
+ nng_pipe p1, p2;
+ char * addr;
+
+ NUTS_ADDR(addr, "inproc");
+
+ NUTS_PASS(nng_pull0_open(&pull));
+ NUTS_PASS(nng_push0_open(&push));
+ NUTS_PASS(nng_socket_get_int(pull, NNG_OPT_RECVFD, &fd));
+ NUTS_TRUE(fd >= 0);
+ NUTS_MARRY_EX(pull, push, addr, &p1, &p2);
+
+ // Send a message -- it's ready for reading.
+ NUTS_SEND(push, "abc");
+ NUTS_SLEEP(100);
+ NUTS_TRUE(nuts_poll_fd(fd));
+
+ // NB: We have to close the pipe instead of the socket.
+ // This is because the socket won't notice the remote pipe
+ // disconnect until we collect the message and start another
+ // receive operation.
+ nng_pipe_close(p1);
+ nng_pipe_close(p2);
+
+ NUTS_SLEEP(100);
+ NUTS_TRUE(nuts_poll_fd(fd) == false);
+
+ NUTS_CLOSE(pull);
+ NUTS_CLOSE(push);
+}
+
+void
+test_pull_validate_peer(void)
+{
+ nng_socket s1, s2;
+ nng_stat * stats;
+ nng_stat * reject;
+ char * addr;
+
+ NUTS_ADDR(addr, "inproc");
+
+ NUTS_PASS(nng_pull0_open(&s1));
+ NUTS_PASS(nng_pull0_open(&s2));
+
+ NUTS_PASS(nng_listen(s1, addr, NULL, 0));
+ NUTS_PASS(nng_dial(s2, addr, NULL, NNG_FLAG_NONBLOCK));
+
+ NUTS_SLEEP(100);
+ NUTS_PASS(nng_stats_get(&stats));
+
+ NUTS_TRUE(stats != NULL);
+ NUTS_TRUE((reject = nng_stat_find_socket(stats, s1)) != NULL);
+ NUTS_TRUE((reject = nng_stat_find(reject, "reject")) != NULL);
+
+ NUTS_TRUE(nng_stat_type(reject) == NNG_STAT_COUNTER);
+ NUTS_TRUE(nng_stat_value(reject) > 0);
+
+ NUTS_CLOSE(s1);
+ NUTS_CLOSE(s2);
+ nng_stats_free(stats);
+}
+
+static void
+test_pull_recv_aio_stopped(void)
+{
+ nng_socket s;
+ nng_aio * aio;
+
+ NUTS_PASS(nng_pull0_open(&s));
+ NUTS_PASS(nng_aio_alloc(&aio, NULL, NULL));
+
+ nng_aio_stop(aio);
+ nng_recv_aio(s, aio);
+ nng_aio_wait(aio);
+ NUTS_FAIL(nng_aio_result(aio), NNG_ECANCELED);
+ NUTS_CLOSE(s);
+ nng_aio_free(aio);
+}
+
+static void
+test_pull_close_recv(void)
+{
+ nng_socket s;
+ nng_aio * aio;
+
+ NUTS_PASS(nng_pull0_open(&s));
+ NUTS_PASS(nng_aio_alloc(&aio, NULL, NULL));
+ nng_aio_set_timeout(aio, 1000);
+ nng_recv_aio(s, aio);
+ NUTS_PASS(nng_close(s));
+ nng_aio_wait(aio);
+ NUTS_FAIL(nng_aio_result(aio), NNG_ECLOSED);
+
+ nng_aio_free(aio);
+}
+
+static void
+test_pull_recv_nonblock(void)
+{
+ nng_socket s;
+ nng_aio * aio;
+
+ NUTS_PASS(nng_pull0_open(&s));
+ NUTS_PASS(nng_aio_alloc(&aio, NULL, NULL));
+
+ nng_aio_set_timeout(aio, 0); // Instant timeout
+ nng_recv_aio(s, aio);
+
+ nng_aio_wait(aio);
+ NUTS_FAIL(nng_aio_result(aio), NNG_ETIMEDOUT);
+ NUTS_CLOSE(s);
+ nng_aio_free(aio);
+}
+
+static void
+test_pull_recv_cancel(void)
+{
+ nng_socket s;
+ nng_aio * aio;
+
+ NUTS_PASS(nng_pull0_open(&s));
+ NUTS_PASS(nng_aio_alloc(&aio, NULL, NULL));
+
+ nng_aio_set_timeout(aio, 1000);
+ nng_recv_aio(s, aio);
+ nng_aio_abort(aio, NNG_ECANCELED);
+
+ nng_aio_wait(aio);
+ NUTS_FAIL(nng_aio_result(aio), NNG_ECANCELED);
+ NUTS_CLOSE(s);
+ nng_aio_free(aio);
+}
+
+static void
+test_pull_cooked(void)
+{
+ nng_socket s;
+ bool b;
+
+ NUTS_PASS(nng_pull0_open(&s));
+ NUTS_PASS(nng_socket_get_bool(s, NNG_OPT_RAW, &b));
+ NUTS_TRUE(!b);
+ NUTS_CLOSE(s);
+}
+
+TEST_LIST = {
+ { "pull identity", test_pull_identity },
+ { "pull cannot send", test_pull_cannot_send },
+ { "pull no context", test_pull_no_context },
+ { "pull not writeable", test_pull_not_writeable },
+ { "pull poll readable", test_pull_poll_readable },
+ { "pull close pending", test_pull_close_pending },
+ { "pull validate peer", test_pull_validate_peer },
+ { "pull recv aio stopped", test_pull_recv_aio_stopped },
+ { "pull close recv", test_pull_close_recv },
+ { "pull recv nonblock", test_pull_recv_nonblock },
+ { "pull recv cancel", test_pull_recv_cancel },
+ { "pull cooked", test_pull_cooked },
+ { NULL, NULL },
+};
diff --git a/src/sp/protocol/pipeline0/push.c b/src/sp/protocol/pipeline0/push.c
new file mode 100644
index 00000000..ad43d967
--- /dev/null
+++ b/src/sp/protocol/pipeline0/push.c
@@ -0,0 +1,442 @@
+//
+// Copyright 2020 Staysail Systems, Inc. <info@staysail.tech>
+// Copyright 2018 Capitar IT Group BV <info@capitar.com>
+//
+// This software is supplied under the terms of the MIT License, a
+// copy of which should be located in the distribution where this
+// file was obtained (LICENSE.txt). A copy of the license may also be
+// found online at https://opensource.org/licenses/MIT.
+//
+
+#include <stdlib.h>
+
+#include "core/nng_impl.h"
+#include "nng/protocol/pipeline0/push.h"
+
+// Push protocol. The PUSH protocol is the "write" side of a pipeline.
+// Push distributes fairly, or tries to, by giving messages in round-robin
+// order.
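+//
+// A minimal usage sketch (illustrative only; it uses the public API that the
+// tests below exercise, and the inproc address is just an example):
+//
+//	nng_socket push, pull;
+//	nng_pull0_open(&pull);
+//	nng_push0_open(&push);
+//	nng_listen(pull, "inproc://pipeline-example", NULL, 0);
+//	nng_dial(push, "inproc://pipeline-example", NULL, 0);
+//	nng_send(push, "work", 5, 0); // round-robins across connected pullers
+//	// each puller collects its share with nng_recv()/nng_recvmsg()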
+
+#ifndef NNI_PROTO_PULL_V0
+#define NNI_PROTO_PULL_V0 NNI_PROTO(5, 1)
+#endif
+
+#ifndef NNI_PROTO_PUSH_V0
+#define NNI_PROTO_PUSH_V0 NNI_PROTO(5, 0)
+#endif
+
+typedef struct push0_pipe push0_pipe;
+typedef struct push0_sock push0_sock;
+
+static void push0_send_cb(void *);
+static void push0_recv_cb(void *);
+static void push0_pipe_ready(push0_pipe *);
+
+// push0_sock is our per-socket protocol private structure.
+struct push0_sock {
+ nni_lmq wq; // list of messages queued
+ nni_list aq; // list of aio senders waiting
+ nni_list pl; // list of pipes ready to send
+ nni_pollable writable;
+ nni_mtx m;
+};
+
+// push0_pipe is our per-pipe protocol private structure.
+struct push0_pipe {
+ nni_pipe * pipe;
+ push0_sock * push;
+ nni_list_node node;
+
+ nni_aio aio_recv;
+ nni_aio aio_send;
+};
+
+static int
+push0_sock_init(void *arg, nni_sock *sock)
+{
+ push0_sock *s = arg;
+ NNI_ARG_UNUSED(sock);
+
+ nni_mtx_init(&s->m);
+ nni_aio_list_init(&s->aq);
+ NNI_LIST_INIT(&s->pl, push0_pipe, node);
+ nni_lmq_init(&s->wq, 0); // initially we start unbuffered.
+ nni_pollable_init(&s->writable);
+
+ return (0);
+}
+
+static void
+push0_sock_fini(void *arg)
+{
+ push0_sock *s = arg;
+ nni_pollable_fini(&s->writable);
+ nni_lmq_fini(&s->wq);
+ nni_mtx_fini(&s->m);
+}
+
+static void
+push0_sock_open(void *arg)
+{
+ NNI_ARG_UNUSED(arg);
+}
+
+static void
+push0_sock_close(void *arg)
+{
+ push0_sock *s = arg;
+ nni_aio * a;
+ nni_mtx_lock(&s->m);
+ while ((a = nni_list_first(&s->aq)) != NULL) {
+ nni_aio_list_remove(a);
+ nni_aio_finish_error(a, NNG_ECLOSED);
+ }
+ nni_mtx_unlock(&s->m);
+}
+
+static void
+push0_pipe_stop(void *arg)
+{
+ push0_pipe *p = arg;
+
+ nni_aio_stop(&p->aio_recv);
+ nni_aio_stop(&p->aio_send);
+}
+
+static void
+push0_pipe_fini(void *arg)
+{
+ push0_pipe *p = arg;
+
+ nni_aio_fini(&p->aio_recv);
+ nni_aio_fini(&p->aio_send);
+}
+
+static int
+push0_pipe_init(void *arg, nni_pipe *pipe, void *s)
+{
+ push0_pipe *p = arg;
+
+ nni_aio_init(&p->aio_recv, push0_recv_cb, p);
+ nni_aio_init(&p->aio_send, push0_send_cb, p);
+ NNI_LIST_NODE_INIT(&p->node);
+ p->pipe = pipe;
+ p->push = s;
+ return (0);
+}
+
+static int
+push0_pipe_start(void *arg)
+{
+ push0_pipe *p = arg;
+
+ if (nni_pipe_peer(p->pipe) != NNI_PROTO_PULL_V0) {
+ return (NNG_EPROTO);
+ }
+
+ // Schedule a receiver. This is mostly so that we can detect
+ // a closed transport pipe.
+ nni_pipe_recv(p->pipe, &p->aio_recv);
+ push0_pipe_ready(p);
+
+ return (0);
+}
+
+static void
+push0_pipe_close(void *arg)
+{
+ push0_pipe *p = arg;
+ push0_sock *s = p->push;
+
+ nni_aio_close(&p->aio_recv);
+ nni_aio_close(&p->aio_send);
+
+ nni_mtx_lock(&s->m);
+ if (nni_list_node_active(&p->node)) {
+ nni_list_node_remove(&p->node);
+
+ if (nni_list_empty(&s->pl) && nni_lmq_full(&s->wq)) {
+ nni_pollable_clear(&s->writable);
+ }
+ }
+ nni_mtx_unlock(&s->m);
+}
+
+static void
+push0_recv_cb(void *arg)
+{
+ push0_pipe *p = arg;
+
+ // We normally expect to receive an error. If a pipe actually
+ // sends us data, we just discard it.
+ if (nni_aio_result(&p->aio_recv) != 0) {
+ nni_pipe_close(p->pipe);
+ return;
+ }
+ nni_msg_free(nni_aio_get_msg(&p->aio_recv));
+ nni_aio_set_msg(&p->aio_recv, NULL);
+ nni_pipe_recv(p->pipe, &p->aio_recv);
+}
+
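+// Called when a pipe can accept another message.  Prefer anything already
+// sitting in the buffered queue, then any sender waiting in the aio queue,
+// and otherwise park the pipe on the ready list.  If the socket had been
+// blocked for writing, raise the writable pollable again.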
+static void
+push0_pipe_ready(push0_pipe *p)
+{
+ push0_sock *s = p->push;
+ nni_msg * m;
+ nni_aio * a = NULL;
+ size_t l;
+ bool blocked;
+
+ nni_mtx_lock(&s->m);
+
+ blocked = nni_lmq_full(&s->wq) && nni_list_empty(&s->pl);
+
+	// If a message is waiting in the buffered queue,
+	// then we prefer to send that first.
+ if (nni_lmq_getq(&s->wq, &m) == 0) {
+ nni_aio_set_msg(&p->aio_send, m);
+ nni_pipe_send(p->pipe, &p->aio_send);
+
+ if ((a = nni_list_first(&s->aq)) != NULL) {
+ nni_aio_list_remove(a);
+ m = nni_aio_get_msg(a);
+ l = nni_msg_len(m);
+ nni_lmq_putq(&s->wq, m);
+ }
+
+ } else if ((a = nni_list_first(&s->aq)) != NULL) {
+ // Looks like we had the unbuffered case, but
+ // someone was waiting.
+ nni_aio_list_remove(a);
+ m = nni_aio_get_msg(a);
+ l = nni_msg_len(m);
+
+ nni_aio_set_msg(&p->aio_send, m);
+ nni_pipe_send(p->pipe, &p->aio_send);
+ } else {
+ // We had nothing to send. Just put us in the ready list.
+ nni_list_append(&s->pl, p);
+ }
+
+ if (blocked) {
+ // if we blocked, then toggle the status.
+ if ((!nni_lmq_full(&s->wq)) || (!nni_list_empty(&s->pl))) {
+ nni_pollable_raise(&s->writable);
+ }
+ }
+
+ nni_mtx_unlock(&s->m);
+
+ if (a != NULL) {
+ nni_aio_set_msg(a, NULL);
+ nni_aio_finish_sync(a, 0, l);
+ }
+}
+
+static void
+push0_send_cb(void *arg)
+{
+ push0_pipe *p = arg;
+
+ if (nni_aio_result(&p->aio_send) != 0) {
+ nni_msg_free(nni_aio_get_msg(&p->aio_send));
+ nni_aio_set_msg(&p->aio_send, NULL);
+ nni_pipe_close(p->pipe);
+ return;
+ }
+
+ push0_pipe_ready(p);
+}
+
+static void
+push0_cancel(nni_aio *aio, void *arg, int rv)
+{
+ push0_sock *s = arg;
+
+ nni_mtx_lock(&s->m);
+ if (nni_aio_list_active(aio)) {
+ nni_aio_list_remove(aio);
+ nni_aio_finish_error(aio, rv);
+ }
+ nni_mtx_unlock(&s->m);
+}
+
+static void
+push0_sock_send(void *arg, nni_aio *aio)
+{
+ push0_sock *s = arg;
+ push0_pipe *p;
+ nni_msg * m;
+ size_t l;
+ int rv;
+
+ if (nni_aio_begin(aio) != 0) {
+ return;
+ }
+
+ m = nni_aio_get_msg(aio);
+ l = nni_msg_len(m);
+
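+	// Delivery takes one of three paths: hand the message directly to
+	// an idle pipe, stash it in the buffered send queue, or (failing
+	// both) schedule the aio to wait for a pipe to become ready.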
+ nni_mtx_lock(&s->m);
+
+	// First we want to see if we can send it right now.
+	// Note that we don't block the sender until the message is actually
+	// delivered, only until we have committed to send it.
+ if ((p = nni_list_first(&s->pl)) != NULL) {
+ nni_list_remove(&s->pl, p);
+ // NB: We won't have had any waiters in the message queue
+ // or the aio queue, because we would not put the pipe
+ // in the ready list in that case. Note though that the
+ // wq may be "full" if we are unbuffered.
+ if (nni_list_empty(&s->pl) && (nni_lmq_full(&s->wq))) {
+ nni_pollable_clear(&s->writable);
+ }
+ nni_aio_set_msg(aio, NULL);
+ nni_aio_finish(aio, 0, l);
+ nni_aio_set_msg(&p->aio_send, m);
+ nni_pipe_send(p->pipe, &p->aio_send);
+ nni_mtx_unlock(&s->m);
+ return;
+ }
+
+	// Otherwise, see if we can queue it in the send buffer.
+ if (nni_lmq_putq(&s->wq, m) == 0) {
+ // Yay, we can. So we're done.
+ nni_aio_set_msg(aio, NULL);
+ nni_aio_finish(aio, 0, l);
+ if (nni_lmq_full(&s->wq)) {
+ nni_pollable_clear(&s->writable);
+ }
+ nni_mtx_unlock(&s->m);
+ return;
+ }
+
+ if ((rv = nni_aio_schedule(aio, push0_cancel, s)) != 0) {
+ nni_aio_finish_error(aio, rv);
+ nni_mtx_unlock(&s->m);
+ return;
+ }
+ nni_aio_list_append(&s->aq, aio);
+ nni_mtx_unlock(&s->m);
+}
+
+static void
+push0_sock_recv(void *arg, nni_aio *aio)
+{
+ NNI_ARG_UNUSED(arg);
+ nni_aio_finish_error(aio, NNG_ENOTSUP);
+}
+
+static int
+push0_set_send_buf_len(void *arg, const void *buf, size_t sz, nni_type t)
+{
+ push0_sock *s = arg;
+ int val;
+ int rv;
+
+ if ((rv = nni_copyin_int(&val, buf, sz, 0, 8192, t)) != 0) {
+ return (rv);
+ }
+ nni_mtx_lock(&s->m);
+ rv = nni_lmq_resize(&s->wq, (size_t) val);
+ // Changing the size of the queue can affect our readiness.
+ if (!nni_lmq_full(&s->wq)) {
+ nni_pollable_raise(&s->writable);
+ } else if (nni_list_empty(&s->pl)) {
+ nni_pollable_clear(&s->writable);
+ }
+ nni_mtx_unlock(&s->m);
+ return (rv);
+}
+
+static int
+push0_get_send_buf_len(void *arg, void *buf, size_t *szp, nni_opt_type t)
+{
+ push0_sock *s = arg;
+ int val;
+
+ nni_mtx_lock(&s->m);
+ val = nni_lmq_cap(&s->wq);
+ nni_mtx_unlock(&s->m);
+
+ return (nni_copyout_int(val, buf, szp, t));
+}
+
+static int
+push0_sock_get_send_fd(void *arg, void *buf, size_t *szp, nni_opt_type t)
+{
+ push0_sock *s = arg;
+ int rv;
+ int fd;
+
+ if ((rv = nni_pollable_getfd(&s->writable, &fd)) != 0) {
+ return (rv);
+ }
+ return (nni_copyout_int(fd, buf, szp, t));
+}
+
+static nni_proto_pipe_ops push0_pipe_ops = {
+ .pipe_size = sizeof(push0_pipe),
+ .pipe_init = push0_pipe_init,
+ .pipe_fini = push0_pipe_fini,
+ .pipe_start = push0_pipe_start,
+ .pipe_close = push0_pipe_close,
+ .pipe_stop = push0_pipe_stop,
+};
+
+static nni_option push0_sock_options[] = {
+ {
+ .o_name = NNG_OPT_SENDFD,
+ .o_get = push0_sock_get_send_fd,
+ },
+ {
+ .o_name = NNG_OPT_SENDBUF,
+ .o_get = push0_get_send_buf_len,
+ .o_set = push0_set_send_buf_len,
+ },
+ // terminate list
+ {
+ .o_name = NULL,
+ },
+};
+
+static nni_proto_sock_ops push0_sock_ops = {
+ .sock_size = sizeof(push0_sock),
+ .sock_init = push0_sock_init,
+ .sock_fini = push0_sock_fini,
+ .sock_open = push0_sock_open,
+ .sock_close = push0_sock_close,
+ .sock_options = push0_sock_options,
+ .sock_send = push0_sock_send,
+ .sock_recv = push0_sock_recv,
+};
+
+static nni_proto push0_proto = {
+ .proto_version = NNI_PROTOCOL_VERSION,
+ .proto_self = { NNI_PROTO_PUSH_V0, "push" },
+ .proto_peer = { NNI_PROTO_PULL_V0, "pull" },
+ .proto_flags = NNI_PROTO_FLAG_SND,
+ .proto_pipe_ops = &push0_pipe_ops,
+ .proto_sock_ops = &push0_sock_ops,
+};
+
+static nni_proto push0_proto_raw = {
+ .proto_version = NNI_PROTOCOL_VERSION,
+ .proto_self = { NNI_PROTO_PUSH_V0, "push" },
+ .proto_peer = { NNI_PROTO_PULL_V0, "pull" },
+ .proto_flags = NNI_PROTO_FLAG_SND | NNI_PROTO_FLAG_RAW,
+ .proto_pipe_ops = &push0_pipe_ops,
+ .proto_sock_ops = &push0_sock_ops,
+};
+
+int
+nng_push0_open(nng_socket *s)
+{
+ return (nni_proto_open(s, &push0_proto));
+}
+
+int
+nng_push0_open_raw(nng_socket *s)
+{
+ return (nni_proto_open(s, &push0_proto_raw));
+}
diff --git a/src/sp/protocol/pipeline0/push_test.c b/src/sp/protocol/pipeline0/push_test.c
new file mode 100644
index 00000000..d22ccaa4
--- /dev/null
+++ b/src/sp/protocol/pipeline0/push_test.c
@@ -0,0 +1,525 @@
+//
+// Copyright 2020 Staysail Systems, Inc. <info@staysail.tech>
+//
+// This software is supplied under the terms of the MIT License, a
+// copy of which should be located in the distribution where this
+// file was obtained (LICENSE.txt). A copy of the license may also be
+// found online at https://opensource.org/licenses/MIT.
+//
+
+#include <nuts.h>
+
+static void
+test_push_identity(void)
+{
+ nng_socket s;
+ int p;
+ char * n;
+
+ NUTS_PASS(nng_push0_open(&s));
+ NUTS_PASS(nng_socket_get_int(s, NNG_OPT_PROTO, &p));
+ NUTS_TRUE(p == NUTS_PROTO(5u, 0u)); // 80
+ NUTS_PASS(nng_socket_get_int(s, NNG_OPT_PEER, &p));
+ NUTS_TRUE(p == NUTS_PROTO(5u, 1u)); // 81
+ NUTS_PASS(nng_socket_get_string(s, NNG_OPT_PROTONAME, &n));
+ NUTS_MATCH(n, "push");
+ nng_strfree(n);
+ NUTS_PASS(nng_socket_get_string(s, NNG_OPT_PEERNAME, &n));
+ NUTS_MATCH(n, "pull");
+ nng_strfree(n);
+ NUTS_CLOSE(s);
+}
+
+static void
+test_push_cannot_recv(void)
+{
+ nng_socket s;
+ nng_msg * m = NULL;
+
+ NUTS_PASS(nng_push0_open(&s));
+ NUTS_FAIL(nng_recvmsg(s, &m, 0), NNG_ENOTSUP);
+ NUTS_CLOSE(s);
+}
+
+static void
+test_push_no_context(void)
+{
+ nng_socket s;
+ nng_ctx ctx;
+
+ NUTS_PASS(nng_push0_open(&s));
+ NUTS_FAIL(nng_ctx_open(&ctx, s), NNG_ENOTSUP);
+ NUTS_CLOSE(s);
+}
+
+static void
+test_push_not_readable(void)
+{
+ int fd;
+ nng_socket s;
+
+ NUTS_PASS(nng_push0_open(&s));
+ NUTS_FAIL(nng_socket_get_int(s, NNG_OPT_RECVFD, &fd), NNG_ENOTSUP);
+ NUTS_CLOSE(s);
+}
+
+static void
+test_push_poll_writable(void)
+{
+ int fd;
+ nng_socket pull;
+ nng_socket push;
+
+ NUTS_PASS(nng_pull0_open(&pull));
+ NUTS_PASS(nng_push0_open(&push));
+ NUTS_PASS(nng_socket_set_ms(pull, NNG_OPT_RECVTIMEO, 1000));
+ NUTS_PASS(nng_socket_set_ms(push, NNG_OPT_SENDTIMEO, 1000));
+ NUTS_PASS(nng_socket_get_int(push, NNG_OPT_SENDFD, &fd));
+ NUTS_TRUE(fd >= 0);
+
+ // This tests unbuffered sockets for now.
+ // Note that for this we are using unbuffered inproc.
+ // If using TCP or similar, then transport buffering will
+ // break assumptions in this test.
+
+ // Not writable if not connected!
+ NUTS_TRUE(nuts_poll_fd(fd) == false);
+
+ // After connect we can write.
+ NUTS_MARRY(pull, push);
+ NUTS_TRUE(nuts_poll_fd(fd) == true);
+
+ // But once we send a message, it is not anymore.
+ NUTS_SEND(push, "abc");
+ // Have to send a second message, because the remote socket
+ // will have consumed the first one.
+ NUTS_SEND(push, "def");
+ NUTS_SLEEP(100);
+ NUTS_TRUE(nuts_poll_fd(fd) == false);
+
+	// and receiving the message makes it writable again.
+ NUTS_RECV(pull, "abc");
+ NUTS_SLEEP(100);
+ NUTS_TRUE(nuts_poll_fd(fd));
+
+ NUTS_CLOSE(pull);
+ NUTS_CLOSE(push);
+}
+
+static void
+test_push_poll_buffered(void)
+{
+ int fd;
+ nng_socket pull;
+ nng_socket push;
+
+ NUTS_PASS(nng_pull0_open(&pull));
+ NUTS_PASS(nng_push0_open(&push));
+ NUTS_PASS(nng_socket_set_ms(pull, NNG_OPT_RECVTIMEO, 1000));
+ NUTS_PASS(nng_socket_set_ms(push, NNG_OPT_SENDTIMEO, 1000));
+ NUTS_PASS(nng_socket_set_int(push, NNG_OPT_SENDBUF, 2));
+ NUTS_PASS(nng_socket_get_int(push, NNG_OPT_SENDFD, &fd));
+ NUTS_TRUE(fd >= 0);
+
+	// We can write two messages while unconnected, thanks to the buffer.
+ NUTS_TRUE(nuts_poll_fd(fd));
+ NUTS_SEND(push, "abc");
+ NUTS_TRUE(nuts_poll_fd(fd));
+ NUTS_SEND(push, "def");
+ NUTS_TRUE(nuts_poll_fd(fd) == false);
+
+	// After connecting, the remote end will pick up one of them.
+	// Also, the local pipe itself will pick up one, so we have
+	// room for two more.
+ NUTS_MARRY(pull, push);
+ NUTS_SLEEP(100);
+ NUTS_TRUE(nuts_poll_fd(fd));
+ NUTS_SEND(push, "ghi");
+ NUTS_SLEEP(100);
+ NUTS_TRUE(nuts_poll_fd(fd));
+ NUTS_SEND(push, "jkl");
+ // Now it should be full.
+ NUTS_TRUE(nuts_poll_fd(fd) == false);
+
+	// and receiving the messages makes it writable again.
+ NUTS_RECV(pull, "abc");
+ NUTS_SLEEP(100);
+ NUTS_TRUE(nuts_poll_fd(fd));
+ NUTS_RECV(pull, "def");
+ NUTS_RECV(pull, "ghi");
+ NUTS_RECV(pull, "jkl");
+
+ NUTS_CLOSE(pull);
+ NUTS_CLOSE(push);
+}
+
+static void
+test_push_poll_truncate(void)
+{
+ int fd;
+ nng_socket pull;
+ nng_socket push;
+
+ // This test starts with a buffer and then truncates it to see
+ // that shortening the buffer has an impact.
+
+ NUTS_PASS(nng_pull0_open(&pull));
+ NUTS_PASS(nng_push0_open(&push));
+ NUTS_PASS(nng_socket_set_ms(pull, NNG_OPT_RECVTIMEO, 1000));
+ NUTS_PASS(nng_socket_set_ms(push, NNG_OPT_SENDTIMEO, 1000));
+ NUTS_PASS(nng_socket_set_int(push, NNG_OPT_SENDBUF, 3));
+ NUTS_PASS(nng_socket_get_int(push, NNG_OPT_SENDFD, &fd));
+ NUTS_TRUE(fd >= 0);
+
+	// We can write two messages while unconnected (the buffer holds 3).
+ NUTS_TRUE(nuts_poll_fd(fd));
+ NUTS_SEND(push, "abc");
+ NUTS_TRUE(nuts_poll_fd(fd));
+ NUTS_SEND(push, "def");
+ NUTS_TRUE(nuts_poll_fd(fd));
+
+ NUTS_PASS(nng_socket_set_int(push, NNG_OPT_SENDBUF, 1));
+ NUTS_TRUE(nuts_poll_fd(fd) == false);
+
+ NUTS_MARRY(pull, push);
+ NUTS_RECV(pull, "abc");
+ // def got dropped
+ NUTS_SEND(push, "ghi");
+ NUTS_RECV(pull, "ghi");
+
+ NUTS_CLOSE(pull);
+ NUTS_SLEEP(100);
+
+ // We have a buffer of one.
+ NUTS_SEND(push, "jkl");
+ // Resize to 0 (unbuffered)
+ NUTS_PASS(nng_socket_set_int(push, NNG_OPT_SENDBUF, 0));
+
+ // reopen the pull socket and connect it
+ NUTS_PASS(nng_pull0_open(&pull));
+ NUTS_MARRY(push, pull);
+
+ // jkl got dropped.
+ NUTS_SEND(push, "mno");
+ NUTS_RECV(pull, "mno");
+
+ NUTS_CLOSE(pull);
+ NUTS_CLOSE(push);
+}
+
+void
+test_push_validate_peer(void)
+{
+ nng_socket s1, s2;
+ nng_stat * stats;
+ nng_stat * reject;
+ char * addr;
+
+ NUTS_ADDR(addr, "inproc");
+
+ NUTS_PASS(nng_push0_open(&s1));
+ NUTS_PASS(nng_push0_open(&s2));
+
+ NUTS_PASS(nng_listen(s1, addr, NULL, 0));
+ NUTS_PASS(nng_dial(s2, addr, NULL, NNG_FLAG_NONBLOCK));
+
+ NUTS_SLEEP(100);
+ NUTS_PASS(nng_stats_get(&stats));
+
+ NUTS_TRUE(stats != NULL);
+ NUTS_TRUE((reject = nng_stat_find_socket(stats, s1)) != NULL);
+ NUTS_TRUE((reject = nng_stat_find(reject, "reject")) != NULL);
+
+ NUTS_TRUE(nng_stat_type(reject) == NNG_STAT_COUNTER);
+ NUTS_TRUE(nng_stat_value(reject) > 0);
+
+ NUTS_CLOSE(s1);
+ NUTS_CLOSE(s2);
+ nng_stats_free(stats);
+}
+
+static void
+test_push_send_aio_stopped(void)
+{
+ nng_socket s;
+ nng_aio * aio;
+ nng_msg * m;
+
+ NUTS_PASS(nng_push0_open(&s));
+ NUTS_PASS(nng_aio_alloc(&aio, NULL, NULL));
+ NUTS_PASS(nng_msg_alloc(&m, 0));
+
+ nng_aio_set_msg(aio, m);
+ nng_aio_stop(aio);
+ nng_send_aio(s, aio);
+ nng_aio_wait(aio);
+ NUTS_FAIL(nng_aio_result(aio), NNG_ECANCELED);
+ NUTS_CLOSE(s);
+ nng_aio_free(aio);
+ nng_msg_free(m);
+}
+
+static void
+test_push_close_send(void)
+{
+ nng_socket s;
+ nng_aio * aio;
+ nng_msg * m;
+
+ NUTS_PASS(nng_push0_open(&s));
+ NUTS_PASS(nng_aio_alloc(&aio, NULL, NULL));
+ NUTS_PASS(nng_msg_alloc(&m, 0));
+ nng_aio_set_timeout(aio, 1000);
+ nng_aio_set_msg(aio, m);
+ nng_send_aio(s, aio);
+ NUTS_PASS(nng_close(s));
+ nng_aio_wait(aio);
+ NUTS_FAIL(nng_aio_result(aio), NNG_ECLOSED);
+
+ nng_aio_free(aio);
+ nng_msg_free(m);
+}
+
+static void
+test_push_send_nonblock(void)
+{
+ nng_socket s;
+ nng_aio * aio;
+ nng_msg * m;
+
+ NUTS_PASS(nng_push0_open(&s));
+ NUTS_PASS(nng_aio_alloc(&aio, NULL, NULL));
+ NUTS_PASS(nng_msg_alloc(&m, 0));
+
+ nng_aio_set_timeout(aio, 0); // Instant timeout
+ nng_aio_set_msg(aio, m);
+ nng_send_aio(s, aio);
+
+ nng_aio_wait(aio);
+ NUTS_FAIL(nng_aio_result(aio), NNG_ETIMEDOUT);
+ NUTS_CLOSE(s);
+ nng_aio_free(aio);
+ nng_msg_free(m);
+}
+
+static void
+test_push_send_timeout(void)
+{
+ nng_socket s;
+ nng_aio * aio;
+ nng_msg * m;
+
+ NUTS_PASS(nng_push0_open(&s));
+ NUTS_PASS(nng_aio_alloc(&aio, NULL, NULL));
+ NUTS_PASS(nng_msg_alloc(&m, 0));
+
+ nng_aio_set_timeout(aio, 10);
+ nng_aio_set_msg(aio, m);
+ nng_send_aio(s, aio);
+
+ nng_aio_wait(aio);
+ NUTS_FAIL(nng_aio_result(aio), NNG_ETIMEDOUT);
+ NUTS_CLOSE(s);
+ nng_aio_free(aio);
+ nng_msg_free(m);
+}
+
+static void
+test_push_send_cancel(void)
+{
+ nng_socket s;
+ nng_aio * aio;
+ nng_msg * m;
+
+ NUTS_PASS(nng_push0_open(&s));
+ NUTS_PASS(nng_aio_alloc(&aio, NULL, NULL));
+ NUTS_PASS(nng_msg_alloc(&m, 0));
+
+ nng_aio_set_timeout(aio, 1000);
+ nng_aio_set_msg(aio, m);
+ nng_send_aio(s, aio);
+ nng_aio_abort(aio, NNG_ECANCELED);
+
+ nng_aio_wait(aio);
+ NUTS_FAIL(nng_aio_result(aio), NNG_ECANCELED);
+ NUTS_CLOSE(s);
+ nng_aio_free(aio);
+ nng_msg_free(m);
+}
+
+static void
+test_push_send_late_unbuffered(void)
+{
+ nng_socket s;
+ nng_socket pull;
+ nng_aio * aio;
+ nng_msg * m;
+
+ NUTS_PASS(nng_push0_open(&s));
+ NUTS_PASS(nng_pull0_open(&pull));
+ NUTS_PASS(nng_aio_alloc(&aio, NULL, NULL));
+ NUTS_PASS(nng_msg_alloc(&m, 0));
+ NUTS_PASS(nng_msg_append(m, "123\0", 4));
+
+ nng_aio_set_timeout(aio, 1000);
+ nng_aio_set_msg(aio, m);
+ nng_send_aio(s, aio);
+
+ NUTS_MARRY(s, pull);
+
+ NUTS_RECV(pull, "123");
+
+ nng_aio_wait(aio);
+ NUTS_PASS(nng_aio_result(aio));
+ NUTS_CLOSE(s);
+ nng_aio_free(aio);
+}
+
+
+static void
+test_push_send_late_buffered(void)
+{
+ nng_socket s;
+ nng_socket pull;
+ nng_aio * aio;
+ nng_msg * m;
+
+ NUTS_PASS(nng_push0_open(&s));
+ NUTS_PASS(nng_pull0_open(&pull));
+ NUTS_PASS(nng_aio_alloc(&aio, NULL, NULL));
+ NUTS_PASS(nng_setopt_int(s, NNG_OPT_SENDBUF, 2));
+ NUTS_PASS(nng_msg_alloc(&m, 0));
+ NUTS_PASS(nng_msg_append(m, "123\0", 4));
+
+ nng_aio_set_timeout(aio, 1000);
+ nng_aio_set_msg(aio, m);
+ nng_send_aio(s, aio);
+
+ NUTS_MARRY(s, pull);
+
+ NUTS_RECV(pull, "123");
+
+ nng_aio_wait(aio);
+ NUTS_PASS(nng_aio_result(aio));
+ NUTS_CLOSE(s);
+ nng_aio_free(aio);
+}
+
+static void
+test_push_cooked(void)
+{
+ nng_socket s;
+ bool b;
+
+ NUTS_PASS(nng_push0_open(&s));
+ NUTS_PASS(nng_socket_get_bool(s, NNG_OPT_RAW, &b));
+ NUTS_TRUE(!b);
+ NUTS_CLOSE(s);
+}
+
+static void
+test_push_load_balance_buffered(void)
+{
+ nng_socket s;
+ nng_socket pull1;
+ nng_socket pull2;
+ nng_socket pull3;
+
+ NUTS_PASS(nng_push0_open(&s));
+ NUTS_PASS(nng_pull0_open(&pull1));
+ NUTS_PASS(nng_pull0_open(&pull2));
+ NUTS_PASS(nng_pull0_open(&pull3));
+ NUTS_PASS(nng_setopt_int(s, NNG_OPT_SENDBUF, 4));
+ NUTS_MARRY(s, pull1);
+ NUTS_MARRY(s, pull2);
+ NUTS_MARRY(s, pull3);
+ NUTS_SLEEP(100);
+ NUTS_SEND(s, "one");
+ NUTS_SEND(s, "two");
+ NUTS_SEND(s, "three");
+ NUTS_RECV(pull1, "one");
+ NUTS_RECV(pull2, "two");
+ NUTS_RECV(pull3, "three");
+ NUTS_CLOSE(s);
+ NUTS_CLOSE(pull1);
+ NUTS_CLOSE(pull2);
+ NUTS_CLOSE(pull3);
+}
+
+static void
+test_push_load_balance_unbuffered(void)
+{
+ nng_socket s;
+ nng_socket pull1;
+ nng_socket pull2;
+ nng_socket pull3;
+
+ NUTS_PASS(nng_push0_open(&s));
+ NUTS_PASS(nng_pull0_open(&pull1));
+ NUTS_PASS(nng_pull0_open(&pull2));
+ NUTS_PASS(nng_pull0_open(&pull3));
+ NUTS_MARRY(s, pull1);
+ NUTS_MARRY(s, pull2);
+ NUTS_MARRY(s, pull3);
+ NUTS_SLEEP(100);
+ NUTS_SEND(s, "one");
+ NUTS_SEND(s, "two");
+ NUTS_SEND(s, "three");
+ NUTS_RECV(pull1, "one");
+ NUTS_RECV(pull2, "two");
+ NUTS_RECV(pull3, "three");
+	// Wrap-around order is somewhat unpredictable, because the
+	// pull sockets can take different amounts of time to return
+	// to readiness.
+ NUTS_CLOSE(s);
+ NUTS_CLOSE(pull1);
+ NUTS_CLOSE(pull2);
+ NUTS_CLOSE(pull3);
+}
+
+static void
+test_push_send_buffer(void)
+{
+ nng_socket s;
+ int v;
+ bool b;
+ size_t sz;
+
+ NUTS_PASS(nng_push0_open(&s));
+ NUTS_PASS(nng_getopt_int(s, NNG_OPT_SENDBUF, &v));
+ NUTS_TRUE(v == 0);
+ NUTS_FAIL(nng_getopt_bool(s, NNG_OPT_SENDBUF, &b), NNG_EBADTYPE);
+ sz = 1;
+ NUTS_FAIL(nng_getopt(s, NNG_OPT_SENDBUF, &b, &sz), NNG_EINVAL);
+ NUTS_FAIL(nng_setopt_int(s, NNG_OPT_SENDBUF, -1), NNG_EINVAL);
+ NUTS_FAIL(nng_setopt_int(s, NNG_OPT_SENDBUF, 100000), NNG_EINVAL);
+ NUTS_FAIL(nng_setopt_bool(s, NNG_OPT_SENDBUF, false), NNG_EBADTYPE);
+ NUTS_FAIL(nng_setopt(s, NNG_OPT_SENDBUF, &b, 1), NNG_EINVAL);
+ NUTS_PASS(nng_setopt_int(s, NNG_OPT_SENDBUF, 100));
+ NUTS_PASS(nng_getopt_int(s, NNG_OPT_SENDBUF, &v));
+ NUTS_TRUE(v == 100);
+ NUTS_CLOSE(s);
+}
+
+TEST_LIST = {
+ { "push identity", test_push_identity },
+ { "push cannot recv", test_push_cannot_recv },
+ { "push no context", test_push_no_context },
+ { "push not readable", test_push_not_readable },
+ { "push poll writable", test_push_poll_writable },
+ { "push poll buffered", test_push_poll_buffered },
+ { "push poll truncate", test_push_poll_truncate },
+ { "push validate peer", test_push_validate_peer },
+ { "push send aio stopped", test_push_send_aio_stopped },
+ { "push close send", test_push_close_send },
+ { "push send nonblock", test_push_send_nonblock },
+ { "push send timeout", test_push_send_timeout },
+ { "push send cancel", test_push_send_cancel },
+ { "push send late buffered", test_push_send_late_buffered },
+ { "push send late unbuffered", test_push_send_late_unbuffered },
+ { "push cooked", test_push_cooked },
+ { "push load balance buffered", test_push_load_balance_buffered },
+ { "push load balance unbuffered", test_push_load_balance_unbuffered },
+ { "push send buffer", test_push_send_buffer },
+ { NULL, NULL },
+};
diff --git a/src/sp/protocol/pubsub0/CMakeLists.txt b/src/sp/protocol/pubsub0/CMakeLists.txt
new file mode 100644
index 00000000..160b7462
--- /dev/null
+++ b/src/sp/protocol/pubsub0/CMakeLists.txt
@@ -0,0 +1,24 @@
+#
+# Copyright 2019 Staysail Systems, Inc. <info@staysail.tech>
+# Copyright 2018 Capitar IT Group BV <info@capitar.com>
+#
+# This software is supplied under the terms of the MIT License, a
+# copy of which should be located in the distribution where this
+# file was obtained (LICENSE.txt). A copy of the license may also be
+# found online at https://opensource.org/licenses/MIT.
+#
+
+# Pub/Sub protocol
+nng_directory(pubsub0)
+
+nng_sources_if(NNG_PROTO_PUB0 pub.c)
+nng_headers_if(NNG_PROTO_PUB0 nng/protocol/pubsub0/pub.h)
+nng_defines_if(NNG_PROTO_PUB0 NNG_HAVE_PUB0)
+
+nng_sources_if(NNG_PROTO_SUB0 sub.c xsub.c)
+nng_headers_if(NNG_PROTO_SUB0 nng/protocol/pubsub0/sub.h)
+nng_defines_if(NNG_PROTO_SUB0 NNG_HAVE_SUB0)
+
+nng_test(pub_test)
+nng_test(sub_test)
+nng_test(xsub_test)
diff --git a/src/sp/protocol/pubsub0/pub.c b/src/sp/protocol/pubsub0/pub.c
new file mode 100644
index 00000000..e3d4f16a
--- /dev/null
+++ b/src/sp/protocol/pubsub0/pub.c
@@ -0,0 +1,383 @@
+//
+// Copyright 2020 Staysail Systems, Inc. <info@staysail.tech>
+// Copyright 2018 Capitar IT Group BV <info@capitar.com>
+//
+// This software is supplied under the terms of the MIT License, a
+// copy of which should be located in the distribution where this
+// file was obtained (LICENSE.txt). A copy of the license may also be
+// found online at https://opensource.org/licenses/MIT.
+//
+
+#include <string.h>
+
+#include "core/nng_impl.h"
+#include "nng/protocol/pubsub0/pub.h"
+
+// Publish protocol. The PUB protocol simply sends messages out, as
+// a broadcast. It is deliberately simple and does not perform sender-side
+// filtering. Delivery is best effort, so anything that can't receive the
+// message won't get one.
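+//
+// A minimal usage sketch (illustrative only; it assumes the matching SUB
+// side with its usual nng_sub0_open() constructor and NNG_OPT_SUB_SUBSCRIBE
+// option):
+//
+//	nng_socket pub, sub;
+//	nng_pub0_open(&pub);
+//	nng_sub0_open(&sub);
+//	nng_socket_set(sub, NNG_OPT_SUB_SUBSCRIBE, "", 0); // subscribe to all
+//	nng_listen(pub, "inproc://pubsub-example", NULL, 0);
+//	nng_dial(sub, "inproc://pubsub-example", NULL, 0);
+//	nng_send(pub, "hello", 6, 0); // every connected subscriber gets a copy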
+
+#ifndef NNI_PROTO_SUB_V0
+#define NNI_PROTO_SUB_V0 NNI_PROTO(2, 1)
+#endif
+
+#ifndef NNI_PROTO_PUB_V0
+#define NNI_PROTO_PUB_V0 NNI_PROTO(2, 0)
+#endif
+
+typedef struct pub0_pipe pub0_pipe;
+typedef struct pub0_sock pub0_sock;
+
+static void pub0_pipe_recv_cb(void *);
+static void pub0_pipe_send_cb(void *);
+static void pub0_sock_fini(void *);
+static void pub0_pipe_fini(void *);
+
+// pub0_sock is our per-socket protocol private structure.
+struct pub0_sock {
+ nni_list pipes;
+ nni_mtx mtx;
+ bool closed;
+ size_t sendbuf;
+ nni_pollable *sendable;
+};
+
+// pub0_pipe is our per-pipe protocol private structure.
+struct pub0_pipe {
+ nni_pipe * pipe;
+ pub0_sock * pub;
+ nni_lmq sendq;
+ bool closed;
+ bool busy;
+ nni_aio * aio_send;
+ nni_aio * aio_recv;
+ nni_list_node node;
+};
+
+static void
+pub0_sock_fini(void *arg)
+{
+ pub0_sock *s = arg;
+
+ nni_pollable_free(s->sendable);
+ nni_mtx_fini(&s->mtx);
+}
+
+static int
+pub0_sock_init(void *arg, nni_sock *nsock)
+{
+ pub0_sock *sock = arg;
+ int rv;
+ NNI_ARG_UNUSED(nsock);
+
+ if ((rv = nni_pollable_alloc(&sock->sendable)) != 0) {
+ return (rv);
+ }
+ nni_mtx_init(&sock->mtx);
+ NNI_LIST_INIT(&sock->pipes, pub0_pipe, node);
+ sock->sendbuf = 16; // fairly arbitrary
+ return (0);
+}
+
+static void
+pub0_sock_open(void *arg)
+{
+ NNI_ARG_UNUSED(arg);
+}
+
+static void
+pub0_sock_close(void *arg)
+{
+ NNI_ARG_UNUSED(arg);
+}
+
+static void
+pub0_pipe_stop(void *arg)
+{
+ pub0_pipe *p = arg;
+
+ nni_aio_stop(p->aio_send);
+ nni_aio_stop(p->aio_recv);
+}
+
+static void
+pub0_pipe_fini(void *arg)
+{
+ pub0_pipe *p = arg;
+
+ nni_aio_free(p->aio_send);
+ nni_aio_free(p->aio_recv);
+ nni_lmq_fini(&p->sendq);
+}
+
+static int
+pub0_pipe_init(void *arg, nni_pipe *pipe, void *s)
+{
+ pub0_pipe *p = arg;
+ pub0_sock *sock = s;
+ int rv;
+ size_t len;
+
+ nni_mtx_lock(&sock->mtx);
+ len = sock->sendbuf;
+ nni_mtx_unlock(&sock->mtx);
+
+	// The queue depth follows the socket's send buffer setting
+	// (NNG_OPT_SENDBUF); see pub0_sock_set_sendbuf below.
+ if (((rv = nni_lmq_init(&p->sendq, len)) != 0) ||
+ ((rv = nni_aio_alloc(&p->aio_send, pub0_pipe_send_cb, p)) != 0) ||
+ ((rv = nni_aio_alloc(&p->aio_recv, pub0_pipe_recv_cb, p)) != 0)) {
+
+ pub0_pipe_fini(p);
+ return (rv);
+ }
+
+ p->busy = false;
+ p->pipe = pipe;
+ p->pub = s;
+ return (0);
+}
+
+static int
+pub0_pipe_start(void *arg)
+{
+ pub0_pipe *p = arg;
+ pub0_sock *sock = p->pub;
+
+ if (nni_pipe_peer(p->pipe) != NNI_PROTO_SUB_V0) {
+ return (NNG_EPROTO);
+ }
+ nni_mtx_lock(&sock->mtx);
+ nni_list_append(&sock->pipes, p);
+ nni_mtx_unlock(&sock->mtx);
+
+ // Start the receiver.
+ nni_pipe_recv(p->pipe, p->aio_recv);
+
+ return (0);
+}
+
+static void
+pub0_pipe_close(void *arg)
+{
+ pub0_pipe *p = arg;
+ pub0_sock *sock = p->pub;
+
+ nni_aio_close(p->aio_send);
+ nni_aio_close(p->aio_recv);
+
+ nni_mtx_lock(&sock->mtx);
+ p->closed = true;
+ nni_lmq_flush(&p->sendq);
+
+ if (nni_list_active(&sock->pipes, p)) {
+ nni_list_remove(&sock->pipes, p);
+ }
+ nni_mtx_unlock(&sock->mtx);
+}
+
+static void
+pub0_pipe_recv_cb(void *arg)
+{
+ pub0_pipe *p = arg;
+
+ // We should never receive a message -- the only valid reason for us to
+ // be here is on pipe close.
+ if (nni_aio_result(p->aio_recv) == 0) {
+ nni_msg_free(nni_aio_get_msg(p->aio_recv));
+ }
+ nni_pipe_close(p->pipe);
+}
+
+static void
+pub0_pipe_send_cb(void *arg)
+{
+ pub0_pipe *p = arg;
+ pub0_sock *sock = p->pub;
+ nni_msg * msg;
+
+ if (nni_aio_result(p->aio_send) != 0) {
+ nni_msg_free(nni_aio_get_msg(p->aio_send));
+ nni_aio_set_msg(p->aio_send, NULL);
+ nni_pipe_close(p->pipe);
+ return;
+ }
+
+ nni_mtx_lock(&sock->mtx);
+ if (p->closed) {
+ nni_mtx_unlock(&sock->mtx);
+ return;
+ }
+ if (nni_lmq_getq(&p->sendq, &msg) == 0) {
+ nni_aio_set_msg(p->aio_send, msg);
+ nni_pipe_send(p->pipe, p->aio_send);
+ } else {
+ p->busy = false;
+ }
+ nni_mtx_unlock(&sock->mtx);
+}
+
+static void
+pub0_sock_recv(void *arg, nni_aio *aio)
+{
+ NNI_ARG_UNUSED(arg);
+ if (nni_aio_begin(aio) == 0) {
+ nni_aio_finish_error(aio, NNG_ENOTSUP);
+ }
+}
+
+static void
+pub0_sock_send(void *arg, nni_aio *aio)
+{
+ pub0_sock *sock = arg;
+ pub0_pipe *p;
+ nng_msg * msg;
+ size_t len;
+
+ msg = nni_aio_get_msg(aio);
+ len = nni_msg_len(msg);
+ nni_mtx_lock(&sock->mtx);
+ NNI_LIST_FOREACH (&sock->pipes, p) {
+
+ nni_msg_clone(msg);
+ if (p->busy) {
+ if (nni_lmq_full(&p->sendq)) {
+ // Make space for the new message.
+ nni_msg *old;
+ (void) nni_lmq_getq(&p->sendq, &old);
+ nni_msg_free(old);
+ }
+ nni_lmq_putq(&p->sendq, msg);
+ } else {
+ p->busy = true;
+ nni_aio_set_msg(p->aio_send, msg);
+ nni_pipe_send(p->pipe, p->aio_send);
+ }
+ }
+ nni_mtx_unlock(&sock->mtx);
+ nng_msg_free(msg);
+ nni_aio_finish(aio, 0, len);
+}
+
+static int
+pub0_sock_get_sendfd(void *arg, void *buf, size_t *szp, nni_type t)
+{
+ pub0_sock *sock = arg;
+ int fd;
+ int rv;
+ nni_mtx_lock(&sock->mtx);
+ // PUB sockets are *always* writable.
+ nni_pollable_raise(sock->sendable);
+ rv = nni_pollable_getfd(sock->sendable, &fd);
+ nni_mtx_unlock(&sock->mtx);
+
+ if (rv == 0) {
+ rv = nni_copyout_int(fd, buf, szp, t);
+ }
+ return (rv);
+}
+
+static int
+pub0_sock_set_sendbuf(void *arg, const void *buf, size_t sz, nni_type t)
+{
+ pub0_sock *sock = arg;
+ pub0_pipe *p;
+ int val;
+ int rv;
+
+ if ((rv = nni_copyin_int(&val, buf, sz, 1, 8192, t)) != 0) {
+ return (rv);
+ }
+
+ nni_mtx_lock(&sock->mtx);
+ sock->sendbuf = (size_t) val;
+ NNI_LIST_FOREACH (&sock->pipes, p) {
+ // If we fail part way thru (should only be ENOMEM), we
+ // stop short. The others would likely fail for ENOMEM as
+ // well anyway. There is a weird effect here where the
+ // buffers may have been set for *some* of the pipes, but
+ // we have no way to correct partial failure.
+ if ((rv = nni_lmq_resize(&p->sendq, (size_t) val)) != 0) {
+ break;
+ }
+ }
+ nni_mtx_unlock(&sock->mtx);
+ return (rv);
+}
+
+static int
+pub0_sock_get_sendbuf(void *arg, void *buf, size_t *szp, nni_type t)
+{
+ pub0_sock *sock = arg;
+ int val;
+ nni_mtx_lock(&sock->mtx);
+ val = (int) sock->sendbuf;
+ nni_mtx_unlock(&sock->mtx);
+ return (nni_copyout_int(val, buf, szp, t));
+}
+
+static nni_proto_pipe_ops pub0_pipe_ops = {
+ .pipe_size = sizeof(pub0_pipe),
+ .pipe_init = pub0_pipe_init,
+ .pipe_fini = pub0_pipe_fini,
+ .pipe_start = pub0_pipe_start,
+ .pipe_close = pub0_pipe_close,
+ .pipe_stop = pub0_pipe_stop,
+};
+
+static nni_option pub0_sock_options[] = {
+	{
+ .o_name = NNG_OPT_SENDFD,
+ .o_get = pub0_sock_get_sendfd,
+ },
+ {
+ .o_name = NNG_OPT_SENDBUF,
+ .o_get = pub0_sock_get_sendbuf,
+ .o_set = pub0_sock_set_sendbuf,
+	},
+	// terminate list
+	{
+ .o_name = NULL,
+ },
+};
+
+static nni_proto_sock_ops pub0_sock_ops = {
+ .sock_size = sizeof(pub0_sock),
+ .sock_init = pub0_sock_init,
+ .sock_fini = pub0_sock_fini,
+ .sock_open = pub0_sock_open,
+ .sock_close = pub0_sock_close,
+ .sock_send = pub0_sock_send,
+ .sock_recv = pub0_sock_recv,
+ .sock_options = pub0_sock_options,
+};
+
+static nni_proto pub0_proto = {
+ .proto_version = NNI_PROTOCOL_VERSION,
+ .proto_self = { NNI_PROTO_PUB_V0, "pub" },
+ .proto_peer = { NNI_PROTO_SUB_V0, "sub" },
+ .proto_flags = NNI_PROTO_FLAG_SND,
+ .proto_sock_ops = &pub0_sock_ops,
+ .proto_pipe_ops = &pub0_pipe_ops,
+};
+
+static nni_proto pub0_proto_raw = {
+ .proto_version = NNI_PROTOCOL_VERSION,
+ .proto_self = { NNI_PROTO_PUB_V0, "pub" },
+ .proto_peer = { NNI_PROTO_SUB_V0, "sub" },
+ .proto_flags = NNI_PROTO_FLAG_SND | NNI_PROTO_FLAG_RAW,
+ .proto_sock_ops = &pub0_sock_ops,
+ .proto_pipe_ops = &pub0_pipe_ops,
+};
+
+int
+nng_pub0_open(nng_socket *sidp)
+{
+ return (nni_proto_open(sidp, &pub0_proto));
+}
+
+int
+nng_pub0_open_raw(nng_socket *sidp)
+{
+ return (nni_proto_open(sidp, &pub0_proto_raw));
+}
diff --git a/src/sp/protocol/pubsub0/pub_test.c b/src/sp/protocol/pubsub0/pub_test.c
new file mode 100644
index 00000000..a430b610
--- /dev/null
+++ b/src/sp/protocol/pubsub0/pub_test.c
@@ -0,0 +1,331 @@
+//
+// Copyright 2020 Staysail Systems, Inc. <info@staysail.tech>
+//
+// This software is supplied under the terms of the MIT License, a
+// copy of which should be located in the distribution where this
+// file was obtained (LICENSE.txt). A copy of the license may also be
+// found online at https://opensource.org/licenses/MIT.
+//
+
+#include <nuts.h>
+
+static void
+test_pub_identity(void)
+{
+ nng_socket s;
+ int p;
+ char * n;
+
+ NUTS_PASS(nng_pub0_open(&s));
+ NUTS_PASS(nng_socket_get_int(s, NNG_OPT_PROTO, &p));
+ NUTS_TRUE(p == NUTS_PROTO(2u, 0u)); // 32
+ NUTS_PASS(nng_socket_get_int(s, NNG_OPT_PEER, &p));
+ NUTS_TRUE(p == NUTS_PROTO(2u, 1u)); // 33
+ NUTS_PASS(nng_socket_get_string(s, NNG_OPT_PROTONAME, &n));
+ NUTS_MATCH(n, "pub");
+ nng_strfree(n);
+ NUTS_PASS(nng_socket_get_string(s, NNG_OPT_PEERNAME, &n));
+ NUTS_MATCH(n, "sub");
+ nng_strfree(n);
+ NUTS_CLOSE(s);
+}
+
+static void
+test_pub_cannot_recv(void)
+{
+ nng_socket pub;
+
+ NUTS_PASS(nng_pub0_open(&pub));
+ NUTS_FAIL(nng_recv(pub, "", 0, 0), NNG_ENOTSUP);
+ NUTS_CLOSE(pub);
+}
+
+static void
+test_pub_no_context(void)
+{
+ nng_socket pub;
+ nng_ctx ctx;
+
+ NUTS_PASS(nng_pub0_open(&pub));
+ NUTS_FAIL(nng_ctx_open(&ctx, pub), NNG_ENOTSUP);
+ NUTS_CLOSE(pub);
+}
+
+static void
+test_pub_not_readable(void)
+{
+ int fd;
+ nng_socket pub;
+
+ NUTS_PASS(nng_pub0_open(&pub));
+ NUTS_FAIL(nng_socket_get_int(pub, NNG_OPT_RECVFD, &fd), NNG_ENOTSUP);
+ NUTS_CLOSE(pub);
+}
+
+static void
+test_pub_poll_writeable(void)
+{
+ int fd;
+ nng_socket pub;
+ nng_socket sub;
+
+ NUTS_PASS(nng_sub0_open(&sub));
+ NUTS_PASS(nng_pub0_open(&pub));
+ NUTS_PASS(nng_socket_get_int(pub, NNG_OPT_SENDFD, &fd));
+ NUTS_TRUE(fd >= 0);
+
+ // Pub is *always* writeable
+ NUTS_TRUE(nuts_poll_fd(fd));
+
+ // Even after connect (no message yet)
+ NUTS_MARRY(pub, sub);
+ NUTS_TRUE(nuts_poll_fd(fd));
+
+ // Even if we send messages.
+ NUTS_SEND(pub, "abc");
+ NUTS_TRUE(nuts_poll_fd(fd));
+
+ NUTS_CLOSE(pub);
+ NUTS_CLOSE(sub);
+}
+
+static void
+test_pub_send_no_pipes(void)
+{
+ nng_socket pub;
+
+ NUTS_PASS(nng_pub0_open(&pub));
+ NUTS_SEND(pub, "DROP1");
+ NUTS_SEND(pub, "DROP2");
+ NUTS_CLOSE(pub);
+}
+
+void
+test_pub_validate_peer(void)
+{
+ nng_socket s1, s2;
+ nng_stat * stats;
+ nng_stat * reject;
+ char *addr;
+
+ NUTS_ADDR(addr, "inproc");
+
+ NUTS_PASS(nng_pub0_open(&s1));
+ NUTS_PASS(nng_pub0_open(&s2));
+
+ NUTS_PASS(nng_listen(s1, addr, NULL, 0));
+ NUTS_PASS(nng_dial(s2, addr, NULL, NNG_FLAG_NONBLOCK));
+
+ NUTS_SLEEP(100);
+ NUTS_PASS(nng_stats_get(&stats));
+
+ NUTS_TRUE(stats != NULL);
+ NUTS_TRUE((reject = nng_stat_find_socket(stats, s1)) != NULL);
+ NUTS_TRUE((reject = nng_stat_find(reject, "reject")) != NULL);
+
+ NUTS_TRUE(nng_stat_type(reject) == NNG_STAT_COUNTER);
+ NUTS_TRUE(nng_stat_value(reject) > 0);
+
+ NUTS_CLOSE(s1);
+ NUTS_CLOSE(s2);
+ nng_stats_free(stats);
+}
+
+static void
+test_pub_send_queued(void)
+{
+ nng_socket pub;
+ nng_socket sub;
+
+ // MB: What we really need is a mock so that we can send harder
+ // than we receive -- we need a way to apply back-pressure for this
+ // test to be really meaningful.
+ NUTS_PASS(nng_pub0_open(&pub));
+ NUTS_PASS(nng_sub0_open(&sub));
+ NUTS_PASS(nng_socket_set(sub, NNG_OPT_SUB_SUBSCRIBE, "", 0));
+ NUTS_PASS(nng_socket_set_int(pub, NNG_OPT_SENDBUF, 10));
+ NUTS_PASS(nng_socket_set_int(sub, NNG_OPT_RECVBUF, 10));
+ NUTS_PASS(nng_socket_set_ms(pub, NNG_OPT_SENDTIMEO, 1000));
+ NUTS_PASS(nng_socket_set_ms(sub, NNG_OPT_RECVTIMEO, 1000));
+ NUTS_MARRY(pub, sub);
+ NUTS_SEND(pub, "first");
+ NUTS_SEND(pub, "second");
+ NUTS_SEND(pub, "three musketeers");
+ NUTS_SEND(pub, "four");
+ NUTS_SLEEP(50);
+ NUTS_RECV(sub, "first");
+ NUTS_RECV(sub, "second");
+ NUTS_RECV(sub, "three musketeers");
+ NUTS_RECV(sub, "four");
+
+ NUTS_CLOSE(pub);
+ NUTS_CLOSE(sub);
+}
+
+static void
+test_sub_recv_ctx_closed(void)
+{
+ nng_socket sub;
+ nng_ctx ctx;
+ nng_aio * aio;
+ NUTS_PASS(nng_sub0_open(&sub));
+ NUTS_PASS(nng_ctx_open(&ctx, sub));
+ NUTS_PASS(nng_aio_alloc(&aio, NULL, NULL));
+ nng_ctx_close(ctx);
+ nng_ctx_recv(ctx, aio);
+ nng_aio_wait(aio);
+ NUTS_FAIL(nng_aio_result(aio), NNG_ECLOSED);
+ nng_aio_free(aio);
+ NUTS_CLOSE(sub);
+}
+
+static void
+test_sub_ctx_recv_aio_stopped(void)
+{
+ nng_socket sub;
+ nng_ctx ctx;
+ nng_aio * aio;
+
+ NUTS_PASS(nng_sub0_open(&sub));
+ NUTS_PASS(nng_aio_alloc(&aio, NULL, NULL));
+ NUTS_PASS(nng_ctx_open(&ctx, sub));
+
+ nng_aio_stop(aio);
+ nng_ctx_recv(ctx, aio);
+ nng_aio_wait(aio);
+ NUTS_FAIL(nng_aio_result(aio), NNG_ECANCELED);
+ NUTS_PASS(nng_ctx_close(ctx));
+ NUTS_CLOSE(sub);
+ nng_aio_free(aio);
+}
+
+static void
+test_sub_close_context_recv(void)
+{
+ nng_socket sub;
+ nng_ctx ctx;
+ nng_aio * aio;
+
+ NUTS_PASS(nng_sub0_open(&sub));
+ NUTS_PASS(nng_ctx_open(&ctx, sub));
+ NUTS_PASS(nng_aio_alloc(&aio, NULL, NULL));
+ nng_aio_set_timeout(aio, 1000);
+ nng_ctx_recv(ctx, aio);
+ NUTS_PASS(nng_ctx_close(ctx));
+ nng_aio_wait(aio);
+ NUTS_FAIL(nng_aio_result(aio), NNG_ECLOSED);
+
+ NUTS_CLOSE(sub);
+ nng_aio_free(aio);
+}
+
+static void
+test_sub_ctx_recv_nonblock(void)
+{
+ nng_socket sub;
+ nng_ctx ctx;
+ nng_aio * aio;
+
+ NUTS_PASS(nng_sub0_open(&sub));
+ NUTS_PASS(nng_ctx_open(&ctx, sub));
+ NUTS_PASS(nng_aio_alloc(&aio, NULL, NULL));
+
+ nng_aio_set_timeout(aio, 0); // Instant timeout
+ nng_ctx_recv(ctx, aio);
+
+ nng_aio_wait(aio);
+ NUTS_FAIL(nng_aio_result(aio), NNG_ETIMEDOUT);
+ NUTS_CLOSE(sub);
+ nng_aio_free(aio);
+}
+
+static void
+test_sub_ctx_recv_cancel(void)
+{
+ nng_socket sub;
+ nng_ctx ctx;
+ nng_aio * aio;
+
+ NUTS_PASS(nng_sub0_open(&sub));
+ NUTS_PASS(nng_ctx_open(&ctx, sub));
+ NUTS_PASS(nng_aio_alloc(&aio, NULL, NULL));
+
+ nng_aio_set_timeout(aio, 1000);
+ nng_ctx_recv(ctx, aio);
+ nng_aio_abort(aio, NNG_ECANCELED);
+
+ nng_aio_wait(aio);
+ NUTS_FAIL(nng_aio_result(aio), NNG_ECANCELED);
+ NUTS_CLOSE(sub);
+ nng_aio_free(aio);
+}
+
+static void
+test_pub_send_buf_option(void)
+{
+ nng_socket pub;
+ int v;
+ bool b;
+ size_t sz;
+ const char *opt = NNG_OPT_SENDBUF;
+
+ NUTS_PASS(nng_pub0_open(&pub));
+
+ NUTS_PASS(nng_socket_set_int(pub, opt, 1));
+ NUTS_FAIL(nng_socket_set_int(pub, opt, 0), NNG_EINVAL);
+ NUTS_FAIL(nng_socket_set_int(pub, opt, -1), NNG_EINVAL);
+ NUTS_FAIL(nng_socket_set_int(pub, opt, 1000000), NNG_EINVAL);
+ NUTS_PASS(nng_socket_set_int(pub, opt, 3));
+ NUTS_PASS(nng_socket_get_int(pub, opt, &v));
+ NUTS_TRUE(v == 3);
+ v = 0;
+ sz = sizeof(v);
+ NUTS_PASS(nng_socket_get(pub, opt, &v, &sz));
+ NUTS_TRUE(v == 3);
+ NUTS_TRUE(sz == sizeof(v));
+
+ NUTS_FAIL(nng_socket_set(pub, opt, "", 1), NNG_EINVAL);
+ sz = 1;
+ NUTS_FAIL(nng_socket_get(pub, opt, &v, &sz), NNG_EINVAL);
+ NUTS_FAIL(nng_socket_set_bool(pub, opt, true), NNG_EBADTYPE);
+ NUTS_FAIL(nng_socket_get_bool(pub, opt, &b), NNG_EBADTYPE);
+
+ NUTS_CLOSE(pub);
+}
+
+static void
+test_pub_cooked(void)
+{
+ nng_socket s;
+ bool b;
+
+ NUTS_PASS(nng_pub0_open(&s));
+ NUTS_PASS(nng_socket_get_bool(s, NNG_OPT_RAW, &b));
+ NUTS_TRUE(!b);
+ NUTS_FAIL(nng_socket_set_bool(s, NNG_OPT_RAW, true), NNG_EREADONLY);
+ NUTS_PASS(nng_close(s));
+
+ // raw pub only differs in the option setting
+ NUTS_PASS(nng_pub0_open_raw(&s));
+ NUTS_PASS(nng_socket_get_bool(s, NNG_OPT_RAW, &b));
+ NUTS_TRUE(b);
+ NUTS_CLOSE(s);
+}
+
+NUTS_TESTS = {
+ { "pub identity", test_pub_identity },
+ { "pub cannot recv", test_pub_cannot_recv },
+	{ "pub no context", test_pub_no_context },
+ { "pub not readable", test_pub_not_readable },
+ { "pub poll writeable", test_pub_poll_writeable },
+ { "pub validate peer", test_pub_validate_peer },
+ { "pub send queued", test_pub_send_queued },
+ { "pub send no pipes", test_pub_send_no_pipes },
+ { "sub recv ctx closed", test_sub_recv_ctx_closed },
+ { "sub recv aio ctx stopped", test_sub_ctx_recv_aio_stopped },
+ { "sub close context recv", test_sub_close_context_recv },
+ { "sub context recv nonblock", test_sub_ctx_recv_nonblock },
+ { "sub context recv cancel", test_sub_ctx_recv_cancel },
+ { "pub send buf option", test_pub_send_buf_option },
+ { "pub cooked", test_pub_cooked },
+ { NULL, NULL },
+};
diff --git a/src/sp/protocol/pubsub0/sub.c b/src/sp/protocol/pubsub0/sub.c
new file mode 100644
index 00000000..9f3f2283
--- /dev/null
+++ b/src/sp/protocol/pubsub0/sub.c
@@ -0,0 +1,755 @@
+//
+// Copyright 2020 Staysail Systems, Inc. <info@staysail.tech>
+// Copyright 2018 Capitar IT Group BV <info@capitar.com>
+// Copyright 2019 Nathan Kent <nate@nkent.net>
+//
+// This software is supplied under the terms of the MIT License, a
+// copy of which should be located in the distribution where this
+// file was obtained (LICENSE.txt). A copy of the license may also be
+// found online at https://opensource.org/licenses/MIT.
+//
+
+#include <stdbool.h>
+#include <string.h>
+
+#include "core/nng_impl.h"
+#include "nng/protocol/pubsub0/sub.h"
+
+// Subscriber protocol. The SUB protocol receives messages sent to
+// it from publishers, and filters out those it is not interested in,
+// only passing up ones that match known subscriptions.
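+//
+// A minimal usage sketch (illustrative only; the calls and option names
+// below are the public ones exercised by the tests in this directory,
+// and the inproc address is just an example):
+//
+//     nng_socket sub;
+//     nng_sub0_open(&sub);
+//     nng_socket_set(sub, NNG_OPT_SUB_SUBSCRIBE, "topic:", 6); // prefix match
+//     nng_dial(sub, "inproc://example", NULL, 0);
+//     // nng_recv() now only returns messages whose body begins with "topic:".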
+
+#ifndef NNI_PROTO_SUB_V0
+#define NNI_PROTO_SUB_V0 NNI_PROTO(2, 1)
+#endif
+
+#ifndef NNI_PROTO_PUB_V0
+#define NNI_PROTO_PUB_V0 NNI_PROTO(2, 0)
+#endif
+
+// By default we accept 128 messages.
+#define SUB0_DEFAULT_RECV_BUF_LEN 128
+
+// By default, prefer new messages when the queue is full.
+#define SUB0_DEFAULT_PREFER_NEW true
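+
+// Both defaults can be changed at run time: the queue depth via
+// NNG_OPT_RECVBUF and the drop policy via NNG_OPT_SUB_PREFNEW (see the
+// option tables at the bottom of this file).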
+
+typedef struct sub0_pipe sub0_pipe;
+typedef struct sub0_sock sub0_sock;
+typedef struct sub0_ctx sub0_ctx;
+typedef struct sub0_topic sub0_topic;
+
+static void sub0_recv_cb(void *);
+static void sub0_pipe_fini(void *);
+
+struct sub0_topic {
+ nni_list_node node;
+ size_t len;
+ void * buf;
+};
+
+// sub0_ctx is a context for a SUB socket. The advantage of contexts is
+// that different contexts can maintain different subscriptions.
+struct sub0_ctx {
+ nni_list_node node;
+ sub0_sock * sock;
+ nni_list topics; // TODO: Consider patricia trie
+ nni_list recv_queue; // can have multiple pending receives
+ nni_lmq lmq;
+ bool prefer_new;
+};
+
+// sub0_sock is our per-socket protocol private structure.
+struct sub0_sock {
+ nni_pollable readable;
+ sub0_ctx master; // default context
+ nni_list contexts; // all contexts
+ int num_contexts;
+ size_t recv_buf_len;
+ bool prefer_new;
+ nni_mtx lk;
+};
+
+// sub0_pipe is our per-pipe protocol private structure.
+struct sub0_pipe {
+ nni_pipe * pipe;
+ sub0_sock *sub;
+ nni_aio aio_recv;
+};
+
+static void
+sub0_ctx_cancel(nng_aio *aio, void *arg, int rv)
+{
+ sub0_ctx * ctx = arg;
+ sub0_sock *sock = ctx->sock;
+ nni_mtx_lock(&sock->lk);
+ if (nni_list_active(&ctx->recv_queue, aio)) {
+ nni_list_remove(&ctx->recv_queue, aio);
+ nni_aio_finish_error(aio, rv);
+ }
+ nni_mtx_unlock(&sock->lk);
+}
+
+static void
+sub0_ctx_recv(void *arg, nni_aio *aio)
+{
+ sub0_ctx * ctx = arg;
+ sub0_sock *sock = ctx->sock;
+ nni_msg * msg;
+
+ if (nni_aio_begin(aio) != 0) {
+ return;
+ }
+
+ nni_mtx_lock(&sock->lk);
+
+again:
+ if (nni_lmq_empty(&ctx->lmq)) {
+ int rv;
+ if ((rv = nni_aio_schedule(aio, sub0_ctx_cancel, ctx)) != 0) {
+ nni_mtx_unlock(&sock->lk);
+ nni_aio_finish_error(aio, rv);
+ return;
+ }
+ nni_list_append(&ctx->recv_queue, aio);
+ nni_mtx_unlock(&sock->lk);
+ return;
+ }
+
+ (void) nni_lmq_getq(&ctx->lmq, &msg);
+
+ if (nni_lmq_empty(&ctx->lmq) && (ctx == &sock->master)) {
+ nni_pollable_clear(&sock->readable);
+ }
+ if ((msg = nni_msg_unique(msg)) == NULL) {
+ goto again;
+ }
+ nni_aio_set_msg(aio, msg);
+ nni_mtx_unlock(&sock->lk);
+ nni_aio_finish(aio, 0, nni_msg_len(msg));
+}
+
+static void
+sub0_ctx_send(void *arg, nni_aio *aio)
+{
+ NNI_ARG_UNUSED(arg);
+ if (nni_aio_begin(aio) == 0) {
+ nni_aio_finish_error(aio, NNG_ENOTSUP);
+ }
+}
+
+static void
+sub0_ctx_close(void *arg)
+{
+ sub0_ctx * ctx = arg;
+ sub0_sock *sock = ctx->sock;
+ nni_aio * aio;
+
+ nni_mtx_lock(&sock->lk);
+ while ((aio = nni_list_first(&ctx->recv_queue)) != NULL) {
+ nni_list_remove(&ctx->recv_queue, aio);
+ nni_aio_finish_error(aio, NNG_ECLOSED);
+ }
+ nni_mtx_unlock(&sock->lk);
+}
+
+static void
+sub0_ctx_fini(void *arg)
+{
+ sub0_ctx * ctx = arg;
+ sub0_sock * sock = ctx->sock;
+ sub0_topic *topic;
+
+ sub0_ctx_close(ctx);
+
+ nni_mtx_lock(&sock->lk);
+ nni_list_remove(&sock->contexts, ctx);
+ sock->num_contexts--;
+ nni_mtx_unlock(&sock->lk);
+
+ while ((topic = nni_list_first(&ctx->topics)) != 0) {
+ nni_list_remove(&ctx->topics, topic);
+ nni_free(topic->buf, topic->len);
+ NNI_FREE_STRUCT(topic);
+ }
+
+ nni_lmq_fini(&ctx->lmq);
+}
+
+static int
+sub0_ctx_init(void *ctx_arg, void *sock_arg)
+{
+ sub0_sock *sock = sock_arg;
+ sub0_ctx * ctx = ctx_arg;
+ size_t len;
+ bool prefer_new;
+ int rv;
+
+ nni_mtx_lock(&sock->lk);
+ len = sock->recv_buf_len;
+ prefer_new = sock->prefer_new;
+
+	if ((rv = nni_lmq_init(&ctx->lmq, len)) != 0) {
+		nni_mtx_unlock(&sock->lk);
+		return (rv);
+	}
+ ctx->prefer_new = prefer_new;
+
+ nni_aio_list_init(&ctx->recv_queue);
+ NNI_LIST_INIT(&ctx->topics, sub0_topic, node);
+
+ ctx->sock = sock;
+
+ nni_list_append(&sock->contexts, ctx);
+ sock->num_contexts++;
+ nni_mtx_unlock(&sock->lk);
+
+ return (0);
+}
+
+static void
+sub0_sock_fini(void *arg)
+{
+ sub0_sock *sock = arg;
+
+ sub0_ctx_fini(&sock->master);
+ nni_pollable_fini(&sock->readable);
+ nni_mtx_fini(&sock->lk);
+}
+
+static int
+sub0_sock_init(void *arg, nni_sock *unused)
+{
+ sub0_sock *sock = arg;
+ int rv;
+
+ NNI_ARG_UNUSED(unused);
+
+ NNI_LIST_INIT(&sock->contexts, sub0_ctx, node);
+ nni_mtx_init(&sock->lk);
+ sock->recv_buf_len = SUB0_DEFAULT_RECV_BUF_LEN;
+ sock->prefer_new = SUB0_DEFAULT_PREFER_NEW;
+ nni_pollable_init(&sock->readable);
+
+ if ((rv = sub0_ctx_init(&sock->master, sock)) != 0) {
+ sub0_sock_fini(sock);
+ return (rv);
+ }
+
+ return (0);
+}
+
+static void
+sub0_sock_open(void *arg)
+{
+ NNI_ARG_UNUSED(arg);
+}
+
+static void
+sub0_sock_close(void *arg)
+{
+ sub0_sock *sock = arg;
+ sub0_ctx_close(&sock->master);
+}
+
+static void
+sub0_pipe_stop(void *arg)
+{
+ sub0_pipe *p = arg;
+
+ nni_aio_stop(&p->aio_recv);
+}
+
+static void
+sub0_pipe_fini(void *arg)
+{
+ sub0_pipe *p = arg;
+
+ nni_aio_fini(&p->aio_recv);
+}
+
+static int
+sub0_pipe_init(void *arg, nni_pipe *pipe, void *s)
+{
+ sub0_pipe *p = arg;
+
+ nni_aio_init(&p->aio_recv, sub0_recv_cb, p);
+
+ p->pipe = pipe;
+ p->sub = s;
+ return (0);
+}
+
+static int
+sub0_pipe_start(void *arg)
+{
+ sub0_pipe *p = arg;
+
+ if (nni_pipe_peer(p->pipe) != NNI_PROTO_PUB_V0) {
+ // Peer protocol mismatch.
+ return (NNG_EPROTO);
+ }
+
+ nni_pipe_recv(p->pipe, &p->aio_recv);
+ return (0);
+}
+
+static void
+sub0_pipe_close(void *arg)
+{
+ sub0_pipe *p = arg;
+
+ nni_aio_close(&p->aio_recv);
+}
+
+static bool
+sub0_matches(sub0_ctx *ctx, uint8_t *body, size_t len)
+{
+ sub0_topic *topic;
+
+ // This is a naive and trivial matcher. Replace with a real
+ // patricia trie later.
+ NNI_LIST_FOREACH (&ctx->topics, topic) {
+ if (len < topic->len) {
+ continue;
+ }
+ if ((topic->len == 0) ||
+ (memcmp(topic->buf, body, topic->len) == 0)) {
+ return (true);
+ }
+ }
+ return (false);
+}
+
+static void
+sub0_recv_cb(void *arg)
+{
+ sub0_pipe *p = arg;
+ sub0_sock *sock = p->sub;
+ sub0_ctx * ctx;
+ nni_msg * msg;
+ size_t len;
+ uint8_t * body;
+ nni_list finish;
+ nng_aio * aio;
+ nni_msg * dup_msg;
+
+ if (nni_aio_result(&p->aio_recv) != 0) {
+ nni_pipe_close(p->pipe);
+ return;
+ }
+
+ nni_aio_list_init(&finish);
+
+ msg = nni_aio_get_msg(&p->aio_recv);
+ nni_aio_set_msg(&p->aio_recv, NULL);
+ nni_msg_set_pipe(msg, nni_pipe_id(p->pipe));
+
+ body = nni_msg_body(msg);
+ len = nni_msg_len(msg);
+ dup_msg = NULL;
+
+ nni_mtx_lock(&sock->lk);
+ // Go through all contexts. We will try to send up.
+ NNI_LIST_FOREACH (&sock->contexts, ctx) {
+ bool queued = false;
+
+ if (nni_lmq_full(&ctx->lmq) && !ctx->prefer_new) {
+ // Cannot deliver here, as receive buffer is full.
+ continue;
+ }
+
+ if (!sub0_matches(ctx, body, len)) {
+ continue;
+ }
+
+		// This is a performance optimization that ensures we do
+		// not duplicate the message in the common case where
+		// there is only a single context.
+ if (sock->num_contexts > 1) {
+ if (nni_msg_dup(&dup_msg, msg) != 0) {
+ // if we cannot dup it, continue on
+ continue;
+ }
+ } else {
+ // We only have one context, so it's the only
+ // possible message.
+ dup_msg = msg;
+ }
+
+ if (!nni_list_empty(&ctx->recv_queue)) {
+ aio = nni_list_first(&ctx->recv_queue);
+ nni_list_remove(&ctx->recv_queue, aio);
+ nni_aio_set_msg(aio, dup_msg);
+
+ // Save for synchronous completion
+ nni_list_append(&finish, aio);
+ } else if (nni_lmq_full(&ctx->lmq)) {
+ // Make space for the new message.
+ nni_msg *old;
+ (void) nni_lmq_getq(&ctx->lmq, &old);
+ nni_msg_free(old);
+
+ (void) nni_lmq_putq(&ctx->lmq, dup_msg);
+ queued = true;
+
+ } else {
+ (void) nni_lmq_putq(&ctx->lmq, dup_msg);
+ queued = true;
+ }
+ if (queued && ctx == &sock->master) {
+ nni_pollable_raise(&sock->readable);
+ }
+ }
+ nni_mtx_unlock(&sock->lk);
+
+ // NB: This is slightly less efficient in that we may have
+ // created an extra copy in the face of e.g. two subscriptions,
+ // but optimizing this further would require checking the subscription
+ // list twice, adding complexity. If this turns out to be a problem
+ // we could probably add some other sophistication with a counter
+ // and flags on the contexts themselves.
+ if (msg != dup_msg) {
+ // If we didn't just use the message, then free our copy.
+ nni_msg_free(msg);
+ }
+
+ while ((aio = nni_list_first(&finish)) != NULL) {
+ nni_list_remove(&finish, aio);
+ nni_aio_finish_sync(aio, 0, len);
+ }
+
+ nni_pipe_recv(p->pipe, &p->aio_recv);
+}
+
+static int
+sub0_ctx_get_recv_buf_len(void *arg, void *buf, size_t *szp, nni_type t)
+{
+ sub0_ctx * ctx = arg;
+ sub0_sock *sock = ctx->sock;
+ int val;
+ nni_mtx_lock(&sock->lk);
+ val = (int) nni_lmq_cap(&ctx->lmq);
+ nni_mtx_unlock(&sock->lk);
+
+ return (nni_copyout_int(val, buf, szp, t));
+}
+
+static int
+sub0_ctx_set_recv_buf_len(void *arg, const void *buf, size_t sz, nni_type t)
+{
+ sub0_ctx * ctx = arg;
+ sub0_sock *sock = ctx->sock;
+ int val;
+ int rv;
+
+ if ((rv = nni_copyin_int(&val, buf, sz, 1, 8192, t)) != 0) {
+ return (rv);
+ }
+ nni_mtx_lock(&sock->lk);
+ if ((rv = nni_lmq_resize(&ctx->lmq, (size_t) val)) != 0) {
+ nni_mtx_unlock(&sock->lk);
+ return (rv);
+ }
+
+ // If we change the socket, then this will change the queue for
+ // any new contexts. (Previously constructed contexts are unaffected.)
+ if (&sock->master == ctx) {
+ sock->recv_buf_len = (size_t) val;
+ }
+ nni_mtx_unlock(&sock->lk);
+ return (0);
+}
+
+// For now we maintain subscriptions on a simple linked list. As we do
+// not expect to have huge numbers of subscriptions, and as the operation
+// is really O(n), we think this is acceptable. In the future we might
+// decide to replace this with a patricia trie, like old nanomsg had.
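+//
+// Matching itself (sub0_matches above) is a plain prefix compare: a
+// subscription to "ab" accepts any message whose body begins with "ab",
+// and a zero-length subscription accepts every message.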
+
+static int
+sub0_ctx_subscribe(void *arg, const void *buf, size_t sz, nni_type t)
+{
+ sub0_ctx * ctx = arg;
+ sub0_sock * sock = ctx->sock;
+ sub0_topic *topic;
+ sub0_topic *new_topic;
+ NNI_ARG_UNUSED(t);
+
+ nni_mtx_lock(&sock->lk);
+ NNI_LIST_FOREACH (&ctx->topics, topic) {
+ if (topic->len != sz) {
+ continue;
+ }
+ if (memcmp(topic->buf, buf, sz) == 0) {
+ // Already have it.
+ nni_mtx_unlock(&sock->lk);
+ return (0);
+ }
+ }
+ if ((new_topic = NNI_ALLOC_STRUCT(new_topic)) == NULL) {
+ nni_mtx_unlock(&sock->lk);
+ return (NNG_ENOMEM);
+ }
+ if ((sz > 0) && ((new_topic->buf = nni_alloc(sz)) == NULL)) {
+ nni_mtx_unlock(&sock->lk);
+ NNI_FREE_STRUCT(new_topic);
+ return (NNG_ENOMEM);
+ }
+ memcpy(new_topic->buf, buf, sz);
+ new_topic->len = sz;
+ nni_list_append(&ctx->topics, new_topic);
+ nni_mtx_unlock(&sock->lk);
+ return (0);
+}
+
+static int
+sub0_ctx_unsubscribe(void *arg, const void *buf, size_t sz, nni_type t)
+{
+ sub0_ctx * ctx = arg;
+ sub0_sock * sock = ctx->sock;
+ sub0_topic *topic;
+ size_t len;
+ NNI_ARG_UNUSED(t);
+
+ nni_mtx_lock(&sock->lk);
+ NNI_LIST_FOREACH (&ctx->topics, topic) {
+ if (topic->len != sz) {
+ continue;
+ }
+ if (memcmp(topic->buf, buf, sz) == 0) {
+ // Matched!
+ break;
+ }
+ }
+ if (topic == NULL) {
+ nni_mtx_unlock(&sock->lk);
+ return (NNG_ENOENT);
+ }
+ nni_list_remove(&ctx->topics, topic);
+
+ // Now we need to make sure that any messages that are waiting still
+ // match the subscription. We basically just run through the queue
+ // and requeue those messages we need.
+ len = nni_lmq_len(&ctx->lmq);
+ for (size_t i = 0; i < len; i++) {
+ nni_msg *msg;
+
+ (void) nni_lmq_getq(&ctx->lmq, &msg);
+ if (sub0_matches(ctx, nni_msg_body(msg), nni_msg_len(msg))) {
+ (void) nni_lmq_putq(&ctx->lmq, msg);
+ } else {
+ nni_msg_free(msg);
+ }
+ }
+ nni_mtx_unlock(&sock->lk);
+
+ nni_free(topic->buf, topic->len);
+ NNI_FREE_STRUCT(topic);
+ return (0);
+}
+
+static int
+sub0_ctx_get_prefer_new(void *arg, void *buf, size_t *szp, nni_type t)
+{
+ sub0_ctx * ctx = arg;
+ sub0_sock *sock = ctx->sock;
+ bool val;
+
+ nni_mtx_lock(&sock->lk);
+ val = ctx->prefer_new;
+ nni_mtx_unlock(&sock->lk);
+
+ return (nni_copyout_bool(val, buf, szp, t));
+}
+
+static int
+sub0_ctx_set_prefer_new(void *arg, const void *buf, size_t sz, nni_type t)
+{
+ sub0_ctx * ctx = arg;
+ sub0_sock *sock = ctx->sock;
+ bool val;
+ int rv;
+
+ if ((rv = nni_copyin_bool(&val, buf, sz, t)) != 0) {
+ return (rv);
+ }
+
+ nni_mtx_lock(&sock->lk);
+ ctx->prefer_new = val;
+ if (&sock->master == ctx) {
+ sock->prefer_new = val;
+ }
+ nni_mtx_unlock(&sock->lk);
+
+ return (0);
+}
+
+static nni_option sub0_ctx_options[] = {
+ {
+ .o_name = NNG_OPT_RECVBUF,
+ .o_get = sub0_ctx_get_recv_buf_len,
+ .o_set = sub0_ctx_set_recv_buf_len,
+ },
+ {
+ .o_name = NNG_OPT_SUB_SUBSCRIBE,
+ .o_set = sub0_ctx_subscribe,
+ },
+ {
+ .o_name = NNG_OPT_SUB_UNSUBSCRIBE,
+ .o_set = sub0_ctx_unsubscribe,
+ },
+ {
+ .o_name = NNG_OPT_SUB_PREFNEW,
+ .o_get = sub0_ctx_get_prefer_new,
+ .o_set = sub0_ctx_set_prefer_new,
+ },
+ {
+ .o_name = NULL,
+ },
+};
+
+static void
+sub0_sock_send(void *arg, nni_aio *aio)
+{
+ NNI_ARG_UNUSED(arg);
+ if (nni_aio_begin(aio) == 0) {
+ nni_aio_finish_error(aio, NNG_ENOTSUP);
+ }
+}
+
+static void
+sub0_sock_recv(void *arg, nni_aio *aio)
+{
+ sub0_sock *sock = arg;
+
+ sub0_ctx_recv(&sock->master, aio);
+}
+
+static int
+sub0_sock_get_recv_fd(void *arg, void *buf, size_t *szp, nni_opt_type t)
+{
+ sub0_sock *sock = arg;
+ int rv;
+ int fd;
+
+ if ((rv = nni_pollable_getfd(&sock->readable, &fd)) != 0) {
+ return (rv);
+ }
+ return (nni_copyout_int(fd, buf, szp, t));
+}
+
+static int
+sub0_sock_get_recv_buf_len(void *arg, void *buf, size_t *szp, nni_type t)
+{
+ sub0_sock *sock = arg;
+ return (sub0_ctx_get_recv_buf_len(&sock->master, buf, szp, t));
+}
+
+static int
+sub0_sock_set_recv_buf_len(void *arg, const void *buf, size_t sz, nni_type t)
+{
+ sub0_sock *sock = arg;
+ return (sub0_ctx_set_recv_buf_len(&sock->master, buf, sz, t));
+}
+
+static int
+sub0_sock_subscribe(void *arg, const void *buf, size_t sz, nni_type t)
+{
+ sub0_sock *sock = arg;
+ return (sub0_ctx_subscribe(&sock->master, buf, sz, t));
+}
+
+static int
+sub0_sock_unsubscribe(void *arg, const void *buf, size_t sz, nni_type t)
+{
+ sub0_sock *sock = arg;
+ return (sub0_ctx_unsubscribe(&sock->master, buf, sz, t));
+}
+
+static int
+sub0_sock_get_prefer_new(void *arg, void *buf, size_t *szp, nni_type t)
+{
+ sub0_sock *sock = arg;
+ return (sub0_ctx_get_prefer_new(&sock->master, buf, szp, t));
+}
+
+static int
+sub0_sock_set_prefer_new(void *arg, const void *buf, size_t sz, nni_type t)
+{
+ sub0_sock *sock = arg;
+ return (sub0_ctx_set_prefer_new(&sock->master, buf, sz, t));
+}
+
+// The protocol machinery tables below are all static; the only
+// exported (non-static) symbol in this file is nng_sub0_open().
+static nni_proto_pipe_ops sub0_pipe_ops = {
+ .pipe_size = sizeof(sub0_pipe),
+ .pipe_init = sub0_pipe_init,
+ .pipe_fini = sub0_pipe_fini,
+ .pipe_start = sub0_pipe_start,
+ .pipe_close = sub0_pipe_close,
+ .pipe_stop = sub0_pipe_stop,
+};
+
+static nni_proto_ctx_ops sub0_ctx_ops = {
+ .ctx_size = sizeof(sub0_ctx),
+ .ctx_init = sub0_ctx_init,
+ .ctx_fini = sub0_ctx_fini,
+ .ctx_send = sub0_ctx_send,
+ .ctx_recv = sub0_ctx_recv,
+ .ctx_options = sub0_ctx_options,
+};
+
+static nni_option sub0_sock_options[] = {
+ {
+ .o_name = NNG_OPT_SUB_SUBSCRIBE,
+ .o_set = sub0_sock_subscribe,
+ },
+ {
+ .o_name = NNG_OPT_SUB_UNSUBSCRIBE,
+ .o_set = sub0_sock_unsubscribe,
+ },
+ {
+ .o_name = NNG_OPT_RECVFD,
+ .o_get = sub0_sock_get_recv_fd,
+ },
+ {
+ .o_name = NNG_OPT_RECVBUF,
+ .o_get = sub0_sock_get_recv_buf_len,
+ .o_set = sub0_sock_set_recv_buf_len,
+ },
+ {
+ .o_name = NNG_OPT_SUB_PREFNEW,
+ .o_get = sub0_sock_get_prefer_new,
+ .o_set = sub0_sock_set_prefer_new,
+ },
+ // terminate list
+ {
+ .o_name = NULL,
+ },
+};
+
+static nni_proto_sock_ops sub0_sock_ops = {
+ .sock_size = sizeof(sub0_sock),
+ .sock_init = sub0_sock_init,
+ .sock_fini = sub0_sock_fini,
+ .sock_open = sub0_sock_open,
+ .sock_close = sub0_sock_close,
+ .sock_send = sub0_sock_send,
+ .sock_recv = sub0_sock_recv,
+ .sock_options = sub0_sock_options,
+};
+
+static nni_proto sub0_proto = {
+ .proto_version = NNI_PROTOCOL_VERSION,
+ .proto_self = { NNI_PROTO_SUB_V0, "sub" },
+ .proto_peer = { NNI_PROTO_PUB_V0, "pub" },
+ .proto_flags = NNI_PROTO_FLAG_RCV,
+ .proto_sock_ops = &sub0_sock_ops,
+ .proto_pipe_ops = &sub0_pipe_ops,
+ .proto_ctx_ops = &sub0_ctx_ops,
+};
+
+int
+nng_sub0_open(nng_socket *sock)
+{
+ return (nni_proto_open(sock, &sub0_proto));
+}
diff --git a/src/sp/protocol/pubsub0/sub_test.c b/src/sp/protocol/pubsub0/sub_test.c
new file mode 100644
index 00000000..b830ae80
--- /dev/null
+++ b/src/sp/protocol/pubsub0/sub_test.c
@@ -0,0 +1,624 @@
+//
+// Copyright 2020 Staysail Systems, Inc. <info@staysail.tech>
+//
+// This software is supplied under the terms of the MIT License, a
+// copy of which should be located in the distribution where this
+// file was obtained (LICENSE.txt). A copy of the license may also be
+// found online at https://opensource.org/licenses/MIT.
+//
+
+#include <nuts.h>
+
+static void
+test_sub_identity(void)
+{
+ nng_socket s;
+ int p;
+ char * n;
+
+ NUTS_PASS(nng_sub0_open(&s));
+ NUTS_PASS(nng_socket_get_int(s, NNG_OPT_PROTO, &p));
+ NUTS_TRUE(p == NUTS_PROTO(2u, 1u)); // 33
+ NUTS_PASS(nng_socket_get_int(s, NNG_OPT_PEER, &p));
+ NUTS_TRUE(p == NUTS_PROTO(2u, 0u)); // 32
+ NUTS_PASS(nng_socket_get_string(s, NNG_OPT_PROTONAME, &n));
+ NUTS_MATCH(n, "sub");
+ nng_strfree(n);
+ NUTS_PASS(nng_socket_get_string(s, NNG_OPT_PEERNAME, &n));
+ NUTS_MATCH(n, "pub");
+ nng_strfree(n);
+ NUTS_CLOSE(s);
+}
+
+static void
+test_sub_cannot_send(void)
+{
+ nng_socket sub;
+
+ NUTS_PASS(nng_sub0_open(&sub));
+ NUTS_FAIL(nng_send(sub, "", 0, 0), NNG_ENOTSUP);
+ NUTS_CLOSE(sub);
+}
+
+static void
+test_sub_context_cannot_send(void)
+{
+ nng_socket sub;
+ nng_ctx ctx;
+ nng_msg * m;
+ nng_aio * aio;
+
+ NUTS_PASS(nng_sub0_open(&sub));
+ NUTS_PASS(nng_ctx_open(&ctx, sub));
+ NUTS_PASS(nng_msg_alloc(&m, 0));
+ NUTS_PASS(nng_aio_alloc(&aio, NULL, NULL));
+ nng_aio_set_msg(aio, m);
+ nng_aio_set_timeout(aio, 1000);
+ nng_ctx_send(ctx, aio);
+ nng_aio_wait(aio);
+ NUTS_FAIL(nng_aio_result(aio), NNG_ENOTSUP);
+ NUTS_PASS(nng_ctx_close(ctx));
+ NUTS_CLOSE(sub);
+ nng_aio_free(aio);
+ nng_msg_free(m);
+}
+
+static void
+test_sub_not_writeable(void)
+{
+ int fd;
+ nng_socket sub;
+
+ NUTS_PASS(nng_sub0_open(&sub));
+ NUTS_FAIL(nng_socket_get_int(sub, NNG_OPT_SENDFD, &fd), NNG_ENOTSUP);
+ NUTS_CLOSE(sub);
+}
+
+static void
+test_sub_poll_readable(void)
+{
+ int fd;
+ nng_socket pub;
+ nng_socket sub;
+
+ NUTS_PASS(nng_sub0_open(&sub));
+ NUTS_PASS(nng_pub0_open(&pub));
+ NUTS_PASS(nng_socket_set(sub, NNG_OPT_SUB_SUBSCRIBE, "a", 1));
+ NUTS_PASS(nng_socket_set_ms(sub, NNG_OPT_RECVTIMEO, 1000));
+ NUTS_PASS(nng_socket_set_ms(pub, NNG_OPT_SENDTIMEO, 1000));
+ NUTS_PASS(nng_socket_get_int(sub, NNG_OPT_RECVFD, &fd));
+ NUTS_TRUE(fd >= 0);
+
+ // Not readable if not connected!
+ NUTS_TRUE(nuts_poll_fd(fd) == false);
+
+ // Even after connect (no message yet)
+ NUTS_MARRY(pub, sub);
+ NUTS_TRUE(nuts_poll_fd(fd) == false);
+
+ // If we send a message we didn't subscribe to, that doesn't matter.
+ NUTS_SEND(pub, "def");
+ NUTS_SLEEP(100);
+ NUTS_TRUE(nuts_poll_fd(fd) == false);
+
+	// But once the publisher sends a message matching our
+	// subscription, the socket becomes readable.
+ NUTS_SEND(pub, "abc");
+ NUTS_SLEEP(100);
+ NUTS_TRUE(nuts_poll_fd(fd));
+
+ // and receiving makes it no longer ready
+ NUTS_RECV(sub, "abc");
+ NUTS_TRUE(nuts_poll_fd(fd) == false);
+
+ NUTS_CLOSE(pub);
+ NUTS_CLOSE(sub);
+}
+
+static void
+test_sub_recv_late(void)
+{
+ int fd;
+ nng_socket pub;
+ nng_socket sub;
+ nng_aio * aio;
+ nng_msg * msg;
+
+ NUTS_PASS(nng_sub0_open(&sub));
+ NUTS_PASS(nng_pub0_open(&pub));
+ NUTS_PASS(nng_aio_alloc(&aio, NULL, NULL));
+ NUTS_PASS(nng_socket_set(sub, NNG_OPT_SUB_SUBSCRIBE, "", 0));
+ NUTS_PASS(nng_socket_set_ms(sub, NNG_OPT_RECVTIMEO, 1000));
+ NUTS_PASS(nng_socket_set_ms(pub, NNG_OPT_SENDTIMEO, 1000));
+ NUTS_PASS(nng_socket_get_int(sub, NNG_OPT_RECVFD, &fd));
+ NUTS_TRUE(fd >= 0);
+
+ // Not readable if not connected!
+ NUTS_TRUE(nuts_poll_fd(fd) == false);
+
+ // Even after connect (no message yet)
+ NUTS_MARRY(pub, sub);
+ NUTS_TRUE(nuts_poll_fd(fd) == false);
+
+ nng_recv_aio(sub, aio);
+
+	// But once the publisher sends a message, the pending
+	// receive completes.
+ NUTS_SEND(pub, "abc");
+ NUTS_SLEEP(200);
+
+ nng_aio_wait(aio);
+ NUTS_PASS(nng_aio_result(aio));
+ msg = nng_aio_get_msg(aio);
+ nng_aio_set_msg(aio, NULL);
+ NUTS_TRUE(nng_msg_len(msg) == 4);
+ NUTS_MATCH(nng_msg_body(msg), "abc");
+
+ nng_msg_free(msg);
+ nng_aio_free(aio);
+
+ NUTS_CLOSE(pub);
+ NUTS_CLOSE(sub);
+}
+
+void
+test_sub_context_no_poll(void)
+{
+ int fd;
+ nng_socket sub;
+ nng_ctx ctx;
+
+ NUTS_PASS(nng_sub0_open(&sub));
+ NUTS_PASS(nng_ctx_open(&ctx, sub));
+ NUTS_FAIL(nng_ctx_get_int(ctx, NNG_OPT_SENDFD, &fd), NNG_ENOTSUP);
+ NUTS_FAIL(nng_ctx_get_int(ctx, NNG_OPT_RECVFD, &fd), NNG_ENOTSUP);
+ NUTS_PASS(nng_ctx_close(ctx));
+ NUTS_CLOSE(sub);
+}
+
+void
+test_sub_validate_peer(void)
+{
+ nng_socket s1, s2;
+ nng_stat * stats;
+ nng_stat * reject;
+ char * addr;
+
+ NUTS_ADDR(addr, "inproc");
+
+ NUTS_PASS(nng_sub0_open(&s1));
+ NUTS_PASS(nng_sub0_open(&s2));
+
+ NUTS_PASS(nng_listen(s1, addr, NULL, 0));
+ NUTS_PASS(nng_dial(s2, addr, NULL, NNG_FLAG_NONBLOCK));
+
+ NUTS_SLEEP(100);
+ NUTS_PASS(nng_stats_get(&stats));
+
+ NUTS_TRUE(stats != NULL);
+ NUTS_TRUE((reject = nng_stat_find_socket(stats, s1)) != NULL);
+ NUTS_TRUE((reject = nng_stat_find(reject, "reject")) != NULL);
+
+ NUTS_TRUE(nng_stat_type(reject) == NNG_STAT_COUNTER);
+ NUTS_TRUE(nng_stat_value(reject) > 0);
+
+ NUTS_CLOSE(s1);
+ NUTS_CLOSE(s2);
+ nng_stats_free(stats);
+}
+
+static void
+test_sub_recv_ctx_closed(void)
+{
+ nng_socket sub;
+ nng_ctx ctx;
+ nng_aio * aio;
+ NUTS_PASS(nng_sub0_open(&sub));
+ NUTS_PASS(nng_ctx_open(&ctx, sub));
+ NUTS_PASS(nng_aio_alloc(&aio, NULL, NULL));
+ nng_ctx_close(ctx);
+ nng_ctx_recv(ctx, aio);
+ nng_aio_wait(aio);
+ NUTS_FAIL(nng_aio_result(aio), NNG_ECLOSED);
+ nng_aio_free(aio);
+ NUTS_CLOSE(sub);
+}
+
+static void
+test_sub_ctx_recv_aio_stopped(void)
+{
+ nng_socket sub;
+ nng_ctx ctx;
+ nng_aio * aio;
+
+ NUTS_PASS(nng_sub0_open(&sub));
+ NUTS_PASS(nng_aio_alloc(&aio, NULL, NULL));
+ NUTS_PASS(nng_ctx_open(&ctx, sub));
+
+ nng_aio_stop(aio);
+ nng_ctx_recv(ctx, aio);
+ nng_aio_wait(aio);
+ NUTS_FAIL(nng_aio_result(aio), NNG_ECANCELED);
+ NUTS_PASS(nng_ctx_close(ctx));
+ NUTS_CLOSE(sub);
+ nng_aio_free(aio);
+}
+
+static void
+test_sub_close_context_recv(void)
+{
+ nng_socket sub;
+ nng_ctx ctx;
+ nng_aio * aio;
+
+ NUTS_PASS(nng_sub0_open(&sub));
+ NUTS_PASS(nng_ctx_open(&ctx, sub));
+ NUTS_PASS(nng_aio_alloc(&aio, NULL, NULL));
+ nng_aio_set_timeout(aio, 1000);
+ nng_ctx_recv(ctx, aio);
+ NUTS_PASS(nng_ctx_close(ctx));
+ nng_aio_wait(aio);
+ NUTS_FAIL(nng_aio_result(aio), NNG_ECLOSED);
+
+ NUTS_CLOSE(sub);
+ nng_aio_free(aio);
+}
+
+static void
+test_sub_ctx_recv_nonblock(void)
+{
+ nng_socket sub;
+ nng_ctx ctx;
+ nng_aio * aio;
+
+ NUTS_PASS(nng_sub0_open(&sub));
+ NUTS_PASS(nng_ctx_open(&ctx, sub));
+ NUTS_PASS(nng_aio_alloc(&aio, NULL, NULL));
+
+ nng_aio_set_timeout(aio, 0); // Instant timeout
+ nng_ctx_recv(ctx, aio);
+
+ nng_aio_wait(aio);
+ NUTS_FAIL(nng_aio_result(aio), NNG_ETIMEDOUT);
+ NUTS_CLOSE(sub);
+ nng_aio_free(aio);
+}
+
+static void
+test_sub_ctx_recv_cancel(void)
+{
+ nng_socket sub;
+ nng_ctx ctx;
+ nng_aio * aio;
+
+ NUTS_PASS(nng_sub0_open(&sub));
+ NUTS_PASS(nng_ctx_open(&ctx, sub));
+ NUTS_PASS(nng_aio_alloc(&aio, NULL, NULL));
+
+ nng_aio_set_timeout(aio, 1000);
+ nng_ctx_recv(ctx, aio);
+ nng_aio_abort(aio, NNG_ECANCELED);
+
+ nng_aio_wait(aio);
+ NUTS_FAIL(nng_aio_result(aio), NNG_ECANCELED);
+ NUTS_CLOSE(sub);
+ nng_aio_free(aio);
+}
+
+static void
+test_sub_recv_buf_option(void)
+{
+ nng_socket sub;
+ int v;
+ bool b;
+ size_t sz;
+ const char *opt = NNG_OPT_RECVBUF;
+
+ NUTS_PASS(nng_sub0_open(&sub));
+
+ NUTS_PASS(nng_socket_set_int(sub, opt, 1));
+ NUTS_FAIL(nng_socket_set_int(sub, opt, 0), NNG_EINVAL);
+ NUTS_FAIL(nng_socket_set_int(sub, opt, -1), NNG_EINVAL);
+ NUTS_FAIL(nng_socket_set_int(sub, opt, 1000000), NNG_EINVAL);
+ NUTS_PASS(nng_socket_set_int(sub, opt, 3));
+ NUTS_PASS(nng_socket_get_int(sub, opt, &v));
+ NUTS_TRUE(v == 3);
+ v = 0;
+ sz = sizeof(v);
+ NUTS_PASS(nng_socket_get(sub, opt, &v, &sz));
+ NUTS_TRUE(v == 3);
+ NUTS_TRUE(sz == sizeof(v));
+
+ NUTS_FAIL(nng_socket_set(sub, opt, "", 1), NNG_EINVAL);
+ sz = 1;
+ NUTS_FAIL(nng_socket_get(sub, opt, &v, &sz), NNG_EINVAL);
+ NUTS_FAIL(nng_socket_set_bool(sub, opt, true), NNG_EBADTYPE);
+ NUTS_FAIL(nng_socket_get_bool(sub, opt, &b), NNG_EBADTYPE);
+
+ NUTS_CLOSE(sub);
+}
+
+static void
+test_sub_subscribe_option(void)
+{
+ nng_socket sub;
+ size_t sz;
+ int v;
+ const char *opt = NNG_OPT_SUB_SUBSCRIBE;
+
+ NUTS_PASS(nng_sub0_open(&sub));
+
+ NUTS_PASS(nng_socket_set(sub, opt, "abc", 3));
+ NUTS_PASS(nng_socket_set(sub, opt, "abc", 3)); // duplicate
+ NUTS_PASS(nng_socket_set_bool(sub, opt, false));
+ NUTS_PASS(nng_socket_set_int(sub, opt, 32));
+ sz = sizeof(v);
+ NUTS_FAIL(nng_socket_get(sub, opt, &v, &sz), NNG_EWRITEONLY);
+
+ NUTS_CLOSE(sub);
+}
+
+static void
+test_sub_unsubscribe_option(void)
+{
+ nng_socket sub;
+ size_t sz;
+ int v;
+ const char *opt1 = NNG_OPT_SUB_SUBSCRIBE;
+ const char *opt2 = NNG_OPT_SUB_UNSUBSCRIBE;
+
+ NUTS_PASS(nng_sub0_open(&sub));
+
+ NUTS_PASS(nng_socket_set(sub, opt1, "abc", 3));
+ NUTS_FAIL(nng_socket_set(sub, opt2, "abc123", 6), NNG_ENOENT);
+ NUTS_PASS(nng_socket_set(sub, opt2, "abc", 3));
+ NUTS_FAIL(nng_socket_set(sub, opt2, "abc", 3), NNG_ENOENT);
+ NUTS_PASS(nng_socket_set_int(sub, opt1, 32));
+ NUTS_FAIL(nng_socket_set_int(sub, opt2, 23), NNG_ENOENT);
+ NUTS_PASS(nng_socket_set_int(sub, opt2, 32));
+ sz = sizeof(v);
+ NUTS_FAIL(nng_socket_get(sub, opt2, &v, &sz), NNG_EWRITEONLY);
+
+ NUTS_CLOSE(sub);
+}
+
+static void
+test_sub_prefer_new_option(void)
+{
+ nng_socket sub;
+ bool b;
+ size_t sz;
+ const char *opt = NNG_OPT_SUB_PREFNEW;
+
+ NUTS_PASS(nng_sub0_open(&sub));
+
+ NUTS_PASS(nng_socket_set_bool(sub, opt, true));
+ NUTS_PASS(nng_socket_set_bool(sub, opt, false));
+ NUTS_PASS(nng_socket_get_bool(sub, opt, &b));
+ NUTS_TRUE(b == false);
+ sz = sizeof(b);
+ b = true;
+ NUTS_PASS(nng_socket_get(sub, opt, &b, &sz));
+ NUTS_TRUE(b == false);
+ NUTS_TRUE(sz == sizeof(bool));
+
+ NUTS_FAIL(nng_socket_set(sub, opt, "abc", 3), NNG_EINVAL);
+ NUTS_FAIL(nng_socket_set_int(sub, opt, 1), NNG_EBADTYPE);
+
+ NUTS_CLOSE(sub);
+}
+
+void
+test_sub_drop_new(void)
+{
+ nng_socket sub;
+ nng_socket pub;
+ nng_msg * msg;
+
+ NUTS_PASS(nng_sub0_open(&sub));
+ NUTS_PASS(nng_pub0_open(&pub));
+ NUTS_PASS(nng_socket_set_int(sub, NNG_OPT_RECVBUF, 2));
+ NUTS_PASS(nng_socket_set_bool(sub, NNG_OPT_SUB_PREFNEW, false));
+ NUTS_PASS(nng_socket_set(sub, NNG_OPT_SUB_SUBSCRIBE, NULL, 0));
+ NUTS_PASS(nng_socket_set_ms(sub, NNG_OPT_RECVTIMEO, 200));
+ NUTS_PASS(nng_socket_set_ms(pub, NNG_OPT_SENDTIMEO, 1000));
+ NUTS_MARRY(pub, sub);
+ NUTS_SEND(pub, "one");
+ NUTS_SEND(pub, "two");
+ NUTS_SEND(pub, "three");
+ NUTS_SLEEP(100);
+ NUTS_RECV(sub, "one");
+ NUTS_RECV(sub, "two");
+ NUTS_FAIL(nng_recvmsg(sub, &msg, 0), NNG_ETIMEDOUT);
+ NUTS_CLOSE(pub);
+ NUTS_CLOSE(sub);
+}
+
+void
+test_sub_drop_old(void)
+{
+ nng_socket sub;
+ nng_socket pub;
+ nng_msg * msg;
+
+ NUTS_PASS(nng_sub0_open(&sub));
+ NUTS_PASS(nng_pub0_open(&pub));
+ NUTS_PASS(nng_socket_set_int(sub, NNG_OPT_RECVBUF, 2));
+ NUTS_PASS(nng_socket_set_bool(sub, NNG_OPT_SUB_PREFNEW, true));
+ NUTS_PASS(nng_socket_set(sub, NNG_OPT_SUB_SUBSCRIBE, NULL, 0));
+ NUTS_PASS(nng_socket_set_ms(sub, NNG_OPT_RECVTIMEO, 200));
+ NUTS_PASS(nng_socket_set_ms(pub, NNG_OPT_SENDTIMEO, 1000));
+ NUTS_MARRY(pub, sub);
+ NUTS_SEND(pub, "one");
+ NUTS_SEND(pub, "two");
+ NUTS_SEND(pub, "three");
+ NUTS_SLEEP(100);
+ NUTS_RECV(sub, "two");
+ NUTS_RECV(sub, "three");
+ NUTS_FAIL(nng_recvmsg(sub, &msg, 0), NNG_ETIMEDOUT);
+ NUTS_CLOSE(pub);
+ NUTS_CLOSE(sub);
+}
+
+static void
+test_sub_filter(void)
+{
+ nng_socket sub;
+ nng_socket pub;
+ char buf[32];
+ size_t sz;
+
+ NUTS_PASS(nng_sub0_open(&sub));
+ NUTS_PASS(nng_pub0_open(&pub));
+ NUTS_PASS(nng_socket_set_ms(pub, NNG_OPT_SENDTIMEO, 1000));
+ NUTS_PASS(nng_socket_set_ms(sub, NNG_OPT_RECVTIMEO, 1000));
+ NUTS_PASS(nng_socket_set_int(sub, NNG_OPT_RECVBUF, 10));
+
+ // Set up some default filters
+ NUTS_PASS(nng_socket_set(sub, NNG_OPT_SUB_SUBSCRIBE, "abc", 3));
+ NUTS_PASS(nng_socket_set(sub, NNG_OPT_SUB_SUBSCRIBE, "def", 3));
+ NUTS_PASS(nng_socket_set(sub, NNG_OPT_SUB_SUBSCRIBE, "ghi", 3));
+ NUTS_PASS(nng_socket_set(sub, NNG_OPT_SUB_SUBSCRIBE, "jkl", 3));
+
+ NUTS_MARRY(pub, sub);
+
+ NUTS_PASS(nng_send(pub, "def", 3, 0));
+ NUTS_PASS(nng_send(pub, "de", 2, 0)); // will not go through
+ NUTS_PASS(nng_send(pub, "abc123", 6, 0));
+ NUTS_PASS(nng_send(pub, "xzy", 3, 0)); // does not match
+ NUTS_PASS(nng_send(pub, "ghi-drop", 7, 0)); // dropped by unsub
+ NUTS_PASS(nng_send(pub, "jkl-mno", 6, 0));
+
+ NUTS_SLEEP(100);
+ NUTS_PASS(nng_socket_set(sub, NNG_OPT_SUB_UNSUBSCRIBE, "ghi", 3));
+ sz = sizeof(buf);
+ NUTS_PASS(nng_recv(sub, buf, &sz, 0));
+ NUTS_TRUE(sz == 3);
+ NUTS_TRUE(memcmp(buf, "def", 3) == 0);
+
+ sz = sizeof(buf);
+ NUTS_PASS(nng_recv(sub, buf, &sz, 0));
+ NUTS_TRUE(sz == 6);
+ NUTS_TRUE(memcmp(buf, "abc123", 6) == 0);
+
+ sz = sizeof(buf);
+ NUTS_PASS(nng_recv(sub, buf, &sz, 0));
+ NUTS_TRUE(sz == 6);
+ NUTS_TRUE(memcmp(buf, "jkl-mno", 6) == 0);
+
+ NUTS_CLOSE(sub);
+ NUTS_CLOSE(pub);
+}
+
+static void
+test_sub_multi_context(void)
+{
+ nng_socket sub;
+ nng_socket pub;
+ nng_ctx c1;
+ nng_ctx c2;
+ nng_aio * aio1;
+ nng_aio * aio2;
+ nng_msg * m;
+
+ NUTS_PASS(nng_sub0_open(&sub));
+ NUTS_PASS(nng_pub0_open(&pub));
+ NUTS_PASS(nng_aio_alloc(&aio1, NULL, NULL));
+ NUTS_PASS(nng_aio_alloc(&aio2, NULL, NULL));
+ NUTS_PASS(nng_ctx_open(&c1, sub));
+ NUTS_PASS(nng_ctx_open(&c2, sub));
+
+ NUTS_PASS(nng_ctx_set(c1, NNG_OPT_SUB_SUBSCRIBE, "one", 3));
+ NUTS_PASS(nng_ctx_set(c1, NNG_OPT_SUB_SUBSCRIBE, "all", 3));
+
+ NUTS_PASS(nng_ctx_set(c2, NNG_OPT_SUB_SUBSCRIBE, "two", 3));
+ NUTS_PASS(nng_ctx_set(c2, NNG_OPT_SUB_SUBSCRIBE, "all", 3));
+
+ nng_aio_set_timeout(aio1, 100);
+ nng_aio_set_timeout(aio2, 100);
+
+ NUTS_MARRY(pub, sub);
+
+ NUTS_SEND(pub, "one for the money");
+ NUTS_SEND(pub, "all dogs go to heaven");
+ NUTS_SEND(pub, "nobody likes a snitch");
+ NUTS_SEND(pub, "two for the show");
+
+ nng_ctx_recv(c1, aio1);
+ nng_aio_wait(aio1);
+ NUTS_PASS(nng_aio_result(aio1));
+ m = nng_aio_get_msg(aio1);
+ NUTS_MATCH(nng_msg_body(m), "one for the money");
+ nng_msg_free(m);
+
+ nng_ctx_recv(c1, aio1);
+ nng_aio_wait(aio1);
+ NUTS_PASS(nng_aio_result(aio1));
+ m = nng_aio_get_msg(aio1);
+ NUTS_MATCH(nng_msg_body(m), "all dogs go to heaven");
+ nng_msg_free(m);
+
+ nng_ctx_recv(c2, aio1);
+ nng_aio_wait(aio1);
+ NUTS_PASS(nng_aio_result(aio1));
+ m = nng_aio_get_msg(aio1);
+ NUTS_MATCH(nng_msg_body(m), "all dogs go to heaven");
+ nng_msg_free(m);
+
+ nng_ctx_recv(c2, aio1);
+ nng_aio_wait(aio1);
+ NUTS_PASS(nng_aio_result(aio1));
+ m = nng_aio_get_msg(aio1);
+ NUTS_MATCH(nng_msg_body(m), "two for the show");
+ nng_msg_free(m);
+
+ nng_ctx_recv(c1, aio1);
+ nng_ctx_recv(c2, aio2);
+
+ nng_aio_wait(aio1);
+ nng_aio_wait(aio2);
+ NUTS_FAIL(nng_aio_result(aio1), NNG_ETIMEDOUT);
+ NUTS_FAIL(nng_aio_result(aio2), NNG_ETIMEDOUT);
+ NUTS_CLOSE(sub);
+ NUTS_CLOSE(pub);
+ nng_aio_free(aio1);
+ nng_aio_free(aio2);
+}
+
+static void
+test_sub_cooked(void)
+{
+ nng_socket s;
+ bool b;
+
+ NUTS_PASS(nng_sub0_open(&s));
+ NUTS_PASS(nng_socket_get_bool(s, NNG_OPT_RAW, &b));
+ NUTS_TRUE(!b);
+ NUTS_CLOSE(s);
+}
+
+TEST_LIST = {
+ { "sub identity", test_sub_identity },
+ { "sub cannot send", test_sub_cannot_send },
+ { "sub context cannot send", test_sub_context_cannot_send },
+ { "sub not writeable", test_sub_not_writeable },
+ { "sub poll readable", test_sub_poll_readable },
+ { "sub context does not poll", test_sub_context_no_poll },
+ { "sub validate peer", test_sub_validate_peer },
+ { "sub recv late", test_sub_recv_late },
+ { "sub recv ctx closed", test_sub_recv_ctx_closed },
+ { "sub recv aio ctx stopped", test_sub_ctx_recv_aio_stopped },
+ { "sub close context recv", test_sub_close_context_recv },
+ { "sub context recv nonblock", test_sub_ctx_recv_nonblock },
+ { "sub context recv cancel", test_sub_ctx_recv_cancel },
+ { "sub recv buf option", test_sub_recv_buf_option },
+ { "sub subscribe option", test_sub_subscribe_option },
+ { "sub unsubscribe option", test_sub_unsubscribe_option },
+ { "sub prefer new option", test_sub_prefer_new_option },
+ { "sub drop new", test_sub_drop_new },
+ { "sub drop old", test_sub_drop_old },
+ { "sub filter", test_sub_filter },
+ { "sub multi context", test_sub_multi_context },
+ { "sub cooked", test_sub_cooked },
+ { NULL, NULL },
+};
diff --git a/src/sp/protocol/pubsub0/xsub.c b/src/sp/protocol/pubsub0/xsub.c
new file mode 100644
index 00000000..0013b8b3
--- /dev/null
+++ b/src/sp/protocol/pubsub0/xsub.c
@@ -0,0 +1,211 @@
+//
+// Copyright 2020 Staysail Systems, Inc. <info@staysail.tech>
+// Copyright 2018 Capitar IT Group BV <info@capitar.com>
+//
+// This software is supplied under the terms of the MIT License, a
+// copy of which should be located in the distribution where this
+// file was obtained (LICENSE.txt). A copy of the license may also be
+// found online at https://opensource.org/licenses/MIT.
+//
+
+#include <stdlib.h>
+
+#include "core/nng_impl.h"
+#include "nng/protocol/pubsub0/sub.h"
+
+// Subscriber protocol, raw mode (XSUB). Unlike the cooked SUB socket,
+// this variant performs no subscription filtering of its own: every
+// message received from a publisher is passed up, and filtering is
+// left to the application.
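+//
+// A minimal raw-mode sketch (illustrative only; the address is just an
+// example, and all calls shown are public nng API used by the tests in
+// this directory):
+//
+//     nng_socket xsub;
+//     nng_sub0_open_raw(&xsub);
+//     nng_dial(xsub, "inproc://example", NULL, 0);
+//     // Every published message arrives here; the application inspects
+//     // each body and discards anything it does not want.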
+
+#ifndef NNI_PROTO_SUB_V0
+#define NNI_PROTO_SUB_V0 NNI_PROTO(2, 1)
+#endif
+
+#ifndef NNI_PROTO_PUB_V0
+#define NNI_PROTO_PUB_V0 NNI_PROTO(2, 0)
+#endif
+
+typedef struct xsub0_pipe xsub0_pipe;
+typedef struct xsub0_sock xsub0_sock;
+
+static void xsub0_recv_cb(void *);
+static void xsub0_pipe_fini(void *);
+
+// xsub0_sock is our per-socket protocol private structure.
+struct xsub0_sock {
+ nni_msgq *urq;
+ nni_mtx lk;
+};
+
+// sub0_pipe is our per-pipe protocol private structure.
+struct xsub0_pipe {
+ nni_pipe * pipe;
+ xsub0_sock *sub;
+ nni_aio aio_recv;
+};
+
+static int
+xsub0_sock_init(void *arg, nni_sock *sock)
+{
+	xsub0_sock *s = arg;
+
+	nni_mtx_init(&s->lk);
+	s->urq = nni_sock_recvq(sock);
+ return (0);
+}
+
+static void
+xsub0_sock_fini(void *arg)
+{
+ xsub0_sock *s = arg;
+ nni_mtx_fini(&s->lk);
+}
+
+static void
+xsub0_sock_open(void *arg)
+{
+ NNI_ARG_UNUSED(arg);
+}
+
+static void
+xsub0_sock_close(void *arg)
+{
+ NNI_ARG_UNUSED(arg);
+}
+
+static void
+xsub0_pipe_stop(void *arg)
+{
+ xsub0_pipe *p = arg;
+
+ nni_aio_stop(&p->aio_recv);
+}
+
+static void
+xsub0_pipe_fini(void *arg)
+{
+ xsub0_pipe *p = arg;
+
+ nni_aio_fini(&p->aio_recv);
+}
+
+static int
+xsub0_pipe_init(void *arg, nni_pipe *pipe, void *s)
+{
+ xsub0_pipe *p = arg;
+
+ nni_aio_init(&p->aio_recv, xsub0_recv_cb, p);
+
+ p->pipe = pipe;
+ p->sub = s;
+ return (0);
+}
+
+static int
+xsub0_pipe_start(void *arg)
+{
+ xsub0_pipe *p = arg;
+
+ if (nni_pipe_peer(p->pipe) != NNI_PROTO_PUB_V0) {
+ // Peer protocol mismatch.
+ return (NNG_EPROTO);
+ }
+
+ nni_pipe_recv(p->pipe, &p->aio_recv);
+ return (0);
+}
+
+static void
+xsub0_pipe_close(void *arg)
+{
+ xsub0_pipe *p = arg;
+
+ nni_aio_close(&p->aio_recv);
+}
+
+static void
+xsub0_recv_cb(void *arg)
+{
+ xsub0_pipe *p = arg;
+ xsub0_sock *s = p->sub;
+ nni_msgq * urq = s->urq;
+ nni_msg * msg;
+
+ if (nni_aio_result(&p->aio_recv) != 0) {
+ nni_pipe_close(p->pipe);
+ return;
+ }
+
+ msg = nni_aio_get_msg(&p->aio_recv);
+ nni_aio_set_msg(&p->aio_recv, NULL);
+ nni_msg_set_pipe(msg, nni_pipe_id(p->pipe));
+
+ if (nni_msgq_tryput(urq, msg) != 0) {
+ // This only happens for two reasons. For flow control,
+ // in which case we just want to discard the message and
+ // carry on, and for a close of the socket (which is very
+ // hard to achieve, since we close the pipes.) In either
+ // case the easiest thing to do is just free the message
+ // and try again.
+ nni_msg_free(msg);
+ }
+ nni_pipe_recv(p->pipe, &p->aio_recv);
+}
+
+static void
+xsub0_sock_send(void *arg, nni_aio *aio)
+{
+ NNI_ARG_UNUSED(arg);
+ nni_aio_finish_error(aio, NNG_ENOTSUP);
+}
+
+static void
+xsub0_sock_recv(void *arg, nni_aio *aio)
+{
+ xsub0_sock *s = arg;
+
+ nni_msgq_aio_get(s->urq, aio);
+}
+
+// The protocol machinery tables below are all static; the only
+// exported (non-static) symbol in this file is nng_sub0_open_raw().
+static nni_proto_pipe_ops xsub0_pipe_ops = {
+ .pipe_size = sizeof(xsub0_pipe),
+ .pipe_init = xsub0_pipe_init,
+ .pipe_fini = xsub0_pipe_fini,
+ .pipe_start = xsub0_pipe_start,
+ .pipe_close = xsub0_pipe_close,
+ .pipe_stop = xsub0_pipe_stop,
+};
+
+static nni_option xsub0_sock_options[] = {
+ // terminate list
+ {
+ .o_name = NULL,
+ },
+};
+
+static nni_proto_sock_ops xsub0_sock_ops = {
+ .sock_size = sizeof(xsub0_sock),
+ .sock_init = xsub0_sock_init,
+ .sock_fini = xsub0_sock_fini,
+ .sock_open = xsub0_sock_open,
+ .sock_close = xsub0_sock_close,
+ .sock_send = xsub0_sock_send,
+ .sock_recv = xsub0_sock_recv,
+ .sock_options = xsub0_sock_options,
+};
+
+static nni_proto xsub0_proto = {
+ .proto_version = NNI_PROTOCOL_VERSION,
+ .proto_self = { NNI_PROTO_SUB_V0, "sub" },
+ .proto_peer = { NNI_PROTO_PUB_V0, "pub" },
+ .proto_flags = NNI_PROTO_FLAG_RCV | NNI_PROTO_FLAG_RAW,
+ .proto_sock_ops = &xsub0_sock_ops,
+ .proto_pipe_ops = &xsub0_pipe_ops,
+};
+
+int
+nng_sub0_open_raw(nng_socket *sidp)
+{
+ return (nni_proto_open(sidp, &xsub0_proto));
+}
diff --git a/src/sp/protocol/pubsub0/xsub_test.c b/src/sp/protocol/pubsub0/xsub_test.c
new file mode 100644
index 00000000..19815661
--- /dev/null
+++ b/src/sp/protocol/pubsub0/xsub_test.c
@@ -0,0 +1,376 @@
+//
+// Copyright 2020 Staysail Systems, Inc. <info@staysail.tech>
+//
+// This software is supplied under the terms of the MIT License, a
+// copy of which should be located in the distribution where this
+// file was obtained (LICENSE.txt). A copy of the license may also be
+// found online at https://opensource.org/licenses/MIT.
+//
+
+#include <nuts.h>
+
+static void
+test_xsub_identity(void)
+{
+ nng_socket s;
+ int p;
+ char * n;
+
+ NUTS_PASS(nng_sub0_open_raw(&s));
+ NUTS_PASS(nng_socket_get_int(s, NNG_OPT_PROTO, &p));
+ NUTS_TRUE(p == NUTS_PROTO(2u, 1u)); // 33
+ NUTS_PASS(nng_socket_get_int(s, NNG_OPT_PEER, &p));
+ NUTS_TRUE(p == NUTS_PROTO(2u, 0u)); // 32
+ NUTS_PASS(nng_socket_get_string(s, NNG_OPT_PROTONAME, &n));
+ NUTS_MATCH(n, "sub");
+ nng_strfree(n);
+ NUTS_PASS(nng_socket_get_string(s, NNG_OPT_PEERNAME, &n));
+ NUTS_MATCH(n, "pub");
+ nng_strfree(n);
+ NUTS_CLOSE(s);
+}
+
+static void
+test_xsub_cannot_send(void)
+{
+ nng_socket sub;
+
+ NUTS_PASS(nng_sub0_open_raw(&sub));
+ NUTS_FAIL(nng_send(sub, "", 0, 0), NNG_ENOTSUP);
+ NUTS_CLOSE(sub);
+}
+
+static void
+test_xsub_not_writeable(void)
+{
+ int fd;
+ nng_socket sub;
+
+ NUTS_PASS(nng_sub0_open_raw(&sub));
+ NUTS_FAIL(nng_socket_get_int(sub, NNG_OPT_SENDFD, &fd), NNG_ENOTSUP);
+ NUTS_CLOSE(sub);
+}
+
+static void
+test_xsub_poll_readable(void)
+{
+ int fd;
+ nng_socket pub;
+ nng_socket sub;
+
+ NUTS_PASS(nng_sub0_open_raw(&sub));
+ NUTS_PASS(nng_pub0_open(&pub));
+ NUTS_PASS(nng_socket_set_ms(sub, NNG_OPT_RECVTIMEO, 1000));
+ NUTS_PASS(nng_socket_set_ms(pub, NNG_OPT_SENDTIMEO, 1000));
+ NUTS_PASS(nng_socket_get_int(sub, NNG_OPT_RECVFD, &fd));
+ NUTS_TRUE(fd >= 0);
+
+ // Not readable if not connected!
+ NUTS_TRUE(nuts_poll_fd(fd) == false);
+
+ // Even after connect (no message yet)
+ NUTS_MARRY(pub, sub);
+ NUTS_TRUE(nuts_poll_fd(fd) == false);
+
+	// But once the publisher sends a message, it becomes readable.
+ NUTS_SEND(pub, "abc");
+ NUTS_SLEEP(200);
+
+ NUTS_TRUE(nuts_poll_fd(fd));
+
+ // and receiving makes it no longer ready
+ NUTS_RECV(sub, "abc");
+ NUTS_TRUE(nuts_poll_fd(fd) == false);
+
+ NUTS_CLOSE(pub);
+ NUTS_CLOSE(sub);
+}
+
+static void
+test_xsub_recv_late(void)
+{
+ int fd;
+ nng_socket pub;
+ nng_socket sub;
+ nng_aio * aio;
+ nng_msg * msg;
+
+ NUTS_PASS(nng_sub0_open_raw(&sub));
+ NUTS_PASS(nng_pub0_open(&pub));
+ NUTS_PASS(nng_aio_alloc(&aio, NULL, NULL));
+ NUTS_PASS(nng_socket_set_ms(sub, NNG_OPT_RECVTIMEO, 1000));
+ NUTS_PASS(nng_socket_set_ms(pub, NNG_OPT_SENDTIMEO, 1000));
+ NUTS_PASS(nng_socket_get_int(sub, NNG_OPT_RECVFD, &fd));
+ NUTS_TRUE(fd >= 0);
+
+ // Not readable if not connected!
+ NUTS_TRUE(nuts_poll_fd(fd) == false);
+
+ // Even after connect (no message yet)
+ NUTS_MARRY(pub, sub);
+ NUTS_TRUE(nuts_poll_fd(fd) == false);
+
+ nng_recv_aio(sub, aio);
+
+	// But once the publisher sends a message, the pending
+	// receive completes.
+ NUTS_SEND(pub, "abc");
+ NUTS_SLEEP(200);
+
+ nng_aio_wait(aio);
+ NUTS_PASS(nng_aio_result(aio));
+ msg = nng_aio_get_msg(aio);
+ nng_aio_set_msg(aio, NULL);
+ NUTS_TRUE(nng_msg_len(msg) == 4);
+ NUTS_TRUE(strcmp(nng_msg_body(msg), "abc") == 0);
+
+ nng_msg_free(msg);
+ nng_aio_free(aio);
+
+ NUTS_CLOSE(pub);
+ NUTS_CLOSE(sub);
+}
+
+void
+test_xsub_no_context(void)
+{
+ nng_socket sub;
+ nng_ctx ctx;
+
+ NUTS_PASS(nng_sub0_open_raw(&sub));
+ NUTS_FAIL(nng_ctx_open(&ctx, sub), NNG_ENOTSUP);
+ NUTS_CLOSE(sub);
+}
+
+void
+test_xsub_validate_peer(void)
+{
+ nng_socket s1, s2;
+ nng_stat * stats;
+ nng_stat * reject;
+ char * addr;
+
+ NUTS_ADDR(addr, "inproc");
+
+ NUTS_PASS(nng_sub0_open_raw(&s1));
+ NUTS_PASS(nng_sub0_open_raw(&s2));
+
+ NUTS_PASS(nng_listen(s1, addr, NULL, 0));
+ NUTS_PASS(nng_dial(s2, addr, NULL, NNG_FLAG_NONBLOCK));
+
+ NUTS_SLEEP(100);
+ NUTS_PASS(nng_stats_get(&stats));
+
+ NUTS_TRUE(stats != NULL);
+ NUTS_TRUE((reject = nng_stat_find_socket(stats, s1)) != NULL);
+ NUTS_TRUE((reject = nng_stat_find(reject, "reject")) != NULL);
+
+ NUTS_TRUE(nng_stat_type(reject) == NNG_STAT_COUNTER);
+ NUTS_TRUE(nng_stat_value(reject) > 0);
+
+ NUTS_CLOSE(s1);
+ NUTS_CLOSE(s2);
+ nng_stats_free(stats);
+}
+
+static void
+test_xsub_recv_closed(void)
+{
+ nng_socket sub;
+ nng_aio * aio;
+ NUTS_PASS(nng_sub0_open_raw(&sub));
+ NUTS_PASS(nng_aio_alloc(&aio, NULL, NULL));
+ NUTS_CLOSE(sub);
+ nng_recv_aio(sub, aio);
+ nng_aio_wait(aio);
+ NUTS_FAIL(nng_aio_result(aio), NNG_ECLOSED);
+ nng_aio_free(aio);
+}
+
+static void
+test_xsub_close_recv(void)
+{
+ nng_socket sub;
+ nng_aio * aio;
+
+ NUTS_PASS(nng_sub0_open_raw(&sub));
+ NUTS_PASS(nng_aio_alloc(&aio, NULL, NULL));
+ nng_aio_set_timeout(aio, 1000);
+ nng_recv_aio(sub, aio);
+ NUTS_CLOSE(sub);
+ nng_aio_wait(aio);
+ NUTS_FAIL(nng_aio_result(aio), NNG_ECLOSED);
+
+ nng_aio_free(aio);
+}
+
+static void
+test_xsub_recv_nonblock(void)
+{
+ nng_socket sub;
+ nng_aio * aio;
+
+ NUTS_PASS(nng_sub0_open_raw(&sub));
+ NUTS_PASS(nng_aio_alloc(&aio, NULL, NULL));
+
+ nng_aio_set_timeout(aio, 0); // Instant timeout
+ nng_recv_aio(sub, aio);
+
+ nng_aio_wait(aio);
+ NUTS_FAIL(nng_aio_result(aio), NNG_ETIMEDOUT);
+ NUTS_CLOSE(sub);
+ nng_aio_free(aio);
+}
+
+static void
+test_xsub_recv_buf_option(void)
+{
+ nng_socket sub;
+ int v;
+ bool b;
+ size_t sz;
+ const char *opt = NNG_OPT_RECVBUF;
+
+ NUTS_PASS(nng_sub0_open_raw(&sub));
+
+ NUTS_PASS(nng_socket_set_int(sub, opt, 1));
+ NUTS_FAIL(nng_socket_set_int(sub, opt, -1), NNG_EINVAL);
+ NUTS_FAIL(nng_socket_set_int(sub, opt, 1000000), NNG_EINVAL);
+ NUTS_PASS(nng_socket_set_int(sub, opt, 3));
+ NUTS_PASS(nng_socket_get_int(sub, opt, &v));
+ NUTS_TRUE(v == 3);
+ v = 0;
+ sz = sizeof(v);
+ NUTS_PASS(nng_socket_get(sub, opt, &v, &sz));
+ NUTS_TRUE(v == 3);
+ NUTS_TRUE(sz == sizeof(v));
+
+ NUTS_FAIL(nng_socket_set(sub, opt, "", 1), NNG_EINVAL);
+ sz = 1;
+ NUTS_FAIL(nng_socket_get(sub, opt, &v, &sz), NNG_EINVAL);
+ NUTS_FAIL(nng_socket_set_bool(sub, opt, true), NNG_EBADTYPE);
+ NUTS_FAIL(nng_socket_get_bool(sub, opt, &b), NNG_EBADTYPE);
+
+ NUTS_CLOSE(sub);
+}
+
+static void
+test_xsub_subscribe_option(void)
+{
+ nng_socket sub;
+ const char *opt = NNG_OPT_SUB_SUBSCRIBE;
+
+ NUTS_PASS(nng_sub0_open_raw(&sub));
+ NUTS_FAIL(nng_socket_set(sub, opt, "abc", 3), NNG_ENOTSUP);
+ NUTS_CLOSE(sub);
+}
+
+static void
+test_xsub_unsubscribe_option(void)
+{
+ nng_socket sub;
+ const char *opt = NNG_OPT_SUB_UNSUBSCRIBE;
+
+ NUTS_PASS(nng_sub0_open_raw(&sub));
+ NUTS_FAIL(nng_socket_set(sub, opt, "abc", 3), NNG_ENOTSUP);
+ NUTS_CLOSE(sub);
+}
+
+static void
+test_xsub_raw(void)
+{
+ nng_socket s;
+ bool b;
+
+ NUTS_PASS(nng_sub0_open_raw(&s));
+ NUTS_PASS(nng_socket_get_bool(s, NNG_OPT_RAW, &b));
+ NUTS_TRUE(b);
+ NUTS_CLOSE(s);
+}
+
+static void
+test_xsub_close_during_recv(void)
+{
+ nng_socket sub;
+ nng_socket pub;
+
+ NUTS_PASS(nng_sub0_open_raw(&sub));
+ NUTS_PASS(nng_pub0_open(&pub));
+ NUTS_PASS(nng_socket_set_ms(sub, NNG_OPT_RECVTIMEO, 1000));
+ NUTS_PASS(nng_socket_set_ms(pub, NNG_OPT_SENDTIMEO, 100));
+ NUTS_PASS(nng_socket_set_int(sub, NNG_OPT_RECVBUF, 5));
+ NUTS_PASS(nng_socket_set_int(pub, NNG_OPT_SENDBUF, 20));
+
+ NUTS_MARRY(pub, sub);
+
+ for (unsigned i = 0; i < 100; i++) {
+ NUTS_PASS(nng_send(pub, "abc", 3, 0));
+ }
+ NUTS_CLOSE(pub);
+ NUTS_CLOSE(sub);
+}
+
+static void
+test_xsub_close_during_pipe_recv(void)
+{
+ nng_socket sub;
+ nng_socket pub;
+
+ NUTS_PASS(nng_sub0_open_raw(&sub));
+ NUTS_PASS(nng_pub0_open(&pub));
+ NUTS_PASS(nng_socket_set_ms(sub, NNG_OPT_RECVTIMEO, 1000));
+ NUTS_PASS(nng_socket_set_ms(pub, NNG_OPT_SENDTIMEO, 100));
+ NUTS_PASS(nng_socket_set_int(sub, NNG_OPT_RECVBUF, 5));
+ NUTS_PASS(nng_socket_set_int(pub, NNG_OPT_SENDBUF, 20));
+
+ NUTS_MARRY(pub, sub);
+
+ for (unsigned i = 0; i < 100; i++) {
+ int rv;
+ rv = nng_send(pub, "abc", 3, 0);
+ if (rv == NNG_ETIMEDOUT) {
+ break;
+ }
+ NUTS_SLEEP(1);
+ }
+ NUTS_CLOSE(sub);
+}
+
+static void
+test_xsub_recv_aio_stopped(void)
+{
+ nng_socket sub;
+ nng_aio * aio;
+
+ NUTS_PASS(nng_sub0_open_raw(&sub));
+ NUTS_PASS(nng_aio_alloc(&aio, NULL, NULL));
+
+ nng_aio_stop(aio);
+ nng_recv_aio(sub, aio);
+ nng_aio_wait(aio);
+ NUTS_FAIL(nng_aio_result(aio), NNG_ECANCELED);
+ NUTS_CLOSE(sub);
+ nng_aio_free(aio);
+}
+
+TEST_LIST = {
+ { "xsub identity", test_xsub_identity },
+ { "xsub cannot send", test_xsub_cannot_send },
+ { "xsub not writeable", test_xsub_not_writeable },
+ { "xsub poll readable", test_xsub_poll_readable },
+ { "xsub validate peer", test_xsub_validate_peer },
+ { "xsub recv late", test_xsub_recv_late },
+ { "xsub recv closed", test_xsub_recv_closed },
+ { "xsub close recv", test_xsub_close_recv },
+ { "xsub recv nonblock", test_xsub_recv_nonblock },
+ { "xsub recv buf option", test_xsub_recv_buf_option },
+ { "xsub subscribe option", test_xsub_subscribe_option },
+ { "xsub unsubscribe option", test_xsub_unsubscribe_option },
+ { "xsub no context", test_xsub_no_context },
+ { "xsub raw", test_xsub_raw },
+ { "xsub recv aio stopped", test_xsub_recv_aio_stopped },
+	{ "xsub close during recv", test_xsub_close_during_recv },
+ { "xsub close during pipe recv", test_xsub_close_during_pipe_recv },
+ { NULL, NULL },
+};
diff --git a/src/sp/protocol/reqrep0/CMakeLists.txt b/src/sp/protocol/reqrep0/CMakeLists.txt
new file mode 100644
index 00000000..a3cecfd0
--- /dev/null
+++ b/src/sp/protocol/reqrep0/CMakeLists.txt
@@ -0,0 +1,25 @@
+#
+# Copyright 2020 Staysail Systems, Inc. <info@staysail.tech>
+# Copyright 2018 Capitar IT Group BV <info@capitar.com>
+#
+# This software is supplied under the terms of the MIT License, a
+# copy of which should be located in the distribution where this
+# file was obtained (LICENSE.txt). A copy of the license may also be
+# found online at https://opensource.org/licenses/MIT.
+#
+
+# Req/Rep protocol
+nng_directory(reqrep0)
+
+nng_sources_if(NNG_PROTO_REQ0 req.c xreq.c)
+nng_headers_if(NNG_PROTO_REQ0 nng/protocol/reqrep0/req.h)
+nng_defines_if(NNG_PROTO_REQ0 NNG_HAVE_REQ0)
+
+nng_sources_if(NNG_PROTO_REP0 rep.c xrep.c)
+nng_headers_if(NNG_PROTO_REP0 nng/protocol/reqrep0/rep.h)
+nng_defines_if(NNG_PROTO_REP0 NNG_HAVE_REP0)
+
+nng_test(req_test)
+nng_test(rep_test)
+nng_test(xrep_test)
+nng_test(xreq_test)
diff --git a/src/sp/protocol/reqrep0/rep.c b/src/sp/protocol/reqrep0/rep.c
new file mode 100644
index 00000000..aa32d249
--- /dev/null
+++ b/src/sp/protocol/reqrep0/rep.c
@@ -0,0 +1,705 @@
+//
+// Copyright 2020 Staysail Systems, Inc. <info@staysail.tech>
+// Copyright 2018 Capitar IT Group BV <info@capitar.com>
+//
+// This software is supplied under the terms of the MIT License, a
+// copy of which should be located in the distribution where this
+// file was obtained (LICENSE.txt). A copy of the license may also be
+// found online at https://opensource.org/licenses/MIT.
+//
+
+#include <string.h>
+
+#include "core/nng_impl.h"
+#include "nng/protocol/reqrep0/rep.h"
+
+// Response protocol. The REP protocol is the "reply" side of a
+// request-reply pair. This is useful for building RPC servers, for
+// example.
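+//
+// For illustration, a minimal blocking echo server might look like the
+// following sketch (error handling elided; the inproc address is
+// hypothetical, and this code is not part of this file):
+//
+//	nng_socket rep;
+//	char *     buf = NULL;
+//	size_t     sz;
+//
+//	nng_rep0_open(&rep);
+//	nng_listen(rep, "inproc://example", NULL, 0);
+//	for (;;) {
+//		nng_recv(rep, &buf, &sz, NNG_FLAG_ALLOC); // wait for a request
+//		nng_send(rep, buf, sz, NNG_FLAG_ALLOC);   // echo it back as the reply
+//	}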
+
+typedef struct rep0_pipe rep0_pipe;
+typedef struct rep0_sock rep0_sock;
+typedef struct rep0_ctx rep0_ctx;
+
+static void rep0_pipe_send_cb(void *);
+static void rep0_pipe_recv_cb(void *);
+static void rep0_pipe_fini(void *);
+
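+// rep0_ctx is our per-context protocol private structure.  It records
+// the backtrace and originating pipe ID of the request currently being
+// serviced, so that the reply can be routed back to the requester.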
+struct rep0_ctx {
+ rep0_sock * sock;
+ uint32_t pipe_id;
+ rep0_pipe * spipe; // send pipe
+ nni_aio * saio; // send aio
+ nni_aio * raio; // recv aio
+ nni_list_node sqnode;
+ nni_list_node rqnode;
+ size_t btrace_len;
+ uint32_t btrace[NNI_MAX_MAX_TTL + 1];
+};
+
+// rep0_sock is our per-socket protocol private structure.
+struct rep0_sock {
+ nni_mtx lk;
+ nni_atomic_int ttl;
+ nni_id_map pipes;
+ nni_list recvpipes; // list of pipes with data to receive
+ nni_list recvq;
+ rep0_ctx ctx;
+ nni_pollable readable;
+ nni_pollable writable;
+};
+
+// rep0_pipe is our per-pipe protocol private structure.
+struct rep0_pipe {
+ nni_pipe * pipe;
+ rep0_sock * rep;
+ uint32_t id;
+ nni_aio aio_send;
+ nni_aio aio_recv;
+ nni_list_node rnode; // receivable list linkage
+ nni_list sendq; // contexts waiting to send
+ bool busy;
+ bool closed;
+};
+
+static void
+rep0_ctx_close(void *arg)
+{
+ rep0_ctx * ctx = arg;
+ rep0_sock *s = ctx->sock;
+ nni_aio * aio;
+
+ nni_mtx_lock(&s->lk);
+ if ((aio = ctx->saio) != NULL) {
+ rep0_pipe *pipe = ctx->spipe;
+ ctx->saio = NULL;
+ ctx->spipe = NULL;
+ nni_list_remove(&pipe->sendq, ctx);
+ nni_aio_finish_error(aio, NNG_ECLOSED);
+ }
+ if ((aio = ctx->raio) != NULL) {
+ nni_list_remove(&s->recvq, ctx);
+ ctx->raio = NULL;
+ nni_aio_finish_error(aio, NNG_ECLOSED);
+ }
+ nni_mtx_unlock(&s->lk);
+}
+
+static void
+rep0_ctx_fini(void *arg)
+{
+ rep0_ctx *ctx = arg;
+
+ rep0_ctx_close(ctx);
+}
+
+static int
+rep0_ctx_init(void *carg, void *sarg)
+{
+ rep0_sock *s = sarg;
+ rep0_ctx * ctx = carg;
+
+ NNI_LIST_NODE_INIT(&ctx->sqnode);
+ NNI_LIST_NODE_INIT(&ctx->rqnode);
+ ctx->btrace_len = 0;
+ ctx->sock = s;
+ ctx->pipe_id = 0;
+
+ return (0);
+}
+
+static void
+rep0_ctx_cancel_send(nni_aio *aio, void *arg, int rv)
+{
+ rep0_ctx * ctx = arg;
+ rep0_sock *s = ctx->sock;
+
+ nni_mtx_lock(&s->lk);
+ if (ctx->saio != aio) {
+ nni_mtx_unlock(&s->lk);
+ return;
+ }
+ nni_list_node_remove(&ctx->sqnode);
+ ctx->saio = NULL;
+ nni_mtx_unlock(&s->lk);
+
+ nni_msg_header_clear(nni_aio_get_msg(aio)); // reset the headers
+ nni_aio_finish_error(aio, rv);
+}
+
+static void
+rep0_ctx_send(void *arg, nni_aio *aio)
+{
+ rep0_ctx * ctx = arg;
+ rep0_sock *s = ctx->sock;
+ rep0_pipe *p;
+ nni_msg * msg;
+ int rv;
+ size_t len;
+ uint32_t p_id; // pipe id
+
+ msg = nni_aio_get_msg(aio);
+ nni_msg_header_clear(msg);
+
+ if (nni_aio_begin(aio) != 0) {
+ return;
+ }
+
+ nni_mtx_lock(&s->lk);
+ len = ctx->btrace_len;
+ p_id = ctx->pipe_id;
+
+	// Mark "completion" of the previous request.  This preserves the
+	// exactly-one-send-per-receive ordering.
+ ctx->btrace_len = 0;
+ ctx->pipe_id = 0;
+
+ if (ctx == &s->ctx) {
+ // No matter how this goes, we will no longer be able
+ // to send on the socket (root context). That's because
+ // we will have finished (successfully or otherwise) the
+ // reply for the single request we got.
+ nni_pollable_clear(&s->writable);
+ }
+ if (len == 0) {
+ nni_mtx_unlock(&s->lk);
+ nni_aio_finish_error(aio, NNG_ESTATE);
+ return;
+ }
+ if ((rv = nni_msg_header_append(msg, ctx->btrace, len)) != 0) {
+ nni_mtx_unlock(&s->lk);
+ nni_aio_finish_error(aio, rv);
+ return;
+ }
+ if ((p = nni_id_get(&s->pipes, p_id)) == NULL) {
+ // Pipe is gone. Make this look like a good send to avoid
+ // disrupting the state machine. We don't care if the peer
+ // lost interest in our reply.
+ nni_mtx_unlock(&s->lk);
+ nni_aio_set_msg(aio, NULL);
+ nni_aio_finish(aio, 0, nni_msg_len(msg));
+ nni_msg_free(msg);
+ return;
+ }
+ if (!p->busy) {
+ p->busy = true;
+ len = nni_msg_len(msg);
+ nni_aio_set_msg(&p->aio_send, msg);
+ nni_pipe_send(p->pipe, &p->aio_send);
+ nni_mtx_unlock(&s->lk);
+
+ nni_aio_set_msg(aio, NULL);
+ nni_aio_finish(aio, 0, len);
+ return;
+ }
+
+ if ((rv = nni_aio_schedule(aio, rep0_ctx_cancel_send, ctx)) != 0) {
+ nni_mtx_unlock(&s->lk);
+ nni_aio_finish_error(aio, rv);
+ return;
+ }
+
+ ctx->saio = aio;
+ ctx->spipe = p;
+ nni_list_append(&p->sendq, ctx);
+ nni_mtx_unlock(&s->lk);
+}
+
+static void
+rep0_sock_fini(void *arg)
+{
+ rep0_sock *s = arg;
+
+ nni_id_map_fini(&s->pipes);
+ rep0_ctx_fini(&s->ctx);
+ nni_pollable_fini(&s->writable);
+ nni_pollable_fini(&s->readable);
+ nni_mtx_fini(&s->lk);
+}
+
+static int
+rep0_sock_init(void *arg, nni_sock *sock)
+{
+ rep0_sock *s = arg;
+
+ NNI_ARG_UNUSED(sock);
+
+ nni_mtx_init(&s->lk);
+ nni_id_map_init(&s->pipes, 0, 0, false);
+ NNI_LIST_INIT(&s->recvq, rep0_ctx, rqnode);
+ NNI_LIST_INIT(&s->recvpipes, rep0_pipe, rnode);
+ nni_atomic_init(&s->ttl);
+ nni_atomic_set(&s->ttl, 8);
+
+ (void) rep0_ctx_init(&s->ctx, s);
+
+ // We start off without being either readable or writable.
+ // Readability comes when there is something on the socket.
+ nni_pollable_init(&s->writable);
+ nni_pollable_init(&s->readable);
+
+ return (0);
+}
+
+static void
+rep0_sock_open(void *arg)
+{
+ NNI_ARG_UNUSED(arg);
+}
+
+static void
+rep0_sock_close(void *arg)
+{
+ rep0_sock *s = arg;
+
+ rep0_ctx_close(&s->ctx);
+}
+
+static void
+rep0_pipe_stop(void *arg)
+{
+ rep0_pipe *p = arg;
+
+ nni_aio_stop(&p->aio_send);
+ nni_aio_stop(&p->aio_recv);
+}
+
+static void
+rep0_pipe_fini(void *arg)
+{
+ rep0_pipe *p = arg;
+ nng_msg * msg;
+
+ if ((msg = nni_aio_get_msg(&p->aio_recv)) != NULL) {
+ nni_aio_set_msg(&p->aio_recv, NULL);
+ nni_msg_free(msg);
+ }
+
+ nni_aio_fini(&p->aio_send);
+ nni_aio_fini(&p->aio_recv);
+}
+
+static int
+rep0_pipe_init(void *arg, nni_pipe *pipe, void *s)
+{
+ rep0_pipe *p = arg;
+
+ nni_aio_init(&p->aio_send, rep0_pipe_send_cb, p);
+ nni_aio_init(&p->aio_recv, rep0_pipe_recv_cb, p);
+
+ NNI_LIST_INIT(&p->sendq, rep0_ctx, sqnode);
+
+ p->id = nni_pipe_id(pipe);
+ p->pipe = pipe;
+ p->rep = s;
+ return (0);
+}
+
+static int
+rep0_pipe_start(void *arg)
+{
+ rep0_pipe *p = arg;
+ rep0_sock *s = p->rep;
+ int rv;
+
+ if (nni_pipe_peer(p->pipe) != NNG_REP0_PEER) {
+ // Peer protocol mismatch.
+ return (NNG_EPROTO);
+ }
+
+ nni_mtx_lock(&s->lk);
+ rv = nni_id_set(&s->pipes, nni_pipe_id(p->pipe), p);
+ nni_mtx_unlock(&s->lk);
+ if (rv != 0) {
+ return (rv);
+ }
+ // By definition, we have not received a request yet on this pipe,
+ // so it cannot cause us to become writable.
+ nni_pipe_recv(p->pipe, &p->aio_recv);
+ return (0);
+}
+
+static void
+rep0_pipe_close(void *arg)
+{
+ rep0_pipe *p = arg;
+ rep0_sock *s = p->rep;
+ rep0_ctx * ctx;
+
+ nni_aio_close(&p->aio_send);
+ nni_aio_close(&p->aio_recv);
+
+ nni_mtx_lock(&s->lk);
+ p->closed = true;
+ if (nni_list_active(&s->recvpipes, p)) {
+ // We are no longer "receivable".
+ nni_list_remove(&s->recvpipes, p);
+ }
+ while ((ctx = nni_list_first(&p->sendq)) != NULL) {
+ nni_aio *aio;
+ nni_msg *msg;
+ // Pipe was closed. To avoid pushing an error back to the
+ // entire socket, we pretend we completed this successfully.
+ nni_list_remove(&p->sendq, ctx);
+ aio = ctx->saio;
+ ctx->saio = NULL;
+ msg = nni_aio_get_msg(aio);
+ nni_aio_set_msg(aio, NULL);
+ nni_aio_finish(aio, 0, nni_msg_len(msg));
+ nni_msg_free(msg);
+ }
+ if (p->id == s->ctx.pipe_id) {
+ // We "can" send. (Well, not really, but we will happily
+ // accept a message and discard it.)
+ nni_pollable_raise(&s->writable);
+ }
+ nni_id_remove(&s->pipes, nni_pipe_id(p->pipe));
+ nni_mtx_unlock(&s->lk);
+}
+
+static void
+rep0_pipe_send_cb(void *arg)
+{
+ rep0_pipe *p = arg;
+ rep0_sock *s = p->rep;
+ rep0_ctx * ctx;
+ nni_aio * aio;
+ nni_msg * msg;
+ size_t len;
+
+ if (nni_aio_result(&p->aio_send) != 0) {
+ nni_msg_free(nni_aio_get_msg(&p->aio_send));
+ nni_aio_set_msg(&p->aio_send, NULL);
+ nni_pipe_close(p->pipe);
+ return;
+ }
+ nni_mtx_lock(&s->lk);
+ p->busy = false;
+ if ((ctx = nni_list_first(&p->sendq)) == NULL) {
+ // Nothing else to send.
+ if (p->id == s->ctx.pipe_id) {
+			// The pipe is idle again, so the pending reply on
+			// the socket context can go out; mark us writable.
+ nni_pollable_raise(&s->writable);
+ }
+ nni_mtx_unlock(&s->lk);
+ return;
+ }
+
+ nni_list_remove(&p->sendq, ctx);
+ aio = ctx->saio;
+ ctx->saio = NULL;
+ ctx->spipe = NULL;
+ p->busy = true;
+ msg = nni_aio_get_msg(aio);
+ len = nni_msg_len(msg);
+ nni_aio_set_msg(aio, NULL);
+ nni_aio_set_msg(&p->aio_send, msg);
+ nni_pipe_send(p->pipe, &p->aio_send);
+
+ nni_mtx_unlock(&s->lk);
+
+ nni_aio_finish_sync(aio, 0, len);
+}
+
+static void
+rep0_cancel_recv(nni_aio *aio, void *arg, int rv)
+{
+ rep0_ctx * ctx = arg;
+ rep0_sock *s = ctx->sock;
+
+ nni_mtx_lock(&s->lk);
+ if (ctx->raio == aio) {
+ nni_list_remove(&s->recvq, ctx);
+ ctx->raio = NULL;
+ nni_aio_finish_error(aio, rv);
+ }
+ nni_mtx_unlock(&s->lk);
+}
+
+static void
+rep0_ctx_recv(void *arg, nni_aio *aio)
+{
+ rep0_ctx * ctx = arg;
+ rep0_sock *s = ctx->sock;
+ rep0_pipe *p;
+ size_t len;
+ nni_msg * msg;
+
+ if (nni_aio_begin(aio) != 0) {
+ return;
+ }
+ nni_mtx_lock(&s->lk);
+ if ((p = nni_list_first(&s->recvpipes)) == NULL) {
+ int rv;
+ if ((rv = nni_aio_schedule(aio, rep0_cancel_recv, ctx)) != 0) {
+ nni_mtx_unlock(&s->lk);
+ nni_aio_finish_error(aio, rv);
+ return;
+ }
+ if (ctx->raio != NULL) {
+ // Cannot have a second receive operation pending.
+ // This could be ESTATE, or we could cancel the first
+ // with ECANCELED. We elect the former.
+ nni_mtx_unlock(&s->lk);
+ nni_aio_finish_error(aio, NNG_ESTATE);
+ return;
+ }
+ ctx->raio = aio;
+ nni_list_append(&s->recvq, ctx);
+ nni_mtx_unlock(&s->lk);
+ return;
+ }
+ msg = nni_aio_get_msg(&p->aio_recv);
+ nni_aio_set_msg(&p->aio_recv, NULL);
+ nni_list_remove(&s->recvpipes, p);
+ if (nni_list_empty(&s->recvpipes)) {
+ nni_pollable_clear(&s->readable);
+ }
+ nni_pipe_recv(p->pipe, &p->aio_recv);
+ if ((ctx == &s->ctx) && !p->busy) {
+ nni_pollable_raise(&s->writable);
+ }
+
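+	// Stash the backtrace and originating pipe so that the eventual
+	// reply can be routed back to the requester.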
+ len = nni_msg_header_len(msg);
+ memcpy(ctx->btrace, nni_msg_header(msg), len);
+ ctx->btrace_len = len;
+ ctx->pipe_id = nni_pipe_id(p->pipe);
+ nni_mtx_unlock(&s->lk);
+
+ nni_msg_header_clear(msg);
+ nni_aio_set_msg(aio, msg);
+ nni_aio_finish(aio, 0, nni_msg_len(msg));
+}
+
+static void
+rep0_pipe_recv_cb(void *arg)
+{
+ rep0_pipe *p = arg;
+ rep0_sock *s = p->rep;
+ rep0_ctx * ctx;
+ nni_msg * msg;
+ uint8_t * body;
+ nni_aio * aio;
+ size_t len;
+ int hops;
+ int ttl;
+
+ if (nni_aio_result(&p->aio_recv) != 0) {
+ nni_pipe_close(p->pipe);
+ return;
+ }
+
+ msg = nni_aio_get_msg(&p->aio_recv);
+ ttl = nni_atomic_get(&s->ttl);
+
+ nni_msg_set_pipe(msg, p->id);
+
+ // Move backtrace from body to header
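+	// Each backtrace element is a 32-bit value in network byte order.
+	// Intermediate elements are pipe IDs prepended by forwarding
+	// devices; the final element is the peer's request ID, which has
+	// its high-order bit set and terminates this loop.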
+ hops = 1;
+ for (;;) {
+ bool end;
+
+ if (hops > ttl) {
+ // This isn't malformed, but it has gone
+ // through too many hops. Do not disconnect,
+ // because we can legitimately receive messages
+ // with too many hops from devices, etc.
+ goto drop;
+ }
+ hops++;
+ if (nni_msg_len(msg) < 4) {
+ // Peer is speaking garbage. Kick it.
+ nni_msg_free(msg);
+ nni_aio_set_msg(&p->aio_recv, NULL);
+ nni_pipe_close(p->pipe);
+ return;
+ }
+ body = nni_msg_body(msg);
+ end = ((body[0] & 0x80u) != 0);
+ if (nni_msg_header_append(msg, body, 4) != 0) {
+ // Out of memory, so drop it.
+ goto drop;
+ }
+ nni_msg_trim(msg, 4);
+ if (end) {
+ break;
+ }
+ }
+
+ len = nni_msg_header_len(msg);
+
+ nni_mtx_lock(&s->lk);
+
+ if (p->closed) {
+ // If we are closed, then we can't return data.
+ nni_aio_set_msg(&p->aio_recv, NULL);
+ nni_mtx_unlock(&s->lk);
+ nni_msg_free(msg);
+ return;
+ }
+
+ if ((ctx = nni_list_first(&s->recvq)) == NULL) {
+ // No one waiting to receive yet, holding pattern.
+ nni_list_append(&s->recvpipes, p);
+ nni_pollable_raise(&s->readable);
+ nni_mtx_unlock(&s->lk);
+ return;
+ }
+
+ nni_list_remove(&s->recvq, ctx);
+ aio = ctx->raio;
+ ctx->raio = NULL;
+ nni_aio_set_msg(&p->aio_recv, NULL);
+ if ((ctx == &s->ctx) && !p->busy) {
+ nni_pollable_raise(&s->writable);
+ }
+
+ // schedule another receive
+ nni_pipe_recv(p->pipe, &p->aio_recv);
+
+ ctx->btrace_len = len;
+ memcpy(ctx->btrace, nni_msg_header(msg), len);
+ nni_msg_header_clear(msg);
+ ctx->pipe_id = p->id;
+
+ nni_mtx_unlock(&s->lk);
+
+ nni_aio_set_msg(aio, msg);
+ nni_aio_finish_sync(aio, 0, nni_msg_len(msg));
+ return;
+
+drop:
+ nni_msg_free(msg);
+ nni_aio_set_msg(&p->aio_recv, NULL);
+ nni_pipe_recv(p->pipe, &p->aio_recv);
+}
+
+static int
+rep0_sock_set_max_ttl(void *arg, const void *buf, size_t sz, nni_opt_type t)
+{
+ rep0_sock *s = arg;
+ int ttl;
+ int rv;
+
+ if ((rv = nni_copyin_int(&ttl, buf, sz, 1, NNI_MAX_MAX_TTL, t)) == 0) {
+ nni_atomic_set(&s->ttl, ttl);
+ }
+ return (rv);
+}
+
+static int
+rep0_sock_get_max_ttl(void *arg, void *buf, size_t *szp, nni_opt_type t)
+{
+ rep0_sock *s = arg;
+
+ return (nni_copyout_int(nni_atomic_get(&s->ttl), buf, szp, t));
+}
+
+static int
+rep0_sock_get_sendfd(void *arg, void *buf, size_t *szp, nni_opt_type t)
+{
+ rep0_sock *s = arg;
+ int rv;
+ int fd;
+
+ if ((rv = nni_pollable_getfd(&s->writable, &fd)) != 0) {
+ return (rv);
+ }
+ return (nni_copyout_int(fd, buf, szp, t));
+}
+
+static int
+rep0_sock_get_recvfd(void *arg, void *buf, size_t *szp, nni_opt_type t)
+{
+ rep0_sock *s = arg;
+ int rv;
+ int fd;
+
+ if ((rv = nni_pollable_getfd(&s->readable, &fd)) != 0) {
+ return (rv);
+ }
+
+ return (nni_copyout_int(fd, buf, szp, t));
+}
+
+static void
+rep0_sock_send(void *arg, nni_aio *aio)
+{
+ rep0_sock *s = arg;
+
+ rep0_ctx_send(&s->ctx, aio);
+}
+
+static void
+rep0_sock_recv(void *arg, nni_aio *aio)
+{
+ rep0_sock *s = arg;
+
+ rep0_ctx_recv(&s->ctx, aio);
+}
+
+// This is the global protocol structure -- our linkage to the core.
+// This should be the only global non-static symbol in this file.
+static nni_proto_pipe_ops rep0_pipe_ops = {
+ .pipe_size = sizeof(rep0_pipe),
+ .pipe_init = rep0_pipe_init,
+ .pipe_fini = rep0_pipe_fini,
+ .pipe_start = rep0_pipe_start,
+ .pipe_close = rep0_pipe_close,
+ .pipe_stop = rep0_pipe_stop,
+};
+
+static nni_proto_ctx_ops rep0_ctx_ops = {
+ .ctx_size = sizeof(rep0_ctx),
+ .ctx_init = rep0_ctx_init,
+ .ctx_fini = rep0_ctx_fini,
+ .ctx_send = rep0_ctx_send,
+ .ctx_recv = rep0_ctx_recv,
+};
+
+static nni_option rep0_sock_options[] = {
+ {
+ .o_name = NNG_OPT_MAXTTL,
+ .o_get = rep0_sock_get_max_ttl,
+ .o_set = rep0_sock_set_max_ttl,
+ },
+ {
+ .o_name = NNG_OPT_RECVFD,
+ .o_get = rep0_sock_get_recvfd,
+ },
+ {
+ .o_name = NNG_OPT_SENDFD,
+ .o_get = rep0_sock_get_sendfd,
+ },
+ // terminate list
+ {
+ .o_name = NULL,
+ },
+};
+
+static nni_proto_sock_ops rep0_sock_ops = {
+ .sock_size = sizeof(rep0_sock),
+ .sock_init = rep0_sock_init,
+ .sock_fini = rep0_sock_fini,
+ .sock_open = rep0_sock_open,
+ .sock_close = rep0_sock_close,
+ .sock_options = rep0_sock_options,
+ .sock_send = rep0_sock_send,
+ .sock_recv = rep0_sock_recv,
+};
+
+static nni_proto rep0_proto = {
+ .proto_version = NNI_PROTOCOL_VERSION,
+ .proto_self = { NNG_REP0_SELF, NNG_REP0_SELF_NAME },
+ .proto_peer = { NNG_REP0_PEER, NNG_REP0_PEER_NAME },
+ .proto_flags = NNI_PROTO_FLAG_SNDRCV,
+ .proto_sock_ops = &rep0_sock_ops,
+ .proto_pipe_ops = &rep0_pipe_ops,
+ .proto_ctx_ops = &rep0_ctx_ops,
+};
+
+int
+nng_rep0_open(nng_socket *sidp)
+{
+ return (nni_proto_open(sidp, &rep0_proto));
+}
diff --git a/src/sp/protocol/reqrep0/rep_test.c b/src/sp/protocol/reqrep0/rep_test.c
new file mode 100644
index 00000000..5a47e67a
--- /dev/null
+++ b/src/sp/protocol/reqrep0/rep_test.c
@@ -0,0 +1,669 @@
+//
+// Copyright 2020 Staysail Systems, Inc. <info@staysail.tech>
+//
+// This software is supplied under the terms of the MIT License, a
+// copy of which should be located in the distribution where this
+// file was obtained (LICENSE.txt). A copy of the license may also be
+// found online at https://opensource.org/licenses/MIT.
+//
+
+#include <nuts.h>
+
+static void
+test_rep_identity(void)
+{
+ nng_socket s;
+ int p1, p2;
+ char * n1;
+ char * n2;
+
+ NUTS_PASS(nng_rep0_open(&s));
+ NUTS_PASS(nng_socket_get_int(s, NNG_OPT_PROTO, &p1));
+ NUTS_PASS(nng_socket_get_int(s, NNG_OPT_PEER, &p2));
+ NUTS_PASS(nng_socket_get_string(s, NNG_OPT_PROTONAME, &n1));
+ NUTS_PASS(nng_socket_get_string(s, NNG_OPT_PEERNAME, &n2));
+ NUTS_CLOSE(s);
+ NUTS_TRUE(p1 == NNG_REP0_SELF);
+ NUTS_TRUE(p2 == NNG_REP0_PEER);
+ NUTS_MATCH(n1, NNG_REP0_SELF_NAME);
+ NUTS_MATCH(n2, NNG_REP0_PEER_NAME);
+ nng_strfree(n1);
+ nng_strfree(n2);
+}
+
+void
+test_rep_send_bad_state(void)
+{
+ nng_socket rep;
+ nng_msg * msg = NULL;
+
+ NUTS_TRUE(nng_rep0_open(&rep) == 0);
+ NUTS_TRUE(nng_msg_alloc(&msg, 0) == 0);
+ NUTS_TRUE(nng_sendmsg(rep, msg, 0) == NNG_ESTATE);
+ nng_msg_free(msg);
+ NUTS_CLOSE(rep);
+}
+
+void
+test_rep_poll_writeable(void)
+{
+ int fd;
+ nng_socket req;
+ nng_socket rep;
+
+ NUTS_PASS(nng_req0_open(&req));
+ NUTS_PASS(nng_rep0_open(&rep));
+ NUTS_PASS(nng_socket_get_int(rep, NNG_OPT_SENDFD, &fd));
+ NUTS_TRUE(fd >= 0);
+
+ // Not writable before connect.
+ NUTS_TRUE(nuts_poll_fd(fd) == false);
+
+ NUTS_MARRY(req, rep);
+
+ // Still not writable.
+ NUTS_TRUE(nuts_poll_fd(fd) == false);
+
+ // If we get a job, *then* we become writable
+ NUTS_SEND(req, "abc");
+ NUTS_RECV(rep, "abc");
+ NUTS_TRUE(nuts_poll_fd(fd) == true);
+
+ // And is no longer writable once we send a message
+ NUTS_SEND(rep, "def");
+ NUTS_TRUE(nuts_poll_fd(fd) == false);
+ // Even after receiving it
+ NUTS_RECV(req, "def");
+ NUTS_TRUE(nuts_poll_fd(fd) == false);
+
+ NUTS_CLOSE(req);
+ NUTS_CLOSE(rep);
+}
+
+void
+test_rep_poll_readable(void)
+{
+ int fd;
+ nng_socket req;
+ nng_socket rep;
+ nng_msg * msg;
+
+ NUTS_PASS(nng_req0_open(&req));
+ NUTS_PASS(nng_rep0_open(&rep));
+ NUTS_PASS(nng_socket_get_int(rep, NNG_OPT_RECVFD, &fd));
+ NUTS_TRUE(fd >= 0);
+
+ // Not readable if not connected!
+ NUTS_TRUE(nuts_poll_fd(fd) == false);
+
+ // Even after connect (no message yet)
+ NUTS_MARRY(req, rep);
+ NUTS_TRUE(nuts_poll_fd(fd) == false);
+
+ // But once we send messages, it is.
+ // We have to send a request, in order to send a reply.
+ NUTS_SEND(req, "abc");
+ NUTS_SLEEP(100);
+
+ NUTS_TRUE(nuts_poll_fd(fd) == true);
+
+ // and receiving makes it no longer ready
+ NUTS_PASS(nng_recvmsg(rep, &msg, 0));
+ nng_msg_free(msg);
+ NUTS_TRUE(nuts_poll_fd(fd) == false);
+
+ // TODO verify unsolicited response
+
+ NUTS_CLOSE(req);
+ NUTS_CLOSE(rep);
+}
+
+void
+test_rep_context_no_poll(void)
+{
+ int fd;
+ nng_socket req;
+ nng_ctx ctx;
+
+ NUTS_PASS(nng_rep0_open(&req));
+ NUTS_PASS(nng_ctx_open(&ctx, req));
+ NUTS_FAIL(nng_ctx_get_int(ctx, NNG_OPT_SENDFD, &fd), NNG_ENOTSUP);
+ NUTS_FAIL(nng_ctx_get_int(ctx, NNG_OPT_RECVFD, &fd), NNG_ENOTSUP);
+ NUTS_PASS(nng_ctx_close(ctx));
+ NUTS_CLOSE(req);
+}
+
+void
+test_rep_validate_peer(void)
+{
+ nng_socket s1, s2;
+ nng_stat * stats;
+ nng_stat * reject;
+ char * addr;
+
+ NUTS_ADDR(addr, "inproc");
+ NUTS_PASS(nng_rep0_open(&s1));
+ NUTS_PASS(nng_rep0_open(&s2));
+
+ NUTS_PASS(nng_listen(s1, addr, NULL, 0));
+ NUTS_PASS(nng_dial(s2, addr, NULL, NNG_FLAG_NONBLOCK));
+
+ NUTS_SLEEP(100);
+ NUTS_PASS(nng_stats_get(&stats));
+
+ NUTS_TRUE(stats != NULL);
+ NUTS_TRUE((reject = nng_stat_find_socket(stats, s1)) != NULL);
+ NUTS_TRUE((reject = nng_stat_find(reject, "reject")) != NULL);
+
+ NUTS_TRUE(nng_stat_type(reject) == NNG_STAT_COUNTER);
+ NUTS_TRUE(nng_stat_value(reject) > 0);
+
+ NUTS_CLOSE(s1);
+ NUTS_CLOSE(s2);
+ nng_stats_free(stats);
+}
+
+void
+test_rep_double_recv(void)
+{
+ nng_socket s1;
+ nng_aio * aio1;
+ nng_aio * aio2;
+
+ NUTS_PASS(nng_rep0_open(&s1));
+ NUTS_PASS(nng_aio_alloc(&aio1, NULL, NULL));
+ NUTS_PASS(nng_aio_alloc(&aio2, NULL, NULL));
+
+ nng_recv_aio(s1, aio1);
+ nng_recv_aio(s1, aio2);
+
+ nng_aio_wait(aio2);
+ NUTS_FAIL(nng_aio_result(aio2), NNG_ESTATE);
+ NUTS_CLOSE(s1);
+ NUTS_FAIL(nng_aio_result(aio1), NNG_ECLOSED);
+ nng_aio_free(aio1);
+ nng_aio_free(aio2);
+}
+
+void
+test_rep_close_pipe_before_send(void)
+{
+ nng_socket rep;
+ nng_socket req;
+ nng_pipe p;
+ nng_aio * aio1;
+ nng_msg * m;
+
+ NUTS_PASS(nng_rep0_open(&rep));
+ NUTS_PASS(nng_req0_open(&req));
+ NUTS_PASS(nng_socket_set_ms(rep, NNG_OPT_RECVTIMEO, 1000));
+ NUTS_PASS(nng_socket_set_ms(rep, NNG_OPT_SENDTIMEO, 1000));
+ NUTS_PASS(nng_socket_set_ms(req, NNG_OPT_SENDTIMEO, 1000));
+ NUTS_PASS(nng_aio_alloc(&aio1, NULL, NULL));
+
+ NUTS_MARRY(req, rep);
+ NUTS_SEND(req, "test");
+
+ nng_recv_aio(rep, aio1);
+ nng_aio_wait(aio1);
+ NUTS_PASS(nng_aio_result(aio1));
+ NUTS_TRUE((m = nng_aio_get_msg(aio1)) != NULL);
+ p = nng_msg_get_pipe(m);
+ NUTS_PASS(nng_pipe_close(p));
+ NUTS_PASS(nng_sendmsg(rep, m, 0));
+
+ NUTS_CLOSE(req);
+ NUTS_CLOSE(rep);
+ nng_aio_free(aio1);
+}
+
+void
+test_rep_close_pipe_during_send(void)
+{
+ nng_socket rep;
+ nng_socket req;
+ nng_pipe p = NNG_PIPE_INITIALIZER;
+ nng_msg * m;
+
+ NUTS_PASS(nng_rep0_open(&rep));
+ NUTS_PASS(nng_req0_open_raw(&req));
+ NUTS_PASS(nng_socket_set_ms(rep, NNG_OPT_RECVTIMEO, 1000));
+ NUTS_PASS(nng_socket_set_ms(rep, NNG_OPT_SENDTIMEO, 200));
+ NUTS_PASS(nng_socket_set_ms(req, NNG_OPT_SENDTIMEO, 1000));
+ NUTS_PASS(nng_socket_set_int(rep, NNG_OPT_SENDBUF, 20));
+ NUTS_PASS(nng_socket_set_int(rep, NNG_OPT_RECVBUF, 20));
+ NUTS_PASS(nng_socket_set_int(req, NNG_OPT_SENDBUF, 20));
+ NUTS_PASS(nng_socket_set_int(req, NNG_OPT_RECVBUF, 1));
+
+ NUTS_MARRY(req, rep);
+
+ for (int i = 0; i < 100; i++) {
+ int rv;
+ NUTS_PASS(nng_msg_alloc(&m, 4));
+ NUTS_PASS(nng_msg_append_u32(m, (unsigned) i | 0x80000000u));
+ NUTS_PASS(nng_sendmsg(req, m, 0));
+ NUTS_PASS(nng_recvmsg(rep, &m, 0));
+ p = nng_msg_get_pipe(m);
+ rv = nng_sendmsg(rep, m, 0);
+ if (rv == NNG_ETIMEDOUT) {
+ // Queue is backed up, senders are busy.
+ nng_msg_free(m);
+ break;
+ }
+ NUTS_PASS(rv);
+ }
+ NUTS_PASS(nng_pipe_close(p));
+
+ NUTS_CLOSE(req);
+ NUTS_CLOSE(rep);
+}
+
+void
+test_rep_ctx_recv_aio_stopped(void)
+{
+ nng_socket rep;
+ nng_ctx ctx;
+ nng_aio * aio;
+
+ NUTS_PASS(nng_rep0_open(&rep));
+ NUTS_PASS(nng_aio_alloc(&aio, NULL, NULL));
+ NUTS_PASS(nng_ctx_open(&ctx, rep));
+
+ nng_aio_stop(aio);
+ nng_ctx_recv(ctx, aio);
+ nng_aio_wait(aio);
+ NUTS_FAIL(nng_aio_result(aio), NNG_ECANCELED);
+ NUTS_PASS(nng_ctx_close(ctx));
+ NUTS_CLOSE(rep);
+ nng_aio_free(aio);
+}
+
+void
+test_rep_close_pipe_context_send(void)
+{
+ nng_socket rep;
+ nng_socket req;
+ nng_pipe p = NNG_PIPE_INITIALIZER;
+ nng_msg * m;
+ nng_ctx ctx[100];
+ nng_aio * aio[100];
+ int i;
+
+ NUTS_PASS(nng_rep0_open(&rep));
+ NUTS_PASS(nng_req0_open_raw(&req));
+ for (i = 0; i < 100; i++) {
+ NUTS_PASS(nng_ctx_open(&ctx[i], rep));
+ NUTS_PASS(nng_aio_alloc(&aio[i], NULL, NULL));
+ }
+ NUTS_PASS(nng_socket_set_ms(rep, NNG_OPT_RECVTIMEO, 1000));
+ NUTS_PASS(nng_socket_set_ms(rep, NNG_OPT_SENDTIMEO, 1000));
+ NUTS_PASS(nng_socket_set_ms(req, NNG_OPT_SENDTIMEO, 1000));
+ NUTS_PASS(nng_socket_set_int(rep, NNG_OPT_SENDBUF, 1));
+ NUTS_PASS(nng_socket_set_int(rep, NNG_OPT_RECVBUF, 1));
+ NUTS_PASS(nng_socket_set_int(req, NNG_OPT_SENDBUF, 1));
+ NUTS_PASS(nng_socket_set_int(req, NNG_OPT_RECVBUF, 1));
+
+ NUTS_MARRY(req, rep);
+
+ for (i = 0; i < 100; i++) {
+ NUTS_PASS(nng_msg_alloc(&m, 4));
+ NUTS_PASS(nng_msg_append_u32(m, (unsigned) i | 0x80000000u));
+ NUTS_PASS(nng_sendmsg(req, m, 0));
+ nng_ctx_recv(ctx[i], aio[i]);
+ }
+ for (i = 0; i < 100; i++) {
+ nng_aio_wait(aio[i]);
+ NUTS_PASS(nng_aio_result(aio[i]));
+ NUTS_TRUE((m = nng_aio_get_msg(aio[i])) != NULL);
+ p = nng_msg_get_pipe(m);
+ nng_aio_set_msg(aio[i], m);
+ nng_ctx_send(ctx[i], aio[i]);
+ }
+
+ // Note that REQ socket is not reading the results.
+ NUTS_PASS(nng_pipe_close(p));
+
+ for (i = 0; i < 100; i++) {
+ int rv;
+ nng_aio_wait(aio[i]);
+ rv = nng_aio_result(aio[i]);
+ if (rv != 0) {
+ NUTS_FAIL(rv, NNG_ECLOSED);
+ nng_msg_free(nng_aio_get_msg(aio[i]));
+ }
+ nng_aio_free(aio[i]);
+ NUTS_PASS(nng_ctx_close(ctx[i]));
+ }
+ NUTS_CLOSE(req);
+ NUTS_CLOSE(rep);
+}
+
+void
+test_rep_close_context_send(void)
+{
+ nng_socket rep;
+ nng_socket req;
+ nng_msg * m;
+ nng_ctx ctx[100];
+ nng_aio * aio[100];
+ int i;
+
+ NUTS_PASS(nng_rep0_open(&rep));
+ NUTS_PASS(nng_req0_open_raw(&req));
+ for (i = 0; i < 100; i++) {
+ NUTS_PASS(nng_ctx_open(&ctx[i], rep));
+ NUTS_PASS(nng_aio_alloc(&aio[i], NULL, NULL));
+ }
+ NUTS_PASS(nng_socket_set_ms(rep, NNG_OPT_RECVTIMEO, 1000));
+ NUTS_PASS(nng_socket_set_ms(rep, NNG_OPT_SENDTIMEO, 1000));
+ NUTS_PASS(nng_socket_set_ms(req, NNG_OPT_SENDTIMEO, 1000));
+ NUTS_PASS(nng_socket_set_int(rep, NNG_OPT_SENDBUF, 1));
+ NUTS_PASS(nng_socket_set_int(rep, NNG_OPT_RECVBUF, 1));
+ NUTS_PASS(nng_socket_set_int(req, NNG_OPT_SENDBUF, 1));
+ NUTS_PASS(nng_socket_set_int(req, NNG_OPT_RECVBUF, 1));
+
+ NUTS_MARRY(req, rep);
+
+ for (i = 0; i < 100; i++) {
+ NUTS_PASS(nng_msg_alloc(&m, 4));
+ NUTS_PASS(nng_msg_append_u32(m, (unsigned) i | 0x80000000u));
+ NUTS_PASS(nng_sendmsg(req, m, 0));
+ nng_ctx_recv(ctx[i], aio[i]);
+ }
+ for (i = 0; i < 100; i++) {
+ nng_aio_wait(aio[i]);
+ NUTS_PASS(nng_aio_result(aio[i]));
+ NUTS_TRUE((m = nng_aio_get_msg(aio[i])) != NULL);
+ nng_aio_set_msg(aio[i], m);
+ nng_ctx_send(ctx[i], aio[i]);
+ }
+
+ // Note that REQ socket is not reading the results.
+ for (i = 0; i < 100; i++) {
+ int rv;
+ NUTS_PASS(nng_ctx_close(ctx[i]));
+ nng_aio_wait(aio[i]);
+ rv = nng_aio_result(aio[i]);
+ if (rv != 0) {
+ NUTS_FAIL(rv, NNG_ECLOSED);
+ nng_msg_free(nng_aio_get_msg(aio[i]));
+ }
+ nng_aio_free(aio[i]);
+ }
+ NUTS_CLOSE(req);
+ NUTS_CLOSE(rep);
+}
+
+void
+test_rep_close_recv(void)
+{
+ nng_socket rep;
+ nng_socket req;
+ nng_aio * aio;
+
+ NUTS_PASS(nng_rep0_open(&rep));
+ NUTS_PASS(nng_req0_open_raw(&req));
+ NUTS_PASS(nng_socket_set_ms(rep, NNG_OPT_RECVTIMEO, 1000));
+ NUTS_PASS(nng_socket_set_ms(rep, NNG_OPT_SENDTIMEO, 1000));
+ NUTS_PASS(nng_socket_set_ms(req, NNG_OPT_SENDTIMEO, 1000));
+
+ NUTS_MARRY(req, rep);
+ NUTS_PASS(nng_aio_alloc(&aio, NULL, NULL));
+ nng_recv_aio(rep, aio);
+ NUTS_CLOSE(rep);
+ NUTS_CLOSE(req);
+ nng_aio_wait(aio);
+ NUTS_FAIL(nng_aio_result(aio), NNG_ECLOSED);
+ nng_aio_free(aio);
+}
+
+struct rep_close_recv_cb_state {
+ nng_aio *aio;
+ nng_mtx *mtx;
+ nng_cv * cv;
+ int done;
+ int result;
+ nng_msg *msg;
+};
+
+static void
+rep_close_recv_cb(void *arg)
+{
+ struct rep_close_recv_cb_state *state = arg;
+
+ nng_mtx_lock(state->mtx);
+ state->result = nng_aio_result(state->aio);
+ state->msg = nng_aio_get_msg(state->aio);
+ state->done = true;
+ nng_cv_wake(state->cv);
+ nng_mtx_unlock(state->mtx);
+}
+
+void
+test_rep_close_recv_cb(void)
+{
+ nng_socket rep;
+ nng_socket req;
+ struct rep_close_recv_cb_state state;
+
+ memset(&state, 0, sizeof(state));
+ NUTS_PASS(nng_mtx_alloc(&state.mtx));
+ NUTS_PASS(nng_cv_alloc(&state.cv, state.mtx));
+
+ NUTS_PASS(nng_rep0_open(&rep));
+ NUTS_PASS(nng_req0_open_raw(&req));
+ NUTS_PASS(nng_socket_set_ms(rep, NNG_OPT_RECVTIMEO, 1000));
+ NUTS_PASS(nng_socket_set_ms(rep, NNG_OPT_SENDTIMEO, 1000));
+ NUTS_PASS(nng_socket_set_ms(req, NNG_OPT_SENDTIMEO, 1000));
+
+ NUTS_MARRY(req, rep);
+ NUTS_PASS(nng_aio_alloc(&state.aio, rep_close_recv_cb, &state));
+ nng_recv_aio(rep, state.aio);
+ NUTS_CLOSE(rep);
+ NUTS_CLOSE(req);
+ nng_mtx_lock(state.mtx);
+ while (!state.done) {
+ NUTS_PASS(nng_cv_until(state.cv, nng_clock() + 1000));
+ }
+ nng_mtx_unlock(state.mtx);
+ NUTS_TRUE(state.done != 0);
+ NUTS_FAIL(nng_aio_result(state.aio), NNG_ECLOSED);
+ NUTS_TRUE(nng_aio_get_msg(state.aio) == NULL);
+ nng_aio_free(state.aio);
+ nng_cv_free(state.cv);
+ nng_mtx_free(state.mtx);
+}
+
+static void
+test_rep_ctx_recv_nonblock(void)
+{
+ nng_socket rep;
+ nng_ctx ctx;
+ nng_aio * aio;
+
+ NUTS_PASS(nng_rep0_open(&rep));
+ NUTS_PASS(nng_ctx_open(&ctx, rep));
+ NUTS_PASS(nng_aio_alloc(&aio, NULL, NULL));
+
+ nng_aio_set_timeout(aio, 0); // Instant timeout
+ nng_ctx_recv(ctx, aio);
+
+ nng_aio_wait(aio);
+ NUTS_FAIL(nng_aio_result(aio), NNG_ETIMEDOUT);
+ NUTS_CLOSE(rep);
+ nng_aio_free(aio);
+}
+
+static void
+test_rep_ctx_send_nonblock(void)
+{
+ nng_socket rep;
+ nng_socket req;
+ nng_ctx ctx;
+ nng_aio * aio;
+ nng_msg * msg;
+
+ NUTS_PASS(nng_req0_open(&req));
+ NUTS_PASS(nng_rep0_open(&rep));
+ NUTS_PASS(nng_socket_set_ms(req, NNG_OPT_SENDTIMEO, 1000));
+ NUTS_PASS(nng_socket_set_ms(rep, NNG_OPT_RECVTIMEO, 2000));
+ NUTS_PASS(nng_socket_set_ms(rep, NNG_OPT_SENDTIMEO, 1000));
+ NUTS_PASS(nng_ctx_open(&ctx, rep));
+ NUTS_PASS(nng_aio_alloc(&aio, NULL, NULL));
+ NUTS_MARRY(req, rep);
+
+ NUTS_SEND(req, "SEND");
+ nng_ctx_recv(ctx, aio);
+ nng_aio_wait(aio);
+ NUTS_PASS(nng_aio_result(aio));
+ // message carries over
+ msg = nng_aio_get_msg(aio);
+ nng_aio_set_msg(aio, msg);
+ nng_aio_set_timeout(aio, 0); // Instant timeout
+ nng_ctx_send(ctx, aio);
+
+ nng_aio_wait(aio);
+ NUTS_PASS(nng_aio_result(aio));
+ NUTS_CLOSE(rep);
+ NUTS_CLOSE(req);
+ nng_aio_free(aio);
+}
+
+static void
+test_rep_ctx_send_nonblock2(void)
+{
+ nng_socket rep;
+ nng_socket req;
+ nng_ctx rep_ctx[10];
+ nng_aio * rep_aio[10];
+ int num_good = 0;
+ int num_fail = 0;
+
+	// We are going to send a bunch of requests and receive them,
+	// then verify that non-blocking back-pressure causes some of the
+	// replies to fail, but that at least one non-blocking send works.
+ NUTS_PASS(nng_req0_open_raw(&req));
+ NUTS_PASS(nng_rep0_open(&rep));
+ NUTS_PASS(nng_socket_set_ms(req, NNG_OPT_SENDTIMEO, 1000));
+ NUTS_PASS(nng_socket_set_ms(rep, NNG_OPT_RECVTIMEO, 1000));
+ NUTS_PASS(nng_socket_set_ms(rep, NNG_OPT_SENDTIMEO, 1000));
+ for (int i = 0; i < 10; i++) {
+ NUTS_PASS(nng_ctx_open(&rep_ctx[i], rep));
+ NUTS_PASS(nng_aio_alloc(&rep_aio[i], NULL, NULL));
+ }
+ NUTS_MARRY(req, rep);
+
+ for (int i = 0; i < 10; i++) {
+ nng_msg *msg;
+ NUTS_PASS(nng_msg_alloc(&msg, 4));
+ NUTS_PASS(nng_msg_append_u32(msg, (unsigned) i | 0x80000000u));
+ nng_ctx_recv(rep_ctx[i], rep_aio[i]);
+ NUTS_PASS(nng_sendmsg(req, msg, 0));
+ }
+ for (int i = 0; i < 10; i++) {
+ nng_msg *msg;
+ nng_aio_wait(rep_aio[i]);
+ NUTS_PASS(nng_aio_result(rep_aio[i]));
+ msg = nng_aio_get_msg(rep_aio[i]);
+ nng_aio_set_timeout(rep_aio[i], 0);
+ nng_aio_set_msg(rep_aio[i], msg);
+ nng_ctx_send(rep_ctx[i], rep_aio[i]);
+ }
+
+ for (int i = 0; i < 10; i++) {
+ int rv;
+ nng_aio_wait(rep_aio[i]);
+ rv = nng_aio_result(rep_aio[i]);
+ if (rv == 0) {
+ num_good++;
+ } else {
+ NUTS_FAIL(rv, NNG_ETIMEDOUT);
+ nng_msg_free(nng_aio_get_msg(rep_aio[i]));
+ num_fail++;
+ }
+ }
+
+ TEST_ASSERT(num_good > 0);
+ TEST_ASSERT(num_fail > 0);
+
+ for (int i = 0; i < 10; i++) {
+ nng_aio_free(rep_aio[i]);
+ nng_ctx_close(rep_ctx[i]);
+ }
+ NUTS_CLOSE(rep);
+ NUTS_CLOSE(req);
+}
+
+static void
+test_rep_send_nonblock(void)
+{
+ nng_socket rep;
+ nng_socket req;
+ int rv;
+
+ NUTS_PASS(nng_req0_open(&req));
+ NUTS_PASS(nng_rep0_open(&rep));
+ NUTS_PASS(nng_socket_set_ms(req, NNG_OPT_SENDTIMEO, 1000));
+ NUTS_PASS(nng_socket_set_ms(rep, NNG_OPT_RECVTIMEO, 1000));
+	NUTS_PASS(nng_socket_set_ms(rep, NNG_OPT_SENDTIMEO, 1000));
+ NUTS_MARRY(req, rep);
+
+ NUTS_SEND(req, "SEND");
+ NUTS_RECV(rep, "SEND");
+
+ // Use the nonblock flag
+ rv = nng_send(rep, "RECV", 5, NNG_FLAG_NONBLOCK);
+
+ NUTS_PASS(rv);
+ NUTS_RECV(req, "RECV");
+ NUTS_CLOSE(rep);
+ NUTS_CLOSE(req);
+}
+
+void
+test_rep_recv_garbage(void)
+{
+ nng_socket rep;
+ nng_socket req;
+ nng_msg * m;
+
+ NUTS_PASS(nng_rep0_open(&rep));
+ NUTS_PASS(nng_req0_open_raw(&req));
+ NUTS_PASS(nng_socket_set_ms(rep, NNG_OPT_RECVTIMEO, 200));
+ NUTS_PASS(nng_socket_set_ms(rep, NNG_OPT_SENDTIMEO, 200));
+ NUTS_PASS(nng_socket_set_ms(req, NNG_OPT_SENDTIMEO, 1000));
+
+ NUTS_MARRY(req, rep);
+
+ NUTS_PASS(nng_msg_alloc(&m, 4));
+ NUTS_PASS(nng_msg_append_u32(m, 1u));
+ NUTS_PASS(nng_sendmsg(req, m, 0));
+ NUTS_FAIL(nng_recvmsg(rep, &m, 0), NNG_ETIMEDOUT);
+
+ NUTS_CLOSE(req);
+ NUTS_CLOSE(rep);
+}
+
+NUTS_TESTS = {
+ { "rep identity", test_rep_identity },
+ { "rep send bad state", test_rep_send_bad_state },
+ { "rep poll readable", test_rep_poll_readable },
+ { "rep poll writable", test_rep_poll_writeable },
+ { "rep context does not poll", test_rep_context_no_poll },
+ { "rep validate peer", test_rep_validate_peer },
+ { "rep double recv", test_rep_double_recv },
+ { "rep send nonblock", test_rep_send_nonblock },
+ { "rep close pipe before send", test_rep_close_pipe_before_send },
+ { "rep close pipe during send", test_rep_close_pipe_during_send },
+ { "rep recv aio ctx stopped", test_rep_ctx_recv_aio_stopped },
+ { "rep close pipe context send", test_rep_close_pipe_context_send },
+ { "rep close context send", test_rep_close_context_send },
+ { "rep close recv", test_rep_close_recv },
+ { "rep close recv cb", test_rep_close_recv_cb },
+ { "rep context send nonblock", test_rep_ctx_send_nonblock },
+ { "rep context send nonblock 2", test_rep_ctx_send_nonblock2 },
+ { "rep context recv nonblock", test_rep_ctx_recv_nonblock },
+ { "rep recv garbage", test_rep_recv_garbage },
+ { NULL, NULL },
+};
diff --git a/src/sp/protocol/reqrep0/req.c b/src/sp/protocol/reqrep0/req.c
new file mode 100644
index 00000000..cb3c9395
--- /dev/null
+++ b/src/sp/protocol/reqrep0/req.c
@@ -0,0 +1,869 @@
+//
+// Copyright 2020 Staysail Systems, Inc. <info@staysail.tech>
+// Copyright 2018 Capitar IT Group BV <info@capitar.com>
+//
+// This software is supplied under the terms of the MIT License, a
+// copy of which should be located in the distribution where this
+// file was obtained (LICENSE.txt). A copy of the license may also be
+// found online at https://opensource.org/licenses/MIT.
+//
+#include <stdio.h>
+
+#include "core/nng_impl.h"
+#include "nng/protocol/reqrep0/req.h"
+
+// Request protocol. The REQ protocol is the "request" side of a
+// request-reply pair. This is useful for building RPC clients, for example.
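+//
+// For illustration, a minimal blocking client might look like the
+// following sketch (error handling elided; the inproc address is
+// hypothetical, and this code is not part of this file):
+//
+//	nng_socket req;
+//	char *     buf = NULL;
+//	size_t     sz;
+//
+//	nng_req0_open(&req);
+//	nng_dial(req, "inproc://example", NULL, 0);
+//	nng_send(req, "ping", 5, 0);              // issue the request
+//	nng_recv(req, &buf, &sz, NNG_FLAG_ALLOC); // wait for the matching reply
+//	nng_free(buf, sz);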
+
+typedef struct req0_pipe req0_pipe;
+typedef struct req0_sock req0_sock;
+typedef struct req0_ctx req0_ctx;
+
+static void req0_run_send_queue(req0_sock *, nni_list *);
+static void req0_ctx_reset(req0_ctx *);
+static void req0_ctx_timeout(void *);
+static void req0_pipe_fini(void *);
+static void req0_ctx_fini(void *);
+static int req0_ctx_init(void *, void *);
+
+// A req0_ctx is a "context" for a request.  It shares most of the
+// socket state, but keeps track of its own outstanding reply, the
+// request ID, and so forth.
+struct req0_ctx {
+ req0_sock * sock;
+ nni_list_node sock_node; // node on the socket context list
+ nni_list_node send_node; // node on the send_queue
+ nni_list_node pipe_node; // node on the pipe list
+ uint32_t request_id; // request ID, without high bit set
+ nni_aio * recv_aio; // user aio waiting to recv - only one!
+ nni_aio * send_aio; // user aio waiting to send
+ nng_msg * req_msg; // request message (owned by protocol)
+ size_t req_len; // length of request message (for stats)
+ nng_msg * rep_msg; // reply message
+ nni_timer_node timer;
+ nni_duration retry;
+ bool conn_reset; // sent message w/o retry, peer disconnect
+};
+
+// A req0_sock is our per-socket protocol private structure.
+struct req0_sock {
+ nni_duration retry;
+ bool closed;
+ nni_atomic_int ttl;
+ req0_ctx master; // base socket master
+ nni_list ready_pipes;
+ nni_list busy_pipes;
+ nni_list stop_pipes;
+ nni_list contexts;
+ nni_list send_queue; // contexts waiting to send.
+ nni_id_map requests; // contexts by request ID
+ nni_pollable readable;
+ nni_pollable writable;
+ nni_mtx mtx;
+};
+
+// A req0_pipe is our per-pipe protocol private structure.
+struct req0_pipe {
+ nni_pipe * pipe;
+ req0_sock * req;
+ nni_list_node node;
+ nni_list contexts; // contexts with pending traffic
+ bool closed;
+ nni_aio aio_send;
+ nni_aio aio_recv;
+};
+
+static void req0_sock_fini(void *);
+static void req0_send_cb(void *);
+static void req0_recv_cb(void *);
+
+static int
+req0_sock_init(void *arg, nni_sock *sock)
+{
+ req0_sock *s = arg;
+
+ NNI_ARG_UNUSED(sock);
+
+ // Request IDs are 32 bits, with the high order bit set.
+ // We start at a random point, to minimize likelihood of
+ // accidental collision across restarts.
+ nni_id_map_init(&s->requests, 0x80000000u, 0xffffffffu, true);
+
+ nni_mtx_init(&s->mtx);
+
+ NNI_LIST_INIT(&s->ready_pipes, req0_pipe, node);
+ NNI_LIST_INIT(&s->busy_pipes, req0_pipe, node);
+ NNI_LIST_INIT(&s->stop_pipes, req0_pipe, node);
+ NNI_LIST_INIT(&s->send_queue, req0_ctx, send_node);
+ NNI_LIST_INIT(&s->contexts, req0_ctx, sock_node);
+
+	// The default resend (retry) interval is one minute.
+ s->retry = NNI_SECOND * 60;
+
+ (void) req0_ctx_init(&s->master, s);
+
+ nni_pollable_init(&s->writable);
+ nni_pollable_init(&s->readable);
+
+ nni_atomic_init(&s->ttl);
+ nni_atomic_set(&s->ttl, 8);
+ return (0);
+}
+
+static void
+req0_sock_open(void *arg)
+{
+ NNI_ARG_UNUSED(arg);
+}
+
+static void
+req0_sock_close(void *arg)
+{
+ req0_sock *s = arg;
+
+ nni_mtx_lock(&s->mtx);
+ s->closed = true;
+ nni_mtx_unlock(&s->mtx);
+}
+
+static void
+req0_sock_fini(void *arg)
+{
+ req0_sock *s = arg;
+
+ nni_mtx_lock(&s->mtx);
+ NNI_ASSERT(nni_list_empty(&s->busy_pipes));
+ NNI_ASSERT(nni_list_empty(&s->stop_pipes));
+ NNI_ASSERT(nni_list_empty(&s->ready_pipes));
+ nni_mtx_unlock(&s->mtx);
+
+ req0_ctx_fini(&s->master);
+ nni_pollable_fini(&s->readable);
+ nni_pollable_fini(&s->writable);
+ nni_id_map_fini(&s->requests);
+ nni_mtx_fini(&s->mtx);
+}
+
+static void
+req0_pipe_stop(void *arg)
+{
+ req0_pipe *p = arg;
+ req0_sock *s = p->req;
+
+ nni_aio_stop(&p->aio_recv);
+ nni_aio_stop(&p->aio_send);
+ nni_mtx_lock(&s->mtx);
+ nni_list_node_remove(&p->node);
+ nni_mtx_unlock(&s->mtx);
+}
+
+static void
+req0_pipe_fini(void *arg)
+{
+ req0_pipe *p = arg;
+
+ nni_aio_fini(&p->aio_recv);
+ nni_aio_fini(&p->aio_send);
+}
+
+static int
+req0_pipe_init(void *arg, nni_pipe *pipe, void *s)
+{
+ req0_pipe *p = arg;
+
+ nni_aio_init(&p->aio_recv, req0_recv_cb, p);
+ nni_aio_init(&p->aio_send, req0_send_cb, p);
+ NNI_LIST_NODE_INIT(&p->node);
+ NNI_LIST_INIT(&p->contexts, req0_ctx, pipe_node);
+ p->pipe = pipe;
+ p->req = s;
+ return (0);
+}
+
+static int
+req0_pipe_start(void *arg)
+{
+ req0_pipe *p = arg;
+ req0_sock *s = p->req;
+
+ if (nni_pipe_peer(p->pipe) != NNG_REQ0_PEER) {
+ return (NNG_EPROTO);
+ }
+
+ nni_mtx_lock(&s->mtx);
+ nni_list_append(&s->ready_pipes, p);
+ nni_pollable_raise(&s->writable);
+ req0_run_send_queue(s, NULL);
+ nni_mtx_unlock(&s->mtx);
+
+ nni_pipe_recv(p->pipe, &p->aio_recv);
+ return (0);
+}
+
+static void
+req0_pipe_close(void *arg)
+{
+ req0_pipe *p = arg;
+ req0_sock *s = p->req;
+ req0_ctx * ctx;
+
+ nni_aio_close(&p->aio_recv);
+ nni_aio_close(&p->aio_send);
+
+ nni_mtx_lock(&s->mtx);
+ // This removes the node from either busy_pipes or ready_pipes.
+ // It doesn't much matter which. We stick the pipe on the stop
+ // list, so that we can wait for that to close down safely.
+ p->closed = true;
+ nni_list_node_remove(&p->node);
+ nni_list_append(&s->stop_pipes, p);
+ if (nni_list_empty(&s->ready_pipes)) {
+ nni_pollable_clear(&s->writable);
+ }
+
+ while ((ctx = nni_list_first(&p->contexts)) != NULL) {
+ nni_list_remove(&p->contexts, ctx);
+ nng_aio *aio;
+ if (ctx->retry <= 0) {
+ // If we can't retry, then just cancel the operation
+ // altogether. We should only be waiting for recv,
+ // because we will already have sent if we are here.
+ if ((aio = ctx->recv_aio) != NULL) {
+ ctx->recv_aio = NULL;
+ nni_aio_finish_error(aio, NNG_ECONNRESET);
+ req0_ctx_reset(ctx);
+ } else {
+ req0_ctx_reset(ctx);
+ ctx->conn_reset = true;
+ }
+ } else {
+ // Reset the timer on this so it expires immediately.
+ // This is actually easier than canceling the timer and
+ // running the send_queue separately. (In particular,
+ // it avoids a potential deadlock on cancelling the
+ // timer.)
+ nni_timer_schedule(&ctx->timer, NNI_TIME_ZERO);
+ }
+ }
+ nni_mtx_unlock(&s->mtx);
+}
+
+// For cooked mode, we use a context, and send out that way. This
+// completely bypasses the upper write queue. Each context keeps one
+// message pending; these are "scheduled" via the send_queue. The send_queue
+// is ordered, so FIFO ordering between contexts is provided for.
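+//
+// For illustration, one outstanding request per context might be driven
+// like the following sketch (error handling elided; "req_sock" is assumed
+// to be an open REQ socket and "msg" an allocated request message):
+//
+//	nng_ctx  ctx;
+//	nng_aio *aio;
+//
+//	nng_ctx_open(&ctx, req_sock);
+//	nng_aio_alloc(&aio, NULL, NULL);
+//	nng_aio_set_msg(aio, msg);
+//	nng_ctx_send(ctx, aio); // queued via the send_queue described above
+//	nng_aio_wait(aio);
+//	nng_ctx_recv(ctx, aio); // matched by request ID, not arrival order
+//	nng_aio_wait(aio);
+//	msg = nng_aio_get_msg(aio);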
+
+static void
+req0_send_cb(void *arg)
+{
+ req0_pipe *p = arg;
+ req0_sock *s = p->req;
+ nni_aio * aio;
+ nni_list sent_list;
+
+ nni_aio_list_init(&sent_list);
+ if (nni_aio_result(&p->aio_send) != 0) {
+ // We failed to send... clean up and deal with it.
+ nni_msg_free(nni_aio_get_msg(&p->aio_send));
+ nni_aio_set_msg(&p->aio_send, NULL);
+ nni_pipe_close(p->pipe);
+ return;
+ }
+
+ // We completed a cooked send, so we need to reinsert ourselves
+ // in the ready list, and re-run the send_queue.
+
+ nni_mtx_lock(&s->mtx);
+ if (p->closed || s->closed) {
+ // This occurs if the req0_pipe_close has been called.
+ // In that case we don't want any more processing.
+ nni_mtx_unlock(&s->mtx);
+ return;
+ }
+ nni_list_remove(&s->busy_pipes, p);
+ nni_list_append(&s->ready_pipes, p);
+ if (nni_list_empty(&s->send_queue)) {
+ nni_pollable_raise(&s->writable);
+ }
+ req0_run_send_queue(s, &sent_list);
+ nni_mtx_unlock(&s->mtx);
+
+ while ((aio = nni_list_first(&sent_list)) != NULL) {
+ nni_list_remove(&sent_list, aio);
+ nni_aio_finish_sync(aio, 0, 0);
+ }
+}
+
+static void
+req0_recv_cb(void *arg)
+{
+ req0_pipe *p = arg;
+ req0_sock *s = p->req;
+ req0_ctx * ctx;
+ nni_msg * msg;
+ nni_aio * aio;
+ uint32_t id;
+
+ if (nni_aio_result(&p->aio_recv) != 0) {
+ nni_pipe_close(p->pipe);
+ return;
+ }
+
+ msg = nni_aio_get_msg(&p->aio_recv);
+ nni_aio_set_msg(&p->aio_recv, NULL);
+ nni_msg_set_pipe(msg, nni_pipe_id(p->pipe));
+
+	// We yank the 4-byte request ID from the front of the body and
+	// use it to find the matching context.
+ if (nni_msg_len(msg) < 4) {
+ // Malformed message.
+ goto malformed;
+ }
+ id = nni_msg_trim_u32(msg);
+
+ // Schedule another receive while we are processing this.
+ nni_mtx_lock(&s->mtx);
+
+ // NB: If close was called, then this will just abort.
+ nni_pipe_recv(p->pipe, &p->aio_recv);
+
+ // Look for a context to receive it.
+ if (((ctx = nni_id_get(&s->requests, id)) == NULL) ||
+ (ctx->send_aio != NULL) || (ctx->rep_msg != NULL)) {
+ nni_mtx_unlock(&s->mtx);
+		// Either no context matches the request ID, the request has
+		// not finished going out on the wire yet, or the context
+		// already has a reply pending.  Discard the message.
+ nni_msg_free(msg);
+ return;
+ }
+
+ // We have our match, so we can remove this.
+ nni_list_node_remove(&ctx->send_node);
+ nni_id_remove(&s->requests, id);
+ ctx->request_id = 0;
+ if (ctx->req_msg != NULL) {
+ nni_msg_free(ctx->req_msg);
+ ctx->req_msg = NULL;
+ }
+
+ // Is there an aio waiting for us?
+ if ((aio = ctx->recv_aio) != NULL) {
+ ctx->recv_aio = NULL;
+ nni_mtx_unlock(&s->mtx);
+ nni_aio_set_msg(aio, msg);
+ nni_aio_finish_sync(aio, 0, nni_msg_len(msg));
+ } else {
+ // No AIO, so stash msg. Receive will pick it up later.
+ ctx->rep_msg = msg;
+ if (ctx == &s->master) {
+ nni_pollable_raise(&s->readable);
+ }
+ nni_mtx_unlock(&s->mtx);
+ }
+ return;
+
+malformed:
+ nni_msg_free(msg);
+ nni_pipe_close(p->pipe);
+}
+
+static void
+req0_ctx_timeout(void *arg)
+{
+ req0_ctx * ctx = arg;
+ req0_sock *s = ctx->sock;
+
+ nni_mtx_lock(&s->mtx);
+ if ((ctx->req_msg != NULL) && (!s->closed)) {
+ if (!nni_list_node_active(&ctx->send_node)) {
+ nni_list_append(&s->send_queue, ctx);
+ }
+ req0_run_send_queue(s, NULL);
+ }
+ nni_mtx_unlock(&s->mtx);
+}
+
+static int
+req0_ctx_init(void *arg, void *sock)
+{
+ req0_sock *s = sock;
+ req0_ctx * ctx = arg;
+
+ nni_timer_init(&ctx->timer, req0_ctx_timeout, ctx);
+
+ nni_mtx_lock(&s->mtx);
+ ctx->sock = s;
+ ctx->recv_aio = NULL;
+ ctx->retry = s->retry;
+ nni_list_append(&s->contexts, ctx);
+ nni_mtx_unlock(&s->mtx);
+
+ return (0);
+}
+
+static void
+req0_ctx_fini(void *arg)
+{
+ req0_ctx * ctx = arg;
+ req0_sock *s = ctx->sock;
+ nni_aio * aio;
+
+ nni_mtx_lock(&s->mtx);
+ if ((aio = ctx->recv_aio) != NULL) {
+ ctx->recv_aio = NULL;
+ nni_aio_finish_error(aio, NNG_ECLOSED);
+ }
+ if ((aio = ctx->send_aio) != NULL) {
+ ctx->send_aio = NULL;
+ nni_aio_set_msg(aio, ctx->req_msg);
+ ctx->req_msg = NULL;
+ nni_aio_finish_error(aio, NNG_ECLOSED);
+ }
+ req0_ctx_reset(ctx);
+ nni_list_remove(&s->contexts, ctx);
+ nni_mtx_unlock(&s->mtx);
+
+ nni_timer_cancel(&ctx->timer);
+ nni_timer_fini(&ctx->timer);
+}
+
+static int
+req0_ctx_set_resend_time(void *arg, const void *buf, size_t sz, nni_opt_type t)
+{
+ req0_ctx *ctx = arg;
+ return (nni_copyin_ms(&ctx->retry, buf, sz, t));
+}
+
+static int
+req0_ctx_get_resend_time(void *arg, void *buf, size_t *szp, nni_opt_type t)
+{
+ req0_ctx *ctx = arg;
+ return (nni_copyout_ms(ctx->retry, buf, szp, t));
+}
+
+static void
+req0_run_send_queue(req0_sock *s, nni_list *sent_list)
+{
+ req0_ctx *ctx;
+ nni_aio * aio;
+
+ // Note: This routine should be called with the socket lock held.
+ while ((ctx = nni_list_first(&s->send_queue)) != NULL) {
+ req0_pipe *p;
+
+ if ((p = nni_list_first(&s->ready_pipes)) == NULL) {
+ return;
+ }
+
+ // We have a place to send it, so do the send.
+ // If a sending error occurs that causes the message to
+ // be dropped, we rely on the resend timer to pick it up.
+ // We also notify the completion callback if this is the
+ // first send attempt.
+ nni_list_remove(&s->send_queue, ctx);
+
+		// Schedule a resend timer.  We only do this if we got
+		// a pipe to send to; otherwise we will be picked up the
+		// next time the send_queue is run.  We skip this entirely
+		// if the retry is "disabled" with NNG_DURATION_INFINITE.
+ if (ctx->retry > 0) {
+ nni_timer_schedule(
+ &ctx->timer, nni_clock() + ctx->retry);
+ }
+
+ // Put us on the pipe list of active contexts.
+ // This gives the pipe a chance to kick a resubmit
+ // if the pipe is removed.
+ nni_list_node_remove(&ctx->pipe_node);
+ nni_list_append(&p->contexts, ctx);
+
+ nni_list_remove(&s->ready_pipes, p);
+ nni_list_append(&s->busy_pipes, p);
+ if (nni_list_empty(&s->ready_pipes)) {
+ nni_pollable_clear(&s->writable);
+ }
+
+ if ((aio = ctx->send_aio) != NULL) {
+ ctx->send_aio = NULL;
+ nni_aio_bump_count(aio, ctx->req_len);
+ // If the list was passed in, we want to do a
+ // synchronous completion later.
+ if (sent_list != NULL) {
+ nni_list_append(sent_list, aio);
+ } else {
+ nni_aio_finish(aio, 0, 0);
+ }
+ }
+
+		// At this point, we will never give this message back to
+		// the user, so we don't have to worry about making it
+		// unique.  We can freely clone it.
+ nni_msg_clone(ctx->req_msg);
+ nni_aio_set_msg(&p->aio_send, ctx->req_msg);
+ nni_pipe_send(p->pipe, &p->aio_send);
+ }
+}
+
+static void
+req0_ctx_reset(req0_ctx *ctx)
+{
+ req0_sock *s = ctx->sock;
+ // Call with sock lock held!
+
+ // We cannot safely "wait" using nni_timer_cancel, but this removes
+ // any scheduled timer activation. If the timeout is already running
+ // concurrently, it will still run. It should do nothing, because
+ // we toss the request. There is still a very narrow race if the
+ // timeout fires, but doesn't actually start running before we
+ // both finish this function, *and* manage to reschedule another
+ // request. The consequence of that occurring is that the request
+ // will be emitted on the wire twice. This is not actually tragic.
+ nni_timer_schedule(&ctx->timer, NNI_TIME_NEVER);
+
+ nni_list_node_remove(&ctx->pipe_node);
+ nni_list_node_remove(&ctx->send_node);
+ if (ctx->request_id != 0) {
+ nni_id_remove(&s->requests, ctx->request_id);
+ ctx->request_id = 0;
+ }
+ if (ctx->req_msg != NULL) {
+ nni_msg_free(ctx->req_msg);
+ ctx->req_msg = NULL;
+ }
+ if (ctx->rep_msg != NULL) {
+ nni_msg_free(ctx->rep_msg);
+ ctx->rep_msg = NULL;
+ }
+ ctx->conn_reset = false;
+}
+
+static void
+req0_ctx_cancel_recv(nni_aio *aio, void *arg, int rv)
+{
+ req0_ctx * ctx = arg;
+ req0_sock *s = ctx->sock;
+
+ nni_mtx_lock(&s->mtx);
+ if (ctx->recv_aio == aio) {
+ ctx->recv_aio = NULL;
+
+ // Cancellation of a pending receive is treated as aborting the
+ // entire state machine. This allows us to preserve the
+ // semantic of exactly one receive operation per send
+ // operation, and should be the least surprising for users. The
+ // main consequence is that if a receive operation is completed
+ // (in error or otherwise), the user must submit a new send
+ // operation to restart the state machine.
+ req0_ctx_reset(ctx);
+
+ nni_aio_finish_error(aio, rv);
+ }
+ nni_mtx_unlock(&s->mtx);
+}
+
+static void
+req0_ctx_recv(void *arg, nni_aio *aio)
+{
+ req0_ctx * ctx = arg;
+ req0_sock *s = ctx->sock;
+ nni_msg * msg;
+
+ if (nni_aio_begin(aio) != 0) {
+ return;
+ }
+ nni_mtx_lock(&s->mtx);
+ if ((ctx->recv_aio != NULL) ||
+ ((ctx->req_msg == NULL) && (ctx->rep_msg == NULL))) {
+		// We already have a pending receive, or we have not
+		// tried to send a request yet.
+		// Either of these violates our basic state assumptions.
+ int rv;
+ if (ctx->conn_reset) {
+ ctx->conn_reset = false;
+ rv = NNG_ECONNRESET;
+ } else {
+ rv = NNG_ESTATE;
+ }
+ nni_mtx_unlock(&s->mtx);
+ nni_aio_finish_error(aio, rv);
+ return;
+ }
+
+ if ((msg = ctx->rep_msg) == NULL) {
+ int rv;
+ rv = nni_aio_schedule(aio, req0_ctx_cancel_recv, ctx);
+ if (rv != 0) {
+ nni_mtx_unlock(&s->mtx);
+ nni_aio_finish_error(aio, rv);
+ return;
+ }
+ ctx->recv_aio = aio;
+ nni_mtx_unlock(&s->mtx);
+ return;
+ }
+
+ ctx->rep_msg = NULL;
+
+ // We have got a message to pass up, yay!
+ nni_aio_set_msg(aio, msg);
+ if (ctx == &s->master) {
+ nni_pollable_clear(&s->readable);
+ }
+ nni_mtx_unlock(&s->mtx);
+ nni_aio_finish(aio, 0, nni_msg_len(msg));
+}
+
+static void
+req0_ctx_cancel_send(nni_aio *aio, void *arg, int rv)
+{
+ req0_ctx * ctx = arg;
+ req0_sock *s = ctx->sock;
+
+ nni_mtx_lock(&s->mtx);
+ if (ctx->send_aio == aio) {
+ // There should not be a pending reply, because we canceled
+ // it while we were waiting.
+ NNI_ASSERT(ctx->recv_aio == NULL);
+ ctx->send_aio = NULL;
+ // Restore the message back to the aio.
+ nni_aio_set_msg(aio, ctx->req_msg);
+ nni_msg_header_clear(ctx->req_msg);
+ ctx->req_msg = NULL;
+
+		// Cancellation of a pending send is treated as aborting the
+		// entire state machine.  This allows us to preserve the
+		// semantic of exactly one receive operation per send
+		// operation, and should be the least surprising for users.
+		// The main consequence is that the user must submit a new
+		// send operation to restart the state machine.
+ req0_ctx_reset(ctx);
+
+ nni_aio_finish_error(aio, rv);
+ }
+ nni_mtx_unlock(&s->mtx);
+}
+
+static void
+req0_ctx_send(void *arg, nni_aio *aio)
+{
+ req0_ctx * ctx = arg;
+ req0_sock *s = ctx->sock;
+ nng_msg * msg = nni_aio_get_msg(aio);
+ int rv;
+
+ if (nni_aio_begin(aio) != 0) {
+ return;
+ }
+ nni_mtx_lock(&s->mtx);
+ if (s->closed) {
+ nni_mtx_unlock(&s->mtx);
+ nni_aio_finish_error(aio, NNG_ECLOSED);
+ return;
+ }
+ // Sending a new request cancels the old one, including any
+ // outstanding reply.
+ if (ctx->recv_aio != NULL) {
+ nni_aio_finish_error(ctx->recv_aio, NNG_ECANCELED);
+ ctx->recv_aio = NULL;
+ }
+ if (ctx->send_aio != NULL) {
+ nni_aio_set_msg(ctx->send_aio, ctx->req_msg);
+ nni_msg_header_clear(ctx->req_msg);
+ ctx->req_msg = NULL;
+ nni_aio_finish_error(ctx->send_aio, NNG_ECANCELED);
+ ctx->send_aio = NULL;
+ nni_list_remove(&s->send_queue, ctx);
+ }
+
+ // This resets the entire state machine.
+ req0_ctx_reset(ctx);
+
+ // Insert us on the per ID hash list, so that receives can find us.
+ if ((rv = nni_id_alloc(&s->requests, &ctx->request_id, ctx)) != 0) {
+ nni_mtx_unlock(&s->mtx);
+ nni_aio_finish_error(aio, rv);
+ return;
+ }
+ nni_msg_header_clear(msg);
+ nni_msg_header_append_u32(msg, ctx->request_id);
+
+	// If scheduling fails (e.g. this was a non-blocking poll), and no
+	// pipes are ready to take the request right now, then fail it.
+	// The error should be NNG_ETIMEDOUT.
+ rv = nni_aio_schedule(aio, req0_ctx_cancel_send, ctx);
+ if ((rv != 0) && (nni_list_empty(&s->ready_pipes))) {
+ nni_id_remove(&s->requests, ctx->request_id);
+ nni_mtx_unlock(&s->mtx);
+ nni_aio_finish_error(aio, rv);
+ return;
+ }
+ ctx->req_len = nni_msg_len(msg);
+ ctx->req_msg = msg;
+ ctx->send_aio = aio;
+ nni_aio_set_msg(aio, NULL);
+
+ // Stick us on the send_queue list.
+ nni_list_append(&s->send_queue, ctx);
+
+ req0_run_send_queue(s, NULL);
+ nni_mtx_unlock(&s->mtx);
+}
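+
+// A minimal sketch (illustrative only, not part of this implementation) of
+// how an application drives this path through a context; the address and
+// payload are made up, and error checks are omitted.  Each context keeps
+// its own request ID and state machine, so several requests can be
+// outstanding on one REQ socket:
+//
+//	nng_socket req;
+//	nng_ctx    ctx;
+//	nng_aio *  aio;
+//	nng_msg *  msg;
+//
+//	nng_req0_open(&req);
+//	nng_dial(req, "inproc://example", NULL, 0);
+//	nng_ctx_open(&ctx, req);
+//	nng_aio_alloc(&aio, NULL, NULL);
+//	nng_msg_alloc(&msg, 0);
+//	nng_msg_append(msg, "query", 5);
+//	nng_aio_set_msg(aio, msg);
+//	nng_ctx_send(ctx, aio);  // allocates a request ID and queues the send
+//	nng_aio_wait(aio);
+//	nng_ctx_recv(ctx, aio);  // completes when the matching reply arrives
+//	nng_aio_wait(aio);
+//	msg = nng_aio_get_msg(aio);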
+
+static void
+req0_sock_send(void *arg, nni_aio *aio)
+{
+ req0_sock *s = arg;
+ req0_ctx_send(&s->master, aio);
+}
+
+static void
+req0_sock_recv(void *arg, nni_aio *aio)
+{
+ req0_sock *s = arg;
+ req0_ctx_recv(&s->master, aio);
+}
+
+static int
+req0_sock_set_max_ttl(void *arg, const void *buf, size_t sz, nni_opt_type t)
+{
+ req0_sock *s = arg;
+ int ttl;
+ int rv;
+ if ((rv = nni_copyin_int(&ttl, buf, sz, 1, NNI_MAX_MAX_TTL, t)) == 0) {
+ nni_atomic_set(&s->ttl, ttl);
+ }
+ return (rv);
+}
+
+static int
+req0_sock_get_max_ttl(void *arg, void *buf, size_t *szp, nni_opt_type t)
+{
+ req0_sock *s = arg;
+ return (nni_copyout_int(nni_atomic_get(&s->ttl), buf, szp, t));
+}
+
+static int
+req0_sock_set_resend_time(
+ void *arg, const void *buf, size_t sz, nni_opt_type t)
+{
+ req0_sock *s = arg;
+ int rv;
+ rv = req0_ctx_set_resend_time(&s->master, buf, sz, t);
+ s->retry = s->master.retry;
+ return (rv);
+}
+
+static int
+req0_sock_get_resend_time(void *arg, void *buf, size_t *szp, nni_opt_type t)
+{
+ req0_sock *s = arg;
+ return (req0_ctx_get_resend_time(&s->master, buf, szp, t));
+}
+
+static int
+req0_sock_get_send_fd(void *arg, void *buf, size_t *szp, nni_opt_type t)
+{
+ req0_sock *s = arg;
+ int rv;
+ int fd;
+
+ if ((rv = nni_pollable_getfd(&s->writable, &fd)) != 0) {
+ return (rv);
+ }
+ return (nni_copyout_int(fd, buf, szp, t));
+}
+
+static int
+req0_sock_get_recv_fd(void *arg, void *buf, size_t *szp, nni_opt_type t)
+{
+ req0_sock *s = arg;
+ int rv;
+ int fd;
+
+ if ((rv = nni_pollable_getfd(&s->readable, &fd)) != 0) {
+ return (rv);
+ }
+
+ return (nni_copyout_int(fd, buf, szp, t));
+}
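+
+// The descriptors returned by the two functions above only signal
+// readiness; data still moves through the normal send and receive calls.
+// A minimal sketch of how an application might watch the receive side
+// (POSIX poll(2) assumed; error checks omitted; "s" stands for a REQ
+// socket with a request already outstanding):
+//
+//	int           fd;
+//	struct pollfd pfd;
+//
+//	nng_socket_get_int(s, NNG_OPT_RECVFD, &fd);
+//	pfd.fd     = fd;
+//	pfd.events = POLLIN;
+//	if ((poll(&pfd, 1, -1) > 0) && ((pfd.revents & POLLIN) != 0)) {
+//		// a reply is waiting; nng_recvmsg() will not block
+//	}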
+
+static nni_proto_pipe_ops req0_pipe_ops = {
+ .pipe_size = sizeof(req0_pipe),
+ .pipe_init = req0_pipe_init,
+ .pipe_fini = req0_pipe_fini,
+ .pipe_start = req0_pipe_start,
+ .pipe_close = req0_pipe_close,
+ .pipe_stop = req0_pipe_stop,
+};
+
+static nni_option req0_ctx_options[] = {
+ {
+ .o_name = NNG_OPT_REQ_RESENDTIME,
+ .o_get = req0_ctx_get_resend_time,
+ .o_set = req0_ctx_set_resend_time,
+ },
+ {
+ .o_name = NULL,
+ },
+};
+
+static nni_proto_ctx_ops req0_ctx_ops = {
+ .ctx_size = sizeof(req0_ctx),
+ .ctx_init = req0_ctx_init,
+ .ctx_fini = req0_ctx_fini,
+ .ctx_recv = req0_ctx_recv,
+ .ctx_send = req0_ctx_send,
+ .ctx_options = req0_ctx_options,
+};
+
+static nni_option req0_sock_options[] = {
+ {
+ .o_name = NNG_OPT_MAXTTL,
+ .o_get = req0_sock_get_max_ttl,
+ .o_set = req0_sock_set_max_ttl,
+ },
+ {
+ .o_name = NNG_OPT_REQ_RESENDTIME,
+ .o_get = req0_sock_get_resend_time,
+ .o_set = req0_sock_set_resend_time,
+ },
+ {
+ .o_name = NNG_OPT_RECVFD,
+ .o_get = req0_sock_get_recv_fd,
+ },
+ {
+ .o_name = NNG_OPT_SENDFD,
+ .o_get = req0_sock_get_send_fd,
+ },
+ // terminate list
+ {
+ .o_name = NULL,
+ },
+};
+
+static nni_proto_sock_ops req0_sock_ops = {
+ .sock_size = sizeof(req0_sock),
+ .sock_init = req0_sock_init,
+ .sock_fini = req0_sock_fini,
+ .sock_open = req0_sock_open,
+ .sock_close = req0_sock_close,
+ .sock_options = req0_sock_options,
+ .sock_send = req0_sock_send,
+ .sock_recv = req0_sock_recv,
+};
+
+static nni_proto req0_proto = {
+ .proto_version = NNI_PROTOCOL_VERSION,
+ .proto_self = { NNG_REQ0_SELF, NNG_REQ0_SELF_NAME },
+ .proto_peer = { NNG_REQ0_PEER, NNG_REQ0_PEER_NAME },
+ .proto_flags = NNI_PROTO_FLAG_SNDRCV,
+ .proto_sock_ops = &req0_sock_ops,
+ .proto_pipe_ops = &req0_pipe_ops,
+ .proto_ctx_ops = &req0_ctx_ops,
+};
+
+int
+nng_req0_open(nng_socket *sock)
+{
+ return (nni_proto_open(sock, &req0_proto));
+}
diff --git a/src/sp/protocol/reqrep0/req_test.c b/src/sp/protocol/reqrep0/req_test.c
new file mode 100644
index 00000000..fb78efa0
--- /dev/null
+++ b/src/sp/protocol/reqrep0/req_test.c
@@ -0,0 +1,968 @@
+//
+// Copyright 2020 Staysail Systems, Inc. <info@staysail.tech>
+// Copyright 2018 Capitar IT Group BV <info@capitar.com>
+//
+// This software is supplied under the terms of the MIT License, a
+// copy of which should be located in the distribution where this
+// file was obtained (LICENSE.txt). A copy of the license may also be
+// found online at https://opensource.org/licenses/MIT.
+//
+
+#include <nuts.h>
+
+static void
+test_req_identity(void)
+{
+ nng_socket s;
+ int p;
+ char * n;
+
+ NUTS_PASS(nng_req0_open(&s));
+ NUTS_PASS(nng_socket_get_int(s, NNG_OPT_PROTO, &p));
+ NUTS_TRUE(p == NNG_REQ0_SELF);
+ NUTS_PASS(nng_socket_get_int(s, NNG_OPT_PEER, &p));
+	NUTS_TRUE(p == NNG_REQ0_PEER); // 49 (0x31)
+ NUTS_PASS(nng_socket_get_string(s, NNG_OPT_PROTONAME, &n));
+ NUTS_MATCH(n, NNG_REQ0_SELF_NAME);
+ nng_strfree(n);
+ NUTS_PASS(nng_socket_get_string(s, NNG_OPT_PEERNAME, &n));
+ NUTS_MATCH(n, NNG_REQ0_PEER_NAME);
+ nng_strfree(n);
+ NUTS_CLOSE(s);
+}
+
+static void
+test_req_ttl_option(void)
+{
+ nng_socket req;
+ int v;
+ bool b;
+ size_t sz;
+ const char *opt = NNG_OPT_MAXTTL;
+
+ NUTS_PASS(nng_req0_open(&req));
+
+ NUTS_PASS(nng_socket_set_int(req, opt, 1));
+ NUTS_FAIL(nng_socket_set_int(req, opt, 0), NNG_EINVAL);
+ NUTS_FAIL(nng_socket_set_int(req, opt, -1), NNG_EINVAL);
+ // This test will fail if the NNI_MAX_MAX_TTL is changed from the
+ // builtin default of 15.
+ NUTS_FAIL(nng_socket_set_int(req, opt, 16), NNG_EINVAL);
+ NUTS_FAIL(nng_socket_set_int(req, opt, 256), NNG_EINVAL);
+ NUTS_PASS(nng_socket_set_int(req, opt, 3));
+ NUTS_PASS(nng_socket_get_int(req, opt, &v));
+ NUTS_TRUE(v == 3);
+ v = 0;
+ sz = sizeof(v);
+ NUTS_PASS(nng_socket_get(req, opt, &v, &sz));
+ NUTS_TRUE(v == 3);
+ NUTS_TRUE(sz == sizeof(v));
+
+ NUTS_FAIL(nng_socket_set(req, opt, "", 1), NNG_EINVAL);
+ sz = 1;
+ NUTS_FAIL(nng_socket_get(req, opt, &v, &sz), NNG_EINVAL);
+ NUTS_FAIL(nng_socket_set_bool(req, opt, true), NNG_EBADTYPE);
+ NUTS_FAIL(nng_socket_get_bool(req, opt, &b), NNG_EBADTYPE);
+
+ NUTS_CLOSE(req);
+}
+
+static void
+test_req_resend_option(void)
+{
+ nng_socket req;
+ nng_duration d;
+ bool b;
+ size_t sz = sizeof(b);
+ const char * opt = NNG_OPT_REQ_RESENDTIME;
+
+ NUTS_PASS(nng_req0_open(&req));
+
+	NUTS_PASS(nng_socket_set_ms(req, opt, 10));
+ NUTS_FAIL(nng_socket_set(req, opt, "", 1), NNG_EINVAL);
+ NUTS_FAIL(nng_socket_get(req, opt, &b, &sz), NNG_EINVAL);
+ NUTS_FAIL(nng_socket_set_bool(req, opt, true), NNG_EBADTYPE);
+ NUTS_FAIL(nng_socket_get_bool(req, opt, &b), NNG_EBADTYPE);
+
+ NUTS_PASS(nng_socket_get_ms(req, opt, &d));
+ NUTS_TRUE(d == 10);
+ NUTS_CLOSE(req);
+}
+
+void
+test_req_recv_bad_state(void)
+{
+ nng_socket req;
+ nng_msg * msg = NULL;
+
+ NUTS_TRUE(nng_req0_open(&req) == 0);
+ NUTS_TRUE(nng_recvmsg(req, &msg, 0) == NNG_ESTATE);
+ NUTS_NULL(msg);
+ NUTS_CLOSE(req);
+}
+
+static void
+test_req_recv_garbage(void)
+{
+ nng_socket rep;
+ nng_socket req;
+ nng_msg * m;
+ uint32_t req_id;
+
+ NUTS_PASS(nng_rep0_open_raw(&rep));
+ NUTS_PASS(nng_req0_open(&req));
+ NUTS_PASS(nng_socket_set_ms(req, NNG_OPT_RECVTIMEO, 100));
+ NUTS_PASS(nng_socket_set_ms(req, NNG_OPT_SENDTIMEO, 1000));
+ NUTS_PASS(nng_socket_set_ms(rep, NNG_OPT_SENDTIMEO, 1000));
+	NUTS_PASS(nng_socket_set_ms(rep, NNG_OPT_RECVTIMEO, 1000));
+
+ NUTS_MARRY(req, rep);
+
+ NUTS_PASS(nng_msg_alloc(&m, 0));
+ NUTS_PASS(nng_sendmsg(req, m, 0));
+
+ NUTS_PASS(nng_recvmsg(rep, &m, 0));
+
+ // The message will have a header that contains the 32-bit pipe ID,
+ // followed by the 32-bit request ID. We will discard the request
+ // ID before sending it out.
+ NUTS_TRUE(nng_msg_header_len(m) == 8);
+ NUTS_PASS(nng_msg_header_chop_u32(m, &req_id));
+
+ NUTS_PASS(nng_sendmsg(rep, m, 0));
+ NUTS_FAIL(nng_recvmsg(req, &m, 0), NNG_ETIMEDOUT);
+
+ NUTS_CLOSE(req);
+ NUTS_CLOSE(rep);
+}
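+
+// For contrast with the test above, a well-formed reply from a raw REP
+// socket keeps the entire header it received (pipe ID followed by request
+// ID), which is what lets the REQ socket match the reply to its pending
+// request.  A sketch only ("raw_rep" stands for a socket opened with
+// nng_rep0_open_raw(); error checks omitted):
+//
+//	nng_msg *m;
+//	nng_recvmsg(raw_rep, &m, 0);  // header: pipe ID, then request ID
+//	// ... examine or replace the body here ...
+//	nng_sendmsg(raw_rep, m, 0);   // unchanged header routes the reply back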
+
+#define SECOND 1000
+
+void
+test_req_rep_exchange(void)
+{
+ nng_socket req;
+ nng_socket rep;
+
+ NUTS_TRUE(nng_req0_open(&req) == 0);
+ NUTS_TRUE(nng_rep0_open(&rep) == 0);
+
+ NUTS_TRUE(nng_socket_set_ms(req, NNG_OPT_RECVTIMEO, SECOND) == 0);
+ NUTS_TRUE(nng_socket_set_ms(rep, NNG_OPT_RECVTIMEO, SECOND) == 0);
+ NUTS_TRUE(nng_socket_set_ms(req, NNG_OPT_SENDTIMEO, SECOND) == 0);
+ NUTS_TRUE(nng_socket_set_ms(rep, NNG_OPT_SENDTIMEO, SECOND) == 0);
+
+ NUTS_MARRY(rep, req);
+
+ NUTS_SEND(req, "ping");
+ NUTS_RECV(rep, "ping");
+ NUTS_SEND(rep, "pong");
+ NUTS_RECV(req, "pong");
+
+ NUTS_CLOSE(req);
+ NUTS_CLOSE(rep);
+}
+
+void
+test_req_resend(void)
+{
+ nng_socket req;
+ nng_socket rep;
+
+ NUTS_PASS(nng_req0_open(&req));
+ NUTS_PASS(nng_rep0_open(&rep));
+
+ NUTS_PASS(nng_socket_set_ms(req, NNG_OPT_RECVTIMEO, SECOND));
+ NUTS_PASS(nng_socket_set_ms(rep, NNG_OPT_RECVTIMEO, SECOND));
+ NUTS_PASS(nng_socket_set_ms(req, NNG_OPT_SENDTIMEO, SECOND));
+ NUTS_PASS(nng_socket_set_ms(rep, NNG_OPT_SENDTIMEO, SECOND));
+ NUTS_PASS(nng_socket_set_ms(req, NNG_OPT_REQ_RESENDTIME, 10));
+
+ NUTS_MARRY(rep, req);
+
+ NUTS_SEND(req, "ping");
+ NUTS_RECV(rep, "ping");
+ NUTS_RECV(rep, "ping");
+ NUTS_RECV(rep, "ping");
+
+ NUTS_CLOSE(req);
+ NUTS_CLOSE(rep);
+}
+
+void
+test_req_resend_reconnect(void)
+{
+ nng_socket req;
+ nng_socket rep1;
+ nng_socket rep2;
+
+ NUTS_PASS(nng_req0_open(&req));
+ NUTS_PASS(nng_rep0_open(&rep1));
+ NUTS_PASS(nng_rep0_open(&rep2));
+
+ NUTS_PASS(nng_socket_set_ms(req, NNG_OPT_RECVTIMEO, SECOND));
+ NUTS_PASS(nng_socket_set_ms(rep1, NNG_OPT_RECVTIMEO, SECOND));
+ NUTS_PASS(nng_socket_set_ms(rep2, NNG_OPT_RECVTIMEO, SECOND));
+ NUTS_PASS(nng_socket_set_ms(req, NNG_OPT_SENDTIMEO, SECOND));
+ NUTS_PASS(nng_socket_set_ms(rep1, NNG_OPT_SENDTIMEO, SECOND));
+ NUTS_PASS(nng_socket_set_ms(rep2, NNG_OPT_SENDTIMEO, SECOND));
+ // We intentionally set the retry time long; that way we only see
+ // the retry from loss of our original peer.
+ NUTS_PASS(nng_socket_set_ms(req, NNG_OPT_REQ_RESENDTIME, 60 * SECOND));
+
+ NUTS_MARRY(rep1, req);
+
+ NUTS_SEND(req, "ping");
+ NUTS_RECV(rep1, "ping");
+
+ NUTS_CLOSE(rep1);
+ NUTS_MARRY(rep2, req);
+
+ NUTS_RECV(rep2, "ping");
+ NUTS_SEND(rep2, "rep2");
+ NUTS_RECV(req, "rep2");
+
+ NUTS_CLOSE(req);
+ NUTS_CLOSE(rep2);
+}
+
+void
+test_req_resend_disconnect(void)
+{
+ nng_socket req;
+ nng_socket rep1;
+ nng_socket rep2;
+
+ NUTS_PASS(nng_req0_open(&req));
+ NUTS_PASS(nng_rep0_open(&rep1));
+ NUTS_PASS(nng_rep0_open(&rep2));
+
+ NUTS_PASS(nng_socket_set_ms(req, NNG_OPT_RECVTIMEO, SECOND));
+ NUTS_PASS(nng_socket_set_ms(rep1, NNG_OPT_RECVTIMEO, SECOND));
+ NUTS_PASS(nng_socket_set_ms(rep2, NNG_OPT_RECVTIMEO, SECOND));
+ NUTS_PASS(nng_socket_set_ms(req, NNG_OPT_SENDTIMEO, SECOND));
+ NUTS_PASS(nng_socket_set_ms(rep1, NNG_OPT_SENDTIMEO, SECOND));
+ NUTS_PASS(nng_socket_set_ms(rep2, NNG_OPT_SENDTIMEO, SECOND));
+ // We intentionally set the retry time long; that way we only see
+ // the retry from loss of our original peer.
+ NUTS_PASS(nng_socket_set_ms(req, NNG_OPT_REQ_RESENDTIME, 60 * SECOND));
+
+ NUTS_MARRY(rep1, req);
+ NUTS_SEND(req, "ping");
+ NUTS_RECV(rep1, "ping");
+
+ NUTS_MARRY(rep2, req);
+ NUTS_CLOSE(rep1);
+
+ NUTS_RECV(rep2, "ping");
+ NUTS_SEND(rep2, "rep2");
+ NUTS_RECV(req, "rep2");
+
+ NUTS_CLOSE(req);
+ NUTS_CLOSE(rep2);
+}
+
+void
+test_req_disconnect_no_retry(void)
+{
+ nng_socket req;
+ nng_socket rep1;
+ nng_socket rep2;
+
+ NUTS_PASS(nng_req0_open(&req));
+ NUTS_PASS(nng_rep0_open(&rep1));
+ NUTS_PASS(nng_rep0_open(&rep2));
+
+ NUTS_PASS(nng_socket_set_ms(req, NNG_OPT_RECVTIMEO, SECOND));
+ NUTS_PASS(nng_socket_set_ms(rep1, NNG_OPT_RECVTIMEO, SECOND));
+ NUTS_PASS(nng_socket_set_ms(rep2, NNG_OPT_RECVTIMEO, SECOND));
+ NUTS_PASS(nng_socket_set_ms(req, NNG_OPT_SENDTIMEO, SECOND));
+ NUTS_PASS(nng_socket_set_ms(rep1, NNG_OPT_SENDTIMEO, SECOND / 10));
+ // Setting the resend time to zero so we will force an error
+ // if the peer disconnects without sending us an answer.
+ NUTS_PASS(nng_socket_set_ms(req, NNG_OPT_REQ_RESENDTIME, 0));
+
+ NUTS_MARRY(rep1, req);
+ NUTS_SEND(req, "ping");
+ NUTS_RECV(rep1, "ping");
+
+ NUTS_MARRY(rep2, req);
+ NUTS_CLOSE(rep1);
+
+ nng_msg *msg = NULL;
+ NUTS_FAIL(nng_recvmsg(req, &msg, 0), NNG_ECONNRESET);
+ NUTS_FAIL(nng_recvmsg(rep2, &msg, 0), NNG_ETIMEDOUT);
+
+ NUTS_CLOSE(req);
+ NUTS_CLOSE(rep2);
+}
+
+void
+test_req_disconnect_abort(void)
+{
+ nng_socket req;
+ nng_socket rep1;
+ nng_socket rep2;
+ nng_aio * aio;
+
+ NUTS_PASS(nng_req0_open(&req));
+ NUTS_PASS(nng_rep0_open(&rep1));
+ NUTS_PASS(nng_rep0_open(&rep2));
+	NUTS_PASS(nng_aio_alloc(&aio, NULL, NULL));
+
+ NUTS_PASS(nng_socket_set_ms(req, NNG_OPT_RECVTIMEO, SECOND));
+ NUTS_PASS(nng_socket_set_ms(rep1, NNG_OPT_RECVTIMEO, SECOND));
+ NUTS_PASS(nng_socket_set_ms(rep2, NNG_OPT_RECVTIMEO, SECOND));
+ NUTS_PASS(nng_socket_set_ms(req, NNG_OPT_SENDTIMEO, SECOND));
+ NUTS_PASS(nng_socket_set_ms(rep1, NNG_OPT_SENDTIMEO, SECOND / 10));
+ // Setting the resend time to zero so we will force an error
+ // if the peer disconnects without sending us an answer.
+ NUTS_PASS(nng_socket_set_ms(req, NNG_OPT_REQ_RESENDTIME, 0));
+
+ NUTS_MARRY(rep1, req);
+ NUTS_SEND(req, "ping");
+ NUTS_RECV(rep1, "ping");
+ nng_recv_aio(req, aio);
+
+ NUTS_MARRY(rep2, req);
+ NUTS_CLOSE(rep1);
+
+ nng_msg *msg = NULL;
+ nng_aio_wait(aio);
+ NUTS_FAIL(nng_aio_result(aio), NNG_ECONNRESET);
+ NUTS_FAIL(nng_recvmsg(rep2, &msg, 0), NNG_ETIMEDOUT);
+ nng_aio_free(aio);
+
+ NUTS_CLOSE(req);
+ NUTS_CLOSE(rep2);
+}
+
+void
+test_req_cancel(void)
+{
+ nng_duration retry = SECOND;
+ nng_socket req;
+ nng_socket rep;
+
+ NUTS_PASS(nng_rep_open(&rep));
+ NUTS_PASS(nng_req_open(&req));
+
+ NUTS_PASS(nng_socket_set_ms(req, NNG_OPT_RECVTIMEO, SECOND));
+ NUTS_PASS(nng_socket_set_ms(rep, NNG_OPT_RECVTIMEO, SECOND));
+ NUTS_PASS(nng_socket_set_ms(req, NNG_OPT_SENDTIMEO, 5 * SECOND));
+ NUTS_PASS(nng_socket_set_ms(rep, NNG_OPT_SENDTIMEO, 5 * SECOND));
+ NUTS_PASS(nng_socket_set_ms(req, NNG_OPT_REQ_RESENDTIME, retry));
+ NUTS_PASS(nng_socket_set_int(req, NNG_OPT_SENDBUF, 16));
+
+ NUTS_MARRY(rep, req);
+
+ // Send req #1 (abc).
+ NUTS_SEND(req, "abc");
+
+	// Sleep a bit.  This ensures that our request gets to the far
+	// side.  (If we cancel too fast, then our outgoing send will be
+	// canceled before it gets to the peer.)
+ NUTS_SLEEP(100);
+
+	// Send the next request ("def").  Note that the REP side
+	// server will have already buffered the first request, and
+	// should simply be waiting for us to reply to "abc".
+ NUTS_SEND(req, "def");
+
+ // Receive the first request (should be abc) on the REP server.
+ NUTS_RECV(rep, "abc");
+
+ // REP sends the reply to first command. This will be discarded
+ // by the REQ socket.
+ NUTS_SEND(rep, "abc");
+
+ // Now get the next command from the REP; should be "def".
+ NUTS_RECV(rep, "def");
+
+ // And send it back to REQ.
+ NUTS_SEND(rep, "def");
+
+ // And we got back only the second result.
+ NUTS_RECV(req, "def");
+
+ NUTS_CLOSE(req);
+ NUTS_CLOSE(rep);
+}
+
+void
+test_req_cancel_abort_recv(void)
+{
+ nng_aio * aio;
+ nng_duration retry = SECOND * 10; // 10s (kind of never)
+ nng_socket req;
+ nng_socket rep;
+
+ NUTS_PASS(nng_rep_open(&rep));
+ NUTS_PASS(nng_req_open(&req));
+ NUTS_PASS(nng_aio_alloc(&aio, NULL, NULL));
+
+ NUTS_PASS(nng_socket_set_ms(req, NNG_OPT_REQ_RESENDTIME, retry));
+ NUTS_PASS(nng_socket_set_int(req, NNG_OPT_SENDBUF, 16));
+ NUTS_PASS(nng_socket_set_ms(req, NNG_OPT_RECVTIMEO, 5 * SECOND));
+ NUTS_PASS(nng_socket_set_ms(rep, NNG_OPT_RECVTIMEO, 5 * SECOND));
+ NUTS_PASS(nng_socket_set_ms(req, NNG_OPT_SENDTIMEO, 5 * SECOND));
+ NUTS_PASS(nng_socket_set_ms(rep, NNG_OPT_SENDTIMEO, 5 * SECOND));
+
+ NUTS_MARRY(rep, req);
+
+ // Send req #1 (abc).
+ NUTS_SEND(req, "abc");
+
+	// Wait for it to get to the other side.
+ NUTS_SLEEP(100);
+
+ nng_aio_set_timeout(aio, 5 * SECOND);
+ nng_recv_aio(req, aio);
+
+ // Give time for this recv to post properly.
+ NUTS_SLEEP(100);
+
+	// Send the next request ("def").  Note that the REP side
+	// server will have already buffered the first request, and
+	// should simply be waiting for us to reply to "abc".
+ NUTS_SEND(req, "def");
+
+ // Our pending I/O should have been canceled.
+ nng_aio_wait(aio);
+ NUTS_FAIL(nng_aio_result(aio), NNG_ECANCELED);
+
+ // Receive the first request (should be abc) on the REP server.
+ NUTS_RECV(rep, "abc");
+
+ // REP sends the reply to first command. This will be
+ // discarded by the REQ socket.
+ NUTS_SEND(rep, "abc");
+
+ // Now get the next command from the REP; should be "def".
+ NUTS_RECV(rep, "def");
+
+ // And send it back to REQ.
+ NUTS_SEND(rep, "def");
+
+ // Try a req command. This should give back "def"
+ NUTS_RECV(req, "def");
+
+ nng_aio_free(aio);
+ NUTS_CLOSE(req);
+ NUTS_CLOSE(rep);
+}
+
+static void
+test_req_cancel_post_recv(void)
+{
+ nng_socket req;
+ nng_socket rep;
+
+ NUTS_PASS(nng_req0_open(&req));
+ NUTS_PASS(nng_rep0_open(&rep));
+ NUTS_PASS(nng_socket_set_ms(req, NNG_OPT_SENDTIMEO, 1000));
+ NUTS_PASS(nng_socket_set_ms(rep, NNG_OPT_SENDTIMEO, 1000));
+ NUTS_PASS(nng_socket_set_ms(req, NNG_OPT_RECVTIMEO, 1000));
+ NUTS_PASS(nng_socket_set_ms(rep, NNG_OPT_RECVTIMEO, 1000));
+ NUTS_MARRY(req, rep);
+
+ NUTS_SEND(req, "ONE");
+ NUTS_RECV(rep, "ONE");
+ NUTS_SEND(rep, "one");
+ NUTS_SLEEP(100); // Make sure reply arrives!
+ NUTS_SEND(req, "TWO");
+ NUTS_RECV(rep, "TWO");
+ NUTS_SEND(rep, "two");
+ NUTS_RECV(req, "two");
+
+ NUTS_CLOSE(req);
+ NUTS_CLOSE(rep);
+}
+
+void
+test_req_poll_writeable(void)
+{
+ int fd;
+ nng_socket req;
+ nng_socket rep;
+
+ NUTS_PASS(nng_req0_open(&req));
+ NUTS_PASS(nng_rep0_open(&rep));
+ NUTS_PASS(nng_socket_get_int(req, NNG_OPT_SENDFD, &fd));
+ NUTS_TRUE(fd >= 0);
+
+ // Not writable before connect.
+ NUTS_TRUE(nuts_poll_fd(fd) == false);
+
+ NUTS_MARRY(req, rep);
+
+ // It should be writable now.
+ NUTS_TRUE(nuts_poll_fd(fd));
+
+ // Submit a bunch of jobs. Note that we have to stall a bit
+ // between each message to let it queue up.
+ for (int i = 0; i < 10; i++) {
+ int rv = nng_send(req, "", 0, NNG_FLAG_NONBLOCK);
+ if (rv == NNG_EAGAIN) {
+ break;
+ }
+ NUTS_PASS(rv);
+ NUTS_SLEEP(50);
+ }
+ NUTS_TRUE(nuts_poll_fd(fd) == false);
+ NUTS_CLOSE(req);
+ NUTS_CLOSE(rep);
+}
+
+void
+test_req_poll_contention(void)
+{
+ int fd;
+ nng_socket req;
+ nng_socket rep;
+ nng_aio * aio;
+ nng_ctx ctx[5];
+ nng_aio * ctx_aio[5];
+ nng_msg * ctx_msg[5];
+ nng_msg * msg;
+
+ NUTS_PASS(nng_req0_open(&req));
+ NUTS_PASS(nng_rep0_open(&rep));
+ NUTS_PASS(nng_socket_set_int(req, NNG_OPT_SENDBUF, 1));
+ NUTS_PASS(nng_socket_set_int(rep, NNG_OPT_RECVBUF, 1));
+ NUTS_PASS(nng_socket_set_ms(req, NNG_OPT_SENDTIMEO, 1000));
+ NUTS_PASS(nng_socket_set_ms(rep, NNG_OPT_RECVTIMEO, 1000));
+
+ for (int i = 0; i < 5; i++) {
+ NUTS_PASS(nng_ctx_open(&ctx[i], req));
+ NUTS_PASS(nng_aio_alloc(&ctx_aio[i], NULL, NULL));
+ NUTS_PASS(nng_msg_alloc(&ctx_msg[i], 0));
+ }
+ NUTS_PASS(nng_aio_alloc(&aio, NULL, NULL));
+ NUTS_PASS(nng_msg_alloc(&msg, 0));
+
+ NUTS_PASS(nng_socket_get_int(req, NNG_OPT_SENDFD, &fd));
+ NUTS_TRUE(fd >= 0);
+
+ // Not writable before connect.
+ NUTS_TRUE(nuts_poll_fd(fd) == false);
+
+ nng_aio_set_msg(aio, msg);
+ nng_send_aio(req, aio);
+ for (int i = 0; i < 5; i++) {
+ nng_aio_set_msg(ctx_aio[i], ctx_msg[i]);
+ nng_ctx_send(ctx[i], ctx_aio[i]);
+ }
+ NUTS_SLEEP(50); // so everything is queued steady state
+
+ NUTS_MARRY(req, rep);
+
+ // It should not be writable now.
+ NUTS_TRUE(nuts_poll_fd(fd) == false);
+
+ NUTS_PASS(nng_recvmsg(rep, &msg, 0));
+ nng_msg_free(msg);
+
+ // Still not writeable...
+ NUTS_TRUE(nuts_poll_fd(fd) == false);
+ for (int i = 0; i < 5; i++) {
+ NUTS_PASS(nng_recvmsg(rep, &msg, 0));
+ nng_msg_free(msg);
+ }
+ // It can take a little bit of time for the eased back-pressure
+ // to reflect across the network.
+ NUTS_SLEEP(100);
+
+	// Should become writeable now...
+ NUTS_TRUE(nuts_poll_fd(fd) == true);
+
+ for (int i = 0; i < 5; i++) {
+ nng_aio_free(ctx_aio[i]);
+ }
+ nng_aio_free(aio);
+ NUTS_CLOSE(req);
+ NUTS_CLOSE(rep);
+}
+
+void
+test_req_poll_multi_pipe(void)
+{
+ int fd;
+ nng_socket req;
+ nng_socket rep1;
+ nng_socket rep2;
+
+ NUTS_PASS(nng_req0_open(&req));
+ NUTS_PASS(nng_rep0_open(&rep1));
+ NUTS_PASS(nng_rep0_open(&rep2));
+ NUTS_PASS(nng_socket_set_int(req, NNG_OPT_SENDBUF, 1));
+ NUTS_PASS(nng_socket_set_ms(req, NNG_OPT_SENDTIMEO, 1000));
+
+ NUTS_PASS(nng_socket_get_int(req, NNG_OPT_SENDFD, &fd));
+ NUTS_TRUE(fd >= 0);
+
+ // Not writable before connect.
+ NUTS_TRUE(nuts_poll_fd(fd) == false);
+
+ NUTS_MARRY(req, rep1);
+ NUTS_MARRY(req, rep2);
+
+ NUTS_TRUE(nuts_poll_fd(fd) == true);
+ NUTS_SEND(req, "ONE");
+ NUTS_TRUE(nuts_poll_fd(fd) == true);
+
+ NUTS_CLOSE(req);
+ NUTS_CLOSE(rep1);
+ NUTS_CLOSE(rep2);
+}
+
+void
+test_req_poll_readable(void)
+{
+ int fd;
+ nng_socket req;
+ nng_socket rep;
+ nng_msg * msg;
+
+ NUTS_PASS(nng_req0_open(&req));
+ NUTS_PASS(nng_rep0_open(&rep));
+ NUTS_PASS(nng_socket_get_int(req, NNG_OPT_RECVFD, &fd));
+ NUTS_TRUE(fd >= 0);
+
+ // Not readable if not connected!
+ NUTS_TRUE(nuts_poll_fd(fd) == false);
+
+ // Even after connect (no message yet)
+ NUTS_MARRY(req, rep);
+ NUTS_TRUE(nuts_poll_fd(fd) == false);
+
+ // But once we send messages, it is.
+ // We have to send a request, in order to send a reply.
+
+ NUTS_PASS(nng_msg_alloc(&msg, 0));
+ NUTS_PASS(nng_msg_append(msg, "xyz", 3));
+ NUTS_PASS(nng_sendmsg(req, msg, 0));
+ NUTS_PASS(nng_recvmsg(rep, &msg, 0)); // recv on rep
+ NUTS_PASS(nng_sendmsg(rep, msg, 0)); // echo it back
+ NUTS_SLEEP(200); // give time for message to arrive
+
+ NUTS_TRUE(nuts_poll_fd(fd) == true);
+
+ // and receiving makes it no longer ready
+ NUTS_PASS(nng_recvmsg(req, &msg, 0));
+ nng_msg_free(msg);
+ NUTS_TRUE(nuts_poll_fd(fd) == false);
+
+ // TODO verify unsolicited response
+
+ NUTS_CLOSE(req);
+ NUTS_CLOSE(rep);
+}
+
+static void
+test_req_ctx_no_poll(void)
+{
+ int fd;
+ nng_socket req;
+ nng_ctx ctx;
+
+ NUTS_PASS(nng_req0_open(&req));
+ NUTS_PASS(nng_ctx_open(&ctx, req));
+ NUTS_FAIL(nng_ctx_getopt_int(ctx, NNG_OPT_SENDFD, &fd), NNG_ENOTSUP);
+ NUTS_FAIL(nng_ctx_getopt_int(ctx, NNG_OPT_RECVFD, &fd), NNG_ENOTSUP);
+ NUTS_PASS(nng_ctx_close(ctx));
+ NUTS_CLOSE(req);
+}
+
+static void
+test_req_ctx_send_queued(void)
+{
+ nng_socket req;
+ nng_socket rep;
+ nng_ctx ctx[3];
+ nng_aio * aio[3];
+ nng_msg * msg[3];
+
+ NUTS_PASS(nng_req0_open(&req));
+ NUTS_PASS(nng_rep0_open(&rep));
+ NUTS_PASS(nng_socket_set_ms(req, NNG_OPT_SENDTIMEO, 1000));
+ NUTS_PASS(nng_socket_set_ms(rep, NNG_OPT_RECVTIMEO, 100));
+
+ for (int i = 0; i < 3; i++) {
+ NUTS_PASS(nng_ctx_open(&ctx[i], req));
+ NUTS_PASS(nng_aio_alloc(&aio[i], NULL, NULL));
+ NUTS_PASS(nng_msg_alloc(&msg[i], 0));
+ }
+
+ for (int i = 0; i < 3; i++) {
+ nng_aio_set_msg(aio[i], msg[i]);
+ nng_ctx_send(ctx[i], aio[i]);
+ }
+
+ NUTS_MARRY(req, rep);
+
+ NUTS_SLEEP(50); // Only to ensure stuff queues up
+ for (int i = 0; i < 3; i++) {
+ nng_msg *m;
+ NUTS_PASS(nng_recvmsg(rep, &m, 0));
+ nng_msg_free(m);
+ }
+
+ NUTS_CLOSE(req);
+ NUTS_CLOSE(rep);
+ for (int i = 0; i < 3; i++) {
+ nng_aio_wait(aio[i]);
+ NUTS_PASS(nng_aio_result(aio[i]));
+ nng_aio_free(aio[i]);
+ }
+}
+
+static void
+test_req_ctx_send_close(void)
+{
+ nng_socket req;
+ nng_ctx ctx[3];
+ nng_aio * aio[3];
+ nng_msg * msg[3];
+
+ NUTS_PASS(nng_req0_open(&req));
+ NUTS_PASS(nng_socket_set_ms(req, NNG_OPT_SENDTIMEO, 1000));
+
+ for (int i = 0; i < 3; i++) {
+ NUTS_PASS(nng_ctx_open(&ctx[i], req));
+ NUTS_PASS(nng_aio_alloc(&aio[i], NULL, NULL));
+ NUTS_PASS(nng_msg_alloc(&msg[i], 0));
+ }
+
+ for (int i = 0; i < 3; i++) {
+ nng_aio_set_msg(aio[i], msg[i]);
+ nng_ctx_send(ctx[i], aio[i]);
+ }
+
+ for (int i = 0; i < 3; i++) {
+ nng_ctx_close(ctx[i]);
+ }
+
+ for (int i = 0; i < 3; i++) {
+ nng_aio_wait(aio[i]);
+ NUTS_FAIL(nng_aio_result(aio[i]), NNG_ECLOSED);
+ nng_aio_free(aio[i]);
+ nng_msg_free(msg[i]);
+ }
+ NUTS_CLOSE(req);
+}
+
+static void
+test_req_ctx_send_abort(void)
+{
+ nng_socket req;
+ nng_ctx ctx[3];
+ nng_aio * aio[3];
+ nng_msg * msg[3];
+
+ NUTS_PASS(nng_req0_open(&req));
+ NUTS_PASS(nng_socket_set_ms(req, NNG_OPT_SENDTIMEO, 1000));
+
+ for (int i = 0; i < 3; i++) {
+ NUTS_PASS(nng_ctx_open(&ctx[i], req));
+ NUTS_PASS(nng_aio_alloc(&aio[i], NULL, NULL));
+ NUTS_PASS(nng_msg_alloc(&msg[i], 0));
+ }
+
+ for (int i = 0; i < 3; i++) {
+ nng_aio_set_msg(aio[i], msg[i]);
+ nng_ctx_send(ctx[i], aio[i]);
+ }
+
+ for (int i = 0; i < 3; i++) {
+ nng_aio_abort(aio[i], NNG_ECANCELED);
+ }
+
+ for (int i = 0; i < 3; i++) {
+ nng_aio_wait(aio[i]);
+ NUTS_FAIL(nng_aio_result(aio[i]), NNG_ECANCELED);
+ nng_aio_free(aio[i]);
+ nng_msg_free(msg[i]);
+ }
+ NUTS_CLOSE(req);
+}
+
+static void
+test_req_ctx_send_twice(void)
+{
+ nng_socket req;
+ nng_ctx ctx;
+ nng_aio * aio[2];
+ nng_msg * msg[2];
+
+ NUTS_PASS(nng_req0_open(&req));
+ NUTS_PASS(nng_socket_set_ms(req, NNG_OPT_SENDTIMEO, 1000));
+ NUTS_PASS(nng_ctx_open(&ctx, req));
+
+ for (int i = 0; i < 2; i++) {
+ NUTS_PASS(nng_aio_alloc(&aio[i], NULL, NULL));
+ NUTS_PASS(nng_msg_alloc(&msg[i], 0));
+ }
+
+ for (int i = 0; i < 2; i++) {
+ nng_aio_set_msg(aio[i], msg[i]);
+ nng_ctx_send(ctx, aio[i]);
+ NUTS_SLEEP(50);
+ }
+
+ NUTS_CLOSE(req);
+ nng_aio_wait(aio[0]);
+ nng_aio_wait(aio[1]);
+ NUTS_FAIL(nng_aio_result(aio[0]), NNG_ECANCELED);
+ NUTS_FAIL(nng_aio_result(aio[1]), NNG_ECLOSED);
+
+ for (int i = 0; i < 2; i++) {
+ nng_aio_free(aio[i]);
+ nng_msg_free(msg[i]);
+ }
+}
+
+static void
+test_req_ctx_recv_nonblock(void)
+{
+ nng_socket req;
+ nng_socket rep;
+ nng_ctx ctx;
+ nng_aio * aio;
+ nng_msg * msg;
+
+ NUTS_PASS(nng_req0_open(&req));
+ NUTS_PASS(nng_rep0_open(&rep));
+ NUTS_PASS(nng_ctx_open(&ctx, req));
+ NUTS_PASS(nng_aio_alloc(&aio, NULL, NULL));
+ NUTS_PASS(nng_msg_alloc(&msg, 0));
+
+ NUTS_MARRY(req, rep);
+
+ nng_aio_set_msg(aio, msg);
+ nng_ctx_send(ctx, aio);
+ nng_aio_wait(aio);
+ NUTS_PASS(nng_aio_result(aio));
+ nng_aio_set_timeout(aio, 0); // Instant timeout
+ nng_ctx_recv(ctx, aio);
+
+ nng_aio_wait(aio);
+ NUTS_FAIL(nng_aio_result(aio), NNG_ETIMEDOUT);
+ NUTS_CLOSE(req);
+ NUTS_CLOSE(rep);
+ nng_aio_free(aio);
+}
+
+static void
+test_req_ctx_send_nonblock(void)
+{
+ nng_socket req;
+ nng_ctx ctx;
+ nng_aio * aio;
+ nng_msg * msg;
+
+ NUTS_PASS(nng_req0_open(&req));
+ NUTS_PASS(nng_ctx_open(&ctx, req));
+ NUTS_PASS(nng_aio_alloc(&aio, NULL, NULL));
+ NUTS_PASS(nng_msg_alloc(&msg, 0));
+
+ nng_aio_set_msg(aio, msg);
+ nng_aio_set_timeout(aio, 0); // Instant timeout
+ nng_ctx_send(ctx, aio);
+ nng_aio_wait(aio);
+ NUTS_FAIL(nng_aio_result(aio), NNG_ETIMEDOUT);
+ NUTS_CLOSE(req);
+ nng_aio_free(aio);
+ nng_msg_free(msg);
+}
+
+static void
+test_req_ctx_recv_close_socket(void)
+{
+ nng_socket req;
+ nng_socket rep;
+ nng_ctx ctx;
+ nng_aio * aio;
+ nng_msg * m;
+
+ NUTS_PASS(nng_req0_open(&req));
+ NUTS_PASS(nng_rep0_open(&rep));
+ NUTS_PASS(nng_ctx_open(&ctx, req));
+ NUTS_PASS(nng_aio_alloc(&aio, NULL, NULL));
+ NUTS_MARRY(req, rep);
+ NUTS_PASS(nng_msg_alloc(&m, 0));
+ nng_aio_set_msg(aio, m);
+ nng_ctx_send(ctx, aio);
+ nng_aio_wait(aio);
+ NUTS_PASS(nng_aio_result(aio));
+
+ nng_ctx_recv(ctx, aio);
+ nng_close(req);
+
+ NUTS_FAIL(nng_aio_result(aio), NNG_ECLOSED);
+ nng_aio_free(aio);
+ NUTS_CLOSE(rep);
+}
+
+static void
+test_req_validate_peer(void)
+{
+ nng_socket s1, s2;
+ nng_stat * stats;
+ nng_stat * reject;
+ char * addr;
+
+ NUTS_ADDR(addr, "inproc");
+
+ NUTS_PASS(nng_req0_open(&s1));
+ NUTS_PASS(nng_req0_open(&s2));
+
+ NUTS_PASS(nng_listen(s1, addr, NULL, 0));
+ NUTS_PASS(nng_dial(s2, addr, NULL, NNG_FLAG_NONBLOCK));
+
+ NUTS_SLEEP(100);
+ NUTS_PASS(nng_stats_get(&stats));
+
+ NUTS_TRUE(stats != NULL);
+ NUTS_TRUE((reject = nng_stat_find_socket(stats, s1)) != NULL);
+ NUTS_TRUE((reject = nng_stat_find(reject, "reject")) != NULL);
+
+ NUTS_TRUE(nng_stat_type(reject) == NNG_STAT_COUNTER);
+ NUTS_TRUE(nng_stat_value(reject) > 0);
+
+ NUTS_CLOSE(s1);
+ NUTS_CLOSE(s2);
+ nng_stats_free(stats);
+}
+
+NUTS_TESTS = {
+ { "req identity", test_req_identity },
+ { "req ttl option", test_req_ttl_option },
+ { "req resend option", test_req_resend_option },
+ { "req recv bad state", test_req_recv_bad_state },
+ { "req recv garbage", test_req_recv_garbage },
+ { "req rep exchange", test_req_rep_exchange },
+ { "req resend", test_req_resend },
+ { "req resend disconnect", test_req_resend_disconnect },
+ { "req disconnect no retry", test_req_disconnect_no_retry },
+ { "req disconnect abort", test_req_disconnect_abort },
+ { "req resend reconnect", test_req_resend_reconnect },
+ { "req cancel", test_req_cancel },
+ { "req cancel abort recv", test_req_cancel_abort_recv },
+ { "req cancel post recv", test_req_cancel_post_recv },
+ { "req poll writable", test_req_poll_writeable },
+ { "req poll contention", test_req_poll_contention },
+ { "req poll multi pipe", test_req_poll_multi_pipe },
+ { "req poll readable", test_req_poll_readable },
+ { "req context send queued", test_req_ctx_send_queued },
+ { "req context send close", test_req_ctx_send_close },
+ { "req context send abort", test_req_ctx_send_abort },
+ { "req context send twice", test_req_ctx_send_twice },
+ { "req context does not poll", test_req_ctx_no_poll },
+ { "req context recv close socket", test_req_ctx_recv_close_socket },
+ { "req context recv nonblock", test_req_ctx_recv_nonblock },
+ { "req context send nonblock", test_req_ctx_send_nonblock },
+ { "req validate peer", test_req_validate_peer },
+ { NULL, NULL },
+};
diff --git a/src/sp/protocol/reqrep0/xrep.c b/src/sp/protocol/reqrep0/xrep.c
new file mode 100644
index 00000000..9737c600
--- /dev/null
+++ b/src/sp/protocol/reqrep0/xrep.c
@@ -0,0 +1,432 @@
+//
+// Copyright 2020 Staysail Systems, Inc. <info@staysail.tech>
+// Copyright 2018 Capitar IT Group BV <info@capitar.com>
+//
+// This software is supplied under the terms of the MIT License, a
+// copy of which should be located in the distribution where this
+// file was obtained (LICENSE.txt). A copy of the license may also be
+// found online at https://opensource.org/licenses/MIT.
+//
+
+#include <string.h>
+
+#include "core/nng_impl.h"
+#include "nng/protocol/reqrep0/rep.h"
+
+// Response protocol in raw mode. The REP protocol is the "reply" side of a
+// request-reply pair. This is useful for building RPC servers, for
+// example.
+
+typedef struct xrep0_pipe xrep0_pipe;
+typedef struct xrep0_sock xrep0_sock;
+
+static void xrep0_sock_getq_cb(void *);
+static void xrep0_pipe_getq_cb(void *);
+static void xrep0_pipe_putq_cb(void *);
+static void xrep0_pipe_send_cb(void *);
+static void xrep0_pipe_recv_cb(void *);
+static void xrep0_pipe_fini(void *);
+
+// xrep0_sock is our per-socket protocol private structure.
+struct xrep0_sock {
+ nni_msgq * uwq;
+ nni_msgq * urq;
+ nni_mtx lk;
+ nni_atomic_int ttl;
+ nni_id_map pipes;
+ nni_aio aio_getq;
+};
+
+// xrep0_pipe is our per-pipe protocol private structure.
+struct xrep0_pipe {
+ nni_pipe * pipe;
+ xrep0_sock *rep;
+ nni_msgq * sendq;
+ nni_aio aio_getq;
+ nni_aio aio_send;
+ nni_aio aio_recv;
+ nni_aio aio_putq;
+};
+
+static void
+xrep0_sock_fini(void *arg)
+{
+ xrep0_sock *s = arg;
+
+ nni_aio_fini(&s->aio_getq);
+ nni_id_map_fini(&s->pipes);
+ nni_mtx_fini(&s->lk);
+}
+
+static int
+xrep0_sock_init(void *arg, nni_sock *sock)
+{
+ xrep0_sock *s = arg;
+
+ nni_mtx_init(&s->lk);
+ nni_aio_init(&s->aio_getq, xrep0_sock_getq_cb, s);
+ nni_atomic_init(&s->ttl);
+ nni_atomic_set(&s->ttl, 8); // Per RFC
+ s->uwq = nni_sock_sendq(sock);
+ s->urq = nni_sock_recvq(sock);
+
+ nni_id_map_init(&s->pipes, 0, 0, false);
+ return (0);
+}
+
+static void
+xrep0_sock_open(void *arg)
+{
+ xrep0_sock *s = arg;
+
+	// This starts us retrieving messages from the upper write queue.
+ nni_msgq_aio_get(s->uwq, &s->aio_getq);
+}
+
+static void
+xrep0_sock_close(void *arg)
+{
+ xrep0_sock *s = arg;
+
+ nni_aio_close(&s->aio_getq);
+}
+
+static void
+xrep0_pipe_stop(void *arg)
+{
+ xrep0_pipe *p = arg;
+
+ nni_aio_stop(&p->aio_getq);
+ nni_aio_stop(&p->aio_send);
+ nni_aio_stop(&p->aio_recv);
+ nni_aio_stop(&p->aio_putq);
+}
+
+static void
+xrep0_pipe_fini(void *arg)
+{
+ xrep0_pipe *p = arg;
+
+ nni_aio_fini(&p->aio_getq);
+ nni_aio_fini(&p->aio_send);
+ nni_aio_fini(&p->aio_recv);
+ nni_aio_fini(&p->aio_putq);
+ nni_msgq_fini(p->sendq);
+}
+
+static int
+xrep0_pipe_init(void *arg, nni_pipe *pipe, void *s)
+{
+ xrep0_pipe *p = arg;
+ int rv;
+
+ nni_aio_init(&p->aio_getq, xrep0_pipe_getq_cb, p);
+ nni_aio_init(&p->aio_send, xrep0_pipe_send_cb, p);
+ nni_aio_init(&p->aio_recv, xrep0_pipe_recv_cb, p);
+ nni_aio_init(&p->aio_putq, xrep0_pipe_putq_cb, p);
+
+ p->pipe = pipe;
+ p->rep = s;
+
+ // We want a pretty deep send queue on pipes. The rationale here is
+ // that the send rate will be mitigated by the receive rate.
+ // If a slow pipe (req pipe not reading its own responses!?)
+ // comes up, then we will start discarding its replies eventually,
+ // but it takes some time. It would be poor form for a peer to
+ // smash us with requests, but be unable to handle replies faster
+ // than we can forward them. If they do that, their replies get
+ // dropped. (From a DDoS perspective, it might be nice in the
+ // future if we had a way to exert back pressure to the send side --
+ // essentially don't let peers send requests faster than they are
+ // willing to receive replies. Something to think about for the
+ // future.)
+ if ((rv = nni_msgq_init(&p->sendq, 64)) != 0) {
+ xrep0_pipe_fini(p);
+ return (rv);
+ }
+ return (0);
+}
+
+static int
+xrep0_pipe_start(void *arg)
+{
+ xrep0_pipe *p = arg;
+ xrep0_sock *s = p->rep;
+ int rv;
+
+ if (nni_pipe_peer(p->pipe) != NNG_REP0_PEER) {
+ // Peer protocol mismatch.
+ return (NNG_EPROTO);
+ }
+
+ nni_mtx_lock(&s->lk);
+ rv = nni_id_set(&s->pipes, nni_pipe_id(p->pipe), p);
+ nni_mtx_unlock(&s->lk);
+ if (rv != 0) {
+ return (rv);
+ }
+
+ nni_msgq_aio_get(p->sendq, &p->aio_getq);
+ nni_pipe_recv(p->pipe, &p->aio_recv);
+ return (0);
+}
+
+static void
+xrep0_pipe_close(void *arg)
+{
+ xrep0_pipe *p = arg;
+ xrep0_sock *s = p->rep;
+
+ nni_aio_close(&p->aio_getq);
+ nni_aio_close(&p->aio_send);
+ nni_aio_close(&p->aio_recv);
+ nni_aio_close(&p->aio_putq);
+ nni_msgq_close(p->sendq);
+
+ nni_mtx_lock(&s->lk);
+ nni_id_remove(&s->pipes, nni_pipe_id(p->pipe));
+ nni_mtx_unlock(&s->lk);
+}
+
+static void
+xrep0_sock_getq_cb(void *arg)
+{
+ xrep0_sock *s = arg;
+ nni_msgq * uwq = s->uwq;
+ nni_msg * msg;
+ uint32_t id;
+ xrep0_pipe *p;
+
+	// This watches for messages from the upper write queue,
+	// extracts the destination pipe ID, and forwards the message to
+	// that pipe via its own queue.  This prevents a single bad or
+	// slow pipe from gumming up the works for the entire socket.
+
+ if (nni_aio_result(&s->aio_getq) != 0) {
+ // Closed socket?
+ return;
+ }
+
+ msg = nni_aio_get_msg(&s->aio_getq);
+ nni_aio_set_msg(&s->aio_getq, NULL);
+
+ // We yank the outgoing pipe id from the header
+ if (nni_msg_header_len(msg) < 4) {
+ nni_msg_free(msg);
+
+ // Look for another message on the upper write queue.
+ nni_msgq_aio_get(uwq, &s->aio_getq);
+ return;
+ }
+
+ id = nni_msg_header_trim_u32(msg);
+
+ // Look for the pipe, and attempt to put the message there
+ // (non-blocking) if we can. If we can't for any reason, then we
+ // free the message.
+ nni_mtx_lock(&s->lk);
+ if (((p = nni_id_get(&s->pipes, id)) == NULL) ||
+ (nni_msgq_tryput(p->sendq, msg) != 0)) {
+ nni_msg_free(msg);
+ }
+ nni_mtx_unlock(&s->lk);
+
+ // Now look for another message on the upper write queue.
+ nni_msgq_aio_get(uwq, &s->aio_getq);
+}
+
+static void
+xrep0_pipe_getq_cb(void *arg)
+{
+ xrep0_pipe *p = arg;
+
+ if (nni_aio_result(&p->aio_getq) != 0) {
+ nni_pipe_close(p->pipe);
+ return;
+ }
+
+ nni_aio_set_msg(&p->aio_send, nni_aio_get_msg(&p->aio_getq));
+ nni_aio_set_msg(&p->aio_getq, NULL);
+
+ nni_pipe_send(p->pipe, &p->aio_send);
+}
+
+static void
+xrep0_pipe_send_cb(void *arg)
+{
+ xrep0_pipe *p = arg;
+
+ if (nni_aio_result(&p->aio_send) != 0) {
+ nni_msg_free(nni_aio_get_msg(&p->aio_send));
+ nni_aio_set_msg(&p->aio_send, NULL);
+ nni_pipe_close(p->pipe);
+ return;
+ }
+
+ nni_msgq_aio_get(p->sendq, &p->aio_getq);
+}
+
+static void
+xrep0_pipe_recv_cb(void *arg)
+{
+ xrep0_pipe *p = arg;
+ xrep0_sock *s = p->rep;
+ nni_msg * msg;
+ int hops;
+ int ttl;
+
+ if (nni_aio_result(&p->aio_recv) != 0) {
+ nni_pipe_close(p->pipe);
+ return;
+ }
+
+ ttl = nni_atomic_get(&s->ttl);
+
+ msg = nni_aio_get_msg(&p->aio_recv);
+ nni_aio_set_msg(&p->aio_recv, NULL);
+
+ nni_msg_set_pipe(msg, nni_pipe_id(p->pipe));
+
+ // Store the pipe id in the header, first thing.
+ nni_msg_header_append_u32(msg, nni_pipe_id(p->pipe));
+
+ // Move backtrace from body to header
+ hops = 1;
+ for (;;) {
+ bool end;
+ uint8_t *body;
+ if (hops > ttl) {
+ // This isn't malformed, but it has gone through
+ // too many hops. Do not disconnect, because we
+ // can legitimately receive messages with too many
+ // hops from devices, etc.
+ goto drop;
+ }
+ hops++;
+ if (nni_msg_len(msg) < 4) {
+ // Peer is speaking garbage. Kick it.
+ nni_msg_free(msg);
+ nni_pipe_close(p->pipe);
+ return;
+ }
+ body = nni_msg_body(msg);
+ end = ((body[0] & 0x80u) != 0);
+ if (nni_msg_header_append(msg, body, 4) != 0) {
+ // Out of memory most likely, but keep going to
+ // avoid breaking things.
+ goto drop;
+ }
+ nni_msg_trim(msg, 4);
+ if (end) {
+ break;
+ }
+ }
+
+ // Go ahead and send it up.
+ nni_aio_set_msg(&p->aio_putq, msg);
+ nni_msgq_aio_put(s->urq, &p->aio_putq);
+ return;
+
+drop:
+ nni_msg_free(msg);
+ nni_pipe_recv(p->pipe, &p->aio_recv);
+}
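+
+// After the receive callback above runs, the header handed to the
+// application has the form
+//
+//	[ pipe ID ][ hop ID ] ... [ request ID | 0x80000000 ]
+//
+// one 32-bit word each, the last word carrying the high bit that ends the
+// backtrace.  A raw REP application replies by sending that header back
+// unchanged; as a sketch (made-up values, "raw_rep" from
+// nng_rep0_open_raw(), error checks omitted), an equivalent reply can be
+// built by hand like this:
+//
+//	nng_msg *m;
+//	nng_msg_alloc(&m, 0);
+//	nng_msg_header_append_u32(m, pipe_id);      // routing word, trimmed on send
+//	nng_msg_header_append_u32(m, 0x80000001u);  // request ID with end bit
+//	nng_msg_append(m, "reply", 5);
+//	nng_sendmsg(raw_rep, m, 0);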
+
+static void
+xrep0_pipe_putq_cb(void *arg)
+{
+ xrep0_pipe *p = arg;
+
+ if (nni_aio_result(&p->aio_putq) != 0) {
+ nni_msg_free(nni_aio_get_msg(&p->aio_putq));
+ nni_aio_set_msg(&p->aio_putq, NULL);
+ nni_pipe_close(p->pipe);
+ return;
+ }
+
+ nni_pipe_recv(p->pipe, &p->aio_recv);
+}
+
+static int
+xrep0_sock_set_maxttl(void *arg, const void *buf, size_t sz, nni_opt_type t)
+{
+ xrep0_sock *s = arg;
+ int ttl;
+ int rv;
+ if ((rv = nni_copyin_int(&ttl, buf, sz, 1, NNI_MAX_MAX_TTL, t)) == 0) {
+ nni_atomic_set(&s->ttl, ttl);
+ }
+ return (rv);
+}
+
+static int
+xrep0_sock_get_maxttl(void *arg, void *buf, size_t *szp, nni_opt_type t)
+{
+ xrep0_sock *s = arg;
+ return (nni_copyout_int(nni_atomic_get(&s->ttl), buf, szp, t));
+}
+
+static void
+xrep0_sock_send(void *arg, nni_aio *aio)
+{
+ xrep0_sock *s = arg;
+
+ nni_msgq_aio_put(s->uwq, aio);
+}
+
+static void
+xrep0_sock_recv(void *arg, nni_aio *aio)
+{
+ xrep0_sock *s = arg;
+
+ nni_msgq_aio_get(s->urq, aio);
+}
+
+// This is the global protocol structure -- our linkage to the core.
+// This should be the only global non-static symbol in this file.
+static nni_proto_pipe_ops xrep0_pipe_ops = {
+ .pipe_size = sizeof(xrep0_pipe),
+ .pipe_init = xrep0_pipe_init,
+ .pipe_fini = xrep0_pipe_fini,
+ .pipe_start = xrep0_pipe_start,
+ .pipe_close = xrep0_pipe_close,
+ .pipe_stop = xrep0_pipe_stop,
+};
+
+static nni_option xrep0_sock_options[] = {
+ {
+ .o_name = NNG_OPT_MAXTTL,
+ .o_get = xrep0_sock_get_maxttl,
+ .o_set = xrep0_sock_set_maxttl,
+ },
+ // terminate list
+ {
+ .o_name = NULL,
+ },
+};
+
+static nni_proto_sock_ops xrep0_sock_ops = {
+ .sock_size = sizeof(xrep0_sock),
+ .sock_init = xrep0_sock_init,
+ .sock_fini = xrep0_sock_fini,
+ .sock_open = xrep0_sock_open,
+ .sock_close = xrep0_sock_close,
+ .sock_options = xrep0_sock_options,
+ .sock_send = xrep0_sock_send,
+ .sock_recv = xrep0_sock_recv,
+};
+
+static nni_proto xrep0_proto = {
+ .proto_version = NNI_PROTOCOL_VERSION,
+ .proto_self = { NNG_REP0_SELF, NNG_REP0_SELF_NAME },
+ .proto_peer = { NNG_REP0_PEER, NNG_REP0_PEER_NAME },
+ .proto_flags = NNI_PROTO_FLAG_SNDRCV | NNI_PROTO_FLAG_RAW,
+ .proto_sock_ops = &xrep0_sock_ops,
+ .proto_pipe_ops = &xrep0_pipe_ops,
+};
+
+int
+nng_rep0_open_raw(nng_socket *sidp)
+{
+ return (nni_proto_open(sidp, &xrep0_proto));
+}
diff --git a/src/sp/protocol/reqrep0/xrep_test.c b/src/sp/protocol/reqrep0/xrep_test.c
new file mode 100644
index 00000000..6f1564eb
--- /dev/null
+++ b/src/sp/protocol/reqrep0/xrep_test.c
@@ -0,0 +1,434 @@
+//
+// Copyright 2020 Staysail Systems, Inc. <info@staysail.tech>
+//
+// This software is supplied under the terms of the MIT License, a
+// copy of which should be located in the distribution where this
+// file was obtained (LICENSE.txt). A copy of the license may also be
+// found online at https://opensource.org/licenses/MIT.
+//
+
+#include <nuts.h>
+
+static void
+test_xrep_identity(void)
+{
+ nng_socket s;
+ int p1, p2;
+ char * n1;
+ char * n2;
+
+ NUTS_PASS(nng_rep0_open_raw(&s));
+ NUTS_PASS(nng_socket_get_int(s, NNG_OPT_PROTO, &p1));
+ NUTS_PASS(nng_socket_get_int(s, NNG_OPT_PEER, &p2));
+ NUTS_PASS(nng_socket_get_string(s, NNG_OPT_PROTONAME, &n1));
+ NUTS_PASS(nng_socket_get_string(s, NNG_OPT_PEERNAME, &n2));
+ NUTS_CLOSE(s);
+ NUTS_TRUE(p1 == NNG_REP0_SELF);
+ NUTS_TRUE(p2 == NNG_REP0_PEER);
+ NUTS_MATCH(n1, NNG_REP0_SELF_NAME);
+ NUTS_MATCH(n2, NNG_REP0_PEER_NAME);
+ nng_strfree(n1);
+ nng_strfree(n2);
+}
+
+static void
+test_xrep_raw(void)
+{
+ nng_socket s;
+ bool b;
+
+ NUTS_PASS(nng_rep0_open_raw(&s));
+ NUTS_PASS(nng_socket_get_bool(s, NNG_OPT_RAW, &b));
+ NUTS_TRUE(b);
+ NUTS_CLOSE(s);
+}
+
+static void
+test_xrep_no_context(void)
+{
+ nng_socket s;
+ nng_ctx ctx;
+
+ NUTS_PASS(nng_rep0_open_raw(&s));
+ NUTS_FAIL(nng_ctx_open(&ctx, s), NNG_ENOTSUP);
+ NUTS_CLOSE(s);
+}
+
+static void
+test_xrep_poll_writeable(void)
+{
+ int fd;
+ nng_socket req;
+ nng_socket rep;
+
+ NUTS_PASS(nng_rep0_open_raw(&rep));
+ NUTS_PASS(nng_req0_open(&req));
+ NUTS_PASS(nng_socket_get_int(rep, NNG_OPT_SENDFD, &fd));
+ NUTS_TRUE(fd >= 0);
+
+ // We are always writeable, even before connect. This is so that
+ // back-pressure from a bad peer can't trash others. We assume
+ // that peers won't send us requests faster than they can consume
+ // the answers. If they do, they will lose their answers.
+ NUTS_TRUE(nuts_poll_fd(fd) == true);
+
+ NUTS_MARRY(req, rep);
+
+	// Still writable after connect.
+ NUTS_TRUE(nuts_poll_fd(fd) == true);
+
+ NUTS_CLOSE(req);
+ NUTS_CLOSE(rep);
+}
+
+static void
+test_xrep_poll_readable(void)
+{
+ int fd;
+ nng_socket req;
+ nng_socket rep;
+ nng_msg * msg;
+
+ NUTS_PASS(nng_req0_open(&req));
+ NUTS_PASS(nng_rep0_open_raw(&rep));
+ NUTS_PASS(nng_socket_get_int(rep, NNG_OPT_RECVFD, &fd));
+ NUTS_TRUE(fd >= 0);
+
+ // Not readable if not connected!
+ NUTS_TRUE(nuts_poll_fd(fd) == false);
+
+ // Even after connect (no message yet)
+ NUTS_MARRY(req, rep);
+ NUTS_TRUE(nuts_poll_fd(fd) == false);
+
+ // But once we send messages, it is.
+ // We have to send a request, in order to send a reply.
+ NUTS_SEND(req, "abc");
+ NUTS_SLEEP(100);
+
+ NUTS_TRUE(nuts_poll_fd(fd) == true);
+
+ // and receiving makes it no longer ready
+ NUTS_PASS(nng_recvmsg(rep, &msg, 0));
+ nng_msg_free(msg);
+ NUTS_TRUE(nuts_poll_fd(fd) == false);
+
+ NUTS_CLOSE(req);
+ NUTS_CLOSE(rep);
+}
+
+static void
+test_xrep_validate_peer(void)
+{
+ nng_socket s1, s2;
+ nng_stat * stats;
+ nng_stat * reject;
+ char *addr;
+
+ NUTS_ADDR(addr, "inproc");
+
+ NUTS_PASS(nng_rep0_open_raw(&s1));
+ NUTS_PASS(nng_rep0_open(&s2));
+
+ NUTS_PASS(nng_listen(s1, addr, NULL, 0));
+ NUTS_PASS(nng_dial(s2, addr, NULL, NNG_FLAG_NONBLOCK));
+
+ NUTS_SLEEP(100);
+ NUTS_PASS(nng_stats_get(&stats));
+
+ NUTS_TRUE(stats != NULL);
+ NUTS_TRUE((reject = nng_stat_find_socket(stats, s1)) != NULL);
+ NUTS_TRUE((reject = nng_stat_find(reject, "reject")) != NULL);
+
+ NUTS_TRUE(nng_stat_type(reject) == NNG_STAT_COUNTER);
+ NUTS_TRUE(nng_stat_value(reject) > 0);
+
+ NUTS_PASS(nng_close(s1));
+ NUTS_PASS(nng_close(s2));
+ nng_stats_free(stats);
+}
+
+static void
+test_xrep_close_pipe_before_send(void)
+{
+ nng_socket rep;
+ nng_socket req;
+ nng_pipe p;
+ nng_aio * aio1;
+ nng_msg * m;
+
+ NUTS_PASS(nng_rep0_open_raw(&rep));
+ NUTS_PASS(nng_req0_open(&req));
+ NUTS_PASS(nng_socket_set_ms(rep, NNG_OPT_RECVTIMEO, 1000));
+ NUTS_PASS(nng_socket_set_ms(rep, NNG_OPT_SENDTIMEO, 1000));
+ NUTS_PASS(nng_socket_set_ms(req, NNG_OPT_SENDTIMEO, 1000));
+ NUTS_PASS(nng_aio_alloc(&aio1, NULL, NULL));
+
+ NUTS_MARRY(req, rep);
+ NUTS_SEND(req, "test");
+
+ nng_recv_aio(rep, aio1);
+ nng_aio_wait(aio1);
+ NUTS_PASS(nng_aio_result(aio1));
+ NUTS_TRUE((m = nng_aio_get_msg(aio1)) != NULL);
+ p = nng_msg_get_pipe(m);
+ NUTS_PASS(nng_pipe_close(p));
+ NUTS_PASS(nng_sendmsg(rep, m, 0));
+
+ NUTS_CLOSE(req);
+ NUTS_CLOSE(rep);
+ nng_aio_free(aio1);
+}
+
+static void
+test_xrep_close_pipe_during_send(void)
+{
+ nng_socket rep;
+ nng_socket req;
+ nng_pipe p;
+ nng_msg * m;
+
+ NUTS_PASS(nng_rep0_open_raw(&rep));
+ NUTS_PASS(nng_req0_open_raw(&req));
+ NUTS_PASS(nng_socket_set_ms(rep, NNG_OPT_RECVTIMEO, 1000));
+ NUTS_PASS(nng_socket_set_ms(rep, NNG_OPT_SENDTIMEO, 200));
+ NUTS_PASS(nng_socket_set_ms(req, NNG_OPT_SENDTIMEO, 1000));
+ NUTS_PASS(nng_socket_set_int(rep, NNG_OPT_SENDBUF, 20));
+ NUTS_PASS(nng_socket_set_int(rep, NNG_OPT_RECVBUF, 20));
+ NUTS_PASS(nng_socket_set_int(req, NNG_OPT_SENDBUF, 20));
+ NUTS_PASS(nng_socket_set_int(req, NNG_OPT_RECVBUF, 1));
+
+ NUTS_MARRY(req, rep);
+
+ NUTS_PASS(nng_msg_alloc(&m, 4));
+ NUTS_PASS(nng_msg_append_u32(m, (unsigned) 0x81000000u));
+ NUTS_PASS(nng_sendmsg(req, m, 0));
+ NUTS_PASS(nng_recvmsg(rep, &m, 0));
+ p = nng_msg_get_pipe(m);
+ nng_msg_free(m);
+
+ for (int i = 0; i < 100; i++) {
+ NUTS_PASS(nng_msg_alloc(&m, 4));
+ NUTS_PASS(nng_msg_header_append_u32(m, nng_pipe_id(p)));
+ NUTS_PASS(
+ nng_msg_header_append_u32(m, (unsigned) i | 0x80000000u));
+ // xrep does not exert back-pressure
+ NUTS_PASS(nng_sendmsg(rep, m, 0));
+ }
+ NUTS_PASS(nng_pipe_close(p));
+
+ NUTS_CLOSE(req);
+ NUTS_CLOSE(rep);
+}
+
+static void
+test_xrep_close_during_recv(void)
+{
+ nng_socket rep;
+ nng_socket req;
+ nng_msg * m;
+
+ NUTS_PASS(nng_rep0_open_raw(&rep));
+ NUTS_PASS(nng_req0_open_raw(&req));
+ NUTS_PASS(nng_socket_set_ms(rep, NNG_OPT_RECVTIMEO, 1000));
+ NUTS_PASS(nng_socket_set_ms(req, NNG_OPT_SENDTIMEO, 100));
+ NUTS_PASS(nng_socket_set_int(rep, NNG_OPT_RECVBUF, 5));
+ NUTS_PASS(nng_socket_set_int(req, NNG_OPT_SENDBUF, 20));
+
+ NUTS_MARRY(req, rep);
+
+ for (unsigned i = 0; i < 100; i++) {
+ int rv;
+ NUTS_PASS(nng_msg_alloc(&m, 4));
+ NUTS_PASS(nng_msg_header_append_u32(m, i | 0x80000000u));
+ rv = nng_sendmsg(req, m, 0);
+ if (rv == NNG_ETIMEDOUT) {
+ nng_msg_free(m);
+ break;
+ }
+ }
+ NUTS_CLOSE(req);
+ NUTS_CLOSE(rep);
+}
+
+static void
+test_xrep_recv_aio_stopped(void)
+{
+ nng_socket rep;
+ nng_aio * aio;
+
+ NUTS_PASS(nng_rep0_open_raw(&rep));
+ NUTS_PASS(nng_aio_alloc(&aio, NULL, NULL));
+
+ nng_aio_stop(aio);
+ nng_recv_aio(rep, aio);
+ nng_aio_wait(aio);
+ NUTS_FAIL(nng_aio_result(aio), NNG_ECANCELED);
+ NUTS_CLOSE(rep);
+ nng_aio_free(aio);
+}
+
+static void
+test_xrep_send_no_header(void)
+{
+ nng_socket rep;
+ nng_socket req;
+ nng_msg * m;
+
+ NUTS_PASS(nng_req0_open_raw(&req));
+ NUTS_PASS(nng_rep0_open_raw(&rep));
+ NUTS_PASS(nng_socket_set_ms(rep, NNG_OPT_RECVTIMEO, 100));
+ NUTS_PASS(nng_socket_set_ms(req, NNG_OPT_RECVTIMEO, 100));
+ NUTS_PASS(nng_socket_set_ms(rep, NNG_OPT_SENDTIMEO, 1000));
+ NUTS_PASS(nng_socket_set_ms(req, NNG_OPT_SENDTIMEO, 1000));
+
+ NUTS_MARRY(req, rep);
+
+ NUTS_PASS(nng_msg_alloc(&m, 4));
+ NUTS_PASS(nng_sendmsg(rep, m, 0));
+ NUTS_FAIL(nng_recvmsg(rep, &m, 0), NNG_ETIMEDOUT);
+
+ NUTS_CLOSE(req);
+ NUTS_CLOSE(rep);
+}
+
+static void
+test_xrep_recv_garbage(void)
+{
+ nng_socket rep;
+ nng_socket req;
+ nng_msg * m;
+
+ NUTS_PASS(nng_rep0_open_raw(&rep));
+ NUTS_PASS(nng_req0_open_raw(&req));
+ NUTS_PASS(nng_socket_set_ms(rep, NNG_OPT_RECVTIMEO, 100));
+ NUTS_PASS(nng_socket_set_ms(rep, NNG_OPT_SENDTIMEO, 100));
+ NUTS_PASS(nng_socket_set_ms(req, NNG_OPT_SENDTIMEO, 1000));
+
+ NUTS_MARRY(req, rep);
+
+ NUTS_PASS(nng_msg_alloc(&m, 4));
+ NUTS_PASS(nng_msg_append_u32(m, 1u));
+ NUTS_PASS(nng_sendmsg(req, m, 0));
+ NUTS_FAIL(nng_recvmsg(rep, &m, 0), NNG_ETIMEDOUT);
+
+ NUTS_CLOSE(req);
+ NUTS_CLOSE(rep);
+}
+
+static void
+test_xrep_ttl_option(void)
+{
+ nng_socket rep;
+ int v;
+ bool b;
+ size_t sz;
+ const char *opt = NNG_OPT_MAXTTL;
+
+ NUTS_PASS(nng_rep0_open_raw(&rep));
+
+ NUTS_PASS(nng_socket_set_int(rep, opt, 1));
+ NUTS_FAIL(nng_socket_set_int(rep, opt, 0), NNG_EINVAL);
+ NUTS_FAIL(nng_socket_set_int(rep, opt, -1), NNG_EINVAL);
+ NUTS_FAIL(nng_socket_set_int(rep, opt, 16), NNG_EINVAL);
+ NUTS_FAIL(nng_socket_set_int(rep, opt, 256), NNG_EINVAL);
+ NUTS_PASS(nng_socket_set_int(rep, opt, 3));
+ NUTS_PASS(nng_socket_get_int(rep, opt, &v));
+ NUTS_TRUE(v == 3);
+ v = 0;
+ sz = sizeof(v);
+ NUTS_PASS(nng_socket_get(rep, opt, &v, &sz));
+ NUTS_TRUE(v == 3);
+ NUTS_TRUE(sz == sizeof(v));
+
+	NUTS_FAIL(nng_socket_set(rep, opt, "", 1), NNG_EINVAL);
+	sz = 1;
+	NUTS_FAIL(nng_socket_get(rep, opt, &v, &sz), NNG_EINVAL);
+	NUTS_FAIL(nng_socket_set_bool(rep, opt, true), NNG_EBADTYPE);
+	NUTS_FAIL(nng_socket_get_bool(rep, opt, &b), NNG_EBADTYPE);
+
+	NUTS_CLOSE(rep);
+}
+
+static void
+test_xrep_ttl_drop(void)
+{
+ nng_socket rep;
+ nng_socket req;
+ nng_msg * m;
+
+ NUTS_PASS(nng_rep0_open_raw(&rep));
+ NUTS_PASS(nng_req0_open_raw(&req));
+ NUTS_PASS(nng_socket_set_int(rep, NNG_OPT_MAXTTL, 3));
+ NUTS_PASS(nng_socket_set_ms(rep, NNG_OPT_RECVTIMEO, 200));
+ NUTS_PASS(nng_socket_set_ms(req, NNG_OPT_SENDTIMEO, 1000));
+
+ NUTS_MARRY(req, rep);
+
+ // Send messages. Note that xrep implicitly adds a hop on receive.
+
+ NUTS_PASS(nng_msg_alloc(&m, 0));
+ NUTS_PASS(nng_msg_append_u32(m, 1u)); // 2 hops
+ NUTS_PASS(nng_msg_append_u32(m, 0x80000001u));
+ NUTS_PASS(nng_msg_append(m, "PASS1", 6));
+ NUTS_PASS(nng_sendmsg(req, m, 0));
+
+ NUTS_PASS(nng_msg_alloc(&m, 0));
+ NUTS_PASS(nng_msg_append_u32(m, 1u)); // 4 hops -- discard!
+ NUTS_PASS(nng_msg_append_u32(m, 2u));
+ NUTS_PASS(nng_msg_append_u32(m, 3u));
+ NUTS_PASS(nng_msg_append_u32(m, 0x80000002u));
+ NUTS_PASS(nng_msg_append(m, "FAIL2", 6));
+ NUTS_PASS(nng_sendmsg(req, m, 0));
+
+ NUTS_PASS(nng_msg_alloc(&m, 0));
+ NUTS_PASS(nng_msg_append_u32(m, 1u)); // 3 hops - passes
+ NUTS_PASS(nng_msg_append_u32(m, 2u));
+ NUTS_PASS(nng_msg_append_u32(m, 0x80000003u));
+ NUTS_PASS(nng_msg_append(m, "PASS3", 6));
+ NUTS_PASS(nng_sendmsg(req, m, 0));
+
+ NUTS_PASS(nng_msg_alloc(&m, 0));
+ NUTS_PASS(nng_msg_append_u32(m, 1u)); // 4 hops -- discard!
+ NUTS_PASS(nng_msg_append_u32(m, 2u));
+ NUTS_PASS(nng_msg_append_u32(m, 3u));
+ NUTS_PASS(nng_msg_append_u32(m, 0x80000003u));
+ NUTS_PASS(nng_msg_append(m, "FAIL4", 6));
+ NUTS_PASS(nng_sendmsg(req, m, 0));
+
+ // So on receive we should see 80000001 and 80000003.
+ NUTS_PASS(nng_recvmsg(rep, &m, 0));
+ NUTS_TRUE(nng_msg_header_len(m) == 12);
+ NUTS_TRUE(nng_msg_len(m) == 6);
+ NUTS_TRUE(strcmp(nng_msg_body(m), "PASS1") == 0);
+ nng_msg_free(m);
+
+ NUTS_PASS(nng_recvmsg(rep, &m, 0));
+ NUTS_TRUE(nng_msg_header_len(m) == 16); // 3 hops + ID
+ NUTS_TRUE(nng_msg_len(m) == 6);
+ NUTS_TRUE(strcmp(nng_msg_body(m), "PASS3") == 0);
+ nng_msg_free(m);
+
+ NUTS_FAIL(nng_recvmsg(rep, &m, 0), NNG_ETIMEDOUT);
+
+ NUTS_CLOSE(req);
+ NUTS_CLOSE(rep);
+}
+
+NUTS_TESTS = {
+ { "xrep identity", test_xrep_identity },
+ { "xrep raw", test_xrep_raw },
+ { "xrep no context", test_xrep_no_context },
+ { "xrep poll readable", test_xrep_poll_readable },
+ { "xrep poll writable", test_xrep_poll_writeable },
+ { "xrep validate peer", test_xrep_validate_peer },
+ { "xrep close pipe before send", test_xrep_close_pipe_before_send },
+ { "xrep close pipe during send", test_xrep_close_pipe_during_send },
+ { "xrep close during recv", test_xrep_close_during_recv },
+ { "xrep recv aio stopped", test_xrep_recv_aio_stopped },
+ { "xrep send no header", test_xrep_send_no_header },
+ { "xrep recv garbage", test_xrep_recv_garbage },
+ { "xrep ttl option", test_xrep_ttl_option },
+ { "xrep ttl drop", test_xrep_ttl_drop },
+ { NULL, NULL },
+};
diff --git a/src/sp/protocol/reqrep0/xreq.c b/src/sp/protocol/reqrep0/xreq.c
new file mode 100644
index 00000000..bcb218bf
--- /dev/null
+++ b/src/sp/protocol/reqrep0/xreq.c
@@ -0,0 +1,319 @@
+//
+// Copyright 2020 Staysail Systems, Inc. <info@staysail.tech>
+// Copyright 2018 Capitar IT Group BV <info@capitar.com>
+//
+// This software is supplied under the terms of the MIT License, a
+// copy of which should be located in the distribution where this
+// file was obtained (LICENSE.txt). A copy of the license may also be
+// found online at https://opensource.org/licenses/MIT.
+//
+
+#include <stdio.h>
+
+#include "core/nng_impl.h"
+#include "nng/protocol/reqrep0/req.h"
+
+// Request protocol in raw mode. The REQ protocol is the "request" side of
+// a request-reply pair. This is useful for building RPC clients, for
+// example.
+
+typedef struct xreq0_pipe xreq0_pipe;
+typedef struct xreq0_sock xreq0_sock;
+
+// An xreq0_sock is our per-socket protocol private structure.
+struct xreq0_sock {
+ nni_msgq * uwq;
+ nni_msgq * urq;
+ nni_atomic_int ttl;
+};
+
+// An xreq0_pipe is our per-pipe protocol private structure.
+struct xreq0_pipe {
+ nni_pipe * pipe;
+ xreq0_sock *req;
+ nni_aio aio_getq;
+ nni_aio aio_send;
+ nni_aio aio_recv;
+ nni_aio aio_putq;
+};
+
+static void xreq0_sock_fini(void *);
+static void xreq0_getq_cb(void *);
+static void xreq0_send_cb(void *);
+static void xreq0_recv_cb(void *);
+static void xreq0_putq_cb(void *);
+
+static int
+xreq0_sock_init(void *arg, nni_sock *sock)
+{
+ xreq0_sock *s = arg;
+
+ nni_atomic_init(&s->ttl);
+ nni_atomic_set(&s->ttl, 8);
+ s->uwq = nni_sock_sendq(sock);
+ s->urq = nni_sock_recvq(sock);
+
+ return (0);
+}
+
+static void
+xreq0_sock_open(void *arg)
+{
+ NNI_ARG_UNUSED(arg);
+}
+
+static void
+xreq0_sock_close(void *arg)
+{
+ NNI_ARG_UNUSED(arg);
+}
+
+static void
+xreq0_sock_fini(void *arg)
+{
+ NNI_ARG_UNUSED(arg);
+}
+
+static void
+xreq0_pipe_stop(void *arg)
+{
+ xreq0_pipe *p = arg;
+
+ nni_aio_stop(&p->aio_getq);
+ nni_aio_stop(&p->aio_putq);
+ nni_aio_stop(&p->aio_recv);
+ nni_aio_stop(&p->aio_send);
+}
+
+static void
+xreq0_pipe_fini(void *arg)
+{
+ xreq0_pipe *p = arg;
+
+ nni_aio_fini(&p->aio_getq);
+ nni_aio_fini(&p->aio_putq);
+ nni_aio_fini(&p->aio_recv);
+ nni_aio_fini(&p->aio_send);
+}
+
+static int
+xreq0_pipe_init(void *arg, nni_pipe *pipe, void *s)
+{
+ xreq0_pipe *p = arg;
+
+ nni_aio_init(&p->aio_getq, xreq0_getq_cb, p);
+ nni_aio_init(&p->aio_putq, xreq0_putq_cb, p);
+ nni_aio_init(&p->aio_recv, xreq0_recv_cb, p);
+ nni_aio_init(&p->aio_send, xreq0_send_cb, p);
+
+ p->pipe = pipe;
+ p->req = s;
+ return (0);
+}
+
+static int
+xreq0_pipe_start(void *arg)
+{
+ xreq0_pipe *p = arg;
+ xreq0_sock *s = p->req;
+
+ if (nni_pipe_peer(p->pipe) != NNG_REQ0_PEER) {
+ return (NNG_EPROTO);
+ }
+
+ nni_msgq_aio_get(s->uwq, &p->aio_getq);
+ nni_pipe_recv(p->pipe, &p->aio_recv);
+ return (0);
+}
+
+static void
+xreq0_pipe_close(void *arg)
+{
+ xreq0_pipe *p = arg;
+
+ nni_aio_close(&p->aio_getq);
+ nni_aio_close(&p->aio_putq);
+ nni_aio_close(&p->aio_recv);
+ nni_aio_close(&p->aio_send);
+}
+
+// For raw mode we can just let the pipes "contend" (via their get
+// operations) for messages from the upper write queue. The message queue
+// implementation provides ordering, so load will be spread automatically.
+// (NB: We may have to revise this in the future if we want to provide some
+// kind of priority.)
+
+static void
+xreq0_getq_cb(void *arg)
+{
+ xreq0_pipe *p = arg;
+
+ if (nni_aio_result(&p->aio_getq) != 0) {
+ nni_pipe_close(p->pipe);
+ return;
+ }
+
+ nni_aio_set_msg(&p->aio_send, nni_aio_get_msg(&p->aio_getq));
+ nni_aio_set_msg(&p->aio_getq, NULL);
+
+ nni_pipe_send(p->pipe, &p->aio_send);
+}
+
+static void
+xreq0_send_cb(void *arg)
+{
+ xreq0_pipe *p = arg;
+
+ if (nni_aio_result(&p->aio_send) != 0) {
+ nni_msg_free(nni_aio_get_msg(&p->aio_send));
+ nni_aio_set_msg(&p->aio_send, NULL);
+ nni_pipe_close(p->pipe);
+ return;
+ }
+
+ // Sent a message so we just need to look for another one.
+ nni_msgq_aio_get(p->req->uwq, &p->aio_getq);
+}
+
+static void
+xreq0_putq_cb(void *arg)
+{
+ xreq0_pipe *p = arg;
+
+ if (nni_aio_result(&p->aio_putq) != 0) {
+ nni_msg_free(nni_aio_get_msg(&p->aio_putq));
+ nni_aio_set_msg(&p->aio_putq, NULL);
+ nni_pipe_close(p->pipe);
+ return;
+ }
+ nni_aio_set_msg(&p->aio_putq, NULL);
+
+ nni_pipe_recv(p->pipe, &p->aio_recv);
+}
+
+static void
+xreq0_recv_cb(void *arg)
+{
+ xreq0_pipe *p = arg;
+ xreq0_sock *sock = p->req;
+ nni_msg * msg;
+ bool end;
+
+ if (nni_aio_result(&p->aio_recv) != 0) {
+ nni_pipe_close(p->pipe);
+ return;
+ }
+
+ msg = nni_aio_get_msg(&p->aio_recv);
+ nni_aio_set_msg(&p->aio_recv, NULL);
+ nni_msg_set_pipe(msg, nni_pipe_id(p->pipe));
+ end = false;
+
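+	// Move the backtrace from the body into the header, one 32-bit
+	// element at a time; the element with the high bit set (the
+	// request ID) terminates the backtrace.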
+ while (!end) {
+ uint8_t *body;
+
+ if (nni_msg_len(msg) < 4) {
+ // Peer gave us garbage, so kick it.
+ nni_msg_free(msg);
+ nni_pipe_close(p->pipe);
+ return;
+ }
+ body = nni_msg_body(msg);
+ end = ((body[0] & 0x80u) != 0);
+
+ if (nng_msg_header_append(msg, body, sizeof (uint32_t)) != 0) {
+ // TODO: bump a no-memory stat
+ nni_msg_free(msg);
+ // Closing the pipe may release some memory.
+ // It at least gives an indication to the peer
+ // that we've lost the message.
+ nni_pipe_close(p->pipe);
+ return;
+ }
+ nni_msg_trim(msg, sizeof (uint32_t));
+ }
+ nni_aio_set_msg(&p->aio_putq, msg);
+ nni_msgq_aio_put(sock->urq, &p->aio_putq);
+}
+
+static void
+xreq0_sock_send(void *arg, nni_aio *aio)
+{
+ xreq0_sock *s = arg;
+
+ nni_msgq_aio_put(s->uwq, aio);
+}
+
+static void
+xreq0_sock_recv(void *arg, nni_aio *aio)
+{
+ xreq0_sock *s = arg;
+
+ nni_msgq_aio_get(s->urq, aio);
+}
+
+static int
+xreq0_sock_set_max_ttl(void *arg, const void *buf, size_t sz, nni_opt_type t)
+{
+ xreq0_sock *s = arg;
+ int ttl;
+ int rv;
+ if ((rv = nni_copyin_int(&ttl, buf, sz, 1, NNI_MAX_MAX_TTL, t)) == 0) {
+ nni_atomic_set(&s->ttl, ttl);
+ }
+ return (rv);
+}
+
+static int
+xreq0_sock_get_max_ttl(void *arg, void *buf, size_t *szp, nni_opt_type t)
+{
+ xreq0_sock *s = arg;
+ return (nni_copyout_int(nni_atomic_get(&s->ttl), buf, szp, t));
+}
+
+static nni_proto_pipe_ops xreq0_pipe_ops = {
+ .pipe_size = sizeof(xreq0_pipe),
+ .pipe_init = xreq0_pipe_init,
+ .pipe_fini = xreq0_pipe_fini,
+ .pipe_start = xreq0_pipe_start,
+ .pipe_close = xreq0_pipe_close,
+ .pipe_stop = xreq0_pipe_stop,
+};
+
+static nni_option xreq0_sock_options[] = {
+ {
+ .o_name = NNG_OPT_MAXTTL,
+ .o_get = xreq0_sock_get_max_ttl,
+ .o_set = xreq0_sock_set_max_ttl,
+ },
+ // terminate list
+ {
+ .o_name = NULL,
+ },
+};
+
+static nni_proto_sock_ops xreq0_sock_ops = {
+ .sock_size = sizeof(xreq0_sock),
+ .sock_init = xreq0_sock_init,
+ .sock_fini = xreq0_sock_fini,
+ .sock_open = xreq0_sock_open,
+ .sock_close = xreq0_sock_close,
+ .sock_options = xreq0_sock_options,
+ .sock_send = xreq0_sock_send,
+ .sock_recv = xreq0_sock_recv,
+};
+
+static nni_proto xreq0_proto = {
+ .proto_version = NNI_PROTOCOL_VERSION,
+ .proto_self = { NNG_REQ0_SELF, NNG_REQ0_SELF_NAME },
+ .proto_peer = { NNG_REQ0_PEER, NNG_REQ0_PEER_NAME },
+ .proto_flags = NNI_PROTO_FLAG_SNDRCV | NNI_PROTO_FLAG_RAW,
+ .proto_sock_ops = &xreq0_sock_ops,
+ .proto_pipe_ops = &xreq0_pipe_ops,
+ .proto_ctx_ops = NULL, // raw mode does not support contexts
+};
+
+int
+nng_req0_open_raw(nng_socket *sock)
+{
+ return (nni_proto_open(sock, &xreq0_proto));
+}
diff --git a/src/sp/protocol/reqrep0/xreq_test.c b/src/sp/protocol/reqrep0/xreq_test.c
new file mode 100644
index 00000000..8c850cba
--- /dev/null
+++ b/src/sp/protocol/reqrep0/xreq_test.c
@@ -0,0 +1,367 @@
+//
+// Copyright 2020 Staysail Systems, Inc. <info@staysail.tech>
+//
+// This software is supplied under the terms of the MIT License, a
+// copy of which should be located in the distribution where this
+// file was obtained (LICENSE.txt). A copy of the license may also be
+// found online at https://opensource.org/licenses/MIT.
+//
+
+#include <nuts.h>
+
+static void
+test_xreq_identity(void)
+{
+ nng_socket s;
+ int p1, p2;
+ char * n1;
+ char * n2;
+
+ NUTS_PASS(nng_req0_open_raw(&s));
+ NUTS_PASS(nng_socket_get_int(s, NNG_OPT_PROTO, &p1));
+ NUTS_PASS(nng_socket_get_int(s, NNG_OPT_PEER, &p2));
+ NUTS_PASS(nng_socket_get_string(s, NNG_OPT_PROTONAME, &n1));
+ NUTS_PASS(nng_socket_get_string(s, NNG_OPT_PEERNAME, &n2));
+ NUTS_CLOSE(s);
+ NUTS_TRUE(p1 == NNG_REQ0_SELF);
+ NUTS_TRUE(p2 == NNG_REQ0_PEER);
+ NUTS_MATCH(n1, NNG_REQ0_SELF_NAME);
+ NUTS_MATCH(n2, NNG_REQ0_PEER_NAME);
+ nng_strfree(n1);
+ nng_strfree(n2);
+}
+
+static void
+test_xreq_raw(void)
+{
+ nng_socket s;
+ bool b;
+
+ NUTS_PASS(nng_req0_open_raw(&s));
+ NUTS_PASS(nng_socket_get_bool(s, NNG_OPT_RAW, &b));
+ NUTS_TRUE(b);
+ NUTS_CLOSE(s);
+}
+
+static void
+test_xreq_no_context(void)
+{
+ nng_socket s;
+ nng_ctx ctx;
+
+ NUTS_PASS(nng_req0_open_raw(&s));
+ NUTS_FAIL(nng_ctx_open(&ctx, s), NNG_ENOTSUP);
+ NUTS_CLOSE(s);
+}
+
+static void
+test_xreq_poll_writeable(void)
+{
+ int fd;
+ nng_socket req;
+ nng_socket rep;
+
+ NUTS_PASS(nng_req0_open_raw(&req));
+ NUTS_PASS(nng_rep0_open(&rep));
+ NUTS_PASS(nng_socket_get_int(req, NNG_OPT_SENDFD, &fd));
+ NUTS_TRUE(fd >= 0);
+
+ // We can't write until we have a connection.
+ NUTS_TRUE(nuts_poll_fd(fd) == false);
+
+ NUTS_MARRY(req, rep);
+
+ // Now it's writable.
+ NUTS_TRUE(nuts_poll_fd(fd) == true);
+
+ NUTS_CLOSE(req);
+ NUTS_CLOSE(rep);
+}
+
+static void
+test_xreq_poll_readable(void)
+{
+ int fd;
+ nng_socket req;
+ nng_socket rep;
+ nng_msg * msg;
+
+ NUTS_PASS(nng_req0_open_raw(&req));
+ NUTS_PASS(nng_rep0_open(&rep));
+ NUTS_PASS(nng_socket_get_int(req, NNG_OPT_RECVFD, &fd));
+ NUTS_PASS(nng_socket_set_ms(rep, NNG_OPT_RECVTIMEO, 1000));
+ NUTS_PASS(nng_socket_set_ms(req, NNG_OPT_RECVTIMEO, 1000));
+ NUTS_PASS(nng_socket_set_ms(rep, NNG_OPT_SENDTIMEO, 1000));
+ NUTS_PASS(nng_socket_set_ms(req, NNG_OPT_SENDTIMEO, 1000));
+
+ NUTS_TRUE(fd >= 0);
+
+ // Not readable if not connected!
+ NUTS_TRUE(nuts_poll_fd(fd) == false);
+
+ // Even after connect (no message yet)
+ NUTS_MARRY(req, rep);
+ NUTS_TRUE(nuts_poll_fd(fd) == false);
+
+ // But once we send messages, it is.
+ // We have to send a request, in order to send a reply.
+ NUTS_PASS(nng_msg_alloc(&msg, 0));
+ // Request ID
+ NUTS_PASS(nng_msg_append_u32(msg, 0x80000000));
+ NUTS_PASS(nng_sendmsg(req, msg, 0));
+
+ NUTS_PASS(nng_recvmsg(rep, &msg, 0));
+ NUTS_PASS(nng_sendmsg(rep, msg, 0));
+
+ NUTS_SLEEP(100);
+
+ NUTS_TRUE(nuts_poll_fd(fd) == true);
+
+ // and receiving makes it no longer ready
+ NUTS_PASS(nng_recvmsg(req, &msg, 0));
+ nng_msg_free(msg);
+ NUTS_TRUE(nuts_poll_fd(fd) == false);
+
+ NUTS_CLOSE(req);
+ NUTS_CLOSE(rep);
+}
+
+static void
+test_xreq_validate_peer(void)
+{
+ nng_socket s1, s2;
+ nng_stat * stats;
+ nng_stat * reject;
+ char * addr;
+
+ NUTS_ADDR(addr, "inproc");
+
+ NUTS_PASS(nng_req0_open_raw(&s1));
+ NUTS_PASS(nng_req0_open(&s2));
+
+ NUTS_PASS(nng_listen(s1, addr, NULL, 0));
+ NUTS_PASS(nng_dial(s2, addr, NULL, NNG_FLAG_NONBLOCK));
+
+ NUTS_SLEEP(100);
+ NUTS_PASS(nng_stats_get(&stats));
+
+ NUTS_TRUE(stats != NULL);
+ NUTS_TRUE((reject = nng_stat_find_socket(stats, s1)) != NULL);
+ NUTS_TRUE((reject = nng_stat_find(reject, "reject")) != NULL);
+
+ NUTS_TRUE(nng_stat_type(reject) == NNG_STAT_COUNTER);
+ NUTS_TRUE(nng_stat_value(reject) > 0);
+
+ NUTS_CLOSE(s1);
+ NUTS_CLOSE(s2);
+ nng_stats_free(stats);
+}
+
+static void
+test_xreq_recv_aio_stopped(void)
+{
+ nng_socket req;
+ nng_aio * aio;
+
+ NUTS_PASS(nng_req0_open_raw(&req));
+ NUTS_PASS(nng_aio_alloc(&aio, NULL, NULL));
+
+ nng_aio_stop(aio);
+ nng_recv_aio(req, aio);
+ nng_aio_wait(aio);
+ NUTS_FAIL(nng_aio_result(aio), NNG_ECANCELED);
+ NUTS_CLOSE(req);
+ nng_aio_free(aio);
+}
+
+static void
+test_xreq_recv_garbage(void)
+{
+ nng_socket rep;
+ nng_socket req;
+ nng_msg * m;
+ uint32_t req_id;
+
+ NUTS_PASS(nng_rep0_open_raw(&rep));
+ NUTS_PASS(nng_req0_open_raw(&req));
+ NUTS_PASS(nng_socket_set_ms(req, NNG_OPT_RECVTIMEO, 100));
+ NUTS_PASS(nng_socket_set_ms(req, NNG_OPT_SENDTIMEO, 1000));
+ NUTS_PASS(nng_socket_set_ms(rep, NNG_OPT_SENDTIMEO, 1000));
+	NUTS_PASS(nng_socket_set_ms(rep, NNG_OPT_RECVTIMEO, 1000));
+
+ NUTS_MARRY(req, rep);
+
+ NUTS_PASS(nng_msg_alloc(&m, 0));
+ NUTS_PASS(nng_msg_append_u32(m, 0x80000000));
+ NUTS_PASS(nng_sendmsg(req, m, 0));
+
+ NUTS_PASS(nng_recvmsg(rep, &m, 0));
+
+ // The message will have a header that contains the 32-bit pipe ID,
+ // followed by the 32-bit request ID. We will discard the request
+ // ID before sending it out.
+ NUTS_TRUE(nng_msg_header_len(m) == 8);
+ NUTS_PASS(nng_msg_header_chop_u32(m, &req_id));
+ NUTS_TRUE(req_id == 0x80000000);
+
+ NUTS_PASS(nng_sendmsg(rep, m, 0));
+ NUTS_FAIL(nng_recvmsg(req, &m, 0), NNG_ETIMEDOUT);
+
+ NUTS_CLOSE(req);
+ NUTS_CLOSE(rep);
+}
+
+static void
+test_xreq_recv_header(void)
+{
+ nng_socket rep;
+ nng_socket req;
+ nng_msg * m;
+ nng_pipe p1, p2;
+ uint32_t id;
+
+ NUTS_PASS(nng_rep0_open_raw(&rep));
+ NUTS_PASS(nng_req0_open_raw(&req));
+ NUTS_PASS(nng_socket_set_ms(req, NNG_OPT_RECVTIMEO, 1000));
+ NUTS_PASS(nng_socket_set_ms(req, NNG_OPT_SENDTIMEO, 1000));
+ NUTS_PASS(nng_socket_set_ms(rep, NNG_OPT_SENDTIMEO, 1000));
+	NUTS_PASS(nng_socket_set_ms(rep, NNG_OPT_RECVTIMEO, 1000));
+
+ NUTS_MARRY_EX(req, rep, NULL, &p1, &p2);
+
+ // Simulate a few hops.
+ NUTS_PASS(nng_msg_alloc(&m, 0));
+ NUTS_PASS(nng_msg_header_append_u32(m, nng_pipe_id(p2)));
+ NUTS_PASS(nng_msg_header_append_u32(m, 0x2));
+ NUTS_PASS(nng_msg_header_append_u32(m, 0x1));
+ NUTS_PASS(nng_msg_header_append_u32(m, 0x80000123u));
+
+ NUTS_PASS(nng_sendmsg(rep, m, 0));
+
+ NUTS_PASS(nng_recvmsg(req, &m, 0));
+ NUTS_TRUE(nng_msg_header_len(m) == 12);
+ NUTS_PASS(nng_msg_header_trim_u32(m, &id));
+ NUTS_TRUE(id == 0x2);
+ NUTS_PASS(nng_msg_header_trim_u32(m, &id));
+ NUTS_TRUE(id == 0x1);
+ NUTS_PASS(nng_msg_header_trim_u32(m, &id));
+ NUTS_TRUE(id == 0x80000123u);
+
+ nng_msg_free(m);
+
+ NUTS_CLOSE(req);
+ NUTS_CLOSE(rep);
+}
+
+static void
+test_xreq_close_during_recv(void)
+{
+ nng_socket rep;
+ nng_socket req;
+ nng_msg * m;
+ nng_pipe p1;
+ nng_pipe p2;
+
+ NUTS_PASS(nng_rep0_open_raw(&rep));
+ NUTS_PASS(nng_req0_open_raw(&req));
+ NUTS_PASS(nng_socket_set_ms(rep, NNG_OPT_RECVTIMEO, 1000));
+ NUTS_PASS(nng_socket_set_ms(req, NNG_OPT_SENDTIMEO, 100));
+ NUTS_PASS(nng_socket_set_int(req, NNG_OPT_RECVBUF, 5));
+ NUTS_PASS(nng_socket_set_int(rep, NNG_OPT_SENDBUF, 20));
+
+ NUTS_MARRY_EX(req, rep, NULL, &p1, &p2);
+ NUTS_TRUE(nng_pipe_id(p1) > 0);
+ NUTS_TRUE(nng_pipe_id(p2) > 0);
+
+ for (unsigned i = 0; i < 20; i++) {
+ NUTS_PASS(nng_msg_alloc(&m, 4));
+ NUTS_PASS(nng_msg_header_append_u32(m, nng_pipe_id(p2)));
+ NUTS_PASS(nng_msg_header_append_u32(m, i | 0x80000000u));
+ NUTS_SLEEP(10);
+ NUTS_PASS(nng_sendmsg(rep, m, 0));
+ }
+ NUTS_CLOSE(req);
+ NUTS_CLOSE(rep);
+}
+
+static void
+test_xreq_close_pipe_during_send(void)
+{
+ nng_socket rep;
+ nng_socket req;
+ nng_msg * m;
+ nng_pipe p1;
+ nng_pipe p2;
+
+ NUTS_PASS(nng_rep0_open_raw(&rep));
+ NUTS_PASS(nng_req0_open_raw(&req));
+ NUTS_PASS(nng_socket_set_ms(rep, NNG_OPT_RECVTIMEO, 1000));
+ NUTS_PASS(nng_socket_set_ms(req, NNG_OPT_SENDTIMEO, 100));
+ NUTS_PASS(nng_socket_set_int(rep, NNG_OPT_RECVBUF, 5));
+ NUTS_PASS(nng_socket_set_int(req, NNG_OPT_SENDBUF, 20));
+
+ NUTS_MARRY_EX(req, rep, NULL, &p1, &p2);
+ NUTS_TRUE(nng_pipe_id(p1) > 0);
+ NUTS_TRUE(nng_pipe_id(p2) > 0);
+
+ for (unsigned i = 0; i < 20; i++) {
+ NUTS_PASS(nng_msg_alloc(&m, 4));
+ NUTS_PASS(nng_msg_header_append_u32(m, i | 0x80000000u));
+ NUTS_SLEEP(10);
+ NUTS_PASS(nng_sendmsg(req, m, 0));
+ }
+
+ NUTS_PASS(nng_pipe_close(p1));
+ NUTS_CLOSE(req);
+ NUTS_CLOSE(rep);
+}
+
+static void
+test_xreq_ttl_option(void)
+{
+ nng_socket rep;
+ int v;
+ bool b;
+ size_t sz;
+ const char *opt = NNG_OPT_MAXTTL;
+
+ NUTS_PASS(nng_req0_open_raw(&rep));
+
+ NUTS_PASS(nng_socket_set_int(rep, opt, 1));
+ NUTS_FAIL(nng_socket_set_int(rep, opt, 0), NNG_EINVAL);
+ NUTS_FAIL(nng_socket_set_int(rep, opt, -1), NNG_EINVAL);
+ NUTS_FAIL(nng_socket_set_int(rep, opt, 16), NNG_EINVAL);
+ NUTS_FAIL(nng_socket_set_int(rep, opt, 256), NNG_EINVAL);
+ NUTS_PASS(nng_socket_set_int(rep, opt, 3));
+ NUTS_PASS(nng_socket_get_int(rep, opt, &v));
+ NUTS_TRUE(v == 3);
+ v = 0;
+ sz = sizeof(v);
+ NUTS_PASS(nng_socket_get(rep, opt, &v, &sz));
+ NUTS_TRUE(v == 3);
+ NUTS_TRUE(sz == sizeof(v));
+
+	NUTS_FAIL(nng_socket_set(rep, opt, "", 1), NNG_EINVAL);
+	sz = 1;
+	NUTS_FAIL(nng_socket_get(rep, opt, &v, &sz), NNG_EINVAL);
+	NUTS_FAIL(nng_socket_set_bool(rep, opt, true), NNG_EBADTYPE);
+	NUTS_FAIL(nng_socket_get_bool(rep, opt, &b), NNG_EBADTYPE);
+
+	NUTS_CLOSE(rep);
+}
+
+NUTS_TESTS = {
+ { "xreq identity", test_xreq_identity },
+ { "xreq raw", test_xreq_raw },
+ { "xreq no context", test_xreq_no_context },
+ { "xreq poll readable", test_xreq_poll_readable },
+ { "xreq poll writable", test_xreq_poll_writeable },
+ { "xreq validate peer", test_xreq_validate_peer },
+ { "xreq recv aio stopped", test_xreq_recv_aio_stopped },
+ { "xreq recv garbage", test_xreq_recv_garbage },
+ { "xreq recv header", test_xreq_recv_header },
+ { "xreq close during recv", test_xreq_close_during_recv },
+ { "xreq close pipe during send", test_xreq_close_pipe_during_send },
+ { "xreq ttl option", test_xreq_ttl_option },
+ { NULL, NULL },
+};
diff --git a/src/sp/protocol/survey0/CMakeLists.txt b/src/sp/protocol/survey0/CMakeLists.txt
new file mode 100644
index 00000000..b5daca41
--- /dev/null
+++ b/src/sp/protocol/survey0/CMakeLists.txt
@@ -0,0 +1,25 @@
+#
+# Copyright 2020 Staysail Systems, Inc. <info@staysail.tech>
+# Copyright 2018 Capitar IT Group BV <info@capitar.com>
+#
+# This software is supplied under the terms of the MIT License, a
+# copy of which should be located in the distribution where this
+# file was obtained (LICENSE.txt). A copy of the license may also be
+# found online at https://opensource.org/licenses/MIT.
+#
+
+# Surveyor/Respondent protocol
+nng_directory(survey0)
+
+nng_sources_if(NNG_PROTO_SURVEYOR0 survey.c xsurvey.c)
+nng_headers_if(NNG_PROTO_SURVEYOR0 nng/protocol/survey0/survey.h)
+nng_defines_if(NNG_PROTO_SURVEYOR0 NNG_HAVE_SURVEYOR0)
+
+nng_sources_if(NNG_PROTO_RESPONDENT0 respond.c xrespond.c)
+nng_headers_if(NNG_PROTO_RESPONDENT0 nng/protocol/survey0/respond.h)
+nng_defines_if(NNG_PROTO_RESPONDENT0 NNG_HAVE_RESPONDENT0)
+
+nng_test(respond_test)
+nng_test(survey_test)
+nng_test(xrespond_test)
+nng_test(xsurvey_test) \ No newline at end of file
diff --git a/src/sp/protocol/survey0/respond.c b/src/sp/protocol/survey0/respond.c
new file mode 100644
index 00000000..ad551c8f
--- /dev/null
+++ b/src/sp/protocol/survey0/respond.c
@@ -0,0 +1,693 @@
+//
+// Copyright 2020 Staysail Systems, Inc. <info@staysail.tech>
+// Copyright 2018 Capitar IT Group BV <info@capitar.com>
+//
+// This software is supplied under the terms of the MIT License, a
+// copy of which should be located in the distribution where this
+// file was obtained (LICENSE.txt). A copy of the license may also be
+// found online at https://opensource.org/licenses/MIT.
+//
+
+#include <stdlib.h>
+#include <string.h>
+
+#include "core/nng_impl.h"
+#include "nng/protocol/survey0/respond.h"
+
+// Respondent protocol. The RESPONDENT protocol is the "replier" side of
+// the surveyor pattern. This is useful for building service discovery or
+// voting algorithms, for example.
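+//
+// A minimal usage sketch (socket name "resp" is illustrative): a cooked
+// respondent simply receives each survey and sends back exactly one reply.
+//
+//     nng_msg *m;
+//     nng_recvmsg(resp, &m, 0);  // wait for a survey
+//     // ... rewrite the body in place as the response ...
+//     nng_sendmsg(resp, m, 0);   // reply to the surveyor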
+
+#ifndef NNI_PROTO_SURVEYOR_V0
+#define NNI_PROTO_SURVEYOR_V0 NNI_PROTO(6, 2)
+#endif
+
+#ifndef NNI_PROTO_RESPONDENT_V0
+#define NNI_PROTO_RESPONDENT_V0 NNI_PROTO(6, 3)
+#endif
+
+typedef struct resp0_pipe resp0_pipe;
+typedef struct resp0_sock resp0_sock;
+typedef struct resp0_ctx resp0_ctx;
+
+static void resp0_pipe_send_cb(void *);
+static void resp0_pipe_recv_cb(void *);
+static void resp0_pipe_fini(void *);
+
+struct resp0_ctx {
+ resp0_sock * sock;
+ uint32_t pipe_id;
+ resp0_pipe * spipe; // send pipe
+ nni_aio * saio; // send aio
+ nni_aio * raio; // recv aio
+ nni_list_node sqnode;
+ nni_list_node rqnode;
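+	// The backtrace (routing header) of the survey being answered is
+	// parked here between the receive and the matching send.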
+ size_t btrace_len;
+ uint32_t btrace[NNI_MAX_MAX_TTL + 1];
+};
+
+// resp0_sock is our per-socket protocol private structure.
+struct resp0_sock {
+ nni_mtx mtx;
+ nni_atomic_int ttl;
+ nni_id_map pipes;
+ resp0_ctx ctx;
+ nni_list recvpipes;
+ nni_list recvq;
+ nni_pollable readable;
+ nni_pollable writable;
+};
+
+// resp0_pipe is our per-pipe protocol private structure.
+struct resp0_pipe {
+ nni_pipe * npipe;
+ resp0_sock * psock;
+ bool busy;
+ bool closed;
+ uint32_t id;
+ nni_list sendq; // contexts waiting to send
+ nni_aio aio_send;
+ nni_aio aio_recv;
+ nni_list_node rnode; // receivable linkage
+};
+
+static void
+resp0_ctx_close(void *arg)
+{
+ resp0_ctx * ctx = arg;
+ resp0_sock *s = ctx->sock;
+ nni_aio * aio;
+
+ // complete any outstanding operations here, cancellation, etc.
+
+ nni_mtx_lock(&s->mtx);
+ if ((aio = ctx->saio) != NULL) {
+ resp0_pipe *p = ctx->spipe;
+ ctx->saio = NULL;
+ ctx->spipe = NULL;
+ nni_list_remove(&p->sendq, ctx);
+ nni_aio_finish_error(aio, NNG_ECLOSED);
+ }
+ if ((aio = ctx->raio) != NULL) {
+ ctx->raio = NULL;
+ nni_list_remove(&s->recvq, ctx);
+ nni_aio_finish_error(aio, NNG_ECLOSED);
+ }
+ nni_mtx_unlock(&s->mtx);
+}
+
+static void
+resp0_ctx_fini(void *arg)
+{
+ resp0_ctx *ctx = arg;
+
+ resp0_ctx_close(ctx);
+}
+
+static int
+resp0_ctx_init(void *carg, void *sarg)
+{
+ resp0_sock *s = sarg;
+ resp0_ctx * ctx = carg;
+
+ NNI_LIST_NODE_INIT(&ctx->sqnode);
+ NNI_LIST_NODE_INIT(&ctx->rqnode);
+ ctx->btrace_len = 0;
+ ctx->sock = s;
+ ctx->pipe_id = 0;
+
+ return (0);
+}
+
+static void
+resp0_ctx_cancel_send(nni_aio *aio, void *arg, int rv)
+{
+ resp0_ctx * ctx = arg;
+ resp0_sock *s = ctx->sock;
+
+ nni_mtx_lock(&s->mtx);
+ if (ctx->saio != aio) {
+ nni_mtx_unlock(&s->mtx);
+ return;
+ }
+ nni_list_node_remove(&ctx->sqnode);
+ ctx->saio = NULL;
+ nni_mtx_unlock(&s->mtx);
+ nni_msg_header_clear(nni_aio_get_msg(aio)); // reset the headers
+ nni_aio_finish_error(aio, rv);
+}
+
+static void
+resp0_ctx_send(void *arg, nni_aio *aio)
+{
+ resp0_ctx * ctx = arg;
+ resp0_sock *s = ctx->sock;
+ resp0_pipe *p;
+ nni_msg * msg;
+ size_t len;
+ uint32_t pid;
+ int rv;
+
+ if (nni_aio_begin(aio) != 0) {
+ return;
+ }
+ msg = nni_aio_get_msg(aio);
+ nni_msg_header_clear(msg);
+
+ if (ctx == &s->ctx) {
+		// We can't send any more, because we only get one send
+		// per request.
+ nni_pollable_clear(&s->writable);
+ }
+
+ nni_mtx_lock(&s->mtx);
+ if ((rv = nni_aio_schedule(aio, resp0_ctx_cancel_send, ctx)) != 0) {
+ nni_mtx_unlock(&s->mtx);
+ nni_aio_finish_error(aio, rv);
+ return;
+ }
+
+ if ((len = ctx->btrace_len) == 0) {
+ nni_mtx_unlock(&s->mtx);
+ nni_aio_finish_error(aio, NNG_ESTATE);
+ return;
+ }
+ pid = ctx->pipe_id;
+ ctx->pipe_id = 0;
+ ctx->btrace_len = 0;
+
+ if ((rv = nni_msg_header_append(msg, ctx->btrace, len)) != 0) {
+ nni_mtx_unlock(&s->mtx);
+ nni_aio_finish_error(aio, rv);
+ return;
+ }
+
+ if ((p = nni_id_get(&s->pipes, pid)) == NULL) {
+ // Surveyor has left the building. Just discard the reply.
+ nni_mtx_unlock(&s->mtx);
+ nni_aio_set_msg(aio, NULL);
+ nni_aio_finish(aio, 0, nni_msg_len(msg));
+ nni_msg_free(msg);
+ return;
+ }
+
+ if (!p->busy) {
+ p->busy = true;
+ len = nni_msg_len(msg);
+ nni_aio_set_msg(&p->aio_send, msg);
+ nni_pipe_send(p->npipe, &p->aio_send);
+ nni_mtx_unlock(&s->mtx);
+
+ nni_aio_set_msg(aio, NULL);
+ nni_aio_finish(aio, 0, len);
+ return;
+ }
+
+ ctx->saio = aio;
+ ctx->spipe = p;
+ nni_list_append(&p->sendq, ctx);
+ nni_mtx_unlock(&s->mtx);
+}
+
+static void
+resp0_sock_fini(void *arg)
+{
+ resp0_sock *s = arg;
+
+ nni_id_map_fini(&s->pipes);
+ resp0_ctx_fini(&s->ctx);
+ nni_pollable_fini(&s->writable);
+ nni_pollable_fini(&s->readable);
+ nni_mtx_fini(&s->mtx);
+}
+
+static int
+resp0_sock_init(void *arg, nni_sock *nsock)
+{
+ resp0_sock *s = arg;
+
+ NNI_ARG_UNUSED(nsock);
+
+ nni_mtx_init(&s->mtx);
+ nni_id_map_init(&s->pipes, 0, 0, false);
+
+ NNI_LIST_INIT(&s->recvq, resp0_ctx, rqnode);
+ NNI_LIST_INIT(&s->recvpipes, resp0_pipe, rnode);
+
+ nni_atomic_init(&s->ttl);
+ nni_atomic_set(&s->ttl, 8); // Per RFC
+
+ (void) resp0_ctx_init(&s->ctx, s);
+
+ // We start off without being either readable or writable.
+ // Readability comes when there is something on the socket.
+ nni_pollable_init(&s->writable);
+ nni_pollable_init(&s->readable);
+ return (0);
+}
+
+static void
+resp0_sock_open(void *arg)
+{
+ NNI_ARG_UNUSED(arg);
+}
+
+static void
+resp0_sock_close(void *arg)
+{
+ resp0_sock *s = arg;
+
+ resp0_ctx_close(&s->ctx);
+}
+
+static void
+resp0_pipe_stop(void *arg)
+{
+ resp0_pipe *p = arg;
+
+ nni_aio_stop(&p->aio_send);
+ nni_aio_stop(&p->aio_recv);
+}
+
+static void
+resp0_pipe_fini(void *arg)
+{
+ resp0_pipe *p = arg;
+ nng_msg * msg;
+
+ if ((msg = nni_aio_get_msg(&p->aio_recv)) != NULL) {
+ nni_aio_set_msg(&p->aio_recv, NULL);
+ nni_msg_free(msg);
+ }
+ nni_aio_fini(&p->aio_send);
+ nni_aio_fini(&p->aio_recv);
+}
+
+static int
+resp0_pipe_init(void *arg, nni_pipe *npipe, void *s)
+{
+ resp0_pipe *p = arg;
+
+ nni_aio_init(&p->aio_recv, resp0_pipe_recv_cb, p);
+ nni_aio_init(&p->aio_send, resp0_pipe_send_cb, p);
+
+ NNI_LIST_INIT(&p->sendq, resp0_ctx, sqnode);
+
+ p->npipe = npipe;
+ p->psock = s;
+ p->busy = false;
+ p->id = nni_pipe_id(npipe);
+
+ return (0);
+}
+
+static int
+resp0_pipe_start(void *arg)
+{
+ resp0_pipe *p = arg;
+ resp0_sock *s = p->psock;
+ int rv;
+
+ if (nni_pipe_peer(p->npipe) != NNI_PROTO_SURVEYOR_V0) {
+ return (NNG_EPROTO);
+ }
+
+ nni_mtx_lock(&s->mtx);
+ rv = nni_id_set(&s->pipes, p->id, p);
+ nni_mtx_unlock(&s->mtx);
+ if (rv != 0) {
+ return (rv);
+ }
+
+ nni_pipe_recv(p->npipe, &p->aio_recv);
+ return (rv);
+}
+
+static void
+resp0_pipe_close(void *arg)
+{
+ resp0_pipe *p = arg;
+ resp0_sock *s = p->psock;
+ resp0_ctx * ctx;
+
+ nni_aio_close(&p->aio_send);
+ nni_aio_close(&p->aio_recv);
+
+ nni_mtx_lock(&s->mtx);
+ p->closed = true;
+ while ((ctx = nni_list_first(&p->sendq)) != NULL) {
+ nni_aio *aio;
+ nni_msg *msg;
+ nni_list_remove(&p->sendq, ctx);
+ aio = ctx->saio;
+ ctx->saio = NULL;
+ msg = nni_aio_get_msg(aio);
+ nni_aio_set_msg(aio, NULL);
+ nni_aio_finish(aio, 0, nni_msg_len(msg));
+ nni_msg_free(msg);
+ }
+ if (p->id == s->ctx.pipe_id) {
+ // Make sure user space knows they can send a message to us,
+ // which we will happily discard.
+ nni_pollable_raise(&s->writable);
+ }
+ nni_id_remove(&s->pipes, p->id);
+ nni_mtx_unlock(&s->mtx);
+}
+
+static void
+resp0_pipe_send_cb(void *arg)
+{
+ resp0_pipe *p = arg;
+ resp0_sock *s = p->psock;
+ resp0_ctx * ctx;
+ nni_aio * aio;
+ nni_msg * msg;
+ size_t len;
+
+ if (nni_aio_result(&p->aio_send) != 0) {
+ nni_msg_free(nni_aio_get_msg(&p->aio_send));
+ nni_aio_set_msg(&p->aio_send, NULL);
+ nni_pipe_close(p->npipe);
+ return;
+ }
+ nni_mtx_lock(&s->mtx);
+ p->busy = false;
+ if ((ctx = nni_list_first(&p->sendq)) == NULL) {
+ // Nothing else to send.
+ if (p->id == s->ctx.pipe_id) {
+ // Mark us ready for the other side to send!
+ nni_pollable_raise(&s->writable);
+ }
+ nni_mtx_unlock(&s->mtx);
+ return;
+ }
+
+ nni_list_remove(&p->sendq, ctx);
+ aio = ctx->saio;
+ ctx->saio = NULL;
+ ctx->spipe = NULL;
+ p->busy = true;
+ msg = nni_aio_get_msg(aio);
+ len = nni_msg_len(msg);
+ nni_aio_set_msg(aio, NULL);
+ nni_aio_set_msg(&p->aio_send, msg);
+ nni_pipe_send(p->npipe, &p->aio_send);
+
+ nni_mtx_unlock(&s->mtx);
+
+ nni_aio_finish_sync(aio, 0, len);
+}
+
+static void
+resp0_cancel_recv(nni_aio *aio, void *arg, int rv)
+{
+ resp0_ctx * ctx = arg;
+ resp0_sock *s = ctx->sock;
+
+ nni_mtx_lock(&s->mtx);
+ if (ctx->raio == aio) {
+ nni_list_remove(&s->recvq, ctx);
+ ctx->raio = NULL;
+ nni_aio_finish_error(aio, rv);
+ }
+ nni_mtx_unlock(&s->mtx);
+}
+
+static void
+resp0_ctx_recv(void *arg, nni_aio *aio)
+{
+ resp0_ctx * ctx = arg;
+ resp0_sock *s = ctx->sock;
+ resp0_pipe *p;
+ size_t len;
+ nni_msg * msg;
+
+ if (nni_aio_begin(aio) != 0) {
+ return;
+ }
+ nni_mtx_lock(&s->mtx);
+ if ((p = nni_list_first(&s->recvpipes)) == NULL) {
+ int rv;
+ rv = nni_aio_schedule(aio, resp0_cancel_recv, ctx);
+ if (rv != 0) {
+ nni_mtx_unlock(&s->mtx);
+ nni_aio_finish_error(aio, rv);
+ return;
+ }
+ // We cannot have two concurrent receive requests on the same
+ // context...
+ if (ctx->raio != NULL) {
+ nni_mtx_unlock(&s->mtx);
+ nni_aio_finish_error(aio, NNG_ESTATE);
+ return;
+ }
+ ctx->raio = aio;
+ nni_list_append(&s->recvq, ctx);
+ nni_mtx_unlock(&s->mtx);
+ return;
+ }
+ msg = nni_aio_get_msg(&p->aio_recv);
+ nni_aio_set_msg(&p->aio_recv, NULL);
+ nni_list_remove(&s->recvpipes, p);
+ if (nni_list_empty(&s->recvpipes)) {
+ nni_pollable_clear(&s->readable);
+ }
+ nni_pipe_recv(p->npipe, &p->aio_recv);
+
+ len = nni_msg_header_len(msg);
+ memcpy(ctx->btrace, nni_msg_header(msg), len);
+ ctx->btrace_len = len;
+ ctx->pipe_id = p->id;
+ if (ctx == &s->ctx) {
+ nni_pollable_raise(&s->writable);
+ }
+ nni_mtx_unlock(&s->mtx);
+
+ nni_msg_header_clear(msg);
+ nni_aio_set_msg(aio, msg);
+ nni_aio_finish(aio, 0, nni_msg_len(msg));
+}
+
+static void
+resp0_pipe_recv_cb(void *arg)
+{
+ resp0_pipe *p = arg;
+ resp0_sock *s = p->psock;
+ resp0_ctx * ctx;
+ nni_msg * msg;
+ nni_aio * aio;
+ int hops;
+ size_t len;
+ int ttl;
+
+ if (nni_aio_result(&p->aio_recv) != 0) {
+ nni_pipe_close(p->npipe);
+ return;
+ }
+
+ ttl = nni_atomic_get(&s->ttl);
+ msg = nni_aio_get_msg(&p->aio_recv);
+ nni_msg_set_pipe(msg, p->id);
+
+ // Move backtrace from body to header
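+	// Each 32-bit element is one hop; enforce the TTL as we go, and the
+	// element with the high bit set (the survey ID) ends the backtrace.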
+ hops = 1;
+ for (;;) {
+		bool     end = false;
+ uint8_t *body;
+
+ if (hops > ttl) {
+ goto drop;
+ }
+ hops++;
+ if (nni_msg_len(msg) < 4) {
+ // Peer is speaking garbage, kick it.
+ nni_msg_free(msg);
+ nni_aio_set_msg(&p->aio_recv, NULL);
+ nni_pipe_close(p->npipe);
+ return;
+ }
+ body = nni_msg_body(msg);
+ end = ((body[0] & 0x80u) != 0);
+ if (nni_msg_header_append(msg, body, 4) != 0) {
+ goto drop;
+ }
+ nni_msg_trim(msg, 4);
+ if (end) {
+ break;
+ }
+ }
+
+ len = nni_msg_header_len(msg);
+
+ nni_mtx_lock(&s->mtx);
+
+ if (p->closed) {
+ // If pipe was closed, we just abandon the data from it.
+ nni_aio_set_msg(&p->aio_recv, NULL);
+ nni_mtx_unlock(&s->mtx);
+ nni_msg_free(msg);
+ return;
+ }
+ if ((ctx = nni_list_first(&s->recvq)) == NULL) {
+ // No one blocked in recv, stall.
+ nni_list_append(&s->recvpipes, p);
+ nni_pollable_raise(&s->readable);
+ nni_mtx_unlock(&s->mtx);
+ return;
+ }
+
+ nni_list_remove(&s->recvq, ctx);
+ aio = ctx->raio;
+ ctx->raio = NULL;
+ nni_aio_set_msg(&p->aio_recv, NULL);
+
+ // Start the next receive.
+ nni_pipe_recv(p->npipe, &p->aio_recv);
+
+ ctx->btrace_len = len;
+ memcpy(ctx->btrace, nni_msg_header(msg), len);
+ nni_msg_header_clear(msg);
+ ctx->pipe_id = p->id;
+
+ if ((ctx == &s->ctx) && (!p->busy)) {
+ nni_pollable_raise(&s->writable);
+ }
+ nni_mtx_unlock(&s->mtx);
+
+ nni_aio_set_msg(aio, msg);
+ nni_aio_finish_sync(aio, 0, nni_msg_len(msg));
+ return;
+
+drop:
+ nni_msg_free(msg);
+ nni_aio_set_msg(&p->aio_recv, NULL);
+ nni_pipe_recv(p->npipe, &p->aio_recv);
+}
+
+static int
+resp0_sock_set_max_ttl(void *arg, const void *buf, size_t sz, nni_opt_type t)
+{
+ resp0_sock *s = arg;
+ int ttl;
+ int rv;
+
+ if ((rv = nni_copyin_int(&ttl, buf, sz, 1, NNI_MAX_MAX_TTL, t)) == 0) {
+ nni_atomic_set(&s->ttl, ttl);
+ }
+ return (rv);
+}
+
+static int
+resp0_sock_get_max_ttl(void *arg, void *buf, size_t *szp, nni_opt_type t)
+{
+ resp0_sock *s = arg;
+ return (nni_copyout_int(nni_atomic_get(&s->ttl), buf, szp, t));
+}
+
+static int
+resp0_sock_get_sendfd(void *arg, void *buf, size_t *szp, nni_opt_type t)
+{
+ resp0_sock *s = arg;
+ int rv;
+ int fd;
+
+ if ((rv = nni_pollable_getfd(&s->writable, &fd)) != 0) {
+ return (rv);
+ }
+ return (nni_copyout_int(fd, buf, szp, t));
+}
+
+static int
+resp0_sock_get_recvfd(void *arg, void *buf, size_t *szp, nni_opt_type t)
+{
+ resp0_sock *s = arg;
+ int rv;
+ int fd;
+
+ if ((rv = nni_pollable_getfd(&s->readable, &fd)) != 0) {
+ return (rv);
+ }
+ return (nni_copyout_int(fd, buf, szp, t));
+}
+
+static void
+resp0_sock_send(void *arg, nni_aio *aio)
+{
+ resp0_sock *s = arg;
+
+ resp0_ctx_send(&s->ctx, aio);
+}
+
+static void
+resp0_sock_recv(void *arg, nni_aio *aio)
+{
+ resp0_sock *s = arg;
+
+ resp0_ctx_recv(&s->ctx, aio);
+}
+
+static nni_proto_pipe_ops resp0_pipe_ops = {
+ .pipe_size = sizeof(resp0_pipe),
+ .pipe_init = resp0_pipe_init,
+ .pipe_fini = resp0_pipe_fini,
+ .pipe_start = resp0_pipe_start,
+ .pipe_close = resp0_pipe_close,
+ .pipe_stop = resp0_pipe_stop,
+};
+
+static nni_proto_ctx_ops resp0_ctx_ops = {
+ .ctx_size = sizeof(resp0_ctx),
+ .ctx_init = resp0_ctx_init,
+ .ctx_fini = resp0_ctx_fini,
+ .ctx_send = resp0_ctx_send,
+ .ctx_recv = resp0_ctx_recv,
+};
+
+static nni_option resp0_sock_options[] = {
+ {
+ .o_name = NNG_OPT_MAXTTL,
+ .o_get = resp0_sock_get_max_ttl,
+ .o_set = resp0_sock_set_max_ttl,
+ },
+ {
+ .o_name = NNG_OPT_RECVFD,
+ .o_get = resp0_sock_get_recvfd,
+ .o_set = NULL,
+ },
+ {
+ .o_name = NNG_OPT_SENDFD,
+ .o_get = resp0_sock_get_sendfd,
+ .o_set = NULL,
+ },
+ // terminate list
+ {
+ .o_name = NULL,
+ },
+};
+
+static nni_proto_sock_ops resp0_sock_ops = {
+ .sock_size = sizeof(resp0_sock),
+ .sock_init = resp0_sock_init,
+ .sock_fini = resp0_sock_fini,
+ .sock_open = resp0_sock_open,
+ .sock_close = resp0_sock_close,
+ .sock_send = resp0_sock_send,
+ .sock_recv = resp0_sock_recv,
+ .sock_options = resp0_sock_options,
+};
+
+static nni_proto resp0_proto = {
+ .proto_version = NNI_PROTOCOL_VERSION,
+ .proto_self = { NNI_PROTO_RESPONDENT_V0, "respondent" },
+ .proto_peer = { NNI_PROTO_SURVEYOR_V0, "surveyor" },
+ .proto_flags = NNI_PROTO_FLAG_SNDRCV,
+ .proto_sock_ops = &resp0_sock_ops,
+ .proto_pipe_ops = &resp0_pipe_ops,
+ .proto_ctx_ops = &resp0_ctx_ops,
+};
+
+int
+nng_respondent0_open(nng_socket *sidp)
+{
+ return (nni_proto_open(sidp, &resp0_proto));
+}
diff --git a/src/sp/protocol/survey0/respond_test.c b/src/sp/protocol/survey0/respond_test.c
new file mode 100644
index 00000000..51844c76
--- /dev/null
+++ b/src/sp/protocol/survey0/respond_test.c
@@ -0,0 +1,586 @@
+//
+// Copyright 2020 Staysail Systems, Inc. <info@staysail.tech>
+//
+// This software is supplied under the terms of the MIT License, a
+// copy of which should be located in the distribution where this
+// file was obtained (LICENSE.txt). A copy of the license may also be
+// found online at https://opensource.org/licenses/MIT.
+//
+
+#include <nuts.h>
+
+void
+test_resp_identity(void)
+{
+ nng_socket s;
+ int p;
+ char * n;
+
+ NUTS_PASS(nng_respondent0_open(&s));
+ NUTS_PASS(nng_socket_get_int(s, NNG_OPT_PROTO, &p));
+ NUTS_TRUE(p == NNG_RESPONDENT0_SELF);
+	NUTS_PASS(nng_socket_get_int(s, NNG_OPT_PEER, &p));
+	NUTS_TRUE(p == NNG_RESPONDENT0_PEER);
+	NUTS_PASS(nng_socket_get_string(s, NNG_OPT_PROTONAME, &n));
+	NUTS_MATCH(n, NNG_RESPONDENT0_SELF_NAME);
+	nng_strfree(n);
+	NUTS_PASS(nng_socket_get_string(s, NNG_OPT_PEERNAME, &n));
+ NUTS_MATCH(n, NNG_RESPONDENT0_PEER_NAME);
+ nng_strfree(n);
+ NUTS_CLOSE(s);
+}
+
+void
+test_resp_send_bad_state(void)
+{
+ nng_socket resp;
+ nng_msg * msg = NULL;
+
+ NUTS_PASS(nng_respondent0_open(&resp));
+ NUTS_PASS(nng_msg_alloc(&msg, 0));
+ NUTS_FAIL(nng_sendmsg(resp, msg, 0), NNG_ESTATE);
+ nng_msg_free(msg);
+ NUTS_CLOSE(resp);
+}
+
+void
+test_resp_poll_writeable(void)
+{
+ int fd;
+ nng_socket surv;
+ nng_socket resp;
+
+ NUTS_PASS(nng_surveyor0_open(&surv));
+ NUTS_PASS(nng_respondent0_open(&resp));
+ NUTS_PASS(nng_socket_get_int(resp, NNG_OPT_SENDFD, &fd));
+ NUTS_TRUE(fd >= 0);
+
+ // Not writable before connect.
+ NUTS_TRUE(nuts_poll_fd(fd) == false);
+
+ NUTS_MARRY(surv, resp);
+
+ // Still not writable.
+ NUTS_TRUE(nuts_poll_fd(fd) == false);
+
+ // If we get a job, *then* we become writable
+ NUTS_SEND(surv, "abc");
+ NUTS_RECV(resp, "abc");
+ NUTS_TRUE(nuts_poll_fd(fd) == true);
+
+ // And is no longer writable once we send a message
+ NUTS_SEND(resp, "def");
+ NUTS_TRUE(nuts_poll_fd(fd) == false);
+ // Even after receiving it
+ NUTS_RECV(surv, "def");
+ NUTS_TRUE(nuts_poll_fd(fd) == false);
+
+ NUTS_CLOSE(surv);
+ NUTS_CLOSE(resp);
+}
+
+void
+test_resp_poll_readable(void)
+{
+ int fd;
+ nng_socket surv;
+ nng_socket resp;
+ nng_msg * msg;
+
+ NUTS_PASS(nng_surveyor0_open(&surv));
+ NUTS_PASS(nng_respondent0_open(&resp));
+ NUTS_PASS(nng_socket_get_int(resp, NNG_OPT_RECVFD, &fd));
+ NUTS_TRUE(fd >= 0);
+
+ // Not readable if not connected!
+ NUTS_TRUE(nuts_poll_fd(fd) == false);
+
+ // Even after connect (no message yet)
+ NUTS_MARRY(surv, resp);
+ NUTS_TRUE(nuts_poll_fd(fd) == false);
+
+ // But once we send messages, it is.
+ // We have to send a request, in order to send a reply.
+ NUTS_SEND(surv, "abc");
+ NUTS_SLEEP(100);
+
+ NUTS_TRUE(nuts_poll_fd(fd) == true);
+
+ // and receiving makes it no longer ready
+ NUTS_PASS(nng_recvmsg(resp, &msg, 0));
+ nng_msg_free(msg);
+ NUTS_TRUE(nuts_poll_fd(fd) == false);
+
+ // TODO verify unsolicited response
+
+ NUTS_CLOSE(surv);
+ NUTS_CLOSE(resp);
+}
+
+void
+test_resp_context_no_poll(void)
+{
+ int fd;
+ nng_socket resp;
+ nng_ctx ctx;
+
+ NUTS_PASS(nng_respondent0_open(&resp));
+ NUTS_PASS(nng_ctx_open(&ctx, resp));
+ NUTS_FAIL(nng_ctx_get_int(ctx, NNG_OPT_SENDFD, &fd), NNG_ENOTSUP);
+ NUTS_FAIL(nng_ctx_get_int(ctx, NNG_OPT_RECVFD, &fd), NNG_ENOTSUP);
+ NUTS_PASS(nng_ctx_close(ctx));
+ NUTS_CLOSE(resp);
+}
+
+void
+test_resp_validate_peer(void)
+{
+ nng_socket s1, s2;
+ nng_stat * stats;
+ nng_stat * reject;
+ char * addr;
+
+ NUTS_ADDR(addr, "inproc");
+
+ NUTS_PASS(nng_respondent0_open(&s1));
+ NUTS_PASS(nng_respondent0_open(&s2));
+
+ NUTS_PASS(nng_listen(s1, addr, NULL, 0));
+ NUTS_PASS(nng_dial(s2, addr, NULL, NNG_FLAG_NONBLOCK));
+
+ NUTS_SLEEP(100);
+ NUTS_PASS(nng_stats_get(&stats));
+
+ NUTS_TRUE(stats != NULL);
+ NUTS_TRUE((reject = nng_stat_find_socket(stats, s1)) != NULL);
+ NUTS_TRUE((reject = nng_stat_find(reject, "reject")) != NULL);
+
+ NUTS_TRUE(nng_stat_type(reject) == NNG_STAT_COUNTER);
+ NUTS_TRUE(nng_stat_value(reject) > 0);
+
+ NUTS_CLOSE(s1);
+ NUTS_CLOSE(s2);
+ nng_stats_free(stats);
+}
+
+void
+test_resp_double_recv(void)
+{
+ nng_socket s1;
+ nng_aio * aio1;
+ nng_aio * aio2;
+
+ NUTS_PASS(nng_respondent0_open(&s1));
+ NUTS_PASS(nng_aio_alloc(&aio1, NULL, NULL));
+ NUTS_PASS(nng_aio_alloc(&aio2, NULL, NULL));
+
+ nng_recv_aio(s1, aio1);
+ nng_recv_aio(s1, aio2);
+
+ nng_aio_wait(aio2);
+ NUTS_FAIL(nng_aio_result(aio2), NNG_ESTATE);
+ NUTS_CLOSE(s1);
+ NUTS_FAIL(nng_aio_result(aio1), NNG_ECLOSED);
+ nng_aio_free(aio1);
+ nng_aio_free(aio2);
+}
+
+void
+test_resp_close_pipe_before_send(void)
+{
+ nng_socket resp;
+ nng_socket surv;
+ nng_pipe p;
+ nng_aio * aio1;
+ nng_msg * m;
+
+ NUTS_PASS(nng_respondent0_open(&resp));
+ NUTS_PASS(nng_surveyor0_open(&surv));
+ NUTS_PASS(nng_setopt_ms(resp, NNG_OPT_RECVTIMEO, 1000));
+ NUTS_PASS(nng_setopt_ms(resp, NNG_OPT_SENDTIMEO, 1000));
+ NUTS_PASS(nng_setopt_ms(surv, NNG_OPT_SENDTIMEO, 1000));
+ NUTS_PASS(nng_aio_alloc(&aio1, NULL, NULL));
+
+ NUTS_MARRY(surv, resp);
+ NUTS_SEND(surv, "test");
+
+ nng_recv_aio(resp, aio1);
+ nng_aio_wait(aio1);
+ NUTS_PASS(nng_aio_result(aio1));
+ NUTS_TRUE((m = nng_aio_get_msg(aio1)) != NULL);
+ p = nng_msg_get_pipe(m);
+ NUTS_PASS(nng_pipe_close(p));
+ NUTS_PASS(nng_sendmsg(resp, m, 0));
+
+ NUTS_CLOSE(surv);
+ NUTS_CLOSE(resp);
+ nng_aio_free(aio1);
+}
+
+void
+test_resp_close_pipe_during_send(void)
+{
+ nng_socket resp;
+ nng_socket surv;
+ nng_pipe p = NNG_PIPE_INITIALIZER;
+ nng_msg * m;
+
+ NUTS_PASS(nng_respondent0_open(&resp));
+ NUTS_PASS(nng_surveyor0_open_raw(&surv));
+ NUTS_PASS(nng_setopt_ms(resp, NNG_OPT_RECVTIMEO, 1000));
+ NUTS_PASS(nng_setopt_ms(resp, NNG_OPT_SENDTIMEO, 200));
+ NUTS_PASS(nng_setopt_ms(surv, NNG_OPT_SENDTIMEO, 1000));
+ NUTS_PASS(nng_setopt_int(resp, NNG_OPT_SENDBUF, 20));
+ NUTS_PASS(nng_setopt_int(resp, NNG_OPT_RECVBUF, 20));
+ NUTS_PASS(nng_setopt_int(surv, NNG_OPT_SENDBUF, 20));
+ NUTS_PASS(nng_setopt_int(surv, NNG_OPT_RECVBUF, 1));
+
+ NUTS_MARRY(surv, resp);
+
+ for (int i = 0; i < 100; i++) {
+ int rv;
+ NUTS_PASS(nng_msg_alloc(&m, 4));
+ NUTS_PASS(nng_msg_append_u32(m, (unsigned) i | 0x80000000u));
+ NUTS_PASS(nng_sendmsg(surv, m, 0));
+ NUTS_PASS(nng_recvmsg(resp, &m, 0));
+ p = nng_msg_get_pipe(m);
+ rv = nng_sendmsg(resp, m, 0);
+ if (rv == NNG_ETIMEDOUT) {
+ // Queue is backed up, senders are busy.
+ nng_msg_free(m);
+ break;
+ }
+ NUTS_PASS(rv);
+ }
+ NUTS_PASS(nng_pipe_close(p));
+
+ NUTS_CLOSE(surv);
+ NUTS_CLOSE(resp);
+}
+
+void
+test_resp_ctx_recv_aio_stopped(void)
+{
+ nng_socket resp;
+ nng_ctx ctx;
+ nng_aio * aio;
+
+ NUTS_PASS(nng_respondent0_open(&resp));
+ NUTS_PASS(nng_aio_alloc(&aio, NULL, NULL));
+ NUTS_PASS(nng_ctx_open(&ctx, resp));
+
+ nng_aio_stop(aio);
+ nng_ctx_recv(ctx, aio);
+ nng_aio_wait(aio);
+ NUTS_FAIL(nng_aio_result(aio), NNG_ECANCELED);
+ NUTS_PASS(nng_ctx_close(ctx));
+ NUTS_CLOSE(resp);
+ nng_aio_free(aio);
+}
+
+void
+test_resp_close_pipe_context_send(void)
+{
+ nng_socket resp;
+ nng_socket surv;
+ nng_pipe p = NNG_PIPE_INITIALIZER;
+ nng_msg * m;
+ nng_ctx ctx[10];
+ nng_aio * aio[10];
+ int i;
+
+ NUTS_PASS(nng_respondent0_open(&resp));
+ NUTS_PASS(nng_surveyor0_open_raw(&surv));
+ NUTS_PASS(nng_setopt_ms(resp, NNG_OPT_RECVTIMEO, 1000));
+ NUTS_PASS(nng_setopt_ms(resp, NNG_OPT_SENDTIMEO, 1000));
+ NUTS_PASS(nng_setopt_ms(surv, NNG_OPT_SENDTIMEO, 1000));
+ NUTS_PASS(nng_setopt_int(resp, NNG_OPT_SENDBUF, 1));
+ NUTS_PASS(nng_setopt_int(resp, NNG_OPT_RECVBUF, 1));
+ NUTS_PASS(nng_setopt_int(surv, NNG_OPT_SENDBUF, 1));
+ NUTS_PASS(nng_setopt_int(surv, NNG_OPT_RECVBUF, 1));
+ for (i = 0; i < 10; i++) {
+ NUTS_PASS(nng_ctx_open(&ctx[i], resp));
+ NUTS_PASS(nng_aio_alloc(&aio[i], NULL, NULL));
+ }
+
+ NUTS_MARRY(surv, resp);
+
+ for (i = 0; i < 10; i++) {
+ NUTS_PASS(nng_msg_alloc(&m, 4));
+ NUTS_PASS(nng_msg_append_u32(m, (unsigned) i | 0x80000000u));
+ NUTS_PASS(nng_sendmsg(surv, m, 0));
+ nng_ctx_recv(ctx[i], aio[i]);
+ }
+ for (i = 0; i < 10; i++) {
+ nng_aio_wait(aio[i]);
+ NUTS_PASS(nng_aio_result(aio[i]));
+ NUTS_TRUE((m = nng_aio_get_msg(aio[i])) != NULL);
+ p = nng_msg_get_pipe(m);
+ nng_aio_set_msg(aio[i], m);
+ nng_ctx_send(ctx[i], aio[i]);
+ }
+
+ // Note that SURVEYOR socket is not reading the results.
+ NUTS_PASS(nng_pipe_close(p));
+
+ for (i = 0; i < 10; i++) {
+ int rv;
+ nng_aio_wait(aio[i]);
+ rv = nng_aio_result(aio[i]);
+ if (rv != 0) {
+ NUTS_FAIL(rv, NNG_ECLOSED);
+ nng_msg_free(nng_aio_get_msg(aio[i]));
+ }
+ nng_aio_free(aio[i]);
+ NUTS_PASS(nng_ctx_close(ctx[i]));
+ }
+ NUTS_CLOSE(surv);
+ NUTS_CLOSE(resp);
+}
+
+void
+test_resp_close_context_send(void)
+{
+ nng_socket resp;
+ nng_socket surv;
+ nng_msg * m;
+ nng_ctx ctx[10];
+ nng_aio * aio[10];
+ int i;
+
+ NUTS_PASS(nng_respondent0_open(&resp));
+ NUTS_PASS(nng_surveyor0_open_raw(&surv));
+ NUTS_PASS(nng_setopt_ms(resp, NNG_OPT_RECVTIMEO, 1000));
+ NUTS_PASS(nng_setopt_ms(resp, NNG_OPT_SENDTIMEO, 1000));
+ NUTS_PASS(nng_setopt_ms(surv, NNG_OPT_SENDTIMEO, 1000));
+ NUTS_PASS(nng_setopt_int(resp, NNG_OPT_SENDBUF, 1));
+ NUTS_PASS(nng_setopt_int(resp, NNG_OPT_RECVBUF, 1));
+ NUTS_PASS(nng_setopt_int(surv, NNG_OPT_SENDBUF, 1));
+ NUTS_PASS(nng_setopt_int(surv, NNG_OPT_RECVBUF, 1));
+ for (i = 0; i < 10; i++) {
+ NUTS_PASS(nng_ctx_open(&ctx[i], resp));
+ NUTS_PASS(nng_aio_alloc(&aio[i], NULL, NULL));
+ }
+
+ NUTS_MARRY(surv, resp);
+
+ for (i = 0; i < 10; i++) {
+ NUTS_PASS(nng_msg_alloc(&m, 4));
+ NUTS_PASS(nng_msg_append_u32(m, (unsigned) i | 0x80000000u));
+ NUTS_PASS(nng_sendmsg(surv, m, 0));
+ nng_ctx_recv(ctx[i], aio[i]);
+ }
+ for (i = 0; i < 10; i++) {
+ nng_aio_wait(aio[i]);
+ NUTS_PASS(nng_aio_result(aio[i]));
+ NUTS_TRUE((m = nng_aio_get_msg(aio[i])) != NULL);
+ nng_aio_set_msg(aio[i], m);
+ nng_ctx_send(ctx[i], aio[i]);
+ }
+
+	// Note that the SURVEYOR socket is not reading the results.
+ for (i = 0; i < 10; i++) {
+ int rv;
+ NUTS_PASS(nng_ctx_close(ctx[i]));
+ nng_aio_wait(aio[i]);
+ rv = nng_aio_result(aio[i]);
+ if (rv != 0) {
+ NUTS_FAIL(rv, NNG_ECLOSED);
+ nng_msg_free(nng_aio_get_msg(aio[i]));
+ }
+ nng_aio_free(aio[i]);
+ }
+ NUTS_CLOSE(surv);
+ NUTS_CLOSE(resp);
+}
+
+static void
+test_resp_ctx_recv_nonblock(void)
+{
+ nng_socket resp;
+ nng_ctx ctx;
+ nng_aio * aio;
+
+ NUTS_PASS(nng_respondent0_open(&resp));
+ NUTS_PASS(nng_ctx_open(&ctx, resp));
+ NUTS_PASS(nng_aio_alloc(&aio, NULL, NULL));
+
+ nng_aio_set_timeout(aio, 0); // Instant timeout
+ nng_ctx_recv(ctx, aio);
+
+ nng_aio_wait(aio);
+ NUTS_FAIL(nng_aio_result(aio), NNG_ETIMEDOUT);
+ NUTS_CLOSE(resp);
+ nng_aio_free(aio);
+}
+
+static void
+test_resp_ctx_send_nonblock(void)
+{
+ nng_socket resp;
+ nng_socket surv;
+ nng_ctx ctx;
+ nng_aio * aio;
+ nng_msg * msg;
+
+ NUTS_PASS(nng_surveyor0_open(&surv));
+ NUTS_PASS(nng_respondent0_open(&resp));
+ NUTS_PASS(nng_setopt_ms(surv, NNG_OPT_SENDTIMEO, 1000));
+ NUTS_PASS(nng_setopt_ms(resp, NNG_OPT_RECVTIMEO, 1000));
+ NUTS_PASS(nng_setopt_ms(resp, NNG_OPT_SENDTIMEO, 1000));
+ NUTS_PASS(nng_ctx_open(&ctx, resp));
+ NUTS_PASS(nng_aio_alloc(&aio, NULL, NULL));
+ NUTS_MARRY(surv, resp);
+
+ NUTS_SEND(surv, "SEND");
+ nng_ctx_recv(ctx, aio);
+ nng_aio_wait(aio);
+ NUTS_PASS(nng_aio_result(aio));
+ // message carries over
+ msg = nng_aio_get_msg(aio);
+ nng_aio_set_msg(aio, msg);
+ nng_aio_set_timeout(aio, 0); // Instant timeout
+ nng_ctx_send(ctx, aio);
+
+ nng_aio_wait(aio);
+ NUTS_FAIL(nng_aio_result(aio), NNG_ETIMEDOUT);
+ NUTS_CLOSE(surv);
+ NUTS_CLOSE(resp);
+ nng_aio_free(aio);
+ nng_msg_free(msg);
+}
+
+void
+test_resp_recv_garbage(void)
+{
+ nng_socket resp;
+ nng_socket surv;
+ nng_msg * m;
+
+ NUTS_PASS(nng_respondent0_open(&resp));
+ NUTS_PASS(nng_surveyor0_open_raw(&surv));
+ NUTS_PASS(nng_setopt_ms(resp, NNG_OPT_RECVTIMEO, 200));
+ NUTS_PASS(nng_setopt_ms(resp, NNG_OPT_SENDTIMEO, 200));
+ NUTS_PASS(nng_setopt_ms(surv, NNG_OPT_SENDTIMEO, 1000));
+
+ NUTS_MARRY(surv, resp);
+
+ NUTS_PASS(nng_msg_alloc(&m, 4));
+ NUTS_PASS(nng_msg_append_u32(m, 1u));
+ NUTS_PASS(nng_sendmsg(surv, m, 0));
+ NUTS_FAIL(nng_recvmsg(resp, &m, 0), NNG_ETIMEDOUT);
+
+ NUTS_CLOSE(surv);
+ NUTS_CLOSE(resp);
+}
+
+static void
+test_resp_ttl_option(void)
+{
+ nng_socket resp;
+ int v;
+ bool b;
+ size_t sz;
+ const char *opt = NNG_OPT_MAXTTL;
+
+ NUTS_PASS(nng_respondent0_open(&resp));
+
+ NUTS_PASS(nng_setopt_int(resp, opt, 1));
+ NUTS_FAIL(nng_setopt_int(resp, opt, 0), NNG_EINVAL);
+ NUTS_FAIL(nng_setopt_int(resp, opt, -1), NNG_EINVAL);
+ NUTS_FAIL(nng_setopt_int(resp, opt, 16), NNG_EINVAL);
+ NUTS_FAIL(nng_setopt_int(resp, opt, 256), NNG_EINVAL);
+ NUTS_PASS(nng_setopt_int(resp, opt, 3));
+ NUTS_PASS(nng_socket_get_int(resp, opt, &v));
+ NUTS_TRUE(v == 3);
+ v = 0;
+ sz = sizeof(v);
+ NUTS_PASS(nng_socket_get(resp, opt, &v, &sz));
+ NUTS_TRUE(v == 3);
+ NUTS_TRUE(sz == sizeof(v));
+
+ NUTS_FAIL(nng_setopt(resp, opt, "", 1), NNG_EINVAL);
+ sz = 1;
+ NUTS_FAIL(nng_socket_get(resp, opt, &v, &sz), NNG_EINVAL);
+ NUTS_FAIL(nng_setopt_bool(resp, opt, true), NNG_EBADTYPE);
+ NUTS_FAIL(nng_socket_get_bool(resp, opt, &b), NNG_EBADTYPE);
+
+ NUTS_CLOSE(resp);
+}
+
+static void
+test_resp_ttl_drop(void)
+{
+ nng_socket resp;
+ nng_socket surv;
+ nng_msg * m;
+
+ NUTS_PASS(nng_respondent0_open(&resp));
+ NUTS_PASS(nng_surveyor0_open_raw(&surv));
+ NUTS_PASS(nng_setopt_int(resp, NNG_OPT_MAXTTL, 3));
+ NUTS_PASS(nng_setopt_ms(resp, NNG_OPT_RECVTIMEO, 200));
+ NUTS_PASS(nng_setopt_ms(surv, NNG_OPT_SENDTIMEO, 1000));
+
+ NUTS_MARRY(surv, resp);
+
+	// Send messages. Note that the respondent implicitly adds a hop
+	// on receive.
+
+ NUTS_PASS(nng_msg_alloc(&m, 0));
+ NUTS_PASS(nng_msg_append_u32(m, 1u)); // 2 hops
+ NUTS_PASS(nng_msg_append_u32(m, 0x80000001u));
+ NUTS_PASS(nng_msg_append(m, "PASS1", 6));
+ NUTS_PASS(nng_sendmsg(surv, m, 0));
+
+ NUTS_PASS(nng_msg_alloc(&m, 0));
+ NUTS_PASS(nng_msg_append_u32(m, 1u)); // 4 hops -- discard!
+ NUTS_PASS(nng_msg_append_u32(m, 2u));
+ NUTS_PASS(nng_msg_append_u32(m, 3u));
+ NUTS_PASS(nng_msg_append_u32(m, 0x80000002u));
+ NUTS_PASS(nng_msg_append(m, "FAIL2", 6));
+ NUTS_PASS(nng_sendmsg(surv, m, 0));
+
+ NUTS_PASS(nng_msg_alloc(&m, 0));
+ NUTS_PASS(nng_msg_append_u32(m, 1u)); // 3 hops - passes
+ NUTS_PASS(nng_msg_append_u32(m, 2u));
+ NUTS_PASS(nng_msg_append_u32(m, 0x80000003u));
+ NUTS_PASS(nng_msg_append(m, "PASS3", 6));
+ NUTS_PASS(nng_sendmsg(surv, m, 0));
+
+ NUTS_PASS(nng_msg_alloc(&m, 0));
+ NUTS_PASS(nng_msg_append_u32(m, 1u)); // 4 hops -- discard!
+ NUTS_PASS(nng_msg_append_u32(m, 2u));
+ NUTS_PASS(nng_msg_append_u32(m, 3u));
+ NUTS_PASS(nng_msg_append_u32(m, 0x80000003u));
+ NUTS_PASS(nng_msg_append(m, "FAIL4", 6));
+ NUTS_PASS(nng_sendmsg(surv, m, 0));
+
+ NUTS_RECV(resp, "PASS1");
+ NUTS_RECV(resp, "PASS3");
+
+ NUTS_FAIL(nng_recvmsg(resp, &m, 0), NNG_ETIMEDOUT);
+
+ NUTS_CLOSE(resp);
+ NUTS_CLOSE(surv);
+}
+
+TEST_LIST = {
+ { "respond identity", test_resp_identity },
+ { "respond send bad state", test_resp_send_bad_state },
+ { "respond poll readable", test_resp_poll_readable },
+ { "respond poll writable", test_resp_poll_writeable },
+ { "respond context does not poll", test_resp_context_no_poll },
+ { "respond validate peer", test_resp_validate_peer },
+ { "respond double recv", test_resp_double_recv },
+ { "respond close pipe before send", test_resp_close_pipe_before_send },
+ { "respond close pipe during send", test_resp_close_pipe_during_send },
+ { "respond recv aio ctx stopped", test_resp_ctx_recv_aio_stopped },
+ { "respond close pipe context send",
+ test_resp_close_pipe_context_send },
+ { "respond close context send", test_resp_close_context_send },
+ { "respond context send nonblock", test_resp_ctx_send_nonblock },
+ { "respond context recv nonblock", test_resp_ctx_recv_nonblock },
+ { "respond recv garbage", test_resp_recv_garbage },
+ { "respond ttl option", test_resp_ttl_option },
+ { "respond ttl drop", test_resp_ttl_drop },
+ { NULL, NULL },
+};
diff --git a/src/sp/protocol/survey0/survey.c b/src/sp/protocol/survey0/survey.c
new file mode 100644
index 00000000..ce1ed601
--- /dev/null
+++ b/src/sp/protocol/survey0/survey.c
@@ -0,0 +1,663 @@
+//
+// Copyright 2020 Staysail Systems, Inc. <info@staysail.tech>
+// Copyright 2018 Capitar IT Group BV <info@capitar.com>
+//
+// This software is supplied under the terms of the MIT License, a
+// copy of which should be located in the distribution where this
+// file was obtained (LICENSE.txt). A copy of the license may also be
+// found online at https://opensource.org/licenses/MIT.
+//
+
+#include <stdlib.h>
+
+#include "core/nng_impl.h"
+#include "nng/protocol/survey0/survey.h"
+
+// Surveyor protocol. The SURVEYOR protocol is the "survey" side of the
+// survey pattern. This is useful for building service discovery, voting, etc.
+// Note that this pattern is not optimized for extreme low latency, as it
+// uses multiple queues for simplicity. Typically this is used in cases
+// where a few dozen extra microseconds does not matter.
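+//
+// A minimal usage sketch (names "surv", "m", and "r" are illustrative): a
+// survey is broadcast to every connected respondent, and replies are read
+// until the survey time expires, at which point receives fail with
+// NNG_ETIMEDOUT.
+//
+//     nng_sendmsg(surv, m, 0);                // broadcast the survey
+//     nng_msg *r;
+//     while (nng_recvmsg(surv, &r, 0) == 0) { // collect responses
+//             nng_msg_free(r);
+//     }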
+
+typedef struct surv0_pipe surv0_pipe;
+typedef struct surv0_sock surv0_sock;
+typedef struct surv0_ctx surv0_ctx;
+
+static void surv0_pipe_send_cb(void *);
+static void surv0_pipe_recv_cb(void *);
+static void surv0_ctx_timeout(void *);
+
+struct surv0_ctx {
+ surv0_sock * sock;
+ uint32_t survey_id; // survey id
+ nni_timer_node timer;
+ nni_time expire;
+ nni_lmq recv_lmq;
+ nni_list recv_queue;
+ nni_atomic_int recv_buf;
+ nni_atomic_int survey_time;
+ int err;
+};
+
+// surv0_sock is our per-socket protocol private structure.
+struct surv0_sock {
+ int ttl;
+ nni_list pipes;
+ nni_mtx mtx;
+ surv0_ctx ctx;
+ nni_id_map surveys;
+ nni_pollable writable;
+ nni_pollable readable;
+ nni_atomic_int send_buf;
+};
+
+// surv0_pipe is our per-pipe protocol private structure.
+struct surv0_pipe {
+ nni_pipe * pipe;
+ surv0_sock * sock;
+ nni_lmq send_queue;
+ nni_list_node node;
+ nni_aio aio_send;
+ nni_aio aio_recv;
+ bool busy;
+ bool closed;
+};
+
+static void
+surv0_ctx_abort(surv0_ctx *ctx, int err)
+{
+ nni_aio * aio;
+ surv0_sock *sock = ctx->sock;
+
+ while ((aio = nni_list_first(&ctx->recv_queue)) != NULL) {
+ nni_list_remove(&ctx->recv_queue, aio);
+ nni_aio_finish_error(aio, err);
+ }
+ nni_lmq_flush(&ctx->recv_lmq);
+ if (ctx->survey_id != 0) {
+ nni_id_remove(&sock->surveys, ctx->survey_id);
+ ctx->survey_id = 0;
+ }
+ if (ctx == &sock->ctx) {
+ nni_pollable_clear(&sock->readable);
+ }
+}
+
+static void
+surv0_ctx_close(surv0_ctx *ctx)
+{
+ surv0_sock *sock = ctx->sock;
+
+ nni_mtx_lock(&sock->mtx);
+ surv0_ctx_abort(ctx, NNG_ECLOSED);
+ nni_mtx_unlock(&sock->mtx);
+}
+
+static void
+surv0_ctx_fini(void *arg)
+{
+ surv0_ctx *ctx = arg;
+
+ surv0_ctx_close(ctx);
+ nni_timer_cancel(&ctx->timer);
+ nni_lmq_fini(&ctx->recv_lmq);
+}
+
+static int
+surv0_ctx_init(void *c, void *s)
+{
+ surv0_ctx * ctx = c;
+ surv0_sock * sock = s;
+ int rv;
+ int len;
+ nng_duration tmo;
+
+ nni_aio_list_init(&ctx->recv_queue);
+ nni_atomic_init(&ctx->recv_buf);
+ nni_atomic_init(&ctx->survey_time);
+
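+	// The socket's own context establishes the defaults; contexts that
+	// the application creates later inherit the socket's current values.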
+ if (ctx == &sock->ctx) {
+ len = 128;
+ tmo = NNI_SECOND; // survey timeout
+ } else {
+ len = nni_atomic_get(&sock->ctx.recv_buf);
+ tmo = nni_atomic_get(&sock->ctx.survey_time);
+ }
+
+ nni_atomic_set(&ctx->recv_buf, len);
+ nni_atomic_set(&ctx->survey_time, tmo);
+
+ ctx->sock = sock;
+
+ if ((rv = nni_lmq_init(&ctx->recv_lmq, len)) != 0) {
+ surv0_ctx_fini(ctx);
+ return (rv);
+ }
+ nni_timer_init(&ctx->timer, surv0_ctx_timeout, ctx);
+ return (0);
+}
+
+static void
+surv0_ctx_cancel(nni_aio *aio, void *arg, int rv)
+{
+ surv0_ctx * ctx = arg;
+ surv0_sock *sock = ctx->sock;
+ nni_mtx_lock(&sock->mtx);
+ if (nni_list_active(&ctx->recv_queue, aio)) {
+ nni_list_remove(&ctx->recv_queue, aio);
+ nni_aio_finish_error(aio, rv);
+ }
+ if (ctx->survey_id != 0) {
+ nni_id_remove(&sock->surveys, ctx->survey_id);
+ ctx->survey_id = 0;
+ }
+ nni_mtx_unlock(&sock->mtx);
+}
+
+static void
+surv0_ctx_recv(void *arg, nni_aio *aio)
+{
+ surv0_ctx * ctx = arg;
+ surv0_sock *sock = ctx->sock;
+ nni_msg * msg;
+
+ if (nni_aio_begin(aio) != 0) {
+ return;
+ }
+
+ nni_mtx_lock(&sock->mtx);
+ if (ctx->survey_id == 0) {
+ nni_mtx_unlock(&sock->mtx);
+ nni_aio_finish_error(aio, NNG_ESTATE);
+ return;
+ }
+again:
+ if (nni_lmq_getq(&ctx->recv_lmq, &msg) != 0) {
+ int rv;
+ if ((rv = nni_aio_schedule(aio, &surv0_ctx_cancel, ctx)) !=
+ 0) {
+ nni_mtx_unlock(&sock->mtx);
+ nni_aio_finish_error(aio, rv);
+ return;
+ }
+ nni_list_append(&ctx->recv_queue, aio);
+ nni_mtx_unlock(&sock->mtx);
+ return;
+ }
+ if (nni_lmq_empty(&ctx->recv_lmq) && (ctx == &sock->ctx)) {
+ nni_pollable_clear(&sock->readable);
+ }
+ if ((msg = nni_msg_unique(msg)) == NULL) {
+ goto again;
+ }
+
+ nni_mtx_unlock(&sock->mtx);
+ nni_aio_finish_msg(aio, msg);
+}
+
+static void
+surv0_ctx_timeout(void *arg)
+{
+ surv0_ctx * ctx = arg;
+ surv0_sock *sock = ctx->sock;
+
+ nni_mtx_lock(&sock->mtx);
+ if (nni_clock() < ctx->expire) {
+ nni_mtx_unlock(&sock->mtx);
+ return;
+ }
+
+ // Abort any pending receives.
+ surv0_ctx_abort(ctx, NNG_ETIMEDOUT);
+ nni_mtx_unlock(&sock->mtx);
+}
+
+static void
+surv0_ctx_send(void *arg, nni_aio *aio)
+{
+ surv0_ctx * ctx = arg;
+ surv0_sock * sock = ctx->sock;
+ surv0_pipe * pipe;
+ nni_msg * msg = nni_aio_get_msg(aio);
+ size_t len = nni_msg_len(msg);
+ nni_time now = nni_clock();
+ nng_duration survey_time;
+ int rv;
+
+ if (nni_aio_begin(aio) != 0) {
+ return;
+ }
+
+ survey_time = nni_atomic_get(&ctx->survey_time);
+
+ nni_mtx_lock(&sock->mtx);
+
+ // Abort everything outstanding.
+ surv0_ctx_abort(ctx, NNG_ECANCELED);
+ nni_timer_cancel(&ctx->timer);
+
+ // Allocate the new ID.
+ if ((rv = nni_id_alloc(&sock->surveys, &ctx->survey_id, ctx)) != 0) {
+ nni_mtx_unlock(&sock->mtx);
+ nni_aio_finish_error(aio, rv);
+ return;
+ }
+ nni_msg_header_clear(msg);
+ nni_msg_header_append_u32(msg, (uint32_t) ctx->survey_id);
+
+ // From this point, we're committed to success. Note that we send
+ // regardless of whether there are any pipes or not. If no pipes,
+ // then it just gets discarded.
+ nni_aio_set_msg(aio, NULL);
+ NNI_LIST_FOREACH (&sock->pipes, pipe) {
+
+ // if the pipe isn't busy, then send this message direct.
+ if (!pipe->busy) {
+ pipe->busy = true;
+ nni_msg_clone(msg);
+ nni_aio_set_msg(&pipe->aio_send, msg);
+ nni_pipe_send(pipe->pipe, &pipe->aio_send);
+ } else if (!nni_lmq_full(&pipe->send_queue)) {
+ nni_msg_clone(msg);
+ nni_lmq_putq(&pipe->send_queue, msg);
+ }
+ }
+
+ ctx->expire = now + survey_time;
+ nni_timer_schedule(&ctx->timer, ctx->expire);
+
+ nni_mtx_unlock(&sock->mtx);
+ nni_msg_free(msg);
+
+ nni_aio_finish(aio, 0, len);
+}
+
+static void
+surv0_sock_fini(void *arg)
+{
+ surv0_sock *sock = arg;
+
+ surv0_ctx_fini(&sock->ctx);
+ nni_id_map_fini(&sock->surveys);
+ nni_pollable_fini(&sock->writable);
+ nni_pollable_fini(&sock->readable);
+ nni_mtx_fini(&sock->mtx);
+}
+
+static int
+surv0_sock_init(void *arg, nni_sock *s)
+{
+ surv0_sock *sock = arg;
+ int rv;
+
+ NNI_ARG_UNUSED(s);
+
+ NNI_LIST_INIT(&sock->pipes, surv0_pipe, node);
+ nni_mtx_init(&sock->mtx);
+ nni_pollable_init(&sock->readable);
+ nni_pollable_init(&sock->writable);
+ // We are always writable.
+ nni_pollable_raise(&sock->writable);
+
+	// We allow for some buffering on a per-pipe basis, so that multiple
+	// contexts can have surveys outstanding at once. It is recommended
+	// to increase this if many contexts will want to publish at nearly
+	// the same time.
+ nni_atomic_init(&sock->send_buf);
+ nni_atomic_set(&sock->send_buf, 8);
+
+ // Survey IDs are 32 bits, with the high order bit set.
+ // We start at a random point, to minimize likelihood of
+ // accidental collision across restarts.
+ nni_id_map_init(&sock->surveys, 0x80000000u, 0xffffffffu, true);
+
+ if ((rv = surv0_ctx_init(&sock->ctx, sock)) != 0) {
+ surv0_sock_fini(sock);
+ return (rv);
+ }
+
+ sock->ttl = 8;
+
+ return (0);
+}
+
+static void
+surv0_sock_open(void *arg)
+{
+ NNI_ARG_UNUSED(arg);
+}
+
+static void
+surv0_sock_close(void *arg)
+{
+ surv0_sock *s = arg;
+
+ surv0_ctx_close(&s->ctx);
+}
+
+static void
+surv0_pipe_stop(void *arg)
+{
+ surv0_pipe *p = arg;
+
+ nni_aio_stop(&p->aio_send);
+ nni_aio_stop(&p->aio_recv);
+}
+
+static void
+surv0_pipe_fini(void *arg)
+{
+ surv0_pipe *p = arg;
+
+ nni_aio_fini(&p->aio_send);
+ nni_aio_fini(&p->aio_recv);
+ nni_lmq_fini(&p->send_queue);
+}
+
+static int
+surv0_pipe_init(void *arg, nni_pipe *pipe, void *s)
+{
+ surv0_pipe *p = arg;
+ surv0_sock *sock = s;
+ int rv;
+ int len;
+
+ len = nni_atomic_get(&sock->send_buf);
+ nni_aio_init(&p->aio_send, surv0_pipe_send_cb, p);
+ nni_aio_init(&p->aio_recv, surv0_pipe_recv_cb, p);
+
+ // This depth could be tunable. The deeper the queue, the more
+ // concurrent surveys that can be delivered (multiple contexts).
+ // Note that surveys can be *outstanding*, but not yet put on the wire.
+ if ((rv = nni_lmq_init(&p->send_queue, len)) != 0) {
+ surv0_pipe_fini(p);
+ return (rv);
+ }
+
+ p->pipe = pipe;
+ p->sock = sock;
+ return (0);
+}
+
+static int
+surv0_pipe_start(void *arg)
+{
+ surv0_pipe *p = arg;
+ surv0_sock *s = p->sock;
+
+ if (nni_pipe_peer(p->pipe) != NNG_SURVEYOR0_PEER) {
+ return (NNG_EPROTO);
+ }
+
+ nni_mtx_lock(&s->mtx);
+ nni_list_append(&s->pipes, p);
+ nni_mtx_unlock(&s->mtx);
+
+ nni_pipe_recv(p->pipe, &p->aio_recv);
+ return (0);
+}
+
+static void
+surv0_pipe_close(void *arg)
+{
+ surv0_pipe *p = arg;
+ surv0_sock *s = p->sock;
+
+ nni_aio_close(&p->aio_send);
+ nni_aio_close(&p->aio_recv);
+
+ nni_mtx_lock(&s->mtx);
+ p->closed = true;
+ nni_lmq_flush(&p->send_queue);
+ if (nni_list_active(&s->pipes, p)) {
+ nni_list_remove(&s->pipes, p);
+ }
+ nni_mtx_unlock(&s->mtx);
+}
+
+static void
+surv0_pipe_send_cb(void *arg)
+{
+ surv0_pipe *p = arg;
+ surv0_sock *sock = p->sock;
+ nni_msg * msg;
+
+ if (nni_aio_result(&p->aio_send) != 0) {
+ nni_msg_free(nni_aio_get_msg(&p->aio_send));
+ nni_aio_set_msg(&p->aio_send, NULL);
+ nni_pipe_close(p->pipe);
+ return;
+ }
+
+ nni_mtx_lock(&sock->mtx);
+ if (p->closed) {
+ nni_mtx_unlock(&sock->mtx);
+ return;
+ }
+ if (nni_lmq_getq(&p->send_queue, &msg) == 0) {
+ nni_aio_set_msg(&p->aio_send, msg);
+ nni_pipe_send(p->pipe, &p->aio_send);
+ } else {
+ p->busy = false;
+ }
+ nni_mtx_unlock(&sock->mtx);
+}
+
+static void
+surv0_pipe_recv_cb(void *arg)
+{
+ surv0_pipe *p = arg;
+ surv0_sock *sock = p->sock;
+ surv0_ctx * ctx;
+ nni_msg * msg;
+ uint32_t id;
+ nni_aio * aio;
+
+ if (nni_aio_result(&p->aio_recv) != 0) {
+ nni_pipe_close(p->pipe);
+ return;
+ }
+
+ msg = nni_aio_get_msg(&p->aio_recv);
+ nni_aio_set_msg(&p->aio_recv, NULL);
+ nni_msg_set_pipe(msg, nni_pipe_id(p->pipe));
+
+ // We yank 4 bytes of body, and move them to the header.
+ if (nni_msg_len(msg) < 4) {
+ // Peer sent us garbage. Kick it.
+ nni_msg_free(msg);
+ nni_pipe_close(p->pipe);
+ return;
+ }
+ id = nni_msg_trim_u32(msg);
+ nni_msg_header_append_u32(msg, id);
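+ // The header now carries just the 32-bit survey ID (high bit set);
+ // the remaining body is the respondent's payload.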
+
+ nni_mtx_lock(&sock->mtx);
+ // Best effort at delivery. Discard if no context or context is
+ // unable to receive it.
+ if (((ctx = nni_id_get(&sock->surveys, id)) == NULL) ||
+ (nni_lmq_full(&ctx->recv_lmq))) {
+ nni_msg_free(msg);
+ } else if ((aio = nni_list_first(&ctx->recv_queue)) != NULL) {
+ nni_list_remove(&ctx->recv_queue, aio);
+ nni_aio_finish_msg(aio, msg);
+ } else {
+ nni_lmq_putq(&ctx->recv_lmq, msg);
+ if (ctx == &sock->ctx) {
+ nni_pollable_raise(&sock->readable);
+ }
+ }
+ nni_mtx_unlock(&sock->mtx);
+
+ nni_pipe_recv(p->pipe, &p->aio_recv);
+}
+
+static int
+surv0_ctx_set_survey_time(
+ void *arg, const void *buf, size_t sz, nni_opt_type t)
+{
+ surv0_ctx * ctx = arg;
+ nng_duration expire;
+ int rv;
+ if ((rv = nni_copyin_ms(&expire, buf, sz, t)) == 0) {
+ nni_atomic_set(&ctx->survey_time, expire);
+ }
+ return (rv);
+}
+
+static int
+surv0_ctx_get_survey_time(void *arg, void *buf, size_t *szp, nni_opt_type t)
+{
+ surv0_ctx *ctx = arg;
+ return (
+ nni_copyout_ms(nni_atomic_get(&ctx->survey_time), buf, szp, t));
+}
+
+static int
+surv0_sock_set_max_ttl(void *arg, const void *buf, size_t sz, nni_opt_type t)
+{
+ surv0_sock *s = arg;
+ return (nni_copyin_int(&s->ttl, buf, sz, 1, NNI_MAX_MAX_TTL, t));
+}
+
+static int
+surv0_sock_get_max_ttl(void *arg, void *buf, size_t *szp, nni_opt_type t)
+{
+ surv0_sock *s = arg;
+ return (nni_copyout_int(s->ttl, buf, szp, t));
+}
+
+static int
+surv0_sock_set_survey_time(
+ void *arg, const void *buf, size_t sz, nni_opt_type t)
+{
+ surv0_sock *s = arg;
+ return (surv0_ctx_set_survey_time(&s->ctx, buf, sz, t));
+}
+
+static int
+surv0_sock_get_survey_time(void *arg, void *buf, size_t *szp, nni_opt_type t)
+{
+ surv0_sock *s = arg;
+ return (surv0_ctx_get_survey_time(&s->ctx, buf, szp, t));
+}
+
+static int
+surv0_sock_get_send_fd(void *arg, void *buf, size_t *szp, nni_opt_type t)
+{
+ surv0_sock *sock = arg;
+ int rv;
+ int fd;
+
+ if ((rv = nni_pollable_getfd(&sock->writable, &fd)) != 0) {
+ return (rv);
+ }
+ return (nni_copyout_int(fd, buf, szp, t));
+}
+
+static int
+surv0_sock_get_recv_fd(void *arg, void *buf, size_t *szp, nni_opt_type t)
+{
+ surv0_sock *sock = arg;
+ int rv;
+ int fd;
+
+ if ((rv = nni_pollable_getfd(&sock->readable, &fd)) != 0) {
+ return (rv);
+ }
+ return (nni_copyout_int(fd, buf, szp, t));
+}
+
+static void
+surv0_sock_recv(void *arg, nni_aio *aio)
+{
+ surv0_sock *s = arg;
+ surv0_ctx_recv(&s->ctx, aio);
+}
+
+static void
+surv0_sock_send(void *arg, nni_aio *aio)
+{
+ surv0_sock *s = arg;
+ surv0_ctx_send(&s->ctx, aio);
+}
+
+static nni_proto_pipe_ops surv0_pipe_ops = {
+ .pipe_size = sizeof(surv0_pipe),
+ .pipe_init = surv0_pipe_init,
+ .pipe_fini = surv0_pipe_fini,
+ .pipe_start = surv0_pipe_start,
+ .pipe_close = surv0_pipe_close,
+ .pipe_stop = surv0_pipe_stop,
+};
+
+static nni_option surv0_ctx_options[] = {
+ {
+ .o_name = NNG_OPT_SURVEYOR_SURVEYTIME,
+ .o_get = surv0_ctx_get_survey_time,
+ .o_set = surv0_ctx_set_survey_time,
+ },
+ {
+ .o_name = NULL,
+ }
+};
+static nni_proto_ctx_ops surv0_ctx_ops = {
+ .ctx_size = sizeof(surv0_ctx),
+ .ctx_init = surv0_ctx_init,
+ .ctx_fini = surv0_ctx_fini,
+ .ctx_send = surv0_ctx_send,
+ .ctx_recv = surv0_ctx_recv,
+ .ctx_options = surv0_ctx_options,
+};
+
+static nni_option surv0_sock_options[] = {
+ {
+ .o_name = NNG_OPT_SURVEYOR_SURVEYTIME,
+ .o_get = surv0_sock_get_survey_time,
+ .o_set = surv0_sock_set_survey_time,
+ },
+ {
+ .o_name = NNG_OPT_MAXTTL,
+ .o_get = surv0_sock_get_max_ttl,
+ .o_set = surv0_sock_set_max_ttl,
+ },
+ {
+ .o_name = NNG_OPT_RECVFD,
+ .o_get = surv0_sock_get_recv_fd,
+ },
+ {
+ .o_name = NNG_OPT_SENDFD,
+ .o_get = surv0_sock_get_send_fd,
+ },
+ // terminate list
+ {
+ .o_name = NULL,
+ },
+};
+
+static nni_proto_sock_ops surv0_sock_ops = {
+ .sock_size = sizeof(surv0_sock),
+ .sock_init = surv0_sock_init,
+ .sock_fini = surv0_sock_fini,
+ .sock_open = surv0_sock_open,
+ .sock_close = surv0_sock_close,
+ .sock_send = surv0_sock_send,
+ .sock_recv = surv0_sock_recv,
+ .sock_options = surv0_sock_options,
+};
+
+static nni_proto surv0_proto = {
+ .proto_version = NNI_PROTOCOL_VERSION,
+ .proto_self = { NNG_SURVEYOR0_SELF, NNG_SURVEYOR0_SELF_NAME },
+ .proto_peer = { NNG_SURVEYOR0_PEER, NNG_SURVEYOR0_PEER_NAME },
+ .proto_flags = NNI_PROTO_FLAG_SNDRCV,
+ .proto_sock_ops = &surv0_sock_ops,
+ .proto_pipe_ops = &surv0_pipe_ops,
+ .proto_ctx_ops = &surv0_ctx_ops,
+};
+
+int
+nng_surveyor0_open(nng_socket *sock)
+{
+ return (nni_proto_open(sock, &surv0_proto));
+}
diff --git a/src/sp/protocol/survey0/survey_test.c b/src/sp/protocol/survey0/survey_test.c
new file mode 100644
index 00000000..95d27adf
--- /dev/null
+++ b/src/sp/protocol/survey0/survey_test.c
@@ -0,0 +1,626 @@
+//
+// Copyright 2020 Staysail Systems, Inc. <info@staysail.tech>
+// Copyright 2018 Capitar IT Group BV <info@capitar.com>
+//
+// This software is supplied under the terms of the MIT License, a
+// copy of which should be located in the distribution where this
+// file was obtained (LICENSE.txt). A copy of the license may also be
+// found online at https://opensource.org/licenses/MIT.
+//
+
+#include <nuts.h>
+
+static void
+test_surv_identity(void)
+{
+ nng_socket s;
+ int p;
+ char * n;
+
+ NUTS_PASS(nng_surveyor0_open(&s));
+ NUTS_PASS(nng_socket_get_int(s, NNG_OPT_PROTO, &p));
+ NUTS_TRUE(p == NNG_SURVEYOR0_SELF);
+ NUTS_PASS(nng_socket_get_int(s, NNG_OPT_PEER, &p));
+ NUTS_TRUE(p == NNG_SURVEYOR0_PEER); // 0x63
+ NUTS_PASS(nng_socket_get_string(s, NNG_OPT_PROTONAME, &n));
+ NUTS_MATCH(n, NNG_SURVEYOR0_SELF_NAME);
+ nng_strfree(n);
+ NUTS_PASS(nng_socket_get_string(s, NNG_OPT_PEERNAME, &n));
+ NUTS_MATCH(n, NNG_SURVEYOR0_PEER_NAME);
+ nng_strfree(n);
+ NUTS_CLOSE(s);
+}
+
+static void
+test_surv_ttl_option(void)
+{
+ nng_socket surv;
+ int v;
+ bool b;
+ size_t sz;
+ const char *opt = NNG_OPT_MAXTTL;
+
+ NUTS_PASS(nng_surveyor0_open(&surv));
+
+ NUTS_PASS(nng_socket_set_int(surv, opt, 1));
+ NUTS_FAIL(nng_socket_set_int(surv, opt, 0), NNG_EINVAL);
+ NUTS_FAIL(nng_socket_set_int(surv, opt, -1), NNG_EINVAL);
+ // This test will fail if the NNI_MAX_MAX_TTL is changed from the
+ // builtin default of 15.
+ NUTS_FAIL(nng_socket_set_int(surv, opt, 16), NNG_EINVAL);
+ NUTS_FAIL(nng_socket_set_int(surv, opt, 256), NNG_EINVAL);
+ NUTS_PASS(nng_socket_set_int(surv, opt, 3));
+ NUTS_PASS(nng_socket_get_int(surv, opt, &v));
+ NUTS_TRUE(v == 3);
+ v = 0;
+ sz = sizeof(v);
+ NUTS_PASS(nng_socket_get(surv, opt, &v, &sz));
+ NUTS_TRUE(v == 3);
+ NUTS_TRUE(sz == sizeof(v));
+
+ NUTS_FAIL(nng_socket_set(surv, opt, "", 1), NNG_EINVAL);
+ sz = 1;
+ NUTS_FAIL(nng_socket_get(surv, opt, &v, &sz), NNG_EINVAL);
+ NUTS_FAIL(nng_socket_set_bool(surv, opt, true), NNG_EBADTYPE);
+ NUTS_FAIL(nng_socket_get_bool(surv, opt, &b), NNG_EBADTYPE);
+
+ NUTS_CLOSE(surv);
+}
+
+static void
+test_surv_survey_time_option(void)
+{
+ nng_socket surv;
+ nng_duration d;
+ bool b;
+ size_t sz = sizeof(b);
+ const char * opt = NNG_OPT_SURVEYOR_SURVEYTIME;
+
+ NUTS_PASS(nng_surveyor0_open(&surv));
+
+ NUTS_PASS(nng_socket_set_ms(surv, opt, 10));
+ NUTS_FAIL(nng_socket_set(surv, opt, "", 1), NNG_EINVAL);
+ NUTS_FAIL(nng_socket_get(surv, opt, &b, &sz), NNG_EINVAL);
+ NUTS_FAIL(nng_socket_set_bool(surv, opt, true), NNG_EBADTYPE);
+ NUTS_FAIL(nng_socket_get_bool(surv, opt, &b), NNG_EBADTYPE);
+
+ NUTS_PASS(nng_socket_get_ms(surv, opt, &d));
+ NUTS_TRUE(d == 10);
+ NUTS_CLOSE(surv);
+}
+
+void
+test_surv_recv_bad_state(void)
+{
+ nng_socket surv;
+ nng_msg * msg = NULL;
+
+ NUTS_PASS(nng_surveyor0_open(&surv));
+ NUTS_FAIL(nng_recvmsg(surv, &msg, 0), NNG_ESTATE);
+ NUTS_TRUE(msg == NULL);
+ NUTS_CLOSE(surv);
+}
+
+static void
+test_surv_recv_garbage(void)
+{
+ nng_socket resp;
+ nng_socket surv;
+ nng_msg * m;
+ uint32_t surv_id;
+
+ NUTS_PASS(nng_respondent0_open_raw(&resp));
+ NUTS_PASS(nng_surveyor0_open(&surv));
+ NUTS_PASS(nng_socket_set_ms(surv, NNG_OPT_RECVTIMEO, 100));
+ NUTS_PASS(nng_socket_set_ms(surv, NNG_OPT_SENDTIMEO, 1000));
+ NUTS_PASS(nng_socket_set_ms(resp, NNG_OPT_SENDTIMEO, 1000));
+
+ NUTS_MARRY(surv, resp);
+
+ NUTS_PASS(nng_msg_alloc(&m, 0));
+ NUTS_PASS(nng_sendmsg(surv, m, 0));
+
+ NUTS_PASS(nng_recvmsg(resp, &m, 0));
+
+ // The message will have a header that contains the 32-bit pipe ID,
+ // followed by the 32-bit survey ID. We will discard the survey
+ // ID before sending it back.
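+ // Without the survey ID the response cannot be matched to an
+ // outstanding survey, so the recv below should time out.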
+ NUTS_TRUE(nng_msg_header_len(m) == 8);
+ NUTS_PASS(nng_msg_header_chop_u32(m, &surv_id));
+
+ NUTS_PASS(nng_sendmsg(resp, m, 0));
+ NUTS_FAIL(nng_recvmsg(surv, &m, 0), NNG_ETIMEDOUT);
+
+ NUTS_CLOSE(surv);
+ NUTS_CLOSE(resp);
+}
+
+#define SECOND 1000
+
+void
+test_surv_resp_exchange(void)
+{
+ nng_socket surv;
+ nng_socket resp;
+
+ NUTS_PASS(nng_surveyor0_open(&surv));
+ NUTS_PASS(nng_respondent0_open(&resp));
+
+ NUTS_PASS(nng_socket_set_ms(surv, NNG_OPT_RECVTIMEO, SECOND));
+ NUTS_PASS(nng_socket_set_ms(resp, NNG_OPT_RECVTIMEO, SECOND));
+ NUTS_PASS(nng_socket_set_ms(surv, NNG_OPT_SENDTIMEO, SECOND));
+ NUTS_PASS(nng_socket_set_ms(resp, NNG_OPT_SENDTIMEO, SECOND));
+
+ NUTS_MARRY(resp, surv);
+
+ NUTS_SEND(surv, "ping");
+ NUTS_RECV(resp, "ping");
+ NUTS_SEND(resp, "pong");
+ NUTS_RECV(surv, "pong");
+
+ NUTS_CLOSE(surv);
+ NUTS_CLOSE(resp);
+}
+
+void
+test_surv_cancel(void)
+{
+ nng_socket surv;
+ nng_socket resp;
+
+ NUTS_PASS(nng_respondent0_open(&resp));
+ NUTS_PASS(nng_surveyor0_open(&surv));
+
+ NUTS_PASS(nng_socket_set_ms(surv, NNG_OPT_RECVTIMEO, SECOND));
+ NUTS_PASS(nng_socket_set_ms(resp, NNG_OPT_RECVTIMEO, SECOND));
+ NUTS_PASS(nng_socket_set_ms(surv, NNG_OPT_SENDTIMEO, 5 * SECOND));
+ NUTS_PASS(nng_socket_set_ms(resp, NNG_OPT_SENDTIMEO, 5 * SECOND));
+ NUTS_PASS(nng_socket_set_int(surv, NNG_OPT_SENDBUF, 16));
+
+ NUTS_MARRY(resp, surv);
+
+ // Send req #1 (abc).
+ NUTS_SEND(surv, "abc");
+
+ // Sleep a bit, to ensure that our survey gets to the far side.
+ // (If we cancel too fast, then our outgoing send will be canceled
+ // before it gets to the peer.)
+ NUTS_SLEEP(100);
+
+ // Send the next survey ("def"). Note that the respondent will
+ // have already buffered the received survey, and should simply
+ // be waiting for us to respond to "abc".
+ NUTS_SEND(surv, "def");
+
+ // Receive the first survey (should be "abc") on the respondent.
+ NUTS_RECV(resp, "abc");
+
+ // The respondent sends the response to the first survey. This will
+ // be discarded by the SURVEYOR socket, since that survey was
+ // superseded.
+ NUTS_SEND(resp, "abc");
+
+ // Now get the next survey from the respondent; should be "def".
+ NUTS_RECV(resp, "def");
+
+ // And send it back to the surveyor.
+ NUTS_SEND(resp, "def");
+
+ // Receive on the surveyor. This should give back "def".
+ NUTS_RECV(surv, "def");
+
+ NUTS_CLOSE(surv);
+ NUTS_CLOSE(resp);
+}
+
+void
+test_surv_cancel_abort_recv(void)
+{
+ nng_aio * aio;
+ nng_duration time = SECOND * 10; // 10s (kind of never)
+ nng_socket surv;
+ nng_socket resp;
+
+ NUTS_PASS(nng_respondent0_open(&resp));
+ NUTS_PASS(nng_surveyor0_open(&surv));
+ NUTS_PASS(nng_aio_alloc(&aio, NULL, NULL));
+
+ NUTS_PASS(nng_socket_set_ms(surv, NNG_OPT_SURVEYOR_SURVEYTIME, time));
+ NUTS_PASS(nng_socket_set_int(surv, NNG_OPT_SENDBUF, 16));
+ NUTS_PASS(nng_socket_set_ms(surv, NNG_OPT_RECVTIMEO, 5 * SECOND));
+ NUTS_PASS(nng_socket_set_ms(resp, NNG_OPT_RECVTIMEO, 5 * SECOND));
+ NUTS_PASS(nng_socket_set_ms(surv, NNG_OPT_SENDTIMEO, 5 * SECOND));
+ NUTS_PASS(nng_socket_set_ms(resp, NNG_OPT_SENDTIMEO, 5 * SECOND));
+
+ NUTS_MARRY(resp, surv);
+
+ // Send survey #1 (abc).
+ NUTS_SEND(surv, "abc");
+
+ // Wait for it to get to the other side.
+ NUTS_SLEEP(100);
+
+ nng_aio_set_timeout(aio, 5 * SECOND);
+ nng_recv_aio(surv, aio);
+
+ // Give time for this recv to post properly.
+ NUTS_SLEEP(100);
+
+ // Send the next survey ("def"). Note that the respondent will
+ // have already buffered the received survey, and should simply
+ // be waiting for us to respond to "abc".
+ NUTS_SEND(surv, "def");
+
+ // Our pending I/O should have been canceled.
+ nng_aio_wait(aio);
+ NUTS_FAIL(nng_aio_result(aio), NNG_ECANCELED);
+
+ // Receive the first survey (should be "abc") on the respondent.
+ NUTS_RECV(resp, "abc");
+
+ // Respondent sends the reply to the first survey. This will be
+ // discarded by the SURVEYOR socket.
+ NUTS_SEND(resp, "abc");
+
+ // Now get the next survey from the respondent; should be "def".
+ NUTS_RECV(resp, "def");
+
+ // And send it back to the surveyor.
+ NUTS_SEND(resp, "def");
+
+ // Receive on the surveyor. This should give back "def".
+ NUTS_RECV(surv, "def");
+
+ nng_aio_free(aio);
+ NUTS_CLOSE(surv);
+ NUTS_CLOSE(resp);
+}
+
+static void
+test_surv_cancel_post_recv(void)
+{
+ nng_socket surv;
+ nng_socket resp;
+
+ NUTS_PASS(nng_surveyor0_open(&surv));
+ NUTS_PASS(nng_respondent0_open(&resp));
+ NUTS_PASS(nng_socket_set_ms(surv, NNG_OPT_SENDTIMEO, 1000));
+ NUTS_PASS(nng_socket_set_ms(resp, NNG_OPT_SENDTIMEO, 1000));
+ NUTS_PASS(nng_socket_set_ms(surv, NNG_OPT_RECVTIMEO, 1000));
+ NUTS_PASS(nng_socket_set_ms(resp, NNG_OPT_RECVTIMEO, 1000));
+ NUTS_MARRY(surv, resp);
+
+ NUTS_SEND(surv, "ONE");
+ NUTS_RECV(resp, "ONE");
+ NUTS_SEND(resp, "one");
+ NUTS_SLEEP(100); // Make sure reply arrives!
+ NUTS_SEND(surv, "TWO");
+ NUTS_RECV(resp, "TWO");
+ NUTS_SEND(resp, "two");
+ NUTS_RECV(surv, "two");
+
+ NUTS_CLOSE(surv);
+ NUTS_CLOSE(resp);
+}
+
+static void
+test_surv_poll_writeable(void)
+{
+ int fd;
+ nng_socket surv;
+ nng_socket resp;
+
+ NUTS_PASS(nng_surveyor0_open(&surv));
+ NUTS_PASS(nng_respondent0_open(&resp));
+ NUTS_PASS(nng_socket_get_int(surv, NNG_OPT_SENDFD, &fd));
+ NUTS_TRUE(fd >= 0);
+
+ // Survey is broadcast, so we can always write.
+ NUTS_TRUE(nuts_poll_fd(fd));
+
+ NUTS_MARRY(surv, resp);
+
+ // Still writable after connect.
+ NUTS_TRUE(nuts_poll_fd(fd));
+
+ NUTS_CLOSE(surv);
+ NUTS_CLOSE(resp);
+}
+
+void
+test_surv_poll_readable(void)
+{
+ int fd;
+ nng_socket surv;
+ nng_socket resp;
+ nng_msg * msg;
+
+ NUTS_PASS(nng_surveyor0_open(&surv));
+ NUTS_PASS(nng_respondent0_open(&resp));
+ NUTS_PASS(nng_socket_get_int(surv, NNG_OPT_RECVFD, &fd));
+ NUTS_TRUE(fd >= 0);
+
+ // Not readable if not connected!
+ NUTS_TRUE(nuts_poll_fd(fd) == false);
+
+ // Even after connect (no message yet)
+ NUTS_MARRY(surv, resp);
+ NUTS_TRUE(nuts_poll_fd(fd) == false);
+
+ // But once we send messages, it is.
+ // We have to send a survey in order to get a response back.
+
+ NUTS_PASS(nng_msg_alloc(&msg, 0));
+ NUTS_PASS(nng_msg_append(msg, "xyz", 3));
+ NUTS_PASS(nng_sendmsg(surv, msg, 0));
+ NUTS_PASS(nng_recvmsg(resp, &msg, 0)); // recv on respondent
+ NUTS_PASS(nng_sendmsg(resp, msg, 0)); // echo it back
+ NUTS_SLEEP(200); // give time for message to arrive
+
+ NUTS_TRUE(nuts_poll_fd(fd) == true);
+
+ // and receiving makes it no longer ready
+ NUTS_PASS(nng_recvmsg(surv, &msg, 0));
+ nng_msg_free(msg);
+ NUTS_TRUE(nuts_poll_fd(fd) == false);
+
+ // TODO verify unsolicited response
+
+ NUTS_CLOSE(surv);
+ NUTS_CLOSE(resp);
+}
+
+static void
+test_surv_ctx_no_poll(void)
+{
+ int fd;
+ nng_socket surv;
+ nng_ctx ctx;
+
+ NUTS_PASS(nng_surveyor0_open(&surv));
+ NUTS_PASS(nng_ctx_open(&ctx, surv));
+ NUTS_FAIL(nng_ctx_get_int(ctx, NNG_OPT_SENDFD, &fd), NNG_ENOTSUP);
+ NUTS_FAIL(nng_ctx_get_int(ctx, NNG_OPT_RECVFD, &fd), NNG_ENOTSUP);
+ NUTS_PASS(nng_ctx_close(ctx));
+ NUTS_CLOSE(surv);
+}
+
+static void
+test_surv_ctx_recv_nonblock(void)
+{
+ nng_socket surv;
+ nng_socket resp;
+ nng_ctx ctx;
+ nng_aio * aio;
+ nng_msg * msg;
+
+ NUTS_PASS(nng_surveyor0_open(&surv));
+ NUTS_PASS(nng_respondent0_open(&resp));
+ NUTS_PASS(nng_ctx_open(&ctx, surv));
+ NUTS_PASS(nng_aio_alloc(&aio, NULL, NULL));
+ NUTS_PASS(nng_msg_alloc(&msg, 0));
+
+ NUTS_MARRY(surv, resp);
+
+ nng_aio_set_msg(aio, msg);
+ nng_ctx_send(ctx, aio);
+ nng_aio_wait(aio);
+ NUTS_PASS(nng_aio_result(aio));
+ nng_aio_set_timeout(aio, 0); // Instant timeout
+ nng_ctx_recv(ctx, aio);
+
+ nng_aio_wait(aio);
+ NUTS_FAIL(nng_aio_result(aio), NNG_ETIMEDOUT);
+ NUTS_CLOSE(surv);
+ NUTS_CLOSE(resp);
+ nng_aio_free(aio);
+}
+
+static void
+test_surv_ctx_send_nonblock(void)
+{
+ nng_socket surv;
+ nng_ctx ctx;
+ nng_aio * aio;
+ nng_msg * msg;
+
+ NUTS_PASS(nng_surveyor0_open(&surv));
+ NUTS_PASS(nng_ctx_open(&ctx, surv));
+ NUTS_PASS(nng_aio_alloc(&aio, NULL, NULL));
+ NUTS_PASS(nng_msg_alloc(&msg, 0));
+
+ nng_aio_set_msg(aio, msg);
+ nng_aio_set_timeout(aio, 0); // Instant timeout
+ nng_ctx_send(ctx, aio);
+ nng_aio_wait(aio);
+ NUTS_PASS(nng_aio_result(aio)); // We never block
+ NUTS_CLOSE(surv);
+ nng_aio_free(aio);
+}
+
+static void
+test_surv_send_best_effort(void)
+{
+ nng_socket surv;
+ nng_socket resp;
+
+ NUTS_PASS(nng_surveyor0_open(&surv));
+ NUTS_PASS(nng_respondent0_open(&resp));
+ NUTS_MARRY(surv, resp);
+
+ for (int i = 0; i < 200; i++) {
+ NUTS_SEND(surv, "junk");
+ }
+
+ NUTS_CLOSE(surv);
+ NUTS_CLOSE(resp);
+}
+
+static void
+test_surv_survey_timeout(void)
+{
+ nng_socket surv;
+ nng_socket resp;
+ char buf[16];
+ size_t sz;
+
+ NUTS_PASS(nng_surveyor0_open(&surv));
+ NUTS_PASS(nng_respondent0_open(&resp));
+ NUTS_PASS(nng_socket_set_ms(surv, NNG_OPT_SURVEYOR_SURVEYTIME, 50));
+ NUTS_PASS(nng_socket_set_ms(surv, NNG_OPT_RECVTIMEO, 100));
+
+ NUTS_MARRY(surv, resp);
+
+ NUTS_SEND(surv, "hello");
+ NUTS_RECV(resp, "hello");
+
+ sz = sizeof(buf);
+ NUTS_FAIL(nng_recv(surv, buf, &sz, 0), NNG_ETIMEDOUT);
+ NUTS_SEND(resp, "world");
+ NUTS_FAIL(nng_recv(surv, buf, &sz, 0), NNG_ESTATE);
+
+ NUTS_CLOSE(surv);
+ NUTS_CLOSE(resp);
+}
+
+static void
+test_surv_ctx_recv_close_socket(void)
+{
+ nng_socket surv;
+ nng_socket resp;
+ nng_ctx ctx;
+ nng_aio * aio;
+ nng_msg * m;
+
+ NUTS_PASS(nng_surveyor0_open(&surv));
+ NUTS_PASS(nng_respondent0_open(&resp));
+ NUTS_PASS(nng_ctx_open(&ctx, surv));
+ NUTS_PASS(nng_aio_alloc(&aio, NULL, NULL));
+ NUTS_MARRY(surv, resp);
+ NUTS_PASS(nng_msg_alloc(&m, 0));
+ nng_aio_set_msg(aio, m);
+ nng_ctx_send(ctx, aio);
+ nng_aio_wait(aio);
+ NUTS_PASS(nng_aio_result(aio));
+
+ nng_ctx_recv(ctx, aio);
+ nng_close(surv);
+
+ NUTS_FAIL(nng_aio_result(aio), NNG_ECLOSED);
+ nng_aio_free(aio);
+ NUTS_CLOSE(resp);
+}
+
+static void
+test_surv_context_multi(void)
+{
+ nng_socket surv;
+ nng_socket resp;
+ nng_ctx c[5];
+ nng_aio * aio;
+ nng_msg * m;
+ int cnt = sizeof(c) / sizeof(c[0]);
+
+ NUTS_PASS(nng_surveyor0_open(&surv));
+ NUTS_PASS(nng_respondent0_open(&resp));
+ NUTS_MARRY(surv, resp);
+ NUTS_PASS(nng_socket_set_ms(surv, NNG_OPT_SURVEYOR_SURVEYTIME, 200));
+ NUTS_PASS(nng_aio_alloc(&aio, NULL, NULL));
+
+ for (int i = 0; i < cnt; i++) {
+ NUTS_PASS(nng_ctx_open(&c[i], surv));
+ }
+
+ for (int i = 0; i < cnt; i++) {
+ NUTS_PASS(nng_msg_alloc(&m, 0));
+ NUTS_PASS(nng_msg_append_u32(m, i));
+ nng_aio_set_msg(aio, m);
+ nng_ctx_send(c[i], aio);
+ nng_aio_wait(aio);
+ NUTS_PASS(nng_aio_result(aio));
+ }
+
+ for (int i = 0; i < cnt; i++) {
+ NUTS_PASS(nng_recvmsg(resp, &m, 0));
+ NUTS_PASS(nng_sendmsg(resp, m, 0));
+ }
+
+ for (int i = cnt - 1; i >= 0; i--) {
+ uint32_t x;
+ nng_ctx_recv(c[i], aio);
+ nng_aio_wait(aio);
+ NUTS_PASS(nng_aio_result(aio));
+ m = nng_aio_get_msg(aio);
+ TEST_ASSERT(m != NULL);
+ NUTS_PASS(nng_msg_trim_u32(m, &x));
+ NUTS_TRUE(x == (uint32_t) i);
+ nng_msg_free(m);
+ }
+
+ for (int i = 0; i < cnt; i++) {
+ nng_ctx_recv(c[i], aio);
+ nng_aio_wait(aio);
+ NUTS_TRUE(nng_aio_result(aio) != 0);
+ }
+ for (int i = 0; i < cnt; i++) {
+ nng_ctx_close(c[i]);
+ }
+ NUTS_CLOSE(surv);
+ NUTS_CLOSE(resp);
+ nng_aio_free(aio);
+}
+
+static void
+test_surv_validate_peer(void)
+{
+ nng_socket s1, s2;
+ nng_stat * stats;
+ nng_stat * reject;
+ char * addr;
+
+ NUTS_ADDR(addr, "inproc");
+ NUTS_PASS(nng_surveyor0_open(&s1));
+ NUTS_PASS(nng_surveyor0_open(&s2));
+
+ NUTS_PASS(nng_listen(s1, addr, NULL, 0));
+ NUTS_PASS(nng_dial(s2, addr, NULL, NNG_FLAG_NONBLOCK));
+
+ NUTS_SLEEP(100);
+ NUTS_PASS(nng_stats_get(&stats));
+
+ NUTS_TRUE(stats != NULL);
+ NUTS_TRUE((reject = nng_stat_find_socket(stats, s1)) != NULL);
+ NUTS_TRUE((reject = nng_stat_find(reject, "reject")) != NULL);
+
+ NUTS_TRUE(nng_stat_type(reject) == NNG_STAT_COUNTER);
+ NUTS_TRUE(nng_stat_value(reject) > 0);
+
+ NUTS_PASS(nng_close(s1));
+ NUTS_PASS(nng_close(s2));
+ nng_stats_free(stats);
+}
+
+TEST_LIST = {
+ { "survey identity", test_surv_identity },
+ { "survey ttl option", test_surv_ttl_option },
+ { "survey survey time option", test_surv_survey_time_option },
+ { "survey recv bad state", test_surv_recv_bad_state },
+ { "survey recv garbage", test_surv_recv_garbage },
+ { "survey respondent exchange", test_surv_resp_exchange },
+ { "survey cancel", test_surv_cancel },
+ { "survey cancel abort recv", test_surv_cancel_abort_recv },
+ { "survey cancel post recv", test_surv_cancel_post_recv },
+ { "survey poll writable", test_surv_poll_writeable },
+ { "survey poll readable", test_surv_poll_readable },
+ { "survey context does not poll", test_surv_ctx_no_poll },
+ { "survey context recv close socket",
+ test_surv_ctx_recv_close_socket },
+ { "survey context recv nonblock", test_surv_ctx_recv_nonblock },
+ { "survey context send nonblock", test_surv_ctx_send_nonblock },
+ { "survey timeout", test_surv_survey_timeout },
+ { "survey send best effort", test_surv_send_best_effort },
+ { "survey context multi", test_surv_context_multi },
+ { "survey validate peer", test_surv_validate_peer },
+ { NULL, NULL },
+};
diff --git a/src/sp/protocol/survey0/xrespond.c b/src/sp/protocol/survey0/xrespond.c
new file mode 100644
index 00000000..b2f203c3
--- /dev/null
+++ b/src/sp/protocol/survey0/xrespond.c
@@ -0,0 +1,417 @@
+//
+// Copyright 2020 Staysail Systems, Inc. <info@staysail.tech>
+// Copyright 2018 Capitar IT Group BV <info@capitar.com>
+//
+// This software is supplied under the terms of the MIT License, a
+// copy of which should be located in the distribution where this
+// file was obtained (LICENSE.txt). A copy of the license may also be
+// found online at https://opensource.org/licenses/MIT.
+//
+
+#include <stdlib.h>
+
+#include "core/nng_impl.h"
+#include "nng/protocol/survey0/respond.h"
+
+// Respondent protocol. The RESPONDENT protocol is the "replier" side of
+// the surveyor pattern. This is useful for building service discovery, or
+// voting algorithms, for example.
+
+#ifndef NNI_PROTO_SURVEYOR_V0
+#define NNI_PROTO_SURVEYOR_V0 NNI_PROTO(6, 2)
+#endif
+
+#ifndef NNI_PROTO_RESPONDENT_V0
+#define NNI_PROTO_RESPONDENT_V0 NNI_PROTO(6, 3)
+#endif
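+// These evaluate to SP protocol numbers 0x62 (surveyor) and 0x63
+// (respondent): protocol family 6, sub-protocols 2 and 3.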
+
+typedef struct xresp0_pipe xresp0_pipe;
+typedef struct xresp0_sock xresp0_sock;
+
+static void xresp0_recv_cb(void *);
+static void xresp0_putq_cb(void *);
+static void xresp0_getq_cb(void *);
+static void xresp0_send_cb(void *);
+static void xresp0_sock_getq_cb(void *);
+static void xresp0_pipe_fini(void *);
+
+// xresp0_sock is our per-socket protocol private structure.
+struct xresp0_sock {
+ nni_msgq * urq;
+ nni_msgq * uwq;
+ nni_atomic_int ttl;
+ nni_id_map pipes;
+ nni_aio aio_getq;
+ nni_mtx mtx;
+};
+
+// xresp0_pipe is our per-pipe protocol private structure.
+struct xresp0_pipe {
+ nni_pipe * npipe;
+ xresp0_sock *psock;
+ uint32_t id;
+ nni_msgq * sendq;
+ nni_aio aio_getq;
+ nni_aio aio_putq;
+ nni_aio aio_send;
+ nni_aio aio_recv;
+};
+
+static void
+xresp0_sock_fini(void *arg)
+{
+ xresp0_sock *s = arg;
+
+ nni_aio_fini(&s->aio_getq);
+ nni_id_map_fini(&s->pipes);
+ nni_mtx_fini(&s->mtx);
+}
+
+static int
+xresp0_sock_init(void *arg, nni_sock *nsock)
+{
+ xresp0_sock *s = arg;
+
+ nni_mtx_init(&s->mtx);
+ nni_atomic_init(&s->ttl);
+ nni_atomic_set(&s->ttl, 8); // Per RFC
+ nni_id_map_init(&s->pipes, 0, 0, false);
+ nni_aio_init(&s->aio_getq, xresp0_sock_getq_cb, s);
+
+ s->urq = nni_sock_recvq(nsock);
+ s->uwq = nni_sock_sendq(nsock);
+
+ return (0);
+}
+
+static void
+xresp0_sock_open(void *arg)
+{
+ xresp0_sock *s = arg;
+
+ nni_msgq_aio_get(s->uwq, &s->aio_getq);
+}
+
+static void
+xresp0_sock_close(void *arg)
+{
+ xresp0_sock *s = arg;
+
+ nni_aio_close(&s->aio_getq);
+}
+
+static void
+xresp0_pipe_stop(void *arg)
+{
+ xresp0_pipe *p = arg;
+
+ nni_aio_stop(&p->aio_putq);
+ nni_aio_stop(&p->aio_getq);
+ nni_aio_stop(&p->aio_send);
+ nni_aio_stop(&p->aio_recv);
+}
+
+static void
+xresp0_pipe_fini(void *arg)
+{
+ xresp0_pipe *p = arg;
+
+ nni_aio_fini(&p->aio_putq);
+ nni_aio_fini(&p->aio_getq);
+ nni_aio_fini(&p->aio_send);
+ nni_aio_fini(&p->aio_recv);
+ nni_msgq_fini(p->sendq);
+}
+
+static int
+xresp0_pipe_init(void *arg, nni_pipe *npipe, void *s)
+{
+ xresp0_pipe *p = arg;
+ int rv;
+
+ nni_aio_init(&p->aio_putq, xresp0_putq_cb, p);
+ nni_aio_init(&p->aio_recv, xresp0_recv_cb, p);
+ nni_aio_init(&p->aio_getq, xresp0_getq_cb, p);
+ nni_aio_init(&p->aio_send, xresp0_send_cb, p);
+
+ if ((rv = nni_msgq_init(&p->sendq, 2)) != 0) {
+ xresp0_pipe_fini(p);
+ return (rv);
+ }
+
+ p->npipe = npipe;
+ p->psock = s;
+ return (0);
+}
+
+static int
+xresp0_pipe_start(void *arg)
+{
+ xresp0_pipe *p = arg;
+ xresp0_sock *s = p->psock;
+ int rv;
+
+ if (nni_pipe_peer(p->npipe) != NNI_PROTO_SURVEYOR_V0) {
+ return (NNG_EPROTO);
+ }
+
+ p->id = nni_pipe_id(p->npipe);
+
+ nni_mtx_lock(&s->mtx);
+ rv = nni_id_set(&s->pipes, p->id, p);
+ nni_mtx_unlock(&s->mtx);
+ if (rv != 0) {
+ return (rv);
+ }
+
+ nni_pipe_recv(p->npipe, &p->aio_recv);
+ nni_msgq_aio_get(p->sendq, &p->aio_getq);
+
+ return (rv);
+}
+
+static void
+xresp0_pipe_close(void *arg)
+{
+ xresp0_pipe *p = arg;
+ xresp0_sock *s = p->psock;
+
+ nni_aio_close(&p->aio_putq);
+ nni_aio_close(&p->aio_getq);
+ nni_aio_close(&p->aio_send);
+ nni_aio_close(&p->aio_recv);
+
+ nni_msgq_close(p->sendq);
+
+ nni_mtx_lock(&s->mtx);
+ nni_id_remove(&s->pipes, p->id);
+ nni_mtx_unlock(&s->mtx);
+}
+
+// The send path watches for messages from the upper write queue,
+// extracts the destination pipe ID, and forwards each message to the
+// matching pipe via a separate per-pipe queue. This prevents a single
+// bad or slow pipe from gumming up the works for the entire socket.
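+// In raw mode the application supplies the full header itself: the
+// destination pipe ID first, then the backtrace hops, ending with the
+// survey ID (high bit set). Only the pipe ID is consumed here; the
+// rest of the header travels back out on the wire unchanged.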
+
+void
+xresp0_sock_getq_cb(void *arg)
+{
+ xresp0_sock *s = arg;
+ nni_msg * msg;
+ uint32_t id;
+ xresp0_pipe *p;
+
+ if (nni_aio_result(&s->aio_getq) != 0) {
+ return;
+ }
+ msg = nni_aio_get_msg(&s->aio_getq);
+ nni_aio_set_msg(&s->aio_getq, NULL);
+
+ // We yank the outgoing pipe id from the header
+ if (nni_msg_header_len(msg) < 4) {
+ nni_msg_free(msg);
+ // We can't really close down the socket, so just keep going.
+ nni_msgq_aio_get(s->uwq, &s->aio_getq);
+ return;
+ }
+ id = nni_msg_header_trim_u32(msg);
+
+ nni_mtx_lock(&s->mtx);
+ // Look for the pipe, and attempt to put the message there
+ // (nonblocking) if we can. If we can't for any reason, then we
+ // free the message.
+ if (((p = nni_id_get(&s->pipes, id)) == NULL) ||
+ (nni_msgq_tryput(p->sendq, msg) != 0)) {
+ nni_msg_free(msg);
+ }
+ nni_mtx_unlock(&s->mtx);
+ nni_msgq_aio_get(s->uwq, &s->aio_getq);
+}
+
+void
+xresp0_getq_cb(void *arg)
+{
+ xresp0_pipe *p = arg;
+
+ if (nni_aio_result(&p->aio_getq) != 0) {
+ nni_pipe_close(p->npipe);
+ return;
+ }
+
+ nni_aio_set_msg(&p->aio_send, nni_aio_get_msg(&p->aio_getq));
+ nni_aio_set_msg(&p->aio_getq, NULL);
+
+ nni_pipe_send(p->npipe, &p->aio_send);
+}
+
+void
+xresp0_send_cb(void *arg)
+{
+ xresp0_pipe *p = arg;
+
+ if (nni_aio_result(&p->aio_send) != 0) {
+ nni_msg_free(nni_aio_get_msg(&p->aio_send));
+ nni_aio_set_msg(&p->aio_send, NULL);
+ nni_pipe_close(p->npipe);
+ return;
+ }
+
+ nni_msgq_aio_get(p->sendq, &p->aio_getq);
+}
+
+static void
+xresp0_recv_cb(void *arg)
+{
+ xresp0_pipe *p = arg;
+ xresp0_sock *s = p->psock;
+ nni_msgq * urq = s->urq;
+ nni_msg * msg;
+ int hops;
+ int ttl;
+
+ if (nni_aio_result(&p->aio_recv) != 0) {
+ nni_pipe_close(p->npipe);
+ return;
+ }
+
+ ttl = nni_atomic_get(&s->ttl);
+ msg = nni_aio_get_msg(&p->aio_recv);
+ nni_aio_set_msg(&p->aio_recv, NULL);
+ nni_msg_set_pipe(msg, p->id);
+
+ // Store the pipe id in the header, first thing.
+ nni_msg_header_append_u32(msg, p->id);
+
+ // Move backtrace from body to header
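+ // On entry the body is a series of 4-byte hop entries terminated by
+ // the survey ID (high bit set), followed by the payload. The loop
+ // below copies each entry into the header until it sees the ID,
+ // leaving only the payload in the body.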
+ hops = 1;
+ for (;;) {
+ bool end;
+ uint8_t *body;
+
+ if (hops > ttl) {
+ goto drop;
+ }
+ hops++;
+ if (nni_msg_len(msg) < 4) {
+ // Peer sent us garbage, so kick it.
+ nni_msg_free(msg);
+ nni_pipe_close(p->npipe);
+ return;
+ }
+ body = nni_msg_body(msg);
+ end = ((body[0] & 0x80u) != 0);
+ if (nni_msg_header_append(msg, body, 4) != 0) {
+ goto drop;
+ }
+ nni_msg_trim(msg, 4);
+ if (end) {
+ break;
+ }
+ }
+
+ // Now send it up.
+ nni_aio_set_msg(&p->aio_putq, msg);
+ nni_msgq_aio_put(urq, &p->aio_putq);
+ return;
+
+drop:
+ nni_msg_free(msg);
+ nni_pipe_recv(p->npipe, &p->aio_recv);
+}
+
+static void
+xresp0_putq_cb(void *arg)
+{
+ xresp0_pipe *p = arg;
+
+ if (nni_aio_result(&p->aio_putq) != 0) {
+ nni_msg_free(nni_aio_get_msg(&p->aio_putq));
+ nni_aio_set_msg(&p->aio_putq, NULL);
+ nni_pipe_close(p->npipe);
+ return;
+ }
+
+ nni_pipe_recv(p->npipe, &p->aio_recv);
+}
+
+static int
+xresp0_sock_set_maxttl(void *arg, const void *buf, size_t sz, nni_opt_type t)
+{
+ xresp0_sock *s = arg;
+ int ttl;
+ int rv;
+ if ((rv = nni_copyin_int(&ttl, buf, sz, 1, NNI_MAX_MAX_TTL, t)) == 0) {
+ nni_atomic_set(&s->ttl, ttl);
+ }
+ return (rv);
+}
+
+static int
+xresp0_sock_get_maxttl(void *arg, void *buf, size_t *szp, nni_opt_type t)
+{
+ xresp0_sock *s = arg;
+ return (nni_copyout_int(nni_atomic_get(&s->ttl), buf, szp, t));
+}
+
+static void
+xresp0_sock_send(void *arg, nni_aio *aio)
+{
+ xresp0_sock *s = arg;
+
+ nni_msgq_aio_put(s->uwq, aio);
+}
+
+static void
+xresp0_sock_recv(void *arg, nni_aio *aio)
+{
+ xresp0_sock *s = arg;
+
+ nni_msgq_aio_get(s->urq, aio);
+}
+
+static nni_proto_pipe_ops xresp0_pipe_ops = {
+ .pipe_size = sizeof(xresp0_pipe),
+ .pipe_init = xresp0_pipe_init,
+ .pipe_fini = xresp0_pipe_fini,
+ .pipe_start = xresp0_pipe_start,
+ .pipe_close = xresp0_pipe_close,
+ .pipe_stop = xresp0_pipe_stop,
+};
+
+static nni_option xresp0_sock_options[] = {
+ {
+ .o_name = NNG_OPT_MAXTTL,
+ .o_get = xresp0_sock_get_maxttl,
+ .o_set = xresp0_sock_set_maxttl,
+ },
+ // terminate list
+ {
+ .o_name = NULL,
+ },
+};
+
+static nni_proto_sock_ops xresp0_sock_ops = {
+ .sock_size = sizeof(xresp0_sock),
+ .sock_init = xresp0_sock_init,
+ .sock_fini = xresp0_sock_fini,
+ .sock_open = xresp0_sock_open,
+ .sock_close = xresp0_sock_close,
+ .sock_send = xresp0_sock_send,
+ .sock_recv = xresp0_sock_recv,
+ .sock_options = xresp0_sock_options,
+};
+
+static nni_proto xresp0_proto = {
+ .proto_version = NNI_PROTOCOL_VERSION,
+ .proto_self = { NNI_PROTO_RESPONDENT_V0, "respondent" },
+ .proto_peer = { NNI_PROTO_SURVEYOR_V0, "surveyor" },
+ .proto_flags = NNI_PROTO_FLAG_SNDRCV | NNI_PROTO_FLAG_RAW,
+ .proto_sock_ops = &xresp0_sock_ops,
+ .proto_pipe_ops = &xresp0_pipe_ops,
+};
+
+int
+nng_respondent0_open_raw(nng_socket *sidp)
+{
+ return (nni_proto_open(sidp, &xresp0_proto));
+}
diff --git a/src/sp/protocol/survey0/xrespond_test.c b/src/sp/protocol/survey0/xrespond_test.c
new file mode 100644
index 00000000..ec5e99a3
--- /dev/null
+++ b/src/sp/protocol/survey0/xrespond_test.c
@@ -0,0 +1,436 @@
+//
+// Copyright 2020 Staysail Systems, Inc. <info@staysail.tech>
+//
+// This software is supplied under the terms of the MIT License, a
+// copy of which should be located in the distribution where this
+// file was obtained (LICENSE.txt). A copy of the license may also be
+// found online at https://opensource.org/licenses/MIT.
+//
+
+#include <nuts.h>
+
+static void
+test_xresp_identity(void)
+{
+ nng_socket s;
+ int p1, p2;
+ char * n1;
+ char * n2;
+
+ NUTS_PASS(nng_respondent0_open_raw(&s));
+ NUTS_PASS(nng_socket_get_int(s, NNG_OPT_PROTO, &p1));
+ NUTS_PASS(nng_socket_get_int(s, NNG_OPT_PEER, &p2));
+ NUTS_PASS(nng_socket_get_string(s, NNG_OPT_PROTONAME, &n1));
+ NUTS_PASS(nng_socket_get_string(s, NNG_OPT_PEERNAME, &n2));
+ NUTS_CLOSE(s);
+ NUTS_TRUE(p1 == NNG_RESPONDENT0_SELF);
+ NUTS_TRUE(p2 == NNG_RESPONDENT0_PEER);
+ NUTS_MATCH(n1, NNG_RESPONDENT0_SELF_NAME);
+ NUTS_MATCH(n2, NNG_RESPONDENT0_PEER_NAME);
+ nng_strfree(n1);
+ nng_strfree(n2);
+}
+
+static void
+test_xresp_raw(void)
+{
+ nng_socket s;
+ bool b;
+
+ NUTS_PASS(nng_respondent0_open_raw(&s));
+ NUTS_PASS(nng_socket_get_bool(s, NNG_OPT_RAW, &b));
+ NUTS_TRUE(b);
+ NUTS_CLOSE(s);
+}
+
+static void
+test_xresp_no_context(void)
+{
+ nng_socket s;
+ nng_ctx ctx;
+
+ NUTS_PASS(nng_respondent0_open_raw(&s));
+ NUTS_FAIL(nng_ctx_open(&ctx, s), NNG_ENOTSUP);
+ NUTS_CLOSE(s);
+}
+
+static void
+test_xresp_poll_writeable(void)
+{
+ int fd;
+ nng_socket surv;
+ nng_socket resp;
+
+ NUTS_PASS(nng_respondent0_open_raw(&resp));
+ NUTS_PASS(nng_surveyor0_open(&surv));
+ NUTS_PASS(nng_socket_get_int(resp, NNG_OPT_SENDFD, &fd));
+ NUTS_TRUE(fd >= 0);
+
+ // We are always writable, even before connect. This is so that
+ // back-pressure from a bad peer can't trash others. We assume
+ // that peers won't send us surveys faster than they can consume
+ // the responses. If they do, they will lose their responses.
+ NUTS_TRUE(nuts_poll_fd(fd) == true);
+
+ NUTS_MARRY(surv, resp);
+
+ // Still writable after connect.
+ NUTS_TRUE(nuts_poll_fd(fd) == true);
+
+ NUTS_CLOSE(surv);
+ NUTS_CLOSE(resp);
+}
+
+static void
+test_xresp_poll_readable(void)
+{
+ int fd;
+ nng_socket surv;
+ nng_socket resp;
+ nng_msg * msg;
+
+ NUTS_PASS(nng_surveyor0_open(&surv));
+ NUTS_PASS(nng_respondent0_open_raw(&resp));
+ NUTS_PASS(nng_socket_get_int(resp, NNG_OPT_RECVFD, &fd));
+ NUTS_TRUE(fd >= 0);
+
+ // Not readable if not connected!
+ NUTS_TRUE(nuts_poll_fd(fd) == false);
+
+ // Even after connect (no message yet)
+ NUTS_MARRY(surv, resp);
+ NUTS_TRUE(nuts_poll_fd(fd) == false);
+
+ // But once we send messages, it is.
+ // We have to send a survey so the respondent has something to receive.
+ NUTS_SEND(surv, "abc");
+ NUTS_SLEEP(100);
+
+ NUTS_TRUE(nuts_poll_fd(fd) == true);
+
+ // and receiving makes it no longer ready
+ NUTS_PASS(nng_recvmsg(resp, &msg, 0));
+ nng_msg_free(msg);
+ NUTS_TRUE(nuts_poll_fd(fd) == false);
+
+ NUTS_CLOSE(surv);
+ NUTS_CLOSE(resp);
+}
+
+static void
+test_xresp_validate_peer(void)
+{
+ nng_socket s1, s2;
+ nng_stat * stats;
+ nng_stat * reject;
+ char * addr;
+
+ NUTS_ADDR(addr, "inproc");
+
+ NUTS_PASS(nng_respondent0_open_raw(&s1));
+ NUTS_PASS(nng_respondent0_open(&s2));
+
+ NUTS_PASS(nng_listen(s1, addr, NULL, 0));
+ NUTS_PASS(nng_dial(s2, addr, NULL, NNG_FLAG_NONBLOCK));
+
+ NUTS_SLEEP(100);
+ NUTS_PASS(nng_stats_get(&stats));
+
+ NUTS_TRUE(stats != NULL);
+ NUTS_TRUE((reject = nng_stat_find_socket(stats, s1)) != NULL);
+ NUTS_TRUE((reject = nng_stat_find(reject, "reject")) != NULL);
+
+ NUTS_TRUE(nng_stat_type(reject) == NNG_STAT_COUNTER);
+ NUTS_TRUE(nng_stat_value(reject) > 0);
+
+ NUTS_CLOSE(s1);
+ NUTS_CLOSE(s2);
+ nng_stats_free(stats);
+}
+
+static void
+test_xresp_close_pipe_before_send(void)
+{
+ nng_socket resp;
+ nng_socket surv;
+ nng_pipe p;
+ nng_aio * aio1;
+ nng_msg * m;
+
+ NUTS_PASS(nng_respondent0_open_raw(&resp));
+ NUTS_PASS(nng_surveyor0_open(&surv));
+ NUTS_PASS(nng_socket_set_ms(resp, NNG_OPT_RECVTIMEO, 1000));
+ NUTS_PASS(nng_socket_set_ms(resp, NNG_OPT_SENDTIMEO, 1000));
+ NUTS_PASS(nng_socket_set_ms(surv, NNG_OPT_SENDTIMEO, 1000));
+ NUTS_PASS(nng_aio_alloc(&aio1, NULL, NULL));
+
+ NUTS_MARRY(surv, resp);
+ NUTS_SEND(surv, "test");
+
+ nng_recv_aio(resp, aio1);
+ nng_aio_wait(aio1);
+ NUTS_PASS(nng_aio_result(aio1));
+ NUTS_TRUE((m = nng_aio_get_msg(aio1)) != NULL);
+ p = nng_msg_get_pipe(m);
+ NUTS_PASS(nng_pipe_close(p));
+ NUTS_PASS(nng_sendmsg(resp, m, 0));
+
+ NUTS_CLOSE(surv);
+ NUTS_CLOSE(resp);
+ nng_aio_free(aio1);
+}
+
+static void
+test_xresp_close_pipe_during_send(void)
+{
+ nng_socket resp;
+ nng_socket surv;
+ nng_pipe p;
+ nng_msg * m;
+
+ NUTS_PASS(nng_respondent0_open_raw(&resp));
+ NUTS_PASS(nng_surveyor0_open_raw(&surv));
+ NUTS_PASS(nng_socket_set_ms(resp, NNG_OPT_RECVTIMEO, 1000));
+ NUTS_PASS(nng_socket_set_ms(resp, NNG_OPT_SENDTIMEO, 200));
+ NUTS_PASS(nng_socket_set_ms(surv, NNG_OPT_SENDTIMEO, 1000));
+ NUTS_PASS(nng_socket_set_int(resp, NNG_OPT_SENDBUF, 20));
+ NUTS_PASS(nng_socket_set_int(resp, NNG_OPT_RECVBUF, 20));
+ NUTS_PASS(nng_socket_set_int(surv, NNG_OPT_SENDBUF, 20));
+ NUTS_PASS(nng_socket_set_int(surv, NNG_OPT_RECVBUF, 1));
+
+ NUTS_MARRY(surv, resp);
+
+ NUTS_PASS(nng_msg_alloc(&m, 4));
+ NUTS_PASS(nng_msg_append_u32(m, (unsigned) 0x81000000u));
+ NUTS_PASS(nng_sendmsg(surv, m, 0));
+ NUTS_PASS(nng_recvmsg(resp, &m, 0));
+ p = nng_msg_get_pipe(m);
+ nng_msg_free(m);
+
+ for (int i = 0; i < 100; i++) {
+ NUTS_PASS(nng_msg_alloc(&m, 4));
+ NUTS_PASS(nng_msg_header_append_u32(m, nng_pipe_id(p)));
+ NUTS_PASS(
+ nng_msg_header_append_u32(m, (unsigned) i | 0x80000000u));
+ // protocol does not exert back-pressure
+ NUTS_PASS(nng_sendmsg(resp, m, 0));
+ }
+ NUTS_PASS(nng_pipe_close(p));
+
+ NUTS_CLOSE(surv);
+ NUTS_CLOSE(resp);
+}
+
+static void
+test_xresp_close_during_recv(void)
+{
+ nng_socket resp;
+ nng_socket surv;
+ nng_msg * m;
+
+ NUTS_PASS(nng_respondent0_open_raw(&resp));
+ NUTS_PASS(nng_surveyor0_open_raw(&surv));
+ NUTS_PASS(nng_socket_set_ms(surv, NNG_OPT_RECVTIMEO, 1000));
+ NUTS_PASS(nng_socket_set_ms(surv, NNG_OPT_SENDTIMEO, 100));
+ NUTS_PASS(nng_socket_set_int(resp, NNG_OPT_RECVBUF, 5));
+ NUTS_PASS(nng_socket_set_int(surv, NNG_OPT_SENDBUF, 20));
+
+ NUTS_MARRY(surv, resp);
+
+ for (unsigned i = 0; i < 100; i++) {
+ int rv;
+ NUTS_PASS(nng_msg_alloc(&m, 4));
+ NUTS_PASS(nng_msg_header_append_u32(m, i | 0x80000000u));
+ rv = nng_sendmsg(surv, m, 0);
+ if (rv == NNG_ETIMEDOUT) {
+ nng_msg_free(m);
+ break;
+ }
+ }
+ NUTS_CLOSE(surv);
+ NUTS_CLOSE(resp);
+}
+
+static void
+test_xresp_recv_aio_stopped(void)
+{
+ nng_socket resp;
+ nng_aio * aio;
+
+ NUTS_PASS(nng_respondent0_open_raw(&resp));
+ NUTS_PASS(nng_aio_alloc(&aio, NULL, NULL));
+
+ nng_aio_stop(aio);
+ nng_recv_aio(resp, aio);
+ nng_aio_wait(aio);
+ NUTS_FAIL(nng_aio_result(aio), NNG_ECANCELED);
+ NUTS_CLOSE(resp);
+ nng_aio_free(aio);
+}
+
+static void
+test_xresp_send_no_header(void)
+{
+ nng_socket resp;
+ nng_socket surv;
+ nng_msg * m;
+
+ NUTS_PASS(nng_surveyor0_open_raw(&surv));
+ NUTS_PASS(nng_respondent0_open_raw(&resp));
+ NUTS_PASS(nng_socket_set_ms(resp, NNG_OPT_RECVTIMEO, 100));
+ NUTS_PASS(nng_socket_set_ms(surv, NNG_OPT_RECVTIMEO, 100));
+ NUTS_PASS(nng_socket_set_ms(resp, NNG_OPT_SENDTIMEO, 1000));
+ NUTS_PASS(nng_socket_set_ms(surv, NNG_OPT_SENDTIMEO, 1000));
+
+ NUTS_MARRY(surv, resp);
+
+ NUTS_PASS(nng_msg_alloc(&m, 4));
+ NUTS_PASS(nng_sendmsg(resp, m, 0));
+ NUTS_FAIL(nng_recvmsg(resp, &m, 0), NNG_ETIMEDOUT);
+
+ NUTS_CLOSE(surv);
+ NUTS_CLOSE(resp);
+}
+
+static void
+test_xresp_recv_garbage(void)
+{
+ nng_socket resp;
+ nng_socket surv;
+ nng_msg * m;
+
+ NUTS_PASS(nng_respondent0_open_raw(&resp));
+ NUTS_PASS(nng_surveyor0_open_raw(&surv));
+ NUTS_PASS(nng_socket_set_ms(resp, NNG_OPT_RECVTIMEO, 100));
+ NUTS_PASS(nng_socket_set_ms(resp, NNG_OPT_SENDTIMEO, 100));
+ NUTS_PASS(nng_socket_set_ms(surv, NNG_OPT_SENDTIMEO, 1000));
+
+ NUTS_MARRY(surv, resp);
+
+ NUTS_PASS(nng_msg_alloc(&m, 4));
+ NUTS_PASS(nng_msg_append_u32(m, 1u));
+ NUTS_PASS(nng_sendmsg(surv, m, 0));
+ NUTS_FAIL(nng_recvmsg(resp, &m, 0), NNG_ETIMEDOUT);
+
+ NUTS_CLOSE(surv);
+ NUTS_CLOSE(resp);
+}
+
+static void
+test_xresp_ttl_option(void)
+{
+ nng_socket resp;
+ int v;
+ bool b;
+ size_t sz;
+ const char *opt = NNG_OPT_MAXTTL;
+
+ NUTS_PASS(nng_respondent0_open_raw(&resp));
+
+ NUTS_PASS(nng_socket_set_int(resp, opt, 1));
+ NUTS_FAIL(nng_socket_set_int(resp, opt, 0), NNG_EINVAL);
+ NUTS_FAIL(nng_socket_set_int(resp, opt, -1), NNG_EINVAL);
+ NUTS_FAIL(nng_socket_set_int(resp, opt, 16), NNG_EINVAL);
+ NUTS_FAIL(nng_socket_set_int(resp, opt, 256), NNG_EINVAL);
+ NUTS_PASS(nng_socket_set_int(resp, opt, 3));
+ NUTS_PASS(nng_socket_get_int(resp, opt, &v));
+ NUTS_TRUE(v == 3);
+ v = 0;
+ sz = sizeof(v);
+ NUTS_PASS(nng_socket_get(resp, opt, &v, &sz));
+ NUTS_TRUE(v == 3);
+ NUTS_TRUE(sz == sizeof(v));
+
+ NUTS_FAIL(nng_socket_set(resp, opt, "", 1), NNG_EINVAL);
+ sz = 1;
+ NUTS_FAIL(nng_socket_get(resp, opt, &v, &sz), NNG_EINVAL);
+ NUTS_FAIL(nng_socket_set_bool(resp, opt, true), NNG_EBADTYPE);
+ NUTS_FAIL(nng_socket_get_bool(resp, opt, &b), NNG_EBADTYPE);
+
+ NUTS_CLOSE(resp);
+}
+
+static void
+test_xresp_ttl_drop(void)
+{
+ nng_socket resp;
+ nng_socket surv;
+ nng_msg * m;
+
+ NUTS_PASS(nng_respondent0_open_raw(&resp));
+ NUTS_PASS(nng_surveyor0_open_raw(&surv));
+ NUTS_PASS(nng_socket_set_int(resp, NNG_OPT_MAXTTL, 3));
+ NUTS_PASS(nng_socket_set_ms(resp, NNG_OPT_RECVTIMEO, 200));
+ NUTS_PASS(nng_socket_set_ms(surv, NNG_OPT_SENDTIMEO, 1000));
+
+ NUTS_MARRY(surv, resp);
+
+ // Send messages. Note that xresp implicitly adds a hop on receive.
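+ // With NNG_OPT_MAXTTL set to 3 above, a message whose body carries
+ // more than three 32-bit words (backtrace hops plus the terminating
+ // survey ID) is dropped, because the receiving hop itself counts
+ // toward the limit.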
+
+ NUTS_PASS(nng_msg_alloc(&m, 0));
+ NUTS_PASS(nng_msg_append_u32(m, 1u)); // 2 hops
+ NUTS_PASS(nng_msg_append_u32(m, 0x80000001u));
+ NUTS_PASS(nng_msg_append(m, "PASS1", 6));
+ NUTS_PASS(nng_sendmsg(surv, m, 0));
+
+ NUTS_PASS(nng_msg_alloc(&m, 0));
+ NUTS_PASS(nng_msg_append_u32(m, 1u)); // 4 hops -- discard!
+ NUTS_PASS(nng_msg_append_u32(m, 2u));
+ NUTS_PASS(nng_msg_append_u32(m, 3u));
+ NUTS_PASS(nng_msg_append_u32(m, 0x80000002u));
+ NUTS_PASS(nng_msg_append(m, "FAIL2", 6));
+ NUTS_PASS(nng_sendmsg(surv, m, 0));
+
+ NUTS_PASS(nng_msg_alloc(&m, 0));
+ NUTS_PASS(nng_msg_append_u32(m, 1u)); // 3 hops - passes
+ NUTS_PASS(nng_msg_append_u32(m, 2u));
+ NUTS_PASS(nng_msg_append_u32(m, 0x80000003u));
+ NUTS_PASS(nng_msg_append(m, "PASS3", 6));
+ NUTS_PASS(nng_sendmsg(surv, m, 0));
+
+ NUTS_PASS(nng_msg_alloc(&m, 0));
+ NUTS_PASS(nng_msg_append_u32(m, 1u)); // 4 hops -- discard!
+ NUTS_PASS(nng_msg_append_u32(m, 2u));
+ NUTS_PASS(nng_msg_append_u32(m, 3u));
+ NUTS_PASS(nng_msg_append_u32(m, 0x80000003u));
+ NUTS_PASS(nng_msg_append(m, "FAIL4", 6));
+ NUTS_PASS(nng_sendmsg(surv, m, 0));
+
+ // So on receive we should see 80000001 and 80000003.
+ NUTS_PASS(nng_recvmsg(resp, &m, 0));
+ NUTS_TRUE(nng_msg_header_len(m) == 12);
+ NUTS_TRUE(nng_msg_len(m) == 6);
+ NUTS_MATCH(nng_msg_body(m), "PASS1");
+ nng_msg_free(m);
+
+ NUTS_PASS(nng_recvmsg(resp, &m, 0));
+ NUTS_TRUE(nng_msg_header_len(m) == 16); // pipe ID + 2 hops + survey ID
+ NUTS_TRUE(nng_msg_len(m) == 6);
+ NUTS_MATCH(nng_msg_body(m), "PASS3");
+ nng_msg_free(m);
+
+ NUTS_FAIL(nng_recvmsg(resp, &m, 0), NNG_ETIMEDOUT);
+
+ NUTS_CLOSE(surv);
+ NUTS_CLOSE(resp);
+}
+
+NUTS_TESTS = {
+ { "xrespond identity", test_xresp_identity },
+ { "xrespond raw", test_xresp_raw },
+ { "xrespond no context", test_xresp_no_context },
+ { "xrespond poll readable", test_xresp_poll_readable },
+ { "xrespond poll writable", test_xresp_poll_writeable },
+ { "xrespond validate peer", test_xresp_validate_peer },
+ { "xrespond close pipe before send",
+ test_xresp_close_pipe_before_send },
+ { "xrespond close pipe during send",
+ test_xresp_close_pipe_during_send },
+ { "xrespond close during recv", test_xresp_close_during_recv },
+ { "xrespond recv aio stopped", test_xresp_recv_aio_stopped },
+ { "xrespond send no header", test_xresp_send_no_header },
+ { "xrespond recv garbage", test_xresp_recv_garbage },
+ { "xrespond ttl option", test_xresp_ttl_option },
+ { "xrespond ttl drop", test_xresp_ttl_drop },
+ { NULL, NULL },
+};
diff --git a/src/sp/protocol/survey0/xsurvey.c b/src/sp/protocol/survey0/xsurvey.c
new file mode 100644
index 00000000..2a198662
--- /dev/null
+++ b/src/sp/protocol/survey0/xsurvey.c
@@ -0,0 +1,379 @@
+//
+// Copyright 2020 Staysail Systems, Inc. <info@staysail.tech>
+// Copyright 2018 Capitar IT Group BV <info@capitar.com>
+//
+// This software is supplied under the terms of the MIT License, a
+// copy of which should be located in the distribution where this
+// file was obtained (LICENSE.txt). A copy of the license may also be
+// found online at https://opensource.org/licenses/MIT.
+//
+
+#include "core/nng_impl.h"
+#include "nng/protocol/survey0/survey.h"
+
+// Surveyor protocol. The SURVEYOR protocol is the "survey" side of the
+// survey pattern. This is useful for building service discovery, voting, etc.
+
+typedef struct xsurv0_pipe xsurv0_pipe;
+typedef struct xsurv0_sock xsurv0_sock;
+
+static void xsurv0_sock_getq_cb(void *);
+static void xsurv0_getq_cb(void *);
+static void xsurv0_putq_cb(void *);
+static void xsurv0_send_cb(void *);
+static void xsurv0_recv_cb(void *);
+
+// xsurv0_sock is our per-socket protocol private structure.
+struct xsurv0_sock {
+ nni_list pipes;
+ nni_aio aio_getq;
+ nni_msgq * uwq;
+ nni_msgq * urq;
+ nni_mtx mtx;
+ nni_atomic_int ttl;
+};
+
+// xsurv0_pipe is our per-pipe protocol private structure.
+struct xsurv0_pipe {
+ nni_pipe * npipe;
+ xsurv0_sock * psock;
+ nni_msgq * sendq;
+ nni_list_node node;
+ nni_aio aio_getq;
+ nni_aio aio_putq;
+ nni_aio aio_send;
+ nni_aio aio_recv;
+};
+
+static void
+xsurv0_sock_fini(void *arg)
+{
+ xsurv0_sock *s = arg;
+
+ nni_aio_fini(&s->aio_getq);
+ nni_mtx_fini(&s->mtx);
+}
+
+static int
+xsurv0_sock_init(void *arg, nni_sock *nsock)
+{
+ xsurv0_sock *s = arg;
+
+ nni_aio_init(&s->aio_getq, xsurv0_sock_getq_cb, s);
+ NNI_LIST_INIT(&s->pipes, xsurv0_pipe, node);
+ nni_mtx_init(&s->mtx);
+
+ s->uwq = nni_sock_sendq(nsock);
+ s->urq = nni_sock_recvq(nsock);
+ nni_atomic_init(&s->ttl);
+ nni_atomic_set(&s->ttl, 8);
+
+ return (0);
+}
+
+static void
+xsurv0_sock_open(void *arg)
+{
+ xsurv0_sock *s = arg;
+
+ nni_msgq_aio_get(s->uwq, &s->aio_getq);
+}
+
+static void
+xsurv0_sock_close(void *arg)
+{
+ xsurv0_sock *s = arg;
+
+ nni_aio_close(&s->aio_getq);
+}
+
+static void
+xsurv0_pipe_stop(void *arg)
+{
+ xsurv0_pipe *p = arg;
+
+ nni_aio_stop(&p->aio_getq);
+ nni_aio_stop(&p->aio_send);
+ nni_aio_stop(&p->aio_recv);
+ nni_aio_stop(&p->aio_putq);
+}
+
+static void
+xsurv0_pipe_fini(void *arg)
+{
+ xsurv0_pipe *p = arg;
+
+ nni_aio_fini(&p->aio_getq);
+ nni_aio_fini(&p->aio_send);
+ nni_aio_fini(&p->aio_recv);
+ nni_aio_fini(&p->aio_putq);
+ nni_msgq_fini(p->sendq);
+}
+
+static int
+xsurv0_pipe_init(void *arg, nni_pipe *npipe, void *s)
+{
+ xsurv0_pipe *p = arg;
+ int rv;
+
+ nni_aio_init(&p->aio_getq, xsurv0_getq_cb, p);
+ nni_aio_init(&p->aio_putq, xsurv0_putq_cb, p);
+ nni_aio_init(&p->aio_send, xsurv0_send_cb, p);
+ nni_aio_init(&p->aio_recv, xsurv0_recv_cb, p);
+
+ // This depth could be tunable. The queue exists so that if we
+ // have multiple requests coming in faster than we can deliver them,
+ // we try to avoid dropping them. We don't really have a solution
+ // for applying back pressure. It would be nice if surveys carried
+ // an expiration with them, so that we could discard any that are
+ // not delivered before their expiration date.
+ if ((rv = nni_msgq_init(&p->sendq, 16)) != 0) {
+ xsurv0_pipe_fini(p);
+ return (rv);
+ }
+
+ p->npipe = npipe;
+ p->psock = s;
+ return (0);
+}
+
+static int
+xsurv0_pipe_start(void *arg)
+{
+ xsurv0_pipe *p = arg;
+ xsurv0_sock *s = p->psock;
+
+ if (nni_pipe_peer(p->npipe) != NNG_SURVEYOR0_PEER) {
+ return (NNG_EPROTO);
+ }
+
+ nni_mtx_lock(&s->mtx);
+ nni_list_append(&s->pipes, p);
+ nni_mtx_unlock(&s->mtx);
+
+ nni_msgq_aio_get(p->sendq, &p->aio_getq);
+ nni_pipe_recv(p->npipe, &p->aio_recv);
+ return (0);
+}
+
+static void
+xsurv0_pipe_close(void *arg)
+{
+ xsurv0_pipe *p = arg;
+ xsurv0_sock *s = p->psock;
+
+ nni_aio_close(&p->aio_getq);
+ nni_aio_close(&p->aio_send);
+ nni_aio_close(&p->aio_recv);
+ nni_aio_close(&p->aio_putq);
+
+ nni_msgq_close(p->sendq);
+
+ nni_mtx_lock(&s->mtx);
+ if (nni_list_active(&s->pipes, p)) {
+ nni_list_remove(&s->pipes, p);
+ }
+ nni_mtx_unlock(&s->mtx);
+}
+
+static void
+xsurv0_getq_cb(void *arg)
+{
+ xsurv0_pipe *p = arg;
+
+ if (nni_aio_result(&p->aio_getq) != 0) {
+ nni_pipe_close(p->npipe);
+ return;
+ }
+
+ nni_aio_set_msg(&p->aio_send, nni_aio_get_msg(&p->aio_getq));
+ nni_aio_set_msg(&p->aio_getq, NULL);
+
+ nni_pipe_send(p->npipe, &p->aio_send);
+}
+
+static void
+xsurv0_send_cb(void *arg)
+{
+ xsurv0_pipe *p = arg;
+
+ if (nni_aio_result(&p->aio_send) != 0) {
+ nni_msg_free(nni_aio_get_msg(&p->aio_send));
+ nni_aio_set_msg(&p->aio_send, NULL);
+ nni_pipe_close(p->npipe);
+ return;
+ }
+
+ nni_msgq_aio_get(p->sendq, &p->aio_getq);
+}
+
+static void
+xsurv0_putq_cb(void *arg)
+{
+ xsurv0_pipe *p = arg;
+
+ if (nni_aio_result(&p->aio_putq) != 0) {
+ nni_msg_free(nni_aio_get_msg(&p->aio_putq));
+ nni_aio_set_msg(&p->aio_putq, NULL);
+ nni_pipe_close(p->npipe);
+ return;
+ }
+
+ nni_pipe_recv(p->npipe, &p->aio_recv);
+}
+
+static void
+xsurv0_recv_cb(void *arg)
+{
+ xsurv0_pipe *p = arg;
+ nni_msg * msg;
+ bool end;
+
+ if (nni_aio_result(&p->aio_recv) != 0) {
+ nni_pipe_close(p->npipe);
+ return;
+ }
+
+ msg = nni_aio_get_msg(&p->aio_recv);
+ nni_aio_set_msg(&p->aio_recv, NULL);
+ nni_msg_set_pipe(msg, nni_pipe_id(p->npipe));
+ end = false;
+
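+ // The response body begins with the backtrace: 4-byte hop entries
+ // terminated by the survey ID (high bit set). Move them all into
+ // the header so the upper layer sees only the payload.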
+ while (!end) {
+ uint8_t *body;
+
+ if (nni_msg_len(msg) < 4) {
+ // Peer gave us garbage, so kick it.
+ nni_msg_free(msg);
+ nni_pipe_close(p->npipe);
+ return;
+ }
+ body = nni_msg_body(msg);
+ end = ((body[0] & 0x80u) != 0);
+
+ if (nni_msg_header_append(msg, body, sizeof(uint32_t)) != 0) {
+ // TODO: bump a no-memory stat
+ nni_msg_free(msg);
+ // Closing the pipe may release some memory.
+ // It at least gives an indication to the peer
+ // that we've lost the message.
+ nni_pipe_close(p->npipe);
+ return;
+ }
+ nni_msg_trim(msg, sizeof(uint32_t));
+ }
+
+ nni_aio_set_msg(&p->aio_putq, msg);
+ nni_msgq_aio_put(p->psock->urq, &p->aio_putq);
+}
+
+static int
+xsurv0_sock_set_max_ttl(void *arg, const void *buf, size_t sz, nni_opt_type t)
+{
+ xsurv0_sock *s = arg;
+ int ttl;
+ int rv;
+ if ((rv = nni_copyin_int(&ttl, buf, sz, 1, NNI_MAX_MAX_TTL, t)) == 0) {
+ nni_atomic_set(&s->ttl, ttl);
+ }
+ return (rv);
+}
+
+static int
+xsurv0_sock_get_max_ttl(void *arg, void *buf, size_t *szp, nni_opt_type t)
+{
+ xsurv0_sock *s = arg;
+ return (nni_copyout_int(nni_atomic_get(&s->ttl), buf, szp, t));
+}
+
+static void
+xsurv0_sock_getq_cb(void *arg)
+{
+ xsurv0_sock *s = arg;
+ xsurv0_pipe *p;
+ nni_msg * msg;
+
+ if (nni_aio_result(&s->aio_getq) != 0) {
+ // Should be NNG_ECLOSED.
+ return;
+ }
+ msg = nni_aio_get_msg(&s->aio_getq);
+ nni_aio_set_msg(&s->aio_getq, NULL);
+
+ nni_mtx_lock(&s->mtx);
+ NNI_LIST_FOREACH (&s->pipes, p) {
+ nni_msg_clone(msg);
+ if (nni_msgq_tryput(p->sendq, msg) != 0) {
+ nni_msg_free(msg);
+ }
+ }
+
+ nni_msgq_aio_get(s->uwq, &s->aio_getq);
+ nni_mtx_unlock(&s->mtx);
+
+ // If there were no pipes to send on, just toss the message.
+ nni_msg_free(msg);
+}
+
+static void
+xsurv0_sock_recv(void *arg, nni_aio *aio)
+{
+ xsurv0_sock *s = arg;
+
+ nni_msgq_aio_get(s->urq, aio);
+}
+
+static void
+xsurv0_sock_send(void *arg, nni_aio *aio)
+{
+ xsurv0_sock *s = arg;
+
+ nni_msgq_aio_put(s->uwq, aio);
+}
+
+static nni_proto_pipe_ops xsurv0_pipe_ops = {
+ .pipe_size = sizeof(xsurv0_pipe),
+ .pipe_init = xsurv0_pipe_init,
+ .pipe_fini = xsurv0_pipe_fini,
+ .pipe_start = xsurv0_pipe_start,
+ .pipe_close = xsurv0_pipe_close,
+ .pipe_stop = xsurv0_pipe_stop,
+};
+
+static nni_option xsurv0_sock_options[] = {
+ {
+ .o_name = NNG_OPT_MAXTTL,
+ .o_get = xsurv0_sock_get_max_ttl,
+ .o_set = xsurv0_sock_set_max_ttl,
+ },
+ // terminate list
+ {
+ .o_name = NULL,
+ },
+};
+
+static nni_proto_sock_ops xsurv0_sock_ops = {
+ .sock_size = sizeof(xsurv0_sock),
+ .sock_init = xsurv0_sock_init,
+ .sock_fini = xsurv0_sock_fini,
+ .sock_open = xsurv0_sock_open,
+ .sock_close = xsurv0_sock_close,
+ .sock_send = xsurv0_sock_send,
+ .sock_recv = xsurv0_sock_recv,
+ .sock_options = xsurv0_sock_options,
+};
+
+static nni_proto xsurv0_proto = {
+ .proto_version = NNI_PROTOCOL_VERSION,
+ .proto_self = { NNG_SURVEYOR0_SELF, NNG_SURVEYOR0_SELF_NAME },
+ .proto_peer = { NNG_SURVEYOR0_PEER, NNG_SURVEYOR0_PEER_NAME },
+ .proto_flags = NNI_PROTO_FLAG_SNDRCV | NNI_PROTO_FLAG_RAW,
+ .proto_sock_ops = &xsurv0_sock_ops,
+ .proto_pipe_ops = &xsurv0_pipe_ops,
+};
+
+int
+nng_surveyor0_open_raw(nng_socket *sidp)
+{
+ return (nni_proto_open(sidp, &xsurv0_proto));
+}
diff --git a/src/sp/protocol/survey0/xsurvey_test.c b/src/sp/protocol/survey0/xsurvey_test.c
new file mode 100644
index 00000000..f8e9d401
--- /dev/null
+++ b/src/sp/protocol/survey0/xsurvey_test.c
@@ -0,0 +1,399 @@
+//
+// Copyright 2020 Staysail Systems, Inc. <info@staysail.tech>
+//
+// This software is supplied under the terms of the MIT License, a
+// copy of which should be located in the distribution where this
+// file was obtained (LICENSE.txt). A copy of the license may also be
+// found online at https://opensource.org/licenses/MIT.
+//
+
+#include <nuts.h>
+
+static void
+test_xsurveyor_identity(void)
+{
+ nng_socket s;
+ int p;
+ char * n;
+
+ NUTS_PASS(nng_surveyor0_open_raw(&s));
+ NUTS_PASS(nng_socket_get_int(s, NNG_OPT_PROTO, &p));
+ NUTS_TRUE(p == NNG_SURVEYOR0_SELF); // 0x62
+ NUTS_PASS(nng_socket_get_int(s, NNG_OPT_PEER, &p));
+ NUTS_TRUE(p == NNG_SURVEYOR0_PEER); // 0x63
+ NUTS_PASS(nng_socket_get_string(s, NNG_OPT_PROTONAME, &n));
+ NUTS_MATCH(n, NNG_SURVEYOR0_SELF_NAME);
+ nng_strfree(n);
+ NUTS_PASS(nng_socket_get_string(s, NNG_OPT_PEERNAME, &n));
+ NUTS_MATCH(n, NNG_SURVEYOR0_PEER_NAME);
+ nng_strfree(n);
+ NUTS_CLOSE(s);
+}
+
+static void
+test_xsurveyor_raw(void)
+{
+ nng_socket s;
+ bool b;
+
+ NUTS_PASS(nng_surveyor0_open_raw(&s));
+ NUTS_PASS(nng_socket_get_bool(s, NNG_OPT_RAW, &b));
+ NUTS_TRUE(b);
+ NUTS_CLOSE(s);
+}
+
+static void
+test_xsurvey_no_context(void)
+{
+ nng_socket s;
+ nng_ctx ctx;
+
+ NUTS_PASS(nng_surveyor0_open_raw(&s));
+ NUTS_FAIL(nng_ctx_open(&ctx, s), NNG_ENOTSUP);
+ NUTS_CLOSE(s);
+}
+
+static void
+test_xsurvey_poll_writeable(void)
+{
+ int fd;
+ nng_socket surv;
+ nng_socket resp;
+
+ NUTS_PASS(nng_surveyor0_open_raw(&surv));
+ NUTS_PASS(nng_respondent0_open(&resp));
+ NUTS_PASS(nng_socket_get_int(surv, NNG_OPT_SENDFD, &fd));
+ NUTS_TRUE(fd >= 0);
+
+ // Survey is broadcast, so we can always write.
+ NUTS_TRUE(nuts_poll_fd(fd));
+
+ NUTS_MARRY(surv, resp);
+
+ // Still writable after connect.
+ NUTS_TRUE(nuts_poll_fd(fd));
+
+ NUTS_CLOSE(surv);
+ NUTS_CLOSE(resp);
+}
+
+static void
+test_xsurvey_poll_readable(void)
+{
+ int fd;
+ nng_socket surv;
+ nng_socket resp;
+ nng_msg * msg;
+
+ NUTS_PASS(nng_surveyor0_open_raw(&surv));
+ NUTS_PASS(nng_respondent0_open(&resp));
+ NUTS_PASS(nng_socket_get_int(surv, NNG_OPT_RECVFD, &fd));
+ NUTS_PASS(nng_socket_set_ms(resp, NNG_OPT_RECVTIMEO, 1000));
+ NUTS_PASS(nng_socket_set_ms(surv, NNG_OPT_RECVTIMEO, 1000));
+ NUTS_PASS(nng_socket_set_ms(resp, NNG_OPT_SENDTIMEO, 1000));
+ NUTS_PASS(nng_socket_set_ms(surv, NNG_OPT_SENDTIMEO, 1000));
+
+ NUTS_TRUE(fd >= 0);
+
+ // Not readable if not connected!
+ NUTS_TRUE(nuts_poll_fd(fd) == false);
+
+ // Even after connect (no message yet)
+ NUTS_MARRY(surv, resp);
+ NUTS_TRUE(nuts_poll_fd(fd) == false);
+
+ // But once we send messages, it is.
+ // We have to send a survey, and have the respondent reply to it.
+ NUTS_PASS(nng_msg_alloc(&msg, 0));
+ // Survey ID
+ NUTS_PASS(nng_msg_append_u32(msg, 0x80000000));
+ NUTS_PASS(nng_sendmsg(surv, msg, 0));
+
+ NUTS_PASS(nng_recvmsg(resp, &msg, 0));
+ NUTS_PASS(nng_sendmsg(resp, msg, 0));
+
+ NUTS_SLEEP(100);
+
+ NUTS_TRUE(nuts_poll_fd(fd));
+
+ // and receiving makes it no longer ready
+ NUTS_PASS(nng_recvmsg(surv, &msg, 0));
+ nng_msg_free(msg);
+ NUTS_TRUE(nuts_poll_fd(fd) == false);
+
+ NUTS_CLOSE(surv);
+ NUTS_CLOSE(resp);
+}
+
+static void
+test_xsurvey_validate_peer(void)
+{
+ nng_socket s1, s2;
+ nng_stat * stats;
+ nng_stat * reject;
+ char *addr;
+
+ NUTS_ADDR(addr, "inproc");
+
+ NUTS_PASS(nng_surveyor0_open_raw(&s1));
+ NUTS_PASS(nng_surveyor0_open(&s2));
+
+ NUTS_PASS(nng_listen(s1, addr, NULL, 0));
+ NUTS_PASS(nng_dial(s2, addr, NULL, NNG_FLAG_NONBLOCK));
+
+ NUTS_SLEEP(100);
+ NUTS_PASS(nng_stats_get(&stats));
+
+ NUTS_TRUE(stats != NULL);
+ NUTS_TRUE((reject = nng_stat_find_socket(stats, s1)) != NULL);
+ NUTS_TRUE((reject = nng_stat_find(reject, "reject")) != NULL);
+
+ NUTS_TRUE(nng_stat_type(reject) == NNG_STAT_COUNTER);
+ NUTS_TRUE(nng_stat_value(reject) > 0);
+
+ NUTS_CLOSE(s1);
+ NUTS_CLOSE(s2);
+ nng_stats_free(stats);
+}
+
+static void
+test_xsurvey_recv_aio_stopped(void)
+{
+ nng_socket surv;
+ nng_aio * aio;
+
+ NUTS_PASS(nng_surveyor0_open_raw(&surv));
+ NUTS_PASS(nng_aio_alloc(&aio, NULL, NULL));
+
+ nng_aio_stop(aio);
+ nng_recv_aio(surv, aio);
+ nng_aio_wait(aio);
+ NUTS_FAIL(nng_aio_result(aio), NNG_ECANCELED);
+ NUTS_CLOSE(surv);
+ nng_aio_free(aio);
+}
+
+static void
+test_xsurvey_recv_garbage(void)
+{
+ nng_socket resp;
+ nng_socket surv;
+ nng_msg * m;
+ uint32_t req_id;
+
+ NUTS_PASS(nng_respondent0_open_raw(&resp));
+ NUTS_PASS(nng_surveyor0_open_raw(&surv));
+ NUTS_PASS(nng_socket_set_ms(surv, NNG_OPT_RECVTIMEO, 100));
+ NUTS_PASS(nng_socket_set_ms(surv, NNG_OPT_SENDTIMEO, 1000));
+ NUTS_PASS(nng_socket_set_ms(resp, NNG_OPT_SENDTIMEO, 1000));
+
+ NUTS_MARRY(surv, resp);
+
+ NUTS_PASS(nng_msg_alloc(&m, 0));
+ NUTS_PASS(nng_msg_append_u32(m, 0x80000000));
+ NUTS_PASS(nng_sendmsg(surv, m, 0));
+
+ NUTS_PASS(nng_recvmsg(resp, &m, 0));
+
+ // The message will have a header that contains the 32-bit pipe ID,
+ // followed by the 32-bit survey ID. We will discard the survey
+ // ID before sending it back.
+ NUTS_TRUE(nng_msg_header_len(m) == 8);
+ NUTS_PASS(nng_msg_header_chop_u32(m, &req_id));
+ NUTS_TRUE(req_id == 0x80000000);
+
+ NUTS_PASS(nng_sendmsg(resp, m, 0));
+ NUTS_FAIL(nng_recvmsg(surv, &m, 0), NNG_ETIMEDOUT);
+
+ NUTS_CLOSE(surv);
+ NUTS_CLOSE(resp);
+}
+
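+// A response carrying extra backtrace hops in its header should be
+// delivered to the raw surveyor with those hops (everything but the
+// routing pipe ID) left intact.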
+static void
+test_xsurvey_recv_header(void)
+{
+ nng_socket resp;
+ nng_socket surv;
+ nng_msg * m;
+ nng_pipe p;
+ uint32_t id;
+
+ NUTS_PASS(nng_respondent0_open_raw(&resp));
+ NUTS_PASS(nng_surveyor0_open_raw(&surv));
+ NUTS_PASS(nng_socket_set_ms(surv, NNG_OPT_RECVTIMEO, 1000));
+ NUTS_PASS(nng_socket_set_ms(surv, NNG_OPT_SENDTIMEO, 1000));
+ NUTS_PASS(nng_socket_set_ms(resp, NNG_OPT_SENDTIMEO, 1000));
+	NUTS_PASS(nng_socket_set_ms(resp, NNG_OPT_RECVTIMEO, 1000));
+
+ NUTS_MARRY_EX(surv, resp, NULL, NULL, &p);
+
+ // Simulate a few hops.
+ NUTS_PASS(nng_msg_alloc(&m, 0));
+ NUTS_PASS(nng_msg_header_append_u32(m, nng_pipe_id(p)));
+ NUTS_PASS(nng_msg_header_append_u32(m, 0x2));
+ NUTS_PASS(nng_msg_header_append_u32(m, 0x1));
+ NUTS_PASS(nng_msg_header_append_u32(m, 0x80000123u));
+
+ NUTS_PASS(nng_sendmsg(resp, m, 0));
+
+ NUTS_PASS(nng_recvmsg(surv, &m, 0));
+ NUTS_TRUE(nng_msg_header_len(m) == 12);
+ NUTS_PASS(nng_msg_header_trim_u32(m, &id));
+ NUTS_TRUE(id == 0x2);
+ NUTS_PASS(nng_msg_header_trim_u32(m, &id));
+ NUTS_TRUE(id == 0x1);
+ NUTS_PASS(nng_msg_header_trim_u32(m, &id));
+ NUTS_TRUE(id == 0x80000123u);
+
+ nng_msg_free(m);
+
+ NUTS_CLOSE(surv);
+ NUTS_CLOSE(resp);
+}
+
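+// Closing the surveyor while responses are still arriving, with a tiny
+// receive buffer, must still allow both sockets to shut down cleanly.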
+static void
+test_xsurvey_close_during_recv(void)
+{
+ nng_socket resp;
+ nng_socket surv;
+ nng_msg * m;
+ nng_pipe p1;
+ nng_pipe p2;
+
+ NUTS_PASS(nng_respondent0_open_raw(&resp));
+ NUTS_PASS(nng_surveyor0_open_raw(&surv));
+ NUTS_PASS(nng_socket_set_ms(resp, NNG_OPT_RECVTIMEO, 1000));
+ NUTS_PASS(nng_socket_set_ms(surv, NNG_OPT_SENDTIMEO, 100));
+ NUTS_PASS(nng_socket_set_int(surv, NNG_OPT_RECVBUF, 1));
+ NUTS_PASS(nng_socket_set_int(resp, NNG_OPT_SENDBUF, 20));
+
+ NUTS_MARRY_EX(surv, resp, NULL, &p1, &p2);
+ NUTS_TRUE(nng_pipe_id(p1) > 0);
+ NUTS_TRUE(nng_pipe_id(p2) > 0);
+
+ for (unsigned i = 0; i < 20; i++) {
+ NUTS_PASS(nng_msg_alloc(&m, 4));
+ NUTS_PASS(nng_msg_header_append_u32(m, nng_pipe_id(p2)));
+ NUTS_PASS(nng_msg_header_append_u32(m, i | 0x80000000u));
+ NUTS_SLEEP(10);
+ NUTS_PASS(nng_sendmsg(resp, m, 0));
+ }
+ NUTS_CLOSE(surv);
+ NUTS_CLOSE(resp);
+}
+
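+// Closing a pipe while surveys are still being sent must not prevent the
+// sockets from shutting down cleanly afterwards.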
+static void
+test_xsurvey_close_pipe_during_send(void)
+{
+ nng_socket resp;
+ nng_socket surv;
+ nng_msg * m;
+ nng_pipe p1;
+ nng_pipe p2;
+
+ NUTS_PASS(nng_respondent0_open_raw(&resp));
+ NUTS_PASS(nng_surveyor0_open_raw(&surv));
+ NUTS_PASS(nng_socket_set_ms(resp, NNG_OPT_RECVTIMEO, 1000));
+ NUTS_PASS(nng_socket_set_ms(surv, NNG_OPT_SENDTIMEO, 100));
+ NUTS_PASS(nng_socket_set_int(resp, NNG_OPT_RECVBUF, 5));
+ NUTS_PASS(nng_socket_set_int(surv, NNG_OPT_SENDBUF, 20));
+
+ NUTS_MARRY_EX(surv, resp, NULL, &p1, &p2);
+ NUTS_TRUE(nng_pipe_id(p1) > 0);
+ NUTS_TRUE(nng_pipe_id(p2) > 0);
+
+ for (unsigned i = 0; i < 20; i++) {
+ NUTS_PASS(nng_msg_alloc(&m, 4));
+ NUTS_PASS(nng_msg_header_append_u32(m, i | 0x80000000u));
+ NUTS_SLEEP(10);
+ NUTS_PASS(nng_sendmsg(surv, m, 0));
+ }
+
+ NUTS_PASS(nng_pipe_close(p1));
+ NUTS_CLOSE(surv);
+ NUTS_CLOSE(resp);
+}
+
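+// NNG_OPT_MAXTTL accepts small positive hop counts (1 and 3 here), rejects
+// out-of-range values such as 0, -1, 16, and 256 with NNG_EINVAL, and
+// rejects mismatched option types and sizes.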
+static void
+test_xsurvey_ttl_option(void)
+{
+ nng_socket s;
+ int v;
+ bool b;
+ size_t sz;
+ const char *opt = NNG_OPT_MAXTTL;
+
+ NUTS_PASS(nng_surveyor0_open_raw(&s));
+
+ NUTS_PASS(nng_socket_set_int(s, opt, 1));
+ NUTS_FAIL(nng_socket_set_int(s, opt, 0), NNG_EINVAL);
+ NUTS_FAIL(nng_socket_set_int(s, opt, -1), NNG_EINVAL);
+ NUTS_FAIL(nng_socket_set_int(s, opt, 16), NNG_EINVAL);
+ NUTS_FAIL(nng_socket_set_int(s, opt, 256), NNG_EINVAL);
+ NUTS_PASS(nng_socket_set_int(s, opt, 3));
+ NUTS_PASS(nng_socket_get_int(s, opt, &v));
+ NUTS_TRUE(v == 3);
+ v = 0;
+ sz = sizeof(v);
+ NUTS_PASS(nng_socket_get(s, opt, &v, &sz));
+ NUTS_TRUE(v == 3);
+ NUTS_TRUE(sz == sizeof(v));
+
+	NUTS_FAIL(nng_socket_set(s, opt, "", 1), NNG_EINVAL);
+	sz = 1;
+	NUTS_FAIL(nng_socket_get(s, opt, &v, &sz), NNG_EINVAL);
+	NUTS_FAIL(nng_socket_set_bool(s, opt, true), NNG_EBADTYPE);
+	NUTS_FAIL(nng_socket_get_bool(s, opt, &b), NNG_EBADTYPE);
+
+ NUTS_CLOSE(s);
+}
+
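+// A survey sent on the raw surveyor socket is broadcast, so every connected
+// respondent receives a copy of the same message.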
+static void
+test_xsurvey_broadcast(void)
+{
+ nng_socket resp1;
+ nng_socket resp2;
+ nng_socket surv;
+ nng_msg * m;
+
+ NUTS_PASS(nng_respondent0_open(&resp1));
+ NUTS_PASS(nng_respondent0_open(&resp2));
+ NUTS_PASS(nng_surveyor0_open_raw(&surv));
+ NUTS_PASS(nng_socket_set_ms(resp1, NNG_OPT_RECVTIMEO, 1000));
+ NUTS_PASS(nng_socket_set_ms(resp2, NNG_OPT_RECVTIMEO, 1000));
+ NUTS_PASS(nng_socket_set_ms(surv, NNG_OPT_SENDTIMEO, 100));
+
+ NUTS_MARRY(surv, resp1);
+ NUTS_MARRY(surv, resp2);
+
+ NUTS_PASS(nng_msg_alloc(&m, 0));
+ NUTS_PASS(nng_msg_header_append_u32(m, 0x80000002u));
+ NUTS_PASS(nng_msg_append(m, "hello", 6));
+
+ NUTS_PASS(nng_sendmsg(surv, m, 0));
+ NUTS_RECV(resp1, "hello");
+ NUTS_RECV(resp2, "hello");
+
+ NUTS_CLOSE(surv);
+ NUTS_CLOSE(resp1);
+ NUTS_CLOSE(resp2);
+}
+
+TEST_LIST = {
+ { "xsurvey identity", test_xsurveyor_identity },
+ { "xsurvey raw", test_xsurveyor_raw },
+ { "xsurvey no context", test_xsurvey_no_context },
+ { "xsurvey poll readable", test_xsurvey_poll_readable },
+ { "xsurvey poll writable", test_xsurvey_poll_writeable },
+ { "xsurvey validate peer", test_xsurvey_validate_peer },
+ { "xsurvey recv aio stopped", test_xsurvey_recv_aio_stopped },
+ { "xsurvey recv garbage", test_xsurvey_recv_garbage },
+ { "xsurvey recv header", test_xsurvey_recv_header },
+ { "xsurvey close during recv", test_xsurvey_close_during_recv },
+ { "xsurvey close pipe during send",
+ test_xsurvey_close_pipe_during_send },
+ { "xsurvey ttl option", test_xsurvey_ttl_option },
+ { "xsurvey broadcast", test_xsurvey_broadcast },
+ { NULL, NULL },
+};