aboutsummaryrefslogtreecommitdiff
path: root/src/sp
diff options
context:
space:
mode:
Diffstat (limited to 'src/sp')
-rw-r--r--src/sp/CMakeLists.txt13
-rw-r--r--src/sp/protocol/CMakeLists.txt20
-rw-r--r--src/sp/protocol/bus0/CMakeLists.txt18
-rw-r--r--src/sp/protocol/bus0/bug1247_test.c35
-rw-r--r--src/sp/protocol/bus0/bus.c466
-rw-r--r--src/sp/protocol/pair0/CMakeLists.txt16
-rw-r--r--src/sp/protocol/pair0/pair.c305
-rw-r--r--src/sp/protocol/pair1/CMakeLists.txt20
-rw-r--r--src/sp/protocol/pair1/pair.c540
-rw-r--r--src/sp/protocol/pair1/pair1_poly.c535
-rw-r--r--src/sp/protocol/pair1/pair1_poly_test.c370
-rw-r--r--src/sp/protocol/pair1/pair1_test.c433
-rw-r--r--src/sp/protocol/pipeline0/CMakeLists.txt23
-rw-r--r--src/sp/protocol/pipeline0/pull.c325
-rw-r--r--src/sp/protocol/pipeline0/pull_test.c264
-rw-r--r--src/sp/protocol/pipeline0/push.c442
-rw-r--r--src/sp/protocol/pipeline0/push_test.c525
-rw-r--r--src/sp/protocol/pubsub0/CMakeLists.txt24
-rw-r--r--src/sp/protocol/pubsub0/pub.c383
-rw-r--r--src/sp/protocol/pubsub0/pub_test.c331
-rw-r--r--src/sp/protocol/pubsub0/sub.c755
-rw-r--r--src/sp/protocol/pubsub0/sub_test.c624
-rw-r--r--src/sp/protocol/pubsub0/xsub.c211
-rw-r--r--src/sp/protocol/pubsub0/xsub_test.c376
-rw-r--r--src/sp/protocol/reqrep0/CMakeLists.txt25
-rw-r--r--src/sp/protocol/reqrep0/rep.c705
-rw-r--r--src/sp/protocol/reqrep0/rep_test.c669
-rw-r--r--src/sp/protocol/reqrep0/req.c869
-rw-r--r--src/sp/protocol/reqrep0/req_test.c968
-rw-r--r--src/sp/protocol/reqrep0/xrep.c432
-rw-r--r--src/sp/protocol/reqrep0/xrep_test.c434
-rw-r--r--src/sp/protocol/reqrep0/xreq.c319
-rw-r--r--src/sp/protocol/reqrep0/xreq_test.c367
-rw-r--r--src/sp/protocol/survey0/CMakeLists.txt25
-rw-r--r--src/sp/protocol/survey0/respond.c693
-rw-r--r--src/sp/protocol/survey0/respond_test.c586
-rw-r--r--src/sp/protocol/survey0/survey.c663
-rw-r--r--src/sp/protocol/survey0/survey_test.c626
-rw-r--r--src/sp/protocol/survey0/xrespond.c417
-rw-r--r--src/sp/protocol/survey0/xrespond_test.c436
-rw-r--r--src/sp/protocol/survey0/xsurvey.c379
-rw-r--r--src/sp/protocol/survey0/xsurvey_test.c399
-rw-r--r--src/sp/transport/CMakeLists.txt19
-rw-r--r--src/sp/transport/inproc/CMakeLists.txt16
-rw-r--r--src/sp/transport/inproc/inproc.c692
-rw-r--r--src/sp/transport/ipc/CMakeLists.txt17
-rw-r--r--src/sp/transport/ipc/ipc.c1171
-rw-r--r--src/sp/transport/ipc/ipc_test.c395
-rw-r--r--src/sp/transport/tcp/CMakeLists.txt17
-rw-r--r--src/sp/transport/tcp/tcp.c1263
-rw-r--r--src/sp/transport/tcp/tcp_test.c297
-rw-r--r--src/sp/transport/tls/CMakeLists.txt16
-rw-r--r--src/sp/transport/tls/tls.c1292
-rw-r--r--src/sp/transport/ws/CMakeLists.txt24
-rw-r--r--src/sp/transport/ws/README.adoc38
-rw-r--r--src/sp/transport/ws/websocket.c740
-rw-r--r--src/sp/transport/ws/ws_test.c181
-rw-r--r--src/sp/transport/zerotier/CMakeLists.txt37
-rw-r--r--src/sp/transport/zerotier/zerotier.c3241
-rw-r--r--src/sp/transport/zerotier/zthash.c302
-rw-r--r--src/sp/transport/zerotier/zthash.h43
61 files changed, 25867 insertions, 0 deletions
diff --git a/src/sp/CMakeLists.txt b/src/sp/CMakeLists.txt
new file mode 100644
index 00000000..b84952ee
--- /dev/null
+++ b/src/sp/CMakeLists.txt
@@ -0,0 +1,13 @@
+#
+# Copyright 2021 Staysail Systems, Inc. <info@staysail.tech>
+#
+# This software is supplied under the terms of the MIT License, a
+# copy of which should be located in the distribution where this
+# file was obtained (LICENSE.txt). A copy of the license may also be
+# found online at https://opensource.org/licenses/MIT.
+#
+
+nng_directory(sp)
+
+add_subdirectory(protocol)
+add_subdirectory(transport)
diff --git a/src/sp/protocol/CMakeLists.txt b/src/sp/protocol/CMakeLists.txt
new file mode 100644
index 00000000..fd480523
--- /dev/null
+++ b/src/sp/protocol/CMakeLists.txt
@@ -0,0 +1,20 @@
+#
+# Copyright 2020 Staysail Systems, Inc. <info@staysail.tech>
+#
+# This software is supplied under the terms of the MIT License, a
+# copy of which should be located in the distribution where this
+# file was obtained (LICENSE.txt). A copy of the license may also be
+# found online at https://opensource.org/licenses/MIT.
+#
+
+# Protocols.
+nng_directory(protocol)
+
+add_subdirectory(bus0)
+add_subdirectory(pair0)
+add_subdirectory(pair1)
+add_subdirectory(pipeline0)
+add_subdirectory(pubsub0)
+add_subdirectory(reqrep0)
+add_subdirectory(survey0)
+
diff --git a/src/sp/protocol/bus0/CMakeLists.txt b/src/sp/protocol/bus0/CMakeLists.txt
new file mode 100644
index 00000000..01c0b05b
--- /dev/null
+++ b/src/sp/protocol/bus0/CMakeLists.txt
@@ -0,0 +1,18 @@
+#
+# Copyright 2020 Staysail Systems, Inc. <info@staysail.tech>
+# Copyright 2018 Capitar IT Group BV <info@capitar.com>
+#
+# This software is supplied under the terms of the MIT License, a
+# copy of which should be located in the distribution where this
+# file was obtained (LICENSE.txt). A copy of the license may also be
+# found online at https://opensource.org/licenses/MIT.
+#
+
+# Bus protocol
+nng_directory(bus0)
+
+nng_sources_if(NNG_PROTO_BUS0 bus.c)
+nng_headers_if(NNG_PROTO_BUS0 nng/protocol/bus0/bus.h)
+nng_defines_if(NNG_PROTO_BUS0 NNG_HAVE_BUS0)
+
+nng_test(bug1247_test) \ No newline at end of file
diff --git a/src/sp/protocol/bus0/bug1247_test.c b/src/sp/protocol/bus0/bug1247_test.c
new file mode 100644
index 00000000..bbc6958b
--- /dev/null
+++ b/src/sp/protocol/bus0/bug1247_test.c
@@ -0,0 +1,35 @@
+//
+// Copyright 2020 Staysail Systems, Inc. <info@staysail.tech>
+//
+// This software is supplied under the terms of the MIT License, a
+// copy of which should be located in the distribution where this
+// file was obtained (LICENSE.txt). A copy of the license may also be
+// found online at https://opensource.org/licenses/MIT.
+//
+
+#include <nuts.h>
+
+#include <nng/protocol/bus0/bus.h>
+
+void
+test_bug1247(void)
+{
+ nng_socket bus1, bus2;
+ char * addr;
+
+ NUTS_ADDR(addr, "tcp");
+
+ NUTS_PASS(nng_bus0_open(&bus1));
+ NUTS_PASS(nng_bus0_open(&bus2));
+
+ NUTS_PASS(nng_listen(bus1, addr, NULL, 0));
+ NUTS_FAIL(nng_listen(bus2, addr, NULL, 0), NNG_EADDRINUSE);
+
+ NUTS_PASS(nng_close(bus2));
+ NUTS_PASS(nng_close(bus1));
+}
+
+TEST_LIST = {
+ { "bug1247", test_bug1247 },
+ { NULL, NULL },
+};
diff --git a/src/sp/protocol/bus0/bus.c b/src/sp/protocol/bus0/bus.c
new file mode 100644
index 00000000..9a610ac6
--- /dev/null
+++ b/src/sp/protocol/bus0/bus.c
@@ -0,0 +1,466 @@
+//
+// Copyright 2020 Staysail Systems, Inc. <info@staysail.tech>
+// Copyright 2018 Capitar IT Group BV <info@capitar.com>
+//
+// This software is supplied under the terms of the MIT License, a
+// copy of which should be located in the distribution where this
+// file was obtained (LICENSE.txt). A copy of the license may also be
+// found online at https://opensource.org/licenses/MIT.
+//
+
+#include <stdbool.h>
+#include <stdlib.h>
+
+#include "core/nng_impl.h"
+#include "nng/protocol/bus0/bus.h"
+
+// Bus protocol. In the BUS protocol, each peer sends a message to its peers.
+// However, bus protocols do not "forward" (absent a device). So in order
+// for each participant to receive the message, each sender must be connected
+// to every other node in the network (full mesh).
+
+#ifndef NNI_PROTO_BUS_V0
+#define NNI_PROTO_BUS_V0 NNI_PROTO(7, 0)
+#endif
+
+typedef struct bus0_pipe bus0_pipe;
+typedef struct bus0_sock bus0_sock;
+
+static void bus0_sock_getq(bus0_sock *);
+static void bus0_sock_send(void *, nni_aio *);
+static void bus0_sock_recv(void *, nni_aio *);
+
+static void bus0_pipe_getq(bus0_pipe *);
+static void bus0_pipe_recv(bus0_pipe *);
+
+static void bus0_sock_getq_cb(void *);
+static void bus0_sock_getq_cb_raw(void *);
+static void bus0_pipe_getq_cb(void *);
+static void bus0_pipe_send_cb(void *);
+static void bus0_pipe_recv_cb(void *);
+static void bus0_pipe_putq_cb(void *);
+
+// bus0_sock is our per-socket protocol private structure.
+struct bus0_sock {
+ nni_aio * aio_getq;
+ nni_list pipes;
+ nni_mtx mtx;
+ nni_msgq *uwq;
+ nni_msgq *urq;
+ bool raw;
+};
+
+// bus0_pipe is our per-pipe protocol private structure.
+struct bus0_pipe {
+ nni_pipe * npipe;
+ bus0_sock * psock;
+ nni_msgq * sendq;
+ nni_list_node node;
+ nni_aio * aio_getq;
+ nni_aio * aio_recv;
+ nni_aio * aio_send;
+ nni_aio * aio_putq;
+ nni_mtx mtx;
+};
+
+static void
+bus0_sock_fini(void *arg)
+{
+ bus0_sock *s = arg;
+
+ nni_aio_free(s->aio_getq);
+ nni_mtx_fini(&s->mtx);
+}
+
+static int
+bus0_sock_init(void *arg, nni_sock *nsock)
+{
+ bus0_sock *s = arg;
+ int rv;
+
+ NNI_LIST_INIT(&s->pipes, bus0_pipe, node);
+ nni_mtx_init(&s->mtx);
+ if ((rv = nni_aio_alloc(&s->aio_getq, bus0_sock_getq_cb, s)) != 0) {
+ bus0_sock_fini(s);
+ return (rv);
+ }
+ s->uwq = nni_sock_sendq(nsock);
+ s->urq = nni_sock_recvq(nsock);
+ s->raw = false;
+
+ return (0);
+}
+
+static int
+bus0_sock_init_raw(void *arg, nni_sock *nsock)
+{
+ bus0_sock *s = arg;
+ int rv;
+
+ NNI_LIST_INIT(&s->pipes, bus0_pipe, node);
+ nni_mtx_init(&s->mtx);
+ if ((rv = nni_aio_alloc(&s->aio_getq, bus0_sock_getq_cb_raw, s)) !=
+ 0) {
+ bus0_sock_fini(s);
+ return (rv);
+ }
+ s->uwq = nni_sock_sendq(nsock);
+ s->urq = nni_sock_recvq(nsock);
+ s->raw = true;
+
+ return (0);
+}
+
+static void
+bus0_sock_open(void *arg)
+{
+ bus0_sock *s = arg;
+
+ bus0_sock_getq(s);
+}
+
+static void
+bus0_sock_close(void *arg)
+{
+ bus0_sock *s = arg;
+
+ nni_aio_close(s->aio_getq);
+}
+
+static void
+bus0_pipe_stop(void *arg)
+{
+ bus0_pipe *p = arg;
+
+ nni_aio_stop(p->aio_getq);
+ nni_aio_stop(p->aio_send);
+ nni_aio_stop(p->aio_recv);
+ nni_aio_stop(p->aio_putq);
+}
+
+static void
+bus0_pipe_fini(void *arg)
+{
+ bus0_pipe *p = arg;
+
+ nni_aio_free(p->aio_getq);
+ nni_aio_free(p->aio_send);
+ nni_aio_free(p->aio_recv);
+ nni_aio_free(p->aio_putq);
+ nni_msgq_fini(p->sendq);
+ nni_mtx_fini(&p->mtx);
+}
+
+static int
+bus0_pipe_init(void *arg, nni_pipe *npipe, void *s)
+{
+ bus0_pipe *p = arg;
+ int rv;
+
+ NNI_LIST_NODE_INIT(&p->node);
+ nni_mtx_init(&p->mtx);
+ if (((rv = nni_msgq_init(&p->sendq, 16)) != 0) ||
+ ((rv = nni_aio_alloc(&p->aio_getq, bus0_pipe_getq_cb, p)) != 0) ||
+ ((rv = nni_aio_alloc(&p->aio_send, bus0_pipe_send_cb, p)) != 0) ||
+ ((rv = nni_aio_alloc(&p->aio_recv, bus0_pipe_recv_cb, p)) != 0) ||
+ ((rv = nni_aio_alloc(&p->aio_putq, bus0_pipe_putq_cb, p)) != 0)) {
+ bus0_pipe_fini(p);
+ return (rv);
+ }
+
+ p->npipe = npipe;
+ p->psock = s;
+ return (0);
+}
+
+static int
+bus0_pipe_start(void *arg)
+{
+ bus0_pipe *p = arg;
+ bus0_sock *s = p->psock;
+
+ if (nni_pipe_peer(p->npipe) != NNI_PROTO_BUS_V0) {
+ // Peer protocol mismatch.
+ return (NNG_EPROTO);
+ }
+
+ nni_mtx_lock(&s->mtx);
+ nni_list_append(&s->pipes, p);
+ nni_mtx_unlock(&s->mtx);
+
+ bus0_pipe_recv(p);
+ bus0_pipe_getq(p);
+
+ return (0);
+}
+
+static void
+bus0_pipe_close(void *arg)
+{
+ bus0_pipe *p = arg;
+ bus0_sock *s = p->psock;
+
+ nni_aio_close(p->aio_getq);
+ nni_aio_close(p->aio_send);
+ nni_aio_close(p->aio_recv);
+ nni_aio_close(p->aio_putq);
+ nni_msgq_close(p->sendq);
+
+ nni_mtx_lock(&s->mtx);
+ if (nni_list_active(&s->pipes, p)) {
+ nni_list_remove(&s->pipes, p);
+ }
+ nni_mtx_unlock(&s->mtx);
+}
+
+static void
+bus0_pipe_getq_cb(void *arg)
+{
+ bus0_pipe *p = arg;
+
+ if (nni_aio_result(p->aio_getq) != 0) {
+ // closed?
+ nni_pipe_close(p->npipe);
+ return;
+ }
+ nni_aio_set_msg(p->aio_send, nni_aio_get_msg(p->aio_getq));
+ nni_aio_set_msg(p->aio_getq, NULL);
+
+ nni_pipe_send(p->npipe, p->aio_send);
+}
+
+static void
+bus0_pipe_send_cb(void *arg)
+{
+ bus0_pipe *p = arg;
+
+ if (nni_aio_result(p->aio_send) != 0) {
+ // closed?
+ nni_msg_free(nni_aio_get_msg(p->aio_send));
+ nni_aio_set_msg(p->aio_send, NULL);
+ nni_pipe_close(p->npipe);
+ return;
+ }
+
+ bus0_pipe_getq(p);
+}
+
+static void
+bus0_pipe_recv_cb(void *arg)
+{
+ bus0_pipe *p = arg;
+ bus0_sock *s = p->psock;
+ nni_msg * msg;
+
+ if (nni_aio_result(p->aio_recv) != 0) {
+ nni_pipe_close(p->npipe);
+ return;
+ }
+ msg = nni_aio_get_msg(p->aio_recv);
+
+ if (s->raw) {
+ nni_msg_header_append_u32(msg, nni_pipe_id(p->npipe));
+ }
+
+ nni_msg_set_pipe(msg, nni_pipe_id(p->npipe));
+ nni_aio_set_msg(p->aio_putq, msg);
+ nni_aio_set_msg(p->aio_recv, NULL);
+ nni_msgq_aio_put(s->urq, p->aio_putq);
+}
+
+static void
+bus0_pipe_putq_cb(void *arg)
+{
+ bus0_pipe *p = arg;
+
+ if (nni_aio_result(p->aio_putq) != 0) {
+ nni_msg_free(nni_aio_get_msg(p->aio_putq));
+ nni_aio_set_msg(p->aio_putq, NULL);
+ nni_pipe_close(p->npipe);
+ return;
+ }
+
+ // Wait for another recv.
+ bus0_pipe_recv(p);
+}
+
+static void
+bus0_sock_getq_cb(void *arg)
+{
+ bus0_sock *s = arg;
+ bus0_pipe *p;
+ bus0_pipe *lastp;
+ nni_msg * msg;
+ nni_msg * dup;
+
+ if (nni_aio_result(s->aio_getq) != 0) {
+ return;
+ }
+
+ msg = nni_aio_get_msg(s->aio_getq);
+
+ // We ignore any headers present for cooked mode.
+ nni_msg_header_clear(msg);
+
+ nni_mtx_lock(&s->mtx);
+ lastp = nni_list_last(&s->pipes);
+ NNI_LIST_FOREACH (&s->pipes, p) {
+ if (p != lastp) {
+ if (nni_msg_dup(&dup, msg) != 0) {
+ continue;
+ }
+ } else {
+ dup = msg;
+ msg = NULL;
+ }
+ if (nni_msgq_tryput(p->sendq, dup) != 0) {
+ nni_msg_free(dup);
+ }
+ }
+ nni_mtx_unlock(&s->mtx);
+ nni_msg_free(msg);
+
+ bus0_sock_getq(s);
+}
+
+static void
+bus0_sock_getq_cb_raw(void *arg)
+{
+ bus0_sock *s = arg;
+ bus0_pipe *p;
+ nni_msg * msg;
+ uint32_t sender;
+
+ if (nni_aio_result(s->aio_getq) != 0) {
+ return;
+ }
+
+ msg = nni_aio_get_msg(s->aio_getq);
+
+ // The header being present indicates that the message
+ // was received locally and we are rebroadcasting. (Device
+ // is doing this probably.) In this case grab the pipe
+ // ID from the header, so we can exclude it.
+ if (nni_msg_header_len(msg) >= 4) {
+ sender = nni_msg_header_trim_u32(msg);
+ } else {
+ sender = 0;
+ }
+
+ nni_mtx_lock(&s->mtx);
+ NNI_LIST_FOREACH (&s->pipes, p) {
+ if (nni_pipe_id(p->npipe) == sender) {
+ continue;
+ }
+ nni_msg_clone(msg);
+ if (nni_msgq_tryput(p->sendq, msg) != 0) {
+ nni_msg_free(msg);
+ }
+ }
+ nni_mtx_unlock(&s->mtx);
+ nni_msg_free(msg);
+
+ bus0_sock_getq(s);
+}
+
+static void
+bus0_sock_getq(bus0_sock *s)
+{
+ nni_msgq_aio_get(s->uwq, s->aio_getq);
+}
+
+static void
+bus0_pipe_getq(bus0_pipe *p)
+{
+ nni_msgq_aio_get(p->sendq, p->aio_getq);
+}
+
+static void
+bus0_pipe_recv(bus0_pipe *p)
+{
+ nni_pipe_recv(p->npipe, p->aio_recv);
+}
+
+static void
+bus0_sock_send(void *arg, nni_aio *aio)
+{
+ bus0_sock *s = arg;
+
+ nni_msgq_aio_put(s->uwq, aio);
+}
+
+static void
+bus0_sock_recv(void *arg, nni_aio *aio)
+{
+ bus0_sock *s = arg;
+
+ nni_msgq_aio_get(s->urq, aio);
+}
+
+static nni_proto_pipe_ops bus0_pipe_ops = {
+ .pipe_size = sizeof(bus0_pipe),
+ .pipe_init = bus0_pipe_init,
+ .pipe_fini = bus0_pipe_fini,
+ .pipe_start = bus0_pipe_start,
+ .pipe_close = bus0_pipe_close,
+ .pipe_stop = bus0_pipe_stop,
+};
+
+static nni_option bus0_sock_options[] = {
+ // terminate list
+ {
+ .o_name = NULL,
+ },
+};
+
+static nni_proto_sock_ops bus0_sock_ops = {
+ .sock_size = sizeof(bus0_sock),
+ .sock_init = bus0_sock_init,
+ .sock_fini = bus0_sock_fini,
+ .sock_open = bus0_sock_open,
+ .sock_close = bus0_sock_close,
+ .sock_send = bus0_sock_send,
+ .sock_recv = bus0_sock_recv,
+ .sock_options = bus0_sock_options,
+};
+
+static nni_proto_sock_ops bus0_sock_ops_raw = {
+ .sock_size = sizeof(bus0_sock),
+ .sock_init = bus0_sock_init_raw,
+ .sock_fini = bus0_sock_fini,
+ .sock_open = bus0_sock_open,
+ .sock_close = bus0_sock_close,
+ .sock_send = bus0_sock_send,
+ .sock_recv = bus0_sock_recv,
+ .sock_options = bus0_sock_options,
+};
+
+static nni_proto bus0_proto = {
+ .proto_version = NNI_PROTOCOL_VERSION,
+ .proto_self = { NNI_PROTO_BUS_V0, "bus" },
+ .proto_peer = { NNI_PROTO_BUS_V0, "bus" },
+ .proto_flags = NNI_PROTO_FLAG_SNDRCV,
+ .proto_sock_ops = &bus0_sock_ops,
+ .proto_pipe_ops = &bus0_pipe_ops,
+};
+
+static nni_proto bus0_proto_raw = {
+ .proto_version = NNI_PROTOCOL_VERSION,
+ .proto_self = { NNI_PROTO_BUS_V0, "bus" },
+ .proto_peer = { NNI_PROTO_BUS_V0, "bus" },
+ .proto_flags = NNI_PROTO_FLAG_SNDRCV | NNI_PROTO_FLAG_RAW,
+ .proto_sock_ops = &bus0_sock_ops_raw,
+ .proto_pipe_ops = &bus0_pipe_ops,
+};
+
+int
+nng_bus0_open(nng_socket *sidp)
+{
+ return (nni_proto_open(sidp, &bus0_proto));
+}
+
+int
+nng_bus0_open_raw(nng_socket *sidp)
+{
+ return (nni_proto_open(sidp, &bus0_proto_raw));
+}
diff --git a/src/sp/protocol/pair0/CMakeLists.txt b/src/sp/protocol/pair0/CMakeLists.txt
new file mode 100644
index 00000000..b12583ab
--- /dev/null
+++ b/src/sp/protocol/pair0/CMakeLists.txt
@@ -0,0 +1,16 @@
+#
+# Copyright 2020 Staysail Systems, Inc. <info@staysail.tech>
+# Copyright 2018 Capitar IT Group BV <info@capitar.com>
+#
+# This software is supplied under the terms of the MIT License, a
+# copy of which should be located in the distribution where this
+# file was obtained (LICENSE.txt). A copy of the license may also be
+# found online at https://opensource.org/licenses/MIT.
+#
+
+# PAIRv0 protocol
+nng_directory(pair0)
+
+nng_sources_if(NNG_PROTO_PAIR0 pair.c)
+nng_headers_if(NNG_PROTO_PAIR0 nng/protocol/pair0/pair.h)
+nng_defines_if(NNG_PROTO_PAIR0 NNG_HAVE_PAIR0) \ No newline at end of file
diff --git a/src/sp/protocol/pair0/pair.c b/src/sp/protocol/pair0/pair.c
new file mode 100644
index 00000000..41f88c7c
--- /dev/null
+++ b/src/sp/protocol/pair0/pair.c
@@ -0,0 +1,305 @@
+//
+// Copyright 2020 Staysail Systems, Inc. <info@staysail.tech>
+// Copyright 2018 Capitar IT Group BV <info@capitar.com>
+//
+// This software is supplied under the terms of the MIT License, a
+// copy of which should be located in the distribution where this
+// file was obtained (LICENSE.txt). A copy of the license may also be
+// found online at https://opensource.org/licenses/MIT.
+//
+
+#include <stdlib.h>
+
+#include "core/nng_impl.h"
+#include "nng/protocol/pair0/pair.h"
+
+// Pair protocol. The PAIR protocol is a simple 1:1 messaging pattern.
+// While a peer is connected to the server, all other peer connection
+// attempts are discarded.
+
+#ifndef NNI_PROTO_PAIR_V0
+#define NNI_PROTO_PAIR_V0 NNI_PROTO(1, 0)
+#endif
+
+typedef struct pair0_pipe pair0_pipe;
+typedef struct pair0_sock pair0_sock;
+
+static void pair0_send_cb(void *);
+static void pair0_recv_cb(void *);
+static void pair0_getq_cb(void *);
+static void pair0_putq_cb(void *);
+static void pair0_pipe_fini(void *);
+
+// pair0_sock is our per-socket protocol private structure.
+struct pair0_sock {
+ pair0_pipe *ppipe;
+ nni_msgq * uwq;
+ nni_msgq * urq;
+ nni_mtx mtx;
+};
+
+// A pair0_pipe is our per-pipe protocol private structure. We keep
+// one of these even though in theory we'd only have a single underlying
+// pipe. The separate data structure is more like other protocols that do
+// manage multiple pipes.
+struct pair0_pipe {
+ nni_pipe * npipe;
+ pair0_sock *psock;
+ nni_aio aio_send;
+ nni_aio aio_recv;
+ nni_aio aio_getq;
+ nni_aio aio_putq;
+};
+
+static int
+pair0_sock_init(void *arg, nni_sock *nsock)
+{
+ pair0_sock *s = arg;
+
+ nni_mtx_init(&s->mtx);
+ s->ppipe = NULL;
+ s->uwq = nni_sock_sendq(nsock);
+ s->urq = nni_sock_recvq(nsock);
+ return (0);
+}
+
+static void
+pair0_sock_fini(void *arg)
+{
+ pair0_sock *s = arg;
+
+ nni_mtx_fini(&s->mtx);
+}
+
+static void
+pair0_pipe_stop(void *arg)
+{
+ pair0_pipe *p = arg;
+
+ nni_aio_stop(&p->aio_send);
+ nni_aio_stop(&p->aio_recv);
+ nni_aio_stop(&p->aio_putq);
+ nni_aio_stop(&p->aio_getq);
+}
+
+static void
+pair0_pipe_fini(void *arg)
+{
+ pair0_pipe *p = arg;
+
+ nni_aio_fini(&p->aio_send);
+ nni_aio_fini(&p->aio_recv);
+ nni_aio_fini(&p->aio_putq);
+ nni_aio_fini(&p->aio_getq);
+}
+
+static int
+pair0_pipe_init(void *arg, nni_pipe *npipe, void *psock)
+{
+ pair0_pipe *p = arg;
+
+ nni_aio_init(&p->aio_send, pair0_send_cb, p);
+ nni_aio_init(&p->aio_recv, pair0_recv_cb, p);
+ nni_aio_init(&p->aio_getq, pair0_getq_cb, p);
+ nni_aio_init(&p->aio_putq, pair0_putq_cb, p);
+
+ p->npipe = npipe;
+ p->psock = psock;
+ return (0);
+}
+
+static int
+pair0_pipe_start(void *arg)
+{
+ pair0_pipe *p = arg;
+ pair0_sock *s = p->psock;
+
+ if (nni_pipe_peer(p->npipe) != NNI_PROTO_PAIR_V0) {
+ // Peer protocol mismatch.
+ return (NNG_EPROTO);
+ }
+
+ nni_mtx_lock(&s->mtx);
+ if (s->ppipe != NULL) {
+ nni_mtx_unlock(&s->mtx);
+ return (NNG_EBUSY); // Already have a peer, denied.
+ }
+ s->ppipe = p;
+ nni_mtx_unlock(&s->mtx);
+
+ // Schedule a getq on the upper, and a read from the pipe.
+ // Each of these also sets up another hold on the pipe itself.
+ nni_msgq_aio_get(s->uwq, &p->aio_getq);
+ nni_pipe_recv(p->npipe, &p->aio_recv);
+
+ return (0);
+}
+
+static void
+pair0_pipe_close(void *arg)
+{
+ pair0_pipe *p = arg;
+ pair0_sock *s = p->psock;
+
+ nni_aio_close(&p->aio_send);
+ nni_aio_close(&p->aio_recv);
+ nni_aio_close(&p->aio_putq);
+ nni_aio_close(&p->aio_getq);
+
+ nni_mtx_lock(&s->mtx);
+ if (s->ppipe == p) {
+ s->ppipe = NULL;
+ }
+ nni_mtx_unlock(&s->mtx);
+}
+
+static void
+pair0_recv_cb(void *arg)
+{
+ pair0_pipe *p = arg;
+ pair0_sock *s = p->psock;
+ nni_msg * msg;
+
+ if (nni_aio_result(&p->aio_recv) != 0) {
+ nni_pipe_close(p->npipe);
+ return;
+ }
+
+ msg = nni_aio_get_msg(&p->aio_recv);
+ nni_aio_set_msg(&p->aio_putq, msg);
+ nni_aio_set_msg(&p->aio_recv, NULL);
+
+ nni_msg_set_pipe(msg, nni_pipe_id(p->npipe));
+ nni_msgq_aio_put(s->urq, &p->aio_putq);
+}
+
+static void
+pair0_putq_cb(void *arg)
+{
+ pair0_pipe *p = arg;
+
+ if (nni_aio_result(&p->aio_putq) != 0) {
+ nni_msg_free(nni_aio_get_msg(&p->aio_putq));
+ nni_aio_set_msg(&p->aio_putq, NULL);
+ nni_pipe_close(p->npipe);
+ return;
+ }
+ nni_pipe_recv(p->npipe, &p->aio_recv);
+}
+
+static void
+pair0_getq_cb(void *arg)
+{
+ pair0_pipe *p = arg;
+
+ if (nni_aio_result(&p->aio_getq) != 0) {
+ nni_pipe_close(p->npipe);
+ return;
+ }
+
+ nni_aio_set_msg(&p->aio_send, nni_aio_get_msg(&p->aio_getq));
+ nni_aio_set_msg(&p->aio_getq, NULL);
+ nni_pipe_send(p->npipe, &p->aio_send);
+}
+
+static void
+pair0_send_cb(void *arg)
+{
+ pair0_pipe *p = arg;
+ pair0_sock *s = p->psock;
+
+ if (nni_aio_result(&p->aio_send) != 0) {
+ nni_msg_free(nni_aio_get_msg(&p->aio_send));
+ nni_aio_set_msg(&p->aio_send, NULL);
+ nni_pipe_close(p->npipe);
+ return;
+ }
+
+ nni_msgq_aio_get(s->uwq, &p->aio_getq);
+}
+
+static void
+pair0_sock_open(void *arg)
+{
+ NNI_ARG_UNUSED(arg);
+}
+
+static void
+pair0_sock_close(void *arg)
+{
+ NNI_ARG_UNUSED(arg);
+}
+
+static void
+pair0_sock_send(void *arg, nni_aio *aio)
+{
+ pair0_sock *s = arg;
+
+ nni_msgq_aio_put(s->uwq, aio);
+}
+
+static void
+pair0_sock_recv(void *arg, nni_aio *aio)
+{
+ pair0_sock *s = arg;
+
+ nni_msgq_aio_get(s->urq, aio);
+}
+
+static nni_proto_pipe_ops pair0_pipe_ops = {
+ .pipe_size = sizeof(pair0_pipe),
+ .pipe_init = pair0_pipe_init,
+ .pipe_fini = pair0_pipe_fini,
+ .pipe_start = pair0_pipe_start,
+ .pipe_close = pair0_pipe_close,
+ .pipe_stop = pair0_pipe_stop,
+};
+
+static nni_option pair0_sock_options[] = {
+ // terminate list
+ {
+ .o_name = NULL,
+ }
+};
+
+static nni_proto_sock_ops pair0_sock_ops = {
+ .sock_size = sizeof(pair0_sock),
+ .sock_init = pair0_sock_init,
+ .sock_fini = pair0_sock_fini,
+ .sock_open = pair0_sock_open,
+ .sock_close = pair0_sock_close,
+ .sock_send = pair0_sock_send,
+ .sock_recv = pair0_sock_recv,
+ .sock_options = pair0_sock_options,
+};
+
+// Legacy protocol (v0)
+static nni_proto pair0_proto = {
+ .proto_version = NNI_PROTOCOL_VERSION,
+ .proto_self = { NNI_PROTO_PAIR_V0, "pair" },
+ .proto_peer = { NNI_PROTO_PAIR_V0, "pair" },
+ .proto_flags = NNI_PROTO_FLAG_SNDRCV,
+ .proto_sock_ops = &pair0_sock_ops,
+ .proto_pipe_ops = &pair0_pipe_ops,
+};
+
+static nni_proto pair0_proto_raw = {
+ .proto_version = NNI_PROTOCOL_VERSION,
+ .proto_self = { NNI_PROTO_PAIR_V0, "pair" },
+ .proto_peer = { NNI_PROTO_PAIR_V0, "pair" },
+ .proto_flags = NNI_PROTO_FLAG_SNDRCV | NNI_PROTO_FLAG_RAW,
+ .proto_sock_ops = &pair0_sock_ops,
+ .proto_pipe_ops = &pair0_pipe_ops,
+};
+
+int
+nng_pair0_open(nng_socket *sidp)
+{
+ return (nni_proto_open(sidp, &pair0_proto));
+}
+
+int
+nng_pair0_open_raw(nng_socket *sidp)
+{
+ return (nni_proto_open(sidp, &pair0_proto_raw));
+}
diff --git a/src/sp/protocol/pair1/CMakeLists.txt b/src/sp/protocol/pair1/CMakeLists.txt
new file mode 100644
index 00000000..12e12607
--- /dev/null
+++ b/src/sp/protocol/pair1/CMakeLists.txt
@@ -0,0 +1,20 @@
+#
+# Copyright 2019 Staysail Systems, Inc. <info@staysail.tech>
+# Copyright 2018 Capitar IT Group BV <info@capitar.com>
+#
+# This software is supplied under the terms of the MIT License, a
+# copy of which should be located in the distribution where this
+# file was obtained (LICENSE.txt). A copy of the license may also be
+# found online at https://opensource.org/licenses/MIT.
+#
+
+# PAIRv1 protocol
+nng_directory(pair1)
+
+# XXX: break pair1_poly into an ifdef.
+nng_sources_if(NNG_PROTO_PAIR1 pair.c pair1_poly.c)
+nng_headers_if(NNG_PROTO_PAIR1 nng/protocol/pair1/pair.h)
+nng_defines_if(NNG_PROTO_PAIR1 NNG_HAVE_PAIR1)
+
+nng_test(pair1_test)
+nng_test(pair1_poly_test) \ No newline at end of file
diff --git a/src/sp/protocol/pair1/pair.c b/src/sp/protocol/pair1/pair.c
new file mode 100644
index 00000000..ba497c42
--- /dev/null
+++ b/src/sp/protocol/pair1/pair.c
@@ -0,0 +1,540 @@
+//
+// Copyright 2020 Staysail Systems, Inc. <info@staysail.tech>
+// Copyright 2018 Capitar IT Group BV <info@capitar.com>
+//
+// This software is supplied under the terms of the MIT License, a
+// copy of which should be located in the distribution where this
+// file was obtained (LICENSE.txt). A copy of the license may also be
+// found online at https://opensource.org/licenses/MIT.
+//
+
+#include <stdlib.h>
+
+#include "core/nng_impl.h"
+#include "nng/protocol/pair1/pair.h"
+
+// Pair protocol. The PAIRv1 protocol is a simple 1:1 messaging pattern.
+
+#ifdef NNG_ENABLE_STATS
+#define BUMP_STAT(x) nni_stat_inc(x, 1)
+#else
+#define BUMP_STAT(x)
+#endif
+
+typedef struct pair1_pipe pair1_pipe;
+typedef struct pair1_sock pair1_sock;
+
+static void pair1_pipe_send_cb(void *);
+static void pair1_pipe_recv_cb(void *);
+static void pair1_pipe_get_cb(void *);
+static void pair1_pipe_put_cb(void *);
+static void pair1_pipe_fini(void *);
+
+// pair1_sock is our per-socket protocol private structure.
+struct pair1_sock {
+ nni_msgq * uwq;
+ nni_msgq * urq;
+ nni_sock * sock;
+ bool raw;
+ pair1_pipe * p;
+ nni_atomic_int ttl;
+ nni_mtx mtx;
+#ifdef NNG_ENABLE_STATS
+ nni_stat_item stat_poly;
+ nni_stat_item stat_raw;
+ nni_stat_item stat_reject_mismatch;
+ nni_stat_item stat_reject_already;
+ nni_stat_item stat_ttl_drop;
+ nni_stat_item stat_rx_malformed;
+ nni_stat_item stat_tx_malformed;
+ nni_stat_item stat_tx_drop;
+#endif
+#ifdef NNG_TEST_LIB
+ bool inject_header;
+#endif
+};
+
+// pair1_pipe is our per-pipe protocol private structure.
+struct pair1_pipe {
+ nni_pipe * pipe;
+ pair1_sock *pair;
+ nni_aio aio_send;
+ nni_aio aio_recv;
+ nni_aio aio_get;
+ nni_aio aio_put;
+};
+
+static void
+pair1_sock_fini(void *arg)
+{
+ pair1_sock *s = arg;
+
+ nni_mtx_fini(&s->mtx);
+}
+
+#ifdef NNG_ENABLE_STATS
+static void
+pair1_add_sock_stat(
+ pair1_sock *s, nni_stat_item *item, const nni_stat_info *info)
+{
+ nni_stat_init(item, info);
+ nni_sock_add_stat(s->sock, item);
+}
+#endif
+
+static int
+pair1_sock_init_impl(void *arg, nni_sock *sock, bool raw)
+{
+ pair1_sock *s = arg;
+
+ // Raw mode uses this.
+ nni_mtx_init(&s->mtx);
+ s->sock = sock;
+
+#ifdef NNG_ENABLE_STATS
+ static const nni_stat_info poly_info = {
+ .si_name = "poly",
+ .si_desc = "polyamorous mode?",
+ .si_type = NNG_STAT_BOOLEAN,
+ };
+ static const nni_stat_info raw_info = {
+ .si_name = "raw",
+ .si_desc = "raw mode?",
+ .si_type = NNG_STAT_BOOLEAN,
+ };
+ static const nni_stat_info mismatch_info = {
+ .si_name = "mismatch",
+ .si_desc = "pipes rejected (protocol mismatch)",
+ .si_type = NNG_STAT_COUNTER,
+ .si_atomic = true,
+ };
+ static const nni_stat_info already_info = {
+ .si_name = "already",
+ .si_desc = "pipes rejected (already connected)",
+ .si_type = NNG_STAT_COUNTER,
+ .si_atomic = true,
+ };
+ static const nni_stat_info ttl_drop_info = {
+ .si_name = "ttl_drop",
+ .si_desc = "messages dropped due to too many hops",
+ .si_type = NNG_STAT_COUNTER,
+ .si_unit = NNG_UNIT_MESSAGES,
+ .si_atomic = true,
+ };
+ static const nni_stat_info tx_drop_info = {
+ .si_name = "tx_drop",
+ .si_desc = "messages dropped undeliverable",
+ .si_type = NNG_STAT_COUNTER,
+ .si_unit = NNG_UNIT_MESSAGES,
+ .si_atomic = true,
+ };
+ static const nni_stat_info rx_malformed_info = {
+ .si_name = "rx_malformed",
+ .si_desc = "malformed messages received",
+ .si_type = NNG_STAT_COUNTER,
+ .si_unit = NNG_UNIT_MESSAGES,
+ .si_atomic = true,
+ };
+ static const nni_stat_info tx_malformed_info = {
+ .si_name = "tx_malformed",
+ .si_desc = "malformed messages not sent",
+ .si_type = NNG_STAT_COUNTER,
+ .si_unit = NNG_UNIT_MESSAGES,
+ .si_atomic = true,
+ };
+
+ pair1_add_sock_stat(s, &s->stat_poly, &poly_info);
+ pair1_add_sock_stat(s, &s->stat_raw, &raw_info);
+ pair1_add_sock_stat(s, &s->stat_reject_mismatch, &mismatch_info);
+ pair1_add_sock_stat(s, &s->stat_reject_already, &already_info);
+ pair1_add_sock_stat(s, &s->stat_ttl_drop, &ttl_drop_info);
+ pair1_add_sock_stat(s, &s->stat_tx_drop, &tx_drop_info);
+ pair1_add_sock_stat(s, &s->stat_rx_malformed, &rx_malformed_info);
+
+ if (raw) {
+ // This stat only makes sense in raw mode.
+ pair1_add_sock_stat(
+ s, &s->stat_tx_malformed, &tx_malformed_info);
+ }
+
+ nni_stat_set_bool(&s->stat_raw, raw);
+ nni_stat_set_bool(&s->stat_poly, false);
+#endif
+
+ s->raw = raw;
+ s->uwq = nni_sock_sendq(sock);
+ s->urq = nni_sock_recvq(sock);
+ nni_atomic_init(&s->ttl);
+ nni_atomic_set(&s->ttl, 8);
+
+ return (0);
+}
+
+static int
+pair1_sock_init(void *arg, nni_sock *sock)
+{
+ return (pair1_sock_init_impl(arg, sock, false));
+}
+
+static int
+pair1_sock_init_raw(void *arg, nni_sock *sock)
+{
+ return (pair1_sock_init_impl(arg, sock, true));
+}
+
+static void
+pair1_pipe_stop(void *arg)
+{
+ pair1_pipe *p = arg;
+ pair1_sock *s = p->pair;
+
+ nni_mtx_lock(&s->mtx);
+ if (s->p == p) {
+ s->p = NULL;
+ }
+ nni_mtx_unlock(&s->mtx);
+ nni_aio_stop(&p->aio_send);
+ nni_aio_stop(&p->aio_recv);
+ nni_aio_stop(&p->aio_put);
+ nni_aio_stop(&p->aio_get);
+}
+
+static void
+pair1_pipe_fini(void *arg)
+{
+ pair1_pipe *p = arg;
+
+ nni_aio_fini(&p->aio_send);
+ nni_aio_fini(&p->aio_recv);
+ nni_aio_fini(&p->aio_put);
+ nni_aio_fini(&p->aio_get);
+}
+
+static int
+pair1_pipe_init(void *arg, nni_pipe *pipe, void *pair)
+{
+ pair1_pipe *p = arg;
+
+ nni_aio_init(&p->aio_send, pair1_pipe_send_cb, p);
+ nni_aio_init(&p->aio_recv, pair1_pipe_recv_cb, p);
+ nni_aio_init(&p->aio_get, pair1_pipe_get_cb, p);
+ nni_aio_init(&p->aio_put, pair1_pipe_put_cb, p);
+
+ p->pipe = pipe;
+ p->pair = pair;
+
+ return (0);
+}
+
+static int
+pair1_pipe_start(void *arg)
+{
+ pair1_pipe *p = arg;
+ pair1_sock *s = p->pair;
+
+ if (nni_pipe_peer(p->pipe) != NNG_PAIR1_PEER) {
+ BUMP_STAT(&s->stat_reject_mismatch);
+ // Peer protocol mismatch.
+ return (NNG_EPROTO);
+ }
+
+ nni_mtx_lock(&s->mtx);
+ if (s->p != NULL) {
+ nni_mtx_unlock(&s->mtx);
+ BUMP_STAT(&s->stat_reject_already);
+ return (NNG_EBUSY);
+ }
+ s->p = p;
+ nni_mtx_unlock(&s->mtx);
+
+ // Schedule a get.
+ nni_msgq_aio_get(s->uwq, &p->aio_get);
+
+ // And the pipe read of course.
+ nni_pipe_recv(p->pipe, &p->aio_recv);
+
+ return (0);
+}
+
+static void
+pair1_pipe_close(void *arg)
+{
+ pair1_pipe *p = arg;
+
+ nni_aio_close(&p->aio_send);
+ nni_aio_close(&p->aio_recv);
+ nni_aio_close(&p->aio_put);
+ nni_aio_close(&p->aio_get);
+}
+
+// pair1_pipe_recv_cb: completion of a pipe read.  Validates the
+// incoming hop-count header (a single u32 in the body, <= 0xff),
+// enforces the configured TTL, then forwards the message to the
+// upper read queue.  Malformed messages close the pipe; TTL-expired
+// messages are dropped but the pipe keeps reading.
+static void
+pair1_pipe_recv_cb(void *arg)
+{
+ pair1_pipe *p = arg;
+ pair1_sock *s = p->pair;
+ nni_msg * msg;
+ uint32_t hdr;
+ nni_pipe * pipe = p->pipe;
+ size_t len;
+
+ if (nni_aio_result(&p->aio_recv) != 0) {
+ nni_pipe_close(p->pipe);
+ return;
+ }
+
+ msg = nni_aio_get_msg(&p->aio_recv);
+ nni_aio_set_msg(&p->aio_recv, NULL);
+
+ // Store the pipe ID.
+ nni_msg_set_pipe(msg, nni_pipe_id(p->pipe));
+
+ // If the message is missing the hop count header, scrap it.
+ if ((nni_msg_len(msg) < sizeof(uint32_t)) ||
+ ((hdr = nni_msg_trim_u32(msg)) > 0xff)) {
+ BUMP_STAT(&s->stat_rx_malformed);
+ nni_msg_free(msg);
+ nni_pipe_close(pipe);
+ return;
+ }
+
+ len = nni_msg_len(msg);
+
+ // If we bounced too many times, discard the message, but
+ // keep getting more.
+ if ((int) hdr > nni_atomic_get(&s->ttl)) {
+ BUMP_STAT(&s->stat_ttl_drop);
+ nni_msg_free(msg);
+ nni_pipe_recv(pipe, &p->aio_recv);
+ return;
+ }
+
+ // Store the hop count in the header.
+ nni_msg_header_append_u32(msg, hdr);
+
+ // Send the message up.  The next pipe read is scheduled from
+ // pair1_pipe_put_cb once this put completes.
+ nni_aio_set_msg(&p->aio_put, msg);
+ nni_sock_bump_rx(s->sock, len);
+ nni_msgq_aio_put(s->urq, &p->aio_put);
+}
+
+// pair1_pipe_put_cb: completion of delivering a received message to
+// the upper read queue.  On failure the message is freed and the pipe
+// closed; on success the next pipe read is scheduled.
+static void
+pair1_pipe_put_cb(void *arg)
+{
+ pair1_pipe *p = arg;
+
+ if (nni_aio_result(&p->aio_put) != 0) {
+ nni_msg_free(nni_aio_get_msg(&p->aio_put));
+ nni_aio_set_msg(&p->aio_put, NULL);
+ nni_pipe_close(p->pipe);
+ return;
+ }
+ nni_pipe_recv(p->pipe, &p->aio_recv);
+}
+
+// pair1_pipe_get_cb: completion of fetching a message from the upper
+// write queue.  Normalizes the hop-count header before transmission:
+// raw mode validates the caller-supplied header (one u32, <= 254),
+// cooked mode replaces any header with a fresh count of zero.  The
+// count is then incremented and prepended, and the message sent.
+static void
+pair1_pipe_get_cb(void *arg)
+{
+ pair1_pipe *p = arg;
+ pair1_sock *s = p->pair;
+ nni_msg * msg;
+ uint32_t hops;
+
+ if (nni_aio_result(&p->aio_get) != 0) {
+ nni_pipe_close(p->pipe);
+ return;
+ }
+
+ msg = nni_aio_get_msg(&p->aio_get);
+ nni_aio_set_msg(&p->aio_get, NULL);
+
+ // Raw mode messages have the header already formed, with a hop count.
+ // Cooked mode messages have no header so we have to add one.
+ if (s->raw) {
+ // 254 so that the post-increment value still fits in 0xff.
+ if ((nni_msg_header_len(msg) != sizeof(uint32_t)) ||
+ ((hops = nni_msg_header_trim_u32(msg)) > 254)) {
+ BUMP_STAT(&s->stat_tx_malformed);
+ nni_msg_free(msg);
+ nni_msgq_aio_get(s->uwq, &p->aio_get);
+ return;
+ }
+#if NNG_TEST_LIB
+ } else if (s->inject_header) {
+ // Test hook: send the message with whatever header the
+ // test supplied, bypassing normalization entirely.
+ nni_aio_set_msg(&p->aio_send, msg);
+ nni_pipe_send(p->pipe, &p->aio_send);
+ return;
+#endif
+ } else {
+ // Strip off any previously existing header, such as when
+ // replying to messages.
+ nni_msg_header_clear(msg);
+ hops = 0;
+ }
+
+ hops++;
+
+ // Insert the hops header.
+ nni_msg_header_append_u32(msg, hops);
+
+ nni_aio_set_msg(&p->aio_send, msg);
+ nni_pipe_send(p->pipe, &p->aio_send);
+}
+
+// pair1_pipe_send_cb: completion of a pipe write.  On failure the
+// unsent message is freed and the pipe closed; on success the next
+// message is fetched from the upper write queue.
+static void
+pair1_pipe_send_cb(void *arg)
+{
+ pair1_pipe *p = arg;
+ pair1_sock *s = p->pair;
+
+ if (nni_aio_result(&p->aio_send) != 0) {
+ nni_msg_free(nni_aio_get_msg(&p->aio_send));
+ nni_aio_set_msg(&p->aio_send, NULL);
+ nni_pipe_close(p->pipe);
+ return;
+ }
+
+ nni_msgq_aio_get(s->uwq, &p->aio_get);
+}
+
+// pair1_sock_open: no work to do at socket open; pipes drive traffic.
+static void
+pair1_sock_open(void *arg)
+{
+ NNI_ARG_UNUSED(arg);
+}
+
+// pair1_sock_close: no work to do at socket close; pipe close/stop
+// hooks handle teardown of outstanding operations.
+static void
+pair1_sock_close(void *arg)
+{
+ NNI_ARG_UNUSED(arg);
+}
+
+// Setter for NNG_OPT_MAXTTL: accepts an int in [1, NNI_MAX_MAX_TTL]
+// and stores it atomically; returns the nni_copyin_int error
+// (e.g. NNG_EINVAL) otherwise.
+static int
+pair1_sock_set_max_ttl(void *arg, const void *buf, size_t sz, nni_opt_type t)
+{
+ pair1_sock *s = arg;
+ int rv;
+ int ttl;
+
+ if ((rv = nni_copyin_int(&ttl, buf, sz, 1, NNI_MAX_MAX_TTL, t)) == 0) {
+ nni_atomic_set(&s->ttl, ttl);
+ }
+
+ return (rv);
+}
+
+// Getter for NNG_OPT_MAXTTL: copies out the current TTL limit.
+static int
+pair1_sock_get_max_ttl(void *arg, void *buf, size_t *szp, nni_opt_type t)
+{
+ pair1_sock *s = arg;
+ return (nni_copyout_int(nni_atomic_get(&s->ttl), buf, szp, t));
+}
+
+#ifdef NNG_TEST_LIB
+// Test-only option setter: when true, pair1_pipe_get_cb sends the
+// message header as-is, letting tests inject malformed headers.
+static int
+pair1_set_test_inject_header(void *arg, const void *buf, size_t sz, nni_type t)
+{
+ pair1_sock *s = arg;
+ int rv;
+ nni_mtx_lock(&s->mtx);
+ rv = nni_copyin_bool(&s->inject_header, buf, sz, t);
+ nni_mtx_unlock(&s->mtx);
+ return (rv);
+}
+#endif
+
+// pair1_sock_send: user-facing send; counts the bytes against the tx
+// statistic and queues the message on the upper write queue.
+static void
+pair1_sock_send(void *arg, nni_aio *aio)
+{
+ pair1_sock *s = arg;
+
+ nni_sock_bump_tx(s->sock, nni_msg_len(nni_aio_get_msg(aio)));
+ nni_msgq_aio_put(s->uwq, aio);
+}
+
+// pair1_sock_recv: user-facing receive; fetches the next message
+// from the upper read queue.
+static void
+pair1_sock_recv(void *arg, nni_aio *aio)
+{
+ pair1_sock *s = arg;
+
+ nni_msgq_aio_get(s->urq, aio);
+}
+
+// Pipe-level entry points shared by cooked and raw pair1 sockets.
+static nni_proto_pipe_ops pair1_pipe_ops = {
+ .pipe_size = sizeof(pair1_pipe),
+ .pipe_init = pair1_pipe_init,
+ .pipe_fini = pair1_pipe_fini,
+ .pipe_start = pair1_pipe_start,
+ .pipe_close = pair1_pipe_close,
+ .pipe_stop = pair1_pipe_stop,
+};
+
+// Socket options (NULL-name terminated), shared with the raw variant.
+static nni_option pair1_sock_options[] = {
+ {
+ .o_name = NNG_OPT_MAXTTL,
+ .o_get = pair1_sock_get_max_ttl,
+ .o_set = pair1_sock_set_max_ttl,
+ },
+#ifdef NNG_TEST_LIB
+ {
+ // Test only option to pass header unmolested. This allows
+ // us to inject bad header contents.
+ .o_name = "pair1_test_inject_header",
+ .o_set = pair1_set_test_inject_header,
+ },
+#endif
+ // terminate list
+ {
+ .o_name = NULL,
+ },
+};
+
+// Socket-level entry points for the cooked-mode socket.
+static nni_proto_sock_ops pair1_sock_ops = {
+ .sock_size = sizeof(pair1_sock),
+ .sock_init = pair1_sock_init,
+ .sock_fini = pair1_sock_fini,
+ .sock_open = pair1_sock_open,
+ .sock_close = pair1_sock_close,
+ .sock_recv = pair1_sock_recv,
+ .sock_send = pair1_sock_send,
+ .sock_options = pair1_sock_options,
+};
+
+// Protocol descriptor for cooked-mode pair1 (self and peer are both
+// pair1; sockets both send and receive).
+static nni_proto pair1_proto = {
+ .proto_version = NNI_PROTOCOL_VERSION,
+ .proto_self = { NNG_PAIR1_SELF, NNG_PAIR1_SELF_NAME },
+ .proto_peer = { NNG_PAIR1_PEER, NNG_PAIR1_PEER_NAME },
+ .proto_flags = NNI_PROTO_FLAG_SNDRCV,
+ .proto_sock_ops = &pair1_sock_ops,
+ .proto_pipe_ops = &pair1_pipe_ops,
+};
+
+// Public constructor: open a cooked-mode pair1 socket.
+int
+nng_pair1_open(nng_socket *sock)
+{
+ return (nni_proto_open(sock, &pair1_proto));
+}
+
+// Socket-level entry points for the raw-mode socket; identical to the
+// cooked table except for sock_init, which marks the socket raw.
+static nni_proto_sock_ops pair1_sock_ops_raw = {
+ .sock_size = sizeof(pair1_sock),
+ .sock_init = pair1_sock_init_raw,
+ .sock_fini = pair1_sock_fini,
+ .sock_open = pair1_sock_open,
+ .sock_close = pair1_sock_close,
+ .sock_recv = pair1_sock_recv,
+ .sock_send = pair1_sock_send,
+ .sock_options = pair1_sock_options,
+};
+
+// Protocol descriptor for raw-mode pair1 (adds NNI_PROTO_FLAG_RAW).
+static nni_proto pair1_proto_raw = {
+ .proto_version = NNI_PROTOCOL_VERSION,
+ .proto_self = { NNG_PAIR1_SELF, NNG_PAIR1_SELF_NAME },
+ .proto_peer = { NNG_PAIR1_PEER, NNG_PAIR1_PEER_NAME },
+ .proto_flags = NNI_PROTO_FLAG_SNDRCV | NNI_PROTO_FLAG_RAW,
+ .proto_sock_ops = &pair1_sock_ops_raw,
+ .proto_pipe_ops = &pair1_pipe_ops,
+};
+
+// Public constructor: open a raw-mode pair1 socket.
+int
+nng_pair1_open_raw(nng_socket *sock)
+{
+ return (nni_proto_open(sock, &pair1_proto_raw));
+}
diff --git a/src/sp/protocol/pair1/pair1_poly.c b/src/sp/protocol/pair1/pair1_poly.c
new file mode 100644
index 00000000..6c16745c
--- /dev/null
+++ b/src/sp/protocol/pair1/pair1_poly.c
@@ -0,0 +1,535 @@
+//
+// Copyright 2020 Staysail Systems, Inc. <info@staysail.tech>
+// Copyright 2018 Capitar IT Group BV <info@capitar.com>
+//
+// This software is supplied under the terms of the MIT License, a
+// copy of which should be located in the distribution where this
+// file was obtained (LICENSE.txt). A copy of the license may also be
+// found online at https://opensource.org/licenses/MIT.
+//
+
+#include <stdlib.h>
+
+#include "core/nng_impl.h"
+#include "nng/protocol/pair1/pair.h"
+
+// Pair1 polyamorous mode. The PAIRv1 protocol is normally a simple 1:1
+// messaging pattern, but this mode offers the ability to use a best-effort
+// multicast type of communication. There are limitations however.
+// Most notably this does not interact well with nng_device type
+// proxies, and there is no support for raw mode.
+
+// THIS FEATURE IS DEPRECATED. We discourage use in new applications.
+
+#ifdef NNG_ENABLE_STATS
+#define BUMP_STAT(x) nni_stat_inc(x, 1)
+#else
+#define BUMP_STAT(x)
+#endif
+
+typedef struct pair1poly_pipe pair1poly_pipe;
+typedef struct pair1poly_sock pair1poly_sock;
+
+static void pair1poly_sock_get_cb(void *);
+static void pair1poly_pipe_send_cb(void *);
+static void pair1poly_pipe_recv_cb(void *);
+static void pair1poly_pipe_get_cb(void *);
+static void pair1poly_pipe_put_cb(void *);
+static void pair1poly_pipe_fini(void *);
+
+// pair1poly_sock is our per-socket protocol private structure.
+struct pair1poly_sock {
+ nni_msgq * uwq;                       // upper (user) write queue
+ nni_msgq * urq;                       // upper (user) read queue
+ nni_sock * sock;                      // owning socket handle
+ nni_atomic_int ttl;                   // max hop count before drop
+ nni_mtx mtx;                          // guards pipes/plist/started
+ nni_id_map pipes;                     // pipe-id -> pair1poly_pipe
+ nni_list plist;                       // list of attached pipes
+ bool started;                         // socket-level get scheduled?
+ nni_aio aio_get;                      // pulls from uwq to distribute
+ nni_stat_item stat_poly;
+ nni_stat_item stat_raw;
+ nni_stat_item stat_reject_mismatch;
+ nni_stat_item stat_reject_already;
+ nni_stat_item stat_ttl_drop;
+ nni_stat_item stat_rx_malformed;
+ nni_stat_item stat_tx_malformed;
+ nni_stat_item stat_tx_drop;
+};
+
+// pair1poly_pipe is our per-pipe protocol private structure.
+struct pair1poly_pipe {
+ nni_pipe * pipe;                      // underlying transport pipe
+ pair1poly_sock *pair;                 // back-pointer to socket state
+ nni_msgq * send_queue;                // per-pipe buffered send queue
+ nni_aio aio_send;                     // pipe write in flight
+ nni_aio aio_recv;                     // pipe read in flight
+ nni_aio aio_get;                      // get from send_queue
+ nni_aio aio_put;                      // put to urq
+ nni_list_node node;                   // membership in sock plist
+};
+
+// pair1poly_sock_fini: release socket-level resources created in
+// pair1poly_sock_init (distribution aio, pipe map, mutex).
+static void
+pair1poly_sock_fini(void *arg)
+{
+ pair1poly_sock *s = arg;
+
+ nni_aio_fini(&s->aio_get);
+ nni_id_map_fini(&s->pipes);
+ nni_mtx_fini(&s->mtx);
+}
+
+#ifdef NNG_ENABLE_STATS
+// Helper: initialize a statistic and attach it to the socket.
+static void
+pair1_add_sock_stat(
+ pair1poly_sock *s, nni_stat_item *item, const nni_stat_info *info)
+{
+ nni_stat_init(item, info);
+ nni_sock_add_stat(s->sock, item);
+}
+#endif
+
+// pair1poly_sock_init: construct socket-level state for polyamorous
+// pair1.  Sets up the pipe map and list, the distribution aio, the
+// statistics, and the default TTL of 8.  Cannot fail (returns 0).
+static int
+pair1poly_sock_init(void *arg, nni_sock *sock)
+{
+ pair1poly_sock *s = arg;
+
+ nni_id_map_init(&s->pipes, 0, 0, false);
+ NNI_LIST_INIT(&s->plist, pair1poly_pipe, node);
+ s->sock = sock;
+
+ // Raw mode uses this.
+ nni_mtx_init(&s->mtx);
+
+ nni_aio_init(&s->aio_get, pair1poly_sock_get_cb, s);
+
+#ifdef NNG_ENABLE_STATS
+ static const nni_stat_info poly_info = {
+ .si_name = "poly",
+ .si_desc = "polyamorous mode?",
+ .si_type = NNG_STAT_BOOLEAN,
+ };
+ static const nni_stat_info raw_info = {
+ .si_name = "raw",
+ .si_desc = "raw mode?",
+ .si_type = NNG_STAT_BOOLEAN,
+ };
+ static const nni_stat_info mismatch_info = {
+ .si_name = "mismatch",
+ .si_desc = "pipes rejected (protocol mismatch)",
+ .si_type = NNG_STAT_COUNTER,
+ .si_atomic = true,
+ };
+ static const nni_stat_info already_info = {
+ .si_name = "already",
+ .si_desc = "pipes rejected (already connected)",
+ .si_type = NNG_STAT_COUNTER,
+ .si_atomic = true,
+ };
+ static const nni_stat_info ttl_drop_info = {
+ .si_name = "ttl_drop",
+ .si_desc = "messages dropped due to too many hops",
+ .si_type = NNG_STAT_COUNTER,
+ .si_unit = NNG_UNIT_MESSAGES,
+ .si_atomic = true,
+ };
+ static const nni_stat_info tx_drop_info = {
+ .si_name = "tx_drop",
+ .si_desc = "messages dropped undeliverable",
+ .si_type = NNG_STAT_COUNTER,
+ .si_unit = NNG_UNIT_MESSAGES,
+ .si_atomic = true,
+ };
+ static const nni_stat_info rx_malformed_info = {
+ .si_name = "rx_malformed",
+ .si_desc = "malformed messages received",
+ .si_type = NNG_STAT_COUNTER,
+ .si_unit = NNG_UNIT_MESSAGES,
+ .si_atomic = true,
+ };
+ static const nni_stat_info tx_malformed_info = {
+ .si_name = "tx_malformed",
+ .si_desc = "malformed messages not sent",
+ .si_type = NNG_STAT_COUNTER,
+ .si_unit = NNG_UNIT_MESSAGES,
+ .si_atomic = true,
+ };
+
+ pair1_add_sock_stat(s, &s->stat_poly, &poly_info);
+ pair1_add_sock_stat(s, &s->stat_raw, &raw_info);
+ pair1_add_sock_stat(s, &s->stat_reject_mismatch, &mismatch_info);
+ pair1_add_sock_stat(s, &s->stat_reject_already, &already_info);
+ pair1_add_sock_stat(s, &s->stat_ttl_drop, &ttl_drop_info);
+ pair1_add_sock_stat(s, &s->stat_tx_drop, &tx_drop_info);
+ pair1_add_sock_stat(s, &s->stat_rx_malformed, &rx_malformed_info);
+ pair1_add_sock_stat(s, &s->stat_tx_malformed, &tx_malformed_info);
+
+ // This socket is always polyamorous and never raw.
+ nni_stat_set_bool(&s->stat_raw, false);
+ nni_stat_set_bool(&s->stat_poly, true);
+#endif
+
+ s->uwq = nni_sock_sendq(sock);
+ s->urq = nni_sock_recvq(sock);
+ nni_atomic_init(&s->ttl);
+ nni_atomic_set(&s->ttl, 8);
+
+ return (0);
+}
+
+// pair1poly_pipe_stop: wait for all outstanding aio operations on
+// this pipe to complete before teardown.
+static void
+pair1poly_pipe_stop(void *arg)
+{
+ pair1poly_pipe *p = arg;
+
+ nni_aio_stop(&p->aio_send);
+ nni_aio_stop(&p->aio_recv);
+ nni_aio_stop(&p->aio_put);
+ nni_aio_stop(&p->aio_get);
+}
+
+// pair1poly_pipe_fini: release the per-pipe aios and the buffered
+// send queue allocated in pair1poly_pipe_init.
+static void
+pair1poly_pipe_fini(void *arg)
+{
+ pair1poly_pipe *p = arg;
+
+ nni_aio_fini(&p->aio_send);
+ nni_aio_fini(&p->aio_recv);
+ nni_aio_fini(&p->aio_put);
+ nni_aio_fini(&p->aio_get);
+ nni_msgq_fini(p->send_queue);
+}
+
+// pair1poly_pipe_init: set up per-pipe state, including a small
+// (depth 2) buffered send queue that the socket distributes into.
+// Returns nonzero on allocation failure of the queue.
+static int
+pair1poly_pipe_init(void *arg, nni_pipe *pipe, void *pair)
+{
+ pair1poly_pipe *p = arg;
+ int rv;
+
+ nni_aio_init(&p->aio_send, pair1poly_pipe_send_cb, p);
+ nni_aio_init(&p->aio_recv, pair1poly_pipe_recv_cb, p);
+ nni_aio_init(&p->aio_get, pair1poly_pipe_get_cb, p);
+ nni_aio_init(&p->aio_put, pair1poly_pipe_put_cb, p);
+
+ if ((rv = nni_msgq_init(&p->send_queue, 2)) != 0) {
+ pair1poly_pipe_fini(p);
+ return (rv);
+ }
+
+ p->pipe = pipe;
+ p->pair = pair;
+
+ return (0);
+}
+
+// pair1poly_pipe_start: admit a pipe to a polyamorous socket.
+// Rejects protocol mismatches (NNG_EPROTO); otherwise registers the
+// pipe in the id map and list, lazily schedules the socket-level
+// distribution get on first start, and kicks off both directions.
+static int
+pair1poly_pipe_start(void *arg)
+{
+ pair1poly_pipe *p = arg;
+ pair1poly_sock *s = p->pair;
+ uint32_t id;
+ int rv;
+
+ nni_mtx_lock(&s->mtx);
+ if (nni_pipe_peer(p->pipe) != NNG_PAIR1_PEER) {
+ nni_mtx_unlock(&s->mtx);
+ BUMP_STAT(&s->stat_reject_mismatch);
+ // Peer protocol mismatch.
+ return (NNG_EPROTO);
+ }
+
+ id = nni_pipe_id(p->pipe);
+ if ((rv = nni_id_set(&s->pipes, id, p)) != 0) {
+ nni_mtx_unlock(&s->mtx);
+ return (rv);
+ }
+ if (!s->started) {
+ nni_msgq_aio_get(s->uwq, &s->aio_get);
+ }
+ nni_list_append(&s->plist, p);
+ s->started = true;
+ nni_mtx_unlock(&s->mtx);
+
+ // Schedule a get. In polyamorous mode we get on the per pipe
+ // send_queue, as the socket distributes to us. In monogamous mode
+ // we bypass and get from the upper write queue directly (saving a
+ // set of context switches).
+ nni_msgq_aio_get(p->send_queue, &p->aio_get);
+
+ // And the pipe read of course.
+ nni_pipe_recv(p->pipe, &p->aio_recv);
+
+ return (0);
+}
+
+// pair1poly_pipe_close: abort outstanding aio operations, deregister
+// the pipe from the socket's map and list, and close the buffered
+// send queue so pending senders unblock.
+static void
+pair1poly_pipe_close(void *arg)
+{
+ pair1poly_pipe *p = arg;
+ pair1poly_sock *s = p->pair;
+
+ nni_aio_close(&p->aio_send);
+ nni_aio_close(&p->aio_recv);
+ nni_aio_close(&p->aio_put);
+ nni_aio_close(&p->aio_get);
+
+ nni_mtx_lock(&s->mtx);
+ nni_id_remove(&s->pipes, nni_pipe_id(p->pipe));
+ nni_list_node_remove(&p->node);
+ nni_mtx_unlock(&s->mtx);
+
+ nni_msgq_close(p->send_queue);
+}
+
+// pair1poly_pipe_recv_cb: completion of a pipe read.  Same validation
+// as monogamous pair1: the body must start with a hop-count u32
+// (<= 0xff), and counts beyond the TTL are dropped while the pipe
+// keeps reading.  Valid messages are tagged with the pipe id (so the
+// application can reply to the right peer) and forwarded upward.
+static void
+pair1poly_pipe_recv_cb(void *arg)
+{
+ pair1poly_pipe *p = arg;
+ pair1poly_sock *s = p->pair;
+ nni_msg * msg;
+ uint32_t hdr;
+ nni_pipe * pipe = p->pipe;
+ size_t len;
+
+ if (nni_aio_result(&p->aio_recv) != 0) {
+ nni_pipe_close(p->pipe);
+ return;
+ }
+
+ msg = nni_aio_get_msg(&p->aio_recv);
+ nni_aio_set_msg(&p->aio_recv, NULL);
+
+ // Store the pipe ID.
+ nni_msg_set_pipe(msg, nni_pipe_id(p->pipe));
+
+ // If the message is missing the hop count header, scrap it.
+ if ((nni_msg_len(msg) < sizeof(uint32_t)) ||
+ ((hdr = nni_msg_trim_u32(msg)) > 0xff)) {
+ BUMP_STAT(&s->stat_rx_malformed);
+ nni_msg_free(msg);
+ nni_pipe_close(pipe);
+ return;
+ }
+
+ len = nni_msg_len(msg);
+
+ // If we bounced too many times, discard the message, but
+ // keep getting more.
+ if ((int) hdr > nni_atomic_get(&s->ttl)) {
+ BUMP_STAT(&s->stat_ttl_drop);
+ nni_msg_free(msg);
+ nni_pipe_recv(pipe, &p->aio_recv);
+ return;
+ }
+
+ // Store the hop count in the header.
+ nni_msg_header_append_u32(msg, hdr);
+
+ // Send the message up.
+ nni_aio_set_msg(&p->aio_put, msg);
+ nni_sock_bump_rx(s->sock, len);
+ nni_msgq_aio_put(s->urq, &p->aio_put);
+}
+
+// pair1poly_sock_get_cb: distribution step.  Takes the next message
+// from the upper write queue and routes it to the per-pipe send
+// queue selected by the message's pipe id (or the first connected
+// pipe when no id is set).  Delivery is best-effort: if the target
+// queue is full or the pipe is gone, the message is dropped.
+static void
+pair1poly_sock_get_cb(void *arg)
+{
+ pair1poly_pipe *p;
+ pair1poly_sock *s = arg;
+ nni_msg * msg;
+ uint32_t id;
+
+ if (nni_aio_result(&s->aio_get) != 0) {
+ // Socket closing...
+ return;
+ }
+
+ msg = nni_aio_get_msg(&s->aio_get);
+ nni_aio_set_msg(&s->aio_get, NULL);
+
+ p = NULL;
+ nni_mtx_lock(&s->mtx);
+ // If no pipe was requested, we look for any connected peer.
+ if (((id = nni_msg_get_pipe(msg)) == 0) &&
+ (!nni_list_empty(&s->plist))) {
+ p = nni_list_first(&s->plist);
+ } else {
+ p = nni_id_get(&s->pipes, id);
+ }
+
+ // Try a non-blocking send. If this fails we just discard the
+ // message. We have to do this to avoid head-of-line blocking
+ // for messages sent to other pipes. Note that there is some
+ // buffering in the send_queue.
+ if ((p == NULL) || nni_msgq_tryput(p->send_queue, msg) != 0) {
+ BUMP_STAT(&s->stat_tx_drop);
+ nni_msg_free(msg);
+ }
+
+ nni_mtx_unlock(&s->mtx);
+ // Rearm for the next message to distribute.
+ nni_msgq_aio_get(s->uwq, &s->aio_get);
+}
+
+// pair1poly_pipe_put_cb: completion of delivering a received message
+// upward.  Failure frees the message and closes the pipe; success
+// schedules the next pipe read.
+static void
+pair1poly_pipe_put_cb(void *arg)
+{
+ pair1poly_pipe *p = arg;
+
+ if (nni_aio_result(&p->aio_put) != 0) {
+ nni_msg_free(nni_aio_get_msg(&p->aio_put));
+ nni_aio_set_msg(&p->aio_put, NULL);
+ nni_pipe_close(p->pipe);
+ return;
+ }
+ nni_pipe_recv(p->pipe, &p->aio_recv);
+}
+
+// pair1poly_pipe_get_cb: completion of fetching from the per-pipe
+// send queue.  Poly mode is cooked-only, so any existing header is
+// discarded and a fresh hop count of 1 is prepended before sending.
+static void
+pair1poly_pipe_get_cb(void *arg)
+{
+ pair1poly_pipe *p = arg;
+ nni_msg * msg;
+
+ if (nni_aio_result(&p->aio_get) != 0) {
+ nni_pipe_close(p->pipe);
+ return;
+ }
+
+ msg = nni_aio_get_msg(&p->aio_get);
+ nni_aio_set_msg(&p->aio_get, NULL);
+
+ // Cooked mode messages have no header so we have to add one.
+ // Strip off any previously existing header, such as when
+ // replying to messages.
+ nni_msg_header_clear(msg);
+
+ // Insert the hops header.
+ nni_msg_header_append_u32(msg, 1);
+
+ nni_aio_set_msg(&p->aio_send, msg);
+ nni_pipe_send(p->pipe, &p->aio_send);
+}
+
+// pair1poly_pipe_send_cb: completion of a pipe write.  Failure frees
+// the unsent message and closes the pipe; success fetches the next
+// message from this pipe's send queue.
+static void
+pair1poly_pipe_send_cb(void *arg)
+{
+ pair1poly_pipe *p = arg;
+
+ if (nni_aio_result(&p->aio_send) != 0) {
+ nni_msg_free(nni_aio_get_msg(&p->aio_send));
+ nni_aio_set_msg(&p->aio_send, NULL);
+ nni_pipe_close(p->pipe);
+ return;
+ }
+
+ nni_msgq_aio_get(p->send_queue, &p->aio_get);
+}
+
+// pair1poly_sock_open: nothing to do at socket open.
+static void
+pair1poly_sock_open(void *arg)
+{
+ NNI_ARG_UNUSED(arg);
+}
+
+// pair1poly_sock_close: abort the distribution aio so the
+// socket-level get loop terminates.
+static void
+pair1poly_sock_close(void *arg)
+{
+ pair1poly_sock *s = arg;
+ nni_aio_close(&s->aio_get);
+}
+
+// Setter for NNG_OPT_MAXTTL: int in [1, NNI_MAX_MAX_TTL], stored
+// atomically; propagates nni_copyin_int errors otherwise.
+static int
+pair1poly_set_max_ttl(void *arg, const void *buf, size_t sz, nni_opt_type t)
+{
+ pair1poly_sock *s = arg;
+ int rv;
+ int ttl;
+
+ if ((rv = nni_copyin_int(&ttl, buf, sz, 1, NNI_MAX_MAX_TTL, t)) == 0) {
+ nni_atomic_set(&s->ttl, ttl);
+ }
+
+ return (rv);
+}
+
+// Getter for NNG_OPT_MAXTTL.
+static int
+pair1poly_get_max_ttl(void *arg, void *buf, size_t *szp, nni_opt_type t)
+{
+ pair1poly_sock *s = arg;
+ return (nni_copyout_int(nni_atomic_get(&s->ttl), buf, szp, t));
+}
+
+// Getter for NNG_OPT_PAIR1_POLY: always true on this socket type.
+static int
+pair1poly_get_poly(void *arg, void *buf, size_t *szp, nni_opt_type t)
+{
+ NNI_ARG_UNUSED(arg);
+ return (nni_copyout_bool(true, buf, szp, t));
+}
+
+// pair1poly_sock_send: user-facing send; bumps the tx byte counter
+// and queues the message for distribution.
+static void
+pair1poly_sock_send(void *arg, nni_aio *aio)
+{
+ pair1poly_sock *s = arg;
+
+ nni_sock_bump_tx(s->sock, nni_msg_len(nni_aio_get_msg(aio)));
+ nni_msgq_aio_put(s->uwq, aio);
+}
+
+// pair1poly_sock_recv: user-facing receive from the upper read queue.
+static void
+pair1poly_sock_recv(void *arg, nni_aio *aio)
+{
+ pair1poly_sock *s = arg;
+
+ nni_msgq_aio_get(s->urq, aio);
+}
+
+// Pipe-level entry points for polyamorous pair1.
+static nni_proto_pipe_ops pair1poly_pipe_ops = {
+ .pipe_size = sizeof(pair1poly_pipe),
+ .pipe_init = pair1poly_pipe_init,
+ .pipe_fini = pair1poly_pipe_fini,
+ .pipe_start = pair1poly_pipe_start,
+ .pipe_close = pair1poly_pipe_close,
+ .pipe_stop = pair1poly_pipe_stop,
+};
+
+// Socket options (NULL-name terminated).
+static nni_option pair1poly_sock_options[] = {
+ {
+ .o_name = NNG_OPT_MAXTTL,
+ .o_get = pair1poly_get_max_ttl,
+ .o_set = pair1poly_set_max_ttl,
+ },
+ {
+ // Read-only: reports that this socket is polyamorous.
+ .o_name = NNG_OPT_PAIR1_POLY,
+ .o_get = pair1poly_get_poly,
+ },
+ // terminate list
+ {
+ .o_name = NULL,
+ },
+};
+
+// Socket-level entry points for polyamorous pair1.
+static nni_proto_sock_ops pair1poly_sock_ops = {
+ .sock_size = sizeof(pair1poly_sock),
+ .sock_init = pair1poly_sock_init,
+ .sock_fini = pair1poly_sock_fini,
+ .sock_open = pair1poly_sock_open,
+ .sock_close = pair1poly_sock_close,
+ .sock_recv = pair1poly_sock_recv,
+ .sock_send = pair1poly_sock_send,
+ .sock_options = pair1poly_sock_options,
+};
+
+// Protocol descriptor: wire-compatible with pair1 (same self/peer
+// ids), differing only in local distribution behavior.
+static nni_proto pair1poly_proto = {
+ .proto_version = NNI_PROTOCOL_VERSION,
+ .proto_self = { NNG_PAIR1_SELF, NNG_PAIR1_SELF_NAME },
+ .proto_peer = { NNG_PAIR1_PEER, NNG_PAIR1_PEER_NAME },
+ .proto_flags = NNI_PROTO_FLAG_SNDRCV,
+ .proto_sock_ops = &pair1poly_sock_ops,
+ .proto_pipe_ops = &pair1poly_pipe_ops,
+};
+
+// Public constructor: open a polyamorous pair1 socket (deprecated).
+int
+nng_pair1_open_poly(nng_socket *sock)
+{
+ return (nni_proto_open(sock, &pair1poly_proto));
+}
diff --git a/src/sp/protocol/pair1/pair1_poly_test.c b/src/sp/protocol/pair1/pair1_poly_test.c
new file mode 100644
index 00000000..f26f7809
--- /dev/null
+++ b/src/sp/protocol/pair1/pair1_poly_test.c
@@ -0,0 +1,370 @@
+//
+// Copyright 2020 Staysail Systems, Inc. <info@staysail.tech>
+// Copyright 2017 Capitar IT Group BV <info@capitar.com>
+//
+// This software is supplied under the terms of the MIT License, a
+// copy of which should be located in the distribution where this
+// file was obtained (LICENSE.txt). A copy of the license may also be
+// found online at https://opensource.org/licenses/MIT.
+//
+
+#include <nuts.h>
+
+#define SECOND 1000
+
+#define APPEND_STR(m, s) NUTS_PASS(nng_msg_append(m, s, strlen(s)))
+#define CHECK_STR(m, s) \
+ NUTS_TRUE(nng_msg_len(m) == strlen(s)); \
+ NUTS_TRUE(memcmp(nng_msg_body(m), s, strlen(s)) == 0)
+
+// Verify protocol identity constants and names: poly pair1 reports
+// itself and its peer as pair1 (1.1) under both numeric and string
+// options.
+static void
+test_poly_identity(void)
+{
+ nng_socket s;
+ int p;
+ char * n;
+
+ NUTS_PASS(nng_pair1_open_poly(&s));
+ NUTS_PASS(nng_socket_get_int(s, NNG_OPT_PROTO, &p));
+ NUTS_TRUE(p == NUTS_PROTO(1u, 1u)); // 32
+ NUTS_PASS(nng_socket_get_int(s, NNG_OPT_PEER, &p));
+ NUTS_TRUE(p == NUTS_PROTO(1u, 1u)); // 33
+ NUTS_PASS(nng_socket_get_string(s, NNG_OPT_PROTONAME, &n));
+ NUTS_MATCH(n, "pair1");
+ nng_strfree(n);
+ NUTS_PASS(nng_socket_get_string(s, NNG_OPT_PEERNAME, &n));
+ NUTS_MATCH(n, "pair1");
+ nng_strfree(n);
+ NUTS_CLOSE(s);
+}
+
+// Verify best-effort delivery: with tiny buffers, sends on the poly
+// socket never block or fail — surplus messages are silently dropped
+// rather than timing out.
+void
+test_poly_best_effort(void)
+{
+ nng_socket s1;
+ nng_socket c1;
+ nng_msg * msg;
+
+ NUTS_PASS(nng_pair1_open_poly(&s1));
+ NUTS_PASS(nng_pair1_open(&c1));
+
+ NUTS_PASS(nng_setopt_int(s1, NNG_OPT_RECVBUF, 1));
+ NUTS_PASS(nng_setopt_int(s1, NNG_OPT_SENDBUF, 1));
+ NUTS_PASS(nng_setopt_int(c1, NNG_OPT_RECVBUF, 1));
+ NUTS_PASS(nng_setopt_ms(s1, NNG_OPT_SENDTIMEO, SECOND));
+
+ NUTS_MARRY(s1, c1);
+
+ for (int i = 0; i < 10; i++) {
+ NUTS_PASS(nng_msg_alloc(&msg, 0));
+ NUTS_PASS(nng_sendmsg(s1, msg, 0));
+ }
+
+ NUTS_CLOSE(s1);
+ NUTS_CLOSE(c1);
+}
+
+// Verify cooked-mode routing on a poly socket with two peers: each
+// received message carries the sending pipe's id, and setting that id
+// on an outgoing message routes the reply back to the same peer.
+// Also checks that sending to a closed pipe's id goes nowhere.
+void
+test_poly_cooked(void)
+{
+ nng_socket s1;
+ nng_socket c1;
+ nng_socket c2;
+ nng_msg * msg;
+ bool v;
+ nng_pipe p1;
+ nng_pipe p2;
+
+ NUTS_PASS(nng_pair1_open_poly(&s1));
+ NUTS_PASS(nng_pair1_open(&c1));
+ NUTS_PASS(nng_pair1_open(&c2));
+ NUTS_PASS(nng_setopt_ms(s1, NNG_OPT_SENDTIMEO, SECOND));
+ NUTS_PASS(nng_setopt_ms(c1, NNG_OPT_SENDTIMEO, SECOND));
+ NUTS_PASS(nng_setopt_ms(c2, NNG_OPT_SENDTIMEO, SECOND));
+ NUTS_PASS(nng_setopt_ms(s1, NNG_OPT_RECVTIMEO, SECOND / 10));
+ NUTS_PASS(nng_setopt_ms(c1, NNG_OPT_RECVTIMEO, SECOND / 10));
+ NUTS_PASS(nng_setopt_ms(c2, NNG_OPT_RECVTIMEO, SECOND / 10));
+
+ // The poly option must read back as true.
+ NUTS_PASS(nng_getopt_bool(s1, NNG_OPT_PAIR1_POLY, &v));
+ NUTS_TRUE(v);
+
+ NUTS_MARRY(s1, c1);
+ NUTS_MARRY(s1, c2);
+
+ // Receive from each client and capture its pipe id.
+ NUTS_PASS(nng_msg_alloc(&msg, 0));
+ APPEND_STR(msg, "ONE");
+ NUTS_PASS(nng_sendmsg(c1, msg, 0));
+ NUTS_PASS(nng_recvmsg(s1, &msg, 0));
+ CHECK_STR(msg, "ONE");
+ p1 = nng_msg_get_pipe(msg);
+ NUTS_TRUE(nng_pipe_id(p1) > 0);
+ nng_msg_free(msg);
+
+ NUTS_PASS(nng_msg_alloc(&msg, 0));
+ APPEND_STR(msg, "TWO");
+ NUTS_PASS(nng_sendmsg(c2, msg, 0));
+ NUTS_PASS(nng_recvmsg(s1, &msg, 0));
+ CHECK_STR(msg, "TWO");
+ p2 = nng_msg_get_pipe(msg);
+ NUTS_TRUE(nng_pipe_id(p2) > 0);
+ nng_msg_free(msg);
+
+ NUTS_TRUE(nng_pipe_id(p1) != nng_pipe_id(p2));
+
+ // Reply to each client via its pipe id.
+ NUTS_PASS(nng_msg_alloc(&msg, 0));
+
+ nng_msg_set_pipe(msg, p1);
+ APPEND_STR(msg, "UNO");
+ NUTS_PASS(nng_sendmsg(s1, msg, 0));
+ NUTS_PASS(nng_recvmsg(c1, &msg, 0));
+ CHECK_STR(msg, "UNO");
+ nng_msg_free(msg);
+
+ NUTS_PASS(nng_msg_alloc(&msg, 0));
+ nng_msg_set_pipe(msg, p2);
+ APPEND_STR(msg, "DOS");
+ NUTS_PASS(nng_sendmsg(s1, msg, 0));
+ NUTS_PASS(nng_recvmsg(c2, &msg, 0));
+ CHECK_STR(msg, "DOS");
+ nng_msg_free(msg);
+
+ // After c1 closes, a message addressed to its pipe is dropped
+ // (best effort) and must not arrive at the other client.
+ NUTS_PASS(nng_close(c1));
+
+ NUTS_PASS(nng_msg_alloc(&msg, 0));
+ nng_msg_set_pipe(msg, p1);
+ APPEND_STR(msg, "EIN");
+ NUTS_PASS(nng_sendmsg(s1, msg, 0));
+ NUTS_FAIL(nng_recvmsg(c2, &msg, 0), NNG_ETIMEDOUT);
+
+ NUTS_CLOSE(s1);
+ NUTS_CLOSE(c2);
+}
+
+// Verify default-pipe selection: a message with no pipe id goes to
+// the first connected peer, and after that peer closes, to the next.
+void
+test_poly_default(void)
+{
+ nng_socket s1;
+ nng_socket c1;
+ nng_socket c2;
+ nng_msg * msg;
+
+ NUTS_PASS(nng_pair1_open_poly(&s1));
+ NUTS_PASS(nng_pair1_open(&c1));
+ NUTS_PASS(nng_pair1_open(&c2));
+ NUTS_PASS(nng_setopt_ms(s1, NNG_OPT_SENDTIMEO, SECOND));
+ NUTS_PASS(nng_setopt_ms(c1, NNG_OPT_SENDTIMEO, SECOND));
+ NUTS_PASS(nng_setopt_ms(c2, NNG_OPT_SENDTIMEO, SECOND));
+
+ NUTS_MARRY(s1, c1);
+ NUTS_MARRY(s1, c2);
+
+ // This assumes poly picks the first suitor. Applications
+ // should not make the same assumption.
+ NUTS_PASS(nng_msg_alloc(&msg, 0));
+ APPEND_STR(msg, "YES");
+ NUTS_PASS(nng_sendmsg(s1, msg, 0));
+ NUTS_PASS(nng_recvmsg(c1, &msg, 0));
+ CHECK_STR(msg, "YES");
+ nng_msg_free(msg);
+
+ NUTS_CLOSE(c1);
+ NUTS_SLEEP(10);
+
+ // Verify that the other pipe is chosen as the next suitor.
+ NUTS_PASS(nng_msg_alloc(&msg, 0));
+ APPEND_STR(msg, "AGAIN");
+ NUTS_PASS(nng_sendmsg(s1, msg, 0));
+ NUTS_PASS(nng_recvmsg(c2, &msg, 0));
+ CHECK_STR(msg, "AGAIN");
+ nng_msg_free(msg);
+
+ NUTS_CLOSE(s1);
+ NUTS_CLOSE(c2);
+}
+
+// Stress clean shutdown: close the sockets while many messages are
+// still queued/in flight, ensuring no hang or crash on abort.
+void
+test_poly_close_abort(void)
+{
+ nng_socket s;
+ nng_socket c;
+
+ NUTS_PASS(nng_pair1_open_poly(&s));
+ NUTS_PASS(nng_pair1_open(&c));
+ NUTS_PASS(nng_setopt_ms(s, NNG_OPT_RECVTIMEO, 100));
+ NUTS_PASS(nng_setopt_ms(s, NNG_OPT_SENDTIMEO, 200));
+ NUTS_PASS(nng_setopt_int(s, NNG_OPT_RECVBUF, 1));
+ NUTS_PASS(nng_setopt_int(c, NNG_OPT_SENDBUF, 20));
+
+ NUTS_MARRY(c, s);
+
+ for (int i = 0; i < 20; i++) {
+ NUTS_SEND(c, "TEST");
+ }
+ NUTS_SLEEP(50);
+
+ NUTS_CLOSE(s);
+ NUTS_CLOSE(c);
+}
+
+// Verify that a message lacking the hop-count header is rejected by
+// the receiver (never delivered), using the test-only inject_header
+// option to bypass header insertion on the sender.
+void
+test_poly_recv_no_header(void)
+{
+ nng_socket s;
+ nng_socket c;
+ nng_msg * m;
+
+ NUTS_PASS(nng_pair1_open_poly(&s));
+ NUTS_PASS(nng_pair1_open(&c));
+ NUTS_PASS(nng_setopt_bool(c, "pair1_test_inject_header", true));
+ NUTS_PASS(nng_setopt_ms(s, NNG_OPT_RECVTIMEO, 100));
+ NUTS_PASS(nng_setopt_ms(s, NNG_OPT_SENDTIMEO, 200));
+
+ NUTS_MARRY(c, s);
+
+ NUTS_PASS(nng_msg_alloc(&m, 0));
+ NUTS_PASS(nng_sendmsg(c, m, 0));
+ NUTS_FAIL(nng_recvmsg(s, &m, 0), NNG_ETIMEDOUT);
+
+ NUTS_CLOSE(c);
+ NUTS_CLOSE(s);
+}
+
+// Verify that an out-of-range hop count (> 0xff) is discarded as
+// malformed by the receiver.
+void
+test_poly_recv_garbage(void)
+{
+ nng_socket s;
+ nng_socket c;
+ nng_msg * m;
+
+ NUTS_PASS(nng_pair1_open_poly(&s));
+ NUTS_PASS(nng_pair1_open(&c));
+ NUTS_PASS(nng_setopt_bool(c, "pair1_test_inject_header", true));
+ NUTS_PASS(nng_setopt_ms(s, NNG_OPT_RECVTIMEO, 100));
+ NUTS_PASS(nng_setopt_ms(s, NNG_OPT_SENDTIMEO, 200));
+
+ NUTS_MARRY(c, s);
+
+ // ridiculous hop count
+ NUTS_PASS(nng_msg_alloc(&m, 0));
+ NUTS_PASS(nng_msg_append_u32(m, 0x1000));
+ NUTS_PASS(nng_sendmsg(c, m, 0));
+ NUTS_FAIL(nng_recvmsg(s, &m, 0), NNG_ETIMEDOUT);
+
+ NUTS_CLOSE(c);
+ NUTS_CLOSE(s);
+}
+
+// Verify TTL option validation and enforcement against a raw peer
+// (raw mode lets the test set the hop-count header directly):
+// out-of-range option values are rejected; a hop count reaching the
+// limit is dropped; counts under the limit pass through incremented.
+void
+test_poly_ttl(void)
+{
+ nng_socket s1;
+ nng_socket c1;
+ nng_msg * msg;
+ uint32_t val;
+ int ttl;
+
+ NUTS_PASS(nng_pair1_open_poly(&s1));
+ NUTS_PASS(nng_pair1_open_raw(&c1));
+ NUTS_PASS(nng_setopt_ms(s1, NNG_OPT_RECVTIMEO, SECOND / 5));
+ NUTS_PASS(nng_setopt_ms(c1, NNG_OPT_RECVTIMEO, SECOND / 5));
+
+ // cannot set insane TTLs
+ NUTS_FAIL(nng_setopt_int(s1, NNG_OPT_MAXTTL, 0), NNG_EINVAL);
+ NUTS_FAIL(nng_setopt_int(s1, NNG_OPT_MAXTTL, 1000), NNG_EINVAL);
+ ttl = 8;
+ NUTS_FAIL(nng_setopt(s1, NNG_OPT_MAXTTL, &ttl, 1), NNG_EINVAL);
+ NUTS_FAIL(nng_setopt_bool(s1, NNG_OPT_MAXTTL, true), NNG_EBADTYPE);
+
+ NUTS_MARRY(s1, c1);
+
+ // Let's check enforcement of TTL
+ NUTS_PASS(nng_setopt_int(s1, NNG_OPT_MAXTTL, 4));
+ NUTS_PASS(nng_getopt_int(s1, NNG_OPT_MAXTTL, &ttl));
+ NUTS_TRUE(ttl == 4);
+
+ // Bad TTL bounces
+ NUTS_PASS(nng_msg_alloc(&msg, 0));
+ NUTS_PASS(nng_msg_header_append_u32(msg, 4));
+ NUTS_PASS(nng_sendmsg(c1, msg, 0));
+ NUTS_FAIL(nng_recvmsg(s1, &msg, 0), NNG_ETIMEDOUT);
+
+ // Good TTL passes
+ NUTS_PASS(nng_msg_alloc(&msg, 0));
+ NUTS_PASS(nng_msg_append_u32(msg, 0xFEEDFACE));
+ NUTS_PASS(nng_msg_header_append_u32(msg, 3));
+ NUTS_PASS(nng_sendmsg(c1, msg, 0));
+ NUTS_PASS(nng_recvmsg(s1, &msg, 0));
+ NUTS_PASS(nng_msg_trim_u32(msg, &val));
+ NUTS_TRUE(val == 0xFEEDFACE);
+ NUTS_PASS(nng_msg_header_trim_u32(msg, &val));
+ NUTS_TRUE(val == 4);
+ nng_msg_free(msg);
+
+ // Large TTL passes
+ NUTS_PASS(nng_setopt_int(s1, NNG_OPT_MAXTTL, 15));
+ NUTS_PASS(nng_msg_alloc(&msg, 0));
+ NUTS_PASS(nng_msg_append_u32(msg, 1234));
+ NUTS_PASS(nng_msg_header_append_u32(msg, 14));
+ NUTS_PASS(nng_sendmsg(c1, msg, 0));
+ NUTS_PASS(nng_recvmsg(s1, &msg, 0));
+ NUTS_PASS(nng_msg_trim_u32(msg, &val));
+ NUTS_TRUE(val == 1234);
+ NUTS_PASS(nng_msg_header_trim_u32(msg, &val));
+ NUTS_TRUE(val == 15);
+ nng_msg_free(msg);
+
+ // Max TTL fails
+ NUTS_PASS(nng_setopt_int(s1, NNG_OPT_MAXTTL, 15));
+ NUTS_PASS(nng_msg_alloc(&msg, 0));
+ NUTS_PASS(nng_msg_header_append_u32(msg, 15));
+ NUTS_PASS(nng_sendmsg(c1, msg, 0));
+ NUTS_FAIL(nng_recvmsg(s1, &msg, 0), NNG_ETIMEDOUT);
+
+ NUTS_CLOSE(s1);
+ NUTS_CLOSE(c1);
+}
+
+// Verify peer validation: dialing a pair0 socket into a poly pair1
+// listener is rejected, observable through the socket's "reject"
+// statistic counter.
+void
+test_poly_validate_peer(void)
+{
+ nng_socket s1, s2;
+ nng_stat * stats;
+ nng_stat * reject;
+ char * addr;
+
+ NUTS_ADDR(addr, "inproc");
+
+ NUTS_PASS(nng_pair1_open_poly(&s1));
+ NUTS_PASS(nng_pair0_open(&s2));
+
+ NUTS_PASS(nng_listen(s1, addr, NULL, 0));
+ NUTS_PASS(nng_dial(s2, addr, NULL, NNG_FLAG_NONBLOCK));
+
+ // Give the (nonblocking) dial time to connect and be rejected.
+ NUTS_SLEEP(100);
+ NUTS_PASS(nng_stats_get(&stats));
+
+ NUTS_TRUE(stats != NULL);
+ NUTS_TRUE((reject = nng_stat_find_socket(stats, s1)) != NULL);
+ NUTS_TRUE((reject = nng_stat_find(reject, "reject")) != NULL);
+
+ NUTS_TRUE(nng_stat_type(reject) == NNG_STAT_COUNTER);
+ NUTS_TRUE(nng_stat_value(reject) > 0);
+
+ NUTS_CLOSE(s1);
+ NUTS_CLOSE(s2);
+ nng_stats_free(stats);
+}
+
+// Registry of all pair1 poly tests (NULL terminated).
+TEST_LIST = {
+ { "pair1 poly identity", test_poly_identity },
+ { "pair1 poly best effort", test_poly_best_effort },
+ { "pair1 poly cooked", test_poly_cooked },
+ { "pair1 poly default", test_poly_default },
+ { "pair1 poly recv no header", test_poly_recv_no_header },
+ { "pair1 poly recv garbage", test_poly_recv_garbage },
+ { "pair1 poly ttl", test_poly_ttl },
+ { "pair1 poly close abort", test_poly_close_abort },
+ { "pair1 poly validate peer", test_poly_validate_peer },
+
+ { NULL, NULL },
+};
diff --git a/src/sp/protocol/pair1/pair1_test.c b/src/sp/protocol/pair1/pair1_test.c
new file mode 100644
index 00000000..881c4ac8
--- /dev/null
+++ b/src/sp/protocol/pair1/pair1_test.c
@@ -0,0 +1,433 @@
+//
+// Copyright 2020 Staysail Systems, Inc. <info@staysail.tech>
+// Copyright 2017 Capitar IT Group BV <info@capitar.com>
+//
+// This software is supplied under the terms of the MIT License, a
+// copy of which should be located in the distribution where this
+// file was obtained (LICENSE.txt). A copy of the license may also be
+// found online at https://opensource.org/licenses/MIT.
+//
+
+#include <nuts.h>
+
+#define SECOND 1000
+
+#define APPEND_STR(m, s) NUTS_TRUE(nng_msg_append(m, s, strlen(s)) == 0)
+#define CHECK_STR(m, s) \
+ NUTS_TRUE(nng_msg_len(m) == strlen(s)); \
+ NUTS_TRUE(memcmp(nng_msg_body(m), s, strlen(s)) == 0)
+
+// Check pair1's protocol identity: self/peer numbers and names.
+static void
+test_mono_identity(void)
+{
+	nng_socket s;
+	int        p;
+	char *     n;
+
+	NUTS_PASS(nng_pair1_open(&s));
+	NUTS_PASS(nng_socket_get_int(s, NNG_OPT_PROTO, &p));
+	NUTS_TRUE(p == NUTS_PROTO(1u, 1u)); // 32
+	NUTS_PASS(nng_socket_get_int(s, NNG_OPT_PEER, &p));
+	NUTS_TRUE(p == NUTS_PROTO(1u, 1u)); // 33
+	NUTS_PASS(nng_socket_get_string(s, NNG_OPT_PROTONAME, &n));
+	NUTS_MATCH(n, "pair1");
+	nng_strfree(n);
+	NUTS_PASS(nng_socket_get_string(s, NNG_OPT_PEERNAME, &n));
+	NUTS_MATCH(n, "pair1");
+	nng_strfree(n);
+	NUTS_CLOSE(s);
+}
+
+// Cooked-mode exchange: one message in each direction between a
+// monogamous pair1 listener and dialer.
+void
+test_mono_cooked(void)
+{
+	nng_socket s1;
+	nng_socket c1;
+	nng_msg *  msg;
+
+	NUTS_PASS(nng_pair1_open(&s1));
+	NUTS_PASS(nng_pair1_open(&c1));
+	NUTS_PASS(nuts_marry(s1, c1));
+
+	// NB: lengths include the terminating NUL so NUTS_MATCH works.
+	NUTS_PASS(nng_msg_alloc(&msg, 0));
+	NUTS_PASS(nng_msg_append(msg, "ALPHA", strlen("ALPHA") + 1));
+	NUTS_PASS(nng_sendmsg(c1, msg, 0));
+	NUTS_PASS(nng_recvmsg(s1, &msg, 0));
+	NUTS_TRUE(nng_msg_len(msg) == strlen("ALPHA") + 1);
+	NUTS_MATCH(nng_msg_body(msg), "ALPHA");
+	nng_msg_free(msg);
+
+	NUTS_PASS(nng_msg_alloc(&msg, 0));
+	NUTS_PASS(nng_msg_append(msg, "BETA", strlen("BETA") + 1));
+	NUTS_PASS(nng_sendmsg(s1, msg, 0));
+	NUTS_PASS(nng_recvmsg(c1, &msg, 0));
+	NUTS_TRUE(nng_msg_len(msg) == strlen("BETA") + 1);
+	NUTS_MATCH(nng_msg_body(msg), "BETA");
+
+	nng_msg_free(msg);
+	NUTS_CLOSE(c1);
+	NUTS_CLOSE(s1);
+}
+
+// Monogamy: once s1 is married to c1, traffic from a second dialer (c2)
+// must never be delivered -- the receive from c2's message times out.
+void
+test_mono_faithful(void)
+{
+	nng_socket  s1;
+	nng_socket  c1;
+	nng_socket  c2;
+	nng_msg *   msg;
+	const char *addr = "inproc://pair1_mono_faithful";
+
+	NUTS_PASS(nng_pair1_open(&s1));
+	NUTS_PASS(nng_pair1_open(&c1));
+	NUTS_PASS(nng_pair1_open(&c2));
+	NUTS_PASS(nng_setopt_ms(s1, NNG_OPT_RECVTIMEO, SECOND / 4));
+	NUTS_PASS(nng_setopt_ms(c1, NNG_OPT_SENDTIMEO, SECOND));
+	NUTS_PASS(nng_setopt_ms(c2, NNG_OPT_SENDTIMEO, SECOND));
+	NUTS_PASS(nng_setopt_int(c2, NNG_OPT_SENDBUF, 2));
+
+	NUTS_PASS(nng_listen(s1, addr, NULL, 0));
+	NUTS_MARRY(s1, c1);
+	NUTS_PASS(nng_dial(c2, addr, NULL, 0));
+
+	// Allow c2's dial to complete before sending.
+	NUTS_SLEEP(100);
+
+	NUTS_PASS(nng_msg_alloc(&msg, 0));
+	APPEND_STR(msg, "ONE");
+	NUTS_PASS(nng_sendmsg(c1, msg, 0));
+	NUTS_PASS(nng_recvmsg(s1, &msg, 0));
+	CHECK_STR(msg, "ONE");
+	nng_msg_free(msg);
+
+	NUTS_PASS(nng_msg_alloc(&msg, 0));
+	APPEND_STR(msg, "TWO");
+	NUTS_PASS(nng_sendmsg(c2, msg, 0));
+	NUTS_FAIL(nng_recvmsg(s1, &msg, 0), NNG_ETIMEDOUT);
+
+	NUTS_CLOSE(s1);
+	NUTS_CLOSE(c1);
+	NUTS_CLOSE(c2);
+}
+
+// Back pressure: with tiny send/recv buffers and nobody reading, a
+// sender with a send timeout must eventually fail with NNG_ETIMEDOUT
+// well before 10 messages are queued.
+void
+test_mono_back_pressure(void)
+{
+	nng_socket   s1;
+	nng_socket   c1;
+	int          i;
+	int          rv;
+	nng_msg *    msg;
+	nng_duration to = 100;
+
+	NUTS_PASS(nng_pair1_open(&s1));
+	NUTS_PASS(nng_pair1_open(&c1));
+	NUTS_PASS(nng_setopt_int(s1, NNG_OPT_RECVBUF, 1));
+	NUTS_PASS(nng_setopt_int(s1, NNG_OPT_SENDBUF, 1));
+	NUTS_PASS(nng_setopt_int(c1, NNG_OPT_RECVBUF, 1));
+	NUTS_PASS(nng_setopt_ms(s1, NNG_OPT_SENDTIMEO, to));
+
+	NUTS_MARRY(s1, c1);
+
+	// We choose to allow some buffering.  In reality the
+	// buffer size is just 1, and we will fail after 2.
+	for (i = 0, rv = 0; i < 10; i++) {
+		NUTS_PASS(nng_msg_alloc(&msg, 0));
+		if ((rv = nng_sendmsg(s1, msg, 0)) != 0) {
+			// Send failed: we still own the message.
+			nng_msg_free(msg);
+			break;
+		}
+	}
+	NUTS_FAIL(rv, NNG_ETIMEDOUT);
+	NUTS_TRUE(i < 10);
+	NUTS_CLOSE(s1);
+	NUTS_CLOSE(c1);
+}
+
+// Raw-mode exchange: the application supplies the pair1 header (hop
+// count) itself; on receipt the hop count has been incremented to 2 and
+// the originating pipe is recorded on the message.
+void
+test_mono_raw_exchange(void)
+{
+	nng_socket s1;
+	nng_socket c1;
+
+	nng_msg *msg;
+	uint32_t hops;
+
+	NUTS_PASS(nng_pair1_open_raw(&s1));
+	NUTS_PASS(nng_pair1_open_raw(&c1));
+
+	NUTS_PASS(nng_setopt_ms(s1, NNG_OPT_RECVTIMEO, SECOND));
+	NUTS_PASS(nng_setopt_ms(c1, NNG_OPT_RECVTIMEO, SECOND));
+	NUTS_MARRY(s1, c1);
+
+	nng_pipe p = NNG_PIPE_INITIALIZER;
+	NUTS_PASS(nng_msg_alloc(&msg, 0));
+	APPEND_STR(msg, "GAMMA");
+	NUTS_PASS(nng_msg_header_append_u32(msg, 1));
+	NUTS_TRUE(nng_msg_header_len(msg) == sizeof(uint32_t));
+	NUTS_PASS(nng_sendmsg(c1, msg, 0));
+	NUTS_PASS(nng_recvmsg(s1, &msg, 0));
+	p = nng_msg_get_pipe(msg);
+	NUTS_TRUE(nng_pipe_id(p) > 0);
+
+	CHECK_STR(msg, "GAMMA");
+	NUTS_TRUE(nng_msg_header_len(msg) == sizeof(uint32_t));
+	NUTS_PASS(nng_msg_header_trim_u32(msg, &hops));
+	// Hop count was 1 at send; it is incremented in transit.
+	NUTS_TRUE(hops == 2);
+	nng_msg_free(msg);
+
+	NUTS_PASS(nng_msg_alloc(&msg, 0));
+	APPEND_STR(msg, "EPSILON");
+	NUTS_PASS(nng_msg_header_append_u32(msg, 1));
+	NUTS_PASS(nng_sendmsg(s1, msg, 0));
+	NUTS_PASS(nng_recvmsg(c1, &msg, 0));
+	CHECK_STR(msg, "EPSILON");
+	NUTS_TRUE(nng_msg_header_len(msg) == sizeof(uint32_t));
+	NUTS_PASS(nng_msg_header_trim_u32(msg, &hops));
+	p = nng_msg_get_pipe(msg);
+	NUTS_TRUE(nng_pipe_id(p) > 0);
+
+	NUTS_TRUE(hops == 2);
+	nng_msg_free(msg);
+
+	NUTS_CLOSE(s1);
+	NUTS_CLOSE(c1);
+}
+
+// Raw-mode header validation: messages with a missing, malformed, or
+// exhausted pair1 header are silently dropped (receive times out),
+// while a well-formed header is delivered.
+void
+test_mono_raw_header(void)
+{
+	nng_socket s1;
+	nng_socket c1;
+	nng_msg *  msg;
+	uint32_t   v;
+
+	NUTS_PASS(nng_pair1_open_raw(&s1));
+	NUTS_PASS(nng_pair1_open_raw(&c1));
+
+	NUTS_PASS(nng_setopt_ms(s1, NNG_OPT_RECVTIMEO, SECOND / 5));
+	NUTS_PASS(nng_setopt_ms(c1, NNG_OPT_RECVTIMEO, SECOND / 5));
+	NUTS_MARRY(s1, c1);
+
+	// Missing bits in the header
+	NUTS_PASS(nng_msg_alloc(&msg, 0));
+	NUTS_PASS(nng_sendmsg(c1, msg, 0));
+	NUTS_FAIL(nng_recvmsg(s1, &msg, 0), NNG_ETIMEDOUT);
+
+	// Valid header works
+	NUTS_PASS(nng_msg_alloc(&msg, 0));
+	NUTS_PASS(nng_msg_append_u32(msg, 0xFEEDFACE));
+	NUTS_PASS(nng_msg_header_append_u32(msg, 1));
+	NUTS_PASS(nng_sendmsg(c1, msg, 0));
+	NUTS_PASS(nng_recvmsg(s1, &msg, 0));
+	NUTS_PASS(nng_msg_trim_u32(msg, &v));
+	NUTS_TRUE(v == 0xFEEDFACE);
+	nng_msg_free(msg);
+
+	// Header with reserved bits set dropped
+	NUTS_PASS(nng_msg_alloc(&msg, 0));
+	NUTS_PASS(nng_msg_header_append_u32(msg, 0xDEAD0000));
+	NUTS_PASS(nng_sendmsg(c1, msg, 0));
+	NUTS_FAIL(nng_recvmsg(s1, &msg, 0), NNG_ETIMEDOUT);
+
+	// Header with no chance to add another hop gets dropped
+	NUTS_PASS(nng_msg_alloc(&msg, 0));
+	NUTS_PASS(nng_msg_header_append_u32(msg, 0xff));
+	NUTS_PASS(nng_sendmsg(c1, msg, 0));
+	NUTS_FAIL(nng_recvmsg(s1, &msg, 0), NNG_ETIMEDOUT);
+
+	// With the same bits clear it works
+	NUTS_PASS(nng_msg_alloc(&msg, 0));
+	NUTS_PASS(nng_msg_append_u32(msg, 0xFEEDFACE));
+	NUTS_PASS(nng_msg_header_append_u32(msg, 1));
+	NUTS_PASS(nng_sendmsg(c1, msg, 0));
+	NUTS_PASS(nng_recvmsg(s1, &msg, 0));
+	NUTS_PASS(nng_msg_trim_u32(msg, &v));
+	NUTS_TRUE(v == 0xFEEDFACE);
+	nng_msg_free(msg);
+
+	NUTS_CLOSE(s1);
+	NUTS_CLOSE(c1);
+}
+
+// NNG_OPT_RAW reflects how the socket was opened and is read-only.
+void
+test_pair1_raw(void)
+{
+	nng_socket s1;
+	bool       raw;
+
+	NUTS_PASS(nng_pair1_open(&s1));
+	NUTS_PASS(nng_getopt_bool(s1, NNG_OPT_RAW, &raw));
+	NUTS_TRUE(raw == false);
+	NUTS_FAIL(nng_setopt_bool(s1, NNG_OPT_RAW, true), NNG_EREADONLY);
+	NUTS_PASS(nng_close(s1));
+
+	NUTS_PASS(nng_pair1_open_raw(&s1));
+	NUTS_PASS(nng_getopt_bool(s1, NNG_OPT_RAW, &raw));
+	NUTS_TRUE(raw == true);
+	NUTS_FAIL(nng_setopt_bool(s1, NNG_OPT_RAW, false), NNG_EREADONLY);
+	NUTS_PASS(nng_close(s1));
+}
+
+// NNG_OPT_MAXTTL validation and enforcement: out-of-range or wrongly
+// typed settings are rejected; messages whose hop count would meet or
+// exceed the limit are dropped, others pass with the hop incremented.
+void
+test_pair1_ttl(void)
+{
+	nng_socket s1;
+	nng_socket c1;
+	nng_msg *  msg;
+	uint32_t   val;
+	int        ttl;
+
+	NUTS_PASS(nng_pair1_open_raw(&s1));
+	NUTS_PASS(nng_pair1_open_raw(&c1));
+	NUTS_PASS(nng_setopt_ms(s1, NNG_OPT_RECVTIMEO, SECOND / 5));
+	NUTS_PASS(nng_setopt_ms(c1, NNG_OPT_RECVTIMEO, SECOND / 5));
+
+	// cannot set insane TTLs
+	NUTS_FAIL(nng_setopt_int(s1, NNG_OPT_MAXTTL, 0), NNG_EINVAL);
+	NUTS_FAIL(nng_setopt_int(s1, NNG_OPT_MAXTTL, 1000), NNG_EINVAL);
+	ttl = 8;
+	// size 1 is too small for an int option
+	NUTS_FAIL(nng_setopt(s1, NNG_OPT_MAXTTL, &ttl, 1), NNG_EINVAL);
+	NUTS_FAIL(nng_setopt_bool(s1, NNG_OPT_MAXTTL, true), NNG_EBADTYPE);
+
+	NUTS_MARRY(s1, c1);
+
+	// Let's check enforcement of TTL
+	NUTS_PASS(nng_setopt_int(s1, NNG_OPT_MAXTTL, 4));
+	NUTS_PASS(nng_getopt_int(s1, NNG_OPT_MAXTTL, &ttl));
+	NUTS_TRUE(ttl == 4);
+
+	// Bad TTL bounces
+	NUTS_PASS(nng_msg_alloc(&msg, 0));
+	NUTS_PASS(nng_msg_header_append_u32(msg, 4));
+	NUTS_PASS(nng_sendmsg(c1, msg, 0));
+	NUTS_FAIL(nng_recvmsg(s1, &msg, 0), NNG_ETIMEDOUT);
+
+	// Good TTL passes
+	NUTS_PASS(nng_msg_alloc(&msg, 0));
+	NUTS_PASS(nng_msg_append_u32(msg, 0xFEEDFACE));
+	NUTS_PASS(nng_msg_header_append_u32(msg, 3));
+	NUTS_PASS(nng_sendmsg(c1, msg, 0));
+	NUTS_PASS(nng_recvmsg(s1, &msg, 0));
+	NUTS_PASS(nng_msg_trim_u32(msg, &val));
+	NUTS_TRUE(val == 0xFEEDFACE);
+	NUTS_PASS(nng_msg_header_trim_u32(msg, &val));
+	// hop count 3 was incremented to 4 in transit
+	NUTS_TRUE(val == 4);
+	nng_msg_free(msg);
+
+	// Large TTL passes
+	NUTS_PASS(nng_setopt_int(s1, NNG_OPT_MAXTTL, 15));
+	NUTS_PASS(nng_msg_alloc(&msg, 0));
+	NUTS_PASS(nng_msg_append_u32(msg, 1234));
+	NUTS_PASS(nng_msg_header_append_u32(msg, 14));
+	NUTS_PASS(nng_sendmsg(c1, msg, 0));
+	NUTS_PASS(nng_recvmsg(s1, &msg, 0));
+	NUTS_PASS(nng_msg_trim_u32(msg, &val));
+	NUTS_TRUE(val == 1234);
+	NUTS_PASS(nng_msg_header_trim_u32(msg, &val));
+	NUTS_TRUE(val == 15);
+	nng_msg_free(msg);
+
+	// Max TTL fails
+	NUTS_PASS(nng_setopt_int(s1, NNG_OPT_MAXTTL, 15));
+	NUTS_PASS(nng_msg_alloc(&msg, 0));
+	NUTS_PASS(nng_msg_header_append_u32(msg, 15));
+	NUTS_PASS(nng_sendmsg(c1, msg, 0));
+	NUTS_FAIL(nng_recvmsg(s1, &msg, 0), NNG_ETIMEDOUT);
+
+	NUTS_CLOSE(s1);
+	NUTS_CLOSE(c1);
+}
+
+// A pair0 dialer connecting to a pair1 listener must be rejected, and
+// the rejection counted in the listener socket's "reject" statistic.
+void
+test_pair1_validate_peer(void)
+{
+	nng_socket s1, s2;
+	nng_stat * stats;
+	nng_stat * reject;
+	char *     addr;
+
+	NUTS_ADDR(addr, "inproc");
+	NUTS_PASS(nng_pair1_open(&s1));
+	NUTS_PASS(nng_pair0_open(&s2));
+
+	NUTS_PASS(nng_listen(s1, addr, NULL, 0));
+	NUTS_PASS(nng_dial(s2, addr, NULL, NNG_FLAG_NONBLOCK));
+
+	// Let the nonblocking dial complete and be rejected.
+	NUTS_SLEEP(100);
+	NUTS_PASS(nng_stats_get(&stats));
+
+	NUTS_TRUE(stats != NULL);
+	NUTS_TRUE((reject = nng_stat_find_socket(stats, s1)) != NULL);
+	NUTS_TRUE((reject = nng_stat_find(reject, "reject")) != NULL);
+
+	NUTS_TRUE(nng_stat_type(reject) == NNG_STAT_COUNTER);
+	NUTS_TRUE(nng_stat_value(reject) > 0);
+
+	NUTS_CLOSE(s1);
+	NUTS_CLOSE(s2);
+	nng_stats_free(stats);
+}
+
+// Using the test-only "pair1_test_inject_header" option, send a message
+// with no pair1 header; the receiver must silently drop it (timeout).
+void
+test_pair1_recv_no_header(void)
+{
+	nng_socket s;
+	nng_socket c;
+	nng_msg *  m;
+
+	NUTS_PASS(nng_pair1_open(&s));
+	NUTS_PASS(nng_pair1_open(&c));
+	NUTS_PASS(nng_setopt_bool(c, "pair1_test_inject_header", true));
+	NUTS_PASS(nng_setopt_ms(s, NNG_OPT_RECVTIMEO, 100));
+	NUTS_PASS(nng_setopt_ms(s, NNG_OPT_SENDTIMEO, 200));
+
+	NUTS_MARRY(c, s);
+
+	NUTS_PASS(nng_msg_alloc(&m, 0));
+	NUTS_PASS(nng_sendmsg(c, m, 0));
+	NUTS_FAIL(nng_recvmsg(s, &m, 0), NNG_ETIMEDOUT);
+
+	NUTS_CLOSE(c);
+	NUTS_CLOSE(s);
+}
+
+// As above, but inject a garbage header (absurd hop count); the
+// receiver must drop the message rather than deliver it.
+void
+test_pair1_recv_garbage(void)
+{
+	nng_socket s;
+	nng_socket c;
+	nng_msg *  m;
+
+	NUTS_PASS(nng_pair1_open(&s));
+	NUTS_PASS(nng_pair1_open(&c));
+	NUTS_PASS(nng_setopt_bool(c, "pair1_test_inject_header", true));
+	NUTS_PASS(nng_setopt_ms(s, NNG_OPT_RECVTIMEO, 100));
+	NUTS_PASS(nng_setopt_ms(s, NNG_OPT_SENDTIMEO, 200));
+
+	NUTS_MARRY(c, s);
+
+	// ridiculous hop count
+	NUTS_PASS(nng_msg_alloc(&m, 0));
+	NUTS_PASS(nng_msg_append_u32(m, 0x1000));
+	NUTS_PASS(nng_sendmsg(c, m, 0));
+	NUTS_FAIL(nng_recvmsg(s, &m, 0), NNG_ETIMEDOUT);
+
+	NUTS_CLOSE(c);
+	NUTS_CLOSE(s);
+}
+
+// Registration table for the pair1 test suite (NULL entry terminates).
+NUTS_TESTS = {
+	{ "pair1 mono identity", test_mono_identity },
+	{ "pair1 mono cooked", test_mono_cooked },
+	{ "pair1 mono faithful", test_mono_faithful },
+	{ "pair1 mono back pressure", test_mono_back_pressure },
+	{ "pair1 mono raw exchange", test_mono_raw_exchange },
+	{ "pair1 mono raw header", test_mono_raw_header },
+	{ "pair1 raw", test_pair1_raw },
+	{ "pair1 ttl", test_pair1_ttl },
+	{ "pair1 validate peer", test_pair1_validate_peer },
+	{ "pair1 recv no header", test_pair1_recv_no_header },
+	{ "pair1 recv garbage", test_pair1_recv_garbage },
+
+	{ NULL, NULL },
+};
diff --git a/src/sp/protocol/pipeline0/CMakeLists.txt b/src/sp/protocol/pipeline0/CMakeLists.txt
new file mode 100644
index 00000000..8a10eab7
--- /dev/null
+++ b/src/sp/protocol/pipeline0/CMakeLists.txt
@@ -0,0 +1,23 @@
+#
+# Copyright 2019 Staysail Systems, Inc. <info@staysail.tech>
+# Copyright 2018 Capitar IT Group BV <info@capitar.com>
+#
+# This software is supplied under the terms of the MIT License, a
+# copy of which should be located in the distribution where this
+# file was obtained (LICENSE.txt). A copy of the license may also be
+# found online at https://opensource.org/licenses/MIT.
+#
+
+# Pipeline protocol
+nng_directory(pipeline0)
+
+# Each half of the pipeline (push/pull) is built only when its
+# corresponding NNG_PROTO_* option is enabled.
+nng_sources_if(NNG_PROTO_PUSH0 push.c)
+nng_headers_if(NNG_PROTO_PUSH0 nng/protocol/pipeline0/push.h)
+nng_defines_if(NNG_PROTO_PUSH0 NNG_HAVE_PUSH0)
+
+nng_sources_if(NNG_PROTO_PULL0 pull.c)
+nng_headers_if(NNG_PROTO_PULL0 nng/protocol/pipeline0/pull.h)
+nng_defines_if(NNG_PROTO_PULL0 NNG_HAVE_PULL0)
+
+# Tests are registered unconditionally.
+nng_test(pull_test)
+nng_test(push_test)
diff --git a/src/sp/protocol/pipeline0/pull.c b/src/sp/protocol/pipeline0/pull.c
new file mode 100644
index 00000000..616b0817
--- /dev/null
+++ b/src/sp/protocol/pipeline0/pull.c
@@ -0,0 +1,325 @@
+//
+// Copyright 2020 Staysail Systems, Inc. <info@staysail.tech>
+// Copyright 2018 Capitar IT Group BV <info@capitar.com>
+//
+// This software is supplied under the terms of the MIT License, a
+// copy of which should be located in the distribution where this
+// file was obtained (LICENSE.txt). A copy of the license may also be
+// found online at https://opensource.org/licenses/MIT.
+//
+
+#include <stdlib.h>
+
+#include "core/nng_impl.h"
+#include "nng/protocol/pipeline0/pull.h"
+
+// Pull protocol. The PULL protocol is the "read" side of a pipeline.
+
+#ifndef NNI_PROTO_PULL_V0
+#define NNI_PROTO_PULL_V0 NNI_PROTO(5, 1)
+#endif
+
+#ifndef NNI_PROTO_PUSH_V0
+#define NNI_PROTO_PUSH_V0 NNI_PROTO(5, 0)
+#endif
+
+typedef struct pull0_pipe pull0_pipe;
+typedef struct pull0_sock pull0_sock;
+
+static void pull0_recv_cb(void *);
+
+// pull0_sock is our per-socket protocol private structure.
+struct pull0_sock {
+	bool         raw;      // raw mode flag
+	nni_list     pl;       // pipe list (pipes with data ready)
+	nni_list     rq;       // recv queue (aio list)
+	nni_mtx      m;        // guards pl, rq, and pipe state
+	nni_pollable readable;  // raised while at least one pipe has data
+};
+
+// pull0_pipe is our per-pipe protocol private structure.
+struct pull0_pipe {
+	nni_pipe *    p;      // underlying transport pipe
+	pull0_sock *  s;      // owning socket
+	nni_msg *     m;      // received message parked awaiting a reader
+	nni_aio       aio;    // receive operation
+	bool          closed; // set under s->m when the pipe closes
+	nni_list_node node;   // linkage on the socket's ready list (pl)
+};
+
+// Initialize per-socket state: empty queues, mutex, and pollable.
+static int
+pull0_sock_init(void *arg, nni_sock *sock)
+{
+	pull0_sock *s = arg;
+	NNI_ARG_UNUSED(sock);
+
+	nni_aio_list_init(&s->rq);
+	NNI_LIST_INIT(&s->pl, pull0_pipe, node);
+	nni_mtx_init(&s->m);
+	nni_pollable_init(&s->readable);
+	return (0);
+}
+
+// Tear down per-socket state (mirror of pull0_sock_init).
+static void
+pull0_sock_fini(void *arg)
+{
+	pull0_sock *s = arg;
+	nni_mtx_fini(&s->m);
+	nni_pollable_fini(&s->readable);
+}
+
+// Wait for the pipe's outstanding receive aio to fully stop.
+static void
+pull0_pipe_stop(void *arg)
+{
+	pull0_pipe *p = arg;
+
+	nni_aio_stop(&p->aio);
+}
+
+// Release per-pipe resources, including any parked message.
+static void
+pull0_pipe_fini(void *arg)
+{
+	pull0_pipe *p = arg;
+
+	nni_aio_fini(&p->aio);
+	if (p->m) {
+		nni_msg_free(p->m);
+	}
+}
+
+// Initialize per-pipe state and wire up the receive callback.
+static int
+pull0_pipe_init(void *arg, nni_pipe *pipe, void *s)
+{
+	pull0_pipe *p = arg;
+
+	nni_aio_init(&p->aio, pull0_recv_cb, p);
+	p->p = pipe;
+	p->s = s;
+	return (0);
+}
+
+// Validate the peer protocol and kick off the first receive.
+static int
+pull0_pipe_start(void *arg)
+{
+	pull0_pipe *p = arg;
+
+	if (nni_pipe_peer(p->p) != NNI_PROTO_PUSH_V0) {
+		// Peer protocol mismatch.
+		return (NNG_EPROTO);
+	}
+
+	// Start the pending receive...
+	nni_pipe_recv(p->p, &p->aio);
+
+	return (0);
+}
+
+// Mark the pipe closed, unlink it from the ready list (clearing the
+// readable pollable if it was the last ready pipe), and abort its aio.
+static void
+pull0_pipe_close(void *arg)
+{
+	pull0_pipe *p = arg;
+	pull0_sock *s = p->s;
+
+	nni_mtx_lock(&s->m);
+	p->closed = true;
+	if (nni_list_node_active(&p->node)) {
+		nni_list_node_remove(&p->node);
+		if (nni_list_empty(&s->pl)) {
+			nni_pollable_clear(&s->readable);
+		}
+	}
+	nni_mtx_unlock(&s->m);
+
+	nni_aio_close(&p->aio);
+}
+
+// Completion callback for a pipe receive.  Either hand the message to a
+// waiting receiver, or park it on the pipe and place the pipe on the
+// ready list until somebody asks for it.
+static void
+pull0_recv_cb(void *arg)
+{
+	pull0_pipe *p = arg;
+	pull0_sock *s = p->s;
+	nni_aio *   ap = &p->aio;
+	nni_aio *   as;
+	nni_msg *   m;
+
+	if (nni_aio_result(ap) != 0) {
+		// Failed to get a message, probably the pipe is closed.
+		nni_pipe_close(p->p);
+		return;
+	}
+
+	// Got a message... start the put to send it up to the application.
+	m = nni_aio_get_msg(ap);
+	nni_aio_set_msg(ap, NULL);
+	nni_msg_set_pipe(m, nni_pipe_id(p->p));
+
+	nni_mtx_lock(&s->m);
+	if (p->closed) {
+		// Raced with pipe close; drop the message.
+		nni_mtx_unlock(&s->m);
+		nni_msg_free(m);
+		return;
+	}
+	if (nni_list_empty(&s->rq)) {
+		// Nobody waiting: park the message, mark the pipe ready,
+		// and raise readable if this is the first ready pipe.
+		nni_list_append(&s->pl, p);
+		if (nni_list_first(&s->pl) == p) {
+			nni_pollable_raise(&s->readable);
+		}
+		p->m = m;
+		nni_mtx_unlock(&s->m);
+		return;
+	}
+	// A receiver is waiting: re-arm our receive, then complete it.
+	nni_pipe_recv(p->p, ap);
+	as = nni_list_first(&s->rq);
+	nni_aio_list_remove(as);
+	nni_mtx_unlock(&s->m);
+	nni_aio_set_msg(as, m);
+	nni_aio_finish_sync(as, 0, nni_msg_len(m));
+}
+
+// No work to do when the socket opens.
+static void
+pull0_sock_open(void *arg)
+{
+	NNI_ARG_UNUSED(arg);
+}
+
+// Fail all pending receive aios with NNG_ECLOSED.
+static void
+pull0_sock_close(void *arg)
+{
+	pull0_sock *s = arg;
+	nni_aio *   a;
+	nni_mtx_lock(&s->m);
+	while ((a = nni_list_first(&s->rq)) != NULL) {
+		nni_aio_list_remove(a);
+		nni_aio_finish_error(a, NNG_ECLOSED);
+	}
+	// NB: The common socket framework closes pipes before this.
+	nni_mtx_unlock(&s->m);
+}
+
+// PULL sockets cannot send; fail immediately.
+static void
+pull0_sock_send(void *arg, nni_aio *aio)
+{
+	NNI_ARG_UNUSED(arg);
+	nni_aio_finish_error(aio, NNG_ENOTSUP);
+}
+
+// Cancellation handler for a queued receive aio.
+static void
+pull0_cancel(nni_aio *aio, void *arg, int rv)
+{
+	pull0_sock *s = arg;
+	nni_mtx_lock(&s->m);
+	if (nni_aio_list_active(aio)) {
+		nni_aio_list_remove(aio);
+		nni_aio_finish_error(aio, rv);
+	}
+	nni_mtx_unlock(&s->m);
+}
+
+// Receive entry point.  If a pipe already has a parked message, deliver
+// it at once and re-arm that pipe; otherwise queue the aio until data
+// arrives (pull0_recv_cb completes it).
+static void
+pull0_sock_recv(void *arg, nni_aio *aio)
+{
+	pull0_sock *s = arg;
+	pull0_pipe *p;
+
+	if (nni_aio_begin(aio) != 0) {
+		return;
+	}
+
+	nni_mtx_lock(&s->m);
+	if ((p = nni_list_first(&s->pl)) == NULL) {
+
+		int rv;
+		if ((rv = nni_aio_schedule(aio, pull0_cancel, s)) != 0) {
+			nni_mtx_unlock(&s->m);
+			nni_aio_finish_error(aio, rv);
+			return;
+		}
+
+		nni_aio_list_append(&s->rq, aio);
+		nni_mtx_unlock(&s->m);
+		return;
+	}
+
+	nni_list_remove(&s->pl, p);
+	if (nni_list_empty(&s->pl)) {
+		// No pipe has a parked message any more.
+		nni_pollable_clear(&s->readable);
+	}
+	nni_aio_finish_msg(aio, p->m);
+	p->m = NULL;
+	nni_pipe_recv(p->p, &p->aio);
+	nni_mtx_unlock(&s->m);
+}
+
+// Getter for NNG_OPT_RECVFD: expose the readable pollable's fd.
+static int
+pull0_sock_get_recv_fd(void *arg, void *buf, size_t *szp, nni_opt_type t)
+{
+	pull0_sock *s = arg;
+	int         rv;
+	int         fd;
+
+	if ((rv = nni_pollable_getfd(&s->readable, &fd)) != 0) {
+		return (rv);
+	}
+	return (nni_copyout_int(fd, buf, szp, t));
+}
+
+// Socket options implemented by this protocol (only NNG_OPT_RECVFD).
+static nni_option pull0_sock_options[] = {
+	{
+	    .o_name = NNG_OPT_RECVFD,
+	    .o_get  = pull0_sock_get_recv_fd,
+	},
+	// terminate list
+	{
+	    .o_name = NULL,
+	},
+};
+
+// Per-pipe operations vector handed to the protocol framework.
+static nni_proto_pipe_ops pull0_pipe_ops = {
+	.pipe_size  = sizeof(pull0_pipe),
+	.pipe_init  = pull0_pipe_init,
+	.pipe_fini  = pull0_pipe_fini,
+	.pipe_start = pull0_pipe_start,
+	.pipe_close = pull0_pipe_close,
+	.pipe_stop  = pull0_pipe_stop,
+};
+
+// Per-socket operations vector handed to the protocol framework.
+static nni_proto_sock_ops pull0_sock_ops = {
+	.sock_size    = sizeof(pull0_sock),
+	.sock_init    = pull0_sock_init,
+	.sock_fini    = pull0_sock_fini,
+	.sock_open    = pull0_sock_open,
+	.sock_close   = pull0_sock_close,
+	.sock_send    = pull0_sock_send,
+	.sock_recv    = pull0_sock_recv,
+	.sock_options = pull0_sock_options,
+};
+
+// Cooked-mode protocol descriptor (receive only).
+static nni_proto pull0_proto = {
+	.proto_version  = NNI_PROTOCOL_VERSION,
+	.proto_self     = { NNI_PROTO_PULL_V0, "pull" },
+	.proto_peer     = { NNI_PROTO_PUSH_V0, "push" },
+	.proto_flags    = NNI_PROTO_FLAG_RCV,
+	.proto_pipe_ops = &pull0_pipe_ops,
+	.proto_sock_ops = &pull0_sock_ops,
+};
+
+// Raw-mode protocol descriptor; shares the same ops as cooked mode.
+static nni_proto pull0_proto_raw = {
+	.proto_version  = NNI_PROTOCOL_VERSION,
+	.proto_self     = { NNI_PROTO_PULL_V0, "pull" },
+	.proto_peer     = { NNI_PROTO_PUSH_V0, "push" },
+	.proto_flags    = NNI_PROTO_FLAG_RCV | NNI_PROTO_FLAG_RAW,
+	.proto_pipe_ops = &pull0_pipe_ops,
+	.proto_sock_ops = &pull0_sock_ops,
+};
+
+// Public constructor for a cooked-mode PULL socket.
+int
+nng_pull0_open(nng_socket *s)
+{
+	return (nni_proto_open(s, &pull0_proto));
+}
+
+// Public constructor for a raw-mode PULL socket.
+int
+nng_pull0_open_raw(nng_socket *s)
+{
+	return (nni_proto_open(s, &pull0_proto_raw));
+}
diff --git a/src/sp/protocol/pipeline0/pull_test.c b/src/sp/protocol/pipeline0/pull_test.c
new file mode 100644
index 00000000..25066093
--- /dev/null
+++ b/src/sp/protocol/pipeline0/pull_test.c
@@ -0,0 +1,264 @@
+//
+// Copyright 2020 Staysail Systems, Inc. <info@staysail.tech>
+//
+// This software is supplied under the terms of the MIT License, a
+// copy of which should be located in the distribution where this
+// file was obtained (LICENSE.txt). A copy of the license may also be
+// found online at https://opensource.org/licenses/MIT.
+//
+
+#include <nuts.h>
+
+// Check PULL's protocol identity: self/peer numbers and names.
+static void
+test_pull_identity(void)
+{
+	nng_socket s;
+	int        p;
+	char *     n;
+
+	NUTS_PASS(nng_pull0_open(&s));
+	NUTS_PASS(nng_socket_get_int(s, NNG_OPT_PROTO, &p));
+	NUTS_TRUE(p == NUTS_PROTO(5u, 1u)); // 81
+	NUTS_PASS(nng_socket_get_int(s, NNG_OPT_PEER, &p));
+	NUTS_TRUE(p == NUTS_PROTO(5u, 0u)); // 80
+	NUTS_PASS(nng_socket_get_string(s, NNG_OPT_PROTONAME, &n));
+	NUTS_MATCH(n, "pull");
+	nng_strfree(n);
+	NUTS_PASS(nng_socket_get_string(s, NNG_OPT_PEERNAME, &n));
+	NUTS_MATCH(n, "push");
+	nng_strfree(n);
+	NUTS_CLOSE(s);
+}
+
+// PULL sockets are receive-only: send must fail with NNG_ENOTSUP.
+static void
+test_pull_cannot_send(void)
+{
+	nng_socket s;
+
+	NUTS_PASS(nng_pull0_open(&s));
+	NUTS_FAIL(nng_send(s, "", 0, 0), NNG_ENOTSUP);
+	NUTS_CLOSE(s);
+}
+
+// PULL does not support contexts.
+static void
+test_pull_no_context(void)
+{
+	nng_socket s;
+	nng_ctx    ctx;
+
+	NUTS_PASS(nng_pull0_open(&s));
+	NUTS_FAIL(nng_ctx_open(&ctx, s), NNG_ENOTSUP);
+	NUTS_CLOSE(s);
+}
+
+// PULL has no send side, so NNG_OPT_SENDFD is unsupported.
+static void
+test_pull_not_writeable(void)
+{
+	int        fd;
+	nng_socket s;
+
+	NUTS_PASS(nng_pull0_open(&s));
+	NUTS_FAIL(nng_socket_get_int(s, NNG_OPT_SENDFD, &fd), NNG_ENOTSUP);
+	NUTS_CLOSE(s);
+}
+
+// NNG_OPT_RECVFD polls readable exactly while a message is waiting.
+static void
+test_pull_poll_readable(void)
+{
+	int        fd;
+	nng_socket pull;
+	nng_socket push;
+
+	NUTS_PASS(nng_pull0_open(&pull));
+	NUTS_PASS(nng_push0_open(&push));
+	NUTS_PASS(nng_socket_set_ms(pull, NNG_OPT_RECVTIMEO, 1000));
+	NUTS_PASS(nng_socket_set_ms(push, NNG_OPT_SENDTIMEO, 1000));
+	NUTS_PASS(nng_socket_get_int(pull, NNG_OPT_RECVFD, &fd));
+	NUTS_TRUE(fd >= 0);
+
+	// Not readable if not connected!
+	NUTS_TRUE(nuts_poll_fd(fd) == false);
+
+	// Even after connect (no message yet)
+	NUTS_MARRY(pull, push);
+	NUTS_TRUE(nuts_poll_fd(fd) == false);
+
+	// But once we send messages, it is.
+	// We have to send a request, in order to send a reply.
+	NUTS_SEND(push, "abc");
+	NUTS_SLEEP(100);
+	NUTS_TRUE(nuts_poll_fd(fd));
+
+	// and receiving makes it no longer ready
+	NUTS_RECV(pull, "abc");
+	NUTS_TRUE(nuts_poll_fd(fd) == false);
+
+	NUTS_CLOSE(pull);
+	NUTS_CLOSE(push);
+}
+
+// Closing the pipes while a message is parked clears readability.
+static void
+test_pull_close_pending(void)
+{
+	int        fd;
+	nng_socket pull;
+	nng_socket push;
+	nng_pipe   p1, p2;
+	char *     addr;
+
+	NUTS_ADDR(addr, "inproc");
+
+	NUTS_PASS(nng_pull0_open(&pull));
+	NUTS_PASS(nng_push0_open(&push));
+	NUTS_PASS(nng_socket_get_int(pull, NNG_OPT_RECVFD, &fd));
+	NUTS_TRUE(fd >= 0);
+	NUTS_MARRY_EX(pull, push, addr, &p1, &p2);
+
+	// Send a message -- it's ready for reading.
+	NUTS_SEND(push, "abc");
+	NUTS_SLEEP(100);
+	NUTS_TRUE(nuts_poll_fd(fd));
+
+	// NB: We have to close the pipe instead of the socket.
+	// This is because the socket won't notice the remote pipe
+	// disconnect until we collect the message and start another
+	// receive operation.
+	nng_pipe_close(p1);
+	nng_pipe_close(p2);
+
+	NUTS_SLEEP(100);
+	NUTS_TRUE(nuts_poll_fd(fd) == false);
+
+	NUTS_CLOSE(pull);
+	NUTS_CLOSE(push);
+}
+
+// A PULL peer dialing a PULL listener is a protocol mismatch; the
+// rejection must appear in the listener's "reject" statistic.
+void
+test_pull_validate_peer(void)
+{
+	nng_socket s1, s2;
+	nng_stat * stats;
+	nng_stat * reject;
+	char *     addr;
+
+	NUTS_ADDR(addr, "inproc");
+
+	NUTS_PASS(nng_pull0_open(&s1));
+	NUTS_PASS(nng_pull0_open(&s2));
+
+	NUTS_PASS(nng_listen(s1, addr, NULL, 0));
+	NUTS_PASS(nng_dial(s2, addr, NULL, NNG_FLAG_NONBLOCK));
+
+	// Let the nonblocking dial complete and be rejected.
+	NUTS_SLEEP(100);
+	NUTS_PASS(nng_stats_get(&stats));
+
+	NUTS_TRUE(stats != NULL);
+	NUTS_TRUE((reject = nng_stat_find_socket(stats, s1)) != NULL);
+	NUTS_TRUE((reject = nng_stat_find(reject, "reject")) != NULL);
+
+	NUTS_TRUE(nng_stat_type(reject) == NNG_STAT_COUNTER);
+	NUTS_TRUE(nng_stat_value(reject) > 0);
+
+	NUTS_CLOSE(s1);
+	NUTS_CLOSE(s2);
+	nng_stats_free(stats);
+}
+
+// Receiving with an already-stopped aio completes with NNG_ECANCELED.
+static void
+test_pull_recv_aio_stopped(void)
+{
+	nng_socket s;
+	nng_aio *  aio;
+
+	NUTS_PASS(nng_pull0_open(&s));
+	NUTS_PASS(nng_aio_alloc(&aio, NULL, NULL));
+
+	nng_aio_stop(aio);
+	nng_recv_aio(s, aio);
+	nng_aio_wait(aio);
+	NUTS_FAIL(nng_aio_result(aio), NNG_ECANCELED);
+	NUTS_CLOSE(s);
+	nng_aio_free(aio);
+}
+
+// Closing the socket while a receive is pending fails it with NNG_ECLOSED.
+static void
+test_pull_close_recv(void)
+{
+	nng_socket s;
+	nng_aio *  aio;
+
+	NUTS_PASS(nng_pull0_open(&s));
+	NUTS_PASS(nng_aio_alloc(&aio, NULL, NULL));
+	nng_aio_set_timeout(aio, 1000);
+	nng_recv_aio(s, aio);
+	NUTS_PASS(nng_close(s));
+	nng_aio_wait(aio);
+	NUTS_FAIL(nng_aio_result(aio), NNG_ECLOSED);
+
+	nng_aio_free(aio);
+}
+
+// A zero-timeout receive with no data times out immediately.
+static void
+test_pull_recv_nonblock(void)
+{
+	nng_socket s;
+	nng_aio *  aio;
+
+	NUTS_PASS(nng_pull0_open(&s));
+	NUTS_PASS(nng_aio_alloc(&aio, NULL, NULL));
+
+	nng_aio_set_timeout(aio, 0); // Instant timeout
+	nng_recv_aio(s, aio);
+
+	nng_aio_wait(aio);
+	NUTS_FAIL(nng_aio_result(aio), NNG_ETIMEDOUT);
+	NUTS_CLOSE(s);
+	nng_aio_free(aio);
+}
+
+// Aborting a pending receive completes it with NNG_ECANCELED.
+static void
+test_pull_recv_cancel(void)
+{
+	nng_socket s;
+	nng_aio *  aio;
+
+	NUTS_PASS(nng_pull0_open(&s));
+	NUTS_PASS(nng_aio_alloc(&aio, NULL, NULL));
+
+	nng_aio_set_timeout(aio, 1000);
+	nng_recv_aio(s, aio);
+	nng_aio_abort(aio, NNG_ECANCELED);
+
+	nng_aio_wait(aio);
+	NUTS_FAIL(nng_aio_result(aio), NNG_ECANCELED);
+	NUTS_CLOSE(s);
+	nng_aio_free(aio);
+}
+
+// A socket opened with nng_pull0_open reports NNG_OPT_RAW == false.
+static void
+test_pull_cooked(void)
+{
+	nng_socket s;
+	bool       b;
+
+	NUTS_PASS(nng_pull0_open(&s));
+	NUTS_PASS(nng_socket_get_bool(s, NNG_OPT_RAW, &b));
+	NUTS_TRUE(!b);
+	NUTS_CLOSE(s);
+}
+
+// Registration table for the pull test suite (NULL entry terminates).
+TEST_LIST = {
+	{ "pull identity", test_pull_identity },
+	{ "pull cannot send", test_pull_cannot_send },
+	{ "pull no context", test_pull_no_context },
+	{ "pull not writeable", test_pull_not_writeable },
+	{ "pull poll readable", test_pull_poll_readable },
+	{ "pull close pending", test_pull_close_pending },
+	{ "pull validate peer", test_pull_validate_peer },
+	{ "pull recv aio stopped", test_pull_recv_aio_stopped },
+	{ "pull close recv", test_pull_close_recv },
+	{ "pull recv nonblock", test_pull_recv_nonblock },
+	{ "pull recv cancel", test_pull_recv_cancel },
+	{ "pull cooked", test_pull_cooked },
+	{ NULL, NULL },
+};
diff --git a/src/sp/protocol/pipeline0/push.c b/src/sp/protocol/pipeline0/push.c
new file mode 100644
index 00000000..ad43d967
--- /dev/null
+++ b/src/sp/protocol/pipeline0/push.c
@@ -0,0 +1,442 @@
+//
+// Copyright 2020 Staysail Systems, Inc. <info@staysail.tech>
+// Copyright 2018 Capitar IT Group BV <info@capitar.com>
+//
+// This software is supplied under the terms of the MIT License, a
+// copy of which should be located in the distribution where this
+// file was obtained (LICENSE.txt). A copy of the license may also be
+// found online at https://opensource.org/licenses/MIT.
+//
+
+#include <stdlib.h>
+
+#include "core/nng_impl.h"
+#include "nng/protocol/pipeline0/push.h"
+
+// Push protocol. The PUSH protocol is the "write" side of a pipeline.
+// Push distributes fairly, or tries to, by giving messages in round-robin
+// order.
+
+#ifndef NNI_PROTO_PULL_V0
+#define NNI_PROTO_PULL_V0 NNI_PROTO(5, 1)
+#endif
+
+#ifndef NNI_PROTO_PUSH_V0
+#define NNI_PROTO_PUSH_V0 NNI_PROTO(5, 0)
+#endif
+
+typedef struct push0_pipe push0_pipe;
+typedef struct push0_sock push0_sock;
+
+static void push0_send_cb(void *);
+static void push0_recv_cb(void *);
+static void push0_pipe_ready(push0_pipe *);
+
+// push0_sock is our per-socket protocol private structure.
+struct push0_sock {
+	nni_lmq      wq;       // list of messages queued
+	nni_list     aq;       // list of aio senders waiting
+	nni_list     pl;       // list of pipes ready to send
+	nni_pollable writable; // raised when a send can proceed
+	nni_mtx      m;        // guards wq, aq, and pl
+};
+
+// push0_pipe is our per-pipe protocol private structure.
+struct push0_pipe {
+	nni_pipe *    pipe; // underlying transport pipe
+	push0_sock *  push; // owning socket
+	nni_list_node node; // linkage on the socket's ready list (pl)
+
+	nni_aio aio_recv; // armed only to detect pipe closure
+	nni_aio aio_send; // outbound send operation
+};
+
+// Initialize per-socket state; the message queue starts unbuffered.
+static int
+push0_sock_init(void *arg, nni_sock *sock)
+{
+	push0_sock *s = arg;
+	NNI_ARG_UNUSED(sock);
+
+	nni_mtx_init(&s->m);
+	nni_aio_list_init(&s->aq);
+	NNI_LIST_INIT(&s->pl, push0_pipe, node);
+	nni_lmq_init(&s->wq, 0); // initially we start unbuffered.
+	nni_pollable_init(&s->writable);
+
+	return (0);
+}
+
+// Tear down per-socket state (mirror of push0_sock_init).
+static void
+push0_sock_fini(void *arg)
+{
+	push0_sock *s = arg;
+	nni_pollable_fini(&s->writable);
+	nni_lmq_fini(&s->wq);
+	nni_mtx_fini(&s->m);
+}
+
+// No work to do when the socket opens.
+static void
+push0_sock_open(void *arg)
+{
+	NNI_ARG_UNUSED(arg);
+}
+
+// Fail all waiting send aios with NNG_ECLOSED.
+static void
+push0_sock_close(void *arg)
+{
+	push0_sock *s = arg;
+	nni_aio *   a;
+	nni_mtx_lock(&s->m);
+	while ((a = nni_list_first(&s->aq)) != NULL) {
+		nni_aio_list_remove(a);
+		nni_aio_finish_error(a, NNG_ECLOSED);
+	}
+	nni_mtx_unlock(&s->m);
+}
+
+// Wait for both per-pipe aios to fully stop.
+static void
+push0_pipe_stop(void *arg)
+{
+	push0_pipe *p = arg;
+
+	nni_aio_stop(&p->aio_recv);
+	nni_aio_stop(&p->aio_send);
+}
+
+// Release per-pipe aio resources.
+static void
+push0_pipe_fini(void *arg)
+{
+	push0_pipe *p = arg;
+
+	nni_aio_fini(&p->aio_recv);
+	nni_aio_fini(&p->aio_send);
+}
+
+// Initialize per-pipe state and wire up the send/recv callbacks.
+static int
+push0_pipe_init(void *arg, nni_pipe *pipe, void *s)
+{
+	push0_pipe *p = arg;
+
+	nni_aio_init(&p->aio_recv, push0_recv_cb, p);
+	nni_aio_init(&p->aio_send, push0_send_cb, p);
+	NNI_LIST_NODE_INIT(&p->node);
+	p->pipe = pipe;
+	p->push = s;
+	return (0);
+}
+
+// Validate the peer protocol, arm the closure-detecting receive, and
+// mark the pipe ready for sending.
+static int
+push0_pipe_start(void *arg)
+{
+	push0_pipe *p = arg;
+
+	if (nni_pipe_peer(p->pipe) != NNI_PROTO_PULL_V0) {
+		return (NNG_EPROTO);
+	}
+
+	// Schedule a receiver.  This is mostly so that we can detect
+	// a closed transport pipe.
+	nni_pipe_recv(p->pipe, &p->aio_recv);
+	push0_pipe_ready(p);
+
+	return (0);
+}
+
+// Abort the pipe's aios and unlink it from the ready list, clearing the
+// writable pollable if nothing can accept a send any more.
+static void
+push0_pipe_close(void *arg)
+{
+	push0_pipe *p = arg;
+	push0_sock *s = p->push;
+
+	nni_aio_close(&p->aio_recv);
+	nni_aio_close(&p->aio_send);
+
+	nni_mtx_lock(&s->m);
+	if (nni_list_node_active(&p->node)) {
+		nni_list_node_remove(&p->node);
+
+		if (nni_list_empty(&s->pl) && nni_lmq_full(&s->wq)) {
+			nni_pollable_clear(&s->writable);
+		}
+	}
+	nni_mtx_unlock(&s->m);
+}
+
+// Receive completion: PUSH never consumes inbound data, so any message
+// is discarded and the receive re-armed; an error closes the pipe.
+static void
+push0_recv_cb(void *arg)
+{
+	push0_pipe *p = arg;
+
+	// We normally expect to receive an error.  If a pipe actually
+	// sends us data, we just discard it.
+	if (nni_aio_result(&p->aio_recv) != 0) {
+		nni_pipe_close(p->pipe);
+		return;
+	}
+	nni_msg_free(nni_aio_get_msg(&p->aio_recv));
+	nni_aio_set_msg(&p->aio_recv, NULL);
+	nni_pipe_recv(p->pipe, &p->aio_recv);
+}
+
+// Called when a pipe becomes available to send.  Prefer draining the
+// buffered queue (refilling it from a waiting sender), then any waiting
+// unbuffered sender, else park the pipe on the ready list.  Also toggles
+// the writable pollable when the socket transitions out of "blocked".
+static void
+push0_pipe_ready(push0_pipe *p)
+{
+	push0_sock *s = p->push;
+	nni_msg *   m;
+	nni_aio *   a = NULL;
+	size_t      l;
+	bool        blocked;
+
+	nni_mtx_lock(&s->m);
+
+	// "blocked" means no sender could make progress before this call.
+	blocked = nni_lmq_full(&s->wq) && nni_list_empty(&s->pl);
+
+	// if message is waiting in the buffered queue
+	// then we prefer that.
+	if (nni_lmq_getq(&s->wq, &m) == 0) {
+		nni_aio_set_msg(&p->aio_send, m);
+		nni_pipe_send(p->pipe, &p->aio_send);
+
+		// Backfill the queue slot from a waiting sender, if any.
+		if ((a = nni_list_first(&s->aq)) != NULL) {
+			nni_aio_list_remove(a);
+			m = nni_aio_get_msg(a);
+			l = nni_msg_len(m);
+			nni_lmq_putq(&s->wq, m);
+		}
+
+	} else if ((a = nni_list_first(&s->aq)) != NULL) {
+		// Looks like we had the unbuffered case, but
+		// someone was waiting.
+		nni_aio_list_remove(a);
+		m = nni_aio_get_msg(a);
+		l = nni_msg_len(m);
+
+		nni_aio_set_msg(&p->aio_send, m);
+		nni_pipe_send(p->pipe, &p->aio_send);
+	} else {
+		// We had nothing to send.  Just put us in the ready list.
+		nni_list_append(&s->pl, p);
+	}
+
+	if (blocked) {
+		// if we blocked, then toggle the status.
+		if ((!nni_lmq_full(&s->wq)) || (!nni_list_empty(&s->pl))) {
+			nni_pollable_raise(&s->writable);
+		}
+	}
+
+	nni_mtx_unlock(&s->m);
+
+	// Complete the dequeued sender outside the lock.
+	// NB: l is always assigned on any path where a != NULL.
+	if (a != NULL) {
+		nni_aio_set_msg(a, NULL);
+		nni_aio_finish_sync(a, 0, l);
+	}
+}
+
+// Send completion: on error, drop the message and close the pipe;
+// otherwise the pipe can accept another message.
+static void
+push0_send_cb(void *arg)
+{
+	push0_pipe *p = arg;
+
+	if (nni_aio_result(&p->aio_send) != 0) {
+		nni_msg_free(nni_aio_get_msg(&p->aio_send));
+		nni_aio_set_msg(&p->aio_send, NULL);
+		nni_pipe_close(p->pipe);
+		return;
+	}
+
+	push0_pipe_ready(p);
+}
+
+// Cancellation handler for a queued send aio.
+static void
+push0_cancel(nni_aio *aio, void *arg, int rv)
+{
+	push0_sock *s = arg;
+
+	nni_mtx_lock(&s->m);
+	if (nni_aio_list_active(aio)) {
+		nni_aio_list_remove(aio);
+		nni_aio_finish_error(aio, rv);
+	}
+	nni_mtx_unlock(&s->m);
+}
+
+static void
+push0_sock_send(void *arg, nni_aio *aio)
+{
+ push0_sock *s = arg;
+ push0_pipe *p;
+ nni_msg * m;
+ size_t l;
+ int rv;
+
+ if (nni_aio_begin(aio) != 0) {
+ return;
+ }
+
+ m = nni_aio_get_msg(aio);
+ l = nni_msg_len(m);
+
+ nni_mtx_lock(&s->m);
+
+ // First we want to see if we can send it right now.
+ // Note that we don't block the sender until the read is complete,
+ // only until we have committed to send it.
+ if ((p = nni_list_first(&s->pl)) != NULL) {
+ nni_list_remove(&s->pl, p);
+ // NB: We won't have had any waiters in the message queue
+ // or the aio queue, because we would not put the pipe
+ // in the ready list in that case. Note though that the
+ // wq may be "full" if we are unbuffered.
+ if (nni_list_empty(&s->pl) && (nni_lmq_full(&s->wq))) {
+ nni_pollable_clear(&s->writable);
+ }
+ nni_aio_set_msg(aio, NULL);
+ nni_aio_finish(aio, 0, l);
+ nni_aio_set_msg(&p->aio_send, m);
+ nni_pipe_send(p->pipe, &p->aio_send);
+ nni_mtx_unlock(&s->m);
+ return;
+ }
+
+ // Can we maybe queue it.
+ if (nni_lmq_putq(&s->wq, m) == 0) {
+ // Yay, we can. So we're done.
+ nni_aio_set_msg(aio, NULL);
+ nni_aio_finish(aio, 0, l);
+ if (nni_lmq_full(&s->wq)) {
+ nni_pollable_clear(&s->writable);
+ }
+ nni_mtx_unlock(&s->m);
+ return;
+ }
+
+ if ((rv = nni_aio_schedule(aio, push0_cancel, s)) != 0) {
+ nni_aio_finish_error(aio, rv);
+ nni_mtx_unlock(&s->m);
+ return;
+ }
+ nni_aio_list_append(&s->aq, aio);
+ nni_mtx_unlock(&s->m);
+}
+
+static void
+push0_sock_recv(void *arg, nni_aio *aio)
+{
+ NNI_ARG_UNUSED(arg);
+ nni_aio_finish_error(aio, NNG_ENOTSUP);
+}
+
+static int
+push0_set_send_buf_len(void *arg, const void *buf, size_t sz, nni_type t)
+{
+ push0_sock *s = arg;
+ int val;
+ int rv;
+
+ if ((rv = nni_copyin_int(&val, buf, sz, 0, 8192, t)) != 0) {
+ return (rv);
+ }
+ nni_mtx_lock(&s->m);
+ rv = nni_lmq_resize(&s->wq, (size_t) val);
+ // Changing the size of the queue can affect our readiness.
+ if (!nni_lmq_full(&s->wq)) {
+ nni_pollable_raise(&s->writable);
+ } else if (nni_list_empty(&s->pl)) {
+ nni_pollable_clear(&s->writable);
+ }
+ nni_mtx_unlock(&s->m);
+ return (rv);
+}
+
+static int
+push0_get_send_buf_len(void *arg, void *buf, size_t *szp, nni_opt_type t)
+{
+ push0_sock *s = arg;
+ int val;
+
+ nni_mtx_lock(&s->m);
+ val = nni_lmq_cap(&s->wq);
+ nni_mtx_unlock(&s->m);
+
+ return (nni_copyout_int(val, buf, szp, t));
+}
+
+static int
+push0_sock_get_send_fd(void *arg, void *buf, size_t *szp, nni_opt_type t)
+{
+ push0_sock *s = arg;
+ int rv;
+ int fd;
+
+ if ((rv = nni_pollable_getfd(&s->writable, &fd)) != 0) {
+ return (rv);
+ }
+ return (nni_copyout_int(fd, buf, szp, t));
+}
+
+static nni_proto_pipe_ops push0_pipe_ops = {
+ .pipe_size = sizeof(push0_pipe),
+ .pipe_init = push0_pipe_init,
+ .pipe_fini = push0_pipe_fini,
+ .pipe_start = push0_pipe_start,
+ .pipe_close = push0_pipe_close,
+ .pipe_stop = push0_pipe_stop,
+};
+
+static nni_option push0_sock_options[] = {
+ {
+ .o_name = NNG_OPT_SENDFD,
+ .o_get = push0_sock_get_send_fd,
+ },
+ {
+ .o_name = NNG_OPT_SENDBUF,
+ .o_get = push0_get_send_buf_len,
+ .o_set = push0_set_send_buf_len,
+ },
+ // terminate list
+ {
+ .o_name = NULL,
+ },
+};
+
+static nni_proto_sock_ops push0_sock_ops = {
+ .sock_size = sizeof(push0_sock),
+ .sock_init = push0_sock_init,
+ .sock_fini = push0_sock_fini,
+ .sock_open = push0_sock_open,
+ .sock_close = push0_sock_close,
+ .sock_options = push0_sock_options,
+ .sock_send = push0_sock_send,
+ .sock_recv = push0_sock_recv,
+};
+
+static nni_proto push0_proto = {
+ .proto_version = NNI_PROTOCOL_VERSION,
+ .proto_self = { NNI_PROTO_PUSH_V0, "push" },
+ .proto_peer = { NNI_PROTO_PULL_V0, "pull" },
+ .proto_flags = NNI_PROTO_FLAG_SND,
+ .proto_pipe_ops = &push0_pipe_ops,
+ .proto_sock_ops = &push0_sock_ops,
+};
+
+static nni_proto push0_proto_raw = {
+ .proto_version = NNI_PROTOCOL_VERSION,
+ .proto_self = { NNI_PROTO_PUSH_V0, "push" },
+ .proto_peer = { NNI_PROTO_PULL_V0, "pull" },
+ .proto_flags = NNI_PROTO_FLAG_SND | NNI_PROTO_FLAG_RAW,
+ .proto_pipe_ops = &push0_pipe_ops,
+ .proto_sock_ops = &push0_sock_ops,
+};
+
+int
+nng_push0_open(nng_socket *s)
+{
+ return (nni_proto_open(s, &push0_proto));
+}
+
+int
+nng_push0_open_raw(nng_socket *s)
+{
+ return (nni_proto_open(s, &push0_proto_raw));
+}
diff --git a/src/sp/protocol/pipeline0/push_test.c b/src/sp/protocol/pipeline0/push_test.c
new file mode 100644
index 00000000..d22ccaa4
--- /dev/null
+++ b/src/sp/protocol/pipeline0/push_test.c
@@ -0,0 +1,525 @@
+//
+// Copyright 2020 Staysail Systems, Inc. <info@staysail.tech>
+//
+// This software is supplied under the terms of the MIT License, a
+// copy of which should be located in the distribution where this
+// file was obtained (LICENSE.txt). A copy of the license may also be
+// found online at https://opensource.org/licenses/MIT.
+//
+
+#include <nuts.h>
+
+static void
+test_push_identity(void)
+{
+ nng_socket s;
+ int p;
+ char * n;
+
+ NUTS_PASS(nng_push0_open(&s));
+ NUTS_PASS(nng_socket_get_int(s, NNG_OPT_PROTO, &p));
+ NUTS_TRUE(p == NUTS_PROTO(5u, 0u)); // 80
+ NUTS_PASS(nng_socket_get_int(s, NNG_OPT_PEER, &p));
+ NUTS_TRUE(p == NUTS_PROTO(5u, 1u)); // 81
+ NUTS_PASS(nng_socket_get_string(s, NNG_OPT_PROTONAME, &n));
+ NUTS_MATCH(n, "push");
+ nng_strfree(n);
+ NUTS_PASS(nng_socket_get_string(s, NNG_OPT_PEERNAME, &n));
+ NUTS_MATCH(n, "pull");
+ nng_strfree(n);
+ NUTS_CLOSE(s);
+}
+
+static void
+test_push_cannot_recv(void)
+{
+ nng_socket s;
+ nng_msg * m = NULL;
+
+ NUTS_PASS(nng_push0_open(&s));
+ NUTS_FAIL(nng_recvmsg(s, &m, 0), NNG_ENOTSUP);
+ NUTS_CLOSE(s);
+}
+
+static void
+test_push_no_context(void)
+{
+ nng_socket s;
+ nng_ctx ctx;
+
+ NUTS_PASS(nng_push0_open(&s));
+ NUTS_FAIL(nng_ctx_open(&ctx, s), NNG_ENOTSUP);
+ NUTS_CLOSE(s);
+}
+
+static void
+test_push_not_readable(void)
+{
+ int fd;
+ nng_socket s;
+
+ NUTS_PASS(nng_push0_open(&s));
+ NUTS_FAIL(nng_socket_get_int(s, NNG_OPT_RECVFD, &fd), NNG_ENOTSUP);
+ NUTS_CLOSE(s);
+}
+
+static void
+test_push_poll_writable(void)
+{
+ int fd;
+ nng_socket pull;
+ nng_socket push;
+
+ NUTS_PASS(nng_pull0_open(&pull));
+ NUTS_PASS(nng_push0_open(&push));
+ NUTS_PASS(nng_socket_set_ms(pull, NNG_OPT_RECVTIMEO, 1000));
+ NUTS_PASS(nng_socket_set_ms(push, NNG_OPT_SENDTIMEO, 1000));
+ NUTS_PASS(nng_socket_get_int(push, NNG_OPT_SENDFD, &fd));
+ NUTS_TRUE(fd >= 0);
+
+ // This tests unbuffered sockets for now.
+ // Note that for this we are using unbuffered inproc.
+ // If using TCP or similar, then transport buffering will
+ // break assumptions in this test.
+
+ // Not writable if not connected!
+ NUTS_TRUE(nuts_poll_fd(fd) == false);
+
+ // After connect we can write.
+ NUTS_MARRY(pull, push);
+ NUTS_TRUE(nuts_poll_fd(fd) == true);
+
+ // But once we send a message, it is not anymore.
+ NUTS_SEND(push, "abc");
+ // Have to send a second message, because the remote socket
+ // will have consumed the first one.
+ NUTS_SEND(push, "def");
+ NUTS_SLEEP(100);
+ NUTS_TRUE(nuts_poll_fd(fd) == false);
+
+	// and receiving the message makes it possible again.
+ NUTS_RECV(pull, "abc");
+ NUTS_SLEEP(100);
+ NUTS_TRUE(nuts_poll_fd(fd));
+
+ NUTS_CLOSE(pull);
+ NUTS_CLOSE(push);
+}
+
+static void
+test_push_poll_buffered(void)
+{
+ int fd;
+ nng_socket pull;
+ nng_socket push;
+
+ NUTS_PASS(nng_pull0_open(&pull));
+ NUTS_PASS(nng_push0_open(&push));
+ NUTS_PASS(nng_socket_set_ms(pull, NNG_OPT_RECVTIMEO, 1000));
+ NUTS_PASS(nng_socket_set_ms(push, NNG_OPT_SENDTIMEO, 1000));
+ NUTS_PASS(nng_socket_set_int(push, NNG_OPT_SENDBUF, 2));
+ NUTS_PASS(nng_socket_get_int(push, NNG_OPT_SENDFD, &fd));
+ NUTS_TRUE(fd >= 0);
+
+	// We can write two messages while unbuffered.
+ NUTS_TRUE(nuts_poll_fd(fd));
+ NUTS_SEND(push, "abc");
+ NUTS_TRUE(nuts_poll_fd(fd));
+ NUTS_SEND(push, "def");
+ NUTS_TRUE(nuts_poll_fd(fd) == false);
+
+	// After connect the remote end will pick up one of them.
+ // Also, the local pipe itself will pick up one. So we
+ // have two.
+ NUTS_MARRY(pull, push);
+ NUTS_SLEEP(100);
+ NUTS_TRUE(nuts_poll_fd(fd));
+ NUTS_SEND(push, "ghi");
+ NUTS_SLEEP(100);
+ NUTS_TRUE(nuts_poll_fd(fd));
+ NUTS_SEND(push, "jkl");
+ // Now it should be full.
+ NUTS_TRUE(nuts_poll_fd(fd) == false);
+
+	// and receiving the message makes it possible again.
+ NUTS_RECV(pull, "abc");
+ NUTS_SLEEP(100);
+ NUTS_TRUE(nuts_poll_fd(fd));
+ NUTS_RECV(pull, "def");
+ NUTS_RECV(pull, "ghi");
+ NUTS_RECV(pull, "jkl");
+
+ NUTS_CLOSE(pull);
+ NUTS_CLOSE(push);
+}
+
+static void
+test_push_poll_truncate(void)
+{
+ int fd;
+ nng_socket pull;
+ nng_socket push;
+
+ // This test starts with a buffer and then truncates it to see
+ // that shortening the buffer has an impact.
+
+ NUTS_PASS(nng_pull0_open(&pull));
+ NUTS_PASS(nng_push0_open(&push));
+ NUTS_PASS(nng_socket_set_ms(pull, NNG_OPT_RECVTIMEO, 1000));
+ NUTS_PASS(nng_socket_set_ms(push, NNG_OPT_SENDTIMEO, 1000));
+ NUTS_PASS(nng_socket_set_int(push, NNG_OPT_SENDBUF, 3));
+ NUTS_PASS(nng_socket_get_int(push, NNG_OPT_SENDFD, &fd));
+ NUTS_TRUE(fd >= 0);
+
+	// We can write two messages while unbuffered.
+ NUTS_TRUE(nuts_poll_fd(fd));
+ NUTS_SEND(push, "abc");
+ NUTS_TRUE(nuts_poll_fd(fd));
+ NUTS_SEND(push, "def");
+ NUTS_TRUE(nuts_poll_fd(fd));
+
+ NUTS_PASS(nng_socket_set_int(push, NNG_OPT_SENDBUF, 1));
+ NUTS_TRUE(nuts_poll_fd(fd) == false);
+
+ NUTS_MARRY(pull, push);
+ NUTS_RECV(pull, "abc");
+ // def got dropped
+ NUTS_SEND(push, "ghi");
+ NUTS_RECV(pull, "ghi");
+
+ NUTS_CLOSE(pull);
+ NUTS_SLEEP(100);
+
+ // We have a buffer of one.
+ NUTS_SEND(push, "jkl");
+ // Resize to 0 (unbuffered)
+ NUTS_PASS(nng_socket_set_int(push, NNG_OPT_SENDBUF, 0));
+
+ // reopen the pull socket and connect it
+ NUTS_PASS(nng_pull0_open(&pull));
+ NUTS_MARRY(push, pull);
+
+ // jkl got dropped.
+ NUTS_SEND(push, "mno");
+ NUTS_RECV(pull, "mno");
+
+ NUTS_CLOSE(pull);
+ NUTS_CLOSE(push);
+}
+
+void
+test_push_validate_peer(void)
+{
+ nng_socket s1, s2;
+ nng_stat * stats;
+ nng_stat * reject;
+ char * addr;
+
+ NUTS_ADDR(addr, "inproc");
+
+ NUTS_PASS(nng_push0_open(&s1));
+ NUTS_PASS(nng_push0_open(&s2));
+
+ NUTS_PASS(nng_listen(s1, addr, NULL, 0));
+ NUTS_PASS(nng_dial(s2, addr, NULL, NNG_FLAG_NONBLOCK));
+
+ NUTS_SLEEP(100);
+ NUTS_PASS(nng_stats_get(&stats));
+
+ NUTS_TRUE(stats != NULL);
+ NUTS_TRUE((reject = nng_stat_find_socket(stats, s1)) != NULL);
+ NUTS_TRUE((reject = nng_stat_find(reject, "reject")) != NULL);
+
+ NUTS_TRUE(nng_stat_type(reject) == NNG_STAT_COUNTER);
+ NUTS_TRUE(nng_stat_value(reject) > 0);
+
+ NUTS_CLOSE(s1);
+ NUTS_CLOSE(s2);
+ nng_stats_free(stats);
+}
+
+static void
+test_push_send_aio_stopped(void)
+{
+ nng_socket s;
+ nng_aio * aio;
+ nng_msg * m;
+
+ NUTS_PASS(nng_push0_open(&s));
+ NUTS_PASS(nng_aio_alloc(&aio, NULL, NULL));
+ NUTS_PASS(nng_msg_alloc(&m, 0));
+
+ nng_aio_set_msg(aio, m);
+ nng_aio_stop(aio);
+ nng_send_aio(s, aio);
+ nng_aio_wait(aio);
+ NUTS_FAIL(nng_aio_result(aio), NNG_ECANCELED);
+ NUTS_CLOSE(s);
+ nng_aio_free(aio);
+ nng_msg_free(m);
+}
+
+static void
+test_push_close_send(void)
+{
+ nng_socket s;
+ nng_aio * aio;
+ nng_msg * m;
+
+ NUTS_PASS(nng_push0_open(&s));
+ NUTS_PASS(nng_aio_alloc(&aio, NULL, NULL));
+ NUTS_PASS(nng_msg_alloc(&m, 0));
+ nng_aio_set_timeout(aio, 1000);
+ nng_aio_set_msg(aio, m);
+ nng_send_aio(s, aio);
+ NUTS_PASS(nng_close(s));
+ nng_aio_wait(aio);
+ NUTS_FAIL(nng_aio_result(aio), NNG_ECLOSED);
+
+ nng_aio_free(aio);
+ nng_msg_free(m);
+}
+
+static void
+test_push_send_nonblock(void)
+{
+ nng_socket s;
+ nng_aio * aio;
+ nng_msg * m;
+
+ NUTS_PASS(nng_push0_open(&s));
+ NUTS_PASS(nng_aio_alloc(&aio, NULL, NULL));
+ NUTS_PASS(nng_msg_alloc(&m, 0));
+
+ nng_aio_set_timeout(aio, 0); // Instant timeout
+ nng_aio_set_msg(aio, m);
+ nng_send_aio(s, aio);
+
+ nng_aio_wait(aio);
+ NUTS_FAIL(nng_aio_result(aio), NNG_ETIMEDOUT);
+ NUTS_CLOSE(s);
+ nng_aio_free(aio);
+ nng_msg_free(m);
+}
+
+static void
+test_push_send_timeout(void)
+{
+ nng_socket s;
+ nng_aio * aio;
+ nng_msg * m;
+
+ NUTS_PASS(nng_push0_open(&s));
+ NUTS_PASS(nng_aio_alloc(&aio, NULL, NULL));
+ NUTS_PASS(nng_msg_alloc(&m, 0));
+
+ nng_aio_set_timeout(aio, 10);
+ nng_aio_set_msg(aio, m);
+ nng_send_aio(s, aio);
+
+ nng_aio_wait(aio);
+ NUTS_FAIL(nng_aio_result(aio), NNG_ETIMEDOUT);
+ NUTS_CLOSE(s);
+ nng_aio_free(aio);
+ nng_msg_free(m);
+}
+
+static void
+test_push_send_cancel(void)
+{
+ nng_socket s;
+ nng_aio * aio;
+ nng_msg * m;
+
+ NUTS_PASS(nng_push0_open(&s));
+ NUTS_PASS(nng_aio_alloc(&aio, NULL, NULL));
+ NUTS_PASS(nng_msg_alloc(&m, 0));
+
+ nng_aio_set_timeout(aio, 1000);
+ nng_aio_set_msg(aio, m);
+ nng_send_aio(s, aio);
+ nng_aio_abort(aio, NNG_ECANCELED);
+
+ nng_aio_wait(aio);
+ NUTS_FAIL(nng_aio_result(aio), NNG_ECANCELED);
+ NUTS_CLOSE(s);
+ nng_aio_free(aio);
+ nng_msg_free(m);
+}
+
+static void
+test_push_send_late_unbuffered(void)
+{
+ nng_socket s;
+ nng_socket pull;
+ nng_aio * aio;
+ nng_msg * m;
+
+ NUTS_PASS(nng_push0_open(&s));
+ NUTS_PASS(nng_pull0_open(&pull));
+ NUTS_PASS(nng_aio_alloc(&aio, NULL, NULL));
+ NUTS_PASS(nng_msg_alloc(&m, 0));
+ NUTS_PASS(nng_msg_append(m, "123\0", 4));
+
+ nng_aio_set_timeout(aio, 1000);
+ nng_aio_set_msg(aio, m);
+ nng_send_aio(s, aio);
+
+ NUTS_MARRY(s, pull);
+
+ NUTS_RECV(pull, "123");
+
+ nng_aio_wait(aio);
+ NUTS_PASS(nng_aio_result(aio));
+ NUTS_CLOSE(s);
+ nng_aio_free(aio);
+}
+
+
+static void
+test_push_send_late_buffered(void)
+{
+ nng_socket s;
+ nng_socket pull;
+ nng_aio * aio;
+ nng_msg * m;
+
+ NUTS_PASS(nng_push0_open(&s));
+ NUTS_PASS(nng_pull0_open(&pull));
+ NUTS_PASS(nng_aio_alloc(&aio, NULL, NULL));
+ NUTS_PASS(nng_setopt_int(s, NNG_OPT_SENDBUF, 2));
+ NUTS_PASS(nng_msg_alloc(&m, 0));
+ NUTS_PASS(nng_msg_append(m, "123\0", 4));
+
+ nng_aio_set_timeout(aio, 1000);
+ nng_aio_set_msg(aio, m);
+ nng_send_aio(s, aio);
+
+ NUTS_MARRY(s, pull);
+
+ NUTS_RECV(pull, "123");
+
+ nng_aio_wait(aio);
+ NUTS_PASS(nng_aio_result(aio));
+ NUTS_CLOSE(s);
+ nng_aio_free(aio);
+}
+
+static void
+test_push_cooked(void)
+{
+ nng_socket s;
+ bool b;
+
+ NUTS_PASS(nng_push0_open(&s));
+ NUTS_PASS(nng_socket_get_bool(s, NNG_OPT_RAW, &b));
+ NUTS_TRUE(!b);
+ NUTS_CLOSE(s);
+}
+
+static void
+test_push_load_balance_buffered(void)
+{
+ nng_socket s;
+ nng_socket pull1;
+ nng_socket pull2;
+ nng_socket pull3;
+
+ NUTS_PASS(nng_push0_open(&s));
+ NUTS_PASS(nng_pull0_open(&pull1));
+ NUTS_PASS(nng_pull0_open(&pull2));
+ NUTS_PASS(nng_pull0_open(&pull3));
+ NUTS_PASS(nng_setopt_int(s, NNG_OPT_SENDBUF, 4));
+ NUTS_MARRY(s, pull1);
+ NUTS_MARRY(s, pull2);
+ NUTS_MARRY(s, pull3);
+ NUTS_SLEEP(100);
+ NUTS_SEND(s, "one");
+ NUTS_SEND(s, "two");
+ NUTS_SEND(s, "three");
+ NUTS_RECV(pull1, "one");
+ NUTS_RECV(pull2, "two");
+ NUTS_RECV(pull3, "three");
+ NUTS_CLOSE(s);
+ NUTS_CLOSE(pull1);
+ NUTS_CLOSE(pull2);
+ NUTS_CLOSE(pull3);
+}
+
+static void
+test_push_load_balance_unbuffered(void)
+{
+ nng_socket s;
+ nng_socket pull1;
+ nng_socket pull2;
+ nng_socket pull3;
+
+ NUTS_PASS(nng_push0_open(&s));
+ NUTS_PASS(nng_pull0_open(&pull1));
+ NUTS_PASS(nng_pull0_open(&pull2));
+ NUTS_PASS(nng_pull0_open(&pull3));
+ NUTS_MARRY(s, pull1);
+ NUTS_MARRY(s, pull2);
+ NUTS_MARRY(s, pull3);
+ NUTS_SLEEP(100);
+ NUTS_SEND(s, "one");
+ NUTS_SEND(s, "two");
+ NUTS_SEND(s, "three");
+ NUTS_RECV(pull1, "one");
+ NUTS_RECV(pull2, "two");
+ NUTS_RECV(pull3, "three");
+	// Loop around is somewhat unpredictable, because the
+	// pull sockets can take different periods of time to return
+ // back to readiness.
+ NUTS_CLOSE(s);
+ NUTS_CLOSE(pull1);
+ NUTS_CLOSE(pull2);
+ NUTS_CLOSE(pull3);
+}
+
+static void
+test_push_send_buffer(void)
+{
+ nng_socket s;
+ int v;
+ bool b;
+ size_t sz;
+
+ NUTS_PASS(nng_push0_open(&s));
+ NUTS_PASS(nng_getopt_int(s, NNG_OPT_SENDBUF, &v));
+ NUTS_TRUE(v == 0);
+ NUTS_FAIL(nng_getopt_bool(s, NNG_OPT_SENDBUF, &b), NNG_EBADTYPE);
+ sz = 1;
+ NUTS_FAIL(nng_getopt(s, NNG_OPT_SENDBUF, &b, &sz), NNG_EINVAL);
+ NUTS_FAIL(nng_setopt_int(s, NNG_OPT_SENDBUF, -1), NNG_EINVAL);
+ NUTS_FAIL(nng_setopt_int(s, NNG_OPT_SENDBUF, 100000), NNG_EINVAL);
+ NUTS_FAIL(nng_setopt_bool(s, NNG_OPT_SENDBUF, false), NNG_EBADTYPE);
+ NUTS_FAIL(nng_setopt(s, NNG_OPT_SENDBUF, &b, 1), NNG_EINVAL);
+ NUTS_PASS(nng_setopt_int(s, NNG_OPT_SENDBUF, 100));
+ NUTS_PASS(nng_getopt_int(s, NNG_OPT_SENDBUF, &v));
+ NUTS_TRUE(v == 100);
+ NUTS_CLOSE(s);
+}
+
+TEST_LIST = {
+ { "push identity", test_push_identity },
+ { "push cannot recv", test_push_cannot_recv },
+ { "push no context", test_push_no_context },
+ { "push not readable", test_push_not_readable },
+ { "push poll writable", test_push_poll_writable },
+ { "push poll buffered", test_push_poll_buffered },
+ { "push poll truncate", test_push_poll_truncate },
+ { "push validate peer", test_push_validate_peer },
+ { "push send aio stopped", test_push_send_aio_stopped },
+ { "push close send", test_push_close_send },
+ { "push send nonblock", test_push_send_nonblock },
+ { "push send timeout", test_push_send_timeout },
+ { "push send cancel", test_push_send_cancel },
+ { "push send late buffered", test_push_send_late_buffered },
+ { "push send late unbuffered", test_push_send_late_unbuffered },
+ { "push cooked", test_push_cooked },
+ { "push load balance buffered", test_push_load_balance_buffered },
+ { "push load balance unbuffered", test_push_load_balance_unbuffered },
+ { "push send buffer", test_push_send_buffer },
+ { NULL, NULL },
+};
diff --git a/src/sp/protocol/pubsub0/CMakeLists.txt b/src/sp/protocol/pubsub0/CMakeLists.txt
new file mode 100644
index 00000000..160b7462
--- /dev/null
+++ b/src/sp/protocol/pubsub0/CMakeLists.txt
@@ -0,0 +1,24 @@
+#
+# Copyright 2019 Staysail Systems, Inc. <info@staysail.tech>
+# Copyright 2018 Capitar IT Group BV <info@capitar.com>
+#
+# This software is supplied under the terms of the MIT License, a
+# copy of which should be located in the distribution where this
+# file was obtained (LICENSE.txt). A copy of the license may also be
+# found online at https://opensource.org/licenses/MIT.
+#
+
+# Pub/Sub protocol
+nng_directory(pubsub0)
+
+nng_sources_if(NNG_PROTO_PUB0 pub.c)
+nng_headers_if(NNG_PROTO_PUB0 nng/protocol/pubsub0/pub.h)
+nng_defines_if(NNG_PROTO_PUB0 NNG_HAVE_PUB0)
+
+nng_sources_if(NNG_PROTO_SUB0 sub.c xsub.c)
+nng_headers_if(NNG_PROTO_SUB0 nng/protocol/pubsub0/sub.h)
+nng_defines_if(NNG_PROTO_SUB0 NNG_HAVE_SUB0)
+
+nng_test(pub_test)
+nng_test(sub_test)
+nng_test(xsub_test)
diff --git a/src/sp/protocol/pubsub0/pub.c b/src/sp/protocol/pubsub0/pub.c
new file mode 100644
index 00000000..e3d4f16a
--- /dev/null
+++ b/src/sp/protocol/pubsub0/pub.c
@@ -0,0 +1,383 @@
+//
+// Copyright 2020 Staysail Systems, Inc. <info@staysail.tech>
+// Copyright 2018 Capitar IT Group BV <info@capitar.com>
+//
+// This software is supplied under the terms of the MIT License, a
+// copy of which should be located in the distribution where this
+// file was obtained (LICENSE.txt). A copy of the license may also be
+// found online at https://opensource.org/licenses/MIT.
+//
+
+#include <string.h>
+
+#include "core/nng_impl.h"
+#include "nng/protocol/pubsub0/pub.h"
+
+// Publish protocol. The PUB protocol simply sends messages out, as
+// a broadcast. It has nothing more sophisticated because it does not
+// perform sender-side filtering.  It's best-effort delivery, so anything
+// that can't receive the message won't get one.
+
+#ifndef NNI_PROTO_SUB_V0
+#define NNI_PROTO_SUB_V0 NNI_PROTO(2, 1)
+#endif
+
+#ifndef NNI_PROTO_PUB_V0
+#define NNI_PROTO_PUB_V0 NNI_PROTO(2, 0)
+#endif
+
+typedef struct pub0_pipe pub0_pipe;
+typedef struct pub0_sock pub0_sock;
+
+static void pub0_pipe_recv_cb(void *);
+static void pub0_pipe_send_cb(void *);
+static void pub0_sock_fini(void *);
+static void pub0_pipe_fini(void *);
+
+// pub0_sock is our per-socket protocol private structure.
+struct pub0_sock {
+ nni_list pipes;
+ nni_mtx mtx;
+ bool closed;
+ size_t sendbuf;
+ nni_pollable *sendable;
+};
+
+// pub0_pipe is our per-pipe protocol private structure.
+struct pub0_pipe {
+ nni_pipe * pipe;
+ pub0_sock * pub;
+ nni_lmq sendq;
+ bool closed;
+ bool busy;
+ nni_aio * aio_send;
+ nni_aio * aio_recv;
+ nni_list_node node;
+};
+
+static void
+pub0_sock_fini(void *arg)
+{
+ pub0_sock *s = arg;
+
+ nni_pollable_free(s->sendable);
+ nni_mtx_fini(&s->mtx);
+}
+
+static int
+pub0_sock_init(void *arg, nni_sock *nsock)
+{
+ pub0_sock *sock = arg;
+ int rv;
+ NNI_ARG_UNUSED(nsock);
+
+ if ((rv = nni_pollable_alloc(&sock->sendable)) != 0) {
+ return (rv);
+ }
+ nni_mtx_init(&sock->mtx);
+ NNI_LIST_INIT(&sock->pipes, pub0_pipe, node);
+ sock->sendbuf = 16; // fairly arbitrary
+ return (0);
+}
+
+static void
+pub0_sock_open(void *arg)
+{
+ NNI_ARG_UNUSED(arg);
+}
+
+static void
+pub0_sock_close(void *arg)
+{
+ NNI_ARG_UNUSED(arg);
+}
+
+static void
+pub0_pipe_stop(void *arg)
+{
+ pub0_pipe *p = arg;
+
+ nni_aio_stop(p->aio_send);
+ nni_aio_stop(p->aio_recv);
+}
+
+static void
+pub0_pipe_fini(void *arg)
+{
+ pub0_pipe *p = arg;
+
+ nni_aio_free(p->aio_send);
+ nni_aio_free(p->aio_recv);
+ nni_lmq_fini(&p->sendq);
+}
+
+static int
+pub0_pipe_init(void *arg, nni_pipe *pipe, void *s)
+{
+ pub0_pipe *p = arg;
+ pub0_sock *sock = s;
+ int rv;
+ size_t len;
+
+ nni_mtx_lock(&sock->mtx);
+ len = sock->sendbuf;
+ nni_mtx_unlock(&sock->mtx);
+
+ // XXX: consider making this depth tunable
+ if (((rv = nni_lmq_init(&p->sendq, len)) != 0) ||
+ ((rv = nni_aio_alloc(&p->aio_send, pub0_pipe_send_cb, p)) != 0) ||
+ ((rv = nni_aio_alloc(&p->aio_recv, pub0_pipe_recv_cb, p)) != 0)) {
+
+ pub0_pipe_fini(p);
+ return (rv);
+ }
+
+ p->busy = false;
+ p->pipe = pipe;
+ p->pub = s;
+ return (0);
+}
+
+static int
+pub0_pipe_start(void *arg)
+{
+ pub0_pipe *p = arg;
+ pub0_sock *sock = p->pub;
+
+ if (nni_pipe_peer(p->pipe) != NNI_PROTO_SUB_V0) {
+ return (NNG_EPROTO);
+ }
+ nni_mtx_lock(&sock->mtx);
+ nni_list_append(&sock->pipes, p);
+ nni_mtx_unlock(&sock->mtx);
+
+ // Start the receiver.
+ nni_pipe_recv(p->pipe, p->aio_recv);
+
+ return (0);
+}
+
+static void
+pub0_pipe_close(void *arg)
+{
+ pub0_pipe *p = arg;
+ pub0_sock *sock = p->pub;
+
+ nni_aio_close(p->aio_send);
+ nni_aio_close(p->aio_recv);
+
+ nni_mtx_lock(&sock->mtx);
+ p->closed = true;
+ nni_lmq_flush(&p->sendq);
+
+ if (nni_list_active(&sock->pipes, p)) {
+ nni_list_remove(&sock->pipes, p);
+ }
+ nni_mtx_unlock(&sock->mtx);
+}
+
+static void
+pub0_pipe_recv_cb(void *arg)
+{
+ pub0_pipe *p = arg;
+
+ // We should never receive a message -- the only valid reason for us to
+ // be here is on pipe close.
+ if (nni_aio_result(p->aio_recv) == 0) {
+ nni_msg_free(nni_aio_get_msg(p->aio_recv));
+ }
+ nni_pipe_close(p->pipe);
+}
+
+static void
+pub0_pipe_send_cb(void *arg)
+{
+ pub0_pipe *p = arg;
+ pub0_sock *sock = p->pub;
+ nni_msg * msg;
+
+ if (nni_aio_result(p->aio_send) != 0) {
+ nni_msg_free(nni_aio_get_msg(p->aio_send));
+ nni_aio_set_msg(p->aio_send, NULL);
+ nni_pipe_close(p->pipe);
+ return;
+ }
+
+ nni_mtx_lock(&sock->mtx);
+ if (p->closed) {
+ nni_mtx_unlock(&sock->mtx);
+ return;
+ }
+ if (nni_lmq_getq(&p->sendq, &msg) == 0) {
+ nni_aio_set_msg(p->aio_send, msg);
+ nni_pipe_send(p->pipe, p->aio_send);
+ } else {
+ p->busy = false;
+ }
+ nni_mtx_unlock(&sock->mtx);
+}
+
+static void
+pub0_sock_recv(void *arg, nni_aio *aio)
+{
+ NNI_ARG_UNUSED(arg);
+ if (nni_aio_begin(aio) == 0) {
+ nni_aio_finish_error(aio, NNG_ENOTSUP);
+ }
+}
+
+static void
+pub0_sock_send(void *arg, nni_aio *aio)
+{
+ pub0_sock *sock = arg;
+ pub0_pipe *p;
+ nng_msg * msg;
+ size_t len;
+
+ msg = nni_aio_get_msg(aio);
+ len = nni_msg_len(msg);
+ nni_mtx_lock(&sock->mtx);
+ NNI_LIST_FOREACH (&sock->pipes, p) {
+
+ nni_msg_clone(msg);
+ if (p->busy) {
+ if (nni_lmq_full(&p->sendq)) {
+ // Make space for the new message.
+ nni_msg *old;
+ (void) nni_lmq_getq(&p->sendq, &old);
+ nni_msg_free(old);
+ }
+ nni_lmq_putq(&p->sendq, msg);
+ } else {
+ p->busy = true;
+ nni_aio_set_msg(p->aio_send, msg);
+ nni_pipe_send(p->pipe, p->aio_send);
+ }
+ }
+ nni_mtx_unlock(&sock->mtx);
+ nng_msg_free(msg);
+ nni_aio_finish(aio, 0, len);
+}
+
+static int
+pub0_sock_get_sendfd(void *arg, void *buf, size_t *szp, nni_type t)
+{
+ pub0_sock *sock = arg;
+ int fd;
+ int rv;
+ nni_mtx_lock(&sock->mtx);
+ // PUB sockets are *always* writable.
+ nni_pollable_raise(sock->sendable);
+ rv = nni_pollable_getfd(sock->sendable, &fd);
+ nni_mtx_unlock(&sock->mtx);
+
+ if (rv == 0) {
+ rv = nni_copyout_int(fd, buf, szp, t);
+ }
+ return (rv);
+}
+
+static int
+pub0_sock_set_sendbuf(void *arg, const void *buf, size_t sz, nni_type t)
+{
+ pub0_sock *sock = arg;
+ pub0_pipe *p;
+ int val;
+ int rv;
+
+ if ((rv = nni_copyin_int(&val, buf, sz, 1, 8192, t)) != 0) {
+ return (rv);
+ }
+
+ nni_mtx_lock(&sock->mtx);
+ sock->sendbuf = (size_t) val;
+ NNI_LIST_FOREACH (&sock->pipes, p) {
+ // If we fail part way thru (should only be ENOMEM), we
+ // stop short. The others would likely fail for ENOMEM as
+ // well anyway. There is a weird effect here where the
+ // buffers may have been set for *some* of the pipes, but
+ // we have no way to correct partial failure.
+ if ((rv = nni_lmq_resize(&p->sendq, (size_t) val)) != 0) {
+ break;
+ }
+ }
+ nni_mtx_unlock(&sock->mtx);
+ return (rv);
+}
+
+static int
+pub0_sock_get_sendbuf(void *arg, void *buf, size_t *szp, nni_type t)
+{
+ pub0_sock *sock = arg;
+ int val;
+ nni_mtx_lock(&sock->mtx);
+ val = (int) sock->sendbuf;
+ nni_mtx_unlock(&sock->mtx);
+ return (nni_copyout_int(val, buf, szp, t));
+}
+
+static nni_proto_pipe_ops pub0_pipe_ops = {
+ .pipe_size = sizeof(pub0_pipe),
+ .pipe_init = pub0_pipe_init,
+ .pipe_fini = pub0_pipe_fini,
+ .pipe_start = pub0_pipe_start,
+ .pipe_close = pub0_pipe_close,
+ .pipe_stop = pub0_pipe_stop,
+};
+
+static nni_option pub0_sock_options[] = {
+	// NULL name member below terminates the list
+ {
+ .o_name = NNG_OPT_SENDFD,
+ .o_get = pub0_sock_get_sendfd,
+ },
+ {
+ .o_name = NNG_OPT_SENDBUF,
+ .o_get = pub0_sock_get_sendbuf,
+ .o_set = pub0_sock_set_sendbuf,
+ },
+ {
+ .o_name = NULL,
+ },
+};
+
+static nni_proto_sock_ops pub0_sock_ops = {
+ .sock_size = sizeof(pub0_sock),
+ .sock_init = pub0_sock_init,
+ .sock_fini = pub0_sock_fini,
+ .sock_open = pub0_sock_open,
+ .sock_close = pub0_sock_close,
+ .sock_send = pub0_sock_send,
+ .sock_recv = pub0_sock_recv,
+ .sock_options = pub0_sock_options,
+};
+
+static nni_proto pub0_proto = {
+ .proto_version = NNI_PROTOCOL_VERSION,
+ .proto_self = { NNI_PROTO_PUB_V0, "pub" },
+ .proto_peer = { NNI_PROTO_SUB_V0, "sub" },
+ .proto_flags = NNI_PROTO_FLAG_SND,
+ .proto_sock_ops = &pub0_sock_ops,
+ .proto_pipe_ops = &pub0_pipe_ops,
+};
+
+static nni_proto pub0_proto_raw = {
+ .proto_version = NNI_PROTOCOL_VERSION,
+ .proto_self = { NNI_PROTO_PUB_V0, "pub" },
+ .proto_peer = { NNI_PROTO_SUB_V0, "sub" },
+ .proto_flags = NNI_PROTO_FLAG_SND | NNI_PROTO_FLAG_RAW,
+ .proto_sock_ops = &pub0_sock_ops,
+ .proto_pipe_ops = &pub0_pipe_ops,
+};
+
+int
+nng_pub0_open(nng_socket *sidp)
+{
+ return (nni_proto_open(sidp, &pub0_proto));
+}
+
+int
+nng_pub0_open_raw(nng_socket *sidp)
+{
+ return (nni_proto_open(sidp, &pub0_proto_raw));
+}
diff --git a/src/sp/protocol/pubsub0/pub_test.c b/src/sp/protocol/pubsub0/pub_test.c
new file mode 100644
index 00000000..a430b610
--- /dev/null
+++ b/src/sp/protocol/pubsub0/pub_test.c
@@ -0,0 +1,331 @@
+//
+// Copyright 2020 Staysail Systems, Inc. <info@staysail.tech>
+//
+// This software is supplied under the terms of the MIT License, a
+// copy of which should be located in the distribution where this
+// file was obtained (LICENSE.txt). A copy of the license may also be
+// found online at https://opensource.org/licenses/MIT.
+//
+
+#include <nuts.h>
+
+static void
+test_pub_identity(void)
+{
+ nng_socket s;
+ int p;
+ char * n;
+
+ NUTS_PASS(nng_pub0_open(&s));
+ NUTS_PASS(nng_socket_get_int(s, NNG_OPT_PROTO, &p));
+ NUTS_TRUE(p == NUTS_PROTO(2u, 0u)); // 32
+ NUTS_PASS(nng_socket_get_int(s, NNG_OPT_PEER, &p));
+ NUTS_TRUE(p == NUTS_PROTO(2u, 1u)); // 33
+ NUTS_PASS(nng_socket_get_string(s, NNG_OPT_PROTONAME, &n));
+ NUTS_MATCH(n, "pub");
+ nng_strfree(n);
+ NUTS_PASS(nng_socket_get_string(s, NNG_OPT_PEERNAME, &n));
+ NUTS_MATCH(n, "sub");
+ nng_strfree(n);
+ NUTS_CLOSE(s);
+}
+
+static void
+test_pub_cannot_recv(void)
+{
+ nng_socket pub;
+
+ NUTS_PASS(nng_pub0_open(&pub));
+ NUTS_FAIL(nng_recv(pub, "", 0, 0), NNG_ENOTSUP);
+ NUTS_CLOSE(pub);
+}
+
+static void
+test_pub_no_context(void)
+{
+ nng_socket pub;
+ nng_ctx ctx;
+
+ NUTS_PASS(nng_pub0_open(&pub));
+ NUTS_FAIL(nng_ctx_open(&ctx, pub), NNG_ENOTSUP);
+ NUTS_CLOSE(pub);
+}
+
+static void
+test_pub_not_readable(void)
+{
+ int fd;
+ nng_socket pub;
+
+ NUTS_PASS(nng_pub0_open(&pub));
+ NUTS_FAIL(nng_socket_get_int(pub, NNG_OPT_RECVFD, &fd), NNG_ENOTSUP);
+ NUTS_CLOSE(pub);
+}
+
+static void
+test_pub_poll_writeable(void)
+{
+ int fd;
+ nng_socket pub;
+ nng_socket sub;
+
+ NUTS_PASS(nng_sub0_open(&sub));
+ NUTS_PASS(nng_pub0_open(&pub));
+ NUTS_PASS(nng_socket_get_int(pub, NNG_OPT_SENDFD, &fd));
+ NUTS_TRUE(fd >= 0);
+
+ // Pub is *always* writeable
+ NUTS_TRUE(nuts_poll_fd(fd));
+
+ // Even after connect (no message yet)
+ NUTS_MARRY(pub, sub);
+ NUTS_TRUE(nuts_poll_fd(fd));
+
+ // Even if we send messages.
+ NUTS_SEND(pub, "abc");
+ NUTS_TRUE(nuts_poll_fd(fd));
+
+ NUTS_CLOSE(pub);
+ NUTS_CLOSE(sub);
+}
+
+static void
+test_pub_send_no_pipes(void)
+{
+ nng_socket pub;
+
+ NUTS_PASS(nng_pub0_open(&pub));
+ NUTS_SEND(pub, "DROP1");
+ NUTS_SEND(pub, "DROP2");
+ NUTS_CLOSE(pub);
+}
+
+void
+test_pub_validate_peer(void)
+{
+ nng_socket s1, s2;
+ nng_stat * stats;
+ nng_stat * reject;
+ char *addr;
+
+ NUTS_ADDR(addr, "inproc");
+
+ NUTS_PASS(nng_pub0_open(&s1));
+ NUTS_PASS(nng_pub0_open(&s2));
+
+ NUTS_PASS(nng_listen(s1, addr, NULL, 0));
+ NUTS_PASS(nng_dial(s2, addr, NULL, NNG_FLAG_NONBLOCK));
+
+ NUTS_SLEEP(100);
+ NUTS_PASS(nng_stats_get(&stats));
+
+ NUTS_TRUE(stats != NULL);
+ NUTS_TRUE((reject = nng_stat_find_socket(stats, s1)) != NULL);
+ NUTS_TRUE((reject = nng_stat_find(reject, "reject")) != NULL);
+
+ NUTS_TRUE(nng_stat_type(reject) == NNG_STAT_COUNTER);
+ NUTS_TRUE(nng_stat_value(reject) > 0);
+
+ NUTS_CLOSE(s1);
+ NUTS_CLOSE(s2);
+ nng_stats_free(stats);
+}
+
+static void
+test_pub_send_queued(void)
+{
+ nng_socket pub;
+ nng_socket sub;
+
+ // MB: What we really need is a mock so that we can send harder
+ // than we receive -- we need a way to apply back-pressure for this
+ // test to be really meaningful.
+ NUTS_PASS(nng_pub0_open(&pub));
+ NUTS_PASS(nng_sub0_open(&sub));
+ NUTS_PASS(nng_socket_set(sub, NNG_OPT_SUB_SUBSCRIBE, "", 0));
+ NUTS_PASS(nng_socket_set_int(pub, NNG_OPT_SENDBUF, 10));
+ NUTS_PASS(nng_socket_set_int(sub, NNG_OPT_RECVBUF, 10));
+ NUTS_PASS(nng_socket_set_ms(pub, NNG_OPT_SENDTIMEO, 1000));
+ NUTS_PASS(nng_socket_set_ms(sub, NNG_OPT_RECVTIMEO, 1000));
+ NUTS_MARRY(pub, sub);
+ NUTS_SEND(pub, "first");
+ NUTS_SEND(pub, "second");
+ NUTS_SEND(pub, "three musketeers");
+ NUTS_SEND(pub, "four");
+ NUTS_SLEEP(50);
+ NUTS_RECV(sub, "first");
+ NUTS_RECV(sub, "second");
+ NUTS_RECV(sub, "three musketeers");
+ NUTS_RECV(sub, "four");
+
+ NUTS_CLOSE(pub);
+ NUTS_CLOSE(sub);
+}
+static void
+test_sub_recv_ctx_closed(void)
+{
+ nng_socket sub;
+ nng_ctx ctx;
+ nng_aio * aio;
+ NUTS_PASS(nng_sub0_open(&sub));
+ NUTS_PASS(nng_ctx_open(&ctx, sub));
+ NUTS_PASS(nng_aio_alloc(&aio, NULL, NULL));
+ nng_ctx_close(ctx);
+ nng_ctx_recv(ctx, aio);
+ nng_aio_wait(aio);
+ NUTS_FAIL(nng_aio_result(aio), NNG_ECLOSED);
+ nng_aio_free(aio);
+ NUTS_CLOSE(sub);
+}
+
+static void
+test_sub_ctx_recv_aio_stopped(void)
+{
+ nng_socket sub;
+ nng_ctx ctx;
+ nng_aio * aio;
+
+ NUTS_PASS(nng_sub0_open(&sub));
+ NUTS_PASS(nng_aio_alloc(&aio, NULL, NULL));
+ NUTS_PASS(nng_ctx_open(&ctx, sub));
+
+ nng_aio_stop(aio);
+ nng_ctx_recv(ctx, aio);
+ nng_aio_wait(aio);
+ NUTS_FAIL(nng_aio_result(aio), NNG_ECANCELED);
+ NUTS_PASS(nng_ctx_close(ctx));
+ NUTS_CLOSE(sub);
+ nng_aio_free(aio);
+}
+
+static void
+test_sub_close_context_recv(void)
+{
+ nng_socket sub;
+ nng_ctx ctx;
+ nng_aio * aio;
+
+ NUTS_PASS(nng_sub0_open(&sub));
+ NUTS_PASS(nng_ctx_open(&ctx, sub));
+ NUTS_PASS(nng_aio_alloc(&aio, NULL, NULL));
+ nng_aio_set_timeout(aio, 1000);
+ nng_ctx_recv(ctx, aio);
+ NUTS_PASS(nng_ctx_close(ctx));
+ nng_aio_wait(aio);
+ NUTS_FAIL(nng_aio_result(aio), NNG_ECLOSED);
+
+ NUTS_CLOSE(sub);
+ nng_aio_free(aio);
+}
+
+static void
+test_sub_ctx_recv_nonblock(void)
+{
+ nng_socket sub;
+ nng_ctx ctx;
+ nng_aio * aio;
+
+ NUTS_PASS(nng_sub0_open(&sub));
+ NUTS_PASS(nng_ctx_open(&ctx, sub));
+ NUTS_PASS(nng_aio_alloc(&aio, NULL, NULL));
+
+ nng_aio_set_timeout(aio, 0); // Instant timeout
+ nng_ctx_recv(ctx, aio);
+
+ nng_aio_wait(aio);
+ NUTS_FAIL(nng_aio_result(aio), NNG_ETIMEDOUT);
+ NUTS_CLOSE(sub);
+ nng_aio_free(aio);
+}
+
+static void
+test_sub_ctx_recv_cancel(void)
+{
+ nng_socket sub;
+ nng_ctx ctx;
+ nng_aio * aio;
+
+ NUTS_PASS(nng_sub0_open(&sub));
+ NUTS_PASS(nng_ctx_open(&ctx, sub));
+ NUTS_PASS(nng_aio_alloc(&aio, NULL, NULL));
+
+ nng_aio_set_timeout(aio, 1000);
+ nng_ctx_recv(ctx, aio);
+ nng_aio_abort(aio, NNG_ECANCELED);
+
+ nng_aio_wait(aio);
+ NUTS_FAIL(nng_aio_result(aio), NNG_ECANCELED);
+ NUTS_CLOSE(sub);
+ nng_aio_free(aio);
+}
+
+static void
+test_pub_send_buf_option(void)
+{
+ nng_socket pub;
+ int v;
+ bool b;
+ size_t sz;
+ const char *opt = NNG_OPT_SENDBUF;
+
+ NUTS_PASS(nng_pub0_open(&pub));
+
+ NUTS_PASS(nng_socket_set_int(pub, opt, 1));
+ NUTS_FAIL(nng_socket_set_int(pub, opt, 0), NNG_EINVAL);
+ NUTS_FAIL(nng_socket_set_int(pub, opt, -1), NNG_EINVAL);
+ NUTS_FAIL(nng_socket_set_int(pub, opt, 1000000), NNG_EINVAL);
+ NUTS_PASS(nng_socket_set_int(pub, opt, 3));
+ NUTS_PASS(nng_socket_get_int(pub, opt, &v));
+ NUTS_TRUE(v == 3);
+ v = 0;
+ sz = sizeof(v);
+ NUTS_PASS(nng_socket_get(pub, opt, &v, &sz));
+ NUTS_TRUE(v == 3);
+ NUTS_TRUE(sz == sizeof(v));
+
+ NUTS_FAIL(nng_socket_set(pub, opt, "", 1), NNG_EINVAL);
+ sz = 1;
+ NUTS_FAIL(nng_socket_get(pub, opt, &v, &sz), NNG_EINVAL);
+ NUTS_FAIL(nng_socket_set_bool(pub, opt, true), NNG_EBADTYPE);
+ NUTS_FAIL(nng_socket_get_bool(pub, opt, &b), NNG_EBADTYPE);
+
+ NUTS_CLOSE(pub);
+}
+
+static void
+test_pub_cooked(void)
+{
+ nng_socket s;
+ bool b;
+
+ NUTS_PASS(nng_pub0_open(&s));
+ NUTS_PASS(nng_socket_get_bool(s, NNG_OPT_RAW, &b));
+ NUTS_TRUE(!b);
+ NUTS_FAIL(nng_socket_set_bool(s, NNG_OPT_RAW, true), NNG_EREADONLY);
+ NUTS_PASS(nng_close(s));
+
+ // raw pub only differs in the option setting
+ NUTS_PASS(nng_pub0_open_raw(&s));
+ NUTS_PASS(nng_socket_get_bool(s, NNG_OPT_RAW, &b));
+ NUTS_TRUE(b);
+ NUTS_CLOSE(s);
+}
+
+NUTS_TESTS = {
+ { "pub identity", test_pub_identity },
+ { "pub cannot recv", test_pub_cannot_recv },
+ { "put no context", test_pub_no_context },
+ { "pub not readable", test_pub_not_readable },
+ { "pub poll writeable", test_pub_poll_writeable },
+ { "pub validate peer", test_pub_validate_peer },
+ { "pub send queued", test_pub_send_queued },
+ { "pub send no pipes", test_pub_send_no_pipes },
+ { "sub recv ctx closed", test_sub_recv_ctx_closed },
+ { "sub recv aio ctx stopped", test_sub_ctx_recv_aio_stopped },
+ { "sub close context recv", test_sub_close_context_recv },
+ { "sub context recv nonblock", test_sub_ctx_recv_nonblock },
+ { "sub context recv cancel", test_sub_ctx_recv_cancel },
+ { "pub send buf option", test_pub_send_buf_option },
+ { "pub cooked", test_pub_cooked },
+ { NULL, NULL },
+};
diff --git a/src/sp/protocol/pubsub0/sub.c b/src/sp/protocol/pubsub0/sub.c
new file mode 100644
index 00000000..9f3f2283
--- /dev/null
+++ b/src/sp/protocol/pubsub0/sub.c
@@ -0,0 +1,755 @@
+//
+// Copyright 2020 Staysail Systems, Inc. <info@staysail.tech>
+// Copyright 2018 Capitar IT Group BV <info@capitar.com>
+// Copyright 2019 Nathan Kent <nate@nkent.net>
+//
+// This software is supplied under the terms of the MIT License, a
+// copy of which should be located in the distribution where this
+// file was obtained (LICENSE.txt). A copy of the license may also be
+// found online at https://opensource.org/licenses/MIT.
+//
+
+#include <stdbool.h>
+#include <string.h>
+
+#include "core/nng_impl.h"
+#include "nng/protocol/pubsub0/sub.h"
+
+// Subscriber protocol. The SUB protocol receives messages sent to
+// it from publishers, and filters out those it is not interested in,
+// only passing up ones that match known subscriptions.
+
+#ifndef NNI_PROTO_SUB_V0
+#define NNI_PROTO_SUB_V0 NNI_PROTO(2, 1)
+#endif
+
+#ifndef NNI_PROTO_PUB_V0
+#define NNI_PROTO_PUB_V0 NNI_PROTO(2, 0)
+#endif
+
+// By default we accept 128 messages.
+#define SUB0_DEFAULT_RECV_BUF_LEN 128
+
+// By default, prefer new messages when the queue is full.
+#define SUB0_DEFAULT_PREFER_NEW true
+
+typedef struct sub0_pipe sub0_pipe;
+typedef struct sub0_sock sub0_sock;
+typedef struct sub0_ctx sub0_ctx;
+typedef struct sub0_topic sub0_topic;
+
+static void sub0_recv_cb(void *);
+static void sub0_pipe_fini(void *);
+
+// sub0_topic is a single subscription: a byte prefix that incoming
+// message bodies are matched against.  A zero-length topic matches all.
+struct sub0_topic {
+	nni_list_node node; // linkage on the owning context's topic list
+	size_t        len;  // number of bytes in buf
+	void *        buf;  // topic prefix bytes (not NUL terminated)
+};
+
+// sub0_ctx is a context for a SUB socket. The advantage of contexts is
+// that different contexts can maintain different subscriptions.
+// All fields are protected by the owning socket's lock (sock->lk).
+struct sub0_ctx {
+	nni_list_node node;       // linkage on sock->contexts
+	sub0_sock *   sock;       // back pointer to owning socket
+	nni_list      topics;     // TODO: Consider patricia trie
+	nni_list      recv_queue; // can have multiple pending receives
+	nni_lmq       lmq;        // buffered messages awaiting receivers
+	bool          prefer_new; // drop oldest (true) vs newest when full
+};
+
+// sub0_sock is our per-socket protocol private structure.
+// The "master" context backs the plain socket receive path; additional
+// contexts are created explicitly by the application.
+struct sub0_sock {
+	nni_pollable readable;     // raised when the master context has data
+	sub0_ctx     master;       // default context
+	nni_list     contexts;     // all contexts
+	int          num_contexts; // count of entries on the contexts list
+	size_t       recv_buf_len; // default lmq depth for new contexts
+	bool         prefer_new;   // default drop policy for new contexts
+	nni_mtx      lk;           // guards everything above and all contexts
+};
+
+// sub0_pipe is our per-pipe protocol private structure.
+struct sub0_pipe {
+	nni_pipe * pipe;     // underlying transport pipe
+	sub0_sock *sub;      // owning socket
+	nni_aio    aio_recv; // continuously re-armed receive operation
+};
+
+// sub0_ctx_cancel aborts a pending context receive.  Standard aio
+// cancellation callback: if the aio is still parked on the context's
+// receive queue, remove it and complete it with the given error.
+static void
+sub0_ctx_cancel(nng_aio *aio, void *arg, int rv)
+{
+	sub0_ctx * ctx  = arg;
+	sub0_sock *sock = ctx->sock;
+	nni_mtx_lock(&sock->lk);
+	if (nni_list_active(&ctx->recv_queue, aio)) {
+		nni_list_remove(&ctx->recv_queue, aio);
+		nni_aio_finish_error(aio, rv);
+	}
+	nni_mtx_unlock(&sock->lk);
+}
+
+// sub0_ctx_recv receives the next matching message on a context.
+// If the context's queue is empty the aio is parked on recv_queue
+// until sub0_recv_cb delivers a message (or the aio is canceled).
+static void
+sub0_ctx_recv(void *arg, nni_aio *aio)
+{
+	sub0_ctx * ctx  = arg;
+	sub0_sock *sock = ctx->sock;
+	nni_msg *  msg;
+
+	if (nni_aio_begin(aio) != 0) {
+		return;
+	}
+
+	nni_mtx_lock(&sock->lk);
+
+again:
+	if (nni_lmq_empty(&ctx->lmq)) {
+		int rv;
+		if ((rv = nni_aio_schedule(aio, sub0_ctx_cancel, ctx)) != 0) {
+			nni_mtx_unlock(&sock->lk);
+			nni_aio_finish_error(aio, rv);
+			return;
+		}
+		nni_list_append(&ctx->recv_queue, aio);
+		nni_mtx_unlock(&sock->lk);
+		return;
+	}
+
+	(void) nni_lmq_getq(&ctx->lmq, &msg);
+
+	// Draining the last queued message on the master (socket-level)
+	// context means the socket is no longer "readable" for pollers.
+	if (nni_lmq_empty(&ctx->lmq) && (ctx == &sock->master)) {
+		nni_pollable_clear(&sock->readable);
+	}
+	// The message may be shared with other contexts; obtain an
+	// exclusive copy.  On allocation failure the message is lost,
+	// so retry with the next queued message (if any).
+	if ((msg = nni_msg_unique(msg)) == NULL) {
+		goto again;
+	}
+	nni_aio_set_msg(aio, msg);
+	nni_mtx_unlock(&sock->lk);
+	nni_aio_finish(aio, 0, nni_msg_len(msg));
+}
+
+// sub0_ctx_send always fails: SUB is a receive-only protocol.
+static void
+sub0_ctx_send(void *arg, nni_aio *aio)
+{
+	NNI_ARG_UNUSED(arg);
+	if (nni_aio_begin(aio) == 0) {
+		nni_aio_finish_error(aio, NNG_ENOTSUP);
+	}
+}
+
+// sub0_ctx_close aborts every receive operation parked on the context
+// with NNG_ECLOSED.  Queued messages are left for sub0_ctx_fini.
+static void
+sub0_ctx_close(void *arg)
+{
+	sub0_ctx * ctx  = arg;
+	sub0_sock *sock = ctx->sock;
+	nni_aio *  aio;
+
+	nni_mtx_lock(&sock->lk);
+	while ((aio = nni_list_first(&ctx->recv_queue)) != NULL) {
+		nni_list_remove(&ctx->recv_queue, aio);
+		nni_aio_finish_error(aio, NNG_ECLOSED);
+	}
+	nni_mtx_unlock(&sock->lk);
+}
+
+// sub0_ctx_fini tears down a context: aborts pending receives,
+// detaches it from the socket, then frees its topics and message
+// queue.  Also used for the embedded master context via sock_fini.
+static void
+sub0_ctx_fini(void *arg)
+{
+	sub0_ctx *  ctx  = arg;
+	sub0_sock * sock = ctx->sock;
+	sub0_topic *topic;
+
+	sub0_ctx_close(ctx);
+
+	nni_mtx_lock(&sock->lk);
+	nni_list_remove(&sock->contexts, ctx);
+	sock->num_contexts--;
+	nni_mtx_unlock(&sock->lk);
+
+	// No lock needed below: the context is no longer reachable.
+	while ((topic = nni_list_first(&ctx->topics)) != 0) {
+		nni_list_remove(&ctx->topics, topic);
+		nni_free(topic->buf, topic->len);
+		NNI_FREE_STRUCT(topic);
+	}
+
+	nni_lmq_fini(&ctx->lmq);
+}
+
+// sub0_ctx_init initializes a context attached to the given socket.
+// The context inherits the socket's current receive depth and
+// prefer-new policy, and is registered on the socket's context list.
+// Returns 0 on success or NNG_ENOMEM if the queue cannot be created.
+static int
+sub0_ctx_init(void *ctx_arg, void *sock_arg)
+{
+	sub0_sock *sock = sock_arg;
+	sub0_ctx * ctx  = ctx_arg;
+	size_t     len;
+	bool       prefer_new;
+	int        rv;
+
+	nni_mtx_lock(&sock->lk);
+	len        = sock->recv_buf_len;
+	prefer_new = sock->prefer_new;
+
+	if ((rv = nni_lmq_init(&ctx->lmq, len)) != 0) {
+		// BUG FIX: previously returned with sock->lk still held,
+		// leaving the socket mutex locked forever on this error
+		// path and deadlocking subsequent socket operations.
+		nni_mtx_unlock(&sock->lk);
+		return (rv);
+	}
+	ctx->prefer_new = prefer_new;
+
+	nni_aio_list_init(&ctx->recv_queue);
+	NNI_LIST_INIT(&ctx->topics, sub0_topic, node);
+
+	ctx->sock = sock;
+
+	nni_list_append(&sock->contexts, ctx);
+	sock->num_contexts++;
+	nni_mtx_unlock(&sock->lk);
+
+	return (0);
+}
+
+// sub0_sock_fini releases all socket resources, including the
+// embedded master context, the pollable, and the socket lock.
+static void
+sub0_sock_fini(void *arg)
+{
+	sub0_sock *sock = arg;
+
+	sub0_ctx_fini(&sock->master);
+	nni_pollable_fini(&sock->readable);
+	nni_mtx_fini(&sock->lk);
+}
+
+// sub0_sock_init sets up the per-socket state with default buffering
+// and drop policy, and initializes the embedded master context that
+// backs the plain (context-free) socket receive path.
+static int
+sub0_sock_init(void *arg, nni_sock *unused)
+{
+	sub0_sock *sock = arg;
+	int        rv;
+
+	NNI_ARG_UNUSED(unused);
+
+	NNI_LIST_INIT(&sock->contexts, sub0_ctx, node);
+	nni_mtx_init(&sock->lk);
+	sock->recv_buf_len = SUB0_DEFAULT_RECV_BUF_LEN;
+	sock->prefer_new   = SUB0_DEFAULT_PREFER_NEW;
+	nni_pollable_init(&sock->readable);
+
+	if ((rv = sub0_ctx_init(&sock->master, sock)) != 0) {
+		sub0_sock_fini(sock);
+		return (rv);
+	}
+
+	return (0);
+}
+
+// sub0_sock_open: nothing to do at open time for SUB.
+static void
+sub0_sock_open(void *arg)
+{
+	NNI_ARG_UNUSED(arg);
+}
+
+// sub0_sock_close aborts receives pending on the master context.
+// (Application-created contexts are closed individually.)
+static void
+sub0_sock_close(void *arg)
+{
+	sub0_sock *sock = arg;
+	sub0_ctx_close(&sock->master);
+}
+
+// sub0_pipe_stop waits for the pipe's receive aio to fully stop.
+static void
+sub0_pipe_stop(void *arg)
+{
+	sub0_pipe *p = arg;
+
+	nni_aio_stop(&p->aio_recv);
+}
+
+// sub0_pipe_fini releases the pipe's receive aio.
+static void
+sub0_pipe_fini(void *arg)
+{
+	sub0_pipe *p = arg;
+
+	nni_aio_fini(&p->aio_recv);
+}
+
+// sub0_pipe_init wires up the per-pipe state and its receive
+// completion callback.  Cannot fail.
+static int
+sub0_pipe_init(void *arg, nni_pipe *pipe, void *s)
+{
+	sub0_pipe *p = arg;
+
+	nni_aio_init(&p->aio_recv, sub0_recv_cb, p);
+
+	p->pipe = pipe;
+	p->sub  = s;
+	return (0);
+}
+
+// sub0_pipe_start validates that the peer speaks PUB, then arms the
+// first receive.  Rejecting mismatched peers yields NNG_EPROTO and is
+// counted in the socket's "reject" statistic.
+static int
+sub0_pipe_start(void *arg)
+{
+	sub0_pipe *p = arg;
+
+	if (nni_pipe_peer(p->pipe) != NNI_PROTO_PUB_V0) {
+		// Peer protocol mismatch.
+		return (NNG_EPROTO);
+	}
+
+	nni_pipe_recv(p->pipe, &p->aio_recv);
+	return (0);
+}
+
+// sub0_pipe_close aborts the in-flight receive on the pipe.
+static void
+sub0_pipe_close(void *arg)
+{
+	sub0_pipe *p = arg;
+
+	nni_aio_close(&p->aio_recv);
+}
+
+// sub0_matches reports whether a message body matches any of the
+// context's subscriptions (prefix match; an empty topic matches all).
+// Caller must hold the socket lock.  Returns false when the context
+// has no subscriptions at all.
+static bool
+sub0_matches(sub0_ctx *ctx, uint8_t *body, size_t len)
+{
+	sub0_topic *topic;
+
+	// This is a naive and trivial matcher.  Replace with a real
+	// patricia trie later.
+	NNI_LIST_FOREACH (&ctx->topics, topic) {
+		if (len < topic->len) {
+			continue;
+		}
+		if ((topic->len == 0) ||
+		    (memcmp(topic->buf, body, topic->len) == 0)) {
+			return (true);
+		}
+	}
+	return (false);
+}
+
+// sub0_recv_cb runs when a message arrives from a pipe.  It fans the
+// message out to every context whose subscriptions match: waiting
+// receivers are completed directly, otherwise the message is queued
+// (evicting the oldest entry when the queue is full and the context
+// prefers new messages).  Finally it re-arms the pipe receive.
+static void
+sub0_recv_cb(void *arg)
+{
+	sub0_pipe *p    = arg;
+	sub0_sock *sock = p->sub;
+	sub0_ctx * ctx;
+	nni_msg *  msg;
+	size_t     len;
+	uint8_t *  body;
+	nni_list   finish;
+	nng_aio *  aio;
+	nni_msg *  dup_msg;
+
+	if (nni_aio_result(&p->aio_recv) != 0) {
+		// Receive failed (e.g. pipe closed); tear the pipe down.
+		nni_pipe_close(p->pipe);
+		return;
+	}
+
+	nni_aio_list_init(&finish);
+
+	msg = nni_aio_get_msg(&p->aio_recv);
+	nni_aio_set_msg(&p->aio_recv, NULL);
+	nni_msg_set_pipe(msg, nni_pipe_id(p->pipe));
+
+	// Capture body/len up front; msg ownership may be transferred
+	// (or the message freed) before the aios are completed below.
+	body    = nni_msg_body(msg);
+	len     = nni_msg_len(msg);
+	dup_msg = NULL;
+
+	nni_mtx_lock(&sock->lk);
+	// Go through all contexts.  We will try to send up.
+	NNI_LIST_FOREACH (&sock->contexts, ctx) {
+		bool queued = false;
+
+		if (nni_lmq_full(&ctx->lmq) && !ctx->prefer_new) {
+			// Cannot deliver here, as receive buffer is full.
+			continue;
+		}
+
+		if (!sub0_matches(ctx, body, len)) {
+			continue;
+		}
+
+		// This is a performance optimization, that ensures we
+		// do not duplicate a message in the common case, where there
+		// is only a single context.
+		if (sock->num_contexts > 1) {
+			if (nni_msg_dup(&dup_msg, msg) != 0) {
+				// if we cannot dup it, continue on
+				continue;
+			}
+		} else {
+			// We only have one context, so it's the only
+			// possible message.
+			dup_msg = msg;
+		}
+
+		if (!nni_list_empty(&ctx->recv_queue)) {
+			// A receiver is already waiting; hand it the
+			// message directly.
+			aio = nni_list_first(&ctx->recv_queue);
+			nni_list_remove(&ctx->recv_queue, aio);
+			nni_aio_set_msg(aio, dup_msg);
+
+			// Save for synchronous completion
+			nni_list_append(&finish, aio);
+		} else if (nni_lmq_full(&ctx->lmq)) {
+			// Queue full and prefer_new: evict the oldest
+			// message to make space for the new one.
+			nni_msg *old;
+			(void) nni_lmq_getq(&ctx->lmq, &old);
+			nni_msg_free(old);
+
+			(void) nni_lmq_putq(&ctx->lmq, dup_msg);
+			queued = true;
+
+		} else {
+			(void) nni_lmq_putq(&ctx->lmq, dup_msg);
+			queued = true;
+		}
+		// Only the master context drives socket-level pollability.
+		if (queued && ctx == &sock->master) {
+			nni_pollable_raise(&sock->readable);
+		}
+	}
+	nni_mtx_unlock(&sock->lk);
+
+	// NB: This is slightly less efficient in that we may have
+	// created an extra copy in the face of e.g. two subscriptions,
+	// but optimizing this further would require checking the subscription
+	// list twice, adding complexity.  If this turns out to be a problem
+	// we could probably add some other sophistication with a counter
+	// and flags on the contexts themselves.
+	if (msg != dup_msg) {
+		// If we didn't just use the message, then free our copy.
+		nni_msg_free(msg);
+	}
+
+	// Complete the waiting receivers outside the lock.
+	while ((aio = nni_list_first(&finish)) != NULL) {
+		nni_list_remove(&finish, aio);
+		nni_aio_finish_sync(aio, 0, len);
+	}
+
+	nni_pipe_recv(p->pipe, &p->aio_recv);
+}
+
+// sub0_ctx_get_recv_buf_len returns the context's receive queue
+// capacity (NNG_OPT_RECVBUF) as an integer option value.
+static int
+sub0_ctx_get_recv_buf_len(void *arg, void *buf, size_t *szp, nni_type t)
+{
+	sub0_ctx * ctx  = arg;
+	sub0_sock *sock = ctx->sock;
+	int        val;
+	nni_mtx_lock(&sock->lk);
+	val = (int) nni_lmq_cap(&ctx->lmq);
+	nni_mtx_unlock(&sock->lk);
+
+	return (nni_copyout_int(val, buf, szp, t));
+}
+
+// sub0_ctx_set_recv_buf_len resizes the context's receive queue
+// (NNG_OPT_RECVBUF).  Valid range is 1..8192; out-of-range values
+// yield NNG_EINVAL from nni_copyin_int.
+static int
+sub0_ctx_set_recv_buf_len(void *arg, const void *buf, size_t sz, nni_type t)
+{
+	sub0_ctx * ctx  = arg;
+	sub0_sock *sock = ctx->sock;
+	int        val;
+	int        rv;
+
+	if ((rv = nni_copyin_int(&val, buf, sz, 1, 8192, t)) != 0) {
+		return (rv);
+	}
+	nni_mtx_lock(&sock->lk);
+	if ((rv = nni_lmq_resize(&ctx->lmq, (size_t) val)) != 0) {
+		nni_mtx_unlock(&sock->lk);
+		return (rv);
+	}
+
+	// If we change the socket, then this will change the queue for
+	// any new contexts. (Previously constructed contexts are unaffected.)
+	if (&sock->master == ctx) {
+		sock->recv_buf_len = (size_t) val;
+	}
+	nni_mtx_unlock(&sock->lk);
+	return (0);
+}
+
+// For now we maintain subscriptions on a sorted linked list. As we do not
+// expect to have huge numbers of subscriptions, and as the operation is
+// really O(n), we think this is acceptable. In the future we might decide
+// to replace this with a patricia trie, like old nanomsg had.
+
+// sub0_ctx_subscribe adds a topic (byte prefix) to the context's
+// subscription list (NNG_OPT_SUB_SUBSCRIBE).  A duplicate subscription
+// is silently accepted.  A zero-length topic matches every message.
+static int
+sub0_ctx_subscribe(void *arg, const void *buf, size_t sz, nni_type t)
+{
+	sub0_ctx *  ctx  = arg;
+	sub0_sock * sock = ctx->sock;
+	sub0_topic *topic;
+	sub0_topic *new_topic;
+	NNI_ARG_UNUSED(t);
+
+	nni_mtx_lock(&sock->lk);
+	NNI_LIST_FOREACH (&ctx->topics, topic) {
+		if (topic->len != sz) {
+			continue;
+		}
+		if (memcmp(topic->buf, buf, sz) == 0) {
+			// Already have it.
+			nni_mtx_unlock(&sock->lk);
+			return (0);
+		}
+	}
+	if ((new_topic = NNI_ALLOC_STRUCT(new_topic)) == NULL) {
+		nni_mtx_unlock(&sock->lk);
+		return (NNG_ENOMEM);
+	}
+	if ((sz > 0) && ((new_topic->buf = nni_alloc(sz)) == NULL)) {
+		nni_mtx_unlock(&sock->lk);
+		NNI_FREE_STRUCT(new_topic);
+		return (NNG_ENOMEM);
+	}
+	if (sz > 0) {
+		memcpy(new_topic->buf, buf, sz);
+	} else {
+		// BUG FIX: a zero-length (match-all) topic previously
+		// called memcpy with a possibly NULL source/destination,
+		// which is undefined behavior even for a zero length
+		// (C11 7.24.1).  Record the empty topic explicitly.
+		new_topic->buf = NULL;
+	}
+	new_topic->len = sz;
+	nni_list_append(&ctx->topics, new_topic);
+	nni_mtx_unlock(&sock->lk);
+	return (0);
+}
+
+// sub0_ctx_unsubscribe removes an exact topic from the context
+// (NNG_OPT_SUB_UNSUBSCRIBE), returning NNG_ENOENT if it was not
+// subscribed.  Messages already queued that no longer match any
+// remaining subscription are dropped.
+static int
+sub0_ctx_unsubscribe(void *arg, const void *buf, size_t sz, nni_type t)
+{
+	sub0_ctx *  ctx  = arg;
+	sub0_sock * sock = ctx->sock;
+	sub0_topic *topic;
+	size_t      len;
+	NNI_ARG_UNUSED(t);
+
+	nni_mtx_lock(&sock->lk);
+	NNI_LIST_FOREACH (&ctx->topics, topic) {
+		if (topic->len != sz) {
+			continue;
+		}
+		if (memcmp(topic->buf, buf, sz) == 0) {
+			// Matched!
+			break;
+		}
+	}
+	// NNI_LIST_FOREACH leaves topic NULL when no entry matched.
+	if (topic == NULL) {
+		nni_mtx_unlock(&sock->lk);
+		return (NNG_ENOENT);
+	}
+	nni_list_remove(&ctx->topics, topic);
+
+	// Now we need to make sure that any messages that are waiting still
+	// match the subscription. We basically just run through the queue
+	// and requeue those messages we need.
+	len = nni_lmq_len(&ctx->lmq);
+	for (size_t i = 0; i < len; i++) {
+		nni_msg *msg;
+
+		(void) nni_lmq_getq(&ctx->lmq, &msg);
+		if (sub0_matches(ctx, nni_msg_body(msg), nni_msg_len(msg))) {
+			(void) nni_lmq_putq(&ctx->lmq, msg);
+		} else {
+			nni_msg_free(msg);
+		}
+	}
+	nni_mtx_unlock(&sock->lk);
+
+	// Free the topic outside the lock; it is already unlinked.
+	nni_free(topic->buf, topic->len);
+	NNI_FREE_STRUCT(topic);
+	return (0);
+}
+
+// sub0_ctx_get_prefer_new reads the drop policy (NNG_OPT_SUB_PREFNEW):
+// true means evict the oldest queued message when the queue overflows.
+static int
+sub0_ctx_get_prefer_new(void *arg, void *buf, size_t *szp, nni_type t)
+{
+	sub0_ctx * ctx  = arg;
+	sub0_sock *sock = ctx->sock;
+	bool       val;
+
+	nni_mtx_lock(&sock->lk);
+	val = ctx->prefer_new;
+	nni_mtx_unlock(&sock->lk);
+
+	return (nni_copyout_bool(val, buf, szp, t));
+}
+
+// sub0_ctx_set_prefer_new sets the drop policy (NNG_OPT_SUB_PREFNEW).
+// Setting it on the master context also updates the socket default
+// that newly created contexts inherit.
+static int
+sub0_ctx_set_prefer_new(void *arg, const void *buf, size_t sz, nni_type t)
+{
+	sub0_ctx * ctx  = arg;
+	sub0_sock *sock = ctx->sock;
+	bool       val;
+	int        rv;
+
+	if ((rv = nni_copyin_bool(&val, buf, sz, t)) != 0) {
+		return (rv);
+	}
+
+	nni_mtx_lock(&sock->lk);
+	ctx->prefer_new = val;
+	if (&sock->master == ctx) {
+		sock->prefer_new = val;
+	}
+	nni_mtx_unlock(&sock->lk);
+
+	return (0);
+}
+
+// Per-context option table.  Subscribe/unsubscribe are write-only;
+// reading them yields NNG_EWRITEONLY from the option framework.
+static nni_option sub0_ctx_options[] = {
+	{
+	    .o_name = NNG_OPT_RECVBUF,
+	    .o_get  = sub0_ctx_get_recv_buf_len,
+	    .o_set  = sub0_ctx_set_recv_buf_len,
+	},
+	{
+	    .o_name = NNG_OPT_SUB_SUBSCRIBE,
+	    .o_set  = sub0_ctx_subscribe,
+	},
+	{
+	    .o_name = NNG_OPT_SUB_UNSUBSCRIBE,
+	    .o_set  = sub0_ctx_unsubscribe,
+	},
+	{
+	    .o_name = NNG_OPT_SUB_PREFNEW,
+	    .o_get  = sub0_ctx_get_prefer_new,
+	    .o_set  = sub0_ctx_set_prefer_new,
+	},
+	{
+	    .o_name = NULL,
+	},
+};
+
+// sub0_sock_send always fails: SUB is a receive-only protocol.
+static void
+sub0_sock_send(void *arg, nni_aio *aio)
+{
+	NNI_ARG_UNUSED(arg);
+	if (nni_aio_begin(aio) == 0) {
+		nni_aio_finish_error(aio, NNG_ENOTSUP);
+	}
+}
+
+// sub0_sock_recv delegates the socket-level receive to the master
+// context.
+static void
+sub0_sock_recv(void *arg, nni_aio *aio)
+{
+	sub0_sock *sock = arg;
+
+	sub0_ctx_recv(&sock->master, aio);
+}
+
+// sub0_sock_get_recv_fd returns the pollable file descriptor
+// (NNG_OPT_RECVFD) that signals readability of the master context.
+static int
+sub0_sock_get_recv_fd(void *arg, void *buf, size_t *szp, nni_opt_type t)
+{
+	sub0_sock *sock = arg;
+	int        rv;
+	int        fd;
+
+	if ((rv = nni_pollable_getfd(&sock->readable, &fd)) != 0) {
+		return (rv);
+	}
+	return (nni_copyout_int(fd, buf, szp, t));
+}
+
+// Socket-level NNG_OPT_RECVBUF getter; delegates to the master context.
+static int
+sub0_sock_get_recv_buf_len(void *arg, void *buf, size_t *szp, nni_type t)
+{
+	sub0_sock *sock = arg;
+	return (sub0_ctx_get_recv_buf_len(&sock->master, buf, szp, t));
+}
+
+// Socket-level NNG_OPT_RECVBUF setter; delegates to the master context.
+static int
+sub0_sock_set_recv_buf_len(void *arg, const void *buf, size_t sz, nni_type t)
+{
+	sub0_sock *sock = arg;
+	return (sub0_ctx_set_recv_buf_len(&sock->master, buf, sz, t));
+}
+
+// Socket-level subscribe; delegates to the master context.
+static int
+sub0_sock_subscribe(void *arg, const void *buf, size_t sz, nni_type t)
+{
+	sub0_sock *sock = arg;
+	return (sub0_ctx_subscribe(&sock->master, buf, sz, t));
+}
+
+// Socket-level unsubscribe; delegates to the master context.
+static int
+sub0_sock_unsubscribe(void *arg, const void *buf, size_t sz, nni_type t)
+{
+	sub0_sock *sock = arg;
+	return (sub0_ctx_unsubscribe(&sock->master, buf, sz, t));
+}
+
+// Socket-level NNG_OPT_SUB_PREFNEW getter; delegates to the master.
+static int
+sub0_sock_get_prefer_new(void *arg, void *buf, size_t *szp, nni_type t)
+{
+	sub0_sock *sock = arg;
+	return (sub0_ctx_get_prefer_new(&sock->master, buf, szp, t));
+}
+
+// Socket-level NNG_OPT_SUB_PREFNEW setter; delegates to the master.
+static int
+sub0_sock_set_prefer_new(void *arg, const void *buf, size_t sz, nni_type t)
+{
+	sub0_sock *sock = arg;
+	return (sub0_ctx_set_prefer_new(&sock->master, buf, sz, t));
+}
+
+// This is the global protocol structure -- our linkage to the core.
+// This should be the only global non-static symbol in this file.
+// Per-pipe operation table.
+static nni_proto_pipe_ops sub0_pipe_ops = {
+	.pipe_size  = sizeof(sub0_pipe),
+	.pipe_init  = sub0_pipe_init,
+	.pipe_fini  = sub0_pipe_fini,
+	.pipe_start = sub0_pipe_start,
+	.pipe_close = sub0_pipe_close,
+	.pipe_stop  = sub0_pipe_stop,
+};
+
+// Per-context operation table.
+static nni_proto_ctx_ops sub0_ctx_ops = {
+	.ctx_size    = sizeof(sub0_ctx),
+	.ctx_init    = sub0_ctx_init,
+	.ctx_fini    = sub0_ctx_fini,
+	.ctx_send    = sub0_ctx_send,
+	.ctx_recv    = sub0_ctx_recv,
+	.ctx_options = sub0_ctx_options,
+};
+
+// Socket-level option table; most entries delegate to the master
+// context so socket and context behavior stay in sync.
+static nni_option sub0_sock_options[] = {
+	{
+	    .o_name = NNG_OPT_SUB_SUBSCRIBE,
+	    .o_set  = sub0_sock_subscribe,
+	},
+	{
+	    .o_name = NNG_OPT_SUB_UNSUBSCRIBE,
+	    .o_set  = sub0_sock_unsubscribe,
+	},
+	{
+	    .o_name = NNG_OPT_RECVFD,
+	    .o_get  = sub0_sock_get_recv_fd,
+	},
+	{
+	    .o_name = NNG_OPT_RECVBUF,
+	    .o_get  = sub0_sock_get_recv_buf_len,
+	    .o_set  = sub0_sock_set_recv_buf_len,
+	},
+	{
+	    .o_name = NNG_OPT_SUB_PREFNEW,
+	    .o_get  = sub0_sock_get_prefer_new,
+	    .o_set  = sub0_sock_set_prefer_new,
+	},
+	// terminate list
+	{
+	    .o_name = NULL,
+	},
+};
+
+// Socket operation table.
+static nni_proto_sock_ops sub0_sock_ops = {
+	.sock_size    = sizeof(sub0_sock),
+	.sock_init    = sub0_sock_init,
+	.sock_fini    = sub0_sock_fini,
+	.sock_open    = sub0_sock_open,
+	.sock_close   = sub0_sock_close,
+	.sock_send    = sub0_sock_send,
+	.sock_recv    = sub0_sock_recv,
+	.sock_options = sub0_sock_options,
+};
+
+// Protocol descriptor: SUB v0 (2,1), peering with PUB v0 (2,0).
+// NNI_PROTO_FLAG_RCV marks the protocol as receive-only.
+static nni_proto sub0_proto = {
+	.proto_version  = NNI_PROTOCOL_VERSION,
+	.proto_self     = { NNI_PROTO_SUB_V0, "sub" },
+	.proto_peer     = { NNI_PROTO_PUB_V0, "pub" },
+	.proto_flags    = NNI_PROTO_FLAG_RCV,
+	.proto_sock_ops = &sub0_sock_ops,
+	.proto_pipe_ops = &sub0_pipe_ops,
+	.proto_ctx_ops  = &sub0_ctx_ops,
+};
+
+// nng_sub0_open creates a cooked-mode SUB v0 socket.
+int
+nng_sub0_open(nng_socket *sock)
+{
+	return (nni_proto_open(sock, &sub0_proto));
+}
diff --git a/src/sp/protocol/pubsub0/sub_test.c b/src/sp/protocol/pubsub0/sub_test.c
new file mode 100644
index 00000000..b830ae80
--- /dev/null
+++ b/src/sp/protocol/pubsub0/sub_test.c
@@ -0,0 +1,624 @@
+//
+// Copyright 2020 Staysail Systems, Inc. <info@staysail.tech>
+//
+// This software is supplied under the terms of the MIT License, a
+// copy of which should be located in the distribution where this
+// file was obtained (LICENSE.txt). A copy of the license may also be
+// found online at https://opensource.org/licenses/MIT.
+//
+
+#include <nuts.h>
+
+// Verify the SUB socket's protocol/peer numbers and names.
+static void
+test_sub_identity(void)
+{
+	nng_socket s;
+	int        p;
+	char *     n;
+
+	NUTS_PASS(nng_sub0_open(&s));
+	NUTS_PASS(nng_socket_get_int(s, NNG_OPT_PROTO, &p));
+	NUTS_TRUE(p == NUTS_PROTO(2u, 1u)); // 33
+	NUTS_PASS(nng_socket_get_int(s, NNG_OPT_PEER, &p));
+	NUTS_TRUE(p == NUTS_PROTO(2u, 0u)); // 32
+	NUTS_PASS(nng_socket_get_string(s, NNG_OPT_PROTONAME, &n));
+	NUTS_MATCH(n, "sub");
+	nng_strfree(n);
+	NUTS_PASS(nng_socket_get_string(s, NNG_OPT_PEERNAME, &n));
+	NUTS_MATCH(n, "pub");
+	nng_strfree(n);
+	NUTS_CLOSE(s);
+}
+
+// Verify sending on a SUB socket fails with NNG_ENOTSUP.
+static void
+test_sub_cannot_send(void)
+{
+	nng_socket sub;
+
+	NUTS_PASS(nng_sub0_open(&sub));
+	NUTS_FAIL(nng_send(sub, "", 0, 0), NNG_ENOTSUP);
+	NUTS_CLOSE(sub);
+}
+
+// Verify sending on a SUB context also fails with NNG_ENOTSUP.
+static void
+test_sub_context_cannot_send(void)
+{
+	nng_socket sub;
+	nng_ctx    ctx;
+	nng_msg *  m;
+	nng_aio *  aio;
+
+	NUTS_PASS(nng_sub0_open(&sub));
+	NUTS_PASS(nng_ctx_open(&ctx, sub));
+	NUTS_PASS(nng_msg_alloc(&m, 0));
+	NUTS_PASS(nng_aio_alloc(&aio, NULL, NULL));
+	nng_aio_set_msg(aio, m);
+	nng_aio_set_timeout(aio, 1000);
+	nng_ctx_send(ctx, aio);
+	nng_aio_wait(aio);
+	NUTS_FAIL(nng_aio_result(aio), NNG_ENOTSUP);
+	NUTS_PASS(nng_ctx_close(ctx));
+	NUTS_CLOSE(sub);
+	nng_aio_free(aio);
+	nng_msg_free(m);
+}
+
+// Verify SUB does not expose a send file descriptor (receive-only).
+static void
+test_sub_not_writeable(void)
+{
+	int        fd;
+	nng_socket sub;
+
+	NUTS_PASS(nng_sub0_open(&sub));
+	NUTS_FAIL(nng_socket_get_int(sub, NNG_OPT_SENDFD, &fd), NNG_ENOTSUP);
+	NUTS_CLOSE(sub);
+}
+
+// Verify the RECVFD pollable: not readable until a matching message
+// is queued, and cleared again once the message is received.
+static void
+test_sub_poll_readable(void)
+{
+	int        fd;
+	nng_socket pub;
+	nng_socket sub;
+
+	NUTS_PASS(nng_sub0_open(&sub));
+	NUTS_PASS(nng_pub0_open(&pub));
+	NUTS_PASS(nng_socket_set(sub, NNG_OPT_SUB_SUBSCRIBE, "a", 1));
+	NUTS_PASS(nng_socket_set_ms(sub, NNG_OPT_RECVTIMEO, 1000));
+	NUTS_PASS(nng_socket_set_ms(pub, NNG_OPT_SENDTIMEO, 1000));
+	NUTS_PASS(nng_socket_get_int(sub, NNG_OPT_RECVFD, &fd));
+	NUTS_TRUE(fd >= 0);
+
+	// Not readable if not connected!
+	NUTS_TRUE(nuts_poll_fd(fd) == false);
+
+	// Even after connect (no message yet)
+	NUTS_MARRY(pub, sub);
+	NUTS_TRUE(nuts_poll_fd(fd) == false);
+
+	// If we send a message we didn't subscribe to, that doesn't matter.
+	NUTS_SEND(pub, "def");
+	NUTS_SLEEP(100);
+	NUTS_TRUE(nuts_poll_fd(fd) == false);
+
+	// But once we send messages, it is.
+	// We have to send a request, in order to send a reply.
+	NUTS_SEND(pub, "abc");
+	NUTS_SLEEP(100);
+	NUTS_TRUE(nuts_poll_fd(fd));
+
+	// and receiving makes it no longer ready
+	NUTS_RECV(sub, "abc");
+	NUTS_TRUE(nuts_poll_fd(fd) == false);
+
+	NUTS_CLOSE(pub);
+	NUTS_CLOSE(sub);
+}
+
+// Verify an asynchronous receive posted before any message arrives is
+// completed once a publisher delivers one (wildcard subscription).
+static void
+test_sub_recv_late(void)
+{
+	int        fd;
+	nng_socket pub;
+	nng_socket sub;
+	nng_aio *  aio;
+	nng_msg *  msg;
+
+	NUTS_PASS(nng_sub0_open(&sub));
+	NUTS_PASS(nng_pub0_open(&pub));
+	NUTS_PASS(nng_aio_alloc(&aio, NULL, NULL));
+	NUTS_PASS(nng_socket_set(sub, NNG_OPT_SUB_SUBSCRIBE, "", 0));
+	NUTS_PASS(nng_socket_set_ms(sub, NNG_OPT_RECVTIMEO, 1000));
+	NUTS_PASS(nng_socket_set_ms(pub, NNG_OPT_SENDTIMEO, 1000));
+	NUTS_PASS(nng_socket_get_int(sub, NNG_OPT_RECVFD, &fd));
+	NUTS_TRUE(fd >= 0);
+
+	// Not readable if not connected!
+	NUTS_TRUE(nuts_poll_fd(fd) == false);
+
+	// Even after connect (no message yet)
+	NUTS_MARRY(pub, sub);
+	NUTS_TRUE(nuts_poll_fd(fd) == false);
+
+	nng_recv_aio(sub, aio);
+
+	// But once we send messages, it is.
+	// We have to send a request, in order to send a reply.
+	NUTS_SEND(pub, "abc");
+	NUTS_SLEEP(200);
+
+	nng_aio_wait(aio);
+	NUTS_PASS(nng_aio_result(aio));
+	msg = nng_aio_get_msg(aio);
+	nng_aio_set_msg(aio, NULL);
+	// NUTS_SEND transmits the trailing NUL, hence length 4.
+	NUTS_TRUE(nng_msg_len(msg) == 4);
+	NUTS_MATCH(nng_msg_body(msg), "abc");
+
+	nng_msg_free(msg);
+	nng_aio_free(aio);
+
+	NUTS_CLOSE(pub);
+	NUTS_CLOSE(sub);
+}
+
+// Verify SUB contexts expose neither a send nor a receive pollfd.
+// (Made static for consistency with the other tests in this file.)
+static void
+test_sub_context_no_poll(void)
+{
+	int        fd;
+	nng_socket sub;
+	nng_ctx    ctx;
+
+	NUTS_PASS(nng_sub0_open(&sub));
+	NUTS_PASS(nng_ctx_open(&ctx, sub));
+	NUTS_FAIL(nng_ctx_get_int(ctx, NNG_OPT_SENDFD, &fd), NNG_ENOTSUP);
+	NUTS_FAIL(nng_ctx_get_int(ctx, NNG_OPT_RECVFD, &fd), NNG_ENOTSUP);
+	NUTS_PASS(nng_ctx_close(ctx));
+	NUTS_CLOSE(sub);
+}
+
+// Verify a SUB rejects a SUB peer and bumps the "reject" statistic.
+// (Made static for consistency with the other tests in this file.)
+static void
+test_sub_validate_peer(void)
+{
+	nng_socket s1, s2;
+	nng_stat * stats;
+	nng_stat * reject;
+	char *     addr;
+
+	NUTS_ADDR(addr, "inproc");
+
+	NUTS_PASS(nng_sub0_open(&s1));
+	NUTS_PASS(nng_sub0_open(&s2));
+
+	NUTS_PASS(nng_listen(s1, addr, NULL, 0));
+	NUTS_PASS(nng_dial(s2, addr, NULL, NNG_FLAG_NONBLOCK));
+
+	NUTS_SLEEP(100);
+	NUTS_PASS(nng_stats_get(&stats));
+
+	NUTS_TRUE(stats != NULL);
+	NUTS_TRUE((reject = nng_stat_find_socket(stats, s1)) != NULL);
+	NUTS_TRUE((reject = nng_stat_find(reject, "reject")) != NULL);
+
+	NUTS_TRUE(nng_stat_type(reject) == NNG_STAT_COUNTER);
+	NUTS_TRUE(nng_stat_value(reject) > 0);
+
+	NUTS_CLOSE(s1);
+	NUTS_CLOSE(s2);
+	nng_stats_free(stats);
+}
+
+// Verify receiving on an already-closed context fails with NNG_ECLOSED.
+static void
+test_sub_recv_ctx_closed(void)
+{
+	nng_socket sub;
+	nng_ctx    ctx;
+	nng_aio *  aio;
+	NUTS_PASS(nng_sub0_open(&sub));
+	NUTS_PASS(nng_ctx_open(&ctx, sub));
+	NUTS_PASS(nng_aio_alloc(&aio, NULL, NULL));
+	nng_ctx_close(ctx);
+	nng_ctx_recv(ctx, aio);
+	nng_aio_wait(aio);
+	NUTS_FAIL(nng_aio_result(aio), NNG_ECLOSED);
+	nng_aio_free(aio);
+	NUTS_CLOSE(sub);
+}
+
+// Verify a receive with a stopped aio completes with NNG_ECANCELED.
+static void
+test_sub_ctx_recv_aio_stopped(void)
+{
+	nng_socket sub;
+	nng_ctx    ctx;
+	nng_aio *  aio;
+
+	NUTS_PASS(nng_sub0_open(&sub));
+	NUTS_PASS(nng_aio_alloc(&aio, NULL, NULL));
+	NUTS_PASS(nng_ctx_open(&ctx, sub));
+
+	nng_aio_stop(aio);
+	nng_ctx_recv(ctx, aio);
+	nng_aio_wait(aio);
+	NUTS_FAIL(nng_aio_result(aio), NNG_ECANCELED);
+	NUTS_PASS(nng_ctx_close(ctx));
+	NUTS_CLOSE(sub);
+	nng_aio_free(aio);
+}
+
+// Verify closing a context aborts its pending receive with NNG_ECLOSED.
+static void
+test_sub_close_context_recv(void)
+{
+	nng_socket sub;
+	nng_ctx    ctx;
+	nng_aio *  aio;
+
+	NUTS_PASS(nng_sub0_open(&sub));
+	NUTS_PASS(nng_ctx_open(&ctx, sub));
+	NUTS_PASS(nng_aio_alloc(&aio, NULL, NULL));
+	nng_aio_set_timeout(aio, 1000);
+	nng_ctx_recv(ctx, aio);
+	NUTS_PASS(nng_ctx_close(ctx));
+	nng_aio_wait(aio);
+	NUTS_FAIL(nng_aio_result(aio), NNG_ECLOSED);
+
+	NUTS_CLOSE(sub);
+	nng_aio_free(aio);
+}
+
+// Verify a zero-timeout receive on an empty context times out.
+static void
+test_sub_ctx_recv_nonblock(void)
+{
+	nng_socket sub;
+	nng_ctx    ctx;
+	nng_aio *  aio;
+
+	NUTS_PASS(nng_sub0_open(&sub));
+	NUTS_PASS(nng_ctx_open(&ctx, sub));
+	NUTS_PASS(nng_aio_alloc(&aio, NULL, NULL));
+
+	nng_aio_set_timeout(aio, 0); // Instant timeout
+	nng_ctx_recv(ctx, aio);
+
+	nng_aio_wait(aio);
+	NUTS_FAIL(nng_aio_result(aio), NNG_ETIMEDOUT);
+	NUTS_CLOSE(sub);
+	nng_aio_free(aio);
+}
+
+// Verify aborting a pending context receive completes it with the
+// abort code (NNG_ECANCELED).
+static void
+test_sub_ctx_recv_cancel(void)
+{
+	nng_socket sub;
+	nng_ctx    ctx;
+	nng_aio *  aio;
+
+	NUTS_PASS(nng_sub0_open(&sub));
+	NUTS_PASS(nng_ctx_open(&ctx, sub));
+	NUTS_PASS(nng_aio_alloc(&aio, NULL, NULL));
+
+	nng_aio_set_timeout(aio, 1000);
+	nng_ctx_recv(ctx, aio);
+	nng_aio_abort(aio, NNG_ECANCELED);
+
+	nng_aio_wait(aio);
+	NUTS_FAIL(nng_aio_result(aio), NNG_ECANCELED);
+	NUTS_CLOSE(sub);
+	nng_aio_free(aio);
+}
+
+// Exercise NNG_OPT_RECVBUF: valid range 1..8192, round-trip of the
+// value, and type/size validation errors.
+static void
+test_sub_recv_buf_option(void)
+{
+	nng_socket  sub;
+	int         v;
+	bool        b;
+	size_t      sz;
+	const char *opt = NNG_OPT_RECVBUF;
+
+	NUTS_PASS(nng_sub0_open(&sub));
+
+	NUTS_PASS(nng_socket_set_int(sub, opt, 1));
+	NUTS_FAIL(nng_socket_set_int(sub, opt, 0), NNG_EINVAL);
+	NUTS_FAIL(nng_socket_set_int(sub, opt, -1), NNG_EINVAL);
+	NUTS_FAIL(nng_socket_set_int(sub, opt, 1000000), NNG_EINVAL);
+	NUTS_PASS(nng_socket_set_int(sub, opt, 3));
+	NUTS_PASS(nng_socket_get_int(sub, opt, &v));
+	NUTS_TRUE(v == 3);
+	v  = 0;
+	sz = sizeof(v);
+	NUTS_PASS(nng_socket_get(sub, opt, &v, &sz));
+	NUTS_TRUE(v == 3);
+	NUTS_TRUE(sz == sizeof(v));
+
+	NUTS_FAIL(nng_socket_set(sub, opt, "", 1), NNG_EINVAL);
+	sz = 1;
+	NUTS_FAIL(nng_socket_get(sub, opt, &v, &sz), NNG_EINVAL);
+	NUTS_FAIL(nng_socket_set_bool(sub, opt, true), NNG_EBADTYPE);
+	NUTS_FAIL(nng_socket_get_bool(sub, opt, &b), NNG_EBADTYPE);
+
+	NUTS_CLOSE(sub);
+}
+
+// Exercise NNG_OPT_SUB_SUBSCRIBE: duplicates are accepted, any byte
+// payload (even a bool or int image) is a valid topic, and the option
+// is write-only.
+static void
+test_sub_subscribe_option(void)
+{
+	nng_socket  sub;
+	size_t      sz;
+	int         v;
+	const char *opt = NNG_OPT_SUB_SUBSCRIBE;
+
+	NUTS_PASS(nng_sub0_open(&sub));
+
+	NUTS_PASS(nng_socket_set(sub, opt, "abc", 3));
+	NUTS_PASS(nng_socket_set(sub, opt, "abc", 3)); // duplicate
+	NUTS_PASS(nng_socket_set_bool(sub, opt, false));
+	NUTS_PASS(nng_socket_set_int(sub, opt, 32));
+	sz = sizeof(v);
+	NUTS_FAIL(nng_socket_get(sub, opt, &v, &sz), NNG_EWRITEONLY);
+
+	NUTS_CLOSE(sub);
+}
+
+// Exercise NNG_OPT_SUB_UNSUBSCRIBE: removing an unknown or already
+// removed topic yields NNG_ENOENT; the option is write-only.
+static void
+test_sub_unsubscribe_option(void)
+{
+	nng_socket  sub;
+	size_t      sz;
+	int         v;
+	const char *opt1 = NNG_OPT_SUB_SUBSCRIBE;
+	const char *opt2 = NNG_OPT_SUB_UNSUBSCRIBE;
+
+	NUTS_PASS(nng_sub0_open(&sub));
+
+	NUTS_PASS(nng_socket_set(sub, opt1, "abc", 3));
+	NUTS_FAIL(nng_socket_set(sub, opt2, "abc123", 6), NNG_ENOENT);
+	NUTS_PASS(nng_socket_set(sub, opt2, "abc", 3));
+	NUTS_FAIL(nng_socket_set(sub, opt2, "abc", 3), NNG_ENOENT);
+	NUTS_PASS(nng_socket_set_int(sub, opt1, 32));
+	NUTS_FAIL(nng_socket_set_int(sub, opt2, 23), NNG_ENOENT);
+	NUTS_PASS(nng_socket_set_int(sub, opt2, 32));
+	sz = sizeof(v);
+	NUTS_FAIL(nng_socket_get(sub, opt2, &v, &sz), NNG_EWRITEONLY);
+
+	NUTS_CLOSE(sub);
+}
+
+// Exercise NNG_OPT_SUB_PREFNEW: boolean round-trip plus type and size
+// validation errors.
+static void
+test_sub_prefer_new_option(void)
+{
+	nng_socket  sub;
+	bool        b;
+	size_t      sz;
+	const char *opt = NNG_OPT_SUB_PREFNEW;
+
+	NUTS_PASS(nng_sub0_open(&sub));
+
+	NUTS_PASS(nng_socket_set_bool(sub, opt, true));
+	NUTS_PASS(nng_socket_set_bool(sub, opt, false));
+	NUTS_PASS(nng_socket_get_bool(sub, opt, &b));
+	NUTS_TRUE(b == false);
+	sz = sizeof(b);
+	b  = true;
+	NUTS_PASS(nng_socket_get(sub, opt, &b, &sz));
+	NUTS_TRUE(b == false);
+	NUTS_TRUE(sz == sizeof(bool));
+
+	NUTS_FAIL(nng_socket_set(sub, opt, "abc", 3), NNG_EINVAL);
+	NUTS_FAIL(nng_socket_set_int(sub, opt, 1), NNG_EBADTYPE);
+
+	NUTS_CLOSE(sub);
+}
+
+// Verify PREFNEW=false: with a depth-2 buffer, the newest overflowing
+// message ("three") is dropped and the two oldest are kept.
+// (Made static for consistency with the other tests in this file.)
+static void
+test_sub_drop_new(void)
+{
+	nng_socket sub;
+	nng_socket pub;
+	nng_msg *  msg;
+
+	NUTS_PASS(nng_sub0_open(&sub));
+	NUTS_PASS(nng_pub0_open(&pub));
+	NUTS_PASS(nng_socket_set_int(sub, NNG_OPT_RECVBUF, 2));
+	NUTS_PASS(nng_socket_set_bool(sub, NNG_OPT_SUB_PREFNEW, false));
+	NUTS_PASS(nng_socket_set(sub, NNG_OPT_SUB_SUBSCRIBE, NULL, 0));
+	NUTS_PASS(nng_socket_set_ms(sub, NNG_OPT_RECVTIMEO, 200));
+	NUTS_PASS(nng_socket_set_ms(pub, NNG_OPT_SENDTIMEO, 1000));
+	NUTS_MARRY(pub, sub);
+	NUTS_SEND(pub, "one");
+	NUTS_SEND(pub, "two");
+	NUTS_SEND(pub, "three");
+	NUTS_SLEEP(100);
+	NUTS_RECV(sub, "one");
+	NUTS_RECV(sub, "two");
+	NUTS_FAIL(nng_recvmsg(sub, &msg, 0), NNG_ETIMEDOUT);
+	NUTS_CLOSE(pub);
+	NUTS_CLOSE(sub);
+}
+
+// Verify PREFNEW=true: with a depth-2 buffer, the oldest message
+// ("one") is evicted to make room for the newest.
+// (Made static for consistency with the other tests in this file.)
+static void
+test_sub_drop_old(void)
+{
+	nng_socket sub;
+	nng_socket pub;
+	nng_msg *  msg;
+
+	NUTS_PASS(nng_sub0_open(&sub));
+	NUTS_PASS(nng_pub0_open(&pub));
+	NUTS_PASS(nng_socket_set_int(sub, NNG_OPT_RECVBUF, 2));
+	NUTS_PASS(nng_socket_set_bool(sub, NNG_OPT_SUB_PREFNEW, true));
+	NUTS_PASS(nng_socket_set(sub, NNG_OPT_SUB_SUBSCRIBE, NULL, 0));
+	NUTS_PASS(nng_socket_set_ms(sub, NNG_OPT_RECVTIMEO, 200));
+	NUTS_PASS(nng_socket_set_ms(pub, NNG_OPT_SENDTIMEO, 1000));
+	NUTS_MARRY(pub, sub);
+	NUTS_SEND(pub, "one");
+	NUTS_SEND(pub, "two");
+	NUTS_SEND(pub, "three");
+	NUTS_SLEEP(100);
+	NUTS_RECV(sub, "two");
+	NUTS_RECV(sub, "three");
+	NUTS_FAIL(nng_recvmsg(sub, &msg, 0), NNG_ETIMEDOUT);
+	NUTS_CLOSE(pub);
+	NUTS_CLOSE(sub);
+}
+
+// Verify prefix-based filtering: only messages matching a current
+// subscription are delivered, and unsubscribing drops already-queued
+// messages that no longer match.
+static void
+test_sub_filter(void)
+{
+	nng_socket sub;
+	nng_socket pub;
+	char       buf[32];
+	size_t     sz;
+
+	NUTS_PASS(nng_sub0_open(&sub));
+	NUTS_PASS(nng_pub0_open(&pub));
+	NUTS_PASS(nng_socket_set_ms(pub, NNG_OPT_SENDTIMEO, 1000));
+	NUTS_PASS(nng_socket_set_ms(sub, NNG_OPT_RECVTIMEO, 1000));
+	NUTS_PASS(nng_socket_set_int(sub, NNG_OPT_RECVBUF, 10));
+
+	// Set up some default filters
+	NUTS_PASS(nng_socket_set(sub, NNG_OPT_SUB_SUBSCRIBE, "abc", 3));
+	NUTS_PASS(nng_socket_set(sub, NNG_OPT_SUB_SUBSCRIBE, "def", 3));
+	NUTS_PASS(nng_socket_set(sub, NNG_OPT_SUB_SUBSCRIBE, "ghi", 3));
+	NUTS_PASS(nng_socket_set(sub, NNG_OPT_SUB_SUBSCRIBE, "jkl", 3));
+
+	NUTS_MARRY(pub, sub);
+
+	NUTS_PASS(nng_send(pub, "def", 3, 0));
+	NUTS_PASS(nng_send(pub, "de", 2, 0)); // will not go through
+	NUTS_PASS(nng_send(pub, "abc123", 6, 0));
+	NUTS_PASS(nng_send(pub, "xzy", 3, 0));      // does not match
+	NUTS_PASS(nng_send(pub, "ghi-drop", 7, 0)); // dropped by unsub
+	NUTS_PASS(nng_send(pub, "jkl-mno", 6, 0));
+
+	NUTS_SLEEP(100);
+	// Unsubscribe after delivery: the queued "ghi" message is purged.
+	NUTS_PASS(nng_socket_set(sub, NNG_OPT_SUB_UNSUBSCRIBE, "ghi", 3));
+	sz = sizeof(buf);
+	NUTS_PASS(nng_recv(sub, buf, &sz, 0));
+	NUTS_TRUE(sz == 3);
+	NUTS_TRUE(memcmp(buf, "def", 3) == 0);
+
+	sz = sizeof(buf);
+	NUTS_PASS(nng_recv(sub, buf, &sz, 0));
+	NUTS_TRUE(sz == 6);
+	NUTS_TRUE(memcmp(buf, "abc123", 6) == 0);
+
+	sz = sizeof(buf);
+	NUTS_PASS(nng_recv(sub, buf, &sz, 0));
+	NUTS_TRUE(sz == 6);
+	NUTS_TRUE(memcmp(buf, "jkl-mno", 6) == 0);
+
+	NUTS_CLOSE(sub);
+	NUTS_CLOSE(pub);
+}
+
+// Verify that separate contexts on one socket maintain independent
+// subscriptions, and that a message matching both ("all") is delivered
+// to each context.
+static void
+test_sub_multi_context(void)
+{
+	nng_socket sub;
+	nng_socket pub;
+	nng_ctx    c1;
+	nng_ctx    c2;
+	nng_aio *  aio1;
+	nng_aio *  aio2;
+	nng_msg *  m;
+
+	NUTS_PASS(nng_sub0_open(&sub));
+	NUTS_PASS(nng_pub0_open(&pub));
+	NUTS_PASS(nng_aio_alloc(&aio1, NULL, NULL));
+	NUTS_PASS(nng_aio_alloc(&aio2, NULL, NULL));
+	NUTS_PASS(nng_ctx_open(&c1, sub));
+	NUTS_PASS(nng_ctx_open(&c2, sub));
+
+	NUTS_PASS(nng_ctx_set(c1, NNG_OPT_SUB_SUBSCRIBE, "one", 3));
+	NUTS_PASS(nng_ctx_set(c1, NNG_OPT_SUB_SUBSCRIBE, "all", 3));
+
+	NUTS_PASS(nng_ctx_set(c2, NNG_OPT_SUB_SUBSCRIBE, "two", 3));
+	NUTS_PASS(nng_ctx_set(c2, NNG_OPT_SUB_SUBSCRIBE, "all", 3));
+
+	nng_aio_set_timeout(aio1, 100);
+	nng_aio_set_timeout(aio2, 100);
+
+	NUTS_MARRY(pub, sub);
+
+	NUTS_SEND(pub, "one for the money");
+	NUTS_SEND(pub, "all dogs go to heaven");
+	NUTS_SEND(pub, "nobody likes a snitch");
+	NUTS_SEND(pub, "two for the show");
+
+	nng_ctx_recv(c1, aio1);
+	nng_aio_wait(aio1);
+	NUTS_PASS(nng_aio_result(aio1));
+	m = nng_aio_get_msg(aio1);
+	NUTS_MATCH(nng_msg_body(m), "one for the money");
+	nng_msg_free(m);
+
+	nng_ctx_recv(c1, aio1);
+	nng_aio_wait(aio1);
+	NUTS_PASS(nng_aio_result(aio1));
+	m = nng_aio_get_msg(aio1);
+	NUTS_MATCH(nng_msg_body(m), "all dogs go to heaven");
+	nng_msg_free(m);
+
+	nng_ctx_recv(c2, aio1);
+	nng_aio_wait(aio1);
+	NUTS_PASS(nng_aio_result(aio1));
+	m = nng_aio_get_msg(aio1);
+	NUTS_MATCH(nng_msg_body(m), "all dogs go to heaven");
+	nng_msg_free(m);
+
+	nng_ctx_recv(c2, aio1);
+	nng_aio_wait(aio1);
+	NUTS_PASS(nng_aio_result(aio1));
+	m = nng_aio_get_msg(aio1);
+	NUTS_MATCH(nng_msg_body(m), "two for the show");
+	nng_msg_free(m);
+
+	// Both contexts drained; further receives must time out.
+	nng_ctx_recv(c1, aio1);
+	nng_ctx_recv(c2, aio2);
+
+	nng_aio_wait(aio1);
+	nng_aio_wait(aio2);
+	NUTS_FAIL(nng_aio_result(aio1), NNG_ETIMEDOUT);
+	NUTS_FAIL(nng_aio_result(aio2), NNG_ETIMEDOUT);
+	NUTS_CLOSE(sub);
+	NUTS_CLOSE(pub);
+	nng_aio_free(aio1);
+	nng_aio_free(aio2);
+}
+
+// Verify a SUB socket opened normally reports RAW=false.
+static void
+test_sub_cooked(void)
+{
+	nng_socket s;
+	bool       b;
+
+	NUTS_PASS(nng_sub0_open(&s));
+	NUTS_PASS(nng_socket_get_bool(s, NNG_OPT_RAW, &b));
+	NUTS_TRUE(!b);
+	NUTS_CLOSE(s);
+}
+
+// Test registry for the sub protocol suite.  Uses NUTS_TESTS for
+// consistency with pub_test.c (was the raw acutest TEST_LIST).
+NUTS_TESTS = {
+	{ "sub identity", test_sub_identity },
+	{ "sub cannot send", test_sub_cannot_send },
+	{ "sub context cannot send", test_sub_context_cannot_send },
+	{ "sub not writeable", test_sub_not_writeable },
+	{ "sub poll readable", test_sub_poll_readable },
+	{ "sub context does not poll", test_sub_context_no_poll },
+	{ "sub validate peer", test_sub_validate_peer },
+	{ "sub recv late", test_sub_recv_late },
+	{ "sub recv ctx closed", test_sub_recv_ctx_closed },
+	{ "sub recv aio ctx stopped", test_sub_ctx_recv_aio_stopped },
+	{ "sub close context recv", test_sub_close_context_recv },
+	{ "sub context recv nonblock", test_sub_ctx_recv_nonblock },
+	{ "sub context recv cancel", test_sub_ctx_recv_cancel },
+	{ "sub recv buf option", test_sub_recv_buf_option },
+	{ "sub subscribe option", test_sub_subscribe_option },
+	{ "sub unsubscribe option", test_sub_unsubscribe_option },
+	{ "sub prefer new option", test_sub_prefer_new_option },
+	{ "sub drop new", test_sub_drop_new },
+	{ "sub drop old", test_sub_drop_old },
+	{ "sub filter", test_sub_filter },
+	{ "sub multi context", test_sub_multi_context },
+	{ "sub cooked", test_sub_cooked },
+	{ NULL, NULL },
+};
diff --git a/src/sp/protocol/pubsub0/xsub.c b/src/sp/protocol/pubsub0/xsub.c
new file mode 100644
index 00000000..0013b8b3
--- /dev/null
+++ b/src/sp/protocol/pubsub0/xsub.c
@@ -0,0 +1,211 @@
+//
+// Copyright 2020 Staysail Systems, Inc. <info@staysail.tech>
+// Copyright 2018 Capitar IT Group BV <info@capitar.com>
+//
+// This software is supplied under the terms of the MIT License, a
+// copy of which should be located in the distribution where this
+// file was obtained (LICENSE.txt). A copy of the license may also be
+// found online at https://opensource.org/licenses/MIT.
+//
+
+#include <stdlib.h>
+
+#include "core/nng_impl.h"
+#include "nng/protocol/pubsub0/sub.h"
+
+// Subscriber protocol. The SUB protocol receives messages sent to
+// it from publishers, and filters out those it is not interested in,
+// only passing up ones that match known subscriptions.
+
+#ifndef NNI_PROTO_SUB_V0
+#define NNI_PROTO_SUB_V0 NNI_PROTO(2, 1)
+#endif
+
+#ifndef NNI_PROTO_PUB_V0
+#define NNI_PROTO_PUB_V0 NNI_PROTO(2, 0)
+#endif
+
+typedef struct xsub0_pipe xsub0_pipe;
+typedef struct xsub0_sock xsub0_sock;
+
+static void xsub0_recv_cb(void *);
+static void xsub0_pipe_fini(void *);
+
+// xsub0_sock is our per-socket protocol private structure.
+struct xsub0_sock {
+ nni_msgq *urq; // upper read queue; raw mode delivers straight here
+ nni_mtx lk; // socket lock (currently unused by the data path)
+};
+
+// xsub0_pipe is our per-pipe protocol private structure.
+struct xsub0_pipe {
+ nni_pipe * pipe; // underlying transport pipe
+ xsub0_sock *sub; // back-pointer to the owning socket
+ nni_aio aio_recv; // in-flight receive; re-armed by xsub0_recv_cb
+};
+
+// Per-socket initialization.  Caches the upper read queue and prepares
+// the socket lock.  Always returns 0.
+static int
+xsub0_sock_init(void *arg, nni_sock *sock)
+{
+ xsub0_sock *s = arg;
+
+ // Initialize the lock so that xsub0_sock_fini may always call
+ // nni_mtx_fini; finalizing an uninitialized mutex is undefined
+ // behavior.
+ nni_mtx_init(&s->lk);
+ s->urq = nni_sock_recvq(sock);
+ return (0);
+}
+
+// Per-socket teardown.  NOTE(review): ensure nni_mtx_init is called in
+// xsub0_sock_init to pair with this fini -- destroying an
+// uninitialized mutex is undefined behavior.
+static void
+xsub0_sock_fini(void *arg)
+{
+ xsub0_sock *s = arg;
+ nni_mtx_fini(&s->lk);
+}
+
+// Socket open hook; raw SUB needs no per-open work.
+static void
+xsub0_sock_open(void *arg)
+{
+ NNI_ARG_UNUSED(arg);
+}
+
+// Socket close hook; pipe teardown handles all cleanup.
+static void
+xsub0_sock_close(void *arg)
+{
+ NNI_ARG_UNUSED(arg);
+}
+
+// Stop the pipe: block until the outstanding receive aio has fully
+// quiesced (callback will not run again).
+static void
+xsub0_pipe_stop(void *arg)
+{
+ xsub0_pipe *p = arg;
+
+ nni_aio_stop(&p->aio_recv);
+}
+
+// Release pipe resources; called only after xsub0_pipe_stop.
+static void
+xsub0_pipe_fini(void *arg)
+{
+ xsub0_pipe *p = arg;
+
+ nni_aio_fini(&p->aio_recv);
+}
+
+// Per-pipe initialization: link the pipe to its socket and set up the
+// receive aio that drives all inbound traffic.  Always returns 0.
+static int
+xsub0_pipe_init(void *arg, nni_pipe *pipe, void *s)
+{
+ xsub0_pipe *p = arg;
+
+ p->pipe = pipe;
+ p->sub = s;
+ nni_aio_init(&p->aio_recv, xsub0_recv_cb, p);
+ return (0);
+}
+
+// Start a pipe: verify the peer speaks PUB, then arm the receive loop
+// (xsub0_recv_cb re-arms it after each message).
+static int
+xsub0_pipe_start(void *arg)
+{
+ xsub0_pipe *p = arg;
+
+ if (nni_pipe_peer(p->pipe) != NNI_PROTO_PUB_V0) {
+ // Peer protocol mismatch.
+ return (NNG_EPROTO);
+ }
+
+ nni_pipe_recv(p->pipe, &p->aio_recv);
+ return (0);
+}
+
+// Close the pipe: abort the pending receive so the callback unwinds.
+static void
+xsub0_pipe_close(void *arg)
+{
+ xsub0_pipe *p = arg;
+
+ nni_aio_close(&p->aio_recv);
+}
+
+// Receive completion callback.  Tags the message with its pipe id and
+// pushes it (non-blocking) onto the socket's read queue, then re-arms
+// the receive.  On aio failure the pipe is closed and the loop ends.
+static void
+xsub0_recv_cb(void *arg)
+{
+ xsub0_pipe *p = arg;
+ xsub0_sock *s = p->sub;
+ nni_msgq * urq = s->urq;
+ nni_msg * msg;
+
+ if (nni_aio_result(&p->aio_recv) != 0) {
+ nni_pipe_close(p->pipe);
+ return;
+ }
+
+ msg = nni_aio_get_msg(&p->aio_recv);
+ nni_aio_set_msg(&p->aio_recv, NULL);
+ nni_msg_set_pipe(msg, nni_pipe_id(p->pipe));
+
+ if (nni_msgq_tryput(urq, msg) != 0) {
+ // This only happens for two reasons. For flow control,
+ // in which case we just want to discard the message and
+ // carry on, and for a close of the socket (which is very
+ // hard to achieve, since we close the pipes.) In either
+ // case the easiest thing to do is just free the message
+ // and try again.
+ nni_msg_free(msg);
+ }
+ nni_pipe_recv(p->pipe, &p->aio_recv);
+}
+
+// SUB sockets cannot send; fail every send attempt with NNG_ENOTSUP.
+static void
+xsub0_sock_send(void *arg, nni_aio *aio)
+{
+ NNI_ARG_UNUSED(arg);
+ nni_aio_finish_error(aio, NNG_ENOTSUP);
+}
+
+// Receive: hand the aio to the upper read queue, which completes it
+// when a message arrives (raw mode: no filtering is applied).
+static void
+xsub0_sock_recv(void *arg, nni_aio *aio)
+{
+ xsub0_sock *s = arg;
+
+ nni_msgq_aio_get(s->urq, aio);
+}
+
+// This is the global protocol structure -- our linkage to the core.
+// This should be the only global non-static symbol in this file.
+static nni_proto_pipe_ops xsub0_pipe_ops = {
+ .pipe_size = sizeof(xsub0_pipe),
+ .pipe_init = xsub0_pipe_init,
+ .pipe_fini = xsub0_pipe_fini,
+ .pipe_start = xsub0_pipe_start,
+ .pipe_close = xsub0_pipe_close,
+ .pipe_stop = xsub0_pipe_stop,
+};
+
+static nni_option xsub0_sock_options[] = {
+ // terminate list
+ {
+ .o_name = NULL,
+ },
+};
+
+static nni_proto_sock_ops xsub0_sock_ops = {
+ .sock_size = sizeof(xsub0_sock),
+ .sock_init = xsub0_sock_init,
+ .sock_fini = xsub0_sock_fini,
+ .sock_open = xsub0_sock_open,
+ .sock_close = xsub0_sock_close,
+ .sock_send = xsub0_sock_send,
+ .sock_recv = xsub0_sock_recv,
+ .sock_options = xsub0_sock_options,
+};
+
+static nni_proto xsub0_proto = {
+ .proto_version = NNI_PROTOCOL_VERSION,
+ .proto_self = { NNI_PROTO_SUB_V0, "sub" },
+ .proto_peer = { NNI_PROTO_PUB_V0, "pub" },
+ .proto_flags = NNI_PROTO_FLAG_RCV | NNI_PROTO_FLAG_RAW,
+ .proto_sock_ops = &xsub0_sock_ops,
+ .proto_pipe_ops = &xsub0_pipe_ops,
+};
+
+// Public constructor for a raw SUB (version 0) socket.  Returns 0 on
+// success or an nng error code.
+int
+nng_sub0_open_raw(nng_socket *sidp)
+{
+ return (nni_proto_open(sidp, &xsub0_proto));
+}
diff --git a/src/sp/protocol/pubsub0/xsub_test.c b/src/sp/protocol/pubsub0/xsub_test.c
new file mode 100644
index 00000000..19815661
--- /dev/null
+++ b/src/sp/protocol/pubsub0/xsub_test.c
@@ -0,0 +1,376 @@
+//
+// Copyright 2020 Staysail Systems, Inc. <info@staysail.tech>
+//
+// This software is supplied under the terms of the MIT License, a
+// copy of which should be located in the distribution where this
+// file was obtained (LICENSE.txt). A copy of the license may also be
+// found online at https://opensource.org/licenses/MIT.
+//
+
+#include <nuts.h>
+
+// Verify protocol/peer numbers and names reported by a raw SUB socket.
+static void
+test_xsub_identity(void)
+{
+ nng_socket s;
+ int p;
+ char * n;
+
+ NUTS_PASS(nng_sub0_open_raw(&s));
+ NUTS_PASS(nng_socket_get_int(s, NNG_OPT_PROTO, &p));
+ NUTS_TRUE(p == NUTS_PROTO(2u, 1u)); // 33
+ NUTS_PASS(nng_socket_get_int(s, NNG_OPT_PEER, &p));
+ NUTS_TRUE(p == NUTS_PROTO(2u, 0u)); // 32
+ NUTS_PASS(nng_socket_get_string(s, NNG_OPT_PROTONAME, &n));
+ NUTS_MATCH(n, "sub");
+ nng_strfree(n);
+ NUTS_PASS(nng_socket_get_string(s, NNG_OPT_PEERNAME, &n));
+ NUTS_MATCH(n, "pub");
+ nng_strfree(n);
+ NUTS_CLOSE(s);
+}
+
+// Raw SUB sockets have no send side; any send must fail NNG_ENOTSUP.
+static void
+test_xsub_cannot_send(void)
+{
+ nng_socket s;
+
+ NUTS_PASS(nng_sub0_open_raw(&s));
+ NUTS_FAIL(nng_send(s, "", 0, 0), NNG_ENOTSUP);
+ NUTS_CLOSE(s);
+}
+
+static void
+test_xsub_not_writeable(void)
+{
+ int fd;
+ nng_socket sub;
+
+ NUTS_PASS(nng_sub0_open_raw(&sub));
+ NUTS_FAIL(nng_socket_get_int(sub, NNG_OPT_SENDFD, &fd), NNG_ENOTSUP);
+ NUTS_CLOSE(sub);
+}
+
+// The RECVFD pollable must be readable exactly when a message is
+// queued, and clear again once the message has been received.
+static void
+test_xsub_poll_readable(void)
+{
+ int fd;
+ nng_socket pub;
+ nng_socket sub;
+
+ NUTS_PASS(nng_sub0_open_raw(&sub));
+ NUTS_PASS(nng_pub0_open(&pub));
+ NUTS_PASS(nng_socket_set_ms(sub, NNG_OPT_RECVTIMEO, 1000));
+ NUTS_PASS(nng_socket_set_ms(pub, NNG_OPT_SENDTIMEO, 1000));
+ NUTS_PASS(nng_socket_get_int(sub, NNG_OPT_RECVFD, &fd));
+ NUTS_TRUE(fd >= 0);
+
+ // Not readable if not connected!
+ NUTS_TRUE(nuts_poll_fd(fd) == false);
+
+ // Even after connect (no message yet)
+ NUTS_MARRY(pub, sub);
+ NUTS_TRUE(nuts_poll_fd(fd) == false);
+
+ // But once the publisher sends a message, it becomes readable.
+ NUTS_SEND(pub, "abc");
+ NUTS_SLEEP(200);
+
+ NUTS_TRUE(nuts_poll_fd(fd));
+
+ // and receiving makes it no longer ready
+ NUTS_RECV(sub, "abc");
+ NUTS_TRUE(nuts_poll_fd(fd) == false);
+
+ NUTS_CLOSE(pub);
+ NUTS_CLOSE(sub);
+}
+
+// A receive posted before any message arrives must complete once the
+// publisher finally sends.
+static void
+test_xsub_recv_late(void)
+{
+ int fd;
+ nng_socket pub;
+ nng_socket sub;
+ nng_aio * aio;
+ nng_msg * msg;
+
+ NUTS_PASS(nng_sub0_open_raw(&sub));
+ NUTS_PASS(nng_pub0_open(&pub));
+ NUTS_PASS(nng_aio_alloc(&aio, NULL, NULL));
+ NUTS_PASS(nng_socket_set_ms(sub, NNG_OPT_RECVTIMEO, 1000));
+ NUTS_PASS(nng_socket_set_ms(pub, NNG_OPT_SENDTIMEO, 1000));
+ NUTS_PASS(nng_socket_get_int(sub, NNG_OPT_RECVFD, &fd));
+ NUTS_TRUE(fd >= 0);
+
+ // Not readable if not connected!
+ NUTS_TRUE(nuts_poll_fd(fd) == false);
+
+ // Even after connect (no message yet)
+ NUTS_MARRY(pub, sub);
+ NUTS_TRUE(nuts_poll_fd(fd) == false);
+
+ nng_recv_aio(sub, aio);
+
+ // Once the publisher sends, the pending receive completes.
+ NUTS_SEND(pub, "abc");
+ NUTS_SLEEP(200);
+
+ nng_aio_wait(aio);
+ NUTS_PASS(nng_aio_result(aio));
+ msg = nng_aio_get_msg(aio);
+ nng_aio_set_msg(aio, NULL);
+ // NUTS_SEND includes the terminating NUL, hence length 4.
+ NUTS_TRUE(nng_msg_len(msg) == 4);
+ NUTS_TRUE(strcmp(nng_msg_body(msg), "abc") == 0);
+
+ nng_msg_free(msg);
+ nng_aio_free(aio);
+
+ NUTS_CLOSE(pub);
+ NUTS_CLOSE(sub);
+}
+
+void
+test_xsub_no_context(void)
+{
+ nng_socket sub;
+ nng_ctx ctx;
+
+ NUTS_PASS(nng_sub0_open_raw(&sub));
+ NUTS_FAIL(nng_ctx_open(&ctx, sub), NNG_ENOTSUP);
+ NUTS_CLOSE(sub);
+}
+
+void
+test_xsub_validate_peer(void)
+{
+ nng_socket s1, s2;
+ nng_stat * stats;
+ nng_stat * reject;
+ char * addr;
+
+ NUTS_ADDR(addr, "inproc");
+
+ NUTS_PASS(nng_sub0_open_raw(&s1));
+ NUTS_PASS(nng_sub0_open_raw(&s2));
+
+ NUTS_PASS(nng_listen(s1, addr, NULL, 0));
+ NUTS_PASS(nng_dial(s2, addr, NULL, NNG_FLAG_NONBLOCK));
+
+ NUTS_SLEEP(100);
+ NUTS_PASS(nng_stats_get(&stats));
+
+ NUTS_TRUE(stats != NULL);
+ NUTS_TRUE((reject = nng_stat_find_socket(stats, s1)) != NULL);
+ NUTS_TRUE((reject = nng_stat_find(reject, "reject")) != NULL);
+
+ NUTS_TRUE(nng_stat_type(reject) == NNG_STAT_COUNTER);
+ NUTS_TRUE(nng_stat_value(reject) > 0);
+
+ NUTS_CLOSE(s1);
+ NUTS_CLOSE(s2);
+ nng_stats_free(stats);
+}
+
+// Receiving on an already-closed socket must fail NNG_ECLOSED.
+// (The close before nng_recv_aio is intentional.)
+static void
+test_xsub_recv_closed(void)
+{
+ nng_socket sub;
+ nng_aio * aio;
+ NUTS_PASS(nng_sub0_open_raw(&sub));
+ NUTS_PASS(nng_aio_alloc(&aio, NULL, NULL));
+ NUTS_CLOSE(sub);
+ nng_recv_aio(sub, aio);
+ nng_aio_wait(aio);
+ NUTS_FAIL(nng_aio_result(aio), NNG_ECLOSED);
+ nng_aio_free(aio);
+}
+
+static void
+test_xsub_close_recv(void)
+{
+ nng_socket sub;
+ nng_aio * aio;
+
+ NUTS_PASS(nng_sub0_open_raw(&sub));
+ NUTS_PASS(nng_aio_alloc(&aio, NULL, NULL));
+ nng_aio_set_timeout(aio, 1000);
+ nng_recv_aio(sub, aio);
+ NUTS_CLOSE(sub);
+ nng_aio_wait(aio);
+ NUTS_FAIL(nng_aio_result(aio), NNG_ECLOSED);
+
+ nng_aio_free(aio);
+}
+
+static void
+test_xsub_recv_nonblock(void)
+{
+ nng_socket sub;
+ nng_aio * aio;
+
+ NUTS_PASS(nng_sub0_open_raw(&sub));
+ NUTS_PASS(nng_aio_alloc(&aio, NULL, NULL));
+
+ nng_aio_set_timeout(aio, 0); // Instant timeout
+ nng_recv_aio(sub, aio);
+
+ nng_aio_wait(aio);
+ NUTS_FAIL(nng_aio_result(aio), NNG_ETIMEDOUT);
+ NUTS_CLOSE(sub);
+ nng_aio_free(aio);
+}
+
+// Exercise NNG_OPT_RECVBUF: valid range, round-trips through the
+// generic getter/setter, and type/size mismatch rejection.
+static void
+test_xsub_recv_buf_option(void)
+{
+ nng_socket sub;
+ int v;
+ bool b;
+ size_t sz;
+ const char *opt = NNG_OPT_RECVBUF;
+
+ NUTS_PASS(nng_sub0_open_raw(&sub));
+
+ NUTS_PASS(nng_socket_set_int(sub, opt, 1));
+ NUTS_FAIL(nng_socket_set_int(sub, opt, -1), NNG_EINVAL);
+ NUTS_FAIL(nng_socket_set_int(sub, opt, 1000000), NNG_EINVAL);
+ NUTS_PASS(nng_socket_set_int(sub, opt, 3));
+ NUTS_PASS(nng_socket_get_int(sub, opt, &v));
+ NUTS_TRUE(v == 3);
+ v = 0;
+ sz = sizeof(v);
+ NUTS_PASS(nng_socket_get(sub, opt, &v, &sz));
+ NUTS_TRUE(v == 3);
+ NUTS_TRUE(sz == sizeof(v));
+
+ NUTS_FAIL(nng_socket_set(sub, opt, "", 1), NNG_EINVAL);
+ sz = 1;
+ NUTS_FAIL(nng_socket_get(sub, opt, &v, &sz), NNG_EINVAL);
+ NUTS_FAIL(nng_socket_set_bool(sub, opt, true), NNG_EBADTYPE);
+ NUTS_FAIL(nng_socket_get_bool(sub, opt, &b), NNG_EBADTYPE);
+
+ NUTS_CLOSE(sub);
+}
+
+static void
+test_xsub_subscribe_option(void)
+{
+ nng_socket sub;
+ const char *opt = NNG_OPT_SUB_SUBSCRIBE;
+
+ NUTS_PASS(nng_sub0_open_raw(&sub));
+ NUTS_FAIL(nng_socket_set(sub, opt, "abc", 3), NNG_ENOTSUP);
+ NUTS_CLOSE(sub);
+}
+
+static void
+test_xsub_unsubscribe_option(void)
+{
+ nng_socket sub;
+ const char *opt = NNG_OPT_SUB_UNSUBSCRIBE;
+
+ NUTS_PASS(nng_sub0_open_raw(&sub));
+ NUTS_FAIL(nng_socket_set(sub, opt, "abc", 3), NNG_ENOTSUP);
+ NUTS_CLOSE(sub);
+}
+
+static void
+test_xsub_raw(void)
+{
+ nng_socket s;
+ bool b;
+
+ NUTS_PASS(nng_sub0_open_raw(&s));
+ NUTS_PASS(nng_socket_get_bool(s, NNG_OPT_RAW, &b));
+ NUTS_TRUE(b);
+ NUTS_CLOSE(s);
+}
+
+// Stress: flood a small receive buffer and close both sockets while
+// deliveries may still be in flight; must not crash or leak.
+static void
+test_xsub_close_during_recv(void)
+{
+ nng_socket sub;
+ nng_socket pub;
+
+ NUTS_PASS(nng_sub0_open_raw(&sub));
+ NUTS_PASS(nng_pub0_open(&pub));
+ NUTS_PASS(nng_socket_set_ms(sub, NNG_OPT_RECVTIMEO, 1000));
+ NUTS_PASS(nng_socket_set_ms(pub, NNG_OPT_SENDTIMEO, 100));
+ NUTS_PASS(nng_socket_set_int(sub, NNG_OPT_RECVBUF, 5));
+ NUTS_PASS(nng_socket_set_int(pub, NNG_OPT_SENDBUF, 20));
+
+ NUTS_MARRY(pub, sub);
+
+ for (unsigned i = 0; i < 100; i++) {
+ NUTS_PASS(nng_send(pub, "abc", 3, 0));
+ }
+ NUTS_CLOSE(pub);
+ NUTS_CLOSE(sub);
+}
+
+// Stress: close the subscriber while the pipe-level receive is still
+// active (publisher may back up and time out); must not crash.
+static void
+test_xsub_close_during_pipe_recv(void)
+{
+ nng_socket sub;
+ nng_socket pub;
+
+ NUTS_PASS(nng_sub0_open_raw(&sub));
+ NUTS_PASS(nng_pub0_open(&pub));
+ NUTS_PASS(nng_socket_set_ms(sub, NNG_OPT_RECVTIMEO, 1000));
+ NUTS_PASS(nng_socket_set_ms(pub, NNG_OPT_SENDTIMEO, 100));
+ NUTS_PASS(nng_socket_set_int(sub, NNG_OPT_RECVBUF, 5));
+ NUTS_PASS(nng_socket_set_int(pub, NNG_OPT_SENDBUF, 20));
+
+ NUTS_MARRY(pub, sub);
+
+ for (unsigned i = 0; i < 100; i++) {
+ int rv;
+ rv = nng_send(pub, "abc", 3, 0);
+ if (rv == NNG_ETIMEDOUT) {
+ break;
+ }
+ NUTS_SLEEP(1);
+ }
+ NUTS_CLOSE(sub);
+}
+
+static void
+test_xsub_recv_aio_stopped(void)
+{
+ nng_socket sub;
+ nng_aio * aio;
+
+ NUTS_PASS(nng_sub0_open_raw(&sub));
+ NUTS_PASS(nng_aio_alloc(&aio, NULL, NULL));
+
+ nng_aio_stop(aio);
+ nng_recv_aio(sub, aio);
+ nng_aio_wait(aio);
+ NUTS_FAIL(nng_aio_result(aio), NNG_ECANCELED);
+ NUTS_CLOSE(sub);
+ nng_aio_free(aio);
+}
+
+// Acutest registration table.  Terminated by a NULL entry.
+TEST_LIST = {
+ { "xsub identity", test_xsub_identity },
+ { "xsub cannot send", test_xsub_cannot_send },
+ { "xsub not writeable", test_xsub_not_writeable },
+ { "xsub poll readable", test_xsub_poll_readable },
+ { "xsub validate peer", test_xsub_validate_peer },
+ { "xsub recv late", test_xsub_recv_late },
+ { "xsub recv closed", test_xsub_recv_closed },
+ { "xsub close recv", test_xsub_close_recv },
+ { "xsub recv nonblock", test_xsub_recv_nonblock },
+ { "xsub recv buf option", test_xsub_recv_buf_option },
+ { "xsub subscribe option", test_xsub_subscribe_option },
+ { "xsub unsubscribe option", test_xsub_unsubscribe_option },
+ { "xsub no context", test_xsub_no_context },
+ { "xsub raw", test_xsub_raw },
+ { "xsub recv aio stopped", test_xsub_recv_aio_stopped },
+ { "xsub close during recv", test_xsub_close_during_recv },
+ { "xsub close during pipe recv", test_xsub_close_during_pipe_recv },
+ { NULL, NULL },
+};
diff --git a/src/sp/protocol/reqrep0/CMakeLists.txt b/src/sp/protocol/reqrep0/CMakeLists.txt
new file mode 100644
index 00000000..a3cecfd0
--- /dev/null
+++ b/src/sp/protocol/reqrep0/CMakeLists.txt
@@ -0,0 +1,25 @@
+#
+# Copyright 2020 Staysail Systems, Inc. <info@staysail.tech>
+# Copyright 2018 Capitar IT Group BV <info@capitar.com>
+#
+# This software is supplied under the terms of the MIT License, a
+# copy of which should be located in the distribution where this
+# file was obtained (LICENSE.txt). A copy of the license may also be
+# found online at https://opensource.org/licenses/MIT.
+#
+
+# Req/Rep protocol
+nng_directory(reqrep0)
+
+nng_sources_if(NNG_PROTO_REQ0 req.c xreq.c)
+nng_headers_if(NNG_PROTO_REQ0 nng/protocol/reqrep0/req.h)
+nng_defines_if(NNG_PROTO_REQ0 NNG_HAVE_REQ0)
+
+nng_sources_if(NNG_PROTO_REP0 rep.c xrep.c)
+nng_headers_if(NNG_PROTO_REP0 nng/protocol/reqrep0/rep.h)
+nng_defines_if(NNG_PROTO_REP0 NNG_HAVE_REP0)
+
+nng_test(req_test)
+nng_test(rep_test)
+nng_test(xrep_test)
+nng_test(xreq_test)
diff --git a/src/sp/protocol/reqrep0/rep.c b/src/sp/protocol/reqrep0/rep.c
new file mode 100644
index 00000000..aa32d249
--- /dev/null
+++ b/src/sp/protocol/reqrep0/rep.c
@@ -0,0 +1,705 @@
+//
+// Copyright 2020 Staysail Systems, Inc. <info@staysail.tech>
+// Copyright 2018 Capitar IT Group BV <info@capitar.com>
+//
+// This software is supplied under the terms of the MIT License, a
+// copy of which should be located in the distribution where this
+// file was obtained (LICENSE.txt). A copy of the license may also be
+// found online at https://opensource.org/licenses/MIT.
+//
+
+#include <string.h>
+
+#include "core/nng_impl.h"
+#include "nng/protocol/reqrep0/rep.h"
+
+// Response protocol. The REP protocol is the "reply" side of a
+// request-reply pair. This is useful for building RPC servers, for
+// example.
+
+typedef struct rep0_pipe rep0_pipe;
+typedef struct rep0_sock rep0_sock;
+typedef struct rep0_ctx rep0_ctx;
+
+static void rep0_pipe_send_cb(void *);
+static void rep0_pipe_recv_cb(void *);
+static void rep0_pipe_fini(void *);
+
+// Per-context REP state.  A context holds at most one pending send and
+// one pending receive, plus the backtrace (routing header) of the
+// request it is currently servicing.
+struct rep0_ctx {
+ rep0_sock * sock; // owning socket
+ uint32_t pipe_id; // pipe the current request arrived on (0 = none)
+ rep0_pipe * spipe; // send pipe
+ nni_aio * saio; // send aio
+ nni_aio * raio; // recv aio
+ nni_list_node sqnode; // linkage on a pipe's sendq
+ nni_list_node rqnode; // linkage on the socket's recvq
+ size_t btrace_len; // bytes of backtrace currently stored
+ uint32_t btrace[NNI_MAX_MAX_TTL + 1]; // saved routing header
+};
+
+// rep0_sock is our per-socket protocol private structure.
+struct rep0_sock {
+ nni_mtx lk; // protects pipes, recvpipes, recvq, and ctx state
+ nni_atomic_int ttl; // max hops for incoming requests
+ nni_id_map pipes; // pipe id -> rep0_pipe
+ nni_list recvpipes; // list of pipes with data to receive
+ nni_list recvq; // contexts waiting for a request
+ rep0_ctx ctx; // default (socket-level) context
+ nni_pollable readable;
+ nni_pollable writable;
+};
+
+// rep0_pipe is our per-pipe protocol private structure.
+struct rep0_pipe {
+ nni_pipe * pipe; // underlying transport pipe
+ rep0_sock * rep; // owning socket
+ uint32_t id; // cached pipe id
+ nni_aio aio_send;
+ nni_aio aio_recv;
+ nni_list_node rnode; // receivable list linkage
+ nni_list sendq; // contexts waiting to send
+ bool busy; // a send is in flight on this pipe
+ bool closed; // pipe has been closed; drop inbound data
+};
+
+// Abort a context's pending send and receive with NNG_ECLOSED,
+// detaching it from any pipe/socket queues under the socket lock.
+static void
+rep0_ctx_close(void *arg)
+{
+ rep0_ctx * ctx = arg;
+ rep0_sock *s = ctx->sock;
+ nni_aio * aio;
+
+ nni_mtx_lock(&s->lk);
+ if ((aio = ctx->saio) != NULL) {
+ rep0_pipe *pipe = ctx->spipe;
+ ctx->saio = NULL;
+ ctx->spipe = NULL;
+ nni_list_remove(&pipe->sendq, ctx);
+ nni_aio_finish_error(aio, NNG_ECLOSED);
+ }
+ if ((aio = ctx->raio) != NULL) {
+ nni_list_remove(&s->recvq, ctx);
+ ctx->raio = NULL;
+ nni_aio_finish_error(aio, NNG_ECLOSED);
+ }
+ nni_mtx_unlock(&s->lk);
+}
+
+static void
+rep0_ctx_fini(void *arg)
+{
+ rep0_ctx *ctx = arg;
+
+ rep0_ctx_close(ctx);
+}
+
+static int
+rep0_ctx_init(void *carg, void *sarg)
+{
+ rep0_sock *s = sarg;
+ rep0_ctx * ctx = carg;
+
+ NNI_LIST_NODE_INIT(&ctx->sqnode);
+ NNI_LIST_NODE_INIT(&ctx->rqnode);
+ ctx->btrace_len = 0;
+ ctx->sock = s;
+ ctx->pipe_id = 0;
+
+ return (0);
+}
+
+static void
+rep0_ctx_cancel_send(nni_aio *aio, void *arg, int rv)
+{
+ rep0_ctx * ctx = arg;
+ rep0_sock *s = ctx->sock;
+
+ nni_mtx_lock(&s->lk);
+ if (ctx->saio != aio) {
+ nni_mtx_unlock(&s->lk);
+ return;
+ }
+ nni_list_node_remove(&ctx->sqnode);
+ ctx->saio = NULL;
+ nni_mtx_unlock(&s->lk);
+
+ nni_msg_header_clear(nni_aio_get_msg(aio)); // reset the headers
+ nni_aio_finish_error(aio, rv);
+}
+
+// Send a reply on this context.  The saved backtrace is re-attached as
+// the message header and the reply is routed to the pipe the request
+// came from.  Completes the aio with NNG_ESTATE if no request is
+// outstanding, and silently succeeds if the requesting pipe is gone.
+static void
+rep0_ctx_send(void *arg, nni_aio *aio)
+{
+ rep0_ctx * ctx = arg;
+ rep0_sock *s = ctx->sock;
+ rep0_pipe *p;
+ nni_msg * msg;
+ int rv;
+ size_t len;
+ uint32_t p_id; // pipe id
+
+ // Establish that the operation may proceed before touching the
+ // aio's message; if nni_aio_begin fails the aio is being torn
+ // down and its message must be left alone.
+ if (nni_aio_begin(aio) != 0) {
+ return;
+ }
+ msg = nni_aio_get_msg(aio);
+ nni_msg_header_clear(msg);
+
+ nni_mtx_lock(&s->lk);
+ len = ctx->btrace_len;
+ p_id = ctx->pipe_id;
+
+ // Assert "completion" of the previous req request. This ensures
+ // exactly one send for one receive ordering.
+ ctx->btrace_len = 0;
+ ctx->pipe_id = 0;
+
+ if (ctx == &s->ctx) {
+ // No matter how this goes, we will no longer be able
+ // to send on the socket (root context). That's because
+ // we will have finished (successfully or otherwise) the
+ // reply for the single request we got.
+ nni_pollable_clear(&s->writable);
+ }
+ if (len == 0) {
+ // No request outstanding on this context.
+ nni_mtx_unlock(&s->lk);
+ nni_aio_finish_error(aio, NNG_ESTATE);
+ return;
+ }
+ if ((rv = nni_msg_header_append(msg, ctx->btrace, len)) != 0) {
+ nni_mtx_unlock(&s->lk);
+ nni_aio_finish_error(aio, rv);
+ return;
+ }
+ if ((p = nni_id_get(&s->pipes, p_id)) == NULL) {
+ // Pipe is gone. Make this look like a good send to avoid
+ // disrupting the state machine. We don't care if the peer
+ // lost interest in our reply.
+ nni_mtx_unlock(&s->lk);
+ nni_aio_set_msg(aio, NULL);
+ nni_aio_finish(aio, 0, nni_msg_len(msg));
+ nni_msg_free(msg);
+ return;
+ }
+ if (!p->busy) {
+ // Pipe is idle: hand the message straight to it and
+ // complete the caller immediately.
+ p->busy = true;
+ len = nni_msg_len(msg);
+ nni_aio_set_msg(&p->aio_send, msg);
+ nni_pipe_send(p->pipe, &p->aio_send);
+ nni_mtx_unlock(&s->lk);
+
+ nni_aio_set_msg(aio, NULL);
+ nni_aio_finish(aio, 0, len);
+ return;
+ }
+
+ // Pipe busy: queue the context behind it, cancelable.
+ if ((rv = nni_aio_schedule(aio, rep0_ctx_cancel_send, ctx)) != 0) {
+ nni_mtx_unlock(&s->lk);
+ nni_aio_finish_error(aio, rv);
+ return;
+ }
+
+ ctx->saio = aio;
+ ctx->spipe = p;
+ nni_list_append(&p->sendq, ctx);
+ nni_mtx_unlock(&s->lk);
+}
+
+static void
+rep0_sock_fini(void *arg)
+{
+ rep0_sock *s = arg;
+
+ nni_id_map_fini(&s->pipes);
+ rep0_ctx_fini(&s->ctx);
+ nni_pollable_fini(&s->writable);
+ nni_pollable_fini(&s->readable);
+ nni_mtx_fini(&s->lk);
+}
+
+static int
+rep0_sock_init(void *arg, nni_sock *sock)
+{
+ rep0_sock *s = arg;
+
+ NNI_ARG_UNUSED(sock);
+
+ nni_mtx_init(&s->lk);
+ nni_id_map_init(&s->pipes, 0, 0, false);
+ NNI_LIST_INIT(&s->recvq, rep0_ctx, rqnode);
+ NNI_LIST_INIT(&s->recvpipes, rep0_pipe, rnode);
+ nni_atomic_init(&s->ttl);
+ nni_atomic_set(&s->ttl, 8);
+
+ (void) rep0_ctx_init(&s->ctx, s);
+
+ // We start off without being either readable or writable.
+ // Readability comes when there is something on the socket.
+ nni_pollable_init(&s->writable);
+ nni_pollable_init(&s->readable);
+
+ return (0);
+}
+
+static void
+rep0_sock_open(void *arg)
+{
+ NNI_ARG_UNUSED(arg);
+}
+
+static void
+rep0_sock_close(void *arg)
+{
+ rep0_sock *s = arg;
+
+ rep0_ctx_close(&s->ctx);
+}
+
+static void
+rep0_pipe_stop(void *arg)
+{
+ rep0_pipe *p = arg;
+
+ nni_aio_stop(&p->aio_send);
+ nni_aio_stop(&p->aio_recv);
+}
+
+static void
+rep0_pipe_fini(void *arg)
+{
+ rep0_pipe *p = arg;
+ nng_msg * msg;
+
+ if ((msg = nni_aio_get_msg(&p->aio_recv)) != NULL) {
+ nni_aio_set_msg(&p->aio_recv, NULL);
+ nni_msg_free(msg);
+ }
+
+ nni_aio_fini(&p->aio_send);
+ nni_aio_fini(&p->aio_recv);
+}
+
+static int
+rep0_pipe_init(void *arg, nni_pipe *pipe, void *s)
+{
+ rep0_pipe *p = arg;
+
+ nni_aio_init(&p->aio_send, rep0_pipe_send_cb, p);
+ nni_aio_init(&p->aio_recv, rep0_pipe_recv_cb, p);
+
+ NNI_LIST_INIT(&p->sendq, rep0_ctx, sqnode);
+
+ p->id = nni_pipe_id(pipe);
+ p->pipe = pipe;
+ p->rep = s;
+ return (0);
+}
+
+static int
+rep0_pipe_start(void *arg)
+{
+ rep0_pipe *p = arg;
+ rep0_sock *s = p->rep;
+ int rv;
+
+ if (nni_pipe_peer(p->pipe) != NNG_REP0_PEER) {
+ // Peer protocol mismatch.
+ return (NNG_EPROTO);
+ }
+
+ nni_mtx_lock(&s->lk);
+ rv = nni_id_set(&s->pipes, nni_pipe_id(p->pipe), p);
+ nni_mtx_unlock(&s->lk);
+ if (rv != 0) {
+ return (rv);
+ }
+ // By definition, we have not received a request yet on this pipe,
+ // so it cannot cause us to become writable.
+ nni_pipe_recv(p->pipe, &p->aio_recv);
+ return (0);
+}
+
+// Close a pipe: abort its aios, detach it from the receivable list,
+// and complete any contexts queued to send on it as if their sends
+// succeeded (the peer lost interest; don't surface an error).
+static void
+rep0_pipe_close(void *arg)
+{
+ rep0_pipe *p = arg;
+ rep0_sock *s = p->rep;
+ rep0_ctx * ctx;
+
+ nni_aio_close(&p->aio_send);
+ nni_aio_close(&p->aio_recv);
+
+ nni_mtx_lock(&s->lk);
+ p->closed = true;
+ if (nni_list_active(&s->recvpipes, p)) {
+ // We are no longer "receivable".
+ nni_list_remove(&s->recvpipes, p);
+ }
+ while ((ctx = nni_list_first(&p->sendq)) != NULL) {
+ nni_aio *aio;
+ nni_msg *msg;
+ // Pipe was closed. To avoid pushing an error back to the
+ // entire socket, we pretend we completed this successfully.
+ nni_list_remove(&p->sendq, ctx);
+ aio = ctx->saio;
+ ctx->saio = NULL;
+ msg = nni_aio_get_msg(aio);
+ nni_aio_set_msg(aio, NULL);
+ nni_aio_finish(aio, 0, nni_msg_len(msg));
+ nni_msg_free(msg);
+ }
+ if (p->id == s->ctx.pipe_id) {
+ // We "can" send. (Well, not really, but we will happily
+ // accept a message and discard it.)
+ nni_pollable_raise(&s->writable);
+ }
+ nni_id_remove(&s->pipes, nni_pipe_id(p->pipe));
+ nni_mtx_unlock(&s->lk);
+}
+
+// Send completion callback.  On failure, drop the message and close
+// the pipe.  On success, dequeue the next context waiting to send on
+// this pipe (if any) and start its transmission; otherwise raise the
+// socket's writable pollable if this pipe owns the current request.
+static void
+rep0_pipe_send_cb(void *arg)
+{
+ rep0_pipe *p = arg;
+ rep0_sock *s = p->rep;
+ rep0_ctx * ctx;
+ nni_aio * aio;
+ nni_msg * msg;
+ size_t len;
+
+ if (nni_aio_result(&p->aio_send) != 0) {
+ nni_msg_free(nni_aio_get_msg(&p->aio_send));
+ nni_aio_set_msg(&p->aio_send, NULL);
+ nni_pipe_close(p->pipe);
+ return;
+ }
+ nni_mtx_lock(&s->lk);
+ p->busy = false;
+ if ((ctx = nni_list_first(&p->sendq)) == NULL) {
+ // Nothing else to send.
+ if (p->id == s->ctx.pipe_id) {
+ // Mark us ready for the other side to send!
+ nni_pollable_raise(&s->writable);
+ }
+ nni_mtx_unlock(&s->lk);
+ return;
+ }
+
+ nni_list_remove(&p->sendq, ctx);
+ aio = ctx->saio;
+ ctx->saio = NULL;
+ ctx->spipe = NULL;
+ p->busy = true;
+ msg = nni_aio_get_msg(aio);
+ len = nni_msg_len(msg);
+ nni_aio_set_msg(aio, NULL);
+ nni_aio_set_msg(&p->aio_send, msg);
+ nni_pipe_send(p->pipe, &p->aio_send);
+
+ nni_mtx_unlock(&s->lk);
+
+ // Complete the waiting context outside the lock.
+ nni_aio_finish_sync(aio, 0, len);
+}
+
+// Cancellation callback for a queued context receive: detach the
+// context from the recvq and fail the aio with the given error.
+static void
+rep0_cancel_recv(nni_aio *aio, void *arg, int rv)
+{
+ rep0_ctx * ctx = arg;
+ rep0_sock *s = ctx->sock;
+
+ nni_mtx_lock(&s->lk);
+ if (ctx->raio == aio) {
+ nni_list_remove(&s->recvq, ctx);
+ ctx->raio = NULL;
+ nni_aio_finish_error(aio, rv);
+ }
+ nni_mtx_unlock(&s->lk);
+}
+
+// Receive a request on this context.  If a pipe already has a message
+// waiting, take it, stash its backtrace for the eventual reply, and
+// complete immediately; otherwise queue the context on the socket's
+// recvq (at most one pending receive per context: NNG_ESTATE).
+static void
+rep0_ctx_recv(void *arg, nni_aio *aio)
+{
+ rep0_ctx * ctx = arg;
+ rep0_sock *s = ctx->sock;
+ rep0_pipe *p;
+ size_t len;
+ nni_msg * msg;
+
+ if (nni_aio_begin(aio) != 0) {
+ return;
+ }
+ nni_mtx_lock(&s->lk);
+ if ((p = nni_list_first(&s->recvpipes)) == NULL) {
+ int rv;
+ if ((rv = nni_aio_schedule(aio, rep0_cancel_recv, ctx)) != 0) {
+ nni_mtx_unlock(&s->lk);
+ nni_aio_finish_error(aio, rv);
+ return;
+ }
+ if (ctx->raio != NULL) {
+ // Cannot have a second receive operation pending.
+ // This could be ESTATE, or we could cancel the first
+ // with ECANCELED. We elect the former.
+ nni_mtx_unlock(&s->lk);
+ nni_aio_finish_error(aio, NNG_ESTATE);
+ return;
+ }
+ ctx->raio = aio;
+ nni_list_append(&s->recvq, ctx);
+ nni_mtx_unlock(&s->lk);
+ return;
+ }
+ // A message is already waiting: claim it and re-arm the pipe.
+ msg = nni_aio_get_msg(&p->aio_recv);
+ nni_aio_set_msg(&p->aio_recv, NULL);
+ nni_list_remove(&s->recvpipes, p);
+ if (nni_list_empty(&s->recvpipes)) {
+ nni_pollable_clear(&s->readable);
+ }
+ nni_pipe_recv(p->pipe, &p->aio_recv);
+ if ((ctx == &s->ctx) && !p->busy) {
+ nni_pollable_raise(&s->writable);
+ }
+
+ // Save the routing header so the reply can find its way back.
+ len = nni_msg_header_len(msg);
+ memcpy(ctx->btrace, nni_msg_header(msg), len);
+ ctx->btrace_len = len;
+ ctx->pipe_id = nni_pipe_id(p->pipe);
+ nni_mtx_unlock(&s->lk);
+
+ nni_msg_header_clear(msg);
+ nni_aio_set_msg(aio, msg);
+ nni_aio_finish(aio, 0, nni_msg_len(msg));
+}
+
+// Pipe receive completion callback.  Moves the request's backtrace
+// from the body into the header (4 bytes per hop, last hop flagged by
+// the high bit), enforces the TTL, and hands the message to a waiting
+// context -- or parks the pipe on recvpipes until one arrives.
+static void
+rep0_pipe_recv_cb(void *arg)
+{
+ rep0_pipe *p = arg;
+ rep0_sock *s = p->rep;
+ rep0_ctx * ctx;
+ nni_msg * msg;
+ uint8_t * body;
+ nni_aio * aio;
+ size_t len;
+ int hops;
+ int ttl;
+
+ if (nni_aio_result(&p->aio_recv) != 0) {
+ nni_pipe_close(p->pipe);
+ return;
+ }
+
+ msg = nni_aio_get_msg(&p->aio_recv);
+ ttl = nni_atomic_get(&s->ttl);
+
+ nni_msg_set_pipe(msg, p->id);
+
+ // Move backtrace from body to header
+ hops = 1;
+ for (;;) {
+ bool end;
+
+ if (hops > ttl) {
+ // This isn't malformed, but it has gone
+ // through too many hops. Do not disconnect,
+ // because we can legitimately receive messages
+ // with too many hops from devices, etc.
+ goto drop;
+ }
+ hops++;
+ if (nni_msg_len(msg) < 4) {
+ // Peer is speaking garbage. Kick it.
+ nni_msg_free(msg);
+ nni_aio_set_msg(&p->aio_recv, NULL);
+ nni_pipe_close(p->pipe);
+ return;
+ }
+ body = nni_msg_body(msg);
+ // High bit of the hop id marks the final (request id) hop.
+ end = ((body[0] & 0x80u) != 0);
+ if (nni_msg_header_append(msg, body, 4) != 0) {
+ // Out of memory, so drop it.
+ goto drop;
+ }
+ nni_msg_trim(msg, 4);
+ if (end) {
+ break;
+ }
+ }
+
+ len = nni_msg_header_len(msg);
+
+ nni_mtx_lock(&s->lk);
+
+ if (p->closed) {
+ // If we are closed, then we can't return data.
+ nni_aio_set_msg(&p->aio_recv, NULL);
+ nni_mtx_unlock(&s->lk);
+ nni_msg_free(msg);
+ return;
+ }
+
+ if ((ctx = nni_list_first(&s->recvq)) == NULL) {
+ // No one waiting to receive yet, holding pattern.
+ nni_list_append(&s->recvpipes, p);
+ nni_pollable_raise(&s->readable);
+ nni_mtx_unlock(&s->lk);
+ return;
+ }
+
+ nni_list_remove(&s->recvq, ctx);
+ aio = ctx->raio;
+ ctx->raio = NULL;
+ nni_aio_set_msg(&p->aio_recv, NULL);
+ if ((ctx == &s->ctx) && !p->busy) {
+ nni_pollable_raise(&s->writable);
+ }
+
+ // schedule another receive
+ nni_pipe_recv(p->pipe, &p->aio_recv);
+
+ // Stash the routing header so the reply can be routed back.
+ ctx->btrace_len = len;
+ memcpy(ctx->btrace, nni_msg_header(msg), len);
+ nni_msg_header_clear(msg);
+ ctx->pipe_id = p->id;
+
+ nni_mtx_unlock(&s->lk);
+
+ nni_aio_set_msg(aio, msg);
+ nni_aio_finish_sync(aio, 0, nni_msg_len(msg));
+ return;
+
+drop:
+ nni_msg_free(msg);
+ nni_aio_set_msg(&p->aio_recv, NULL);
+ nni_pipe_recv(p->pipe, &p->aio_recv);
+}
+
+static int
+rep0_sock_set_max_ttl(void *arg, const void *buf, size_t sz, nni_opt_type t)
+{
+ rep0_sock *s = arg;
+ int ttl;
+ int rv;
+
+ if ((rv = nni_copyin_int(&ttl, buf, sz, 1, NNI_MAX_MAX_TTL, t)) == 0) {
+ nni_atomic_set(&s->ttl, ttl);
+ }
+ return (rv);
+}
+
+static int
+rep0_sock_get_max_ttl(void *arg, void *buf, size_t *szp, nni_opt_type t)
+{
+ rep0_sock *s = arg;
+
+ return (nni_copyout_int(nni_atomic_get(&s->ttl), buf, szp, t));
+}
+
+static int
+rep0_sock_get_sendfd(void *arg, void *buf, size_t *szp, nni_opt_type t)
+{
+ rep0_sock *s = arg;
+ int rv;
+ int fd;
+
+ if ((rv = nni_pollable_getfd(&s->writable, &fd)) != 0) {
+ return (rv);
+ }
+ return (nni_copyout_int(fd, buf, szp, t));
+}
+
+static int
+rep0_sock_get_recvfd(void *arg, void *buf, size_t *szp, nni_opt_type t)
+{
+ rep0_sock *s = arg;
+ int rv;
+ int fd;
+
+ if ((rv = nni_pollable_getfd(&s->readable, &fd)) != 0) {
+ return (rv);
+ }
+
+ return (nni_copyout_int(fd, buf, szp, t));
+}
+
+static void
+rep0_sock_send(void *arg, nni_aio *aio)
+{
+ rep0_sock *s = arg;
+
+ rep0_ctx_send(&s->ctx, aio);
+}
+
+static void
+rep0_sock_recv(void *arg, nni_aio *aio)
+{
+ rep0_sock *s = arg;
+
+ rep0_ctx_recv(&s->ctx, aio);
+}
+
+// This is the global protocol structure -- our linkage to the core.
+// This should be the only global non-static symbol in this file.
+static nni_proto_pipe_ops rep0_pipe_ops = {
+ .pipe_size = sizeof(rep0_pipe),
+ .pipe_init = rep0_pipe_init,
+ .pipe_fini = rep0_pipe_fini,
+ .pipe_start = rep0_pipe_start,
+ .pipe_close = rep0_pipe_close,
+ .pipe_stop = rep0_pipe_stop,
+};
+
+static nni_proto_ctx_ops rep0_ctx_ops = {
+ .ctx_size = sizeof(rep0_ctx),
+ .ctx_init = rep0_ctx_init,
+ .ctx_fini = rep0_ctx_fini,
+ .ctx_send = rep0_ctx_send,
+ .ctx_recv = rep0_ctx_recv,
+};
+
+static nni_option rep0_sock_options[] = {
+ {
+ .o_name = NNG_OPT_MAXTTL,
+ .o_get = rep0_sock_get_max_ttl,
+ .o_set = rep0_sock_set_max_ttl,
+ },
+ {
+ .o_name = NNG_OPT_RECVFD,
+ .o_get = rep0_sock_get_recvfd,
+ },
+ {
+ .o_name = NNG_OPT_SENDFD,
+ .o_get = rep0_sock_get_sendfd,
+ },
+ // terminate list
+ {
+ .o_name = NULL,
+ },
+};
+
+static nni_proto_sock_ops rep0_sock_ops = {
+ .sock_size = sizeof(rep0_sock),
+ .sock_init = rep0_sock_init,
+ .sock_fini = rep0_sock_fini,
+ .sock_open = rep0_sock_open,
+ .sock_close = rep0_sock_close,
+ .sock_options = rep0_sock_options,
+ .sock_send = rep0_sock_send,
+ .sock_recv = rep0_sock_recv,
+};
+
+static nni_proto rep0_proto = {
+ .proto_version = NNI_PROTOCOL_VERSION,
+ .proto_self = { NNG_REP0_SELF, NNG_REP0_SELF_NAME },
+ .proto_peer = { NNG_REP0_PEER, NNG_REP0_PEER_NAME },
+ .proto_flags = NNI_PROTO_FLAG_SNDRCV,
+ .proto_sock_ops = &rep0_sock_ops,
+ .proto_pipe_ops = &rep0_pipe_ops,
+ .proto_ctx_ops = &rep0_ctx_ops,
+};
+
+// Public constructor for a cooked REP (version 0) socket.  Returns 0
+// on success or an nng error code.
+int
+nng_rep0_open(nng_socket *sidp)
+{
+ return (nni_proto_open(sidp, &rep0_proto));
+}
diff --git a/src/sp/protocol/reqrep0/rep_test.c b/src/sp/protocol/reqrep0/rep_test.c
new file mode 100644
index 00000000..5a47e67a
--- /dev/null
+++ b/src/sp/protocol/reqrep0/rep_test.c
@@ -0,0 +1,669 @@
+//
+// Copyright 2020 Staysail Systems, Inc. <info@staysail.tech>
+//
+// This software is supplied under the terms of the MIT License, a
+// copy of which should be located in the distribution where this
+// file was obtained (LICENSE.txt). A copy of the license may also be
+// found online at https://opensource.org/licenses/MIT.
+//
+
+#include <nuts.h>
+
// Verify the socket reports the correct protocol and peer numbers/names.
static void
test_rep_identity(void)
{
	nng_socket s;
	int        p1, p2;
	char *     n1;
	char *     n2;

	NUTS_PASS(nng_rep0_open(&s));
	NUTS_PASS(nng_socket_get_int(s, NNG_OPT_PROTO, &p1));
	NUTS_PASS(nng_socket_get_int(s, NNG_OPT_PEER, &p2));
	NUTS_PASS(nng_socket_get_string(s, NNG_OPT_PROTONAME, &n1));
	NUTS_PASS(nng_socket_get_string(s, NNG_OPT_PEERNAME, &n2));
	NUTS_CLOSE(s);
	NUTS_TRUE(p1 == NNG_REP0_SELF);
	NUTS_TRUE(p2 == NNG_REP0_PEER);
	NUTS_MATCH(n1, NNG_REP0_SELF_NAME);
	NUTS_MATCH(n2, NNG_REP0_PEER_NAME);
	nng_strfree(n1);
	nng_strfree(n2);
}
+
+void
+test_rep_send_bad_state(void)
+{
+ nng_socket rep;
+ nng_msg * msg = NULL;
+
+ NUTS_TRUE(nng_rep0_open(&rep) == 0);
+ NUTS_TRUE(nng_msg_alloc(&msg, 0) == 0);
+ NUTS_TRUE(nng_sendmsg(rep, msg, 0) == NNG_ESTATE);
+ nng_msg_free(msg);
+ NUTS_CLOSE(rep);
+}
+
// The SENDFD pollable becomes writable only after a request has been
// received (REP can only send replies), and not-writable again once the
// reply has been sent.
void
test_rep_poll_writeable(void)
{
	int        fd;
	nng_socket req;
	nng_socket rep;

	NUTS_PASS(nng_req0_open(&req));
	NUTS_PASS(nng_rep0_open(&rep));
	NUTS_PASS(nng_socket_get_int(rep, NNG_OPT_SENDFD, &fd));
	NUTS_TRUE(fd >= 0);

	// Not writable before connect.
	NUTS_TRUE(nuts_poll_fd(fd) == false);

	NUTS_MARRY(req, rep);

	// Still not writable.
	NUTS_TRUE(nuts_poll_fd(fd) == false);

	// If we get a job, *then* we become writable
	NUTS_SEND(req, "abc");
	NUTS_RECV(rep, "abc");
	NUTS_TRUE(nuts_poll_fd(fd) == true);

	// And is no longer writable once we send a message
	NUTS_SEND(rep, "def");
	NUTS_TRUE(nuts_poll_fd(fd) == false);
	// Even after receiving it
	NUTS_RECV(req, "def");
	NUTS_TRUE(nuts_poll_fd(fd) == false);

	NUTS_CLOSE(req);
	NUTS_CLOSE(rep);
}
+
// The RECVFD pollable is readable only while a request is waiting to be
// received, and clears once the request is consumed.
void
test_rep_poll_readable(void)
{
	int        fd;
	nng_socket req;
	nng_socket rep;
	nng_msg *  msg;

	NUTS_PASS(nng_req0_open(&req));
	NUTS_PASS(nng_rep0_open(&rep));
	NUTS_PASS(nng_socket_get_int(rep, NNG_OPT_RECVFD, &fd));
	NUTS_TRUE(fd >= 0);

	// Not readable if not connected!
	NUTS_TRUE(nuts_poll_fd(fd) == false);

	// Even after connect (no message yet)
	NUTS_MARRY(req, rep);
	NUTS_TRUE(nuts_poll_fd(fd) == false);

	// But once we send messages, it is.
	// We have to send a request, in order to send a reply.
	NUTS_SEND(req, "abc");
	NUTS_SLEEP(100); // allow the request to arrive asynchronously

	NUTS_TRUE(nuts_poll_fd(fd) == true);

	// and receiving makes it no longer ready
	NUTS_PASS(nng_recvmsg(rep, &msg, 0));
	nng_msg_free(msg);
	NUTS_TRUE(nuts_poll_fd(fd) == false);

	// TODO verify unsolicited response

	NUTS_CLOSE(req);
	NUTS_CLOSE(rep);
}
+
+void
+test_rep_context_no_poll(void)
+{
+ int fd;
+ nng_socket req;
+ nng_ctx ctx;
+
+ NUTS_PASS(nng_rep0_open(&req));
+ NUTS_PASS(nng_ctx_open(&ctx, req));
+ NUTS_FAIL(nng_ctx_get_int(ctx, NNG_OPT_SENDFD, &fd), NNG_ENOTSUP);
+ NUTS_FAIL(nng_ctx_get_int(ctx, NNG_OPT_RECVFD, &fd), NNG_ENOTSUP);
+ NUTS_PASS(nng_ctx_close(ctx));
+ NUTS_CLOSE(req);
+}
+
// Two REP sockets must refuse to talk to each other; verify the
// connection is rejected by checking the socket's "reject" statistic.
void
test_rep_validate_peer(void)
{
	nng_socket s1, s2;
	nng_stat * stats;
	nng_stat * reject;
	char *     addr;

	NUTS_ADDR(addr, "inproc");
	NUTS_PASS(nng_rep0_open(&s1));
	NUTS_PASS(nng_rep0_open(&s2));

	NUTS_PASS(nng_listen(s1, addr, NULL, 0));
	NUTS_PASS(nng_dial(s2, addr, NULL, NNG_FLAG_NONBLOCK));

	NUTS_SLEEP(100); // let the (failed) handshake happen
	NUTS_PASS(nng_stats_get(&stats));

	NUTS_TRUE(stats != NULL);
	NUTS_TRUE((reject = nng_stat_find_socket(stats, s1)) != NULL);
	NUTS_TRUE((reject = nng_stat_find(reject, "reject")) != NULL);

	NUTS_TRUE(nng_stat_type(reject) == NNG_STAT_COUNTER);
	NUTS_TRUE(nng_stat_value(reject) > 0);

	NUTS_CLOSE(s1);
	NUTS_CLOSE(s2);
	nng_stats_free(stats);
}
+
// Only one receive may be pending on the socket at a time; a second
// concurrent receive fails with NNG_ESTATE, and the first is aborted
// with NNG_ECLOSED when the socket closes.
void
test_rep_double_recv(void)
{
	nng_socket s1;
	nng_aio *  aio1;
	nng_aio *  aio2;

	NUTS_PASS(nng_rep0_open(&s1));
	NUTS_PASS(nng_aio_alloc(&aio1, NULL, NULL));
	NUTS_PASS(nng_aio_alloc(&aio2, NULL, NULL));

	nng_recv_aio(s1, aio1);
	nng_recv_aio(s1, aio2);

	nng_aio_wait(aio2);
	NUTS_FAIL(nng_aio_result(aio2), NNG_ESTATE);
	NUTS_CLOSE(s1);
	NUTS_FAIL(nng_aio_result(aio1), NNG_ECLOSED);
	nng_aio_free(aio1);
	nng_aio_free(aio2);
}
+
// Closing the pipe a request arrived on before sending the reply must
// not break the socket: the send still "succeeds" (the reply is
// silently discarded since its destination is gone).
void
test_rep_close_pipe_before_send(void)
{
	nng_socket rep;
	nng_socket req;
	nng_pipe   p;
	nng_aio *  aio1;
	nng_msg *  m;

	NUTS_PASS(nng_rep0_open(&rep));
	NUTS_PASS(nng_req0_open(&req));
	NUTS_PASS(nng_socket_set_ms(rep, NNG_OPT_RECVTIMEO, 1000));
	NUTS_PASS(nng_socket_set_ms(rep, NNG_OPT_SENDTIMEO, 1000));
	NUTS_PASS(nng_socket_set_ms(req, NNG_OPT_SENDTIMEO, 1000));
	NUTS_PASS(nng_aio_alloc(&aio1, NULL, NULL));

	NUTS_MARRY(req, rep);
	NUTS_SEND(req, "test");

	nng_recv_aio(rep, aio1);
	nng_aio_wait(aio1);
	NUTS_PASS(nng_aio_result(aio1));
	NUTS_TRUE((m = nng_aio_get_msg(aio1)) != NULL);
	p = nng_msg_get_pipe(m); // pipe the request came in on
	NUTS_PASS(nng_pipe_close(p));
	NUTS_PASS(nng_sendmsg(rep, m, 0));

	NUTS_CLOSE(req);
	NUTS_CLOSE(rep);
	nng_aio_free(aio1);
}
+
// Fill the send path until back-pressure causes a send timeout, then
// close the pipe while replies are still queued; everything must clean
// up without leaks or crashes.
void
test_rep_close_pipe_during_send(void)
{
	nng_socket rep;
	nng_socket req;
	nng_pipe   p = NNG_PIPE_INITIALIZER;
	nng_msg *  m;

	NUTS_PASS(nng_rep0_open(&rep));
	NUTS_PASS(nng_req0_open_raw(&req)); // raw REQ: no request matching
	NUTS_PASS(nng_socket_set_ms(rep, NNG_OPT_RECVTIMEO, 1000));
	NUTS_PASS(nng_socket_set_ms(rep, NNG_OPT_SENDTIMEO, 200));
	NUTS_PASS(nng_socket_set_ms(req, NNG_OPT_SENDTIMEO, 1000));
	NUTS_PASS(nng_socket_set_int(rep, NNG_OPT_SENDBUF, 20));
	NUTS_PASS(nng_socket_set_int(rep, NNG_OPT_RECVBUF, 20));
	NUTS_PASS(nng_socket_set_int(req, NNG_OPT_SENDBUF, 20));
	NUTS_PASS(nng_socket_set_int(req, NNG_OPT_RECVBUF, 1));

	NUTS_MARRY(req, rep);

	for (int i = 0; i < 100; i++) {
		int rv;
		NUTS_PASS(nng_msg_alloc(&m, 4));
		// High bit set marks a request ID (raw mode header).
		NUTS_PASS(nng_msg_append_u32(m, (unsigned) i | 0x80000000u));
		NUTS_PASS(nng_sendmsg(req, m, 0));
		NUTS_PASS(nng_recvmsg(rep, &m, 0));
		p  = nng_msg_get_pipe(m);
		rv = nng_sendmsg(rep, m, 0);
		if (rv == NNG_ETIMEDOUT) {
			// Queue is backed up, senders are busy.
			nng_msg_free(m);
			break;
		}
		NUTS_PASS(rv);
	}
	NUTS_PASS(nng_pipe_close(p));

	NUTS_CLOSE(req);
	NUTS_CLOSE(rep);
}
+
// A receive submitted on an already-stopped aio must complete
// immediately with NNG_ECANCELED.
void
test_rep_ctx_recv_aio_stopped(void)
{
	nng_socket rep;
	nng_ctx    ctx;
	nng_aio *  aio;

	NUTS_PASS(nng_rep0_open(&rep));
	NUTS_PASS(nng_aio_alloc(&aio, NULL, NULL));
	NUTS_PASS(nng_ctx_open(&ctx, rep));

	nng_aio_stop(aio);
	nng_ctx_recv(ctx, aio);
	nng_aio_wait(aio);
	NUTS_FAIL(nng_aio_result(aio), NNG_ECANCELED);
	NUTS_PASS(nng_ctx_close(ctx));
	NUTS_CLOSE(rep);
	nng_aio_free(aio);
}
+
// With 100 contexts each holding a pending reply, closing the single
// pipe must complete every outstanding context send (either success or
// NNG_ECLOSED) without leaking messages.
void
test_rep_close_pipe_context_send(void)
{
	nng_socket rep;
	nng_socket req;
	nng_pipe   p = NNG_PIPE_INITIALIZER;
	nng_msg *  m;
	nng_ctx    ctx[100];
	nng_aio *  aio[100];
	int        i;

	NUTS_PASS(nng_rep0_open(&rep));
	NUTS_PASS(nng_req0_open_raw(&req));
	for (i = 0; i < 100; i++) {
		NUTS_PASS(nng_ctx_open(&ctx[i], rep));
		NUTS_PASS(nng_aio_alloc(&aio[i], NULL, NULL));
	}
	NUTS_PASS(nng_socket_set_ms(rep, NNG_OPT_RECVTIMEO, 1000));
	NUTS_PASS(nng_socket_set_ms(rep, NNG_OPT_SENDTIMEO, 1000));
	NUTS_PASS(nng_socket_set_ms(req, NNG_OPT_SENDTIMEO, 1000));
	NUTS_PASS(nng_socket_set_int(rep, NNG_OPT_SENDBUF, 1));
	NUTS_PASS(nng_socket_set_int(rep, NNG_OPT_RECVBUF, 1));
	NUTS_PASS(nng_socket_set_int(req, NNG_OPT_SENDBUF, 1));
	NUTS_PASS(nng_socket_set_int(req, NNG_OPT_RECVBUF, 1));

	NUTS_MARRY(req, rep);

	for (i = 0; i < 100; i++) {
		NUTS_PASS(nng_msg_alloc(&m, 4));
		// High bit set marks a request ID (raw mode header).
		NUTS_PASS(nng_msg_append_u32(m, (unsigned) i | 0x80000000u));
		NUTS_PASS(nng_sendmsg(req, m, 0));
		nng_ctx_recv(ctx[i], aio[i]);
	}
	for (i = 0; i < 100; i++) {
		nng_aio_wait(aio[i]);
		NUTS_PASS(nng_aio_result(aio[i]));
		NUTS_TRUE((m = nng_aio_get_msg(aio[i])) != NULL);
		p = nng_msg_get_pipe(m);
		nng_aio_set_msg(aio[i], m);
		nng_ctx_send(ctx[i], aio[i]);
	}

	// Note that REQ socket is not reading the results.
	NUTS_PASS(nng_pipe_close(p));

	for (i = 0; i < 100; i++) {
		int rv;
		nng_aio_wait(aio[i]);
		rv = nng_aio_result(aio[i]);
		if (rv != 0) {
			NUTS_FAIL(rv, NNG_ECLOSED);
			// On failure ownership stays with us; free it.
			nng_msg_free(nng_aio_get_msg(aio[i]));
		}
		nng_aio_free(aio[i]);
		NUTS_PASS(nng_ctx_close(ctx[i]));
	}
	NUTS_CLOSE(req);
	NUTS_CLOSE(rep);
}
+
// Same as the pipe-close variant, but closing each *context* while its
// reply send is pending: the send completes (or fails with NNG_ECLOSED)
// and no messages leak.
void
test_rep_close_context_send(void)
{
	nng_socket rep;
	nng_socket req;
	nng_msg *  m;
	nng_ctx    ctx[100];
	nng_aio *  aio[100];
	int        i;

	NUTS_PASS(nng_rep0_open(&rep));
	NUTS_PASS(nng_req0_open_raw(&req));
	for (i = 0; i < 100; i++) {
		NUTS_PASS(nng_ctx_open(&ctx[i], rep));
		NUTS_PASS(nng_aio_alloc(&aio[i], NULL, NULL));
	}
	NUTS_PASS(nng_socket_set_ms(rep, NNG_OPT_RECVTIMEO, 1000));
	NUTS_PASS(nng_socket_set_ms(rep, NNG_OPT_SENDTIMEO, 1000));
	NUTS_PASS(nng_socket_set_ms(req, NNG_OPT_SENDTIMEO, 1000));
	NUTS_PASS(nng_socket_set_int(rep, NNG_OPT_SENDBUF, 1));
	NUTS_PASS(nng_socket_set_int(rep, NNG_OPT_RECVBUF, 1));
	NUTS_PASS(nng_socket_set_int(req, NNG_OPT_SENDBUF, 1));
	NUTS_PASS(nng_socket_set_int(req, NNG_OPT_RECVBUF, 1));

	NUTS_MARRY(req, rep);

	for (i = 0; i < 100; i++) {
		NUTS_PASS(nng_msg_alloc(&m, 4));
		// High bit set marks a request ID (raw mode header).
		NUTS_PASS(nng_msg_append_u32(m, (unsigned) i | 0x80000000u));
		NUTS_PASS(nng_sendmsg(req, m, 0));
		nng_ctx_recv(ctx[i], aio[i]);
	}
	for (i = 0; i < 100; i++) {
		nng_aio_wait(aio[i]);
		NUTS_PASS(nng_aio_result(aio[i]));
		NUTS_TRUE((m = nng_aio_get_msg(aio[i])) != NULL);
		nng_aio_set_msg(aio[i], m);
		nng_ctx_send(ctx[i], aio[i]);
	}

	// Note that REQ socket is not reading the results.
	for (i = 0; i < 100; i++) {
		int rv;
		NUTS_PASS(nng_ctx_close(ctx[i]));
		nng_aio_wait(aio[i]);
		rv = nng_aio_result(aio[i]);
		if (rv != 0) {
			NUTS_FAIL(rv, NNG_ECLOSED);
			// On failure ownership stays with us; free it.
			nng_msg_free(nng_aio_get_msg(aio[i]));
		}
		nng_aio_free(aio[i]);
	}
	NUTS_CLOSE(req);
	NUTS_CLOSE(rep);
}
+
// Closing the socket while a receive is pending aborts the aio with
// NNG_ECLOSED.
void
test_rep_close_recv(void)
{
	nng_socket rep;
	nng_socket req;
	nng_aio *  aio;

	NUTS_PASS(nng_rep0_open(&rep));
	NUTS_PASS(nng_req0_open_raw(&req));
	NUTS_PASS(nng_socket_set_ms(rep, NNG_OPT_RECVTIMEO, 1000));
	NUTS_PASS(nng_socket_set_ms(rep, NNG_OPT_SENDTIMEO, 1000));
	NUTS_PASS(nng_socket_set_ms(req, NNG_OPT_SENDTIMEO, 1000));

	NUTS_MARRY(req, rep);
	NUTS_PASS(nng_aio_alloc(&aio, NULL, NULL));
	nng_recv_aio(rep, aio);
	NUTS_CLOSE(rep);
	NUTS_CLOSE(req);
	nng_aio_wait(aio);
	NUTS_FAIL(nng_aio_result(aio), NNG_ECLOSED);
	nng_aio_free(aio);
}
+
// Shared state between test_rep_close_recv_cb and its aio callback;
// mtx/cv guard done/result/msg, which the callback fills in.
struct rep_close_recv_cb_state {
	nng_aio *aio;
	nng_mtx *mtx;
	nng_cv * cv;
	int      done;   // set non-zero when the callback has run
	int      result; // aio result captured by the callback
	nng_msg *msg;    // message (if any) captured by the callback
};
+
// aio completion callback: snapshot the result and message under the
// lock, mark done, and wake the waiting test thread.
static void
rep_close_recv_cb(void *arg)
{
	struct rep_close_recv_cb_state *state = arg;

	nng_mtx_lock(state->mtx);
	state->result = nng_aio_result(state->aio);
	state->msg    = nng_aio_get_msg(state->aio);
	state->done   = true;
	nng_cv_wake(state->cv);
	nng_mtx_unlock(state->mtx);
}
+
// Like test_rep_close_recv, but using a completion callback: closing
// the socket must fire the callback with NNG_ECLOSED and no message.
void
test_rep_close_recv_cb(void)
{
	nng_socket                     rep;
	nng_socket                     req;
	struct rep_close_recv_cb_state state;

	memset(&state, 0, sizeof(state));
	NUTS_PASS(nng_mtx_alloc(&state.mtx));
	NUTS_PASS(nng_cv_alloc(&state.cv, state.mtx));

	NUTS_PASS(nng_rep0_open(&rep));
	NUTS_PASS(nng_req0_open_raw(&req));
	NUTS_PASS(nng_socket_set_ms(rep, NNG_OPT_RECVTIMEO, 1000));
	NUTS_PASS(nng_socket_set_ms(rep, NNG_OPT_SENDTIMEO, 1000));
	NUTS_PASS(nng_socket_set_ms(req, NNG_OPT_SENDTIMEO, 1000));

	NUTS_MARRY(req, rep);
	NUTS_PASS(nng_aio_alloc(&state.aio, rep_close_recv_cb, &state));
	nng_recv_aio(rep, state.aio);
	NUTS_CLOSE(rep);
	NUTS_CLOSE(req);
	nng_mtx_lock(state.mtx);
	while (!state.done) {
		NUTS_PASS(nng_cv_until(state.cv, nng_clock() + 1000));
	}
	nng_mtx_unlock(state.mtx);
	NUTS_TRUE(state.done != 0);
	NUTS_FAIL(nng_aio_result(state.aio), NNG_ECLOSED);
	NUTS_TRUE(nng_aio_get_msg(state.aio) == NULL);
	nng_aio_free(state.aio);
	nng_cv_free(state.cv);
	nng_mtx_free(state.mtx);
}
+
// A context receive with a zero timeout on an idle socket must fail
// immediately with NNG_ETIMEDOUT.
static void
test_rep_ctx_recv_nonblock(void)
{
	nng_socket rep;
	nng_ctx    ctx;
	nng_aio *  aio;

	NUTS_PASS(nng_rep0_open(&rep));
	NUTS_PASS(nng_ctx_open(&ctx, rep));
	NUTS_PASS(nng_aio_alloc(&aio, NULL, NULL));

	nng_aio_set_timeout(aio, 0); // Instant timeout
	nng_ctx_recv(ctx, aio);

	nng_aio_wait(aio);
	NUTS_FAIL(nng_aio_result(aio), NNG_ETIMEDOUT);
	NUTS_CLOSE(rep);
	nng_aio_free(aio);
}
+
// A reply send with a zero timeout still succeeds when the peer is
// ready to take the message.
static void
test_rep_ctx_send_nonblock(void)
{
	nng_socket rep;
	nng_socket req;
	nng_ctx    ctx;
	nng_aio *  aio;
	nng_msg *  msg;

	NUTS_PASS(nng_req0_open(&req));
	NUTS_PASS(nng_rep0_open(&rep));
	NUTS_PASS(nng_socket_set_ms(req, NNG_OPT_SENDTIMEO, 1000));
	NUTS_PASS(nng_socket_set_ms(rep, NNG_OPT_RECVTIMEO, 2000));
	NUTS_PASS(nng_socket_set_ms(rep, NNG_OPT_SENDTIMEO, 1000));
	NUTS_PASS(nng_ctx_open(&ctx, rep));
	NUTS_PASS(nng_aio_alloc(&aio, NULL, NULL));
	NUTS_MARRY(req, rep);

	NUTS_SEND(req, "SEND");
	nng_ctx_recv(ctx, aio);
	nng_aio_wait(aio);
	NUTS_PASS(nng_aio_result(aio));
	// message carries over
	msg = nng_aio_get_msg(aio);
	nng_aio_set_msg(aio, msg);
	nng_aio_set_timeout(aio, 0); // Instant timeout
	nng_ctx_send(ctx, aio);

	nng_aio_wait(aio);
	NUTS_PASS(nng_aio_result(aio));
	NUTS_CLOSE(rep);
	NUTS_CLOSE(req);
	nng_aio_free(aio);
}
+
+static void
+test_rep_ctx_send_nonblock2(void)
+{
+ nng_socket rep;
+ nng_socket req;
+ nng_ctx rep_ctx[10];
+ nng_aio * rep_aio[10];
+ int num_good = 0;
+ int num_fail = 0;
+
+ // We are going to send a bunch of requests, receive them,
+ // but then see that non-block pressure exerts for some, but
+ // that at least one non-blocking send works.
+ NUTS_PASS(nng_req0_open_raw(&req));
+ NUTS_PASS(nng_rep0_open(&rep));
+ NUTS_PASS(nng_socket_set_ms(req, NNG_OPT_SENDTIMEO, 1000));
+ NUTS_PASS(nng_socket_set_ms(rep, NNG_OPT_RECVTIMEO, 1000));
+ NUTS_PASS(nng_socket_set_ms(rep, NNG_OPT_SENDTIMEO, 1000));
+ for (int i = 0; i < 10; i++) {
+ NUTS_PASS(nng_ctx_open(&rep_ctx[i], rep));
+ NUTS_PASS(nng_aio_alloc(&rep_aio[i], NULL, NULL));
+ }
+ NUTS_MARRY(req, rep);
+
+ for (int i = 0; i < 10; i++) {
+ nng_msg *msg;
+ NUTS_PASS(nng_msg_alloc(&msg, 4));
+ NUTS_PASS(nng_msg_append_u32(msg, (unsigned) i | 0x80000000u));
+ nng_ctx_recv(rep_ctx[i], rep_aio[i]);
+ NUTS_PASS(nng_sendmsg(req, msg, 0));
+ }
+ for (int i = 0; i < 10; i++) {
+ nng_msg *msg;
+ nng_aio_wait(rep_aio[i]);
+ NUTS_PASS(nng_aio_result(rep_aio[i]));
+ msg = nng_aio_get_msg(rep_aio[i]);
+ nng_aio_set_timeout(rep_aio[i], 0);
+ nng_aio_set_msg(rep_aio[i], msg);
+ nng_ctx_send(rep_ctx[i], rep_aio[i]);
+ }
+
+ for (int i = 0; i < 10; i++) {
+ int rv;
+ nng_aio_wait(rep_aio[i]);
+ rv = nng_aio_result(rep_aio[i]);
+ if (rv == 0) {
+ num_good++;
+ } else {
+ NUTS_FAIL(rv, NNG_ETIMEDOUT);
+ nng_msg_free(nng_aio_get_msg(rep_aio[i]));
+ num_fail++;
+ }
+ }
+
+ TEST_ASSERT(num_good > 0);
+ TEST_ASSERT(num_fail > 0);
+
+ for (int i = 0; i < 10; i++) {
+ nng_aio_free(rep_aio[i]);
+ nng_ctx_close(rep_ctx[i]);
+ }
+ NUTS_CLOSE(rep);
+ NUTS_CLOSE(req);
+}
+
+static void
+test_rep_send_nonblock(void)
+{
+ nng_socket rep;
+ nng_socket req;
+ int rv;
+
+ NUTS_PASS(nng_req0_open(&req));
+ NUTS_PASS(nng_rep0_open(&rep));
+ NUTS_PASS(nng_socket_set_ms(req, NNG_OPT_SENDTIMEO, 1000));
+ NUTS_PASS(nng_socket_set_ms(rep, NNG_OPT_RECVTIMEO, 1000));
+ NUTS_PASS(nng_socket_set_ms(rep, NNG_OPT_SENDTIMEO, 1000));
+ NUTS_PASS(nng_socket_set_ms(rep, NNG_OPT_SENDTIMEO, 1000));
+ NUTS_MARRY(req, rep);
+
+ NUTS_SEND(req, "SEND");
+ NUTS_RECV(rep, "SEND");
+
+ // Use the nonblock flag
+ rv = nng_send(rep, "RECV", 5, NNG_FLAG_NONBLOCK);
+
+ NUTS_PASS(rv);
+ NUTS_RECV(req, "RECV");
+ NUTS_CLOSE(rep);
+ NUTS_CLOSE(req);
+}
+
// A malformed request (header too short to carry a request ID after
// the protocol strips it) must be discarded, so the REP receive times
// out rather than delivering garbage.
void
test_rep_recv_garbage(void)
{
	nng_socket rep;
	nng_socket req;
	nng_msg *  m;

	NUTS_PASS(nng_rep0_open(&rep));
	NUTS_PASS(nng_req0_open_raw(&req));
	NUTS_PASS(nng_socket_set_ms(rep, NNG_OPT_RECVTIMEO, 200));
	NUTS_PASS(nng_socket_set_ms(rep, NNG_OPT_SENDTIMEO, 200));
	NUTS_PASS(nng_socket_set_ms(req, NNG_OPT_SENDTIMEO, 1000));

	NUTS_MARRY(req, rep);

	NUTS_PASS(nng_msg_alloc(&m, 4));
	NUTS_PASS(nng_msg_append_u32(m, 1u)); // high bit clear: not a valid ID
	NUTS_PASS(nng_sendmsg(req, m, 0));
	NUTS_FAIL(nng_recvmsg(rep, &m, 0), NNG_ETIMEDOUT);

	NUTS_CLOSE(req);
	NUTS_CLOSE(rep);
}
+
// Test registry consumed by the nuts framework runner.
NUTS_TESTS = {
	{ "rep identity", test_rep_identity },
	{ "rep send bad state", test_rep_send_bad_state },
	{ "rep poll readable", test_rep_poll_readable },
	{ "rep poll writable", test_rep_poll_writeable },
	{ "rep context does not poll", test_rep_context_no_poll },
	{ "rep validate peer", test_rep_validate_peer },
	{ "rep double recv", test_rep_double_recv },
	{ "rep send nonblock", test_rep_send_nonblock },
	{ "rep close pipe before send", test_rep_close_pipe_before_send },
	{ "rep close pipe during send", test_rep_close_pipe_during_send },
	{ "rep recv aio ctx stopped", test_rep_ctx_recv_aio_stopped },
	{ "rep close pipe context send", test_rep_close_pipe_context_send },
	{ "rep close context send", test_rep_close_context_send },
	{ "rep close recv", test_rep_close_recv },
	{ "rep close recv cb", test_rep_close_recv_cb },
	{ "rep context send nonblock", test_rep_ctx_send_nonblock },
	{ "rep context send nonblock 2", test_rep_ctx_send_nonblock2 },
	{ "rep context recv nonblock", test_rep_ctx_recv_nonblock },
	{ "rep recv garbage", test_rep_recv_garbage },
	{ NULL, NULL },
};
diff --git a/src/sp/protocol/reqrep0/req.c b/src/sp/protocol/reqrep0/req.c
new file mode 100644
index 00000000..cb3c9395
--- /dev/null
+++ b/src/sp/protocol/reqrep0/req.c
@@ -0,0 +1,869 @@
+//
+// Copyright 2020 Staysail Systems, Inc. <info@staysail.tech>
+// Copyright 2018 Capitar IT Group BV <info@capitar.com>
+//
+// This software is supplied under the terms of the MIT License, a
+// copy of which should be located in the distribution where this
+// file was obtained (LICENSE.txt). A copy of the license may also be
+// found online at https://opensource.org/licenses/MIT.
+//
+#include <stdio.h>
+
+#include "core/nng_impl.h"
+#include "nng/protocol/reqrep0/req.h"
+
+// Request protocol. The REQ protocol is the "request" side of a
+// request-reply pair. This is useful for building RPC clients, for example.
+
typedef struct req0_pipe req0_pipe;
typedef struct req0_sock req0_sock;
typedef struct req0_ctx  req0_ctx;

// Forward declarations for functions referenced before definition.
static void req0_run_send_queue(req0_sock *, nni_list *);
static void req0_ctx_reset(req0_ctx *);
static void req0_ctx_timeout(void *);
static void req0_pipe_fini(void *);
static void req0_ctx_fini(void *);
static int  req0_ctx_init(void *, void *);

// A req0_ctx is a "context" for the request.  It uses most of the
// socket, but keeps track of its own outstanding replies, the request
// ID, and so forth.  All fields are protected by the socket lock.
struct req0_ctx {
	req0_sock *   sock;
	nni_list_node sock_node;  // node on the socket context list
	nni_list_node send_node;  // node on the send_queue
	nni_list_node pipe_node;  // node on the pipe list
	uint32_t      request_id; // request ID, without high bit set
	nni_aio *     recv_aio;   // user aio waiting to recv - only one!
	nni_aio *     send_aio;   // user aio waiting to send
	nng_msg *     req_msg;    // request message (owned by protocol)
	size_t        req_len;    // length of request message (for stats)
	nng_msg *     rep_msg;    // reply message
	nni_timer_node timer;     // resend timer
	nni_duration   retry;     // resend interval; <= 0 disables retry
	bool conn_reset; // sent message w/o retry, peer disconnect
};
+
// A req0_sock is our per-socket protocol private structure.  The mtx
// protects all lists and the request-ID map.
struct req0_sock {
	nni_duration   retry;  // default resend interval for new contexts
	bool           closed; // set once the socket is closing
	nni_atomic_int ttl;    // max hops (unused on send path here)
	req0_ctx       master; // base socket master (default context)
	nni_list       ready_pipes; // pipes able to accept a send
	nni_list       busy_pipes;  // pipes with a send in flight
	nni_list       stop_pipes;  // closed pipes awaiting stop
	nni_list       contexts;
	nni_list       send_queue; // contexts waiting to send.
	nni_id_map     requests;   // contexts by request ID
	nni_pollable   readable;
	nni_pollable   writable;
	nni_mtx        mtx;
};

// A req0_pipe is our per-pipe protocol private structure.
struct req0_pipe {
	nni_pipe *    pipe;
	req0_sock *   req;
	nni_list_node node;     // on one of the socket's pipe lists
	nni_list      contexts; // contexts with pending traffic
	bool          closed;
	nni_aio       aio_send;
	nni_aio       aio_recv;
};

static void req0_sock_fini(void *);
static void req0_send_cb(void *);
static void req0_recv_cb(void *);
+
// req0_sock_init initializes the per-socket state: the request-ID map,
// pipe/context lists, default retry interval, the master context, the
// pollables, and the default TTL.
static int
req0_sock_init(void *arg, nni_sock *sock)
{
	req0_sock *s = arg;

	NNI_ARG_UNUSED(sock);

	// Request IDs are 32 bits, with the high order bit set.
	// We start at a random point, to minimize likelihood of
	// accidental collision across restarts.
	nni_id_map_init(&s->requests, 0x80000000u, 0xffffffffu, true);

	nni_mtx_init(&s->mtx);

	NNI_LIST_INIT(&s->ready_pipes, req0_pipe, node);
	NNI_LIST_INIT(&s->busy_pipes, req0_pipe, node);
	NNI_LIST_INIT(&s->stop_pipes, req0_pipe, node);
	NNI_LIST_INIT(&s->send_queue, req0_ctx, send_node);
	NNI_LIST_INIT(&s->contexts, req0_ctx, sock_node);

	// Default resend interval (overridable per context).
	s->retry = NNI_SECOND * 60;

	(void) req0_ctx_init(&s->master, s);

	nni_pollable_init(&s->writable);
	nni_pollable_init(&s->readable);

	nni_atomic_init(&s->ttl);
	nni_atomic_set(&s->ttl, 8);
	return (0);
}
+
// req0_sock_open: nothing to do at open time; all setup happened in init.
static void
req0_sock_open(void *arg)
{
	NNI_ARG_UNUSED(arg);
}
+
// req0_sock_close marks the socket closed; pending operations observe
// the flag under the lock and abort.
static void
req0_sock_close(void *arg)
{
	req0_sock *s = arg;

	nni_mtx_lock(&s->mtx);
	s->closed = true;
	nni_mtx_unlock(&s->mtx);
}
+
// req0_sock_fini tears down the socket state.  By the time we get here
// all pipes must have been stopped and removed, which the asserts check.
static void
req0_sock_fini(void *arg)
{
	req0_sock *s = arg;

	nni_mtx_lock(&s->mtx);
	NNI_ASSERT(nni_list_empty(&s->busy_pipes));
	NNI_ASSERT(nni_list_empty(&s->stop_pipes));
	NNI_ASSERT(nni_list_empty(&s->ready_pipes));
	nni_mtx_unlock(&s->mtx);

	req0_ctx_fini(&s->master);
	nni_pollable_fini(&s->readable);
	nni_pollable_fini(&s->writable);
	nni_id_map_fini(&s->requests);
	nni_mtx_fini(&s->mtx);
}
+
// req0_pipe_stop waits for the pipe's aios to drain, then unlinks the
// pipe from whichever socket list it is on (the stop list, by now).
static void
req0_pipe_stop(void *arg)
{
	req0_pipe *p = arg;
	req0_sock *s = p->req;

	nni_aio_stop(&p->aio_recv);
	nni_aio_stop(&p->aio_send);
	nni_mtx_lock(&s->mtx);
	nni_list_node_remove(&p->node);
	nni_mtx_unlock(&s->mtx);
}
+
// req0_pipe_fini releases the pipe's aio resources.
static void
req0_pipe_fini(void *arg)
{
	req0_pipe *p = arg;

	nni_aio_fini(&p->aio_recv);
	nni_aio_fini(&p->aio_send);
}
+
// req0_pipe_init sets up per-pipe state: send/recv aios wired to our
// callbacks, the list node, and the per-pipe context list.
static int
req0_pipe_init(void *arg, nni_pipe *pipe, void *s)
{
	req0_pipe *p = arg;

	nni_aio_init(&p->aio_recv, req0_recv_cb, p);
	nni_aio_init(&p->aio_send, req0_send_cb, p);
	NNI_LIST_NODE_INIT(&p->node);
	NNI_LIST_INIT(&p->contexts, req0_ctx, pipe_node);
	p->pipe = pipe;
	p->req  = s;
	return (0);
}
+
// req0_pipe_start validates the peer protocol, makes the pipe available
// for sending (kicking the send queue), and posts the first receive.
static int
req0_pipe_start(void *arg)
{
	req0_pipe *p = arg;
	req0_sock *s = p->req;

	// Reject peers that are not REP (or compatible).
	if (nni_pipe_peer(p->pipe) != NNG_REQ0_PEER) {
		return (NNG_EPROTO);
	}

	nni_mtx_lock(&s->mtx);
	nni_list_append(&s->ready_pipes, p);
	nni_pollable_raise(&s->writable);
	req0_run_send_queue(s, NULL);
	nni_mtx_unlock(&s->mtx);

	nni_pipe_recv(p->pipe, &p->aio_recv);
	return (0);
}
+
// req0_pipe_close aborts the pipe's aios and detaches it from the
// socket.  Any contexts with requests outstanding on this pipe are
// either failed (no retry configured) or scheduled for immediate
// resend via their timers.
static void
req0_pipe_close(void *arg)
{
	req0_pipe *p = arg;
	req0_sock *s = p->req;
	req0_ctx * ctx;

	nni_aio_close(&p->aio_recv);
	nni_aio_close(&p->aio_send);

	nni_mtx_lock(&s->mtx);
	// This removes the node from either busy_pipes or ready_pipes.
	// It doesn't much matter which.  We stick the pipe on the stop
	// list, so that we can wait for that to close down safely.
	p->closed = true;
	nni_list_node_remove(&p->node);
	nni_list_append(&s->stop_pipes, p);
	if (nni_list_empty(&s->ready_pipes)) {
		nni_pollable_clear(&s->writable);
	}

	while ((ctx = nni_list_first(&p->contexts)) != NULL) {
		nni_list_remove(&p->contexts, ctx);
		nng_aio *aio;
		if (ctx->retry <= 0) {
			// If we can't retry, then just cancel the operation
			// altogether.  We should only be waiting for recv,
			// because we will already have sent if we are here.
			if ((aio = ctx->recv_aio) != NULL) {
				ctx->recv_aio = NULL;
				nni_aio_finish_error(aio, NNG_ECONNRESET);
				req0_ctx_reset(ctx);
			} else {
				// No receiver yet; remember the reset so the
				// next recv reports NNG_ECONNRESET.
				req0_ctx_reset(ctx);
				ctx->conn_reset = true;
			}
		} else {
			// Reset the timer on this so it expires immediately.
			// This is actually easier than canceling the timer and
			// running the send_queue separately.  (In particular,
			// it avoids a potential deadlock on cancelling the
			// timer.)
			nni_timer_schedule(&ctx->timer, NNI_TIME_ZERO);
		}
	}
	nni_mtx_unlock(&s->mtx);
}
+
+// For cooked mode, we use a context, and send out that way. This
+// completely bypasses the upper write queue. Each context keeps one
+// message pending; these are "scheduled" via the send_queue. The send_queue
+// is ordered, so FIFO ordering between contexts is provided for.
+
// req0_send_cb runs when a pipe-level send completes.  On failure the
// pipe is closed (the resend timer will recover the request).  On
// success the pipe returns to the ready list and the send queue is run
// again; any user send aios satisfied in the process are completed
// synchronously outside the lock.
static void
req0_send_cb(void *arg)
{
	req0_pipe *p = arg;
	req0_sock *s = p->req;
	nni_aio *  aio;
	nni_list   sent_list;

	nni_aio_list_init(&sent_list);
	if (nni_aio_result(&p->aio_send) != 0) {
		// We failed to send... clean up and deal with it.
		nni_msg_free(nni_aio_get_msg(&p->aio_send));
		nni_aio_set_msg(&p->aio_send, NULL);
		nni_pipe_close(p->pipe);
		return;
	}

	// We completed a cooked send, so we need to reinsert ourselves
	// in the ready list, and re-run the send_queue.

	nni_mtx_lock(&s->mtx);
	if (p->closed || s->closed) {
		// This occurs if the req0_pipe_close has been called.
		// In that case we don't want any more processing.
		nni_mtx_unlock(&s->mtx);
		return;
	}
	nni_list_remove(&s->busy_pipes, p);
	nni_list_append(&s->ready_pipes, p);
	if (nni_list_empty(&s->send_queue)) {
		nni_pollable_raise(&s->writable);
	}
	req0_run_send_queue(s, &sent_list);
	nni_mtx_unlock(&s->mtx);

	// Complete user sends outside the lock to avoid re-entrancy.
	while ((aio = nni_list_first(&sent_list)) != NULL) {
		nni_list_remove(&sent_list, aio);
		nni_aio_finish_sync(aio, 0, 0);
	}
}
+
// req0_recv_cb runs when a pipe-level receive completes.  It extracts
// the request ID from the body, matches it to an outstanding context,
// and either hands the reply to a waiting recv aio or stashes it on the
// context.  Unmatched or stale replies are discarded; malformed
// messages close the pipe.
static void
req0_recv_cb(void *arg)
{
	req0_pipe *p = arg;
	req0_sock *s = p->req;
	req0_ctx * ctx;
	nni_msg *  msg;
	nni_aio *  aio;
	uint32_t   id;

	if (nni_aio_result(&p->aio_recv) != 0) {
		nni_pipe_close(p->pipe);
		return;
	}

	msg = nni_aio_get_msg(&p->aio_recv);
	nni_aio_set_msg(&p->aio_recv, NULL);
	nni_msg_set_pipe(msg, nni_pipe_id(p->pipe));

	// We yank 4 bytes from front of body, and move them to the header.
	if (nni_msg_len(msg) < 4) {
		// Malformed message.
		goto malformed;
	}
	id = nni_msg_trim_u32(msg);

	// Schedule another receive while we are processing this.
	nni_mtx_lock(&s->mtx);

	// NB: If close was called, then this will just abort.
	nni_pipe_recv(p->pipe, &p->aio_recv);

	// Look for a context to receive it.
	if (((ctx = nni_id_get(&s->requests, id)) == NULL) ||
	    (ctx->send_aio != NULL) || (ctx->rep_msg != NULL)) {
		nni_mtx_unlock(&s->mtx);
		// No waiting context, we have not sent the request out to
		// the wire yet, or context already has a reply ready.
		// Discard the message.
		nni_msg_free(msg);
		return;
	}

	// We have our match, so we can remove this.
	nni_list_node_remove(&ctx->send_node);
	nni_id_remove(&s->requests, id);
	ctx->request_id = 0;
	if (ctx->req_msg != NULL) {
		nni_msg_free(ctx->req_msg);
		ctx->req_msg = NULL;
	}

	// Is there an aio waiting for us?
	if ((aio = ctx->recv_aio) != NULL) {
		ctx->recv_aio = NULL;
		nni_mtx_unlock(&s->mtx);
		nni_aio_set_msg(aio, msg);
		nni_aio_finish_sync(aio, 0, nni_msg_len(msg));
	} else {
		// No AIO, so stash msg.  Receive will pick it up later.
		ctx->rep_msg = msg;
		if (ctx == &s->master) {
			// Default context: raise socket-level readability.
			nni_pollable_raise(&s->readable);
		}
		nni_mtx_unlock(&s->mtx);
	}
	return;

malformed:
	nni_msg_free(msg);
	nni_pipe_close(p->pipe);
}
+
// req0_ctx_timeout fires when the resend timer expires: requeue the
// still-outstanding request (if any) and kick the send queue.
static void
req0_ctx_timeout(void *arg)
{
	req0_ctx * ctx = arg;
	req0_sock *s   = ctx->sock;

	nni_mtx_lock(&s->mtx);
	if ((ctx->req_msg != NULL) && (!s->closed)) {
		if (!nni_list_node_active(&ctx->send_node)) {
			nni_list_append(&s->send_queue, ctx);
		}
		req0_run_send_queue(s, NULL);
	}
	nni_mtx_unlock(&s->mtx);
}
+
// req0_ctx_init sets up a context: resend timer, back-pointer to the
// socket, the socket's default retry interval, and registration on the
// socket's context list.
static int
req0_ctx_init(void *arg, void *sock)
{
	req0_sock *s   = sock;
	req0_ctx * ctx = arg;

	nni_timer_init(&ctx->timer, req0_ctx_timeout, ctx);

	nni_mtx_lock(&s->mtx);
	ctx->sock     = s;
	ctx->recv_aio = NULL;
	ctx->retry    = s->retry;
	nni_list_append(&s->contexts, ctx);
	nni_mtx_unlock(&s->mtx);

	return (0);
}
+
// req0_ctx_fini aborts any pending user aios with NNG_ECLOSED (giving
// an unsent request message back to the send aio), resets the context
// state, and tears down the timer.
static void
req0_ctx_fini(void *arg)
{
	req0_ctx * ctx = arg;
	req0_sock *s   = ctx->sock;
	nni_aio *  aio;

	nni_mtx_lock(&s->mtx);
	if ((aio = ctx->recv_aio) != NULL) {
		ctx->recv_aio = NULL;
		nni_aio_finish_error(aio, NNG_ECLOSED);
	}
	if ((aio = ctx->send_aio) != NULL) {
		ctx->send_aio = NULL;
		// Return ownership of the request message to the caller.
		nni_aio_set_msg(aio, ctx->req_msg);
		ctx->req_msg = NULL;
		nni_aio_finish_error(aio, NNG_ECLOSED);
	}
	req0_ctx_reset(ctx);
	nni_list_remove(&s->contexts, ctx);
	nni_mtx_unlock(&s->mtx);

	// Timer teardown must happen outside the lock.
	nni_timer_cancel(&ctx->timer);
	nni_timer_fini(&ctx->timer);
}
+
// Option setter for NNG_OPT_REQ_RESENDTIME on a context.
static int
req0_ctx_set_resend_time(void *arg, const void *buf, size_t sz, nni_opt_type t)
{
	req0_ctx *ctx = arg;
	return (nni_copyin_ms(&ctx->retry, buf, sz, t));
}
+
// Option getter for NNG_OPT_REQ_RESENDTIME on a context.
static int
req0_ctx_get_resend_time(void *arg, void *buf, size_t *szp, nni_opt_type t)
{
	req0_ctx *ctx = arg;
	return (nni_copyout_ms(ctx->retry, buf, szp, t));
}
+
// req0_run_send_queue pairs queued contexts with ready pipes and starts
// the pipe-level sends.  Must be called with the socket lock held.  If
// sent_list is non-NULL, completed user send aios are collected there
// for synchronous completion by the caller (outside the lock);
// otherwise they are completed asynchronously here.
static void
req0_run_send_queue(req0_sock *s, nni_list *sent_list)
{
	req0_ctx *ctx;
	nni_aio * aio;

	// Note: This routine should be called with the socket lock held.
	while ((ctx = nni_list_first(&s->send_queue)) != NULL) {
		req0_pipe *p;

		if ((p = nni_list_first(&s->ready_pipes)) == NULL) {
			return;
		}

		// We have a place to send it, so do the send.
		// If a sending error occurs that causes the message to
		// be dropped, we rely on the resend timer to pick it up.
		// We also notify the completion callback if this is the
		// first send attempt.
		nni_list_remove(&s->send_queue, ctx);

		// Schedule a resubmit timer.  We only do this if we got
		// a pipe to send to.  Otherwise, we should get handled
		// the next time that the send_queue is run.  We don't do this
		// if the retry is "disabled" with NNG_DURATION_INFINITE.
		if (ctx->retry > 0) {
			nni_timer_schedule(
			    &ctx->timer, nni_clock() + ctx->retry);
		}

		// Put us on the pipe list of active contexts.
		// This gives the pipe a chance to kick a resubmit
		// if the pipe is removed.
		nni_list_node_remove(&ctx->pipe_node);
		nni_list_append(&p->contexts, ctx);

		nni_list_remove(&s->ready_pipes, p);
		nni_list_append(&s->busy_pipes, p);
		if (nni_list_empty(&s->ready_pipes)) {
			nni_pollable_clear(&s->writable);
		}

		if ((aio = ctx->send_aio) != NULL) {
			ctx->send_aio = NULL;
			nni_aio_bump_count(aio, ctx->req_len);
			// If the list was passed in, we want to do a
			// synchronous completion later.
			if (sent_list != NULL) {
				nni_list_append(sent_list, aio);
			} else {
				nni_aio_finish(aio, 0, 0);
			}
		}

		// At this point, we will never give this message back to
		// to the user, so we don't have to worry about making it
		// unique.  We can freely clone it.
		nni_msg_clone(ctx->req_msg);
		nni_aio_set_msg(&p->aio_send, ctx->req_msg);
		nni_pipe_send(p->pipe, &p->aio_send);
	}
}
+
+void
+req0_ctx_reset(req0_ctx *ctx)
+{
+ req0_sock *s = ctx->sock;
+ // Call with sock lock held!
+
+ // We cannot safely "wait" using nni_timer_cancel, but this removes
+ // any scheduled timer activation. If the timeout is already running
+ // concurrently, it will still run. It should do nothing, because
+ // we toss the request. There is still a very narrow race if the
+ // timeout fires, but doesn't actually start running before we
+ // both finish this function, *and* manage to reschedule another
+ // request. The consequence of that occurring is that the request
+ // will be emitted on the wire twice. This is not actually tragic.
+ nni_timer_schedule(&ctx->timer, NNI_TIME_NEVER);
+
+ nni_list_node_remove(&ctx->pipe_node);
+ nni_list_node_remove(&ctx->send_node);
+ if (ctx->request_id != 0) {
+ nni_id_remove(&s->requests, ctx->request_id);
+ ctx->request_id = 0;
+ }
+ if (ctx->req_msg != NULL) {
+ nni_msg_free(ctx->req_msg);
+ ctx->req_msg = NULL;
+ }
+ if (ctx->rep_msg != NULL) {
+ nni_msg_free(ctx->rep_msg);
+ ctx->rep_msg = NULL;
+ }
+ ctx->conn_reset = false;
+}
+
// Cancellation handler for a pending context receive: aborts the whole
// request state machine, so the user must issue a new send to restart.
static void
req0_ctx_cancel_recv(nni_aio *aio, void *arg, int rv)
{
	req0_ctx * ctx = arg;
	req0_sock *s   = ctx->sock;

	nni_mtx_lock(&s->mtx);
	if (ctx->recv_aio == aio) {
		ctx->recv_aio = NULL;

		// Cancellation of a pending receive is treated as aborting the
		// entire state machine.  This allows us to preserve the
		// semantic of exactly one receive operation per send
		// operation, and should be the least surprising for users.  The
		// main consequence is that if a receive operation is completed
		// (in error or otherwise), the user must submit a new send
		// operation to restart the state machine.
		req0_ctx_reset(ctx);

		nni_aio_finish_error(aio, rv);
	}
	nni_mtx_unlock(&s->mtx);
}
+
+// req0_ctx_recv receives the reply for the context's outstanding request.
+// Fails with NNG_ESTATE if no request is in flight or a receive is already
+// pending; completes immediately when a reply has already arrived,
+// otherwise parks the aio until one does.
+static void
+req0_ctx_recv(void *arg, nni_aio *aio)
+{
+	req0_ctx * ctx = arg;
+	req0_sock *s = ctx->sock;
+	nni_msg * msg;
+
+	if (nni_aio_begin(aio) != 0) {
+		return;
+	}
+	nni_mtx_lock(&s->mtx);
+	if ((ctx->recv_aio != NULL) ||
+	    ((ctx->req_msg == NULL) && (ctx->rep_msg == NULL))) {
+		// We have already got a pending receive or have not
+		// tried to send a request yet.
+		// Either of these violate our basic state assumptions.
+		int rv;
+		if (ctx->conn_reset) {
+			// conn_reset is latched (elsewhere) when the peer
+			// went away without answering; report it once as
+			// NNG_ECONNRESET, then clear the latch.
+			ctx->conn_reset = false;
+			rv = NNG_ECONNRESET;
+		} else {
+			rv = NNG_ESTATE;
+		}
+		nni_mtx_unlock(&s->mtx);
+		nni_aio_finish_error(aio, rv);
+		return;
+	}
+
+	if ((msg = ctx->rep_msg) == NULL) {
+		// No reply yet; park the aio (with our cancel handler)
+		// until req0_recv_cb delivers one.
+		int rv;
+		rv = nni_aio_schedule(aio, req0_ctx_cancel_recv, ctx);
+		if (rv != 0) {
+			nni_mtx_unlock(&s->mtx);
+			nni_aio_finish_error(aio, rv);
+			return;
+		}
+		ctx->recv_aio = aio;
+		nni_mtx_unlock(&s->mtx);
+		return;
+	}
+
+	ctx->rep_msg = NULL;
+
+	// We have got a message to pass up, yay!
+	nni_aio_set_msg(aio, msg);
+	if (ctx == &s->master) {
+		// The socket-level context drives the pollable FD state.
+		nni_pollable_clear(&s->readable);
+	}
+	nni_mtx_unlock(&s->mtx);
+	nni_aio_finish(aio, 0, nni_msg_len(msg));
+}
+
+// Cancellation handler for a send that is still waiting in the send
+// queue.  Returns the unsent message to the aio (with its header
+// cleared) so ownership passes back to the caller, then aborts the
+// context's state machine.
+static void
+req0_ctx_cancel_send(nni_aio *aio, void *arg, int rv)
+{
+	req0_ctx * ctx = arg;
+	req0_sock *s = ctx->sock;
+
+	nni_mtx_lock(&s->mtx);
+	if (ctx->send_aio == aio) {
+		// There should not be a pending reply, because we canceled
+		// it while we were waiting.
+		NNI_ASSERT(ctx->recv_aio == NULL);
+		ctx->send_aio = NULL;
+		// Restore the message back to the aio.
+		nni_aio_set_msg(aio, ctx->req_msg);
+		nni_msg_header_clear(ctx->req_msg);
+		ctx->req_msg = NULL;
+
+		// Cancellation of a pending send is treated as aborting the
+		// entire state machine. This allows us to preserve the
+		// semantic of exactly one receive operation per send
+		// operation, and should be the least surprising for users. The
+		// main consequence is that if an operation is aborted
+		// (in error or otherwise), the user must submit a new send
+		// operation to restart the state machine.
+		req0_ctx_reset(ctx);
+
+		nni_aio_finish_error(aio, rv);
+	}
+	nni_mtx_unlock(&s->mtx);
+}
+
+// req0_ctx_send starts a new request transaction for the context.  Any
+// previous request (and any pending receive) is canceled first, then a
+// fresh request ID is allocated and stamped into the message header, and
+// the context is queued so the request goes out on the next ready pipe.
+static void
+req0_ctx_send(void *arg, nni_aio *aio)
+{
+	req0_ctx * ctx = arg;
+	req0_sock *s = ctx->sock;
+	nng_msg * msg = nni_aio_get_msg(aio);
+	int rv;
+
+	if (nni_aio_begin(aio) != 0) {
+		return;
+	}
+	nni_mtx_lock(&s->mtx);
+	if (s->closed) {
+		nni_mtx_unlock(&s->mtx);
+		nni_aio_finish_error(aio, NNG_ECLOSED);
+		return;
+	}
+	// Sending a new request cancels the old one, including any
+	// outstanding reply.
+	if (ctx->recv_aio != NULL) {
+		nni_aio_finish_error(ctx->recv_aio, NNG_ECANCELED);
+		ctx->recv_aio = NULL;
+	}
+	if (ctx->send_aio != NULL) {
+		// Give the old, still-unsent message back to its aio
+		// (ownership returns to that caller) before failing it.
+		nni_aio_set_msg(ctx->send_aio, ctx->req_msg);
+		nni_msg_header_clear(ctx->req_msg);
+		ctx->req_msg = NULL;
+		nni_aio_finish_error(ctx->send_aio, NNG_ECANCELED);
+		ctx->send_aio = NULL;
+		nni_list_remove(&s->send_queue, ctx);
+	}
+
+	// This resets the entire state machine.
+	req0_ctx_reset(ctx);
+
+	// Insert us on the per ID hash list, so that receives can find us.
+	if ((rv = nni_id_alloc(&s->requests, &ctx->request_id, ctx)) != 0) {
+		nni_mtx_unlock(&s->mtx);
+		nni_aio_finish_error(aio, rv);
+		return;
+	}
+	nni_msg_header_clear(msg);
+	nni_msg_header_append_u32(msg, ctx->request_id);
+
+	// If no pipes are ready, and the request was a poll (no background
+	// schedule), then fail it. Should be NNG_ETIMEDOUT.
+	// Note: if scheduling failed but a pipe IS ready, we continue;
+	// the send then completes synchronously in req0_run_send_queue.
+	rv = nni_aio_schedule(aio, req0_ctx_cancel_send, ctx);
+	if ((rv != 0) && (nni_list_empty(&s->ready_pipes))) {
+		nni_id_remove(&s->requests, ctx->request_id);
+		nni_mtx_unlock(&s->mtx);
+		nni_aio_finish_error(aio, rv);
+		return;
+	}
+	// Take ownership of the message; req_len is remembered so the
+	// completion can report the number of bytes sent.
+	ctx->req_len = nni_msg_len(msg);
+	ctx->req_msg = msg;
+	ctx->send_aio = aio;
+	nni_aio_set_msg(aio, NULL);
+
+	// Stick us on the send_queue list.
+	nni_list_append(&s->send_queue, ctx);
+
+	req0_run_send_queue(s, NULL);
+	nni_mtx_unlock(&s->mtx);
+}
+
+// Socket-level send and receive simply delegate to the socket's
+// built-in "master" context, so sockets and contexts share one code
+// path for the request/reply state machine.
+static void
+req0_sock_send(void *arg, nni_aio *aio)
+{
+	req0_sock *s = arg;
+	req0_ctx_send(&s->master, aio);
+}
+
+static void
+req0_sock_recv(void *arg, nni_aio *aio)
+{
+	req0_sock *s = arg;
+	req0_ctx_recv(&s->master, aio);
+}
+
+// Set NNG_OPT_MAXTTL.  Valid range is 1..NNI_MAX_MAX_TTL; the value is
+// stored atomically so readers need not take the socket lock.
+static int
+req0_sock_set_max_ttl(void *arg, const void *buf, size_t sz, nni_opt_type t)
+{
+	req0_sock *s = arg;
+	int ttl;
+	int rv;
+	if ((rv = nni_copyin_int(&ttl, buf, sz, 1, NNI_MAX_MAX_TTL, t)) == 0) {
+		nni_atomic_set(&s->ttl, ttl);
+	}
+	return (rv);
+}
+
+// Get NNG_OPT_MAXTTL.
+static int
+req0_sock_get_max_ttl(void *arg, void *buf, size_t *szp, nni_opt_type t)
+{
+	req0_sock *s = arg;
+	return (nni_copyout_int(nni_atomic_get(&s->ttl), buf, szp, t));
+}
+
+// Set NNG_OPT_REQ_RESENDTIME at socket level: applied to the master
+// context, and cached in s->retry (NOTE(review): presumably the default
+// for newly created contexts — confirm against req0_ctx_init).
+static int
+req0_sock_set_resend_time(
+    void *arg, const void *buf, size_t sz, nni_opt_type t)
+{
+	req0_sock *s = arg;
+	int rv;
+	rv = req0_ctx_set_resend_time(&s->master, buf, sz, t);
+	s->retry = s->master.retry;
+	return (rv);
+}
+
+// Get NNG_OPT_REQ_RESENDTIME (reads the master context's value).
+static int
+req0_sock_get_resend_time(void *arg, void *buf, size_t *szp, nni_opt_type t)
+{
+	req0_sock *s = arg;
+	return (req0_ctx_get_resend_time(&s->master, buf, szp, t));
+}
+
+// Get NNG_OPT_SENDFD: a pollable file descriptor that signals when the
+// socket can accept a new request (a ready pipe exists).
+static int
+req0_sock_get_send_fd(void *arg, void *buf, size_t *szp, nni_opt_type t)
+{
+	req0_sock *s = arg;
+	int rv;
+	int fd;
+
+	if ((rv = nni_pollable_getfd(&s->writable, &fd)) != 0) {
+		return (rv);
+	}
+	return (nni_copyout_int(fd, buf, szp, t));
+}
+
+// Get NNG_OPT_RECVFD: a pollable file descriptor that signals when a
+// reply is available on the socket (master) context.
+static int
+req0_sock_get_recv_fd(void *arg, void *buf, size_t *szp, nni_opt_type t)
+{
+	req0_sock *s = arg;
+	int rv;
+	int fd;
+
+	if ((rv = nni_pollable_getfd(&s->readable, &fd)) != 0) {
+		return (rv);
+	}
+
+	return (nni_copyout_int(fd, buf, szp, t));
+}
+
+static nni_proto_pipe_ops req0_pipe_ops = {
+ .pipe_size = sizeof(req0_pipe),
+ .pipe_init = req0_pipe_init,
+ .pipe_fini = req0_pipe_fini,
+ .pipe_start = req0_pipe_start,
+ .pipe_close = req0_pipe_close,
+ .pipe_stop = req0_pipe_stop,
+};
+
+static nni_option req0_ctx_options[] = {
+ {
+ .o_name = NNG_OPT_REQ_RESENDTIME,
+ .o_get = req0_ctx_get_resend_time,
+ .o_set = req0_ctx_set_resend_time,
+ },
+ {
+ .o_name = NULL,
+ },
+};
+
+static nni_proto_ctx_ops req0_ctx_ops = {
+ .ctx_size = sizeof(req0_ctx),
+ .ctx_init = req0_ctx_init,
+ .ctx_fini = req0_ctx_fini,
+ .ctx_recv = req0_ctx_recv,
+ .ctx_send = req0_ctx_send,
+ .ctx_options = req0_ctx_options,
+};
+
+static nni_option req0_sock_options[] = {
+ {
+ .o_name = NNG_OPT_MAXTTL,
+ .o_get = req0_sock_get_max_ttl,
+ .o_set = req0_sock_set_max_ttl,
+ },
+ {
+ .o_name = NNG_OPT_REQ_RESENDTIME,
+ .o_get = req0_sock_get_resend_time,
+ .o_set = req0_sock_set_resend_time,
+ },
+ {
+ .o_name = NNG_OPT_RECVFD,
+ .o_get = req0_sock_get_recv_fd,
+ },
+ {
+ .o_name = NNG_OPT_SENDFD,
+ .o_get = req0_sock_get_send_fd,
+ },
+ // terminate list
+ {
+ .o_name = NULL,
+ },
+};
+
+static nni_proto_sock_ops req0_sock_ops = {
+ .sock_size = sizeof(req0_sock),
+ .sock_init = req0_sock_init,
+ .sock_fini = req0_sock_fini,
+ .sock_open = req0_sock_open,
+ .sock_close = req0_sock_close,
+ .sock_options = req0_sock_options,
+ .sock_send = req0_sock_send,
+ .sock_recv = req0_sock_recv,
+};
+
+static nni_proto req0_proto = {
+ .proto_version = NNI_PROTOCOL_VERSION,
+ .proto_self = { NNG_REQ0_SELF, NNG_REQ0_SELF_NAME },
+ .proto_peer = { NNG_REQ0_PEER, NNG_REQ0_PEER_NAME },
+ .proto_flags = NNI_PROTO_FLAG_SNDRCV,
+ .proto_sock_ops = &req0_sock_ops,
+ .proto_pipe_ops = &req0_pipe_ops,
+ .proto_ctx_ops = &req0_ctx_ops,
+};
+
+// Public constructor: open a REQ (version 0) socket.
+int
+nng_req0_open(nng_socket *sock)
+{
+	return (nni_proto_open(sock, &req0_proto));
+}
diff --git a/src/sp/protocol/reqrep0/req_test.c b/src/sp/protocol/reqrep0/req_test.c
new file mode 100644
index 00000000..fb78efa0
--- /dev/null
+++ b/src/sp/protocol/reqrep0/req_test.c
@@ -0,0 +1,968 @@
+//
+// Copyright 2020 Staysail Systems, Inc. <info@staysail.tech>
+// Copyright 2018 Capitar IT Group BV <info@capitar.com>
+//
+// This software is supplied under the terms of the MIT License, a
+// copy of which should be located in the distribution where this
+// file was obtained (LICENSE.txt). A copy of the license may also be
+// found online at https://opensource.org/licenses/MIT.
+//
+
+#include <nuts.h>
+
+static void
+test_req_identity(void)
+{
+ nng_socket s;
+ int p;
+ char * n;
+
+ NUTS_PASS(nng_req0_open(&s));
+ NUTS_PASS(nng_socket_get_int(s, NNG_OPT_PROTO, &p));
+ NUTS_TRUE(p == NNG_REQ0_SELF);
+ NUTS_PASS(nng_socket_get_int(s, NNG_OPT_PEER, &p));
+ NUTS_TRUE(p == NNG_REQ0_PEER); // 49
+ NUTS_PASS(nng_socket_get_string(s, NNG_OPT_PROTONAME, &n));
+ NUTS_MATCH(n, NNG_REQ0_SELF_NAME);
+ nng_strfree(n);
+ NUTS_PASS(nng_socket_get_string(s, NNG_OPT_PEERNAME, &n));
+ NUTS_MATCH(n, NNG_REQ0_PEER_NAME);
+ nng_strfree(n);
+ NUTS_CLOSE(s);
+}
+
+static void
+test_req_ttl_option(void)
+{
+ nng_socket req;
+ int v;
+ bool b;
+ size_t sz;
+ const char *opt = NNG_OPT_MAXTTL;
+
+ NUTS_PASS(nng_req0_open(&req));
+
+ NUTS_PASS(nng_socket_set_int(req, opt, 1));
+ NUTS_FAIL(nng_socket_set_int(req, opt, 0), NNG_EINVAL);
+ NUTS_FAIL(nng_socket_set_int(req, opt, -1), NNG_EINVAL);
+ // This test will fail if the NNI_MAX_MAX_TTL is changed from the
+ // builtin default of 15.
+ NUTS_FAIL(nng_socket_set_int(req, opt, 16), NNG_EINVAL);
+ NUTS_FAIL(nng_socket_set_int(req, opt, 256), NNG_EINVAL);
+ NUTS_PASS(nng_socket_set_int(req, opt, 3));
+ NUTS_PASS(nng_socket_get_int(req, opt, &v));
+ NUTS_TRUE(v == 3);
+ v = 0;
+ sz = sizeof(v);
+ NUTS_PASS(nng_socket_get(req, opt, &v, &sz));
+ NUTS_TRUE(v == 3);
+ NUTS_TRUE(sz == sizeof(v));
+
+ NUTS_FAIL(nng_socket_set(req, opt, "", 1), NNG_EINVAL);
+ sz = 1;
+ NUTS_FAIL(nng_socket_get(req, opt, &v, &sz), NNG_EINVAL);
+ NUTS_FAIL(nng_socket_set_bool(req, opt, true), NNG_EBADTYPE);
+ NUTS_FAIL(nng_socket_get_bool(req, opt, &b), NNG_EBADTYPE);
+
+ NUTS_CLOSE(req);
+}
+
+static void
+test_req_resend_option(void)
+{
+ nng_socket req;
+ nng_duration d;
+ bool b;
+ size_t sz = sizeof(b);
+ const char * opt = NNG_OPT_REQ_RESENDTIME;
+
+ NUTS_PASS(nng_req0_open(&req));
+
+ NUTS_TRUE(nng_socket_set_ms(req, opt, 10) == 0);
+ NUTS_FAIL(nng_socket_set(req, opt, "", 1), NNG_EINVAL);
+ NUTS_FAIL(nng_socket_get(req, opt, &b, &sz), NNG_EINVAL);
+ NUTS_FAIL(nng_socket_set_bool(req, opt, true), NNG_EBADTYPE);
+ NUTS_FAIL(nng_socket_get_bool(req, opt, &b), NNG_EBADTYPE);
+
+ NUTS_PASS(nng_socket_get_ms(req, opt, &d));
+ NUTS_TRUE(d == 10);
+ NUTS_CLOSE(req);
+}
+
+void
+test_req_recv_bad_state(void)
+{
+ nng_socket req;
+ nng_msg * msg = NULL;
+
+ NUTS_TRUE(nng_req0_open(&req) == 0);
+ NUTS_TRUE(nng_recvmsg(req, &msg, 0) == NNG_ESTATE);
+ NUTS_NULL(msg);
+ NUTS_CLOSE(req);
+}
+
+// Verify that a reply whose header has been mangled (request ID removed)
+// is silently discarded by the REQ socket, so the receive times out.
+static void
+test_req_recv_garbage(void)
+{
+	nng_socket rep;
+	nng_socket req;
+	nng_msg * m;
+	uint32_t req_id;
+
+	NUTS_PASS(nng_rep0_open_raw(&rep));
+	NUTS_PASS(nng_req0_open(&req));
+	NUTS_PASS(nng_socket_set_ms(req, NNG_OPT_RECVTIMEO, 100));
+	NUTS_PASS(nng_socket_set_ms(req, NNG_OPT_SENDTIMEO, 1000));
+	NUTS_PASS(nng_socket_set_ms(rep, NNG_OPT_SENDTIMEO, 1000));
+	// Was a duplicated NNG_OPT_SENDTIMEO; set the receive timeout
+	// on the rep socket as clearly intended.
+	NUTS_PASS(nng_socket_set_ms(rep, NNG_OPT_RECVTIMEO, 1000));
+
+	NUTS_MARRY(req, rep);
+
+	NUTS_PASS(nng_msg_alloc(&m, 0));
+	NUTS_PASS(nng_sendmsg(req, m, 0));
+
+	NUTS_PASS(nng_recvmsg(rep, &m, 0));
+
+	// The message will have a header that contains the 32-bit pipe ID,
+	// followed by the 32-bit request ID. We will discard the request
+	// ID before sending it out.
+	NUTS_TRUE(nng_msg_header_len(m) == 8);
+	NUTS_PASS(nng_msg_header_chop_u32(m, &req_id));
+
+	NUTS_PASS(nng_sendmsg(rep, m, 0));
+	NUTS_FAIL(nng_recvmsg(req, &m, 0), NNG_ETIMEDOUT);
+
+	NUTS_CLOSE(req);
+	NUTS_CLOSE(rep);
+}
+
+#define SECOND 1000
+
+void
+test_req_rep_exchange(void)
+{
+ nng_socket req;
+ nng_socket rep;
+
+ NUTS_TRUE(nng_req0_open(&req) == 0);
+ NUTS_TRUE(nng_rep0_open(&rep) == 0);
+
+ NUTS_TRUE(nng_socket_set_ms(req, NNG_OPT_RECVTIMEO, SECOND) == 0);
+ NUTS_TRUE(nng_socket_set_ms(rep, NNG_OPT_RECVTIMEO, SECOND) == 0);
+ NUTS_TRUE(nng_socket_set_ms(req, NNG_OPT_SENDTIMEO, SECOND) == 0);
+ NUTS_TRUE(nng_socket_set_ms(rep, NNG_OPT_SENDTIMEO, SECOND) == 0);
+
+ NUTS_MARRY(rep, req);
+
+ NUTS_SEND(req, "ping");
+ NUTS_RECV(rep, "ping");
+ NUTS_SEND(rep, "pong");
+ NUTS_RECV(req, "pong");
+
+ NUTS_CLOSE(req);
+ NUTS_CLOSE(rep);
+}
+
+void
+test_req_resend(void)
+{
+ nng_socket req;
+ nng_socket rep;
+
+ NUTS_PASS(nng_req0_open(&req));
+ NUTS_PASS(nng_rep0_open(&rep));
+
+ NUTS_PASS(nng_socket_set_ms(req, NNG_OPT_RECVTIMEO, SECOND));
+ NUTS_PASS(nng_socket_set_ms(rep, NNG_OPT_RECVTIMEO, SECOND));
+ NUTS_PASS(nng_socket_set_ms(req, NNG_OPT_SENDTIMEO, SECOND));
+ NUTS_PASS(nng_socket_set_ms(rep, NNG_OPT_SENDTIMEO, SECOND));
+ NUTS_PASS(nng_socket_set_ms(req, NNG_OPT_REQ_RESENDTIME, 10));
+
+ NUTS_MARRY(rep, req);
+
+ NUTS_SEND(req, "ping");
+ NUTS_RECV(rep, "ping");
+ NUTS_RECV(rep, "ping");
+ NUTS_RECV(rep, "ping");
+
+ NUTS_CLOSE(req);
+ NUTS_CLOSE(rep);
+}
+
+void
+test_req_resend_reconnect(void)
+{
+ nng_socket req;
+ nng_socket rep1;
+ nng_socket rep2;
+
+ NUTS_PASS(nng_req0_open(&req));
+ NUTS_PASS(nng_rep0_open(&rep1));
+ NUTS_PASS(nng_rep0_open(&rep2));
+
+ NUTS_PASS(nng_socket_set_ms(req, NNG_OPT_RECVTIMEO, SECOND));
+ NUTS_PASS(nng_socket_set_ms(rep1, NNG_OPT_RECVTIMEO, SECOND));
+ NUTS_PASS(nng_socket_set_ms(rep2, NNG_OPT_RECVTIMEO, SECOND));
+ NUTS_PASS(nng_socket_set_ms(req, NNG_OPT_SENDTIMEO, SECOND));
+ NUTS_PASS(nng_socket_set_ms(rep1, NNG_OPT_SENDTIMEO, SECOND));
+ NUTS_PASS(nng_socket_set_ms(rep2, NNG_OPT_SENDTIMEO, SECOND));
+ // We intentionally set the retry time long; that way we only see
+ // the retry from loss of our original peer.
+ NUTS_PASS(nng_socket_set_ms(req, NNG_OPT_REQ_RESENDTIME, 60 * SECOND));
+
+ NUTS_MARRY(rep1, req);
+
+ NUTS_SEND(req, "ping");
+ NUTS_RECV(rep1, "ping");
+
+ NUTS_CLOSE(rep1);
+ NUTS_MARRY(rep2, req);
+
+ NUTS_RECV(rep2, "ping");
+ NUTS_SEND(rep2, "rep2");
+ NUTS_RECV(req, "rep2");
+
+ NUTS_CLOSE(req);
+ NUTS_CLOSE(rep2);
+}
+
+void
+test_req_resend_disconnect(void)
+{
+ nng_socket req;
+ nng_socket rep1;
+ nng_socket rep2;
+
+ NUTS_PASS(nng_req0_open(&req));
+ NUTS_PASS(nng_rep0_open(&rep1));
+ NUTS_PASS(nng_rep0_open(&rep2));
+
+ NUTS_PASS(nng_socket_set_ms(req, NNG_OPT_RECVTIMEO, SECOND));
+ NUTS_PASS(nng_socket_set_ms(rep1, NNG_OPT_RECVTIMEO, SECOND));
+ NUTS_PASS(nng_socket_set_ms(rep2, NNG_OPT_RECVTIMEO, SECOND));
+ NUTS_PASS(nng_socket_set_ms(req, NNG_OPT_SENDTIMEO, SECOND));
+ NUTS_PASS(nng_socket_set_ms(rep1, NNG_OPT_SENDTIMEO, SECOND));
+ NUTS_PASS(nng_socket_set_ms(rep2, NNG_OPT_SENDTIMEO, SECOND));
+ // We intentionally set the retry time long; that way we only see
+ // the retry from loss of our original peer.
+ NUTS_PASS(nng_socket_set_ms(req, NNG_OPT_REQ_RESENDTIME, 60 * SECOND));
+
+ NUTS_MARRY(rep1, req);
+ NUTS_SEND(req, "ping");
+ NUTS_RECV(rep1, "ping");
+
+ NUTS_MARRY(rep2, req);
+ NUTS_CLOSE(rep1);
+
+ NUTS_RECV(rep2, "ping");
+ NUTS_SEND(rep2, "rep2");
+ NUTS_RECV(req, "rep2");
+
+ NUTS_CLOSE(req);
+ NUTS_CLOSE(rep2);
+}
+
+void
+test_req_disconnect_no_retry(void)
+{
+ nng_socket req;
+ nng_socket rep1;
+ nng_socket rep2;
+
+ NUTS_PASS(nng_req0_open(&req));
+ NUTS_PASS(nng_rep0_open(&rep1));
+ NUTS_PASS(nng_rep0_open(&rep2));
+
+ NUTS_PASS(nng_socket_set_ms(req, NNG_OPT_RECVTIMEO, SECOND));
+ NUTS_PASS(nng_socket_set_ms(rep1, NNG_OPT_RECVTIMEO, SECOND));
+ NUTS_PASS(nng_socket_set_ms(rep2, NNG_OPT_RECVTIMEO, SECOND));
+ NUTS_PASS(nng_socket_set_ms(req, NNG_OPT_SENDTIMEO, SECOND));
+ NUTS_PASS(nng_socket_set_ms(rep1, NNG_OPT_SENDTIMEO, SECOND / 10));
+ // Setting the resend time to zero so we will force an error
+ // if the peer disconnects without sending us an answer.
+ NUTS_PASS(nng_socket_set_ms(req, NNG_OPT_REQ_RESENDTIME, 0));
+
+ NUTS_MARRY(rep1, req);
+ NUTS_SEND(req, "ping");
+ NUTS_RECV(rep1, "ping");
+
+ NUTS_MARRY(rep2, req);
+ NUTS_CLOSE(rep1);
+
+ nng_msg *msg = NULL;
+ NUTS_FAIL(nng_recvmsg(req, &msg, 0), NNG_ECONNRESET);
+ NUTS_FAIL(nng_recvmsg(rep2, &msg, 0), NNG_ETIMEDOUT);
+
+ NUTS_CLOSE(req);
+ NUTS_CLOSE(rep2);
+}
+
+void
+test_req_disconnect_abort(void)
+{
+ nng_socket req;
+ nng_socket rep1;
+ nng_socket rep2;
+ nng_aio * aio;
+
+ NUTS_PASS(nng_req0_open(&req));
+ NUTS_PASS(nng_rep0_open(&rep1));
+ NUTS_PASS(nng_rep0_open(&rep2));
+ NUTS_PASS(nng_aio_alloc(&aio, 0, 0));
+
+ NUTS_PASS(nng_socket_set_ms(req, NNG_OPT_RECVTIMEO, SECOND));
+ NUTS_PASS(nng_socket_set_ms(rep1, NNG_OPT_RECVTIMEO, SECOND));
+ NUTS_PASS(nng_socket_set_ms(rep2, NNG_OPT_RECVTIMEO, SECOND));
+ NUTS_PASS(nng_socket_set_ms(req, NNG_OPT_SENDTIMEO, SECOND));
+ NUTS_PASS(nng_socket_set_ms(rep1, NNG_OPT_SENDTIMEO, SECOND / 10));
+ // Setting the resend time to zero so we will force an error
+ // if the peer disconnects without sending us an answer.
+ NUTS_PASS(nng_socket_set_ms(req, NNG_OPT_REQ_RESENDTIME, 0));
+
+ NUTS_MARRY(rep1, req);
+ NUTS_SEND(req, "ping");
+ NUTS_RECV(rep1, "ping");
+ nng_recv_aio(req, aio);
+
+ NUTS_MARRY(rep2, req);
+ NUTS_CLOSE(rep1);
+
+ nng_msg *msg = NULL;
+ nng_aio_wait(aio);
+ NUTS_FAIL(nng_aio_result(aio), NNG_ECONNRESET);
+ NUTS_FAIL(nng_recvmsg(rep2, &msg, 0), NNG_ETIMEDOUT);
+ nng_aio_free(aio);
+
+ NUTS_CLOSE(req);
+ NUTS_CLOSE(rep2);
+}
+
+void
+test_req_cancel(void)
+{
+ nng_duration retry = SECOND;
+ nng_socket req;
+ nng_socket rep;
+
+ NUTS_PASS(nng_rep_open(&rep));
+ NUTS_PASS(nng_req_open(&req));
+
+ NUTS_PASS(nng_socket_set_ms(req, NNG_OPT_RECVTIMEO, SECOND));
+ NUTS_PASS(nng_socket_set_ms(rep, NNG_OPT_RECVTIMEO, SECOND));
+ NUTS_PASS(nng_socket_set_ms(req, NNG_OPT_SENDTIMEO, 5 * SECOND));
+ NUTS_PASS(nng_socket_set_ms(rep, NNG_OPT_SENDTIMEO, 5 * SECOND));
+ NUTS_PASS(nng_socket_set_ms(req, NNG_OPT_REQ_RESENDTIME, retry));
+ NUTS_PASS(nng_socket_set_int(req, NNG_OPT_SENDBUF, 16));
+
+ NUTS_MARRY(rep, req);
+
+ // Send req #1 (abc).
+ NUTS_SEND(req, "abc");
+
+ // Sleep a bit. This is so that we ensure that our request gets
+ // to the far side. (If we cancel too fast, then our outgoing send
+ // will be canceled before it gets to the peer.)
+ NUTS_SLEEP(100);
+
+	// Send the next request ("def"). Note that
+ // the REP side server will have already buffered the receive
+ // request, and should simply be waiting for us to reply to abc.
+ NUTS_SEND(req, "def");
+
+ // Receive the first request (should be abc) on the REP server.
+ NUTS_RECV(rep, "abc");
+
+ // REP sends the reply to first command. This will be discarded
+ // by the REQ socket.
+ NUTS_SEND(rep, "abc");
+
+ // Now get the next command from the REP; should be "def".
+ NUTS_RECV(rep, "def");
+
+ // And send it back to REQ.
+ NUTS_SEND(rep, "def");
+
+ // And we got back only the second result.
+ NUTS_RECV(req, "def");
+
+ NUTS_CLOSE(req);
+ NUTS_CLOSE(rep);
+}
+
+void
+test_req_cancel_abort_recv(void)
+{
+ nng_aio * aio;
+ nng_duration retry = SECOND * 10; // 10s (kind of never)
+ nng_socket req;
+ nng_socket rep;
+
+ NUTS_PASS(nng_rep_open(&rep));
+ NUTS_PASS(nng_req_open(&req));
+ NUTS_PASS(nng_aio_alloc(&aio, NULL, NULL));
+
+ NUTS_PASS(nng_socket_set_ms(req, NNG_OPT_REQ_RESENDTIME, retry));
+ NUTS_PASS(nng_socket_set_int(req, NNG_OPT_SENDBUF, 16));
+ NUTS_PASS(nng_socket_set_ms(req, NNG_OPT_RECVTIMEO, 5 * SECOND));
+ NUTS_PASS(nng_socket_set_ms(rep, NNG_OPT_RECVTIMEO, 5 * SECOND));
+ NUTS_PASS(nng_socket_set_ms(req, NNG_OPT_SENDTIMEO, 5 * SECOND));
+ NUTS_PASS(nng_socket_set_ms(rep, NNG_OPT_SENDTIMEO, 5 * SECOND));
+
+ NUTS_MARRY(rep, req);
+
+ // Send req #1 (abc).
+ NUTS_SEND(req, "abc");
+
+	// Wait for it to get to the other side.
+ NUTS_SLEEP(100);
+
+ nng_aio_set_timeout(aio, 5 * SECOND);
+ nng_recv_aio(req, aio);
+
+ // Give time for this recv to post properly.
+ NUTS_SLEEP(100);
+
+	// Send the next request ("def"). Note that
+ // the REP side server will have already buffered the receive
+ // request, and should simply be waiting for us to reply to
+ // abc.
+ NUTS_SEND(req, "def");
+
+ // Our pending I/O should have been canceled.
+ nng_aio_wait(aio);
+ NUTS_FAIL(nng_aio_result(aio), NNG_ECANCELED);
+
+ // Receive the first request (should be abc) on the REP server.
+ NUTS_RECV(rep, "abc");
+
+ // REP sends the reply to first command. This will be
+ // discarded by the REQ socket.
+ NUTS_SEND(rep, "abc");
+
+ // Now get the next command from the REP; should be "def".
+ NUTS_RECV(rep, "def");
+
+ // And send it back to REQ.
+ NUTS_SEND(rep, "def");
+
+ // Try a req command. This should give back "def"
+ NUTS_RECV(req, "def");
+
+ nng_aio_free(aio);
+ NUTS_CLOSE(req);
+ NUTS_CLOSE(rep);
+}
+
+static void
+test_req_cancel_post_recv(void)
+{
+ nng_socket req;
+ nng_socket rep;
+
+ NUTS_PASS(nng_req0_open(&req));
+ NUTS_PASS(nng_rep0_open(&rep));
+ NUTS_PASS(nng_socket_set_ms(req, NNG_OPT_SENDTIMEO, 1000));
+ NUTS_PASS(nng_socket_set_ms(rep, NNG_OPT_SENDTIMEO, 1000));
+ NUTS_PASS(nng_socket_set_ms(req, NNG_OPT_RECVTIMEO, 1000));
+ NUTS_PASS(nng_socket_set_ms(rep, NNG_OPT_RECVTIMEO, 1000));
+ NUTS_MARRY(req, rep);
+
+ NUTS_SEND(req, "ONE");
+ NUTS_RECV(rep, "ONE");
+ NUTS_SEND(rep, "one");
+ NUTS_SLEEP(100); // Make sure reply arrives!
+ NUTS_SEND(req, "TWO");
+ NUTS_RECV(rep, "TWO");
+ NUTS_SEND(rep, "two");
+ NUTS_RECV(req, "two");
+
+ NUTS_CLOSE(req);
+ NUTS_CLOSE(rep);
+}
+
+void
+test_req_poll_writeable(void)
+{
+ int fd;
+ nng_socket req;
+ nng_socket rep;
+
+ NUTS_PASS(nng_req0_open(&req));
+ NUTS_PASS(nng_rep0_open(&rep));
+ NUTS_PASS(nng_socket_get_int(req, NNG_OPT_SENDFD, &fd));
+ NUTS_TRUE(fd >= 0);
+
+ // Not writable before connect.
+ NUTS_TRUE(nuts_poll_fd(fd) == false);
+
+ NUTS_MARRY(req, rep);
+
+ // It should be writable now.
+ NUTS_TRUE(nuts_poll_fd(fd));
+
+ // Submit a bunch of jobs. Note that we have to stall a bit
+ // between each message to let it queue up.
+ for (int i = 0; i < 10; i++) {
+ int rv = nng_send(req, "", 0, NNG_FLAG_NONBLOCK);
+ if (rv == NNG_EAGAIN) {
+ break;
+ }
+ NUTS_PASS(rv);
+ NUTS_SLEEP(50);
+ }
+ NUTS_TRUE(nuts_poll_fd(fd) == false);
+ NUTS_CLOSE(req);
+ NUTS_CLOSE(rep);
+}
+
+void
+test_req_poll_contention(void)
+{
+ int fd;
+ nng_socket req;
+ nng_socket rep;
+ nng_aio * aio;
+ nng_ctx ctx[5];
+ nng_aio * ctx_aio[5];
+ nng_msg * ctx_msg[5];
+ nng_msg * msg;
+
+ NUTS_PASS(nng_req0_open(&req));
+ NUTS_PASS(nng_rep0_open(&rep));
+ NUTS_PASS(nng_socket_set_int(req, NNG_OPT_SENDBUF, 1));
+ NUTS_PASS(nng_socket_set_int(rep, NNG_OPT_RECVBUF, 1));
+ NUTS_PASS(nng_socket_set_ms(req, NNG_OPT_SENDTIMEO, 1000));
+ NUTS_PASS(nng_socket_set_ms(rep, NNG_OPT_RECVTIMEO, 1000));
+
+ for (int i = 0; i < 5; i++) {
+ NUTS_PASS(nng_ctx_open(&ctx[i], req));
+ NUTS_PASS(nng_aio_alloc(&ctx_aio[i], NULL, NULL));
+ NUTS_PASS(nng_msg_alloc(&ctx_msg[i], 0));
+ }
+ NUTS_PASS(nng_aio_alloc(&aio, NULL, NULL));
+ NUTS_PASS(nng_msg_alloc(&msg, 0));
+
+ NUTS_PASS(nng_socket_get_int(req, NNG_OPT_SENDFD, &fd));
+ NUTS_TRUE(fd >= 0);
+
+ // Not writable before connect.
+ NUTS_TRUE(nuts_poll_fd(fd) == false);
+
+ nng_aio_set_msg(aio, msg);
+ nng_send_aio(req, aio);
+ for (int i = 0; i < 5; i++) {
+ nng_aio_set_msg(ctx_aio[i], ctx_msg[i]);
+ nng_ctx_send(ctx[i], ctx_aio[i]);
+ }
+ NUTS_SLEEP(50); // so everything is queued steady state
+
+ NUTS_MARRY(req, rep);
+
+ // It should not be writable now.
+ NUTS_TRUE(nuts_poll_fd(fd) == false);
+
+ NUTS_PASS(nng_recvmsg(rep, &msg, 0));
+ nng_msg_free(msg);
+
+ // Still not writeable...
+ NUTS_TRUE(nuts_poll_fd(fd) == false);
+ for (int i = 0; i < 5; i++) {
+ NUTS_PASS(nng_recvmsg(rep, &msg, 0));
+ nng_msg_free(msg);
+ }
+ // It can take a little bit of time for the eased back-pressure
+ // to reflect across the network.
+ NUTS_SLEEP(100);
+
+ // Should be come writeable now...
+ NUTS_TRUE(nuts_poll_fd(fd) == true);
+
+ for (int i = 0; i < 5; i++) {
+ nng_aio_free(ctx_aio[i]);
+ }
+ nng_aio_free(aio);
+ NUTS_CLOSE(req);
+ NUTS_CLOSE(rep);
+}
+
+void
+test_req_poll_multi_pipe(void)
+{
+ int fd;
+ nng_socket req;
+ nng_socket rep1;
+ nng_socket rep2;
+
+ NUTS_PASS(nng_req0_open(&req));
+ NUTS_PASS(nng_rep0_open(&rep1));
+ NUTS_PASS(nng_rep0_open(&rep2));
+ NUTS_PASS(nng_socket_set_int(req, NNG_OPT_SENDBUF, 1));
+ NUTS_PASS(nng_socket_set_ms(req, NNG_OPT_SENDTIMEO, 1000));
+
+ NUTS_PASS(nng_socket_get_int(req, NNG_OPT_SENDFD, &fd));
+ NUTS_TRUE(fd >= 0);
+
+ // Not writable before connect.
+ NUTS_TRUE(nuts_poll_fd(fd) == false);
+
+ NUTS_MARRY(req, rep1);
+ NUTS_MARRY(req, rep2);
+
+ NUTS_TRUE(nuts_poll_fd(fd) == true);
+ NUTS_SEND(req, "ONE");
+ NUTS_TRUE(nuts_poll_fd(fd) == true);
+
+ NUTS_CLOSE(req);
+ NUTS_CLOSE(rep1);
+ NUTS_CLOSE(rep2);
+}
+
+void
+test_req_poll_readable(void)
+{
+ int fd;
+ nng_socket req;
+ nng_socket rep;
+ nng_msg * msg;
+
+ NUTS_PASS(nng_req0_open(&req));
+ NUTS_PASS(nng_rep0_open(&rep));
+ NUTS_PASS(nng_socket_get_int(req, NNG_OPT_RECVFD, &fd));
+ NUTS_TRUE(fd >= 0);
+
+ // Not readable if not connected!
+ NUTS_TRUE(nuts_poll_fd(fd) == false);
+
+ // Even after connect (no message yet)
+ NUTS_MARRY(req, rep);
+ NUTS_TRUE(nuts_poll_fd(fd) == false);
+
+ // But once we send messages, it is.
+ // We have to send a request, in order to send a reply.
+
+ NUTS_PASS(nng_msg_alloc(&msg, 0));
+ NUTS_PASS(nng_msg_append(msg, "xyz", 3));
+ NUTS_PASS(nng_sendmsg(req, msg, 0));
+ NUTS_PASS(nng_recvmsg(rep, &msg, 0)); // recv on rep
+ NUTS_PASS(nng_sendmsg(rep, msg, 0)); // echo it back
+ NUTS_SLEEP(200); // give time for message to arrive
+
+ NUTS_TRUE(nuts_poll_fd(fd) == true);
+
+ // and receiving makes it no longer ready
+ NUTS_PASS(nng_recvmsg(req, &msg, 0));
+ nng_msg_free(msg);
+ NUTS_TRUE(nuts_poll_fd(fd) == false);
+
+ // TODO verify unsolicited response
+
+ NUTS_CLOSE(req);
+ NUTS_CLOSE(rep);
+}
+
+static void
+test_req_ctx_no_poll(void)
+{
+ int fd;
+ nng_socket req;
+ nng_ctx ctx;
+
+ NUTS_PASS(nng_req0_open(&req));
+ NUTS_PASS(nng_ctx_open(&ctx, req));
+ NUTS_FAIL(nng_ctx_getopt_int(ctx, NNG_OPT_SENDFD, &fd), NNG_ENOTSUP);
+ NUTS_FAIL(nng_ctx_getopt_int(ctx, NNG_OPT_RECVFD, &fd), NNG_ENOTSUP);
+ NUTS_PASS(nng_ctx_close(ctx));
+ NUTS_CLOSE(req);
+}
+
+static void
+test_req_ctx_send_queued(void)
+{
+ nng_socket req;
+ nng_socket rep;
+ nng_ctx ctx[3];
+ nng_aio * aio[3];
+ nng_msg * msg[3];
+
+ NUTS_PASS(nng_req0_open(&req));
+ NUTS_PASS(nng_rep0_open(&rep));
+ NUTS_PASS(nng_socket_set_ms(req, NNG_OPT_SENDTIMEO, 1000));
+ NUTS_PASS(nng_socket_set_ms(rep, NNG_OPT_RECVTIMEO, 100));
+
+ for (int i = 0; i < 3; i++) {
+ NUTS_PASS(nng_ctx_open(&ctx[i], req));
+ NUTS_PASS(nng_aio_alloc(&aio[i], NULL, NULL));
+ NUTS_PASS(nng_msg_alloc(&msg[i], 0));
+ }
+
+ for (int i = 0; i < 3; i++) {
+ nng_aio_set_msg(aio[i], msg[i]);
+ nng_ctx_send(ctx[i], aio[i]);
+ }
+
+ NUTS_MARRY(req, rep);
+
+ NUTS_SLEEP(50); // Only to ensure stuff queues up
+ for (int i = 0; i < 3; i++) {
+ nng_msg *m;
+ NUTS_PASS(nng_recvmsg(rep, &m, 0));
+ nng_msg_free(m);
+ }
+
+ NUTS_CLOSE(req);
+ NUTS_CLOSE(rep);
+ for (int i = 0; i < 3; i++) {
+ nng_aio_wait(aio[i]);
+ NUTS_PASS(nng_aio_result(aio[i]));
+ nng_aio_free(aio[i]);
+ }
+}
+
+static void
+test_req_ctx_send_close(void)
+{
+ nng_socket req;
+ nng_ctx ctx[3];
+ nng_aio * aio[3];
+ nng_msg * msg[3];
+
+ NUTS_PASS(nng_req0_open(&req));
+ NUTS_PASS(nng_socket_set_ms(req, NNG_OPT_SENDTIMEO, 1000));
+
+ for (int i = 0; i < 3; i++) {
+ NUTS_PASS(nng_ctx_open(&ctx[i], req));
+ NUTS_PASS(nng_aio_alloc(&aio[i], NULL, NULL));
+ NUTS_PASS(nng_msg_alloc(&msg[i], 0));
+ }
+
+ for (int i = 0; i < 3; i++) {
+ nng_aio_set_msg(aio[i], msg[i]);
+ nng_ctx_send(ctx[i], aio[i]);
+ }
+
+ for (int i = 0; i < 3; i++) {
+ nng_ctx_close(ctx[i]);
+ }
+
+ for (int i = 0; i < 3; i++) {
+ nng_aio_wait(aio[i]);
+ NUTS_FAIL(nng_aio_result(aio[i]), NNG_ECLOSED);
+ nng_aio_free(aio[i]);
+ nng_msg_free(msg[i]);
+ }
+ NUTS_CLOSE(req);
+}
+
+static void
+test_req_ctx_send_abort(void)
+{
+ nng_socket req;
+ nng_ctx ctx[3];
+ nng_aio * aio[3];
+ nng_msg * msg[3];
+
+ NUTS_PASS(nng_req0_open(&req));
+ NUTS_PASS(nng_socket_set_ms(req, NNG_OPT_SENDTIMEO, 1000));
+
+ for (int i = 0; i < 3; i++) {
+ NUTS_PASS(nng_ctx_open(&ctx[i], req));
+ NUTS_PASS(nng_aio_alloc(&aio[i], NULL, NULL));
+ NUTS_PASS(nng_msg_alloc(&msg[i], 0));
+ }
+
+ for (int i = 0; i < 3; i++) {
+ nng_aio_set_msg(aio[i], msg[i]);
+ nng_ctx_send(ctx[i], aio[i]);
+ }
+
+ for (int i = 0; i < 3; i++) {
+ nng_aio_abort(aio[i], NNG_ECANCELED);
+ }
+
+ for (int i = 0; i < 3; i++) {
+ nng_aio_wait(aio[i]);
+ NUTS_FAIL(nng_aio_result(aio[i]), NNG_ECANCELED);
+ nng_aio_free(aio[i]);
+ nng_msg_free(msg[i]);
+ }
+ NUTS_CLOSE(req);
+}
+
+static void
+test_req_ctx_send_twice(void)
+{
+ nng_socket req;
+ nng_ctx ctx;
+ nng_aio * aio[2];
+ nng_msg * msg[2];
+
+ NUTS_PASS(nng_req0_open(&req));
+ NUTS_PASS(nng_socket_set_ms(req, NNG_OPT_SENDTIMEO, 1000));
+ NUTS_PASS(nng_ctx_open(&ctx, req));
+
+ for (int i = 0; i < 2; i++) {
+ NUTS_PASS(nng_aio_alloc(&aio[i], NULL, NULL));
+ NUTS_PASS(nng_msg_alloc(&msg[i], 0));
+ }
+
+ for (int i = 0; i < 2; i++) {
+ nng_aio_set_msg(aio[i], msg[i]);
+ nng_ctx_send(ctx, aio[i]);
+ NUTS_SLEEP(50);
+ }
+
+ NUTS_CLOSE(req);
+ nng_aio_wait(aio[0]);
+ nng_aio_wait(aio[1]);
+ NUTS_FAIL(nng_aio_result(aio[0]), NNG_ECANCELED);
+ NUTS_FAIL(nng_aio_result(aio[1]), NNG_ECLOSED);
+
+ for (int i = 0; i < 2; i++) {
+ nng_aio_free(aio[i]);
+ nng_msg_free(msg[i]);
+ }
+}
+
+static void
+test_req_ctx_recv_nonblock(void)
+{
+ nng_socket req;
+ nng_socket rep;
+ nng_ctx ctx;
+ nng_aio * aio;
+ nng_msg * msg;
+
+ NUTS_PASS(nng_req0_open(&req));
+ NUTS_PASS(nng_rep0_open(&rep));
+ NUTS_PASS(nng_ctx_open(&ctx, req));
+ NUTS_PASS(nng_aio_alloc(&aio, NULL, NULL));
+ NUTS_PASS(nng_msg_alloc(&msg, 0));
+
+ NUTS_MARRY(req, rep);
+
+ nng_aio_set_msg(aio, msg);
+ nng_ctx_send(ctx, aio);
+ nng_aio_wait(aio);
+ NUTS_PASS(nng_aio_result(aio));
+ nng_aio_set_timeout(aio, 0); // Instant timeout
+ nng_ctx_recv(ctx, aio);
+
+ nng_aio_wait(aio);
+ NUTS_FAIL(nng_aio_result(aio), NNG_ETIMEDOUT);
+ NUTS_CLOSE(req);
+ NUTS_CLOSE(rep);
+ nng_aio_free(aio);
+}
+
+static void
+test_req_ctx_send_nonblock(void)
+{
+ nng_socket req;
+ nng_ctx ctx;
+ nng_aio * aio;
+ nng_msg * msg;
+
+ NUTS_PASS(nng_req0_open(&req));
+ NUTS_PASS(nng_ctx_open(&ctx, req));
+ NUTS_PASS(nng_aio_alloc(&aio, NULL, NULL));
+ NUTS_PASS(nng_msg_alloc(&msg, 0));
+
+ nng_aio_set_msg(aio, msg);
+ nng_aio_set_timeout(aio, 0); // Instant timeout
+ nng_ctx_send(ctx, aio);
+ nng_aio_wait(aio);
+ NUTS_FAIL(nng_aio_result(aio), NNG_ETIMEDOUT);
+ NUTS_CLOSE(req);
+ nng_aio_free(aio);
+ nng_msg_free(msg);
+}
+
+// Verify that closing the socket aborts a context receive that is in
+// flight, completing it with NNG_ECLOSED.
+static void
+test_req_ctx_recv_close_socket(void)
+{
+	nng_socket req;
+	nng_socket rep;
+	nng_ctx ctx;
+	nng_aio * aio;
+	nng_msg * m;
+
+	NUTS_PASS(nng_req0_open(&req));
+	NUTS_PASS(nng_rep0_open(&rep));
+	NUTS_PASS(nng_ctx_open(&ctx, req));
+	NUTS_PASS(nng_aio_alloc(&aio, NULL, NULL));
+	NUTS_MARRY(req, rep);
+	NUTS_PASS(nng_msg_alloc(&m, 0));
+	nng_aio_set_msg(aio, m);
+	nng_ctx_send(ctx, aio);
+	nng_aio_wait(aio);
+	NUTS_PASS(nng_aio_result(aio));
+
+	nng_ctx_recv(ctx, aio);
+	nng_close(req);
+
+	// The close aborts the pending receive asynchronously, so we must
+	// wait for the aio to complete before inspecting its result;
+	// checking without waiting races with the abort callback.
+	nng_aio_wait(aio);
+	NUTS_FAIL(nng_aio_result(aio), NNG_ECLOSED);
+	nng_aio_free(aio);
+	NUTS_CLOSE(rep);
+}
+
+static void
+test_req_validate_peer(void)
+{
+ nng_socket s1, s2;
+ nng_stat * stats;
+ nng_stat * reject;
+ char * addr;
+
+ NUTS_ADDR(addr, "inproc");
+
+ NUTS_PASS(nng_req0_open(&s1));
+ NUTS_PASS(nng_req0_open(&s2));
+
+ NUTS_PASS(nng_listen(s1, addr, NULL, 0));
+ NUTS_PASS(nng_dial(s2, addr, NULL, NNG_FLAG_NONBLOCK));
+
+ NUTS_SLEEP(100);
+ NUTS_PASS(nng_stats_get(&stats));
+
+ NUTS_TRUE(stats != NULL);
+ NUTS_TRUE((reject = nng_stat_find_socket(stats, s1)) != NULL);
+ NUTS_TRUE((reject = nng_stat_find(reject, "reject")) != NULL);
+
+ NUTS_TRUE(nng_stat_type(reject) == NNG_STAT_COUNTER);
+ NUTS_TRUE(nng_stat_value(reject) > 0);
+
+ NUTS_CLOSE(s1);
+ NUTS_CLOSE(s2);
+ nng_stats_free(stats);
+}
+
+NUTS_TESTS = {
+ { "req identity", test_req_identity },
+ { "req ttl option", test_req_ttl_option },
+ { "req resend option", test_req_resend_option },
+ { "req recv bad state", test_req_recv_bad_state },
+ { "req recv garbage", test_req_recv_garbage },
+ { "req rep exchange", test_req_rep_exchange },
+ { "req resend", test_req_resend },
+ { "req resend disconnect", test_req_resend_disconnect },
+ { "req disconnect no retry", test_req_disconnect_no_retry },
+ { "req disconnect abort", test_req_disconnect_abort },
+ { "req resend reconnect", test_req_resend_reconnect },
+ { "req cancel", test_req_cancel },
+ { "req cancel abort recv", test_req_cancel_abort_recv },
+ { "req cancel post recv", test_req_cancel_post_recv },
+ { "req poll writable", test_req_poll_writeable },
+ { "req poll contention", test_req_poll_contention },
+ { "req poll multi pipe", test_req_poll_multi_pipe },
+ { "req poll readable", test_req_poll_readable },
+ { "req context send queued", test_req_ctx_send_queued },
+ { "req context send close", test_req_ctx_send_close },
+ { "req context send abort", test_req_ctx_send_abort },
+ { "req context send twice", test_req_ctx_send_twice },
+ { "req context does not poll", test_req_ctx_no_poll },
+ { "req context recv close socket", test_req_ctx_recv_close_socket },
+ { "req context recv nonblock", test_req_ctx_recv_nonblock },
+ { "req context send nonblock", test_req_ctx_send_nonblock },
+ { "req validate peer", test_req_validate_peer },
+ { NULL, NULL },
+};
diff --git a/src/sp/protocol/reqrep0/xrep.c b/src/sp/protocol/reqrep0/xrep.c
new file mode 100644
index 00000000..9737c600
--- /dev/null
+++ b/src/sp/protocol/reqrep0/xrep.c
@@ -0,0 +1,432 @@
+//
+// Copyright 2020 Staysail Systems, Inc. <info@staysail.tech>
+// Copyright 2018 Capitar IT Group BV <info@capitar.com>
+//
+// This software is supplied under the terms of the MIT License, a
+// copy of which should be located in the distribution where this
+// file was obtained (LICENSE.txt). A copy of the license may also be
+// found online at https://opensource.org/licenses/MIT.
+//
+
+#include <string.h>
+
+#include "core/nng_impl.h"
+#include "nng/protocol/reqrep0/rep.h"
+
+// Response protocol in raw mode. The REP protocol is the "reply" side of a
+// request-reply pair. This is useful for building RPC servers, for
+// example.
+
+typedef struct xrep0_pipe xrep0_pipe;
+typedef struct xrep0_sock xrep0_sock;
+
+static void xrep0_sock_getq_cb(void *);
+static void xrep0_pipe_getq_cb(void *);
+static void xrep0_pipe_putq_cb(void *);
+static void xrep0_pipe_send_cb(void *);
+static void xrep0_pipe_recv_cb(void *);
+static void xrep0_pipe_fini(void *);
+
+// xrep0_sock is our per-socket protocol private structure.
+struct xrep0_sock {
+	nni_msgq *     uwq;  // upper write queue (replies from the application)
+	nni_msgq *     urq;  // upper read queue (requests to the application)
+	nni_mtx        lk;   // protects the pipes map
+	nni_atomic_int ttl;  // max hops allowed for an inbound request
+	nni_id_map     pipes; // pipe id -> xrep0_pipe, for reply routing
+	nni_aio        aio_getq; // pulls replies from uwq for distribution
+};
+
+// xrep0_pipe is our per-pipe protocol private structure.
+struct xrep0_pipe {
+	nni_pipe *  pipe;  // underlying transport pipe
+	xrep0_sock *rep;   // back-pointer to owning socket
+	nni_msgq *  sendq; // per-pipe reply queue (isolates slow peers)
+	nni_aio     aio_getq; // pulls replies from sendq
+	nni_aio     aio_send; // writes a reply to the pipe
+	nni_aio     aio_recv; // reads a request from the pipe
+	nni_aio     aio_putq; // pushes a request onto the socket urq
+};
+
+// Tear down per-socket state; must mirror xrep0_sock_init.
+static void
+xrep0_sock_fini(void *arg)
+{
+	xrep0_sock *s = arg;
+
+	nni_aio_fini(&s->aio_getq);
+	nni_id_map_fini(&s->pipes);
+	nni_mtx_fini(&s->lk);
+}
+
+// Initialize per-socket state.  Always succeeds (returns 0).
+static int
+xrep0_sock_init(void *arg, nni_sock *sock)
+{
+	xrep0_sock *s = arg;
+
+	nni_mtx_init(&s->lk);
+	nni_aio_init(&s->aio_getq, xrep0_sock_getq_cb, s);
+	nni_atomic_init(&s->ttl);
+	nni_atomic_set(&s->ttl, 8); // Per RFC
+	s->uwq = nni_sock_sendq(sock);
+	s->urq = nni_sock_recvq(sock);
+
+	nni_id_map_init(&s->pipes, 0, 0, false);
+	return (0);
+}
+
+// Socket open hook: kick off the reply-distribution loop.
+static void
+xrep0_sock_open(void *arg)
+{
+	xrep0_sock *s = arg;
+
+	// This starts us retrieving message from the upper write q.
+	nni_msgq_aio_get(s->uwq, &s->aio_getq);
+}
+
+// Socket close hook: abort the reply-distribution aio.
+static void
+xrep0_sock_close(void *arg)
+{
+	xrep0_sock *s = arg;
+
+	nni_aio_close(&s->aio_getq);
+}
+
+// Stop all pipe aios, waiting for callbacks to finish.
+static void
+xrep0_pipe_stop(void *arg)
+{
+	xrep0_pipe *p = arg;
+
+	nni_aio_stop(&p->aio_getq);
+	nni_aio_stop(&p->aio_send);
+	nni_aio_stop(&p->aio_recv);
+	nni_aio_stop(&p->aio_putq);
+}
+
+// Release per-pipe resources; also used on the init error path.
+static void
+xrep0_pipe_fini(void *arg)
+{
+	xrep0_pipe *p = arg;
+
+	nni_aio_fini(&p->aio_getq);
+	nni_aio_fini(&p->aio_send);
+	nni_aio_fini(&p->aio_recv);
+	nni_aio_fini(&p->aio_putq);
+	nni_msgq_fini(p->sendq);
+}
+
+// Initialize per-pipe state, including the deep per-pipe send queue.
+// Returns 0 on success, or an nng error (e.g. NNG_ENOMEM) on failure.
+static int
+xrep0_pipe_init(void *arg, nni_pipe *pipe, void *s)
+{
+	xrep0_pipe *p = arg;
+	int         rv;
+
+	nni_aio_init(&p->aio_getq, xrep0_pipe_getq_cb, p);
+	nni_aio_init(&p->aio_send, xrep0_pipe_send_cb, p);
+	nni_aio_init(&p->aio_recv, xrep0_pipe_recv_cb, p);
+	nni_aio_init(&p->aio_putq, xrep0_pipe_putq_cb, p);
+
+	p->pipe = pipe;
+	p->rep  = s;
+
+	// We want a pretty deep send queue on pipes. The rationale here is
+	// that the send rate will be mitigated by the receive rate.
+	// If a slow pipe (req pipe not reading its own responses!?)
+	// comes up, then we will start discarding its replies eventually,
+	// but it takes some time. It would be poor form for a peer to
+	// smash us with requests, but be unable to handle replies faster
+	// than we can forward them. If they do that, their replies get
+	// dropped. (From a DDoS perspective, it might be nice in the
+	// future if we had a way to exert back pressure to the send side --
+	// essentially don't let peers send requests faster than they are
+	// willing to receive replies. Something to think about for the
+	// future.)
+	if ((rv = nni_msgq_init(&p->sendq, 64)) != 0) {
+		xrep0_pipe_fini(p);
+		return (rv);
+	}
+	return (0);
+}
+
+// Start a pipe: validate the peer protocol, register the pipe for reply
+// routing, then begin both the send and receive loops.
+static int
+xrep0_pipe_start(void *arg)
+{
+	xrep0_pipe *p = arg;
+	xrep0_sock *s = p->rep;
+	int         rv;
+
+	if (nni_pipe_peer(p->pipe) != NNG_REP0_PEER) {
+		// Peer protocol mismatch.
+		return (NNG_EPROTO);
+	}
+
+	nni_mtx_lock(&s->lk);
+	rv = nni_id_set(&s->pipes, nni_pipe_id(p->pipe), p);
+	nni_mtx_unlock(&s->lk);
+	if (rv != 0) {
+		return (rv);
+	}
+
+	nni_msgq_aio_get(p->sendq, &p->aio_getq);
+	nni_pipe_recv(p->pipe, &p->aio_recv);
+	return (0);
+}
+
+// Close a pipe: abort its aios, shut its queue, and deregister it so no
+// further replies are routed to it.
+static void
+xrep0_pipe_close(void *arg)
+{
+	xrep0_pipe *p = arg;
+	xrep0_sock *s = p->rep;
+
+	nni_aio_close(&p->aio_getq);
+	nni_aio_close(&p->aio_send);
+	nni_aio_close(&p->aio_recv);
+	nni_aio_close(&p->aio_putq);
+	nni_msgq_close(p->sendq);
+
+	nni_mtx_lock(&s->lk);
+	nni_id_remove(&s->pipes, nni_pipe_id(p->pipe));
+	nni_mtx_unlock(&s->lk);
+}
+
+// Reply-distribution callback: routes each outgoing reply from the upper
+// write queue to the destination pipe named by the leading 32-bit pipe id
+// in the message header.  Messages with no route (short header, unknown
+// pipe, or a full per-pipe queue) are silently dropped.
+static void
+xrep0_sock_getq_cb(void *arg)
+{
+	xrep0_sock *s   = arg;
+	nni_msgq *  uwq = s->uwq;
+	nni_msg *   msg;
+	uint32_t    id;
+	xrep0_pipe *p;
+
+	// This watches for messages from the upper write queue,
+	// extracts the destination pipe, and forwards it to the appropriate
+	// destination pipe via a separate queue. This prevents a single bad
+	// or slow pipe from gumming up the works for the entire socket.
+
+	if (nni_aio_result(&s->aio_getq) != 0) {
+		// Closed socket?
+		return;
+	}
+
+	msg = nni_aio_get_msg(&s->aio_getq);
+	nni_aio_set_msg(&s->aio_getq, NULL);
+
+	// We yank the outgoing pipe id from the header
+	if (nni_msg_header_len(msg) < 4) {
+		nni_msg_free(msg);
+
+		// Look for another message on the upper write queue.
+		nni_msgq_aio_get(uwq, &s->aio_getq);
+		return;
+	}
+
+	id = nni_msg_header_trim_u32(msg);
+
+	// Look for the pipe, and attempt to put the message there
+	// (non-blocking) if we can. If we can't for any reason, then we
+	// free the message.
+	nni_mtx_lock(&s->lk);
+	if (((p = nni_id_get(&s->pipes, id)) == NULL) ||
+	    (nni_msgq_tryput(p->sendq, msg) != 0)) {
+		nni_msg_free(msg);
+	}
+	nni_mtx_unlock(&s->lk);
+
+	// Now look for another message on the upper write queue.
+	nni_msgq_aio_get(uwq, &s->aio_getq);
+}
+
+// Per-pipe queue callback: a reply became available on our sendq; hand it
+// to the transport for transmission.
+static void
+xrep0_pipe_getq_cb(void *arg)
+{
+	xrep0_pipe *p = arg;
+
+	if (nni_aio_result(&p->aio_getq) != 0) {
+		nni_pipe_close(p->pipe);
+		return;
+	}
+
+	nni_aio_set_msg(&p->aio_send, nni_aio_get_msg(&p->aio_getq));
+	nni_aio_set_msg(&p->aio_getq, NULL);
+
+	nni_pipe_send(p->pipe, &p->aio_send);
+}
+
+// Transport send completion: on failure free the message and close the
+// pipe; on success go back for the next queued reply.
+static void
+xrep0_pipe_send_cb(void *arg)
+{
+	xrep0_pipe *p = arg;
+
+	if (nni_aio_result(&p->aio_send) != 0) {
+		nni_msg_free(nni_aio_get_msg(&p->aio_send));
+		nni_aio_set_msg(&p->aio_send, NULL);
+		nni_pipe_close(p->pipe);
+		return;
+	}
+
+	nni_msgq_aio_get(p->sendq, &p->aio_getq);
+}
+
+// Transport receive completion: prepend our pipe id to the header, then
+// move the 4-byte backtrace chunks from the body to the header until the
+// end-marker chunk (high bit set).  Messages exceeding the TTL are dropped
+// (not a protocol violation); malformed ones close the pipe.
+static void
+xrep0_pipe_recv_cb(void *arg)
+{
+	xrep0_pipe *p = arg;
+	xrep0_sock *s = p->rep;
+	nni_msg *   msg;
+	int         hops;
+	int         ttl;
+
+	if (nni_aio_result(&p->aio_recv) != 0) {
+		nni_pipe_close(p->pipe);
+		return;
+	}
+
+	ttl = nni_atomic_get(&s->ttl);
+
+	msg = nni_aio_get_msg(&p->aio_recv);
+	nni_aio_set_msg(&p->aio_recv, NULL);
+
+	nni_msg_set_pipe(msg, nni_pipe_id(p->pipe));
+
+	// Store the pipe id in the header, first thing.
+	nni_msg_header_append_u32(msg, nni_pipe_id(p->pipe));
+
+	// Move backtrace from body to header
+	hops = 1;
+	for (;;) {
+		bool     end;
+		uint8_t *body;
+		if (hops > ttl) {
+			// This isn't malformed, but it has gone through
+			// too many hops. Do not disconnect, because we
+			// can legitimately receive messages with too many
+			// hops from devices, etc.
+			goto drop;
+		}
+		hops++;
+		if (nni_msg_len(msg) < 4) {
+			// Peer is speaking garbage. Kick it.
+			nni_msg_free(msg);
+			nni_pipe_close(p->pipe);
+			return;
+		}
+		body = nni_msg_body(msg);
+		end  = ((body[0] & 0x80u) != 0);
+		if (nni_msg_header_append(msg, body, 4) != 0) {
+			// Out of memory most likely, but keep going to
+			// avoid breaking things.
+			goto drop;
+		}
+		nni_msg_trim(msg, 4);
+		if (end) {
+			break;
+		}
+	}
+
+	// Go ahead and send it up.
+	nni_aio_set_msg(&p->aio_putq, msg);
+	nni_msgq_aio_put(s->urq, &p->aio_putq);
+	return;
+
+drop:
+	// Dropped message: discard it and resume receiving.
+	nni_msg_free(msg);
+	nni_pipe_recv(p->pipe, &p->aio_recv);
+}
+
+// Upper read queue put completion: once the request is delivered upstream
+// (or dropped on failure), resume receiving from the transport.
+static void
+xrep0_pipe_putq_cb(void *arg)
+{
+	xrep0_pipe *p = arg;
+
+	if (nni_aio_result(&p->aio_putq) != 0) {
+		nni_msg_free(nni_aio_get_msg(&p->aio_putq));
+		nni_aio_set_msg(&p->aio_putq, NULL);
+		nni_pipe_close(p->pipe);
+		return;
+	}
+
+	nni_pipe_recv(p->pipe, &p->aio_recv);
+}
+
+// Option setter for NNG_OPT_MAXTTL; accepts 1..NNI_MAX_MAX_TTL.
+static int
+xrep0_sock_set_maxttl(void *arg, const void *buf, size_t sz, nni_opt_type t)
+{
+	xrep0_sock *s = arg;
+	int         ttl;
+	int         rv;
+	if ((rv = nni_copyin_int(&ttl, buf, sz, 1, NNI_MAX_MAX_TTL, t)) == 0) {
+		nni_atomic_set(&s->ttl, ttl);
+	}
+	return (rv);
+}
+
+// Option getter for NNG_OPT_MAXTTL.
+static int
+xrep0_sock_get_maxttl(void *arg, void *buf, size_t *szp, nni_opt_type t)
+{
+	xrep0_sock *s = arg;
+	return (nni_copyout_int(nni_atomic_get(&s->ttl), buf, szp, t));
+}
+
+// Raw-mode send: queue the (caller-addressed) reply on the upper write
+// queue; routing happens in xrep0_sock_getq_cb.
+static void
+xrep0_sock_send(void *arg, nni_aio *aio)
+{
+	xrep0_sock *s = arg;
+
+	nni_msgq_aio_put(s->uwq, aio);
+}
+
+// Raw-mode receive: take the next request from the upper read queue.
+static void
+xrep0_sock_recv(void *arg, nni_aio *aio)
+{
+	xrep0_sock *s = arg;
+
+	nni_msgq_aio_get(s->urq, aio);
+}
+
+// This is the global protocol structure -- our linkage to the core.
+// This should be the only global non-static symbol in this file.
+static nni_proto_pipe_ops xrep0_pipe_ops = {
+	.pipe_size  = sizeof(xrep0_pipe),
+	.pipe_init  = xrep0_pipe_init,
+	.pipe_fini  = xrep0_pipe_fini,
+	.pipe_start = xrep0_pipe_start,
+	.pipe_close = xrep0_pipe_close,
+	.pipe_stop  = xrep0_pipe_stop,
+};
+
+// Socket options exposed by this protocol (only the max TTL).
+static nni_option xrep0_sock_options[] = {
+	{
+	    .o_name = NNG_OPT_MAXTTL,
+	    .o_get  = xrep0_sock_get_maxttl,
+	    .o_set  = xrep0_sock_set_maxttl,
+	},
+	// terminate list
+	{
+	    .o_name = NULL,
+	},
+};
+
+static nni_proto_sock_ops xrep0_sock_ops = {
+	.sock_size    = sizeof(xrep0_sock),
+	.sock_init    = xrep0_sock_init,
+	.sock_fini    = xrep0_sock_fini,
+	.sock_open    = xrep0_sock_open,
+	.sock_close   = xrep0_sock_close,
+	.sock_options = xrep0_sock_options,
+	.sock_send    = xrep0_sock_send,
+	.sock_recv    = xrep0_sock_recv,
+};
+
+// Protocol descriptor: REP/0 in raw mode (NNI_PROTO_FLAG_RAW).
+static nni_proto xrep0_proto = {
+	.proto_version  = NNI_PROTOCOL_VERSION,
+	.proto_self     = { NNG_REP0_SELF, NNG_REP0_SELF_NAME },
+	.proto_peer     = { NNG_REP0_PEER, NNG_REP0_PEER_NAME },
+	.proto_flags    = NNI_PROTO_FLAG_SNDRCV | NNI_PROTO_FLAG_RAW,
+	.proto_sock_ops = &xrep0_sock_ops,
+	.proto_pipe_ops = &xrep0_pipe_ops,
+};
+
+// Public entry point: open a raw-mode REP/0 socket.
+int
+nng_rep0_open_raw(nng_socket *sidp)
+{
+	return (nni_proto_open(sidp, &xrep0_proto));
+}
diff --git a/src/sp/protocol/reqrep0/xrep_test.c b/src/sp/protocol/reqrep0/xrep_test.c
new file mode 100644
index 00000000..6f1564eb
--- /dev/null
+++ b/src/sp/protocol/reqrep0/xrep_test.c
@@ -0,0 +1,434 @@
+//
+// Copyright 2020 Staysail Systems, Inc. <info@staysail.tech>
+//
+// This software is supplied under the terms of the MIT License, a
+// copy of which should be located in the distribution where this
+// file was obtained (LICENSE.txt). A copy of the license may also be
+// found online at https://opensource.org/licenses/MIT.
+//
+
+#include <nuts.h>
+
+// Verify the raw REP socket reports the expected protocol/peer numbers
+// and names.
+static void
+test_xrep_identity(void)
+{
+	nng_socket s;
+	int        p1, p2;
+	char *     n1;
+	char *     n2;
+
+	NUTS_PASS(nng_rep0_open_raw(&s));
+	NUTS_PASS(nng_socket_get_int(s, NNG_OPT_PROTO, &p1));
+	NUTS_PASS(nng_socket_get_int(s, NNG_OPT_PEER, &p2));
+	NUTS_PASS(nng_socket_get_string(s, NNG_OPT_PROTONAME, &n1));
+	NUTS_PASS(nng_socket_get_string(s, NNG_OPT_PEERNAME, &n2));
+	NUTS_CLOSE(s);
+	NUTS_TRUE(p1 == NNG_REP0_SELF);
+	NUTS_TRUE(p2 == NNG_REP0_PEER);
+	NUTS_MATCH(n1, NNG_REP0_SELF_NAME);
+	NUTS_MATCH(n2, NNG_REP0_PEER_NAME);
+	nng_strfree(n1);
+	nng_strfree(n2);
+}
+
+// Verify the socket reports NNG_OPT_RAW as true.
+static void
+test_xrep_raw(void)
+{
+	nng_socket s;
+	bool       b;
+
+	NUTS_PASS(nng_rep0_open_raw(&s));
+	NUTS_PASS(nng_socket_get_bool(s, NNG_OPT_RAW, &b));
+	NUTS_TRUE(b);
+	NUTS_CLOSE(s);
+}
+
+// Raw sockets do not support contexts; opening one must fail.
+static void
+test_xrep_no_context(void)
+{
+	nng_socket s;
+	nng_ctx    ctx;
+
+	NUTS_PASS(nng_rep0_open_raw(&s));
+	NUTS_FAIL(nng_ctx_open(&ctx, s), NNG_ENOTSUP);
+	NUTS_CLOSE(s);
+}
+
+// The raw REP send FD is always writeable, even before any connection.
+static void
+test_xrep_poll_writeable(void)
+{
+	int        fd;
+	nng_socket req;
+	nng_socket rep;
+
+	NUTS_PASS(nng_rep0_open_raw(&rep));
+	NUTS_PASS(nng_req0_open(&req));
+	NUTS_PASS(nng_socket_get_int(rep, NNG_OPT_SENDFD, &fd));
+	NUTS_TRUE(fd >= 0);
+
+	// We are always writeable, even before connect. This is so that
+	// back-pressure from a bad peer can't trash others. We assume
+	// that peers won't send us requests faster than they can consume
+	// the answers. If they do, they will lose their answers.
+	NUTS_TRUE(nuts_poll_fd(fd) == true);
+
+	NUTS_MARRY(req, rep);
+
+	// Now it's writable.
+	NUTS_TRUE(nuts_poll_fd(fd) == true);
+
+	NUTS_CLOSE(req);
+	NUTS_CLOSE(rep);
+}
+
+// The receive FD polls readable only when a request is pending.
+static void
+test_xrep_poll_readable(void)
+{
+	int        fd;
+	nng_socket req;
+	nng_socket rep;
+	nng_msg *  msg;
+
+	NUTS_PASS(nng_req0_open(&req));
+	NUTS_PASS(nng_rep0_open_raw(&rep));
+	NUTS_PASS(nng_socket_get_int(rep, NNG_OPT_RECVFD, &fd));
+	NUTS_TRUE(fd >= 0);
+
+	// Not readable if not connected!
+	NUTS_TRUE(nuts_poll_fd(fd) == false);
+
+	// Even after connect (no message yet)
+	NUTS_MARRY(req, rep);
+	NUTS_TRUE(nuts_poll_fd(fd) == false);
+
+	// But once we send messages, it is.
+	// We have to send a request, in order to send a reply.
+	NUTS_SEND(req, "abc");
+	NUTS_SLEEP(100);
+
+	NUTS_TRUE(nuts_poll_fd(fd) == true);
+
+	// and receiving makes it no longer ready
+	NUTS_PASS(nng_recvmsg(rep, &msg, 0));
+	nng_msg_free(msg);
+	NUTS_TRUE(nuts_poll_fd(fd) == false);
+
+	NUTS_CLOSE(req);
+	NUTS_CLOSE(rep);
+}
+
+// A raw REP listener must reject a connecting REP peer (protocol
+// mismatch); we verify via the socket's "reject" statistic.
+static void
+test_xrep_validate_peer(void)
+{
+	nng_socket s1, s2;
+	nng_stat * stats;
+	nng_stat * reject;
+	char *     addr;
+
+	NUTS_ADDR(addr, "inproc");
+
+	NUTS_PASS(nng_rep0_open_raw(&s1));
+	NUTS_PASS(nng_rep0_open(&s2));
+
+	NUTS_PASS(nng_listen(s1, addr, NULL, 0));
+	NUTS_PASS(nng_dial(s2, addr, NULL, NNG_FLAG_NONBLOCK));
+
+	// Give the (nonblocking) dial a chance to be attempted and refused.
+	NUTS_SLEEP(100);
+	NUTS_PASS(nng_stats_get(&stats));
+
+	NUTS_TRUE(stats != NULL);
+	NUTS_TRUE((reject = nng_stat_find_socket(stats, s1)) != NULL);
+	NUTS_TRUE((reject = nng_stat_find(reject, "reject")) != NULL);
+
+	NUTS_TRUE(nng_stat_type(reject) == NNG_STAT_COUNTER);
+	NUTS_TRUE(nng_stat_value(reject) > 0);
+
+	// Use NUTS_CLOSE for consistency with the rest of this file.
+	NUTS_CLOSE(s1);
+	NUTS_CLOSE(s2);
+	nng_stats_free(stats);
+}
+
+// Sending a reply addressed to a pipe that was closed must succeed
+// quietly (the reply is dropped, not an error).
+static void
+test_xrep_close_pipe_before_send(void)
+{
+	nng_socket rep;
+	nng_socket req;
+	nng_pipe   p;
+	nng_aio *  aio1;
+	nng_msg *  m;
+
+	NUTS_PASS(nng_rep0_open_raw(&rep));
+	NUTS_PASS(nng_req0_open(&req));
+	NUTS_PASS(nng_socket_set_ms(rep, NNG_OPT_RECVTIMEO, 1000));
+	NUTS_PASS(nng_socket_set_ms(rep, NNG_OPT_SENDTIMEO, 1000));
+	NUTS_PASS(nng_socket_set_ms(req, NNG_OPT_SENDTIMEO, 1000));
+	NUTS_PASS(nng_aio_alloc(&aio1, NULL, NULL));
+
+	NUTS_MARRY(req, rep);
+	NUTS_SEND(req, "test");
+
+	nng_recv_aio(rep, aio1);
+	nng_aio_wait(aio1);
+	NUTS_PASS(nng_aio_result(aio1));
+	NUTS_TRUE((m = nng_aio_get_msg(aio1)) != NULL);
+	p = nng_msg_get_pipe(m);
+	NUTS_PASS(nng_pipe_close(p));
+	NUTS_PASS(nng_sendmsg(rep, m, 0));
+
+	NUTS_CLOSE(req);
+	NUTS_CLOSE(rep);
+	nng_aio_free(aio1);
+}
+
+// Flood a pipe's send side with replies and close the pipe mid-stream;
+// xrep must keep accepting sends (no back-pressure) without error.
+static void
+test_xrep_close_pipe_during_send(void)
+{
+	nng_socket rep;
+	nng_socket req;
+	nng_pipe   p;
+	nng_msg *  m;
+
+	NUTS_PASS(nng_rep0_open_raw(&rep));
+	NUTS_PASS(nng_req0_open_raw(&req));
+	NUTS_PASS(nng_socket_set_ms(rep, NNG_OPT_RECVTIMEO, 1000));
+	NUTS_PASS(nng_socket_set_ms(rep, NNG_OPT_SENDTIMEO, 200));
+	NUTS_PASS(nng_socket_set_ms(req, NNG_OPT_SENDTIMEO, 1000));
+	NUTS_PASS(nng_socket_set_int(rep, NNG_OPT_SENDBUF, 20));
+	NUTS_PASS(nng_socket_set_int(rep, NNG_OPT_RECVBUF, 20));
+	NUTS_PASS(nng_socket_set_int(req, NNG_OPT_SENDBUF, 20));
+	NUTS_PASS(nng_socket_set_int(req, NNG_OPT_RECVBUF, 1));
+
+	NUTS_MARRY(req, rep);
+
+	// Send one raw request so we can learn the pipe id.
+	NUTS_PASS(nng_msg_alloc(&m, 4));
+	NUTS_PASS(nng_msg_append_u32(m, (unsigned) 0x81000000u));
+	NUTS_PASS(nng_sendmsg(req, m, 0));
+	NUTS_PASS(nng_recvmsg(rep, &m, 0));
+	p = nng_msg_get_pipe(m);
+	nng_msg_free(m);
+
+	for (int i = 0; i < 100; i++) {
+		NUTS_PASS(nng_msg_alloc(&m, 4));
+		NUTS_PASS(nng_msg_header_append_u32(m, nng_pipe_id(p)));
+		NUTS_PASS(
+		    nng_msg_header_append_u32(m, (unsigned) i | 0x80000000u));
+		// xrep does not exert back-pressure
+		NUTS_PASS(nng_sendmsg(rep, m, 0));
+	}
+	NUTS_PASS(nng_pipe_close(p));
+
+	NUTS_CLOSE(req);
+	NUTS_CLOSE(rep);
+}
+
+// Close the socket while inbound requests are still queued; teardown must
+// be clean even with a small receive buffer.
+static void
+test_xrep_close_during_recv(void)
+{
+	nng_socket rep;
+	nng_socket req;
+	nng_msg *  m;
+
+	NUTS_PASS(nng_rep0_open_raw(&rep));
+	NUTS_PASS(nng_req0_open_raw(&req));
+	NUTS_PASS(nng_socket_set_ms(rep, NNG_OPT_RECVTIMEO, 1000));
+	NUTS_PASS(nng_socket_set_ms(req, NNG_OPT_SENDTIMEO, 100));
+	NUTS_PASS(nng_socket_set_int(rep, NNG_OPT_RECVBUF, 5));
+	NUTS_PASS(nng_socket_set_int(req, NNG_OPT_SENDBUF, 20));
+
+	NUTS_MARRY(req, rep);
+
+	for (unsigned i = 0; i < 100; i++) {
+		int rv;
+		NUTS_PASS(nng_msg_alloc(&m, 4));
+		NUTS_PASS(nng_msg_header_append_u32(m, i | 0x80000000u));
+		rv = nng_sendmsg(req, m, 0);
+		if (rv == NNG_ETIMEDOUT) {
+			// Queues are full; stop stuffing messages in.
+			nng_msg_free(m);
+			break;
+		}
+	}
+	NUTS_CLOSE(req);
+	NUTS_CLOSE(rep);
+}
+
+// A receive posted on an already-stopped aio must complete with
+// NNG_ECANCELED.
+static void
+test_xrep_recv_aio_stopped(void)
+{
+	nng_socket rep;
+	nng_aio *  aio;
+
+	NUTS_PASS(nng_rep0_open_raw(&rep));
+	NUTS_PASS(nng_aio_alloc(&aio, NULL, NULL));
+
+	nng_aio_stop(aio);
+	nng_recv_aio(rep, aio);
+	nng_aio_wait(aio);
+	NUTS_FAIL(nng_aio_result(aio), NNG_ECANCELED);
+	NUTS_CLOSE(rep);
+	nng_aio_free(aio);
+}
+
+// A reply without a routing header is silently discarded (nothing
+// arrives back at the requester).
+static void
+test_xrep_send_no_header(void)
+{
+	nng_socket rep;
+	nng_socket req;
+	nng_msg *  m;
+
+	NUTS_PASS(nng_req0_open_raw(&req));
+	NUTS_PASS(nng_rep0_open_raw(&rep));
+	NUTS_PASS(nng_socket_set_ms(rep, NNG_OPT_RECVTIMEO, 100));
+	NUTS_PASS(nng_socket_set_ms(req, NNG_OPT_RECVTIMEO, 100));
+	NUTS_PASS(nng_socket_set_ms(rep, NNG_OPT_SENDTIMEO, 1000));
+	NUTS_PASS(nng_socket_set_ms(req, NNG_OPT_SENDTIMEO, 1000));
+
+	NUTS_MARRY(req, rep);
+
+	NUTS_PASS(nng_msg_alloc(&m, 4));
+	NUTS_PASS(nng_sendmsg(rep, m, 0));
+	NUTS_FAIL(nng_recvmsg(rep, &m, 0), NNG_ETIMEDOUT);
+
+	NUTS_CLOSE(req);
+	NUTS_CLOSE(rep);
+}
+
+// An inbound message with no end-of-backtrace marker is garbage; it is
+// never delivered to the application.
+static void
+test_xrep_recv_garbage(void)
+{
+	nng_socket rep;
+	nng_socket req;
+	nng_msg *  m;
+
+	NUTS_PASS(nng_rep0_open_raw(&rep));
+	NUTS_PASS(nng_req0_open_raw(&req));
+	NUTS_PASS(nng_socket_set_ms(rep, NNG_OPT_RECVTIMEO, 100));
+	NUTS_PASS(nng_socket_set_ms(rep, NNG_OPT_SENDTIMEO, 100));
+	NUTS_PASS(nng_socket_set_ms(req, NNG_OPT_SENDTIMEO, 1000));
+
+	NUTS_MARRY(req, rep);
+
+	NUTS_PASS(nng_msg_alloc(&m, 4));
+	NUTS_PASS(nng_msg_append_u32(m, 1u));
+	NUTS_PASS(nng_sendmsg(req, m, 0));
+	NUTS_FAIL(nng_recvmsg(rep, &m, 0), NNG_ETIMEDOUT);
+
+	NUTS_CLOSE(req);
+	NUTS_CLOSE(rep);
+}
+
+// Exercise NNG_OPT_MAXTTL: valid range, rejection of out-of-range values,
+// and rejection of mismatched sizes/types.
+static void
+test_xrep_ttl_option(void)
+{
+	nng_socket  rep;
+	int         v;
+	bool        b;
+	size_t      sz;
+	const char *opt = NNG_OPT_MAXTTL;
+
+	NUTS_PASS(nng_rep0_open_raw(&rep));
+
+	NUTS_PASS(nng_socket_set_int(rep, opt, 1));
+	NUTS_FAIL(nng_socket_set_int(rep, opt, 0), NNG_EINVAL);
+	NUTS_FAIL(nng_socket_set_int(rep, opt, -1), NNG_EINVAL);
+	NUTS_FAIL(nng_socket_set_int(rep, opt, 16), NNG_EINVAL);
+	NUTS_FAIL(nng_socket_set_int(rep, opt, 256), NNG_EINVAL);
+	NUTS_PASS(nng_socket_set_int(rep, opt, 3));
+	NUTS_PASS(nng_socket_get_int(rep, opt, &v));
+	NUTS_TRUE(v == 3);
+	v  = 0;
+	sz = sizeof(v);
+	NUTS_PASS(nng_socket_get(rep, opt, &v, &sz));
+	NUTS_TRUE(v == 3);
+	NUTS_TRUE(sz == sizeof(v));
+
+	// Use NUTS_FAIL/NUTS_CLOSE consistently, as in the rest of this
+	// file, rather than NUTS_TRUE comparisons against error codes.
+	NUTS_FAIL(nng_socket_set(rep, opt, "", 1), NNG_EINVAL);
+	sz = 1;
+	NUTS_FAIL(nng_socket_get(rep, opt, &v, &sz), NNG_EINVAL);
+	NUTS_FAIL(nng_socket_set_bool(rep, opt, true), NNG_EBADTYPE);
+	NUTS_FAIL(nng_socket_get_bool(rep, opt, &b), NNG_EBADTYPE);
+
+	NUTS_CLOSE(rep);
+}
+
+// With MAXTTL == 3, messages that would exceed three hops are dropped;
+// note that xrep itself adds one hop when it receives a message.
+static void
+test_xrep_ttl_drop(void)
+{
+	nng_socket rep;
+	nng_socket req;
+	nng_msg *  m;
+
+	NUTS_PASS(nng_rep0_open_raw(&rep));
+	NUTS_PASS(nng_req0_open_raw(&req));
+	NUTS_PASS(nng_socket_set_int(rep, NNG_OPT_MAXTTL, 3));
+	NUTS_PASS(nng_socket_set_ms(rep, NNG_OPT_RECVTIMEO, 200));
+	NUTS_PASS(nng_socket_set_ms(req, NNG_OPT_SENDTIMEO, 1000));
+
+	NUTS_MARRY(req, rep);
+
+	// Send messages.  Note that xrep implicitly adds a hop on receive.
+
+	NUTS_PASS(nng_msg_alloc(&m, 0));
+	NUTS_PASS(nng_msg_append_u32(m, 1u)); // 2 hops
+	NUTS_PASS(nng_msg_append_u32(m, 0x80000001u));
+	NUTS_PASS(nng_msg_append(m, "PASS1", 6));
+	NUTS_PASS(nng_sendmsg(req, m, 0));
+
+	NUTS_PASS(nng_msg_alloc(&m, 0));
+	NUTS_PASS(nng_msg_append_u32(m, 1u)); // 4 hops -- discard!
+	NUTS_PASS(nng_msg_append_u32(m, 2u));
+	NUTS_PASS(nng_msg_append_u32(m, 3u));
+	NUTS_PASS(nng_msg_append_u32(m, 0x80000002u));
+	NUTS_PASS(nng_msg_append(m, "FAIL2", 6));
+	NUTS_PASS(nng_sendmsg(req, m, 0));
+
+	NUTS_PASS(nng_msg_alloc(&m, 0));
+	NUTS_PASS(nng_msg_append_u32(m, 1u)); // 3 hops - passes
+	NUTS_PASS(nng_msg_append_u32(m, 2u));
+	NUTS_PASS(nng_msg_append_u32(m, 0x80000003u));
+	NUTS_PASS(nng_msg_append(m, "PASS3", 6));
+	NUTS_PASS(nng_sendmsg(req, m, 0));
+
+	NUTS_PASS(nng_msg_alloc(&m, 0));
+	NUTS_PASS(nng_msg_append_u32(m, 1u)); // 4 hops -- discard!
+	NUTS_PASS(nng_msg_append_u32(m, 2u));
+	NUTS_PASS(nng_msg_append_u32(m, 3u));
+	NUTS_PASS(nng_msg_append_u32(m, 0x80000003u));
+	NUTS_PASS(nng_msg_append(m, "FAIL4", 6));
+	NUTS_PASS(nng_sendmsg(req, m, 0));
+
+	// So on receive we should see 80000001 and 80000003.
+	NUTS_PASS(nng_recvmsg(rep, &m, 0));
+	NUTS_TRUE(nng_msg_header_len(m) == 12);
+	NUTS_TRUE(nng_msg_len(m) == 6);
+	NUTS_TRUE(strcmp(nng_msg_body(m), "PASS1") == 0);
+	nng_msg_free(m);
+
+	NUTS_PASS(nng_recvmsg(rep, &m, 0));
+	NUTS_TRUE(nng_msg_header_len(m) == 16); // 3 hops + ID
+	NUTS_TRUE(nng_msg_len(m) == 6);
+	NUTS_TRUE(strcmp(nng_msg_body(m), "PASS3") == 0);
+	nng_msg_free(m);
+
+	NUTS_FAIL(nng_recvmsg(rep, &m, 0), NNG_ETIMEDOUT);
+
+	NUTS_CLOSE(req);
+	NUTS_CLOSE(rep);
+}
+
+// Test registry for the raw REP (xrep) protocol; NULL-terminated.
+NUTS_TESTS = {
+	{ "xrep identity", test_xrep_identity },
+	{ "xrep raw", test_xrep_raw },
+	{ "xrep no context", test_xrep_no_context },
+	{ "xrep poll readable", test_xrep_poll_readable },
+	{ "xrep poll writable", test_xrep_poll_writeable },
+	{ "xrep validate peer", test_xrep_validate_peer },
+	{ "xrep close pipe before send", test_xrep_close_pipe_before_send },
+	{ "xrep close pipe during send", test_xrep_close_pipe_during_send },
+	{ "xrep close during recv", test_xrep_close_during_recv },
+	{ "xrep recv aio stopped", test_xrep_recv_aio_stopped },
+	{ "xrep send no header", test_xrep_send_no_header },
+	{ "xrep recv garbage", test_xrep_recv_garbage },
+	{ "xrep ttl option", test_xrep_ttl_option },
+	{ "xrep ttl drop", test_xrep_ttl_drop },
+	{ NULL, NULL },
+};
diff --git a/src/sp/protocol/reqrep0/xreq.c b/src/sp/protocol/reqrep0/xreq.c
new file mode 100644
index 00000000..bcb218bf
--- /dev/null
+++ b/src/sp/protocol/reqrep0/xreq.c
@@ -0,0 +1,319 @@
+//
+// Copyright 2020 Staysail Systems, Inc. <info@staysail.tech>
+// Copyright 2018 Capitar IT Group BV <info@capitar.com>
+//
+// This software is supplied under the terms of the MIT License, a
+// copy of which should be located in the distribution where this
+// file was obtained (LICENSE.txt). A copy of the license may also be
+// found online at https://opensource.org/licenses/MIT.
+//
+
+#include <stdio.h>
+
+#include "core/nng_impl.h"
+#include "nng/protocol/reqrep0/req.h"
+
+// Request protocol. The REQ protocol is the "request" side of a
+// request-reply pair. This is useful for building RPC clients, for example.
+
+typedef struct xreq0_pipe xreq0_pipe;
+typedef struct xreq0_sock xreq0_sock;
+
+// An xreq0_sock is our per-socket protocol private structure.
+struct xreq0_sock {
+	nni_msgq *     uwq; // upper write queue (outgoing requests)
+	nni_msgq *     urq; // upper read queue (incoming replies)
+	nni_atomic_int ttl; // stored for NNG_OPT_MAXTTL get/set
+};
+
+// An xreq0_pipe is our per-pipe protocol private structure.
+struct xreq0_pipe {
+	nni_pipe *  pipe; // underlying transport pipe
+	xreq0_sock *req;  // back-pointer to owning socket
+	nni_aio     aio_getq; // pulls requests from the socket uwq
+	nni_aio     aio_send; // writes a request to the pipe
+	nni_aio     aio_recv; // reads a reply from the pipe
+	nni_aio     aio_putq; // pushes a reply onto the socket urq
+};
+
+static void xreq0_sock_fini(void *);
+static void xreq0_getq_cb(void *);
+static void xreq0_send_cb(void *);
+static void xreq0_recv_cb(void *);
+static void xreq0_putq_cb(void *);
+
+// Initialize per-socket state.  Always succeeds (returns 0).
+static int
+xreq0_sock_init(void *arg, nni_sock *sock)
+{
+	xreq0_sock *s = arg;
+
+	nni_atomic_init(&s->ttl);
+	nni_atomic_set(&s->ttl, 8);
+	s->uwq = nni_sock_sendq(sock);
+	s->urq = nni_sock_recvq(sock);
+
+	return (0);
+}
+
+// Socket open hook: nothing to do for raw REQ.
+static void
+xreq0_sock_open(void *arg)
+{
+	NNI_ARG_UNUSED(arg);
+}
+
+// Socket close hook: nothing to do for raw REQ.
+static void
+xreq0_sock_close(void *arg)
+{
+	NNI_ARG_UNUSED(arg);
+}
+
+// Socket teardown hook: no dynamically held per-socket resources.
+static void
+xreq0_sock_fini(void *arg)
+{
+	NNI_ARG_UNUSED(arg);
+}
+
+// Stop all pipe aios, waiting for callbacks to finish.
+static void
+xreq0_pipe_stop(void *arg)
+{
+	xreq0_pipe *p = arg;
+
+	nni_aio_stop(&p->aio_getq);
+	nni_aio_stop(&p->aio_putq);
+	nni_aio_stop(&p->aio_recv);
+	nni_aio_stop(&p->aio_send);
+}
+
+// Release per-pipe aio resources.
+static void
+xreq0_pipe_fini(void *arg)
+{
+	xreq0_pipe *p = arg;
+
+	nni_aio_fini(&p->aio_getq);
+	nni_aio_fini(&p->aio_putq);
+	nni_aio_fini(&p->aio_recv);
+	nni_aio_fini(&p->aio_send);
+}
+
+// Initialize per-pipe state.  Always succeeds (returns 0).
+static int
+xreq0_pipe_init(void *arg, nni_pipe *pipe, void *s)
+{
+	xreq0_pipe *p = arg;
+
+	nni_aio_init(&p->aio_getq, xreq0_getq_cb, p);
+	nni_aio_init(&p->aio_putq, xreq0_putq_cb, p);
+	nni_aio_init(&p->aio_recv, xreq0_recv_cb, p);
+	nni_aio_init(&p->aio_send, xreq0_send_cb, p);
+
+	p->pipe = pipe;
+	p->req  = s;
+	return (0);
+}
+
+// Start a pipe: validate the peer protocol, then begin pulling requests
+// from the socket write queue and receiving replies from the transport.
+static int
+xreq0_pipe_start(void *arg)
+{
+	xreq0_pipe *p = arg;
+	xreq0_sock *s = p->req;
+
+	if (nni_pipe_peer(p->pipe) != NNG_REQ0_PEER) {
+		// Peer protocol mismatch.
+		return (NNG_EPROTO);
+	}
+
+	nni_msgq_aio_get(s->uwq, &p->aio_getq);
+	nni_pipe_recv(p->pipe, &p->aio_recv);
+	return (0);
+}
+
+// Close a pipe: abort all of its outstanding aios.
+static void
+xreq0_pipe_close(void *arg)
+{
+	xreq0_pipe *p = arg;
+
+	nni_aio_close(&p->aio_getq);
+	nni_aio_close(&p->aio_putq);
+	nni_aio_close(&p->aio_recv);
+	nni_aio_close(&p->aio_send);
+}
+
+// For raw mode we can just let the pipes "contend" via get queue to get a
+// message from the upper write queue. The msg queue implementation
+// actually provides ordering, so load will be spread automatically.
+// (NB: We may have to revise this in the future if we want to provide some
+// kind of priority.)
+
+// Upper write queue callback: a request became available; hand it to the
+// transport for transmission.
+static void
+xreq0_getq_cb(void *arg)
+{
+	xreq0_pipe *p = arg;
+
+	if (nni_aio_result(&p->aio_getq) != 0) {
+		nni_pipe_close(p->pipe);
+		return;
+	}
+
+	nni_aio_set_msg(&p->aio_send, nni_aio_get_msg(&p->aio_getq));
+	nni_aio_set_msg(&p->aio_getq, NULL);
+
+	nni_pipe_send(p->pipe, &p->aio_send);
+}
+
+// Transport send completion: on failure free the message and close the
+// pipe; on success resume pulling from the upper write queue.
+static void
+xreq0_send_cb(void *arg)
+{
+	xreq0_pipe *p = arg;
+
+	if (nni_aio_result(&p->aio_send) != 0) {
+		nni_msg_free(nni_aio_get_msg(&p->aio_send));
+		nni_aio_set_msg(&p->aio_send, NULL);
+		nni_pipe_close(p->pipe);
+		return;
+	}
+
+	// Sent a message so we just need to look for another one.
+	nni_msgq_aio_get(p->req->uwq, &p->aio_getq);
+}
+
+// Upper read queue put completion: reply delivered (or dropped on
+// failure); resume receiving from the transport.
+static void
+xreq0_putq_cb(void *arg)
+{
+	xreq0_pipe *p = arg;
+
+	if (nni_aio_result(&p->aio_putq) != 0) {
+		nni_msg_free(nni_aio_get_msg(&p->aio_putq));
+		nni_aio_set_msg(&p->aio_putq, NULL);
+		nni_pipe_close(p->pipe);
+		return;
+	}
+	nni_aio_set_msg(&p->aio_putq, NULL);
+
+	nni_pipe_recv(p->pipe, &p->aio_recv);
+}
+
+// Transport receive completion: move the 4-byte backtrace chunks from the
+// body to the header until the end-marker chunk (high bit set), then push
+// the reply onto the upper read queue.  Malformed messages (no chunk left
+// before the end marker) close the pipe.
+static void
+xreq0_recv_cb(void *arg)
+{
+	xreq0_pipe *p    = arg;
+	xreq0_sock *sock = p->req;
+	nni_msg *   msg;
+	bool        end;
+
+	if (nni_aio_result(&p->aio_recv) != 0) {
+		nni_pipe_close(p->pipe);
+		return;
+	}
+
+	msg = nni_aio_get_msg(&p->aio_recv);
+	nni_aio_set_msg(&p->aio_recv, NULL);
+	nni_msg_set_pipe(msg, nni_pipe_id(p->pipe));
+	end = false;
+
+	while (!end) {
+		uint8_t *body;
+
+		if (nni_msg_len(msg) < 4) {
+			// Peer gave us garbage, so kick it.
+			nni_msg_free(msg);
+			nni_pipe_close(p->pipe);
+			return;
+		}
+		body = nni_msg_body(msg);
+		end  = ((body[0] & 0x80u) != 0);
+
+		// Use the internal nni_ API here for consistency with the
+		// rest of this file (nng_msg_header_append is the public
+		// wrapper for the same operation).
+		if (nni_msg_header_append(msg, body, sizeof(uint32_t)) != 0) {
+			// TODO: bump a no-memory stat
+			nni_msg_free(msg);
+			// Closing the pipe may release some memory.
+			// It at least gives an indication to the peer
+			// that we've lost the message.
+			nni_pipe_close(p->pipe);
+			return;
+		}
+		nni_msg_trim(msg, sizeof(uint32_t));
+	}
+	nni_aio_set_msg(&p->aio_putq, msg);
+	nni_msgq_aio_put(sock->urq, &p->aio_putq);
+}
+
+// Raw-mode send: queue the request on the upper write queue; any started
+// pipe may pick it up (see the load-spreading note above).
+static void
+xreq0_sock_send(void *arg, nni_aio *aio)
+{
+	xreq0_sock *s = arg;
+
+	nni_msgq_aio_put(s->uwq, aio);
+}
+
+// Raw-mode receive: take the next reply from the upper read queue.
+static void
+xreq0_sock_recv(void *arg, nni_aio *aio)
+{
+	xreq0_sock *s = arg;
+
+	nni_msgq_aio_get(s->urq, aio);
+}
+
+// Option setter for NNG_OPT_MAXTTL; accepts 1..NNI_MAX_MAX_TTL.
+static int
+xreq0_sock_set_max_ttl(void *arg, const void *buf, size_t sz, nni_opt_type t)
+{
+	xreq0_sock *s = arg;
+	int         ttl;
+	int         rv;
+	if ((rv = nni_copyin_int(&ttl, buf, sz, 1, NNI_MAX_MAX_TTL, t)) == 0) {
+		nni_atomic_set(&s->ttl, ttl);
+	}
+	return (rv);
+}
+
+// Option getter for NNG_OPT_MAXTTL.
+static int
+xreq0_sock_get_max_ttl(void *arg, void *buf, size_t *szp, nni_opt_type t)
+{
+	xreq0_sock *s = arg;
+	return (nni_copyout_int(nni_atomic_get(&s->ttl), buf, szp, t));
+}
+
+// Linkage to the protocol core: pipe callbacks, socket options, socket
+// callbacks, and the protocol descriptor (REQ/0 in raw mode).
+static nni_proto_pipe_ops xreq0_pipe_ops = {
+	.pipe_size  = sizeof(xreq0_pipe),
+	.pipe_init  = xreq0_pipe_init,
+	.pipe_fini  = xreq0_pipe_fini,
+	.pipe_start = xreq0_pipe_start,
+	.pipe_close = xreq0_pipe_close,
+	.pipe_stop  = xreq0_pipe_stop,
+};
+
+static nni_option xreq0_sock_options[] = {
+	{
+	    .o_name = NNG_OPT_MAXTTL,
+	    .o_get  = xreq0_sock_get_max_ttl,
+	    .o_set  = xreq0_sock_set_max_ttl,
+	},
+	// terminate list
+	{
+	    .o_name = NULL,
+	},
+};
+
+static nni_proto_sock_ops xreq0_sock_ops = {
+	.sock_size    = sizeof(xreq0_sock),
+	.sock_init    = xreq0_sock_init,
+	.sock_fini    = xreq0_sock_fini,
+	.sock_open    = xreq0_sock_open,
+	.sock_close   = xreq0_sock_close,
+	.sock_options = xreq0_sock_options,
+	.sock_send    = xreq0_sock_send,
+	.sock_recv    = xreq0_sock_recv,
+};
+
+static nni_proto xreq0_proto = {
+	.proto_version  = NNI_PROTOCOL_VERSION,
+	.proto_self     = { NNG_REQ0_SELF, NNG_REQ0_SELF_NAME },
+	.proto_peer     = { NNG_REQ0_PEER, NNG_REQ0_PEER_NAME },
+	.proto_flags    = NNI_PROTO_FLAG_SNDRCV | NNI_PROTO_FLAG_RAW,
+	.proto_sock_ops = &xreq0_sock_ops,
+	.proto_pipe_ops = &xreq0_pipe_ops,
+	.proto_ctx_ops  = NULL, // raw mode does not support contexts
+};
+
+// Public entry point: open a raw-mode REQ/0 socket.
+int
+nng_req0_open_raw(nng_socket *sock)
+{
+	return (nni_proto_open(sock, &xreq0_proto));
+}
diff --git a/src/sp/protocol/reqrep0/xreq_test.c b/src/sp/protocol/reqrep0/xreq_test.c
new file mode 100644
index 00000000..8c850cba
--- /dev/null
+++ b/src/sp/protocol/reqrep0/xreq_test.c
@@ -0,0 +1,367 @@
+//
+// Copyright 2020 Staysail Systems, Inc. <info@staysail.tech>
+//
+// This software is supplied under the terms of the MIT License, a
+// copy of which should be located in the distribution where this
+// file was obtained (LICENSE.txt). A copy of the license may also be
+// found online at https://opensource.org/licenses/MIT.
+//
+
+#include <nuts.h>
+
+// Verify the protocol/peer numbers and names reported by a raw REQ socket.
+static void
+test_xreq_identity(void)
+{
+ nng_socket s;
+ int p1, p2;
+ char * n1;
+ char * n2;
+
+ NUTS_PASS(nng_req0_open_raw(&s));
+ NUTS_PASS(nng_socket_get_int(s, NNG_OPT_PROTO, &p1));
+ NUTS_PASS(nng_socket_get_int(s, NNG_OPT_PEER, &p2));
+ NUTS_PASS(nng_socket_get_string(s, NNG_OPT_PROTONAME, &n1));
+ NUTS_PASS(nng_socket_get_string(s, NNG_OPT_PEERNAME, &n2));
+ NUTS_CLOSE(s);
+ NUTS_TRUE(p1 == NNG_REQ0_SELF);
+ NUTS_TRUE(p2 == NNG_REQ0_PEER);
+ NUTS_MATCH(n1, NNG_REQ0_SELF_NAME);
+ NUTS_MATCH(n2, NNG_REQ0_PEER_NAME);
+ nng_strfree(n1);
+ nng_strfree(n2);
+}
+
+// A socket opened with nng_req0_open_raw must report NNG_OPT_RAW as true.
+static void
+test_xreq_raw(void)
+{
+ nng_socket s;
+ bool b;
+
+ NUTS_PASS(nng_req0_open_raw(&s));
+ NUTS_PASS(nng_socket_get_bool(s, NNG_OPT_RAW, &b));
+ NUTS_TRUE(b);
+ NUTS_CLOSE(s);
+}
+
+// Raw mode does not support contexts; opening one must fail with ENOTSUP.
+static void
+test_xreq_no_context(void)
+{
+ nng_socket s;
+ nng_ctx ctx;
+
+ NUTS_PASS(nng_req0_open_raw(&s));
+ NUTS_FAIL(nng_ctx_open(&ctx, s), NNG_ENOTSUP);
+ NUTS_CLOSE(s);
+}
+
+// The SENDFD pollable becomes writable only once a peer is connected.
+static void
+test_xreq_poll_writeable(void)
+{
+ int fd;
+ nng_socket req;
+ nng_socket rep;
+
+ NUTS_PASS(nng_req0_open_raw(&req));
+ NUTS_PASS(nng_rep0_open(&rep));
+ NUTS_PASS(nng_socket_get_int(req, NNG_OPT_SENDFD, &fd));
+ NUTS_TRUE(fd >= 0);
+
+ // We can't write until we have a connection.
+ NUTS_TRUE(nuts_poll_fd(fd) == false);
+
+ NUTS_MARRY(req, rep);
+
+ // Now it's writable.
+ NUTS_TRUE(nuts_poll_fd(fd) == true);
+
+ NUTS_CLOSE(req);
+ NUTS_CLOSE(rep);
+}
+
+// The RECVFD pollable tracks message availability: not readable before
+// connect, not readable before a reply arrives, readable after, and
+// cleared again once the reply is received.
+static void
+test_xreq_poll_readable(void)
+{
+ int fd;
+ nng_socket req;
+ nng_socket rep;
+ nng_msg * msg;
+
+ NUTS_PASS(nng_req0_open_raw(&req));
+ NUTS_PASS(nng_rep0_open(&rep));
+ NUTS_PASS(nng_socket_get_int(req, NNG_OPT_RECVFD, &fd));
+ NUTS_PASS(nng_socket_set_ms(rep, NNG_OPT_RECVTIMEO, 1000));
+ NUTS_PASS(nng_socket_set_ms(req, NNG_OPT_RECVTIMEO, 1000));
+ NUTS_PASS(nng_socket_set_ms(rep, NNG_OPT_SENDTIMEO, 1000));
+ NUTS_PASS(nng_socket_set_ms(req, NNG_OPT_SENDTIMEO, 1000));
+
+ NUTS_TRUE(fd >= 0);
+
+ // Not readable if not connected!
+ NUTS_TRUE(nuts_poll_fd(fd) == false);
+
+ // Even after connect (no message yet)
+ NUTS_MARRY(req, rep);
+ NUTS_TRUE(nuts_poll_fd(fd) == false);
+
+ // But once we send messages, it is.
+ // We have to send a request, in order to send a reply.
+ NUTS_PASS(nng_msg_alloc(&msg, 0));
+ // Request ID (raw mode: the caller supplies it, high bit set)
+ NUTS_PASS(nng_msg_append_u32(msg, 0x80000000));
+ NUTS_PASS(nng_sendmsg(req, msg, 0));
+
+ NUTS_PASS(nng_recvmsg(rep, &msg, 0));
+ NUTS_PASS(nng_sendmsg(rep, msg, 0));
+
+ // Give the reply a moment to land in the receive queue.
+ NUTS_SLEEP(100);
+
+ NUTS_TRUE(nuts_poll_fd(fd) == true);
+
+ // and receiving makes it no longer ready
+ NUTS_PASS(nng_recvmsg(req, &msg, 0));
+ nng_msg_free(msg);
+ NUTS_TRUE(nuts_poll_fd(fd) == false);
+
+ NUTS_CLOSE(req);
+ NUTS_CLOSE(rep);
+}
+
+// Connecting REQ to REQ must be rejected; verify via the "reject" stat.
+static void
+test_xreq_validate_peer(void)
+{
+ nng_socket s1, s2;
+ nng_stat * stats;
+ nng_stat * reject;
+ char * addr;
+
+ NUTS_ADDR(addr, "inproc");
+
+ NUTS_PASS(nng_req0_open_raw(&s1));
+ NUTS_PASS(nng_req0_open(&s2));
+
+ NUTS_PASS(nng_listen(s1, addr, NULL, 0));
+ NUTS_PASS(nng_dial(s2, addr, NULL, NNG_FLAG_NONBLOCK));
+
+ // Allow the (asynchronous) rejection to occur.
+ NUTS_SLEEP(100);
+ NUTS_PASS(nng_stats_get(&stats));
+
+ NUTS_TRUE(stats != NULL);
+ NUTS_TRUE((reject = nng_stat_find_socket(stats, s1)) != NULL);
+ NUTS_TRUE((reject = nng_stat_find(reject, "reject")) != NULL);
+
+ NUTS_TRUE(nng_stat_type(reject) == NNG_STAT_COUNTER);
+ NUTS_TRUE(nng_stat_value(reject) > 0);
+
+ NUTS_CLOSE(s1);
+ NUTS_CLOSE(s2);
+ nng_stats_free(stats);
+}
+
+// A receive posted with an already-stopped aio must complete immediately
+// with NNG_ECANCELED.
+static void
+test_xreq_recv_aio_stopped(void)
+{
+ nng_socket req;
+ nng_aio * aio;
+
+ NUTS_PASS(nng_req0_open_raw(&req));
+ NUTS_PASS(nng_aio_alloc(&aio, NULL, NULL));
+
+ nng_aio_stop(aio);
+ nng_recv_aio(req, aio);
+ nng_aio_wait(aio);
+ NUTS_FAIL(nng_aio_result(aio), NNG_ECANCELED);
+ NUTS_CLOSE(req);
+ nng_aio_free(aio);
+}
+
+// A reply whose header is missing the request ID is garbage; the raw REQ
+// socket must drop it rather than deliver it, so the receive times out.
+static void
+test_xreq_recv_garbage(void)
+{
+	nng_socket rep;
+	nng_socket req;
+	nng_msg *  m;
+	uint32_t   req_id;
+
+	NUTS_PASS(nng_rep0_open_raw(&rep));
+	NUTS_PASS(nng_req0_open_raw(&req));
+	NUTS_PASS(nng_socket_set_ms(req, NNG_OPT_RECVTIMEO, 100));
+	NUTS_PASS(nng_socket_set_ms(req, NNG_OPT_SENDTIMEO, 1000));
+	NUTS_PASS(nng_socket_set_ms(rep, NNG_OPT_SENDTIMEO, 1000));
+	// Was a duplicated SENDTIMEO line; the rep socket also receives below,
+	// so bound that with a receive timeout instead.
+	NUTS_PASS(nng_socket_set_ms(rep, NNG_OPT_RECVTIMEO, 1000));
+
+	NUTS_MARRY(req, rep);
+
+	NUTS_PASS(nng_msg_alloc(&m, 0));
+	NUTS_PASS(nng_msg_append_u32(m, 0x80000000));
+	NUTS_PASS(nng_sendmsg(req, m, 0));
+
+	NUTS_PASS(nng_recvmsg(rep, &m, 0));
+
+	// The message will have a header that contains the 32-bit pipe ID,
+	// followed by the 32-bit request ID. We will discard the request
+	// ID before sending it out.
+	NUTS_TRUE(nng_msg_header_len(m) == 8);
+	NUTS_PASS(nng_msg_header_chop_u32(m, &req_id));
+	NUTS_TRUE(req_id == 0x80000000);
+
+	NUTS_PASS(nng_sendmsg(rep, m, 0));
+	NUTS_FAIL(nng_recvmsg(req, &m, 0), NNG_ETIMEDOUT);
+
+	NUTS_CLOSE(req);
+	NUTS_CLOSE(rep);
+}
+
+// A raw REQ receiver must deliver the full backtrace header (multiple hops
+// plus the terminating request ID) unchanged to the application.
+static void
+test_xreq_recv_header(void)
+{
+	nng_socket rep;
+	nng_socket req;
+	nng_msg *  m;
+	nng_pipe   p1, p2;
+	uint32_t   id;
+
+	NUTS_PASS(nng_rep0_open_raw(&rep));
+	NUTS_PASS(nng_req0_open_raw(&req));
+	NUTS_PASS(nng_socket_set_ms(req, NNG_OPT_RECVTIMEO, 1000));
+	NUTS_PASS(nng_socket_set_ms(req, NNG_OPT_SENDTIMEO, 1000));
+	NUTS_PASS(nng_socket_set_ms(rep, NNG_OPT_SENDTIMEO, 1000));
+	// Was a duplicated SENDTIMEO line; bound the receive side instead.
+	NUTS_PASS(nng_socket_set_ms(rep, NNG_OPT_RECVTIMEO, 1000));
+
+	NUTS_MARRY_EX(req, rep, NULL, &p1, &p2);
+
+	// Simulate a few hops.
+	NUTS_PASS(nng_msg_alloc(&m, 0));
+	NUTS_PASS(nng_msg_header_append_u32(m, nng_pipe_id(p2)));
+	NUTS_PASS(nng_msg_header_append_u32(m, 0x2));
+	NUTS_PASS(nng_msg_header_append_u32(m, 0x1));
+	NUTS_PASS(nng_msg_header_append_u32(m, 0x80000123u));
+
+	NUTS_PASS(nng_sendmsg(rep, m, 0));
+
+	NUTS_PASS(nng_recvmsg(req, &m, 0));
+	// The pipe ID is stripped; the three remaining hops survive intact.
+	NUTS_TRUE(nng_msg_header_len(m) == 12);
+	NUTS_PASS(nng_msg_header_trim_u32(m, &id));
+	NUTS_TRUE(id == 0x2);
+	NUTS_PASS(nng_msg_header_trim_u32(m, &id));
+	NUTS_TRUE(id == 0x1);
+	NUTS_PASS(nng_msg_header_trim_u32(m, &id));
+	NUTS_TRUE(id == 0x80000123u);
+
+	nng_msg_free(m);
+
+	NUTS_CLOSE(req);
+	NUTS_CLOSE(rep);
+}
+
+// Closing the REQ socket while replies are still queued inbound must not
+// crash or leak; we deliberately overrun the small receive buffer.
+static void
+test_xreq_close_during_recv(void)
+{
+ nng_socket rep;
+ nng_socket req;
+ nng_msg * m;
+ nng_pipe p1;
+ nng_pipe p2;
+
+ NUTS_PASS(nng_rep0_open_raw(&rep));
+ NUTS_PASS(nng_req0_open_raw(&req));
+ NUTS_PASS(nng_socket_set_ms(rep, NNG_OPT_RECVTIMEO, 1000));
+ NUTS_PASS(nng_socket_set_ms(req, NNG_OPT_SENDTIMEO, 100));
+ NUTS_PASS(nng_socket_set_int(req, NNG_OPT_RECVBUF, 5));
+ NUTS_PASS(nng_socket_set_int(rep, NNG_OPT_SENDBUF, 20));
+
+ NUTS_MARRY_EX(req, rep, NULL, &p1, &p2);
+ NUTS_TRUE(nng_pipe_id(p1) > 0);
+ NUTS_TRUE(nng_pipe_id(p2) > 0);
+
+ for (unsigned i = 0; i < 20; i++) {
+ NUTS_PASS(nng_msg_alloc(&m, 4));
+ NUTS_PASS(nng_msg_header_append_u32(m, nng_pipe_id(p2)));
+ NUTS_PASS(nng_msg_header_append_u32(m, i | 0x80000000u));
+ NUTS_SLEEP(10);
+ NUTS_PASS(nng_sendmsg(rep, m, 0));
+ }
+ NUTS_CLOSE(req);
+ NUTS_CLOSE(rep);
+}
+
+// Closing a pipe while requests are still queued outbound must not crash
+// or leak; the peer's small receive buffer keeps messages in flight.
+static void
+test_xreq_close_pipe_during_send(void)
+{
+ nng_socket rep;
+ nng_socket req;
+ nng_msg * m;
+ nng_pipe p1;
+ nng_pipe p2;
+
+ NUTS_PASS(nng_rep0_open_raw(&rep));
+ NUTS_PASS(nng_req0_open_raw(&req));
+ NUTS_PASS(nng_socket_set_ms(rep, NNG_OPT_RECVTIMEO, 1000));
+ NUTS_PASS(nng_socket_set_ms(req, NNG_OPT_SENDTIMEO, 100));
+ NUTS_PASS(nng_socket_set_int(rep, NNG_OPT_RECVBUF, 5));
+ NUTS_PASS(nng_socket_set_int(req, NNG_OPT_SENDBUF, 20));
+
+ NUTS_MARRY_EX(req, rep, NULL, &p1, &p2);
+ NUTS_TRUE(nng_pipe_id(p1) > 0);
+ NUTS_TRUE(nng_pipe_id(p2) > 0);
+
+ for (unsigned i = 0; i < 20; i++) {
+ NUTS_PASS(nng_msg_alloc(&m, 4));
+ NUTS_PASS(nng_msg_header_append_u32(m, i | 0x80000000u));
+ NUTS_SLEEP(10);
+ NUTS_PASS(nng_sendmsg(req, m, 0));
+ }
+
+ NUTS_PASS(nng_pipe_close(p1));
+ NUTS_CLOSE(req);
+ NUTS_CLOSE(rep);
+}
+
+// Exercise NNG_OPT_MAXTTL on a raw REQ socket: range checking, size
+// checking, and type checking.  (Renamed the local from "rep" to "s" --
+// this is a REQ socket -- and used NUTS_FAIL/NUTS_CLOSE for consistency
+// with the rest of this file.)
+static void
+test_xreq_ttl_option(void)
+{
+	nng_socket  s;
+	int         v;
+	bool        b;
+	size_t      sz;
+	const char *opt = NNG_OPT_MAXTTL;
+
+	NUTS_PASS(nng_req0_open_raw(&s));
+
+	// Values outside [1, 255] (and beyond the compiled maximum) fail.
+	NUTS_PASS(nng_socket_set_int(s, opt, 1));
+	NUTS_FAIL(nng_socket_set_int(s, opt, 0), NNG_EINVAL);
+	NUTS_FAIL(nng_socket_set_int(s, opt, -1), NNG_EINVAL);
+	NUTS_FAIL(nng_socket_set_int(s, opt, 16), NNG_EINVAL);
+	NUTS_FAIL(nng_socket_set_int(s, opt, 256), NNG_EINVAL);
+	NUTS_PASS(nng_socket_set_int(s, opt, 3));
+	NUTS_PASS(nng_socket_get_int(s, opt, &v));
+	NUTS_TRUE(v == 3);
+	v  = 0;
+	sz = sizeof(v);
+	NUTS_PASS(nng_socket_get(s, opt, &v, &sz));
+	NUTS_TRUE(v == 3);
+	NUTS_TRUE(sz == sizeof(v));
+
+	// Short buffers and wrong types are rejected.
+	NUTS_FAIL(nng_socket_set(s, opt, "", 1), NNG_EINVAL);
+	sz = 1;
+	NUTS_FAIL(nng_socket_get(s, opt, &v, &sz), NNG_EINVAL);
+	NUTS_FAIL(nng_socket_set_bool(s, opt, true), NNG_EBADTYPE);
+	NUTS_FAIL(nng_socket_get_bool(s, opt, &b), NNG_EBADTYPE);
+
+	NUTS_CLOSE(s);
+}
+
+// Test registry; NUTS runs entries in order until the NULL sentinel.
+NUTS_TESTS = {
+ { "xreq identity", test_xreq_identity },
+ { "xreq raw", test_xreq_raw },
+ { "xreq no context", test_xreq_no_context },
+ { "xreq poll readable", test_xreq_poll_readable },
+ { "xreq poll writable", test_xreq_poll_writeable },
+ { "xreq validate peer", test_xreq_validate_peer },
+ { "xreq recv aio stopped", test_xreq_recv_aio_stopped },
+ { "xreq recv garbage", test_xreq_recv_garbage },
+ { "xreq recv header", test_xreq_recv_header },
+ { "xreq close during recv", test_xreq_close_during_recv },
+ { "xreq close pipe during send", test_xreq_close_pipe_during_send },
+ { "xreq ttl option", test_xreq_ttl_option },
+ { NULL, NULL },
+};
diff --git a/src/sp/protocol/survey0/CMakeLists.txt b/src/sp/protocol/survey0/CMakeLists.txt
new file mode 100644
index 00000000..b5daca41
--- /dev/null
+++ b/src/sp/protocol/survey0/CMakeLists.txt
@@ -0,0 +1,25 @@
+#
+# Copyright 2020 Staysail Systems, Inc. <info@staysail.tech>
+# Copyright 2018 Capitar IT Group BV <info@capitar.com>
+#
+# This software is supplied under the terms of the MIT License, a
+# copy of which should be located in the distribution where this
+# file was obtained (LICENSE.txt). A copy of the license may also be
+# found online at https://opensource.org/licenses/MIT.
+#
+
+# Surveyor/Respondent protocol
+nng_directory(survey0)
+
+nng_sources_if(NNG_PROTO_SURVEYOR0 survey.c xsurvey.c)
+nng_headers_if(NNG_PROTO_SURVEYOR0 nng/protocol/survey0/survey.h)
+nng_defines_if(NNG_PROTO_SURVEYOR0 NNG_HAVE_SURVEYOR0)
+
+nng_sources_if(NNG_PROTO_RESPONDENT0 respond.c xrespond.c)
+nng_headers_if(NNG_PROTO_RESPONDENT0 nng/protocol/survey0/respond.h)
+nng_defines_if(NNG_PROTO_RESPONDENT0 NNG_HAVE_RESPONDENT0)
+
+# Tests are registered unconditionally; they are built by the test harness.
+nng_test(respond_test)
+nng_test(survey_test)
+nng_test(xrespond_test)
+nng_test(xsurvey_test) \ No newline at end of file
diff --git a/src/sp/protocol/survey0/respond.c b/src/sp/protocol/survey0/respond.c
new file mode 100644
index 00000000..ad551c8f
--- /dev/null
+++ b/src/sp/protocol/survey0/respond.c
@@ -0,0 +1,693 @@
+//
+// Copyright 2020 Staysail Systems, Inc. <info@staysail.tech>
+// Copyright 2018 Capitar IT Group BV <info@capitar.com>
+//
+// This software is supplied under the terms of the MIT License, a
+// copy of which should be located in the distribution where this
+// file was obtained (LICENSE.txt). A copy of the license may also be
+// found online at https://opensource.org/licenses/MIT.
+//
+
+#include <stdlib.h>
+#include <string.h>
+
+#include "core/nng_impl.h"
+#include "nng/protocol/survey0/respond.h"
+
+// Respondent protocol. The RESPONDENT protocol is the "replier" side of
+// the surveyor pattern. This is useful for building service discovery, or
+// voting algorithms, for example.
+
+// SP protocol numbers for the surveyor pattern (family 6).
+#ifndef NNI_PROTO_SURVEYOR_V0
+#define NNI_PROTO_SURVEYOR_V0 NNI_PROTO(6, 2)
+#endif
+
+#ifndef NNI_PROTO_RESPONDENT_V0
+#define NNI_PROTO_RESPONDENT_V0 NNI_PROTO(6, 3)
+#endif
+
+typedef struct resp0_pipe resp0_pipe;
+typedef struct resp0_sock resp0_sock;
+typedef struct resp0_ctx resp0_ctx;
+
+static void resp0_pipe_send_cb(void *);
+static void resp0_pipe_recv_cb(void *);
+static void resp0_pipe_fini(void *);
+
+// Per-context state.  The backtrace of the last received survey is stashed
+// here so the reply can be routed back along the same path.
+struct resp0_ctx {
+ resp0_sock * sock;
+ uint32_t pipe_id;
+ resp0_pipe * spipe; // send pipe
+ nni_aio * saio; // send aio
+ nni_aio * raio; // recv aio
+ nni_list_node sqnode;
+ nni_list_node rqnode;
+ size_t btrace_len;
+ // One 32-bit hop per TTL plus the survey ID; sized for the maximum TTL.
+ uint32_t btrace[NNI_MAX_MAX_TTL + 1];
+};
+
+// resp0_sock is our per-socket protocol private structure.
+struct resp0_sock {
+ nni_mtx mtx;
+ nni_atomic_int ttl;
+ nni_id_map pipes;
+ resp0_ctx ctx; // default context for socket-level send/recv
+ nni_list recvpipes; // pipes with a message ready, FIFO
+ nni_list recvq; // contexts blocked in recv
+ nni_pollable readable;
+ nni_pollable writable;
+};
+
+// resp0_pipe is our per-pipe protocol private structure.
+struct resp0_pipe {
+ nni_pipe * npipe;
+ resp0_sock * psock;
+ bool busy; // a send is in flight on this pipe
+ bool closed;
+ uint32_t id;
+ nni_list sendq; // contexts waiting to send
+ nni_aio aio_send;
+ nni_aio aio_recv;
+ nni_list_node rnode; // receivable linkage
+};
+
+// Abort any send or receive pending on the context with NNG_ECLOSED,
+// removing the context from the relevant wait queues under the socket lock.
+static void
+resp0_ctx_close(void *arg)
+{
+ resp0_ctx * ctx = arg;
+ resp0_sock *s = ctx->sock;
+ nni_aio * aio;
+
+ // complete any outstanding operations here, cancellation, etc.
+
+ nni_mtx_lock(&s->mtx);
+ if ((aio = ctx->saio) != NULL) {
+ resp0_pipe *p = ctx->spipe;
+ ctx->saio = NULL;
+ ctx->spipe = NULL;
+ nni_list_remove(&p->sendq, ctx);
+ nni_aio_finish_error(aio, NNG_ECLOSED);
+ }
+ if ((aio = ctx->raio) != NULL) {
+ ctx->raio = NULL;
+ nni_list_remove(&s->recvq, ctx);
+ nni_aio_finish_error(aio, NNG_ECLOSED);
+ }
+ nni_mtx_unlock(&s->mtx);
+}
+
+// Context teardown; closing aborts any pending operations, and the context
+// owns no other resources.
+static void
+resp0_ctx_fini(void *arg)
+{
+ resp0_ctx *ctx = arg;
+
+ resp0_ctx_close(ctx);
+}
+
+// Initialize a context bound to the given socket.  Cannot fail.
+static int
+resp0_ctx_init(void *carg, void *sarg)
+{
+	resp0_ctx * ctx  = carg;
+	resp0_sock *sock = sarg;
+
+	ctx->sock       = sock;
+	ctx->pipe_id    = 0;
+	ctx->btrace_len = 0;
+	NNI_LIST_NODE_INIT(&ctx->sqnode);
+	NNI_LIST_NODE_INIT(&ctx->rqnode);
+
+	return (0);
+}
+
+// Cancellation callback for a queued send: if the aio is still ours,
+// dequeue the context and fail the aio with the supplied error.
+static void
+resp0_ctx_cancel_send(nni_aio *aio, void *arg, int rv)
+{
+ resp0_ctx * ctx = arg;
+ resp0_sock *s = ctx->sock;
+
+ nni_mtx_lock(&s->mtx);
+ if (ctx->saio != aio) {
+ // Already completed or superseded; nothing to do.
+ nni_mtx_unlock(&s->mtx);
+ return;
+ }
+ nni_list_node_remove(&ctx->sqnode);
+ ctx->saio = NULL;
+ nni_mtx_unlock(&s->mtx);
+ nni_msg_header_clear(nni_aio_get_msg(aio)); // reset the headers
+ nni_aio_finish_error(aio, rv);
+}
+
+// Send a reply on a context.  The stashed backtrace is prepended as the
+// header and the message is routed to the pipe the survey arrived on.
+// Fails with NNG_ESTATE if no survey was received first; silently discards
+// the reply (reporting success) if that pipe has since gone away.
+static void
+resp0_ctx_send(void *arg, nni_aio *aio)
+{
+ resp0_ctx * ctx = arg;
+ resp0_sock *s = ctx->sock;
+ resp0_pipe *p;
+ nni_msg * msg;
+ size_t len;
+ uint32_t pid;
+ int rv;
+
+ if (nni_aio_begin(aio) != 0) {
+ return;
+ }
+ msg = nni_aio_get_msg(aio);
+ nni_msg_header_clear(msg);
+
+ if (ctx == &s->ctx) {
+ // We can't send anymore, because only one send per request.
+ nni_pollable_clear(&s->writable);
+ }
+
+ nni_mtx_lock(&s->mtx);
+ if ((rv = nni_aio_schedule(aio, resp0_ctx_cancel_send, ctx)) != 0) {
+ nni_mtx_unlock(&s->mtx);
+ nni_aio_finish_error(aio, rv);
+ return;
+ }
+
+ // No stored backtrace means no survey was received on this context.
+ if ((len = ctx->btrace_len) == 0) {
+ nni_mtx_unlock(&s->mtx);
+ nni_aio_finish_error(aio, NNG_ESTATE);
+ return;
+ }
+ pid = ctx->pipe_id;
+ // Consume the stashed state; one reply per survey.
+ ctx->pipe_id = 0;
+ ctx->btrace_len = 0;
+
+ if ((rv = nni_msg_header_append(msg, ctx->btrace, len)) != 0) {
+ nni_mtx_unlock(&s->mtx);
+ nni_aio_finish_error(aio, rv);
+ return;
+ }
+
+ if ((p = nni_id_get(&s->pipes, pid)) == NULL) {
+ // Surveyor has left the building. Just discard the reply.
+ nni_mtx_unlock(&s->mtx);
+ nni_aio_set_msg(aio, NULL);
+ nni_aio_finish(aio, 0, nni_msg_len(msg));
+ nni_msg_free(msg);
+ return;
+ }
+
+ if (!p->busy) {
+ // Pipe idle: hand the message straight to it.
+ p->busy = true;
+ len = nni_msg_len(msg);
+ nni_aio_set_msg(&p->aio_send, msg);
+ nni_pipe_send(p->npipe, &p->aio_send);
+ nni_mtx_unlock(&s->mtx);
+
+ nni_aio_set_msg(aio, NULL);
+ nni_aio_finish(aio, 0, len);
+ return;
+ }
+
+ // Pipe busy: queue behind it; the send callback will drain us.
+ ctx->saio = aio;
+ ctx->spipe = p;
+ nni_list_append(&p->sendq, ctx);
+ nni_mtx_unlock(&s->mtx);
+}
+
+// Socket teardown: release the pipe map, default context, pollables,
+// and the lock, in reverse order of initialization.
+static void
+resp0_sock_fini(void *arg)
+{
+ resp0_sock *s = arg;
+
+ nni_id_map_fini(&s->pipes);
+ resp0_ctx_fini(&s->ctx);
+ nni_pollable_fini(&s->writable);
+ nni_pollable_fini(&s->readable);
+ nni_mtx_fini(&s->mtx);
+}
+
+// Socket setup: lock, pipe map, wait lists, default TTL, the implicit
+// default context, and the readable/writable pollables.  Cannot fail.
+static int
+resp0_sock_init(void *arg, nni_sock *nsock)
+{
+ resp0_sock *s = arg;
+
+ NNI_ARG_UNUSED(nsock);
+
+ nni_mtx_init(&s->mtx);
+ nni_id_map_init(&s->pipes, 0, 0, false);
+
+ NNI_LIST_INIT(&s->recvq, resp0_ctx, rqnode);
+ NNI_LIST_INIT(&s->recvpipes, resp0_pipe, rnode);
+
+ nni_atomic_init(&s->ttl);
+ nni_atomic_set(&s->ttl, 8); // Per RFC
+
+ (void) resp0_ctx_init(&s->ctx, s);
+
+ // We start off without being either readable or writable.
+ // Readability comes when there is something on the socket.
+ nni_pollable_init(&s->writable);
+ nni_pollable_init(&s->readable);
+ return (0);
+}
+
+// Nothing to do at open time; all state is set up in resp0_sock_init.
+static void
+resp0_sock_open(void *arg)
+{
+ NNI_ARG_UNUSED(arg);
+}
+
+// Closing the socket closes the default context, aborting socket-level
+// send/recv operations.  (Per-pipe shutdown happens via pipe callbacks.)
+static void
+resp0_sock_close(void *arg)
+{
+ resp0_sock *s = arg;
+
+ resp0_ctx_close(&s->ctx);
+}
+
+// Stop both pipe aios, waiting for any in-flight callbacks to finish.
+static void
+resp0_pipe_stop(void *arg)
+{
+ resp0_pipe *p = arg;
+
+ nni_aio_stop(&p->aio_send);
+ nni_aio_stop(&p->aio_recv);
+}
+
+// Pipe teardown: free any message still parked on the receive aio, then
+// finalize both aios.
+static void
+resp0_pipe_fini(void *arg)
+{
+ resp0_pipe *p = arg;
+ nng_msg * msg;
+
+ if ((msg = nni_aio_get_msg(&p->aio_recv)) != NULL) {
+ nni_aio_set_msg(&p->aio_recv, NULL);
+ nni_msg_free(msg);
+ }
+ nni_aio_fini(&p->aio_send);
+ nni_aio_fini(&p->aio_recv);
+}
+
+// Pipe setup: wire the send/recv aio callbacks and cache the pipe ID used
+// as the routing key in the socket's pipe map.  Cannot fail.
+static int
+resp0_pipe_init(void *arg, nni_pipe *npipe, void *s)
+{
+ resp0_pipe *p = arg;
+
+ nni_aio_init(&p->aio_recv, resp0_pipe_recv_cb, p);
+ nni_aio_init(&p->aio_send, resp0_pipe_send_cb, p);
+
+ NNI_LIST_INIT(&p->sendq, resp0_ctx, sqnode);
+
+ p->npipe = npipe;
+ p->psock = s;
+ p->busy = false;
+ p->id = nni_pipe_id(npipe);
+
+ return (0);
+}
+
+// Pipe start: reject non-surveyor peers, register the pipe in the socket's
+// map, and kick off the first receive.
+static int
+resp0_pipe_start(void *arg)
+{
+ resp0_pipe *p = arg;
+ resp0_sock *s = p->psock;
+ int rv;
+
+ if (nni_pipe_peer(p->npipe) != NNI_PROTO_SURVEYOR_V0) {
+ // Wrong protocol on the other end; refuse the connection.
+ return (NNG_EPROTO);
+ }
+
+ nni_mtx_lock(&s->mtx);
+ rv = nni_id_set(&s->pipes, p->id, p);
+ nni_mtx_unlock(&s->mtx);
+ if (rv != 0) {
+ return (rv);
+ }
+
+ nni_pipe_recv(p->npipe, &p->aio_recv);
+ return (rv);
+}
+
+// Pipe close: abort the aios, then drain contexts queued to send on this
+// pipe.  Their replies are reported as sent (and discarded), matching the
+// "surveyor went away" behavior in resp0_ctx_send.
+static void
+resp0_pipe_close(void *arg)
+{
+ resp0_pipe *p = arg;
+ resp0_sock *s = p->psock;
+ resp0_ctx * ctx;
+
+ nni_aio_close(&p->aio_send);
+ nni_aio_close(&p->aio_recv);
+
+ nni_mtx_lock(&s->mtx);
+ p->closed = true;
+ while ((ctx = nni_list_first(&p->sendq)) != NULL) {
+ nni_aio *aio;
+ nni_msg *msg;
+ nni_list_remove(&p->sendq, ctx);
+ aio = ctx->saio;
+ ctx->saio = NULL;
+ msg = nni_aio_get_msg(aio);
+ nni_aio_set_msg(aio, NULL);
+ nni_aio_finish(aio, 0, nni_msg_len(msg));
+ nni_msg_free(msg);
+ }
+ if (p->id == s->ctx.pipe_id) {
+ // Make sure user space knows they can send a message to us,
+ // which we will happily discard.
+ nni_pollable_raise(&s->writable);
+ }
+ nni_id_remove(&s->pipes, p->id);
+ nni_mtx_unlock(&s->mtx);
+}
+
+// Send completion: on error, drop the message and close the pipe; on
+// success, start the next queued context's send, or mark the pipe idle.
+static void
+resp0_pipe_send_cb(void *arg)
+{
+ resp0_pipe *p = arg;
+ resp0_sock *s = p->psock;
+ resp0_ctx * ctx;
+ nni_aio * aio;
+ nni_msg * msg;
+ size_t len;
+
+ if (nni_aio_result(&p->aio_send) != 0) {
+ nni_msg_free(nni_aio_get_msg(&p->aio_send));
+ nni_aio_set_msg(&p->aio_send, NULL);
+ nni_pipe_close(p->npipe);
+ return;
+ }
+ nni_mtx_lock(&s->mtx);
+ p->busy = false;
+ if ((ctx = nni_list_first(&p->sendq)) == NULL) {
+ // Nothing else to send.
+ if (p->id == s->ctx.pipe_id) {
+ // Mark us ready for the other side to send!
+ nni_pollable_raise(&s->writable);
+ }
+ nni_mtx_unlock(&s->mtx);
+ return;
+ }
+
+ // Dequeue the next waiting context and push its message out.
+ nni_list_remove(&p->sendq, ctx);
+ aio = ctx->saio;
+ ctx->saio = NULL;
+ ctx->spipe = NULL;
+ p->busy = true;
+ msg = nni_aio_get_msg(aio);
+ len = nni_msg_len(msg);
+ nni_aio_set_msg(aio, NULL);
+ nni_aio_set_msg(&p->aio_send, msg);
+ nni_pipe_send(p->npipe, &p->aio_send);
+
+ nni_mtx_unlock(&s->mtx);
+
+ // Complete the user's aio outside the lock.
+ nni_aio_finish_sync(aio, 0, len);
+}
+
+// Cancellation callback for a queued receive: dequeue the context and fail
+// the aio, but only if it is still the one we queued.
+static void
+resp0_cancel_recv(nni_aio *aio, void *arg, int rv)
+{
+ resp0_ctx * ctx = arg;
+ resp0_sock *s = ctx->sock;
+
+ nni_mtx_lock(&s->mtx);
+ if (ctx->raio == aio) {
+ nni_list_remove(&s->recvq, ctx);
+ ctx->raio = NULL;
+ nni_aio_finish_error(aio, rv);
+ }
+ nni_mtx_unlock(&s->mtx);
+}
+
+// Receive a survey on a context.  If a pipe already has a message waiting,
+// take it, stash its backtrace for the reply, and restart that pipe's
+// receive; otherwise queue the context until resp0_pipe_recv_cb serves it.
+static void
+resp0_ctx_recv(void *arg, nni_aio *aio)
+{
+ resp0_ctx * ctx = arg;
+ resp0_sock *s = ctx->sock;
+ resp0_pipe *p;
+ size_t len;
+ nni_msg * msg;
+
+ if (nni_aio_begin(aio) != 0) {
+ return;
+ }
+ nni_mtx_lock(&s->mtx);
+ if ((p = nni_list_first(&s->recvpipes)) == NULL) {
+ int rv;
+ rv = nni_aio_schedule(aio, resp0_cancel_recv, ctx);
+ if (rv != 0) {
+ nni_mtx_unlock(&s->mtx);
+ nni_aio_finish_error(aio, rv);
+ return;
+ }
+ // We cannot have two concurrent receive requests on the same
+ // context...
+ if (ctx->raio != NULL) {
+ nni_mtx_unlock(&s->mtx);
+ nni_aio_finish_error(aio, NNG_ESTATE);
+ return;
+ }
+ ctx->raio = aio;
+ nni_list_append(&s->recvq, ctx);
+ nni_mtx_unlock(&s->mtx);
+ return;
+ }
+ msg = nni_aio_get_msg(&p->aio_recv);
+ nni_aio_set_msg(&p->aio_recv, NULL);
+ nni_list_remove(&s->recvpipes, p);
+ if (nni_list_empty(&s->recvpipes)) {
+ // No more pending messages; drop readability.
+ nni_pollable_clear(&s->readable);
+ }
+ nni_pipe_recv(p->npipe, &p->aio_recv);
+
+ // Save the backtrace so the eventual reply can be routed back.
+ len = nni_msg_header_len(msg);
+ memcpy(ctx->btrace, nni_msg_header(msg), len);
+ ctx->btrace_len = len;
+ ctx->pipe_id = p->id;
+ if (ctx == &s->ctx) {
+ nni_pollable_raise(&s->writable);
+ }
+ nni_mtx_unlock(&s->mtx);
+
+ nni_msg_header_clear(msg);
+ nni_aio_set_msg(aio, msg);
+ nni_aio_finish(aio, 0, nni_msg_len(msg));
+}
+
+// Receive completion for a pipe: move the backtrace (4-byte hops, the last
+// marked by a set high bit) from the body to the header, enforcing the TTL,
+// then hand the message to a waiting context or park the pipe as readable.
+static void
+resp0_pipe_recv_cb(void *arg)
+{
+	resp0_pipe *p = arg;
+	resp0_sock *s = p->psock;
+	resp0_ctx * ctx;
+	nni_msg *   msg;
+	nni_aio *   aio;
+	int         hops;
+	size_t      len;
+	int         ttl;
+
+	if (nni_aio_result(&p->aio_recv) != 0) {
+		nni_pipe_close(p->npipe);
+		return;
+	}
+
+	ttl = nni_atomic_get(&s->ttl);
+	msg = nni_aio_get_msg(&p->aio_recv);
+	nni_msg_set_pipe(msg, p->id);
+
+	// Move backtrace from body to header
+	hops = 1;
+	for (;;) {
+		bool     end = false; // bool literal (was "= 0")
+		uint8_t *body;
+
+		if (hops > ttl) {
+			// Too many hops; silently drop and keep receiving.
+			goto drop;
+		}
+		hops++;
+		if (nni_msg_len(msg) < 4) {
+			// Peer is speaking garbage, kick it.
+			nni_msg_free(msg);
+			nni_aio_set_msg(&p->aio_recv, NULL);
+			nni_pipe_close(p->npipe);
+			return;
+		}
+		body = nni_msg_body(msg);
+		end  = ((body[0] & 0x80u) != 0); // high bit marks last hop
+		if (nni_msg_header_append(msg, body, 4) != 0) {
+			goto drop;
+		}
+		nni_msg_trim(msg, 4);
+		if (end) {
+			break;
+		}
+	}
+
+	len = nni_msg_header_len(msg);
+
+	nni_mtx_lock(&s->mtx);
+
+	if (p->closed) {
+		// If pipe was closed, we just abandon the data from it.
+		nni_aio_set_msg(&p->aio_recv, NULL);
+		nni_mtx_unlock(&s->mtx);
+		nni_msg_free(msg);
+		return;
+	}
+	if ((ctx = nni_list_first(&s->recvq)) == NULL) {
+		// No one blocked in recv, stall.
+		nni_list_append(&s->recvpipes, p);
+		nni_pollable_raise(&s->readable);
+		nni_mtx_unlock(&s->mtx);
+		return;
+	}
+
+	nni_list_remove(&s->recvq, ctx);
+	aio       = ctx->raio;
+	ctx->raio = NULL;
+	nni_aio_set_msg(&p->aio_recv, NULL);
+
+	// Start the next receive.
+	nni_pipe_recv(p->npipe, &p->aio_recv);
+
+	// Stash the backtrace in the context for the eventual reply.
+	ctx->btrace_len = len;
+	memcpy(ctx->btrace, nni_msg_header(msg), len);
+	nni_msg_header_clear(msg);
+	ctx->pipe_id = p->id;
+
+	if ((ctx == &s->ctx) && (!p->busy)) {
+		nni_pollable_raise(&s->writable);
+	}
+	nni_mtx_unlock(&s->mtx);
+
+	nni_aio_set_msg(aio, msg);
+	nni_aio_finish_sync(aio, 0, nni_msg_len(msg));
+	return;
+
+drop:
+	nni_msg_free(msg);
+	nni_aio_set_msg(&p->aio_recv, NULL);
+	nni_pipe_recv(p->npipe, &p->aio_recv);
+}
+
+// Set NNG_OPT_MAXTTL: validate an integer in [1, NNI_MAX_MAX_TTL] and store
+// it atomically, so the receive path reads it without the socket lock.
+static int
+resp0_sock_set_max_ttl(void *arg, const void *buf, size_t sz, nni_opt_type t)
+{
+	resp0_sock *sock = arg;
+	int         val;
+	int         rv;
+
+	rv = nni_copyin_int(&val, buf, sz, 1, NNI_MAX_MAX_TTL, t);
+	if (rv != 0) {
+		return (rv);
+	}
+	nni_atomic_set(&sock->ttl, val);
+	return (0);
+}
+
+// Get NNG_OPT_MAXTTL: atomic read, copied out as an integer.
+static int
+resp0_sock_get_max_ttl(void *arg, void *buf, size_t *szp, nni_opt_type t)
+{
+ resp0_sock *s = arg;
+ return (nni_copyout_int(nni_atomic_get(&s->ttl), buf, szp, t));
+}
+
+// Get NNG_OPT_SENDFD: expose the "writable" pollable as a file descriptor.
+static int
+resp0_sock_get_sendfd(void *arg, void *buf, size_t *szp, nni_opt_type t)
+{
+	resp0_sock *sock = arg;
+	int         fd;
+	int         rv;
+
+	if ((rv = nni_pollable_getfd(&sock->writable, &fd)) == 0) {
+		rv = nni_copyout_int(fd, buf, szp, t);
+	}
+	return (rv);
+}
+
+// Get NNG_OPT_RECVFD: expose the "readable" pollable as a file descriptor.
+static int
+resp0_sock_get_recvfd(void *arg, void *buf, size_t *szp, nni_opt_type t)
+{
+ resp0_sock *s = arg;
+ int rv;
+ int fd;
+
+ if ((rv = nni_pollable_getfd(&s->readable, &fd)) != 0) {
+ return (rv);
+ }
+ return (nni_copyout_int(fd, buf, szp, t));
+}
+
+// Socket-level send delegates to the default (implicit) context.
+static void
+resp0_sock_send(void *arg, nni_aio *aio)
+{
+ resp0_sock *s = arg;
+
+ resp0_ctx_send(&s->ctx, aio);
+}
+
+// Socket-level receive delegates to the default (implicit) context.
+static void
+resp0_sock_recv(void *arg, nni_aio *aio)
+{
+ resp0_sock *s = arg;
+
+ resp0_ctx_recv(&s->ctx, aio);
+}
+
+// Per-pipe operations vector.
+static nni_proto_pipe_ops resp0_pipe_ops = {
+ .pipe_size = sizeof(resp0_pipe),
+ .pipe_init = resp0_pipe_init,
+ .pipe_fini = resp0_pipe_fini,
+ .pipe_start = resp0_pipe_start,
+ .pipe_close = resp0_pipe_close,
+ .pipe_stop = resp0_pipe_stop,
+};
+
+// Context operations vector (cooked mode supports contexts).
+static nni_proto_ctx_ops resp0_ctx_ops = {
+ .ctx_size = sizeof(resp0_ctx),
+ .ctx_init = resp0_ctx_init,
+ .ctx_fini = resp0_ctx_fini,
+ .ctx_send = resp0_ctx_send,
+ .ctx_recv = resp0_ctx_recv,
+};
+
+// Socket options: max TTL plus the two pollable file descriptors.
+static nni_option resp0_sock_options[] = {
+ {
+ .o_name = NNG_OPT_MAXTTL,
+ .o_get = resp0_sock_get_max_ttl,
+ .o_set = resp0_sock_set_max_ttl,
+ },
+ {
+ .o_name = NNG_OPT_RECVFD,
+ .o_get = resp0_sock_get_recvfd,
+ .o_set = NULL,
+ },
+ {
+ .o_name = NNG_OPT_SENDFD,
+ .o_get = resp0_sock_get_sendfd,
+ .o_set = NULL,
+ },
+ // terminate list
+ {
+ .o_name = NULL,
+ },
+};
+
+// Per-socket operations vector.
+static nni_proto_sock_ops resp0_sock_ops = {
+ .sock_size = sizeof(resp0_sock),
+ .sock_init = resp0_sock_init,
+ .sock_fini = resp0_sock_fini,
+ .sock_open = resp0_sock_open,
+ .sock_close = resp0_sock_close,
+ .sock_send = resp0_sock_send,
+ .sock_recv = resp0_sock_recv,
+ .sock_options = resp0_sock_options,
+};
+
+// Protocol descriptor: RESPONDENT (self) talking to SURVEYOR (peer).
+static nni_proto resp0_proto = {
+ .proto_version = NNI_PROTOCOL_VERSION,
+ .proto_self = { NNI_PROTO_RESPONDENT_V0, "respondent" },
+ .proto_peer = { NNI_PROTO_SURVEYOR_V0, "surveyor" },
+ .proto_flags = NNI_PROTO_FLAG_SNDRCV,
+ .proto_sock_ops = &resp0_sock_ops,
+ .proto_pipe_ops = &resp0_pipe_ops,
+ .proto_ctx_ops = &resp0_ctx_ops,
+};
+
+// Public entry point: open a cooked RESPONDENT version 0 socket.
+int
+nng_respondent0_open(nng_socket *sidp)
+{
+ return (nni_proto_open(sidp, &resp0_proto));
+}
diff --git a/src/sp/protocol/survey0/respond_test.c b/src/sp/protocol/survey0/respond_test.c
new file mode 100644
index 00000000..51844c76
--- /dev/null
+++ b/src/sp/protocol/survey0/respond_test.c
@@ -0,0 +1,586 @@
+//
+// Copyright 2020 Staysail Systems, Inc. <info@staysail.tech>
+//
+// This software is supplied under the terms of the MIT License, a
+// copy of which should be located in the distribution where this
+// file was obtained (LICENSE.txt). A copy of the license may also be
+// found online at https://opensource.org/licenses/MIT.
+//
+
+#include <nuts.h>
+
+// Verify the protocol numbers and names a respondent socket reports.
+// (Normalized to use NUTS_PASS consistently rather than mixing it with
+// NUTS_TRUE(call == 0), matching the style of the other identity tests.)
+void
+test_resp_identity(void)
+{
+ nng_socket s;
+ int p;
+ char * n;
+
+ NUTS_PASS(nng_respondent0_open(&s));
+ NUTS_PASS(nng_socket_get_int(s, NNG_OPT_PROTO, &p));
+ NUTS_TRUE(p == NNG_RESPONDENT0_SELF);
+ NUTS_PASS(nng_socket_get_int(s, NNG_OPT_PEER, &p));
+ NUTS_TRUE(p == NNG_RESPONDENT0_PEER);
+ NUTS_PASS(nng_socket_get_string(s, NNG_OPT_PROTONAME, &n));
+ NUTS_MATCH(n, NNG_RESPONDENT0_SELF_NAME);
+ nng_strfree(n);
+ NUTS_PASS(nng_socket_get_string(s, NNG_OPT_PEERNAME, &n));
+ NUTS_MATCH(n, NNG_RESPONDENT0_PEER_NAME);
+ nng_strfree(n);
+ NUTS_CLOSE(s);
+}
+
+// A respondent may only send after it has received a survey; a send with
+// no survey outstanding must fail with NNG_ESTATE.
+void
+test_resp_send_bad_state(void)
+{
+ nng_socket sock;
+ nng_msg * m = NULL;
+
+ NUTS_PASS(nng_respondent0_open(&sock));
+ NUTS_PASS(nng_msg_alloc(&m, 0));
+ NUTS_FAIL(nng_sendmsg(sock, m, 0), NNG_ESTATE);
+ nng_msg_free(m);
+ NUTS_CLOSE(sock);
+}
+
+// A respondent is only writable while it holds a survey to answer;
+// sending the reply consumes that state again.
+void
+test_resp_poll_writeable(void)
+{
+ int fd;
+ nng_socket surv;
+ nng_socket resp;
+
+ NUTS_PASS(nng_surveyor0_open(&surv));
+ NUTS_PASS(nng_respondent0_open(&resp));
+ NUTS_PASS(nng_socket_get_int(resp, NNG_OPT_SENDFD, &fd));
+ NUTS_TRUE(fd >= 0);
+
+ // Not writable before connect.
+ NUTS_TRUE(nuts_poll_fd(fd) == false);
+
+ NUTS_MARRY(surv, resp);
+
+ // Still not writable.
+ NUTS_TRUE(nuts_poll_fd(fd) == false);
+
+ // If we get a job, *then* we become writable
+ NUTS_SEND(surv, "abc");
+ NUTS_RECV(resp, "abc");
+ NUTS_TRUE(nuts_poll_fd(fd) == true);
+
+ // And is no longer writable once we send a message
+ NUTS_SEND(resp, "def");
+ NUTS_TRUE(nuts_poll_fd(fd) == false);
+ // Even after receiving it
+ NUTS_RECV(surv, "def");
+ NUTS_TRUE(nuts_poll_fd(fd) == false);
+
+ NUTS_CLOSE(surv);
+ NUTS_CLOSE(resp);
+}
+
+// The respondent's RECVFD becomes readable only once a survey has
+// actually arrived, and clears again when the survey is received.
+void
+test_resp_poll_readable(void)
+{
+ int fd;
+ nng_socket surv;
+ nng_socket resp;
+ nng_msg * msg;
+
+ NUTS_PASS(nng_surveyor0_open(&surv));
+ NUTS_PASS(nng_respondent0_open(&resp));
+ NUTS_PASS(nng_socket_get_int(resp, NNG_OPT_RECVFD, &fd));
+ NUTS_TRUE(fd >= 0);
+
+ // Not readable if not connected!
+ NUTS_TRUE(nuts_poll_fd(fd) == false);
+
+ // Even after connect (no message yet)
+ NUTS_MARRY(surv, resp);
+ NUTS_TRUE(nuts_poll_fd(fd) == false);
+
+ // But once we send messages, it is.
+ // We have to send a request, in order to send a reply.
+ NUTS_SEND(surv, "abc");
+ NUTS_SLEEP(100); // allow time for delivery over the transport
+
+ NUTS_TRUE(nuts_poll_fd(fd) == true);
+
+ // and receiving makes it no longer ready
+ NUTS_PASS(nng_recvmsg(resp, &msg, 0));
+ nng_msg_free(msg);
+ NUTS_TRUE(nuts_poll_fd(fd) == false);
+
+ // TODO verify unsolicited response
+
+ NUTS_CLOSE(surv);
+ NUTS_CLOSE(resp);
+}
+
+// Contexts do not support pollable descriptors; both FD options must
+// report NNG_ENOTSUP on a context.
+void
+test_resp_context_no_poll(void)
+{
+ int pollfd;
+ nng_socket sock;
+ nng_ctx c;
+
+ NUTS_PASS(nng_respondent0_open(&sock));
+ NUTS_PASS(nng_ctx_open(&c, sock));
+ NUTS_FAIL(nng_ctx_get_int(c, NNG_OPT_RECVFD, &pollfd), NNG_ENOTSUP);
+ NUTS_FAIL(nng_ctx_get_int(c, NNG_OPT_SENDFD, &pollfd), NNG_ENOTSUP);
+ NUTS_PASS(nng_ctx_close(c));
+ NUTS_CLOSE(sock);
+}
+
+// Two respondents wired together are protocol-incompatible; the listener
+// must reject the connection, which shows up in its "reject" statistic.
+void
+test_resp_validate_peer(void)
+{
+ nng_socket s1, s2;
+ nng_stat * stats;
+ nng_stat * reject;
+ char * addr;
+
+ NUTS_ADDR(addr, "inproc");
+
+ NUTS_PASS(nng_respondent0_open(&s1));
+ NUTS_PASS(nng_respondent0_open(&s2));
+
+ NUTS_PASS(nng_listen(s1, addr, NULL, 0));
+ NUTS_PASS(nng_dial(s2, addr, NULL, NNG_FLAG_NONBLOCK));
+
+ NUTS_SLEEP(100); // allow the handshake (and rejection) to occur
+ NUTS_PASS(nng_stats_get(&stats));
+
+ NUTS_TRUE(stats != NULL);
+ NUTS_TRUE((reject = nng_stat_find_socket(stats, s1)) != NULL);
+ NUTS_TRUE((reject = nng_stat_find(reject, "reject")) != NULL);
+
+ NUTS_TRUE(nng_stat_type(reject) == NNG_STAT_COUNTER);
+ NUTS_TRUE(nng_stat_value(reject) > 0);
+
+ NUTS_CLOSE(s1);
+ NUTS_CLOSE(s2);
+ nng_stats_free(stats);
+}
+
+// Only one receive may be outstanding on the socket at a time: the
+// second gets NNG_ESTATE, and the first is aborted with NNG_ECLOSED
+// when the socket closes.
+void
+test_resp_double_recv(void)
+{
+ nng_socket s1;
+ nng_aio * aio1;
+ nng_aio * aio2;
+
+ NUTS_PASS(nng_respondent0_open(&s1));
+ NUTS_PASS(nng_aio_alloc(&aio1, NULL, NULL));
+ NUTS_PASS(nng_aio_alloc(&aio2, NULL, NULL));
+
+ nng_recv_aio(s1, aio1);
+ nng_recv_aio(s1, aio2);
+
+ nng_aio_wait(aio2);
+ NUTS_FAIL(nng_aio_result(aio2), NNG_ESTATE);
+ NUTS_CLOSE(s1);
+ NUTS_FAIL(nng_aio_result(aio1), NNG_ECLOSED);
+ nng_aio_free(aio1);
+ nng_aio_free(aio2);
+}
+
+// Closing the pipe a survey arrived on before replying: the reply send
+// still succeeds (best-effort discard), rather than failing.
+void
+test_resp_close_pipe_before_send(void)
+{
+ nng_socket resp;
+ nng_socket surv;
+ nng_pipe p;
+ nng_aio * aio1;
+ nng_msg * m;
+
+ NUTS_PASS(nng_respondent0_open(&resp));
+ NUTS_PASS(nng_surveyor0_open(&surv));
+ NUTS_PASS(nng_setopt_ms(resp, NNG_OPT_RECVTIMEO, 1000));
+ NUTS_PASS(nng_setopt_ms(resp, NNG_OPT_SENDTIMEO, 1000));
+ NUTS_PASS(nng_setopt_ms(surv, NNG_OPT_SENDTIMEO, 1000));
+ NUTS_PASS(nng_aio_alloc(&aio1, NULL, NULL));
+
+ NUTS_MARRY(surv, resp);
+ NUTS_SEND(surv, "test");
+
+ nng_recv_aio(resp, aio1);
+ nng_aio_wait(aio1);
+ NUTS_PASS(nng_aio_result(aio1));
+ NUTS_TRUE((m = nng_aio_get_msg(aio1)) != NULL);
+ p = nng_msg_get_pipe(m);
+ NUTS_PASS(nng_pipe_close(p));
+ // The reply is silently dropped, but the send must not error.
+ NUTS_PASS(nng_sendmsg(resp, m, 0));
+
+ NUTS_CLOSE(surv);
+ NUTS_CLOSE(resp);
+ nng_aio_free(aio1);
+}
+
+// Stress: with tiny buffers, keep replying until the send path backs up
+// (NNG_ETIMEDOUT), then close the pipe while traffic is still queued.
+// The raw surveyor never reads, so backpressure builds quickly.
+void
+test_resp_close_pipe_during_send(void)
+{
+ nng_socket resp;
+ nng_socket surv;
+ nng_pipe p = NNG_PIPE_INITIALIZER;
+ nng_msg * m;
+
+ NUTS_PASS(nng_respondent0_open(&resp));
+ NUTS_PASS(nng_surveyor0_open_raw(&surv));
+ NUTS_PASS(nng_setopt_ms(resp, NNG_OPT_RECVTIMEO, 1000));
+ NUTS_PASS(nng_setopt_ms(resp, NNG_OPT_SENDTIMEO, 200));
+ NUTS_PASS(nng_setopt_ms(surv, NNG_OPT_SENDTIMEO, 1000));
+ NUTS_PASS(nng_setopt_int(resp, NNG_OPT_SENDBUF, 20));
+ NUTS_PASS(nng_setopt_int(resp, NNG_OPT_RECVBUF, 20));
+ NUTS_PASS(nng_setopt_int(surv, NNG_OPT_SENDBUF, 20));
+ NUTS_PASS(nng_setopt_int(surv, NNG_OPT_RECVBUF, 1));
+
+ NUTS_MARRY(surv, resp);
+
+ for (int i = 0; i < 100; i++) {
+ int rv;
+ NUTS_PASS(nng_msg_alloc(&m, 4));
+ // Raw mode: survey ID (high bit set) travels in the body.
+ NUTS_PASS(nng_msg_append_u32(m, (unsigned) i | 0x80000000u));
+ NUTS_PASS(nng_sendmsg(surv, m, 0));
+ NUTS_PASS(nng_recvmsg(resp, &m, 0));
+ p = nng_msg_get_pipe(m);
+ rv = nng_sendmsg(resp, m, 0);
+ if (rv == NNG_ETIMEDOUT) {
+ // Queue is backed up, senders are busy.
+ nng_msg_free(m);
+ break;
+ }
+ NUTS_PASS(rv);
+ }
+ NUTS_PASS(nng_pipe_close(p));
+
+ NUTS_CLOSE(surv);
+ NUTS_CLOSE(resp);
+}
+
+// A receive posted with an already-stopped aio must complete immediately
+// with NNG_ECANCELED.
+void
+test_resp_ctx_recv_aio_stopped(void)
+{
+ nng_socket sock;
+ nng_ctx c;
+ nng_aio * a;
+
+ NUTS_PASS(nng_respondent0_open(&sock));
+ NUTS_PASS(nng_ctx_open(&c, sock));
+ NUTS_PASS(nng_aio_alloc(&a, NULL, NULL));
+
+ nng_aio_stop(a);
+ nng_ctx_recv(c, a);
+ nng_aio_wait(a);
+ NUTS_FAIL(nng_aio_result(a), NNG_ECANCELED);
+ NUTS_PASS(nng_ctx_close(c));
+ NUTS_CLOSE(sock);
+ nng_aio_free(a);
+}
+
+// Ten contexts each answer a survey, then the underlying pipe is closed
+// while those replies are in flight. Each pending send either completes
+// (silent discard) or fails with NNG_ECLOSED; nothing hangs or leaks.
+void
+test_resp_close_pipe_context_send(void)
+{
+ nng_socket resp;
+ nng_socket surv;
+ nng_pipe p = NNG_PIPE_INITIALIZER;
+ nng_msg * m;
+ nng_ctx ctx[10];
+ nng_aio * aio[10];
+ int i;
+
+ NUTS_PASS(nng_respondent0_open(&resp));
+ NUTS_PASS(nng_surveyor0_open_raw(&surv));
+ NUTS_PASS(nng_setopt_ms(resp, NNG_OPT_RECVTIMEO, 1000));
+ NUTS_PASS(nng_setopt_ms(resp, NNG_OPT_SENDTIMEO, 1000));
+ NUTS_PASS(nng_setopt_ms(surv, NNG_OPT_SENDTIMEO, 1000));
+ NUTS_PASS(nng_setopt_int(resp, NNG_OPT_SENDBUF, 1));
+ NUTS_PASS(nng_setopt_int(resp, NNG_OPT_RECVBUF, 1));
+ NUTS_PASS(nng_setopt_int(surv, NNG_OPT_SENDBUF, 1));
+ NUTS_PASS(nng_setopt_int(surv, NNG_OPT_RECVBUF, 1));
+ for (i = 0; i < 10; i++) {
+ NUTS_PASS(nng_ctx_open(&ctx[i], resp));
+ NUTS_PASS(nng_aio_alloc(&aio[i], NULL, NULL));
+ }
+
+ NUTS_MARRY(surv, resp);
+
+ for (i = 0; i < 10; i++) {
+ NUTS_PASS(nng_msg_alloc(&m, 4));
+ // Raw mode: survey ID (high bit set) travels in the body.
+ NUTS_PASS(nng_msg_append_u32(m, (unsigned) i | 0x80000000u));
+ NUTS_PASS(nng_sendmsg(surv, m, 0));
+ nng_ctx_recv(ctx[i], aio[i]);
+ }
+ for (i = 0; i < 10; i++) {
+ nng_aio_wait(aio[i]);
+ NUTS_PASS(nng_aio_result(aio[i]));
+ NUTS_TRUE((m = nng_aio_get_msg(aio[i])) != NULL);
+ p = nng_msg_get_pipe(m);
+ nng_aio_set_msg(aio[i], m);
+ nng_ctx_send(ctx[i], aio[i]);
+ }
+
+ // Note that SURVEYOR socket is not reading the results.
+ NUTS_PASS(nng_pipe_close(p));
+
+ for (i = 0; i < 10; i++) {
+ int rv;
+ nng_aio_wait(aio[i]);
+ rv = nng_aio_result(aio[i]);
+ if (rv != 0) {
+ NUTS_FAIL(rv, NNG_ECLOSED);
+ nng_msg_free(nng_aio_get_msg(aio[i]));
+ }
+ nng_aio_free(aio[i]);
+ NUTS_PASS(nng_ctx_close(ctx[i]));
+ }
+ NUTS_CLOSE(surv);
+ NUTS_CLOSE(resp);
+}
+
+// Like the pipe-close variant above, but the contexts themselves are
+// closed while their reply sends are outstanding; each send either
+// completes or fails with NNG_ECLOSED.
+void
+test_resp_close_context_send(void)
+{
+ nng_socket resp;
+ nng_socket surv;
+ nng_msg * m;
+ nng_ctx ctx[10];
+ nng_aio * aio[10];
+ int i;
+
+ NUTS_PASS(nng_respondent0_open(&resp));
+ NUTS_PASS(nng_surveyor0_open_raw(&surv));
+ NUTS_PASS(nng_setopt_ms(resp, NNG_OPT_RECVTIMEO, 1000));
+ NUTS_PASS(nng_setopt_ms(resp, NNG_OPT_SENDTIMEO, 1000));
+ NUTS_PASS(nng_setopt_ms(surv, NNG_OPT_SENDTIMEO, 1000));
+ NUTS_PASS(nng_setopt_int(resp, NNG_OPT_SENDBUF, 1));
+ NUTS_PASS(nng_setopt_int(resp, NNG_OPT_RECVBUF, 1));
+ NUTS_PASS(nng_setopt_int(surv, NNG_OPT_SENDBUF, 1));
+ NUTS_PASS(nng_setopt_int(surv, NNG_OPT_RECVBUF, 1));
+ for (i = 0; i < 10; i++) {
+ NUTS_PASS(nng_ctx_open(&ctx[i], resp));
+ NUTS_PASS(nng_aio_alloc(&aio[i], NULL, NULL));
+ }
+
+ NUTS_MARRY(surv, resp);
+
+ for (i = 0; i < 10; i++) {
+ NUTS_PASS(nng_msg_alloc(&m, 4));
+ // Raw mode: survey ID (high bit set) travels in the body.
+ NUTS_PASS(nng_msg_append_u32(m, (unsigned) i | 0x80000000u));
+ NUTS_PASS(nng_sendmsg(surv, m, 0));
+ nng_ctx_recv(ctx[i], aio[i]);
+ }
+ for (i = 0; i < 10; i++) {
+ nng_aio_wait(aio[i]);
+ NUTS_PASS(nng_aio_result(aio[i]));
+ NUTS_TRUE((m = nng_aio_get_msg(aio[i])) != NULL);
+ nng_aio_set_msg(aio[i], m);
+ nng_ctx_send(ctx[i], aio[i]);
+ }
+
+ // Note that the SURVEYOR socket is not reading the results.
+ for (i = 0; i < 10; i++) {
+ int rv;
+ NUTS_PASS(nng_ctx_close(ctx[i]));
+ nng_aio_wait(aio[i]);
+ rv = nng_aio_result(aio[i]);
+ if (rv != 0) {
+ NUTS_FAIL(rv, NNG_ECLOSED);
+ nng_msg_free(nng_aio_get_msg(aio[i]));
+ }
+ nng_aio_free(aio[i]);
+ }
+ NUTS_CLOSE(surv);
+ NUTS_CLOSE(resp);
+}
+
+// A context receive with a zero timeout must complete immediately with
+// NNG_ETIMEDOUT when no survey is pending.
+static void
+test_resp_ctx_recv_nonblock(void)
+{
+ nng_socket sock;
+ nng_ctx c;
+ nng_aio * a;
+
+ NUTS_PASS(nng_respondent0_open(&sock));
+ NUTS_PASS(nng_ctx_open(&c, sock));
+ NUTS_PASS(nng_aio_alloc(&a, NULL, NULL));
+
+ nng_aio_set_timeout(a, 0); // Instant timeout
+ nng_ctx_recv(c, a);
+
+ nng_aio_wait(a);
+ NUTS_FAIL(nng_aio_result(a), NNG_ETIMEDOUT);
+ NUTS_CLOSE(sock);
+ nng_aio_free(a);
+}
+
+// A context reply send with a zero timeout times out instantly when the
+// surveyor is not draining its receive side.
+static void
+test_resp_ctx_send_nonblock(void)
+{
+ nng_socket resp;
+ nng_socket surv;
+ nng_ctx ctx;
+ nng_aio * aio;
+ nng_msg * msg;
+
+ NUTS_PASS(nng_surveyor0_open(&surv));
+ NUTS_PASS(nng_respondent0_open(&resp));
+ NUTS_PASS(nng_setopt_ms(surv, NNG_OPT_SENDTIMEO, 1000));
+ NUTS_PASS(nng_setopt_ms(resp, NNG_OPT_RECVTIMEO, 1000));
+ NUTS_PASS(nng_setopt_ms(resp, NNG_OPT_SENDTIMEO, 1000));
+ NUTS_PASS(nng_ctx_open(&ctx, resp));
+ NUTS_PASS(nng_aio_alloc(&aio, NULL, NULL));
+ NUTS_MARRY(surv, resp);
+
+ NUTS_SEND(surv, "SEND");
+ nng_ctx_recv(ctx, aio);
+ nng_aio_wait(aio);
+ NUTS_PASS(nng_aio_result(aio));
+ // message carries over
+ msg = nng_aio_get_msg(aio);
+ nng_aio_set_msg(aio, msg);
+ nng_aio_set_timeout(aio, 0); // Instant timeout
+ nng_ctx_send(ctx, aio);
+
+ nng_aio_wait(aio);
+ NUTS_FAIL(nng_aio_result(aio), NNG_ETIMEDOUT);
+ NUTS_CLOSE(surv);
+ NUTS_CLOSE(resp);
+ nng_aio_free(aio);
+ nng_msg_free(msg);
+}
+
+// A malformed survey (ID lacking the required high bit) must be dropped
+// by the respondent rather than delivered; the receive times out.
+void
+test_resp_recv_garbage(void)
+{
+ nng_socket resp;
+ nng_socket surv;
+ nng_msg * m;
+
+ NUTS_PASS(nng_respondent0_open(&resp));
+ NUTS_PASS(nng_surveyor0_open_raw(&surv));
+ NUTS_PASS(nng_setopt_ms(resp, NNG_OPT_RECVTIMEO, 200));
+ NUTS_PASS(nng_setopt_ms(resp, NNG_OPT_SENDTIMEO, 200));
+ NUTS_PASS(nng_setopt_ms(surv, NNG_OPT_SENDTIMEO, 1000));
+
+ NUTS_MARRY(surv, resp);
+
+ NUTS_PASS(nng_msg_alloc(&m, 4));
+ NUTS_PASS(nng_msg_append_u32(m, 1u)); // no high bit: not a survey ID
+ NUTS_PASS(nng_sendmsg(surv, m, 0));
+ NUTS_FAIL(nng_recvmsg(resp, &m, 0), NNG_ETIMEDOUT);
+
+ NUTS_CLOSE(surv);
+ NUTS_CLOSE(resp);
+}
+
+// Exercise NNG_OPT_MAXTTL: range, size, and type validation.
+// (Modernized to use nng_socket_set_* throughout; the function
+// previously mixed deprecated nng_setopt_* setters with modern
+// nng_socket_get_* getters.)
+static void
+test_resp_ttl_option(void)
+{
+ nng_socket resp;
+ int v;
+ bool b;
+ size_t sz;
+ const char *opt = NNG_OPT_MAXTTL;
+
+ NUTS_PASS(nng_respondent0_open(&resp));
+
+ NUTS_PASS(nng_socket_set_int(resp, opt, 1));
+ // TTL must lie in [1, NNI_MAX_MAX_TTL].
+ NUTS_FAIL(nng_socket_set_int(resp, opt, 0), NNG_EINVAL);
+ NUTS_FAIL(nng_socket_set_int(resp, opt, -1), NNG_EINVAL);
+ NUTS_FAIL(nng_socket_set_int(resp, opt, 16), NNG_EINVAL);
+ NUTS_FAIL(nng_socket_set_int(resp, opt, 256), NNG_EINVAL);
+ NUTS_PASS(nng_socket_set_int(resp, opt, 3));
+ NUTS_PASS(nng_socket_get_int(resp, opt, &v));
+ NUTS_TRUE(v == 3);
+ v = 0;
+ sz = sizeof(v);
+ NUTS_PASS(nng_socket_get(resp, opt, &v, &sz));
+ NUTS_TRUE(v == 3);
+ NUTS_TRUE(sz == sizeof(v));
+
+ // Undersized or mistyped accesses must be rejected.
+ NUTS_FAIL(nng_socket_set(resp, opt, "", 1), NNG_EINVAL);
+ sz = 1;
+ NUTS_FAIL(nng_socket_get(resp, opt, &v, &sz), NNG_EINVAL);
+ NUTS_FAIL(nng_socket_set_bool(resp, opt, true), NNG_EBADTYPE);
+ NUTS_FAIL(nng_socket_get_bool(resp, opt, &b), NNG_EBADTYPE);
+
+ NUTS_CLOSE(resp);
+}
+
+// With MAXTTL = 3, surveys that have traversed more than 3 hops must be
+// dropped; those at or under the limit are delivered in order.
+static void
+test_resp_ttl_drop(void)
+{
+ nng_socket resp;
+ nng_socket surv;
+ nng_msg * m;
+
+ NUTS_PASS(nng_respondent0_open(&resp));
+ NUTS_PASS(nng_surveyor0_open_raw(&surv));
+ NUTS_PASS(nng_setopt_int(resp, NNG_OPT_MAXTTL, 3));
+ NUTS_PASS(nng_setopt_ms(resp, NNG_OPT_RECVTIMEO, 200));
+ NUTS_PASS(nng_setopt_ms(surv, NNG_OPT_SENDTIMEO, 1000));
+
+ NUTS_MARRY(surv, resp);
+
+ // Send messages. Note that xrep implicitly adds a hop on receive.
+
+ NUTS_PASS(nng_msg_alloc(&m, 0));
+ NUTS_PASS(nng_msg_append_u32(m, 1u)); // 2 hops
+ NUTS_PASS(nng_msg_append_u32(m, 0x80000001u));
+ NUTS_PASS(nng_msg_append(m, "PASS1", 6));
+ NUTS_PASS(nng_sendmsg(surv, m, 0));
+
+ NUTS_PASS(nng_msg_alloc(&m, 0));
+ NUTS_PASS(nng_msg_append_u32(m, 1u)); // 4 hops -- discard!
+ NUTS_PASS(nng_msg_append_u32(m, 2u));
+ NUTS_PASS(nng_msg_append_u32(m, 3u));
+ NUTS_PASS(nng_msg_append_u32(m, 0x80000002u));
+ NUTS_PASS(nng_msg_append(m, "FAIL2", 6));
+ NUTS_PASS(nng_sendmsg(surv, m, 0));
+
+ NUTS_PASS(nng_msg_alloc(&m, 0));
+ NUTS_PASS(nng_msg_append_u32(m, 1u)); // 3 hops - passes
+ NUTS_PASS(nng_msg_append_u32(m, 2u));
+ NUTS_PASS(nng_msg_append_u32(m, 0x80000003u));
+ NUTS_PASS(nng_msg_append(m, "PASS3", 6));
+ NUTS_PASS(nng_sendmsg(surv, m, 0));
+
+ NUTS_PASS(nng_msg_alloc(&m, 0));
+ NUTS_PASS(nng_msg_append_u32(m, 1u)); // 4 hops -- discard!
+ NUTS_PASS(nng_msg_append_u32(m, 2u));
+ NUTS_PASS(nng_msg_append_u32(m, 3u));
+ NUTS_PASS(nng_msg_append_u32(m, 0x80000003u));
+ NUTS_PASS(nng_msg_append(m, "FAIL4", 6));
+ NUTS_PASS(nng_sendmsg(surv, m, 0));
+
+ // Only the messages under the TTL limit arrive.
+ NUTS_RECV(resp, "PASS1");
+ NUTS_RECV(resp, "PASS3");
+
+ NUTS_FAIL(nng_recvmsg(resp, &m, 0), NNG_ETIMEDOUT);
+
+ NUTS_CLOSE(resp);
+ NUTS_CLOSE(surv);
+}
+
+// Registry of test cases consumed by the NUTS (acutest-based) harness.
+TEST_LIST = {
+ { "respond identity", test_resp_identity },
+ { "respond send bad state", test_resp_send_bad_state },
+ { "respond poll readable", test_resp_poll_readable },
+ { "respond poll writable", test_resp_poll_writeable },
+ { "respond context does not poll", test_resp_context_no_poll },
+ { "respond validate peer", test_resp_validate_peer },
+ { "respond double recv", test_resp_double_recv },
+ { "respond close pipe before send", test_resp_close_pipe_before_send },
+ { "respond close pipe during send", test_resp_close_pipe_during_send },
+ { "respond recv aio ctx stopped", test_resp_ctx_recv_aio_stopped },
+ { "respond close pipe context send",
+ test_resp_close_pipe_context_send },
+ { "respond close context send", test_resp_close_context_send },
+ { "respond context send nonblock", test_resp_ctx_send_nonblock },
+ { "respond context recv nonblock", test_resp_ctx_recv_nonblock },
+ { "respond recv garbage", test_resp_recv_garbage },
+ { "respond ttl option", test_resp_ttl_option },
+ { "respond ttl drop", test_resp_ttl_drop },
+ { NULL, NULL },
+};
diff --git a/src/sp/protocol/survey0/survey.c b/src/sp/protocol/survey0/survey.c
new file mode 100644
index 00000000..ce1ed601
--- /dev/null
+++ b/src/sp/protocol/survey0/survey.c
@@ -0,0 +1,663 @@
+//
+// Copyright 2020 Staysail Systems, Inc. <info@staysail.tech>
+// Copyright 2018 Capitar IT Group BV <info@capitar.com>
+//
+// This software is supplied under the terms of the MIT License, a
+// copy of which should be located in the distribution where this
+// file was obtained (LICENSE.txt). A copy of the license may also be
+// found online at https://opensource.org/licenses/MIT.
+//
+
+#include <stdlib.h>
+
+#include "core/nng_impl.h"
+#include "nng/protocol/survey0/survey.h"
+
+// Surveyor protocol. The SURVEYOR protocol is the "survey" side of the
+// survey pattern. This is useful for building service discovery, voting, etc.
+// Note that this pattern is not optimized for extreme low latency, as it makes
+// multiple use of queues for simplicity. Typically this is used in cases
+// where a few dozen extra microseconds does not matter.
+
+typedef struct surv0_pipe surv0_pipe;
+typedef struct surv0_sock surv0_sock;
+typedef struct surv0_ctx surv0_ctx;
+
+static void surv0_pipe_send_cb(void *);
+static void surv0_pipe_recv_cb(void *);
+static void surv0_ctx_timeout(void *);
+
+// surv0_ctx is the per-context state: one outstanding survey at a time.
+struct surv0_ctx {
+ surv0_sock * sock; // owning socket
+ uint32_t survey_id; // survey id (0 when no survey outstanding)
+ nni_timer_node timer; // fires surv0_ctx_timeout at expire
+ nni_time expire; // absolute deadline of the current survey
+ nni_lmq recv_lmq; // buffered responses not yet received
+ nni_list recv_queue; // aios waiting for a response
+ nni_atomic_int recv_buf; // receive buffer depth (messages)
+ nni_atomic_int survey_time; // survey duration (ms)
+ int err; // NOTE(review): not obviously read in this file -- confirm
+};
+
+// surv0_sock is our per-socket protocol private structure.
+struct surv0_sock {
+ int ttl; // maximum hop count (NNG_OPT_MAXTTL)
+ nni_list pipes; // connected respondent pipes
+ nni_mtx mtx; // guards pipes, contexts, surveys map
+ surv0_ctx ctx; // default (socket) context
+ nni_id_map surveys; // survey_id -> surv0_ctx
+ nni_pollable writable; // always raised: sends never block here
+ nni_pollable readable; // raised when the socket ctx has responses
+ nni_atomic_int send_buf; // per-pipe send queue depth
+};
+
+// surv0_pipe is our per-pipe protocol private structure.
+struct surv0_pipe {
+ nni_pipe * pipe; // underlying transport pipe
+ surv0_sock * sock; // owning socket
+ nni_lmq send_queue; // surveys queued while the pipe is busy
+ nni_list_node node; // linkage on sock->pipes
+ nni_aio aio_send;
+ nni_aio aio_recv;
+ bool busy; // a send is in flight on aio_send
+ bool closed; // pipe has been closed; stop requeuing
+};
+
+// Abort the context's survey: fail every waiting receive with err, drop
+// any buffered responses, and retire the survey ID so late responses
+// are discarded. Caller must hold sock->mtx.
+static void
+surv0_ctx_abort(surv0_ctx *ctx, int err)
+{
+ nni_aio * aio;
+ surv0_sock *sock = ctx->sock;
+
+ while ((aio = nni_list_first(&ctx->recv_queue)) != NULL) {
+ nni_list_remove(&ctx->recv_queue, aio);
+ nni_aio_finish_error(aio, err);
+ }
+ nni_lmq_flush(&ctx->recv_lmq);
+ if (ctx->survey_id != 0) {
+ nni_id_remove(&sock->surveys, ctx->survey_id);
+ ctx->survey_id = 0;
+ }
+ // Only the default context drives the socket's readable poll state.
+ if (ctx == &sock->ctx) {
+ nni_pollable_clear(&sock->readable);
+ }
+}
+
+// Close the context: abort everything outstanding with NNG_ECLOSED.
+static void
+surv0_ctx_close(surv0_ctx *ctx)
+{
+ surv0_sock *sock = ctx->sock;
+
+ nni_mtx_lock(&sock->mtx);
+ surv0_ctx_abort(ctx, NNG_ECLOSED);
+ nni_mtx_unlock(&sock->mtx);
+}
+
+// Tear down the context: close it, stop its timer, free its queue.
+static void
+surv0_ctx_fini(void *arg)
+{
+ surv0_ctx *ctx = arg;
+
+ surv0_ctx_close(ctx);
+ nni_timer_cancel(&ctx->timer);
+ nni_lmq_fini(&ctx->recv_lmq);
+}
+
+// Initialize a context. The default (socket) context gets built-in
+// defaults; additional contexts inherit the socket context's current
+// buffer depth and survey time.
+static int
+surv0_ctx_init(void *c, void *s)
+{
+ surv0_ctx * ctx = c;
+ surv0_sock * sock = s;
+ int rv;
+ int len;
+ nng_duration tmo;
+
+ nni_aio_list_init(&ctx->recv_queue);
+ nni_atomic_init(&ctx->recv_buf);
+ nni_atomic_init(&ctx->survey_time);
+
+ if (ctx == &sock->ctx) {
+ len = 128;
+ tmo = NNI_SECOND; // survey timeout
+ } else {
+ len = nni_atomic_get(&sock->ctx.recv_buf);
+ tmo = nni_atomic_get(&sock->ctx.survey_time);
+ }
+
+ nni_atomic_set(&ctx->recv_buf, len);
+ nni_atomic_set(&ctx->survey_time, tmo);
+
+ ctx->sock = sock;
+
+ if ((rv = nni_lmq_init(&ctx->recv_lmq, len)) != 0) {
+ surv0_ctx_fini(ctx);
+ return (rv);
+ }
+ nni_timer_init(&ctx->timer, surv0_ctx_timeout, ctx);
+ return (0);
+}
+
+// Cancellation callback for a scheduled receive aio: withdraw it from
+// the wait queue and retire the survey (a canceled receive ends the
+// survey for this context).
+static void
+surv0_ctx_cancel(nni_aio *aio, void *arg, int rv)
+{
+ surv0_ctx * ctx = arg;
+ surv0_sock *sock = ctx->sock;
+ nni_mtx_lock(&sock->mtx);
+ if (nni_list_active(&ctx->recv_queue, aio)) {
+ nni_list_remove(&ctx->recv_queue, aio);
+ nni_aio_finish_error(aio, rv);
+ }
+ if (ctx->survey_id != 0) {
+ nni_id_remove(&sock->surveys, ctx->survey_id);
+ ctx->survey_id = 0;
+ }
+ nni_mtx_unlock(&sock->mtx);
+}
+
+// Receive a response on the context. Fails with NNG_ESTATE if no survey
+// is outstanding. If no response is buffered, park the aio on the wait
+// queue; otherwise deliver a buffered message (made unique first, since
+// sends clone the survey message to multiple pipes).
+static void
+surv0_ctx_recv(void *arg, nni_aio *aio)
+{
+ surv0_ctx * ctx = arg;
+ surv0_sock *sock = ctx->sock;
+ nni_msg * msg;
+
+ if (nni_aio_begin(aio) != 0) {
+ return;
+ }
+
+ nni_mtx_lock(&sock->mtx);
+ if (ctx->survey_id == 0) {
+ nni_mtx_unlock(&sock->mtx);
+ nni_aio_finish_error(aio, NNG_ESTATE);
+ return;
+ }
+again:
+ if (nni_lmq_getq(&ctx->recv_lmq, &msg) != 0) {
+ int rv;
+ if ((rv = nni_aio_schedule(aio, &surv0_ctx_cancel, ctx)) !=
+ 0) {
+ nni_mtx_unlock(&sock->mtx);
+ nni_aio_finish_error(aio, rv);
+ return;
+ }
+ nni_list_append(&ctx->recv_queue, aio);
+ nni_mtx_unlock(&sock->mtx);
+ return;
+ }
+ if (nni_lmq_empty(&ctx->recv_lmq) && (ctx == &sock->ctx)) {
+ nni_pollable_clear(&sock->readable);
+ }
+ // nni_msg_unique can fail (dropping the shared copy); retry with
+ // the next buffered message in that case.
+ if ((msg = nni_msg_unique(msg)) == NULL) {
+ goto again;
+ }
+
+ nni_mtx_unlock(&sock->mtx);
+ nni_aio_finish_msg(aio, msg);
+}
+
+// Timer callback: the survey deadline arrived. If the clock has not yet
+// reached the recorded expiry (the timer raced a reschedule), do
+// nothing; otherwise fail pending receives with NNG_ETIMEDOUT and
+// retire the survey ID. Marked static to match the forward declaration
+// near the top of this file.
+static void
+surv0_ctx_timeout(void *arg)
+{
+ surv0_ctx * ctx = arg;
+ surv0_sock *sock = ctx->sock;
+
+ nni_mtx_lock(&sock->mtx);
+ if (nni_clock() < ctx->expire) {
+ nni_mtx_unlock(&sock->mtx);
+ return;
+ }
+
+ // Abort any pending receives.
+ surv0_ctx_abort(ctx, NNG_ETIMEDOUT);
+ nni_mtx_unlock(&sock->mtx);
+}
+
+// Start a new survey on the context: cancel any previous survey, mint a
+// fresh survey ID into the message header, broadcast a clone to every
+// connected pipe (best effort -- busy pipes queue, full queues drop),
+// and arm the expiry timer. Always succeeds once the ID is allocated,
+// even with zero pipes connected.
+static void
+surv0_ctx_send(void *arg, nni_aio *aio)
+{
+ surv0_ctx * ctx = arg;
+ surv0_sock * sock = ctx->sock;
+ surv0_pipe * pipe;
+ nni_msg * msg = nni_aio_get_msg(aio);
+ size_t len = nni_msg_len(msg);
+ nni_time now = nni_clock();
+ nng_duration survey_time;
+ int rv;
+
+ if (nni_aio_begin(aio) != 0) {
+ return;
+ }
+
+ survey_time = nni_atomic_get(&ctx->survey_time);
+
+ nni_mtx_lock(&sock->mtx);
+
+ // Abort everything outstanding.
+ surv0_ctx_abort(ctx, NNG_ECANCELED);
+ nni_timer_cancel(&ctx->timer);
+
+ // Allocate the new ID.
+ if ((rv = nni_id_alloc(&sock->surveys, &ctx->survey_id, ctx)) != 0) {
+ nni_mtx_unlock(&sock->mtx);
+ nni_aio_finish_error(aio, rv);
+ return;
+ }
+ nni_msg_header_clear(msg);
+ nni_msg_header_append_u32(msg, (uint32_t) ctx->survey_id);
+
+ // From this point, we're committed to success. Note that we send
+ // regardless of whether there are any pipes or not. If no pipes,
+ // then it just gets discarded.
+ nni_aio_set_msg(aio, NULL);
+ NNI_LIST_FOREACH (&sock->pipes, pipe) {
+
+ // if the pipe isn't busy, then send this message direct.
+ if (!pipe->busy) {
+ pipe->busy = true;
+ nni_msg_clone(msg);
+ nni_aio_set_msg(&pipe->aio_send, msg);
+ nni_pipe_send(pipe->pipe, &pipe->aio_send);
+ } else if (!nni_lmq_full(&pipe->send_queue)) {
+ nni_msg_clone(msg);
+ nni_lmq_putq(&pipe->send_queue, msg);
+ }
+ }
+
+ ctx->expire = now + survey_time;
+ nni_timer_schedule(&ctx->timer, ctx->expire);
+
+ nni_mtx_unlock(&sock->mtx);
+ // Drop our reference; the pipes hold their own clones.
+ nni_msg_free(msg);
+
+ nni_aio_finish(aio, 0, len);
+}
+
+// Tear down the socket: finalize the default context first (it aborts
+// outstanding work), then the ID map, pollables, and lock.
+static void
+surv0_sock_fini(void *arg)
+{
+ surv0_sock *sock = arg;
+
+ surv0_ctx_fini(&sock->ctx);
+ nni_id_map_fini(&sock->surveys);
+ nni_pollable_fini(&sock->writable);
+ nni_pollable_fini(&sock->readable);
+ nni_mtx_fini(&sock->mtx);
+}
+
+// Initialize the socket: pipe list, lock, pollables (surveyors are
+// always writable), survey ID map, and the default context.
+static int
+surv0_sock_init(void *arg, nni_sock *s)
+{
+ surv0_sock *sock = arg;
+ int rv;
+
+ NNI_ARG_UNUSED(s);
+
+ NNI_LIST_INIT(&sock->pipes, surv0_pipe, node);
+ nni_mtx_init(&sock->mtx);
+ nni_pollable_init(&sock->readable);
+ nni_pollable_init(&sock->writable);
+ // We are always writable.
+ nni_pollable_raise(&sock->writable);
+
+ // We allow for some buffering on a per pipe basis, to allow for
+ // multiple contexts to have surveys outstanding. It is recommended
+ // to increase this if many contexts will want to publish
+ // at nearly the same time.
+ nni_atomic_init(&sock->send_buf);
+ nni_atomic_set(&sock->send_buf, 8);
+
+ // Survey IDs are 32 bits, with the high order bit set.
+ // We start at a random point, to minimize likelihood of
+ // accidental collision across restarts.
+ nni_id_map_init(&sock->surveys, 0x80000000u, 0xffffffffu, true);
+
+ if ((rv = surv0_ctx_init(&sock->ctx, sock)) != 0) {
+ surv0_sock_fini(sock);
+ return (rv);
+ }
+
+ sock->ttl = 8;
+
+ return (0);
+}
+
+// Nothing extra to do when the socket opens.
+static void
+surv0_sock_open(void *arg)
+{
+ NNI_ARG_UNUSED(arg);
+}
+
+// Closing the socket closes its default context (aborts with ECLOSED).
+static void
+surv0_sock_close(void *arg)
+{
+ surv0_sock *s = arg;
+
+ surv0_ctx_close(&s->ctx);
+}
+
+// Wait for both in-flight aios to fully stop before the pipe is freed.
+static void
+surv0_pipe_stop(void *arg)
+{
+ surv0_pipe *p = arg;
+
+ nni_aio_stop(&p->aio_send);
+ nni_aio_stop(&p->aio_recv);
+}
+
+// Release per-pipe resources (aios and the queued-survey buffer).
+static void
+surv0_pipe_fini(void *arg)
+{
+ surv0_pipe *p = arg;
+
+ nni_aio_fini(&p->aio_send);
+ nni_aio_fini(&p->aio_recv);
+ nni_lmq_fini(&p->send_queue);
+}
+
+// Initialize a pipe: set up send/recv callbacks and the per-pipe send
+// queue (depth taken from the socket's send_buf setting).
+static int
+surv0_pipe_init(void *arg, nni_pipe *pipe, void *s)
+{
+ surv0_pipe *p = arg;
+ surv0_sock *sock = s;
+ int rv;
+ int len;
+
+ len = nni_atomic_get(&sock->send_buf);
+ nni_aio_init(&p->aio_send, surv0_pipe_send_cb, p);
+ nni_aio_init(&p->aio_recv, surv0_pipe_recv_cb, p);
+
+ // This depth could be tunable. The deeper the queue, the more
+ // concurrent surveys that can be delivered (multiple contexts).
+ // Note that surveys can be *outstanding*, but not yet put on the wire.
+ if ((rv = nni_lmq_init(&p->send_queue, len)) != 0) {
+ surv0_pipe_fini(p);
+ return (rv);
+ }
+
+ p->pipe = pipe;
+ p->sock = sock;
+ return (0);
+}
+
+// Start a pipe: reject peers that are not respondents (NNG_EPROTO),
+// register the pipe with the socket, and begin receiving.
+static int
+surv0_pipe_start(void *arg)
+{
+ surv0_pipe *p = arg;
+ surv0_sock *s = p->sock;
+
+ if (nni_pipe_peer(p->pipe) != NNG_SURVEYOR0_PEER) {
+ return (NNG_EPROTO);
+ }
+
+ nni_mtx_lock(&s->mtx);
+ nni_list_append(&s->pipes, p);
+ nni_mtx_unlock(&s->mtx);
+
+ nni_pipe_recv(p->pipe, &p->aio_recv);
+ return (0);
+}
+
+// Close a pipe: shut down its aios, mark it closed so the send callback
+// stops requeuing, drop queued surveys, and unlink it from the socket.
+static void
+surv0_pipe_close(void *arg)
+{
+ surv0_pipe *p = arg;
+ surv0_sock *s = p->sock;
+
+ nni_aio_close(&p->aio_send);
+ nni_aio_close(&p->aio_recv);
+
+ nni_mtx_lock(&s->mtx);
+ p->closed = true;
+ nni_lmq_flush(&p->send_queue);
+ if (nni_list_active(&s->pipes, p)) {
+ nni_list_remove(&s->pipes, p);
+ }
+ nni_mtx_unlock(&s->mtx);
+}
+
+// Send completion: on error, free the message and close the pipe.
+// Otherwise drain the next queued survey, or mark the pipe idle.
+static void
+surv0_pipe_send_cb(void *arg)
+{
+ surv0_pipe *p = arg;
+ surv0_sock *sock = p->sock;
+ nni_msg * msg;
+
+ if (nni_aio_result(&p->aio_send) != 0) {
+ nni_msg_free(nni_aio_get_msg(&p->aio_send));
+ nni_aio_set_msg(&p->aio_send, NULL);
+ nni_pipe_close(p->pipe);
+ return;
+ }
+
+ nni_mtx_lock(&sock->mtx);
+ if (p->closed) {
+ nni_mtx_unlock(&sock->mtx);
+ return;
+ }
+ if (nni_lmq_getq(&p->send_queue, &msg) == 0) {
+ nni_aio_set_msg(&p->aio_send, msg);
+ nni_pipe_send(p->pipe, &p->aio_send);
+ } else {
+ p->busy = false;
+ }
+ nni_mtx_unlock(&sock->mtx);
+}
+
+// Receive completion: validate the response, move the survey ID from the
+// body to the header, and route it to the owning context (delivering to
+// a waiting aio, buffering, or discarding if unmatched or the buffer is
+// full). Then post the next receive.
+static void
+surv0_pipe_recv_cb(void *arg)
+{
+ surv0_pipe *p = arg;
+ surv0_sock *sock = p->sock;
+ surv0_ctx * ctx;
+ nni_msg * msg;
+ uint32_t id;
+ nni_aio * aio;
+
+ if (nni_aio_result(&p->aio_recv) != 0) {
+ nni_pipe_close(p->pipe);
+ return;
+ }
+
+ msg = nni_aio_get_msg(&p->aio_recv);
+ nni_aio_set_msg(&p->aio_recv, NULL);
+ nni_msg_set_pipe(msg, nni_pipe_id(p->pipe));
+
+ // We yank 4 bytes of body, and move them to the header.
+ if (nni_msg_len(msg) < 4) {
+ // Peer sent us garbage. Kick it.
+ nni_msg_free(msg);
+ nni_pipe_close(p->pipe);
+ return;
+ }
+ id = nni_msg_trim_u32(msg);
+ nni_msg_header_append_u32(msg, id);
+
+ nni_mtx_lock(&sock->mtx);
+ // Best effort at delivery. Discard if no context or context is
+ // unable to receive it.
+ if (((ctx = nni_id_get(&sock->surveys, id)) == NULL) ||
+ (nni_lmq_full(&ctx->recv_lmq))) {
+ nni_msg_free(msg);
+ } else if ((aio = nni_list_first(&ctx->recv_queue)) != NULL) {
+ nni_list_remove(&ctx->recv_queue, aio);
+ nni_aio_finish_msg(aio, msg);
+ } else {
+ nni_lmq_putq(&ctx->recv_lmq, msg);
+ if (ctx == &sock->ctx) {
+ nni_pollable_raise(&sock->readable);
+ }
+ }
+ nni_mtx_unlock(&sock->mtx);
+
+ nni_pipe_recv(p->pipe, &p->aio_recv);
+}
+
+// Set the survey duration (ms) on a context.
+static int
+surv0_ctx_set_survey_time(
+ void *arg, const void *buf, size_t sz, nni_opt_type t)
+{
+ surv0_ctx * ctx = arg;
+ nng_duration expire;
+ int rv;
+ if ((rv = nni_copyin_ms(&expire, buf, sz, t)) == 0) {
+ nni_atomic_set(&ctx->survey_time, expire);
+ }
+ return (rv);
+}
+
+// Get the survey duration (ms) from a context.
+static int
+surv0_ctx_get_survey_time(void *arg, void *buf, size_t *szp, nni_opt_type t)
+{
+ surv0_ctx *ctx = arg;
+ return (
+ nni_copyout_ms(nni_atomic_get(&ctx->survey_time), buf, szp, t));
+}
+
+// Set the maximum hop count; valid range is [1, NNI_MAX_MAX_TTL].
+static int
+surv0_sock_set_max_ttl(void *arg, const void *buf, size_t sz, nni_opt_type t)
+{
+ surv0_sock *s = arg;
+ return (nni_copyin_int(&s->ttl, buf, sz, 1, NNI_MAX_MAX_TTL, t));
+}
+
+// Get the maximum hop count.
+static int
+surv0_sock_get_max_ttl(void *arg, void *buf, size_t *szp, nni_opt_type t)
+{
+ surv0_sock *s = arg;
+ return (nni_copyout_int(s->ttl, buf, szp, t));
+}
+
+// Socket-level survey-time accessors delegate to the default context.
+static int
+surv0_sock_set_survey_time(
+ void *arg, const void *buf, size_t sz, nni_opt_type t)
+{
+ surv0_sock *s = arg;
+ return (surv0_ctx_set_survey_time(&s->ctx, buf, sz, t));
+}
+
+static int
+surv0_sock_get_survey_time(void *arg, void *buf, size_t *szp, nni_opt_type t)
+{
+ surv0_sock *s = arg;
+ return (surv0_ctx_get_survey_time(&s->ctx, buf, szp, t));
+}
+
+// Get the pollable send descriptor (always signaled for surveyors).
+static int
+surv0_sock_get_send_fd(void *arg, void *buf, size_t *szp, nni_opt_type t)
+{
+ surv0_sock *sock = arg;
+ int rv;
+ int fd;
+
+ if ((rv = nni_pollable_getfd(&sock->writable, &fd)) != 0) {
+ return (rv);
+ }
+ return (nni_copyout_int(fd, buf, szp, t));
+}
+
+// Get the pollable receive descriptor (signaled when the default
+// context has buffered responses).
+static int
+surv0_sock_get_recv_fd(void *arg, void *buf, size_t *szp, nni_opt_type t)
+{
+ surv0_sock *sock = arg;
+ int rv;
+ int fd;
+
+ if ((rv = nni_pollable_getfd(&sock->readable, &fd)) != 0) {
+ return (rv);
+ }
+ return (nni_copyout_int(fd, buf, szp, t));
+}
+
+// Socket-level receive runs against the default (socket) context.
+static void
+surv0_sock_recv(void *arg, nni_aio *aio)
+{
+ surv0_sock *sock = arg;
+
+ surv0_ctx_recv(&sock->ctx, aio);
+}
+
+// Socket-level send likewise delegates to the default context.
+static void
+surv0_sock_send(void *arg, nni_aio *aio)
+{
+ surv0_sock *sock = arg;
+
+ surv0_ctx_send(&sock->ctx, aio);
+}
+
+// Per-pipe operations vector for the surveyor protocol.
+static nni_proto_pipe_ops surv0_pipe_ops = {
+ .pipe_size = sizeof(surv0_pipe),
+ .pipe_init = surv0_pipe_init,
+ .pipe_fini = surv0_pipe_fini,
+ .pipe_start = surv0_pipe_start,
+ .pipe_close = surv0_pipe_close,
+ .pipe_stop = surv0_pipe_stop,
+};
+
+// Options available on individual contexts.
+static nni_option surv0_ctx_options[] = {
+ {
+ .o_name = NNG_OPT_SURVEYOR_SURVEYTIME,
+ .o_get = surv0_ctx_get_survey_time,
+ .o_set = surv0_ctx_set_survey_time,
+ },
+ {
+ .o_name = NULL,
+ }
+};
+// Context operations vector; each context can run its own survey.
+static nni_proto_ctx_ops surv0_ctx_ops = {
+ .ctx_size = sizeof(surv0_ctx),
+ .ctx_init = surv0_ctx_init,
+ .ctx_fini = surv0_ctx_fini,
+ .ctx_send = surv0_ctx_send,
+ .ctx_recv = surv0_ctx_recv,
+ .ctx_options = surv0_ctx_options,
+};
+
+// Socket options exposed by the surveyor protocol.
+static nni_option surv0_sock_options[] = {
+ {
+ .o_name = NNG_OPT_SURVEYOR_SURVEYTIME,
+ .o_get = surv0_sock_get_survey_time,
+ .o_set = surv0_sock_set_survey_time,
+ },
+ {
+ .o_name = NNG_OPT_MAXTTL,
+ .o_get = surv0_sock_get_max_ttl,
+ .o_set = surv0_sock_set_max_ttl,
+ },
+ {
+ // Read-only pollable descriptors.
+ .o_name = NNG_OPT_RECVFD,
+ .o_get = surv0_sock_get_recv_fd,
+ },
+ {
+ .o_name = NNG_OPT_SENDFD,
+ .o_get = surv0_sock_get_send_fd,
+ },
+ // terminate list
+ {
+ .o_name = NULL,
+ },
+};
+
+// Socket operations vector.
+static nni_proto_sock_ops surv0_sock_ops = {
+ .sock_size = sizeof(surv0_sock),
+ .sock_init = surv0_sock_init,
+ .sock_fini = surv0_sock_fini,
+ .sock_open = surv0_sock_open,
+ .sock_close = surv0_sock_close,
+ .sock_send = surv0_sock_send,
+ .sock_recv = surv0_sock_recv,
+ .sock_options = surv0_sock_options,
+};
+
+// Protocol descriptor: SURVEYOR v0; its peer is RESPONDENT v0.
+static nni_proto surv0_proto = {
+ .proto_version = NNI_PROTOCOL_VERSION,
+ .proto_self = { NNG_SURVEYOR0_SELF, NNG_SURVEYOR0_SELF_NAME },
+ .proto_peer = { NNG_SURVEYOR0_PEER, NNG_SURVEYOR0_PEER_NAME },
+ .proto_flags = NNI_PROTO_FLAG_SNDRCV,
+ .proto_sock_ops = &surv0_sock_ops,
+ .proto_pipe_ops = &surv0_pipe_ops,
+ .proto_ctx_ops = &surv0_ctx_ops,
+};
+
+// Public constructor: open a SURVEYOR v0 socket (survey originator).
+int
+nng_surveyor0_open(nng_socket *sock)
+{
+ return (nni_proto_open(sock, &surv0_proto));
+}
diff --git a/src/sp/protocol/survey0/survey_test.c b/src/sp/protocol/survey0/survey_test.c
new file mode 100644
index 00000000..95d27adf
--- /dev/null
+++ b/src/sp/protocol/survey0/survey_test.c
@@ -0,0 +1,626 @@
+//
+// Copyright 2020 Staysail Systems, Inc. <info@staysail.tech>
+// Copyright 2018 Capitar IT Group BV <info@capitar.com>
+//
+// This software is supplied under the terms of the MIT License, a
+// copy of which should be located in the distribution where this
+// file was obtained (LICENSE.txt). A copy of the license may also be
+// found online at https://opensource.org/licenses/MIT.
+//
+
+#include <nuts.h>
+
+static void
+test_surv_identity(void)
+{
+ nng_socket s;
+ int p;
+ char * n;
+
+ NUTS_PASS(nng_surveyor0_open(&s));
+ NUTS_PASS(nng_socket_get_int(s, NNG_OPT_PROTO, &p));
+ NUTS_TRUE(p == NNG_SURVEYOR0_SELF);
+ NUTS_PASS(nng_socket_get_int(s, NNG_OPT_PEER, &p));
+ NUTS_TRUE(p == NNG_SURVEYOR0_PEER); // 49
+ NUTS_PASS(nng_socket_get_string(s, NNG_OPT_PROTONAME, &n));
+ NUTS_MATCH(n, NNG_SURVEYOR0_SELF_NAME);
+ nng_strfree(n);
+ NUTS_PASS(nng_socket_get_string(s, NNG_OPT_PEERNAME, &n));
+ NUTS_MATCH(n, NNG_SURVEYOR0_PEER_NAME);
+ nng_strfree(n);
+ NUTS_CLOSE(s);
+}
+
+static void
+test_surv_ttl_option(void)
+{
+ nng_socket surv;
+ int v;
+ bool b;
+ size_t sz;
+ const char *opt = NNG_OPT_MAXTTL;
+
+ NUTS_PASS(nng_surveyor0_open(&surv));
+
+ NUTS_PASS(nng_socket_set_int(surv, opt, 1));
+ NUTS_FAIL(nng_socket_set_int(surv, opt, 0), NNG_EINVAL);
+ NUTS_FAIL(nng_socket_set_int(surv, opt, -1), NNG_EINVAL);
+ // This test will fail if the NNI_MAX_MAX_TTL is changed from the
+ // builtin default of 15.
+ NUTS_FAIL(nng_socket_set_int(surv, opt, 16), NNG_EINVAL);
+ NUTS_FAIL(nng_socket_set_int(surv, opt, 256), NNG_EINVAL);
+ NUTS_PASS(nng_socket_set_int(surv, opt, 3));
+ NUTS_PASS(nng_socket_get_int(surv, opt, &v));
+ NUTS_TRUE(v == 3);
+ v = 0;
+ sz = sizeof(v);
+ NUTS_PASS(nng_socket_get(surv, opt, &v, &sz));
+ NUTS_TRUE(v == 3);
+ NUTS_TRUE(sz == sizeof(v));
+
+ NUTS_FAIL(nng_socket_set(surv, opt, "", 1), NNG_EINVAL);
+ sz = 1;
+ NUTS_FAIL(nng_socket_get(surv, opt, &v, &sz), NNG_EINVAL);
+ NUTS_FAIL(nng_socket_set_bool(surv, opt, true), NNG_EBADTYPE);
+ NUTS_FAIL(nng_socket_get_bool(surv, opt, &b), NNG_EBADTYPE);
+
+ NUTS_CLOSE(surv);
+}
+
+static void
+test_surv_survey_time_option(void)
+{
+ nng_socket surv;
+ nng_duration d;
+ bool b;
+ size_t sz = sizeof(b);
+ const char * opt = NNG_OPT_SURVEYOR_SURVEYTIME;
+
+ NUTS_PASS(nng_surveyor0_open(&surv));
+
+ NUTS_PASS(nng_socket_set_ms(surv, opt, 10));
+ NUTS_FAIL(nng_socket_set(surv, opt, "", 1), NNG_EINVAL);
+ NUTS_FAIL(nng_socket_get(surv, opt, &b, &sz), NNG_EINVAL);
+ NUTS_FAIL(nng_socket_set_bool(surv, opt, true), NNG_EBADTYPE);
+ NUTS_FAIL(nng_socket_get_bool(surv, opt, &b), NNG_EBADTYPE);
+
+ NUTS_PASS(nng_socket_get_ms(surv, opt, &d));
+ NUTS_TRUE(d == 10);
+ NUTS_CLOSE(surv);
+}
+
+void
+test_surv_recv_bad_state(void)
+{
+ nng_socket surv;
+ nng_msg * msg = NULL;
+
+ NUTS_PASS(nng_surveyor0_open(&surv));
+ NUTS_FAIL(nng_recvmsg(surv, &msg, 0), NNG_ESTATE);
+ NUTS_TRUE(msg == NULL);
+ NUTS_CLOSE(surv);
+}
+
+static void
+test_surv_recv_garbage(void)
+{
+ nng_socket resp;
+ nng_socket surv;
+ nng_msg * m;
+ uint32_t surv_id;
+
+ NUTS_PASS(nng_respondent0_open_raw(&resp));
+ NUTS_PASS(nng_surveyor0_open(&surv));
+ NUTS_PASS(nng_socket_set_ms(surv, NNG_OPT_RECVTIMEO, 100));
+ NUTS_PASS(nng_socket_set_ms(surv, NNG_OPT_SENDTIMEO, 1000));
+ NUTS_PASS(nng_socket_set_ms(resp, NNG_OPT_SENDTIMEO, 1000));
+ NUTS_PASS(nng_socket_set_ms(resp, NNG_OPT_SENDTIMEO, 1000));
+
+ NUTS_MARRY(surv, resp);
+
+ NUTS_PASS(nng_msg_alloc(&m, 0));
+ NUTS_PASS(nng_sendmsg(surv, m, 0));
+
+ NUTS_PASS(nng_recvmsg(resp, &m, 0));
+
+ // The message will have a header that contains the 32-bit pipe ID,
+ // followed by the 32-bit request ID. We will discard the request
+ // ID before sending it out.
+ NUTS_TRUE(nng_msg_header_len(m) == 8);
+ NUTS_PASS(nng_msg_header_chop_u32(m, &surv_id));
+
+ NUTS_PASS(nng_sendmsg(resp, m, 0));
+ NUTS_FAIL(nng_recvmsg(surv, &m, 0), NNG_ETIMEDOUT);
+
+ NUTS_CLOSE(surv);
+ NUTS_CLOSE(resp);
+}
+
+#define SECOND 1000
+
+void
+test_surv_resp_exchange(void)
+{
+ nng_socket surv;
+ nng_socket resp;
+
+ NUTS_PASS(nng_surveyor0_open(&surv));
+ NUTS_PASS(nng_respondent0_open(&resp));
+
+ NUTS_PASS(nng_socket_set_ms(surv, NNG_OPT_RECVTIMEO, SECOND));
+ NUTS_PASS(nng_socket_set_ms(resp, NNG_OPT_RECVTIMEO, SECOND));
+ NUTS_PASS(nng_socket_set_ms(surv, NNG_OPT_SENDTIMEO, SECOND));
+ NUTS_PASS(nng_socket_set_ms(resp, NNG_OPT_SENDTIMEO, SECOND));
+
+ NUTS_MARRY(resp, surv);
+
+ NUTS_SEND(surv, "ping");
+ NUTS_RECV(resp, "ping");
+ NUTS_SEND(resp, "pong");
+ NUTS_RECV(surv, "pong");
+
+ NUTS_CLOSE(surv);
+ NUTS_CLOSE(resp);
+}
+
+void
+test_surv_cancel(void)
+{
+ nng_socket surv;
+ nng_socket resp;
+
+ NUTS_PASS(nng_respondent0_open(&resp));
+ NUTS_PASS(nng_surveyor0_open(&surv));
+
+ NUTS_PASS(nng_socket_set_ms(surv, NNG_OPT_RECVTIMEO, SECOND));
+ NUTS_PASS(nng_socket_set_ms(resp, NNG_OPT_RECVTIMEO, SECOND));
+ NUTS_PASS(nng_socket_set_ms(surv, NNG_OPT_SENDTIMEO, 5 * SECOND));
+ NUTS_PASS(nng_socket_set_ms(resp, NNG_OPT_SENDTIMEO, 5 * SECOND));
+ NUTS_PASS(nng_socket_set_int(surv, NNG_OPT_SENDBUF, 16));
+
+ NUTS_MARRY(resp, surv);
+
+ // Send req #1 (abc).
+ NUTS_SEND(surv, "abc");
+
+ // Sleep a bit. This is so that we ensure that our request gets
+ // to the far side. (If we cancel too fast, then our outgoing send
+ // will be canceled before it gets to the peer.)
+ NUTS_SLEEP(100);
+
+ // Send the next next request ("def"). Note that
+ // the RESP side server will have already buffered the receive
+ // request, and should simply be waiting for us to reply to abc.
+ NUTS_SEND(surv, "def");
+
+ // Receive the first request (should be abc) on the REP server.
+ NUTS_RECV(resp, "abc");
+
+ // RESP sends the reply to first command. This will be discarded
+ // by the SURV socket.
+ NUTS_SEND(resp, "abc");
+
+ // Now get the next command from the REP; should be "def".
+ NUTS_RECV(resp, "def");
+
+ // And send it back to REQ.
+ NUTS_SEND(resp, "def");
+
+ // Try a req command. This should give back "def"
+ NUTS_RECV(surv, "def");
+
+ NUTS_CLOSE(surv);
+ NUTS_CLOSE(resp);
+}
+
+void
+test_surv_cancel_abort_recv(void)
+{
+ nng_aio * aio;
+ nng_duration time = SECOND * 10; // 10s (kind of never)
+ nng_socket surv;
+ nng_socket resp;
+
+ NUTS_PASS(nng_respondent0_open(&resp));
+ NUTS_PASS(nng_surveyor0_open(&surv));
+ NUTS_PASS(nng_aio_alloc(&aio, NULL, NULL));
+
+ NUTS_PASS(nng_socket_set_ms(surv, NNG_OPT_SURVEYOR_SURVEYTIME, time));
+ NUTS_PASS(nng_socket_set_int(surv, NNG_OPT_SENDBUF, 16));
+ NUTS_PASS(nng_socket_set_ms(surv, NNG_OPT_RECVTIMEO, 5 * SECOND));
+ NUTS_PASS(nng_socket_set_ms(resp, NNG_OPT_RECVTIMEO, 5 * SECOND));
+ NUTS_PASS(nng_socket_set_ms(surv, NNG_OPT_SENDTIMEO, 5 * SECOND));
+ NUTS_PASS(nng_socket_set_ms(resp, NNG_OPT_SENDTIMEO, 5 * SECOND));
+
+ NUTS_MARRY(resp, surv);
+
+ // Send survey #1 (abc).
+ NUTS_SEND(surv, "abc");
+
+ // Wait for it to get ot the other side.
+ NUTS_SLEEP(100);
+
+ nng_aio_set_timeout(aio, 5 * SECOND);
+ nng_recv_aio(surv, aio);
+
+ // Give time for this recv to post properly.
+ NUTS_SLEEP(100);
+
+ // Send the next next request ("def"). Note that
+ // the respondent side server will have already buffered the receive
+ // request, and should simply be waiting for us to reply to
+ // abc.
+ NUTS_SEND(surv, "def");
+
+ // Our pending I/O should have been canceled.
+ nng_aio_wait(aio);
+ NUTS_FAIL(nng_aio_result(aio), NNG_ECANCELED);
+
+ // Receive the first request (should be abc) on the respondent.
+ NUTS_RECV(resp, "abc");
+
+ // Respondent sends the reply to first survey. This will be
+ // discarded by the SURV socket.
+ NUTS_SEND(resp, "abc");
+
+ // Now get the next survey from the RESP; should be "def".
+ NUTS_RECV(resp, "def");
+
+ // And send it back to REQ.
+ NUTS_SEND(resp, "def");
+
+ // Try a req command. This should give back "def"
+ NUTS_RECV(surv, "def");
+
+ nng_aio_free(aio);
+ NUTS_CLOSE(surv);
+ NUTS_CLOSE(resp);
+}
+
+static void
+test_surv_cancel_post_recv(void)
+{
+ nng_socket surv;
+ nng_socket resp;
+
+ NUTS_PASS(nng_surveyor0_open(&surv));
+ NUTS_PASS(nng_respondent0_open(&resp));
+ NUTS_PASS(nng_socket_set_ms(surv, NNG_OPT_SENDTIMEO, 1000));
+ NUTS_PASS(nng_socket_set_ms(resp, NNG_OPT_SENDTIMEO, 1000));
+ NUTS_PASS(nng_socket_set_ms(surv, NNG_OPT_RECVTIMEO, 1000));
+ NUTS_PASS(nng_socket_set_ms(resp, NNG_OPT_RECVTIMEO, 1000));
+ NUTS_MARRY(surv, resp);
+
+ NUTS_SEND(surv, "ONE");
+ NUTS_RECV(resp, "ONE");
+ NUTS_SEND(resp, "one");
+ NUTS_SLEEP(100); // Make sure reply arrives!
+ NUTS_SEND(surv, "TWO");
+ NUTS_RECV(resp, "TWO");
+ NUTS_SEND(resp, "two");
+ NUTS_RECV(surv, "two");
+
+ NUTS_CLOSE(surv);
+ NUTS_CLOSE(resp);
+}
+
+static void
+test_surv_poll_writeable(void)
+{
+ int fd;
+ nng_socket surv;
+ nng_socket resp;
+
+ NUTS_PASS(nng_surveyor0_open(&surv));
+ NUTS_PASS(nng_respondent0_open(&resp));
+ NUTS_PASS(nng_socket_get_int(surv, NNG_OPT_SENDFD, &fd));
+ NUTS_TRUE(fd >= 0);
+
+ // Survey is broadcast, so we can always write.
+ NUTS_TRUE(nuts_poll_fd(fd));
+
+ NUTS_MARRY(surv, resp);
+
+ // Now it's writable.
+ NUTS_TRUE(nuts_poll_fd(fd));
+
+ NUTS_CLOSE(surv);
+ NUTS_CLOSE(resp);
+}
+
+void
+test_surv_poll_readable(void)
+{
+ int fd;
+ nng_socket surv;
+ nng_socket resp;
+ nng_msg * msg;
+
+ NUTS_PASS(nng_surveyor0_open(&surv));
+ NUTS_PASS(nng_respondent0_open(&resp));
+ NUTS_PASS(nng_socket_get_int(surv, NNG_OPT_RECVFD, &fd));
+ NUTS_TRUE(fd >= 0);
+
+ // Not readable if not connected!
+ NUTS_TRUE(nuts_poll_fd(fd) == false);
+
+ // Even after connect (no message yet)
+ NUTS_MARRY(surv, resp);
+ NUTS_TRUE(nuts_poll_fd(fd) == false);
+
+ // But once we send messages, it is.
+ // We have to send a request, in order to send a reply.
+
+ NUTS_PASS(nng_msg_alloc(&msg, 0));
+ NUTS_PASS(nng_msg_append(msg, "xyz", 3));
+ NUTS_PASS(nng_sendmsg(surv, msg, 0));
+ NUTS_PASS(nng_recvmsg(resp, &msg, 0)); // recv on rep
+ NUTS_PASS(nng_sendmsg(resp, msg, 0)); // echo it back
+ NUTS_SLEEP(200); // give time for message to arrive
+
+ NUTS_TRUE(nuts_poll_fd(fd) == true);
+
+ // and receiving makes it no longer ready
+ NUTS_PASS(nng_recvmsg(surv, &msg, 0));
+ nng_msg_free(msg);
+ NUTS_TRUE(nuts_poll_fd(fd) == false);
+
+ // TODO verify unsolicited response
+
+ NUTS_CLOSE(surv);
+ NUTS_CLOSE(resp);
+}
+
+static void
+test_surv_ctx_no_poll(void)
+{
+ int fd;
+ nng_socket surv;
+ nng_ctx ctx;
+
+ NUTS_PASS(nng_surveyor0_open(&surv));
+ NUTS_PASS(nng_ctx_open(&ctx, surv));
+ NUTS_FAIL(nng_ctx_get_int(ctx, NNG_OPT_SENDFD, &fd), NNG_ENOTSUP);
+ NUTS_FAIL(nng_ctx_get_int(ctx, NNG_OPT_RECVFD, &fd), NNG_ENOTSUP);
+ NUTS_PASS(nng_ctx_close(ctx));
+ NUTS_CLOSE(surv);
+}
+
+static void
+test_surv_ctx_recv_nonblock(void)
+{
+ nng_socket surv;
+ nng_socket resp;
+ nng_ctx ctx;
+ nng_aio * aio;
+ nng_msg * msg;
+
+ NUTS_PASS(nng_surveyor0_open(&surv));
+ NUTS_PASS(nng_respondent0_open(&resp));
+ NUTS_PASS(nng_ctx_open(&ctx, surv));
+ NUTS_PASS(nng_aio_alloc(&aio, NULL, NULL));
+ NUTS_PASS(nng_msg_alloc(&msg, 0));
+
+ NUTS_MARRY(surv, resp);
+
+ nng_aio_set_msg(aio, msg);
+ nng_ctx_send(ctx, aio);
+ nng_aio_wait(aio);
+ NUTS_PASS(nng_aio_result(aio));
+ nng_aio_set_timeout(aio, 0); // Instant timeout
+ nng_ctx_recv(ctx, aio);
+
+ nng_aio_wait(aio);
+ NUTS_FAIL(nng_aio_result(aio), NNG_ETIMEDOUT);
+ NUTS_CLOSE(surv);
+ NUTS_CLOSE(resp);
+ nng_aio_free(aio);
+}
+
+static void
+test_surv_ctx_send_nonblock(void)
+{
+ nng_socket surv;
+ nng_ctx ctx;
+ nng_aio * aio;
+ nng_msg * msg;
+
+ NUTS_PASS(nng_surveyor0_open(&surv));
+ NUTS_PASS(nng_ctx_open(&ctx, surv));
+ NUTS_PASS(nng_aio_alloc(&aio, NULL, NULL));
+ NUTS_PASS(nng_msg_alloc(&msg, 0));
+
+ nng_aio_set_msg(aio, msg);
+ nng_aio_set_timeout(aio, 0); // Instant timeout
+ nng_ctx_send(ctx, aio);
+ nng_aio_wait(aio);
+ NUTS_PASS(nng_aio_result(aio)); // We never block
+ NUTS_CLOSE(surv);
+ nng_aio_free(aio);
+}
+
+static void
+test_surv_send_best_effort(void)
+{
+ nng_socket surv;
+ nng_socket resp;
+
+ NUTS_PASS(nng_surveyor0_open(&surv));
+ NUTS_PASS(nng_respondent0_open(&resp));
+ NUTS_MARRY(surv, resp);
+
+ for (int i = 0; i < 200; i++) {
+ NUTS_SEND(surv, "junk");
+ }
+
+ NUTS_CLOSE(surv);
+ NUTS_CLOSE(resp);
+}
+
+static void
+test_surv_survey_timeout(void)
+{
+ nng_socket surv;
+ nng_socket resp;
+ char buf[16];
+ size_t sz;
+
+ NUTS_PASS(nng_surveyor0_open(&surv));
+ NUTS_PASS(nng_respondent0_open(&resp));
+ NUTS_PASS(nng_socket_set_ms(surv, NNG_OPT_SURVEYOR_SURVEYTIME, 50));
+ NUTS_PASS(nng_socket_set_ms(surv, NNG_OPT_RECVTIMEO, 100));
+
+ NUTS_MARRY(surv, resp);
+
+ NUTS_SEND(surv, "hello");
+ NUTS_RECV(resp, "hello");
+
+ sz = sizeof(buf);
+ NUTS_FAIL(nng_recv(surv, buf, &sz, 0), NNG_ETIMEDOUT);
+ NUTS_SEND(resp, "world");
+ NUTS_FAIL(nng_recv(surv, buf, &sz, 0), NNG_ESTATE);
+
+ NUTS_CLOSE(surv);
+ NUTS_CLOSE(resp);
+}
+
+static void
+test_surv_ctx_recv_close_socket(void)
+{
+ nng_socket surv;
+ nng_socket resp;
+ nng_ctx ctx;
+ nng_aio * aio;
+ nng_msg * m;
+
+ NUTS_PASS(nng_surveyor0_open(&surv));
+ NUTS_PASS(nng_respondent0_open(&resp));
+ NUTS_PASS(nng_ctx_open(&ctx, surv));
+ NUTS_PASS(nng_aio_alloc(&aio, NULL, NULL));
+ NUTS_MARRY(surv, resp);
+ NUTS_PASS(nng_msg_alloc(&m, 0));
+ nng_aio_set_msg(aio, m);
+ nng_ctx_send(ctx, aio);
+ nng_aio_wait(aio);
+ NUTS_PASS(nng_aio_result(aio));
+
+ nng_ctx_recv(ctx, aio);
+ nng_close(surv);
+
+ NUTS_FAIL(nng_aio_result(aio), NNG_ECLOSED);
+ nng_aio_free(aio);
+ NUTS_CLOSE(resp);
+}
+
+static void
+test_surv_context_multi(void)
+{
+ nng_socket surv;
+ nng_socket resp;
+ nng_ctx c[5];
+ nng_aio * aio;
+ nng_msg * m;
+ int cnt = sizeof(c) / sizeof(c[0]);
+
+ NUTS_PASS(nng_surveyor0_open(&surv));
+ NUTS_PASS(nng_respondent0_open(&resp));
+ NUTS_MARRY(surv, resp);
+ NUTS_PASS(nng_socket_set_ms(surv, NNG_OPT_SURVEYOR_SURVEYTIME, 200));
+ NUTS_PASS(nng_aio_alloc(&aio, NULL, NULL));
+
+ for (int i = 0; i < cnt; i++) {
+ NUTS_PASS(nng_ctx_open(&c[i], surv));
+ }
+
+ for (int i = 0; i < cnt; i++) {
+ NUTS_PASS(nng_msg_alloc(&m, 0));
+ NUTS_PASS(nng_msg_append_u32(m, i));
+ nng_aio_set_msg(aio, m);
+ nng_ctx_send(c[i], aio);
+ nng_aio_wait(aio);
+ NUTS_PASS(nng_aio_result(aio));
+ }
+
+ for (int i = 0; i < cnt; i++) {
+ NUTS_PASS(nng_recvmsg(resp, &m, 0));
+ NUTS_PASS(nng_sendmsg(resp, m, 0));
+ }
+
+ for (int i = cnt - 1; i >= 0; i--) {
+ uint32_t x;
+ nng_ctx_recv(c[i], aio);
+ nng_aio_wait(aio);
+ NUTS_PASS(nng_aio_result(aio));
+ m = nng_aio_get_msg(aio);
+ TEST_ASSERT(m != NULL);
+ NUTS_PASS(nng_msg_trim_u32(m, &x));
+ NUTS_TRUE(x == (uint32_t) i);
+ nng_msg_free(m);
+ }
+
+ for (int i = 0; i < cnt; i++) {
+ nng_ctx_recv(c[i], aio);
+ nng_aio_wait(aio);
+ NUTS_TRUE(nng_aio_result(aio) != 0);
+ }
+ for (int i = 0; i < cnt; i++) {
+ nng_ctx_close(c[i]);
+ }
+ NUTS_CLOSE(surv);
+ NUTS_CLOSE(resp);
+ nng_aio_free(aio);
+}
+
+static void
+test_surv_validate_peer(void)
+{
+ nng_socket s1, s2;
+ nng_stat * stats;
+ nng_stat * reject;
+ char * addr;
+
+ NUTS_ADDR(addr, "inproc");
+ NUTS_PASS(nng_surveyor0_open(&s1));
+ NUTS_PASS(nng_surveyor0_open(&s2));
+
+ NUTS_PASS(nng_listen(s1, addr, NULL, 0));
+ NUTS_PASS(nng_dial(s2, addr, NULL, NNG_FLAG_NONBLOCK));
+
+ NUTS_SLEEP(100);
+ NUTS_PASS(nng_stats_get(&stats));
+
+ NUTS_TRUE(stats != NULL);
+ NUTS_TRUE((reject = nng_stat_find_socket(stats, s1)) != NULL);
+ NUTS_TRUE((reject = nng_stat_find(reject, "reject")) != NULL);
+
+ NUTS_TRUE(nng_stat_type(reject) == NNG_STAT_COUNTER);
+ NUTS_TRUE(nng_stat_value(reject) > 0);
+
+ NUTS_PASS(nng_close(s1));
+ NUTS_PASS(nng_close(s2));
+ nng_stats_free(stats);
+}
+
// Registry of all tests in this file, consumed by the test runner.
TEST_LIST = {
	{ "survey identity", test_surv_identity },
	{ "survey ttl option", test_surv_ttl_option },
	{ "survey survey time option", test_surv_survey_time_option },
	{ "survey recv bad state", test_surv_recv_bad_state },
	{ "survey recv garbage", test_surv_recv_garbage },
	{ "survey respondent exchange", test_surv_resp_exchange },
	{ "survey cancel", test_surv_cancel },
	{ "survey cancel abort recv", test_surv_cancel_abort_recv },
	{ "survey cancel post recv", test_surv_cancel_post_recv },
	{ "survey poll writable", test_surv_poll_writeable },
	{ "survey poll readable", test_surv_poll_readable },
	{ "survey context does not poll", test_surv_ctx_no_poll },
	{ "survey context recv close socket",
	    test_surv_ctx_recv_close_socket },
	{ "survey context recv nonblock", test_surv_ctx_recv_nonblock },
	{ "survey context send nonblock", test_surv_ctx_send_nonblock },
	{ "survey timeout", test_surv_survey_timeout },
	{ "survey send best effort", test_surv_send_best_effort },
	{ "survey context multi", test_surv_context_multi },
	{ "survey validate peer", test_surv_validate_peer },
	// terminator
	{ NULL, NULL },
};
diff --git a/src/sp/protocol/survey0/xrespond.c b/src/sp/protocol/survey0/xrespond.c
new file mode 100644
index 00000000..b2f203c3
--- /dev/null
+++ b/src/sp/protocol/survey0/xrespond.c
@@ -0,0 +1,417 @@
+//
+// Copyright 2020 Staysail Systems, Inc. <info@staysail.tech>
+// Copyright 2018 Capitar IT Group BV <info@capitar.com>
+//
+// This software is supplied under the terms of the MIT License, a
+// copy of which should be located in the distribution where this
+// file was obtained (LICENSE.txt). A copy of the license may also be
+// found online at https://opensource.org/licenses/MIT.
+//
+
+#include <stdlib.h>
+
+#include "core/nng_impl.h"
+#include "nng/protocol/survey0/respond.h"
+
+// Respondent protocol. The RESPONDENT protocol is the "replier" side of
+// the surveyor pattern. This is useful for building service discovery, or
+// voting algorithms, for example.
+
+#ifndef NNI_PROTO_SURVEYOR_V0
+#define NNI_PROTO_SURVEYOR_V0 NNI_PROTO(6, 2)
+#endif
+
+#ifndef NNI_PROTO_RESPONDENT_V0
+#define NNI_PROTO_RESPONDENT_V0 NNI_PROTO(6, 3)
+#endif
+
+typedef struct xresp0_pipe xresp0_pipe;
+typedef struct xresp0_sock xresp0_sock;
+
+static void xresp0_recv_cb(void *);
+static void xresp0_putq_cb(void *);
+static void xresp0_getq_cb(void *);
+static void xresp0_send_cb(void *);
+static void xresp0_sock_getq_cb(void *);
+static void xresp0_pipe_fini(void *);
+
// xresp0_sock is our per-socket protocol private structure.
struct xresp0_sock {
	nni_msgq *     urq;  // upper read queue (messages toward the user)
	nni_msgq *     uwq;  // upper write queue (messages from the user)
	nni_atomic_int ttl;  // maximum hop count for forwarded surveys
	nni_id_map     pipes; // pipe-id -> xresp0_pipe lookup
	nni_aio        aio_getq; // pulls outgoing messages from uwq
	nni_mtx        mtx;  // protects the pipes map
};
+
// xresp0_pipe is our per-pipe protocol private structure.
struct xresp0_pipe {
	nni_pipe *   npipe; // underlying transport pipe
	xresp0_sock *psock; // owning socket
	uint32_t     id;    // cached pipe id (routing key)
	nni_msgq *   sendq; // per-pipe queue so one slow pipe can't stall others
	nni_aio      aio_getq; // pulls from sendq
	nni_aio      aio_putq; // pushes received messages to urq
	nni_aio      aio_send; // writes to the transport
	nni_aio      aio_recv; // reads from the transport
};
+
+static void
+xresp0_sock_fini(void *arg)
+{
+ xresp0_sock *s = arg;
+
+ nni_aio_fini(&s->aio_getq);
+ nni_id_map_fini(&s->pipes);
+ nni_mtx_fini(&s->mtx);
+}
+
+static int
+xresp0_sock_init(void *arg, nni_sock *nsock)
+{
+ xresp0_sock *s = arg;
+
+ nni_mtx_init(&s->mtx);
+ nni_atomic_init(&s->ttl);
+ nni_atomic_set(&s->ttl, 8); // Per RFC
+ nni_id_map_init(&s->pipes, 0, 0, false);
+ nni_aio_init(&s->aio_getq, xresp0_sock_getq_cb, s);
+
+ s->urq = nni_sock_recvq(nsock);
+ s->uwq = nni_sock_sendq(nsock);
+
+ return (0);
+}
+
+static void
+xresp0_sock_open(void *arg)
+{
+ xresp0_sock *s = arg;
+
+ nni_msgq_aio_get(s->uwq, &s->aio_getq);
+}
+
+static void
+xresp0_sock_close(void *arg)
+{
+ xresp0_sock *s = arg;
+
+ nni_aio_close(&s->aio_getq);
+}
+
+static void
+xresp0_pipe_stop(void *arg)
+{
+ xresp0_pipe *p = arg;
+
+ nni_aio_stop(&p->aio_putq);
+ nni_aio_stop(&p->aio_getq);
+ nni_aio_stop(&p->aio_send);
+ nni_aio_stop(&p->aio_recv);
+}
+
+static void
+xresp0_pipe_fini(void *arg)
+{
+ xresp0_pipe *p = arg;
+
+ nni_aio_fini(&p->aio_putq);
+ nni_aio_fini(&p->aio_getq);
+ nni_aio_fini(&p->aio_send);
+ nni_aio_fini(&p->aio_recv);
+ nni_msgq_fini(p->sendq);
+}
+
+static int
+xresp0_pipe_init(void *arg, nni_pipe *npipe, void *s)
+{
+ xresp0_pipe *p = arg;
+ int rv;
+
+ nni_aio_init(&p->aio_putq, xresp0_putq_cb, p);
+ nni_aio_init(&p->aio_recv, xresp0_recv_cb, p);
+ nni_aio_init(&p->aio_getq, xresp0_getq_cb, p);
+ nni_aio_init(&p->aio_send, xresp0_send_cb, p);
+
+ if ((rv = nni_msgq_init(&p->sendq, 2)) != 0) {
+ xresp0_pipe_fini(p);
+ return (rv);
+ }
+
+ p->npipe = npipe;
+ p->psock = s;
+ return (0);
+}
+
+static int
+xresp0_pipe_start(void *arg)
+{
+ xresp0_pipe *p = arg;
+ xresp0_sock *s = p->psock;
+ int rv;
+
+ if (nni_pipe_peer(p->npipe) != NNI_PROTO_SURVEYOR_V0) {
+ return (NNG_EPROTO);
+ }
+
+ p->id = nni_pipe_id(p->npipe);
+
+ nni_mtx_lock(&s->mtx);
+ rv = nni_id_set(&s->pipes, p->id, p);
+ nni_mtx_unlock(&s->mtx);
+ if (rv != 0) {
+ return (rv);
+ }
+
+ nni_pipe_recv(p->npipe, &p->aio_recv);
+ nni_msgq_aio_get(p->sendq, &p->aio_getq);
+
+ return (rv);
+}
+
+static void
+xresp0_pipe_close(void *arg)
+{
+ xresp0_pipe *p = arg;
+ xresp0_sock *s = p->psock;
+
+ nni_aio_close(&p->aio_putq);
+ nni_aio_close(&p->aio_getq);
+ nni_aio_close(&p->aio_send);
+ nni_aio_close(&p->aio_recv);
+
+ nni_msgq_close(p->sendq);
+
+ nni_mtx_lock(&s->mtx);
+ nni_id_remove(&s->pipes, p->id);
+ nni_mtx_unlock(&s->mtx);
+}
+
+// xresp0_sock_getq_cb watches for messages from the upper write queue,
+// extracts the destination pipe, and forwards each one to the appropriate
+// destination pipe via a separate queue. This prevents a single bad
+// or slow pipe from gumming up the works for the entire socket.
+
+void
+xresp0_sock_getq_cb(void *arg)
+{
+ xresp0_sock *s = arg;
+ nni_msg * msg;
+ uint32_t id;
+ xresp0_pipe *p;
+
+ if (nni_aio_result(&s->aio_getq) != 0) {
+ return;
+ }
+ msg = nni_aio_get_msg(&s->aio_getq);
+ nni_aio_set_msg(&s->aio_getq, NULL);
+
+ // We yank the outgoing pipe id from the header
+ if (nni_msg_header_len(msg) < 4) {
+ nni_msg_free(msg);
+ // We can't really close down the socket, so just keep going.
+ nni_msgq_aio_get(s->uwq, &s->aio_getq);
+ return;
+ }
+ id = nni_msg_header_trim_u32(msg);
+
+ nni_mtx_lock(&s->mtx);
+ // Look for the pipe, and attempt to put the message there
+ // (nonblocking) if we can. If we can't for any reason, then we
+ // free the message.
+ if (((p = nni_id_get(&s->pipes, id)) == NULL) ||
+ (nni_msgq_tryput(p->sendq, msg) != 0)) {
+ nni_msg_free(msg);
+ }
+ nni_mtx_unlock(&s->mtx);
+ nni_msgq_aio_get(s->uwq, &s->aio_getq);
+}
+
+void
+xresp0_getq_cb(void *arg)
+{
+ xresp0_pipe *p = arg;
+
+ if (nni_aio_result(&p->aio_getq) != 0) {
+ nni_pipe_close(p->npipe);
+ return;
+ }
+
+ nni_aio_set_msg(&p->aio_send, nni_aio_get_msg(&p->aio_getq));
+ nni_aio_set_msg(&p->aio_getq, NULL);
+
+ nni_pipe_send(p->npipe, &p->aio_send);
+}
+
+void
+xresp0_send_cb(void *arg)
+{
+ xresp0_pipe *p = arg;
+
+ if (nni_aio_result(&p->aio_send) != 0) {
+ nni_msg_free(nni_aio_get_msg(&p->aio_send));
+ nni_aio_set_msg(&p->aio_send, NULL);
+ nni_pipe_close(p->npipe);
+ return;
+ }
+
+ nni_msgq_aio_get(p->sendq, &p->aio_getq);
+}
+
// A message arrived from the transport.  Record the originating pipe in
// the header, move the backtrace (routing trail) from the body into the
// header, and deliver the result to the upper read queue.
static void
xresp0_recv_cb(void *arg)
{
	xresp0_pipe *p   = arg;
	xresp0_sock *s   = p->psock;
	nni_msgq *   urq = s->urq;
	nni_msg *    msg;
	int          hops;
	int          ttl;

	if (nni_aio_result(&p->aio_recv) != 0) {
		// Receive failed; close the pipe and stop the loop.
		nni_pipe_close(p->npipe);
		return;
	}

	ttl = nni_atomic_get(&s->ttl);
	msg = nni_aio_get_msg(&p->aio_recv);
	nni_aio_set_msg(&p->aio_recv, NULL);
	nni_msg_set_pipe(msg, p->id);

	// Store the pipe id in the header, first thing.
	nni_msg_header_append_u32(msg, p->id);

	// Move backtrace from body to header.  Each hop is a 32-bit
	// word; the high bit of the first byte marks the final word.
	hops = 1;
	for (;;) {
		bool     end;
		uint8_t *body;

		if (hops > ttl) {
			// Too many hops; drop but keep the pipe alive.
			goto drop;
		}
		hops++;
		if (nni_msg_len(msg) < 4) {
			// Peer sent us garbage, so kick it.
			nni_msg_free(msg);
			nni_pipe_close(p->npipe);
			return;
		}
		body = nni_msg_body(msg);
		end  = ((body[0] & 0x80u) != 0);
		if (nni_msg_header_append(msg, body, 4) != 0) {
			// Out of memory; drop the message, not the pipe.
			goto drop;
		}
		nni_msg_trim(msg, 4);
		if (end) {
			break;
		}
	}

	// Now send it up.
	nni_aio_set_msg(&p->aio_putq, msg);
	nni_msgq_aio_put(urq, &p->aio_putq);
	return;

drop:
	nni_msg_free(msg);
	nni_pipe_recv(p->npipe, &p->aio_recv);
}
+
+static void
+xresp0_putq_cb(void *arg)
+{
+ xresp0_pipe *p = arg;
+
+ if (nni_aio_result(&p->aio_putq) != 0) {
+ nni_msg_free(nni_aio_get_msg(&p->aio_putq));
+ nni_aio_set_msg(&p->aio_putq, NULL);
+ nni_pipe_close(p->npipe);
+ return;
+ }
+
+ nni_pipe_recv(p->npipe, &p->aio_recv);
+}
+
+static int
+xresp0_sock_set_maxttl(void *arg, const void *buf, size_t sz, nni_opt_type t)
+{
+ xresp0_sock *s = arg;
+ int ttl;
+ int rv;
+ if ((rv = nni_copyin_int(&ttl, buf, sz, 1, NNI_MAX_MAX_TTL, t)) == 0) {
+ nni_atomic_set(&s->ttl, ttl);
+ }
+ return (rv);
+}
+
+static int
+xresp0_sock_get_maxttl(void *arg, void *buf, size_t *szp, nni_opt_type t)
+{
+ xresp0_sock *s = arg;
+ return (nni_copyout_int(nni_atomic_get(&s->ttl), buf, szp, t));
+}
+
+static void
+xresp0_sock_send(void *arg, nni_aio *aio)
+{
+ xresp0_sock *s = arg;
+
+ nni_msgq_aio_put(s->uwq, aio);
+}
+
+static void
+xresp0_sock_recv(void *arg, nni_aio *aio)
+{
+ xresp0_sock *s = arg;
+
+ nni_msgq_aio_get(s->urq, aio);
+}
+
// Pipe operations vector wired into the protocol core.
static nni_proto_pipe_ops xresp0_pipe_ops = {
	.pipe_size  = sizeof(xresp0_pipe),
	.pipe_init  = xresp0_pipe_init,
	.pipe_fini  = xresp0_pipe_fini,
	.pipe_start = xresp0_pipe_start,
	.pipe_close = xresp0_pipe_close,
	.pipe_stop  = xresp0_pipe_stop,
};
+
// Socket-level options: only the maximum TTL is tunable here.
static nni_option xresp0_sock_options[] = {
	{
		.o_name = NNG_OPT_MAXTTL,
		.o_get  = xresp0_sock_get_maxttl,
		.o_set  = xresp0_sock_set_maxttl,
	},
	// terminate list
	{
		.o_name = NULL,
	},
};
+
// Socket operations vector wired into the protocol core.
static nni_proto_sock_ops xresp0_sock_ops = {
	.sock_size    = sizeof(xresp0_sock),
	.sock_init    = xresp0_sock_init,
	.sock_fini    = xresp0_sock_fini,
	.sock_open    = xresp0_sock_open,
	.sock_close   = xresp0_sock_close,
	.sock_send    = xresp0_sock_send,
	.sock_recv    = xresp0_sock_recv,
	.sock_options = xresp0_sock_options,
};
+
// Protocol descriptor: a RAW respondent (no contexts, no state machine),
// peering with surveyor.
static nni_proto xresp0_proto = {
	.proto_version  = NNI_PROTOCOL_VERSION,
	.proto_self     = { NNI_PROTO_RESPONDENT_V0, "respondent" },
	.proto_peer     = { NNI_PROTO_SURVEYOR_V0, "surveyor" },
	.proto_flags    = NNI_PROTO_FLAG_SNDRCV | NNI_PROTO_FLAG_RAW,
	.proto_sock_ops = &xresp0_sock_ops,
	.proto_pipe_ops = &xresp0_pipe_ops,
};
+
+int
+nng_respondent0_open_raw(nng_socket *sidp)
+{
+ return (nni_proto_open(sidp, &xresp0_proto));
+}
diff --git a/src/sp/protocol/survey0/xrespond_test.c b/src/sp/protocol/survey0/xrespond_test.c
new file mode 100644
index 00000000..ec5e99a3
--- /dev/null
+++ b/src/sp/protocol/survey0/xrespond_test.c
@@ -0,0 +1,436 @@
+//
+// Copyright 2020 Staysail Systems, Inc. <info@staysail.tech>
+//
+// This software is supplied under the terms of the MIT License, a
+// copy of which should be located in the distribution where this
+// file was obtained (LICENSE.txt). A copy of the license may also be
+// found online at https://opensource.org/licenses/MIT.
+//
+
+#include <nuts.h>
+
+static void
+test_xresp_identity(void)
+{
+ nng_socket s;
+ int p1, p2;
+ char * n1;
+ char * n2;
+
+ NUTS_PASS(nng_respondent0_open_raw(&s));
+ NUTS_PASS(nng_socket_get_int(s, NNG_OPT_PROTO, &p1));
+ NUTS_PASS(nng_socket_get_int(s, NNG_OPT_PEER, &p2));
+ NUTS_PASS(nng_socket_get_string(s, NNG_OPT_PROTONAME, &n1));
+ NUTS_PASS(nng_socket_get_string(s, NNG_OPT_PEERNAME, &n2));
+ NUTS_CLOSE(s);
+ NUTS_TRUE(p1 == NNG_RESPONDENT0_SELF);
+ NUTS_TRUE(p2 == NNG_RESPONDENT0_PEER);
+ NUTS_MATCH(n1, NNG_RESPONDENT0_SELF_NAME);
+ NUTS_MATCH(n2, NNG_RESPONDENT0_PEER_NAME);
+ nng_strfree(n1);
+ nng_strfree(n2);
+}
+
+static void
+test_xresp_raw(void)
+{
+ nng_socket s;
+ bool b;
+
+ NUTS_PASS(nng_respondent0_open_raw(&s));
+ NUTS_PASS(nng_socket_get_bool(s, NNG_OPT_RAW, &b));
+ NUTS_TRUE(b);
+ NUTS_CLOSE(s);
+}
+
+static void
+test_xresp_no_context(void)
+{
+ nng_socket s;
+ nng_ctx ctx;
+
+ NUTS_PASS(nng_respondent0_open_raw(&s));
+ NUTS_FAIL(nng_ctx_open(&ctx, s), NNG_ENOTSUP);
+ NUTS_CLOSE(s);
+}
+
+static void
+test_xresp_poll_writeable(void)
+{
+ int fd;
+ nng_socket surv;
+ nng_socket resp;
+
+ NUTS_PASS(nng_respondent0_open_raw(&resp));
+ NUTS_PASS(nng_surveyor0_open(&surv));
+ NUTS_PASS(nng_socket_get_int(resp, NNG_OPT_SENDFD, &fd));
+ NUTS_TRUE(fd >= 0);
+
+ // We are always writeable, even before connect. This is so that
+ // back-pressure from a bad peer can't trash others. We assume
+ // that peers won't send us requests faster than they can consume
+ // the answers. If they do, they will lose their answers.
+ NUTS_TRUE(nuts_poll_fd(fd) == true);
+
+ NUTS_MARRY(surv, resp);
+
+ // Now it's writable.
+ NUTS_TRUE(nuts_poll_fd(fd) == true);
+
+ NUTS_CLOSE(surv);
+ NUTS_CLOSE(resp);
+}
+
+static void
+test_xresp_poll_readable(void)
+{
+ int fd;
+ nng_socket surv;
+ nng_socket resp;
+ nng_msg * msg;
+
+ NUTS_PASS(nng_surveyor0_open(&surv));
+ NUTS_PASS(nng_respondent0_open_raw(&resp));
+ NUTS_PASS(nng_socket_get_int(resp, NNG_OPT_RECVFD, &fd));
+ NUTS_TRUE(fd >= 0);
+
+ // Not readable if not connected!
+ NUTS_TRUE(nuts_poll_fd(fd) == false);
+
+ // Even after connect (no message yet)
+ NUTS_MARRY(surv, resp);
+ NUTS_TRUE(nuts_poll_fd(fd) == false);
+
+ // But once we send messages, it is.
+ // We have to send a request, in order to send a reply.
+ NUTS_SEND(surv, "abc");
+ NUTS_SLEEP(100);
+
+ NUTS_TRUE(nuts_poll_fd(fd) == true);
+
+ // and receiving makes it no longer ready
+ NUTS_PASS(nng_recvmsg(resp, &msg, 0));
+ nng_msg_free(msg);
+ NUTS_TRUE(nuts_poll_fd(fd) == false);
+
+ NUTS_CLOSE(surv);
+ NUTS_CLOSE(resp);
+}
+
+static void
+test_xresp_validate_peer(void)
+{
+ nng_socket s1, s2;
+ nng_stat * stats;
+ nng_stat * reject;
+ char * addr;
+
+ NUTS_ADDR(addr, "inproc");
+
+ NUTS_PASS(nng_respondent0_open_raw(&s1));
+ NUTS_PASS(nng_respondent0_open(&s2));
+
+ NUTS_PASS(nng_listen(s1, addr, NULL, 0));
+ NUTS_PASS(nng_dial(s2, addr, NULL, NNG_FLAG_NONBLOCK));
+
+ NUTS_SLEEP(100);
+ NUTS_PASS(nng_stats_get(&stats));
+
+ NUTS_TRUE(stats != NULL);
+ NUTS_TRUE((reject = nng_stat_find_socket(stats, s1)) != NULL);
+ NUTS_TRUE((reject = nng_stat_find(reject, "reject")) != NULL);
+
+ NUTS_TRUE(nng_stat_type(reject) == NNG_STAT_COUNTER);
+ NUTS_TRUE(nng_stat_value(reject) > 0);
+
+ NUTS_CLOSE(s1);
+ NUTS_CLOSE(s2);
+ nng_stats_free(stats);
+}
+
+static void
+test_xresp_close_pipe_before_send(void)
+{
+ nng_socket resp;
+ nng_socket surv;
+ nng_pipe p;
+ nng_aio * aio1;
+ nng_msg * m;
+
+ NUTS_PASS(nng_respondent0_open_raw(&resp));
+ NUTS_PASS(nng_surveyor0_open(&surv));
+ NUTS_PASS(nng_socket_set_ms(resp, NNG_OPT_RECVTIMEO, 1000));
+ NUTS_PASS(nng_socket_set_ms(resp, NNG_OPT_SENDTIMEO, 1000));
+ NUTS_PASS(nng_socket_set_ms(surv, NNG_OPT_SENDTIMEO, 1000));
+ NUTS_PASS(nng_aio_alloc(&aio1, NULL, NULL));
+
+ NUTS_MARRY(surv, resp);
+ NUTS_SEND(surv, "test");
+
+ nng_recv_aio(resp, aio1);
+ nng_aio_wait(aio1);
+ NUTS_PASS(nng_aio_result(aio1));
+ NUTS_TRUE((m = nng_aio_get_msg(aio1)) != NULL);
+ p = nng_msg_get_pipe(m);
+ NUTS_PASS(nng_pipe_close(p));
+ NUTS_PASS(nng_sendmsg(resp, m, 0));
+
+ NUTS_CLOSE(surv);
+ NUTS_CLOSE(resp);
+ nng_aio_free(aio1);
+}
+
+static void
+test_xresp_close_pipe_during_send(void)
+{
+ nng_socket resp;
+ nng_socket surv;
+ nng_pipe p;
+ nng_msg * m;
+
+ NUTS_PASS(nng_respondent_open_raw(&resp));
+ NUTS_PASS(nng_surveyor0_open_raw(&surv));
+ NUTS_PASS(nng_socket_set_ms(resp, NNG_OPT_RECVTIMEO, 1000));
+ NUTS_PASS(nng_socket_set_ms(resp, NNG_OPT_SENDTIMEO, 200));
+ NUTS_PASS(nng_socket_set_ms(surv, NNG_OPT_SENDTIMEO, 1000));
+ NUTS_PASS(nng_socket_set_int(resp, NNG_OPT_SENDBUF, 20));
+ NUTS_PASS(nng_socket_set_int(resp, NNG_OPT_RECVBUF, 20));
+ NUTS_PASS(nng_socket_set_int(surv, NNG_OPT_SENDBUF, 20));
+ NUTS_PASS(nng_socket_set_int(surv, NNG_OPT_RECVBUF, 1));
+
+ NUTS_MARRY(surv, resp);
+
+ NUTS_PASS(nng_msg_alloc(&m, 4));
+ NUTS_PASS(nng_msg_append_u32(m, (unsigned) 0x81000000u));
+ NUTS_PASS(nng_sendmsg(surv, m, 0));
+ NUTS_PASS(nng_recvmsg(resp, &m, 0));
+ p = nng_msg_get_pipe(m);
+ nng_msg_free(m);
+
+ for (int i = 0; i < 100; i++) {
+ NUTS_PASS(nng_msg_alloc(&m, 4));
+ NUTS_PASS(nng_msg_header_append_u32(m, nng_pipe_id(p)));
+ NUTS_PASS(
+ nng_msg_header_append_u32(m, (unsigned) i | 0x80000000u));
+ // protocol does not exert back-pressure
+ NUTS_PASS(nng_sendmsg(resp, m, 0));
+ }
+ NUTS_PASS(nng_pipe_close(p));
+
+ NUTS_CLOSE(surv);
+ NUTS_CLOSE(resp);
+}
+
+// Flood the respondent's small receive buffer, then close both sockets
+// while messages are still in flight; exercises teardown during receive.
+static void
+test_xresp_close_during_recv(void)
+{
+ nng_socket resp;
+ nng_socket surv;
+ nng_msg * m;
+
+ NUTS_PASS(nng_respondent0_open_raw(&resp));
+ NUTS_PASS(nng_surveyor0_open_raw(&surv));
+ NUTS_PASS(nng_socket_set_ms(surv, NNG_OPT_RECVTIMEO, 1000));
+ NUTS_PASS(nng_socket_set_ms(surv, NNG_OPT_SENDTIMEO, 100));
+ NUTS_PASS(nng_socket_set_int(resp, NNG_OPT_RECVBUF, 5));
+ NUTS_PASS(nng_socket_set_int(surv, NNG_OPT_SENDBUF, 20));
+
+ NUTS_MARRY(surv, resp);
+
+ for (unsigned i = 0; i < 100; i++) {
+ int rv;
+ NUTS_PASS(nng_msg_alloc(&m, 4));
+ NUTS_PASS(nng_msg_header_append_u32(m, i | 0x80000000u));
+ rv = nng_sendmsg(surv, m, 0);
+ // Buffers full: sender times out once back-pressure builds.
+ if (rv == NNG_ETIMEDOUT) {
+ nng_msg_free(m);
+ break;
+ }
+ }
+ NUTS_CLOSE(surv);
+ NUTS_CLOSE(resp);
+}
+
+// A receive posted with an already-stopped aio must complete immediately
+// with NNG_ECANCELED rather than blocking.
+static void
+test_xresp_recv_aio_stopped(void)
+{
+ nng_socket resp;
+ nng_aio * aio;
+
+ NUTS_PASS(nng_respondent0_open_raw(&resp));
+ NUTS_PASS(nng_aio_alloc(&aio, NULL, NULL));
+
+ nng_aio_stop(aio);
+ nng_recv_aio(resp, aio);
+ nng_aio_wait(aio);
+ NUTS_FAIL(nng_aio_result(aio), NNG_ECANCELED);
+ NUTS_CLOSE(resp);
+ nng_aio_free(aio);
+}
+
+// A raw respondent reply without the routing header (pipe ID + survey ID)
+// cannot be delivered anywhere; it should be dropped, so nothing arrives.
+static void
+test_xresp_send_no_header(void)
+{
+ nng_socket resp;
+ nng_socket surv;
+ nng_msg * m;
+
+ NUTS_PASS(nng_surveyor0_open_raw(&surv));
+ NUTS_PASS(nng_respondent0_open_raw(&resp));
+ NUTS_PASS(nng_socket_set_ms(resp, NNG_OPT_RECVTIMEO, 100));
+ NUTS_PASS(nng_socket_set_ms(surv, NNG_OPT_RECVTIMEO, 100));
+ NUTS_PASS(nng_socket_set_ms(resp, NNG_OPT_SENDTIMEO, 1000));
+ NUTS_PASS(nng_socket_set_ms(surv, NNG_OPT_SENDTIMEO, 1000));
+
+ NUTS_MARRY(surv, resp);
+
+ NUTS_PASS(nng_msg_alloc(&m, 4));
+ NUTS_PASS(nng_sendmsg(resp, m, 0));
+ // Nothing routed back, so the receive times out.
+ NUTS_FAIL(nng_recvmsg(resp, &m, 0), NNG_ETIMEDOUT);
+
+ NUTS_CLOSE(surv);
+ NUTS_CLOSE(resp);
+}
+
+// A survey whose ID lacks the high ("end of backtrace") bit is malformed;
+// the respondent must discard it rather than deliver it.
+static void
+test_xresp_recv_garbage(void)
+{
+ nng_socket resp;
+ nng_socket surv;
+ nng_msg * m;
+
+ NUTS_PASS(nng_respondent0_open_raw(&resp));
+ NUTS_PASS(nng_surveyor0_open_raw(&surv));
+ NUTS_PASS(nng_socket_set_ms(resp, NNG_OPT_RECVTIMEO, 100));
+ NUTS_PASS(nng_socket_set_ms(resp, NNG_OPT_SENDTIMEO, 100));
+ NUTS_PASS(nng_socket_set_ms(surv, NNG_OPT_SENDTIMEO, 1000));
+
+ NUTS_MARRY(surv, resp);
+
+ // 1u has the top bit clear -- not a valid terminal survey ID.
+ NUTS_PASS(nng_msg_alloc(&m, 4));
+ NUTS_PASS(nng_msg_append_u32(m, 1u));
+ NUTS_PASS(nng_sendmsg(surv, m, 0));
+ NUTS_FAIL(nng_recvmsg(resp, &m, 0), NNG_ETIMEDOUT);
+
+ NUTS_CLOSE(surv);
+ NUTS_CLOSE(resp);
+}
+
+// Exercise NNG_OPT_MAXTTL on the raw respondent: valid range is 1..255,
+// out-of-range and wrongly-typed accesses must fail with the right errors.
+static void
+test_xresp_ttl_option(void)
+{
+ nng_socket resp;
+ int v;
+ bool b;
+ size_t sz;
+ const char *opt = NNG_OPT_MAXTTL;
+
+ NUTS_PASS(nng_respondent0_open_raw(&resp));
+
+ NUTS_PASS(nng_socket_set_int(resp, opt, 1));
+ NUTS_FAIL(nng_socket_set_int(resp, opt, 0), NNG_EINVAL);
+ NUTS_FAIL(nng_socket_set_int(resp, opt, -1), NNG_EINVAL);
+ NUTS_FAIL(nng_socket_set_int(resp, opt, 16), NNG_EINVAL);
+ NUTS_FAIL(nng_socket_set_int(resp, opt, 256), NNG_EINVAL);
+ NUTS_PASS(nng_socket_set_int(resp, opt, 3));
+ NUTS_PASS(nng_socket_get_int(resp, opt, &v));
+ NUTS_TRUE(v == 3);
+ v = 0;
+ sz = sizeof(v);
+ // Generic getter must agree with the typed getter.
+ NUTS_PASS(nng_socket_get(resp, opt, &v, &sz));
+ NUTS_TRUE(v == 3);
+ NUTS_TRUE(sz == sizeof(v));
+
+ // Wrong size and wrong type are rejected.
+ NUTS_FAIL(nng_socket_set(resp, opt, "", 1), NNG_EINVAL);
+ sz = 1;
+ NUTS_FAIL(nng_socket_get(resp, opt, &v, &sz), NNG_EINVAL);
+ NUTS_FAIL(nng_socket_set_bool(resp, opt, true), NNG_EBADTYPE);
+ NUTS_FAIL(nng_socket_get_bool(resp, opt, &b), NNG_EBADTYPE);
+
+ NUTS_CLOSE(resp);
+}
+
+// With MAXTTL=3, surveys arriving with more than 3 total hops (the raw
+// respondent adds one hop itself on receive) must be silently dropped;
+// messages within the limit pass through with their backtrace intact.
+static void
+test_xresp_ttl_drop(void)
+{
+ nng_socket resp;
+ nng_socket surv;
+ nng_msg * m;
+
+ NUTS_PASS(nng_respondent0_open_raw(&resp));
+ NUTS_PASS(nng_surveyor0_open_raw(&surv));
+ NUTS_PASS(nng_socket_set_int(resp, NNG_OPT_MAXTTL, 3));
+ NUTS_PASS(nng_socket_set_ms(resp, NNG_OPT_RECVTIMEO, 200));
+ NUTS_PASS(nng_socket_set_ms(surv, NNG_OPT_SENDTIMEO, 1000));
+
+ NUTS_MARRY(surv, resp);
+
+ // Send messages. Note that xresp implicitly adds a hop on receive.
+
+ NUTS_PASS(nng_msg_alloc(&m, 0));
+ NUTS_PASS(nng_msg_append_u32(m, 1u)); // 2 hops
+ NUTS_PASS(nng_msg_append_u32(m, 0x80000001u));
+ NUTS_PASS(nng_msg_append(m, "PASS1", 6));
+ NUTS_PASS(nng_sendmsg(surv, m, 0));
+
+ NUTS_PASS(nng_msg_alloc(&m, 0));
+ NUTS_PASS(nng_msg_append_u32(m, 1u)); // 4 hops -- discard!
+ NUTS_PASS(nng_msg_append_u32(m, 2u));
+ NUTS_PASS(nng_msg_append_u32(m, 3u));
+ NUTS_PASS(nng_msg_append_u32(m, 0x80000002u));
+ NUTS_PASS(nng_msg_append(m, "FAIL2", 6));
+ NUTS_PASS(nng_sendmsg(surv, m, 0));
+
+ NUTS_PASS(nng_msg_alloc(&m, 0));
+ NUTS_PASS(nng_msg_append_u32(m, 1u)); // 3 hops - passes
+ NUTS_PASS(nng_msg_append_u32(m, 2u));
+ NUTS_PASS(nng_msg_append_u32(m, 0x80000003u));
+ NUTS_PASS(nng_msg_append(m, "PASS3", 6));
+ NUTS_PASS(nng_sendmsg(surv, m, 0));
+
+ NUTS_PASS(nng_msg_alloc(&m, 0));
+ NUTS_PASS(nng_msg_append_u32(m, 1u)); // 4 hops -- discard!
+ NUTS_PASS(nng_msg_append_u32(m, 2u));
+ NUTS_PASS(nng_msg_append_u32(m, 3u));
+ NUTS_PASS(nng_msg_append_u32(m, 0x80000003u));
+ NUTS_PASS(nng_msg_append(m, "FAIL4", 6));
+ NUTS_PASS(nng_sendmsg(surv, m, 0));
+
+ // So on receive we should see 80000001 and 80000003.
+ NUTS_PASS(nng_recvmsg(resp, &m, 0));
+ NUTS_TRUE(nng_msg_header_len(m) == 12); // 2 hops + ID
+ NUTS_TRUE(nng_msg_len(m) == 6);
+ NUTS_MATCH(nng_msg_body(m), "PASS1");
+ nng_msg_free(m);
+
+ NUTS_PASS(nng_recvmsg(resp, &m, 0));
+ NUTS_TRUE(nng_msg_header_len(m) == 16); // 3 hops + ID
+ NUTS_TRUE(nng_msg_len(m) == 6);
+ NUTS_MATCH(nng_msg_body(m), "PASS3");
+ nng_msg_free(m);
+
+ // The over-TTL messages never arrive.
+ NUTS_FAIL(nng_recvmsg(resp, &m, 0), NNG_ETIMEDOUT);
+
+ NUTS_CLOSE(surv);
+ NUTS_CLOSE(resp);
+}
+
+// Test registry for the NUTS harness; NULL entry terminates the list.
+NUTS_TESTS = {
+ { "xrespond identity", test_xresp_identity },
+ { "xrespond raw", test_xresp_raw },
+ { "xrespond no context", test_xresp_no_context },
+ { "xrespond poll readable", test_xresp_poll_readable },
+ { "xrespond poll writable", test_xresp_poll_writeable },
+ { "xrespond validate peer", test_xresp_validate_peer },
+ { "xrespond close pipe before send",
+ test_xresp_close_pipe_before_send },
+ { "xrespond close pipe during send",
+ test_xresp_close_pipe_during_send },
+ { "xrespond close during recv", test_xresp_close_during_recv },
+ { "xrespond recv aio stopped", test_xresp_recv_aio_stopped },
+ { "xrespond send no header", test_xresp_send_no_header },
+ { "xrespond recv garbage", test_xresp_recv_garbage },
+ { "xrespond ttl option", test_xresp_ttl_option },
+ { "xrespond ttl drop", test_xresp_ttl_drop },
+ { NULL, NULL },
+};
diff --git a/src/sp/protocol/survey0/xsurvey.c b/src/sp/protocol/survey0/xsurvey.c
new file mode 100644
index 00000000..2a198662
--- /dev/null
+++ b/src/sp/protocol/survey0/xsurvey.c
@@ -0,0 +1,379 @@
+//
+// Copyright 2020 Staysail Systems, Inc. <info@staysail.tech>
+// Copyright 2018 Capitar IT Group BV <info@capitar.com>
+//
+// This software is supplied under the terms of the MIT License, a
+// copy of which should be located in the distribution where this
+// file was obtained (LICENSE.txt). A copy of the license may also be
+// found online at https://opensource.org/licenses/MIT.
+//
+
+#include "core/nng_impl.h"
+#include "nng/protocol/survey0/survey.h"
+
+// Surveyor protocol. The SURVEYOR protocol is the "survey" side of the
+// survey pattern. This is useful for building service discovery, voting, etc.
+
+typedef struct xsurv0_pipe xsurv0_pipe;
+typedef struct xsurv0_sock xsurv0_sock;
+
+static void xsurv0_sock_getq_cb(void *);
+static void xsurv0_getq_cb(void *);
+static void xsurv0_putq_cb(void *);
+static void xsurv0_send_cb(void *);
+static void xsurv0_recv_cb(void *);
+
+// xsurv0_sock is our per-socket protocol private structure.
+struct xsurv0_sock {
+ nni_list pipes; // active pipes (guarded by mtx)
+ nni_aio aio_getq; // pulls messages from the upper write queue
+ nni_msgq * uwq; // upper write queue (app -> wire)
+ nni_msgq * urq; // upper read queue (wire -> app)
+ nni_mtx mtx;
+ nni_atomic_int ttl; // NNG_OPT_MAXTTL value
+};
+
+// xsurv0_pipe is our per-pipe protocol private structure.
+struct xsurv0_pipe {
+ nni_pipe * npipe;
+ xsurv0_sock * psock;
+ nni_msgq * sendq; // per-pipe buffer of outgoing surveys
+ nni_list_node node;
+ nni_aio aio_getq;
+ nni_aio aio_putq;
+ nni_aio aio_send;
+ nni_aio aio_recv;
+};
+
+// Release per-socket resources; aios must be stopped before this runs.
+static void
+xsurv0_sock_fini(void *arg)
+{
+ xsurv0_sock *s = arg;
+
+ nni_aio_fini(&s->aio_getq);
+ nni_mtx_fini(&s->mtx);
+}
+
+// Initialize per-socket state; cannot fail (returns 0 always).
+static int
+xsurv0_sock_init(void *arg, nni_sock *nsock)
+{
+ xsurv0_sock *s = arg;
+
+ nni_aio_init(&s->aio_getq, xsurv0_sock_getq_cb, s);
+ NNI_LIST_INIT(&s->pipes, xsurv0_pipe, node);
+ nni_mtx_init(&s->mtx);
+
+ s->uwq = nni_sock_sendq(nsock);
+ s->urq = nni_sock_recvq(nsock);
+ nni_atomic_init(&s->ttl);
+ nni_atomic_set(&s->ttl, 8); // default max TTL
+
+ return (0);
+}
+
+// Socket opened: start pulling application messages off the write queue.
+static void
+xsurv0_sock_open(void *arg)
+{
+ xsurv0_sock *s = arg;
+
+ nni_msgq_aio_get(s->uwq, &s->aio_getq);
+}
+
+// Socket closing: cancel the outstanding write-queue get.
+static void
+xsurv0_sock_close(void *arg)
+{
+ xsurv0_sock *s = arg;
+
+ nni_aio_close(&s->aio_getq);
+}
+
+// Wait for all in-flight pipe aios to complete (must precede fini).
+static void
+xsurv0_pipe_stop(void *arg)
+{
+ xsurv0_pipe *p = arg;
+
+ nni_aio_stop(&p->aio_getq);
+ nni_aio_stop(&p->aio_send);
+ nni_aio_stop(&p->aio_recv);
+ nni_aio_stop(&p->aio_putq);
+}
+
+// Release per-pipe resources, including the per-pipe send queue.
+static void
+xsurv0_pipe_fini(void *arg)
+{
+ xsurv0_pipe *p = arg;
+
+ nni_aio_fini(&p->aio_getq);
+ nni_aio_fini(&p->aio_send);
+ nni_aio_fini(&p->aio_recv);
+ nni_aio_fini(&p->aio_putq);
+ nni_msgq_fini(p->sendq);
+}
+
+// Initialize per-pipe state. Returns 0 on success or an nng error if the
+// per-pipe send queue cannot be allocated (in which case state is torn
+// back down via xsurv0_pipe_fini).
+static int
+xsurv0_pipe_init(void *arg, nni_pipe *npipe, void *s)
+{
+ xsurv0_pipe *p = arg;
+ int rv;
+
+ nni_aio_init(&p->aio_getq, xsurv0_getq_cb, p);
+ nni_aio_init(&p->aio_putq, xsurv0_putq_cb, p);
+ nni_aio_init(&p->aio_send, xsurv0_send_cb, p);
+ nni_aio_init(&p->aio_recv, xsurv0_recv_cb, p);
+
+ // This depth could be tunable. The queue exists so that if we
+ // have multiple requests coming in faster than we can deliver them,
+ // we try to avoid dropping them. We don't really have a solution
+ // for applying back pressure. It would be nice if surveys carried
+ // an expiration with them, so that we could discard any that are
+ // not delivered before their expiration date.
+ if ((rv = nni_msgq_init(&p->sendq, 16)) != 0) {
+ xsurv0_pipe_fini(p);
+ return (rv);
+ }
+
+ p->npipe = npipe;
+ p->psock = s;
+ return (0);
+}
+
+// Start the pipe: reject wrong-protocol peers, register it on the socket,
+// and kick off the send (from sendq) and receive loops.
+static int
+xsurv0_pipe_start(void *arg)
+{
+ xsurv0_pipe *p = arg;
+ xsurv0_sock *s = p->psock;
+
+ // Only RESPONDENT peers may connect to a surveyor.
+ if (nni_pipe_peer(p->npipe) != NNG_SURVEYOR0_PEER) {
+ return (NNG_EPROTO);
+ }
+
+ nni_mtx_lock(&s->mtx);
+ nni_list_append(&s->pipes, p);
+ nni_mtx_unlock(&s->mtx);
+
+ nni_msgq_aio_get(p->sendq, &p->aio_getq);
+ nni_pipe_recv(p->npipe, &p->aio_recv);
+ return (0);
+}
+
+// Pipe closing: cancel outstanding aios, close the per-pipe send queue,
+// and unlink the pipe from the socket's pipe list.
+static void
+xsurv0_pipe_close(void *arg)
+{
+ xsurv0_pipe *p = arg;
+ xsurv0_sock *s = p->psock;
+
+ nni_aio_close(&p->aio_getq);
+ nni_aio_close(&p->aio_send);
+ nni_aio_close(&p->aio_recv);
+ nni_aio_close(&p->aio_putq);
+
+ nni_msgq_close(p->sendq);
+
+ nni_mtx_lock(&s->mtx);
+ if (nni_list_active(&s->pipes, p)) {
+ nni_list_remove(&s->pipes, p);
+ }
+ nni_mtx_unlock(&s->mtx);
+}
+
+// sendq produced a message: forward it down the wire. On error (queue
+// closed) the pipe is shut down.
+static void
+xsurv0_getq_cb(void *arg)
+{
+ xsurv0_pipe *p = arg;
+
+ if (nni_aio_result(&p->aio_getq) != 0) {
+ nni_pipe_close(p->npipe);
+ return;
+ }
+
+ // Hand ownership of the message from the getq aio to the send aio.
+ nni_aio_set_msg(&p->aio_send, nni_aio_get_msg(&p->aio_getq));
+ nni_aio_set_msg(&p->aio_getq, NULL);
+
+ nni_pipe_send(p->npipe, &p->aio_send);
+}
+
+// Wire send completed: on failure free the undelivered message and close
+// the pipe; on success go back for the next queued survey.
+static void
+xsurv0_send_cb(void *arg)
+{
+ xsurv0_pipe *p = arg;
+
+ if (nni_aio_result(&p->aio_send) != 0) {
+ nni_msg_free(nni_aio_get_msg(&p->aio_send));
+ nni_aio_set_msg(&p->aio_send, NULL);
+ nni_pipe_close(p->npipe);
+ return;
+ }
+
+ nni_msgq_aio_get(p->sendq, &p->aio_getq);
+}
+
+// Delivery to the upper read queue completed: on failure drop the message
+// and close the pipe; on success resume receiving from the wire.
+static void
+xsurv0_putq_cb(void *arg)
+{
+ xsurv0_pipe *p = arg;
+
+ if (nni_aio_result(&p->aio_putq) != 0) {
+ nni_msg_free(nni_aio_get_msg(&p->aio_putq));
+ nni_aio_set_msg(&p->aio_putq, NULL);
+ nni_pipe_close(p->npipe);
+ return;
+ }
+
+ nni_pipe_recv(p->npipe, &p->aio_recv);
+}
+
+// A response arrived from a respondent. Move the backtrace (a run of
+// 32-bit IDs terminated by one with the high bit set -- the survey ID)
+// from the body into the header, tag the message with the pipe ID, and
+// deliver it to the upper read queue. Malformed input closes the pipe.
+// NOTE(review): s->ttl is stored for NNG_OPT_MAXTTL but no hop-limit
+// check appears in this function -- confirm whether enforcement is
+// intended here or handled elsewhere.
+static void
+xsurv0_recv_cb(void *arg)
+{
+ xsurv0_pipe *p = arg;
+ nni_msg * msg;
+ bool end;
+
+ if (nni_aio_result(&p->aio_recv) != 0) {
+ nni_pipe_close(p->npipe);
+ return;
+ }
+
+ msg = nni_aio_get_msg(&p->aio_recv);
+ nni_aio_set_msg(&p->aio_recv, NULL);
+ nni_msg_set_pipe(msg, nni_pipe_id(p->npipe));
+ end = false;
+
+ while (!end) {
+ uint8_t *body;
+
+ if (nni_msg_len(msg) < 4) {
+ // Peer gave us garbage, so kick it.
+ nni_msg_free(msg);
+ nni_pipe_close(p->npipe);
+ return;
+ }
+ body = nni_msg_body(msg);
+ // High bit set marks the survey ID terminating the backtrace.
+ end = ((body[0] & 0x80u) != 0);
+
+ if (nni_msg_header_append(msg, body, sizeof(uint32_t)) != 0) {
+ // TODO: bump a no-memory stat
+ nni_msg_free(msg);
+ // Closing the pipe may release some memory.
+ // It at least gives an indication to the peer
+ // that we've lost the message.
+ nni_pipe_close(p->npipe);
+ return;
+ }
+ nni_msg_trim(msg, sizeof(uint32_t));
+ }
+
+ nni_aio_set_msg(&p->aio_putq, msg);
+ nni_msgq_aio_put(p->psock->urq, &p->aio_putq);
+}
+
+// Setter for NNG_OPT_MAXTTL; accepts 1..NNI_MAX_MAX_TTL, else NNG_EINVAL
+// (or NNG_EBADTYPE for a non-int access) from nni_copyin_int.
+static int
+xsurv0_sock_set_max_ttl(void *arg, const void *buf, size_t sz, nni_opt_type t)
+{
+ xsurv0_sock *s = arg;
+ int ttl;
+ int rv;
+ if ((rv = nni_copyin_int(&ttl, buf, sz, 1, NNI_MAX_MAX_TTL, t)) == 0) {
+ nni_atomic_set(&s->ttl, ttl);
+ }
+ return (rv);
+}
+
+// Getter for NNG_OPT_MAXTTL.
+static int
+xsurv0_sock_get_max_ttl(void *arg, void *buf, size_t *szp, nni_opt_type t)
+{
+ xsurv0_sock *s = arg;
+ return (nni_copyout_int(nni_atomic_get(&s->ttl), buf, szp, t));
+}
+
+// The application posted a survey: broadcast it by cloning a reference
+// for every connected pipe. nni_msgq_tryput is non-blocking; a pipe
+// whose queue is full simply misses this survey (its clone is freed).
+static void
+xsurv0_sock_getq_cb(void *arg)
+{
+ xsurv0_sock *s = arg;
+ xsurv0_pipe *p;
+ nni_msg * msg;
+
+ if (nni_aio_result(&s->aio_getq) != 0) {
+ // Should be NNG_ECLOSED.
+ return;
+ }
+ msg = nni_aio_get_msg(&s->aio_getq);
+ nni_aio_set_msg(&s->aio_getq, NULL);
+
+ nni_mtx_lock(&s->mtx);
+ NNI_LIST_FOREACH (&s->pipes, p) {
+ nni_msg_clone(msg);
+ if (nni_msgq_tryput(p->sendq, msg) != 0) {
+ nni_msg_free(msg);
+ }
+ }
+
+ // Rearm for the next survey before releasing the lock.
+ nni_msgq_aio_get(s->uwq, &s->aio_getq);
+ nni_mtx_unlock(&s->mtx);
+
+ // Drop the sender's reference; pipes that accepted the message each
+ // hold their own clone. (With no pipes this just tosses it.)
+ nni_msg_free(msg);
+}
+
+// Raw-mode receive: pull the next routed response off the read queue.
+static void
+xsurv0_sock_recv(void *arg, nni_aio *aio)
+{
+ xsurv0_sock *s = arg;
+
+ nni_msgq_aio_get(s->urq, aio);
+}
+
+// Raw-mode send: queue the application's survey for broadcast.
+static void
+xsurv0_sock_send(void *arg, nni_aio *aio)
+{
+ xsurv0_sock *s = arg;
+
+ nni_msgq_aio_put(s->uwq, aio);
+}
+
+// Protocol vtables wiring the callbacks above into the core.
+static nni_proto_pipe_ops xsurv0_pipe_ops = {
+ .pipe_size = sizeof(xsurv0_pipe),
+ .pipe_init = xsurv0_pipe_init,
+ .pipe_fini = xsurv0_pipe_fini,
+ .pipe_start = xsurv0_pipe_start,
+ .pipe_close = xsurv0_pipe_close,
+ .pipe_stop = xsurv0_pipe_stop,
+};
+
+static nni_option xsurv0_sock_options[] = {
+ {
+ .o_name = NNG_OPT_MAXTTL,
+ .o_get = xsurv0_sock_get_max_ttl,
+ .o_set = xsurv0_sock_set_max_ttl,
+ },
+ // terminate list
+ {
+ .o_name = NULL,
+ },
+};
+
+static nni_proto_sock_ops xsurv0_sock_ops = {
+ .sock_size = sizeof(xsurv0_sock),
+ .sock_init = xsurv0_sock_init,
+ .sock_fini = xsurv0_sock_fini,
+ .sock_open = xsurv0_sock_open,
+ .sock_close = xsurv0_sock_close,
+ .sock_send = xsurv0_sock_send,
+ .sock_recv = xsurv0_sock_recv,
+ .sock_options = xsurv0_sock_options,
+};
+
+// Raw (no-context) surveyor protocol descriptor.
+static nni_proto xsurv0_proto = {
+ .proto_version = NNI_PROTOCOL_VERSION,
+ .proto_self = { NNG_SURVEYOR0_SELF, NNG_SURVEYOR0_SELF_NAME },
+ .proto_peer = { NNG_SURVEYOR0_PEER, NNG_SURVEYOR0_PEER_NAME },
+ .proto_flags = NNI_PROTO_FLAG_SNDRCV | NNI_PROTO_FLAG_RAW,
+ .proto_sock_ops = &xsurv0_sock_ops,
+ .proto_pipe_ops = &xsurv0_pipe_ops,
+};
+
+// Public entry point: open a raw SURVEYOR0 socket.
+int
+nng_surveyor0_open_raw(nng_socket *sidp)
+{
+ return (nni_proto_open(sidp, &xsurv0_proto));
+}
diff --git a/src/sp/protocol/survey0/xsurvey_test.c b/src/sp/protocol/survey0/xsurvey_test.c
new file mode 100644
index 00000000..f8e9d401
--- /dev/null
+++ b/src/sp/protocol/survey0/xsurvey_test.c
@@ -0,0 +1,399 @@
+//
+// Copyright 2020 Staysail Systems, Inc. <info@staysail.tech>
+//
+// This software is supplied under the terms of the MIT License, a
+// copy of which should be located in the distribution where this
+// file was obtained (LICENSE.txt). A copy of the license may also be
+// found online at https://opensource.org/licenses/MIT.
+//
+
+#include <nuts.h>
+
+// Check the protocol identity numbers and names reported by a raw
+// surveyor socket.
+static void
+test_xsurveyor_identity(void)
+{
+ nng_socket s;
+ int p;
+ char * n;
+
+ NUTS_PASS(nng_surveyor0_open_raw(&s));
+ NUTS_PASS(nng_socket_get_int(s, NNG_OPT_PROTO, &p));
+ NUTS_TRUE(p == NNG_SURVEYOR0_SELF); // 0x62
+ NUTS_PASS(nng_socket_get_int(s, NNG_OPT_PEER, &p));
+ NUTS_TRUE(p == NNG_SURVEYOR0_PEER); // 0x63 (respondent)
+ NUTS_PASS(nng_socket_get_string(s, NNG_OPT_PROTONAME, &n));
+ NUTS_MATCH(n, NNG_SURVEYOR0_SELF_NAME);
+ nng_strfree(n);
+ NUTS_PASS(nng_socket_get_string(s, NNG_OPT_PEERNAME, &n));
+ NUTS_MATCH(n, NNG_SURVEYOR0_PEER_NAME);
+ nng_strfree(n);
+ NUTS_CLOSE(s);
+}
+
+// A raw surveyor socket must report NNG_OPT_RAW as true.
+static void
+test_xsurveyor_raw(void)
+{
+ nng_socket s;
+ bool b;
+
+ NUTS_PASS(nng_surveyor0_open_raw(&s));
+ NUTS_PASS(nng_socket_get_bool(s, NNG_OPT_RAW, &b));
+ NUTS_TRUE(b);
+ NUTS_CLOSE(s);
+}
+
+// Raw sockets do not support contexts; opening one must fail.
+static void
+test_xsurvey_no_context(void)
+{
+ nng_socket s;
+ nng_ctx ctx;
+
+ NUTS_PASS(nng_surveyor0_open_raw(&s));
+ NUTS_FAIL(nng_ctx_open(&ctx, s), NNG_ENOTSUP);
+ NUTS_CLOSE(s);
+}
+
+// The surveyor broadcasts (send never blocks on peers), so its send fd
+// polls writable both before and after a peer connects.
+static void
+test_xsurvey_poll_writeable(void)
+{
+ int fd;
+ nng_socket surv;
+ nng_socket resp;
+
+ NUTS_PASS(nng_surveyor0_open_raw(&surv));
+ NUTS_PASS(nng_respondent0_open(&resp));
+ NUTS_PASS(nng_socket_get_int(surv, NNG_OPT_SENDFD, &fd));
+ NUTS_TRUE(fd >= 0);
+
+ // Survey is broadcast, so we can always write.
+ NUTS_TRUE(nuts_poll_fd(fd));
+
+ NUTS_MARRY(surv, resp);
+
+ // Now it's writable.
+ NUTS_TRUE(nuts_poll_fd(fd));
+
+ NUTS_CLOSE(surv);
+ NUTS_CLOSE(resp);
+}
+
+// The receive fd polls readable only while a response is actually queued:
+// not before connect, not before a reply arrives, and not after it has
+// been consumed.
+static void
+test_xsurvey_poll_readable(void)
+{
+ int fd;
+ nng_socket surv;
+ nng_socket resp;
+ nng_msg * msg;
+
+ NUTS_PASS(nng_surveyor0_open_raw(&surv));
+ NUTS_PASS(nng_respondent0_open(&resp));
+ NUTS_PASS(nng_socket_get_int(surv, NNG_OPT_RECVFD, &fd));
+ NUTS_PASS(nng_socket_set_ms(resp, NNG_OPT_RECVTIMEO, 1000));
+ NUTS_PASS(nng_socket_set_ms(surv, NNG_OPT_RECVTIMEO, 1000));
+ NUTS_PASS(nng_socket_set_ms(resp, NNG_OPT_SENDTIMEO, 1000));
+ NUTS_PASS(nng_socket_set_ms(surv, NNG_OPT_SENDTIMEO, 1000));
+
+ NUTS_TRUE(fd >= 0);
+
+ // Not readable if not connected!
+ NUTS_TRUE(nuts_poll_fd(fd) == false);
+
+ // Even after connect (no message yet)
+ NUTS_MARRY(surv, resp);
+ NUTS_TRUE(nuts_poll_fd(fd) == false);
+
+ // But once we send messages, it is.
+ // We have to send a request, in order to send a reply.
+ NUTS_PASS(nng_msg_alloc(&msg, 0));
+ // Request ID
+ NUTS_PASS(nng_msg_append_u32(msg, 0x80000000));
+ NUTS_PASS(nng_sendmsg(surv, msg, 0));
+
+ NUTS_PASS(nng_recvmsg(resp, &msg, 0));
+ NUTS_PASS(nng_sendmsg(resp, msg, 0));
+
+ // Allow time for the reply to propagate back.
+ NUTS_SLEEP(100);
+
+ NUTS_TRUE(nuts_poll_fd(fd) );
+
+ // and receiving makes it no longer ready
+ NUTS_PASS(nng_recvmsg(surv, &msg, 0));
+ nng_msg_free(msg);
+ NUTS_TRUE(nuts_poll_fd(fd) == false);
+
+ NUTS_CLOSE(surv);
+ NUTS_CLOSE(resp);
+}
+
+// A surveyor must refuse a connection from another surveyor; the rejection
+// is observed through the listener's "reject" statistic.
+static void
+test_xsurvey_validate_peer(void)
+{
+ nng_socket s1, s2;
+ nng_stat * stats;
+ nng_stat * reject;
+ char *addr;
+
+ NUTS_ADDR(addr, "inproc");
+
+ NUTS_PASS(nng_surveyor0_open_raw(&s1));
+ NUTS_PASS(nng_surveyor0_open(&s2));
+
+ NUTS_PASS(nng_listen(s1, addr, NULL, 0));
+ NUTS_PASS(nng_dial(s2, addr, NULL, NNG_FLAG_NONBLOCK));
+
+ // Give the (nonblocking) dial time to be attempted and rejected.
+ NUTS_SLEEP(100);
+ NUTS_PASS(nng_stats_get(&stats));
+
+ NUTS_TRUE(stats != NULL);
+ NUTS_TRUE((reject = nng_stat_find_socket(stats, s1)) != NULL);
+ NUTS_TRUE((reject = nng_stat_find(reject, "reject")) != NULL);
+
+ NUTS_TRUE(nng_stat_type(reject) == NNG_STAT_COUNTER);
+ NUTS_TRUE(nng_stat_value(reject) > 0);
+
+ NUTS_CLOSE(s1);
+ NUTS_CLOSE(s2);
+ nng_stats_free(stats);
+}
+
+// A receive posted with an already-stopped aio must complete immediately
+// with NNG_ECANCELED rather than blocking.
+static void
+test_xsurvey_recv_aio_stopped(void)
+{
+ nng_socket surv;
+ nng_aio * aio;
+
+ NUTS_PASS(nng_surveyor0_open_raw(&surv));
+ NUTS_PASS(nng_aio_alloc(&aio, NULL, NULL));
+
+ nng_aio_stop(aio);
+ nng_recv_aio(surv, aio);
+ nng_aio_wait(aio);
+ NUTS_FAIL(nng_aio_result(aio), NNG_ECANCELED);
+ NUTS_CLOSE(surv);
+ nng_aio_free(aio);
+}
+
+// A reply whose backtrace has been stripped of the survey ID is garbage;
+// the surveyor must drop it rather than deliver it to the application.
+static void
+test_xsurvey_recv_garbage(void)
+{
+ nng_socket resp;
+ nng_socket surv;
+ nng_msg * m;
+ uint32_t req_id;
+
+ NUTS_PASS(nng_respondent0_open_raw(&resp));
+ NUTS_PASS(nng_surveyor0_open_raw(&surv));
+ NUTS_PASS(nng_socket_set_ms(surv, NNG_OPT_RECVTIMEO, 100));
+ NUTS_PASS(nng_socket_set_ms(surv, NNG_OPT_SENDTIMEO, 1000));
+ // FIX: this option was set twice on resp; the duplicate is removed.
+ NUTS_PASS(nng_socket_set_ms(resp, NNG_OPT_SENDTIMEO, 1000));
+
+ NUTS_MARRY(surv, resp);
+
+ NUTS_PASS(nng_msg_alloc(&m, 0));
+ NUTS_PASS(nng_msg_append_u32(m, 0x80000000));
+ NUTS_PASS(nng_sendmsg(surv, m, 0));
+
+ NUTS_PASS(nng_recvmsg(resp, &m, 0));
+
+ // The message will have a header that contains the 32-bit pipe ID,
+ // followed by the 32-bit request ID. We will discard the request
+ // ID before sending it out.
+ NUTS_TRUE(nng_msg_header_len(m) == 8);
+ NUTS_PASS(nng_msg_header_chop_u32(m, &req_id));
+ NUTS_TRUE(req_id == 0x80000000);
+
+ NUTS_PASS(nng_sendmsg(resp, m, 0));
+ NUTS_FAIL(nng_recvmsg(surv, &m, 0), NNG_ETIMEDOUT);
+
+ NUTS_CLOSE(surv);
+ NUTS_CLOSE(resp);
+}
+
+// A reply carrying extra backtrace hops must arrive at the raw surveyor
+// with all hop IDs (and the survey ID) preserved, in order, in the header.
+static void
+test_xsurvey_recv_header(void)
+{
+ nng_socket resp;
+ nng_socket surv;
+ nng_msg * m;
+ nng_pipe p;
+ uint32_t id;
+
+ NUTS_PASS(nng_respondent0_open_raw(&resp));
+ NUTS_PASS(nng_surveyor0_open_raw(&surv));
+ NUTS_PASS(nng_socket_set_ms(surv, NNG_OPT_RECVTIMEO, 1000));
+ NUTS_PASS(nng_socket_set_ms(surv, NNG_OPT_SENDTIMEO, 1000));
+ // FIX: this option was set twice on resp; the duplicate is removed.
+ NUTS_PASS(nng_socket_set_ms(resp, NNG_OPT_SENDTIMEO, 1000));
+
+ NUTS_MARRY_EX(surv, resp, NULL, NULL, &p);
+
+ // Simulate a few hops.
+ NUTS_PASS(nng_msg_alloc(&m, 0));
+ NUTS_PASS(nng_msg_header_append_u32(m, nng_pipe_id(p)));
+ NUTS_PASS(nng_msg_header_append_u32(m, 0x2));
+ NUTS_PASS(nng_msg_header_append_u32(m, 0x1));
+ NUTS_PASS(nng_msg_header_append_u32(m, 0x80000123u));
+
+ NUTS_PASS(nng_sendmsg(resp, m, 0));
+
+ NUTS_PASS(nng_recvmsg(surv, &m, 0));
+ NUTS_TRUE(nng_msg_header_len(m) == 12);
+ NUTS_PASS(nng_msg_header_trim_u32(m, &id));
+ NUTS_TRUE(id == 0x2);
+ NUTS_PASS(nng_msg_header_trim_u32(m, &id));
+ NUTS_TRUE(id == 0x1);
+ NUTS_PASS(nng_msg_header_trim_u32(m, &id));
+ NUTS_TRUE(id == 0x80000123u);
+
+ nng_msg_free(m);
+
+ NUTS_CLOSE(surv);
+ NUTS_CLOSE(resp);
+}
+
+// Flood a tiny surveyor receive buffer with replies, then close both
+// sockets while traffic is still in flight; exercises shutdown paths.
+static void
+test_xsurvey_close_during_recv(void)
+{
+ nng_socket resp;
+ nng_socket surv;
+ nng_msg * m;
+ nng_pipe p1;
+ nng_pipe p2;
+
+ NUTS_PASS(nng_respondent0_open_raw(&resp));
+ NUTS_PASS(nng_surveyor0_open_raw(&surv));
+ NUTS_PASS(nng_socket_set_ms(resp, NNG_OPT_RECVTIMEO, 1000));
+ NUTS_PASS(nng_socket_set_ms(surv, NNG_OPT_SENDTIMEO, 100));
+ NUTS_PASS(nng_socket_set_int(surv, NNG_OPT_RECVBUF, 1));
+ NUTS_PASS(nng_socket_set_int(resp, NNG_OPT_SENDBUF, 20));
+
+ NUTS_MARRY_EX(surv, resp, NULL, &p1, &p2);
+ NUTS_TRUE(nng_pipe_id(p1) > 0);
+ NUTS_TRUE(nng_pipe_id(p2) > 0);
+
+ for (unsigned i = 0; i < 20; i++) {
+ NUTS_PASS(nng_msg_alloc(&m, 4));
+ NUTS_PASS(nng_msg_header_append_u32(m, nng_pipe_id(p2)));
+ NUTS_PASS(nng_msg_header_append_u32(m, i | 0x80000000u));
+ NUTS_SLEEP(10);
+ NUTS_PASS(nng_sendmsg(resp, m, 0));
+ }
+ NUTS_CLOSE(surv);
+ NUTS_CLOSE(resp);
+}
+
+// Queue surveys faster than the respondent drains them, then close the
+// pipe mid-stream; the surveyor must not crash or leak.
+static void
+test_xsurvey_close_pipe_during_send(void)
+{
+ nng_socket resp;
+ nng_socket surv;
+ nng_msg * m;
+ nng_pipe p1;
+ nng_pipe p2;
+
+ NUTS_PASS(nng_respondent0_open_raw(&resp));
+ NUTS_PASS(nng_surveyor0_open_raw(&surv));
+ NUTS_PASS(nng_socket_set_ms(resp, NNG_OPT_RECVTIMEO, 1000));
+ NUTS_PASS(nng_socket_set_ms(surv, NNG_OPT_SENDTIMEO, 100));
+ NUTS_PASS(nng_socket_set_int(resp, NNG_OPT_RECVBUF, 5));
+ NUTS_PASS(nng_socket_set_int(surv, NNG_OPT_SENDBUF, 20));
+
+ NUTS_MARRY_EX(surv, resp, NULL, &p1, &p2);
+ NUTS_TRUE(nng_pipe_id(p1) > 0);
+ NUTS_TRUE(nng_pipe_id(p2) > 0);
+
+ for (unsigned i = 0; i < 20; i++) {
+ NUTS_PASS(nng_msg_alloc(&m, 4));
+ NUTS_PASS(nng_msg_header_append_u32(m, i | 0x80000000u));
+ NUTS_SLEEP(10);
+ NUTS_PASS(nng_sendmsg(surv, m, 0));
+ }
+
+ NUTS_PASS(nng_pipe_close(p1));
+ NUTS_CLOSE(surv);
+ NUTS_CLOSE(resp);
+}
+
+// Exercise NNG_OPT_MAXTTL on the raw surveyor: valid range is 1..255,
+// out-of-range and wrongly-typed accesses must fail with the right errors.
+// (Stray spaces before commas cleaned up to match the sibling test in
+// xrespond_test.c.)
+static void
+test_xsurvey_ttl_option(void)
+{
+ nng_socket s;
+ int v;
+ bool b;
+ size_t sz;
+ const char *opt = NNG_OPT_MAXTTL;
+
+ NUTS_PASS(nng_surveyor0_open_raw(&s));
+
+ NUTS_PASS(nng_socket_set_int(s, opt, 1));
+ NUTS_FAIL(nng_socket_set_int(s, opt, 0), NNG_EINVAL);
+ NUTS_FAIL(nng_socket_set_int(s, opt, -1), NNG_EINVAL);
+ NUTS_FAIL(nng_socket_set_int(s, opt, 16), NNG_EINVAL);
+ NUTS_FAIL(nng_socket_set_int(s, opt, 256), NNG_EINVAL);
+ NUTS_PASS(nng_socket_set_int(s, opt, 3));
+ NUTS_PASS(nng_socket_get_int(s, opt, &v));
+ NUTS_TRUE(v == 3);
+ v = 0;
+ sz = sizeof(v);
+ // Generic getter must agree with the typed getter.
+ NUTS_PASS(nng_socket_get(s, opt, &v, &sz));
+ NUTS_TRUE(v == 3);
+ NUTS_TRUE(sz == sizeof(v));
+
+ // Wrong size and wrong type are rejected.
+ NUTS_FAIL(nng_socket_set(s, opt, "", 1), NNG_EINVAL);
+ sz = 1;
+ NUTS_FAIL(nng_socket_get(s, opt, &v, &sz), NNG_EINVAL);
+ NUTS_FAIL(nng_socket_set_bool(s, opt, true), NNG_EBADTYPE);
+ NUTS_FAIL(nng_socket_get_bool(s, opt, &b), NNG_EBADTYPE);
+
+ NUTS_CLOSE(s);
+}
+
+// One survey sent from a raw surveyor must be delivered to every
+// connected respondent (broadcast semantics).
+static void
+test_xsurvey_broadcast(void)
+{
+ nng_socket resp1;
+ nng_socket resp2;
+ nng_socket surv;
+ nng_msg * m;
+
+ NUTS_PASS(nng_respondent0_open(&resp1));
+ NUTS_PASS(nng_respondent0_open(&resp2));
+ NUTS_PASS(nng_surveyor0_open_raw(&surv));
+ NUTS_PASS(nng_socket_set_ms(resp1, NNG_OPT_RECVTIMEO, 1000));
+ NUTS_PASS(nng_socket_set_ms(resp2, NNG_OPT_RECVTIMEO, 1000));
+ NUTS_PASS(nng_socket_set_ms(surv, NNG_OPT_SENDTIMEO, 100));
+
+ NUTS_MARRY(surv, resp1);
+ NUTS_MARRY(surv, resp2);
+
+ // Raw mode: the caller supplies the survey ID in the header.
+ NUTS_PASS(nng_msg_alloc(&m, 0));
+ NUTS_PASS(nng_msg_header_append_u32(m, 0x80000002u));
+ NUTS_PASS(nng_msg_append(m, "hello", 6));
+
+ NUTS_PASS(nng_sendmsg(surv, m, 0));
+ NUTS_RECV(resp1, "hello");
+ NUTS_RECV(resp2, "hello");
+
+ NUTS_CLOSE(surv);
+ NUTS_CLOSE(resp1);
+ NUTS_CLOSE(resp2);
+}
+
+// Test registry; NULL entry terminates the list.
+// NOTE(review): this file uses TEST_LIST while xrespond_test.c uses
+// NUTS_TESTS -- confirm both spellings are supported by the harness.
+TEST_LIST = {
+ { "xsurvey identity", test_xsurveyor_identity },
+ { "xsurvey raw", test_xsurveyor_raw },
+ { "xsurvey no context", test_xsurvey_no_context },
+ { "xsurvey poll readable", test_xsurvey_poll_readable },
+ { "xsurvey poll writable", test_xsurvey_poll_writeable },
+ { "xsurvey validate peer", test_xsurvey_validate_peer },
+ { "xsurvey recv aio stopped", test_xsurvey_recv_aio_stopped },
+ { "xsurvey recv garbage", test_xsurvey_recv_garbage },
+ { "xsurvey recv header", test_xsurvey_recv_header },
+ { "xsurvey close during recv", test_xsurvey_close_during_recv },
+ { "xsurvey close pipe during send",
+ test_xsurvey_close_pipe_during_send },
+ { "xsurvey ttl option", test_xsurvey_ttl_option },
+ { "xsurvey broadcast", test_xsurvey_broadcast },
+ { NULL, NULL },
+};
diff --git a/src/sp/transport/CMakeLists.txt b/src/sp/transport/CMakeLists.txt
new file mode 100644
index 00000000..add8a9c9
--- /dev/null
+++ b/src/sp/transport/CMakeLists.txt
@@ -0,0 +1,19 @@
+#
+# Copyright 2020 Staysail Systems, Inc. <info@staysail.tech>
+#
+# This software is supplied under the terms of the MIT License, a
+# copy of which should be located in the distribution where this
+# file was obtained (LICENSE.txt). A copy of the license may also be
+# found online at https://opensource.org/licenses/MIT.
+#
+
+# Transports.
+nng_directory(transport)
+
+add_subdirectory(inproc)
+add_subdirectory(ipc)
+add_subdirectory(tcp)
+add_subdirectory(tls)
+add_subdirectory(ws)
+add_subdirectory(zerotier)
+
diff --git a/src/sp/transport/inproc/CMakeLists.txt b/src/sp/transport/inproc/CMakeLists.txt
new file mode 100644
index 00000000..317686bb
--- /dev/null
+++ b/src/sp/transport/inproc/CMakeLists.txt
@@ -0,0 +1,16 @@
+#
+# Copyright 2020 Staysail Systems, Inc. <info@staysail.tech>
+# Copyright 2018 Capitar IT Group BV <info@capitar.com>
+#
+# This software is supplied under the terms of the MIT License, a
+# copy of which should be located in the distribution where this
+# file was obtained (LICENSE.txt). A copy of the license may also be
+# found online at https://opensource.org/licenses/MIT.
+#
+
+# inproc protocol
+nng_directory(inproc)
+
+nng_sources_if(NNG_TRANSPORT_INPROC inproc.c)
+nng_headers_if(NNG_TRANSPORT_INPROC nng/transport/inproc/inproc.h)
+nng_defines_if(NNG_TRANSPORT_INPROC NNG_TRANSPORT_INPROC) \ No newline at end of file
diff --git a/src/sp/transport/inproc/inproc.c b/src/sp/transport/inproc/inproc.c
new file mode 100644
index 00000000..84e2c625
--- /dev/null
+++ b/src/sp/transport/inproc/inproc.c
@@ -0,0 +1,692 @@
+//
+// Copyright 2020 Staysail Systems, Inc. <info@staysail.tech>
+// Copyright 2018 Capitar IT Group BV <info@capitar.com>
+// Copyright 2018 Devolutions <info@devolutions.net>
+//
+// This software is supplied under the terms of the MIT License, a
+// copy of which should be located in the distribution where this
+// file was obtained (LICENSE.txt). A copy of the license may also be
+// found online at https://opensource.org/licenses/MIT.
+//
+
+#include <string.h>
+
+#include "core/nng_impl.h"
+
+// Inproc transport. This just transports messages from one
+// peer to another. The inproc transport is only valid within the same
+// process.
+
+typedef struct inproc_pair inproc_pair;
+typedef struct inproc_pipe inproc_pipe;
+typedef struct inproc_ep inproc_ep;
+typedef struct inproc_queue inproc_queue;
+
+// Global transport state: a registry of bound listeners keyed by URL,
+// protected by a single mutex.
+typedef struct {
+ nni_mtx mx;
+ nni_list servers;
+} inproc_global;
+
+// inproc_pipe represents one half of a connection.
+// The two queue pointers reference the shared pair's queues, crossed so
+// that one side's send_queue is the other side's recv_queue.
+struct inproc_pipe {
+ const char * addr;
+ inproc_pair * pair;
+ inproc_queue *recv_queue;
+ inproc_queue *send_queue;
+ uint16_t peer;
+ uint16_t proto;
+};
+
+// One direction of message flow: pending reader and writer aios are
+// matched up under the queue lock.
+struct inproc_queue {
+ nni_list readers;
+ nni_list writers;
+ nni_mtx lock;
+ bool closed;
+};
+
+// inproc_pair represents a pair of pipes. Because we control both
+// sides of the pipes, we can allocate and free this in one structure.
+// The reference count (one per pipe end) gates destruction.
+struct inproc_pair {
+ nni_atomic_int ref;
+ inproc_queue queues[2];
+};
+
+// Endpoint state, shared by dialers and listeners (distinguished by the
+// listener flag). For a listener, clients holds dialers waiting to
+// rendezvous; aios holds pending connect/accept operations.
+struct inproc_ep {
+ const char * addr;
+ bool listener;
+ nni_list_node node;
+ uint16_t proto;
+ nni_cv cv;
+ nni_list clients;
+ nni_list aios;
+ size_t rcvmax;
+ nni_mtx mtx;
+};
+
+// nni_inproc is our global state - this contains the list of active endpoints
+// which we use for coordinating rendezvous.
+static inproc_global nni_inproc;
+
+// One-time initialization of the global server registry and its lock.
+static int
+inproc_init(void)
+{
+ NNI_LIST_INIT(&nni_inproc.servers, inproc_ep, node);
+
+ nni_mtx_init(&nni_inproc.mx);
+ return (0);
+}
+
+// Tear down the global state; assumes all endpoints are already gone.
+static void
+inproc_fini(void)
+{
+ nni_mtx_fini(&nni_inproc.mx);
+}
+
+// inproc_pair destroy is called when both pipe-ends of the pipe
+// have been destroyed.
+// Free the pair and the locks of both of its queues.
+static void
+inproc_pair_destroy(inproc_pair *pair)
+{
+ for (int i = 0; i < 2; i++) {
+ nni_mtx_fini(&pair->queues[i].lock);
+ }
+ NNI_FREE_STRUCT(pair);
+}
+
+// Allocate one pipe end, inheriting protocol id and address from the
+// endpoint. The pair and queue pointers are filled in later by
+// inproc_accept_clients.
+static int
+inproc_pipe_alloc(inproc_pipe **pipep, inproc_ep *ep)
+{
+ inproc_pipe *pipe;
+
+ if ((pipe = NNI_ALLOC_STRUCT(pipe)) == NULL) {
+ return (NNG_ENOMEM);
+ }
+
+ pipe->proto = ep->proto;
+ pipe->addr = ep->addr;
+ *pipep = pipe;
+ return (0);
+}
+
+// Pipe post-allocation hook; inproc needs no back-reference to the
+// owning nni_pipe, so this is a no-op.
+static int
+inproc_pipe_init(void *arg, nni_pipe *p)
+{
+ NNI_ARG_UNUSED(arg);
+ NNI_ARG_UNUSED(p);
+ return (0);
+}
+
+// Release one pipe end; the shared pair is destroyed only when the
+// second end drops its reference.
+static void
+inproc_pipe_fini(void *arg)
+{
+ inproc_pipe *pipe = arg;
+ inproc_pair *pair;
+
+ if ((pair = pipe->pair) != NULL) {
+ // If we are the last peer, then toss the pair structure.
+ if (nni_atomic_dec_nv(&pair->ref) == 0) {
+ inproc_pair_destroy(pair);
+ }
+ }
+
+ NNI_FREE_STRUCT(pipe);
+}
+
+// Fail every pending reader and writer with NNG_ECLOSED.
+// Caller must hold the queue lock.
+static void
+inproc_queue_run_closed(inproc_queue *queue)
+{
+ nni_aio *aio;
+ while (((aio = nni_list_first(&queue->readers)) != NULL) ||
+ ((aio = nni_list_first(&queue->writers)) != NULL)) {
+ nni_aio_list_remove(aio);
+ nni_aio_finish_error(aio, NNG_ECLOSED);
+ }
+}
+
+// Match pending writers against pending readers, moving one message per
+// iteration until either list is empty. Caller must hold the queue lock.
+static void
+inproc_queue_run(inproc_queue *queue)
+{
+ if (queue->closed) {
+ inproc_queue_run_closed(queue);
+ }
+ for (;;) {
+ nni_aio *rd;
+ nni_aio *wr;
+ nni_msg *msg;
+ nni_msg *pu;
+
+ if (((rd = nni_list_first(&queue->readers)) == NULL) ||
+ ((wr = nni_list_first(&queue->writers)) == NULL)) {
+ return;
+ }
+
+ msg = nni_aio_get_msg(wr);
+ NNI_ASSERT(msg != NULL);
+
+ // At this point, we pass success back to the caller. If
+ // we drop the message for any reason, its accounted on the
+ // receiver side.
+ nni_aio_list_remove(wr);
+ nni_aio_set_msg(wr, NULL);
+ nni_aio_finish(
+ wr, 0, nni_msg_len(msg) + nni_msg_header_len(msg));
+
+ // TODO: We could check the max receive size here.
+
+ // Now the receive side. We need to ensure that we have
+ // an exclusive copy of the message, and pull the header
+ // up into the body to match protocol expectations.
+ // NOTE(review): on pull-up failure the message is silently
+ // dropped and the reader stays queued for the next writer.
+ if ((pu = nni_msg_pull_up(msg)) == NULL) {
+ nni_msg_free(msg);
+ continue;
+ }
+ msg = pu;
+
+ nni_aio_list_remove(rd);
+ nni_aio_set_msg(rd, msg);
+ nni_aio_finish(rd, 0, nni_msg_len(msg));
+ }
+}
+
+// Cancellation callback for a queued send/recv aio: if it is still on a
+// queue list, remove it and complete it with the abort code.
+static void
+inproc_queue_cancel(nni_aio *aio, void *arg, int rv)
+{
+ inproc_queue *queue = arg;
+
+ nni_mtx_lock(&queue->lock);
+ if (nni_aio_list_active(aio)) {
+ nni_aio_list_remove(aio);
+ nni_aio_finish_error(aio, rv);
+ }
+ nni_mtx_unlock(&queue->lock);
+}
+
+// Queue an outbound message on the pipe's send queue, then try to match
+// it with a waiting reader on the peer's side.
+static void
+inproc_pipe_send(void *arg, nni_aio *aio)
+{
+ inproc_pipe * pipe = arg;
+ inproc_queue *queue = pipe->send_queue;
+ int rv;
+
+ if (nni_aio_begin(aio) != 0) {
+ return;
+ }
+
+ nni_mtx_lock(&queue->lock);
+ if ((rv = nni_aio_schedule(aio, inproc_queue_cancel, queue)) != 0) {
+ nni_mtx_unlock(&queue->lock);
+ nni_aio_finish_error(aio, rv);
+ return;
+ }
+ nni_aio_list_append(&queue->writers, aio);
+ inproc_queue_run(queue);
+ nni_mtx_unlock(&queue->lock);
+}
+
+// Queue a receive request on the pipe's recv queue, then try to match
+// it with a pending writer from the peer.
+static void
+inproc_pipe_recv(void *arg, nni_aio *aio)
+{
+ inproc_pipe * pipe = arg;
+ inproc_queue *queue = pipe->recv_queue;
+ int rv;
+
+ if (nni_aio_begin(aio) != 0) {
+ return;
+ }
+
+ nni_mtx_lock(&queue->lock);
+ if ((rv = nni_aio_schedule(aio, inproc_queue_cancel, queue)) != 0) {
+ nni_mtx_unlock(&queue->lock);
+ nni_aio_finish_error(aio, rv);
+ return;
+ }
+ nni_aio_list_append(&queue->readers, aio);
+ inproc_queue_run(queue);
+ nni_mtx_unlock(&queue->lock);
+}
+
+// Close both directions of the pair, failing all pending operations on
+// either side. Note that closing one pipe end closes the peer too.
+static void
+inproc_pipe_close(void *arg)
+{
+ inproc_pipe *pipe = arg;
+ inproc_pair *pair = pipe->pair;
+
+ for (int i = 0; i < 2; i++) {
+ inproc_queue *queue = &pair->queues[i];
+ nni_mtx_lock(&queue->lock);
+ queue->closed = true;
+ inproc_queue_run_closed(queue);
+ nni_mtx_unlock(&queue->lock);
+ }
+}
+
+// Return the peer's SP protocol number, captured at rendezvous time.
+static uint16_t
+inproc_pipe_peer(void *arg)
+{
+ inproc_pipe *pipe = arg;
+
+ return (pipe->peer);
+}
+
+// Both local and remote address are the same inproc name, so one getter
+// serves NNG_OPT_LOCADDR and NNG_OPT_REMADDR.
+static int
+inproc_pipe_get_addr(void *arg, void *buf, size_t *szp, nni_opt_type t)
+{
+ inproc_pipe *p = arg;
+ nni_sockaddr sa;
+
+ memset(&sa, 0, sizeof(sa));
+ sa.s_inproc.sa_family = NNG_AF_INPROC;
+ nni_strlcpy(sa.s_inproc.sa_name, p->addr, sizeof(sa.s_inproc.sa_name));
+ return (nni_copyout_sockaddr(&sa, buf, szp, t));
+}
+
+// Allocate dialer endpoint state. The raw URL string is borrowed (not
+// copied); it must outlive the endpoint.
+static int
+inproc_dialer_init(void **epp, nni_url *url, nni_dialer *ndialer)
+{
+ inproc_ep *ep;
+ nni_sock * sock = nni_dialer_sock(ndialer);
+
+ if ((ep = NNI_ALLOC_STRUCT(ep)) == NULL) {
+ return (NNG_ENOMEM);
+ }
+ nni_mtx_init(&ep->mtx);
+
+ ep->listener = false;
+ ep->proto = nni_sock_proto_id(sock);
+ ep->rcvmax = 0;
+ NNI_LIST_INIT(&ep->clients, inproc_ep, node);
+ nni_aio_list_init(&ep->aios);
+
+ ep->addr = url->u_rawurl; // we match on the full URL.
+
+ *epp = ep;
+ return (0);
+}
+
+// Allocate listener endpoint state; mirrors inproc_dialer_init except
+// for the listener flag.
+static int
+inproc_listener_init(void **epp, nni_url *url, nni_listener *nlistener)
+{
+ inproc_ep *ep;
+ nni_sock * sock = nni_listener_sock(nlistener);
+
+ if ((ep = NNI_ALLOC_STRUCT(ep)) == NULL) {
+ return (NNG_ENOMEM);
+ }
+ nni_mtx_init(&ep->mtx);
+
+ ep->listener = true;
+ ep->proto = nni_sock_proto_id(sock);
+ ep->rcvmax = 0;
+ NNI_LIST_INIT(&ep->clients, inproc_ep, node);
+ nni_aio_list_init(&ep->aios);
+
+ ep->addr = url->u_rawurl; // we match on the full URL.
+
+ *epp = ep;
+ return (0);
+}
+
+// Release endpoint state; the core guarantees close was called first.
+static void
+inproc_ep_fini(void *arg)
+{
+ inproc_ep *ep = arg;
+ nni_mtx_fini(&ep->mtx);
+ NNI_FREE_STRUCT(ep);
+}
+
+// Complete a pending connect/accept aio. On success the new pipe is
+// handed up as output 0; on failure no pipe may be attached. A dialer
+// with no remaining pending aios is unhooked from the server's client
+// list. Caller must hold nni_inproc.mx.
+static void
+inproc_conn_finish(nni_aio *aio, int rv, inproc_ep *ep, inproc_pipe *pipe)
+{
+ nni_aio_list_remove(aio);
+
+ if ((!ep->listener) && nni_list_empty(&ep->aios)) {
+ nni_list_node_remove(&ep->node);
+ }
+
+ if (rv == 0) {
+ nni_aio_set_output(aio, 0, pipe);
+ nni_aio_finish(aio, 0, 0);
+ } else {
+ NNI_ASSERT(pipe == NULL);
+ nni_aio_finish_error(aio, rv);
+ }
+}
+
+// Close an endpoint: deregister it from the global server list, refuse
+// any dialers waiting on it, and fail its own pending aios.
+static void
+inproc_ep_close(void *arg)
+{
+ inproc_ep *ep = arg;
+ inproc_ep *client;
+ nni_aio * aio;
+
+ nni_mtx_lock(&nni_inproc.mx);
+ if (nni_list_active(&nni_inproc.servers, ep)) {
+ nni_list_remove(&nni_inproc.servers, ep);
+ }
+ // Notify any waiting clients that we are closed.
+ while ((client = nni_list_first(&ep->clients)) != NULL) {
+ while ((aio = nni_list_first(&client->aios)) != NULL) {
+ inproc_conn_finish(aio, NNG_ECONNREFUSED, ep, NULL);
+ }
+ nni_list_remove(&ep->clients, client);
+ }
+ while ((aio = nni_list_first(&ep->aios)) != NULL) {
+ inproc_conn_finish(aio, NNG_ECLOSED, ep, NULL);
+ }
+ nni_mtx_unlock(&nni_inproc.mx);
+}
+
+// Rendezvous: pair each waiting client connect aio with a pending
+// server accept aio, building a pipe pair (two queues, two pipe ends,
+// a refcount of 2) for each match. Caller must hold nni_inproc.mx.
+static void
+inproc_accept_clients(inproc_ep *srv)
+{
+ inproc_ep *cli, *nclient;
+
+ // Pre-fetch the next client so removal inside the loop is safe.
+ nclient = nni_list_first(&srv->clients);
+ while ((cli = nclient) != NULL) {
+ nni_aio *caio;
+ nclient = nni_list_next(&srv->clients, nclient);
+ NNI_LIST_FOREACH (&cli->aios, caio) {
+
+ inproc_pipe *cpipe;
+ inproc_pipe *spipe;
+ inproc_pair *pair;
+ nni_aio * saio;
+ int rv;
+
+ if ((saio = nni_list_first(&srv->aios)) == NULL) {
+ // No outstanding accept() calls.
+ break;
+ }
+
+ if ((pair = NNI_ALLOC_STRUCT(pair)) == NULL) {
+ inproc_conn_finish(
+ caio, NNG_ENOMEM, cli, NULL);
+ inproc_conn_finish(
+ saio, NNG_ENOMEM, srv, NULL);
+ continue;
+ }
+ for (int i = 0; i < 2; i++) {
+ nni_aio_list_init(&pair->queues[i].readers);
+ nni_aio_list_init(&pair->queues[i].writers);
+ nni_mtx_init(&pair->queues[i].lock);
+ }
+ // One reference per pipe end.
+ nni_atomic_init(&pair->ref);
+ nni_atomic_set(&pair->ref, 2);
+
+ spipe = cpipe = NULL;
+ if (((rv = inproc_pipe_alloc(&cpipe, cli)) != 0) ||
+ ((rv = inproc_pipe_alloc(&spipe, srv)) != 0)) {
+
+ if (cpipe != NULL) {
+ inproc_pipe_fini(cpipe);
+ }
+ if (spipe != NULL) {
+ inproc_pipe_fini(spipe);
+ }
+ inproc_conn_finish(caio, rv, cli, NULL);
+ inproc_conn_finish(saio, rv, srv, NULL);
+ inproc_pair_destroy(pair);
+ continue;
+ }
+
+ // Cross-wire: each side's send queue is the other
+ // side's receive queue.
+ cpipe->peer = spipe->proto;
+ spipe->peer = cpipe->proto;
+ cpipe->pair = pair;
+ spipe->pair = pair;
+ cpipe->send_queue = &pair->queues[0];
+ cpipe->recv_queue = &pair->queues[1];
+ spipe->send_queue = &pair->queues[1];
+ spipe->recv_queue = &pair->queues[0];
+
+ inproc_conn_finish(caio, 0, cli, cpipe);
+ inproc_conn_finish(saio, 0, srv, spipe);
+ }
+
+ if (nni_list_first(&cli->aios) == NULL) {
+ // No more outstanding client connects.
+ // Normally there should only be one.
+ if (nni_list_active(&srv->clients, cli)) {
+ nni_list_remove(&srv->clients, cli);
+ }
+ }
+ }
+}
+
+// Cancellation callback for a pending connect/accept aio; also drops
+// the endpoint from whatever list it was waiting on.
+static void
+inproc_ep_cancel(nni_aio *aio, void *arg, int rv)
+{
+ inproc_ep *ep = arg;
+
+ nni_mtx_lock(&nni_inproc.mx);
+ if (nni_aio_list_active(aio)) {
+ nni_aio_list_remove(aio);
+ nni_list_node_remove(&ep->node);
+ nni_aio_finish_error(aio, rv);
+ }
+ nni_mtx_unlock(&nni_inproc.mx);
+}
+
+// Dial: look up a bound listener with a matching URL, enqueue ourselves
+// on its client list, and attempt an immediate rendezvous.
+static void
+inproc_ep_connect(void *arg, nni_aio *aio)
+{
+ inproc_ep *ep = arg;
+ inproc_ep *server;
+ int rv;
+
+ if (nni_aio_begin(aio) != 0) {
+ return;
+ }
+
+ nni_mtx_lock(&nni_inproc.mx);
+
+ // Find a server.
+ // NOTE(review): relies on the iterator being NULL after an
+ // exhausted NNI_LIST_FOREACH -- confirm macro semantics.
+ NNI_LIST_FOREACH (&nni_inproc.servers, server) {
+ if (strcmp(server->addr, ep->addr) == 0) {
+ break;
+ }
+ }
+ if (server == NULL) {
+ nni_mtx_unlock(&nni_inproc.mx);
+ nni_aio_finish_error(aio, NNG_ECONNREFUSED);
+ return;
+ }
+
+ // We don't have to worry about the case where a zero timeout
+ // on connect was specified, as there is no option to specify
+ // that in the upper API.
+ if ((rv = nni_aio_schedule(aio, inproc_ep_cancel, ep)) != 0) {
+ nni_mtx_unlock(&nni_inproc.mx);
+ nni_aio_finish_error(aio, rv);
+ return;
+ }
+
+ nni_list_append(&server->clients, ep);
+ nni_aio_list_append(&ep->aios, aio);
+
+ inproc_accept_clients(server);
+ nni_mtx_unlock(&nni_inproc.mx);
+}
+
+// Bind: register this listener in the global table, enforcing URL
+// uniqueness (NNG_EADDRINUSE on collision).
+static int
+inproc_ep_bind(void *arg)
+{
+ inproc_ep *ep = arg;
+ inproc_ep *srch;
+ nni_list * list = &nni_inproc.servers;
+
+ nni_mtx_lock(&nni_inproc.mx);
+ NNI_LIST_FOREACH (list, srch) {
+ if (strcmp(srch->addr, ep->addr) == 0) {
+ nni_mtx_unlock(&nni_inproc.mx);
+ return (NNG_EADDRINUSE);
+ }
+ }
+ nni_list_append(list, ep);
+ nni_mtx_unlock(&nni_inproc.mx);
+ return (0);
+}
+
+// Accept: queue the aio on our pending list and try to match against
+// any waiting dialers.
+static void
+inproc_ep_accept(void *arg, nni_aio *aio)
+{
+ inproc_ep *ep = arg;
+ int rv;
+
+ if (nni_aio_begin(aio) != 0) {
+ return;
+ }
+
+ nni_mtx_lock(&nni_inproc.mx);
+
+ // We need not worry about the case where a non-blocking
+ // accept was tried -- there is no API to do such a thing.
+ if ((rv = nni_aio_schedule(aio, inproc_ep_cancel, ep)) != 0) {
+ nni_mtx_unlock(&nni_inproc.mx);
+ nni_aio_finish_error(aio, rv);
+ return;
+ }
+
+ // We are already on the master list of servers, thanks to bind.
+ // Insert us into pending server aios, and then run accept list.
+ nni_aio_list_append(&ep->aios, aio);
+ inproc_accept_clients(ep);
+ nni_mtx_unlock(&nni_inproc.mx);
+}
+
+// Getter for NNG_OPT_RECVMAXSZ (stored but not enforced by inproc; see
+// the TODO in inproc_queue_run).
+static int
+inproc_ep_get_recvmaxsz(void *arg, void *v, size_t *szp, nni_opt_type t)
+{
+ inproc_ep *ep = arg;
+ int rv;
+ nni_mtx_lock(&ep->mtx);
+ rv = nni_copyout_size(ep->rcvmax, v, szp, t);
+ nni_mtx_unlock(&ep->mtx);
+ return (rv);
+}
+
+// Setter for NNG_OPT_RECVMAXSZ; 0 means unlimited.
+static int
+inproc_ep_set_recvmaxsz(void *arg, const void *v, size_t sz, nni_opt_type t)
+{
+ inproc_ep *ep = arg;
+ size_t val;
+ int rv;
+ if ((rv = nni_copyin_size(&val, v, sz, 0, NNI_MAXSZ, t)) == 0) {
+ nni_mtx_lock(&ep->mtx);
+ ep->rcvmax = val;
+ nni_mtx_unlock(&ep->mtx);
+ }
+ return (rv);
+}
+
+// Address getter shared by NNG_OPT_LOCADDR and NNG_OPT_REMADDR.
+// NOTE(review): unlike inproc_pipe_get_addr, sa is not zeroed first --
+// nni_strlcpy terminates sa_name, but padding bytes stay uninitialized.
+static int
+inproc_ep_get_addr(void *arg, void *v, size_t *szp, nni_opt_type t)
+{
+ inproc_ep * ep = arg;
+ nng_sockaddr sa;
+ sa.s_inproc.sa_family = NNG_AF_INPROC;
+ nni_strlcpy(
+ sa.s_inproc.sa_name, ep->addr, sizeof(sa.s_inproc.sa_name));
+ return (nni_copyout_sockaddr(&sa, v, szp, t));
+}
+
+static const nni_option inproc_pipe_options[] = {
+ {
+ .o_name = NNG_OPT_LOCADDR,
+ .o_get = inproc_pipe_get_addr,
+ },
+ {
+ .o_name = NNG_OPT_REMADDR,
+ .o_get = inproc_pipe_get_addr,
+ },
+ // terminate list
+ {
+ .o_name = NULL,
+ },
+};
+
+// Dispatch pipe option reads through the inproc_pipe_options table.
+static int
+inproc_pipe_getopt(
+    void *arg, const char *name, void *v, size_t *szp, nni_type t)
+{
+ return (nni_getopt(inproc_pipe_options, name, arg, v, szp, t));
+}
+
+static nni_tran_pipe_ops inproc_pipe_ops = {
+ .p_init = inproc_pipe_init,
+ .p_fini = inproc_pipe_fini,
+ .p_send = inproc_pipe_send,
+ .p_recv = inproc_pipe_recv,
+ .p_close = inproc_pipe_close,
+ .p_peer = inproc_pipe_peer,
+ .p_getopt = inproc_pipe_getopt,
+};
+
+static const nni_option inproc_ep_options[] = {
+ {
+ .o_name = NNG_OPT_RECVMAXSZ,
+ .o_get = inproc_ep_get_recvmaxsz,
+ .o_set = inproc_ep_set_recvmaxsz,
+ },
+ {
+ .o_name = NNG_OPT_LOCADDR,
+ .o_get = inproc_ep_get_addr,
+ },
+ {
+ .o_name = NNG_OPT_REMADDR,
+ .o_get = inproc_ep_get_addr,
+ },
+ // terminate list
+ {
+ .o_name = NULL,
+ },
+};
+
+// Dispatch endpoint option reads through the inproc_ep_options table.
+static int
+inproc_ep_getopt(void *arg, const char *name, void *v, size_t *szp, nni_type t)
+{
+ return (nni_getopt(inproc_ep_options, name, arg, v, szp, t));
+}
+
+// Dispatch endpoint option writes through the inproc_ep_options table.
+static int
+inproc_ep_setopt(
+    void *arg, const char *name, const void *v, size_t sz, nni_type t)
+{
+ return (nni_setopt(inproc_ep_options, name, arg, v, sz, t));
+}
+
+static nni_tran_dialer_ops inproc_dialer_ops = {
+ .d_init = inproc_dialer_init,
+ .d_fini = inproc_ep_fini,
+ .d_connect = inproc_ep_connect,
+ .d_close = inproc_ep_close,
+ .d_getopt = inproc_ep_getopt,
+ .d_setopt = inproc_ep_setopt,
+};
+
+static nni_tran_listener_ops inproc_listener_ops = {
+ .l_init = inproc_listener_init,
+ .l_fini = inproc_ep_fini,
+ .l_bind = inproc_ep_bind,
+ .l_accept = inproc_ep_accept,
+ .l_close = inproc_ep_close,
+ .l_getopt = inproc_ep_getopt,
+ .l_setopt = inproc_ep_setopt,
+};
+
+// This is the inproc transport linkage, and should be the only global
+// symbol in this entire file.
+struct nni_tran nni_inproc_tran = {
+ .tran_version = NNI_TRANSPORT_VERSION,
+ .tran_scheme = "inproc",
+ .tran_dialer = &inproc_dialer_ops,
+ .tran_listener = &inproc_listener_ops,
+ .tran_pipe = &inproc_pipe_ops,
+ .tran_init = inproc_init,
+ .tran_fini = inproc_fini,
+};
+
+// Public entry point: register the inproc transport with the core.
+int
+nng_inproc_register(void)
+{
+ return (nni_tran_register(&nni_inproc_tran));
+}
diff --git a/src/sp/transport/ipc/CMakeLists.txt b/src/sp/transport/ipc/CMakeLists.txt
new file mode 100644
index 00000000..c9927f75
--- /dev/null
+++ b/src/sp/transport/ipc/CMakeLists.txt
@@ -0,0 +1,17 @@
+#
+# Copyright 2020 Staysail Systems, Inc. <info@staysail.tech>
+# Copyright 2018 Capitar IT Group BV <info@capitar.com>
+#
+# This software is supplied under the terms of the MIT License, a
+# copy of which should be located in the distribution where this
+# file was obtained (LICENSE.txt). A copy of the license may also be
+# found online at https://opensource.org/licenses/MIT.
+#
+
+# ipc transport
+nng_directory(ipc)
+
+nng_sources_if(NNG_TRANSPORT_IPC ipc.c)
+nng_headers_if(NNG_TRANSPORT_IPC nng/transport/ipc/ipc.h)
+nng_defines_if(NNG_TRANSPORT_IPC NNG_TRANSPORT_IPC)
+nng_test_if(NNG_TRANSPORT_IPC ipc_test) \ No newline at end of file
diff --git a/src/sp/transport/ipc/ipc.c b/src/sp/transport/ipc/ipc.c
new file mode 100644
index 00000000..efaa823c
--- /dev/null
+++ b/src/sp/transport/ipc/ipc.c
@@ -0,0 +1,1171 @@
+//
+// Copyright 2020 Staysail Systems, Inc. <info@staysail.tech>
+// Copyright 2018 Capitar IT Group BV <info@capitar.com>
+// Copyright 2019 Devolutions <info@devolutions.net>
+//
+// This software is supplied under the terms of the MIT License, a
+// copy of which should be located in the distribution where this
+// file was obtained (LICENSE.txt). A copy of the license may also be
+// found online at https://opensource.org/licenses/MIT.
+//
+
+#include <stdio.h>
+#include <stdlib.h>
+
+#include "core/nng_impl.h"
+
+#include <nng/transport/ipc/ipc.h>
+
+// IPC transport. Platform specific IPC operations must be
+// supplied as well. Normally the IPC is UNIX domain sockets or
+// Windows named pipes. Other platforms could use other mechanisms,
+// but all implementations on the platform must use the same mechanism.
+
+typedef struct ipc_pipe ipc_pipe;
+typedef struct ipc_ep ipc_ep;
+
+// ipc_pipe is one end of an IPC connection.
+// ipc_pipe is one end of an IPC connection.
+// tx_head/rx_head hold the 9-byte framing header (1 type byte plus a
+// 64-bit big-endian length) for message exchange, and double as the
+// 8-byte SP handshake buffer during negotiation.
+struct ipc_pipe {
+ nng_stream * conn;
+ uint16_t peer;
+ uint16_t proto;
+ size_t rcv_max;
+ bool closed;
+ nni_sockaddr sa;
+ ipc_ep * ep;
+ nni_pipe * pipe;
+ nni_list_node node;
+ nni_atomic_flag reaped;
+ nni_reap_node reap;
+ uint8_t tx_head[1 + sizeof(uint64_t)];
+ uint8_t rx_head[1 + sizeof(uint64_t)];
+ size_t got_tx_head;
+ size_t got_rx_head;
+ size_t want_tx_head;
+ size_t want_rx_head;
+ nni_list recv_q;
+ nni_list send_q;
+ nni_aio tx_aio;
+ nni_aio rx_aio;
+ nni_aio neg_aio;
+ nni_msg * rx_msg;
+ nni_mtx mtx;
+};
+
+// Endpoint state shared by IPC dialers and listeners. ref_cnt counts
+// live pipes plus in-flight operations; fini is deferred (via the reap
+// list) until it drops to zero.
+struct ipc_ep {
+ nni_mtx mtx;
+ nni_sockaddr sa;
+ size_t rcv_max;
+ uint16_t proto;
+ bool started;
+ bool closed;
+ bool fini;
+ int ref_cnt;
+ nng_stream_dialer * dialer;
+ nng_stream_listener *listener;
+ nni_aio * user_aio;
+ nni_aio * conn_aio;
+ nni_aio * time_aio;
+ nni_list busy_pipes; // busy pipes -- ones passed to socket
+ nni_list wait_pipes; // pipes waiting to match to socket
+ nni_list neg_pipes; // pipes busy negotiating
+ nni_reap_node reap;
+#ifdef NNG_ENABLE_STATS
+ nni_stat_item st_rcv_max;
+#endif
+};
+
+static void ipc_pipe_send_start(ipc_pipe *p);
+static void ipc_pipe_recv_start(ipc_pipe *p);
+static void ipc_pipe_send_cb(void *);
+static void ipc_pipe_recv_cb(void *);
+static void ipc_pipe_neg_cb(void *);
+static void ipc_pipe_fini(void *);
+static void ipc_ep_fini(void *);
+
+static nni_reap_list ipc_ep_reap_list = {
+ .rl_offset = offsetof(ipc_ep, reap),
+ .rl_func = ipc_ep_fini,
+};
+
+static nni_reap_list ipc_pipe_reap_list = {
+ .rl_offset = offsetof(ipc_pipe, reap),
+ .rl_func = ipc_pipe_fini,
+};
+
+// IPC transport has no global state to set up.
+static int
+ipc_tran_init(void)
+{
+ return (0);
+}
+
+// Matching no-op teardown for ipc_tran_init.
+static void
+ipc_tran_fini(void)
+{
+}
+
+// Close the pipe: mark it closed, abort all of its aios, and shut the
+// underlying stream so in-flight I/O unwinds.
+static void
+ipc_pipe_close(void *arg)
+{
+ ipc_pipe *p = arg;
+
+ nni_mtx_lock(&p->mtx);
+ p->closed = true;
+ nni_mtx_unlock(&p->mtx);
+
+ nni_aio_close(&p->rx_aio);
+ nni_aio_close(&p->tx_aio);
+ nni_aio_close(&p->neg_aio);
+
+ nng_stream_close(p->conn);
+}
+
+// Block until all of the pipe's aio callbacks have fully drained.
+static void
+ipc_pipe_stop(void *arg)
+{
+ ipc_pipe *p = arg;
+
+ nni_aio_stop(&p->rx_aio);
+ nni_aio_stop(&p->tx_aio);
+ nni_aio_stop(&p->neg_aio);
+}
+
+// Record the owning nni_pipe so stats/errors can be bumped against it.
+static int
+ipc_pipe_init(void *arg, nni_pipe *pipe)
+{
+ ipc_pipe *p = arg;
+ p->pipe = pipe;
+ return (0);
+}
+
+// Destroy a pipe. Drops the endpoint's reference; if the endpoint was
+// waiting in fini for its last pipe, it is reaped here.
+static void
+ipc_pipe_fini(void *arg)
+{
+ ipc_pipe *p = arg;
+ ipc_ep * ep;
+
+ ipc_pipe_stop(p);
+ if ((ep = p->ep) != NULL) {
+ nni_mtx_lock(&ep->mtx);
+ nni_list_node_remove(&p->node);
+ ep->ref_cnt--;
+ if (ep->fini && (ep->ref_cnt == 0)) {
+ nni_reap(&ipc_ep_reap_list, ep);
+ }
+ nni_mtx_unlock(&ep->mtx);
+ }
+ nni_aio_fini(&p->rx_aio);
+ nni_aio_fini(&p->tx_aio);
+ nni_aio_fini(&p->neg_aio);
+ nng_stream_free(p->conn);
+ if (p->rx_msg) {
+ nni_msg_free(p->rx_msg);
+ }
+ nni_mtx_fini(&p->mtx);
+ NNI_FREE_STRUCT(p);
+}
+
+// Schedule asynchronous destruction exactly once; the atomic flag
+// guards against a double reap from concurrent error paths.
+static void
+ipc_pipe_reap(ipc_pipe *p)
+{
+ if (!nni_atomic_flag_test_and_set(&p->reaped)) {
+ if (p->conn != NULL) {
+ nng_stream_close(p->conn);
+ }
+ nni_reap(&ipc_pipe_reap_list, p);
+ }
+}
+
+// Allocate and wire up a fresh pipe: lock, the three I/O aios with
+// their callbacks, and empty send/recv queues.
+static int
+ipc_pipe_alloc(ipc_pipe **pipe_p)
+{
+ ipc_pipe *p;
+
+ if ((p = NNI_ALLOC_STRUCT(p)) == NULL) {
+ return (NNG_ENOMEM);
+ }
+ nni_mtx_init(&p->mtx);
+ nni_aio_init(&p->tx_aio, ipc_pipe_send_cb, p);
+ nni_aio_init(&p->rx_aio, ipc_pipe_recv_cb, p);
+ nni_aio_init(&p->neg_aio, ipc_pipe_neg_cb, p);
+ nni_aio_list_init(&p->send_q);
+ nni_aio_list_init(&p->recv_q);
+ nni_atomic_flag_reset(&p->reaped);
+ *pipe_p = p;
+ return (0);
+}
+
+// Hand one fully-negotiated pipe up to the waiting user aio, if both
+// exist. Caller must hold ep->mtx.
+static void
+ipc_ep_match(ipc_ep *ep)
+{
+ nni_aio * aio;
+ ipc_pipe *p;
+
+ if (((aio = ep->user_aio) == NULL) ||
+ ((p = nni_list_first(&ep->wait_pipes)) == NULL)) {
+ return;
+ }
+ nni_list_remove(&ep->wait_pipes, p);
+ nni_list_append(&ep->busy_pipes, p);
+ ep->user_aio = NULL;
+ // Snapshot the size limit at match time.
+ p->rcv_max = ep->rcv_max;
+ nni_aio_set_output(aio, 0, p);
+ nni_aio_finish(aio, 0, 0);
+}
+
+// Completion callback for the SP handshake. Drives a small state
+// machine: first send our 8-byte header, then receive the peer's,
+// validate it, extract the peer protocol id, and move the pipe to the
+// wait list for matching. Any failure reaps the pipe.
+static void
+ipc_pipe_neg_cb(void *arg)
+{
+ ipc_pipe *p = arg;
+ ipc_ep * ep = p->ep;
+ nni_aio * aio = &p->neg_aio;
+ nni_aio * user_aio;
+ int rv;
+
+ nni_mtx_lock(&ep->mtx);
+ if ((rv = nni_aio_result(aio)) != 0) {
+ goto error;
+ }
+
+ // We start transmitting before we receive.
+ if (p->got_tx_head < p->want_tx_head) {
+ p->got_tx_head += nni_aio_count(aio);
+ } else if (p->got_rx_head < p->want_rx_head) {
+ p->got_rx_head += nni_aio_count(aio);
+ }
+ // Resubmit for any short write of our header.
+ if (p->got_tx_head < p->want_tx_head) {
+ nni_iov iov;
+ iov.iov_len = p->want_tx_head - p->got_tx_head;
+ iov.iov_buf = &p->tx_head[p->got_tx_head];
+ nni_aio_set_iov(aio, 1, &iov);
+ // send it down...
+ nng_stream_send(p->conn, aio);
+ nni_mtx_unlock(&p->ep->mtx);
+ return;
+ }
+ // Then read (possibly piecewise) the peer's header.
+ if (p->got_rx_head < p->want_rx_head) {
+ nni_iov iov;
+ iov.iov_len = p->want_rx_head - p->got_rx_head;
+ iov.iov_buf = &p->rx_head[p->got_rx_head];
+ nni_aio_set_iov(aio, 1, &iov);
+ nng_stream_recv(p->conn, aio);
+ nni_mtx_unlock(&p->ep->mtx);
+ return;
+ }
+ // We have both sent and received the headers. Lets check the
+ // receive side header: \0 S P \0 <proto16> \0 \0.
+ if ((p->rx_head[0] != 0) || (p->rx_head[1] != 'S') ||
+ (p->rx_head[2] != 'P') || (p->rx_head[3] != 0) ||
+ (p->rx_head[6] != 0) || (p->rx_head[7] != 0)) {
+ rv = NNG_EPROTO;
+ goto error;
+ }
+
+ NNI_GET16(&p->rx_head[4], p->peer);
+
+ // We are all ready now. We put this in the wait list, and
+ // then try to run the matcher.
+ nni_list_remove(&ep->neg_pipes, p);
+ nni_list_append(&ep->wait_pipes, p);
+
+ ipc_ep_match(ep);
+ nni_mtx_unlock(&ep->mtx);
+ return;
+
+error:
+
+ nng_stream_close(p->conn);
+ // If we are waiting to negotiate on a client side, then a failure
+ // here has to be passed to the user app.
+ if ((user_aio = ep->user_aio) != NULL) {
+ ep->user_aio = NULL;
+ nni_aio_finish_error(user_aio, rv);
+ }
+ nni_mtx_unlock(&ep->mtx);
+ ipc_pipe_reap(p);
+}
+
+// Completion callback for a stream write. Handles short writes by
+// resubmitting, then completes the head-of-queue user aio and kicks off
+// the next queued send.
+static void
+ipc_pipe_send_cb(void *arg)
+{
+ ipc_pipe *p = arg;
+ int rv;
+ nni_aio * aio;
+ size_t n;
+ nni_msg * msg;
+ nni_aio * tx_aio = &p->tx_aio;
+
+ nni_mtx_lock(&p->mtx);
+ if ((rv = nni_aio_result(tx_aio)) != 0) {
+ nni_pipe_bump_error(p->pipe, rv);
+ // Intentionally we do not queue up another transfer.
+ // There's an excellent chance that the pipe is no longer
+ // usable, with a partial transfer.
+ // The protocol should see this error, and close the
+ // pipe itself, we hope.
+
+ while ((aio = nni_list_first(&p->send_q)) != NULL) {
+ nni_aio_list_remove(aio);
+ nni_aio_finish_error(aio, rv);
+ }
+ nni_mtx_unlock(&p->mtx);
+ return;
+ }
+
+ n = nni_aio_count(tx_aio);
+ nni_aio_iov_advance(tx_aio, n);
+ if (nni_aio_iov_count(tx_aio) != 0) {
+ // Short write: push the remainder.
+ nng_stream_send(p->conn, tx_aio);
+ nni_mtx_unlock(&p->mtx);
+ return;
+ }
+
+ aio = nni_list_first(&p->send_q);
+ nni_aio_list_remove(aio);
+ ipc_pipe_send_start(p);
+
+ msg = nni_aio_get_msg(aio);
+ n = nni_msg_len(msg);
+ nni_pipe_bump_tx(p->pipe, n);
+ nni_mtx_unlock(&p->mtx);
+
+ // The transport owns the message on success; free it and report
+ // the body length to the user outside the lock.
+ nni_aio_set_msg(aio, NULL);
+ nni_msg_free(msg);
+ nni_aio_finish_sync(aio, 0, n);
+}
+
+// Completion callback for a stream read. Two-phase receive: first the
+// 9-byte framing header (type + 64-bit length), then the message body.
+// Enforces rcv_max, delivers the message to the head-of-queue user aio,
+// and starts the next receive. Errors fail all queued receivers.
+static void
+ipc_pipe_recv_cb(void *arg)
+{
+ ipc_pipe *p = arg;
+ nni_aio * aio;
+ int rv;
+ size_t n;
+ nni_msg * msg;
+ nni_aio * rx_aio = &p->rx_aio;
+
+ nni_mtx_lock(&p->mtx);
+
+ if ((rv = nni_aio_result(rx_aio)) != 0) {
+ // Error on receive. This has to cause an error back
+ // to the user. Also, if we had allocated an rx_msg, lets
+ // toss it.
+ goto error;
+ }
+
+ n = nni_aio_count(rx_aio);
+ nni_aio_iov_advance(rx_aio, n);
+ if (nni_aio_iov_count(rx_aio) != 0) {
+ // Was this a partial read? If so then resubmit for the rest.
+ nng_stream_recv(p->conn, rx_aio);
+ nni_mtx_unlock(&p->mtx);
+ return;
+ }
+
+ // If we don't have a message yet, we were reading the message
+ // header, which is just the length. This tells us the size of the
+ // message to allocate and how much more to expect.
+ if (p->rx_msg == NULL) {
+ uint64_t len;
+
+ // Check to make sure we got msg type 1.
+ if (p->rx_head[0] != 1) {
+ rv = NNG_EPROTO;
+ goto error;
+ }
+
+ // We should have gotten a message header.
+ NNI_GET64(p->rx_head + 1, len);
+
+ // Make sure the message payload is not too big. If it is
+ // the caller will shut down the pipe.
+ // (rcv_max == 0 means unlimited.)
+ if ((len > p->rcv_max) && (p->rcv_max > 0)) {
+ rv = NNG_EMSGSIZE;
+ goto error;
+ }
+
+ // Note that all IO on this pipe is blocked behind this
+ // allocation. We could possibly look at using a separate
+ // lock for the read side in the future, so that we allow
+ // transmits to proceed normally. In practice this is
+ // unlikely to be much of an issue though.
+ if ((rv = nni_msg_alloc(&p->rx_msg, (size_t) len)) != 0) {
+ goto error;
+ }
+
+ if (len != 0) {
+ nni_iov iov;
+ // Submit the rest of the data for a read -- we want to
+ // read the entire message now.
+ iov.iov_buf = nni_msg_body(p->rx_msg);
+ iov.iov_len = (size_t) len;
+
+ nni_aio_set_iov(rx_aio, 1, &iov);
+ nng_stream_recv(p->conn, rx_aio);
+ nni_mtx_unlock(&p->mtx);
+ return;
+ }
+ }
+
+ // Otherwise we got a message read completely. Let the user know the
+ // good news.
+
+ aio = nni_list_first(&p->recv_q);
+ nni_aio_list_remove(aio);
+ msg = p->rx_msg;
+ p->rx_msg = NULL;
+ n = nni_msg_len(msg);
+ nni_pipe_bump_rx(p->pipe, n);
+ ipc_pipe_recv_start(p);
+ nni_mtx_unlock(&p->mtx);
+
+ nni_aio_set_msg(aio, msg);
+ nni_aio_finish_sync(aio, 0, n);
+ return;
+
+error:
+ while ((aio = nni_list_first(&p->recv_q)) != NULL) {
+ nni_aio_list_remove(aio);
+ nni_aio_finish_error(aio, rv);
+ }
+ msg = p->rx_msg;
+ p->rx_msg = NULL;
+ nni_pipe_bump_error(p->pipe, rv);
+ // Intentionally, we do not queue up another receive.
+ // The protocol should notice this error and close the pipe.
+ nni_mtx_unlock(&p->mtx);
+
+ // nni_msg_free(NULL) is safe if no rx_msg had been allocated.
+ nni_msg_free(msg);
+}
+
+// Cancel a queued send. If it is the in-flight head of queue, abort the
+// underlying transfer and let the tx callback finish the aio.
+static void
+ipc_pipe_send_cancel(nni_aio *aio, void *arg, int rv)
+{
+ ipc_pipe *p = arg;
+
+ nni_mtx_lock(&p->mtx);
+ if (!nni_aio_list_active(aio)) {
+ nni_mtx_unlock(&p->mtx);
+ return;
+ }
+ // If this is being sent, then cancel the pending transfer.
+ // The callback on the tx_aio will cause the user aio to
+ // be canceled too.
+ if (nni_list_first(&p->send_q) == aio) {
+ nni_aio_abort(&p->tx_aio, rv);
+ nni_mtx_unlock(&p->mtx);
+ return;
+ }
+ nni_aio_list_remove(aio);
+ nni_mtx_unlock(&p->mtx);
+
+ nni_aio_finish_error(aio, rv);
+}
+
+// Begin transmitting the message at the head of the send queue: build
+// the 9-byte framing header and scatter-gather (header, msg header,
+// msg body) into up to three iovs. Caller must hold p->mtx.
+static void
+ipc_pipe_send_start(ipc_pipe *p)
+{
+ nni_aio *aio;
+ nni_msg *msg;
+ int nio;
+ nni_iov iov[3];
+ uint64_t len;
+
+ if (p->closed) {
+ while ((aio = nni_list_first(&p->send_q)) != NULL) {
+ nni_list_remove(&p->send_q, aio);
+ nni_aio_finish_error(aio, NNG_ECLOSED);
+ }
+ return;
+ }
+ if ((aio = nni_list_first(&p->send_q)) == NULL) {
+ return;
+ }
+
+ // This runs to send the message.
+ // Wire length covers the SP header and the body together.
+ msg = nni_aio_get_msg(aio);
+ len = nni_msg_len(msg) + nni_msg_header_len(msg);
+
+ p->tx_head[0] = 1; // message type, 1.
+ NNI_PUT64(p->tx_head + 1, len);
+
+ nio = 0;
+ iov[0].iov_buf = p->tx_head;
+ iov[0].iov_len = sizeof(p->tx_head);
+ nio++;
+ if (nni_msg_header_len(msg) > 0) {
+ iov[nio].iov_buf = nni_msg_header(msg);
+ iov[nio].iov_len = nni_msg_header_len(msg);
+ nio++;
+ }
+ if (nni_msg_len(msg) > 0) {
+ iov[nio].iov_buf = nni_msg_body(msg);
+ iov[nio].iov_len = nni_msg_len(msg);
+ nio++;
+ }
+ nni_aio_set_iov(&p->tx_aio, nio, iov);
+ nng_stream_send(p->conn, &p->tx_aio);
+}
+
+// User-facing send: append to the send queue; if the queue was empty,
+// this aio is the head and transmission starts immediately.
+static void
+ipc_pipe_send(void *arg, nni_aio *aio)
+{
+ ipc_pipe *p = arg;
+ int rv;
+
+ if (nni_aio_begin(aio) != 0) {
+ return;
+ }
+ nni_mtx_lock(&p->mtx);
+ if ((rv = nni_aio_schedule(aio, ipc_pipe_send_cancel, p)) != 0) {
+ nni_mtx_unlock(&p->mtx);
+ nni_aio_finish_error(aio, rv);
+ return;
+ }
+ nni_list_append(&p->send_q, aio);
+ if (nni_list_first(&p->send_q) == aio) {
+ ipc_pipe_send_start(p);
+ }
+ nni_mtx_unlock(&p->mtx);
+}
+
+// Cancel a queued receive; mirrors ipc_pipe_send_cancel for the rx side.
+static void
+ipc_pipe_recv_cancel(nni_aio *aio, void *arg, int rv)
+{
+ ipc_pipe *p = arg;
+
+ nni_mtx_lock(&p->mtx);
+ if (!nni_aio_list_active(aio)) {
+ nni_mtx_unlock(&p->mtx);
+ return;
+ }
+ // If receive in progress, then cancel the pending transfer.
+ // The callback on the rx_aio will cause the user aio to
+ // be canceled too.
+ if (nni_list_first(&p->recv_q) == aio) {
+ nni_aio_abort(&p->rx_aio, rv);
+ nni_mtx_unlock(&p->mtx);
+ return;
+ }
+ nni_aio_list_remove(aio);
+ nni_mtx_unlock(&p->mtx);
+ nni_aio_finish_error(aio, rv);
+}
+
+// Start reading the next message's framing header, but only if there is
+// a queued receiver. Caller must hold p->mtx and no rx_msg may be
+// pending.
+static void
+ipc_pipe_recv_start(ipc_pipe *p)
+{
+ nni_iov iov;
+ NNI_ASSERT(p->rx_msg == NULL);
+
+ if (p->closed) {
+ nni_aio *aio;
+ while ((aio = nni_list_first(&p->recv_q)) != NULL) {
+ nni_list_remove(&p->recv_q, aio);
+ nni_aio_finish_error(aio, NNG_ECLOSED);
+ }
+ return;
+ }
+ if (nni_list_empty(&p->recv_q)) {
+ return;
+ }
+
+ // Schedule a read of the IPC header.
+ iov.iov_buf = p->rx_head;
+ iov.iov_len = sizeof(p->rx_head);
+ nni_aio_set_iov(&p->rx_aio, 1, &iov);
+
+ nng_stream_recv(p->conn, &p->rx_aio);
+}
+
+// User-facing receive: reject if closed, queue the aio, and start the
+// header read when this aio becomes head of queue.
+static void
+ipc_pipe_recv(void *arg, nni_aio *aio)
+{
+ ipc_pipe *p = arg;
+ int rv;
+
+ if (nni_aio_begin(aio) != 0) {
+ return;
+ }
+ nni_mtx_lock(&p->mtx);
+ if (p->closed) {
+ nni_mtx_unlock(&p->mtx);
+ nni_aio_finish_error(aio, NNG_ECLOSED);
+ return;
+ }
+ if ((rv = nni_aio_schedule(aio, ipc_pipe_recv_cancel, p)) != 0) {
+ nni_mtx_unlock(&p->mtx);
+ nni_aio_finish_error(aio, rv);
+ return;
+ }
+
+ nni_list_append(&p->recv_q, aio);
+ if (nni_list_first(&p->recv_q) == aio) {
+ ipc_pipe_recv_start(p);
+ }
+ nni_mtx_unlock(&p->mtx);
+}
+
+// Return the peer protocol id learned during the SP handshake.
+static uint16_t
+ipc_pipe_peer(void *arg)
+{
+ ipc_pipe *p = arg;
+
+ return (p->peer);
+}
+
+// Attach a connected stream to the pipe and kick off the SP handshake:
+// queue our 8-byte header (\0 S P \0 proto \0 \0) for transmit, with a
+// fixed negotiation timeout. Caller must hold ep->mtx.
+static void
+ipc_pipe_start(ipc_pipe *p, nng_stream *conn, ipc_ep *ep)
+{
+ nni_iov iov;
+
+ ep->ref_cnt++;
+
+ p->conn = conn;
+ p->ep = ep;
+ p->proto = ep->proto;
+
+ p->tx_head[0] = 0;
+ p->tx_head[1] = 'S';
+ p->tx_head[2] = 'P';
+ p->tx_head[3] = 0;
+ NNI_PUT16(&p->tx_head[4], p->proto);
+ NNI_PUT16(&p->tx_head[6], 0);
+
+ p->got_rx_head = 0;
+ p->got_tx_head = 0;
+ p->want_rx_head = 8;
+ p->want_tx_head = 8;
+ iov.iov_len = 8;
+ iov.iov_buf = &p->tx_head[0];
+ nni_aio_set_iov(&p->neg_aio, 1, &iov);
+ nni_list_append(&ep->neg_pipes, p);
+
+ nni_aio_set_timeout(&p->neg_aio, 10000); // 10 sec timeout to negotiate
+ nng_stream_send(p->conn, &p->neg_aio);
+}
+
+// Close the endpoint: stop the retry timer, close the underlying
+// dialer/listener, close every tracked pipe in all three lists, and
+// fail any waiting user aio.
+static void
+ipc_ep_close(void *arg)
+{
+ ipc_ep * ep = arg;
+ ipc_pipe *p;
+
+ nni_mtx_lock(&ep->mtx);
+ ep->closed = true;
+ nni_aio_close(ep->time_aio);
+ if (ep->dialer != NULL) {
+ nng_stream_dialer_close(ep->dialer);
+ }
+ if (ep->listener != NULL) {
+ nng_stream_listener_close(ep->listener);
+ }
+ NNI_LIST_FOREACH (&ep->neg_pipes, p) {
+ ipc_pipe_close(p);
+ }
+ NNI_LIST_FOREACH (&ep->wait_pipes, p) {
+ ipc_pipe_close(p);
+ }
+ NNI_LIST_FOREACH (&ep->busy_pipes, p) {
+ ipc_pipe_close(p);
+ }
+ if (ep->user_aio != NULL) {
+ nni_aio_finish_error(ep->user_aio, NNG_ECLOSED);
+ ep->user_aio = NULL;
+ }
+ nni_mtx_unlock(&ep->mtx);
+}
+
+// Tear down the endpoint.  If pipes still reference it (ref_cnt != 0) we
+// only mark fini and return; presumably the last pipe's fini re-triggers
+// this teardown — TODO confirm against ipc_pipe_fini (not in this view).
+// NOTE(review): for dialers time_aio is never allocated; this relies on
+// nni_aio_stop/nni_aio_free accepting NULL.
+static void
+ipc_ep_fini(void *arg)
+{
+ ipc_ep *ep = arg;
+
+ nni_mtx_lock(&ep->mtx);
+ ep->fini = true;
+ if (ep->ref_cnt != 0) {
+ nni_mtx_unlock(&ep->mtx);
+ return;
+ }
+ nni_mtx_unlock(&ep->mtx);
+ nni_aio_stop(ep->time_aio);
+ nni_aio_stop(ep->conn_aio);
+ nng_stream_dialer_free(ep->dialer);
+ nng_stream_listener_free(ep->listener);
+ nni_aio_free(ep->time_aio);
+ nni_aio_free(ep->conn_aio);
+ nni_mtx_fini(&ep->mtx);
+ NNI_FREE_STRUCT(ep);
+}
+
+// Backoff timer callback: after a transient accept failure (see
+// ipc_ep_accept_cb), resubmit the accept once the sleep completes cleanly.
+static void
+ipc_ep_timer_cb(void *arg)
+{
+ ipc_ep *ep = arg;
+ nni_mtx_lock(&ep->mtx);
+ if (nni_aio_result(ep->time_aio) == 0) {
+ nng_stream_listener_accept(ep->listener, ep->conn_aio);
+ }
+ nni_mtx_unlock(&ep->mtx);
+}
+
+// Listener accept completion.  On success, start SP negotiation on the new
+// connection and immediately rearm the accept.  On failure, report the
+// error to any waiting user aio; for resource exhaustion back off via the
+// timer, otherwise rearm the accept right away (unless closed).
+static void
+ipc_ep_accept_cb(void *arg)
+{
+ ipc_ep * ep = arg;
+ nni_aio * aio = ep->conn_aio;
+ ipc_pipe * p;
+ int rv;
+ nng_stream *conn;
+
+ nni_mtx_lock(&ep->mtx);
+ if ((rv = nni_aio_result(aio)) != 0) {
+ goto error;
+ }
+
+ conn = nni_aio_get_output(aio, 0);
+ if ((rv = ipc_pipe_alloc(&p)) != 0) {
+ nng_stream_free(conn);
+ goto error;
+ }
+ if (ep->closed) {
+ // Endpoint closed while the accept was in flight; discard.
+ ipc_pipe_fini(p);
+ nng_stream_free(conn);
+ rv = NNG_ECLOSED;
+ goto error;
+ }
+ ipc_pipe_start(p, conn, ep);
+ nng_stream_listener_accept(ep->listener, ep->conn_aio);
+ nni_mtx_unlock(&ep->mtx);
+ return;
+
+error:
+ // When an error here occurs, let's send a notice up to the consumer.
+ // That way it can be reported properly.
+ if ((aio = ep->user_aio) != NULL) {
+ ep->user_aio = NULL;
+ nni_aio_finish_error(aio, rv);
+ }
+
+ switch (rv) {
+
+ case NNG_ENOMEM:
+ case NNG_ENOFILES:
+ // Transient resource exhaustion: retry after a short sleep.
+ nng_sleep_aio(10, ep->time_aio);
+ break;
+
+ default:
+ if (!ep->closed) {
+ nng_stream_listener_accept(ep->listener, ep->conn_aio);
+ }
+ break;
+ }
+ nni_mtx_unlock(&ep->mtx);
+}
+
+// Dialer connect completion.  On success, start SP negotiation on the new
+// connection; on any failure, fail the pending user connect aio.  Note the
+// aio result and pipe allocation are checked before taking ep->mtx.
+static void
+ipc_ep_dial_cb(void *arg)
+{
+ ipc_ep * ep = arg;
+ nni_aio * aio = ep->conn_aio;
+ ipc_pipe * p;
+ int rv;
+ nng_stream *conn;
+
+ if ((rv = nni_aio_result(aio)) != 0) {
+ goto error;
+ }
+
+ conn = nni_aio_get_output(aio, 0);
+ if ((rv = ipc_pipe_alloc(&p)) != 0) {
+ nng_stream_free(conn);
+ goto error;
+ }
+ nni_mtx_lock(&ep->mtx);
+ if (ep->closed) {
+ // Endpoint closed while the dial was in flight; discard.
+ ipc_pipe_fini(p);
+ nng_stream_free(conn);
+ rv = NNG_ECLOSED;
+ nni_mtx_unlock(&ep->mtx);
+ goto error;
+ } else {
+ ipc_pipe_start(p, conn, ep);
+ }
+ nni_mtx_unlock(&ep->mtx);
+ return;
+
+error:
+ // Error connecting. We need to pass this straight back
+ // to the user.
+ nni_mtx_lock(&ep->mtx);
+ if ((aio = ep->user_aio) != NULL) {
+ ep->user_aio = NULL;
+ nni_aio_finish_error(aio, rv);
+ }
+ nni_mtx_unlock(&ep->mtx);
+}
+
+// Common endpoint construction shared by dialer and listener init: set up
+// the lock, the three pipe-tracking lists, the socket's protocol id, and
+// (when stats are enabled) the rcv_max statistic.
+static int
+ipc_ep_init(ipc_ep **epp, nni_sock *sock)
+{
+ ipc_ep *ep;
+
+ if ((ep = NNI_ALLOC_STRUCT(ep)) == NULL) {
+ return (NNG_ENOMEM);
+ }
+ nni_mtx_init(&ep->mtx);
+ NNI_LIST_INIT(&ep->busy_pipes, ipc_pipe, node);
+ NNI_LIST_INIT(&ep->wait_pipes, ipc_pipe, node);
+ NNI_LIST_INIT(&ep->neg_pipes, ipc_pipe, node);
+
+ ep->proto = nni_sock_proto_id(sock);
+
+#ifdef NNG_ENABLE_STATS
+ static const nni_stat_info rcv_max_info = {
+ .si_name = "rcv_max",
+ .si_desc = "maximum receive size",
+ .si_type = NNG_STAT_LEVEL,
+ .si_unit = NNG_UNIT_BYTES,
+ .si_atomic = true,
+ };
+ nni_stat_init(&ep->st_rcv_max, &rcv_max_info);
+#endif
+
+ *epp = ep;
+ return (0);
+}
+
+// Dialer-side endpoint constructor: common init plus the connect aio and
+// the underlying stream dialer.  On partial failure ipc_ep_fini cleans up.
+static int
+ipc_ep_init_dialer(void **dp, nni_url *url, nni_dialer *dialer)
+{
+ ipc_ep * ep;
+ int rv;
+ nni_sock *sock = nni_dialer_sock(dialer);
+
+ if ((rv = ipc_ep_init(&ep, sock)) != 0) {
+ return (rv);
+ }
+
+ if (((rv = nni_aio_alloc(&ep->conn_aio, ipc_ep_dial_cb, ep)) != 0) ||
+ ((rv = nng_stream_dialer_alloc_url(&ep->dialer, url)) != 0)) {
+ ipc_ep_fini(ep);
+ return (rv);
+ }
+#ifdef NNG_ENABLE_STATS
+ nni_dialer_add_stat(dialer, &ep->st_rcv_max);
+#endif
+ *dp = ep;
+ return (0);
+}
+
+// Listener-side endpoint constructor: common init plus the accept aio, the
+// retry-timer aio, and the underlying stream listener.
+static int
+ipc_ep_init_listener(void **dp, nni_url *url, nni_listener *listener)
+{
+ ipc_ep * ep;
+ int rv;
+ nni_sock *sock = nni_listener_sock(listener);
+
+ if ((rv = ipc_ep_init(&ep, sock)) != 0) {
+ return (rv);
+ }
+
+ if (((rv = nni_aio_alloc(&ep->conn_aio, ipc_ep_accept_cb, ep)) != 0) ||
+ ((rv = nni_aio_alloc(&ep->time_aio, ipc_ep_timer_cb, ep)) != 0) ||
+ ((rv = nng_stream_listener_alloc_url(&ep->listener, url)) != 0)) {
+ ipc_ep_fini(ep);
+ return (rv);
+ }
+
+#ifdef NNG_ENABLE_STATS
+ nni_listener_add_stat(listener, &ep->st_rcv_max);
+#endif
+ *dp = ep;
+ return (0);
+}
+
+// Cancellation hook for the user connect/accept aio: detach it from the
+// endpoint (if still pending) and complete it with the abort reason.
+static void
+ipc_ep_cancel(nni_aio *aio, void *arg, int rv)
+{
+ ipc_ep *ep = arg;
+ nni_mtx_lock(&ep->mtx);
+ if (aio == ep->user_aio) {
+ ep->user_aio = NULL;
+ nni_aio_finish_error(aio, rv);
+ }
+ nni_mtx_unlock(&ep->mtx);
+}
+
+// User-facing connect.  Only one connect may be outstanding at a time
+// (NNG_EBUSY otherwise); the aio is parked as user_aio and completed from
+// ipc_ep_dial_cb once the underlying dial resolves.
+static void
+ipc_ep_connect(void *arg, nni_aio *aio)
+{
+ ipc_ep *ep = arg;
+ int rv;
+
+ if (nni_aio_begin(aio) != 0) {
+ return;
+ }
+ nni_mtx_lock(&ep->mtx);
+ if (ep->closed) {
+ nni_mtx_unlock(&ep->mtx);
+ nni_aio_finish_error(aio, NNG_ECLOSED);
+ return;
+ }
+ if (ep->user_aio != NULL) {
+ nni_mtx_unlock(&ep->mtx);
+ nni_aio_finish_error(aio, NNG_EBUSY);
+ return;
+ }
+
+ if ((rv = nni_aio_schedule(aio, ipc_ep_cancel, ep)) != 0) {
+ nni_mtx_unlock(&ep->mtx);
+ nni_aio_finish_error(aio, rv);
+ return;
+ }
+ ep->user_aio = aio;
+ nng_stream_dialer_dial(ep->dialer, ep->conn_aio);
+ nni_mtx_unlock(&ep->mtx);
+}
+
+// Option getter for NNG_OPT_RECVMAXSZ: copy out the endpoint's limit.
+static int
+ipc_ep_get_recv_max_sz(void *arg, void *v, size_t *szp, nni_type t)
+{
+ ipc_ep *ep = arg;
+ int rv;
+ nni_mtx_lock(&ep->mtx);
+ rv = nni_copyout_size(ep->rcv_max, v, szp, t);
+ nni_mtx_unlock(&ep->mtx);
+ return (rv);
+}
+
+// Option setter for NNG_OPT_RECVMAXSZ: update the endpoint limit and push
+// the new value to every existing pipe (negotiating, waiting, and busy),
+// then mirror it into the statistic.
+static int
+ipc_ep_set_recv_max_sz(void *arg, const void *v, size_t sz, nni_type t)
+{
+ ipc_ep *ep = arg;
+ size_t val;
+ int rv;
+ if ((rv = nni_copyin_size(&val, v, sz, 0, NNI_MAXSZ, t)) == 0) {
+
+ ipc_pipe *p;
+ nni_mtx_lock(&ep->mtx);
+ ep->rcv_max = val;
+ NNI_LIST_FOREACH (&ep->wait_pipes, p) {
+ p->rcv_max = val;
+ }
+ NNI_LIST_FOREACH (&ep->neg_pipes, p) {
+ p->rcv_max = val;
+ }
+ NNI_LIST_FOREACH (&ep->busy_pipes, p) {
+ p->rcv_max = val;
+ }
+ nni_mtx_unlock(&ep->mtx);
+#ifdef NNG_ENABLE_STATS
+ nni_stat_set_value(&ep->st_rcv_max, val);
+#endif
+ }
+ return (rv);
+}
+
+// Bind the listener endpoint to its address (nng_listener start path).
+static int
+ipc_ep_bind(void *arg)
+{
+ ipc_ep *ep = arg;
+ int rv;
+
+ nni_mtx_lock(&ep->mtx);
+ rv = nng_stream_listener_listen(ep->listener);
+ nni_mtx_unlock(&ep->mtx);
+ return (rv);
+}
+
+// User-facing accept.  Parks the aio as user_aio; the first call also arms
+// the underlying stream accept, while later calls try to match against any
+// pipe that already finished negotiating (ipc_ep_match).
+static void
+ipc_ep_accept(void *arg, nni_aio *aio)
+{
+ ipc_ep *ep = arg;
+ int rv;
+
+ if (nni_aio_begin(aio) != 0) {
+ return;
+ }
+ nni_mtx_lock(&ep->mtx);
+ if (ep->closed) {
+ nni_aio_finish_error(aio, NNG_ECLOSED);
+ nni_mtx_unlock(&ep->mtx);
+ return;
+ }
+ if (ep->user_aio != NULL) {
+ nni_aio_finish_error(aio, NNG_EBUSY);
+ nni_mtx_unlock(&ep->mtx);
+ return;
+ }
+ if ((rv = nni_aio_schedule(aio, ipc_ep_cancel, ep)) != 0) {
+ nni_mtx_unlock(&ep->mtx);
+ nni_aio_finish_error(aio, rv);
+ return;
+ }
+ ep->user_aio = aio;
+ if (!ep->started) {
+ ep->started = true;
+ nng_stream_listener_accept(ep->listener, ep->conn_aio);
+ } else {
+ ipc_ep_match(ep);
+ }
+
+ nni_mtx_unlock(&ep->mtx);
+}
+
+// Pipe option getter: delegate straight to the underlying stream.
+static int
+ipc_pipe_get(void *arg, const char *name, void *buf, size_t *szp, nni_type t)
+{
+ ipc_pipe *p = arg;
+
+ return (nni_stream_get(p->conn, name, buf, szp, t));
+}
+
+// Transport pipe operations vtable handed to the NNG core.
+static nni_tran_pipe_ops ipc_tran_pipe_ops = {
+ .p_init = ipc_pipe_init,
+ .p_fini = ipc_pipe_fini,
+ .p_stop = ipc_pipe_stop,
+ .p_send = ipc_pipe_send,
+ .p_recv = ipc_pipe_recv,
+ .p_close = ipc_pipe_close,
+ .p_peer = ipc_pipe_peer,
+ .p_getopt = ipc_pipe_get,
+};
+
+// Endpoint-level options handled by this transport itself; anything not
+// found here falls through to the stream dialer/listener (NNG_ENOTSUP).
+static const nni_option ipc_ep_options[] = {
+ {
+ .o_name = NNG_OPT_RECVMAXSZ,
+ .o_get = ipc_ep_get_recv_max_sz,
+ .o_set = ipc_ep_set_recv_max_sz,
+ },
+ // terminate list
+ {
+ .o_name = NULL,
+ },
+};
+
+// Dialer option getter: try transport options first, then the stream layer.
+static int
+ipc_dialer_get(void *arg, const char *name, void *buf, size_t *szp, nni_type t)
+{
+ ipc_ep *ep = arg;
+ int rv;
+
+ rv = nni_getopt(ipc_ep_options, name, ep, buf, szp, t);
+ if (rv == NNG_ENOTSUP) {
+ rv = nni_stream_dialer_get(ep->dialer, name, buf, szp, t);
+ }
+ return (rv);
+}
+
+// Dialer option setter: try transport options first, then the stream layer.
+static int
+ipc_dialer_set(
+ void *arg, const char *name, const void *buf, size_t sz, nni_type t)
+{
+ ipc_ep *ep = arg;
+ int rv;
+
+ rv = nni_setopt(ipc_ep_options, name, ep, buf, sz, t);
+ if (rv == NNG_ENOTSUP) {
+ rv = nni_stream_dialer_set(ep->dialer, name, buf, sz, t);
+ }
+ return (rv);
+}
+
+// Listener option getter: transport options first, then the stream layer.
+static int
+ipc_listener_get(
+ void *arg, const char *name, void *buf, size_t *szp, nni_type t)
+{
+ ipc_ep *ep = arg;
+ int rv;
+
+ rv = nni_getopt(ipc_ep_options, name, ep, buf, szp, t);
+ if (rv == NNG_ENOTSUP) {
+ rv = nni_stream_listener_get(ep->listener, name, buf, szp, t);
+ }
+ return (rv);
+}
+
+// Listener option setter: transport options first, then the stream layer.
+static int
+ipc_listener_set(
+ void *arg, const char *name, const void *buf, size_t sz, nni_type t)
+{
+ ipc_ep *ep = arg;
+ int rv;
+
+ rv = nni_setopt(ipc_ep_options, name, ep, buf, sz, t);
+ if (rv == NNG_ENOTSUP) {
+ rv = nni_stream_listener_set(ep->listener, name, buf, sz, t);
+ }
+ return (rv);
+}
+
+// Dialer operations vtable registered with the NNG core.
+static nni_tran_dialer_ops ipc_dialer_ops = {
+ .d_init = ipc_ep_init_dialer,
+ .d_fini = ipc_ep_fini,
+ .d_connect = ipc_ep_connect,
+ .d_close = ipc_ep_close,
+ .d_getopt = ipc_dialer_get,
+ .d_setopt = ipc_dialer_set,
+};
+
+// Listener operations vtable registered with the NNG core.
+static nni_tran_listener_ops ipc_listener_ops = {
+ .l_init = ipc_ep_init_listener,
+ .l_fini = ipc_ep_fini,
+ .l_bind = ipc_ep_bind,
+ .l_accept = ipc_ep_accept,
+ .l_close = ipc_ep_close,
+ .l_getopt = ipc_listener_get,
+ .l_setopt = ipc_listener_set,
+};
+
+// Transport descriptor for the canonical "ipc://" scheme.
+static nni_tran ipc_tran = {
+ .tran_version = NNI_TRANSPORT_VERSION,
+ .tran_scheme = "ipc",
+ .tran_dialer = &ipc_dialer_ops,
+ .tran_listener = &ipc_listener_ops,
+ .tran_pipe = &ipc_tran_pipe_ops,
+ .tran_init = ipc_tran_init,
+ .tran_fini = ipc_tran_fini,
+};
+
+#ifdef NNG_PLATFORM_POSIX
+// Alias scheme "unix://" (POSIX only) sharing the same ops as "ipc://".
+static nni_tran ipc_tran_unix = {
+ .tran_version = NNI_TRANSPORT_VERSION,
+ .tran_scheme = "unix",
+ .tran_dialer = &ipc_dialer_ops,
+ .tran_listener = &ipc_listener_ops,
+ .tran_pipe = &ipc_tran_pipe_ops,
+ .tran_init = ipc_tran_init,
+ .tran_fini = ipc_tran_fini,
+};
+#endif
+
+#ifdef NNG_HAVE_ABSTRACT_SOCKETS
+// Alias scheme "abstract://" (Linux abstract sockets), same ops again.
+static nni_tran ipc_tran_abstract = {
+ .tran_version = NNI_TRANSPORT_VERSION,
+ .tran_scheme = "abstract",
+ .tran_dialer = &ipc_dialer_ops,
+ .tran_listener = &ipc_listener_ops,
+ .tran_pipe = &ipc_tran_pipe_ops,
+ .tran_init = ipc_tran_init,
+ .tran_fini = ipc_tran_fini,
+};
+#endif
+
+// Register the ipc transport (and its platform-conditional aliases) with
+// the NNG core.  Returns the first registration error, or 0 on success.
+int
+nng_ipc_register(void)
+{
+ int rv;
+ if (((rv = nni_tran_register(&ipc_tran)) != 0)
+#ifdef NNG_PLATFORM_POSIX
+ || ((rv = nni_tran_register(&ipc_tran_unix)) != 0)
+#endif
+#ifdef NNG_HAVE_ABSTRACT_SOCKETS
+ || ((rv = nni_tran_register(&ipc_tran_abstract)) != 0)
+#endif
+ ) {
+ return (rv);
+ }
+
+ return (0);
+}
diff --git a/src/sp/transport/ipc/ipc_test.c b/src/sp/transport/ipc/ipc_test.c
new file mode 100644
index 00000000..2fb4afa3
--- /dev/null
+++ b/src/sp/transport/ipc/ipc_test.c
@@ -0,0 +1,395 @@
+//
+// Copyright 2020 Staysail Systems, Inc. <info@staysail.tech>
+// Copyright 2018 Cody Piersall <cody.piersall@gmail.com>
+//
+// This software is supplied under the terms of the MIT License, a
+// copy of which should be located in the distribution where this
+// file was obtained (LICENSE.txt). A copy of the license may also be
+// found online at https://opensource.org/licenses/MIT.
+//
+
+#include <nuts.h>
+
+#ifdef NNG_PLATFORM_POSIX
+#include <sys/stat.h>
+#include <sys/types.h>
+#include <unistd.h>
+#endif
+
+// Verify that an IPC path longer than the platform limit is rejected with
+// NNG_EADDRINVAL by both listen and dial.
+void
+test_path_too_long(void)
+{
+ nng_socket s1;
+ char addr[256];
+
+ // All our names have to be less than 128 bytes.
+ memset(addr, 'a', 255);
+ addr[255] = 0;
+ memcpy(addr, "ipc://", strlen("ipc://"));
+
+ NUTS_ASSERT(strlen(addr) == 255);
+ NUTS_OPEN(s1);
+ NUTS_PASS(nng_socket_set_ms(s1, NNG_OPT_SENDTIMEO, 1000));
+ NUTS_PASS(nng_socket_set_ms(s1, NNG_OPT_RECVTIMEO, 1000));
+ NUTS_FAIL(nng_listen(s1, addr, NULL, 0), NNG_EADDRINVAL);
+ NUTS_FAIL(nng_dial(s1, addr, NULL, NNG_FLAG_NONBLOCK), NNG_EADDRINVAL);
+
+ NUTS_CLOSE(s1);
+}
+
+// Dialers do not own the socket file, so setting NNG_OPT_IPC_PERMISSIONS
+// on a dialer must fail with NNG_ENOTSUP.
+void
+test_ipc_dialer_perms(void)
+{
+ nng_socket s;
+ nng_dialer d;
+ char * addr;
+
+ NUTS_ADDR(addr, "ipc");
+ NUTS_OPEN(s);
+ NUTS_PASS(nng_dialer_create(&d, s, addr));
+ NUTS_FAIL(
+ nng_dialer_set_int(d, NNG_OPT_IPC_PERMISSIONS, 0444), NNG_ENOTSUP);
+ NUTS_CLOSE(s);
+}
+
+// Exercise dialer options: no local address, RECVMAXSZ round-trips, and
+// unsupported options are rejected.
+void
+test_ipc_dialer_properties(void)
+{
+ nng_socket s;
+ nng_dialer d;
+ nng_sockaddr sa;
+ size_t z;
+ char *addr;
+
+ NUTS_ADDR(addr, "ipc");
+ NUTS_OPEN(s);
+ NUTS_PASS(nng_dial(s, addr, &d, NNG_FLAG_NONBLOCK));
+ // Dialers don't have local addresses.
+ NUTS_FAIL(nng_dialer_get_addr(d, NNG_OPT_LOCADDR, &sa), NNG_ENOTSUP);
+
+ NUTS_FAIL(
+ nng_dialer_set(d, NNG_OPT_LOCADDR, &sa, sizeof(sa)), NNG_ENOTSUP);
+
+ z = 8192;
+ NUTS_PASS(nng_dialer_set_size(d, NNG_OPT_RECVMAXSZ, z));
+ z = 0;
+ NUTS_PASS(nng_dialer_get_size(d, NNG_OPT_RECVMAXSZ, &z));
+ NUTS_TRUE(z == 8192);
+ NUTS_FAIL(nng_dialer_set_bool(d, NNG_OPT_RAW, true), NNG_ENOTSUP);
+ NUTS_CLOSE(s);
+}
+
+// Verify NNG_OPT_IPC_PERMISSIONS on a listener: unsupported on Windows,
+// invalid mode bits rejected, mode applied to the socket file on start,
+// and read-only once the listener is running.
+void
+test_ipc_listener_perms(void)
+{
+ nng_socket s;
+ nng_listener l;
+ char *addr;
+
+#ifndef _WIN32
+ char * path;
+ struct stat st;
+#endif
+
+ NUTS_ADDR(addr, "ipc");
+ NUTS_OPEN(s);
+ NUTS_PASS(nng_listener_create(&l, s, addr));
+
+#ifdef _WIN32
+ NUTS_FAIL(nng_listener_set_int(l, NNG_OPT_IPC_PERMISSIONS, 0444),
+ NNG_ENOTSUP);
+#else
+ path = &addr[strlen("ipc://")];
+
+ // Attempt to set invalid permissions fails.
+ NUTS_FAIL(nng_listener_set_int(l, NNG_OPT_IPC_PERMISSIONS, S_IFREG),
+ NNG_EINVAL);
+
+ NUTS_PASS(nng_listener_set_int(l, NNG_OPT_IPC_PERMISSIONS, 0444));
+ NUTS_PASS(nng_listener_start(l, 0));
+ NUTS_TRUE(stat(path, &st) == 0);
+ NUTS_TRUE((st.st_mode & 0777) == 0444);
+
+ // Now that it's running, we cannot set it.
+ NUTS_FAIL(
+ nng_listener_set_int(l, NNG_OPT_IPC_PERMISSIONS, 0644), NNG_EBUSY);
+#endif
+
+ NUTS_CLOSE(s);
+}
+
+// Exercise listener options: local address reflects the bound IPC path,
+// LOCADDR is read-only, RECVMAXSZ round-trips, RAW unsupported.
+void
+test_ipc_listener_properties(void)
+{
+ nng_socket s;
+ nng_listener l;
+ nng_sockaddr sa;
+ size_t z;
+ char *addr;
+
+ NUTS_ADDR(addr, "ipc");
+ NUTS_OPEN(s);
+ NUTS_PASS(nng_listen(s, addr, &l, 0));
+ NUTS_PASS(nng_listener_get_addr(l, NNG_OPT_LOCADDR, &sa));
+ NUTS_TRUE(sa.s_ipc.sa_family == NNG_AF_IPC);
+ NUTS_MATCH(sa.s_ipc.sa_path, addr + strlen("ipc://"));
+
+ NUTS_FAIL(nng_listener_set(l, NNG_OPT_LOCADDR, &sa, sizeof(sa)),
+ NNG_EREADONLY);
+ z = 8192;
+ NUTS_PASS(nng_listener_set_size(l, NNG_OPT_RECVMAXSZ, z));
+ z = 0;
+ NUTS_PASS(nng_listener_get_size(l, NNG_OPT_RECVMAXSZ, &z));
+ NUTS_TRUE(z == 8192);
+ NUTS_FAIL(nng_listener_set_bool(l, NNG_OPT_RAW, true), NNG_ENOTSUP);
+ NUTS_CLOSE(s);
+}
+
+// Verify the listener-level RECVMAXSZ (100) overrides the socket-level one
+// (200): a 95-byte message is delivered, a 150-byte one is dropped so the
+// receive times out.
+void
+test_ipc_recv_max(void)
+{
+ char msg[256];
+ char rcvbuf[256];
+ nng_socket s0;
+ nng_socket s1;
+ nng_listener l;
+ size_t sz;
+ char *addr;
+
+ NUTS_ADDR(addr, "ipc");
+ NUTS_OPEN(s0);
+ NUTS_PASS(nng_socket_set_ms(s0, NNG_OPT_RECVTIMEO, 100));
+ NUTS_PASS(nng_socket_set_size(s0, NNG_OPT_RECVMAXSZ, 200));
+ NUTS_PASS(nng_listener_create(&l, s0, addr));
+ NUTS_PASS(nng_socket_get_size(s0, NNG_OPT_RECVMAXSZ, &sz));
+ NUTS_TRUE(sz == 200);
+ NUTS_PASS(nng_listener_set_size(l, NNG_OPT_RECVMAXSZ, 100));
+ NUTS_PASS(nng_listener_start(l, 0));
+
+ NUTS_OPEN(s1);
+ NUTS_PASS(nng_dial(s1, addr, NULL, 0));
+ NUTS_PASS(nng_send(s1, msg, 95, 0));
+ NUTS_PASS(nng_socket_set_ms(s1, NNG_OPT_SENDTIMEO, 100));
+ NUTS_PASS(nng_recv(s0, rcvbuf, &sz, 0));
+ NUTS_TRUE(sz == 95);
+ NUTS_PASS(nng_send(s1, msg, 150, 0));
+ NUTS_FAIL(nng_recv(s0, rcvbuf, &sz, 0), NNG_ETIMEDOUT);
+ NUTS_CLOSE(s0);
+ NUTS_CLOSE(s1);
+}
+
+// Basic abstract-socket connectivity: addresses report NNG_AF_ABSTRACT
+// with the scheme prefix stripped, and a message round-trips.
+void
+test_abstract_sockets(void)
+{
+#ifdef NNG_HAVE_ABSTRACT_SOCKETS
+ nng_socket s1;
+ nng_socket s2;
+ char *addr;
+ nng_pipe p1;
+ nng_pipe p2;
+ nng_sockaddr sa1;
+ nng_sockaddr sa2;
+ char * prefix = "abstract://";
+
+ NUTS_ADDR(addr, "abstract");
+ NUTS_OPEN(s1);
+ NUTS_OPEN(s2);
+ NUTS_MARRY_EX(s1, s2, addr, &p1, &p2);
+ NUTS_PASS(nng_pipe_get_addr(p1, NNG_OPT_REMADDR, &sa1));
+ NUTS_PASS(nng_pipe_get_addr(p2, NNG_OPT_LOCADDR, &sa2));
+ NUTS_TRUE(sa1.s_family == sa2.s_family);
+ NUTS_TRUE(sa1.s_family == NNG_AF_ABSTRACT);
+ NUTS_TRUE(sa1.s_abstract.sa_len == strlen(addr) - strlen(prefix));
+ NUTS_TRUE(sa2.s_abstract.sa_len == strlen(addr) - strlen(prefix));
+ NUTS_SEND(s1, "ping");
+ NUTS_RECV(s2, "ping");
+ NUTS_CLOSE(s1);
+ NUTS_CLOSE(s2);
+#endif
+}
+
+// Listening on an empty "abstract://" name auto-binds to a kernel-assigned
+// name; verify we can read that name back and dial it.
+void
+test_abstract_auto_bind(void)
+{
+#ifdef NNG_HAVE_ABSTRACT_SOCKETS
+ nng_socket s1;
+ nng_socket s2;
+ char addr[40];
+ char name[12];
+ nng_sockaddr sa;
+ nng_listener l;
+ size_t len;
+
+ snprintf(addr, sizeof(addr), "abstract://");
+
+ NUTS_OPEN(s1);
+ NUTS_OPEN(s2);
+ NUTS_PASS(nng_socket_set_ms(s1, NNG_OPT_SENDTIMEO, 1000));
+ NUTS_PASS(nng_socket_set_ms(s2, NNG_OPT_SENDTIMEO, 1000));
+ NUTS_PASS(nng_socket_set_ms(s1, NNG_OPT_RECVTIMEO, 1000));
+ NUTS_PASS(nng_socket_set_ms(s2, NNG_OPT_RECVTIMEO, 1000));
+ NUTS_PASS(nng_listen(s1, addr, &l, 0));
+
+ NUTS_PASS(nng_listener_get_addr(l, NNG_OPT_LOCADDR, &sa));
+ // Under linux there are either 8 or 5 hex characters.
+ NUTS_TRUE(sa.s_family == NNG_AF_ABSTRACT);
+ NUTS_TRUE(sa.s_abstract.sa_len < 10);
+
+ len = sa.s_abstract.sa_len;
+ memcpy(name, sa.s_abstract.sa_name, len);
+ name[len] = '\0';
+ NUTS_TRUE(strlen(name) == len);
+
+ (void) snprintf(addr, sizeof(addr), "abstract://%s", name);
+ NUTS_PASS(nng_dial(s2, addr, NULL, 0));
+
+ // first send the ping
+ NUTS_SEND(s1, "ping");
+ NUTS_RECV(s2, "ping");
+
+ NUTS_SEND(s2, "pong");
+ NUTS_RECV(s1, "pong");
+
+ NUTS_CLOSE(s1);
+ NUTS_CLOSE(s2);
+#endif
+}
+
+// An abstract name longer than the platform limit must be rejected with
+// NNG_EADDRINVAL by both listen and dial.
+void
+test_abstract_too_long(void)
+{
+#ifdef NNG_HAVE_ABSTRACT_SOCKETS
+ nng_socket s1;
+ char addr[256];
+
+ // All our names have to be less than 128 bytes.
+ memset(addr, 'a', 255);
+ addr[255] = 0;
+ memcpy(addr, "abstract://", strlen("abstract://"));
+
+ NUTS_ASSERT(strlen(addr) == 255);
+ NUTS_OPEN(s1);
+ NUTS_PASS(nng_socket_set_ms(s1, NNG_OPT_SENDTIMEO, 1000));
+ NUTS_PASS(nng_socket_set_ms(s1, NNG_OPT_RECVTIMEO, 1000));
+ NUTS_FAIL(nng_listen(s1, addr, NULL, 0), NNG_EADDRINVAL);
+ NUTS_FAIL(nng_dial(s1, addr, NULL, NNG_FLAG_NONBLOCK), NNG_EADDRINVAL);
+
+ NUTS_CLOSE(s1);
+#endif
+}
+
+// Abstract names may embed NUL bytes via %00 URL-escaping: "a%00b_<rng>"
+// should decode to a 20-byte name with a literal '\0' at offset 1, and the
+// connection should still work end to end.
+void
+test_abstract_null(void)
+{
+#ifdef NNG_HAVE_ABSTRACT_SOCKETS
+ nng_socket s1;
+ nng_socket s2;
+ char addr[64];
+ char name[40];
+ char rng[20];
+
+ nng_sockaddr sa;
+ nng_listener l;
+ size_t len;
+
+ snprintf(rng, sizeof(rng), "%08x%08x", nng_random(), nng_random());
+ snprintf(name, sizeof(name), "a%%00b_%s", rng);
+ snprintf(addr, sizeof(addr), "abstract://%s", name);
+
+ NUTS_OPEN(s1);
+ NUTS_OPEN(s2);
+ NUTS_PASS(nng_socket_set_ms(s1, NNG_OPT_SENDTIMEO, 1000));
+ NUTS_PASS(nng_socket_set_ms(s2, NNG_OPT_SENDTIMEO, 1000));
+ NUTS_PASS(nng_socket_set_ms(s1, NNG_OPT_RECVTIMEO, 1000));
+ NUTS_PASS(nng_socket_set_ms(s2, NNG_OPT_RECVTIMEO, 1000));
+ NUTS_PASS(nng_listen(s1, addr, &l, 0));
+
+ NUTS_PASS(nng_listener_get_addr(l, NNG_OPT_LOCADDR, &sa));
+ // Under linux there are either 8 or 5 hex characters.
+ NUTS_TRUE(sa.s_family == NNG_AF_ABSTRACT);
+ NUTS_TRUE(sa.s_abstract.sa_len < 32);
+ len = sa.s_abstract.sa_len;
+ NUTS_TRUE(len == 20);
+ NUTS_TRUE(sa.s_abstract.sa_name[0] == 'a');
+ NUTS_TRUE(sa.s_abstract.sa_name[1] == '\0');
+ NUTS_TRUE(sa.s_abstract.sa_name[2] == 'b');
+ NUTS_TRUE(sa.s_abstract.sa_name[3] == '_');
+ NUTS_TRUE(memcmp(&sa.s_abstract.sa_name[4], rng, 16) == 0);
+
+ NUTS_PASS(nng_dial(s2, addr, NULL, 0));
+
+ // first send the ping
+ NUTS_SEND(s1, "1234");
+ NUTS_RECV(s2, "1234");
+
+ NUTS_SEND(s2, "5678");
+ NUTS_RECV(s1, "5678");
+
+ NUTS_CLOSE(s1);
+ NUTS_CLOSE(s2);
+#endif
+}
+
+// Verify that "unix://" is an alias for "ipc://": listen on one scheme,
+// dial the other against the same /tmp path, and exchange a message.
+void
+test_unix_alias(void)
+{
+#ifdef NNG_PLATFORM_POSIX
+ nng_socket s1;
+ nng_socket s2;
+ char addr1[32];
+ char addr2[32];
+ char rng[20];
+ nng_sockaddr sa1;
+ nng_sockaddr sa2;
+ nng_msg * msg;
+ nng_pipe p;
+
+ // Presumes /tmp.
+
+ (void) snprintf(
+ rng, sizeof(rng), "%08x%08x", nng_random(), nng_random());
+ snprintf(addr1, sizeof(addr1), "ipc:///tmp/%s", rng);
+ snprintf(addr2, sizeof(addr2), "unix:///tmp/%s", rng);
+
+ NUTS_OPEN(s1);
+ NUTS_OPEN(s2);
+ NUTS_PASS(nng_socket_set_ms(s1, NNG_OPT_SENDTIMEO, 1000));
+ NUTS_PASS(nng_socket_set_ms(s2, NNG_OPT_SENDTIMEO, 1000));
+ NUTS_PASS(nng_socket_set_ms(s1, NNG_OPT_RECVTIMEO, 1000));
+ NUTS_PASS(nng_socket_set_ms(s2, NNG_OPT_RECVTIMEO, 1000));
+ NUTS_PASS(nng_listen(s1, addr1, NULL, 0));
+ NUTS_PASS(nng_dial(s2, addr2, NULL, 0));
+
+ // first send the ping
+ NUTS_SEND(s1, "ping");
+ NUTS_PASS(nng_recvmsg(s2, &msg, 0));
+ NUTS_ASSERT(msg != NULL);
+ NUTS_TRUE(nng_msg_len(msg) == 5);
+ NUTS_MATCH(nng_msg_body(msg), "ping");
+ p = nng_msg_get_pipe(msg);
+ // NOTE(review): both queries below fetch NNG_OPT_REMADDR from the same
+ // pipe, so the comparison is trivially true — was one of these meant to
+ // be NNG_OPT_LOCADDR (or a pipe from the other socket)?  Verify intent.
+ NUTS_PASS(nng_pipe_get_addr(p, NNG_OPT_REMADDR, &sa1));
+ NUTS_PASS(nng_pipe_get_addr(p, NNG_OPT_REMADDR, &sa2));
+ NUTS_TRUE(sa1.s_family == sa2.s_family);
+ NUTS_TRUE(sa1.s_family == NNG_AF_IPC);
+ NUTS_MATCH(sa1.s_ipc.sa_path, sa2.s_ipc.sa_path);
+ nng_msg_free(msg);
+
+ NUTS_CLOSE(s1);
+ NUTS_CLOSE(s2);
+#endif
+}
+
+// Test registry consumed by the NUTS/acutest runner.
+TEST_LIST = {
+ { "ipc path too long", test_path_too_long },
+ { "ipc dialer perms", test_ipc_dialer_perms },
+ { "ipc dialer props", test_ipc_dialer_properties },
+ { "ipc listener perms", test_ipc_listener_perms },
+ { "ipc listener props", test_ipc_listener_properties },
+ { "ipc recv max", test_ipc_recv_max },
+ { "ipc abstract sockets", test_abstract_sockets },
+ { "ipc abstract auto bind", test_abstract_auto_bind },
+ { "ipc abstract name too long", test_abstract_too_long },
+ { "ipc abstract embedded null", test_abstract_null },
+ { "ipc unix alias", test_unix_alias },
+ { NULL, NULL },
+}; \ No newline at end of file
diff --git a/src/sp/transport/tcp/CMakeLists.txt b/src/sp/transport/tcp/CMakeLists.txt
new file mode 100644
index 00000000..d6022329
--- /dev/null
+++ b/src/sp/transport/tcp/CMakeLists.txt
@@ -0,0 +1,17 @@
+#
+# Copyright 2020 Staysail Systems, Inc. <info@staysail.tech>
+# Copyright 2018 Capitar IT Group BV <info@capitar.com>
+#
+# This software is supplied under the terms of the MIT License, a
+# copy of which should be located in the distribution where this
+# file was obtained (LICENSE.txt). A copy of the license may also be
+# found online at https://opensource.org/licenses/MIT.
+#
+
+# TCP protocol
+nng_directory(tcp)
+
+nng_sources_if(NNG_TRANSPORT_TCP tcp.c)
+nng_headers_if(NNG_TRANSPORT_TCP nng/transport/tcp/tcp.h)
+nng_defines_if(NNG_TRANSPORT_TCP NNG_TRANSPORT_TCP)
+nng_test(tcp_test) \ No newline at end of file
diff --git a/src/sp/transport/tcp/tcp.c b/src/sp/transport/tcp/tcp.c
new file mode 100644
index 00000000..524c6988
--- /dev/null
+++ b/src/sp/transport/tcp/tcp.c
@@ -0,0 +1,1263 @@
+//
+// Copyright 2020 Staysail Systems, Inc. <info@staysail.tech>
+// Copyright 2018 Capitar IT Group BV <info@capitar.com>
+// Copyright 2019 Devolutions <info@devolutions.net>
+//
+// This software is supplied under the terms of the MIT License, a
+// copy of which should be located in the distribution where this
+// file was obtained (LICENSE.txt). A copy of the license may also be
+// found online at https://opensource.org/licenses/MIT.
+//
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+
+#include "core/nng_impl.h"
+
+// TCP transport. Platform specific TCP operations must be
+// supplied as well.
+
+typedef struct tcptran_pipe tcptran_pipe;
+typedef struct tcptran_ep tcptran_ep;
+
+// tcptran_pipe is one end of a TCP connection: the stream, the SP
+// negotiation headers, and the queued send/recv aios.
+struct tcptran_pipe {
+ nng_stream * conn;
+ nni_pipe * npipe;
+ uint16_t peer; // peer's SP protocol id (from negotiation)
+ uint16_t proto; // our SP protocol id
+ size_t rcvmax; // maximum inbound message size (0 = unlimited)
+ bool closed;
+ nni_list_node node; // membership in one of the ep's pipe lists
+ tcptran_ep * ep;
+ nni_atomic_flag reaped; // guards against double reap
+ nni_reap_node reap;
+ uint8_t txlen[sizeof(uint64_t)]; // outbound length header
+ uint8_t rxlen[sizeof(uint64_t)]; // inbound length header
+ size_t gottxhead;
+ size_t gotrxhead;
+ size_t wanttxhead;
+ size_t wantrxhead;
+ nni_list recvq; // pending user receive aios
+ nni_list sendq; // pending user send aios
+ nni_aio * txaio;
+ nni_aio * rxaio;
+ nni_aio * negoaio;
+ nni_msg * rxmsg; // message currently being received
+ nni_mtx mtx;
+};
+
+// tcptran_ep is a dialer or listener endpoint; it tracks its pipes through
+// negotiation (negopipes), waiting-for-socket (waitpipes), and in-service
+// (busypipes) states.
+struct tcptran_ep {
+ nni_mtx mtx;
+ uint16_t proto;
+ size_t rcvmax;
+ bool fini;
+ bool started;
+ bool closed;
+ nng_url * url;
+ const char * host; // for dialers
+ nng_sockaddr src;
+ int refcnt; // active pipes
+ nni_aio * useraio;
+ nni_aio * connaio;
+ nni_aio * timeaio;
+ nni_list busypipes; // busy pipes -- ones passed to socket
+ nni_list waitpipes; // pipes waiting to match to socket
+ nni_list negopipes; // pipes busy negotiating
+ nni_reap_node reap;
+ nng_stream_dialer * dialer;
+ nng_stream_listener *listener;
+
+#ifdef NNG_ENABLE_STATS
+ nni_stat_item st_rcv_max;
+#endif
+};
+
+static void tcptran_pipe_send_start(tcptran_pipe *);
+static void tcptran_pipe_recv_start(tcptran_pipe *);
+static void tcptran_pipe_send_cb(void *);
+static void tcptran_pipe_recv_cb(void *);
+static void tcptran_pipe_nego_cb(void *);
+static void tcptran_ep_fini(void *);
+static void tcptran_pipe_fini(void *);
+
+// Deferred-destruction list for endpoints (freed by the reaper thread).
+static nni_reap_list tcptran_ep_reap_list = {
+ .rl_offset = offsetof(tcptran_ep, reap),
+ .rl_func = tcptran_ep_fini,
+};
+
+// Deferred-destruction list for pipes (freed by the reaper thread).
+static nni_reap_list tcptran_pipe_reap_list = {
+ .rl_offset = offsetof (tcptran_pipe, reap),
+ .rl_func = tcptran_pipe_fini,
+};
+
+// Transport-wide init hook; nothing to set up for TCP.
+static int
+tcptran_init(void)
+{
+ return (0);
+}
+
+// Transport-wide teardown hook; nothing to clean up for TCP.
+static void
+tcptran_fini(void)
+{
+}
+
+// Close the pipe: mark closed under the lock, then abort all in-flight
+// aios and close the underlying stream (outside the lock).
+static void
+tcptran_pipe_close(void *arg)
+{
+ tcptran_pipe *p = arg;
+
+ nni_mtx_lock(&p->mtx);
+ p->closed = true;
+ nni_mtx_unlock(&p->mtx);
+
+ nni_aio_close(p->rxaio);
+ nni_aio_close(p->txaio);
+ nni_aio_close(p->negoaio);
+
+ nng_stream_close(p->conn);
+}
+
+// Stop the pipe's aios, waiting for any running callbacks to finish.
+static void
+tcptran_pipe_stop(void *arg)
+{
+ tcptran_pipe *p = arg;
+
+ nni_aio_stop(p->rxaio);
+ nni_aio_stop(p->txaio);
+ nni_aio_stop(p->negoaio);
+}
+
+// Core pipe-attach hook: remember the owning nni_pipe for stats/bumps.
+static int
+tcptran_pipe_init(void *arg, nni_pipe *npipe)
+{
+ tcptran_pipe *p = arg;
+ p->npipe = npipe;
+
+ return (0);
+}
+
+// Destroy the pipe.  Detaches from the endpoint first; if this was the
+// last pipe of an endpoint already marked fini, the endpoint itself is
+// handed to the reaper.
+static void
+tcptran_pipe_fini(void *arg)
+{
+ tcptran_pipe *p = arg;
+ tcptran_ep * ep;
+
+ tcptran_pipe_stop(p);
+ if ((ep = p->ep) != NULL) {
+ nni_mtx_lock(&ep->mtx);
+ nni_list_node_remove(&p->node);
+ ep->refcnt--;
+ if (ep->fini && (ep->refcnt == 0)) {
+ nni_reap(&tcptran_ep_reap_list, ep);
+ }
+ nni_mtx_unlock(&ep->mtx);
+ }
+
+ nni_aio_free(p->rxaio);
+ nni_aio_free(p->txaio);
+ nni_aio_free(p->negoaio);
+ nng_stream_free(p->conn);
+ nni_msg_free(p->rxmsg);
+ nni_mtx_fini(&p->mtx);
+ NNI_FREE_STRUCT(p);
+}
+
+// Schedule the pipe for destruction exactly once; the atomic flag makes
+// concurrent reap attempts idempotent.
+static void
+tcptran_pipe_reap(tcptran_pipe *p)
+{
+ if (!nni_atomic_flag_test_and_set(&p->reaped)) {
+ if (p->conn != NULL) {
+ nng_stream_close(p->conn);
+ }
+ nni_reap(&tcptran_pipe_reap_list, p);
+ }
+}
+
+// Allocate a pipe with its lock, the three aios, and empty send/recv
+// queues.  On partial failure tcptran_pipe_fini releases what was built.
+static int
+tcptran_pipe_alloc(tcptran_pipe **pipep)
+{
+ tcptran_pipe *p;
+ int rv;
+
+ if ((p = NNI_ALLOC_STRUCT(p)) == NULL) {
+ return (NNG_ENOMEM);
+ }
+ nni_mtx_init(&p->mtx);
+ if (((rv = nni_aio_alloc(&p->txaio, tcptran_pipe_send_cb, p)) != 0) ||
+ ((rv = nni_aio_alloc(&p->rxaio, tcptran_pipe_recv_cb, p)) != 0) ||
+ ((rv = nni_aio_alloc(&p->negoaio, tcptran_pipe_nego_cb, p)) !=
+ 0)) {
+ tcptran_pipe_fini(p);
+ return (rv);
+ }
+ nni_aio_list_init(&p->recvq);
+ nni_aio_list_init(&p->sendq);
+ nni_atomic_flag_reset(&p->reaped);
+
+ *pipep = p;
+
+ return (0);
+}
+
+// Match a negotiated pipe with a waiting user accept/connect aio: move the
+// pipe to busypipes, stamp it with the endpoint's rcvmax, and hand it to
+// the user.  No-op unless both sides are present.  Caller holds ep->mtx.
+static void
+tcptran_ep_match(tcptran_ep *ep)
+{
+ nni_aio * aio;
+ tcptran_pipe *p;
+
+ if (((aio = ep->useraio) == NULL) ||
+ ((p = nni_list_first(&ep->waitpipes)) == NULL)) {
+ return;
+ }
+ nni_list_remove(&ep->waitpipes, p);
+ nni_list_append(&ep->busypipes, p);
+ ep->useraio = NULL;
+ p->rcvmax = ep->rcvmax;
+ nni_aio_set_output(aio, 0, p);
+ nni_aio_finish(aio, 0, 0);
+}
+
+// SP negotiation state machine, driven by negoaio completions.  We send
+// our 8-byte header first, then read the peer's; partial transfers loop
+// back through this callback until both directions are complete.  A bad
+// header is NNG_EPROTO; any failure reaps the pipe and wakes the user aio.
+static void
+tcptran_pipe_nego_cb(void *arg)
+{
+ tcptran_pipe *p = arg;
+ tcptran_ep * ep = p->ep;
+ nni_aio * aio = p->negoaio;
+ nni_aio * uaio;
+ int rv;
+
+ nni_mtx_lock(&ep->mtx);
+
+ if ((rv = nni_aio_result(aio)) != 0) {
+ goto error;
+ }
+
+ // We start transmitting before we receive.
+ if (p->gottxhead < p->wanttxhead) {
+ p->gottxhead += nni_aio_count(aio);
+ } else if (p->gotrxhead < p->wantrxhead) {
+ p->gotrxhead += nni_aio_count(aio);
+ }
+
+ if (p->gottxhead < p->wanttxhead) {
+ nni_iov iov;
+ iov.iov_len = p->wanttxhead - p->gottxhead;
+ iov.iov_buf = &p->txlen[p->gottxhead];
+ // send it down...
+ nni_aio_set_iov(aio, 1, &iov);
+ nng_stream_send(p->conn, aio);
+ nni_mtx_unlock(&ep->mtx);
+ return;
+ }
+ if (p->gotrxhead < p->wantrxhead) {
+ nni_iov iov;
+ iov.iov_len = p->wantrxhead - p->gotrxhead;
+ iov.iov_buf = &p->rxlen[p->gotrxhead];
+ nni_aio_set_iov(aio, 1, &iov);
+ nng_stream_recv(p->conn, aio);
+ nni_mtx_unlock(&ep->mtx);
+ return;
+ }
+ // We have both sent and received the headers. Lets check the
+ // receive side header.
+ if ((p->rxlen[0] != 0) || (p->rxlen[1] != 'S') ||
+ (p->rxlen[2] != 'P') || (p->rxlen[3] != 0) || (p->rxlen[6] != 0) ||
+ (p->rxlen[7] != 0)) {
+ rv = NNG_EPROTO;
+ goto error;
+ }
+
+ NNI_GET16(&p->rxlen[4], p->peer);
+
+ // We are all ready now. We put this in the wait list, and
+ // then try to run the matcher.
+ nni_list_remove(&ep->negopipes, p);
+ nni_list_append(&ep->waitpipes, p);
+
+ tcptran_ep_match(ep);
+ nni_mtx_unlock(&ep->mtx);
+
+ return;
+
+error:
+ nng_stream_close(p->conn);
+
+ if ((uaio = ep->useraio) != NULL) {
+ ep->useraio = NULL;
+ nni_aio_finish_error(uaio, rv);
+ }
+ nni_mtx_unlock(&ep->mtx);
+ tcptran_pipe_reap(p);
+}
+
+// Stream send completion.  Advances the iov on short writes, and when the
+// whole message is out, completes the head user aio and starts the next
+// queued send.  On error the user aio fails and nothing is requeued.
+static void
+tcptran_pipe_send_cb(void *arg)
+{
+ tcptran_pipe *p = arg;
+ int rv;
+ nni_aio * aio;
+ size_t n;
+ nni_msg * msg;
+ nni_aio * txaio = p->txaio;
+
+ nni_mtx_lock(&p->mtx);
+ aio = nni_list_first(&p->sendq);
+
+ if ((rv = nni_aio_result(txaio)) != 0) {
+ nni_pipe_bump_error(p->npipe, rv);
+ // Intentionally we do not queue up another transfer.
+ // There's an excellent chance that the pipe is no longer
+ // usable, with a partial transfer.
+ // The protocol should see this error, and close the
+ // pipe itself, we hope.
+ nni_aio_list_remove(aio);
+ nni_mtx_unlock(&p->mtx);
+ nni_aio_finish_error(aio, rv);
+ return;
+ }
+
+ n = nni_aio_count(txaio);
+ nni_aio_iov_advance(txaio, n);
+ if (nni_aio_iov_count(txaio) > 0) {
+ // Short write: resubmit for the remaining bytes.
+ nng_stream_send(p->conn, txaio);
+ nni_mtx_unlock(&p->mtx);
+ return;
+ }
+
+ nni_aio_list_remove(aio);
+ tcptran_pipe_send_start(p);
+
+ msg = nni_aio_get_msg(aio);
+ n = nni_msg_len(msg);
+ nni_pipe_bump_tx(p->npipe, n);
+ nni_mtx_unlock(&p->mtx);
+
+ nni_aio_set_msg(aio, NULL);
+ nni_msg_free(msg);
+ nni_aio_finish_sync(aio, 0, n);
+}
+
+// Stream receive completion.  The two-phase read first collects the 8-byte
+// length header (rxmsg == NULL), allocates rxmsg and reads the body, then
+// delivers the finished message and starts the next queued receive.
+// Oversized messages (rcvmax) abort the pipe with NNG_EMSGSIZE.
+static void
+tcptran_pipe_recv_cb(void *arg)
+{
+ tcptran_pipe *p = arg;
+ nni_aio * aio;
+ int rv;
+ size_t n;
+ nni_msg * msg;
+ nni_aio * rxaio = p->rxaio;
+
+ nni_mtx_lock(&p->mtx);
+ aio = nni_list_first(&p->recvq);
+
+ if ((rv = nni_aio_result(rxaio)) != 0) {
+ goto recv_error;
+ }
+
+ n = nni_aio_count(rxaio);
+ nni_aio_iov_advance(rxaio, n);
+ if (nni_aio_iov_count(rxaio) > 0) {
+ // Short read: resubmit for the remaining bytes.
+ nng_stream_recv(p->conn, rxaio);
+ nni_mtx_unlock(&p->mtx);
+ return;
+ }
+
+ // If we don't have a message yet, we were reading the TCP message
+ // header, which is just the length. This tells us the size of the
+ // message to allocate and how much more to expect.
+ if (p->rxmsg == NULL) {
+ uint64_t len;
+ // We should have gotten a message header.
+ NNI_GET64(p->rxlen, len);
+
+ // Make sure the message payload is not too big. If it is
+ // the caller will shut down the pipe.
+ if ((len > p->rcvmax) && (p->rcvmax > 0)) {
+ rv = NNG_EMSGSIZE;
+ goto recv_error;
+ }
+
+ if ((rv = nni_msg_alloc(&p->rxmsg, (size_t) len)) != 0) {
+ goto recv_error;
+ }
+
+ // Submit the rest of the data for a read -- we want to
+ // read the entire message now.
+ if (len != 0) {
+ nni_iov iov;
+ iov.iov_buf = nni_msg_body(p->rxmsg);
+ iov.iov_len = (size_t) len;
+
+ nni_aio_set_iov(rxaio, 1, &iov);
+ nng_stream_recv(p->conn, rxaio);
+ nni_mtx_unlock(&p->mtx);
+ return;
+ }
+ }
+
+ // We read a message completely. Let the user know the good news.
+ nni_aio_list_remove(aio);
+ msg = p->rxmsg;
+ p->rxmsg = NULL;
+ n = nni_msg_len(msg);
+
+ nni_pipe_bump_rx(p->npipe, n);
+ tcptran_pipe_recv_start(p);
+ nni_mtx_unlock(&p->mtx);
+
+ nni_aio_set_msg(aio, msg);
+ nni_aio_finish_sync(aio, 0, n);
+ return;
+
+recv_error:
+ nni_aio_list_remove(aio);
+ msg = p->rxmsg;
+ p->rxmsg = NULL;
+ nni_pipe_bump_error(p->npipe, rv);
+ // Intentionally, we do not queue up another receive.
+ // The protocol should notice this error and close the pipe.
+ nni_mtx_unlock(&p->mtx);
+
+ nni_msg_free(msg);
+ nni_aio_finish_error(aio, rv);
+}
+
+// Cancellation routine for user send aios.  If the aio is at the head
+// of the sendq its transfer is already in flight, so we abort the
+// underlying txaio and let its callback complete the user aio; queued
+// (not yet started) aios are simply removed and failed.
+static void
+tcptran_pipe_send_cancel(nni_aio *aio, void *arg, int rv)
+{
+ tcptran_pipe *p = arg;
+
+ nni_mtx_lock(&p->mtx);
+ if (!nni_aio_list_active(aio)) {
+ nni_mtx_unlock(&p->mtx);
+ return;
+ }
+ // If this is being sent, then cancel the pending transfer.
+ // The callback on the txaio will cause the user aio to
+ // be canceled too.
+ if (nni_list_first(&p->sendq) == aio) {
+ nni_aio_abort(p->txaio, rv);
+ nni_mtx_unlock(&p->mtx);
+ return;
+ }
+ nni_aio_list_remove(aio);
+ nni_mtx_unlock(&p->mtx);
+
+ nni_aio_finish_error(aio, rv);
+}
+
+// Begin transmission of the message at the head of the sendq.
+// Caller must hold p->mtx.  Builds up to three iovs: the 8-byte
+// length prefix, the message header (if any), and the body (if any).
+static void
+tcptran_pipe_send_start(tcptran_pipe *p)
+{
+ nni_aio *aio;
+ nni_aio *txaio;
+ nni_msg *msg;
+ int niov;
+ nni_iov iov[3];
+ uint64_t len;
+
+ // Closed pipe: fail every queued send.
+ if (p->closed) {
+ while ((aio = nni_list_first(&p->sendq)) != NULL) {
+ nni_list_remove(&p->sendq, aio);
+ nni_aio_finish_error(aio, NNG_ECLOSED);
+ }
+ return;
+ }
+
+ if ((aio = nni_list_first(&p->sendq)) == NULL) {
+ return;
+ }
+
+ // This runs to send the message.
+ msg = nni_aio_get_msg(aio);
+ // Wire length covers both the SP header and the body.
+ len = nni_msg_len(msg) + nni_msg_header_len(msg);
+
+ NNI_PUT64(p->txlen, len);
+
+ txaio = p->txaio;
+ niov = 0;
+ iov[0].iov_buf = p->txlen;
+ iov[0].iov_len = sizeof(p->txlen);
+ niov++;
+ if (nni_msg_header_len(msg) > 0) {
+ iov[niov].iov_buf = nni_msg_header(msg);
+ iov[niov].iov_len = nni_msg_header_len(msg);
+ niov++;
+ }
+ if (nni_msg_len(msg) > 0) {
+ iov[niov].iov_buf = nni_msg_body(msg);
+ iov[niov].iov_len = nni_msg_len(msg);
+ niov++;
+ }
+ nni_aio_set_iov(txaio, niov, iov);
+ nng_stream_send(p->conn, txaio);
+}
+
+// Public send entry point for the pipe.  Queues the user aio; if it is
+// the only entry on the sendq, kicks off the transfer immediately.
+static void
+tcptran_pipe_send(void *arg, nni_aio *aio)
+{
+ tcptran_pipe *p = arg;
+ int rv;
+
+ // If begin fails the aio was already terminated; nothing to do.
+ if (nni_aio_begin(aio) != 0) {
+ return;
+ }
+ nni_mtx_lock(&p->mtx);
+ if ((rv = nni_aio_schedule(aio, tcptran_pipe_send_cancel, p)) != 0) {
+ nni_mtx_unlock(&p->mtx);
+ nni_aio_finish_error(aio, rv);
+ return;
+ }
+ nni_list_append(&p->sendq, aio);
+ if (nni_list_first(&p->sendq) == aio) {
+ tcptran_pipe_send_start(p);
+ }
+ nni_mtx_unlock(&p->mtx);
+}
+
+// Cancellation routine for user receive aios; mirrors
+// tcptran_pipe_send_cancel for the receive side.
+static void
+tcptran_pipe_recv_cancel(nni_aio *aio, void *arg, int rv)
+{
+ tcptran_pipe *p = arg;
+
+ nni_mtx_lock(&p->mtx);
+ if (!nni_aio_list_active(aio)) {
+ nni_mtx_unlock(&p->mtx);
+ return;
+ }
+ // If receive in progress, then cancel the pending transfer.
+ // The callback on the rxaio will cause the user aio to
+ // be canceled too.
+ if (nni_list_first(&p->recvq) == aio) {
+ nni_aio_abort(p->rxaio, rv);
+ nni_mtx_unlock(&p->mtx);
+ return;
+ }
+ nni_aio_list_remove(aio);
+ nni_mtx_unlock(&p->mtx);
+ nni_aio_finish_error(aio, rv);
+}
+
+// Begin a new message receive by reading the 8-byte length header.
+// Caller must hold p->mtx; no message read may be in progress
+// (asserted via p->rxmsg == NULL).
+static void
+tcptran_pipe_recv_start(tcptran_pipe *p)
+{
+ nni_aio *rxaio;
+ nni_iov iov;
+ NNI_ASSERT(p->rxmsg == NULL);
+
+ // Closed pipe: fail every queued receive.
+ if (p->closed) {
+ nni_aio *aio;
+ while ((aio = nni_list_first(&p->recvq)) != NULL) {
+ nni_list_remove(&p->recvq, aio);
+ nni_aio_finish_error(aio, NNG_ECLOSED);
+ }
+ return;
+ }
+ // Nothing waiting; the next tcptran_pipe_recv will start us.
+ if (nni_list_empty(&p->recvq)) {
+ return;
+ }
+
+ // Schedule a read of the header.
+ rxaio = p->rxaio;
+ iov.iov_buf = p->rxlen;
+ iov.iov_len = sizeof(p->rxlen);
+ nni_aio_set_iov(rxaio, 1, &iov);
+
+ nng_stream_recv(p->conn, rxaio);
+}
+
+// Public receive entry point for the pipe.  Queues the user aio; if it
+// is the only entry on the recvq, starts reading immediately.
+static void
+tcptran_pipe_recv(void *arg, nni_aio *aio)
+{
+ tcptran_pipe *p = arg;
+ int rv;
+
+ // If begin fails the aio was already terminated; nothing to do.
+ if (nni_aio_begin(aio) != 0) {
+ return;
+ }
+ nni_mtx_lock(&p->mtx);
+ if ((rv = nni_aio_schedule(aio, tcptran_pipe_recv_cancel, p)) != 0) {
+ nni_mtx_unlock(&p->mtx);
+ nni_aio_finish_error(aio, rv);
+ return;
+ }
+
+ nni_list_append(&p->recvq, aio);
+ if (nni_list_first(&p->recvq) == aio) {
+ tcptran_pipe_recv_start(p);
+ }
+ nni_mtx_unlock(&p->mtx);
+}
+
+// Report the peer's SP protocol identifier as learned during the
+// connection negotiation.
+static uint16_t
+tcptran_pipe_peer(void *arg)
+{
+ return (((tcptran_pipe *) arg)->peer);
+}
+
+// Pipe option getter: all pipe options are serviced directly by the
+// underlying stream.
+static int
+tcptran_pipe_getopt(
+ void *arg, const char *name, void *buf, size_t *szp, nni_type t)
+{
+ tcptran_pipe *pipe = arg;
+
+ return (nni_stream_get(pipe->conn, name, buf, szp, t));
+}
+
+// Attach a freshly connected/accepted stream to the pipe and begin the
+// SP protocol negotiation by sending our 8-byte handshake header.
+// Caller must hold ep->mtx (the pipe is appended to ep->negopipes).
+static void
+tcptran_pipe_start(tcptran_pipe *p, nng_stream *conn, tcptran_ep *ep)
+{
+ nni_iov iov;
+
+ // The ep holds a reference for each pipe it is tracking.
+ ep->refcnt++;
+
+ p->conn = conn;
+ p->ep = ep;
+ p->proto = ep->proto;
+
+ // SP handshake header: "\0SP\0" + 16-bit protocol id + 16-bit zero.
+ p->txlen[0] = 0;
+ p->txlen[1] = 'S';
+ p->txlen[2] = 'P';
+ p->txlen[3] = 0;
+ NNI_PUT16(&p->txlen[4], p->proto);
+ NNI_PUT16(&p->txlen[6], 0);
+
+ p->gotrxhead = 0;
+ p->gottxhead = 0;
+ p->wantrxhead = 8;
+ p->wanttxhead = 8;
+ iov.iov_len = 8;
+ iov.iov_buf = &p->txlen[0];
+ nni_aio_set_iov(p->negoaio, 1, &iov);
+ nni_list_append(&ep->negopipes, p);
+
+ nni_aio_set_timeout(p->negoaio, 10000); // 10 sec timeout to negotiate
+ nng_stream_send(p->conn, p->negoaio);
+}
+
+// Tear down an endpoint.  If pipes still hold references we only mark
+// ep->fini and return; presumably the last pipe release re-invokes the
+// teardown once refcnt drops to zero -- confirm against pipe reaping.
+static void
+tcptran_ep_fini(void *arg)
+{
+ tcptran_ep *ep = arg;
+
+ nni_mtx_lock(&ep->mtx);
+ ep->fini = true;
+ if (ep->refcnt != 0) {
+ nni_mtx_unlock(&ep->mtx);
+ return;
+ }
+ nni_mtx_unlock(&ep->mtx);
+ // Stop callbacks before freeing anything they might touch.
+ nni_aio_stop(ep->timeaio);
+ nni_aio_stop(ep->connaio);
+ nng_stream_dialer_free(ep->dialer);
+ nng_stream_listener_free(ep->listener);
+ nni_aio_free(ep->timeaio);
+ nni_aio_free(ep->connaio);
+
+ nni_mtx_fini(&ep->mtx);
+ NNI_FREE_STRUCT(ep);
+}
+
+// Close the endpoint: shut down the dialer/listener, close every pipe
+// in all three tracking lists, and fail any pending user accept/dial.
+static void
+tcptran_ep_close(void *arg)
+{
+ tcptran_ep * ep = arg;
+ tcptran_pipe *p;
+
+ nni_mtx_lock(&ep->mtx);
+
+ ep->closed = true;
+ nni_aio_close(ep->timeaio);
+ if (ep->dialer != NULL) {
+ nng_stream_dialer_close(ep->dialer);
+ }
+ if (ep->listener != NULL) {
+ nng_stream_listener_close(ep->listener);
+ }
+ NNI_LIST_FOREACH (&ep->negopipes, p) {
+ tcptran_pipe_close(p);
+ }
+ NNI_LIST_FOREACH (&ep->waitpipes, p) {
+ tcptran_pipe_close(p);
+ }
+ NNI_LIST_FOREACH (&ep->busypipes, p) {
+ tcptran_pipe_close(p);
+ }
+ // Wake any user waiting on a connect/accept with ECLOSED.
+ if (ep->useraio != NULL) {
+ nni_aio_finish_error(ep->useraio, NNG_ECLOSED);
+ ep->useraio = NULL;
+ }
+
+ nni_mtx_unlock(&ep->mtx);
+}
+
+// This parses off the optional source address that this transport uses.
+// The special handling of this URL format is quite honestly an historical
+// mistake, which we would remove if we could.
+//
+// On success, *url is a copy of *surl with any "source;" prefix removed
+// from the hostname, and *sa holds the resolved source address (or is
+// zeroed, family NNG_AF_UNSPEC, when no source was given).
+static int
+tcptran_url_parse_source(nng_url *url, nng_sockaddr *sa, const nng_url *surl)
+{
+ int af;
+ char * semi;
+ char * src;
+ size_t len;
+ int rv;
+ nni_aio *aio;
+
+ // We modify the URL. This relies on the fact that the underlying
+ // transport does not free this, so we can just use references.
+
+ url->u_scheme = surl->u_scheme;
+ url->u_port = surl->u_port;
+ url->u_hostname = surl->u_hostname;
+
+ // No semicolon means no source address was supplied.
+ if ((semi = strchr(url->u_hostname, ';')) == NULL) {
+ memset(sa, 0, sizeof(*sa));
+ return (0);
+ }
+
+ len = (size_t)(semi - url->u_hostname);
+ url->u_hostname = semi + 1;
+
+ // The scheme constrains which address family we resolve into.
+ if (strcmp(surl->u_scheme, "tcp") == 0) {
+ af = NNG_AF_UNSPEC;
+ } else if (strcmp(surl->u_scheme, "tcp4") == 0) {
+ af = NNG_AF_INET;
+ } else if (strcmp(surl->u_scheme, "tcp6") == 0) {
+ af = NNG_AF_INET6;
+ } else {
+ return (NNG_EADDRINVAL);
+ }
+
+ if ((src = nni_alloc(len + 1)) == NULL) {
+ return (NNG_ENOMEM);
+ }
+ memcpy(src, surl->u_hostname, len);
+ src[len] = '\0';
+
+ if ((rv = nni_aio_alloc(&aio, NULL, NULL)) != 0) {
+ nni_free(src, len + 1);
+ return (rv);
+ }
+
+ nni_resolv_ip(src, "0", af, true, sa, aio);
+ nni_aio_wait(aio);
+ // BUG FIX: capture the resolver's result.  Previously rv still held
+ // the (successful) nni_aio_alloc result, so resolution failures such
+ // as a bogus source interface were silently reported as success.
+ rv = nni_aio_result(aio);
+ nni_aio_free(aio);
+ nni_free(src, len + 1);
+ return (rv);
+}
+
+// Timer callback used to retry accepts after a transient failure
+// (e.g. descriptor exhaustion).  If the sleep was aborted, do nothing.
+static void
+tcptran_timer_cb(void *arg)
+{
+ tcptran_ep *ep = arg;
+
+ if (nni_aio_result(ep->timeaio) != 0) {
+ return;
+ }
+ nng_stream_listener_accept(ep->listener, ep->connaio);
+}
+
+// Completion callback for the listener's accept aio.  On success wraps
+// the new stream in a pipe, starts negotiation, and re-arms the accept.
+// On failure, reports to any waiting user and decides whether to retry.
+static void
+tcptran_accept_cb(void *arg)
+{
+ tcptran_ep * ep = arg;
+ nni_aio * aio = ep->connaio;
+ tcptran_pipe *p;
+ int rv;
+ nng_stream * conn;
+
+ nni_mtx_lock(&ep->mtx);
+
+ if ((rv = nni_aio_result(aio)) != 0) {
+ goto error;
+ }
+
+ conn = nni_aio_get_output(aio, 0);
+ if ((rv = tcptran_pipe_alloc(&p)) != 0) {
+ nng_stream_free(conn);
+ goto error;
+ }
+
+ // Endpoint closed while the accept was in flight.
+ if (ep->closed) {
+ tcptran_pipe_fini(p);
+ nng_stream_free(conn);
+ rv = NNG_ECLOSED;
+ goto error;
+ }
+ tcptran_pipe_start(p, conn, ep);
+ // Keep accepting further connections.
+ nng_stream_listener_accept(ep->listener, ep->connaio);
+ nni_mtx_unlock(&ep->mtx);
+ return;
+
+error:
+ // When an error here occurs, let's send a notice up to the consumer.
+ // That way it can be reported properly.
+ if ((aio = ep->useraio) != NULL) {
+ ep->useraio = NULL;
+ nni_aio_finish_error(aio, rv);
+ }
+ switch (rv) {
+
+ // Resource exhaustion: back off briefly before re-arming the
+ // accept (the timer callback restarts it).
+ case NNG_ENOMEM:
+ case NNG_ENOFILES:
+ nng_sleep_aio(10, ep->timeaio);
+ break;
+
+ default:
+ if (!ep->closed) {
+ nng_stream_listener_accept(ep->listener, ep->connaio);
+ }
+ break;
+ }
+ nni_mtx_unlock(&ep->mtx);
+}
+
+// Completion callback for the dialer's connect aio.  On success wraps
+// the stream in a pipe and starts negotiation; on failure reports the
+// error straight back to the waiting user aio.
+static void
+tcptran_dial_cb(void *arg)
+{
+ tcptran_ep * ep = arg;
+ nni_aio * aio = ep->connaio;
+ tcptran_pipe *p;
+ int rv;
+ nng_stream * conn;
+
+ if ((rv = nni_aio_result(aio)) != 0) {
+ goto error;
+ }
+
+ conn = nni_aio_get_output(aio, 0);
+ if ((rv = tcptran_pipe_alloc(&p)) != 0) {
+ nng_stream_free(conn);
+ goto error;
+ }
+ nni_mtx_lock(&ep->mtx);
+ if (ep->closed) {
+ // Endpoint closed while connecting; discard the new pipe.
+ tcptran_pipe_fini(p);
+ nng_stream_free(conn);
+ rv = NNG_ECLOSED;
+ nni_mtx_unlock(&ep->mtx);
+ goto error;
+ } else {
+ tcptran_pipe_start(p, conn, ep);
+ }
+ nni_mtx_unlock(&ep->mtx);
+ return;
+
+error:
+ // Error connecting. We need to pass this straight back
+ // to the user.
+ nni_mtx_lock(&ep->mtx);
+ if ((aio = ep->useraio) != NULL) {
+ ep->useraio = NULL;
+ nni_aio_finish_error(aio, rv);
+ }
+ nni_mtx_unlock(&ep->mtx);
+}
+
+// Allocate and initialize the state common to dialers and listeners.
+// On success *epp owns the new endpoint; the url pointer is referenced,
+// not copied.
+static int
+tcptran_ep_init(tcptran_ep **epp, nng_url *url, nni_sock *sock)
+{
+ tcptran_ep *ep;
+
+ if ((ep = NNI_ALLOC_STRUCT(ep)) == NULL) {
+ return (NNG_ENOMEM);
+ }
+ nni_mtx_init(&ep->mtx);
+ NNI_LIST_INIT(&ep->busypipes, tcptran_pipe, node);
+ NNI_LIST_INIT(&ep->waitpipes, tcptran_pipe, node);
+ NNI_LIST_INIT(&ep->negopipes, tcptran_pipe, node);
+
+ ep->proto = nni_sock_proto_id(sock);
+ ep->url = url;
+
+#ifdef NNG_ENABLE_STATS
+ // Statistic tracking the configured maximum receive size.
+ static const nni_stat_info rcv_max_info = {
+ .si_name = "rcv_max",
+ .si_desc = "maximum receive size",
+ .si_type = NNG_STAT_LEVEL,
+ .si_unit = NNG_UNIT_BYTES,
+ .si_atomic = true,
+ };
+ nni_stat_init(&ep->st_rcv_max, &rcv_max_info);
+#endif
+
+ *epp = ep;
+ return (0);
+}
+
+// Initialize a dialer endpoint: validate the URL, split off any
+// "source;" local-address prefix, and allocate the connect machinery.
+static int
+tcptran_dialer_init(void **dp, nng_url *url, nni_dialer *ndialer)
+{
+ tcptran_ep * ep;
+ int rv;
+ nng_sockaddr srcsa;
+ nni_sock * sock = nni_dialer_sock(ndialer);
+ nng_url myurl;
+
+ // Check for invalid URL components.
+ if ((strlen(url->u_path) != 0) && (strcmp(url->u_path, "/") != 0)) {
+ return (NNG_EADDRINVAL);
+ }
+ if ((url->u_fragment != NULL) || (url->u_userinfo != NULL) ||
+ (url->u_query != NULL) || (strlen(url->u_hostname) == 0) ||
+ (strlen(url->u_port) == 0)) {
+ return (NNG_EADDRINVAL);
+ }
+
+ if ((rv = tcptran_url_parse_source(&myurl, &srcsa, url)) != 0) {
+ return (rv);
+ }
+
+ if ((rv = tcptran_ep_init(&ep, url, sock)) != 0) {
+ return (rv);
+ }
+
+ // (Removed a dead "(rv != 0) ||" term here: rv is always zero at
+ // this point, since every earlier failure returned immediately.)
+ if (((rv = nni_aio_alloc(&ep->connaio, tcptran_dial_cb, ep)) != 0) ||
+ ((rv = nng_stream_dialer_alloc_url(&ep->dialer, &myurl)) != 0)) {
+ tcptran_ep_fini(ep);
+ return (rv);
+ }
+ // If a source address was supplied, bind the dialer locally to it.
+ if ((srcsa.s_family != NNG_AF_UNSPEC) &&
+ ((rv = nni_stream_dialer_set(ep->dialer, NNG_OPT_LOCADDR, &srcsa,
+ sizeof(srcsa), NNI_TYPE_SOCKADDR)) != 0)) {
+ tcptran_ep_fini(ep);
+ return (rv);
+ }
+
+#ifdef NNG_ENABLE_STATS
+ nni_dialer_add_stat(ndialer, &ep->st_rcv_max);
+#endif
+ *dp = ep;
+ return (0);
+}
+
+// Initialize a listener endpoint: validate the URL and allocate the
+// accept/retry machinery.  Unlike the dialer, an empty hostname or
+// port is allowed (wildcard binds).
+static int
+tcptran_listener_init(void **lp, nng_url *url, nni_listener *nlistener)
+{
+ tcptran_ep *ep;
+ int rv;
+ nni_sock * sock = nni_listener_sock(nlistener);
+
+ // Check for invalid URL components.
+ if ((strlen(url->u_path) != 0) && (strcmp(url->u_path, "/") != 0)) {
+ return (NNG_EADDRINVAL);
+ }
+ if ((url->u_fragment != NULL) || (url->u_userinfo != NULL) ||
+ (url->u_query != NULL)) {
+ return (NNG_EADDRINVAL);
+ }
+
+ if ((rv = tcptran_ep_init(&ep, url, sock)) != 0) {
+ return (rv);
+ }
+
+ if (((rv = nni_aio_alloc(&ep->connaio, tcptran_accept_cb, ep)) != 0) ||
+ ((rv = nni_aio_alloc(&ep->timeaio, tcptran_timer_cb, ep)) != 0) ||
+ ((rv = nng_stream_listener_alloc_url(&ep->listener, url)) != 0)) {
+ tcptran_ep_fini(ep);
+ return (rv);
+ }
+#ifdef NNG_ENABLE_STATS
+ nni_listener_add_stat(nlistener, &ep->st_rcv_max);
+#endif
+
+ *lp = ep;
+ return (0);
+}
+
+// Cancellation routine for a pending user connect/accept aio.  Only
+// acts if the aio is still the endpoint's registered user aio.
+static void
+tcptran_ep_cancel(nni_aio *aio, void *arg, int rv)
+{
+ tcptran_ep *ep = arg;
+
+ nni_mtx_lock(&ep->mtx);
+ if (ep->useraio != aio) {
+ nni_mtx_unlock(&ep->mtx);
+ return;
+ }
+ ep->useraio = NULL;
+ nni_aio_finish_error(aio, rv);
+ nni_mtx_unlock(&ep->mtx);
+}
+
+// Public connect entry point for the dialer endpoint.  Only one user
+// connect may be outstanding at a time (NNG_EBUSY otherwise).
+static void
+tcptran_ep_connect(void *arg, nni_aio *aio)
+{
+ tcptran_ep *ep = arg;
+ int rv;
+
+ // If begin fails the aio was already terminated; nothing to do.
+ if (nni_aio_begin(aio) != 0) {
+ return;
+ }
+ nni_mtx_lock(&ep->mtx);
+ if (ep->closed) {
+ nni_mtx_unlock(&ep->mtx);
+ nni_aio_finish_error(aio, NNG_ECLOSED);
+ return;
+ }
+ if (ep->useraio != NULL) {
+ nni_mtx_unlock(&ep->mtx);
+ nni_aio_finish_error(aio, NNG_EBUSY);
+ return;
+ }
+ if ((rv = nni_aio_schedule(aio, tcptran_ep_cancel, ep)) != 0) {
+ nni_mtx_unlock(&ep->mtx);
+ nni_aio_finish_error(aio, rv);
+ return;
+ }
+ ep->useraio = aio;
+
+ // tcptran_dial_cb completes (or fails) the user aio.
+ nng_stream_dialer_dial(ep->dialer, ep->connaio);
+ nni_mtx_unlock(&ep->mtx);
+}
+
+// NNG_OPT_URL getter: reconstructs the endpoint URL, substituting the
+// actual bound port for listeners (useful after binding to port 0).
+static int
+tcptran_ep_get_url(void *arg, void *v, size_t *szp, nni_opt_type t)
+{
+ tcptran_ep *ep = arg;
+ char * s;
+ int rv;
+ int port = 0;
+
+ // Best effort: on failure port stays 0 and the URL's own port is
+ // used by nni_url_asprintf_port.
+ if (ep->listener != NULL) {
+ (void) nng_stream_listener_get_int(
+ ep->listener, NNG_OPT_TCP_BOUND_PORT, &port);
+ }
+
+ if ((rv = nni_url_asprintf_port(&s, ep->url, port)) == 0) {
+ rv = nni_copyout_str(s, v, szp, t);
+ nni_strfree(s);
+ }
+ return (rv);
+}
+
+// NNG_OPT_RECVMAXSZ getter: snapshot the limit under the lock, then
+// copy it out to the caller.
+static int
+tcptran_ep_get_recvmaxsz(void *arg, void *v, size_t *szp, nni_opt_type t)
+{
+ tcptran_ep *ep = arg;
+ size_t maxsz;
+
+ nni_mtx_lock(&ep->mtx);
+ maxsz = ep->rcvmax;
+ nni_mtx_unlock(&ep->mtx);
+ return (nni_copyout_size(maxsz, v, szp, t));
+}
+
+// NNG_OPT_RECVMAXSZ setter: validates the size, then applies it to the
+// endpoint and to every pipe in all three tracking lists so existing
+// connections honor the new limit too.
+static int
+tcptran_ep_set_recvmaxsz(void *arg, const void *v, size_t sz, nni_opt_type t)
+{
+ tcptran_ep *ep = arg;
+ size_t val;
+ int rv;
+ if ((rv = nni_copyin_size(&val, v, sz, 0, NNI_MAXSZ, t)) == 0) {
+ tcptran_pipe *p;
+ nni_mtx_lock(&ep->mtx);
+ ep->rcvmax = val;
+ NNI_LIST_FOREACH (&ep->waitpipes, p) {
+ p->rcvmax = val;
+ }
+ NNI_LIST_FOREACH (&ep->negopipes, p) {
+ p->rcvmax = val;
+ }
+ NNI_LIST_FOREACH (&ep->busypipes, p) {
+ p->rcvmax = val;
+ }
+ nni_mtx_unlock(&ep->mtx);
+#ifdef NNG_ENABLE_STATS
+ nni_stat_set_value(&ep->st_rcv_max, val);
+#endif
+ }
+ return (rv);
+}
+
+// Bind the listener endpoint to its configured address.
+static int
+tcptran_ep_bind(void *arg)
+{
+ tcptran_ep *ep = arg;
+ int rv;
+
+ nni_mtx_lock(&ep->mtx);
+ rv = nng_stream_listener_listen(ep->listener);
+ nni_mtx_unlock(&ep->mtx);
+
+ return (rv);
+}
+
+// Public accept entry point for the listener endpoint.  Only one user
+// accept may be outstanding at a time.  The first accept arms the
+// stream listener; later ones try to match an already-negotiated pipe.
+static void
+tcptran_ep_accept(void *arg, nni_aio *aio)
+{
+ tcptran_ep *ep = arg;
+ int rv;
+
+ // If begin fails the aio was already terminated; nothing to do.
+ if (nni_aio_begin(aio) != 0) {
+ return;
+ }
+ nni_mtx_lock(&ep->mtx);
+ if (ep->closed) {
+ nni_mtx_unlock(&ep->mtx);
+ nni_aio_finish_error(aio, NNG_ECLOSED);
+ return;
+ }
+ if (ep->useraio != NULL) {
+ nni_mtx_unlock(&ep->mtx);
+ nni_aio_finish_error(aio, NNG_EBUSY);
+ return;
+ }
+ if ((rv = nni_aio_schedule(aio, tcptran_ep_cancel, ep)) != 0) {
+ nni_mtx_unlock(&ep->mtx);
+ nni_aio_finish_error(aio, rv);
+ return;
+ }
+ ep->useraio = aio;
+ if (!ep->started) {
+ ep->started = true;
+ nng_stream_listener_accept(ep->listener, ep->connaio);
+ } else {
+ // Hand over a pipe that already finished negotiation, if any.
+ tcptran_ep_match(ep);
+ }
+ nni_mtx_unlock(&ep->mtx);
+}
+
+// Pipe operations vector registered with the SP core.
+static nni_tran_pipe_ops tcptran_pipe_ops = {
+ .p_init = tcptran_pipe_init,
+ .p_fini = tcptran_pipe_fini,
+ .p_stop = tcptran_pipe_stop,
+ .p_send = tcptran_pipe_send,
+ .p_recv = tcptran_pipe_recv,
+ .p_close = tcptran_pipe_close,
+ .p_peer = tcptran_pipe_peer,
+ .p_getopt = tcptran_pipe_getopt,
+};
+
+// Transport-level endpoint options, consulted when the underlying
+// stream object does not recognize the option name.
+static const nni_option tcptran_ep_opts[] = {
+ {
+ .o_name = NNG_OPT_RECVMAXSZ,
+ .o_get = tcptran_ep_get_recvmaxsz,
+ .o_set = tcptran_ep_set_recvmaxsz,
+ },
+ {
+ .o_name = NNG_OPT_URL,
+ .o_get = tcptran_ep_get_url,
+ },
+ // terminate list
+ {
+ .o_name = NULL,
+ },
+};
+
+// Dialer option getter: the stream dialer gets first crack; anything
+// it does not support falls through to the transport-level options.
+static int
+tcptran_dialer_getopt(
+ void *arg, const char *name, void *buf, size_t *szp, nni_type t)
+{
+ tcptran_ep *ep = arg;
+ int rv = nni_stream_dialer_get(ep->dialer, name, buf, szp, t);
+
+ if (rv != NNG_ENOTSUP) {
+ return (rv);
+ }
+ return (nni_getopt(tcptran_ep_opts, name, ep, buf, szp, t));
+}
+
+// Dialer option setter: the stream dialer gets first crack; anything
+// it does not support falls through to the transport-level options.
+static int
+tcptran_dialer_setopt(
+ void *arg, const char *name, const void *buf, size_t sz, nni_type t)
+{
+ tcptran_ep *ep = arg;
+ int rv = nni_stream_dialer_set(ep->dialer, name, buf, sz, t);
+
+ if (rv != NNG_ENOTSUP) {
+ return (rv);
+ }
+ return (nni_setopt(tcptran_ep_opts, name, ep, buf, sz, t));
+}
+
+// Listener option getter: the stream listener gets first crack;
+// anything it does not support falls through to the transport options.
+static int
+tcptran_listener_getopt(
+ void *arg, const char *name, void *buf, size_t *szp, nni_type t)
+{
+ tcptran_ep *ep = arg;
+ int rv = nni_stream_listener_get(ep->listener, name, buf, szp, t);
+
+ if (rv != NNG_ENOTSUP) {
+ return (rv);
+ }
+ return (nni_getopt(tcptran_ep_opts, name, ep, buf, szp, t));
+}
+
+// Listener option setter: the stream listener gets first crack;
+// anything it does not support falls through to the transport options.
+static int
+tcptran_listener_setopt(
+ void *arg, const char *name, const void *buf, size_t sz, nni_type t)
+{
+ tcptran_ep *ep = arg;
+ int rv = nni_stream_listener_set(ep->listener, name, buf, sz, t);
+
+ if (rv != NNG_ENOTSUP) {
+ return (rv);
+ }
+ return (nni_setopt(tcptran_ep_opts, name, ep, buf, sz, t));
+}
+
+// Dialer operations vector registered with the SP core.
+static nni_tran_dialer_ops tcptran_dialer_ops = {
+ .d_init = tcptran_dialer_init,
+ .d_fini = tcptran_ep_fini,
+ .d_connect = tcptran_ep_connect,
+ .d_close = tcptran_ep_close,
+ .d_getopt = tcptran_dialer_getopt,
+ .d_setopt = tcptran_dialer_setopt,
+};
+
+// Listener operations vector registered with the SP core.
+static nni_tran_listener_ops tcptran_listener_ops = {
+ .l_init = tcptran_listener_init,
+ .l_fini = tcptran_ep_fini,
+ .l_bind = tcptran_ep_bind,
+ .l_accept = tcptran_ep_accept,
+ .l_close = tcptran_ep_close,
+ .l_getopt = tcptran_listener_getopt,
+ .l_setopt = tcptran_listener_setopt,
+};
+
+// Transport descriptors.  All three schemes ("tcp", "tcp4", "tcp6")
+// share the same ops vectors; the scheme only selects the address
+// family during URL parsing/resolution.
+static nni_tran tcp_tran = {
+ .tran_version = NNI_TRANSPORT_VERSION,
+ .tran_scheme = "tcp",
+ .tran_dialer = &tcptran_dialer_ops,
+ .tran_listener = &tcptran_listener_ops,
+ .tran_pipe = &tcptran_pipe_ops,
+ .tran_init = tcptran_init,
+ .tran_fini = tcptran_fini,
+};
+
+static nni_tran tcp4_tran = {
+ .tran_version = NNI_TRANSPORT_VERSION,
+ .tran_scheme = "tcp4",
+ .tran_dialer = &tcptran_dialer_ops,
+ .tran_listener = &tcptran_listener_ops,
+ .tran_pipe = &tcptran_pipe_ops,
+ .tran_init = tcptran_init,
+ .tran_fini = tcptran_fini,
+};
+
+static nni_tran tcp6_tran = {
+ .tran_version = NNI_TRANSPORT_VERSION,
+ .tran_scheme = "tcp6",
+ .tran_dialer = &tcptran_dialer_ops,
+ .tran_listener = &tcptran_listener_ops,
+ .tran_pipe = &tcptran_pipe_ops,
+ .tran_init = tcptran_init,
+ .tran_fini = tcptran_fini,
+};
+
+// Register all three TCP transport variants with the SP core,
+// returning the first registration failure (if any).
+int
+nng_tcp_register(void)
+{
+ int rv;
+
+ if ((rv = nni_tran_register(&tcp_tran)) != 0) {
+ return (rv);
+ }
+ if ((rv = nni_tran_register(&tcp4_tran)) != 0) {
+ return (rv);
+ }
+ return (nni_tran_register(&tcp6_tran));
+}
diff --git a/src/sp/transport/tcp/tcp_test.c b/src/sp/transport/tcp/tcp_test.c
new file mode 100644
index 00000000..d23227d7
--- /dev/null
+++ b/src/sp/transport/tcp/tcp_test.c
@@ -0,0 +1,297 @@
+//
+// Copyright 2020 Staysail Systems, Inc. <info@staysail.tech>
+// Copyright 2018 Capitar IT Group BV <info@capitar.com>
+// Copyright 2018 Devolutions <info@devolutions.net>
+// Copyright 2018 Cody Piersall <cody.piersall@gmail.com>
+//
+// This software is supplied under the terms of the MIT License, a
+// copy of which should be located in the distribution where this
+// file was obtained (LICENSE.txt). A copy of the license may also be
+// found online at https://opensource.org/licenses/MIT.
+//
+
+
+#include <nuts.h>
+
+// TCP tests.
+
+// Dialing a wildcard ("*") address is invalid and must be rejected.
+static void
+test_tcp_wild_card_connect_fail(void)
+{
+ nng_socket s;
+ char addr[NNG_MAXADDRLEN];
+
+ NUTS_OPEN(s);
+ (void) snprintf(addr, sizeof(addr), "tcp://*:%u", nuts_next_port());
+ NUTS_FAIL(nng_dial(s, addr, NULL, 0), NNG_EADDRINVAL);
+ NUTS_CLOSE(s);
+}
+
+// A listener bound to the wildcard address must accept a connection
+// made to the loopback address on the same port.
+void
+test_tcp_wild_card_bind(void)
+{
+ nng_socket s1;
+ nng_socket s2;
+ char addr[NNG_MAXADDRLEN];
+ uint16_t port;
+
+ port = nuts_next_port();
+
+ NUTS_OPEN(s1);
+ NUTS_OPEN(s2);
+ (void) snprintf(addr, sizeof(addr), "tcp4://*:%u", port);
+ NUTS_PASS(nng_listen(s1, addr, NULL, 0));
+ (void) snprintf(addr, sizeof(addr), "tcp://127.0.0.1:%u", port);
+ NUTS_PASS(nng_dial(s2, addr, NULL, 0));
+ NUTS_CLOSE(s2);
+ NUTS_CLOSE(s1);
+}
+
+// Dialing with the "local;remote" URL form should succeed when the
+// local (source) address is a valid local interface.
+void
+test_tcp_local_address_connect(void)
+{
+
+ nng_socket s1;
+ nng_socket s2;
+ char addr[NNG_MAXADDRLEN];
+ uint16_t port;
+
+ NUTS_OPEN(s1);
+ NUTS_OPEN(s2);
+ port = nuts_next_port();
+ (void) snprintf(addr, sizeof(addr), "tcp://127.0.0.1:%u", port);
+ NUTS_PASS(nng_listen(s1, addr, NULL, 0));
+ (void) snprintf(
+ addr, sizeof(addr), "tcp://127.0.0.1;127.0.0.1:%u", port);
+ NUTS_PASS(nng_dial(s2, addr, NULL, 0));
+ NUTS_CLOSE(s2);
+ NUTS_CLOSE(s1);
+}
+
+// Binding to port 0 should pick an ephemeral port, which must be
+// reflected in the NNG_OPT_URL and NNG_OPT_LOCADDR values, and must
+// be dialable.
+void
+test_tcp_port_zero_bind(void)
+{
+ nng_socket s1;
+ nng_socket s2;
+ nng_sockaddr sa;
+ nng_listener l;
+ char * addr;
+
+ NUTS_OPEN(s1);
+ NUTS_OPEN(s2);
+ NUTS_PASS(nng_listen(s1, "tcp://127.0.0.1:0", &l, 0));
+ NUTS_PASS(nng_listener_get_string(l, NNG_OPT_URL, &addr));
+ NUTS_TRUE(memcmp(addr, "tcp://", 6) == 0);
+ NUTS_PASS(nng_listener_get_addr(l, NNG_OPT_LOCADDR, &sa));
+ NUTS_TRUE(sa.s_in.sa_family == NNG_AF_INET);
+ NUTS_TRUE(sa.s_in.sa_port != 0);
+ // BUG FIX: this used "=" (assignment), which made the check a
+ // tautology; compare against 127.0.0.1 with "==" instead.
+ NUTS_TRUE(sa.s_in.sa_addr == nuts_be32(0x7f000001));
+ NUTS_PASS(nng_dial(s2, addr, NULL, 0));
+ nng_strfree(addr);
+ NUTS_CLOSE(s2);
+ NUTS_CLOSE(s1);
+}
+
+// A non-resolvable source interface in the "local;remote" form must
+// fail with NNG_EADDRINVAL.
+void
+test_tcp_bad_local_interface(void)
+{
+ nng_socket s1;
+
+ NUTS_OPEN(s1);
+ NUTS_FAIL(nng_dial(s1, "tcp://bogus1;127.0.0.1:80", NULL, 0),
+ NNG_EADDRINVAL);
+ NUTS_CLOSE(s1);
+}
+
+// A source address that is not local to this host must be rejected
+// with NNG_EADDRINVAL.
+void
+test_tcp_non_local_address(void)
+{
+ nng_socket s1;
+
+ NUTS_OPEN(s1);
+ NUTS_FAIL(nng_dial(s1, "tcp://8.8.8.8;127.0.0.1:80", NULL, 0),
+ NNG_EADDRINVAL);
+ NUTS_CLOSE(s1);
+}
+
+// Various malformed TCP URLs (missing port, dotted-quad garbage) must
+// be rejected by both dial and listen with NNG_EADDRINVAL.
+void
+test_tcp_malformed_address(void)
+{
+ nng_socket s1;
+
+ NUTS_OPEN(s1);
+ NUTS_FAIL(
+ nng_dial(s1, "tcp://127.0.0.1", NULL, 0), NNG_EADDRINVAL);
+ NUTS_FAIL(
+ nng_dial(s1, "tcp://127.0.0.1.32", NULL, 0), NNG_EADDRINVAL);
+ NUTS_FAIL(
+ nng_dial(s1, "tcp://127.0.x.1.32", NULL, 0), NNG_EADDRINVAL);
+ NUTS_FAIL(
+ nng_listen(s1, "tcp://127.0.0.1.32", NULL, 0), NNG_EADDRINVAL);
+ NUTS_FAIL(
+ nng_listen(s1, "tcp://127.0.x.1.32", NULL, 0), NNG_EADDRINVAL);
+ NUTS_CLOSE(s1);
+}
+
+// Exercise NNG_OPT_TCP_NODELAY: defaults to true, settable per dialer
+// and listener, rejects int-typed accesses, and (where the deprecated
+// socket-level API exists) socket-wide defaults propagate to dialers.
+void
+test_tcp_no_delay_option(void)
+{
+ nng_socket s;
+ nng_dialer d;
+ nng_listener l;
+ bool v;
+ int x;
+ char *addr;
+
+ NUTS_ADDR(addr, "tcp");
+
+ NUTS_OPEN(s);
+#ifndef NNG_ELIDE_DEPRECATED
+ NUTS_PASS(nng_socket_get_bool(s, NNG_OPT_TCP_NODELAY, &v));
+ NUTS_TRUE(v);
+#endif
+ NUTS_PASS(nng_dialer_create(&d, s, addr));
+ NUTS_PASS(nng_dialer_get_bool(d, NNG_OPT_TCP_NODELAY, &v));
+ NUTS_TRUE(v);
+ NUTS_PASS(nng_dialer_set_bool(d, NNG_OPT_TCP_NODELAY, false));
+ NUTS_PASS(nng_dialer_get_bool(d, NNG_OPT_TCP_NODELAY, &v));
+ NUTS_TRUE(v == false);
+ // Type mismatches must be reported as NNG_EBADTYPE.
+ NUTS_FAIL(
+ nng_dialer_get_int(d, NNG_OPT_TCP_NODELAY, &x), NNG_EBADTYPE);
+ x = 0;
+ NUTS_FAIL(
+ nng_dialer_set_int(d, NNG_OPT_TCP_NODELAY, x), NNG_EBADTYPE);
+ // This assumes sizeof (bool) != sizeof (int)
+ if (sizeof(bool) != sizeof(int)) {
+ NUTS_FAIL(
+ nng_dialer_set(d, NNG_OPT_TCP_NODELAY, &x, sizeof(x)),
+ NNG_EINVAL);
+ }
+
+ NUTS_PASS(nng_listener_create(&l, s, addr));
+ NUTS_PASS(nng_listener_get_bool(l, NNG_OPT_TCP_NODELAY, &v));
+ NUTS_TRUE(v == true);
+ x = 0;
+ NUTS_FAIL(
+ nng_listener_set_int(l, NNG_OPT_TCP_NODELAY, x), NNG_EBADTYPE);
+ // This assumes sizeof (bool) != sizeof (int)
+ NUTS_FAIL(nng_listener_set(l, NNG_OPT_TCP_NODELAY, &x, sizeof(x)),
+ NNG_EINVAL);
+
+ NUTS_PASS(nng_dialer_close(d));
+ NUTS_PASS(nng_listener_close(l));
+
+ // Make sure socket wide defaults apply.
+#ifndef NNG_ELIDE_DEPRECATED
+ NUTS_PASS(nng_socket_set_bool(s, NNG_OPT_TCP_NODELAY, true));
+ v = false;
+ NUTS_PASS(nng_socket_get_bool(s, NNG_OPT_TCP_NODELAY, &v));
+ NUTS_TRUE(v);
+ NUTS_PASS(nng_socket_set_bool(s, NNG_OPT_TCP_NODELAY, false));
+ NUTS_PASS(nng_dialer_create(&d, s, addr));
+ NUTS_PASS(nng_dialer_get_bool(d, NNG_OPT_TCP_NODELAY, &v));
+ NUTS_TRUE(v == false);
+#endif
+ NUTS_CLOSE(s);
+}
+
+// Exercise NNG_OPT_TCP_KEEPALIVE: defaults to false, settable per
+// dialer and listener, rejects int-typed accesses, and (where the
+// deprecated socket-level API exists) socket-wide defaults propagate.
+void
+test_tcp_keep_alive_option(void)
+{
+ nng_socket s;
+ nng_dialer d;
+ nng_listener l;
+ bool v;
+ int x;
+ char *addr;
+
+ NUTS_ADDR(addr, "tcp");
+ NUTS_OPEN(s);
+#ifndef NNG_ELIDE_DEPRECATED
+ NUTS_PASS(nng_socket_get_bool(s, NNG_OPT_TCP_KEEPALIVE, &v));
+ NUTS_TRUE(v == false);
+#endif
+ NUTS_PASS(nng_dialer_create(&d, s, addr));
+ NUTS_PASS(nng_dialer_get_bool(d, NNG_OPT_TCP_KEEPALIVE, &v));
+ NUTS_TRUE(v == false);
+ NUTS_PASS(nng_dialer_set_bool(d, NNG_OPT_TCP_KEEPALIVE, true));
+ NUTS_PASS(nng_dialer_get_bool(d, NNG_OPT_TCP_KEEPALIVE, &v));
+ NUTS_TRUE(v);
+ // Type mismatches must be reported as NNG_EBADTYPE.
+ NUTS_FAIL(
+ nng_dialer_get_int(d, NNG_OPT_TCP_KEEPALIVE, &x), NNG_EBADTYPE);
+ x = 1;
+ NUTS_FAIL(
+ nng_dialer_set_int(d, NNG_OPT_TCP_KEEPALIVE, x), NNG_EBADTYPE);
+
+ NUTS_PASS(nng_listener_create(&l, s, addr));
+ NUTS_PASS(nng_listener_get_bool(l, NNG_OPT_TCP_KEEPALIVE, &v));
+ NUTS_TRUE(v == false);
+ x = 1;
+ NUTS_FAIL(
+ nng_listener_set_int(l, NNG_OPT_TCP_KEEPALIVE, x), NNG_EBADTYPE);
+
+ NUTS_PASS(nng_dialer_close(d));
+ NUTS_PASS(nng_listener_close(l));
+
+ // Make sure socket wide defaults apply.
+#ifndef NNG_ELIDE_DEPRECATED
+ NUTS_PASS(nng_socket_set_bool(s, NNG_OPT_TCP_KEEPALIVE, false));
+ v = true;
+ NUTS_PASS(nng_socket_get_bool(s, NNG_OPT_TCP_KEEPALIVE, &v));
+ NUTS_TRUE(v == false);
+ NUTS_PASS(nng_socket_set_bool(s, NNG_OPT_TCP_KEEPALIVE, true));
+ NUTS_PASS(nng_dialer_create(&d, s, addr));
+ NUTS_PASS(nng_dialer_get_bool(d, NNG_OPT_TCP_KEEPALIVE, &v));
+ NUTS_TRUE(v);
+#endif
+ NUTS_CLOSE(s);
+}
+
+// Exercise NNG_OPT_RECVMAXSZ: a listener-level limit (100) overrides
+// the socket-level one (200); a 95-byte message passes, a 150-byte
+// message is dropped (receive times out rather than delivering it).
+void
+test_tcp_recv_max(void)
+{
+ char msg[256];
+ char buf[256];
+ nng_socket s0;
+ nng_socket s1;
+ nng_listener l;
+ size_t sz;
+ char *addr;
+
+ NUTS_ADDR(addr, "tcp");
+
+ NUTS_OPEN(s0);
+ NUTS_PASS(nng_socket_set_ms(s0, NNG_OPT_RECVTIMEO, 100));
+ NUTS_PASS(nng_socket_set_size(s0, NNG_OPT_RECVMAXSZ, 200));
+ NUTS_PASS(nng_listener_create(&l, s0, addr));
+ NUTS_PASS(nng_socket_get_size(s0, NNG_OPT_RECVMAXSZ, &sz));
+ NUTS_TRUE(sz == 200);
+ NUTS_PASS(nng_listener_set_size(l, NNG_OPT_RECVMAXSZ, 100));
+ NUTS_PASS(nng_listener_start(l, 0));
+
+ NUTS_OPEN(s1);
+ NUTS_PASS(nng_dial(s1, addr, NULL, 0));
+ NUTS_PASS(nng_send(s1, msg, 95, 0));
+ NUTS_PASS(nng_socket_set_ms(s1, NNG_OPT_SENDTIMEO, 100));
+ NUTS_PASS(nng_recv(s0, buf, &sz, 0));
+ NUTS_TRUE(sz == 95);
+ // Over the limit: the receiver must never deliver this message.
+ NUTS_PASS(nng_send(s1, msg, 150, 0));
+ NUTS_FAIL(nng_recv(s0, buf, &sz, 0), NNG_ETIMEDOUT);
+ NUTS_PASS(nng_close(s0));
+ NUTS_CLOSE(s1);
+}
+
+// Test registry consumed by the NUTS test runner.
+NUTS_TESTS = {
+
+ { "tcp wild card connect fail", test_tcp_wild_card_connect_fail },
+ { "tcp wild card bind", test_tcp_wild_card_bind },
+ { "tcp port zero bind", test_tcp_port_zero_bind },
+ { "tcp local address connect", test_tcp_local_address_connect },
+ { "tcp bad local interface", test_tcp_bad_local_interface },
+ { "tcp non-local address", test_tcp_non_local_address },
+ { "tcp malformed address", test_tcp_malformed_address },
+ { "tcp no delay option", test_tcp_no_delay_option },
+ { "tcp keep alive option", test_tcp_keep_alive_option },
+ { "tcp recv max", test_tcp_recv_max },
+ { NULL, NULL },
+}; \ No newline at end of file
diff --git a/src/sp/transport/tls/CMakeLists.txt b/src/sp/transport/tls/CMakeLists.txt
new file mode 100644
index 00000000..82f24c79
--- /dev/null
+++ b/src/sp/transport/tls/CMakeLists.txt
@@ -0,0 +1,16 @@
+#
+# Copyright 2020 Staysail Systems, Inc. <info@staysail.tech>
+# Copyright 2018 Capitar IT Group BV <info@capitar.com>
+#
+# This software is supplied under the terms of the MIT License, a
+# copy of which should be located in the distribution where this
+# file was obtained (LICENSE.txt). A copy of the license may also be
+# found online at https://opensource.org/licenses/MIT.
+#
+
+# TLS transport
+nng_directory(tls)
+
+nng_sources_if(NNG_TRANSPORT_TLS tls.c)
+nng_headers_if(NNG_TRANSPORT_TLS nng/transport/tls/tls.h)
+nng_defines_if(NNG_TRANSPORT_TLS NNG_TRANSPORT_TLS) \ No newline at end of file
diff --git a/src/sp/transport/tls/tls.c b/src/sp/transport/tls/tls.c
new file mode 100644
index 00000000..b6623733
--- /dev/null
+++ b/src/sp/transport/tls/tls.c
@@ -0,0 +1,1292 @@
+//
+// Copyright 2020 Staysail Systems, Inc. <info@staysail.tech>
+// Copyright 2018 Capitar IT Group BV <info@capitar.com>
+// Copyright 2019 Devolutions <info@devolutions.net>
+//
+// This software is supplied under the terms of the MIT License, a
+// copy of which should be located in the distribution where this
+// file was obtained (LICENSE.txt). A copy of the license may also be
+// found online at https://opensource.org/licenses/MIT.
+//
+
+#include <stdbool.h>
+#include <string.h>
+
+#include "core/nng_impl.h"
+
+#include "nng/supplemental/tls/tls.h"
+#include "nng/transport/tls/tls.h"
+
+// TLS over TCP transport. Platform specific TCP operations must be
+// supplied as well, and uses the supplemental TLS v1.2 code. It is not
+// an accident that this very closely resembles the TCP transport itself.
+
+typedef struct tlstran_ep tlstran_ep;
+typedef struct tlstran_dialer tlstran_dialer;
+typedef struct tlstran_listener tlstran_listener;
+typedef struct tlstran_pipe tlstran_pipe;
+
+// tlstran_pipe is one end of a TLS connection.
+struct tlstran_pipe {
+ nng_stream * tls;
+ nni_pipe * npipe;
+ uint16_t peer;
+ uint16_t proto;
+ size_t rcvmax;
+ bool closed;
+ nni_list_node node;
+ nni_list sendq;
+ nni_list recvq;
+ tlstran_ep * ep;
+ nni_sockaddr sa;
+ nni_atomic_flag reaped;
+ nni_reap_node reap;
+ uint8_t txlen[sizeof(uint64_t)];
+ uint8_t rxlen[sizeof(uint64_t)];
+ size_t gottxhead;
+ size_t gotrxhead;
+ size_t wanttxhead;
+ size_t wantrxhead;
+ nni_aio * txaio;
+ nni_aio * rxaio;
+ nni_aio * negoaio;
+ nni_msg * rxmsg;
+ nni_mtx mtx;
+};
+
+// Stuff that is common to both dialers and listeners.
+struct tlstran_ep {
+ nni_mtx mtx;
+ uint16_t proto;
+ size_t rcvmax;
+ bool started;
+ bool closed;
+ bool fini;
+ int refcnt;
+ int authmode;
+ nni_url * url;
+ nni_list pipes;
+ nni_reap_node reap;
+ nng_stream_dialer * dialer;
+ nng_stream_listener *listener;
+ nni_aio * useraio;
+ nni_aio * connaio;
+ nni_aio * timeaio;
+ nni_list busypipes; // busy pipes -- ones passed to socket
+ nni_list waitpipes; // pipes waiting to match to socket
+ nni_list negopipes; // pipes busy negotiating
+ const char * host;
+ nng_sockaddr src;
+ nng_sockaddr sa;
+ nni_stat_item st_rcv_max;
+};
+
+static void tlstran_pipe_send_start(tlstran_pipe *);
+static void tlstran_pipe_recv_start(tlstran_pipe *);
+static void tlstran_pipe_send_cb(void *);
+static void tlstran_pipe_recv_cb(void *);
+static void tlstran_pipe_nego_cb(void *);
+static void tlstran_ep_fini(void *);
+static void tlstran_pipe_fini(void *);
+
+static nni_reap_list tlstran_ep_reap_list = {
+ .rl_offset = offsetof(tlstran_ep, reap),
+ .rl_func = tlstran_ep_fini,
+};
+
+static nni_reap_list tlstran_pipe_reap_list = {
+ .rl_offset = offsetof(tlstran_pipe, reap),
+ .rl_func = tlstran_pipe_fini,
+};
+
+static int
+tlstran_init(void)
+{
+ return (0);
+}
+
+static void
+tlstran_fini(void)
+{
+}
+
+static void
+tlstran_pipe_close(void *arg)
+{
+ tlstran_pipe *p = arg;
+
+ nni_aio_close(p->rxaio);
+ nni_aio_close(p->txaio);
+ nni_aio_close(p->negoaio);
+
+ nng_stream_close(p->tls);
+}
+
+static void
+tlstran_pipe_stop(void *arg)
+{
+ tlstran_pipe *p = arg;
+
+ nni_aio_stop(p->rxaio);
+ nni_aio_stop(p->txaio);
+ nni_aio_stop(p->negoaio);
+}
+
+static int
+tlstran_pipe_init(void *arg, nni_pipe *npipe)
+{
+ tlstran_pipe *p = arg;
+ p->npipe = npipe;
+ return (0);
+}
+
+// Final teardown of a pipe.  Runs from the reaper (or directly when an
+// allocation fails part-way).  All aios are stopped first, so no
+// callback can still be running when we free them.  Dropping the
+// endpoint reference may also release an endpoint whose fini was
+// deferred until its last pipe went away.
+static void
+tlstran_pipe_fini(void *arg)
+{
+ tlstran_pipe *p = arg;
+ tlstran_ep * ep;
+
+ tlstran_pipe_stop(p);
+ if ((ep = p->ep) != NULL) {
+ nni_mtx_lock(&ep->mtx);
+ nni_list_node_remove(&p->node);
+ ep->refcnt--;
+ if (ep->fini && (ep->refcnt == 0)) {
+ // Endpoint fini was waiting on this pipe; reap it now.
+ nni_reap(&tlstran_ep_reap_list, ep);
+ }
+ nni_mtx_unlock(&ep->mtx);
+ }
+ nni_aio_free(p->rxaio);
+ nni_aio_free(p->txaio);
+ nni_aio_free(p->negoaio);
+ nng_stream_free(p->tls);
+ nni_msg_free(p->rxmsg);
+ // BUG FIX: p->mtx is initialized in tlstran_pipe_alloc but was never
+ // destroyed here, leaking any resources the mutex implementation holds.
+ nni_mtx_fini(&p->mtx);
+ NNI_FREE_STRUCT(p);
+}
+
+static int
+tlstran_pipe_alloc(tlstran_pipe **pipep)
+{
+ tlstran_pipe *p;
+ int rv;
+
+ if ((p = NNI_ALLOC_STRUCT(p)) == NULL) {
+ return (NNG_ENOMEM);
+ }
+ nni_mtx_init(&p->mtx);
+
+ if (((rv = nni_aio_alloc(&p->txaio, tlstran_pipe_send_cb, p)) != 0) ||
+ ((rv = nni_aio_alloc(&p->rxaio, tlstran_pipe_recv_cb, p)) != 0) ||
+ ((rv = nni_aio_alloc(&p->negoaio, tlstran_pipe_nego_cb, p)) !=
+ 0)) {
+ tlstran_pipe_fini(p);
+ return (rv);
+ }
+ nni_aio_list_init(&p->recvq);
+ nni_aio_list_init(&p->sendq);
+ nni_atomic_flag_reset(&p->reaped);
+
+ *pipep = p;
+ return (0);
+}
+
+static void
+tlstran_pipe_reap(tlstran_pipe *p)
+{
+ if (!nni_atomic_flag_test_and_set(&p->reaped)) {
+ if (p->tls != NULL) {
+ nng_stream_close(p->tls);
+ }
+ nni_reap(&tlstran_pipe_reap_list, p);
+ }
+}
+
+// Match a pipe that has completed negotiation with a pending user
+// accept/connect aio.  Caller must hold ep->mtx (all callers in this
+// file do).  If there is no waiting user aio, or no negotiated pipe,
+// this is a no-op; otherwise the pipe moves from waitpipes to busypipes
+// and is delivered as output 0 of the user's aio.
+static void
+tlstran_ep_match(tlstran_ep *ep)
+{
+ nni_aio * aio;
+ tlstran_pipe *p;
+
+ if (((aio = ep->useraio) == NULL) ||
+ ((p = nni_list_first(&ep->waitpipes)) == NULL)) {
+ return;
+ }
+ nni_list_remove(&ep->waitpipes, p);
+ nni_list_append(&ep->busypipes, p);
+ ep->useraio = NULL;
+ // Snapshot the endpoint's receive-size limit into the pipe.
+ p->rcvmax = ep->rcvmax;
+ nni_aio_set_output(aio, 0, p);
+ nni_aio_finish(aio, 0, 0);
+}
+
+// Negotiation callback.  Drives the exchange of the 8-byte SP header:
+// we transmit ours first, then read the peer's, resubmitting the single
+// negoaio for each partial transfer.  On success the pipe is queued for
+// matching with a user accept/connect; on any failure the TLS stream is
+// closed and the pipe reaped.
+static void
+tlstran_pipe_nego_cb(void *arg)
+{
+ tlstran_pipe *p = arg;
+ tlstran_ep * ep = p->ep;
+ nni_aio * aio = p->negoaio;
+ nni_aio * uaio;
+ int rv;
+
+ nni_mtx_lock(&ep->mtx);
+ if ((rv = nni_aio_result(aio)) != 0) {
+ goto error;
+ }
+
+ // We start transmitting before we receive.
+ if (p->gottxhead < p->wanttxhead) {
+ p->gottxhead += nni_aio_count(aio);
+ } else if (p->gotrxhead < p->wantrxhead) {
+ p->gotrxhead += nni_aio_count(aio);
+ }
+
+ // More of our header still to send (short write)?
+ if (p->gottxhead < p->wanttxhead) {
+ nni_iov iov;
+ iov.iov_len = p->wanttxhead - p->gottxhead;
+ iov.iov_buf = &p->txlen[p->gottxhead];
+ nni_aio_set_iov(aio, 1, &iov);
+ // send it down...
+ nng_stream_send(p->tls, aio);
+ nni_mtx_unlock(&ep->mtx);
+ return;
+ }
+ // Header fully sent; keep reading until we have the peer's.
+ if (p->gotrxhead < p->wantrxhead) {
+ nni_iov iov;
+ iov.iov_len = p->wantrxhead - p->gotrxhead;
+ iov.iov_buf = &p->rxlen[p->gotrxhead];
+ nni_aio_set_iov(aio, 1, &iov);
+ nng_stream_recv(p->tls, aio);
+ nni_mtx_unlock(&ep->mtx);
+ return;
+ }
+ // We have both sent and received the headers. Lets check the
+ // receive side header.  It must be "\0SP\0<proto16>\0\0".
+ if ((p->rxlen[0] != 0) || (p->rxlen[1] != 'S') ||
+ (p->rxlen[2] != 'P') || (p->rxlen[3] != 0) || (p->rxlen[6] != 0) ||
+ (p->rxlen[7] != 0)) {
+ rv = NNG_EPROTO;
+ goto error;
+ }
+
+ // Bytes 4-5 carry the peer's protocol number (big-endian).
+ NNI_GET16(&p->rxlen[4], p->peer);
+
+ // We are all ready now. We put this in the wait list, and
+ // then try to run the matcher.
+ nni_list_remove(&ep->negopipes, p);
+ nni_list_append(&ep->waitpipes, p);
+
+ tlstran_ep_match(ep);
+ nni_mtx_unlock(&ep->mtx);
+
+ return;
+
+error:
+ nng_stream_close(p->tls);
+
+ // Report the negotiation failure to any waiting user aio.
+ if ((uaio = ep->useraio) != NULL) {
+ ep->useraio = NULL;
+ nni_aio_finish_error(uaio, rv);
+ }
+ nni_mtx_unlock(&ep->mtx);
+ tlstran_pipe_reap(p);
+}
+
+// Completion callback for the transmit aio.  Handles short writes by
+// resubmitting the remaining iov, and on full completion finishes the
+// user aio at the head of the send queue and starts the next queued
+// send, if any.
+static void
+tlstran_pipe_send_cb(void *arg)
+{
+ tlstran_pipe *p = arg;
+ int rv;
+ nni_aio * aio;
+ size_t n;
+ nni_msg * msg;
+ nni_aio * txaio = p->txaio;
+
+ nni_mtx_lock(&p->mtx);
+ aio = nni_list_first(&p->sendq);
+
+ if ((rv = nni_aio_result(txaio)) != 0) {
+ // Intentionally we do not queue up another transfer.
+ // There's an excellent chance that the pipe is no longer
+ // usable, with a partial transfer.
+ // The protocol should see this error, and close the
+ // pipe itself, we hope.
+ nni_aio_list_remove(aio);
+ nni_mtx_unlock(&p->mtx);
+ nni_aio_finish_error(aio, rv);
+ nni_pipe_bump_error(p->npipe, rv);
+ return;
+ }
+
+ // Partial write: advance past what was sent and resubmit the rest.
+ n = nni_aio_count(txaio);
+ nni_aio_iov_advance(txaio, n);
+ if (nni_aio_iov_count(txaio) > 0) {
+ nng_stream_send(p->tls, txaio);
+ nni_mtx_unlock(&p->mtx);
+ return;
+ }
+ // Whole message sent; kick off the next queued send (if any).
+ nni_aio_list_remove(aio);
+ tlstran_pipe_send_start(p);
+
+ msg = nni_aio_get_msg(aio);
+ n = nni_msg_len(msg);
+ nni_pipe_bump_tx(p->npipe, n);
+ nni_mtx_unlock(&p->mtx);
+ // Ownership of the message stays with us; free it and report the
+ // payload length to the user.
+ nni_aio_set_msg(aio, NULL);
+ nni_msg_free(msg);
+ nni_aio_finish_sync(aio, 0, n);
+}
+
+// Completion callback for the receive aio.  Reads are two-phase: first
+// the 8-byte length header (rxmsg == NULL), then the message body.
+// Short reads are resubmitted; oversize messages are rejected with
+// NNG_EMSGSIZE.  On full completion the message is handed to the user
+// aio at the head of the receive queue.
+static void
+tlstran_pipe_recv_cb(void *arg)
+{
+ tlstran_pipe *p = arg;
+ nni_aio * aio;
+ int rv;
+ size_t n;
+ nni_msg * msg;
+ nni_aio * rxaio = p->rxaio;
+
+ nni_mtx_lock(&p->mtx);
+ aio = nni_list_first(&p->recvq);
+
+ if ((rv = nni_aio_result(p->rxaio)) != 0) {
+ goto recv_error;
+ }
+
+ n = nni_aio_count(rxaio);
+ nni_aio_iov_advance(rxaio, n);
+ if (nni_aio_iov_count(rxaio) > 0) {
+ // Was this a partial read? If so then resubmit for the rest.
+ nng_stream_recv(p->tls, rxaio);
+ nni_mtx_unlock(&p->mtx);
+ return;
+ }
+
+ // If we don't have a message yet, we were reading the TCP message
+ // header, which is just the length. This tells us the size of the
+ // message to allocate and how much more to expect.
+ if (p->rxmsg == NULL) {
+ uint64_t len;
+ // We should have gotten a message header.
+ NNI_GET64(p->rxlen, len);
+
+ // Make sure the message payload is not too big. If it is
+ // the caller will shut down the pipe.
+ // (rcvmax == 0 means "unlimited".)
+ if ((len > p->rcvmax) && (p->rcvmax > 0)) {
+ rv = NNG_EMSGSIZE;
+ goto recv_error;
+ }
+
+ if ((rv = nni_msg_alloc(&p->rxmsg, (size_t) len)) != 0) {
+ goto recv_error;
+ }
+
+ // Submit the rest of the data for a read -- we want to
+ // read the entire message now.
+ if (len != 0) {
+ nni_iov iov;
+ iov.iov_buf = nni_msg_body(p->rxmsg);
+ iov.iov_len = (size_t) len;
+ nni_aio_set_iov(rxaio, 1, &iov);
+
+ nng_stream_recv(p->tls, rxaio);
+ nni_mtx_unlock(&p->mtx);
+ return;
+ }
+ }
+
+ // We read a message completely. Let the user know the good news.
+ nni_aio_list_remove(aio);
+ msg = p->rxmsg;
+ p->rxmsg = NULL;
+ n = nni_msg_len(msg);
+ // Start the next receive (for the next queued user aio) before
+ // dropping the lock.
+ if (!nni_list_empty(&p->recvq)) {
+ tlstran_pipe_recv_start(p);
+ }
+ nni_pipe_bump_rx(p->npipe, n);
+ nni_mtx_unlock(&p->mtx);
+
+ nni_aio_set_msg(aio, msg);
+ nni_aio_finish_sync(aio, 0, n);
+ return;
+
+recv_error:
+ nni_aio_list_remove(aio);
+ msg = p->rxmsg;
+ p->rxmsg = NULL;
+ nni_pipe_bump_error(p->npipe, rv);
+ // Intentionally, we do not queue up another receive.
+ // The protocol should notice this error and close the pipe.
+ nni_mtx_unlock(&p->mtx);
+ nni_msg_free(msg);
+ nni_aio_finish_error(aio, rv);
+}
+
+static void
+tlstran_pipe_send_cancel(nni_aio *aio, void *arg, int rv)
+{
+ tlstran_pipe *p = arg;
+
+ nni_mtx_lock(&p->mtx);
+ if (!nni_aio_list_active(aio)) {
+ nni_mtx_unlock(&p->mtx);
+ return;
+ }
+ // If this is being sent, then cancel the pending transfer.
+ // The callback on the txaio will cause the user aio to
+ // be canceled too.
+ if (nni_list_first(&p->sendq) == aio) {
+ nni_aio_abort(p->txaio, rv);
+ nni_mtx_unlock(&p->mtx);
+ return;
+ }
+ nni_aio_list_remove(aio);
+ nni_mtx_unlock(&p->mtx);
+
+ nni_aio_finish_error(aio, rv);
+}
+
+static void
+tlstran_pipe_send_start(tlstran_pipe *p)
+{
+ nni_aio *txaio;
+ nni_aio *aio;
+ nni_msg *msg;
+ int niov;
+ nni_iov iov[3];
+ uint64_t len;
+
+ if ((aio = nni_list_first(&p->sendq)) == NULL) {
+ return;
+ }
+
+ msg = nni_aio_get_msg(aio);
+ len = nni_msg_len(msg) + nni_msg_header_len(msg);
+
+ NNI_PUT64(p->txlen, len);
+
+ txaio = p->txaio;
+ niov = 0;
+ iov[niov].iov_buf = p->txlen;
+ iov[niov].iov_len = sizeof(p->txlen);
+ niov++;
+ if (nni_msg_header_len(msg) > 0) {
+ iov[niov].iov_buf = nni_msg_header(msg);
+ iov[niov].iov_len = nni_msg_header_len(msg);
+ niov++;
+ }
+ if (nni_msg_len(msg) > 0) {
+ iov[niov].iov_buf = nni_msg_body(msg);
+ iov[niov].iov_len = nni_msg_len(msg);
+ niov++;
+ }
+
+ nni_aio_set_iov(txaio, niov, iov);
+ nng_stream_send(p->tls, txaio);
+}
+
+static void
+tlstran_pipe_send(void *arg, nni_aio *aio)
+{
+ tlstran_pipe *p = arg;
+ int rv;
+
+ if (nni_aio_begin(aio) != 0) {
+ return;
+ }
+ nni_mtx_lock(&p->mtx);
+ if ((rv = nni_aio_schedule(aio, tlstran_pipe_send_cancel, p)) != 0) {
+ nni_mtx_unlock(&p->mtx);
+ nni_aio_finish_error(aio, rv);
+ return;
+ }
+ nni_list_append(&p->sendq, aio);
+ if (nni_list_first(&p->sendq) == aio) {
+ tlstran_pipe_send_start(p);
+ }
+ nni_mtx_unlock(&p->mtx);
+}
+
+static void
+tlstran_pipe_recv_cancel(nni_aio *aio, void *arg, int rv)
+{
+ tlstran_pipe *p = arg;
+
+ nni_mtx_lock(&p->mtx);
+ if (!nni_aio_list_active(aio)) {
+ nni_mtx_unlock(&p->mtx);
+ return;
+ }
+ // If receive in progress, then cancel the pending transfer.
+ // The callback on the rxaio will cause the user aio to
+ // be canceled too.
+ if (nni_list_first(&p->recvq) == aio) {
+ nni_aio_abort(p->rxaio, rv);
+ nni_mtx_unlock(&p->mtx);
+ return;
+ }
+ nni_aio_list_remove(aio);
+ nni_mtx_unlock(&p->mtx);
+ nni_aio_finish_error(aio, rv);
+}
+
+// Begin a receive: schedule a read of the 8-byte SP length header into
+// p->rxlen.  The recv callback allocates rxmsg and reads the body once
+// the header arrives.  Caller must hold p->mtx.
+static void
+tlstran_pipe_recv_start(tlstran_pipe *p)
+{
+ nni_aio *aio;
+ nni_iov iov;
+ NNI_ASSERT(p->rxmsg == NULL);
+
+ // Schedule a read of the message length header.  (The old comment
+ // said "IPC header" -- a copy/paste leftover from the IPC transport.)
+ aio = p->rxaio;
+ iov.iov_buf = p->rxlen;
+ iov.iov_len = sizeof(p->rxlen);
+ nni_aio_set_iov(aio, 1, &iov);
+
+ nng_stream_recv(p->tls, aio);
+}
+
+static void
+tlstran_pipe_recv(void *arg, nni_aio *aio)
+{
+ tlstran_pipe *p = arg;
+ int rv;
+
+ if (nni_aio_begin(aio) != 0) {
+ return;
+ }
+ nni_mtx_lock(&p->mtx);
+ if ((rv = nni_aio_schedule(aio, tlstran_pipe_recv_cancel, p)) != 0) {
+ nni_mtx_unlock(&p->mtx);
+ nni_aio_finish_error(aio, rv);
+ return;
+ }
+
+ nni_aio_list_append(&p->recvq, aio);
+ if (nni_list_first(&p->recvq) == aio) {
+ tlstran_pipe_recv_start(p);
+ }
+ nni_mtx_unlock(&p->mtx);
+}
+
+static uint16_t
+tlstran_pipe_peer(void *arg)
+{
+ tlstran_pipe *p = arg;
+
+ return (p->peer);
+}
+
+// Attach a freshly connected/accepted TLS stream to the pipe and kick
+// off SP header negotiation.  Caller must hold ep->mtx; the endpoint's
+// refcnt is bumped here and dropped in tlstran_pipe_fini.
+static void
+tlstran_pipe_start(tlstran_pipe *p, nng_stream *conn, tlstran_ep *ep)
+{
+ nni_iov iov;
+
+ ep->refcnt++;
+
+ p->tls = conn;
+ p->ep = ep;
+ p->proto = ep->proto;
+
+ // Build our 8-byte SP header: "\0SP\0" + proto (BE16) + "\0\0".
+ p->txlen[0] = 0;
+ p->txlen[1] = 'S';
+ p->txlen[2] = 'P';
+ p->txlen[3] = 0;
+ NNI_PUT16(&p->txlen[4], p->proto);
+ NNI_PUT16(&p->txlen[6], 0);
+
+ p->gotrxhead = 0;
+ p->gottxhead = 0;
+ p->wantrxhead = 8;
+ p->wanttxhead = 8;
+ iov.iov_len = 8;
+ iov.iov_buf = &p->txlen[0];
+ nni_aio_set_iov(p->negoaio, 1, &iov);
+ nni_list_append(&ep->negopipes, p);
+
+ nni_aio_set_timeout(p->negoaio, 10000); // 10 sec timeout to negotiate
+ nng_stream_send(p->tls, p->negoaio);
+}
+
+// Tear down an endpoint.  If pipes still hold references, we only mark
+// the endpoint for deferred destruction; the last tlstran_pipe_fini
+// re-queues us on the reap list and we run again with refcnt == 0.
+static void
+tlstran_ep_fini(void *arg)
+{
+ tlstran_ep *ep = arg;
+
+ nni_mtx_lock(&ep->mtx);
+ ep->fini = true;
+ if (ep->refcnt != 0) {
+ // Pipes still reference us; the last one to go reaps us.
+ nni_mtx_unlock(&ep->mtx);
+ return;
+ }
+ nni_mtx_unlock(&ep->mtx);
+ nni_aio_stop(ep->timeaio);
+ nni_aio_stop(ep->connaio);
+ nng_stream_dialer_free(ep->dialer);
+ nng_stream_listener_free(ep->listener);
+ nni_aio_free(ep->timeaio);
+ nni_aio_free(ep->connaio);
+
+ nni_mtx_fini(&ep->mtx);
+ NNI_FREE_STRUCT(ep);
+}
+
+// Close the endpoint: stop accepting/dialing, close every pipe in every
+// state (negotiating, waiting, and already handed to the socket), and
+// fail any pending user accept/connect with NNG_ECLOSED.  Pipes are not
+// freed here -- their own close/reap paths handle that.
+static void
+tlstran_ep_close(void *arg)
+{
+ tlstran_ep * ep = arg;
+ tlstran_pipe *p;
+
+ nni_mtx_lock(&ep->mtx);
+ ep->closed = true;
+ nni_aio_close(ep->timeaio);
+
+ if (ep->dialer != NULL) {
+ nng_stream_dialer_close(ep->dialer);
+ }
+ if (ep->listener != NULL) {
+ nng_stream_listener_close(ep->listener);
+ }
+ NNI_LIST_FOREACH (&ep->negopipes, p) {
+ tlstran_pipe_close(p);
+ }
+ NNI_LIST_FOREACH (&ep->waitpipes, p) {
+ tlstran_pipe_close(p);
+ }
+ NNI_LIST_FOREACH (&ep->busypipes, p) {
+ tlstran_pipe_close(p);
+ }
+ if (ep->useraio != NULL) {
+ nni_aio_finish_error(ep->useraio, NNG_ECLOSED);
+ ep->useraio = NULL;
+ }
+ nni_mtx_unlock(&ep->mtx);
+}
+
+// This parses off the optional source address that this transport uses.
+// The special handling of this URL format is quite honestly an historical
+// mistake, which we would remove if we could.
+//
+// A hostname of the form "<src>;<dest>" means "bind locally to <src>
+// before dialing <dest>".  On return *url aliases the fields of surl
+// (with the source stripped from the hostname), and *sa holds the
+// resolved source address, or is zeroed if no source was given.
+static int
+tlstran_url_parse_source(nni_url *url, nng_sockaddr *sa, const nni_url *surl)
+{
+ int af;
+ char * semi;
+ char * src;
+ size_t len;
+ int rv;
+ nni_aio *aio;
+
+ // We modify the URL. This relies on the fact that the underlying
+ // transport does not free this, so we can just use references.
+
+ url->u_scheme = surl->u_scheme;
+ url->u_port = surl->u_port;
+ url->u_hostname = surl->u_hostname;
+
+ // No semicolon means no source address was supplied.
+ if ((semi = strchr(url->u_hostname, ';')) == NULL) {
+ memset(sa, 0, sizeof(*sa));
+ return (0);
+ }
+
+ len = (size_t)(semi - url->u_hostname);
+ url->u_hostname = semi + 1;
+
+ // The scheme picks the address family to resolve within.
+ if (strcmp(surl->u_scheme, "tls+tcp") == 0) {
+ af = NNG_AF_UNSPEC;
+ } else if (strcmp(surl->u_scheme, "tls+tcp4") == 0) {
+ af = NNG_AF_INET;
+ } else if (strcmp(surl->u_scheme, "tls+tcp6") == 0) {
+ af = NNG_AF_INET6;
+ } else {
+ return (NNG_EADDRINVAL);
+ }
+
+ if ((src = nni_alloc(len + 1)) == NULL) {
+ return (NNG_ENOMEM);
+ }
+ memcpy(src, surl->u_hostname, len);
+ src[len] = '\0';
+
+ if ((rv = nni_aio_alloc(&aio, NULL, NULL)) != 0) {
+ nni_free(src, len + 1);
+ return (rv);
+ }
+
+ nni_resolv_ip(src, "0", af, 1, sa, aio);
+ nni_aio_wait(aio);
+ // BUG FIX: the resolver's result was never examined -- rv still held
+ // the 0 from nni_aio_alloc, so a failed lookup reported success and
+ // left *sa uninitialized.  Capture the actual outcome here.
+ rv = nni_aio_result(aio);
+ nni_aio_free(aio);
+ nni_free(src, len + 1);
+ return (rv);
+}
+
+static void
+tlstran_timer_cb(void *arg)
+{
+ tlstran_ep *ep = arg;
+ if (nni_aio_result(ep->timeaio) == 0) {
+ nng_stream_listener_accept(ep->listener, ep->connaio);
+ }
+}
+
+// Completion callback for the listener's accept aio.  On success we
+// wrap the new stream in a pipe, start negotiation, and immediately
+// re-arm the accept so TLS handshakes proceed independently of the
+// upper-layer accept logic.  On failure we report the error to any
+// waiting user aio and (except for resource exhaustion) re-arm.
+static void
+tlstran_accept_cb(void *arg)
+{
+ tlstran_ep * ep = arg;
+ nni_aio * aio = ep->connaio;
+ tlstran_pipe *p;
+ int rv;
+ nng_stream * conn;
+
+ nni_mtx_lock(&ep->mtx);
+
+ if ((rv = nni_aio_result(aio)) != 0) {
+ goto error;
+ }
+
+ conn = nni_aio_get_output(aio, 0);
+ if ((rv = tlstran_pipe_alloc(&p)) != 0) {
+ nng_stream_free(conn);
+ goto error;
+ }
+
+ if (ep->closed) {
+ tlstran_pipe_fini(p);
+ nng_stream_free(conn);
+ rv = NNG_ECLOSED;
+ goto error;
+ }
+ tlstran_pipe_start(p, conn, ep);
+ nng_stream_listener_accept(ep->listener, ep->connaio);
+ nni_mtx_unlock(&ep->mtx);
+ return;
+
+error:
+ // When an error here occurs, let's send a notice up to the consumer.
+ // That way it can be reported properly.
+ if ((aio = ep->useraio) != NULL) {
+ ep->useraio = NULL;
+ nni_aio_finish_error(aio, rv);
+ }
+ switch (rv) {
+
+ case NNG_ENOMEM:
+ case NNG_ENOFILES:
+ // We need to cool down here, to avoid spinning.
+ // The timer callback re-arms the accept when the sleep
+ // completes successfully.
+ nng_sleep_aio(10, ep->timeaio);
+ break;
+
+ default:
+ // Start another accept. This is done because we want to
+ // ensure that TLS negotiations are disconnected from
+ // the upper layer accept logic.
+ if (!ep->closed) {
+ nng_stream_listener_accept(ep->listener, ep->connaio);
+ }
+ break;
+ }
+ nni_mtx_unlock(&ep->mtx);
+}
+
+// Completion callback for the dialer's connect aio.  On success the new
+// TLS stream is wrapped in a pipe and negotiation starts; the user's
+// connect aio completes later, from the negotiation callback.  On
+// failure the error is passed straight back to the waiting user aio.
+static void
+tlstran_dial_cb(void *arg)
+{
+ tlstran_ep * ep = arg;
+ nni_aio * aio = ep->connaio;
+ tlstran_pipe *p;
+ int rv;
+ nng_stream * conn;
+
+ if ((rv = nni_aio_result(aio)) != 0) {
+ goto error;
+ }
+
+ conn = nni_aio_get_output(aio, 0);
+ if ((rv = tlstran_pipe_alloc(&p)) != 0) {
+ nng_stream_free(conn);
+ goto error;
+ }
+ nni_mtx_lock(&ep->mtx);
+ if (ep->closed) {
+ // Endpoint was closed while we were connecting; discard.
+ tlstran_pipe_fini(p);
+ nng_stream_free(conn);
+ rv = NNG_ECLOSED;
+ nni_mtx_unlock(&ep->mtx);
+ goto error;
+ } else {
+ tlstran_pipe_start(p, conn, ep);
+ }
+ nni_mtx_unlock(&ep->mtx);
+ return;
+
+error:
+ // Error connecting. We need to pass this straight back to the user.
+ nni_mtx_lock(&ep->mtx);
+ if ((aio = ep->useraio) != NULL) {
+ ep->useraio = NULL;
+ nni_aio_finish_error(aio, rv);
+ }
+ nni_mtx_unlock(&ep->mtx);
+}
+
+static int
+tlstran_ep_init(tlstran_ep **epp, nng_url *url, nni_sock *sock)
+{
+ tlstran_ep *ep;
+
+ if ((ep = NNI_ALLOC_STRUCT(ep)) == NULL) {
+ return (NNG_ENOMEM);
+ }
+ nni_mtx_init(&ep->mtx);
+ NNI_LIST_INIT(&ep->busypipes, tlstran_pipe, node);
+ NNI_LIST_INIT(&ep->waitpipes, tlstran_pipe, node);
+ NNI_LIST_INIT(&ep->negopipes, tlstran_pipe, node);
+
+ ep->proto = nni_sock_proto_id(sock);
+ ep->url = url;
+
+#ifdef NNG_ENABLE_STATS
+ static const nni_stat_info rcv_max_info = {
+ .si_name = "rcv_max",
+ .si_desc = "maximum receive size",
+ .si_type = NNG_STAT_LEVEL,
+ .si_unit = NNG_UNIT_BYTES,
+ .si_atomic = true,
+ };
+ nni_stat_init(&ep->st_rcv_max, &rcv_max_info);
+#endif
+
+ *epp = ep;
+ return (0);
+}
+
+// Create the transport-private state for a dialer.  Validates the URL,
+// splits off any ";src" local-address prefix, and allocates the
+// underlying stream dialer.  On any failure after tlstran_ep_init the
+// endpoint is released via tlstran_ep_fini.
+static int
+tlstran_ep_init_dialer(void **dp, nni_url *url, nni_dialer *ndialer)
+{
+ tlstran_ep * ep;
+ int rv;
+ nng_sockaddr srcsa;
+ nni_sock * sock = nni_dialer_sock(ndialer);
+ nni_url myurl;
+
+ // Check for invalid URL components.
+ if ((strlen(url->u_path) != 0) && (strcmp(url->u_path, "/") != 0)) {
+ return (NNG_EADDRINVAL);
+ }
+ if ((url->u_fragment != NULL) || (url->u_userinfo != NULL) ||
+ (url->u_query != NULL) || (strlen(url->u_hostname) == 0) ||
+ (strlen(url->u_port) == 0)) {
+ return (NNG_EADDRINVAL);
+ }
+
+ if ((rv = tlstran_url_parse_source(&myurl, &srcsa, url)) != 0) {
+ return (rv);
+ }
+
+ if ((rv = tlstran_ep_init(&ep, url, sock)) != 0) {
+ return (rv);
+ }
+ // BUG FIX: a failure allocating the connect aio previously returned
+ // without freeing the endpoint, leaking it.
+ if ((rv = nni_aio_alloc(&ep->connaio, tlstran_dial_cb, ep)) != 0) {
+ tlstran_ep_fini(ep);
+ return (rv);
+ }
+ ep->authmode = NNG_TLS_AUTH_MODE_REQUIRED;
+
+ // (The old "(rv != 0) ||" guard here was dead code -- rv is always
+ // zero at this point.)
+ if ((rv = nng_stream_dialer_alloc_url(&ep->dialer, &myurl)) != 0) {
+ tlstran_ep_fini(ep);
+ return (rv);
+ }
+ // If a source address was given, bind locally to it before dialing.
+ if ((srcsa.s_family != NNG_AF_UNSPEC) &&
+ ((rv = nni_stream_dialer_set(ep->dialer, NNG_OPT_LOCADDR, &srcsa,
+ sizeof(srcsa), NNI_TYPE_SOCKADDR)) != 0)) {
+ tlstran_ep_fini(ep);
+ return (rv);
+ }
+#ifdef NNG_ENABLE_STATS
+ nni_dialer_add_stat(ndialer, &ep->st_rcv_max);
+#endif
+ *dp = ep;
+ return (0);
+}
+
+// Create the transport-private state for a listener.  Validates the
+// URL, resolves the bind address synchronously (see the XXX below),
+// and allocates the underlying stream listener with the default
+// authentication mode.  On any failure after tlstran_ep_init the
+// endpoint is released via tlstran_ep_fini.
+static int
+tlstran_ep_init_listener(void **lp, nni_url *url, nni_listener *nlistener)
+{
+ tlstran_ep *ep;
+ int rv;
+ uint16_t af;
+ char * host = url->u_hostname;
+ nni_aio * aio;
+ nni_sock * sock = nni_listener_sock(nlistener);
+
+ // The scheme picks the address family to resolve within.
+ if (strcmp(url->u_scheme, "tls+tcp") == 0) {
+ af = NNG_AF_UNSPEC;
+ } else if (strcmp(url->u_scheme, "tls+tcp4") == 0) {
+ af = NNG_AF_INET;
+ } else if (strcmp(url->u_scheme, "tls+tcp6") == 0) {
+ af = NNG_AF_INET6;
+ } else {
+ return (NNG_EADDRINVAL);
+ }
+
+ // Check for invalid URL components.
+ if ((strlen(url->u_path) != 0) && (strcmp(url->u_path, "/") != 0)) {
+ return (NNG_EADDRINVAL);
+ }
+ if ((url->u_fragment != NULL) || (url->u_userinfo != NULL) ||
+ (url->u_query != NULL)) {
+ return (NNG_EADDRINVAL);
+ }
+ if ((rv = tlstran_ep_init(&ep, url, sock)) != 0) {
+ return (rv);
+ }
+ // BUG FIX: a failure allocating either aio previously returned
+ // without freeing the endpoint (and possibly the first aio),
+ // leaking them.
+ if (((rv = nni_aio_alloc(&ep->connaio, tlstran_accept_cb, ep)) != 0) ||
+ ((rv = nni_aio_alloc(&ep->timeaio, tlstran_timer_cb, ep)) != 0)) {
+ tlstran_ep_fini(ep);
+ return (rv);
+ }
+
+ ep->authmode = NNG_TLS_AUTH_MODE_NONE;
+
+ // An empty hostname means bind to the wildcard address.
+ if (strlen(host) == 0) {
+ host = NULL;
+ }
+
+ // XXX: We are doing lookup at listener initialization. There is
+ // a valid argument that this should be done at bind time, but that
+ // would require making bind asynchronous. In some ways this would
+ // be worse than the cost of just waiting here. We always recommend
+ // using local IP addresses rather than names when possible.
+
+ if ((rv = nni_aio_alloc(&aio, NULL, NULL)) != 0) {
+ tlstran_ep_fini(ep);
+ return (rv);
+ }
+ nni_resolv_ip(host, url->u_port, af, true, &ep->sa, aio);
+ nni_aio_wait(aio);
+ rv = nni_aio_result(aio);
+ nni_aio_free(aio);
+
+ if ((rv != 0) ||
+ ((rv = nng_stream_listener_alloc_url(&ep->listener, url)) != 0) ||
+ ((rv = nni_stream_listener_set(ep->listener, NNG_OPT_TLS_AUTH_MODE,
+ &ep->authmode, sizeof(ep->authmode), NNI_TYPE_INT32)) !=
+ 0)) {
+ tlstran_ep_fini(ep);
+ return (rv);
+ }
+#ifdef NNG_ENABLE_STATS
+ nni_listener_add_stat(nlistener, &ep->st_rcv_max);
+#endif
+ *lp = ep;
+ return (0);
+}
+
+// Cancellation handler for a pending user accept/connect aio.  Only
+// acts if the aio being cancelled is still the registered user aio.
+static void
+tlstran_ep_cancel(nni_aio *aio, void *arg, int rv)
+{
+ tlstran_ep *ep = arg;
+
+ nni_mtx_lock(&ep->mtx);
+ if (ep->useraio != aio) {
+ // Already matched or cancelled; nothing to do.
+ nni_mtx_unlock(&ep->mtx);
+ return;
+ }
+ ep->useraio = NULL;
+ nni_aio_finish_error(aio, rv);
+ nni_mtx_unlock(&ep->mtx);
+}
+
+static void
+tlstran_ep_connect(void *arg, nni_aio *aio)
+{
+ tlstran_ep *ep = arg;
+ int rv;
+
+ if (nni_aio_begin(aio) != 0) {
+ return;
+ }
+
+ nni_mtx_lock(&ep->mtx);
+ if (ep->closed) {
+ nni_mtx_unlock(&ep->mtx);
+ nni_aio_finish_error(aio, NNG_ECLOSED);
+ return;
+ }
+ if (ep->useraio != NULL) {
+ nni_mtx_unlock(&ep->mtx);
+ nni_aio_finish_error(aio, NNG_EBUSY);
+ return;
+ }
+ if ((rv = nni_aio_schedule(aio, tlstran_ep_cancel, ep)) != 0) {
+ nni_mtx_unlock(&ep->mtx);
+ nni_aio_finish_error(aio, rv);
+ return;
+ }
+ ep->useraio = aio;
+
+ nng_stream_dialer_dial(ep->dialer, ep->connaio);
+ nni_mtx_unlock(&ep->mtx);
+}
+
+// Bind the listener to its configured address.  Returns the result of
+// the underlying stream listener's listen call.
+static int
+tlstran_ep_bind(void *arg)
+{
+ tlstran_ep *ep = arg;
+ int result;
+
+ nni_mtx_lock(&ep->mtx);
+ result = nng_stream_listener_listen(ep->listener);
+ nni_mtx_unlock(&ep->mtx);
+ return (result);
+}
+
+static void
+tlstran_ep_accept(void *arg, nni_aio *aio)
+{
+ tlstran_ep *ep = arg;
+ int rv;
+
+ if (nni_aio_begin(aio) != 0) {
+ return;
+ }
+ nni_mtx_lock(&ep->mtx);
+ if (ep->closed) {
+ nni_mtx_unlock(&ep->mtx);
+ nni_aio_finish_error(aio, NNG_ECLOSED);
+ return;
+ }
+ if (ep->useraio != NULL) {
+ nni_mtx_unlock(&ep->mtx);
+ nni_aio_finish_error(aio, NNG_EBUSY);
+ return;
+ }
+ if ((rv = nni_aio_schedule(aio, tlstran_ep_cancel, ep)) != 0) {
+ nni_mtx_unlock(&ep->mtx);
+ nni_aio_finish_error(aio, rv);
+ return;
+ }
+ ep->useraio = aio;
+ if (!ep->started) {
+ ep->started = true;
+ nng_stream_listener_accept(ep->listener, ep->connaio);
+ } else {
+ tlstran_ep_match(ep);
+ }
+ nni_mtx_unlock(&ep->mtx);
+}
+
+static int
+tlstran_ep_set_recvmaxsz(void *arg, const void *v, size_t sz, nni_type t)
+{
+ tlstran_ep *ep = arg;
+ size_t val;
+ int rv;
+ if ((rv = nni_copyin_size(&val, v, sz, 0, NNI_MAXSZ, t)) == 0) {
+ tlstran_pipe *p;
+ nni_mtx_lock(&ep->mtx);
+ ep->rcvmax = val;
+ NNI_LIST_FOREACH (&ep->waitpipes, p) {
+ p->rcvmax = val;
+ }
+ NNI_LIST_FOREACH (&ep->negopipes, p) {
+ p->rcvmax = val;
+ }
+ NNI_LIST_FOREACH (&ep->busypipes, p) {
+ p->rcvmax = val;
+ }
+ nni_mtx_unlock(&ep->mtx);
+#ifdef NNG_ENABLE_STATS
+ nni_stat_set_value(&ep->st_rcv_max, val);
+#endif
+ }
+ return (rv);
+}
+
+// Option getter for NNG_OPT_RECVMAXSZ: copy out the endpoint's current
+// maximum receive size under the endpoint lock.
+static int
+tlstran_ep_get_recvmaxsz(void *arg, void *v, size_t *szp, nni_type t)
+{
+ tlstran_ep *ep = arg;
+ int result;
+
+ nni_mtx_lock(&ep->mtx);
+ result = nni_copyout_size(ep->rcvmax, v, szp, t);
+ nni_mtx_unlock(&ep->mtx);
+ return (result);
+}
+
+static int
+tlstran_ep_get_url(void *arg, void *v, size_t *szp, nni_type t)
+{
+ tlstran_ep *ep = arg;
+ char * s;
+ int rv;
+ int port = 0;
+
+ if (ep->listener != NULL) {
+ (void) nng_stream_listener_get_int(
+ ep->listener, NNG_OPT_TCP_BOUND_PORT, &port);
+ }
+ if ((rv = nni_url_asprintf_port(&s, ep->url, port)) == 0) {
+ rv = nni_copyout_str(s, v, szp, t);
+ nni_strfree(s);
+ }
+ return (rv);
+}
+
+static const nni_option tlstran_pipe_opts[] = {
+ // terminate list
+ {
+ .o_name = NULL,
+ },
+};
+
+static int
+tlstran_pipe_getopt(
+ void *arg, const char *name, void *buf, size_t *szp, nni_type t)
+{
+ tlstran_pipe *p = arg;
+ int rv;
+
+ if ((rv = nni_stream_get(p->tls, name, buf, szp, t)) == NNG_ENOTSUP) {
+ rv = nni_getopt(tlstran_pipe_opts, name, p, buf, szp, t);
+ }
+ return (rv);
+}
+
+static nni_tran_pipe_ops tlstran_pipe_ops = {
+ .p_init = tlstran_pipe_init,
+ .p_fini = tlstran_pipe_fini,
+ .p_stop = tlstran_pipe_stop,
+ .p_send = tlstran_pipe_send,
+ .p_recv = tlstran_pipe_recv,
+ .p_close = tlstran_pipe_close,
+ .p_peer = tlstran_pipe_peer,
+ .p_getopt = tlstran_pipe_getopt,
+};
+
+static nni_option tlstran_ep_options[] = {
+ {
+ .o_name = NNG_OPT_RECVMAXSZ,
+ .o_get = tlstran_ep_get_recvmaxsz,
+ .o_set = tlstran_ep_set_recvmaxsz,
+ },
+ {
+ .o_name = NNG_OPT_URL,
+ .o_get = tlstran_ep_get_url,
+ },
+ // terminate list
+ {
+ .o_name = NULL,
+ },
+};
+
+static int
+tlstran_dialer_getopt(
+ void *arg, const char *name, void *buf, size_t *szp, nni_type t)
+{
+ int rv;
+ tlstran_ep *ep = arg;
+
+ rv = nni_stream_dialer_get(ep->dialer, name, buf, szp, t);
+ if (rv == NNG_ENOTSUP) {
+ rv = nni_getopt(tlstran_ep_options, name, ep, buf, szp, t);
+ }
+ return (rv);
+}
+
+static int
+tlstran_dialer_setopt(
+ void *arg, const char *name, const void *buf, size_t sz, nni_type t)
+{
+ int rv;
+ tlstran_ep *ep = arg;
+
+ rv = nni_stream_dialer_set(
+ ep != NULL ? ep->dialer : NULL, name, buf, sz, t);
+ if (rv == NNG_ENOTSUP) {
+ rv = nni_setopt(tlstran_ep_options, name, ep, buf, sz, t);
+ }
+ return (rv);
+}
+
+static int
+tlstran_listener_get(
+ void *arg, const char *name, void *buf, size_t *szp, nni_type t)
+{
+ int rv;
+ tlstran_ep *ep = arg;
+
+ rv = nni_stream_listener_get(ep->listener, name, buf, szp, t);
+ if (rv == NNG_ENOTSUP) {
+ rv = nni_getopt(tlstran_ep_options, name, ep, buf, szp, t);
+ }
+ return (rv);
+}
+
+static int
+tlstran_listener_set(
+ void *arg, const char *name, const void *buf, size_t sz, nni_type t)
+{
+ int rv;
+ tlstran_ep *ep = arg;
+
+ rv = nni_stream_listener_set(
+ ep != NULL ? ep->listener : NULL, name, buf, sz, t);
+ if (rv == NNG_ENOTSUP) {
+ rv = nni_setopt(tlstran_ep_options, name, ep, buf, sz, t);
+ }
+ return (rv);
+}
+
+static nni_tran_dialer_ops tlstran_dialer_ops = {
+ .d_init = tlstran_ep_init_dialer,
+ .d_fini = tlstran_ep_fini,
+ .d_connect = tlstran_ep_connect,
+ .d_close = tlstran_ep_close,
+ .d_getopt = tlstran_dialer_getopt,
+ .d_setopt = tlstran_dialer_setopt,
+};
+
+static nni_tran_listener_ops tlstran_listener_ops = {
+ .l_init = tlstran_ep_init_listener,
+ .l_fini = tlstran_ep_fini,
+ .l_bind = tlstran_ep_bind,
+ .l_accept = tlstran_ep_accept,
+ .l_close = tlstran_ep_close,
+ .l_getopt = tlstran_listener_get,
+ .l_setopt = tlstran_listener_set,
+};
+
+static nni_tran tls_tran = {
+ .tran_version = NNI_TRANSPORT_VERSION,
+ .tran_scheme = "tls+tcp",
+ .tran_dialer = &tlstran_dialer_ops,
+ .tran_listener = &tlstran_listener_ops,
+ .tran_pipe = &tlstran_pipe_ops,
+ .tran_init = tlstran_init,
+ .tran_fini = tlstran_fini,
+};
+
+static nni_tran tls4_tran = {
+ .tran_version = NNI_TRANSPORT_VERSION,
+ .tran_scheme = "tls+tcp4",
+ .tran_dialer = &tlstran_dialer_ops,
+ .tran_listener = &tlstran_listener_ops,
+ .tran_pipe = &tlstran_pipe_ops,
+ .tran_init = tlstran_init,
+ .tran_fini = tlstran_fini,
+};
+
+static nni_tran tls6_tran = {
+ .tran_version = NNI_TRANSPORT_VERSION,
+ .tran_scheme = "tls+tcp6",
+ .tran_dialer = &tlstran_dialer_ops,
+ .tran_listener = &tlstran_listener_ops,
+ .tran_pipe = &tlstran_pipe_ops,
+ .tran_init = tlstran_init,
+ .tran_fini = tlstran_fini,
+};
+
+// Register the tls+tcp, tls+tcp4, and tls+tcp6 transports with the
+// core.  Stops at (and returns) the first registration failure.
+int
+nng_tls_register(void)
+{
+ int rv;
+
+ if ((rv = nni_tran_register(&tls_tran)) != 0) {
+ return (rv);
+ }
+ if ((rv = nni_tran_register(&tls4_tran)) != 0) {
+ return (rv);
+ }
+ return (nni_tran_register(&tls6_tran));
+}
diff --git a/src/sp/transport/ws/CMakeLists.txt b/src/sp/transport/ws/CMakeLists.txt
new file mode 100644
index 00000000..6e409b43
--- /dev/null
+++ b/src/sp/transport/ws/CMakeLists.txt
@@ -0,0 +1,24 @@
+#
+# Copyright 2020 Staysail Systems, Inc. <info@staysail.tech>
+# Copyright 2018 Capitar IT Group BV <info@capitar.com>
+#
+# This software is supplied under the terms of the MIT License, a
+# copy of which should be located in the distribution where this
+# file was obtained (LICENSE.txt). A copy of the license may also be
+# found online at https://opensource.org/licenses/MIT.
+#
+
+# WebSocket transport
+nng_directory(ws)
+
+if (NNG_TRANSPORT_WS OR NNG_TRANSPORT_WSS)
+ set(WS_ON ON)
+endif()
+
+nng_defines_if(NNG_TRANSPORT_WS NNG_TRANSPORT_WS)
+nng_defines_if(NNG_TRANSPORT_WSS NNG_TRANSPORT_WSS)
+nng_sources_if(WS_ON websocket.c)
+nng_headers_if(WS_ON nng/transport/ws/websocket.h)
+nng_test_if(WS_ON ws_test)
+
+
diff --git a/src/sp/transport/ws/README.adoc b/src/sp/transport/ws/README.adoc
new file mode 100644
index 00000000..e3101297
--- /dev/null
+++ b/src/sp/transport/ws/README.adoc
@@ -0,0 +1,38 @@
+= websocket transport
+
+This transport provides support for SP over websocket using TCP or TLS.
+When using TCP, it is compatible with the libnanomsg legacy transport.
+It also is compatible with mangos (both TCP and TLS).
+
+TLS support requires the mbedTLS library.
+
+We set the "protocol" such as "pair.sp.nanomsg.org" in the
+Sec-WebSocket-Protocol field -- the client sets it to the server's
+protocol - i.e. the protocol that the server speaks. For example,
+if the server is a REP, then a REQ client would send "rep.sp.nanomsg.org".
+
+The server sends the same value (its own), per the WebSocket specs. (Note
+that the client's protocol is never sent, but is assumed to be complementary
+to the protocol in the Sec-WebSocket-Protocol field.)
+
+Each SP message is a WebSocket message.
+
+WebSocket is defined in RFC 6455.
+
+== Design
+
+We unfortunately need to implement our own design for this -- the only
+reasonable client library would be libcurl, and there is a dearth of
+suitable server libraries. Since we don't have to support full HTTP, but
+just the initial handshake, this isn't too tragic.
+
+== Multiple Server Sockets
+
+In order to support multiple server sockets listening on the same port,
+the application must be long-lived. We will set up a listener on the
+configured TCP (or TLS) port, and examine the PATH supplied in the GET.
+This will be used to match against the URL requested, and if the URL
+matches we will create the appropriate pipe.
+
+If no server endpoint at that address can be found, we return an
+HTTP error, and close the socket.
diff --git a/src/sp/transport/ws/websocket.c b/src/sp/transport/ws/websocket.c
new file mode 100644
index 00000000..3f73f47f
--- /dev/null
+++ b/src/sp/transport/ws/websocket.c
@@ -0,0 +1,740 @@
+//
+// Copyright 2020 Staysail Systems, Inc. <info@staysail.tech>
+// Copyright 2018 Capitar IT Group BV <info@capitar.com>
+// Copyright 2019 Devolutions <info@devolutions.net>
+//
+// This software is supplied under the terms of the MIT License, a
+// copy of which should be located in the distribution where this
+// file was obtained (LICENSE.txt). A copy of the license may also be
+// found online at https://opensource.org/licenses/MIT.
+//
+
+#include <stdbool.h>
+#include <stdio.h>
+#include <string.h>
+
+#include "core/nng_impl.h"
+#include "supplemental/websocket/websocket.h"
+
+#include <nng/supplemental/tls/tls.h>
+#include <nng/transport/ws/websocket.h>
+
+typedef struct ws_dialer ws_dialer;
+typedef struct ws_listener ws_listener;
+typedef struct ws_pipe ws_pipe;
+
+struct ws_dialer {
+ uint16_t peer; // remote protocol
+ nni_list aios;
+ nni_mtx mtx;
+ nni_aio * connaio;
+ nng_stream_dialer *dialer;
+ bool started;
+};
+
+struct ws_listener {
+ uint16_t peer; // remote protocol
+ nni_list aios;
+ nni_mtx mtx;
+ nni_aio * accaio;
+ nng_stream_listener *listener;
+ bool started;
+};
+
+struct ws_pipe {
+ nni_mtx mtx;
+ bool closed;
+ uint16_t peer;
+ nni_aio * user_txaio;
+ nni_aio * user_rxaio;
+ nni_aio * txaio;
+ nni_aio * rxaio;
+ nng_stream *ws;
+};
+
+static void
+wstran_pipe_send_cb(void *arg)
+{
+ ws_pipe *p = arg;
+ nni_aio *taio;
+ nni_aio *uaio;
+
+ nni_mtx_lock(&p->mtx);
+ taio = p->txaio;
+ uaio = p->user_txaio;
+ p->user_txaio = NULL;
+
+ if (uaio != NULL) {
+ int rv;
+ if ((rv = nni_aio_result(taio)) != 0) {
+ nni_aio_finish_error(uaio, rv);
+ } else {
+ nni_aio_finish(uaio, 0, 0);
+ }
+ }
+ nni_mtx_unlock(&p->mtx);
+}
+
+static void
+wstran_pipe_recv_cb(void *arg)
+{
+ ws_pipe *p = arg;
+ nni_aio *raio = p->rxaio;
+ nni_aio *uaio;
+ int rv;
+
+ nni_mtx_lock(&p->mtx);
+ uaio = p->user_rxaio;
+ p->user_rxaio = NULL;
+ if ((rv = nni_aio_result(raio)) != 0) {
+ if (uaio != NULL) {
+ nni_aio_finish_error(uaio, rv);
+ }
+ } else {
+ nni_msg *msg = nni_aio_get_msg(raio);
+ if (uaio != NULL) {
+ nni_aio_finish_msg(uaio, msg);
+ } else {
+ nni_msg_free(msg);
+ }
+ }
+ nni_mtx_unlock(&p->mtx);
+}
+
+static void
+wstran_pipe_recv_cancel(nni_aio *aio, void *arg, int rv)
+{
+ ws_pipe *p = arg;
+ nni_mtx_lock(&p->mtx);
+ if (p->user_rxaio != aio) {
+ nni_mtx_unlock(&p->mtx);
+ return;
+ }
+ p->user_rxaio = NULL;
+ nni_aio_abort(p->rxaio, rv);
+ nni_aio_finish_error(aio, rv);
+ nni_mtx_unlock(&p->mtx);
+}
+
+static void
+wstran_pipe_recv(void *arg, nni_aio *aio)
+{
+ ws_pipe *p = arg;
+ int rv;
+
+ if (nni_aio_begin(aio) != 0) {
+ return;
+ }
+ nni_mtx_lock(&p->mtx);
+ if ((rv = nni_aio_schedule(aio, wstran_pipe_recv_cancel, p)) != 0) {
+ nni_mtx_unlock(&p->mtx);
+ nni_aio_finish_error(aio, rv);
+ return;
+ }
+ p->user_rxaio = aio;
+ nng_stream_recv(p->ws, p->rxaio);
+ nni_mtx_unlock(&p->mtx);
+}
+
+static void
+wstran_pipe_send_cancel(nni_aio *aio, void *arg, int rv)
+{
+ ws_pipe *p = arg;
+ nni_mtx_lock(&p->mtx);
+ if (p->user_txaio != aio) {
+ nni_mtx_unlock(&p->mtx);
+ return;
+ }
+ p->user_txaio = NULL;
+ nni_aio_abort(p->txaio, rv);
+ nni_aio_finish_error(aio, rv);
+ nni_mtx_unlock(&p->mtx);
+}
+
+static void
+wstran_pipe_send(void *arg, nni_aio *aio)
+{
+ ws_pipe *p = arg;
+ int rv;
+
+ if (nni_aio_begin(aio) != 0) {
+ return;
+ }
+ nni_mtx_lock(&p->mtx);
+ if ((rv = nni_aio_schedule(aio, wstran_pipe_send_cancel, p)) != 0) {
+ nni_mtx_unlock(&p->mtx);
+ nni_aio_finish_error(aio, rv);
+ return;
+ }
+ p->user_txaio = aio;
+ nni_aio_set_msg(p->txaio, nni_aio_get_msg(aio));
+ nni_aio_set_msg(aio, NULL);
+
+ nng_stream_send(p->ws, p->txaio);
+ nni_mtx_unlock(&p->mtx);
+}
+
+static void
+wstran_pipe_stop(void *arg)
+{
+ ws_pipe *p = arg;
+
+ nni_aio_stop(p->rxaio);
+ nni_aio_stop(p->txaio);
+}
+
+static int
+wstran_pipe_init(void *arg, nni_pipe *pipe)
+{
+ NNI_ARG_UNUSED(arg);
+ NNI_ARG_UNUSED(pipe);
+ return (0);
+}
+
+static void
+wstran_pipe_fini(void *arg)
+{
+ ws_pipe *p = arg;
+
+ nni_aio_free(p->rxaio);
+ nni_aio_free(p->txaio);
+
+ nng_stream_free(p->ws);
+ nni_mtx_fini(&p->mtx);
+ NNI_FREE_STRUCT(p);
+}
+
+static void
+wstran_pipe_close(void *arg)
+{
+ ws_pipe *p = arg;
+
+ nni_aio_close(p->rxaio);
+ nni_aio_close(p->txaio);
+
+ nni_mtx_lock(&p->mtx);
+ nng_stream_close(p->ws);
+ nni_mtx_unlock(&p->mtx);
+}
+
+static int
+wstran_pipe_alloc(ws_pipe **pipep, void *ws)
+{
+ ws_pipe *p;
+ int rv;
+
+ if ((p = NNI_ALLOC_STRUCT(p)) == NULL) {
+ return (NNG_ENOMEM);
+ }
+ nni_mtx_init(&p->mtx);
+
+ // Initialize AIOs.
+ if (((rv = nni_aio_alloc(&p->txaio, wstran_pipe_send_cb, p)) != 0) ||
+ ((rv = nni_aio_alloc(&p->rxaio, wstran_pipe_recv_cb, p)) != 0)) {
+ wstran_pipe_fini(p);
+ return (rv);
+ }
+ p->ws = ws;
+
+ *pipep = p;
+ return (0);
+}
+
+static uint16_t
+wstran_pipe_peer(void *arg)
+{
+ ws_pipe *p = arg;
+
+ return (p->peer);
+}
+
+static int
+ws_listener_bind(void *arg)
+{
+ ws_listener *l = arg;
+ int rv;
+
+ if ((rv = nng_stream_listener_listen(l->listener)) == 0) {
+ l->started = true;
+ }
+ return (rv);
+}
+
+static void
+ws_listener_cancel(nni_aio *aio, void *arg, int rv)
+{
+ ws_listener *l = arg;
+
+ nni_mtx_lock(&l->mtx);
+ if (nni_aio_list_active(aio)) {
+ nni_aio_list_remove(aio);
+ nni_aio_finish_error(aio, rv);
+ }
+ nni_mtx_unlock(&l->mtx);
+}
+
+static void
+wstran_listener_accept(void *arg, nni_aio *aio)
+{
+ ws_listener *l = arg;
+ int rv;
+
+ // We already bound, so we just need to look for an available
+ // pipe (created by the handler), and match it.
+ // Otherwise we stick the AIO in the accept list.
+ if (nni_aio_begin(aio) != 0) {
+ return;
+ }
+ nni_mtx_lock(&l->mtx);
+ if ((rv = nni_aio_schedule(aio, ws_listener_cancel, l)) != 0) {
+ nni_mtx_unlock(&l->mtx);
+ nni_aio_finish_error(aio, rv);
+ return;
+ }
+ nni_list_append(&l->aios, aio);
+ if (aio == nni_list_first(&l->aios)) {
+ nng_stream_listener_accept(l->listener, l->accaio);
+ }
+ nni_mtx_unlock(&l->mtx);
+}
+
+static void
+wstran_dialer_cancel(nni_aio *aio, void *arg, int rv)
+{
+ ws_dialer *d = arg;
+
+ nni_mtx_lock(&d->mtx);
+ if (nni_aio_list_active(aio)) {
+ nni_aio_list_remove(aio);
+ nni_aio_finish_error(aio, rv);
+ }
+ nni_mtx_unlock(&d->mtx);
+}
+
+static void
+wstran_dialer_connect(void *arg, nni_aio *aio)
+{
+ ws_dialer *d = arg;
+ int rv;
+
+ if (nni_aio_begin(aio) != 0) {
+ return;
+ }
+
+ nni_mtx_lock(&d->mtx);
+ if ((rv = nni_aio_schedule(aio, wstran_dialer_cancel, d)) != 0) {
+ nni_mtx_unlock(&d->mtx);
+ nni_aio_finish_error(aio, rv);
+ return;
+ }
+ NNI_ASSERT(nni_list_empty(&d->aios));
+ d->started = true;
+ nni_list_append(&d->aios, aio);
+ nng_stream_dialer_dial(d->dialer, d->connaio);
+ nni_mtx_unlock(&d->mtx);
+}
+
+static const nni_option ws_pipe_options[] = {
+ // terminate list
+ {
+ .o_name = NULL,
+ }
+};
+
+static int
+wstran_pipe_getopt(
+ void *arg, const char *name, void *buf, size_t *szp, nni_type t)
+{
+ ws_pipe *p = arg;
+ int rv;
+
+ if ((rv = nni_stream_get(p->ws, name, buf, szp, t)) == NNG_ENOTSUP) {
+ rv = nni_getopt(ws_pipe_options, name, p, buf, szp, t);
+ }
+ return (rv);
+}
+
+static nni_tran_pipe_ops ws_pipe_ops = {
+ .p_init = wstran_pipe_init,
+ .p_fini = wstran_pipe_fini,
+ .p_stop = wstran_pipe_stop,
+ .p_send = wstran_pipe_send,
+ .p_recv = wstran_pipe_recv,
+ .p_close = wstran_pipe_close,
+ .p_peer = wstran_pipe_peer,
+ .p_getopt = wstran_pipe_getopt,
+};
+
+static void
+wstran_dialer_fini(void *arg)
+{
+ ws_dialer *d = arg;
+
+ nni_aio_stop(d->connaio);
+ nng_stream_dialer_free(d->dialer);
+ nni_aio_free(d->connaio);
+ nni_mtx_fini(&d->mtx);
+ NNI_FREE_STRUCT(d);
+}
+
+static void
+wstran_listener_fini(void *arg)
+{
+ ws_listener *l = arg;
+
+ nni_aio_stop(l->accaio);
+ nng_stream_listener_free(l->listener);
+ nni_aio_free(l->accaio);
+ nni_mtx_fini(&l->mtx);
+ NNI_FREE_STRUCT(l);
+}
+
+static void
+wstran_connect_cb(void *arg)
+{
+ ws_dialer * d = arg;
+ ws_pipe * p;
+ nni_aio * caio = d->connaio;
+ nni_aio * uaio;
+ int rv;
+ nng_stream *ws = NULL;
+
+ nni_mtx_lock(&d->mtx);
+ if (nni_aio_result(caio) == 0) {
+ ws = nni_aio_get_output(caio, 0);
+ }
+ if ((uaio = nni_list_first(&d->aios)) == NULL) {
+ // The client stopped caring about this!
+ nng_stream_free(ws);
+ nni_mtx_unlock(&d->mtx);
+ return;
+ }
+ nni_aio_list_remove(uaio);
+ NNI_ASSERT(nni_list_empty(&d->aios));
+ if ((rv = nni_aio_result(caio)) != 0) {
+ nni_aio_finish_error(uaio, rv);
+ } else if ((rv = wstran_pipe_alloc(&p, ws)) != 0) {
+ nng_stream_free(ws);
+ nni_aio_finish_error(uaio, rv);
+ } else {
+ p->peer = d->peer;
+
+ nni_aio_set_output(uaio, 0, p);
+ nni_aio_finish(uaio, 0, 0);
+ }
+ nni_mtx_unlock(&d->mtx);
+}
+
+static void
+wstran_dialer_close(void *arg)
+{
+ ws_dialer *d = arg;
+
+ nni_aio_close(d->connaio);
+ nng_stream_dialer_close(d->dialer);
+}
+
+static void
+wstran_listener_close(void *arg)
+{
+ ws_listener *l = arg;
+
+ nni_aio_close(l->accaio);
+ nng_stream_listener_close(l->listener);
+}
+
+static void
+wstran_accept_cb(void *arg)
+{
+ ws_listener *l = arg;
+ nni_aio * aaio = l->accaio;
+ nni_aio * uaio;
+ int rv;
+
+ nni_mtx_lock(&l->mtx);
+ uaio = nni_list_first(&l->aios);
+ if ((rv = nni_aio_result(aaio)) != 0) {
+ if (uaio != NULL) {
+ nni_aio_list_remove(uaio);
+ nni_aio_finish_error(uaio, rv);
+ }
+ } else {
+ nng_stream *ws = nni_aio_get_output(aaio, 0);
+ if (uaio != NULL) {
+ ws_pipe *p;
+ // Make a pipe
+ nni_aio_list_remove(uaio);
+ if ((rv = wstran_pipe_alloc(&p, ws)) != 0) {
+ nng_stream_close(ws);
+ nni_aio_finish_error(uaio, rv);
+ } else {
+ p->peer = l->peer;
+
+ nni_aio_set_output(uaio, 0, p);
+ nni_aio_finish(uaio, 0, 0);
+ }
+ }
+ }
+ if (!nni_list_empty(&l->aios)) {
+ nng_stream_listener_accept(l->listener, aaio);
+ }
+ nni_mtx_unlock(&l->mtx);
+}
+
+static int
+wstran_dialer_init(void **dp, nng_url *url, nni_dialer *ndialer)
+{
+ ws_dialer *d;
+ nni_sock * s = nni_dialer_sock(ndialer);
+ int rv;
+ char name[64];
+
+ if ((d = NNI_ALLOC_STRUCT(d)) == NULL) {
+ return (NNG_ENOMEM);
+ }
+ nni_mtx_init(&d->mtx);
+
+ nni_aio_list_init(&d->aios);
+
+ d->peer = nni_sock_peer_id(s);
+
+ snprintf(
+ name, sizeof(name), "%s.sp.nanomsg.org", nni_sock_peer_name(s));
+
+ if (((rv = nni_ws_dialer_alloc(&d->dialer, url)) != 0) ||
+ ((rv = nni_aio_alloc(&d->connaio, wstran_connect_cb, d)) != 0) ||
+ ((rv = nng_stream_dialer_set_bool(
+ d->dialer, NNI_OPT_WS_MSGMODE, true)) != 0) ||
+ ((rv = nng_stream_dialer_set_string(
+ d->dialer, NNG_OPT_WS_PROTOCOL, name)) != 0)) {
+ wstran_dialer_fini(d);
+ return (rv);
+ }
+
+ *dp = d;
+ return (0);
+}
+
+static int
+wstran_listener_init(void **lp, nng_url *url, nni_listener *listener)
+{
+ ws_listener *l;
+ int rv;
+ nni_sock * s = nni_listener_sock(listener);
+ char name[64];
+
+ if ((l = NNI_ALLOC_STRUCT(l)) == NULL) {
+ return (NNG_ENOMEM);
+ }
+ nni_mtx_init(&l->mtx);
+
+ nni_aio_list_init(&l->aios);
+
+ l->peer = nni_sock_peer_id(s);
+
+ snprintf(
+ name, sizeof(name), "%s.sp.nanomsg.org", nni_sock_proto_name(s));
+
+ if (((rv = nni_ws_listener_alloc(&l->listener, url)) != 0) ||
+ ((rv = nni_aio_alloc(&l->accaio, wstran_accept_cb, l)) != 0) ||
+ ((rv = nng_stream_listener_set_bool(
+ l->listener, NNI_OPT_WS_MSGMODE, true)) != 0) ||
+ ((rv = nng_stream_listener_set_string(
+ l->listener, NNG_OPT_WS_PROTOCOL, name)) != 0)) {
+ wstran_listener_fini(l);
+ return (rv);
+ }
+ *lp = l;
+ return (0);
+}
+
+static int
+wstran_init(void)
+{
+ return (0);
+}
+
+static void
+wstran_fini(void)
+{
+}
+
+static const nni_option wstran_ep_opts[] = {
+ // terminate list
+ {
+ .o_name = NULL,
+ },
+};
+
+static int
+wstran_dialer_getopt(
+ void *arg, const char *name, void *buf, size_t *szp, nni_type t)
+{
+ ws_dialer *d = arg;
+ int rv;
+
+ rv = nni_stream_dialer_get(d->dialer, name, buf, szp, t);
+ if (rv == NNG_ENOTSUP) {
+ rv = nni_getopt(wstran_ep_opts, name, d, buf, szp, t);
+ }
+ return (rv);
+}
+
+static int
+wstran_dialer_setopt(
+ void *arg, const char *name, const void *buf, size_t sz, nni_type t)
+{
+ ws_dialer *d = arg;
+ int rv;
+
+ rv = nni_stream_dialer_set(d->dialer, name, buf, sz, t);
+ if (rv == NNG_ENOTSUP) {
+ rv = nni_setopt(wstran_ep_opts, name, d, buf, sz, t);
+ }
+ return (rv);
+}
+
+static int
+wstran_listener_get(
+ void *arg, const char *name, void *buf, size_t *szp, nni_type t)
+{
+ ws_listener *l = arg;
+ int rv;
+
+ rv = nni_stream_listener_get(l->listener, name, buf, szp, t);
+ if (rv == NNG_ENOTSUP) {
+ rv = nni_getopt(wstran_ep_opts, name, l, buf, szp, t);
+ }
+ return (rv);
+}
+
+static int
+wstran_listener_set(
+ void *arg, const char *name, const void *buf, size_t sz, nni_type t)
+{
+ ws_listener *l = arg;
+ int rv;
+
+ rv = nni_stream_listener_set(l->listener, name, buf, sz, t);
+ if (rv == NNG_ENOTSUP) {
+ rv = nni_setopt(wstran_ep_opts, name, l, buf, sz, t);
+ }
+ return (rv);
+}
+
+static nni_tran_dialer_ops ws_dialer_ops = {
+ .d_init = wstran_dialer_init,
+ .d_fini = wstran_dialer_fini,
+ .d_connect = wstran_dialer_connect,
+ .d_close = wstran_dialer_close,
+ .d_setopt = wstran_dialer_setopt,
+ .d_getopt = wstran_dialer_getopt,
+};
+
+static nni_tran_listener_ops ws_listener_ops = {
+ .l_init = wstran_listener_init,
+ .l_fini = wstran_listener_fini,
+ .l_bind = ws_listener_bind,
+ .l_accept = wstran_listener_accept,
+ .l_close = wstran_listener_close,
+ .l_setopt = wstran_listener_set,
+ .l_getopt = wstran_listener_get,
+};
+
+static nni_tran ws_tran = {
+ .tran_version = NNI_TRANSPORT_VERSION,
+ .tran_scheme = "ws",
+ .tran_dialer = &ws_dialer_ops,
+ .tran_listener = &ws_listener_ops,
+ .tran_pipe = &ws_pipe_ops,
+ .tran_init = wstran_init,
+ .tran_fini = wstran_fini,
+};
+
+static nni_tran ws4_tran = {
+ .tran_version = NNI_TRANSPORT_VERSION,
+ .tran_scheme = "ws4",
+ .tran_dialer = &ws_dialer_ops,
+ .tran_listener = &ws_listener_ops,
+ .tran_pipe = &ws_pipe_ops,
+ .tran_init = wstran_init,
+ .tran_fini = wstran_fini,
+};
+
+static nni_tran ws6_tran = {
+ .tran_version = NNI_TRANSPORT_VERSION,
+ .tran_scheme = "ws6",
+ .tran_dialer = &ws_dialer_ops,
+ .tran_listener = &ws_listener_ops,
+ .tran_pipe = &ws_pipe_ops,
+ .tran_init = wstran_init,
+ .tran_fini = wstran_fini,
+};
+
+int
+nng_ws_register(void)
+{
+ int rv;
+ if (((rv = nni_tran_register(&ws_tran)) != 0) ||
+ ((rv = nni_tran_register(&ws4_tran)) != 0) ||
+ ((rv = nni_tran_register(&ws6_tran)) != 0)) {
+ return (rv);
+ }
+
+ return (0);
+}
+
+#ifdef NNG_TRANSPORT_WSS
+
+static nni_tran wss_tran = {
+ .tran_version = NNI_TRANSPORT_VERSION,
+ .tran_scheme = "wss",
+ .tran_dialer = &ws_dialer_ops,
+ .tran_listener = &ws_listener_ops,
+ .tran_pipe = &ws_pipe_ops,
+ .tran_init = wstran_init,
+ .tran_fini = wstran_fini,
+};
+
+static nni_tran wss4_tran = {
+ .tran_version = NNI_TRANSPORT_VERSION,
+ .tran_scheme = "wss4",
+ .tran_dialer = &ws_dialer_ops,
+ .tran_listener = &ws_listener_ops,
+ .tran_pipe = &ws_pipe_ops,
+ .tran_init = wstran_init,
+ .tran_fini = wstran_fini,
+};
+
+static nni_tran wss6_tran = {
+ .tran_version = NNI_TRANSPORT_VERSION,
+ .tran_scheme = "wss6",
+ .tran_dialer = &ws_dialer_ops,
+ .tran_listener = &ws_listener_ops,
+ .tran_pipe = &ws_pipe_ops,
+ .tran_init = wstran_init,
+ .tran_fini = wstran_fini,
+};
+
+int
+nng_wss_register(void)
+{
+ int rv;
+ if (((rv = nni_tran_register(&wss_tran)) != 0) ||
+ ((rv = nni_tran_register(&wss4_tran)) != 0) ||
+ ((rv = nni_tran_register(&wss6_tran)) != 0)) {
+ return (rv);
+ }
+
+ return (0);
+}
+
+#else
+
+int
+nng_wss_register(void)
+{
+ return (0);
+}
+
+#endif // NNG_TRANSPORT_WSS
diff --git a/src/sp/transport/ws/ws_test.c b/src/sp/transport/ws/ws_test.c
new file mode 100644
index 00000000..7cbcd9d7
--- /dev/null
+++ b/src/sp/transport/ws/ws_test.c
@@ -0,0 +1,181 @@
+//
+// Copyright 2020 Staysail Systems, Inc. <info@staysail.tech>
+// Copyright 2018 Cody Piersall <cody.piersall@gmail.com>
+//
+// This software is supplied under the terms of the MIT License, a
+// copy of which should be located in the distribution where this
+// file was obtained (LICENSE.txt). A copy of the license may also be
+// found online at https://opensource.org/licenses/MIT.
+//
+
+#include <nuts.h>
+
+static void
+test_ws_url_path_filters(void)
+{
+ nng_socket s1;
+ nng_socket s2;
+ char addr[NNG_MAXADDRLEN];
+
+ NUTS_OPEN(s1);
+ NUTS_OPEN(s2);
+
+ nuts_scratch_addr("ws", sizeof(addr), addr);
+ NUTS_PASS(nng_listen(s1, addr, NULL, 0));
+
+	// For now we just remove the last character of the path.
+	// This will make the path different.
+ addr[strlen(addr) - 1] = '\0';
+ NUTS_FAIL(nng_dial(s2, addr, NULL, 0), NNG_ECONNREFUSED);
+
+ NUTS_CLOSE(s1);
+ NUTS_CLOSE(s2);
+}
+
+static void
+test_wild_card_port(void)
+{
+ nng_socket s1;
+ nng_socket s2;
+ nng_socket s3;
+ nng_socket s4;
+ nng_socket s5;
+ nng_socket s6;
+
+ nng_listener l1;
+ nng_listener l2;
+ nng_listener l3;
+ int port1;
+ int port2;
+ int port3;
+ char ws_url[128];
+ NUTS_OPEN(s1);
+ NUTS_OPEN(s2);
+ NUTS_OPEN(s3);
+ NUTS_OPEN(s4);
+ NUTS_OPEN(s5);
+ NUTS_OPEN(s6);
+ NUTS_PASS(nng_listen(s1, "ws://127.0.0.1:0/one", &l1, 0));
+ NUTS_PASS(
+ nng_listener_get_int(l1, NNG_OPT_TCP_BOUND_PORT, &port1));
+ NUTS_TRUE(port1 != 0);
+ snprintf(ws_url, sizeof(ws_url), "ws4://127.0.0.1:%d/two", port1);
+ NUTS_PASS(nng_listen(s2, ws_url, &l2, 0));
+ NUTS_PASS(
+ nng_listener_get_int(l2, NNG_OPT_TCP_BOUND_PORT, &port2));
+ NUTS_TRUE(port1 != 0);
+ NUTS_TRUE(port1 == port2);
+ // Now try a different wild card port.
+ NUTS_PASS(nng_listen(s3, "ws4://127.0.0.1:0/three", &l3, 0));
+ NUTS_PASS(
+ nng_listener_get_int(l3, NNG_OPT_TCP_BOUND_PORT, &port3));
+ NUTS_TRUE(port3 != 0);
+ NUTS_TRUE(port3 != port1);
+
+ // Let's make sure can dial to each.
+ snprintf(ws_url, sizeof(ws_url), "ws://127.0.0.1:%d/one", port1);
+ NUTS_PASS(nng_dial(s4, ws_url, NULL, 0));
+ snprintf(ws_url, sizeof(ws_url), "ws://127.0.0.1:%d/two", port2);
+ NUTS_PASS(nng_dial(s6, ws_url, NULL, 0));
+ snprintf(ws_url, sizeof(ws_url), "ws://127.0.0.1:%d/three", port3);
+ NUTS_PASS(nng_dial(s6, ws_url, NULL, 0));
+
+ NUTS_CLOSE(s1);
+ NUTS_CLOSE(s2);
+ NUTS_CLOSE(s3);
+ NUTS_CLOSE(s4);
+ NUTS_CLOSE(s5);
+ NUTS_CLOSE(s6);
+}
+
+static void
+test_wild_card_host(void)
+{
+ nng_socket s1;
+ nng_socket s2;
+ char addr[NNG_MAXADDRLEN];
+ uint16_t port;
+
+ NUTS_OPEN(s1);
+ NUTS_OPEN(s2);
+
+ port = nuts_next_port();
+
+ // we use ws4 to ensure 127.0.0.1 binding
+ snprintf(addr, sizeof(addr), "ws4://*:%u/test", port);
+ NUTS_PASS(nng_listen(s1, addr, NULL, 0));
+ nng_msleep(100);
+
+ snprintf(addr, sizeof(addr), "ws://127.0.0.1:%u/test", port);
+ NUTS_PASS(nng_dial(s2, addr, NULL, 0));
+
+ NUTS_CLOSE(s1);
+ NUTS_CLOSE(s2);
+}
+
+static void
+test_empty_host(void)
+{
+ nng_socket s1;
+ nng_socket s2;
+ char addr[NNG_MAXADDRLEN];
+ uint16_t port;
+
+ NUTS_OPEN(s1);
+ NUTS_OPEN(s2);
+
+ port = nuts_next_port();
+
+ // we use ws4 to ensure 127.0.0.1 binding
+ snprintf(addr, sizeof(addr), "ws4://:%u/test", port);
+ NUTS_PASS(nng_listen(s1, addr, NULL, 0));
+ nng_msleep(100);
+
+ snprintf(addr, sizeof(addr), "ws://127.0.0.1:%u/test", port);
+ NUTS_PASS(nng_dial(s2, addr, NULL, 0));
+
+ NUTS_CLOSE(s1);
+ NUTS_CLOSE(s2);
+}
+
+void
+test_ws_recv_max(void)
+{
+ char msg[256];
+ char buf[256];
+ nng_socket s0;
+ nng_socket s1;
+ nng_listener l;
+ size_t sz;
+ char *addr;
+
+ NUTS_ADDR(addr, "ws");
+ NUTS_OPEN(s0);
+ NUTS_PASS(nng_socket_set_ms(s0, NNG_OPT_RECVTIMEO, 100));
+ NUTS_PASS(nng_socket_set_size(s0, NNG_OPT_RECVMAXSZ, 200));
+ NUTS_PASS(nng_listener_create(&l, s0, addr));
+ NUTS_PASS(nng_socket_get_size(s0, NNG_OPT_RECVMAXSZ, &sz));
+ NUTS_TRUE(sz == 200);
+ NUTS_PASS(nng_listener_set_size(l, NNG_OPT_RECVMAXSZ, 100));
+ NUTS_PASS(nng_listener_start(l, 0));
+
+ NUTS_OPEN(s1);
+ NUTS_PASS(nng_dial(s1, addr, NULL, 0));
+ NUTS_PASS(nng_send(s1, msg, 95, 0));
+ NUTS_PASS(nng_socket_set_ms(s1, NNG_OPT_SENDTIMEO, 100));
+ NUTS_PASS(nng_recv(s0, buf, &sz, 0));
+ NUTS_TRUE(sz == 95);
+ NUTS_PASS(nng_send(s1, msg, 150, 0));
+ NUTS_FAIL(nng_recv(s0, buf, &sz, 0), NNG_ETIMEDOUT);
+ NUTS_CLOSE(s0);
+ NUTS_CLOSE(s1);
+}
+
+TEST_LIST = {
+ { "ws url path filters", test_ws_url_path_filters },
+ { "ws wild card port", test_wild_card_port },
+ { "ws wild card host", test_wild_card_host },
+ { "ws empty host", test_empty_host },
+ { "ws recv max", test_ws_recv_max },
+ { NULL, NULL },
+}; \ No newline at end of file
diff --git a/src/sp/transport/zerotier/CMakeLists.txt b/src/sp/transport/zerotier/CMakeLists.txt
new file mode 100644
index 00000000..903b7f56
--- /dev/null
+++ b/src/sp/transport/zerotier/CMakeLists.txt
@@ -0,0 +1,37 @@
+#
+# Copyright 2020 Staysail Systems, Inc. <info@staysail.tech>
+# Copyright 2018 Capitar IT Group BV <info@capitar.com>
+#
+# This software is supplied under the terms of the MIT License, a
+# copy of which should be located in the distribution where this
+# file was obtained (LICENSE.txt). A copy of the license may also be
+# found online at https://opensource.org/licenses/MIT.
+#
+
+# ZeroTier protocol
+
+option (NNG_TRANSPORT_ZEROTIER "Enable ZeroTier transport (requires libzerotiercore)." OFF)
+mark_as_advanced(NNG_TRANSPORT_ZEROTIER)
+
+nng_directory(zerotier)
+
+if (NNG_TRANSPORT_ZEROTIER)
+
+ # NB: As we wind up linking libzerotiercore.a into the application,
+ # this means that your application will *also* need to either be licensed
+ # under the GPLv3, or you will need to have a commercial license from
+ # ZeroTier permitting its use elsewhere.
+
+ message(WARNING "
+ ************************************************************
+ Linking against zerotiercore changes license terms.
+ Consult a lawyer and the license files for details.
+ ************************************************************")
+
+ find_package(zerotiercore REQUIRED)
+
+ nng_link_libraries(zerotiercore::zerotiercore)
+ nng_defines(NNG_TRANSPORT_ZEROTIER)
+ nng_sources(zerotier.c zthash.c)
+ nng_headers(nng/transport/zerotier/zerotier.h)
+endif()
diff --git a/src/sp/transport/zerotier/zerotier.c b/src/sp/transport/zerotier/zerotier.c
new file mode 100644
index 00000000..896add29
--- /dev/null
+++ b/src/sp/transport/zerotier/zerotier.c
@@ -0,0 +1,3241 @@
+//
+// Copyright 2020 Staysail Systems, Inc. <info@staysail.tech>
+// Copyright 2018 Capitar IT Group BV <info@capitar.com>
+//
+// This software is supplied under the terms of the MIT License, a
+// copy of which should be located in the distribution where this
+// file was obtained (LICENSE.txt). A copy of the license may also be
+// found online at https://opensource.org/licenses/MIT.
+//
+
+#include <ctype.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+
+#include "core/nng_impl.h"
+#include "zthash.h"
+
+#include "nng/transport/zerotier/zerotier.h"
+
+#include <zerotiercore/ZeroTierOne.h>
+
+// ZeroTier Transport. This sits on the ZeroTier L2 network, which itself
+// is implemented on top of UDP. This requires the 3rd party
+// libzerotiercore library (which is GPLv3!) and platform specific UDP
+// functionality to be built in. Note that care must be taken to link
+// dynamically if one wishes to avoid making your entire application GPL3.
+// (Alternatively ZeroTier offers commercial licenses which may prevent
+// this particular problem.) This implementation does not make use of
+// certain advanced capabilities in ZeroTier such as more sophisticated
+// route management and TCP fallback. You need to have connectivity
+// to the Internet to use this. (Or at least to your Planetary root.)
+//
+// Because ZeroTier takes a while to establish connectivity, it is even
+// more important that applications using the ZeroTier transport not
+// assume that a connection will be immediately available. It can take
+// quite a few seconds for peer-to-peer connectivity to be established.
+//
+// The ZeroTier transport was funded by Capitar IT Group, BV.
+//
+// This transport is highly experimental.
+
+// ZeroTier and UDP are connectionless, but nng is designed around
+// connection oriented paradigms. An "unreliable" connection is created
+// on top using our own network protocol. The details of this are
+// documented in the RFC.
+
+// Every participant has an "address", which is a 64-bit value constructed
+// using the ZT node number in the upper 40-bits, and a 24-bit port number
+// in the lower bits. We elect to operate primarily on these addresses,
+// but the wire protocol relies on just conveying the 24-bit port along
+// with the MAC address (from which the ZT node number can be derived,
+// given the network ID.)
+
+typedef struct zt_pipe zt_pipe;
+typedef struct zt_ep zt_ep;
+typedef struct zt_node zt_node;
+typedef struct zt_frag zt_frag;
+typedef struct zt_fraglist zt_fraglist;
+
+// Port numbers are stored as 24-bit values in network byte order.
+#define ZT_GET24(ptr, v) \
+ v = (((uint32_t)((uint8_t)(ptr)[0])) << 16) + \
+ (((uint32_t)((uint8_t)(ptr)[1])) << 8) + \
+ (((uint32_t)(uint8_t)(ptr)[2]))
+
+#define ZT_PUT24(ptr, u) \
+ do { \
+ (ptr)[0] = (uint8_t)(((uint32_t)(u)) >> 16); \
+ (ptr)[1] = (uint8_t)(((uint32_t)(u)) >> 8); \
+ (ptr)[2] = (uint8_t)((uint32_t)(u)); \
+ } while (0)
+
+static const uint16_t zt_ethertype = 0x901;
+static const uint8_t zt_version = 0x01;
+static const uint32_t zt_ephemeral = 0x800000u; // start of ephemeral ports
+static const uint32_t zt_max_port = 0xffffffu; // largest port
+static const uint32_t zt_port_mask = 0xffffffu; // mask of valid ports
+static const uint32_t zt_port_shift = 24;
+static const int zt_conn_tries = 240; // max connect attempts
+static const nng_duration zt_conn_time = 500; // between attempts (msec)
+static const int zt_ping_tries = 10; // max keepalive attempts
+static const nng_duration zt_ping_time = 60000; // keepalive time (msec)
+
+// These are compile time tunables for now.
+enum zt_tunables {
+ zt_listenq = 128, // backlog queue length
+ zt_listen_expire = 10000, // maximum time in backlog (msec)
+ zt_rcv_bufsize = 4096, // max UDP recv
+ zt_udp_sendq = 16, // outgoing UDP queue length
+ zt_recvq = 2, // max pending recv (per pipe)
+ zt_recv_stale = 1000, // frags older than are stale (msec)
+};
+
+enum zt_op_codes {
+ zt_op_data = 0x00, // data, final fragment
+ zt_op_conn_req = 0x10, // connect request
+ zt_op_conn_ack = 0x12, // connect accepted
+ zt_op_disc_req = 0x20, // disconnect request (no ack)
+ zt_op_ping = 0x30, // ping request
+ zt_op_pong = 0x32, // ping response
+ zt_op_error = 0x40, // error response
+};
+
+enum zt_offsets {
+ zt_offset_op = 0x00,
+ zt_offset_flags = 0x01,
+ zt_offset_version = 0x02, // protocol version number (2 bytes)
+ zt_offset_zero1 = 0x04, // reserved, must be zero (1 byte)
+ zt_offset_dst_port = 0x05, // destination port (3 bytes)
+ zt_offset_zero2 = 0x08, // reserved, must be zero (1 byte)
+ zt_offset_src_port = 0x09, // source port number (3 bytes)
+ zt_offset_creq_proto = 0x0C, // SP protocol number (2 bytes)
+ zt_offset_cack_proto = 0x0C, // SP protocol number (2 bytes)
+ zt_offset_err_code = 0x0C, // error code (1 byte)
+ zt_offset_err_msg = 0x0D, // error message (string)
+ zt_offset_data_id = 0x0C, // message ID (2 bytes)
+ zt_offset_data_fragsz = 0x0E, // fragment size
+ zt_offset_data_frag = 0x10, // fragment number, first is 1 (2 bytes)
+ zt_offset_data_nfrag = 0x12, // total fragments (2 bytes)
+ zt_offset_data_data = 0x14, // user payload
+ zt_size_headers = 0x0C, // size of headers
+ zt_size_conn_req = 0x0E, // size of conn_req (connect request)
+ zt_size_conn_ack = 0x0E, // size of conn_ack (connect reply)
+ zt_size_disc_req = 0x0C, // size of disc_req (disconnect)
+ zt_size_ping = 0x0C, // size of ping request
+ zt_size_pong = 0x0C, // size of ping reply
+ zt_size_data = 0x14, // size of data message (w/o payload)
+};
+
+enum zt_errors {
+ zt_err_refused = 0x01, // Connection refused
+	zt_err_notconn = 0x02, // Connection does not exist
+ zt_err_wrongsp = 0x03, // SP protocol mismatch
+ zt_err_proto = 0x04, // Other protocol error
+	zt_err_msgsize = 0x05, // Message too large
+ zt_err_unknown = 0x06, // Other errors
+};
+
+// This node structure is wrapped around the ZT_node; this allows us to
+// have multiple endpoints referencing the same ZT_node, but also to
+// support different nodes (identities) based on different home dirs.
+// This means we need to stick these on a global linked list, manage
+// them with a reference count, and uniquely identify them using the
+// homedir.
+struct zt_node {
+ char zn_path[NNG_MAXADDRLEN]; // ought to be sufficient
+ nni_file_lockh *zn_flock;
+ ZT_Node * zn_znode;
+ uint64_t zn_self;
+ nni_list_node zn_link;
+ bool zn_closed;
+ nni_plat_udp * zn_udp4;
+ nni_plat_udp * zn_udp6;
+ nni_list zn_eplist;
+ nni_list zn_plist;
+ zt_hash * zn_ports;
+ zt_hash * zn_eps;
+ zt_hash * zn_lpipes;
+ zt_hash * zn_rpipes;
+ nni_aio * zn_rcv4_aio;
+ uint8_t * zn_rcv4_buf;
+ nng_sockaddr zn_rcv4_addr;
+ nni_aio * zn_rcv6_aio;
+ uint8_t * zn_rcv6_buf;
+ nng_sockaddr zn_rcv6_addr;
+ nni_thr zn_bgthr;
+ int64_t zn_bgtime;
+ nni_cv zn_bgcv;
+ nni_cv zn_snd6_cv;
+};
+
+// The fragment list is used to keep track of incoming received
+// fragments for reassembly into a complete message.
+struct zt_fraglist {
+ nni_time fl_time; // time first frag was received
+ uint32_t fl_msgid; // message id
+ int fl_ready; // we have all messages
+ size_t fl_fragsz;
+ unsigned int fl_nfrags;
+ uint8_t * fl_missing;
+ size_t fl_missingsz;
+ nni_msg * fl_msg;
+};
+
+struct zt_pipe {
+ nni_list_node zp_link;
+ zt_node * zp_ztn;
+ nni_pipe * zp_npipe;
+ uint64_t zp_nwid;
+ uint64_t zp_laddr;
+ uint64_t zp_raddr;
+ uint16_t zp_peer;
+ uint16_t zp_proto;
+ uint16_t zp_next_msgid;
+ size_t zp_rcvmax;
+ size_t zp_mtu;
+ nni_aio * zp_user_rxaio;
+ nni_time zp_last_recv;
+ zt_fraglist zp_recvq[zt_recvq];
+ int zp_ping_try;
+ int zp_ping_tries;
+ bool zp_closed;
+ nni_duration zp_ping_time;
+ nni_aio * zp_ping_aio;
+ uint8_t * zp_send_buf;
+ nni_atomic_flag zp_reaped;
+ nni_reap_item zp_reap;
+};
+
+typedef struct zt_creq zt_creq;
+struct zt_creq {
+ uint64_t cr_expire;
+ uint64_t cr_raddr;
+ uint16_t cr_proto;
+};
+
+struct zt_ep {
+ nni_list_node ze_link;
+ char ze_home[NNG_MAXADDRLEN]; // should be enough
+ zt_node * ze_ztn;
+ uint64_t ze_nwid;
+ bool ze_running;
+ uint64_t ze_raddr; // remote node address
+ uint64_t ze_laddr; // local node address
+ uint16_t ze_proto;
+ size_t ze_rcvmax;
+ nni_aio * ze_aio;
+ nni_aio * ze_creq_aio;
+ bool ze_creq_active;
+ int ze_creq_try;
+ nni_list ze_aios;
+ int ze_mtu;
+ int ze_ping_tries;
+ nni_duration ze_ping_time;
+ nni_duration ze_conn_time;
+ int ze_conn_tries;
+
+	// Incoming connection requests (server only). We
+	// only have "accepted" requests -- that is we won't have an
+ // established connection/pipe unless the application calls
+ // accept. Since the "application" is our library, that should
+	// be pretty much as fast as we can run.
+ zt_creq ze_creqs[zt_listenq];
+ int ze_creq_head;
+ int ze_creq_tail;
+ nni_dialer * ze_ndialer;
+ nni_listener *ze_nlistener;
+};
+
+// Locking strategy. At present the ZeroTier core is not reentrant or fully
+// threadsafe. (We expect this will be fixed.) Furthermore, there are
+// some significant challenges in dealing with locks associated with the
+// callbacks, etc. So we take a big-hammer approach, and just use a single
+// global lock for everything. We hold this lock when calling into the
+// ZeroTier framework. Since ZeroTier has no independent threads, that
+// means that it will always hold this lock in its core, and the lock will
+// also be held automatically in any of our callbacks. We never hold any
+// other locks across ZeroTier core calls. We may not acquire the global
+// lock in callbacks (they will already have it held). Any other locks
+// can be acquired as long as they are not held during calls into ZeroTier.
+//
+// This will have a detrimental impact on performance, but to be completely
+// honest we don't think anyone will be using the ZeroTier transport in
+// performance critical applications; scalability may become a factor for
+// large servers sitting in a ZeroTier hub situation. (Then again, since
+// only the zerotier processing is single threaded, it may not
+// be that much of a bottleneck -- really depends on how expensive these
+// operations are. We can use lockstat or other lock-hotness tools to
+// check for this later.)
+
+static nni_mtx zt_lk;
+static nni_list zt_nodes;
+
+static void zt_ep_send_conn_req(zt_ep *);
+static void zt_ep_conn_req_cb(void *);
+static void zt_ep_doaccept(zt_ep *);
+static void zt_pipe_dorecv(zt_pipe *);
+static int zt_pipe_alloc(zt_pipe **, zt_ep *, uint64_t, uint64_t, bool);
+static void zt_pipe_ping_cb(void *);
+static void zt_fraglist_clear(zt_fraglist *);
+static void zt_fraglist_free(zt_fraglist *);
+static void zt_virtual_recv(ZT_Node *, void *, void *, uint64_t, void **,
+ uint64_t, uint64_t, unsigned int, unsigned int, const void *,
+ unsigned int);
+static void zt_pipe_start_ping(zt_pipe *);
+
+// zt_now returns the current time as the signed 64-bit millisecond
+// count that the ZeroTier core APIs expect.
+static int64_t
+zt_now(void)
+{
+	// We return msec
+	return ((int64_t) nni_clock());
+}
+
+// zt_bgthr is the background thread that services the ZeroTier core's
+// deferred work.  It sleeps on zn_bgcv until the deadline in zn_bgtime,
+// then runs ZT_Node_processBackgroundTasks (which writes the next
+// deadline back into `now`).  It holds zt_lk for its whole life and
+// exits when zn_closed is set.
+static void
+zt_bgthr(void *arg)
+{
+	zt_node *ztn = arg;
+	int64_t  now;
+
+	nni_mtx_lock(&zt_lk);
+	for (;;) {
+		now = zt_now();
+
+		if (ztn->zn_closed) {
+			break;
+		}
+
+		if (now < ztn->zn_bgtime) {
+			nni_cv_until(&ztn->zn_bgcv, (nni_time) ztn->zn_bgtime);
+			continue;
+		}
+
+		// Clear the deadline first, so a reschedule posted from a
+		// ZeroTier callback during processing is not lost.
+		ztn->zn_bgtime = 0;
+		ZT_Node_processBackgroundTasks(ztn->zn_znode, NULL, now, &now);
+
+		// The core stored the next desired deadline into now.
+		ztn->zn_bgtime = now;
+	}
+	nni_mtx_unlock(&zt_lk);
+}
+
+// zt_node_resched asks the background thread to wake no later than
+// msec.  A request later than an already-pending deadline is ignored;
+// zn_bgtime == 0 means "no deadline pending".  Caller holds zt_lk.
+static void
+zt_node_resched(zt_node *ztn, int64_t msec)
+{
+	if (msec > ztn->zn_bgtime && ztn->zn_bgtime != 0) {
+		return;
+	}
+	ztn->zn_bgtime = msec;
+	nni_cv_wake1(&ztn->zn_bgcv);
+}
+
+// zt_node_rcv4_cb handles completion of an IPv4 UDP receive: it
+// rebuilds the BSD sockaddr form that the ZeroTier core expects,
+// passes the datagram to ZT_Node_processWirePacket, reschedules the
+// background work, and posts the next receive.
+static void
+zt_node_rcv4_cb(void *arg)
+{
+	zt_node *               ztn = arg;
+	nni_aio *               aio = ztn->zn_rcv4_aio;
+	struct sockaddr_storage sa;
+	struct sockaddr_in *    sin;
+	nng_sockaddr_in *       nsin;
+	int64_t                 now;
+
+	if (nni_aio_result(aio) != 0) {
+		// Outside of memory exhaustion, we can't really think
+		// of any reason for this to legitimately fail.
+		// Arguably we should inject a fallback delay, but for
+		// now we just carry on.  (NB: no new receive is posted
+		// on this path, so IPv4 reception stops here.)
+		return;
+	}
+
+	memset(&sa, 0, sizeof(sa));
+	sin                  = (void *) &sa;
+	nsin                 = &ztn->zn_rcv4_addr.s_in;
+	sin->sin_family      = AF_INET;
+	sin->sin_port        = nsin->sa_port;
+	sin->sin_addr.s_addr = nsin->sa_addr;
+
+	nni_mtx_lock(&zt_lk);
+	now = zt_now();
+
+	// We are not going to perform any validation of the data; we
+	// just pass this straight into the ZeroTier core.
+	// XXX: CHECK THIS, if it fails then we have a fatal error with
+	// the znode, and have to shut everything down.
+	ZT_Node_processWirePacket(ztn->zn_znode, NULL, now, 0, (void *) &sa,
+	    ztn->zn_rcv4_buf, nni_aio_count(aio), &now);
+
+	// Schedule background work
+	zt_node_resched(ztn, now);
+
+	// Schedule another receive.
+	if (ztn->zn_udp4 != NULL) {
+		nni_iov iov;
+		iov.iov_buf = ztn->zn_rcv4_buf;
+		iov.iov_len = zt_rcv_bufsize;
+		nni_aio_set_iov(aio, 1, &iov);
+
+		nni_aio_set_input(aio, 0, &ztn->zn_rcv4_addr);
+
+		nni_plat_udp_recv(ztn->zn_udp4, aio);
+	}
+	nni_mtx_unlock(&zt_lk);
+}
+
+// zt_node_rcv6_cb handles completion of an IPv6 UDP receive: it
+// rebuilds the BSD sockaddr form that the ZeroTier core expects,
+// passes the datagram to ZT_Node_processWirePacket, reschedules the
+// background work, and posts the next receive.
+static void
+zt_node_rcv6_cb(void *arg)
+{
+	zt_node *                 ztn = arg;
+	nni_aio *                 aio = ztn->zn_rcv6_aio;
+	struct sockaddr_storage   sa;
+	struct sockaddr_in6 *     sin6;
+	struct nng_sockaddr_in6 *nsin6;
+	int64_t                   now;
+
+	if (nni_aio_result(aio) != 0) {
+		// Outside of memory exhaustion, we can't really think
+		// of any reason for this to legitimately fail.
+		// Arguably we should inject a fallback delay, but for
+		// now we just carry on.  (NB: no new receive is posted
+		// on this path, so IPv6 reception stops here.)
+		return;
+	}
+
+	memset(&sa, 0, sizeof(sa));
+	sin6              = (void *) &sa;
+	nsin6             = &ztn->zn_rcv6_addr.s_in6;
+	sin6->sin6_family = AF_INET6;
+	sin6->sin6_port   = nsin6->sa_port;
+	memcpy(&sin6->sin6_addr, nsin6->sa_addr, 16);
+
+	nni_mtx_lock(&zt_lk);
+	// No cast needed: zt_now() already returns signed msec; this
+	// matches zt_node_rcv4_cb (was a spurious (uint64_t) cast).
+	now = zt_now(); // msec
+
+	// We are not going to perform any validation of the data; we
+	// just pass this straight into the ZeroTier core.
+	ZT_Node_processWirePacket(ztn->zn_znode, NULL, now, 0, (void *) &sa,
+	    ztn->zn_rcv6_buf, nni_aio_count(aio), &now);
+
+	// Schedule background work
+	zt_node_resched(ztn, now);
+
+	// Schedule another receive.
+	if (ztn->zn_udp6 != NULL) {
+		nni_iov iov;
+		iov.iov_buf = ztn->zn_rcv6_buf;
+		iov.iov_len = zt_rcv_bufsize;
+		nni_aio_set_iov(aio, 1, &iov);
+		nni_aio_set_input(aio, 0, &ztn->zn_rcv6_addr);
+		nni_plat_udp_recv(ztn->zn_udp6, aio);
+	}
+	nni_mtx_unlock(&zt_lk);
+}
+
+// zt_mac_to_node extracts the 40-bit node address from a virtual MAC
+// address.  The network ID was XOR-mixed in by zt_node_to_mac and has
+// to be extricated here; this is the inverse of that scramble.
+static uint64_t
+zt_mac_to_node(uint64_t mac, uint64_t nwid)
+{
+	uint64_t node;
+	// The node ID is located in the lower 40 bits, and scrambled
+	// against the nwid (reverse byte order, matching zt_node_to_mac).
+	node = mac & 0xffffffffffull;
+	node ^= ((nwid >> 8) & 0xff) << 32;
+	node ^= ((nwid >> 16) & 0xff) << 24;
+	node ^= ((nwid >> 24) & 0xff) << 16;
+	node ^= ((nwid >> 32) & 0xff) << 8;
+	node ^= (nwid >> 40) & 0xff;
+	return (node);
+}
+
+// zt_node_to_mac builds the 48-bit virtual MAC address for a node on a
+// given network; zt_mac_to_node reverses the transformation.
+static uint64_t
+zt_node_to_mac(uint64_t node, uint64_t nwid)
+{
+	uint64_t mac;
+	// We use LSB of network ID, and make sure that we clear
+	// multicast and set local administration -- this is the first
+	// octet of the 48 bit mac address.  We also avoid 0x52, which
+	// is known to be used in KVM, libvirt, etc.
+	mac = ((uint8_t)(nwid & 0xfe) | 0x02);
+	if (mac == 0x52) {
+		mac = 0x32;
+	}
+	mac <<= 40;
+	mac |= node;
+	// The rest of the network ID is XOR'd in, in reverse byte
+	// order.
+	mac ^= ((nwid >> 8) & 0xff) << 32;
+	mac ^= ((nwid >> 16) & 0xff) << 24;
+	mac ^= ((nwid >> 24) & 0xff) << 16;
+	mac ^= ((nwid >> 32) & 0xff) << 8;
+	mac ^= (nwid >> 40) & 0xff;
+	return (mac);
+}
+
+// zt_result maps a ZeroTier core result code to an NNG error.  Codes
+// with no direct equivalent are offset into the transport-specific
+// error space (NNG_ETRANERR + code).
+static int
+zt_result(enum ZT_ResultCode rv)
+{
+	switch (rv) {
+	case ZT_RESULT_OK:
+		return (0);
+	case ZT_RESULT_OK_IGNORED:
+		return (0);
+	case ZT_RESULT_FATAL_ERROR_OUT_OF_MEMORY:
+		return (NNG_ENOMEM);
+	case ZT_RESULT_FATAL_ERROR_DATA_STORE_FAILED:
+		return (NNG_EPERM);
+	case ZT_RESULT_FATAL_ERROR_INTERNAL:
+		return (NNG_EINTERNAL);
+	case ZT_RESULT_ERROR_NETWORK_NOT_FOUND:
+		return (NNG_EADDRINVAL);
+	case ZT_RESULT_ERROR_UNSUPPORTED_OPERATION:
+		return (NNG_ENOTSUP);
+	case ZT_RESULT_ERROR_BAD_PARAMETER:
+		return (NNG_EINVAL);
+	default:
+		return (NNG_ETRANERR + (int) rv);
+	}
+}
+
+// ZeroTier Node API callbacks
+// zt_virtual_config is the ZeroTier virtual-network-config callback.
+// On UP/CONFIG_UPDATE it propagates the network MTU to every endpoint
+// bound to that nwid; other operations are currently ignored.
+// NB: runs with zt_lk effectively held (ZeroTier callback context).
+static int
+zt_virtual_config(ZT_Node *node, void *userptr, void *thr, uint64_t nwid,
+    void **netptr, enum ZT_VirtualNetworkConfigOperation op,
+    const ZT_VirtualNetworkConfig *config)
+{
+	zt_node *ztn = userptr;
+	zt_ep *  ep;
+
+	NNI_ARG_UNUSED(thr);
+	NNI_ARG_UNUSED(netptr);
+
+	NNI_ASSERT(node == ztn->zn_znode);
+
+	// Maybe we don't have to create taps or anything like that.
+	// We do get our mac and MTUs from this, so there's that.
+	switch (op) {
+	case ZT_VIRTUAL_NETWORK_CONFIG_OPERATION_UP:
+	case ZT_VIRTUAL_NETWORK_CONFIG_OPERATION_CONFIG_UPDATE:
+
+		// We only really care about changes to the MTU.  From
+		// an API perspective the MAC could change, but that
+		// cannot really happen because the node identity and
+		// the nwid are fixed.
+		NNI_LIST_FOREACH (&ztn->zn_eplist, ep) {
+			NNI_ASSERT(nwid == config->nwid);
+			if (ep->ze_nwid != config->nwid) {
+				continue;
+			}
+			ep->ze_mtu = config->mtu;
+		}
+		break;
+	case ZT_VIRTUAL_NETWORK_CONFIG_OPERATION_DESTROY:
+	case ZT_VIRTUAL_NETWORK_CONFIG_OPERATION_DOWN:
+		// XXX: tear down endpoints?
+	default:
+		break;
+	}
+	return (0);
+}
+
+// zt_send modifies the start of the supplied buffer to update the
+// message headers with protocol specific details (version, port numbers,
+// etc.) and then sends it over the virtual network.  The buffer must
+// have at least zt_size_headers writable bytes at its start; the low
+// 24 bits of raddr/laddr are the ports, the upper bits the node ids.
+// Caller holds zt_lk (this calls into the ZeroTier core).
+static void
+zt_send(zt_node *ztn, uint64_t nwid, uint8_t op, uint64_t raddr,
+    uint64_t laddr, uint8_t *data, size_t len)
+{
+	uint64_t srcmac = zt_node_to_mac(laddr >> 24, nwid);
+	uint64_t dstmac = zt_node_to_mac(raddr >> 24, nwid);
+	int64_t  now    = zt_now();
+
+	NNI_ASSERT(len >= zt_size_headers);
+	data[zt_offset_op]    = op;
+	data[zt_offset_flags] = 0;
+	data[zt_offset_zero1] = 0;
+	data[zt_offset_zero2] = 0;
+	NNI_PUT16(data + zt_offset_version, zt_version);
+	ZT_PUT24(data + zt_offset_dst_port, raddr & zt_port_mask);
+	ZT_PUT24(data + zt_offset_src_port, laddr & zt_port_mask);
+
+	// Best effort: the result is deliberately ignored.
+	(void) ZT_Node_processVirtualNetworkFrame(ztn->zn_znode, NULL, now,
+	    nwid, srcmac, dstmac, zt_ethertype, 0, data, len, &now);
+
+	zt_node_resched(ztn, now);
+}
+
+// zt_send_err sends an error frame (code + NUL-terminated message)
+// to the remote peer.  The assertion guards the fixed 128-byte stack
+// buffer, so msg must be short.
+static void
+zt_send_err(zt_node *ztn, uint64_t nwid, uint64_t raddr, uint64_t laddr,
+    uint8_t err, const char *msg)
+{
+	uint8_t data[128];
+
+	NNI_ASSERT((strlen(msg) + zt_offset_err_msg) < sizeof(data));
+
+	data[zt_offset_err_code] = err;
+	nni_strlcpy((char *) data + zt_offset_err_msg, msg,
+	    sizeof(data) - zt_offset_err_msg);
+
+	zt_send(ztn, nwid, zt_op_error, raddr, laddr, data,
+	    strlen(msg) + zt_offset_err_msg);
+}
+
+// zt_pipe_send_err sends an error frame to the pipe's remote peer.
+static void
+zt_pipe_send_err(zt_pipe *p, uint8_t err, const char *msg)
+{
+	zt_send_err(p->zp_ztn, p->zp_nwid, p->zp_raddr, p->zp_laddr, err, msg);
+}
+
+// zt_pipe_send_disc_req sends a disconnect request to the peer.  The
+// frame carries no payload beyond the common headers that zt_send
+// fills in (presumably zt_size_disc_req == zt_size_headers -- the
+// remainder, if any, would be sent uninitialized; TODO confirm).
+static void
+zt_pipe_send_disc_req(zt_pipe *p)
+{
+	uint8_t data[zt_size_disc_req];
+
+	zt_send(p->zp_ztn, p->zp_nwid, zt_op_disc_req, p->zp_raddr,
+	    p->zp_laddr, data, sizeof(data));
+}
+
+// zt_pipe_send_ping sends a keep-alive PING; the peer answers PONG.
+static void
+zt_pipe_send_ping(zt_pipe *p)
+{
+	uint8_t data[zt_size_ping];
+
+	zt_send(p->zp_ztn, p->zp_nwid, zt_op_ping, p->zp_raddr, p->zp_laddr,
+	    data, sizeof(data));
+}
+
+// zt_pipe_send_pong answers a PING from the peer.
+static void
+zt_pipe_send_pong(zt_pipe *p)
+{
+	uint8_t data[zt_size_ping];
+
+	zt_send(p->zp_ztn, p->zp_nwid, zt_op_pong, p->zp_raddr, p->zp_laddr,
+	    data, sizeof(data));
+}
+
+// zt_pipe_send_conn_ack acknowledges a connection request, carrying
+// our protocol number so the dialer can record its peer.
+static void
+zt_pipe_send_conn_ack(zt_pipe *p)
+{
+	uint8_t data[zt_size_conn_ack];
+
+	NNI_PUT16(data + zt_offset_cack_proto, p->zp_proto);
+	zt_send(p->zp_ztn, p->zp_nwid, zt_op_conn_ack, p->zp_raddr,
+	    p->zp_laddr, data, sizeof(data));
+}
+
+// zt_ep_send_conn_req sends a connection request from a dialing
+// endpoint, carrying our protocol number.
+static void
+zt_ep_send_conn_req(zt_ep *ep)
+{
+	uint8_t data[zt_size_conn_req];
+
+	NNI_PUT16(data + zt_offset_creq_proto, ep->ze_proto);
+	zt_send(ep->ze_ztn, ep->ze_nwid, zt_op_conn_req, ep->ze_raddr,
+	    ep->ze_laddr, data, sizeof(data));
+}
+
+// zt_ep_recv_conn_ack handles a CONN_ACK directed at a dialing
+// endpoint: it validates the frame, creates the pipe, records the
+// peer protocol, releases the endpoint's local address (so the next
+// ep_connect binds a fresh one), and completes the pending connect aio.
+// Called with zt_lk held.
+static void
+zt_ep_recv_conn_ack(zt_ep *ep, uint64_t raddr, const uint8_t *data, size_t len)
+{
+	zt_node *ztn = ep->ze_ztn;
+	nni_aio *aio = ep->ze_creq_aio;
+	zt_pipe *p;
+	int      rv;
+
+	if (ep->ze_ndialer == NULL) {
+		// Listeners do not receive CONN_ACKs.
+		zt_send_err(ztn, ep->ze_nwid, raddr, ep->ze_laddr,
+		    zt_err_proto, "Inappropriate operation");
+		return;
+	}
+
+	if (len != zt_size_conn_ack) {
+		zt_send_err(ztn, ep->ze_nwid, raddr, ep->ze_laddr,
+		    zt_err_proto, "Bad message length");
+		return;
+	}
+
+	// No connection request outstanding; stale ack, ignore it.
+	if (ep->ze_creq_try == 0) {
+		return;
+	}
+
+	// Do we already have a matching pipe?  If so, we can discard
+	// the operation.  This should not happen, since we normally,
+	// deregister the endpoint when we create the pipe.
+	if ((zt_hash_find(ztn->zn_lpipes, ep->ze_laddr, (void **) &p)) == 0) {
+		return;
+	}
+
+	if ((rv = zt_pipe_alloc(&p, ep, raddr, ep->ze_laddr, false)) != 0) {
+		// We couldn't create the pipe, just drop it.
+		nni_aio_finish_error(aio, rv);
+		return;
+	}
+	NNI_GET16(data + zt_offset_cack_proto, p->zp_peer);
+
+	// Reset the address of the endpoint, so that the next call to
+	// ep_connect will bind a new one -- we are using this one for the
+	// pipe.
+	zt_hash_remove(ztn->zn_eps, ep->ze_laddr);
+	ep->ze_laddr = 0;
+
+	nni_aio_set_output(aio, 0, p);
+	nni_aio_finish(aio, 0, 0);
+}
+
+// zt_ep_recv_conn_req handles a CONN_REQ arriving at a listening
+// endpoint.  Duplicate requests (resends) for an existing pipe are
+// re-acked; already-queued requests are ignored; a full listen queue
+// drops the request.  Otherwise the request is queued (ring buffer of
+// zt_listenq entries indexed by free-running head/tail counters) and
+// pending accepts are serviced.  Called with zt_lk held.
+static void
+zt_ep_recv_conn_req(zt_ep *ep, uint64_t raddr, const uint8_t *data, size_t len)
+{
+	zt_node *ztn = ep->ze_ztn;
+	zt_pipe *p;
+	int      i;
+
+	if (ep->ze_nlistener == NULL) {
+		// Dialers do not receive CONN_REQs.
+		zt_send_err(ztn, ep->ze_nwid, raddr, ep->ze_laddr,
+		    zt_err_proto, "Inappropriate operation");
+		return;
+	}
+	if (len != zt_size_conn_req) {
+		zt_send_err(ztn, ep->ze_nwid, raddr, ep->ze_laddr,
+		    zt_err_proto, "Bad message length");
+		return;
+	}
+
+	// If we already have created a pipe for this connection
+	// then just reply the conn ack.
+	if ((zt_hash_find(ztn->zn_rpipes, raddr, (void **) &p)) == 0) {
+		zt_pipe_send_conn_ack(p);
+		return;
+	}
+
+	// We may already have a connection request queued (if this was
+	// a resend for example); if that's the case we just ignore
+	// this one.
+	for (i = ep->ze_creq_tail; i != ep->ze_creq_head; i++) {
+		if (ep->ze_creqs[i % zt_listenq].cr_raddr == raddr) {
+			return;
+		}
+	}
+	// We may already have filled our listenq, in which case we just drop.
+	if ((ep->ze_creq_tail + zt_listenq) == ep->ze_creq_head) {
+		// We have taken as many as we can, so just drop it.
+		return;
+	}
+
+	// Record the connection request, and then process any
+	// pending acceptors.
+	i = ep->ze_creq_head % zt_listenq;
+
+	NNI_GET16(data + zt_offset_creq_proto, ep->ze_creqs[i].cr_proto);
+	ep->ze_creqs[i].cr_raddr  = raddr;
+	ep->ze_creqs[i].cr_expire = nni_clock() + zt_listen_expire;
+	ep->ze_creq_head++;
+
+	zt_ep_doaccept(ep);
+}
+
+static void
+zt_ep_recv_error(zt_ep *ep, const uint8_t *data, size_t len)
+{
+ int code;
+
+ // Most of the time we don't care about errors. The exception here
+ // is that when we have an outstanding CON_REQ, we would like to
+ // process that appropriately.
+
+ if (ep->ze_ndialer == NULL) {
+ // Not a dialer. Drop it.
+ return;
+ }
+
+ if (len < zt_offset_err_msg) {
+ // Malformed error frame.
+ return;
+ }
+
+ code = data[zt_offset_err_code];
+ switch (code) {
+ case zt_err_refused:
+ code = NNG_ECONNREFUSED;
+ break;
+ case zt_err_notconn:
+ code = NNG_ECLOSED;
+ break;
+ case zt_err_wrongsp:
+ code = NNG_EPROTO;
+ break;
+ default:
+ code = NNG_ETRANERR;
+ break;
+ }
+
+ if (ep->ze_creq_active) {
+ ep->ze_creq_try = 0;
+ ep->ze_creq_active = 0;
+ nni_aio_finish_error(ep->ze_creq_aio, code);
+ }
+}
+
+// zt_ep_virtual_recv dispatches a frame addressed to an endpoint (no
+// matching pipe was found) to the appropriate handler by opcode.
+// Anything other than CONN_REQ/CONN_ACK/ERROR is a protocol error.
+static void
+zt_ep_virtual_recv(
+    zt_ep *ep, uint8_t op, uint64_t raddr, const uint8_t *data, size_t len)
+{
+	// Only listeners should be receiving.  Dialers receive on the pipe,
+	// rather than the endpoint.  The only message that endpoints can
+	// receive are connection requests.
+	switch (op) {
+	case zt_op_conn_req:
+		zt_ep_recv_conn_req(ep, raddr, data, len);
+		return;
+	case zt_op_conn_ack:
+		zt_ep_recv_conn_ack(ep, raddr, data, len);
+		return;
+	case zt_op_error:
+		zt_ep_recv_error(ep, data, len);
+		return;
+	default:
+		zt_send_err(ep->ze_ztn, ep->ze_nwid, raddr, ep->ze_laddr,
+		    zt_err_proto, "Bad operation");
+		return;
+	}
+}
+
+// zt_pipe_close_err marks the pipe closed, fails any pending user
+// receive with err, stops keep-alives, and (if msg != NULL) notifies
+// the peer with an error frame.  Called with zt_lk held.
+static void
+zt_pipe_close_err(zt_pipe *p, int err, uint8_t code, const char *msg)
+{
+	nni_aio *aio;
+	if ((aio = p->zp_user_rxaio) != NULL) {
+		p->zp_user_rxaio = NULL;
+		nni_aio_finish_error(aio, err);
+	}
+	nni_aio_close(p->zp_ping_aio);
+	p->zp_closed = true;
+	if (msg != NULL) {
+		zt_pipe_send_err(p, code, msg);
+	}
+}
+
+// zt_pipe_recv_data handles an inbound DATA frame.  Messages may be
+// fragmented: fragments are reassembled in one of the zt_recvq
+// fraglist slots, keyed by the 16-bit message id, and once the last
+// missing fragment arrives the complete message is delivered through
+// zt_pipe_dorecv.  Oversize messages are silently discarded; malformed
+// frames close the pipe with a protocol error.  Called with zt_lk held.
+static void
+zt_pipe_recv_data(zt_pipe *p, const uint8_t *data, size_t len)
+{
+	uint16_t     msgid;
+	uint16_t     fragno;
+	uint16_t     nfrags;
+	size_t       fragsz;
+	zt_fraglist *fl;
+	int          i;
+	int          slot;
+	uint8_t      bit;
+	uint8_t *    body;
+
+	if (len < zt_size_data) {
+		// Runt frame.  Drop it and close pipe with a protocol error.
+		zt_pipe_close_err(p, NNG_EPROTO, zt_err_proto, "Runt frame");
+		return;
+	}
+
+	NNI_GET16(data + zt_offset_data_id, msgid);
+	NNI_GET16(data + zt_offset_data_fragsz, fragsz);
+	NNI_GET16(data + zt_offset_data_frag, fragno);
+	NNI_GET16(data + zt_offset_data_nfrag, nfrags);
+	len -= zt_offset_data_data;
+	data += zt_offset_data_data;
+
+	// Check for cases where message size is clearly too large.  Note
+	// that we only can catch the case where a message is larger by
+	// more than a fragment, since the final fragment may be shorter,
+	// and we won't know that until we receive it.
+	if ((p->zp_rcvmax > 0) &&
+	    ((nfrags * fragsz) >= (p->zp_rcvmax + fragsz))) {
+		// Discard, as the forwarder might be on the other side
+		// of a device.  This is gentler than just shutting the pipe
+		// down.  Sending a remote error might be polite, but since
+		// most peers will close the pipe on such an error, we
+		// simply silently discard it.
+		return;
+	}
+
+	// We run the recv logic once, to clear stale fragment entries.
+	zt_pipe_dorecv(p);
+
+	// Find a suitable fragment slot: reuse the slot already holding
+	// this message id, otherwise evict the slot with the oldest
+	// (stalest) partial message.
+	slot = -1;
+	for (i = 0; i < zt_recvq; i++) {
+		fl = &p->zp_recvq[i];
+		// This was our message ID, we always use it.
+		if (msgid == fl->fl_msgid) {
+			slot = i;
+			break;
+		}
+
+		if (slot < 0) {
+			slot = i;
+		} else if (fl->fl_time < p->zp_recvq[slot].fl_time) {
+			// This has an earlier expiration, so lets choose it.
+			slot = i;
+		}
+	}
+
+	NNI_ASSERT(slot >= 0);
+
+	fl = &p->zp_recvq[slot];
+	if (fl->fl_msgid != msgid) {
+		// First fragment we've received for this message (but might
+		// not be first fragment for message!)
+		zt_fraglist_clear(fl);
+
+		if (nni_msg_alloc(&fl->fl_msg, nfrags * fragsz) != 0) {
+			// Out of memory.  We don't close the pipe, but
+			// just fail to receive the message.  Bump a stat?
+			return;
+		}
+
+		fl->fl_nfrags = nfrags;
+		fl->fl_fragsz = fragsz;
+		fl->fl_msgid  = msgid;
+		fl->fl_time   = nni_clock();
+
+		// Set the missing mask: one bit per expected fragment, a
+		// set bit meaning "still missing".
+		// NOTE(review): when nfrags is a multiple of 8 the |= below
+		// touches the byte just past the full bytes (ORing 0);
+		// confirm fl_missing is sized with that slack.
+		memset(fl->fl_missing, 0xff, nfrags / 8);
+		fl->fl_missing[nfrags / 8] |= ((1 << (nfrags % 8)) - 1);
+	}
+	if ((nfrags != fl->fl_nfrags) || (fragsz != fl->fl_fragsz) ||
+	    (fragno >= nfrags) || (fragsz == 0) || (nfrags == 0) ||
+	    ((fragno != (nfrags - 1)) && (len != fragsz))) {
+		// Protocol error, message parameters changed.
+		zt_pipe_close_err(
+		    p, NNG_EPROTO, zt_err_proto, "Invalid message parameters");
+		zt_fraglist_clear(fl);
+		return;
+	}
+
+	bit = (uint8_t)(1 << (fragno % 8));
+	if ((fl->fl_missing[fragno / 8] & bit) == 0) {
+		// We've already got this fragment, ignore it.  We don't
+		// bother to check for changed data.
+		return;
+	}
+
+	fl->fl_missing[fragno / 8] &= ~(bit);
+	body = nni_msg_body(fl->fl_msg);
+	body += fragno * fragsz;
+	memcpy(body, data, len);
+	if (fragno == (nfrags - 1)) {
+		// Last frag, maybe shorten the message.
+		nni_msg_chop(fl->fl_msg, (fragsz - len));
+		if ((nni_msg_len(fl->fl_msg) > p->zp_rcvmax) &&
+		    (p->zp_rcvmax > 0)) {
+			// Strict enforcement of max recv.
+			zt_fraglist_clear(fl);
+			// Just discard the message.
+			return;
+		}
+	}
+
+	// Any set bit left means a fragment is still outstanding.
+	for (i = 0; i < ((nfrags + 7) / 8); i++) {
+		if (fl->fl_missing[i]) {
+			return;
+		}
+	}
+
+	// We got all fragments... try to send it up.
+	fl->fl_ready = 1;
+	zt_pipe_dorecv(p);
+}
+
+// zt_pipe_recv_ping answers a peer PING with a PONG; a badly sized
+// ping elicits an error frame instead.
+static void
+zt_pipe_recv_ping(zt_pipe *p, const uint8_t *data, size_t len)
+{
+	NNI_ARG_UNUSED(data);
+
+	if (len != zt_size_ping) {
+		zt_pipe_send_err(p, zt_err_proto, "Incorrect ping size");
+		return;
+	}
+	zt_pipe_send_pong(p);
+}
+
+// zt_pipe_recv_pong validates a PONG's size; the liveness bookkeeping
+// itself happens in zt_pipe_virtual_recv (zp_last_recv/zp_ping_try).
+static void
+zt_pipe_recv_pong(zt_pipe *p, const uint8_t *data, size_t len)
+{
+	NNI_ARG_UNUSED(data);
+
+	if (len != zt_size_pong) {
+		zt_pipe_send_err(p, zt_err_proto, "Incorrect pong size");
+	}
+}
+
+// zt_pipe_recv_disc_req handles a peer-initiated disconnect: mark the
+// pipe closed and fail any pending user receive with NNG_ECLOSED.
+static void
+zt_pipe_recv_disc_req(zt_pipe *p, const uint8_t *data, size_t len)
+{
+	nni_aio *aio;
+	NNI_ARG_UNUSED(data);
+	NNI_ARG_UNUSED(len);
+
+	// NB: lock held already.
+	// Don't bother to check the length, going to disconnect anyway.
+	if ((aio = p->zp_user_rxaio) != NULL) {
+		p->zp_user_rxaio = NULL;
+		p->zp_closed     = true;
+		nni_aio_finish_error(aio, NNG_ECLOSED);
+	}
+}
+
+// zt_pipe_recv_error handles a peer error frame on an established
+// pipe: mark it closed and fail any pending receive with NNG_ETRANERR.
+static void
+zt_pipe_recv_error(zt_pipe *p, const uint8_t *data, size_t len)
+{
+	nni_aio *aio;
+	NNI_ARG_UNUSED(data);
+	NNI_ARG_UNUSED(len);
+
+	// Perhaps we should log an error message, but at the end of
+	// the day, the details are just not that interesting.
+	if ((aio = p->zp_user_rxaio) != NULL) {
+		p->zp_user_rxaio = NULL;
+		p->zp_closed     = true;
+		nni_aio_finish_error(aio, NNG_ETRANERR);
+	}
+}
+
+// This function is called when we have determined that a frame has
+// arrived for a pipe.  The remote and local addresses were both
+// matched by the caller.  It refreshes liveness state and dispatches
+// by opcode; unknown opcodes are silently dropped.  zt_lk is held.
+static void
+zt_pipe_virtual_recv(zt_pipe *p, uint8_t op, const uint8_t *data, size_t len)
+{
+	// We got data, so update our recv time, and reset the ping
+	// retry counter.
+	p->zp_last_recv = nni_clock();
+	p->zp_ping_try  = 0;
+
+	switch (op) {
+	case zt_op_data:
+		zt_pipe_recv_data(p, data, len);
+		return;
+	case zt_op_disc_req:
+		zt_pipe_recv_disc_req(p, data, len);
+		return;
+	case zt_op_ping:
+		zt_pipe_recv_ping(p, data, len);
+		return;
+	case zt_op_pong:
+		zt_pipe_recv_pong(p, data, len);
+		return;
+	case zt_op_error:
+		zt_pipe_recv_error(p, data, len);
+		return;
+	case zt_op_conn_req:
+		// Peer resent its request; our ack must have been lost.
+		zt_pipe_send_conn_ack(p);
+		return;
+	}
+}
+
+// This function is called when a frame arrives on the
+// *virtual* network.  It validates the common header, decodes the
+// remote/local addresses (40-bit node id << 24 | 24-bit port), and
+// routes to a matching client pipe, server pipe, or endpoint; frames
+// with no destination get a NAK or are dropped depending on opcode.
+static void
+zt_virtual_recv(ZT_Node *node, void *userptr, void *thr, uint64_t nwid,
+    void **netptr, uint64_t srcmac, uint64_t dstmac, unsigned int ethertype,
+    unsigned int vlanid, const void *payload, unsigned int len)
+{
+	zt_node *      ztn = userptr;
+	uint8_t        op;
+	const uint8_t *data = payload;
+	uint16_t       version;
+	uint32_t       rport;
+	uint32_t       lport;
+	zt_ep *        ep;
+	zt_pipe *      p;
+	uint64_t       raddr;
+	uint64_t       laddr;
+
+	NNI_ARG_UNUSED(node);
+	NNI_ARG_UNUSED(thr);
+	NNI_ARG_UNUSED(netptr);
+
+	// Drop frames that are not ours, runts, or that have a
+	// nonzero flags/reserved area.
+	if ((ethertype != zt_ethertype) || (len < zt_size_headers) ||
+	    (data[zt_offset_flags] != 0) || (data[zt_offset_zero1] != 0) ||
+	    (data[zt_offset_zero2] != 0)) {
+		return;
+	}
+	NNI_GET16(data + zt_offset_version, version);
+	if (version != zt_version) {
+		return;
+	}
+	if (vlanid != 0) { // for now we only use vlan 0.
+		return;
+	}
+
+	op = data[zt_offset_op];
+
+	ZT_GET24(data + zt_offset_dst_port, lport);
+	ZT_GET24(data + zt_offset_src_port, rport);
+
+	raddr = zt_mac_to_node(srcmac, nwid);
+	raddr <<= 24;
+	raddr |= rport;
+
+	laddr = zt_mac_to_node(dstmac, nwid);
+	laddr <<= 24;
+	laddr |= lport;
+
+	// NB: We are holding the zt_lock.
+
+	// Look up a pipe, but also we use this chance to check that
+	// the source address matches what the pipe was established with.
+	// If the pipe does not match then we nak it.  Note that pipes can
+	// appear on the znode twice (loopback), so we have to be careful
+	// to check the entire set of parameters, and to check for server
+	// vs. client pipes separately.
+
+	// If its a local address match on a client pipe, process it.
+	if ((zt_hash_find(ztn->zn_lpipes, laddr, (void *) &p) == 0) &&
+	    (p->zp_nwid == nwid) && (p->zp_raddr == raddr)) {
+		zt_pipe_virtual_recv(p, op, data, len);
+		return;
+	}
+
+	// If its a remote address match on a server pipe, process it.
+	if ((zt_hash_find(ztn->zn_rpipes, raddr, (void *) &p) == 0) &&
+	    (p->zp_nwid == nwid) && (p->zp_laddr == laddr)) {
+		zt_pipe_virtual_recv(p, op, data, len);
+		return;
+	}
+
+	// No pipe, so look for an endpoint.
+	if ((zt_hash_find(ztn->zn_eps, laddr, (void **) &ep) == 0) &&
+	    (ep->ze_nwid == nwid)) {
+		// direct this to an endpoint.
+		zt_ep_virtual_recv(ep, op, raddr, data, len);
+		return;
+	}
+
+	// We have a request for which we have no listener, and no
+	// pipe.  For some of these we send back a NAK, but for others
+	// we just drop the frame.
+	switch (op) {
+	case zt_op_conn_req:
+		// No listener.  Connection refused.
+		zt_send_err(ztn, nwid, raddr, laddr, zt_err_refused,
+		    "Connection refused");
+		return;
+	case zt_op_data:
+	case zt_op_ping:
+	case zt_op_conn_ack:
+		zt_send_err(ztn, nwid, raddr, laddr, zt_err_notconn,
+		    "Connection not found");
+		break;
+	case zt_op_error:
+	case zt_op_pong:
+	case zt_op_disc_req:
+	default:
+		// Just drop these.
+		break;
+	}
+}
+
+// zt_event_cb is the ZeroTier core event callback.  We currently
+// take no action on any event; the trace printf is kept (disabled)
+// as a debugging aid.
+static void
+zt_event_cb(ZT_Node *node, void *userptr, void *thr, enum ZT_Event event,
+    const void *payload)
+{
+	NNI_ARG_UNUSED(node);
+	NNI_ARG_UNUSED(userptr);
+	NNI_ARG_UNUSED(thr);
+	NNI_ARG_UNUSED(payload);
+
+	switch (event) {
+	case ZT_EVENT_ONLINE:  // Connected to the virtual net.
+	case ZT_EVENT_UP:      // Node initialized (may not be connected).
+	case ZT_EVENT_DOWN:    // Teardown of the node.
+	case ZT_EVENT_OFFLINE: // Removal of the node from the net.
+	case ZT_EVENT_TRACE:   // Local trace events.
+		// printf("TRACE: %s\n", (const char *) payload);
+		break;
+	case ZT_EVENT_REMOTE_TRACE: // Remote trace, not supported.
+	default:
+		break;
+	}
+}
+
+// zt_files maps ZT_StateObjectType values to file name templates for
+// persisted state; a NULL entry means the type is not persisted.  The
+// templates are consumed by snprintf with both objid words as %llx
+// arguments (extra arguments beyond the conversions are ignored).
+static const char *zt_files[] = {
+	// clang-format off
+	NULL, // none, i.e. not used at all
+	"identity.public",
+	"identity.secret",
+	"planet",
+	"moon.%llx",
+	NULL, // peer, e.g. peers.d/<ID> -- we don't persist this
+	"network.%llx",
+	// clang-format on
+};
+
+// In-memory fallback store for ZeroTier state, used when no
+// persistence path is configured; indexed by ZT_StateObjectType.
+static struct {
+	size_t len;
+	void * data;
+} zt_ephemeral_state[ZT_STATE_OBJECT_NETWORK_CONFIG + 1];
+
+// zt_state_put is the ZeroTier state-save callback.  Global state
+// objects are written as files under zn_path; when no path is
+// configured they are held in the zt_ephemeral_state table instead.
+// A negative len means "delete the object".
+static void
+zt_state_put(ZT_Node *node, void *userptr, void *thr,
+    enum ZT_StateObjectType objtype, const uint64_t objid[2], const void *data,
+    int len)
+{
+	zt_node *   ztn = userptr;
+	char *      path;
+	const char *template;
+	char        fname[32];
+
+	NNI_ARG_UNUSED(node);
+	NNI_ARG_UNUSED(thr);
+	NNI_ARG_UNUSED(objid); // only use global files
+
+	if ((objtype > ZT_STATE_OBJECT_NETWORK_CONFIG) ||
+	    ((template = zt_files[(int) objtype]) == NULL)) {
+		return;
+	}
+
+	(void) snprintf(fname, sizeof(fname), template,
+	    (unsigned long long) objid[0], (unsigned long long) objid[1]);
+
+	// If we have no valid path, then we just use ephemeral data.
+	// Note that for moons, and so forth, we wind up just storing them
+	// all in the same place, but it does not matter since we don't
+	// really persist them anyway.
+	if (strlen(ztn->zn_path) == 0) {
+		void * odata = zt_ephemeral_state[objtype].data;
+		size_t olen  = zt_ephemeral_state[objtype].len;
+
+		if (len >= 0) {
+			void *ndata;
+			if ((ndata = nni_alloc(len)) == NULL) {
+				// Allocation failed: keep the old entry
+				// intact.  (Previously the old buffer was
+				// freed here while the table still pointed
+				// at it, leaving a dangling pointer.)
+				return;
+			}
+			memcpy(ndata, data, len);
+			zt_ephemeral_state[objtype].data = ndata;
+			zt_ephemeral_state[objtype].len  = len;
+		} else {
+			// Negative length is a deletion.
+			zt_ephemeral_state[objtype].data = NULL;
+			zt_ephemeral_state[objtype].len  = 0;
+		}
+
+		// Free the prior value only after the table no longer
+		// references it.
+		if (olen > 0) {
+			nni_free(odata, olen);
+		}
+		return;
+	}
+
+	if ((path = nni_file_join(ztn->zn_path, fname)) == NULL) {
+		return;
+	}
+
+	if (len < 0) {
+		(void) nni_file_delete(path);
+	} else {
+		(void) nni_file_put(path, data, len);
+	}
+	nni_strfree(path);
+}
+
+// zt_state_get is the ZeroTier state-load callback: it returns the
+// object's length and copies it into data, or -1 when the object is
+// unknown, missing, or larger than the provided buffer.  Reads from
+// the ephemeral table when no persistence path is configured.
+static int
+zt_state_get(ZT_Node *node, void *userptr, void *thr,
+    enum ZT_StateObjectType objtype, const uint64_t objid[2], void *data,
+    unsigned int len)
+{
+	zt_node *   ztn = userptr;
+	char *      path;
+	char        fname[32];
+	const char *template;
+	size_t      sz;
+	void *      buf;
+
+	NNI_ARG_UNUSED(node);
+	NNI_ARG_UNUSED(thr);
+	NNI_ARG_UNUSED(objid); // we only use global files
+
+	if ((objtype > ZT_STATE_OBJECT_NETWORK_CONFIG) ||
+	    ((template = zt_files[(int) objtype]) == NULL)) {
+		return (-1);
+	}
+	// NOTE(review): unlike zt_state_put, the objid words are not cast
+	// to unsigned long long for the %llx conversions -- fine where
+	// uint64_t is unsigned long long; confirm for other targets.
+	snprintf(fname, sizeof(fname), template, objid[0], objid[1]);
+
+	// If no base directory, we are using ephemeral data.
+	if (strlen(ztn->zn_path) == 0) {
+		if (zt_ephemeral_state[objtype].data == NULL) {
+			return (-1);
+		}
+		if (zt_ephemeral_state[objtype].len > len) {
+			return (-1);
+		}
+		len = zt_ephemeral_state[objtype].len;
+		memcpy(data, zt_ephemeral_state[objtype].data, len);
+		return (len);
+	}
+
+	if ((path = nni_file_join(ztn->zn_path, fname)) == NULL) {
+		return (-1);
+	}
+
+	if (nni_file_get(path, &buf, &sz) != 0) {
+		nni_strfree(path);
+		return (-1);
+	}
+	nni_strfree(path);
+	if (sz > len) {
+		nni_free(buf, sz);
+		return (-1);
+	}
+	memcpy(data, buf, sz);
+	nni_free(buf, sz);
+	return ((int) sz);
+}
+
+// zt_send_hdr is prepended (in the same allocation) to an outgoing UDP
+// payload so the sender can recover the destination address and the
+// size needed to free the buffer.
+typedef struct zt_send_hdr {
+	nni_sockaddr sa;
+	size_t       len;
+} zt_send_hdr;
+
+// This function is called when ZeroTier desires to send a
+// physical frame.  The data is a UDP payload, the rest of the
+// payload should be set over vanilla UDP.  Returns 0 on (attempted)
+// send, -1 when the address family is unsupported or allocation
+// fails.  NB: this blocks on nni_aio_wait while holding zt_lk; the
+// comment below explains why that is considered acceptable.
+static int
+zt_wire_packet_send(ZT_Node *node, void *userptr, void *thr, int64_t socket,
+    const struct sockaddr_storage *remaddr, const void *data, unsigned int len,
+    unsigned int ttl)
+{
+	nni_aio *            aio;
+	nni_sockaddr         addr;
+	struct sockaddr_in * sin  = (void *) remaddr;
+	struct sockaddr_in6 *sin6 = (void *) remaddr;
+	zt_node *            ztn  = userptr;
+	nni_plat_udp *       udp;
+	uint8_t *            buf;
+	zt_send_hdr *        hdr;
+	nni_iov              iov;
+
+	NNI_ARG_UNUSED(node);
+	NNI_ARG_UNUSED(thr);
+	NNI_ARG_UNUSED(socket);
+	NNI_ARG_UNUSED(ttl);
+
+	// Kind of unfortunate, but we have to convert the
+	// sockaddr to a neutral form, and then back again in
+	// the platform layer.
+	switch (sin->sin_family) {
+	case AF_INET:
+		addr.s_in.sa_family = NNG_AF_INET;
+		addr.s_in.sa_port   = sin->sin_port;
+		addr.s_in.sa_addr   = sin->sin_addr.s_addr;
+		udp                 = ztn->zn_udp4;
+		break;
+	case AF_INET6:
+		addr.s_in6.sa_family = NNG_AF_INET6;
+		addr.s_in6.sa_port   = sin6->sin6_port;
+		udp                  = ztn->zn_udp6;
+		memcpy(addr.s_in6.sa_addr, sin6->sin6_addr.s6_addr, 16);
+		break;
+	default:
+		// No way to understand the address.
+		return (-1);
+	}
+
+	if (nni_aio_alloc(&aio, NULL, NULL) != 0) {
+		// Out of memory
+		return (-1);
+	}
+	// Single allocation: [zt_send_hdr][payload copy].
+	if ((buf = nni_alloc(sizeof(*hdr) + len)) == NULL) {
+		nni_aio_free(aio);
+		return (-1);
+	}
+
+	hdr = (void *) buf;
+	buf += sizeof(*hdr);
+
+	memcpy(buf, data, len);
+	nni_aio_set_data(aio, 0, hdr);
+	hdr->sa  = addr;
+	hdr->len = len;
+	nni_aio_set_input(aio, 0, &hdr->sa);
+
+	iov.iov_buf = buf;
+	iov.iov_len = len;
+	nni_aio_set_iov(aio, 1, &iov);
+
+	// This should be non-blocking/best-effort, so while
+	// not great that we're holding the lock, also not tragic.
+	nni_plat_udp_send(udp, aio);
+
+	// UDP sending is "fast" on all platforms -- given that its
+	// best effort only, this will complete immediately, resulting
+	// in either a message on the wire, or a discarded frame.  We don't
+	// care which.  (There may be a few thread context switches, but
+	// none of them are going to have to wait for some unbounded time.)
+	nni_aio_wait(aio);
+	nni_aio_free(aio);
+	// hdr is ours (not owned by the aio), so reading hdr->len here
+	// is safe.
+	nni_free(hdr, hdr->len + sizeof(*hdr));
+
+	return (0);
+}
+
+// Callback vector handed to ZT_Node_new, wiring the ZeroTier core to
+// the state, wire-I/O, virtual-frame, config and event handlers above.
+static struct ZT_Node_Callbacks zt_callbacks = {
+	.version                      = 0,
+	.statePutFunction             = zt_state_put,
+	.stateGetFunction             = zt_state_get,
+	.wirePacketSendFunction       = zt_wire_packet_send,
+	.virtualNetworkFrameFunction  = zt_virtual_recv,
+	.virtualNetworkConfigFunction = zt_virtual_config,
+	.eventCallback                = zt_event_cb,
+	.pathCheckFunction            = NULL,
+	.pathLookupFunction           = NULL,
+};
+
+// zt_node_destroy tears down a zt_node: stop I/O and the background
+// thread, delete the ZeroTier core node, and release all resources.
+// It must also be safe on a partially constructed node, since
+// zt_node_create calls it on failure (hence the NULL checks; the
+// zt_hash_fini/nni_aio_free helpers are likewise relied on to accept
+// NULL members on that path -- the same assumption the original code
+// already made for zn_eps et al.).
+static void
+zt_node_destroy(zt_node *ztn)
+{
+	nni_aio_stop(ztn->zn_rcv4_aio);
+	nni_aio_stop(ztn->zn_rcv6_aio);
+
+	// Wait for background thread to exit!
+	nni_thr_fini(&ztn->zn_bgthr);
+
+	if (ztn->zn_znode != NULL) {
+		ZT_Node_delete(ztn->zn_znode);
+	}
+
+	if (ztn->zn_udp4 != NULL) {
+		nni_plat_udp_close(ztn->zn_udp4);
+	}
+	if (ztn->zn_udp6 != NULL) {
+		nni_plat_udp_close(ztn->zn_udp6);
+	}
+
+	if (ztn->zn_rcv4_buf != NULL) {
+		nni_free(ztn->zn_rcv4_buf, zt_rcv_bufsize);
+	}
+	if (ztn->zn_rcv6_buf != NULL) {
+		nni_free(ztn->zn_rcv6_buf, zt_rcv_bufsize);
+	}
+	if (ztn->zn_flock != NULL) {
+		nni_file_unlock(ztn->zn_flock);
+	}
+	nni_aio_free(ztn->zn_rcv4_aio);
+	nni_aio_free(ztn->zn_rcv6_aio);
+	// zn_ports is created in zt_node_create but was never finalized
+	// here, leaking the hash on node teardown.
+	zt_hash_fini(ztn->zn_ports);
+	zt_hash_fini(ztn->zn_eps);
+	zt_hash_fini(ztn->zn_lpipes);
+	zt_hash_fini(ztn->zn_rpipes);
+	nni_cv_fini(&ztn->zn_bgcv);
+	NNI_FREE_STRUCT(ztn);
+}
+
+// zt_node_create allocates and fully initializes a zt_node rooted at
+// the given persistence path ("" for ephemeral state): UDP sockets,
+// hashes, background thread, file lock, the ZeroTier core node, and
+// the initial receives.  On failure everything is unwound through
+// zt_node_destroy.  Caller holds zt_lk (the new node is appended to
+// the global zt_nodes list, and zn_bgcv is tied to zt_lk).
+static int
+zt_node_create(zt_node **ztnp, const char *path)
+{
+	zt_node *          ztn;
+	nng_sockaddr       sa4;
+	nng_sockaddr       sa6;
+	int                rv;
+	enum ZT_ResultCode zrv;
+	nni_iov            iov;
+
+	// XXX: Right now we depend on having both IPv6 and IPv4 available.
+	// Probably we should support coping with the lack of either of them.
+
+	// We want to bind to any address we can (for now).
+	memset(&sa4, 0, sizeof(sa4));
+	sa4.s_in.sa_family = NNG_AF_INET;
+	memset(&sa6, 0, sizeof(sa6));
+	sa6.s_in6.sa_family = NNG_AF_INET6;
+
+	if ((ztn = NNI_ALLOC_STRUCT(ztn)) == NULL) {
+		return (NNG_ENOMEM);
+	}
+	NNI_LIST_INIT(&ztn->zn_eplist, zt_ep, ze_link);
+	NNI_LIST_INIT(&ztn->zn_plist, zt_pipe, zp_link);
+	nni_cv_init(&ztn->zn_bgcv, &zt_lk);
+	// NOTE(review): the nni_aio_alloc results are not checked here;
+	// confirm this cannot fail, or that a NULL aio is tolerated below.
+	nni_aio_alloc(&ztn->zn_rcv4_aio, zt_node_rcv4_cb, ztn);
+	nni_aio_alloc(&ztn->zn_rcv6_aio, zt_node_rcv6_cb, ztn);
+
+	if (((ztn->zn_rcv4_buf = nni_alloc(zt_rcv_bufsize)) == NULL) ||
+	    ((ztn->zn_rcv6_buf = nni_alloc(zt_rcv_bufsize)) == NULL)) {
+		zt_node_destroy(ztn);
+		return (NNG_ENOMEM);
+	}
+	if (((rv = zt_hash_init(&ztn->zn_ports)) != 0) ||
+	    ((rv = zt_hash_init(&ztn->zn_eps)) != 0) ||
+	    ((rv = zt_hash_init(&ztn->zn_lpipes)) != 0) ||
+	    ((rv = zt_hash_init(&ztn->zn_rpipes)) != 0) ||
+	    ((rv = nni_thr_init(&ztn->zn_bgthr, zt_bgthr, ztn)) != 0) ||
+	    ((rv = nni_plat_udp_open(&ztn->zn_udp4, &sa4)) != 0) ||
+	    ((rv = nni_plat_udp_open(&ztn->zn_udp6, &sa6)) != 0)) {
+		zt_node_destroy(ztn);
+		return (rv);
+	}
+	nni_thr_set_name(&ztn->zn_bgthr, "nng:zt");
+
+	// Take an exclusive file lock on the state directory so two
+	// processes cannot share the same identity.
+	if (strlen(path) > 0) {
+		char *lkfile;
+		if ((lkfile = nni_file_join(path, "lock")) == NULL) {
+			zt_node_destroy(ztn);
+			return (NNG_ENOMEM);
+		}
+
+		if ((rv = nni_file_lock(lkfile, &ztn->zn_flock)) != 0) {
+			zt_node_destroy(ztn);
+			nni_strfree(lkfile);
+			return (rv);
+		}
+		nni_strfree(lkfile);
+	}
+
+	// Setup for dynamic ephemeral port allocations.  We
+	// set the range to allow for ephemeral ports, but not
+	// higher than the max port, and starting with an
+	// initial random value.  Note that this should give us
+	// about 8 million possible ephemeral ports.
+	zt_hash_limits(ztn->zn_ports, zt_ephemeral, zt_max_port,
+	    (nni_random() % (zt_max_port - zt_ephemeral)) + zt_ephemeral);
+
+	nni_strlcpy(ztn->zn_path, path, sizeof(ztn->zn_path));
+	zrv = ZT_Node_new(&ztn->zn_znode, ztn, NULL, &zt_callbacks, zt_now());
+	if (zrv != ZT_RESULT_OK) {
+		zt_node_destroy(ztn);
+		return (zt_result(zrv));
+	}
+
+	nni_list_append(&zt_nodes, ztn);
+
+	ztn->zn_self = ZT_Node_address(ztn->zn_znode);
+
+	nni_thr_run(&ztn->zn_bgthr);
+
+	// Schedule an initial background run.
+	zt_node_resched(ztn, 1);
+
+	// Schedule receive
+	iov.iov_buf = ztn->zn_rcv4_buf;
+	iov.iov_len = zt_rcv_bufsize;
+	nni_aio_set_iov(ztn->zn_rcv4_aio, 1, &iov);
+	nni_aio_set_input(ztn->zn_rcv4_aio, 0, &ztn->zn_rcv4_addr);
+	iov.iov_buf = ztn->zn_rcv6_buf;
+	iov.iov_len = zt_rcv_bufsize;
+	nni_aio_set_iov(ztn->zn_rcv6_aio, 1, &iov);
+	nni_aio_set_input(ztn->zn_rcv6_aio, 0, &ztn->zn_rcv6_addr);
+
+	nni_plat_udp_recv(ztn->zn_udp4, ztn->zn_rcv4_aio);
+	nni_plat_udp_recv(ztn->zn_udp6, ztn->zn_rcv6_aio);
+
+	*ztnp = ztn;
+	return (0);
+}
+
+ // zt_walk_moons: nni_file_walk callback.  For every file in the home
+ // directory named "moon.<hex-id>", parse the hex id and ask the node
+ // to orbit that moon.  Files not matching the pattern are skipped.
+ static int
+ zt_walk_moons(const char *path, void *arg)
+ {
+ zt_node * ztn = arg;
+ const char *bn = nni_file_basename(path);
+ char * end;
+ uint64_t moonid;
+
+ if (strncmp(bn, "moon.", 5) != 0) {
+ return (NNI_FILE_WALK_CONTINUE);
+ }
+ // Only orbit when the suffix is a complete, non-zero hex number.
+ if (((moonid = (uint64_t) strtoull(bn + 5, &end, 16)) != 0) &&
+ (*end == '\0')) {
+ ZT_Node_orbit(ztn->zn_znode, NULL, moonid, 0);
+ }
+ return (NNI_FILE_WALK_CONTINUE);
+ }
+
+ // zt_node_find: locate the zt_node whose state directory matches the
+ // endpoint's home path, creating (and orbiting any configured moons
+ // for) a new node if none exists.  On success the endpoint is linked
+ // onto the node's endpoint list, joined to its network, and its MTU
+ // is taken from the network config if available.
+ // Called with zt_lk held.
+ static int
+ zt_node_find(zt_ep *ep)
+ {
+ zt_node * ztn;
+ int rv;
+ ZT_VirtualNetworkConfig *cf;
+
+ NNI_LIST_FOREACH (&zt_nodes, ztn) {
+ if (strcmp(ep->ze_home, ztn->zn_path) == 0) {
+ goto done;
+ }
+ }
+
+ // We didn't find a node, so make one. And try to
+ // initialize it.
+ if ((rv = zt_node_create(&ztn, ep->ze_home)) != 0) {
+ return (rv);
+ }
+
+ // Load moons (best effort; a failed walk is deliberately ignored).
+ if (strlen(ep->ze_home) != 0) {
+ (void) nni_file_walk(ep->ze_home, zt_walk_moons, ztn,
+ NNI_FILE_WALK_FILES_ONLY | NNI_FILE_WALK_SHALLOW);
+ }
+
+ done:
+
+ ep->ze_ztn = ztn;
+ // Move the endpoint to this node's list if it was on another.
+ if (nni_list_node_active(&ep->ze_link)) {
+ nni_list_node_remove(&ep->ze_link);
+ }
+ nni_list_append(&ztn->zn_eplist, ep);
+
+ (void) ZT_Node_join(ztn->zn_znode, ep->ze_nwid, ztn, NULL);
+
+ if ((cf = ZT_Node_networkConfig(ztn->zn_znode, ep->ze_nwid)) != NULL) {
+ NNI_ASSERT(cf->nwid == ep->ze_nwid);
+ ep->ze_mtu = cf->mtu;
+ ZT_Node_freeQueryResult(ztn->zn_znode, cf);
+ }
+
+ return (0);
+ }
+
+ // zt_tran_init: one-time transport initialization; sets up the global
+ // lock and the (initially empty) list of ZeroTier nodes.
+ static int
+ zt_tran_init(void)
+ {
+ nni_mtx_init(&zt_lk);
+ NNI_LIST_INIT(&zt_nodes, zt_node, zn_link);
+ return (0);
+ }
+
+ // zt_tran_fini: transport teardown.  Destroys every remaining node and
+ // releases any cached ephemeral state buffers.
+ static void
+ zt_tran_fini(void)
+ {
+ zt_node *ztn;
+
+ nni_mtx_lock(&zt_lk);
+ while ((ztn = nni_list_first(&zt_nodes)) != 0) {
+ nni_list_remove(&zt_nodes, ztn);
+ ztn->zn_closed = true;
+ nni_cv_wake(&ztn->zn_bgcv);
+ // Drop the lock while destroying: the node's background
+ // thread needs zt_lk to observe zn_closed and exit.
+ nni_mtx_unlock(&zt_lk);
+
+ zt_node_destroy(ztn);
+
+ nni_mtx_lock(&zt_lk);
+ }
+ nni_mtx_unlock(&zt_lk);
+
+ for (int i = 0; i <= ZT_STATE_OBJECT_NETWORK_CONFIG; i++) {
+ if (zt_ephemeral_state[i].len > 0) {
+ nni_free(zt_ephemeral_state[i].data,
+ zt_ephemeral_state[i].len);
+ }
+ }
+ NNI_ASSERT(nni_list_empty(&zt_nodes));
+ nni_mtx_fini(&zt_lk);
+ }
+
+ // zt_check_recvmaxsz: option validator — value must be a size in
+ // [0, NNI_MAXSZ]; the value itself is not stored here.
+ static int
+ zt_check_recvmaxsz(const void *v, size_t sz, nni_type t)
+ {
+ return (nni_copyin_size(NULL, v, sz, 0, NNI_MAXSZ, t));
+ }
+
+ // zt_check_orbit: option validator for the orbit option — accepts a
+ // single u64 (moon id) or a pair of u64s (moon id + peer id).
+ static int
+ zt_check_orbit(const void *v, size_t sz, nni_type t)
+ {
+ NNI_ARG_UNUSED(v);
+ if ((t != NNI_TYPE_UINT64) && (t != NNI_TYPE_OPAQUE)) {
+ return (NNG_EBADTYPE);
+ }
+ if (sz != sizeof(uint64_t) && sz != sizeof(uint64_t) * 2) {
+ return (NNG_EINVAL);
+ }
+ return (0);
+ }
+
+ // zt_check_deorbit: option validator — value must be a u64 moon id.
+ static int
+ zt_check_deorbit(const void *v, size_t sz, nni_type t)
+ {
+ return (nni_copyin_u64(NULL, v, sz, t));
+ }
+
+ // zt_check_string: option validator — value must be a NUL-terminated
+ // string shorter than both the supplied size and NNG_MAXADDRLEN.
+ static int
+ zt_check_string(const void *v, size_t sz, nni_type t)
+ {
+ size_t len;
+
+ if ((t != NNI_TYPE_OPAQUE) && (t != NNI_TYPE_STRING)) {
+ return (NNG_EBADTYPE);
+ }
+ len = nni_strnlen(v, sz);
+ if ((len >= sz) || (len >= NNG_MAXADDRLEN)) {
+ return (NNG_EINVAL);
+ }
+ return (0);
+ }
+
+ // zt_check_time: option validator — value must be a valid duration.
+ static int
+ zt_check_time(const void *v, size_t sz, nni_type t)
+ {
+ return (nni_copyin_ms(NULL, v, sz, t));
+ }
+
+ // zt_check_tries: option validator — value must be an int in
+ // [0, 1000000].
+ static int
+ zt_check_tries(const void *v, size_t sz, nni_type t)
+ {
+ return (nni_copyin_int(NULL, v, sz, 0, 1000000, t));
+ }
+
+ // zt_pipe_close: mark the pipe closed, cancel its keep-alive ping,
+ // fail any pending user receive with NNG_ECLOSED, and notify the
+ // peer with a disconnect request.
+ static void
+ zt_pipe_close(void *arg)
+ {
+ zt_pipe *p = arg;
+ nni_aio *aio;
+
+ nni_mtx_lock(&zt_lk);
+ p->zp_closed = true;
+ nni_aio_close(p->zp_ping_aio);
+ if ((aio = p->zp_user_rxaio) != NULL) {
+ p->zp_user_rxaio = NULL;
+ nni_aio_finish_error(aio, NNG_ECLOSED);
+ }
+ zt_pipe_send_disc_req(p);
+ nni_mtx_unlock(&zt_lk);
+ }
+
+ // zt_pipe_init: bind the transport pipe to its owning nni_pipe.
+ static int
+ zt_pipe_init(void *arg, nni_pipe *npipe)
+ {
+ zt_pipe *p = arg;
+ p->zp_npipe = npipe;
+ return (0);
+ }
+
+ // zt_pipe_fini: release all pipe resources — the ping aio, the node's
+ // port/pipe hash entries, the per-queue fragment lists, and the send
+ // buffer.  Normally reached via zt_pipe_reap.
+ static void
+ zt_pipe_fini(void *arg)
+ {
+ zt_pipe *p = arg;
+ zt_node *ztn = p->zp_ztn;
+
+ nni_aio_free(p->zp_ping_aio);
+
+ // This tosses the connection details and all state.
+ nni_mtx_lock(&zt_lk);
+ zt_hash_remove(ztn->zn_ports, p->zp_laddr & zt_port_mask);
+ zt_hash_remove(ztn->zn_lpipes, p->zp_laddr);
+ zt_hash_remove(ztn->zn_rpipes, p->zp_raddr);
+ nni_mtx_unlock(&zt_lk);
+
+ for (int i = 0; i < zt_recvq; i++) {
+ zt_fraglist_free(&p->zp_recvq[i]);
+ }
+ nni_free(p->zp_send_buf, ZT_MAX_MTU);
+ NNI_FREE_STRUCT(p);
+ }
+
+ // zt_pipe_reap: schedule asynchronous destruction of the pipe exactly
+ // once; the atomic flag guards against a double reap.
+ static void
+ zt_pipe_reap(zt_pipe *p)
+ {
+ if (!nni_atomic_flag_test_and_set(&p->zp_reaped)) {
+ nni_reap(&p->zp_reap, zt_pipe_fini, p);
+ }
+ }
+
+ // zt_pipe_alloc: create a pipe for a connection between raddr and
+ // laddr, inheriting protocol/MTU/ping configuration from the owning
+ // endpoint.  Listener pipes are keyed by remote address, dialer pipes
+ // by local address.  Also sizes the fragment-reassembly bitmaps from
+ // the receive-max and per-fragment payload capacity.
+ // On failure the partially built pipe is reaped, not leaked.
+ static int
+ zt_pipe_alloc(
+ zt_pipe **pipep, zt_ep *ep, uint64_t raddr, uint64_t laddr, bool listener)
+ {
+ zt_pipe *p;
+ int rv;
+ zt_node *ztn = ep->ze_ztn;
+ int i;
+ size_t maxfrag;
+ size_t maxfrags = 0;
+
+ if ((p = NNI_ALLOC_STRUCT(p)) == NULL) {
+ return (NNG_ENOMEM);
+ }
+ if ((p->zp_send_buf = nni_alloc(ZT_MAX_MTU)) == NULL) {
+ NNI_FREE_STRUCT(p);
+ return (NNG_ENOMEM);
+ }
+ p->zp_ztn = ztn;
+ p->zp_raddr = raddr;
+ p->zp_laddr = laddr;
+ p->zp_proto = ep->ze_proto;
+ p->zp_nwid = ep->ze_nwid;
+ p->zp_mtu = ep->ze_mtu;
+ p->zp_rcvmax = ep->ze_rcvmax;
+ p->zp_ping_tries = ep->ze_ping_tries;
+ p->zp_ping_time = ep->ze_ping_time;
+ p->zp_next_msgid = (uint16_t) nni_random();
+ p->zp_ping_try = 0;
+ nni_atomic_flag_reset(&p->zp_reaped);
+
+ if (listener) {
+ // listener
+ rv = zt_hash_insert(ztn->zn_rpipes, raddr, p);
+ } else {
+ // dialer
+ rv = zt_hash_insert(ztn->zn_lpipes, laddr, p);
+ }
+ if ((rv != 0) ||
+ ((rv = nni_aio_alloc(&p->zp_ping_aio, zt_pipe_ping_cb, p)) != 0)) {
+ zt_pipe_reap(p);
+ return (rv);
+ }
+
+ // The largest fragment we can accept on this pipe. The MTU is
+ // configurable by the network administrator. Probably ZT would
+ // pass a larger one (up to MAXMTU), but we honor the network
+ // administration's configuration.
+ maxfrag = p->zp_mtu - zt_offset_data_data;
+
+ // The largest fragment count we can accept on this pipe.
+ // This is rounded up to account for alignment.
+ if (p->zp_rcvmax > 0) {
+ maxfrags = (p->zp_rcvmax + (maxfrag - 1)) / maxfrag;
+ }
+
+ // Zero (unlimited rcvmax) or overflow both clamp to the 16-bit max.
+ if ((maxfrags > 0xffff) || (maxfrags == 0)) {
+ maxfrags = 0xffff;
+ }
+
+ // One "missing fragments" bit per possible fragment, rounded up
+ // to whole bytes.
+ for (i = 0; i < zt_recvq; i++) {
+ zt_fraglist *fl = &p->zp_recvq[i];
+ fl->fl_time = NNI_TIME_ZERO;
+ fl->fl_msgid = 0;
+ fl->fl_ready = 0;
+ fl->fl_missingsz = (maxfrags + 7) / 8;
+ fl->fl_missing = nni_alloc(fl->fl_missingsz);
+ if (fl->fl_missing == NULL) {
+ zt_pipe_reap(p);
+ return (NNG_ENOMEM);
+ }
+ }
+
+ *pipep = p;
+ return (0);
+ }
+
+ // zt_pipe_send: fragment the message (SP header first, then body) into
+ // MTU-sized data frames, each tagged with a 16-bit message id and
+ // fragment number, and push them out synchronously over the virtual
+ // network.  Delivery is best-effort (UDP semantics); the operation is
+ // not cancellable.
+ static void
+ zt_pipe_send(void *arg, nni_aio *aio)
+ {
+ // As we are sending UDP, and there is no callback to worry
+ // about, we just go ahead and send out a stream of messages
+ // synchronously.
+ zt_pipe *p = arg;
+ uint8_t *data = p->zp_send_buf;
+ size_t offset;
+ uint16_t id;
+ uint16_t nfrags;
+ uint16_t fragno;
+ size_t fragsz;
+ size_t bytes;
+ nni_msg *m;
+
+ if (nni_aio_begin(aio) != 0) {
+ return;
+ }
+ if ((m = nni_aio_get_msg(aio)) == NULL) {
+ nni_aio_finish_error(aio, NNG_EINVAL);
+ return;
+ }
+
+ nni_mtx_lock(&zt_lk);
+
+ if (p->zp_closed) {
+ nni_mtx_unlock(&zt_lk);
+ nni_aio_finish_error(aio, NNG_ECLOSED);
+ return;
+ }
+
+ fragsz = p->zp_mtu - zt_offset_data_data;
+ NNI_ASSERT(fragsz < 0x10000); // Because zp_mtu is 16 bits
+
+ bytes = nni_msg_header_len(m) + nni_msg_len(m);
+ // NOTE(review): this path (and the oversized-header path below)
+ // finishes the aio while still holding zt_lk, unlike the other
+ // error paths which unlock first — harmless but inconsistent.
+ if (bytes >= (0xfffe * fragsz)) {
+ nni_aio_finish_error(aio, NNG_EMSGSIZE);
+ nni_mtx_unlock(&zt_lk);
+ return;
+ }
+ // above check means nfrags will fit in 16-bits.
+ nfrags = (uint16_t)((bytes + (fragsz - 1)) / fragsz);
+
+ // get the next message ID, but skip 0
+ if ((id = p->zp_next_msgid++) == 0) {
+ id = p->zp_next_msgid++;
+ }
+
+ offset = 0;
+ fragno = 0;
+ do {
+ uint8_t *dest = data + zt_offset_data_data;
+ size_t room = fragsz;
+ size_t fraglen = 0;
+ size_t len;
+
+ // Prepend the header first.
+ if ((len = nni_msg_header_len(m)) > 0) {
+ if (len > fragsz) {
+ // This shouldn't happen! SP headers are
+ // supposed to be quite small.
+ nni_aio_finish_error(aio, NNG_EMSGSIZE);
+ nni_mtx_unlock(&zt_lk);
+ return;
+ }
+ memcpy(dest, nni_msg_header(m), len);
+ dest += len;
+ room -= len;
+ offset += len;
+ fraglen += len;
+ nni_msg_header_clear(m);
+ }
+
+ len = nni_msg_len(m);
+ if (len > room) {
+ len = room;
+ }
+ memcpy(dest, nni_msg_body(m), len);
+
+ // Consume the copied bytes and stamp the frame header.
+ nng_msg_trim(m, len);
+ NNI_PUT16(data + zt_offset_data_id, id);
+ NNI_PUT16(data + zt_offset_data_fragsz, (uint16_t) fragsz);
+ NNI_PUT16(data + zt_offset_data_frag, fragno);
+ NNI_PUT16(data + zt_offset_data_nfrag, nfrags);
+ offset += len;
+ fraglen += len;
+ fragno++;
+ zt_send(p->zp_ztn, p->zp_nwid, zt_op_data, p->zp_raddr,
+ p->zp_laddr, data, fraglen + zt_offset_data_data);
+ } while (nni_msg_len(m) != 0);
+ nni_mtx_unlock(&zt_lk);
+
+ // NB, We never bothered to call nn_aio_sched, because we run this
+ // synchronously, relying on UDP to simply discard messages if we
+ // cannot deliver them. This means that pipe send operations with
+ // this transport are not cancellable.
+
+ nni_aio_set_msg(aio, NULL);
+ nni_msg_free(m);
+ nni_aio_finish(aio, 0, offset);
+ }
+
+ // zt_pipe_cancel_recv: aio cancellation hook for a pending receive;
+ // only fires the error if the aio is still the registered receiver.
+ static void
+ zt_pipe_cancel_recv(nni_aio *aio, void *arg, int rv)
+ {
+ zt_pipe *p = arg;
+ nni_mtx_lock(&zt_lk);
+ if (p->zp_user_rxaio == aio) {
+ p->zp_user_rxaio = NULL;
+ nni_aio_finish_error(aio, rv);
+ }
+ nni_mtx_unlock(&zt_lk);
+ }
+
+ // zt_fraglist_clear: reset a reassembly slot — drop any partial
+ // message and zero the missing-fragment bitmap — without freeing
+ // the bitmap itself.
+ static void
+ zt_fraglist_clear(zt_fraglist *fl)
+ {
+ nni_msg *msg;
+
+ fl->fl_ready = 0;
+ fl->fl_msgid = 0;
+ fl->fl_time = NNI_TIME_ZERO;
+ if ((msg = fl->fl_msg) != NULL) {
+ fl->fl_msg = NULL;
+ nni_msg_free(msg);
+ }
+ memset(fl->fl_missing, 0, fl->fl_missingsz);
+ }
+
+ // zt_fraglist_free: clear the slot and release its bitmap storage.
+ static void
+ zt_fraglist_free(zt_fraglist *fl)
+ {
+ zt_fraglist_clear(fl);
+ nni_free(fl->fl_missing, fl->fl_missingsz);
+ fl->fl_missing = NULL;
+ }
+
+ // zt_pipe_dorecv: deliver at most one fully reassembled message to the
+ // registered user receive aio, clearing stale reassembly slots along
+ // the way.  No-op when no receive is pending.  Called with zt_lk held.
+ static void
+ zt_pipe_dorecv(zt_pipe *p)
+ {
+ nni_aio *aio = p->zp_user_rxaio;
+ nni_time now = nni_clock();
+
+ if (aio == NULL) {
+ return;
+ }
+
+ for (int i = 0; i < zt_recvq; i++) {
+ zt_fraglist *fl = &p->zp_recvq[i];
+ nni_msg * msg;
+
+ if (now > (fl->fl_time + zt_recv_stale)) {
+ // fragment list is stale, clean it.
+ zt_fraglist_clear(fl);
+ continue;
+ }
+ if (!fl->fl_ready) {
+ continue;
+ }
+
+ // Got data. Let's pass it up.
+ msg = fl->fl_msg;
+ fl->fl_msg = NULL;
+ NNI_ASSERT(msg != NULL);
+
+ p->zp_user_rxaio = NULL;
+ nni_aio_finish_msg(aio, msg);
+ zt_fraglist_clear(fl);
+ return;
+ }
+ }
+
+ // zt_pipe_recv: register a user receive aio on the pipe and attempt an
+ // immediate delivery if a reassembled message is already waiting.
+ static void
+ zt_pipe_recv(void *arg, nni_aio *aio)
+ {
+ zt_pipe *p = arg;
+ int rv;
+
+ if (nni_aio_begin(aio) != 0) {
+ return;
+ }
+ nni_mtx_lock(&zt_lk);
+ if (p->zp_closed) {
+ nni_mtx_unlock(&zt_lk);
+ nni_aio_finish_error(aio, NNG_ECLOSED);
+ return;
+ }
+ if ((rv = nni_aio_schedule(aio, zt_pipe_cancel_recv, p)) != 0) {
+ nni_mtx_unlock(&zt_lk);
+ nni_aio_finish_error(aio, rv);
+ return;
+ }
+ p->zp_user_rxaio = aio;
+ zt_pipe_dorecv(p);
+ nni_mtx_unlock(&zt_lk);
+ }
+
+ // zt_pipe_peer: return the peer's SP protocol id as recorded at
+ // connection setup.
+ static uint16_t
+ zt_pipe_peer(void *arg)
+ {
+ zt_pipe *pipe = arg;
+
+ return (pipe->zp_peer);
+ }
+
+ // zt_get_nw_status: map the ZeroTier virtual-network status for nwid
+ // to the transport's NNG_ZT_STATUS_* codes.  Returns NNG_ECLOSED when
+ // the network config is unavailable.
+ static int
+ zt_get_nw_status(zt_node *ztn, uint64_t nwid, int *statusp)
+ {
+ ZT_VirtualNetworkConfig *vcfg;
+ int status;
+
+ vcfg = ZT_Node_networkConfig(ztn->zn_znode, nwid);
+ if (vcfg == NULL) {
+ return (NNG_ECLOSED);
+ }
+ switch (vcfg->status) {
+ case ZT_NETWORK_STATUS_REQUESTING_CONFIGURATION:
+ status = NNG_ZT_STATUS_CONFIG;
+ break;
+ case ZT_NETWORK_STATUS_OK:
+ status = NNG_ZT_STATUS_UP;
+ break;
+ case ZT_NETWORK_STATUS_ACCESS_DENIED:
+ status = NNG_ZT_STATUS_DENIED;
+ break;
+ case ZT_NETWORK_STATUS_NOT_FOUND:
+ status = NNG_ZT_STATUS_NOTFOUND;
+ break;
+ case ZT_NETWORK_STATUS_PORT_ERROR:
+ status = NNG_ZT_STATUS_ERROR;
+ break;
+ case ZT_NETWORK_STATUS_CLIENT_TOO_OLD:
+ status = NNG_ZT_STATUS_OBSOLETE;
+ break;
+ default:
+ status = NNG_ZT_STATUS_UNKNOWN;
+ break;
+ }
+ ZT_Node_freeQueryResult(ztn->zn_znode, vcfg);
+
+ *statusp = status;
+ return (0);
+ }
+
+ // zt_get_nw_name: copy out the network's configured name, or
+ // NNG_ECLOSED when the network config is unavailable.
+ static int
+ zt_get_nw_name(zt_node *ztn, uint64_t nwid, void *buf, size_t *szp, nni_type t)
+ {
+ ZT_VirtualNetworkConfig *vcfg;
+ int rv;
+
+ vcfg = ZT_Node_networkConfig(ztn->zn_znode, nwid);
+ if (vcfg == NULL) {
+ return (NNG_ECLOSED);
+ }
+
+ rv = nni_copyout_str(vcfg->name, buf, szp, t);
+ ZT_Node_freeQueryResult(ztn->zn_znode, vcfg);
+
+ return (rv);
+ }
+
+ // zt_pipe_get_recvmaxsz: pipe option getter for the receive-max size.
+ static int
+ zt_pipe_get_recvmaxsz(void *arg, void *buf, size_t *szp, nni_type t)
+ {
+ zt_pipe *p = arg;
+ return (nni_copyout_size(p->zp_rcvmax, buf, szp, t));
+ }
+
+ // zt_pipe_get_nwid: pipe option getter for the network id.
+ static int
+ zt_pipe_get_nwid(void *arg, void *buf, size_t *szp, nni_type t)
+ {
+ zt_pipe *p = arg;
+ return (nni_copyout_u64(p->zp_nwid, buf, szp, t));
+ }
+
+ // zt_pipe_get_node: pipe option getter for the local ZeroTier node id
+ // (the upper bits of the local address above the 24-bit port field).
+ static int
+ zt_pipe_get_node(void *arg, void *buf, size_t *szp, nni_type t)
+ {
+ zt_pipe *p = arg;
+ return (nni_copyout_u64(p->zp_laddr >> 24, buf, szp, t));
+ }
+
+ // zt_pipe_ping_cb: keep-alive timer callback.  If the peer has been
+ // silent past the ping interval, send a ping and count the attempt;
+ // after too many unanswered pings the pipe is closed.  Reschedules
+ // itself unless pings are disabled or the pipe is closing.
+ static void
+ zt_pipe_ping_cb(void *arg)
+ {
+ zt_pipe *p = arg;
+ nni_aio *aio = p->zp_ping_aio;
+ int rv;
+
+ if ((rv = nni_aio_result(aio)) != 0) {
+ // We were canceled. That means we're done.
+ return;
+ }
+ nni_mtx_lock(&zt_lk);
+ // NOTE(review): the "aio == NULL" test is redundant — aio was
+ // already dereferenced via nni_aio_result() above.
+ if (p->zp_closed || aio == NULL || (p->zp_ping_tries == 0) ||
+ (p->zp_ping_time == NNG_DURATION_INFINITE) ||
+ (p->zp_ping_time == NNG_DURATION_ZERO)) {
+ nni_mtx_unlock(&zt_lk);
+ return;
+ }
+ if (p->zp_ping_try > p->zp_ping_tries) {
+ // Ping count exceeded; the other side is AFK.
+ // Close the pipe, but no need to send a reason to the peer.
+ zt_pipe_close_err(p, NNG_ECLOSED, 0, NULL);
+ nni_mtx_unlock(&zt_lk);
+ return;
+ }
+
+ if (nni_clock() > (p->zp_last_recv + p->zp_ping_time)) {
+ p->zp_ping_try++;
+ zt_pipe_send_ping(p);
+ }
+
+ nni_sleep_aio(p->zp_ping_time, aio); // Schedule a recheck.
+ nni_mtx_unlock(&zt_lk);
+ }
+
+ // zt_pipe_start_ping: kick off keep-alives — send one gratuitous ping
+ // and arm the recurring ping timer, unless pings are disabled by
+ // configuration (zero tries, or zero/infinite interval).
+ static void
+ zt_pipe_start_ping(zt_pipe *p)
+ {
+ // send a gratuitous ping, and start the ping interval timer.
+ if ((p->zp_ping_tries > 0) && (p->zp_ping_time != NNG_DURATION_ZERO) &&
+ (p->zp_ping_time != NNG_DURATION_INFINITE)) {
+ p->zp_ping_try = 0;
+ zt_pipe_send_ping(p);
+ nni_sleep_aio(p->zp_ping_time, p->zp_ping_aio);
+ }
+ }
+
+ // zt_ep_fini: tear down an endpoint — stop and free its connection
+ // request aio, then free the structure itself.
+ static void
+ zt_ep_fini(void *arg)
+ {
+ zt_ep *ep = arg;
+ nni_aio_stop(ep->ze_creq_aio);
+ nni_aio_free(ep->ze_creq_aio);
+ NNI_FREE_STRUCT(ep);
+ }
+
+ // zt_parsehex: parse up to 16 hex digits from *sp into *valp,
+ // advancing *sp past the consumed digits.  When wildok is set, a
+ // leading '*' parses as the wildcard value 0.  Returns NNG_EINVAL
+ // when no digits were consumed.
+ static int
+ zt_parsehex(const char **sp, uint64_t *valp, bool wildok)
+ {
+ int n;
+ const char *s = *sp;
+ char c;
+ uint64_t v;
+
+ if (wildok && *s == '*') {
+ *valp = 0;
+ s++;
+ *sp = s;
+ return (0);
+ }
+
+ // Cast through unsigned char before the <ctype.h> calls: passing
+ // a plain (possibly negative) char is undefined behavior
+ // (CERT STR37-C).
+ for (v = 0, n = 0; n < 16; n++, s++) {
+ c = (char) tolower((unsigned char) *s);
+ if (!isxdigit((unsigned char) c)) {
+ break;
+ }
+ v *= 16;
+ if (isdigit((unsigned char) c)) {
+ v += (c - '0');
+ } else {
+ v += ((c - 'a') + 10);
+ }
+ }
+
+ *sp = s;
+ *valp = v;
+ return (n ? 0 : NNG_EINVAL);
+ }
+
+ // zt_parsedec: parse up to 20 decimal digits from *sp into *valp,
+ // advancing *sp past the consumed digits.  Returns NNG_EINVAL when
+ // no digits were consumed.
+ static int
+ zt_parsedec(const char **sp, uint64_t *valp)
+ {
+ int n;
+ const char *s = *sp;
+ char c;
+ uint64_t v;
+
+ // Cast through unsigned char before isdigit(): passing a plain
+ // (possibly negative) char is undefined behavior (CERT STR37-C).
+ for (v = 0, n = 0;
+ (n < 20) && isdigit((unsigned char) (c = *s)); n++, s++) {
+ v *= 10;
+ v += (c - '0');
+ }
+ *sp = s;
+ *valp = v;
+ return (n ? 0 : NNG_EINVAL);
+ }
+
+ // zt_ep_init: allocate and initialize an endpoint from a URL of the
+ // form zt://<nodeid>.<nwid>:<port>.  The port must be present but may
+ // be zero (listeners only); the node id may be '*' for "self"; an
+ // optional trailing '/' is ignored.  Exactly one of ndialer/nlistener
+ // is non-NULL and selects dialer vs listener addressing.
+ // On any failure the endpoint (and its aio) is released, not leaked.
+ static int
+ zt_ep_init(void **epp, nni_url *url, nni_sock *sock, nni_dialer *ndialer,
+ nni_listener *nlistener)
+ {
+ zt_ep * ep;
+ uint64_t node;
+ uint64_t port;
+ int rv;
+ const char *h;
+
+ if ((ep = NNI_ALLOC_STRUCT(ep)) == NULL) {
+ return (NNG_ENOMEM);
+ }
+
+ ep->ze_mtu = ZT_MIN_MTU;
+ ep->ze_aio = NULL;
+ ep->ze_ping_tries = zt_ping_tries;
+ ep->ze_ping_time = zt_ping_time;
+ ep->ze_conn_time = zt_conn_time;
+ ep->ze_conn_tries = zt_conn_tries;
+ ep->ze_proto = nni_sock_proto_id(sock);
+ ep->ze_ndialer = ndialer;
+ ep->ze_nlistener = nlistener;
+
+ nni_aio_list_init(&ep->ze_aios);
+
+ rv = nni_aio_alloc(&ep->ze_creq_aio, zt_ep_conn_req_cb, ep);
+ if (rv != 0) {
+ zt_ep_fini(ep);
+ return (rv);
+ }
+
+ // Our URL format is:
+ //
+ // zt://<nodeid>.<nwid>:<port>
+ //
+ // The port must be specified, but may be zero. The nodeid
+ // may be '*' to refer to ourself. There may be a trailing slash
+ // which will be ignored.
+
+ h = url->u_hostname;
+ if (((strlen(url->u_path) == 1) && (url->u_path[0] != '/')) ||
+ (strlen(url->u_path) > 1) || (url->u_fragment != NULL) ||
+ (url->u_query != NULL) || (url->u_userinfo != NULL) ||
+ (zt_parsehex(&h, &node, true) != 0) || (*h++ != '.') ||
+ (zt_parsehex(&h, &ep->ze_nwid, false) != 0) ||
+ (node > 0xffffffffffull)) {
+ // BUGFIX: previously returned without releasing ep,
+ // leaking the structure and its creq aio.
+ zt_ep_fini(ep);
+ return (NNG_EADDRINVAL);
+ }
+ h = url->u_port;
+ if ((zt_parsedec(&h, &port) != 0) || (port > zt_max_port)) {
+ zt_ep_fini(ep);
+ return (NNG_EADDRINVAL);
+ }
+
+ // Fill in the addresses. (ze_ndialer/ze_nlistener were already
+ // assigned above.)
+ if (nlistener != NULL) {
+ // listener
+ ep->ze_laddr = node;
+ ep->ze_laddr <<= 24;
+ ep->ze_laddr |= port;
+ ep->ze_raddr = 0;
+ } else {
+ // dialer: a concrete remote port is mandatory.
+ if (port == 0) {
+ zt_ep_fini(ep);
+ return (NNG_EADDRINVAL);
+ }
+ ep->ze_raddr = node;
+ ep->ze_raddr <<= 24;
+ ep->ze_raddr |= port;
+ ep->ze_laddr = 0;
+ }
+
+ *epp = ep;
+ return (0);
+ }
+
+ // zt_dialer_init: dialer-side wrapper around zt_ep_init.
+ static int
+ zt_dialer_init(void **epp, nni_url *url, nni_dialer *d)
+ {
+ return (zt_ep_init(epp, url, nni_dialer_sock(d), d, NULL));
+ }
+
+ // zt_listener_init: listener-side wrapper around zt_ep_init.
+ static int
+ zt_listener_init(void **epp, nni_url *url, nni_listener *l)
+ {
+ return (zt_ep_init(epp, url, nni_listener_sock(l), NULL, l));
+ }
+
+ // zt_ep_close: abort the pending connection request, fail all queued
+ // user aios with NNG_ECLOSED, and unregister the endpoint from its
+ // node's lists and hashes.
+ static void
+ zt_ep_close(void *arg)
+ {
+ zt_ep * ep = arg;
+ zt_node *ztn;
+ nni_aio *aio;
+
+ nni_aio_abort(ep->ze_creq_aio, NNG_ECLOSED);
+
+ // Cancel any outstanding user operation(s) - they should have
+ // been aborted by the above cancellation, but we need to be
+ // sure, as the cancellation callback may not have run yet.
+
+ nni_mtx_lock(&zt_lk);
+ while ((aio = nni_list_first(&ep->ze_aios)) != NULL) {
+ nni_aio_list_remove(aio);
+ nni_aio_finish_error(aio, NNG_ECLOSED);
+ }
+
+ // Endpoint framework guarantees to only call us once,
+ // and to not call other things while we are closed.
+ ztn = ep->ze_ztn;
+ // If we're on the ztn node list, pull us off.
+ if (ztn != NULL) {
+ nni_list_node_remove(&ep->ze_link);
+ zt_hash_remove(ztn->zn_ports, ep->ze_laddr & zt_port_mask);
+ zt_hash_remove(ztn->zn_eps, ep->ze_laddr);
+ }
+
+ nni_mtx_unlock(&zt_lk);
+ }
+
+ // zt_ep_bind_locked: bind the endpoint to a local port on its node,
+ // creating/finding the node first if needed.  A zero port requests an
+ // ephemeral allocation; a fixed port must be free.  On success the
+ // local address is rewritten to <self-node-id>:<port> and the
+ // endpoint is registered in the node's endpoint hash.
+ // Called with zt_lk held.
+ static int
+ zt_ep_bind_locked(zt_ep *ep)
+ {
+ int rv;
+ uint64_t port;
+ uint64_t node;
+ zt_node *ztn;
+
+ // If we haven't already got a ZT node, get one.
+ if ((ztn = ep->ze_ztn) == NULL) {
+ if ((rv = zt_node_find(ep)) != 0) {
+ return (rv);
+ }
+ ztn = ep->ze_ztn;
+ }
+
+ node = ep->ze_laddr >> 24;
+ if ((node != 0) && (node != ztn->zn_self)) {
+ // User requested node id, but it doesn't match our
+ // own.
+ return (NNG_EADDRINVAL);
+ }
+
+ if ((ep->ze_laddr & zt_port_mask) == 0) {
+ // ask for an ephemeral port
+ if ((rv = zt_hash_alloc(ztn->zn_ports, &port, ep)) != 0) {
+ return (rv);
+ }
+ NNI_ASSERT(port & zt_ephemeral);
+ } else {
+ void *conflict;
+ // make sure port requested is free.
+ port = ep->ze_laddr & zt_port_mask;
+
+ if (zt_hash_find(ztn->zn_ports, port, &conflict) == 0) {
+ return (NNG_EADDRINUSE);
+ }
+ if ((rv = zt_hash_insert(ztn->zn_ports, port, ep)) != 0) {
+ return (rv);
+ }
+ }
+ NNI_ASSERT(port <= zt_max_port);
+ NNI_ASSERT(port > 0);
+
+ ep->ze_laddr = ztn->zn_self;
+ ep->ze_laddr <<= 24;
+ ep->ze_laddr |= port;
+ ep->ze_running = true;
+
+ // Roll back the port reservation if the endpoint registration
+ // itself fails.
+ if ((rv = zt_hash_insert(ztn->zn_eps, ep->ze_laddr, ep)) != 0) {
+ zt_hash_remove(ztn->zn_ports, port);
+ return (rv);
+ }
+
+ return (0);
+ }
+
+ // zt_ep_bind: public bind entry point; takes zt_lk around the locked
+ // worker.
+ static int
+ zt_ep_bind(void *arg)
+ {
+ int rv;
+ zt_ep *ep = arg;
+
+ nni_mtx_lock(&zt_lk);
+ rv = zt_ep_bind_locked(ep);
+ nni_mtx_unlock(&zt_lk);
+
+ return (rv);
+ }
+
+ // zt_ep_cancel: cancellation hook for endpoint accept/connect aios;
+ // also aborts the endpoint's internal aio when one is active.
+ static void
+ zt_ep_cancel(nni_aio *aio, void *arg, int rv)
+ {
+ zt_ep *ep = arg;
+
+ nni_mtx_lock(&zt_lk);
+ if (nni_aio_list_active(aio)) {
+ if (ep->ze_aio != NULL) {
+ nni_aio_abort(ep->ze_aio, rv);
+ }
+ nni_aio_list_remove(aio);
+ nni_aio_finish_error(aio, rv);
+ }
+ nni_mtx_unlock(&zt_lk);
+ }
+
+ // zt_ep_doaccept: match queued connection requests against pending
+ // accept aios.  Expired requests are discarded; each match creates a
+ // pipe, acks the peer, and starts keep-alive pings.  On pipe creation
+ // failure the peer gets an error frame and the accept aio fails.
+ static void
+ zt_ep_doaccept(zt_ep *ep)
+ {
+ // Call with ep lock held.
+ nni_time now;
+ zt_pipe *p;
+ int rv;
+
+ now = nni_clock();
+ // Consume any timedout connect requests.
+ while (ep->ze_creq_tail != ep->ze_creq_head) {
+ zt_creq creq;
+ nni_aio *aio;
+
+ creq = ep->ze_creqs[ep->ze_creq_tail % zt_listenq];
+ // Discard old connection requests.
+ if (creq.cr_expire < now) {
+ ep->ze_creq_tail++;
+ continue;
+ }
+
+ if ((aio = nni_list_first(&ep->ze_aios)) == NULL) {
+ // No outstanding accept. We're done.
+ break;
+ }
+
+ // We have both conn request, and a place to accept it.
+
+ // Advance the tail.
+ ep->ze_creq_tail++;
+
+ // We remove this AIO. This keeps it from being canceled.
+ nni_aio_list_remove(aio);
+
+ rv = zt_pipe_alloc(&p, ep, creq.cr_raddr, ep->ze_laddr, true);
+ if (rv != 0) {
+ zt_send_err(ep->ze_ztn, ep->ze_nwid, creq.cr_raddr,
+ ep->ze_laddr, zt_err_unknown,
+ "Failed creating pipe");
+ nni_aio_finish_error(aio, rv);
+ continue;
+ }
+ p->zp_peer = creq.cr_proto;
+ zt_pipe_send_conn_ack(p);
+ zt_pipe_start_ping(p);
+ nni_aio_set_output(aio, 0, p);
+ nni_aio_finish(aio, 0, 0);
+ }
+ }
+
+ // zt_ep_accept: queue a user accept aio and try to satisfy it
+ // immediately from any already-queued connection requests.
+ static void
+ zt_ep_accept(void *arg, nni_aio *aio)
+ {
+ zt_ep *ep = arg;
+ int rv;
+
+ if (nni_aio_begin(aio) != 0) {
+ return;
+ }
+ nni_mtx_lock(&zt_lk);
+ if ((rv = nni_aio_schedule(aio, zt_ep_cancel, ep)) != 0) {
+ nni_mtx_unlock(&zt_lk);
+ nni_aio_finish_error(aio, rv);
+ return;
+ }
+ nni_aio_list_append(&ep->ze_aios, aio);
+ zt_ep_doaccept(ep);
+ nni_mtx_unlock(&zt_lk);
+ }
+
+ // zt_ep_conn_req_cancel: cancellation hook for the endpoint's internal
+ // connection request aio; only fires while a request is active.
+ static void
+ zt_ep_conn_req_cancel(nni_aio *aio, void *arg, int rv)
+ {
+ zt_ep *ep = arg;
+ // We don't have much to do here. The AIO will have been
+ // canceled as a result of the "parent" AIO canceling.
+ nni_mtx_lock(&zt_lk);
+ if (ep->ze_creq_active) {
+ ep->ze_creq_active = false;
+ nni_aio_finish_error(aio, rv);
+ }
+ nni_mtx_unlock(&zt_lk);
+ }
+
+ // zt_ep_conn_req_cb: completion callback for the endpoint's connection
+ // request.  On success, hand the new pipe to the first waiting user
+ // aio (or discard it if none remain).  On timeout, retry until the
+ // configured try count is exhausted; other errors fail the waiting
+ // user aio outright.  While user aios remain queued, re-arm the
+ // request aio and send another connect message.
+ static void
+ zt_ep_conn_req_cb(void *arg)
+ {
+ zt_ep * ep = arg;
+ zt_pipe *p;
+ nni_aio *aio = ep->ze_creq_aio;
+ nni_aio *uaio;
+ int rv;
+
+ nni_mtx_lock(&zt_lk);
+
+ ep->ze_creq_active = false;
+ switch ((rv = nni_aio_result(aio))) {
+ case 0:
+ p = nni_aio_get_output(aio, 0);
+ // Already canceled, or already handled?
+ if ((uaio = nni_list_first(&ep->ze_aios)) != NULL) {
+ nni_aio_list_remove(uaio);
+ zt_pipe_start_ping(p);
+ nni_aio_set_output(uaio, 0, p);
+ nni_aio_finish(uaio, 0, 0);
+ } else {
+ // We have a pipe, but nowhere to stick it.
+ // Just discard it.
+ zt_pipe_fini(p);
+ }
+ ep->ze_creq_try = 0;
+ break;
+
+ case NNG_ETIMEDOUT:
+ if ((ep->ze_creq_try > ep->ze_conn_tries) &&
+ (ep->ze_conn_tries > 0)) {
+ // Final timeout attempt.
+ if ((uaio = nni_list_first(&ep->ze_aios)) != NULL) {
+ nni_aio_list_remove(uaio);
+ nni_aio_finish_error(uaio, rv);
+ // reset the counter.
+ ep->ze_creq_try = 0;
+ }
+ }
+ break;
+
+ default:
+ // Failed hard?
+ if ((uaio = nni_list_first(&ep->ze_aios)) != NULL) {
+ nni_aio_list_remove(uaio);
+ nni_aio_finish_error(uaio, rv);
+ }
+ ep->ze_creq_try = 0;
+ break;
+ }
+
+ // While anyone is still waiting, schedule another connect attempt.
+ if (nni_list_first(&ep->ze_aios) != NULL) {
+ nni_aio_set_timeout(aio, ep->ze_conn_time);
+ if (nni_aio_begin(aio) == 0) {
+ rv = nni_aio_schedule(aio, zt_ep_conn_req_cancel, ep);
+ if (rv != 0) {
+ nni_aio_finish_error(aio, rv);
+ } else {
+ ep->ze_creq_active = true;
+ ep->ze_creq_try++;
+ zt_ep_send_conn_req(ep);
+ }
+ }
+ }
+
+ nni_mtx_unlock(&zt_lk);
+ }
+
+ // zt_ep_connect: start an outbound connection.  Binds an ephemeral
+ // local port (so the server's ack can reach us), fills in a wildcard
+ // remote node id with our own, queues the user aio, and kicks off the
+ // first connection request with the configured timeout.
+ static void
+ zt_ep_connect(void *arg, nni_aio *aio)
+ {
+ zt_ep *ep = arg;
+ int rv;
+
+ if (nni_aio_begin(aio) != 0) {
+ return;
+ }
+ // We bind locally. We'll use the address later when we give
+ // it to the pipe, but this allows us to receive the initial
+ // ack back from the server. (This gives us an ephemeral
+ // address to work with.)
+ nni_mtx_lock(&zt_lk);
+
+ // Clear the port so we get an ephemeral port.
+ ep->ze_laddr &= ~((uint64_t) zt_port_mask);
+
+ if ((rv = zt_ep_bind_locked(ep)) != 0) {
+ nni_aio_finish_error(aio, rv);
+ nni_mtx_unlock(&zt_lk);
+ return;
+ }
+
+ // A wildcard ('*') remote node means connect to ourself.
+ if ((ep->ze_raddr >> 24) == 0) {
+ ep->ze_raddr |= (ep->ze_ztn->zn_self << zt_port_shift);
+ }
+ if ((rv = nni_aio_schedule(aio, zt_ep_cancel, ep)) != 0) {
+ nni_aio_finish_error(aio, rv);
+ nni_mtx_unlock(&zt_lk);
+ return;
+ }
+ nni_aio_list_append(&ep->ze_aios, aio);
+ ep->ze_running = true;
+
+ nni_aio_set_timeout(ep->ze_creq_aio, ep->ze_conn_time);
+ if (nni_aio_begin(ep->ze_creq_aio) == 0) {
+ rv = nni_aio_schedule(
+ ep->ze_creq_aio, zt_ep_conn_req_cancel, ep);
+ if (rv != 0) {
+ nni_aio_finish_error(ep->ze_creq_aio, rv);
+ } else {
+ // Send out the first connect message; if not
+ // yet attached to network message will be dropped.
+ ep->ze_creq_try = 1;
+ ep->ze_creq_active = true;
+ zt_ep_send_conn_req(ep);
+ }
+ }
+ nni_mtx_unlock(&zt_lk);
+ }
+
+ // zt_ep_set_recvmaxsz: endpoint option setter for the maximum receive
+ // message size (0 .. NNI_MAXSZ).
+ static int
+ zt_ep_set_recvmaxsz(void *arg, const void *data, size_t sz, nni_type t)
+ {
+ zt_ep *ep = arg;
+ size_t val;
+ int rv;
+
+ if ((rv = nni_copyin_size(&val, data, sz, 0, NNI_MAXSZ, t)) == 0) {
+ nni_mtx_lock(&zt_lk);
+ ep->ze_rcvmax = val;
+ nni_mtx_unlock(&zt_lk);
+ }
+ return (rv);
+ }
+
+ // zt_ep_get_recvmaxsz: endpoint option getter for the maximum receive
+ // message size.
+ static int
+ zt_ep_get_recvmaxsz(void *arg, void *data, size_t *szp, nni_type t)
+ {
+ zt_ep *ep = arg;
+ int rv;
+ nni_mtx_lock(&zt_lk);
+ rv = nni_copyout_size(ep->ze_rcvmax, data, szp, t);
+ nni_mtx_unlock(&zt_lk);
+ return (rv);
+ }
+
+ // zt_ep_set_home: endpoint option setter for the ZeroTier home (state)
+ // directory.  Rejected once the endpoint is running; otherwise the
+ // path is stored and the matching node is found or created.
+ static int
+ zt_ep_set_home(void *arg, const void *data, size_t sz, nni_type t)
+ {
+ int rv;
+ zt_ep *ep = arg;
+
+ if ((rv = zt_check_string(data, sz, t)) == 0) {
+ nni_mtx_lock(&zt_lk);
+ if (ep->ze_running) {
+ rv = NNG_ESTATE;
+ } else {
+ nni_strlcpy(ep->ze_home, data, sizeof(ep->ze_home));
+ if ((rv = zt_node_find(ep)) != 0) {
+ ep->ze_ztn = NULL;
+ }
+ }
+ nni_mtx_unlock(&zt_lk);
+ }
+
+ return (rv);
+ }
+
+ // zt_ep_get_home: endpoint option getter for the home directory path.
+ static int
+ zt_ep_get_home(void *arg, void *data, size_t *szp, nni_type t)
+ {
+ zt_ep *ep = arg;
+ int rv;
+
+ nni_mtx_lock(&zt_lk);
+ rv = nni_copyout_str(ep->ze_home, data, szp, t);
+ nni_mtx_unlock(&zt_lk);
+ return (rv);
+ }
+
+ // zt_ep_get_url: endpoint option getter reconstructing the URL string
+ // (zt://<nodeid>.<nwid>:<port>) from the listener's local address or
+ // the dialer's remote address.
+ static int
+ zt_ep_get_url(void *arg, void *data, size_t *szp, nni_type t)
+ {
+ char ustr[64]; // more than plenty
+ zt_ep * ep = arg;
+ uint64_t addr;
+
+ nni_mtx_lock(&zt_lk);
+ addr = ep->ze_nlistener != NULL ? ep->ze_laddr : ep->ze_raddr;
+ snprintf(ustr, sizeof(ustr), "zt://%llx.%llx:%u",
+ (unsigned long long) addr >> zt_port_shift,
+ (unsigned long long) ep->ze_nwid,
+ (unsigned) (addr & zt_port_mask));
+ nni_mtx_unlock(&zt_lk);
+ return (nni_copyout_str(ustr, data, szp, t));
+ }
+
+ // zt_ep_set_orbit: endpoint option setter — orbit a moon.  Accepts a
+ // single u64 (moon id) or two u64s (moon id + peer id); ensures the
+ // endpoint has a node, then forwards to ZT_Node_orbit.
+ static int
+ zt_ep_set_orbit(void *arg, const void *data, size_t sz, nni_type t)
+ {
+ uint64_t moonid;
+ uint64_t peerid;
+ zt_ep * ep = arg;
+ int rv;
+ enum ZT_ResultCode zrv;
+
+ if ((t != NNI_TYPE_UINT64) && (t != NNI_TYPE_OPAQUE)) {
+ return (NNG_EBADTYPE);
+ }
+ if (sz == sizeof(uint64_t)) {
+ memcpy(&moonid, data, sizeof(moonid));
+ peerid = 0;
+ } else if (sz == sizeof(uint64_t) * 2) {
+ memcpy(&moonid, data, sizeof(moonid));
+ memcpy(&peerid, ((char *) data) + sizeof(uint64_t),
+ sizeof(peerid));
+ } else {
+ return (NNG_EINVAL);
+ }
+
+ nni_mtx_lock(&zt_lk);
+ if ((ep->ze_ztn == NULL) && ((rv = zt_node_find(ep)) != 0)) {
+ nni_mtx_unlock(&zt_lk);
+ return (rv);
+ }
+ zrv = ZT_Node_orbit(ep->ze_ztn->zn_znode, NULL, moonid, peerid);
+ nni_mtx_unlock(&zt_lk);
+
+ return (zt_result(zrv));
+ }
+
+ // zt_ep_set_deorbit: endpoint option setter — stop orbiting the given
+ // moon id.
+ static int
+ zt_ep_set_deorbit(void *arg, const void *data, size_t sz, nni_type t)
+ {
+ uint64_t moonid;
+ zt_ep * ep = arg;
+ int rv;
+
+ if ((rv = nni_copyin_u64(&moonid, data, sz, t)) == 0) {
+ enum ZT_ResultCode zrv;
+
+ nni_mtx_lock(&zt_lk);
+ if ((ep->ze_ztn == NULL) && ((rv = zt_node_find(ep)) != 0)) {
+ nni_mtx_unlock(&zt_lk);
+ return (rv);
+ }
+ zrv = ZT_Node_deorbit(ep->ze_ztn->zn_znode, NULL, moonid);
+ nni_mtx_unlock(&zt_lk);
+ rv = zt_result(zrv);
+ }
+ return (rv);
+ }
+
+ // zt_ep_set_add_local_addr: endpoint option setter — advertise an
+ // additional local physical address (IPv4 or IPv6, port ignored) to
+ // the ZeroTier core for direct-path selection.
+ static int
+ zt_ep_set_add_local_addr(void *arg, const void *data, size_t sz, nni_type t)
+ {
+ nng_sockaddr sa;
+ zt_ep * ep = arg;
+ int rv;
+
+ if ((rv = nni_copyin_sockaddr(&sa, data, sz, t)) == 0) {
+ enum ZT_ResultCode zrv;
+ zt_node * ztn;
+ struct sockaddr_storage ss;
+ struct sockaddr_in * sin;
+ struct sockaddr_in6 * sin6;
+
+ memset(&ss, 0, sizeof(ss));
+ switch (sa.s_family) {
+ case NNG_AF_INET:
+ sin = (void *) &ss;
+ sin->sin_family = AF_INET;
+ sin->sin_addr.s_addr = sa.s_in.sa_addr;
+ sin->sin_port = 0;
+ break;
+ case NNG_AF_INET6:
+ sin6 = (void *) &ss;
+ sin6->sin6_family = AF_INET6;
+ sin6->sin6_port = 0;
+ memcpy(&sin6->sin6_addr, sa.s_in6.sa_addr, 16);
+ break;
+ default:
+ return (NNG_EINVAL);
+ }
+
+ nni_mtx_lock(&zt_lk);
+ if ((ep->ze_ztn == NULL) && ((rv = zt_node_find(ep)) != 0)) {
+ nni_mtx_unlock(&zt_lk);
+ return (rv);
+ }
+ ztn = ep->ze_ztn;
+ zrv = ZT_Node_addLocalInterfaceAddress(ztn->zn_znode, &ss);
+ nni_mtx_unlock(&zt_lk);
+ rv = zt_result(zrv);
+ }
+ return (rv);
+ }
+
+ // zt_ep_set_clear_local_addrs: endpoint option setter — drop all
+ // advertised local interface addresses; the option value is ignored.
+ static int
+ zt_ep_set_clear_local_addrs(void *arg, const void *data, size_t sz, nni_type t)
+ {
+ zt_ep *ep = arg;
+ int rv;
+ NNI_ARG_UNUSED(data);
+ NNI_ARG_UNUSED(sz);
+ NNI_ARG_UNUSED(t);
+
+ ZT_Node *zn;
+ nni_mtx_lock(&zt_lk);
+ if ((ep->ze_ztn == NULL) && ((rv = zt_node_find(ep)) != 0)) {
+ nni_mtx_unlock(&zt_lk);
+ return (rv);
+ }
+ zn = ep->ze_ztn;
+ ZT_Node_clearLocalInterfaceAddresses(zn);
+ nni_mtx_unlock(&zt_lk);
+ return (0);
+ }
+
+ // zt_ep_get_node: endpoint option getter for our own ZeroTier node id
+ // (creating the node on demand).
+ static int
+ zt_ep_get_node(void *arg, void *data, size_t *szp, nni_type t)
+ {
+ zt_ep *ep = arg;
+ int rv;
+
+ nni_mtx_lock(&zt_lk);
+ if ((ep->ze_ztn == NULL) && ((rv = zt_node_find(ep)) != 0)) {
+ nni_mtx_unlock(&zt_lk);
+ return (rv);
+ }
+
+ rv = nni_copyout_u64(ep->ze_ztn->zn_self, data, szp, t);
+
+ nni_mtx_unlock(&zt_lk);
+ return (rv);
+ }
+
+ // zt_ep_get_nwid: endpoint option getter for the network id.
+ static int
+ zt_ep_get_nwid(void *arg, void *data, size_t *szp, nni_type t)
+ {
+ zt_ep *ep = arg;
+ int rv;
+
+ nni_mtx_lock(&zt_lk);
+ if ((ep->ze_ztn == NULL) && ((rv = zt_node_find(ep)) != 0)) {
+ nni_mtx_unlock(&zt_lk);
+ return (rv);
+ }
+ rv = nni_copyout_u64(ep->ze_nwid, data, szp, t);
+ nni_mtx_unlock(&zt_lk);
+ return (rv);
+ }
+
+ // zt_ep_get_nw_name: endpoint option getter for the network's name.
+ static int
+ zt_ep_get_nw_name(void *arg, void *buf, size_t *szp, nni_type t)
+ {
+ zt_ep *ep = arg;
+ int rv;
+
+ nni_mtx_lock(&zt_lk);
+ if ((ep->ze_ztn == NULL) && ((rv = zt_node_find(ep)) != 0)) {
+ nni_mtx_unlock(&zt_lk);
+ return (rv);
+ }
+ rv = zt_get_nw_name(ep->ze_ztn, ep->ze_nwid, buf, szp, t);
+ nni_mtx_unlock(&zt_lk);
+ return (rv);
+ }
+
+ // zt_ep_get_nw_status: endpoint option getter for the network status
+ // (NNG_ZT_STATUS_* code).
+ static int
+ zt_ep_get_nw_status(void *arg, void *buf, size_t *szp, nni_type t)
+ {
+ zt_ep *ep = arg;
+ int rv;
+ int status;
+
+ nni_mtx_lock(&zt_lk);
+ if ((ep->ze_ztn == NULL) && ((rv = zt_node_find(ep)) != 0)) {
+ nni_mtx_unlock(&zt_lk);
+ return (rv);
+ }
+ if ((rv = zt_get_nw_status(ep->ze_ztn, ep->ze_nwid, &status)) != 0) {
+ nni_mtx_unlock(&zt_lk);
+ return (rv);
+ }
+ nni_mtx_unlock(&zt_lk);
+ return (nni_copyout_int(status, buf, szp, t));
+ }
+
+ // zt_ep_set_ping_time: endpoint option setter for the keep-alive ping
+ // interval.
+ static int
+ zt_ep_set_ping_time(void *arg, const void *data, size_t sz, nni_type t)
+ {
+ zt_ep * ep = arg;
+ nng_duration val;
+ int rv;
+
+ if ((rv = nni_copyin_ms(&val, data, sz, t)) == 0) {
+ nni_mtx_lock(&zt_lk);
+ ep->ze_ping_time = val;
+ nni_mtx_unlock(&zt_lk);
+ }
+ return (rv);
+ }
+
+ // zt_ep_get_ping_time: endpoint option getter for the ping interval.
+ static int
+ zt_ep_get_ping_time(void *arg, void *data, size_t *szp, nni_type t)
+ {
+ zt_ep *ep = arg;
+ int rv;
+
+ nni_mtx_lock(&zt_lk);
+ rv = nni_copyout_ms(ep->ze_ping_time, data, szp, t);
+ nni_mtx_unlock(&zt_lk);
+ return (rv);
+ }
+
+ // zt_ep_set_ping_tries: endpoint option setter for the number of
+ // unanswered pings tolerated before the pipe is closed.
+ static int
+ zt_ep_set_ping_tries(void *arg, const void *data, size_t sz, nni_type t)
+ {
+ zt_ep *ep = arg;
+ int val;
+ int rv;
+
+ if ((rv = nni_copyin_int(&val, data, sz, 0, 1000000, t)) == 0) {
+ nni_mtx_lock(&zt_lk);
+ ep->ze_ping_tries = val;
+ nni_mtx_unlock(&zt_lk);
+ }
+ return (rv);
+ }
+
+ // zt_ep_get_ping_tries: endpoint option getter for the ping try count.
+ static int
+ zt_ep_get_ping_tries(void *arg, void *data, size_t *szp, nni_type t)
+ {
+ zt_ep *ep = arg;
+ int rv;
+
+ nni_mtx_lock(&zt_lk);
+ rv = nni_copyout_int(ep->ze_ping_tries, data, szp, t);
+ nni_mtx_unlock(&zt_lk);
+ return (rv);
+ }
+
+// Option setter: validate the supplied duration and store it as the
+// endpoint's connection-attempt interval, under the global zt_lk.
+static int
+zt_ep_set_conn_time(void *arg, const void *data, size_t sz, nni_type t)
+{
+ zt_ep * ep = arg;
+ nng_duration val;
+ int rv;
+
+ if ((rv = nni_copyin_ms(&val, data, sz, t)) == 0) {
+ nni_mtx_lock(&zt_lk);
+ ep->ze_conn_time = val;
+ nni_mtx_unlock(&zt_lk);
+ }
+ return (rv);
+}
+
+// Option getter: copy out the connection-attempt interval (a duration).
+static int
+zt_ep_get_conn_time(void *arg, void *data, size_t *szp, nni_type t)
+{
+ zt_ep *ep = arg;
+ int rv;
+
+ nni_mtx_lock(&zt_lk);
+ rv = nni_copyout_ms(ep->ze_conn_time, data, szp, t);
+ nni_mtx_unlock(&zt_lk);
+ return (rv);
+}
+
+// Option setter: store the maximum number of connection attempts.
+// The value is range-checked to [0, 1000000] by nni_copyin_int.
+static int
+zt_ep_set_conn_tries(void *arg, const void *data, size_t sz, nni_type t)
+{
+ zt_ep *ep = arg;
+ int val;
+ int rv;
+
+ if ((rv = nni_copyin_int(&val, data, sz, 0, 1000000, t)) == 0) {
+ nni_mtx_lock(&zt_lk);
+ ep->ze_conn_tries = val;
+ nni_mtx_unlock(&zt_lk);
+ }
+ return (rv);
+}
+
+// Option getter: copy out the maximum number of connection attempts.
+static int
+zt_ep_get_conn_tries(void *arg, void *data, size_t *szp, nni_type t)
+{
+ zt_ep *ep = arg;
+ int rv;
+
+ nni_mtx_lock(&zt_lk);
+ rv = nni_copyout_int(ep->ze_conn_tries, data, szp, t);
+ nni_mtx_unlock(&zt_lk);
+ return (rv);
+}
+
+// Option getter: build an NNG_AF_ZT sockaddr for the endpoint's local
+// address.  The node id and port are unpacked from ze_laddr using
+// zt_port_shift / zt_port_mask; the fields are read under zt_lk.
+static int
+zt_ep_get_locaddr(void *arg, void *data, size_t *szp, nni_type t)
+{
+ zt_ep * ep = arg;
+ nng_sockaddr sa;
+
+ memset(&sa, 0, sizeof(sa));
+ sa.s_zt.sa_family = NNG_AF_ZT;
+ nni_mtx_lock(&zt_lk);
+ sa.s_zt.sa_nwid = ep->ze_nwid;
+ sa.s_zt.sa_nodeid = ep->ze_laddr >> zt_port_shift;
+ sa.s_zt.sa_port = ep->ze_laddr & zt_port_mask;
+ nni_mtx_unlock(&zt_lk);
+ return (nni_copyout_sockaddr(&sa, data, szp, t));
+}
+
+// Option getter: build an NNG_AF_ZT sockaddr for the pipe's local
+// address, unpacking node id and port from zp_laddr.
+// NOTE(review): unlike the endpoint getters this reads zp_* fields
+// without taking zt_lk — presumably they are immutable once the pipe
+// exists; confirm against pipe initialization.
+static int
+zt_pipe_get_locaddr(void *arg, void *data, size_t *szp, nni_type t)
+{
+ zt_pipe * p = arg;
+ nng_sockaddr sa;
+
+ memset(&sa, 0, sizeof(sa));
+ sa.s_zt.sa_family = NNG_AF_ZT;
+ sa.s_zt.sa_nwid = p->zp_nwid;
+ sa.s_zt.sa_nodeid = p->zp_laddr >> zt_port_shift;
+ sa.s_zt.sa_port = p->zp_laddr & zt_port_mask;
+ return (nni_copyout_sockaddr(&sa, data, szp, t));
+}
+
+// Option getter: build an NNG_AF_ZT sockaddr for the pipe's remote
+// peer, unpacking node id and port from zp_raddr.
+// NOTE(review): reads zp_* fields without zt_lk, same as the locaddr
+// getter — presumably immutable after pipe creation; confirm.
+static int
+zt_pipe_get_remaddr(void *arg, void *data, size_t *szp, nni_type t)
+{
+ zt_pipe * p = arg;
+ nng_sockaddr sa;
+
+ memset(&sa, 0, sizeof(sa));
+ sa.s_zt.sa_family = NNG_AF_ZT;
+ sa.s_zt.sa_nwid = p->zp_nwid;
+ sa.s_zt.sa_nodeid = p->zp_raddr >> zt_port_shift;
+ sa.s_zt.sa_port = p->zp_raddr & zt_port_mask;
+ return (nni_copyout_sockaddr(&sa, data, szp, t));
+}
+
+// Option getter: report the pipe's ZeroTier virtual network MTU.
+static int
+zt_pipe_get_mtu(void *arg, void *data, size_t *szp, nni_type t)
+{
+ zt_pipe *pipe = arg;
+
+ return (nni_copyout_size(pipe->zp_mtu, data, szp, t));
+}
+
+// Table of per-pipe options, consulted by zt_pipe_getopt via nni_getopt.
+// All pipe options are read-only (no .o_set entries).
+static const nni_option zt_pipe_options[] = {
+ {
+ .o_name = NNG_OPT_LOCADDR,
+ .o_get = zt_pipe_get_locaddr,
+ },
+ {
+ .o_name = NNG_OPT_REMADDR,
+ .o_get = zt_pipe_get_remaddr,
+ },
+ {
+ .o_name = NNG_OPT_ZT_MTU,
+ .o_get = zt_pipe_get_mtu,
+ },
+ {
+ .o_name = NNG_OPT_ZT_NWID,
+ .o_get = zt_pipe_get_nwid,
+ },
+ {
+ .o_name = NNG_OPT_ZT_NODE,
+ .o_get = zt_pipe_get_node,
+ },
+ {
+ .o_name = NNG_OPT_RECVMAXSZ,
+ .o_get = zt_pipe_get_recvmaxsz,
+ },
+ // terminate list
+ {
+ .o_name = NULL,
+ },
+};
+
+// Generic pipe option lookup: dispatch by name through zt_pipe_options.
+static int
+zt_pipe_getopt(void *arg, const char *name, void *buf, size_t *szp, nni_type t)
+{
+ zt_pipe *p = arg;
+ return (nni_getopt(zt_pipe_options, name, p, buf, szp, t));
+}
+
+// Pipe operations vector wired into the transport framework.
+static nni_tran_pipe_ops zt_pipe_ops = {
+ .p_init = zt_pipe_init,
+ .p_fini = zt_pipe_fini,
+ .p_send = zt_pipe_send,
+ .p_recv = zt_pipe_recv,
+ .p_close = zt_pipe_close,
+ .p_peer = zt_pipe_peer,
+ .p_getopt = zt_pipe_getopt,
+};
+
+// Options supported on a dialer endpoint.  Dialers additionally expose
+// the connection timing/retry and local-address management options that
+// the listener table omits.
+static nni_option zt_dialer_options[] = {
+ {
+ .o_name = NNG_OPT_RECVMAXSZ,
+ .o_get = zt_ep_get_recvmaxsz,
+ .o_set = zt_ep_set_recvmaxsz,
+ },
+ {
+ .o_name = NNG_OPT_URL,
+ .o_get = zt_ep_get_url,
+ },
+ {
+ .o_name = NNG_OPT_ZT_HOME,
+ .o_get = zt_ep_get_home,
+ .o_set = zt_ep_set_home,
+ },
+ {
+ .o_name = NNG_OPT_ZT_NODE,
+ .o_get = zt_ep_get_node,
+ },
+ {
+ .o_name = NNG_OPT_ZT_NWID,
+ .o_get = zt_ep_get_nwid,
+ },
+ {
+ .o_name = NNG_OPT_ZT_NETWORK_STATUS,
+ .o_get = zt_ep_get_nw_status,
+ },
+ {
+ .o_name = NNG_OPT_ZT_NETWORK_NAME,
+ .o_get = zt_ep_get_nw_name,
+ },
+ {
+ .o_name = NNG_OPT_ZT_PING_TIME,
+ .o_get = zt_ep_get_ping_time,
+ .o_set = zt_ep_set_ping_time,
+ },
+ {
+ .o_name = NNG_OPT_ZT_PING_TRIES,
+ .o_get = zt_ep_get_ping_tries,
+ .o_set = zt_ep_set_ping_tries,
+ },
+ {
+ .o_name = NNG_OPT_ZT_CONN_TIME,
+ .o_get = zt_ep_get_conn_time,
+ .o_set = zt_ep_set_conn_time,
+ },
+ {
+ .o_name = NNG_OPT_ZT_CONN_TRIES,
+ .o_get = zt_ep_get_conn_tries,
+ .o_set = zt_ep_set_conn_tries,
+ },
+ {
+ .o_name = NNG_OPT_ZT_ORBIT,
+ .o_set = zt_ep_set_orbit,
+ },
+ {
+ .o_name = NNG_OPT_ZT_DEORBIT,
+ .o_set = zt_ep_set_deorbit,
+ },
+ {
+ .o_name = NNG_OPT_ZT_ADD_LOCAL_ADDR,
+ .o_set = zt_ep_set_add_local_addr,
+ },
+ {
+ .o_name = NNG_OPT_ZT_CLEAR_LOCAL_ADDRS,
+ .o_set = zt_ep_set_clear_local_addrs,
+ },
+
+ // terminate list
+ {
+ .o_name = NULL,
+ },
+};
+
+// Options supported on a listener endpoint.  Compared with the dialer
+// table this omits the CONN_TIME/CONN_TRIES and local-address options
+// but adds NNG_OPT_LOCADDR.
+static nni_option zt_listener_options[] = {
+ {
+ .o_name = NNG_OPT_RECVMAXSZ,
+ .o_get = zt_ep_get_recvmaxsz,
+ .o_set = zt_ep_set_recvmaxsz,
+ },
+ {
+ .o_name = NNG_OPT_URL,
+ .o_get = zt_ep_get_url,
+ },
+ {
+ .o_name = NNG_OPT_ZT_HOME,
+ .o_get = zt_ep_get_home,
+ .o_set = zt_ep_set_home,
+ },
+ {
+ .o_name = NNG_OPT_ZT_NODE,
+ .o_get = zt_ep_get_node,
+ },
+ {
+ .o_name = NNG_OPT_ZT_NWID,
+ .o_get = zt_ep_get_nwid,
+ },
+ {
+ .o_name = NNG_OPT_ZT_NETWORK_STATUS,
+ .o_get = zt_ep_get_nw_status,
+ },
+ {
+ .o_name = NNG_OPT_ZT_NETWORK_NAME,
+ .o_get = zt_ep_get_nw_name,
+ },
+ {
+ .o_name = NNG_OPT_ZT_PING_TIME,
+ .o_get = zt_ep_get_ping_time,
+ .o_set = zt_ep_set_ping_time,
+ },
+ {
+ .o_name = NNG_OPT_ZT_PING_TRIES,
+ .o_get = zt_ep_get_ping_tries,
+ .o_set = zt_ep_set_ping_tries,
+ },
+ {
+ .o_name = NNG_OPT_ZT_ORBIT,
+ .o_set = zt_ep_set_orbit,
+ },
+ {
+ .o_name = NNG_OPT_ZT_DEORBIT,
+ .o_set = zt_ep_set_deorbit,
+ },
+ {
+ .o_name = NNG_OPT_LOCADDR,
+ .o_get = zt_ep_get_locaddr,
+ },
+ // terminate list
+ {
+ .o_name = NULL,
+ },
+};
+
+// Dialer operations vector; shares zt_ep_fini/zt_ep_close with listeners.
+static nni_tran_dialer_ops zt_dialer_ops = {
+ .d_init = zt_dialer_init,
+ .d_fini = zt_ep_fini,
+ .d_connect = zt_ep_connect,
+ .d_close = zt_ep_close,
+ .d_options = zt_dialer_options,
+};
+
+// Listener operations vector; shares zt_ep_fini/zt_ep_close with dialers.
+static nni_tran_listener_ops zt_listener_ops = {
+ .l_init = zt_listener_init,
+ .l_fini = zt_ep_fini,
+ .l_bind = zt_ep_bind,
+ .l_accept = zt_ep_accept,
+ .l_close = zt_ep_close,
+ .l_options = zt_listener_options,
+};
+
+// This is the ZeroTier transport linkage, and should be the
+// only global symbol in this entire file.  It registers the "zt"
+// URL scheme with the transport framework.
+static struct nni_tran zt_tran = {
+ .tran_version = NNI_TRANSPORT_VERSION,
+ .tran_scheme = "zt",
+ .tran_dialer = &zt_dialer_ops,
+ .tran_listener = &zt_listener_ops,
+ .tran_pipe = &zt_pipe_ops,
+ .tran_init = zt_tran_init,
+ .tran_fini = zt_tran_fini,
+};
+
+// Public entry point: register the ZeroTier transport with nng.
+// Returns 0 on success or an nng error code.
+int
+nng_zt_register(void)
+{
+ return (nni_tran_register(&zt_tran));
+}
diff --git a/src/sp/transport/zerotier/zthash.c b/src/sp/transport/zerotier/zthash.c
new file mode 100644
index 00000000..ca46b373
--- /dev/null
+++ b/src/sp/transport/zerotier/zthash.c
@@ -0,0 +1,302 @@
+//
+// Copyright 2020 Staysail Systems, Inc. <info@staysail.tech>
+// Copyright 2018 Capitar IT Group BV <info@capitar.com>
+//
+// This software is supplied under the terms of the MIT License, a
+// copy of which should be located in the distribution where this
+// file was obtained (LICENSE.txt). A copy of the license may also be
+// found online at https://opensource.org/licenses/MIT.
+//
+
+#include "core/nng_impl.h"
+#include "zthash.h"
+
+// One open-addressing slot.  A slot is occupied iff val != NULL; the
+// key field is only meaningful for occupied slots.  skips counts how
+// many probe chains pass through (but do not terminate at) this slot.
+struct zt_hash_entry {
+ uint64_t key;
+ void * val;
+ uint32_t skips;
+};
+
+// Allocate and zero-initialize a hash table.  The entry array itself is
+// allocated lazily by the first resize on insert.  Returns NNG_ENOMEM
+// on allocation failure.  Default dynamic-ID range is [0, 0xffffffff];
+// override with zt_hash_limits().
+int
+zt_hash_init(zt_hash **hp)
+{
+ zt_hash *h;
+
+ if ((h = NNI_ALLOC_STRUCT(h)) == NULL) {
+ return (NNG_ENOMEM);
+ }
+ h->ih_entries = NULL;
+ h->ih_count = 0;
+ h->ih_load = 0;
+ h->ih_cap = 0;
+ h->ih_maxload = 0;
+ h->ih_minload = 0; // never shrink below this
+ h->ih_minval = 0;
+ h->ih_maxval = 0xffffffff;
+ h->ih_dynval = 0;
+
+ *hp = h;
+ return (0);
+}
+
+// Release the entry array and the table itself.  NULL is a no-op.
+// Stored values are not freed; ownership stays with the caller.
+void
+zt_hash_fini(zt_hash *h)
+{
+ if (h != NULL) {
+ if (h->ih_entries != NULL) {
+ NNI_FREE_STRUCTS(h->ih_entries, h->ih_cap);
+ h->ih_entries = NULL;
+ h->ih_cap = h->ih_count = 0;
+ h->ih_load = h->ih_minload = h->ih_maxload = 0;
+ }
+
+ NNI_FREE_STRUCT(h);
+ }
+}
+
+// Set the [minval, maxval] range for dynamically allocated IDs and the
+// first ID to try (start is clamped into range before being stored).
+// NOTE(review): the asserts run after start has been clamped, so they
+// only really validate that minval < maxval for the caller's arguments.
+void
+zt_hash_limits(zt_hash *h, uint64_t minval, uint64_t maxval, uint64_t start)
+{
+ if (start < minval) {
+ start = minval;
+ }
+ if (start > maxval) {
+ start = maxval;
+ }
+
+ h->ih_minval = minval;
+ h->ih_maxval = maxval;
+ h->ih_dynval = start;
+ NNI_ASSERT(minval < maxval);
+ NNI_ASSERT(start >= minval);
+ NNI_ASSERT(start <= maxval);
+}
+
+// Inspired by Python dict implementation. This probe will visit every
+// cell. We always hash consecutively assigned IDs.
+// Both macros rely on ih_cap being a power of two (mask is cap - 1).
+#define ZT_HASH_NEXT(h, j) ((((j) *5) + 1) & (h->ih_cap - 1))
+#define ZT_HASH_INDEX(h, j) ((j) & (h->ih_cap - 1))
+
+// Locate the slot index holding id, or (size_t)-1 if absent.  Follows
+// the ZT_HASH_NEXT probe sequence; a zero skips counter terminates the
+// search early, and a full wrap back to the start bucket also fails.
+static size_t
+zt_hash_find_index(zt_hash *h, uint64_t id)
+{
+ size_t index;
+ size_t start;
+ if (h->ih_count == 0) {
+ return ((size_t) -1);
+ }
+
+ index = ZT_HASH_INDEX(h, id);
+ start = index;
+ for (;;) {
+ // The value of ihe_key is only valid if ihe_val is not NULL.
+ if ((h->ih_entries[index].key == id) &&
+ (h->ih_entries[index].val != NULL)) {
+ return (index);
+ }
+ if (h->ih_entries[index].skips == 0) {
+ return ((size_t) -1);
+ }
+ index = ZT_HASH_NEXT(h, index);
+
+ if (index == start) {
+ break;
+ }
+ }
+
+ return ((size_t) -1);
+}
+
+// Look up id and store its value through vp.  Returns NNG_ENOENT if the
+// id is not present; *vp is untouched in that case.
+int
+zt_hash_find(zt_hash *h, uint64_t id, void **vp)
+{
+ size_t index;
+ if ((index = zt_hash_find_index(h, id)) == (size_t) -1) {
+ return (NNG_ENOENT);
+ }
+ *vp = h->ih_entries[index].val;
+ return (0);
+}
+
+// Grow or shrink the table so the load stays in [ih_minload, ih_maxload).
+// The new capacity is the smallest power of two >= 8 holding twice the
+// item count; all live entries are rehashed into the new array and the
+// skips counters are rebuilt from scratch.  Returns NNG_ENOMEM if the
+// new array cannot be allocated (the old table is left intact).
+static int
+zt_hash_resize(zt_hash *h)
+{
+ size_t newsize;
+ size_t oldsize;
+ zt_hash_entry *newents;
+ zt_hash_entry *oldents;
+ uint32_t i;
+
+ if ((h->ih_load < h->ih_maxload) && (h->ih_load >= h->ih_minload)) {
+ // No resize needed.
+ return (0);
+ }
+
+ oldsize = h->ih_cap;
+
+ newsize = 8;
+ while (newsize < (h->ih_count * 2)) {
+ newsize *= 2;
+ }
+ if (newsize == oldsize) {
+ // Same size.
+ return (0);
+ }
+
+ oldents = h->ih_entries;
+ newents = NNI_ALLOC_STRUCTS(newents, newsize);
+ if (newents == NULL) {
+ return (NNG_ENOMEM);
+ }
+
+ h->ih_entries = newents;
+ h->ih_cap = newsize;
+ h->ih_load = 0;
+ if (newsize > 8) {
+ h->ih_minload = newsize / 8;
+ h->ih_maxload = newsize * 2 / 3;
+ } else {
+ // Smallest table: never shrink, allow load up to 5 of 8.
+ h->ih_minload = 0;
+ h->ih_maxload = 5;
+ }
+ for (i = 0; i < oldsize; i++) {
+ size_t index;
+ if (oldents[i].val == NULL) {
+ continue;
+ }
+ index = oldents[i].key & (newsize - 1);
+ for (;;) {
+ // Increment the load unconditionally. It counts
+ // once for every item stored, plus once for each
+ // hashing operation we use to store the item (i.e.
+ // one for the item, plus once for each rehash.)
+ h->ih_load++;
+ if (newents[index].val == NULL) {
+ // As we are hitting this entry for the first
+ // time, it won't have any skips.
+ NNI_ASSERT(newents[index].skips == 0);
+ newents[index].val = oldents[i].val;
+ newents[index].key = oldents[i].key;
+ break;
+ }
+ newents[index].skips++;
+ index = ZT_HASH_NEXT(h, index);
+ }
+ }
+ if (oldsize != 0) {
+ NNI_FREE_STRUCTS(oldents, oldsize);
+ }
+ return (0);
+}
+
+// Remove the entry with the given id.  Returns NNG_ENOENT if absent.
+// After locating the slot we re-walk the probe chain from the id's home
+// bucket, decrementing each intermediate slot's skips counter and the
+// load, then clear the slot itself and opportunistically shrink.
+int
+zt_hash_remove(zt_hash *h, uint64_t id)
+{
+ size_t index;
+ size_t probe;
+
+ if ((index = zt_hash_find_index(h, id)) == (size_t) -1) {
+ return (NNG_ENOENT);
+ }
+
+ // Now we have found the index where the object exists. We are going
+ // to restart the search, until the index matches, to decrement the
+ // skips counter.  (No cast: ZT_HASH_INDEX yields a size_t-sized
+ // value; the old "(int)" cast needlessly narrowed it.)
+ probe = ZT_HASH_INDEX(h, id);
+
+ for (;;) {
+ zt_hash_entry *entry;
+ // The load was increased once each hashing operation we used
+ // to place the the item. Decrement it accordingly.
+ h->ih_load--;
+ entry = &h->ih_entries[probe];
+ if (probe == index) {
+ entry->val = NULL;
+ entry->key = 0;
+ break;
+ }
+ NNI_ASSERT(entry->skips > 0);
+ entry->skips--;
+ probe = ZT_HASH_NEXT(h, probe);
+ }
+
+ h->ih_count--;
+
+ // Shrink -- but it's ok if we can't.
+ (void) zt_hash_resize(h);
+
+ return (0);
+}
+
+// Insert (id, val) into the table, overwriting the value if the id is
+// already present.  May grow the table first; returns NNG_ENOMEM if
+// that allocation fails.  Otherwise probes for a free slot, bumping the
+// skips counter of every occupied slot passed along the way.
+int
+zt_hash_insert(zt_hash *h, uint64_t id, void *val)
+{
+ size_t index;
+ zt_hash_entry *ent;
+
+ // Try to resize -- if we don't need to, this will be a no-op.
+ if (zt_hash_resize(h) != 0) {
+ return (NNG_ENOMEM);
+ }
+
+ // If it already exists, just overwrite the old value.
+ if ((index = zt_hash_find_index(h, id)) != (size_t) -1) {
+ ent = &h->ih_entries[index];
+ ent->val = val;
+ return (0);
+ }
+
+ index = ZT_HASH_INDEX(h, id);
+ for (;;) {
+ ent = &h->ih_entries[index];
+
+ // Increment the load count. We do this each time time we
+ // rehash. This may over-count items that collide on the
+ // same rehashing, but this should just cause a table to
+ // grow sooner, which is probably a good thing.
+ h->ih_load++;
+ if (ent->val == NULL) {
+ h->ih_count++;
+ ent->key = id;
+ ent->val = val;
+ return (0);
+ }
+ // Record the skip count. This being non-zero informs
+ // that a rehash will be necessary. Without this we
+ // would need to scan the entire hash for the match.
+ ent->skips++;
+ index = ZT_HASH_NEXT(h, index);
+ }
+}
+
+// Allocate a fresh id in [ih_minval, ih_maxval] for val, storing it via
+// idp on success.  IDs are handed out sequentially from ih_dynval with
+// wrap-around; already-used ids are skipped.  Returns NNG_ENOMEM when
+// every id in the range is taken or the insert's resize fails.
+int
+zt_hash_alloc(zt_hash *h, uint64_t *idp, void *val)
+{
+ uint64_t id;
+ int rv;
+
+ NNI_ASSERT(val != NULL);
+
+ if (h->ih_count > (h->ih_maxval - h->ih_minval)) {
+ // Really more like ENOSPC.. the table is filled to max.
+ return (NNG_ENOMEM);
+ }
+
+ for (;;) {
+ id = h->ih_dynval;
+ h->ih_dynval++;
+ if (h->ih_dynval > h->ih_maxval) {
+ h->ih_dynval = h->ih_minval;
+ }
+
+ if (zt_hash_find_index(h, id) == (size_t) -1) {
+ break;
+ }
+ }
+
+ rv = zt_hash_insert(h, id, val);
+ if (rv == 0) {
+ *idp = id;
+ }
+ return (rv);
+}
diff --git a/src/sp/transport/zerotier/zthash.h b/src/sp/transport/zerotier/zthash.h
new file mode 100644
index 00000000..249eabbf
--- /dev/null
+++ b/src/sp/transport/zerotier/zthash.h
@@ -0,0 +1,43 @@
+//
+// Copyright 2020 Staysail Systems, Inc. <info@staysail.tech>
+// Copyright 2018 Capitar IT Group BV <info@capitar.com>
+//
+// This software is supplied under the terms of the MIT License, a
+// copy of which should be located in the distribution where this
+// file was obtained (LICENSE.txt). A copy of the license may also be
+// found online at https://opensource.org/licenses/MIT.
+//
+
+#ifndef ZT_HASH_H
+#define ZT_HASH_H
+
+#include <stdint.h>
+
+// This code is derived from id hash, but supports 64-bit IDs.
+
+typedef struct zt_hash zt_hash;
+typedef struct zt_hash_entry zt_hash_entry;
+
+// NB: These details are entirely private to the hash implementation.
+// They are provided here to facilitate inlining in structures.
+struct zt_hash {
+ size_t ih_cap; // slot array capacity (power of two)
+ size_t ih_count; // number of live entries
+ size_t ih_load; // entries plus probe hops (resize trigger)
+ size_t ih_minload; // considers placeholders
+ size_t ih_maxload; // load at/above which the table grows
+ uint64_t ih_minval; // smallest id zt_hash_alloc may hand out
+ uint64_t ih_maxval; // largest id zt_hash_alloc may hand out
+ uint64_t ih_dynval; // next candidate id for zt_hash_alloc
+ zt_hash_entry *ih_entries; // open-addressed slot array
+};
+
+// Public API.  Callers are responsible for external locking; the table
+// itself performs no synchronization.
+extern int zt_hash_init(zt_hash **);
+extern void zt_hash_fini(zt_hash *);
+extern void zt_hash_limits(zt_hash *, uint64_t, uint64_t, uint64_t);
+extern int zt_hash_find(zt_hash *, uint64_t, void **);
+extern int zt_hash_remove(zt_hash *, uint64_t);
+extern int zt_hash_insert(zt_hash *, uint64_t, void *);
+extern int zt_hash_alloc(zt_hash *, uint64_t *, void *);
+
+#endif // ZT_HASH_H