author    Garrett D'Amore <garrett@damore.org>    2018-04-04 13:36:54 -0700
committer Garrett D'Amore <garrett@damore.org>    2018-04-10 15:40:00 -0700
commit    5f7289e1f8e1427c9214c8e3e96ad56b1f868d53
tree      39debf4ecde234b2a0be19c9cb15628cc32c2edb /src/protocol/reqrep0/xrep.c
parent    56f1bf30e61c53646dd2f8425da7c7fa0d97b3e1
fixes #334 Separate context for state machines from sockets
This provides context support for REQ and REP sockets. More discussion around this is in the issue itself. Eventually we would like to extend this to the surveyor pattern as well. Note that we specifically do not support pollable descriptors for non-default contexts, and the results of using the file descriptors for polling (NNG_OPT_SENDFD and NNG_OPT_RECVFD) are undefined. In the future it might be nice to figure out how to factor in optional use of a message queue for users who want more buffering, but we think there is little need for this with cooked mode.
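As an illustrative sketch (not part of this commit), a REP server might use the context API this change supports to service many requests concurrently, one outstanding request per context. The names work and rep_cb below are hypothetical; the nng_ctx_* and nng_aio_* calls are the public API assumed here.

#include <stdbool.h>
#include <nng/nng.h>
#include <nng/protocol/reqrep0/rep.h>

// Each outstanding request gets its own context and aio.
struct work {
	nng_ctx  ctx;
	nng_aio *aio;
	bool     sending;
};

static void
rep_cb(void *arg)
{
	struct work *w = arg;

	if (nng_aio_result(w->aio) != 0) {
		return; // context or socket closed
	}
	if (!w->sending) {
		// A request arrived on this context; echo it back.
		// The received message is still set on the aio.
		nng_aio_set_msg(w->aio, nng_aio_get_msg(w->aio));
		w->sending = true;
		nng_ctx_send(w->ctx, w->aio);
	} else {
		// Reply delivered; wait for the next request.
		w->sending = false;
		nng_ctx_recv(w->ctx, w->aio);
	}
}

// Setup per worker (error handling elided):
//   nng_aio_alloc(&w->aio, rep_cb, w);
//   nng_ctx_open(&w->ctx, sock);
//   nng_ctx_recv(w->ctx, w->aio);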
Diffstat (limited to 'src/protocol/reqrep0/xrep.c')
-rw-r--r--	src/protocol/reqrep0/xrep.c	434
1 file changed, 434 insertions(+), 0 deletions(-)
diff --git a/src/protocol/reqrep0/xrep.c b/src/protocol/reqrep0/xrep.c
new file mode 100644
index 00000000..f7189453
--- /dev/null
+++ b/src/protocol/reqrep0/xrep.c
@@ -0,0 +1,434 @@
+//
+// Copyright 2018 Staysail Systems, Inc. <info@staysail.tech>
+// Copyright 2018 Capitar IT Group BV <info@capitar.com>
+//
+// This software is supplied under the terms of the MIT License, a
+// copy of which should be located in the distribution where this
+// file was obtained (LICENSE.txt). A copy of the license may also be
+// found online at https://opensource.org/licenses/MIT.
+//
+
+#include <stdlib.h>
+#include <string.h>
+
+#include "core/nng_impl.h"
+#include "protocol/reqrep0/rep.h"
+
+// Response protocol in raw mode. The REP protocol is the "reply" side of a
+// request-reply pair. This is useful for building RPC servers, for
+// example.
+
+#ifndef NNI_PROTO_REQ_V0
+#define NNI_PROTO_REQ_V0 NNI_PROTO(3, 0)
+#endif
+
+#ifndef NNI_PROTO_REP_V0
+#define NNI_PROTO_REP_V0 NNI_PROTO(3, 1)
+#endif
+
+typedef struct xrep0_pipe xrep0_pipe;
+typedef struct xrep0_sock xrep0_sock;
+
+static void xrep0_sock_getq_cb(void *);
+static void xrep0_pipe_getq_cb(void *);
+static void xrep0_pipe_putq_cb(void *);
+static void xrep0_pipe_send_cb(void *);
+static void xrep0_pipe_recv_cb(void *);
+static void xrep0_pipe_fini(void *);
+
+// xrep0_sock is our per-socket protocol private structure.
+struct xrep0_sock {
+ nni_msgq * uwq;
+ nni_msgq * urq;
+ nni_mtx lk;
+ int ttl;
+ nni_idhash *pipes;
+ nni_aio * aio_getq;
+};
+
+// xrep0_pipe is our per-pipe protocol private structure.
+struct xrep0_pipe {
+ nni_pipe * pipe;
+ xrep0_sock *rep;
+ nni_msgq * sendq;
+ nni_aio * aio_getq;
+ nni_aio * aio_send;
+ nni_aio * aio_recv;
+ nni_aio * aio_putq;
+};
+
+static void
+xrep0_sock_fini(void *arg)
+{
+ xrep0_sock *s = arg;
+
+ nni_aio_stop(s->aio_getq);
+ nni_aio_fini(s->aio_getq);
+ nni_idhash_fini(s->pipes);
+ nni_mtx_fini(&s->lk);
+ NNI_FREE_STRUCT(s);
+}
+
+static int
+xrep0_sock_init(void **sp, nni_sock *sock)
+{
+ xrep0_sock *s;
+ int rv;
+
+ if ((s = NNI_ALLOC_STRUCT(s)) == NULL) {
+ return (NNG_ENOMEM);
+ }
+ nni_mtx_init(&s->lk);
+ if (((rv = nni_idhash_init(&s->pipes)) != 0) ||
+ ((rv = nni_aio_init(&s->aio_getq, xrep0_sock_getq_cb, s)) != 0)) {
+ xrep0_sock_fini(s);
+ return (rv);
+ }
+
+ s->ttl = 8; // Per RFC
+ s->uwq = nni_sock_sendq(sock);
+ s->urq = nni_sock_recvq(sock);
+
+ *sp = s;
+
+ return (0);
+}
+
+static void
+xrep0_sock_open(void *arg)
+{
+ xrep0_sock *s = arg;
+
+ // This starts us retrieving messages from the upper write queue.
+ nni_msgq_aio_get(s->uwq, s->aio_getq);
+}
+
+static void
+xrep0_sock_close(void *arg)
+{
+ xrep0_sock *s = arg;
+
+ nni_aio_abort(s->aio_getq, NNG_ECLOSED);
+}
+
+static void
+xrep0_pipe_fini(void *arg)
+{
+ xrep0_pipe *p = arg;
+
+ nni_aio_fini(p->aio_getq);
+ nni_aio_fini(p->aio_send);
+ nni_aio_fini(p->aio_recv);
+ nni_aio_fini(p->aio_putq);
+ nni_msgq_fini(p->sendq);
+ NNI_FREE_STRUCT(p);
+}
+
+static int
+xrep0_pipe_init(void **pp, nni_pipe *pipe, void *s)
+{
+ xrep0_pipe *p;
+ int rv;
+
+ if ((p = NNI_ALLOC_STRUCT(p)) == NULL) {
+ return (NNG_ENOMEM);
+ }
+
+ // We want a pretty deep sendq on pipes. The rationale here is
+ // that the send rate is normally moderated by the receive rate.
+ // If a slow pipe (a req pipe not reading its own responses!?)
+ // comes up, then we will eventually start discarding its replies,
+ // but that takes some time. It would be poor form for a peer to
+ // smash us with requests while being unable to consume replies as
+ // fast as we can forward them; if a peer does that, its replies
+ // get dropped. (From a DDoS perspective, it might be nice in the
+ // future if we had a way to exert backpressure on the send side --
+ // essentially don't let peers send requests faster than they are
+ // willing to receive replies. Something to think about for the
+ // future.)
+ if (((rv = nni_msgq_init(&p->sendq, 64)) != 0) ||
+ ((rv = nni_aio_init(&p->aio_getq, xrep0_pipe_getq_cb, p)) != 0) ||
+ ((rv = nni_aio_init(&p->aio_send, xrep0_pipe_send_cb, p)) != 0) ||
+ ((rv = nni_aio_init(&p->aio_recv, xrep0_pipe_recv_cb, p)) != 0) ||
+ ((rv = nni_aio_init(&p->aio_putq, xrep0_pipe_putq_cb, p)) != 0)) {
+ xrep0_pipe_fini(p);
+ return (rv);
+ }
+
+ p->pipe = pipe;
+ p->rep = s;
+ *pp = p;
+ return (0);
+}
+
+static int
+xrep0_pipe_start(void *arg)
+{
+ xrep0_pipe *p = arg;
+ xrep0_sock *s = p->rep;
+ int rv;
+
+ if ((rv = nni_idhash_insert(s->pipes, nni_pipe_id(p->pipe), p)) != 0) {
+ return (rv);
+ }
+
+ nni_msgq_aio_get(p->sendq, p->aio_getq);
+ nni_pipe_recv(p->pipe, p->aio_recv);
+ return (0);
+}
+
+static void
+xrep0_pipe_stop(void *arg)
+{
+ xrep0_pipe *p = arg;
+ xrep0_sock *s = p->rep;
+
+ nni_msgq_close(p->sendq);
+ nni_aio_stop(p->aio_getq);
+ nni_aio_stop(p->aio_send);
+ nni_aio_stop(p->aio_recv);
+ nni_aio_stop(p->aio_putq);
+
+ nni_idhash_remove(s->pipes, nni_pipe_id(p->pipe));
+}
+
+static void
+xrep0_sock_getq_cb(void *arg)
+{
+ xrep0_sock *s = arg;
+ nni_msgq * uwq = s->uwq;
+ nni_msg * msg;
+ uint32_t id;
+ xrep0_pipe *p;
+ int rv;
+
+ // This watches for messages from the upper write queue,
+ // extracts the destination pipe ID, and forwards the message to
+ // the appropriate pipe via a separate per-pipe queue. This
+ // prevents a single bad or slow pipe from gumming up the works
+ // for the entire socket.
+
+ if (nni_aio_result(s->aio_getq) != 0) {
+ // Closed socket?
+ return;
+ }
+
+ msg = nni_aio_get_msg(s->aio_getq);
+ nni_aio_set_msg(s->aio_getq, NULL);
+
+ // We yank the outgoing pipe ID from the header.
+ if (nni_msg_header_len(msg) < 4) {
+ nni_msg_free(msg);
+
+ // Look for another message on the upper write queue.
+ nni_msgq_aio_get(uwq, s->aio_getq);
+ return;
+ }
+
+ id = nni_msg_header_trim_u32(msg);
+
+ // Look for the pipe, and attempt to put the message there
+ // (nonblocking) if we can. If we can't for any reason, then we
+ // free the message.
+ if ((rv = nni_idhash_find(s->pipes, id, (void **) &p)) == 0) {
+ rv = nni_msgq_tryput(p->sendq, msg);
+ }
+ if (rv != 0) {
+ nni_msg_free(msg);
+ }
+
+ // Now look for another message on the upper write queue.
+ nni_msgq_aio_get(uwq, s->aio_getq);
+}
+
+static void
+xrep0_pipe_getq_cb(void *arg)
+{
+ xrep0_pipe *p = arg;
+
+ if (nni_aio_result(p->aio_getq) != 0) {
+ nni_pipe_stop(p->pipe);
+ return;
+ }
+
+ nni_aio_set_msg(p->aio_send, nni_aio_get_msg(p->aio_getq));
+ nni_aio_set_msg(p->aio_getq, NULL);
+
+ nni_pipe_send(p->pipe, p->aio_send);
+}
+
+static void
+xrep0_pipe_send_cb(void *arg)
+{
+ xrep0_pipe *p = arg;
+
+ if (nni_aio_result(p->aio_send) != 0) {
+ nni_msg_free(nni_aio_get_msg(p->aio_send));
+ nni_aio_set_msg(p->aio_send, NULL);
+ nni_pipe_stop(p->pipe);
+ return;
+ }
+
+ nni_msgq_aio_get(p->sendq, p->aio_getq);
+}
+
+static void
+xrep0_pipe_recv_cb(void *arg)
+{
+ xrep0_pipe *p = arg;
+ xrep0_sock *s = p->rep;
+ nni_msg * msg;
+ int rv;
+ uint8_t * body;
+ int hops;
+
+ if (nni_aio_result(p->aio_recv) != 0) {
+ nni_pipe_stop(p->pipe);
+ return;
+ }
+
+ msg = nni_aio_get_msg(p->aio_recv);
+ nni_aio_set_msg(p->aio_recv, NULL);
+
+ nni_msg_set_pipe(msg, nni_pipe_id(p->pipe));
+
+ // Store the pipe id in the header, first thing.
+ rv = nni_msg_header_append_u32(msg, nni_pipe_id(p->pipe));
+ if (rv != 0) {
+ // Failure here causes us to drop the message.
+ goto drop;
+ }
+
+ // Move backtrace from body to header
+ hops = 1;
+ for (;;) {
+ int end = 0;
+ if (hops > s->ttl) {
+ // This isn't malformed, but it has gone through
+ // too many hops. Do not disconnect, because we
+ // can legitimately receive messages with too many
+ // hops from devices, etc.
+ goto drop;
+ }
+ hops++;
+ if (nni_msg_len(msg) < 4) {
+ // Peer is speaking garbage. Kick it.
+ nni_msg_free(msg);
+ nni_pipe_stop(p->pipe);
+ return;
+ }
+ body = nni_msg_body(msg);
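+ // The high bit of the first 32-bit word marks the end of the
+ // backtrace: the request ID set by the requester has it set,
+ // while hop entries added by devices do not.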
+ end = (body[0] & 0x80) ? 1 : 0;
+ rv = nni_msg_header_append(msg, body, 4);
+ if (rv != 0) {
+ // Out of memory, most likely. Drop the message but keep
+ // the pipe running rather than tearing things down.
+ goto drop;
+ }
+ nni_msg_trim(msg, 4);
+ if (end) {
+ break;
+ }
+ }
+
+ // Go ahead and send it up.
+ nni_aio_set_msg(p->aio_putq, msg);
+ nni_msgq_aio_put(s->urq, p->aio_putq);
+ return;
+
+drop:
+ nni_msg_free(msg);
+ nni_pipe_recv(p->pipe, p->aio_recv);
+}
+
+static void
+xrep0_pipe_putq_cb(void *arg)
+{
+ xrep0_pipe *p = arg;
+
+ if (nni_aio_result(p->aio_putq) != 0) {
+ nni_msg_free(nni_aio_get_msg(p->aio_putq));
+ nni_aio_set_msg(p->aio_putq, NULL);
+ nni_pipe_stop(p->pipe);
+ return;
+ }
+
+ nni_pipe_recv(p->pipe, p->aio_recv);
+}
+
+static int
+xrep0_sock_setopt_maxttl(void *arg, const void *buf, size_t sz, int typ)
+{
+ xrep0_sock *s = arg;
+ return (nni_copyin_int(&s->ttl, buf, sz, 1, 255, typ));
+}
+
+static int
+xrep0_sock_getopt_maxttl(void *arg, void *buf, size_t *szp, int typ)
+{
+ xrep0_sock *s = arg;
+ return (nni_copyout_int(s->ttl, buf, szp, typ));
+}
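+
+ // For example, an application can adjust this limit with
+ // nng_setopt_int(sock, NNG_OPT_MAXTTL, 12) and read it back
+ // with nng_getopt_int(sock, NNG_OPT_MAXTTL, &ttl).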
+
+static void
+xrep0_sock_send(void *arg, nni_aio *aio)
+{
+ xrep0_sock *s = arg;
+
+ nni_msgq_aio_put(s->uwq, aio);
+}
+
+static void
+xrep0_sock_recv(void *arg, nni_aio *aio)
+{
+ xrep0_sock *s = arg;
+
+ nni_msgq_aio_get(s->urq, aio);
+}
+
+// This is the global protocol structure -- our linkage to the core.
+// This should be the only global non-static symbol in this file.
+static nni_proto_pipe_ops xrep0_pipe_ops = {
+ .pipe_init = xrep0_pipe_init,
+ .pipe_fini = xrep0_pipe_fini,
+ .pipe_start = xrep0_pipe_start,
+ .pipe_stop = xrep0_pipe_stop,
+};
+
+static nni_proto_sock_option xrep0_sock_options[] = {
+ {
+ .pso_name = NNG_OPT_MAXTTL,
+ .pso_type = NNI_TYPE_INT32,
+ .pso_getopt = xrep0_sock_getopt_maxttl,
+ .pso_setopt = xrep0_sock_setopt_maxttl,
+ },
+ // terminate list
+ {
+ .pso_name = NULL,
+ },
+};
+
+static nni_proto_sock_ops xrep0_sock_ops = {
+ .sock_init = xrep0_sock_init,
+ .sock_fini = xrep0_sock_fini,
+ .sock_open = xrep0_sock_open,
+ .sock_close = xrep0_sock_close,
+ .sock_options = xrep0_sock_options,
+ .sock_filter = NULL, // No filtering for raw mode
+ .sock_send = xrep0_sock_send,
+ .sock_recv = xrep0_sock_recv,
+};
+
+static nni_proto xrep0_proto = {
+ .proto_version = NNI_PROTOCOL_VERSION,
+ .proto_self = { NNI_PROTO_REP_V0, "rep" },
+ .proto_peer = { NNI_PROTO_REQ_V0, "req" },
+ .proto_flags = NNI_PROTO_FLAG_SNDRCV | NNI_PROTO_FLAG_RAW,
+ .proto_sock_ops = &xrep0_sock_ops,
+ .proto_pipe_ops = &xrep0_pipe_ops,
+};
+
+int
+nng_rep0_open_raw(nng_socket *sidp)
+{
+ return (nni_proto_open(sidp, &xrep0_proto));
+}
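A closing illustration (a sketch, not from this commit) of what raw mode means for applications: with a socket opened via nng_rep0_open_raw(), the protocol keeps no per-request state, so the routing header built above (pipe ID plus backtrace) travels with each message, and echoing a message back with its header intact is enough to route the reply. The helper raw_echo_once is a hypothetical name.

#include <nng/nng.h>
#include <nng/protocol/reqrep0/rep.h>

// Sketch of a raw REP echo step; "sock" is assumed to be a raw REP
// socket that is already listening. Error handling is minimal.
static int
raw_echo_once(nng_socket sock)
{
	nng_msg *msg;
	int      rv;

	// In raw mode the received message keeps its routing header
	// (pipe ID plus backtrace) in the message header.
	if ((rv = nng_recvmsg(sock, &msg, 0)) != 0) {
		return (rv);
	}
	// Sending it back unmodified lets xrep0_sock_getq_cb route
	// the reply to the originating pipe.
	if ((rv = nng_sendmsg(sock, msg, 0)) != 0) {
		nng_msg_free(msg);
	}
	return (rv);
}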