2 * Copyright (c) 2021, 2024 Omar Polo <op@omarpolo.com>
4 * Permission to use, copy, modify, and distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above
6 * copyright notice and this permission notice appear in all copies.
8 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
9 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
10 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
11 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
12 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
13 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20 #include <sys/types.h>
21 #include <sys/socket.h>
24 #include <netinet/in.h>
37 #include <openssl/err.h>
43 #include "telescope.h"
/*
 * Channel back to the UI/main process (imsg event wrapper).
 * NOTE(review): this listing is line-sampled; declarations between the
 * numbered lines are elided.
 */
46 static struct imsgev *iev_ui;
48 /* a pending request */
/*
 * struct req (fragment): one in-flight network request.  Visible members:
 * a bufferevent for socket i/o, the getaddrinfo result list plus a cursor
 * `p` into it, resolver hints, an async-resolver handle (event_asr, used
 * by the getaddrinfo_async variant of conn_towards), and the list linkage
 * for `reqhead`.  Other members (fd, id, host, port, req, len, proto,
 * ctx, ccert*) appear in later fragments — TODO confirm full definition.
 */
62 	struct bufferevent *bev;
64 	struct addrinfo *servinfo, *p;
66 	struct addrinfo hints;
67 	struct event_asr *asrev;
70 	TAILQ_ENTRY(req) reqs;
/* Forward declarations for the file-local (static) helpers below. */
73 static struct req	*req_by_id(uint32_t);
75 static void		 die(void) __attribute__((__noreturn__));
77 static void		 try_to_connect(int, short, void*);
80 static void		 query_done(struct asr_result*, void*);
82 static void		 conn_towards(struct req*);
84 static void		 close_with_err(struct req*, const char*);
/* printf-style variant; format(printf) attribute lets -Wformat check callers. */
85 static void		 close_with_errf(struct req*, const char*, ...)
86     __attribute__((format(printf, 2, 3)));
88 static void		 net_tls_handshake(int, short, void *);
89 static void		 net_tls_readcb(int, short, void *);
90 static void		 net_tls_writecb(int, short, void *);
92 static int		 gemini_parse_reply(struct req *, const char *, size_t);
94 static void		 net_ready(struct req *req);
95 static void		 net_read(struct bufferevent *, void *);
96 static void		 net_write(struct bufferevent *, void *);
97 static void		 net_error(struct bufferevent *, short, void *);
99 static void		 handle_dispatch_imsg(int, short, void*);
101 static int		 net_send_ui(int, uint32_t, const void *, uint16_t);
103 /* TODO: making this customizable */
/* 5-second cap on the TLS handshake (passed to yield_w in try_to_connect). */
104 struct timeval timeout_for_handshake = { 5, 0 };
/* Signature shared by the libevent-style state callbacks (fd, event, arg). */
106 typedef void (*statefn)(int, short, void*);
/* Head of the list of all pending requests; entries linked via req->reqs. */
108 TAILQ_HEAD(, req) reqhead;
/*
 * Arrange for fn(fd, EV_READ, req) to run once when req->fd becomes
 * readable; tv is an optional timeout (NULL = none).
 * NOTE(review): return type line and braces elided in this sampled listing.
 */
111 yield_r(struct req *req, statefn fn, struct timeval *tv)
113 	event_once(req->fd, EV_READ, fn, req, tv);
/*
 * Write-side twin of yield_r: run fn once when req->fd becomes writable,
 * with optional timeout tv.
 */
117 yield_w(struct req *req, statefn fn, struct timeval *tv)
119 	event_once(req->fd, EV_WRITE, fn, req, tv);
/*
 * Look up a pending request by its imsg peer id by walking reqhead.
 * Presumably returns the matching req or NULL — body elided in this
 * sampled listing; TODO confirm.
 */
123 req_by_id(uint32_t id)
127 	TAILQ_FOREACH(r, &reqhead, reqs) {
/* die(): fatal-exit helper; marked noreturn.  Body elided. */
135 static void __attribute__((__noreturn__))
/*
 * Non-blocking connect state machine, re-entered via yield_w until the
 * connect completes.  Walks the addrinfo list req->p: creates a socket,
 * marks it nonblock/cloexec, attempts connect(); while in progress it
 * re-schedules itself and later checks SO_ERROR via getsockopt.  On
 * failure advances to ai_next; when the list is exhausted it frees the
 * addrinfo and reports "failed to connect".  On success it frees the
 * addrinfo and, per protocol, either skips TLS (finger/gopher) or sets
 * up a libtls client context and yields into net_tls_handshake with the
 * handshake timeout.  NOTE(review): listing is line-sampled — error
 * branches, labels and braces between the numbered lines are elided.
 */
142 try_to_connect(int fd, short ev, void *d)
146 	socklen_t len = sizeof(error);
/* Re-entry after an in-progress connect: harvest the async connect result. */
153 	if (getsockopt(req->fd, SOL_SOCKET, SO_ERROR, &error,
/* Fresh attempt on the current addrinfo candidate. */
163 	req->fd = socket(req->p->ai_family, req->p->ai_socktype,
164 	    req->p->ai_protocol);
/* Candidate failed — move on to the next resolved address. */
166 	req->p = req->p->ai_next;
170 	if (!mark_nonblock_cloexec(req->fd))
172 	if (connect(req->fd, req->p->ai_addr, req->p->ai_addrlen) == 0)
/* EINPROGRESS path, presumably: retry this function when fd is writable. */
174 	yield_w(req, try_to_connect, NULL);
/* All candidates exhausted: release the resolver result, report failure. */
178 	freeaddrinfo(req->servinfo);
179 	close_with_errf(req, "failed to connect to %s", req->host);
/* Connected: the addrinfo list is no longer needed. */
183 	freeaddrinfo(req->servinfo);
185 	switch (req->proto) {
188 		/* finger and gopher don't have a header nor TLS */
189 		req->done_header = 1;
/* Gemini path, presumably: build a throwaway tls_config for this request. */
194 		struct tls_config *conf;
196 		if ((conf = tls_config_new()) == NULL)
/* Gemini uses TOFU: cert verification is done by the UI via the peer hash,
 * so libtls' own name/cert checks are disabled here. */
199 		tls_config_insecure_noverifycert(conf);
200 		tls_config_insecure_noverifyname(conf);
/* Optional client certificate: same mmap'ed buffer serves as cert and key. */
202 		if (req->ccert && tls_config_set_keypair_mem(conf,
203 		    req->ccert, req->ccert_len, req->ccert, req->ccert_len)
205 			close_with_errf(req, "failed to load keypair: %s",
206 			    tls_config_error(conf));
207 			tls_config_free(conf);
212 		if ((req->ctx = tls_client()) == NULL) {
213 			close_with_errf(req, "tls_client: %s",
215 			tls_config_free(conf);
219 		if (tls_configure(req->ctx, conf) == -1) {
220 			close_with_errf(req, "tls_configure: %s",
221 			    tls_error(req->ctx));
222 			tls_config_free(conf);
/* Config is copied into the ctx by tls_configure; safe to free now. */
225 		tls_config_free(conf);
/* Attach libtls to the connected socket; host is used for SNI. */
227 		if (tls_connect_socket(req->ctx, req->fd, req->host)
229 			close_with_errf(req, "tls_connect_socket: %s",
230 			    tls_error(req->ctx));
/* Drive the handshake when writable, bounded by timeout_for_handshake. */
233 	yield_w(req, net_tls_handshake, &timeout_for_handshake);
/*
 * Completion callback for the async resolver (event_asr_run): on error
 * report it to the UI and stop; on success take ownership of the
 * addrinfo list, point the cursor at its head, and start connecting.
 */
244 query_done(struct asr_result *res, void *d)
249 	if (res->ar_gai_errno != 0) {
250 		close_with_errf(req, "failed to resolve %s: %s",
251 		    req->host, gai_strerror(res->ar_gai_errno));
256 	req->servinfo = res->ar_addrinfo;
257 	req->p = res->ar_addrinfo;
/* fd/ev arguments unused on first entry; req carries all the state. */
258 	try_to_connect(0, 0, req);
/*
 * Kick off name resolution, asynchronous variant (OpenBSD asr):
 * getaddrinfo_async + event_asr_run, continuing in query_done.
 * Presumably compiled only when asr is available (the blocking variant
 * below is the alternative) — the #ifdef lines are elided; TODO confirm.
 */
262 conn_towards(struct req *req)
266 	req->hints.ai_family = AF_UNSPEC;
267 	req->hints.ai_socktype = SOCK_STREAM;
268 	q = getaddrinfo_async(req->host, req->port, &req->hints,
/* Keep the handle so close_conn can abort an in-flight query. */
270 	req->asrev = event_asr_run(q, query_done, req);
/*
 * Kick off name resolution, portable blocking variant: plain
 * getaddrinfo(3).  On failure reports the gai_strerror text to the UI;
 * on success positions the cursor at the head of req->servinfo and
 * starts the connect state machine.
 */
274 conn_towards(struct req *req)
276 	struct addrinfo hints;
279 	memset(&hints, 0, sizeof(hints));
280 	hints.ai_family = AF_UNSPEC;
281 	hints.ai_socktype = SOCK_STREAM;
/* Presumably fills req->servinfo via the elided last argument. */
283 	if ((status = getaddrinfo(req->host, req->port, &hints,
285 		close_with_errf(req, "failed to resolve %s: %s",
286 		    req->host, gai_strerror(status));
291 	req->p = req->servinfo;
292 	try_to_connect(0, 0, req);
/*
 * Drain the OpenSSL error queue to stderr, tagging each entry with the
 * call site name `where`.  Diagnostic only; does not alter control flow.
 */
297 ssl_error(const char *where)
302 	fprintf(stderr, "failure(s) in %s:\n", where);
303 	while ((code = ERR_get_error()) != 0) {
304 		ERR_error_string_n(code, errbuf, sizeof(errbuf));
305 		fprintf(stderr, "- %s\n", errbuf);
/*
 * Tear down a request: abort any in-flight DNS query, free the
 * bufferevent, close the TLS session (re-yielding on WANT_POLLIN/OUT so
 * the close completes asynchronously), unmap the client certificate,
 * and unlink the req from reqhead.  Has the statefn signature so it can
 * be its own continuation.  NOTE(review): fd close, frees of host/port/
 * req strings and the req itself are in elided lines — TODO confirm.
 */
310 close_conn(int fd, short ev, void *d)
315 	if (req->asrev != NULL)
316 		event_asr_abort(req->asrev);
319 	if (req->bev != NULL) {
320 		bufferevent_free(req->bev);
324 	if (req->ctx != NULL) {
325 		switch (tls_close(req->ctx)) {
326 		case TLS_WANT_POLLIN:
/* Not done yet: come back here when the socket is readable. */
327 			yield_r(req, close_conn, NULL);
329 		case TLS_WANT_POLLOUT:
330 			yield_w(req, close_conn, NULL);
/* Any other failure: dump the OpenSSL error queue and carry on tearing down. */
333 			ssl_error("tls_close");
/* Client certificate was mmap'ed by load_cert; release mapping and fd. */
340 	if (req->ccert != NULL) {
341 		munmap(req->ccert, req->ccert_len);
342 		close(req->ccert_fd);
349 	TAILQ_REMOVE(&reqhead, req, reqs);
/*
 * Report the NUL-terminated error string to the UI (IMSG_ERR, including
 * the terminator in the length) and tear the connection down.
 */
356 close_with_err(struct req *req, const char *err)
358 	net_send_ui(IMSG_ERR, req->id, err, strlen(err)+1);
359 	close_conn(0, 0, req);
/*
 * printf-style wrapper around close_with_err: formats the message with
 * vasprintf, then reports and closes.  The free of `s` and the
 * vasprintf-failure branch are in elided lines — TODO confirm.
 */
363 close_with_errf(struct req *req, const char *fmt, ...)
369 	if (vasprintf(&s, fmt, ap) == -1)
373 	close_with_err(req, s);
/*
 * Drive the TLS handshake.  Re-yields on WANT_POLLIN/POLLOUT; a timeout
 * (from timeout_for_handshake set in try_to_connect) aborts the load.
 * On completion sends the peer certificate hash to the UI
 * (IMSG_CHECK_CERT) so the UI can do its trust-on-first-use check;
 * the UI answers with IMSG_CERT_STATUS (see handle_dispatch_imsg).
 */
378 net_tls_handshake(int fd, short event, void *d)
383 	if (event == EV_TIMEOUT) {
384 		close_with_err(req, "Timeout loading page");
388 	switch (tls_handshake(req->ctx)) {
389 	case TLS_WANT_POLLIN:
390 		yield_r(req, net_tls_handshake, NULL);
392 	case TLS_WANT_POLLOUT:
393 		yield_w(req, net_tls_handshake, NULL);
396 		ssl_error("tls_handshake");
/* Handshake done: fetch the cert hash; NULL presumably means failure. */
399 	hash = tls_peer_cert_hash(req->ctx);
401 		ssl_error("tls_peer_cert_hash");
402 		close_with_errf(req, "handshake failed: %s",
403 		    tls_error(req->ctx));
/* Hash string sent including its NUL terminator. */
406 	net_send_ui(IMSG_CHECK_CERT, req->id, hash, strlen(hash)+1);
/*
 * Replacement read callback installed on the bufferevent (see net_ready)
 * that routes reads through libtls instead of plain read(2): tls_read
 * into a stack buffer, append to bufev->input, honor the read
 * watermarks, then invoke the user's readcb.  WANT_POLLIN/POLLOUT
 * re-arms the read event; errors/EOF funnel to the errorcb with the
 * appropriate EVBUFFER_* flags.  Modeled on libevent's internal
 * bufferevent_readcb.  NOTE(review): several branches (retry/EOF gotos,
 * error dispatch labels) are in elided lines.
 */
410 net_tls_readcb(int fd, short event, void *d)
412 	struct bufferevent *bufev = d;
413 	struct req *req = bufev->cbarg;
414 	char buf[IBUF_READ_SIZE];
415 	int what = EVBUFFER_READ;
416 	int howmuch = IBUF_READ_SIZE;
421 	if (event == EV_TIMEOUT) {
422 		what |= EVBUFFER_TIMEOUT;
/* Respect the high read watermark: never read more than it allows. */
426 	if (bufev->wm_read.high != 0)
427 		howmuch = MIN(sizeof(buf), bufev->wm_read.high);
429 	switch (ret = tls_read(req->ctx, buf, howmuch)) {
430 	case TLS_WANT_POLLIN:
431 	case TLS_WANT_POLLOUT:
434 		ssl_error("tls_read");
435 		what |= EVBUFFER_ERROR;
/* ret == 0, presumably: clean EOF from the peer. */
441 		what |= EVBUFFER_EOF;
445 	res = evbuffer_add(bufev->input, buf, len);
447 		what |= EVBUFFER_ERROR;
451 	event_add(&bufev->ev_read, NULL);
/* Low watermark: don't fire readcb until enough data has accumulated. */
453 	len = EVBUFFER_LENGTH(bufev->input);
454 	if (bufev->wm_read.low != 0 && len < bufev->wm_read.low)
457 	if (bufev->readcb != NULL)
458 		(*bufev->readcb)(bufev, bufev->cbarg);
/* Retry path: just re-arm the read event. */
462 	event_add(&bufev->ev_read, NULL);
/* Error/EOF path: hand the accumulated flags to the error callback. */
466 	(*bufev->errorcb)(bufev, what, bufev->cbarg);
/*
 * Write-side counterpart of net_tls_readcb: flush bufev->output through
 * tls_write, drain what was written, re-arm the write event while data
 * remains, and call the user's writecb once the buffer falls to the low
 * write watermark.  WANT_POLLIN/POLLOUT retries; errors go to errorcb
 * with EVBUFFER_WRITE flags.  NOTE(review): gotos/labels between the
 * sampled lines are elided.
 */
470 net_tls_writecb(int fd, short event, void *d)
472 	struct bufferevent *bufev = d;
473 	struct req *req = bufev->cbarg;
476 	short what = EVBUFFER_WRITE;
478 	if (event & EV_TIMEOUT) {
479 		what |= EVBUFFER_TIMEOUT;
483 	if (EVBUFFER_LENGTH(bufev->output) != 0) {
484 		ret = tls_write(req->ctx, EVBUFFER_DATA(bufev->output),
485 		    EVBUFFER_LENGTH(bufev->output));
487 		case TLS_WANT_POLLIN:
488 		case TLS_WANT_POLLOUT:
491 			ssl_error("tls_write");
492 			what |= EVBUFFER_ERROR;
/* Partial writes are fine: drop only what tls_write consumed. */
497 		evbuffer_drain(bufev->output, len);
500 	if (EVBUFFER_LENGTH(bufev->output) != 0)
501 		event_add(&bufev->ev_write, NULL);
/* Notify the writer once we've drained to (or below) the low watermark. */
503 	if (bufev->writecb != NULL &&
504 	    EVBUFFER_LENGTH(bufev->output) <= bufev->wm_write.low)
505 		(*bufev->writecb)(bufev, bufev->cbarg);
/* Retry path: re-arm and wait for writability. */
509 	event_add(&bufev->ev_write, NULL);
513 	(*bufev->errorcb)(bufev, what, bufev->cbarg);
/*
 * Parse a Gemini reply header ("<2-digit code><SP><meta>"): validate the
 * two digit characters and the following space, compute the numeric
 * status code, and forward code + meta to the UI in a single IMSG_REPLY
 * built with imsg_create/imsg_add/imsg_close.  Reading is paused
 * (bufferevent_disable) until the UI reacts — re-enabled via
 * IMSG_PROCEED in handle_dispatch_imsg.  Presumably returns the code on
 * success and a negative/zero sentinel on malformed input (used by
 * net_read's `r < 20 || r >= 30` check) — return statements elided;
 * TODO confirm.
 */
517 gemini_parse_reply(struct req *req, const char *header, size_t len)
/* NOTE(review): header[0]/header[1] should be cast to unsigned char for
 * isdigit() to be strictly portable — cannot patch here (sampled listing). */
526 	if (!isdigit(header[0]) || !isdigit(header[1]))
529 	code = (header[0] - '0')*10 + (header[1] - '0');
530 	if (header[2] != ' ')
536 	if ((ibuf = imsg_create(&iev_ui->ibuf, IMSG_REPLY, req->id, 0,
537 	    sizeof(code) + len)) == NULL)
/* `t` presumably points at the meta portion of the header — defined in
 * an elided line. */
539 	if (imsg_add(ibuf, &code, sizeof(code)) == -1 ||
540 	    imsg_add(ibuf, t, len) == -1)
542 	imsg_close(&iev_ui->ibuf, ibuf);
543 	imsg_event_add(iev_ui);
545 	bufferevent_disable(req->bev, EV_READ|EV_WRITE);
550 /* called when we're ready to read/write */
/*
 * Connection (and, for Gemini, handshake) is up: build the bufferevent,
 * splice in the TLS read/write shims when a tls context exists, set
 * 1-byte watermarks so callbacks fire as soon as any data moves, enable
 * both directions, and queue the request line for sending.
 */
552 net_ready(struct req *req)
554 	req->bev = bufferevent_new(req->fd, net_read, net_write, net_error,
556 	if (req->bev == NULL)
/* Libevent 2 freezes evbuffers by default; unfreeze both ends. */
560 	evbuffer_unfreeze(req->bev->input, 0);
561 	evbuffer_unfreeze(req->bev->output, 1);
564 	/* setup tls i/o layer */
565 	if (req->ctx != NULL) {
/* Override the bufferevent's internal events so i/o goes through libtls. */
566 		event_set(&req->bev->ev_read, req->fd, EV_READ,
567 		    net_tls_readcb, req->bev);
568 		event_set(&req->bev->ev_write, req->fd, EV_WRITE,
569 		    net_tls_writecb, req->bev);
572 	/* TODO: adjust watermarks */
573 	bufferevent_setwatermark(req->bev, EV_WRITE, 1, 0);
574 	bufferevent_setwatermark(req->bev, EV_READ,  1, 0);
576 	bufferevent_enable(req->bev, EV_READ|EV_WRITE);
/* req->req/req->len were set from the IMSG_GET payload. */
578 	bufferevent_write(req->bev, req->req, req->len);
581 /* called after a read has been done */
/*
 * bufferevent read callback.  For Gemini, first peel off the CRLF-
 * terminated reply header (rejecting streams that exceed 1024 bytes
 * without one, per the Gemini spec's header limit), parse it, and close
 * immediately for non-2x statuses.  After the header — and always for
 * finger/gopher — relay the body to the UI in 4 KiB IMSG_BUF chunks.
 * NOTE(review): the free() of `header` and the chunking loop structure
 * are in elided lines.
 */
583 net_read(struct bufferevent *bev, void *d)
/* static: chunk buffer reused across calls; safe because the net process
 * is single-threaded under libevent — TODO confirm. */
585 	static char buf[4096];
587 	struct evbuffer *src = EVBUFFER_INPUT(bev);
592 	if (!req->done_header) {
593 		header = evbuffer_readln(src, &len, EVBUFFER_EOL_CRLF_STRICT);
/* No complete header within 1024 bytes: treat as a read error. */
594 		if (header == NULL && EVBUFFER_LENGTH(src) >= 1024) {
595 			(*bev->errorcb)(bev, EVBUFFER_READ, bev->cbarg);
600 		r = gemini_parse_reply(req, header, len);
602 		req->done_header = 1;
/* Parse failure, presumably: report as read error. */
604 			(*bev->errorcb)(bev, EVBUFFER_READ, bev->cbarg);
/* Only 2x (success) replies carry a body worth streaming. */
607 		if (r < 20 || r >= 30) {
608 			close_conn(0, 0, req);
614 	 * Split data into chunks before sending.  imsg can't handle
615 	 * message that are "too big".
618 	if ((len = bufferevent_read(bev, buf, sizeof(buf))) == 0)
620 	net_send_ui(IMSG_BUF, req->id, buf, len);
624 /* called after a write has been done */
/*
 * bufferevent write callback: once the output buffer is fully drained
 * (request sent), signal it via the error callback with EVBUFFER_WRITE —
 * net_error treats that flag as "finished sending request".
 */
626 net_write(struct bufferevent *bev, void *d)
628 	struct evbuffer *dst = EVBUFFER_OUTPUT(bev);
630 	if (EVBUFFER_LENGTH(dst) == 0)
631 		(*bev->errorcb)(bev, EVBUFFER_WRITE, bev->cbarg);
/*
 * bufferevent error/event callback — dispatches on the EVBUFFER_* flags:
 *  - TIMEOUT: report "Timeout loading page" and close;
 *  - ERROR:   report read/write error with the raw flag value;
 *  - EOF:     protocol error if no header was seen yet, otherwise flush
 *             any remaining buffered body to the UI, send IMSG_EOF, close;
 *  - WRITE:   request fully sent (signaled by net_write) — stop writing;
 *  - READ:    header-phase failure from net_read — protocol error;
 *  - anything else: report as unknown.
 */
635 net_error(struct bufferevent *bev, short error, void *d)
638 	struct evbuffer *src;
640 	if (error & EVBUFFER_TIMEOUT) {
641 		close_with_err(req, "Timeout loading page");
645 	if (error & EVBUFFER_ERROR) {
646 		close_with_errf(req, "%s error (0x%x)",
647 		    (error & EVBUFFER_READ) ? "read" : "write", error);
651 	if (error & EVBUFFER_EOF) {
652 		/* EOF and no header */
653 		if (!req->done_header) {
654 			close_with_err(req, "protocol error");
/* Forward whatever body bytes are still buffered before signaling EOF. */
658 		src = EVBUFFER_INPUT(req->bev);
659 		if (EVBUFFER_LENGTH(src) != 0)
660 			net_send_ui(IMSG_BUF, req->id, EVBUFFER_DATA(src),
661 			    EVBUFFER_LENGTH(src));
662 		net_send_ui(IMSG_EOF, req->id, NULL, 0);
663 		close_conn(0, 0, req);
667 	if (error & EVBUFFER_WRITE) {
668 		/* finished sending request */
669 		bufferevent_disable(bev, EV_WRITE);
673 	if (error & EVBUFFER_READ) {
674 		close_with_err(req, "protocol error");
678 	close_with_errf(req, "unknown event error %x", error);
/*
 * Take the client-certificate fd passed over imsg (if any), validate its
 * size, and mmap it read-only into req->ccert / req->ccert_len for later
 * use by tls_config_set_keypair_mem.  Presumably returns 0 on success
 * (including "no fd attached") and -1 on error — return statements and
 * the ccert_fd assignment are elided; TODO confirm.
 */
682 load_cert(struct imsg *imsg, struct req *req)
/* -1 means the sender attached no certificate — not an error per the
 * caller's usage; TODO confirm against the elided return. */
687 	if ((fd = imsg_get_fd(imsg)) == -1)
690 	if (fstat(fd, &sb) == -1)
/* Guard the off_t -> size_t conversion before mmap. */
694 	if (sb.st_size >= (off_t)SIZE_MAX) {
700 	req->ccert = mmap(NULL, sb.st_size, PROT_READ, MAP_PRIVATE, fd, 0);
701 	if (req->ccert == MAP_FAILED) {
707 	req->ccert_len = sb.st_size;
/*
 * Main imsg event handler for the pipe to the UI process: pump reads and
 * writes on the imsgbuf, then drain queued messages and dispatch on type:
 *  - IMSG_GET:         validate the request struct (NUL-terminated host/
 *                      port/req, known protocol), allocate a struct req,
 *                      copy the strings, attach the optional client cert,
 *                      and (presumably, in elided code) call conn_towards;
 *  - IMSG_CERT_STATUS: UI's verdict on the cert hash sent by
 *                      net_tls_handshake — close the connection when
 *                      rejected (certok handling partially elided);
 *  - IMSG_PROCEED:     UI accepted the reply header — resume reading;
 *  - (elided type, presumably IMSG_STOP): close the connection;
 *  - unknown types are fatal.
 * NOTE(review): loop structure, imsg_free calls and several labels are
 * in elided lines.
 */
714 handle_dispatch_imsg(int fd, short event, void *d)
716 	struct imsgev *iev = d;
717 	struct imsgbuf *ibuf = &iev->ibuf;
724 	if (event & EV_READ) {
725 		if ((n = imsg_read(ibuf)) == -1 && errno != EAGAIN)
/* n == 0, presumably: the UI hung up. */
728 			err(1, "connection closed");
730 	if (event & EV_WRITE) {
731 		if ((n = msgbuf_write(&ibuf->w)) == -1 && errno != EAGAIN)
732 			err(1, "msgbuf_write");
734 			err(1, "connection closed");
/* Drain all complete messages currently buffered. */
738 		if ((n = imsg_get(ibuf, &imsg)) == -1)
742 		switch (imsg_get_type(&imsg)) {
/* IMSG_GET, presumably (case label elided): reject malformed payloads —
 * wrong size or missing NUL terminators in the fixed-size string fields. */
744 			if (imsg_get_data(&imsg, &r, sizeof(r)) == -1 ||
745 			    r.host[sizeof(r.host) - 1] != '\0' ||
746 			    r.port[sizeof(r.port) - 1] != '\0' ||
747 			    r.req[sizeof(r.req) - 1] != '\0')
749 			if (r.proto != PROTO_FINGER &&
750 			    r.proto != PROTO_GEMINI &&
751 			    r.proto != PROTO_GOPHER)
754 			if ((req = calloc(1, sizeof(*req))) == NULL)
758 			req->id = imsg_get_id(&imsg);
759 			TAILQ_INSERT_HEAD(&reqhead, req, reqs);
761 			if ((req->host = strdup(r.host)) == NULL)
763 			if ((req->port = strdup(r.port)) == NULL)
765 			if ((req->req = strdup(r.req)) == NULL)
767 			if (load_cert(&imsg, req) == -1)
770 			req->len = strlen(req->req);
772 			req->proto = r.proto;
776 		case IMSG_CERT_STATUS:
777 			if ((req = req_by_id(imsg_get_id(&imsg))) == NULL)
780 			if (imsg_get_data(&imsg, &certok, sizeof(certok)) ==
/* Cert rejected by the UI, presumably: abort the request. */
786 				close_conn(0, 0, req);
/* IMSG_PROCEED, presumably (case label elided): resume the paused read
 * side that gemini_parse_reply disabled. */
790 			if ((req = req_by_id(imsg_get_id(&imsg))) == NULL)
792 			bufferevent_enable(req->bev, EV_READ);
/* IMSG_STOP, presumably (case label elided): user aborted the load. */
796 			if ((req = req_by_id(imsg_get_id(&imsg))) == NULL)
798 			close_conn(0, 0, req);
807 			errx(1, "got unknown imsg %d", imsg_get_type(&imsg));
/*
 * Thin wrapper: compose an imsg of `type` for peer `peerid` with the
 * given payload and schedule it on the UI pipe (no fd attached).
 * Returns imsg_compose_event's result.
 */
817 net_send_ui(int type, uint32_t peerid, const void *data,
820 	return imsg_compose_event(iev_ui, type, peerid, 0, -1,
/*
 * Net-process setup and teardown (enclosing function's signature and the
 * event loop call are elided in this sampled listing).  Initializes the
 * request list, wires up the imsg channel to the main/UI process on
 * inherited fd 3, enters the sandbox, and on exit flushes and closes the
 * imsg buffer.
 */
829 	TAILQ_INIT(&reqhead);
833 	/* Setup pipe and event handler to the main process */
834 	if ((iev_ui = malloc(sizeof(*iev_ui))) == NULL)
/* fd 3: pipe inherited from the parent process by convention. */
836 	imsg_init(&iev_ui->ibuf, 3);
837 	iev_ui->handler = handle_dispatch_imsg;
838 	iev_ui->events = EV_READ;
839 	event_set(&iev_ui->ev, iev_ui->ibuf.fd, iev_ui->events,
840 	    iev_ui->handler, iev_ui);
841 	event_add(&iev_ui->ev, NULL);
/* Drop privileges before (presumably) entering the event loop. */
843 	sandbox_net_process();
847 	msgbuf_clear(&iev_ui->ibuf.w);
848 	close(iev_ui->ibuf.fd);