2 * Copyright (c) 2021 Omar Polo <op@omarpolo.com>
4 * Permission to use, copy, modify, and distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above
6 * copyright notice and this permission notice appear in all copies.
8 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
9 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
10 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
11 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
12 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
13 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
19 #include <sys/types.h>
20 #include <sys/socket.h>
22 #include <netinet/in.h>
39 #define DNS_RDATACLASS_IN 1
40 #define DNS_RDATATYPE_TXT 16
42 #include "telescope.h"
44 static struct imsgev *iev_ui;
45 static struct tls_config *tlsconf;
47 /* a pending request */
57 struct bufferevent *bev;
59 struct addrinfo *servinfo, *p;
61 struct addrinfo hints;
62 struct event_asr *asrev;
65 TAILQ_ENTRY(req) reqs;
68 static struct req *req_by_id(uint32_t);
70 static void die(void) __attribute__((__noreturn__));
72 static void try_to_connect(int, short, void*);
74 static void offline_dns_done(struct rrsetinfo *, struct req *);
75 static void offline_dns_query(struct req *);
78 static void query_done(struct asr_result*, void*);
79 static void async_conn_towards(struct req*);
80 static void offline_query_done(struct asr_result *, void *);
82 static void blocking_conn_towards(struct req*);
85 static void close_with_err(struct req*, const char*);
86 static void close_with_errf(struct req*, const char*, ...)
87 __attribute__((format(printf, 2, 3)));
89 static void net_tls_handshake(int, short, void *);
90 static void net_tls_readcb(int, short, void *);
91 static void net_tls_writecb(int, short, void *);
93 static int gemini_parse_reply(struct req *, const char *, size_t);
95 static void net_ready(struct req *req);
96 static void net_read(struct bufferevent *, void *);
97 static void net_write(struct bufferevent *, void *);
98 static void net_error(struct bufferevent *, short, void *);
100 static void handle_get_raw(struct imsg *, size_t);
101 static void handle_cert_status(struct imsg*, size_t);
102 static void handle_proceed(struct imsg*, size_t);
103 static void handle_stop(struct imsg*, size_t);
104 static void handle_quit(struct imsg*, size_t);
105 static void handle_dispatch_imsg(int, short, void*);
107 static int net_send_ui(int, uint32_t, const void *, uint16_t);
109 /* TODO: making this customizable */
110 struct timeval timeout_for_handshake = { 5, 0 };
112 static imsg_handlerfn *handlers[] = {
113 [IMSG_GET_RAW] = handle_get_raw,
114 [IMSG_CERT_STATUS] = handle_cert_status,
115 [IMSG_PROCEED] = handle_proceed,
116 [IMSG_STOP] = handle_stop,
117 [IMSG_QUIT] = handle_quit,
120 typedef void (*statefn)(int, short, void*);
122 TAILQ_HEAD(, req) reqhead;
/*
 * Re-enter state function `fn' once when req->fd becomes readable;
 * `tv' is an optional timeout (NULL for none).
 */
125 yield_r(struct req *req, statefn fn, struct timeval *tv)
127 event_once(req->fd, EV_READ, fn, req, tv);
/*
 * Re-enter state function `fn' once when req->fd becomes writable;
 * `tv' is an optional timeout (NULL for none).
 */
131 yield_w(struct req *req, statefn fn, struct timeval *tv)
133 event_once(req->fd, EV_WRITE, fn, req, tv);
/*
 * Look up the in-flight request whose id matches the given imsg peer
 * id by walking the global reqhead list.  Presumably returns NULL when
 * no request matches — the tail of the loop is not visible here.
 */
137 req_by_id(uint32_t id)
141 TAILQ_FOREACH(r, &reqhead, reqs) {
/*
 * die: fatal-error helper declared __noreturn__ above; its body is not
 * visible in this extract.
 */
149 static void __attribute__((__noreturn__))
/*
 * Connection state machine.  Walks the resolver result list via the
 * req->p cursor, opening a non-blocking socket per entry and retrying
 * with the next address on failure.  Re-entered through yield_w()
 * while a connect(2) is in progress.  When every address fails it
 * frees the addrinfo list and falls back to offline_dns_query() to
 * fetch a human-readable reason.  On success: finger/gopher are ready
 * immediately; Gemini sets up a libtls client and starts handshaking.
 */
156 try_to_connect(int fd, short ev, void *d)
160 socklen_t len = sizeof(error);
/* SO_ERROR reports the outcome of the pending non-blocking connect. */
167 if (getsockopt(req->fd, SOL_SOCKET, SO_ERROR, &error,
/* try the next candidate address from the getaddrinfo list. */
177 req->fd = socket(req->p->ai_family, req->p->ai_socktype,
178 req->p->ai_protocol);
180 req->p = req->p->ai_next;
183 mark_nonblock(req->fd);
184 if (connect(req->fd, req->p->ai_addr, req->p->ai_addrlen) == 0)
/* connect in progress: come back when the socket is writable. */
186 yield_w(req, try_to_connect, NULL);
/* all addresses failed: ask the _offline TXT record why. */
191 freeaddrinfo(req->servinfo);
192 offline_dns_query(req);
196 freeaddrinfo(req->servinfo);
198 switch (req->proto) {
201 /* finger and gopher don't have a header nor TLS */
202 req->done_header = 1;
208 if ((req->ctx = tls_client()) == NULL) {
209 close_with_errf(req, "tls_client: %s",
213 if (tls_configure(req->ctx, tlsconf) == -1) {
214 close_with_errf(req, "tls_configure: %s",
215 tls_error(req->ctx));
/* SNI / session host is the URL host. */
218 if (tls_connect_socket(req->ctx, req->fd, req->url.host)
220 close_with_errf(req, "tls_connect_socket: %s",
221 tls_error(req->ctx));
/* handshake is bounded by timeout_for_handshake (5s). */
224 yield_w(req, net_tls_handshake, &timeout_for_handshake);
/*
 * Completion of the "_offline.<host>" TXT lookup.  A TXT rdata is a
 * length-prefixed string: the first byte is the length, the rest the
 * text.  If a usable record was found, report it to the UI as the
 * reason the site is unreachable; otherwise fall back to a generic
 * "failed to connect" error.
 */
233 offline_dns_done(struct rrsetinfo *res, struct req *req)
236 const char *reason = NULL;
238 for (i = 0; i < res->rri_nrdatas; ++i) {
/* skip empty/degenerate records (length byte only). */
239 if (res->rri_rdatas[i].rdi_length <= 1)
242 len = *(uint8_t*)res->rri_rdatas[i].rdi_data;
243 reason = res->rri_rdatas[i].rdi_data+1;
/* no TXT reason available: generic error. */
248 close_with_errf(req, "failed to connect to %s", req->url.host);
251 "failed to connect to %s\n\nThe site says: %*s\n",
252 req->url.host, (int)len, reason);
/*
 * Query the TXT record of "_offline.<host>" to learn why the host was
 * unreachable.  Two build variants appear here: an asynchronous one
 * using getrrsetbyname_async()/event_asr_run() and a blocking one
 * using getrrsetbyname() — presumably selected by a preprocessor
 * conditional not visible in this extract.  Both end up in
 * offline_dns_done().
 */
258 offline_dns_query(struct req *req)
262 strlcpy(hostname, "_offline.", sizeof(hostname));
263 strlcat(hostname, req->url.host, sizeof(hostname));
/* async variant: resolver completion delivered via the event loop. */
269 q = getrrsetbyname_async(hostname, DNS_RDATACLASS_IN,
270 DNS_RDATATYPE_TXT, 0, NULL);
271 req->asrev = event_asr_run(q, offline_query_done, req);
/* blocking variant. */
275 struct rrsetinfo *res;
277 if (getrrsetbyname(hostname, DNS_RDATACLASS_IN,
278 DNS_RDATATYPE_TXT, 0, &res))
279 close_with_errf(req, "failed to connect to %s",
282 offline_dns_done(res, req);
/*
 * getaddrinfo_async completion callback: on resolver failure report it
 * to the UI via gai_strerror(); otherwise remember the address list,
 * point the cursor at its head and start the connect state machine.
 */
289 query_done(struct asr_result *res, void *d)
294 if (res->ar_gai_errno != 0) {
295 close_with_errf(req, "failed to resolve %s: %s",
296 req->url.host, gai_strerror(res->ar_gai_errno));
301 req->servinfo = res->ar_addrinfo;
302 req->p = res->ar_addrinfo;
/* fd/ev arguments unused on the first entry. */
303 try_to_connect(0, 0, req);
/*
 * Kick off an asynchronous name resolution for the request's host.
 * Defaults to port 1965 (Gemini) when the URL carries no explicit
 * port.  Completion lands in query_done().
 */
307 async_conn_towards(struct req *req)
310 const char *proto = "1965";
312 if (*req->url.port != '\0')
313 proto = req->url.port;
/* either v4 or v6, stream sockets only. */
315 req->hints.ai_family = AF_UNSPEC;
316 req->hints.ai_socktype = SOCK_STREAM;
317 q = getaddrinfo_async(req->url.host, proto, &req->hints, NULL);
318 req->asrev = event_asr_run(q, query_done, req);
/*
 * Async completion of the "_offline" TXT lookup: a resolver error
 * collapses into a generic failure message, otherwise hand the record
 * set to offline_dns_done().
 */
322 offline_query_done(struct asr_result *res, void *d)
327 if (res->ar_rrset_errno != 0) {
328 close_with_errf(req, "failed to resolve %s", req->url.host);
332 offline_dns_done(res->ar_rrsetinfo, req);
/*
 * Blocking counterpart of async_conn_towards(): resolve the host with
 * plain getaddrinfo(3) (defaulting to port 1965) and, on success,
 * enter the connect state machine.  Presumably used on builds without
 * the asr async resolver — TODO confirm against the (elided)
 * preprocessor conditionals.
 */
337 blocking_conn_towards(struct req *req)
339 struct addrinfo hints;
340 struct phos_uri *url = &req->url;
342 const char *proto = "1965";
344 if (*url->port != '\0')
347 memset(&hints, 0, sizeof(hints));
348 hints.ai_family = AF_UNSPEC;
349 hints.ai_socktype = SOCK_STREAM;
351 if ((status = getaddrinfo(url->host, proto, &hints, &req->servinfo))) {
352 close_with_errf(req, "failed to resolve %s: %s",
353 url->host, gai_strerror(status));
358 req->p = req->servinfo;
359 try_to_connect(0, 0, req);
/*
 * Tear down a request: abort any in-flight resolver query, free the
 * bufferevent, and shut down TLS.  tls_close(3) may need more I/O
 * (TLS_WANT_POLLIN/POLLOUT), in which case this function re-schedules
 * itself via yield_r()/yield_w() and returns.  Finally unlinks the
 * request from the global list.  Callable as an event callback; the
 * fd/ev arguments are unused by direct callers (passed as 0, 0).
 */
364 close_conn(int fd, short ev, void *d)
369 if (req->asrev != NULL)
370 event_asr_abort(req->asrev);
373 if (req->bev != NULL) {
374 bufferevent_free(req->bev);
378 if (req->ctx != NULL) {
379 switch (tls_close(req->ctx)) {
380 case TLS_WANT_POLLIN:
/* retry the close when the socket is readable again. */
381 yield_r(req, close_conn, NULL);
383 case TLS_WANT_POLLOUT:
384 yield_w(req, close_conn, NULL);
392 TAILQ_REMOVE(&reqhead, req, reqs);
/*
 * Report `err' to the UI (IMSG_ERR, NUL terminator included) and
 * tear the request down.
 */
399 close_with_err(struct req *req, const char *err)
401 net_send_ui(IMSG_ERR, req->id, err, strlen(err)+1);
402 close_conn(0, 0, req);
/*
 * printf-style wrapper around close_with_err(): format the message
 * with vasprintf(3) and forward it.  Format checking is enforced by
 * the __attribute__((format)) on the prototype above.
 */
406 close_with_errf(struct req *req, const char *fmt, ...)
412 if (vasprintf(&s, fmt, ap) == -1)
416 close_with_err(req, s);
/*
 * Drive the TLS handshake.  A timeout event aborts the page load;
 * TLS_WANT_POLLIN/POLLOUT re-schedule this function for the matching
 * socket readiness.  Once the handshake completes, the peer
 * certificate hash is sent to the UI (IMSG_CHECK_CERT) — presumably
 * the UI answers with IMSG_CERT_STATUS to accept or reject it.
 */
421 net_tls_handshake(int fd, short event, void *d)
426 if (event == EV_TIMEOUT) {
427 close_with_err(req, "Timeout loading page");
431 switch (tls_handshake(req->ctx)) {
432 case TLS_WANT_POLLIN:
433 yield_r(req, net_tls_handshake, NULL);
435 case TLS_WANT_POLLOUT:
436 yield_w(req, net_tls_handshake, NULL);
/* a NULL hash presumably indicates handshake failure — see error path. */
440 hash = tls_peer_cert_hash(req->ctx);
442 close_with_errf(req, "handshake failed: %s",
443 tls_error(req->ctx));
446 net_send_ui(IMSG_CHECK_CERT, req->id, hash, strlen(hash)+1);
/*
 * libtls-aware replacement for the bufferevent read event handler:
 * reads from the TLS context into a stack buffer (honouring the read
 * high watermark), appends the bytes to bufev->input, and then drives
 * the user callbacks exactly as a plain bufferevent would —
 * re-scheduling on TLS_WANT_POLLIN/POLLOUT, reporting EOF/error via
 * bufev->errorcb, and deferring readcb until the low watermark is met.
 */
450 net_tls_readcb(int fd, short event, void *d)
452 struct bufferevent *bufev = d;
453 struct req *req = bufev->cbarg;
454 char buf[IBUF_READ_SIZE];
455 int what = EVBUFFER_READ;
456 int howmuch = IBUF_READ_SIZE;
461 if (event == EV_TIMEOUT) {
462 what |= EVBUFFER_TIMEOUT;
/* clamp the read to the configured high watermark, if any. */
466 if (bufev->wm_read.high != 0)
467 howmuch = MIN(sizeof(buf), bufev->wm_read.high);
469 switch (ret = tls_read(req->ctx, buf, howmuch)) {
470 case TLS_WANT_POLLIN:
471 case TLS_WANT_POLLOUT:
474 what |= EVBUFFER_ERROR;
/* tls_read returned 0: orderly shutdown from the peer. */
480 what |= EVBUFFER_EOF;
484 res = evbuffer_add(bufev->input, buf, len);
486 what |= EVBUFFER_ERROR;
490 event_add(&bufev->ev_read, NULL);
/* wait for more data if below the low watermark. */
492 len = EVBUFFER_LENGTH(bufev->input);
493 if (bufev->wm_read.low != 0 && len < bufev->wm_read.low)
496 if (bufev->readcb != NULL)
497 (*bufev->readcb)(bufev, bufev->cbarg);
501 event_add(&bufev->ev_read, NULL);
/* error/EOF path: notify the bufferevent owner. */
505 (*bufev->errorcb)(bufev, what, bufev->cbarg);
/*
 * libtls-aware replacement for the bufferevent write event handler:
 * flushes bufev->output through tls_write(), draining whatever was
 * accepted.  TLS_WANT_POLLIN/POLLOUT re-arm the write event; when the
 * output falls to the low watermark the user writecb fires; errors and
 * timeouts are reported through bufev->errorcb.
 */
509 net_tls_writecb(int fd, short event, void *d)
511 struct bufferevent *bufev = d;
512 struct req *req = bufev->cbarg;
515 short what = EVBUFFER_WRITE;
517 if (event & EV_TIMEOUT) {
518 what |= EVBUFFER_TIMEOUT;
522 if (EVBUFFER_LENGTH(bufev->output) != 0) {
523 ret = tls_write(req->ctx, EVBUFFER_DATA(bufev->output),
524 EVBUFFER_LENGTH(bufev->output));
526 case TLS_WANT_POLLIN:
527 case TLS_WANT_POLLOUT:
530 what |= EVBUFFER_ERROR;
/* remove the bytes tls_write actually consumed. */
535 evbuffer_drain(bufev->output, len);
/* still data pending: keep the write event armed. */
538 if (EVBUFFER_LENGTH(bufev->output) != 0)
539 event_add(&bufev->ev_write, NULL);
541 if (bufev->writecb != NULL &&
542 EVBUFFER_LENGTH(bufev->output) <= bufev->wm_write.low)
543 (*bufev->writecb)(bufev, bufev->cbarg);
547 event_add(&bufev->ev_write, NULL);
/* error/timeout path: notify the bufferevent owner. */
551 (*bufev->errorcb)(bufev, what, bufev->cbarg);
/*
 * Parse a Gemini reply header of the form "<2 digits><space><meta>".
 * Rejects malformed headers, then forwards the numeric status code
 * (IMSG_GOT_CODE) and the meta string (IMSG_GOT_META) to the UI.
 * Reading is paused until the UI replies (see handle_proceed), and any
 * non-2x status (< 20 or >= 30) closes the connection immediately.
 */
555 gemini_parse_reply(struct req *req, const char *header, size_t len)
563 if (!isdigit(header[0]) || !isdigit(header[1]))
566 code = (header[0] - '0')*10 + (header[1] - '0');
567 if (header[2] != ' ')
572 net_send_ui(IMSG_GOT_CODE, req->id, &code, sizeof(code));
573 net_send_ui(IMSG_GOT_META, req->id, t, strlen(t)+1);
/* stop I/O until the UI tells us whether to proceed. */
575 bufferevent_disable(req->bev, EV_READ|EV_WRITE);
577 if (code < 20 || code >= 30)
578 close_conn(0, 0, req);
582 /* called when we're ready to read/write */
584 net_ready(struct req *req)
586 req->bev = bufferevent_new(req->fd, net_read, net_write, net_error,
588 if (req->bev == NULL)
592 evbuffer_unfreeze(req->bev->input, 0);
593 evbuffer_unfreeze(req->bev->output, 1);
596 /* setup tls i/o layer */
597 if (req->ctx != NULL) {
598 event_set(&req->bev->ev_read, req->fd, EV_READ,
599 net_tls_readcb, req->bev);
600 event_set(&req->bev->ev_write, req->fd, EV_WRITE,
601 net_tls_writecb, req->bev);
604 /* TODO: adjust watermarks */
605 bufferevent_setwatermark(req->bev, EV_WRITE, 1, 0);
606 bufferevent_setwatermark(req->bev, EV_READ, 1, 0);
608 bufferevent_enable(req->bev, EV_READ|EV_WRITE);
610 bufferevent_write(req->bev, req->req, req->len);
/* called after a read has been done */
613 /* called after a read has been done */
/*
 * Bufferevent read callback.  First extracts the CRLF-terminated reply
 * header (failing if 1024+ bytes arrive without one) and hands it to
 * gemini_parse_reply(); afterwards streams the body to the UI in
 * chunks of at most 4096 bytes per IMSG_BUF, since imsg payloads are
 * size-limited, then drains the input buffer.
 */
615 net_read(struct bufferevent *bev, void *d)
618 struct evbuffer *src = EVBUFFER_INPUT(bev);
624 if (!req->done_header) {
625 header = evbuffer_readln(src, &len, EVBUFFER_EOL_CRLF_STRICT);
/* no CRLF within the first KiB: treat as a protocol error below. */
626 if (header == NULL && EVBUFFER_LENGTH(src) >= 1024)
630 r = gemini_parse_reply(req, header, len);
634 req->done_header = 1;
638 if ((len = EVBUFFER_LENGTH(src)) == 0)
640 data = EVBUFFER_DATA(src);
643 /*
644 * Split data into chunks before sending. imsg can't handle
645 * message that are "too big".
646 */
647 chunk = MIN(len, 4096);
648 net_send_ui(IMSG_BUF, req->id, data, chunk);
653 evbuffer_drain(src, EVBUFFER_LENGTH(src));
/* report the failure through the error callback (protocol error). */
657 (*bev->errorcb)(bev, EVBUFFER_READ, bev->cbarg);
/* called after a write has been done */
660 /* called after a write has been done */
/*
 * Bufferevent write callback: once the output buffer is empty the
 * request has been fully sent; signal that via errorcb with
 * EVBUFFER_WRITE (see the matching branch in net_error).
 */
662 net_write(struct bufferevent *bev, void *d)
664 struct evbuffer *dst = EVBUFFER_OUTPUT(bev);
666 if (EVBUFFER_LENGTH(dst) == 0)
667 (*bev->errorcb)(bev, EVBUFFER_WRITE, bev->cbarg);
/*
 * Central bufferevent error/status callback.  Timeouts and buffer
 * errors abort the page load; EOF flushes whatever is left of the body
 * to the UI, sends IMSG_EOF and closes; EVBUFFER_WRITE means the
 * request finished sending (writing is disabled, reading continues);
 * EVBUFFER_READ is raised by net_read() on a malformed header.
 */
671 net_error(struct bufferevent *bev, short error, void *d)
674 struct evbuffer *src;
676 if (error & EVBUFFER_TIMEOUT) {
677 close_with_err(req, "Timeout loading page");
681 if (error & EVBUFFER_ERROR) {
682 close_with_err(req, "buffer event error");
686 if (error & EVBUFFER_EOF) {
/* flush any body bytes still buffered before signalling EOF. */
687 src = EVBUFFER_INPUT(req->bev);
688 if (EVBUFFER_LENGTH(src) != 0)
689 net_send_ui(IMSG_BUF, req->id, EVBUFFER_DATA(src),
690 EVBUFFER_LENGTH(src));
691 net_send_ui(IMSG_EOF, req->id, NULL, 0);
692 close_conn(0, 0, req);
696 if (error & EVBUFFER_WRITE) {
697 /* finished sending request */
698 bufferevent_disable(bev, EV_WRITE);
702 if (error & EVBUFFER_READ) {
703 close_with_err(req, "protocol error");
707 close_with_errf(req, "unknown event error %x", error);
/*
 * IMSG_GET_RAW handler: validate the payload size, allocate a new
 * request keyed by the imsg peer id, copy host/port/request string
 * from the payload and start name resolution (async or blocking
 * variant, presumably chosen at build time — see *_conn_towards).
 */
711 handle_get_raw(struct imsg *imsg, size_t datalen)
718 if (datalen != sizeof(*r))
721 if ((req = calloc(1, sizeof(*req))) == NULL)
/* the peerid doubles as the request id for all replies to the UI. */
724 req->id = imsg->hdr.peerid;
725 TAILQ_INSERT_HEAD(&reqhead, req, reqs);
727 strlcpy(req->url.host, r->host, sizeof(req->url.host));
728 strlcpy(req->url.port, r->port, sizeof(req->url.port));
730 strlcpy(req->req, r->req, sizeof(req->req));
731 req->len = strlen(r->req);
733 req->proto = r->proto;
736 async_conn_towards(req);
738 blocking_conn_towards(req);
/*
 * IMSG_CERT_STATUS handler: the UI's verdict on the certificate hash
 * sent from net_tls_handshake().  Reads the is_ok flag from the
 * payload; the visible close_conn() is presumably the rejection path —
 * the accept branch is elided from this extract.
 */
743 handle_cert_status(struct imsg *imsg, size_t datalen)
748 req = req_by_id(imsg->hdr.peerid);
750 if (datalen < sizeof(is_ok))
752 memcpy(&is_ok, imsg->data, sizeof(is_ok));
757 close_conn(0, 0, req);
/*
 * IMSG_PROCEED handler: the UI accepted the reply header (see
 * gemini_parse_reply, which paused I/O) — resume reading the body.
 */
761 handle_proceed(struct imsg *imsg, size_t datalen)
765 if ((req = req_by_id(imsg->hdr.peerid)) == NULL)
768 bufferevent_enable(req->bev, EV_READ);
/*
 * IMSG_STOP handler: the UI aborted the load — tear the request down.
 */
772 handle_stop(struct imsg *imsg, size_t datalen)
776 if ((req = req_by_id(imsg->hdr.peerid)) == NULL)
778 close_conn(0, 0, req);
/*
 * IMSG_QUIT handler — presumably terminates the net process; the body
 * is not visible in this extract.
 */
782 handle_quit(struct imsg *imsg, size_t datalen)
/*
 * libevent callback for the imsg pipe to the UI: dispatch incoming
 * messages through the handlers[] table; a closed pipe is fatal.
 */
788 handle_dispatch_imsg(int fd, short ev, void *d)
790 struct imsgev *iev = d;
792 if (dispatch_imsg(iev, ev, handlers, sizeof(handlers)) == -1)
793 err(1, "connection closed");
/*
 * Send an imsg of the given type/peerid (no fd, payload `data' of
 * `datalen' bytes) to the UI process via the shared iev_ui channel.
 */
797 net_send_ui(int type, uint32_t peerid, const void *data,
800 return imsg_compose_event(iev_ui, type, peerid, 0, -1,
/*
 * NOTE(review): this is the tail of the net process entry point; its
 * signature is not visible in this extract.  Initializes the request
 * list and TLS config (certificate checks disabled here — trust
 * decisions presumably happen in the UI via IMSG_CHECK_CERT), wires
 * the imsg pipe to the parent on fd 3, sandboxes the process, and
 * cleans up after the (elided) event loop returns.
 */
809 TAILQ_INIT(&reqhead);
811 if ((tlsconf = tls_config_new()) == NULL)
813 tls_config_insecure_noverifycert(tlsconf);
814 tls_config_insecure_noverifyname(tlsconf);
818 /* Setup pipe and event handler to the main process */
/* fd 3 is the pre-opened pipe inherited from the parent process. */
819 if ((iev_ui = malloc(sizeof(*iev_ui))) == NULL)
821 imsg_init(&iev_ui->ibuf, 3);
822 iev_ui->handler = handle_dispatch_imsg;
823 iev_ui->events = EV_READ;
824 event_set(&iev_ui->ev, iev_ui->ibuf.fd, iev_ui->events,
825 iev_ui->handler, iev_ui);
826 event_add(&iev_ui->ev, NULL);
828 sandbox_net_process();
832 tls_config_free(tlsconf);
833 msgbuf_clear(&iev_ui->ibuf.w);
834 close(iev_ui->ibuf.fd);