Blob


1 /*
2 * Copyright (c) 2021 Omar Polo <op@omarpolo.com>
3 *
4 * Permission to use, copy, modify, and distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above
6 * copyright notice and this permission notice appear in all copies.
7 *
8 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
9 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
10 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
11 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
12 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
13 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15 */
17 #include "compat.h"
19 #include <sys/types.h>
20 #include <sys/socket.h>
22 #include <netinet/in.h>
24 #include <assert.h>
25 #include <ctype.h>
26 #include <errno.h>
27 #include <netdb.h>
28 #include <stdarg.h>
29 #include <stdio.h>
30 #include <stdlib.h>
31 #include <string.h>
32 #include <tls.h>
33 #include <unistd.h>
35 #if HAVE_ASR_RUN
36 # include <asr.h>
37 #endif
39 #include "telescope.h"
40 #include "utils.h"
42 static struct imsgev *iev_ui;
44 /* a pending request */
45 struct req {
46 struct phos_uri url;
47 uint32_t id;
48 int proto;
49 int fd;
50 struct tls *ctx;
51 char req[1024];
52 size_t len;
53 int done_header;
54 struct bufferevent *bev;
56 struct addrinfo *servinfo, *p;
57 #if HAVE_ASR_RUN
58 struct addrinfo hints;
59 struct event_asr *asrev;
60 #endif
62 TAILQ_ENTRY(req) reqs;
63 };
65 static struct req *req_by_id(uint32_t);
67 static void die(void) __attribute__((__noreturn__));
69 static void try_to_connect(int, short, void*);
71 #if HAVE_ASR_RUN
72 static void query_done(struct asr_result*, void*);
73 static void async_conn_towards(struct req*);
74 #else
75 static void blocking_conn_towards(struct req*);
76 #endif
78 static void close_with_err(struct req*, const char*);
79 static void close_with_errf(struct req*, const char*, ...)
80 __attribute__((format(printf, 2, 3)));
82 static void net_tls_handshake(int, short, void *);
83 static void net_tls_readcb(int, short, void *);
84 static void net_tls_writecb(int, short, void *);
86 static int gemini_parse_reply(struct req *, const char *, size_t);
88 static void net_ready(struct req *req);
89 static void net_read(struct bufferevent *, void *);
90 static void net_write(struct bufferevent *, void *);
91 static void net_error(struct bufferevent *, short, void *);
93 static void handle_get_raw(struct imsg *, size_t);
94 static void handle_cert_status(struct imsg*, size_t);
95 static void handle_proceed(struct imsg*, size_t);
96 static void handle_stop(struct imsg*, size_t);
97 static void handle_quit(struct imsg*, size_t);
98 static void handle_dispatch_imsg(int, short, void*);
100 static int net_send_ui(int, uint32_t, const void *, uint16_t);
102 /* TODO: making this customizable */
103 struct timeval timeout_for_handshake = { 5, 0 };
105 static imsg_handlerfn *handlers[] = {
106 [IMSG_GET_RAW] = handle_get_raw,
107 [IMSG_CERT_STATUS] = handle_cert_status,
108 [IMSG_PROCEED] = handle_proceed,
109 [IMSG_STOP] = handle_stop,
110 [IMSG_QUIT] = handle_quit,
111 };
113 typedef void (*statefn)(int, short, void*);
115 TAILQ_HEAD(, req) reqhead;
117 static inline void
118 yield_r(struct req *req, statefn fn, struct timeval *tv)
120 event_once(req->fd, EV_READ, fn, req, tv);
123 static inline void
124 yield_w(struct req *req, statefn fn, struct timeval *tv)
126 event_once(req->fd, EV_WRITE, fn, req, tv);
129 static struct req *
130 req_by_id(uint32_t id)
132 struct req *r;
134 TAILQ_FOREACH(r, &reqhead, reqs) {
135 if (r->id == id)
136 return r;
139 return NULL;
/* Fatal internal error: bail out.  TODO: report something to the UI. */
static void __attribute__((__noreturn__))
die(void)
{
	abort(); /* TODO */
}
148 static void
149 try_to_connect(int fd, short ev, void *d)
151 struct req *req = d;
152 int error = 0;
153 socklen_t len = sizeof(error);
155 again:
156 if (req->p == NULL)
157 goto err;
159 if (req->fd != -1) {
160 if (getsockopt(req->fd, SOL_SOCKET, SO_ERROR, &error,
161 &len) == -1)
162 goto err;
163 if (error != 0) {
164 errno = error;
165 goto err;
167 goto done;
170 req->fd = socket(req->p->ai_family, req->p->ai_socktype,
171 req->p->ai_protocol);
172 if (req->fd == -1) {
173 req->p = req->p->ai_next;
174 goto again;
175 } else {
176 if (!mark_nonblock_cloexec(req->fd))
177 goto err;
178 if (connect(req->fd, req->p->ai_addr, req->p->ai_addrlen) == 0)
179 goto done;
180 yield_w(req, try_to_connect, NULL);
182 return;
184 err:
185 freeaddrinfo(req->servinfo);
186 close_with_errf(req, "failed to connect to %s",
187 req->url.host);
188 return;
190 done:
191 freeaddrinfo(req->servinfo);
193 switch (req->proto) {
194 case PROTO_FINGER:
195 case PROTO_GOPHER:
196 /* finger and gopher don't have a header nor TLS */
197 req->done_header = 1;
198 net_ready(req);
199 break;
201 case PROTO_GEMINI: {
202 struct tls_config *conf;
204 if ((conf = tls_config_new()) == NULL)
205 die();
207 tls_config_insecure_noverifycert(conf);
208 tls_config_insecure_noverifyname(conf);
210 /* prepare tls */
211 if ((req->ctx = tls_client()) == NULL) {
212 close_with_errf(req, "tls_client: %s",
213 strerror(errno));
214 return;
217 if (tls_configure(req->ctx, conf) == -1) {
218 close_with_errf(req, "tls_configure: %s",
219 tls_error(req->ctx));
220 return;
222 tls_config_free(conf);
224 if (tls_connect_socket(req->ctx, req->fd, req->url.host)
225 == -1) {
226 close_with_errf(req, "tls_connect_socket: %s",
227 tls_error(req->ctx));
228 return;
230 yield_w(req, net_tls_handshake, &timeout_for_handshake);
231 break;
234 default:
235 die();
239 #if HAVE_ASR_RUN
240 static void
241 query_done(struct asr_result *res, void *d)
243 struct req *req = d;
245 req->asrev = NULL;
246 if (res->ar_gai_errno != 0) {
247 close_with_errf(req, "failed to resolve %s: %s",
248 req->url.host, gai_strerror(res->ar_gai_errno));
249 return;
252 req->fd = -1;
253 req->servinfo = res->ar_addrinfo;
254 req->p = res->ar_addrinfo;
255 try_to_connect(0, 0, req);
/* Kick off an asynchronous name resolution for req->url. */
static void
async_conn_towards(struct req *req)
{
	struct asr_query	*q;
	const char		*proto = "1965";	/* default gemini port */

	if (*req->url.port != '\0')
		proto = req->url.port;

	req->hints.ai_family = AF_UNSPEC;
	req->hints.ai_socktype = SOCK_STREAM;
	q = getaddrinfo_async(req->url.host, proto, &req->hints, NULL);
	req->asrev = event_asr_run(q, query_done, req);
}
272 #else
/*
 * Resolve req->url synchronously and start connecting.  Used where
 * asr_run is not available; blocks the whole net process during the
 * lookup.
 */
static void
blocking_conn_towards(struct req *req)
{
	struct phos_uri	*url = &req->url;
	struct addrinfo	 hints = {
		.ai_family = AF_UNSPEC,
		.ai_socktype = SOCK_STREAM,
	};
	const char	*proto = "1965";	/* default gemini port */
	int		 status;

	if (*url->port != '\0')
		proto = url->port;

	if ((status = getaddrinfo(url->host, proto, &hints, &req->servinfo))) {
		close_with_errf(req, "failed to resolve %s: %s",
		    url->host, gai_strerror(status));
		return;
	}

	req->fd = -1;
	req->p = req->servinfo;
	try_to_connect(0, 0, req);
}
298 #endif
300 static void
301 close_conn(int fd, short ev, void *d)
303 struct req *req = d;
305 #if HAVE_ASR_RUN
306 if (req->asrev != NULL)
307 event_asr_abort(req->asrev);
308 #endif
310 if (req->bev != NULL) {
311 bufferevent_free(req->bev);
312 req->bev = NULL;
315 if (req->ctx != NULL) {
316 switch (tls_close(req->ctx)) {
317 case TLS_WANT_POLLIN:
318 yield_r(req, close_conn, NULL);
319 return;
320 case TLS_WANT_POLLOUT:
321 yield_w(req, close_conn, NULL);
322 return;
325 tls_free(req->ctx);
326 req->ctx = NULL;
329 TAILQ_REMOVE(&reqhead, req, reqs);
330 if (req->fd != -1)
331 close(req->fd);
332 free(req);
335 static void
336 close_with_err(struct req *req, const char *err)
338 net_send_ui(IMSG_ERR, req->id, err, strlen(err)+1);
339 close_conn(0, 0, req);
/* printf-style variant of close_with_err. */
static void
close_with_errf(struct req *req, const char *fmt, ...)
{
	va_list	 ap;
	char	*s;

	va_start(ap, fmt);
	if (vasprintf(&s, fmt, ap) == -1)
		abort();
	va_end(ap);

	close_with_err(req, s);
	free(s);
}
357 static void
358 net_tls_handshake(int fd, short event, void *d)
360 struct req *req = d;
361 const char *hash;
363 if (event == EV_TIMEOUT) {
364 close_with_err(req, "Timeout loading page");
365 return;
368 switch (tls_handshake(req->ctx)) {
369 case TLS_WANT_POLLIN:
370 yield_r(req, net_tls_handshake, NULL);
371 return;
372 case TLS_WANT_POLLOUT:
373 yield_w(req, net_tls_handshake, NULL);
374 return;
377 hash = tls_peer_cert_hash(req->ctx);
378 if (hash == NULL) {
379 close_with_errf(req, "handshake failed: %s",
380 tls_error(req->ctx));
381 return;
383 net_send_ui(IMSG_CHECK_CERT, req->id, hash, strlen(hash)+1);
386 static void
387 net_tls_readcb(int fd, short event, void *d)
389 struct bufferevent *bufev = d;
390 struct req *req = bufev->cbarg;
391 char buf[IBUF_READ_SIZE];
392 int what = EVBUFFER_READ;
393 int howmuch = IBUF_READ_SIZE;
394 int res;
395 ssize_t ret;
396 size_t len;
398 if (event == EV_TIMEOUT) {
399 what |= EVBUFFER_TIMEOUT;
400 goto err;
403 if (bufev->wm_read.high != 0)
404 howmuch = MIN(sizeof(buf), bufev->wm_read.high);
406 switch (ret = tls_read(req->ctx, buf, howmuch)) {
407 case TLS_WANT_POLLIN:
408 case TLS_WANT_POLLOUT:
409 goto retry;
410 case -1:
411 what |= EVBUFFER_ERROR;
412 goto err;
414 len = ret;
416 if (len == 0) {
417 what |= EVBUFFER_EOF;
418 goto err;
421 res = evbuffer_add(bufev->input, buf, len);
422 if (res == -1) {
423 what |= EVBUFFER_ERROR;
424 goto err;
427 event_add(&bufev->ev_read, NULL);
429 len = EVBUFFER_LENGTH(bufev->input);
430 if (bufev->wm_read.low != 0 && len < bufev->wm_read.low)
431 return;
433 if (bufev->readcb != NULL)
434 (*bufev->readcb)(bufev, bufev->cbarg);
435 return;
437 retry:
438 event_add(&bufev->ev_read, NULL);
439 return;
441 err:
442 (*bufev->errorcb)(bufev, what, bufev->cbarg);
445 static void
446 net_tls_writecb(int fd, short event, void *d)
448 struct bufferevent *bufev = d;
449 struct req *req = bufev->cbarg;
450 ssize_t ret;
451 size_t len;
452 short what = EVBUFFER_WRITE;
454 if (event & EV_TIMEOUT) {
455 what |= EVBUFFER_TIMEOUT;
456 goto err;
459 if (EVBUFFER_LENGTH(bufev->output) != 0) {
460 ret = tls_write(req->ctx, EVBUFFER_DATA(bufev->output),
461 EVBUFFER_LENGTH(bufev->output));
462 switch (ret) {
463 case TLS_WANT_POLLIN:
464 case TLS_WANT_POLLOUT:
465 goto retry;
466 case -1:
467 what |= EVBUFFER_ERROR;
468 goto err;
470 len = ret;
472 evbuffer_drain(bufev->output, len);
475 if (EVBUFFER_LENGTH(bufev->output) != 0)
476 event_add(&bufev->ev_write, NULL);
478 if (bufev->writecb != NULL &&
479 EVBUFFER_LENGTH(bufev->output) <= bufev->wm_write.low)
480 (*bufev->writecb)(bufev, bufev->cbarg);
481 return;
483 retry:
484 event_add(&bufev->ev_write, NULL);
485 return;
487 err:
488 (*bufev->errorcb)(bufev, what, bufev->cbarg);
491 static int
492 gemini_parse_reply(struct req *req, const char *header, size_t len)
494 int code;
495 const char *t;
497 if (len < 4)
498 return 0;
500 if (!isdigit(header[0]) || !isdigit(header[1]))
501 return 0;
503 code = (header[0] - '0')*10 + (header[1] - '0');
504 if (header[2] != ' ')
505 return 0;
507 t = header + 3;
509 net_send_ui(IMSG_GOT_CODE, req->id, &code, sizeof(code));
510 net_send_ui(IMSG_GOT_META, req->id, t, strlen(t)+1);
512 bufferevent_disable(req->bev, EV_READ|EV_WRITE);
514 return code;
517 /* called when we're ready to read/write */
518 static void
519 net_ready(struct req *req)
521 req->bev = bufferevent_new(req->fd, net_read, net_write, net_error,
522 req);
523 if (req->bev == NULL)
524 die();
526 #if HAVE_EVENT2
527 evbuffer_unfreeze(req->bev->input, 0);
528 evbuffer_unfreeze(req->bev->output, 1);
529 #endif
531 /* setup tls i/o layer */
532 if (req->ctx != NULL) {
533 event_set(&req->bev->ev_read, req->fd, EV_READ,
534 net_tls_readcb, req->bev);
535 event_set(&req->bev->ev_write, req->fd, EV_WRITE,
536 net_tls_writecb, req->bev);
539 /* TODO: adjust watermarks */
540 bufferevent_setwatermark(req->bev, EV_WRITE, 1, 0);
541 bufferevent_setwatermark(req->bev, EV_READ, 1, 0);
543 bufferevent_enable(req->bev, EV_READ|EV_WRITE);
545 bufferevent_write(req->bev, req->req, req->len);
548 /* called after a read has been done */
549 static void
550 net_read(struct bufferevent *bev, void *d)
552 struct req *req = d;
553 struct evbuffer *src = EVBUFFER_INPUT(bev);
554 uint8_t *data;
555 size_t len, chunk;
556 int r;
557 char *header;
559 if (!req->done_header) {
560 header = evbuffer_readln(src, &len, EVBUFFER_EOL_CRLF_STRICT);
561 if (header == NULL && EVBUFFER_LENGTH(src) >= 1024)
562 goto err;
563 if (header == NULL)
564 return;
565 r = gemini_parse_reply(req, header, len);
566 free(header);
567 req->done_header = 1;
568 if (r == 0)
569 goto err;
570 else if (r < 20 || r >= 30)
571 close_conn(0, 0, req);
572 return;
575 if ((len = EVBUFFER_LENGTH(src)) == 0)
576 return;
577 data = EVBUFFER_DATA(src);
579 /*
580 * Split data into chunks before sending. imsg can't handle
581 * message that are "too big".
582 */
583 while (len != 0) {
584 chunk = MIN(len, 4096);
585 net_send_ui(IMSG_BUF, req->id, data, chunk);
586 data += chunk;
587 len -= chunk;
590 evbuffer_drain(src, EVBUFFER_LENGTH(src));
591 return;
593 err:
594 (*bev->errorcb)(bev, EVBUFFER_READ, bev->cbarg);
597 /* called after a write has been done */
598 static void
599 net_write(struct bufferevent *bev, void *d)
601 struct evbuffer *dst = EVBUFFER_OUTPUT(bev);
603 if (EVBUFFER_LENGTH(dst) == 0)
604 (*bev->errorcb)(bev, EVBUFFER_WRITE, bev->cbarg);
607 static void
608 net_error(struct bufferevent *bev, short error, void *d)
610 struct req *req = d;
611 struct evbuffer *src;
613 if (error & EVBUFFER_TIMEOUT) {
614 close_with_err(req, "Timeout loading page");
615 return;
618 if (error & EVBUFFER_ERROR) {
619 close_with_err(req, "buffer event error");
620 return;
623 if (error & EVBUFFER_EOF) {
624 /* EOF and no header */
625 if (!req->done_header) {
626 close_with_err(req, "protocol error");
627 return;
630 src = EVBUFFER_INPUT(req->bev);
631 if (EVBUFFER_LENGTH(src) != 0)
632 net_send_ui(IMSG_BUF, req->id, EVBUFFER_DATA(src),
633 EVBUFFER_LENGTH(src));
634 net_send_ui(IMSG_EOF, req->id, NULL, 0);
635 close_conn(0, 0, req);
636 return;
639 if (error & EVBUFFER_WRITE) {
640 /* finished sending request */
641 bufferevent_disable(bev, EV_WRITE);
642 return;
645 if (error & EVBUFFER_READ) {
646 close_with_err(req, "protocol error");
647 return;
650 close_with_errf(req, "unknown event error %x", error);
653 static void
654 handle_get_raw(struct imsg *imsg, size_t datalen)
656 struct req *req;
657 struct get_req *r;
659 r = imsg->data;
661 if (datalen != sizeof(*r))
662 die();
664 if ((req = calloc(1, sizeof(*req))) == NULL)
665 die();
667 req->id = imsg->hdr.peerid;
668 TAILQ_INSERT_HEAD(&reqhead, req, reqs);
670 strlcpy(req->url.host, r->host, sizeof(req->url.host));
671 strlcpy(req->url.port, r->port, sizeof(req->url.port));
673 strlcpy(req->req, r->req, sizeof(req->req));
674 req->len = strlen(r->req);
676 req->proto = r->proto;
678 #if HAVE_ASR_RUN
679 async_conn_towards(req);
680 #else
681 blocking_conn_towards(req);
682 #endif
685 static void
686 handle_cert_status(struct imsg *imsg, size_t datalen)
688 struct req *req;
689 int is_ok;
691 req = req_by_id(imsg->hdr.peerid);
693 if (datalen < sizeof(is_ok))
694 die();
695 memcpy(&is_ok, imsg->data, sizeof(is_ok));
697 if (is_ok)
698 net_ready(req);
699 else
700 close_conn(0, 0, req);
703 static void
704 handle_proceed(struct imsg *imsg, size_t datalen)
706 struct req *req;
708 if ((req = req_by_id(imsg->hdr.peerid)) == NULL)
709 return;
711 bufferevent_enable(req->bev, EV_READ);
714 static void
715 handle_stop(struct imsg *imsg, size_t datalen)
717 struct req *req;
719 if ((req = req_by_id(imsg->hdr.peerid)) == NULL)
720 return;
721 close_conn(0, 0, req);
724 static void
725 handle_quit(struct imsg *imsg, size_t datalen)
727 event_loopbreak();
730 static void
731 handle_dispatch_imsg(int fd, short ev, void *d)
733 struct imsgev *iev = d;
735 if (dispatch_imsg(iev, ev, handlers, sizeof(handlers)) == -1)
736 err(1, "connection closed");
739 static int
740 net_send_ui(int type, uint32_t peerid, const void *data,
741 uint16_t datalen)
743 return imsg_compose_event(iev_ui, type, peerid, 0, -1,
744 data, datalen);
747 int
748 net_main(void)
750 setproctitle("net");
752 TAILQ_INIT(&reqhead);
754 event_init();
756 /* Setup pipe and event handler to the main process */
757 if ((iev_ui = malloc(sizeof(*iev_ui))) == NULL)
758 die();
759 imsg_init(&iev_ui->ibuf, 3);
760 iev_ui->handler = handle_dispatch_imsg;
761 iev_ui->events = EV_READ;
762 event_set(&iev_ui->ev, iev_ui->ibuf.fd, iev_ui->events,
763 iev_ui->handler, iev_ui);
764 event_add(&iev_ui->ev, NULL);
766 sandbox_net_process();
768 event_dispatch();
770 msgbuf_clear(&iev_ui->ibuf.w);
771 close(iev_ui->ibuf.fd);
772 free(iev_ui);
774 return 0;