Blob


/*
 * Copyright (c) 2021 Omar Polo <op@omarpolo.com>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */
17 #include "compat.h"
19 #include <sys/types.h>
20 #include <sys/socket.h>
22 #include <netinet/in.h>
24 #include <assert.h>
25 #include <ctype.h>
26 #include <errno.h>
27 #include <netdb.h>
28 #include <stdarg.h>
29 #include <stdio.h>
30 #include <stdlib.h>
31 #include <string.h>
32 #include <tls.h>
33 #include <unistd.h>
35 #if HAVE_ASR_RUN
36 # include <asr.h>
37 #endif
39 #include "telescope.h"
41 static struct imsgev *iev_ui;
42 static struct tls_config *tlsconf;
44 struct req;
46 static void die(void) __attribute__((__noreturn__));
48 static void try_to_connect(int, short, void*);
50 #if HAVE_ASR_RUN
51 static void query_done(struct asr_result*, void*);
52 static void async_conn_towards(struct req*);
53 #else
54 static void blocking_conn_towards(struct req*);
55 #endif
57 static void close_with_err(struct req*, const char*);
58 static void close_with_errf(struct req*, const char*, ...) __attribute__((format(printf, 2, 3)));
59 static struct req *req_by_id(uint32_t);
60 static struct req *req_by_id_try(uint32_t);
62 static void setup_tls(struct req*);
64 static void net_tls_handshake(int, short, void *);
65 static void net_tls_readcb(int, short, void *);
66 static void net_tls_writecb(int, short, void *);
68 static int gemini_parse_reply(struct req *, const char *, size_t);
70 static void net_ready(struct req *req);
71 static void net_read(struct bufferevent *, void *);
72 static void net_write(struct bufferevent *, void *);
73 static void net_error(struct bufferevent *, short, void *);
75 static void handle_get_raw(struct imsg *, size_t);
76 static void handle_cert_status(struct imsg*, size_t);
77 static void handle_proceed(struct imsg*, size_t);
78 static void handle_stop(struct imsg*, size_t);
79 static void handle_quit(struct imsg*, size_t);
80 static void handle_dispatch_imsg(int, short, void*);
82 static int net_send_ui(int, uint32_t, const void *, uint16_t);
84 /* TODO: making this customizable */
85 struct timeval timeout_for_handshake = { 5, 0 };
87 static imsg_handlerfn *handlers[] = {
88 [IMSG_GET_RAW] = handle_get_raw,
89 [IMSG_CERT_STATUS] = handle_cert_status,
90 [IMSG_PROCEED] = handle_proceed,
91 [IMSG_STOP] = handle_stop,
92 [IMSG_QUIT] = handle_quit,
93 };
95 typedef void (*statefn)(int, short, void*);
97 TAILQ_HEAD(, req) reqhead;
98 /* a pending request */
99 struct req {
100 struct phos_uri url;
101 uint32_t id;
102 int fd;
103 struct tls *ctx;
104 char req[1024];
105 size_t len;
106 int done_header;
107 struct bufferevent *bev;
109 struct addrinfo *servinfo, *p;
110 #if HAVE_ASR_RUN
111 struct addrinfo hints;
112 struct event_asr *asrev;
113 #endif
115 TAILQ_ENTRY(req) reqs;
116 };
118 static inline void
119 yield_r(struct req *req, statefn fn, struct timeval *tv)
121 event_once(req->fd, EV_READ, fn, req, tv);
124 static inline void
125 yield_w(struct req *req, statefn fn, struct timeval *tv)
127 event_once(req->fd, EV_WRITE, fn, req, tv);
/* Fatal internal inconsistency: give up immediately. */
static void __attribute__((__noreturn__))
die(void)
{
	abort();	/* TODO */
}
136 static void
137 try_to_connect(int fd, short ev, void *d)
139 struct req *req = d;
140 int error = 0;
141 socklen_t len = sizeof(error);
143 again:
144 if (req->p == NULL)
145 goto err;
147 if (req->fd != -1) {
148 if (getsockopt(req->fd, SOL_SOCKET, SO_ERROR, &error, &len) == -1)
149 goto err;
150 if (error != 0) {
151 errno = error;
152 goto err;
154 goto done;
157 req->fd = socket(req->p->ai_family, req->p->ai_socktype, req->p->ai_protocol);
158 if (req->fd == -1) {
159 req->p = req->p->ai_next;
160 goto again;
161 } else {
162 mark_nonblock(req->fd);
163 if (connect(req->fd, req->p->ai_addr, req->p->ai_addrlen) == 0)
164 goto done;
165 yield_w(req, try_to_connect, NULL);
167 return;
169 err:
170 freeaddrinfo(req->servinfo);
171 close_with_errf(req, "failed to connect to %s",
172 req->url.host);
173 return;
175 done:
176 freeaddrinfo(req->servinfo);
177 setup_tls(req);
180 #if HAVE_ASR_RUN
181 static void
182 query_done(struct asr_result *res, void *d)
184 struct req *req = d;
186 req->asrev = NULL;
187 if (res->ar_gai_errno != 0) {
188 close_with_errf(req, "failed to resolve %s: %s",
189 req->url.host, gai_strerror(res->ar_gai_errno));
190 return;
193 req->fd = -1;
194 req->servinfo = res->ar_addrinfo;
195 req->p = res->ar_addrinfo;
196 try_to_connect(0, 0, req);
/* Kick off an asynchronous DNS lookup for req (port 1965 by default). */
static void
async_conn_towards(struct req *req)
{
	struct asr_query	*q;
	const char		*proto = "1965";

	if (*req->url.port != '\0')
		proto = req->url.port;

	req->hints.ai_family = AF_UNSPEC;
	req->hints.ai_socktype = SOCK_STREAM;
	q = getaddrinfo_async(req->url.host, proto, &req->hints, NULL);
	req->asrev = event_asr_run(q, query_done, req);
}
213 #else
/* Resolve req's host synchronously, then start connecting. */
static void
blocking_conn_towards(struct req *req)
{
	struct addrinfo	 hints;
	struct phos_uri	*url = &req->url;
	int		 status;
	const char	*proto = "1965";

	if (*url->port != '\0')
		proto = url->port;

	memset(&hints, 0, sizeof(hints));
	hints.ai_family = AF_UNSPEC;
	hints.ai_socktype = SOCK_STREAM;

	if ((status = getaddrinfo(url->host, proto, &hints, &req->servinfo))) {
		close_with_errf(req, "failed to resolve %s: %s",
		    url->host, gai_strerror(status));
		return;
	}

	req->fd = -1;
	req->p = req->servinfo;
	try_to_connect(0, 0, req);
}
239 #endif
241 static struct req *
242 req_by_id(uint32_t id)
244 struct req *r;
246 if ((r = req_by_id_try(id)) == NULL)
247 die();
248 return r;
251 static struct req *
252 req_by_id_try(uint32_t id)
254 struct req *r;
256 TAILQ_FOREACH(r, &reqhead, reqs) {
257 if (r->id == id)
258 return r;
261 return NULL;
264 static void
265 close_conn(int fd, short ev, void *d)
267 struct req *req = d;
269 #if HAVE_ASR_RUN
270 if (req->asrev != NULL)
271 event_asr_abort(req->asrev);
272 #endif
274 if (req->bev != NULL) {
275 bufferevent_free(req->bev);
276 req->bev = NULL;
279 if (req->ctx != NULL) {
280 switch (tls_close(req->ctx)) {
281 case TLS_WANT_POLLIN:
282 yield_r(req, close_conn, NULL);
283 return;
284 case TLS_WANT_POLLOUT:
285 yield_w(req, close_conn, NULL);
286 return;
289 tls_free(req->ctx);
290 req->ctx = NULL;
293 TAILQ_REMOVE(&reqhead, req, reqs);
294 if (req->fd != -1)
295 close(req->fd);
296 free(req);
299 static void
300 close_with_err(struct req *req, const char *err)
302 net_send_ui(IMSG_ERR, req->id, err, strlen(err)+1);
303 close_conn(0, 0, req);
/* printf(3)-style wrapper around close_with_err. */
static void
close_with_errf(struct req *req, const char *fmt, ...)
{
	va_list	 ap;
	char	*s;

	va_start(ap, fmt);
	if (vasprintf(&s, fmt, ap) == -1)
		abort();
	va_end(ap);

	close_with_err(req, s);
	free(s);
}
321 static void
322 setup_tls(struct req *req)
324 if ((req->ctx = tls_client()) == NULL) {
325 close_with_errf(req, "tls_client: %s", strerror(errno));
326 return;
328 if (tls_configure(req->ctx, tlsconf) == -1) {
329 close_with_errf(req, "tls_configure: %s", tls_error(req->ctx));
330 return;
332 if (tls_connect_socket(req->ctx, req->fd, req->url.host) == -1) {
333 close_with_errf(req, "tls_connect_socket: %s", tls_error(req->ctx));
334 return;
336 yield_w(req, net_tls_handshake, &timeout_for_handshake);
339 static void
340 net_tls_handshake(int fd, short event, void *d)
342 struct req *req = d;
343 const char *hash;
345 if (event == EV_TIMEOUT) {
346 close_with_err(req, "Timeout loading page");
347 return;
350 switch (tls_handshake(req->ctx)) {
351 case TLS_WANT_POLLIN:
352 yield_r(req, net_tls_handshake, NULL);
353 return;
354 case TLS_WANT_POLLOUT:
355 yield_w(req, net_tls_handshake, NULL);
356 return;
359 hash = tls_peer_cert_hash(req->ctx);
360 if (hash == NULL) {
361 close_with_errf(req, "handshake failed: %s", tls_error(req->ctx));
362 return;
364 net_send_ui(IMSG_CHECK_CERT, req->id, hash, strlen(hash)+1);
367 static void
368 net_tls_readcb(int fd, short event, void *d)
370 struct bufferevent *bufev = d;
371 struct req *req = bufev->cbarg;
372 char buf[BUFSIZ];
373 int what = EVBUFFER_READ;
374 int howmuch = IBUF_READ_SIZE;
375 ssize_t ret;
376 size_t len;
378 if (event == EV_TIMEOUT) {
379 what |= EVBUFFER_TIMEOUT;
380 goto err;
383 if (bufev->wm_read.high != 0)
384 howmuch = MIN(sizeof(buf), bufev->wm_read.high);
386 switch (ret = tls_read(req->ctx, buf, howmuch)) {
387 case TLS_WANT_POLLIN:
388 case TLS_WANT_POLLOUT:
389 goto retry;
390 case -1:
391 what |= EVBUFFER_ERROR;
392 goto err;
394 len = ret;
396 if (len == 0) {
397 what |= EVBUFFER_EOF;
398 goto err;
401 if (evbuffer_add(bufev->input, buf, len) == -1) {
402 what |= EVBUFFER_ERROR;
403 goto err;
406 event_add(&bufev->ev_read, NULL);
408 len = EVBUFFER_LENGTH(bufev->input);
409 if (bufev->wm_read.low != 0 && len < bufev->wm_read.low)
410 return;
412 if (bufev->readcb != NULL)
413 (*bufev->readcb)(bufev, bufev->cbarg);
414 return;
416 retry:
417 event_add(&bufev->ev_read, NULL);
418 return;
420 err:
421 (*bufev->errorcb)(bufev, what, bufev->cbarg);
424 static void
425 net_tls_writecb(int fd, short event, void *d)
427 struct bufferevent *bufev = d;
428 struct req *req = bufev->cbarg;
429 ssize_t ret;
430 size_t len;
431 short what = EVBUFFER_WRITE;
433 if (event & EV_TIMEOUT) {
434 what |= EVBUFFER_TIMEOUT;
435 goto err;
438 if (EVBUFFER_LENGTH(bufev->output) != 0) {
439 ret = tls_write(req->ctx, EVBUFFER_DATA(bufev->output),
440 EVBUFFER_LENGTH(bufev->output));
441 switch (ret) {
442 case TLS_WANT_POLLIN:
443 case TLS_WANT_POLLOUT:
444 goto retry;
445 case -1:
446 what |= EVBUFFER_ERROR;
447 goto err;
449 len = ret;
450 evbuffer_drain(bufev->output, len);
453 if (EVBUFFER_LENGTH(bufev->output) != 0)
454 event_add(&bufev->ev_write, NULL);
456 if (bufev->writecb != NULL &&
457 EVBUFFER_LENGTH(bufev->output) <= bufev->wm_write.low)
458 (*bufev->writecb)(bufev, bufev->cbarg);
459 return;
461 retry:
462 event_add(&bufev->ev_write, NULL);
463 return;
465 err:
466 (*bufev->errorcb)(bufev, what, bufev->cbarg);
469 static int
470 gemini_parse_reply(struct req *req, const char *header, size_t len)
472 int code;
473 const char *t;
475 if (len < 4)
476 return 0;
478 if (!isdigit(header[0]) || !isdigit(header[1]))
479 return 0;
481 code = (header[0] - '0')*10 + (header[1] - '0');
482 if (header[2] != ' ')
483 return 0;
485 t = header + 3;
487 net_send_ui(IMSG_GOT_CODE, req->id, &code, sizeof(code));
488 net_send_ui(IMSG_GOT_META, req->id, t, strlen(t)+1);
490 bufferevent_disable(req->bev, EV_READ|EV_WRITE);
492 if (code < 20 || code >= 30)
493 close_conn(0, 0, req);
494 return 1;
497 /* called when we're ready to read/write */
498 static void
499 net_ready(struct req *req)
501 req->bev = bufferevent_new(req->fd, net_read, net_write, net_error,
502 req);
503 if (req->bev == NULL)
504 die();
506 /* setup tls i/o layer */
507 if (req->ctx != NULL) {
508 event_set(&req->bev->ev_read, req->fd, EV_READ,
509 net_tls_readcb, req->bev);
510 event_set(&req->bev->ev_write, req->fd, EV_WRITE,
511 net_tls_writecb, req->bev);
514 /* TODO: adjust watermarks */
515 bufferevent_setwatermark(req->bev, EV_WRITE, 1, 0);
516 bufferevent_setwatermark(req->bev, EV_READ, 1, 0);
518 bufferevent_enable(req->bev, EV_READ|EV_WRITE);
520 bufferevent_write(req->bev, req->req, req->len);
523 /* called after a read has been done */
524 static void
525 net_read(struct bufferevent *bev, void *d)
527 struct req *req = d;
528 struct evbuffer *src = EVBUFFER_INPUT(bev);
529 void *data;
530 size_t len;
531 int r;
532 char *header;
534 if (!req->done_header) {
535 header = evbuffer_readln(src, &len, EVBUFFER_EOL_CRLF_STRICT);
536 if (header == NULL && EVBUFFER_LENGTH(src) >= 1024)
537 goto err;
538 if (header == NULL)
539 return;
540 r = gemini_parse_reply(req, header, len);
541 free(header);
542 if (!r)
543 goto err;
544 req->done_header = 1;
545 return;
548 if ((len = EVBUFFER_LENGTH(src)) == 0)
549 return;
550 data = EVBUFFER_DATA(src);
551 net_send_ui(IMSG_BUF, req->id, data, len);
552 evbuffer_drain(src, len);
553 return;
555 err:
556 (*bev->errorcb)(bev, EVBUFFER_READ, bev->cbarg);
559 /* called after a write has been done */
560 static void
561 net_write(struct bufferevent *bev, void *d)
563 struct evbuffer *dst = EVBUFFER_OUTPUT(bev);
565 if (EVBUFFER_LENGTH(dst) == 0)
566 (*bev->errorcb)(bev, EVBUFFER_WRITE, bev->cbarg);
569 static void
570 net_error(struct bufferevent *bev, short error, void *d)
572 struct req *req = d;
574 if (error & EVBUFFER_TIMEOUT) {
575 close_with_err(req, "Timeout loading page");
576 return;
579 if (error & EVBUFFER_ERROR) {
580 close_with_err(req, "buffer event error");
581 return;
584 if (error & EVBUFFER_EOF) {
585 net_send_ui(IMSG_EOF, req->id, NULL, 0);
586 close_conn(0, 0, req);
587 return;
590 if (error & EVBUFFER_WRITE) {
591 /* finished sending request */
592 bufferevent_disable(bev, EV_WRITE);
593 return;
596 if (error & EVBUFFER_READ) {
597 close_with_err(req, "protocol error");
598 return;
601 close_with_errf(req, "unknown event error %x", error);
604 static void
605 handle_get_raw(struct imsg *imsg, size_t datalen)
607 struct req *req;
608 struct get_req *r;
610 r = imsg->data;
612 if (datalen != sizeof(*r))
613 die();
615 if ((req = calloc(1, sizeof(*req))) == NULL)
616 die();
618 req->id = imsg->hdr.peerid;
619 TAILQ_INSERT_HEAD(&reqhead, req, reqs);
621 strlcpy(req->url.host, r->host, sizeof(req->url.host));
622 strlcpy(req->url.port, r->port, sizeof(req->url.port));
624 strlcpy(req->req, r->req, sizeof(req->req));
625 req->len = strlen(r->req);
627 #if HAVE_ASR_RUN
628 async_conn_towards(req);
629 #else
630 blocking_conn_towards(req);
631 #endif
634 static void
635 handle_cert_status(struct imsg *imsg, size_t datalen)
637 struct req *req;
638 int is_ok;
640 req = req_by_id(imsg->hdr.peerid);
642 if (datalen < sizeof(is_ok))
643 die();
644 memcpy(&is_ok, imsg->data, sizeof(is_ok));
646 if (is_ok)
647 net_ready(req);
648 else
649 close_conn(0, 0, req);
652 static void
653 handle_proceed(struct imsg *imsg, size_t datalen)
655 struct req *req;
657 if ((req = req_by_id_try(imsg->hdr.peerid)) == NULL)
658 return;
660 bufferevent_enable(req->bev, EV_READ);
663 static void
664 handle_stop(struct imsg *imsg, size_t datalen)
666 struct req *req;
668 if ((req = req_by_id_try(imsg->hdr.peerid)) == NULL)
669 return;
670 close_conn(0, 0, req);
673 static void
674 handle_quit(struct imsg *imsg, size_t datalen)
676 event_loopbreak();
679 static void
680 handle_dispatch_imsg(int fd, short ev, void *d)
682 struct imsgev *iev = d;
684 if (dispatch_imsg(iev, ev, handlers, sizeof(handlers)) == -1)
685 err(1, "connection closed");
688 static int
689 net_send_ui(int type, uint32_t peerid, const void *data,
690 uint16_t datalen)
692 return imsg_compose_event(iev_ui, type, peerid, 0, -1,
693 data, datalen);
696 int
697 net_main(void)
699 setproctitle("net");
701 TAILQ_INIT(&reqhead);
703 if ((tlsconf = tls_config_new()) == NULL)
704 die();
705 tls_config_insecure_noverifycert(tlsconf);
706 tls_config_insecure_noverifyname(tlsconf);
708 event_init();
710 /* Setup pipe and event handler to the main process */
711 if ((iev_ui = malloc(sizeof(*iev_ui))) == NULL)
712 die();
713 imsg_init(&iev_ui->ibuf, 3);
714 iev_ui->handler = handle_dispatch_imsg;
715 iev_ui->events = EV_READ;
716 event_set(&iev_ui->ev, iev_ui->ibuf.fd, iev_ui->events,
717 iev_ui->handler, iev_ui);
718 event_add(&iev_ui->ev, NULL);
720 sandbox_net_process();
722 event_dispatch();
724 tls_config_free(tlsconf);
725 msgbuf_clear(&iev_ui->ibuf.w);
726 close(iev_ui->ibuf.fd);
727 free(iev_ui);
729 return 0;