Blob


1 /*
2 * Copyright (c) 2021 Omar Polo <op@omarpolo.com>
3 *
4 * Permission to use, copy, modify, and distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above
6 * copyright notice and this permission notice appear in all copies.
7 *
8 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
9 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
10 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
11 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
12 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
13 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15 */
17 #include "compat.h"
19 #include <sys/types.h>
20 #include <sys/socket.h>
22 #include <netinet/in.h>
24 #include <assert.h>
25 #include <ctype.h>
26 #include <errno.h>
27 #include <netdb.h>
28 #include <stdarg.h>
29 #include <stdio.h>
30 #include <stdlib.h>
31 #include <string.h>
32 #include <tls.h>
33 #include <unistd.h>
35 #if HAVE_ASR_RUN
36 # include <asr.h>
37 #endif
39 #include "telescope.h"
41 static struct imsgev *iev_ui;
43 /* a pending request */
44 struct req {
45 struct phos_uri url;
46 uint32_t id;
47 int proto;
48 int fd;
49 struct tls *ctx;
50 char req[1024];
51 size_t len;
52 int done_header;
53 struct bufferevent *bev;
55 struct addrinfo *servinfo, *p;
56 #if HAVE_ASR_RUN
57 struct addrinfo hints;
58 struct event_asr *asrev;
59 #endif
61 TAILQ_ENTRY(req) reqs;
62 };
/* request lookup */
static struct req	*req_by_id(uint32_t);

static void		 die(void) __attribute__((__noreturn__));

/* connection establishment */
static void		 try_to_connect(int, short, void*);

#if HAVE_ASR_RUN
static void		 query_done(struct asr_result*, void*);
static void		 async_conn_towards(struct req*);
#else
static void		 blocking_conn_towards(struct req*);
#endif

/* teardown helpers */
static void		 close_with_err(struct req*, const char*);
static void		 close_with_errf(struct req*, const char*, ...)
    __attribute__((format(printf, 2, 3)));

/* TLS <-> bufferevent glue */
static void		 net_tls_handshake(int, short, void *);
static void		 net_tls_readcb(int, short, void *);
static void		 net_tls_writecb(int, short, void *);

static int		 gemini_parse_reply(struct req *, const char *, size_t);

/* bufferevent callbacks */
static void		 net_ready(struct req *req);
static void		 net_read(struct bufferevent *, void *);
static void		 net_write(struct bufferevent *, void *);
static void		 net_error(struct bufferevent *, short, void *);

/* imsg handlers */
static void		 handle_get_raw(struct imsg *, size_t);
static void		 handle_cert_status(struct imsg*, size_t);
static void		 handle_proceed(struct imsg*, size_t);
static void		 handle_stop(struct imsg*, size_t);
static void		 handle_quit(struct imsg*, size_t);
static void		 handle_dispatch_imsg(int, short, void*);

static int		 net_send_ui(int, uint32_t, const void *, uint16_t);
101 /* TODO: making this customizable */
102 struct timeval timeout_for_handshake = { 5, 0 };
104 static imsg_handlerfn *handlers[] = {
105 [IMSG_GET_RAW] = handle_get_raw,
106 [IMSG_CERT_STATUS] = handle_cert_status,
107 [IMSG_PROCEED] = handle_proceed,
108 [IMSG_STOP] = handle_stop,
109 [IMSG_QUIT] = handle_quit,
110 };
112 typedef void (*statefn)(int, short, void*);
114 TAILQ_HEAD(, req) reqhead;
116 static inline void
117 yield_r(struct req *req, statefn fn, struct timeval *tv)
119 event_once(req->fd, EV_READ, fn, req, tv);
122 static inline void
123 yield_w(struct req *req, statefn fn, struct timeval *tv)
125 event_once(req->fd, EV_WRITE, fn, req, tv);
128 static struct req *
129 req_by_id(uint32_t id)
131 struct req *r;
133 TAILQ_FOREACH(r, &reqhead, reqs) {
134 if (r->id == id)
135 return r;
138 return NULL;
/* Last-resort fatal error handler. */
static void __attribute__((__noreturn__))
die(void)
{
	abort();	/* TODO */
}
147 static void
148 try_to_connect(int fd, short ev, void *d)
150 struct req *req = d;
151 int error = 0;
152 socklen_t len = sizeof(error);
154 again:
155 if (req->p == NULL)
156 goto err;
158 if (req->fd != -1) {
159 if (getsockopt(req->fd, SOL_SOCKET, SO_ERROR, &error,
160 &len) == -1)
161 goto err;
162 if (error != 0) {
163 errno = error;
164 goto err;
166 goto done;
169 req->fd = socket(req->p->ai_family, req->p->ai_socktype,
170 req->p->ai_protocol);
171 if (req->fd == -1) {
172 req->p = req->p->ai_next;
173 goto again;
174 } else {
175 mark_nonblock(req->fd);
176 if (connect(req->fd, req->p->ai_addr, req->p->ai_addrlen) == 0)
177 goto done;
178 yield_w(req, try_to_connect, NULL);
180 return;
182 err:
183 freeaddrinfo(req->servinfo);
184 close_with_errf(req, "failed to connect to %s",
185 req->url.host);
186 return;
188 done:
189 freeaddrinfo(req->servinfo);
191 switch (req->proto) {
192 case PROTO_FINGER:
193 case PROTO_GOPHER:
194 /* finger and gopher don't have a header nor TLS */
195 req->done_header = 1;
196 net_ready(req);
197 break;
199 case PROTO_GEMINI: {
200 struct tls_config *conf;
202 if ((conf = tls_config_new()) == NULL)
203 die();
205 tls_config_insecure_noverifycert(conf);
206 tls_config_insecure_noverifyname(conf);
208 /* prepare tls */
209 if ((req->ctx = tls_client()) == NULL) {
210 close_with_errf(req, "tls_client: %s",
211 strerror(errno));
212 return;
215 if (tls_configure(req->ctx, conf) == -1) {
216 close_with_errf(req, "tls_configure: %s",
217 tls_error(req->ctx));
218 return;
220 tls_config_free(conf);
222 if (tls_connect_socket(req->ctx, req->fd, req->url.host)
223 == -1) {
224 close_with_errf(req, "tls_connect_socket: %s",
225 tls_error(req->ctx));
226 return;
228 yield_w(req, net_tls_handshake, &timeout_for_handshake);
229 break;
232 default:
233 die();
237 #if HAVE_ASR_RUN
238 static void
239 query_done(struct asr_result *res, void *d)
241 struct req *req = d;
243 req->asrev = NULL;
244 if (res->ar_gai_errno != 0) {
245 close_with_errf(req, "failed to resolve %s: %s",
246 req->url.host, gai_strerror(res->ar_gai_errno));
247 return;
250 req->fd = -1;
251 req->servinfo = res->ar_addrinfo;
252 req->p = res->ar_addrinfo;
253 try_to_connect(0, 0, req);
/* Kick off an asynchronous name resolution for req's host. */
static void
async_conn_towards(struct req *req)
{
	struct asr_query	*q;
	const char		*port = "1965";	/* default gemini port */

	if (*req->url.port != '\0')
		port = req->url.port;

	req->hints.ai_family = AF_UNSPEC;
	req->hints.ai_socktype = SOCK_STREAM;
	q = getaddrinfo_async(req->url.host, port, &req->hints, NULL);
	req->asrev = event_asr_run(q, query_done, req);
}
270 #else
/* Resolve req's host synchronously, then start connecting. */
static void
blocking_conn_towards(struct req *req)
{
	struct addrinfo	 hints;
	struct phos_uri	*url = &req->url;
	int		 status;
	const char	*port = "1965";	/* default gemini port */

	if (*url->port != '\0')
		port = url->port;

	memset(&hints, 0, sizeof(hints));
	hints.ai_family = AF_UNSPEC;
	hints.ai_socktype = SOCK_STREAM;

	if ((status = getaddrinfo(url->host, port, &hints, &req->servinfo))) {
		close_with_errf(req, "failed to resolve %s: %s",
		    url->host, gai_strerror(status));
		return;
	}

	req->fd = -1;
	req->p = req->servinfo;
	try_to_connect(0, 0, req);
}
296 #endif
298 static void
299 close_conn(int fd, short ev, void *d)
301 struct req *req = d;
303 #if HAVE_ASR_RUN
304 if (req->asrev != NULL)
305 event_asr_abort(req->asrev);
306 #endif
308 if (req->bev != NULL) {
309 bufferevent_free(req->bev);
310 req->bev = NULL;
313 if (req->ctx != NULL) {
314 switch (tls_close(req->ctx)) {
315 case TLS_WANT_POLLIN:
316 yield_r(req, close_conn, NULL);
317 return;
318 case TLS_WANT_POLLOUT:
319 yield_w(req, close_conn, NULL);
320 return;
323 tls_free(req->ctx);
324 req->ctx = NULL;
327 TAILQ_REMOVE(&reqhead, req, reqs);
328 if (req->fd != -1)
329 close(req->fd);
330 free(req);
333 static void
334 close_with_err(struct req *req, const char *err)
336 net_send_ui(IMSG_ERR, req->id, err, strlen(err)+1);
337 close_conn(0, 0, req);
/* printf(3)-style variant of close_with_err. */
static void
close_with_errf(struct req *req, const char *fmt, ...)
{
	va_list	 ap;
	char	*s;

	va_start(ap, fmt);
	if (vasprintf(&s, fmt, ap) == -1)
		abort();
	va_end(ap);

	close_with_err(req, s);
	free(s);
}
355 static void
356 net_tls_handshake(int fd, short event, void *d)
358 struct req *req = d;
359 const char *hash;
361 if (event == EV_TIMEOUT) {
362 close_with_err(req, "Timeout loading page");
363 return;
366 switch (tls_handshake(req->ctx)) {
367 case TLS_WANT_POLLIN:
368 yield_r(req, net_tls_handshake, NULL);
369 return;
370 case TLS_WANT_POLLOUT:
371 yield_w(req, net_tls_handshake, NULL);
372 return;
375 hash = tls_peer_cert_hash(req->ctx);
376 if (hash == NULL) {
377 close_with_errf(req, "handshake failed: %s",
378 tls_error(req->ctx));
379 return;
381 net_send_ui(IMSG_CHECK_CERT, req->id, hash, strlen(hash)+1);
384 static void
385 net_tls_readcb(int fd, short event, void *d)
387 struct bufferevent *bufev = d;
388 struct req *req = bufev->cbarg;
389 char buf[IBUF_READ_SIZE];
390 int what = EVBUFFER_READ;
391 int howmuch = IBUF_READ_SIZE;
392 int res;
393 ssize_t ret;
394 size_t len;
396 if (event == EV_TIMEOUT) {
397 what |= EVBUFFER_TIMEOUT;
398 goto err;
401 if (bufev->wm_read.high != 0)
402 howmuch = MIN(sizeof(buf), bufev->wm_read.high);
404 switch (ret = tls_read(req->ctx, buf, howmuch)) {
405 case TLS_WANT_POLLIN:
406 case TLS_WANT_POLLOUT:
407 goto retry;
408 case -1:
409 what |= EVBUFFER_ERROR;
410 goto err;
412 len = ret;
414 if (len == 0) {
415 what |= EVBUFFER_EOF;
416 goto err;
419 res = evbuffer_add(bufev->input, buf, len);
420 if (res == -1) {
421 what |= EVBUFFER_ERROR;
422 goto err;
425 event_add(&bufev->ev_read, NULL);
427 len = EVBUFFER_LENGTH(bufev->input);
428 if (bufev->wm_read.low != 0 && len < bufev->wm_read.low)
429 return;
431 if (bufev->readcb != NULL)
432 (*bufev->readcb)(bufev, bufev->cbarg);
433 return;
435 retry:
436 event_add(&bufev->ev_read, NULL);
437 return;
439 err:
440 (*bufev->errorcb)(bufev, what, bufev->cbarg);
443 static void
444 net_tls_writecb(int fd, short event, void *d)
446 struct bufferevent *bufev = d;
447 struct req *req = bufev->cbarg;
448 ssize_t ret;
449 size_t len;
450 short what = EVBUFFER_WRITE;
452 if (event & EV_TIMEOUT) {
453 what |= EVBUFFER_TIMEOUT;
454 goto err;
457 if (EVBUFFER_LENGTH(bufev->output) != 0) {
458 ret = tls_write(req->ctx, EVBUFFER_DATA(bufev->output),
459 EVBUFFER_LENGTH(bufev->output));
460 switch (ret) {
461 case TLS_WANT_POLLIN:
462 case TLS_WANT_POLLOUT:
463 goto retry;
464 case -1:
465 what |= EVBUFFER_ERROR;
466 goto err;
468 len = ret;
470 evbuffer_drain(bufev->output, len);
473 if (EVBUFFER_LENGTH(bufev->output) != 0)
474 event_add(&bufev->ev_write, NULL);
476 if (bufev->writecb != NULL &&
477 EVBUFFER_LENGTH(bufev->output) <= bufev->wm_write.low)
478 (*bufev->writecb)(bufev, bufev->cbarg);
479 return;
481 retry:
482 event_add(&bufev->ev_write, NULL);
483 return;
485 err:
486 (*bufev->errorcb)(bufev, what, bufev->cbarg);
489 static int
490 gemini_parse_reply(struct req *req, const char *header, size_t len)
492 int code;
493 const char *t;
495 if (len < 4)
496 return 0;
498 if (!isdigit(header[0]) || !isdigit(header[1]))
499 return 0;
501 code = (header[0] - '0')*10 + (header[1] - '0');
502 if (header[2] != ' ')
503 return 0;
505 t = header + 3;
507 net_send_ui(IMSG_GOT_CODE, req->id, &code, sizeof(code));
508 net_send_ui(IMSG_GOT_META, req->id, t, strlen(t)+1);
510 bufferevent_disable(req->bev, EV_READ|EV_WRITE);
512 if (code < 20 || code >= 30)
513 close_conn(0, 0, req);
514 return 1;
517 /* called when we're ready to read/write */
518 static void
519 net_ready(struct req *req)
521 req->bev = bufferevent_new(req->fd, net_read, net_write, net_error,
522 req);
523 if (req->bev == NULL)
524 die();
526 #if HAVE_EVENT2
527 evbuffer_unfreeze(req->bev->input, 0);
528 evbuffer_unfreeze(req->bev->output, 1);
529 #endif
531 /* setup tls i/o layer */
532 if (req->ctx != NULL) {
533 event_set(&req->bev->ev_read, req->fd, EV_READ,
534 net_tls_readcb, req->bev);
535 event_set(&req->bev->ev_write, req->fd, EV_WRITE,
536 net_tls_writecb, req->bev);
539 /* TODO: adjust watermarks */
540 bufferevent_setwatermark(req->bev, EV_WRITE, 1, 0);
541 bufferevent_setwatermark(req->bev, EV_READ, 1, 0);
543 bufferevent_enable(req->bev, EV_READ|EV_WRITE);
545 bufferevent_write(req->bev, req->req, req->len);
548 /* called after a read has been done */
549 static void
550 net_read(struct bufferevent *bev, void *d)
552 struct req *req = d;
553 struct evbuffer *src = EVBUFFER_INPUT(bev);
554 void *data;
555 size_t len, chunk;
556 int r;
557 char *header;
559 if (!req->done_header) {
560 header = evbuffer_readln(src, &len, EVBUFFER_EOL_CRLF_STRICT);
561 if (header == NULL && EVBUFFER_LENGTH(src) >= 1024)
562 goto err;
563 if (header == NULL)
564 return;
565 r = gemini_parse_reply(req, header, len);
566 free(header);
567 if (!r)
568 goto err;
569 req->done_header = 1;
570 return;
573 if ((len = EVBUFFER_LENGTH(src)) == 0)
574 return;
575 data = EVBUFFER_DATA(src);
577 /*
578 * Split data into chunks before sending. imsg can't handle
579 * message that are "too big".
580 */
581 while (len != 0) {
582 chunk = MIN(len, 4096);
583 net_send_ui(IMSG_BUF, req->id, data, chunk);
584 data += chunk;
585 len -= chunk;
588 evbuffer_drain(src, EVBUFFER_LENGTH(src));
589 return;
591 err:
592 (*bev->errorcb)(bev, EVBUFFER_READ, bev->cbarg);
595 /* called after a write has been done */
596 static void
597 net_write(struct bufferevent *bev, void *d)
599 struct evbuffer *dst = EVBUFFER_OUTPUT(bev);
601 if (EVBUFFER_LENGTH(dst) == 0)
602 (*bev->errorcb)(bev, EVBUFFER_WRITE, bev->cbarg);
605 static void
606 net_error(struct bufferevent *bev, short error, void *d)
608 struct req *req = d;
609 struct evbuffer *src;
611 if (error & EVBUFFER_TIMEOUT) {
612 close_with_err(req, "Timeout loading page");
613 return;
616 if (error & EVBUFFER_ERROR) {
617 close_with_err(req, "buffer event error");
618 return;
621 if (error & EVBUFFER_EOF) {
622 /* EOF and no header */
623 if (!req->done_header) {
624 close_with_err(req, "protocol error");
625 return;
628 src = EVBUFFER_INPUT(req->bev);
629 if (EVBUFFER_LENGTH(src) != 0)
630 net_send_ui(IMSG_BUF, req->id, EVBUFFER_DATA(src),
631 EVBUFFER_LENGTH(src));
632 net_send_ui(IMSG_EOF, req->id, NULL, 0);
633 close_conn(0, 0, req);
634 return;
637 if (error & EVBUFFER_WRITE) {
638 /* finished sending request */
639 bufferevent_disable(bev, EV_WRITE);
640 return;
643 if (error & EVBUFFER_READ) {
644 close_with_err(req, "protocol error");
645 return;
648 close_with_errf(req, "unknown event error %x", error);
651 static void
652 handle_get_raw(struct imsg *imsg, size_t datalen)
654 struct req *req;
655 struct get_req *r;
657 r = imsg->data;
659 if (datalen != sizeof(*r))
660 die();
662 if ((req = calloc(1, sizeof(*req))) == NULL)
663 die();
665 req->id = imsg->hdr.peerid;
666 TAILQ_INSERT_HEAD(&reqhead, req, reqs);
668 strlcpy(req->url.host, r->host, sizeof(req->url.host));
669 strlcpy(req->url.port, r->port, sizeof(req->url.port));
671 strlcpy(req->req, r->req, sizeof(req->req));
672 req->len = strlen(r->req);
674 req->proto = r->proto;
676 #if HAVE_ASR_RUN
677 async_conn_towards(req);
678 #else
679 blocking_conn_towards(req);
680 #endif
683 static void
684 handle_cert_status(struct imsg *imsg, size_t datalen)
686 struct req *req;
687 int is_ok;
689 req = req_by_id(imsg->hdr.peerid);
691 if (datalen < sizeof(is_ok))
692 die();
693 memcpy(&is_ok, imsg->data, sizeof(is_ok));
695 if (is_ok)
696 net_ready(req);
697 else
698 close_conn(0, 0, req);
701 static void
702 handle_proceed(struct imsg *imsg, size_t datalen)
704 struct req *req;
706 if ((req = req_by_id(imsg->hdr.peerid)) == NULL)
707 return;
709 bufferevent_enable(req->bev, EV_READ);
712 static void
713 handle_stop(struct imsg *imsg, size_t datalen)
715 struct req *req;
717 if ((req = req_by_id(imsg->hdr.peerid)) == NULL)
718 return;
719 close_conn(0, 0, req);
/* IMSG_QUIT: leave the event loop so the process can shut down. */
static void
handle_quit(struct imsg *imsg, size_t datalen)
{
	event_loopbreak();
}
728 static void
729 handle_dispatch_imsg(int fd, short ev, void *d)
731 struct imsgev *iev = d;
733 if (dispatch_imsg(iev, ev, handlers, sizeof(handlers)) == -1)
734 err(1, "connection closed");
737 static int
738 net_send_ui(int type, uint32_t peerid, const void *data,
739 uint16_t datalen)
741 return imsg_compose_event(iev_ui, type, peerid, 0, -1,
742 data, datalen);
745 int
746 net_main(void)
748 setproctitle("net");
750 TAILQ_INIT(&reqhead);
752 event_init();
754 /* Setup pipe and event handler to the main process */
755 if ((iev_ui = malloc(sizeof(*iev_ui))) == NULL)
756 die();
757 imsg_init(&iev_ui->ibuf, 3);
758 iev_ui->handler = handle_dispatch_imsg;
759 iev_ui->events = EV_READ;
760 event_set(&iev_ui->ev, iev_ui->ibuf.fd, iev_ui->events,
761 iev_ui->handler, iev_ui);
762 event_add(&iev_ui->ev, NULL);
764 sandbox_net_process();
766 event_dispatch();
768 msgbuf_clear(&iev_ui->ibuf.w);
769 close(iev_ui->ibuf.fd);
770 free(iev_ui);
772 return 0;