/*
 * Copyright (c) 2021 Omar Polo <op@omarpolo.com>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

/*
 * TODO:
 * - move the various
 *	imsg_compose(...);
 *	imsg_flush(...);
 *   to something more asynchronous
 */

#include "telescope.h"

#include <sys/types.h>
#include <sys/socket.h>

#include <netinet/in.h>

#include <assert.h>
#include <ctype.h>
#include <errno.h>
#include <netdb.h>
#include <stdarg.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <tls.h>
#include <unistd.h>

#if HAVE_ASR_RUN
# include <asr.h>
#endif

static struct event imsgev;
static struct tls_config *tlsconf;
static struct imsgbuf *ibuf;

struct req;

static void die(void) __attribute__((__noreturn__));

#if HAVE_ASR_RUN
static void try_to_connect(int, short, void*);
static void query_done(struct asr_result*, void*);
static void async_conn_towards(struct req*);
#else
static void blocking_conn_towards(struct req*);
#endif

static void close_with_err(struct req*, const char*);
static void close_with_errf(struct req*, const char*, ...)
	__attribute__((format(printf, 2, 3)));
static struct req *req_by_id(uint32_t);
static struct req *req_by_id_try(uint32_t);

static void setup_tls(struct req*);
static void do_handshake(int, short, void*);
static void write_request(int, short, void*);
static void read_reply(int, short, void*);
static void parse_reply(struct req*);
static void copy_body(int, short, void*);

static void handle_get(struct imsg*, size_t);
static void handle_cert_status(struct imsg*, size_t);
static void handle_proceed(struct imsg*, size_t);
static void handle_stop(struct imsg*, size_t);
static void handle_quit(struct imsg*, size_t);
static void handle_dispatch_imsg(int, short, void*);

/* TODO: make this customizable */
struct timeval timeout_for_handshake = { 5, 0 };

static imsg_handlerfn *handlers[] = {
	[IMSG_GET] = handle_get,
	[IMSG_CERT_STATUS] = handle_cert_status,
	[IMSG_PROCEED] = handle_proceed,
	[IMSG_STOP] = handle_stop,
	[IMSG_QUIT] = handle_quit,
};

typedef void (*statefn)(int, short, void*);

TAILQ_HEAD(, req) reqhead;

/* a pending request */
struct req {
	struct event ev;
	struct url url;
	uint32_t id;
	int fd;
	struct tls *ctx;
	char buf[1024];
	size_t off;

#if HAVE_ASR_RUN
	struct addrinfo hints, *servinfo, *p;
	struct event_asr *asrev;
#endif

	TAILQ_ENTRY(req) reqs;
};
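
/*
 * Schedule fn to be called once req->fd becomes readable (yield_r) or
 * writable (yield_w), optionally with a timeout.  These drive the
 * request state machine from one step to the next.
 */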
static inline void
yield_r(struct req *req, statefn fn, struct timeval *tv)
{
	event_once(req->fd, EV_READ, fn, req, tv);
}

static inline void
yield_w(struct req *req, statefn fn, struct timeval *tv)
{
	event_once(req->fd, EV_WRITE, fn, req, tv);
}

static inline void
advance_buf(struct req *req, size_t len)
{
	assert(len <= req->off);

	req->off -= len;
	memmove(req->buf, req->buf + len, req->off);
}

static void __attribute__((__noreturn__))
die(void)
{
	abort(); /* TODO */
}
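
/*
 * Non-blocking connect: walk the list of resolved addresses, trying
 * each until one connect(2) succeeds, then hand the socket over to
 * setup_tls(); if every address fails, report the error to the main
 * process.
 */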
#if HAVE_ASR_RUN
static void
try_to_connect(int fd, short ev, void *d)
{
	struct req *req = d;
	int error = 0;
	socklen_t len = sizeof(error);

again:
	if (req->p == NULL)
		goto err;

	if (req->fd != -1) {
		if (getsockopt(req->fd, SOL_SOCKET, SO_ERROR, &error, &len) == -1)
			goto err;
		if (error != 0) {
			errno = error;
			goto err;
		}
		goto done;
	}

	req->fd = socket(req->p->ai_family, req->p->ai_socktype, req->p->ai_protocol);
	if (req->fd == -1) {
		req->p = req->p->ai_next;
		goto again;
	} else {
		mark_nonblock(req->fd);
		if (connect(req->fd, req->p->ai_addr, req->p->ai_addrlen) == 0)
			goto done;
		yield_w(req, try_to_connect, NULL);
	}

	return;

err:
	freeaddrinfo(req->servinfo);
	close_with_errf(req, "failed to connect to %s",
	    req->url.host);
	return;

done:
	freeaddrinfo(req->servinfo);
	setup_tls(req);
}

static void
query_done(struct asr_result *res, void *d)
{
	struct req *req = d;

	req->asrev = NULL;
	if (res->ar_gai_errno != 0) {
		close_with_errf(req, "failed to resolve %s: %s",
		    req->url.host, gai_strerror(res->ar_gai_errno));
		return;
	}

	req->fd = -1;
	req->servinfo = res->ar_addrinfo;
	req->p = res->ar_addrinfo;
	try_to_connect(0, 0, req);
}
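
/*
 * Start an asynchronous getaddrinfo(3) lookup for the requested host,
 * defaulting to the Gemini port (1965); query_done() picks up once the
 * resolver has finished.
 */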
static void
async_conn_towards(struct req *req)
{
	struct asr_query *q;
	const char *proto = "1965";

	if (*req->url.port != '\0')
		proto = req->url.port;

	req->hints.ai_family = AF_UNSPEC;
	req->hints.ai_socktype = SOCK_STREAM;
	q = getaddrinfo_async(req->url.host, proto, &req->hints, NULL);
	req->asrev = event_asr_run(q, query_done, req);
}
#else
static void
blocking_conn_towards(struct req *req)
{
	struct addrinfo hints, *servinfo, *p;
	struct url *url = &req->url;
	int status, sock;
	const char *proto = "1965";

	if (*url->port != '\0')
		proto = url->port;

	memset(&hints, 0, sizeof(hints));
	hints.ai_family = AF_UNSPEC;
	hints.ai_socktype = SOCK_STREAM;

	if ((status = getaddrinfo(url->host, proto, &hints, &servinfo))) {
		close_with_errf(req, "failed to resolve %s: %s",
		    url->host, gai_strerror(status));
		return;
	}

	sock = -1;
	for (p = servinfo; p != NULL; p = p->ai_next) {
		if ((sock = socket(p->ai_family, p->ai_socktype, p->ai_protocol)) == -1)
			continue;
		if (connect(sock, p->ai_addr, p->ai_addrlen) != -1)
			break;
		close(sock);
	}
	freeaddrinfo(servinfo);

	if (sock == -1) {
		close_with_errf(req, "couldn't connect to %s", url->host);
		return;
	}

	req->fd = sock;
	mark_nonblock(req->fd);
	setup_tls(req);
}
#endif

static struct req *
req_by_id(uint32_t id)
{
	struct req *r;

	if ((r = req_by_id_try(id)) == NULL)
		die();
	return r;
}

static struct req *
req_by_id_try(uint32_t id)
{
	struct req *r;

	TAILQ_FOREACH(r, &reqhead, reqs) {
		if (r->id == id)
			return r;
	}

	return NULL;
}
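
/*
 * Tear a request down: abort any pending resolver query, close the TLS
 * connection (retrying when libtls wants to poll), then unlink the
 * request from the queue, close its socket and free it.
 */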
static void
close_conn(int fd, short ev, void *d)
{
	struct req *req = d;

#if HAVE_ASR_RUN
	if (req->asrev != NULL)
		event_asr_abort(req->asrev);
#endif

	if (req->ctx != NULL) {
		switch (tls_close(req->ctx)) {
		case TLS_WANT_POLLIN:
			yield_r(req, close_conn, NULL);
			return;
		case TLS_WANT_POLLOUT:
			yield_w(req, close_conn, NULL);
			return;
		}

		tls_free(req->ctx);
	}

	TAILQ_REMOVE(&reqhead, req, reqs);
	if (req->fd != -1)
		close(req->fd);
	free(req);
}

static void
close_with_err(struct req *req, const char *err)
{
	imsg_compose(ibuf, IMSG_ERR, req->id, 0, -1, err, strlen(err)+1);
	imsg_flush(ibuf);
	close_conn(0, 0, req);
}

static void
close_with_errf(struct req *req, const char *fmt, ...)
{
	va_list ap;
	char *s;

	va_start(ap, fmt);
	if (vasprintf(&s, fmt, ap) == -1)
		abort();
	va_end(ap);

	close_with_err(req, s);
	free(s);
}
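
/*
 * Create and configure the libtls client context on the now-connected
 * socket and schedule the TLS handshake.
 */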
static void
setup_tls(struct req *req)
{
	if ((req->ctx = tls_client()) == NULL) {
		close_with_errf(req, "tls_client: %s", strerror(errno));
		return;
	}
	if (tls_configure(req->ctx, tlsconf) == -1) {
		close_with_errf(req, "tls_configure: %s", tls_error(req->ctx));
		return;
	}
	if (tls_connect_socket(req->ctx, req->fd, req->url.host) == -1) {
		close_with_errf(req, "tls_connect_socket: %s", tls_error(req->ctx));
		return;
	}

	yield_w(req, do_handshake, &timeout_for_handshake);
}
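
/*
 * Drive the TLS handshake; when it completes, send the peer certificate
 * hash to the main process (IMSG_CHECK_CERT) and wait for its verdict,
 * which arrives as IMSG_CERT_STATUS.
 */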
static void
do_handshake(int fd, short ev, void *d)
{
	struct req *req = d;
	const char *hash;

	if (ev == EV_TIMEOUT) {
		close_with_err(req, "Timeout loading page");
		return;
	}

	switch (tls_handshake(req->ctx)) {
	case TLS_WANT_POLLIN:
		yield_r(req, do_handshake, NULL);
		return;
	case TLS_WANT_POLLOUT:
		yield_w(req, do_handshake, NULL);
		return;
	}

	hash = tls_peer_cert_hash(req->ctx);
	if (hash == NULL) {
		close_with_errf(req, "handshake failed: %s", tls_error(req->ctx));
		return;
	}

	imsg_compose(ibuf, IMSG_CHECK_CERT, req->id, 0, -1, hash, strlen(hash)+1);
	imsg_flush(ibuf);
}
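
/*
 * Write the Gemini request line ("gemini://host/path?query\r\n") over
 * the TLS connection, then wait for the reply header.
 */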
static void
write_request(int fd, short ev, void *d)
{
	struct req *req = d;
	ssize_t r;
	size_t len;
	char buf[1027]; /* URL + \r\n\0 */

	strlcpy(buf, "gemini://", sizeof(buf));
	strlcat(buf, req->url.host, sizeof(buf));
	strlcat(buf, "/", sizeof(buf));
	strlcat(buf, req->url.path, sizeof(buf));

	if (req->url.query[0] != '\0') {
		strlcat(buf, "?", sizeof(buf));
		strlcat(buf, req->url.query, sizeof(buf));
	}

	len = strlcat(buf, "\r\n", sizeof(buf));

	assert(len <= sizeof(buf));

	switch (r = tls_write(req->ctx, buf, len)) {
	case -1:
		close_with_errf(req, "tls_write: %s", tls_error(req->ctx));
		break;
	case TLS_WANT_POLLIN:
		yield_r(req, write_request, NULL);
		break;
	case TLS_WANT_POLLOUT:
		yield_w(req, write_request, NULL);
		break;
	default:
		/* assume r == len */
		(void)r;
		yield_r(req, read_reply, NULL);
		break;
	}
}
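
/*
 * Accumulate the reply header in req->buf until a CRLF shows up, then
 * hand it to parse_reply(); give up if the buffer fills up first.
 */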
static void
read_reply(int fd, short ev, void *d)
{
	struct req *req = d;
	size_t len;
	ssize_t r;
	char *buf;

	buf = req->buf + req->off;
	len = sizeof(req->buf) - req->off;

	switch (r = tls_read(req->ctx, buf, len)) {
	case -1:
		close_with_errf(req, "tls_read: %s", tls_error(req->ctx));
		break;
	case TLS_WANT_POLLIN:
		yield_r(req, read_reply, NULL);
		break;
	case TLS_WANT_POLLOUT:
		yield_w(req, read_reply, NULL);
		break;
	default:
		req->off += r;

		if (memmem(req->buf, req->off, "\r\n", 2) != NULL)
			parse_reply(req);
		else if (req->off == sizeof(req->buf))
			close_with_err(req, "invalid response");
		else
			yield_r(req, read_reply, NULL);
		break;
	}
}
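
/*
 * Parse the Gemini reply header: a two-digit status code, a space, the
 * meta string, CRLF.  Code and meta are forwarded to the main process;
 * only a success code (20) keeps the connection open for the body.
 */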
static void
parse_reply(struct req *req)
{
	int code;
	char *e;
	size_t len;

	if (req->off < 4)
		goto err;

	if (!isdigit((unsigned char)req->buf[0]) ||
	    !isdigit((unsigned char)req->buf[1]))
		goto err;

	code = (req->buf[0] - '0')*10 + (req->buf[1] - '0');

	if (!isspace((unsigned char)req->buf[2]))
		goto err;

	advance_buf(req, 3);
	if ((e = memmem(req->buf, req->off, "\r\n", 2)) == NULL)
		goto err;

	*e = '\0';
	e++;
	len = e - req->buf;
	imsg_compose(ibuf, IMSG_GOT_CODE, req->id, 0, -1, &code, sizeof(code));
	imsg_compose(ibuf, IMSG_GOT_META, req->id, 0, -1, req->buf, len);
	imsg_flush(ibuf);

	if (code != 20)
		close_conn(0, 0, req);
	else
		advance_buf(req, len+1); /* skip \n too */

	return;

err:
	close_with_err(req, "malformed response");
}
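
/*
 * Stream the response body: flush whatever is already buffered as an
 * IMSG_BUF message, keep reading from the TLS connection, and send
 * IMSG_EOF once the peer closes the connection.
 */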
static void
copy_body(int fd, short ev, void *d)
{
	struct req *req = d;
	ssize_t r;

	for (;;) {
		if (req->off != 0) {
			imsg_compose(ibuf, IMSG_BUF, req->id, 0, -1,
			    req->buf, req->off);
			imsg_flush(ibuf);
			req->off = 0;
		}

		switch (r = tls_read(req->ctx, req->buf, sizeof(req->buf))) {
		case -1:
			close_with_errf(req, "tls_read: %s", tls_error(req->ctx));
			return;
		case TLS_WANT_POLLIN:
			yield_r(req, copy_body, NULL);
			return;
		case TLS_WANT_POLLOUT:
			yield_w(req, copy_body, NULL);
			return;
		case 0:
			imsg_compose(ibuf, IMSG_EOF, req->id, 0, -1, NULL, 0);
			imsg_flush(ibuf);
			close_conn(0, 0, req);
			return;
		default:
			req->off = r;
		}
	}
}
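
/*
 * Handlers for the imsgs sent by the main process: start a new request,
 * act on the certificate verdict, start streaming the body, stop a
 * request or quit the event loop.
 */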
static void
handle_get(struct imsg *imsg, size_t datalen)
{
	struct req *req;
	const char *e;
	char *data;

	data = imsg->data;

	if (data[datalen-1] != '\0')
		die();

	if ((req = calloc(1, sizeof(*req))) == NULL)
		die();

	req->id = imsg->hdr.peerid;
	TAILQ_INSERT_HEAD(&reqhead, req, reqs);

	if (!url_parse(imsg->data, &req->url, &e)) {
		close_with_err(req, e);
		return;
	}

#if HAVE_ASR_RUN
	async_conn_towards(req);
#else
	blocking_conn_towards(req);
#endif
}

static void
handle_cert_status(struct imsg *imsg, size_t datalen)
{
	struct req *req;
	int is_ok;

	req = req_by_id(imsg->hdr.peerid);

	if (datalen < sizeof(is_ok))
		die();
	memcpy(&is_ok, imsg->data, sizeof(is_ok));

	if (is_ok)
		yield_w(req, write_request, NULL);
	else
		close_conn(0, 0, req);
}

static void
handle_proceed(struct imsg *imsg, size_t datalen)
{
	yield_r(req_by_id(imsg->hdr.peerid),
	    copy_body, NULL);
}

static void
handle_stop(struct imsg *imsg, size_t datalen)
{
	struct req *req;

	if ((req = req_by_id_try(imsg->hdr.peerid)) == NULL)
		return;
	close_conn(0, 0, req);
}

static void
handle_quit(struct imsg *imsg, size_t datalen)
{
	event_loopbreak();
}

static void
handle_dispatch_imsg(int fd, short ev, void *d)
{
	struct imsgbuf *ibuf = d;

	dispatch_imsg(ibuf, handlers, sizeof(handlers));
}
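
/*
 * Entry point of the network process: certificate verification is
 * disabled at the libtls level (the certificate hash is checked via
 * IMSG_CHECK_CERT instead), the imsg channel is hooked into libevent,
 * the process is sandboxed and the event loop runs until IMSG_QUIT.
 */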
int
client_main(struct imsgbuf *b)
{
	ibuf = b;

	TAILQ_INIT(&reqhead);

	if ((tlsconf = tls_config_new()) == NULL)
		die();
	tls_config_insecure_noverifycert(tlsconf);
	tls_config_insecure_noverifyname(tlsconf);

	event_init();

	event_set(&imsgev, ibuf->fd, EV_READ | EV_PERSIST, handle_dispatch_imsg, ibuf);
	event_add(&imsgev, NULL);

	sandbox_network_process();

	event_dispatch();
	return 0;
}