/*	$OpenBSD: ioev.c,v 1.1 2014/01/27 15:49:52 sunil Exp $	*/

/*
 * Copyright (c) 2012 Eric Faurot <eric@openbsd.org>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */
|
18 |
|
|
19 |
#include <sys/types.h>
#include <sys/queue.h>
#include <sys/socket.h>

#include <err.h>
#include <errno.h>
#include <fcntl.h>
#include <inttypes.h>
#include <stdlib.h>
#include <string.h>
#include <stdio.h>
#include <unistd.h>

#include "ioev.h"
#include "iobuf.h"

#ifdef IO_SSL
#include <openssl/err.h>
#include <openssl/ssl.h>
#endif

/* Internal state machine for an io channel. */
enum {
	IO_STATE_NONE,
	IO_STATE_CONNECT,
	IO_STATE_CONNECT_SSL,
	IO_STATE_ACCEPT_SSL,
	IO_STATE_UP,

	IO_STATE_MAX,
};

const char* io_strflags(int);
const char* io_evstr(short);

void	_io_init(void);
void	io_hold(struct io *);
void	io_release(struct io *);
void	io_callback(struct io*, int);
void	io_dispatch(int, short, void *);
void	io_dispatch_connect(int, short, void *);
size_t	io_pending(struct io *);
size_t	io_queued(struct io*);
void	io_reset(struct io *, short, void (*)(int, short, void*));
void	io_frame_enter(const char *, struct io *, int);
void	io_frame_leave(struct io *);

#ifdef IO_SSL
void	ssl_error(const char *); /* XXX external */

static const char* io_ssl_error(void);
void	io_dispatch_accept_ssl(int, short, void *);
void	io_dispatch_connect_ssl(int, short, void *);
void	io_dispatch_read_ssl(int, short, void *);
void	io_dispatch_write_ssl(int, short, void *);
void	io_reload_ssl(struct io *io);
#endif

/* io currently being dispatched; NULL outside an event frame */
static struct io *current = NULL;
/* monotonically increasing frame counter, for debug traces only */
static uint64_t frame = 0;
static int	_io_debug = 0;

#define io_debug(args...) do { if (_io_debug) printf(args); } while(0)
|
81 |
|
|
82 |
|
|
83 |
const char* |
|
84 |
io_strio(struct io *io) |
|
85 |
{ |
|
86 |
static char buf[128]; |
|
87 |
char ssl[128]; |
|
88 |
|
|
89 |
ssl[0] = '\0'; |
|
90 |
#ifdef IO_SSL |
|
91 |
if (io->ssl) { |
|
92 |
snprintf(ssl, sizeof ssl, " ssl=%s:%s:%i", |
|
93 |
SSL_get_cipher_version(io->ssl), |
|
94 |
SSL_get_cipher_name(io->ssl), |
|
95 |
SSL_get_cipher_bits(io->ssl, NULL)); |
|
96 |
} |
|
97 |
#endif |
|
98 |
|
|
99 |
if (io->iobuf == NULL) |
|
100 |
snprintf(buf, sizeof buf, |
|
101 |
"<io:%p fd=%i to=%i fl=%s%s>", |
|
102 |
io, io->sock, io->timeout, io_strflags(io->flags), ssl); |
|
103 |
else |
|
104 |
snprintf(buf, sizeof buf, |
|
105 |
"<io:%p fd=%i to=%i fl=%s%s ib=%zu ob=%zu>", |
|
106 |
io, io->sock, io->timeout, io_strflags(io->flags), ssl, |
|
107 |
io_pending(io), io_queued(io)); |
|
108 |
|
|
109 |
return (buf); |
|
110 |
} |
|
111 |
|
|
112 |
#define CASE(x) case x : return #x |
|
113 |
|
|
114 |
const char* |
|
115 |
io_strevent(int evt) |
|
116 |
{ |
|
117 |
static char buf[32]; |
|
118 |
|
|
119 |
switch (evt) { |
|
120 |
CASE(IO_CONNECTED); |
|
121 |
CASE(IO_TLSREADY); |
|
122 |
CASE(IO_TLSVERIFIED); |
|
123 |
CASE(IO_DATAIN); |
|
124 |
CASE(IO_LOWAT); |
|
125 |
CASE(IO_DISCONNECTED); |
|
126 |
CASE(IO_TIMEOUT); |
|
127 |
CASE(IO_ERROR); |
|
128 |
default: |
|
129 |
snprintf(buf, sizeof(buf), "IO_? %i", evt); |
|
130 |
return buf; |
|
131 |
} |
|
132 |
} |
|
133 |
|
|
134 |
/*
 * Set or clear O_NONBLOCK on the given descriptor.
 * Aborts the process on fcntl() failure.
 */
void
io_set_blocking(int fd, int blocking)
{
	int	flags;

	if ((flags = fcntl(fd, F_GETFL, 0)) == -1)
		err(1, "io_set_blocking:fcntl(F_GETFL)");

	flags = blocking ? (flags & ~O_NONBLOCK) : (flags | O_NONBLOCK);

	if (fcntl(fd, F_SETFL, flags) == -1)
		err(1, "io_set_blocking:fcntl(F_SETFL)");
}
|
150 |
|
|
151 |
/*
 * Configure SO_LINGER on the socket: a non-zero value enables a
 * linger timeout of that many seconds, zero disables lingering.
 * Aborts the process on setsockopt() failure.
 */
void
io_set_linger(int fd, int linger)
{
	struct linger	l;

	/* memset, not legacy bzero (removed from POSIX.1-2008) */
	memset(&l, 0, sizeof(l));
	l.l_onoff = linger ? 1 : 0;
	l.l_linger = linger;
	if (setsockopt(fd, SOL_SOCKET, SO_LINGER, &l, sizeof(l)) == -1)
		err(1, "io_set_linger:setsockopt()");
}
|
162 |
|
|
163 |
/*
 * Event framing must not rely on an io pointer to refer to the "same" io
 * throughout the frame, because this is not always the case:
 *
 *   1) enter(addr0) -> free(addr0) -> leave(addr0) = SEGV
 *   2) enter(addr0) -> free(addr0) -> malloc == addr0 -> leave(addr0) = BAD!
 *
 * In both cases, the problem is that the io is freed in the callback, so
 * the pointer becomes invalid. If that happens, the user is required to
 * call io_clear, so we can adapt the frame state there.
 */
|
174 |
void |
|
175 |
io_frame_enter(const char *where, struct io *io, int ev) |
|
176 |
{ |
|
177 |
io_debug("\n=== %" PRIu64 " ===\n" |
|
178 |
"io_frame_enter(%s, %s, %s)\n", |
|
179 |
frame, where, io_evstr(ev), io_strio(io)); |
|
180 |
|
|
181 |
if (current) |
|
182 |
errx(1, "io_frame_enter: interleaved frames"); |
|
183 |
|
|
184 |
current = io; |
|
185 |
|
|
186 |
io_hold(io); |
|
187 |
} |
|
188 |
|
|
189 |
void |
|
190 |
io_frame_leave(struct io *io) |
|
191 |
{ |
|
192 |
io_debug("io_frame_leave(%" PRIu64 ")\n", frame); |
|
193 |
|
|
194 |
if (current && current != io) |
|
195 |
errx(1, "io_frame_leave: io mismatch"); |
|
196 |
|
|
197 |
/* io has been cleared */ |
|
198 |
if (current == NULL) |
|
199 |
goto done; |
|
200 |
|
|
201 |
/* TODO: There is a possible optimization there: |
|
202 |
* In a typical half-duplex request/response scenario, |
|
203 |
* the io is waiting to read a request, and when done, it queues |
|
204 |
* the response in the output buffer and goes to write mode. |
|
205 |
* There, the write event is set and will be triggered in the next |
|
206 |
* event frame. In most case, the write call could be done |
|
207 |
* immediatly as part of the last read frame, thus avoiding to go |
|
208 |
* through the event loop machinery. So, as an optimisation, we |
|
209 |
* could detect that case here and force an event dispatching. |
|
210 |
*/ |
|
211 |
|
|
212 |
/* Reload the io if it has not been reset already. */ |
|
213 |
io_release(io); |
|
214 |
current = NULL; |
|
215 |
done: |
|
216 |
io_debug("=== /%" PRIu64 "\n", frame); |
|
217 |
|
|
218 |
frame += 1; |
|
219 |
} |
|
220 |
|
|
221 |
void |
|
222 |
_io_init() |
|
223 |
{ |
|
224 |
static int init = 0; |
|
225 |
|
|
226 |
if (init) |
|
227 |
return; |
|
228 |
|
|
229 |
init = 1; |
|
230 |
_io_debug = getenv("IO_DEBUG") != NULL; |
|
231 |
} |
|
232 |
|
|
233 |
void |
|
234 |
io_init(struct io *io, int sock, void *arg, |
|
235 |
void(*cb)(struct io*, int), struct iobuf *iobuf) |
|
236 |
{ |
|
237 |
_io_init(); |
|
238 |
|
|
239 |
memset(io, 0, sizeof *io); |
|
240 |
|
|
241 |
io->sock = sock; |
|
242 |
io->timeout = -1; |
|
243 |
io->arg = arg; |
|
244 |
io->iobuf = iobuf; |
|
245 |
io->cb = cb; |
|
246 |
|
|
247 |
if (sock != -1) |
|
248 |
io_reload(io); |
|
249 |
} |
|
250 |
|
|
251 |
void |
|
252 |
io_clear(struct io *io) |
|
253 |
{ |
|
254 |
io_debug("io_clear(%p)\n", io); |
|
255 |
|
|
256 |
/* the current io is virtually dead */ |
|
257 |
if (io == current) |
|
258 |
current = NULL; |
|
259 |
|
|
260 |
#ifdef IO_SSL |
|
261 |
if (io->ssl) { |
|
262 |
SSL_shutdown(io->ssl); |
|
263 |
SSL_free(io->ssl); |
|
264 |
io->ssl = NULL; |
|
265 |
} |
|
266 |
#endif |
|
267 |
|
|
268 |
event_del(&io->ev); |
|
269 |
if (io->sock != -1) { |
|
270 |
close(io->sock); |
|
271 |
io->sock = -1; |
|
272 |
} |
|
273 |
} |
|
274 |
|
|
275 |
void |
|
276 |
io_hold(struct io *io) |
|
277 |
{ |
|
278 |
io_debug("io_enter(%p)\n", io); |
|
279 |
|
|
280 |
if (io->flags & IO_HELD) |
|
281 |
errx(1, "io_hold: io is already held"); |
|
282 |
|
|
283 |
io->flags &= ~IO_RESET; |
|
284 |
io->flags |= IO_HELD; |
|
285 |
} |
|
286 |
|
|
287 |
void |
|
288 |
io_release(struct io *io) |
|
289 |
{ |
|
290 |
if (!(io->flags & IO_HELD)) |
|
291 |
errx(1, "io_release: io is not held"); |
|
292 |
|
|
293 |
io->flags &= ~IO_HELD; |
|
294 |
if (!(io->flags & IO_RESET)) |
|
295 |
io_reload(io); |
|
296 |
} |
|
297 |
|
|
298 |
void |
|
299 |
io_set_timeout(struct io *io, int msec) |
|
300 |
{ |
|
301 |
io_debug("io_set_timeout(%p, %i)\n", io, msec); |
|
302 |
|
|
303 |
io->timeout = msec; |
|
304 |
} |
|
305 |
|
|
306 |
void |
|
307 |
io_set_lowat(struct io *io, size_t lowat) |
|
308 |
{ |
|
309 |
io_debug("io_set_lowat(%p, %zu)\n", io, lowat); |
|
310 |
|
|
311 |
io->lowat = lowat; |
|
312 |
} |
|
313 |
|
|
314 |
void |
|
315 |
io_pause(struct io *io, int dir) |
|
316 |
{ |
|
317 |
io_debug("io_pause(%p, %x)\n", io, dir); |
|
318 |
|
|
319 |
io->flags |= dir & (IO_PAUSE_IN | IO_PAUSE_OUT); |
|
320 |
io_reload(io); |
|
321 |
} |
|
322 |
|
|
323 |
void |
|
324 |
io_resume(struct io *io, int dir) |
|
325 |
{ |
|
326 |
io_debug("io_resume(%p, %x)\n", io, dir); |
|
327 |
|
|
328 |
io->flags &= ~(dir & (IO_PAUSE_IN | IO_PAUSE_OUT)); |
|
329 |
io_reload(io); |
|
330 |
} |
|
331 |
|
|
332 |
void |
|
333 |
io_set_read(struct io *io) |
|
334 |
{ |
|
335 |
int mode; |
|
336 |
|
|
337 |
io_debug("io_set_read(%p)\n", io); |
|
338 |
|
|
339 |
mode = io->flags & IO_RW; |
|
340 |
if (!(mode == 0 || mode == IO_WRITE)) |
|
341 |
errx(1, "io_set_read(): full-duplex or reading"); |
|
342 |
|
|
343 |
io->flags &= ~IO_RW; |
|
344 |
io->flags |= IO_READ; |
|
345 |
io_reload(io); |
|
346 |
} |
|
347 |
|
|
348 |
void |
|
349 |
io_set_write(struct io *io) |
|
350 |
{ |
|
351 |
int mode; |
|
352 |
|
|
353 |
io_debug("io_set_write(%p)\n", io); |
|
354 |
|
|
355 |
mode = io->flags & IO_RW; |
|
356 |
if (!(mode == 0 || mode == IO_READ)) |
|
357 |
errx(1, "io_set_write(): full-duplex or writing"); |
|
358 |
|
|
359 |
io->flags &= ~IO_RW; |
|
360 |
io->flags |= IO_WRITE; |
|
361 |
io_reload(io); |
|
362 |
} |
|
363 |
|
|
364 |
#define IO_READING(io) (((io)->flags & IO_RW) != IO_WRITE) |
|
365 |
#define IO_WRITING(io) (((io)->flags & IO_RW) != IO_READ) |
|
366 |
|
|
367 |
/* |
|
368 |
* Setup the necessary events as required by the current io state, |
|
369 |
* honouring duplex mode and i/o pauses. |
|
370 |
*/ |
|
371 |
void |
|
372 |
io_reload(struct io *io) |
|
373 |
{ |
|
374 |
short events; |
|
375 |
|
|
376 |
/* io will be reloaded at release time */ |
|
377 |
if (io->flags & IO_HELD) |
|
378 |
return; |
|
379 |
|
|
380 |
#ifdef IO_SSL |
|
381 |
if (io->ssl) { |
|
382 |
io_reload_ssl(io); |
|
383 |
return; |
|
384 |
} |
|
385 |
#endif |
|
386 |
|
|
387 |
io_debug("io_reload(%p)\n", io); |
|
388 |
|
|
389 |
events = 0; |
|
390 |
if (IO_READING(io) && !(io->flags & IO_PAUSE_IN)) |
|
391 |
events = EV_READ; |
|
392 |
if (IO_WRITING(io) && !(io->flags & IO_PAUSE_OUT) && io_queued(io)) |
|
393 |
events |= EV_WRITE; |
|
394 |
|
|
395 |
io_reset(io, events, io_dispatch); |
|
396 |
} |
|
397 |
|
|
398 |
/* Set the requested event. */ |
|
399 |
void |
|
400 |
io_reset(struct io *io, short events, void (*dispatch)(int, short, void*)) |
|
401 |
{ |
|
402 |
struct timeval tv, *ptv; |
|
403 |
|
|
404 |
io_debug("io_reset(%p, %s, %p) -> %s\n", |
|
405 |
io, io_evstr(events), dispatch, io_strio(io)); |
|
406 |
|
|
407 |
/* |
|
408 |
* Indicate that the event has already been reset so that reload |
|
409 |
* is not called on frame_leave. |
|
410 |
*/ |
|
411 |
io->flags |= IO_RESET; |
|
412 |
|
|
413 |
event_del(&io->ev); |
|
414 |
|
|
415 |
/* |
|
416 |
* The io is paused by the user, so we don't want the timeout to be |
|
417 |
* effective. |
|
418 |
*/ |
|
419 |
if (events == 0) |
|
420 |
return; |
|
421 |
|
|
422 |
event_set(&io->ev, io->sock, events, dispatch, io); |
|
423 |
if (io->timeout >= 0) { |
|
424 |
tv.tv_sec = io->timeout / 1000; |
|
425 |
tv.tv_usec = (io->timeout % 1000) * 1000; |
|
426 |
ptv = &tv; |
|
427 |
} else |
|
428 |
ptv = NULL; |
|
429 |
|
|
430 |
event_add(&io->ev, ptv); |
|
431 |
} |
|
432 |
|
|
433 |
size_t |
|
434 |
io_pending(struct io *io) |
|
435 |
{ |
|
436 |
return iobuf_len(io->iobuf); |
|
437 |
} |
|
438 |
|
|
439 |
size_t |
|
440 |
io_queued(struct io *io) |
|
441 |
{ |
|
442 |
return iobuf_queued(io->iobuf); |
|
443 |
} |
|
444 |
|
|
445 |
const char* |
|
446 |
io_strflags(int flags) |
|
447 |
{ |
|
448 |
static char buf[64]; |
|
449 |
|
|
450 |
buf[0] = '\0'; |
|
451 |
|
|
452 |
switch (flags & IO_RW) { |
|
453 |
case 0: |
|
454 |
strlcat(buf, "rw", sizeof buf); |
|
455 |
break; |
|
456 |
case IO_READ: |
|
457 |
strlcat(buf, "R", sizeof buf); |
|
458 |
break; |
|
459 |
case IO_WRITE: |
|
460 |
strlcat(buf, "W", sizeof buf); |
|
461 |
break; |
|
462 |
case IO_RW: |
|
463 |
strlcat(buf, "RW", sizeof buf); |
|
464 |
break; |
|
465 |
} |
|
466 |
|
|
467 |
if (flags & IO_PAUSE_IN) |
|
468 |
strlcat(buf, ",F_PI", sizeof buf); |
|
469 |
if (flags & IO_PAUSE_OUT) |
|
470 |
strlcat(buf, ",F_PO", sizeof buf); |
|
471 |
|
|
472 |
return buf; |
|
473 |
} |
|
474 |
|
|
475 |
const char* |
|
476 |
io_evstr(short ev) |
|
477 |
{ |
|
478 |
static char buf[64]; |
|
479 |
char buf2[16]; |
|
480 |
int n; |
|
481 |
|
|
482 |
n = 0; |
|
483 |
buf[0] = '\0'; |
|
484 |
|
|
485 |
if (ev == 0) { |
|
486 |
strlcat(buf, "<NONE>", sizeof(buf)); |
|
487 |
return buf; |
|
488 |
} |
|
489 |
|
|
490 |
if (ev & EV_TIMEOUT) { |
|
491 |
strlcat(buf, "EV_TIMEOUT", sizeof(buf)); |
|
492 |
ev &= ~EV_TIMEOUT; |
|
493 |
n++; |
|
494 |
} |
|
495 |
|
|
496 |
if (ev & EV_READ) { |
|
497 |
if (n) |
|
498 |
strlcat(buf, "|", sizeof(buf)); |
|
499 |
strlcat(buf, "EV_READ", sizeof(buf)); |
|
500 |
ev &= ~EV_READ; |
|
501 |
n++; |
|
502 |
} |
|
503 |
|
|
504 |
if (ev & EV_WRITE) { |
|
505 |
if (n) |
|
506 |
strlcat(buf, "|", sizeof(buf)); |
|
507 |
strlcat(buf, "EV_WRITE", sizeof(buf)); |
|
508 |
ev &= ~EV_WRITE; |
|
509 |
n++; |
|
510 |
} |
|
511 |
|
|
512 |
if (ev & EV_SIGNAL) { |
|
513 |
if (n) |
|
514 |
strlcat(buf, "|", sizeof(buf)); |
|
515 |
strlcat(buf, "EV_SIGNAL", sizeof(buf)); |
|
516 |
ev &= ~EV_SIGNAL; |
|
517 |
n++; |
|
518 |
} |
|
519 |
|
|
520 |
if (ev) { |
|
521 |
if (n) |
|
522 |
strlcat(buf, "|", sizeof(buf)); |
|
523 |
strlcat(buf, "EV_?=0x", sizeof(buf)); |
|
524 |
snprintf(buf2, sizeof(buf2), "%hx", ev); |
|
525 |
strlcat(buf, buf2, sizeof(buf)); |
|
526 |
} |
|
527 |
|
|
528 |
return buf; |
|
529 |
} |
|
530 |
|
|
531 |
void |
|
532 |
io_dispatch(int fd, short ev, void *humppa) |
|
533 |
{ |
|
534 |
struct io *io = humppa; |
|
535 |
size_t w; |
|
536 |
ssize_t n; |
|
537 |
int saved_errno; |
|
538 |
|
|
539 |
io_frame_enter("io_dispatch", io, ev); |
|
540 |
|
|
541 |
if (ev == EV_TIMEOUT) { |
|
542 |
io_callback(io, IO_TIMEOUT); |
|
543 |
goto leave; |
|
544 |
} |
|
545 |
|
|
546 |
if (ev & EV_WRITE && (w = io_queued(io))) { |
|
547 |
if ((n = iobuf_write(io->iobuf, io->sock)) < 0) { |
|
548 |
if (n == IOBUF_WANT_WRITE) /* kqueue bug? */ |
|
549 |
goto read; |
|
550 |
if (n == IOBUF_CLOSED) |
|
551 |
io_callback(io, IO_DISCONNECTED); |
|
552 |
else { |
|
553 |
saved_errno = errno; |
|
554 |
io->error = strerror(errno); |
|
555 |
errno = saved_errno; |
|
556 |
io_callback(io, IO_ERROR); |
|
557 |
} |
|
558 |
goto leave; |
|
559 |
} |
|
560 |
if (w > io->lowat && w - n <= io->lowat) |
|
561 |
io_callback(io, IO_LOWAT); |
|
562 |
} |
|
563 |
read: |
|
564 |
|
|
565 |
if (ev & EV_READ) { |
|
566 |
if ((n = iobuf_read(io->iobuf, io->sock)) < 0) { |
|
567 |
if (n == IOBUF_CLOSED) |
|
568 |
io_callback(io, IO_DISCONNECTED); |
|
569 |
else { |
|
570 |
saved_errno = errno; |
|
571 |
io->error = strerror(errno); |
|
572 |
errno = saved_errno; |
|
573 |
io_callback(io, IO_ERROR); |
|
574 |
} |
|
575 |
goto leave; |
|
576 |
} |
|
577 |
if (n) |
|
578 |
io_callback(io, IO_DATAIN); |
|
579 |
} |
|
580 |
|
|
581 |
leave: |
|
582 |
io_frame_leave(io); |
|
583 |
} |
|
584 |
|
|
585 |
void |
|
586 |
io_callback(struct io *io, int evt) |
|
587 |
{ |
|
588 |
io->cb(io, evt); |
|
589 |
} |
|
590 |
|
|
591 |
int |
|
592 |
io_connect(struct io *io, const struct sockaddr *sa, const struct sockaddr *bsa) |
|
593 |
{ |
|
594 |
int sock, errno_save; |
|
595 |
|
|
596 |
if ((sock = socket(sa->sa_family, SOCK_STREAM, 0)) == -1) |
|
597 |
goto fail; |
|
598 |
|
|
599 |
io_set_blocking(sock, 0); |
|
600 |
io_set_linger(sock, 0); |
|
601 |
|
|
602 |
if (bsa && bind(sock, bsa, bsa->sa_len) == -1) |
|
603 |
goto fail; |
|
604 |
|
|
605 |
if (connect(sock, sa, sa->sa_len) == -1) |
|
606 |
if (errno != EINPROGRESS) |
|
607 |
goto fail; |
|
608 |
|
|
609 |
io->sock = sock; |
|
610 |
io_reset(io, EV_WRITE, io_dispatch_connect); |
|
611 |
|
|
612 |
return (sock); |
|
613 |
|
|
614 |
fail: |
|
615 |
if (sock != -1) { |
|
616 |
errno_save = errno; |
|
617 |
close(sock); |
|
618 |
errno = errno_save; |
|
619 |
io->error = strerror(errno); |
|
620 |
} |
|
621 |
return (-1); |
|
622 |
} |
|
623 |
|
|
624 |
void |
|
625 |
io_dispatch_connect(int fd, short ev, void *humppa) |
|
626 |
{ |
|
627 |
struct io *io = humppa; |
|
628 |
int r, e; |
|
629 |
socklen_t sl; |
|
630 |
|
|
631 |
io_frame_enter("io_dispatch_connect", io, ev); |
|
632 |
|
|
633 |
if (ev == EV_TIMEOUT) { |
|
634 |
close(fd); |
|
635 |
io->sock = -1; |
|
636 |
io_callback(io, IO_TIMEOUT); |
|
637 |
} else { |
|
638 |
sl = sizeof(e); |
|
639 |
r = getsockopt(fd, SOL_SOCKET, SO_ERROR, &e, &sl); |
|
640 |
if (r == -1) { |
|
641 |
warn("io_dispatch_connect: getsockopt"); |
|
642 |
e = errno; |
|
643 |
} |
|
644 |
if (e) { |
|
645 |
close(fd); |
|
646 |
io->sock = -1; |
|
647 |
io->error = strerror(e); |
|
648 |
io_callback(io, e == ETIMEDOUT ? IO_TIMEOUT : IO_ERROR); |
|
649 |
} |
|
650 |
else { |
|
651 |
io->state = IO_STATE_UP; |
|
652 |
io_callback(io, IO_CONNECTED); |
|
653 |
} |
|
654 |
} |
|
655 |
|
|
656 |
io_frame_leave(io); |
|
657 |
} |
|
658 |
|
|
659 |
#ifdef IO_SSL |
|
660 |
|
|
661 |
/*
 * Return a static description of the most recent OpenSSL error, or a
 * placeholder when the error queue is empty.
 */
static const char*
io_ssl_error(void)
{
	static char	buf[128];
	unsigned long	e;

	e = ERR_peek_last_error();
	if (e) {
		ERR_error_string(e, buf);
		return (buf);
	}

	return ("No SSL error");
}
|
675 |
|
|
676 |
int |
|
677 |
io_start_tls(struct io *io, void *ssl) |
|
678 |
{ |
|
679 |
int mode; |
|
680 |
|
|
681 |
mode = io->flags & IO_RW; |
|
682 |
if (mode == 0 || mode == IO_RW) |
|
683 |
errx(1, "io_start_tls(): full-duplex or unset"); |
|
684 |
|
|
685 |
if (io->ssl) |
|
686 |
errx(1, "io_start_tls(): SSL already started"); |
|
687 |
io->ssl = ssl; |
|
688 |
|
|
689 |
if (SSL_set_fd(io->ssl, io->sock) == 0) { |
|
690 |
ssl_error("io_start_ssl:SSL_set_fd"); |
|
691 |
return (-1); |
|
692 |
} |
|
693 |
|
|
694 |
if (mode == IO_WRITE) { |
|
695 |
io->state = IO_STATE_CONNECT_SSL; |
|
696 |
SSL_set_connect_state(io->ssl); |
|
697 |
io_reset(io, EV_WRITE, io_dispatch_connect_ssl); |
|
698 |
} else { |
|
699 |
io->state = IO_STATE_ACCEPT_SSL; |
|
700 |
SSL_set_accept_state(io->ssl); |
|
701 |
io_reset(io, EV_READ, io_dispatch_accept_ssl); |
|
702 |
} |
|
703 |
|
|
704 |
return (0); |
|
705 |
} |
|
706 |
|
|
707 |
void |
|
708 |
io_dispatch_accept_ssl(int fd, short event, void *humppa) |
|
709 |
{ |
|
710 |
struct io *io = humppa; |
|
711 |
int e, ret; |
|
712 |
|
|
713 |
io_frame_enter("io_dispatch_accept_ssl", io, event); |
|
714 |
|
|
715 |
if (event == EV_TIMEOUT) { |
|
716 |
io_callback(io, IO_TIMEOUT); |
|
717 |
goto leave; |
|
718 |
} |
|
719 |
|
|
720 |
if ((ret = SSL_accept(io->ssl)) > 0) { |
|
721 |
io->state = IO_STATE_UP; |
|
722 |
io_callback(io, IO_TLSREADY); |
|
723 |
goto leave; |
|
724 |
} |
|
725 |
|
|
726 |
switch ((e = SSL_get_error(io->ssl, ret))) { |
|
727 |
case SSL_ERROR_WANT_READ: |
|
728 |
io_reset(io, EV_READ, io_dispatch_accept_ssl); |
|
729 |
break; |
|
730 |
case SSL_ERROR_WANT_WRITE: |
|
731 |
io_reset(io, EV_WRITE, io_dispatch_accept_ssl); |
|
732 |
break; |
|
733 |
default: |
|
734 |
io->error = io_ssl_error(); |
|
735 |
ssl_error("io_dispatch_accept_ssl:SSL_accept"); |
|
736 |
io_callback(io, IO_ERROR); |
|
737 |
break; |
|
738 |
} |
|
739 |
|
|
740 |
leave: |
|
741 |
io_frame_leave(io); |
|
742 |
} |
|
743 |
|
|
744 |
void |
|
745 |
io_dispatch_connect_ssl(int fd, short event, void *humppa) |
|
746 |
{ |
|
747 |
struct io *io = humppa; |
|
748 |
int e, ret; |
|
749 |
|
|
750 |
io_frame_enter("io_dispatch_connect_ssl", io, event); |
|
751 |
|
|
752 |
if (event == EV_TIMEOUT) { |
|
753 |
io_callback(io, IO_TIMEOUT); |
|
754 |
goto leave; |
|
755 |
} |
|
756 |
|
|
757 |
if ((ret = SSL_connect(io->ssl)) > 0) { |
|
758 |
io->state = IO_STATE_UP; |
|
759 |
io_callback(io, IO_TLSREADY); |
|
760 |
goto leave; |
|
761 |
} |
|
762 |
|
|
763 |
switch ((e = SSL_get_error(io->ssl, ret))) { |
|
764 |
case SSL_ERROR_WANT_READ: |
|
765 |
io_reset(io, EV_READ, io_dispatch_connect_ssl); |
|
766 |
break; |
|
767 |
case SSL_ERROR_WANT_WRITE: |
|
768 |
io_reset(io, EV_WRITE, io_dispatch_connect_ssl); |
|
769 |
break; |
|
770 |
default: |
|
771 |
io->error = io_ssl_error(); |
|
772 |
ssl_error("io_dispatch_connect_ssl:SSL_connect"); |
|
773 |
io_callback(io, IO_ERROR); |
|
774 |
break; |
|
775 |
} |
|
776 |
|
|
777 |
leave: |
|
778 |
io_frame_leave(io); |
|
779 |
} |
|
780 |
|
|
781 |
void |
|
782 |
io_dispatch_read_ssl(int fd, short event, void *humppa) |
|
783 |
{ |
|
784 |
struct io *io = humppa; |
|
785 |
int n, saved_errno; |
|
786 |
|
|
787 |
io_frame_enter("io_dispatch_read_ssl", io, event); |
|
788 |
|
|
789 |
if (event == EV_TIMEOUT) { |
|
790 |
io_callback(io, IO_TIMEOUT); |
|
791 |
goto leave; |
|
792 |
} |
|
793 |
|
|
794 |
again: |
|
795 |
switch ((n = iobuf_read_ssl(io->iobuf, (SSL*)io->ssl))) { |
|
796 |
case IOBUF_WANT_READ: |
|
797 |
io_reset(io, EV_READ, io_dispatch_read_ssl); |
|
798 |
break; |
|
799 |
case IOBUF_WANT_WRITE: |
|
800 |
io_reset(io, EV_WRITE, io_dispatch_read_ssl); |
|
801 |
break; |
|
802 |
case IOBUF_CLOSED: |
|
803 |
io_callback(io, IO_DISCONNECTED); |
|
804 |
break; |
|
805 |
case IOBUF_ERROR: |
|
806 |
saved_errno = errno; |
|
807 |
io->error = strerror(errno); |
|
808 |
errno = saved_errno; |
|
809 |
io_callback(io, IO_ERROR); |
|
810 |
break; |
|
811 |
case IOBUF_SSLERROR: |
|
812 |
io->error = io_ssl_error(); |
|
813 |
ssl_error("io_dispatch_read_ssl:SSL_read"); |
|
814 |
io_callback(io, IO_ERROR); |
|
815 |
break; |
|
816 |
default: |
|
817 |
io_debug("io_dispatch_read_ssl(...) -> r=%i\n", n); |
|
818 |
io_callback(io, IO_DATAIN); |
|
819 |
if (current == io && IO_READING(io) && SSL_pending(io->ssl)) |
|
820 |
goto again; |
|
821 |
} |
|
822 |
|
|
823 |
leave: |
|
824 |
io_frame_leave(io); |
|
825 |
} |
|
826 |
|
|
827 |
void |
|
828 |
io_dispatch_write_ssl(int fd, short event, void *humppa) |
|
829 |
{ |
|
830 |
struct io *io = humppa; |
|
831 |
int n, saved_errno; |
|
832 |
size_t w2, w; |
|
833 |
|
|
834 |
io_frame_enter("io_dispatch_write_ssl", io, event); |
|
835 |
|
|
836 |
if (event == EV_TIMEOUT) { |
|
837 |
io_callback(io, IO_TIMEOUT); |
|
838 |
goto leave; |
|
839 |
} |
|
840 |
|
|
841 |
w = io_queued(io); |
|
842 |
switch ((n = iobuf_write_ssl(io->iobuf, (SSL*)io->ssl))) { |
|
843 |
case IOBUF_WANT_READ: |
|
844 |
io_reset(io, EV_READ, io_dispatch_write_ssl); |
|
845 |
break; |
|
846 |
case IOBUF_WANT_WRITE: |
|
847 |
io_reset(io, EV_WRITE, io_dispatch_write_ssl); |
|
848 |
break; |
|
849 |
case IOBUF_CLOSED: |
|
850 |
io_callback(io, IO_DISCONNECTED); |
|
851 |
break; |
|
852 |
case IOBUF_ERROR: |
|
853 |
saved_errno = errno; |
|
854 |
io->error = strerror(errno); |
|
855 |
errno = saved_errno; |
|
856 |
io_callback(io, IO_ERROR); |
|
857 |
break; |
|
858 |
case IOBUF_SSLERROR: |
|
859 |
io->error = io_ssl_error(); |
|
860 |
ssl_error("io_dispatch_write_ssl:SSL_write"); |
|
861 |
io_callback(io, IO_ERROR); |
|
862 |
break; |
|
863 |
default: |
|
864 |
io_debug("io_dispatch_write_ssl(...) -> w=%i\n", n); |
|
865 |
w2 = io_queued(io); |
|
866 |
if (w > io->lowat && w2 <= io->lowat) |
|
867 |
io_callback(io, IO_LOWAT); |
|
868 |
break; |
|
869 |
} |
|
870 |
|
|
871 |
leave: |
|
872 |
io_frame_leave(io); |
|
873 |
} |
|
874 |
|
|
875 |
void |
|
876 |
io_reload_ssl(struct io *io) |
|
877 |
{ |
|
878 |
short ev = 0; |
|
879 |
void (*dispatch)(int, short, void*) = NULL; |
|
880 |
|
|
881 |
switch (io->state) { |
|
882 |
case IO_STATE_CONNECT_SSL: |
|
883 |
ev = EV_WRITE; |
|
884 |
dispatch = io_dispatch_connect_ssl; |
|
885 |
break; |
|
886 |
case IO_STATE_ACCEPT_SSL: |
|
887 |
ev = EV_READ; |
|
888 |
dispatch = io_dispatch_accept_ssl; |
|
889 |
break; |
|
890 |
case IO_STATE_UP: |
|
891 |
ev = 0; |
|
892 |
if (IO_READING(io) && !(io->flags & IO_PAUSE_IN)) { |
|
893 |
ev = EV_READ; |
|
894 |
dispatch = io_dispatch_read_ssl; |
|
895 |
} |
|
896 |
else if (IO_WRITING(io) && !(io->flags & IO_PAUSE_OUT) && io_queued(io)) { |
|
897 |
ev = EV_WRITE; |
|
898 |
dispatch = io_dispatch_write_ssl; |
|
899 |
} |
|
900 |
if (! ev) |
|
901 |
return; /* paused */ |
|
902 |
break; |
|
903 |
default: |
|
904 |
errx(1, "io_reload_ssl(): bad state"); |
|
905 |
} |
|
906 |
|
|
907 |
io_reset(io, ev, dispatch); |
|
908 |
} |
|
909 |
|
|
910 |
#endif /* IO_SSL */ |