Line data Source code
1 : /*
2 : Unix SMB/CIFS implementation.
3 : main select loop and event handling
4 : Copyright (C) Andrew Tridgell 2003
5 : Copyright (C) Stefan Metzmacher 2009
6 :
7 : ** NOTE! The following LGPL license applies to the tevent
8 : ** library. This does NOT imply that all of Samba is released
9 : ** under the LGPL
10 :
11 : This library is free software; you can redistribute it and/or
12 : modify it under the terms of the GNU Lesser General Public
13 : License as published by the Free Software Foundation; either
14 : version 3 of the License, or (at your option) any later version.
15 :
16 : This library is distributed in the hope that it will be useful,
17 : but WITHOUT ANY WARRANTY; without even the implied warranty of
18 : MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
19 : Lesser General Public License for more details.
20 :
21 : You should have received a copy of the GNU Lesser General Public
22 : License along with this library; if not, see <http://www.gnu.org/licenses/>.
23 : */
24 :
25 : /*
26 : PLEASE READ THIS BEFORE MODIFYING!
27 :
28 : This module is a general abstraction for the main select loop and
29 : event handling. Do not ever put any localised hacks in here; instead
30 : register one of the possible event types and implement that event
31 : somewhere else.
32 :
33 : There are 2 types of event that are handled in this module:
34 :
35 : 1) a file descriptor becoming readable or writeable. This is mostly
36 : used for network sockets, but can be used for any type of file
37 : descriptor. You may only register one handler for each file
38 : descriptor/io combination or you will get unpredictable results
39 : (this means that you can have a handler for read events, and a
40 : separate handler for write events, but not two handlers that are
41 : both handling read events)
42 :
43 : 2) a timed event. You can register an event that happens at a
44 : specific time. You can register as many of these as you
45 : like. They are single shot - add a new timed event in the event
46 : handler to get another event.
47 :
48 : To set up a set of events you first need to create an event_context
49 : structure using the function tevent_context_init(). This returns a
50 : 'struct tevent_context' that you use in all subsequent calls.
51 :
52 : After that you can add/remove events that you are interested in
53 : using tevent_add_*() and talloc_free().
54 :
55 : Finally, you call tevent_loop_once() to block waiting for one of the
56 : events to occur, or tevent_loop_wait() which will loop
57 : forever (a minimal usage sketch follows this comment).
58 :
59 : */
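A minimal usage sketch of the flow described above (not part of tevent itself; the handler name example_timer_cb is illustrative and the program assumes linking against libtevent and libtalloc):

#include <stdio.h>
#include <talloc.h>
#include <tevent.h>

/* illustrative single-shot timer handler */
static void example_timer_cb(struct tevent_context *ev,
                             struct tevent_timer *te,
                             struct timeval current_time,
                             void *private_data)
{
        printf("timer fired\n");
}

int main(void)
{
        TALLOC_CTX *mem_ctx = talloc_new(NULL);
        struct tevent_context *ev = tevent_context_init(mem_ctx);
        struct tevent_timer *te;

        if (ev == NULL) {
                talloc_free(mem_ctx);
                return 1;
        }

        /* single shot: fires once, about one second from now */
        te = tevent_add_timer(ev, ev, tevent_timeval_current_ofs(1, 0),
                              example_timer_cb, NULL);
        if (te == NULL) {
                talloc_free(mem_ctx);
                return 1;
        }

        /* block until the pending event has been handled */
        tevent_loop_once(ev);

        /* freeing the talloc parent also frees ev and any remaining events */
        talloc_free(mem_ctx);
        return 0;
}

tevent_loop_wait(ev) could be used instead of tevent_loop_once(ev) to keep processing events until none remain.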
60 : #include "replace.h"
61 : #include "system/filesys.h"
62 : #ifdef HAVE_PTHREAD
63 : #include "system/threads.h"
64 : #endif
65 : #define TEVENT_DEPRECATED 1
66 : #include "tevent.h"
67 : #include "tevent_internal.h"
68 : #include "tevent_util.h"
69 : #ifdef HAVE_EVENTFD
70 : #include <sys/eventfd.h>
71 : #endif
72 :
73 : struct tevent_ops_list {
74 : struct tevent_ops_list *next, *prev;
75 : const char *name;
76 : const struct tevent_ops *ops;
77 : };
78 :
79 : /* list of registered event backends */
80 : static struct tevent_ops_list *tevent_backends = NULL;
81 : static char *tevent_default_backend = NULL;
82 :
83 : /*
84 : register an events backend
85 : */
86 121796 : bool tevent_register_backend(const char *name, const struct tevent_ops *ops)
87 : {
88 : struct tevent_ops_list *e;
89 :
90 304490 : for (e = tevent_backends; e != NULL; e = e->next) {
91 182694 : if (0 == strcmp(e->name, name)) {
92 : /* already registered, skip it */
93 0 : return true;
94 : }
95 : }
96 :
97 121796 : e = talloc(NULL, struct tevent_ops_list);
98 121796 : if (e == NULL) return false;
99 :
100 121796 : e->name = name;
101 121796 : e->ops = ops;
102 121796 : DLIST_ADD(tevent_backends, e);
103 :
104 121796 : return true;
105 : }
106 :
107 : /*
108 : set the default event backend
109 : */
110 0 : void tevent_set_default_backend(const char *backend)
111 : {
112 0 : talloc_free(tevent_default_backend);
113 0 : tevent_default_backend = talloc_strdup(NULL, backend);
114 0 : }
115 :
116 : /*
117 : initialise backends if not already done
118 : */
119 169814843 : static void tevent_backend_init(void)
120 : {
121 : static bool done;
122 :
123 169814843 : if (done) {
124 169784394 : return;
125 : }
126 :
127 30449 : done = true;
128 :
129 30449 : tevent_poll_init();
130 30449 : tevent_poll_mt_init();
131 : #if defined(HAVE_EPOLL)
132 30449 : tevent_epoll_init();
133 : #endif
134 :
135 30449 : tevent_standard_init();
136 : }
137 :
138 169813875 : const struct tevent_ops *tevent_find_ops_byname(const char *name)
139 : {
140 : struct tevent_ops_list *e;
141 :
142 169813875 : tevent_backend_init();
143 :
144 169813875 : if (name == NULL) {
145 56604624 : name = tevent_default_backend;
146 : }
147 169813875 : if (name == NULL) {
148 56604624 : name = "standard";
149 : }
150 :
151 396232375 : for (e = tevent_backends; e != NULL; e = e->next) {
152 396232375 : if (0 == strcmp(e->name, name)) {
153 169813875 : return e->ops;
154 : }
155 : }
156 :
157 0 : return NULL;
158 : }
159 :
160 : /*
161 : list available backends
162 : */
163 968 : const char **tevent_backend_list(TALLOC_CTX *mem_ctx)
164 : {
165 968 : const char **list = NULL;
166 : struct tevent_ops_list *e;
167 968 : size_t idx = 0;
168 :
169 968 : tevent_backend_init();
170 :
171 4840 : for (e=tevent_backends;e;e=e->next) {
172 3872 : idx += 1;
173 : }
174 :
175 968 : list = talloc_zero_array(mem_ctx, const char *, idx+1);
176 968 : if (list == NULL) {
177 0 : return NULL;
178 : }
179 :
180 968 : idx = 0;
181 4840 : for (e=tevent_backends;e;e=e->next) {
182 3872 : list[idx] = talloc_strdup(list, e->name);
183 3872 : if (list[idx] == NULL) {
184 0 : TALLOC_FREE(list);
185 0 : return NULL;
186 : }
187 3872 : idx += 1;
188 : }
189 :
190 968 : return list;
191 : }
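A sketch of how a caller might use the backend registry managed above; "poll" is one of the names registered by tevent_backend_init(), and the error handling is illustrative (the fragment assumes the usual <stdio.h>, <talloc.h> and <tevent.h> includes):

TALLOC_CTX *tmp = talloc_new(NULL);
const char **backends = tevent_backend_list(tmp);
size_t i;

for (i = 0; backends != NULL && backends[i] != NULL; i++) {
        printf("available backend: %s\n", backends[i]);
}

/* either pin the process-wide default ... */
tevent_set_default_backend("poll");

/* ... or pick a backend explicitly for a single context */
struct tevent_context *ev = tevent_context_init_byname(tmp, "poll");
if (ev == NULL) {
        /* unknown backend name or allocation failure */
}

talloc_free(tmp);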
192 :
193 : static void tevent_common_wakeup_fini(struct tevent_context *ev);
194 :
195 : #ifdef HAVE_PTHREAD
196 :
197 : static pthread_mutex_t tevent_contexts_mutex = PTHREAD_MUTEX_INITIALIZER;
198 : static struct tevent_context *tevent_contexts = NULL;
199 : static pthread_once_t tevent_atfork_initialized = PTHREAD_ONCE_INIT;
200 : static pid_t tevent_cached_global_pid = 0;
201 :
202 10701726 : static void tevent_atfork_prepare(void)
203 : {
204 : struct tevent_context *ev;
205 : int ret;
206 :
207 10701726 : ret = pthread_mutex_lock(&tevent_contexts_mutex);
208 10701726 : if (ret != 0) {
209 0 : abort();
210 : }
211 :
212 32234051 : for (ev = tevent_contexts; ev != NULL; ev = ev->next) {
213 : struct tevent_threaded_context *tctx;
214 :
215 21534631 : for (tctx = ev->threaded_contexts; tctx != NULL;
216 2306 : tctx = tctx->next) {
217 2306 : ret = pthread_mutex_lock(&tctx->event_ctx_mutex);
218 2306 : if (ret != 0) {
219 0 : tevent_abort(ev, "pthread_mutex_lock failed");
220 : }
221 : }
222 :
223 21532325 : ret = pthread_mutex_lock(&ev->scheduled_mutex);
224 21532325 : if (ret != 0) {
225 0 : tevent_abort(ev, "pthread_mutex_lock failed");
226 : }
227 : }
228 10701726 : }
229 :
230 10664107 : static void tevent_atfork_parent(void)
231 : {
232 : struct tevent_context *ev;
233 : int ret;
234 :
235 32121606 : for (ev = DLIST_TAIL(tevent_contexts); ev != NULL;
236 21457499 : ev = DLIST_PREV(ev)) {
237 : struct tevent_threaded_context *tctx;
238 :
239 21457499 : ret = pthread_mutex_unlock(&ev->scheduled_mutex);
240 21457499 : if (ret != 0) {
241 0 : tevent_abort(ev, "pthread_mutex_unlock failed");
242 : }
243 :
244 21459755 : for (tctx = DLIST_TAIL(ev->threaded_contexts); tctx != NULL;
245 2256 : tctx = DLIST_PREV(tctx)) {
246 2256 : ret = pthread_mutex_unlock(&tctx->event_ctx_mutex);
247 2256 : if (ret != 0) {
248 0 : tevent_abort(
249 : ev, "pthread_mutex_unlock failed");
250 : }
251 : }
252 : }
253 :
254 10664107 : ret = pthread_mutex_unlock(&tevent_contexts_mutex);
255 10664107 : if (ret != 0) {
256 0 : abort();
257 : }
258 10664107 : }
259 :
260 37619 : static void tevent_atfork_child(void)
261 : {
262 : struct tevent_context *ev;
263 : int ret;
264 :
265 37619 : tevent_cached_global_pid = getpid();
266 :
267 112445 : for (ev = DLIST_TAIL(tevent_contexts); ev != NULL;
268 74826 : ev = DLIST_PREV(ev)) {
269 : struct tevent_threaded_context *tctx;
270 :
271 74876 : for (tctx = DLIST_TAIL(ev->threaded_contexts); tctx != NULL;
272 50 : tctx = DLIST_PREV(tctx)) {
273 50 : tctx->event_ctx = NULL;
274 :
275 50 : ret = pthread_mutex_unlock(&tctx->event_ctx_mutex);
276 50 : if (ret != 0) {
277 0 : tevent_abort(
278 : ev, "pthread_mutex_unlock failed");
279 : }
280 : }
281 :
282 74826 : ev->threaded_contexts = NULL;
283 :
284 74826 : ret = pthread_mutex_unlock(&ev->scheduled_mutex);
285 74826 : if (ret != 0) {
286 0 : tevent_abort(ev, "pthread_mutex_unlock failed");
287 : }
288 : }
289 :
290 37619 : ret = pthread_mutex_unlock(&tevent_contexts_mutex);
291 37619 : if (ret != 0) {
292 0 : abort();
293 : }
294 37619 : }
295 :
296 30393 : static void tevent_prep_atfork(void)
297 : {
298 : int ret;
299 :
300 30393 : ret = pthread_atfork(tevent_atfork_prepare,
301 : tevent_atfork_parent,
302 : tevent_atfork_child);
303 30393 : if (ret != 0) {
304 0 : abort();
305 : }
306 :
307 30393 : tevent_cached_global_pid = getpid();
308 30393 : }
309 :
310 : #endif
311 :
312 204809262 : static int tevent_init_globals(void)
313 : {
314 : #ifdef HAVE_PTHREAD
315 : int ret;
316 :
317 204809262 : ret = pthread_once(&tevent_atfork_initialized, tevent_prep_atfork);
318 204809262 : if (ret != 0) {
319 0 : return ret;
320 : }
321 : #endif
322 :
323 204809262 : return 0;
324 : }
325 :
326 148162900 : _PUBLIC_ pid_t tevent_cached_getpid(void)
327 : {
328 : #ifdef HAVE_PTHREAD
329 148162900 : tevent_init_globals();
330 : #ifdef TEVENT_VERIFY_CACHED_GETPID
331 : if (tevent_cached_global_pid != getpid()) {
332 : tevent_abort(NULL, "tevent_cached_global_pid invalid");
333 : }
334 : #endif
335 148162900 : if (tevent_cached_global_pid != 0) {
336 148162900 : return tevent_cached_global_pid;
337 : }
338 : #endif
339 0 : return getpid();
340 : }
341 :
342 56636748 : int tevent_common_context_destructor(struct tevent_context *ev)
343 : {
344 : struct tevent_fd *fd, *fn;
345 : struct tevent_timer *te, *tn;
346 : struct tevent_immediate *ie, *in;
347 : struct tevent_signal *se, *sn;
348 : struct tevent_wrapper_glue *gl, *gn;
349 : #ifdef HAVE_PTHREAD
350 : int ret;
351 : #endif
352 :
353 56636748 : if (ev->wrapper.glue != NULL) {
354 0 : tevent_abort(ev,
355 : "tevent_common_context_destructor() active on wrapper");
356 : }
357 :
358 : #ifdef HAVE_PTHREAD
359 56636748 : ret = pthread_mutex_lock(&tevent_contexts_mutex);
360 56636748 : if (ret != 0) {
361 0 : abort();
362 : }
363 :
364 56636748 : DLIST_REMOVE(tevent_contexts, ev);
365 :
366 56636748 : ret = pthread_mutex_unlock(&tevent_contexts_mutex);
367 56636748 : if (ret != 0) {
368 0 : abort();
369 : }
370 :
371 64149838 : while (ev->threaded_contexts != NULL) {
372 3 : struct tevent_threaded_context *tctx = ev->threaded_contexts;
373 :
374 3 : ret = pthread_mutex_lock(&tctx->event_ctx_mutex);
375 3 : if (ret != 0) {
376 0 : abort();
377 : }
378 :
379 : /*
380 : * Indicate to the thread that the tevent_context is
381 : * gone. The counterpart of this is in
382 : * _tevent_threaded_schedule_immediate, there we read
383 : * this under the threaded_context's mutex.
384 : */
385 :
386 3 : tctx->event_ctx = NULL;
387 :
388 3 : ret = pthread_mutex_unlock(&tctx->event_ctx_mutex);
389 3 : if (ret != 0) {
390 0 : abort();
391 : }
392 :
393 3 : DLIST_REMOVE(ev->threaded_contexts, tctx);
394 : }
395 :
396 56636748 : ret = pthread_mutex_destroy(&ev->scheduled_mutex);
397 56636748 : if (ret != 0) {
398 0 : abort();
399 : }
400 : #endif
401 :
402 56636748 : for (gl = ev->wrapper.list; gl; gl = gn) {
403 0 : gn = gl->next;
404 :
405 0 : gl->main_ev = NULL;
406 0 : DLIST_REMOVE(ev->wrapper.list, gl);
407 : }
408 :
409 56636748 : tevent_common_wakeup_fini(ev);
410 :
411 57032979 : for (fd = ev->fd_events; fd; fd = fn) {
412 396231 : fn = fd->next;
413 396231 : tevent_trace_fd_callback(fd->event_ctx, fd, TEVENT_EVENT_TRACE_DETACH);
414 396231 : fd->wrapper = NULL;
415 396231 : fd->event_ctx = NULL;
416 396231 : DLIST_REMOVE(ev->fd_events, fd);
417 : }
418 :
419 56636748 : ev->last_zero_timer = NULL;
420 56689992 : for (te = ev->timer_events; te; te = tn) {
421 53244 : tn = te->next;
422 53244 : tevent_trace_timer_callback(te->event_ctx, te, TEVENT_EVENT_TRACE_DETACH);
423 53244 : te->wrapper = NULL;
424 53244 : te->event_ctx = NULL;
425 53244 : DLIST_REMOVE(ev->timer_events, te);
426 : }
427 :
428 56636839 : for (ie = ev->immediate_events; ie; ie = in) {
429 91 : in = ie->next;
430 91 : tevent_trace_immediate_callback(ie->event_ctx, ie, TEVENT_EVENT_TRACE_DETACH);
431 91 : ie->wrapper = NULL;
432 91 : ie->event_ctx = NULL;
433 91 : ie->cancel_fn = NULL;
434 91 : DLIST_REMOVE(ev->immediate_events, ie);
435 : }
436 :
437 56731679 : for (se = ev->signal_events; se; se = sn) {
438 94931 : sn = se->next;
439 94931 : tevent_trace_signal_callback(se->event_ctx, se, TEVENT_EVENT_TRACE_DETACH);
440 94931 : se->wrapper = NULL;
441 94931 : se->event_ctx = NULL;
442 94931 : DLIST_REMOVE(ev->signal_events, se);
443 : /*
444 : * This is important. Otherwise signals
445 : * are handled twice in the child, e.g. SIGHUP:
446 : * one added in the parent, and another one in
447 : * the child. -- BoYang
448 : */
449 94931 : tevent_cleanup_pending_signal_handlers(se);
450 : }
451 :
452 : /* Remove the nesting hook or we get an abort when nesting is
453 : * not allowed. -- SSS
454 : * Note that we need to leave the allowed flag at its current
455 : * value, otherwise the use in tevent_re_initialise() will
456 : * leave the event context with allowed forced to false, which
457 : * will break users that expect nesting to be allowed.
458 : */
459 56636748 : ev->nesting.level = 0;
460 56636748 : ev->nesting.hook_fn = NULL;
461 56636748 : ev->nesting.hook_private = NULL;
462 :
463 56636748 : return 0;
464 : }
465 :
466 56646362 : static int tevent_common_context_constructor(struct tevent_context *ev)
467 : {
468 : int ret;
469 :
470 56646362 : ret = tevent_init_globals();
471 56646362 : if (ret != 0) {
472 0 : return ret;
473 : }
474 :
475 : #ifdef HAVE_PTHREAD
476 :
477 56646362 : ret = pthread_mutex_init(&ev->scheduled_mutex, NULL);
478 56646362 : if (ret != 0) {
479 0 : return ret;
480 : }
481 :
482 56646362 : ret = pthread_mutex_lock(&tevent_contexts_mutex);
483 56646362 : if (ret != 0) {
484 0 : pthread_mutex_destroy(&ev->scheduled_mutex);
485 0 : return ret;
486 : }
487 :
488 56646362 : DLIST_ADD(tevent_contexts, ev);
489 :
490 56646362 : ret = pthread_mutex_unlock(&tevent_contexts_mutex);
491 56646362 : if (ret != 0) {
492 0 : abort();
493 : }
494 : #endif
495 :
496 56646362 : talloc_set_destructor(ev, tevent_common_context_destructor);
497 :
498 56646362 : return 0;
499 : }
500 :
501 77969157 : void tevent_common_check_double_free(TALLOC_CTX *ptr, const char *reason)
502 : {
503 77969157 : void *parent_ptr = talloc_parent(ptr);
504 77969157 : size_t parent_blocks = talloc_total_blocks(parent_ptr);
505 :
506 77969157 : if (parent_ptr != NULL && parent_blocks == 0) {
507 : /*
508 : * This is an implicit talloc free, as we still have a parent
509 : * but it's already being destroyed. Note that
510 : * talloc_total_blocks(ptr) also just returns 0 if a
511 : * talloc_free(ptr) is still in progress of freeing all
512 : * children.
513 : */
514 77969157 : return;
515 : }
516 :
517 0 : tevent_abort(NULL, reason);
518 : }
519 :
520 : /*
521 : create an event_context structure for a specific implementation.
522 : This must be the first events call, and all subsequent calls pass
523 : this event_context as the first element. Event handlers also
524 : receive this as their first argument.
525 :
526 : This function allows third-party applications to hook in glue code
527 : to their own event loop code, so that they can make async use of our client libs.
528 :
529 : NOTE: use tevent_context_init() inside of samba!
530 : */
531 56604625 : struct tevent_context *tevent_context_init_ops(TALLOC_CTX *mem_ctx,
532 : const struct tevent_ops *ops,
533 : void *additional_data)
534 : {
535 : struct tevent_context *ev;
536 : int ret;
537 :
538 56604625 : ev = talloc_zero(mem_ctx, struct tevent_context);
539 56604625 : if (!ev) return NULL;
540 :
541 56604625 : ret = tevent_common_context_constructor(ev);
542 56604625 : if (ret != 0) {
543 0 : talloc_free(ev);
544 0 : return NULL;
545 : }
546 :
547 56604625 : ev->ops = ops;
548 56604625 : ev->additional_data = additional_data;
549 :
550 56604625 : ret = ev->ops->context_init(ev);
551 56604625 : if (ret != 0) {
552 0 : talloc_free(ev);
553 0 : return NULL;
554 : }
555 :
556 56604625 : return ev;
557 : }
558 :
559 : /*
560 : create an event_context structure. This must be the first events
561 : call, and all subsequent calls pass this event_context as the first
562 : element. Event handlers also receive this as their first argument.
563 : */
564 56604625 : struct tevent_context *tevent_context_init_byname(TALLOC_CTX *mem_ctx,
565 : const char *name)
566 : {
567 : const struct tevent_ops *ops;
568 :
569 56604625 : ops = tevent_find_ops_byname(name);
570 56604625 : if (ops == NULL) {
571 0 : return NULL;
572 : }
573 :
574 56604625 : return tevent_context_init_ops(mem_ctx, ops, NULL);
575 : }
576 :
577 :
578 : /*
579 : create an event_context structure. This must be the first events
580 : call, and all subsequent calls pass this event_context as the first
581 : element. Event handlers also receive this as their first argument.
582 : */
583 56604624 : struct tevent_context *tevent_context_init(TALLOC_CTX *mem_ctx)
584 : {
585 56604624 : return tevent_context_init_byname(mem_ctx, NULL);
586 : }
587 :
588 : /*
589 : add an fd based event
590 : return NULL on failure (memory allocation error)
591 : */
592 11775096 : struct tevent_fd *_tevent_add_fd(struct tevent_context *ev,
593 : TALLOC_CTX *mem_ctx,
594 : int fd,
595 : uint16_t flags,
596 : tevent_fd_handler_t handler,
597 : void *private_data,
598 : const char *handler_name,
599 : const char *location)
600 : {
601 11775096 : return ev->ops->add_fd(ev, mem_ctx, fd, flags, handler, private_data,
602 : handler_name, location);
603 : }
604 :
605 : /*
606 : set a close function on the fd event
607 : */
608 9711362 : void tevent_fd_set_close_fn(struct tevent_fd *fde,
609 : tevent_fd_close_fn_t close_fn)
610 : {
611 9711362 : if (!fde) return;
612 9711362 : if (!fde->event_ctx) return;
613 9711362 : fde->event_ctx->ops->set_fd_close_fn(fde, close_fn);
614 : }
615 :
616 9488396 : static void tevent_fd_auto_close_fn(struct tevent_context *ev,
617 : struct tevent_fd *fde,
618 : int fd,
619 : void *private_data)
620 : {
621 9488396 : close(fd);
622 9488396 : }
623 :
624 9493810 : void tevent_fd_set_auto_close(struct tevent_fd *fde)
625 : {
626 9493810 : tevent_fd_set_close_fn(fde, tevent_fd_auto_close_fn);
627 9493810 : }
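A sketch tying the fd calls together: watch a socket for readability and let tevent close the descriptor when the fde is freed. The handler name and the pre-existing ev and sock_fd are assumptions of the example:

static void sock_readable(struct tevent_context *ev,
                          struct tevent_fd *fde,
                          uint16_t flags,
                          void *private_data)
{
        if (flags & TEVENT_FD_READ) {
                /* read from tevent_fd_get_fd(fde) here */
        }
}

/* later, with an existing struct tevent_context *ev and an open sock_fd: */
struct tevent_fd *fde = tevent_add_fd(ev, ev, sock_fd, TEVENT_FD_READ,
                                      sock_readable, NULL);
if (fde != NULL) {
        /* close(sock_fd) happens automatically once fde is freed */
        tevent_fd_set_auto_close(fde);
}

As the header comment of this file notes, only one handler may be registered per fd/direction combination.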
628 :
629 : /*
630 : return the fd event flags
631 : */
632 28047342 : uint16_t tevent_fd_get_flags(struct tevent_fd *fde)
633 : {
634 28047342 : if (!fde) return 0;
635 27781852 : if (!fde->event_ctx) return 0;
636 27714976 : return fde->event_ctx->ops->get_fd_flags(fde);
637 : }
638 :
639 : /*
640 : set the fd event flags
641 : */
642 18795812 : void tevent_fd_set_flags(struct tevent_fd *fde, uint16_t flags)
643 : {
644 18795812 : if (!fde) return;
645 18791085 : if (!fde->event_ctx) return;
646 18769586 : fde->event_ctx->ops->set_fd_flags(fde, flags);
647 : }
648 :
649 1 : bool tevent_signal_support(struct tevent_context *ev)
650 : {
651 1 : if (ev->ops->add_signal) {
652 1 : return true;
653 : }
654 0 : return false;
655 : }
656 :
657 : static void (*tevent_abort_fn)(const char *reason);
658 :
659 29549 : void tevent_set_abort_fn(void (*abort_fn)(const char *reason))
660 : {
661 29549 : tevent_abort_fn = abort_fn;
662 29549 : }
663 :
664 0 : void tevent_abort(struct tevent_context *ev, const char *reason)
665 : {
666 0 : if (ev != NULL) {
667 0 : tevent_debug(ev, TEVENT_DEBUG_FATAL,
668 : "abort: %s\n", reason);
669 : }
670 :
671 0 : if (!tevent_abort_fn) {
672 0 : abort();
673 : }
674 :
675 0 : tevent_abort_fn(reason);
676 0 : }
677 :
678 : /*
679 : add a timer event
680 : return NULL on failure
681 : */
682 288214364 : struct tevent_timer *_tevent_add_timer(struct tevent_context *ev,
683 : TALLOC_CTX *mem_ctx,
684 : struct timeval next_event,
685 : tevent_timer_handler_t handler,
686 : void *private_data,
687 : const char *handler_name,
688 : const char *location)
689 : {
690 288214364 : return ev->ops->add_timer(ev, mem_ctx, next_event, handler, private_data,
691 : handler_name, location);
692 : }
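Timers are single shot (see the header comment); a periodic tick is usually expressed by re-arming from inside the handler. A sketch with an illustrative handler name:

static void tick_handler(struct tevent_context *ev,
                         struct tevent_timer *te,
                         struct timeval current_time,
                         void *private_data)
{
        /* do the periodic work, then schedule the next tick */
        tevent_add_timer(ev, ev, tevent_timeval_current_ofs(1, 0),
                         tick_handler, private_data);
}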
693 :
694 : /*
695 : allocate an immediate event
696 : return NULL on failure (memory allocation error)
697 : */
698 75989631 : struct tevent_immediate *_tevent_create_immediate(TALLOC_CTX *mem_ctx,
699 : const char *location)
700 : {
701 : struct tevent_immediate *im;
702 :
703 75989631 : im = talloc(mem_ctx, struct tevent_immediate);
704 75989631 : if (im == NULL) return NULL;
705 :
706 75989631 : *im = (struct tevent_immediate) { .create_location = location };
707 :
708 75989631 : return im;
709 : }
710 :
711 : /*
712 : schedule an immediate event
713 : */
714 17397548 : void _tevent_schedule_immediate(struct tevent_immediate *im,
715 : struct tevent_context *ev,
716 : tevent_immediate_handler_t handler,
717 : void *private_data,
718 : const char *handler_name,
719 : const char *location)
720 : {
721 17397548 : ev->ops->schedule_immediate(im, ev, handler, private_data,
722 : handler_name, location);
723 17397548 : }
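A sketch of the create/schedule pair: defer a piece of work so that it runs from the event loop rather than from the current call stack (handler name illustrative, ev assumed to exist):

static void deferred_work(struct tevent_context *ev,
                          struct tevent_immediate *im,
                          void *private_data)
{
        /* runs on a following loop iteration, not on the caller's stack */
}

/* later, with an existing struct tevent_context *ev: */
struct tevent_immediate *im = tevent_create_immediate(ev);
if (im != NULL) {
        tevent_schedule_immediate(im, ev, deferred_work, NULL);
}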
724 :
725 : /*
726 : add a signal event
727 :
728 : sa_flags are flags to sigaction(2)
729 :
730 : return NULL on failure
731 : */
732 121910 : struct tevent_signal *_tevent_add_signal(struct tevent_context *ev,
733 : TALLOC_CTX *mem_ctx,
734 : int signum,
735 : int sa_flags,
736 : tevent_signal_handler_t handler,
737 : void *private_data,
738 : const char *handler_name,
739 : const char *location)
740 : {
741 121910 : return ev->ops->add_signal(ev, mem_ctx, signum, sa_flags, handler, private_data,
742 : handler_name, location);
743 : }
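A sketch of routing a Unix signal through the event loop instead of handling it in raw signal context (handler name illustrative; SIGHUP comes from <signal.h>, ev is assumed to exist):

static void sighup_handler(struct tevent_context *ev,
                           struct tevent_signal *se,
                           int signum,
                           int count,
                           void *siginfo,
                           void *private_data)
{
        /* a safe place to reload configuration etc. */
}

/* later, with an existing struct tevent_context *ev: */
if (tevent_signal_support(ev)) {
        struct tevent_signal *se = tevent_add_signal(ev, ev, SIGHUP, 0,
                                                     sighup_handler, NULL);
        if (se == NULL) {
                /* registration or allocation failure */
        }
}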
744 :
745 56481866 : void tevent_loop_allow_nesting(struct tevent_context *ev)
746 : {
747 56481866 : if (ev->wrapper.glue != NULL) {
748 0 : tevent_abort(ev, "tevent_loop_allow_nesting() on wrapper");
749 0 : return;
750 : }
751 :
752 56481866 : if (ev->wrapper.list != NULL) {
753 0 : tevent_abort(ev, "tevent_loop_allow_nesting() with wrapper");
754 0 : return;
755 : }
756 :
757 56481866 : ev->nesting.allowed = true;
758 : }
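A sketch of opting in to nesting; it must happen on the plain context, before any wrappers are attached (mem_ctx stands in for whatever talloc parent the caller uses):

struct tevent_context *ev = tevent_context_init(mem_ctx);
if (ev != NULL) {
        /* permit nested tevent_loop_once() calls later on */
        tevent_loop_allow_nesting(ev);
}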
759 :
760 1327 : void tevent_loop_set_nesting_hook(struct tevent_context *ev,
761 : tevent_nesting_hook hook,
762 : void *private_data)
763 : {
764 1327 : if (ev->nesting.hook_fn &&
765 1323 : (ev->nesting.hook_fn != hook ||
766 1323 : ev->nesting.hook_private != private_data)) {
767 : /* the way the nesting hook code is currently written
768 : we cannot support two different nesting hooks at the
769 : same time. */
770 0 : tevent_abort(ev, "tevent: Violation of nesting hook rules\n");
771 : }
772 1327 : ev->nesting.hook_fn = hook;
773 1327 : ev->nesting.hook_private = private_data;
774 1327 : }
775 :
776 0 : static void tevent_abort_nesting(struct tevent_context *ev, const char *location)
777 : {
778 : const char *reason;
779 :
780 0 : reason = talloc_asprintf(NULL, "tevent_loop_once() nesting at %s",
781 : location);
782 0 : if (!reason) {
783 0 : reason = "tevent_loop_once() nesting";
784 : }
785 :
786 0 : tevent_abort(ev, reason);
787 0 : }
788 :
789 : /*
790 : do a single event loop using the events defined in ev
791 : */
792 218142983 : int _tevent_loop_once(struct tevent_context *ev, const char *location)
793 : {
794 : int ret;
795 218142983 : void *nesting_stack_ptr = NULL;
796 :
797 218142983 : ev->nesting.level++;
798 :
799 218142983 : if (ev->nesting.level > 1) {
800 63698368 : if (!ev->nesting.allowed) {
801 0 : tevent_abort_nesting(ev, location);
802 0 : errno = ELOOP;
803 0 : return -1;
804 : }
805 : }
806 218142983 : if (ev->nesting.level > 0) {
807 218142983 : if (ev->nesting.hook_fn) {
808 : int ret2;
809 2948977 : ret2 = ev->nesting.hook_fn(ev,
810 : ev->nesting.hook_private,
811 : ev->nesting.level,
812 : true,
813 : (void *)&nesting_stack_ptr,
814 : location);
815 2948977 : if (ret2 != 0) {
816 0 : ret = ret2;
817 0 : goto done;
818 : }
819 : }
820 : }
821 :
822 218142983 : tevent_trace_point_callback(ev, TEVENT_TRACE_BEFORE_LOOP_ONCE);
823 218142983 : ret = ev->ops->loop_once(ev, location);
824 218101193 : tevent_trace_point_callback(ev, TEVENT_TRACE_AFTER_LOOP_ONCE);
825 :
826 : /* New event (and request) will always start with call depth 0. */
827 218101193 : tevent_thread_call_depth_set(0);
828 :
829 218101193 : if (ev->nesting.level > 0) {
830 218101193 : if (ev->nesting.hook_fn) {
831 : int ret2;
832 2948978 : ret2 = ev->nesting.hook_fn(ev,
833 : ev->nesting.hook_private,
834 : ev->nesting.level,
835 : false,
836 : (void *)&nesting_stack_ptr,
837 : location);
838 2948978 : if (ret2 != 0) {
839 0 : ret = ret2;
840 0 : goto done;
841 : }
842 : }
843 : }
844 :
845 218101193 : done:
846 218101193 : ev->nesting.level--;
847 218101193 : return ret;
848 : }
849 :
850 : /*
851 : this is a performance optimization for the samba4 nested event loop problems
852 : */
853 0 : int _tevent_loop_until(struct tevent_context *ev,
854 : bool (*finished)(void *private_data),
855 : void *private_data,
856 : const char *location)
857 : {
858 0 : int ret = 0;
859 0 : void *nesting_stack_ptr = NULL;
860 :
861 0 : ev->nesting.level++;
862 :
863 0 : if (ev->nesting.level > 1) {
864 0 : if (!ev->nesting.allowed) {
865 0 : tevent_abort_nesting(ev, location);
866 0 : errno = ELOOP;
867 0 : return -1;
868 : }
869 : }
870 0 : if (ev->nesting.level > 0) {
871 0 : if (ev->nesting.hook_fn) {
872 : int ret2;
873 0 : ret2 = ev->nesting.hook_fn(ev,
874 : ev->nesting.hook_private,
875 : ev->nesting.level,
876 : true,
877 : (void *)&nesting_stack_ptr,
878 : location);
879 0 : if (ret2 != 0) {
880 0 : ret = ret2;
881 0 : goto done;
882 : }
883 : }
884 : }
885 :
886 0 : while (!finished(private_data)) {
887 0 : tevent_trace_point_callback(ev, TEVENT_TRACE_BEFORE_LOOP_ONCE);
888 0 : ret = ev->ops->loop_once(ev, location);
889 0 : tevent_trace_point_callback(ev, TEVENT_TRACE_AFTER_LOOP_ONCE);
890 0 : if (ret != 0) {
891 0 : break;
892 : }
893 : }
894 :
895 0 : if (ev->nesting.level > 0) {
896 0 : if (ev->nesting.hook_fn) {
897 : int ret2;
898 0 : ret2 = ev->nesting.hook_fn(ev,
899 : ev->nesting.hook_private,
900 : ev->nesting.level,
901 : false,
902 : (void *)&nesting_stack_ptr,
903 : location);
904 0 : if (ret2 != 0) {
905 0 : ret = ret2;
906 0 : goto done;
907 : }
908 : }
909 : }
910 :
911 0 : done:
912 0 : ev->nesting.level--;
913 0 : return ret;
914 : }
915 :
916 47908522 : bool tevent_common_have_events(struct tevent_context *ev)
917 : {
918 47908522 : if (ev->fd_events != NULL) {
919 47908521 : if (ev->fd_events != ev->wakeup_fde) {
920 47873675 : return true;
921 : }
922 34846 : if (ev->fd_events->next != NULL) {
923 34846 : return true;
924 : }
925 :
926 : /*
927 : * At this point we just have the wakeup pipe event as
928 : * the only fd_event. That one does not count as a
929 : * regular event, so look at the other event types.
930 : */
931 : }
932 :
933 2 : return ((ev->timer_events != NULL) ||
934 2 : (ev->immediate_events != NULL) ||
935 1 : (ev->signal_events != NULL));
936 : }
937 :
938 : /*
939 : return non-zero on failure, or 0 once all fd events are removed
940 : */
941 41701 : int tevent_common_loop_wait(struct tevent_context *ev,
942 : const char *location)
943 : {
944 : /*
945 : * loop as long as we have events pending
946 : */
947 47908522 : while (tevent_common_have_events(ev)) {
948 : int ret;
949 47908521 : ret = _tevent_loop_once(ev, location);
950 47866821 : if (ret != 0) {
951 0 : tevent_debug(ev, TEVENT_DEBUG_FATAL,
952 : "_tevent_loop_once() failed: %d - %s\n",
953 0 : ret, strerror(errno));
954 0 : return ret;
955 : }
956 : }
957 :
958 1 : tevent_debug(ev, TEVENT_DEBUG_WARNING,
959 : "tevent_common_loop_wait() out of events\n");
960 1 : return 0;
961 : }
962 :
963 : /*
964 : return non-zero on failure, or 0 once all fd events are removed
965 : */
966 41701 : int _tevent_loop_wait(struct tevent_context *ev, const char *location)
967 : {
968 41701 : return ev->ops->loop_wait(ev, location);
969 : }
970 :
971 :
972 : /*
973 : re-initialise a tevent context. This leaves you with the same
974 : event context, but all events are wiped and the structure is
975 : re-initialised. This is most useful after a fork()
976 :
977 : zero is returned on success, non-zero on failure
978 : */
979 41737 : int tevent_re_initialise(struct tevent_context *ev)
980 : {
981 41737 : tevent_common_context_destructor(ev);
982 :
983 41737 : tevent_common_context_constructor(ev);
984 :
985 41737 : return ev->ops->context_init(ev);
986 : }
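A sketch of the post-fork() pattern this function exists for: the child keeps the same struct tevent_context pointer but must not reuse the parent's backend state (ev is assumed to exist; <unistd.h> and <stdlib.h> provide fork() and exit()):

pid_t pid = fork();
if (pid == 0) {
        /* child: wipe inherited events and backend state */
        if (tevent_re_initialise(ev) != 0) {
                exit(1);
        }
        /* re-add whatever fd/timer/signal events the child needs, then loop */
}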
987 :
988 1166248 : static void wakeup_pipe_handler(struct tevent_context *ev,
989 : struct tevent_fd *fde,
990 : uint16_t flags, void *_private)
991 : {
992 : ssize_t ret;
993 :
994 : do {
995 : /*
996 : * This is the boilerplate for eventfd, but it works
997 : * for pipes too. And as we don't care about the data
998 : * we read, we're fine.
999 : */
1000 : uint64_t val;
1001 1166248 : ret = read(fde->fd, &val, sizeof(val));
1002 1166248 : } while (ret == -1 && errno == EINTR);
1003 1166248 : }
1004 :
1005 : /*
1006 : * Initialize the wakeup pipe and pipe fde
1007 : */
1008 :
1009 122314 : int tevent_common_wakeup_init(struct tevent_context *ev)
1010 : {
1011 : int ret, read_fd;
1012 :
1013 122314 : if (ev->wakeup_fde != NULL) {
1014 64055 : return 0;
1015 : }
1016 :
1017 : #ifdef HAVE_EVENTFD
1018 58259 : ret = eventfd(0, EFD_NONBLOCK);
1019 58259 : if (ret == -1) {
1020 0 : return errno;
1021 : }
1022 58259 : read_fd = ev->wakeup_fd = ret;
1023 : #else
1024 : {
1025 : int pipe_fds[2];
1026 : ret = pipe(pipe_fds);
1027 : if (ret == -1) {
1028 : return errno;
1029 : }
1030 : ev->wakeup_fd = pipe_fds[1];
1031 : ev->wakeup_read_fd = pipe_fds[0];
1032 :
1033 : ev_set_blocking(ev->wakeup_fd, false);
1034 : ev_set_blocking(ev->wakeup_read_fd, false);
1035 :
1036 : read_fd = ev->wakeup_read_fd;
1037 : }
1038 : #endif
1039 :
1040 58259 : ev->wakeup_fde = tevent_add_fd(ev, ev, read_fd, TEVENT_FD_READ,
1041 : wakeup_pipe_handler, NULL);
1042 58259 : if (ev->wakeup_fde == NULL) {
1043 0 : close(ev->wakeup_fd);
1044 : #ifndef HAVE_EVENTFD
1045 : close(ev->wakeup_read_fd);
1046 : #endif
1047 0 : return ENOMEM;
1048 : }
1049 :
1050 58259 : return 0;
1051 : }
1052 :
1053 1170468 : int tevent_common_wakeup_fd(int fd)
1054 : {
1055 : ssize_t ret;
1056 :
1057 : do {
1058 : #ifdef HAVE_EVENTFD
1059 1170468 : uint64_t val = 1;
1060 1170468 : ret = write(fd, &val, sizeof(val));
1061 : #else
1062 : char c = '\0';
1063 : ret = write(fd, &c, 1);
1064 : #endif
1065 1170468 : } while ((ret == -1) && (errno == EINTR));
1066 :
1067 1170468 : return 0;
1068 : }
1069 :
1070 1168745 : int tevent_common_wakeup(struct tevent_context *ev)
1071 : {
1072 1168745 : if (ev->wakeup_fde == NULL) {
1073 0 : return ENOTCONN;
1074 : }
1075 :
1076 1168745 : return tevent_common_wakeup_fd(ev->wakeup_fd);
1077 : }
1078 :
1079 56636748 : static void tevent_common_wakeup_fini(struct tevent_context *ev)
1080 : {
1081 56636748 : if (ev->wakeup_fde == NULL) {
1082 56578783 : return;
1083 : }
1084 :
1085 57965 : TALLOC_FREE(ev->wakeup_fde);
1086 :
1087 57965 : close(ev->wakeup_fd);
1088 : #ifndef HAVE_EVENTFD
1089 : close(ev->wakeup_read_fd);
1090 : #endif
1091 : }
|