Class: IO::Event::Selector::EPoll
- Inherits:
-
Object
- Object
- IO::Event::Selector::EPoll
- Defined in:
- ext/io/event/selector/epoll.c
Instance Method Summary collapse
- #close ⇒ Object
- #idle_duration ⇒ Object
- #initialize(loop) ⇒ Object constructor
- #io_read(*args) ⇒ Object
- #io_wait(fiber, io, events) ⇒ Object
- #io_write(*args) ⇒ Object
- #loop ⇒ Object
-
#process_wait(fiber, _pid, _flags) ⇒ Object
rb_define_method(IO_Event_Selector_EPoll, "io_write", IO_Event_Selector_EPoll_io_write, 5);
- #push(fiber) ⇒ Object
- #raise(*args) ⇒ Object
- #ready? ⇒ Boolean
- #resume(*args) ⇒ Object
-
#select(duration) ⇒ Object
TODO This function is not re-entrant and we should document and assert as such.
- #transfer ⇒ Object
- #wakeup ⇒ Object
- #yield ⇒ Object
Constructor Details
#initialize(loop) ⇒ Object
362 363 364 365 366 367 368 369 370 371 372 373 374 375 376 377 378 379 380 381 |
# File 'ext/io/event/selector/epoll.c', line 362
// Initialize the selector: bind it to the event loop, create the epoll
// descriptor, and open the cross-thread interrupt.
//
// @parameter loop [VALUE] The event loop fiber this selector transfers control to.
// @returns [VALUE] self.
// Raises a SystemCallError if epoll_create1(2) fails.
VALUE IO_Event_Selector_EPoll_initialize(VALUE self, VALUE loop) {
	struct IO_Event_Selector_EPoll *selector = NULL;
	TypedData_Get_Struct(self, struct IO_Event_Selector_EPoll, &IO_Event_Selector_EPoll_Type, selector);
	
	IO_Event_Selector_initialize(&selector->backend, loop);
	
	// EPOLL_CLOEXEC prevents the descriptor from leaking across exec(2):
	int result = epoll_create1(EPOLL_CLOEXEC);
	
	if (result == -1) {
		// Name the actual syscall (epoll_create1) in the failure message:
		rb_sys_fail("IO_Event_Selector_EPoll_initialize:epoll_create1");
	} else {
		selector->descriptor = result;
		
		rb_update_max_fd(selector->descriptor);
	}
	
	// The interrupt lets other threads wake up a selector blocked in `select`:
	IO_Event_Interrupt_open(&selector->interrupt);
	IO_Event_Interrupt_add(&selector->interrupt, selector);
	
	return self;
}
|
Instance Method Details
#close ⇒ Object
399 400 401 402 403 404 405 406 |
# File 'ext/io/event/selector/epoll.c', line 399
// Close the selector, releasing the epoll descriptor and associated resources.
//
// @returns [VALUE] Qnil.
VALUE IO_Event_Selector_EPoll_close(VALUE self) {
	struct IO_Event_Selector_EPoll *epoll = NULL;
	TypedData_Get_Struct(self, struct IO_Event_Selector_EPoll, &IO_Event_Selector_EPoll_Type, epoll);
	
	close_internal(epoll);
	
	return Qnil;
}
|
#idle_duration ⇒ Object
390 391 392 393 394 395 396 397 |
# File 'ext/io/event/selector/epoll.c', line 390
// How long the selector spent blocked during the most recent `select` call.
//
// @returns [VALUE] A Float number of seconds.
VALUE IO_Event_Selector_EPoll_idle_duration(VALUE self) {
	struct IO_Event_Selector_EPoll *epoll = NULL;
	TypedData_Get_Struct(self, struct IO_Event_Selector_EPoll, &IO_Event_Selector_EPoll_Type, epoll);
	
	// Convert the stored timespec into fractional seconds:
	double seconds = (double)epoll->idle_duration.tv_sec;
	seconds += epoll->idle_duration.tv_nsec / 1000000000.0;
	
	return DBL2NUM(seconds);
}
|
#io_read(*args) ⇒ Object
672 673 674 675 676 677 678 679 680 681 682 683 |
# File 'ext/io/event/selector/epoll.c', line 672
// Compatibility shim for `io_read`: accepts 4 or 5 arguments, defaulting the
// trailing offset argument to 0 when the caller omits it.
//
// @returns [VALUE] Whatever IO_Event_Selector_EPoll_io_read returns.
VALUE IO_Event_Selector_EPoll_io_read_compatible(int argc, VALUE *argv, VALUE self)
{
	rb_check_arity(argc, 4, 5);
	
	// Use the caller-supplied offset when present, otherwise zero:
	VALUE offset = (argc == 5) ? argv[4] : SIZET2NUM(0);
	
	return IO_Event_Selector_EPoll_io_read(self, argv[0], argv[1], argv[2], argv[3], offset);
}
|
#io_wait(fiber, io, events) ⇒ Object
559 560 561 562 563 564 565 566 567 568 569 570 571 572 573 574 575 576 577 578 579 580 581 582 583 584 585 586 587 588 589 |
# File 'ext/io/event/selector/epoll.c', line 559
// Suspend `fiber` until `io` is ready for the requested `events` mask.
// Registers a stack-allocated waiter with epoll, then transfers away; the
// rb_ensure guarantees deregistration even if the fiber is raised into.
VALUE IO_Event_Selector_EPoll_io_wait(VALUE self, VALUE fiber, VALUE io, VALUE events) {
struct IO_Event_Selector_EPoll *selector = NULL;
TypedData_Get_Struct(self, struct IO_Event_Selector_EPoll, &IO_Event_Selector_EPoll_Type, selector);
int descriptor = IO_Event_Selector_io_descriptor(io);
// The waiter lives on this stack frame; it is only valid while this call is suspended.
struct IO_Event_Selector_EPoll_Waiting waiting = {
.list = {.type = &IO_Event_Selector_EPoll_io_wait_list_type},
.fiber = fiber,
.events = RB_NUM2INT(events),
};
int result = IO_Event_Selector_EPoll_Waiting_register(selector, io, descriptor, &waiting);
if (result == -1) {
// epoll_ctl fails with EPERM for files that don't support epoll (e.g. regular
// files); treat them as always ready: reschedule the fiber and yield once.
if (errno == EPERM) {
IO_Event_Selector_queue_push(&selector->backend, fiber);
IO_Event_Selector_yield(&selector->backend);
return events;
}
rb_sys_fail("IO_Event_Selector_EPoll_io_wait:IO_Event_Selector_EPoll_Waiting_register");
}
struct io_wait_arguments io_wait_arguments = {
.selector = selector,
.waiting = &waiting,
};
// Transfer to the event loop; ensure clause deregisters the waiter on any exit path.
return rb_ensure(io_wait_transfer, (VALUE)&io_wait_arguments, io_wait_ensure, (VALUE)&io_wait_arguments);
}
|
#io_write(*args) ⇒ Object
768 769 770 771 772 773 774 775 776 777 778 779 |
# File 'ext/io/event/selector/epoll.c', line 768
// Compatibility shim for `io_write`: accepts 4 or 5 arguments, defaulting the
// trailing offset argument to 0 when the caller omits it.
//
// @returns [VALUE] Whatever IO_Event_Selector_EPoll_io_write returns.
VALUE IO_Event_Selector_EPoll_io_write_compatible(int argc, VALUE *argv, VALUE self)
{
	rb_check_arity(argc, 4, 5);
	
	// Use the caller-supplied offset when present, otherwise zero:
	VALUE offset = (argc == 5) ? argv[4] : SIZET2NUM(0);
	
	return IO_Event_Selector_EPoll_io_write(self, argv[0], argv[1], argv[2], argv[3], offset);
}
|
#loop ⇒ Object
383 384 385 386 387 388 |
# File 'ext/io/event/selector/epoll.c', line 383
// Accessor for the event loop fiber this selector was initialized with.
//
// @returns [VALUE] The loop fiber.
VALUE IO_Event_Selector_EPoll_loop(VALUE self) {
	struct IO_Event_Selector_EPoll *epoll = NULL;
	TypedData_Get_Struct(self, struct IO_Event_Selector_EPoll, &IO_Event_Selector_EPoll_Type, epoll);
	
	return epoll->backend.loop;
}
|
#process_wait(fiber, _pid, _flags) ⇒ Object
rb_define_method(IO_Event_Selector_EPoll, "io_write", IO_Event_Selector_EPoll_io_write, 5);
491 492 493 494 495 496 497 498 499 500 501 502 503 504 505 506 507 508 509 510 511 512 513 514 515 516 517 518 519 520 521 522 523 524 525 526 527 528 |
# File 'ext/io/event/selector/epoll.c', line 491
// Suspend `fiber` until the child process `_pid` exits, using pidfd_open(2):
// the pidfd becomes readable when the process terminates, so it can be
// multiplexed through epoll like any other descriptor.
VALUE IO_Event_Selector_EPoll_process_wait(VALUE self, VALUE fiber, VALUE _pid, VALUE _flags) {
struct IO_Event_Selector_EPoll *selector = NULL;
TypedData_Get_Struct(self, struct IO_Event_Selector_EPoll, &IO_Event_Selector_EPoll_Type, selector);
pid_t pid = NUM2PIDT(_pid);
// NOTE(review): flags are carried through to the transfer arguments — presumably
// forwarded to the eventual wait call; confirm in process_wait_transfer.
int flags = NUM2INT(_flags);
int descriptor = pidfd_open(pid, 0);
if (descriptor == -1) {
rb_sys_fail("IO_Event_Selector_EPoll_process_wait:pidfd_open");
}
rb_update_max_fd(descriptor);
// Stack-allocated waiter; readable means the process has exited.
struct IO_Event_Selector_EPoll_Waiting waiting = {
.list = {.type = &IO_Event_Selector_EPoll_process_wait_list_type},
.fiber = fiber,
.events = IO_EVENT_READABLE,
};
int result = IO_Event_Selector_EPoll_Waiting_register(selector, 0, descriptor, &waiting);
if (result == -1) {
// Registration failed: close the pidfd before raising to avoid leaking it.
close(descriptor);
rb_sys_fail("IO_Event_Selector_EPoll_process_wait:IO_Event_Selector_EPoll_Waiting_register");
}
struct process_wait_arguments process_wait_arguments = {
.selector = selector,
.pid = pid,
.flags = flags,
.descriptor = descriptor,
.waiting = &waiting,
};
// Ensure clause is responsible for deregistration/cleanup on all exit paths.
return rb_ensure(process_wait_transfer, (VALUE)&process_wait_arguments, process_wait_ensure, (VALUE)&process_wait_arguments);
}
|
#push(fiber) ⇒ Object
432 433 434 435 436 437 438 439 440 |
# File 'ext/io/event/selector/epoll.c', line 432
// Append `fiber` to the selector's ready queue so it is resumed on the next
// pass through the event loop.
//
// @returns [VALUE] Qnil.
VALUE IO_Event_Selector_EPoll_push(VALUE self, VALUE fiber)
{
	struct IO_Event_Selector_EPoll *epoll = NULL;
	TypedData_Get_Struct(self, struct IO_Event_Selector_EPoll, &IO_Event_Selector_EPoll_Type, epoll);
	
	IO_Event_Selector_queue_push(&epoll->backend, fiber);
	
	return Qnil;
}
|
#raise(*args) ⇒ Object
442 443 444 445 446 447 448 |
# File 'ext/io/event/selector/epoll.c', line 442
// Raise an exception on a fiber managed by this selector; arguments are
// forwarded unchanged to the shared backend implementation.
VALUE IO_Event_Selector_EPoll_raise(int argc, VALUE *argv, VALUE self)
{
	struct IO_Event_Selector_EPoll *epoll = NULL;
	TypedData_Get_Struct(self, struct IO_Event_Selector_EPoll, &IO_Event_Selector_EPoll_Type, epoll);
	
	return IO_Event_Selector_raise(&epoll->backend, argc, argv);
}
|
#ready? ⇒ Boolean
450 451 452 453 454 455 |
# File 'ext/io/event/selector/epoll.c', line 450
// Whether there are fibers in the ready queue waiting to be resumed.
//
// @returns [VALUE] Qtrue or Qfalse.
VALUE IO_Event_Selector_EPoll_ready_p(VALUE self) {
	struct IO_Event_Selector_EPoll *epoll = NULL;
	TypedData_Get_Struct(self, struct IO_Event_Selector_EPoll, &IO_Event_Selector_EPoll_Type, epoll);
	
	if (epoll->backend.ready) {
		return Qtrue;
	} else {
		return Qfalse;
	}
}
|
#resume(*args) ⇒ Object
416 417 418 419 420 421 422 |
# File 'ext/io/event/selector/epoll.c', line 416
// Resume a fiber with the given arguments; forwarded unchanged to the shared
// backend implementation.
VALUE IO_Event_Selector_EPoll_resume(int argc, VALUE *argv, VALUE self)
{
	struct IO_Event_Selector_EPoll *epoll = NULL;
	TypedData_Get_Struct(self, struct IO_Event_Selector_EPoll, &IO_Event_Selector_EPoll_Type, epoll);
	
	return IO_Event_Selector_resume(&epoll->backend, argc, argv);
}
|
#select(duration) ⇒ Object
TODO This function is not re-entrant and we should document and assert as such.
974 975 976 977 978 979 980 981 982 983 984 985 986 987 988 989 990 991 992 993 994 995 996 997 998 999 1000 1001 1002 1003 1004 1005 1006 1007 1008 1009 1010 1011 1012 1013 1014 1015 1016 1017 1018 1019 1020 1021 1022 1023 |
# File 'ext/io/event/selector/epoll.c', line 974
// Run one iteration of the selector: flush ready fibers, poll epoll for
// pending events, and only block (up to `duration`) when there is nothing
// else to do. Records time spent blocked in `idle_duration`.
// NOTE: this function is not re-entrant (see TODO in the summary above).
VALUE IO_Event_Selector_EPoll_select(VALUE self, VALUE duration) {
struct IO_Event_Selector_EPoll *selector = NULL;
TypedData_Get_Struct(self, struct IO_Event_Selector_EPoll, &IO_Event_Selector_EPoll_Type, selector);
// Reset the idle time for this iteration; only a blocking wait accumulates it.
selector->idle_duration.tv_sec = 0;
selector->idle_duration.tv_nsec = 0;
// Resume any fibers that were already queued as ready:
int ready = IO_Event_Selector_queue_flush(&selector->backend);
struct select_arguments arguments = {
.selector = selector,
.storage = {
.tv_sec = 0,
.tv_nsec = 0
},
.saved = {},
};
// Zero timeout => the first poll below is non-blocking.
arguments.timeout = &arguments.storage;
// Process any currently pending events:
select_internal_with_gvl(&arguments);
// If we:
// 1. Didn't process any ready fibers, and
// 2. Didn't process any events from non-blocking select (above), and
// 3. There are no items in the ready list,
// then we can perform a blocking select.
if (!ready && !arguments.count && !selector->backend.ready) {
arguments.timeout = make_timeout(duration, &arguments.storage);
if (!timeout_nonblocking(arguments.timeout)) {
struct timespec start_time;
IO_Event_Selector_current_time(&start_time);
// Wait for events to occur:
select_internal_without_gvl(&arguments);
struct timespec end_time;
IO_Event_Selector_current_time(&end_time);
// Record how long we were blocked, exposed via #idle_duration:
IO_Event_Selector_elapsed_time(&start_time, &end_time, &selector->idle_duration);
}
}
if (arguments.count) {
// Dispatch the collected events; ensure clause handles cleanup on raise.
return rb_ensure(select_handle_events, (VALUE)&arguments, select_handle_events_ensure, (VALUE)&arguments);
} else {
return RB_INT2NUM(0);
}
}
|
#transfer ⇒ Object
408 409 410 411 412 413 414 |
# File 'ext/io/event/selector/epoll.c', line 408
// Transfer control from the current fiber to the event loop fiber.
//
// @returns [VALUE] The value transferred back when this fiber is resumed.
VALUE IO_Event_Selector_EPoll_transfer(VALUE self)
{
	struct IO_Event_Selector_EPoll *epoll = NULL;
	TypedData_Get_Struct(self, struct IO_Event_Selector_EPoll, &IO_Event_Selector_EPoll_Type, epoll);
	
	return IO_Event_Selector_fiber_transfer(epoll->backend.loop, 0, NULL);
}
|
#wakeup ⇒ Object
1025 1026 1027 1028 1029 1030 1031 1032 1033 1034 1035 1036 1037 |
# File 'ext/io/event/selector/epoll.c', line 1025
// Wake up a selector that is blocked in `select`, typically from another thread.
//
// @returns [VALUE] Qtrue if a wakeup was signalled, Qfalse if the selector
//   was not blocked (nothing to do).
VALUE IO_Event_Selector_EPoll_wakeup(VALUE self) {
	struct IO_Event_Selector_EPoll *epoll = NULL;
	TypedData_Get_Struct(self, struct IO_Event_Selector_EPoll, &IO_Event_Selector_EPoll_Type, epoll);
	
	// Only useful while blocked in the event wait; otherwise it's a no-op.
	if (!epoll->blocked) {
		return Qfalse;
	}
	
	// Signal the interrupt, which is registered with epoll, to force a return:
	IO_Event_Interrupt_signal(&epoll->interrupt);
	
	return Qtrue;
}
|
#yield ⇒ Object
424 425 426 427 428 429 430 |
# File 'ext/io/event/selector/epoll.c', line 424
// Yield the current fiber back to the selector; forwarded unchanged to the
// shared backend implementation.
VALUE IO_Event_Selector_EPoll_yield(VALUE self)
{
	struct IO_Event_Selector_EPoll *epoll = NULL;
	TypedData_Get_Struct(self, struct IO_Event_Selector_EPoll, &IO_Event_Selector_EPoll_Type, epoll);
	
	return IO_Event_Selector_yield(&epoll->backend);
}
|