Class: IO::Event::Selector::URing
- Inherits: Object
- Ancestor chain: IO::Event::Selector::URing < Object
- Defined in:
- ext/io/event/selector/uring.c
Instance Method Summary collapse
- #close ⇒ Object
- #idle_duration ⇒ Object
- #initialize(loop) ⇒ Object constructor
- #io_close(io) ⇒ Object
- #io_read(*args) ⇒ Object
- #io_wait(fiber, io, events) ⇒ Object
- #io_write(*args) ⇒ Object
- #loop ⇒ Object
- #process_wait(fiber, _pid, _flags) ⇒ Object
- #push(fiber) ⇒ Object
- #raise(*args) ⇒ Object
- #ready? ⇒ Boolean
- #resume(*args) ⇒ Object
- #select(duration) ⇒ Object
- #transfer ⇒ Object
- #wakeup ⇒ Object
- #yield ⇒ Object
Constructor Details
#initialize(loop) ⇒ Object
248 249 250 251 252 253 254 255 256 257 258 259 260 261 262 |
// Initialize the uring selector: attach the backend to the given event loop,
// create the io_uring submission/completion queues, and register the ring's
// file descriptor with the Ruby runtime.
VALUE IO_Event_Selector_URing_initialize(VALUE self, VALUE loop) {
	struct IO_Event_Selector_URing *selector = NULL;
	TypedData_Get_Struct(self, struct IO_Event_Selector_URing, &IO_Event_Selector_URing_Type, selector);
	
	IO_Event_Selector_initialize(&selector->backend, loop);
	int result = io_uring_queue_init(URING_ENTRIES, &selector->ring, 0);
	
	// io_uring_queue_init returns a negative errno on failure:
	if (result < 0) {
		rb_syserr_fail(-result, "IO_Event_Selector_URing_initialize:io_uring_queue_init");
	}
	
	// Inform Ruby about the ring's descriptor so its fd accounting stays correct:
	rb_update_max_fd(selector->ring.ring_fd);
	
	return self;
}
Instance Method Details
#close ⇒ Object
280 281 282 283 284 285 286 287 |
# File 'ext/io/event/selector/uring.c', line 280 VALUE IO_Event_Selector_URing_close(VALUE self) { struct IO_Event_Selector_URing *selector = NULL; TypedData_Get_Struct(self, struct IO_Event_Selector_URing, &IO_Event_Selector_URing_Type, selector); close_internal(selector); return Qnil; } |
#idle_duration ⇒ Object
271 272 273 274 275 276 277 278 |
# File 'ext/io/event/selector/uring.c', line 271 VALUE IO_Event_Selector_URing_idle_duration(VALUE self) { struct IO_Event_Selector_URing *selector = NULL; TypedData_Get_Struct(self, struct IO_Event_Selector_URing, &IO_Event_Selector_URing_Type, selector); double duration = selector->idle_duration.tv_sec + (selector->idle_duration.tv_nsec / 1000000000.0); return DBL2NUM(duration); } |
#io_close(io) ⇒ Object
877 878 879 880 881 882 883 884 885 886 887 888 889 890 891 892 893 894 |
// Close the file descriptor associated with the given IO object. When
// ASYNC_CLOSE is enabled, the close is submitted through the uring;
// otherwise it falls back to a direct close(2).
VALUE IO_Event_Selector_URing_io_close(VALUE self, VALUE io) {
	struct IO_Event_Selector_URing *selector = NULL;
	TypedData_Get_Struct(self, struct IO_Event_Selector_URing, &IO_Event_Selector_URing_Type, selector);
	int descriptor = IO_Event_Selector_io_descriptor(io);

	if (ASYNC_CLOSE) {
		struct io_uring_sqe *sqe = io_get_sqe(selector);
		
		io_uring_prep_close(sqe, descriptor);
		// No completion data is attached: we never wait for the result.
		io_uring_sqe_set_data(sqe, NULL);
		io_uring_submit_now(selector);
	} else {
		close(descriptor);
	}

	// We don't wait for the result of close since it has no use in practice:
	return Qtrue;
}
#io_read(*args) ⇒ Object
734 735 736 737 738 739 740 741 742 743 744 745 |
# File 'ext/io/event/selector/uring.c', line 734 static VALUE IO_Event_Selector_URing_io_read_compatible(int argc, VALUE *argv, VALUE self) { rb_check_arity(argc, 4, 5); VALUE _offset = SIZET2NUM(0); if (argc == 5) { _offset = argv[4]; } return IO_Event_Selector_URing_io_read(self, argv[0], argv[1], argv[2], argv[3], _offset); } |
#io_wait(fiber, io, events) ⇒ Object
576 577 578 579 580 581 582 583 584 585 586 587 588 589 590 591 592 593 594 595 596 597 598 599 600 601 602 603 604 605 |
// Wait for the given events (readable/writable/priority) on the IO's file
// descriptor, suspending the fiber until the poll operation completes.
VALUE IO_Event_Selector_URing_io_wait(VALUE self, VALUE fiber, VALUE io, VALUE events) {
	struct IO_Event_Selector_URing *selector = NULL;
	TypedData_Get_Struct(self, struct IO_Event_Selector_URing, &IO_Event_Selector_URing_Type, selector);
	
	int descriptor = IO_Event_Selector_io_descriptor(io);
	short flags = poll_flags_from_events(NUM2INT(events));
	
	if (DEBUG) fprintf(stderr, "IO_Event_Selector_URing_io_wait:io_uring_prep_poll_add(descriptor=%d, flags=%d, fiber=%p)\n", descriptor, flags, (void*)fiber);
	
	// Stack-allocated waiting state; the completion references it while pending:
	struct IO_Event_Selector_URing_Waiting waiting = {
		.fiber = fiber,
	};
	
	struct IO_Event_Selector_URing_Completion *completion = IO_Event_Selector_URing_Completion_acquire(selector, &waiting);
	
	struct io_uring_sqe *sqe = io_get_sqe(selector);
	
	io_uring_prep_poll_add(sqe, descriptor, flags);
	// The completion is attached as user data so the CQE can be routed back:
	io_uring_sqe_set_data(sqe, completion);
	
	// If we are going to wait, we assume that we are waiting for a while:
	io_uring_submit_pending(selector);
	
	struct io_wait_arguments io_wait_arguments = {
		.selector = selector,
		.waiting = &waiting,
		.flags = flags
	};
	
	// rb_ensure guarantees cleanup even if the waiting fiber is cancelled/raised:
	return rb_ensure(io_wait_transfer, (VALUE)&io_wait_arguments, io_wait_ensure, (VALUE)&io_wait_arguments);
}
#io_write(*args) ⇒ Object
858 859 860 861 862 863 864 865 866 867 868 869 |
# File 'ext/io/event/selector/uring.c', line 858 static VALUE IO_Event_Selector_URing_io_write_compatible(int argc, VALUE *argv, VALUE self) { rb_check_arity(argc, 4, 5); VALUE _offset = SIZET2NUM(0); if (argc == 5) { _offset = argv[4]; } return IO_Event_Selector_URing_io_write(self, argv[0], argv[1], argv[2], argv[3], _offset); } |
#loop ⇒ Object
264 265 266 267 268 269 |
# File 'ext/io/event/selector/uring.c', line 264 VALUE IO_Event_Selector_URing_loop(VALUE self) { struct IO_Event_Selector_URing *selector = NULL; TypedData_Get_Struct(self, struct IO_Event_Selector_URing, &IO_Event_Selector_URing_Type, selector); return selector->backend.loop; } |
#process_wait(fiber, _pid, _flags) ⇒ Object
470 471 472 473 474 475 476 477 478 479 480 481 482 483 484 485 486 487 488 489 490 491 492 493 494 495 496 497 498 499 500 501 502 503 504 |
// Wait for the given process to change state. Uses pidfd_open(2) so the
// process can be monitored via the uring as a pollable file descriptor.
VALUE IO_Event_Selector_URing_process_wait(VALUE self, VALUE fiber, VALUE _pid, VALUE _flags) {
	struct IO_Event_Selector_URing *selector = NULL;
	TypedData_Get_Struct(self, struct IO_Event_Selector_URing, &IO_Event_Selector_URing_Type, selector);
	
	pid_t pid = NUM2PIDT(_pid);
	int flags = NUM2INT(_flags);
	
	// Obtain a pollable descriptor referring to the process:
	int descriptor = pidfd_open(pid, 0);
	if (descriptor < 0) {
		rb_syserr_fail(errno, "IO_Event_Selector_URing_process_wait:pidfd_open");
	}
	rb_update_max_fd(descriptor);
	
	// Stack-allocated waiting state; the completion references it while pending:
	struct IO_Event_Selector_URing_Waiting waiting = {
		.fiber = fiber,
	};
	
	struct IO_Event_Selector_URing_Completion *completion = IO_Event_Selector_URing_Completion_acquire(selector, &waiting);
	
	struct process_wait_arguments process_wait_arguments = {
		.selector = selector,
		.waiting = &waiting,
		.pid = pid,
		.flags = flags,
		.descriptor = descriptor,
	};
	
	if (DEBUG) fprintf(stderr, "IO_Event_Selector_URing_process_wait:io_uring_prep_poll_add(%p)\n", (void*)fiber);
	struct io_uring_sqe *sqe = io_get_sqe(selector);
	// The pidfd becomes readable (POLLIN) when the process exits:
	io_uring_prep_poll_add(sqe, descriptor, POLLIN|POLLHUP|POLLERR);
	io_uring_sqe_set_data(sqe, completion);
	io_uring_submit_pending(selector);
	
	// rb_ensure guarantees the pidfd and completion are released on any exit path:
	return rb_ensure(process_wait_transfer, (VALUE)&process_wait_arguments, process_wait_ensure, (VALUE)&process_wait_arguments);
}
#push(fiber) ⇒ Object
313 314 315 316 317 318 319 320 321 |
# File 'ext/io/event/selector/uring.c', line 313 VALUE IO_Event_Selector_URing_push(VALUE self, VALUE fiber) { struct IO_Event_Selector_URing *selector = NULL; TypedData_Get_Struct(self, struct IO_Event_Selector_URing, &IO_Event_Selector_URing_Type, selector); IO_Event_Selector_queue_push(&selector->backend, fiber); return Qnil; } |
#raise(*args) ⇒ Object
323 324 325 326 327 328 329 |
# File 'ext/io/event/selector/uring.c', line 323 VALUE IO_Event_Selector_URing_raise(int argc, VALUE *argv, VALUE self) { struct IO_Event_Selector_URing *selector = NULL; TypedData_Get_Struct(self, struct IO_Event_Selector_URing, &IO_Event_Selector_URing_Type, selector); return IO_Event_Selector_raise(&selector->backend, argc, argv); } |
#ready? ⇒ Boolean
331 332 333 334 335 336 |
# File 'ext/io/event/selector/uring.c', line 331 VALUE IO_Event_Selector_URing_ready_p(VALUE self) { struct IO_Event_Selector_URing *selector = NULL; TypedData_Get_Struct(self, struct IO_Event_Selector_URing, &IO_Event_Selector_URing_Type, selector); return selector->backend.ready ? Qtrue : Qfalse; } |
#resume(*args) ⇒ Object
297 298 299 300 301 302 303 |
# File 'ext/io/event/selector/uring.c', line 297 VALUE IO_Event_Selector_URing_resume(int argc, VALUE *argv, VALUE self) { struct IO_Event_Selector_URing *selector = NULL; TypedData_Get_Struct(self, struct IO_Event_Selector_URing, &IO_Event_Selector_URing_Type, selector); return IO_Event_Selector_resume(&selector->backend, argc, argv); } |
#select(duration) ⇒ Object
1016 1017 1018 1019 1020 1021 1022 1023 1024 1025 1026 1027 1028 1029 1030 1031 1032 1033 1034 1035 1036 1037 1038 1039 1040 1041 1042 1043 1044 1045 1046 1047 1048 1049 1050 1051 1052 1053 1054 1055 1056 1057 1058 1059 1060 1061 1062 1063 |
// Run one iteration of the selector: flush pending submissions, run ready
// fibers, harvest completions, and — only if nothing was ready — block
// waiting for events up to the given duration. Returns the number of
// completions processed, and records time spent blocked in idle_duration.
VALUE IO_Event_Selector_URing_select(VALUE self, VALUE duration) {
	struct IO_Event_Selector_URing *selector = NULL;
	TypedData_Get_Struct(self, struct IO_Event_Selector_URing, &IO_Event_Selector_URing_Type, selector);
	
	// Reset the idle time; it is only accumulated if we actually block below:
	selector->idle_duration.tv_sec = 0;
	selector->idle_duration.tv_nsec = 0;
	
	// Flush any pending events:
	io_uring_submit_flush(selector);
	
	int ready = IO_Event_Selector_queue_flush(&selector->backend);
	
	int result = select_process_completions(selector);
	
	// If we:
	// 1. Didn't process any ready fibers, and
	// 2. Didn't process any events from non-blocking select (above), and
	// 3. There are no items in the ready list,
	// then we can perform a blocking select.
	if (!ready && !result && !selector->backend.ready) {
		// We might need to wait for events:
		struct select_arguments arguments = {
			.selector = selector,
			.timeout = NULL,
		};
		
		arguments.timeout = make_timeout(duration, &arguments.storage);
		
		// Re-check readiness: fibers may have become ready while building the timeout.
		if (!selector->backend.ready && !timeout_nonblocking(arguments.timeout)) {
			struct timespec start_time;
			IO_Event_Selector_current_time(&start_time);
			
			// This is a blocking operation, we wait for events:
			result = select_internal_without_gvl(&arguments);
			
			// Record how long we were blocked as the idle duration:
			struct timespec end_time;
			IO_Event_Selector_current_time(&end_time);
			IO_Event_Selector_elapsed_time(&start_time, &end_time, &selector->idle_duration);
			
			// After waiting/flushing the SQ, check if there are any completions:
			if (result > 0) {
				result = select_process_completions(selector);
			}
		}
	}
	
	return RB_INT2NUM(result);
}
#transfer ⇒ Object
289 290 291 292 293 294 295 |
# File 'ext/io/event/selector/uring.c', line 289 VALUE IO_Event_Selector_URing_transfer(VALUE self) { struct IO_Event_Selector_URing *selector = NULL; TypedData_Get_Struct(self, struct IO_Event_Selector_URing, &IO_Event_Selector_URing_Type, selector); return IO_Event_Selector_fiber_transfer(selector->backend.loop, 0, NULL); } |
#wakeup ⇒ Object
1065 1066 1067 1068 1069 1070 1071 1072 1073 1074 1075 1076 1077 1078 1079 1080 1081 1082 1083 1084 1085 1086 1087 1088 1089 1090 1091 1092 |
// Wake up a selector that may be blocked in select, by submitting a no-op
// operation to the ring. Returns Qtrue if a wakeup was submitted, Qfalse if
// the selector was not blocked (or became unblocked while we retried).
VALUE IO_Event_Selector_URing_wakeup(VALUE self) {
	struct IO_Event_Selector_URing *selector = NULL;
	TypedData_Get_Struct(self, struct IO_Event_Selector_URing, &IO_Event_Selector_URing_Type, selector);
	
	// If we are blocking, we can schedule a nop event to wake up the selector:
	if (selector->blocked) {
		struct io_uring_sqe *sqe = NULL;
		
		// The submission queue may be full; retry until we obtain an SQE:
		while (true) {
			sqe = io_uring_get_sqe(&selector->ring);
			if (sqe) break;
			
			rb_thread_schedule();
			
			// It's possible we became unblocked already, so we can assume the selector has already cycled at least once:
			if (!selector->blocked) return Qfalse;
		}
		
		io_uring_prep_nop(sqe);
		// If you don't clear the user data, the SQE may eventually be recycled carrying stale user data, which can cause odd behaviour:
		io_uring_sqe_set_data(sqe, NULL);
		io_uring_submit(&selector->ring);
		
		return Qtrue;
	}
	
	return Qfalse;
}
#yield ⇒ Object
305 306 307 308 309 310 311 |
# File 'ext/io/event/selector/uring.c', line 305 VALUE IO_Event_Selector_URing_yield(VALUE self) { struct IO_Event_Selector_URing *selector = NULL; TypedData_Get_Struct(self, struct IO_Event_Selector_URing, &IO_Event_Selector_URing_Type, selector); return IO_Event_Selector_yield(&selector->backend); } |