Ensure struct rb_io is passed through to thread.c. (#13134)

Samuel Williams 2025-04-19 09:55:16 +09:00 committed by GitHub
parent 8bf14b048f
commit 20a1c1dc6b
Notes: git 2025-04-19 00:55:31 +00:00
Merged-By: ioquatix <samuel@codeotaku.com>
7 changed files with 41 additions and 29 deletions
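In short: the internal blocking-region and fd-waiting helpers now take the struct rb_io itself rather than a bare file descriptor, and thread.c derives io->fd internally. A minimal caller-side sketch of the migration (the helper name example_blocking_op is hypothetical; the declarations come from the internal/thread.h hunk below):

/* Hypothetical CRuby-internal caller, illustrating the signature change.
 * Old call shape: rb_thread_io_blocking_region(func, arg, io->fd);
 * New call shape: pass the rb_io and let thread.c read io->fd itself. */
static VALUE
example_blocking_op(struct rb_io *io, rb_blocking_function_t *func, void *arg)
{
    return rb_thread_io_blocking_region(io, func, arg);
}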

file.c (8 changed lines)

@@ -2625,11 +2625,11 @@ io_blocking_fchmod(void *ptr)
 }
 
 static int
-rb_fchmod(int fd, mode_t mode)
+rb_fchmod(struct rb_io* io, mode_t mode)
 {
     (void)rb_chmod; /* suppress unused-function warning when HAVE_FCHMOD */
-    struct nogvl_fchmod_data data = {.fd = fd, .mode = mode};
-    return (int)rb_thread_io_blocking_region(io_blocking_fchmod, &data, fd);
+    struct nogvl_fchmod_data data = {.fd = io->fd, .mode = mode};
+    return (int)rb_thread_io_blocking_region(io, io_blocking_fchmod, &data);
 }
 #endif
@@ -2659,7 +2659,7 @@ rb_file_chmod(VALUE obj, VALUE vmode)
     GetOpenFile(obj, fptr);
 #ifdef HAVE_FCHMOD
-    if (rb_fchmod(fptr->fd, mode) == -1) {
+    if (rb_fchmod(fptr, mode) == -1) {
         if (HAVE_FCHMOD || errno != ENOSYS)
             rb_sys_fail_path(fptr->pathv);
     }

gc.c (8 changed lines)

@@ -1154,11 +1154,17 @@ cvar_table_free_i(VALUE value, void *ctx)
     return ID_TABLE_CONTINUE;
 }
 
+static void
+io_fptr_finalize(void *fptr)
+{
+    rb_io_fptr_finalize((struct rb_io *)fptr);
+}
+
 static inline void
 make_io_zombie(void *objspace, VALUE obj)
 {
     rb_io_t *fptr = RFILE(obj)->fptr;
-    rb_gc_impl_make_zombie(objspace, obj, rb_io_fptr_finalize_internal, fptr);
+    rb_gc_impl_make_zombie(objspace, obj, io_fptr_finalize, fptr);
 }
 
 static bool


@@ -58,9 +58,6 @@
 /* internal/array.h */
 #define rb_ary_new_from_args(...) rb_nonexistent_symbol(__VA_ARGS__)
 
-/* internal/io.h */
-#define rb_io_fptr_finalize(...) rb_nonexistent_symbol(__VA_ARGS__)
-
 /* internal/string.h */
 #define rb_fstring_cstr(...) rb_nonexistent_symbol(__VA_ARGS__)


@@ -119,11 +119,6 @@ void rb_stdio_set_default_encoding(void);
 VALUE rb_io_flush_raw(VALUE, int);
 size_t rb_io_memsize(const rb_io_t *);
 int rb_stderr_tty_p(void);
-void rb_io_fptr_finalize_internal(void *ptr);
-#ifdef rb_io_fptr_finalize
-# undef rb_io_fptr_finalize
-#endif
-#define rb_io_fptr_finalize rb_io_fptr_finalize_internal
 
 VALUE rb_io_popen(VALUE pname, VALUE pmode, VALUE env, VALUE opt);
 VALUE rb_io_prep_stdin(void);


@@ -55,6 +55,7 @@ VALUE rb_mutex_owned_p(VALUE self);
 VALUE rb_exec_recursive_outer_mid(VALUE (*f)(VALUE g, VALUE h, int r), VALUE g, VALUE h, ID mid);
 void ruby_mn_threads_params(void);
 
+int rb_thread_io_wait(struct rb_io *io, int events, struct timeval * timeout);
 int rb_thread_wait_for_single_fd(int fd, int events, struct timeval * timeout);
 
 struct rb_io_close_wait_list {
@@ -73,8 +74,8 @@ RUBY_SYMBOL_EXPORT_BEGIN
 void *rb_thread_prevent_fork(void *(*func)(void *), void *data); /* for ext/socket/raddrinfo.c */
 
 /* Temporary. This API will be removed (renamed). */
-VALUE rb_thread_io_blocking_region(rb_blocking_function_t *func, void *data1, int fd);
-VALUE rb_thread_io_blocking_call(rb_blocking_function_t *func, void *data1, int fd, int events);
+VALUE rb_thread_io_blocking_region(struct rb_io *io, rb_blocking_function_t *func, void *data1);
+VALUE rb_thread_io_blocking_call(struct rb_io *io, rb_blocking_function_t *func, void *data1, int events);
 
 /* thread.c (export) */
 int ruby_thread_has_gvl_p(void); /* for ext/fiddle/closure.c */

io.c (4 changed lines)

@@ -225,7 +225,7 @@ static VALUE prep_io(int fd, enum rb_io_mode fmode, VALUE klass, const char *pat
 VALUE
 rb_io_blocking_region_wait(struct rb_io *io, rb_blocking_function_t *function, void *argument, enum rb_io_event events)
 {
-    return rb_thread_io_blocking_call(function, argument, io->fd, events);
+    return rb_thread_io_blocking_call(io, function, argument, events);
 }
 
 VALUE rb_io_blocking_region(struct rb_io *io, rb_blocking_function_t *function, void *argument)
@@ -1473,7 +1473,7 @@ rb_io_wait(VALUE io, VALUE events, VALUE timeout)
         tv = &tv_storage;
     }
 
-    int ready = rb_thread_wait_for_single_fd(fptr->fd, RB_NUM2INT(events), tv);
+    int ready = rb_thread_io_wait(fptr, RB_NUM2INT(events), tv);
 
     if (ready < 0) {
         rb_sys_fail(0);


@@ -1798,12 +1798,12 @@ rb_thread_mn_schedulable(VALUE thval)
 }
 
 VALUE
-rb_thread_io_blocking_call(rb_blocking_function_t *func, void *data1, int fd, int events)
+rb_thread_io_blocking_call(struct rb_io* io, rb_blocking_function_t *func, void *data1, int events)
 {
     rb_execution_context_t *volatile ec = GET_EC();
     rb_thread_t *volatile th = rb_ec_thread_ptr(ec);
 
-    RUBY_DEBUG_LOG("th:%u fd:%d ev:%d", rb_th_serial(th), fd, events);
+    RUBY_DEBUG_LOG("th:%u fd:%d ev:%d", rb_th_serial(th), io->fd, events);
 
     struct waiting_fd waiting_fd;
     volatile VALUE val = Qundef; /* shouldn't be used */
@@ -1812,6 +1812,8 @@ rb_thread_io_blocking_call(rb_blocking_function_t *func, void *data1, int fd, in
     volatile bool prev_mn_schedulable = th->mn_schedulable;
     th->mn_schedulable = thread_io_mn_schedulable(th, events, NULL);
 
+    int fd = io->fd;
+
     // `errno` is only valid when there is an actual error - but we can't
     // extract that from the return value of `func` alone, so we clear any
     // prior `errno` value here so that we can later check if it was set by
@@ -1824,10 +1826,10 @@ rb_thread_io_blocking_call(rb_blocking_function_t *func, void *data1, int fd, in
     if ((state = EC_EXEC_TAG()) == TAG_NONE) {
         volatile enum ruby_tag_type saved_state = state; /* for BLOCKING_REGION */
       retry:
-        BLOCKING_REGION(waiting_fd.th, {
+        BLOCKING_REGION(th, {
             val = func(data1);
             saved_errno = errno;
-        }, ubf_select, waiting_fd.th, FALSE);
+        }, ubf_select, th, FALSE);
 
         th = rb_ec_thread_ptr(ec);
         if (events &&
@@ -1866,9 +1868,9 @@ rb_thread_io_blocking_call(rb_blocking_function_t *func, void *data1, int fd, in
 }
 
 VALUE
-rb_thread_io_blocking_region(rb_blocking_function_t *func, void *data1, int fd)
+rb_thread_io_blocking_region(struct rb_io *io, rb_blocking_function_t *func, void *data1)
 {
-    return rb_thread_io_blocking_call(func, data1, fd, 0);
+    return rb_thread_io_blocking_call(io, func, data1, 0);
 }
 
 /*
@@ -4398,8 +4400,8 @@ wait_for_single_fd_blocking_region(rb_thread_t *th, struct pollfd *fds, nfds_t n
 /*
  * returns a mask of events
  */
-int
-rb_thread_wait_for_single_fd(int fd, int events, struct timeval *timeout)
+static int
+thread_io_wait(struct rb_io *io, int fd, int events, struct timeval *timeout)
 {
     struct pollfd fds[1] = {{
         .fd = fd,
@@ -4533,15 +4535,14 @@ init_set_fd(int fd, rb_fdset_t *fds)
     return fds;
 }
 
-int
-rb_thread_wait_for_single_fd(int fd, int events, struct timeval *timeout)
+static int
+thread_io_wait(struct rb_io *io, int fd, int events, struct timeval *timeout)
 {
     rb_fdset_t rfds, wfds, efds;
     struct select_args args;
     int r;
     VALUE ptr = (VALUE)&args;
-    rb_execution_context_t *ec = GET_EC();
-    rb_thread_t *th = rb_ec_thread_ptr(ec);
+    rb_thread_t *th = GET_THREAD();
 
     args.as.fd = fd;
     args.read = (events & RB_WAITFD_IN) ? init_set_fd(fd, &rfds) : NULL;
@@ -4558,6 +4559,18 @@ rb_thread_wait_for_single_fd(int fd, int events, struct timeval *timeout)
 }
 #endif /* ! USE_POLL */
 
+int
+rb_thread_wait_for_single_fd(int fd, int events, struct timeval *timeout)
+{
+    return thread_io_wait(NULL, fd, events, timeout);
+}
+
+int
+rb_thread_io_wait(struct rb_io *io, int events, struct timeval * timeout)
+{
+    return thread_io_wait(io, io->fd, events, timeout);
+}
+
 /*
  * for GC
  */
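Net effect in thread.c: both the poll and select implementations become the static thread_io_wait(), the old fd-only rb_thread_wait_for_single_fd() survives as a shim that passes io == NULL, and io-aware callers use the new rb_thread_io_wait(). A hedged usage sketch (the caller example_wait_readable is hypothetical; RB_WAITFD_IN comes from ruby/io.h):

/* Hypothetical internal caller of the new io-aware entry point.
 * thread_io_wait() returns a mask of ready events, or a negative
 * value on error (with errno set by the underlying poll/select). */
static int
example_wait_readable(struct rb_io *io, struct timeval *timeout)
{
    int ready = rb_thread_io_wait(io, RB_WAITFD_IN, timeout);
    if (ready < 0) return -1;
    return ready & RB_WAITFD_IN; /* nonzero when readable */
}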