Ractor::Port

* Added `Ractor::Port`
  * `Ractor::Port#receive` (supports receiving from multiple threads)
  * `Ractor::Port#close`
  * `Ractor::Port#closed?`
* Added some `Ractor` methods
  * `Ractor#join`
  * `Ractor#value`
  * `Ractor#monitor`
  * `Ractor#unmonitor`
* Removed some methods
  * `Ractor#take`
  * `Ractor.yield`
* Changed the specification of
  * `Ractor.select`

You can wait for multiple sequences of messages with `Ractor::Port`.

```ruby
ports = 3.times.map{ Ractor::Port.new }

ports.map.with_index do |port, ri|
  Ractor.new port, ri do |port, ri|
    3.times{|i| port << "r#{ri}-#{i}"}
  end
end

ports.each{|port| pp 3.times.map{port.receive}}
```

In this example, we create three ports, and three Ractors each send messages to their own port.
We can then receive the series of messages from each port.

You can use `Ractor#value` to get the last value of a Ractor's block:

```ruby
result = Ractor.new do
  heavy_task()
end.value
```

You can wait for the termination of a Ractor with `Ractor#join` like this:

```ruby
Ractor.new do
  some_task()
end.join
```

`#value` and `#join` are similar to `Thread#value` and `Thread#join`.
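
As with `Thread#value`, the exception that terminated the Ractor is re-raised by `#value`. A minimal sketch (assuming the behavior described by the `#join`/`#value` documentation added in `ractor.rb`):

```ruby
r = Ractor.new do
  raise "heavy_task failed" # hypothetical failure inside the Ractor
end

begin
  r.value
rescue => e
  p e # the exception that terminated the Ractor is re-raised here
end
```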

To implement `#join`, `Ractor#monitor` (and `Ractor#unmonitor`) are introduced.
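
A monitoring port receives a Symbol when the monitored Ractor terminates: `:exited` on normal termination, `:aborted` when it terminated with an exception. A small sketch, mirroring the documentation added to `ractor.rb`:

```ruby
port = Ractor::Port.new
r = Ractor.new { :finished }
r.monitor(port)

port.receive #=> :exited, sent when r terminates without an exception
```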

This commit also changes the `Ractor.select` method.
It now accepts only ports or Ractors, and returns when a port receives a message or a Ractor terminates.
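
A minimal sketch of the new interface, based on the updated `ractor.rb` implementation below; it returns `[port, message]` when a port receives a message and `[ractor, ractor.value]` when a Ractor terminates:

```ruby
port = Ractor::Port.new
r = Ractor.new { :done }

who, obj = Ractor.select(port, r)
# => [port, <message>] if a message arrived at port first
# => [r, :done]        if r terminated first
```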

We removed `Ractor.yield` and `Ractor#take` because:
* `Ractor::Port` supports most of the same use cases in a simpler manner (see the migration sketch below).
* Removing them significantly simplifies the code.
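
For reference, a typical `Ractor.yield`/`Ractor#take` pattern can be rewritten by passing a port to the block, as the test updates in this commit do. A sketch (not itself part of the commit):

```ruby
# Old (removed) API:
#   r = Ractor.new { Ractor.yield 'progress'; 'result' }
#   r.take #=> 'progress'
#   r.take #=> 'result'

# With Ractor::Port:
port = Ractor::Port.new
r = Ractor.new port do |port|
  port << 'progress'
  'result'
end

port.receive #=> 'progress'
r.value      #=> 'result'
```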

We also changed the internal thread scheduler code (thread_pthread.c):
* During barrier synchronization, we keep the `ractor_sched` lock to avoid deadlocks.
  This lock is released by `rb_ractor_sched_barrier_end()`,
  which is called at the end of operations that require the barrier.
* Potential deadlock issues are fixed by checking pending interrupts just before setting the UBF (unblock function).

https://bugs.ruby-lang.org/issues/21262
Koichi Sasada 2025-05-27 03:58:04 +09:00
parent d2a1ad00cb
commit ef2bb61018
Notes: git 2025-05-30 19:01:47 +00:00
44 changed files with 2668 additions and 3517 deletions

File diff suppressed because it is too large Load Diff

View File

@ -3018,15 +3018,16 @@ assert_equal '[:itself]', %q{
itself
end
tracing_ractor = Ractor.new do
port = Ractor::Port.new
tracing_ractor = Ractor.new port do |port|
# 1: start tracing
events = []
tp = TracePoint.new(:c_call) { events << _1.method_id }
tp.enable
Ractor.yield(nil)
port << nil
# 3: run compiled method on tracing ractor
Ractor.yield(nil)
port << nil
traced_method
events
@ -3034,13 +3035,13 @@ assert_equal '[:itself]', %q{
tp&.disable
end
tracing_ractor.take
port.receive
# 2: compile on non tracing ractor
traced_method
tracing_ractor.take
tracing_ractor.take
port.receive
tracing_ractor.value
}
# Try to hit a lazy branch stub while another ractor enables tracing
@ -3054,17 +3055,18 @@ assert_equal '42', %q{
end
end
ractor = Ractor.new do
port = Ractor::Port.new
ractor = Ractor.new port do |port|
compiled(false)
Ractor.yield(nil)
port << nil
compiled(41)
end
tp = TracePoint.new(:line) { itself }
ractor.take
port.receive
tp.enable
ractor.take
ractor.value
}
# Test equality with changing types
@ -3140,7 +3142,7 @@ assert_equal '42', %q{
A.foo
A.foo
Ractor.new { A.foo }.take
Ractor.new { A.foo }.value
}
assert_equal '["plain", "special", "sub", "plain"]', %q{
@ -3859,36 +3861,6 @@ assert_equal '3,12', %q{
pt_inspect(p)
}
# Regression test for deadlock between branch_stub_hit and ractor_receive_if
assert_equal '10', %q{
r = Ractor.new Ractor.current do |main|
main << 1
main << 2
main << 3
main << 4
main << 5
main << 6
main << 7
main << 8
main << 9
main << 10
end
a = []
a << Ractor.receive_if{|msg| msg == 10}
a << Ractor.receive_if{|msg| msg == 9}
a << Ractor.receive_if{|msg| msg == 8}
a << Ractor.receive_if{|msg| msg == 7}
a << Ractor.receive_if{|msg| msg == 6}
a << Ractor.receive_if{|msg| msg == 5}
a << Ractor.receive_if{|msg| msg == 4}
a << Ractor.receive_if{|msg| msg == 3}
a << Ractor.receive_if{|msg| msg == 2}
a << Ractor.receive_if{|msg| msg == 1}
a.length
}
# checktype
assert_equal 'false', %q{
def function()

View File

@ -374,7 +374,7 @@ assert_equal 'ok', %q{
r = Ractor.new do
'ok'
end
r.take
r.value
}
# Passed arguments to Ractor.new will be a block parameter
@ -384,7 +384,7 @@ assert_equal 'ok', %q{
r = Ractor.new 'ok' do |msg|
msg
end
r.take
r.value
}
# Pass multiple arguments to Ractor.new
@ -393,7 +393,7 @@ assert_equal 'ok', %q{
r = Ractor.new 'ping', 'pong' do |msg, msg2|
[msg, msg2]
end
'ok' if r.take == ['ping', 'pong']
'ok' if r.value == ['ping', 'pong']
}
# Ractor#send passes an object with copy to a Ractor
@ -403,7 +403,7 @@ assert_equal 'ok', %q{
msg = Ractor.receive
end
r.send 'ok'
r.take
r.value
}
assert_equal '[1, 2, 3]', %q{

View File

@ -14293,6 +14293,7 @@ ractor.$(OBJEXT): {$(VPATH)}ractor.c
ractor.$(OBJEXT): {$(VPATH)}ractor.h
ractor.$(OBJEXT): {$(VPATH)}ractor.rbinc
ractor.$(OBJEXT): {$(VPATH)}ractor_core.h
ractor.$(OBJEXT): {$(VPATH)}ractor_sync.c
ractor.$(OBJEXT): {$(VPATH)}ruby_assert.h
ractor.$(OBJEXT): {$(VPATH)}ruby_atomic.h
ractor.$(OBJEXT): {$(VPATH)}rubyparser.h

2
gc.c
View File

@ -169,7 +169,7 @@ rb_gc_vm_lock_no_barrier(void)
void
rb_gc_vm_unlock_no_barrier(unsigned int lev)
{
RB_VM_LOCK_LEAVE_LEV(&lev);
RB_VM_LOCK_LEAVE_LEV_NB(&lev);
}
void

View File

@ -2181,7 +2181,7 @@ newobj_init(VALUE klass, VALUE flags, int wb_protected, rb_objspace_t *objspace,
gc_report(5, objspace, "newobj: %s\n", rb_obj_info(obj));
RUBY_DEBUG_LOG("obj:%p (%s)", (void *)obj, rb_obj_info(obj));
// RUBY_DEBUG_LOG("obj:%p (%s)", (void *)obj, rb_obj_info(obj));
return obj;
}

1952
ractor.c

File diff suppressed because it is too large Load Diff

747
ractor.rb
View File

@ -4,7 +4,7 @@
#
# # The simplest ractor
# r = Ractor.new {puts "I am in Ractor!"}
# r.take # wait for it to finish
# r.join # wait for it to finish
# # Here, "I am in Ractor!" is printed
#
# Ractors do not share all objects with each other. There are two main benefits to this: across ractors, thread-safety
@ -36,53 +36,11 @@
# puts "I am in Ractor! a=#{a_in_ractor}"
# end
# r.send(a) # pass it
# r.take
# r.join
# # Here, "I am in Ractor! a=1" is printed
#
# There are two pairs of methods for sending/receiving messages:
#
# * Ractor#send and Ractor.receive for when the _sender_ knows the receiver (push);
# * Ractor.yield and Ractor#take for when the _receiver_ knows the sender (pull);
#
# In addition to that, any arguments passed to Ractor.new are passed to the block and available there
# as if received by Ractor.receive, and the last block value is sent outside of the
# ractor as if sent by Ractor.yield.
#
# A little demonstration of a classic ping-pong:
#
# server = Ractor.new(name: "server") do
# puts "Server starts: #{self.inspect}"
# puts "Server sends: ping"
# Ractor.yield 'ping' # The server doesn't know the receiver and sends to whoever interested
# received = Ractor.receive # The server doesn't know the sender and receives from whoever sent
# puts "Server received: #{received}"
# end
#
# client = Ractor.new(server) do |srv| # The server is sent to the client, and available as srv
# puts "Client starts: #{self.inspect}"
# received = srv.take # The client takes a message from the server
# puts "Client received from " \
# "#{srv.inspect}: #{received}"
# puts "Client sends to " \
# "#{srv.inspect}: pong"
# srv.send 'pong' # The client sends a message to the server
# end
#
# [client, server].each(&:take) # Wait until they both finish
#
# This will output something like:
#
# Server starts: #<Ractor:#2 server test.rb:1 running>
# Server sends: ping
# Client starts: #<Ractor:#3 test.rb:8 running>
# Client received from #<Ractor:#2 server test.rb:1 blocking>: ping
# Client sends to #<Ractor:#2 server test.rb:1 blocking>: pong
# Server received: pong
#
# Ractors receive their messages via the <em>incoming port</em>, and send them
# to the <em>outgoing port</em>. Either one can be disabled with Ractor#close_incoming and
# Ractor#close_outgoing, respectively. When a ractor terminates, its ports are closed
# automatically.
# as if received by Ractor.receive, and the last block value can be received with Ractor#value.
#
# == Shareable and unshareable objects
#
@ -307,130 +265,52 @@ class Ractor
#
# call-seq:
# Ractor.select(*ractors, [yield_value:, move: false]) -> [ractor or symbol, obj]
# Ractor.select(*ports) -> [...]
#
# Wait for any ractor to have something in its outgoing port, read from this ractor, and
# then return that ractor and the object received.
#
# r1 = Ractor.new {Ractor.yield 'from 1'}
# r2 = Ractor.new {Ractor.yield 'from 2'}
#
# r, obj = Ractor.select(r1, r2)
#
# puts "received #{obj.inspect} from #{r.inspect}"
# # Prints: received "from 1" from #<Ractor:#2 test.rb:1 running>
# # But could just as well print "from r2" here, either prints could be first.
#
# If one of the given ractors is the current ractor, and it is selected, +r+ will contain
# the +:receive+ symbol instead of the ractor object.
#
# r1 = Ractor.new(Ractor.current) do |main|
# main.send 'to main'
# Ractor.yield 'from 1'
# end
# r2 = Ractor.new do
# Ractor.yield 'from 2'
# end
#
# r, obj = Ractor.select(r1, r2, Ractor.current)
# puts "received #{obj.inspect} from #{r.inspect}"
# # Could print: received "to main" from :receive
#
# If +yield_value+ is provided, that value may be yielded if another ractor is calling #take.
# In this case, the pair <tt>[:yield, nil]</tt> is returned:
#
# r1 = Ractor.new(Ractor.current) do |main|
# puts "Received from main: #{main.take}"
# end
#
# puts "Trying to select"
# r, obj = Ractor.select(r1, Ractor.current, yield_value: 123)
# wait
# puts "Received #{obj.inspect} from #{r.inspect}"
#
# This will print:
#
# Trying to select
# Received from main: 123
# Received nil from :yield
#
# +move+ boolean flag defines whether yielded value will be copied (default) or moved.
def self.select(*ractors, yield_value: yield_unspecified = true, move: false)
raise ArgumentError, 'specify at least one ractor or `yield_value`' if yield_unspecified && ractors.empty?
# TBD
def self.select(*ports)
raise ArgumentError, 'specify at least one ractor or `yield_value`' if ports.empty?
if ractors.delete Ractor.current
do_receive = true
else
do_receive = false
monitors = {} # Ractor::Port => Ractor
ports = ports.map do |arg|
case arg
when Ractor
port = Ractor::Port.new
monitors[port] = arg
arg.monitor port
port
when Ractor::Port
arg
else
raise ArgumentError, "should be Ractor::Port or Ractor"
end
end
__builtin_ractor_select_internal ractors, do_receive, !yield_unspecified, yield_value, move
begin
result_port, obj = __builtin_ractor_select_internal(ports)
if r = monitors[result_port]
[r, r.value]
else
[result_port, obj]
end
ensure
# close all ports for join
monitors.each do |port, r|
r.unmonitor port
port.close
end
end
end
#
# call-seq:
# Ractor.receive -> msg
#
# Receive a message from the incoming port of the current ractor (which was
# sent there by #send from another ractor).
#
# r = Ractor.new do
# v1 = Ractor.receive
# puts "Received: #{v1}"
# end
# r.send('message1')
# r.take
# # Here will be printed: "Received: message1"
#
# Alternatively, the private instance method +receive+ may be used:
#
# r = Ractor.new do
# v1 = receive
# puts "Received: #{v1}"
# end
# r.send('message1')
# r.take
# # This prints: "Received: message1"
#
# The method blocks if the queue is empty.
#
# r = Ractor.new do
# puts "Before first receive"
# v1 = Ractor.receive
# puts "Received: #{v1}"
# v2 = Ractor.receive
# puts "Received: #{v2}"
# end
# wait
# puts "Still not received"
# r.send('message1')
# wait
# puts "Still received only one"
# r.send('message2')
# r.take
#
# Output:
#
# Before first receive
# Still not received
# Received: message1
# Still received only one
# Received: message2
#
# If close_incoming was called on the ractor, the method raises Ractor::ClosedError
# if there are no more messages in the incoming queue:
#
# Ractor.new do
# close_incoming
# receive
# end
# wait
# # in `receive': The incoming port is already closed => #<Ractor:#2 test.rb:1 running> (Ractor::ClosedError)
# Ractor.receive -> obj
#
# Receive a message from the default port.
def self.receive
__builtin_cexpr! %q{
ractor_receive(ec, rb_ec_ractor_ptr(ec))
}
Ractor.current.default_port.receive
end
class << self
@ -439,280 +319,21 @@ class Ractor
# same as Ractor.receive
private def receive
__builtin_cexpr! %q{
ractor_receive(ec, rb_ec_ractor_ptr(ec))
}
default_port.receive
end
alias recv receive
#
# call-seq:
# Ractor.receive_if {|msg| block } -> msg
# ractor.send(msg) -> self
#
# Receive only a specific message.
#
# Instead of Ractor.receive, Ractor.receive_if can be given a pattern (or any
# filter) in a block and you can choose the messages to accept that are available in
# your ractor's incoming queue.
#
# r = Ractor.new do
# p Ractor.receive_if{|msg| msg.match?(/foo/)} #=> "foo3"
# p Ractor.receive_if{|msg| msg.match?(/bar/)} #=> "bar1"
# p Ractor.receive_if{|msg| msg.match?(/baz/)} #=> "baz2"
# end
# r << "bar1"
# r << "baz2"
# r << "foo3"
# r.take
#
# This will output:
#
# foo3
# bar1
# baz2
#
# If the block returns a truthy value, the message is removed from the incoming queue
# and returned.
# Otherwise, the message remains in the incoming queue and the next messages are checked
# by the given block.
#
# If there are no messages left in the incoming queue, the method will
# block until new messages arrive.
#
# If the block is escaped by break/return/exception/throw, the message is removed from
# the incoming queue as if a truthy value had been returned.
#
# r = Ractor.new do
# val = Ractor.receive_if{|msg| msg.is_a?(Array)}
# puts "Received successfully: #{val}"
# end
#
# r.send(1)
# r.send('test')
# wait
# puts "2 non-matching sent, nothing received"
# r.send([1, 2, 3])
# wait
#
# Prints:
#
# 2 non-matching sent, nothing received
# Received successfully: [1, 2, 3]
#
# Note that you can not call receive/receive_if in the given block recursively.
# You should not do any tasks in the block other than message filtration.
#
# Ractor.current << true
# Ractor.receive_if{|msg| Ractor.receive}
# #=> `receive': can not call receive/receive_if recursively (Ractor::Error)
#
def self.receive_if &b
Primitive.ractor_receive_if b
end
# same as Ractor.receive_if
private def receive_if &b
Primitive.ractor_receive_if b
end
#
# call-seq:
# ractor.send(msg, move: false) -> self
#
# Send a message to a Ractor's incoming queue to be accepted by Ractor.receive.
#
# r = Ractor.new do
# value = Ractor.receive
# puts "Received #{value}"
# end
# r.send 'message'
# # Prints: "Received: message"
#
# The method is non-blocking (will return immediately even if the ractor is not ready
# to receive anything):
#
# r = Ractor.new {sleep(5)}
# r.send('test')
# puts "Sent successfully"
# # Prints: "Sent successfully" immediately
#
# An attempt to send to a ractor which already finished its execution will raise Ractor::ClosedError.
#
# r = Ractor.new {}
# r.take
# p r
# # "#<Ractor:#6 (irb):23 terminated>"
# r.send('test')
# # Ractor::ClosedError (The incoming-port is already closed)
#
# If close_incoming was called on the ractor, the method also raises Ractor::ClosedError.
#
# r = Ractor.new do
# sleep(500)
# receive
# end
# r.close_incoming
# r.send('test')
# # Ractor::ClosedError (The incoming-port is already closed)
# # The error is raised immediately, not when the ractor tries to receive
#
# If the +obj+ is unshareable, by default it will be copied into the receiving ractor by deep cloning.
# If <tt>move: true</tt> is passed, the object is _moved_ into the receiving ractor and becomes
# inaccessible to the sender.
#
# r = Ractor.new {puts "Received: #{receive}"}
# msg = 'message'
# r.send(msg, move: true)
# r.take
# p msg
#
# This prints:
#
# Received: message
# in `p': undefined method `inspect' for #<Ractor::MovedObject:0x000055c99b9b69b8>
#
# All references to the object and its parts will become invalid to the sender.
#
# r = Ractor.new {puts "Received: #{receive}"}
# s = 'message'
# ary = [s]
# copy = ary.dup
# r.send(ary, move: true)
#
# s.inspect
# # Ractor::MovedError (can not send any methods to a moved object)
# ary.class
# # Ractor::MovedError (can not send any methods to a moved object)
# copy.class
# # => Array, it is different object
# copy[0].inspect
# # Ractor::MovedError (can not send any methods to a moved object)
# # ...but its item was still a reference to `s`, which was moved
#
# If the object is shareable, <tt>move: true</tt> has no effect on it:
#
# r = Ractor.new {puts "Received: #{receive}"}
# s = 'message'.freeze
# r.send(s, move: true)
# s.inspect #=> "message", still available
#
def send(obj, move: false)
__builtin_cexpr! %q{
ractor_send(ec, RACTOR_PTR(self), obj, move)
}
# It is equivalent to default_port.send(msg)
def send(...)
default_port.send(...)
self
end
alias << send
#
# call-seq:
# Ractor.yield(msg, move: false) -> nil
#
# Send a message to the current ractor's outgoing port to be accepted by #take.
#
# r = Ractor.new {Ractor.yield 'Hello from ractor'}
# puts r.take
# # Prints: "Hello from ractor"
#
# This method is blocking, and will return only when somebody consumes the
# sent message.
#
# r = Ractor.new do
# Ractor.yield 'Hello from ractor'
# puts "Ractor: after yield"
# end
# wait
# puts "Still not taken"
# puts r.take
#
# This will print:
#
# Still not taken
# Hello from ractor
# Ractor: after yield
#
# If the outgoing port was closed with #close_outgoing, the method will raise:
#
# r = Ractor.new do
# close_outgoing
# Ractor.yield 'Hello from ractor'
# end
# wait
# # `yield': The outgoing-port is already closed (Ractor::ClosedError)
#
# The meaning of the +move+ argument is the same as for #send.
def self.yield(obj, move: false)
__builtin_cexpr! %q{
ractor_yield(ec, rb_ec_ractor_ptr(ec), obj, move)
}
end
#
# call-seq:
# ractor.take -> msg
#
# Get a message from the ractor's outgoing port, which was put there by Ractor.yield or at ractor's
# termination.
#
# r = Ractor.new do
# Ractor.yield 'explicit yield'
# 'last value'
# end
# puts r.take #=> 'explicit yield'
# puts r.take #=> 'last value'
# puts r.take # Ractor::ClosedError (The outgoing-port is already closed)
#
# The fact that the last value is also sent to the outgoing port means that +take+ can be used
# as an analog of Thread#join ("just wait until ractor finishes"). However, it will raise if
# somebody has already consumed that message.
#
# If the outgoing port was closed with #close_outgoing, the method will raise Ractor::ClosedError.
#
# r = Ractor.new do
# sleep(500)
# Ractor.yield 'Hello from ractor'
# end
# r.close_outgoing
# r.take
# # Ractor::ClosedError (The outgoing-port is already closed)
# # The error would be raised immediately, not when ractor will try to receive
#
# If an uncaught exception is raised in the Ractor, it is propagated by take as a
# Ractor::RemoteError.
#
# r = Ractor.new {raise "Something weird happened"}
#
# begin
# r.take
# rescue => e
# p e # => #<Ractor::RemoteError: thrown by remote Ractor.>
# p e.ractor == r # => true
# p e.cause # => #<RuntimeError: Something weird happened>
# end
#
# Ractor::ClosedError is a descendant of StopIteration, so the termination of the ractor will break
# out of any loops that receive this message without propagating the error:
#
# r = Ractor.new do
# 3.times {|i| Ractor.yield "message #{i}"}
# "finishing"
# end
#
# loop {puts "Received: " + r.take}
# puts "Continue successfully"
#
# This will print:
#
# Received: message 0
# Received: message 1
# Received: message 2
# Received: finishing
# Continue successfully
def take
__builtin_cexpr! %q{
ractor_take(ec, RACTOR_PTR(self))
}
end
def inspect
loc = __builtin_cexpr! %q{ RACTOR_PTR(self)->loc }
name = __builtin_cexpr! %q{ RACTOR_PTR(self)->name }
@ -737,38 +358,13 @@ class Ractor
#
# call-seq:
# ractor.close_incoming -> true | false
# Ractor.current.close -> true | false
#
# Closes the incoming port and returns whether it was already closed. All further attempts
# to Ractor.receive in the ractor, and #send to the ractor will fail with Ractor::ClosedError.
# Closes the default_port. Closing a port is allowed only by the Ractor that created it,
# so this method can be called only on the current Ractor itself.
#
# r = Ractor.new {sleep(500)}
# r.close_incoming #=> false
# r.close_incoming #=> true
# r.send('test')
# # Ractor::ClosedError (The incoming-port is already closed)
def close_incoming
__builtin_cexpr! %q{
ractor_close_incoming(ec, RACTOR_PTR(self));
}
end
#
# call-seq:
# ractor.close_outgoing -> true | false
#
# Closes the outgoing port and returns whether it was already closed. All further attempts
# to Ractor.yield in the ractor, and #take from the ractor will fail with Ractor::ClosedError.
#
# r = Ractor.new {sleep(500)}
# r.close_outgoing #=> false
# r.close_outgoing #=> true
# r.take
# # Ractor::ClosedError (The outgoing-port is already closed)
def close_outgoing
__builtin_cexpr! %q{
ractor_close_outgoing(ec, RACTOR_PTR(self));
}
def close
default_port.close
end
#
@ -922,4 +518,247 @@ class Ractor
}
end
end
#
# call-seq:
# ractor.default_port -> port object
#
# Returns the default port of the Ractor.
#
def default_port
__builtin_cexpr! %q{
ractor_default_port_value(RACTOR_PTR(self))
}
end
#
# call-seq:
# ractor.join -> self
#
# Wait for the termination of the Ractor.
# If the Ractor was aborted (terminated with an exception),
# Ractor#value is called to raise an exception.
#
# Ractor.new{}.join #=> ractor
#
# Ractor.new{ raise "foo" }.join
# #=> raise an exception "foo (RuntimeError)"
#
def join
port = Port.new
self.monitor port
if port.receive == :aborted
__builtin_ractor_value
end
self
ensure
port.close
end
#
# call-seq:
# ractor.value -> obj
#
# Waits for +ractor+ to complete, using #join, and returns its value or raises
# the exception which terminated the Ractor. The value will not be copied even
# if it is an unshareable object. Therefore at most one Ractor can get the value.
#
# r = Ractor.new{ [1, 2] }
# r.value #=> [1, 2] (unshareable object)
#
# Ractor.new(r){|r| r.value} #=> Ractor::Error
#
def value
self.join
__builtin_ractor_value
end
#
# call-seq:
# ractor.monitor(port) -> self
#
# Registers +port+ as a monitoring port. When the Ractor terminates,
# a Symbol object is sent to the port:
# :exited is sent if the Ractor terminated without an exception;
# :aborted is sent if the Ractor terminated with an exception.
#
# r = Ractor.new{ some_task() }
# r.monitor(port = Ractor::Port.new)
# port.receive #=> :exited and r is terminated
#
# r = Ractor.new{ raise "foo" }
# r.monitor(port = Ractor::Port.new)
# port.receive #=> :aborted and r is terminated with an exception "foo"
#
def monitor port
__builtin_ractor_monitor(port)
end
#
# call-seq:
# ractor.unmonitor(port) -> self
#
# Unregister port from the monitoring ports.
#
def unmonitor port
__builtin_ractor_unmonitor(port)
end
class Port
#
# call-seq:
# port.receive -> msg
#
# Receive a message from the port (which was sent there by Port#send).
#
# port = Ractor::Port.new
# r = Ractor.new port do |port|
# port.send('message1')
# end
#
# v1 = port.receive
# puts "Received: #{v1}"
# r.join
# # Here will be printed: "Received: message1"
#
# The method blocks if the message queue is empty.
#
# port = Ractor::Port.new
# r = Ractor.new port do |port|
# wait
# puts "Still not received"
# port.send('message1')
# wait
# puts "Still received only one"
# port.send('message2')
# end
# puts "Before first receive"
# v1 = port.receive
# puts "Received: #{v1}"
# v2 = port.receive
# puts "Received: #{v2}"
# r.join
#
# Output:
#
# Before first receive
# Still not received
# Received: message1
# Still received only one
# Received: message2
#
# If the port was closed, the method raises Ractor::ClosedError
# when there are no more messages in the message queue:
#
# port = Ractor::Port.new
# port.close
# port.receive #=> raise Ractor::ClosedError
#
def receive
__builtin_cexpr! %q{
ractor_port_receive(ec, self)
}
end
#
# call-seq:
# port.send(msg, move: false) -> self
#
# Send a message to a port to be accepted by port.receive.
#
# port = Ractor::Port.new
# r = Ractor.new port do |port|
#   port.send 'message'
# end
# value = port.receive
# puts "Received: #{value}"
# # Prints: "Received: message"
#
# The method is non-blocking (will return immediately even if the ractor is not ready
# to receive anything):
#
# port = Ractor::Port.new
# r = Ractor.new(port) do |port|
# port.send 'test'
# puts "Sent successfully"
# # Prints: "Sent successfully" immediately
# end
#
# An attempt to send to a port which is already closed will raise Ractor::ClosedError.
#
# r = Ractor.new {Ractor::Port.new}
# r.join
# p r
# # "#<Ractor:#6 (irb):23 terminated>"
# port = r.value
# port.send('test') # raise Ractor::ClosedError
#
# If the +obj+ is unshareable, by default it will be copied into the receiving ractor by deep cloning.
#
# If the object is shareable, only a reference to the object is sent, without cloning.
#
def send obj, move: false
__builtin_cexpr! %q{
ractor_port_send(ec, self, obj, move)
}
end
alias << send
#
# call-seq:
# port.close
#
# Close the port. Sending to a closed port is prohibited.
# Receiving is also prohibited once all messages sent before closing have been received.
#
# port = Ractor::Port.new
# Ractor.new port do |port|
# port.send 1 # OK
# port.send 2 # OK
# port.close
# port.send 3 # raise Ractor::ClosedError
# end
#
# port.receive #=> 1
# port.receive #=> 2
# port.receive #=> raise Ractor::ClosedError
#
# Currently, only the Ractor which created the port is allowed to close it.
#
# port = Ractor::Port.new
# Ractor.new port do |port|
# port.close #=> closing a port from another ractor is not allowed (Ractor::Error)
# end.join
#
def close
__builtin_cexpr! %q{
ractor_port_close(ec, self)
}
end
#
# call-seq:
# port.closed? -> true/false
#
# Returns whether the port is closed or not.
def closed?
__builtin_cexpr! %q{
ractor_port_closed_p(ec, self);
}
end
#
# call-seq:
# port.inspect -> string
def inspect
"#<Ractor::Port to:\##{
__builtin_cexpr! "SIZET2NUM(rb_ractor_id((RACTOR_PORT_PTR(self)->r)))"
} id:#{
__builtin_cexpr! "SIZET2NUM(ractor_port_id(RACTOR_PORT_PTR(self)))"
}>"
end
end
end

View File

@ -9,118 +9,36 @@
#define RACTOR_CHECK_MODE (VM_CHECK_MODE || RUBY_DEBUG) && (SIZEOF_UINT64_T == SIZEOF_VALUE)
#endif
enum rb_ractor_basket_type {
// basket is empty
basket_type_none,
// value is available
basket_type_ref,
basket_type_copy,
basket_type_move,
basket_type_will,
// basket should be deleted
basket_type_deleted,
// basket is reserved
basket_type_reserved,
// take_basket is available
basket_type_take_basket,
// basket is keeping by yielding ractor
basket_type_yielding,
};
// per ractor taking configuration
struct rb_ractor_selector_take_config {
bool closed;
bool oneshot;
};
struct rb_ractor_basket {
union {
enum rb_ractor_basket_type e;
rb_atomic_t atomic;
} type;
VALUE sender; // Ractor object sending message
rb_thread_t *sending_th;
union {
struct {
VALUE v;
bool exception;
} send;
struct {
struct rb_ractor_basket *basket;
struct rb_ractor_selector_take_config *config;
} take;
} p; // payload
};
static inline bool
basket_type_p(struct rb_ractor_basket *b, enum rb_ractor_basket_type type)
{
return b->type.e == type;
}
static inline bool
basket_none_p(struct rb_ractor_basket *b)
{
return basket_type_p(b, basket_type_none);
}
struct rb_ractor_queue {
struct rb_ractor_basket *baskets;
int start;
int cnt;
int size;
unsigned int serial;
unsigned int reserved_cnt;
};
enum rb_ractor_wait_status {
wait_none = 0x00,
wait_receiving = 0x01,
wait_taking = 0x02,
wait_yielding = 0x04,
wait_moving = 0x08,
};
enum rb_ractor_wakeup_status {
wakeup_none,
wakeup_by_send,
wakeup_by_yield,
wakeup_by_take,
wakeup_by_close,
wakeup_by_interrupt,
wakeup_by_retry,
};
struct rb_ractor_sync {
// ractor lock
rb_nativethread_lock_t lock;
#if RACTOR_CHECK_MODE > 0
VALUE locked_by;
#endif
bool incoming_port_closed;
bool outgoing_port_closed;
#ifndef RUBY_THREAD_PTHREAD_H
rb_nativethread_cond_t wakeup_cond;
#endif
// All sent messages will be pushed into recv_queue
struct rb_ractor_queue recv_queue;
// incoming messages
struct ractor_queue *recv_queue;
// The following ractors waiting for the yielding by this ractor
struct rb_ractor_queue takers_queue;
// waiting threads for receiving
struct ccan_list_head waiters;
// Enabled if the ractor already terminated and not taken yet.
struct rb_ractor_basket will_basket;
// ports
VALUE default_port_value;
struct st_table *ports;
size_t next_port_id;
struct ractor_wait {
struct ccan_list_head waiting_threads;
// each thread has struct ccan_list_node ractor_waiting.waiting_node
} wait;
// monitors
struct ccan_list_head monitors;
// value
rb_ractor_t *successor;
VALUE legacy;
bool legacy_exc;
};
// created
@ -146,12 +64,8 @@ enum ractor_status {
struct rb_ractor_struct {
struct rb_ractor_pub pub;
struct rb_ractor_sync sync;
// vm wide barrier synchronization
rb_nativethread_cond_t barrier_wait_cond;
// thread management
struct {
struct ccan_list_head set;
@ -162,6 +76,7 @@ struct rb_ractor_struct {
rb_execution_context_t *running_ec;
rb_thread_t *main;
} threads;
VALUE thgroup_default;
VALUE name;

1489
ractor_sync.c Normal file

File diff suppressed because it is too large Load Diff

View File

@ -151,7 +151,7 @@ class TestThreadInstrumentation < Test::Unit::TestCase
end
full_timeline = record do
ractor.take
ractor.value
end
timeline = timeline_for(Thread.current, full_timeline)
@ -172,7 +172,7 @@ class TestThreadInstrumentation < Test::Unit::TestCase
thread = Ractor.new{
sleep 0.1
Thread.current
}.take
}.value
sleep 0.1
end

View File

@ -8,7 +8,7 @@ class TestDateParseRactor < Test::Unit::TestCase
share = #{share}
d = Date.parse('Aug 23:55')
Ractor.make_shareable(d) if share
d2, d3 = Ractor.new(d) { |d| [d, Date.parse(d.to_s)] }.take
d2, d3 = Ractor.new(d) { |d| [d, Date.parse(d.to_s)] }.value
if share
assert_same d, d2
else

View File

@ -14,7 +14,7 @@ class RactorCompatibilityTest < Test::Unit::TestCase
e.corrections # It is important to call the #corrections method within Ractor.
e
end
}.take
}.value
assert_correction "Book", error.corrections
CODE
@ -32,7 +32,7 @@ class RactorCompatibilityTest < Test::Unit::TestCase
e.corrections # It is important to call the #corrections method within Ractor.
e
end
}.take
}.value
assert_correction ":bar", error.corrections
assert_match "Did you mean? :bar", get_message(error)
@ -49,7 +49,7 @@ class RactorCompatibilityTest < Test::Unit::TestCase
e.corrections # It is important to call the #corrections method within Ractor.
e
end
}.take
}.value
assert_correction :to_s, error.corrections
assert_match "Did you mean? to_s", get_message(error)
@ -71,7 +71,7 @@ class RactorCompatibilityTest < Test::Unit::TestCase
e.corrections # It is important to call the #corrections method within Ractor.
e
end
}.take
}.value
assert_correction ":foo", error.corrections
assert_match "Did you mean? :foo", get_message(error)
@ -90,7 +90,7 @@ class RactorCompatibilityTest < Test::Unit::TestCase
e.corrections # It is important to call the #corrections method within Ractor.
e
end
}.take
}.value
assert_not_match(/Did you mean\?/, error.message)
CODE
@ -108,7 +108,7 @@ class RactorCompatibilityTest < Test::Unit::TestCase
e.corrections # It is important to call the #corrections method within Ractor.
e
end
}.take
}.value
assert_correction :in_ractor, error.corrections
assert_match "Did you mean? in_ractor", get_message(error)

View File

@ -26,7 +26,7 @@ module TestDigestRactor
[r, hexdigest]
end
rs.each do |r, hexdigest|
puts r.take == hexdigest
puts r.value == hexdigest
end
end;
end

View File

@ -198,7 +198,7 @@ class TestEtc < Test::Unit::TestCase
raise unless Integer === Etc.nprocessors
end
end
end.each(&:take)
end.each(&:join)
RUBY
end
@ -210,7 +210,7 @@ class TestEtc < Test::Unit::TestCase
rescue => e
e.class
end
end.take
end.value
assert_equal Ractor::UnsafeError, r
RUBY
end
@ -221,19 +221,19 @@ class TestEtc < Test::Unit::TestCase
Etc.endpwent
assert_ractor(<<~RUBY, require: 'etc')
ractor = Ractor.new do
ractor = Ractor.new port = Ractor::Port.new do |port|
Etc.passwd do |s|
Ractor.yield :sync
Ractor.yield s.name
port << :sync
port << s.name
break :done
end
end
ractor.take # => :sync
port.receive # => :sync
assert_raise RuntimeError, /parallel/ do
Etc.passwd {}
end
name = ractor.take # => first name
ractor.take # => :done
name = port.receive # => first name
ractor.join # => :done
name2 = Etc.passwd do |s|
break s.name
end
@ -251,7 +251,7 @@ class TestEtc < Test::Unit::TestCase
raise unless Etc.getgrgid(Process.gid).gid == Process.gid
end
end
end.each(&:take)
end.each(&:join)
RUBY
end
end

View File

@ -17,7 +17,7 @@ class TestFiberCurrentRactor < Test::Unit::TestCase
Fiber.current.class
end.resume
end
assert_equal(Fiber, r.take)
assert_equal(Fiber, r.value)
end;
end
end

View File

@ -18,7 +18,7 @@ class TestIOConsoleInRactor < Test::Unit::TestCase
else
true # should not success
end
puts r.take
puts r.value
end;
assert_in_out_err(%W[-r#{path}], "#{<<~"begin;"}\n#{<<~'end;'}", ["true"], [])
@ -28,7 +28,7 @@ class TestIOConsoleInRactor < Test::Unit::TestCase
r = Ractor.new do
IO.console
end
puts console.class == r.take.class
puts console.class == r.value.class
end;
end
end if defined? Ractor

View File

@ -11,7 +11,7 @@ class TestIOWaitInRactor < Test::Unit::TestCase
r = Ractor.new do
$stdout.equal?($stdout.wait_writable)
end
puts r.take
puts r.value
end;
end
end if defined? Ractor

View File

@ -25,7 +25,7 @@ class JSONInRactorTest < Test::Unit::TestCase
end
expected_json = JSON.parse('{"a":2,"b":3.141,"c":"c","d":[1,"b",3.14],"e":{"foo":"bar"},' +
'"g":"\\"\\u0000\\u001f","h":1000.0,"i":0.001}')
actual_json = r.take
actual_json = r.value
if expected_json == actual_json
exit 0

View File

@ -5,12 +5,10 @@ class TestObjSpaceRactor < Test::Unit::TestCase
assert_ractor(<<~RUBY, require: 'objspace')
ObjectSpace.trace_object_allocations do
r = Ractor.new do
obj = 'a' * 1024
Ractor.yield obj
_obj = 'a' * 1024
end
r.take
r.take
r.join
end
RUBY
end
@ -30,7 +28,7 @@ class TestObjSpaceRactor < Test::Unit::TestCase
end
end
ractors.each(&:take)
ractors.each(&:join)
RUBY
end
@ -51,7 +49,7 @@ class TestObjSpaceRactor < Test::Unit::TestCase
end
end
ractors.each(&:take)
ractors.each(&:join)
RUBY
end
end

View File

@ -15,7 +15,7 @@ class TestPathnameRactor < Test::Unit::TestCase
r = Ractor.new Pathname("a") do |x|
x.join(Pathname("b"), Pathname("c"))
end
assert_equal(Pathname("a/b/c"), r.take)
assert_equal(Pathname("a/b/c"), r.value)
end;
end
end

View File

@ -62,7 +62,7 @@ module Prism
if reader
reader.gets.chomp
else
puts(ignore_warnings { Ractor.new(*arguments, &block) }.take)
puts(ignore_warnings { Ractor.new(*arguments, &block) }.value)
end
end
end

View File

@ -7,7 +7,7 @@ class TestPsychRactor < Test::Unit::TestCase
obj = {foo: [42]}
obj2 = Ractor.new(obj) do |obj|
Psych.unsafe_load(Psych.dump(obj))
end.take
end.value
assert_equal obj, obj2
RUBY
end
@ -33,7 +33,7 @@ class TestPsychRactor < Test::Unit::TestCase
val * 2
end
Psych.load('--- !!omap hello')
end.take
end.value
assert_equal 'hellohello', r
assert_equal 'hello', Psych.load('--- !!omap hello')
RUBY
@ -43,7 +43,7 @@ class TestPsychRactor < Test::Unit::TestCase
assert_ractor(<<~RUBY, require_relative: 'helper')
r = Ractor.new do
Psych.libyaml_version.join('.') == Psych::LIBYAML_VERSION
end.take
end.value
assert_equal true, r
RUBY
end

View File

@ -130,7 +130,7 @@ class TestEncoding < Test::Unit::TestCase
def test_ractor_load_encoding
assert_ractor("#{<<~"begin;"}\n#{<<~'end;'}")
begin;
Ractor.new{}.take
Ractor.new{}.join
$-w = nil
Encoding.default_external = Encoding::ISO8859_2
assert "[Bug #19562]"

File diff suppressed because it is too large Load Diff

View File

@ -808,7 +808,7 @@ class TestISeq < Test::Unit::TestCase
GC.start
Float(30)
}
assert_equal :new, r.take
assert_equal :new, r.value
RUBY
end

View File

@ -335,7 +335,7 @@ class TestMemoryView < Test::Unit::TestCase
p mv[[0, 2]]
mv[[1, 3]]
end
p r.take
p r.value
end;
end
end

View File

@ -74,7 +74,7 @@ class TestRactor < Test::Unit::TestCase
Warning[:experimental] = false
main_ractor_id = Thread.current.group.object_id
ractor_id = Ractor.new { Thread.current.group.object_id }.take
ractor_id = Ractor.new { Thread.current.group.object_id }.value
refute_equal main_ractor_id, ractor_id
end;
end
@ -93,7 +93,7 @@ class TestRactor < Test::Unit::TestCase
else
nil
end
end.take
end.value
assert_equal "uh oh", err_msg
RUBY
end

View File

@ -596,8 +596,8 @@ class TestShapes < Test::Unit::TestCase
assert_predicate RubyVM::Shape.of(tc), :too_complex?
assert_equal 3, tc.very_unique
assert_equal 3, Ractor.new(tc) { |x| Ractor.yield(x.very_unique) }.take
assert_equal tc.instance_variables.sort, Ractor.new(tc) { |x| Ractor.yield(x.instance_variables) }.take.sort
assert_equal 3, Ractor.new(tc) { |x| x.very_unique }.value
assert_equal tc.instance_variables.sort, Ractor.new(tc) { |x| x.instance_variables }.value.sort
end;
end
@ -699,10 +699,10 @@ class TestShapes < Test::Unit::TestCase
r = Ractor.new do
o = Object.new
o.instance_variable_set(:@a, "hello")
Ractor.yield(o)
o
end
o = r.take
o = r.value
assert_equal "hello", o.instance_variable_get(:@a)
end;
end
@ -717,10 +717,10 @@ class TestShapes < Test::Unit::TestCase
r = Ractor.new do
o = []
o.instance_variable_set(:@a, "hello")
Ractor.yield(o)
o
end
o = r.take
o = r.value
assert_equal "hello", o.instance_variable_get(:@a)
end;
end

View File

@ -17,7 +17,7 @@ class TestStringIOInRactor < Test::Unit::TestCase
io.puts "def"
"\0\0\0\0def\n" == io.string
end
puts r.take
puts r.value
end;
end
end

View File

@ -22,7 +22,7 @@ class TestStringScannerRactor < Test::Unit::TestCase
s.scan(/\\w+/)
]
end
puts r.take.compact
puts r.value.compact
end;
end
end

View File

@ -60,7 +60,7 @@ class TestRbConfig < Test::Unit::TestCase
[sizeof_int, fixnum_max]
end
sizeof_int, fixnum_max = r.take
sizeof_int, fixnum_max = r.value
assert_kind_of Integer, sizeof_int, "RbConfig::SIZEOF['int'] should be an Integer"
assert_kind_of Integer, fixnum_max, "RbConfig::LIMITS['FIXNUM_MAX'] should be an Integer"

View File

@ -74,7 +74,7 @@ class TestTimeExtension < Test::Unit::TestCase # :nodoc:
if defined?(Ractor)
def test_rfc2822_ractor
assert_ractor(<<~RUBY, require: 'time')
actual = Ractor.new { Time.rfc2822("Fri, 21 Nov 1997 09:55:06 -0600") }.take
actual = Ractor.new { Time.rfc2822("Fri, 21 Nov 1997 09:55:06 -0600") }.value
assert_equal(Time.utc(1997, 11, 21, 9, 55, 6) + 6 * 3600, actual)
RUBY
end

View File

@ -134,16 +134,17 @@ class TestTmpdir < Test::Unit::TestCase
def test_ractor
assert_ractor(<<~'end;', require: "tmpdir")
r = Ractor.new do
port = Ractor::Port.new
r = Ractor.new port do |port|
Dir.mktmpdir() do |d|
Ractor.yield d
port << d
Ractor.receive
end
end
dir = r.take
dir = port.receive
assert_file.directory? dir
r.send true
r.take
r.join
assert_file.not_exist? dir
end;
end

View File

@ -75,7 +75,7 @@ class URI::TestCommon < Test::Unit::TestCase
return unless defined?(Ractor)
assert_ractor(<<~RUBY, require: 'uri')
r = Ractor.new { URI.parse("https://ruby-lang.org/").inspect }
assert_equal(URI.parse("https://ruby-lang.org/").inspect, r.take)
assert_equal(URI.parse("https://ruby-lang.org/").inspect, r.value)
RUBY
end

View File

@ -526,9 +526,6 @@ thread_cleanup_func(void *th_ptr, int atfork)
}
rb_native_mutex_destroy(&th->interrupt_lock);
#ifndef RUBY_THREAD_PTHREAD_H
rb_native_cond_destroy(&th->ractor_waiting.cond);
#endif
}
static VALUE rb_threadptr_raise(rb_thread_t *, int, VALUE *);
@ -6174,6 +6171,8 @@ threadptr_interrupt_exec_exec(rb_thread_t *th)
}
rb_native_mutex_unlock(&th->interrupt_lock);
RUBY_DEBUG_LOG("task:%p", task);
if (task) {
(*task->func)(task->data);
ruby_xfree(task);
@ -6228,6 +6227,8 @@ rb_ractor_interrupt_exec(struct rb_ractor_struct *target_r,
{
struct interrupt_ractor_new_thread_data *d = ALLOC(struct interrupt_ractor_new_thread_data);
RUBY_DEBUG_LOG("flags:%d", (int)flags);
d->func = func;
d->data = data;
rb_thread_t *main_th = target_r->threads.main;

View File

@ -373,18 +373,38 @@ ractor_sched_dump_(const char *file, int line, rb_vm_t *vm)
#define thread_sched_lock(a, b) thread_sched_lock_(a, b, __FILE__, __LINE__)
#define thread_sched_unlock(a, b) thread_sched_unlock_(a, b, __FILE__, __LINE__)
static void
thread_sched_set_locked(struct rb_thread_sched *sched, rb_thread_t *th)
{
#if VM_CHECK_MODE > 0
VM_ASSERT(sched->lock_owner == NULL);
sched->lock_owner = th;
#endif
}
static void
thread_sched_set_unlocked(struct rb_thread_sched *sched, rb_thread_t *th)
{
#if VM_CHECK_MODE > 0
VM_ASSERT(sched->lock_owner == th);
sched->lock_owner = NULL;
#endif
}
static void
thread_sched_lock_(struct rb_thread_sched *sched, rb_thread_t *th, const char *file, int line)
{
rb_native_mutex_lock(&sched->lock_);
#if VM_CHECK_MODE
RUBY_DEBUG_LOG2(file, line, "th:%u prev_owner:%u", rb_th_serial(th), rb_th_serial(sched->lock_owner));
VM_ASSERT(sched->lock_owner == NULL);
sched->lock_owner = th;
RUBY_DEBUG_LOG2(file, line, "r:%d th:%u", th ? (int)rb_ractor_id(th->ractor) : -1, rb_th_serial(th));
#else
RUBY_DEBUG_LOG2(file, line, "th:%u", rb_th_serial(th));
#endif
thread_sched_set_locked(sched, th);
}
static void
@ -392,24 +412,11 @@ thread_sched_unlock_(struct rb_thread_sched *sched, rb_thread_t *th, const char
{
RUBY_DEBUG_LOG2(file, line, "th:%u", rb_th_serial(th));
#if VM_CHECK_MODE
VM_ASSERT(sched->lock_owner == th);
sched->lock_owner = NULL;
#endif
thread_sched_set_unlocked(sched, th);
rb_native_mutex_unlock(&sched->lock_);
}
static void
thread_sched_set_lock_owner(struct rb_thread_sched *sched, rb_thread_t *th)
{
RUBY_DEBUG_LOG("th:%u", rb_th_serial(th));
#if VM_CHECK_MODE > 0
sched->lock_owner = th;
#endif
}
static void
ASSERT_thread_sched_locked(struct rb_thread_sched *sched, rb_thread_t *th)
{
@ -542,7 +549,6 @@ ractor_sched_timeslice_threads_contain_p(rb_vm_t *vm, rb_thread_t *th)
}
static void ractor_sched_barrier_join_signal_locked(rb_vm_t *vm);
static void ractor_sched_barrier_join_wait_locked(rb_vm_t *vm, rb_thread_t *th);
// setup timeslice signals by the timer thread.
static void
@ -585,11 +591,10 @@ thread_sched_setup_running_threads(struct rb_thread_sched *sched, rb_ractor_t *c
}
if (add_th) {
while (UNLIKELY(vm->ractor.sched.barrier_waiting)) {
RUBY_DEBUG_LOG("barrier-wait");
ractor_sched_barrier_join_signal_locked(vm);
ractor_sched_barrier_join_wait_locked(vm, add_th);
if (vm->ractor.sched.barrier_waiting) {
// TODO: GC barrier check?
RUBY_DEBUG_LOG("barrier_waiting");
RUBY_VM_SET_VM_BARRIER_INTERRUPT(add_th->ec);
}
VM_ASSERT(!ractor_sched_running_threads_contain_p(vm, add_th));
@ -598,7 +603,6 @@ thread_sched_setup_running_threads(struct rb_thread_sched *sched, rb_ractor_t *c
ccan_list_add(&vm->ractor.sched.running_threads, &add_th->sched.node.running_threads);
vm->ractor.sched.running_cnt++;
sched->is_running = true;
VM_ASSERT(!vm->ractor.sched.barrier_waiting);
}
if (add_timeslice_th) {
@ -622,19 +626,6 @@ thread_sched_setup_running_threads(struct rb_thread_sched *sched, rb_ractor_t *c
}
ractor_sched_unlock(vm, cr);
if (add_th && !del_th && UNLIKELY(vm->ractor.sync.lock_owner != NULL)) {
// it can be after barrier synchronization by another ractor
rb_thread_t *lock_owner = NULL;
#if VM_CHECK_MODE
lock_owner = sched->lock_owner;
#endif
thread_sched_unlock(sched, lock_owner);
{
RB_VM_LOCKING();
}
thread_sched_lock(sched, lock_owner);
}
//RUBY_DEBUG_LOG("+:%u -:%u +ts:%u -ts:%u run:%u->%u",
// rb_th_serial(add_th), rb_th_serial(del_th),
// rb_th_serial(add_timeslice_th), rb_th_serial(del_timeslice_th),
@ -753,7 +744,8 @@ thread_sched_enq(struct rb_thread_sched *sched, rb_thread_t *ready_th)
}
}
else {
VM_ASSERT(!ractor_sched_timeslice_threads_contain_p(ready_th->vm, sched->running));
// ractor_sched lock is needed
// VM_ASSERT(!ractor_sched_timeslice_threads_contain_p(ready_th->vm, sched->running));
}
ccan_list_add_tail(&sched->readyq, &ready_th->sched.node.readyq);
@ -849,12 +841,12 @@ thread_sched_wait_running_turn(struct rb_thread_sched *sched, rb_thread_t *th, b
if (th_has_dedicated_nt(th)) {
RUBY_DEBUG_LOG("(nt) sleep th:%u running:%u", rb_th_serial(th), rb_th_serial(sched->running));
thread_sched_set_lock_owner(sched, NULL);
thread_sched_set_unlocked(sched, th);
{
RUBY_DEBUG_LOG("nt:%d cond:%p", th->nt->serial, &th->nt->cond.readyq);
rb_native_cond_wait(&th->nt->cond.readyq, &sched->lock_);
}
thread_sched_set_lock_owner(sched, th);
thread_sched_set_locked(sched, th);
RUBY_DEBUG_LOG("(nt) wakeup %s", sched->running == th ? "success" : "failed");
if (th == sched->running) {
@ -870,12 +862,12 @@ thread_sched_wait_running_turn(struct rb_thread_sched *sched, rb_thread_t *th, b
RUBY_DEBUG_LOG("th:%u->%u (direct)", rb_th_serial(th), rb_th_serial(next_th));
thread_sched_set_lock_owner(sched, NULL);
thread_sched_set_unlocked(sched, th);
{
rb_ractor_set_current_ec(th->ractor, NULL);
thread_sched_switch(th, next_th);
}
thread_sched_set_lock_owner(sched, th);
thread_sched_set_locked(sched, th);
}
else {
// search another ready ractor
@ -884,12 +876,12 @@ thread_sched_wait_running_turn(struct rb_thread_sched *sched, rb_thread_t *th, b
RUBY_DEBUG_LOG("th:%u->%u (ractor scheduling)", rb_th_serial(th), rb_th_serial(next_th));
thread_sched_set_lock_owner(sched, NULL);
thread_sched_set_unlocked(sched, th);
{
rb_ractor_set_current_ec(th->ractor, NULL);
coroutine_transfer0(th->sched.context, nt->nt_context, false);
}
thread_sched_set_lock_owner(sched, th);
thread_sched_set_locked(sched, th);
}
VM_ASSERT(rb_current_ec_noinline() == th->ec);
@ -1041,15 +1033,45 @@ thread_sched_to_waiting(struct rb_thread_sched *sched, rb_thread_t *th)
}
// mini utility func
static void
setup_ubf(rb_thread_t *th, rb_unblock_function_t *func, void *arg)
// return true if any there are any interrupts
static bool
ubf_set(rb_thread_t *th, rb_unblock_function_t *func, void *arg)
{
VM_ASSERT(func != NULL);
retry:
if (RUBY_VM_INTERRUPTED(th->ec)) {
RUBY_DEBUG_LOG("interrupted:0x%x", th->ec->interrupt_flag);
return true;
}
rb_native_mutex_lock(&th->interrupt_lock);
{
if (!th->ec->raised_flag && RUBY_VM_INTERRUPTED(th->ec)) {
rb_native_mutex_unlock(&th->interrupt_lock);
goto retry;
}
VM_ASSERT(th->unblock.func == NULL);
th->unblock.func = func;
th->unblock.arg = arg;
}
rb_native_mutex_unlock(&th->interrupt_lock);
return false;
}
static void
ubf_clear(rb_thread_t *th)
{
if (th->unblock.func) {
rb_native_mutex_lock(&th->interrupt_lock);
{
th->unblock.func = NULL;
th->unblock.arg = NULL;
}
rb_native_mutex_unlock(&th->interrupt_lock);
}
}
static void
@ -1085,7 +1107,10 @@ thread_sched_to_waiting_until_wakeup(struct rb_thread_sched *sched, rb_thread_t
RUBY_DEBUG_LOG("th:%u", rb_th_serial(th));
RB_VM_SAVE_MACHINE_CONTEXT(th);
setup_ubf(th, ubf_waiting, (void *)th);
if (ubf_set(th, ubf_waiting, (void *)th)) {
return;
}
RB_INTERNAL_THREAD_HOOK(RUBY_INTERNAL_THREAD_EVENT_SUSPENDED, th);
@ -1102,7 +1127,7 @@ thread_sched_to_waiting_until_wakeup(struct rb_thread_sched *sched, rb_thread_t
}
thread_sched_unlock(sched, th);
setup_ubf(th, NULL, NULL);
ubf_clear(th);
}
// run another thread in the ready queue.
@ -1311,66 +1336,59 @@ void rb_ractor_unlock_self(rb_ractor_t *r);
// The current thread for a ractor is put to "sleep" (descheduled in the STOPPED_FOREVER state) waiting for
// a ractor action to wake it up. See docs for `ractor_sched_sleep_with_cleanup` for more info.
void
rb_ractor_sched_sleep(rb_execution_context_t *ec, rb_ractor_t *cr, rb_unblock_function_t *ubf_schedule_ractor_th)
rb_ractor_sched_wait(rb_execution_context_t *ec, rb_ractor_t *cr, rb_unblock_function_t *ubf, void *ubf_arg)
{
// ractor lock of cr is acquired
// r is sleeping status
RUBY_DEBUG_LOG("start%s", "");
rb_thread_t * volatile th = rb_ec_thread_ptr(ec);
struct rb_thread_sched *sched = TH_SCHED(th);
struct ccan_list_node *waitn = &th->ractor_waiting.waiting_node;
VM_ASSERT(waitn->next == waitn->prev && waitn->next == waitn); // it should be unlinked
ccan_list_add(&cr->sync.wait.waiting_threads, waitn);
setup_ubf(th, ubf_schedule_ractor_th, (void *)ec);
if (ubf_set(th, ubf, ubf_arg)) {
// interrupted
return;
}
thread_sched_lock(sched, th);
{
// setup sleep
bool can_direct_transfer = !th_has_dedicated_nt(th);
RB_VM_SAVE_MACHINE_CONTEXT(th);
th->status = THREAD_STOPPED_FOREVER;
RB_INTERNAL_THREAD_HOOK(RUBY_INTERNAL_THREAD_EVENT_SUSPENDED, th);
thread_sched_wakeup_next_thread(sched, th, can_direct_transfer);
rb_ractor_unlock_self(cr);
{
if (RUBY_VM_INTERRUPTED(th->ec)) {
RUBY_DEBUG_LOG("interrupted");
}
else if (th->ractor_waiting.wakeup_status != wakeup_none) {
RUBY_DEBUG_LOG("awaken:%d", (int)th->ractor_waiting.wakeup_status);
}
else {
// sleep
RB_VM_SAVE_MACHINE_CONTEXT(th);
th->status = THREAD_STOPPED_FOREVER;
RB_INTERNAL_THREAD_HOOK(RUBY_INTERNAL_THREAD_EVENT_SUSPENDED, th);
bool can_direct_transfer = !th_has_dedicated_nt(th);
thread_sched_wakeup_next_thread(sched, th, can_direct_transfer);
thread_sched_wait_running_turn(sched, th, can_direct_transfer);
th->status = THREAD_RUNNABLE;
// wakeup
}
// sleep
thread_sched_wait_running_turn(sched, th, can_direct_transfer);
th->status = THREAD_RUNNABLE;
}
rb_ractor_lock_self(cr);
}
thread_sched_unlock(sched, th);
setup_ubf(th, NULL, NULL);
ubf_clear(th);
rb_ractor_lock_self(cr);
ccan_list_del_init(waitn);
RUBY_DEBUG_LOG("end%s", "");
}
void
rb_ractor_sched_wakeup(rb_ractor_t *r, rb_thread_t *th)
rb_ractor_sched_wakeup(rb_ractor_t *r, rb_thread_t *r_th)
{
// ractor lock of r is acquired
struct rb_thread_sched *sched = TH_SCHED(th);
// ractor lock of r is NOT acquired
struct rb_thread_sched *sched = TH_SCHED(r_th);
VM_ASSERT(th->ractor_waiting.wakeup_status != 0);
RUBY_DEBUG_LOG("r:%u th:%d", (unsigned int)rb_ractor_id(r), r_th->serial);
thread_sched_lock(sched, th);
thread_sched_lock(sched, r_th);
{
if (th->status == THREAD_STOPPED_FOREVER) {
thread_sched_to_ready_common(sched, th, true, false);
if (r_th->status == THREAD_STOPPED_FOREVER) {
thread_sched_to_ready_common(sched, r_th, true, false);
}
}
thread_sched_unlock(sched, th);
thread_sched_unlock(sched, r_th);
}
static bool
@ -1378,6 +1396,7 @@ ractor_sched_barrier_completed_p(rb_vm_t *vm)
{
RUBY_DEBUG_LOG("run:%u wait:%u", vm->ractor.sched.running_cnt, vm->ractor.sched.barrier_waiting_cnt);
VM_ASSERT(vm->ractor.sched.running_cnt - 1 >= vm->ractor.sched.barrier_waiting_cnt);
return (vm->ractor.sched.running_cnt - vm->ractor.sched.barrier_waiting_cnt) == 1;
}
@ -1388,6 +1407,8 @@ rb_ractor_sched_barrier_start(rb_vm_t *vm, rb_ractor_t *cr)
VM_ASSERT(vm->ractor.sync.lock_owner == cr); // VM is locked
VM_ASSERT(!vm->ractor.sched.barrier_waiting);
VM_ASSERT(vm->ractor.sched.barrier_waiting_cnt == 0);
VM_ASSERT(vm->ractor.sched.barrier_ractor == NULL);
VM_ASSERT(vm->ractor.sched.barrier_lock_rec == 0);
RUBY_DEBUG_LOG("start serial:%u", vm->ractor.sched.barrier_serial);
@ -1396,46 +1417,60 @@ rb_ractor_sched_barrier_start(rb_vm_t *vm, rb_ractor_t *cr)
ractor_sched_lock(vm, cr);
{
vm->ractor.sched.barrier_waiting = true;
vm->ractor.sched.barrier_ractor = cr;
vm->ractor.sched.barrier_lock_rec = vm->ractor.sync.lock_rec;
// release VM lock
lock_rec = vm->ractor.sync.lock_rec;
vm->ractor.sync.lock_rec = 0;
vm->ractor.sync.lock_owner = NULL;
rb_native_mutex_unlock(&vm->ractor.sync.lock);
{
// interrupts all running threads
rb_thread_t *ith;
ccan_list_for_each(&vm->ractor.sched.running_threads, ith, sched.node.running_threads) {
if (ith->ractor != cr) {
RUBY_DEBUG_LOG("barrier int:%u", rb_th_serial(ith));
RUBY_VM_SET_VM_BARRIER_INTERRUPT(ith->ec);
}
}
// wait for other ractors
while (!ractor_sched_barrier_completed_p(vm)) {
ractor_sched_set_unlocked(vm, cr);
rb_native_cond_wait(&vm->ractor.sched.barrier_complete_cond, &vm->ractor.sched.lock);
ractor_sched_set_locked(vm, cr);
// interrupts all running threads
rb_thread_t *ith;
ccan_list_for_each(&vm->ractor.sched.running_threads, ith, sched.node.running_threads) {
if (ith->ractor != cr) {
RUBY_DEBUG_LOG("barrier request to th:%u", rb_th_serial(ith));
RUBY_VM_SET_VM_BARRIER_INTERRUPT(ith->ec);
}
}
}
ractor_sched_unlock(vm, cr);
// acquire VM lock
rb_native_mutex_lock(&vm->ractor.sync.lock);
vm->ractor.sync.lock_rec = lock_rec;
vm->ractor.sync.lock_owner = cr;
// wait for other ractors
while (!ractor_sched_barrier_completed_p(vm)) {
ractor_sched_set_unlocked(vm, cr);
rb_native_cond_wait(&vm->ractor.sched.barrier_complete_cond, &vm->ractor.sched.lock);
ractor_sched_set_locked(vm, cr);
}
RUBY_DEBUG_LOG("completed seirial:%u", vm->ractor.sched.barrier_serial);
RUBY_DEBUG_LOG("completed seirial:%u", vm->ractor.sched.barrier_serial);
ractor_sched_lock(vm, cr);
{
vm->ractor.sched.barrier_waiting = false;
// no other ractors are there
vm->ractor.sched.barrier_serial++;
vm->ractor.sched.barrier_waiting_cnt = 0;
rb_native_cond_broadcast(&vm->ractor.sched.barrier_release_cond);
// acquire VM lock
rb_native_mutex_lock(&vm->ractor.sync.lock);
vm->ractor.sync.lock_rec = lock_rec;
vm->ractor.sync.lock_owner = cr;
}
// do not release ractor_sched_lock and threre is no newly added (resumed) thread
// thread_sched_setup_running_threads
}
// called from vm_lock_leave if the vm_lock used for barrierred
void
rb_ractor_sched_barrier_end(rb_vm_t *vm, rb_ractor_t *cr)
{
RUBY_DEBUG_LOG("serial:%u", (unsigned int)vm->ractor.sched.barrier_serial - 1);
VM_ASSERT(vm->ractor.sched.barrier_waiting);
VM_ASSERT(vm->ractor.sched.barrier_ractor);
VM_ASSERT(vm->ractor.sched.barrier_lock_rec > 0);
vm->ractor.sched.barrier_waiting = false;
vm->ractor.sched.barrier_ractor = NULL;
vm->ractor.sched.barrier_lock_rec = 0;
ractor_sched_unlock(vm, cr);
}

View File

@ -164,4 +164,8 @@ native_tls_set(native_tls_key_t key, void *ptr)
RUBY_EXTERN native_tls_key_t ruby_current_ec_key;
#endif
struct rb_ractor_struct;
void rb_ractor_sched_wait(struct rb_execution_context_struct *ec, struct rb_ractor_struct *cr, rb_unblock_function_t *ubf, void *ptr);
void rb_ractor_sched_wakeup(struct rb_ractor_struct *r, struct rb_thread_struct *th);
#endif /* RUBY_THREAD_PTHREAD_H */

View File

@ -72,7 +72,7 @@ thread_sched_wait_events(struct rb_thread_sched *sched, rb_thread_t *th, int fd,
RUBY_DEBUG_LOG("wait fd:%d", fd);
RB_VM_SAVE_MACHINE_CONTEXT(th);
setup_ubf(th, ubf_event_waiting, (void *)th);
ubf_set(th, ubf_event_waiting, (void *)th);
RB_INTERNAL_THREAD_HOOK(RUBY_INTERNAL_THREAD_EVENT_SUSPENDED, th);
@ -102,7 +102,7 @@ thread_sched_wait_events(struct rb_thread_sched *sched, rb_thread_t *th, int fd,
timer_thread_cancel_waiting(th);
}
setup_ubf(th, NULL, NULL); // TODO: maybe it is already NULL?
ubf_clear(th); // TODO: maybe it is already NULL?
th->status = THREAD_RUNNABLE;
}
@ -450,7 +450,7 @@ co_start(struct coroutine_context *from, struct coroutine_context *self)
// RUBY_DEBUG_LOG("th:%u", rb_th_serial(th));
thread_sched_set_lock_owner(sched, th);
thread_sched_set_locked(sched, th);
thread_sched_add_running_thread(TH_SCHED(th), th);
thread_sched_unlock(sched, th);
{
@ -475,13 +475,11 @@ co_start(struct coroutine_context *from, struct coroutine_context *self)
coroutine_transfer0(self, nt->nt_context, true);
}
else {
rb_vm_t *vm = th->vm;
bool has_ready_ractor = vm->ractor.sched.grq_cnt > 0; // at least this ractor is not queued
rb_thread_t *next_th = sched->running;
if (!has_ready_ractor && next_th && !next_th->nt) {
if (next_th && !next_th->nt) {
// switch to the next thread
thread_sched_set_lock_owner(sched, NULL);
thread_sched_set_unlocked(sched, NULL);
th->sched.finished = true;
thread_sched_switch0(th->sched.context, next_th, nt, true);
}

View File

@ -922,6 +922,7 @@ vm_barrier_finish_p(rb_vm_t *vm)
vm->ractor.blocking_cnt);
VM_ASSERT(vm->ractor.blocking_cnt <= vm->ractor.cnt);
return vm->ractor.blocking_cnt == vm->ractor.cnt;
}
@ -947,7 +948,7 @@ rb_ractor_sched_barrier_start(rb_vm_t *vm, rb_ractor_t *cr)
// wait
while (!vm_barrier_finish_p(vm)) {
rb_vm_cond_wait(vm, &vm->ractor.sync.barrier_cond);
rb_vm_cond_wait(vm, &vm->ractor.sync.barrier_complete_cond);
}
RUBY_DEBUG_LOG("cnt:%u barrier success", vm->ractor.sync.barrier_cnt);
@ -957,9 +958,7 @@ rb_ractor_sched_barrier_start(rb_vm_t *vm, rb_ractor_t *cr)
vm->ractor.sync.barrier_waiting = false;
vm->ractor.sync.barrier_cnt++;
ccan_list_for_each(&vm->ractor.set, r, vmlr_node) {
rb_native_cond_signal(&r->barrier_wait_cond);
}
rb_native_cond_broadcast(&vm->ractor.sync.barrier_release_cond);
}
void
@ -983,7 +982,7 @@ rb_ractor_sched_barrier_join(rb_vm_t *vm, rb_ractor_t *cr)
if (vm_barrier_finish_p(vm)) {
RUBY_DEBUG_LOG("wakeup barrier owner");
rb_native_cond_signal(&vm->ractor.sync.barrier_cond);
rb_native_cond_signal(&vm->ractor.sync.barrier_complete_cond);
}
else {
RUBY_DEBUG_LOG("wait for barrier finish");
@ -991,10 +990,7 @@ rb_ractor_sched_barrier_join(rb_vm_t *vm, rb_ractor_t *cr)
// wait for restart
while (barrier_cnt == vm->ractor.sync.barrier_cnt) {
vm->ractor.sync.lock_owner = NULL;
rb_native_cond_wait(&cr->barrier_wait_cond, &vm->ractor.sync.lock);
VM_ASSERT(vm->ractor.sync.lock_owner == NULL);
vm->ractor.sync.lock_owner = cr;
rb_vm_cond_wait(vm, &vm->ractor.sync.barrier_release_cond);
}
RUBY_DEBUG_LOG("barrier is released. Acquire vm_lock");

8
vm.c
View File

@ -3557,7 +3557,6 @@ thread_mark(void *ptr)
rb_gc_mark(th->last_status);
rb_gc_mark(th->locking_mutex);
rb_gc_mark(th->name);
rb_gc_mark(th->ractor_waiting.receiving_mutex);
rb_gc_mark(th->scheduler);
@ -3719,10 +3718,6 @@ th_init(rb_thread_t *th, VALUE self, rb_vm_t *vm)
th->ext_config.ractor_safe = true;
ccan_list_head_init(&th->interrupt_exec_tasks);
ccan_list_node_init(&th->ractor_waiting.waiting_node);
#ifndef RUBY_THREAD_PTHREAD_H
rb_native_cond_initialize(&th->ractor_waiting.cond);
#endif
#if USE_RUBY_DEBUG_LOG
static rb_atomic_t thread_serial = 1;
@ -4381,7 +4376,8 @@ Init_BareVM(void)
vm_opt_mid_table = st_init_numtable();
#ifdef RUBY_THREAD_WIN32_H
rb_native_cond_initialize(&vm->ractor.sync.barrier_cond);
rb_native_cond_initialize(&vm->ractor.sync.barrier_complete_cond);
rb_native_cond_initialize(&vm->ractor.sync.barrier_release_cond);
#endif
}

View File

@ -683,12 +683,15 @@ typedef struct rb_vm_struct {
bool terminate_waiting;
#ifndef RUBY_THREAD_PTHREAD_H
// win32
bool barrier_waiting;
unsigned int barrier_cnt;
rb_nativethread_cond_t barrier_cond;
rb_nativethread_cond_t barrier_complete_cond;
rb_nativethread_cond_t barrier_release_cond;
#endif
} sync;
#ifdef RUBY_THREAD_PTHREAD_H
// ractor scheduling
struct {
rb_nativethread_lock_t lock;
@ -722,7 +725,10 @@ typedef struct rb_vm_struct {
bool barrier_waiting;
unsigned int barrier_waiting_cnt;
unsigned int barrier_serial;
struct rb_ractor_struct *barrier_ractor;
unsigned int barrier_lock_rec;
} sched;
#endif
} ractor;
#ifdef USE_SIGALTSTACK
@ -1105,18 +1111,6 @@ typedef struct rb_ractor_struct rb_ractor_t;
struct rb_native_thread;
struct rb_thread_ractor_waiting {
//enum rb_ractor_wait_status wait_status;
int wait_status;
//enum rb_ractor_wakeup_status wakeup_status;
int wakeup_status;
struct ccan_list_node waiting_node; // the rb_thread_t
VALUE receiving_mutex; // protects Ractor.receive_if
#ifndef RUBY_THREAD_PTHREAD_H
rb_nativethread_cond_t cond;
#endif
};
typedef struct rb_thread_struct {
struct ccan_list_node lt_node; // managed by a ractor (r->threads.set)
VALUE self;
@ -1129,8 +1123,6 @@ typedef struct rb_thread_struct {
bool mn_schedulable;
rb_atomic_t serial; // only for RUBY_DEBUG_LOG()
struct rb_thread_ractor_waiting ractor_waiting;
VALUE last_status; /* $? */
/* for cfunc */
@ -1903,7 +1895,9 @@ rb_vm_living_threads_init(rb_vm_t *vm)
{
ccan_list_head_init(&vm->workqueue);
ccan_list_head_init(&vm->ractor.set);
#ifdef RUBY_THREAD_PTHREAD_H
ccan_list_head_init(&vm->ractor.sched.zombie_threads);
#endif
}
typedef int rb_backtrace_iter_func(void *, VALUE, int, VALUE);

View File

@ -7,6 +7,7 @@
void rb_ractor_sched_barrier_start(rb_vm_t *vm, rb_ractor_t *cr);
void rb_ractor_sched_barrier_join(rb_vm_t *vm, rb_ractor_t *cr);
void rb_ractor_sched_barrier_end(rb_vm_t *vm, rb_ractor_t *cr);
static bool
vm_locked(rb_vm_t *vm)
@ -103,15 +104,26 @@ vm_lock_enter(rb_ractor_t *cr, rb_vm_t *vm, bool locked, bool no_barrier, unsign
}
static void
vm_lock_leave(rb_vm_t *vm, unsigned int *lev APPEND_LOCATION_ARGS)
vm_lock_leave(rb_vm_t *vm, bool no_barrier, unsigned int *lev APPEND_LOCATION_ARGS)
{
rb_ractor_t *cr = vm->ractor.sync.lock_owner;
RUBY_DEBUG_LOG2(file, line, "rec:%u owner:%u%s", vm->ractor.sync.lock_rec,
(unsigned int)rb_ractor_id(vm->ractor.sync.lock_owner),
(unsigned int)rb_ractor_id(cr),
vm->ractor.sync.lock_rec == 1 ? " (leave)" : "");
ASSERT_vm_locking();
VM_ASSERT(vm->ractor.sync.lock_rec > 0);
VM_ASSERT(vm->ractor.sync.lock_rec == *lev);
VM_ASSERT(cr == GET_RACTOR());
#ifdef RUBY_THREAD_PTHREAD_H
if (vm->ractor.sched.barrier_ractor == cr &&
vm->ractor.sched.barrier_lock_rec == vm->ractor.sync.lock_rec) {
VM_ASSERT(!no_barrier);
rb_ractor_sched_barrier_end(vm, cr);
}
#endif
vm->ractor.sync.lock_rec--;
*lev = vm->ractor.sync.lock_rec;
@ -153,10 +165,16 @@ rb_vm_lock_enter_body_cr(rb_ractor_t *cr, unsigned int *lev APPEND_LOCATION_ARGS
vm_lock_enter(cr, vm, vm_locked(vm), false, lev APPEND_LOCATION_PARAMS);
}
void
rb_vm_lock_leave_body_nb(unsigned int *lev APPEND_LOCATION_ARGS)
{
vm_lock_leave(GET_VM(), true, lev APPEND_LOCATION_PARAMS);
}
void
rb_vm_lock_leave_body(unsigned int *lev APPEND_LOCATION_ARGS)
{
vm_lock_leave(GET_VM(), lev APPEND_LOCATION_PARAMS);
vm_lock_leave(GET_VM(), false, lev APPEND_LOCATION_PARAMS);
}
void
@ -174,7 +192,7 @@ rb_vm_unlock_body(LOCATION_ARGS)
rb_vm_t *vm = GET_VM();
ASSERT_vm_locking();
VM_ASSERT(vm->ractor.sync.lock_rec == 1);
vm_lock_leave(vm, &vm->ractor.sync.lock_rec APPEND_LOCATION_PARAMS);
vm_lock_leave(vm, false, &vm->ractor.sync.lock_rec APPEND_LOCATION_PARAMS);
}
static void

View File

@ -24,6 +24,7 @@ struct rb_ractor_struct;
NOINLINE(void rb_vm_lock_enter_body_cr(struct rb_ractor_struct *cr, unsigned int *lev APPEND_LOCATION_ARGS));
NOINLINE(void rb_vm_lock_enter_body_nb(unsigned int *lev APPEND_LOCATION_ARGS));
NOINLINE(void rb_vm_lock_enter_body(unsigned int *lev APPEND_LOCATION_ARGS));
void rb_vm_lock_leave_body_nb(unsigned int *lev APPEND_LOCATION_ARGS);
void rb_vm_lock_leave_body(unsigned int *lev APPEND_LOCATION_ARGS);
void rb_vm_barrier(void);
@ -86,6 +87,14 @@ rb_vm_lock_enter_nb(unsigned int *lev, const char *file, int line)
}
}
static inline void
rb_vm_lock_leave_nb(unsigned int *lev, const char *file, int line)
{
if (rb_multi_ractor_p()) {
rb_vm_lock_leave_body_nb(lev APPEND_LOCATION_PARAMS);
}
}
static inline void
rb_vm_lock_leave(unsigned int *lev, const char *file, int line)
{
@ -124,11 +133,12 @@ rb_vm_lock_leave_cr(struct rb_ractor_struct *cr, unsigned int *levp, const char
vm_locking_do; RB_VM_LOCK_LEAVE_LEV(&vm_locking_level), vm_locking_do = 0)
#define RB_VM_LOCK_ENTER_LEV_NB(levp) rb_vm_lock_enter_nb(levp, __FILE__, __LINE__)
#define RB_VM_LOCK_LEAVE_LEV_NB(levp) rb_vm_lock_leave_nb(levp, __FILE__, __LINE__)
#define RB_VM_LOCK_ENTER_NO_BARRIER() { unsigned int _lev; RB_VM_LOCK_ENTER_LEV_NB(&_lev);
#define RB_VM_LOCK_LEAVE_NO_BARRIER() RB_VM_LOCK_LEAVE_LEV(&_lev); }
#define RB_VM_LOCK_LEAVE_NO_BARRIER() RB_VM_LOCK_LEAVE_LEV_NB(&_lev); }
#define RB_VM_LOCKING_NO_BARRIER() \
for (unsigned int vm_locking_level, vm_locking_do = (RB_VM_LOCK_ENTER_LEV_NB(&vm_locking_level), 1); \
vm_locking_do; RB_VM_LOCK_LEAVE_LEV(&vm_locking_level), vm_locking_do = 0)
vm_locking_do; RB_VM_LOCK_LEAVE_LEV_NB(&vm_locking_level), vm_locking_do = 0)
#if RUBY_DEBUG > 0
void RUBY_ASSERT_vm_locking(void);