merge revision(s) 62934,63210,63215,63309: [Backport #14634]

thread_sync.c: avoid reaching across stacks of dead threads

	rb_ensure is insufficient cleanup for fork and we must
	reinitialize all waitqueues in the child process.

	Unfortunately this increases the footprint of ConditionVariable,
	Queue and SizedQueue by 8 bytes on 32-bit (16 bytes on 64-bit).

	[ruby-core:86316] [Bug #14634]
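
	The hazard is easiest to see outside of Ruby.  The sketch below
	(plain POSIX C with made-up names such as wait_node and
	block_on_list; it is not the thread_sync.c code) shows a thread
	parking an on-stack node in a shared wait list and then a
	fork(): only the forking thread exists in the child, so the list
	still points into the stack of a thread that is not running
	there, and the only safe move is to reinitialize the list rather
	than walk it.

	#include <pthread.h>
	#include <stdio.h>
	#include <sys/wait.h>
	#include <unistd.h>

	struct wait_node {
	    struct wait_node *next;
	    pthread_t owner;        /* thread whose stack holds this node */
	};

	static struct wait_node *waiters;       /* shared wait list head */
	static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

	static void *
	block_on_list(void *arg)
	{
	    struct wait_node node = { NULL, pthread_self() };  /* on-stack */

	    pthread_mutex_lock(&lock);
	    node.next = waiters;
	    waiters = &node;        /* the list now points into this stack */
	    pthread_mutex_unlock(&lock);

	    pause();                /* stay "blocked" forever */
	    return arg;
	}

	int
	main(void)
	{
	    pthread_t th;

	    pthread_create(&th, NULL, block_on_list, NULL);
	    sleep(1);               /* let the waiter enqueue itself */

	    if (fork() == 0) {
	        /*
	         * Child: `waiters' references a node on the stack of a
	         * thread that does not run here.  Waking or unlinking it
	         * would reach across a dead thread's stack, so the child
	         * must reinitialize the list instead.
	         */
	        waiters = NULL;
	        printf("child: wait list reset\n");
	        _exit(0);
	    }
	    wait(NULL);
	    return 0;
	}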

	variable.c: fix thread + fork errors in autoload

	This is a fairly non-intrusive bugfix to prevent children
	from trying to reach into the thread stacks of the parent.
	I will probably reuse this idea and redo r62934, too
	(same bug).  A rough sketch of the generation check is
	included after the change list below.

	* vm_core.h (typedef struct rb_vm_struct): add fork_gen counter
	* thread.c (rb_thread_atfork_internal): increment fork_gen
	* variable.c (struct autoload_data_i): store fork_gen
	* variable.c (check_autoload_data): remove (replaced with get_...)
	* variable.c (get_autoload_data): check fork_gen when retrieving
	* variable.c (check_autoload_required): use get_autoload_data
	* variable.c (rb_autoloading_value): ditto
	* variable.c (rb_autoload_p): ditto
	* variable.c (current_autoload_data): ditto
	* variable.c (autoload_reset): reset fork_gen, adjust indent
	* variable.c (rb_autoload_load): set fork_gen when setting state
	* test/ruby/test_autoload.rb (test_autoload_fork): new test
	  [ruby-core:86410] [Bug #14634]
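
	A minimal model of that check, with hypothetical names
	(autoload_state, vm_fork_gen, get_autoload_state) standing in
	for the real autoload_data_i / fork_gen / get_autoload_data:
	records tagged with an older generation simply read back as
	"no autoload data" in the child instead of being dereferenced.

	#include <stdio.h>

	struct autoload_state {
	    unsigned long fork_gen; /* generation the record was created in */
	};

	static unsigned long vm_fork_gen;       /* bumped in the child at fork */

	/* stale records, created before the fork, read back as absent */
	static struct autoload_state *
	get_autoload_state(struct autoload_state *st)
	{
	    if (st && st->fork_gen == vm_fork_gen)
	        return st;
	    return NULL;
	}

	int
	main(void)
	{
	    struct autoload_state st = { vm_fork_gen };

	    printf("parent sees: %p\n", (void *)get_autoload_state(&st));
	    vm_fork_gen++;          /* what rb_thread_atfork_internal does */
	    printf("child sees:  %p\n", (void *)get_autoload_state(&st));
	    return 0;
	}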

	thread_sync: redo r62934 to use fork_gen

	Instead of maintaining linked lists to store all
	rb_queue/rb_szqueue/rb_condvar structs, store only a fork_gen
	serial number to simplify management of these items.

	This reduces initialization costs and avoids the up-front cost
	of resetting all Queue/SizedQueue/ConditionVariable objects at
	fork while saving 8 bytes per-structure on 64-bit.  There are no
	savings on 32-bit.
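
	Roughly, the trade is: the atfork hook only bumps a VM-wide
	serial, and each object pays for its own reset lazily, the first
	time it is touched in the child.  A toy version follows (the
	names fake_queue, vm_fork_gen and atfork_child are made up; the
	real queue_fork_check appears in the diff further down):

	#include <stdio.h>

	struct fake_queue {
	    unsigned long fork_gen;
	    int num_waiting;        /* stands in for the waitq contents */
	};

	static unsigned long vm_fork_gen;

	static void
	atfork_child(void)          /* the entire per-fork cost: O(1) */
	{
	    vm_fork_gen++;
	}

	static int
	fake_queue_fork_check(struct fake_queue *q)
	{
	    if (q->fork_gen == vm_fork_gen)
	        return 0;           /* same process image, nothing to do */
	    q->fork_gen = vm_fork_gen;
	    q->num_waiting = 0;     /* the waiters lived on parent stacks */
	    return 1;
	}

	int
	main(void)
	{
	    struct fake_queue q = { vm_fork_gen, 3 };

	    atfork_child();         /* pretend we are now in the child */
	    printf("reset=%d waiting=%d\n",
	           fake_queue_fork_check(&q), q.num_waiting);
	    return 0;
	}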

	* thread.c (rb_thread_atfork_internal): remove rb_thread_sync_reset_all call
	* thread_sync.c (rb_thread_sync_reset_all): remove
	* thread_sync.c (queue_live): remove
	* thread_sync.c (queue_free): remove
	* thread_sync.c (struct rb_queue): s/live/fork_gen/
	* thread_sync.c (queue_data_type): use default free
	* thread_sync.c (queue_alloc): remove list_add
	* thread_sync.c (queue_fork_check): new function
	* thread_sync.c (queue_ptr): call queue_fork_check
	* thread_sync.c (szqueue_free): remove
	* thread_sync.c (szqueue_data_type): use default free
	* thread_sync.c (szqueue_alloc): remove list_add
	* thread_sync.c (szqueue_ptr): check fork_gen via queue_fork_check
	* thread_sync.c (struct rb_condvar): s/live/fork_gen/
	* thread_sync.c (condvar_free): remove
	* thread_sync.c (cv_data_type): use default free
	* thread_sync.c (condvar_ptr): check fork_gen
	* thread_sync.c (condvar_alloc): remove list_add
	  [ruby-core:86316] [Bug #14634]

	thread_sync.c (condvar_ptr): reset fork_gen after forking

	Otherwise the condition variable waiter list will always
	be empty, which is wrong :x

	[Bug #14725] [Bug #14634]
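
	A toy illustration of why the missing store matters (made-up
	names, not the real condvar_ptr): without recording the new
	generation, the mismatch persists, every later access repeats
	the reset, and waiters registered after the fork are thrown
	away again.

	#include <stdio.h>

	static unsigned long vm_fork_gen = 1;   /* one fork has happened */

	struct fake_condvar {
	    unsigned long fork_gen;             /* still 0, from the parent */
	    int num_waiters;                    /* stands in for cv->waitq */
	};

	static void
	fork_check(struct fake_condvar *cv, int store_gen)
	{
	    if (cv->fork_gen != vm_fork_gen) {
	        if (store_gen)
	            cv->fork_gen = vm_fork_gen; /* the one-line fix */
	        cv->num_waiters = 0;            /* "list_head_init(&cv->waitq)" */
	    }
	}

	static int
	demo(int store_gen)
	{
	    struct fake_condvar cv = { 0, 0 };

	    fork_check(&cv, store_gen);         /* first access in the child */
	    cv.num_waiters = 1;                 /* a waiter registers afterwards */
	    fork_check(&cv, store_gen);         /* later access */
	    return cv.num_waiters;
	}

	int
	main(void)
	{
	    printf("without storing fork_gen: %d waiter(s)\n", demo(0));
	    printf("with    storing fork_gen: %d waiter(s)\n", demo(1));
	    return 0;
	}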

git-svn-id: svn+ssh://ci.ruby-lang.org/ruby/branches/ruby_2_5@66912 b2dd03c8-39d4-4d8f-98ff-823fe69b080e
nagachika  2019-01-23 14:14:25 +00:00
commit 63d9ab33fe, parent e42dcdb515
8 changed files with 162 additions and 15 deletions

thread_sync.c

@@ -4,6 +4,14 @@
 static VALUE rb_cMutex, rb_cQueue, rb_cSizedQueue, rb_cConditionVariable;
 static VALUE rb_eClosedQueueError;
 
+/*
+ * keep these globally so we can walk and reinitialize them at fork
+ * in the child process
+ */
+static LIST_HEAD(szqueue_list);
+static LIST_HEAD(queue_list);
+static LIST_HEAD(condvar_list);
+
 /* sync_waiter is always on-stack */
 struct sync_waiter {
     rb_thread_t *th;
@@ -556,6 +564,7 @@ void rb_mutex_allow_trap(VALUE self, int val)
 #define queue_waitq(q) UNALIGNED_MEMBER_PTR(q, waitq)
 PACKED_STRUCT_UNALIGNED(struct rb_queue {
     struct list_head waitq;
+    rb_serial_t fork_gen;
     const VALUE que;
     int num_waiting;
 });
@@ -601,12 +610,29 @@ queue_alloc(VALUE klass)
     return obj;
 }
 
+static int
+queue_fork_check(struct rb_queue *q)
+{
+    rb_serial_t fork_gen = GET_VM()->fork_gen;
+
+    if (q->fork_gen == fork_gen) {
+        return 0;
+    }
+
+    /* forked children can't reach into parent thread stacks */
+    q->fork_gen = fork_gen;
+    list_head_init(queue_waitq(q));
+    q->num_waiting = 0;
+    return 1;
+}
+
 static struct rb_queue *
 queue_ptr(VALUE obj)
 {
     struct rb_queue *q;
 
     TypedData_Get_Struct(obj, struct rb_queue, &queue_data_type, q);
+    queue_fork_check(q);
 
     return q;
 }
@@ -649,6 +675,11 @@ szqueue_ptr(VALUE obj)
     struct rb_szqueue *sq;
 
     TypedData_Get_Struct(obj, struct rb_szqueue, &szqueue_data_type, sq);
 
+    if (queue_fork_check(&sq->q)) {
+        list_head_init(szqueue_pushq(sq));
+        sq->num_waiting_push = 0;
+    }
+
     return sq;
 }
@@ -885,7 +916,7 @@ queue_do_pop(VALUE self, struct rb_queue *q, int should_block)
             list_add_tail(&qw.as.q->waitq, &qw.w.node);
             qw.as.q->num_waiting++;
 
-            rb_ensure(queue_sleep, Qfalse, queue_sleep_done, (VALUE)&qw);
+            rb_ensure(queue_sleep, self, queue_sleep_done, (VALUE)&qw);
         }
     }
@@ -1127,7 +1158,7 @@ rb_szqueue_push(int argc, VALUE *argv, VALUE self)
             list_add_tail(pushq, &qw.w.node);
             sq->num_waiting_push++;
 
-            rb_ensure(queue_sleep, Qfalse, szqueue_sleep_done, (VALUE)&qw);
+            rb_ensure(queue_sleep, self, szqueue_sleep_done, (VALUE)&qw);
         }
     }
@@ -1228,9 +1259,9 @@ rb_szqueue_empty_p(VALUE self)
 /* ConditionalVariable */
 /* TODO: maybe this can be IMEMO */
 struct rb_condvar {
     struct list_head waitq;
+    rb_serial_t fork_gen;
 };
 
 /*
@@ -1277,9 +1308,9 @@ static struct rb_condvar *
 condvar_ptr(VALUE self)
 {
     struct rb_condvar *cv;
+    rb_serial_t fork_gen = GET_VM()->fork_gen;
 
     TypedData_Get_Struct(self, struct rb_condvar, &cv_data_type, cv);
 
+    /* forked children can't reach into parent thread stacks */
+    if (cv->fork_gen != fork_gen) {
+        cv->fork_gen = fork_gen;
+        list_head_init(&cv->waitq);
+    }
 
     return cv;
 }