mirror of https://github.com/openjdk/jdk.git
synced 2025-09-16 17:14:41 +02:00
8214278: Cleanup process_completed_threshold and related state
Change types, normalize names, remove special values.

Reviewed-by: tschatzl, sjohanss
parent 48d68d182a
commit 978c78f7fc
10 changed files with 67 additions and 69 deletions
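
In summary, the renames and type changes applied consistently across the hunks below:

  process_completed_threshold (int)  ->  process_completed_buffers_threshold (size_t)
  max_completed_queue (int)          ->  max_completed_buffers (size_t)
  completed_queue_padding            ->  completed_buffers_padding
  special values -1 / 0 / INT_MAX    ->  ProcessCompletedBuffersThresholdNever and
                                         MaxCompletedBuffersUnlimited (both ~size_t(0))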

src/hotspot/share/gc/g1/dirtyCardQueue.cpp

@@ -31,6 +31,7 @@
 #include "gc/shared/suspendibleThreadSet.hpp"
 #include "gc/shared/workgroup.hpp"
 #include "runtime/atomic.hpp"
+#include "runtime/flags/flagSetting.hpp"
 #include "runtime/mutexLocker.hpp"
 #include "runtime/safepoint.hpp"
 #include "runtime/thread.inline.hpp"
@@ -148,14 +149,9 @@ uint DirtyCardQueueSet::num_par_ids() {
 
 void DirtyCardQueueSet::initialize(Monitor* cbl_mon,
                                    BufferNode::Allocator* allocator,
-                                   int process_completed_threshold,
-                                   int max_completed_queue,
                                    Mutex* lock,
                                    bool init_free_ids) {
-  PtrQueueSet::initialize(cbl_mon,
-                          allocator,
-                          process_completed_threshold,
-                          max_completed_queue);
+  PtrQueueSet::initialize(cbl_mon, allocator);
   _shared_dirty_card_queue.set_lock(lock);
   if (init_free_ids) {
     _free_ids = new FreeIdSet(num_par_ids(), _cbl_mon);
@@ -334,13 +330,11 @@ void DirtyCardQueueSet::concatenate_logs() {
   // Iterate over all the threads, if we find a partial log add it to
   // the global list of logs. Temporarily turn off the limit on the number
   // of outstanding buffers.
-  int save_max_completed_queue = _max_completed_queue;
-  _max_completed_queue = max_jint;
   assert(SafepointSynchronize::is_at_safepoint(), "Must be at safepoint.");
+  SizeTFlagSetting local_max(_max_completed_buffers,
+                             MaxCompletedBuffersUnlimited);
   for (JavaThreadIteratorWithHandle jtiwh; JavaThread *t = jtiwh.next(); ) {
     concatenate_log(G1ThreadLocalData::dirty_card_queue(t));
   }
   concatenate_log(_shared_dirty_card_queue);
-  // Restore the completed buffer queue limit.
-  _max_completed_queue = save_max_completed_queue;
 }
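
The manual save/restore pair above is replaced by a scope guard: SizeTFlagSetting (hence the new include of runtime/flags/flagSetting.hpp) stores the old value of _max_completed_buffers and restores it when concatenate_logs() returns, so an early exit can no longer skip the restore. A minimal sketch of the RAII idiom, using a hypothetical standalone guard class rather than HotSpot's actual template:

#include <cstddef>

// Saves the current value of a size_t variable, installs a new one,
// and restores the original when the guard leaves scope.
class ScopedSizeTSetting {          // hypothetical name, for illustration
  size_t& _flag;                    // variable being temporarily overridden
  size_t  _saved;                   // original value, restored on scope exit
public:
  ScopedSizeTSetting(size_t& flag, size_t new_value)
    : _flag(flag), _saved(flag) {
    _flag = new_value;
  }
  ~ScopedSizeTSetting() { _flag = _saved; }
};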

src/hotspot/share/gc/g1/dirtyCardQueue.hpp

@@ -119,8 +119,6 @@ public:
 
   void initialize(Monitor* cbl_mon,
                   BufferNode::Allocator* allocator,
-                  int process_completed_threshold,
-                  int max_completed_queue,
                   Mutex* lock,
                   bool init_free_ids = false);
 

src/hotspot/share/gc/g1/g1CollectedHeap.cpp

@@ -1659,19 +1659,15 @@ jint G1CollectedHeap::initialize() {
                                                G1SATBBufferEnqueueingThresholdPercent,
                                                Shared_SATB_Q_lock);
 
-  // process_completed_threshold and max_completed_queue are updated
+  // process_completed_buffers_threshold and max_completed_buffers are updated
   // later, based on the concurrent refinement object.
   G1BarrierSet::dirty_card_queue_set().initialize(DirtyCardQ_CBL_mon,
                                                   &bs->dirty_card_queue_buffer_allocator(),
-                                                  -1, // temp. never trigger
-                                                  -1, // temp. no limit
                                                   Shared_DirtyCardQ_lock,
                                                   true); // init_free_ids
 
   dirty_card_queue_set().initialize(DirtyCardQ_CBL_mon,
                                     &bs->dirty_card_queue_buffer_allocator(),
-                                    -1, // never trigger processing
-                                    -1, // no limit on length
                                     Shared_DirtyCardQ_lock);
 
   // Create the hot card cache.
@@ -1782,8 +1778,8 @@ jint G1CollectedHeap::initialize() {
 
   {
     DirtyCardQueueSet& dcqs = G1BarrierSet::dirty_card_queue_set();
-    dcqs.set_process_completed_threshold((int)concurrent_refine()->yellow_zone());
-    dcqs.set_max_completed_queue((int)concurrent_refine()->red_zone());
+    dcqs.set_process_completed_buffers_threshold(concurrent_refine()->yellow_zone());
+    dcqs.set_max_completed_buffers(concurrent_refine()->red_zone());
   }
 
   // Here we allocate the dummy HeapRegion that is required by the

src/hotspot/share/gc/g1/g1ConcurrentRefine.cpp

@@ -144,7 +144,7 @@ void G1ConcurrentRefineThreadControl::stop() {
 STATIC_ASSERT(sizeof(LP64_ONLY(jint) NOT_LP64(jshort)) <= (sizeof(size_t)/2));
 const size_t max_yellow_zone = LP64_ONLY(max_jint) NOT_LP64(max_jshort);
 const size_t max_green_zone = max_yellow_zone / 2;
-const size_t max_red_zone = INT_MAX; // For dcqs.set_max_completed_queue.
+const size_t max_red_zone = INT_MAX; // For dcqs.set_max_completed_buffers.
 STATIC_ASSERT(max_yellow_zone <= max_red_zone);
 
 // Range check assertions for green zone values.
@@ -386,21 +386,22 @@ void G1ConcurrentRefine::adjust(double update_rs_time,
     // Change the barrier params
     if (max_num_threads() == 0) {
       // Disable dcqs notification when there are no threads to notify.
-      dcqs.set_process_completed_threshold(INT_MAX);
+      dcqs.set_process_completed_buffers_threshold(DirtyCardQueueSet::ProcessCompletedBuffersThresholdNever);
     } else {
       // Worker 0 is the primary; wakeup is via dcqs notification.
       STATIC_ASSERT(max_yellow_zone <= INT_MAX);
       size_t activate = activation_threshold(0);
-      dcqs.set_process_completed_threshold((int)activate);
+      dcqs.set_process_completed_buffers_threshold(activate);
     }
-    dcqs.set_max_completed_queue((int)red_zone());
+    dcqs.set_max_completed_buffers(red_zone());
   }
 
   size_t curr_queue_size = dcqs.completed_buffers_num();
-  if (curr_queue_size >= yellow_zone()) {
-    dcqs.set_completed_queue_padding(curr_queue_size);
+  if ((dcqs.max_completed_buffers() > 0) &&
+      (curr_queue_size >= yellow_zone())) {
+    dcqs.set_completed_buffers_padding(curr_queue_size);
   } else {
-    dcqs.set_completed_queue_padding(0);
+    dcqs.set_completed_buffers_padding(0);
   }
   dcqs.notify_if_necessary();
 }
@@ -433,8 +434,8 @@ bool G1ConcurrentRefine::do_refinement_step(uint worker_id) {
   // that means that the transition period after the evacuation pause has ended.
   // Since the value written to the DCQS is the same for all threads, there is no
   // need to synchronize.
-  if (dcqs.completed_queue_padding() > 0 && curr_buffer_num <= yellow_zone()) {
-    dcqs.set_completed_queue_padding(0);
+  if (dcqs.completed_buffers_padding() > 0 && curr_buffer_num <= yellow_zone()) {
+    dcqs.set_completed_buffers_padding(0);
   }
 
   maybe_activate_more_threads(worker_id, curr_buffer_num);

src/hotspot/share/gc/g1/g1SATBMarkQueueSet.cpp

@@ -37,12 +37,12 @@ G1SATBMarkQueueSet::G1SATBMarkQueueSet() : _g1h(NULL) {}
 void G1SATBMarkQueueSet::initialize(G1CollectedHeap* g1h,
                                     Monitor* cbl_mon,
                                     BufferNode::Allocator* allocator,
-                                    int process_completed_threshold,
+                                    size_t process_completed_buffers_threshold,
                                     uint buffer_enqueue_threshold_percentage,
                                     Mutex* lock) {
   SATBMarkQueueSet::initialize(cbl_mon,
                                allocator,
-                               process_completed_threshold,
+                               process_completed_buffers_threshold,
                                buffer_enqueue_threshold_percentage,
                                lock);
   _g1h = g1h;

src/hotspot/share/gc/g1/g1SATBMarkQueueSet.hpp

@@ -39,7 +39,7 @@ public:
   void initialize(G1CollectedHeap* g1h,
                   Monitor* cbl_mon,
                   BufferNode::Allocator* allocator,
-                  int process_completed_threshold,
+                  size_t process_completed_buffers_threshold,
                   uint buffer_enqueue_threshold_percentage,
                   Mutex* lock);
 

src/hotspot/share/gc/shared/ptrQueue.cpp

@@ -164,12 +164,12 @@ PtrQueueSet::PtrQueueSet(bool notify_when_complete) :
   _completed_buffers_head(NULL),
   _completed_buffers_tail(NULL),
   _n_completed_buffers(0),
-  _process_completed_threshold(0),
+  _process_completed_buffers_threshold(ProcessCompletedBuffersThresholdNever),
   _process_completed(false),
   _all_active(false),
   _notify_when_complete(notify_when_complete),
-  _max_completed_queue(0),
-  _completed_queue_padding(0)
+  _max_completed_buffers(MaxCompletedBuffersUnlimited),
+  _completed_buffers_padding(0)
 {}
 
 PtrQueueSet::~PtrQueueSet() {
@@ -179,12 +179,7 @@ PtrQueueSet::~PtrQueueSet() {
 }
 
 void PtrQueueSet::initialize(Monitor* cbl_mon,
-                             BufferNode::Allocator* allocator,
-                             int process_completed_threshold,
-                             int max_completed_queue) {
-  _max_completed_queue = max_completed_queue;
-  _process_completed_threshold = process_completed_threshold;
-  _completed_queue_padding = 0;
+                             BufferNode::Allocator* allocator) {
   assert(cbl_mon != NULL && allocator != NULL, "Init order issue?");
   _cbl_mon = cbl_mon;
   _allocator = allocator;
@@ -238,13 +233,14 @@ void PtrQueue::handle_zero_index() {
 
 bool PtrQueueSet::process_or_enqueue_complete_buffer(BufferNode* node) {
   if (Thread::current()->is_Java_thread()) {
-    // We don't lock. It is fine to be epsilon-precise here.
-    if (_max_completed_queue == 0 ||
-        (_max_completed_queue > 0 &&
-         _n_completed_buffers >= _max_completed_queue + _completed_queue_padding)) {
-      bool b = mut_process_buffer(node);
-      if (b) {
-        // True here means that the buffer hasn't been deallocated and the caller may reuse it.
+    // If the number of buffers exceeds the limit, make this Java
+    // thread do the processing itself. We don't lock to access
+    // buffer count or padding; it is fine to be imprecise here. The
+    // add of padding could overflow, which is treated as unlimited.
+    size_t limit = _max_completed_buffers + _completed_buffers_padding;
+    if ((_n_completed_buffers > limit) && (limit >= _max_completed_buffers)) {
+      if (mut_process_buffer(node)) {
+        // Successfully processed; return true to allow buffer reuse.
         return true;
       }
     }
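
The rewritten limit check leans on well-defined unsigned wraparound: if _max_completed_buffers + _completed_buffers_padding overflows, limit wraps to a value below _max_completed_buffers, the (limit >= _max_completed_buffers) guard fails, and the configuration behaves as unlimited. A standalone sketch of the idiom (function and variable names are illustrative, not from the patch):

#include <cassert>
#include <cstddef>

// True when 'count' exceeds max + padding; an overflowing sum wraps
// below 'max', fails the second check, and is treated as "no limit".
static bool exceeds_limit(size_t count, size_t max, size_t padding) {
  size_t limit = max + padding;  // unsigned add may wrap around
  return (count > limit) && (limit >= max);
}

int main() {
  const size_t SM = ~size_t(0);            // SIZE_MAX
  assert(exceeds_limit(20, 10, 5));        // 20 > 15: over the limit
  assert(!exceeds_limit(12, 10, 5));       // within limit + padding
  assert(!exceeds_limit(SM, SM - 1, 10));  // sum wraps: treated as unlimited
  return 0;
}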
@@ -267,8 +263,8 @@ void PtrQueueSet::enqueue_complete_buffer(BufferNode* cbn) {
   }
   _n_completed_buffers++;
 
-  if (!_process_completed && _process_completed_threshold >= 0 &&
-      _n_completed_buffers >= (size_t)_process_completed_threshold) {
+  if (!_process_completed &&
+      (_n_completed_buffers > _process_completed_buffers_threshold)) {
     _process_completed = true;
     if (_notify_when_complete) {
       _cbl_mon->notify();
@@ -327,8 +323,7 @@ void PtrQueueSet::merge_bufferlists(PtrQueueSet *src) {
 
 void PtrQueueSet::notify_if_necessary() {
   MutexLockerEx x(_cbl_mon, Mutex::_no_safepoint_check_flag);
-  assert(_process_completed_threshold >= 0, "_process_completed_threshold is negative");
-  if (_n_completed_buffers >= (size_t)_process_completed_threshold || _max_completed_queue == 0) {
+  if (_n_completed_buffers > _process_completed_buffers_threshold) {
     _process_completed = true;
     if (_notify_when_complete)
       _cbl_mon->notify();

src/hotspot/share/gc/shared/ptrQueue.hpp

@@ -284,7 +284,7 @@ protected:
   BufferNode* _completed_buffers_head;
   BufferNode* _completed_buffers_tail;
   size_t _n_completed_buffers;
-  int _process_completed_threshold;
+  size_t _process_completed_buffers_threshold;
   volatile bool _process_completed;
 
   bool _all_active;
@@ -293,9 +293,9 @@ protected:
   bool _notify_when_complete;
 
   // Maximum number of elements allowed on completed queue: after that,
-  // enqueuer does the work itself. Zero indicates no maximum.
-  int _max_completed_queue;
-  size_t _completed_queue_padding;
+  // enqueuer does the work itself.
+  size_t _max_completed_buffers;
+  size_t _completed_buffers_padding;
 
   size_t completed_buffers_list_length();
   void assert_completed_buffer_list_len_correct_locked();
@@ -316,10 +316,7 @@ protected:
 
   // Because of init-order concerns, we can't pass these as constructor
   // arguments.
-  void initialize(Monitor* cbl_mon,
-                  BufferNode::Allocator* allocator,
-                  int process_completed_threshold,
-                  int max_completed_queue);
+  void initialize(Monitor* cbl_mon, BufferNode::Allocator* allocator);
 
 public:
 
@@ -350,18 +347,34 @@
   }
 
   // Get/Set the number of completed buffers that triggers log processing.
-  void set_process_completed_threshold(int sz) { _process_completed_threshold = sz; }
-  int process_completed_threshold() const { return _process_completed_threshold; }
+  // Log processing should be done when the number of buffers exceeds the
+  // threshold.
+  void set_process_completed_buffers_threshold(size_t sz) {
+    _process_completed_buffers_threshold = sz;
+  }
+  size_t process_completed_buffers_threshold() const {
+    return _process_completed_buffers_threshold;
+  }
+  static const size_t ProcessCompletedBuffersThresholdNever = ~size_t(0);
 
-  size_t completed_buffers_num() { return _n_completed_buffers; }
+  size_t completed_buffers_num() const { return _n_completed_buffers; }
 
   void merge_bufferlists(PtrQueueSet* src);
 
-  void set_max_completed_queue(int m) { _max_completed_queue = m; }
-  int max_completed_queue() { return _max_completed_queue; }
+  void set_max_completed_buffers(size_t m) {
+    _max_completed_buffers = m;
+  }
+  size_t max_completed_buffers() const {
+    return _max_completed_buffers;
+  }
+  static const size_t MaxCompletedBuffersUnlimited = ~size_t(0);
 
-  void set_completed_queue_padding(size_t padding) { _completed_queue_padding = padding; }
-  size_t completed_queue_padding() { return _completed_queue_padding; }
+  void set_completed_buffers_padding(size_t padding) {
+    _completed_buffers_padding = padding;
+  }
+  size_t completed_buffers_padding() const {
+    return _completed_buffers_padding;
+  }
 
   // Notify the consumer if the number of buffers crossed the threshold
   void notify_if_necessary();
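
Both new sentinels are the all-ones size_t value, which is what lets the comparisons elsewhere in the patch switch from '>=' with signed special cases to a plain '>': no buffer count can ever exceed ~size_t(0), so ProcessCompletedBuffersThresholdNever means "never notify" and MaxCompletedBuffersUnlimited means "no cap", with no extra branches. A small sketch of the property (constants copied from the hunk above, helper name illustrative):

#include <cstddef>

static const size_t ProcessCompletedBuffersThresholdNever = ~size_t(0);
static const size_t MaxCompletedBuffersUnlimited = ~size_t(0);

static_assert(~size_t(0) == static_cast<size_t>(-1),
              "the all-ones pattern is the largest size_t value");

// Mirrors the strict comparison in enqueue_complete_buffer() and
// notify_if_necessary(): with a SIZE_MAX threshold this can never
// return true, so no special "disabled" case is needed.
static bool should_notify(size_t n_completed, size_t threshold) {
  return n_completed > threshold;
}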

src/hotspot/share/gc/shared/satbMarkQueue.cpp

@@ -113,10 +113,11 @@ SATBMarkQueueSet::SATBMarkQueueSet() :
 
 void SATBMarkQueueSet::initialize(Monitor* cbl_mon,
                                   BufferNode::Allocator* allocator,
-                                  int process_completed_threshold,
+                                  size_t process_completed_buffers_threshold,
                                   uint buffer_enqueue_threshold_percentage,
                                   Mutex* lock) {
-  PtrQueueSet::initialize(cbl_mon, allocator, process_completed_threshold, -1);
+  PtrQueueSet::initialize(cbl_mon, allocator);
+  set_process_completed_buffers_threshold(process_completed_buffers_threshold);
   _shared_satb_queue.set_lock(lock);
   assert(buffer_size() != 0, "buffer size not initialized");
   // Minimum threshold of 1 ensures enqueuing of completely full buffers.

src/hotspot/share/gc/shared/satbMarkQueue.hpp

@@ -110,7 +110,7 @@ protected:
 
   void initialize(Monitor* cbl_mon,
                   BufferNode::Allocator* allocator,
-                  int process_completed_threshold,
+                  size_t process_completed_buffers_threshold,
                   uint buffer_enqueue_threshold_percentage,
                   Mutex* lock);
 