8202776: Modularize GC allocations in runtime
Reviewed-by: eosterlund, shade
parent fcfd1c85dd
commit 26b8ea76f5
3 changed files with 43 additions and 30 deletions
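The hunks below, spread over CollectedHeap's implementation, declaration and inline definitions, route runtime object allocation through one virtual entry point, obj_allocate_raw, whose default policy is: try the current thread's TLAB, otherwise fall back to mem_allocate. The following self-contained C++ sketch shows only that shape; DemoHeap, SimpleHeap and Tlab are illustrative stand-ins, not HotSpot types.

```cpp
#include <cstddef>
#include <cstdio>
#include <cstdlib>

// Toy stand-in for a thread-local allocation buffer (TLAB): bump-pointer
// allocation out of a small per-thread chunk.
struct Tlab {
  char* top;
  char* end;

  void* allocate(std::size_t bytes) {
    if (top + bytes <= end) {
      void* p = top;
      top += bytes;
      return p;            // fast path: served from the buffer
    }
    return nullptr;        // fast path failed
  }
};

// Base heap: one virtual hook with a default "TLAB first, then shared heap"
// policy, mirroring the shape of CollectedHeap::obj_allocate_raw.
class DemoHeap {
 public:
  virtual ~DemoHeap() = default;

  // Shared (non-TLAB) allocation; concrete heaps must provide it.
  virtual void* mem_allocate(std::size_t bytes) = 0;

  // Default raw-allocation policy; a collector may override it entirely.
  virtual void* obj_allocate_raw(Tlab& tlab, std::size_t bytes) {
    if (void* p = tlab.allocate(bytes)) {
      return p;                        // TLAB fast path
    }
    return mem_allocate(bytes);        // fall back to the shared heap
  }
};

// A concrete heap that is happy with the default policy.
class SimpleHeap : public DemoHeap {
 public:
  void* mem_allocate(std::size_t bytes) override { return std::malloc(bytes); }
};

int main() {
  SimpleHeap heap;
  char buffer[1024];
  Tlab tlab{buffer, buffer + sizeof(buffer)};

  void* a = heap.obj_allocate_raw(tlab, 64);    // fits: comes from the TLAB
  void* b = heap.obj_allocate_raw(tlab, 4096);  // too big: mem_allocate runs
  std::printf("tlab: %p  shared: %p\n", a, b);
  std::free(b);
  return 0;
}
```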
@@ -365,20 +365,32 @@ void CollectedHeap::check_for_valid_allocation_state() {
 }
 #endif
 
-HeapWord* CollectedHeap::allocate_from_tlab_slow(Klass* klass, Thread* thread, size_t size) {
+HeapWord* CollectedHeap::obj_allocate_raw(Klass* klass, size_t size,
+                                          bool* gc_overhead_limit_was_exceeded, TRAPS) {
+  if (UseTLAB) {
+    HeapWord* result = allocate_from_tlab(klass, size, THREAD);
+    if (result != NULL) {
+      return result;
+    }
+  }
+  return Universe::heap()->mem_allocate(size, gc_overhead_limit_was_exceeded);
+}
+
+HeapWord* CollectedHeap::allocate_from_tlab_slow(Klass* klass, size_t size, TRAPS) {
+  ThreadLocalAllocBuffer& tlab = THREAD->tlab();
 
   // Retain tlab and allocate object in shared space if
   // the amount free in the tlab is too large to discard.
-  if (thread->tlab().free() > thread->tlab().refill_waste_limit()) {
-    thread->tlab().record_slow_allocation(size);
+  if (tlab.free() > tlab.refill_waste_limit()) {
+    tlab.record_slow_allocation(size);
     return NULL;
   }
 
   // Discard tlab and allocate a new one.
   // To minimize fragmentation, the last TLAB may be smaller than the rest.
-  size_t new_tlab_size = thread->tlab().compute_size(size);
+  size_t new_tlab_size = tlab.compute_size(size);
 
-  thread->tlab().clear_before_allocation();
+  tlab.clear_before_allocation();
 
   if (new_tlab_size == 0) {
     return NULL;
@@ -397,7 +409,7 @@ HeapWord* CollectedHeap::allocate_from_tlab_slow(Klass* klass, Thread* thread, size_t size) {
   assert(actual_tlab_size != 0, "Allocation succeeded but actual size not updated. obj at: " PTR_FORMAT " min: " SIZE_FORMAT ", desired: " SIZE_FORMAT,
          p2i(obj), min_tlab_size, new_tlab_size);
 
-  AllocTracer::send_allocation_in_new_tlab(klass, obj, actual_tlab_size * HeapWordSize, size * HeapWordSize, thread);
+  AllocTracer::send_allocation_in_new_tlab(klass, obj, actual_tlab_size * HeapWordSize, size * HeapWordSize, THREAD);
 
   if (ZeroTLAB) {
     // ..and clear it.
@@ -412,7 +424,7 @@ HeapWord* CollectedHeap::allocate_from_tlab_slow(Klass* klass, Thread* thread, size_t size) {
     Copy::fill_to_words(obj + hdr_size, actual_tlab_size - hdr_size, badHeapWordVal);
 #endif // ASSERT
   }
-  thread->tlab().fill(obj, obj + size, actual_tlab_size);
+  tlab.fill(obj, obj + size, actual_tlab_size);
   return obj;
 }
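In allocate_from_tlab_slow above, a TLAB that still has more free space than its refill-waste limit is kept: the request is recorded as a slow allocation and NULL is returned, so the caller allocates from the shared heap; only a nearly exhausted TLAB is discarded and replaced. A rough, self-contained sketch of that retain-or-discard decision follows; the fixed 1/64 waste limit and the names are assumptions for illustration, not HotSpot's adaptive policy.

```cpp
#include <cstddef>
#include <cstdio>

// Illustrative TLAB bookkeeping. HotSpot's refill_waste_limit() is adaptive;
// here it is assumed to be a fixed 1/64 of the buffer, purely for the example.
struct TlabStats {
  std::size_t capacity;          // buffer size, in words
  std::size_t used;              // words already handed out
  std::size_t slow_allocations;  // requests served outside the TLAB

  std::size_t free_words() const { return capacity - used; }
  std::size_t refill_waste_limit() const { return capacity / 64; }
};

// The retain-or-discard decision made when the TLAB fast path fails:
// true  -> keep the TLAB and serve this request from the shared heap,
// false -> discard the TLAB and refill it with a new one.
bool retain_tlab(TlabStats& tlab) {
  if (tlab.free_words() > tlab.refill_waste_limit()) {
    // Too much usable space left to throw away: keep it and record the
    // out-of-TLAB ("slow") allocation, as record_slow_allocation() does.
    tlab.slow_allocations++;
    return true;
  }
  // Nearly empty: discarding wastes almost nothing, so refill instead.
  return false;
}

int main() {
  TlabStats nearly_full = {1024, 1020, 0};  // 4 words free, limit is 16 -> discard
  TlabStats half_full   = {1024,  512, 0};  // 512 words free, limit is 16 -> retain

  std::printf("nearly full -> %s\n", retain_tlab(nearly_full) ? "retain" : "discard and refill");
  std::printf("half full   -> %s\n", retain_tlab(half_full)   ? "retain" : "discard and refill");
  return 0;
}
```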
@@ -141,8 +141,15 @@ class CollectedHeap : public CHeapObj<mtInternal> {
   virtual void resize_all_tlabs();
 
   // Allocate from the current thread's TLAB, with broken-out slow path.
-  inline static HeapWord* allocate_from_tlab(Klass* klass, Thread* thread, size_t size);
-  static HeapWord* allocate_from_tlab_slow(Klass* klass, Thread* thread, size_t size);
+  inline static HeapWord* allocate_from_tlab(Klass* klass, size_t size, TRAPS);
+  static HeapWord* allocate_from_tlab_slow(Klass* klass, size_t size, TRAPS);
 
+  // Raw memory allocation facilities
+  // The obj and array allocate methods are covers for these methods.
+  // mem_allocate() should never be
+  // called to allocate TLABs, only individual objects.
+  virtual HeapWord* mem_allocate(size_t size,
+                                 bool* gc_overhead_limit_was_exceeded) = 0;
+
   // Allocate an uninitialized block of the given size, or returns NULL if
   // this is impossible.
@@ -309,12 +316,12 @@ class CollectedHeap : public CHeapObj<mtInternal> {
   inline static oop array_allocate_nozero(Klass* klass, int size, int length, TRAPS);
   inline static oop class_allocate(Klass* klass, int size, TRAPS);
 
-  // Raw memory allocation facilities
-  // The obj and array allocate methods are covers for these methods.
-  // mem_allocate() should never be
-  // called to allocate TLABs, only individual objects.
-  virtual HeapWord* mem_allocate(size_t size,
-                                 bool* gc_overhead_limit_was_exceeded) = 0;
+  // Raw memory allocation. This may or may not use TLAB allocations to satisfy the
+  // allocation. A GC implementation may override this function to satisfy the allocation
+  // in any way. But the default is to try a TLAB allocation, and otherwise perform
+  // mem_allocate.
+  virtual HeapWord* obj_allocate_raw(Klass* klass, size_t size,
+                                     bool* gc_overhead_limit_was_exceeded, TRAPS);
 
   // Utilities for turning raw memory into filler objects.
   //
@@ -137,18 +137,10 @@ HeapWord* CollectedHeap::common_mem_allocate_noinit(Klass* klass, size_t size, TRAPS) {
     return NULL;  // caller does a CHECK_0 too
   }
 
-  HeapWord* result = NULL;
-  if (UseTLAB) {
-    result = allocate_from_tlab(klass, THREAD, size);
-    if (result != NULL) {
-      assert(!HAS_PENDING_EXCEPTION,
-             "Unexpected exception, will result in uninitialized storage");
-      return result;
-    }
-  }
   bool gc_overhead_limit_was_exceeded = false;
-  result = Universe::heap()->mem_allocate(size,
-                                          &gc_overhead_limit_was_exceeded);
+  CollectedHeap* heap = Universe::heap();
+  HeapWord* result = heap->obj_allocate_raw(klass, size, &gc_overhead_limit_was_exceeded, THREAD);
+
   if (result != NULL) {
     NOT_PRODUCT(Universe::heap()->
       check_for_non_bad_heap_word_value(result, size));
@@ -161,7 +153,6 @@ HeapWord* CollectedHeap::common_mem_allocate_noinit(Klass* klass, size_t size, TRAPS) {
     return result;
   }
 
-
   if (!gc_overhead_limit_was_exceeded) {
     // -XX:+HeapDumpOnOutOfMemoryError and -XX:OnOutOfMemoryError support
     report_java_out_of_memory("Java heap space");
@@ -193,15 +184,18 @@ HeapWord* CollectedHeap::common_mem_allocate_init(Klass* klass, size_t size, TRAPS) {
   return obj;
 }
 
-HeapWord* CollectedHeap::allocate_from_tlab(Klass* klass, Thread* thread, size_t size) {
+HeapWord* CollectedHeap::allocate_from_tlab(Klass* klass, size_t size, TRAPS) {
   assert(UseTLAB, "should use UseTLAB");
 
-  HeapWord* obj = thread->tlab().allocate(size);
+  HeapWord* obj = THREAD->tlab().allocate(size);
   if (obj != NULL) {
    return obj;
   }
   // Otherwise...
-  return allocate_from_tlab_slow(klass, thread, size);
+  obj = allocate_from_tlab_slow(klass, size, THREAD);
+  assert(obj == NULL || !HAS_PENDING_EXCEPTION,
+         "Unexpected exception, will result in uninitialized storage");
+  return obj;
 }
 
 void CollectedHeap::init_obj(HeapWord* obj, size_t size) {