6994322: Remove the is_tlab and is_noref / is_large_noref parameters from the CollectedHeap
Remove two unused parameters from the mem_allocate() method and update its uses accordingly.

Reviewed-by: stefank, johnc
commit 937d6ded89 (parent 43d9e95a42)

17 changed files with 55 additions and 131 deletions
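For quick reference, the net effect on the ParallelScavengeHeap allocation entry points, condensed from the header hunk further below (declarations only, old form above, new form beneath):

// Before:
HeapWord* mem_allocate(size_t size,
                       bool is_noref,
                       bool is_tlab,
                       bool* gc_overhead_limit_was_exceeded);
HeapWord* failed_mem_allocate(size_t size, bool is_tlab);

// After:
HeapWord* mem_allocate(size_t size,
                       bool* gc_overhead_limit_was_exceeded);
HeapWord* failed_mem_allocate(size_t size);

All callers and the PSYoungGen/PSOldGen allocation paths are updated to match, as the diff below shows.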
@@ -386,8 +386,6 @@ bool ParallelScavengeHeap::is_in_partial_collection(const void *p) {
 // we rely on the size_policy object to force a bail out.
 HeapWord* ParallelScavengeHeap::mem_allocate(
                                      size_t size,
-                                     bool is_noref,
-                                     bool is_tlab,
                                      bool* gc_overhead_limit_was_exceeded) {
   assert(!SafepointSynchronize::is_at_safepoint(), "should not be at safepoint");
   assert(Thread::current() != (Thread*)VMThread::vm_thread(), "should not be in vm thread");
@@ -398,7 +396,7 @@ HeapWord* ParallelScavengeHeap::mem_allocate(
   // limit is being exceeded as checked below.
   *gc_overhead_limit_was_exceeded = false;
 
-  HeapWord* result = young_gen()->allocate(size, is_tlab);
+  HeapWord* result = young_gen()->allocate(size);
 
   uint loop_count = 0;
   uint gc_count = 0;
@@ -419,7 +417,7 @@ HeapWord* ParallelScavengeHeap::mem_allocate(
       MutexLocker ml(Heap_lock);
       gc_count = Universe::heap()->total_collections();
 
-      result = young_gen()->allocate(size, is_tlab);
+      result = young_gen()->allocate(size);
 
       // (1) If the requested object is too large to easily fit in the
       //     young_gen, or
@@ -433,21 +431,13 @@ HeapWord* ParallelScavengeHeap::mem_allocate(
       if (result != NULL) {
         return result;
       }
-      if (!is_tlab &&
-          size >= (young_gen()->eden_space()->capacity_in_words(Thread::current()) / 2)) {
-        result = old_gen()->allocate(size, is_tlab);
+      if (size >= (young_gen()->eden_space()->capacity_in_words(Thread::current()) / 2)) {
+        result = old_gen()->allocate(size);
         if (result != NULL) {
           return result;
         }
       }
       if (GC_locker::is_active_and_needs_gc()) {
-        // GC is locked out. If this is a TLAB allocation,
-        // return NULL; the requestor will retry allocation
-        // of an idividual object at a time.
-        if (is_tlab) {
-          return NULL;
-        }
-
         // If this thread is not in a jni critical section, we stall
         // the requestor until the critical section has cleared and
         // GC allowed. When the critical section clears, a GC is
@@ -472,7 +462,7 @@ HeapWord* ParallelScavengeHeap::mem_allocate(
     if (result == NULL) {
 
       // Generate a VM operation
-      VM_ParallelGCFailedAllocation op(size, is_tlab, gc_count);
+      VM_ParallelGCFailedAllocation op(size, gc_count);
       VMThread::execute(&op);
 
       // Did the VM operation execute? If so, return the result directly.
@@ -526,7 +516,7 @@ HeapWord* ParallelScavengeHeap::mem_allocate(
     if ((result == NULL) && (QueuedAllocationWarningCount > 0) &&
         (loop_count % QueuedAllocationWarningCount == 0)) {
       warning("ParallelScavengeHeap::mem_allocate retries %d times \n\t"
-              " size=%d %s", loop_count, size, is_tlab ? "(TLAB)" : "");
+              " size=%d", loop_count, size);
     }
   }
 
@@ -539,7 +529,7 @@ HeapWord* ParallelScavengeHeap::mem_allocate(
 // time over limit here, that is the responsibility of the heap specific
 // collection methods. This method decides where to attempt allocations,
 // and when to attempt collections, but no collection specific policy.
-HeapWord* ParallelScavengeHeap::failed_mem_allocate(size_t size, bool is_tlab) {
+HeapWord* ParallelScavengeHeap::failed_mem_allocate(size_t size) {
   assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint");
   assert(Thread::current() == (Thread*)VMThread::vm_thread(), "should be in vm thread");
   assert(!Universe::heap()->is_gc_active(), "not reentrant");
@@ -553,7 +543,7 @@ HeapWord* ParallelScavengeHeap::failed_mem_allocate(size_t size, bool is_tlab) {
   // First level allocation failure, scavenge and allocate in young gen.
   GCCauseSetter gccs(this, GCCause::_allocation_failure);
   PSScavenge::invoke();
-  HeapWord* result = young_gen()->allocate(size, is_tlab);
+  HeapWord* result = young_gen()->allocate(size);
 
   // Second level allocation failure.
   // Mark sweep and allocate in young generation.
@@ -562,28 +552,28 @@ HeapWord* ParallelScavengeHeap::failed_mem_allocate(size_t size, bool is_tlab) {
     // Don't mark sweep twice if so.
     if (mark_sweep_invocation_count == total_invocations()) {
       invoke_full_gc(false);
-      result = young_gen()->allocate(size, is_tlab);
+      result = young_gen()->allocate(size);
     }
   }
 
   // Third level allocation failure.
   // After mark sweep and young generation allocation failure,
   // allocate in old generation.
-  if (result == NULL && !is_tlab) {
-    result = old_gen()->allocate(size, is_tlab);
+  if (result == NULL) {
+    result = old_gen()->allocate(size);
   }
 
   // Fourth level allocation failure. We're running out of memory.
   // More complete mark sweep and allocate in young generation.
   if (result == NULL) {
     invoke_full_gc(true);
-    result = young_gen()->allocate(size, is_tlab);
+    result = young_gen()->allocate(size);
   }
 
   // Fifth level allocation failure.
   // After more complete mark sweep, allocate in old generation.
-  if (result == NULL && !is_tlab) {
-    result = old_gen()->allocate(size, is_tlab);
+  if (result == NULL) {
+    result = old_gen()->allocate(size);
   }
 
   return result;
@@ -761,7 +751,7 @@ size_t ParallelScavengeHeap::unsafe_max_tlab_alloc(Thread* thr) const {
 }
 
 HeapWord* ParallelScavengeHeap::allocate_new_tlab(size_t size) {
-  return young_gen()->allocate(size, true);
+  return young_gen()->allocate(size);
 }
 
 void ParallelScavengeHeap::accumulate_statistics_all_tlabs() {
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -165,12 +165,13 @@ CollectorPolicy* collector_policy() const { return (CollectorPolicy*) _collector
   // an excessive amount of time is being spent doing collections
   // and caused a NULL to be returned. If a NULL is not returned,
   // "gc_time_limit_was_exceeded" has an undefined meaning.
-
   HeapWord* mem_allocate(size_t size,
-                         bool is_noref,
-                         bool is_tlab,
                          bool* gc_overhead_limit_was_exceeded);
-  HeapWord* failed_mem_allocate(size_t size, bool is_tlab);
+
+  // Allocation attempt(s) during a safepoint. It should never be called
+  // to allocate a new TLAB as this allocation might be satisfied out
+  // of the old generation.
+  HeapWord* failed_mem_allocate(size_t size);
 
   HeapWord* permanent_mem_allocate(size_t size);
   HeapWord* failed_permanent_mem_allocate(size_t size);
@@ -194,8 +195,6 @@ CollectorPolicy* collector_policy() const { return (CollectorPolicy*) _collector
   inline void invoke_scavenge();
   inline void invoke_full_gc(bool maximum_compaction);
 
-  size_t large_typearray_limit() { return FastAllocateSizeLimit; }
-
   bool supports_inline_contig_alloc() const { return !UseNUMA; }
 
   HeapWord** top_addr() const { return !UseNUMA ? young_gen()->top_addr() : (HeapWord**)-1; }
@@ -182,12 +182,12 @@ size_t PSOldGen::contiguous_available() const {
 
 // Allocation. We report all successful allocations to the size policy
 // Note that the perm gen does not use this method, and should not!
-HeapWord* PSOldGen::allocate(size_t word_size, bool is_tlab) {
+HeapWord* PSOldGen::allocate(size_t word_size) {
   assert_locked_or_safepoint(Heap_lock);
-  HeapWord* res = allocate_noexpand(word_size, is_tlab);
+  HeapWord* res = allocate_noexpand(word_size);
 
   if (res == NULL) {
-    res = expand_and_allocate(word_size, is_tlab);
+    res = expand_and_allocate(word_size);
   }
 
   // Allocations in the old generation need to be reported
@@ -199,13 +199,12 @@ HeapWord* PSOldGen::allocate(size_t word_size, bool is_tlab) {
   return res;
 }
 
-HeapWord* PSOldGen::expand_and_allocate(size_t word_size, bool is_tlab) {
-  assert(!is_tlab, "TLAB's are not supported in PSOldGen");
+HeapWord* PSOldGen::expand_and_allocate(size_t word_size) {
   expand(word_size*HeapWordSize);
   if (GCExpandToAllocateDelayMillis > 0) {
     os::sleep(Thread::current(), GCExpandToAllocateDelayMillis, false);
   }
-  return allocate_noexpand(word_size, is_tlab);
+  return allocate_noexpand(word_size);
 }
 
 HeapWord* PSOldGen::expand_and_cas_allocate(size_t word_size) {
@@ -60,9 +60,8 @@ class PSOldGen : public CHeapObj {
   // Used when initializing the _name field.
   static inline const char* select_name();
 
-  HeapWord* allocate_noexpand(size_t word_size, bool is_tlab) {
+  HeapWord* allocate_noexpand(size_t word_size) {
     // We assume the heap lock is held here.
-    assert(!is_tlab, "Does not support TLAB allocation");
     assert_locked_or_safepoint(Heap_lock);
     HeapWord* res = object_space()->allocate(word_size);
     if (res != NULL) {
@@ -89,7 +88,7 @@ class PSOldGen : public CHeapObj {
     return (res == NULL) ? expand_and_cas_allocate(word_size) : res;
   }
 
-  HeapWord* expand_and_allocate(size_t word_size, bool is_tlab);
+  HeapWord* expand_and_allocate(size_t word_size);
   HeapWord* expand_and_cas_allocate(size_t word_size);
   void expand(size_t bytes);
   bool expand_by(size_t bytes);
@@ -164,7 +163,7 @@ class PSOldGen : public CHeapObj {
 
   // Allocation. We report all successful allocations to the size policy
   // Note that the perm gen does not use this method, and should not!
-  HeapWord* allocate(size_t word_size, bool is_tlab);
+  HeapWord* allocate(size_t word_size);
 
   // Iteration.
   void oop_iterate(OopClosure* cl) { object_space()->oop_iterate(cl); }
@@ -46,10 +46,10 @@ PSPermGen::PSPermGen(ReservedSpace rs, size_t alignment,
 
 HeapWord* PSPermGen::allocate_permanent(size_t size) {
   assert_locked_or_safepoint(Heap_lock);
-  HeapWord* obj = allocate_noexpand(size, false);
+  HeapWord* obj = allocate_noexpand(size);
 
   if (obj == NULL) {
-    obj = expand_and_allocate(size, false);
+    obj = expand_and_allocate(size);
   }
 
   return obj;
@@ -157,7 +157,7 @@ class PSYoungGen : public CHeapObj {
   }
 
   // Allocation
-  HeapWord* allocate(size_t word_size, bool is_tlab) {
+  HeapWord* allocate(size_t word_size) {
     HeapWord* result = eden_space()->cas_allocate(word_size);
     return result;
   }
@@ -33,10 +33,9 @@
 
 // The following methods are used by the parallel scavenge collector
 VM_ParallelGCFailedAllocation::VM_ParallelGCFailedAllocation(size_t size,
-                                 bool is_tlab, unsigned int gc_count) :
+                                 unsigned int gc_count) :
   VM_GC_Operation(gc_count, GCCause::_allocation_failure),
   _size(size),
-  _is_tlab(is_tlab),
   _result(NULL)
 {
 }
@@ -48,7 +47,7 @@ void VM_ParallelGCFailedAllocation::doit() {
   assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "must be a ParallelScavengeHeap");
 
   GCCauseSetter gccs(heap, _gc_cause);
-  _result = heap->failed_mem_allocate(_size, _is_tlab);
+  _result = heap->failed_mem_allocate(_size);
 
   if (_result == NULL && GC_locker::is_active_and_needs_gc()) {
     set_gc_locked();
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2007, 2011, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -32,12 +32,10 @@
 class VM_ParallelGCFailedAllocation: public VM_GC_Operation {
  private:
   size_t _size;
-  bool _is_tlab;
   HeapWord* _result;
 
  public:
-  VM_ParallelGCFailedAllocation(size_t size, bool is_tlab,
-                                unsigned int gc_count);
+  VM_ParallelGCFailedAllocation(size_t size, unsigned int gc_count);
 
   virtual VMOp_Type type() const {
     return VMOp_ParallelGCFailedAllocation;