Mirror of https://github.com/openjdk/jdk.git
8029524: Remove unused method CollectedHeap::unsafe_max_alloc()
Reviewed-by: pliden, jmasa
commit 810bc0c558
parent 343c8d34a8

7 changed files with 0 additions and 55 deletions
g1CollectedHeap.cpp

@@ -2376,25 +2376,6 @@ size_t G1CollectedHeap::recalculate_used() const {
   return blk.result();
 }
 
-size_t G1CollectedHeap::unsafe_max_alloc() {
-  if (free_regions() > 0) return HeapRegion::GrainBytes;
-  // otherwise, is there space in the current allocation region?
-
-  // We need to store the current allocation region in a local variable
-  // here. The problem is that this method doesn't take any locks and
-  // there may be other threads which overwrite the current allocation
-  // region field. attempt_allocation(), for example, sets it to NULL
-  // and this can happen *after* the NULL check here but before the call
-  // to free(), resulting in a SIGSEGV. Note that this doesn't appear
-  // to be a problem in the optimized build, since the two loads of the
-  // current allocation region field are optimized away.
-  HeapRegion* hr = _mutator_alloc_region.get();
-  if (hr == NULL) {
-    return 0;
-  }
-  return hr->free();
-}
-
 bool G1CollectedHeap::should_do_concurrent_full_gc(GCCause::Cause cause) {
   switch (cause) {
     case GCCause::_gc_locker: return GCLockerInvokesConcurrent;
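The comment in the removed G1 implementation records a classic check-then-act race: the method took no lock and loaded the current allocation region field twice, once for the NULL check and once for the free() call, so a concurrent attempt_allocation() could NULL the field between the two loads. Below is a minimal standalone sketch of that hazard and of the snapshot-into-a-local fix the removed code used; Region and current_region are hypothetical stand-ins, not HotSpot types.

#include <atomic>
#include <cstddef>

// Hypothetical stand-in for HeapRegion.
struct Region {
  size_t free_bytes;
  size_t free() const { return free_bytes; }
};

// Shared field; another thread may store NULL here at any time, as
// attempt_allocation() did to the current allocation region field.
std::atomic<Region*> current_region{nullptr};

// Racy: two separate loads of the shared field. The region can be
// cleared between the check and the dereference, producing the
// SIGSEGV the removed comment warns about.
size_t max_alloc_racy() {
  if (current_region.load() == nullptr) return 0;
  return current_region.load()->free();
}

// The removed code's fix: snapshot the field into a local exactly
// once, then check and use the same value.
size_t max_alloc_snapshot() {
  Region* r = current_region.load();
  return (r == nullptr) ? 0 : r->free();
}

As the removed comment notes, optimized builds happened to mask the bug because the compiler merged the two loads; the local-variable snapshot makes the single load explicit instead of relying on that.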
g1CollectedHeap.hpp

@@ -1183,15 +1183,6 @@ public:
   // end fields defining the extent of the contiguous allocation region.)
   // But G1CollectedHeap doesn't yet support this.
 
-  // Return an estimate of the maximum allocation that could be performed
-  // without triggering any collection or expansion activity. In a
-  // generational collector, for example, this is probably the largest
-  // allocation that could be supported (without expansion) in the youngest
-  // generation. It is "unsafe" because no locks are taken; the result
-  // should be treated as an approximation, not a guarantee, for use in
-  // heuristic resizing decisions.
-  virtual size_t unsafe_max_alloc();
-
   virtual bool is_maximal_no_gc() const {
     return _g1_storage.uncommitted_size() == 0;
   }
parallelScavengeHeap.cpp

@@ -484,10 +484,6 @@ void ParallelScavengeHeap::ensure_parsability(bool retire_tlabs) {
   young_gen()->eden_space()->ensure_parsability();
 }
 
-size_t ParallelScavengeHeap::unsafe_max_alloc() {
-  return young_gen()->eden_space()->free_in_bytes();
-}
-
 size_t ParallelScavengeHeap::tlab_capacity(Thread* thr) const {
   return young_gen()->eden_space()->tlab_capacity(thr);
 }
parallelScavengeHeap.hpp

@@ -184,8 +184,6 @@ class ParallelScavengeHeap : public CollectedHeap {
   void accumulate_statistics_all_tlabs();
   void resize_all_tlabs();
 
-  size_t unsafe_max_alloc();
-
   bool supports_tlab_allocation() const { return true; }
 
   size_t tlab_capacity(Thread* thr) const;
collectedHeap.hpp

@@ -389,15 +389,6 @@ class CollectedHeap : public CHeapObj<mtInternal> {
   // allocation from them and necessitating allocation of new TLABs.
   virtual void ensure_parsability(bool retire_tlabs);
 
-  // Return an estimate of the maximum allocation that could be performed
-  // without triggering any collection or expansion activity. In a
-  // generational collector, for example, this is probably the largest
-  // allocation that could be supported (without expansion) in the youngest
-  // generation. It is "unsafe" because no locks are taken; the result
-  // should be treated as an approximation, not a guarantee, for use in
-  // heuristic resizing decisions.
-  virtual size_t unsafe_max_alloc() = 0;
-
   // Section on thread-local allocation buffers (TLABs)
   // If the heap supports thread-local allocation buffers, it should override
   // the following methods:
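The pure virtual declaration removed here was the root of the pattern the other hunks delete: CollectedHeap declared a lock-free advisory estimate, and each collector overrode it with its cheapest available answer, a whole region for G1, eden's free bytes for ParallelScavenge, the youngest generation's headroom for GenCollectedHeap. A compressed sketch of that shape, assuming hypothetical class names, fields, and a made-up 1 MB region size:

#include <cstddef>

// Abstract base, as CollectedHeap was: the estimate is advisory only,
// because no locks are taken and the value may be stale immediately.
class Heap {
 public:
  virtual ~Heap() {}
  virtual size_t unsafe_max_alloc() = 0;
};

// Eden-backed collector (cf. ParallelScavengeHeap): free space in
// eden bounds the largest allocation that needs no collection.
class EdenHeap : public Heap {
  size_t _eden_free;  // hypothetical field
 public:
  EdenHeap(size_t eden_free) : _eden_free(eden_free) {}
  virtual size_t unsafe_max_alloc() { return _eden_free; }
};

// Region-backed collector (cf. G1CollectedHeap): any free region can
// absorb an allocation of up to one region.
class RegionHeap : public Heap {
  size_t _free_regions;                           // hypothetical field
  static const size_t RegionBytes = 1024 * 1024;  // made-up region size
 public:
  RegionHeap(size_t free_regions) : _free_regions(free_regions) {}
  virtual size_t unsafe_max_alloc() {
    return _free_regions > 0 ? RegionBytes : 0;
  }
};

Removing the pure virtual from the base class is what allows every override and its per-collector comment block to go as well.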
genCollectedHeap.cpp

@@ -673,10 +673,6 @@ HeapWord** GenCollectedHeap::end_addr() const {
   return _gens[0]->end_addr();
 }
 
-size_t GenCollectedHeap::unsafe_max_alloc() {
-  return _gens[0]->unsafe_max_alloc_nogc();
-}
-
 // public collection interfaces
 
 void GenCollectedHeap::collect(GCCause::Cause cause) {
genCollectedHeap.hpp

@@ -166,14 +166,6 @@ public:
   HeapWord** top_addr() const;
   HeapWord** end_addr() const;
 
-  // Return an estimate of the maximum allocation that could be performed
-  // without triggering any collection activity. In a generational
-  // collector, for example, this is probably the largest allocation that
-  // could be supported in the youngest generation. It is "unsafe" because
-  // no locks are taken; the result should be treated as an approximation,
-  // not a guarantee.
-  size_t unsafe_max_alloc();
-
   // Does this heap support heap inspection? (+PrintClassHistogram)
   virtual bool supports_heap_inspection() const { return true; }
 