mirror of https://github.com/openjdk/jdk.git
7133260: AllocationProfiler uses space in metadata and doesn't seem to do anything useful
Remove -Xaprof and Klass::_alloc_count & ArrayKlass::_alloc_size. Reviewed-by: stefank, coleenp
commit b454ece6d7
parent 4ecee47075
30 changed files with 7 additions and 388 deletions
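For context: the removed -Xaprof path appears to have hooked into the GC prologue (see the G1CollectedHeap::gc_prologue hunk below), walked the objects allocated since the previous collection, and charged each one to its class through the Klass::_alloc_count / ArrayKlass::_alloc_size counters this change also deletes. The stand-alone C++ sketch below only models that tally-per-class shape under those assumptions; ObjClosure, AllocationTally, and FakeHeap are illustrative stand-ins, not HotSpot types.

// Illustrative model of the removed profiler's shape: a closure is applied to
// every object allocated since the last GC and bumps per-class counters kept
// in a side table (instead of in class metadata, as the old counters were).
#include <cstdio>
#include <map>
#include <string>
#include <vector>

struct Obj { std::string klass_name; std::size_t size_in_words; };

// Rough analogue of HotSpot's ObjectClosure: one virtual hook per object.
struct ObjClosure {
  virtual void do_object(const Obj& o) = 0;
  virtual ~ObjClosure() = default;
};

// Hypothetical tally playing the role of the Klass::_alloc_count and
// ArrayKlass::_alloc_size fields that this commit removes.
struct AllocationTally : ObjClosure {
  std::map<std::string, std::size_t> count, words;
  void do_object(const Obj& o) override {
    count[o.klass_name] += 1;
    words[o.klass_name] += o.size_in_words;
  }
  void print() const {
    for (const auto& entry : count)
      std::printf("%8zu instances %8zu words  %s\n",
                  entry.second, words.at(entry.first), entry.first.c_str());
  }
};

// Hypothetical heap exposing the object_iterate_since_last_GC-style walk that
// the hunks below delete from CMS and G1.
struct FakeHeap {
  std::vector<Obj> allocated_since_last_gc;
  void object_iterate_since_last_GC(ObjClosure* cl) {
    for (const Obj& o : allocated_since_last_gc) cl->do_object(o);
  }
};

int main() {
  FakeHeap heap;
  heap.allocated_since_last_gc = { {"java/lang/String", 6},
                                   {"java/lang/String", 6},
                                   {"[B", 16} };
  AllocationTally tally;                       // roughly what the GC-prologue hook drove
  heap.object_iterate_since_last_GC(&tally);
  tally.print();
  return 0;
}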
@@ -2017,12 +2017,6 @@ oop_since_save_marks_iterate##nv_suffix(OopClosureType* blk) { \
 
 ALL_SINCE_SAVE_MARKS_CLOSURES(CFLS_OOP_SINCE_SAVE_MARKS_DEFN)
 
-void CompactibleFreeListSpace::object_iterate_since_last_GC(ObjectClosure* cl) {
-  // ugghh... how would one do this efficiently for a non-contiguous space?
-  guarantee(false, "NYI");
-}
-
 bool CompactibleFreeListSpace::linearAllocationWouldFail() const {
   return _smallLinearAllocBlock._word_size == 0;
 }
@@ -396,7 +396,6 @@ class CompactibleFreeListSpace: public CompactibleSpace {
   // iteration support for promotion
   void save_marks();
   bool no_allocs_since_save_marks();
-  void object_iterate_since_last_GC(ObjectClosure* cl);
 
   // iteration support for sweeping
   void save_sweep_limit() {
@@ -3129,26 +3129,6 @@ oop_since_save_marks_iterate##nv_suffix(OopClosureType* cl) { \
 
 ALL_SINCE_SAVE_MARKS_CLOSURES(CMS_SINCE_SAVE_MARKS_DEFN)
 
-void
-ConcurrentMarkSweepGeneration::object_iterate_since_last_GC(ObjectClosure* blk)
-{
-  // Not currently implemented; need to do the following. -- ysr.
-  // dld -- I think that is used for some sort of allocation profiler. So it
-  // really means the objects allocated by the mutator since the last
-  // GC. We could potentially implement this cheaply by recording only
-  // the direct allocations in a side data structure.
-  //
-  // I think we probably ought not to be required to support these
-  // iterations at any arbitrary point; I think there ought to be some
-  // call to enable/disable allocation profiling in a generation/space,
-  // and the iterator ought to return the objects allocated in the
-  // gen/space since the enable call, or the last iterator call (which
-  // will probably be at a GC.) That way, for gens like CM&S that would
-  // require some extra data structure to support this, we only pay the
-  // cost when it's in use...
-  cmsSpace()->object_iterate_since_last_GC(blk);
-}
-
 void
 ConcurrentMarkSweepGeneration::younger_refs_iterate(OopsInGenClosure* cl) {
   cl->set_generation(this);
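The comment removed above proposes an alternative design: enable or disable allocation profiling per generation or space, record only the direct allocations in a side data structure while it is enabled, and have the iterator return what was recorded since the enable call or the previous iteration. Below is a minimal stand-alone C++ sketch of that idea, with the same caveat that RecordingSpace and its methods are hypothetical names rather than HotSpot code.

// Sketch of the enable/disable scheme the removed CMS comment describes: the
// side buffer is populated only while profiling is on, so spaces that would
// need extra bookkeeping (like CMS) pay the cost only when it is in use.
#include <cstddef>
#include <functional>
#include <vector>

class RecordingSpace {
  bool profiling_enabled_ = false;
  std::vector<void*> recorded_;   // hypothetical side data structure

public:
  void enable_allocation_profiling()  { profiling_enabled_ = true; }
  void disable_allocation_profiling() { profiling_enabled_ = false; recorded_.clear(); }

  // Stands in for the space's real allocation path.
  void* allocate(std::size_t bytes) {
    void* p = ::operator new(bytes);
    if (profiling_enabled_) recorded_.push_back(p);   // record direct allocations only
    return p;
  }

  // Visit the objects allocated since the enable call or the last iterator
  // call, then reset -- the contract the removed comment argues for.
  void iterate_since_last_call(const std::function<void(void*)>& do_object) {
    for (void* p : recorded_) do_object(p);
    recorded_.clear();
  }
};

int main() {
  RecordingSpace space;
  space.allocate(32);                  // not recorded: profiling is off
  space.enable_allocation_profiling();
  space.allocate(64);                  // recorded
  std::size_t n = 0;
  space.iterate_since_last_call([&](void*) { ++n; });
  return n == 1 ? 0 : 1;               // exactly one object seen since enable
}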
@@ -1273,7 +1273,6 @@ class ConcurrentMarkSweepGeneration: public CardGeneration {
   // Iteration support and related enquiries
   void save_marks();
   bool no_allocs_since_save_marks();
-  void object_iterate_since_last_GC(ObjectClosure* cl);
   void younger_refs_iterate(OopsInGenClosure* cl);
 
   // Iteration support specific to CMS generations
@@ -54,7 +54,6 @@
 #include "memory/referenceProcessor.hpp"
 #include "oops/oop.inline.hpp"
 #include "oops/oop.pcgc.inline.hpp"
-#include "runtime/aprofiler.hpp"
 #include "runtime/vmThread.hpp"
 
 size_t G1CollectedHeap::_humongous_object_threshold_in_words = 0;
@@ -2665,11 +2664,6 @@ void G1CollectedHeap::object_iterate(ObjectClosure* cl) {
   heap_region_iterate(&blk);
 }
 
-void G1CollectedHeap::object_iterate_since_last_GC(ObjectClosure* cl) {
-  // FIXME: is this right?
-  guarantee(false, "object_iterate_since_last_GC not supported by G1 heap");
-}
-
 // Calls a SpaceClosure on a HeapRegion.
 
 class SpaceClosureRegionClosure: public HeapRegionClosure {
@@ -3598,8 +3592,6 @@ G1CollectedHeap* G1CollectedHeap::heap() {
 void G1CollectedHeap::gc_prologue(bool full /* Ignored */) {
   // always_do_update_barrier = false;
   assert(InlineCacheBuffer::is_empty(), "should have cleaned up ICBuffer");
-  // Call allocation profiler
-  AllocationProfiler::iterate_since_last_gc();
   // Fill TLAB's and such
   ensure_parsability(true);
 }
@@ -1360,11 +1360,6 @@ public:
     object_iterate(cl);
   }
 
-  // Iterate over all objects allocated since the last collection, calling
-  // "cl.do_object" on each. The heap must have been initialized properly
-  // to support this function, or else this call will fail.
-  virtual void object_iterate_since_last_GC(ObjectClosure* cl);
-
   // Iterate over all spaces in use in the heap, in ascending address order.
   virtual void space_iterate(SpaceClosure* cl);
@@ -43,7 +43,6 @@
 #include "oops/instanceRefKlass.hpp"
 #include "oops/oop.inline.hpp"
 #include "prims/jvmtiExport.hpp"
-#include "runtime/aprofiler.hpp"
 #include "runtime/biasedLocking.hpp"
 #include "runtime/fprofiler.hpp"
 #include "runtime/synchronizer.hpp"