Mirror of https://github.com/openjdk/jdk.git
Commit d44b2b7565: 19 changed files with 506 additions and 68 deletions
@@ -734,7 +734,7 @@ bool ClassLoaderDataGraph::contains_loader_data(ClassLoaderData* loader_data) {
 // Move class loader data from main list to the unloaded list for unloading
 // and deallocation later.
-bool ClassLoaderDataGraph::do_unloading(BoolObjectClosure* is_alive_closure) {
+bool ClassLoaderDataGraph::do_unloading(BoolObjectClosure* is_alive_closure, bool clean_alive) {
   ClassLoaderData* data = _head;
   ClassLoaderData* prev = NULL;
   bool seen_dead_loader = false;
@@ -743,27 +743,9 @@ bool ClassLoaderDataGraph::do_unloading(BoolObjectClosure* is_alive_closure) {
   // purging and we don't want to rewalk the previously unloaded class loader data.
   _saved_unloading = _unloading;

-  // mark metadata seen on the stack and code cache so we can delete
-  // unneeded entries.
-  bool has_redefined_a_class = JvmtiExport::has_redefined_a_class();
-  MetadataOnStackMark md_on_stack(has_redefined_a_class);
-  if (has_redefined_a_class) {
-    // purge_previous_versions also cleans weak method links. Because
-    // one method's MDO can reference another method from another
-    // class loader, we need to first clean weak method links for all
-    // class loaders here. Below, we can then free redefined methods
-    // for all class loaders.
-    while (data != NULL) {
-      if (data->is_alive(is_alive_closure)) {
-        data->classes_do(InstanceKlass::purge_previous_versions);
-      }
-      data = data->next();
-    }
-  }
   data = _head;
   while (data != NULL) {
     if (data->is_alive(is_alive_closure)) {
-      data->free_deallocate_list();
       prev = data;
       data = data->next();
       continue;
@@ -785,6 +767,11 @@ bool ClassLoaderDataGraph::do_unloading(BoolObjectClosure* is_alive_closure) {
     _unloading = dead;
   }

+  if (clean_alive) {
+    // Clean previous versions and the deallocate list.
+    ClassLoaderDataGraph::clean_metaspaces();
+  }
+
   if (seen_dead_loader) {
     post_class_unload_events();
   }
@@ -792,6 +779,26 @@ bool ClassLoaderDataGraph::do_unloading(BoolObjectClosure* is_alive_closure) {
   return seen_dead_loader;
 }

+void ClassLoaderDataGraph::clean_metaspaces() {
+  // mark metadata seen on the stack and code cache so we can delete unneeded entries.
+  bool has_redefined_a_class = JvmtiExport::has_redefined_a_class();
+  MetadataOnStackMark md_on_stack(has_redefined_a_class);
+
+  if (has_redefined_a_class) {
+    // purge_previous_versions also cleans weak method links. Because
+    // one method's MDO can reference another method from another
+    // class loader, we need to first clean weak method links for all
+    // class loaders here. Below, we can then free redefined methods
+    // for all class loaders.
+    for (ClassLoaderData* data = _head; data != NULL; data = data->next()) {
+      data->classes_do(InstanceKlass::purge_previous_versions);
+    }
+  }
+
+  // Need to purge the previous version before deallocating.
+  free_deallocate_lists();
+}
+
 void ClassLoaderDataGraph::purge() {
   assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint!");
   ClassLoaderData* list = _unloading;
@@ -819,6 +826,14 @@ void ClassLoaderDataGraph::post_class_unload_events(void) {
 #endif
 }

+void ClassLoaderDataGraph::free_deallocate_lists() {
+  for (ClassLoaderData* cld = _head; cld != NULL; cld = cld->next()) {
+    // We need to keep this data until InstanceKlass::purge_previous_version has been
+    // called on all alive classes. See the comment in ClassLoaderDataGraph::clean_metaspaces.
+    cld->free_deallocate_list();
+  }
+}
+
 // CDS support

 // Global metaspaces for writing information to the shared archive.  When
@@ -71,6 +71,7 @@ class ClassLoaderDataGraph : public AllStatic {

   static ClassLoaderData* add(Handle class_loader, bool anonymous, TRAPS);
   static void post_class_unload_events(void);
+  static void clean_metaspaces();
  public:
   static ClassLoaderData* find_or_create(Handle class_loader, TRAPS);
   static void purge();
@@ -90,7 +91,7 @@ class ClassLoaderDataGraph : public AllStatic {
   static void methods_do(void f(Method*));
   static void loaded_classes_do(KlassClosure* klass_closure);
   static void classes_unloading_do(void f(Klass* const));
-  static bool do_unloading(BoolObjectClosure* is_alive);
+  static bool do_unloading(BoolObjectClosure* is_alive, bool clean_alive);

   // CMS support.
   static void remember_new_clds(bool remember) { _saved_head = (remember ? _head : NULL); }
@@ -106,6 +107,8 @@ class ClassLoaderDataGraph : public AllStatic {
     }
   }

+  static void free_deallocate_lists();
+
   static void dump_on(outputStream * const out) PRODUCT_RETURN;
   static void dump() { dump_on(tty); }
   static void verify();
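The clean_alive flag added above decouples class unloading from the cleaning of still-alive metadata. A minimal sketch of the two calling shapes this enables, assuming the illustrative wrapper names serial_remark and g1_remark (they are not HotSpot functions; the calls inside them are taken from this commit):

    // Serial collectors: clean_alive defaults to true, so
    // ClassLoaderDataGraph::clean_metaspaces() still runs inside do_unloading().
    void serial_remark(BoolObjectClosure* is_alive) {
      SystemDictionary::do_unloading(is_alive);
    }

    // G1: defer the klass cleaning. The CodeCache part of the on-stack marking
    // is produced during parallel unloading, and only afterwards is it safe to
    // free the deallocate lists.
    void g1_remark(BoolObjectClosure* is_alive) {
      MetadataOnStackMark md_on_stack(false /* don't visit the code cache yet */);
      SystemDictionary::do_unloading(is_alive, false /* clean_alive */);
      // ... parallel unloading completes the code cache marking ...
      ClassLoaderDataGraph::free_deallocate_lists();
    }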
@@ -31,25 +31,23 @@
 #include "runtime/synchronizer.hpp"
 #include "runtime/thread.hpp"
 #include "services/threadService.hpp"
-#include "utilities/growableArray.hpp"
+#include "utilities/chunkedList.hpp"
+
+volatile MetadataOnStackBuffer* MetadataOnStackMark::_used_buffers = NULL;
+volatile MetadataOnStackBuffer* MetadataOnStackMark::_free_buffers = NULL;

-// Keep track of marked on-stack metadata so it can be cleared.
-GrowableArray<Metadata*>* _marked_objects = NULL;
 NOT_PRODUCT(bool MetadataOnStackMark::_is_active = false;)

 // Walk metadata on the stack and mark it so that redefinition doesn't delete
 // it. Class unloading also walks the previous versions and might try to
 // delete it, so this class is used by class unloading also.
-MetadataOnStackMark::MetadataOnStackMark(bool has_redefined_a_class) {
+MetadataOnStackMark::MetadataOnStackMark(bool visit_code_cache) {
   assert(SafepointSynchronize::is_at_safepoint(), "sanity check");
+  assert(_used_buffers == NULL, "sanity check");
   NOT_PRODUCT(_is_active = true;)
-  if (_marked_objects == NULL) {
-    _marked_objects = new (ResourceObj::C_HEAP, mtClass) GrowableArray<Metadata*>(1000, true);
-  }

   Threads::metadata_do(Metadata::mark_on_stack);
-  if (has_redefined_a_class) {
+  if (visit_code_cache) {
     CodeCache::alive_nmethods_do(nmethod::mark_on_stack);
   }
   CompileBroker::mark_on_stack();
@@ -62,15 +60,93 @@ MetadataOnStackMark::~MetadataOnStackMark() {
   // Unmark everything that was marked. Can't do the same walk because
   // redefine classes messes up the code cache so the set of methods
   // might not be the same.
-  for (int i = 0; i< _marked_objects->length(); i++) {
-    _marked_objects->at(i)->set_on_stack(false);
+  retire_buffer_for_thread(Thread::current());
+
+  MetadataOnStackBuffer* buffer = const_cast<MetadataOnStackBuffer* >(_used_buffers);
+  while (buffer != NULL) {
+    // Clear on stack state for all metadata.
+    size_t size = buffer->size();
+    for (size_t i = 0; i < size; i++) {
+      Metadata* md = buffer->at(i);
+      md->set_on_stack(false);
+    }
+
+    MetadataOnStackBuffer* next = buffer->next_used();
+
+    // Move the buffer to the free list.
+    buffer->clear();
+    buffer->set_next_used(NULL);
+    buffer->set_next_free(const_cast<MetadataOnStackBuffer*>(_free_buffers));
+    _free_buffers = buffer;
+
+    // Step to next used buffer.
+    buffer = next;
   }
-  _marked_objects->clear();   // reuse growable array for next time.
+
+  _used_buffers = NULL;
+
   NOT_PRODUCT(_is_active = false;)
 }

-// Record which objects are marked so we can unmark the same objects.
-void MetadataOnStackMark::record(Metadata* m) {
-  assert(_is_active, "metadata on stack marking is active");
-  _marked_objects->push(m);
+void MetadataOnStackMark::retire_buffer(MetadataOnStackBuffer* buffer) {
+  if (buffer == NULL) {
+    return;
+  }
+
+  MetadataOnStackBuffer* old_head;
+
+  do {
+    old_head = const_cast<MetadataOnStackBuffer*>(_used_buffers);
+    buffer->set_next_used(old_head);
+  } while (Atomic::cmpxchg_ptr(buffer, &_used_buffers, old_head) != old_head);
+}
+
+void MetadataOnStackMark::retire_buffer_for_thread(Thread* thread) {
+  retire_buffer(thread->metadata_on_stack_buffer());
+  thread->set_metadata_on_stack_buffer(NULL);
+}
+
+bool MetadataOnStackMark::has_buffer_for_thread(Thread* thread) {
+  return thread->metadata_on_stack_buffer() != NULL;
+}
+
+MetadataOnStackBuffer* MetadataOnStackMark::allocate_buffer() {
+  MetadataOnStackBuffer* allocated;
+  MetadataOnStackBuffer* new_head;
+
+  do {
+    allocated = const_cast<MetadataOnStackBuffer*>(_free_buffers);
+    if (allocated == NULL) {
+      break;
+    }
+    new_head = allocated->next_free();
+  } while (Atomic::cmpxchg_ptr(new_head, &_free_buffers, allocated) != allocated);
+
+  if (allocated == NULL) {
+    allocated = new MetadataOnStackBuffer();
+  }
+
+  assert(!allocated->is_full(), err_msg("Should not be full: " PTR_FORMAT, p2i(allocated)));
+
+  return allocated;
+}
+
+// Record which objects are marked so we can unmark the same objects.
+void MetadataOnStackMark::record(Metadata* m, Thread* thread) {
+  assert(_is_active, "metadata on stack marking is active");
+
+  MetadataOnStackBuffer* buffer = thread->metadata_on_stack_buffer();
+
+  if (buffer != NULL && buffer->is_full()) {
+    retire_buffer(buffer);
+    buffer = NULL;
+  }
+
+  if (buffer == NULL) {
+    buffer = allocate_buffer();
+    thread->set_metadata_on_stack_buffer(buffer);
+  }
+
+  buffer->push(m);
 }
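retire_buffer() above publishes a thread-local buffer onto the shared _used_buffers list with a compare-and-swap loop, and allocate_buffer() pops off _free_buffers the same way: a lock-free (Treiber) stack. The push half in portable C++11, a minimal sketch that substitutes std::atomic for HotSpot's Atomic::cmpxchg_ptr:

    #include <atomic>

    struct Buffer {
      Buffer* next_used = nullptr;
    };

    std::atomic<Buffer*> used_buffers{nullptr};

    // Lock-free push: link the node to the observed head, then try to swing
    // the head pointer. On failure compare_exchange_weak reloads old_head,
    // so the retry links against the freshly observed head.
    void retire(Buffer* b) {
      Buffer* old_head = used_buffers.load();
      do {
        b->next_used = old_head;
      } while (!used_buffers.compare_exchange_weak(old_head, b));
    }

The pop side is the mirror image. It appears safe from the ABA problem here because _free_buffers is only refilled in the destructor, which runs single-threaded at a safepoint, while concurrent workers only pop.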
@@ -26,9 +26,12 @@
 #define SHARE_VM_CLASSFILE_METADATAONSTACKMARK_HPP

 #include "memory/allocation.hpp"
+#include "utilities/chunkedList.hpp"

 class Metadata;

+typedef ChunkedList<Metadata*, mtInternal> MetadataOnStackBuffer;
+
 // Helper class to mark and unmark metadata used on the stack as either handles
 // or executing methods, so that it can't be deleted during class redefinition
 // and class unloading.
@@ -36,10 +39,20 @@ class Metadata;
 // metadata during parsing, relocated methods, and methods in backtraces.
 class MetadataOnStackMark : public StackObj {
   NOT_PRODUCT(static bool _is_active;)

+  static volatile MetadataOnStackBuffer* _used_buffers;
+  static volatile MetadataOnStackBuffer* _free_buffers;
+
+  static MetadataOnStackBuffer* allocate_buffer();
+  static void retire_buffer(MetadataOnStackBuffer* buffer);
+
  public:
-  MetadataOnStackMark(bool has_redefined_a_class);
+  MetadataOnStackMark(bool visit_code_cache);
   ~MetadataOnStackMark();
-  static void record(Metadata* m);
+
+  static void record(Metadata* m, Thread* thread);
+  static void retire_buffer_for_thread(Thread* thread);
+  static bool has_buffer_for_thread(Thread* thread);
 };

 #endif // SHARE_VM_CLASSFILE_METADATAONSTACKMARK_HPP
@@ -1690,9 +1690,9 @@ public:

 // Assumes classes in the SystemDictionary are only unloaded at a safepoint
 // Note: anonymous classes are not in the SD.
-bool SystemDictionary::do_unloading(BoolObjectClosure* is_alive) {
+bool SystemDictionary::do_unloading(BoolObjectClosure* is_alive, bool clean_alive) {
   // First, mark for unload all ClassLoaderData referencing a dead class loader.
-  bool unloading_occurred = ClassLoaderDataGraph::do_unloading(is_alive);
+  bool unloading_occurred = ClassLoaderDataGraph::do_unloading(is_alive, clean_alive);
   if (unloading_occurred) {
     dictionary()->do_unloading();
     constraints()->purge_loader_constraints();
@@ -334,7 +334,7 @@ public:

   // Unload (that is, break root links to) all unmarked classes and
   // loaders.  Returns "true" iff something was unloaded.
-  static bool do_unloading(BoolObjectClosure* is_alive);
+  static bool do_unloading(BoolObjectClosure* is_alive, bool clean_alive = true);

   // Used by DumpSharedSpaces only to remove classes that failed verification
   static void remove_classes_in_error_state();
@@ -1700,11 +1700,17 @@ void nmethod::post_compiled_method_unload() {
   set_unload_reported();
 }

-void static clean_ic_if_metadata_is_dead(CompiledIC *ic, BoolObjectClosure *is_alive) {
+void static clean_ic_if_metadata_is_dead(CompiledIC *ic, BoolObjectClosure *is_alive, bool mark_on_stack) {
   if (ic->is_icholder_call()) {
     // The only exception is compiledICHolder oops which may
     // yet be marked below. (We check this further below).
     CompiledICHolder* cichk_oop = ic->cached_icholder();
+
+    if (mark_on_stack) {
+      Metadata::mark_on_stack(cichk_oop->holder_method());
+      Metadata::mark_on_stack(cichk_oop->holder_klass());
+    }
+
     if (cichk_oop->holder_method()->method_holder()->is_loader_alive(is_alive) &&
         cichk_oop->holder_klass()->is_loader_alive(is_alive)) {
       return;
@@ -1712,6 +1718,10 @@ void static clean_ic_if_metadata_is_dead(CompiledIC *ic, BoolObjectClosure *is_a
   } else {
     Metadata* ic_oop = ic->cached_metadata();
     if (ic_oop != NULL) {
+      if (mark_on_stack) {
+        Metadata::mark_on_stack(ic_oop);
+      }
+
       if (ic_oop->is_klass()) {
         if (((Klass*)ic_oop)->is_loader_alive(is_alive)) {
           return;
@@ -1772,7 +1782,7 @@ void nmethod::do_unloading(BoolObjectClosure* is_alive, bool unloading_occurred)
   while(iter.next()) {
     if (iter.type() == relocInfo::virtual_call_type) {
       CompiledIC *ic = CompiledIC_at(&iter);
-      clean_ic_if_metadata_is_dead(ic, is_alive);
+      clean_ic_if_metadata_is_dead(ic, is_alive, false);
     }
   }
 }
@@ -1840,6 +1850,53 @@ static bool clean_if_nmethod_is_unloaded(CompiledStaticCall *csc, BoolObjectClos
   return clean_if_nmethod_is_unloaded(csc, csc->destination(), is_alive, from);
 }

+bool nmethod::unload_if_dead_at(RelocIterator* iter_at_oop, BoolObjectClosure *is_alive, bool unloading_occurred) {
+  assert(iter_at_oop->type() == relocInfo::oop_type, "Wrong relocation type");
+
+  oop_Relocation* r = iter_at_oop->oop_reloc();
+  // Traverse those oops directly embedded in the code.
+  // Other oops (oop_index>0) are seen as part of scopes_oops.
+  assert(1 == (r->oop_is_immediate()) +
+         (r->oop_addr() >= oops_begin() && r->oop_addr() < oops_end()),
+         "oop must be found in exactly one place");
+  if (r->oop_is_immediate() && r->oop_value() != NULL) {
+    // Unload this nmethod if the oop is dead.
+    if (can_unload(is_alive, r->oop_addr(), unloading_occurred)) {
+      return true;
+    }
+  }
+
+  return false;
+}
+
+void nmethod::mark_metadata_on_stack_at(RelocIterator* iter_at_metadata) {
+  assert(iter_at_metadata->type() == relocInfo::metadata_type, "Wrong relocation type");
+
+  metadata_Relocation* r = iter_at_metadata->metadata_reloc();
+  // In this metadata, we must only follow those metadatas directly embedded in
+  // the code.  Other metadatas (oop_index>0) are seen as part of
+  // the metadata section below.
+  assert(1 == (r->metadata_is_immediate()) +
+         (r->metadata_addr() >= metadata_begin() && r->metadata_addr() < metadata_end()),
+         "metadata must be found in exactly one place");
+  if (r->metadata_is_immediate() && r->metadata_value() != NULL) {
+    Metadata* md = r->metadata_value();
+    if (md != _method) Metadata::mark_on_stack(md);
+  }
+}
+
+void nmethod::mark_metadata_on_stack_non_relocs() {
+  // Visit the metadata section
+  for (Metadata** p = metadata_begin(); p < metadata_end(); p++) {
+    if (*p == Universe::non_oop_word() || *p == NULL) continue;  // skip non-oops
+    Metadata* md = *p;
+    Metadata::mark_on_stack(md);
+  }
+
+  // Visit metadata not embedded in the other places.
+  if (_method != NULL) Metadata::mark_on_stack(_method);
+}
+
 bool nmethod::do_unloading_parallel(BoolObjectClosure* is_alive, bool unloading_occurred) {
   ResourceMark rm;
@@ -1869,6 +1926,11 @@ bool nmethod::do_unloading_parallel(BoolObjectClosure* is_alive, bool unloading_
     unloading_occurred = true;
   }

+  // When class redefinition is used all metadata in the CodeCache has to be recorded,
+  // so that unused "previous versions" can be purged. Since walking the CodeCache can
+  // be expensive, the "mark on stack" is piggy-backed on this parallel unloading code.
+  bool mark_metadata_on_stack = a_class_was_redefined;
+
   // Exception cache
   clean_exception_cache(is_alive);
@@ -1884,7 +1946,7 @@ bool nmethod::do_unloading_parallel(BoolObjectClosure* is_alive, bool unloading_
     if (unloading_occurred) {
       // If class unloading occurred we first iterate over all inline caches and
       // clear ICs where the cached oop is referring to an unloaded klass or method.
-      clean_ic_if_metadata_is_dead(CompiledIC_at(&iter), is_alive);
+      clean_ic_if_metadata_is_dead(CompiledIC_at(&iter), is_alive, mark_metadata_on_stack);
     }

     postponed |= clean_if_nmethod_is_unloaded(CompiledIC_at(&iter), is_alive, this);
@@ -1900,24 +1962,21 @@ bool nmethod::do_unloading_parallel(BoolObjectClosure* is_alive, bool unloading_

     case relocInfo::oop_type:
       if (!is_unloaded) {
-        // Unload check
-        oop_Relocation* r = iter.oop_reloc();
-        // Traverse those oops directly embedded in the code.
-        // Other oops (oop_index>0) are seen as part of scopes_oops.
-        assert(1 == (r->oop_is_immediate()) +
-               (r->oop_addr() >= oops_begin() && r->oop_addr() < oops_end()),
-               "oop must be found in exactly one place");
-        if (r->oop_is_immediate() && r->oop_value() != NULL) {
-          if (can_unload(is_alive, r->oop_addr(), unloading_occurred)) {
-            is_unloaded = true;
-          }
-        }
+        is_unloaded = unload_if_dead_at(&iter, is_alive, unloading_occurred);
       }
       break;

+    case relocInfo::metadata_type:
+      if (mark_metadata_on_stack) {
+        mark_metadata_on_stack_at(&iter);
+      }
     }
   }

+  if (mark_metadata_on_stack) {
+    mark_metadata_on_stack_non_relocs();
+  }
+
   if (is_unloaded) {
     return postponed;
   }
@@ -2065,7 +2124,7 @@ void nmethod::metadata_do(void f(Metadata*)) {
   while (iter.next()) {
     if (iter.type() == relocInfo::metadata_type ) {
       metadata_Relocation* r = iter.metadata_reloc();
-      // In this lmetadata, we must only follow those metadatas directly embedded in
+      // In this metadata, we must only follow those metadatas directly embedded in
       // the code.  Other metadatas (oop_index>0) are seen as part of
       // the metadata section below.
       assert(1 == (r->metadata_is_immediate()) +
@@ -2099,7 +2158,7 @@ void nmethod::metadata_do(void f(Metadata*)) {
     f(md);
   }

-  // Call function Method*, not embedded in these other places.
+  // Visit metadata not embedded in the other places.
   if (_method != NULL) f(_method);
 }
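Taken together, these nmethod changes fold the new metadata recording into the single relocation walk that do_unloading_parallel() already performs, instead of walking the code cache a second time. A simplified outline of the resulting dispatch (illustrative only; the inline-cache cleaning and the postponed bookkeeping are elided):

    RelocIterator iter(this, low_boundary);
    while (iter.next()) {
      switch (iter.type()) {
        case relocInfo::virtual_call_type:
          // clean inline caches whose cached metadata is dead; when a class
          // was redefined, first mark the cached metadata as on-stack
          break;
        case relocInfo::oop_type:
          // unload check for immediate oops (unload_if_dead_at)
          break;
        case relocInfo::metadata_type:
          // piggy-backed: record immediate metadata as on-stack
          break;
      }
    }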
@@ -615,9 +615,16 @@ public:
   // The parallel versions are used by G1.
   bool do_unloading_parallel(BoolObjectClosure* is_alive, bool unloading_occurred);
   void do_unloading_parallel_postponed(BoolObjectClosure* is_alive, bool unloading_occurred);

+ private:
   // Unload a nmethod if the *root object is dead.
   bool can_unload(BoolObjectClosure* is_alive, oop* root, bool unloading_occurred);
+  bool unload_if_dead_at(RelocIterator *iter_at_oop, BoolObjectClosure* is_alive, bool unloading_occurred);
+
+  void mark_metadata_on_stack_at(RelocIterator* iter_at_metadata);
+  void mark_metadata_on_stack_non_relocs();
+
+ public:
   void preserve_callee_argument_oops(frame fr, const RegisterMap *reg_map,
                                      OopClosure* f);
   void oops_do(OopClosure* f) { oops_do(f, false); }
@@ -23,6 +23,7 @@
  */

 #include "precompiled.hpp"
+#include "classfile/metadataOnStackMark.hpp"
 #include "classfile/symbolTable.hpp"
 #include "code/codeCache.hpp"
 #include "gc_implementation/g1/concurrentMark.inline.hpp"
@@ -2564,17 +2565,27 @@ void ConcurrentMark::weakRefsWork(bool clear_all_soft_refs) {
     G1CMTraceTime trace("Unloading", G1Log::finer());

     if (ClassUnloadingWithConcurrentMark) {
+      // Cleaning of klasses depends on correct information from MetadataOnStackMark. The CodeCache::mark_on_stack
+      // part is too slow to be done serially, so it is handled during the weakRefsWorkParallelPart phase.
+      // Defer the cleaning until we have complete on_stack data.
+      MetadataOnStackMark md_on_stack(false /* Don't visit the code cache at this point */);
+
       bool purged_classes;

       {
         G1CMTraceTime trace("System Dictionary Unloading", G1Log::finest());
-        purged_classes = SystemDictionary::do_unloading(&g1_is_alive);
+        purged_classes = SystemDictionary::do_unloading(&g1_is_alive, false /* Defer klass cleaning */);
       }

       {
         G1CMTraceTime trace("Parallel Unloading", G1Log::finest());
         weakRefsWorkParallelPart(&g1_is_alive, purged_classes);
       }
+
+      {
+        G1CMTraceTime trace("Deallocate Metadata", G1Log::finest());
+        ClassLoaderDataGraph::free_deallocate_lists();
+      }
     }

     if (G1StringDedup::is_enabled()) {
@@ -28,6 +28,7 @@
 #endif

 #include "precompiled.hpp"
+#include "classfile/metadataOnStackMark.hpp"
 #include "classfile/stringTable.hpp"
 #include "code/codeCache.hpp"
 #include "code/icBuffer.hpp"
@@ -5013,6 +5014,10 @@ private:
         clean_nmethod(claimed_nmethods[i]);
       }
     }
+
+    // The nmethod cleaning helps out and does the CodeCache part of MetadataOnStackMark.
+    // Need to retire the buffers now that this thread has stopped cleaning nmethods.
+    MetadataOnStackMark::retire_buffer_for_thread(Thread::current());
   }

   void work_second_pass(uint worker_id) {
@@ -5065,6 +5070,9 @@ public:
       // G1 specific cleanup work that has
       // been moved here to be done in parallel.
       ik->clean_dependent_nmethods();
+      if (JvmtiExport::has_redefined_a_class()) {
+        InstanceKlass::purge_previous_versions(ik);
+      }
     }

   void work() {
@@ -5099,8 +5107,18 @@ public:
       _klass_cleaning_task(is_alive) {
   }

+  void pre_work_verification() {
+    assert(!MetadataOnStackMark::has_buffer_for_thread(Thread::current()), "Should be empty");
+  }
+
+  void post_work_verification() {
+    assert(!MetadataOnStackMark::has_buffer_for_thread(Thread::current()), "Should be empty");
+  }
+
   // The parallel work done by all worker threads.
   void work(uint worker_id) {
+    pre_work_verification();
+
     // Do first pass of code cache cleaning.
     _code_cache_task.work_first_pass(worker_id);
@@ -5119,6 +5137,8 @@ public:

     // Clean all klasses that were not unloaded.
     _klass_cleaning_task.work();
+
+    post_work_verification();
   }
 };
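The new pre/post verification hooks pin down the lifecycle of the thread-local marking buffer: every worker must start and finish a cleaning phase with no buffer attached, since anything it records mid-phase is retired to the global list before the phase ends. The invariant as a standalone sketch (worker_body is an illustrative name, not HotSpot code):

    void worker_body(Thread* self) {
      assert(!MetadataOnStackMark::has_buffer_for_thread(self), "Should be empty");

      // ... cleaning nmethods may call MetadataOnStackMark::record(md, self),
      // which lazily attaches a thread-local MetadataOnStackBuffer ...

      // Hand any partially filled buffer back to the global used list.
      MetadataOnStackMark::retire_buffer_for_thread(self);

      assert(!MetadataOnStackMark::has_buffer_for_thread(self), "Should be empty");
    }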
@@ -1779,11 +1779,22 @@ int ConstantPool::copy_cpool_bytes(int cpool_size,

 void ConstantPool::set_on_stack(const bool value) {
   if (value) {
-    _flags |= _on_stack;
+    int old_flags = *const_cast<volatile int *>(&_flags);
+    while ((old_flags & _on_stack) == 0) {
+      int new_flags = old_flags | _on_stack;
+      int result = Atomic::cmpxchg(new_flags, &_flags, old_flags);
+
+      if (result == old_flags) {
+        // Succeeded.
+        MetadataOnStackMark::record(this, Thread::current());
+        return;
+      }
+      old_flags = result;
+    }
   } else {
+    // Clearing is done single-threadedly.
     _flags &= ~_on_stack;
   }
-  if (value) MetadataOnStackMark::record(this);
 }

 // JSR 292 support for patching constant pool oops after the class is linked and
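The old `_flags |= _on_stack;` was a plain read-modify-write: two racing threads could both observe the bit clear and both call record(), pushing the same ConstantPool twice, and a concurrent update to another flag bit could be lost entirely. The CAS loop guarantees that exactly one thread sees the 0 to 1 transition and records. The same idea in portable C++11, a minimal sketch:

    #include <atomic>

    const int ON_STACK = 1;
    std::atomic<int> flags{0};

    // Returns true for exactly one caller: the thread whose CAS flips the bit.
    bool set_on_stack_once() {
      int old_flags = flags.load();
      while ((old_flags & ON_STACK) == 0) {
        // On failure compare_exchange_weak reloads old_flags, so the loop
        // re-tests the freshly observed bits before retrying.
        if (flags.compare_exchange_weak(old_flags, old_flags | ON_STACK)) {
          return true;   // this thread set the bit: record() exactly once
        }
      }
      return false;      // bit already set, someone else recorded
    }

Method::set_on_stack() below applies the same rule through AccessFlags::atomic_set_one_bit(): only the thread whose CAS actually set JVM_ACC_ON_STACK calls record().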
@@ -1862,9 +1862,12 @@ Method* Method::checked_resolve_jmethod_id(jmethodID mid) {
 void Method::set_on_stack(const bool value) {
   // Set both the method itself and its constant pool.  The constant pool
   // on stack means some method referring to it is also on the stack.
-  _access_flags.set_on_stack(value);
   constants()->set_on_stack(value);
-  if (value) MetadataOnStackMark::record(this);
+
+  bool succeeded = _access_flags.set_on_stack(value);
+  if (value && succeeded) {
+    MetadataOnStackMark::record(this, Thread::current());
+  }
 }

 // Called when the class loader is unloaded to make all methods weak.
@@ -3861,6 +3861,7 @@ void TestKlass_test();
 void TestBitMap_test();
 void TestAsUtf8();
 void Test_linked_list();
+void TestChunkedList_test();
 #if INCLUDE_ALL_GCS
 void TestOldFreeSpaceCalculation_test();
 void TestG1BiasedArray_test();
@@ -3894,6 +3895,7 @@ void execute_internal_vm_tests() {
   run_unit_test(TestAsUtf8());
   run_unit_test(ObjectMonitor::sanity_checks());
   run_unit_test(Test_linked_list());
+  run_unit_test(TestChunkedList_test());
 #if INCLUDE_VM_STRUCTS
   run_unit_test(VMStructs::test());
 #endif
@@ -201,6 +201,8 @@ Thread::Thread() {
   // This initial value ==> never claimed.
   _oops_do_parity = 0;

+  _metadata_on_stack_buffer = NULL;
+
   // the handle mark links itself to last_handle_mark
   new HandleMark(this);
@@ -42,11 +42,10 @@
 #include "runtime/threadLocalStorage.hpp"
 #include "runtime/thread_ext.hpp"
 #include "runtime/unhandledOops.hpp"
-#include "utilities/macros.hpp"
-
 #include "trace/traceBackend.hpp"
 #include "trace/traceMacros.hpp"
 #include "utilities/exceptions.hpp"
+#include "utilities/macros.hpp"
 #include "utilities/top.hpp"
 #if INCLUDE_ALL_GCS
 #include "gc_implementation/g1/dirtyCardQueue.hpp"
@@ -83,6 +82,10 @@ class GCTaskQueue;
 class ThreadClosure;
 class IdealGraphPrinter;

+class Metadata;
+template <class T, MEMFLAGS F> class ChunkedList;
+typedef ChunkedList<Metadata*, mtInternal> MetadataOnStackBuffer;
+
 DEBUG_ONLY(class ResourceMark;)

 class WorkerThread;
@@ -255,6 +258,9 @@ class Thread: public ThreadShadow {
   jlong _allocated_bytes;          // Cumulative number of bytes allocated on
                                    // the Java heap

+  // Thread-local buffer used by MetadataOnStackMark.
+  MetadataOnStackBuffer* _metadata_on_stack_buffer;
+
   TRACE_DATA _trace_data;          // Thread-local data for tracing

   ThreadExt _ext;
@@ -490,7 +496,10 @@ class Thread: public ThreadShadow {
   // creation fails due to lack of memory, too many threads etc.
   bool set_as_starting_thread();

+  void set_metadata_on_stack_buffer(MetadataOnStackBuffer* buffer) { _metadata_on_stack_buffer = buffer; }
+  MetadataOnStackBuffer* metadata_on_stack_buffer() const { return _metadata_on_stack_buffer; }
+
  protected:
   // OS data associated with the thread
   OSThread* _osthread;  // Platform-specific thread information
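thread.hpp gets the MetadataOnStackBuffer typedef via a forward declaration of the ChunkedList template rather than an include, keeping a very widely included header light; only a pointer is stored, so no definition is needed. The general pattern, as a self-contained sketch (FixedBuffer and Holder are illustrative names):

    // A class template can be forward-declared...
    template <class T, int N> class FixedBuffer;

    // ...and a typedef of a specific instantiation needs no definition either.
    typedef FixedBuffer<int, 64> IntBuffer;

    class Holder {
      IntBuffer* _buffer;   // OK: pointer to an incomplete type
     public:
      Holder() : _buffer(nullptr) {}
      void set_buffer(IntBuffer* b) { _buffer = b; }
      IntBuffer* buffer() const { return _buffer; }
    };

Only code that actually dereferences the buffer needs the full chunkedList.hpp definition.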
@@ -47,6 +47,21 @@ void AccessFlags::atomic_clear_bits(jint bits) {
   } while(f != old_flags);
 }

+// Returns true iff this thread succeeded setting the bit.
+bool AccessFlags::atomic_set_one_bit(jint bit) {
+  // Atomically update the flags with the bit given
+  jint old_flags, new_flags, f;
+  bool is_setting_bit = false;
+  do {
+    old_flags = _flags;
+    new_flags = old_flags | bit;
+    is_setting_bit = old_flags != new_flags;
+    f = Atomic::cmpxchg(new_flags, &_flags, old_flags);
+  } while(f != old_flags);
+
+  return is_setting_bit;
+}
+
 #if !defined(PRODUCT) || INCLUDE_JVMTI

 void AccessFlags::print_on(outputStream* st) const {
@@ -172,6 +172,7 @@ class AccessFlags VALUE_OBJ_CLASS_SPEC {

   // Atomic update of flags
   void atomic_set_bits(jint bits);
+  bool atomic_set_one_bit(jint bit);
   void atomic_clear_bits(jint bits);

  private:
@@ -233,12 +234,13 @@ class AccessFlags VALUE_OBJ_CLASS_SPEC {
     atomic_set_bits(JVM_ACC_FIELD_HAS_GENERIC_SIGNATURE);
   }

-  void set_on_stack(const bool value)
+  bool set_on_stack(const bool value)
   {
     if (value) {
-      atomic_set_bits(JVM_ACC_ON_STACK);
+      return atomic_set_one_bit(JVM_ACC_ON_STACK);
     } else {
       atomic_clear_bits(JVM_ACC_ON_STACK);
+      return true;  // Ignored
     }
   }
   // Conversion
hotspot/src/share/vm/utilities/chunkedList.cpp (new file)
@@ -0,0 +1,109 @@
+/*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "utilities/chunkedList.hpp"
+#include "utilities/debug.hpp"
+
+/////////////// Unit tests ///////////////
+
+#ifndef PRODUCT
+
+template <typename T>
+class TestChunkedList {
+  typedef ChunkedList<T, mtOther> ChunkedListT;
+
+ public:
+  static void testEmpty() {
+    ChunkedListT buffer;
+    assert(buffer.size() == 0, "assert");
+  }
+
+  static void testFull() {
+    ChunkedListT buffer;
+    for (uintptr_t i = 0; i < ChunkedListT::BufferSize; i++) {
+      buffer.push((T)i);
+    }
+    assert(buffer.size() == ChunkedListT::BufferSize, "assert");
+    assert(buffer.is_full(), "assert");
+  }
+
+  static void testSize() {
+    ChunkedListT buffer;
+    for (uintptr_t i = 0; i < ChunkedListT::BufferSize; i++) {
+      assert(buffer.size() == i, "assert");
+      buffer.push((T)i);
+      assert(buffer.size() == i + 1, "assert");
+    }
+  }
+
+  static void testClear() {
+    ChunkedListT buffer;
+
+    buffer.clear();
+    assert(buffer.size() == 0, "assert");
+
+    for (uintptr_t i = 0; i < ChunkedListT::BufferSize / 2; i++) {
+      buffer.push((T)i);
+    }
+    buffer.clear();
+    assert(buffer.size() == 0, "assert");
+
+    for (uintptr_t i = 0; i < ChunkedListT::BufferSize; i++) {
+      buffer.push((T)i);
+    }
+    buffer.clear();
+    assert(buffer.size() == 0, "assert");
+  }
+
+  static void testAt() {
+    ChunkedListT buffer;
+
+    for (uintptr_t i = 0; i < ChunkedListT::BufferSize; i++) {
+      buffer.push((T)i);
+      assert(buffer.at(i) == (T)i, "assert");
+    }
+
+    for (uintptr_t i = 0; i < ChunkedListT::BufferSize; i++) {
+      assert(buffer.at(i) == (T)i, "assert");
+    }
+  }
+
+  static void test() {
+    testEmpty();
+    testFull();
+    testSize();
+    testClear();
+    testAt();
+  }
+};
+
+class Metadata;
+
+void TestChunkedList_test() {
+  TestChunkedList<Metadata*>::test();
+  TestChunkedList<size_t>::test();
+}
+
+#endif
hotspot/src/share/vm/utilities/chunkedList.hpp (new file)
@@ -0,0 +1,81 @@
+/*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_UTILITIES_CHUNKED_LIST_HPP
+#define SHARE_VM_UTILITIES_CHUNKED_LIST_HPP
+
+#include "memory/allocation.hpp"
+#include "utilities/debug.hpp"
+
+template <class T, MEMFLAGS F> class ChunkedList : public CHeapObj<F> {
+  template <class U> friend class TestChunkedList;
+
+  static const size_t BufferSize = 64;
+
+  T  _values[BufferSize];
+  T* _top;
+
+  ChunkedList<T, F>* _next_used;
+  ChunkedList<T, F>* _next_free;
+
+  T const * end() const {
+    return &_values[BufferSize];
+  }
+
+ public:
+  ChunkedList<T, F>() : _top(_values), _next_used(NULL), _next_free(NULL) {}
+
+  bool is_full() const {
+    return _top == end();
+  }
+
+  void clear() {
+    _top = _values;
+    // Don't clear the next pointers since that would interfere
+    // with other threads trying to iterate through the lists.
+  }
+
+  void push(T m) {
+    assert(!is_full(), "Buffer is full");
+    *_top = m;
+    _top++;
+  }
+
+  void set_next_used(ChunkedList<T, F>* buffer) { _next_used = buffer; }
+  void set_next_free(ChunkedList<T, F>* buffer) { _next_free = buffer; }
+
+  ChunkedList<T, F>* next_used() const { return _next_used; }
+  ChunkedList<T, F>* next_free() const { return _next_free; }
+
+  size_t size() const {
+    return pointer_delta(_top, _values, sizeof(T));
+  }
+
+  T at(size_t i) {
+    assert(i < size(), err_msg("IOOBE i: " SIZE_FORMAT " size(): " SIZE_FORMAT, i, size()));
+    return _values[i];
+  }
+};
+
+#endif // SHARE_VM_UTILITIES_CHUNKED_LIST_HPP
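ChunkedList stores no element count; size() derives it from the top pointer, where pointer_delta(_top, _values, sizeof(T)) is simply (_top - _values) expressed in whole elements. The core in portable standard C++, a minimal sketch (MiniChunk is illustrative, not HotSpot code):

    #include <cassert>
    #include <cstddef>

    template <class T>
    class MiniChunk {
      static const std::size_t BufferSize = 64;
      T  _values[BufferSize];
      T* _top;                    // next free slot; _top == end means full
     public:
      MiniChunk() : _top(_values) {}
      bool is_full() const { return _top == _values + BufferSize; }
      void push(T v) { assert(!is_full()); *_top++ = v; }
      // Pointer subtraction scales by sizeof(T) automatically; this is what
      // pointer_delta(_top, _values, sizeof(T)) computes in the real class.
      std::size_t size() const { return static_cast<std::size_t>(_top - _values); }
      T at(std::size_t i) const { assert(i < size()); return _values[i]; }
    };

The intrusive next_used/next_free links in the real class let a full chunk move between the global lists without copying, and clear() deliberately leaves those links untouched so concurrent readers walking a list are not broken.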