Mirror of https://github.com/openjdk/jdk.git
8206009: Move CDS java heap object archiving code to heapShared.hpp and heapShared.cpp
Restructure and cleanup java heap object archiving code. Reviewed-by: coleenp, iklam
parent 4f4a2385c5
commit a2ad8f419f

22 changed files with 350 additions and 307 deletions
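
In short, this change makes heapShared.cpp the home of the CDS Java heap object archiving code. The large first hunk below carries the moved implementations (find_archived_heap_object, archive_heap_object, materialize_archived_object, archive_klass_objects, the closed/open archive region copy routines, and the narrow-oop decoding setup), and the remaining hunks adjust code already in heapShared.cpp: the MetaspaceShared:: entry points are replaced by their HeapShared counterparts, MetaspaceShared::is_archive_object() becomes HeapShared::is_archived_object(), and archive_static_fields() is renamed to archive_object_subgraphs(). As the log message in archive_java_heap_objects() notes, heap object archiving still requires UseG1GC, UseCompressedOops and UseCompressedClassPointers.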

@@ -24,25 +24,200 @@

#include "precompiled.hpp"
#include "classfile/javaClasses.inline.hpp"
#include "classfile/stringTable.hpp"
#include "classfile/symbolTable.hpp"
#include "classfile/vmSymbols.hpp"
#include "logging/log.hpp"
#include "logging/logMessage.hpp"
#include "logging/logStream.hpp"
#include "memory/filemap.hpp"
#include "memory/heapShared.inline.hpp"
#include "memory/iterator.inline.hpp"
#include "memory/metadataFactory.hpp"
#include "memory/metaspaceClosure.hpp"
#include "memory/metaspaceShared.inline.hpp"
#include "memory/resourceArea.hpp"
#include "oops/compressedOops.inline.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/fieldDescriptor.inline.hpp"
#include "runtime/safepointVerifiers.hpp"
#include "utilities/bitMap.inline.hpp"
#if INCLUDE_G1GC
#include "gc/g1/g1CollectedHeap.hpp"
#endif

#if INCLUDE_CDS_JAVA_HEAP

bool HeapShared::_open_archive_heap_region_mapped = false;
bool HeapShared::_archive_heap_region_fixed = false;

address HeapShared::_narrow_oop_base;
int     HeapShared::_narrow_oop_shift;

////////////////////////////////////////////////////////////////
//
// Java heap object archiving support
//
////////////////////////////////////////////////////////////////
void HeapShared::fixup_mapped_heap_regions() {
  FileMapInfo *mapinfo = FileMapInfo::current_info();
  mapinfo->fixup_mapped_heap_regions();
  set_archive_heap_region_fixed();
}

unsigned HeapShared::oop_hash(oop const& p) {
  assert(!p->mark()->has_bias_pattern(),
         "this object should never have been locked"); // so identity_hash won't safepoint
  unsigned hash = (unsigned)p->identity_hash();
  return hash;
}

HeapShared::ArchivedObjectCache* HeapShared::_archived_object_cache = NULL;
oop HeapShared::find_archived_heap_object(oop obj) {
  assert(DumpSharedSpaces, "dump-time only");
  ArchivedObjectCache* cache = archived_object_cache();
  oop* p = cache->get(obj);
  if (p != NULL) {
    return *p;
  } else {
    return NULL;
  }
}

oop HeapShared::archive_heap_object(oop obj, Thread* THREAD) {
  assert(DumpSharedSpaces, "dump-time only");

  oop ao = find_archived_heap_object(obj);
  if (ao != NULL) {
    // already archived
    return ao;
  }

  int len = obj->size();
  if (G1CollectedHeap::heap()->is_archive_alloc_too_large(len)) {
    log_debug(cds, heap)("Cannot archive, object (" PTR_FORMAT ") is too large: " SIZE_FORMAT,
                         p2i(obj), (size_t)obj->size());
    return NULL;
  }

  // Pre-compute object identity hash at CDS dump time.
  obj->identity_hash();

  oop archived_oop = (oop)G1CollectedHeap::heap()->archive_mem_allocate(len);
  if (archived_oop != NULL) {
    Copy::aligned_disjoint_words((HeapWord*)obj, (HeapWord*)archived_oop, len);
    MetaspaceShared::relocate_klass_ptr(archived_oop);
    ArchivedObjectCache* cache = archived_object_cache();
    cache->put(obj, archived_oop);
    log_debug(cds, heap)("Archived heap object " PTR_FORMAT " ==> " PTR_FORMAT,
                         p2i(obj), p2i(archived_oop));
  } else {
    log_error(cds, heap)(
      "Cannot allocate space for object " PTR_FORMAT " in archived heap region",
      p2i(obj));
    vm_exit(1);
  }
  return archived_oop;
}

oop HeapShared::materialize_archived_object(narrowOop v) {
  assert(archive_heap_region_fixed(),
         "must be called after archive heap regions are fixed");
  if (!CompressedOops::is_null(v)) {
    oop obj = HeapShared::decode_from_archive(v);
    return G1CollectedHeap::heap()->materialize_archived_object(obj);
  }
  return NULL;
}

void HeapShared::archive_klass_objects(Thread* THREAD) {
  GrowableArray<Klass*>* klasses = MetaspaceShared::collected_klasses();
  assert(klasses != NULL, "sanity");
  for (int i = 0; i < klasses->length(); i++) {
    Klass* k = klasses->at(i);

    // archive mirror object
    java_lang_Class::archive_mirror(k, CHECK);

    // archive the resolved_references array
    if (k->is_instance_klass()) {
      InstanceKlass* ik = InstanceKlass::cast(k);
      ik->constants()->archive_resolved_references(THREAD);
    }
  }
}

void HeapShared::archive_java_heap_objects(GrowableArray<MemRegion> *closed,
                                           GrowableArray<MemRegion> *open) {
  if (!is_heap_object_archiving_allowed()) {
    if (log_is_enabled(Info, cds)) {
      log_info(cds)(
        "Archived java heap is not supported as UseG1GC, "
        "UseCompressedOops and UseCompressedClassPointers are required."
        "Current settings: UseG1GC=%s, UseCompressedOops=%s, UseCompressedClassPointers=%s.",
        BOOL_TO_STR(UseG1GC), BOOL_TO_STR(UseCompressedOops),
        BOOL_TO_STR(UseCompressedClassPointers));
    }
    return;
  }

  {
    NoSafepointVerifier nsv;

    // Cache for recording where the archived objects are copied to
    create_archived_object_cache();

    tty->print_cr("Dumping objects to closed archive heap region ...");
    NOT_PRODUCT(StringTable::verify());
    copy_closed_archive_heap_objects(closed);

    tty->print_cr("Dumping objects to open archive heap region ...");
    copy_open_archive_heap_objects(open);

    destroy_archived_object_cache();
  }

  G1HeapVerifier::verify_archive_regions();
}

void HeapShared::copy_closed_archive_heap_objects(
                                    GrowableArray<MemRegion> * closed_archive) {
  assert(is_heap_object_archiving_allowed(), "Cannot archive java heap objects");

  Thread* THREAD = Thread::current();
  G1CollectedHeap::heap()->begin_archive_alloc_range();

  // Archive interned string objects
  StringTable::write_to_archive();

  G1CollectedHeap::heap()->end_archive_alloc_range(closed_archive,
                                                   os::vm_allocation_granularity());
}

void HeapShared::copy_open_archive_heap_objects(
                                    GrowableArray<MemRegion> * open_archive) {
  assert(is_heap_object_archiving_allowed(), "Cannot archive java heap objects");

  Thread* THREAD = Thread::current();
  G1CollectedHeap::heap()->begin_archive_alloc_range(true /* open */);

  java_lang_Class::archive_basic_type_mirrors(THREAD);

  archive_klass_objects(THREAD);

  archive_object_subgraphs(THREAD);

  G1CollectedHeap::heap()->end_archive_alloc_range(open_archive,
                                                   os::vm_allocation_granularity());
}

void HeapShared::init_narrow_oop_decoding(address base, int shift) {
  _narrow_oop_base = base;
  _narrow_oop_shift = shift;
}

//
// Subgraph archiving support
//
HeapShared::DumpTimeKlassSubGraphInfoTable* HeapShared::_dump_time_subgraph_info_table = NULL;
HeapShared::RunTimeKlassSubGraphInfoTable   HeapShared::_run_time_subgraph_info_table;

@@ -214,7 +389,7 @@ void HeapShared::serialize_subgraph_info_table_header(SerializeClosure* soc) {
}

void HeapShared::initialize_from_archived_subgraph(Klass* k) {
-  if (!MetaspaceShared::open_archive_heap_region_mapped()) {
+  if (!open_archive_heap_region_mapped()) {
    return; // nothing to do
  }
  assert(!DumpSharedSpaces, "Should not be called with DumpSharedSpaces");

@@ -274,8 +449,7 @@ void HeapShared::initialize_from_archived_subgraph(Klass* k) {
      // The object referenced by the field becomes 'known' by GC from this
      // point. All objects in the subgraph reachable from the object are
      // also 'known' by GC.
-      oop v = MetaspaceShared::materialize_archived_object(
-          entry_field_records->at(i+1));
+      oop v = materialize_archived_object(entry_field_records->at(i+1));
      m->obj_field_put(field_offset, v);
      i += 2;

@@ -310,7 +484,7 @@ class WalkOopAndArchiveClosure: public BasicOopIterateClosure {
  template <class T> void do_oop_work(T *p) {
    oop obj = RawAccess<>::oop_load(p);
    if (!CompressedOops::is_null(obj)) {
-      assert(!MetaspaceShared::is_archive_object(obj),
+      assert(!HeapShared::is_archived_object(obj),
             "original objects must not point to archived objects");

      size_t field_delta = pointer_delta(p, _orig_referencing_obj, sizeof(char));

@@ -329,7 +503,7 @@ class WalkOopAndArchiveClosure: public BasicOopIterateClosure {

      oop archived = HeapShared::archive_reachable_objects_from(_level + 1, _subgraph_info, obj, THREAD);
      assert(archived != NULL, "VM should have exited with unarchivable objects for _level > 1");
-      assert(MetaspaceShared::is_archive_object(archived), "must be");
+      assert(HeapShared::is_archived_object(archived), "must be");

      if (!_record_klasses_only) {
        // Update the reference in the archived copy of the referencing object.

@@ -347,7 +521,7 @@ class WalkOopAndArchiveClosure: public BasicOopIterateClosure {
// (3) Record the klasses of all orig_obj and all reachable objects.
oop HeapShared::archive_reachable_objects_from(int level, KlassSubGraphInfo* subgraph_info, oop orig_obj, TRAPS) {
  assert(orig_obj != NULL, "must be");
-  assert(!MetaspaceShared::is_archive_object(orig_obj), "sanity");
+  assert(!is_archived_object(orig_obj), "sanity");

  // java.lang.Class instances cannot be included in an archived
  // object sub-graph.

@@ -356,7 +530,7 @@ oop HeapShared::archive_reachable_objects_from(int level, KlassSubGraphInfo* sub
    vm_exit(1);
  }

-  oop archived_obj = MetaspaceShared::find_archived_heap_object(orig_obj);
+  oop archived_obj = find_archived_heap_object(orig_obj);
  if (java_lang_String::is_instance(orig_obj) && archived_obj != NULL) {
    // To save time, don't walk strings that are already archived. They just contain
    // pointers to a type array, whose klass doesn't need to be recorded.

@@ -373,7 +547,7 @@ oop HeapShared::archive_reachable_objects_from(int level, KlassSubGraphInfo* sub
  bool record_klasses_only = (archived_obj != NULL);
  if (archived_obj == NULL) {
    ++_num_new_archived_objs;
-    archived_obj = MetaspaceShared::archive_heap_object(orig_obj, THREAD);
+    archived_obj = archive_heap_object(orig_obj, THREAD);
    if (archived_obj == NULL) {
      // Skip archiving the sub-graph referenced from the current entry field.
      ResourceMark rm;

@@ -447,7 +621,7 @@ void HeapShared::archive_reachable_objects_from_static_field(InstanceKlass *k,
  assert(k->is_shared_boot_class(), "must be boot class");

  oop m = k->java_mirror();
-  oop archived_m = MetaspaceShared::find_archived_heap_object(m);
+  oop archived_m = find_archived_heap_object(m);
  if (CompressedOops::is_null(archived_m)) {
    return;
  }

@@ -508,7 +682,7 @@ void HeapShared::verify_subgraph_from_static_field(InstanceKlass* k, int field_o
  assert(k->is_shared_boot_class(), "must be boot class");

  oop m = k->java_mirror();
-  oop archived_m = MetaspaceShared::find_archived_heap_object(m);
+  oop archived_m = find_archived_heap_object(m);
  if (CompressedOops::is_null(archived_m)) {
    return;
  }

@@ -519,7 +693,7 @@ void HeapShared::verify_subgraph_from_static_field(InstanceKlass* k, int field_o
}

void HeapShared::verify_subgraph_from(oop orig_obj) {
-  oop archived_obj = MetaspaceShared::find_archived_heap_object(orig_obj);
+  oop archived_obj = find_archived_heap_object(orig_obj);
  if (archived_obj == NULL) {
    // It's OK for the root of a subgraph to be not archived. See comments in
    // archive_reachable_objects_from().

@@ -546,11 +720,11 @@ void HeapShared::verify_reachable_objects_from(oop obj, bool is_archived) {
  set_has_been_seen_during_subgraph_recording(obj);

  if (is_archived) {
-    assert(MetaspaceShared::is_archive_object(obj), "must be");
-    assert(MetaspaceShared::find_archived_heap_object(obj) == NULL, "must be");
+    assert(is_archived_object(obj), "must be");
+    assert(find_archived_heap_object(obj) == NULL, "must be");
  } else {
-    assert(!MetaspaceShared::is_archive_object(obj), "must be");
-    assert(MetaspaceShared::find_archived_heap_object(obj) != NULL, "must be");
+    assert(!is_archived_object(obj), "must be");
+    assert(find_archived_heap_object(obj) != NULL, "must be");
  }

  VerifySharedOopClosure walker(is_archived);

@@ -670,7 +844,7 @@ void HeapShared::init_archivable_static_fields(Thread* THREAD) {
  }
}

-void HeapShared::archive_static_fields(Thread* THREAD) {
+void HeapShared::archive_object_subgraphs(Thread* THREAD) {
  // For each class X that has one or more archived fields:
  // [1] Dump the subgraph of each archived field
  // [2] Create a list of all the class of the objects that can be reached

@@ -767,11 +941,6 @@ ResourceBitMap HeapShared::calculate_oopmap(MemRegion region) {
  return oopmap;
}

-void HeapShared::init_narrow_oop_decoding(address base, int shift) {
-  _narrow_oop_base = base;
-  _narrow_oop_shift = shift;
-}

// Patch all the embedded oop pointers inside an archived heap region,
// to be consistent with the runtime oop encoding.
class PatchEmbeddedPointers: public BitMapClosure {
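
The first hunk records a narrow-oop base and shift via HeapShared::init_narrow_oop_decoding(), and materialize_archived_object() later converts archived narrow oops back into heap addresses through HeapShared::decode_from_archive(), whose body is not part of this diff. Below is a minimal standalone sketch of that decoding, assuming the usual compressed-oops formula base + (narrow_value << shift); all names, types and values are illustrative, not JDK code.

// Illustrative sketch only (not JDK code): decode an archived narrow oop
// using a recorded base/shift, in the spirit of init_narrow_oop_decoding().
#include <cstdint>
#include <cstdio>

static uintptr_t g_narrow_oop_base  = 0;  // hypothetical stand-ins for
static int       g_narrow_oop_shift = 0;  // _narrow_oop_base / _narrow_oop_shift

static void init_narrow_oop_decoding(uintptr_t base, int shift) {
  g_narrow_oop_base  = base;
  g_narrow_oop_shift = shift;
}

// Assumed decoding: full address = base + ((uintptr_t)narrow_value << shift).
static uintptr_t decode_from_archive(uint32_t narrow) {
  return g_narrow_oop_base + ((uintptr_t)narrow << g_narrow_oop_shift);
}

int main() {
  init_narrow_oop_decoding(0x700000000ULL, 3);  // hypothetical dump-time base/shift
  uint32_t narrow = 0x12345;                    // hypothetical archived narrow oop
  printf("decoded address: 0x%llx\n", (unsigned long long)decode_from_archive(narrow));
  return 0;
}

The real code additionally guards against null narrow oops and requires the archived heap regions to have been fixed up first, as the asserts in materialize_archived_object() show.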