8250989: Consolidate buffer allocation code for CDS static/dynamic dumping
Reviewed-by: ccheung, coleenp
parent 0e18634b6a
commit c5ff454481
42 changed files with 701 additions and 918 deletions
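As the hunks below show, the patch drops the DynamicDumpSharedSpaces special cases (DynamicArchive::buffer_to_target / original_to_target) and routes both static and dynamic dumps through ArchiveBuilder, which encodes archived pointers as 32-bit offsets (buffer_to_offset_u4 / any_to_offset_u4, bounded by MAX_SHARED_DELTA) and relocates the whole output buffer to the requested address in one pass. A standalone sketch of that offset encoding, using simplified stand-in types and made-up addresses rather than the HotSpot sources:

#include <cassert>
#include <cstdint>
#include <cstdio>

// Simplified stand-ins for the types used in this patch (not the real HotSpot API).
typedef uint32_t u4;
const uintptr_t MAX_SHARED_DELTA = 0x7FFFFFFF;

// Mimics the idea of ArchiveBuilder::any_to_offset_u4(): encode a dumped address as a
// 32-bit offset from the address where the archive is requested to be mapped.
static u4 any_to_offset_u4(uintptr_t requested_base, uintptr_t p) {
  assert(p >= requested_base);
  uintptr_t offset = p - requested_base;
  assert(offset <= MAX_SHARED_DELTA);  // archive contents must stay under ~2 GB
  return (u4)offset;
}

// At run time the reverse mapping is just base + offset, regardless of whether the
// entry was written during a static or a dynamic dump.
static uintptr_t offset_to_runtime_addr(uintptr_t runtime_base, u4 offset) {
  return runtime_base + offset;
}

int main() {
  uintptr_t requested_base = 0x800000000;   // assumed dump-time base (illustrative)
  uintptr_t some_symbol    = requested_base + 0x1234;
  u4 off = any_to_offset_u4(requested_base, some_symbol);

  uintptr_t runtime_base = 0x7f0000000000;  // archive may be mapped elsewhere (ASLR)
  printf("offset=0x%x runtime addr=0x%lx\n", off,
         (unsigned long)offset_to_runtime_addr(runtime_base, off));
  return 0;
}

Because the offset is relative to the archive base, the same encoding works whether the entry was written by a static -Xshare:dump or by a dynamic dump.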
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2020, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2021, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it

@@ -198,13 +198,8 @@ void SimpleCompactHashtable::init(address base_address, u4 entry_count, u4 bucke
   _bucket_count = bucket_count;
   _entry_count = entry_count;
   _base_address = base_address;
-  if (DynamicDumpSharedSpaces) {
-    _buckets = DynamicArchive::buffer_to_target(buckets);
-    _entries = DynamicArchive::buffer_to_target(entries);
-  } else {
-    _buckets = buckets;
-    _entries = entries;
-  }
+  _buckets = buckets;
+  _entries = entries;
 }

 size_t SimpleCompactHashtable::calculate_header_size() {

@@ -41,6 +41,7 @@
 #include "interpreter/linkResolver.hpp"
 #include "logging/log.hpp"
 #include "logging/logStream.hpp"
+#include "memory/archiveBuilder.hpp"
 #include "memory/heapShared.inline.hpp"
 #include "memory/metaspaceShared.hpp"
 #include "memory/oopFactory.hpp"

@@ -855,7 +856,9 @@ static void initialize_static_field(fieldDescriptor* fd, Handle mirror, TRAPS) {
       break;
     case T_OBJECT:
       {
-        assert(fd->signature() == vmSymbols::string_signature(),
+        // Can't use vmSymbols::string_signature() as fd->signature() may have been relocated
+        // during DumpSharedSpaces
+        assert(fd->signature()->equals("Ljava/lang/String;"),
                "just checking");
         if (DumpSharedSpaces && HeapShared::is_archived_object(mirror())) {
           // Archive the String field and update the pointer.

@@ -1122,6 +1125,21 @@ class ResetMirrorField: public FieldClosure {
   }
 };

+static void set_klass_field_in_archived_mirror(oop mirror_obj, int offset, Klass* k) {
+  assert(java_lang_Class::is_instance(mirror_obj), "must be");
+  // this is the copy of k in the output buffer
+  Klass* copy = ArchiveBuilder::get_relocated_klass(k);
+
+  // This is the address of k, if the archive is loaded at the requested location
+  Klass* def = ArchiveBuilder::current()->to_requested(copy);
+
+  log_debug(cds, heap, mirror)(
+    "Relocate mirror metadata field at %d from " PTR_FORMAT " ==> " PTR_FORMAT,
+    offset, p2i(k), p2i(def));
+
+  mirror_obj->metadata_field_put(offset, def);
+}
+
 void java_lang_Class::archive_basic_type_mirrors(TRAPS) {
   assert(HeapShared::is_heap_object_archiving_allowed(),
          "HeapShared::is_heap_object_archiving_allowed() must be true");

@@ -1136,8 +1154,7 @@ void java_lang_Class::archive_basic_type_mirrors(TRAPS) {
       Klass *ak = (Klass*)(archived_m->metadata_field(_array_klass_offset));
       assert(ak != NULL || t == T_VOID, "should not be NULL");
       if (ak != NULL) {
-        Klass *reloc_ak = MetaspaceShared::get_relocated_klass(ak, true);
-        archived_m->metadata_field_put(_array_klass_offset, reloc_ak);
+        set_klass_field_in_archived_mirror(archived_m, _array_klass_offset, ak);
       }

       // Clear the fields. Just to be safe

@@ -1259,21 +1276,13 @@ oop java_lang_Class::process_archived_mirror(Klass* k, oop mirror,
   // The archived mirror's field at _klass_offset is still pointing to the original
   // klass. Updated the field in the archived mirror to point to the relocated
   // klass in the archive.
-  Klass *reloc_k = MetaspaceShared::get_relocated_klass(as_Klass(mirror), true);
-  log_debug(cds, heap, mirror)(
-    "Relocate mirror metadata field at _klass_offset from " PTR_FORMAT " ==> " PTR_FORMAT,
-    p2i(as_Klass(mirror)), p2i(reloc_k));
-  archived_mirror->metadata_field_put(_klass_offset, reloc_k);
+  set_klass_field_in_archived_mirror(archived_mirror, _klass_offset, as_Klass(mirror));

   // The field at _array_klass_offset is pointing to the original one dimension
   // higher array klass if exists. Relocate the pointer.
   Klass *arr = array_klass_acquire(mirror);
   if (arr != NULL) {
-    Klass *reloc_arr = MetaspaceShared::get_relocated_klass(arr, true);
-    log_debug(cds, heap, mirror)(
-      "Relocate mirror metadata field at _array_klass_offset from " PTR_FORMAT " ==> " PTR_FORMAT,
-      p2i(arr), p2i(reloc_arr));
-    archived_mirror->metadata_field_put(_array_klass_offset, reloc_arr);
+    set_klass_field_in_archived_mirror(archived_mirror, _array_klass_offset, arr);
   }
   return archived_mirror;
 }
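The set_klass_field_in_archived_mirror() helper added above stores the address that the relocated Klass copy will have once the archive is mapped at its requested base, via ArchiveBuilder::current()->to_requested(). A small standalone sketch of that buffer-to-requested translation (illustrative only; the struct, field names and addresses below are made up, not the HotSpot types):

#include <cassert>
#include <cstdint>
#include <cstdio>

// Objects are first copied into a temporary output buffer; the value stored into an
// archived mirror must be the address that copy will have at the requested mapping.
struct BuilderSketch {
  uintptr_t buffer_bottom;               // where the copies live during dumping
  intptr_t  buffer_to_requested_delta;   // requested_bottom - buffer_bottom

  uintptr_t to_requested(uintptr_t buffer_addr) const {
    assert(buffer_addr >= buffer_bottom);
    return buffer_addr + buffer_to_requested_delta;
  }
};

int main() {
  BuilderSketch b;
  b.buffer_bottom = 0x7f1234560000;                 // assumed buffer location
  uintptr_t requested_bottom = 0x800000000;         // assumed requested base
  b.buffer_to_requested_delta = (intptr_t)(requested_bottom - b.buffer_bottom);

  uintptr_t klass_copy_in_buffer = b.buffer_bottom + 0x4000;
  printf("requested address = 0x%lx\n",
         (unsigned long)b.to_requested(klass_copy_in_buffer));
  return 0;
}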

@@ -719,6 +719,7 @@ oop StringTable::lookup_shared(const jchar* name, int len, unsigned int hash) {

 oop StringTable::create_archived_string(oop s, Thread* THREAD) {
   assert(DumpSharedSpaces, "this function is only used with -Xshare:dump");
+  assert(java_lang_String::is_instance(s), "sanity");
   assert(!HeapShared::is_archived_object(s), "sanity");

   oop new_s = NULL;

@@ -584,6 +584,7 @@ void SymbolTable::dump(outputStream* st, bool verbose) {
 #if INCLUDE_CDS
 void SymbolTable::copy_shared_symbol_table(GrowableArray<Symbol*>* symbols,
                                            CompactHashtableWriter* writer) {
+  ArchiveBuilder* builder = ArchiveBuilder::current();
   int len = symbols->length();
   for (int i = 0; i < len; i++) {
     Symbol* sym = ArchiveBuilder::get_relocated_symbol(symbols->at(i));

@@ -591,10 +592,7 @@ void SymbolTable::copy_shared_symbol_table(GrowableArray<Symbol*>* symbols,
     assert(fixed_hash == hash_symbol((const char*)sym->bytes(), sym->utf8_length(), false),
            "must not rehash during dumping");
     sym->set_permanent();
-    if (DynamicDumpSharedSpaces) {
-      sym = DynamicArchive::buffer_to_target(sym);
-    }
-    writer->add(fixed_hash, MetaspaceShared::object_delta_u4(sym));
+    writer->add(fixed_hash, builder->buffer_to_offset_u4((address)sym));
   }
 }


@@ -609,15 +607,6 @@ void SymbolTable::write_to_archive(GrowableArray<Symbol*>* symbols) {
   if (!DynamicDumpSharedSpaces) {
     _shared_table.reset();
     writer.dump(&_shared_table, "symbol");
-
-    // Verify the written shared table is correct -- at this point,
-    // vmSymbols has already been relocated to point to the archived
-    // version of the Symbols.
-    Symbol* sym = vmSymbols::java_lang_Object();
-    const char* name = (const char*)sym->bytes();
-    int len = sym->utf8_length();
-    unsigned int hash = hash_symbol(name, len, _alt_hash);
-    assert(sym == _shared_table.lookup(name, hash, len), "sanity");
   } else {
     _dynamic_shared_table.reset();
     writer.dump(&_dynamic_shared_table, "symbol");

@@ -44,6 +44,7 @@
 #include "logging/logStream.hpp"
 #include "memory/allocation.hpp"
 #include "memory/archiveUtils.hpp"
+#include "memory/archiveBuilder.hpp"
 #include "memory/dynamicArchive.hpp"
 #include "memory/filemap.hpp"
 #include "memory/heapShared.hpp"

@@ -279,15 +280,6 @@ public:
 };

 class LambdaProxyClassKey {
-  template <typename T> static void original_to_target(T& field) {
-    if (field != NULL) {
-      if (DynamicDumpSharedSpaces) {
-        field = DynamicArchive::original_to_target(field);
-      }
-      ArchivePtrMarker::mark_pointer(&field);
-    }
-  }
-
   InstanceKlass* _caller_ik;
   Symbol* _invoked_name;
   Symbol* _invoked_type;

@@ -318,13 +310,13 @@ public:
     it->push(&_instantiated_method_type);
   }

-  void original_to_target() {
-    original_to_target(_caller_ik);
-    original_to_target(_instantiated_method_type);
-    original_to_target(_invoked_name);
-    original_to_target(_invoked_type);
-    original_to_target(_member_method);
-    original_to_target(_method_type);
+  void mark_pointers() {
+    ArchivePtrMarker::mark_pointer(&_caller_ik);
+    ArchivePtrMarker::mark_pointer(&_instantiated_method_type);
+    ArchivePtrMarker::mark_pointer(&_invoked_name);
+    ArchivePtrMarker::mark_pointer(&_invoked_type);
+    ArchivePtrMarker::mark_pointer(&_member_method);
+    ArchivePtrMarker::mark_pointer(&_method_type);
   }

   bool equals(LambdaProxyClassKey const& other) const {

@@ -337,11 +329,11 @@ public:
   }

   unsigned int hash() const {
-    return SystemDictionaryShared::hash_for_shared_dictionary(_caller_ik) +
-           SystemDictionaryShared::hash_for_shared_dictionary(_invoked_name) +
-           SystemDictionaryShared::hash_for_shared_dictionary(_invoked_type) +
-           SystemDictionaryShared::hash_for_shared_dictionary(_method_type) +
-           SystemDictionaryShared::hash_for_shared_dictionary(_instantiated_method_type);
+    return SystemDictionaryShared::hash_for_shared_dictionary((address)_caller_ik) +
+           SystemDictionaryShared::hash_for_shared_dictionary((address)_invoked_name) +
+           SystemDictionaryShared::hash_for_shared_dictionary((address)_invoked_type) +
+           SystemDictionaryShared::hash_for_shared_dictionary((address)_method_type) +
+           SystemDictionaryShared::hash_for_shared_dictionary((address)_instantiated_method_type);
   }

   static unsigned int dumptime_hash(Symbol* sym) {

@@ -406,10 +398,8 @@ public:
   }
   void init(LambdaProxyClassKey& key, DumpTimeLambdaProxyClassInfo& info) {
     _key = key;
-    _key.original_to_target();
-    _proxy_klass_head = DynamicDumpSharedSpaces ?
-      DynamicArchive::original_to_target(info._proxy_klasses->at(0)) :
-      info._proxy_klasses->at(0);
+    _key.mark_pointers();
+    _proxy_klass_head = info._proxy_klasses->at(0);
     ArchivePtrMarker::mark_pointer(&_proxy_klass_head);
   }


@@ -604,14 +594,9 @@ public:
     return loader_constraints() + i;
   }

-  static u4 object_delta_u4(Symbol* sym) {
-    if (DynamicDumpSharedSpaces) {
-      sym = DynamicArchive::original_to_target(sym);
-    }
-    return MetaspaceShared::object_delta_u4(sym);
-  }
-
   void init(DumpTimeSharedClassInfo& info) {
+    ArchiveBuilder* builder = ArchiveBuilder::current();
+    assert(builder->is_in_buffer_space(info._klass), "must be");
     _klass = info._klass;
     if (!SystemDictionaryShared::is_builtin(_klass)) {
       CrcInfo* c = crc();

@@ -625,8 +610,8 @@ public:
       RTVerifierConstraint* vf_constraints = verifier_constraints();
       char* flags = verifier_constraint_flags();
       for (i = 0; i < _num_verifier_constraints; i++) {
-        vf_constraints[i]._name = object_delta_u4(info._verifier_constraints->at(i)._name);
-        vf_constraints[i]._from_name = object_delta_u4(info._verifier_constraints->at(i)._from_name);
+        vf_constraints[i]._name = builder->any_to_offset_u4(info._verifier_constraints->at(i)._name);
+        vf_constraints[i]._from_name = builder->any_to_offset_u4(info._verifier_constraints->at(i)._from_name);
       }
       for (i = 0; i < _num_verifier_constraints; i++) {
         flags[i] = info._verifier_constraint_flags->at(i);

@@ -636,7 +621,7 @@ public:
     if (_num_loader_constraints > 0) {
       RTLoaderConstraint* ld_constraints = loader_constraints();
       for (i = 0; i < _num_loader_constraints; i++) {
-        ld_constraints[i]._name = object_delta_u4(info._loader_constraints->at(i)._name);
+        ld_constraints[i]._name = builder->any_to_offset_u4(info._loader_constraints->at(i)._name);
         ld_constraints[i]._loader_type1 = info._loader_constraints->at(i)._loader_type1;
         ld_constraints[i]._loader_type2 = info._loader_constraints->at(i)._loader_type2;
       }

@@ -644,12 +629,8 @@ public:

     if (_klass->is_hidden()) {
       InstanceKlass* n_h = info.nest_host();
-      if (DynamicDumpSharedSpaces) {
-        n_h = DynamicArchive::original_to_target(n_h);
-      }
       set_nest_host(n_h);
     }
-    _klass = DynamicDumpSharedSpaces ? DynamicArchive::original_to_target(info._klass) : info._klass;
     ArchivePtrMarker::mark_pointer(&_klass);
   }


@@ -682,13 +663,9 @@ public:
     return *info_pointer_addr(klass);
   }
   static void set_for(InstanceKlass* klass, RunTimeSharedClassInfo* record) {
-    if (DynamicDumpSharedSpaces) {
-      klass = DynamicArchive::original_to_buffer(klass);
-      *info_pointer_addr(klass) = DynamicArchive::buffer_to_target(record);
-    } else {
-      *info_pointer_addr(klass) = record;
-    }
-
+    assert(ArchiveBuilder::current()->is_in_buffer_space(klass), "must be");
+    assert(ArchiveBuilder::current()->is_in_buffer_space(record), "must be");
+    *info_pointer_addr(klass) = record;
     ArchivePtrMarker::mark_pointer(info_pointer_addr(klass));
   }


@@ -2026,11 +2003,27 @@ size_t SystemDictionaryShared::estimate_size_for_archive() {
   return total_size;
 }

+unsigned int SystemDictionaryShared::hash_for_shared_dictionary(address ptr) {
+  if (ArchiveBuilder::is_active()) {
+    uintx offset = ArchiveBuilder::current()->any_to_offset(ptr);
+    unsigned int hash = primitive_hash<uintx>(offset);
+    DEBUG_ONLY({
+        if (MetaspaceObj::is_shared((const MetaspaceObj*)ptr)) {
+          assert(hash == SystemDictionaryShared::hash_for_shared_dictionary_quick(ptr), "must be");
+        }
+      });
+    return hash;
+  } else {
+    return SystemDictionaryShared::hash_for_shared_dictionary_quick(ptr);
+  }
+}
+
 class CopyLambdaProxyClassInfoToArchive : StackObj {
   CompactHashtableWriter* _writer;
+  ArchiveBuilder* _builder;
 public:
   CopyLambdaProxyClassInfoToArchive(CompactHashtableWriter* writer)
-    : _writer(writer) {}
+    : _writer(writer), _builder(ArchiveBuilder::current()) {}
   bool do_entry(LambdaProxyClassKey& key, DumpTimeLambdaProxyClassInfo& info) {
     // In static dump, info._proxy_klasses->at(0) is already relocated to point to the archived class
     // (not the original class).
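SystemDictionaryShared::hash_for_shared_dictionary() above hashes the offset of a pointer from the archive base (via any_to_offset at dump time, and relative to SharedBaseAddress in hash_for_shared_dictionary_quick at run time), so bucket placement is independent of where the archive ends up being mapped. A standalone sketch of that idea (the hash mixer and all addresses below are illustrative stand-ins, not HotSpot's primitive_hash or SharedBaseAddress):

#include <cassert>
#include <cstdint>
#include <cstdio>

// Simple stand-in hash over an offset value.
static unsigned int hash_offset(uintptr_t v) {
  return (unsigned int)(v * 2654435761u);   // Knuth-style multiplicative mix
}

// Dump time: the object sits in the output buffer, but we hash its requested offset.
static unsigned int dumptime_hash(uintptr_t requested_base, uintptr_t requested_addr) {
  return hash_offset(requested_addr - requested_base);
}

// Run time: the object sits in the mapped archive; hashing its offset from the actual
// base yields the same value even if the mapping moved (ASLR).
static unsigned int runtime_hash(uintptr_t mapped_base, uintptr_t mapped_addr) {
  return hash_offset(mapped_addr - mapped_base);
}

int main() {
  uintptr_t requested_base = 0x800000000, offset = 0x1c8;
  uintptr_t mapped_base = 0x7f5500000000;   // archive relocated at run time
  unsigned int h1 = dumptime_hash(requested_base, requested_base + offset);
  unsigned int h2 = runtime_hash(mapped_base, mapped_base + offset);
  assert(h1 == h2);                         // compact hashtable buckets agree
  printf("hash = 0x%x\n", h1);
  return 0;
}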

@@ -2047,10 +2040,8 @@ public:
     RunTimeLambdaProxyClassInfo* runtime_info =
       (RunTimeLambdaProxyClassInfo*)MetaspaceShared::read_only_space_alloc(byte_size);
     runtime_info->init(key, info);
-    unsigned int hash = runtime_info->hash(); // Fields in runtime_info->_key already point to target space.
-    u4 delta = DynamicDumpSharedSpaces ?
-      MetaspaceShared::object_delta_u4((void*)DynamicArchive::buffer_to_target(runtime_info)) :
-      MetaspaceShared::object_delta_u4((void*)runtime_info);
+    unsigned int hash = runtime_info->hash();
+    u4 delta = _builder->any_to_offset_u4((void*)runtime_info);
     _writer->add(hash, delta);
     return true;
   }

@@ -2065,8 +2056,10 @@ public:
     for (int i = 0; i < len-1; i++) {
       InstanceKlass* ok0 = info._proxy_klasses->at(i+0); // this is original klass
       InstanceKlass* ok1 = info._proxy_klasses->at(i+1); // this is original klass
-      InstanceKlass* bk0 = DynamicDumpSharedSpaces ? DynamicArchive::original_to_buffer(ok0) : ok0;
-      InstanceKlass* bk1 = DynamicDumpSharedSpaces ? DynamicArchive::original_to_buffer(ok1) : ok1;
+      assert(ArchiveBuilder::current()->is_in_buffer_space(ok0), "must be");
+      assert(ArchiveBuilder::current()->is_in_buffer_space(ok1), "must be");
+      InstanceKlass* bk0 = ok0;
+      InstanceKlass* bk1 = ok1;
       assert(bk0->next_link() == 0, "must be called after Klass::remove_unshareable_info()");
       assert(bk1->next_link() == 0, "must be called after Klass::remove_unshareable_info()");
       bk0->set_next_link(bk1);

@@ -2074,11 +2067,8 @@ public:
        ArchivePtrMarker::mark_pointer(bk0->next_link_addr());
      }
    }
-    if (DynamicDumpSharedSpaces) {
-      DynamicArchive::original_to_buffer(info._proxy_klasses->at(0))->set_lambda_proxy_is_available();
-    } else {
-      info._proxy_klasses->at(0)->set_lambda_proxy_is_available();
-    }
+    info._proxy_klasses->at(0)->set_lambda_proxy_is_available();
     return true;
   }
 };

@@ -2086,10 +2076,11 @@ public:
 class CopySharedClassInfoToArchive : StackObj {
   CompactHashtableWriter* _writer;
   bool _is_builtin;
+  ArchiveBuilder *_builder;
 public:
   CopySharedClassInfoToArchive(CompactHashtableWriter* writer,
                                bool is_builtin)
-    : _writer(writer), _is_builtin(is_builtin) {}
+    : _writer(writer), _is_builtin(is_builtin), _builder(ArchiveBuilder::current()) {}

   bool do_entry(InstanceKlass* k, DumpTimeSharedClassInfo& info) {
     if (!info.is_excluded() && info.is_builtin() == _is_builtin) {

@@ -2100,16 +2091,8 @@ public:

       unsigned int hash;
       Symbol* name = info._klass->name();
-      if (DynamicDumpSharedSpaces) {
-        name = DynamicArchive::original_to_target(name);
-      }
-      hash = SystemDictionaryShared::hash_for_shared_dictionary(name);
-      u4 delta;
-      if (DynamicDumpSharedSpaces) {
-        delta = MetaspaceShared::object_delta_u4(DynamicArchive::buffer_to_target(record));
-      } else {
-        delta = MetaspaceShared::object_delta_u4(record);
-      }
+      hash = SystemDictionaryShared::hash_for_shared_dictionary((address)name);
+      u4 delta = _builder->buffer_to_offset_u4((address)record);
       if (_is_builtin && info._klass->is_hidden()) {
         // skip
       } else {

@@ -2200,7 +2183,7 @@ SystemDictionaryShared::find_record(RunTimeSharedDictionary* static_dict, RunTim
     return NULL;
   }

-  unsigned int hash = SystemDictionaryShared::hash_for_shared_dictionary(name);
+  unsigned int hash = SystemDictionaryShared::hash_for_shared_dictionary_quick(name);
   const RunTimeSharedClassInfo* record = NULL;
   if (!MetaspaceShared::is_shared_dynamic(name)) {
     // The names of all shared classes in the static dict must also be in the

@@ -341,12 +341,15 @@ public:
 #endif

   template <typename T>
-  static unsigned int hash_for_shared_dictionary(T* ptr) {
+  static unsigned int hash_for_shared_dictionary_quick(T* ptr) {
+    assert(MetaspaceObj::is_shared((const MetaspaceObj*)ptr), "must be");
     assert(ptr > (T*)SharedBaseAddress, "must be");
-    address p = address(ptr) - SharedBaseAddress;
-    return primitive_hash<address>(p);
+    uintx offset = uintx(ptr) - uintx(SharedBaseAddress);
+    return primitive_hash<uintx>(offset);
   }

+  static unsigned int hash_for_shared_dictionary(address ptr);
+
 #if INCLUDE_CDS_JAVA_HEAP
  private:
   static void update_archived_mirror_native_pointers_for(RunTimeSharedDictionary* dict);

@@ -24,6 +24,7 @@

 #include "precompiled.hpp"
 #include "classfile/classLoaderDataShared.hpp"
+#include "classfile/symbolTable.hpp"
 #include "classfile/systemDictionaryShared.hpp"
 #include "classfile/vmClasses.hpp"
 #include "logging/log.hpp"

@@ -40,13 +41,12 @@
 #include "oops/objArrayKlass.hpp"
 #include "oops/oopHandle.inline.hpp"
 #include "runtime/sharedRuntime.hpp"
+#include "runtime/thread.hpp"
 #include "utilities/align.hpp"
 #include "utilities/bitMap.inline.hpp"
 #include "utilities/hashtable.inline.hpp"

-ArchiveBuilder* ArchiveBuilder::_singleton = NULL;
-intx ArchiveBuilder::_buffer_to_target_delta = 0;
+ArchiveBuilder* ArchiveBuilder::_current = NULL;

 class AdapterHandlerEntry;

 class MethodTrampolineInfo {

@@ -69,7 +69,7 @@ class AdapterToTrampoline : public ResourceHashtable<
 static AdapterToTrampoline* _adapter_to_trampoline = NULL;

 ArchiveBuilder::OtherROAllocMark::~OtherROAllocMark() {
-  char* newtop = ArchiveBuilder::singleton()->_ro_region->top();
+  char* newtop = ArchiveBuilder::current()->_ro_region->top();
   ArchiveBuilder::alloc_stats()->record_other_type(int(newtop - _oldtop), true);
 }


@@ -161,8 +161,8 @@ void ArchiveBuilder::SourceObjList::relocate(int i, ArchiveBuilder* builder) {

 ArchiveBuilder::ArchiveBuilder(DumpRegion* mc_region, DumpRegion* rw_region, DumpRegion* ro_region)
   : _rw_src_objs(), _ro_src_objs(), _src_obj_table(INITIAL_TABLE_SIZE) {
-  assert(_singleton == NULL, "must be");
-  _singleton = this;
+  assert(_current == NULL, "must be");
+  _current = this;

   _klasses = new (ResourceObj::C_HEAP, mtClassShared) GrowableArray<Klass*>(4 * K, mtClassShared);
   _symbols = new (ResourceObj::C_HEAP, mtClassShared) GrowableArray<Symbol*>(256 * K, mtClassShared);

@@ -177,12 +177,24 @@ ArchiveBuilder::ArchiveBuilder(DumpRegion* mc_region, DumpRegion* rw_region, Dum
   _rw_region = rw_region;
   _ro_region = ro_region;

+  _num_dump_regions_used = 0;
+
   _estimated_metaspaceobj_bytes = 0;
+  _estimated_hashtable_bytes = 0;
+  _estimated_trampoline_bytes = 0;
+
+  _requested_static_archive_bottom = NULL;
+  _requested_static_archive_top = NULL;
+  _mapped_static_archive_bottom = NULL;
+  _mapped_static_archive_top = NULL;
+  _requested_dynamic_archive_bottom = NULL;
+  _requested_dynamic_archive_top = NULL;
+  _buffer_to_requested_delta = 0;
 }

 ArchiveBuilder::~ArchiveBuilder() {
-  assert(_singleton == this, "must be");
-  _singleton = NULL;
+  assert(_current == this, "must be");
+  _current = NULL;

   clean_up_src_obj_table();


@@ -282,6 +294,10 @@ void ArchiveBuilder::gather_klasses_and_symbols() {
     // DynamicArchiveBuilder::sort_methods()).
     sort_symbols_and_fix_hash();
     sort_klasses();
+
+    // TODO -- we need a proper estimate for the archived modules, etc,
+    // but this should be enough for now
+    _estimated_metaspaceobj_bytes += 200 * 1024 * 1024;
   }
 }


@@ -313,6 +329,93 @@ void ArchiveBuilder::sort_klasses() {
   _klasses->sort(compare_klass_by_name);
 }

+size_t ArchiveBuilder::estimate_archive_size() {
+  // size of the symbol table and two dictionaries, plus the RunTimeSharedClassInfo's
+  size_t symbol_table_est = SymbolTable::estimate_size_for_archive();
+  size_t dictionary_est = SystemDictionaryShared::estimate_size_for_archive();
+  _estimated_hashtable_bytes = symbol_table_est + dictionary_est;
+
+  _estimated_trampoline_bytes = allocate_method_trampoline_info();
+
+  size_t total = 0;
+
+  total += _estimated_metaspaceobj_bytes;
+  total += _estimated_hashtable_bytes;
+  total += _estimated_trampoline_bytes;
+
+  // allow fragmentation at the end of each dump region
+  total += _total_dump_regions * reserve_alignment();
+
+  log_info(cds)("_estimated_hashtable_bytes = " SIZE_FORMAT " + " SIZE_FORMAT " = " SIZE_FORMAT,
+                symbol_table_est, dictionary_est, _estimated_hashtable_bytes);
+  log_info(cds)("_estimated_metaspaceobj_bytes = " SIZE_FORMAT, _estimated_metaspaceobj_bytes);
+  log_info(cds)("_estimated_trampoline_bytes = " SIZE_FORMAT, _estimated_trampoline_bytes);
+  log_info(cds)("total estimate bytes = " SIZE_FORMAT, total);
+
+  return align_up(total, reserve_alignment());
+}
+
+address ArchiveBuilder::reserve_buffer() {
+  size_t buffer_size = estimate_archive_size();
+  ReservedSpace rs(buffer_size);
+  if (!rs.is_reserved()) {
+    log_error(cds)("Failed to reserve " SIZE_FORMAT " bytes of output buffer.", buffer_size);
+    vm_direct_exit(0);
+  }
+
+  // buffer_bottom is the lowest address of the 3 core regions (mc, rw, ro) when
+  // we are copying the class metadata into the buffer.
+  address buffer_bottom = (address)rs.base();
+  log_info(cds)("Reserved output buffer space at : " PTR_FORMAT " [" SIZE_FORMAT " bytes]",
+                p2i(buffer_bottom), buffer_size);
+  MetaspaceShared::set_shared_rs(rs);
+
+  MetaspaceShared::init_shared_dump_space(_mc_region);
+  _buffer_bottom = buffer_bottom;
+  _last_verified_top = buffer_bottom;
+  _current_dump_space = _mc_region;
+  _num_dump_regions_used = 1;
+  _other_region_used_bytes = 0;
+
+  ArchivePtrMarker::initialize(&_ptrmap, (address*)_mc_region->base(), (address*)_mc_region->top());
+
+  // The bottom of the static archive should be mapped at this address by default.
+  _requested_static_archive_bottom = (address)MetaspaceShared::requested_base_address();
+
+  // The bottom of the archive (that I am writing now) should be mapped at this address by default.
+  address my_archive_requested_bottom;
+
+  if (DumpSharedSpaces) {
+    my_archive_requested_bottom = _requested_static_archive_bottom;
+  } else {
+    _mapped_static_archive_bottom = (address)MetaspaceObj::shared_metaspace_base();
+    _mapped_static_archive_top = (address)MetaspaceObj::shared_metaspace_top();
+    assert(_mapped_static_archive_top >= _mapped_static_archive_bottom, "must be");
+    size_t static_archive_size = _mapped_static_archive_top - _mapped_static_archive_bottom;
+
+    // At run time, we will mmap the dynamic archive at my_archive_requested_bottom
+    _requested_static_archive_top = _requested_static_archive_bottom + static_archive_size;
+    my_archive_requested_bottom = align_up(_requested_static_archive_top, MetaspaceShared::reserved_space_alignment());
+
+    _requested_dynamic_archive_bottom = my_archive_requested_bottom;
+  }
+
+  _buffer_to_requested_delta = my_archive_requested_bottom - _buffer_bottom;
+
+  address my_archive_requested_top = my_archive_requested_bottom + buffer_size;
+  if (my_archive_requested_bottom < _requested_static_archive_bottom ||
+      my_archive_requested_top <= _requested_static_archive_bottom) {
+    // Size overflow.
+    log_error(cds)("my_archive_requested_bottom = " INTPTR_FORMAT, p2i(my_archive_requested_bottom));
+    log_error(cds)("my_archive_requested_top = " INTPTR_FORMAT, p2i(my_archive_requested_top));
+    log_error(cds)("SharedBaseAddress (" INTPTR_FORMAT ") is too high. "
+                   "Please rerun java -Xshare:dump with a lower value", p2i(_requested_static_archive_bottom));
+    vm_direct_exit(0);
+  }
+
+  return buffer_bottom;
+}
+
 void ArchiveBuilder::iterate_sorted_roots(MetaspaceClosure* it, bool is_relocating_pointers) {
   int i;

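reserve_buffer() above picks the "requested" bottom of the archive being written: SharedBaseAddress itself for a static dump, or the first aligned address above the requested static archive for a dynamic dump, computed from the size of the currently mapped static archive. A standalone sketch of that address arithmetic (all constants are assumed example values; align_up here assumes a power-of-two alignment, as a stand-in for the HotSpot helper):

#include <cassert>
#include <cstdint>
#include <cstdio>

// Align x up to a power-of-two boundary.
static uintptr_t align_up(uintptr_t x, uintptr_t alignment) {
  return (x + alignment - 1) & ~(alignment - 1);
}

int main() {
  uintptr_t requested_static_bottom = 0x800000000;     // assumed SharedBaseAddress
  uintptr_t mapped_static_bottom    = 0x7f2000000000;  // where the static archive really is
  uintptr_t mapped_static_top       = 0x7f2000d40000;
  uintptr_t alignment               = 0x10000;         // assumed reservation alignment

  // Dynamic dump: the new archive is requested immediately above the static archive,
  // using the static archive's size but the requested (not mapped) bottom.
  size_t    static_size              = mapped_static_top - mapped_static_bottom;
  uintptr_t requested_static_top     = requested_static_bottom + static_size;
  uintptr_t requested_dynamic_bottom = align_up(requested_static_top, alignment);

  assert(requested_dynamic_bottom >= requested_static_top);
  printf("dynamic archive requested at 0x%lx\n", (unsigned long)requested_dynamic_bottom);
  return 0;
}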

@@ -563,22 +666,19 @@ public:
 };

 void ArchiveBuilder::relocate_roots() {
+  log_info(cds)("Relocating external roots ... ");
   ResourceMark rm;
   RefRelocator doit(this);
   iterate_sorted_roots(&doit, /*is_relocating_pointers=*/true);
   doit.finish();
+  log_info(cds)("done");
 }

-void ArchiveBuilder::relocate_pointers() {
-  log_info(cds)("Relocating embedded pointers ... ");
+void ArchiveBuilder::relocate_metaspaceobj_embedded_pointers() {
+  log_info(cds)("Relocating embedded pointers in core regions ... ");
   relocate_embedded_pointers(&_rw_src_objs);
   relocate_embedded_pointers(&_ro_src_objs);
   update_special_refs();
-
-  log_info(cds)("Relocating external roots ... ");
-  relocate_roots();
-
-  log_info(cds)("done");
 }

 // We must relocate vmClasses::_klasses[] only after we have copied the
|
||||||
|
|
||||||
if (log_is_enabled(Debug, cds, class)) {
|
if (log_is_enabled(Debug, cds, class)) {
|
||||||
ResourceMark rm;
|
ResourceMark rm;
|
||||||
log_debug(cds, class)("klasses[%4d] = " PTR_FORMAT " %s", i, p2i(to_target(ik)), ik->external_name());
|
log_debug(cds, class)("klasses[%4d] = " PTR_FORMAT " %s", i, p2i(to_requested(ik)), ik->external_name());
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
uintx ArchiveBuilder::buffer_to_offset(address p) const {
|
||||||
|
address requested_p = to_requested(p);
|
||||||
|
assert(requested_p >= _requested_static_archive_bottom, "must be");
|
||||||
|
return requested_p - _requested_static_archive_bottom;
|
||||||
|
}
|
||||||
|
|
||||||
|
uintx ArchiveBuilder::any_to_offset(address p) const {
|
||||||
|
if (is_in_mapped_static_archive(p)) {
|
||||||
|
assert(DynamicDumpSharedSpaces, "must be");
|
||||||
|
return p - _mapped_static_archive_bottom;
|
||||||
|
}
|
||||||
|
return buffer_to_offset(p);
|
||||||
|
}
|
||||||
|
|
||||||
|
// Update a Java object to point its Klass* to the new location after
|
||||||
|
// shared archive has been compacted.
|
||||||
|
void ArchiveBuilder::relocate_klass_ptr(oop o) {
|
||||||
|
assert(DumpSharedSpaces, "sanity");
|
||||||
|
Klass* k = get_relocated_klass(o->klass());
|
||||||
|
Klass* requested_k = to_requested(k);
|
||||||
|
narrowKlass nk = CompressedKlassPointers::encode_not_null(requested_k, _requested_static_archive_bottom);
|
||||||
|
o->set_narrow_klass(nk);
|
||||||
|
}
|
||||||
|
|
||||||
|
// RelocateBufferToRequested --- Relocate all the pointers in mc/rw/ro,
|
||||||
|
// so that the archive can be mapped to the "requested" location without runtime relocation.
|
||||||
|
//
|
||||||
|
// - See ArchiveBuilder header for the definition of "buffer", "mapped" and "requested"
|
||||||
|
// - ArchivePtrMarker::ptrmap() marks all the pointers in the mc/rw/ro regions
|
||||||
|
// - Every pointer must have one of the following values:
|
||||||
|
// [a] NULL:
|
||||||
|
// No relocation is needed. Remove this pointer from ptrmap so we don't need to
|
||||||
|
// consider it at runtime.
|
||||||
|
// [b] Points into an object X which is inside the buffer:
|
||||||
|
// Adjust this pointer by _buffer_to_requested_delta, so it points to X
|
||||||
|
// when the archive is mapped at the requested location.
|
||||||
|
// [c] Points into an object Y which is inside mapped static archive:
|
||||||
|
// - This happens only during dynamic dump
|
||||||
|
// - Adjust this pointer by _mapped_to_requested_static_archive_delta,
|
||||||
|
// so it points to Y when the static archive is mapped at the requested location.
|
||||||
|
template <bool STATIC_DUMP>
|
||||||
|
class RelocateBufferToRequested : public BitMapClosure {
|
||||||
|
ArchiveBuilder* _builder;
|
||||||
|
address _buffer_bottom;
|
||||||
|
intx _buffer_to_requested_delta;
|
||||||
|
intx _mapped_to_requested_static_archive_delta;
|
||||||
|
size_t _max_non_null_offset;
|
||||||
|
|
||||||
|
public:
|
||||||
|
RelocateBufferToRequested(ArchiveBuilder* builder) {
|
||||||
|
_builder = builder;
|
||||||
|
_buffer_bottom = _builder->buffer_bottom();
|
||||||
|
_buffer_to_requested_delta = builder->buffer_to_requested_delta();
|
||||||
|
_mapped_to_requested_static_archive_delta = builder->requested_static_archive_bottom() - builder->mapped_static_archive_bottom();
|
||||||
|
_max_non_null_offset = 0;
|
||||||
|
|
||||||
|
address bottom = _builder->buffer_bottom();
|
||||||
|
address top = _builder->buffer_top();
|
||||||
|
address new_bottom = bottom + _buffer_to_requested_delta;
|
||||||
|
address new_top = top + _buffer_to_requested_delta;
|
||||||
|
log_debug(cds)("Relocating archive from [" INTPTR_FORMAT " - " INTPTR_FORMAT " ] to "
|
||||||
|
"[" INTPTR_FORMAT " - " INTPTR_FORMAT " ]",
|
||||||
|
p2i(bottom), p2i(top),
|
||||||
|
p2i(new_bottom), p2i(new_top));
|
||||||
|
}
|
||||||
|
|
||||||
|
bool do_bit(size_t offset) {
|
||||||
|
address* p = (address*)_buffer_bottom + offset;
|
||||||
|
assert(_builder->is_in_buffer_space(p), "pointer must live in buffer space");
|
||||||
|
|
||||||
|
if (*p == NULL) {
|
||||||
|
// todo -- clear bit, etc
|
||||||
|
ArchivePtrMarker::ptrmap()->clear_bit(offset);
|
||||||
|
} else {
|
||||||
|
if (STATIC_DUMP) {
|
||||||
|
assert(_builder->is_in_buffer_space(*p), "old pointer must point inside buffer space");
|
||||||
|
*p += _buffer_to_requested_delta;
|
||||||
|
assert(_builder->is_in_requested_static_archive(*p), "new pointer must point inside requested archive");
|
||||||
|
} else {
|
||||||
|
if (_builder->is_in_buffer_space(*p)) {
|
||||||
|
*p += _buffer_to_requested_delta;
|
||||||
|
// assert is in requested dynamic archive
|
||||||
|
} else {
|
||||||
|
assert(_builder->is_in_mapped_static_archive(*p), "old pointer must point inside buffer space or mapped static archive");
|
||||||
|
*p += _mapped_to_requested_static_archive_delta;
|
||||||
|
assert(_builder->is_in_requested_static_archive(*p), "new pointer must point inside requested archive");
|
||||||
|
}
|
||||||
|
}
|
||||||
|
_max_non_null_offset = offset;
|
||||||
|
}
|
||||||
|
|
||||||
|
return true; // keep iterating
|
||||||
|
}
|
||||||
|
|
||||||
|
void doit() {
|
||||||
|
ArchivePtrMarker::ptrmap()->iterate(this);
|
||||||
|
ArchivePtrMarker::compact(_max_non_null_offset);
|
||||||
|
}
|
||||||
|
};
|
||||||
|
|
||||||
|
|
||||||
|
void ArchiveBuilder::relocate_to_requested() {
|
||||||
|
size_t my_archive_size = buffer_top() - buffer_bottom();
|
||||||
|
|
||||||
|
if (DumpSharedSpaces) {
|
||||||
|
_requested_static_archive_top = _requested_static_archive_bottom + my_archive_size;
|
||||||
|
RelocateBufferToRequested<true> patcher(this);
|
||||||
|
patcher.doit();
|
||||||
|
} else {
|
||||||
|
assert(DynamicDumpSharedSpaces, "must be");
|
||||||
|
_requested_dynamic_archive_top = _requested_dynamic_archive_bottom + my_archive_size;
|
||||||
|
RelocateBufferToRequested<false> patcher(this);
|
||||||
|
patcher.doit();
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
// Write detailed info to a mapfile to analyze contents of the archive.
|
// Write detailed info to a mapfile to analyze contents of the archive.
|
||||||
// static dump:
|
// static dump:
|
||||||
// java -Xshare:dump -Xlog:cds+map=trace:file=cds.map:none:filesize=0
|
// java -Xshare:dump -Xlog:cds+map=trace:file=cds.map:none:filesize=0
|
||||||
|
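RelocateBufferToRequested above walks ArchivePtrMarker's bitmap of pointer locations and patches each marked, non-null slot by a fixed delta, clearing the bit for NULL pointers. A minimal standalone sketch of that bitmap-driven relocation pass (std::vector<bool> stands in for the real bitmap, the delta is made up, and only the static-dump case from the closure is modeled):

#include <cassert>
#include <cstdint>
#include <cstdio>
#include <vector>

int main() {
  const size_t nwords = 8;
  uintptr_t buffer[nwords] = {0};
  std::vector<bool> ptrmap(nwords, false);   // one bit per word of the output buffer

  uintptr_t buffer_bottom = (uintptr_t)&buffer[0];
  intptr_t  delta = 0x1000;                  // assumed buffer-to-requested delta

  // Word 2 holds a pointer to word 5; word 6 is a marked slot holding NULL.
  buffer[2] = (uintptr_t)&buffer[5];
  ptrmap[2] = true;
  ptrmap[6] = true;

  for (size_t i = 0; i < nwords; i++) {
    if (!ptrmap[i]) continue;
    uintptr_t* slot = &buffer[i];
    if (*slot == 0) {
      ptrmap[i] = false;                     // like clearing the bit for NULL pointers
    } else {
      assert(*slot >= buffer_bottom);        // must point inside the buffer (static dump)
      *slot += delta;                        // now valid at the requested mapping address
    }
  }

  printf("relocated word 2 = 0x%lx\n", (unsigned long)buffer[2]);
  return 0;
}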

@@ -632,9 +848,9 @@ void ArchiveBuilder::make_klasses_shareable() {
 // consistency, we log everything using runtime addresses.
 class ArchiveBuilder::CDSMapLogger : AllStatic {
   static intx buffer_to_runtime_delta() {
-    // Translate the buffers used by the MC/RW/RO regions to their eventual locations
+    // Translate the buffers used by the MC/RW/RO regions to their eventual (requested) locations
     // at runtime.
-    return _buffer_to_target_delta + MetaspaceShared::final_delta();
+    return ArchiveBuilder::current()->buffer_to_requested_delta();
   }

   // mc/rw/ro regions only

@@ -907,3 +1123,9 @@ void ArchiveBuilder::update_method_trampolines() {
     }
   }
 }
+
+#ifndef PRODUCT
+void ArchiveBuilder::assert_is_vm_thread() {
+  assert(Thread::current()->is_VM_thread(), "ArchiveBuilder should be used only inside the VMThread");
+}
+#endif

@@ -28,6 +28,7 @@
 #include "memory/archiveUtils.hpp"
 #include "memory/metaspaceClosure.hpp"
 #include "oops/klass.hpp"
+#include "runtime/os.hpp"
 #include "utilities/bitMap.hpp"
 #include "utilities/growableArray.hpp"
 #include "utilities/hashtable.hpp"

@@ -40,7 +41,49 @@ class Klass;
 class MemRegion;
 class Symbol;

+// Overview of CDS archive creation (for both static and dynamic dump):
+//
+// [1] Load all classes (static dump: from the classlist, dynamic dump: as part of app execution)
+// [2] Allocate "output buffer"
+// [3] Copy contents of the 3 "core" regions (mc/rw/ro) into the output buffer.
+//     - mc region:
+//         allocate_method_trampolines();
+//         allocate the cpp vtables (static dump only)
+//     - memcpy the MetaspaceObjs into rw/ro:
+//         dump_rw_region();
+//         dump_ro_region();
+//     - fix all the pointers in the MetaspaceObjs to point to the copies
+//         relocate_metaspaceobj_embedded_pointers()
+// [4] Copy symbol table, dictionary, etc, into the ro region
+// [5] Relocate all the pointers in mc/rw/ro, so that the archive can be mapped to
+//     the "requested" location without runtime relocation. See relocate_to_requested()
 class ArchiveBuilder : public StackObj {
+protected:
+  DumpRegion* _current_dump_space;
+  address _buffer_bottom;                  // for writing the contents of mc/rw/ro regions
+  address _last_verified_top;
+  int _num_dump_regions_used;
+  size_t _other_region_used_bytes;
+
+  // These are the addresses where we will request the static and dynamic archives to be
+  // mapped at run time. If the request fails (due to ASLR), we will map the archives at
+  // os-selected addresses.
+  address _requested_static_archive_bottom;     // This is determined solely by the value of
+                                                // SharedBaseAddress during -Xshare:dump.
+  address _requested_static_archive_top;
+  address _requested_dynamic_archive_bottom;    // Used only during dynamic dump. It's placed
+                                                // immediately above _requested_static_archive_top.
+  address _requested_dynamic_archive_top;
+
+  // (Used only during dynamic dump) where the static archive is actually mapped. This
+  // may be different than _requested_static_archive_{bottom,top} due to ASLR
+  address _mapped_static_archive_bottom;
+  address _mapped_static_archive_top;
+
+  intx _buffer_to_requested_delta;
+
+  DumpRegion* current_dump_space() const { return _current_dump_space; }
+
 public:
   enum FollowMode {
     make_a_copy, point_to_it, set_to_null

@@ -146,6 +189,7 @@ private:
   DumpRegion* _mc_region;
   DumpRegion* _rw_region;
   DumpRegion* _ro_region;
+  CHeapBitMap _ptrmap;    // bitmap used by ArchivePtrMarker

   SourceObjList _rw_src_objs; // objs to put in rw region
   SourceObjList _ro_src_objs; // objs to put in ro region

@@ -161,7 +205,7 @@ private:
   DumpAllocStats* _alloc_stats;

   // For global access.
-  static ArchiveBuilder* _singleton;
+  static ArchiveBuilder* _current;

 public:
   // Use this when you allocate space with MetaspaceShare::read_only_space_alloc()

@@ -171,7 +215,7 @@ public:
     char* _oldtop;
   public:
     OtherROAllocMark() {
-      _oldtop = _singleton->_ro_region->top();
+      _oldtop = _current->_ro_region->top();
     }
     ~OtherROAllocMark();
   };

@@ -190,47 +234,88 @@ private:

   void update_special_refs();
   void relocate_embedded_pointers(SourceObjList* src_objs);
-  void relocate_roots();

   bool is_excluded(Klass* k);
   void clean_up_src_obj_table();

 protected:
   virtual void iterate_roots(MetaspaceClosure* it, bool is_relocating_pointers) = 0;

   // Conservative estimate for number of bytes needed for:
   size_t _estimated_metaspaceobj_bytes;   // all archived MetaspaceObj's.
+  size_t _estimated_hashtable_bytes;      // symbol table and dictionaries
+  size_t _estimated_trampoline_bytes;     // method entry trampolines

-protected:
-  DumpRegion* _current_dump_space;
-  address _alloc_bottom;
-
-  DumpRegion* current_dump_space() const { return _current_dump_space; }
+  static const int _total_dump_regions = 3;
+
+  size_t estimate_archive_size();
+
+  static size_t reserve_alignment() {
+    return os::vm_allocation_granularity();
+  }

 public:
   void set_current_dump_space(DumpRegion* r) { _current_dump_space = r; }
+  address reserve_buffer();
+
+  address buffer_bottom() const { return _buffer_bottom; }
+  address buffer_top() const { return (address)current_dump_space()->top(); }
+  address requested_static_archive_bottom() const { return _requested_static_archive_bottom; }
+  address mapped_static_archive_bottom() const { return _mapped_static_archive_bottom; }
+  intx buffer_to_requested_delta() const { return _buffer_to_requested_delta; }

   bool is_in_buffer_space(address p) const {
-    return (_alloc_bottom <= p && p < (address)current_dump_space()->top());
+    return (buffer_bottom() <= p && p < buffer_top());
   }

-  template <typename T> bool is_in_target_space(T target_obj) const {
-    address buff_obj = address(target_obj) - _buffer_to_target_delta;
-    return is_in_buffer_space(buff_obj);
+  template <typename T> bool is_in_requested_static_archive(T p) const {
+    return _requested_static_archive_bottom <= (address)p && (address)p < _requested_static_archive_top;
+  }
+
+  template <typename T> bool is_in_mapped_static_archive(T p) const {
+    return _mapped_static_archive_bottom <= (address)p && (address)p < _mapped_static_archive_top;
   }

   template <typename T> bool is_in_buffer_space(T obj) const {
     return is_in_buffer_space(address(obj));
   }

-  template <typename T> T to_target_no_check(T obj) const {
-    return (T)(address(obj) + _buffer_to_target_delta);
+  template <typename T> T to_requested(T obj) const {
+    assert(is_in_buffer_space(obj), "must be");
+    return (T)(address(obj) + _buffer_to_requested_delta);
   }

-  template <typename T> T to_target(T obj) const {
-    assert(is_in_buffer_space(obj), "must be");
-    return (T)(address(obj) + _buffer_to_target_delta);
+  static intx get_buffer_to_requested_delta() {
+    return current()->buffer_to_requested_delta();
   }

+public:
+  static const uintx MAX_SHARED_DELTA = 0x7FFFFFFF;
+
+  // The address p points to an object inside the output buffer. When the archive is mapped
+  // at the requested address, what's the offset of this object from _requested_static_archive_bottom?
+  uintx buffer_to_offset(address p) const;
+
+  // Same as buffer_to_offset, except that the address p points to either (a) an object
+  // inside the output buffer, or (b), an object in the currently mapped static archive.
+  uintx any_to_offset(address p) const;
+
+  template <typename T>
+  u4 buffer_to_offset_u4(T p) const {
+    uintx offset = buffer_to_offset((address)p);
+    guarantee(offset <= MAX_SHARED_DELTA, "must be 32-bit offset");
+    return (u4)offset;
+  }
+
+  template <typename T>
+  u4 any_to_offset_u4(T p) const {
+    uintx offset = any_to_offset((address)p);
+    guarantee(offset <= MAX_SHARED_DELTA, "must be 32-bit offset");
+    return (u4)offset;
+  }
+
+  static void assert_is_vm_thread() PRODUCT_RETURN;
+
 public:
   ArchiveBuilder(DumpRegion* mc_region, DumpRegion* rw_region, DumpRegion* ro_region);
   ~ArchiveBuilder();

@@ -244,9 +329,11 @@ public:

   void dump_rw_region();
   void dump_ro_region();
-  void relocate_pointers();
+  void relocate_metaspaceobj_embedded_pointers();
+  void relocate_roots();
   void relocate_vm_classes();
   void make_klasses_shareable();
+  void relocate_to_requested();
   void write_cds_map_to_log(FileMapInfo* mapinfo,
                             GrowableArray<MemRegion> *closed_heap_regions,
                             GrowableArray<MemRegion> *open_heap_regions,
@ -258,34 +345,39 @@ public:
|
||||||
GrowableArray<Klass*>* klasses() const { return _klasses; }
|
GrowableArray<Klass*>* klasses() const { return _klasses; }
|
||||||
GrowableArray<Symbol*>* symbols() const { return _symbols; }
|
GrowableArray<Symbol*>* symbols() const { return _symbols; }
|
||||||
|
|
||||||
static ArchiveBuilder* singleton() {
|
static bool is_active() {
|
||||||
assert(_singleton != NULL, "ArchiveBuilder must be active");
|
return (_current != NULL);
|
||||||
return _singleton;
|
}
|
||||||
|
|
||||||
|
static ArchiveBuilder* current() {
|
||||||
|
assert_is_vm_thread();
|
||||||
|
assert(_current != NULL, "ArchiveBuilder must be active");
|
||||||
|
return _current;
|
||||||
}
|
}
|
||||||
|
|
||||||
static DumpAllocStats* alloc_stats() {
|
static DumpAllocStats* alloc_stats() {
|
||||||
return singleton()->_alloc_stats;
|
return current()->_alloc_stats;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
void relocate_klass_ptr(oop o);
|
||||||
|
|
||||||
static Klass* get_relocated_klass(Klass* orig_klass) {
|
static Klass* get_relocated_klass(Klass* orig_klass) {
|
||||||
Klass* klass = (Klass*)singleton()->get_dumped_addr((address)orig_klass);
|
Klass* klass = (Klass*)current()->get_dumped_addr((address)orig_klass);
|
||||||
assert(klass != NULL && klass->is_klass(), "must be");
|
assert(klass != NULL && klass->is_klass(), "must be");
|
||||||
return klass;
|
return klass;
|
||||||
}
|
}
|
||||||
|
|
||||||
static Symbol* get_relocated_symbol(Symbol* orig_symbol) {
|
static Symbol* get_relocated_symbol(Symbol* orig_symbol) {
|
||||||
return (Symbol*)singleton()->get_dumped_addr((address)orig_symbol);
|
return (Symbol*)current()->get_dumped_addr((address)orig_symbol);
|
||||||
}
|
}
|
||||||
|
|
||||||
void print_stats(int ro_all, int rw_all, int mc_all);
|
void print_stats(int ro_all, int rw_all, int mc_all);
|
||||||
static intx _buffer_to_target_delta;
|
|
||||||
|
|
||||||
// Method trampolines related functions
|
// Method trampolines related functions
|
||||||
void allocate_method_trampolines();
|
void allocate_method_trampolines();
|
||||||
void allocate_method_trampolines_for(InstanceKlass* ik);
|
void allocate_method_trampolines_for(InstanceKlass* ik);
|
||||||
size_t allocate_method_trampoline_info();
|
size_t allocate_method_trampoline_info();
|
||||||
void update_method_trampolines();
|
void update_method_trampolines();
|
||||||
|
|
||||||
};
|
};
|
||||||
|
|
||||||
#endif // SHARE_MEMORY_ARCHIVEBUILDER_HPP
|
#endif // SHARE_MEMORY_ARCHIVEBUILDER_HPP
|
||||||
|
|
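The ArchiveBuilder changes above replace signed buffer-to-target deltas with unsigned 32-bit offsets measured from the requested archive bottom. A standalone sketch of that encoding follows, for illustration only (the constant mirrors MAX_SHARED_DELTA above; nothing else here is HotSpot API):

// Sketch (not part of the commit): encode a pointer inside a contiguous dump
// buffer as a 32-bit offset from the buffer bottom, then resolve the same
// offset against whatever base the archive is eventually mapped at.
#include <cassert>
#include <cstdint>

typedef uint32_t u4;
static const uintptr_t MAX_SHARED_DELTA = 0x7FFFFFFF;

static u4 to_offset_u4(const char* buffer_bottom, const void* p) {
  uintptr_t offset = (uintptr_t)p - (uintptr_t)buffer_bottom;
  assert(offset <= MAX_SHARED_DELTA && "must be 32-bit offset");
  return (u4)offset;
}

static char* from_offset(char* mapped_bottom, u4 offset) {
  return mapped_bottom + offset;  // valid wherever the archive actually lands
}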
@@ -28,6 +28,7 @@
 #include "classfile/systemDictionaryShared.hpp"
 #include "classfile/vmClasses.hpp"
 #include "interpreter/bootstrapInfo.hpp"
+#include "memory/archiveBuilder.hpp"
 #include "memory/archiveUtils.hpp"
 #include "memory/dynamicArchive.hpp"
 #include "memory/filemap.hpp"

@@ -150,14 +151,12 @@ char* DumpRegion::expand_top_to(char* newtop) {
 ShouldNotReachHere();
 }

+MetaspaceShared::commit_to(_rs, _vs, newtop);
+_top = newtop;
+
 if (_rs == MetaspaceShared::shared_rs()) {
-uintx delta;
-if (DynamicDumpSharedSpaces) {
-delta = DynamicArchive::object_delta_uintx(newtop);
-} else {
-delta = MetaspaceShared::object_delta_uintx(newtop);
-}
-if (delta > MAX_SHARED_DELTA) {
+uintx delta = ArchiveBuilder::current()->buffer_to_offset((address)(newtop-1));
+if (delta > ArchiveBuilder::MAX_SHARED_DELTA) {
 // This is just a sanity check and should not appear in any real world usage. This
 // happens only if you allocate more than 2GB of shared objects and would require
 // millions of shared classes.
@@ -166,8 +165,6 @@ char* DumpRegion::expand_top_to(char* newtop) {
 }
 }

-MetaspaceShared::commit_to(_rs, _vs, newtop);
-_top = newtop;
 return _top;
 }

@@ -193,7 +190,7 @@ void DumpRegion::append_intptr_t(intptr_t n, bool need_to_mark) {
 void DumpRegion::print(size_t total_bytes) const {
 log_debug(cds)("%-3s space: " SIZE_FORMAT_W(9) " [ %4.1f%% of total] out of " SIZE_FORMAT_W(9) " bytes [%5.1f%% used] at " INTPTR_FORMAT,
 _name, used(), percent_of(used(), total_bytes), reserved(), percent_of(used(), reserved()),
-p2i(_base + MetaspaceShared::final_delta()));
+p2i(ArchiveBuilder::current()->to_requested(_base)));
 }

 void DumpRegion::print_out_of_space_msg(const char* failing_region, size_t needed_bytes) {
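expand_top_to() now commits the new top first and then checks, via ArchiveBuilder::current()->buffer_to_offset(), that the region still encodes as a 32-bit offset. A hedged sketch of that growth guard (the buffer layout and names are invented for illustration):

// Sketch (not part of the commit): grow a dump region and verify that the
// highest used byte still fits the 32-bit offset encoding of the archive.
#include <cassert>
#include <cstdint>

static const uintptr_t MAX_SHARED_DELTA = 0x7FFFFFFF;

static char* expand_top_to(char* buffer_bottom, char** top, char* newtop) {
  *top = newtop;  // commit first, as the reordered code above does
  uintptr_t delta = (uintptr_t)(newtop - 1) - (uintptr_t)buffer_bottom;
  assert(delta <= MAX_SHARED_DELTA && "more than 2GB of shared objects");
  return *top;
}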
@@ -1,5 +1,5 @@
 /*
-* Copyright (c) 2019, 2020, Oracle and/or its affiliates. All rights reserved.
+* Copyright (c) 2019, 2021, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it

@@ -85,7 +85,6 @@ public:
 // If the archive ends up being mapped at a different address (e.g. 0x810000000), SharedDataRelocator
 // is used to shift each marked pointer by a delta (0x10000000 in this example), so that it points to
 // the actually mapped location of the target object.
-template <bool COMPACTING>
 class SharedDataRelocator: public BitMapClosure {
 // for all (address** p), where (is_marked(p) && _patch_base <= p && p < _patch_end) { *p += delta; }

@@ -104,17 +103,10 @@ class SharedDataRelocator: public BitMapClosure {
 // How much to relocate for each pointer.
 intx _delta;

-// The following fields are used only when COMPACTING == true;
-// The highest offset (inclusive) in the bitmap that contains a non-null pointer.
-// This is used at dump time to reduce the size of the bitmap (which may have been over-allocated).
-size_t _max_non_null_offset;
-CHeapBitMap* _ptrmap;

 public:
 SharedDataRelocator(address* patch_base, address* patch_end,
 address valid_old_base, address valid_old_end,
-address valid_new_base, address valid_new_end, intx delta,
-CHeapBitMap* ptrmap = NULL) :
+address valid_new_base, address valid_new_end, intx delta) :
 _patch_base(patch_base), _patch_end(patch_end),
 _valid_old_base(valid_old_base), _valid_old_end(valid_old_end),
 _valid_new_base(valid_new_base), _valid_new_end(valid_new_end),

@@ -125,23 +117,9 @@ class SharedDataRelocator: public BitMapClosure {
 log_debug(cds, reloc)("SharedDataRelocator::_valid_old_end = " PTR_FORMAT, p2i(_valid_old_end));
 log_debug(cds, reloc)("SharedDataRelocator::_valid_new_base = " PTR_FORMAT, p2i(_valid_new_base));
 log_debug(cds, reloc)("SharedDataRelocator::_valid_new_end = " PTR_FORMAT, p2i(_valid_new_end));
-if (COMPACTING) {
-assert(ptrmap != NULL, "must be");
-_max_non_null_offset = 0;
-_ptrmap = ptrmap;
-} else {
-// Don't touch the _max_non_null_offset and _ptrmap fields. Hopefully a good C++ compiler can
-// elide them.
-assert(ptrmap == NULL, "must be");
-}
 }

-size_t max_non_null_offset() {
-assert(COMPACTING, "must be");
-return _max_non_null_offset;
-}
-
-inline bool do_bit(size_t offset);
+bool do_bit(size_t offset);
 };

 class DumpRegion {
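SharedDataRelocator loses its COMPACTING mode because the dump-time bookkeeping, clearing bits that point at NULL and remembering the highest surviving bit so the bitmap can be truncated, now happens before the archive is written. A minimal sketch of that dump-time sweep, using std::vector<bool> in place of CHeapBitMap:

// Sketch (not part of the commit): dump-time cleanup of a pointer bitmap.
// Bits whose slots hold NULL are cleared so the runtime relocator can assume
// every remaining bit marks a real pointer, and the tail of the bitmap past
// the last surviving bit can be dropped.
#include <cstddef>
#include <vector>

static size_t sweep_null_bits(std::vector<bool>& ptrmap, void** patch_base) {
  size_t max_non_null_offset = 0;
  for (size_t offset = 0; offset < ptrmap.size(); offset++) {
    if (!ptrmap[offset]) continue;
    if (patch_base[offset] == nullptr) {
      ptrmap[offset] = false;        // never ask the runtime to patch a NULL
    } else {
      max_non_null_offset = offset;  // last bit worth keeping
    }
  }
  ptrmap.resize(max_non_null_offset + 1);  // truncate the over-allocated tail
  return max_non_null_offset;
}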
@@ -1,5 +1,5 @@
 /*
-* Copyright (c) 2019, 2020, Oracle and/or its affiliates. All rights reserved.
+* Copyright (c) 2019, 2021, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it

@@ -28,32 +28,13 @@
 #include "memory/archiveUtils.hpp"
 #include "utilities/bitMap.inline.hpp"

-template <bool COMPACTING>
-inline bool SharedDataRelocator<COMPACTING>::do_bit(size_t offset) {
+inline bool SharedDataRelocator::do_bit(size_t offset) {
 address* p = _patch_base + offset;
 assert(_patch_base <= p && p < _patch_end, "must be");

 address old_ptr = *p;
-if (old_ptr == NULL) {
-assert(COMPACTING, "NULL pointers should not be marked when relocating at run-time");
-} else {
-assert(_valid_old_base <= old_ptr && old_ptr < _valid_old_end, "must be");
-}
-
-if (COMPACTING) {
-// Start-up performance: use a template parameter to elide this block for run-time archive
-// relocation.
-assert(Arguments::is_dumping_archive(), "Don't do this during run-time archive loading!");
-if (old_ptr == NULL) {
-_ptrmap->clear_bit(offset);
-DEBUG_ONLY(log_trace(cds, reloc)("Clearing pointer [" PTR_FORMAT "] -> NULL @ " SIZE_FORMAT_W(9), p2i(p), offset));
-return true;
-} else {
-_max_non_null_offset = offset;
-}
-} else {
-assert(old_ptr != NULL, "bits for NULL pointers should have been cleaned at dump time");
-}
+assert(_valid_old_base <= old_ptr && old_ptr < _valid_old_end, "must be");
+assert(old_ptr != NULL, "bits for NULL pointers should have been cleaned at dump time");

 address new_ptr = old_ptr + _delta;
 assert(new_ptr != NULL, "don't point to the bottom of the archive"); // See ArchivePtrMarker::mark_pointer().
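With the dump-time branch gone, do_bit() is a pure runtime patcher: every marked slot is known to hold a non-NULL pointer into the old range and is simply shifted by the delta. The same loop, written out as a standalone sketch with plain arrays instead of a BitMapClosure:

// Sketch (not part of the commit): runtime relocation of marked pointers.
// A set bit at `offset` means patch_base[offset] holds a pointer that must be
// shifted by `delta` to land inside the newly mapped range.
#include <cassert>
#include <cstddef>
#include <cstdint>
#include <vector>

static void relocate(char** patch_base, const std::vector<bool>& ptrmap,
                      char* valid_old_base, char* valid_old_end, intptr_t delta) {
  for (size_t offset = 0; offset < ptrmap.size(); offset++) {
    if (!ptrmap[offset]) continue;
    char* old_ptr = patch_base[offset];
    assert(old_ptr != nullptr);  // NULL bits were cleared at dump time
    assert(valid_old_base <= old_ptr && old_ptr < valid_old_end);
    patch_base[offset] = old_ptr + delta;  // now points into the mapped range
  }
}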
@@ -38,7 +38,7 @@
 #include "memory/metaspaceShared.hpp"
 #include "memory/resourceArea.hpp"
 #include "oops/klass.inline.hpp"
-#include "runtime/os.inline.hpp"
+#include "runtime/os.hpp"
 #include "runtime/sharedRuntime.hpp"
 #include "runtime/vmThread.hpp"
 #include "runtime/vmOperations.hpp"

@@ -53,9 +53,6 @@ public:
 return os::vm_allocation_granularity();
 }

-static const int _total_dump_regions = 3;
-int _num_dump_regions_used;
-
 public:
 void mark_pointer(address* ptr_loc) {
 ArchivePtrMarker::mark_pointer(ptr_loc);

@@ -73,59 +70,31 @@ public:
 return 0;
 }

-if (!MetaspaceShared::is_in_shared_metaspace(a_name)) {
-// a_name points to a Symbol in the top archive.
-// When this method is called, a_name is still pointing to the output space.
-// Translate it to point to the output space, so that it can be compared with
-// Symbols in the base archive.
-a_name = (Symbol*)(address(a_name) + _buffer_to_target_delta);
-}
-if (!MetaspaceShared::is_in_shared_metaspace(b_name)) {
-b_name = (Symbol*)(address(b_name) + _buffer_to_target_delta);
-}
+u4 a_offset = ArchiveBuilder::current()->any_to_offset_u4(a_name);
+u4 b_offset = ArchiveBuilder::current()->any_to_offset_u4(b_name);

-return a_name->fast_compare(b_name);
+if (a_offset < b_offset) {
+return -1;
+} else {
+assert(a_offset > b_offset, "must be");
+return 1;
+}
 }

 public:
 DynamicArchiveHeader *_header;
-address _last_verified_top;
-size_t _other_region_used_bytes;

-// Conservative estimate for number of bytes needed for:
-size_t _estimated_hashtable_bytes; // symbol table and dictionaries
-size_t _estimated_trampoline_bytes; // method entry trampolines
-
-size_t estimate_archive_size();
-size_t estimate_class_file_size();
-address reserve_space_and_init_buffer_to_target_delta();
-void init_header(address addr);
+void init_header();
 void release_header();
 void sort_methods();
 void sort_methods(InstanceKlass* ik) const;
 void remark_pointers_for_instance_klass(InstanceKlass* k, bool should_mark) const;
-void relocate_buffer_to_target();
 void write_archive(char* serialized_data);

-void init_first_dump_space(address reserved_bottom) {
-DumpRegion* mc_space = MetaspaceShared::misc_code_dump_space();
-DumpRegion* rw_space = MetaspaceShared::read_write_dump_space();
-
-// Use the same MC->RW->RO ordering as in the base archive.
-MetaspaceShared::init_shared_dump_space(mc_space);
-_current_dump_space = mc_space;
-_last_verified_top = reserved_bottom;
-_num_dump_regions_used = 1;
-}
-
 public:
 DynamicArchiveBuilder() : ArchiveBuilder(MetaspaceShared::misc_code_dump_space(),
 MetaspaceShared::read_write_dump_space(),
 MetaspaceShared::read_only_dump_space()) {
-_estimated_hashtable_bytes = 0;
-_estimated_trampoline_bytes = 0;
-
-_num_dump_regions_used = 0;
 }

 void start_dump_space(DumpRegion* next) {
@@ -173,17 +142,15 @@ public:

 gather_klasses_and_symbols();

-// rw space starts ...
-address reserved_bottom = reserve_space_and_init_buffer_to_target_delta();
-init_header(reserved_bottom);
+// mc space starts ...
+reserve_buffer();
+init_header();

-CHeapBitMap ptrmap;
-ArchivePtrMarker::initialize(&ptrmap, (address*)reserved_bottom, (address*)current_dump_space()->top());
-
 allocate_method_trampolines();
 verify_estimate_size(_estimated_trampoline_bytes, "Trampolines");

 gather_source_objs();
+// rw space starts ...
 start_dump_space(MetaspaceShared::read_write_dump_space());

 log_info(cds, dynamic)("Copying %d klasses and %d symbols",
@@ -195,7 +162,8 @@ public:
 DumpRegion* ro_space = MetaspaceShared::read_only_dump_space();
 start_dump_space(ro_space);
 dump_ro_region();
-relocate_pointers();
+relocate_metaspaceobj_embedded_pointers();
+relocate_roots();

 verify_estimate_size(_estimated_metaspaceobj_bytes, "MetaspaceObjs");

@@ -226,8 +194,7 @@ public:
 log_info(cds)("Adjust lambda proxy class dictionary");
 SystemDictionaryShared::adjust_lambda_proxy_class_dictionary();

-log_info(cds)("Final relocation of pointers ... ");
-relocate_buffer_to_target();
+relocate_to_requested();

 write_archive(serialized_data);
 release_header();
@@ -237,76 +204,12 @@ public:
 }

 virtual void iterate_roots(MetaspaceClosure* it, bool is_relocating_pointers) {
-if (!is_relocating_pointers) {
-SystemDictionaryShared::dumptime_classes_do(it);
-}
 FileMapInfo::metaspace_pointers_do(it);
+SystemDictionaryShared::dumptime_classes_do(it);
 }
 };

-size_t DynamicArchiveBuilder::estimate_archive_size() {
-// size of the symbol table and two dictionaries, plus the RunTimeSharedClassInfo's
-size_t symbol_table_est = SymbolTable::estimate_size_for_archive();
-size_t dictionary_est = SystemDictionaryShared::estimate_size_for_archive();
-_estimated_hashtable_bytes = symbol_table_est + dictionary_est;
-
-_estimated_trampoline_bytes = allocate_method_trampoline_info();
-
-size_t total = 0;
-
-total += _estimated_metaspaceobj_bytes;
-total += _estimated_hashtable_bytes;
-total += _estimated_trampoline_bytes;
-
-// allow fragmentation at the end of each dump region
-total += _total_dump_regions * reserve_alignment();
-
-log_info(cds, dynamic)("_estimated_hashtable_bytes = " SIZE_FORMAT " + " SIZE_FORMAT " = " SIZE_FORMAT,
-symbol_table_est, dictionary_est, _estimated_hashtable_bytes);
-log_info(cds, dynamic)("_estimated_metaspaceobj_bytes = " SIZE_FORMAT, _estimated_metaspaceobj_bytes);
-log_info(cds, dynamic)("_estimated_trampoline_bytes = " SIZE_FORMAT, _estimated_trampoline_bytes);
-log_info(cds, dynamic)("total estimate bytes = " SIZE_FORMAT, total);
-
-return align_up(total, reserve_alignment());
-}
-
-address DynamicArchiveBuilder::reserve_space_and_init_buffer_to_target_delta() {
-size_t total = estimate_archive_size();
-ReservedSpace rs(total);
-if (!rs.is_reserved()) {
-log_error(cds, dynamic)("Failed to reserve %d bytes of output buffer.", (int)total);
-vm_direct_exit(0);
-}
-
-address buffer_base = (address)rs.base();
-log_info(cds, dynamic)("Reserved output buffer space at : " PTR_FORMAT " [%d bytes]",
-p2i(buffer_base), (int)total);
-MetaspaceShared::set_shared_rs(rs);
-
-// At run time, we will mmap the dynamic archive at target_space_bottom.
-// However, at dump time, we may not be able to write into the target_space,
-// as it's occupied by dynamically loaded Klasses. So we allocate a buffer
-// at an arbitrary location chosen by the OS. We will write all the dynamically
-// archived classes into this buffer. At the final stage of dumping, we relocate
-// all pointers that are inside the buffer_space to point to their (runtime)
-// target location inside thetarget_space.
-address target_space_bottom =
-(address)align_up(MetaspaceShared::shared_metaspace_top(), reserve_alignment());
-_buffer_to_target_delta = intx(target_space_bottom) - intx(buffer_base);
-
-log_info(cds, dynamic)("Target archive space at : " PTR_FORMAT, p2i(target_space_bottom));
-log_info(cds, dynamic)("Buffer-space to target-space delta : " PTR_FORMAT, p2i((address)_buffer_to_target_delta));
-
-return buffer_base;
-}
-
-void DynamicArchiveBuilder::init_header(address reserved_bottom) {
-_alloc_bottom = reserved_bottom;
-_last_verified_top = reserved_bottom;
-_other_region_used_bytes = 0;
-
-init_first_dump_space(reserved_bottom);
-
+void DynamicArchiveBuilder::init_header() {
 FileMapInfo* mapinfo = new FileMapInfo(false);
 assert(FileMapInfo::dynamic_info() == mapinfo, "must be");
 _header = mapinfo->dynamic_header();
@@ -360,7 +263,8 @@ void DynamicArchiveBuilder::sort_methods(InstanceKlass* ik) const {

 if (log_is_enabled(Debug, cds, dynamic)) {
 ResourceMark rm;
-log_debug(cds, dynamic)("sorting methods for " PTR_FORMAT " %s", p2i(to_target(ik)), ik->external_name());
+log_debug(cds, dynamic)("sorting methods for " PTR_FORMAT " (" PTR_FORMAT ") %s",
+p2i(ik), p2i(to_requested(ik)), ik->external_name());
 }

 // Method sorting may re-layout the [iv]tables, which would change the offset(s)
@@ -428,92 +332,14 @@ void DynamicArchiveBuilder::remark_pointers_for_instance_klass(InstanceKlass* k,
 }
 }

-class RelocateBufferToTarget: public BitMapClosure {
-DynamicArchiveBuilder *_builder;
-address* _buffer_bottom;
-intx _buffer_to_target_delta;
-public:
-RelocateBufferToTarget(DynamicArchiveBuilder* builder, address* bottom, intx delta) :
-_builder(builder), _buffer_bottom(bottom), _buffer_to_target_delta(delta) {}
-
-bool do_bit(size_t offset) {
-address* p = _buffer_bottom + offset;
-assert(_builder->is_in_buffer_space(p), "pointer must live in buffer space");
-
-address old_ptr = *p;
-if (_builder->is_in_buffer_space(old_ptr)) {
-address new_ptr = old_ptr + _buffer_to_target_delta;
-log_trace(cds, dynamic)("Final patch: @%6d [" PTR_FORMAT " -> " PTR_FORMAT "] " PTR_FORMAT " => " PTR_FORMAT,
-(int)offset, p2i(p), p2i(_builder->to_target(p)),
-p2i(old_ptr), p2i(new_ptr));
-*p = new_ptr;
-}
-
-return true; // keep iterating
-}
-};
-
-void DynamicArchiveBuilder::relocate_buffer_to_target() {
-RelocateBufferToTarget patcher(this, (address*)_alloc_bottom, _buffer_to_target_delta);
-ArchivePtrMarker::ptrmap()->iterate(&patcher);
-
-Array<u8>* table = FileMapInfo::saved_shared_path_table().table();
-SharedPathTable runtime_table(to_target(table), FileMapInfo::shared_path_table().size());
-_header->set_shared_path_table(runtime_table);
-
-address relocatable_base = (address)SharedBaseAddress;
-address relocatable_end = (address)(current_dump_space()->top()) + _buffer_to_target_delta;
-
-intx addr_delta = MetaspaceShared::final_delta();
-if (addr_delta == 0) {
-ArchivePtrMarker::compact(relocatable_base, relocatable_end);
-} else {
-// The base archive is NOT mapped at MetaspaceShared::requested_base_address() (due to ASLR).
-// This means that the current content of the dynamic archive is based on a random
-// address. Let's relocate all the pointers, so that it can be mapped to
-// MetaspaceShared::requested_base_address() without runtime relocation.
-//
-// Note: both the base and dynamic archive are written with
-// FileMapHeader::_requested_base_address == MetaspaceShared::requested_base_address()
-
-// Patch all pointers that are marked by ptrmap within this region,
-// where we have just dumped all the metaspace data.
-address patch_base = (address)_alloc_bottom;
-address patch_end = (address)current_dump_space()->top();
-
-// the current value of the pointers to be patched must be within this
-// range (i.e., must point to either the top archive (as currently mapped), or to the
-// (targeted address of) the top archive)
-address valid_old_base = relocatable_base;
-address valid_old_end = relocatable_end;
-size_t base_plus_top_size = valid_old_end - valid_old_base;
-size_t top_size = patch_end - patch_base;
-size_t base_size = base_plus_top_size - top_size;
-assert(base_plus_top_size > base_size, "no overflow");
-assert(base_plus_top_size > top_size, "no overflow");
-
-// after patching, the pointers must point inside this range
-// (the requested location of the archive, as mapped at runtime).
-address valid_new_base = (address)MetaspaceShared::requested_base_address();
-address valid_new_end = valid_new_base + base_plus_top_size;
-
-log_debug(cds)("Relocating archive from [" INTPTR_FORMAT " - " INTPTR_FORMAT "] to "
-"[" INTPTR_FORMAT " - " INTPTR_FORMAT "], delta = " INTX_FORMAT " bytes",
-p2i(patch_base + base_size), p2i(patch_end),
-p2i(valid_new_base + base_size), p2i(valid_new_end), addr_delta);
-
-SharedDataRelocator<true> patcher((address*)patch_base, (address*)patch_end, valid_old_base, valid_old_end,
-valid_new_base, valid_new_end, addr_delta, ArchivePtrMarker::ptrmap());
-ArchivePtrMarker::ptrmap()->iterate(&patcher);
-ArchivePtrMarker::compact(patcher.max_non_null_offset());
-}
-}
-
 void DynamicArchiveBuilder::write_archive(char* serialized_data) {
 int num_klasses = klasses()->length();
 int num_symbols = symbols()->length();

-_header->set_serialized_data(to_target(serialized_data));
+Array<u8>* table = FileMapInfo::saved_shared_path_table().table();
+SharedPathTable runtime_table(table, FileMapInfo::shared_path_table().size());
+_header->set_shared_path_table(runtime_table);
+_header->set_serialized_data(serialized_data);

 FileMapInfo* dynamic_info = FileMapInfo::dynamic_info();
 assert(dynamic_info != NULL, "Sanity");
@@ -523,7 +349,7 @@ void DynamicArchiveBuilder::write_archive(char* serialized_data) {
 dynamic_info->open_for_write(archive_name);
 size_t bitmap_size_in_bytes;
 char* bitmap = MetaspaceShared::write_core_archive_regions(dynamic_info, NULL, NULL, bitmap_size_in_bytes);
-dynamic_info->set_final_requested_base((char*)MetaspaceShared::requested_base_address());
+dynamic_info->set_requested_base((char*)MetaspaceShared::requested_base_address());
 dynamic_info->set_header_crc(dynamic_info->compute_header_crc());
 dynamic_info->write_header();
 dynamic_info->close();
@@ -532,15 +358,14 @@ void DynamicArchiveBuilder::write_archive(char* serialized_data) {
 bitmap, bitmap_size_in_bytes);
 FREE_C_HEAP_ARRAY(char, bitmap);

-address base = to_target(_alloc_bottom);
-address top = address(current_dump_space()->top()) + _buffer_to_target_delta;
+address base = _requested_dynamic_archive_bottom;
+address top = _requested_dynamic_archive_top;
 size_t file_size = pointer_delta(top, base, sizeof(char));

-base += MetaspaceShared::final_delta();
-top += MetaspaceShared::final_delta();
 log_info(cds, dynamic)("Written dynamic archive " PTR_FORMAT " - " PTR_FORMAT
 " [" SIZE_FORMAT " bytes header, " SIZE_FORMAT " bytes total]",
 p2i(base), p2i(top), _header->header_size(), file_size);

 log_info(cds, dynamic)("%d klasses; %d symbols", num_klasses, num_symbols);
 }

@@ -573,61 +398,10 @@ void DynamicArchive::dump() {
 }

 DynamicArchiveBuilder builder;
-_builder = &builder;
 VM_PopulateDynamicDumpSharedSpace op(&builder);
 VMThread::execute(&op);
-_builder = NULL;
 }

-address DynamicArchive::original_to_buffer_impl(address orig_obj) {
-assert(DynamicDumpSharedSpaces, "must be");
-address buff_obj = _builder->get_dumped_addr(orig_obj);
-assert(buff_obj != NULL, "orig_obj must be used by the dynamic archive");
-assert(buff_obj != orig_obj, "call this only when you know orig_obj must be copied and not just referenced");
-assert(_builder->is_in_buffer_space(buff_obj), "must be");
-return buff_obj;
-}
-
-address DynamicArchive::buffer_to_target_impl(address buff_obj) {
-assert(DynamicDumpSharedSpaces, "must be");
-assert(_builder->is_in_buffer_space(buff_obj), "must be");
-return _builder->to_target(buff_obj);
-}
-
-address DynamicArchive::original_to_target_impl(address orig_obj) {
-assert(DynamicDumpSharedSpaces, "must be");
-if (MetaspaceShared::is_in_shared_metaspace(orig_obj)) {
-// This happens when the top archive points to a Symbol* in the base archive.
-return orig_obj;
-}
-address buff_obj = _builder->get_dumped_addr(orig_obj);
-assert(buff_obj != NULL, "orig_obj must be used by the dynamic archive");
-if (buff_obj == orig_obj) {
-// We are storing a pointer to an original object into the dynamic buffer. E.g.,
-// a Symbol* that used by both the base and top archives.
-assert(MetaspaceShared::is_in_shared_metaspace(orig_obj), "must be");
-return orig_obj;
-} else {
-return _builder->to_target(buff_obj);
-}
-}
-
-uintx DynamicArchive::object_delta_uintx(void* buff_obj) {
-assert(DynamicDumpSharedSpaces, "must be");
-address target_obj = _builder->to_target_no_check(address(buff_obj));
-assert(uintx(target_obj) >= SharedBaseAddress, "must be");
-return uintx(target_obj) - SharedBaseAddress;
-}
-
-bool DynamicArchive::is_in_target_space(void *obj) {
-assert(DynamicDumpSharedSpaces, "must be");
-return _builder->is_in_target_space(obj);
-}
-
-DynamicArchiveBuilder* DynamicArchive::_builder = NULL;
-
 bool DynamicArchive::validate(FileMapInfo* dynamic_info) {
 assert(!dynamic_info->is_static(), "must be");
 // Check if the recorded base archive matches with the current one
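The rewritten DynamicArchiveBuilder no longer reserves its own buffer or tracks per-region estimates; it leans on ArchiveBuilder for all of that and relocates to the requested address only once, at the end. A schematic of the resulting phase order, with every phase reduced to a stub (none of these names are the real HotSpot entry points):

// Sketch (not part of the commit): the phase ordering of a dynamic dump,
// with each phase stubbed out so only the control flow remains.
#include <cstdio>

static void reserve_buffer()         { std::puts("reserve one output buffer"); }
static void dump_mc_region()         { std::puts("mc: method trampolines"); }
static void dump_rw_region()         { std::puts("rw: writable copied metadata"); }
static void dump_ro_region()         { std::puts("ro: read-only copied metadata"); }
static void relocate_embedded_ptrs() { std::puts("patch pointers between copied objects"); }
static void relocate_to_requested()  { std::puts("shift everything to the requested base"); }
static void write_archive()          { std::puts("write regions plus relocation bitmap"); }

int main() {
  reserve_buffer();          // one reservation instead of per-region bookkeeping
  dump_mc_region();
  dump_rw_region();
  dump_ro_region();
  relocate_embedded_ptrs();
  relocate_to_requested();   // single, final relocation pass
  write_archive();
  return 0;
}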
@@ -1,5 +1,5 @@
 /*
-* Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved.
+* Copyright (c) 2019, 2021, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it

@@ -58,47 +58,8 @@ public:
 };

 class DynamicArchive : AllStatic {
-static class DynamicArchiveBuilder* _builder;
-static address original_to_target_impl(address orig_obj);
-static address original_to_buffer_impl(address orig_obj);
-static address buffer_to_target_impl(address buff_obj);
-
 public:
 static void dump();

-// obj is a copy of a MetaspaceObj, stored in the dumping buffer.
-//
-// The return value is the runtime targeted location of this object as
-// mapped from the dynamic archive.
-template <typename T> static T buffer_to_target(T buff_obj) {
-return (T)buffer_to_target_impl(address(buff_obj));
-}
-
-// obj is an original MetaspaceObj used by the JVM (e.g., a valid Symbol* in the
-// SymbolTable).
-//
-// The return value is the runtime targeted location of this object as
-// mapped from the dynamic archive.
-template <typename T> static T original_to_target(T obj) {
-return (T)original_to_target_impl(address(obj));
-}
-
-// obj is an original MetaspaceObj use by the JVM (e.g., a valid Symbol* in the
-// SymbolTable).
-//
-// The return value is the location of this object in the dump time
-// buffer space
-template <typename T> static T original_to_buffer(T obj) {
-return (T)original_to_buffer_impl(address(obj));
-}
-
-// Delta of this object from SharedBaseAddress
-static uintx object_delta_uintx(void* buff_obj);
-
-// Does obj point to an address inside the runtime target space of the dynamic
-// archive?
-static bool is_in_target_space(void *obj);
-
 static bool is_mapped() { return FileMapInfo::dynamic_info() != NULL; }
 static bool validate(FileMapInfo* dynamic_info);
 };
@@ -36,6 +36,7 @@
 #include "logging/log.hpp"
 #include "logging/logStream.hpp"
 #include "logging/logMessage.hpp"
+#include "memory/archiveBuilder.hpp"
 #include "memory/archiveUtils.inline.hpp"
 #include "memory/dynamicArchive.hpp"
 #include "memory/filemap.hpp"

@@ -1255,25 +1256,11 @@ size_t FileMapRegion::used_aligned() const {
 return align_up(used(), os::vm_allocation_granularity());
 }

-void FileMapRegion::init(int region_index, char* base, size_t size, bool read_only,
+void FileMapRegion::init(int region_index, size_t mapping_offset, size_t size, bool read_only,
 bool allow_exec, int crc) {
 _is_heap_region = HeapShared::is_heap_region(region_index);
 _is_bitmap_region = (region_index == MetaspaceShared::bm);
-_mapping_offset = 0;
+_mapping_offset = mapping_offset;

-if (_is_heap_region) {
-assert(!DynamicDumpSharedSpaces, "must be");
-assert((base - (char*)CompressedKlassPointers::base()) % HeapWordSize == 0, "Sanity");
-if (base != NULL) {
-_mapping_offset = (size_t)CompressedOops::encode_not_null((oop)base);
-assert(_mapping_offset == (size_t)(uint32_t)_mapping_offset, "must be 32-bit only");
-}
-} else {
-if (base != NULL) {
-assert(base >= (char*)SharedBaseAddress, "must be");
-_mapping_offset = base - (char*)SharedBaseAddress;
-}
-}
 _used = size;
 _read_only = read_only;
 _allow_exec = allow_exec;

@@ -1314,29 +1301,35 @@ void FileMapInfo::write_region(int region, char* base, size_t size,
 Arguments::assert_is_dumping_archive();

 FileMapRegion* si = space_at(region);
-char* target_base;
+char* requested_base;
+size_t mapping_offset = 0;

 if (region == MetaspaceShared::bm) {
-target_base = NULL; // always NULL for bm region.
+requested_base = NULL; // always NULL for bm region
+} else if (size == 0) {
+// This is an unused region (e.g., a heap region when !INCLUDE_CDS_JAVA_HEAP)
+requested_base = NULL;
+} else if (HeapShared::is_heap_region(region)) {
+assert(!DynamicDumpSharedSpaces, "must be");
+requested_base = base;
+mapping_offset = (size_t)CompressedOops::encode_not_null((oop)base);
+assert(mapping_offset == (size_t)(uint32_t)mapping_offset, "must be 32-bit only");
 } else {
-if (DynamicDumpSharedSpaces) {
-assert(!HeapShared::is_heap_region(region), "dynamic archive doesn't support heap regions");
-target_base = DynamicArchive::buffer_to_target(base);
-} else {
-target_base = base;
-}
+char* requested_SharedBaseAddress = (char*)MetaspaceShared::requested_base_address();
+requested_base = ArchiveBuilder::current()->to_requested(base);
+assert(requested_base >= requested_SharedBaseAddress, "must be");
+mapping_offset = requested_base - requested_SharedBaseAddress;
 }

 si->set_file_offset(_file_offset);
-char* requested_base = (target_base == NULL) ? NULL : target_base + MetaspaceShared::final_delta();
 int crc = ClassLoader::crc32(0, base, (jint)size);
 if (size > 0) {
-log_debug(cds)("Shared file region (%-3s) %d: " SIZE_FORMAT_W(8)
+log_info(cds)("Shared file region (%-3s) %d: " SIZE_FORMAT_W(8)
 " bytes, addr " INTPTR_FORMAT " file offset " SIZE_FORMAT_HEX_W(08)
 " crc 0x%08x",
 region_name(region), region, size, p2i(requested_base), _file_offset, crc);
 }
-si->init(region, target_base, size, read_only, allow_exec, crc);
+si->init(region, mapping_offset, size, read_only, allow_exec, crc);

 if (base != NULL) {
 write_bytes_aligned(base, size);
@@ -1494,10 +1487,6 @@ void FileMapInfo::write_bytes_aligned(const void* buffer, size_t nbytes) {
 align_file_position();
 }

-void FileMapInfo::set_final_requested_base(char* b) {
-header()->set_final_requested_base(b);
-}
-
 // Close the shared archive file. This does NOT unmap mapped regions.

 void FileMapInfo::close() {
@@ -1575,7 +1564,7 @@ MapArchiveResult FileMapInfo::map_regions(int regions[], int num_regions, char*
 }

 header()->set_mapped_base_address(header()->requested_base_address() + addr_delta);
-if (addr_delta != 0 && !relocate_pointers(addr_delta)) {
+if (addr_delta != 0 && !relocate_pointers_in_core_regions(addr_delta)) {
 return MAP_ARCHIVE_OTHER_FAILURE;
 }

@@ -1688,12 +1677,14 @@ char* FileMapInfo::map_bitmap_region() {
 return bitmap_base;
 }

-bool FileMapInfo::relocate_pointers(intx addr_delta) {
+// This is called when we cannot map the archive at the requested[ base address (usually 0x800000000).
+// We relocate all pointers in the 3 core regions (mc, ro, rw).
+bool FileMapInfo::relocate_pointers_in_core_regions(intx addr_delta) {
 log_debug(cds, reloc)("runtime archive relocation start");
 char* bitmap_base = map_bitmap_region();

 if (bitmap_base == NULL) {
-return false;
+return false; // OOM, or CRC check failure
 } else {
 size_t ptrmap_size_in_bits = header()->ptrmap_size_in_bits();
 log_debug(cds, reloc)("mapped relocation bitmap @ " INTPTR_FORMAT " (" SIZE_FORMAT " bits)",
@@ -1716,8 +1707,8 @@ bool FileMapInfo::relocate_pointers(intx addr_delta) {
 address valid_new_base = (address)header()->mapped_base_address();
 address valid_new_end = (address)mapped_end();

-SharedDataRelocator<false> patcher((address*)patch_base, (address*)patch_end, valid_old_base, valid_old_end,
+SharedDataRelocator patcher((address*)patch_base, (address*)patch_end, valid_old_base, valid_old_end,
 valid_new_base, valid_new_end, addr_delta);
 ptrmap.iterate(&patcher);

 // The MetaspaceShared::bm region will be unmapped in MetaspaceShared::initialize_shared_spaces().
@@ -2191,6 +2182,10 @@ FileMapRegion* FileMapInfo::last_core_space() const {
 return space_at(MetaspaceShared::ro);
 }

+void FileMapHeader::set_as_offset(char* p, size_t *offset) {
+*offset = ArchiveBuilder::current()->any_to_offset((address)p);
+}
+
 int FileMapHeader::compute_crc() {
 char* start = (char*)this;
 // start computing from the field after _crc
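write_region() now hands FileMapRegion::init() a precomputed mapping_offset: the region's requested address minus the requested SharedBaseAddress. A sketch of writing such an offset at dump time and resolving it at run time (struct and function names are illustrative only):

// Sketch (not part of the commit): store a region's position as an offset from
// the shared base so the header stays valid wherever the archive is mapped.
#include <cstddef>

struct RegionRecord {
  size_t mapping_offset;  // requested_base - requested_shared_base, set at dump time
  size_t used;            // bytes written for this region
};

static RegionRecord make_record(char* requested_base, char* requested_shared_base,
                                size_t used) {
  RegionRecord r;
  r.mapping_offset = (size_t)(requested_base - requested_shared_base);
  r.used = used;
  return r;
}

static char* mapped_base(const RegionRecord& r, char* actual_shared_base) {
  return actual_shared_base + r.mapping_offset;  // wherever the mapping landed
}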
@@ -1,5 +1,5 @@
 /*
-* Copyright (c) 2003, 2020, Oracle and/or its affiliates. All rights reserved.
+* Copyright (c) 2003, 2021, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it

@@ -169,7 +169,7 @@ public:
 void set_read_only(bool v) { _read_only = v; }
 void set_mapped_base(char* p) { _mapped_base = p; }
 void set_mapped_from_file(bool v) { _mapped_from_file = v; }
-void init(int region_index, char* base, size_t size, bool read_only,
+void init(int region_index, size_t mapping_offset, size_t size, bool read_only,
 bool allow_exec, int crc);

 void init_oopmap(size_t oopmap_offset, size_t size_in_bits) {

@@ -241,10 +241,7 @@ class FileMapHeader: private CDSFileMapHeaderBase {
 char* from_mapped_offset(size_t offset) const {
 return mapped_base_address() + offset;
 }
-void set_mapped_offset(char* p, size_t *offset) {
-assert(p >= mapped_base_address(), "sanity");
-*offset = p - mapped_base_address();
-}
+void set_as_offset(char* p, size_t *offset);
 public:
 // Accessors -- fields declared in CDSFileMapHeaderBase
 unsigned int magic() const {return _magic;}

@@ -287,8 +284,8 @@ public:
 narrowOop heap_obj_roots() const { return _heap_obj_roots; }

 void set_has_platform_or_app_classes(bool v) { _has_platform_or_app_classes = v; }
-void set_cloned_vtables(char* p) { set_mapped_offset(p, &_cloned_vtables_offset); }
-void set_serialized_data(char* p) { set_mapped_offset(p, &_serialized_data_offset); }
+void set_cloned_vtables(char* p) { set_as_offset(p, &_cloned_vtables_offset); }
+void set_serialized_data(char* p) { set_as_offset(p, &_serialized_data_offset); }
 void set_base_archive_name_size(size_t s) { _base_archive_name_size = s; }
 void set_base_archive_is_default(bool b) { _base_archive_is_default = b; }
 void set_header_size(size_t s) { _header_size = s; }

@@ -296,15 +293,15 @@ public:
 void set_mapped_base_address(char* p) { _mapped_base_address = p; }
 void set_heap_obj_roots(narrowOop r) { _heap_obj_roots = r; }
 void set_i2i_entry_code_buffers(address p) {
-set_mapped_offset((char*)p, &_i2i_entry_code_buffers_offset);
+set_as_offset((char*)p, &_i2i_entry_code_buffers_offset);
 }

 void set_shared_path_table(SharedPathTable table) {
-set_mapped_offset((char*)table.table(), &_shared_path_table_offset);
+set_as_offset((char*)table.table(), &_shared_path_table_offset);
 _shared_path_table_size = table.size();
 }

-void set_final_requested_base(char* b) {
+void set_requested_base(char* b) {
 _requested_base_address = b;
 _mapped_base_address = 0;
 }

@@ -421,10 +418,9 @@ public:
 bool is_mapped() const { return _is_mapped; }
 void set_is_mapped(bool v) { _is_mapped = v; }
 const char* full_path() const { return _full_path; }
-void set_final_requested_base(char* b);

-char* requested_base_address() const { return header()->requested_base_address(); }
+void set_requested_base(char* b) { header()->set_requested_base(b); }
+char* requested_base_address() const { return header()->requested_base_address(); }

 class DynamicArchiveHeader* dynamic_header() const {
 assert(!is_static(), "must be");

@@ -578,7 +574,7 @@ public:
 char* map_bitmap_region();
 MapArchiveResult map_region(int i, intx addr_delta, char* mapped_base_address, ReservedSpace rs);
 bool read_region(int i, char* base, size_t size);
-bool relocate_pointers(intx addr_delta);
+bool relocate_pointers_in_core_regions(intx addr_delta);
 static size_t set_oopmaps_offset(GrowableArray<ArchiveHeapOopmapInfo> *oopmaps, size_t curr_size);
 static size_t write_oopmaps(GrowableArray<ArchiveHeapOopmapInfo> *oopmaps, size_t curr_offset, char* buffer);
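FileMapHeader keeps the same offset discipline for its own pointer-valued fields: set_as_offset() records an offset at dump time and from_mapped_offset() adds it back to the mapped base at run time. A small sketch of that round trip (field and method names are illustrative, not the HotSpot API):

// Sketch (not part of the commit): a header field kept as an offset so the
// same header bytes work no matter where the archive ends up being mapped.
#include <cstddef>

struct ArchiveHeaderSketch {
  size_t serialized_data_offset;  // recorded at dump time

  void set_serialized_data(char* p, char* dump_time_bottom) {
    serialized_data_offset = (size_t)(p - dump_time_bottom);
  }
  char* serialized_data(char* mapped_bottom) const {
    return mapped_bottom + serialized_data_offset;  // resolved at run time
  }
};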
|
@ -266,7 +266,6 @@ oop HeapShared::archive_heap_object(oop obj, Thread* THREAD) {
|
||||||
oop archived_oop = (oop)G1CollectedHeap::heap()->archive_mem_allocate(len);
|
oop archived_oop = (oop)G1CollectedHeap::heap()->archive_mem_allocate(len);
|
||||||
if (archived_oop != NULL) {
|
if (archived_oop != NULL) {
|
||||||
Copy::aligned_disjoint_words(cast_from_oop<HeapWord*>(obj), cast_from_oop<HeapWord*>(archived_oop), len);
|
Copy::aligned_disjoint_words(cast_from_oop<HeapWord*>(obj), cast_from_oop<HeapWord*>(archived_oop), len);
|
||||||
MetaspaceShared::relocate_klass_ptr(archived_oop);
|
|
||||||
// Reinitialize markword to remove age/marking/locking/etc.
|
// Reinitialize markword to remove age/marking/locking/etc.
|
||||||
//
|
//
|
||||||
// We need to retain the identity_hash, because it may have been used by some hashtables
|
 // We need to retain the identity_hash, because it may have been used by some hashtables

@ -302,7 +301,7 @@ void HeapShared::archive_klass_objects(Thread* THREAD) {
   GrowableArray<Klass*>* klasses = MetaspaceShared::collected_klasses();
   assert(klasses != NULL, "sanity");
   for (int i = 0; i < klasses->length(); i++) {
-    Klass* k = klasses->at(i);
+    Klass* k = ArchiveBuilder::get_relocated_klass(klasses->at(i));

     // archive mirror object
     java_lang_Class::archive_mirror(k, CHECK);

@ -454,7 +453,7 @@ HeapShared::RunTimeKlassSubGraphInfoTable HeapShared::_run_time_subgraph_info_
 KlassSubGraphInfo* HeapShared::init_subgraph_info(Klass* k, bool is_full_module_graph) {
   assert(DumpSharedSpaces, "dump time only");
   bool created;
-  Klass* relocated_k = MetaspaceShared::get_relocated_klass(k);
+  Klass* relocated_k = ArchiveBuilder::get_relocated_klass(k);
   KlassSubGraphInfo* info =
     _dump_time_subgraph_info_table->put_if_absent(relocated_k, KlassSubGraphInfo(relocated_k, is_full_module_graph),
                                                   &created);

@ -464,7 +463,7 @@ KlassSubGraphInfo* HeapShared::init_subgraph_info(Klass* k, bool is_full_module_

 KlassSubGraphInfo* HeapShared::get_subgraph_info(Klass* k) {
   assert(DumpSharedSpaces, "dump time only");
-  Klass* relocated_k = MetaspaceShared::get_relocated_klass(k);
+  Klass* relocated_k = ArchiveBuilder::get_relocated_klass(k);
   KlassSubGraphInfo* info = _dump_time_subgraph_info_table->get(relocated_k);
   assert(info != NULL, "must have been initialized");
   return info;

@ -484,17 +483,16 @@ void KlassSubGraphInfo::add_subgraph_entry_field(

 // Add the Klass* for an object in the current KlassSubGraphInfo's subgraphs.
 // Only objects of boot classes can be included in sub-graph.
-void KlassSubGraphInfo::add_subgraph_object_klass(Klass* orig_k, Klass* relocated_k) {
+void KlassSubGraphInfo::add_subgraph_object_klass(Klass* orig_k) {
   assert(DumpSharedSpaces, "dump time only");
-  assert(relocated_k == MetaspaceShared::get_relocated_klass(orig_k),
-         "must be the relocated Klass in the shared space");
+  Klass* relocated_k = ArchiveBuilder::get_relocated_klass(orig_k);

   if (_subgraph_object_klasses == NULL) {
     _subgraph_object_klasses =
       new(ResourceObj::C_HEAP, mtClass) GrowableArray<Klass*>(50, mtClass);
   }

-  assert(ArchiveBuilder::singleton()->is_in_buffer_space(relocated_k), "must be a shared class");
+  assert(ArchiveBuilder::current()->is_in_buffer_space(relocated_k), "must be a shared class");

   if (_k == relocated_k) {
     // Don't add the Klass containing the sub-graph to it's own klass

@ -619,8 +617,8 @@ struct CopyKlassSubGraphInfoToArchive : StackObj {
         (ArchivedKlassSubGraphInfoRecord*)MetaspaceShared::read_only_space_alloc(sizeof(ArchivedKlassSubGraphInfoRecord));
       record->init(&info);

-      unsigned int hash = SystemDictionaryShared::hash_for_shared_dictionary(klass);
-      u4 delta = MetaspaceShared::object_delta_u4(record);
+      unsigned int hash = SystemDictionaryShared::hash_for_shared_dictionary((address)klass);
+      u4 delta = ArchiveBuilder::current()->any_to_offset_u4(record);
       _writer->add(hash, delta);
     }
     return true; // keep on iterating
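Aside: the two replaced lines above record the subgraph record's position as a 32-bit offset, now computed by ArchiveBuilder::current()->any_to_offset_u4() instead of MetaspaceShared::object_delta_u4(). The standalone C++ sketch below only models that offset-from-base encoding and its range guard; kMaxSharedDelta mirrors the MAX_SHARED_DELTA constant removed from metaspaceShared.hpp later in this patch, and all other names and the buffer are made up. It is an illustration, not HotSpot code.

    #include <cassert>
    #include <cstdint>
    #include <cstdio>

    // Model of storing an archive-internal pointer as a 32-bit offset from a base.
    // 0x7FFFFFFF mirrors the MAX_SHARED_DELTA limit used by the old code.
    static const uintptr_t kMaxSharedDelta = 0x7FFFFFFF;

    static uint32_t to_offset_u4(const void* base, const void* p) {
      uintptr_t delta = (uintptr_t)p - (uintptr_t)base;
      assert(delta <= kMaxSharedDelta && "offset must fit in 31 bits");
      return (uint32_t)delta;
    }

    static const void* from_offset_u4(const void* base, uint32_t offset) {
      return (const char*)base + offset;
    }

    int main() {
      char buffer[1024];                 // stand-in for the archive output buffer
      const void* record = &buffer[128]; // stand-in for an archived record
      uint32_t delta = to_offset_u4(buffer, record);
      assert(from_offset_u4(buffer, delta) == record);  // round-trip check
      printf("offset = %u\n", (unsigned)delta);
      return 0;
    }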
@ -741,7 +739,10 @@ const ArchivedKlassSubGraphInfoRecord*
 HeapShared::resolve_or_init_classes_for_subgraph_of(Klass* k, bool do_init, TRAPS) {
   assert(!DumpSharedSpaces, "Should not be called with DumpSharedSpaces");

-  unsigned int hash = SystemDictionaryShared::hash_for_shared_dictionary(k);
+  if (!k->is_shared()) {
+    return NULL;
+  }
+  unsigned int hash = SystemDictionaryShared::hash_for_shared_dictionary_quick(k);
   const ArchivedKlassSubGraphInfoRecord* record = _run_time_subgraph_info_table.lookup(k, hash, 0);

   // Initialize from archived data. Currently this is done only

@ -772,7 +773,11 @@ HeapShared::resolve_or_init_classes_for_subgraph_of(Klass* k, bool do_init, TRAP
     Array<Klass*>* klasses = record->subgraph_object_klasses();
     if (klasses != NULL) {
       for (int i = 0; i < klasses->length(); i++) {
-        resolve_or_init(klasses->at(i), do_init, CHECK_NULL);
+        Klass* klass = klasses->at(i);
+        if (!klass->is_shared()) {
+          return NULL;
+        }
+        resolve_or_init(klass, do_init, CHECK_NULL);
       }
     }
   }

@ -829,7 +834,7 @@ void HeapShared::init_archived_fields_for(Klass* k, const ArchivedKlassSubGraphI
 }

 void HeapShared::clear_archived_roots_of(Klass* k) {
-  unsigned int hash = SystemDictionaryShared::hash_for_shared_dictionary(k);
+  unsigned int hash = SystemDictionaryShared::hash_for_shared_dictionary_quick(k);
   const ArchivedKlassSubGraphInfoRecord* record = _run_time_subgraph_info_table.lookup(k, hash, 0);
   if (record != NULL) {
     Array<int>* entry_field_records = record->entry_field_records();

@ -1021,8 +1026,7 @@ oop HeapShared::archive_reachable_objects_from(int level,

   assert(archived_obj != NULL, "must be");
   Klass *orig_k = orig_obj->klass();
-  Klass *relocated_k = archived_obj->klass();
-  subgraph_info->add_subgraph_object_klass(orig_k, relocated_k);
+  subgraph_info->add_subgraph_object_klass(orig_k);

   WalkOopAndArchiveClosure walker(level, is_closed_archive, record_klasses_only,
                                   subgraph_info, orig_obj, archived_obj, THREAD);

@ -1405,12 +1409,16 @@ ResourceBitMap HeapShared::calculate_oopmap(MemRegion region) {
   HeapWord* p = region.start();
   HeapWord* end = region.end();
   FindEmbeddedNonNullPointers finder((narrowOop*)p, &oopmap);
+  ArchiveBuilder* builder = DumpSharedSpaces ? ArchiveBuilder::current() : NULL;

   int num_objs = 0;
   while (p < end) {
     oop o = (oop)p;
     o->oop_iterate(&finder);
     p += o->size();
+    if (DumpSharedSpaces) {
+      builder->relocate_klass_ptr(o);
+    }
     ++ num_objs;
   }

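Aside: the heapShared.cpp hunks above stop passing a relocated_k argument around and instead look the relocated copy up on demand through ArchiveBuilder::get_relocated_klass(orig_k) and ArchiveBuilder::current(). Below is a minimal standalone model of that source-to-copy lookup; ToyArchiveBuilder, make_copy and the Klass struct are invented for illustration and are not the real ArchiveBuilder API.

    #include <cassert>
    #include <string>
    #include <unordered_map>

    // Stand-in for a class metadata object.
    struct Klass {
      std::string name;
    };

    // Toy "archive builder": remembers, for every source Klass, where its copy
    // lives, so callers never have to carry the copy around themselves.
    class ToyArchiveBuilder {
      std::unordered_map<const Klass*, Klass> _relocated;  // copies stored by value
    public:
      Klass* make_copy(const Klass* src) {
        _relocated[src] = *src;        // in HotSpot this would be a buffer allocation
        return &_relocated[src];
      }
      Klass* get_relocated_klass(const Klass* src) {
        auto it = _relocated.find(src);
        assert(it != _relocated.end() && "source Klass was never copied");
        return &it->second;
      }
    };

    int main() {
      ToyArchiveBuilder builder;
      Klass hello{"Hello"};
      Klass* copy = builder.make_copy(&hello);
      // Later code only needs the original; the builder resolves the copy.
      assert(builder.get_relocated_klass(&hello) == copy);
      return 0;
    }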
@ -102,7 +102,7 @@ class KlassSubGraphInfo: public CHeapObj<mtClass> {
   }
   void add_subgraph_entry_field(int static_field_offset, oop v,
                                 bool is_closed_archive);
-  void add_subgraph_object_klass(Klass *orig_k, Klass *relocated_k);
+  void add_subgraph_object_klass(Klass *orig_k);
   int num_subgraph_object_klasses() {
     return _subgraph_object_klasses == NULL ? 0 :
            _subgraph_object_klasses->length();

@ -672,23 +672,27 @@ void Metaspace::global_initialize() {

   metaspace::ChunkHeaderPool::initialize();

+  if (DumpSharedSpaces) {
+    assert(!UseSharedSpaces, "sanity");
+    MetaspaceShared::initialize_for_static_dump();
+  }
+
   // If UseCompressedClassPointers=1, we have two cases:
-  // a) if CDS is active (either dump time or runtime), it will create the ccs
+  // a) if CDS is active (runtime, Xshare=on), it will create the class space
   //    for us, initialize it and set up CompressedKlassPointers encoding.
   //    Class space will be reserved above the mapped archives.
-  // b) if CDS is not active, we will create the ccs on our own. It will be
-  //    placed above the java heap, since we assume it has been placed in low
+  // b) if CDS either deactivated (Xshare=off) or a static dump is to be done (Xshare:dump),
+  //    we will create the class space on our own. It will be placed above the java heap,
+  //    since we assume it has been placed in low
   //    address regions. We may rethink this (see JDK-8244943). Failing that,
   //    it will be placed anywhere.

 #if INCLUDE_CDS
   // case (a)
-  if (DumpSharedSpaces) {
-    MetaspaceShared::initialize_dumptime_shared_and_meta_spaces();
-  } else if (UseSharedSpaces) {
+  if (UseSharedSpaces) {
+    MetaspaceShared::initialize_runtime_shared_and_meta_spaces();
     // If any of the archived space fails to map, UseSharedSpaces
     // is reset to false.
-    MetaspaceShared::initialize_runtime_shared_and_meta_spaces();
   }

   if (DynamicDumpSharedSpaces && !UseSharedSpaces) {

@ -699,7 +703,7 @@ void Metaspace::global_initialize() {
 #ifdef _LP64

   if (using_class_space() && !class_space_is_initialized()) {
-    assert(!UseSharedSpaces && !DumpSharedSpaces, "CDS should be off at this point");
+    assert(!UseSharedSpaces, "CDS archive is not mapped at this point");

     // case (b)
     ReservedSpace rs;

@ -172,13 +172,13 @@ static bool shared_base_valid(char* shared_base) {
 #endif
 }

-static bool shared_base_too_high(char* shared_base, size_t cds_total) {
-  if (SharedBaseAddress != 0 && shared_base < (char*)SharedBaseAddress) {
+static bool shared_base_too_high(char* specified_base, char* aligned_base, size_t cds_max) {
+  if (specified_base != NULL && aligned_base < specified_base) {
     // SharedBaseAddress is very high (e.g., 0xffffffffffffff00) so
     // align_up(SharedBaseAddress, MetaspaceShared::reserved_space_alignment()) has wrapped around.
     return true;
   }
-  if (max_uintx - uintx(shared_base) < uintx(cds_total)) {
+  if (max_uintx - uintx(aligned_base) < uintx(cds_max)) {
     // The end of the archive will wrap around
     return true;
   }

@ -186,164 +186,51 @@ static bool shared_base_too_high(char* shared_base, size_t cds_total) {
   return false;
 }

-static char* compute_shared_base(size_t cds_total) {
-  char* shared_base = (char*)align_up((char*)SharedBaseAddress, MetaspaceShared::reserved_space_alignment());
+static char* compute_shared_base(size_t cds_max) {
+  char* specified_base = (char*)SharedBaseAddress;
+  char* aligned_base = align_up(specified_base, MetaspaceShared::reserved_space_alignment());

   const char* err = NULL;
-  if (shared_base_too_high(shared_base, cds_total)) {
+  if (shared_base_too_high(specified_base, aligned_base, cds_max)) {
     err = "too high";
-  } else if (!shared_base_valid(shared_base)) {
+  } else if (!shared_base_valid(aligned_base)) {
     err = "invalid for this platform";
+  } else {
+    return aligned_base;
   }
-  if (err) {
   log_warning(cds)("SharedBaseAddress (" INTPTR_FORMAT ") is %s. Reverted to " INTPTR_FORMAT,
                    p2i((void*)SharedBaseAddress), err,
                    p2i((void*)Arguments::default_SharedBaseAddress()));
-    SharedBaseAddress = Arguments::default_SharedBaseAddress();
-    shared_base = (char*)align_up((char*)SharedBaseAddress, MetaspaceShared::reserved_space_alignment());
-  }
-  assert(!shared_base_too_high(shared_base, cds_total) && shared_base_valid(shared_base), "Sanity");
-  return shared_base;
+  specified_base = (char*)Arguments::default_SharedBaseAddress();
+  aligned_base = align_up(specified_base, MetaspaceShared::reserved_space_alignment());
+
+  // Make sure the default value of SharedBaseAddress specified in globals.hpp is sane.
+  assert(!shared_base_too_high(specified_base, aligned_base, cds_max), "Sanity");
+  assert(shared_base_valid(aligned_base), "Sanity");
+  return aligned_base;
 }
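Aside: the rewritten compute_shared_base() keeps the user-specified base and its aligned-up value separate so both wrap-around checks can see them, and returns early when the requested base is usable. The sketch below reproduces only that align-then-validate flow under assumed values (a 64 KB alignment and the 0x800000000 default base) and omits the platform-specific shared_base_valid() check; it is a model, not the HotSpot function.

    #include <cassert>
    #include <cstdint>
    #include <cstdio>

    static const uint64_t kAlignment   = 64 * 1024;       // assumed reservation alignment
    static const uint64_t kDefaultBase = 0x800000000ULL;   // default SharedBaseAddress on 64-bit

    static uint64_t align_up_u64(uint64_t p, uint64_t alignment) {
      return (p + alignment - 1) & ~(alignment - 1);
    }

    // Mirrors the two wrap-around checks in shared_base_too_high():
    // 1) aligning a very high base wrapped past the end of the address space;
    // 2) base + maximum archive size would wrap around.
    static bool base_too_high(uint64_t specified, uint64_t aligned, uint64_t cds_max) {
      if (specified != 0 && aligned < specified) return true;
      if (UINT64_MAX - aligned < cds_max) return true;
      return false;
    }

    static uint64_t compute_base(uint64_t specified, uint64_t cds_max) {
      uint64_t aligned = align_up_u64(specified, kAlignment);
      if (!base_too_high(specified, aligned, cds_max)) {
        return aligned;                  // the requested base is usable
      }
      // Fall back to the default base, which must always pass the checks.
      specified = kDefaultBase;
      aligned = align_up_u64(specified, kAlignment);
      assert(!base_too_high(specified, aligned, cds_max));
      return aligned;
    }

    int main() {
      uint64_t cds_max = 4ULL * 1024 * 1024 * 1024;   // 4G cap, as on 64-bit
      printf("ok base:  0x%llx\n", (unsigned long long)compute_base(0x800000000ULL, cds_max));
      printf("too high: 0x%llx\n", (unsigned long long)compute_base(0xffffffffffffff00ULL, cds_max));
      return 0;
    }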
-void MetaspaceShared::initialize_dumptime_shared_and_meta_spaces() {
+void MetaspaceShared::initialize_for_static_dump() {
   assert(DumpSharedSpaces, "should be called for dump time only");

+  // The max allowed size for CDS archive. We use this to limit SharedBaseAddress
+  // to avoid address space wrap around.
+  size_t cds_max;
   const size_t reserve_alignment = MetaspaceShared::reserved_space_alignment();

 #ifdef _LP64
-  // On 64-bit VM we reserve a 4G range and, if UseCompressedClassPointers=1,
-  // will use that to house both the archives and the ccs. See below for
-  // details.
   const uint64_t UnscaledClassSpaceMax = (uint64_t(max_juint) + 1);
-  const size_t cds_total = align_down(UnscaledClassSpaceMax, reserve_alignment);
+  cds_max = align_down(UnscaledClassSpaceMax, reserve_alignment);
 #else
   // We don't support archives larger than 256MB on 32-bit due to limited
   // virtual address space.
-  size_t cds_total = align_down(256*M, reserve_alignment);
+  cds_max = align_down(256*M, reserve_alignment);
 #endif

-  char* shared_base = compute_shared_base(cds_total);
-  _requested_base_address = shared_base;
+  _requested_base_address = compute_shared_base(cds_max);
+  SharedBaseAddress = (size_t)_requested_base_address;

-  // Whether to use SharedBaseAddress as attach address.
-  bool use_requested_base = true;
-
-  if (shared_base == NULL) {
-    use_requested_base = false;
-  }
-
-  if (ArchiveRelocationMode == 1) {
-    log_info(cds)("ArchiveRelocationMode == 1: always allocate class space at an alternative address");
-    use_requested_base = false;
-  }
-
-  // First try to reserve the space at the specified SharedBaseAddress.
-  assert(!_shared_rs.is_reserved(), "must be");
-  if (use_requested_base) {
-    _shared_rs = ReservedSpace(cds_total, reserve_alignment,
-                               false /* large */, (char*)shared_base);
-    if (_shared_rs.is_reserved()) {
-      assert(_shared_rs.base() == shared_base, "should match");
-    } else {
-      log_info(cds)("dumptime space reservation: failed to map at "
-                    "SharedBaseAddress " PTR_FORMAT, p2i(shared_base));
-    }
-  }
-  if (!_shared_rs.is_reserved()) {
-    // Get a reserved space anywhere if attaching at the SharedBaseAddress
-    // fails:
-    if (UseCompressedClassPointers) {
-      // If we need to reserve class space as well, let the platform handle
-      // the reservation.
-      LP64_ONLY(_shared_rs =
-                Metaspace::reserve_address_space_for_compressed_classes(cds_total);)
-      NOT_LP64(ShouldNotReachHere();)
-    } else {
-      // anywhere is fine.
-      _shared_rs = ReservedSpace(cds_total, reserve_alignment,
-                                 false /* large */, (char*)NULL);
-    }
-  }
-
-  if (!_shared_rs.is_reserved()) {
-    vm_exit_during_initialization("Unable to reserve memory for shared space",
-                                  err_msg(SIZE_FORMAT " bytes.", cds_total));
-  }
-
-#ifdef _LP64
-
-  if (UseCompressedClassPointers) {
-
-    assert(CompressedKlassPointers::is_valid_base((address)_shared_rs.base()), "Sanity");
-
-    // On 64-bit VM, if UseCompressedClassPointers=1, the compressed class space
-    // must be allocated near the cds such as that the compressed Klass pointer
-    // encoding can be used to en/decode pointers from both cds and ccs. Since
-    // Metaspace cannot do this (it knows nothing about cds), we do it for
-    // Metaspace here and pass it the space to use for ccs.
-    //
-    // We do this by reserving space for the ccs behind the archives. Note
-    // however that ccs follows a different alignment
-    // (Metaspace::reserve_alignment), so there may be a gap between ccs and
-    // cds.
-    // We use a similar layout at runtime, see reserve_address_space_for_archives().
-    //
-    // +-- SharedBaseAddress (default = 0x800000000)
-    // v
-    // +-..---------+---------+ ... +----+----+----+--------+-----------------+
-    // | Heap | Archive | | MC | RW | RO | [gap] | class space |
-    // +-..---------+---------+ ... +----+----+----+--------+-----------------+
-    // |<-- MaxHeapSize -->| |<-- UnscaledClassSpaceMax = 4GB -->|
-    //
-    // Note: ccs must follow the archives, and the archives must start at the
-    // encoding base. However, the exact placement of ccs does not matter as
-    // long as it it resides in the encoding range of CompressedKlassPointers
-    // and comes after the archive.
-    //
-    // We do this by splitting up the allocated 4G into 3G of archive space,
-    // followed by 1G for the ccs:
-    // + The upper 1 GB is used as the "temporary compressed class space"
-    //   -- preload_classes() will store Klasses into this space.
-    // + The lower 3 GB is used for the archive -- when preload_classes()
-    //   is done, ArchiveBuilder will copy the class metadata into this
-    //   space, first the RW parts, then the RO parts.
-
-    // Starting address of ccs must be aligned to Metaspace::reserve_alignment()...
-    size_t class_space_size = align_down(_shared_rs.size() / 4, Metaspace::reserve_alignment());
-    address class_space_start = (address)align_down(_shared_rs.end() - class_space_size, Metaspace::reserve_alignment());
-    size_t archive_size = class_space_start - (address)_shared_rs.base();
-
-    ReservedSpace tmp_class_space = _shared_rs.last_part(archive_size);
-    _shared_rs = _shared_rs.first_part(archive_size);
-
-    // ... as does the size of ccs.
-    tmp_class_space = tmp_class_space.first_part(class_space_size);
-    CompressedClassSpaceSize = class_space_size;
-
-    // Let Metaspace initialize ccs
-    Metaspace::initialize_class_space(tmp_class_space);
-
-    // and set up CompressedKlassPointers encoding.
-    CompressedKlassPointers::initialize((address)_shared_rs.base(), cds_total);
-
-    log_info(cds)("narrow_klass_base = " PTR_FORMAT ", narrow_klass_shift = %d",
-                  p2i(CompressedKlassPointers::base()), CompressedKlassPointers::shift());
-
-    log_info(cds)("Allocated temporary class space: " SIZE_FORMAT " bytes at " PTR_FORMAT,
-                  CompressedClassSpaceSize, p2i(tmp_class_space.base()));
-
-    assert(_shared_rs.end() == tmp_class_space.base() &&
-           is_aligned(_shared_rs.base(), MetaspaceShared::reserved_space_alignment()) &&
-           is_aligned(tmp_class_space.base(), Metaspace::reserve_alignment()) &&
-           is_aligned(tmp_class_space.size(), Metaspace::reserve_alignment()), "Sanity");
-  }
-
-#endif
-
-  init_shared_dump_space(&_mc_region);
-  SharedBaseAddress = (size_t)_shared_rs.base();
-  log_info(cds)("Allocated shared space: " SIZE_FORMAT " bytes at " PTR_FORMAT,
-                _shared_rs.size(), p2i(_shared_rs.base()));
-
   size_t symbol_rs_size = LP64_ONLY(3 * G) NOT_LP64(128 * M);
   _symbol_rs = ReservedSpace(symbol_rs_size);
@ -462,10 +349,6 @@ void MetaspaceShared::commit_to(ReservedSpace* rs, VirtualSpace* vs, char* newto
                 which, commit, vs->actual_committed_size(), vs->high());
 }

-void MetaspaceShared::initialize_ptr_marker(CHeapBitMap* ptrmap) {
-  ArchivePtrMarker::initialize(ptrmap, (address*)_shared_vs.low(), (address*)_shared_vs.high());
-}
-
 // Read/write a data stream for restoring/preserving metadata pointers and
 // miscellaneous data from/to the shared archive file.

@ -528,18 +411,6 @@ address MetaspaceShared::i2i_entry_code_buffers() {
   return _i2i_entry_code_buffers;
 }

-uintx MetaspaceShared::object_delta_uintx(void* obj) {
-  Arguments::assert_is_dumping_archive();
-  if (DumpSharedSpaces) {
-    assert(shared_rs()->contains(obj), "must be");
-  } else {
-    assert(is_in_shared_metaspace(obj) || DynamicArchive::is_in_target_space(obj), "must be");
-  }
-  address base_address = address(SharedBaseAddress);
-  uintx deltax = address(obj) - base_address;
-  return deltax;
-}
-
 // Global object for holding classes that have been loaded. Since this
 // is run at a safepoint just before exit, this is the entire set of classes.
 static GrowableArray<Klass*>* _global_klass_objects;

@ -601,7 +472,6 @@ private:
   void print_bitmap_region_stats(size_t size, size_t total_size);
   void print_heap_region_stats(GrowableArray<MemRegion> *heap_mem,
                                const char *name, size_t total_size);
-  void relocate_to_requested_base_address(CHeapBitMap* ptrmap);

 public:

@ -619,10 +489,7 @@ public:
 class StaticArchiveBuilder : public ArchiveBuilder {
 public:
   StaticArchiveBuilder(DumpRegion* mc_region, DumpRegion* rw_region, DumpRegion* ro_region)
-    : ArchiveBuilder(mc_region, rw_region, ro_region) {
-    _alloc_bottom = address(SharedBaseAddress);
-    _buffer_to_target_delta = 0;
-  }
+    : ArchiveBuilder(mc_region, rw_region, ro_region) {}

   virtual void iterate_roots(MetaspaceClosure* it, bool is_relocating_pointers) {
     FileMapInfo::metaspace_pointers_do(it, false);

@ -661,50 +528,8 @@ char* VM_PopulateDumpSharedSpace::dump_read_only_tables() {
   return start;
 }

-void VM_PopulateDumpSharedSpace::relocate_to_requested_base_address(CHeapBitMap* ptrmap) {
-  intx addr_delta = MetaspaceShared::final_delta();
-  if (addr_delta == 0) {
-    ArchivePtrMarker::compact((address)SharedBaseAddress, (address)_ro_region.top());
-  } else {
-    // We are not able to reserve space at MetaspaceShared::requested_base_address() (due to ASLR).
-    // This means that the current content of the archive is based on a random
-    // address. Let's relocate all the pointers, so that it can be mapped to
-    // MetaspaceShared::requested_base_address() without runtime relocation.
-    //
-    // Note: both the base and dynamic archive are written with
-    // FileMapHeader::_requested_base_address == MetaspaceShared::requested_base_address()
-
-    // Patch all pointers that are marked by ptrmap within this region,
-    // where we have just dumped all the metaspace data.
-    address patch_base = (address)SharedBaseAddress;
-    address patch_end = (address)_ro_region.top();
-    size_t size = patch_end - patch_base;
-
-    // the current value of the pointers to be patched must be within this
-    // range (i.e., must point to valid metaspace objects)
-    address valid_old_base = patch_base;
-    address valid_old_end = patch_end;
-
-    // after patching, the pointers must point inside this range
-    // (the requested location of the archive, as mapped at runtime).
-    address valid_new_base = (address)MetaspaceShared::requested_base_address();
-    address valid_new_end = valid_new_base + size;
-
-    log_debug(cds)("Relocating archive from [" INTPTR_FORMAT " - " INTPTR_FORMAT " ] to "
-                   "[" INTPTR_FORMAT " - " INTPTR_FORMAT " ]", p2i(patch_base), p2i(patch_end),
-                   p2i(valid_new_base), p2i(valid_new_end));
-
-    SharedDataRelocator<true> patcher((address*)patch_base, (address*)patch_end, valid_old_base, valid_old_end,
-                                      valid_new_base, valid_new_end, addr_delta, ptrmap);
-    ptrmap->iterate(&patcher);
-    ArchivePtrMarker::compact(patcher.max_non_null_offset());
-  }
-}
-
 void VM_PopulateDumpSharedSpace::doit() {
   HeapShared::run_full_gc_in_vm_thread();
-  CHeapBitMap ptrmap;
-  MetaspaceShared::initialize_ptr_marker(&ptrmap);

   // We should no longer allocate anything from the metaspace, so that:
   //

@ -733,8 +558,8 @@ void VM_PopulateDumpSharedSpace::doit() {
   SystemDictionaryShared::check_excluded_classes();

   StaticArchiveBuilder builder(&_mc_region, &_rw_region, &_ro_region);
-  builder.set_current_dump_space(&_mc_region);
   builder.gather_klasses_and_symbols();
+  builder.reserve_buffer();
   _global_klass_objects = builder.klasses();

   builder.gather_source_objs();

@ -770,15 +595,16 @@ void VM_PopulateDumpSharedSpace::doit() {
     }
 #endif
   }
-  builder.relocate_pointers();
+  builder.relocate_metaspaceobj_embedded_pointers();

-  dump_shared_symbol_table(builder.symbols());
-
   // Dump supported java heap objects
   _closed_archive_heap_regions = NULL;
   _open_archive_heap_regions = NULL;
   dump_java_heap_objects();

+  builder.relocate_roots();
+  dump_shared_symbol_table(builder.symbols());
+
   builder.relocate_vm_classes();

   log_info(cds)("Update method trampolines");

@ -798,7 +624,7 @@ void VM_PopulateDumpSharedSpace::doit() {

   // relocate the data so that it can be mapped to MetaspaceShared::requested_base_address()
   // without runtime relocation.
-  relocate_to_requested_base_address(&ptrmap);
+  builder.relocate_to_requested();

   // Create and write the archive file that maps the shared spaces.

@ -823,7 +649,7 @@ void VM_PopulateDumpSharedSpace::doit() {
                           MetaspaceShared::first_open_archive_heap_region,
                           MetaspaceShared::max_open_archive_heap_region);

-  mapinfo->set_final_requested_base((char*)MetaspaceShared::requested_base_address());
+  mapinfo->set_requested_base((char*)MetaspaceShared::requested_base_address());
   mapinfo->set_header_crc(mapinfo->compute_header_crc());
   mapinfo->write_header();
   print_region_stats(mapinfo);
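Aside: to make the reordered dump sequence easier to follow, the stub program below calls invented stand-ins in the order the real calls now appear in the VM_PopulateDumpSharedSpace::doit() hunks above. It lists only the calls visible in those hunks and is an illustration, not the actual VM operation.

    #include <cstdio>

    // Toy stand-ins for the builder calls visible in the hunks above; each step
    // just announces itself so the overall ordering is easy to see.
    struct ToyBuilder {
      void gather_klasses_and_symbols()              { puts("gather_klasses_and_symbols"); }
      void reserve_buffer()                          { puts("reserve_buffer (buffer now owned by the builder)"); }
      void gather_source_objs()                      { puts("gather_source_objs"); }
      void relocate_metaspaceobj_embedded_pointers() { puts("relocate_metaspaceobj_embedded_pointers"); }
      void relocate_roots()                          { puts("relocate_roots"); }
      void relocate_vm_classes()                     { puts("relocate_vm_classes"); }
      void relocate_to_requested()                   { puts("relocate_to_requested"); }
    };

    static void dump_java_heap_objects()   { puts("dump_java_heap_objects"); }
    static void dump_shared_symbol_table() { puts("dump_shared_symbol_table"); }

    int main() {
      ToyBuilder builder;
      builder.gather_klasses_and_symbols();
      builder.reserve_buffer();
      builder.gather_source_objs();
      builder.relocate_metaspaceobj_embedded_pointers();
      dump_java_heap_objects();
      builder.relocate_roots();
      dump_shared_symbol_table();
      builder.relocate_vm_classes();
      builder.relocate_to_requested();
      return 0;
    }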
@ -919,23 +745,6 @@ void MetaspaceShared::write_region(FileMapInfo* mapinfo, int region_idx, DumpReg
   mapinfo->write_region(region_idx, dump_region->base(), dump_region->used(), read_only, allow_exec);
 }

-// Update a Java object to point its Klass* to the new location after
-// shared archive has been compacted.
-void MetaspaceShared::relocate_klass_ptr(oop o) {
-  assert(DumpSharedSpaces, "sanity");
-  Klass* k = ArchiveBuilder::get_relocated_klass(o->klass());
-  o->set_klass(k);
-}
-
-Klass* MetaspaceShared::get_relocated_klass(Klass *k, bool is_final) {
-  assert(DumpSharedSpaces, "sanity");
-  k = ArchiveBuilder::get_relocated_klass(k);
-  if (is_final) {
-    k = (Klass*)(address(k) + final_delta());
-  }
-  return k;
-}
-
 static GrowableArray<ClassLoaderData*>* _loaded_cld = NULL;

 class CollectCLDClosure : public CLDClosure {

@ -1836,13 +1645,6 @@ void MetaspaceShared::report_out_of_space(const char* name, size_t needed_bytes)
                 "Please reduce the number of shared classes.");
 }

-// This is used to relocate the pointers so that the base archive can be mapped at
-// MetaspaceShared::requested_base_address() without runtime relocation.
-intx MetaspaceShared::final_delta() {
-  return intx(MetaspaceShared::requested_base_address())  // We want the base archive to be mapped to here at runtime
-         - intx(SharedBaseAddress);                       // .. but the base archive is mapped at here at dump time
-}
-
 bool MetaspaceShared::use_full_module_graph() {
 #if INCLUDE_CDS_JAVA_HEAP
   if (ClassLoaderDataShared::is_full_module_graph_loaded()) {

@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2012, 2020, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2012, 2021, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it

@ -33,8 +33,6 @@
 #include "utilities/macros.hpp"
 #include "utilities/resourceHash.hpp"

-#define MAX_SHARED_DELTA (0x7FFFFFFF)
-
 // Metaspace::allocate() requires that all blocks must be aligned with KlassAlignmentInBytes.
 // We enforce the same alignment rule in blocks allocated from the shared space.
 const int SharedSpaceObjectAlignment = KlassAlignmentInBytes;

@ -125,22 +123,12 @@ class MetaspaceShared : AllStatic {
   }

   static void commit_to(ReservedSpace* rs, VirtualSpace* vs, char* newtop) NOT_CDS_RETURN;
-  static void initialize_dumptime_shared_and_meta_spaces() NOT_CDS_RETURN;
+  static void initialize_for_static_dump() NOT_CDS_RETURN;
   static void initialize_runtime_shared_and_meta_spaces() NOT_CDS_RETURN;
   static void post_initialize(TRAPS) NOT_CDS_RETURN;

   static void print_on(outputStream* st);

-  // Delta of this object from SharedBaseAddress
-  static uintx object_delta_uintx(void* obj);
-
-  static u4 object_delta_u4(void* obj) {
-    // offset is guaranteed to be less than MAX_SHARED_DELTA in DumpRegion::expand_top_to()
-    uintx deltax = object_delta_uintx(obj);
-    guarantee(deltax <= MAX_SHARED_DELTA, "must be 32-bit offset");
-    return (u4)deltax;
-  }
-
   static void set_archive_loading_failed() {
     _archive_loading_failed = true;
   }

@ -239,20 +227,27 @@ class MetaspaceShared : AllStatic {
   static void init_misc_code_space();
   static address i2i_entry_code_buffers();

-  static void relocate_klass_ptr(oop o);
-  static Klass* get_relocated_klass(Klass *k, bool is_final=false);
-
   static void initialize_ptr_marker(CHeapBitMap* ptrmap);

   // This is the base address as specified by -XX:SharedBaseAddress during -Xshare:dump.
   // Both the base/top archives are written using this as their base address.
+  //
+  // During static dump: _requested_base_address == SharedBaseAddress.
+  //
+  // During dynamic dump: _requested_base_address is not always the same as SharedBaseAddress:
+  // - SharedBaseAddress is used for *reading the base archive*. I.e., CompactHashtable uses
+  //   it to convert offsets to pointers to Symbols in the base archive.
+  //   The base archive may be mapped to an OS-selected address due to ASLR. E.g.,
+  //   you may have SharedBaseAddress == 0x00ff123400000000.
+  // - _requested_base_address is used for *writing the output archive*. It's usually
+  //   0x800000000 (unless it was set by -XX:SharedBaseAddress during -Xshare:dump).
   static char* requested_base_address() {
     return _requested_base_address;
   }

   // Non-zero if the archive(s) need to be mapped a non-default location due to ASLR.
   static intx relocation_delta() { return _relocation_delta; }
-  static intx final_delta();
   static bool use_windows_memory_mapping() {
     const bool is_windows = (NOT_WINDOWS(false) WINDOWS_ONLY(true));
     //const bool is_windows = true; // enable this to allow testing the windows mmap semantics on Linux, etc.

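Aside: the new comment on _requested_base_address separates the base an archive is written for from the base the mapped archive happens to land on under ASLR. The small program below only illustrates why stored offsets stay valid across that difference; the addresses are example values (0x00ff123400000000 is taken from the comment above) and the code is not HotSpot's.

    #include <cassert>
    #include <cstdint>
    #include <cstdio>

    int main() {
      // Offsets inside an archive are relative to the base the archive was written for.
      const uint64_t requested_base        = 0x800000000ULL;   // base used when *writing*
      const uint64_t symbol_requested_addr = 0x800001230ULL;
      const uint32_t stored_offset = (uint32_t)(symbol_requested_addr - requested_base);

      // At runtime (or while a dynamic dump reads the base archive), ASLR may have
      // mapped the archive somewhere else; that actual address plays the role of
      // SharedBaseAddress when decoding offsets back into pointers.
      const uint64_t mapped_base = 0x00ff123400000000ULL;      // example value only
      const uint64_t symbol_runtime_addr = mapped_base + stored_offset;

      assert(symbol_runtime_addr - mapped_base == symbol_requested_addr - requested_base);
      printf("offset 0x%x decodes to 0x%llx when mapped at 0x%llx\n",
             (unsigned)stored_offset,
             (unsigned long long)symbol_runtime_addr,
             (unsigned long long)mapped_base);
      return 0;
    }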
@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2019, 2020, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2019, 2021, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it

@ -181,10 +181,13 @@ public:
   static bool is_null(Klass* v) { return v == NULL; }
   static bool is_null(narrowKlass v) { return v == 0; }

+  static inline Klass* decode_raw(narrowKlass v, address base);
   static inline Klass* decode_raw(narrowKlass v);
   static inline Klass* decode_not_null(narrowKlass v);
+  static inline Klass* decode_not_null(narrowKlass v, address base);
   static inline Klass* decode(narrowKlass v);
   static inline narrowKlass encode_not_null(Klass* v);
+  static inline narrowKlass encode_not_null(Klass* v, address base);
   static inline narrowKlass encode(Klass* v);

 };

@ -117,12 +117,20 @@ static inline bool check_alignment(Klass* v) {
 }

 inline Klass* CompressedKlassPointers::decode_raw(narrowKlass v) {
-  return (Klass*)(void*)((uintptr_t)base() +((uintptr_t)v << shift()));
+  return decode_raw(v, base());
 }

+inline Klass* CompressedKlassPointers::decode_raw(narrowKlass v, address narrow_base) {
+  return (Klass*)(void*)((uintptr_t)narrow_base +((uintptr_t)v << shift()));
+}
+
 inline Klass* CompressedKlassPointers::decode_not_null(narrowKlass v) {
+  return decode_not_null(v, base());
+}
+
+inline Klass* CompressedKlassPointers::decode_not_null(narrowKlass v, address narrow_base) {
   assert(!is_null(v), "narrow klass value can never be zero");
-  Klass* result = decode_raw(v);
+  Klass* result = decode_raw(v, narrow_base);
   assert(check_alignment(result), "address not aligned: " INTPTR_FORMAT, p2i((void*) result));
   return result;
 }

@ -132,13 +140,17 @@ inline Klass* CompressedKlassPointers::decode(narrowKlass v) {
 }

 inline narrowKlass CompressedKlassPointers::encode_not_null(Klass* v) {
+  return encode_not_null(v, base());
+}
+
+inline narrowKlass CompressedKlassPointers::encode_not_null(Klass* v, address narrow_base) {
   assert(!is_null(v), "klass value can never be zero");
   assert(check_alignment(v), "Address not aligned");
-  uint64_t pd = (uint64_t)(pointer_delta((void*)v, base(), 1));
+  uint64_t pd = (uint64_t)(pointer_delta((void*)v, narrow_base, 1));
   assert(KlassEncodingMetaspaceMax > pd, "change encoding max if new encoding");
   uint64_t result = pd >> shift();
   assert((result & CONST64(0xffffffff00000000)) == 0, "narrow klass pointer overflow");
-  assert(decode(result) == v, "reversibility");
+  assert(decode_not_null(result, narrow_base) == v, "reversibility");
   return (narrowKlass)result;
 }

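Aside: the new two-argument overloads make the narrow-Klass arithmetic explicit against a caller-supplied base: decode is base + (value << shift), encode is (address - base) >> shift with an overflow and reversibility check, presumably so dump-time code can compute values against a base other than the live encoding base. The standalone round-trip below uses an assumed shift of 3 and an arbitrary base; it is a model, not the CompressedKlassPointers class.

    #include <cassert>
    #include <cstdint>
    #include <cstdio>

    typedef uint32_t narrowKlass;

    static const int kShift = 3;   // assumed narrow-klass shift; HotSpot picks 0 or 3

    // decode: widen the 32-bit value and rebase it.
    static uint64_t decode_not_null(narrowKlass v, uint64_t base) {
      assert(v != 0 && "narrow klass value can never be zero");
      return base + ((uint64_t)v << kShift);
    }

    // encode: delta from the base, shifted down; the result must fit in 32 bits.
    static narrowKlass encode_not_null(uint64_t klass_addr, uint64_t base) {
      uint64_t pd = klass_addr - base;
      uint64_t result = pd >> kShift;
      assert((result & 0xffffffff00000000ULL) == 0 && "narrow klass pointer overflow");
      assert(decode_not_null((narrowKlass)result, base) == klass_addr && "reversibility");
      return (narrowKlass)result;
    }

    int main() {
      // With an explicit base, a value can be encoded against a chosen base
      // (e.g. the requested archive base) rather than the current one.
      uint64_t requested_base = 0x800000000ULL;
      uint64_t klass_addr     = requested_base + 0x12340;   // 8-byte aligned example
      narrowKlass nk = encode_not_null(klass_addr, requested_base);
      printf("narrow = 0x%x, decoded = 0x%llx\n", (unsigned)nk,
             (unsigned long long)decode_not_null(nk, requested_base));
      return 0;
    }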
@ -143,6 +143,14 @@ bool oopDesc::has_klass_gap() {
   return UseCompressedClassPointers;
 }

+#if INCLUDE_CDS_JAVA_HEAP
+void oopDesc::set_narrow_klass(narrowKlass nk) {
+  assert(DumpSharedSpaces, "Used by CDS only. Do not abuse!");
+  assert(UseCompressedClassPointers, "must be");
+  _metadata._compressed_klass = nk;
+}
+#endif
+
 void* oopDesc::load_klass_raw(oop obj) {
   if (UseCompressedClassPointers) {
     narrowKlass narrow_klass = obj->_metadata._compressed_klass;

@ -76,6 +76,7 @@ class oopDesc {
   inline Klass* klass_or_null() const;
   inline Klass* klass_or_null_acquire() const;

+  void set_narrow_klass(narrowKlass nk) NOT_CDS_JAVA_HEAP_RETURN;
   inline void set_klass(Klass* k);
   static inline void release_set_klass(HeapWord* mem, Klass* k);

@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2019, 2021, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it

@ -40,9 +40,8 @@ import jtreg.SkippedException;
 public class ArchiveRelocationTest {
     public static void main(String... args) throws Exception {
         try {
-            test(true, false);
-            test(false, true);
-            test(true, true);
+            test(false);
+            test(true);
         } catch (SkippedException s) {
             s.printStackTrace();
             throw new RuntimeException("Archive mapping should always succeed after JDK-8231610 (did the machine run out of memory?)");

@ -51,20 +50,17 @@ public class ArchiveRelocationTest {

     static int caseCount = 0;

-    // dump_reloc - force relocation of archive during dump time?
     // run_reloc - force relocation of archive during run time?
-    static void test(boolean dump_reloc, boolean run_reloc) throws Exception {
+    // Note: relocation always happens during dumping.
+    static void test(boolean run_reloc) throws Exception {
         caseCount += 1;
         System.out.println("============================================================");
-        System.out.println("case = " + caseCount + ", dump = " + dump_reloc
-                           + ", run = " + run_reloc);
+        System.out.println("case = " + caseCount + ", run_reloc = " + run_reloc);
         System.out.println("============================================================");

         String appJar = ClassFileInstaller.getJarPath("hello.jar");
         String mainClass = "Hello";
         String forceRelocation = "-XX:ArchiveRelocationMode=1";
-        String dumpRelocArg = dump_reloc ? forceRelocation : "-showversion";
         String runRelocArg = run_reloc ? forceRelocation : "-showversion";
         String logArg = "-Xlog:cds=debug,cds+reloc=debug";
         String unlockArg = "-XX:+UnlockDiagnosticVMOptions";

@ -72,11 +68,8 @@ public class ArchiveRelocationTest {

         OutputAnalyzer out = TestCommon.dump(appJar,
                                              TestCommon.list(mainClass),
-                                             unlockArg, dumpRelocArg, logArg, nmtArg);
-        if (dump_reloc) {
-            out.shouldContain("ArchiveRelocationMode == 1: always allocate class space at an alternative address");
-            out.shouldContain("Relocating archive from");
-        }
+                                             unlockArg, logArg, nmtArg);
+        out.shouldContain("Relocating archive from");

         TestCommon.run("-cp", appJar, unlockArg, runRelocArg, logArg, mainClass)
                   .assertNormalExit(output -> {
@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2014, 2019, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2014, 2021, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it

@ -74,9 +74,12 @@ public class ExtraSymbols {
     }

     static int numOfEntries(OutputAnalyzer output) {
-        String s = output.firstMatch("Number of entries : .*");
+        // Look for this pattern:
+        // [4.661s][info][cds,hashtables] Shared symbol table stats -------- base: 0x0000000800000000
+        // [4.661s][info][cds,hashtables] Number of entries : 50078
+        String s = output.firstMatch("Shared symbol table stats[^\n]*\n[^\n]*Number of entries : .*");
         String subs[] = s.split("[:]");
-        int numEntries = Integer.parseInt(subs[1].trim());
+        int numEntries = Integer.parseInt(subs[2].trim());
         return numEntries;
     }

@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2019, 2020, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2019, 2021, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it

@ -57,8 +57,7 @@ public class AppendClasspath extends DynamicArchiveTestBase {
                 "-Xlog:cds+dynamic=debug",
                 "-cp", appJar, "Hello")
             .assertNormalExit(output -> {
-                output.shouldContain("Buffer-space to target-space delta")
-                      .shouldContain("Written dynamic archive 0x");
+                output.shouldContain("Written dynamic archive 0x");
                 });

         // runtime with classpath containing the one used in dump time,

@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2019, 2020, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2019, 2021, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it

@ -139,8 +139,7 @@ public class ArchiveConsistency extends DynamicArchiveTestBase {
                 "-Xlog:cds+dynamic=debug",
                 "-cp", appJar, mainClass)
             .assertNormalExit(output -> {
-                output.shouldContain("Buffer-space to target-space delta")
-                      .shouldContain("Written dynamic archive 0x");
+                output.shouldContain("Written dynamic archive 0x");
                 });

         File jsa = new File(topArchiveName);

@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2019, 2020, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2019, 2021, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it

@ -59,8 +59,7 @@ public class ClassResolutionFailure extends DynamicArchiveTestBase {
                 "-Xlog:class+load=trace",
                 "-cp", appJar, mainClass)
             .assertNormalExit(output -> {
-                output.shouldContain("Buffer-space to target-space delta")
-                      .shouldContain("Written dynamic archive 0x");
+                output.shouldContain("Written dynamic archive 0x");
                 });

         run(topArchiveName,
@ -43,27 +43,24 @@ import jtreg.SkippedException;
 public class DynamicArchiveRelocationTest extends DynamicArchiveTestBase {
     public static void main(String... args) throws Exception {
         try {
-            testOuter(false);
-            testOuter(true);
+            testOuter();
         } catch (SkippedException s) {
             s.printStackTrace();
             throw new RuntimeException("Archive mapping should always succeed after JDK-8231610 (did the machine run out of memory?)");
         }
     }

-    static void testOuter(boolean dump_base_reloc) throws Exception {
-        testInner(dump_base_reloc, true, false);
-        testInner(dump_base_reloc, false, true);
-        testInner(dump_base_reloc, true, true);
+    static void testOuter() throws Exception {
+        testInner(true, false);
+        testInner(false, true);
+        testInner(true, true);
     }

-    static boolean dump_base_reloc, dump_top_reloc, run_reloc;
+    static boolean dump_top_reloc, run_reloc;

-    // dump_base_reloc - force relocation of archive when dumping base archive
     // dump_top_reloc - force relocation of archive when dumping top archive
     // run_reloc - force relocation of archive when running
-    static void testInner(boolean dump_base_reloc, boolean dump_top_reloc, boolean run_reloc) throws Exception {
-        DynamicArchiveRelocationTest.dump_base_reloc = dump_base_reloc;
+    static void testInner(boolean dump_top_reloc, boolean run_reloc) throws Exception {
         DynamicArchiveRelocationTest.dump_top_reloc = dump_top_reloc;
         DynamicArchiveRelocationTest.run_reloc = run_reloc;

@ -74,15 +71,14 @@ public class DynamicArchiveRelocationTest extends DynamicArchiveTestBase {
     static void doTest() throws Exception {
         caseCount += 1;
         System.out.println("============================================================");
-        System.out.println("case = " + caseCount + ", base = " + dump_base_reloc
-                           + ", top = " + dump_top_reloc
+        System.out.println("case = " + caseCount
+                           + ", top_reloc = " + dump_top_reloc
                            + ", run = " + run_reloc);
         System.out.println("============================================================");

         String appJar = ClassFileInstaller.getJarPath("hello.jar");
         String mainClass = "Hello";
         String forceRelocation = "-XX:ArchiveRelocationMode=1";
-        String dumpBaseRelocArg = dump_base_reloc ? forceRelocation : "-showversion";
         String dumpTopRelocArg = dump_top_reloc ? forceRelocation : "-showversion";
         String runRelocArg = run_reloc ? forceRelocation : "-showversion";
         String logArg = "-Xlog:cds=debug,cds+reloc=debug";

@ -101,11 +97,8 @@ public class DynamicArchiveRelocationTest extends DynamicArchiveTestBase {

         // (1) Dump base archive (static)

-        OutputAnalyzer out = TestCommon.dumpBaseArchive(baseArchiveName, unlockArg, dumpBaseRelocArg, logArg);
-        if (dump_base_reloc) {
-            out.shouldContain("ArchiveRelocationMode == 1: always allocate class space at an alternative address");
-            out.shouldContain("Relocating archive from");
-        }
+        OutputAnalyzer out = TestCommon.dumpBaseArchive(baseArchiveName, unlockArg, logArg);
+        out.shouldContain("Relocating archive from");

         // (2) Dump top archive (dynamic)

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2019, 2020, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2019, 2021, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -80,8 +80,7 @@ public class DynamicLotsOfClasses extends DynamicArchiveTestBase {
                    "-XX:+UnlockDiagnosticVMOptions", "-XX:+WhiteBoxAPI",
                    "-cp", appJar, mainClass, classList)
                    .assertNormalExit(output -> {
-                        output.shouldContain("Buffer-space to target-space delta")
-                              .shouldContain("Written dynamic archive 0x");
+                        output.shouldContain("Written dynamic archive 0x");
                    });
        }
 }
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2019, 2020, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2019, 2021, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -65,8 +65,7 @@ public class HelloDynamic extends DynamicArchiveTestBase {
             "-Xlog:cds+dynamic=debug",
             "-cp", appJar, mainClass)
             .assertNormalExit(output -> {
-                output.shouldContain("Buffer-space to target-space delta")
-                      .shouldContain("Written dynamic archive 0x");
+                output.shouldContain("Written dynamic archive 0x");
             });
         run2(baseArchiveName, topArchiveName,
             "-Xlog:class+load",
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2019, 2020, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2019, 2021, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -62,8 +62,7 @@ public class HelloDynamicCustom extends DynamicArchiveTestBase {
              "-cp", appJar,
              mainAppClass, customJarPath, "false", "false")
              .assertNormalExit(output -> {
-                 output.shouldContain("Buffer-space to target-space delta")
-                       .shouldContain("Written dynamic archive 0x")
+                 output.shouldContain("Written dynamic archive 0x")
                        .shouldNotContain("klasses.*=.*CustomLoadee")
                        .shouldHaveExitValue(0);
              });
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2019, 2020, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2019, 2021, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -70,8 +70,7 @@ public class HelloDynamicCustomUnload extends DynamicArchiveTestBase {
              "-cp", appJar,
              mainAppClass, customJarPath, "true", "false")
              .assertNormalExit(output -> {
-                 output.shouldContain("Buffer-space to target-space delta")
-                       .shouldContain("Written dynamic archive 0x")
+                 output.shouldContain("Written dynamic archive 0x")
                        .shouldNotContain("klasses.*=.*CustomLoadee") // Fixme -- use a better way to decide if a class has been archived
                        .shouldHaveExitValue(0);
              });
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2019, 2020, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2019, 2021, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -57,8 +57,7 @@ public class JITInteraction extends DynamicArchiveTestBase {
              "-XX:+PrintCompilation",
              "-cp", appJar, mainClass)
              .assertNormalExit(output -> {
-                 output.shouldContain("Buffer-space to target-space delta")
-                       .shouldContain("Written dynamic archive 0x");
+                 output.shouldContain("Written dynamic archive 0x");
              });
     }
 }
@@ -74,8 +74,7 @@ public class LambdaInBaseArchive extends DynamicArchiveTestBase {
              "-Xlog:class+load,cds,cds+dynamic=debug",
              "-cp", appJar, mainClass)
              .assertNormalExit(output -> {
-                 output.shouldContain("Buffer-space to target-space delta")
-                       .shouldContain("Written dynamic archive 0x");
+                 output.shouldContain("Written dynamic archive 0x");
              });

         run2(baseArchiveName, topArchiveName,
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2019, 2020, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2019, 2021, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -106,8 +106,7 @@ public class MainModuleOnly extends DynamicArchiveTestBase {
              "--module-path", moduleDir.toString(),
              "-m", TEST_MODULE1)
              .assertNormalExit(output -> {
-                 output.shouldContain("Buffer-space to target-space delta")
-                       .shouldContain("Written dynamic archive 0x");
+                 output.shouldContain("Written dynamic archive 0x");
              });

         // run with the archive using the same command line as in dump time.
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2020, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2020, 2021, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -72,8 +72,7 @@ public class MismatchedBaseArchive extends DynamicArchiveTestBase {
              "-Xlog:cds+dynamic=debug",
              "-cp", appJar, mainClass)
              .assertNormalExit(output -> {
-                 output.shouldContain("Buffer-space to target-space delta")
-                       .shouldContain("Written dynamic archive 0x");
+                 output.shouldContain("Written dynamic archive 0x");
              });

         run2(helloBaseArchive, topArchiveName,
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2019, 2020, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2019, 2021, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -65,8 +65,7 @@ public class MissingArchive extends DynamicArchiveTestBase {
              "-Xlog:cds+dynamic=debug",
              "-cp", appJar, mainClass)
              .assertNormalExit(output -> {
-                 output.shouldContain("Buffer-space to target-space delta")
-                       .shouldContain("Written dynamic archive 0x");
+                 output.shouldContain("Written dynamic archive 0x");
              });

         // Use -Xshare:auto so top archive can fail after base archive has succeeded,
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2019, 2020, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2019, 2021, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -83,8 +83,7 @@ public class SharedArchiveFileOption extends DynamicArchiveTestBase {
              "-Xlog:cds+dynamic=debug",
              "-cp", appJar, mainClass)
              .assertNormalExit(output -> {
-                 output.shouldContain("Buffer-space to target-space delta")
-                       .shouldContain("Written dynamic archive 0x");
+                 output.shouldContain("Written dynamic archive 0x");
              });

         // same archive file specified for -XX:SharedArchiveFile and -XX:ArchiveClassesAtExit
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2019, 2020, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2019, 2021, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -66,8 +66,7 @@ public class UnusedCPDuringDump extends DynamicArchiveTestBase {
              "-cp", dir.getPath(),
              "Hello")
              .assertNormalExit(output -> {
-                 output.shouldContain("Buffer-space to target-space delta")
-                       .shouldContain("Written dynamic archive 0x");
+                 output.shouldContain("Written dynamic archive 0x");
              });

         // Running with -cp different from dumping. It should be fine because
@@ -271,8 +271,7 @@ public class CDSTestUtils {
         if (!DYNAMIC_DUMP) {
             output.shouldContain("Loading classes to share");
         } else {
-            output.shouldContain("Buffer-space to target-space delta")
-                  .shouldContain("Written dynamic archive 0x");
+            output.shouldContain("Written dynamic archive 0x");
         }
         output.shouldHaveExitValue(0);

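Note: the same assertion change recurs in every test above, and CDSTestUtils carries the shared form of it. A condensed sketch of the resulting check follows; the DumpOutputCheck wrapper and its verify method are invented for this note, while the two log strings and the exit-value check come from the hunk above:

    import jdk.test.lib.process.OutputAnalyzer;

    // Illustrative wrapper around the verification pattern above.
    class DumpOutputCheck {
        static void verify(OutputAnalyzer output, boolean dynamicDump) {
            if (!dynamicDump) {
                output.shouldContain("Loading classes to share");       // static (base) archive dump
            } else {
                output.shouldContain("Written dynamic archive 0x");     // dynamic (top) archive dump
            }
            output.shouldHaveExitValue(0);
        }
    }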