8301106: Allow archived Java strings to be moved by GC
Reviewed-by: dholmes
commit b524a74165
parent 9643f654da
13 changed files with 239 additions and 147 deletions
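Summary of the change: previously the shared-string CompactHashtable recorded a fixed heap location for each archived string (a narrowOop value, or a delta from the mapped heap for uncompressed oops), which meant the strings could never move. With this patch every archived interned string is stored in a root object array and the table records only a 32-bit index into that array; when the array would be too large to archive in one piece it is split into a primary Object[] whose elements are secondary Object[] chunks of 2^14 entries, and the index is decoded as [bits 31..14] = primary index, [bits 13..0] = secondary index. Because runtime lookups resolve the index through HeapShared::roots(), the GC is free to relocate the strings. The following standalone sketch is illustrative only (plain C++ containers and hypothetical names, not HotSpot code); it shows the index arithmetic the patch introduces:

// Illustrative sketch only -- mirrors the index layout from this patch
// (_secondary_array_index_bits == 14); names here are hypothetical.
#include <cassert>
#include <cstdint>
#include <cstdio>
#include <string>
#include <vector>

constexpr uint32_t kSecondaryIndexBits = 14;
constexpr uint32_t kSecondaryMaxLength = 1u << kSecondaryIndexBits;   // 16384
constexpr uint32_t kSecondaryIndexMask = kSecondaryMaxLength - 1;

struct TwoLevelStringArray {
  std::vector<std::vector<std::string>> primary;   // Object[][] analogue

  void put(uint32_t index, const std::string& s) {
    uint32_t pi = index >> kSecondaryIndexBits;    // bits 31..14
    uint32_t si = index & kSecondaryIndexMask;     // bits 13..0
    if (primary.size() <= pi) primary.resize(pi + 1);
    if (primary[pi].size() <= si) primary[pi].resize(si + 1);
    primary[pi][si] = s;
  }

  // Only this 32-bit index goes into the shared CompactHashtable,
  // never a raw heap address, so the strings themselves may be moved by GC.
  const std::string& get(uint32_t index) const {
    return primary[index >> kSecondaryIndexBits][index & kSecondaryIndexMask];
  }
};

int main() {
  TwoLevelStringArray a;
  a.put(40000, "hello");              // primary index 2, secondary index 7232
  assert(a.get(40000) == "hello");
  std::printf("%s\n", a.get(40000).c_str());
  return 0;
}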
@@ -91,7 +91,7 @@ void ArchiveHeapLoader::fixup_regions() {
   } else if (_loading_failed) {
     fill_failed_loaded_heap();
   }
-  if (is_fully_available()) {
+  if (is_in_use()) {
     if (!MetaspaceShared::use_full_module_graph()) {
       // Need to remove all the archived java.lang.Module objects from HeapShared::roots().
       ClassLoaderDataShared::clear_archived_oops();
@@ -472,7 +472,7 @@ void ArchiveHeapLoader::finish_initialization() {
       verify_loaded_heap();
     }
   }
-  if (is_fully_available()) {
+  if (is_in_use()) {
    patch_native_pointers();
   }
 }
@@ -65,13 +65,7 @@ public:
     NOT_CDS_JAVA_HEAP(return false;)
   }

-  static bool are_archived_strings_available() {
-    return is_loaded() || closed_regions_mapped();
-  }
-  static bool are_archived_mirrors_available() {
-    return is_fully_available();
-  }
-  static bool is_fully_available() {
+  static bool is_in_use() {
     return is_loaded() || is_mapped();
   }

@@ -316,16 +316,16 @@ void ReadClosure::do_tag(int tag) {
 void ReadClosure::do_oop(oop *p) {
   if (UseCompressedOops) {
     narrowOop o = CompressedOops::narrow_oop_cast(nextPtr());
-    if (CompressedOops::is_null(o) || !ArchiveHeapLoader::is_fully_available()) {
+    if (CompressedOops::is_null(o) || !ArchiveHeapLoader::is_in_use()) {
       *p = nullptr;
     } else {
       assert(ArchiveHeapLoader::can_use(), "sanity");
-      assert(ArchiveHeapLoader::is_fully_available(), "must be");
+      assert(ArchiveHeapLoader::is_in_use(), "must be");
       *p = ArchiveHeapLoader::decode_from_archive(o);
     }
   } else {
     intptr_t dumptime_oop = nextPtr();
-    if (dumptime_oop == 0 || !ArchiveHeapLoader::is_fully_available()) {
+    if (dumptime_oop == 0 || !ArchiveHeapLoader::is_in_use()) {
       *p = nullptr;
     } else {
       assert(!ArchiveHeapLoader::is_loaded(), "ArchiveHeapLoader::can_load() is not supported for uncompessed oops");
@@ -261,7 +261,7 @@ oop HeapShared::get_root(int index, bool clear) {
 void HeapShared::clear_root(int index) {
   assert(index >= 0, "sanity");
   assert(UseSharedSpaces, "must be");
-  if (ArchiveHeapLoader::is_fully_available()) {
+  if (ArchiveHeapLoader::is_in_use()) {
     if (log_is_enabled(Debug, cds, heap)) {
       oop old = roots()->obj_at(index);
       log_debug(cds, heap)("Clearing root %d: was " PTR_FORMAT, index, p2i(old));
@@ -378,8 +378,6 @@ void HeapShared::remove_scratch_objects(Klass* k) {
 }

 void HeapShared::archive_java_mirrors() {
-  init_seen_objects_table();
-
   for (int i = T_BOOLEAN; i < T_VOID+1; i++) {
     BasicType bt = (BasicType)i;
     if (!is_reference_type(bt)) {
@@ -404,7 +402,7 @@ void HeapShared::archive_java_mirrors() {
     if (m != nullptr) {
       Klass* buffered_k = ArchiveBuilder::get_buffered_klass(orig_k);
       bool success = archive_reachable_objects_from(1, _default_subgraph_info, m, /*is_closed_archive=*/ false);
-      guarantee(success, "scratch mirrors should not point to any unachivable objects");
+      guarantee(success, "scratch mirrors must point to only archivable objects");
       buffered_k->set_archived_java_mirror(append_root(m));
       ResourceMark rm;
       log_trace(cds, heap, mirror)(
@@ -425,8 +423,16 @@ void HeapShared::archive_java_mirrors() {
       }
     }
   }
-
-  delete_seen_objects_table();
+}
+
+void HeapShared::archive_strings() {
+  oop shared_strings_array = StringTable::init_shared_table(_dumped_interned_strings);
+  bool success = archive_reachable_objects_from(1, _default_subgraph_info, shared_strings_array, /*is_closed_archive=*/ false);
+  // We must succeed because:
+  // - _dumped_interned_strings do not contain any large strings.
+  // - StringTable::init_shared_table() doesn't create any large arrays.
+  assert(success, "shared strings array must not point to arrays or strings that are too large to archive");
+  StringTable::set_shared_strings_array_index(append_root(shared_strings_array));
 }

 void HeapShared::mark_native_pointers(oop orig_obj) {
@@ -501,7 +507,7 @@ void HeapShared::check_enum_obj(int level,

 // See comments in HeapShared::check_enum_obj()
 bool HeapShared::initialize_enum_klass(InstanceKlass* k, TRAPS) {
-  if (!ArchiveHeapLoader::is_fully_available()) {
+  if (!ArchiveHeapLoader::is_in_use()) {
     return false;
   }

@@ -556,7 +562,6 @@ void HeapShared::archive_objects(GrowableArray<MemRegion>* closed_regions,
   }

   ArchiveHeapWriter::write(_pending_roots, closed_regions, open_regions, closed_bitmaps, open_bitmaps);
-  StringTable::write_shared_table(_dumped_interned_strings);
 }

 void HeapShared::copy_interned_strings() {
@@ -564,14 +569,13 @@ void HeapShared::copy_interned_strings() {

   auto copier = [&] (oop s, bool value_ignored) {
     assert(s != nullptr, "sanity");
-    if (!ArchiveHeapWriter::is_string_too_large_to_archive(s)) {
+    assert(!ArchiveHeapWriter::is_string_too_large_to_archive(s), "large strings must have been filtered");
     bool success = archive_reachable_objects_from(1, _default_subgraph_info,
                                                   s, /*is_closed_archive=*/true);
-    assert(success, "must be");
+    assert(success, "string must be short enough to be archived");
     // Prevent string deduplication from changing the value field to
     // something not in the archive.
     java_lang_String::set_deduplication_forbidden(s);
-    }
   };
   _dumped_interned_strings->iterate_all(copier);

@@ -589,10 +593,18 @@ void HeapShared::copy_closed_objects() {
                            false /* is_full_module_graph */);
 }

+void HeapShared::copy_special_open_objects() {
+  // Archive special objects that do not belong to any subgraphs
+  init_seen_objects_table();
+  archive_java_mirrors();
+  archive_strings();
+  delete_seen_objects_table();
+}
+
 void HeapShared::copy_open_objects() {
   assert(HeapShared::can_write(), "must be");

-  archive_java_mirrors();
+  copy_special_open_objects();

   archive_object_subgraphs(open_archive_subgraph_entry_fields,
                            false /* is_closed_archive */,
@@ -861,7 +873,7 @@ void HeapShared::serialize_root(SerializeClosure* soc) {
     assert(oopDesc::is_oop_or_null(roots_oop), "is oop");
     // Create an OopHandle only if we have actually mapped or loaded the roots
     if (roots_oop != nullptr) {
-      assert(ArchiveHeapLoader::is_fully_available(), "must be");
+      assert(ArchiveHeapLoader::is_in_use(), "must be");
       _roots = OopHandle(Universe::vm_global(), roots_oop);
     }
   } else {
@@ -921,7 +933,7 @@ static void verify_the_heap(Klass* k, const char* which) {
 // this case, we will not load the ArchivedKlassSubGraphInfoRecord and will clear its roots.
 void HeapShared::resolve_classes(JavaThread* current) {
   assert(UseSharedSpaces, "runtime only!");
-  if (!ArchiveHeapLoader::is_fully_available()) {
+  if (!ArchiveHeapLoader::is_in_use()) {
     return; // nothing to do
   }
   resolve_classes_for_subgraphs(current, closed_archive_subgraph_entry_fields);
@@ -954,7 +966,7 @@ void HeapShared::resolve_classes_for_subgraph_of(JavaThread* current, Klass* k)

 void HeapShared::initialize_from_archived_subgraph(JavaThread* current, Klass* k) {
   JavaThread* THREAD = current;
-  if (!ArchiveHeapLoader::is_fully_available()) {
+  if (!ArchiveHeapLoader::is_in_use()) {
     return; // nothing to do
   }

@@ -344,6 +344,7 @@ private:
   static void mark_native_pointers(oop orig_obj);
   static bool has_been_archived(oop orig_obj);
   static void archive_java_mirrors();
+  static void archive_strings();
  public:
   static void reset_archived_object_states(TRAPS);
   static void create_archived_object_cache() {
@@ -364,6 +365,7 @@ private:
                              GrowableArray<ArchiveHeapBitmapInfo>* open_bitmaps);
   static void copy_closed_objects();
   static void copy_open_objects();
+  static void copy_special_open_objects();

   static bool archive_reachable_objects_from(int level,
                                              KlassSubGraphInfo* subgraph_info,
@@ -801,6 +801,7 @@ void MetaspaceShared::preload_and_dump_impl(TRAPS) {
   log_info(cds)("Rewriting and linking classes: done");

 #if INCLUDE_CDS_JAVA_HEAP
+  StringTable::allocate_shared_strings_array(CHECK);
   ArchiveHeapWriter::init();
   if (use_full_module_graph()) {
     HeapShared::reset_archived_object_states(CHECK);
@@ -1437,9 +1438,6 @@ void MetaspaceShared::initialize_shared_spaces() {
   ReadClosure rc(&array);
   serialize(&rc);

-  // Initialize the run-time symbol table.
-  SymbolTable::create_table();
-
   // Finish up archived heap initialization. These must be
   // done after ReadClosure.
   static_mapinfo->patch_heap_embedded_pointers();
@@ -889,7 +889,7 @@ void java_lang_Class::fixup_mirror(Klass* k, TRAPS) {
   }

   if (k->is_shared() && k->has_archived_mirror_index()) {
-    if (ArchiveHeapLoader::are_archived_mirrors_available()) {
+    if (ArchiveHeapLoader::is_in_use()) {
       bool present = restore_archived_mirror(k, Handle(), Handle(), Handle(), CHECK);
       assert(present, "Missing archived mirror for %s", k->external_name());
       return;
@@ -40,6 +40,7 @@
 #include "logging/log.hpp"
 #include "logging/logStream.hpp"
 #include "memory/allocation.inline.hpp"
+#include "memory/oopFactory.hpp"
 #include "memory/resourceArea.hpp"
 #include "oops/access.inline.hpp"
 #include "oops/compressedOops.hpp"
@@ -72,26 +73,31 @@ const size_t REHASH_LEN = 100;
 const double CLEAN_DEAD_HIGH_WATER_MARK = 0.5;

 #if INCLUDE_CDS_JAVA_HEAP
-inline oop read_string_from_compact_hashtable(address base_address, u4 offset) {
-  assert(ArchiveHeapLoader::are_archived_strings_available(), "sanity");
-  if (UseCompressedOops) {
-    assert(sizeof(narrowOop) == sizeof(offset), "must be");
-    narrowOop v = CompressedOops::narrow_oop_cast(offset);
-    return ArchiveHeapLoader::decode_from_archive(v);
+bool StringTable::_is_two_dimensional_shared_strings_array = false;
+OopHandle StringTable::_shared_strings_array;
+int StringTable::_shared_strings_array_root_index;
+
+inline oop StringTable::read_string_from_compact_hashtable(address base_address, u4 index) {
+  assert(ArchiveHeapLoader::is_in_use(), "sanity");
+  objArrayOop array = (objArrayOop)(_shared_strings_array.resolve());
+  oop s;
+
+  if (!_is_two_dimensional_shared_strings_array) {
+    s = array->obj_at((int)index);
   } else {
-    assert(!ArchiveHeapLoader::is_loaded(), "Pointer relocation for uncompressed oops is unimplemented");
-    intptr_t dumptime_oop = (uintptr_t)offset;
-    assert(dumptime_oop != 0, "null strings cannot be interned");
-    intptr_t runtime_oop = dumptime_oop +
-                           (intptr_t)FileMapInfo::current_info()->header()->heap_begin() +
-                           (intptr_t)ArchiveHeapLoader::mapped_heap_delta();
-    return (oop)cast_to_oop(runtime_oop);
+    int primary_index = index >> _secondary_array_index_bits;
+    int secondary_index = index & _secondary_array_index_mask;
+    objArrayOop secondary = (objArrayOop)array->obj_at(primary_index);
+    s = secondary->obj_at(secondary_index);
   }
+
+  assert(java_lang_String::is_instance(s), "must be");
+  return s;
 }

 typedef CompactHashtable<
   const jchar*, oop,
-  read_string_from_compact_hashtable,
+  StringTable::read_string_from_compact_hashtable,
   java_lang_String::equals> SharedStringTable;

 static SharedStringTable _shared_table;
@@ -224,6 +230,12 @@ void StringTable::create_table() {
   _local_table = new StringTableHash(start_size_log_2, END_SIZE, REHASH_LEN, true);
   _oop_storage = OopStorageSet::create_weak("StringTable Weak", mtSymbol);
   _oop_storage->register_num_dead_callback(&gc_notification);
+
+#if INCLUDE_CDS_JAVA_HEAP
+  if (ArchiveHeapLoader::is_in_use()) {
+    _shared_strings_array = OopHandle(Universe::vm_global(), HeapShared::get_root(_shared_strings_array_root_index));
+  }
+#endif
 }

 size_t StringTable::item_added() {
@@ -755,49 +767,131 @@ oop StringTable::lookup_shared(const jchar* name, int len) {
   return _shared_table.lookup(name, java_lang_String::hash_code(name, len), len);
 }

-class EncodeSharedStringsAsOffsets : StackObj {
-  CompactHashtableWriter* _writer;
-private:
-  u4 compute_delta(oop s) {
-    HeapWord* start = G1CollectedHeap::heap()->reserved().start();
-    intx offset = ((address)(void*)s) - ((address)(void*)start);
-    assert(offset >= 0, "must be");
-    if (offset > 0xffffffff) {
-      fatal("too large");
-    }
-    return (u4)offset;
+// This is called BEFORE we enter the CDS safepoint. We can allocate heap objects.
+// This should be called when we know no more strings will be added (which will be easy
+// to guarantee because CDS runs with a single Java thread. See JDK-8253495.)
+void StringTable::allocate_shared_strings_array(TRAPS) {
+  assert(DumpSharedSpaces, "must be");
+  if (_items_count > (size_t)max_jint) {
+    fatal("Too many strings to be archived: " SIZE_FORMAT, _items_count);
   }
-public:
-  EncodeSharedStringsAsOffsets(CompactHashtableWriter* writer) : _writer(writer) {}
-  bool do_entry(oop s, bool value_ignored) {
-    assert(s != nullptr, "sanity");
-    assert(!ArchiveHeapWriter::is_string_too_large_to_archive(s), "must be");
-    oop req_s = ArchiveHeapWriter::source_obj_to_requested_obj(s);
-    assert(req_s != nullptr, "must have been archived");
-    unsigned int hash = java_lang_String::hash_code(s);
-    if (UseCompressedOops) {
-      _writer->add(hash, CompressedOops::narrow_oop_value(req_s));
-    } else {
-      _writer->add(hash, compute_delta(req_s));
-    }
-    return true; // keep iterating
-  }
-};

-// Write the _shared_table (a CompactHashtable) into the CDS archive file.
-void StringTable::write_shared_table(const DumpedInternedStrings* dumped_interned_strings) {
+  int total = (int)_items_count;
+  size_t single_array_size = objArrayOopDesc::object_size(total);
+
+  log_info(cds)("allocated string table for %d strings", total);
+
+  if (!ArchiveHeapWriter::is_too_large_to_archive(single_array_size)) {
+    // The entire table can fit in a single array
+    objArrayOop array = oopFactory::new_objArray(vmClasses::Object_klass(), total, CHECK);
+    _shared_strings_array = OopHandle(Universe::vm_global(), array);
+    log_info(cds)("string table array (single level) length = %d", total);
+  } else {
+    // Split the table in two levels of arrays.
+    int primary_array_length = (total + _secondary_array_max_length - 1) / _secondary_array_max_length;
+    size_t primary_array_size = objArrayOopDesc::object_size(primary_array_length);
+    size_t secondary_array_size = objArrayOopDesc::object_size(_secondary_array_max_length);
+
+    if (ArchiveHeapWriter::is_too_large_to_archive(secondary_array_size)) {
+      // This can only happen if you have an extremely large number of classes that
+      // refer to more than 16384 * 16384 = 26M interned strings! Not a practical concern
+      // but bail out for safety.
+      log_error(cds)("Too many strings to be archived: " SIZE_FORMAT, _items_count);
+      os::_exit(1);
+    }
+
+    objArrayOop primary = oopFactory::new_objArray(vmClasses::Object_klass(), primary_array_length, CHECK);
+    objArrayHandle primaryHandle(THREAD, primary);
+    _shared_strings_array = OopHandle(Universe::vm_global(), primary);
+
+    log_info(cds)("string table array (primary) length = %d", primary_array_length);
+    for (int i = 0; i < primary_array_length; i++) {
+      int len;
+      if (total > _secondary_array_max_length) {
+        len = _secondary_array_max_length;
+      } else {
+        len = total;
+      }
+      total -= len;
+
+      objArrayOop secondary = oopFactory::new_objArray(vmClasses::Object_klass(), len, CHECK);
+      primaryHandle()->obj_at_put(i, secondary);
+
+      log_info(cds)("string table array (secondary)[%d] length = %d", i, len);
+      assert(!ArchiveHeapWriter::is_too_large_to_archive(secondary), "sanity");
+    }
+
+    assert(total == 0, "must be");
+    _is_two_dimensional_shared_strings_array = true;
+  }
+}
+
+#ifndef PRODUCT
+void StringTable::verify_secondary_array_index_bits() {
+  int max;
+  for (max = 1; ; max++) {
+    size_t next_size = objArrayOopDesc::object_size(1 << (max + 1));
+    if (ArchiveHeapWriter::is_too_large_to_archive(next_size)) {
+      break;
+    }
+  }
+  // Currently max is 17 for +UseCompressedOops, 16 for -UseCompressedOops.
+  // When we add support for Shenandoah (which has a smaller mininum region size than G1),
+  // max will become 15/14.
+  //
+  // We use _secondary_array_index_bits==14 as that will be the eventual value, and will
+  // make testing easier.
+  assert(_secondary_array_index_bits <= max,
+         "_secondary_array_index_bits (%d) must be smaller than max possible value (%d)",
+         _secondary_array_index_bits, max);
+}
+#endif // PRODUCT
+
+// This is called AFTER we enter the CDS safepoint.
+//
+// For each shared string:
+// [1] Store it into _shared_strings_array. Encode its position as a 32-bit index.
+// [2] Store the index and hashcode into _shared_table.
+oop StringTable::init_shared_table(const DumpedInternedStrings* dumped_interned_strings) {
   assert(HeapShared::can_write(), "must be");
+  objArrayOop array = (objArrayOop)(_shared_strings_array.resolve());
+
+  verify_secondary_array_index_bits();

   _shared_table.reset();
   CompactHashtableWriter writer(_items_count, ArchiveBuilder::string_stats());

-  // Encode the strings in the CompactHashtable using offsets -- we know that the
-  // strings will not move during runtime because they are inside the G1 closed
-  // archive region.
-  EncodeSharedStringsAsOffsets offset_finder(&writer);
-  dumped_interned_strings->iterate(&offset_finder);
+  int index = 0;
+  auto copy_into_array = [&] (oop string, bool value_ignored) {
+    unsigned int hash = java_lang_String::hash_code(string);
+    writer.add(hash, index);
+
+    if (!_is_two_dimensional_shared_strings_array) {
+      assert(index < array->length(), "no strings should have been added");
+      array->obj_at_put(index, string);
+    } else {
+      int primary_index = index >> _secondary_array_index_bits;
+      int secondary_index = index & _secondary_array_index_mask;
+
+      assert(primary_index < array->length(), "no strings should have been added");
+      objArrayOop secondary = (objArrayOop)array->obj_at(primary_index);
+
+      assert(secondary != nullptr && secondary->is_objArray(), "must be");
+      assert(secondary_index < secondary->length(), "no strings should have been added");
+      secondary->obj_at_put(secondary_index, string);
+    }
+
+    index ++;
+  };
+  dumped_interned_strings->iterate_all(copy_into_array);

   writer.dump(&_shared_table, "string");
+
+  return array;
+}
+
+void StringTable::set_shared_strings_array_index(int root_index) {
+  _shared_strings_array_root_index = root_index;
 }

 void StringTable::serialize_shared_table_header(SerializeClosure* soc) {
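For a sense of scale, the sizing logic above splits the table into two levels only when a single objArray would be rejected by ArchiveHeapWriter::is_too_large_to_archive(). A small illustrative calculation (not HotSpot code) for the 260,000 strings generated by the updated SharedStringsStress test: with 16384-entry secondary arrays the primary array ends up with ceil(260000 / 16384) = 16 entries, fifteen full secondaries of 16384 strings and a final one of 14240.

// Illustration only: reproduces the two-level sizing arithmetic of
// allocate_shared_strings_array() for the test's 260,000 interned strings.
#include <cstdio>

int main() {
  const int secondary_max = 1 << 14;                                    // 16384
  int total = 260000;
  const int primary_len = (total + secondary_max - 1) / secondary_max;  // 16
  std::printf("primary length = %d\n", primary_len);
  for (int i = 0; i < primary_len; i++) {
    int len = (total > secondary_max) ? secondary_max : total;
    total -= len;
    std::printf("secondary[%d] length = %d\n", i, len);                 // 15 x 16384, then 14240
  }
  return 0;
}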
@@ -806,47 +900,11 @@ void StringTable::serialize_shared_table_header(SerializeClosure* soc) {
   if (soc->writing()) {
     // Sanity. Make sure we don't use the shared table at dump time
     _shared_table.reset();
-  } else if (!ArchiveHeapLoader::are_archived_strings_available()) {
+  } else if (!ArchiveHeapLoader::is_in_use()) {
     _shared_table.reset();
   }
+
+  soc->do_bool(&_is_two_dimensional_shared_strings_array);
+  soc->do_u4((u4*)(&_shared_strings_array_root_index));
 }

-class SharedStringTransfer {
-  JavaThread* _current;
- public:
-  SharedStringTransfer(JavaThread* current) : _current(current) {}
-
-  void do_value(oop string) {
-    JavaThread* THREAD = _current;
-    ExceptionMark rm(THREAD);
-    HandleMark hm(THREAD);
-    StringTable::intern(string, THREAD);
-    if (HAS_PENDING_EXCEPTION) {
-      // The archived constant pools contains strings that must be in the interned string table.
-      // If we fail here, it means the VM runs out of memory during bootstrap, so there's no point
-      // of trying to recover from here.
-      vm_exit_during_initialization("Failed to transfer shared strings to interned string table");
-    }
-  }
-};
-
-// If the CDS archive heap is loaded (not mapped) into the old generation,
-// it's possible for the shared strings to move due to full GC, making the
-// _shared_table invalid. Therefore, we proactively copy all the shared
-// strings into the _local_table, which can deal with oop relocation.
-void StringTable::transfer_shared_strings_to_local_table() {
-  assert(ArchiveHeapLoader::is_loaded(), "must be");
-  EXCEPTION_MARK;
-
-  // Reset _shared_table so that during the transfer, StringTable::intern()
-  // will not look up from there. Instead, it will create a new entry in
-  // _local_table for each element in shared_table_copy.
-  SharedStringTable shared_table_copy = _shared_table;
-  _shared_table.reset();
-
-  SharedStringTransfer transfer(THREAD);
-  shared_table_copy.iterate(&transfer);
-}
-
 #endif //INCLUDE_CDS_JAVA_HEAP
@@ -28,6 +28,7 @@
 #include "memory/allocation.hpp"
 #include "memory/padded.hpp"
 #include "oops/oop.hpp"
+#include "oops/oopHandle.hpp"
 #include "oops/weakHandle.hpp"
 #include "utilities/tableStatistics.hpp"

@@ -104,14 +105,46 @@ class StringTable : public CHeapObj<mtSymbol>{
   }

   // Sharing
+#if INCLUDE_CDS_JAVA_HEAP
+  static inline oop read_string_from_compact_hashtable(address base_address, u4 index);
+
+ private:
+  static bool _is_two_dimensional_shared_strings_array;
+  static OopHandle _shared_strings_array;
+  static int _shared_strings_array_root_index;
+
+  // All the shared strings are referenced through _shared_strings_array to keep them alive.
+  // Each shared string is stored as a 32-bit index in ::_shared_table. The index
+  // is interpreted in two ways:
+  //
+  // [1] _is_two_dimensional_shared_strings_array = false: _shared_strings_array is an Object[].
+  //     Each shared string is stored as _shared_strings_array[index]
+  //
+  // [2] _is_two_dimensional_shared_strings_array = true: _shared_strings_array is an Object[][]
+  //     This happens when there are too many elements in the shared table. We store them
+  //     using two levels of objArrays, such that none of the arrays are too big for
+  //     ArchiveHeapWriter::is_too_large_to_archive(). In this case, the index is splited into two
+  //     parts. Each shared string is stored as _shared_strings_array[primary_index][secondary_index]:
+  //
+  //     [bits 31 .. 14][ bits 13 .. 0 ]
+  //      primary_index  secondary_index
+  const static int _secondary_array_index_bits = 14;
+  const static int _secondary_array_max_length = 1 << _secondary_array_index_bits;
+  const static int _secondary_array_index_mask = _secondary_array_max_length - 1;
+
+  // make sure _secondary_array_index_bits is not too big
+  static void verify_secondary_array_index_bits() PRODUCT_RETURN;
+#endif // INCLUDE_CDS_JAVA_HEAP
+
  private:
   static oop lookup_shared(const jchar* name, int len, unsigned int hash) NOT_CDS_JAVA_HEAP_RETURN_(nullptr);
  public:
   static oop lookup_shared(const jchar* name, int len) NOT_CDS_JAVA_HEAP_RETURN_(nullptr);
   static size_t shared_entry_count() NOT_CDS_JAVA_HEAP_RETURN_(0);
-  static void write_shared_table(const DumpedInternedStrings* dumped_interned_strings) NOT_CDS_JAVA_HEAP_RETURN;
+  static void allocate_shared_strings_array(TRAPS) NOT_CDS_JAVA_HEAP_RETURN;
+  static oop init_shared_table(const DumpedInternedStrings* dumped_interned_strings) NOT_CDS_JAVA_HEAP_RETURN_(NULL);
+  static void set_shared_strings_array_index(int root_index) NOT_CDS_JAVA_HEAP_RETURN;
   static void serialize_shared_table_header(SerializeClosure* soc) NOT_CDS_JAVA_HEAP_RETURN;
-  static void transfer_shared_strings_to_local_table() NOT_CDS_JAVA_HEAP_RETURN;

   // Jcmd
   static void dump(outputStream* st, bool verbose=false);
@@ -244,7 +244,7 @@ void Universe::set_archived_basic_type_mirror_index(BasicType t, int index) {
 }

 void Universe::update_archived_basic_type_mirrors() {
-  if (ArchiveHeapLoader::are_archived_mirrors_available()) {
+  if (ArchiveHeapLoader::is_in_use()) {
     for (int i = T_BOOLEAN; i < T_VOID+1; i++) {
       int index = _archived_basic_type_mirror_indices[i];
       if (!is_reference_type((BasicType)i) && index >= 0) {
@@ -455,7 +455,7 @@ void Universe::genesis(TRAPS) {
 void Universe::initialize_basic_type_mirrors(TRAPS) {
 #if INCLUDE_CDS_JAVA_HEAP
   if (UseSharedSpaces &&
-      ArchiveHeapLoader::are_archived_mirrors_available() &&
+      ArchiveHeapLoader::is_in_use() &&
       _basic_type_mirrors[T_INT].resolve() != nullptr) {
     assert(ArchiveHeapLoader::can_use(), "Sanity");

@@ -812,28 +812,17 @@ jint universe_init() {
   DynamicArchive::check_for_dynamic_dump();
   if (UseSharedSpaces) {
     // Read the data structures supporting the shared spaces (shared
-    // system dictionary, symbol table, etc.). After that, access to
-    // the file (other than the mapped regions) is no longer needed, and
-    // the file is closed. Closing the file does not affect the
-    // currently mapped regions.
+    // system dictionary, symbol table, etc.)
     MetaspaceShared::initialize_shared_spaces();
-    StringTable::create_table();
-    if (ArchiveHeapLoader::is_loaded()) {
-      StringTable::transfer_shared_strings_to_local_table();
-    }
-  } else
-#endif
-  {
-    SymbolTable::create_table();
-    StringTable::create_table();
   }

-#if INCLUDE_CDS
   if (Arguments::is_dumping_archive()) {
     MetaspaceShared::prepare_for_dumping();
   }
 #endif

+  SymbolTable::create_table();
+  StringTable::create_table();
+
   if (strlen(VerifySubSet) > 0) {
     Universe::initialize_verify_flags();
   }
@@ -337,7 +337,7 @@ void ConstantPool::restore_unshareable_info(TRAPS) {
   if (vmClasses::Object_klass_loaded()) {
     ClassLoaderData* loader_data = pool_holder()->class_loader_data();
 #if INCLUDE_CDS_JAVA_HEAP
-    if (ArchiveHeapLoader::is_fully_available() &&
+    if (ArchiveHeapLoader::is_in_use() &&
         _cache->archived_references() != nullptr) {
       oop archived = _cache->archived_references();
       // Create handle for the archived resolved reference array object
@@ -603,7 +603,7 @@ void Klass::restore_unshareable_info(ClassLoaderData* loader_data, Handle protec
   if (this->has_archived_mirror_index()) {
     ResourceMark rm(THREAD);
     log_debug(cds, mirror)("%s has raw archived mirror", external_name());
-    if (ArchiveHeapLoader::are_archived_mirrors_available()) {
+    if (ArchiveHeapLoader::is_in_use()) {
       bool present = java_lang_Class::restore_archived_mirror(this, loader, module_handle,
                                                               protection_domain,
                                                               CHECK);
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017, 2021, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2017, 2023, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -28,7 +28,7 @@
  * @requires vm.cds.write.archived.java.heap
  * @library /test/hotspot/jtreg/runtime/cds/appcds /test/lib
  * @build HelloString
- * @run driver/timeout=500 SharedStringsStress
+ * @run driver/timeout=650 SharedStringsStress
  */
 import java.io.File;
 import java.io.FileOutputStream;
@@ -52,7 +52,10 @@ public class SharedStringsStress {
         out.println("VERSION: 1.0");
         out.println("@SECTION: String");
         out.println("31: shared_test_string_unique_14325");
-        for (int i=0; i<200000; i++) {
+        // Create enough entries to require the shared string
+        // table to split into two levels of Object arrays. See
+        // StringTable::allocate_shared_table() in HotSpot.
+        for (int i=0; i<260000; i++) {
             String s = "generated_string " + i;
             out.println(s.length() + ": " + s);
         }
@@ -91,8 +94,11 @@ public class SharedStringsStress {
                                        "-Xlog:gc+region+cds",
                                        "-Xlog:gc+region=trace"));
         TestCommon.checkDump(dumpOutput);
+        dumpOutput.shouldContain("string table array (primary)");
+        dumpOutput.shouldContain("string table array (secondary)");
+
         OutputAnalyzer execOutput = TestCommon.exec(appJar,
-            TestCommon.concat(vmOptionsPrefix, "HelloString"));
+            TestCommon.concat(vmOptionsPrefix, "-Xlog:cds", "HelloString"));
         TestCommon.checkExec(execOutput);
     }
 }