8298048: Combine CDS archive heap into a single block

Co-authored-by: Thomas Schatzl <tschatzl@openjdk.org>
Reviewed-by: matsaave, tschatzl
Ioi Lam 2023-04-21 15:29:45 +00:00
parent d518dbf726
commit 723037a79d
83 changed files with 803 additions and 3090 deletions
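This change collapses the formerly separate "closed" and "open" archive heap regions (and their per-region bitmaps) into one heap region described by the new ArchiveHeapInfo class. A minimal sketch of the resulting dump-time flow, assembled from the hunks below (simplified: the calls actually happen at different points during dumping, and error handling is omitted):

    // One ArchiveHeapInfo instead of the closed/open MemRegion and bitmap pairs.
    ArchiveHeapInfo heap_info;
    ArchiveHeapWriter::write(roots, &heap_info);            // copy objects, fill oopmap/ptrmap
    char* bitmap = mapinfo->write_bitmap_region(ArchivePtrMarker::ptrmap(), &heap_info,
                                                bitmap_size_in_bytes);
    if (heap_info.is_used()) {
      _total_heap_region_size = mapinfo->write_heap_region(&heap_info);
    }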

View file

@ -161,8 +161,7 @@ ArchiveBuilder::ArchiveBuilder() :
_ro_src_objs(),
_src_obj_table(INITIAL_TABLE_SIZE, MAX_TABLE_SIZE),
_buffered_to_src_table(INITIAL_TABLE_SIZE, MAX_TABLE_SIZE),
_total_closed_heap_region_size(0),
_total_open_heap_region_size(0),
_total_heap_region_size(0),
_estimated_metaspaceobj_bytes(0),
_estimated_hashtable_bytes(0)
{
@ -1051,12 +1050,11 @@ class ArchiveBuilder::CDSMapLogger : AllStatic {
}
#if INCLUDE_CDS_JAVA_HEAP
// open and closed archive regions
static void log_heap_regions(const char* which, GrowableArray<MemRegion> *regions) {
for (int i = 0; i < regions->length(); i++) {
address start = address(regions->at(i).start());
address end = address(regions->at(i).end());
log_region(which, start, end, to_requested(start));
static void log_heap_region(ArchiveHeapInfo* heap_info) {
MemRegion r = heap_info->memregion();
address start = address(r.start());
address end = address(r.end());
log_region("heap", start, end, to_requested(start));
while (start < end) {
size_t byte_size;
@ -1073,20 +1071,19 @@ class ArchiveBuilder::CDSMapLogger : AllStatic {
p2i(to_requested(start)));
byte_size = ArchiveHeapWriter::heap_roots_word_size() * BytesPerWord;
} else {
// We have reached the end of the region
// We have reached the end of the region, but have some unused space
// at the end.
log_info(cds, map)(PTR_FORMAT ": @@ Unused heap space " SIZE_FORMAT " bytes",
p2i(to_requested(start)), size_t(end - start));
log_data(start, end, to_requested(start), /*is_heap=*/true);
break;
}
address oop_end = start + byte_size;
log_data(start, oop_end, to_requested(start), /*is_heap=*/true);
start = oop_end;
}
if (start < end) {
log_info(cds, map)(PTR_FORMAT ": @@ Unused heap space " SIZE_FORMAT " bytes",
p2i(to_requested(start)), size_t(end - start));
log_data(start, end, to_requested(start), /*is_heap=*/true);
}
}
}
static address to_requested(address p) {
return ArchiveHeapWriter::buffered_addr_to_requested_addr(p);
}
@ -1118,8 +1115,7 @@ class ArchiveBuilder::CDSMapLogger : AllStatic {
public:
static void log(ArchiveBuilder* builder, FileMapInfo* mapinfo,
GrowableArray<MemRegion> *closed_heap_regions,
GrowableArray<MemRegion> *open_heap_regions,
ArchiveHeapInfo* heap_info,
char* bitmap, size_t bitmap_size_in_bytes) {
log_info(cds, map)("%s CDS archive map for %s", DumpSharedSpaces ? "Static" : "Dynamic", mapinfo->full_path());
@ -1140,11 +1136,8 @@ public:
log_data((address)bitmap, bitmap_end, 0);
#if INCLUDE_CDS_JAVA_HEAP
if (closed_heap_regions != nullptr) {
log_heap_regions("closed heap region", closed_heap_regions);
}
if (open_heap_regions != nullptr) {
log_heap_regions("open heap region", open_heap_regions);
if (heap_info->is_used()) {
log_heap_region(heap_info);
}
#endif
@ -1161,11 +1154,7 @@ void ArchiveBuilder::clean_up_src_obj_table() {
_src_obj_table.iterate(&cleaner);
}
void ArchiveBuilder::write_archive(FileMapInfo* mapinfo,
GrowableArray<MemRegion>* closed_heap_regions,
GrowableArray<MemRegion>* open_heap_regions,
GrowableArray<ArchiveHeapBitmapInfo>* closed_heap_bitmaps,
GrowableArray<ArchiveHeapBitmapInfo>* open_heap_bitmaps) {
void ArchiveBuilder::write_archive(FileMapInfo* mapinfo, ArchiveHeapInfo* heap_info) {
// Make sure NUM_CDS_REGIONS (exported in cds.h) agrees with
// MetaspaceShared::n_regions (internal to hotspot).
assert(NUM_CDS_REGIONS == MetaspaceShared::n_regions, "sanity");
@ -1174,23 +1163,14 @@ void ArchiveBuilder::write_archive(FileMapInfo* mapinfo,
write_region(mapinfo, MetaspaceShared::ro, &_ro_region, /*read_only=*/true, /*allow_exec=*/false);
size_t bitmap_size_in_bytes;
char* bitmap = mapinfo->write_bitmap_region(ArchivePtrMarker::ptrmap(), closed_heap_bitmaps, open_heap_bitmaps,
char* bitmap = mapinfo->write_bitmap_region(ArchivePtrMarker::ptrmap(), heap_info,
bitmap_size_in_bytes);
if (closed_heap_regions != nullptr) {
_total_closed_heap_region_size = mapinfo->write_heap_regions(
closed_heap_regions,
closed_heap_bitmaps,
MetaspaceShared::first_closed_heap_region,
MetaspaceShared::max_num_closed_heap_regions);
_total_open_heap_region_size = mapinfo->write_heap_regions(
open_heap_regions,
open_heap_bitmaps,
MetaspaceShared::first_open_heap_region,
MetaspaceShared::max_num_open_heap_regions);
if (heap_info->is_used()) {
_total_heap_region_size = mapinfo->write_heap_region(heap_info);
}
print_region_stats(mapinfo, closed_heap_regions, open_heap_regions);
print_region_stats(mapinfo, heap_info);
mapinfo->set_requested_base((char*)MetaspaceShared::requested_base_address());
mapinfo->set_header_crc(mapinfo->compute_header_crc());
@ -1204,7 +1184,7 @@ void ArchiveBuilder::write_archive(FileMapInfo* mapinfo,
}
if (log_is_enabled(Info, cds, map)) {
CDSMapLogger::log(this, mapinfo, closed_heap_regions, open_heap_regions,
CDSMapLogger::log(this, mapinfo, heap_info,
bitmap, bitmap_size_in_bytes);
}
CDS_JAVA_HEAP_ONLY(HeapShared::destroy_archived_object_cache());
@ -1215,20 +1195,16 @@ void ArchiveBuilder::write_region(FileMapInfo* mapinfo, int region_idx, DumpRegi
mapinfo->write_region(region_idx, dump_region->base(), dump_region->used(), read_only, allow_exec);
}
void ArchiveBuilder::print_region_stats(FileMapInfo *mapinfo,
GrowableArray<MemRegion>* closed_heap_regions,
GrowableArray<MemRegion>* open_heap_regions) {
void ArchiveBuilder::print_region_stats(FileMapInfo *mapinfo, ArchiveHeapInfo* heap_info) {
// Print statistics of all the regions
const size_t bitmap_used = mapinfo->region_at(MetaspaceShared::bm)->used();
const size_t bitmap_reserved = mapinfo->region_at(MetaspaceShared::bm)->used_aligned();
const size_t total_reserved = _ro_region.reserved() + _rw_region.reserved() +
bitmap_reserved +
_total_closed_heap_region_size +
_total_open_heap_region_size;
_total_heap_region_size;
const size_t total_bytes = _ro_region.used() + _rw_region.used() +
bitmap_used +
_total_closed_heap_region_size +
_total_open_heap_region_size;
_total_heap_region_size;
const double total_u_perc = percent_of(total_bytes, total_reserved);
_rw_region.print(total_reserved);
@ -1236,9 +1212,8 @@ void ArchiveBuilder::print_region_stats(FileMapInfo *mapinfo,
print_bitmap_region_stats(bitmap_used, total_reserved);
if (closed_heap_regions != nullptr) {
print_heap_region_stats(closed_heap_regions, "ca", total_reserved);
print_heap_region_stats(open_heap_regions, "oa", total_reserved);
if (heap_info->is_used()) {
print_heap_region_stats(heap_info, total_reserved);
}
log_debug(cds)("total : " SIZE_FORMAT_W(9) " [100.0%% of total] out of " SIZE_FORMAT_W(9) " bytes [%5.1f%% used]",
@ -1250,16 +1225,12 @@ void ArchiveBuilder::print_bitmap_region_stats(size_t size, size_t total_size) {
size, size/double(total_size)*100.0, size);
}
void ArchiveBuilder::print_heap_region_stats(GrowableArray<MemRegion>* regions,
const char *name, size_t total_size) {
int arr_len = regions == nullptr ? 0 : regions->length();
for (int i = 0; i < arr_len; i++) {
char* start = (char*)regions->at(i).start();
size_t size = regions->at(i).byte_size();
void ArchiveBuilder::print_heap_region_stats(ArchiveHeapInfo *info, size_t total_size) {
char* start = info->start();
size_t size = info->byte_size();
char* top = start + size;
log_debug(cds)("%s%d space: " SIZE_FORMAT_W(9) " [ %4.1f%% of total] out of " SIZE_FORMAT_W(9) " bytes [100.0%% used] at " INTPTR_FORMAT,
name, i, size, size/double(total_size)*100.0, size, p2i(start));
}
log_debug(cds)("hp space: " SIZE_FORMAT_W(9) " [ %4.1f%% of total] out of " SIZE_FORMAT_W(9) " bytes [100.0%% used] at " INTPTR_FORMAT,
size, size/double(total_size)*100.0, size, p2i(start));
}
void ArchiveBuilder::report_out_of_space(const char* name, size_t needed_bytes) {

View file

@ -36,7 +36,7 @@
#include "utilities/resizeableResourceHash.hpp"
#include "utilities/resourceHash.hpp"
struct ArchiveHeapBitmapInfo;
class ArchiveHeapInfo;
class CHeapBitMap;
class FileMapInfo;
class Klass;
@ -234,15 +234,11 @@ private:
// statistics
DumpAllocStats _alloc_stats;
size_t _total_closed_heap_region_size;
size_t _total_open_heap_region_size;
size_t _total_heap_region_size;
void print_region_stats(FileMapInfo *map_info,
GrowableArray<MemRegion>* closed_heap_regions,
GrowableArray<MemRegion>* open_heap_regions);
void print_region_stats(FileMapInfo *map_info, ArchiveHeapInfo* heap_info);
void print_bitmap_region_stats(size_t size, size_t total_size);
void print_heap_region_stats(GrowableArray<MemRegion>* regions,
const char *name, size_t total_size);
void print_heap_region_stats(ArchiveHeapInfo* heap_info, size_t total_size);
// For global access.
static ArchiveBuilder* _current;
@ -403,11 +399,7 @@ public:
void relocate_vm_classes();
void make_klasses_shareable();
void relocate_to_requested();
void write_archive(FileMapInfo* mapinfo,
GrowableArray<MemRegion>* closed_heap_regions,
GrowableArray<MemRegion>* open_heap_regions,
GrowableArray<ArchiveHeapBitmapInfo>* closed_heap_oopmaps,
GrowableArray<ArchiveHeapBitmapInfo>* open_heap_oopmaps);
void write_archive(FileMapInfo* mapinfo, ArchiveHeapInfo* heap_info);
void write_region(FileMapInfo* mapinfo, int region_idx, DumpRegion* dump_region,
bool read_only, bool allow_exec);

View file

@ -38,8 +38,7 @@
#if INCLUDE_CDS_JAVA_HEAP
bool ArchiveHeapLoader::_closed_regions_mapped = false;
bool ArchiveHeapLoader::_open_regions_mapped = false;
bool ArchiveHeapLoader::_is_mapped = false;
bool ArchiveHeapLoader::_is_loaded = false;
bool ArchiveHeapLoader::_narrow_oop_base_initialized = false;
@ -49,15 +48,9 @@ int ArchiveHeapLoader::_narrow_oop_shift;
// Support for loaded heap.
uintptr_t ArchiveHeapLoader::_loaded_heap_bottom = 0;
uintptr_t ArchiveHeapLoader::_loaded_heap_top = 0;
uintptr_t ArchiveHeapLoader::_dumptime_base_0 = UINTPTR_MAX;
uintptr_t ArchiveHeapLoader::_dumptime_base_1 = UINTPTR_MAX;
uintptr_t ArchiveHeapLoader::_dumptime_base_2 = UINTPTR_MAX;
uintptr_t ArchiveHeapLoader::_dumptime_base_3 = UINTPTR_MAX;
uintptr_t ArchiveHeapLoader::_dumptime_base = UINTPTR_MAX;
uintptr_t ArchiveHeapLoader::_dumptime_top = 0;
intx ArchiveHeapLoader::_runtime_offset_0 = 0;
intx ArchiveHeapLoader::_runtime_offset_1 = 0;
intx ArchiveHeapLoader::_runtime_offset_2 = 0;
intx ArchiveHeapLoader::_runtime_offset_3 = 0;
intx ArchiveHeapLoader::_runtime_offset = 0;
bool ArchiveHeapLoader::_loading_failed = false;
// Support for mapped heap.
@ -84,10 +77,10 @@ void ArchiveHeapLoader::init_narrow_oop_decoding(address base, int shift) {
_narrow_oop_shift = shift;
}
void ArchiveHeapLoader::fixup_regions() {
void ArchiveHeapLoader::fixup_region() {
FileMapInfo* mapinfo = FileMapInfo::current_info();
if (is_mapped()) {
mapinfo->fixup_mapped_heap_regions();
mapinfo->fixup_mapped_heap_region();
} else if (_loading_failed) {
fill_failed_loaded_heap();
}
@ -160,9 +153,8 @@ class PatchUncompressedEmbeddedPointers: public BitMapClosure {
void ArchiveHeapLoader::patch_compressed_embedded_pointers(BitMapView bm,
FileMapInfo* info,
FileMapRegion* map_region,
MemRegion region) {
narrowOop dt_encoded_bottom = info->encoded_heap_region_dumptime_address(map_region);
narrowOop dt_encoded_bottom = info->encoded_heap_region_dumptime_address();
narrowOop rt_encoded_bottom = CompressedOops::encode_not_null(cast_to_oop(region.start()));
log_info(cds)("patching heap embedded pointers: narrowOop 0x%8x -> 0x%8x",
(uint)dt_encoded_bottom, (uint)rt_encoded_bottom);
@ -188,7 +180,6 @@ void ArchiveHeapLoader::patch_compressed_embedded_pointers(BitMapView bm,
// Patch all the non-null pointers that are embedded in the archived heap objects
// in this (mapped) region
void ArchiveHeapLoader::patch_embedded_pointers(FileMapInfo* info,
FileMapRegion* map_region,
MemRegion region, address oopmap,
size_t oopmap_size_in_bits) {
BitMapView bm((BitMap::bm_word_t*)oopmap, oopmap_size_in_bits);
@ -200,7 +191,7 @@ void ArchiveHeapLoader::patch_embedded_pointers(FileMapInfo* info,
#endif
if (UseCompressedOops) {
patch_compressed_embedded_pointers(bm, info, map_region, region);
patch_compressed_embedded_pointers(bm, info, region);
} else {
PatchUncompressedEmbeddedPointers patcher((oop*)region.start());
bm.iterate(&patcher);
@ -219,44 +210,15 @@ struct LoadedArchiveHeapRegion {
uintptr_t _dumptime_base; // The dump-time (decoded) address of the first object in this region
intx _runtime_offset; // If an object's dump time address P is within in this region, its
// runtime address is P + _runtime_offset
static int comparator(const void* a, const void* b) {
LoadedArchiveHeapRegion* reg_a = (LoadedArchiveHeapRegion*)a;
LoadedArchiveHeapRegion* reg_b = (LoadedArchiveHeapRegion*)b;
if (reg_a->_dumptime_base < reg_b->_dumptime_base) {
return -1;
} else if (reg_a->_dumptime_base == reg_b->_dumptime_base) {
return 0;
} else {
return 1;
}
}
uintptr_t top() {
return _dumptime_base + _region_size;
}
};
void ArchiveHeapLoader::init_loaded_heap_relocation(LoadedArchiveHeapRegion* loaded_regions,
int num_loaded_regions) {
_dumptime_base_0 = loaded_regions[0]._dumptime_base;
_dumptime_base_1 = loaded_regions[1]._dumptime_base;
_dumptime_base_2 = loaded_regions[2]._dumptime_base;
_dumptime_base_3 = loaded_regions[3]._dumptime_base;
_dumptime_top = loaded_regions[num_loaded_regions-1].top();
_runtime_offset_0 = loaded_regions[0]._runtime_offset;
_runtime_offset_1 = loaded_regions[1]._runtime_offset;
_runtime_offset_2 = loaded_regions[2]._runtime_offset;
_runtime_offset_3 = loaded_regions[3]._runtime_offset;
assert(2 <= num_loaded_regions && num_loaded_regions <= 4, "must be");
if (num_loaded_regions < 4) {
_dumptime_base_3 = UINTPTR_MAX;
}
if (num_loaded_regions < 3) {
_dumptime_base_2 = UINTPTR_MAX;
}
void ArchiveHeapLoader::init_loaded_heap_relocation(LoadedArchiveHeapRegion* loaded_region) {
_dumptime_base = loaded_region->_dumptime_base;
_dumptime_top = loaded_region->top();
_runtime_offset = loaded_region->_runtime_offset;
}
bool ArchiveHeapLoader::can_load() {
@ -267,36 +229,18 @@ bool ArchiveHeapLoader::can_load() {
return Universe::heap()->can_load_archived_objects();
}
template <int NUM_LOADED_REGIONS>
class PatchLoadedRegionPointers: public BitMapClosure {
class ArchiveHeapLoader::PatchLoadedRegionPointers: public BitMapClosure {
narrowOop* _start;
intx _offset_0;
intx _offset_1;
intx _offset_2;
intx _offset_3;
uintptr_t _base_0;
uintptr_t _base_1;
uintptr_t _base_2;
uintptr_t _base_3;
intx _offset;
uintptr_t _base;
uintptr_t _top;
static_assert(MetaspaceShared::max_num_heap_regions == 4, "can't handle more than 4 regions");
static_assert(NUM_LOADED_REGIONS >= 2, "we have at least 2 loaded regions");
static_assert(NUM_LOADED_REGIONS <= 4, "we have at most 4 loaded regions");
public:
PatchLoadedRegionPointers(narrowOop* start, LoadedArchiveHeapRegion* loaded_regions)
PatchLoadedRegionPointers(narrowOop* start, LoadedArchiveHeapRegion* loaded_region)
: _start(start),
_offset_0(loaded_regions[0]._runtime_offset),
_offset_1(loaded_regions[1]._runtime_offset),
_offset_2(loaded_regions[2]._runtime_offset),
_offset_3(loaded_regions[3]._runtime_offset),
_base_0(loaded_regions[0]._dumptime_base),
_base_1(loaded_regions[1]._dumptime_base),
_base_2(loaded_regions[2]._dumptime_base),
_base_3(loaded_regions[3]._dumptime_base) {
_top = loaded_regions[NUM_LOADED_REGIONS-1].top();
}
_offset(loaded_region->_runtime_offset),
_base(loaded_region->_dumptime_base),
_top(loaded_region->top()) {}
bool do_bit(size_t offset) {
assert(UseCompressedOops, "PatchLoadedRegionPointers for uncompressed oops is unimplemented");
@ -304,138 +248,94 @@ class PatchLoadedRegionPointers: public BitMapClosure {
narrowOop v = *p;
assert(!CompressedOops::is_null(v), "null oops should have been filtered out at dump time");
uintptr_t o = cast_from_oop<uintptr_t>(ArchiveHeapLoader::decode_from_archive(v));
assert(_base_0 <= o && o < _top, "must be");
assert(_base <= o && o < _top, "must be");
// We usually have only 2 regions for the default archive. Use template to avoid unnecessary comparisons.
if (NUM_LOADED_REGIONS > 3 && o >= _base_3) {
o += _offset_3;
} else if (NUM_LOADED_REGIONS > 2 && o >= _base_2) {
o += _offset_2;
} else if (o >= _base_1) {
o += _offset_1;
} else {
o += _offset_0;
}
o += _offset;
ArchiveHeapLoader::assert_in_loaded_heap(o);
RawAccess<IS_NOT_NULL>::oop_store(p, cast_to_oop(o));
return true;
}
};
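With a single loaded region, the old template dispatch over two to four (base, offset) pairs reduces to one offset computed in init_loaded_region(). A worked sketch of the relocation arithmetic, using illustrative addresses that are not taken from the commit:

    // dump-time (requested) region: [_dumptime_base, _dumptime_top) = [0x7bfe00000, 0x7bff40000)
    // buffer from allocate_loaded_archive_space(): _loaded_heap_bottom = 0x6c0000000
    // _runtime_offset = 0x6c0000000 - 0x7bfe00000 = -0xffe00000
    // a field decodes to the dump-time address 0x7bfe01230, so its
    // runtime address = 0x7bfe01230 + _runtime_offset = 0x6c0001230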
int ArchiveHeapLoader::init_loaded_regions(FileMapInfo* mapinfo, LoadedArchiveHeapRegion* loaded_regions,
bool ArchiveHeapLoader::init_loaded_region(FileMapInfo* mapinfo, LoadedArchiveHeapRegion* loaded_region,
MemRegion& archive_space) {
size_t total_bytes = 0;
int num_loaded_regions = 0;
for (int i = MetaspaceShared::first_archive_heap_region;
i <= MetaspaceShared::last_archive_heap_region; i++) {
FileMapRegion* r = mapinfo->region_at(i);
FileMapRegion* r = mapinfo->region_at(MetaspaceShared::hp);
r->assert_is_heap_region();
if (r->used() > 0) {
if (r->used() == 0) {
return false;
}
assert(is_aligned(r->used(), HeapWordSize), "must be");
total_bytes += r->used();
LoadedArchiveHeapRegion* ri = &loaded_regions[num_loaded_regions++];
ri->_region_index = i;
ri->_region_size = r->used();
ri->_dumptime_base = (uintptr_t)mapinfo->heap_region_dumptime_address(r);
}
}
loaded_region->_region_index = MetaspaceShared::hp;
loaded_region->_region_size = r->used();
loaded_region->_dumptime_base = (uintptr_t)mapinfo->heap_region_dumptime_address();
assert(is_aligned(total_bytes, HeapWordSize), "must be");
size_t word_size = total_bytes / HeapWordSize;
HeapWord* buffer = Universe::heap()->allocate_loaded_archive_space(word_size);
if (buffer == nullptr) {
return 0;
return false;
}
archive_space = MemRegion(buffer, word_size);
_loaded_heap_bottom = (uintptr_t)archive_space.start();
_loaded_heap_top = _loaded_heap_bottom + total_bytes;
return num_loaded_regions;
loaded_region->_runtime_offset = _loaded_heap_bottom - loaded_region->_dumptime_base;
return true;
}
void ArchiveHeapLoader::sort_loaded_regions(LoadedArchiveHeapRegion* loaded_regions, int num_loaded_regions,
uintptr_t buffer) {
// Find the relocation offset of the pointers in each region
qsort(loaded_regions, num_loaded_regions, sizeof(LoadedArchiveHeapRegion),
LoadedArchiveHeapRegion::comparator);
uintptr_t p = buffer;
for (int i = 0; i < num_loaded_regions; i++) {
// This region will be loaded at p, so all objects inside this
// region will be shifted by ri->offset
LoadedArchiveHeapRegion* ri = &loaded_regions[i];
ri->_runtime_offset = p - ri->_dumptime_base;
p += ri->_region_size;
}
assert(p == _loaded_heap_top, "must be");
}
bool ArchiveHeapLoader::load_regions(FileMapInfo* mapinfo, LoadedArchiveHeapRegion* loaded_regions,
int num_loaded_regions, uintptr_t buffer) {
bool ArchiveHeapLoader::load_heap_region_impl(FileMapInfo* mapinfo, LoadedArchiveHeapRegion* loaded_region,
uintptr_t load_address) {
uintptr_t bitmap_base = (uintptr_t)mapinfo->map_bitmap_region();
if (bitmap_base == 0) {
_loading_failed = true;
return false; // OOM or CRC error
}
uintptr_t load_address = buffer;
for (int i = 0; i < num_loaded_regions; i++) {
LoadedArchiveHeapRegion* ri = &loaded_regions[i];
FileMapRegion* r = mapinfo->region_at(ri->_region_index);
if (!mapinfo->read_region(ri->_region_index, (char*)load_address, r->used(), /* do_commit = */ false)) {
FileMapRegion* r = mapinfo->region_at(loaded_region->_region_index);
if (!mapinfo->read_region(loaded_region->_region_index, (char*)load_address, r->used(), /* do_commit = */ false)) {
// There's no easy way to free the buffer, so we will fill it with zero later
// in fill_failed_loaded_heap(), and it will eventually be GC'ed.
log_warning(cds)("Loading of heap region %d has failed. Archived objects are disabled", i);
log_warning(cds)("Loading of heap region %d has failed. Archived objects are disabled", loaded_region->_region_index);
_loading_failed = true;
return false;
}
assert(r->mapped_base() == (char*)load_address, "sanity");
log_info(cds)("Loaded heap region #%d at base " INTPTR_FORMAT " top " INTPTR_FORMAT
" size " SIZE_FORMAT_W(6) " delta " INTX_FORMAT,
ri->_region_index, load_address, load_address + ri->_region_size,
ri->_region_size, ri->_runtime_offset);
loaded_region->_region_index, load_address, load_address + loaded_region->_region_size,
loaded_region->_region_size, loaded_region->_runtime_offset);
uintptr_t oopmap = bitmap_base + r->oopmap_offset();
BitMapView bm((BitMap::bm_word_t*)oopmap, r->oopmap_size_in_bits());
if (num_loaded_regions == 4) {
PatchLoadedRegionPointers<4> patcher((narrowOop*)load_address, loaded_regions);
PatchLoadedRegionPointers patcher((narrowOop*)load_address, loaded_region);
bm.iterate(&patcher);
} else if (num_loaded_regions == 3) {
PatchLoadedRegionPointers<3> patcher((narrowOop*)load_address, loaded_regions);
bm.iterate(&patcher);
} else {
assert(num_loaded_regions == 2, "must be");
PatchLoadedRegionPointers<2> patcher((narrowOop*)load_address, loaded_regions);
bm.iterate(&patcher);
}
assert(r->mapped_base() == (char*)load_address, "sanity");
load_address += r->used();
}
return true;
}
bool ArchiveHeapLoader::load_heap_regions(FileMapInfo* mapinfo) {
bool ArchiveHeapLoader::load_heap_region(FileMapInfo* mapinfo) {
assert(UseCompressedOops, "loaded heap for !UseCompressedOops is unimplemented");
init_narrow_oop_decoding(mapinfo->narrow_oop_base(), mapinfo->narrow_oop_shift());
LoadedArchiveHeapRegion loaded_regions[MetaspaceShared::max_num_heap_regions];
memset(loaded_regions, 0, sizeof(loaded_regions));
LoadedArchiveHeapRegion loaded_region;
memset(&loaded_region, 0, sizeof(loaded_region));
MemRegion archive_space;
int num_loaded_regions = init_loaded_regions(mapinfo, loaded_regions, archive_space);
if (num_loaded_regions <= 0) {
if (!init_loaded_region(mapinfo, &loaded_region, archive_space)) {
return false;
}
sort_loaded_regions(loaded_regions, num_loaded_regions, (uintptr_t)archive_space.start());
if (!load_regions(mapinfo, loaded_regions, num_loaded_regions, (uintptr_t)archive_space.start())) {
if (!load_heap_region_impl(mapinfo, &loaded_region, (uintptr_t)archive_space.start())) {
assert(_loading_failed, "must be");
return false;
}
init_loaded_heap_relocation(loaded_regions, num_loaded_regions);
init_loaded_heap_relocation(&loaded_region);
_is_loaded = true;
return true;
@ -448,14 +348,14 @@ class VerifyLoadedHeapEmbeddedPointers: public BasicOopIterateClosure {
VerifyLoadedHeapEmbeddedPointers(ResourceHashtable<uintptr_t, bool>* table) : _table(table) {}
virtual void do_oop(narrowOop* p) {
// This should be called before the loaded regions are modified, so all the embedded pointers
// must be null, or must point to a valid object in the loaded regions.
// This should be called before the loaded region is modified, so all the embedded pointers
// must be null, or must point to a valid object in the loaded region.
narrowOop v = *p;
if (!CompressedOops::is_null(v)) {
oop o = CompressedOops::decode_not_null(v);
uintptr_t u = cast_from_oop<uintptr_t>(o);
ArchiveHeapLoader::assert_in_loaded_heap(u);
guarantee(_table->contains(u), "must point to beginning of object in loaded archived regions");
guarantee(_table->contains(u), "must point to beginning of object in loaded archived region");
}
}
virtual void do_oop(oop* p) {
@ -539,15 +439,12 @@ void ArchiveHeapLoader::patch_native_pointers() {
return;
}
for (int i = MetaspaceShared::first_archive_heap_region;
i <= MetaspaceShared::last_archive_heap_region; i++) {
FileMapRegion* r = FileMapInfo::current_info()->region_at(i);
FileMapRegion* r = FileMapInfo::current_info()->region_at(MetaspaceShared::hp);
if (r->mapped_base() != nullptr && r->has_ptrmap()) {
log_info(cds, heap)("Patching native pointers in heap region %d", i);
log_info(cds, heap)("Patching native pointers in heap region");
BitMapView bm = r->ptrmap_view();
PatchNativePointers patcher((Metadata**)r->mapped_base());
bm.iterate(&patcher);
}
}
}
#endif // INCLUDE_CDS_JAVA_HEAP

View file

@ -40,24 +40,21 @@ struct LoadedArchiveHeapRegion;
class ArchiveHeapLoader : AllStatic {
public:
// At runtime, heap regions in the CDS archive can be used in two different ways,
// At runtime, the heap region in the CDS archive can be used in two different ways,
// depending on the GC type:
// - Mapped: (G1 only) the regions are directly mapped into the Java heap
// - Loaded: At VM start-up, the objects in the heap regions are copied into the
// - Mapped: (G1 only) the region is directly mapped into the Java heap
// - Loaded: At VM start-up, the objects in the heap region are copied into the
// Java heap. This is easier to implement than mapping but
// slightly less efficient, as the embedded pointers need to be relocated.
static bool can_use() { return can_map() || can_load(); }
// Can this VM map archived heap regions? Currently only G1+compressed{oops,cp}
// Can this VM map archived heap region? Currently only G1+compressed{oops,cp}
static bool can_map() {
CDS_JAVA_HEAP_ONLY(return (UseG1GC && UseCompressedClassPointers);)
NOT_CDS_JAVA_HEAP(return false;)
}
static bool is_mapped() {
return closed_regions_mapped() && open_regions_mapped();
}
// Can this VM load the objects from archived heap regions into the heap at start-up?
// Can this VM load the objects from archived heap region into the heap at start-up?
static bool can_load() NOT_CDS_JAVA_HEAP_RETURN_(false);
static void finish_initialization() NOT_CDS_JAVA_HEAP_RETURN;
static bool is_loaded() {
@ -76,25 +73,17 @@ public:
NOT_CDS_JAVA_HEAP_RETURN_(0L);
}
static void set_closed_regions_mapped() {
CDS_JAVA_HEAP_ONLY(_closed_regions_mapped = true;)
static void set_mapped() {
CDS_JAVA_HEAP_ONLY(_is_mapped = true;)
NOT_CDS_JAVA_HEAP_RETURN;
}
static bool closed_regions_mapped() {
CDS_JAVA_HEAP_ONLY(return _closed_regions_mapped;)
NOT_CDS_JAVA_HEAP_RETURN_(false);
}
static void set_open_regions_mapped() {
CDS_JAVA_HEAP_ONLY(_open_regions_mapped = true;)
NOT_CDS_JAVA_HEAP_RETURN;
}
static bool open_regions_mapped() {
CDS_JAVA_HEAP_ONLY(return _open_regions_mapped;)
static bool is_mapped() {
CDS_JAVA_HEAP_ONLY(return _is_mapped;)
NOT_CDS_JAVA_HEAP_RETURN_(false);
}
// NarrowOops stored in the CDS archive may use a different encoding scheme
// than CompressedOops::{base,shift} -- see FileMapInfo::map_heap_regions_impl.
// than CompressedOops::{base,shift} -- see FileMapInfo::map_heap_region_impl.
// To decode them, do not use CompressedOops::decode_not_null. Use this
// function instead.
inline static oop decode_from_archive(narrowOop v) NOT_CDS_JAVA_HEAP_RETURN_(nullptr);
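The closed/open mapped-state pair becomes a single flag. A minimal usage sketch, assuming the call sites follow the pattern visible earlier in this diff (the heap-mapping code in filemap.cpp that calls set_mapped() is not part of this excerpt):

    // after the heap region has been mapped successfully:
    ArchiveHeapLoader::set_mapped();

    // later, during start-up fix-up (compare ArchiveHeapLoader::fixup_region() above):
    if (ArchiveHeapLoader::is_mapped()) {
      FileMapInfo::current_info()->fixup_mapped_heap_region();
    }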
@ -104,34 +93,25 @@ public:
static void patch_compressed_embedded_pointers(BitMapView bm,
FileMapInfo* info,
FileMapRegion* map_region,
MemRegion region) NOT_CDS_JAVA_HEAP_RETURN;
static void patch_embedded_pointers(FileMapInfo* info,
FileMapRegion* map_region,
MemRegion region, address oopmap,
size_t oopmap_size_in_bits) NOT_CDS_JAVA_HEAP_RETURN;
static void fixup_regions() NOT_CDS_JAVA_HEAP_RETURN;
static void fixup_region() NOT_CDS_JAVA_HEAP_RETURN;
#if INCLUDE_CDS_JAVA_HEAP
static void init_mapped_heap_relocation(ptrdiff_t delta, int dumptime_oop_shift);
private:
static bool _closed_regions_mapped;
static bool _open_regions_mapped;
static bool _is_mapped;
static bool _is_loaded;
// Support for loaded archived heap. These are cached values from
// LoadedArchiveHeapRegion's.
static uintptr_t _dumptime_base_0;
static uintptr_t _dumptime_base_1;
static uintptr_t _dumptime_base_2;
static uintptr_t _dumptime_base_3;
static uintptr_t _dumptime_base;
static uintptr_t _dumptime_top;
static intx _runtime_offset_0;
static intx _runtime_offset_1;
static intx _runtime_offset_2;
static intx _runtime_offset_3;
static intx _runtime_offset;
static uintptr_t _loaded_heap_bottom;
static uintptr_t _loaded_heap_top;
@ -148,14 +128,10 @@ private:
static bool _mapped_heap_relocation_initialized;
static void init_narrow_oop_decoding(address base, int shift);
static int init_loaded_regions(FileMapInfo* mapinfo, LoadedArchiveHeapRegion* loaded_regions,
static bool init_loaded_region(FileMapInfo* mapinfo, LoadedArchiveHeapRegion* loaded_region,
MemRegion& archive_space);
static void sort_loaded_regions(LoadedArchiveHeapRegion* loaded_regions, int num_loaded_regions,
uintptr_t buffer);
static bool load_regions(FileMapInfo* mapinfo, LoadedArchiveHeapRegion* loaded_regions,
int num_loaded_regions, uintptr_t buffer);
static void init_loaded_heap_relocation(LoadedArchiveHeapRegion* reloc_info,
int num_loaded_regions);
static bool load_heap_region_impl(FileMapInfo* mapinfo, LoadedArchiveHeapRegion* loaded_region, uintptr_t buffer);
static void init_loaded_heap_relocation(LoadedArchiveHeapRegion* reloc_info);
static void patch_native_pointers();
static void finish_loaded_heap();
static void verify_loaded_heap();
@ -168,9 +144,11 @@ private:
template<bool IS_MAPPED>
inline static oop decode_from_archive_impl(narrowOop v) NOT_CDS_JAVA_HEAP_RETURN_(nullptr);
class PatchLoadedRegionPointers;
public:
static bool load_heap_regions(FileMapInfo* mapinfo);
static bool load_heap_region(FileMapInfo* mapinfo);
static void assert_in_loaded_heap(uintptr_t o) {
assert(is_in_loaded_heap(o), "must be");
}

View file

@ -1,5 +1,5 @@
/*
* Copyright (c) 2018, 2022, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2018, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -38,18 +38,10 @@ inline oop ArchiveHeapLoader::decode_from_archive_impl(narrowOop v) {
assert(_narrow_oop_base_initialized, "relocation information must have been initialized");
uintptr_t p = ((uintptr_t)_narrow_oop_base) + ((uintptr_t)v << _narrow_oop_shift);
if (IS_MAPPED) {
assert(_dumptime_base_0 == UINTPTR_MAX, "must be");
} else if (p >= _dumptime_base_0) {
assert(_dumptime_base == UINTPTR_MAX, "must be");
} else if (p >= _dumptime_base) {
assert(p < _dumptime_top, "must be");
if (p >= _dumptime_base_3) {
p += _runtime_offset_3;
} else if (p >= _dumptime_base_2) {
p += _runtime_offset_2;
} else if (p >= _dumptime_base_1) {
p += _runtime_offset_1;
} else {
p += _runtime_offset_0;
}
p += _runtime_offset;
}
oop result = cast_to_oop((uintptr_t)p);

View file

@ -47,26 +47,16 @@
#if INCLUDE_CDS_JAVA_HEAP
GrowableArrayCHeap<u1, mtClassShared>* ArchiveHeapWriter::_buffer;
// The following are offsets from buffer_bottom()
size_t ArchiveHeapWriter::_buffer_top;
size_t ArchiveHeapWriter::_open_bottom;
size_t ArchiveHeapWriter::_open_top;
size_t ArchiveHeapWriter::_closed_bottom;
size_t ArchiveHeapWriter::_closed_top;
size_t ArchiveHeapWriter::_heap_roots_bottom;
size_t ArchiveHeapWriter::_buffer_used;
size_t ArchiveHeapWriter::_heap_roots_bottom_offset;
size_t ArchiveHeapWriter::_heap_roots_word_size;
address ArchiveHeapWriter::_requested_open_region_bottom;
address ArchiveHeapWriter::_requested_open_region_top;
address ArchiveHeapWriter::_requested_closed_region_bottom;
address ArchiveHeapWriter::_requested_closed_region_top;
ResourceBitMap* ArchiveHeapWriter::_closed_oopmap;
ResourceBitMap* ArchiveHeapWriter::_open_oopmap;
address ArchiveHeapWriter::_requested_bottom;
address ArchiveHeapWriter::_requested_top;
GrowableArrayCHeap<ArchiveHeapWriter::NativePointerInfo, mtClassShared>* ArchiveHeapWriter::_native_pointers;
GrowableArrayCHeap<oop, mtClassShared>* ArchiveHeapWriter::_source_objs;
@ -80,10 +70,8 @@ void ArchiveHeapWriter::init() {
_buffer_offset_to_source_obj_table = new BufferOffsetToSourceObjectTable();
_requested_open_region_bottom = nullptr;
_requested_open_region_top = nullptr;
_requested_closed_region_bottom = nullptr;
_requested_closed_region_top = nullptr;
_requested_bottom = nullptr;
_requested_top = nullptr;
_native_pointers = new GrowableArrayCHeap<NativePointerInfo, mtClassShared>(2048);
_source_objs = new GrowableArrayCHeap<oop, mtClassShared>(10000);
@ -97,17 +85,13 @@ void ArchiveHeapWriter::add_source_obj(oop src_obj) {
_source_objs->append(src_obj);
}
// For the time being, always support two regions (to be strictly compatible with existing G1
// mapping code. We might eventually use a single region (JDK-8298048).
void ArchiveHeapWriter::write(GrowableArrayCHeap<oop, mtClassShared>* roots,
GrowableArray<MemRegion>* closed_regions, GrowableArray<MemRegion>* open_regions,
GrowableArray<ArchiveHeapBitmapInfo>* closed_bitmaps,
GrowableArray<ArchiveHeapBitmapInfo>* open_bitmaps) {
ArchiveHeapInfo* heap_info) {
assert(HeapShared::can_write(), "sanity");
allocate_buffer();
copy_source_objs_to_buffer(roots);
set_requested_address_for_regions(closed_regions, open_regions);
relocate_embedded_oops(roots, closed_bitmaps, open_bitmaps);
set_requested_address(heap_info);
relocate_embedded_oops(roots, heap_info);
}
bool ArchiveHeapWriter::is_too_large_to_archive(oop o) {
@ -133,18 +117,15 @@ bool ArchiveHeapWriter::is_too_large_to_archive(size_t size) {
}
// Various lookup functions between source_obj, buffered_obj and requested_obj
bool ArchiveHeapWriter::is_in_requested_regions(oop o) {
assert(_requested_open_region_bottom != nullptr, "do not call before this is initialized");
assert(_requested_closed_region_bottom != nullptr, "do not call before this is initialized");
bool ArchiveHeapWriter::is_in_requested_range(oop o) {
assert(_requested_bottom != nullptr, "do not call before _requested_bottom is initialized");
address a = cast_from_oop<address>(o);
return (_requested_open_region_bottom <= a && a < _requested_open_region_top) ||
(_requested_closed_region_bottom <= a && a < _requested_closed_region_top);
return (_requested_bottom <= a && a < _requested_top);
}
oop ArchiveHeapWriter::requested_obj_from_buffer_offset(size_t offset) {
oop req_obj = cast_to_oop(_requested_open_region_bottom + offset);
assert(is_in_requested_regions(req_obj), "must be");
oop req_obj = cast_to_oop(_requested_bottom + offset);
assert(is_in_requested_range(req_obj), "must be");
return req_obj;
}
@ -168,30 +149,22 @@ oop ArchiveHeapWriter::buffered_addr_to_source_obj(address buffered_addr) {
}
address ArchiveHeapWriter::buffered_addr_to_requested_addr(address buffered_addr) {
return _requested_open_region_bottom + buffered_address_to_offset(buffered_addr);
return _requested_bottom + buffered_address_to_offset(buffered_addr);
}
oop ArchiveHeapWriter::heap_roots_requested_address() {
return requested_obj_from_buffer_offset(_heap_roots_bottom);
return cast_to_oop(_requested_bottom + _heap_roots_bottom_offset);
}
address ArchiveHeapWriter::heap_region_requested_bottom(int heap_region_idx) {
address ArchiveHeapWriter::requested_address() {
assert(_buffer != nullptr, "must be initialized");
switch (heap_region_idx) {
case MetaspaceShared::first_closed_heap_region:
return _requested_closed_region_bottom;
case MetaspaceShared::first_open_heap_region:
return _requested_open_region_bottom;
default:
ShouldNotReachHere();
return nullptr;
}
return _requested_bottom;
}
void ArchiveHeapWriter::allocate_buffer() {
int initial_buffer_size = 100000;
_buffer = new GrowableArrayCHeap<u1, mtClassShared>(initial_buffer_size);
_open_bottom = _buffer_top = 0;
_buffer_used = 0;
ensure_buffer_space(1); // so that buffer_bottom() works
}
@ -203,7 +176,7 @@ void ArchiveHeapWriter::ensure_buffer_space(size_t min_bytes) {
void ArchiveHeapWriter::copy_roots_to_buffer(GrowableArrayCHeap<oop, mtClassShared>* roots) {
Klass* k = Universe::objectArrayKlassObj(); // already relocated to point to archived klass
int length = roots != nullptr ? roots->length() : 0;
int length = roots->length();
_heap_roots_word_size = objArrayOopDesc::object_size(length);
size_t byte_size = _heap_roots_word_size * HeapWordSize;
if (byte_size >= MIN_GC_REGION_ALIGNMENT) {
@ -213,10 +186,10 @@ void ArchiveHeapWriter::copy_roots_to_buffer(GrowableArrayCHeap<oop, mtClassShar
maybe_fill_gc_region_gap(byte_size);
size_t new_top = _buffer_top + byte_size;
ensure_buffer_space(new_top);
size_t new_used = _buffer_used + byte_size;
ensure_buffer_space(new_used);
HeapWord* mem = offset_to_buffered_address<HeapWord*>(_buffer_top);
HeapWord* mem = offset_to_buffered_address<HeapWord*>(_buffer_used);
memset(mem, 0, byte_size);
{
// This is copied from MemAllocator::finish
@ -238,40 +211,27 @@ void ArchiveHeapWriter::copy_roots_to_buffer(GrowableArrayCHeap<oop, mtClassShar
* arrayOop->obj_at_addr<oop>(i) = o;
}
}
log_info(cds)("archived obj roots[%d] = " SIZE_FORMAT " bytes, klass = %p, obj = %p", length, byte_size, k, mem);
log_info(cds, heap)("archived obj roots[%d] = " SIZE_FORMAT " bytes, klass = %p, obj = %p", length, byte_size, k, mem);
_heap_roots_bottom = _buffer_top;
_buffer_top = new_top;
_heap_roots_bottom_offset = _buffer_used;
_buffer_used = new_used;
}
void ArchiveHeapWriter::copy_source_objs_to_buffer(GrowableArrayCHeap<oop, mtClassShared>* roots) {
copy_source_objs_to_buffer_by_region(/*copy_open_region=*/true);
copy_roots_to_buffer(roots);
_open_top = _buffer_top;
// Align the closed region to the next G1 region
_buffer_top = _closed_bottom = align_up(_buffer_top, HeapRegion::GrainBytes);
copy_source_objs_to_buffer_by_region(/*copy_open_region=*/false);
_closed_top = _buffer_top;
log_info(cds, heap)("Size of open region = " SIZE_FORMAT " bytes", _open_top - _open_bottom);
log_info(cds, heap)("Size of closed region = " SIZE_FORMAT " bytes", _closed_top - _closed_bottom);
}
void ArchiveHeapWriter::copy_source_objs_to_buffer_by_region(bool copy_open_region) {
for (int i = 0; i < _source_objs->length(); i++) {
oop src_obj = _source_objs->at(i);
HeapShared::CachedOopInfo* info = HeapShared::archived_object_cache()->get(src_obj);
assert(info != nullptr, "must be");
if (info->in_open_region() == copy_open_region) {
// For region-based collectors such as G1, we need to make sure that we don't have
// an object that can possible span across two regions.
size_t buffer_offset = copy_one_source_obj_to_buffer(src_obj);
info->set_buffer_offset(buffer_offset);
_buffer_offset_to_source_obj_table->put(buffer_offset, src_obj);
}
}
copy_roots_to_buffer(roots);
log_info(cds)("Size of heap region = " SIZE_FORMAT " bytes, %d objects, %d roots",
_buffer_used, _source_objs->length() + 1, roots->length());
}
size_t ArchiveHeapWriter::filler_array_byte_size(int length) {
@ -298,7 +258,7 @@ int ArchiveHeapWriter::filler_array_length(size_t fill_bytes) {
void ArchiveHeapWriter::init_filler_array_at_buffer_top(int array_length, size_t fill_bytes) {
assert(UseCompressedClassPointers, "Archived heap only supported for compressed klasses");
Klass* oak = Universe::objectArrayKlassObj(); // already relocated to point to archived klass
HeapWord* mem = offset_to_buffered_address<HeapWord*>(_buffer_top);
HeapWord* mem = offset_to_buffered_address<HeapWord*>(_buffer_used);
memset(mem, 0, fill_bytes);
oopDesc::set_mark(mem, markWord::prototype());
narrowKlass nk = ArchiveBuilder::current()->get_requested_narrow_klass(oak);
@ -313,10 +273,10 @@ void ArchiveHeapWriter::maybe_fill_gc_region_gap(size_t required_byte_size) {
// required_byte_size has been allocated. If not, fill the remainder of the current
// region.
size_t min_filler_byte_size = filler_array_byte_size(0);
size_t new_top = _buffer_top + required_byte_size + min_filler_byte_size;
size_t new_used = _buffer_used + required_byte_size + min_filler_byte_size;
const size_t cur_min_region_bottom = align_down(_buffer_top, MIN_GC_REGION_ALIGNMENT);
const size_t next_min_region_bottom = align_down(new_top, MIN_GC_REGION_ALIGNMENT);
const size_t cur_min_region_bottom = align_down(_buffer_used, MIN_GC_REGION_ALIGNMENT);
const size_t next_min_region_bottom = align_down(new_used, MIN_GC_REGION_ALIGNMENT);
if (cur_min_region_bottom != next_min_region_bottom) {
// Make sure that no objects span across MIN_GC_REGION_ALIGNMENT. This way
@ -326,16 +286,16 @@ void ArchiveHeapWriter::maybe_fill_gc_region_gap(size_t required_byte_size) {
"no buffered object can be larger than %d bytes", MIN_GC_REGION_ALIGNMENT);
const size_t filler_end = next_min_region_bottom;
const size_t fill_bytes = filler_end - _buffer_top;
const size_t fill_bytes = filler_end - _buffer_used;
assert(fill_bytes > 0, "must be");
ensure_buffer_space(filler_end);
int array_length = filler_array_length(fill_bytes);
log_info(cds, heap)("Inserting filler obj array of %d elements (" SIZE_FORMAT " bytes total) @ buffer offset " SIZE_FORMAT,
array_length, fill_bytes, _buffer_top);
array_length, fill_bytes, _buffer_used);
init_filler_array_at_buffer_top(array_length, fill_bytes);
_buffer_top = filler_end;
_buffer_used = filler_end;
}
}
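maybe_fill_gc_region_gap() is unchanged in substance; _buffer_top is merely renamed to _buffer_used. A worked example of when a filler array gets inserted, with illustrative numbers and MIN_GC_REGION_ALIGNMENT assumed to be 1 MB:

    // _buffer_used                   = 0x0ff8f0
    // required_byte_size             = 0x001000
    // new_used                       = 0x1008f0 + min_filler_byte_size
    // align_down(_buffer_used, 1 MB) = 0x000000
    // align_down(new_used,    1 MB)  = 0x100000   -> the two differ, so a filler objArray
    //   covering [0x0ff8f0, 0x100000) is written, _buffer_used becomes 0x100000, and the
    //   next object cannot straddle a minimal GC region boundary.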
@ -344,72 +304,58 @@ size_t ArchiveHeapWriter::copy_one_source_obj_to_buffer(oop src_obj) {
size_t byte_size = src_obj->size() * HeapWordSize;
assert(byte_size > 0, "no zero-size objects");
// For region-based collectors such as G1, the archive heap may be mapped into
// multiple regions. We need to make sure that we don't have an object that can possible
// span across two regions.
maybe_fill_gc_region_gap(byte_size);
size_t new_top = _buffer_top + byte_size;
assert(new_top > _buffer_top, "no wrap around");
size_t new_used = _buffer_used + byte_size;
assert(new_used > _buffer_used, "no wrap around");
size_t cur_min_region_bottom = align_down(_buffer_top, MIN_GC_REGION_ALIGNMENT);
size_t next_min_region_bottom = align_down(new_top, MIN_GC_REGION_ALIGNMENT);
size_t cur_min_region_bottom = align_down(_buffer_used, MIN_GC_REGION_ALIGNMENT);
size_t next_min_region_bottom = align_down(new_used, MIN_GC_REGION_ALIGNMENT);
assert(cur_min_region_bottom == next_min_region_bottom, "no object should cross minimal GC region boundaries");
ensure_buffer_space(new_top);
ensure_buffer_space(new_used);
address from = cast_from_oop<address>(src_obj);
address to = offset_to_buffered_address<address>(_buffer_top);
assert(is_object_aligned(_buffer_top), "sanity");
address to = offset_to_buffered_address<address>(_buffer_used);
assert(is_object_aligned(_buffer_used), "sanity");
assert(is_object_aligned(byte_size), "sanity");
memcpy(to, from, byte_size);
size_t buffered_obj_offset = _buffer_top;
_buffer_top = new_top;
size_t buffered_obj_offset = _buffer_used;
_buffer_used = new_used;
return buffered_obj_offset;
}
void ArchiveHeapWriter::set_requested_address_for_regions(GrowableArray<MemRegion>* closed_regions,
GrowableArray<MemRegion>* open_regions) {
assert(closed_regions->length() == 0, "must be");
assert(open_regions->length() == 0, "must be");
void ArchiveHeapWriter::set_requested_address(ArchiveHeapInfo* info) {
assert(!info->is_used(), "only set once");
assert(UseG1GC, "must be");
address heap_end = (address)G1CollectedHeap::heap()->reserved().end();
log_info(cds, heap)("Heap end = %p", heap_end);
size_t closed_region_byte_size = _closed_top - _closed_bottom;
size_t open_region_byte_size = _open_top - _open_bottom;
assert(closed_region_byte_size > 0, "must archived at least one object for closed region!");
assert(open_region_byte_size > 0, "must archived at least one object for open region!");
size_t heap_region_byte_size = _buffer_used;
assert(heap_region_byte_size > 0, "must archived at least one object!");
// The following two asserts are ensured by copy_source_objs_to_buffer_by_region().
assert(is_aligned(_closed_bottom, HeapRegion::GrainBytes), "sanity");
assert(is_aligned(_open_bottom, HeapRegion::GrainBytes), "sanity");
_requested_bottom = align_down(heap_end - heap_region_byte_size, HeapRegion::GrainBytes);
assert(is_aligned(_requested_bottom, HeapRegion::GrainBytes), "sanity");
_requested_closed_region_bottom = align_down(heap_end - closed_region_byte_size, HeapRegion::GrainBytes);
_requested_open_region_bottom = _requested_closed_region_bottom - (_closed_bottom - _open_bottom);
_requested_top = _requested_bottom + _buffer_used;
assert(is_aligned(_requested_closed_region_bottom, HeapRegion::GrainBytes), "sanity");
assert(is_aligned(_requested_open_region_bottom, HeapRegion::GrainBytes), "sanity");
_requested_open_region_top = _requested_open_region_bottom + (_open_top - _open_bottom);
_requested_closed_region_top = _requested_closed_region_bottom + (_closed_top - _closed_bottom);
assert(_requested_open_region_top <= _requested_closed_region_bottom, "no overlap");
closed_regions->append(MemRegion(offset_to_buffered_address<HeapWord*>(_closed_bottom),
offset_to_buffered_address<HeapWord*>(_closed_top)));
open_regions->append( MemRegion(offset_to_buffered_address<HeapWord*>(_open_bottom),
offset_to_buffered_address<HeapWord*>(_open_top)));
info->set_memregion(MemRegion(offset_to_buffered_address<HeapWord*>(0),
offset_to_buffered_address<HeapWord*>(_buffer_used)));
}
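set_requested_address() now places the single region at the end of the reserved G1 heap, aligned down to a G1 region boundary. A worked example of the arithmetic with illustrative numbers (1 MB G1 regions assumed):

    // heap_end          = 0x800000000      (end of the reserved G1 range)
    // _buffer_used      = 0x2c0800         (bytes of archived objects, incl. fillers)
    // _requested_bottom = align_down(0x800000000 - 0x2c0800, 1 MB) = 0x7ffd00000
    // _requested_top    = _requested_bottom + _buffer_used         = 0x7ffdc0800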
// Oop relocation
template <typename T> T* ArchiveHeapWriter::requested_addr_to_buffered_addr(T* p) {
assert(is_in_requested_regions(cast_to_oop(p)), "must be");
assert(is_in_requested_range(cast_to_oop(p)), "must be");
address addr = address(p);
assert(addr >= _requested_open_region_bottom, "must be");
size_t offset = addr - _requested_open_region_bottom;
assert(addr >= _requested_bottom, "must be");
size_t offset = addr - _requested_bottom;
return offset_to_buffered_address<T*>(offset);
}
@ -421,7 +367,7 @@ template <typename T> oop ArchiveHeapWriter::load_source_oop_from_buffer(T* buff
template <typename T> void ArchiveHeapWriter::store_requested_oop_in_buffer(T* buffered_addr,
oop request_oop) {
assert(is_in_requested_regions(request_oop), "must be");
assert(is_in_requested_range(request_oop), "must be");
store_oop_in_buffer(buffered_addr, request_oop);
}
@ -445,30 +391,22 @@ oop ArchiveHeapWriter::load_oop_from_buffer(narrowOop* buffered_addr) {
return CompressedOops::decode(*buffered_addr);
}
template <typename T> void ArchiveHeapWriter::relocate_field_in_buffer(T* field_addr_in_buffer) {
template <typename T> void ArchiveHeapWriter::relocate_field_in_buffer(T* field_addr_in_buffer, CHeapBitMap* oopmap) {
oop source_referent = load_source_oop_from_buffer<T>(field_addr_in_buffer);
if (!CompressedOops::is_null(source_referent)) {
oop request_referent = source_obj_to_requested_obj(source_referent);
store_requested_oop_in_buffer<T>(field_addr_in_buffer, request_referent);
mark_oop_pointer<T>(field_addr_in_buffer);
mark_oop_pointer<T>(field_addr_in_buffer, oopmap);
}
}
template <typename T> void ArchiveHeapWriter::mark_oop_pointer(T* buffered_addr) {
template <typename T> void ArchiveHeapWriter::mark_oop_pointer(T* buffered_addr, CHeapBitMap* oopmap) {
T* request_p = (T*)(buffered_addr_to_requested_addr((address)buffered_addr));
ResourceBitMap* oopmap;
address requested_region_bottom;
if (request_p >= (T*)_requested_closed_region_bottom) {
assert(request_p < (T*)_requested_closed_region_top, "sanity");
oopmap = _closed_oopmap;
requested_region_bottom = _requested_closed_region_bottom;
} else {
assert(request_p >= (T*)_requested_open_region_bottom, "sanity");
assert(request_p < (T*)_requested_open_region_top, "sanity");
oopmap = _open_oopmap;
requested_region_bottom = _requested_open_region_bottom;
}
assert(request_p >= (T*)_requested_bottom, "sanity");
assert(request_p < (T*)_requested_top, "sanity");
requested_region_bottom = _requested_bottom;
// Mark the pointer in the oopmap
T* region_bottom = (T*)requested_region_bottom;
@ -501,18 +439,19 @@ void ArchiveHeapWriter::update_header_for_requested_obj(oop requested_obj, oop s
}
// Relocate an element in the buffered copy of HeapShared::roots()
template <typename T> void ArchiveHeapWriter::relocate_root_at(oop requested_roots, int index) {
template <typename T> void ArchiveHeapWriter::relocate_root_at(oop requested_roots, int index, CHeapBitMap* oopmap) {
size_t offset = (size_t)((objArrayOop)requested_roots)->obj_at_offset<T>(index);
relocate_field_in_buffer<T>((T*)(buffered_heap_roots_addr() + offset));
relocate_field_in_buffer<T>((T*)(buffered_heap_roots_addr() + offset), oopmap);
}
class ArchiveHeapWriter::EmbeddedOopRelocator: public BasicOopIterateClosure {
oop _src_obj;
address _buffered_obj;
CHeapBitMap* _oopmap;
public:
EmbeddedOopRelocator(oop src_obj, address buffered_obj) :
_src_obj(src_obj), _buffered_obj(buffered_obj) {}
EmbeddedOopRelocator(oop src_obj, address buffered_obj, CHeapBitMap* oopmap) :
_src_obj(src_obj), _buffered_obj(buffered_obj), _oopmap(oopmap) {}
void do_oop(narrowOop *p) { EmbeddedOopRelocator::do_oop_work(p); }
void do_oop( oop *p) { EmbeddedOopRelocator::do_oop_work(p); }
@ -520,82 +459,40 @@ public:
private:
template <class T> void do_oop_work(T *p) {
size_t field_offset = pointer_delta(p, _src_obj, sizeof(char));
ArchiveHeapWriter::relocate_field_in_buffer<T>((T*)(_buffered_obj + field_offset));
ArchiveHeapWriter::relocate_field_in_buffer<T>((T*)(_buffered_obj + field_offset), _oopmap);
}
};
// Update all oop fields embedded in the buffered objects
void ArchiveHeapWriter::relocate_embedded_oops(GrowableArrayCHeap<oop, mtClassShared>* roots,
GrowableArray<ArchiveHeapBitmapInfo>* closed_bitmaps,
GrowableArray<ArchiveHeapBitmapInfo>* open_bitmaps) {
ArchiveHeapInfo* heap_info) {
size_t oopmap_unit = (UseCompressedOops ? sizeof(narrowOop) : sizeof(oop));
size_t closed_region_byte_size = _closed_top - _closed_bottom;
size_t open_region_byte_size = _open_top - _open_bottom;
ResourceBitMap closed_oopmap(closed_region_byte_size / oopmap_unit);
ResourceBitMap open_oopmap (open_region_byte_size / oopmap_unit);
_closed_oopmap = &closed_oopmap;
_open_oopmap = &open_oopmap;
size_t heap_region_byte_size = _buffer_used;
heap_info->oopmap()->resize(heap_region_byte_size / oopmap_unit);
auto iterator = [&] (oop src_obj, HeapShared::CachedOopInfo& info) {
oop requested_obj = requested_obj_from_buffer_offset(info.buffer_offset());
update_header_for_requested_obj(requested_obj, src_obj, src_obj->klass());
address buffered_obj = offset_to_buffered_address<address>(info.buffer_offset());
EmbeddedOopRelocator relocator(src_obj, buffered_obj);
EmbeddedOopRelocator relocator(src_obj, buffered_obj, heap_info->oopmap());
src_obj->oop_iterate(&relocator);
};
HeapShared::archived_object_cache()->iterate_all(iterator);
// Relocate HeapShared::roots(), which is created in copy_roots_to_buffer() and
// doesn't have a corresponding src_obj, so we can't use EmbeddedOopRelocator on it.
oop requested_roots = requested_obj_from_buffer_offset(_heap_roots_bottom);
oop requested_roots = requested_obj_from_buffer_offset(_heap_roots_bottom_offset);
update_header_for_requested_obj(requested_roots, nullptr, Universe::objectArrayKlassObj());
int length = roots != nullptr ? roots->length() : 0;
for (int i = 0; i < length; i++) {
if (UseCompressedOops) {
relocate_root_at<narrowOop>(requested_roots, i);
relocate_root_at<narrowOop>(requested_roots, i, heap_info->oopmap());
} else {
relocate_root_at<oop>(requested_roots, i);
relocate_root_at<oop>(requested_roots, i, heap_info->oopmap());
}
}
closed_bitmaps->append(make_bitmap_info(&closed_oopmap, /*is_open=*/false, /*is_oopmap=*/true));
open_bitmaps ->append(make_bitmap_info(&open_oopmap, /*is_open=*/false, /*is_oopmap=*/true));
closed_bitmaps->append(compute_ptrmap(/*is_open=*/false));
open_bitmaps ->append(compute_ptrmap(/*is_open=*/true));
_closed_oopmap = nullptr;
_open_oopmap = nullptr;
}
ArchiveHeapBitmapInfo ArchiveHeapWriter::make_bitmap_info(ResourceBitMap* bitmap, bool is_open, bool is_oopmap) {
size_t size_in_bits = bitmap->size();
size_t size_in_bytes;
uintptr_t* buffer;
if (size_in_bits > 0) {
size_in_bytes = bitmap->size_in_bytes();
buffer = (uintptr_t*)NEW_C_HEAP_ARRAY(char, size_in_bytes, mtInternal);
bitmap->write_to(buffer, size_in_bytes);
} else {
size_in_bytes = 0;
buffer = nullptr;
}
log_info(cds, heap)("%s @ " INTPTR_FORMAT " (" SIZE_FORMAT_W(6) " bytes) for %s heap region",
is_oopmap ? "Oopmap" : "Ptrmap",
p2i(buffer), size_in_bytes,
is_open? "open" : "closed");
ArchiveHeapBitmapInfo info;
info._map = (address)buffer;
info._size_in_bits = size_in_bits;
info._size_in_bytes = size_in_bytes;
return info;
compute_ptrmap(heap_info);
}
void ArchiveHeapWriter::mark_native_pointer(oop src_obj, int field_offset) {
@ -608,18 +505,18 @@ void ArchiveHeapWriter::mark_native_pointer(oop src_obj, int field_offset) {
}
}
ArchiveHeapBitmapInfo ArchiveHeapWriter::compute_ptrmap(bool is_open) {
void ArchiveHeapWriter::compute_ptrmap(ArchiveHeapInfo* heap_info) {
int num_non_null_ptrs = 0;
Metadata** bottom = (Metadata**) (is_open ? _requested_open_region_bottom: _requested_closed_region_bottom);
Metadata** top = (Metadata**) (is_open ? _requested_open_region_top: _requested_closed_region_top); // exclusive
ResourceBitMap ptrmap(top - bottom);
Metadata** bottom = (Metadata**) _requested_bottom;
Metadata** top = (Metadata**) _requested_top; // exclusive
heap_info->ptrmap()->resize(top - bottom);
BitMap::idx_t max_idx = 32; // paranoid - don't make it too small
for (int i = 0; i < _native_pointers->length(); i++) {
NativePointerInfo info = _native_pointers->at(i);
oop src_obj = info._src_obj;
int field_offset = info._field_offset;
HeapShared::CachedOopInfo* p = HeapShared::archived_object_cache()->get(src_obj);
if (p->in_open_region() == is_open) {
// requested_field_addr = the address of this field in the requested space
oop requested_obj = requested_obj_from_buffer_offset(p->buffer_offset());
Metadata** requested_field_addr = (Metadata**)(cast_from_oop<address>(requested_obj) + field_offset);
@ -627,8 +524,9 @@ ArchiveHeapBitmapInfo ArchiveHeapWriter::compute_ptrmap(bool is_open) {
// Mark this field in the bitmap
BitMap::idx_t idx = requested_field_addr - bottom;
ptrmap.set_bit(idx);
heap_info->ptrmap()->set_bit(idx);
num_non_null_ptrs ++;
max_idx = MAX2(max_idx, idx);
// Set the native pointer to the requested address of the metadata (at runtime, the metadata will have
// this address if the RO/RW regions are mapped at the default location).
@ -641,17 +539,10 @@ ArchiveHeapBitmapInfo ArchiveHeapWriter::compute_ptrmap(bool is_open) {
address requested_native_ptr = ArchiveBuilder::current()->to_requested(buffered_native_ptr);
*buffered_field_addr = (Metadata*)requested_native_ptr;
}
}
log_info(cds, heap)("compute_ptrmap: marked %d non-null native pointers for %s heap region",
num_non_null_ptrs, is_open ? "open" : "closed");
if (num_non_null_ptrs == 0) {
ResourceBitMap empty;
return make_bitmap_info(&empty, is_open, /*is_oopmap=*/ false);
} else {
return make_bitmap_info(&ptrmap, is_open, /*is_oopmap=*/ false);
}
heap_info->ptrmap()->resize(max_idx + 1);
log_info(cds, heap)("calculate_ptrmap: marked %d non-null native pointers for heap region (" SIZE_FORMAT " bits)",
num_non_null_ptrs, size_t(heap_info->ptrmap()->size()));
}
#endif // INCLUDE_CDS_JAVA_HEAP

View file

@ -35,11 +35,28 @@
#include "utilities/macros.hpp"
#include "utilities/resourceHash.hpp"
#if INCLUDE_CDS_JAVA_HEAP
struct ArchiveHeapBitmapInfo;
class MemRegion;
class ArchiveHeapInfo {
MemRegion _memregion;
CHeapBitMap _oopmap;
CHeapBitMap _ptrmap;
public:
ArchiveHeapInfo() : _memregion(), _oopmap(128, mtClassShared), _ptrmap(128, mtClassShared) {}
bool is_used() { return !_memregion.is_empty(); }
MemRegion memregion() { return _memregion; }
void set_memregion(MemRegion r) { _memregion = r; }
char* start() { return (char*)_memregion.start(); }
size_t byte_size() { return _memregion.byte_size(); }
CHeapBitMap* oopmap() { return &_oopmap; }
CHeapBitMap* ptrmap() { return &_ptrmap; }
};
#if INCLUDE_CDS_JAVA_HEAP
class ArchiveHeapWriter : AllStatic {
class EmbeddedOopRelocator;
struct NativePointerInfo {
@ -72,31 +89,16 @@ class ArchiveHeapWriter : AllStatic {
static GrowableArrayCHeap<u1, mtClassShared>* _buffer;
// The exclusive top of the last object that has been copied into this->_buffer.
static size_t _buffer_top;
// The bounds of the open region inside this->_buffer.
static size_t _open_bottom; // inclusive
static size_t _open_top; // exclusive
// The bounds of the closed region inside this->_buffer.
static size_t _closed_bottom; // inclusive
static size_t _closed_top; // exclusive
// The number of bytes that have written into _buffer (may be smaller than _buffer->length()).
static size_t _buffer_used;
// The bottom of the copy of Heap::roots() inside this->_buffer.
static size_t _heap_roots_bottom;
static size_t _heap_roots_bottom_offset;
static size_t _heap_roots_word_size;
static address _requested_open_region_bottom;
static address _requested_open_region_top;
static address _requested_closed_region_bottom;
static address _requested_closed_region_top;
static ResourceBitMap* _closed_oopmap;
static ResourceBitMap* _open_oopmap;
static ArchiveHeapBitmapInfo _closed_oopmap_info;
static ArchiveHeapBitmapInfo _open_oopmap_info;
// The address range of the requested location of the archived heap objects.
static address _requested_bottom;
static address _requested_top;
static GrowableArrayCHeap<NativePointerInfo, mtClassShared>* _native_pointers;
static GrowableArrayCHeap<oop, mtClassShared>* _source_objs;
@ -127,8 +129,9 @@ class ArchiveHeapWriter : AllStatic {
return offset_to_buffered_address<address>(0);
}
// The exclusive end of the last object that was copied into the buffer.
static address buffer_top() {
return buffer_bottom() + _buffer_top;
return buffer_bottom() + _buffer_used;
}
static bool in_buffer(address buffered_addr) {
@ -142,7 +145,6 @@ class ArchiveHeapWriter : AllStatic {
static void copy_roots_to_buffer(GrowableArrayCHeap<oop, mtClassShared>* roots);
static void copy_source_objs_to_buffer(GrowableArrayCHeap<oop, mtClassShared>* roots);
static void copy_source_objs_to_buffer_by_region(bool copy_open_region);
static size_t copy_one_source_obj_to_buffer(oop src_obj);
static void maybe_fill_gc_region_gap(size_t required_byte_size);
@ -150,14 +152,10 @@ class ArchiveHeapWriter : AllStatic {
static int filler_array_length(size_t fill_bytes);
static void init_filler_array_at_buffer_top(int array_length, size_t fill_bytes);
static void set_requested_address_for_regions(GrowableArray<MemRegion>* closed_regions,
GrowableArray<MemRegion>* open_regions);
static void relocate_embedded_oops(GrowableArrayCHeap<oop, mtClassShared>* roots,
GrowableArray<ArchiveHeapBitmapInfo>* closed_bitmaps,
GrowableArray<ArchiveHeapBitmapInfo>* open_bitmaps);
static ArchiveHeapBitmapInfo compute_ptrmap(bool is_open);
static ArchiveHeapBitmapInfo make_bitmap_info(ResourceBitMap* bitmap, bool is_open, bool is_oopmap);
static bool is_in_requested_regions(oop o);
static void set_requested_address(ArchiveHeapInfo* info);
static void relocate_embedded_oops(GrowableArrayCHeap<oop, mtClassShared>* roots, ArchiveHeapInfo* info);
static void compute_ptrmap(ArchiveHeapInfo *info);
static bool is_in_requested_range(oop o);
static oop requested_obj_from_buffer_offset(size_t offset);
static oop load_oop_from_buffer(oop* buffered_addr);
@ -169,9 +167,9 @@ class ArchiveHeapWriter : AllStatic {
template <typename T> static void store_requested_oop_in_buffer(T* buffered_addr, oop request_oop);
template <typename T> static T* requested_addr_to_buffered_addr(T* p);
template <typename T> static void relocate_field_in_buffer(T* field_addr_in_buffer);
template <typename T> static void mark_oop_pointer(T* buffered_addr);
template <typename T> static void relocate_root_at(oop requested_roots, int index);
template <typename T> static void relocate_field_in_buffer(T* field_addr_in_buffer, CHeapBitMap* oopmap);
template <typename T> static void mark_oop_pointer(T* buffered_addr, CHeapBitMap* oopmap);
template <typename T> static void relocate_root_at(oop requested_roots, int index, CHeapBitMap* oopmap);
static void update_header_for_requested_obj(oop requested_obj, oop src_obj, Klass* src_klass);
public:
@ -180,14 +178,11 @@ public:
static bool is_too_large_to_archive(size_t size);
static bool is_too_large_to_archive(oop obj);
static bool is_string_too_large_to_archive(oop string);
static void write(GrowableArrayCHeap<oop, mtClassShared>*,
GrowableArray<MemRegion>* closed_regions, GrowableArray<MemRegion>* open_regions,
GrowableArray<ArchiveHeapBitmapInfo>* closed_bitmaps,
GrowableArray<ArchiveHeapBitmapInfo>* open_bitmaps);
static address heap_region_requested_bottom(int heap_region_idx);
static oop heap_roots_requested_address();
static void write(GrowableArrayCHeap<oop, mtClassShared>*, ArchiveHeapInfo* heap_info);
static address requested_address(); // requested address of the lowest archived heap object
static oop heap_roots_requested_address(); // requested address of HeapShared::roots()
static address buffered_heap_roots_addr() {
return offset_to_buffered_address<address>(_heap_roots_bottom);
return offset_to_buffered_address<address>(_heap_roots_bottom_offset);
}
static size_t heap_roots_word_size() {
return _heap_roots_word_size;

View file

@ -226,7 +226,7 @@ void DumpRegion::append_intptr_t(intptr_t n, bool need_to_mark) {
}
void DumpRegion::print(size_t total_bytes) const {
log_debug(cds)("%-3s space: " SIZE_FORMAT_W(9) " [ %4.1f%% of total] out of " SIZE_FORMAT_W(9) " bytes [%5.1f%% used] at " INTPTR_FORMAT,
log_debug(cds)("%s space: " SIZE_FORMAT_W(9) " [ %4.1f%% of total] out of " SIZE_FORMAT_W(9) " bytes [%5.1f%% used] at " INTPTR_FORMAT,
_name, used(), percent_of(used(), total_bytes), reserved(), percent_of(used(), reserved()),
p2i(ArchiveBuilder::current()->to_requested(_base)));
}

View file

@ -54,7 +54,7 @@ void DumpAllocStats::print_stats(int ro_all, int rw_all) {
LogMessage(cds) msg;
msg.debug("Detailed metadata info (excluding heap regions):");
msg.debug("Detailed metadata info (excluding heap region):");
msg.debug("%s", hdr);
msg.debug("%s", sep);
for (int type = 0; type < int(_number_of_types); type ++) {

View file

@ -24,6 +24,7 @@
#include "precompiled.hpp"
#include "cds/archiveBuilder.hpp"
#include "cds/archiveHeapWriter.hpp"
#include "cds/archiveUtils.inline.hpp"
#include "cds/cds_globals.hpp"
#include "cds/classPrelinker.hpp"
@ -323,7 +324,8 @@ void DynamicArchiveBuilder::write_archive(char* serialized_data) {
assert(dynamic_info != nullptr, "Sanity");
dynamic_info->open_for_write();
ArchiveBuilder::write_archive(dynamic_info, nullptr, nullptr, nullptr, nullptr);
ArchiveHeapInfo no_heap_for_dynamic_dump;
ArchiveBuilder::write_archive(dynamic_info, &no_heap_for_dynamic_dump);
address base = _requested_dynamic_archive_bottom;
address top = _requested_dynamic_archive_top;

View file

@ -1413,7 +1413,7 @@ bool FileMapInfo::init_from_file(int fd) {
size_t len = os::lseek(fd, 0, SEEK_END);
for (int i = 0; i <= MetaspaceShared::last_valid_region; i++) {
for (int i = 0; i < MetaspaceShared::n_regions; i++) {
FileMapRegion* r = region_at(i);
if (r->file_offset() > len || len - r->file_offset() < r->used()) {
log_warning(cds)("The shared archive file has been truncated.");
@ -1515,12 +1515,14 @@ void FileMapRegion::init(int region_index, size_t mapping_offset, size_t size, b
_mapped_base = nullptr;
}
void FileMapRegion::init_bitmaps(ArchiveHeapBitmapInfo oopmap, ArchiveHeapBitmapInfo ptrmap) {
_oopmap_offset = oopmap._bm_region_offset;
_oopmap_size_in_bits = oopmap._size_in_bits;
void FileMapRegion::init_oopmap(size_t offset, size_t size_in_bits) {
_oopmap_offset = offset;
_oopmap_size_in_bits = size_in_bits;
}
_ptrmap_offset = ptrmap._bm_region_offset;
_ptrmap_size_in_bits = ptrmap._size_in_bits;
void FileMapRegion::init_ptrmap(size_t offset, size_t size_in_bits) {
_ptrmap_offset = offset;
_ptrmap_size_in_bits = size_in_bits;
}
BitMapView FileMapRegion::bitmap_view(bool is_oopmap) {
@ -1559,7 +1561,7 @@ bool FileMapRegion::check_region_crc() const {
static const char* region_name(int region_index) {
static const char* names[] = {
"rw", "ro", "bm", "ca0", "ca1", "oa0", "oa1"
"rw", "ro", "bm", "hp"
};
const int num_regions = sizeof(names)/sizeof(names[0]);
assert(0 <= region_index && region_index < num_regions, "sanity");
@ -1600,7 +1602,7 @@ void FileMapInfo::write_region(int region, char* base, size_t size,
assert(HeapShared::can_write(), "sanity");
#if INCLUDE_CDS_JAVA_HEAP
assert(!DynamicDumpSharedSpaces, "must be");
requested_base = (char*)ArchiveHeapWriter::heap_region_requested_bottom(region);
requested_base = (char*)ArchiveHeapWriter::requested_address();
if (UseCompressedOops) {
mapping_offset = (size_t)((address)requested_base - CompressedOops::base());
assert((mapping_offset >> CompressedOops::shift()) << CompressedOops::shift() == mapping_offset, "must be");
@ -1620,7 +1622,7 @@ void FileMapInfo::write_region(int region, char* base, size_t size,
r->set_file_offset(_file_offset);
int crc = ClassLoader::crc32(0, base, (jint)size);
if (size > 0) {
log_info(cds)("Shared file region (%-3s) %d: " SIZE_FORMAT_W(8)
log_info(cds)("Shared file region (%s) %d: " SIZE_FORMAT_W(8)
" bytes, addr " INTPTR_FORMAT " file offset 0x%08" PRIxPTR
" crc 0x%08x",
region_name(region), region, size, p2i(requested_base), _file_offset, crc);
@ -1633,113 +1635,49 @@ void FileMapInfo::write_region(int region, char* base, size_t size,
}
}
size_t FileMapInfo::set_bitmaps_offset(GrowableArray<ArchiveHeapBitmapInfo>* bitmaps, size_t curr_size) {
for (int i = 0; i < bitmaps->length(); i++) {
bitmaps->at(i)._bm_region_offset = curr_size;
curr_size += bitmaps->at(i)._size_in_bytes;
}
return curr_size;
static size_t write_bitmap(const CHeapBitMap* map, char* output, size_t offset) {
size_t size_in_bytes = map->size_in_bytes();
map->write_to((BitMap::bm_word_t*)(output + offset), size_in_bytes);
return offset + size_in_bytes;
}
size_t FileMapInfo::write_bitmaps(GrowableArray<ArchiveHeapBitmapInfo>* bitmaps, size_t curr_offset, char* buffer) {
for (int i = 0; i < bitmaps->length(); i++) {
memcpy(buffer + curr_offset, bitmaps->at(i)._map, bitmaps->at(i)._size_in_bytes);
curr_offset += bitmaps->at(i)._size_in_bytes;
}
return curr_offset;
}
char* FileMapInfo::write_bitmap_region(const CHeapBitMap* ptrmap,
GrowableArray<ArchiveHeapBitmapInfo>* closed_bitmaps,
GrowableArray<ArchiveHeapBitmapInfo>* open_bitmaps,
char* FileMapInfo::write_bitmap_region(const CHeapBitMap* ptrmap, ArchiveHeapInfo* heap_info,
size_t &size_in_bytes) {
size_t size_in_bits = ptrmap->size();
size_in_bytes = ptrmap->size_in_bytes();
if (closed_bitmaps != nullptr && open_bitmaps != nullptr) {
size_in_bytes = set_bitmaps_offset(closed_bitmaps, size_in_bytes);
size_in_bytes = set_bitmaps_offset(open_bitmaps, size_in_bytes);
if (heap_info->is_used()) {
size_in_bytes += heap_info->oopmap()->size_in_bytes();
size_in_bytes += heap_info->ptrmap()->size_in_bytes();
}
// The bitmap region contains up to 3 parts:
// ptrmap: metaspace pointers inside the ro/rw regions
// heap_info->oopmap(): Java oop pointers in the heap region
// heap_info->ptrmap(): metaspace pointers in the heap region
char* buffer = NEW_C_HEAP_ARRAY(char, size_in_bytes, mtClassShared);
ptrmap->write_to((BitMap::bm_word_t*)buffer, ptrmap->size_in_bytes());
header()->set_ptrmap_size_in_bits(size_in_bits);
size_t written = 0;
written = write_bitmap(ptrmap, buffer, written);
header()->set_ptrmap_size_in_bits(ptrmap->size());
if (closed_bitmaps != nullptr && open_bitmaps != nullptr) {
size_t curr_offset = write_bitmaps(closed_bitmaps, ptrmap->size_in_bytes(), buffer);
write_bitmaps(open_bitmaps, curr_offset, buffer);
if (heap_info->is_used()) {
FileMapRegion* r = region_at(MetaspaceShared::hp);
r->init_oopmap(written, heap_info->oopmap()->size());
written = write_bitmap(heap_info->oopmap(), buffer, written);
r->init_ptrmap(written, heap_info->ptrmap()->size());
written = write_bitmap(heap_info->ptrmap(), buffer, written);
}
write_region(MetaspaceShared::bm, (char*)buffer, size_in_bytes, /*read_only=*/true, /*allow_exec=*/false);
return buffer;
}
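To make the "up to 3 parts" comment above concrete, this is the byte layout of the bm region as the function writes it (the offset names are for illustration only):

//   [ core ptrmap (ro/rw) ][ heap oopmap ][ heap ptrmap ]
//   ^0                     ^off1          ^off2
size_t off1 = ptrmap->size_in_bytes();                      // start of heap oopmap
size_t off2 = off1 + heap_info->oopmap()->size_in_bytes();  // start of heap ptrmap
// These are the (offset, size_in_bits) pairs recorded via init_oopmap()/init_ptrmap()
// on the hp region, so the runtime can rebuild BitMapViews over the mapped bm region.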
// Write out the given archive heap memory regions. GC code combines multiple
// consecutive archive GC regions into one MemRegion whenever possible and
// produces the 'regions' array.
//
// If the archive heap memory size is smaller than a single dump time GC region
// size, there is only one MemRegion in the array.
//
// If the archive heap memory size is bigger than one dump time GC region size,
// the 'regions' array may contain more than one consolidated MemRegions. When
// the first/bottom archive GC region is a partial GC region (with the empty
// portion at the higher address within the region), one MemRegion is used for
// the bottom partial archive GC region. The rest of the consecutive archive
// GC regions are combined into another MemRegion.
//
// Here's the mapping from (archive heap GC regions) -> (GrowableArray<MemRegion> *regions).
// + We have 1 or more archive heap regions: ah0, ah1, ah2 ..... ahn
// + We have 1 or 2 consolidated heap memory regions: r0 and r1
//
// If there's a single archive GC region (ah0), then r0 == ah0, and r1 is empty.
// Otherwise:
//
// "X" represented space that's occupied by heap objects.
// "_" represented unused spaced in the heap region.
//
//
// |ah0       | ah1 | ah2| ...... | ahn|
// |XXXXXX|__ |XXXXX|XXXX|XXXXXXXX|XXXX|
// |<-r0->|   |<- r1 ----------------->|
//         ^^^
//          |
//          +-- gap
size_t FileMapInfo::write_heap_regions(GrowableArray<MemRegion>* regions,
GrowableArray<ArchiveHeapBitmapInfo>* bitmaps,
int first_region_id, int max_num_regions) {
assert(max_num_regions <= 2, "Only support maximum 2 memory regions");
int arr_len = regions == nullptr ? 0 : regions->length();
if (arr_len > max_num_regions) {
log_error(cds)("Unable to write archive heap memory regions: "
"number of memory regions exceeds maximum due to fragmentation. "
"Please increase java heap size "
"(current MaxHeapSize is " SIZE_FORMAT ", InitialHeapSize is " SIZE_FORMAT ").",
MaxHeapSize, InitialHeapSize);
MetaspaceShared::unrecoverable_writing_error();
}
size_t total_size = 0;
for (int i = 0; i < max_num_regions; i++) {
char* start = nullptr;
size_t size = 0;
if (i < arr_len) {
start = (char*)regions->at(i).start();
size = regions->at(i).byte_size();
total_size += size;
}
int region_idx = i + first_region_id;
write_region(region_idx, start, size, false, false);
if (size > 0) {
int oopmap_idx = i * 2;
int ptrmap_idx = i * 2 + 1;
region_at(region_idx)->init_bitmaps(bitmaps->at(oopmap_idx),
bitmaps->at(ptrmap_idx));
}
}
return total_size;
size_t FileMapInfo::write_heap_region(ArchiveHeapInfo* heap_info) {
char* start = heap_info->start();
size_t size = heap_info->byte_size();
write_region(MetaspaceShared::hp, start, size, false, false);
return size;
}
// Dump bytes to file -- at the current file position.
@ -1832,8 +1770,7 @@ bool FileMapInfo::remap_shared_readonly_as_readwrite() {
}
// Memory map a region in the address space.
static const char* shared_region_name[] = { "ReadWrite", "ReadOnly", "Bitmap",
"String1", "String2", "OpenArchive1", "OpenArchive2" };
static const char* shared_region_name[] = { "ReadWrite", "ReadOnly", "Bitmap", "Heap" };
MapArchiveResult FileMapInfo::map_regions(int regions[], int num_regions, char* mapped_base_address, ReservedSpace rs) {
DEBUG_ONLY(FileMapRegion* last_region = nullptr);
@ -2054,58 +1991,38 @@ size_t FileMapInfo::readonly_total() {
return total;
}
static MemRegion *closed_heap_regions = nullptr;
static MemRegion *open_heap_regions = nullptr;
static int num_closed_heap_regions = 0;
static int num_open_heap_regions = 0;
#if INCLUDE_CDS_JAVA_HEAP
bool FileMapInfo::has_heap_regions() {
return (region_at(MetaspaceShared::first_closed_heap_region)->used() > 0);
MemRegion FileMapInfo::_mapped_heap_memregion;
bool FileMapInfo::has_heap_region() {
return (region_at(MetaspaceShared::hp)->used() > 0);
}
// Returns the address range of the archived heap regions computed using the
// Returns the address range of the archived heap region computed using the
// current oop encoding mode. This range may be different than the one seen at
// dump time due to encoding mode differences. The result is used in determining
// if/how this region should be relocated at run time.
MemRegion FileMapInfo::get_heap_regions_requested_range() {
address start = (address) max_uintx;
address end = nullptr;
for (int i = MetaspaceShared::first_closed_heap_region;
i <= MetaspaceShared::last_valid_region;
i++) {
FileMapRegion* r = region_at(i);
MemRegion FileMapInfo::get_heap_region_requested_range() {
FileMapRegion* r = region_at(MetaspaceShared::hp);
size_t size = r->used();
if (size > 0) {
address s = heap_region_requested_address(r);
address e = s + size;
log_info(cds)("Heap region %s = " INTPTR_FORMAT " - " INTPTR_FORMAT " = " SIZE_FORMAT_W(8) " bytes",
region_name(i), p2i(s), p2i(e), size);
if (start > s) {
start = s;
}
if (end < e) {
end = e;
}
}
}
assert(end != nullptr, "must have at least one used heap region");
assert(size > 0, "must have non-empty heap region");
start = align_down(start, HeapRegion::GrainBytes);
end = align_up(end, HeapRegion::GrainBytes);
address start = heap_region_requested_address();
address end = start + size;
log_info(cds)("Requested heap region [" INTPTR_FORMAT " - " INTPTR_FORMAT "] = " SIZE_FORMAT_W(8) " bytes",
p2i(start), p2i(end), size);
return MemRegion((HeapWord*)start, (HeapWord*)end);
}
void FileMapInfo::map_or_load_heap_regions() {
void FileMapInfo::map_or_load_heap_region() {
bool success = false;
if (can_use_heap_regions()) {
if (can_use_heap_region()) {
if (ArchiveHeapLoader::can_map()) {
success = map_heap_regions();
success = map_heap_region();
} else if (ArchiveHeapLoader::can_load()) {
success = ArchiveHeapLoader::load_heap_regions(this);
success = ArchiveHeapLoader::load_heap_region(this);
} else {
if (!UseCompressedOops && !ArchiveHeapLoader::can_map()) {
// TODO - remove implicit knowledge of G1
@ -2121,8 +2038,8 @@ void FileMapInfo::map_or_load_heap_regions() {
}
}
bool FileMapInfo::can_use_heap_regions() {
if (!has_heap_regions()) {
bool FileMapInfo::can_use_heap_region() {
if (!has_heap_region()) {
return false;
}
if (JvmtiExport::should_post_class_file_load_hook() && JvmtiExport::has_early_class_hook_env()) {
@ -2168,22 +2085,22 @@ bool FileMapInfo::can_use_heap_regions() {
}
// The actual address of this region during dump time.
address FileMapInfo::heap_region_dumptime_address(FileMapRegion* r) {
address FileMapInfo::heap_region_dumptime_address() {
FileMapRegion* r = region_at(MetaspaceShared::hp);
assert(UseSharedSpaces, "runtime only");
r->assert_is_heap_region();
assert(is_aligned(r->mapping_offset(), sizeof(HeapWord)), "must be");
if (UseCompressedOops) {
return /*dumptime*/ narrow_oop_base() + r->mapping_offset();
} else {
return heap_region_requested_address(r);
return heap_region_requested_address();
}
}
// The address where this region can be mapped into the runtime heap without
// patching any of the pointers that are embedded in this region.
address FileMapInfo::heap_region_requested_address(FileMapRegion* r) {
address FileMapInfo::heap_region_requested_address() {
assert(UseSharedSpaces, "runtime only");
r->assert_is_heap_region();
FileMapRegion* r = region_at(MetaspaceShared::hp);
assert(is_aligned(r->mapping_offset(), sizeof(HeapWord)), "must be");
assert(ArchiveHeapLoader::can_map(), "cannot be used by ArchiveHeapLoader::can_load() mode");
if (UseCompressedOops) {
@ -2209,284 +2126,171 @@ address FileMapInfo::heap_region_requested_address(FileMapRegion* r) {
// The address where this shared heap region is actually mapped at runtime. This function
// can be called only after we have determined the value for ArchiveHeapLoader::mapped_heap_delta().
address FileMapInfo::heap_region_mapped_address(FileMapRegion* r) {
address FileMapInfo::heap_region_mapped_address() {
assert(UseSharedSpaces, "runtime only");
r->assert_is_heap_region();
assert(ArchiveHeapLoader::can_map(), "cannot be used by ArchiveHeapLoader::can_load() mode");
return heap_region_requested_address(r) + ArchiveHeapLoader::mapped_heap_delta();
return heap_region_requested_address() + ArchiveHeapLoader::mapped_heap_delta();
}
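Putting the three address notions above side by side (a sketch of the relationships implied by the code, assuming compressed oops; with -XX:-UseCompressedOops the dumptime and requested addresses coincide):

//   dumptime  address = narrow_oop_base()      + r->mapping_offset()   // base recorded at dump time
//   requested address = CompressedOops::base() + r->mapping_offset()   // base of the current run
//   mapped    address = requested address + ArchiveHeapLoader::mapped_heap_delta()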
//
// Map the closed and open archive heap objects to the runtime java heap.
//
// The shared objects are mapped at (or close to ) the java heap top in
// closed archive regions. The mapped objects contain no out-going
// references to any other java heap regions. GC does not write into the
// mapped closed archive heap region.
//
// The open archive heap objects are mapped below the shared objects in
// the runtime java heap. The mapped open archive heap data only contains
// references to the shared objects and open archive objects initially.
// During runtime execution, out-going references to any other java heap
// regions may be added. GC may mark and update references in the mapped
// open archive objects.
void FileMapInfo::map_heap_regions_impl() {
// G1 -- always map at the very top of the heap to avoid fragmentation.
assert(UseG1GC, "the following code assumes G1");
_heap_pointers_need_patching = false;
MemRegion heap_range = G1CollectedHeap::heap()->reserved();
MemRegion archive_range = get_heap_regions_requested_range();
address heap_end = (address)heap_range.end();
address archive_end = (address)archive_range.end();
assert(is_aligned(heap_end, HeapRegion::GrainBytes), "must be");
assert(is_aligned(archive_end, HeapRegion::GrainBytes), "must be");
if (UseCompressedOops &&
(narrow_oop_mode() != CompressedOops::mode() ||
narrow_oop_shift() != CompressedOops::shift())) {
log_info(cds)("CDS heap data needs to be relocated because the archive was created with an incompatible oop encoding mode.");
_heap_pointers_need_patching = true;
} else if (!heap_range.contains(archive_range)) {
log_info(cds)("CDS heap data needs to be relocated because");
log_info(cds)("the desired range " PTR_FORMAT " - " PTR_FORMAT, p2i(archive_range.start()), p2i(archive_range.end()));
log_info(cds)("is outside of the heap " PTR_FORMAT " - " PTR_FORMAT, p2i(heap_range.start()), p2i(heap_range.end()));
_heap_pointers_need_patching = true;
} else {
assert(heap_end >= archive_end, "must be");
if (heap_end != archive_end) {
log_info(cds)("CDS heap data needs to be relocated to the end of the runtime heap to reduce fragmentation");
_heap_pointers_need_patching = true;
}
}
ptrdiff_t delta = 0;
if (_heap_pointers_need_patching) {
delta = heap_end - archive_end;
}
log_info(cds)("CDS heap data relocation delta = " INTX_FORMAT " bytes", delta);
FileMapRegion* r = region_at(MetaspaceShared::first_closed_heap_region);
address relocated_closed_heap_region_bottom = heap_region_requested_address(r) + delta;
if (!is_aligned(relocated_closed_heap_region_bottom, HeapRegion::GrainBytes)) {
// Align the bottom of the closed archive heap regions at G1 region boundary.
// This will avoid the situation where the highest open region and the lowest
// closed region sharing the same G1 region. Otherwise we will fail to map the
// open regions.
size_t align = size_t(relocated_closed_heap_region_bottom) % HeapRegion::GrainBytes;
delta -= align;
log_info(cds)("CDS heap data needs to be relocated lower by a further " SIZE_FORMAT
" bytes to " INTX_FORMAT " to be aligned with HeapRegion::GrainBytes",
align, delta);
_heap_pointers_need_patching = true;
}
ArchiveHeapLoader::init_mapped_heap_relocation(delta, narrow_oop_shift());
relocated_closed_heap_region_bottom = heap_region_mapped_address(r);
assert(is_aligned(relocated_closed_heap_region_bottom, HeapRegion::GrainBytes),
"must be");
bool FileMapInfo::map_heap_region() {
init_heap_region_relocation();
if (_heap_pointers_need_patching) {
char* bitmap_base = map_bitmap_region();
if (bitmap_base == nullptr) {
log_info(cds)("CDS heap cannot be used because bitmap region cannot be mapped");
_heap_pointers_need_patching = false;
return;
}
}
// Map the closed heap regions: GC does not write into these regions.
if (map_heap_regions(MetaspaceShared::first_closed_heap_region,
MetaspaceShared::max_num_closed_heap_regions,
/*is_open_archive=*/ false,
&closed_heap_regions, &num_closed_heap_regions)) {
ArchiveHeapLoader::set_closed_regions_mapped();
// Now, map the open heap regions: GC can write into these regions.
if (map_heap_regions(MetaspaceShared::first_open_heap_region,
MetaspaceShared::max_num_open_heap_regions,
/*is_open_archive=*/ true,
&open_heap_regions, &num_open_heap_regions)) {
ArchiveHeapLoader::set_open_regions_mapped();
}
}
}
bool FileMapInfo::map_heap_regions() {
map_heap_regions_impl();
if (!ArchiveHeapLoader::closed_regions_mapped()) {
assert(closed_heap_regions == nullptr &&
num_closed_heap_regions == 0, "sanity");
}
if (!ArchiveHeapLoader::open_regions_mapped()) {
assert(open_heap_regions == nullptr && num_open_heap_regions == 0, "sanity");
return false;
} else {
}
}
if (map_heap_region_impl()) {
#ifdef ASSERT
// The "old" regions must be parsable -- we cannot have any unused space
// at the start of the lowest G1 region that contains archived objects.
assert(is_aligned(_mapped_heap_memregion.start(), HeapRegion::GrainBytes), "must be");
// Make sure we map at the very top of the heap - see comments in
// init_heap_region_relocation().
MemRegion heap_range = G1CollectedHeap::heap()->reserved();
assert(heap_range.contains(_mapped_heap_memregion), "must be");
address heap_end = (address)heap_range.end();
address mapped_heap_region_end = (address)_mapped_heap_memregion.end();
assert(heap_end >= mapped_heap_region_end, "must be");
assert(heap_end - mapped_heap_region_end < (intx)(HeapRegion::GrainBytes),
"must be at the top of the heap to avoid fragmentation");
#endif
ArchiveHeapLoader::set_mapped();
return true;
} else {
return false;
}
}
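For readers tracing the runtime path, the call order implied by this file (a summary of the code above, not new behavior) is roughly:

// map_or_load_heap_region()
//   -> can_use_heap_region()                 // feature/encoding checks
//   -> map_heap_region()
//        -> init_heap_region_relocation()    // compute delta, maybe set _heap_pointers_need_patching
//        -> map_bitmap_region()              // only if patching is needed
//        -> map_heap_region_impl()           // reserve G1 regions + os::map_memory + CRC check
//        -> ArchiveHeapLoader::set_mapped()
// ...later: patch_heap_embedded_pointers()   // bitmap-guided oop rebasing, if needed
//           fixup_mapped_heap_region()       // fill + BOT population via G1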
bool FileMapInfo::map_heap_regions(int first, int max, bool is_open_archive,
MemRegion** regions_ret, int* num_regions_ret) {
MemRegion* regions = MemRegion::create_array(max, mtInternal);
void FileMapInfo::init_heap_region_relocation() {
assert(UseG1GC, "the following code assumes G1");
_heap_pointers_need_patching = false;
struct Cleanup {
MemRegion* _regions;
uint _length;
bool _aborted;
Cleanup(MemRegion* regions, uint length) : _regions(regions), _length(length), _aborted(true) { }
~Cleanup() { if (_aborted) { MemRegion::destroy_array(_regions, _length); } }
} cleanup(regions, max);
MemRegion heap_range = G1CollectedHeap::heap()->reserved();
MemRegion archive_range = get_heap_region_requested_range();
FileMapRegion* r;
int num_regions = 0;
address requested_bottom = (address)archive_range.start();
address heap_end = (address)heap_range.end();
assert(is_aligned(heap_end, HeapRegion::GrainBytes), "must be");
for (int i = first;
i < first + max; i++) {
r = region_at(i);
// We map the archive heap region at the very top of the heap to avoid fragmentation.
// To do that, we make sure that the bottom of the archived region is at the same
// address as the bottom of the highest possible G1 region.
address mapped_bottom = heap_end - align_up(archive_range.byte_size(), HeapRegion::GrainBytes);
if (UseCompressedOops &&
(narrow_oop_mode() != CompressedOops::mode() ||
narrow_oop_shift() != CompressedOops::shift())) {
log_info(cds)("CDS heap data needs to be relocated because the archive was created with an incompatible oop encoding mode.");
_heap_pointers_need_patching = true;
} else if (requested_bottom != mapped_bottom) {
log_info(cds)("CDS heap data needs to be relocated because it is mapped at a different address @ " INTPTR_FORMAT,
p2i(mapped_bottom));
_heap_pointers_need_patching = true;
}
ptrdiff_t delta = 0;
if (_heap_pointers_need_patching) {
delta = mapped_bottom - requested_bottom;
}
log_info(cds)("CDS heap data relocation delta = " INTX_FORMAT " bytes", delta);
ArchiveHeapLoader::init_mapped_heap_relocation(delta, narrow_oop_shift());
}
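A worked example of the delta computation above, with made-up numbers to show the alignment step:

// Hypothetical values: heap_end = 0x7C0000000, HeapRegion::GrainBytes = 4M, and the
// archive's requested range is 9M starting at requested_bottom = 0x7B0000000.
//   align_up(9M, 4M) = 12M
//   mapped_bottom    = heap_end - 12M              = 0x7BF400000
//   delta            = mapped_bottom - requested_bottom = 0xF400000
// If the requested range already sits at the top of the heap (and the oop encoding
// matches), delta stays 0 and no patching is needed.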
bool FileMapInfo::map_heap_region_impl() {
FileMapRegion* r = region_at(MetaspaceShared::hp);
size_t size = r->used();
if (size > 0) {
HeapWord* start = (HeapWord*)heap_region_mapped_address(r);
regions[num_regions] = MemRegion(start, size / HeapWordSize);
num_regions ++;
log_info(cds)("Trying to map heap data: region[%d] at " INTPTR_FORMAT ", size = " SIZE_FORMAT_W(8) " bytes",
i, p2i(start), size);
}
}
if (num_regions == 0) {
if (size > 0) {
HeapWord* start = (HeapWord*)heap_region_mapped_address();
_mapped_heap_memregion = MemRegion(start, size / HeapWordSize);
log_info(cds)("Trying to map heap data at " INTPTR_FORMAT ", size = " SIZE_FORMAT_W(8) " bytes",
p2i(start), size);
} else {
return false; // no archived java heap data
}
// Check that regions are within the java heap
if (!G1CollectedHeap::heap()->check_archive_addresses(regions, num_regions)) {
// Check that the region is within the java heap
if (!G1CollectedHeap::heap()->check_archive_addresses(_mapped_heap_memregion)) {
log_info(cds)("Unable to allocate region, range is not within java heap.");
return false;
}
// allocate from java heap
if (!G1CollectedHeap::heap()->alloc_archive_regions(
regions, num_regions, is_open_archive)) {
if (!G1CollectedHeap::heap()->alloc_archive_regions(_mapped_heap_memregion)) {
log_info(cds)("Unable to allocate region, java heap range is already in use.");
return false;
}
// Map the archived heap data. No need to call MemTracker::record_virtual_memory_type()
// for mapped regions as they are part of the reserved java heap, which is
// already recorded.
for (int i = 0; i < num_regions; i++) {
r = region_at(first + i);
char* addr = (char*)regions[i].start();
// for mapped region as it is part of the reserved java heap, which is already recorded.
char* addr = (char*)_mapped_heap_memregion.start();
char* base = os::map_memory(_fd, _full_path, r->file_offset(),
addr, regions[i].byte_size(), r->read_only(),
addr, _mapped_heap_memregion.byte_size(), r->read_only(),
r->allow_exec());
if (base == nullptr || base != addr) {
// dealloc the regions from java heap
dealloc_heap_regions(regions, num_regions);
log_info(cds)("Unable to map at required address in java heap. "
dealloc_heap_region();
log_info(cds)("UseSharedSpaces: Unable to map at required address in java heap. "
INTPTR_FORMAT ", size = " SIZE_FORMAT " bytes",
p2i(addr), regions[i].byte_size());
p2i(addr), _mapped_heap_memregion.byte_size());
return false;
}
r->set_mapped_base(base);
if (VerifySharedSpaces && !r->check_region_crc()) {
// dealloc the regions from java heap
dealloc_heap_regions(regions, num_regions);
log_info(cds)("mapped heap regions are corrupt");
dealloc_heap_region();
log_info(cds)("mapped heap region is corrupt");
return false;
}
}
cleanup._aborted = false;
// the shared heap data is mapped successfully
*regions_ret = regions;
*num_regions_ret = num_regions;
return true;
}
void FileMapInfo::patch_heap_embedded_pointers() {
if (!_heap_pointers_need_patching) {
return;
}
patch_heap_embedded_pointers(closed_heap_regions,
num_closed_heap_regions,
MetaspaceShared::first_closed_heap_region);
patch_heap_embedded_pointers(open_heap_regions,
num_open_heap_regions,
MetaspaceShared::first_open_heap_region);
}
narrowOop FileMapInfo::encoded_heap_region_dumptime_address(FileMapRegion* r) {
narrowOop FileMapInfo::encoded_heap_region_dumptime_address() {
assert(UseSharedSpaces, "runtime only");
assert(UseCompressedOops, "sanity");
r->assert_is_heap_region();
FileMapRegion* r = region_at(MetaspaceShared::hp);
return CompressedOops::narrow_oop_cast(r->mapping_offset() >> narrow_oop_shift());
}
void FileMapInfo::patch_heap_embedded_pointers(MemRegion* regions, int num_regions,
int first_region_idx) {
void FileMapInfo::patch_heap_embedded_pointers() {
if (!ArchiveHeapLoader::is_mapped() || !_heap_pointers_need_patching) {
return;
}
char* bitmap_base = map_bitmap_region();
assert(bitmap_base != nullptr, "must have already been mapped");
for (int i=0; i<num_regions; i++) {
int region_idx = i + first_region_idx;
FileMapRegion* r = region_at(region_idx);
FileMapRegion* r = region_at(MetaspaceShared::hp);
ArchiveHeapLoader::patch_embedded_pointers(
this, r, regions[i],
this, _mapped_heap_memregion,
(address)(region_at(MetaspaceShared::bm)->mapped_base()) + r->oopmap_offset(),
r->oopmap_size_in_bits());
}
}
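As a self-contained illustration of what bitmap-guided patching amounts to (plain C++, not HotSpot code; slot width and oop encoding details are deliberately simplified), every set bit in the oopmap selects a slot that gets rebased by the relocation delta:

#include <cstdint>
#include <cstdio>
#include <vector>

int main() {
  std::vector<uint64_t> region = {0x1000, 42, 0x1008, 7};    // slots 0 and 2 hold "pointers"
  std::vector<bool>     oopmap = {true, false, true, false}; // one bit per slot
  const uint64_t delta = 0x500;                              // relocation distance

  for (size_t i = 0; i < region.size(); i++) {
    if (oopmap[i]) {
      region[i] += delta;                                    // rebase only the marked slots
    }
  }
  for (uint64_t v : region) {
    std::printf("0x%llx\n", (unsigned long long)v);
  }
  return 0;
}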
// This internally allocates objects using vmClasses::Object_klass(), so it
// must be called after the Object_klass is loaded
void FileMapInfo::fixup_mapped_heap_regions() {
assert(vmClasses::Object_klass_loaded(), "must be");
// If any closed regions were found, call the fill routine to make them parseable.
// Note that closed_heap_regions may be non-null even if no regions were found.
if (num_closed_heap_regions != 0) {
assert(closed_heap_regions != nullptr,
"Null closed_heap_regions array with non-zero count");
G1CollectedHeap::heap()->fill_archive_regions(closed_heap_regions,
num_closed_heap_regions);
// G1 marking uses the BOT for object chunking during marking in
// G1CMObjArrayProcessor::process_slice(); for this reason we need to
// initialize the BOT for closed archive regions too.
G1CollectedHeap::heap()->populate_archive_regions_bot_part(closed_heap_regions,
num_closed_heap_regions);
}
void FileMapInfo::fixup_mapped_heap_region() {
if (ArchiveHeapLoader::is_mapped()) {
assert(!_mapped_heap_memregion.is_empty(), "sanity");
// do the same for mapped open archive heap regions
if (num_open_heap_regions != 0) {
assert(open_heap_regions != nullptr, "Null open_heap_regions array with non-zero count");
G1CollectedHeap::heap()->fill_archive_regions(open_heap_regions,
num_open_heap_regions);
// Populate the open archive regions' G1BlockOffsetTableParts. That ensures
// Populate the archive region's G1BlockOffsetTableParts. That ensures
// fast G1BlockOffsetTablePart::block_start operations for any given address
// within the open archive regions when trying to find start of an object
// within the archive region when trying to find start of an object
// (e.g. during card table scanning).
G1CollectedHeap::heap()->populate_archive_regions_bot_part(open_heap_regions,
num_open_heap_regions);
G1CollectedHeap::heap()->populate_archive_regions_bot_part(_mapped_heap_memregion);
}
}
// dealloc the archive regions from java heap
void FileMapInfo::dealloc_heap_regions(MemRegion* regions, int num) {
if (num > 0) {
assert(regions != nullptr, "Null archive regions array with non-zero count");
G1CollectedHeap::heap()->dealloc_archive_regions(regions, num);
}
void FileMapInfo::dealloc_heap_region() {
G1CollectedHeap::heap()->dealloc_archive_regions(_mapped_heap_memregion);
}
#endif // INCLUDE_CDS_JAVA_HEAP

View file

@ -40,6 +40,7 @@
static const int JVM_IDENT_MAX = 256;
class ArchiveHeapInfo;
class BitMapView;
class CHeapBitMap;
class ClassFileStream;
@ -104,13 +105,6 @@ public:
}
};
struct ArchiveHeapBitmapInfo {
address _map; // bitmap for relocating embedded oops
size_t _bm_region_offset; // this bitmap is stored at this offset from the bottom of the BM region
size_t _size_in_bits;
size_t _size_in_bytes;
};
class SharedPathTable {
Array<u8>* _table;
int _size;
@ -173,7 +167,8 @@ public:
void set_mapped_from_file(bool v) { _mapped_from_file = v; }
void init(int region_index, size_t mapping_offset, size_t size, bool read_only,
bool allow_exec, int crc);
void init_bitmaps(ArchiveHeapBitmapInfo oopmap, ArchiveHeapBitmapInfo ptrmap);
void init_oopmap(size_t offset, size_t size_in_bits);
void init_ptrmap(size_t offset, size_t size_in_bits);
BitMapView oopmap_view();
BitMapView ptrmap_view();
bool has_ptrmap() { return _ptrmap_size_in_bits != 0; }
@ -451,26 +446,20 @@ public:
void write_header();
void write_region(int region, char* base, size_t size,
bool read_only, bool allow_exec);
char* write_bitmap_region(const CHeapBitMap* ptrmap,
GrowableArray<ArchiveHeapBitmapInfo>* closed_bitmaps,
GrowableArray<ArchiveHeapBitmapInfo>* open_bitmaps,
char* write_bitmap_region(const CHeapBitMap* ptrmap, ArchiveHeapInfo* heap_info,
size_t &size_in_bytes);
size_t write_heap_regions(GrowableArray<MemRegion>* regions,
GrowableArray<ArchiveHeapBitmapInfo>* bitmaps,
int first_region_id, int max_num_regions);
size_t write_heap_region(ArchiveHeapInfo* heap_info);
void write_bytes(const void* buffer, size_t count);
void write_bytes_aligned(const void* buffer, size_t count);
size_t read_bytes(void* buffer, size_t count);
static size_t readonly_total();
MapArchiveResult map_regions(int regions[], int num_regions, char* mapped_base_address, ReservedSpace rs);
void unmap_regions(int regions[], int num_regions);
void map_or_load_heap_regions() NOT_CDS_JAVA_HEAP_RETURN;
void fixup_mapped_heap_regions() NOT_CDS_JAVA_HEAP_RETURN;
void map_or_load_heap_region() NOT_CDS_JAVA_HEAP_RETURN;
void fixup_mapped_heap_region() NOT_CDS_JAVA_HEAP_RETURN;
void patch_heap_embedded_pointers() NOT_CDS_JAVA_HEAP_RETURN;
void patch_heap_embedded_pointers(MemRegion* regions, int num_regions,
int first_region_idx) NOT_CDS_JAVA_HEAP_RETURN;
bool has_heap_regions() NOT_CDS_JAVA_HEAP_RETURN_(false);
MemRegion get_heap_regions_requested_range() NOT_CDS_JAVA_HEAP_RETURN_(MemRegion());
bool has_heap_region() NOT_CDS_JAVA_HEAP_RETURN_(false);
MemRegion get_heap_region_requested_range() NOT_CDS_JAVA_HEAP_RETURN_(MemRegion());
bool read_region(int i, char* base, size_t size, bool do_commit);
char* map_bitmap_region();
void unmap_region(int i);
@ -566,23 +555,22 @@ public:
unsigned int runtime_prefix_len) NOT_CDS_RETURN_(false);
bool validate_boot_class_paths() NOT_CDS_RETURN_(false);
bool validate_app_class_paths(int shared_app_paths_len) NOT_CDS_RETURN_(false);
bool map_heap_regions(int first, int max, bool is_open_archive,
MemRegion** regions_ret, int* num_regions_ret) NOT_CDS_JAVA_HEAP_RETURN_(false);
void dealloc_heap_regions(MemRegion* regions, int num) NOT_CDS_JAVA_HEAP_RETURN;
bool can_use_heap_regions();
bool load_heap_regions() NOT_CDS_JAVA_HEAP_RETURN_(false);
bool map_heap_regions() NOT_CDS_JAVA_HEAP_RETURN_(false);
void map_heap_regions_impl() NOT_CDS_JAVA_HEAP_RETURN;
bool map_heap_region_impl() NOT_CDS_JAVA_HEAP_RETURN_(false);
void dealloc_heap_region() NOT_CDS_JAVA_HEAP_RETURN;
bool can_use_heap_region();
bool load_heap_region() NOT_CDS_JAVA_HEAP_RETURN_(false);
bool map_heap_region() NOT_CDS_JAVA_HEAP_RETURN_(false);
void init_heap_region_relocation();
MapArchiveResult map_region(int i, intx addr_delta, char* mapped_base_address, ReservedSpace rs);
bool relocate_pointers_in_core_regions(intx addr_delta);
static size_t set_bitmaps_offset(GrowableArray<ArchiveHeapBitmapInfo> *bitmaps, size_t curr_size);
static size_t write_bitmaps(GrowableArray<ArchiveHeapBitmapInfo> *bitmaps, size_t curr_offset, char* buffer);
static MemRegion _mapped_heap_memregion;
public:
address heap_region_dumptime_address(FileMapRegion* r) NOT_CDS_JAVA_HEAP_RETURN_(nullptr);
address heap_region_requested_address(FileMapRegion* r) NOT_CDS_JAVA_HEAP_RETURN_(nullptr);
address heap_region_mapped_address(FileMapRegion* r) NOT_CDS_JAVA_HEAP_RETURN_(nullptr);
narrowOop encoded_heap_region_dumptime_address(FileMapRegion* r);
address heap_region_dumptime_address() NOT_CDS_JAVA_HEAP_RETURN_(nullptr);
address heap_region_requested_address() NOT_CDS_JAVA_HEAP_RETURN_(nullptr);
address heap_region_mapped_address() NOT_CDS_JAVA_HEAP_RETURN_(nullptr);
narrowOop encoded_heap_region_dumptime_address();
private:

View file

@ -82,7 +82,6 @@ struct ArchivableStaticFieldInfo {
};
bool HeapShared::_disable_writing = false;
bool HeapShared::_copying_open_region_objects = false;
DumpedInternedStrings *HeapShared::_dumped_interned_strings = nullptr;
size_t HeapShared::_alloc_count[HeapShared::ALLOC_STAT_SLOTS];
@ -103,10 +102,7 @@ static const ArchivedKlassSubGraphInfoRecord* _test_class_record = nullptr;
// If you add new entries to the following tables, you should know what you're doing!
//
// Entry fields for shareable subgraphs archived in the closed archive heap
// region. Warning: Objects in the subgraphs should not have reference fields
// assigned at runtime.
static ArchivableStaticFieldInfo closed_archive_subgraph_entry_fields[] = {
static ArchivableStaticFieldInfo archive_subgraph_entry_fields[] = {
{"java/lang/Integer$IntegerCache", "archivedCache"},
{"java/lang/Long$LongCache", "archivedCache"},
{"java/lang/Byte$ByteCache", "archivedCache"},
@ -114,10 +110,6 @@ static ArchivableStaticFieldInfo closed_archive_subgraph_entry_fields[] = {
{"java/lang/Character$CharacterCache", "archivedCache"},
{"java/util/jar/Attributes$Name", "KNOWN_NAMES"},
{"sun/util/locale/BaseLocale", "constantBaseLocales"},
{nullptr, nullptr},
};
// Entry fields for subgraphs archived in the open archive heap region.
static ArchivableStaticFieldInfo open_archive_subgraph_entry_fields[] = {
{"jdk/internal/module/ArchivedModuleGraph", "archivedModuleGraph"},
{"java/util/ImmutableCollections", "archivedObjects"},
{"java/lang/ModuleLayer", "EMPTY_LAYER"},
@ -129,8 +121,8 @@ static ArchivableStaticFieldInfo open_archive_subgraph_entry_fields[] = {
{nullptr, nullptr},
};
// Entry fields for subgraphs archived in the open archive heap region (full module graph).
static ArchivableStaticFieldInfo fmg_open_archive_subgraph_entry_fields[] = {
// full module graph
static ArchivableStaticFieldInfo fmg_archive_subgraph_entry_fields[] = {
{"jdk/internal/loader/ArchivedClassLoaders", "archivedClassLoaders"},
{"jdk/internal/module/ArchivedBootLayer", "archivedBootLayer"},
{"java/lang/Module$ArchivedData", "archivedData"},
@ -153,9 +145,8 @@ static bool is_subgraph_root_class_of(ArchivableStaticFieldInfo fields[], Instan
}
bool HeapShared::is_subgraph_root_class(InstanceKlass* ik) {
return is_subgraph_root_class_of(closed_archive_subgraph_entry_fields, ik) ||
is_subgraph_root_class_of(open_archive_subgraph_entry_fields, ik) ||
is_subgraph_root_class_of(fmg_open_archive_subgraph_entry_fields, ik);
return is_subgraph_root_class_of(archive_subgraph_entry_fields, ik) ||
is_subgraph_root_class_of(fmg_archive_subgraph_entry_fields, ik);
}
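To show how the merged table above is meant to be extended (a hypothetical entry, not something in the JDK), each row names a boot class in slash form plus a static oop field whose reachable object graph gets archived; the trailing null rows terminate the list, with the spare slot patched by setup_test_class() in non-product builds:

static ArchivableStaticFieldInfo archive_subgraph_entry_fields[] = {
  {"java/lang/Integer$IntegerCache", "archivedCache"},
  // ... existing entries elided ...
  {"com/example/MyCachedState",      "archivedSingleton"},   // hypothetical addition
  {nullptr, nullptr},                                        // spare slot (test hook)
  {nullptr, nullptr},                                        // end of list
};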
unsigned HeapShared::oop_hash(oop const& p) {
@ -383,7 +374,7 @@ void HeapShared::archive_java_mirrors() {
if (!is_reference_type(bt)) {
oop m = _scratch_basic_type_mirrors[i].resolve();
assert(m != nullptr, "sanity");
bool success = archive_reachable_objects_from(1, _default_subgraph_info, m, /*is_closed_archive=*/ false);
bool success = archive_reachable_objects_from(1, _default_subgraph_info, m);
assert(success, "sanity");
log_trace(cds, heap, mirror)(
@ -401,7 +392,7 @@ void HeapShared::archive_java_mirrors() {
oop m = scratch_java_mirror(orig_k);
if (m != nullptr) {
Klass* buffered_k = ArchiveBuilder::get_buffered_klass(orig_k);
bool success = archive_reachable_objects_from(1, _default_subgraph_info, m, /*is_closed_archive=*/ false);
bool success = archive_reachable_objects_from(1, _default_subgraph_info, m);
guarantee(success, "scratch mirrors must point to only archivable objects");
buffered_k->set_archived_java_mirror(append_root(m));
ResourceMark rm;
@ -414,8 +405,7 @@ void HeapShared::archive_java_mirrors() {
InstanceKlass* ik = InstanceKlass::cast(buffered_k);
oop rr = ik->constants()->prepare_resolved_references_for_archiving();
if (rr != nullptr && !ArchiveHeapWriter::is_too_large_to_archive(rr)) {
bool success = HeapShared::archive_reachable_objects_from(1, _default_subgraph_info, rr,
/*is_closed_archive=*/false);
bool success = HeapShared::archive_reachable_objects_from(1, _default_subgraph_info, rr);
assert(success, "must be");
int root_index = append_root(rr);
ik->constants()->cache()->set_archived_references(root_index);
@ -427,7 +417,7 @@ void HeapShared::archive_java_mirrors() {
void HeapShared::archive_strings() {
oop shared_strings_array = StringTable::init_shared_table(_dumped_interned_strings);
bool success = archive_reachable_objects_from(1, _default_subgraph_info, shared_strings_array, /*is_closed_archive=*/ false);
bool success = archive_reachable_objects_from(1, _default_subgraph_info, shared_strings_array);
// We must succeed because:
// - _dumped_interned_strings do not contain any large strings.
// - StringTable::init_shared_table() doesn't create any large arrays.
@ -463,8 +453,7 @@ void HeapShared::mark_native_pointers(oop orig_obj) {
// the static fields out of the archived heap.
void HeapShared::check_enum_obj(int level,
KlassSubGraphInfo* subgraph_info,
oop orig_obj,
bool is_closed_archive) {
oop orig_obj) {
assert(level > 1, "must never be called at the first (outermost) level");
Klass* k = orig_obj->klass();
Klass* buffered_k = ArchiveBuilder::get_buffered_klass(k);
@ -493,7 +482,7 @@ void HeapShared::check_enum_obj(int level,
guarantee(false, "static field %s::%s is of the wrong type",
ik->external_name(), fd.name()->as_C_string());
}
bool success = archive_reachable_objects_from(level, subgraph_info, oop_field, is_closed_archive);
bool success = archive_reachable_objects_from(level, subgraph_info, oop_field);
assert(success, "VM should have exited with unarchivable objects for _level > 1");
int root_index = append_root(oop_field);
log_info(cds, heap)("Archived enum obj @%d %s::%s (" INTPTR_FORMAT ")",
@ -532,10 +521,7 @@ bool HeapShared::initialize_enum_klass(InstanceKlass* k, TRAPS) {
return true;
}
void HeapShared::archive_objects(GrowableArray<MemRegion>* closed_regions,
GrowableArray<MemRegion>* open_regions,
GrowableArray<ArchiveHeapBitmapInfo>* closed_bitmaps,
GrowableArray<ArchiveHeapBitmapInfo>* open_bitmaps) {
void HeapShared::archive_objects(ArchiveHeapInfo *heap_info) {
{
NoSafepointVerifier nsv;
@ -549,19 +535,13 @@ void HeapShared::archive_objects(GrowableArray<MemRegion>* closed_regions,
p2i((address)G1CollectedHeap::heap()->reserved().start()),
UseCompressedOops ? p2i(CompressedOops::end()) :
p2i((address)G1CollectedHeap::heap()->reserved().end()));
log_info(cds)("Dumping objects to closed archive heap region ...");
copy_closed_objects();
_copying_open_region_objects = true;
log_info(cds)("Dumping objects to open archive heap region ...");
copy_open_objects();
copy_objects();
CDSHeapVerifier::verify();
check_default_subgraph_classes();
}
ArchiveHeapWriter::write(_pending_roots, closed_regions, open_regions, closed_bitmaps, open_bitmaps);
ArchiveHeapWriter::write(_pending_roots, heap_info);
}
void HeapShared::copy_interned_strings() {
@ -570,9 +550,8 @@ void HeapShared::copy_interned_strings() {
auto copier = [&] (oop s, bool value_ignored) {
assert(s != nullptr, "sanity");
assert(!ArchiveHeapWriter::is_string_too_large_to_archive(s), "large strings must have been filtered");
bool success = archive_reachable_objects_from(1, _default_subgraph_info,
s, /*is_closed_archive=*/true);
assert(success, "string must be short enough to be archived");
bool success = archive_reachable_objects_from(1, _default_subgraph_info, s);
assert(success, "must be");
// Prevent string deduplication from changing the value field to
// something not in the archive.
java_lang_String::set_deduplication_forbidden(s);
@ -582,18 +561,7 @@ void HeapShared::copy_interned_strings() {
delete_seen_objects_table();
}
void HeapShared::copy_closed_objects() {
assert(HeapShared::can_write(), "must be");
// Archive interned string objects
copy_interned_strings();
archive_object_subgraphs(closed_archive_subgraph_entry_fields,
true /* is_closed_archive */,
false /* is_full_module_graph */);
}
void HeapShared::copy_special_open_objects() {
void HeapShared::copy_special_objects() {
// Archive special objects that do not belong to any subgraphs
init_seen_objects_table();
archive_java_mirrors();
@ -601,17 +569,17 @@ void HeapShared::copy_special_open_objects() {
delete_seen_objects_table();
}
void HeapShared::copy_open_objects() {
void HeapShared::copy_objects() {
assert(HeapShared::can_write(), "must be");
copy_special_open_objects();
copy_interned_strings();
copy_special_objects();
archive_object_subgraphs(open_archive_subgraph_entry_fields,
false /* is_closed_archive */,
archive_object_subgraphs(archive_subgraph_entry_fields,
false /* is_full_module_graph */);
if (MetaspaceShared::use_full_module_graph()) {
archive_object_subgraphs(fmg_open_archive_subgraph_entry_fields,
false /* is_closed_archive */,
archive_object_subgraphs(fmg_archive_subgraph_entry_fields,
true /* is_full_module_graph */);
Modules::verify_archived_modules();
}
@ -645,8 +613,7 @@ KlassSubGraphInfo* HeapShared::get_subgraph_info(Klass* k) {
}
// Add an entry field to the current KlassSubGraphInfo.
void KlassSubGraphInfo::add_subgraph_entry_field(
int static_field_offset, oop v, bool is_closed_archive) {
void KlassSubGraphInfo::add_subgraph_entry_field(int static_field_offset, oop v) {
assert(DumpSharedSpaces, "dump time only");
if (_subgraph_entry_fields == nullptr) {
_subgraph_entry_fields =
@ -836,7 +803,7 @@ struct CopyKlassSubGraphInfoToArchive : StackObj {
// Build the records of archived subgraph infos, which include:
// - Entry points to all subgraphs from the containing class mirror. The entry
// points are static fields in the mirror. For each entry point, the field
// offset, value and is_closed_archive flag are recorded in the sub-graph
// offset and value are recorded in the sub-graph
// info. The value is stored back to the corresponding field at runtime.
// - A list of klasses that need to be loaded/initialized before archived
// java object sub-graph can be accessed at runtime.
@ -936,9 +903,8 @@ void HeapShared::resolve_classes(JavaThread* current) {
if (!ArchiveHeapLoader::is_in_use()) {
return; // nothing to do
}
resolve_classes_for_subgraphs(current, closed_archive_subgraph_entry_fields);
resolve_classes_for_subgraphs(current, open_archive_subgraph_entry_fields);
resolve_classes_for_subgraphs(current, fmg_open_archive_subgraph_entry_fields);
resolve_classes_for_subgraphs(current, archive_subgraph_entry_fields);
resolve_classes_for_subgraphs(current, fmg_archive_subgraph_entry_fields);
}
void HeapShared::resolve_classes_for_subgraphs(JavaThread* current, ArchivableStaticFieldInfo fields[]) {
@ -1115,7 +1081,6 @@ void HeapShared::clear_archived_roots_of(Klass* k) {
class WalkOopAndArchiveClosure: public BasicOopIterateClosure {
int _level;
bool _is_closed_archive;
bool _record_klasses_only;
KlassSubGraphInfo* _subgraph_info;
oop _referencing_obj;
@ -1126,11 +1091,10 @@ class WalkOopAndArchiveClosure: public BasicOopIterateClosure {
WalkOopAndArchiveClosure* _last;
public:
WalkOopAndArchiveClosure(int level,
bool is_closed_archive,
bool record_klasses_only,
KlassSubGraphInfo* subgraph_info,
oop orig) :
_level(level), _is_closed_archive(is_closed_archive),
_level(level),
_record_klasses_only(record_klasses_only),
_subgraph_info(subgraph_info),
_referencing_obj(orig) {
@ -1162,7 +1126,7 @@ class WalkOopAndArchiveClosure: public BasicOopIterateClosure {
}
bool success = HeapShared::archive_reachable_objects_from(
_level + 1, _subgraph_info, obj, _is_closed_archive);
_level + 1, _subgraph_info, obj);
assert(success, "VM should have exited with unarchivable objects for _level > 1");
}
}
@ -1178,23 +1142,7 @@ WalkOopAndArchiveClosure* WalkOopAndArchiveClosure::_current = nullptr;
HeapShared::CachedOopInfo HeapShared::make_cached_oop_info() {
WalkOopAndArchiveClosure* walker = WalkOopAndArchiveClosure::current();
oop referrer = (walker == nullptr) ? nullptr : walker->referencing_obj();
return CachedOopInfo(referrer, _copying_open_region_objects);
}
void HeapShared::check_closed_region_object(InstanceKlass* k) {
// Check fields in the object
for (JavaFieldStream fs(k); !fs.done(); fs.next()) {
if (!fs.access_flags().is_static()) {
BasicType ft = fs.field_descriptor().field_type();
if (!fs.access_flags().is_final() && is_reference_type(ft)) {
ResourceMark rm;
log_warning(cds, heap)(
"Please check reference field in %s instance in closed archive heap region: %s %s",
k->external_name(), (fs.name())->as_C_string(),
(fs.signature())->as_C_string());
}
}
}
return CachedOopInfo(referrer);
}
// (1) If orig_obj has not been archived yet, archive it.
@ -1203,8 +1151,7 @@ void HeapShared::check_closed_region_object(InstanceKlass* k) {
// (3) Record the klasses of all orig_obj and all reachable objects.
bool HeapShared::archive_reachable_objects_from(int level,
KlassSubGraphInfo* subgraph_info,
oop orig_obj,
bool is_closed_archive) {
oop orig_obj) {
assert(orig_obj != nullptr, "must be");
if (!JavaClasses::is_supported_for_archiving(orig_obj)) {
@ -1260,14 +1207,10 @@ bool HeapShared::archive_reachable_objects_from(int level,
Klass *orig_k = orig_obj->klass();
subgraph_info->add_subgraph_object_klass(orig_k);
WalkOopAndArchiveClosure walker(level, is_closed_archive, record_klasses_only,
subgraph_info, orig_obj);
WalkOopAndArchiveClosure walker(level, record_klasses_only, subgraph_info, orig_obj);
orig_obj->oop_iterate(&walker);
if (is_closed_archive && orig_k->is_instance_klass()) {
check_closed_region_object(InstanceKlass::cast(orig_k));
}
check_enum_obj(level + 1, subgraph_info, orig_obj, is_closed_archive);
check_enum_obj(level + 1, subgraph_info, orig_obj);
return true;
}
@ -1308,8 +1251,7 @@ bool HeapShared::archive_reachable_objects_from(int level,
void HeapShared::archive_reachable_objects_from_static_field(InstanceKlass *k,
const char* klass_name,
int field_offset,
const char* field_name,
bool is_closed_archive) {
const char* field_name) {
assert(DumpSharedSpaces, "dump time only");
assert(k->is_shared_boot_class(), "must be boot class");
@ -1327,8 +1269,7 @@ void HeapShared::archive_reachable_objects_from_static_field(InstanceKlass *k,
f->print_on(&out);
}
bool success = archive_reachable_objects_from(1, subgraph_info, f, is_closed_archive);
bool success = archive_reachable_objects_from(1, subgraph_info, f);
if (!success) {
log_error(cds, heap)("Archiving failed %s::%s (some reachable objects cannot be archived)",
klass_name, field_name);
@ -1336,13 +1277,13 @@ void HeapShared::archive_reachable_objects_from_static_field(InstanceKlass *k,
// Note: the field value is not preserved in the archived mirror.
// Record the field as a new subGraph entry point. The recorded
// information is restored from the archive at runtime.
subgraph_info->add_subgraph_entry_field(field_offset, f, is_closed_archive);
subgraph_info->add_subgraph_entry_field(field_offset, f);
log_info(cds, heap)("Archived field %s::%s => " PTR_FORMAT, klass_name, field_name, p2i(f));
}
} else {
// The field contains null, we still need to record the entry point,
// so it can be restored at runtime.
subgraph_info->add_subgraph_entry_field(field_offset, nullptr, false);
subgraph_info->add_subgraph_entry_field(field_offset, nullptr);
}
}
@ -1572,17 +1513,16 @@ void HeapShared::init_subgraph_entry_fields(ArchivableStaticFieldInfo fields[],
void HeapShared::init_subgraph_entry_fields(TRAPS) {
assert(HeapShared::can_write(), "must be");
_dump_time_subgraph_info_table = new (mtClass)DumpTimeKlassSubGraphInfoTable();
init_subgraph_entry_fields(closed_archive_subgraph_entry_fields, CHECK);
init_subgraph_entry_fields(open_archive_subgraph_entry_fields, CHECK);
init_subgraph_entry_fields(archive_subgraph_entry_fields, CHECK);
if (MetaspaceShared::use_full_module_graph()) {
init_subgraph_entry_fields(fmg_open_archive_subgraph_entry_fields, CHECK);
init_subgraph_entry_fields(fmg_archive_subgraph_entry_fields, CHECK);
}
}
#ifndef PRODUCT
void HeapShared::setup_test_class(const char* test_class_name) {
ArchivableStaticFieldInfo* p = open_archive_subgraph_entry_fields;
int num_slots = sizeof(open_archive_subgraph_entry_fields) / sizeof(ArchivableStaticFieldInfo);
ArchivableStaticFieldInfo* p = archive_subgraph_entry_fields;
int num_slots = sizeof(archive_subgraph_entry_fields) / sizeof(ArchivableStaticFieldInfo);
assert(p[num_slots - 2].klass_name == nullptr, "must have empty slot that's patched below");
assert(p[num_slots - 1].klass_name == nullptr, "must have empty slot that marks the end of the list");
@ -1649,7 +1589,6 @@ void HeapShared::init_for_dumping(TRAPS) {
}
void HeapShared::archive_object_subgraphs(ArchivableStaticFieldInfo fields[],
bool is_closed_archive,
bool is_full_module_graph) {
_num_total_subgraph_recordings = 0;
_num_total_walked_objs = 0;
@ -1680,14 +1619,12 @@ void HeapShared::archive_object_subgraphs(ArchivableStaticFieldInfo fields[],
}
archive_reachable_objects_from_static_field(f->klass, f->klass_name,
f->offset, f->field_name,
is_closed_archive);
f->offset, f->field_name);
}
done_recording_subgraph(info->klass, klass_name);
}
log_info(cds, heap)("Archived subgraph records in %s archive heap region = %d",
is_closed_archive ? "closed" : "open",
log_info(cds, heap)("Archived subgraph records = %d",
_num_total_subgraph_recordings);
log_info(cds, heap)(" Walked %d objects", _num_total_walked_objs);
log_info(cds, heap)(" Archived %d objects", _num_total_archived_objs);

View file

@ -47,6 +47,7 @@ class KlassToOopHandleTable;
class ResourceBitMap;
struct ArchivableStaticFieldInfo;
class ArchiveHeapInfo;
// A dump time sub-graph info for Klass _k. It includes the entry points
// (static fields in _k's mirror) of the archived sub-graphs reachable
@ -61,8 +62,7 @@ class KlassSubGraphInfo: public CHeapObj<mtClass> {
// object sub-graphs can be accessed at runtime.
GrowableArray<Klass*>* _subgraph_object_klasses;
// A list of _k's static fields as the entry points of archived sub-graphs.
// For each entry field, it is a tuple of field_offset, field_value and
// is_closed_archive flag.
// For each entry field, it is a tuple of (field_offset, field_value).
GrowableArray<int>* _subgraph_entry_fields;
// Does this KlassSubGraphInfo belong to the archived full module graph
@ -97,8 +97,7 @@ class KlassSubGraphInfo: public CHeapObj<mtClass> {
GrowableArray<int>* subgraph_entry_fields() {
return _subgraph_entry_fields;
}
void add_subgraph_entry_field(int static_field_offset, oop v,
bool is_closed_archive);
void add_subgraph_entry_field(int static_field_offset, oop v);
void add_subgraph_object_klass(Klass *orig_k);
int num_subgraph_object_klasses() {
return _subgraph_object_klasses == nullptr ? 0 :
@ -141,7 +140,7 @@ class HeapShared: AllStatic {
friend class VerifySharedOopClosure;
public:
// Can this VM write heap regions into the CDS archive? Currently only G1+compressed{oops,cp}
// Can this VM write a heap region into the CDS archive? Currently only G1+compressed{oops,cp}
static bool can_write() {
CDS_JAVA_HEAP_ONLY(
if (_disable_writing) {
@ -165,7 +164,6 @@ public:
private:
#if INCLUDE_CDS_JAVA_HEAP
static bool _disable_writing;
static bool _copying_open_region_objects;
static DumpedInternedStrings *_dumped_interned_strings;
// statistics
@ -189,22 +187,18 @@ public:
// The location of this object inside ArchiveHeapWriter::_buffer
size_t _buffer_offset;
bool _in_open_region;
public:
CachedOopInfo(oop orig_referrer, bool in_open_region)
CachedOopInfo(oop orig_referrer)
: _orig_referrer(orig_referrer),
_buffer_offset(0), _in_open_region(in_open_region) {}
_buffer_offset(0) {}
oop orig_referrer() const { return _orig_referrer; }
bool in_open_region() const { return _in_open_region; }
void set_buffer_offset(size_t offset) { _buffer_offset = offset; }
size_t buffer_offset() const { return _buffer_offset; }
};
private:
static void check_enum_obj(int level,
KlassSubGraphInfo* subgraph_info,
oop orig_obj,
bool is_closed_archive);
static void check_enum_obj(int level, KlassSubGraphInfo* subgraph_info,
oop orig_obj);
typedef ResourceHashtable<oop, CachedOopInfo,
36137, // prime number
@ -239,18 +233,15 @@ private:
static DumpTimeKlassSubGraphInfoTable* _dump_time_subgraph_info_table;
static RunTimeKlassSubGraphInfoTable _run_time_subgraph_info_table;
static void check_closed_region_object(InstanceKlass* k);
static CachedOopInfo make_cached_oop_info();
static void archive_object_subgraphs(ArchivableStaticFieldInfo fields[],
bool is_closed_archive,
bool is_full_module_graph);
// Archive object sub-graph starting from the given static field
// in Klass k's mirror.
static void archive_reachable_objects_from_static_field(
InstanceKlass* k, const char* klass_name,
int field_offset, const char* field_name,
bool is_closed_archive);
int field_offset, const char* field_name);
static void verify_subgraph_from_static_field(
InstanceKlass* k, int field_offset) PRODUCT_RETURN;
@ -359,18 +350,13 @@ private:
return _archived_object_cache;
}
static void archive_objects(GrowableArray<MemRegion>* closed_regions,
GrowableArray<MemRegion>* open_regions,
GrowableArray<ArchiveHeapBitmapInfo>* closed_bitmaps,
GrowableArray<ArchiveHeapBitmapInfo>* open_bitmaps);
static void copy_closed_objects();
static void copy_open_objects();
static void copy_special_open_objects();
static void archive_objects(ArchiveHeapInfo* heap_info);
static void copy_objects();
static void copy_special_objects();
static bool archive_reachable_objects_from(int level,
KlassSubGraphInfo* subgraph_info,
oop orig_obj,
bool is_closed_archive);
oop orig_obj);
static ResourceBitMap calculate_oopmap(MemRegion region); // marks all the oop pointers
static void add_to_dumped_interned_strings(oop string);
@ -380,7 +366,7 @@ private:
static void remove_scratch_objects(Klass* k);
// We use the HeapShared::roots() array to make sure that objects stored in the
// archived heap regions are not prematurely collected. These roots include:
// archived heap region are not prematurely collected. These roots include:
//
// - mirrors of classes that have not yet been loaded.
// - ConstantPool::resolved_references() of classes that have not yet been loaded.
@ -410,8 +396,7 @@ private:
public:
static void init_scratch_objects(TRAPS) NOT_CDS_JAVA_HEAP_RETURN;
static bool is_heap_region(int idx) {
CDS_JAVA_HEAP_ONLY(return (idx >= MetaspaceShared::first_closed_heap_region &&
idx <= MetaspaceShared::last_open_heap_region);)
CDS_JAVA_HEAP_ONLY(return (idx == MetaspaceShared::hp);)
NOT_CDS_JAVA_HEAP_RETURN_(false);
}
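With only one archive heap region left, the index test reduces to an equality check against the new hp slot. A minimal standalone model (plain C++, not the HotSpot sources; the enum values mirror the MetaspaceShared hunk further down):

// Standalone sketch (not HotSpot code) of the simplified region numbering and
// the single-region heap check; the values mirror the rw/ro/bm/hp layout shown
// in the metaspaceShared.hpp hunk below.
#include <cassert>

enum Region { rw = 0, ro = 1, bm = 2, hp = 3, n_regions = 4 };

static bool is_heap_region(int idx) {
  // Previously a range check over up to four closed/open heap regions;
  // now exactly one region index can hold archived heap objects.
  return idx == hp;
}

int main() {
  assert(!is_heap_region(rw) && !is_heap_region(ro) && !is_heap_region(bm));
  assert(is_heap_region(hp));
  return 0;
}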

View file

@ -98,12 +98,7 @@ bool MetaspaceShared::_use_full_module_graph = true;
// The CDS archive is divided into the following regions:
// rw - read-write metadata
// ro - read-only metadata and read-only tables
//
// ca0 - closed archive heap space #0
// ca1 - closed archive heap space #1 (may be empty)
// oa0 - open archive heap space #0
// oa1 - open archive heap space #1 (may be empty)
//
// hp - heap region
// bm - bitmap for relocating the above 3 regions.
//
// The rw and ro regions are linearly allocated, in the order of rw->ro.
@ -119,8 +114,9 @@ bool MetaspaceShared::_use_full_module_graph = true;
// [5] SymbolTable, StringTable, SystemDictionary, and a few other read-only data
// are copied into the ro region as read-only tables.
//
// The ca0/ca1 and oa0/oa1 regions are populated inside HeapShared::archive_objects.
// Their layout is independent of the rw/ro regions.
// The heap region is populated by HeapShared::archive_objects.
//
// The bitmap region is used to relocate the ro/rw/hp regions.
static DumpRegion _symbol_region("symbols");
@ -431,11 +427,7 @@ void MetaspaceShared::rewrite_nofast_bytecodes_and_calculate_fingerprints(Thread
class VM_PopulateDumpSharedSpace : public VM_Operation {
private:
GrowableArray<MemRegion> *_closed_heap_regions;
GrowableArray<MemRegion> *_open_heap_regions;
GrowableArray<ArchiveHeapBitmapInfo> *_closed_heap_bitmaps;
GrowableArray<ArchiveHeapBitmapInfo> *_open_heap_bitmaps;
ArchiveHeapInfo _heap_info;
void dump_java_heap_objects(GrowableArray<Klass*>* klasses) NOT_CDS_JAVA_HEAP_RETURN;
void dump_shared_symbol_table(GrowableArray<Symbol*>* symbols) {
@ -446,11 +438,7 @@ private:
public:
VM_PopulateDumpSharedSpace() : VM_Operation(),
_closed_heap_regions(nullptr),
_open_heap_regions(nullptr),
_closed_heap_bitmaps(nullptr),
_open_heap_bitmaps(nullptr) {}
VM_PopulateDumpSharedSpace() : VM_Operation(), _heap_info() {}
bool skip_operation() const { return false; }
@ -550,11 +538,7 @@ void VM_PopulateDumpSharedSpace::doit() {
mapinfo->set_serialized_data(serialized_data);
mapinfo->set_cloned_vtables(cloned_vtables);
mapinfo->open_for_write();
builder.write_archive(mapinfo,
_closed_heap_regions,
_open_heap_regions,
_closed_heap_bitmaps,
_open_heap_bitmaps);
builder.write_archive(mapinfo, &_heap_info);
if (PrintSystemDictionaryAtExit) {
SystemDictionary::print();
@ -874,14 +858,7 @@ void VM_PopulateDumpSharedSpace::dump_java_heap_objects(GrowableArray<Klass*>* k
}
}
// The closed and open archive heap space has maximum two regions.
// See FileMapInfo::write_heap_regions() for details.
_closed_heap_regions = new GrowableArray<MemRegion>(2);
_open_heap_regions = new GrowableArray<MemRegion>(2);
_closed_heap_bitmaps = new GrowableArray<ArchiveHeapBitmapInfo>(2);
_open_heap_bitmaps = new GrowableArray<ArchiveHeapBitmapInfo>(2);
HeapShared::archive_objects(_closed_heap_regions, _open_heap_regions,
_closed_heap_bitmaps, _open_heap_bitmaps);
HeapShared::archive_objects(&_heap_info);
ArchiveBuilder::OtherROAllocMark mark;
HeapShared::write_subgraph_info_table();
}
@ -1177,9 +1154,9 @@ MapArchiveResult MetaspaceShared::map_archives(FileMapInfo* static_mapinfo, File
assert(ccs_end > cds_base, "Sanity check");
CompressedKlassPointers::initialize(cds_base, ccs_end - cds_base);
// map_heap_regions() compares the current narrow oop and klass encodings
// map_or_load_heap_region() compares the current narrow oop and klass encodings
// with the archived ones, so it must be done after all encodings are determined.
static_mapinfo->map_or_load_heap_regions();
static_mapinfo->map_or_load_heap_region();
}
});
log_info(cds)("optimized module handling: %s", MetaspaceShared::use_optimized_module_handling() ? "enabled" : "disabled");

View file

@ -1,5 +1,5 @@
/*
* Copyright (c) 2012, 2022, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2012, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -60,23 +60,9 @@ class MetaspaceShared : AllStatic {
rw = 0, // read-write shared space
ro = 1, // read-only shared space
bm = 2, // relocation bitmaps (freed after file mapping is finished)
hp = 3, // heap region
num_core_region = 2, // rw and ro
num_non_heap_regions = 3, // rw and ro and bm
// java heap regions
first_closed_heap_region = bm + 1,
max_num_closed_heap_regions = 2,
last_closed_heap_region = first_closed_heap_region + max_num_closed_heap_regions - 1,
first_open_heap_region = last_closed_heap_region + 1,
max_num_open_heap_regions = 2,
last_open_heap_region = first_open_heap_region + max_num_open_heap_regions - 1,
max_num_heap_regions = max_num_closed_heap_regions + max_num_open_heap_regions,
first_archive_heap_region = first_closed_heap_region,
last_archive_heap_region = last_open_heap_region,
last_valid_region = last_open_heap_region,
n_regions = last_valid_region + 1 // total number of regions
n_regions = 4 // total number of regions
};
static void prepare_for_dumping() NOT_CDS_RETURN;
@ -106,8 +92,8 @@ public:
static void initialize_shared_spaces() NOT_CDS_RETURN;
// Return true if given address is in the shared metaspace regions (i.e., excluding any
// mapped heap regions.)
// Return true if given address is in the shared metaspace regions (i.e., excluding the
// mapped heap region.)
static bool is_in_shared_metaspace(const void* p) {
return MetaspaceObj::is_shared((const MetaspaceObj*)p);
}

View file

@ -1121,9 +1121,6 @@ bool java_lang_Class::restore_archived_mirror(Klass *k,
// mirror is archived, restore
log_debug(cds, mirror)("Archived mirror is: " PTR_FORMAT, p2i(m));
if (ArchiveHeapLoader::is_mapped()) {
assert(Universe::heap()->is_archived_object(m), "must be archived mirror object");
}
assert(as_Klass(m) == k, "must be");
Handle mirror(THREAD, m);

View file

@ -142,7 +142,7 @@ void vmClasses::resolve_all(TRAPS) {
// Object_klass is resolved. See the above resolve_through()
// call. No mirror objects are accessed/restored in the above call.
// Mirrors are restored after java.lang.Class is loaded.
ArchiveHeapLoader::fixup_regions();
ArchiveHeapLoader::fixup_region();
// Initialize the constant pool for the Object_class
assert(Object_klass()->is_shared(), "must be");

View file

@ -97,8 +97,6 @@ void G1Allocator::reuse_retained_old_region(G1EvacInfo* evacuation_info,
HeapRegion** retained_old) {
HeapRegion* retained_region = *retained_old;
*retained_old = NULL;
assert(retained_region == NULL || !retained_region->is_archive(),
"Archive region should not be alloc region (index %u)", retained_region->hrm_index());
// We will discard the current GC alloc region if:
// a) it's in the collection set (it can happen!),

View file

@ -511,72 +511,51 @@ HeapWord* G1CollectedHeap::attempt_allocation_slow(size_t word_size) {
return NULL;
}
bool G1CollectedHeap::check_archive_addresses(MemRegion* ranges, size_t count) {
assert(ranges != NULL, "MemRegion array NULL");
assert(count != 0, "No MemRegions provided");
MemRegion reserved = _hrm.reserved();
for (size_t i = 0; i < count; i++) {
if (!reserved.contains(ranges[i].start()) || !reserved.contains(ranges[i].last())) {
return false;
}
}
return true;
bool G1CollectedHeap::check_archive_addresses(MemRegion range) {
return _hrm.reserved().contains(range);
}
bool G1CollectedHeap::alloc_archive_regions(MemRegion* ranges,
size_t count,
bool open) {
template <typename Func>
void G1CollectedHeap::iterate_regions_in_range(MemRegion range, const Func& func) {
// Mark each G1 region touched by the range as old, add it to
// the old set, and set top.
HeapRegion* curr_region = _hrm.addr_to_region(range.start());
HeapRegion* end_region = _hrm.addr_to_region(range.last());
while (curr_region != nullptr) {
bool is_last = curr_region == end_region;
HeapRegion* next_region = is_last ? nullptr : _hrm.next_region_in_heap(curr_region);
func(curr_region, is_last);
curr_region = next_region;
}
}
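iterate_regions_in_range() hands every fixed-size region touched by the range to a callback, together with an is_last flag so the final region can receive a partial top. A minimal standalone model of the pattern (plain C++; the 4M region size and the sample range are assumptions):

// Standalone sketch (not HotSpot code) of iterating the fixed-size regions
// covered by a contiguous range and invoking func(region_index, is_last) for
// each one.
#include <cstdint>
#include <cstdio>

const uint64_t kRegionBytes = 4 * 1024 * 1024;

template <typename Func>
void iterate_regions_in_range(uint64_t start, uint64_t end, const Func& func) {
  uint64_t first = start / kRegionBytes;
  uint64_t last  = (end - 1) / kRegionBytes;
  for (uint64_t idx = first; idx <= last; idx++) {
    func(idx, idx == last);
  }
}

int main() {
  // A 9M range touches three 4M regions; only the last one keeps a partial top.
  uint64_t start = 0, end = 9 * 1024 * 1024;
  iterate_regions_in_range(start, end, [&](uint64_t idx, bool is_last) {
    uint64_t top = is_last ? end : (idx + 1) * kRegionBytes;
    std::printf("region %llu: set_old(), top = %llu%s\n",
                (unsigned long long)idx, (unsigned long long)top,
                is_last ? " (last)" : "");
  });
  return 0;
}

The hunks that follow reuse this one helper for the set-to-old, BOT-population, and dealloc paths over the single archive range.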
bool G1CollectedHeap::alloc_archive_regions(MemRegion range) {
assert(!is_init_completed(), "Expect to be called at JVM init time");
assert(ranges != NULL, "MemRegion array NULL");
assert(count != 0, "No MemRegions provided");
MutexLocker x(Heap_lock);
MemRegion reserved = _hrm.reserved();
HeapWord* prev_last_addr = NULL;
HeapRegion* prev_last_region = NULL;
// Temporarily disable pretouching of heap pages. This interface is used
// when mmap'ing archived heap data in, so pre-touching is wasted.
FlagSetting fs(AlwaysPreTouch, false);
// For each specified MemRegion range, allocate the corresponding G1
// regions and mark them as archive regions. We expect the ranges
// in ascending starting address order, without overlap.
for (size_t i = 0; i < count; i++) {
MemRegion curr_range = ranges[i];
HeapWord* start_address = curr_range.start();
size_t word_size = curr_range.word_size();
HeapWord* last_address = curr_range.last();
// For the specified MemRegion range, allocate the corresponding G1
// region(s) and mark them as old region(s).
HeapWord* start_address = range.start();
size_t word_size = range.word_size();
HeapWord* last_address = range.last();
size_t commits = 0;
guarantee(reserved.contains(start_address) && reserved.contains(last_address),
"MemRegion outside of heap [" PTR_FORMAT ", " PTR_FORMAT "]",
p2i(start_address), p2i(last_address));
guarantee(start_address > prev_last_addr,
"Ranges not in ascending order: " PTR_FORMAT " <= " PTR_FORMAT ,
p2i(start_address), p2i(prev_last_addr));
prev_last_addr = last_address;
// Check for ranges that start in the same G1 region in which the previous
// range ended, and adjust the start address so we don't try to allocate
// the same region again. If the current range is entirely within that
// region, skip it, just adjusting the recorded top.
HeapRegion* start_region = _hrm.addr_to_region(start_address);
if ((prev_last_region != NULL) && (start_region == prev_last_region)) {
start_address = start_region->end();
if (start_address > last_address) {
increase_used(word_size * HeapWordSize);
start_region->set_top(last_address + 1);
continue;
}
start_region->set_top(start_address);
curr_range = MemRegion(start_address, last_address + 1);
start_region = _hrm.addr_to_region(start_address);
}
// Perform the actual region allocation, exiting if it fails.
// Then note how much new space we have allocated.
if (!_hrm.allocate_containing_regions(curr_range, &commits, workers())) {
if (!_hrm.allocate_containing_regions(range, &commits, workers())) {
return false;
}
increase_used(word_size * HeapWordSize);
@ -586,98 +565,67 @@ bool G1CollectedHeap::alloc_archive_regions(MemRegion* ranges,
}
// Mark each G1 region touched by the range as archive, add it to
// Mark each G1 region touched by the range as old, add it to
// the old set, and set top.
HeapRegion* curr_region = _hrm.addr_to_region(start_address);
HeapRegion* last_region = _hrm.addr_to_region(last_address);
prev_last_region = last_region;
auto set_region_to_old = [&] (HeapRegion* r, bool is_last) {
assert(r->is_empty() && !r->is_pinned(), "Region already in use (%u)", r->hrm_index());
while (curr_region != NULL) {
assert(curr_region->is_empty() && !curr_region->is_pinned(),
"Region already in use (index %u)", curr_region->hrm_index());
if (open) {
curr_region->set_open_archive();
} else {
curr_region->set_closed_archive();
}
_hr_printer.alloc(curr_region);
_archive_set.add(curr_region);
HeapWord* top;
HeapRegion* next_region;
if (curr_region != last_region) {
top = curr_region->end();
next_region = _hrm.next_region_in_heap(curr_region);
} else {
top = last_address + 1;
next_region = NULL;
}
curr_region->set_top(top);
curr_region = next_region;
}
}
HeapWord* top = is_last ? last_address + 1 : r->end();
r->set_top(top);
r->set_old();
_hr_printer.alloc(r);
_old_set.add(r);
};
iterate_regions_in_range(range, set_region_to_old);
return true;
}
void G1CollectedHeap::fill_archive_regions(MemRegion* ranges, size_t count) {
void G1CollectedHeap::populate_archive_regions_bot_part(MemRegion range) {
assert(!is_init_completed(), "Expect to be called at JVM init time");
assert(ranges != NULL, "MemRegion array NULL");
assert(count != 0, "No MemRegions provided");
MemRegion reserved = _hrm.reserved();
HeapWord *prev_last_addr = NULL;
HeapRegion* prev_last_region = NULL;
// For each MemRegion, create filler objects, if needed, in the G1 regions
// that contain the address range. The address range actually within the
// MemRegion will not be modified. That is assumed to have been initialized
// elsewhere, probably via an mmap of archived heap data.
iterate_regions_in_range(range,
[&] (HeapRegion* r, bool is_last) {
r->update_bot();
});
}
void G1CollectedHeap::dealloc_archive_regions(MemRegion range) {
assert(!is_init_completed(), "Expect to be called at JVM init time");
MemRegion reserved = _hrm.reserved();
size_t size_used = 0;
uint shrink_count = 0;
// Free the G1 regions that are within the specified range.
MutexLocker x(Heap_lock);
for (size_t i = 0; i < count; i++) {
HeapWord* start_address = ranges[i].start();
HeapWord* last_address = ranges[i].last();
HeapWord* start_address = range.start();
HeapWord* last_address = range.last();
assert(reserved.contains(start_address) && reserved.contains(last_address),
"MemRegion outside of heap [" PTR_FORMAT ", " PTR_FORMAT "]",
p2i(start_address), p2i(last_address));
assert(start_address > prev_last_addr,
"Ranges not in ascending order: " PTR_FORMAT " <= " PTR_FORMAT ,
p2i(start_address), p2i(prev_last_addr));
size_used += range.byte_size();
HeapRegion* start_region = _hrm.addr_to_region(start_address);
HeapRegion* last_region = _hrm.addr_to_region(last_address);
HeapWord* bottom_address = start_region->bottom();
// Free, empty and uncommit regions with CDS archive content.
auto dealloc_archive_region = [&] (HeapRegion* r, bool is_last) {
guarantee(r->is_old(), "Expected old region at index %u", r->hrm_index());
_old_set.remove(r);
r->set_free();
r->set_top(r->bottom());
_hrm.shrink_at(r->hrm_index(), 1);
shrink_count++;
};
// Check for a range beginning in the same region in which the
// previous one ended.
if (start_region == prev_last_region) {
bottom_address = prev_last_addr + 1;
}
iterate_regions_in_range(range, dealloc_archive_region);
// Verify that the regions were all marked as archive regions by
// alloc_archive_regions.
HeapRegion* curr_region = start_region;
while (curr_region != NULL) {
guarantee(curr_region->is_archive(),
"Expected archive region at index %u", curr_region->hrm_index());
if (curr_region != last_region) {
curr_region = _hrm.next_region_in_heap(curr_region);
} else {
curr_region = NULL;
}
}
prev_last_addr = last_address;
prev_last_region = last_region;
// Fill the memory below the allocated range with dummy object(s),
// if the region bottom does not match the range start, or if the previous
// range ended within the same G1 region, and there is a gap.
assert(start_address >= bottom_address, "bottom address should not be greater than start address");
if (start_address > bottom_address) {
size_t fill_size = pointer_delta(start_address, bottom_address);
G1CollectedHeap::fill_with_objects(bottom_address, fill_size);
increase_used(fill_size * HeapWordSize);
}
if (shrink_count != 0) {
log_debug(gc, ergo, heap)("Attempt heap shrinking (CDS archive regions). Total size: " SIZE_FORMAT "B",
HeapRegion::GrainWords * HeapWordSize * shrink_count);
// Explicit uncommit.
uncommit_regions(shrink_count);
}
decrease_used(size_used);
}
inline HeapWord* G1CollectedHeap::attempt_allocation(size_t min_word_size,
@ -705,99 +653,6 @@ inline HeapWord* G1CollectedHeap::attempt_allocation(size_t min_word_size,
return result;
}
void G1CollectedHeap::populate_archive_regions_bot_part(MemRegion* ranges, size_t count) {
assert(!is_init_completed(), "Expect to be called at JVM init time");
assert(ranges != NULL, "MemRegion array NULL");
assert(count != 0, "No MemRegions provided");
HeapWord* st = ranges[0].start();
HeapWord* last = ranges[count-1].last();
HeapRegion* hr_st = _hrm.addr_to_region(st);
HeapRegion* hr_last = _hrm.addr_to_region(last);
HeapRegion* hr_curr = hr_st;
while (hr_curr != NULL) {
hr_curr->update_bot();
if (hr_curr != hr_last) {
hr_curr = _hrm.next_region_in_heap(hr_curr);
} else {
hr_curr = NULL;
}
}
}
void G1CollectedHeap::dealloc_archive_regions(MemRegion* ranges, size_t count) {
assert(!is_init_completed(), "Expect to be called at JVM init time");
assert(ranges != NULL, "MemRegion array NULL");
assert(count != 0, "No MemRegions provided");
MemRegion reserved = _hrm.reserved();
HeapWord* prev_last_addr = NULL;
HeapRegion* prev_last_region = NULL;
size_t size_used = 0;
uint shrink_count = 0;
// For each Memregion, free the G1 regions that constitute it, and
// notify mark-sweep that the range is no longer to be considered 'archive.'
MutexLocker x(Heap_lock);
for (size_t i = 0; i < count; i++) {
HeapWord* start_address = ranges[i].start();
HeapWord* last_address = ranges[i].last();
assert(reserved.contains(start_address) && reserved.contains(last_address),
"MemRegion outside of heap [" PTR_FORMAT ", " PTR_FORMAT "]",
p2i(start_address), p2i(last_address));
assert(start_address > prev_last_addr,
"Ranges not in ascending order: " PTR_FORMAT " <= " PTR_FORMAT ,
p2i(start_address), p2i(prev_last_addr));
size_used += ranges[i].byte_size();
prev_last_addr = last_address;
HeapRegion* start_region = _hrm.addr_to_region(start_address);
HeapRegion* last_region = _hrm.addr_to_region(last_address);
// Check for ranges that start in the same G1 region in which the previous
// range ended, and adjust the start address so we don't try to free
// the same region again. If the current range is entirely within that
// region, skip it.
if (start_region == prev_last_region) {
start_address = start_region->end();
if (start_address > last_address) {
continue;
}
start_region = _hrm.addr_to_region(start_address);
}
prev_last_region = last_region;
// After verifying that each region was marked as an archive region by
// alloc_archive_regions, set it free and empty and uncommit it.
HeapRegion* curr_region = start_region;
while (curr_region != NULL) {
guarantee(curr_region->is_archive(),
"Expected archive region at index %u", curr_region->hrm_index());
uint curr_index = curr_region->hrm_index();
_archive_set.remove(curr_region);
curr_region->set_free();
curr_region->set_top(curr_region->bottom());
if (curr_region != last_region) {
curr_region = _hrm.next_region_in_heap(curr_region);
} else {
curr_region = NULL;
}
_hrm.shrink_at(curr_index, 1);
shrink_count++;
}
}
if (shrink_count != 0) {
log_debug(gc, ergo, heap)("Attempt heap shrinking (archive regions). Total size: " SIZE_FORMAT "B",
HeapRegion::GrainWords * HeapWordSize * shrink_count);
// Explicit uncommit.
uncommit_regions(shrink_count);
}
decrease_used(size_used);
}
HeapWord* G1CollectedHeap::attempt_allocation_humongous(size_t word_size) {
ResourceMark rm; // For retrieving the thread names in log messages.
@ -1344,16 +1199,6 @@ public:
const char* get_description() { return "Old Regions"; }
};
class ArchiveRegionSetChecker : public HeapRegionSetChecker {
public:
void check_mt_safety() {
guarantee(!Universe::is_fully_initialized() || SafepointSynchronize::is_at_safepoint(),
"May only change archive regions during initialization or safepoint.");
}
bool is_correct_type(HeapRegion* hr) { return hr->is_archive(); }
const char* get_description() { return "Archive Regions"; }
};
class HumongousRegionSetChecker : public HeapRegionSetChecker {
public:
void check_mt_safety() {
@ -1388,7 +1233,6 @@ G1CollectedHeap::G1CollectedHeap() :
_collection_pause_end(Ticks::now()),
_soft_ref_policy(),
_old_set("Old Region Set", new OldRegionSetChecker()),
_archive_set("Archive Region Set", new ArchiveRegionSetChecker()),
_humongous_set("Humongous Region Set", new HumongousRegionSetChecker()),
_bot(NULL),
_listener(),
@ -2293,10 +2137,6 @@ bool G1CollectedHeap::supports_concurrent_gc_breakpoints() const {
return true;
}
bool G1CollectedHeap::is_archived_object(oop object) const {
return object != NULL && heap_region_containing(object)->is_archive();
}
class PrintRegionClosure: public HeapRegionClosure {
outputStream* _st;
public:
@ -2378,7 +2218,6 @@ void G1CollectedHeap::print_regions_on(outputStream* st) const {
st->print_cr("Heap Regions: E=young(eden), S=young(survivor), O=old, "
"HS=humongous(starts), HC=humongous(continues), "
"CS=collection set, F=free, "
"OA=open archive, CA=closed archive, "
"TAMS=top-at-mark-start, "
"PB=parsable bottom");
PrintRegionClosure blk(st);
@ -2809,12 +2648,10 @@ void G1CollectedHeap::free_humongous_region(HeapRegion* hr,
}
void G1CollectedHeap::remove_from_old_gen_sets(const uint old_regions_removed,
const uint archive_regions_removed,
const uint humongous_regions_removed) {
if (old_regions_removed > 0 || archive_regions_removed > 0 || humongous_regions_removed > 0) {
if (old_regions_removed > 0 || humongous_regions_removed > 0) {
MutexLocker x(OldSets_lock, Mutex::_no_safepoint_check_flag);
_old_set.bulk_remove(old_regions_removed);
_archive_set.bulk_remove(archive_regions_removed);
_humongous_set.bulk_remove(humongous_regions_removed);
}
@ -2905,9 +2742,7 @@ bool G1CollectedHeap::check_young_list_empty() {
// Remove the given HeapRegion from the appropriate region set.
void G1CollectedHeap::prepare_region_for_full_compaction(HeapRegion* hr) {
if (hr->is_archive()) {
_archive_set.remove(hr);
} else if (hr->is_humongous()) {
if (hr->is_humongous()) {
_humongous_set.remove(hr);
} else if (hr->is_old()) {
_old_set.remove(hr);
@ -2943,7 +2778,6 @@ private:
bool _free_list_only;
HeapRegionSet* _old_set;
HeapRegionSet* _archive_set;
HeapRegionSet* _humongous_set;
HeapRegionManager* _hrm;
@ -2953,15 +2787,13 @@ private:
public:
RebuildRegionSetsClosure(bool free_list_only,
HeapRegionSet* old_set,
HeapRegionSet* archive_set,
HeapRegionSet* humongous_set,
HeapRegionManager* hrm) :
_free_list_only(free_list_only), _old_set(old_set), _archive_set(archive_set),
_free_list_only(free_list_only), _old_set(old_set),
_humongous_set(humongous_set), _hrm(hrm), _total_used(0) {
assert(_hrm->num_free_regions() == 0, "pre-condition");
if (!free_list_only) {
assert(_old_set->is_empty(), "pre-condition");
assert(_archive_set->is_empty(), "pre-condition");
assert(_humongous_set->is_empty(), "pre-condition");
}
}
@ -2977,11 +2809,9 @@ public:
if (r->is_humongous()) {
_humongous_set->add(r);
} else if (r->is_archive()) {
_archive_set->add(r);
} else {
assert(r->is_young() || r->is_free() || r->is_old(), "invariant");
// We now move all (non-humongous, non-old, non-archive) regions to old gen,
// We now move all (non-humongous, non-old) regions to old gen,
// and register them as such.
r->move_to_old();
_old_set->add(r);
@ -3006,7 +2836,7 @@ void G1CollectedHeap::rebuild_region_sets(bool free_list_only) {
}
RebuildRegionSetsClosure cl(free_list_only,
&_old_set, &_archive_set, &_humongous_set,
&_old_set, &_humongous_set,
&_hrm);
heap_region_iterate(&cl);

View file

@ -182,9 +182,8 @@ private:
static size_t _humongous_object_threshold_in_words;
// These sets keep track of old, archive and humongous regions respectively.
// These sets keep track of old and humongous regions respectively.
HeapRegionSet _old_set;
HeapRegionSet _archive_set;
HeapRegionSet _humongous_set;
// Young gen memory statistics before GC.
@ -702,31 +701,30 @@ public:
FreeRegionList* free_list);
// Facility for allocating a fixed range within the heap and marking
// the containing regions as 'archive'. For use at JVM init time, when the
// caller may mmap archived heap data at the specified range(s).
// Verify that the MemRegions specified in the argument array are within the
// reserved heap.
bool check_archive_addresses(MemRegion* range, size_t count);
// the containing regions as 'old'. For use at JVM init time, when the
// caller may mmap archived heap data at the specified range.
// Commit the appropriate G1 regions containing the specified MemRegions
// and mark them as 'archive' regions. The regions in the array must be
// non-overlapping and in order of ascending address.
bool alloc_archive_regions(MemRegion* range, size_t count, bool open);
// Verify that the range is within the reserved heap.
bool check_archive_addresses(MemRegion range);
// Insert any required filler objects in the G1 regions around the specified
// ranges to make the regions parseable. This must be called after
// alloc_archive_regions, and after class loading has occurred.
void fill_archive_regions(MemRegion* range, size_t count);
// Execute func(HeapRegion* r, bool is_last) on every region covered by the
// given range.
template <typename Func>
void iterate_regions_in_range(MemRegion range, const Func& func);
// Commit the appropriate G1 region(s) containing the specified range
// and mark them as 'old' region(s).
bool alloc_archive_regions(MemRegion range);
// Populate the G1BlockOffsetTablePart for archived regions with the given
// memory ranges.
void populate_archive_regions_bot_part(MemRegion* range, size_t count);
// memory range.
void populate_archive_regions_bot_part(MemRegion range);
// For each of the specified MemRegions, uncommit the containing G1 regions
// For the specified range, uncommit the containing G1 regions
// which had been allocated by alloc_archive_regions. This should be called
// rather than fill_archive_regions at JVM init time if the archive file
// mapping failed, with the same non-overlapping and sorted MemRegion array.
void dealloc_archive_regions(MemRegion* range, size_t count);
// at JVM init time if the archive heap's contents cannot be used (e.g., if
// CRC check fails).
void dealloc_archive_regions(MemRegion range);
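Taken together, the comments above describe a small init-time protocol: verify the range, allocate the covering regions, map the archived data, then either populate the block-offset table or give the regions back. A standalone control-flow sketch (plain C++; every function here is a stub, and map_archived_heap_data is an invented stand-in for the mapping plus CRC check):

// Standalone control-flow sketch (not HotSpot code) of the init-time protocol
// described by the comments above. All functions are stubs.
#include <cstdio>

struct MemRange { unsigned long start, end; };

static bool check_archive_addresses(MemRange)           { return true;  }  // range within reserved heap?
static bool alloc_archive_regions(MemRange)             { return true;  }  // commit + mark regions old
static bool map_archived_heap_data(MemRange)            { return false; }  // stand-in for mmap + CRC check (fails here)
static void populate_archive_regions_bot_part(MemRange) {}                 // success path: fix up the block-offset table
static void dealloc_archive_regions(MemRange)           {}                 // failure path: free + uncommit the regions

int main() {
  MemRange range = { 0x1000, 0x2000 };   // illustrative addresses
  if (!check_archive_addresses(range) || !alloc_archive_regions(range)) {
    std::printf("archive heap not used\n");
    return 0;
  }
  if (map_archived_heap_data(range)) {
    populate_archive_regions_bot_part(range);
    std::printf("archive heap mapped\n");
  } else {
    dealloc_archive_regions(range);      // e.g. CRC mismatch: give the regions back
    std::printf("archive heap discarded\n");
  }
  return 0;
}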
private:
@ -1003,10 +1001,8 @@ public:
inline void old_set_add(HeapRegion* hr);
inline void old_set_remove(HeapRegion* hr);
inline void archive_set_add(HeapRegion* hr);
size_t non_young_capacity_bytes() {
return (old_regions_count() + _archive_set.length() + humongous_regions_count()) * HeapRegion::GrainBytes;
return (old_regions_count() + humongous_regions_count()) * HeapRegion::GrainBytes;
}
// Determine whether the given region is one that we are using as an
@ -1025,7 +1021,6 @@ public:
void start_concurrent_gc_for_metadata_allocation(GCCause::Cause gc_cause);
void remove_from_old_gen_sets(const uint old_regions_removed,
const uint archive_regions_removed,
const uint humongous_regions_removed);
void prepend_to_freelist(FreeRegionList* list);
void decrement_summary_bytes(size_t bytes);
@ -1215,7 +1210,6 @@ public:
size_t survivor_regions_used_bytes() const { return _survivor.used_bytes(); }
uint young_regions_count() const { return _eden.length() + _survivor.length(); }
uint old_regions_count() const { return _old_set.length(); }
uint archive_regions_count() const { return _archive_set.length(); }
uint humongous_regions_count() const { return _humongous_set.length(); }
#ifdef ASSERT
@ -1282,8 +1276,6 @@ public:
WorkerThreads* safepoint_workers() override { return _workers; }
bool is_archived_object(oop object) const override;
// The methods below are here for convenience and dispatch the
// appropriate method depending on value of the given VerifyOption
// parameter. The values for that parameter, and their meanings,

View file

@ -1,5 +1,5 @@
/*
* Copyright (c) 2001, 2022, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2001, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -148,10 +148,6 @@ inline void G1CollectedHeap::old_set_remove(HeapRegion* hr) {
_old_set.remove(hr);
}
inline void G1CollectedHeap::archive_set_add(HeapRegion* hr) {
_archive_set.add(hr);
}
// It dirties the cards that cover the block so that the post
// write barrier never queues anything when updating objects on this
// block. It is assumed (and in fact we assert) that the block
@ -262,7 +258,7 @@ inline bool G1CollectedHeap::is_obj_dead(const oop obj) const {
}
inline bool G1CollectedHeap::is_obj_dead_full(const oop obj, const HeapRegion* hr) const {
return !is_marked(obj) && !hr->is_closed_archive();
return !is_marked(obj);
}
inline bool G1CollectedHeap::is_obj_dead_full(const oop obj) const {

View file

@ -1,5 +1,5 @@
/*
* Copyright (c) 2019, 2021, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2019, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -84,9 +84,8 @@ void G1CollectionSetCandidates::verify() const {
HeapRegion *cur = _regions[idx];
guarantee(cur != NULL, "Regions after _front_idx %u cannot be NULL but %u is", _front_idx, idx);
// The first disjunction filters out regions with objects that were explicitly
// pinned after being added to the collection set candidates. Archive regions
// should never have been added to the collection set though.
guarantee((cur->is_pinned() && !cur->is_archive()) ||
// pinned after being added to the collection set candidates.
guarantee(cur->is_pinned() ||
G1CollectionSetChooser::should_add(cur),
"Region %u should be eligible for addition.", cur->hrm_index());
if (prev != NULL) {

View file

@ -1,5 +1,5 @@
/*
* Copyright (c) 2001, 2021, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2001, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -192,7 +192,7 @@ class G1BuildCandidateRegionsTask : public WorkerTask {
// sets for old regions.
r->rem_set()->clear(true /* only_cardset */);
} else {
assert(r->is_archive() || !r->is_old() || !r->rem_set()->is_tracked(),
assert(!r->is_old() || !r->rem_set()->is_tracked(),
"Missed to clear unused remembered set of region %u (%s) that is %s",
r->hrm_index(), r->get_type_str(), r->rem_set()->get_state_str());
}

View file

@ -1,5 +1,5 @@
/*
* Copyright (c) 2001, 2022, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2001, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -1329,7 +1329,6 @@ class G1ReclaimEmptyRegionsTask : public WorkerTask {
size_t _freed_bytes;
FreeRegionList* _local_cleanup_list;
uint _old_regions_removed;
uint _archive_regions_removed;
uint _humongous_regions_removed;
public:
@ -1339,16 +1338,14 @@ class G1ReclaimEmptyRegionsTask : public WorkerTask {
_freed_bytes(0),
_local_cleanup_list(local_cleanup_list),
_old_regions_removed(0),
_archive_regions_removed(0),
_humongous_regions_removed(0) { }
size_t freed_bytes() { return _freed_bytes; }
const uint old_regions_removed() { return _old_regions_removed; }
const uint archive_regions_removed() { return _archive_regions_removed; }
const uint humongous_regions_removed() { return _humongous_regions_removed; }
bool do_heap_region(HeapRegion *hr) {
if (hr->used() > 0 && hr->live_bytes() == 0 && !hr->is_young() && !hr->is_closed_archive()) {
if (hr->used() > 0 && hr->live_bytes() == 0 && !hr->is_young()) {
log_trace(gc)("Reclaimed empty old gen region %u (%s) bot " PTR_FORMAT,
hr->hrm_index(), hr->get_short_type_str(), p2i(hr->bottom()));
_freed_bytes += hr->used();
@ -1356,9 +1353,6 @@ class G1ReclaimEmptyRegionsTask : public WorkerTask {
if (hr->is_humongous()) {
_humongous_regions_removed++;
_g1h->free_humongous_region(hr, _local_cleanup_list);
} else if (hr->is_open_archive()) {
_archive_regions_removed++;
_g1h->free_region(hr, _local_cleanup_list);
} else {
_old_regions_removed++;
_g1h->free_region(hr, _local_cleanup_list);
@ -1389,9 +1383,8 @@ public:
_g1h->heap_region_par_iterate_from_worker_offset(&cl, &_hrclaimer, worker_id);
assert(cl.is_complete(), "Shouldn't have aborted!");
// Now update the old/archive/humongous region sets
// Now update the old/humongous region sets
_g1h->remove_from_old_gen_sets(cl.old_regions_removed(),
cl.archive_regions_removed(),
cl.humongous_regions_removed());
{
MutexLocker x(G1RareEvent_lock, Mutex::_no_safepoint_check_flag);
@ -1890,7 +1883,6 @@ HeapRegion* G1ConcurrentMark::claim_region(uint worker_id) {
assert(_finger >= end, "the finger should have moved forward");
if (limit > bottom) {
assert(!curr_region->is_closed_archive(), "CA regions should be skipped");
return curr_region;
} else {
assert(limit == bottom,

View file

@ -1,5 +1,5 @@
/*
* Copyright (c) 2001, 2022, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2001, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -54,11 +54,6 @@ inline bool G1CMIsAliveClosure::do_object_b(oop obj) {
return true;
}
// All objects in closed archive regions are live.
if (hr->is_closed_archive()) {
return true;
}
// All objects that are marked are live.
return _g1h->is_marked(obj);
}
@ -72,7 +67,7 @@ inline bool G1CMSubjectToDiscoveryClosure::do_object_b(oop obj) {
return false;
}
assert(_g1h->is_in_reserved(obj), "Trying to discover obj " PTR_FORMAT " not in heap", p2i(obj));
return _g1h->heap_region_containing(obj)->is_old_or_humongous_or_archive();
return _g1h->heap_region_containing(obj)->is_old_or_humongous();
}
inline bool G1ConcurrentMark::mark_in_bitmap(uint const worker_id, oop const obj) {

View file

@ -257,8 +257,6 @@ void G1FullCollector::complete_collection() {
void G1FullCollector::before_marking_update_attribute_table(HeapRegion* hr) {
if (hr->is_free()) {
_region_attr_table.set_free(hr->hrm_index());
} else if (hr->is_closed_archive()) {
_region_attr_table.set_skip_marking(hr->hrm_index());
} else if (hr->is_pinned()) {
_region_attr_table.set_skip_compacting(hr->hrm_index());
} else {

View file

@ -130,7 +130,6 @@ public:
inline bool is_compacting(oop obj) const;
inline bool is_skip_compacting(uint region_index) const;
inline bool is_skip_marking(oop obj) const;
// Are we (potentially) going to compact into this region?
inline bool is_compaction_target(uint region_index) const;

View file

@ -40,10 +40,6 @@ bool G1FullCollector::is_skip_compacting(uint region_index) const {
return _region_attr_table.is_skip_compacting(region_index);
}
bool G1FullCollector::is_skip_marking(oop obj) const {
return _region_attr_table.is_skip_marking(cast_from_oop<HeapWord*>(obj));
}
bool G1FullCollector::is_compaction_target(uint region_index) const {
return _region_attr_table.is_compacting(region_index) || is_free(region_index);
}

View file

@ -1,5 +1,5 @@
/*
* Copyright (c) 2017, 2021, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2017, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -68,10 +68,8 @@ class G1AdjustRegionClosure : public HeapRegionClosure {
// work distribution.
oop obj = cast_to_oop(r->humongous_start_region()->bottom());
obj->oop_iterate(&cl, MemRegion(r->bottom(), r->top()));
} else if (!r->is_closed_archive() && !r->is_free()) {
// Closed archive regions never change references and only contain
// references into other closed regions and are always live. Free
// regions do not contain objects to iterate. So skip both.
} else if (!r->is_free()) {
// Free regions do not contain objects to iterate. So skip them.
G1AdjustLiveClosure adjust(&cl);
r->apply_to_marked_objects(_bitmap, &adjust);
}

View file

@ -31,17 +31,15 @@
// fast access during the full collection. In particular some parts of the
// region type information is encoded in these per-region bytes. Value encoding
// has been specifically chosen to make required accesses fast. In particular,
// the table specifies whether a Full GC cycle should be compacting, skip
// compacting, or skip marking (liveness analysis) a region.
// the table specifies whether a Full GC cycle should be compacting or skip
// compacting a region.
// Reasons for not compacting a region:
// (1) the HeapRegion itself has been pinned at the start of Full GC.
// (2) the occupancy of the region is too high to be considered eligible for compaction.
// The only examples for skipping marking for regions are Closed Archive regions.
class G1FullGCHeapRegionAttr : public G1BiasedMappedArray<uint8_t> {
static const uint8_t Compacting = 0; // Region will be compacted.
static const uint8_t SkipCompacting = 1; // Region should not be compacted, but otherwise handled as usual.
static const uint8_t SkipMarking = 2; // Region contents are not even marked through, but contain live objects.
static const uint8_t Free = 3; // Regions is free.
static const uint8_t Free = 2; // Region is free.
static const uint8_t Invalid = 255;
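With closed archive regions gone, the per-region attribute table only needs to distinguish compacting, skip-compacting, and free regions. A standalone sketch of such a table (plain C++; the region size and the simple address-to-index mapping stand in for G1BiasedMappedArray and are assumptions):

// Standalone sketch (not HotSpot code) of a per-region attribute byte table
// with the three remaining states.
#include <cassert>
#include <cstdint>
#include <vector>

enum : uint8_t { Compacting = 0, SkipCompacting = 1, Free = 2, Invalid = 255 };

class RegionAttrTable {
  uint64_t _heap_base;
  uint64_t _region_bytes;
  std::vector<uint8_t> _attr;
public:
  RegionAttrTable(uint64_t heap_base, uint64_t region_bytes, size_t num_regions)
    : _heap_base(heap_base), _region_bytes(region_bytes), _attr(num_regions, Invalid) {}

  void set_compacting(size_t idx)      { _attr[idx] = Compacting; }
  void set_skip_compacting(size_t idx) { _attr[idx] = SkipCompacting; }
  void set_free(size_t idx)            { _attr[idx] = Free; }

  // Address-based query, as used for "is this object in a compacting region?".
  bool is_compacting(uint64_t obj_addr) const {
    return _attr[(obj_addr - _heap_base) / _region_bytes] == Compacting;
  }
};

int main() {
  RegionAttrTable t(/*heap_base=*/0, /*region_bytes=*/4 * 1024 * 1024, /*num_regions=*/3);
  t.set_compacting(0);
  t.set_skip_compacting(1);   // e.g. a pinned region
  t.set_free(2);
  assert(t.is_compacting(100) && !t.is_compacting(5 * 1024 * 1024));
  return 0;
}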
@ -56,15 +54,9 @@ public:
void set_invalid(uint idx) { set_by_index(idx, Invalid); }
void set_compacting(uint idx) { set_by_index(idx, Compacting); }
void set_skip_marking(uint idx) { set_by_index(idx, SkipMarking); }
void set_skip_compacting(uint idx) { set_by_index(idx, SkipCompacting); }
void set_free(uint idx) { set_by_index(idx, Free); }
bool is_skip_marking(HeapWord* obj) const {
assert(!is_free(obj), "Should not have objects in free regions.");
return get_by_address(obj) == SkipMarking;
}
bool is_compacting(HeapWord* obj) const {
assert(!is_free(obj), "Should not have objects in free regions.");
return get_by_address(obj) == Compacting;

View file

@ -45,10 +45,6 @@
#include "utilities/debug.hpp"
inline bool G1FullGCMarker::mark_object(oop obj) {
if (_collector->is_skip_marking(obj)) {
return false;
}
// Try to mark.
if (!_bitmap->par_mark(obj)) {
// Lost mark race.
@ -83,11 +79,8 @@ template <class T> inline void G1FullGCMarker::mark_and_push(T* p) {
oop obj = CompressedOops::decode_not_null(heap_oop);
if (mark_object(obj)) {
_oop_stack.push(obj);
assert(_bitmap->is_marked(obj), "Must be marked now - map self");
} else {
assert(_bitmap->is_marked(obj) || _collector->is_skip_marking(obj),
"Must be marked by other or object in skip marking region");
}
assert(_bitmap->is_marked(obj), "Must be marked");
}
}
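mark_and_push() now follows the plain pattern: try to set the mark bit, and push the object only if this thread won the mark. A standalone single-threaded sketch (plain C++; the object graph and bitmap representation are made up):

// Standalone sketch (not HotSpot code): mark-and-push with a mark bitmap and an
// explicit work stack, the pattern used by mark_object()/mark_and_push() above.
#include <bitset>
#include <cstdio>
#include <vector>

const int kNumObjects = 6;
std::vector<int> edges[kNumObjects] = { {1, 2}, {3}, {3, 4}, {}, {5}, {} };

int main() {
  std::bitset<kNumObjects> marked;
  std::vector<int> stack;

  auto mark_object = [&](int obj) {
    if (marked.test(obj)) return false;   // already marked: lost the mark race
    marked.set(obj);
    return true;
  };

  // Object 0 is the root.
  if (mark_object(0)) stack.push_back(0);
  while (!stack.empty()) {
    int obj = stack.back(); stack.pop_back();
    for (int ref : edges[obj]) {
      if (mark_object(ref)) stack.push_back(ref);   // mark-and-push
    }
  }
  std::printf("marked %zu objects\n", marked.count());
  return 0;
}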

View file

@ -78,7 +78,7 @@ inline void G1AdjustClosure::do_oop(oop* p) { do_oop_work(p); }
inline void G1AdjustClosure::do_oop(narrowOop* p) { do_oop_work(p); }
inline bool G1IsAliveClosure::do_object_b(oop p) {
return _bitmap->is_marked(p) || _collector->is_skip_marking(p);
return _bitmap->is_marked(p);
}
template<typename T>

View file

@ -48,8 +48,6 @@ bool G1FullGCPrepareTask::G1CalculatePointersClosure::do_heap_region(HeapRegion*
assert(_collector->is_compaction_target(region_idx), "must be");
assert(!hr->is_pinned(), "must be");
assert(!hr->is_closed_archive(), "must be");
assert(!hr->is_open_archive(), "must be");
prepare_for_compaction(hr);

View file

@ -92,13 +92,6 @@ inline bool G1DetermineCompactionQueueClosure::do_heap_region(HeapRegion* hr) {
} else {
_collector->set_has_humongous();
}
} else if (hr->is_open_archive()) {
bool is_empty = _collector->live_words(hr->hrm_index()) == 0;
if (is_empty) {
free_pinned_region<false>(hr);
}
} else if (hr->is_closed_archive()) {
// nothing to do with closed archive region
} else {
assert(MarkSweepDeadRatio > 0,
"only skip compaction for other regions when MarkSweepDeadRatio > 0");

View file

@ -40,7 +40,7 @@ bool G1FullGCResetMetadataTask::G1ResetMetadataClosure::do_heap_region(HeapRegio
uint const region_idx = hr->hrm_index();
if (!_collector->is_compaction_target(region_idx)) {
assert(!hr->is_free(), "all free regions should be compaction targets");
assert(_collector->is_skip_compacting(region_idx) || hr->is_closed_archive(), "must be");
assert(_collector->is_skip_compacting(region_idx), "must be");
if (hr->needs_scrubbing_during_full_gc()) {
scrub_skip_compacting_region(hr, hr->is_young());
}
@ -90,12 +90,6 @@ void G1FullGCResetMetadataTask::G1ResetMetadataClosure::reset_skip_compacting(He
if (hr->is_humongous()) {
oop obj = cast_to_oop(hr->humongous_start_region()->bottom());
assert(_collector->mark_bitmap()->is_marked(obj), "must be live");
} else if (hr->is_open_archive()) {
bool is_empty = (_collector->live_words(hr->hrm_index()) == 0);
assert(!is_empty, "should contain at least one live obj");
} else if (hr->is_closed_archive()) {
// should early-return above
ShouldNotReachHere();
} else {
assert(_collector->live_words(region_index) > _collector->scope()->region_compaction_threshold(),
"should be quite full");

View file

@ -37,8 +37,6 @@ class G1HeapRegionTraceType : AllStatic {
StartsHumongous,
ContinuesHumongous,
Old,
OpenArchive,
ClosedArchive,
G1HeapRegionTypeEndSentinel
};
@ -50,8 +48,6 @@ class G1HeapRegionTraceType : AllStatic {
case StartsHumongous: return "Starts Humongous";
case ContinuesHumongous: return "Continues Humongous";
case Old: return "Old";
case OpenArchive: return "OpenArchive";
case ClosedArchive: return "ClosedArchive";
default: ShouldNotReachHere(); return NULL;
}
}

View file

@ -1,5 +1,5 @@
/*
* Copyright (c) 2016, 2021, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2016, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -33,7 +33,6 @@ G1HeapTransition::Data::Data(G1CollectedHeap* g1_heap) :
_eden_length(g1_heap->eden_regions_count()),
_survivor_length(g1_heap->survivor_regions_count()),
_old_length(g1_heap->old_regions_count()),
_archive_length(g1_heap->archive_regions_count()),
_humongous_length(g1_heap->humongous_regions_count()),
_meta_sizes(MetaspaceUtils::get_combined_statistics()),
_eden_length_per_node(NULL),
@ -67,19 +66,17 @@ struct DetailedUsage : public StackObj {
size_t _eden_used;
size_t _survivor_used;
size_t _old_used;
size_t _archive_used;
size_t _humongous_used;
size_t _eden_region_count;
size_t _survivor_region_count;
size_t _old_region_count;
size_t _archive_region_count;
size_t _humongous_region_count;
DetailedUsage() :
_eden_used(0), _survivor_used(0), _old_used(0), _archive_used(0), _humongous_used(0),
_eden_used(0), _survivor_used(0), _old_used(0), _humongous_used(0),
_eden_region_count(0), _survivor_region_count(0), _old_region_count(0),
_archive_region_count(0), _humongous_region_count(0) {}
_humongous_region_count(0) {}
};
class DetailedUsageClosure: public HeapRegionClosure {
@ -89,9 +86,6 @@ public:
if (r->is_old()) {
_usage._old_used += r->used();
_usage._old_region_count++;
} else if (r->is_archive()) {
_usage._archive_used += r->used();
_usage._archive_region_count++;
} else if (r->is_survivor()) {
_usage._survivor_used += r->used();
_usage._survivor_region_count++;
@ -152,8 +146,6 @@ void G1HeapTransition::print() {
after._survivor_length, usage._survivor_region_count);
assert(usage._old_region_count == after._old_length, "Expected old to be " SIZE_FORMAT " but was " SIZE_FORMAT,
after._old_length, usage._old_region_count);
assert(usage._archive_region_count == after._archive_length, "Expected archive to be " SIZE_FORMAT " but was " SIZE_FORMAT,
after._archive_length, usage._archive_region_count);
assert(usage._humongous_region_count == after._humongous_length, "Expected humongous to be " SIZE_FORMAT " but was " SIZE_FORMAT,
after._humongous_length, usage._humongous_region_count);
}
@ -172,11 +164,6 @@ void G1HeapTransition::print() {
log_trace(gc, heap)(" Used: " SIZE_FORMAT "K, Waste: " SIZE_FORMAT "K",
usage._old_used / K, ((after._old_length * HeapRegion::GrainBytes) - usage._old_used) / K);
log_info(gc, heap)("Archive regions: " SIZE_FORMAT "->" SIZE_FORMAT,
_before._archive_length, after._archive_length);
log_trace(gc, heap)(" Used: " SIZE_FORMAT "K, Waste: " SIZE_FORMAT "K",
usage._archive_used / K, ((after._archive_length * HeapRegion::GrainBytes) - usage._archive_used) / K);
log_info(gc, heap)("Humongous regions: " SIZE_FORMAT "->" SIZE_FORMAT,
_before._humongous_length, after._humongous_length);
log_trace(gc, heap)(" Used: " SIZE_FORMAT "K, Waste: " SIZE_FORMAT "K",

View file

@ -1,5 +1,5 @@
/*
* Copyright (c) 2016, 2019, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2016, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -35,7 +35,6 @@ class G1HeapTransition {
size_t _eden_length;
size_t _survivor_length;
size_t _old_length;
size_t _archive_length;
size_t _humongous_length;
const MetaspaceCombinedStats _meta_sizes;

View file

@ -233,99 +233,6 @@ public:
size_t live_bytes() { return _live_bytes; }
};
class VerifyArchiveOopClosure: public BasicOopIterateClosure {
HeapRegion* _hr;
public:
VerifyArchiveOopClosure(HeapRegion *hr) : _hr(hr) { }
void do_oop(narrowOop *p) { do_oop_work(p); }
void do_oop( oop *p) { do_oop_work(p); }
template <class T> void do_oop_work(T *p) {
oop obj = RawAccess<>::oop_load(p);
if (_hr->is_open_archive()) {
guarantee(obj == NULL || G1CollectedHeap::heap()->heap_region_containing(obj)->is_archive(),
"Archive object at " PTR_FORMAT " references a non-archive object at " PTR_FORMAT,
p2i(p), p2i(obj));
} else {
assert(_hr->is_closed_archive(), "should be closed archive region");
guarantee(obj == NULL || G1CollectedHeap::heap()->heap_region_containing(obj)->is_closed_archive(),
"Archive object at " PTR_FORMAT " references a non-archive object at " PTR_FORMAT,
p2i(p), p2i(obj));
}
}
};
class VerifyObjectInArchiveRegionClosure: public ObjectClosure {
HeapRegion* _hr;
public:
VerifyObjectInArchiveRegionClosure(HeapRegion *hr, bool verbose)
: _hr(hr) { }
// Verify that all object pointers are to archive regions.
void do_object(oop o) {
VerifyArchiveOopClosure checkOop(_hr);
assert(o != NULL, "Should not be here for NULL oops");
o->oop_iterate(&checkOop);
}
};
// Should be only used at CDS dump time
class VerifyReadyForArchivingRegionClosure : public HeapRegionClosure {
bool _seen_free;
bool _has_holes;
bool _has_unexpected_holes;
bool _has_humongous;
public:
bool has_holes() {return _has_holes;}
bool has_unexpected_holes() {return _has_unexpected_holes;}
bool has_humongous() {return _has_humongous;}
VerifyReadyForArchivingRegionClosure() : HeapRegionClosure() {
_seen_free = false;
_has_holes = false;
_has_unexpected_holes = false;
_has_humongous = false;
}
virtual bool do_heap_region(HeapRegion* hr) {
const char* hole = "";
if (hr->is_free()) {
_seen_free = true;
} else {
if (_seen_free) {
_has_holes = true;
if (hr->is_humongous()) {
hole = " hole";
} else {
_has_unexpected_holes = true;
hole = " hole **** unexpected ****";
}
}
}
if (hr->is_humongous()) {
_has_humongous = true;
}
log_info(gc, region, cds)("HeapRegion " PTR_FORMAT " %s%s", p2i(hr->bottom()), hr->get_type_str(), hole);
return false;
}
};
class VerifyArchivePointerRegionClosure: public HeapRegionClosure {
virtual bool do_heap_region(HeapRegion* r) {
if (r->is_archive()) {
VerifyObjectInArchiveRegionClosure verify_oop_pointers(r, false);
r->object_iterate(&verify_oop_pointers);
}
return false;
}
};
void G1HeapVerifier::verify_archive_regions() {
G1CollectedHeap* g1h = G1CollectedHeap::heap();
VerifyArchivePointerRegionClosure cl;
g1h->heap_region_iterate(&cl);
}
class VerifyRegionClosure: public HeapRegionClosure {
private:
VerifyOption _vo;
@ -346,14 +253,7 @@ public:
// Humongous and old regions might be of any state, so can't check here.
guarantee(!r->is_free() || !r->rem_set()->is_tracked(), "Remembered set for free region %u must be untracked, is %s", r->hrm_index(), r->rem_set()->get_state_str());
// For archive regions, verify there are no heap pointers to non-pinned regions.
if (r->is_closed_archive()) {
VerifyObjectInArchiveRegionClosure verify_oop_pointers(r, false);
r->object_iterate(&verify_oop_pointers);
} else if (r->is_open_archive()) {
VerifyObjsInRegionClosure verify_open_archive_oop(r, _vo);
r->object_iterate(&verify_open_archive_oop);
} else if (r->is_continues_humongous()) {
if (r->is_continues_humongous()) {
// Verify that the continues humongous regions' remembered set state
// matches the one from the starts humongous region.
if (r->rem_set()->get_state_str() != r->humongous_start_region()->rem_set()->get_state_str()) {
@ -482,22 +382,19 @@ void G1HeapVerifier::verify(VerifyOption vo) {
class VerifyRegionListsClosure : public HeapRegionClosure {
private:
HeapRegionSet* _old_set;
HeapRegionSet* _archive_set;
HeapRegionSet* _humongous_set;
HeapRegionManager* _hrm;
public:
uint _old_count;
uint _archive_count;
uint _humongous_count;
uint _free_count;
VerifyRegionListsClosure(HeapRegionSet* old_set,
HeapRegionSet* archive_set,
HeapRegionSet* humongous_set,
HeapRegionManager* hrm) :
_old_set(old_set), _archive_set(archive_set), _humongous_set(humongous_set), _hrm(hrm),
_old_count(), _archive_count(), _humongous_count(), _free_count(){ }
_old_set(old_set), _humongous_set(humongous_set), _hrm(hrm),
_old_count(), _humongous_count(), _free_count(){ }
bool do_heap_region(HeapRegion* hr) {
if (hr->is_young()) {
@ -508,24 +405,20 @@ public:
} else if (hr->is_empty()) {
assert(_hrm->is_free(hr), "Heap region %u is empty but not on the free list.", hr->hrm_index());
_free_count++;
} else if (hr->is_archive()) {
assert(hr->containing_set() == _archive_set, "Heap region %u is archive but not in the archive set.", hr->hrm_index());
_archive_count++;
} else if (hr->is_old()) {
assert(hr->containing_set() == _old_set, "Heap region %u is old but not in the old set.", hr->hrm_index());
_old_count++;
} else {
// There are no other valid region types. Check for one invalid
// one we can identify: pinned without old or humongous set.
assert(!hr->is_pinned(), "Heap region %u is pinned but not old (archive) or humongous.", hr->hrm_index());
assert(!hr->is_pinned(), "Heap region %u is pinned but not old or humongous.", hr->hrm_index());
ShouldNotReachHere();
}
return false;
}
void verify_counts(HeapRegionSet* old_set, HeapRegionSet* archive_set, HeapRegionSet* humongous_set, HeapRegionManager* free_list) {
void verify_counts(HeapRegionSet* old_set, HeapRegionSet* humongous_set, HeapRegionManager* free_list) {
guarantee(old_set->length() == _old_count, "Old set count mismatch. Expected %u, actual %u.", old_set->length(), _old_count);
guarantee(archive_set->length() == _archive_count, "Archive set count mismatch. Expected %u, actual %u.", archive_set->length(), _archive_count);
guarantee(humongous_set->length() == _humongous_count, "Hum set count mismatch. Expected %u, actual %u.", humongous_set->length(), _humongous_count);
guarantee(free_list->num_free_regions() == _free_count, "Free list count mismatch. Expected %u, actual %u.", free_list->num_free_regions(), _free_count);
}
@ -540,9 +433,9 @@ void G1HeapVerifier::verify_region_sets() {
// Finally, make sure that the region accounting in the lists is
// consistent with what we see in the heap.
VerifyRegionListsClosure cl(&_g1h->_old_set, &_g1h->_archive_set, &_g1h->_humongous_set, &_g1h->_hrm);
VerifyRegionListsClosure cl(&_g1h->_old_set, &_g1h->_humongous_set, &_g1h->_hrm);
_g1h->heap_region_iterate(&cl);
cl.verify_counts(&_g1h->_old_set, &_g1h->_archive_set, &_g1h->_humongous_set, &_g1h->_hrm);
cl.verify_counts(&_g1h->_old_set, &_g1h->_humongous_set, &_g1h->_hrm);
}
void G1HeapVerifier::prepare_for_verify() {
@ -693,11 +586,6 @@ public:
return true;
}
if (region_attr.is_in_cset()) {
if (hr->is_archive()) {
log_error(gc, verify)("## is_archive in collection set for region %u", i);
_failures = true;
return true;
}
if (hr->is_young() != (region_attr.is_young())) {
log_error(gc, verify)("## is_young %d / region attr type %s inconsistency for region %u",
hr->is_young(), region_attr.get_type_str(), i);

View file

@ -80,8 +80,6 @@ public:
void verify_not_dirty_region(HeapRegion* hr) PRODUCT_RETURN;
void verify_dirty_region(HeapRegion* hr) PRODUCT_RETURN;
void verify_dirty_young_regions() PRODUCT_RETURN;
static void verify_archive_regions();
};
#endif // SHARE_GC_G1_G1HEAPVERIFIER_HPP

View file

@ -1,5 +1,5 @@
/*
* Copyright (c) 2001, 2022, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2001, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -199,7 +199,7 @@ private:
// collection. Subsumes common checks like filtering out everything but old and
// humongous regions outside the collection set.
// This is valid because we are not interested in scanning stray remembered set
// entries from free or archive regions.
// entries from free regions.
HeapWord** _scan_top;
class G1ClearCardTableTask : public G1AbstractSubTask {
@ -327,7 +327,7 @@ public:
// as we do not clean up remembered sets before merging heap roots.
bool contains_cards_to_process(uint const region_idx) const {
HeapRegion* hr = G1CollectedHeap::heap()->region_at_or_null(region_idx);
return (hr != NULL && !hr->in_collection_set() && hr->is_old_or_humongous_or_archive());
return (hr != NULL && !hr->in_collection_set() && hr->is_old_or_humongous());
}
size_t num_visited_cards() const {
@ -426,7 +426,7 @@ public:
void add_dirty_region(uint const region) {
#ifdef ASSERT
HeapRegion* hr = G1CollectedHeap::heap()->region_at(region);
assert(!hr->in_collection_set() && hr->is_old_or_humongous_or_archive(),
assert(!hr->in_collection_set() && hr->is_old_or_humongous(),
"Region %u is not suitable for scanning, is %sin collection set or %s",
hr->hrm_index(), hr->in_collection_set() ? "" : "not ", hr->get_short_type_str());
#endif
@ -714,7 +714,7 @@ public:
}
bool do_heap_region(HeapRegion* r) {
assert(!r->in_collection_set() && r->is_old_or_humongous_or_archive(),
assert(!r->in_collection_set() && r->is_old_or_humongous(),
"Should only be called on old gen non-collection set regions but region %u is not.",
r->hrm_index());
uint const region_idx = r->hrm_index();
@ -883,7 +883,7 @@ void G1RemSet::prepare_region_for_scan(HeapRegion* r) {
// to NULL (don't scan) in the initialization.
if (r->in_collection_set()) {
assert_scan_top_is_null(hrm_index);
} else if (r->is_old_or_humongous_or_archive()) {
} else if (r->is_old_or_humongous()) {
_scan_state->set_scan_top(hrm_index, r->top());
} else {
assert_scan_top_is_null(hrm_index);
@ -1476,7 +1476,7 @@ bool G1RemSet::clean_card_before_refine(CardValue** const card_ptr_addr) {
// In the normal (non-stale) case, the synchronization between the
// enqueueing of the card and processing it here will have ensured
// we see the up-to-date region type here.
if (!r->is_old_or_humongous_or_archive()) {
if (!r->is_old_or_humongous()) {
return false;
}
@ -1485,9 +1485,8 @@ bool G1RemSet::clean_card_before_refine(CardValue** const card_ptr_addr) {
// (part of) an object at the end of the allocated space and extend
// beyond the end of allocation.
// Non-humongous objects are either allocated in the old regions during GC,
// or mapped in archive regions during startup. So if region is old or
// archive then top is stable.
// Non-humongous objects are allocated in the old regions during GC,
// so if the region is old then top is stable.
// Humongous object allocation sets top last; if top has not yet been set,
// this is a stale card and we'll end up with an empty intersection.
// If this is not a stale card, the synchronization between the
@ -1518,7 +1517,7 @@ void G1RemSet::refine_card_concurrently(CardValue* const card_ptr,
// And find the region containing it.
HeapRegion* r = _g1h->heap_region_containing(start);
// This reload of the top is safe even though it happens after the full
// fence, because top is stable for old, archive and unfiltered humongous
// fence, because top is stable for old and unfiltered humongous
// regions, so it must return the same value as the previous load when
// cleaning the card. Also cleaning the card and refinement of the card
// cannot span across safepoint, so we don't need to worry about top being

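The hunks above all make the same substitution: is_old_or_humongous_or_archive() becomes is_old_or_humongous(), both when deciding whether a dirty card is worth processing and when installing the per-region scan top. Below is a minimal standalone sketch of that decision, assuming a simplified region model; RegionSketch, scan_top_for and their fields are illustrative names, not HotSpot's.

#include <cassert>

struct RegionSketch {
  bool  in_collection_set;
  bool  old_or_humongous;
  void* top;                       // current allocation top of the region
};

// Mirrors the new logic above: only non-collection-set old or humongous regions
// get a non-null scan top; free and young regions are skipped entirely.
static void* scan_top_for(const RegionSketch& r) {
  if (r.in_collection_set) return nullptr;
  if (r.old_or_humongous)  return r.top;
  return nullptr;
}

int main() {
  char dummy;
  RegionSketch old_region   = { false, true,  &dummy };
  RegionSketch young_region = { false, false, &dummy };
  assert(scan_top_for(old_region) == &dummy);    // will be scanned
  assert(scan_top_for(young_region) == nullptr); // stray entries filtered out
  return 0;
}

With archive regions gone there is simply one fewer region kind whose top is stable outside the collection set, which is why the predicate loses its third term.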
View file

@ -1,5 +1,5 @@
/*
* Copyright (c) 2013, 2022, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2013, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -187,7 +187,6 @@ private:
RegionTypeCounter _humongous;
RegionTypeCounter _free;
RegionTypeCounter _old;
RegionTypeCounter _archive;
RegionTypeCounter _all;
size_t _max_rs_mem_sz;
@ -211,7 +210,7 @@ private:
public:
HRRSStatsIter() : _young("Young"), _humongous("Humongous"),
_free("Free"), _old("Old"), _archive("Archive"), _all("All"),
_free("Free"), _old("Old"), _all("All"),
_max_rs_mem_sz(0), _max_rs_mem_sz_region(NULL),
_max_code_root_mem_sz(0), _max_code_root_mem_sz_region(NULL)
{}
@ -244,8 +243,6 @@ public:
current = &_humongous;
} else if (r->is_old()) {
current = &_old;
} else if (r->is_archive()) {
current = &_archive;
} else {
ShouldNotReachHere();
}
@ -258,7 +255,7 @@ public:
}
void print_summary_on(outputStream* out) {
RegionTypeCounter* counters[] = { &_young, &_humongous, &_free, &_old, &_archive, NULL };
RegionTypeCounter* counters[] = { &_young, &_humongous, &_free, &_old, NULL };
out->print_cr(" Current rem set statistics");
out->print_cr(" Total per region rem sets sizes = " SIZE_FORMAT

View file

@ -1,5 +1,5 @@
/*
* Copyright (c) 2018, 2022, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2018, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -30,12 +30,11 @@
#include "runtime/safepoint.hpp"
bool G1RemSetTrackingPolicy::needs_scan_for_rebuild(HeapRegion* r) const {
// All non-free, non-young, non-closed archive regions need to be scanned for references;
// At every gc we gather references to other regions in young, and closed archive
// regions by definition do not have references going outside the closed archive.
// All non-free and non-young regions need to be scanned for references;
// At every gc we gather references to other regions in young.
// Free regions trivially do not need scanning because they do not contain live
// objects.
return !(r->is_young() || r->is_closed_archive() || r->is_free());
return !(r->is_young() || r->is_free());
}
void G1RemSetTrackingPolicy::update_at_allocate(HeapRegion* r) {
@ -45,9 +44,6 @@ void G1RemSetTrackingPolicy::update_at_allocate(HeapRegion* r) {
} else if (r->is_humongous()) {
// Collect remembered sets for humongous regions by default to allow eager reclaim.
r->rem_set()->set_state_complete();
} else if (r->is_archive()) {
// Archive regions never move ever. So never build remembered sets for them.
r->rem_set()->set_state_untracked();
} else if (r->is_old()) {
// By default, do not create remembered set for new old regions.
r->rem_set()->set_state_untracked();
@ -79,10 +75,6 @@ bool G1RemSetTrackingPolicy::update_humongous_before_rebuild(HeapRegion* r, bool
assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint");
assert(r->is_humongous(), "Region %u should be humongous", r->hrm_index());
if (r->is_archive()) {
return false;
}
assert(!r->rem_set()->is_updating(), "Remembered set of region %u is updating before rebuild", r->hrm_index());
bool selected_for_rebuild = false;
@ -104,9 +96,8 @@ bool G1RemSetTrackingPolicy::update_before_rebuild(HeapRegion* r, size_t live_by
assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint");
assert(!r->is_humongous(), "Region %u is humongous", r->hrm_index());
// Only consider updating the remembered set for old gen regions - excluding archive regions
// which never move (but are "Old" regions).
if (!r->is_old() || r->is_archive()) {
// Only consider updating the remembered set for old gen regions.
if (!r->is_old()) {
return false;
}
@ -137,9 +128,8 @@ bool G1RemSetTrackingPolicy::update_before_rebuild(HeapRegion* r, size_t live_by
void G1RemSetTrackingPolicy::update_after_rebuild(HeapRegion* r) {
assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint");
if (r->is_old_or_humongous_or_archive()) {
if (r->is_old_or_humongous()) {
if (r->rem_set()->is_updating()) {
assert(!r->is_archive(), "Archive region %u with remembered set", r->hrm_index());
r->rem_set()->set_state_complete();
}
G1CollectedHeap* g1h = G1CollectedHeap::heap();

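A small self-contained sketch of the simplified rebuild predicate from the first hunk of this file; the RegionKind enum is illustrative rather than HotSpot's HeapRegion type.

#include <cassert>

enum class RegionKind { Free, Eden, Survivor, Old, HumongousStart, HumongousCont };

// Mirrors needs_scan_for_rebuild() above: everything that is neither young nor
// free is scanned for references during remembered-set rebuild.
static bool needs_scan_for_rebuild(RegionKind k) {
  bool is_young = (k == RegionKind::Eden || k == RegionKind::Survivor);
  bool is_free  = (k == RegionKind::Free);
  return !(is_young || is_free);
}

int main() {
  assert(needs_scan_for_rebuild(RegionKind::Old));
  assert(needs_scan_for_rebuild(RegionKind::HumongousStart));
  assert(!needs_scan_for_rebuild(RegionKind::Eden));
  assert(!needs_scan_for_rebuild(RegionKind::Free));
  return 0;
}

Closed-archive regions were the only other exception, since by definition they held no references leaving the closed graph; once they are gone, young and free remain the only skippable kinds.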
View file

@ -263,7 +263,7 @@ public:
virtual ~EagerlyReclaimHumongousObjectsTask() {
G1CollectedHeap* g1h = G1CollectedHeap::heap();
g1h->remove_from_old_gen_sets(0, 0, _humongous_regions_reclaimed);
g1h->remove_from_old_gen_sets(0, _humongous_regions_reclaimed);
g1h->decrement_summary_bytes(_bytes_freed);
}

View file

@ -1,5 +1,5 @@
/*
* Copyright (c) 2001, 2022, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2001, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -179,16 +179,6 @@ void HeapRegion::set_old() {
_type.set_old();
}
void HeapRegion::set_open_archive() {
report_region_type_change(G1HeapRegionTraceType::OpenArchive);
_type.set_open_archive();
}
void HeapRegion::set_closed_archive() {
report_region_type_change(G1HeapRegionTraceType::ClosedArchive);
_type.set_closed_archive();
}
void HeapRegion::set_starts_humongous(HeapWord* obj_top, size_t fill_size) {
assert(!is_humongous(), "sanity / pre-condition");
assert(top() == bottom(), "should be empty");

View file

@ -1,5 +1,5 @@
/*
* Copyright (c) 2001, 2022, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2001, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -375,11 +375,10 @@ public:
// During the concurrent scrubbing phase, can there be any areas with unloaded
// classes or dead objects in this region?
// This set only includes old and open archive regions - humongous regions only
// contain a single object which is either dead or live, contents of closed archive
// regions never die (so is always contiguous), and young regions are never even
// This set only includes old regions - humongous regions only
// contain a single object which is either dead or live, and young regions are never even
// considered during concurrent scrub.
bool needs_scrubbing() const { return is_old() || is_open_archive(); }
bool needs_scrubbing() const { return is_old(); }
// Same question as above, during full gc. Full gc needs to scrub any region that
// might be skipped for compaction. This includes young generation regions as the
// region relabeling to old happens later than scrubbing.
@ -403,19 +402,10 @@ public:
bool is_old_or_humongous() const { return _type.is_old_or_humongous(); }
bool is_old_or_humongous_or_archive() const { return _type.is_old_or_humongous_or_archive(); }
// A pinned region contains objects which are not moved by garbage collections.
// Humongous regions and archive regions are pinned.
// Humongous regions are pinned.
bool is_pinned() const { return _type.is_pinned(); }
// An archive region is a pinned region, also tagged as old, which
// should not be marked during mark/sweep. This allows the address
// space to be shared by JVM instances.
bool is_archive() const { return _type.is_archive(); }
bool is_open_archive() const { return _type.is_open_archive(); }
bool is_closed_archive() const { return _type.is_closed_archive(); }
void set_free();
void set_eden();
@ -425,9 +415,6 @@ public:
void move_to_old();
void set_old();
void set_open_archive();
void set_closed_archive();
// For a humongous region, region in which it starts.
HeapRegion* humongous_start_region() const {
return _humongous_start_region;

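A standalone sketch of the two predicates this header simplifies, under the assumption that humongous regions are now the only pinned regions; the struct and field names are stand-ins, not the real HeapRegion API.

#include <cassert>

struct RegionTypeSketch {
  bool old_gen;
  bool humongous;

  bool is_pinned()       const { return humongous; } // archive regions no longer qualify
  bool needs_scrubbing() const { return old_gen;   } // open-archive case dropped
};

int main() {
  RegionTypeSketch old_region = { true,  false };
  RegionTypeSketch humongous  = { false, true  };
  assert(!old_region.is_pinned() && old_region.needs_scrubbing());
  assert(humongous.is_pinned()   && !humongous.needs_scrubbing());
  return 0;
}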
View file

@ -1,5 +1,5 @@
/*
* Copyright (c) 2001, 2022, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2001, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -142,11 +142,6 @@ inline bool HeapRegion::block_is_obj(const HeapWord* const p, HeapWord* const pb
inline bool HeapRegion::is_obj_dead(const oop obj, HeapWord* const pb) const {
assert(is_in_reserved(obj), "Object " PTR_FORMAT " must be in region", p2i(obj));
// Objects in closed archive regions are always live.
if (is_closed_archive()) {
return false;
}
// From Remark until a region has been concurrently scrubbed, parts of the
// region is not guaranteed to be parsable. Use the bitmap for liveness.
if (obj_in_unparsable_area(obj, pb)) {
@ -294,10 +289,7 @@ inline void HeapRegion::reset_parsable_bottom() {
}
inline void HeapRegion::note_start_of_marking() {
assert(!is_closed_archive() || top_at_mark_start() == bottom(), "CA region's TAMS must always be at bottom");
if (!is_closed_archive()) {
set_top_at_mark_start(top());
}
_gc_efficiency = -1.0;
}
@ -496,7 +488,7 @@ HeapWord* HeapRegion::oops_on_memregion_seq_iterate_careful(MemRegion mr,
if (is_humongous()) {
return do_oops_on_memregion_in_humongous<Closure, in_gc_pause>(mr, cl);
}
assert(is_old() || is_archive(), "Wrongly trying to iterate over region %u type %s", _hrm_index, get_type_str());
assert(is_old(), "Wrongly trying to iterate over region %u type %s", _hrm_index, get_type_str());
// Because mr has been trimmed to what's been allocated in this
// region, the objects in these parts of the heap have non-NULL

View file

@ -1,5 +1,5 @@
/*
* Copyright (c) 2011, 2021, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2011, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -37,8 +37,7 @@ void HeapRegionSetBase::verify_region(HeapRegion* hr) {
assert(_checker == NULL || _checker->is_correct_type(hr), "Wrong type of region %u (%s) and set %s",
hr->hrm_index(), hr->get_type_str(), name());
assert(!hr->is_free() || hr->is_empty(), "Free region %u is not empty for set %s", hr->hrm_index(), name());
assert(!hr->is_empty() || hr->is_free() || hr->is_archive(),
"Empty region %u is not free or archive for set %s", hr->hrm_index(), name());
assert(!hr->is_empty() || hr->is_free() || hr->is_old(), "Empty region %u is not free or old for set %s", hr->hrm_index(), name());
}
#endif

View file

@ -1,5 +1,5 @@
/*
* Copyright (c) 2014, 2018, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2014, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -39,8 +39,6 @@ bool HeapRegionType::is_valid(Tag tag) {
case StartsHumongousTag:
case ContinuesHumongousTag:
case OldTag:
case OpenArchiveTag:
case ClosedArchiveTag:
return true;
default:
return false;
@ -56,8 +54,6 @@ const char* HeapRegionType::get_str() const {
case StartsHumongousTag: return "HUMS";
case ContinuesHumongousTag: return "HUMC";
case OldTag: return "OLD";
case OpenArchiveTag: return "OARC";
case ClosedArchiveTag: return "CARC";
default:
ShouldNotReachHere();
return NULL; // keep some compilers happy
@ -73,8 +69,6 @@ const char* HeapRegionType::get_short_str() const {
case StartsHumongousTag: return "HS";
case ContinuesHumongousTag: return "HC";
case OldTag: return "O";
case OpenArchiveTag: return "OA";
case ClosedArchiveTag: return "CA";
default:
ShouldNotReachHere();
return NULL; // keep some compilers happy
@ -90,8 +84,6 @@ G1HeapRegionTraceType::Type HeapRegionType::get_trace_type() {
case StartsHumongousTag: return G1HeapRegionTraceType::StartsHumongous;
case ContinuesHumongousTag: return G1HeapRegionTraceType::ContinuesHumongous;
case OldTag: return G1HeapRegionTraceType::Old;
case OpenArchiveTag: return G1HeapRegionTraceType::OpenArchive;
case ClosedArchiveTag: return G1HeapRegionTraceType::ClosedArchive;
default:
ShouldNotReachHere();
return G1HeapRegionTraceType::Free; // keep some compilers happy

View file

@ -1,5 +1,5 @@
/*
* Copyright (c) 2014, 2022, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2014, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -38,7 +38,7 @@ private:
// We encode the value of the heap region type so the generation can be
// determined quickly. The tag is split into two parts:
//
// major type (young, old, humongous, archive) : top N-1 bits
// major type (young, old, humongous) : top N-1 bits
// minor type (eden / survivor, starts / cont hum, etc.) : bottom 1 bit
//
// If there's need to increase the number of minor types in the
@ -58,10 +58,6 @@ private:
//
// 01000 0 [16] Old Mask
//
// 10000 0 [32] Archive Mask
// 10100 0 [40] Open Archive
// 10100 1 [41] Closed Archive
//
typedef enum {
FreeTag = 0,
@ -75,20 +71,7 @@ private:
ContinuesHumongousTag = HumongousMask | PinnedMask + 1,
OldMask = 16,
OldTag = OldMask,
// Archive regions are regions with immutable content (i.e. not reclaimed, and
// not allocated into during regular operation). They differ in the kind of references
// allowed for the contained objects:
// - Closed archive regions form a separate self-contained (closed) object graph
// within the set of all of these regions. No references outside of closed
// archive regions are allowed.
// - Open archive regions have no restrictions on the references of their objects.
// Objects within these regions are allowed to have references to objects
// contained in any other kind of regions.
ArchiveMask = 32,
OpenArchiveTag = ArchiveMask | PinnedMask,
ClosedArchiveTag = ArchiveMask | PinnedMask + 1
OldTag = OldMask
} Tag;
volatile Tag _tag;
@ -134,18 +117,11 @@ public:
bool is_starts_humongous() const { return get() == StartsHumongousTag; }
bool is_continues_humongous() const { return get() == ContinuesHumongousTag; }
bool is_archive() const { return (get() & ArchiveMask) != 0; }
bool is_open_archive() const { return get() == OpenArchiveTag; }
bool is_closed_archive() const { return get() == ClosedArchiveTag; }
// is_old regions may or may not also be pinned
bool is_old() const { return (get() & OldMask) != 0; }
bool is_old_or_humongous() const { return (get() & (OldMask | HumongousMask)) != 0; }
bool is_old_or_humongous_or_archive() const { return (get() & (OldMask | HumongousMask | ArchiveMask)) != 0; }
// is_pinned regions may be archive or humongous
bool is_pinned() const { return (get() & PinnedMask) != 0; }
// Setters
@ -180,8 +156,6 @@ public:
return true;
}
}
void set_open_archive() { set_from(OpenArchiveTag, FreeTag); }
void set_closed_archive() { set_from(ClosedArchiveTag, FreeTag); }
// Misc

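A compilable sketch of the trimmed-down tag encoding once the Archive mask and tags are removed. FreeTag = 0, OldMask = 16 and the tag arithmetic are taken from the hunk above; the HumongousMask and PinnedMask values are assumptions based on the bit diagram, and the snippet is illustrative rather than HotSpot's definition.

#include <cassert>
#include <cstdint>

enum Tag : std::uint32_t {
  FreeTag               = 0,
  HumongousMask         = 4,                             // assumed value
  PinnedMask            = 8,                             // assumed value
  StartsHumongousTag    = HumongousMask | PinnedMask,
  ContinuesHumongousTag = HumongousMask | PinnedMask + 1,
  OldMask               = 16,
  OldTag                = OldMask
};

static bool is_old_or_humongous(Tag t) { return (t & (OldMask | HumongousMask)) != 0; }
static bool is_pinned(Tag t)           { return (t & PinnedMask) != 0; }

int main() {
  // With the Archive mask gone, pinned implies humongous ...
  assert(is_pinned(StartsHumongousTag) && !is_pinned(OldTag));
  // ... and the old-or-humongous test needs only two mask bits.
  assert(is_old_or_humongous(OldTag) && !is_old_or_humongous(FreeTag));
  return 0;
}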
View file

@ -1,5 +1,5 @@
/*
* Copyright (c) 2011, 2020, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2011, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -57,7 +57,6 @@
nonstatic_field(G1CollectedHeap, _hrm, HeapRegionManager) \
nonstatic_field(G1CollectedHeap, _monitoring_support, G1MonitoringSupport*) \
nonstatic_field(G1CollectedHeap, _old_set, HeapRegionSetBase) \
nonstatic_field(G1CollectedHeap, _archive_set, HeapRegionSetBase) \
nonstatic_field(G1CollectedHeap, _humongous_set, HeapRegionSetBase) \
\
nonstatic_field(G1MonitoringSupport, _eden_space_committed, size_t) \
@ -80,7 +79,6 @@
declare_constant(HeapRegionType::SurvTag) \
declare_constant(HeapRegionType::HumongousMask) \
declare_constant(HeapRegionType::PinnedMask) \
declare_constant(HeapRegionType::ArchiveMask) \
declare_constant(HeapRegionType::StartsHumongousTag) \
declare_constant(HeapRegionType::ContinuesHumongousTag) \
declare_constant(HeapRegionType::OldMask) \

View file

@ -638,10 +638,6 @@ void CollectedHeap::reset_promotion_should_fail() {
#endif // #ifndef PRODUCT
bool CollectedHeap::is_archived_object(oop object) const {
return false;
}
// It's the caller's responsibility to ensure glitch-freedom
// (if required).
void CollectedHeap::update_capacity_and_used_at_gc() {

View file

@ -513,9 +513,6 @@ class CollectedHeap : public CHeapObj<mtGC> {
virtual void pin_object(JavaThread* thread, oop obj) = 0;
virtual void unpin_object(JavaThread* thread, oop obj) = 0;
// Is the given object inside a CDS archive area?
virtual bool is_archived_object(oop object) const;
// Support for loading objects from CDS archive into the heap
// (usually as a snapshot of the old generation).
virtual bool can_load_archived_objects() const { return false; }

View file

@ -1,5 +1,5 @@
/*
* Copyright (c) 2018, 2022, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2018, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -35,11 +35,11 @@
//
// Also, this is a C header file. Do not use C++ here.
#define NUM_CDS_REGIONS 7 // this must be the same as MetaspaceShared::n_regions
#define NUM_CDS_REGIONS 4 // this must be the same as MetaspaceShared::n_regions
#define CDS_ARCHIVE_MAGIC 0xf00baba2
#define CDS_DYNAMIC_ARCHIVE_MAGIC 0xf00baba8
#define CDS_GENERIC_HEADER_SUPPORTED_MIN_VERSION 13
#define CURRENT_CDS_ARCHIVE_VERSION 17
#define CURRENT_CDS_ARCHIVE_VERSION 18
typedef struct CDSFileMapRegion {
int _crc; // CRC checksum of this region.

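As a cross-check on the new layout, here is a small compile-time sketch (written in C++ rather than the C of cds.h, purely for illustration) tying NUM_CDS_REGIONS to the four region names the updated tests below expect: rw, ro, bm and the single hp heap region.

#define NUM_CDS_REGIONS 4   // must stay in sync with MetaspaceShared::n_regions

// Region names as listed in SharedArchiveConsistency.java further below.
static const char* const cds_region_names[] = { "rw", "ro", "bm", "hp" };

static_assert(sizeof(cds_region_names) / sizeof(cds_region_names[0]) == NUM_CDS_REGIONS,
              "region name list must match NUM_CDS_REGIONS");

int main() { return 0; }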
View file

@ -56,10 +56,7 @@ void InstanceMirrorKlass::oop_oop_iterate(oop obj, OopClosureType* closure) {
if (klass != nullptr) {
if (klass->class_loader_data() == nullptr) {
// This is a mirror that belongs to a shared class that has not been loaded yet.
// It's only reachable via HeapShared::roots(). All of its fields should be zero
// so there's no need to scan.
assert(klass->is_shared(), "must be");
return;
} else if (klass->is_instance_klass() && klass->class_loader_data()->has_class_mirror_holder()) {
// A non-strong hidden class doesn't have its own class loader,
// so when handling the java mirror for the class we need to make sure its class

View file

@ -231,13 +231,6 @@ jdouble oopDesc::double_field_acquire(int offset) const { return A
void oopDesc::release_double_field_put(int offset, jdouble value) { Atomic::release_store(field_addr<jdouble>(offset), value); }
#ifdef ASSERT
void oopDesc::verify_forwardee(oop forwardee) {
#if INCLUDE_CDS_JAVA_HEAP
assert(!Universe::heap()->is_archived_object(forwardee) && !Universe::heap()->is_archived_object(this),
"forwarding archive object");
#endif
}
bool oopDesc::size_might_change() {
// UseParallelGC and UseG1GC can change the length field
// of an "old copy" of an object array in the young gen so it indicates

View file

@ -257,8 +257,6 @@ class oopDesc {
// Forward pointer operations for scavenge
inline bool is_forwarded() const;
void verify_forwardee(oop forwardee) NOT_DEBUG_RETURN;
inline void forward_to(oop p);
// Like "forward_to", but inserts the forwarding pointer atomically.

View file

@ -266,14 +266,12 @@ bool oopDesc::is_forwarded() const {
// Used by scavengers
void oopDesc::forward_to(oop p) {
verify_forwardee(p);
markWord m = markWord::encode_pointer_as_mark(p);
assert(m.decode_pointer() == p, "encoding must be reversible");
set_mark(m);
}
oop oopDesc::forward_to_atomic(oop p, markWord compare, atomic_memory_order order) {
verify_forwardee(p);
markWord m = markWord::encode_pointer_as_mark(p);
assert(m.decode_pointer() == p, "encoding must be reversible");
markWord old_mark = cas_set_mark(m, compare, order);

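For context on the assert retained above, a minimal sketch of what "encoding must be reversible" means for a forwarding pointer: the destination address is stored in the mark word with a low-bit tag, and stripping the tag must give back the same address. The tag value and helper names here are assumptions for illustration, not HotSpot's markWord API.

#include <cassert>
#include <cstdint>

static const std::uintptr_t marked_tag = 0x3;   // assumed "forwarded" tag bits

static std::uintptr_t encode_pointer_as_mark(void* p) {
  return reinterpret_cast<std::uintptr_t>(p) | marked_tag;
}

static void* decode_pointer(std::uintptr_t mark) {
  return reinterpret_cast<void*>(mark & ~marked_tag);
}

int main() {
  alignas(8) static char object[16];        // object addresses are at least 8-byte aligned
  void* forwardee = object;
  std::uintptr_t m = encode_pointer_as_mark(forwardee);
  assert(decode_pointer(m) == forwardee);   // encoding must be reversible
  return 0;
}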
View file

@ -2006,11 +2006,6 @@ WB_ENTRY(jboolean, WB_CDSMemoryMappingFailed(JNIEnv* env, jobject wb))
return FileMapInfo::memory_mapping_failed();
WB_END
WB_ENTRY(jboolean, WB_IsShared(JNIEnv* env, jobject wb, jobject obj))
oop obj_oop = JNIHandles::resolve(obj);
return Universe::heap()->is_archived_object(obj_oop);
WB_END
WB_ENTRY(jboolean, WB_IsSharedInternedString(JNIEnv* env, jobject wb, jobject str))
ResourceMark rm(THREAD);
oop str_oop = JNIHandles::resolve(str);
@ -2024,19 +2019,7 @@ WB_ENTRY(jboolean, WB_IsSharedClass(JNIEnv* env, jobject wb, jclass clazz))
WB_END
WB_ENTRY(jboolean, WB_AreSharedStringsMapped(JNIEnv* env))
return ArchiveHeapLoader::closed_regions_mapped();
WB_END
WB_ENTRY(jobject, WB_GetResolvedReferences(JNIEnv* env, jobject wb, jclass clazz))
Klass *k = java_lang_Class::as_Klass(JNIHandles::resolve_non_null(clazz));
if (k->is_instance_klass()) {
InstanceKlass *ik = InstanceKlass::cast(k);
ConstantPool *cp = ik->constants();
objArrayOop refs = cp->resolved_references();
return (jobject)JNIHandles::make_local(THREAD, refs);
} else {
return nullptr;
}
return ArchiveHeapLoader::is_mapped();
WB_END
WB_ENTRY(void, WB_LinkClass(JNIEnv* env, jobject wb, jclass clazz))
@ -2049,7 +2032,7 @@ WB_ENTRY(void, WB_LinkClass(JNIEnv* env, jobject wb, jclass clazz))
WB_END
WB_ENTRY(jboolean, WB_AreOpenArchiveHeapObjectsMapped(JNIEnv* env))
return ArchiveHeapLoader::open_regions_mapped();
return ArchiveHeapLoader::is_mapped();
WB_END
WB_ENTRY(jboolean, WB_IsCDSIncluded(JNIEnv* env))
@ -2766,11 +2749,9 @@ static JNINativeMethod methods[] = {
{CC"getCDSGenericHeaderMinVersion", CC"()I", (void*)&WB_GetCDSGenericHeaderMinVersion},
{CC"getCurrentCDSVersion", CC"()I", (void*)&WB_GetCDSCurrentVersion},
{CC"isSharingEnabled", CC"()Z", (void*)&WB_IsSharingEnabled},
{CC"isShared", CC"(Ljava/lang/Object;)Z", (void*)&WB_IsShared },
{CC"isSharedInternedString", CC"(Ljava/lang/String;)Z", (void*)&WB_IsSharedInternedString },
{CC"isSharedClass", CC"(Ljava/lang/Class;)Z", (void*)&WB_IsSharedClass },
{CC"areSharedStringsMapped", CC"()Z", (void*)&WB_AreSharedStringsMapped },
{CC"getResolvedReferences", CC"(Ljava/lang/Class;)Ljava/lang/Object;", (void*)&WB_GetResolvedReferences},
{CC"linkClass", CC"(Ljava/lang/Class;)V", (void*)&WB_LinkClass},
{CC"areOpenArchiveHeapObjectsMapped", CC"()Z", (void*)&WB_AreOpenArchiveHeapObjectsMapped},
{CC"isCDSIncluded", CC"()Z", (void*)&WB_IsCDSIncluded },

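With a single archive heap block there is only one mapping state left to query, which is why both WhiteBox entry points above now return ArchiveHeapLoader::is_mapped(). A trivial illustrative sketch; the class and field below are stand-ins, not the real ArchiveHeapLoader.

#include <cassert>

struct ArchiveHeapLoaderSketch {
  bool mapped = false;              // set once the single heap ("hp") block is mapped
  bool is_mapped() const { return mapped; }
};

int main() {
  ArchiveHeapLoaderSketch loader;
  loader.mapped = true;
  bool shared_strings_mapped = loader.is_mapped();
  bool open_archive_mapped   = loader.is_mapped();
  assert(shared_strings_mapped == open_archive_mapped);   // always identical now
  return 0;
}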
View file

@ -1,5 +1,5 @@
/*
* Copyright (c) 2011, 2022, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2011, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -57,8 +57,6 @@ public class G1CollectedHeap extends CollectedHeap {
private static AddressField monitoringSupportField;
// HeapRegionSet _old_set;
private static long oldSetFieldOffset;
// HeapRegionSet _archive_set;
private static long archiveSetFieldOffset;
// HeapRegionSet _humongous_set;
private static long humongousSetFieldOffset;
@ -77,7 +75,6 @@ public class G1CollectedHeap extends CollectedHeap {
summaryBytesUsedField = type.getCIntegerField("_summary_bytes_used");
monitoringSupportField = type.getAddressField("_monitoring_support");
oldSetFieldOffset = type.getField("_old_set").getOffset();
archiveSetFieldOffset = type.getField("_archive_set").getOffset();
humongousSetFieldOffset = type.getField("_humongous_set").getOffset();
}
@ -108,11 +105,6 @@ public class G1CollectedHeap extends CollectedHeap {
return VMObjectFactory.newObject(HeapRegionSetBase.class, oldSetAddr);
}
public HeapRegionSetBase archiveSet() {
Address archiveSetAddr = addr.addOffsetTo(archiveSetFieldOffset);
return VMObjectFactory.newObject(HeapRegionSetBase.class, archiveSetAddr);
}
public HeapRegionSetBase humongousSet() {
Address humongousSetAddr = addr.addOffsetTo(humongousSetFieldOffset);
return VMObjectFactory.newObject(HeapRegionSetBase.class, humongousSetAddr);

View file

@ -1,5 +1,5 @@
/*
* Copyright (c) 2017, 2020, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2017, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -46,7 +46,6 @@ public class HeapRegionType extends VMObject {
private static int startsHumongousTag;
private static int continuesHumongousTag;
private static int pinnedMask;
private static int archiveMask;
private static int oldMask;
private static CIntegerField tagField;
private int tag;
@ -70,7 +69,6 @@ public class HeapRegionType extends VMObject {
survTag = db.lookupIntConstant("HeapRegionType::SurvTag");
startsHumongousTag = db.lookupIntConstant("HeapRegionType::StartsHumongousTag");
continuesHumongousTag = db.lookupIntConstant("HeapRegionType::ContinuesHumongousTag");
archiveMask = db.lookupIntConstant("HeapRegionType::ArchiveMask");
humongousMask = db.lookupIntConstant("HeapRegionType::HumongousMask");
pinnedMask = db.lookupIntConstant("HeapRegionType::PinnedMask");
oldMask = db.lookupIntConstant("HeapRegionType::OldMask");
@ -104,10 +102,6 @@ public class HeapRegionType extends VMObject {
return tagField.getValue(addr) == continuesHumongousTag;
}
public boolean isArchive() {
return (tagField.getValue(addr) & archiveMask) != 0;
}
public boolean isPinned() {
return (tagField.getValue(addr) & pinnedMask) != 0;
}
@ -136,9 +130,6 @@ public class HeapRegionType extends VMObject {
if (isContinuesHumongous()) {
return "ContinuesHumongous";
}
if (isArchive()) {
return "Archive";
}
if (isPinned()) {
return "Pinned";
}

View file

@ -1,5 +1,5 @@
/*
* Copyright (c) 2003, 2021, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2003, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -248,9 +248,8 @@ public class HeapSummary extends Tool {
long edenSpaceRegionNum = monitoringSupport.edenSpaceRegionNum();
long survivorSpaceRegionNum = monitoringSupport.survivorSpaceRegionNum();
HeapRegionSetBase oldSet = g1h.oldSet();
HeapRegionSetBase archiveSet = g1h.archiveSet();
HeapRegionSetBase humongousSet = g1h.humongousSet();
long oldGenRegionNum = oldSet.length() + archiveSet.length() + humongousSet.length();
long oldGenRegionNum = oldSet.length() + humongousSet.length();
printG1Space(tty, "G1 Heap:", g1h.n_regions(),
g1h.used(), g1h.capacity());
tty.println("G1 Young Generation:");

View file

@ -1,5 +1,5 @@
/*
* Copyright (c) 2014, 2022, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2014, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -63,20 +63,22 @@ public class SpaceUtilizationCheck {
// Look for output like this. The pattern will only match the first 2 regions, which is what we need to check
//
// [4.682s][debug][cds] rw space: 4391632 [ 33.7% of total] out of 4395008 bytes [ 99.9% used] at 0x0000000800007000
// [4.682s][debug][cds] ro space: 7570632 [ 58.0% of total] out of 7573504 bytes [100.0% used] at 0x0000000800438000
// [4.682s][debug][cds] bm space: 213528 [ 1.6% of total] out of 213528 bytes [100.0% used]
// [4.682s][debug][cds] ca0 space: 507904 [ 3.9% of total] out of 507904 bytes [100.0% used] at 0x00000000fff00000
// [4.682s][debug][cds] oa0 space: 327680 [ 2.5% of total] out of 327680 bytes [100.0% used] at 0x00000000ffe00000
// [4.682s][debug][cds] total : 13036288 [100.0% of total] out of 13049856 bytes [ 99.9% used]
// [0.938s][debug][cds] rw space: 5253952 [ 35.2% of total] out of 5255168 bytes [100.0% used] at 0x0000000800000000
// [0.938s][debug][cds] ro space: 8353976 [ 55.9% of total] out of 8355840 bytes [100.0% used] at 0x0000000800503000
// [0.938s][debug][cds] bm space: 262232 [ 1.8% of total] out of 262232 bytes [100.0% used]
// [0.938s][debug][cds] hp space: 1057712 [ 7.1% of total] out of 1057712 bytes [100.0% used] at 0x00007fa24c180090
// [0.938s][debug][cds] total : 14927872 [100.0% of total] out of 14934960 bytes [100.0% used]
long last_region = -1;
Hashtable<String,String> checked = new Hashtable<>();
for (String line : output.getStdout().split("\n")) {
if (line.contains(" space:") && !line.contains("st space:")) {
if (line.contains(" space:")) {
Matcher matcher = pattern.matcher(line);
if (matcher.find()) {
String name = matcher.group(1);
if (!name.equals("rw") && ! name.equals("ro")) {
continue;
}
System.out.println("Checking " + name + " in : " + line);
checked.put(name, name);
long used = Long.parseLong(matcher.group(2));

View file

@ -1,5 +1,5 @@
/*
* Copyright (c) 2014, 2022, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2014, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -50,10 +50,7 @@ public class SharedArchiveConsistency {
"rw", // ReadWrite
"ro", // ReadOnly
"bm", // relocation bitmaps
"first_closed_archive",
"last_closed_archive",
"first_open_archive",
"last_open_archive"
"hp", // heap
};
public static final String HELLO_WORLD = "Hello World";

View file

@ -27,10 +27,8 @@
* @summary Test primitive box caches integrity in various scenarios (IntegerCache etc)
* @requires vm.cds.write.archived.java.heap
* @library /test/jdk/lib/testlibrary /test/lib /test/hotspot/jtreg/runtime/cds/appcds
* @build jdk.test.whitebox.WhiteBox
* @compile CheckIntegerCacheApp.java
* @run driver jdk.test.lib.helpers.ClassFileInstaller -jar boxCache.jar CheckIntegerCacheApp
* @run driver jdk.test.lib.helpers.ClassFileInstaller -jar WhiteBox.jar jdk.test.whitebox.WhiteBox
* @run driver ArchivedIntegerCacheTest
*/
@ -44,8 +42,6 @@ import jdk.test.lib.helpers.ClassFileInstaller;
public class ArchivedIntegerCacheTest {
public static void main(String[] args) throws Exception {
String wbJar = ClassFileInstaller.getJarPath("WhiteBox.jar");
String use_whitebox_jar = "-Xbootclasspath/a:" + wbJar;
String appJar = ClassFileInstaller.getJarPath("boxCache.jar");
Path userDir = Paths.get(CDSTestUtils.getOutputDir());
@ -55,32 +51,25 @@ public class ArchivedIntegerCacheTest {
// Dump default archive
//
OutputAnalyzer output = TestCommon.dump(appJar,
TestCommon.list("CheckIntegerCacheApp"),
use_whitebox_jar);
TestCommon.list("CheckIntegerCacheApp"));
TestCommon.checkDump(output);
// Test case 1)
// - Default options
System.out.println("----------------------- Test case 1 ----------------------");
output = TestCommon.exec(appJar, use_whitebox_jar,
"-XX:+UnlockDiagnosticVMOptions",
"-XX:+WhiteBoxAPI",
output = TestCommon.exec(appJar,
"CheckIntegerCacheApp",
"127",
"true");
"127");
TestCommon.checkExec(output);
// Test case 2)
// - Default archive
// - Larger -XX:AutoBoxCacheMax
System.out.println("----------------------- Test case 2 ----------------------");
output = TestCommon.exec(appJar, use_whitebox_jar,
"-XX:+UnlockDiagnosticVMOptions",
"-XX:+WhiteBoxAPI",
output = TestCommon.exec(appJar,
"-XX:AutoBoxCacheMax=20000",
"CheckIntegerCacheApp",
"20000",
"false");
"20000");
TestCommon.checkExec(output);
//
@ -88,22 +77,18 @@ public class ArchivedIntegerCacheTest {
//
output = TestCommon.dump(appJar,
TestCommon.list("CheckIntegerCacheApp"),
"-XX:AutoBoxCacheMax=20000",
use_whitebox_jar);
"-XX:AutoBoxCacheMax=20000");
TestCommon.checkDump(output);
// Test case 3)
// - Large archived cache
// - Default options
System.out.println("----------------------- Test case 3 ----------------------");
output = TestCommon.exec(appJar, use_whitebox_jar,
output = TestCommon.exec(appJar,
"--module-path",
moduleDir.toString(),
"-XX:+UnlockDiagnosticVMOptions",
"-XX:+WhiteBoxAPI",
"CheckIntegerCacheApp",
"127",
"true");
"127");
TestCommon.checkExec(output);
@ -111,30 +96,24 @@ public class ArchivedIntegerCacheTest {
// - Large archived cache
// - Matching options
System.out.println("----------------------- Test case 4 ----------------------");
output = TestCommon.exec(appJar, use_whitebox_jar,
output = TestCommon.exec(appJar,
"--module-path",
moduleDir.toString(),
"-XX:+UnlockDiagnosticVMOptions",
"-XX:+WhiteBoxAPI",
"-XX:AutoBoxCacheMax=20000",
"CheckIntegerCacheApp",
"20000",
"true");
"20000");
TestCommon.checkExec(output);
// Test case 5)
// - Large archived cache
// - Larger requested cache
System.out.println("----------------------- Test case 5 ----------------------");
output = TestCommon.exec(appJar, use_whitebox_jar,
output = TestCommon.exec(appJar,
"--module-path",
moduleDir.toString(),
"-XX:+UnlockDiagnosticVMOptions",
"-XX:+WhiteBoxAPI",
"-XX:AutoBoxCacheMax=30000",
"CheckIntegerCacheApp",
"30000",
"false");
"30000");
TestCommon.checkExec(output);
// Test case 6)
@ -146,8 +125,7 @@ public class ArchivedIntegerCacheTest {
"-XX:NewSize=1g",
"-Xlog:cds+heap=info",
"-Xlog:gc+region+cds",
"-Xlog:gc+region=trace",
use_whitebox_jar);
"-Xlog:gc+region=trace");
TestCommon.checkDump(output,
"Cannot archive the sub-graph referenced from [Ljava.lang.Integer; object");
}

View file

@ -1,189 +0,0 @@
/*
* Copyright (c) 2018, 2022, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
/*
* @test
* @summary Test archived system module sub-graph and verify objects are archived.
* @requires vm.cds.write.archived.java.heap
* @library /test/jdk/lib/testlibrary /test/lib /test/hotspot/jtreg/runtime/cds/appcds
* @build jdk.test.whitebox.WhiteBox
* @compile CheckArchivedModuleApp.java
* @run driver jdk.test.lib.helpers.ClassFileInstaller -jar app.jar CheckArchivedModuleApp
* @run driver jdk.test.lib.helpers.ClassFileInstaller -jar WhiteBox.jar jdk.test.whitebox.WhiteBox
* @run driver ArchivedModuleComboTest
*/
import java.io.File;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.Paths;
import jdk.test.lib.process.OutputAnalyzer;
import jdk.test.lib.process.ProcessTools;
import jdk.test.lib.cds.CDSTestUtils;
import jdk.test.lib.helpers.ClassFileInstaller;
import jdk.test.whitebox.WhiteBox;
public class ArchivedModuleComboTest {
public static void main(String[] args) throws Exception {
String wbJar = ClassFileInstaller.getJarPath("WhiteBox.jar");
String use_whitebox_jar = "-Xbootclasspath/a:" + wbJar;
String appJar = ClassFileInstaller.getJarPath("app.jar");
Path userDir = Paths.get(CDSTestUtils.getOutputDir());
Path moduleDir = Files.createTempDirectory(userDir, "mods");
//
// Dump without --module-path, without --show-module-resolution
//
OutputAnalyzer output = TestCommon.dump(appJar,
TestCommon.list("CheckArchivedModuleApp"),
use_whitebox_jar);
TestCommon.checkDump(output);
// Test case 1)
// - Dump without --module-path, without --show-module-resolution
// - Run from -cp only and without --show-module-resolution
// + archived boot layer module ModuleDescriptors should be used
// + archived boot layer configuration should be used
System.out.println("----------------------- Test case 1 ----------------------");
output = TestCommon.exec(appJar, use_whitebox_jar,
"-XX:+UnlockDiagnosticVMOptions",
"-XX:+WhiteBoxAPI",
"CheckArchivedModuleApp",
"yes",
"yes");
TestCommon.checkExec(output);
// Test case 2)
// - Dump without --module-path, without --show-module-resolution
// - Run from -cp only and with --show-module-resolution
// + archived boot layer module ModuleDescriptors should be used with
// --show-module-resolution (requires resolution)
// + archived boot layer Configuration should not be disabled
System.out.println("----------------------- Test case 2 ----------------------");
output = TestCommon.exec(appJar, use_whitebox_jar,
"--show-module-resolution",
"-XX:+UnlockDiagnosticVMOptions",
"-XX:+WhiteBoxAPI",
"CheckArchivedModuleApp",
"yes",
"no");
TestCommon.checkExec(output, "root java.base jrt:/java.base");
// Test case 3)
// - Dump without --module-path, without --show-module-resolution
// - Run with --module-path
// + archived boot layer module ModuleDescriptors should be disabled
// + archived boot layer Configuration should be disabled
System.out.println("----------------------- Test case 3 ----------------------");
output = TestCommon.exec(appJar, use_whitebox_jar,
"--module-path",
moduleDir.toString(),
"-XX:+UnlockDiagnosticVMOptions",
"-XX:+WhiteBoxAPI",
"CheckArchivedModuleApp",
"no",
"no");
TestCommon.checkExec(output);
//
// Dump with --module-path specified (test case 4, 5). Use an
// empty directory as it's simple and still triggers the case
// where system module objects are not archived.
//
output = TestCommon.dump(appJar,
TestCommon.list("CheckArchivedModuleApp"),
"--module-path",
moduleDir.toString(),
use_whitebox_jar);
TestCommon.checkDump(output);
// Test case 4)
// - Dump with --module-path
// - Run from -cp only, no archived boot layer module ModuleDescriptors
// and Configuration should be found.
System.out.println("----------------------- Test case 4 ----------------------");
output = TestCommon.exec(appJar, use_whitebox_jar,
"-XX:+UnlockDiagnosticVMOptions",
"-XX:+WhiteBoxAPI",
"CheckArchivedModuleApp",
"no",
"no");
TestCommon.checkExec(output);
// Test case 5)
// - Dump with --module-path
// - Run with --module-path, no archived boot layer module ModuleDescriptors
// and Configuration should be found.
System.out.println("----------------------- Test case 5 ----------------------");
output = TestCommon.exec(appJar, use_whitebox_jar,
"--module-path",
moduleDir.toString(),
"-XX:+UnlockDiagnosticVMOptions",
"-XX:+WhiteBoxAPI",
"CheckArchivedModuleApp",
"no",
"no");
TestCommon.checkExec(output);
//
// Dump without --module-path, with --show-module-resolution
//
output = TestCommon.dump(appJar,
TestCommon.list("CheckArchivedModuleApp"),
"--show-module-resolution",
use_whitebox_jar);
TestCommon.checkDump(output, "root java.base jrt:/java.base");
// Test case 6)
// - Dump without --module-path, with --show-module-resolution
// - Run from -cp only and without --show-module-resolution
// + archived boot layer module ModuleDescriptors should be used
// + archived boot layer Configuration should be used
System.out.println("----------------------- Test case 6 ----------------------");
output = TestCommon.exec(appJar, use_whitebox_jar,
"-XX:+UnlockDiagnosticVMOptions",
"-XX:+WhiteBoxAPI",
"CheckArchivedModuleApp",
"yes",
"yes");
TestCommon.checkExec(output);
// Test case 7)
// - Dump without --module-path, with --show-module-resolution
// - Run from -cp only and with --show-module-resolution
// + archived boot layer module ModuleDescriptors should be used with
// --show-module-resolution (requires resolution)
// + archived boot layer Configuration should be disabled
System.out.println("----------------------- Test case 7 ----------------------");
output = TestCommon.exec(appJar, use_whitebox_jar,
"--show-module-resolution",
"-XX:+UnlockDiagnosticVMOptions",
"-XX:+WhiteBoxAPI",
"CheckArchivedModuleApp",
"yes",
"no");
TestCommon.checkExec(output, "root java.base jrt:/java.base");
}
}

View file

@ -1,5 +1,5 @@
/*
* Copyright (c) 2018, 2022, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2018, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -27,10 +27,6 @@
* @requires vm.cds.write.archived.java.heap
* @requires vm.flagless
* @library /test/jdk/lib/testlibrary /test/lib /test/hotspot/jtreg/runtime/cds/appcds
* @build jdk.test.whitebox.WhiteBox
* @compile CheckArchivedModuleApp.java
* @run driver jdk.test.lib.helpers.ClassFileInstaller -jar app.jar CheckArchivedModuleApp
* @run driver jdk.test.lib.helpers.ClassFileInstaller -jar WhiteBox.jar jdk.test.whitebox.WhiteBox
* @run driver ArchivedModuleWithCustomImageTest
*/
@ -99,8 +95,6 @@ public class ArchivedModuleWithCustomImageTest {
private static void testArchivedModuleUsingImage(Path image)
throws Throwable {
String wbJar = ClassFileInstaller.getJarPath("WhiteBox.jar");
String use_whitebox_jar = "-Xbootclasspath/a:" + wbJar;
String appJar = ClassFileInstaller.getJarPath("app.jar");
Path customJava = Paths.get(image.toString(), "bin", "java");
@ -116,27 +110,6 @@ public class ArchivedModuleWithCustomImageTest {
pbDump, "custom.runtime.image.dump");
TestCommon.checkDump(output);
// Test case 1):
// test archived module graph objects are used with custom runtime image
System.out.println("------------------- Test case 1 -------------------");
String[] runCmd = {customJava.toString(),
use_whitebox_jar,
"-XX:SharedArchiveFile=./ArchivedModuleWithCustomImageTest.jsa",
"-cp",
appJar,
"-Xshare:on",
"-XX:+UnlockDiagnosticVMOptions",
"-XX:+WhiteBoxAPI",
"CheckArchivedModuleApp",
"yes",
"yes"};
printCommand(runCmd);
ProcessBuilder pbRun = new ProcessBuilder();
pbRun.command(runCmd);
output = TestCommon.executeAndLog(pbRun, "custom.runtime.image.run");
output.shouldHaveExitValue(0);
// Test case 2):
// verify --show-module-resolution output
System.out.println("------------------- Test case 2 -------------------");
@ -147,7 +120,7 @@ public class ArchivedModuleWithCustomImageTest {
"--show-module-resolution",
"-version"};
printCommand(showModuleCmd1);
pbRun = new ProcessBuilder();
ProcessBuilder pbRun = new ProcessBuilder();
pbRun.command(showModuleCmd1);
output = TestCommon.executeAndLog(
pbRun, "custom.runtime.image.showModuleResolution.nocds");

View file

@ -1,152 +0,0 @@
/*
* Copyright (c) 2018, 2022, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
import java.io.File;
import java.lang.module.Configuration;
import java.lang.module.ModuleDescriptor;
import java.util.List;
import java.util.Set;
import jdk.test.whitebox.WhiteBox;
//
// Test archived system module graph when open archive heap objects are mapped:
//
public class CheckArchivedModuleApp {
static WhiteBox wb;
public static void main(String args[]) throws Exception {
wb = WhiteBox.getWhiteBox();
if (!wb.areOpenArchiveHeapObjectsMapped()) {
System.out.println("Archived open_archive_heap objects are not mapped.");
System.out.println("This may happen during normal operation. Test Skipped.");
return;
}
if (args.length != 2) {
throw new RuntimeException(
"FAILED. Incorrect argument length: " + args.length);
}
boolean expectArchivedDescriptors = "yes".equals(args[0]);
boolean expectArchivedConfiguration = "yes".equals(args[1]);
// -XX:+EnableJVMCI adds extra system modules, in which case the system
// module objects are not archived.
Boolean enableJVMCI = wb.getBooleanVMFlag("EnableJVMCI");
if (enableJVMCI != null && enableJVMCI) {
expectArchivedDescriptors = false;
expectArchivedConfiguration = false;
}
checkModuleDescriptors(expectArchivedDescriptors);
checkConfiguration(expectArchivedConfiguration);
checkEmptyConfiguration(expectArchivedConfiguration);
checkEmptyLayer();
}
private static void checkModuleDescriptors(boolean expectArchivedDescriptors) {
Set<Module> modules = ModuleLayer.boot().modules();
for (Module m : modules) {
ModuleDescriptor md = m.getDescriptor();
String name = md.name();
if (expectArchivedDescriptors) {
if (wb.isShared(md)) {
System.out.println(name + " is archived. Expected.");
} else {
throw new RuntimeException(
"FAILED. " + name + " is not archived. Expect archived.");
}
} else {
if (!wb.isShared(md)) {
System.out.println(name + " is not archived. Expected.");
} else {
throw new RuntimeException(
"FAILED. " + name + " is archived. Expect not archived.");
}
}
}
}
private static void checkEmptyConfiguration(boolean expectArchivedConfiguration) {
// Configuration.EMPTY_CONFIGURATION uses the singletons,
// ListN.EMPTY_LIST, SetN.EMPTY_SET and MapN.EMPTY_MAP in
// ImmutableCollections for the 'parents', 'modules' and
// 'graph' fields. The ImmutableCollections singletons
// can be accessed via List.of(), Set.of() and Map.of() APIs.
// Configuration public APIs also allow access to the
// EMPTY_CONFIGURATION's 'parents' and 'modules'. When the
// archived java heap data is enabled at runtime, make sure
// the EMPTY_CONFIGURATION.parents and EMPTY_CONFIGURATION.modules
// are the archived ImmutableCollections singletons.
Configuration emptyCf = Configuration.empty();
List emptyCfParents = emptyCf.parents();
Set emptyCfModules = emptyCf.modules();
if (expectArchivedConfiguration) {
if (emptyCfParents == List.of() &&
wb.isShared(emptyCfParents)) {
System.out.println("Empty Configuration has expected parents.");
} else {
throw new RuntimeException(
"FAILED. Unexpected parents for empty Configuration.");
}
if (emptyCfModules == Set.of() &&
wb.isShared(emptyCfModules)) {
System.out.println("Empty Configuration has expected module set.");
} else {
throw new RuntimeException(
"FAILED. Unexpected module set for empty Configuration.");
}
}
}
private static void checkConfiguration(boolean expectArchivedConfiguration) {
Configuration cf = ModuleLayer.boot().configuration();
if (expectArchivedConfiguration) {
if (wb.isShared(cf)) {
System.out.println("Boot layer configuration is archived. Expected.");
} else {
throw new RuntimeException(
"FAILED. Boot layer configuration is not archived.");
}
} else {
if (!wb.isShared(cf)) {
System.out.println("Boot layer configuration is not archived. Expected.");
} else {
throw new RuntimeException(
"FAILED. Boot layer configuration is archived.");
}
}
}
private static void checkEmptyLayer() {
// ModuleLayer.EMPTY_FIELD returned by empty() method is singleton.
// Check that with CDS there is still a single instance of EMPTY_LAYER
// and boot() layer parent is THE empty layer.
if (ModuleLayer.empty() != ModuleLayer.boot().parents().get(0)) {
throw new RuntimeException("FAILED. Empty module layer is not singleton");
}
}
}

View file

@ -1,92 +0,0 @@
/*
* Copyright (c) 2018, 2022, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
import java.io.File;
import java.net.URL;
import java.net.URLClassLoader;
import jdk.test.whitebox.WhiteBox;
//
// Test class mirror objects are cached when open archive heap objects are mapped:
// - Well-known shared library classes:
// java.lang.Object
// java.lang.String
// - Shared application class loaded by the system class loader
// - Shared application class loaded user defined class loader
//
public class CheckCachedMirrorApp {
static WhiteBox wb;
public static void main(String args[]) throws Exception {
String path = args[0];
URL url = new File(path).toURI().toURL();
URL[] urls = new URL[] {url};
URLClassLoader loader = new URLClassLoader(urls);
Class hello = loader.loadClass("Hello");
System.out.println("Loaded " + hello + " from " + url + " using loader " + loader);
wb = WhiteBox.getWhiteBox();
if (!wb.areOpenArchiveHeapObjectsMapped()) {
System.out.println("Archived open_archive_heap objects are not mapped.");
System.out.println("This may happen during normal operation. Test Skipped.");
return;
}
// Well-known shared library classes
Class object_class = Object.class;
checkMirror(object_class, true);
Class string_class = String.class;
checkMirror(string_class, true);
// Shared app class
Class app_class = CheckCachedMirrorApp.class;
checkMirror(app_class, true);
// Hello is shared class and loaded by the 'loader' defined in current app.
// It should not have cached resolved_references.
Class class_with_user_defined_loader = hello;
checkMirror(class_with_user_defined_loader, false);
}
static void checkMirror(Class c, boolean mirrorShouldBeArchived) {
System.out.print("Check cached mirror for " + c);
if (wb.isSharedClass(c)) {
// Check if the Class object is cached
if (mirrorShouldBeArchived && wb.isShared(c)) {
System.out.println(c + " mirror is cached. Expected.");
} else if (!mirrorShouldBeArchived && !wb.isShared(c)) {
System.out.println(c + " mirror is not cached. Expected.");
} else if (mirrorShouldBeArchived && !wb.isShared(c)) {
throw new RuntimeException(
"FAILED. " + c + " mirror is not cached.");
} else {
throw new RuntimeException(
"FAILED. " + c + " mirror should not be cached.");
}
} else {
System.out.println("Class " + c + " is not shared, skipping the check for mirror");
}
}
}

View file

@ -1,66 +0,0 @@
/*
* Copyright (c) 2018, 2022, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
/*
* @test
* @summary Test archived mirror
* @requires vm.cds.write.archived.java.heap
* @requires vm.cds.custom.loaders
* @library /test/lib /test/hotspot/jtreg/runtime/cds/appcds
* @build jdk.test.whitebox.WhiteBox
* @compile CheckCachedMirrorApp.java
* @compile ../test-classes/Hello.java
* @run driver jdk.test.lib.helpers.ClassFileInstaller -jar app.jar CheckCachedMirrorApp
* @run driver jdk.test.lib.helpers.ClassFileInstaller -jar hello.jar Hello
* @run driver jdk.test.lib.helpers.ClassFileInstaller -jar WhiteBox.jar jdk.test.whitebox.WhiteBox
* @run driver CheckCachedMirrorTest
*/
import jdk.test.lib.process.OutputAnalyzer;
import jdk.test.lib.helpers.ClassFileInstaller;
import jdk.test.whitebox.WhiteBox;
public class CheckCachedMirrorTest {
public static void main(String[] args) throws Exception {
String wbJar = ClassFileInstaller.getJarPath("WhiteBox.jar");
String use_whitebox_jar = "-Xbootclasspath/a:" + wbJar;
String appJar = ClassFileInstaller.getJarPath("app.jar");
String helloJarPath = ClassFileInstaller.getJarPath("hello.jar");
String classlist[] = new String[] {
"CheckCachedMirrorApp", // built-in app loader
"java/lang/Object id: 1", // boot loader
"Hello id: 2 super: 1 source: " + helloJarPath // custom loader
};
TestCommon.testDump(appJar, classlist, use_whitebox_jar);
OutputAnalyzer output = TestCommon.exec(appJar, use_whitebox_jar,
"-XX:+UnlockDiagnosticVMOptions",
"-XX:+WhiteBoxAPI",
"-Xlog:cds=debug",
"CheckCachedMirrorApp",
helloJarPath);
TestCommon.checkExec(output);
}
}

View file

@ -1,65 +0,0 @@
/*
* Copyright (c) 2017, 2022, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
/*
* @test
* @summary Test resolved_references
* @requires vm.cds
* @requires vm.cds.custom.loaders
* @library /test/lib /test/hotspot/jtreg/runtime/cds/appcds
* @build jdk.test.whitebox.WhiteBox
* @compile CheckCachedResolvedReferencesApp.java
* @compile ../test-classes/Hello.java
* @run driver jdk.test.lib.helpers.ClassFileInstaller -jar app.jar CheckCachedResolvedReferencesApp
* @run driver jdk.test.lib.helpers.ClassFileInstaller -jar hello.jar Hello
* @run driver jdk.test.lib.helpers.ClassFileInstaller -jar WhiteBox.jar jdk.test.whitebox.WhiteBox
* @run driver CheckCachedResolvedReferences
*/
import jdk.test.lib.process.OutputAnalyzer;
import jdk.test.lib.helpers.ClassFileInstaller;
import jdk.test.whitebox.WhiteBox;
public class CheckCachedResolvedReferences {
public static void main(String[] args) throws Exception {
String wbJar = ClassFileInstaller.getJarPath("WhiteBox.jar");
String use_whitebox_jar = "-Xbootclasspath/a:" + wbJar;
String appJar = ClassFileInstaller.getJarPath("app.jar");
String helloJarPath = ClassFileInstaller.getJarPath("hello.jar");
String classlist[] = new String[] {
"CheckCachedResolvedReferencesApp", // built-in app loader
"java/lang/Object id: 1", // boot loader
"Hello id: 2 super: 1 source: " + helloJarPath // custom loader
};
TestCommon.testDump(appJar, classlist, use_whitebox_jar);
OutputAnalyzer output = TestCommon.exec(appJar, use_whitebox_jar,
"-XX:+UnlockDiagnosticVMOptions",
"-XX:+WhiteBoxAPI",
"CheckCachedResolvedReferencesApp",
helloJarPath);
TestCommon.checkExec(output);
}
}

View file

@ -1,77 +0,0 @@
/*
* Copyright (c) 2017, 2022, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
import java.io.File;
import java.net.URL;
import java.net.URLClassLoader;
import jdk.test.whitebox.WhiteBox;
public class CheckCachedResolvedReferencesApp {
public static void main(String args[]) throws Exception {
String path = args[0];
URL url = new File(path).toURI().toURL();
URL[] urls = new URL[] {url};
URLClassLoader loader = new URLClassLoader(urls);
Class hello = loader.loadClass("Hello");
System.out.println("Loaded " + hello + " from " + url + " using loader " + loader);
WhiteBox wb = WhiteBox.getWhiteBox();
if (!wb.areOpenArchiveHeapObjectsMapped()) {
System.out.println("Archived open_archive_heap objects are not mapped.");
System.out.println("This may happen during normal operation. Test Skipped.");
return;
}
// CheckCachedResolvedReferencesApp is a shared class loaded by the
// AppClassLoader. It should have cached resolved_references.
if (wb.isSharedClass(CheckCachedResolvedReferencesApp.class)) {
Object refs1 = wb.getResolvedReferences(CheckCachedResolvedReferencesApp.class);
if (refs1 != null && wb.isShared(refs1)) {
System.out.println(
"resolved references from CheckCachedResolvedReferencesApp is cached");
} else {
throw new RuntimeException(
"FAILED. CheckCachedResolvedReferencesApp has no cached resolved references");
}
}
// Hello is a shared class loaded by the 'loader' defined in the current app.
// It should not have cached resolved_references.
if (wb.isSharedClass(hello)) {
Object refs2 = wb.getResolvedReferences(hello);
if (refs2 != null) {
if (!wb.isShared(refs2)) {
System.out.println("resolved references from hello is not cached");
} else {
throw new RuntimeException(
"FAILED. Hello has unexpected cached resolved references");
}
} else {
throw new RuntimeException("FAILED. Hello has no resolved references");
}
}
}
}

View file

@ -1,5 +1,5 @@
/*
* Copyright (c) 2018, 2022, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2018, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -22,35 +22,18 @@
*
*/
import jdk.test.whitebox.WhiteBox;
//
// Help test archived box cache consistency.
//
// Takes two arguments:
// 0: the expected maximum value expected to be archived
// 1: if the values are expected to be retrieved from the archive or not
// (only applies to IntegerCache; other caches should always be mapped
// from archive)
// args[0]: the maximum value expected to be archived
//
public class CheckIntegerCacheApp {
static WhiteBox wb;
public static void main(String[] args) throws Exception {
wb = WhiteBox.getWhiteBox();
if (!wb.areOpenArchiveHeapObjectsMapped()) {
System.out.println("This may happen during normal operation. Test Skipped.");
return;
}
if (args.length != 2) {
if (args.length != 1) {
throw new RuntimeException(
"FAILED. Incorrect argument length: " + args.length);
}
boolean archivedExpected = Boolean.parseBoolean(args[1]);
// Base JLS compliance check
for (int i = -128; i <= 127; i++) {
if (Integer.valueOf(i) != Integer.valueOf(i)) {
@ -69,10 +52,6 @@ public class CheckIntegerCacheApp {
throw new RuntimeException(
"FAILED. All Long values in range [-128, 127] should be interned in cache: " + i);
}
checkArchivedAsExpected(archivedExpected, Integer.valueOf(i));
checkArchivedAsExpected(true, Byte.valueOf((byte)i));
checkArchivedAsExpected(true, Short.valueOf((short)i));
checkArchivedAsExpected(true, Long.valueOf(i));
// Character cache only covers values 0 through 127
if (i >= 0) {
@ -80,7 +59,6 @@ public class CheckIntegerCacheApp {
throw new RuntimeException(
"FAILED. All Character values in range [0, 127] should be interned in cache: " + i);
}
checkArchivedAsExpected(true, Character.valueOf((char)i));
}
}
@ -89,31 +67,10 @@ public class CheckIntegerCacheApp {
throw new RuntimeException(
"FAILED. Value expected to be retrieved from cache: " + high);
}
checkArchivedAsExpected(archivedExpected, Integer.valueOf(high));
if (Integer.valueOf(high + 1) == Integer.valueOf(high + 1)) {
throw new RuntimeException(
"FAILED. Value not expected to be retrieved from cache: " + high);
}
checkArchivedAsExpected(false, Integer.valueOf(high + 1));
checkArchivedAsExpected(false, Short.valueOf((short)128));
checkArchivedAsExpected(false, Long.valueOf(128));
checkArchivedAsExpected(false, Character.valueOf((char)128));
}
private static void checkArchivedAsExpected(boolean archivedExpected, Object value) {
if (archivedExpected) {
if (!wb.isShared(value)) {
throw new RuntimeException(
"FAILED. Value expected to be archived: " + value +
" of type " + value.getClass().getName());
}
} else {
if (wb.isShared(value)) {
throw new RuntimeException(
"FAILED. Value not expected to be archived: " + value +
" of type " + value.getClass().getName());
}
}
}
}

View file

@ -1,94 +0,0 @@
/*
* Copyright (c) 2017, 2022, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
import java.io.*;
import java.util.*;
import jdk.test.lib.Utils;
import jdk.test.whitebox.WhiteBox;
// All strings in archived classes are shared
public class GCStressApp {
static WhiteBox wb = WhiteBox.getWhiteBox();
static int[] arr;
static String get_shared_string() {
String shared_str = "GCStressApp_shared_string";
return shared_str;
}
static String get_shared_string1() {
String shared_str1 = "GCStressApp_shared_string1";
return shared_str1;
}
static void allocAlot() {
try {
Random random = Utils.getRandomInstance();
for (int i = 0; i < 1024 * 1024; i++) {
int len = random.nextInt(10000);
arr = new int[len];
}
} catch (java.lang.OutOfMemoryError e) { }
}
static void runGC() {
wb.fullGC();
}
public static void main(String args[]) throws Exception {
if (!wb.isSharedClass(GCStressApp.class)) {
System.out.println("GCStressApp is not shared. Possibly there was a mapping failure.");
return;
}
if (!wb.areSharedStringsMapped()) {
System.out.println("Shared strings are not mapped.");
return;
}
Object refs = wb.getResolvedReferences(GCStressApp.class);
if (wb.isShared(refs)) {
String shared_str = get_shared_string();
String shared_str1 = get_shared_string1();
if (!wb.isShared(shared_str)) {
throw new RuntimeException("FAILED. GCStressApp_shared_string is not shared");
}
if (!wb.isShared(shared_str1)) {
throw new RuntimeException("FAILED. GCStressApp_shared_string1 is not shared");
}
allocAlot();
runGC();
runGC();
runGC();
System.out.println("Passed");
} else {
System.out.println(
"No cached resolved references. Open archive heap data is not used.");
}
}
}

View file

@ -1,58 +0,0 @@
/*
* Copyright (c) 2017, 2022, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
/*
* @test
* @key randomness
* @summary
* @requires vm.cds.write.archived.java.heap
* @library /test/lib /test/hotspot/jtreg/runtime/cds/appcds
* @build jdk.test.whitebox.WhiteBox jdk.test.lib.Utils
* @compile GCStressApp.java
* @run driver jdk.test.lib.helpers.ClassFileInstaller -jar gcstress.jar GCStressApp jdk.test.lib.Utils
* @run driver jdk.test.lib.helpers.ClassFileInstaller -jar WhiteBox.jar jdk.test.whitebox.WhiteBox
* @run driver GCStressTest
*/
import jdk.test.lib.process.OutputAnalyzer;
import jdk.test.lib.helpers.ClassFileInstaller;
public class GCStressTest {
public static void main(String[] args) throws Exception {
String wbJar = ClassFileInstaller.getJarPath("WhiteBox.jar");
String use_whitebox_jar = "-Xbootclasspath/a:" + wbJar;
String appJar = ClassFileInstaller.getJarPath("gcstress.jar");
String appClasses[] = TestCommon.list("GCStressApp");
OutputAnalyzer output = TestCommon.dump(appJar, appClasses,
use_whitebox_jar,
"-Xms20M", "-Xmx20M");
output = TestCommon.exec(appJar, use_whitebox_jar,
"-Xlog:cds=info",
"-Xms20M", "-Xmx20M",
"-XX:+UnlockDiagnosticVMOptions",
"-XX:+WhiteBoxAPI","GCStressApp");
TestCommon.checkExec(output);
}
}

View file

@ -1,5 +1,5 @@
/*
* Copyright (c) 2018, 2022, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2018, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -64,17 +64,7 @@ public class MirrorWithReferenceFieldsApp {
public void test(WhiteBox wb) {
Class c = MirrorWithReferenceFieldsApp.class;
if (wb.isSharedClass(c)) {
// Check if the Class object is cached
if (wb.isShared(c)) {
System.out.println(c + " mirror is cached. Expected.");
} else {
throw new RuntimeException(
"FAILED. " + c + " mirror should be cached.");
}
// Check fields
if (wb.isShared(archived_field)) {
if (wb.isSharedInternedString(archived_field)) {
System.out.println("archived_field is archived as excepted");
} else {
throw new RuntimeException(

View file

@ -1,5 +1,5 @@
/*
* Copyright (c) 2017, 2021, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2017, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -45,8 +45,8 @@ public class OpenArchiveRegion {
// Dump with open archive heap region, requires G1 GC
OutputAnalyzer output = TestCommon.dump(appJar, appClasses, "-Xlog:cds=debug");
TestCommon.checkDump(output, "oa0 space:");
output.shouldNotContain("oa0 space: 0 [");
TestCommon.checkDump(output, "hp space:");
output.shouldNotContain("hp space: 0 [");
output = TestCommon.exec(appJar, "Hello");
TestCommon.checkExec(output, "Hello World");
output = TestCommon.exec(appJar, "-XX:+UseSerialGC", "Hello");
@ -55,7 +55,7 @@ public class OpenArchiveRegion {
// Dump with open archive heap region disabled when G1 GC is not in use
output = TestCommon.dump(appJar, appClasses, "-XX:+UseParallelGC");
TestCommon.checkDump(output);
output.shouldNotContain("oa0 space:");
output.shouldNotContain("hp space:");
output = TestCommon.exec(appJar, "Hello");
TestCommon.checkExec(output, "Hello World");
}

View file

@ -1,205 +0,0 @@
/*
* Copyright (c) 2018, 2022, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
import java.lang.reflect.Field;
import jdk.test.whitebox.WhiteBox;
//
// Test that primitive type class mirror objects are cached when open archive heap
// objects are mapped.
//
public class PrimitiveTypesApp {
public static void main(String[] args) {
WhiteBox wb = WhiteBox.getWhiteBox();
if (!wb.areOpenArchiveHeapObjectsMapped()) {
System.out.println("Archived open_archive_heap objects are not mapped.");
System.out.println("This may happen during normal operation. Test Skipped.");
return;
}
FieldsTest ft = new FieldsTest();
ft.testBoolean(wb);
ft.testByte(wb);
ft.testChar(wb);
ft.testInt(wb);
ft.testShort(wb);
ft.testLong(wb);
ft.testFloat(wb);
ft.testDouble(wb);
}
}
class FieldsTest {
public boolean f_boolean;
public byte f_byte;
public char f_char;
public int f_int;
public short f_short;
public long f_long;
public float f_float;
public double f_double;
FieldsTest() {
f_byte = 1;
f_boolean = false;
f_char = 'a';
f_int = 1;
f_short = 100;
f_long = 2018L;
f_float = 1.0f;
f_double = 2.5;
}
void testBoolean(WhiteBox wb) {
try {
Field f = this.getClass().getDeclaredField("f_boolean");
f.setBoolean(this, true);
if (!f_boolean) {
throw new RuntimeException("FAILED. Field f_boolean has unexpected value: " + f_boolean);
}
checkPrimitiveType(wb, f, Boolean.TYPE);
} catch (NoSuchFieldException nsfe) {
throw new RuntimeException(nsfe);
} catch (IllegalAccessException iae) {
throw new RuntimeException(iae);
}
}
void testByte(WhiteBox wb) {
try {
Field f = this.getClass().getDeclaredField("f_byte");
f.setByte(this, (byte)9);
if (f_byte != (byte)9) {
throw new RuntimeException("FAILED. Field f_byte has unexpected value: " + f_byte);
}
checkPrimitiveType(wb, f, Byte.TYPE);
} catch (NoSuchFieldException nsfe) {
throw new RuntimeException(nsfe);
} catch (IllegalAccessException iae) {
throw new RuntimeException(iae);
}
}
void testChar(WhiteBox wb) {
try {
Field f = this.getClass().getDeclaredField("f_char");
f.setChar(this, 'b');
if (f_char != 'b') {
throw new RuntimeException("FAILED. Field f_char has unexpected value: " + f_char);
}
checkPrimitiveType(wb, f, Character.TYPE);
} catch (NoSuchFieldException nsfe) {
throw new RuntimeException(nsfe);
} catch (IllegalAccessException iae) {
throw new RuntimeException(iae);
}
}
void testInt(WhiteBox wb) {
try {
Field f = this.getClass().getDeclaredField("f_int");
f.setInt(this, 9999);
if (f_int != 9999) {
throw new RuntimeException("FAILED. Field f_int has unexpected value: " + f_int);
}
checkPrimitiveType(wb, f, Integer.TYPE);
} catch (NoSuchFieldException nsfe) {
throw new RuntimeException(nsfe);
} catch (IllegalAccessException iae) {
throw new RuntimeException(iae);
}
}
void testShort(WhiteBox wb) {
try {
Field f = this.getClass().getDeclaredField("f_short");
f.setShort(this, (short)99);
if (f_short != 99) {
throw new RuntimeException("FAILED. Field f_short has unexpected value: " + f_short);
}
checkPrimitiveType(wb, f, Short.TYPE);
} catch (NoSuchFieldException nsfe) {
throw new RuntimeException(nsfe);
} catch (IllegalAccessException iae) {
throw new RuntimeException(iae);
}
}
void testLong(WhiteBox wb) {
try {
Field f = this.getClass().getDeclaredField("f_long");
f.setLong(this, 99L);
if (f_long != 99L) {
throw new RuntimeException("FAILED. Field f_long has unexpected value: " + f_long);
}
checkPrimitiveType(wb, f, Long.TYPE);
} catch (NoSuchFieldException nsfe) {
throw new RuntimeException(nsfe);
} catch (IllegalAccessException iae) {
throw new RuntimeException(iae);
}
}
void testFloat(WhiteBox wb) {
try {
Field f = this.getClass().getDeclaredField("f_float");
f.setFloat(this, 9.9f);
if (f_float != 9.9f) {
throw new RuntimeException("FAILED. Field f_float has unexpected value: " + f_float);
}
checkPrimitiveType(wb, f, Float.TYPE);
} catch (NoSuchFieldException nsfe) {
throw new RuntimeException(nsfe);
} catch (IllegalAccessException iae) {
throw new RuntimeException(iae);
}
}
void testDouble(WhiteBox wb) {
try {
Field f = this.getClass().getDeclaredField("f_double");
f.setDouble(this, 9.9);
if (f_double != 9.9) {
throw new RuntimeException("FAILED. Field f_double has unexpected value: " + f_double);
}
checkPrimitiveType(wb, f, Double.TYPE);
} catch (NoSuchFieldException nsfe) {
throw new RuntimeException(nsfe);
} catch (IllegalAccessException iae) {
throw new RuntimeException(iae);
}
}
void checkPrimitiveType(WhiteBox wb, Field f, Class t) {
Class c = f.getType();
if (!(c.isPrimitive() && c == t)) {
throw new RuntimeException("FAILED. " + c + " is not primitive type " + t);
}
if (wb.isShared(c)) {
System.out.println(c + " is cached, expected");
} else {
throw new RuntimeException("FAILED. " + c + " is not cached.");
}
}
}

View file

@ -1,60 +0,0 @@
/*
* Copyright (c) 2018, 2022, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
/*
* @test
* @summary Test archived primitive type mirrors
* @requires vm.cds.write.archived.java.heap
* @library /test/lib /test/hotspot/jtreg/runtime/cds/appcds
* @build jdk.test.whitebox.WhiteBox
* @compile PrimitiveTypesApp.java
* @run driver jdk.test.lib.helpers.ClassFileInstaller -jar app.jar PrimitiveTypesApp FieldsTest
* @run driver jdk.test.lib.helpers.ClassFileInstaller -jar WhiteBox.jar jdk.test.whitebox.WhiteBox
* @run driver PrimitiveTypesTest
*/
import jdk.test.lib.process.OutputAnalyzer;
import jdk.test.lib.helpers.ClassFileInstaller;
import jdk.test.whitebox.WhiteBox;
public class PrimitiveTypesTest {
public static void main(String[] args) throws Exception {
String wbJar = ClassFileInstaller.getJarPath("WhiteBox.jar");
String use_whitebox_jar = "-Xbootclasspath/a:" + wbJar;
String appJar = ClassFileInstaller.getJarPath("app.jar");
String classlist[] = new String[] {
"PrimitiveTypesApp",
"FieldsTest"
};
TestCommon.testDump(appJar, classlist, use_whitebox_jar);
OutputAnalyzer output = TestCommon.exec(appJar, use_whitebox_jar,
"-XX:+UnlockDiagnosticVMOptions",
"-XX:+WhiteBoxAPI",
"-XX:+VerifyAfterGC",
"PrimitiveTypesApp");
TestCommon.checkExec(output);
}
}

View file

@ -1,5 +1,5 @@
/*
* Copyright (c) 2017, 2022, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2017, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -91,7 +91,7 @@ public class RedefineClassApp {
static void checkArchivedMirrorObject(Class klass) {
if (wb.areOpenArchiveHeapObjectsMapped()) {
if (!wb.isShared(klass)) {
if (!wb.isSharedClass(klass)) {
failed ++;
System.out.println("FAILED. " + klass + " mirror object is not archived");
return;
@ -117,14 +117,10 @@ public class RedefineClassApp {
// Call get() before redefine. All strings in archived classes are shared.
String res = object.get();
System.out.println("get() returns " + res);
if (res.equals("buzz") && wb.isShared(res)) {
System.out.println("get() returns " + res + ", string is shared");
if (res.equals("buzz")) {
System.out.println("get() returns " + res);
} else {
if (!res.equals("buzz")) {
System.out.println("FAILED. buzz is expected but got " + res);
} else {
System.out.println("FAILED. " + res + " is not shared");
}
failed ++;
return;
}

View file

@ -1,5 +1,5 @@
/*
* Copyright (c) 2021, 2022, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2021, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -75,10 +75,7 @@ public class CDSArchiveUtils {
"rw", // ReadWrite
"ro", // ReadOnly
"bm", // relocation bitmaps
"first_closed_archive",
"last_closed_archive",
"first_open_archive",
"last_open_archive"
"hp", // heap
};
private static int num_regions = shared_region_name.length;

View file

@ -688,7 +688,6 @@ public class WhiteBox {
public native String getDefaultArchivePath();
public native boolean cdsMemoryMappingFailed();
public native boolean isSharingEnabled();
public native boolean isShared(Object o);
public native boolean isSharedClass(Class<?> c);
public native boolean areSharedStringsMapped();
public native boolean isSharedInternedString(String s);
@ -696,7 +695,6 @@ public class WhiteBox {
public native boolean isJFRIncluded();
public native boolean isDTraceIncluded();
public native boolean canWriteJavaHeapArchive();
public native Object getResolvedReferences(Class<?> c);
public native void linkClass(Class<?> c);
public native boolean areOpenArchiveHeapObjectsMapped();