commit 2c28ff340a
Daniel D. Daugherty, 2013-08-16 10:06:58 -07:00
35 changed files with 876 additions and 554 deletions

src/share/vm/memory/filemap.cpp

@@ -362,15 +362,12 @@ bool FileMapInfo::remap_shared_readonly_as_readwrite() {
ReservedSpace FileMapInfo::reserve_shared_memory() {
struct FileMapInfo::FileMapHeader::space_info* si = &_header._space[0];
char* requested_addr = si->_base;
-size_t alignment = os::vm_allocation_granularity();
-size_t size = align_size_up(SharedReadOnlySize + SharedReadWriteSize +
-SharedMiscDataSize + SharedMiscCodeSize,
-alignment);
+size_t size = FileMapInfo::shared_spaces_size();
// Reserve the space first, then map otherwise map will go right over some
// other reserved memory (like the code cache).
-ReservedSpace rs(size, alignment, false, requested_addr);
+ReservedSpace rs(size, os::vm_allocation_granularity(), false, requested_addr);
if (!rs.is_reserved()) {
fail_continue(err_msg("Unable to reserve shared space at required address " INTPTR_FORMAT, requested_addr));
return rs;
@@ -559,3 +556,19 @@ void FileMapInfo::print_shared_spaces() {
si->_base, si->_base + si->_used);
}
}
// Unmap mapped regions of shared space.
void FileMapInfo::stop_sharing_and_unmap(const char* msg) {
FileMapInfo *map_info = FileMapInfo::current_info();
if (map_info) {
map_info->fail_continue(msg);
for (int i = 0; i < MetaspaceShared::n_regions; i++) {
if (map_info->_header._space[i]._base != NULL) {
map_info->unmap_region(i);
map_info->_header._space[i]._base = NULL;
}
}
} else if (DumpSharedSpaces) {
fail_stop(msg, NULL);
}
}
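For context: stop_sharing_and_unmap() is the failure path for the address arithmetic introduced elsewhere in this changeset. The metaspace.cpp hunk below invokes it when the compressed class space cannot be reserved at an address compatible with the mapped archive:

    // From the metaspace.cpp hunk later in this commit:
    if (UseSharedSpaces && !can_use_cds_with_metaspace_addr(metaspace_rs.base(), cds_base)) {
      FileMapInfo::stop_sharing_and_unmap(
          "Could not allocate metaspace at a compatible address");
    }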

src/share/vm/memory/filemap.hpp

@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2003, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2013, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -150,6 +150,15 @@ public:
// Return true if given address is in the mapped shared space.
bool is_in_shared_space(const void* p) NOT_CDS_RETURN_(false);
void print_shared_spaces() NOT_CDS_RETURN;
static size_t shared_spaces_size() {
return align_size_up(SharedReadOnlySize + SharedReadWriteSize +
SharedMiscDataSize + SharedMiscCodeSize,
os::vm_allocation_granularity());
}
// Stop CDS sharing and unmap CDS regions.
static void stop_sharing_and_unmap(const char* msg);
};
#endif // SHARE_VM_MEMORY_FILEMAP_HPP
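The new shared_spaces_size() helper centralizes a sum that previously appeared inline in both reserve_shared_memory() and Metaspace::global_initialize(), so the mapping code and the metaspace reservation code now agree on one value. As a minimal stand-alone sketch of the rounding it performs (this align_size_up is a reimplementation for illustration, not the HotSpot macro itself):

    #include <cstddef>

    // Round size up to the next multiple of alignment (a power of two).
    static size_t align_size_up(size_t size, size_t alignment) {
      return (size + alignment - 1) & ~(alignment - 1);
    }

    int main() {
      // e.g. a 100000-byte total rounded up to a 64K allocation granularity
      return align_size_up(100000, 65536) == 131072 ? 0 : 1;
    }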

src/share/vm/memory/heap.cpp

@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -118,9 +118,12 @@ bool CodeHeap::reserve(size_t reserved_size, size_t committed_size,
_number_of_committed_segments = size_to_segments(_memory.committed_size());
_number_of_reserved_segments = size_to_segments(_memory.reserved_size());
assert(_number_of_reserved_segments >= _number_of_committed_segments, "just checking");
const size_t reserved_segments_alignment = MAX2((size_t)os::vm_page_size(), granularity);
const size_t reserved_segments_size = align_size_up(_number_of_reserved_segments, reserved_segments_alignment);
const size_t committed_segments_size = align_to_page_size(_number_of_committed_segments);
// reserve space for _segmap
-if (!_segmap.initialize(align_to_page_size(_number_of_reserved_segments), align_to_page_size(_number_of_committed_segments))) {
+if (!_segmap.initialize(reserved_segments_size, committed_segments_size)) {
return false;
}
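The CodeHeap fix above matters on platforms where the reservation granularity exceeds the page size (notably Windows, 64K vs 4K): the reserved end of _segmap must be granularity-aligned while commits stay page-aligned. A self-contained sketch of the two alignments (the constants are illustrative; the real values come from os::vm_page_size() and os::vm_allocation_granularity()):

    #include <cstddef>

    static size_t align_up(size_t size, size_t alignment) {
      return (size + alignment - 1) & ~(alignment - 1);
    }

    int main() {
      const size_t page = 4096, granularity = 65536;  // typical Windows values
      const size_t segments = 100000;
      size_t reserved  = align_up(segments, granularity > page ? granularity : page);
      size_t committed = align_up(segments, page);
      return reserved >= committed ? 0 : 1;  // 131072 reserved, 102400 committed
    }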

src/share/vm/memory/metaspace.cpp

@@ -35,6 +35,7 @@
#include "memory/resourceArea.hpp"
#include "memory/universe.hpp"
#include "runtime/globals.hpp"
#include "runtime/java.hpp"
#include "runtime/mutex.hpp"
#include "runtime/orderAccess.hpp"
#include "services/memTracker.hpp"
@@ -54,6 +55,8 @@ size_t const allocation_from_dictionary_limit = 64 * K;
MetaWord* last_allocated = 0;
size_t Metaspace::_class_metaspace_size;
// Used in declarations in SpaceManager and ChunkManager
enum ChunkIndex {
ZeroIndex = 0,
@@ -261,10 +264,6 @@ class VirtualSpaceNode : public CHeapObj<mtClass> {
// count of chunks contained in this VirtualSpace
uintx _container_count;
-// Convenience functions for logical bottom and end
-MetaWord* bottom() const { return (MetaWord*) _virtual_space.low(); }
-MetaWord* end() const { return (MetaWord*) _virtual_space.high(); }
// Convenience functions to access the _virtual_space
char* low() const { return virtual_space()->low(); }
char* high() const { return virtual_space()->high(); }
@@ -284,6 +283,10 @@ class VirtualSpaceNode : public CHeapObj<mtClass> {
VirtualSpaceNode(ReservedSpace rs) : _top(NULL), _next(NULL), _rs(rs), _container_count(0) {}
~VirtualSpaceNode();
+// Convenience functions for logical bottom and end
+MetaWord* bottom() const { return (MetaWord*) _virtual_space.low(); }
+MetaWord* end() const { return (MetaWord*) _virtual_space.high(); }
// address of next available space in _virtual_space;
// Accessors
VirtualSpaceNode* next() { return _next; }
@@ -1313,7 +1316,8 @@ bool MetaspaceGC::should_expand(VirtualSpaceList* vsl, size_t word_size) {
// Class virtual space should always be expanded. Call GC for the other
// metadata virtual space.
-if (vsl == Metaspace::class_space_list()) return true;
+if (Metaspace::using_class_space() &&
+(vsl == Metaspace::class_space_list())) return true;
// If this is part of an allocation after a GC, expand
// unconditionally.
@@ -2257,7 +2261,7 @@ void SpaceManager::deallocate(MetaWord* p, size_t word_size) {
size_t raw_word_size = get_raw_word_size(word_size);
size_t min_size = TreeChunk<Metablock, FreeList>::min_size();
assert(raw_word_size >= min_size,
-err_msg("Should not deallocate dark matter " SIZE_FORMAT, word_size));
+err_msg("Should not deallocate dark matter " SIZE_FORMAT "<" SIZE_FORMAT, word_size, min_size));
block_freelists()->return_block(p, raw_word_size);
}
@@ -2374,7 +2378,7 @@ MetaWord* SpaceManager::allocate_work(size_t word_size) {
if (result == NULL) {
result = grow_and_allocate(word_size);
}
-if (result > 0) {
+if (result != 0) {
inc_used_metrics(word_size);
assert(result != (MetaWord*) chunks_in_use(MediumIndex),
"Head of the list is being allocated");
@@ -2478,7 +2482,8 @@ size_t MetaspaceAux::_allocated_used_words[] = {0, 0};
size_t MetaspaceAux::free_bytes() {
size_t result = 0;
-if (Metaspace::class_space_list() != NULL) {
+if (Metaspace::using_class_space() &&
+(Metaspace::class_space_list() != NULL)) {
result = result + Metaspace::class_space_list()->free_bytes();
}
if (Metaspace::space_list() != NULL) {
@@ -2549,6 +2554,9 @@ size_t MetaspaceAux::free_in_bytes(Metaspace::MetadataType mdtype) {
}
size_t MetaspaceAux::capacity_bytes_slow(Metaspace::MetadataType mdtype) {
if ((mdtype == Metaspace::ClassType) && !Metaspace::using_class_space()) {
return 0;
}
// Don't count the space in the freelists. That space will be
// added to the capacity calculation as needed.
size_t capacity = 0;
@@ -2563,18 +2571,23 @@ size_t MetaspaceAux::capacity_bytes_slow(Metaspace::MetadataType mdtype) {
}
size_t MetaspaceAux::reserved_in_bytes(Metaspace::MetadataType mdtype) {
-size_t reserved = (mdtype == Metaspace::ClassType) ?
-Metaspace::class_space_list()->virtual_space_total() :
-Metaspace::space_list()->virtual_space_total();
-return reserved * BytesPerWord;
+if (mdtype == Metaspace::ClassType) {
+return Metaspace::using_class_space() ?
+Metaspace::class_space_list()->virtual_space_total() * BytesPerWord : 0;
+} else {
+return Metaspace::space_list()->virtual_space_total() * BytesPerWord;
+}
}
size_t MetaspaceAux::min_chunk_size() { return Metaspace::first_chunk_word_size(); }
size_t MetaspaceAux::free_chunks_total(Metaspace::MetadataType mdtype) {
if ((mdtype == Metaspace::ClassType) && !Metaspace::using_class_space()) {
return 0;
}
ChunkManager* chunk = (mdtype == Metaspace::ClassType) ?
Metaspace::class_space_list()->chunk_manager() :
Metaspace::space_list()->chunk_manager();
chunk->slow_verify();
return chunk->free_chunks_total();
}
@@ -2615,7 +2628,6 @@ void MetaspaceAux::print_metaspace_change(size_t prev_metadata_used) {
// This is printed when PrintGCDetails
void MetaspaceAux::print_on(outputStream* out) {
-Metaspace::MetadataType ct = Metaspace::ClassType;
Metaspace::MetadataType nct = Metaspace::NonClassType;
out->print_cr(" Metaspace total "
@@ -2629,12 +2641,15 @@ void MetaspaceAux::print_on(outputStream* out) {
allocated_capacity_bytes(nct)/K,
allocated_used_bytes(nct)/K,
reserved_in_bytes(nct)/K);
-out->print_cr(" class space "
-SIZE_FORMAT "K, used " SIZE_FORMAT "K,"
-" reserved " SIZE_FORMAT "K",
-allocated_capacity_bytes(ct)/K,
-allocated_used_bytes(ct)/K,
-reserved_in_bytes(ct)/K);
+if (Metaspace::using_class_space()) {
+Metaspace::MetadataType ct = Metaspace::ClassType;
+out->print_cr(" class space "
+SIZE_FORMAT "K, used " SIZE_FORMAT "K,"
+" reserved " SIZE_FORMAT "K",
+allocated_capacity_bytes(ct)/K,
+allocated_used_bytes(ct)/K,
+reserved_in_bytes(ct)/K);
+}
}
// Print information for class space and data space separately.
@@ -2659,13 +2674,37 @@ void MetaspaceAux::print_on(outputStream* out, Metaspace::MetadataType mdtype) {
assert(!SafepointSynchronize::is_at_safepoint() || used_and_free == capacity_bytes, "Accounting is wrong");
}
-// Print total fragmentation for class and data metaspaces separately
-void MetaspaceAux::print_waste(outputStream* out) {
-size_t specialized_waste = 0, small_waste = 0, medium_waste = 0;
-size_t specialized_count = 0, small_count = 0, medium_count = 0, humongous_count = 0;
+// Print total fragmentation for class metaspaces
+void MetaspaceAux::print_class_waste(outputStream* out) {
+assert(Metaspace::using_class_space(), "class metaspace not used");
+size_t cls_specialized_waste = 0, cls_small_waste = 0, cls_medium_waste = 0;
+size_t cls_specialized_count = 0, cls_small_count = 0, cls_medium_count = 0, cls_humongous_count = 0;
+ClassLoaderDataGraphMetaspaceIterator iter;
+while (iter.repeat()) {
+Metaspace* msp = iter.get_next();
+if (msp != NULL) {
+cls_specialized_waste += msp->class_vsm()->sum_waste_in_chunks_in_use(SpecializedIndex);
+cls_specialized_count += msp->class_vsm()->sum_count_in_chunks_in_use(SpecializedIndex);
+cls_small_waste += msp->class_vsm()->sum_waste_in_chunks_in_use(SmallIndex);
+cls_small_count += msp->class_vsm()->sum_count_in_chunks_in_use(SmallIndex);
+cls_medium_waste += msp->class_vsm()->sum_waste_in_chunks_in_use(MediumIndex);
+cls_medium_count += msp->class_vsm()->sum_count_in_chunks_in_use(MediumIndex);
+cls_humongous_count += msp->class_vsm()->sum_count_in_chunks_in_use(HumongousIndex);
+}
+}
+out->print_cr(" class: " SIZE_FORMAT " specialized(s) " SIZE_FORMAT ", "
+SIZE_FORMAT " small(s) " SIZE_FORMAT ", "
+SIZE_FORMAT " medium(s) " SIZE_FORMAT ", "
+"large count " SIZE_FORMAT,
+cls_specialized_count, cls_specialized_waste,
+cls_small_count, cls_small_waste,
+cls_medium_count, cls_medium_waste, cls_humongous_count);
+}
+// Print total fragmentation for data and class metaspaces separately
+void MetaspaceAux::print_waste(outputStream* out) {
+size_t specialized_waste = 0, small_waste = 0, medium_waste = 0;
+size_t specialized_count = 0, small_count = 0, medium_count = 0, humongous_count = 0;
ClassLoaderDataGraphMetaspaceIterator iter;
while (iter.repeat()) {
@@ -2678,14 +2717,6 @@ void MetaspaceAux::print_waste(outputStream* out) {
medium_waste += msp->vsm()->sum_waste_in_chunks_in_use(MediumIndex);
medium_count += msp->vsm()->sum_count_in_chunks_in_use(MediumIndex);
humongous_count += msp->vsm()->sum_count_in_chunks_in_use(HumongousIndex);
-cls_specialized_waste += msp->class_vsm()->sum_waste_in_chunks_in_use(SpecializedIndex);
-cls_specialized_count += msp->class_vsm()->sum_count_in_chunks_in_use(SpecializedIndex);
-cls_small_waste += msp->class_vsm()->sum_waste_in_chunks_in_use(SmallIndex);
-cls_small_count += msp->class_vsm()->sum_count_in_chunks_in_use(SmallIndex);
-cls_medium_waste += msp->class_vsm()->sum_waste_in_chunks_in_use(MediumIndex);
-cls_medium_count += msp->class_vsm()->sum_count_in_chunks_in_use(MediumIndex);
-cls_humongous_count += msp->class_vsm()->sum_count_in_chunks_in_use(HumongousIndex);
}
}
out->print_cr("Total fragmentation waste (words) doesn't count free space");
@@ -2695,13 +2726,9 @@ void MetaspaceAux::print_waste(outputStream* out) {
"large count " SIZE_FORMAT,
specialized_count, specialized_waste, small_count,
small_waste, medium_count, medium_waste, humongous_count);
-out->print_cr(" class: " SIZE_FORMAT " specialized(s) " SIZE_FORMAT ", "
-SIZE_FORMAT " small(s) " SIZE_FORMAT ", "
-SIZE_FORMAT " medium(s) " SIZE_FORMAT ", "
-"large count " SIZE_FORMAT,
-cls_specialized_count, cls_specialized_waste,
-cls_small_count, cls_small_waste,
-cls_medium_count, cls_medium_waste, cls_humongous_count);
+if (Metaspace::using_class_space()) {
+print_class_waste(out);
+}
}
// Dump global metaspace things from the end of ClassLoaderDataGraph
@@ -2714,7 +2741,9 @@ void MetaspaceAux::dump(outputStream* out) {
void MetaspaceAux::verify_free_chunks() {
Metaspace::space_list()->chunk_manager()->verify();
-Metaspace::class_space_list()->chunk_manager()->verify();
+if (Metaspace::using_class_space()) {
+Metaspace::class_space_list()->chunk_manager()->verify();
+}
}
void MetaspaceAux::verify_capacity() {
@@ -2776,7 +2805,9 @@ Metaspace::Metaspace(Mutex* lock, MetaspaceType type) {
Metaspace::~Metaspace() {
delete _vsm;
-delete _class_vsm;
+if (using_class_space()) {
+delete _class_vsm;
+}
}
VirtualSpaceList* Metaspace::_space_list = NULL;
@@ -2784,9 +2815,123 @@ VirtualSpaceList* Metaspace::_class_space_list = NULL;
#define VIRTUALSPACEMULTIPLIER 2
#ifdef _LP64
void Metaspace::set_narrow_klass_base_and_shift(address metaspace_base, address cds_base) {
// Figure out the narrow_klass_base and the narrow_klass_shift. The
// narrow_klass_base is the lower of the metaspace base and the cds base
// (if cds is enabled). The narrow_klass_shift depends on the distance
// between the lower base and higher address.
address lower_base;
address higher_address;
if (UseSharedSpaces) {
higher_address = MAX2((address)(cds_base + FileMapInfo::shared_spaces_size()),
(address)(metaspace_base + class_metaspace_size()));
lower_base = MIN2(metaspace_base, cds_base);
} else {
higher_address = metaspace_base + class_metaspace_size();
lower_base = metaspace_base;
}
Universe::set_narrow_klass_base(lower_base);
if ((uint64_t)(higher_address - lower_base) < (uint64_t)max_juint) {
Universe::set_narrow_klass_shift(0);
} else {
assert(!UseSharedSpaces, "Cannot shift with UseSharedSpaces");
Universe::set_narrow_klass_shift(LogKlassAlignmentInBytes);
}
}
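The shift decision above is binary: with a shift of 0 a 32-bit narrow klass pointer only reaches max_juint bytes past the base, while shifting by LogKlassAlignmentInBytes (3, for 8-byte klass alignment) stretches the reach to 32G, which is where KlassEncodingMetaspaceMax comes from. A stand-alone sketch of that decision (pick_shift is a hypothetical helper, not part of this patch):

    #include <cstdint>

    // 0 if the whole klass range fits within 4G of the base, else 3.
    static int pick_shift(uint64_t lower_base, uint64_t higher_address) {
      const uint64_t max_juint = 0xffffffffULL;
      return (higher_address - lower_base) < max_juint ? 0 : 3;
    }

    int main() {
      // A 3G span can stay unscaled; an 8G span must shift by 3.
      return (pick_shift(0, 3ULL << 30) == 0 && pick_shift(0, 8ULL << 30) == 3) ? 0 : 1;
    }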
// Return TRUE if the specified metaspace_base and cds_base are close enough
// to work with compressed klass pointers.
bool Metaspace::can_use_cds_with_metaspace_addr(char* metaspace_base, address cds_base) {
assert(cds_base != 0 && UseSharedSpaces, "Only use with CDS");
assert(UseCompressedKlassPointers, "Only use with CompressedKlassPtrs");
address lower_base = MIN2((address)metaspace_base, cds_base);
address higher_address = MAX2((address)(cds_base + FileMapInfo::shared_spaces_size()),
(address)(metaspace_base + class_metaspace_size()));
return ((uint64_t)(higher_address - lower_base) < (uint64_t)max_juint);
}
// Try to allocate the metaspace at the requested addr.
void Metaspace::allocate_metaspace_compressed_klass_ptrs(char* requested_addr, address cds_base) {
assert(using_class_space(), "called improperly");
assert(UseCompressedKlassPointers, "Only use with CompressedKlassPtrs");
assert(class_metaspace_size() < KlassEncodingMetaspaceMax,
"Metaspace size is too big");
ReservedSpace metaspace_rs = ReservedSpace(class_metaspace_size(),
os::vm_allocation_granularity(),
false, requested_addr, 0);
if (!metaspace_rs.is_reserved()) {
if (UseSharedSpaces) {
// Keep trying to allocate the metaspace, increasing the requested_addr
// by 1GB each time, until we reach an address that will no longer allow
// use of CDS with compressed klass pointers.
char *addr = requested_addr;
while (!metaspace_rs.is_reserved() && (addr + 1*G > addr) &&
can_use_cds_with_metaspace_addr(addr + 1*G, cds_base)) {
addr = addr + 1*G;
metaspace_rs = ReservedSpace(class_metaspace_size(),
os::vm_allocation_granularity(), false, addr, 0);
}
}
// If no successful allocation then try to allocate the space anywhere. If
// that fails then OOM doom. At this point we cannot try allocating the
// metaspace as if UseCompressedKlassPointers is off because too much
// initialization has happened that depends on UseCompressedKlassPointers.
// So, UseCompressedKlassPointers cannot be turned off at this point.
if (!metaspace_rs.is_reserved()) {
metaspace_rs = ReservedSpace(class_metaspace_size(),
os::vm_allocation_granularity(), false);
if (!metaspace_rs.is_reserved()) {
vm_exit_during_initialization(err_msg("Could not allocate metaspace: %d bytes",
class_metaspace_size()));
}
}
}
// If we got here then the metaspace got allocated.
MemTracker::record_virtual_memory_type((address)metaspace_rs.base(), mtClass);
// Verify that we can use shared spaces. Otherwise, turn off CDS.
if (UseSharedSpaces && !can_use_cds_with_metaspace_addr(metaspace_rs.base(), cds_base)) {
FileMapInfo::stop_sharing_and_unmap(
"Could not allocate metaspace at a compatible address");
}
set_narrow_klass_base_and_shift((address)metaspace_rs.base(),
UseSharedSpaces ? (address)cds_base : 0);
initialize_class_space(metaspace_rs);
if (PrintCompressedOopsMode || (PrintMiscellaneous && Verbose)) {
gclog_or_tty->print_cr("Narrow klass base: " PTR_FORMAT ", Narrow klass shift: " SIZE_FORMAT,
Universe::narrow_klass_base(), Universe::narrow_klass_shift());
gclog_or_tty->print_cr("Metaspace Size: " SIZE_FORMAT " Address: " PTR_FORMAT " Req Addr: " PTR_FORMAT,
class_metaspace_size(), metaspace_rs.base(), requested_addr);
}
}
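The retry loop above probes upward in 1G steps for as long as the next address would still keep the archive within reach of the narrow klass encoding. A minimal sketch of the pattern (reserve_at and compatible_with_cds are stand-ins for the ReservedSpace constructor and can_use_cds_with_metaspace_addr; the thresholds are invented for the example):

    #include <cstdint>

    static const uint64_t G = 1024ULL * 1024 * 1024;

    static bool reserve_at(uint64_t addr) { return addr >= 5 * G; }          // pretend low addresses are taken
    static bool compatible_with_cds(uint64_t addr) { return addr < 32 * G; } // pretend encoding limit

    int main() {
      uint64_t addr = 1 * G;
      while (!reserve_at(addr) && (addr + G > addr) && compatible_with_cds(addr + G)) {
        addr += G;  // step the request up by 1G and try again
      }
      return reserve_at(addr) ? 0 : 1;  // settles at 5G in this toy setup
    }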
// For UseCompressedKlassPointers the class space is reserved above the top of
// the Java heap. The argument passed in is at the base of the compressed space.
void Metaspace::initialize_class_space(ReservedSpace rs) {
// The reserved space size may be bigger because of alignment, esp with UseLargePages
assert(rs.size() >= ClassMetaspaceSize,
err_msg(SIZE_FORMAT " != " UINTX_FORMAT, rs.size(), ClassMetaspaceSize));
assert(using_class_space(), "Must be using class space");
_class_space_list = new VirtualSpaceList(rs);
}
#endif
void Metaspace::global_initialize() {
// Initialize the alignment for shared spaces.
int max_alignment = os::vm_page_size();
size_t cds_total = 0;
set_class_metaspace_size(align_size_up(ClassMetaspaceSize,
os::vm_allocation_granularity()));
MetaspaceShared::set_max_alignment(max_alignment);
if (DumpSharedSpaces) {
@@ -2798,15 +2943,31 @@ void Metaspace::global_initialize() {
// Initialize with the sum of the shared space sizes. The read-only
// and read write metaspace chunks will be allocated out of this and the
// remainder is the misc code and data chunks.
-size_t total = align_size_up(SharedReadOnlySize + SharedReadWriteSize +
-SharedMiscDataSize + SharedMiscCodeSize,
-os::vm_allocation_granularity());
-size_t word_size = total/wordSize;
-_space_list = new VirtualSpaceList(word_size);
+cds_total = FileMapInfo::shared_spaces_size();
+_space_list = new VirtualSpaceList(cds_total/wordSize);
#ifdef _LP64
// Set the compressed klass pointer base so that decoding of these pointers works
// properly when creating the shared archive.
assert(UseCompressedOops && UseCompressedKlassPointers,
"UseCompressedOops and UseCompressedKlassPointers must be set");
Universe::set_narrow_klass_base((address)_space_list->current_virtual_space()->bottom());
if (TraceMetavirtualspaceAllocation && Verbose) {
gclog_or_tty->print_cr("Setting_narrow_klass_base to Address: " PTR_FORMAT,
_space_list->current_virtual_space()->bottom());
}
// Set the shift to zero.
assert(class_metaspace_size() < (uint64_t)(max_juint) - cds_total,
"CDS region is too large");
Universe::set_narrow_klass_shift(0);
#endif
} else {
// If using shared space, open the file that contains the shared space
// and map in the memory before initializing the rest of metaspace (so
// the addresses don't conflict)
address cds_address = NULL;
if (UseSharedSpaces) {
FileMapInfo* mapinfo = new FileMapInfo();
memset(mapinfo, 0, sizeof(FileMapInfo));
@@ -2821,8 +2982,22 @@ void Metaspace::global_initialize() {
assert(!mapinfo->is_open() && !UseSharedSpaces,
"archive file not closed or shared spaces not disabled.");
}
cds_total = FileMapInfo::shared_spaces_size();
cds_address = (address)mapinfo->region_base(0);
}
#ifdef _LP64
// If UseCompressedKlassPointers is set then allocate the metaspace area
// above the heap and above the CDS area (if it exists).
if (using_class_space()) {
if (UseSharedSpaces) {
allocate_metaspace_compressed_klass_ptrs((char *)(cds_address + cds_total), cds_address);
} else {
allocate_metaspace_compressed_klass_ptrs((char *)CompressedKlassPointersBase, 0);
}
}
#endif
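Net effect of the two branches above, as a rough address map (illustrative, not to scale):

    with CDS:     [ CDS regions: ro rw md mc ][ compressed class space ]
                  ^ cds_address               ^ cds_address + cds_total
    without CDS:  [ compressed class space ]
                  ^ CompressedKlassPointersBase

Either way narrow_klass_base lands on the lower of the two bases, and the shift stays 0 whenever the whole span fits in 4G (see set_narrow_klass_base_and_shift() above).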
// Initialize these before initializing the VirtualSpaceList
_first_chunk_word_size = InitialBootClassLoaderMetaspaceSize / BytesPerWord;
_first_chunk_word_size = align_word_size_up(_first_chunk_word_size);
@@ -2840,39 +3015,28 @@ void Metaspace::global_initialize() {
}
}
-// For UseCompressedKlassPointers the class space is reserved as a piece of the
-// Java heap because the compression algorithm is the same for each. The
-// argument passed in is at the top of the compressed space
-void Metaspace::initialize_class_space(ReservedSpace rs) {
-// The reserved space size may be bigger because of alignment, esp with UseLargePages
-assert(rs.size() >= ClassMetaspaceSize,
-err_msg(SIZE_FORMAT " != " UINTX_FORMAT, rs.size(), ClassMetaspaceSize));
-_class_space_list = new VirtualSpaceList(rs);
-}
-void Metaspace::initialize(Mutex* lock,
-MetaspaceType type) {
+void Metaspace::initialize(Mutex* lock, MetaspaceType type) {
assert(space_list() != NULL,
"Metadata VirtualSpaceList has not been initialized");
-_vsm = new SpaceManager(Metaspace::NonClassType, lock, space_list());
+_vsm = new SpaceManager(NonClassType, lock, space_list());
if (_vsm == NULL) {
return;
}
size_t word_size;
size_t class_word_size;
-vsm()->get_initial_chunk_sizes(type,
-&word_size,
-&class_word_size);
+vsm()->get_initial_chunk_sizes(type, &word_size, &class_word_size);
-assert(class_space_list() != NULL,
-"Class VirtualSpaceList has not been initialized");
+if (using_class_space()) {
+assert(class_space_list() != NULL,
+"Class VirtualSpaceList has not been initialized");
-// Allocate SpaceManager for classes.
-_class_vsm = new SpaceManager(Metaspace::ClassType, lock, class_space_list());
-if (_class_vsm == NULL) {
-return;
+// Allocate SpaceManager for classes.
+_class_vsm = new SpaceManager(ClassType, lock, class_space_list());
+if (_class_vsm == NULL) {
+return;
+}
+}
MutexLockerEx cl(SpaceManager::expand_lock(), Mutex::_no_safepoint_check_flag);
@@ -2888,11 +3052,13 @@ void Metaspace::initialize(Mutex* lock,
}
// Allocate chunk for class metadata objects
-Metachunk* class_chunk =
-class_space_list()->get_initialization_chunk(class_word_size,
-class_vsm()->medium_chunk_bunch());
-if (class_chunk != NULL) {
-class_vsm()->add_chunk(class_chunk, true);
+if (using_class_space()) {
+Metachunk* class_chunk =
+class_space_list()->get_initialization_chunk(class_word_size,
+class_vsm()->medium_chunk_bunch());
+if (class_chunk != NULL) {
+class_vsm()->add_chunk(class_chunk, true);
+}
}
_alloc_record_head = NULL;
@@ -2906,7 +3072,8 @@ size_t Metaspace::align_word_size_up(size_t word_size) {
MetaWord* Metaspace::allocate(size_t word_size, MetadataType mdtype) {
// DumpSharedSpaces doesn't use class metadata area (yet)
-if (mdtype == ClassType && !DumpSharedSpaces) {
+// Also, don't use class_vsm() unless UseCompressedKlassPointers is true.
+if (mdtype == ClassType && using_class_space()) {
return class_vsm()->allocate(word_size);
} else {
return vsm()->allocate(word_size);
@@ -2937,14 +3104,19 @@ char* Metaspace::bottom() const {
}
size_t Metaspace::used_words_slow(MetadataType mdtype) const {
// return vsm()->allocated_used_words();
-return mdtype == ClassType ? class_vsm()->sum_used_in_chunks_in_use() :
-vsm()->sum_used_in_chunks_in_use(); // includes overhead!
+if (mdtype == ClassType) {
+return using_class_space() ? class_vsm()->sum_used_in_chunks_in_use() : 0;
+} else {
+return vsm()->sum_used_in_chunks_in_use(); // includes overhead!
+}
}
size_t Metaspace::free_words(MetadataType mdtype) const {
-return mdtype == ClassType ? class_vsm()->sum_free_in_chunks_in_use() :
-vsm()->sum_free_in_chunks_in_use();
+if (mdtype == ClassType) {
+return using_class_space() ? class_vsm()->sum_free_in_chunks_in_use() : 0;
+} else {
+return vsm()->sum_free_in_chunks_in_use();
+}
}
// Space capacity in the Metaspace. It includes
@@ -2953,8 +3125,11 @@ size_t Metaspace::free_words(MetadataType mdtype) const {
// in the space available in the dictionary which
// is already counted in some chunk.
size_t Metaspace::capacity_words_slow(MetadataType mdtype) const {
-return mdtype == ClassType ? class_vsm()->sum_capacity_in_chunks_in_use() :
-vsm()->sum_capacity_in_chunks_in_use();
+if (mdtype == ClassType) {
+return using_class_space() ? class_vsm()->sum_capacity_in_chunks_in_use() : 0;
+} else {
+return vsm()->sum_capacity_in_chunks_in_use();
+}
}
size_t Metaspace::used_bytes_slow(MetadataType mdtype) const {
@@ -2977,8 +3152,8 @@ void Metaspace::deallocate(MetaWord* ptr, size_t word_size, bool is_class) {
#endif
return;
}
-if (is_class) {
+if (is_class && using_class_space()) {
class_vsm()->deallocate(ptr, word_size);
} else {
vsm()->deallocate(ptr, word_size);
}
@@ -2992,7 +3167,7 @@ void Metaspace::deallocate(MetaWord* ptr, size_t word_size, bool is_class) {
#endif
return;
}
-if (is_class) {
+if (is_class && using_class_space()) {
class_vsm()->deallocate(ptr, word_size);
} else {
vsm()->deallocate(ptr, word_size);
@@ -3101,14 +3276,18 @@ void Metaspace::purge() {
MutexLockerEx cl(SpaceManager::expand_lock(),
Mutex::_no_safepoint_check_flag);
space_list()->purge();
-class_space_list()->purge();
+if (using_class_space()) {
+class_space_list()->purge();
+}
}
void Metaspace::print_on(outputStream* out) const {
// Print both class virtual space counts and metaspace.
if (Verbose) {
vsm()->print_on(out);
+if (using_class_space()) {
class_vsm()->print_on(out);
+}
}
}
@@ -3122,17 +3301,21 @@ bool Metaspace::contains(const void * ptr) {
// be needed. Note, locking this can cause inversion problems with the
// caller in MetaspaceObj::is_metadata() function.
return space_list()->contains(ptr) ||
-class_space_list()->contains(ptr);
+(using_class_space() && class_space_list()->contains(ptr));
}
void Metaspace::verify() {
vsm()->verify();
-class_vsm()->verify();
+if (using_class_space()) {
+class_vsm()->verify();
+}
}
void Metaspace::dump(outputStream* const out) const {
out->print_cr("\nVirtual space manager: " INTPTR_FORMAT, vsm());
vsm()->dump(out);
out->print_cr("\nClass space manager: " INTPTR_FORMAT, class_vsm());
class_vsm()->dump(out);
if (using_class_space()) {
out->print_cr("\nClass space manager: " INTPTR_FORMAT, class_vsm());
class_vsm()->dump(out);
}
}
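Nearly every hunk in this file applies the same guard: code that touches class_vsm() or class_space_list() first checks using_class_space(), and ClassType statistics collapse to zero when no class space exists. A condensed, self-contained sketch of that pattern (names mirror the real ones, but this is an illustration, not HotSpot code):

    #include <cstddef>

    enum MetadataType { ClassType, NonClassType };

    // Stand-in for Metaspace::using_class_space(): true only on 64-bit with
    // compressed klass pointers, and never while dumping the shared archive.
    static bool using_class_space() { return true; }

    static size_t stat_slow(MetadataType mdtype, size_t class_val, size_t nonclass_val) {
      if (mdtype == ClassType) {
        return using_class_space() ? class_val : 0;  // no class space: report 0
      }
      return nonclass_val;
    }

    int main() {
      return (stat_slow(ClassType, 42, 7) == 42 && stat_slow(NonClassType, 42, 7) == 7) ? 0 : 1;
    }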

src/share/vm/memory/metaspace.hpp

@@ -105,6 +105,16 @@ class Metaspace : public CHeapObj<mtClass> {
// Align up the word size to the allocation word size
static size_t align_word_size_up(size_t);
// Aligned size of the metaspace.
static size_t _class_metaspace_size;
static size_t class_metaspace_size() {
return _class_metaspace_size;
}
static void set_class_metaspace_size(size_t metaspace_size) {
_class_metaspace_size = metaspace_size;
}
static size_t _first_chunk_word_size;
static size_t _first_class_chunk_word_size;
@@ -131,6 +141,17 @@ class Metaspace : public CHeapObj<mtClass> {
// maintain a single list for now.
void record_allocation(void* ptr, MetaspaceObj::Type type, size_t word_size);
#ifdef _LP64
static void set_narrow_klass_base_and_shift(address metaspace_base, address cds_base);
// Returns true if can use CDS with metaspace allocated as specified address.
static bool can_use_cds_with_metaspace_addr(char* metaspace_base, address cds_base);
static void allocate_metaspace_compressed_klass_ptrs(char* requested_addr, address cds_base);
static void initialize_class_space(ReservedSpace rs);
#endif
class AllocRecord : public CHeapObj<mtClass> {
public:
AllocRecord(address ptr, MetaspaceObj::Type type, int byte_size)
@@ -151,7 +172,6 @@ class Metaspace : public CHeapObj<mtClass> {
// Initialize globals for Metaspace
static void global_initialize();
-static void initialize_class_space(ReservedSpace rs);
static size_t first_chunk_word_size() { return _first_chunk_word_size; }
static size_t first_class_chunk_word_size() { return _first_class_chunk_word_size; }
@@ -172,8 +192,6 @@ class Metaspace : public CHeapObj<mtClass> {
MetaWord* expand_and_allocate(size_t size,
MetadataType mdtype);
-static bool is_initialized() { return _class_space_list != NULL; }
static bool contains(const void *ptr);
void dump(outputStream* const out) const;
@@ -190,6 +208,12 @@ class Metaspace : public CHeapObj<mtClass> {
};
void iterate(AllocRecordClosure *closure);
// Return TRUE only if UseCompressedKlassPointers is True and DumpSharedSpaces is False.
static bool using_class_space() {
return NOT_LP64(false) LP64_ONLY(UseCompressedKlassPointers && !DumpSharedSpaces);
}
};
class MetaspaceAux : AllStatic {
@@ -243,8 +267,9 @@ class MetaspaceAux : AllStatic {
return _allocated_capacity_words[mdtype];
}
static size_t allocated_capacity_words() {
-return _allocated_capacity_words[Metaspace::ClassType] +
-_allocated_capacity_words[Metaspace::NonClassType];
+return _allocated_capacity_words[Metaspace::NonClassType] +
+(Metaspace::using_class_space() ?
+_allocated_capacity_words[Metaspace::ClassType] : 0);
}
static size_t allocated_capacity_bytes(Metaspace::MetadataType mdtype) {
return allocated_capacity_words(mdtype) * BytesPerWord;
@@ -257,8 +282,9 @@ class MetaspaceAux : AllStatic {
return _allocated_used_words[mdtype];
}
static size_t allocated_used_words() {
-return _allocated_used_words[Metaspace::ClassType] +
-_allocated_used_words[Metaspace::NonClassType];
+return _allocated_used_words[Metaspace::NonClassType] +
+(Metaspace::using_class_space() ?
+_allocated_used_words[Metaspace::ClassType] : 0);
}
static size_t allocated_used_bytes(Metaspace::MetadataType mdtype) {
return allocated_used_words(mdtype) * BytesPerWord;
@@ -300,6 +326,7 @@ class MetaspaceAux : AllStatic {
static void print_on(outputStream * out);
static void print_on(outputStream * out, Metaspace::MetadataType mdtype);
+static void print_class_waste(outputStream* out);
static void print_waste(outputStream* out);
static void dump(outputStream* out);
static void verify_free_chunks();
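The using_class_space() predicate above relies on HotSpot's platform macros: NOT_LP64(x) expands to x only on 32-bit builds and LP64_ONLY(x) only on 64-bit ones, so the whole expression is a constant false on 32-bit and the real flag check on 64-bit. A simplified, compilable sketch of that expansion (the flag variables are stand-ins for the real globals):

    #ifdef _LP64
    #define NOT_LP64(code)
    #define LP64_ONLY(code) code
    #else
    #define NOT_LP64(code)  code
    #define LP64_ONLY(code)
    #endif

    static bool UseCompressedKlassPointers = true;   // stand-in flag
    static bool DumpSharedSpaces = false;            // stand-in flag

    static bool using_class_space() {
      return NOT_LP64(false) LP64_ONLY(UseCompressedKlassPointers && !DumpSharedSpaces);
    }

    int main() { return using_class_space() ? 0 : 1; }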

src/share/vm/memory/metaspaceShared.cpp

@@ -52,7 +52,6 @@ void MetaspaceShared::serialize(SerializeClosure* soc) {
int tag = 0;
soc->do_tag(--tag);
-assert(!UseCompressedOops, "UseCompressedOops doesn't work with shared archive");
// Verify the sizes of various metadata in the system.
soc->do_tag(sizeof(Method));
soc->do_tag(sizeof(ConstMethod));

src/share/vm/memory/universe.cpp

@@ -145,8 +145,6 @@ NarrowPtrStruct Universe::_narrow_oop = { NULL, 0, true };
NarrowPtrStruct Universe::_narrow_klass = { NULL, 0, true };
address Universe::_narrow_ptrs_base;
-size_t Universe::_class_metaspace_size;
void Universe::basic_type_classes_do(void f(Klass*)) {
f(boolArrayKlassObj());
f(byteArrayKlassObj());
@@ -641,6 +639,8 @@ jint universe_init() {
return status;
}
Metaspace::global_initialize();
// Create memory for metadata. Must be after initializing heap for
// DumpSharedSpaces.
ClassLoaderData::init_null_class_loader_data();
@@ -693,13 +693,9 @@ char* Universe::preferred_heap_base(size_t heap_size, NARROW_OOP_MODE mode) {
if (!FLAG_IS_DEFAULT(HeapBaseMinAddress) && (mode == UnscaledNarrowOop)) {
base = HeapBaseMinAddress;
-// If the total size and the metaspace size are small enough to allow
-// UnscaledNarrowOop then just use UnscaledNarrowOop.
-} else if ((total_size <= OopEncodingHeapMax) && (mode != HeapBasedNarrowOop) &&
-(!UseCompressedKlassPointers ||
-(((OopEncodingHeapMax - heap_size) + Universe::class_metaspace_size()) <= KlassEncodingMetaspaceMax))) {
+// We don't need to check the metaspace size here because it is always smaller
+// than total_size.
+// If the total size is small enough to allow UnscaledNarrowOop then
+// just use UnscaledNarrowOop.
+} else if ((total_size <= OopEncodingHeapMax) && (mode != HeapBasedNarrowOop)) {
if ((total_size <= NarrowOopHeapMax) && (mode == UnscaledNarrowOop) &&
(Universe::narrow_oop_shift() == 0)) {
// Use 32-bits oops without encoding and
@@ -716,13 +712,6 @@ char* Universe::preferred_heap_base(size_t heap_size, NARROW_OOP_MODE mode) {
base = (OopEncodingHeapMax - heap_size);
}
}
-// See if ZeroBaseNarrowOop encoding will work for a heap based at
-// (KlassEncodingMetaspaceMax - class_metaspace_size()).
-} else if (UseCompressedKlassPointers && (mode != HeapBasedNarrowOop) &&
-(Universe::class_metaspace_size() + HeapBaseMinAddress <= KlassEncodingMetaspaceMax) &&
-(KlassEncodingMetaspaceMax + heap_size - Universe::class_metaspace_size() <= OopEncodingHeapMax)) {
-base = (KlassEncodingMetaspaceMax - Universe::class_metaspace_size());
} else {
// UnscaledNarrowOop encoding didn't work, and no base was found for ZeroBasedOops or
// HeapBasedNarrowOop encoding was requested. So, can't reserve below 32Gb.
@@ -732,8 +721,7 @@ char* Universe::preferred_heap_base(size_t heap_size, NARROW_OOP_MODE mode) {
// Set narrow_oop_base and narrow_oop_use_implicit_null_checks
// used in ReservedHeapSpace() constructors.
// The final values will be set in initialize_heap() below.
-if ((base != 0) && ((base + heap_size) <= OopEncodingHeapMax) &&
-(!UseCompressedKlassPointers || (base + Universe::class_metaspace_size()) <= KlassEncodingMetaspaceMax)) {
+if ((base != 0) && ((base + heap_size) <= OopEncodingHeapMax)) {
// Use zero based compressed oops
Universe::set_narrow_oop_base(NULL);
// Don't need guard page for implicit checks in indexed
@@ -816,9 +804,7 @@ jint Universe::initialize_heap() {
tty->print("heap address: " PTR_FORMAT ", size: " SIZE_FORMAT " MB",
Universe::heap()->base(), Universe::heap()->reserved_region().byte_size()/M);
}
-if (((uint64_t)Universe::heap()->reserved_region().end() > OopEncodingHeapMax) ||
-(UseCompressedKlassPointers &&
-((uint64_t)Universe::heap()->base() + Universe::class_metaspace_size() > KlassEncodingMetaspaceMax))) {
+if (((uint64_t)Universe::heap()->reserved_region().end() > OopEncodingHeapMax)) {
// Can't reserve heap below 32Gb.
// keep the Universe::narrow_oop_base() set in Universe::reserve_heap()
Universe::set_narrow_oop_shift(LogMinObjAlignmentInBytes);
@@ -849,20 +835,16 @@ jint Universe::initialize_heap() {
}
}
}
if (verbose) {
tty->cr();
tty->cr();
}
-if (UseCompressedKlassPointers) {
-Universe::set_narrow_klass_base(Universe::narrow_oop_base());
-Universe::set_narrow_klass_shift(MIN2(Universe::narrow_oop_shift(), LogKlassAlignmentInBytes));
-}
Universe::set_narrow_ptrs_base(Universe::narrow_oop_base());
}
-// Universe::narrow_oop_base() is one page below the metaspace
-// base. The actual metaspace base depends on alignment constraints
-// so we don't know its exact location here.
-assert((intptr_t)Universe::narrow_oop_base() <= (intptr_t)(Universe::heap()->base() - os::vm_page_size() - ClassMetaspaceSize) ||
+// Universe::narrow_oop_base() is one page below the heap.
+assert((intptr_t)Universe::narrow_oop_base() <= (intptr_t)(Universe::heap()->base() -
+os::vm_page_size()) ||
Universe::narrow_oop_base() == NULL, "invalid value");
assert(Universe::narrow_oop_shift() == LogMinObjAlignmentInBytes ||
Universe::narrow_oop_shift() == 0, "invalid value");
@@ -882,12 +864,7 @@ jint Universe::initialize_heap() {
// Reserve the Java heap, which is now the same for all GCs.
ReservedSpace Universe::reserve_heap(size_t heap_size, size_t alignment) {
-// Add in the class metaspace area so the classes in the headers can
-// be compressed the same as instances.
-// Need to round class space size up because it's below the heap and
-// the actual alignment depends on its size.
-Universe::set_class_metaspace_size(align_size_up(ClassMetaspaceSize, alignment));
-size_t total_reserved = align_size_up(heap_size + Universe::class_metaspace_size(), alignment);
+size_t total_reserved = align_size_up(heap_size, alignment);
assert(!UseCompressedOops || (total_reserved <= (OopEncodingHeapMax - os::vm_page_size())),
"heap size is too big for compressed oops");
char* addr = Universe::preferred_heap_base(total_reserved, Universe::UnscaledNarrowOop);
@@ -923,28 +900,17 @@ ReservedSpace Universe::reserve_heap(size_t heap_size, size_t alignment) {
return total_rs;
}
-// Split the reserved space into main Java heap and a space for
-// classes so that they can be compressed using the same algorithm
-// as compressed oops. If compress oops and compress klass ptrs are
-// used we need the meta space first: if the alignment used for
-// compressed oops is greater than the one used for compressed klass
-// ptrs, a metadata space on top of the heap could become
-// unreachable.
-ReservedSpace class_rs = total_rs.first_part(Universe::class_metaspace_size());
-ReservedSpace heap_rs = total_rs.last_part(Universe::class_metaspace_size(), alignment);
-Metaspace::initialize_class_space(class_rs);
if (UseCompressedOops) {
// Universe::initialize_heap() will reset this to NULL if unscaled
// or zero-based narrow oops are actually used.
address base = (address)(total_rs.base() - os::vm_page_size());
Universe::set_narrow_oop_base(base);
}
-return heap_rs;
+return total_rs;
}
-// It's the caller's repsonsibility to ensure glitch-freedom
+// It's the caller's responsibility to ensure glitch-freedom
// (if required).
void Universe::update_heap_info_at_gc() {
_heap_capacity_at_last_gc = heap()->capacity();

src/share/vm/memory/universe.hpp

@@ -75,10 +75,10 @@ class LatestMethodCache : public CHeapObj<mtClass> {
};
-// For UseCompressedOops and UseCompressedKlassPointers.
+// For UseCompressedOops.
struct NarrowPtrStruct {
-// Base address for oop/klass-within-java-object materialization.
-// NULL if using wide oops/klasses or zero based narrow oops/klasses.
+// Base address for oop-within-java-object materialization.
+// NULL if using wide oops or zero based narrow oops.
address _base;
// Number of shift bits for encoding/decoding narrow ptrs.
// 0 if using wide ptrs or zero based unscaled narrow ptrs,
@@ -106,6 +106,7 @@ class Universe: AllStatic {
friend class SystemDictionary;
friend class VMStructs;
friend class VM_PopulateDumpSharedSpace;
+friend class Metaspace;
friend jint universe_init();
friend void universe2_init();
@@ -184,9 +185,6 @@ class Universe: AllStatic {
static struct NarrowPtrStruct _narrow_klass;
static address _narrow_ptrs_base;
-// Aligned size of the metaspace.
-static size_t _class_metaspace_size;
// array of dummy objects used with +FullGCAlot
debug_only(static objArrayOop _fullgc_alot_dummy_array;)
// index of next entry to clear
@@ -238,15 +236,6 @@ class Universe: AllStatic {
assert(UseCompressedOops, "no compressed ptrs?");
_narrow_oop._use_implicit_null_checks = use;
}
-static bool reserve_metaspace_helper(bool with_base = false);
-static ReservedHeapSpace reserve_heap_metaspace(size_t heap_size, size_t alignment, bool& contiguous);
-static size_t class_metaspace_size() {
-return _class_metaspace_size;
-}
-static void set_class_metaspace_size(size_t metaspace_size) {
-_class_metaspace_size = metaspace_size;
-}
// Debugging
static int _verify_count; // number of verifies done