Coleen Phillimore 2013-07-02 16:54:24 +02:00
commit 4ecee47075
1452 changed files with 45943 additions and 22788 deletions

View file

@@ -71,13 +71,6 @@ bool MetaspaceObj::is_shared() const {
   return MetaspaceShared::is_in_shared_space(this);
 }
 
-bool MetaspaceObj::is_metadata() const {
-  // GC Verify checks use this in guarantees.
-  // TODO: either replace them with is_metaspace_object() or remove them.
-  // is_metaspace_object() is slower than this test. This test doesn't
-  // seem very useful for metaspace objects anymore though.
-  return !Universe::heap()->is_in_reserved(this);
-}
 
 bool MetaspaceObj::is_metaspace_object() const {
   return Metaspace::contains((void*)this);
@@ -263,7 +256,7 @@ class ChunkPool: public CHeapObj<mtInternal> {
   ChunkPool(size_t size) : _size(size) { _first = NULL; _num_chunks = _num_used = 0; }
 
   // Allocate a new chunk from the pool (might expand the pool)
-  _NOINLINE_ void* allocate(size_t bytes) {
+  _NOINLINE_ void* allocate(size_t bytes, AllocFailType alloc_failmode) {
     assert(bytes == _size, "bad size");
     void* p = NULL;
     // No VM lock can be taken inside ThreadCritical lock, so os::malloc
@@ -273,9 +266,9 @@ class ChunkPool: public CHeapObj<mtInternal> {
       p = get_first();
     }
     if (p == NULL) p = os::malloc(bytes, mtChunk, CURRENT_PC);
-    if (p == NULL)
+    if (p == NULL && alloc_failmode == AllocFailStrategy::EXIT_OOM) {
       vm_exit_out_of_memory(bytes, OOM_MALLOC_ERROR, "ChunkPool::allocate");
+    }
     return p;
   }
@@ -372,7 +365,7 @@ class ChunkPoolCleaner : public PeriodicTask {
 //--------------------------------------------------------------------------------------
 // Chunk implementation
 
-void* Chunk::operator new(size_t requested_size, size_t length) {
+void* Chunk::operator new (size_t requested_size, AllocFailType alloc_failmode, size_t length) {
   // requested_size is equal to sizeof(Chunk) but in order for the arena
   // allocations to come out aligned as expected the size must be aligned
   // to expected arena alignment.
@@ -380,13 +373,14 @@ void* Chunk::operator new(size_t requested_size, size_t length) {
   assert(ARENA_ALIGN(requested_size) == aligned_overhead_size(), "Bad alignment");
   size_t bytes = ARENA_ALIGN(requested_size) + length;
   switch (length) {
-   case Chunk::size:        return ChunkPool::large_pool()->allocate(bytes);
-   case Chunk::medium_size: return ChunkPool::medium_pool()->allocate(bytes);
-   case Chunk::init_size:   return ChunkPool::small_pool()->allocate(bytes);
+   case Chunk::size:        return ChunkPool::large_pool()->allocate(bytes, alloc_failmode);
+   case Chunk::medium_size: return ChunkPool::medium_pool()->allocate(bytes, alloc_failmode);
+   case Chunk::init_size:   return ChunkPool::small_pool()->allocate(bytes, alloc_failmode);
    default: {
-     void *p = os::malloc(bytes, mtChunk, CALLER_PC);
-     if (p == NULL)
+     void* p = os::malloc(bytes, mtChunk, CALLER_PC);
+     if (p == NULL && alloc_failmode == AllocFailStrategy::EXIT_OOM) {
       vm_exit_out_of_memory(bytes, OOM_MALLOC_ERROR, "Chunk::new");
+     }
      return p;
    }
  }
@@ -440,7 +434,7 @@ NOT_PRODUCT(volatile jint Arena::_instance_count = 0;)
 Arena::Arena(size_t init_size) {
   size_t round_size = (sizeof (char *)) - 1;
   init_size = (init_size+round_size) & ~round_size;
-  _first = _chunk = new (init_size) Chunk(init_size);
+  _first = _chunk = new (AllocFailStrategy::EXIT_OOM, init_size) Chunk(init_size);
   _hwm = _chunk->bottom();      // Save the cached hwm, max
   _max = _chunk->top();
   set_size_in_bytes(init_size);
@@ -448,7 +442,7 @@ Arena::Arena(size_t init_size) {
 }
 
 Arena::Arena() {
-  _first = _chunk = new (Chunk::init_size) Chunk(Chunk::init_size);
+  _first = _chunk = new (AllocFailStrategy::EXIT_OOM, Chunk::init_size) Chunk(Chunk::init_size);
   _hwm = _chunk->bottom();      // Save the cached hwm, max
   _max = _chunk->top();
   set_size_in_bytes(Chunk::init_size);
@@ -555,12 +549,9 @@ void* Arena::grow(size_t x, AllocFailType alloc_failmode) {
   size_t len = MAX2(x, (size_t) Chunk::size);
 
   Chunk *k = _chunk;            // Get filled-up chunk address
-  _chunk = new (len) Chunk(len);
+  _chunk = new (alloc_failmode, len) Chunk(len);
 
   if (_chunk == NULL) {
-    if (alloc_failmode == AllocFailStrategy::EXIT_OOM) {
-      signal_out_of_memory(len * Chunk::aligned_overhead_size(), "Arena::grow");
-    }
     return NULL;
   }
 
   if (k) k->set_next(_chunk);   // Append new chunk to end of linked list
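The hunks above all follow one pattern: an AllocFailType argument is threaded from Arena::grow() down through Chunk::operator new into ChunkPool::allocate(), so a caller can choose between the old behavior (exit the VM on OOM) and getting NULL back. A standalone sketch of that pattern, separate from the commit (pool_alloc and the abort message are illustrative; only the enum values follow the diff):

    #include <cstdio>
    #include <cstdlib>

    // Mirrors the two policies used in the diff.
    enum AllocFailType { EXIT_OOM, RETURN_NULL };

    // On failure, either terminate (the old behavior, still the default)
    // or hand NULL back to a caller prepared to deal with it.
    static void* pool_alloc(size_t bytes, AllocFailType mode) {
      void* p = std::malloc(bytes);
      if (p == nullptr && mode == EXIT_OOM) {
        std::fprintf(stderr, "out of memory allocating %zu bytes\n", bytes);
        std::abort();   // stands in for vm_exit_out_of_memory()
      }
      return p;         // may be NULL under RETURN_NULL
    }

    int main() {
      void* must_succeed = pool_alloc(64, EXIT_OOM);      // aborts on failure
      void* may_fail     = pool_alloc(64, RETURN_NULL);   // caller checks
      if (may_fail == nullptr) { /* recover, e.g. abandon an optimization */ }
      std::free(must_succeed);
      std::free(may_fail);
    }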

View file

@@ -264,7 +264,6 @@ class ClassLoaderData;
 class MetaspaceObj {
  public:
-  bool is_metadata() const;
   bool is_metaspace_object() const; // more specific test but slower
   bool is_shared() const;
   void print_address_on(outputStream* st) const; // nonvirtual address printing
@@ -340,7 +339,7 @@ class Chunk: CHeapObj<mtChunk> {
   Chunk*       _next;     // Next Chunk in list
   const size_t _len;      // Size of this Chunk
  public:
-  void* operator new(size_t size, size_t length);
+  void* operator new(size_t size, AllocFailType alloc_failmode, size_t length);
   void  operator delete(void* p);
   Chunk(size_t length);
@ -403,10 +402,15 @@ protected:
void signal_out_of_memory(size_t request, const char* whence) const;
void check_for_overflow(size_t request, const char* whence) const {
bool check_for_overflow(size_t request, const char* whence,
AllocFailType alloc_failmode = AllocFailStrategy::EXIT_OOM) const {
if (UINTPTR_MAX - request < (uintptr_t)_hwm) {
if (alloc_failmode == AllocFailStrategy::RETURN_NULL) {
return false;
}
signal_out_of_memory(request, whence);
}
return true;
}
public:
@@ -430,7 +434,8 @@ protected:
     assert(is_power_of_2(ARENA_AMALLOC_ALIGNMENT) , "should be a power of 2");
     x = ARENA_ALIGN(x);
     debug_only(if (UseMallocOnly) return malloc(x);)
-    check_for_overflow(x, "Arena::Amalloc");
+    if (!check_for_overflow(x, "Arena::Amalloc", alloc_failmode))
+      return NULL;
     NOT_PRODUCT(inc_bytes_allocated(x);)
     if (_hwm + x > _max) {
       return grow(x, alloc_failmode);
@@ -444,7 +449,8 @@ protected:
   void *Amalloc_4(size_t x, AllocFailType alloc_failmode = AllocFailStrategy::EXIT_OOM) {
     assert( (x&(sizeof(char*)-1)) == 0, "misaligned size" );
     debug_only(if (UseMallocOnly) return malloc(x);)
-    check_for_overflow(x, "Arena::Amalloc_4");
+    if (!check_for_overflow(x, "Arena::Amalloc_4", alloc_failmode))
+      return NULL;
     NOT_PRODUCT(inc_bytes_allocated(x);)
     if (_hwm + x > _max) {
       return grow(x, alloc_failmode);
@@ -465,7 +471,8 @@ protected:
     size_t delta = (((size_t)_hwm + DALIGN_M1) & ~DALIGN_M1) - (size_t)_hwm;
     x += delta;
 #endif
-    check_for_overflow(x, "Arena::Amalloc_D");
+    if (!check_for_overflow(x, "Arena::Amalloc_D", alloc_failmode))
+      return NULL;
     NOT_PRODUCT(inc_bytes_allocated(x);)
     if (_hwm + x > _max) {
       return grow(x, alloc_failmode); // grow() returns a result aligned >= 8 bytes.
@@ -732,13 +739,21 @@ public:
 // is set so that we always use malloc except for Solaris where we set the
 // limit to get mapped memory.
 template <class E, MEMFLAGS F>
-class ArrayAllocator : StackObj {
+class ArrayAllocator VALUE_OBJ_CLASS_SPEC {
   char* _addr;
   bool _use_malloc;
   size_t _size;
+  bool _free_in_destructor;
  public:
-  ArrayAllocator() : _addr(NULL), _use_malloc(false), _size(0) { }
-  ~ArrayAllocator() { free(); }
+  ArrayAllocator(bool free_in_destructor = true) :
+    _addr(NULL), _use_malloc(false), _size(0), _free_in_destructor(free_in_destructor) { }
+
+  ~ArrayAllocator() {
+    if (_free_in_destructor) {
+      free();
+    }
+  }
+
   E* allocate(size_t length);
   void free();
 };
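check_for_overflow() changes from void to bool for the same reason as the arena changes above: with AllocFailStrategy::RETURN_NULL in play, the Amalloc variants need a way to back out before the _hwm + x arithmetic wraps, instead of unconditionally raising the out-of-memory signal. A compilable sketch of the reworked control flow, separate from the commit (Arena here is reduced to a bump pointer; the real class grows chunk lists):

    #include <cstdint>
    #include <cstdio>
    #include <cstdlib>

    enum AllocFailType { EXIT_OOM, RETURN_NULL };

    struct Arena {
      char* _hwm;   // bump-pointer high-water mark, as in the real Arena
      char* _max;

      // The bool result lets RETURN_NULL callers back out; EXIT_OOM keeps
      // the old abort-on-overflow behavior.
      bool check_for_overflow(size_t request, const char* whence,
                              AllocFailType mode) const {
        if (UINTPTR_MAX - request < (uintptr_t)_hwm) {
          if (mode == RETURN_NULL) return false;
          std::fprintf(stderr, "%s: size overflow\n", whence);
          std::abort();
        }
        return true;
      }

      void* Amalloc(size_t x, AllocFailType mode) {
        if (!check_for_overflow(x, "Arena::Amalloc", mode))
          return nullptr;                    // propagate failure upward
        if (_hwm + x > _max) return nullptr; // real code grows a chunk here
        void* p = _hwm;
        _hwm += x;
        return p;
      }
    };

    int main() {
      static char buf[1024];
      Arena a{buf, buf + sizeof(buf)};
      void* ok = a.Amalloc(128, EXIT_OOM);
      void* overflowed = a.Amalloc(SIZE_MAX, RETURN_NULL);  // returns NULL
      std::printf("ok=%p overflowed=%p\n", ok, overflowed);
    }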

View file

@@ -549,3 +549,13 @@ bool FileMapInfo::is_in_shared_space(const void* p) {
   return false;
 }
 
+void FileMapInfo::print_shared_spaces() {
+  gclog_or_tty->print_cr("Shared Spaces:");
+  for (int i = 0; i < MetaspaceShared::n_regions; i++) {
+    struct FileMapInfo::FileMapHeader::space_info* si = &_header._space[i];
+    gclog_or_tty->print("  %s " INTPTR_FORMAT "-" INTPTR_FORMAT,
+                        shared_region_name[i],
+                        si->_base, si->_base + si->_used);
+  }
+}
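The new FileMapInfo::print_shared_spaces() is table-driven: instead of one hard-coded print per region, it walks the header's space_info records and pairs each with a name from shared_region_name. A self-contained sketch of that shape, separate from the commit (the region names match the CDS regions; the sample addresses are made up):

    #include <cinttypes>
    #include <cstdint>
    #include <cstdio>

    // Stand-ins for the header's space_info records and the name table.
    struct SpaceInfo { uintptr_t _base; size_t _used; };

    static const char* const shared_region_name[] = { "ro", "rw", "md", "mc" };
    static const int n_regions = 4;

    // One loop covers every region, so adding a region means adding a table
    // entry rather than another print statement.
    static void print_shared_spaces(const SpaceInfo* space) {
      std::printf("Shared Spaces:\n");
      for (int i = 0; i < n_regions; i++) {
        std::printf("  %s 0x%" PRIxPTR "-0x%" PRIxPTR "\n",
                    shared_region_name[i], space[i]._base,
                    (uintptr_t)(space[i]._base + space[i]._used));
      }
    }

    int main() {
      const SpaceInfo space[n_regions] = {
        { 0x8000000, 0x100000 }, { 0x8100000, 0x80000 },
        { 0x8180000, 0x40000 },  { 0x81c0000, 0x20000 },
      };
      print_shared_spaces(space);
    }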

View file

@@ -149,6 +149,7 @@ public:
   // Return true if given address is in the mapped shared space.
   bool is_in_shared_space(const void* p) NOT_CDS_RETURN_(false);
+  void print_shared_spaces() NOT_CDS_RETURN;
 };
 
 #endif // SHARE_VM_MEMORY_FILEMAP_HPP

View file

@@ -157,7 +157,6 @@ KlassInfoTable::~KlassInfoTable() {
 }
 
 uint KlassInfoTable::hash(const Klass* p) {
-  assert(p->is_metadata(), "all klasses are metadata");
   return (uint)(((uintptr_t)p - (uintptr_t)_ref) >> 2);
 }

View file

@@ -1561,19 +1561,7 @@ bool Metadebug::test_metadata_failure() {
 // ChunkManager methods
 
-// Verification of _free_chunks_total and _free_chunks_count does not
-// work with the CMS collector because its use of additional locks
-// complicate the mutex deadlock detection but it can still be useful
-// for detecting errors in the chunk accounting with other collectors.
-
 size_t ChunkManager::free_chunks_total() {
-#ifdef ASSERT
-  if (!UseConcMarkSweepGC && !SpaceManager::expand_lock()->is_locked()) {
-    MutexLockerEx cl(SpaceManager::expand_lock(),
-                     Mutex::_no_safepoint_check_flag);
-    slow_locked_verify_free_chunks_total();
-  }
-#endif
   return _free_chunks_total;
 }
@@ -2610,14 +2598,14 @@ void MetaspaceAux::print_metaspace_change(size_t prev_metadata_used) {
                         "->" SIZE_FORMAT
                         "("  SIZE_FORMAT ")",
                         prev_metadata_used,
-                        allocated_capacity_bytes(),
+                        allocated_used_bytes(),
                         reserved_in_bytes());
   } else {
     gclog_or_tty->print(" "  SIZE_FORMAT "K"
                         "->" SIZE_FORMAT "K"
                         "("  SIZE_FORMAT "K)",
                         prev_metadata_used / K,
-                        allocated_capacity_bytes() / K,
+                        allocated_used_bytes() / K,
                         reserved_in_bytes()/ K);
   }

View file

@@ -826,35 +826,15 @@ public:
   bool reading() const { return true; }
 };
 
-// Save bounds of shared spaces mapped in.
-static char* _ro_base = NULL;
-static char* _rw_base = NULL;
-static char* _md_base = NULL;
-static char* _mc_base = NULL;
-
 // Return true if given address is in the mapped shared space.
 bool MetaspaceShared::is_in_shared_space(const void* p) {
-  if (_ro_base == NULL || _rw_base == NULL) {
-    return false;
-  } else {
-    return ((p >= _ro_base && p < (_ro_base + SharedReadOnlySize)) ||
-            (p >= _rw_base && p < (_rw_base + SharedReadWriteSize)));
-  }
+  return UseSharedSpaces && FileMapInfo::current_info()->is_in_shared_space(p);
 }
 
 void MetaspaceShared::print_shared_spaces() {
-  gclog_or_tty->print_cr("Shared Spaces:");
-  gclog_or_tty->print("  read-only  " INTPTR_FORMAT "-" INTPTR_FORMAT,
-                      _ro_base, _ro_base + SharedReadOnlySize);
-  gclog_or_tty->print("  read-write " INTPTR_FORMAT "-" INTPTR_FORMAT,
-                      _rw_base, _rw_base + SharedReadWriteSize);
-  gclog_or_tty->cr();
-  gclog_or_tty->print("  misc-data  " INTPTR_FORMAT "-" INTPTR_FORMAT,
-                      _md_base, _md_base + SharedMiscDataSize);
-  gclog_or_tty->print("  misc-code  " INTPTR_FORMAT "-" INTPTR_FORMAT,
-                      _mc_base, _mc_base + SharedMiscCodeSize);
-  gclog_or_tty->cr();
+  if (UseSharedSpaces) {
+    FileMapInfo::current_info()->print_shared_spaces();
+  }
 }
@@ -874,6 +854,11 @@ bool MetaspaceShared::map_shared_spaces(FileMapInfo* mapinfo) {
   assert(!DumpSharedSpaces, "Should not be called with DumpSharedSpaces");
 
+  char* _ro_base = NULL;
+  char* _rw_base = NULL;
+  char* _md_base = NULL;
+  char* _mc_base = NULL;
+
   // Map each shared region
   if ((_ro_base = mapinfo->map_region(ro)) != NULL &&
       (_rw_base = mapinfo->map_region(rw)) != NULL &&
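Two related cleanups in this file: the file-scope _ro_base/_rw_base/_md_base/_mc_base mirrors disappear, MetaspaceShared delegates membership and printing to the FileMapInfo that owns the mapping, and the base pointers survive only as locals of map_shared_spaces(). A sketch of that ownership shift, separate from the commit (the types are reduced to bare stand-ins):

    #include <cstddef>
    #include <cstdint>

    // Reduced stand-in for FileMapInfo: the mapping owns its region bounds,
    // so no other file needs to cache copies of the base addresses.
    struct FileMapInfo {
      struct { uintptr_t base; size_t used; } _space[4];

      bool is_in_shared_space(const void* p) const {
        uintptr_t a = (uintptr_t)p;
        for (int i = 0; i < 4; i++)
          if (a >= _space[i].base && a < _space[i].base + _space[i].used)
            return true;
        return false;
      }
    };

    static FileMapInfo the_map;             // stands in for current_info()
    static bool use_shared_spaces = true;   // stands in for UseSharedSpaces

    // The MetaspaceShared query is now just a guarded delegation.
    bool metaspace_is_in_shared_space(const void* p) {
      return use_shared_spaces && the_map.is_in_shared_space(p);
    }

    int main() {
      the_map._space[0] = { 0x8000000, 0x100000 };
      return metaspace_is_in_shared_space((void*)0x8000010) ? 0 : 1;
    }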

View file

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2012, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it

View file

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2000, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -47,7 +47,6 @@ enum SH_process_strong_roots_tasks {
   SH_PS_SystemDictionary_oops_do,
   SH_PS_ClassLoaderDataGraph_oops_do,
   SH_PS_jvmti_oops_do,
-  SH_PS_StringTable_oops_do,
   SH_PS_CodeCache_oops_do,
   // Leave this one last.
   SH_PS_NumElements
@@ -127,6 +126,8 @@ SharedHeap::StrongRootsScope::StrongRootsScope(SharedHeap* outer, bool activate)
 {
   if (_active) {
     outer->change_strong_roots_parity();
+    // Zero the claimed high water mark in the StringTable
+    StringTable::clear_parallel_claimed_index();
   }
 }
@@ -154,14 +155,16 @@ void SharedHeap::process_strong_roots(bool activate_scope,
   // Global (strong) JNI handles
   if (!_process_strong_tasks->is_task_claimed(SH_PS_JNIHandles_oops_do))
     JNIHandles::oops_do(roots);
+
   // All threads execute this; the individual threads are task groups.
   CLDToOopClosure roots_from_clds(roots);
   CLDToOopClosure* roots_from_clds_p = (is_scavenging ? NULL : &roots_from_clds);
-  if (ParallelGCThreads > 0) {
-    Threads::possibly_parallel_oops_do(roots, roots_from_clds_p ,code_roots);
+  if (CollectedHeap::use_parallel_gc_threads()) {
+    Threads::possibly_parallel_oops_do(roots, roots_from_clds_p, code_roots);
   } else {
     Threads::oops_do(roots, roots_from_clds_p, code_roots);
   }
+
   if (!_process_strong_tasks-> is_task_claimed(SH_PS_ObjectSynchronizer_oops_do))
     ObjectSynchronizer::oops_do(roots);
   if (!_process_strong_tasks->is_task_claimed(SH_PS_FlatProfiler_oops_do))
@@ -189,8 +192,12 @@ void SharedHeap::process_strong_roots(bool activate_scope,
     }
   }
 
-  if (!_process_strong_tasks->is_task_claimed(SH_PS_StringTable_oops_do)) {
-    if (so & SO_Strings) {
+  // All threads execute the following. A specific chunk of buckets
+  // from the StringTable are the individual tasks.
+  if (so & SO_Strings) {
+    if (CollectedHeap::use_parallel_gc_threads()) {
+      StringTable::possibly_parallel_oops_do(roots);
+    } else {
       StringTable::oops_do(roots);
     }
   }
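The StringTable change replaces a single claimed task (one thread scanned the whole table) with chunked self-scheduling: StrongRootsScope resets a claimed high-water mark, and every GC worker then grabs disjoint chunks of buckets through an atomic counter until the table is exhausted. A standalone sketch of that claiming scheme, separate from the commit (the chunk size and the integer "buckets" are invented):

    #include <algorithm>
    #include <atomic>
    #include <cstdio>
    #include <thread>
    #include <vector>

    static const int table_size = 1000;
    static int table[table_size];                 // stands in for the buckets
    static std::atomic<int> parallel_claimed_index{0};

    // Done once per scan, like StringTable::clear_parallel_claimed_index().
    static void clear_parallel_claimed_index() { parallel_claimed_index = 0; }

    // Every worker runs this. fetch_add hands each caller a disjoint range,
    // which is the essence of possibly_parallel_oops_do().
    static void possibly_parallel_scan() {
      const int chunk = 32;
      for (;;) {
        int start = parallel_claimed_index.fetch_add(chunk);
        if (start >= table_size) break;           // nothing left to claim
        int end = std::min(start + chunk, table_size);
        for (int i = start; i < end; i++) table[i]++;  // "process" bucket i
      }
    }

    int main() {
      clear_parallel_claimed_index();
      std::vector<std::thread> workers;
      for (int w = 0; w < 4; w++) workers.emplace_back(possibly_parallel_scan);
      for (auto& t : workers) t.join();
      std::printf("bucket 0 visited %d time(s)\n", table[0]);  // prints 1
    }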

View file

@@ -108,6 +108,7 @@ oop Universe::_the_null_string = NULL;
 oop Universe::_the_min_jint_string = NULL;
 LatestMethodOopCache* Universe::_finalizer_register_cache = NULL;
 LatestMethodOopCache* Universe::_loader_addClass_cache    = NULL;
+LatestMethodOopCache* Universe::_pd_implies_cache         = NULL;
 ActiveMethodOopsCache* Universe::_reflect_invoke_cache    = NULL;
 oop Universe::_out_of_memory_error_java_heap          = NULL;
 oop Universe::_out_of_memory_error_metaspace          = NULL;
@@ -226,6 +227,7 @@ void Universe::serialize(SerializeClosure* f, bool do_all) {
   _finalizer_register_cache->serialize(f);
   _loader_addClass_cache->serialize(f);
   _reflect_invoke_cache->serialize(f);
+  _pd_implies_cache->serialize(f);
 }
 
 void Universe::check_alignment(uintx size, uintx alignment, const char* name) {
@@ -650,6 +652,7 @@ jint universe_init() {
     // Metaspace::initialize_shared_spaces() tries to populate them.
     Universe::_finalizer_register_cache = new LatestMethodOopCache();
     Universe::_loader_addClass_cache    = new LatestMethodOopCache();
+    Universe::_pd_implies_cache         = new LatestMethodOopCache();
     Universe::_reflect_invoke_cache     = new ActiveMethodOopsCache();
 
     if (UseSharedSpaces) {
@@ -1116,6 +1119,23 @@ bool universe_post_init() {
   Universe::_loader_addClass_cache->init(
     SystemDictionary::ClassLoader_klass(), m, CHECK_false);
 
+  // Setup method for checking protection domain
+  InstanceKlass::cast(SystemDictionary::ProtectionDomain_klass())->link_class(CHECK_false);
+  m = InstanceKlass::cast(SystemDictionary::ProtectionDomain_klass())->
+        find_method(vmSymbols::impliesCreateAccessControlContext_name(),
+                    vmSymbols::void_boolean_signature());
+  // Allow NULL which should only happen with bootstrapping.
+  if (m != NULL) {
+    if (m->is_static()) {
+      // NoSuchMethodException doesn't actually work because it tries to run the
+      // <init> function before java_lang_Class is linked. Print error and exit.
+      tty->print_cr("ProtectionDomain.impliesCreateAccessControlContext() has the wrong linkage");
+      return false; // initialization failed
+    }
+    Universe::_pd_implies_cache->init(
+      SystemDictionary::ProtectionDomain_klass(), m, CHECK_false);
+  }
+
   // The following is initializing converter functions for serialization in
   // JVM.cpp. If we clean up the StrictMath code above we may want to find
   // a better solution for this as well.
@@ -1133,6 +1153,7 @@ bool universe_post_init() {
   // Initialize performance counters for metaspaces
   MetaspaceCounters::initialize_performance_counters();
+  MemoryService::add_metaspace_memory_pools();
 
   GC_locker::unlock();     // allow gc after bootstrapping
@@ -1533,6 +1554,7 @@ bool ActiveMethodOopsCache::is_same_method(const Method* method) const {
 Method* LatestMethodOopCache::get_Method() {
+  if (klass() == NULL) return NULL;
   InstanceKlass* ik = InstanceKlass::cast(klass());
   Method* m = ik->method_with_idnum(method_idnum());
   assert(m != NULL, "sanity check");
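universe.cpp wires up the new _pd_implies_cache: ProtectionDomain is linked, impliesCreateAccessControlContext() is looked up (NULL is tolerated because the method may not exist while bootstrapping against an older JDK), and get_Method() gains a klass() == NULL guard so an unfilled cache yields NULL instead of crashing. A minimal sketch of a cache with that contract, separate from the commit (all types are invented stand-ins):

    #include <cstdio>

    struct Klass  { int id; };
    struct Method { const char* name; };

    // Invented stand-in for LatestMethodOopCache: init() may never run
    // during bootstrapping, so get() must tolerate an empty cache.
    class MethodCache {
      Klass*  _klass  = nullptr;
      Method* _method = nullptr;
     public:
      void init(Klass* k, Method* m) { _klass = k; _method = m; }
      Method* get() const {
        if (_klass == nullptr) return nullptr;   // cache never filled
        return _method;
      }
    };

    int main() {
      MethodCache pd_implies_cache;
      if (pd_implies_cache.get() == nullptr)     // every caller handles NULL
        std::puts("no implies() method yet: skip the protection domain check");
      Klass k{1};
      Method m{"impliesCreateAccessControlContext"};
      pd_implies_cache.init(&k, &m);
      std::printf("cached: %s\n", pd_implies_cache.get()->name);
    }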

View file

@@ -176,6 +176,7 @@ class Universe: AllStatic {
   static oop _the_min_jint_string;          // A cache of "-2147483648" as a Java string
   static LatestMethodOopCache* _finalizer_register_cache; // static method for registering finalizable objects
   static LatestMethodOopCache* _loader_addClass_cache;    // method for registering loaded classes in class loader vector
+  static LatestMethodOopCache* _pd_implies_cache;         // method for checking protection domain attributes
   static ActiveMethodOopsCache* _reflect_invoke_cache;    // method for security checks
   // preallocated error objects (no backtrace)
   static oop _out_of_memory_error_java_heap;
@@ -335,7 +336,10 @@ class Universe: AllStatic {
   static oop          the_min_jint_string()          { return _the_min_jint_string; }
   static Method*      finalizer_register_method()    { return _finalizer_register_cache->get_Method(); }
   static Method*      loader_addClass_method()       { return _loader_addClass_cache->get_Method(); }
+
+  static Method*      protection_domain_implies_method() { return _pd_implies_cache->get_Method(); }
+
   static ActiveMethodOopsCache* reflect_invoke_cache() { return _reflect_invoke_cache; }
+
   static oop          null_ptr_exception_instance()   { return _null_ptr_exception_instance; }
   static oop          arithmetic_exception_instance() { return _arithmetic_exception_instance; }
   static oop          virtual_machine_error_instance() { return _virtual_machine_error_instance; }