8194312: Support parallel and concurrent JNI global handle processing

Add OopStorage; change JNI global/weak handles to use OopStorage.

Reviewed-by: coleenp, sspitsyn, eosterlund
Kim Barrett 2017-11-21 09:47:55 -05:00
parent 3c2e5acfce
commit e1356ec6cf
23 changed files with 3154 additions and 286 deletions

gc/shared/oopStorage.cpp

@@ -0,0 +1,707 @@
/*
* Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#include "precompiled.hpp"
#include "gc/shared/oopStorage.hpp"
#include "logging/log.hpp"
#include "memory/allocation.inline.hpp"
#include "runtime/atomic.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/mutex.hpp"
#include "runtime/mutexLocker.hpp"
#include "runtime/orderAccess.inline.hpp"
#include "runtime/safepoint.hpp"
#include "runtime/stubRoutines.hpp"
#include "utilities/align.hpp"
#include "utilities/count_trailing_zeros.hpp"
#include "utilities/debug.hpp"
#include "utilities/globalDefinitions.hpp"
#include "utilities/macros.hpp"
#include "utilities/ostream.hpp"
OopStorage::BlockEntry::BlockEntry() : _prev(NULL), _next(NULL) {}
OopStorage::BlockEntry::~BlockEntry() {
assert(_prev == NULL, "deleting attached block");
assert(_next == NULL, "deleting attached block");
}
OopStorage::BlockList::BlockList(const BlockEntry& (*get_entry)(const Block& block)) :
_head(NULL), _tail(NULL), _get_entry(get_entry)
{}
OopStorage::BlockList::~BlockList() {
// ~OopStorage() empties its lists before destroying them.
assert(_head == NULL, "deleting non-empty block list");
assert(_tail == NULL, "deleting non-empty block list");
}
void OopStorage::BlockList::push_front(const Block& block) {
const Block* old = _head;
if (old == NULL) {
assert(_tail == NULL, "invariant");
_head = _tail = █
} else {
_get_entry(block)._next = old;
_get_entry(*old)._prev = █
_head = █
}
}
void OopStorage::BlockList::push_back(const Block& block) {
const Block* old = _tail;
if (old == NULL) {
assert(_head == NULL, "invariant");
_head = _tail = █
} else {
_get_entry(*old)._next = █
_get_entry(block)._prev = old;
_tail = █
}
}
void OopStorage::BlockList::unlink(const Block& block) {
const BlockEntry& block_entry = _get_entry(block);
const Block* prev_blk = block_entry._prev;
const Block* next_blk = block_entry._next;
block_entry._prev = NULL;
block_entry._next = NULL;
if ((prev_blk == NULL) && (next_blk == NULL)) {
assert(_head == &block, "invariant");
assert(_tail == &block, "invariant");
_head = _tail = NULL;
} else if (prev_blk == NULL) {
assert(_head == &block, "invariant");
_get_entry(*next_blk)._prev = NULL;
_head = next_blk;
} else if (next_blk == NULL) {
assert(_tail == &block, "invariant");
_get_entry(*prev_blk)._next = NULL;
_tail = prev_blk;
} else {
_get_entry(*next_blk)._prev = prev_blk;
_get_entry(*prev_blk)._next = next_blk;
}
}
// Blocks start with an array of BitsPerWord oop entries. That array
// is divided into conceptual BytesPerWord sections of BitsPerByte
// entries. Blocks are allocated aligned on section boundaries, for
// the convenience of mapping from an entry to the containing block;
// see block_for_ptr(). Aligning on section boundary rather than on
// the full _data wastes a lot less space, but makes for a bit more
// work in block_for_ptr().
const unsigned section_size = BitsPerByte;
const unsigned section_count = BytesPerWord;
const unsigned block_alignment = sizeof(oop) * section_size;
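// Worked example (illustrative, assuming a typical 64-bit platform): a block
// holds BitsPerWord == 64 oop entries, covered by one uintx bitmask. Then
// section_size == 8 entries, section_count == 8 sections, and
// block_alignment == sizeof(oop) * 8 == 64 bytes, while the full _data array
// occupies 64 * 8 == 512 bytes. Aligning blocks to 64 bytes instead of 512
// wastes far less allocation padding, at the cost of probing up to
// section_count candidate positions in block_for_ptr().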
// VS2013 warns (C4351) that elements of _data will be *correctly* default
// initialized, unlike earlier versions that *incorrectly* did not do so.
#ifdef _WINDOWS
#pragma warning(push)
#pragma warning(disable: 4351)
#endif // _WINDOWS
OopStorage::Block::Block(const OopStorage* owner, void* memory) :
_data(),
_allocated_bitmask(0),
_owner(owner),
_memory(memory),
_active_entry(),
_allocate_entry()
{
STATIC_ASSERT(_data_pos == 0);
STATIC_ASSERT(section_size * section_count == ARRAY_SIZE(_data));
assert(offset_of(Block, _data) == _data_pos, "invariant");
assert(owner != NULL, "NULL owner");
assert(is_aligned(this, block_alignment), "misaligned block");
}
#ifdef _WINDOWS
#pragma warning(pop)
#endif
OopStorage::Block::~Block() {
// Clear fields used by block_for_ptr and entry validation, which
// might help catch bugs. Volatile to prevent dead-store elimination.
const_cast<uintx volatile&>(_allocated_bitmask) = 0;
const_cast<OopStorage* volatile&>(_owner) = NULL;
}
const OopStorage::BlockEntry& OopStorage::Block::get_active_entry(const Block& block) {
return block._active_entry;
}
const OopStorage::BlockEntry& OopStorage::Block::get_allocate_entry(const Block& block) {
return block._allocate_entry;
}
size_t OopStorage::Block::allocation_size() {
// _data must be first member, so aligning Block aligns _data.
STATIC_ASSERT(_data_pos == 0);
return sizeof(Block) + block_alignment - sizeof(void*);
}
size_t OopStorage::Block::allocation_alignment_shift() {
return exact_log2(block_alignment);
}
inline bool is_full_bitmask(uintx bitmask) { return ~bitmask == 0; }
inline bool is_empty_bitmask(uintx bitmask) { return bitmask == 0; }
bool OopStorage::Block::is_full() const {
return is_full_bitmask(allocated_bitmask());
}
bool OopStorage::Block::is_empty() const {
return is_empty_bitmask(allocated_bitmask());
}
uintx OopStorage::Block::bitmask_for_entry(const oop* ptr) const {
return bitmask_for_index(get_index(ptr));
}
uintx OopStorage::Block::cmpxchg_allocated_bitmask(uintx new_value, uintx compare_value) {
return Atomic::cmpxchg(new_value, &_allocated_bitmask, compare_value);
}
bool OopStorage::Block::contains(const oop* ptr) const {
const oop* base = get_pointer(0);
return (base <= ptr) && (ptr < (base + ARRAY_SIZE(_data)));
}
unsigned OopStorage::Block::get_index(const oop* ptr) const {
assert(contains(ptr), PTR_FORMAT " not in block " PTR_FORMAT, p2i(ptr), p2i(this));
return static_cast<unsigned>(ptr - get_pointer(0));
}
oop* OopStorage::Block::allocate() {
// Use CAS loop because release may change bitmask outside of lock.
uintx allocated = allocated_bitmask();
while (true) {
assert(!is_full_bitmask(allocated), "attempt to allocate from full block");
unsigned index = count_trailing_zeros(~allocated);
uintx new_value = allocated | bitmask_for_index(index);
uintx fetched = cmpxchg_allocated_bitmask(new_value, allocated);
if (fetched == allocated) {
return get_pointer(index); // CAS succeeded; return entry for index.
}
allocated = fetched; // CAS failed; retry with latest value.
}
}
OopStorage::Block* OopStorage::Block::new_block(const OopStorage* owner) {
// _data must be first member: aligning block => aligning _data.
STATIC_ASSERT(_data_pos == 0);
size_t size_needed = allocation_size();
void* memory = NEW_C_HEAP_ARRAY_RETURN_NULL(char, size_needed, mtGC);
if (memory == NULL) {
return NULL;
}
void* block_mem = align_up(memory, block_alignment);
assert(sizeof(Block) + pointer_delta(block_mem, memory, 1) <= size_needed,
"allocated insufficient space for aligned block");
return ::new (block_mem) Block(owner, memory);
}
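// Illustrative note on the slack reserved by allocation_size(): the C-heap
// allocation above is at least sizeof(void*) aligned, so align_up() advances
// block_mem by at most block_alignment - sizeof(void*) bytes, which is
// exactly the extra space allocation_size() adds beyond sizeof(Block);
// hence the assert above cannot fire.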
void OopStorage::Block::delete_block(const Block& block) {
void* memory = block._memory;
block.Block::~Block();
FREE_C_HEAP_ARRAY(char, memory);
}
// This can return a false positive if ptr is not contained by some
// block. For some uses, it is a precondition that ptr is valid,
// e.g. contained in some block in owner's _active_list. Other uses
// require additional validation of the result.
OopStorage::Block*
OopStorage::Block::block_for_ptr(const OopStorage* owner, const oop* ptr) {
assert(CanUseSafeFetchN(), "precondition");
STATIC_ASSERT(_data_pos == 0);
// Const-ness of ptr is not related to const-ness of containing block.
// Blocks are allocated section-aligned, so get the containing section.
oop* section_start = align_down(const_cast<oop*>(ptr), block_alignment);
// Start with a guess that the containing section is the last section,
// so the block starts section_count-1 sections earlier.
oop* section = section_start - (section_size * (section_count - 1));
// Walk up through the potential block start positions, looking for
// the owner in the expected location. If we're below the actual block
// start position, the value at the owner position will be some oop
// (possibly NULL), which can never match the owner.
intptr_t owner_addr = reinterpret_cast<intptr_t>(owner);
for (unsigned i = 0; i < section_count; ++i, section += section_size) {
Block* candidate = reinterpret_cast<Block*>(section);
intptr_t* candidate_owner_addr
= reinterpret_cast<intptr_t*>(&candidate->_owner);
if (SafeFetchN(candidate_owner_addr, 0) == owner_addr) {
return candidate;
}
}
return NULL;
}
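// Example of the probe loop above (illustrative, 64-bit): sections are 8
// entries == 64 bytes, so align_down(ptr, 64) yields the start of ptr's
// section. The owning block can begin at that section or at any of the
// preceding section_count - 1 == 7 sections, giving at most 8 candidates,
// each tested by SafeFetchN-ing its would-be _owner field.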
bool OopStorage::is_valid_block_locked_or_safepoint(const Block* check_block) const {
assert_locked_or_safepoint(_allocate_mutex);
// For now, simple linear search. Do something more clever if this
// is a performance bottleneck, particularly for allocation_status.
for (const Block* block = _active_list.chead();
block != NULL;
block = _active_list.next(*block)) {
if (check_block == block) {
return true;
}
}
return false;
}
#ifdef ASSERT
void OopStorage::assert_at_safepoint() {
assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
}
#endif // ASSERT
//////////////////////////////////////////////////////////////////////////////
// Allocation
//
// Allocation involves the _allocate_list, which contains a subset of the
// blocks owned by a storage object. This is a doubly-linked list, linked
// through dedicated fields in the blocks. Full blocks are removed from this
// list, though they are still present in the _active_list. Empty blocks are
// kept at the end of the _allocate_list, to make it easy for empty block
// deletion to find them.
//
// allocate(), release(), and delete_empty_blocks_concurrent() all lock the
// _allocate_mutex while performing any list modifications.
//
// allocate() and release() update a block's _allocated_bitmask using CAS
// loops. This prevents loss of updates even though release() may perform
// some updates without any locking.
//
// allocate() obtains the entry from the first block in the _allocate_list,
// and updates that block's _allocated_bitmask to indicate the entry is in
// use. If this makes the block full (all entries in use), the block is
// removed from the _allocate_list so it won't be considered by future
// allocations until some entries in it are released.
//
// release() looks up the block for the entry without locking. Once the block
// has been determined, its _allocated_bitmask needs to be updated, and its
// position in the _allocate_list may need to be updated. There are two
// cases:
//
// (a) If the block is neither full nor would become empty with the release of
// the entry, only its _allocated_bitmask needs to be updated. But if the CAS
// update fails, the applicable case may change for the retry.
//
// (b) Otherwise, the _allocate_list will also need to be modified. This
// requires locking the _allocate_mutex, and then attempting to CAS the
// _allocated_bitmask. If the CAS fails, the applicable case may change for
// the retry. If the CAS succeeds, then update the _allocate_list according
// to the state changes. If the block changed from full to not full, then
// it needs to be added to the _allocate_list, for use in future allocations.
// If the block changed from not empty to empty, then it is moved to the end
// of the _allocate_list, for ease of empty block deletion processing.
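// Example of case (b) (illustrative): a thread releasing the sole allocated
// entry of a block with bitmask 0x1 computes new_value == 0 (empty), so it
// takes the _allocate_mutex, attempts the CAS 0x1 -> 0, and on success moves
// the block to the end of the _allocate_list as a deletion candidate. If the
// CAS fails because another thread concurrently allocated (bitmask now 0x3),
// the retry computes new_value == 0x2, and case (a) applies instead.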
oop* OopStorage::allocate() {
MutexLockerEx ml(_allocate_mutex, Mutex::_no_safepoint_check_flag);
Block* block = _allocate_list.head();
if (block == NULL) {
// No available blocks; make a new one, and add to storage.
{
MutexUnlockerEx mul(_allocate_mutex, Mutex::_no_safepoint_check_flag);
block = Block::new_block(this);
}
if (block != NULL) {
// Add new block to storage.
log_info(oopstorage, blocks)("%s: new block " PTR_FORMAT, name(), p2i(block));
// Add to end of _allocate_list. The mutex release allowed
// other threads to add blocks to the _allocate_list. We prefer
// to allocate from non-empty blocks, to allow empty blocks to
// be deleted.
_allocate_list.push_back(*block);
++_empty_block_count;
// Add to front of _active_list, and then record as the head
// block, for concurrent iteration protocol.
_active_list.push_front(*block);
++_block_count;
// Ensure all setup of block is complete before making it visible.
OrderAccess::release_store(&_active_head, block);
} else {
log_info(oopstorage, blocks)("%s: failed new block allocation", name());
}
block = _allocate_list.head();
if (block == NULL) {
// Failed to make new block, and no other thread made a block
// available while the mutex was released, so return failure.
return NULL;
}
}
// Allocate from first block.
assert(block != NULL, "invariant");
assert(!block->is_full(), "invariant");
if (block->is_empty()) {
// Transitioning from empty to not empty.
log_debug(oopstorage, blocks)("%s: block not empty " PTR_FORMAT, name(), p2i(block));
--_empty_block_count;
}
oop* result = block->allocate();
assert(result != NULL, "allocation failed");
assert(!block->is_empty(), "postcondition");
Atomic::inc(&_allocation_count); // release updates outside lock.
if (block->is_full()) {
// Transitioning from not full to full.
// Remove full blocks from consideration by future allocates.
log_debug(oopstorage, blocks)("%s: block full " PTR_FORMAT, name(), p2i(block));
_allocate_list.unlink(*block);
}
log_info(oopstorage, ref)("%s: allocated " PTR_FORMAT, name(), p2i(result));
return result;
}
OopStorage::Block* OopStorage::find_block_or_null(const oop* ptr) const {
assert(ptr != NULL, "precondition");
return Block::block_for_ptr(this, ptr);
}
void OopStorage::release_from_block(Block& block, uintx releasing) {
assert(releasing != 0, "invariant");
uintx allocated = block.allocated_bitmask();
while (true) {
assert(releasing == (allocated & releasing), "invariant");
uintx new_value = allocated ^ releasing;
// CAS new_value into block's allocated bitmask, retrying with
// updated allocated bitmask until the CAS succeeds.
uintx fetched;
if (!is_full_bitmask(allocated) && !is_empty_bitmask(new_value)) {
fetched = block.cmpxchg_allocated_bitmask(new_value, allocated);
if (fetched == allocated) return;
} else {
// Need special handling if transitioning from full to not full,
// or from not empty to empty. For those cases, must hold the
// _allocate_mutex when updating the allocated bitmask, to
// ensure the associated list manipulations will be consistent
// with the allocation bitmask that is visible to other threads
// in allocate() or deleting empty blocks.
MutexLockerEx ml(_allocate_mutex, Mutex::_no_safepoint_check_flag);
fetched = block.cmpxchg_allocated_bitmask(new_value, allocated);
if (fetched == allocated) {
// CAS succeeded; handle special cases, which might no longer apply.
if (is_full_bitmask(allocated)) {
// Transitioning from full to not-full; add to _allocate_list.
log_debug(oopstorage, blocks)("%s: block not full " PTR_FORMAT, name(), p2i(&block));
_allocate_list.push_front(block);
assert(!block.is_full(), "invariant"); // Still not full.
}
if (is_empty_bitmask(new_value)) {
// Transitioning from not-empty to empty; move to end of
// _allocate_list, to make it a deletion candidate.
log_debug(oopstorage, blocks)("%s: block empty " PTR_FORMAT, name(), p2i(&block));
_allocate_list.unlink(block);
_allocate_list.push_back(block);
++_empty_block_count;
assert(block.is_empty(), "invariant"); // Still empty.
}
return; // Successful CAS and transitions handled.
}
}
// CAS failed; retry with latest value.
allocated = fetched;
}
}
#ifdef ASSERT
void OopStorage::check_release(const Block* block, const oop* ptr) const {
switch (allocation_status_validating_block(block, ptr)) {
case INVALID_ENTRY:
fatal("Releasing invalid entry: " PTR_FORMAT, p2i(ptr));
break;
case UNALLOCATED_ENTRY:
fatal("Releasing unallocated entry: " PTR_FORMAT, p2i(ptr));
break;
case ALLOCATED_ENTRY:
assert(block->contains(ptr), "invariant");
break;
default:
ShouldNotReachHere();
}
}
#endif // ASSERT
inline void check_release_entry(const oop* entry) {
assert(entry != NULL, "Releasing NULL");
assert(*entry == NULL, "Releasing uncleared entry: " PTR_FORMAT, p2i(entry));
}
void OopStorage::release(const oop* ptr) {
check_release_entry(ptr);
Block* block = find_block_or_null(ptr);
check_release(block, ptr);
log_info(oopstorage, ref)("%s: released " PTR_FORMAT, name(), p2i(ptr));
release_from_block(*block, block->bitmask_for_entry(ptr));
Atomic::dec(&_allocation_count);
}
void OopStorage::release(const oop* const* ptrs, size_t size) {
size_t i = 0;
while (i < size) {
Block* block = find_block_or_null(ptrs[i]);
check_release(block, ptrs[i]);
log_info(oopstorage, ref)("%s: released " PTR_FORMAT, name(), p2i(ptrs[i]));
size_t count = 0;
uintx releasing = 0;
for ( ; i < size; ++i) {
const oop* entry = ptrs[i];
// If entry not in block, finish block and resume outer loop with entry.
if (!block->contains(entry)) break;
check_release_entry(entry);
// Add entry to releasing bitmap.
log_info(oopstorage, ref)("%s: released " PTR_FORMAT, name(), p2i(entry));
uintx entry_bitmask = block->bitmask_for_entry(entry);
assert((releasing & entry_bitmask) == 0,
"Duplicate entry: " PTR_FORMAT, p2i(entry));
releasing |= entry_bitmask;
++count;
}
// Release the contiguous entries that are in block.
release_from_block(*block, releasing);
Atomic::sub(count, &_allocation_count);
}
}
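// Illustrative note: when ptrs is sorted by address, entries of the same
// block are adjacent, so the inner loop above folds them into a single
// "releasing" bitmask and the block is updated with one CAS (plus at most
// one _allocate_list manipulation) rather than one CAS per entry.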
const char* dup_name(const char* name) {
char* dup = NEW_C_HEAP_ARRAY(char, strlen(name) + 1, mtGC);
strcpy(dup, name);
return dup;
}
OopStorage::OopStorage(const char* name,
Mutex* allocate_mutex,
Mutex* active_mutex) :
_name(dup_name(name)),
_active_list(&Block::get_active_entry),
_allocate_list(&Block::get_allocate_entry),
_active_head(NULL),
_allocate_mutex(allocate_mutex),
_active_mutex(active_mutex),
_allocation_count(0),
_block_count(0),
_empty_block_count(0),
_concurrent_iteration_active(false)
{
assert(_active_mutex->rank() < _allocate_mutex->rank(),
"%s: active_mutex must have lower rank than allocate_mutex", _name);
assert(_active_mutex->_safepoint_check_required != Mutex::_safepoint_check_always,
"%s: active mutex requires safepoint check", _name);
assert(_allocate_mutex->_safepoint_check_required != Mutex::_safepoint_check_always,
"%s: allocate mutex requires safepoint check", _name);
}
void OopStorage::delete_empty_block(const Block& block) {
assert(block.is_empty(), "discarding non-empty block");
log_info(oopstorage, blocks)("%s: delete empty block " PTR_FORMAT, name(), p2i(&block));
Block::delete_block(block);
}
OopStorage::~OopStorage() {
Block* block;
while ((block = _allocate_list.head()) != NULL) {
_allocate_list.unlink(*block);
}
while ((block = _active_list.head()) != NULL) {
_active_list.unlink(*block);
Block::delete_block(*block);
}
FREE_C_HEAP_ARRAY(char, _name);
}
void OopStorage::delete_empty_blocks_safepoint(size_t retain) {
assert_at_safepoint();
// Don't interfere with a concurrent iteration.
if (_concurrent_iteration_active) return;
// Compute the number of blocks to remove, to minimize volatile accesses.
size_t empty_blocks = _empty_block_count;
if (retain < empty_blocks) {
size_t remove_count = empty_blocks - retain;
// Update volatile counters once.
_block_count -= remove_count;
_empty_block_count -= remove_count;
do {
const Block* block = _allocate_list.ctail();
assert(block != NULL, "invariant");
assert(block->is_empty(), "invariant");
// Remove block from lists, and delete it.
_active_list.unlink(*block);
_allocate_list.unlink(*block);
delete_empty_block(*block);
} while (--remove_count > 0);
// Update _active_head, in case current value was in deleted set.
_active_head = _active_list.head();
}
}
void OopStorage::delete_empty_blocks_concurrent(size_t retain) {
MutexLockerEx ml(_allocate_mutex, Mutex::_no_safepoint_check_flag);
// Other threads could be adding to the empty block count while we
// release the mutex across the block deletions. Set an upper bound
// on how many blocks we'll try to release, so other threads can't
// cause an unbounded stay in this function.
if (_empty_block_count <= retain) return;
size_t limit = _empty_block_count - retain;
for (size_t i = 0; (i < limit) && (retain < _empty_block_count); ++i) {
const Block* block = _allocate_list.ctail();
assert(block != NULL, "invariant");
assert(block->is_empty(), "invariant");
{
MutexLockerEx aml(_active_mutex, Mutex::_no_safepoint_check_flag);
// Don't interfere with a concurrent iteration.
if (_concurrent_iteration_active) return;
// Remove block from _active_list, updating head if needed.
_active_list.unlink(*block);
--_block_count;
if (block == _active_head) {
_active_head = _active_list.head();
}
}
// Remove block from _allocate_list and delete it.
_allocate_list.unlink(*block);
--_empty_block_count;
// Release mutex while deleting block.
MutexUnlockerEx ul(_allocate_mutex, Mutex::_no_safepoint_check_flag);
delete_empty_block(*block);
}
}
OopStorage::EntryStatus
OopStorage::allocation_status_validating_block(const Block* block,
const oop* ptr) const {
MutexLockerEx ml(_allocate_mutex, Mutex::_no_safepoint_check_flag);
if ((block == NULL) || !is_valid_block_locked_or_safepoint(block)) {
return INVALID_ENTRY;
} else if ((block->allocated_bitmask() & block->bitmask_for_entry(ptr)) != 0) {
return ALLOCATED_ENTRY;
} else {
return UNALLOCATED_ENTRY;
}
}
OopStorage::EntryStatus OopStorage::allocation_status(const oop* ptr) const {
return allocation_status_validating_block(find_block_or_null(ptr), ptr);
}
size_t OopStorage::allocation_count() const {
return _allocation_count;
}
size_t OopStorage::block_count() const {
return _block_count;
}
size_t OopStorage::empty_block_count() const {
return _empty_block_count;
}
size_t OopStorage::total_memory_usage() const {
size_t total_size = sizeof(OopStorage);
total_size += strlen(name()) + 1;
total_size += block_count() * Block::allocation_size();
return total_size;
}
// Parallel iteration support
#if INCLUDE_ALL_GCS
static char* not_started_marker_dummy = NULL;
static void* const not_started_marker = &not_started_marker_dummy;
OopStorage::BasicParState::BasicParState(OopStorage* storage, bool concurrent) :
_storage(storage),
_next_block(not_started_marker),
_concurrent(concurrent)
{
update_iteration_state(true);
}
OopStorage::BasicParState::~BasicParState() {
update_iteration_state(false);
}
void OopStorage::BasicParState::update_iteration_state(bool value) {
if (_concurrent) {
MutexLockerEx ml(_storage->_active_mutex, Mutex::_no_safepoint_check_flag);
assert(_storage->_concurrent_iteration_active != value, "precondition");
_storage->_concurrent_iteration_active = value;
}
}
void OopStorage::BasicParState::ensure_iteration_started() {
if (!_concurrent) assert_at_safepoint();
assert(!_concurrent || _storage->_concurrent_iteration_active, "invariant");
// Ensure _next_block is not the not_started_marker, setting it to
// the _active_head to start the iteration if necessary.
if (OrderAccess::load_acquire(&_next_block) == not_started_marker) {
Atomic::cmpxchg(_storage->_active_head, &_next_block, not_started_marker);
}
assert(_next_block != not_started_marker, "postcondition");
}
OopStorage::Block* OopStorage::BasicParState::claim_next_block() {
assert(_next_block != not_started_marker, "Iteration not started");
void* next = _next_block;
while (next != NULL) {
void* new_next = _storage->_active_list.next(*static_cast<Block*>(next));
void* fetched = Atomic::cmpxchg(new_next, &_next_block, next);
if (fetched == next) break; // Claimed.
next = fetched;
}
return static_cast<Block*>(next);
}
#endif // INCLUDE_ALL_GCS
const char* OopStorage::name() const { return _name; }
#ifndef PRODUCT
void OopStorage::print_on(outputStream* st) const {
size_t allocations = _allocation_count;
size_t blocks = _block_count;
size_t empties = _empty_block_count;
// Comparison is being careful about racy accesses.
size_t used = (blocks < empties) ? 0 : (blocks - empties);
double data_size = section_size * section_count;
double alloc_percentage = percent_of((double)allocations, used * data_size);
st->print("%s: " SIZE_FORMAT " entries in " SIZE_FORMAT " blocks (%.F%%), "
SIZE_FORMAT " empties, " SIZE_FORMAT " bytes",
name(), allocations, used, alloc_percentage,
empties, total_memory_usage());
if (_concurrent_iteration_active) {
st->print(", concurrent iteration active");
}
}
#endif // !PRODUCT
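
For orientation, a minimal client sketch of the API implemented above (hypothetical code, not part of this changeset; the storage name and the two mutexes are assumptions and must satisfy the rank and safepoint-check asserts in the constructor):

// Allocate, publish, use, and release one entry.
OopStorage* storage = new OopStorage("example storage", example_alloc_mutex, example_active_mutex);
oop* ref = storage->allocate();   // postcondition: *ref == NULL
if (ref != NULL) {
  *ref = obj;                     // publish a reference to a Java object
  // ... use the reference through *ref ...
  *ref = NULL;                    // precondition of release()
  storage->release(ref);
}
// Serial GC iteration, only at a safepoint:
//   storage->oops_do(&oop_closure);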

gc/shared/oopStorage.hpp

@@ -0,0 +1,734 @@
/*
* Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#ifndef SHARE_GC_SHARED_OOPSTORAGE_HPP
#define SHARE_GC_SHARED_OOPSTORAGE_HPP
#include "memory/allocation.hpp"
#include "metaprogramming/conditional.hpp"
#include "metaprogramming/isConst.hpp"
#include "oops/oop.hpp"
#include "utilities/count_trailing_zeros.hpp"
#include "utilities/debug.hpp"
#include "utilities/globalDefinitions.hpp"
#include "utilities/macros.hpp"
class Mutex;
class outputStream;
// OopStorage supports management of off-heap references to objects allocated
// in the Java heap. An OopStorage object provides a set of Java object
// references (oop values), which clients refer to via oop* handles to the
// associated OopStorage entries. Clients allocate entries to create a
// (possibly weak) reference to a Java object, use that reference, and release
// the reference when no longer needed.
//
// The garbage collector must know about all OopStorage objects and their
// reference strength. OopStorage provides the garbage collector with support
// for iteration over all the allocated entries.
//
// There are several categories of interaction with an OopStorage object.
//
// (1) allocation and release of entries, by the mutator or the VM.
// (2) iteration by the garbage collector, possibly concurrent with mutator.
// (3) iteration by other, non-GC, tools (only at safepoints).
// (4) cleanup of unused internal storage, possibly concurrent with mutator.
//
// A goal of OopStorage is to make these interactions thread-safe, while
// minimizing potential lock contention issues within and between these
// categories. In particular, support for concurrent iteration by the garbage
// collector, under certain restrictions, is required. Further, it must not
// block nor be blocked by other operations for long periods.
//
// Internally, OopStorage is a set of Block objects, from which entries are
// allocated and released. A block contains an oop[] and a bitmask indicating
// which entries are in use (have been allocated and not yet released). New
// blocks are constructed and added to the storage object when an entry
// allocation request is made and there are no blocks with unused entries.
// Blocks may be removed and deleted when empty.
//
// There are two important (and somewhat intertwined) protocols governing
// concurrent access to a storage object. These are the Concurrent Iteration
// Protocol and the Allocation Protocol. See the ParState class for a
// discussion of concurrent iteration and the management of thread
// interactions for this protocol. Similarly, see the allocate() function for
// a discussion of allocation.
class OopStorage : public CHeapObj<mtGC> {
public:
OopStorage(const char* name, Mutex* allocate_mutex, Mutex* active_mutex);
~OopStorage();
// These count and usage accessors are racy unless at a safepoint.
// The number of allocated and not yet released entries.
size_t allocation_count() const;
// The number of blocks of entries. Useful for sizing parallel iteration.
size_t block_count() const;
// The number of blocks with no allocated entries. Useful for sizing
// parallel iteration and scheduling block deletion.
size_t empty_block_count() const;
// Total number of blocks * memory allocation per block, plus
// bookkeeping overhead, including this storage object.
size_t total_memory_usage() const;
enum EntryStatus {
INVALID_ENTRY,
UNALLOCATED_ENTRY,
ALLOCATED_ENTRY
};
// Locks _allocate_mutex.
EntryStatus allocation_status(const oop* ptr) const;
// Allocates and returns a new entry. Returns NULL if memory allocation
// failed. Locks _allocate_mutex.
// postcondition: *result == NULL.
oop* allocate();
// Deallocates ptr, after setting its value to NULL. Locks _allocate_mutex.
// precondition: ptr is a valid allocated entry.
// precondition: *ptr == NULL.
void release(const oop* ptr);
// Releases all the ptrs. Possibly faster than individual calls to
// release(oop*). Best if ptrs is sorted by address. Locks
// _allocate_mutex.
// precondition: All elements of ptrs are valid allocated entries.
// precondition: *ptrs[i] == NULL, for i in [0,size).
void release(const oop* const* ptrs, size_t size);
// Applies f to each allocated entry's location. f must be a function or
// function object. Assume p is either a const oop* or an oop*, depending
// on whether the associated storage is const or non-const, respectively.
// Then f(p) must be a valid expression. The result of invoking f(p) must
// be implicitly convertible to bool. Iteration terminates and returns
// false if any invocation of f returns false. Otherwise, the result of
// iteration is true.
// precondition: at safepoint.
template<typename F> bool iterate_safepoint(F f);
template<typename F> bool iterate_safepoint(F f) const;
// oops_do and weak_oops_do are wrappers around iterate_safepoint, providing
// an adaptation layer allowing the use of existing is-alive closures and
// OopClosures. Assume p is either const oop* or oop*, depending on whether
// the associated storage is const or non-const, respectively. Then
//
// - closure->do_oop(p) must be a valid expression whose value is ignored.
//
// - is_alive->do_object_b(*p) must be a valid expression whose value is
// convertible to bool.
//
// For weak_oops_do, if *p == NULL then neither is_alive nor closure will be
// invoked for p. If is_alive->do_object_b(*p) is false, then closure will
// not be invoked on p, and *p will be set to NULL.
template<typename Closure> void oops_do(Closure* closure);
template<typename Closure> void oops_do(Closure* closure) const;
template<typename Closure> void weak_oops_do(Closure* closure);
template<typename IsAliveClosure, typename Closure>
void weak_oops_do(IsAliveClosure* is_alive, Closure* closure);
#if INCLUDE_ALL_GCS
// Parallel iteration is for the exclusive use of the GC.
// Other clients must use serial iteration.
template<bool concurrent, bool is_const> class ParState;
#endif // INCLUDE_ALL_GCS
// Block cleanup functions are for the exclusive use of the GC.
// Both stop deleting if there is an in-progress concurrent iteration.
// Concurrent deletion locks both the allocate_mutex and the active_mutex.
void delete_empty_blocks_safepoint(size_t retain = 1);
void delete_empty_blocks_concurrent(size_t retain = 1);
// Debugging and logging support.
const char* name() const;
void print_on(outputStream* st) const PRODUCT_RETURN;
// Provides access to storage internals, for unit testing.
class TestAccess;
private:
class Block;
class BlockList;
class BlockEntry VALUE_OBJ_CLASS_SPEC {
friend class BlockList;
// Members are mutable, and we deal exclusively with pointers to
// const, to make const blocks easier to use; a block being const
// doesn't prevent modifying its list state.
mutable const Block* _prev;
mutable const Block* _next;
// Noncopyable.
BlockEntry(const BlockEntry&);
BlockEntry& operator=(const BlockEntry&);
public:
BlockEntry();
~BlockEntry();
};
class BlockList VALUE_OBJ_CLASS_SPEC {
const Block* _head;
const Block* _tail;
const BlockEntry& (*_get_entry)(const Block& block);
// Noncopyable.
BlockList(const BlockList&);
BlockList& operator=(const BlockList&);
public:
BlockList(const BlockEntry& (*get_entry)(const Block& block));
~BlockList();
Block* head();
const Block* chead() const;
const Block* ctail() const;
Block* prev(Block& block);
Block* next(Block& block);
const Block* prev(const Block& block) const;
const Block* next(const Block& block) const;
void push_front(const Block& block);
void push_back(const Block& block);
void unlink(const Block& block);
};
class Block /* No base class, to avoid messing up alignment requirements */ {
// _data must be the first non-static data member, for alignment.
oop _data[BitsPerWord];
static const unsigned _data_pos = 0; // Position of _data.
volatile uintx _allocated_bitmask; // One bit per _data element.
const OopStorage* _owner;
void* _memory; // Unaligned storage containing block.
BlockEntry _active_entry;
BlockEntry _allocate_entry;
Block(const OopStorage* owner, void* memory);
~Block();
void check_index(unsigned index) const;
unsigned get_index(const oop* ptr) const;
template<typename F, typename BlockPtr>
static bool iterate_impl(F f, BlockPtr b);
// Noncopyable.
Block(const Block&);
Block& operator=(const Block&);
public:
static const BlockEntry& get_active_entry(const Block& block);
static const BlockEntry& get_allocate_entry(const Block& block);
static size_t allocation_size();
static size_t allocation_alignment_shift();
oop* get_pointer(unsigned index);
const oop* get_pointer(unsigned index) const;
uintx bitmask_for_index(unsigned index) const;
uintx bitmask_for_entry(const oop* ptr) const;
// Allocation bitmask accessors are racy.
bool is_full() const;
bool is_empty() const;
uintx allocated_bitmask() const;
uintx cmpxchg_allocated_bitmask(uintx new_value, uintx compare_value);
bool contains(const oop* ptr) const;
// Returns NULL if ptr is not in a block or not allocated in that block.
static Block* block_for_ptr(const OopStorage* owner, const oop* ptr);
oop* allocate();
static Block* new_block(const OopStorage* owner);
static void delete_block(const Block& block);
template<typename F> bool iterate(F f);
template<typename F> bool iterate(F f) const;
}; // class Block
const char* _name;
BlockList _active_list;
BlockList _allocate_list;
Block* volatile _active_head;
Mutex* _allocate_mutex;
Mutex* _active_mutex;
// Counts are volatile for racy unlocked accesses.
volatile size_t _allocation_count;
volatile size_t _block_count;
volatile size_t _empty_block_count;
// mutable because this gets set even for const iteration.
mutable bool _concurrent_iteration_active;
Block* find_block_or_null(const oop* ptr) const;
bool is_valid_block_locked_or_safepoint(const Block* block) const;
EntryStatus allocation_status_validating_block(const Block* block, const oop* ptr) const;
void check_release(const Block* block, const oop* ptr) const NOT_DEBUG_RETURN;
void release_from_block(Block& block, uintx release_bitmask);
void delete_empty_block(const Block& block);
static void assert_at_safepoint() NOT_DEBUG_RETURN;
template<typename F, typename Storage>
static bool iterate_impl(F f, Storage* storage);
#if INCLUDE_ALL_GCS
// Implementation support for parallel iteration
class BasicParState;
#endif // INCLUDE_ALL_GCS
// Wrapper for OopClosure-style function, so it can be used with
// iterate. Assume p is of type oop*. Then cl->do_oop(p) must be a
// valid expression whose value may be ignored.
template<typename Closure> class OopFn;
template<typename Closure> static OopFn<Closure> oop_fn(Closure* cl);
// Wrapper for BoolObjectClosure + iteration handler pair, so they
// can be used with iterate.
template<typename IsAlive, typename F> class IfAliveFn;
template<typename IsAlive, typename F>
static IfAliveFn<IsAlive, F> if_alive_fn(IsAlive* is_alive, F f);
// Wrapper for iteration handler, automatically skipping NULL entries.
template<typename F> class SkipNullFn;
template<typename F> static SkipNullFn<F> skip_null_fn(F f);
// Wrapper for iteration handler; ignore handler result and return true.
template<typename F> class AlwaysTrueFn;
};
inline OopStorage::Block* OopStorage::BlockList::head() {
return const_cast<Block*>(_head);
}
inline const OopStorage::Block* OopStorage::BlockList::chead() const {
return _head;
}
inline const OopStorage::Block* OopStorage::BlockList::ctail() const {
return _tail;
}
inline OopStorage::Block* OopStorage::BlockList::prev(Block& block) {
return const_cast<Block*>(_get_entry(block)._prev);
}
inline OopStorage::Block* OopStorage::BlockList::next(Block& block) {
return const_cast<Block*>(_get_entry(block)._next);
}
inline const OopStorage::Block* OopStorage::BlockList::prev(const Block& block) const {
return _get_entry(block)._prev;
}
inline const OopStorage::Block* OopStorage::BlockList::next(const Block& block) const {
return _get_entry(block)._next;
}
template<typename Closure>
class OopStorage::OopFn VALUE_OBJ_CLASS_SPEC {
public:
explicit OopFn(Closure* cl) : _cl(cl) {}
template<typename OopPtr> // [const] oop*
bool operator()(OopPtr ptr) const {
_cl->do_oop(ptr);
return true;
}
private:
Closure* _cl;
};
template<typename Closure>
inline OopStorage::OopFn<Closure> OopStorage::oop_fn(Closure* cl) {
return OopFn<Closure>(cl);
}
template<typename IsAlive, typename F>
class OopStorage::IfAliveFn VALUE_OBJ_CLASS_SPEC {
public:
IfAliveFn(IsAlive* is_alive, F f) : _is_alive(is_alive), _f(f) {}
bool operator()(oop* ptr) const {
bool result = true;
oop v = *ptr;
if (v != NULL) {
if (_is_alive->do_object_b(v)) {
result = _f(ptr);
} else {
*ptr = NULL; // Clear dead value.
}
}
return result;
}
private:
IsAlive* _is_alive;
F _f;
};
template<typename IsAlive, typename F>
inline OopStorage::IfAliveFn<IsAlive, F> OopStorage::if_alive_fn(IsAlive* is_alive, F f) {
return IfAliveFn<IsAlive, F>(is_alive, f);
}
template<typename F>
class OopStorage::SkipNullFn VALUE_OBJ_CLASS_SPEC {
public:
SkipNullFn(F f) : _f(f) {}
template<typename OopPtr> // [const] oop*
bool operator()(OopPtr ptr) const {
return (*ptr != NULL) ? _f(ptr) : true;
}
private:
F _f;
};
template<typename F>
inline OopStorage::SkipNullFn<F> OopStorage::skip_null_fn(F f) {
return SkipNullFn<F>(f);
}
template<typename F>
class OopStorage::AlwaysTrueFn VALUE_OBJ_CLASS_SPEC {
F _f;
public:
AlwaysTrueFn(F f) : _f(f) {}
template<typename OopPtr> // [const] oop*
bool operator()(OopPtr ptr) const { _f(ptr); return true; }
};
// Inline Block accesses for use in iteration inner loop.
inline void OopStorage::Block::check_index(unsigned index) const {
assert(index < ARRAY_SIZE(_data), "Index out of bounds: %u", index);
}
inline oop* OopStorage::Block::get_pointer(unsigned index) {
check_index(index);
return &_data[index];
}
inline const oop* OopStorage::Block::get_pointer(unsigned index) const {
check_index(index);
return &_data[index];
}
inline uintx OopStorage::Block::allocated_bitmask() const {
return _allocated_bitmask;
}
inline uintx OopStorage::Block::bitmask_for_index(unsigned index) const {
check_index(index);
return uintx(1) << index;
}
// Provide const or non-const iteration, depending on whether BlockPtr
// is const Block* or Block*, respectively.
template<typename F, typename BlockPtr> // BlockPtr := [const] Block*
inline bool OopStorage::Block::iterate_impl(F f, BlockPtr block) {
uintx bitmask = block->allocated_bitmask();
while (bitmask != 0) {
unsigned index = count_trailing_zeros(bitmask);
bitmask ^= block->bitmask_for_index(index);
if (!f(block->get_pointer(index))) {
return false;
}
}
return true;
}
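// Worked example for iterate_impl above (illustrative): with an allocated
// bitmask of 0b1010, count_trailing_zeros finds index 1, the xor clears that
// bit leaving 0b1000, the next pass visits index 3, and the loop then
// terminates with bitmask 0.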
template<typename F>
inline bool OopStorage::Block::iterate(F f) {
return iterate_impl(f, this);
}
template<typename F>
inline bool OopStorage::Block::iterate(F f) const {
return iterate_impl(f, this);
}
//////////////////////////////////////////////////////////////////////////////
// Support for serial iteration, always at a safepoint.
// Provide const or non-const iteration, depending on whether Storage is
// const OopStorage* or OopStorage*, respectively.
template<typename F, typename Storage> // Storage := [const] OopStorage
inline bool OopStorage::iterate_impl(F f, Storage* storage) {
assert_at_safepoint();
// Propagate const/non-const iteration to the block layer, by using
// const or non-const blocks as corresponding to Storage.
typedef typename Conditional<IsConst<Storage>::value, const Block*, Block*>::type BlockPtr;
for (BlockPtr block = storage->_active_head;
block != NULL;
block = storage->_active_list.next(*block)) {
if (!block->iterate(f)) {
return false;
}
}
return true;
}
template<typename F>
inline bool OopStorage::iterate_safepoint(F f) {
return iterate_impl(f, this);
}
template<typename F>
inline bool OopStorage::iterate_safepoint(F f) const {
return iterate_impl(f, this);
}
template<typename Closure>
inline void OopStorage::oops_do(Closure* cl) {
iterate_safepoint(oop_fn(cl));
}
template<typename Closure>
inline void OopStorage::oops_do(Closure* cl) const {
iterate_safepoint(oop_fn(cl));
}
template<typename Closure>
inline void OopStorage::weak_oops_do(Closure* cl) {
iterate_safepoint(skip_null_fn(oop_fn(cl)));
}
template<typename IsAliveClosure, typename Closure>
inline void OopStorage::weak_oops_do(IsAliveClosure* is_alive, Closure* cl) {
iterate_safepoint(if_alive_fn(is_alive, oop_fn(cl)));
}
#if INCLUDE_ALL_GCS
//////////////////////////////////////////////////////////////////////////////
// Support for parallel and optionally concurrent state iteration.
//
// Parallel iteration is for the exclusive use of the GC. Other iteration
// clients must use serial iteration.
//
// Concurrent Iteration
//
// Iteration involves the _active_list, which contains all of the blocks owned
// by a storage object. This is a doubly-linked list, linked through
// dedicated fields in the blocks.
//
// At most one concurrent ParState can exist at a time for a given storage
// object.
//
// A concurrent ParState sets the associated storage's
// _concurrent_iteration_active flag true when the state is constructed, and
// sets it false when the state is destroyed. These assignments are made with
// _active_mutex locked. Meanwhile, empty block deletion is not done while
// _concurrent_iteration_active is true. The flag check and the dependent
// removal of a block from the _active_list are performed with _active_mutex
// locked. This prevents concurrent iteration and empty block deletion from
// interfering with each other.
//
// Both allocate() and delete_empty_blocks_concurrent() lock the
// _allocate_mutex while performing their respective list manipulations,
// preventing them from interfering with each other.
//
// When allocate() creates a new block, it is added to the front of the
// _active_list. Then _active_head is set to the new block. When concurrent
// iteration is started (by a parallel worker thread calling the state's
// iterate() function), the current _active_head is used as the initial block
// for the iteration, with iteration proceeding down the list headed by that
// block.
//
// As a result, the list over which concurrent iteration operates is stable.
// However, once the iteration is started, later allocations may add blocks to
// the front of the list that won't be examined by the iteration. And while
// the list is stable, concurrent allocate() and release() operations may
// change the set of allocated entries in a block at any time during the
// iteration.
//
// As a result, a concurrent iteration handler must accept that some
// allocations and releases that occur after the iteration started will not be
// seen by the iteration. Further, some may overlap examination by the
// iteration. To help with this, allocate() and release() have an invariant
// that an entry's value must be NULL when it is not in use.
//
// An in-progress delete_empty_blocks_concurrent() operation can contend with
// the start of a concurrent iteration over the _active_mutex. Since both are
// under GC control, that potential contention can be eliminated by never
// scheduling both operations to run at the same time.
//
// ParState<concurrent, is_const>
// concurrent must be true if iteration is concurrent with the
// mutator, false if iteration is at a safepoint.
//
// is_const must be true if the iteration is over a constant storage
// object, false if the iteration may modify the storage object.
//
// ParState([const] OopStorage* storage)
// Construct an object for managing an iteration over storage. For a
// concurrent ParState, empty block deletion for the associated storage
// is inhibited for the life of the ParState. There can be no more
// than one live concurrent ParState at a time for a given storage object.
//
// template<typename F> void iterate(F f)
// Repeatedly claims a block from the associated storage that has
// not been processed by this iteration (possibly by other threads),
// and applies f to each entry in the claimed block. Assume p is of
// type const oop* or oop*, according to is_const. Then f(p) must be
// a valid expression whose value is ignored. Concurrent uses must
// be prepared for an entry's value to change at any time, due to
// mutator activity.
//
// template<typename Closure> void oops_do(Closure* cl)
// Wrapper around iterate, providing an adaptation layer allowing
// the use of OopClosures and similar objects for iteration. Assume
// p is of type const oop* or oop*, according to is_const. Then
// cl->do_oop(p) must be a valid expression whose value is ignored.
// Concurrent uses must be prepared for the entry's value to change
// at any time, due to mutator activity.
//
// Optional operations, provided only if !concurrent && !is_const.
// These are not provided when is_const, because the storage object
// may be modified by the iteration infrastructure, even if the
// provided closure doesn't modify the storage object. These are not
// provided when concurrent because any pre-filtering behavior by the
// iteration infrastructure is inappropriate for concurrent iteration;
// modifications of the storage by the mutator could result in the
// pre-filtering being applied (successfully or not) to objects that
// are unrelated to what the closure finds in the entry.
//
// template<typename Closure> void weak_oops_do(Closure* cl)
// template<typename IsAliveClosure, typename Closure>
// void weak_oops_do(IsAliveClosure* is_alive, Closure* cl)
// Wrappers around iterate, providing an adaptation layer allowing
// the use of is-alive closures and OopClosures for iteration.
// Assume p is of type oop*. Then
//
// - cl->do_oop(p) must be a valid expression whose value is ignored.
//
// - is_alive->do_object_b(*p) must be a valid expression whose value
// is convertible to bool.
//
// If *p == NULL then neither is_alive nor cl will be invoked for p.
// If is_alive->do_object_b(*p) is false, then cl will not be
// invoked on p.
class OopStorage::BasicParState VALUE_OBJ_CLASS_SPEC {
public:
BasicParState(OopStorage* storage, bool concurrent);
~BasicParState();
template<bool is_const, typename F> void iterate(F f) {
// Wrap f in ATF so we can use Block::iterate.
AlwaysTrueFn<F> atf_f(f);
ensure_iteration_started();
typename Conditional<is_const, const Block*, Block*>::type block;
while ((block = claim_next_block()) != NULL) {
block->iterate(atf_f);
}
}
private:
OopStorage* _storage;
void* volatile _next_block;
bool _concurrent;
// Noncopyable.
BasicParState(const BasicParState&);
BasicParState& operator=(const BasicParState&);
void update_iteration_state(bool value);
void ensure_iteration_started();
Block* claim_next_block();
};
template<bool concurrent, bool is_const>
class OopStorage::ParState VALUE_OBJ_CLASS_SPEC {
BasicParState _basic_state;
public:
ParState(const OopStorage* storage) :
// For simplicity, always recorded as non-const.
_basic_state(const_cast<OopStorage*>(storage), concurrent)
{}
template<typename F>
void iterate(F f) {
_basic_state.template iterate<is_const>(f);
}
template<typename Closure>
void oops_do(Closure* cl) {
this->iterate(oop_fn(cl));
}
};
template<>
class OopStorage::ParState<false, false> VALUE_OBJ_CLASS_SPEC {
BasicParState _basic_state;
public:
ParState(OopStorage* storage) :
_basic_state(storage, false)
{}
template<typename F>
void iterate(F f) {
_basic_state.template iterate<false>(f);
}
template<typename Closure>
void oops_do(Closure* cl) {
this->iterate(oop_fn(cl));
}
template<typename Closure>
void weak_oops_do(Closure* cl) {
this->iterate(skip_null_fn(oop_fn(cl)));
}
template<typename IsAliveClosure, typename Closure>
void weak_oops_do(IsAliveClosure* is_alive, Closure* cl) {
this->iterate(if_alive_fn(is_alive, oop_fn(cl)));
}
};
#endif // INCLUDE_ALL_GCS
#endif // include guard
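
As a sketch of how a GC might drive the parallel protocol documented above (hypothetical code, not part of this changeset; the storage pointer and closure are assumptions):

// One ParState shared by all workers. For concurrent == true, construction
// sets _concurrent_iteration_active, inhibiting empty block deletion until
// the state is destroyed.
OopStorage::ParState<true, false> par_state(storage);
// Each GC worker thread then repeatedly claims blocks and applies the closure:
//   par_state.oops_do(&oop_closure);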

logging/logTag.hpp

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2015, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2015, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -101,6 +101,7 @@
   LOG_TAG(objecttagging) \
   LOG_TAG(obsolete) \
   LOG_TAG(oopmap) \
+  LOG_TAG(oopstorage) \
   LOG_TAG(os) \
   LOG_TAG(pagesize) \
   LOG_TAG(patch) \

prims/jni.cpp

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved.
  * Copyright (c) 2012 Red Hat, Inc.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
@@ -862,16 +862,7 @@ JNI_LEAF(jobjectRefType, jni_GetObjectRefType(JNIEnv *env, jobject obj))
   HOTSPOT_JNI_GETOBJECTREFTYPE_ENTRY(env, obj);
-  jobjectRefType ret;
-  if (JNIHandles::is_local_handle(thread, obj) ||
-      JNIHandles::is_frame_handle(thread, obj))
-    ret = JNILocalRefType;
-  else if (JNIHandles::is_global_handle(obj))
-    ret = JNIGlobalRefType;
-  else if (JNIHandles::is_weak_global_handle(obj))
-    ret = JNIWeakGlobalRefType;
-  else
-    ret = JNIInvalidRefType;
+  jobjectRefType ret = JNIHandles::handle_type(thread, obj);
   HOTSPOT_JNI_GETOBJECTREFTYPE_RETURN((void *) ret);
   return ret;

prims/jniCheck.cpp

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -435,10 +435,7 @@ static void* check_wrapped_array_release(JavaThread* thr, const char* fn_name,
 }

 oop jniCheck::validate_handle(JavaThread* thr, jobject obj) {
-  if (JNIHandles::is_frame_handle(thr, obj) ||
-      JNIHandles::is_local_handle(thr, obj) ||
-      JNIHandles::is_global_handle(obj) ||
-      JNIHandles::is_weak_global_handle(obj)) {
+  if (JNIHandles::handle_type(thr, obj) != JNIInvalidRefType) {
     ASSERT_OOPS_ALLOWED;
     return JNIHandles::resolve_external_guard(obj);
   }

prims/jvmtiTagMap.cpp

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2003, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -2582,9 +2582,9 @@ class SimpleRootsClosure : public OopClosure {
       return;
     }

-    // ignore null or deleted handles
     oop o = *obj_p;
-    if (o == NULL || o == JNIHandles::deleted_handle()) {
+    // ignore null
+    if (o == NULL) {
       return;
     }
@@ -2641,9 +2641,9 @@ class JNILocalRootsClosure : public OopClosure {
       return;
     }

-    // ignore null or deleted handles
     oop o = *obj_p;
-    if (o == NULL || o == JNIHandles::deleted_handle()) {
+    // ignore null
+    if (o == NULL) {
       return;
     }

runtime/jniHandles.cpp

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1998, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1998, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -23,7 +23,7 @@
  */
 #include "precompiled.hpp"
-#include "classfile/systemDictionary.hpp"
+#include "gc/shared/oopStorage.hpp"
 #include "logging/log.hpp"
 #include "memory/iterator.hpp"
 #include "oops/oop.inline.hpp"
@@ -32,13 +32,13 @@
 #include "runtime/thread.inline.hpp"
 #include "trace/traceMacros.hpp"
 #include "utilities/align.hpp"
+#include "utilities/debug.hpp"
 #if INCLUDE_ALL_GCS
 #include "gc/g1/g1SATBCardTableModRefBS.hpp"
 #endif
 
-JNIHandleBlock* JNIHandles::_global_handles = NULL;
-JNIHandleBlock* JNIHandles::_weak_global_handles = NULL;
-oop JNIHandles::_deleted_handle = NULL;
+OopStorage* JNIHandles::_global_handles = NULL;
+OopStorage* JNIHandles::_weak_global_handles = NULL;
 
 jobject JNIHandles::make_local(oop obj) {
@@ -46,7 +46,7 @@ jobject JNIHandles::make_local(oop obj) {
     return NULL;                // ignore null handles
   } else {
     Thread* thread = Thread::current();
-    assert(Universe::heap()->is_in_reserved(obj), "sanity check");
+    assert(oopDesc::is_oop(obj), "not an oop");
     assert(!current_thread_in_native(), "must not be in native");
     return thread->active_handles()->allocate_handle(obj);
   }
@@ -59,7 +59,7 @@ jobject JNIHandles::make_local(Thread* thread, oop obj) {
   if (obj == NULL) {
     return NULL;                // ignore null handles
   } else {
-    assert(Universe::heap()->is_in_reserved(obj), "sanity check");
+    assert(oopDesc::is_oop(obj), "not an oop");
     assert(thread->is_Java_thread(), "not a Java thread");
     assert(!current_thread_in_native(), "must not be in native");
     return thread->active_handles()->allocate_handle(obj);
@@ -72,56 +72,72 @@ jobject JNIHandles::make_local(JNIEnv* env, oop obj) {
     return NULL;                // ignore null handles
   } else {
     JavaThread* thread = JavaThread::thread_from_jni_environment(env);
-    assert(Universe::heap()->is_in_reserved(obj), "sanity check");
+    assert(oopDesc::is_oop(obj), "not an oop");
     assert(!current_thread_in_native(), "must not be in native");
     return thread->active_handles()->allocate_handle(obj);
   }
 }
-jobject JNIHandles::make_global(Handle obj) {
+static void report_handle_allocation_failure(AllocFailType alloc_failmode,
+                                             const char* handle_kind) {
+  if (alloc_failmode == AllocFailStrategy::EXIT_OOM) {
+    // Fake size value, since we don't know the min allocation size here.
+    vm_exit_out_of_memory(sizeof(oop), OOM_MALLOC_ERROR,
+                          "Cannot create %s JNI handle", handle_kind);
+  } else {
+    assert(alloc_failmode == AllocFailStrategy::RETURN_NULL, "invariant");
+  }
+}
+
+jobject JNIHandles::make_global(Handle obj, AllocFailType alloc_failmode) {
   assert(!Universe::heap()->is_gc_active(), "can't extend the root set during GC");
   assert(!current_thread_in_native(), "must not be in native");
   jobject res = NULL;
   if (!obj.is_null()) {
     // ignore null handles
-    MutexLocker ml(JNIGlobalHandle_lock);
-    assert(Universe::heap()->is_in_reserved(obj()), "sanity check");
-    res = _global_handles->allocate_handle(obj());
+    assert(oopDesc::is_oop(obj()), "not an oop");
+    oop* ptr = _global_handles->allocate();
+    // Return NULL on allocation failure.
+    if (ptr != NULL) {
+      *ptr = obj();
+      res = reinterpret_cast<jobject>(ptr);
+    } else {
+      report_handle_allocation_failure(alloc_failmode, "global");
+    }
   } else {
     CHECK_UNHANDLED_OOPS_ONLY(Thread::current()->clear_unhandled_oops());
   }
 
   return res;
 }
 
-jobject JNIHandles::make_weak_global(Handle obj) {
+jobject JNIHandles::make_weak_global(Handle obj, AllocFailType alloc_failmode) {
   assert(!Universe::heap()->is_gc_active(), "can't extend the root set during GC");
   assert(!current_thread_in_native(), "must not be in native");
   jobject res = NULL;
   if (!obj.is_null()) {
     // ignore null handles
-    {
-      MutexLocker ml(JNIGlobalHandle_lock);
-      assert(Universe::heap()->is_in_reserved(obj()), "sanity check");
-      res = _weak_global_handles->allocate_handle(obj());
+    assert(oopDesc::is_oop(obj()), "not an oop");
+    oop* ptr = _weak_global_handles->allocate();
+    // Return NULL on allocation failure.
+    if (ptr != NULL) {
+      *ptr = obj();
+      char* tptr = reinterpret_cast<char*>(ptr) + weak_tag_value;
+      res = reinterpret_cast<jobject>(tptr);
+    } else {
+      report_handle_allocation_failure(alloc_failmode, "weak global");
     }
-    // Add weak tag.
-    assert(is_aligned(res, weak_tag_alignment), "invariant");
-    char* tptr = reinterpret_cast<char*>(res) + weak_tag_value;
-    res = reinterpret_cast<jobject>(tptr);
   } else {
     CHECK_UNHANDLED_OOPS_ONLY(Thread::current()->clear_unhandled_oops());
   }
 
   return res;
 }
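For context while reading the hunk above: the new alloc_failmode parameter changes the failure behavior of make_global and make_weak_global. A hedged usage sketch, not code from the commit; obj is a hypothetical oop already in scope:

  Thread* current = Thread::current();
  Handle h(current, obj);
  // Default mode (EXIT_OOM) aborts the VM on allocation failure, preserving
  // the old "never returns NULL for non-null input" contract.
  jobject strong = JNIHandles::make_global(h);
  // Callers that can tolerate failure may opt into a NULL return instead.
  jobject weak = JNIHandles::make_weak_global(h, AllocFailStrategy::RETURN_NULL);
  if (weak == NULL) {
    // storage allocation failed; the caller recovers or reports
  }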
-template<bool external_guard>
 oop JNIHandles::resolve_jweak(jweak handle) {
   assert(is_jweak(handle), "precondition");
   oop result = jweak_ref(handle);
-  result = guard_value<external_guard>(result);
 #if INCLUDE_ALL_GCS
   if (result != NULL && UseG1GC) {
     G1SATBCardTableModRefBS::enqueue(result);
@@ -130,31 +146,30 @@ oop JNIHandles::resolve_jweak(jweak handle) {
   return result;
 }
 
-template oop JNIHandles::resolve_jweak<true>(jweak);
-template oop JNIHandles::resolve_jweak<false>(jweak);
-
 bool JNIHandles::is_global_weak_cleared(jweak handle) {
   assert(is_jweak(handle), "not a weak handle");
-  return guard_value<false>(jweak_ref(handle)) == NULL;
+  return jweak_ref(handle) == NULL;
 }
 
 void JNIHandles::destroy_global(jobject handle) {
   if (handle != NULL) {
-    assert(is_global_handle(handle), "Invalid delete of global JNI handle");
-    jobject_ref(handle) = deleted_handle();
+    assert(!is_jweak(handle), "wrong method for destroying jweak");
+    jobject_ref(handle) = NULL;
+    _global_handles->release(&jobject_ref(handle));
   }
 }
 
 void JNIHandles::destroy_weak_global(jobject handle) {
   if (handle != NULL) {
-    jweak_ref(handle) = deleted_handle();
+    assert(is_jweak(handle), "JNI handle not jweak");
+    jweak_ref(handle) = NULL;
+    _weak_global_handles->release(&jweak_ref(handle));
   }
 }
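Taken together, the allocate/release pairs above imply the following slot lifecycle for a storage-backed handle. A sketch assembled from the diff, not code from the commit:

  oop* ptr = _global_handles->allocate();   // claim a slot from the storage
  if (ptr != NULL) {
    *ptr = obj();                           // publish the oop; oops_do now visits it
    jobject handle = reinterpret_cast<jobject>(ptr);
    // ... the handle is used, resolved via jobject_ref(handle) ...
    jobject_ref(handle) = NULL;             // clear first, as destroy_global does,
    _global_handles->release(ptr);          // then return the slot to the storage
  }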
 void JNIHandles::oops_do(OopClosure* f) {
-  f->do_oop(&_deleted_handle);
   _global_handles->oops_do(f);
 }
 
@@ -165,19 +180,54 @@ void JNIHandles::weak_oops_do(BoolObjectClosure* is_alive, OopClosure* f) {
 
 void JNIHandles::weak_oops_do(OopClosure* f) {
-  AlwaysTrueClosure always_true;
-  weak_oops_do(&always_true, f);
+  _weak_global_handles->weak_oops_do(f);
 }
 
 void JNIHandles::initialize() {
-  _global_handles = JNIHandleBlock::allocate_block();
-  _weak_global_handles = JNIHandleBlock::allocate_block();
-  EXCEPTION_MARK;
-  // We will never reach the CATCH below since Exceptions::_throw will cause
-  // the VM to exit if an exception is thrown during initialization
-  Klass* k = SystemDictionary::Object_klass();
-  _deleted_handle = InstanceKlass::cast(k)->allocate_instance(CATCH);
+  _global_handles = new OopStorage("JNI Global",
+                                   JNIGlobalAlloc_lock,
+                                   JNIGlobalActive_lock);
+  _weak_global_handles = new OopStorage("JNI Weak",
+                                        JNIWeakAlloc_lock,
+                                        JNIWeakActive_lock);
 }
 
+inline bool is_storage_handle(const OopStorage* storage, const oop* ptr) {
+  return storage->allocation_status(ptr) == OopStorage::ALLOCATED_ENTRY;
+}
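The three-way allocation_status result used by is_storage_handle is the key new query. A sketch of its meaning, with names taken from the diff; storage and ptr here are hypothetical:

  switch (storage->allocation_status(ptr)) {
  case OopStorage::ALLOCATED_ENTRY:    // ptr is a live entry owned by this storage
    break;
  case OopStorage::UNALLOCATED_ENTRY:  // ptr points into a block, but the slot is free
    break;
  case OopStorage::INVALID_ENTRY:      // ptr does not point into this storage at all
    break;
  }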
+jobjectRefType JNIHandles::handle_type(Thread* thread, jobject handle) {
+  jobjectRefType result = JNIInvalidRefType;
+  if (is_jweak(handle)) {
+    if (is_storage_handle(_weak_global_handles, &jweak_ref(handle))) {
+      result = JNIWeakGlobalRefType;
+    }
+  } else {
+    switch (_global_handles->allocation_status(&jobject_ref(handle))) {
+    case OopStorage::ALLOCATED_ENTRY:
+      result = JNIGlobalRefType;
+      break;
+
+    case OopStorage::UNALLOCATED_ENTRY:
+      break;                    // Invalid global handle
+
+    case OopStorage::INVALID_ENTRY:
+      // Not in global storage.  Might be a local handle.
+      if (is_local_handle(thread, handle) ||
+          (thread->is_Java_thread() &&
+           is_frame_handle((JavaThread*)thread, handle))) {
+        result = JNILocalRefType;
+      }
+      break;
+
+    default:
+      ShouldNotReachHere();
+    }
+  }
+  return result;
 }
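handle_type is what ultimately backs the standard JNI GetObjectRefType query. From a native method the classification looks like this (plain JNI, shown for context; local is a hypothetical local reference):

  jobject global = env->NewGlobalRef(local);
  jweak weak = env->NewWeakGlobalRef(local);
  jobjectRefType t1 = env->GetObjectRefType(global); // JNIGlobalRefType
  jobjectRefType t2 = env->GetObjectRefType(weak);   // JNIWeakGlobalRefType
  jobjectRefType t3 = env->GetObjectRefType(local);  // JNILocalRefType
  env->DeleteGlobalRef(global);
  // Querying a deleted ref is erroneous JNI, but would now likely hit the
  // UNALLOCATED_ENTRY case above and report JNIInvalidRefType.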
@@ -210,53 +260,37 @@ bool JNIHandles::is_frame_handle(JavaThread* thr, jobject obj) {
 
 bool JNIHandles::is_global_handle(jobject handle) {
-  return _global_handles->chain_contains(handle);
+  return !is_jweak(handle) && is_storage_handle(_global_handles, &jobject_ref(handle));
 }
 
 bool JNIHandles::is_weak_global_handle(jobject handle) {
-  return _weak_global_handles->chain_contains(handle);
+  return is_jweak(handle) && is_storage_handle(_weak_global_handles, &jweak_ref(handle));
 }
 
-long JNIHandles::global_handle_memory_usage() {
-  return _global_handles->memory_usage();
+size_t JNIHandles::global_handle_memory_usage() {
+  return _global_handles->total_memory_usage();
 }
 
-long JNIHandles::weak_global_handle_memory_usage() {
-  return _weak_global_handles->memory_usage();
+size_t JNIHandles::weak_global_handle_memory_usage() {
+  return _weak_global_handles->total_memory_usage();
 }
 
-class CountHandleClosure: public OopClosure {
-private:
-  int _count;
-public:
-  CountHandleClosure(): _count(0) {}
-  virtual void do_oop(oop* ooph) {
-    if (*ooph != JNIHandles::deleted_handle()) {
-      _count++;
-    }
-  }
-  virtual void do_oop(narrowOop* unused) { ShouldNotReachHere(); }
-  int count() { return _count; }
-};
-
 // We assume this is called at a safepoint: no lock is needed.
 void JNIHandles::print_on(outputStream* st) {
   assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
   assert(_global_handles != NULL && _weak_global_handles != NULL,
          "JNIHandles not initialized");
 
-  CountHandleClosure global_handle_count;
-  oops_do(&global_handle_count);
-  weak_oops_do(&global_handle_count);
-
-  st->print_cr("JNI global references: %d", global_handle_count.count());
+  st->print_cr("JNI global refs: " SIZE_FORMAT ", weak refs: " SIZE_FORMAT,
+               _global_handles->allocation_count(),
+               _weak_global_handles->allocation_count());
   st->cr();
   st->flush();
 }
 
-class VerifyHandleClosure: public OopClosure {
+class VerifyJNIHandles: public OopClosure {
 public:
   virtual void do_oop(oop* root) {
     (*root)->verify();
@@ -265,7 +299,7 @@ public:
 };
 
 void JNIHandles::verify() {
-  VerifyHandleClosure verify_handle;
+  VerifyJNIHandles verify_handle;
 
   oops_do(&verify_handle);
   weak_oops_do(&verify_handle);
@@ -419,34 +453,6 @@ void JNIHandleBlock::oops_do(OopClosure* f) {
 }
 
-void JNIHandleBlock::weak_oops_do(BoolObjectClosure* is_alive,
-                                  OopClosure* f) {
-  for (JNIHandleBlock* current = this; current != NULL; current = current->_next) {
-    assert(current->pop_frame_link() == NULL,
-           "blocks holding weak global JNI handles should not have pop frame link set");
-    for (int index = 0; index < current->_top; index++) {
-      oop* root = &(current->_handles)[index];
-      oop value = *root;
-      // traverse heap pointers only, not deleted handles or free list pointers
-      if (value != NULL && Universe::heap()->is_in_reserved(value)) {
-        if (is_alive->do_object_b(value)) {
-          // The weakly referenced object is alive, update pointer
-          f->do_oop(root);
-        } else {
-          // The weakly referenced object is not alive, clear the reference by storing NULL
-          log_develop_trace(gc, ref)("Clearing JNI weak reference (" INTPTR_FORMAT ")", p2i(root));
-          *root = NULL;
-        }
-      }
-    }
-    // the next handle block is valid only if current block is full
-    if (current->_top < block_size_in_oops) {
-      break;
-    }
-  }
-}
-
 jobject JNIHandleBlock::allocate_handle(oop obj) {
   assert(Universe::heap()->is_in_reserved(obj), "sanity check");
   if (_top == 0) {
@@ -514,15 +520,6 @@ jobject JNIHandleBlock::allocate_handle(oop obj) {
   return allocate_handle(obj);  // retry
 }
-void JNIHandleBlock::release_handle(jobject h) {
-  if (h != NULL) {
-    assert(chain_contains(h), "does not contain the JNI handle");
-    // Mark the handle as deleted, allocate will reuse it
-    *((oop*)h) = JNIHandles::deleted_handle();
-  }
-}
-
 void JNIHandleBlock::rebuild_free_list() {
   assert(_allocate_before_rebuild == 0 && _free_list == NULL, "just checking");
   int free = 0;
@@ -530,7 +527,7 @@ void JNIHandleBlock::rebuild_free_list() {
   for (JNIHandleBlock* current = this; current != NULL; current = current->_next) {
     for (int index = 0; index < current->_top; index++) {
       oop* handle = &(current->_handles)[index];
-      if (*handle == JNIHandles::deleted_handle()) {
+      if (*handle == NULL) {
         // this handle was cleared out by a delete call, reuse it
         *handle = (oop) _free_list;
         _free_list = handle;
@@ -568,29 +565,43 @@ bool JNIHandleBlock::chain_contains(jobject handle) const {
 }
 
-int JNIHandleBlock::length() const {
-  int result = 1;
+size_t JNIHandleBlock::length() const {
+  size_t result = 1;
   for (JNIHandleBlock* current = _next; current != NULL; current = current->_next) {
     result++;
   }
   return result;
 }
 
+class CountJNIHandleClosure: public OopClosure {
+private:
+  int _count;
+public:
+  CountJNIHandleClosure(): _count(0) {}
+  virtual void do_oop(oop* ooph) { _count++; }
+  virtual void do_oop(narrowOop* unused) { ShouldNotReachHere(); }
+  int count() { return _count; }
+};
+
 const size_t JNIHandleBlock::get_number_of_live_handles() {
-  CountHandleClosure counter;
+  CountJNIHandleClosure counter;
   oops_do(&counter);
   return counter.count();
 }
 
 // This method is not thread-safe, i.e., must be called while holding a lock on the
 // structure.
-long JNIHandleBlock::memory_usage() const {
+size_t JNIHandleBlock::memory_usage() const {
   return length() * sizeof(JNIHandleBlock);
 }
 
 #ifndef PRODUCT
+
+bool JNIHandles::is_local_handle(jobject handle) {
+  return JNIHandleBlock::any_contains(handle);
+}
+
 bool JNIHandleBlock::any_contains(jobject handle) {
   for (JNIHandleBlock* current = _block_list; current != NULL; current = current->_block_list_link) {
     if (current->contains(handle)) {
[jniHandles.hpp]
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1998, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1998, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -29,6 +29,7 @@
 #include "runtime/handles.hpp"
 
 class JNIHandleBlock;
+class OopStorage;
 
 // Interface for creating and resolving local/global JNI handles
@@ -36,17 +37,15 @@ class JNIHandleBlock;
 class JNIHandles : AllStatic {
   friend class VMStructs;
  private:
-  static JNIHandleBlock* _global_handles;      // First global handle block
-  static JNIHandleBlock* _weak_global_handles; // First weak global handle block
-  static oop _deleted_handle;                  // Sentinel marking deleted handles
+  static OopStorage* _global_handles;
+  static OopStorage* _weak_global_handles;
 
   inline static bool is_jweak(jobject handle);
   inline static oop& jobject_ref(jobject handle); // NOT jweak!
   inline static oop& jweak_ref(jobject handle);
 
-  template<bool external_guard> inline static oop guard_value(oop value);
   template<bool external_guard> inline static oop resolve_impl(jobject handle);
-  template<bool external_guard> static oop resolve_jweak(jweak handle);
+  static oop resolve_jweak(jweak handle);
 
   // This method is not inlined in order to avoid circular includes between
   // this header file and thread.hpp.
@@ -80,19 +79,14 @@ class JNIHandles : AllStatic {
   inline static void destroy_local(jobject handle);
 
   // Global handles
-  static jobject make_global(Handle obj);
+  static jobject make_global(Handle obj, AllocFailType alloc_failmode = AllocFailStrategy::EXIT_OOM);
   static void destroy_global(jobject handle);
 
   // Weak global handles
-  static jobject make_weak_global(Handle obj);
+  static jobject make_weak_global(Handle obj, AllocFailType alloc_failmode = AllocFailStrategy::EXIT_OOM);
   static void destroy_weak_global(jobject handle);
   static bool is_global_weak_cleared(jweak handle); // Test jweak without resolution
 
-  // Sentinel marking deleted handles in block. Note that we cannot store NULL as
-  // the sentinel, since clearing weak global JNI refs are done by storing NULL in
-  // the handle. The handle may not be reused before destroy_weak_global is called.
-  static oop deleted_handle() { return _deleted_handle; }
-
   // Initialization
   static void initialize();
@@ -104,8 +98,15 @@ class JNIHandles : AllStatic {
   static bool is_frame_handle(JavaThread* thr, jobject obj);
   static bool is_global_handle(jobject handle);
   static bool is_weak_global_handle(jobject handle);
-  static long global_handle_memory_usage();
-  static long weak_global_handle_memory_usage();
+  static size_t global_handle_memory_usage();
+  static size_t weak_global_handle_memory_usage();
+
+#ifndef PRODUCT
+  // Is handle from any local block of any thread?
+  static bool is_local_handle(jobject handle);
+#endif
+
+  static jobjectRefType handle_type(Thread* thread, jobject handle);
 
   // Garbage collection support(global handles only, local handles are traversed from thread)
   // Traversal of regular global handles
@@ -164,9 +165,6 @@ class JNIHandleBlock : public CHeapObj<mtInternal> {
   // Handle allocation
   jobject allocate_handle(oop obj);
 
-  // Release Handle
-  void release_handle(jobject);
-
   // Block allocation and block free list management
   static JNIHandleBlock* allocate_block(Thread* thread = NULL);
   static void release_block(JNIHandleBlock* block, Thread* thread = NULL);
@@ -179,10 +177,8 @@ class JNIHandleBlock : public CHeapObj<mtInternal> {
   static int top_offset_in_bytes() { return offset_of(JNIHandleBlock, _top); }
 
   // Garbage collection support
-  // Traversal of regular handles
+  // Traversal of handles
   void oops_do(OopClosure* f);
-  // Traversal of weak handles. Unreachable oops are cleared.
-  void weak_oops_do(BoolObjectClosure* is_alive, OopClosure* f);
 
   // Checked JNI support
   void set_planned_capacity(size_t planned_capacity) { _planned_capacity = planned_capacity; }
@@ -192,8 +188,8 @@ class JNIHandleBlock : public CHeapObj<mtInternal> {
   // Debugging
   bool chain_contains(jobject handle) const;  // Does this block or following blocks contain handle
   bool contains(jobject handle) const;        // Does this block contain handle
-  int length() const;                         // Length of chain starting with this block
-  long memory_usage() const;
+  size_t length() const;                      // Length of chain starting with this block
+  size_t memory_usage() const;
 #ifndef PRODUCT
   static bool any_contains(jobject handle);   // Does any block currently in use contain handle
   static void print_statistics();
@@ -217,19 +213,6 @@ inline oop& JNIHandles::jweak_ref(jobject handle) {
   return *reinterpret_cast<oop*>(ptr);
 }
 
-// external_guard is true if called from resolve_external_guard.
-// Treat deleted (and possibly zapped) as NULL for external_guard,
-// else as (asserted) error.
-template<bool external_guard>
-inline oop JNIHandles::guard_value(oop value) {
-  if (!external_guard) {
-    assert(value != deleted_handle(), "Used a deleted global handle");
-  } else if (value == deleted_handle()) {
-    value = NULL;
-  }
-  return value;
-}
-
 // external_guard is true if called from resolve_external_guard.
 template<bool external_guard>
 inline oop JNIHandles::resolve_impl(jobject handle) {
@@ -237,14 +220,12 @@ inline oop JNIHandles::resolve_impl(jobject handle) {
   assert(!current_thread_in_native(), "must not be in native");
   oop result;
   if (is_jweak(handle)) {       // Unlikely
-    result = resolve_jweak<external_guard>(handle);
+    result = resolve_jweak(handle);
   } else {
     result = jobject_ref(handle);
     // Construction of jobjects canonicalize a null value into a null
     // jobject, so for non-jweak the pointee should never be null.
-    assert(external_guard || result != NULL,
-           "Invalid value read from jni handle");
-    result = guard_value<external_guard>(result);
+    assert(external_guard || result != NULL, "Invalid JNI handle");
   }
   return result;
 }
@@ -278,7 +259,8 @@ inline oop JNIHandles::resolve_non_null(jobject handle) {
 
 inline void JNIHandles::destroy_local(jobject handle) {
   if (handle != NULL) {
-    jobject_ref(handle) = deleted_handle();
+    assert(!is_jweak(handle), "Invalid JNI local handle");
+    jobject_ref(handle) = NULL;
   }
 }
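The weak_tag_value arithmetic seen in make_weak_global relies on a low-bit tagging scheme declared elsewhere in jniHandles.hpp. A hedged sketch of the two accessors it pairs with, assuming the weak_tag_* constants from that header:

  inline bool JNIHandles::is_jweak(jobject handle) {
    // oop* slots are at least 2-byte aligned, so the low address bit is free
    // to distinguish jweaks from ordinary jobjects.
    return (reinterpret_cast<uintptr_t>(handle) & weak_tag_mask) != 0;
  }

  inline oop& JNIHandles::jweak_ref(jobject handle) {
    assert(is_jweak(handle), "precondition");
    char* ptr = reinterpret_cast<char*>(handle) - weak_tag_value;  // strip the tag
    return *reinterpret_cast<oop*>(ptr);
  }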
[mutexLocker.cpp]
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -43,7 +43,10 @@ Mutex* Module_lock = NULL;
 Mutex* CompiledIC_lock = NULL;
 Mutex* InlineCacheBuffer_lock = NULL;
 Mutex* VMStatistic_lock = NULL;
-Mutex* JNIGlobalHandle_lock = NULL;
+Mutex* JNIGlobalAlloc_lock = NULL;
+Mutex* JNIGlobalActive_lock = NULL;
+Mutex* JNIWeakAlloc_lock = NULL;
+Mutex* JNIWeakActive_lock = NULL;
 Mutex* JNIHandleBlockFreeList_lock = NULL;
 Mutex* ResolvedMethodTable_lock = NULL;
 Mutex* JmethodIdCreation_lock = NULL;
@@ -245,7 +248,10 @@ void mutex_init() {
   def(Terminator_lock            , PaddedMonitor, nonleaf,   true,  Monitor::_safepoint_check_sometimes);
   def(VtableStubs_lock           , PaddedMutex  , nonleaf,   true,  Monitor::_safepoint_check_always);
   def(Notify_lock                , PaddedMonitor, nonleaf,   true,  Monitor::_safepoint_check_always);
-  def(JNIGlobalHandle_lock       , PaddedMutex  , nonleaf,   true,  Monitor::_safepoint_check_always);  // locks JNIHandleBlockFreeList_lock
+  def(JNIGlobalAlloc_lock        , PaddedMutex  , nonleaf,   true,  Monitor::_safepoint_check_never);
+  def(JNIGlobalActive_lock       , PaddedMutex  , nonleaf-1, true,  Monitor::_safepoint_check_never);
+  def(JNIWeakAlloc_lock          , PaddedMutex  , nonleaf,   true,  Monitor::_safepoint_check_never);
+  def(JNIWeakActive_lock         , PaddedMutex  , nonleaf-1, true,  Monitor::_safepoint_check_never);
   def(JNICritical_lock           , PaddedMonitor, nonleaf,   true,  Monitor::_safepoint_check_always);  // used for JNI critical regions
   def(AdapterHandlerLibrary_lock , PaddedMutex  , nonleaf,   true,  Monitor::_safepoint_check_always);
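The rank split looks deliberate: if I read HotSpot's lock-rank checking correctly, a thread may only acquire a lock ranked strictly below the locks it already holds, so placing each Active lock at nonleaf-1 permits nesting it under the corresponding Alloc lock (rank nonleaf). A sketch only; the real acquisitions happen inside OopStorage:

  {
    MutexLockerEx ml(JNIGlobalAlloc_lock, Mutex::_no_safepoint_check_flag);   // rank nonleaf
    // ... while manipulating the allocation list, the active list may be needed:
    MutexLockerEx al(JNIGlobalActive_lock, Mutex::_no_safepoint_check_flag);  // rank nonleaf-1: allowed
  }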
[mutexLocker.hpp]
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -36,7 +36,10 @@ extern Mutex* Module_lock; // a lock on module and package
 extern Mutex*   CompiledIC_lock;             // a lock used to guard compiled IC patching and access
 extern Mutex*   InlineCacheBuffer_lock;      // a lock used to guard the InlineCacheBuffer
 extern Mutex*   VMStatistic_lock;            // a lock used to guard statistics count increment
-extern Mutex*   JNIGlobalHandle_lock;        // a lock on creating JNI global handles
+extern Mutex*   JNIGlobalAlloc_lock;         // JNI global storage allocate list lock
+extern Mutex*   JNIGlobalActive_lock;        // JNI global storage active list lock
+extern Mutex*   JNIWeakAlloc_lock;           // JNI weak storage allocate list lock
+extern Mutex*   JNIWeakActive_lock;          // JNI weak storage active list lock
 extern Mutex*   JNIHandleBlockFreeList_lock; // a lock on the JNI handle block free list
 extern Mutex*   ResolvedMethodTable_lock;    // a lock on the ResolvedMethodTable updates
 extern Mutex*   JmethodIdCreation_lock;      // a lock on creating JNI method identifiers
[os.cpp]
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -1087,7 +1087,7 @@ void os::print_location(outputStream* st, intptr_t x, bool verbose) {
   }
 
 #ifndef PRODUCT
   // we don't keep the block list in product mode
-  if (JNIHandleBlock::any_contains((jobject) addr)) {
+  if (JNIHandles::is_local_handle((jobject) addr)) {
     st->print_cr(INTPTR_FORMAT " is a local jni handle", p2i(addr));
     return;
   }
[vmStructs.cpp]
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2000, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -55,6 +55,7 @@
 #include "gc/shared/genCollectedHeap.hpp"
 #include "gc/shared/generation.hpp"
 #include "gc/shared/generationSpec.hpp"
+#include "gc/shared/oopStorage.hpp"
 #include "gc/shared/space.hpp"
 #include "interpreter/bytecodeInterpreter.hpp"
 #include "interpreter/bytecodes.hpp"
@@ -948,10 +949,8 @@ typedef PaddedEnd<ObjectMonitor> PaddedObjectMonitor;
   /*********************************/                                        \
   /* JNIHandles and JNIHandleBlock */                                        \
   /*********************************/                                        \
-  static_field(JNIHandles, _global_handles,      JNIHandleBlock*)            \
-  static_field(JNIHandles, _weak_global_handles, JNIHandleBlock*)            \
-  static_field(JNIHandles, _deleted_handle,      oop)                        \
+  static_field(JNIHandles, _global_handles,      OopStorage*)                \
+  static_field(JNIHandles, _weak_global_handles, OopStorage*)                \
                                                                              \
   unchecked_nonstatic_field(JNIHandleBlock, _handles, JNIHandleBlock::block_size_in_oops * sizeof(Oop)) /* Note: no type */ \
   nonstatic_field(JNIHandleBlock, _top, int)                                 \
   nonstatic_field(JNIHandleBlock, _next, JNIHandleBlock*)                    \
@@ -1677,6 +1676,12 @@ typedef PaddedEnd<ObjectMonitor> PaddedObjectMonitor;
   declare_toplevel_type(JNIHandleBlock)                                      \
   declare_toplevel_type(jobject)                                             \
                                                                              \
+  /**************/                                                           \
+  /* OopStorage */                                                           \
+  /**************/                                                           \
+                                                                             \
+  declare_toplevel_type(OopStorage)                                          \
+                                                                             \
   /**********************/                                                   \
   /* Runtime1 (C1 only) */                                                   \
   /**********************/                                                   \
[vm_operations.hpp]
@@ -115,6 +115,7 @@
   template(ICBufferFull)                        \
   template(ScavengeMonitors)                    \
   template(PrintMetadata)                       \
+  template(GTestExecuteAtSafepoint)             \
 
 class VM_Operation: public CHeapObj<mtInternal> {
  public:
@@ -286,6 +287,17 @@ class VM_ScavengeMonitors: public VM_ForceSafepoint {
   bool is_cheap_allocated() const { return true; }
 };
 
+// Base class for invoking parts of a gtest in a safepoint.
+// Derived classes provide the doit method.
+// Typically also need to transition the gtest thread from native to VM.
+class VM_GTestExecuteAtSafepoint: public VM_Operation {
+ public:
+  VMOp_Type type() const { return VMOp_GTestExecuteAtSafepoint; }
+
+ protected:
+  VM_GTestExecuteAtSafepoint() {}
+};
+
 class VM_Deoptimize: public VM_Operation {
  public:
   VM_Deoptimize() {}
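A hedged sketch of how a gtest might drive the new base class; the test name and check are hypothetical, while VMThread::execute, ThreadInVMfromNative, and TEST_VM are existing HotSpot facilities:

  class VM_OopStorageCheck : public VM_GTestExecuteAtSafepoint {
  public:
    void doit() {
      // Runs in the VM thread, at a safepoint.
      ASSERT_TRUE(SafepointSynchronize::is_at_safepoint());
      // ... inspect storage internals safely here ...
    }
  };

  TEST_VM(OopStorage, example_at_safepoint) {
    JavaThread* thread = JavaThread::current();
    ThreadInVMfromNative transition(thread);  // gtest threads run in native state
    VM_OopStorageCheck op;
    VMThread::execute(&op);
  }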
[heapDumper.cpp]
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2005, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2005, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -1317,9 +1317,9 @@ class JNILocalsDumper : public OopClosure {
 
 void JNILocalsDumper::do_oop(oop* obj_p) {
-  // ignore null or deleted handles
+  // ignore null handles
   oop o = *obj_p;
-  if (o != NULL && o != JNIHandles::deleted_handle()) {
+  if (o != NULL) {
     writer()->write_u1(HPROF_GC_ROOT_JNI_LOCAL);
     writer()->write_objectID(o);
     writer()->write_u4(_thread_serial_num);
@@ -1347,7 +1347,7 @@ void JNIGlobalsDumper::do_oop(oop* obj_p) {
   oop o = *obj_p;
 
   // ignore these
-  if (o == NULL || o == JNIHandles::deleted_handle()) return;
+  if (o == NULL) return;
 
   // we ignore global ref to symbols and other internal objects
   if (o->is_instance() || o->is_objArray() || o->is_typeArray()) {
@@ -1422,9 +1422,6 @@ class HeapObjectDumper : public ObjectClosure {
 };
 
 void HeapObjectDumper::do_object(oop o) {
-  // hide the sentinel for deleted handles
-  if (o == JNIHandles::deleted_handle()) return;
-
   // skip classes as these emitted as HPROF_GC_CLASS_DUMP records
   if (o->klass() == SystemDictionary::Class_klass()) {
     if (!java_lang_Class::is_primitive(o)) {
[serviceUtil.hpp]
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2003, 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -39,11 +39,6 @@ class ServiceUtil : public AllStatic {
   // Return true if oop represents an object that is "visible"
   // to the java world.
   static inline bool visible_oop(oop o) {
-    // the sentinel for deleted handles isn't visible
-    if (o == JNIHandles::deleted_handle()) {
-      return false;
-    }
-
     // instance
     if (o->is_instance()) {
       // instance objects are visible
[OopStorage.java (new)]
@@ -0,0 +1,59 @@
+/*
+ * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+package sun.jvm.hotspot.gc.shared;
+
+import java.util.*;
+import sun.jvm.hotspot.debugger.*;
+import sun.jvm.hotspot.runtime.*;
+import sun.jvm.hotspot.types.*;
+import sun.jvm.hotspot.utilities.*;
+
+public class OopStorage extends VMObject {
+  static {
+    VM.registerVMInitializedObserver(new Observer() {
+        public void update(Observable o, Object data) {
+          initialize(VM.getVM().getTypeDataBase());
+        }
+      });
+  }
+
+  private static synchronized void initialize(TypeDataBase db) {
+    Type type = db.lookupType("OopStorage");
+  }
+
+  public OopStorage(Address addr) {
+    super(addr);
+  }
+
+  public boolean findOop(Address handle) {
+    // TODO: walk OopStorage to find the Oop
+    return false;
+  }
+
+  public void oopsDo(AddressVisitor visitor) {
+    // TODO: Visit handles in OopStorage
+  }
+}
[JNIHandleBlock.java]
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2000, 2001, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -103,7 +103,7 @@ public class JNIHandleBlock extends VMObject {
 
   /** Debugging routine only. Returns non-null JNIHandleBlock
       containing the JNI handle or null if this handle block and its
-      successors did not contain it (or if the handle was deleted). */
+      successors did not contain it. */
   public JNIHandleBlock blockContainingHandle(Address jniHandle) {
     JNIHandleBlock cur = this;
     while (cur != null) {
@@ -144,8 +144,8 @@ public class JNIHandleBlock extends VMObject {
       Address oopAddr = addr.addOffsetTo(handlesField.getOffset() + x * VM.getVM().getOopSize());
       OopHandle handle = oopAddr.getOopHandleAt(0);
-      if (VM.getVM().getUniverse().isInReserved(handle) && !VM.getVM().getJNIHandles().isDeletedHandle(handle)) {
-        /* the oop handle is valid only if it is not freed (i.e. reserved in heap) and is not a deleted oop */
+      if (VM.getVM().getUniverse().isInReserved(handle)) {
+        /* the oop handle is valid only if it is not freed (i.e. reserved in heap) */
         return oopAddr;
       } else {
         return null;
[JNIHandles.java]
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2000, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -27,11 +27,11 @@ package sun.jvm.hotspot.runtime;
 
 import java.util.*;
 import sun.jvm.hotspot.debugger.*;
 import sun.jvm.hotspot.types.*;
+import sun.jvm.hotspot.gc.shared.OopStorage;
 
 public class JNIHandles {
   private static AddressField globalHandlesField;
   private static AddressField weakGlobalHandlesField;
-  private static OopField     deletedHandleField;
 
   static {
     VM.registerVMInitializedObserver(new Observer() {
@@ -46,35 +46,26 @@ public class JNIHandles {
     globalHandlesField = type.getAddressField("_global_handles");
     weakGlobalHandlesField = type.getAddressField("_weak_global_handles");
-    deletedHandleField = type.getOopField("_deleted_handle");
   }
 
   public JNIHandles() {
   }
 
-  public JNIHandleBlock globalHandles() {
+  public OopStorage globalHandles() {
     Address handleAddr = globalHandlesField.getValue();
     if (handleAddr == null) {
       return null;
    }
-    return new JNIHandleBlock(handleAddr);
+    return new OopStorage(handleAddr);
   }
 
-  public JNIHandleBlock weakGlobalHandles() {
+  public OopStorage weakGlobalHandles() {
     Address handleAddr = weakGlobalHandlesField.getValue();
     if (handleAddr == null) {
       return null;
    }
-    return new JNIHandleBlock(handleAddr);
-  }
-
-  public OopHandle deletedHandle() {
-    return deletedHandleField.getValue();
-  }
-
-  public boolean isDeletedHandle(OopHandle handle) {
-    return (handle != null && handle.equals(deletedHandle()));
+    return new OopStorage(handleAddr);
   }
 }
[AbstractHeapGraphWriter.java]
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2004, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2004, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -26,6 +26,7 @@ package sun.jvm.hotspot.utilities;
 
 import java.io.*;
 import sun.jvm.hotspot.debugger.*;
+import sun.jvm.hotspot.gc.shared.OopStorage;
 import sun.jvm.hotspot.memory.*;
 import sun.jvm.hotspot.oops.*;
 import sun.jvm.hotspot.runtime.*;
@@ -147,7 +148,7 @@ public abstract class AbstractHeapGraphWriter implements HeapGraphWriter {
 
   protected void writeGlobalJNIHandles() throws IOException {
     JNIHandles handles = VM.getVM().getJNIHandles();
-    JNIHandleBlock blk = handles.globalHandles();
+    OopStorage blk = handles.globalHandles();
     if (blk != null) {
       try {
         blk.oopsDo(new AddressVisitor() {
[PointerFinder.java]
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2000, 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -108,41 +108,36 @@ public class PointerFinder {
 
     // Check JNIHandles; both local and global
     JNIHandles handles = VM.getVM().getJNIHandles();
-    JNIHandleBlock handleBlock = handles.globalHandles();
-    if (handleBlock != null) {
-      handleBlock = handleBlock.blockContainingHandle(a);
-    }
-    if (handleBlock != null) {
-      loc.inStrongGlobalJNIHandleBlock = true;
-      loc.handleBlock = handleBlock;
-      return loc;
-    } else {
-      handleBlock = handles.weakGlobalHandles();
-      if (handleBlock != null) {
-        handleBlock = handleBlock.blockContainingHandle(a);
-        if (handleBlock != null) {
-          loc.inWeakGlobalJNIHandleBlock = true;
-          loc.handleBlock = handleBlock;
-          return loc;
-        } else {
-          // Look in thread-local handles
-          for (JavaThread t = VM.getVM().getThreads().first(); t != null; t = t.next()) {
-            handleBlock = t.activeHandles();
-            if (handleBlock != null) {
-              handleBlock = handleBlock.blockContainingHandle(a);
-              if (handleBlock != null) {
-                loc.inLocalJNIHandleBlock = true;
-                loc.handleBlock = handleBlock;
-                loc.handleThread = t;
-                return loc;
-              }
-            }
-          }
-        }
-      }
-    }
+
+    // --- looking in oopstorage should model OopStorage::allocation_status?
+    // --- that is, if in a block but not allocated, then not valid.
+
+    // Look in global handles
+    OopStorage storage = handles.globalHandles();
+    if ((storage != null) && storage.findOop(a)) {
+      loc.inStrongGlobalJNIHandles = true;
+      return loc;
+    }
+    // Look in weak global handles
+    storage = handles.weakGlobalHandles();
+    if ((storage != null) && storage.findOop(a)) {
+      loc.inWeakGlobalJNIHandles = true;
+      return loc;
+    }
+    // Look in thread-local handles
+    for (JavaThread t = VM.getVM().getThreads().first(); t != null; t = t.next()) {
+      JNIHandleBlock handleBlock = t.activeHandles();
+      if (handleBlock != null) {
+        handleBlock = handleBlock.blockContainingHandle(a);
+        if (handleBlock != null) {
+          loc.inLocalJNIHandleBlock = true;
+          loc.handleBlock = handleBlock;
+          loc.handleThread = t;
+          return loc;
+        }
+      }
+    }
 
     // Fall through; have to return it anyway.
     return loc;
   }
[PointerLocation.java]
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2000, 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -69,8 +69,9 @@ public class PointerLocation {
   boolean inBlobOops;
   boolean inBlobUnknownLocation;
 
-  boolean inStrongGlobalJNIHandleBlock;
-  boolean inWeakGlobalJNIHandleBlock;
+  boolean inStrongGlobalJNIHandles;
+  boolean inWeakGlobalJNIHandles;
+
   boolean inLocalJNIHandleBlock;
   JNIHandleBlock handleBlock;
   sun.jvm.hotspot.runtime.Thread handleThread;
@@ -149,32 +150,33 @@ public class PointerLocation {
     return inBlobUnknownLocation;
   }
 
-  public boolean isInStrongGlobalJNIHandleBlock() {
-    return inStrongGlobalJNIHandleBlock;
+  public boolean isInStrongGlobalJNIHandles() {
+    return inStrongGlobalJNIHandles;
   }
 
-  public boolean isInWeakGlobalJNIHandleBlock() {
-    return inWeakGlobalJNIHandleBlock;
+  public boolean isInWeakGlobalJNIHandles() {
+    return inWeakGlobalJNIHandles;
   }
 
   public boolean isInLocalJNIHandleBlock() {
     return inLocalJNIHandleBlock;
   }
 
-  /** Only valid if isInStrongGlobalJNIHandleBlock,
-      isInWeakGlobalJNIHandleBlock, or isInLocalJNIHandleBlock is true */
+  /** Only valid if isInLocalJNIHandleBlock is true */
   public JNIHandleBlock getJNIHandleBlock() {
+    assert isInLocalJNIHandleBlock();
     return handleBlock;
   }
 
   /** Only valid if isInLocalJNIHandleBlock is true */
   public sun.jvm.hotspot.runtime.Thread getJNIHandleThread() {
+    assert isInLocalJNIHandleBlock();
     return handleThread;
   }
 
   public boolean isUnknown() {
     return (!(isInHeap() || isInInterpreter() || isInCodeCache() ||
-              isInStrongGlobalJNIHandleBlock() || isInWeakGlobalJNIHandleBlock() || isInLocalJNIHandleBlock()));
+              isInStrongGlobalJNIHandles() || isInWeakGlobalJNIHandles() || isInLocalJNIHandleBlock()));
   }
 
   public String toString() {
@@ -236,25 +238,18 @@ public class PointerLocation {
       b.printOn(tty);
       // FIXME: add more detail
-    } else if (isInStrongGlobalJNIHandleBlock() ||
-               isInWeakGlobalJNIHandleBlock() ||
-               isInLocalJNIHandleBlock()) {
-      tty.print("In ");
-      if (isInStrongGlobalJNIHandleBlock()) {
-        tty.print("strong global");
-      } else if (isInWeakGlobalJNIHandleBlock()) {
-        tty.print("weak global");
-      } else {
-        tty.print("thread-local");
-      }
+    } else if (isInStrongGlobalJNIHandles()) {
+      tty.print("In JNI strong global");
+    } else if (isInWeakGlobalJNIHandles()) {
+      tty.print("In JNI weak global");
+    } else if (isInLocalJNIHandleBlock()) {
+      tty.print("In thread-local");
       tty.print(" JNI handle block (" + handleBlock.top() + " handle slots present)");
-      if (isInLocalJNIHandleBlock()) {
-        if (handleThread.isJavaThread()) {
-          tty.print(" for JavaThread ");
-          ((JavaThread) handleThread).printThreadIDOn(tty);
-        } else {
-          tty.print("for a non-Java Thread");
-        }
+      if (handleThread.isJavaThread()) {
+        tty.print(" for JavaThread ");
+        ((JavaThread) handleThread).printThreadIDOn(tty);
+      } else {
+        tty.print(" for a non-Java Thread");
       }
     } else {
       // This must be last
[ReversePtrsAnalysis.java]
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2002, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2002, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -108,10 +108,10 @@ public class ReversePtrsAnalysis {
 
     // Do global JNI handles
     JNIHandles handles = VM.getVM().getJNIHandles();
-    doJNIHandleBlock(handles.globalHandles(),
-                     new RootVisitor("Global JNI handle root"));
-    doJNIHandleBlock(handles.weakGlobalHandles(),
-                     new RootVisitor("Weak global JNI handle root"));
+    doOopStorage(handles.globalHandles(),
+                 new RootVisitor("Global JNI handle root"));
+    doOopStorage(handles.weakGlobalHandles(),
+                 new RootVisitor("Weak global JNI handle root"));
 
     // Do Java-level static fields
     ClassLoaderDataGraph cldg = VM.getVM().getClassLoaderDataGraph();
@@ -306,4 +306,9 @@ public class ReversePtrsAnalysis {
   private void doJNIHandleBlock(JNIHandleBlock handles, AddressVisitor oopVisitor) {
     handles.oopsDo(oopVisitor);
   }
+
+  // Traverse jobjects in global JNIHandles
+  private void doOopStorage(OopStorage oopSet, AddressVisitor oopVisitor) {
+    oopSet.oopsDo(oopVisitor);
+  }
 }
[File diff suppressed because it is too large.]