Mikael Vidstedt 2019-07-05 11:01:31 -07:00
commit 15bde6d32d
30 changed files with 548 additions and 231 deletions

View file

@@ -266,6 +266,19 @@ bool ClassLoaderData::ChunkedHandleList::owner_of(oop* oop_handle) {
}
#endif // PRODUCT
void ClassLoaderData::clear_claim(int claim) {
for (;;) {
int old_claim = Atomic::load(&_claim);
if ((old_claim & claim) == 0) {
return;
}
int new_claim = old_claim & ~claim;
if (Atomic::cmpxchg(new_claim, &_claim, old_claim) == old_claim) {
return;
}
}
}
bool ClassLoaderData::try_claim(int claim) {
for (;;) {
int old_claim = Atomic::load(&_claim);
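For context, the rest of try_claim is cut off by this hunk; it follows the same compare-and-swap pattern as clear_claim above, only setting the requested bits instead of clearing them. A hedged sketch of how the body plausibly continues (not shown in this diff):

bool ClassLoaderData::try_claim(int claim) {
  for (;;) {
    int old_claim = Atomic::load(&_claim);
    if ((old_claim & claim) == claim) {
      return false;                       // all requested bits already set
    }
    int new_claim = old_claim | claim;    // set the requested bits
    if (Atomic::cmpxchg(new_claim, &_claim, old_claim) == old_claim) {
      return true;                        // this thread won the claim
    }
  }
}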

View file

@@ -206,16 +206,17 @@ class ClassLoaderData : public CHeapObj<mtClass> {
// The "claim" is typically used to check if oops_do needs to be applied on
// the CLD or not. Most GCs only perform strong marking during the marking phase.
enum {
_claim_none = 0,
_claim_finalizable = 2,
_claim_strong = 3
enum Claim {
_claim_none = 0,
_claim_finalizable = 2,
_claim_strong = 3,
_claim_other = 4
};
void clear_claim() { _claim = 0; }
void clear_claim(int claim);
bool claimed() const { return _claim != 0; }
bool claimed(int claim) const { return (_claim & claim) == claim; }
bool try_claim(int claim);
int get_claim() const { return _claim; }
void set_claim(int claim) { _claim = claim; }
// Computes if the CLD is alive or not. This is safe to call in concurrent
// contexts.
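For readers new to the claim bits: _claim_strong (3) includes the _claim_finalizable bit (2), while the new _claim_other (4) is an independent bit, so a tool such as the heap iterator can claim CLDs without disturbing concurrent marking. A hedged sketch of the intended usage pattern (the helper name visit_cld_once is hypothetical; the ClassLoaderData and ClassLoaderDataGraph calls are the ones added by this change):

static void visit_cld_once(ClassLoaderData* cld, OopClosure* cl) {
  if (!cld->try_claim(ClassLoaderData::_claim_other)) {
    return;  // another worker already claimed this CLD for the same purpose
  }
  cld->oops_do(cl, ClassLoaderData::_claim_none);  // already claimed above
}
// Afterwards, only the _claim_other bit is reset, leaving marking claims intact:
// ClassLoaderDataGraph::clear_claimed_marks(ClassLoaderData::_claim_other);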

View file

@@ -64,6 +64,11 @@ void ClassLoaderDataGraph::clear_claimed_marks() {
}
}
void ClassLoaderDataGraph::clear_claimed_marks(int claim) {
for (ClassLoaderData* cld = OrderAccess::load_acquire(&_head); cld != NULL; cld = cld->next()) {
cld->clear_claim(claim);
}
}
// Class iterator used by the compiler. It gets some number of classes at
// a safepoint to decay invocation counters on the methods.
class ClassLoaderDataGraphKlassIteratorStatic {
@@ -471,7 +476,7 @@ GrowableArray<ClassLoaderData*>* ClassLoaderDataGraph::new_clds() {
// The CLDs in [_head, _saved_head] were all added during last call to remember_new_clds(true);
ClassLoaderData* curr = _head;
while (curr != _saved_head) {
if (!curr->claimed()) {
if (!curr->claimed(ClassLoaderData::_claim_strong)) {
array->push(curr);
LogTarget(Debug, class, loader, data) lt;
if (lt.is_enabled()) {

View file

@@ -68,6 +68,7 @@ class ClassLoaderDataGraph : public AllStatic {
static void clean_module_and_package_info();
static void purge();
static void clear_claimed_marks();
static void clear_claimed_marks(int claim);
// Iteration through CLDG inside a safepoint; GC support
static void cld_do(CLDClosure* cl);
static void cld_unloading_do(CLDClosure* cl);

View file

@@ -92,6 +92,11 @@ void ZArguments::initialize() {
// same reason we need fixup_partial_loads
FLAG_SET_DEFAULT(VerifyBeforeIteration, false);
if (VerifyBeforeGC || VerifyDuringGC || VerifyAfterGC) {
FLAG_SET_DEFAULT(ZVerifyRoots, true);
FLAG_SET_DEFAULT(ZVerifyObjects, true);
}
// Verification of stacks not (yet) supported, for the same reason
// we need fixup_partial_loads
DEBUG_ONLY(FLAG_SET_DEFAULT(VerifyStack, false));

View file

@@ -232,11 +232,11 @@ GrowableArray<MemoryPool*> ZCollectedHeap::memory_pools() {
}
void ZCollectedHeap::object_iterate(ObjectClosure* cl) {
_heap.object_iterate(cl, true /* visit_referents */);
_heap.object_iterate(cl, true /* visit_weaks */);
}
void ZCollectedHeap::safe_object_iterate(ObjectClosure* cl) {
_heap.object_iterate(cl, true /* visit_referents */);
_heap.object_iterate(cl, true /* visit_weaks */);
}
HeapWord* ZCollectedHeap::block_start(const void* addr) const {

View file

@@ -31,6 +31,7 @@
#include "gc/z/zMessagePort.inline.hpp"
#include "gc/z/zServiceability.hpp"
#include "gc/z/zStat.hpp"
#include "gc/z/zVerify.hpp"
#include "logging/log.hpp"
#include "memory/universe.hpp"
#include "runtime/vmOperations.hpp"
@@ -86,6 +87,9 @@ public:
GCIdMark gc_id_mark(_gc_id);
IsGCActiveMark gc_active_mark;
// Verify roots
ZVerify::roots_strong();
// Execute operation
_success = do_operation();
@@ -301,8 +305,14 @@ void ZDriver::concurrent_reset_relocation_set() {
void ZDriver::pause_verify() {
if (VerifyBeforeGC || VerifyDuringGC || VerifyAfterGC) {
// Full verification
VM_Verify op;
VMThread::execute(&op);
} else if (ZVerifyRoots || ZVerifyObjects) {
// Limited verification
VM_ZVerifyOperation op;
VMThread::execute(&op);
}
}

View file

@@ -41,6 +41,7 @@
#include "gc/z/zTask.hpp"
#include "gc/z/zThread.hpp"
#include "gc/z/zTracer.inline.hpp"
#include "gc/z/zVerify.hpp"
#include "gc/z/zVirtualMemory.inline.hpp"
#include "gc/z/zWorkers.inline.hpp"
#include "logging/log.hpp"
@@ -340,6 +341,9 @@ bool ZHeap::mark_end() {
// Enter mark completed phase
ZGlobalPhase = ZPhaseMarkCompleted;
// Verify after mark
ZVerify::after_mark();
// Update statistics
ZStatSample(ZSamplerHeapUsedAfterMark, used());
ZStatHeap::set_at_mark_end(capacity(), allocated(), used());
@@ -468,11 +472,11 @@ void ZHeap::relocate() {
used(), used_high(), used_low());
}
void ZHeap::object_iterate(ObjectClosure* cl, bool visit_referents) {
void ZHeap::object_iterate(ObjectClosure* cl, bool visit_weaks) {
assert(SafepointSynchronize::is_at_safepoint(), "Should be at safepoint");
ZHeapIterator iter;
iter.objects_do(cl, visit_referents);
iter.objects_do(cl, visit_weaks);
}
void ZHeap::serviceability_initialize() {
@@ -518,40 +522,11 @@ void ZHeap::print_extended_on(outputStream* st) const {
st->cr();
}
class ZVerifyRootsTask : public ZTask {
private:
ZStatTimerDisable _disable;
ZRootsIterator _strong_roots;
ZWeakRootsIterator _weak_roots;
public:
ZVerifyRootsTask() :
ZTask("ZVerifyRootsTask"),
_disable(),
_strong_roots(),
_weak_roots() {}
virtual void work() {
ZStatTimerDisable disable;
ZVerifyOopClosure cl;
_strong_roots.oops_do(&cl);
_weak_roots.oops_do(&cl);
}
};
void ZHeap::verify() {
// Heap verification can only be done between mark end and
// relocate start. This is the only window where all oops are
// good and the whole heap is in a consistent state.
guarantee(ZGlobalPhase == ZPhaseMarkCompleted, "Invalid phase");
{
ZVerifyRootsTask task;
_workers.run_parallel(&task);
}
{
ZVerifyObjectClosure cl;
object_iterate(&cl, false /* visit_referents */);
}
ZVerify::after_weak_processing();
}
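The net effect of this hunk is that ZHeap::verify() keeps its phase guarantee but delegates the actual work. Reconstructed from the lines above, the function now reads roughly:

void ZHeap::verify() {
  // Heap verification can only be done between mark end and
  // relocate start. This is the only window where all oops are
  // good and the whole heap is in a consistent state.
  guarantee(ZGlobalPhase == ZPhaseMarkCompleted, "Invalid phase");

  ZVerify::after_weak_processing();
}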

View file

@@ -161,7 +161,7 @@ public:
void relocate();
// Iteration
void object_iterate(ObjectClosure* cl, bool visit_referents);
void object_iterate(ObjectClosure* cl, bool visit_weaks);
// Serviceability
void serviceability_initialize();

View file

@@ -22,6 +22,8 @@
*/
#include "precompiled.hpp"
#include "classfile/classLoaderData.hpp"
#include "classfile/classLoaderDataGraph.hpp"
#include "gc/z/zBarrier.inline.hpp"
#include "gc/z/zGlobals.hpp"
#include "gc/z/zGranuleMap.inline.hpp"
@@ -83,7 +85,7 @@ public:
};
template <bool VisitReferents>
class ZHeapIteratorOopClosure : public BasicOopIterateClosure {
class ZHeapIteratorOopClosure : public ClaimMetadataVisitingOopIterateClosure {
private:
ZHeapIterator* const _iter;
const oop _base;
@@ -98,6 +100,7 @@ private:
public:
ZHeapIteratorOopClosure(ZHeapIterator* iter, oop base) :
ClaimMetadataVisitingOopIterateClosure(ClassLoaderData::_claim_other),
_iter(iter),
_base(base) {}
@@ -130,6 +133,7 @@ ZHeapIterator::~ZHeapIterator() {
for (ZHeapIteratorBitMap* map; iter.next(&map);) {
delete map;
}
ClassLoaderDataGraph::clear_claimed_marks(ClassLoaderData::_claim_other);
}
static size_t object_index_max() {
@@ -184,15 +188,23 @@ void ZHeapIterator::push_fields(oop obj) {
obj->oop_iterate(&cl);
}
template <bool VisitReferents>
class ZHeapIterateConcurrentRootsIterator : public ZConcurrentRootsIterator {
public:
ZHeapIterateConcurrentRootsIterator() :
ZConcurrentRootsIterator(ClassLoaderData::_claim_other) {}
};
template <bool VisitWeaks>
void ZHeapIterator::objects_do(ObjectClosure* cl) {
ZStatTimerDisable disable;
// Push roots to visit
push_roots<ZRootsIterator, false /* Concurrent */, false /* Weak */>();
push_roots<ZConcurrentRootsIterator, true /* Concurrent */, false /* Weak */>();
push_roots<ZWeakRootsIterator, false /* Concurrent */, true /* Weak */>();
push_roots<ZConcurrentWeakRootsIterator, true /* Concurrent */, true /* Weak */>();
push_roots<ZRootsIterator, false /* Concurrent */, false /* Weak */>();
push_roots<ZHeapIterateConcurrentRootsIterator, true /* Concurrent */, false /* Weak */>();
if (VisitWeaks) {
push_roots<ZWeakRootsIterator, false /* Concurrent */, true /* Weak */>();
push_roots<ZConcurrentWeakRootsIterator, true /* Concurrent */, true /* Weak */>();
}
// Drain stack
while (!_visit_stack.is_empty()) {
@@ -202,14 +214,14 @@ void ZHeapIterator::objects_do(ObjectClosure* cl) {
cl->do_object(obj);
// Push fields to visit
push_fields<VisitReferents>(obj);
push_fields<VisitWeaks>(obj);
}
}
void ZHeapIterator::objects_do(ObjectClosure* cl, bool visit_referents) {
if (visit_referents) {
objects_do<true /* VisitReferents */>(cl);
void ZHeapIterator::objects_do(ObjectClosure* cl, bool visit_weaks) {
if (visit_weaks) {
objects_do<true /* VisitWeaks */>(cl);
} else {
objects_do<false /* VisitReferents */>(cl);
objects_do<false /* VisitWeaks */>(cl);
}
}
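Taken together with the destructor change above, the _claim_other bit has a simple per-iteration lifecycle. An illustrative caller (the function name iterate_all_objects is hypothetical; the ZHeapIterator calls are from this diff):

void iterate_all_objects(ObjectClosure* cl) {
  ZHeapIterator iter;                           // no CLDs claimed yet
  iter.objects_do(cl, true /* visit_weaks */);  // the oop closure claims each visited
                                                // CLD with _claim_other exactly once
}                                               // ~ZHeapIterator() then clears only the
                                                // _claim_other bit via clear_claimed_marks()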

View file

@@ -54,7 +54,7 @@ public:
ZHeapIterator();
~ZHeapIterator();
void objects_do(ObjectClosure* cl, bool visit_referents);
void objects_do(ObjectClosure* cl, bool visit_weaks);
};
#endif // SHARE_GC_Z_ZHEAPITERATOR_HPP

View file

@@ -22,6 +22,7 @@
*/
#include "precompiled.hpp"
#include "classfile/classLoaderDataGraph.hpp"
#include "gc/z/zBarrier.inline.hpp"
#include "gc/z/zMark.inline.hpp"
#include "gc/z/zMarkCache.inline.hpp"
@@ -632,14 +633,23 @@ public:
class ZMarkConcurrentRootsTask : public ZTask {
private:
SuspendibleThreadSetJoiner _sts_joiner;
ZConcurrentRootsIterator _roots;
ZMarkConcurrentRootsIteratorClosure _cl;
public:
ZMarkConcurrentRootsTask(ZMark* mark) :
ZTask("ZMarkConcurrentRootsTask"),
_roots(true /* marking */),
_cl() {}
_sts_joiner(true /* active */),
_roots(ClassLoaderData::_claim_strong),
_cl() {
ClassLoaderDataGraph_lock->lock();
ClassLoaderDataGraph::clear_claimed_marks();
}
~ZMarkConcurrentRootsTask() {
ClassLoaderDataGraph_lock->unlock();
}
virtual void work() {
_roots.oops_do(&_cl);
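The constructor and destructor now bracket the whole task with the ClassLoaderDataGraph_lock, so the claimed marks are cleared and the CLD graph stays stable for the duration of the concurrent root scan. A plausible call site, for illustration only (ZMark::mark_concurrent_roots and the run_concurrent call are assumptions, not part of this diff):

void ZMark::mark_concurrent_roots() {   // hypothetical caller
  ZMarkConcurrentRootsTask task(this);  // ctor: lock CLDG, clear claimed marks
  _workers->run_concurrent(&task);      // workers run work() -> _roots.oops_do(&_cl)
}                                       // dtor: unlock CLDG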

View file

@@ -1,60 +0,0 @@
/*
* Copyright (c) 2015, 2019, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
#include "precompiled.hpp"
#include "gc/z/zHeap.hpp"
#include "gc/z/zOopClosures.inline.hpp"
#include "gc/z/zOop.inline.hpp"
#include "memory/iterator.inline.hpp"
#include "oops/access.inline.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/safepoint.hpp"
#include "utilities/debug.hpp"
#include "utilities/globalDefinitions.hpp"
void ZVerifyOopClosure::do_oop(oop* p) {
guarantee(SafepointSynchronize::is_at_safepoint(), "Must be at a safepoint");
guarantee(ZGlobalPhase == ZPhaseMarkCompleted, "Invalid phase");
guarantee(!ZResurrection::is_blocked(), "Invalid phase");
const oop o = RawAccess<>::oop_load(p);
if (o != NULL) {
const uintptr_t addr = ZOop::to_address(o);
const uintptr_t good_addr = ZAddress::good(addr);
guarantee(ZAddress::is_good(addr) || ZAddress::is_finalizable_good(addr),
"Bad oop " PTR_FORMAT " found at " PTR_FORMAT ", expected " PTR_FORMAT,
addr, p2i(p), good_addr);
guarantee(oopDesc::is_oop(ZOop::from_address(good_addr)),
"Bad object " PTR_FORMAT " found at " PTR_FORMAT,
addr, p2i(p));
}
}
void ZVerifyOopClosure::do_oop(narrowOop* p) {
ShouldNotReachHere();
}
void ZVerifyObjectClosure::do_object(oop o) {
ZVerifyOopClosure cl;
o->oop_iterate(&cl);
}

View file

@@ -1,5 +1,5 @@
/*
* Copyright (c) 2015, 2018, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2015, 2019, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -46,16 +46,13 @@ public:
};
template <bool finalizable>
class ZMarkBarrierOopClosure : public MetadataVisitingOopIterateClosure {
class ZMarkBarrierOopClosure : public ClaimMetadataVisitingOopIterateClosure {
public:
ZMarkBarrierOopClosure();
virtual void do_oop(oop* p);
virtual void do_oop(narrowOop* p);
virtual void do_klass(Klass* k);
virtual void do_cld(ClassLoaderData* cld);
#ifdef ASSERT
virtual bool should_verify_oops() {
return false;
@@ -80,26 +77,4 @@ public:
virtual void do_oop(narrowOop* p);
};
class ZVerifyOopClosure : public ZRootsIteratorClosure, public BasicOopIterateClosure {
public:
virtual void do_oop(oop* p);
virtual void do_oop(narrowOop* p);
virtual ReferenceIterationMode reference_iteration_mode() {
return DO_FIELDS;
}
#ifdef ASSERT
// Verification handled by the closure itself
virtual bool should_verify_oops() {
return false;
}
#endif
};
class ZVerifyObjectClosure : public ObjectClosure {
public:
virtual void do_object(oop o);
};
#endif // SHARE_GC_Z_ZOOPCLOSURES_HPP

View file

@@ -1,5 +1,5 @@
/*
* Copyright (c) 2015, 2018, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2015, 2019, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -55,7 +55,12 @@ inline void ZNMethodOopClosure::do_oop(narrowOop* p) {
template <bool finalizable>
inline ZMarkBarrierOopClosure<finalizable>::ZMarkBarrierOopClosure() :
MetadataVisitingOopIterateClosure(finalizable ? NULL : ZHeap::heap()->reference_discoverer()) {}
ClaimMetadataVisitingOopIterateClosure(finalizable
? ClassLoaderData::_claim_finalizable
: ClassLoaderData::_claim_strong,
finalizable
? NULL
: ZHeap::heap()->reference_discoverer()) {}
template <bool finalizable>
inline void ZMarkBarrierOopClosure<finalizable>::do_oop(oop* p) {
@@ -67,18 +72,6 @@ inline void ZMarkBarrierOopClosure<finalizable>::do_oop(narrowOop* p) {
ShouldNotReachHere();
}
template <bool finalizable>
inline void ZMarkBarrierOopClosure<finalizable>::do_klass(Klass* k) {
ClassLoaderData* const cld = k->class_loader_data();
ZMarkBarrierOopClosure<finalizable>::do_cld(cld);
}
template <bool finalizable>
inline void ZMarkBarrierOopClosure<finalizable>::do_cld(ClassLoaderData* cld) {
const int claim = finalizable ? ClassLoaderData::_claim_finalizable : ClassLoaderData::_claim_strong;
cld->oops_do(this, claim);
}
inline bool ZPhantomIsAliveObjectClosure::do_object_b(oop o) {
return ZBarrier::is_alive_barrier_on_phantom_oop(o);
}
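With do_klass/do_cld now inherited from ClaimMetadataVisitingOopIterateClosure, the template argument alone decides which claim bit is used. Roughly how the two instantiations are exercised during marking (a reconstructed sketch, not part of this diff):

void follow_object(oop obj, bool finalizable) {
  if (finalizable) {
    ZMarkBarrierOopClosure<true /* finalizable */> cl;   // claims _claim_finalizable
    obj->oop_iterate(&cl);
  } else {
    ZMarkBarrierOopClosure<false /* finalizable */> cl;  // claims _claim_strong
    obj->oop_iterate(&cl);
  }
}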

View file

@@ -262,24 +262,16 @@ void ZRootsIterator::oops_do(ZRootsIteratorClosure* cl, bool visit_jvmti_weak_ex
}
}
ZConcurrentRootsIterator::ZConcurrentRootsIterator(bool marking) :
_marking(marking),
_sts_joiner(marking /* active */),
ZConcurrentRootsIterator::ZConcurrentRootsIterator(int cld_claim) :
_jni_handles_iter(JNIHandles::global_handles()),
_cld_claim(cld_claim),
_jni_handles(this),
_class_loader_data_graph(this) {
ZStatTimer timer(ZSubPhaseConcurrentRootsSetup);
if (_marking) {
ClassLoaderDataGraph_lock->lock();
ClassLoaderDataGraph::clear_claimed_marks();
}
}
ZConcurrentRootsIterator::~ZConcurrentRootsIterator() {
ZStatTimer timer(ZSubPhaseConcurrentRootsTeardown);
if (_marking) {
ClassLoaderDataGraph_lock->unlock();
}
}
void ZConcurrentRootsIterator::do_jni_handles(ZRootsIteratorClosure* cl) {
@@ -289,13 +281,8 @@ void ZConcurrentRootsIterator::do_jni_handles(ZRootsIteratorClosure* cl) {
void ZConcurrentRootsIterator::do_class_loader_data_graph(ZRootsIteratorClosure* cl) {
ZStatTimer timer(ZSubPhaseConcurrentRootsClassLoaderDataGraph);
if (_marking) {
CLDToOopClosure cld_cl(cl, ClassLoaderData::_claim_strong);
ClassLoaderDataGraph::always_strong_cld_do(&cld_cl);
} else {
CLDToOopClosure cld_cl(cl, ClassLoaderData::_claim_none);
ClassLoaderDataGraph::cld_do(&cld_cl);
}
CLDToOopClosure cld_cl(cl, _cld_claim);
ClassLoaderDataGraph::always_strong_cld_do(&cld_cl);
}
void ZConcurrentRootsIterator::oops_do(ZRootsIteratorClosure* cl) {

View file

@@ -1,5 +1,5 @@
/*
* Copyright (c) 2015, 2018, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2015, 2019, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -111,9 +111,8 @@ public:
class ZConcurrentRootsIterator {
private:
const bool _marking;
SuspendibleThreadSetJoiner _sts_joiner;
ZOopStorageIterator _jni_handles_iter;
int _cld_claim;
void do_jni_handles(ZRootsIteratorClosure* cl);
void do_class_loader_data_graph(ZRootsIteratorClosure* cl);
@@ -122,7 +121,7 @@ private:
ZParallelOopsDo<ZConcurrentRootsIterator, &ZConcurrentRootsIterator::do_class_loader_data_graph> _class_loader_data_graph;
public:
ZConcurrentRootsIterator(bool marking = false);
ZConcurrentRootsIterator(int cld_claim);
~ZConcurrentRootsIterator();
void oops_do(ZRootsIteratorClosure* cl);

View file

@@ -0,0 +1,187 @@
/*
* Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
#include "precompiled.hpp"
#include "classfile/classLoaderData.hpp"
#include "classfile/classLoaderDataGraph.hpp"
#include "gc/z/zAddress.hpp"
#include "gc/z/zHeap.inline.hpp"
#include "gc/z/zOop.hpp"
#include "gc/z/zResurrection.hpp"
#include "gc/z/zRootsIterator.hpp"
#include "gc/z/zStat.hpp"
#include "gc/z/zVerify.hpp"
#include "memory/allocation.hpp"
#include "memory/iterator.inline.hpp"
#include "oops/oop.inline.hpp"
#define BAD_OOP_REPORT(addr) \
"Bad oop " PTR_FORMAT " found at " PTR_FORMAT ", expected " PTR_FORMAT, \
addr, p2i(p), ZAddress::good(addr)
class ZVerifyRootsClosure : public ZRootsIteratorClosure {
public:
virtual void do_oop(oop* p) {
uintptr_t value = ZOop::to_address(*p);
if (value == 0) {
return;
}
guarantee(!ZAddress::is_finalizable(value), BAD_OOP_REPORT(value));
guarantee(ZAddress::is_good(value), BAD_OOP_REPORT(value));
guarantee(oopDesc::is_oop(ZOop::from_address(value)), BAD_OOP_REPORT(value));
}
virtual void do_oop(narrowOop*) { ShouldNotReachHere(); }
};
template <bool VisitReferents>
class ZVerifyOopClosure : public ClaimMetadataVisitingOopIterateClosure, public ZRootsIteratorClosure {
public:
ZVerifyOopClosure() :
ClaimMetadataVisitingOopIterateClosure(ClassLoaderData::_claim_other) {}
virtual void do_oop(oop* p);
virtual void do_oop(narrowOop* p) { ShouldNotReachHere(); }
virtual ReferenceIterationMode reference_iteration_mode() {
return VisitReferents ? DO_FIELDS : DO_FIELDS_EXCEPT_REFERENT;
}
#ifdef ASSERT
// Verification handled by the closure itself
virtual bool should_verify_oops() {
return false;
}
#endif
};
class ZVerifyObjectClosure : public ObjectClosure {
private:
bool _visit_referents;
public:
ZVerifyObjectClosure(bool visit_referents) : _visit_referents(visit_referents) {}
virtual void do_object(oop o);
};
template <typename RootsIterator>
void ZVerify::roots_impl() {
if (ZVerifyRoots) {
ZVerifyRootsClosure cl;
RootsIterator iter;
iter.oops_do(&cl);
}
}
void ZVerify::roots_strong() {
roots_impl<ZRootsIterator>();
}
class ZVerifyConcurrentRootsIterator : public ZConcurrentRootsIterator {
public:
ZVerifyConcurrentRootsIterator()
: ZConcurrentRootsIterator(ClassLoaderData::_claim_none) {}
};
void ZVerify::roots_concurrent() {
roots_impl<ZVerifyConcurrentRootsIterator>();
}
void ZVerify::roots_weak() {
assert(!ZResurrection::is_blocked(), "Invalid phase");
roots_impl<ZWeakRootsIterator>();
}
void ZVerify::roots(bool verify_weaks) {
roots_strong();
roots_concurrent();
if (verify_weaks) {
roots_weak();
roots_concurrent_weak();
}
}
void ZVerify::objects(bool verify_weaks) {
if (ZVerifyObjects) {
ZVerifyObjectClosure cl(verify_weaks);
ZHeap::heap()->object_iterate(&cl, verify_weaks);
}
}
void ZVerify::roots_concurrent_weak() {
assert(!ZResurrection::is_blocked(), "Invalid phase");
roots_impl<ZConcurrentWeakRootsIterator>();
}
void ZVerify::roots_and_objects(bool verify_weaks) {
ZStatTimerDisable _disable;
roots(verify_weaks);
objects(verify_weaks);
}
void ZVerify::after_mark() {
// Only verify strong roots and references.
roots_and_objects(false /* verify_weaks */);
}
void ZVerify::after_weak_processing() {
// Also verify weaks - all should have been processed at this point.
roots_and_objects(true /* verify_weaks */);
}
template <bool VisitReferents>
void ZVerifyOopClosure<VisitReferents>::do_oop(oop* p) {
guarantee(SafepointSynchronize::is_at_safepoint(), "Must be at a safepoint");
guarantee(ZGlobalPhase == ZPhaseMarkCompleted, "Invalid phase");
guarantee(!ZResurrection::is_blocked(), "Invalid phase");
const oop o = RawAccess<>::oop_load(p);
if (o == NULL) {
return;
}
const uintptr_t addr = ZOop::to_address(o);
if (VisitReferents) {
guarantee(ZAddress::is_good(addr) || ZAddress::is_finalizable_good(addr), BAD_OOP_REPORT(addr));
} else {
// Should not encounter finalizable oops through strong-only paths. Assumes only strong roots are visited.
guarantee(ZAddress::is_good(addr), BAD_OOP_REPORT(addr));
}
const uintptr_t good_addr = ZAddress::good(addr);
guarantee(oopDesc::is_oop(ZOop::from_address(good_addr)), BAD_OOP_REPORT(addr));
}
void ZVerifyObjectClosure::do_object(oop o) {
if (_visit_referents) {
ZVerifyOopClosure<true /* VisitReferents */> cl;
o->oop_iterate((OopIterateClosure*)&cl);
} else {
ZVerifyOopClosure<false /* VisitReferents */> cl;
o->oop_iterate(&cl);
}
}

View file

@@ -0,0 +1,74 @@
/*
* Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
#ifndef SHARE_GC_Z_ZVERIFY_HPP
#define SHARE_GC_Z_ZVERIFY_HPP
#include "memory/allocation.hpp"
class ZVerify : public AllStatic {
private:
template <typename RootsIterator>
static void roots_impl();
static void roots(bool verify_weaks);
static void roots_weak();
static void roots_concurrent();
static void roots_concurrent_weak();
static void objects(bool verify_weaks);
static void roots_and_objects(bool visit_weaks);
public:
// Verify strong (non-concurrent) roots. Should always be good.
static void roots_strong();
// Verify all strong roots and references after marking.
static void after_mark();
// Verify strong and weak roots and references.
static void after_weak_processing();
};
class VM_ZVerifyOperation : public VM_Operation {
public:
virtual bool needs_inactive_gc_locker() const {
// An inactive GC locker is needed in operations where we change the bad
// mask or move objects. Changing the bad mask will invalidate all oops,
// which makes it conceptually the same thing as moving all objects.
return false;
}
virtual void doit() {
ZVerify::after_weak_processing();
}
bool success() const {
return true;
}
virtual VMOp_Type type() const { return VMOp_ZVerify; }
};
#endif // SHARE_GC_Z_ZVERIFY_HPP

View file

@@ -76,6 +76,12 @@
diagnostic(bool, ZVerifyViews, false, \
"Verify heap view accesses") \
\
diagnostic(bool, ZVerifyRoots, trueInDebug, \
"Verify roots") \
\
diagnostic(bool, ZVerifyObjects, false, \
"Verify objects") \
\
diagnostic(bool, ZVerifyMarking, false, \
"Verify marking stacks") \
\

View file

@@ -144,18 +144,28 @@ class CLDToOopClosure : public CLDClosure {
void do_cld(ClassLoaderData* cld);
};
// The base class for all concurrent marking closures
// that participate in class unloading.
// It's used to proxy through the metadata to the oops defined in them.
class MetadataVisitingOopIterateClosure: public OopIterateClosure {
class ClaimMetadataVisitingOopIterateClosure : public OopIterateClosure {
protected:
const int _claim;
public:
MetadataVisitingOopIterateClosure(ReferenceDiscoverer* rd = NULL) : OopIterateClosure(rd) { }
ClaimMetadataVisitingOopIterateClosure(int claim, ReferenceDiscoverer* rd = NULL) :
OopIterateClosure(rd),
_claim(claim) { }
virtual bool do_metadata() { return true; }
virtual void do_klass(Klass* k);
virtual void do_cld(ClassLoaderData* cld);
};
// The base class for all concurrent marking closures
// that participate in class unloading.
// It's used to proxy through the metadata to the oops defined in them.
class MetadataVisitingOopIterateClosure: public ClaimMetadataVisitingOopIterateClosure {
public:
MetadataVisitingOopIterateClosure(ReferenceDiscoverer* rd = NULL);
};
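Since the claim bit is now a constructor argument, closures outside the strong-marking path can pick their own bit instead of hard-coding _claim_strong. A hypothetical subclass, as a sketch (the ZGC heap-iterator closure elsewhere in this commit follows the same shape):

class MyClaimingOopClosure : public ClaimMetadataVisitingOopIterateClosure {
public:
  explicit MyClaimingOopClosure(int claim) :
      ClaimMetadataVisitingOopIterateClosure(claim) {}

  virtual void do_oop(oop* p)       { /* process *p */ }
  virtual void do_oop(narrowOop* p) { /* process *p */ }
  // do_metadata()/do_klass()/do_cld() are inherited; do_cld() claims each CLD
  // with the bit passed above before walking its oops.
};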
// ObjectClosure is used for iterating through an object space
class ObjectClosure : public Closure {

View file

@@ -39,13 +39,17 @@
#include "oops/typeArrayKlass.inline.hpp"
#include "utilities/debug.hpp"
inline void MetadataVisitingOopIterateClosure::do_cld(ClassLoaderData* cld) {
cld->oops_do(this, ClassLoaderData::_claim_strong);
// Defaults to strong claiming.
inline MetadataVisitingOopIterateClosure::MetadataVisitingOopIterateClosure(ReferenceDiscoverer* rd) :
ClaimMetadataVisitingOopIterateClosure(ClassLoaderData::_claim_strong, rd) {}
inline void ClaimMetadataVisitingOopIterateClosure::do_cld(ClassLoaderData* cld) {
cld->oops_do(this, _claim);
}
inline void MetadataVisitingOopIterateClosure::do_klass(Klass* k) {
inline void ClaimMetadataVisitingOopIterateClosure::do_klass(Klass* k) {
ClassLoaderData* cld = k->class_loader_data();
MetadataVisitingOopIterateClosure::do_cld(cld);
ClaimMetadataVisitingOopIterateClosure::do_cld(cld);
}
#ifdef ASSERT

View file

@@ -929,3 +929,91 @@ const Type *MinINode::add_ring( const Type *t0, const Type *t1 ) const {
// Otherwise just MIN them bits.
return TypeInt::make( MIN2(r0->_lo,r1->_lo), MIN2(r0->_hi,r1->_hi), MAX2(r0->_widen,r1->_widen) );
}
//------------------------------add_ring---------------------------------------
const Type *MinFNode::add_ring( const Type *t0, const Type *t1 ) const {
const TypeF *r0 = t0->is_float_constant();
const TypeF *r1 = t1->is_float_constant();
if (r0->is_nan()) {
return r0;
}
if (r1->is_nan()) {
return r1;
}
float f0 = r0->getf();
float f1 = r1->getf();
if (f0 != 0.0f || f1 != 0.0f) {
return f0 < f1 ? r0 : r1;
}
// handle min of 0.0, -0.0 case.
return (jint_cast(f0) < jint_cast(f1)) ? r0 : r1;
}
//------------------------------add_ring---------------------------------------
const Type *MinDNode::add_ring( const Type *t0, const Type *t1 ) const {
const TypeD *r0 = t0->is_double_constant();
const TypeD *r1 = t1->is_double_constant();
if (r0->is_nan()) {
return r0;
}
if (r1->is_nan()) {
return r1;
}
double d0 = r0->getd();
double d1 = r1->getd();
if (d0 != 0.0 || d1 != 0.0) {
return d0 < d1 ? r0 : r1;
}
// handle min of 0.0, -0.0 case.
return (jlong_cast(d0) < jlong_cast(d1)) ? r0 : r1;
}
//------------------------------add_ring---------------------------------------
const Type *MaxFNode::add_ring( const Type *t0, const Type *t1 ) const {
const TypeF *r0 = t0->is_float_constant();
const TypeF *r1 = t1->is_float_constant();
if (r0->is_nan()) {
return r0;
}
if (r1->is_nan()) {
return r1;
}
float f0 = r0->getf();
float f1 = r1->getf();
if (f0 != 0.0f || f1 != 0.0f) {
return f0 > f1 ? r0 : r1;
}
// handle max of 0.0,-0.0 case.
return (jint_cast(f0) > jint_cast(f1)) ? r0 : r1;
}
//------------------------------add_ring---------------------------------------
const Type *MaxDNode::add_ring( const Type *t0, const Type *t1 ) const {
const TypeD *r0 = t0->is_double_constant();
const TypeD *r1 = t1->is_double_constant();
if (r0->is_nan()) {
return r0;
}
if (r1->is_nan()) {
return r1;
}
double d0 = r0->getd();
double d1 = r1->getd();
if (d0 != 0.0 || d1 != 0.0) {
return d0 > d1 ? r0 : r1;
}
// handle max of 0.0, -0.0 case.
return (jlong_cast(d0) > jlong_cast(d1)) ? r0 : r1;
}
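The jint_cast/jlong_cast comparisons above exist because +0.0 and -0.0 compare equal as floating-point values, yet min must return -0.0 and max must return +0.0. A standalone illustration of the bit trick (plain C++, not HotSpot code):

#include <cassert>
#include <cstdint>
#include <cstring>

static int32_t float_bits(float f) {            // stands in for jint_cast
  int32_t i;
  std::memcpy(&i, &f, sizeof(i));
  return i;
}

int main() {
  const float pz = 0.0f, nz = -0.0f;
  assert(pz == nz);                              // '<' and '>' cannot tell them apart
  assert(float_bits(nz) < float_bits(pz));       // 0x80000000 < 0x00000000 as signed ints
  // Hence MinFNode::add_ring picks -0.0f and MaxFNode::add_ring picks +0.0f,
  // matching Math.min/Math.max, which a plain float comparison would not guarantee.
  return 0;
}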

View file

@@ -255,7 +255,7 @@ class MaxFNode : public MaxNode {
public:
MaxFNode(Node *in1, Node *in2) : MaxNode(in1, in2) {}
virtual int Opcode() const;
virtual const Type *add_ring(const Type*, const Type*) const { return Type::FLOAT; }
virtual const Type *add_ring(const Type*, const Type*) const;
virtual const Type *add_id() const { return TypeF::NEG_INF; }
virtual const Type *bottom_type() const { return Type::FLOAT; }
virtual uint ideal_reg() const { return Op_RegF; }
@@ -267,7 +267,7 @@ class MinFNode : public MaxNode {
public:
MinFNode(Node *in1, Node *in2) : MaxNode(in1, in2) {}
virtual int Opcode() const;
virtual const Type *add_ring(const Type*, const Type*) const { return Type::FLOAT; }
virtual const Type *add_ring(const Type*, const Type*) const;
virtual const Type *add_id() const { return TypeF::POS_INF; }
virtual const Type *bottom_type() const { return Type::FLOAT; }
virtual uint ideal_reg() const { return Op_RegF; }
@@ -279,7 +279,7 @@ class MaxDNode : public MaxNode {
public:
MaxDNode(Node *in1, Node *in2) : MaxNode(in1, in2) {}
virtual int Opcode() const;
virtual const Type *add_ring(const Type*, const Type*) const { return Type::DOUBLE; }
virtual const Type *add_ring(const Type*, const Type*) const;
virtual const Type *add_id() const { return TypeD::NEG_INF; }
virtual const Type *bottom_type() const { return Type::DOUBLE; }
virtual uint ideal_reg() const { return Op_RegD; }
@@ -291,7 +291,7 @@ class MinDNode : public MaxNode {
public:
MinDNode(Node *in1, Node *in2) : MaxNode(in1, in2) {}
virtual int Opcode() const;
virtual const Type *add_ring(const Type*, const Type*) const { return Type::DOUBLE; }
virtual const Type *add_ring(const Type*, const Type*) const;
virtual const Type *add_id() const { return TypeD::POS_INF; }
virtual const Type *bottom_type() const { return Type::DOUBLE; }
virtual uint ideal_reg() const { return Op_RegD; }

View file

@@ -6706,9 +6706,6 @@ bool LibraryCallKit::inline_fp_min_max(vmIntrinsics::ID id) {
fatal_unexpected_iid(id);
break;
}
if (a->is_Con() || b->is_Con()) {
return false;
}
switch (id) {
case vmIntrinsics::_maxF: n = new MaxFNode(a, b); break;
case vmIntrinsics::_minF: n = new MinFNode(a, b); break;

View file

@@ -411,18 +411,8 @@ int Type::uhash( const Type *const t ) {
}
#define SMALLINT ((juint)3) // a value too insignificant to consider widening
static double pos_dinf() {
union { int64_t i; double d; } v;
v.i = CONST64(0x7ff0000000000000);
return v.d;
}
static float pos_finf() {
union { int32_t i; float f; } v;
v.i = 0x7f800000;
return v.f;
}
#define POSITIVE_INFINITE_F 0x7f800000 // hex representation for IEEE 754 single precision positive infinity
#define POSITIVE_INFINITE_D 0x7ff0000000000000 // hex representation for IEEE 754 double precision positive infinity
//--------------------------Initialize_shared----------------------------------
void Type::Initialize_shared(Compile* current) {
@@ -453,13 +443,13 @@ void Type::Initialize_shared(Compile* current) {
TypeF::ZERO = TypeF::make(0.0); // Float 0 (positive zero)
TypeF::ONE = TypeF::make(1.0); // Float 1
TypeF::POS_INF = TypeF::make(pos_finf());
TypeF::NEG_INF = TypeF::make(-pos_finf());
TypeF::POS_INF = TypeF::make(jfloat_cast(POSITIVE_INFINITE_F));
TypeF::NEG_INF = TypeF::make(-jfloat_cast(POSITIVE_INFINITE_F));
TypeD::ZERO = TypeD::make(0.0); // Double 0 (positive zero)
TypeD::ONE = TypeD::make(1.0); // Double 1
TypeD::POS_INF = TypeD::make(pos_dinf());
TypeD::NEG_INF = TypeD::make(-pos_dinf());
TypeD::POS_INF = TypeD::make(jdouble_cast(POSITIVE_INFINITE_D));
TypeD::NEG_INF = TypeD::make(-jdouble_cast(POSITIVE_INFINITE_D));
TypeInt::MINUS_1 = TypeInt::make(-1); // -1
TypeInt::ZERO = TypeInt::make( 0); // 0

View file

@@ -72,6 +72,7 @@
template(ZMarkStart) \
template(ZMarkEnd) \
template(ZRelocateStart) \
template(ZVerify) \
template(HandshakeOneThread) \
template(HandshakeAllThreads) \
template(HandshakeFallback) \

View file

@@ -75,7 +75,7 @@ java.launcher.opt.footer = \
\ -D<name>=<value>\n\
\ set a system property\n\
\ -verbose:[class|module|gc|jni]\n\
\ enable verbose output\n\
\ enable verbose output for the given subsystem\n\
\ -version print product version to the error stream and exit\n\
\ --version print product version to the output stream and exit\n\
\ -showversion print product version to the error stream and continue\n\
@@ -129,9 +129,9 @@ java.launcher.X.usage=\n\
\ append to end of bootstrap class path\n\
\ -Xcheck:jni perform additional checks for JNI functions\n\
\ -Xcomp forces compilation of methods on first invocation\n\
\ -Xdebug provided for backward compatibility\n\
\ -Xdebug does nothing. Provided for backward compatibility.\n\
\ -Xdiag show additional diagnostic messages\n\
\ -Xfuture enable strictest checks, anticipating future default\n\
\ -Xfuture enable strictest checks, anticipating future default.\n\
\ This option is deprecated and may be removed in a\n\
\ future release.\n\
\ -Xint interpreted mode execution only\n\
@@ -141,7 +141,9 @@ java.launcher.X.usage=\n\
\ -Xlog:<opts> Configure or enable logging with the Java Virtual\n\
\ Machine (JVM) unified logging framework. Use -Xlog:help\n\
\ for details.\n\
\ -Xloggc:<file> log GC status to a file with time stamps\n\
\ -Xloggc:<file> log GC status to a file with time stamps.\n\
\ This option is deprecated and may be removed in a\n\
\ future release. It is replaced by -Xlog:gc:<file>.\n\
\ -Xmixed mixed mode execution (default)\n\
\ -Xmn<size> sets the initial and maximum size (in bytes) of the heap\n\
\ for the young generation (nursery)\n\
@@ -152,6 +154,8 @@ java.launcher.X.usage=\n\
\ -Xshare:auto use shared class data if possible (default)\n\
\ -Xshare:off do not attempt to use shared class data\n\
\ -Xshare:on require using shared class data, otherwise fail.\n\
\ This is a testing option and may lead to intermittent\n\
\ failures. It should not be used in production environments.\n\
\ -XshowSettings show all settings and continue\n\
\ -XshowSettings:all\n\
\ show all settings and continue\n\
@@ -191,7 +195,6 @@ java.launcher.X.usage=\n\
\ --patch-module <module>=<file>({0}<file>)*\n\
\ override or augment a module with classes and resources\n\
\ in JAR files or directories.\n\
\ --disable-@files disable further argument file expansion\n\
\ --source <version>\n\
\ set the version of the source in source-file mode.\n\n\
These extra options are subject to change without notice.\n
@@ -199,7 +202,7 @@ These extra options are subject to change without notice.\n
# Translators please note do not translate the options themselves
java.launcher.X.macosx.usage=\
\n\
The following options are Mac OS X specific:\n\
The following options are macOS specific:\n\
\ -XstartOnFirstThread\n\
\ run the main() method on the first (AppKit) thread\n\
\ -Xdock:name=<application name>\n\

View file

@@ -128,8 +128,8 @@ import java.io.OutputStream;
* <pre>
* // Create a context using default credentials
* // and the implementation specific default mechanism
* GSSManager manager ...
* GSSName targetName ...
* GSSManager manager = ...
* GSSName targetName = ...
* GSSContext context = manager.createContext(targetName, null, null,
* GSSContext.INDEFINITE_LIFETIME);
*
@@ -141,21 +141,23 @@ import java.io.OutputStream;
*
* // establish a context between peers
*
* byte []inToken = new byte[0];
* byte[] inToken = new byte[0];
* byte[] outToken;
*
* // Loop while there still is a token to be processed
*
* while (!context.isEstablished()) {
*
* byte[] outToken
* = context.initSecContext(inToken, 0, inToken.length);
* outToken = context.initSecContext(inToken, 0, inToken.length);
*
* // send the output token if generated
* if (outToken != null)
* if (outToken != null) {
* sendToken(outToken);
* }
*
* if (!context.isEstablished()) {
* inToken = readToken();
* }
* }
*
* // display context information
@@ -165,21 +167,40 @@ import java.io.OutputStream;
* System.out.println("Initiator = " + context.getSrcName());
* System.out.println("Acceptor = " + context.getTargName());
*
* if (context.getConfState())
* System.out.println("Confidentiality (i.e., privacy) is available");
* if (context.getConfState()) {
* System.out.println("Confidentiality (i.e., privacy) is available");
* }
*
* if (context.getIntegState())
* System.out.println("Integrity is available");
* if (context.getIntegState()) {
* System.out.println("Integrity is available");
* }
*
* // perform wrap on an application supplied message, appMsg,
* // using QOP = 0, and requesting privacy service
* byte [] appMsg ...
* byte[] appMsg = ...
*
* MessageProp mProp = new MessageProp(0, true);
*
* byte []tok = context.wrap(appMsg, 0, appMsg.length, mProp);
* outToken = context.wrap(appMsg, 0, appMsg.length, mProp);
*
* sendToken(tok);
* sendToken(outToken);
*
* // perform unwrap on an incoming application message, and check
* // its privacy state and supplementary information
* inToken = readToken();
*
* mProp = new MessageProp(0, true);
*
* appMsg = context.unwrap(inToken, 0, inToken.length, mProp);
*
* System.out.println("Was it encrypted? " + mProp.getPrivacy());
* System.out.println("Duplicate Token? " + mProp.isDuplicateToken());
* System.out.println("Old Token? " + mProp.isOldToken());
* System.out.println("Unsequenced Token? " + mProp.isUnseqToken());
* System.out.println("Gap Token? " + mProp.isGapToken());
*
* // the application determines if the privacy state and supplementary
* // information are acceptable
*
* // release the local-end of the context
* context.dispose();

View file

@@ -1274,7 +1274,17 @@ public class KDC {
PAData[] inPAs = KDCReqDotPAData(asReq);
List<PAData> enc_outPAs = new ArrayList<>();
if (inPAs == null || inPAs.length == 0) {
byte[] paEncTimestamp = null;
if (inPAs != null) {
for (PAData inPA : inPAs) {
if (inPA.getType() == Krb5.PA_ENC_TIMESTAMP) {
paEncTimestamp = inPA.getValue();
}
}
}
if (paEncTimestamp == null) {
Object preauth = options.get(Option.PREAUTH_REQUIRED);
if (preauth == null || preauth.equals(Boolean.TRUE)) {
throw new KrbException(Krb5.KDC_ERR_PREAUTH_REQUIRED);
@@ -1283,7 +1293,7 @@ public class KDC {
EncryptionKey pakey = null;
try {
EncryptedData data = newEncryptedData(
new DerValue(inPAs[0].getValue()));
new DerValue(paEncTimestamp));
pakey = keyForUser(body.cname, data.getEType(), false);
data.decrypt(pakey, KeyUsage.KU_PA_ENC_TS);
} catch (Exception e) {