8309044: Replace NULL with nullptr, final sweep of hotspot code

Reviewed-by: stefank, dholmes, kvn, amitkumar
This commit is contained in:
Johan Sjölen 2023-05-31 09:19:47 +00:00
parent 88236263dc
commit 4f16161607
67 changed files with 114 additions and 114 deletions

View file

@@ -122,7 +122,7 @@ void CodeInstaller::pd_relocate_ForeignCall(NativeInstruction* inst, jlong forei
} }
void CodeInstaller::pd_relocate_JavaMethod(CodeBuffer &cbuf, methodHandle& method, jint pc_offset, JVMCI_TRAPS) { void CodeInstaller::pd_relocate_JavaMethod(CodeBuffer &cbuf, methodHandle& method, jint pc_offset, JVMCI_TRAPS) {
NativeCall* call = NULL; NativeCall* call = nullptr;
switch (_next_call_type) { switch (_next_call_type) {
case INLINE_INVOKE: case INLINE_INVOKE:
return; return;
@@ -155,7 +155,7 @@ void CodeInstaller::pd_relocate_JavaMethod(CodeBuffer &cbuf, methodHandle& metho
if (Continuations::enabled()) { if (Continuations::enabled()) {
// Check for proper post_call_nop // Check for proper post_call_nop
NativePostCallNop* nop = nativePostCallNop_at(call->next_instruction_address()); NativePostCallNop* nop = nativePostCallNop_at(call->next_instruction_address());
if (nop == NULL) { if (nop == nullptr) {
JVMCI_ERROR("missing post call nop at offset %d", pc_offset); JVMCI_ERROR("missing post call nop at offset %d", pc_offset);
} else { } else {
_instructions->relocate(call->next_instruction_address(), relocInfo::post_call_nop_type); _instructions->relocate(call->next_instruction_address(), relocInfo::post_call_nop_type);

View file

@@ -732,7 +732,7 @@ class MacroAssembler: public Assembler {
// Load/Store klass oop from klass field. Compress. // Load/Store klass oop from klass field. Compress.
void load_klass(Register dst, Register src); void load_klass(Register dst, Register src);
void load_klass_check_null(Register dst, Register src, Label* is_null = NULL); void load_klass_check_null(Register dst, Register src, Label* is_null = nullptr);
void store_klass(Register dst_oop, Register klass, Register tmp = R0); void store_klass(Register dst_oop, Register klass, Register tmp = R0);
void store_klass_gap(Register dst_oop, Register val = noreg); // Will store 0 if val not specified. void store_klass_gap(Register dst_oop, Register val = noreg); // Will store 0 if val not specified.

View file

@@ -4697,7 +4697,7 @@ class StubGenerator: public StubCodeGenerator {
StubRoutines::_call_stub_entry = generate_call_stub(StubRoutines::_call_stub_return_address); StubRoutines::_call_stub_entry = generate_call_stub(StubRoutines::_call_stub_return_address);
StubRoutines::_catch_exception_entry = generate_catch_exception(); StubRoutines::_catch_exception_entry = generate_catch_exception();
if (UnsafeCopyMemory::_table == NULL) { if (UnsafeCopyMemory::_table == nullptr) {
UnsafeCopyMemory::create_table(8); UnsafeCopyMemory::create_table(8);
} }

View file

@@ -2294,7 +2294,7 @@ void TemplateTable::load_invokedynamic_entry(Register method) {
__ load_resolved_indy_entry(cache, index); __ load_resolved_indy_entry(cache, index);
__ ld_ptr(method, array_base_offset + in_bytes(ResolvedIndyEntry::method_offset()), cache); __ ld_ptr(method, array_base_offset + in_bytes(ResolvedIndyEntry::method_offset()), cache);
// The invokedynamic is unresolved iff method is NULL // The invokedynamic is unresolved iff method is null
__ cmpdi(CCR0, method, 0); __ cmpdi(CCR0, method, 0);
__ bne(CCR0, resolved); __ bne(CCR0, resolved);

View file

@@ -71,7 +71,7 @@ static bool emit_shared_trampolines(CodeBuffer* cb, CodeBuffer::SharedTrampoline
assert(requests->number_of_entries() >= 1, "at least one"); assert(requests->number_of_entries() >= 1, "at least one");
const int total_requested_size = MacroAssembler::max_trampoline_stub_size() * requests->number_of_entries(); const int total_requested_size = MacroAssembler::max_trampoline_stub_size() * requests->number_of_entries();
if (cb->stubs()->maybe_expand_to_ensure_remaining(total_requested_size) && cb->blob() == NULL) { if (cb->stubs()->maybe_expand_to_ensure_remaining(total_requested_size) && cb->blob() == nullptr) {
return false; return false;
} }

View file

@@ -371,5 +371,5 @@ void BarrierSetAssembler::check_oop(MacroAssembler* masm, Register obj, Register
// Make sure klass is 'reasonable', which is not zero. // Make sure klass is 'reasonable', which is not zero.
__ load_klass(obj, obj, tmp1); // get klass __ load_klass(obj, obj, tmp1); // get klass
__ beqz(obj, error); // if klass is NULL it is broken __ beqz(obj, error); // if klass is null it is broken
} }

View file

@@ -4011,7 +4011,7 @@ class StubGenerator: public StubCodeGenerator {
StubRoutines::_forward_exception_entry = generate_forward_exception(); StubRoutines::_forward_exception_entry = generate_forward_exception();
if (UnsafeCopyMemory::_table == NULL) { if (UnsafeCopyMemory::_table == nullptr) {
UnsafeCopyMemory::create_table(8); UnsafeCopyMemory::create_table(8);
} }
@@ -4069,7 +4069,7 @@ class StubGenerator: public StubCodeGenerator {
generate_arraycopy_stubs(); generate_arraycopy_stubs();
BarrierSetNMethod* bs_nm = BarrierSet::barrier_set()->barrier_set_nmethod(); BarrierSetNMethod* bs_nm = BarrierSet::barrier_set()->barrier_set_nmethod();
if (bs_nm != NULL) { if (bs_nm != nullptr) {
StubRoutines::riscv::_method_entry_barrier = generate_method_entry_barrier(); StubRoutines::riscv::_method_entry_barrier = generate_method_entry_barrier();
} }

View file

@@ -486,5 +486,5 @@ void BarrierSetAssembler::check_oop(MacroAssembler* masm, Register obj, Register
// make sure klass is 'reasonable', which is not zero. // make sure klass is 'reasonable', which is not zero.
__ load_klass(obj, obj, tmp1); // get klass __ load_klass(obj, obj, tmp1); // get klass
__ testptr(obj, obj); __ testptr(obj, obj);
__ jcc(Assembler::zero, error); // if klass is NULL it is broken __ jcc(Assembler::zero, error); // if klass is null it is broken
} }

View file

@@ -300,7 +300,7 @@ void InterpreterMacroAssembler::call_VM_base(Register oop_result,
cmpptr(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), NULL_WORD); cmpptr(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), NULL_WORD);
jcc(Assembler::equal, L); jcc(Assembler::equal, L);
stop("InterpreterMacroAssembler::call_VM_base:" stop("InterpreterMacroAssembler::call_VM_base:"
" last_sp != nullptr"); " last_sp isn't null");
bind(L); bind(L);
} }
#endif /* ASSERT */ #endif /* ASSERT */

View file

@@ -188,7 +188,7 @@ void CodeInstaller::pd_relocate_JavaMethod(CodeBuffer &, methodHandle& method, j
if (Continuations::enabled()) { if (Continuations::enabled()) {
// Check for proper post_call_nop // Check for proper post_call_nop
NativePostCallNop* nop = nativePostCallNop_at(call->next_instruction_address()); NativePostCallNop* nop = nativePostCallNop_at(call->next_instruction_address());
if (nop == NULL) { if (nop == nullptr) {
JVMCI_ERROR("missing post call nop at offset %d", pc_offset); JVMCI_ERROR("missing post call nop at offset %d", pc_offset);
} else { } else {
_instructions->relocate(call->next_instruction_address(), relocInfo::post_call_nop_type); _instructions->relocate(call->next_instruction_address(), relocInfo::post_call_nop_type);

View file

@@ -276,7 +276,7 @@ static int pipeline_res_stages_initializer(
int commentlen = 0; int commentlen = 0;
int max_stage = 0; int max_stage = 0;
i = 0; i = 0;
for (pipeline->_reslist.reset(); (resource = pipeline->_reslist.iter()) != NULL;) { for (pipeline->_reslist.reset(); (resource = pipeline->_reslist.iter()) != nullptr;) {
if (pipeline->_resdict[resource]->is_resource()->is_discrete()) { if (pipeline->_resdict[resource]->is_resource()->is_discrete()) {
if (res_stages[i] == 0) { if (res_stages[i] == 0) {
if (max_stage < 9) { if (max_stage < 9) {
@@ -302,7 +302,7 @@ static int pipeline_res_stages_initializer(
templen = 0; templen = 0;
i = 0; i = 0;
for (pipeline->_reslist.reset(); (resource = pipeline->_reslist.iter()) != NULL;) { for (pipeline->_reslist.reset(); (resource = pipeline->_reslist.iter()) != nullptr;) {
if (pipeline->_resdict[resource]->is_resource()->is_discrete()) { if (pipeline->_resdict[resource]->is_resource()->is_discrete()) {
const char* const resname = res_stages[i] == 0 ? "undefined" : pipeline->_stages.name(res_stages[i] - 1); const char* const resname = res_stages[i] == 0 ? "undefined" : pipeline->_stages.name(res_stages[i] - 1);
@@ -365,7 +365,7 @@ static int pipeline_res_cycles_initializer(
const char* resource; const char* resource;
i = 0; i = 0;
for (pipeline->_reslist.reset(); (resource = pipeline->_reslist.iter()) != NULL;) { for (pipeline->_reslist.reset(); (resource = pipeline->_reslist.iter()) != nullptr;) {
if (pipeline->_resdict[resource]->is_resource()->is_discrete()) { if (pipeline->_resdict[resource]->is_resource()->is_discrete()) {
if (max_cycles < res_cycles[i]) { if (max_cycles < res_cycles[i]) {
max_cycles = res_cycles[i]; max_cycles = res_cycles[i];
@@ -390,7 +390,7 @@ static int pipeline_res_cycles_initializer(
templen = 0; templen = 0;
i = 0; i = 0;
for (pipeline->_reslist.reset(); (resource = pipeline->_reslist.iter()) != NULL;) { for (pipeline->_reslist.reset(); (resource = pipeline->_reslist.iter()) != nullptr;) {
if (pipeline->_resdict[resource]->is_resource()->is_discrete()) { if (pipeline->_resdict[resource]->is_resource()->is_discrete()) {
templen += snprintf_checked(&resource_cycles[templen], resource_cycles_size - templen, " %*d%c // %s\n", templen += snprintf_checked(&resource_cycles[templen], resource_cycles_size - templen, " %*d%c // %s\n",
cyclelen, res_cycles[i], (i < pipeline->_rescount-1) ? ',' : ' ', resource); cyclelen, res_cycles[i], (i < pipeline->_rescount-1) ? ',' : ' ', resource);
@@ -1006,7 +1006,7 @@ void ArchDesc::build_pipe_classes(FILE *fp_cpp) {
// Don't add compound resources to the list of resource names // Don't add compound resources to the list of resource names
const char* resource; const char* resource;
i = 0; i = 0;
for (_pipeline->_reslist.reset(); (resource = _pipeline->_reslist.iter()) != NULL;) { for (_pipeline->_reslist.reset(); (resource = _pipeline->_reslist.iter()) != nullptr;) {
if (_pipeline->_resdict[resource]->is_resource()->is_discrete()) { if (_pipeline->_resdict[resource]->is_resource()->is_discrete()) {
fprintf(fp_cpp, " \"%s\"%c", resource, i < _pipeline->_rescount - 1 ? ',' : ' '); fprintf(fp_cpp, " \"%s\"%c", resource, i < _pipeline->_rescount - 1 ? ',' : ' ');
i++; i++;

View file

@@ -360,7 +360,7 @@ void SharedClassPathEntry::set_name(const char* name, TRAPS) {
} }
void SharedClassPathEntry::copy_from(SharedClassPathEntry* ent, ClassLoaderData* loader_data, TRAPS) { void SharedClassPathEntry::copy_from(SharedClassPathEntry* ent, ClassLoaderData* loader_data, TRAPS) {
assert(ent != NULL, "sanity"); assert(ent != nullptr, "sanity");
_type = ent->_type; _type = ent->_type;
_is_module_path = ent->_is_module_path; _is_module_path = ent->_is_module_path;
_timestamp = ent->_timestamp; _timestamp = ent->_timestamp;

View file

@@ -147,7 +147,7 @@ private:
static oop lookup_shared(const jchar* name, int len) NOT_CDS_JAVA_HEAP_RETURN_(nullptr); static oop lookup_shared(const jchar* name, int len) NOT_CDS_JAVA_HEAP_RETURN_(nullptr);
static size_t shared_entry_count() NOT_CDS_JAVA_HEAP_RETURN_(0); static size_t shared_entry_count() NOT_CDS_JAVA_HEAP_RETURN_(0);
static void allocate_shared_strings_array(TRAPS) NOT_CDS_JAVA_HEAP_RETURN; static void allocate_shared_strings_array(TRAPS) NOT_CDS_JAVA_HEAP_RETURN;
static oop init_shared_table(const DumpedInternedStrings* dumped_interned_strings) NOT_CDS_JAVA_HEAP_RETURN_(NULL); static oop init_shared_table(const DumpedInternedStrings* dumped_interned_strings) NOT_CDS_JAVA_HEAP_RETURN_(nullptr);
static void set_shared_strings_array_index(int root_index) NOT_CDS_JAVA_HEAP_RETURN; static void set_shared_strings_array_index(int root_index) NOT_CDS_JAVA_HEAP_RETURN;
static void serialize_shared_table_header(SerializeClosure* soc) NOT_CDS_JAVA_HEAP_RETURN; static void serialize_shared_table_header(SerializeClosure* soc) NOT_CDS_JAVA_HEAP_RETURN;

View file

@@ -90,7 +90,7 @@ class CompiledICInfo : public StackObj {
Metadata* cached_metadata() const { assert(!_is_icholder, ""); return (Metadata*)_cached_value; } Metadata* cached_metadata() const { assert(!_is_icholder, ""); return (Metadata*)_cached_value; }
CompiledICHolder* claim_cached_icholder() { CompiledICHolder* claim_cached_icholder() {
assert(_is_icholder, ""); assert(_is_icholder, "");
assert(_cached_value != nullptr, "must be non-nullptr"); assert(_cached_value != nullptr, "must be non-null");
_release_icholder = false; _release_icholder = false;
CompiledICHolder* icholder = (CompiledICHolder*)_cached_value; CompiledICHolder* icholder = (CompiledICHolder*)_cached_value;
icholder->claim(); icholder->claim();
@@ -339,7 +339,7 @@ class CompiledStaticCall : public ResourceObj {
public: public:
// Code // Code
// Returns NULL if CodeBuffer::expand fails // Returns null if CodeBuffer::expand fails
static address emit_to_interp_stub(CodeBuffer &cbuf, address mark = nullptr); static address emit_to_interp_stub(CodeBuffer &cbuf, address mark = nullptr);
static int to_interp_stub_size(); static int to_interp_stub_size();
static int to_trampoline_stub_size(); static int to_trampoline_stub_size();

View file

@@ -385,7 +385,7 @@ CompileTask* CompileQueue::get(CompilerThread* thread) {
methodHandle save_hot_method; methodHandle save_hot_method;
MonitorLocker locker(MethodCompileQueue_lock); MonitorLocker locker(MethodCompileQueue_lock);
// If _first is nullptr we have no more compile jobs. There are two reasons for // If _first is null we have no more compile jobs. There are two reasons for
// having no compile jobs: First, we compiled everything we wanted. Second, // having no compile jobs: First, we compiled everything we wanted. Second,
// we ran out of code cache so compilation has been disabled. In the latter // we ran out of code cache so compilation has been disabled. In the latter
// case we perform code cache sweeps to free memory such that we can re-enable // case we perform code cache sweeps to free memory such that we can re-enable

View file

@@ -239,7 +239,7 @@ oop XBarrier::load_barrier_on_phantom_oop_field_preloaded(volatile narrowOop* p,
oop XBarrier::weak_load_barrier_on_oop_field_preloaded(volatile narrowOop* p, oop o) { oop XBarrier::weak_load_barrier_on_oop_field_preloaded(volatile narrowOop* p, oop o) {
ShouldNotReachHere(); ShouldNotReachHere();
return NULL; return nullptr;
} }
oop XBarrier::weak_load_barrier_on_weak_oop_field_preloaded(volatile narrowOop* p, oop o) { oop XBarrier::weak_load_barrier_on_weak_oop_field_preloaded(volatile narrowOop* p, oop o) {

View file

@@ -184,7 +184,7 @@ inline bool XBarrierSet::AccessBarrier<decorators, BarrierSetT>::oop_arraycopy_i
if (!HasDecorator<decorators, ARRAYCOPY_CHECKCAST>::value) { if (!HasDecorator<decorators, ARRAYCOPY_CHECKCAST>::value) {
// No check cast, bulk barrier and bulk copy // No check cast, bulk barrier and bulk copy
XBarrier::load_barrier_on_oop_array(src, length); XBarrier::load_barrier_on_oop_array(src, length);
return Raw::oop_arraycopy_in_heap(nullptr, 0, src, NULL, 0, dst, length); return Raw::oop_arraycopy_in_heap(nullptr, 0, src, nullptr, 0, dst, length);
} }
// Check cast and copy each elements // Check cast and copy each elements

View file

@@ -40,7 +40,7 @@ inline XPageTableIterator::XPageTableIterator(const XPageTable* page_table) :
inline bool XPageTableIterator::next(XPage** page) { inline bool XPageTableIterator::next(XPage** page) {
for (XPage* entry; _iter.next(&entry);) { for (XPage* entry; _iter.next(&entry);) {
if (entry != NULL && entry != _prev) { if (entry != nullptr && entry != _prev) {
// Next page found // Next page found
*page = _prev = entry; *page = _prev = entry;
return true; return true;

View file

@@ -380,7 +380,7 @@ bool ZHeap::print_location(outputStream* st, zaddress addr) const {
st->print(PTR_FORMAT " is a zaddress: ", untype(addr)); st->print(PTR_FORMAT " is a zaddress: ", untype(addr));
if (addr == zaddress::null) { if (addr == zaddress::null) {
st->print_raw_cr("NULL"); st->print_raw_cr("null");
return true; return true;
} }
@@ -435,7 +435,7 @@ bool ZHeap::print_location(outputStream* st, zpointer ptr) const {
const zaddress addr = ZPointer::uncolor(ptr); const zaddress addr = ZPointer::uncolor(ptr);
if (addr == zaddress::null) { if (addr == zaddress::null) {
st->print_raw_cr("NULL"); st->print_raw_cr("null");
return true; return true;
} }

View file

@@ -813,7 +813,7 @@ static void trace_method_resolution(const char* prefix,
} }
st->print("%s%s, compile-time-class:%s, method:%s, method_holder:%s, access_flags: ", st->print("%s%s, compile-time-class:%s, method:%s, method_holder:%s, access_flags: ",
prefix, prefix,
(klass == nullptr ? "<nullptr>" : klass->internal_name()), (klass == nullptr ? "<null>" : klass->internal_name()),
resolved_klass->internal_name(), resolved_klass->internal_name(),
Method::name_and_sig_as_C_string(resolved_klass, Method::name_and_sig_as_C_string(resolved_klass,
method->name(), method->name(),

View file

@@ -159,7 +159,7 @@ class JfrViewFlightRecordingDCmd : public JfrDCmd {
return "Medium"; return "Medium";
} }
static const JavaPermission permission() { static const JavaPermission permission() {
JavaPermission p = {"java.lang.management.ManagementPermission", "monitor", NULL}; JavaPermission p = {"java.lang.management.ManagementPermission", "monitor", nullptr};
return p; return p;
} }
virtual const char* javaClass() const { virtual const char* javaClass() const {
@@ -184,7 +184,7 @@ class JfrQueryFlightRecordingDCmd : public JfrDCmd {
return "Medium"; return "Medium";
} }
static const JavaPermission permission() { static const JavaPermission permission() {
JavaPermission p = {"java.lang.management.ManagementPermission", "monitor", NULL}; JavaPermission p = {"java.lang.management.ManagementPermission", "monitor", nullptr};
return p; return p;
} }
virtual const char* javaClass() const { virtual const char* javaClass() const {

View file

@@ -122,7 +122,7 @@ const char* get_java_thread_name(const JavaThread* jt, int& length, oop vthread)
name_str = java_lang_String::as_utf8_string(name, length); name_str = java_lang_String::as_utf8_string(name, length);
} }
} }
assert(name_str != nullptr, "unexpected nullptr thread name"); assert(name_str != nullptr, "unexpected null thread name");
return name_str; return name_str;
} }

View file

@@ -144,7 +144,7 @@ void JfrRepository::set_chunk_path(jstring path, JavaThread* jt) {
ResourceMark rm(jt); ResourceMark rm(jt);
const char* const canonical_chunk_path = JfrJavaSupport::c_str(path, jt); const char* const canonical_chunk_path = JfrJavaSupport::c_str(path, jt);
if (nullptr == canonical_chunk_path && !_chunkwriter->is_valid()) { if (nullptr == canonical_chunk_path && !_chunkwriter->is_valid()) {
// new output is nullptr and current output is null // new output is null and current output is null
return; return;
} }
instance().set_chunk_path(canonical_chunk_path); instance().set_chunk_path(canonical_chunk_path);

View file

@@ -116,7 +116,7 @@ BufferPtr JfrStringPool::flush(BufferPtr old, size_t used, size_t requested, Thr
migrate_outstanding_writes(old, new_buffer, used, requested); migrate_outstanding_writes(old, new_buffer, used, requested);
} }
release(old, thread); release(old, thread);
return new_buffer; // might be nullptr return new_buffer; // might be null
} }
static const size_t lease_retry = 10; static const size_t lease_retry = 10;

View file

@@ -363,7 +363,7 @@ bool JVMCIEnv::pending_exception_as_string(const char** to_string, const char**
if (!is_hotspot()) { if (!is_hotspot()) {
JNIAccessMark jni(this, THREAD); JNIAccessMark jni(this, THREAD);
jthrowable ex = jni()->ExceptionOccurred(); jthrowable ex = jni()->ExceptionOccurred();
if (ex != NULL) { if (ex != nullptr) {
jni()->ExceptionClear(); jni()->ExceptionClear();
jobjectArray pair = (jobjectArray) jni()->CallStaticObjectMethod( jobjectArray pair = (jobjectArray) jni()->CallStaticObjectMethod(
JNIJVMCI::HotSpotJVMCIRuntime::clazz(), JNIJVMCI::HotSpotJVMCIRuntime::clazz(),

View file

@@ -192,7 +192,7 @@ void AsyncLogWriter::initialize() {
AsyncLogWriter* self = new AsyncLogWriter(); AsyncLogWriter* self = new AsyncLogWriter();
if (self->_initialized) { if (self->_initialized) {
Atomic::release_store_fence(&AsyncLogWriter::_instance, self); Atomic::release_store_fence(&AsyncLogWriter::_instance, self);
// All readers of _instance after the fence see non-nullptr. // All readers of _instance after the fence see non-null.
// We use LogOutputList's RCU counters to ensure all synchronous logsites have completed. // We use LogOutputList's RCU counters to ensure all synchronous logsites have completed.
// After that, we start AsyncLog Thread and it exclusively takes over all logging I/O. // After that, we start AsyncLog Thread and it exclusively takes over all logging I/O.
for (LogTagSet* ts = LogTagSet::first(); ts != nullptr; ts = ts->next()) { for (LogTagSet* ts = LogTagSet::first(); ts != nullptr; ts = ts->next()) {

View file

@@ -478,7 +478,7 @@ bool LogConfiguration::parse_log_arguments(const char* outputstr,
const char* decoratorstr, const char* decoratorstr,
const char* output_options, const char* output_options,
outputStream* errstream) { outputStream* errstream) {
assert(errstream != nullptr, "errstream can not be nullptr"); assert(errstream != nullptr, "errstream can not be null");
if (outputstr == nullptr || strlen(outputstr) == 0) { if (outputstr == nullptr || strlen(outputstr) == 0) {
outputstr = "stdout"; outputstr = "stdout";
} }

View file

@@ -61,7 +61,7 @@ class LogConfiguration : public AllStatic {
static size_t _n_listener_callbacks; static size_t _n_listener_callbacks;
static bool _async_mode; static bool _async_mode;
// Create a new output. Returns nullptr if failed. // Create a new output. Returns null if failed.
static LogOutput* new_output(const char* name, const char* options, outputStream* errstream); static LogOutput* new_output(const char* name, const char* options, outputStream* errstream);
// Add an output to the list of configured outputs. Returns the assigned index. // Add an output to the list of configured outputs. Returns the assigned index.

View file

@@ -109,7 +109,7 @@ class LogMessageBuffer : public StackObj {
// It is, however, possible to specify a prefix per LogMessageBuffer, // It is, however, possible to specify a prefix per LogMessageBuffer,
// using set_prefix(). Lines added to the LogMessageBuffer after a prefix // using set_prefix(). Lines added to the LogMessageBuffer after a prefix
// function has been set will be prefixed automatically. // function has been set will be prefixed automatically.
// Setting this to nullptr will disable prefixing. // Setting this to null will disable prefixing.
void set_prefix(size_t (*prefix_fn)(char*, size_t)) { void set_prefix(size_t (*prefix_fn)(char*, size_t)) {
_prefix_fn = prefix_fn; _prefix_fn = prefix_fn;
} }

View file

@@ -100,7 +100,7 @@ public:
LogStream(const LogTargetImpl<level, T0, T1, T2, T3, T4, GuardTag>& type_carrier) LogStream(const LogTargetImpl<level, T0, T1, T2, T3, T4, GuardTag>& type_carrier)
: LogStreamImpl(LogTargetHandle(level, LogTagSetMapping<T0, T1, T2, T3, T4>::tagset())) {} : LogStreamImpl(LogTargetHandle(level, LogTagSetMapping<T0, T1, T2, T3, T4>::tagset())) {}
// Constructor to support creation from typed (likely nullptr) pointer. Mostly used by the logging framework. // Constructor to support creation from typed (likely null) pointer. Mostly used by the logging framework.
// //
// LogStream stream(log.debug()); // LogStream stream(log.debug());
// or // or

View file

@@ -875,7 +875,7 @@ MetaWord* Metaspace::allocate(ClassLoaderData* loader_data, size_t word_size,
assert(word_size <= Metaspace::max_allocation_word_size(), assert(word_size <= Metaspace::max_allocation_word_size(),
"allocation size too large (" SIZE_FORMAT ")", word_size); "allocation size too large (" SIZE_FORMAT ")", word_size);
assert(loader_data != nullptr, "Should never pass around a nullptr loader_data. " assert(loader_data != nullptr, "Should never pass around a null loader_data. "
"ClassLoaderData::the_null_class_loader_data() should have been used."); "ClassLoaderData::the_null_class_loader_data() should have been used.");
// Deal with concurrent unloading failed allocation starvation // Deal with concurrent unloading failed allocation starvation

View file

@@ -277,7 +277,7 @@ void Metachunk::verify() const {
word_size(), committed_words()); word_size(), committed_words());
// Test base pointer // Test base pointer
assert(base() != nullptr, "Base pointer nullptr"); assert(base() != nullptr, "Base pointer null");
assert(vsnode() != nullptr, "No space"); assert(vsnode() != nullptr, "No space");
vsnode()->check_pointer(base()); vsnode()->check_pointer(base());

View file

@@ -343,7 +343,7 @@ MetaWord* MetaspaceArena::allocate_inner(size_t requested_word_size) {
SOMETIMES(verify_locked();) SOMETIMES(verify_locked();)
if (p == nullptr) { if (p == nullptr) {
UL(info, "allocation failed, returned nullptr."); UL(info, "allocation failed, returned null.");
} else { } else {
UL2(trace, "after allocation: %u chunk(s), current:" METACHUNK_FULL_FORMAT, UL2(trace, "after allocation: %u chunk(s), current:" METACHUNK_FULL_FORMAT,
_chunks.count(), METACHUNK_FULL_FORMAT_ARGS(current_chunk())); _chunks.count(), METACHUNK_FULL_FORMAT_ARGS(current_chunk()));

View file

@@ -466,7 +466,7 @@ void Universe::initialize_basic_type_mirrors(TRAPS) {
for (int i = T_BOOLEAN; i < T_VOID+1; i++) { for (int i = T_BOOLEAN; i < T_VOID+1; i++) {
if (!is_reference_type((BasicType)i)) { if (!is_reference_type((BasicType)i)) {
oop m = _basic_type_mirrors[i].resolve(); oop m = _basic_type_mirrors[i].resolve();
assert(m != nullptr, "archived mirrors should not be nullptr"); assert(m != nullptr, "archived mirrors should not be null");
} }
} }
} else } else

View file

@@ -595,7 +595,7 @@ void ReservedHeapSpace::initialize_compressed_heap(const size_t size, size_t ali
// Last, desperate try without any placement. // Last, desperate try without any placement.
if (_base == nullptr) { if (_base == nullptr) {
log_trace(gc, heap, coops)("Trying to allocate at address nullptr heap of size " SIZE_FORMAT_X, size + noaccess_prefix); log_trace(gc, heap, coops)("Trying to allocate at address null heap of size " SIZE_FORMAT_X, size + noaccess_prefix);
initialize(size + noaccess_prefix, alignment, page_size, nullptr, false); initialize(size + noaccess_prefix, alignment, page_size, nullptr, false);
} }
} }

View file

@@ -885,7 +885,7 @@ oop ConstantPoolCache::set_dynamic_call(const CallInfo &call_info, int index) {
const Handle appendix = call_info.resolved_appendix(); const Handle appendix = call_info.resolved_appendix();
const bool has_appendix = appendix.not_null(); const bool has_appendix = appendix.not_null();
LogStream* log_stream = NULL; LogStream* log_stream = nullptr;
LogStreamHandle(Debug, methodhandles, indy) lsh_indy; LogStreamHandle(Debug, methodhandles, indy) lsh_indy;
if (lsh_indy.is_enabled()) { if (lsh_indy.is_enabled()) {
ResourceMark rm; ResourceMark rm;
@@ -903,7 +903,7 @@ oop ConstantPoolCache::set_dynamic_call(const CallInfo &call_info, int index) {
const int appendix_index = resolved_indy_entry_at(index)->resolved_references_index(); const int appendix_index = resolved_indy_entry_at(index)->resolved_references_index();
objArrayOop resolved_references = constant_pool()->resolved_references(); objArrayOop resolved_references = constant_pool()->resolved_references();
assert(appendix_index >= 0 && appendix_index < resolved_references->length(), "oob"); assert(appendix_index >= 0 && appendix_index < resolved_references->length(), "oob");
assert(resolved_references->obj_at(appendix_index) == NULL, "init just once"); assert(resolved_references->obj_at(appendix_index) == nullptr, "init just once");
resolved_references->obj_at_put(appendix_index, appendix()); resolved_references->obj_at_put(appendix_index, appendix());
} }
@@ -911,7 +911,7 @@ oop ConstantPoolCache::set_dynamic_call(const CallInfo &call_info, int index) {
assert(resolved_indy_entries() != nullptr, "Invokedynamic array is empty, cannot fill with resolved information"); assert(resolved_indy_entries() != nullptr, "Invokedynamic array is empty, cannot fill with resolved information");
resolved_indy_entry_at(index)->fill_in(adapter, adapter->size_of_parameters(), as_TosState(adapter->result_type()), has_appendix); resolved_indy_entry_at(index)->fill_in(adapter, adapter->size_of_parameters(), as_TosState(adapter->result_type()), has_appendix);
if (log_stream != NULL) { if (log_stream != nullptr) {
resolved_indy_entry_at(index)->print_on(log_stream); resolved_indy_entry_at(index)->print_on(log_stream);
} }
return appendix(); return appendix();

View file

@@ -488,7 +488,7 @@ const TypeFunc *OptoRuntime::notify_jvmti_vthread_Type() {
// no result type needed // no result type needed
fields = TypeTuple::fields(1); fields = TypeTuple::fields(1);
fields[TypeFunc::Parms+0] = NULL; // void fields[TypeFunc::Parms+0] = nullptr; // void
const TypeTuple* range = TypeTuple::make(TypeFunc::Parms, fields); const TypeTuple* range = TypeTuple::make(TypeFunc::Parms, fields);
return TypeFunc::make(domain,range); return TypeFunc::make(domain,range);

View file

@@ -372,7 +372,7 @@ static bool invoke_JVM_OnLoad(JvmtiAgent* agent) {
ThreadToNativeFromVM ttn(thread); ThreadToNativeFromVM ttn(thread);
HandleMark hm(thread); HandleMark hm(thread);
extern struct JavaVM_ main_vm; extern struct JavaVM_ main_vm;
const jint err = (*on_load_entry)(&main_vm, const_cast<char*>(agent->options()), NULL); const jint err = (*on_load_entry)(&main_vm, const_cast<char*>(agent->options()), nullptr);
if (err != JNI_OK) { if (err != JNI_OK) {
vm_exit_during_initialization("-Xrun library failed to init", agent->name()); vm_exit_during_initialization("-Xrun library failed to init", agent->name());
} }

View file

@@ -245,7 +245,7 @@ JvmtiVTMSTransitionDisabler::print_info() {
#endif #endif
// disable VTMS transitions for one virtual thread // disable VTMS transitions for one virtual thread
// no-op if thread is non-NULL and not a virtual thread // no-op if thread is non-null and not a virtual thread
JvmtiVTMSTransitionDisabler::JvmtiVTMSTransitionDisabler(jthread thread) JvmtiVTMSTransitionDisabler::JvmtiVTMSTransitionDisabler(jthread thread)
: _is_SR(false), _thread(thread) : _is_SR(false), _thread(thread)
{ {

View file

@@ -1882,7 +1882,7 @@ WB_END
WB_ENTRY(jint, WB_getIndyInfoLength(JNIEnv* env, jobject wb, jclass klass)) WB_ENTRY(jint, WB_getIndyInfoLength(JNIEnv* env, jobject wb, jclass klass))
InstanceKlass* ik = InstanceKlass::cast(java_lang_Class::as_Klass(JNIHandles::resolve(klass))); InstanceKlass* ik = InstanceKlass::cast(java_lang_Class::as_Klass(JNIHandles::resolve(klass)));
ConstantPool* cp = ik->constants(); ConstantPool* cp = ik->constants();
if (cp->cache() == NULL) { if (cp->cache() == nullptr) {
return -1; return -1;
} }
return cp->resolved_indy_entries_length(); return cp->resolved_indy_entries_length();
@@ -1891,7 +1891,7 @@ WB_END
WB_ENTRY(jint, WB_getIndyCPIndex(JNIEnv* env, jobject wb, jclass klass, jint index)) WB_ENTRY(jint, WB_getIndyCPIndex(JNIEnv* env, jobject wb, jclass klass, jint index))
InstanceKlass* ik = InstanceKlass::cast(java_lang_Class::as_Klass(JNIHandles::resolve(klass))); InstanceKlass* ik = InstanceKlass::cast(java_lang_Class::as_Klass(JNIHandles::resolve(klass)));
ConstantPool* cp = ik->constants(); ConstantPool* cp = ik->constants();
if (cp->cache() == NULL) { if (cp->cache() == nullptr) {
return -1; return -1;
} }
return cp->resolved_indy_entry_at(index)->constant_pool_index(); return cp->resolved_indy_entry_at(index)->constant_pool_index();

View file

@ -161,14 +161,14 @@ void fieldDescriptor::print_on_for(outputStream* st, oop obj) {
if (obj->obj_field(offset()) != nullptr) { if (obj->obj_field(offset()) != nullptr) {
obj->obj_field(offset())->print_value_on(st); obj->obj_field(offset())->print_value_on(st);
} else { } else {
st->print("nullptr"); st->print("null");
} }
break; break;
case T_OBJECT: case T_OBJECT:
if (obj->obj_field(offset()) != nullptr) { if (obj->obj_field(offset()) != nullptr) {
obj->obj_field(offset())->print_value_on(st); obj->obj_field(offset())->print_value_on(st);
} else { } else {
st->print("nullptr"); st->print("null");
} }
break; break;
default: default:

View file

@ -547,7 +547,7 @@ const int ObjectAlignmentInBytes = 8;
"Dump heap to file when java.lang.OutOfMemoryError is thrown " \ "Dump heap to file when java.lang.OutOfMemoryError is thrown " \
"from JVM") \ "from JVM") \
\ \
product(ccstr, HeapDumpPath, nullptr, MANAGEABLE, \ product(ccstr, HeapDumpPath, nullptr, MANAGEABLE, \
"When HeapDumpOnOutOfMemoryError is on, the path (filename or " \ "When HeapDumpOnOutOfMemoryError is on, the path (filename or " \
"directory) of the dump file (defaults to java_pid<pid>.hprof " \ "directory) of the dump file (defaults to java_pid<pid>.hprof " \
"in the working directory)") \ "in the working directory)") \
@ -601,7 +601,7 @@ const int ObjectAlignmentInBytes = 8;
product(bool, PrintAssembly, false, DIAGNOSTIC, \ product(bool, PrintAssembly, false, DIAGNOSTIC, \
"Print assembly code (using external disassembler.so)") \ "Print assembly code (using external disassembler.so)") \
\ \
product(ccstr, PrintAssemblyOptions, nullptr, DIAGNOSTIC, \ product(ccstr, PrintAssemblyOptions, nullptr, DIAGNOSTIC, \
"Print options string passed to disassembler.so") \ "Print options string passed to disassembler.so") \
\ \
notproduct(bool, PrintNMethodStatistics, false, \ notproduct(bool, PrintNMethodStatistics, false, \
@ -629,7 +629,7 @@ const int ObjectAlignmentInBytes = 8;
"Exercise compiled exception handlers") \ "Exercise compiled exception handlers") \
\ \
develop(bool, InterceptOSException, false, \ develop(bool, InterceptOSException, false, \
"Start debugger when an implicit OS (e.g. nullptr) " \ "Start debugger when an implicit OS (e.g. null pointer) " \
"exception happens") \ "exception happens") \
\ \
product(bool, PrintCodeCache, false, \ product(bool, PrintCodeCache, false, \
@ -835,7 +835,7 @@ const int ObjectAlignmentInBytes = 8;
develop(bool, StressRewriter, false, \ develop(bool, StressRewriter, false, \
"Stress linktime bytecode rewriting") \ "Stress linktime bytecode rewriting") \
\ \
product(ccstr, TraceJVMTI, nullptr, \ product(ccstr, TraceJVMTI, nullptr, \
"Trace flags for JVMTI functions and events") \ "Trace flags for JVMTI functions and events") \
\ \
product(bool, StressLdcRewrite, false, DIAGNOSTIC, \ product(bool, StressLdcRewrite, false, DIAGNOSTIC, \
@ -1025,11 +1025,11 @@ const int ObjectAlignmentInBytes = 8;
product(bool, LogVMOutput, false, DIAGNOSTIC, \ product(bool, LogVMOutput, false, DIAGNOSTIC, \
"Save VM output to LogFile") \ "Save VM output to LogFile") \
\ \
product(ccstr, LogFile, nullptr, DIAGNOSTIC, \ product(ccstr, LogFile, nullptr, DIAGNOSTIC, \
"If LogVMOutput or LogCompilation is on, save VM output to " \ "If LogVMOutput or LogCompilation is on, save VM output to " \
"this file [default: ./hotspot_pid%p.log] (%p replaced with pid)")\ "this file [default: ./hotspot_pid%p.log] (%p replaced with pid)")\
\ \
product(ccstr, ErrorFile, nullptr, \ product(ccstr, ErrorFile, nullptr, \
"If an error occurs, save the error data to this file " \ "If an error occurs, save the error data to this file " \
"[default: ./hs_err_pid%p.log] (%p replaced with pid)") \ "[default: ./hs_err_pid%p.log] (%p replaced with pid)") \
\ \
@ -1066,11 +1066,11 @@ const int ObjectAlignmentInBytes = 8;
notproduct(bool, PrintSymbolTableSizeHistogram, false, \ notproduct(bool, PrintSymbolTableSizeHistogram, false, \
"print histogram of the symbol table") \ "print histogram of the symbol table") \
\ \
product(ccstr, AbortVMOnException, nullptr, DIAGNOSTIC, \ product(ccstr, AbortVMOnException, nullptr, DIAGNOSTIC, \
"Call fatal if this exception is thrown. Example: " \ "Call fatal if this exception is thrown. Example: " \
"java -XX:AbortVMOnException=java.lang.NullPointerException Foo") \ "java -XX:AbortVMOnException=java.lang.NullPointerException Foo") \
\ \
product(ccstr, AbortVMOnExceptionMessage, nullptr, DIAGNOSTIC, \ product(ccstr, AbortVMOnExceptionMessage, nullptr, DIAGNOSTIC, \
"Call fatal if the exception pointed by AbortVMOnException " \ "Call fatal if the exception pointed by AbortVMOnException " \
"has this message") \ "has this message") \
\ \
@ -1382,7 +1382,7 @@ const int ObjectAlignmentInBytes = 8;
product(double, InlineFrequencyRatio, 0.25, DIAGNOSTIC, \ product(double, InlineFrequencyRatio, 0.25, DIAGNOSTIC, \
"Ratio of call site execution to caller method invocation") \ "Ratio of call site execution to caller method invocation") \
\ \
product(double, MinInlineFrequencyRatio, 0.0085, DIAGNOSTIC, \ product(double, MinInlineFrequencyRatio, 0.0085, DIAGNOSTIC, \
"Minimum ratio of call site execution to caller method" \ "Minimum ratio of call site execution to caller method" \
"invocation to be considered for inlining") \ "invocation to be considered for inlining") \
\ \
@ -1727,7 +1727,7 @@ const int ObjectAlignmentInBytes = 8;
product(bool, PerfDataSaveToFile, false, \ product(bool, PerfDataSaveToFile, false, \
"Save PerfData memory to hsperfdata_<pid> file on exit") \ "Save PerfData memory to hsperfdata_<pid> file on exit") \
\ \
product(ccstr, PerfDataSaveFile, nullptr, \ product(ccstr, PerfDataSaveFile, nullptr, \
"Save PerfData memory to the specified absolute pathname. " \ "Save PerfData memory to the specified absolute pathname. " \
"The string %p in the file name (if present) " \ "The string %p in the file name (if present) " \
"will be replaced by pid") \ "will be replaced by pid") \
@ -1914,7 +1914,7 @@ const int ObjectAlignmentInBytes = 8;
range(0, max_intx) \ range(0, max_intx) \
constraint(InitArrayShortSizeConstraintFunc, AfterErgo) \ constraint(InitArrayShortSizeConstraintFunc, AfterErgo) \
\ \
product(ccstr, AllocateHeapAt, nullptr, \ product(ccstr, AllocateHeapAt, nullptr, \
"Path to the directory where a temporary file will be created " \ "Path to the directory where a temporary file will be created " \
"to use as the backing store for Java Heap.") \ "to use as the backing store for Java Heap.") \
\ \
@ -1949,10 +1949,10 @@ const int ObjectAlignmentInBytes = 8;
JFR_ONLY(product(bool, FlightRecorder, false, \ JFR_ONLY(product(bool, FlightRecorder, false, \
"(Deprecated) Enable Flight Recorder")) \ "(Deprecated) Enable Flight Recorder")) \
\ \
JFR_ONLY(product(ccstr, FlightRecorderOptions, nullptr, \ JFR_ONLY(product(ccstr, FlightRecorderOptions, nullptr, \
"Flight Recorder options")) \ "Flight Recorder options")) \
\ \
JFR_ONLY(product(ccstr, StartFlightRecording, nullptr, \ JFR_ONLY(product(ccstr, StartFlightRecording, nullptr, \
"Start flight recording with options")) \ "Start flight recording with options")) \
\ \
product(bool, UseFastUnorderedTimeStamps, false, EXPERIMENTAL, \ product(bool, UseFastUnorderedTimeStamps, false, EXPERIMENTAL, \

View file

@ -68,7 +68,7 @@ class Handle {
protected: protected:
oop obj() const { return _handle == nullptr ? (oop)nullptr : *_handle; } oop obj() const { return _handle == nullptr ? (oop)nullptr : *_handle; }
oop non_null_obj() const { assert(_handle != nullptr, "resolving nullptr handle"); return *_handle; } oop non_null_obj() const { assert(_handle != nullptr, "resolving null handle"); return *_handle; }
public: public:
// Constructors // Constructors
@ -144,7 +144,7 @@ DEF_HANDLE(typeArray , is_typeArray_noinline )
Thread* _thread; \ Thread* _thread; \
protected: \ protected: \
type* obj() const { return _value; } \ type* obj() const { return _value; } \
type* non_null_obj() const { assert(_value != nullptr, "resolving nullptr _value"); return _value; } \ type* non_null_obj() const { assert(_value != nullptr, "resolving null _value"); return _value; } \
\ \
public: \ public: \
/* Constructors */ \ /* Constructors */ \

View file

@ -378,7 +378,7 @@ void JNIHandleBlock::release_block(JNIHandleBlock* block, JavaThread* thread) {
while (block != nullptr) { while (block != nullptr) {
JNIHandleBlock* next = block->_next; JNIHandleBlock* next = block->_next;
Atomic::dec(&_blocks_allocated); Atomic::dec(&_blocks_allocated);
assert(block->pop_frame_link() == nullptr, "pop_frame_link should be nullptr"); assert(block->pop_frame_link() == nullptr, "pop_frame_link should be null");
delete block; delete block;
block = next; block = next;
} }

View file

@ -113,7 +113,7 @@ inline bool JNIHandles::is_same_object(jobject handle1, jobject handle2) {
inline oop JNIHandles::resolve_non_null(jobject handle) { inline oop JNIHandles::resolve_non_null(jobject handle) {
assert(handle != nullptr, "JNI handle should not be null"); assert(handle != nullptr, "JNI handle should not be null");
oop result = resolve_impl<DECORATORS_NONE, false /* external_guard */>(handle); oop result = resolve_impl<DECORATORS_NONE, false /* external_guard */>(handle);
assert(result != nullptr, "nullptr read from jni handle"); assert(result != nullptr, "null read from jni handle");
return result; return result;
} }

View file

@ -536,7 +536,7 @@ bool ObjectMonitor::deflate_monitor() {
} else { } else {
// Attempt async deflation protocol. // Attempt async deflation protocol.
// Set a nullptr owner to DEFLATER_MARKER to force any contending thread // Set a null owner to DEFLATER_MARKER to force any contending thread
// through the slow path. This is just the first part of the async // through the slow path. This is just the first part of the async
// deflation dance. // deflation dance.
if (try_set_owner_from(nullptr, DEFLATER_MARKER) != nullptr) { if (try_set_owner_from(nullptr, DEFLATER_MARKER) != nullptr) {
@ -561,7 +561,7 @@ bool ObjectMonitor::deflate_monitor() {
// to retry. This is the second part of the async deflation dance. // to retry. This is the second part of the async deflation dance.
if (Atomic::cmpxchg(&_contentions, 0, INT_MIN) != 0) { if (Atomic::cmpxchg(&_contentions, 0, INT_MIN) != 0) {
// Contentions was no longer 0 so we lost the race since the // Contentions was no longer 0 so we lost the race since the
// ObjectMonitor is now busy. Restore owner to nullptr if it is // ObjectMonitor is now busy. Restore owner to null if it is
// still DEFLATER_MARKER: // still DEFLATER_MARKER:
if (try_set_owner_from(DEFLATER_MARKER, nullptr) != DEFLATER_MARKER) { if (try_set_owner_from(DEFLATER_MARKER, nullptr) != DEFLATER_MARKER) {
// Deferred decrement for the JT EnterI() that cancelled the async deflation. // Deferred decrement for the JT EnterI() that cancelled the async deflation.
@ -666,7 +666,7 @@ const char* ObjectMonitor::is_busy_to_string(stringStream* ss) {
if (!owner_is_DEFLATER_MARKER()) { if (!owner_is_DEFLATER_MARKER()) {
ss->print("owner=" INTPTR_FORMAT, p2i(owner_raw())); ss->print("owner=" INTPTR_FORMAT, p2i(owner_raw()));
} else { } else {
// We report nullptr instead of DEFLATER_MARKER here because is_busy() // We report null instead of DEFLATER_MARKER here because is_busy()
// ignores DEFLATER_MARKER values. // ignores DEFLATER_MARKER values.
ss->print("owner=" INTPTR_FORMAT, NULL_WORD); ss->print("owner=" INTPTR_FORMAT, NULL_WORD);
} }

View file

@ -1128,7 +1128,7 @@ void os::print_location(outputStream* st, intptr_t x, bool verbose) {
address addr = (address)x; address addr = (address)x;
// Handle null first, so later checks don't need to protect against it. // Handle null first, so later checks don't need to protect against it.
if (addr == nullptr) { if (addr == nullptr) {
st->print_cr("0x0 is nullptr"); st->print_cr("0x0 is null");
return; return;
} }

View file

@ -232,7 +232,7 @@ int SafepointSynchronize::synchronize_threads(jlong safepoint_limit_time, int no
ThreadSafepointState **p_prev = &tss_head; ThreadSafepointState **p_prev = &tss_head;
for (; JavaThread *cur = jtiwh.next(); ) { for (; JavaThread *cur = jtiwh.next(); ) {
ThreadSafepointState *cur_tss = cur->safepoint_state(); ThreadSafepointState *cur_tss = cur->safepoint_state();
assert(cur_tss->get_next() == nullptr, "Must be nullptr"); assert(cur_tss->get_next() == nullptr, "Must be null");
if (thread_not_running(cur_tss)) { if (thread_not_running(cur_tss)) {
--still_running; --still_running;
} else { } else {

View file

@ -266,7 +266,7 @@ Thread::~Thread() {
delete handle_area(); delete handle_area();
delete metadata_handles(); delete metadata_handles();
// osthread() can be nullptr, if creation of thread failed. // osthread() can be null, if creation of thread failed.
if (osthread() != nullptr) os::free_thread(osthread()); if (osthread() != nullptr) os::free_thread(osthread());
// Clear Thread::current if thread is deleting itself and it has not // Clear Thread::current if thread is deleting itself and it has not

View file

@ -340,7 +340,7 @@ class Thread: public ThreadShadow {
// and logging. // and logging.
virtual const char* type_name() const { return "Thread"; } virtual const char* type_name() const { return "Thread"; }
// Returns the current thread (ASSERTS if nullptr) // Returns the current thread (ASSERTS if null)
static inline Thread* current(); static inline Thread* current();
// Returns the current thread, or null if not attached // Returns the current thread, or null if not attached
static inline Thread* current_or_null(); static inline Thread* current_or_null();

View file

@ -620,7 +620,7 @@ void SafeThreadsListPtr::verify_hazard_ptr_scanned() {
// Shared singleton data for all ThreadsList(0) instances. // Shared singleton data for all ThreadsList(0) instances.
// Used by _bootstrap_list to avoid static init time heap allocation. // Used by _bootstrap_list to avoid static init time heap allocation.
// No real entries, just the final nullptr terminator. // No real entries, just the final null terminator.
static JavaThread* const empty_threads_list_data[1] = {}; static JavaThread* const empty_threads_list_data[1] = {};
// Result has 'entries + 1' elements, with the last being the null terminator. // Result has 'entries + 1' elements, with the last being the null terminator.
@ -842,11 +842,11 @@ bool ThreadsListHandle::cv_internal_thread_to_JavaThread(jobject jthread,
FastThreadsListHandle::FastThreadsListHandle(oop thread_oop, JavaThread* java_thread) : _protected_java_thread(nullptr) { FastThreadsListHandle::FastThreadsListHandle(oop thread_oop, JavaThread* java_thread) : _protected_java_thread(nullptr) {
assert(thread_oop != nullptr, "must be"); assert(thread_oop != nullptr, "must be");
if (java_thread != nullptr) { if (java_thread != nullptr) {
// We captured a non-nullptr JavaThread* before the _tlh was created // We captured a non-null JavaThread* before the _tlh was created
// so that covers the early life stage of the target JavaThread. // so that covers the early life stage of the target JavaThread.
_protected_java_thread = java_lang_Thread::thread(thread_oop); _protected_java_thread = java_lang_Thread::thread(thread_oop);
assert(_protected_java_thread == nullptr || _tlh.includes(_protected_java_thread), "must be"); assert(_protected_java_thread == nullptr || _tlh.includes(_protected_java_thread), "must be");
// If we captured a non-nullptr JavaThread* after the _tlh was created // If we captured a non-null JavaThread* after the _tlh was created
// then that covers the end life stage of the target JavaThread and we // then that covers the end life stage of the target JavaThread and we
// we know that _tlh protects the JavaThread*. The underlying atomic // we know that _tlh protects the JavaThread*. The underlying atomic
// load is sufficient (no acquire necessary here). // load is sufficient (no acquire necessary here).

View file

@ -332,7 +332,7 @@ void VM_ThreadDump::doit() {
if (jt == nullptr || /* thread not alive */ if (jt == nullptr || /* thread not alive */
jt->is_exiting() || jt->is_exiting() ||
jt->is_hidden_from_external_view()) { jt->is_hidden_from_external_view()) {
// add a nullptr snapshot if skipped // add a null snapshot if skipped
_result->add_thread_snapshot(); _result->add_thread_snapshot();
continue; continue;
} }

View file

@ -1074,7 +1074,7 @@ void DeadlockCycle::print_on_with(ThreadsList * t_list, outputStream* st) const
"Must be an AbstractOwnableSynchronizer"); "Must be an AbstractOwnableSynchronizer");
oop ownerObj = java_util_concurrent_locks_AbstractOwnableSynchronizer::get_owner_threadObj(waitingToLockBlocker); oop ownerObj = java_util_concurrent_locks_AbstractOwnableSynchronizer::get_owner_threadObj(waitingToLockBlocker);
currentThread = java_lang_Thread::thread(ownerObj); currentThread = java_lang_Thread::thread(ownerObj);
assert(currentThread != nullptr, "AbstractOwnableSynchronizer owning thread is unexpectedly nullptr"); assert(currentThread != nullptr, "AbstractOwnableSynchronizer owning thread is unexpectedly null");
} }
st->print_cr("%s \"%s\"", owner_desc, currentThread->name()); st->print_cr("%s \"%s\"", owner_desc, currentThread->name());
} }

View file

@ -1292,7 +1292,7 @@ inline bool ConcurrentHashTable<CONFIG, F>::
if (!try_resize_lock(thread)) { if (!try_resize_lock(thread)) {
return false; return false;
} }
assert(_new_table == nullptr || _new_table == POISON_PTR, "Must be nullptr"); assert(_new_table == nullptr || _new_table == POISON_PTR, "Must be null");
for (size_t bucket_it = 0; bucket_it < _table->_size; bucket_it++) { for (size_t bucket_it = 0; bucket_it < _table->_size; bucket_it++) {
Bucket* bucket = _table->get_bucket(bucket_it); Bucket* bucket = _table->get_bucket(bucket_it);
assert(!bucket->have_redirect() && !bucket->is_locked(), "Table must be uncontended"); assert(!bucket->have_redirect() && !bucket->is_locked(), "Table must be uncontended");

View file

@ -68,8 +68,8 @@ public:
*/ */
template<bool swap> template<bool swap>
static void conjoint_swap_if_needed(const void* src, void* dst, size_t byte_count, size_t elem_size) { static void conjoint_swap_if_needed(const void* src, void* dst, size_t byte_count, size_t elem_size) {
assert(src != nullptr, "address must not be nullptr"); assert(src != nullptr, "address must not be null");
assert(dst != nullptr, "address must not be nullptr"); assert(dst != nullptr, "address must not be null");
assert(elem_size == 2 || elem_size == 4 || elem_size == 8, assert(elem_size == 2 || elem_size == 4 || elem_size == 8,
"incorrect element size: " SIZE_FORMAT, elem_size); "incorrect element size: " SIZE_FORMAT, elem_size);
assert(is_aligned(byte_count, elem_size), assert(is_aligned(byte_count, elem_size),

View file

@ -1824,7 +1824,7 @@ bool DwarfFile::MarkedDwarfFileReader::read_sleb128(int64_t* result, const int8_
return read_leb128((uint64_t*)result, check_size, true); return read_leb128((uint64_t*)result, check_size, true);
} }
// If result is a nullptr, we do not care about the content of the string being read. // If result is a null, we do not care about the content of the string being read.
bool DwarfFile::MarkedDwarfFileReader::read_string(char* result, const size_t result_len) { bool DwarfFile::MarkedDwarfFileReader::read_string(char* result, const size_t result_len) {
char first_char; char first_char;
if (!read_non_null_char(&first_char)) { if (!read_non_null_char(&first_char)) {

View file

@ -132,7 +132,7 @@ public:
ElfFuncDescTable(FILE* file, Elf_Shdr shdr, int index); ElfFuncDescTable(FILE* file, Elf_Shdr shdr, int index);
~ElfFuncDescTable(); ~ElfFuncDescTable();
// return the function address for the function descriptor at 'index' or nullptr on error // return the function address for the function descriptor at 'index' or null on error
address lookup(Elf_Word index); address lookup(Elf_Word index);
int get_index() const { return _index; }; int get_index() const { return _index; };

View file

@ -135,14 +135,14 @@ bool Exceptions::special_exception(JavaThread* thread, const char* file, int lin
// This method should only be called from generated code, // This method should only be called from generated code,
// therefore the exception oop should be in the oopmap. // therefore the exception oop should be in the oopmap.
void Exceptions::_throw_oop(JavaThread* thread, const char* file, int line, oop exception) { void Exceptions::_throw_oop(JavaThread* thread, const char* file, int line, oop exception) {
assert(exception != nullptr, "exception should not be nullptr"); assert(exception != nullptr, "exception should not be null");
Handle h_exception(thread, exception); Handle h_exception(thread, exception);
_throw(thread, file, line, h_exception); _throw(thread, file, line, h_exception);
} }
void Exceptions::_throw(JavaThread* thread, const char* file, int line, Handle h_exception, const char* message) { void Exceptions::_throw(JavaThread* thread, const char* file, int line, Handle h_exception, const char* message) {
ResourceMark rm(thread); ResourceMark rm(thread);
assert(h_exception() != nullptr, "exception should not be nullptr"); assert(h_exception() != nullptr, "exception should not be null");
// tracing (do this up front - so it works during boot strapping) // tracing (do this up front - so it works during boot strapping)
// Note, the print_value_string() argument is not called unless logging is enabled! // Note, the print_value_string() argument is not called unless logging is enabled!

View file

@ -132,7 +132,7 @@ inline int g_isfinite(jdouble f) { return isfinite(f); }
#endif // _LP64 #endif // _LP64
// gcc warns about applying offsetof() to non-POD object or calculating // gcc warns about applying offsetof() to non-POD object or calculating
// offset directly when base address is NULL. The -Wno-invalid-offsetof // offset directly when base address is null. The -Wno-invalid-offsetof
// option could be used to suppress this warning, but we instead just // option could be used to suppress this warning, but we instead just
// avoid the use of offsetof(). // avoid the use of offsetof().
// //

View file

@ -171,7 +171,7 @@ template <class E, AnyObj::allocation_type T = AnyObj::C_HEAP,
} }
virtual void add(LinkedListNode<E>* node) { virtual void add(LinkedListNode<E>* node) {
assert(node != nullptr, "nullptr pointer"); assert(node != nullptr, "null pointer");
node->set_next(this->head()); node->set_next(this->head());
this->set_head(node); this->set_head(node);
} }
@ -385,7 +385,7 @@ template <class E, int (*FUNC)(const E&, const E&),
} }
virtual void add(LinkedListNode<E>* node) { virtual void add(LinkedListNode<E>* node) {
assert(node != nullptr, "nullptr pointer"); assert(node != nullptr, "null pointer");
LinkedListNode<E>* tmp = this->head(); LinkedListNode<E>* tmp = this->head();
LinkedListNode<E>* prev = nullptr; LinkedListNode<E>* prev = nullptr;

View file

@ -76,7 +76,7 @@ public:
~LockFreeStack() { assert(empty(), "stack not empty"); } ~LockFreeStack() { assert(empty(), "stack not empty"); }
// Atomically removes the top object from this stack and returns a // Atomically removes the top object from this stack and returns a
// pointer to that object, or nullptr if this stack is empty. Acts as a // pointer to that object, or null if this stack is empty. Acts as a
// full memory barrier. Subject to ABA behavior; callers must ensure // full memory barrier. Subject to ABA behavior; callers must ensure
// usage is safe. // usage is safe.
T* pop() { T* pop() {
@ -97,7 +97,7 @@ public:
return result; return result;
} }
// Atomically exchange the list of elements with nullptr, returning the old // Atomically exchange the list of elements with null, returning the old
// list of elements. Acts as a full memory barrier. // list of elements. Acts as a full memory barrier.
// postcondition: empty() // postcondition: empty()
T* pop_all() { T* pop_all() {

View file

@ -45,7 +45,7 @@
// //
// A queue may temporarily appear to be empty even though elements have been // A queue may temporarily appear to be empty even though elements have been
// added and not removed. For example, after running the following program, // added and not removed. For example, after running the following program,
// the value of r may be nullptr. // the value of r may be null.
// //
// thread1: q.push(a); r = q.pop(); // thread1: q.push(a); r = q.pop();
// thread2: q.push(b); // thread2: q.push(b);
@ -105,15 +105,15 @@ public:
// Thread-safe attempt to remove and return the first object in the queue. // Thread-safe attempt to remove and return the first object in the queue.
// Returns true if successful. If successful then *node_ptr is the former // Returns true if successful. If successful then *node_ptr is the former
// first object, or nullptr if the queue was empty. If unsuccessful, because // first object, or null if the queue was empty. If unsuccessful, because
// of contention with a concurrent modification, then returns false with // of contention with a concurrent modification, then returns false with
// the value of *node_ptr unspecified. Subject to ABA behavior; callers // the value of *node_ptr unspecified. Subject to ABA behavior; callers
// must ensure usage is safe. // must ensure usage is safe.
inline bool try_pop(T** node_ptr); inline bool try_pop(T** node_ptr);
// Thread-safe remove and return the first object in the queue, or nullptr // Thread-safe remove and return the first object in the queue, or null
// if the queue was empty. This just iterates on try_pop() until it // if the queue was empty. This just iterates on try_pop() until it
// succeeds, returning the (possibly nullptr) element obtained from that. // succeeds, returning the (possibly null) element obtained from that.
// Subject to ABA behavior; callers must ensure usage is safe. // Subject to ABA behavior; callers must ensure usage is safe.
inline T* pop(); inline T* pop();

View file

@ -85,7 +85,7 @@ size_t NonblockingQueue<T, next_ptr>::length() const {
// An append operation atomically exchanges the new tail with the queue tail. // An append operation atomically exchanges the new tail with the queue tail.
// It then sets the "next" value of the old tail to the head of the list being // It then sets the "next" value of the old tail to the head of the list being
// appended. If the old tail is nullptr then the queue was empty, then the // appended. If the old tail is null then the queue was empty, then the
// head of the list being appended is instead stored in the queue head. // head of the list being appended is instead stored in the queue head.
// //
// This means there is a period between the exchange and the old tail update // This means there is a period between the exchange and the old tail update
@ -107,8 +107,8 @@ void NonblockingQueue<T, next_ptr>::append(T& first, T& last) {
set_next(last, end_marker()); set_next(last, end_marker());
T* old_tail = Atomic::xchg(&_tail, &last); T* old_tail = Atomic::xchg(&_tail, &last);
if (old_tail == nullptr) { if (old_tail == nullptr) {
// If old_tail is nullptr then the queue was empty, and _head must also be // If old_tail is null then the queue was empty, and _head must also be
// nullptr. The correctness of this assertion depends on try_pop clearing // null. The correctness of this assertion depends on try_pop clearing
// first _head then _tail when taking the last entry. // first _head then _tail when taking the last entry.
assert(Atomic::load(&_head) == nullptr, "invariant"); assert(Atomic::load(&_head) == nullptr, "invariant");
// Fall through to common update of _head. // Fall through to common update of _head.
@ -126,7 +126,7 @@ void NonblockingQueue<T, next_ptr>::append(T& first, T& last) {
return; return;
} else { } else {
// A concurrent try_pop has claimed old_tail, so it is no longer in the // A concurrent try_pop has claimed old_tail, so it is no longer in the
// list. The queue was logically empty. _head is either nullptr or // list. The queue was logically empty. _head is either null or
// old_tail, depending on how far try_pop operations have progressed. // old_tail, depending on how far try_pop operations have progressed.
DEBUG_ONLY(T* old_head = Atomic::load(&_head);) DEBUG_ONLY(T* old_head = Atomic::load(&_head);)
assert((old_head == nullptr) || (old_head == old_tail), "invariant"); assert((old_head == nullptr) || (old_head == old_tail), "invariant");
@ -152,7 +152,7 @@ bool NonblockingQueue<T, next_ptr>::try_pop(T** node_ptr) {
// [Clause 1] // [Clause 1]
// There are several cases for next_node. // There are several cases for next_node.
// (1) next_node is the extension of the queue's list. // (1) next_node is the extension of the queue's list.
// (2) next_node is nullptr, because a competing try_pop took old_head. // (2) next_node is null, because a competing try_pop took old_head.
// (3) next_node is the extension of some unrelated list, because a // (3) next_node is the extension of some unrelated list, because a
// competing try_pop took old_head and put it in some other list. // competing try_pop took old_head and put it in some other list.
// //
@ -171,7 +171,7 @@ bool NonblockingQueue<T, next_ptr>::try_pop(T** node_ptr) {
// The cmpxchg to advance the list succeeded, but a concurrent try_pop // The cmpxchg to advance the list succeeded, but a concurrent try_pop
// has already claimed old_head (see [Clause 2] - old_head was the last // has already claimed old_head (see [Clause 2] - old_head was the last
// entry in the list) by nulling old_head's next field. The advance set // entry in the list) by nulling old_head's next field. The advance set
// _head to nullptr, "helping" the competing try_pop. _head will remain // _head to null, "helping" the competing try_pop. _head will remain
// nullptr until a subsequent push/append. This is a lost race, and we // nullptr until a subsequent push/append. This is a lost race, and we
// report it as such for consistency, though we could report the queue // report it as such for consistency, though we could report the queue
// was empty. We don't attempt to further help [Clause 2] by also // was empty. We don't attempt to further help [Clause 2] by also
@ -191,7 +191,7 @@ bool NonblockingQueue<T, next_ptr>::try_pop(T** node_ptr) {
} else if (is_end(Atomic::cmpxchg(next_ptr(*old_head), next_node, (T*)nullptr))) { } else if (is_end(Atomic::cmpxchg(next_ptr(*old_head), next_node, (T*)nullptr))) {
// [Clause 2] // [Clause 2]
// Old_head was the last entry and we've claimed it by setting its next // Old_head was the last entry and we've claimed it by setting its next
// value to nullptr. However, this leaves the queue in disarray. Fix up // value to null. However, this leaves the queue in disarray. Fix up
// the queue, possibly in conjunction with other concurrent operations. // the queue, possibly in conjunction with other concurrent operations.
// Any further try_pops will consider the queue empty until a // Any further try_pops will consider the queue empty until a
// push/append completes by installing a new head. // push/append completes by installing a new head.
@ -200,12 +200,12 @@ bool NonblockingQueue<T, next_ptr>::try_pop(T** node_ptr) {
// dealing with _head first gives a stronger invariant in append, and is // dealing with _head first gives a stronger invariant in append, and is
// also consistent with [Clause 1b]. // also consistent with [Clause 1b].
// Attempt to change the queue head from old_head to nullptr. Failure of // Attempt to change the queue head from old_head to null. Failure of
// the cmpxchg indicates a concurrent operation updated _head first. That // the cmpxchg indicates a concurrent operation updated _head first. That
// could be either a push/append or a try_pop in [Clause 1b]. // could be either a push/append or a try_pop in [Clause 1b].
Atomic::cmpxchg(&_head, old_head, (T*)nullptr); Atomic::cmpxchg(&_head, old_head, (T*)nullptr);
// Attempt to change the queue tail from old_head to nullptr. Failure of // Attempt to change the queue tail from old_head to null. Failure of
// the cmpxchg indicates that a concurrent push/append updated _tail first. // the cmpxchg indicates that a concurrent push/append updated _tail first.
// That operation will eventually recognize the old tail (our old_head) is // That operation will eventually recognize the old tail (our old_head) is
// no longer in the list and update _head from the list being appended. // no longer in the list and update _head from the list being appended.

View file

@ -706,7 +706,7 @@ void defaultStream::init_log() {
_outer_xmlStream = new(mtInternal) xmlStream(file); _outer_xmlStream = new(mtInternal) xmlStream(file);
start_log(); start_log();
} else { } else {
// and leave xtty as nullptr // and leave xtty as null
LogVMOutput = false; LogVMOutput = false;
DisplayVMOutput = true; DisplayVMOutput = true;
LogCompilation = false; LogCompilation = false;
@ -762,13 +762,13 @@ void defaultStream::start_log() {
// System properties don't generally contain newlines, so don't bother with unparsing. // System properties don't generally contain newlines, so don't bother with unparsing.
outputStream *text = xs->text(); outputStream *text = xs->text();
for (SystemProperty* p = Arguments::system_properties(); p != nullptr; p = p->next()) { for (SystemProperty* p = Arguments::system_properties(); p != nullptr; p = p->next()) {
assert(p->key() != nullptr, "p->key() is nullptr"); assert(p->key() != nullptr, "p->key() is null");
if (p->readable()) { if (p->readable()) {
// Print in two stages to avoid problems with long // Print in two stages to avoid problems with long
// keys/values. // keys/values.
text->print_raw(p->key()); text->print_raw(p->key());
text->put('='); text->put('=');
assert(p->value() != nullptr, "p->value() is nullptr"); assert(p->value() != nullptr, "p->value() is null");
text->print_raw_cr(p->value()); text->print_raw_cr(p->value());
} }
} }

View file

@ -324,7 +324,7 @@ class UNSIGNED5 : AllStatic {
public: public:
Writer(const ARR& array) Writer(const ARR& array)
: _array(const_cast<ARR&>(array)), _limit_ptr(nullptr), _position(0) { : _array(const_cast<ARR&>(array)), _limit_ptr(nullptr), _position(0) {
// Note: if _limit_ptr is nullptr, the ARR& is never reassigned, // Note: if _limit_ptr is null, the ARR& is never reassigned,
// because has_limit is false. So the const_cast here is safe. // because has_limit is false. So the const_cast here is safe.
assert(!has_limit(), "this writer cannot be growable"); assert(!has_limit(), "this writer cannot be growable");
} }

View file

@ -73,7 +73,7 @@ class UTF8 : AllStatic {
// Utility methods // Utility methods
// Returns nullptr if 'c' it not found. This only works as long // Returns null if 'c' it not found. This only works as long
// as 'c' is an ASCII character // as 'c' is an ASCII character
static const jbyte* strrchr(const jbyte* base, int length, jbyte c) { static const jbyte* strrchr(const jbyte* base, int length, jbyte c) {
assert(length >= 0, "sanity check"); assert(length >= 0, "sanity check");

View file

@ -42,7 +42,7 @@ class VMError : public AllStatic {
static const char* _message; static const char* _message;
static char _detail_msg[1024]; static char _detail_msg[1024];
static Thread* _thread; // nullptr if it's native thread static Thread* _thread; // null if it's native thread
// additional info for crashes // additional info for crashes
static address _pc; // faulting PC static address _pc; // faulting PC