Mirror of https://github.com/openjdk/jdk.git (synced 2025-08-28 15:24:43 +02:00)
8194691: Cleanup unnecessary casts in Atomic/OrderAccess uses
Removed unnecessary casts.
Reviewed-by: coleenp, tschatzl
parent f4c6bc0030
commit 7f0f329daf

14 changed files with 43 additions and 46 deletions
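
Every hunk below follows the same pattern: the Atomic and OrderAccess entry points are function templates that deduce the operand type from the destination pointer, so call sites no longer need to cast fields to jint*, jubyte*, and so on. The following is a minimal standalone sketch of that idea, not HotSpot code; the toy namespace and the use of std::atomic as the backing store are illustrative assumptions.

#include <atomic>
#include <cstdio>

namespace toy {
// The element type T is deduced from 'dest'; callers pass the field's
// natural type and never cast the pointer.
template<typename T, typename V>
T add(V add_value, std::atomic<T>* dest) {
  return dest->fetch_add(static_cast<T>(add_value)) + static_cast<T>(add_value);
}

template<typename T>
T load_acquire(const std::atomic<T>* src) {
  return src->load(std::memory_order_acquire);
}

template<typename T, typename V>
void release_store(std::atomic<T>* dest, V value) {
  dest->store(static_cast<T>(value), std::memory_order_release);
}
}  // namespace toy

int main() {
  std::atomic<unsigned char> unloading_clock{0};
  std::atomic<unsigned int>  started{0};

  // The old style needed casts such as (volatile jubyte*)&_unloading_clock;
  // here the templates deduce unsigned char / unsigned int from the pointer.
  toy::release_store(&unloading_clock, 5);
  unsigned int num_started = toy::add(1u, &started);
  std::printf("clock=%u started=%u\n",
              (unsigned)toy::load_acquire(&unloading_clock), num_started);
  return 0;
}
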
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2015, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2015, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -439,11 +439,11 @@ void CompiledMethod::increase_unloading_clock() {
 }

 void CompiledMethod::set_unloading_clock(unsigned char unloading_clock) {
-  OrderAccess::release_store((volatile jubyte*)&_unloading_clock, unloading_clock);
+  OrderAccess::release_store(&_unloading_clock, unloading_clock);
 }

 unsigned char CompiledMethod::unloading_clock() {
-  return (unsigned char)OrderAccess::load_acquire((volatile jubyte*)&_unloading_clock);
+  return OrderAccess::load_acquire(&_unloading_clock);
 }

 // Processing of oop references should have been sufficient to keep
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2015, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -270,5 +270,5 @@ bool DependencyContext::find_stale_entries() {
 #endif //PRODUCT

 int nmethodBucket::decrement() {
-  return Atomic::add(-1, (volatile int *)&_count);
+  return Atomic::sub(1, &_count);
 }
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -204,7 +204,7 @@ public:
     if (_iter_states[region] != Unclaimed) {
       return false;
     }
-    jint res = Atomic::cmpxchg(Claimed, (jint*)(&_iter_states[region]), Unclaimed);
+    G1RemsetIterState res = Atomic::cmpxchg(Claimed, &_iter_states[region], Unclaimed);
     return (res == Unclaimed);
   }

@@ -214,7 +214,7 @@ public:
     if (iter_is_complete(region)) {
       return false;
     }
-    jint res = Atomic::cmpxchg(Complete, (jint*)(&_iter_states[region]), Claimed);
+    G1RemsetIterState res = Atomic::cmpxchg(Complete, &_iter_states[region], Claimed);
     return (res == Claimed);
   }

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2005, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2005, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -538,7 +538,7 @@ inline void ParallelCompactData::RegionData::decrement_destination_count()
 {
   assert(_dc_and_los < dc_claimed, "already claimed");
   assert(_dc_and_los >= dc_one, "count would go negative");
-  Atomic::add((int)dc_mask, (volatile int*)&_dc_and_los);
+  Atomic::add(dc_mask, &_dc_and_los);
 }

 inline HeapWord* ParallelCompactData::RegionData::data_location() const
@@ -578,7 +578,7 @@ inline bool ParallelCompactData::RegionData::claim_unsafe()
 inline void ParallelCompactData::RegionData::add_live_obj(size_t words)
 {
   assert(words <= (size_t)los_mask - live_obj_size(), "overflow");
-  Atomic::add((int) words, (volatile int*) &_dc_and_los);
+  Atomic::add(static_cast<region_sz_t>(words), &_dc_and_los);
 }

 inline void ParallelCompactData::RegionData::set_highest_ref(HeapWord* addr)
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -153,7 +153,7 @@ bool
 ParallelTaskTerminator::offer_termination(TerminatorTerminator* terminator) {
   assert(_n_threads > 0, "Initialization is incorrect");
   assert(_offered_termination < _n_threads, "Invariant");
-  Atomic::inc((int *)&_offered_termination);
+  Atomic::inc(&_offered_termination);

   uint yield_count = 0;
   // Number of hard spin loops done since last yield
@@ -228,7 +228,7 @@ ParallelTaskTerminator::offer_termination(TerminatorTerminator* terminator) {
 #endif
     if (peek_in_queue_set() ||
         (terminator != NULL && terminator->should_exit_termination())) {
-      Atomic::dec((int *)&_offered_termination);
+      Atomic::dec(&_offered_termination);
       assert(_offered_termination < _n_threads, "Invariant");
       return false;
     }
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2015, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2015, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -205,7 +205,7 @@ bool GenericTaskQueue<E, F, N>::pop_global(volatile E& t) {
 #if !(defined SPARC || defined IA32 || defined AMD64)
   OrderAccess::fence();
 #endif
-  uint localBot = OrderAccess::load_acquire((volatile juint*)&_bottom);
+  uint localBot = OrderAccess::load_acquire(&_bottom);
   uint n_elems = size(localBot, oldAge.top());
   if (n_elems == 0) {
     return false;
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -157,7 +157,7 @@ public:
     // Wait for the coordinator to dispatch a task.
     _start_semaphore->wait();

-    uint num_started = (uint) Atomic::add(1, (volatile jint*)&_started);
+    uint num_started = Atomic::add(1u, &_started);

     // Subtract one to get a zero-indexed worker id.
     uint worker_id = num_started - 1;
@@ -168,7 +168,7 @@ public:
   void worker_done_with_task() {
     // Mark that the worker is done with the task.
     // The worker is not allowed to read the state variables after this line.
-    uint not_finished = (uint) Atomic::add(-1, (volatile jint*)&_not_finished);
+    uint not_finished = Atomic::sub(1u, &_not_finished);

     // The last worker signals to the coordinator that all work is completed.
     if (not_finished == 0) {
@@ -439,7 +439,7 @@ bool SubTasksDone::is_task_claimed(uint t) {
 #ifdef ASSERT
   if (!res) {
     assert(_claimed < _n_tasks, "Too many tasks claimed; missing clear?");
-    Atomic::inc((volatile jint*) &_claimed);
+    Atomic::inc(&_claimed);
   }
 #endif
   return res;
@@ -38,9 +38,9 @@
 inline void inc_stat_counter(volatile julong* dest, julong add_value) {
 #if defined(SPARC) || defined(X86)
   // Sparc and X86 have atomic jlong (8 bytes) instructions
-  julong value = Atomic::load((volatile jlong*)dest);
+  julong value = Atomic::load(dest);
   value += add_value;
-  Atomic::store((jlong)value, (volatile jlong*)dest);
+  Atomic::store(value, dest);
 #else
   // possible word-tearing during load/store
   *dest += add_value;
@@ -85,7 +85,7 @@ julong os::num_frees = 0; // # of calls to free
 julong os::free_bytes = 0; // # of bytes freed
 #endif

-static juint cur_malloc_words = 0; // current size for MallocMaxTestWords
+static size_t cur_malloc_words = 0; // current size for MallocMaxTestWords

 void os_init_globals() {
   // Called from init_globals().
@@ -629,12 +629,12 @@ static void verify_memory(void* ptr) {
 //
 static bool has_reached_max_malloc_test_peak(size_t alloc_size) {
   if (MallocMaxTestWords > 0) {
-    jint words = (jint)(alloc_size / BytesPerWord);
+    size_t words = (alloc_size / BytesPerWord);

     if ((cur_malloc_words + words) > MallocMaxTestWords) {
       return true;
     }
-    Atomic::add(words, (volatile jint *)&cur_malloc_words);
+    Atomic::add(words, &cur_malloc_words);
   }
   return false;
 }
@@ -1826,8 +1826,7 @@ void os::realign_memory(char *addr, size_t bytes, size_t alignment_hint) {
 os::SuspendResume::State os::SuspendResume::switch_state(os::SuspendResume::State from,
                                                          os::SuspendResume::State to)
 {
-  os::SuspendResume::State result =
-    (os::SuspendResume::State) Atomic::cmpxchg((jint) to, (jint *) &_state, (jint) from);
+  os::SuspendResume::State result = Atomic::cmpxchg(to, &_state, from);
   if (result == from) {
     // success
     return to;
@@ -27,6 +27,8 @@

 #include "jvm.h"
 #include "jvmtifiles/jvmti.h"
+#include "metaprogramming/isRegisteredEnum.hpp"
+#include "metaprogramming/integralConstant.hpp"
 #include "runtime/extendedPC.hpp"
 #include "runtime/handles.hpp"
 #include "utilities/macros.hpp"
@@ -1006,6 +1008,10 @@ class os: AllStatic {

 };

+#ifndef _WINDOWS
+template<> struct IsRegisteredEnum<os::SuspendResume::State> : public TrueType {};
+#endif // !_WINDOWS
+
 // Note that "PAUSE" is almost always used with synchronization
 // so arguably we should provide Atomic::SpinPause() instead
 // of the global SpinPause() with C linkage.
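
The os.hpp hunk above registers os::SuspendResume::State with the metaprogramming layer so that the templated Atomic::cmpxchg can operate on the enum directly (see the os.cpp switch_state hunk earlier). Below is a rough standalone sketch of how such a registration trait can gate a templated cmpxchg; the std::atomic/std::enable_if machinery and the local State enum (its member names only echo HotSpot's) are stand-ins, not HotSpot's implementation.

#include <atomic>
#include <type_traits>

// Enums must be explicitly opted in before the atomic template accepts them.
template<typename T> struct IsRegisteredEnum : std::false_type {};

enum class State { SR_RUNNING, SR_SUSPEND_REQUEST, SR_SUSPENDED };
template<> struct IsRegisteredEnum<State> : std::true_type {};

// Only registered enums compile here; the deduced T removes any need to cast
// the enum value and its address to jint / jint*.
template<typename T>
typename std::enable_if<IsRegisteredEnum<T>::value, T>::type
cmpxchg(T exchange_value, std::atomic<T>* dest, T compare_value) {
  dest->compare_exchange_strong(compare_value, exchange_value);
  return compare_value;  // holds the previous value on success and on failure
}

int main() {
  std::atomic<State> state{State::SR_RUNNING};
  State prev = cmpxchg(State::SR_SUSPEND_REQUEST, &state, State::SR_RUNNING);
  return prev == State::SR_RUNNING ? 0 : 1;
}
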
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2012, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2012, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -30,24 +30,18 @@
 #include "runtime/thread.hpp"

 inline void Thread::set_suspend_flag(SuspendFlags f) {
-  assert(sizeof(jint) == sizeof(_suspend_flags), "size mismatch");
   uint32_t flags;
   do {
     flags = _suspend_flags;
   }
-  while (Atomic::cmpxchg((jint)(flags | f),
-                         (volatile jint*)&_suspend_flags,
-                         (jint)flags) != (jint)flags);
+  while (Atomic::cmpxchg((flags | f), &_suspend_flags, flags) != flags);
 }
 inline void Thread::clear_suspend_flag(SuspendFlags f) {
-  assert(sizeof(jint) == sizeof(_suspend_flags), "size mismatch");
   uint32_t flags;
   do {
     flags = _suspend_flags;
   }
-  while (Atomic::cmpxchg((jint)(flags & ~f),
-                         (volatile jint*)&_suspend_flags,
-                         (jint)flags) != (jint)flags);
+  while (Atomic::cmpxchg((flags & ~f), &_suspend_flags, flags) != flags);
 }

 inline void Thread::set_has_async_exception() {
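
The thread.inline.hpp hunk keeps the same compare-and-swap retry loop, only without the jint casts: the flags word is re-read and the exchange retried until no other thread has modified it in between. A hedged standalone sketch of that loop shape using std::atomic follows; the flag value and globals here are illustrative, not HotSpot's.

#include <atomic>
#include <cstdint>

// Illustrative flag bit; HotSpot's SuspendFlags has more members.
enum SuspendFlags : uint32_t { SF_HAS_ASYNC_EXCEPTION = 0x00000001U };

static std::atomic<uint32_t> g_suspend_flags{0};

void set_suspend_flag(SuspendFlags f) {
  uint32_t flags = g_suspend_flags.load();
  // Retry until the word is unchanged between the read and the exchange;
  // on failure compare_exchange_weak refreshes 'flags' with the current value.
  while (!g_suspend_flags.compare_exchange_weak(flags, flags | f)) {
  }
}

void clear_suspend_flag(SuspendFlags f) {
  uint32_t flags = g_suspend_flags.load();
  while (!g_suspend_flags.compare_exchange_weak(flags, flags & ~f)) {
  }
}

int main() {
  set_suspend_flag(SF_HAS_ASYNC_EXCEPTION);
  clear_suspend_flag(SF_HAS_ASYNC_EXCEPTION);
  return g_suspend_flags.load() == 0 ? 0 : 1;
}
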
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2014, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2014, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -151,7 +151,7 @@ class MallocSiteTable : AllStatic {

   ~AccessLock() {
     if (_lock_state == SharedLock) {
-      Atomic::dec((volatile jint*)_lock);
+      Atomic::dec(_lock);
     }
   }
   // Acquire shared lock.
@@ -159,7 +159,7 @@ class MallocSiteTable : AllStatic {
   inline bool sharedLock() {
     jint res = Atomic::add(1, _lock);
     if (res < 0) {
-      Atomic::add(-1, _lock);
+      Atomic::dec(_lock);
       return false;
     }
     _lock_state = SharedLock;
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2014, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2014, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -66,8 +66,6 @@ class MemoryCounter VALUE_OBJ_CLASS_SPEC {
     assert(_size >= sz, "deallocation > allocated");
     Atomic::dec(&_count);
     if (sz > 0) {
-      // unary minus operator applied to unsigned type, result still unsigned
-#pragma warning(suppress: 4146)
       Atomic::sub(sz, &_size);
     }
   }
@@ -120,7 +120,7 @@ void ThreadService::add_thread(JavaThread* thread, bool daemon) {
 }

 void ThreadService::remove_thread(JavaThread* thread, bool daemon) {
-  Atomic::dec((jint*) &_exiting_threads_count);
+  Atomic::dec(&_exiting_threads_count);

   if (thread->is_hidden_from_external_view() ||
       thread->is_jvmti_agent_thread()) {
@@ -131,17 +131,17 @@ void ThreadService::remove_thread(JavaThread* thread, bool daemon) {

   if (daemon) {
     _daemon_threads_count->set_value(_daemon_threads_count->get_value() - 1);
-    Atomic::dec((jint*) &_exiting_daemon_threads_count);
+    Atomic::dec(&_exiting_daemon_threads_count);
   }
 }

 void ThreadService::current_thread_exiting(JavaThread* jt) {
   assert(jt == JavaThread::current(), "Called by current thread");
-  Atomic::inc((jint*) &_exiting_threads_count);
+  Atomic::inc(&_exiting_threads_count);

   oop threadObj = jt->threadObj();
   if (threadObj != NULL && java_lang_Thread::is_daemon(threadObj)) {
-    Atomic::inc((jint*) &_exiting_daemon_threads_count);
+    Atomic::inc(&_exiting_daemon_threads_count);
   }
 }