Lana Steuck 2018-01-18 18:58:46 +00:00
commit 8f528fba93
608 changed files with 7528 additions and 3300 deletions

View file

@ -1550,7 +1550,6 @@ public class ArrayList<E> extends AbstractList<E>
setBit(deathRow, i - beg);
if (modCount != expectedModCount)
throw new ConcurrentModificationException();
expectedModCount++;
modCount++;
int w = beg;
for (i = beg; i < end; i++)
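For context, ArrayList's bulkRemove records the elements to be deleted in a long[] "deathRow" bitset (one bit per element) before compacting the array in the loop that follows. A minimal sketch of the bitset helpers this idiom relies on (the setBit call above and its isClear counterpart; simplified, shown only for illustration):

class BitsetSketch {
    // 64 flags per long word: i >> 6 picks the word, the low 6 bits of i pick the bit.
    static void setBit(long[] bits, int i) {
        bits[i >> 6] |= 1L << i;        // long shifts use only the low 6 bits of i
    }
    static boolean isClear(long[] bits, int i) {
        return (bits[i >> 6] & (1L << i)) == 0;
    }
}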

View file

@ -26,6 +26,7 @@
package java.util;
import jdk.internal.HotSpotIntrinsicCandidate;
import jdk.internal.util.ArraysSupport;
import java.lang.reflect.Array;
import java.util.concurrent.ForkJoinPool;

View file

@ -1,545 +0,0 @@
/*
* Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation. Oracle designates this
* particular file as subject to the "Classpath" exception as provided
* by Oracle in the LICENSE file that accompanied this code.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
package java.util;
import jdk.internal.HotSpotIntrinsicCandidate;
import jdk.internal.misc.Unsafe;
/**
* Utility methods to find a mismatch between two primitive arrays.
*
* <p>Array equality and lexicographical comparison can be built on top of
* array mismatch functionality.
*
* <p>The mismatch method implementation, {@link #vectorizedMismatch}, leverages
* vector-based techniques to access and compare the contents of two arrays.
* The Java implementation uses {@code Unsafe.getLongUnaligned} to access the
* content of an array, thus access is supported on platforms that do not
* support unaligned access. For a byte[] array, 8 bytes (64 bits) can be
* accessed and compared as a unit rather than individually, which increases
* the performance when the method is compiled by the HotSpot VM. On supported
* platforms the mismatch implementation is intrinsified to leverage SIMD
* instructions. So for a byte[] array, 16 bytes (128 bits), 32 bytes
* (256 bits), and perhaps in the future even 64 bytes (512 bits), platform
* permitting, can be accessed and compared as a unit, which further increases
* the performance over the Java implementation.
*
* <p>None of the mismatch methods perform array bounds checks. It is the
* responsibility of the caller (direct or otherwise) to perform such checks
* before calling this method.
*/
class ArraysSupport {
static final Unsafe U = Unsafe.getUnsafe();
private static final boolean BIG_ENDIAN = U.isBigEndian();
private static final int LOG2_ARRAY_BOOLEAN_INDEX_SCALE = exactLog2(Unsafe.ARRAY_BOOLEAN_INDEX_SCALE);
private static final int LOG2_ARRAY_BYTE_INDEX_SCALE = exactLog2(Unsafe.ARRAY_BYTE_INDEX_SCALE);
private static final int LOG2_ARRAY_CHAR_INDEX_SCALE = exactLog2(Unsafe.ARRAY_CHAR_INDEX_SCALE);
private static final int LOG2_ARRAY_SHORT_INDEX_SCALE = exactLog2(Unsafe.ARRAY_SHORT_INDEX_SCALE);
private static final int LOG2_ARRAY_INT_INDEX_SCALE = exactLog2(Unsafe.ARRAY_INT_INDEX_SCALE);
private static final int LOG2_ARRAY_LONG_INDEX_SCALE = exactLog2(Unsafe.ARRAY_LONG_INDEX_SCALE);
private static final int LOG2_ARRAY_FLOAT_INDEX_SCALE = exactLog2(Unsafe.ARRAY_FLOAT_INDEX_SCALE);
private static final int LOG2_ARRAY_DOUBLE_INDEX_SCALE = exactLog2(Unsafe.ARRAY_DOUBLE_INDEX_SCALE);
private static final int LOG2_BYTE_BIT_SIZE = exactLog2(Byte.SIZE);
private static int exactLog2(int scale) {
if ((scale & (scale - 1)) != 0)
throw new Error("data type scale not a power of two");
return Integer.numberOfTrailingZeros(scale);
}
private ArraysSupport() {}
/**
* Find the relative index of the first mismatching pair of elements in two
* primitive arrays of the same component type. Pairs of elements will be
* tested in order relative to given offsets into both arrays.
*
* <p>This method does not perform type checks or bounds checks. It is the
* responsibility of the caller to perform such checks before calling this
* method.
*
* <p>The given offsets, in bytes, need not be aligned according to the
* given log<sub>2</sub> size of the array elements. More specifically, an
* offset modulo that size need not be zero.
*
* @param a the first array to be tested for mismatch, or {@code null} for
* direct memory access
* @param aOffset the relative offset, in bytes, from the base address of
* the first array to test from, otherwise if the first array is
* {@code null}, an absolute address pointing to the first element to test.
* @param b the second array to be tested for mismatch, or {@code null} for
* direct memory access
* @param bOffset the relative offset, in bytes, from the base address of
* the second array to test from, otherwise if the second array is
* {@code null}, an absolute address pointing to the first element to test.
* @param length the number of array elements to test
* @param log2ArrayIndexScale log<sub>2</sub> of the array index scale, that
* corresponds to the size, in bytes, of an array element.
* @return if a mismatch is found, a relative index, between 0 (inclusive)
* and {@code length} (exclusive), of the first mismatching pair of elements
* in the two arrays. Otherwise, if a mismatch is not found, the bitwise
* complement of the number of remaining pairs of elements to be checked in
* the tail of the two arrays.
*/
@HotSpotIntrinsicCandidate
static int vectorizedMismatch(Object a, long aOffset,
Object b, long bOffset,
int length,
int log2ArrayIndexScale) {
// assert a.getClass().isArray();
// assert b.getClass().isArray();
// assert 0 <= length <= sizeOf(a)
// assert 0 <= length <= sizeOf(b)
// assert 0 <= log2ArrayIndexScale <= 3
int log2ValuesPerWidth = LOG2_ARRAY_LONG_INDEX_SCALE - log2ArrayIndexScale;
int wi = 0;
for (; wi < length >> log2ValuesPerWidth; wi++) {
long bi = ((long) wi) << LOG2_ARRAY_LONG_INDEX_SCALE;
long av = U.getLongUnaligned(a, aOffset + bi);
long bv = U.getLongUnaligned(b, bOffset + bi);
if (av != bv) {
long x = av ^ bv;
int o = BIG_ENDIAN
? Long.numberOfLeadingZeros(x) >> (LOG2_BYTE_BIT_SIZE + log2ArrayIndexScale)
: Long.numberOfTrailingZeros(x) >> (LOG2_BYTE_BIT_SIZE + log2ArrayIndexScale);
return (wi << log2ValuesPerWidth) + o;
}
}
// Calculate the tail of remaining elements to check
int tail = length - (wi << log2ValuesPerWidth);
if (log2ArrayIndexScale < LOG2_ARRAY_INT_INDEX_SCALE) {
int wordTail = 1 << (LOG2_ARRAY_INT_INDEX_SCALE - log2ArrayIndexScale);
// Handle 4 bytes or 2 chars in the tail using int width
if (tail >= wordTail) {
long bi = ((long) wi) << LOG2_ARRAY_LONG_INDEX_SCALE;
int av = U.getIntUnaligned(a, aOffset + bi);
int bv = U.getIntUnaligned(b, bOffset + bi);
if (av != bv) {
int x = av ^ bv;
int o = BIG_ENDIAN
? Integer.numberOfLeadingZeros(x) >> (LOG2_BYTE_BIT_SIZE + log2ArrayIndexScale)
: Integer.numberOfTrailingZeros(x) >> (LOG2_BYTE_BIT_SIZE + log2ArrayIndexScale);
return (wi << log2ValuesPerWidth) + o;
}
tail -= wordTail;
}
return ~tail;
}
else {
return ~tail;
}
}
// Booleans
// Each boolean element takes up one byte
static int mismatch(boolean[] a,
boolean[] b,
int length) {
int i = 0;
if (length > 7) {
i = vectorizedMismatch(
a, Unsafe.ARRAY_BOOLEAN_BASE_OFFSET,
b, Unsafe.ARRAY_BOOLEAN_BASE_OFFSET,
length, LOG2_ARRAY_BOOLEAN_INDEX_SCALE);
if (i >= 0)
return i;
i = length - ~i;
}
for (; i < length; i++) {
if (a[i] != b[i])
return i;
}
return -1;
}
static int mismatch(boolean[] a, int aFromIndex,
boolean[] b, int bFromIndex,
int length) {
int i = 0;
if (length > 7) {
int aOffset = Unsafe.ARRAY_BOOLEAN_BASE_OFFSET + aFromIndex;
int bOffset = Unsafe.ARRAY_BOOLEAN_BASE_OFFSET + bFromIndex;
i = vectorizedMismatch(
a, aOffset,
b, bOffset,
length, LOG2_ARRAY_BOOLEAN_INDEX_SCALE);
if (i >= 0)
return i;
i = length - ~i;
}
for (; i < length; i++) {
if (a[aFromIndex + i] != b[bFromIndex + i])
return i;
}
return -1;
}
// Bytes
/**
* Find the index of a mismatch between two arrays.
*
* <p>This method does not perform bounds checks. It is the responsibility
* of the caller to perform such bounds checks before calling this method.
*
* @param a the first array to be tested for a mismatch
* @param b the second array to be tested for a mismatch
* @param length the number of bytes from each array to check
* @return the index of a mismatch between the two arrays, otherwise -1 if
* no mismatch. The index will be within the range of (inclusive) 0 to
* (exclusive) the smaller of the two array lengths.
*/
static int mismatch(byte[] a,
byte[] b,
int length) {
// ISSUE: defer to index receiving methods if performance is good
// assert length <= a.length
// assert length <= b.length
int i = 0;
if (length > 7) {
i = vectorizedMismatch(
a, Unsafe.ARRAY_BYTE_BASE_OFFSET,
b, Unsafe.ARRAY_BYTE_BASE_OFFSET,
length, LOG2_ARRAY_BYTE_INDEX_SCALE);
if (i >= 0)
return i;
// Align to tail
i = length - ~i;
// assert i >= 0 && i <= 7;
}
// Tail < 8 bytes
for (; i < length; i++) {
if (a[i] != b[i])
return i;
}
return -1;
}
/**
* Find the relative index of a mismatch between two arrays starting from
* given indexes.
*
* <p>This method does not perform bounds checks. It is the responsibility
* of the caller to perform such bounds checks before calling this method.
*
* @param a the first array to be tested for a mismatch
* @param aFromIndex the index of the first element (inclusive) in the first
* array to be compared
* @param b the second array to be tested for a mismatch
* @param bFromIndex the index of the first element (inclusive) in the
* second array to be compared
* @param length the number of bytes from each array to check
* @return the relative index of a mismatch between the two arrays,
* otherwise -1 if no mismatch. The index will be within the range of
* (inclusive) 0 to (exclusive) the smaller of the two array bounds.
*/
static int mismatch(byte[] a, int aFromIndex,
byte[] b, int bFromIndex,
int length) {
// assert 0 <= aFromIndex < a.length
// assert 0 <= aFromIndex + length <= a.length
// assert 0 <= bFromIndex < b.length
// assert 0 <= bFromIndex + length <= b.length
// assert length >= 0
int i = 0;
if (length > 7) {
int aOffset = Unsafe.ARRAY_BYTE_BASE_OFFSET + aFromIndex;
int bOffset = Unsafe.ARRAY_BYTE_BASE_OFFSET + bFromIndex;
i = vectorizedMismatch(
a, aOffset,
b, bOffset,
length, LOG2_ARRAY_BYTE_INDEX_SCALE);
if (i >= 0)
return i;
i = length - ~i;
}
for (; i < length; i++) {
if (a[aFromIndex + i] != b[bFromIndex + i])
return i;
}
return -1;
}
// Chars
static int mismatch(char[] a,
char[] b,
int length) {
int i = 0;
if (length > 3) {
i = vectorizedMismatch(
a, Unsafe.ARRAY_CHAR_BASE_OFFSET,
b, Unsafe.ARRAY_CHAR_BASE_OFFSET,
length, LOG2_ARRAY_CHAR_INDEX_SCALE);
if (i >= 0)
return i;
i = length - ~i;
}
for (; i < length; i++) {
if (a[i] != b[i])
return i;
}
return -1;
}
static int mismatch(char[] a, int aFromIndex,
char[] b, int bFromIndex,
int length) {
int i = 0;
if (length > 3) {
int aOffset = Unsafe.ARRAY_CHAR_BASE_OFFSET + (aFromIndex << LOG2_ARRAY_CHAR_INDEX_SCALE);
int bOffset = Unsafe.ARRAY_CHAR_BASE_OFFSET + (bFromIndex << LOG2_ARRAY_CHAR_INDEX_SCALE);
i = vectorizedMismatch(
a, aOffset,
b, bOffset,
length, LOG2_ARRAY_CHAR_INDEX_SCALE);
if (i >= 0)
return i;
i = length - ~i;
}
for (; i < length; i++) {
if (a[aFromIndex + i] != b[bFromIndex + i])
return i;
}
return -1;
}
// Shorts
static int mismatch(short[] a,
short[] b,
int length) {
int i = 0;
if (length > 3) {
i = vectorizedMismatch(
a, Unsafe.ARRAY_SHORT_BASE_OFFSET,
b, Unsafe.ARRAY_SHORT_BASE_OFFSET,
length, LOG2_ARRAY_SHORT_INDEX_SCALE);
if (i >= 0)
return i;
i = length - ~i;
}
for (; i < length; i++) {
if (a[i] != b[i])
return i;
}
return -1;
}
static int mismatch(short[] a, int aFromIndex,
short[] b, int bFromIndex,
int length) {
int i = 0;
if (length > 3) {
int aOffset = Unsafe.ARRAY_SHORT_BASE_OFFSET + (aFromIndex << LOG2_ARRAY_SHORT_INDEX_SCALE);
int bOffset = Unsafe.ARRAY_SHORT_BASE_OFFSET + (bFromIndex << LOG2_ARRAY_SHORT_INDEX_SCALE);
i = vectorizedMismatch(
a, aOffset,
b, bOffset,
length, LOG2_ARRAY_SHORT_INDEX_SCALE);
if (i >= 0)
return i;
i = length - ~i;
}
for (; i < length; i++) {
if (a[aFromIndex + i] != b[bFromIndex + i])
return i;
}
return -1;
}
// Ints
static int mismatch(int[] a,
int[] b,
int length) {
int i = 0;
if (length > 1) {
i = vectorizedMismatch(
a, Unsafe.ARRAY_INT_BASE_OFFSET,
b, Unsafe.ARRAY_INT_BASE_OFFSET,
length, LOG2_ARRAY_INT_INDEX_SCALE);
if (i >= 0)
return i;
i = length - ~i;
}
for (; i < length; i++) {
if (a[i] != b[i])
return i;
}
return -1;
}
static int mismatch(int[] a, int aFromIndex,
int[] b, int bFromIndex,
int length) {
int i = 0;
if (length > 1) {
int aOffset = Unsafe.ARRAY_INT_BASE_OFFSET + (aFromIndex << LOG2_ARRAY_INT_INDEX_SCALE);
int bOffset = Unsafe.ARRAY_INT_BASE_OFFSET + (bFromIndex << LOG2_ARRAY_INT_INDEX_SCALE);
i = vectorizedMismatch(
a, aOffset,
b, bOffset,
length, LOG2_ARRAY_INT_INDEX_SCALE);
if (i >= 0)
return i;
i = length - ~i;
}
for (; i < length; i++) {
if (a[aFromIndex + i] != b[bFromIndex + i])
return i;
}
return -1;
}
// Floats
static int mismatch(float[] a,
float[] b,
int length) {
return mismatch(a, 0, b, 0, length);
}
static int mismatch(float[] a, int aFromIndex,
float[] b, int bFromIndex,
int length) {
int i = 0;
if (length > 1) {
int aOffset = Unsafe.ARRAY_FLOAT_BASE_OFFSET + (aFromIndex << LOG2_ARRAY_FLOAT_INDEX_SCALE);
int bOffset = Unsafe.ARRAY_FLOAT_BASE_OFFSET + (bFromIndex << LOG2_ARRAY_FLOAT_INDEX_SCALE);
i = vectorizedMismatch(
a, aOffset,
b, bOffset,
length, LOG2_ARRAY_FLOAT_INDEX_SCALE);
// Mismatched
if (i >= 0) {
// Check if mismatch is not associated with two NaN values
if (!Float.isNaN(a[aFromIndex + i]) || !Float.isNaN(b[bFromIndex + i]))
return i;
// Mismatch on two different NaN values that are normalized to match
// Fall back to slow mechanism
// ISSUE: Consider looping over vectorizedMismatch adjusting ranges
// However, requires that returned value be relative to input ranges
i++;
}
// Matched
else {
i = length - ~i;
}
}
for (; i < length; i++) {
if (Float.floatToIntBits(a[aFromIndex + i]) != Float.floatToIntBits(b[bFromIndex + i]))
return i;
}
return -1;
}
// 64 bit sizes
// Long
static int mismatch(long[] a,
long[] b,
int length) {
if (length == 0) {
return -1;
}
int i = vectorizedMismatch(
a, Unsafe.ARRAY_LONG_BASE_OFFSET,
b, Unsafe.ARRAY_LONG_BASE_OFFSET,
length, LOG2_ARRAY_LONG_INDEX_SCALE);
return i >= 0 ? i : -1;
}
static int mismatch(long[] a, int aFromIndex,
long[] b, int bFromIndex,
int length) {
if (length == 0) {
return -1;
}
int aOffset = Unsafe.ARRAY_LONG_BASE_OFFSET + (aFromIndex << LOG2_ARRAY_LONG_INDEX_SCALE);
int bOffset = Unsafe.ARRAY_LONG_BASE_OFFSET + (bFromIndex << LOG2_ARRAY_LONG_INDEX_SCALE);
int i = vectorizedMismatch(
a, aOffset,
b, bOffset,
length, LOG2_ARRAY_LONG_INDEX_SCALE);
return i >= 0 ? i : -1;
}
// Double
static int mismatch(double[] a,
double[] b,
int length) {
return mismatch(a, 0, b, 0, length);
}
static int mismatch(double[] a, int aFromIndex,
double[] b, int bFromIndex,
int length) {
if (length == 0) {
return -1;
}
int aOffset = Unsafe.ARRAY_DOUBLE_BASE_OFFSET + (aFromIndex << LOG2_ARRAY_DOUBLE_INDEX_SCALE);
int bOffset = Unsafe.ARRAY_DOUBLE_BASE_OFFSET + (bFromIndex << LOG2_ARRAY_DOUBLE_INDEX_SCALE);
int i = vectorizedMismatch(
a, aOffset,
b, bOffset,
length, LOG2_ARRAY_DOUBLE_INDEX_SCALE);
if (i >= 0) {
// Check if mismatch is not associated with two NaN values
if (!Double.isNaN(a[aFromIndex + i]) || !Double.isNaN(b[bFromIndex + i]))
return i;
// Mismatch on two different NaN values that are normalized to match
// Fall back to slow mechanism
// ISSUE: Consider looping over vectorizedMismatch adjusting ranges
// However, requires that returned value be relative to input ranges
i++;
for (; i < length; i++) {
if (Double.doubleToLongBits(a[aFromIndex + i]) != Double.doubleToLongBits(b[bFromIndex + i]))
return i;
}
}
return -1;
}
}
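This commit deletes the java.util copy of ArraysSupport; the class now lives in jdk.internal.util (see the new import in the Arrays diff above). As a rough illustration of the class javadoc's point that equality and lexicographic comparison can be layered on the mismatch primitive, a sketch assuming package-level access to ArraysSupport (not the actual JDK code):

class MismatchBasedOps {
    // Equal iff the lengths match and no mismatching index is found.
    static boolean equals(byte[] a, byte[] b) {
        return a.length == b.length && ArraysSupport.mismatch(a, b, a.length) < 0;
    }
    // Compare at the first mismatching index; if the common prefix matches, the shorter array is smaller.
    static int compare(byte[] a, byte[] b) {
        int i = ArraysSupport.mismatch(a, b, Math.min(a.length, b.length));
        return (i >= 0) ? Byte.compare(a[i], b[i]) : a.length - b.length;
    }
}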

View file

@ -1023,7 +1023,6 @@ public class Vector<E>
setBit(deathRow, i - beg);
if (modCount != expectedModCount)
throw new ConcurrentModificationException();
expectedModCount++;
modCount++;
int w = beg;
for (i = beg; i < end; i++)

View file

@ -245,8 +245,7 @@ public abstract class AbstractExecutorService implements ExecutorService {
Future<T> f = futures.get(i);
if (!f.isDone()) {
try { f.get(); }
catch (CancellationException ignore) {}
catch (ExecutionException ignore) {}
catch (CancellationException | ExecutionException ignore) {}
}
}
return futures;
@ -283,8 +282,7 @@ public abstract class AbstractExecutorService implements ExecutorService {
Future<T> f = futures.get(j);
if (!f.isDone()) {
try { f.get(deadline - System.nanoTime(), NANOSECONDS); }
catch (CancellationException ignore) {}
catch (ExecutionException ignore) {}
catch (CancellationException | ExecutionException ignore) {}
catch (TimeoutException timedOut) {
break timedOut;
}
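The two hunks above simply collapse a pair of identical catch blocks into a single Java 7 multi-catch clause. A minimal stand-alone illustration (QuietAwait and awaitQuietly are hypothetical names):

import java.util.concurrent.CancellationException;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.Future;

class QuietAwait {
    // Mirrors the cleanup loops in invokeAll: wait for completion, swallow the two benign outcomes.
    static void awaitQuietly(Future<?> f) throws InterruptedException {
        try {
            f.get();
        } catch (CancellationException | ExecutionException ignore) {
            // one handler covers both exception types; 'ignore' is implicitly final
        }
    }
}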

View file

@ -717,12 +717,12 @@ public class ConcurrentHashMap<K,V> extends AbstractMap<K,V>
*/
static Class<?> comparableClassFor(Object x) {
if (x instanceof Comparable) {
Class<?> c; Type[] ts, as; Type t; ParameterizedType p;
Class<?> c; Type[] ts, as; ParameterizedType p;
if ((c = x.getClass()) == String.class) // bypass checks
return c;
if ((ts = c.getGenericInterfaces()) != null) {
for (int i = 0; i < ts.length; ++i) {
if (((t = ts[i]) instanceof ParameterizedType) &&
for (Type t : ts) {
if ((t instanceof ParameterizedType) &&
((p = (ParameterizedType)t).getRawType() ==
Comparable.class) &&
(as = p.getActualTypeArguments()) != null &&
@ -2328,15 +2328,15 @@ public class ConcurrentHashMap<K,V> extends AbstractMap<K,V>
* @param check if <0, don't check resize, if <= 1 only check if uncontended
*/
private final void addCount(long x, int check) {
CounterCell[] as; long b, s;
if ((as = counterCells) != null ||
CounterCell[] cs; long b, s;
if ((cs = counterCells) != null ||
!U.compareAndSetLong(this, BASECOUNT, b = baseCount, s = b + x)) {
CounterCell a; long v; int m;
CounterCell c; long v; int m;
boolean uncontended = true;
if (as == null || (m = as.length - 1) < 0 ||
(a = as[ThreadLocalRandom.getProbe() & m]) == null ||
if (cs == null || (m = cs.length - 1) < 0 ||
(c = cs[ThreadLocalRandom.getProbe() & m]) == null ||
!(uncontended =
U.compareAndSetLong(a, CELLVALUE, v = a.value, v + x))) {
U.compareAndSetLong(c, CELLVALUE, v = c.value, v + x))) {
fullAddCount(x, uncontended);
return;
}
@ -2574,13 +2574,12 @@ public class ConcurrentHashMap<K,V> extends AbstractMap<K,V>
}
final long sumCount() {
CounterCell[] as = counterCells; CounterCell a;
CounterCell[] cs = counterCells;
long sum = baseCount;
if (as != null) {
for (int i = 0; i < as.length; ++i) {
if ((a = as[i]) != null)
sum += a.value;
}
if (cs != null) {
for (CounterCell c : cs)
if (c != null)
sum += c.value;
}
return sum;
}
@ -2595,9 +2594,9 @@ public class ConcurrentHashMap<K,V> extends AbstractMap<K,V>
}
boolean collide = false; // True if last slot nonempty
for (;;) {
CounterCell[] as; CounterCell a; int n; long v;
if ((as = counterCells) != null && (n = as.length) > 0) {
if ((a = as[(n - 1) & h]) == null) {
CounterCell[] cs; CounterCell c; int n; long v;
if ((cs = counterCells) != null && (n = cs.length) > 0) {
if ((c = cs[(n - 1) & h]) == null) {
if (cellsBusy == 0) { // Try to attach new Cell
CounterCell r = new CounterCell(x); // Optimistic create
if (cellsBusy == 0 &&
@ -2623,21 +2622,17 @@ public class ConcurrentHashMap<K,V> extends AbstractMap<K,V>
}
else if (!wasUncontended) // CAS already known to fail
wasUncontended = true; // Continue after rehash
else if (U.compareAndSetLong(a, CELLVALUE, v = a.value, v + x))
else if (U.compareAndSetLong(c, CELLVALUE, v = c.value, v + x))
break;
else if (counterCells != as || n >= NCPU)
else if (counterCells != cs || n >= NCPU)
collide = false; // At max size or stale
else if (!collide)
collide = true;
else if (cellsBusy == 0 &&
U.compareAndSetInt(this, CELLSBUSY, 0, 1)) {
try {
if (counterCells == as) {// Expand table unless stale
CounterCell[] rs = new CounterCell[n << 1];
for (int i = 0; i < n; ++i)
rs[i] = as[i];
counterCells = rs;
}
if (counterCells == cs) // Expand table unless stale
counterCells = Arrays.copyOf(cs, n << 1);
} finally {
cellsBusy = 0;
}
@ -2646,11 +2641,11 @@ public class ConcurrentHashMap<K,V> extends AbstractMap<K,V>
}
h = ThreadLocalRandom.advanceProbe(h);
}
else if (cellsBusy == 0 && counterCells == as &&
else if (cellsBusy == 0 && counterCells == cs &&
U.compareAndSetInt(this, CELLSBUSY, 0, 1)) {
boolean init = false;
try { // Initialize table
if (counterCells == as) {
if (counterCells == cs) {
CounterCell[] rs = new CounterCell[2];
rs[h & 1] = new CounterCell(x);
counterCells = rs;
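The renaming above (as/a to cs/c) touches ConcurrentHashMap's striped size counter: a shared baseCount plus an array of CounterCells that contended threads fall back to, with sumCount() adding everything up on read (the same idea as LongAdder). A much-simplified sketch of that design, not the JDK implementation:

import java.util.concurrent.atomic.AtomicLong;

class StripedCounterSketch {
    private final AtomicLong base = new AtomicLong();
    private final AtomicLong[] cells = new AtomicLong[8];       // fixed stripe count, just for the sketch
    {
        for (int i = 0; i < cells.length; i++) cells[i] = new AtomicLong();
    }
    void add(long x) {
        long b = base.get();
        if (!base.compareAndSet(b, b + x))                       // contended: divert to a per-thread cell
            cells[(int) Thread.currentThread().getId() & (cells.length - 1)].addAndGet(x);
    }
    long sum() {                                                 // analogous to sumCount(): base plus every cell
        long s = base.get();
        for (AtomicLong c : cells) s += c.get();
        return s;
    }
}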

View file

@ -2204,9 +2204,7 @@ public class ConcurrentSkipListMap<K,V> extends AbstractMap<K,V>
Collection<?> c = (Collection<?>) o;
try {
return containsAll(c) && c.containsAll(this);
} catch (ClassCastException unused) {
return false;
} catch (NullPointerException unused) {
} catch (ClassCastException | NullPointerException unused) {
return false;
}
}
@ -2331,9 +2329,7 @@ public class ConcurrentSkipListMap<K,V> extends AbstractMap<K,V>
Collection<?> c = (Collection<?>) o;
try {
return containsAll(c) && c.containsAll(this);
} catch (ClassCastException unused) {
return false;
} catch (NullPointerException unused) {
} catch (ClassCastException | NullPointerException unused) {
return false;
}
}
@ -2453,9 +2449,7 @@ public class ConcurrentSkipListMap<K,V> extends AbstractMap<K,V>
if (k == null) // pass by markers and headers
return true;
int c = cpr(cmp, k, hi);
if (c > 0 || (c == 0 && !hiInclusive))
return false;
return true;
return c < 0 || (c == 0 && hiInclusive);
}
/**

View file

@ -309,9 +309,7 @@ public class ConcurrentSkipListSet<E>
Collection<?> c = (Collection<?>) o;
try {
return containsAll(c) && c.containsAll(this);
} catch (ClassCastException unused) {
return false;
} catch (NullPointerException unused) {
} catch (ClassCastException | NullPointerException unused) {
return false;
}
}

View file

@ -508,7 +508,7 @@ public class CopyOnWriteArrayList<E>
public boolean remove(Object o) {
Object[] snapshot = getArray();
int index = indexOf(o, snapshot, 0, snapshot.length);
return (index < 0) ? false : remove(o, snapshot, index);
return index >= 0 && remove(o, snapshot, index);
}
/**
@ -587,8 +587,8 @@ public class CopyOnWriteArrayList<E>
*/
public boolean addIfAbsent(E e) {
Object[] snapshot = getArray();
return indexOf(e, snapshot, 0, snapshot.length) >= 0 ? false :
addIfAbsent(e, snapshot);
return indexOf(e, snapshot, 0, snapshot.length) < 0
&& addIfAbsent(e, snapshot);
}
/**
@ -980,13 +980,10 @@ public class CopyOnWriteArrayList<E>
List<?> list = (List<?>)o;
Iterator<?> it = list.iterator();
Object[] elements = getArray();
for (int i = 0, len = elements.length; i < len; i++)
if (!it.hasNext() || !Objects.equals(elements[i], it.next()))
for (Object element : getArray())
if (!it.hasNext() || !Objects.equals(element, it.next()))
return false;
if (it.hasNext())
return false;
return true;
return !it.hasNext();
}
/**

View file

@ -1353,17 +1353,16 @@ public class LinkedBlockingDeque<E>
@SuppressWarnings("unchecked")
private boolean bulkRemove(Predicate<? super E> filter) {
boolean removed = false;
Node<E> p = null;
final ReentrantLock lock = this.lock;
Node<E> p = null;
Node<E>[] nodes = null;
int n, len = 0;
do {
// 1. Extract batch of up to 64 elements while holding the lock.
long deathRow = 0; // "bitset" of size 64
lock.lock();
try {
if (nodes == null) {
if (p == null) p = first;
if (nodes == null) { // first batch; initialize
p = first;
for (Node<E> q = p; q != null; q = succ(q))
if (q.item != null && ++len == 64)
break;
@ -1376,6 +1375,7 @@ public class LinkedBlockingDeque<E>
}
// 2. Run the filter on the elements while lock is free.
long deathRow = 0L; // "bitset" of size 64
for (int i = 0; i < n; i++) {
final E e;
if ((e = nodes[i].item) != null && filter.test(e))
@ -1393,6 +1393,7 @@ public class LinkedBlockingDeque<E>
unlink(q);
removed = true;
}
nodes[i] = null; // help GC
}
} finally {
lock.unlock();

View file

@ -1060,11 +1060,10 @@ public class LinkedBlockingQueue<E> extends AbstractQueue<E>
int n, len = 0;
do {
// 1. Extract batch of up to 64 elements while holding the lock.
long deathRow = 0; // "bitset" of size 64
fullyLock();
try {
if (nodes == null) {
if (p == null) p = head.next;
if (nodes == null) { // first batch; initialize
p = head.next;
for (Node<E> q = p; q != null; q = succ(q))
if (q.item != null && ++len == 64)
break;
@ -1077,6 +1076,7 @@ public class LinkedBlockingQueue<E> extends AbstractQueue<E>
}
// 2. Run the filter on the elements while lock is free.
long deathRow = 0L; // "bitset" of size 64
for (int i = 0; i < n; i++) {
final E e;
if ((e = nodes[i].item) != null && filter.test(e))
@ -1095,6 +1095,7 @@ public class LinkedBlockingQueue<E> extends AbstractQueue<E>
unlink(q, ancestor);
removed = true;
}
nodes[i] = null; // help GC
}
} finally {
fullyUnlock();

View file

@ -772,9 +772,8 @@ public class LinkedTransferQueue<E> extends AbstractQueue<E>
Node first = null;
restartFromHead: for (;;) {
Node h = head, p = h;
for (; p != null;) {
final Object item;
if ((item = p.item) != null) {
while (p != null) {
if (p.item != null) {
if (p.isData) {
first = p;
break;
@ -1602,8 +1601,7 @@ public class LinkedTransferQueue<E> extends AbstractQueue<E>
// Read in elements until trailing null sentinel found
Node h = null, t = null;
for (Object item; (item = s.readObject()) != null; ) {
@SuppressWarnings("unchecked")
Node newNode = new Node((E) item);
Node newNode = new Node(item);
if (h == null)
h = t = newNode;
else

View file

@ -269,8 +269,8 @@ public class PriorityBlockingQueue<E> extends AbstractQueue<E>
if (a.getClass() != Object[].class)
a = Arrays.copyOf(a, n, Object[].class);
if (screen && (n == 1 || this.comparator != null)) {
for (int i = 0; i < n; ++i)
if (a[i] == null)
for (Object elt : a)
if (elt == null)
throw new NullPointerException();
}
this.queue = a;

View file

@ -753,8 +753,10 @@ public class SubmissionPublisher<T> implements Publisher<T>,
else
pred.next = next;
}
else
else {
subs.add(b.subscriber);
pred = b;
}
}
}
return subs;

View file

@ -67,7 +67,7 @@ import jdk.internal.misc.VM;
* {@code ThreadLocalRandom.current().nextX(...)} (where
* {@code X} is {@code Int}, {@code Long}, etc).
* When all usages are of this form, it is never possible to
* accidently share a {@code ThreadLocalRandom} across multiple threads.
* accidentally share a {@code ThreadLocalRandom} across multiple threads.
*
* <p>This class also provides additional commonly used bounded random
* generation methods.
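The usage pattern the javadoc recommends, in concrete form:

import java.util.concurrent.ThreadLocalRandom;

class TlrUsage {
    static int diceRoll() {
        // Always go through current(); never cache the instance where another thread could pick it up.
        return ThreadLocalRandom.current().nextInt(1, 7);        // uniform in [1, 7), i.e. 1..6
    }
}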

View file

@ -264,13 +264,12 @@ import java.util.concurrent.locks.ReentrantLock;
* assist in storage reclamation when large numbers of queued tasks
* become cancelled.</dd>
*
* <dt>Finalization</dt>
* <dt>Reclamation</dt>
*
* <dd>A pool that is no longer referenced in a program <em>AND</em>
* has no remaining threads will be {@code shutdown} automatically. If
* you would like to ensure that unreferenced pools are reclaimed even
* if users forget to call {@link #shutdown}, then you must arrange
* that unused threads eventually die, by setting appropriate
* has no remaining threads may be reclaimed (garbage collected)
* without being explicitly shutdown. You can configure a pool to allow
* all unused threads to eventually die by setting appropriate
* keep-alive times, using a lower bound of zero core threads and/or
* setting {@link #allowCoreThreadTimeOut(boolean)}. </dd>
*
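A pool configured along the lines the revised javadoc describes, so that every worker can eventually terminate and an unreferenced pool becomes eligible for reclamation (illustrative values):

import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;

class ReclaimablePool {
    static ThreadPoolExecutor create() {
        ThreadPoolExecutor pool = new ThreadPoolExecutor(
                0, 4,                        // zero core threads: every worker is subject to the keep-alive
                30, TimeUnit.SECONDS,        // idle workers terminate after 30 seconds
                new LinkedBlockingQueue<>());
        // Alternatively, keep core threads but let them time out as well:
        // pool.allowCoreThreadTimeOut(true);
        return pool;
    }
}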
@ -361,7 +360,7 @@ public class ThreadPoolExecutor extends AbstractExecutorService {
* time, but need not hit each state. The transitions are:
*
* RUNNING -> SHUTDOWN
* On invocation of shutdown(), perhaps implicitly in finalize()
* On invocation of shutdown()
* (RUNNING or SHUTDOWN) -> STOP
* On invocation of shutdownNow()
* SHUTDOWN -> TIDYING
@ -581,9 +580,6 @@ public class ThreadPoolExecutor extends AbstractExecutorService {
private static final RuntimePermission shutdownPerm =
new RuntimePermission("modifyThread");
/** The context to be used when executing the finalizer, or null. */
private final AccessControlContext acc;
/**
* Class Worker mainly maintains interrupt control state for
* threads running tasks, along with other minor bookkeeping.
@ -1300,9 +1296,6 @@ public class ThreadPoolExecutor extends AbstractExecutorService {
throw new IllegalArgumentException();
if (workQueue == null || threadFactory == null || handler == null)
throw new NullPointerException();
this.acc = (System.getSecurityManager() == null)
? null
: AccessController.getContext();
this.corePoolSize = corePoolSize;
this.maximumPoolSize = maximumPoolSize;
this.workQueue = workQueue;
@ -1469,33 +1462,6 @@ public class ThreadPoolExecutor extends AbstractExecutorService {
}
}
/**
* Invokes {@code shutdown} when this executor is no longer
* referenced and it has no threads.
*
* <p>This method is invoked with privileges that are restricted by
* the security context of the caller that invokes the constructor.
*
* @deprecated The {@code finalize} method has been deprecated.
* Subclasses that override {@code finalize} in order to perform cleanup
* should be modified to use alternative cleanup mechanisms and
* to remove the overriding {@code finalize} method.
* When overriding the {@code finalize} method, its implementation must explicitly
* ensure that {@code super.finalize()} is invoked as described in {@link Object#finalize}.
* See the specification for {@link Object#finalize()} for further
* information about migration options.
*/
@Deprecated(since="9")
protected void finalize() {
SecurityManager sm = System.getSecurityManager();
if (sm == null || acc == null) {
shutdown();
} else {
PrivilegedAction<Void> pa = () -> { shutdown(); return null; };
AccessController.doPrivileged(pa, acc);
}
}
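With the finalizer gone, code that relied on it should shut pools down explicitly; one common pattern (hypothetical helper, shown for illustration):

import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;

class ExplicitShutdown {
    static void runJobs(Runnable... jobs) {
        ExecutorService pool = Executors.newFixedThreadPool(4);
        try {
            for (Runnable job : jobs)
                pool.execute(job);
        } finally {
            pool.shutdown();                 // explicit shutdown replaces the removed finalize() safety net
        }
    }
}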
/**
* Sets the thread factory used to create new threads.
*

View file

@ -320,7 +320,9 @@ public abstract class AbstractQueuedLongSynchronizer
// predNext is the apparent node to unsplice. CASes below will
// fail if not, in which case, we lost race vs another cancel
// or signal, so no further action is necessary.
// or signal, so no further action is necessary, although with
// a possibility that a cancelled node may transiently remain
// reachable.
Node predNext = pred.next;
// Can use unconditional write instead of CAS here.
@ -912,13 +914,13 @@ public abstract class AbstractQueuedLongSynchronizer
* at any time, a {@code true} return does not guarantee that any
* other thread will ever acquire.
*
* <p>In this implementation, this operation returns in
* constant time.
*
* @return {@code true} if there may be other threads waiting to acquire
*/
public final boolean hasQueuedThreads() {
return head != tail;
for (Node p = tail, h = head; p != h && p != null; p = p.prev)
if (p.waitStatus <= 0)
return true;
return false;
}
/**
@ -1067,17 +1069,21 @@ public abstract class AbstractQueuedLongSynchronizer
* @since 1.7
*/
public final boolean hasQueuedPredecessors() {
// The correctness of this depends on head being initialized
// before tail and on head.next being accurate if the current
// thread is first in queue.
Node t = tail; // Read fields in reverse initialization order
Node h = head;
Node s;
return h != t &&
((s = h.next) == null || s.thread != Thread.currentThread());
Node h, s;
if ((h = head) != null) {
if ((s = h.next) == null || s.waitStatus > 0) {
s = null; // traverse in case of concurrent cancellation
for (Node p = tail; p != h && p != null; p = p.prev) {
if (p.waitStatus <= 0)
s = p;
}
}
if (s != null && s.thread != Thread.currentThread())
return true;
}
return false;
}
// Instrumentation and monitoring methods
/**

View file

@ -800,7 +800,9 @@ public abstract class AbstractQueuedSynchronizer
// predNext is the apparent node to unsplice. CASes below will
// fail if not, in which case, we lost race vs another cancel
// or signal, so no further action is necessary.
// or signal, so no further action is necessary, although with
// a possibility that a cancelled node may transiently remain
// reachable.
Node predNext = pred.next;
// Can use unconditional write instead of CAS here.
@ -1392,13 +1394,13 @@ public abstract class AbstractQueuedSynchronizer
* at any time, a {@code true} return does not guarantee that any
* other thread will ever acquire.
*
* <p>In this implementation, this operation returns in
* constant time.
*
* @return {@code true} if there may be other threads waiting to acquire
*/
public final boolean hasQueuedThreads() {
return head != tail;
for (Node p = tail, h = head; p != h && p != null; p = p.prev)
if (p.waitStatus <= 0)
return true;
return false;
}
/**
@ -1547,17 +1549,21 @@ public abstract class AbstractQueuedSynchronizer
* @since 1.7
*/
public final boolean hasQueuedPredecessors() {
// The correctness of this depends on head being initialized
// before tail and on head.next being accurate if the current
// thread is first in queue.
Node t = tail; // Read fields in reverse initialization order
Node h = head;
Node s;
return h != t &&
((s = h.next) == null || s.thread != Thread.currentThread());
Node h, s;
if ((h = head) != null) {
if ((s = h.next) == null || s.waitStatus > 0) {
s = null; // traverse in case of concurrent cancellation
for (Node p = tail; p != h && p != null; p = p.prev) {
if (p.waitStatus <= 0)
s = p;
}
}
if (s != null && s.thread != Thread.currentThread())
return true;
}
return false;
}
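hasQueuedPredecessors is typically consulted by fair synchronizers to avoid barging past already-queued threads. A simplified sketch of such a subclass (not part of this commit; reentrancy and error handling omitted):

import java.util.concurrent.locks.AbstractQueuedSynchronizer;

class FairMutexSketch extends AbstractQueuedSynchronizer {
    @Override
    protected boolean tryAcquire(int unused) {
        // Fairness: back off if some other thread has been queued longer than we have.
        if (hasQueuedPredecessors())
            return false;
        return compareAndSetState(0, 1);     // 0 = unlocked, 1 = locked
    }
    @Override
    protected boolean tryRelease(int unused) {
        setState(0);
        return true;
    }
    void lock()   { acquire(1); }
    void unlock() { release(1); }
}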
// Instrumentation and monitoring methods
/**

View file

@ -1,5 +1,5 @@
/*
* Copyright (c) 1999, 2017, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1999, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -953,6 +953,12 @@ public final class Pattern
*/
private int flags;
/**
* The temporary pattern flags used during compiling. The flags might be turned
* on and off by embedded flag expressions.
*/
private transient int flags0;
/**
* Boolean indicating this Pattern is compiled; this is necessary in order
* to lazily compile deserialized Patterns.
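The new flags0 field holds the flag state that embedded flag expressions toggle while the pattern is being compiled. For reference, this is what such expressions look like in practice (standard java.util.regex behavior, not specific to this patch):

import java.util.regex.Pattern;

class EmbeddedFlagsDemo {
    public static void main(String[] args) {
        // "(?i)" turns CASE_INSENSITIVE on from that point; "(?-i)" turns it back off.
        System.out.println(Pattern.matches("(?i)abc", "ABC"));          // true
        System.out.println(Pattern.matches("a(?i)bc(?-i)d", "aBCd"));   // true
        System.out.println(Pattern.matches("a(?i)bc(?-i)d", "aBCD"));   // false: trailing 'd' is case-sensitive again
    }
}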
@ -1137,7 +1143,7 @@ public final class Pattern
* @return The match flags specified when this pattern was compiled
*/
public int flags() {
return flags;
return flags0;
}
/**
@ -1369,6 +1375,9 @@ public final class Pattern
// Read in all fields
s.defaultReadObject();
// reset the flags
flags0 = flags;
// Initialize counts
capturingGroupCount = 1;
localCount = 0;
@ -1400,6 +1409,9 @@ public final class Pattern
if ((flags & UNICODE_CHARACTER_CLASS) != 0)
flags |= UNICODE_CASE;
// 'flags' for compiling
flags0 = flags;
// Reset group index count
capturingGroupCount = 1;
localCount = 0;
@ -1841,7 +1853,7 @@ loop: for(int x=0, offset=0; x<nCodePoints; x++, offset+=len) {
* Indicates whether a particular flag is set or not.
*/
private boolean has(int f) {
return (flags & f) != 0;
return (flags0 & f) != 0;
}
/**
@ -2718,7 +2730,7 @@ loop: for(int x=0, offset=0; x<nCodePoints; x++, offset+=len) {
ch == 0x53 || ch == 0x73 || //S and s
ch == 0x4b || ch == 0x6b || //K and k
ch == 0xc5 || ch == 0xe5))) { //A+ring
bits.add(ch, flags());
bits.add(ch, flags0);
return null;
}
return single(ch);
@ -2931,7 +2943,7 @@ loop: for(int x=0, offset=0; x<nCodePoints; x++, offset+=len) {
boolean capturingGroup = false;
Node head = null;
Node tail = null;
int save = flags;
int save = flags0;
int saveTCNCount = topClosureNodes.size();
root = null;
int ch = next();
@ -3032,7 +3044,7 @@ loop: for(int x=0, offset=0; x<nCodePoints; x++, offset+=len) {
}
accept(')', "Unclosed group");
flags = save;
flags0 = save;
// Check for quantifiers
Node node = closure(head);
@ -3135,28 +3147,28 @@ loop: for(int x=0, offset=0; x<nCodePoints; x++, offset+=len) {
for (;;) {
switch (ch) {
case 'i':
flags |= CASE_INSENSITIVE;
flags0 |= CASE_INSENSITIVE;
break;
case 'm':
flags |= MULTILINE;
flags0 |= MULTILINE;
break;
case 's':
flags |= DOTALL;
flags0 |= DOTALL;
break;
case 'd':
flags |= UNIX_LINES;
flags0 |= UNIX_LINES;
break;
case 'u':
flags |= UNICODE_CASE;
flags0 |= UNICODE_CASE;
break;
case 'c':
flags |= CANON_EQ;
flags0 |= CANON_EQ;
break;
case 'x':
flags |= COMMENTS;
flags0 |= COMMENTS;
break;
case 'U':
flags |= (UNICODE_CHARACTER_CLASS | UNICODE_CASE);
flags0 |= (UNICODE_CHARACTER_CLASS | UNICODE_CASE);
break;
case '-': // subFlag then fall through
ch = next();
@ -3178,28 +3190,28 @@ loop: for(int x=0, offset=0; x<nCodePoints; x++, offset+=len) {
for (;;) {
switch (ch) {
case 'i':
flags &= ~CASE_INSENSITIVE;
flags0 &= ~CASE_INSENSITIVE;
break;
case 'm':
flags &= ~MULTILINE;
flags0 &= ~MULTILINE;
break;
case 's':
flags &= ~DOTALL;
flags0 &= ~DOTALL;
break;
case 'd':
flags &= ~UNIX_LINES;
flags0 &= ~UNIX_LINES;
break;
case 'u':
flags &= ~UNICODE_CASE;
flags0 &= ~UNICODE_CASE;
break;
case 'c':
flags &= ~CANON_EQ;
flags0 &= ~CANON_EQ;
break;
case 'x':
flags &= ~COMMENTS;
flags0 &= ~COMMENTS;
break;
case 'U':
flags &= ~(UNICODE_CHARACTER_CLASS | UNICODE_CASE);
flags0 &= ~(UNICODE_CHARACTER_CLASS | UNICODE_CASE);
break;
default:
return;