Coleen Phillimore 2010-06-28 12:03:05 -04:00
commit 52adb9a491
97 changed files with 1982 additions and 657 deletions

View file

@@ -1376,11 +1376,6 @@ void Arguments::set_g1_gc_flags() {
}
no_shared_spaces();
// Set the maximum pause time goal to be a reasonable default.
if (FLAG_IS_DEFAULT(MaxGCPauseMillis)) {
FLAG_SET_DEFAULT(MaxGCPauseMillis, 200);
}
if (FLAG_IS_DEFAULT(MarkStackSize)) {
FLAG_SET_DEFAULT(MarkStackSize, 128 * TASKQUEUE_SIZE);
}
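
This hunk follows HotSpot's ergonomic-default pattern: a flag is only overwritten when the user did not set it explicitly on the command line. A minimal sketch of that pattern follows; the GCPauseIntervalMillis lines are an assumption about how a derived default could be filled in elsewhere, not something shown in this hunk.

// Respect an explicit -XX: setting; otherwise pick an ergonomic default.
if (FLAG_IS_DEFAULT(MaxGCPauseMillis)) {
  FLAG_SET_DEFAULT(MaxGCPauseMillis, 200);                        // G1 pause-time goal, in ms
}
if (FLAG_IS_DEFAULT(GCPauseIntervalMillis)) {
  FLAG_SET_DEFAULT(GCPauseIntervalMillis, MaxGCPauseMillis + 1);  // assumed derived MMU time slice
}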

View file

@@ -1975,7 +1975,7 @@ class CommandLineFlags {
"Adaptive size policy maximum GC pause time goal in msec, " \
"or (G1 Only) the max. GC time per MMU time slice") \
\
product(intx, GCPauseIntervalMillis, 500, \
product(uintx, GCPauseIntervalMillis, 0, \
"Time slice for MMU specification") \
\
product(uintx, MaxGCMinorPauseMillis, max_uintx, \
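
For context, MaxGCPauseMillis and GCPauseIntervalMillis together express G1's MMU (minimum mutator utilization) goal, and the new default of 0 presumably means the time slice is derived from the pause goal during ergonomics rather than fixed at 500 ms. An illustrative calculation, not code from this patch (it assumes the flag has already been given a non-zero value):

// At most MaxGCPauseMillis of GC time is allowed inside any window of
// GCPauseIntervalMillis, so the mutator keeps at least the remaining share.
double gc_share   = (double)MaxGCPauseMillis / (double)GCPauseIntervalMillis;
double mmu_target = 1.0 - gc_share;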

View file

@@ -66,6 +66,7 @@ jobject JNIHandles::make_local(JNIEnv* env, oop obj) {
jobject JNIHandles::make_global(Handle obj) {
assert(!Universe::heap()->is_gc_active(), "can't extend the root set during GC");
jobject res = NULL;
if (!obj.is_null()) {
// ignore null handles
@@ -81,6 +82,7 @@ jobject JNIHandles::make_global(Handle obj) {
jobject JNIHandles::make_weak_global(Handle obj) {
assert(!Universe::heap()->is_gc_active(), "can't extend the root set during GC");
jobject res = NULL;
if (!obj.is_null()) {
// ignore null handles
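
The asserts added here document that the global and weak-global handle tables are GC roots and must not grow while a collection is in progress. For reference, a minimal JNI-side sketch of the calls that reach these functions, given a valid JNIEnv* env (the class lookup and allocation are illustrative only):

// NewGlobalRef / NewWeakGlobalRef end up in JNIHandles::make_global /
// JNIHandles::make_weak_global inside the VM.
jclass  cls    = env->FindClass("java/lang/Object");
jobject local  = env->AllocObject(cls);         // local ref, valid only in this native frame
jobject global = env->NewGlobalRef(local);      // strong root, usable from any thread
jweak   weak   = env->NewWeakGlobalRef(local);  // weak root, may be cleared by GC
env->DeleteLocalRef(local);
env->DeleteGlobalRef(global);
env->DeleteWeakGlobalRef(weak);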

View file

@@ -779,7 +779,7 @@ Handle SharedRuntime::find_callee_info_helper(JavaThread* thread,
// Find bytecode
Bytecode_invoke* bytecode = Bytecode_invoke_at(caller, bci);
bc = bytecode->adjusted_invoke_code();
bc = bytecode->java_code();
int bytecode_index = bytecode->index();
// Find receiver for non-static call

View file

@@ -135,28 +135,32 @@ typedef void (*arraycopy_fn)(address src, address dst, int count);
static void test_arraycopy_func(address func, int alignment) {
int v = 0xcc;
int v2 = 0x11;
jlong lbuffer[2];
jlong lbuffer2[2];
address buffer = (address) lbuffer;
address buffer2 = (address) lbuffer2;
jlong lbuffer[8];
jlong lbuffer2[8];
address fbuffer = (address) lbuffer;
address fbuffer2 = (address) lbuffer2;
unsigned int i;
for (i = 0; i < sizeof(lbuffer); i++) {
buffer[i] = v; buffer2[i] = v2;
fbuffer[i] = v; fbuffer2[i] = v2;
}
// C++ does not guarantee jlong[] array alignment to 8 bytes.
// Use middle of array to check that memory before it is not modified.
address buffer = (address) round_to((intptr_t)&lbuffer[4], BytesPerLong);
address buffer2 = (address) round_to((intptr_t)&lbuffer2[4], BytesPerLong);
// do an aligned copy
((arraycopy_fn)func)(buffer, buffer2, 0);
for (i = 0; i < sizeof(lbuffer); i++) {
assert(buffer[i] == v && buffer2[i] == v2, "shouldn't have copied anything");
assert(fbuffer[i] == v && fbuffer2[i] == v2, "shouldn't have copied anything");
}
// adjust destination alignment
((arraycopy_fn)func)(buffer, buffer2 + alignment, 0);
for (i = 0; i < sizeof(lbuffer); i++) {
assert(buffer[i] == v && buffer2[i] == v2, "shouldn't have copied anything");
assert(fbuffer[i] == v && fbuffer2[i] == v2, "shouldn't have copied anything");
}
// adjust source alignment
((arraycopy_fn)func)(buffer + alignment, buffer2, 0);
for (i = 0; i < sizeof(lbuffer); i++) {
assert(buffer[i] == v && buffer2[i] == v2, "shouldn't have copied anything");
assert(fbuffer[i] == v && fbuffer2[i] == v2, "shouldn't have copied anything");
}
}
#endif
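
As the new comment says, a jlong[] is not guaranteed to be 8-byte aligned (on 32-bit platforms it may only be 4-byte aligned), so the test now rounds up to the next 8-byte boundary inside a larger array and leaves guard bytes in front of it. A sketch of the alignment arithmetic, assuming round_to rounds up to a power-of-two multiple:

intptr_t raw     = (intptr_t)&lbuffer[4];
intptr_t aligned = (raw + (BytesPerLong - 1)) & ~(intptr_t)(BytesPerLong - 1);
// 'aligned' is at most BytesPerLong - 1 bytes past &lbuffer[4]; the bytes of
// lbuffer[0..3] in front of it stay untouched and can be checked through fbuffer
// afterwards to confirm that a zero-count copy wrote nothing.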
@@ -183,7 +187,7 @@ void StubRoutines::initialize2() {
test_arraycopy_func(arrayof_##type##_arraycopy(), sizeof(HeapWord)); \
test_arraycopy_func(arrayof_##type##_disjoint_arraycopy(), sizeof(HeapWord))
// Make sure all the arraycopy stubs properly handle zeros
// Make sure all the arraycopy stubs properly handle zero count
TEST_ARRAYCOPY(jbyte);
TEST_ARRAYCOPY(jshort);
TEST_ARRAYCOPY(jint);
@@ -191,6 +195,25 @@ void StubRoutines::initialize2() {
#undef TEST_ARRAYCOPY
#define TEST_COPYRTN(type) \
test_arraycopy_func(CAST_FROM_FN_PTR(address, Copy::conjoint_##type##s_atomic), sizeof(type)); \
test_arraycopy_func(CAST_FROM_FN_PTR(address, Copy::arrayof_conjoint_##type##s), (int)MAX2(sizeof(HeapWord), sizeof(type)))
// Make sure all the copy runtime routines properly handle zero count
TEST_COPYRTN(jbyte);
TEST_COPYRTN(jshort);
TEST_COPYRTN(jint);
TEST_COPYRTN(jlong);
#undef TEST_COPYRTN
test_arraycopy_func(CAST_FROM_FN_PTR(address, Copy::conjoint_words), sizeof(HeapWord));
test_arraycopy_func(CAST_FROM_FN_PTR(address, Copy::disjoint_words), sizeof(HeapWord));
test_arraycopy_func(CAST_FROM_FN_PTR(address, Copy::disjoint_words_atomic), sizeof(HeapWord));
// Aligned to BytesPerLong
test_arraycopy_func(CAST_FROM_FN_PTR(address, Copy::aligned_conjoint_words), sizeof(jlong));
test_arraycopy_func(CAST_FROM_FN_PTR(address, Copy::aligned_disjoint_words), sizeof(jlong));
#endif
}
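
The new TEST_COPYRTN block extends the zero-count checks from the generated arraycopy stubs to the C++ copy routines in Copy::. A hedged illustration of the property being exercised (the jbyte variant and the buffer contents are arbitrary choices here):

jbyte a[4] = { 1, 2, 3, 4 };
jbyte b[4] = { 5, 6, 7, 8 };
// A zero-element copy must be a no-op, even with a misaligned destination.
Copy::conjoint_jbytes_atomic(a, b + 1, 0);
assert(b[1] == 6, "zero-count copy must not touch the destination");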
@@ -221,15 +244,13 @@ JRT_LEAF(void, StubRoutines::jbyte_copy(jbyte* src, jbyte* dest, size_t count))
#ifndef PRODUCT
SharedRuntime::_jbyte_array_copy_ctr++; // Slow-path byte array copy
#endif // !PRODUCT
assert(count != 0, "count should be non-zero");
Copy::conjoint_bytes_atomic(src, dest, count);
Copy::conjoint_jbytes_atomic(src, dest, count);
JRT_END
JRT_LEAF(void, StubRoutines::jshort_copy(jshort* src, jshort* dest, size_t count))
#ifndef PRODUCT
SharedRuntime::_jshort_array_copy_ctr++; // Slow-path short/char array copy
#endif // !PRODUCT
assert(count != 0, "count should be non-zero");
Copy::conjoint_jshorts_atomic(src, dest, count);
JRT_END
@@ -237,7 +258,6 @@ JRT_LEAF(void, StubRoutines::jint_copy(jint* src, jint* dest, size_t count))
#ifndef PRODUCT
SharedRuntime::_jint_array_copy_ctr++; // Slow-path int/float array copy
#endif // !PRODUCT
assert(count != 0, "count should be non-zero");
Copy::conjoint_jints_atomic(src, dest, count);
JRT_END
@@ -245,7 +265,6 @@ JRT_LEAF(void, StubRoutines::jlong_copy(jlong* src, jlong* dest, size_t count))
#ifndef PRODUCT
SharedRuntime::_jlong_array_copy_ctr++; // Slow-path long/double array copy
#endif // !PRODUCT
assert(count != 0, "count should be non-zero");
Copy::conjoint_jlongs_atomic(src, dest, count);
JRT_END
@@ -263,15 +282,13 @@ JRT_LEAF(void, StubRoutines::arrayof_jbyte_copy(HeapWord* src, HeapWord* dest, s
#ifndef PRODUCT
SharedRuntime::_jbyte_array_copy_ctr++; // Slow-path byte array copy
#endif // !PRODUCT
assert(count != 0, "count should be non-zero");
Copy::arrayof_conjoint_bytes(src, dest, count);
Copy::arrayof_conjoint_jbytes(src, dest, count);
JRT_END
JRT_LEAF(void, StubRoutines::arrayof_jshort_copy(HeapWord* src, HeapWord* dest, size_t count))
#ifndef PRODUCT
SharedRuntime::_jshort_array_copy_ctr++; // Slow-path short/char array copy
#endif // !PRODUCT
assert(count != 0, "count should be non-zero");
Copy::arrayof_conjoint_jshorts(src, dest, count);
JRT_END
@@ -279,7 +296,6 @@ JRT_LEAF(void, StubRoutines::arrayof_jint_copy(HeapWord* src, HeapWord* dest, si
#ifndef PRODUCT
SharedRuntime::_jint_array_copy_ctr++; // Slow-path int/float array copy
#endif // !PRODUCT
assert(count != 0, "count should be non-zero");
Copy::arrayof_conjoint_jints(src, dest, count);
JRT_END
@@ -287,7 +303,6 @@ JRT_LEAF(void, StubRoutines::arrayof_jlong_copy(HeapWord* src, HeapWord* dest, s
#ifndef PRODUCT
SharedRuntime::_jlong_array_copy_ctr++; // Slow-path int/float array copy
#endif // !PRODUCT
assert(count != 0, "count should be non-zero");
Copy::arrayof_conjoint_jlongs(src, dest, count);
JRT_END
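
With zero-count handling now tested above, the slow-path entry points no longer assert count != 0; they forward to the renamed Copy routines, which are expected to do nothing for count == 0. A simplified sketch of the resulting contract (not the generated stub code):

static void slow_byte_copy_sketch(jbyte* src, jbyte* dest, size_t count) {
  Copy::conjoint_jbytes_atomic(src, dest, count);   // legal and a no-op when count == 0
}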

View file

@@ -26,7 +26,7 @@
# include "incls/_sweeper.cpp.incl"
long NMethodSweeper::_traversals = 0; // No. of stack traversals performed
CodeBlob* NMethodSweeper::_current = NULL; // Current nmethod
nmethod* NMethodSweeper::_current = NULL; // Current nmethod
int NMethodSweeper::_seen = 0 ; // No. of blobs we have currently processed in current pass of CodeCache
int NMethodSweeper::_invocations = 0; // No. of invocations left until we are completed with this pass
@@ -171,20 +171,16 @@ void NMethodSweeper::sweep_code_cache() {
// Since we will give up the CodeCache_lock, always skip ahead to an nmethod.
// Other blobs can be deleted by other threads
// Read next before we potentially delete current
CodeBlob* next = CodeCache::next_nmethod(_current);
nmethod* next = CodeCache::next_nmethod(_current);
// Now ready to process nmethod and give up CodeCache_lock
{
MutexUnlockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
process_nmethod((nmethod *)_current);
process_nmethod(_current);
}
_seen++;
_current = next;
}
// Skip forward to the next nmethod (if any). Code blobs other than nmethods
// can be freed async to us and make _current invalid while we sleep.
_current = CodeCache::next_nmethod(_current);
}
if (_current == NULL && !_rescan && (_locked_seen || _not_entrant_seen_on_stack)) {
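
This hunk retypes the sweeper cursor from CodeBlob* to nmethod* and drops the extra skip-forward step: CodeCache::next_nmethod() is assumed to hand back the next nmethod directly, skipping other code blobs. A sketch of that assumed behaviour, written against the CodeCache iteration API rather than copied from this commit:

// Advance through the code cache and return the next blob that is an nmethod, or NULL.
nmethod* next_nmethod_sketch(CodeBlob* cb) {
  cb = CodeCache::next(cb);
  while (cb != NULL && !cb->is_nmethod()) {
    cb = CodeCache::next(cb);
  }
  return (nmethod*)cb;
}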

View file

@@ -29,7 +29,7 @@
class NMethodSweeper : public AllStatic {
static long _traversals; // Stack traversal count
static CodeBlob* _current; // Current nmethod
static nmethod* _current; // Current nmethod
static int _seen; // Nof. nmethod we have currently processed in current pass of CodeCache
static int _invocations; // No. of invocations left until we are completed with this pass

View file

@@ -2700,7 +2700,7 @@ void JavaThread::popframe_preserve_args(ByteSize size_in_bytes, void* start) {
if (in_bytes(size_in_bytes) != 0) {
_popframe_preserved_args = NEW_C_HEAP_ARRAY(char, in_bytes(size_in_bytes));
_popframe_preserved_args_size = in_bytes(size_in_bytes);
Copy::conjoint_bytes(start, _popframe_preserved_args, _popframe_preserved_args_size);
Copy::conjoint_jbytes(start, _popframe_preserved_args, _popframe_preserved_args_size);
}
}

View file

@@ -355,9 +355,9 @@ void vframeArrayElement::unpack_on_stack(int callee_parameters,
} else {
base = iframe()->interpreter_frame_expression_stack();
}
Copy::conjoint_bytes(saved_args,
base,
popframe_preserved_args_size_in_bytes);
Copy::conjoint_jbytes(saved_args,
base,
popframe_preserved_args_size_in_bytes);
thread->popframe_free_preserved_args();
}
}

View file

@@ -111,6 +111,35 @@ char* ReservedSpace::reserve_and_align(const size_t reserve_size,
return result;
}
// Helper method.
static bool failed_to_reserve_as_requested(char* base, char* requested_address,
const size_t size, bool special)
{
if (base == requested_address || requested_address == NULL)
return false; // did not fail
if (base != NULL) {
// Different reserve address may be acceptable in other cases
// but for compressed oops heap should be at requested address.
assert(UseCompressedOops, "currently requested address used only for compressed oops");
if (PrintCompressedOopsMode) {
tty->cr();
tty->print_cr("Reserved memory at not requested address: " PTR_FORMAT " vs " PTR_FORMAT, base, requested_address);
}
// OS ignored requested address. Try different address.
if (special) {
if (!os::release_memory_special(base, size)) {
fatal("os::release_memory_special failed");
}
} else {
if (!os::release_memory(base, size)) {
fatal("os::release_memory failed");
}
}
}
return true;
}
ReservedSpace::ReservedSpace(const size_t prefix_size,
const size_t prefix_align,
const size_t suffix_size,
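
failed_to_reserve_as_requested() centralizes the "OS ignored the requested address" case for compressed oops: if memory came back somewhere else, it releases the mapping (regular or large-page) and reports failure so the caller can retry. A sketch of the call pattern used by the callers further down in this file:

char* addr = os::attempt_reserve_memory_at(size, requested_address);
if (failed_to_reserve_as_requested(addr, requested_address, size, false /* not special */)) {
  // The mapping at the wrong address has already been released; fall back to
  // reserving without a fixed address (or trying a different one).
  addr = NULL;
}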
@@ -129,6 +158,10 @@ ReservedSpace::ReservedSpace(const size_t prefix_size,
assert((suffix_align & prefix_align - 1) == 0,
"suffix_align not divisible by prefix_align");
// Assert that if noaccess_prefix is used, it is the same as prefix_align.
assert(noaccess_prefix == 0 ||
noaccess_prefix == prefix_align, "noaccess prefix wrong");
// Add in noaccess_prefix to prefix_size;
const size_t adjusted_prefix_size = prefix_size + noaccess_prefix;
const size_t size = adjusted_prefix_size + suffix_size;
@@ -150,15 +183,16 @@ ReservedSpace::ReservedSpace(const size_t prefix_size,
_noaccess_prefix = 0;
_executable = false;
// Assert that if noaccess_prefix is used, it is the same as prefix_align.
assert(noaccess_prefix == 0 ||
noaccess_prefix == prefix_align, "noaccess prefix wrong");
// Optimistically try to reserve the exact size needed.
char* addr;
if (requested_address != 0) {
addr = os::attempt_reserve_memory_at(size,
requested_address-noaccess_prefix);
requested_address -= noaccess_prefix; // adjust address
assert(requested_address != NULL, "huge noaccess prefix?");
addr = os::attempt_reserve_memory_at(size, requested_address);
if (failed_to_reserve_as_requested(addr, requested_address, size, false)) {
// OS ignored requested address. Try different address.
addr = NULL;
}
} else {
addr = os::reserve_memory(size, NULL, prefix_align);
}
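
Before asking the OS for memory at a fixed address, the requested address is now shifted down by noaccess_prefix, so that once the prefix page is protected the usable heap still starts where the caller asked. An illustrative example with made-up numbers:

char*  wanted_heap_base = (char*)(32ull * 1024 * 1024 * 1024);  // example: heap wanted at 32 GB
size_t noaccess_prefix  = 4096;                                 // example: one 4 KB prefix page
char*  reserve_at       = wanted_heap_base - noaccess_prefix;   // address passed to the OS
// After reservation and protect_noaccess_prefix():
//   _base == reserve_at + noaccess_prefix == wanted_heap_base
//   [reserve_at, reserve_at + noaccess_prefix) is a no-access guard region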
@@ -222,11 +256,20 @@ void ReservedSpace::initialize(size_t size, size_t alignment, bool large,
bool special = large && !os::can_commit_large_page_memory();
char* base = NULL;
if (requested_address != 0) {
requested_address -= noaccess_prefix; // adjust requested address
assert(requested_address != NULL, "huge noaccess prefix?");
}
if (special) {
base = os::reserve_memory_special(size, requested_address, executable);
if (base != NULL) {
if (failed_to_reserve_as_requested(base, requested_address, size, true)) {
// OS ignored requested address. Try different address.
return;
}
// Check alignment constraints
if (alignment > 0) {
assert((uintptr_t) base % alignment == 0,
@@ -235,6 +278,13 @@ void ReservedSpace::initialize(size_t size, size_t alignment, bool large,
_special = true;
} else {
// failed; try to reserve regular memory below
if (UseLargePages && (!FLAG_IS_DEFAULT(UseLargePages) ||
!FLAG_IS_DEFAULT(LargePageSizeInBytes))) {
if (PrintCompressedOopsMode) {
tty->cr();
tty->print_cr("Reserve regular memory without large pages.");
}
}
}
}
@@ -248,8 +298,11 @@ void ReservedSpace::initialize(size_t size, size_t alignment, bool large,
// important. If available space is not detected, return NULL.
if (requested_address != 0) {
base = os::attempt_reserve_memory_at(size,
requested_address-noaccess_prefix);
base = os::attempt_reserve_memory_at(size, requested_address);
if (failed_to_reserve_as_requested(base, requested_address, size, false)) {
// OS ignored requested address. Try different address.
base = NULL;
}
} else {
base = os::reserve_memory(size, NULL, alignment);
}
@@ -365,7 +418,12 @@ void ReservedSpace::release() {
}
void ReservedSpace::protect_noaccess_prefix(const size_t size) {
// If there is noaccess prefix, return.
assert( (_noaccess_prefix != 0) == (UseCompressedOops && _base != NULL &&
(size_t(_base + _size) > OopEncodingHeapMax) &&
Universe::narrow_oop_use_implicit_null_checks()),
"noaccess_prefix should be used only with non zero based compressed oops");
// If there is no noaccess prefix, return.
if (_noaccess_prefix == 0) return;
assert(_noaccess_prefix >= (size_t)os::vm_page_size(),
@@ -377,6 +435,10 @@ void ReservedSpace::protect_noaccess_prefix(const size_t size) {
_special)) {
fatal("cannot protect protection page");
}
if (PrintCompressedOopsMode) {
tty->cr();
tty->print_cr("Protected page at the reserved heap base: " PTR_FORMAT " / " INTX_FORMAT " bytes", _base, _noaccess_prefix);
}
_base += _noaccess_prefix;
_size -= _noaccess_prefix;
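
The new assert and trace output tie the no-access prefix to compressed oops: the prefix exists only when the heap cannot be placed low enough for zero-based encoding and implicit null checks are in use. A hedged sketch of why the protected page matters in that mode; the helper below is illustrative, not HotSpot's actual decode routine:

// With a non-zero compressed-oops base, an unchecked decode of a null narrowOop
// yields the base address itself, which is placed in the protected prefix just
// below the heap, so the access faults and implicit null checks keep working.
static inline oop decode_not_null_sketch(narrowOop v, address base, int shift) {
  return (oop)(void*)(base + ((uintptr_t)v << shift));
}
// decode_not_null_sketch(0, base, shift) == (oop)base  -> faults on access.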