Merge commit 66e2f70db7
14 changed files with 632 additions and 111 deletions
@@ -4080,7 +4080,7 @@ instanceKlassHandle ClassFileParser::parseClassFile(Symbol* name,
 
     // Generate any default methods - default methods are interface methods
     // that have a default implementation. This is new with Lambda project.
-    if (has_default_methods && !access_flags.is_interface() ) {
+    if (has_default_methods ) {
      DefaultMethods::generate_default_methods(
          this_klass(), &all_mirandas, CHECK_(nullHandle));
    }
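
Illustration (a hedged Java sketch, hypothetical names): dropping the !access_flags.is_interface() guard means default method processing now also runs when the class being parsed is itself an interface, for example an interface that only inherits a default method.

    // Hypothetical example: with this change, loading Sub (an interface) also
    // goes through DefaultMethods::generate_default_methods, not just loading Impl.
    interface Base {
        default String greet() { return "hello"; }   // default method
    }

    interface Sub extends Base {
        // inherits greet(); as an interface it is now processed for defaults too
    }

    class Impl implements Sub { }

    public class DefaultOnInterface {
        public static void main(String[] args) {
            System.out.println(new Impl().greet());  // prints "hello"
        }
    }
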
@@ -171,8 +171,12 @@ class HierarchyVisitor : StackObj {
   }
   bool is_cancelled() const { return _cancelled; }
 
+  // This code used to skip interface classes because their only
+  // superclass was j.l.Object which would be also covered by class
+  // superclass hierarchy walks. Now that the starting point can be
+  // an interface, we must ensure we catch j.l.Object as the super.
   static bool has_super(InstanceKlass* cls) {
-    return cls->super() != NULL && !cls->is_interface();
+    return cls->super() != NULL;
   }
 
   Node* node_at_depth(int i) const {
@@ -391,16 +395,21 @@ class MethodFamily : public ResourceObj {
      return;
    }
 
+    // Qualified methods are maximally-specific methods
+    // These include public, instance concrete (=default) and abstract methods
    GrowableArray<Method*> qualified_methods;
    int num_defaults = 0;
    int default_index = -1;
+    int qualified_index = -1;
    for (int i = 0; i < _members.length(); ++i) {
      Pair<Method*,QualifiedState> entry = _members.at(i);
      if (entry.second == QUALIFIED) {
        qualified_methods.append(entry.first);
-        default_index++;
+        qualified_index++;
        if (entry.first->is_default_method()) {
          num_defaults++;
+          default_index = qualified_index;
+
        }
      }
    }
@@ -408,16 +417,10 @@ class MethodFamily : public ResourceObj {
    if (qualified_methods.length() == 0) {
      _exception_message = generate_no_defaults_message(CHECK);
      _exception_name = vmSymbols::java_lang_AbstractMethodError();
-    } else if (qualified_methods.length() == 1) {
-      // leave abstract methods alone, they will be found via normal search path
-      Method* method = qualified_methods.at(0);
-      if (!method->is_abstract()) {
-        _selected_target = qualified_methods.at(0);
-      }
    // If only one qualified method is default, select that
    } else if (num_defaults == 1) {
      _selected_target = qualified_methods.at(default_index);
-    } else {
+    } else if (num_defaults > 1) {
      _exception_message = generate_conflicts_message(&qualified_methods,CHECK);
      _exception_name = vmSymbols::java_lang_IncompatibleClassChangeError();
      if (TraceDefaultMethods) {
@@ -425,6 +428,7 @@ class MethodFamily : public ResourceObj {
        tty->print_cr("");
      }
    }
+    // leave abstract methods alone, they will be found via normal search path
  }
 
  bool contains_signature(Symbol* query) {
@@ -704,8 +708,10 @@ class FindMethodsByErasedSig : public HierarchyVisitor<FindMethodsByErasedSig> {
    Method* m = iklass->find_method(_method_name, _method_signature);
    // private interface methods are not candidates for default methods
    // invokespecial to private interface methods doesn't use default method logic
+    // The overpasses are your supertypes' errors, we do not include them
    // future: take access controls into account for superclass methods
-    if (m != NULL && !m->is_static() && (!iklass->is_interface() || m->is_public())) {
+    if (m != NULL && !m->is_static() && !m->is_overpass() &&
+        (!iklass->is_interface() || m->is_public())) {
      if (_family == NULL) {
        _family = new StatefulMethodFamily();
      }
@@ -781,7 +787,8 @@ void DefaultMethods::generate_default_methods(
 #ifndef PRODUCT
  if (TraceDefaultMethods) {
    ResourceMark rm;  // be careful with these!
-    tty->print_cr("Class %s requires default method processing",
+    tty->print_cr("%s %s requires default method processing",
+        klass->is_interface() ? "Interface" : "Class",
        klass->name()->as_klass_external_name());
    PrintHierarchy printer;
    printer.run(klass);
@@ -806,7 +813,7 @@ void DefaultMethods::generate_default_methods(
  }
 #ifndef PRODUCT
  if (TraceDefaultMethods) {
-    tty->print_cr("Creating overpasses...");
+    tty->print_cr("Creating defaults and overpasses...");
  }
 #endif // ndef PRODUCT
 
@@ -1076,7 +1083,9 @@ static void merge_in_new_methods(InstanceKlass* klass,
  klass->set_initial_method_idnum(new_size);
 
  ClassLoaderData* cld = klass->class_loader_data();
+  if (original_methods ->length() > 0) {
    MetadataFactory::free_array(cld, original_methods);
+  }
  if (original_ordering->length() > 0) {
    klass->set_method_ordering(merged_ordering);
    MetadataFactory::free_array(cld, original_ordering);
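
Illustration (a hedged Java sketch, hypothetical names) of the selection rule the MethodFamily code above implements: a single maximally-specific default is selected; several conflicting defaults produce an IncompatibleClassChangeError overpass; no implementation at all produces an AbstractMethodError overpass.

    // A minimal, compilable sketch; the conflict scenario is described in
    // comments because it only arises through separate compilation.
    interface A { default int size() { return 1; } }
    interface B { default int size() { return 2; } }

    // Unique default: A.size() is the single maximally-specific default, so the
    // VM selects it for D (num_defaults == 1, default_index picks it).
    interface D extends A { }

    public class DefaultSelection implements D {
        public static void main(String[] args) {
            System.out.println(new DefaultSelection().size());   // prints 1
        }
    }

    // Conflict: if an interface C extends both A and B but was compiled before B
    // declared size(), running it against the new B yields two maximally-specific
    // defaults (num_defaults > 1); the generated overpass then throws
    // IncompatibleClassChangeError when C.size() is invoked. If every
    // maximally-specific method is abstract instead, the overpass throws
    // AbstractMethodError (the qualified_methods.length() == 0 branch).
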
@@ -152,11 +152,13 @@ CallInfo::CallInfo(Method* resolved_method, Klass* resolved_klass) {
    // Could be an Object method inherited into an interface, but still a vtable call.
    kind = CallInfo::vtable_call;
  } else if (!resolved_klass->is_interface()) {
-    // A miranda method. Compute the vtable index.
+    // A default or miranda method. Compute the vtable index.
    ResourceMark rm;
    klassVtable* vt = InstanceKlass::cast(resolved_klass)->vtable();
-    index = vt->index_of_miranda(resolved_method->name(),
-                                 resolved_method->signature());
+    index = LinkResolver::vtable_index_of_interface_method(resolved_klass,
+                           resolved_method);
+    assert(index >= 0 , "we should have valid vtable index at this point");
+
    kind = CallInfo::vtable_call;
  } else if (resolved_method->has_vtable_index()) {
    // Can occur if an interface redeclares a method of Object.
@@ -279,7 +281,7 @@ void LinkResolver::lookup_instance_method_in_klasses(methodHandle& result, Klass
 }
 
 int LinkResolver::vtable_index_of_interface_method(KlassHandle klass,
-                                                   methodHandle resolved_method, TRAPS) {
+                                                   methodHandle resolved_method) {
 
  int vtable_index = Method::invalid_vtable_index;
  Symbol* name = resolved_method->name();
@@ -295,7 +297,7 @@ int LinkResolver::vtable_index_of_interface_method(KlassHandle klass,
  }
  if (vtable_index == Method::invalid_vtable_index) {
    // get vtable_index for miranda methods
-    ResourceMark rm(THREAD);
+    ResourceMark rm;
    klassVtable *vt = InstanceKlass::cast(klass())->vtable();
    vtable_index = vt->index_of_miranda(name, signature);
  }
@@ -1118,7 +1120,7 @@ void LinkResolver::runtime_resolve_virtual_method(CallInfo& result,
    // do lookup based on receiver klass using the vtable index
    if (resolved_method->method_holder()->is_interface()) { // miranda method
      vtable_index = vtable_index_of_interface_method(resolved_klass,
-                           resolved_method, CHECK);
+                           resolved_method);
      assert(vtable_index >= 0 , "we should have valid vtable index at this point");
 
      InstanceKlass* inst = InstanceKlass::cast(recv_klass());
@@ -1268,14 +1270,6 @@ void LinkResolver::runtime_resolve_interface_method(CallInfo& result, methodHand
                                      sel_method->name(),
                                      sel_method->signature()));
  }
-  // setup result
-  if (!resolved_method->has_itable_index()) {
-    int vtable_index = resolved_method->vtable_index();
-    assert(vtable_index == sel_method->vtable_index(), "sanity check");
-    result.set_virtual(resolved_klass, recv_klass, resolved_method, sel_method, vtable_index, CHECK);
-    return;
-  }
-  int itable_index = resolved_method()->itable_index();
 
  if (TraceItables && Verbose) {
    ResourceMark rm(THREAD);
@@ -1296,8 +1290,16 @@ void LinkResolver::runtime_resolve_interface_method(CallInfo& result, methodHand
    }
    tty->cr();
  }
+  // setup result
+  if (!resolved_method->has_itable_index()) {
+    int vtable_index = resolved_method->vtable_index();
+    assert(vtable_index == sel_method->vtable_index(), "sanity check");
+    result.set_virtual(resolved_klass, recv_klass, resolved_method, sel_method, vtable_index, CHECK);
+  } else {
+    int itable_index = resolved_method()->itable_index();
    result.set_interface(resolved_klass, recv_klass, resolved_method, sel_method, itable_index, CHECK);
+  }
 }
 
 
 methodHandle LinkResolver::linktime_resolve_interface_method_or_null(
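
Illustration (a hedged Java sketch, hypothetical names) of why LinkResolver now returns a vtable index for methods declared in interfaces: a default method inherited by a class is dispatched like any other virtual method.

    interface Greeter {
        default String greet() { return "hi"; }
    }

    class Person implements Greeter { }   // inherits greet(); it occupies a vtable slot in Person

    public class DefaultDispatch {
        public static void main(String[] args) {
            Greeter g = new Person();
            System.out.println(g.greet());  // invokeinterface, resolved through the itable/vtable machinery above
            Person p = new Person();
            System.out.println(p.greet());  // invokevirtual, a vtable call even though greet() lives in Greeter
        }
    }
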
@@ -130,7 +130,6 @@ class LinkResolver: AllStatic {
  static void lookup_polymorphic_method         (methodHandle& result, KlassHandle klass, Symbol* name, Symbol* signature,
                                                 KlassHandle current_klass, Handle *appendix_result_or_null, Handle *method_type_result, TRAPS);
 
-  static int vtable_index_of_interface_method(KlassHandle klass, methodHandle resolved_method, TRAPS);
  static void resolve_klass           (KlassHandle& result, constantPoolHandle pool, int index, TRAPS);
 
  static void resolve_pool  (KlassHandle& resolved_klass, Symbol*& method_name, Symbol*& method_signature, KlassHandle& current_klass, constantPoolHandle pool, int index, TRAPS);
@@ -186,6 +185,7 @@ class LinkResolver: AllStatic {
  static methodHandle resolve_interface_call_or_null(KlassHandle receiver_klass, KlassHandle resolved_klass, Symbol* method_name, Symbol* method_signature, KlassHandle current_klass);
  static methodHandle resolve_static_call_or_null   (KlassHandle resolved_klass, Symbol* method_name, Symbol* method_signature, KlassHandle current_klass);
  static methodHandle resolve_special_call_or_null  (KlassHandle resolved_klass, Symbol* method_name, Symbol* method_signature, KlassHandle current_klass);
+  static int vtable_index_of_interface_method(KlassHandle klass, methodHandle resolved_method);
 
  // same as above for compile-time resolution; returns vtable_index if current_klass if linked
  static int resolve_virtual_vtable_index  (KlassHandle receiver_klass, KlassHandle resolved_klass, Symbol* method_name, Symbol* method_signature, KlassHandle current_klass);
@@ -70,21 +70,21 @@ void Rewriter::compute_index_maps() {
 }
 
 // Unrewrite the bytecodes if an error occurs.
-void Rewriter::restore_bytecodes() {
+void Rewriter::restore_bytecodes(TRAPS) {
  int len = _methods->length();
 
  for (int i = len-1; i >= 0; i--) {
    Method* method = _methods->at(i);
-    scan_method(method, true);
+    scan_method(method, true, CHECK);
  }
 }
 
 // Creates a constant pool cache given a CPC map
 void Rewriter::make_constant_pool_cache(TRAPS) {
-  const int length = _cp_cache_map.length();
  ClassLoaderData* loader_data = _pool->pool_holder()->class_loader_data();
  ConstantPoolCache* cache =
-      ConstantPoolCache::allocate(loader_data, length, _cp_cache_map,
+      ConstantPoolCache::allocate(loader_data, _cp_cache_map,
+                                  _invokedynamic_cp_cache_map,
                                  _invokedynamic_references_map, CHECK);
 
  // initialize object cache in constant pool
@@ -154,6 +154,31 @@ void Rewriter::rewrite_member_reference(address bcp, int offset, bool reverse) {
  }
 }
 
+// If the constant pool entry for invokespecial is InterfaceMethodref,
+// we need to add a separate cpCache entry for its resolution, because it is
+// different than the resolution for invokeinterface with InterfaceMethodref.
+// These cannot share cpCache entries. It's unclear if all invokespecial to
+// InterfaceMethodrefs would resolve to the same thing so a new cpCache entry
+// is created for each one. This was added with lambda.
+void Rewriter::rewrite_invokespecial(address bcp, int offset, bool reverse, TRAPS) {
+  static int count = 0;
+  address p = bcp + offset;
+  if (!reverse) {
+    int cp_index = Bytes::get_Java_u2(p);
+    int cache_index = add_invokespecial_cp_cache_entry(cp_index);
+    if (cache_index != (int)(jushort) cache_index) {
+      THROW_MSG(vmSymbols::java_lang_InternalError(),
+        "This classfile overflows invokespecial for interfaces "
+        "and cannot be loaded");
+    }
+    Bytes::put_native_u2(p, cache_index);
+  } else {
+    int cache_index = Bytes::get_native_u2(p);
+    int cp_index = cp_cache_entry_pool_index(cache_index);
+    Bytes::put_Java_u2(p, cp_index);
+  }
+}
+
 
 // Adjust the invocation bytecode for a signature-polymorphic method (MethodHandle.invoke, etc.)
 void Rewriter::maybe_rewrite_invokehandle(address opc, int cp_index, int cache_index, bool reverse) {
@@ -203,7 +228,7 @@ void Rewriter::rewrite_invokedynamic(address bcp, int offset, bool reverse) {
  if (!reverse) {
    int cp_index = Bytes::get_Java_u2(p);
    int cache_index = add_invokedynamic_cp_cache_entry(cp_index);
-    add_invokedynamic_resolved_references_entries(cp_index, cache_index);
+    int resolved_index = add_invokedynamic_resolved_references_entries(cp_index, cache_index);
    // Replace the trailing four bytes with a CPC index for the dynamic
    // call site. Unlike other CPC entries, there is one per bytecode,
    // not just one per distinct CP entry. In other words, the
@@ -212,13 +237,20 @@ void Rewriter::rewrite_invokedynamic(address bcp, int offset, bool reverse) {
    // all these entries. That is the main reason invokedynamic
    // must have a five-byte instruction format. (Of course, other JVM
    // implementations can use the bytes for other purposes.)
-    Bytes::put_native_u4(p, ConstantPool::encode_invokedynamic_index(cache_index));
    // Note: We use native_u4 format exclusively for 4-byte indexes.
+    Bytes::put_native_u4(p, ConstantPool::encode_invokedynamic_index(cache_index));
+    // add the bcp in case we need to patch this bytecode if we also find a
+    // invokespecial/InterfaceMethodref in the bytecode stream
+    _patch_invokedynamic_bcps->push(p);
+    _patch_invokedynamic_refs->push(resolved_index);
  } else {
-    // callsite index
    int cache_index = ConstantPool::decode_invokedynamic_index(
                        Bytes::get_native_u4(p));
-    int cp_index = cp_cache_entry_pool_index(cache_index);
+    // We will reverse the bytecode rewriting _after_ adjusting them.
+    // Adjust the cache index by offset to the invokedynamic entries in the
+    // cpCache plus the delta if the invokedynamic bytecodes were adjusted.
+    cache_index = cp_cache_delta() + _first_iteration_cp_cache_limit;
+    int cp_index = invokedynamic_cp_cache_entry_pool_index(cache_index);
    assert(_pool->tag_at(cp_index).is_invoke_dynamic(), "wrong index");
    // zero out 4 bytes
    Bytes::put_Java_u4(p, 0);
@@ -226,6 +258,34 @@ void Rewriter::rewrite_invokedynamic(address bcp, int offset, bool reverse) {
  }
 }
 
+void Rewriter::patch_invokedynamic_bytecodes() {
+  // If the end of the cp_cache is the same as after initializing with the
+  // cpool, nothing needs to be done. Invokedynamic bytecodes are at the
+  // correct offsets. ie. no invokespecials added
+  int delta = cp_cache_delta();
+  if (delta > 0) {
+    int length = _patch_invokedynamic_bcps->length();
+    assert(length == _patch_invokedynamic_refs->length(),
+           "lengths should match");
+    for (int i = 0; i < length; i++) {
+      address p = _patch_invokedynamic_bcps->at(i);
+      int cache_index = ConstantPool::decode_invokedynamic_index(
+                          Bytes::get_native_u4(p));
+      Bytes::put_native_u4(p, ConstantPool::encode_invokedynamic_index(cache_index + delta));
+
+      // invokedynamic resolved references map also points to cp cache and must
+      // add delta to each.
+      int resolved_index = _patch_invokedynamic_refs->at(i);
+      for (int entry = 0; entry < ConstantPoolCacheEntry::_indy_resolved_references_entries; entry++) {
+        assert(_invokedynamic_references_map[resolved_index+entry] == cache_index,
+               "should be the same index");
+        _invokedynamic_references_map.at_put(resolved_index+entry,
+                                             cache_index + delta);
+      }
+    }
+  }
+}
+
 
 // Rewrite some ldc bytecodes to _fast_aldc
 void Rewriter::maybe_rewrite_ldc(address bcp, int offset, bool is_wide,
@@ -269,7 +329,7 @@ void Rewriter::maybe_rewrite_ldc(address bcp, int offset, bool is_wide,
 
 
 // Rewrites a method given the index_map information
-void Rewriter::scan_method(Method* method, bool reverse) {
+void Rewriter::scan_method(Method* method, bool reverse, TRAPS) {
 
  int nof_jsrs = 0;
  bool has_monitor_bytecodes = false;
@@ -329,12 +389,25 @@ void Rewriter::scan_method(Method* method, bool reverse) {
 #endif
          break;
        }
+
+        case Bytecodes::_invokespecial  : {
+          int offset = prefix_length + 1;
+          address p = bcp + offset;
+          int cp_index = Bytes::get_Java_u2(p);
+          // InterfaceMethodref
+          if (_pool->tag_at(cp_index).is_interface_method()) {
+            rewrite_invokespecial(bcp, offset, reverse, CHECK);
+          } else {
+            rewrite_member_reference(bcp, offset, reverse);
+          }
+          break;
+        }
+
        case Bytecodes::_getstatic      : // fall through
        case Bytecodes::_putstatic      : // fall through
        case Bytecodes::_getfield       : // fall through
        case Bytecodes::_putfield       : // fall through
        case Bytecodes::_invokevirtual  : // fall through
-        case Bytecodes::_invokespecial  : // fall through
        case Bytecodes::_invokestatic   :
        case Bytecodes::_invokeinterface:
        case Bytecodes::_invokehandle   : // if reverse=true
@@ -426,16 +499,21 @@ Rewriter::Rewriter(instanceKlassHandle klass, constantPoolHandle cpool, Array<Me
 
  for (int i = len-1; i >= 0; i--) {
    Method* method = _methods->at(i);
-    scan_method(method);
+    scan_method(method, false, CHECK); // If you get an error here,
+                                       // there is no reversing bytecodes
  }
 
+  // May have to fix invokedynamic bytecodes if invokestatic/InterfaceMethodref
+  // entries had to be added.
+  patch_invokedynamic_bytecodes();
+
  // allocate constant pool cache, now that we've seen all the bytecodes
  make_constant_pool_cache(THREAD);
 
  // Restore bytecodes to their unrewritten state if there are exceptions
  // rewriting bytecodes or allocating the cpCache
  if (HAS_PENDING_EXCEPTION) {
-    restore_bytecodes();
+    restore_bytecodes(CATCH);
    return;
  }
 
@@ -452,7 +530,7 @@ Rewriter::Rewriter(instanceKlassHandle klass, constantPoolHandle cpool, Array<Me
  // relocating bytecodes. If some are relocated, that is ok because that
  // doesn't affect constant pool to cpCache rewriting.
  if (HAS_PENDING_EXCEPTION) {
-    restore_bytecodes();
+    restore_bytecodes(CATCH);
    return;
  }
  // Method might have gotten rewritten.
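
Illustration (a hedged Java sketch, hypothetical names) of the bytecode shapes behind the Rewriter changes above: a lambda compiles to invokedynamic, and an Interface.super call compiles to invokespecial with an InterfaceMethodref operand; when both occur in one class, the extra invokespecial cpCache entries are appended after the first pass and the invokedynamic cache indices are patched by the delta.

    interface Logger {
        default void log(String msg) { System.out.println(msg); }
    }

    public class MixedInvokes implements Logger {
        @Override
        public void log(String msg) {
            // invokespecial whose constant pool operand is an InterfaceMethodref (Logger.log):
            Logger.super.log("[prefix] " + msg);
        }

        public static void main(String[] args) {
            // The lambda below compiles to an invokedynamic instruction.
            Runnable r = () -> new MixedInvokes().log("hello");
            r.run();
        }
    }
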
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1998, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1998, 2013, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@@ -46,6 +46,17 @@ class Rewriter: public StackObj {
  intArray            _method_handle_invokers;
  int                 _resolved_reference_limit;
 
+  // For mapping invokedynamic bytecodes, which are discovered during method
+  // scanning. The invokedynamic entries are added at the end of the cpCache.
+  // If there are any invokespecial/InterfaceMethodref special case bytecodes,
+  // these entries are added before invokedynamic entries so that the
+  // invokespecial bytecode 16 bit index doesn't overflow.
+  intStack            _invokedynamic_cp_cache_map;
+
+  // For patching.
+  GrowableArray<address>* _patch_invokedynamic_bcps;
+  GrowableArray<int>*     _patch_invokedynamic_refs;
+
  void init_maps(int length) {
    _cp_map.initialize(length, -1);
    // Choose an initial value large enough that we don't get frequent
@@ -56,45 +67,81 @@ class Rewriter: public StackObj {
    _resolved_references_map.initialize(length/2);
    _invokedynamic_references_map.initialize(length/2);
    _resolved_reference_limit = -1;
-    DEBUG_ONLY(_cp_cache_index_limit = -1);
+    _first_iteration_cp_cache_limit = -1;
+
+    // invokedynamic specific fields
+    _invokedynamic_cp_cache_map.initialize(length/4);
+    _patch_invokedynamic_bcps = new GrowableArray<address>(length/4);
+    _patch_invokedynamic_refs = new GrowableArray<int>(length/4);
  }
 
-  int _cp_cache_index_limit;
+  int _first_iteration_cp_cache_limit;
  void record_map_limits() {
-#ifdef ASSERT
-    // Record initial size of the two arrays generated for the CP cache:
-    _cp_cache_index_limit = _cp_cache_map.length();
-#endif //ASSERT
+    // Record initial size of the two arrays generated for the CP cache
+    // relative to walking the constant pool.
+    _first_iteration_cp_cache_limit = _cp_cache_map.length();
    _resolved_reference_limit = _resolved_references_map.length();
  }
 
+  int cp_cache_delta() {
+    // How many cp cache entries were added since recording map limits after
+    // cp cache initialization?
+    assert(_first_iteration_cp_cache_limit != -1, "only valid after first iteration");
+    return _cp_cache_map.length() - _first_iteration_cp_cache_limit;
+  }
+
  int  cp_entry_to_cp_cache(int i) { assert(has_cp_cache(i), "oob"); return _cp_map[i]; }
  bool has_cp_cache(int i) { return (uint)i < (uint)_cp_map.length() && _cp_map[i] >= 0; }
 
+  int add_map_entry(int cp_index, intArray* cp_map, intStack* cp_cache_map) {
+    assert(cp_map->at(cp_index) == -1, "not twice on same cp_index");
+    int cache_index = cp_cache_map->append(cp_index);
+    cp_map->at_put(cp_index, cache_index);
+    return cache_index;
+  }
+
  int add_cp_cache_entry(int cp_index) {
    assert(_pool->tag_at(cp_index).value() != JVM_CONSTANT_InvokeDynamic, "use indy version");
-    assert(_cp_map[cp_index] == -1, "not twice on same cp_index");
-    assert(_cp_cache_index_limit == -1, "do not add cache entries after first iteration");
-    int cache_index = _cp_cache_map.append(cp_index);
-    _cp_map.at_put(cp_index, cache_index);
+    assert(_first_iteration_cp_cache_limit == -1, "do not add cache entries after first iteration");
+    int cache_index = add_map_entry(cp_index, &_cp_map, &_cp_cache_map);
    assert(cp_entry_to_cp_cache(cp_index) == cache_index, "");
    assert(cp_cache_entry_pool_index(cache_index) == cp_index, "");
    return cache_index;
  }
 
-  // add a new CP cache entry beyond the normal cache (for invokedynamic only)
  int add_invokedynamic_cp_cache_entry(int cp_index) {
    assert(_pool->tag_at(cp_index).value() == JVM_CONSTANT_InvokeDynamic, "use non-indy version");
-    assert(_cp_map[cp_index] == -1, "do not map from cp_index");
-    assert(_cp_cache_index_limit >= 0, "add indy cache entries after first iteration");
+    assert(_first_iteration_cp_cache_limit >= 0, "add indy cache entries after first iteration");
+    // add to the invokedynamic index map.
+    int cache_index = _invokedynamic_cp_cache_map.append(cp_index);
+    // do not update _cp_map, since the mapping is one-to-many
+    assert(invokedynamic_cp_cache_entry_pool_index(cache_index) == cp_index, "");
+    // this index starts at one but in the bytecode it's appended to the end.
+    return cache_index + _first_iteration_cp_cache_limit;
+  }
+
+  int invokedynamic_cp_cache_entry_pool_index(int cache_index) {
+    int cp_index = _invokedynamic_cp_cache_map[cache_index];
+    return cp_index;
+  }
+
+  // add a new CP cache entry beyond the normal cache for the special case of
+  // invokespecial with InterfaceMethodref as cpool operand.
+  int add_invokespecial_cp_cache_entry(int cp_index) {
+    assert(_first_iteration_cp_cache_limit >= 0, "add these special cache entries after first iteration");
+    // Don't add InterfaceMethodref if it already exists at the end.
+    for (int i = _first_iteration_cp_cache_limit; i < _cp_cache_map.length(); i++) {
+      if (cp_cache_entry_pool_index(i) == cp_index) {
+        return i;
+      }
+    }
    int cache_index = _cp_cache_map.append(cp_index);
-    assert(cache_index >= _cp_cache_index_limit, "");
+    assert(cache_index >= _first_iteration_cp_cache_limit, "");
    // do not update _cp_map, since the mapping is one-to-many
    assert(cp_cache_entry_pool_index(cache_index) == cp_index, "");
    return cache_index;
  }
 
-  // fix duplicated code later
  int  cp_entry_to_resolved_references(int cp_index) const {
    assert(has_entry_in_resolved_references(cp_index), "oob");
    return _reference_map[cp_index];
@@ -105,10 +152,7 @@ class Rewriter: public StackObj {
 
  // add a new entry to the resolved_references map
  int add_resolved_references_entry(int cp_index) {
-    assert(_reference_map[cp_index] == -1, "not twice on same cp_index");
-    assert(_resolved_reference_limit == -1, "do not add CP refs after first iteration");
-    int ref_index = _resolved_references_map.append(cp_index);
-    _reference_map.at_put(cp_index, ref_index);
+    int ref_index = add_map_entry(cp_index, &_reference_map, &_resolved_references_map);
    assert(cp_entry_to_resolved_references(cp_index) == ref_index, "");
    return ref_index;
  }
@@ -145,14 +189,18 @@ class Rewriter: public StackObj {
 
  void compute_index_maps();
  void make_constant_pool_cache(TRAPS);
-  void scan_method(Method* m, bool reverse = false);
+  void scan_method(Method* m, bool reverse, TRAPS);
  void rewrite_Object_init(methodHandle m, TRAPS);
-  void rewrite_member_reference(address bcp, int offset, bool reverse = false);
-  void maybe_rewrite_invokehandle(address opc, int cp_index, int cache_index, bool reverse = false);
-  void rewrite_invokedynamic(address bcp, int offset, bool reverse = false);
-  void maybe_rewrite_ldc(address bcp, int offset, bool is_wide, bool reverse = false);
+  void rewrite_member_reference(address bcp, int offset, bool reverse);
+  void maybe_rewrite_invokehandle(address opc, int cp_index, int cache_index, bool reverse);
+  void rewrite_invokedynamic(address bcp, int offset, bool reverse);
+  void maybe_rewrite_ldc(address bcp, int offset, bool is_wide, bool reverse);
+  void rewrite_invokespecial(address bcp, int offset, bool reverse, TRAPS);
+
+  void patch_invokedynamic_bytecodes();
+
  // Revert bytecodes in case of an exception.
-  void restore_bytecodes();
+  void restore_bytecodes(TRAPS);
 
  static methodHandle rewrite_jsrs(methodHandle m, TRAPS);
 public:
@@ -554,24 +554,37 @@ void ConstantPoolCacheEntry::verify(outputStream* st) const {
 // Implementation of ConstantPoolCache
 
 ConstantPoolCache* ConstantPoolCache::allocate(ClassLoaderData* loader_data,
-                                               int length,
                                               const intStack& index_map,
+                                               const intStack& invokedynamic_index_map,
                                               const intStack& invokedynamic_map, TRAPS) {
+
+  const int length = index_map.length() + invokedynamic_index_map.length();
  int size = ConstantPoolCache::size(length);
 
  return new (loader_data, size, false, MetaspaceObj::ConstantPoolCacheType, THREAD)
-    ConstantPoolCache(length, index_map, invokedynamic_map);
+    ConstantPoolCache(length, index_map, invokedynamic_index_map, invokedynamic_map);
 }
 
 void ConstantPoolCache::initialize(const intArray& inverse_index_map,
+                                   const intArray& invokedynamic_inverse_index_map,
                                   const intArray& invokedynamic_references_map) {
-  assert(inverse_index_map.length() == length(), "inverse index map must have same length as cache");
-  for (int i = 0; i < length(); i++) {
+  for (int i = 0; i < inverse_index_map.length(); i++) {
    ConstantPoolCacheEntry* e = entry_at(i);
    int original_index = inverse_index_map[i];
    e->initialize_entry(original_index);
    assert(entry_at(i) == e, "sanity");
  }
+
+  // Append invokedynamic entries at the end
+  int invokedynamic_offset = inverse_index_map.length();
+  for (int i = 0; i < invokedynamic_inverse_index_map.length(); i++) {
+    int offset = i + invokedynamic_offset;
+    ConstantPoolCacheEntry* e = entry_at(offset);
+    int original_index = invokedynamic_inverse_index_map[i];
+    e->initialize_entry(original_index);
+    assert(entry_at(offset) == e, "sanity");
+  }
+
  for (int ref = 0; ref < invokedynamic_references_map.length(); ref++) {
    const int cpci = invokedynamic_references_map[ref];
    if (cpci >= 0) {
@@ -31,6 +31,10 @@
 
 class PSPromotionManager;
 
+// The ConstantPoolCache is not a cache! It is the resolution table that the
+// interpreter uses to avoid going into the runtime and a way to access resolved
+// values.
+
 // A ConstantPoolCacheEntry describes an individual entry of the constant
 // pool cache. There's 2 principal kinds of entries: field entries for in-
 // stance & static field access, and method entries for invokes. Some of
@@ -398,20 +402,27 @@ class ConstantPoolCache: public MetaspaceObj {
  debug_only(friend class ClassVerifier;)
 
  // Constructor
-  ConstantPoolCache(int length, const intStack& inverse_index_map,
+  ConstantPoolCache(int length,
+                    const intStack& inverse_index_map,
+                    const intStack& invokedynamic_inverse_index_map,
                    const intStack& invokedynamic_references_map) :
-    _length(length), _constant_pool(NULL) {
-    initialize(inverse_index_map, invokedynamic_references_map);
+                    _length(length),
+                    _constant_pool(NULL) {
+    initialize(inverse_index_map, invokedynamic_inverse_index_map,
+               invokedynamic_references_map);
    for (int i = 0; i < length; i++) {
      assert(entry_at(i)->is_f1_null(), "Failed to clear?");
    }
  }
 
  // Initialization
-  void initialize(const intArray& inverse_index_map, const intArray& invokedynamic_references_map);
+  void initialize(const intArray& inverse_index_map,
+                  const intArray& invokedynamic_inverse_index_map,
+                  const intArray& invokedynamic_references_map);
 public:
-  static ConstantPoolCache* allocate(ClassLoaderData* loader_data, int length,
-                                     const intStack& inverse_index_map,
+  static ConstantPoolCache* allocate(ClassLoaderData* loader_data,
+                                     const intStack& cp_cache_map,
+                                     const intStack& invokedynamic_cp_cache_map,
                                     const intStack& invokedynamic_references_map, TRAPS);
  bool is_constantPoolCache() const { return true; }
 
@@ -86,7 +86,11 @@ void klassVtable::compute_vtable_size_and_num_mirandas(
  get_mirandas(&new_mirandas, all_mirandas, super, methods, NULL, local_interfaces);
  *num_new_mirandas = new_mirandas.length();
 
+  // Interfaces do not need interface methods in their vtables
+  // This includes miranda methods and during later processing, default methods
+  if (!class_flags.is_interface()) {
    vtable_length += *num_new_mirandas * vtableEntry::size();
+  }
 
  if (Universe::is_bootstrapping() && vtable_length == 0) {
    // array classes don't have their superclass set correctly during
@@ -224,7 +228,11 @@ void klassVtable::initialize_vtable(bool checkconstraints, TRAPS) {
  }
 
  // add miranda methods; it will also return the updated initialized
+  // Interfaces do not need interface methods in their vtables
+  // This includes miranda methods and during later processing, default methods
+  if (!ik()->is_interface()) {
    initialized = fill_in_mirandas(initialized);
+  }
 
  // In class hierarchies where the accessibility is not increasing (i.e., going from private ->
  // package_private -> public/protected), the vtable might actually be smaller than our initial
@@ -332,10 +340,16 @@ bool klassVtable::update_inherited_vtable(InstanceKlass* klass, methodHandle tar
      // An interface never allocates new vtable slots, only inherits old ones.
      // This method will either be assigned its own itable index later,
      // or be assigned an inherited vtable index in the loop below.
-      // default methods store their vtable indices in the inheritors default_vtable_indices
-      assert (default_index == -1, "interfaces don't store resolved default methods");
+      // default methods inherited by classes store their vtable indices
+      // in the inheritor's default_vtable_indices
+      // default methods inherited by interfaces may already have a
+      // valid itable index, if so, don't change it
+      // overpass methods in an interface will be assigned an itable index later
+      // by an inheriting class
+      if (!is_default || !target_method()->has_itable_index()) {
        target_method()->set_vtable_index(Method::pending_itable_index);
      }
+    }
 
    // we need a new entry if there is no superclass
    if (klass->super() == NULL) {
@@ -494,8 +508,18 @@ void klassVtable::put_method_at(Method* m, int index) {
 #ifndef PRODUCT
  if (PrintVtables && Verbose) {
    ResourceMark rm;
-    tty->print_cr("adding %s::%s at index %d", _klass->internal_name(),
-      (m != NULL) ? m->name()->as_C_string() : "<NULL>", index);
+    const char* sig = (m != NULL) ? m->name_and_sig_as_C_string() : "<NULL>";
+    tty->print("adding %s at index %d, flags: ", sig, index);
+    if (m != NULL) {
+      m->access_flags().print_on(tty);
+      if (m->is_default_method()) {
+        tty->print("default ");
+      }
+      if (m->is_overpass()) {
+        tty->print("overpass");
+      }
+    }
+    tty->cr();
  }
 #endif
  table()[index].set(m);
@@ -631,9 +655,11 @@ bool klassVtable::is_miranda_entry_at(int i) {
  if (mhk->is_interface()) {
    assert(m->is_public(), "should be public");
    assert(ik()->implements_interface(method_holder) , "this class should implement the interface");
-    assert(is_miranda(m, ik()->methods(), ik()->default_methods(), ik()->super()), "should be a miranda_method");
+    // the search could find a miranda or a default method
+    if (is_miranda(m, ik()->methods(), ik()->default_methods(), ik()->super())) {
      return true;
    }
+  }
  return false;
 }
 
@@ -644,9 +670,10 @@ bool klassVtable::is_miranda_entry_at(int i) {
 // the caller must make sure that the method belongs to an interface implemented by the class
 // Miranda methods only include public interface instance methods
 // Not private methods, not static methods, not default == concrete abstract
+// Miranda methods also do not include overpass methods in interfaces
 bool klassVtable::is_miranda(Method* m, Array<Method*>* class_methods,
                             Array<Method*>* default_methods, Klass* super) {
-  if (m->is_static() || m->is_private()) {
+  if (m->is_static() || m->is_private() || m->is_overpass()) {
    return false;
  }
  Symbol* name = m->name();
@@ -744,6 +771,8 @@ void klassVtable::get_mirandas(GrowableArray<Method*>* new_mirandas,
 // Discover miranda methods ("miranda" = "interface abstract, no binding"),
 // and append them into the vtable starting at index initialized,
 // return the new value of initialized.
+// Miranda methods use vtable entries, but do not get assigned a vtable_index
+// The vtable_index is discovered by searching from the end of the vtable
 int klassVtable::fill_in_mirandas(int initialized) {
  GrowableArray<Method*> mirandas(20);
  get_mirandas(&mirandas, NULL, ik()->super(), ik()->methods(),
@@ -977,6 +1006,25 @@ int klassItable::assign_itable_indices_for_interface(Klass* klass) {
    if (interface_method_needs_itable_index(m)) {
      assert(!m->is_final_method(), "no final interface methods");
      // If m is already assigned a vtable index, do not disturb it.
+      if (TraceItables && Verbose) {
+        ResourceMark rm;
+        const char* sig = (m != NULL) ? m->name_and_sig_as_C_string() : "<NULL>";
+        if (m->has_vtable_index()) {
+          tty->print("itable index %d for method: %s, flags: ", m->vtable_index(), sig);
+        } else {
+          tty->print("itable index %d for method: %s, flags: ", ime_num, sig);
+        }
+        if (m != NULL) {
+          m->access_flags().print_on(tty);
+          if (m->is_default_method()) {
+            tty->print("default ");
+          }
+          if (m->is_overpass()) {
+            tty->print("overpass");
+          }
+        }
+        tty->cr();
+      }
      if (!m->has_vtable_index()) {
        assert(m->vtable_index() == Method::pending_itable_index, "set by initialize_vtable");
        m->set_itable_index(ime_num);
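
Illustration (a hedged Java sketch, hypothetical names) of the vtable terminology used above: a miranda method is a public abstract interface method that a class neither implements nor declares, while a default method carries an implementation; with this change an interface itself no longer reserves vtable slots for either kind.

    interface Shape {
        double area();                                  // abstract: becomes a miranda vtable entry in Partial
        default String describe() { return "shape"; }   // default: gets a regular vtable slot in concrete subclasses
    }

    // Partial neither declares nor implements area(), so the VM adds a "miranda"
    // vtable entry for it when building Partial's vtable.
    abstract class Partial implements Shape { }

    class Circle extends Partial {
        @Override
        public double area() { return Math.PI; }        // unit circle, for illustration
    }

    public class MirandaVsDefault {
        public static void main(String[] args) {
            Shape s = new Circle();
            System.out.println(s.describe() + " area=" + s.area());
        }
    }
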
@ -53,6 +53,8 @@
|
||||||
#include "compiler/compileBroker.hpp"
|
#include "compiler/compileBroker.hpp"
|
||||||
#include "runtime/compilationPolicy.hpp"
|
#include "runtime/compilationPolicy.hpp"
|
||||||
|
|
||||||
|
#define SIZE_T_MAX_VALUE ((size_t) -1)
|
||||||
|
|
||||||
bool WhiteBox::_used = false;
|
bool WhiteBox::_used = false;
|
||||||
|
|
||||||
WB_ENTRY(jlong, WB_GetObjectAddress(JNIEnv* env, jobject o, jobject obj))
|
WB_ENTRY(jlong, WB_GetObjectAddress(JNIEnv* env, jobject o, jobject obj))
|
||||||
|
@@ -109,6 +111,112 @@ WB_ENTRY(void, WB_PrintHeapSizes(JNIEnv* env, jobject o)) {
}
WB_END

#ifndef PRODUCT
// Forward declarations
void TestReservedSpace_test();
void TestReserveMemorySpecial_test();
void TestVirtualSpace_test();
void TestMetaspaceAux_test();
#endif

WB_ENTRY(void, WB_RunMemoryUnitTests(JNIEnv* env, jobject o))
#ifndef PRODUCT
  TestReservedSpace_test();
  TestReserveMemorySpecial_test();
  TestVirtualSpace_test();
  TestMetaspaceAux_test();
#endif
WB_END

WB_ENTRY(void, WB_ReadFromNoaccessArea(JNIEnv* env, jobject o))
  size_t granularity = os::vm_allocation_granularity();
  ReservedHeapSpace rhs(100 * granularity, granularity, false, NULL);
  VirtualSpace vs;
  vs.initialize(rhs, 50 * granularity);

  // Check that the constraints are complied with
  if (!( UseCompressedOops && rhs.base() != NULL &&
         Universe::narrow_oop_base() != NULL &&
         Universe::narrow_oop_use_implicit_null_checks() )) {
    tty->print_cr("WB_ReadFromNoaccessArea method is useless:\n "
                  "\tUseCompressedOops is %d\n"
                  "\trhs.base() is "PTR_FORMAT"\n"
                  "\tUniverse::narrow_oop_base() is "PTR_FORMAT"\n"
                  "\tUniverse::narrow_oop_use_implicit_null_checks() is %d",
                  UseCompressedOops,
                  rhs.base(),
                  Universe::narrow_oop_base(),
                  Universe::narrow_oop_use_implicit_null_checks());
    return;
  }
  tty->print_cr("Reading from no access area... ");
  tty->print_cr("*(vs.low_boundary() - rhs.noaccess_prefix() / 2 ) = %c",
                *(vs.low_boundary() - rhs.noaccess_prefix() / 2 ));
WB_END

static jint wb_stress_virtual_space_resize(size_t reserved_space_size,
                                           size_t magnitude, size_t iterations) {
  size_t granularity = os::vm_allocation_granularity();
  ReservedHeapSpace rhs(reserved_space_size * granularity, granularity, false, NULL);
  VirtualSpace vs;
  if (!vs.initialize(rhs, 0)) {
    tty->print_cr("Failed to initialize VirtualSpace. Can't proceed.");
    return 3;
  }

  long seed = os::random();
  tty->print_cr("Random seed is %ld", seed);
  os::init_random(seed);

  for (size_t i = 0; i < iterations; i++) {

    // Whether we will shrink or grow
    bool shrink = os::random() % 2L == 0;

    // Get a random delta to resize the virtual space by
    size_t delta = (size_t)os::random() % magnitude;

    // If we are about to shrink the virtual space below zero, expand instead
    if (shrink && vs.committed_size() < delta) {
      shrink = false;
    }

    // Resize by delta
    if (shrink) {
      vs.shrink_by(delta);
    } else {
      // If expanding fails, expand_by will silently return false
      vs.expand_by(delta, true);
    }
  }
  return 0;
}

WB_ENTRY(jint, WB_StressVirtualSpaceResize(JNIEnv* env, jobject o,
        jlong reserved_space_size, jlong magnitude, jlong iterations))
  tty->print_cr("reservedSpaceSize="JLONG_FORMAT", magnitude="JLONG_FORMAT", "
                "iterations="JLONG_FORMAT"\n", reserved_space_size, magnitude,
                iterations);
  if (reserved_space_size < 0 || magnitude < 0 || iterations < 0) {
    tty->print_cr("One of variables printed above is negative. Can't proceed.\n");
    return 1;
  }

  // sizeof(size_t) depends on whether the OS is 32-bit or 64-bit, while sizeof(jlong)
  // is always 8 bytes. That is why we must avoid overflow on 32-bit platforms.
  if (sizeof(size_t) < sizeof(jlong)) {
    jlong size_t_max_value = (jlong) SIZE_T_MAX_VALUE;
    if (reserved_space_size > size_t_max_value || magnitude > size_t_max_value
        || iterations > size_t_max_value) {
      tty->print_cr("One of variables printed above overflows size_t. Can't proceed.\n");
      return 2;
    }
  }

  return wb_stress_virtual_space_resize((size_t) reserved_space_size,
                                        (size_t) magnitude, (size_t) iterations);
WB_END

#if INCLUDE_ALL_GCS
WB_ENTRY(jboolean, WB_G1IsHumongous(JNIEnv* env, jobject o, jobject obj))
  G1CollectedHeap* g1 = G1CollectedHeap::heap();
@@ -445,6 +553,9 @@ static JNINativeMethod methods[] = {
  {CC"getCompressedOopsMaxHeapSize", CC"()J",
      (void*)&WB_GetCompressedOopsMaxHeapSize},
  {CC"printHeapSizes",          CC"()V",      (void*)&WB_PrintHeapSizes         },
  {CC"runMemoryUnitTests",      CC"()V",      (void*)&WB_RunMemoryUnitTests     },
  {CC"readFromNoaccessArea",    CC"()V",      (void*)&WB_ReadFromNoaccessArea   },
  {CC"stressVirtualSpaceResize",CC"(JJJ)I",   (void*)&WB_StressVirtualSpaceResize},
#if INCLUDE_ALL_GCS
  {CC"g1InConcurrentMark", CC"()Z",                   (void*)&WB_G1InConcurrentMark},
  {CC"g1IsHumongous",      CC"(Ljava/lang/Object;)Z", (void*)&WB_G1IsHumongous     },
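For reference, the JNI descriptor strings in this table map directly onto Java signatures: `()V` is a no-argument method returning void, and `(JJJ)I` takes three `long` arguments and returns an `int`. The sketch below is purely illustrative (the wrapper class name is not part of this change); the real declarations are added to the `WhiteBox` class at the end of this commit.

    // Illustrative sketch only: mirrors the JNI descriptors registered above.
    public class WhiteBoxMemoryNatives {
        // {CC"runMemoryUnitTests", CC"()V", ...}          -> ()V
        public native void runMemoryUnitTests();

        // {CC"readFromNoaccessArea", CC"()V", ...}        -> ()V
        public native void readFromNoaccessArea();

        // {CC"stressVirtualSpaceResize", CC"(JJJ)I", ...} -> (JJJ)I
        public native int stressVirtualSpaceResize(long reservedSpaceSize,
                                                   long magnitude,
                                                   long iterations);
    }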
80  hotspot/test/runtime/memory/ReadFromNoaccessArea.java  Normal file
@@ -0,0 +1,80 @@
/*
 * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 */

/*
 * @test
 * @summary Test that touching the noaccess area of a ReservedHeapSpace results in SIGSEGV/ACCESS_VIOLATION
 * @library /testlibrary /testlibrary/whitebox
 * @build ReadFromNoaccessArea
 * @run main ClassFileInstaller sun.hotspot.WhiteBox
 * @run main ReadFromNoaccessArea
 */

import com.oracle.java.testlibrary.*;
import sun.hotspot.WhiteBox;

public class ReadFromNoaccessArea {

  public static void main(String args[]) throws Exception {
    if (!Platform.is64bit()) {
      System.out.println("ReadFromNoaccessArea test is useful only on 64-bit architectures. Passing silently.");
      return;
    }

    ProcessBuilder pb = ProcessTools.createJavaProcessBuilder(
          "-Xbootclasspath/a:.",
          "-XX:+UnlockDiagnosticVMOptions",
          "-XX:+WhiteBoxAPI",
          "-XX:+UseCompressedOops",
          "-XX:HeapBaseMinAddress=33G",
          DummyClassWithMainTryingToReadFromNoaccessArea.class.getName());

    OutputAnalyzer output = new OutputAnalyzer(pb.start());
    System.out.println("******* Printing stdout for analysis in case of failure *******");
    System.out.println(output.getStdout());
    System.out.println("******* Printing stderr for analysis in case of failure *******");
    System.out.println(output.getStderr());
    System.out.println("***************************************************************");
    if (output.getStdout() != null && output.getStdout().contains("WB_ReadFromNoaccessArea method is useless")) {
      // Test conditions broken. There is no protected page in ReservedHeapSpace in these circumstances. Silently passing test.
      return;
    }
    if (Platform.isWindows()) {
      output.shouldContain("EXCEPTION_ACCESS_VIOLATION");
    } else if (Platform.isOSX()) {
      output.shouldContain("SIGBUS");
    } else {
      output.shouldContain("SIGSEGV");
    }
  }

  public static class DummyClassWithMainTryingToReadFromNoaccessArea {

    // This method calls the whitebox method that reads from the noaccess area
    public static void main(String args[]) throws Exception {
      WhiteBox.getWhiteBox().readFromNoaccessArea();
      throw new Exception("Call of readFromNoaccessArea succeeded! This is wrong. Crash expected. Test failed.");
    }
  }

}
74  hotspot/test/runtime/memory/RunUnitTestsConcurrently.java  Normal file
@@ -0,0 +1,74 @@
/*
 * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 */

/*
 * @test
 * @summary Test launches the in-VM unit tests concurrently
 * @library /testlibrary /testlibrary/whitebox
 * @build RunUnitTestsConcurrently
 * @run main ClassFileInstaller sun.hotspot.WhiteBox
 * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI RunUnitTestsConcurrently 30 15000
 */

import com.oracle.java.testlibrary.*;
import sun.hotspot.WhiteBox;

public class RunUnitTestsConcurrently {

  private static WhiteBox wb;
  private static long timeout;
  private static long timeStamp;

  public static class Worker implements Runnable {
    @Override
    public void run() {
      while (System.currentTimeMillis() - timeStamp < timeout) {
        WhiteBox.getWhiteBox().runMemoryUnitTests();
      }
    }
  }

  public static void main(String[] args) throws InterruptedException {
    if (!Platform.isDebugBuild() || !Platform.is64bit()) {
      return;
    }
    wb = WhiteBox.getWhiteBox();
    System.out.println("Starting threads");

    int threads = Integer.valueOf(args[0]);
    timeout = Long.valueOf(args[1]);

    timeStamp = System.currentTimeMillis();

    Thread[] threadsArray = new Thread[threads];
    for (int i = 0; i < threads; i++) {
      threadsArray[i] = new Thread(new Worker());
      threadsArray[i].start();
    }
    for (int i = 0; i < threads; i++) {
      threadsArray[i].join();
    }

    System.out.println("Quitting test.");
  }
}
41  hotspot/test/runtime/memory/StressVirtualSpaceResize.java  Normal file
@@ -0,0 +1,41 @@
/*
 * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 */

/*
 * @test
 * @summary Stress test that expands/shrinks VirtualSpace
 * @library /testlibrary /testlibrary/whitebox
 * @build StressVirtualSpaceResize
 * @run main ClassFileInstaller sun.hotspot.WhiteBox
 * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI StressVirtualSpaceResize
 */

import sun.hotspot.WhiteBox;

public class StressVirtualSpaceResize {

  public static void main(String args[]) throws Exception {
    if (WhiteBox.getWhiteBox().stressVirtualSpaceResize(1000, 0xffffL, 0xffffL) != 0)
      throw new RuntimeException("Whitebox method stressVirtualSpaceResize returned a non-zero exit code");
  }
}
@@ -144,4 +144,10 @@ public class WhiteBox {

  // force Full GC
  public native void fullGC();

  // Tests on ReservedSpace/VirtualSpace classes
  public native int stressVirtualSpaceResize(long reservedSpaceSize, long magnitude, long iterations);
  public native void runMemoryUnitTests();
  public native void readFromNoaccessArea();

}
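Taken together, the new declarations can be driven from any WhiteBox-enabled VM. The driver below is a minimal sketch, not part of this change (the class name is hypothetical); as in the jtreg headers above, the VM must be started with `-Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI`, and `readFromNoaccessArea()` is expected to crash the process, so it is gated behind an explicit argument here.

    import sun.hotspot.WhiteBox;

    // Hypothetical driver, shown for illustration only. Run with:
    //   -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI
    public class MemoryWhiteBoxDriver {
        public static void main(String[] args) {
            WhiteBox wb = WhiteBox.getWhiteBox();

            // In-VM ReservedSpace/VirtualSpace/Metaspace unit tests
            // (a no-op in product builds, per the #ifndef PRODUCT guard above).
            wb.runMemoryUnitTests();

            // Same parameters as StressVirtualSpaceResize.java above.
            int rc = wb.stressVirtualSpaceResize(1000, 0xffffL, 0xffffL);
            if (rc != 0) {
                throw new RuntimeException("stressVirtualSpaceResize returned " + rc);
            }

            // Deliberately last and opt-in: this read is expected to crash the VM.
            if (args.length > 0 && args[0].equals("crash")) {
                wb.readFromNoaccessArea();
            }
        }
    }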