8150607: Clean up CompactHashtable

Refactored code, and added test cases for serviceability agent

Reviewed-by: jiangli, ccheung
This commit is contained in:
Ioi Lam 2016-04-17 19:15:52 -07:00
parent 16c430d2b6
commit 6526d15d6e
18 changed files with 808 additions and 494 deletions

View file

@ -1,5 +1,5 @@
/* /*
* Copyright (c) 2001, 2015, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2001, 2016, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* *
* This code is free software; you can redistribute it and/or modify it * This code is free software; you can redistribute it and/or modify it
@ -85,6 +85,12 @@ public class SymbolTable extends sun.jvm.hotspot.utilities.Hashtable {
tables. */ tables. */
public Symbol probe(byte[] name) { public Symbol probe(byte[] name) {
long hashValue = hashSymbol(name); long hashValue = hashSymbol(name);
Symbol s = sharedTable.probe(name, hashValue);
if (s != null) {
return s;
}
for (HashtableEntry e = (HashtableEntry) bucket(hashToIndex(hashValue)); e != null; e = (HashtableEntry) e.next()) { for (HashtableEntry e = (HashtableEntry) bucket(hashToIndex(hashValue)); e != null; e = (HashtableEntry) e.next()) {
if (e.hash() == hashValue) { if (e.hash() == hashValue) {
Symbol sym = Symbol.create(e.literalValue()); Symbol sym = Symbol.create(e.literalValue());
@ -94,7 +100,7 @@ public class SymbolTable extends sun.jvm.hotspot.utilities.Hashtable {
} }
} }
return sharedTable.probe(name, hashValue); return null;
} }
public interface SymbolVisitor { public interface SymbolVisitor {

View file

@ -1,5 +1,5 @@
/* /*
* Copyright (c) 2015, 2015, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2015, 2016, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* *
* This code is free software; you can redistribute it and/or modify it * This code is free software; you can redistribute it and/or modify it
@ -44,21 +44,23 @@ public class CompactHashTable extends VMObject {
Type type = db.lookupType("SymbolCompactHashTable"); Type type = db.lookupType("SymbolCompactHashTable");
baseAddressField = type.getAddressField("_base_address"); baseAddressField = type.getAddressField("_base_address");
bucketCountField = type.getCIntegerField("_bucket_count"); bucketCountField = type.getCIntegerField("_bucket_count");
tableEndOffsetField = type.getCIntegerField("_table_end_offset"); entryCountField = type.getCIntegerField("_entry_count");
bucketsField = type.getAddressField("_buckets"); bucketsField = type.getAddressField("_buckets");
uintSize = db.lookupType("juint").getSize(); entriesField = type.getAddressField("_entries");
uintSize = db.lookupType("u4").getSize();
} }
// Fields // Fields
private static CIntegerField bucketCountField; private static CIntegerField bucketCountField;
private static CIntegerField tableEndOffsetField; private static CIntegerField entryCountField;
private static AddressField baseAddressField; private static AddressField baseAddressField;
private static AddressField bucketsField; private static AddressField bucketsField;
private static AddressField entriesField;
private static long uintSize; private static long uintSize;
private static int BUCKET_OFFSET_MASK = 0x3FFFFFFF; private static int BUCKET_OFFSET_MASK = 0x3FFFFFFF;
private static int BUCKET_TYPE_SHIFT = 30; private static int BUCKET_TYPE_SHIFT = 30;
private static int COMPACT_BUCKET_TYPE = 1; private static int VALUE_ONLY_BUCKET_TYPE = 1;
public CompactHashTable(Address addr) { public CompactHashTable(Address addr) {
super(addr); super(addr);
@ -68,12 +70,8 @@ public class CompactHashTable extends VMObject {
return (int)bucketCountField.getValue(addr); return (int)bucketCountField.getValue(addr);
} }
private int tableEndOffset() { private boolean isValueOnlyBucket(int bucket_info) {
return (int)tableEndOffsetField.getValue(addr); return (bucket_info >> BUCKET_TYPE_SHIFT) == VALUE_ONLY_BUCKET_TYPE;
}
private boolean isCompactBucket(int bucket_info) {
return (bucket_info >> BUCKET_TYPE_SHIFT) == COMPACT_BUCKET_TYPE;
} }
private int bucketOffset(int bucket_info) { private int bucketOffset(int bucket_info) {
@ -81,9 +79,8 @@ public class CompactHashTable extends VMObject {
} }
public Symbol probe(byte[] name, long hash) { public Symbol probe(byte[] name, long hash) {
if (bucketCount() <= 0) {
if (bucketCount() == 0) { // This CompactHashTable is not in use
// The table is invalid, so don't try to lookup
return null; return null;
} }
@ -91,34 +88,33 @@ public class CompactHashTable extends VMObject {
Symbol sym; Symbol sym;
Address baseAddress = baseAddressField.getValue(addr); Address baseAddress = baseAddressField.getValue(addr);
Address bucket = bucketsField.getValue(addr); Address bucket = bucketsField.getValue(addr);
Address bucketEnd = bucket;
long index = hash % bucketCount(); long index = hash % bucketCount();
int bucketInfo = (int)bucket.getCIntegerAt(index * uintSize, uintSize, true); int bucketInfo = (int)bucket.getCIntegerAt(index * uintSize, uintSize, true);
int bucketOffset = bucketOffset(bucketInfo); int bucketOffset = bucketOffset(bucketInfo);
int nextBucketInfo = (int)bucket.getCIntegerAt((index+1) * uintSize, uintSize, true); int nextBucketInfo = (int)bucket.getCIntegerAt((index+1) * uintSize, uintSize, true);
int nextBucketOffset = bucketOffset(nextBucketInfo); int nextBucketOffset = bucketOffset(nextBucketInfo);
bucket = bucket.addOffsetTo(bucketOffset * uintSize); Address entry = entriesField.getValue(addr).addOffsetTo(bucketOffset * uintSize);
if (isCompactBucket(bucketInfo)) { if (isValueOnlyBucket(bucketInfo)) {
symOffset = bucket.getCIntegerAt(0, uintSize, true); symOffset = entry.getCIntegerAt(0, uintSize, true);
sym = Symbol.create(baseAddress.addOffsetTo(symOffset)); sym = Symbol.create(baseAddress.addOffsetTo(symOffset));
if (sym.equals(name)) { if (sym.equals(name)) {
return sym; return sym;
} }
} else { } else {
bucketEnd = bucket.addOffsetTo(nextBucketOffset * uintSize); Address entryMax = entriesField.getValue(addr).addOffsetTo(nextBucketOffset * uintSize);
while (bucket.lessThan(bucketEnd)) { while (entry.lessThan(entryMax)) {
long symHash = bucket.getCIntegerAt(0, uintSize, true); long symHash = entry.getCIntegerAt(0, uintSize, true);
if (symHash == hash) { if (symHash == hash) {
symOffset = bucket.getCIntegerAt(uintSize, uintSize, true); symOffset = entry.getCIntegerAt(uintSize, uintSize, true);
Address symAddr = baseAddress.addOffsetTo(symOffset); Address symAddr = baseAddress.addOffsetTo(symOffset);
sym = Symbol.create(symAddr); sym = Symbol.create(symAddr);
if (sym.equals(name)) { if (sym.equals(name)) {
return sym; return sym;
} }
} }
bucket = bucket.addOffsetTo(2 * uintSize); entry = entry.addOffsetTo(2 * uintSize);
} }
} }
return null; return null;

View file

@ -1,5 +1,5 @@
/* /*
* Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 1997, 2016, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* *
* This code is free software; you can redistribute it and/or modify it * This code is free software; you can redistribute it and/or modify it
@ -25,6 +25,7 @@
#include "precompiled.hpp" #include "precompiled.hpp"
#include "classfile/compactHashtable.inline.hpp" #include "classfile/compactHashtable.inline.hpp"
#include "classfile/javaClasses.hpp" #include "classfile/javaClasses.hpp"
#include "memory/metadataFactory.hpp"
#include "memory/metaspaceShared.hpp" #include "memory/metaspaceShared.hpp"
#include "prims/jvm.h" #include "prims/jvm.h"
#include "utilities/numberSeq.hpp" #include "utilities/numberSeq.hpp"
@ -34,270 +35,259 @@
// //
// The compact hash table writer implementations // The compact hash table writer implementations
// //
CompactHashtableWriter::CompactHashtableWriter(int table_type, CompactHashtableWriter::CompactHashtableWriter(int num_buckets,
int num_entries,
CompactHashtableStats* stats) { CompactHashtableStats* stats) {
assert(DumpSharedSpaces, "dump-time only"); assert(DumpSharedSpaces, "dump-time only");
_type = table_type; _num_buckets = num_buckets;
_num_entries = num_entries; _num_entries = 0;
_num_buckets = number_of_buckets(_num_entries); _buckets = NEW_C_HEAP_ARRAY(GrowableArray<Entry>*, _num_buckets, mtSymbol);
_buckets = NEW_C_HEAP_ARRAY(Entry*, _num_buckets, mtSymbol); for (int i=0; i<_num_buckets; i++) {
memset(_buckets, 0, sizeof(Entry*) * _num_buckets); _buckets[i] = new (ResourceObj::C_HEAP, mtSymbol) GrowableArray<Entry>(0, true, mtSymbol);
}
/* bucket sizes table */
_bucket_sizes = NEW_C_HEAP_ARRAY(juint, _num_buckets, mtSymbol);
memset(_bucket_sizes, 0, sizeof(juint) * _num_buckets);
stats->hashentry_count = _num_entries;
// Compact buckets' entries will have only the 4-byte offset, but
// we don't know how many there will be at this point. So use a
// conservative estimate here. The size is adjusted later when we
// write out the buckets.
stats->hashentry_bytes = _num_entries * 8;
stats->bucket_count = _num_buckets; stats->bucket_count = _num_buckets;
stats->bucket_bytes = (_num_buckets + 1) * (sizeof(juint)); stats->bucket_bytes = (_num_buckets + 1) * (sizeof(u4));
_stats = stats; _stats = stats;
_compact_buckets = NULL;
// See compactHashtable.hpp for table layout _compact_entries = NULL;
_required_bytes = sizeof(juint) * 2; // _base_address, written as 2 juints _num_empty_buckets = 0;
_required_bytes+= sizeof(juint) + // num_entries _num_value_only_buckets = 0;
sizeof(juint) + // num_buckets _num_other_buckets = 0;
stats->hashentry_bytes +
stats->bucket_bytes;
} }
CompactHashtableWriter::~CompactHashtableWriter() { CompactHashtableWriter::~CompactHashtableWriter() {
for (int index = 0; index < _num_buckets; index++) { for (int index = 0; index < _num_buckets; index++) {
Entry* next = NULL; GrowableArray<Entry>* bucket = _buckets[index];
for (Entry* tent = _buckets[index]; tent; tent = next) { delete bucket;
next = tent->next();
delete tent;
}
} }
FREE_C_HEAP_ARRAY(juint, _bucket_sizes); FREE_C_HEAP_ARRAY(GrowableArray<Entry>*, _buckets);
FREE_C_HEAP_ARRAY(Entry*, _buckets);
}
// Calculate the number of buckets in the temporary hash table
int CompactHashtableWriter::number_of_buckets(int num_entries) {
const int buksize = (int)SharedSymbolTableBucketSize;
int num_buckets = (num_entries + buksize - 1) / buksize;
num_buckets = (num_buckets + 1) & (~0x01);
return num_buckets;
} }
// Add a symbol entry to the temporary hash table // Add a symbol entry to the temporary hash table
void CompactHashtableWriter::add(unsigned int hash, Entry* entry) { void CompactHashtableWriter::add(unsigned int hash, u4 value) {
int index = hash % _num_buckets; int index = hash % _num_buckets;
entry->set_next(_buckets[index]); _buckets[index]->append_if_missing(Entry(hash, value));
_buckets[index] = entry; _num_entries++;
_bucket_sizes[index] ++;
} }
// Write the compact table's bucket infos void CompactHashtableWriter::allocate_table() {
juint* CompactHashtableWriter::dump_table(juint* p, juint** first_bucket, int entries_space = 0;
NumberSeq* summary) { for (int index = 0; index < _num_buckets; index++) {
int index; GrowableArray<Entry>* bucket = _buckets[index];
juint* compact_table = p; int bucket_size = bucket->length();
// Compute the start of the buckets, include the compact_bucket_infos table if (bucket_size == 1) {
// and the table end offset. entries_space++;
juint offset = _num_buckets + 1; } else {
*first_bucket = compact_table + offset; entries_space += 2 * bucket_size;
}
}
for (index = 0; index < _num_buckets; index++) { if (entries_space & ~BUCKET_OFFSET_MASK) {
int bucket_size = _bucket_sizes[index]; vm_exit_during_initialization("CompactHashtableWriter::allocate_table: Overflow! "
"Too many entries.");
}
Thread* THREAD = VMThread::vm_thread();
ClassLoaderData* loader_data = ClassLoaderData::the_null_class_loader_data();
_compact_buckets = MetadataFactory::new_array<u4>(loader_data, _num_buckets + 1, THREAD);
_compact_entries = MetadataFactory::new_array<u4>(loader_data, entries_space, THREAD);
_stats->hashentry_count = _num_entries;
_stats->hashentry_bytes = entries_space * sizeof(u4);
}
// Write the compact table's buckets
void CompactHashtableWriter::dump_table(NumberSeq* summary) {
u4 offset = 0;
for (int index = 0; index < _num_buckets; index++) {
GrowableArray<Entry>* bucket = _buckets[index];
int bucket_size = bucket->length();
if (bucket_size == 1) { if (bucket_size == 1) {
// bucket with one entry is compacted and only has the symbol offset // bucket with one entry is compacted and only has the symbol offset
compact_table[index] = BUCKET_INFO(offset, COMPACT_BUCKET_TYPE); _compact_buckets->at_put(index, BUCKET_INFO(offset, VALUE_ONLY_BUCKET_TYPE));
offset += bucket_size; // each entry contains symbol offset only
Entry ent = bucket->at(0);
_compact_entries->at_put(offset++, ent.value());
_num_value_only_buckets++;
} else { } else {
// regular bucket, each entry is a symbol (hash, offset) pair // regular bucket, each entry is a symbol (hash, offset) pair
compact_table[index] = BUCKET_INFO(offset, REGULAR_BUCKET_TYPE); _compact_buckets->at_put(index, BUCKET_INFO(offset, REGULAR_BUCKET_TYPE));
offset += bucket_size * 2; // each hash entry is 2 juints
for (int i=0; i<bucket_size; i++) {
Entry ent = bucket->at(i);
_compact_entries->at_put(offset++, u4(ent.hash())); // write entry hash
_compact_entries->at_put(offset++, ent.value());
}
if (bucket_size == 0) {
_num_empty_buckets++;
} else {
_num_other_buckets++;
} }
if (offset & ~BUCKET_OFFSET_MASK) {
vm_exit_during_initialization("CompactHashtableWriter::dump_table: Overflow! "
"Too many symbols.");
} }
summary->add(bucket_size); summary->add(bucket_size);
} }
// Mark the end of the table
compact_table[_num_buckets] = BUCKET_INFO(offset, TABLEEND_BUCKET_TYPE);
return compact_table; // Mark the end of the buckets
_compact_buckets->at_put(_num_buckets, BUCKET_INFO(offset, TABLEEND_BUCKET_TYPE));
assert(offset == (u4)_compact_entries->length(), "sanity");
} }
// Write the compact table's entries
juint* CompactHashtableWriter::dump_buckets(juint* compact_table, juint* p,
NumberSeq* summary) {
uintx base_address = 0;
uintx max_delta = 0;
int num_compact_buckets = 0;
if (_type == CompactHashtable<Symbol*, char>::_symbol_table) {
base_address = uintx(MetaspaceShared::shared_rs()->base());
max_delta = uintx(MetaspaceShared::shared_rs()->size());
assert(max_delta <= MAX_SHARED_DELTA, "range check");
} else {
assert((_type == CompactHashtable<oop, char>::_string_table), "unknown table");
assert(UseCompressedOops, "UseCompressedOops is required");
}
assert(p != NULL, "sanity");
for (int index = 0; index < _num_buckets; index++) {
juint count = 0;
int bucket_size = _bucket_sizes[index];
int bucket_type = BUCKET_TYPE(compact_table[index]);
if (bucket_size == 1) {
assert(bucket_type == COMPACT_BUCKET_TYPE, "Bad bucket type");
num_compact_buckets ++;
}
for (Entry* tent = _buckets[index]; tent;
tent = tent->next()) {
if (bucket_type == REGULAR_BUCKET_TYPE) {
*p++ = juint(tent->hash()); // write entry hash
}
if (_type == CompactHashtable<Symbol*, char>::_symbol_table) {
uintx deltax = uintx(tent->value()) - base_address;
assert(deltax < max_delta, "range check");
juint delta = juint(deltax);
*p++ = delta; // write entry offset
} else {
*p++ = oopDesc::encode_heap_oop(tent->string());
}
count ++;
}
assert(count == _bucket_sizes[index], "sanity");
}
// Adjust the hashentry_bytes in CompactHashtableStats. Each compact
// bucket saves 4-byte.
_stats->hashentry_bytes -= num_compact_buckets * 4;
return p;
}
// Write the compact table // Write the compact table
void CompactHashtableWriter::dump(char** top, char* end) { void CompactHashtableWriter::dump(SimpleCompactHashtable *cht, const char* table_name) {
NumberSeq summary; NumberSeq summary;
char* old_top = *top; allocate_table();
juint* p = (juint*)(*top); dump_table(&summary);
uintx base_address = uintx(MetaspaceShared::shared_rs()->base()); int table_bytes = _stats->bucket_bytes + _stats->hashentry_bytes;
address base_address = address(MetaspaceShared::shared_rs()->base());
// Now write the following at the beginning of the table: cht->init(base_address, _num_entries, _num_buckets,
// base_address (uintx) _compact_buckets->data(), _compact_entries->data());
// num_entries (juint)
// num_buckets (juint)
*p++ = high(base_address);
*p++ = low (base_address); // base address
*p++ = _num_entries; // number of entries in the table
*p++ = _num_buckets; // number of buckets in the table
juint* first_bucket = NULL;
juint* compact_table = dump_table(p, &first_bucket, &summary);
juint* bucket_end = dump_buckets(compact_table, first_bucket, &summary);
assert(bucket_end <= (juint*)end, "cannot write past end");
*top = (char*)bucket_end;
if (PrintSharedSpaces) { if (PrintSharedSpaces) {
double avg_cost = 0.0; double avg_cost = 0.0;
if (_num_entries > 0) { if (_num_entries > 0) {
avg_cost = double(_required_bytes)/double(_num_entries); avg_cost = double(table_bytes)/double(_num_entries);
} }
tty->print_cr("Shared %s table stats -------- base: " PTR_FORMAT, tty->print_cr("Shared %s table stats -------- base: " PTR_FORMAT,
table_name(), (intptr_t)base_address); table_name, (intptr_t)base_address);
tty->print_cr("Number of entries : %9d", _num_entries); tty->print_cr("Number of entries : %9d", _num_entries);
tty->print_cr("Total bytes used : %9d", (int)((*top) - old_top)); tty->print_cr("Total bytes used : %9d", table_bytes);
tty->print_cr("Average bytes per entry : %9.3f", avg_cost); tty->print_cr("Average bytes per entry : %9.3f", avg_cost);
tty->print_cr("Average bucket size : %9.3f", summary.avg()); tty->print_cr("Average bucket size : %9.3f", summary.avg());
tty->print_cr("Variance of bucket size : %9.3f", summary.variance()); tty->print_cr("Variance of bucket size : %9.3f", summary.variance());
tty->print_cr("Std. dev. of bucket size: %9.3f", summary.sd()); tty->print_cr("Std. dev. of bucket size: %9.3f", summary.sd());
tty->print_cr("Maximum bucket size : %9d", (int)summary.maximum()); tty->print_cr("Empty buckets : %9d", _num_empty_buckets);
tty->print_cr("Value_Only buckets : %9d", _num_value_only_buckets);
tty->print_cr("Other buckets : %9d", _num_other_buckets);
} }
} }
const char* CompactHashtableWriter::table_name() { /////////////////////////////////////////////////////////////
switch (_type) { //
case CompactHashtable<Symbol*, char>::_symbol_table: return "symbol"; // Customization for dumping Symbol and String tables
case CompactHashtable<oop, char>::_string_table: return "string";
default: void CompactSymbolTableWriter::add(unsigned int hash, Symbol *symbol) {
; address base_address = address(MetaspaceShared::shared_rs()->base());
} uintx max_delta = uintx(MetaspaceShared::shared_rs()->size());
return "unknown"; assert(max_delta <= MAX_SHARED_DELTA, "range check");
uintx deltax = address(symbol) - base_address;
assert(deltax < max_delta, "range check");
u4 delta = u4(deltax);
CompactHashtableWriter::add(hash, delta);
}
void CompactStringTableWriter::add(unsigned int hash, oop string) {
CompactHashtableWriter::add(hash, oopDesc::encode_heap_oop(string));
}
void CompactSymbolTableWriter::dump(CompactHashtable<Symbol*, char> *cht) {
CompactHashtableWriter::dump(cht, "symbol");
}
void CompactStringTableWriter::dump(CompactHashtable<oop, char> *cht) {
CompactHashtableWriter::dump(cht, "string");
} }
///////////////////////////////////////////////////////////// /////////////////////////////////////////////////////////////
// //
// The CompactHashtable implementation // The CompactHashtable implementation
// //
template <class T, class N> const char* CompactHashtable<T, N>::init(
CompactHashtableType type, const char* buffer) {
assert(!DumpSharedSpaces, "run-time only");
_type = type;
juint*p = (juint*)buffer;
juint upper = *p++;
juint lower = *p++;
_base_address = uintx(jlong_from(upper, lower));
_entry_count = *p++;
_bucket_count = *p++;
_buckets = p;
_table_end_offset = BUCKET_OFFSET(p[_bucket_count]); // located at the end of the bucket_info table
juint *end = _buckets + _table_end_offset; void SimpleCompactHashtable::serialize(SerializeClosure* soc) {
return (const char*)end; soc->do_ptr((void**)&_base_address);
soc->do_u4(&_entry_count);
soc->do_u4(&_bucket_count);
soc->do_ptr((void**)&_buckets);
soc->do_ptr((void**)&_entries);
} }
bool SimpleCompactHashtable::exists(u4 value) {
assert(!DumpSharedSpaces, "run-time only");
if (_entry_count == 0) {
return false;
}
unsigned int hash = (unsigned int)value;
int index = hash % _bucket_count;
u4 bucket_info = _buckets[index];
u4 bucket_offset = BUCKET_OFFSET(bucket_info);
int bucket_type = BUCKET_TYPE(bucket_info);
u4* entry = _entries + bucket_offset;
if (bucket_type == VALUE_ONLY_BUCKET_TYPE) {
return (entry[0] == value);
} else {
u4*entry_max = _entries + BUCKET_OFFSET(_buckets[index + 1]);
while (entry <entry_max) {
if (entry[1] == value) {
return true;
}
entry += 2;
}
return false;
}
}
template <class I>
inline void SimpleCompactHashtable::iterate(const I& iterator) {
assert(!DumpSharedSpaces, "run-time only");
for (u4 i = 0; i < _bucket_count; i++) {
u4 bucket_info = _buckets[i];
u4 bucket_offset = BUCKET_OFFSET(bucket_info);
int bucket_type = BUCKET_TYPE(bucket_info);
u4* entry = _entries + bucket_offset;
if (bucket_type == VALUE_ONLY_BUCKET_TYPE) {
iterator.do_value(_base_address, entry[0]);
} else {
u4*entry_max = _entries + BUCKET_OFFSET(_buckets[i + 1]);
while (entry < entry_max) {
iterator.do_value(_base_address, entry[0]);
entry += 2;
}
}
}
}
template <class T, class N> void CompactHashtable<T, N>::serialize(SerializeClosure* soc) {
SimpleCompactHashtable::serialize(soc);
soc->do_u4(&_type);
}
class CompactHashtable_SymbolIterator {
SymbolClosure* const _closure;
public:
CompactHashtable_SymbolIterator(SymbolClosure *cl) : _closure(cl) {}
inline void do_value(address base_address, u4 offset) const {
Symbol* sym = (Symbol*)((void*)(base_address + offset));
_closure->do_symbol(&sym);
}
};
template <class T, class N> void CompactHashtable<T, N>::symbols_do(SymbolClosure *cl) { template <class T, class N> void CompactHashtable<T, N>::symbols_do(SymbolClosure *cl) {
assert(!DumpSharedSpaces, "run-time only"); CompactHashtable_SymbolIterator iterator(cl);
for (juint i = 0; i < _bucket_count; i ++) { iterate(iterator);
juint bucket_info = _buckets[i];
juint bucket_offset = BUCKET_OFFSET(bucket_info);
int bucket_type = BUCKET_TYPE(bucket_info);
juint* bucket = _buckets + bucket_offset;
juint* bucket_end = _buckets;
Symbol* sym;
if (bucket_type == COMPACT_BUCKET_TYPE) {
sym = (Symbol*)((void*)(_base_address + bucket[0]));
cl->do_symbol(&sym);
} else {
bucket_end += BUCKET_OFFSET(_buckets[i + 1]);
while (bucket < bucket_end) {
sym = (Symbol*)((void*)(_base_address + bucket[1]));
cl->do_symbol(&sym);
bucket += 2;
}
}
}
} }
template <class T, class N> void CompactHashtable<T, N>::oops_do(OopClosure* f) { class CompactHashtable_OopIterator {
assert(!DumpSharedSpaces, "run-time only"); OopClosure* const _closure;
assert(_type == _string_table || _bucket_count == 0, "sanity"); public:
for (juint i = 0; i < _bucket_count; i ++) { CompactHashtable_OopIterator(OopClosure *cl) : _closure(cl) {}
juint bucket_info = _buckets[i]; inline void do_value(address base_address, u4 offset) const {
juint bucket_offset = BUCKET_OFFSET(bucket_info); narrowOop o = (narrowOop)offset;
int bucket_type = BUCKET_TYPE(bucket_info); _closure->do_oop(&o);
juint* bucket = _buckets + bucket_offset; }
juint* bucket_end = _buckets; };
narrowOop o; template <class T, class N> void CompactHashtable<T, N>::oops_do(OopClosure* cl) {
if (bucket_type == COMPACT_BUCKET_TYPE) { assert(_type == _string_table || _bucket_count == 0, "sanity");
o = (narrowOop)bucket[0]; CompactHashtable_OopIterator iterator(cl);
f->do_oop(&o); iterate(iterator);
} else {
bucket_end += BUCKET_OFFSET(_buckets[i + 1]);
while (bucket < bucket_end) {
o = (narrowOop)bucket[1];
f->do_oop(&o);
bucket += 2;
}
}
}
} }
// Explicitly instantiate these types // Explicitly instantiate these types
@ -360,7 +350,7 @@ bool HashtableTextDump::skip_newline() {
} else { } else {
corrupted(_p, "Unexpected character"); corrupted(_p, "Unexpected character");
} }
_line_no ++; _line_no++;
return true; return true;
} }
@ -390,7 +380,7 @@ void HashtableTextDump::check_version(const char* ver) {
} }
void HashtableTextDump::scan_prefix_type() { void HashtableTextDump::scan_prefix_type() {
_p ++; _p++;
if (strncmp(_p, "SECTION: String", 15) == 0) { if (strncmp(_p, "SECTION: String", 15) == 0) {
_p += 15; _p += 15;
_prefix_type = StringPrefix; _prefix_type = StringPrefix;

View file

@ -1,5 +1,5 @@
/* /*
* Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 1997, 2016, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* *
* This code is free software; you can redistribute it and/or modify it * This code is free software; you can redistribute it and/or modify it
@ -31,7 +31,10 @@
#include "services/diagnosticCommand.hpp" #include "services/diagnosticCommand.hpp"
#include "utilities/hashtable.hpp" #include "utilities/hashtable.hpp"
template <class T, class N> class CompactHashtable;
class NumberSeq; class NumberSeq;
class SimpleCompactHashtable;
class SerializeClosure;
// Stats for symbol tables in the CDS archive // Stats for symbol tables in the CDS archive
class CompactHashtableStats VALUE_OBJ_CLASS_SPEC { class CompactHashtableStats VALUE_OBJ_CLASS_SPEC {
@ -70,66 +73,74 @@ public:
// //
class CompactHashtableWriter: public StackObj { class CompactHashtableWriter: public StackObj {
public: public:
class Entry: public CHeapObj<mtSymbol> { class Entry VALUE_OBJ_CLASS_SPEC {
Entry* _next;
unsigned int _hash; unsigned int _hash;
void* _literal; u4 _value;
public: public:
Entry(unsigned int hash, Symbol *symbol) : _next(NULL), _hash(hash), _literal(symbol) {} Entry() {}
Entry(unsigned int hash, oop string) : _next(NULL), _hash(hash), _literal(string) {} Entry(unsigned int hash, u4 val) : _hash(hash), _value(val) {}
void *value() { u4 value() {
return _literal; return _value;
}
Symbol *symbol() {
return (Symbol*)_literal;
}
oop string() {
return (oop)_literal;
} }
unsigned int hash() { unsigned int hash() {
return _hash; return _hash;
} }
Entry *next() {return _next;}
void set_next(Entry *p) {_next = p;} bool operator==(const CompactHashtableWriter::Entry& other) {
return (_value == other._value && _hash == other._hash);
}
}; // class CompactHashtableWriter::Entry }; // class CompactHashtableWriter::Entry
private: private:
static int number_of_buckets(int num_entries);
int _type;
int _num_entries; int _num_entries;
int _num_buckets; int _num_buckets;
juint* _bucket_sizes; int _num_empty_buckets;
Entry** _buckets; int _num_value_only_buckets;
int _required_bytes; int _num_other_buckets;
GrowableArray<Entry>** _buckets;
CompactHashtableStats* _stats; CompactHashtableStats* _stats;
Array<u4>* _compact_buckets;
Array<u4>* _compact_entries;
public: public:
// This is called at dump-time only // This is called at dump-time only
CompactHashtableWriter(int table_type, int num_entries, CompactHashtableStats* stats); CompactHashtableWriter(int num_buckets, CompactHashtableStats* stats);
~CompactHashtableWriter(); ~CompactHashtableWriter();
int get_required_bytes() { void add(unsigned int hash, u4 value);
return _required_bytes; void add(u4 value) {
add((unsigned int)value, value);
} }
inline void add(unsigned int hash, Symbol* symbol);
inline void add(unsigned int hash, oop string);
private: private:
void add(unsigned int hash, Entry* entry); void allocate_table();
juint* dump_table(juint* p, juint** first_bucket, NumberSeq* summary); void dump_table(NumberSeq* summary);
juint* dump_buckets(juint* table, juint* p, NumberSeq* summary);
public: public:
void dump(char** top, char* end); void dump(SimpleCompactHashtable *cht, const char* table_name);
const char* table_name(); const char* table_name();
}; };
class CompactSymbolTableWriter: public CompactHashtableWriter {
public:
CompactSymbolTableWriter(int num_buckets, CompactHashtableStats* stats) :
CompactHashtableWriter(num_buckets, stats) {}
void add(unsigned int hash, Symbol *symbol);
void dump(CompactHashtable<Symbol*, char> *cht);
};
class CompactStringTableWriter: public CompactHashtableWriter {
public:
CompactStringTableWriter(int num_entries, CompactHashtableStats* stats) :
CompactHashtableWriter(num_entries, stats) {}
void add(unsigned int hash, oop string);
void dump(CompactHashtable<oop, char> *cht);
};
#define REGULAR_BUCKET_TYPE 0 #define REGULAR_BUCKET_TYPE 0
#define COMPACT_BUCKET_TYPE 1 #define VALUE_ONLY_BUCKET_TYPE 1
#define TABLEEND_BUCKET_TYPE 3 #define TABLEEND_BUCKET_TYPE 3
#define BUCKET_OFFSET_MASK 0x3FFFFFFF #define BUCKET_OFFSET_MASK 0x3FFFFFFF
#define BUCKET_OFFSET(info) ((info) & BUCKET_OFFSET_MASK) #define BUCKET_OFFSET(info) ((info) & BUCKET_OFFSET_MASK)
@ -146,90 +157,106 @@ public:
// and tend to have large number of entries, we try to minimize the footprint // and tend to have large number of entries, we try to minimize the footprint
// cost per entry. // cost per entry.
// //
// Layout of compact table in the shared archive: // The CompactHashtable is split into two arrays
// //
// uintx base_address; // u4 buckets[num_buckets+1]; // bit[31,30]: type; bit[29-0]: offset
// juint num_entries; // u4 entries[<variable size>]
// juint num_buckets;
// juint bucket_infos[num_buckets+1]; // bit[31,30]: type; bit[29-0]: offset
// juint table[]
// //
// ----------------------------------- // The size of buckets[] is 'num_buckets + 1'. Each entry of
// | base_address | num_entries | // buckets[] is a 32-bit encoding of the bucket type and bucket offset,
// |---------------------------------|
// | num_buckets | bucket_info0 |
// |---------------------------------|
// | bucket_info1 | bucket_info2 |
// | bucket_info3 ... |
// | .... | table_end_info |
// |---------------------------------|
// | entry0 |
// | entry1 |
// | entry2 |
// | |
// | ... |
// -----------------------------------
//
// The size of the bucket_info table is 'num_buckets + 1'. Each entry of the
// bucket_info table is a 32-bit encoding of the bucket type and bucket offset,
// with the type in the left-most 2-bit and offset in the remaining 30-bit. // with the type in the left-most 2-bit and offset in the remaining 30-bit.
// The last entry is a special type. It contains the offset of the last // The last entry is a special type. It contains the end of the last
// bucket end. We use that information when traversing the compact table. // bucket.
// //
// There are two types of buckets, regular buckets and compact buckets. The // There are two types of buckets, regular buckets and value_only buckets. The
// compact buckets have '01' in their highest 2-bit, and regular buckets have // value_only buckets have '01' in their highest 2-bit, and regular buckets have
// '00' in their highest 2-bit. // '00' in their highest 2-bit.
// //
// For normal buckets, each entry is 8 bytes in the table[]: // For normal buckets, each entry is 8 bytes in the entries[]:
// juint hash; /* symbol/string hash */ // u4 hash; /* symbol/string hash */
// union { // union {
// juint offset; /* Symbol* sym = (Symbol*)(base_address + offset) */ // u4 offset; /* Symbol* sym = (Symbol*)(base_address + offset) */
// narrowOop str; /* String narrowOop encoding */ // narrowOop str; /* String narrowOop encoding */
// } // }
// //
// //
// For compact buckets, each entry has only the 4-byte 'offset' in the table[]. // For value_only buckets, each entry has only the 4-byte 'offset' in the entries[].
//
// Example -- note that the second bucket is a VALUE_ONLY_BUCKET_TYPE so the hash code
// is skipped.
// buckets[0, 4, 5, ....]
// | | |
// | | +---+
// | | |
// | +----+ |
// v v v
// entries[H,O,H,O,O,H,O,H,O.....]
// //
// See CompactHashtable::lookup() for how the table is searched at runtime. // See CompactHashtable::lookup() for how the table is searched at runtime.
// See CompactHashtableWriter::dump() for how the table is written at CDS // See CompactHashtableWriter::dump() for how the table is written at CDS
// dump time. // dump time.
// //
template <class T, class N> class CompactHashtable VALUE_OBJ_CLASS_SPEC { class SimpleCompactHashtable VALUE_OBJ_CLASS_SPEC {
protected:
address _base_address;
u4 _bucket_count;
u4 _entry_count;
u4* _buckets;
u4* _entries;
public:
SimpleCompactHashtable() {
_entry_count = 0;
_bucket_count = 0;
_buckets = 0;
_entries = 0;
}
void reset() {
_bucket_count = 0;
_entry_count = 0;
_buckets = 0;
_entries = 0;
}
void init(address base_address, u4 entry_count, u4 bucket_count, u4* buckets, u4* entries) {
_base_address = base_address;
_bucket_count = bucket_count;
_entry_count = entry_count;
_buckets = buckets;
_entries = entries;
}
template <class I> inline void iterate(const I& iterator);
bool exists(u4 value);
// For reading from/writing to the CDS archive
void serialize(SerializeClosure* soc);
};
template <class T, class N> class CompactHashtable : public SimpleCompactHashtable {
friend class VMStructs; friend class VMStructs;
public: public:
enum CompactHashtableType { enum CompactHashtableType {
_symbol_table = 0, _symbol_table = 0,
_string_table = 1 _string_table = 1
}; };
private: private:
CompactHashtableType _type; u4 _type;
uintx _base_address;
juint _entry_count;
juint _bucket_count;
juint _table_end_offset;
juint* _buckets;
inline Symbol* lookup_entry(CompactHashtable<Symbol*, char>* const t, inline Symbol* decode_entry(CompactHashtable<Symbol*, char>* const t,
juint* addr, const char* name, int len); u4 offset, const char* name, int len);
inline oop lookup_entry(CompactHashtable<oop, char>* const t, inline oop decode_entry(CompactHashtable<oop, char>* const t,
juint* addr, const char* name, int len); u4 offset, const char* name, int len);
public: public:
CompactHashtable() { CompactHashtable() : SimpleCompactHashtable() {}
_entry_count = 0;
_bucket_count = 0;
_table_end_offset = 0;
_buckets = 0;
}
const char* init(CompactHashtableType type, const char *buffer);
void reset() { void set_type(CompactHashtableType type) {
_entry_count = 0; _type = (u4)type;
_bucket_count = 0;
_table_end_offset = 0;
_buckets = 0;
} }
// Lookup an entry from the compact table // Lookup an entry from the compact table
@ -240,6 +267,9 @@ public:
// iterate over strings // iterate over strings
void oops_do(OopClosure* f); void oops_do(OopClosure* f);
// For reading from/writing to the CDS archive
void serialize(SerializeClosure* soc);
}; };
//////////////////////////////////////////////////////////////////////// ////////////////////////////////////////////////////////////////////////
@ -293,7 +323,7 @@ public:
u8 n = 0; u8 n = 0;
while (p < end) { while (p < end) {
char c = *p ++; char c = *p++;
if ('0' <= c && c <= '9') { if ('0' <= c && c <= '9') {
n = n * 10 + (c - '0'); n = n * 10 + (c - '0');
if (n > (u8)INT_MAX) { if (n > (u8)INT_MAX) {

View file

@ -1,5 +1,5 @@
/* /*
* Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2015, 2016, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* *
* This code is free software; you can redistribute it and/or modify it * This code is free software; you can redistribute it and/or modify it
@ -30,9 +30,9 @@
#include "oops/oop.inline.hpp" #include "oops/oop.inline.hpp"
template <class T, class N> template <class T, class N>
inline Symbol* CompactHashtable<T, N>::lookup_entry(CompactHashtable<Symbol*, char>* const t, inline Symbol* CompactHashtable<T, N>::decode_entry(CompactHashtable<Symbol*, char>* const t,
juint* addr, const char* name, int len) { u4 offset, const char* name, int len) {
Symbol* sym = (Symbol*)((void*)(_base_address + *addr)); Symbol* sym = (Symbol*)(_base_address + offset);
if (sym->equals(name, len)) { if (sym->equals(name, len)) {
assert(sym->refcount() == -1, "must be shared"); assert(sym->refcount() == -1, "must be shared");
return sym; return sym;
@ -42,9 +42,9 @@ inline Symbol* CompactHashtable<T, N>::lookup_entry(CompactHashtable<Symbol*, ch
} }
template <class T, class N> template <class T, class N>
inline oop CompactHashtable<T, N>::lookup_entry(CompactHashtable<oop, char>* const t, inline oop CompactHashtable<T, N>::decode_entry(CompactHashtable<oop, char>* const t,
juint* addr, const char* name, int len) { u4 offset, const char* name, int len) {
narrowOop obj = (narrowOop)(*addr); narrowOop obj = (narrowOop)offset;
oop string = oopDesc::decode_heap_oop(obj); oop string = oopDesc::decode_heap_oop(obj);
if (java_lang_String::equals(string, (jchar*)name, len)) { if (java_lang_String::equals(string, (jchar*)name, len)) {
return string; return string;
@ -56,17 +56,14 @@ inline oop CompactHashtable<T, N>::lookup_entry(CompactHashtable<oop, char>* con
template <class T, class N> template <class T, class N>
inline T CompactHashtable<T,N>::lookup(const N* name, unsigned int hash, int len) { inline T CompactHashtable<T,N>::lookup(const N* name, unsigned int hash, int len) {
if (_entry_count > 0) { if (_entry_count > 0) {
assert(!DumpSharedSpaces, "run-time only");
int index = hash % _bucket_count; int index = hash % _bucket_count;
juint bucket_info = _buckets[index]; u4 bucket_info = _buckets[index];
juint bucket_offset = BUCKET_OFFSET(bucket_info); u4 bucket_offset = BUCKET_OFFSET(bucket_info);
int bucket_type = BUCKET_TYPE(bucket_info); int bucket_type = BUCKET_TYPE(bucket_info);
juint* bucket = _buckets + bucket_offset; u4* entry = _entries + bucket_offset;
juint* bucket_end = _buckets;
if (bucket_type == COMPACT_BUCKET_TYPE) { if (bucket_type == VALUE_ONLY_BUCKET_TYPE) {
// the compact bucket has one entry with entry offset only T res = decode_entry(this, entry[0], name, len);
T res = lookup_entry(this, &bucket[0], name, len);
if (res != NULL) { if (res != NULL) {
return res; return res;
} }
@ -74,29 +71,20 @@ inline T CompactHashtable<T,N>::lookup(const N* name, unsigned int hash, int len
// This is a regular bucket, which has more than one // This is a regular bucket, which has more than one
// entries. Each entry is a pair of entry (hash, offset). // entries. Each entry is a pair of entry (hash, offset).
// Seek until the end of the bucket. // Seek until the end of the bucket.
bucket_end += BUCKET_OFFSET(_buckets[index + 1]); u4* entry_max = _entries + BUCKET_OFFSET(_buckets[index + 1]);
while (bucket < bucket_end) { while (entry < entry_max) {
unsigned int h = (unsigned int)(bucket[0]); unsigned int h = (unsigned int)(entry[0]);
if (h == hash) { if (h == hash) {
T res = lookup_entry(this, &bucket[1], name, len); T res = decode_entry(this, entry[1], name, len);
if (res != NULL) { if (res != NULL) {
return res; return res;
} }
} }
bucket += 2; entry += 2;
} }
} }
} }
return NULL; return NULL;
} }
inline void CompactHashtableWriter::add(unsigned int hash, Symbol* symbol) {
add(hash, new Entry(hash, symbol));
}
inline void CompactHashtableWriter::add(unsigned int hash, oop string) {
add(hash, new Entry(hash, string));
}
#endif // SHARE_VM_CLASSFILE_COMPACTHASHTABLE_INLINE_HPP #endif // SHARE_VM_CLASSFILE_COMPACTHASHTABLE_INLINE_HPP

View file

@ -662,7 +662,7 @@ int StringtableDCmd::num_arguments() {
// Sharing // Sharing
bool StringTable::copy_shared_string(GrowableArray<MemRegion> *string_space, bool StringTable::copy_shared_string(GrowableArray<MemRegion> *string_space,
CompactHashtableWriter* ch_table) { CompactStringTableWriter* writer) {
#if INCLUDE_CDS && INCLUDE_ALL_GCS && defined(_LP64) && !defined(_WINDOWS) #if INCLUDE_CDS && INCLUDE_ALL_GCS && defined(_LP64) && !defined(_WINDOWS)
assert(UseG1GC, "Only support G1 GC"); assert(UseG1GC, "Only support G1 GC");
assert(UseCompressedOops && UseCompressedClassPointers, assert(UseCompressedOops && UseCompressedClassPointers,
@ -713,7 +713,7 @@ bool StringTable::copy_shared_string(GrowableArray<MemRegion> *string_space,
} }
// add to the compact table // add to the compact table
ch_table->add(hash, new_s); writer->add(hash, new_s);
} }
} }
@ -723,40 +723,41 @@ bool StringTable::copy_shared_string(GrowableArray<MemRegion> *string_space,
return true; return true;
} }
bool StringTable::copy_compact_table(char** top, char *end, GrowableArray<MemRegion> *string_space, void StringTable::serialize(SerializeClosure* soc, GrowableArray<MemRegion> *string_space,
size_t* space_size) { size_t* space_size) {
#if INCLUDE_CDS && defined(_LP64) && !defined(_WINDOWS) #if INCLUDE_CDS && defined(_LP64) && !defined(_WINDOWS)
_shared_table.reset();
if (soc->writing()) {
if (!(UseG1GC && UseCompressedOops && UseCompressedClassPointers)) { if (!(UseG1GC && UseCompressedOops && UseCompressedClassPointers)) {
if (PrintSharedSpaces) { if (PrintSharedSpaces) {
tty->print_cr("Shared strings are excluded from the archive as UseG1GC, " tty->print_cr("Shared strings are excluded from the archive as UseG1GC, "
"UseCompressedOops and UseCompressedClassPointers are required."); "UseCompressedOops and UseCompressedClassPointers are required.");
} }
return true; } else {
} int num_buckets = the_table()->number_of_entries() /
SharedSymbolTableBucketSize;
CompactHashtableWriter ch_table(CompactHashtable<oop, char>::_string_table, CompactStringTableWriter writer(num_buckets,
the_table()->number_of_entries(),
&MetaspaceShared::stats()->string); &MetaspaceShared::stats()->string);
// Copy the interned strings into the "string space" within the java heap // Copy the interned strings into the "string space" within the java heap
if (!copy_shared_string(string_space, &ch_table)) { if (copy_shared_string(string_space, &writer)) {
return false;
}
for (int i = 0; i < string_space->length(); i++) { for (int i = 0; i < string_space->length(); i++) {
*space_size += string_space->at(i).byte_size(); *space_size += string_space->at(i).byte_size();
} }
writer.dump(&_shared_table);
// Now dump the compact table }
if (*top + ch_table.get_required_bytes() > end) { }
// not enough space left
return false;
} }
ch_table.dump(top, end);
*top = (char*)align_ptr_up(*top, sizeof(void*));
_shared_table.set_type(CompactHashtable<oop, char>::_string_table);
_shared_table.serialize(soc);
if (soc->writing()) {
_shared_table.reset(); // Sanity. Make sure we don't use the shared table at dump time
} else if (_ignore_shared_strings) {
_shared_table.reset();
}
#endif #endif
return true;
} }
void StringTable::shared_oops_do(OopClosure* f) { void StringTable::shared_oops_do(OopClosure* f) {
@ -765,25 +766,3 @@ void StringTable::shared_oops_do(OopClosure* f) {
#endif #endif
} }
const char* StringTable::init_shared_table(FileMapInfo *mapinfo, char *buffer) {
#if INCLUDE_CDS && defined(_LP64) && !defined(_WINDOWS)
if (mapinfo->space_capacity(MetaspaceShared::first_string) == 0) {
// no shared string data
return buffer;
}
// initialize the shared table
juint *p = (juint*)buffer;
const char* end = _shared_table.init(
CompactHashtable<oop, char>::_string_table, (char*)p);
const char* aligned_end = (const char*)align_ptr_up(end, sizeof(void*));
if (_ignore_shared_strings) {
_shared_table.reset();
}
return aligned_end;
#endif
return buffer;
}

View file

@ -29,8 +29,9 @@
#include "utilities/hashtable.hpp" #include "utilities/hashtable.hpp"
template <class T, class N> class CompactHashtable; template <class T, class N> class CompactHashtable;
class CompactHashtableWriter; class CompactStringTableWriter;
class FileMapInfo; class FileMapInfo;
class SerializeClosure;
class StringTable : public RehashableHashtable<oop, mtSymbol> { class StringTable : public RehashableHashtable<oop, mtSymbol> {
friend class VMStructs; friend class VMStructs;
@ -155,10 +156,9 @@ public:
static bool shared_string_ignored() { return _ignore_shared_strings; } static bool shared_string_ignored() { return _ignore_shared_strings; }
static void shared_oops_do(OopClosure* f); static void shared_oops_do(OopClosure* f);
static bool copy_shared_string(GrowableArray<MemRegion> *string_space, static bool copy_shared_string(GrowableArray<MemRegion> *string_space,
CompactHashtableWriter* ch_table); CompactStringTableWriter* ch_table);
static bool copy_compact_table(char** top, char* end, GrowableArray<MemRegion> *string_space, static void serialize(SerializeClosure* soc, GrowableArray<MemRegion> *string_space,
size_t* space_size); size_t* space_size);
static const char* init_shared_table(FileMapInfo *mapinfo, char* buffer);
static void reverse() { static void reverse() {
the_table()->Hashtable<oop, mtSymbol>::reverse(); the_table()->Hashtable<oop, mtSymbol>::reverse();
} }

View file

@ -537,37 +537,42 @@ void SymbolTable::dump(outputStream* st, bool verbose) {
} }
} }
bool SymbolTable::copy_compact_table(char** top, char*end) { void SymbolTable::serialize(SerializeClosure* soc) {
#if INCLUDE_CDS #if INCLUDE_CDS
CompactHashtableWriter ch_table(CompactHashtable<Symbol*, char>::_symbol_table, _shared_table.reset();
the_table()->number_of_entries(), if (soc->writing()) {
int num_buckets = the_table()->number_of_entries() /
SharedSymbolTableBucketSize;
CompactSymbolTableWriter writer(num_buckets,
&MetaspaceShared::stats()->symbol); &MetaspaceShared::stats()->symbol);
if (*top + ch_table.get_required_bytes() > end) {
// not enough space left
return false;
}
for (int i = 0; i < the_table()->table_size(); ++i) { for (int i = 0; i < the_table()->table_size(); ++i) {
HashtableEntry<Symbol*, mtSymbol>* p = the_table()->bucket(i); HashtableEntry<Symbol*, mtSymbol>* p = the_table()->bucket(i);
for ( ; p != NULL; p = p->next()) { for ( ; p != NULL; p = p->next()) {
Symbol* s = (Symbol*)(p->literal()); Symbol* s = (Symbol*)(p->literal());
unsigned int fixed_hash = hash_shared_symbol((char*)s->bytes(), s->utf8_length()); unsigned int fixed_hash = hash_shared_symbol((char*)s->bytes(), s->utf8_length());
assert(fixed_hash == p->hash(), "must not rehash during dumping"); assert(fixed_hash == p->hash(), "must not rehash during dumping");
ch_table.add(fixed_hash, s); writer.add(fixed_hash, s);
} }
} }
ch_table.dump(top, end); writer.dump(&_shared_table);
}
*top = (char*)align_ptr_up(*top, sizeof(void*)); _shared_table.set_type(CompactHashtable<Symbol*, char>::_symbol_table);
_shared_table.serialize(soc);
if (soc->writing()) {
// Verify table is correct
Symbol* sym = vmSymbols::java_lang_Object();
const char* name = (const char*)sym->bytes();
int len = sym->utf8_length();
unsigned int hash = hash_symbol(name, len);
assert(sym == _shared_table.lookup(name, hash, len), "sanity");
// Sanity. Make sure we don't use the shared table at dump time
_shared_table.reset();
}
#endif #endif
return true;
}
const char* SymbolTable::init_shared_table(const char* buffer) {
const char* end = _shared_table.init(
CompactHashtable<Symbol*, char>::_symbol_table, buffer);
return (const char*)align_ptr_up(end, sizeof(void*));
} }
//--------------------------------------------------------------------------- //---------------------------------------------------------------------------

View file

@ -41,6 +41,7 @@
class BoolObjectClosure; class BoolObjectClosure;
class outputStream; class outputStream;
class SerializeClosure;
// TempNewSymbol acts as a handle class in a handle/body idiom and is // TempNewSymbol acts as a handle class in a handle/body idiom and is
// responsible for proper resource management of the body (which is a Symbol*). // responsible for proper resource management of the body (which is a Symbol*).
@ -251,8 +252,7 @@ public:
static void read(const char* filename, TRAPS); static void read(const char* filename, TRAPS);
// Sharing // Sharing
static bool copy_compact_table(char** top, char* end); static void serialize(SerializeClosure* soc);
static const char* init_shared_table(const char* buffer);
// Rehash the symbol table if it gets out of balance // Rehash the symbol table if it gets out of balance
static void rehash_table(); static void rehash_table();

View file

@ -29,6 +29,7 @@
#include "classfile/dictionary.hpp" #include "classfile/dictionary.hpp"
class ClassFileStream; class ClassFileStream;
class SerializeClosure;
class SystemDictionaryShared: public SystemDictionary { class SystemDictionaryShared: public SystemDictionary {
public: public:
@ -77,6 +78,7 @@ public:
TRAPS) { TRAPS) {
return NULL; return NULL;
} }
static void serialize(SerializeClosure* soc) {}
}; };
#endif // SHARE_VM_CLASSFILE_SYSTEMDICTIONARYSHARED_HPP #endif // SHARE_VM_CLASSFILE_SYSTEMDICTIONARYSHARED_HPP

View file

@ -1,5 +1,5 @@
/* /*
* Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 1997, 2016, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* *
* This code is free software; you can redistribute it and/or modify it * This code is free software; you can redistribute it and/or modify it
@ -355,6 +355,9 @@ public:
// Read/write the void pointer pointed to by p. // Read/write the void pointer pointed to by p.
virtual void do_ptr(void** p) = 0; virtual void do_ptr(void** p) = 0;
// Read/write the 32-bit unsigned integer pointed to by p.
virtual void do_u4(u4* p) = 0;
// Read/write the region specified. // Read/write the region specified.
virtual void do_region(u_char* start, size_t size) = 0; virtual void do_region(u_char* start, size_t size) = 0;
@ -363,6 +366,10 @@ public:
// for verification that sections of the serialized data are of the // for verification that sections of the serialized data are of the
// correct length. // correct length.
virtual void do_tag(int tag) = 0; virtual void do_tag(int tag) = 0;
bool writing() {
return !reading();
}
}; };
class SymbolClosure : public StackObj { class SymbolClosure : public StackObj {

View file

@ -31,6 +31,7 @@
#include "classfile/sharedClassUtil.hpp" #include "classfile/sharedClassUtil.hpp"
#include "classfile/symbolTable.hpp" #include "classfile/symbolTable.hpp"
#include "classfile/systemDictionary.hpp" #include "classfile/systemDictionary.hpp"
#include "classfile/systemDictionaryShared.hpp"
#include "code/codeCache.hpp" #include "code/codeCache.hpp"
#include "gc/shared/gcLocker.hpp" #include "gc/shared/gcLocker.hpp"
#include "interpreter/bytecodeStream.hpp" #include "interpreter/bytecodeStream.hpp"
@ -106,7 +107,8 @@ void MetaspaceShared::initialize_shared_rs(ReservedSpace* rs) {
// Read/write a data stream for restoring/preserving metadata pointers and // Read/write a data stream for restoring/preserving metadata pointers and
// miscellaneous data from/to the shared archive file. // miscellaneous data from/to the shared archive file.
void MetaspaceShared::serialize(SerializeClosure* soc) { void MetaspaceShared::serialize(SerializeClosure* soc, GrowableArray<MemRegion> *string_space,
size_t* space_size) {
int tag = 0; int tag = 0;
soc->do_tag(--tag); soc->do_tag(--tag);
@ -128,6 +130,15 @@ void MetaspaceShared::serialize(SerializeClosure* soc) {
vmSymbols::serialize(soc); vmSymbols::serialize(soc);
soc->do_tag(--tag); soc->do_tag(--tag);
// Dump/restore the symbol and string tables
SymbolTable::serialize(soc);
StringTable::serialize(soc, string_space, space_size);
soc->do_tag(--tag);
// Dump/restore the misc information for system dictionary
SystemDictionaryShared::serialize(soc);
soc->do_tag(--tag);
soc->do_tag(666); soc->do_tag(666);
} }
@ -314,6 +325,11 @@ public:
++top; ++top;
} }
void do_u4(u4* p) {
void* ptr = (void*)(uintx(*p));
do_ptr(&ptr);
}
void do_tag(int tag) { void do_tag(int tag) {
check_space(); check_space();
*top = (intptr_t)tag; *top = (intptr_t)tag;
@ -348,6 +364,8 @@ public:
METASPACE_OBJ_TYPES_DO(f) \ METASPACE_OBJ_TYPES_DO(f) \
f(SymbolHashentry) \ f(SymbolHashentry) \
f(SymbolBucket) \ f(SymbolBucket) \
f(StringHashentry) \
f(StringBucket) \
f(Other) f(Other)
#define SHAREDSPACE_OBJ_TYPE_DECLARE(name) name ## Type, #define SHAREDSPACE_OBJ_TYPE_DECLARE(name) name ## Type,
@ -406,13 +424,22 @@ void DumpAllocClosure::dump_stats(int ro_all, int rw_all, int md_all, int mc_all
MetaspaceSharedStats *stats = MetaspaceShared::stats(); MetaspaceSharedStats *stats = MetaspaceShared::stats();
// symbols // symbols
_counts[RW][SymbolHashentryType] = stats->symbol.hashentry_count; _counts[RO][SymbolHashentryType] = stats->symbol.hashentry_count;
_bytes [RW][SymbolHashentryType] = stats->symbol.hashentry_bytes; _bytes [RO][SymbolHashentryType] = stats->symbol.hashentry_bytes;
other_bytes -= stats->symbol.hashentry_bytes; _bytes [RO][TypeArrayU4Type] -= stats->symbol.hashentry_bytes;
_counts[RW][SymbolBucketType] = stats->symbol.bucket_count; _counts[RO][SymbolBucketType] = stats->symbol.bucket_count;
_bytes [RW][SymbolBucketType] = stats->symbol.bucket_bytes; _bytes [RO][SymbolBucketType] = stats->symbol.bucket_bytes;
other_bytes -= stats->symbol.bucket_bytes; _bytes [RO][TypeArrayU4Type] -= stats->symbol.bucket_bytes;
// strings
_counts[RO][StringHashentryType] = stats->string.hashentry_count;
_bytes [RO][StringHashentryType] = stats->string.hashentry_bytes;
_bytes [RO][TypeArrayU4Type] -= stats->string.hashentry_bytes;
_counts[RO][StringBucketType] = stats->string.bucket_count;
_bytes [RO][StringBucketType] = stats->string.bucket_bytes;
_bytes [RO][TypeArrayU4Type] -= stats->string.bucket_bytes;
// TODO: count things like dictionary, vtable, etc // TODO: count things like dictionary, vtable, etc
_bytes[RW][OtherType] = other_bytes; _bytes[RW][OtherType] = other_bytes;
@ -488,7 +515,6 @@ private:
GrowableArray<Klass*> *_class_promote_order; GrowableArray<Klass*> *_class_promote_order;
VirtualSpace _md_vs; VirtualSpace _md_vs;
VirtualSpace _mc_vs; VirtualSpace _mc_vs;
CompactHashtableWriter* _string_cht;
GrowableArray<MemRegion> *_string_regions; GrowableArray<MemRegion> *_string_regions;
public: public:
@ -600,39 +626,27 @@ void VM_PopulateDumpSharedSpace::doit() {
// Not doing this either. // Not doing this either.
SystemDictionary::reorder_dictionary(); SystemDictionary::reorder_dictionary();
NOT_PRODUCT(SystemDictionary::verify();) NOT_PRODUCT(SystemDictionary::verify();)
// Copy the symbol table, string table, and the system dictionary to the shared
// space in usable form. Copy the hashtable
// buckets first [read-write], then copy the linked lists of entries
// [read-only].
NOT_PRODUCT(SymbolTable::verify());
handle_misc_data_space_failure(SymbolTable::copy_compact_table(&md_top, md_end));
size_t ss_bytes = 0;
char* ss_low;
// The string space has maximum two regions. See FileMapInfo::write_string_regions() for details.
_string_regions = new GrowableArray<MemRegion>(2);
NOT_PRODUCT(StringTable::verify());
handle_misc_data_space_failure(StringTable::copy_compact_table(&md_top, md_end, _string_regions,
&ss_bytes));
ss_low = _string_regions->is_empty() ? NULL : (char*)_string_regions->first().start();
SystemDictionary::reverse(); SystemDictionary::reverse();
SystemDictionary::copy_buckets(&md_top, md_end); SystemDictionary::copy_buckets(&md_top, md_end);
SystemDictionary::copy_table(&md_top, md_end); SystemDictionary::copy_table(&md_top, md_end);
// Write the other data to the output array. // Write the other data to the output array.
// SymbolTable, StringTable and extra information for system dictionary
NOT_PRODUCT(SymbolTable::verify());
NOT_PRODUCT(StringTable::verify());
size_t ss_bytes = 0;
char* ss_low;
// The string space has maximum two regions. See FileMapInfo::write_string_regions() for details.
_string_regions = new GrowableArray<MemRegion>(2);
WriteClosure wc(md_top, md_end); WriteClosure wc(md_top, md_end);
MetaspaceShared::serialize(&wc); MetaspaceShared::serialize(&wc, _string_regions, &ss_bytes);
md_top = wc.get_top(); md_top = wc.get_top();
ss_low = _string_regions->is_empty() ? NULL : (char*)_string_regions->first().start();
// Print shared spaces all the time // Print shared spaces all the time
// To make fmt_space be a syntactic constant (for format warnings), use #define.
#define fmt_space "%s space: " SIZE_FORMAT_W(9) " [ %4.1f%% of total] out of " SIZE_FORMAT_W(9) " bytes [%4.1f%% used] at " INTPTR_FORMAT
Metaspace* ro_space = _loader_data->ro_metaspace(); Metaspace* ro_space = _loader_data->ro_metaspace();
Metaspace* rw_space = _loader_data->rw_metaspace(); Metaspace* rw_space = _loader_data->rw_metaspace();
@ -665,12 +679,13 @@ void VM_PopulateDumpSharedSpace::doit() {
const double mc_u_perc = mc_bytes / double(mc_alloced) * 100.0; const double mc_u_perc = mc_bytes / double(mc_alloced) * 100.0;
const double total_u_perc = total_bytes / double(total_alloced) * 100.0; const double total_u_perc = total_bytes / double(total_alloced) * 100.0;
#define fmt_space "%s space: " SIZE_FORMAT_W(9) " [ %4.1f%% of total] out of " SIZE_FORMAT_W(9) " bytes [%5.1f%% used] at " INTPTR_FORMAT
tty->print_cr(fmt_space, "ro", ro_bytes, ro_t_perc, ro_alloced, ro_u_perc, p2i(ro_space->bottom())); tty->print_cr(fmt_space, "ro", ro_bytes, ro_t_perc, ro_alloced, ro_u_perc, p2i(ro_space->bottom()));
tty->print_cr(fmt_space, "rw", rw_bytes, rw_t_perc, rw_alloced, rw_u_perc, p2i(rw_space->bottom())); tty->print_cr(fmt_space, "rw", rw_bytes, rw_t_perc, rw_alloced, rw_u_perc, p2i(rw_space->bottom()));
tty->print_cr(fmt_space, "md", md_bytes, md_t_perc, md_alloced, md_u_perc, p2i(md_low)); tty->print_cr(fmt_space, "md", md_bytes, md_t_perc, md_alloced, md_u_perc, p2i(md_low));
tty->print_cr(fmt_space, "mc", mc_bytes, mc_t_perc, mc_alloced, mc_u_perc, p2i(mc_low)); tty->print_cr(fmt_space, "mc", mc_bytes, mc_t_perc, mc_alloced, mc_u_perc, p2i(mc_low));
tty->print_cr(fmt_space, "st", ss_bytes, ss_t_perc, ss_bytes, 100.0, p2i(ss_low)); tty->print_cr(fmt_space, "st", ss_bytes, ss_t_perc, ss_bytes, 100.0, p2i(ss_low));
tty->print_cr("total : " SIZE_FORMAT_W(9) " [100.0%% of total] out of " SIZE_FORMAT_W(9) " bytes [%4.1f%% used]", tty->print_cr("total : " SIZE_FORMAT_W(9) " [100.0%% of total] out of " SIZE_FORMAT_W(9) " bytes [%5.1f%% used]",
total_bytes, total_alloced, total_u_perc); total_bytes, total_alloced, total_u_perc);
// Update the vtable pointers in all of the Klass objects in the // Update the vtable pointers in all of the Klass objects in the
@ -974,6 +989,11 @@ public:
*p = (void*)obj; *p = (void*)obj;
} }
void do_u4(u4* p) {
intptr_t obj = nextPtr();
*p = (u4)(uintx(obj));
}
void do_tag(int tag) { void do_tag(int tag) {
int old_tag; int old_tag;
old_tag = (int)(intptr_t)nextPtr(); old_tag = (int)(intptr_t)nextPtr();
@ -1097,21 +1117,6 @@ void MetaspaceShared::initialize_shared_spaces() {
buffer += sizeof(intptr_t); buffer += sizeof(intptr_t);
buffer += vtable_size; buffer += vtable_size;
// Create the shared symbol table using the compact table at this spot in the
// misc data space. (Todo: move this to read-only space. Currently
// this is mapped copy-on-write but will never be written into).
buffer = (char*)SymbolTable::init_shared_table(buffer);
SymbolTable::create_table();
// Create the shared string table using the compact table
buffer = (char*)StringTable::init_shared_table(mapinfo, buffer);
// Create the shared dictionary using the bucket array at this spot in
// the misc data space. Since the shared dictionary table is never
// modified, this region (of mapped pages) will be (effectively, if
// not explicitly) read-only.
int sharedDictionaryLen = *(intptr_t*)buffer; int sharedDictionaryLen = *(intptr_t*)buffer;
buffer += sizeof(intptr_t); buffer += sizeof(intptr_t);
int number_of_entries = *(intptr_t*)buffer; int number_of_entries = *(intptr_t*)buffer;
@ -1129,9 +1134,14 @@ void MetaspaceShared::initialize_shared_spaces() {
buffer += sizeof(intptr_t); buffer += sizeof(intptr_t);
buffer += len; buffer += len;
// Verify various attributes of the archive, plus initialize the
// shared string/symbol tables
intptr_t* array = (intptr_t*)buffer; intptr_t* array = (intptr_t*)buffer;
ReadClosure rc(&array); ReadClosure rc(&array);
serialize(&rc); serialize(&rc, NULL, NULL);
// Initialize the run-time symbol table.
SymbolTable::create_table();
// Close the mapinfo file // Close the mapinfo file
mapinfo->close(); mapinfo->close();

View file

@ -1,5 +1,5 @@
/* /*
* Copyright (c) 2012, 2015, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2012, 2016, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* *
* This code is free software; you can redistribute it and/or modify it * This code is free software; you can redistribute it and/or modify it
@ -47,7 +47,7 @@
#define MIN_SHARED_READ_WRITE_SIZE (NOT_LP64(7*M) LP64_ONLY(12*M)) #define MIN_SHARED_READ_WRITE_SIZE (NOT_LP64(7*M) LP64_ONLY(12*M))
#define DEFAULT_SHARED_READ_ONLY_SIZE (NOT_LP64(12*M) LP64_ONLY(16*M)) #define DEFAULT_SHARED_READ_ONLY_SIZE (NOT_LP64(12*M) LP64_ONLY(16*M))
#define MIN_SHARED_READ_ONLY_SIZE (NOT_LP64(8*M) LP64_ONLY(9*M)) #define MIN_SHARED_READ_ONLY_SIZE (NOT_LP64(9*M) LP64_ONLY(10*M))
// the MIN_SHARED_MISC_DATA_SIZE and MIN_SHARED_MISC_CODE_SIZE estimates are based on // the MIN_SHARED_MISC_DATA_SIZE and MIN_SHARED_MISC_CODE_SIZE estimates are based on
// the sizes required for dumping the archive using the default classlist. The sizes // the sizes required for dumping the archive using the default classlist. The sizes
@ -193,7 +193,8 @@ class MetaspaceShared : AllStatic {
void** vtable, void** vtable,
char** md_top, char* md_end, char** md_top, char* md_end,
char** mc_top, char* mc_end); char** mc_top, char* mc_end);
static void serialize(SerializeClosure* sc); static void serialize(SerializeClosure* sc, GrowableArray<MemRegion> *string_space,
size_t* space_size);
static MetaspaceSharedStats* stats() { static MetaspaceSharedStats* stats() {
return &_stats; return &_stats;

View file

@ -662,11 +662,11 @@ typedef CompactHashtable<Symbol*, char> SymbolCompactHashTable;
/* CompactHashTable */ \ /* CompactHashTable */ \
/********************/ \ /********************/ \
\ \
nonstatic_field(SymbolCompactHashTable, _base_address, uintx) \ nonstatic_field(SymbolCompactHashTable, _base_address, address) \
nonstatic_field(SymbolCompactHashTable, _entry_count, juint) \ nonstatic_field(SymbolCompactHashTable, _entry_count, u4) \
nonstatic_field(SymbolCompactHashTable, _bucket_count, juint) \ nonstatic_field(SymbolCompactHashTable, _bucket_count, u4) \
nonstatic_field(SymbolCompactHashTable, _table_end_offset, juint) \ nonstatic_field(SymbolCompactHashTable, _buckets, u4*) \
nonstatic_field(SymbolCompactHashTable, _buckets, juint*) \ nonstatic_field(SymbolCompactHashTable, _entries, u4*) \
\ \
/********************/ \ /********************/ \
/* SystemDictionary */ \ /* SystemDictionary */ \

View file

@ -125,7 +125,7 @@ public class LimitSharedSizes {
// test with sizes which just meet the minimum required sizes // test with sizes which just meet the minimum required sizes
// the following tests also attempt to use the shared archive // the following tests also attempt to use the shared archive
new SharedSizeTestData(Region.RO, Platform.is64bit() ? "9M":"8M", Result.VALID_ARCHIVE), new SharedSizeTestData(Region.RO, Platform.is64bit() ? "10M":"9M", Result.VALID_ARCHIVE),
new SharedSizeTestData(Region.RW, Platform.is64bit() ? "12M":"7M", Result.VALID_ARCHIVE), new SharedSizeTestData(Region.RW, Platform.is64bit() ? "12M":"7M", Result.VALID_ARCHIVE),
new SharedSizeTestData(Region.MD, Platform.is64bit() ? "4M":"2M", Result.VALID_ARCHIVE), new SharedSizeTestData(Region.MD, Platform.is64bit() ? "4M":"2M", Result.VALID_ARCHIVE),
new SharedSizeTestData(Region.MC, "120k", Result.VALID_ARCHIVE), new SharedSizeTestData(Region.MC, "120k", Result.VALID_ARCHIVE),
@ -176,7 +176,7 @@ public class LimitSharedSizes {
output.getOutput().contains("Unable to reserve shared space at required address")) && output.getOutput().contains("Unable to reserve shared space at required address")) &&
output.getExitValue() == 1) { output.getExitValue() == 1) {
System.out.println("Unable to use shared archive: test not executed; assumed passed"); System.out.println("Unable to use shared archive: test not executed; assumed passed");
return; continue;
} }
} }
output.shouldHaveExitValue(0); output.shouldHaveExitValue(0);

View file

@ -0,0 +1,120 @@
/*
* Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
/*
* @test SASymbolTableTest
* @summary Walk symbol table using SA, with and without CDS.
* @library /testlibrary
* @modules java.base/jdk.internal.misc
* jdk.hotspot.agent/sun.jvm.hotspot.oops
* jdk.hotspot.agent/sun.jvm.hotspot.memory
* jdk.hotspot.agent/sun.jvm.hotspot.runtime
* jdk.hotspot.agent/sun.jvm.hotspot.tools
* java.management
* @build SASymbolTableTestAgent SASymbolTableTestAttachee jdk.test.lib.*
* @run main SASymbolTableTest
*/
import jdk.test.lib.*;
/*
 * The purpose of this test is to validate that we can use SA to
 * attach to a process and walk its SymbolTable, regardless of whether
 * the attachee process runs in CDS mode or not.
 *
 * SASymbolTableTest just sets up the agent and attachee processes.
 * The SymbolTable walking is done in the SASymbolTableTestAgent class.
 */
public class SASymbolTableTest {
    // Location of the CDS archive created by createArchive() and mapped by run().
    static String jsaName = "./SASymbolTableTest.jsa";

    public static void main(String[] args) throws Exception {
        if (!Platform.shouldSAAttach()) {
            System.out.println("SA attach not expected to work - test skipped.");
            return;
        }
        createArchive();
        run(true);
        run(false);
    }

    // Dump a CDS archive so the attachee can optionally run in sharing mode.
    private static void createArchive() throws Exception {
        ProcessBuilder pb = ProcessTools.createJavaProcessBuilder(
            "-XX:+UnlockDiagnosticVMOptions",
            "-XX:SharedArchiveFile=" + jsaName,
            "-Xshare:dump");
        OutputAnalyzer output = new OutputAnalyzer(pb.start());
        output.shouldContain("Loading classes to share");
        output.shouldHaveExitValue(0);
    }

    /**
     * Launch the attachee JVM (with or without CDS), attach the SA agent
     * to it, and verify that the agent exits cleanly.
     *
     * @param useArchive if true, run the attachee with -Xshare:auto so it can
     *                   map the CDS archive; otherwise run with -Xshare:off
     */
    private static void run(boolean useArchive) throws Exception {
        String flag = useArchive ? "auto" : "off";

        // (1) Launch the attachee process
        ProcessBuilder attachee = ProcessTools.createJavaProcessBuilder(
            "-XX:+UnlockDiagnosticVMOptions",
            "-XX:SharedArchiveFile=" + jsaName,
            "-Xshare:" + flag,
            "-showversion", // so we can see "sharing" in the output
            "SASymbolTableTestAttachee");
        final Process p = attachee.start();

        try {
            // (2) Launch the agent process and let it walk the attachee's
            //     SymbolTable.
            long pid = p.getPid();
            System.out.println("Attaching agent " + pid);
            ProcessBuilder tool = ProcessTools.createJavaProcessBuilder(
                "-XaddExports:jdk.hotspot.agent/sun.jvm.hotspot.oops=ALL-UNNAMED",
                "-XaddExports:jdk.hotspot.agent/sun.jvm.hotspot.memory=ALL-UNNAMED",
                "-XaddExports:jdk.hotspot.agent/sun.jvm.hotspot.runtime=ALL-UNNAMED",
                "-XaddExports:jdk.hotspot.agent/sun.jvm.hotspot.tools=ALL-UNNAMED",
                "SASymbolTableTestAgent",
                Long.toString(pid));
            OutputAnalyzer output = ProcessTools.executeProcess(tool);
            System.out.println(output.getOutput());
            output.shouldHaveExitValue(0);
        } finally {
            // (3) Drain and print the attachee's output on a separate thread,
            //     then terminate the attachee. This runs in a finally block so
            //     the (deliberately long-sleeping) attachee process is not
            //     leaked if the agent step above throws.
            Thread t = new Thread() {
                public void run() {
                    try {
                        OutputAnalyzer output = new OutputAnalyzer(p);
                        System.out.println("STDOUT[");
                        System.out.print(output.getStdout());
                        System.out.println("]");
                        System.out.println("STDERR[");
                        System.out.print(output.getStderr());
                        System.out.println("]");
                    } catch (Throwable t) {
                        t.printStackTrace();
                    }
                }
            };
            t.start();
            Thread.sleep(2 * 1000);
            p.destroy();
            t.join();
        }
    }
}

View file

@ -0,0 +1,142 @@
/*
* Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
import sun.jvm.hotspot.memory.SymbolTable;
import sun.jvm.hotspot.oops.Symbol;
import sun.jvm.hotspot.runtime.VM;
import sun.jvm.hotspot.tools.Tool;
/**
* This class is launched in a sub-process by the main test,
* SASymbolTableTest.java.
*
* It uses SA to connect to another JVM process, whose PID is specified in args[].
* The purpose of the test is to validate that we can walk the SymbolTable
* and CompactHashTable of the other process. Everything should work regardless
* of whether the other process runs in CDS mode or not.
*
* Note: CompactHashTable is used only when CDS is enabled.
*/
public class SASymbolTableTestAgent extends Tool {
    public SASymbolTableTestAgent() {
        super();
    }

    public static void main(String args[]) {
        SASymbolTableTestAgent tool = new SASymbolTableTestAgent();
        tool.execute(args);
    }

    // Names that are likely to exist in the symbol table of a JVM after
    // start-up. They were taken from vmSymbols.hpp during the middle of
    // JDK9 development.
    //
    // The purpose is not to check that each name must exist (a future
    // version of JDK may not preload some of the classes).
    //
    // Probing many symbols makes it likely that we exercise both the
    // VALUE_ONLY_BUCKET_TYPE and normal bucket type code paths in
    // CompactHashTable.probe().
    static String[] commonNames = {
        "java/lang/Object",
        "java/lang/String",
        "java/lang/Class",
        "java/lang/Cloneable",
        "java/lang/ClassLoader",
        "java/io/Serializable",
        "java/lang/System",
        "java/lang/Throwable",
        "java/lang/Error",
        "java/lang/ThreadDeath",
        "java/lang/Exception",
        "java/lang/RuntimeException",
        "java/lang/SecurityManager",
        "java/security/ProtectionDomain",
        "java/security/AccessControlContext",
        "java/security/SecureClassLoader",
        "java/lang/ClassNotFoundException",
        "java/lang/NoClassDefFoundError",
        "java/lang/LinkageError",
        "java/lang/ClassCastException",
        "java/lang/ArrayStoreException",
        "java/lang/VirtualMachineError",
        "java/lang/OutOfMemoryError",
        "java/lang/StackOverflowError",
        "java/lang/IllegalMonitorStateException",
        "java/lang/ref/Reference",
        "java/lang/ref/SoftReference",
        "java/lang/ref/WeakReference",
        "java/lang/ref/FinalReference",
        "java/lang/ref/PhantomReference",
        "java/lang/ref/Finalizer",
        "java/lang/Thread",
        "java/lang/ThreadGroup",
        "java/util/Properties",
        "java/lang/reflect/AccessibleObject",
        "java/lang/reflect/Field",
        "java/lang/reflect/Method",
        "java/lang/reflect/Constructor",
        "java/lang/invoke/MethodHandle",
        "java/lang/invoke/MemberName",
        "java/lang/invoke/MethodHandleNatives",
        "java/lang/invoke/MethodType",
        "java/lang/BootstrapMethodError",
        "java/lang/invoke/CallSite",
        "java/lang/invoke/ConstantCallSite",
        "java/lang/invoke/MutableCallSite",
        "java/lang/invoke/VolatileCallSite",
        "java/lang/StringBuffer",
        "java/lang/StringBuilder",
        "java/io/ByteArrayInputStream",
        "java/io/File",
        "java/net/URLClassLoader",
        "java/net/URL",
        "java/util/jar/Manifest",
        "java/security/CodeSource",
    };

    // Names that are known to NOT exist in the symbol table; probing
    // these forces both the compact table (if it exists) and the regular
    // table to be walked to the end.
    static String[] badNames = {
        "java/lang/badbadbad",
        "java/io/badbadbadbad",
        "this*symbol*must*not*exist"
    };

    // Probe the table for a single name and print the (possibly null) result.
    private static void probeAndPrint(SymbolTable symbolTable, String name) {
        Symbol sym = symbolTable.probe(name);
        System.out.format("%-40s = %s\n", name, sym);
    }

    public void run() {
        System.out.println("SASymbolTableTestAgent: starting");
        SymbolTable symbolTable = VM.getVM().getSymbolTable();

        // (a) Probe names that are expected to be present.
        for (int i = 0; i < commonNames.length; i++) {
            probeAndPrint(symbolTable, commonNames[i]);
        }

        System.out.println("======================================================================");

        // (b) Probe names that are known to be absent.
        for (int i = 0; i < badNames.length; i++) {
            probeAndPrint(symbolTable, badNames[i]);
        }
    }
}

View file

@ -0,0 +1,38 @@
/*
* Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
/**
* This class is launched in a sub-process by the main test,
* SASymbolTableTest.java.
*
* This class does nothing in particular. It just sleeps for 120
* seconds so SASymbolTableTestAgent can have a chance to examine its
* SymbolTable. This process should be killed by the parent process
* after SASymbolTableTestAgent has completed testing.
*/
public class SASymbolTableTestAttachee {
    // How long to keep this process alive so the SA agent can attach and
    // inspect it; the parent test kills us well before this expires.
    public static void main(String args[]) throws Throwable {
        System.out.println("SASymbolTableTestAttachee: sleeping to wait for SA tool to attach ...");
        final long sleepMillis = 120 * 1000;
        Thread.sleep(sleepMillis);
    }
}