Mirror of https://github.com/openjdk/jdk.git
8210388: Use hash table to store archived subgraph_info records
Reviewed-by: jiangli
parent 859d376494
commit 2f82ed4f1d
9 changed files with 220 additions and 246 deletions
src/hotspot/share/classfile/symbolTable.cpp

@@ -627,43 +627,31 @@ void SymbolTable::dump(outputStream* st, bool verbose) {
 }
 
 #if INCLUDE_CDS
-class CompactSymbolTableWriter: public CompactHashtableWriter {
-public:
-  CompactSymbolTableWriter(int num_buckets, CompactHashtableStats* stats) :
-    CompactHashtableWriter(num_buckets, stats) {}
-  void add(unsigned int hash, Symbol *symbol) {
-    uintx deltax = MetaspaceShared::object_delta(symbol);
-    // When the symbols are stored into the archive, we already check that
-    // they won't be more than MAX_SHARED_DELTA from the base address, or
-    // else the dumping would have been aborted.
-    assert(deltax <= MAX_SHARED_DELTA, "must not be");
-    u4 delta = u4(deltax);
-
-    CompactHashtableWriter::add(hash, delta);
-  }
-};
-
 struct CopyToArchive : StackObj {
-  CompactSymbolTableWriter* _writer;
-  CopyToArchive(CompactSymbolTableWriter* writer) : _writer(writer) {}
+  CompactHashtableWriter* _writer;
+  CopyToArchive(CompactHashtableWriter* writer) : _writer(writer) {}
   bool operator()(Symbol** value) {
     assert(value != NULL, "expected valid value");
     assert(*value != NULL, "value should point to a symbol");
     Symbol* sym = *value;
     unsigned int fixed_hash = hash_shared_symbol((const char*)sym->bytes(), sym->utf8_length());
-    if (fixed_hash == 0) {
-      return true;
-    }
     assert(fixed_hash == hash_symbol((const char*)sym->bytes(), sym->utf8_length(), false),
            "must not rehash during dumping");
 
+    uintx deltax = MetaspaceShared::object_delta(sym);
+    // When the symbols are stored into the archive, we already check that
+    // they won't be more than MAX_SHARED_DELTA from the base address, or
+    // else the dumping would have been aborted.
+    assert(deltax <= MAX_SHARED_DELTA, "must not be");
+    u4 delta = u4(deltax);
+
     // add to the compact table
-    _writer->add(fixed_hash, sym);
+    _writer->add(fixed_hash, delta);
     return true;
   }
 };
 
-void SymbolTable::copy_shared_symbol_table(CompactSymbolTableWriter* writer) {
+void SymbolTable::copy_shared_symbol_table(CompactHashtableWriter* writer) {
   CopyToArchive copy(writer);
   SymbolTable::the_table()->_local_table->do_scan(Thread::current(), copy);
 }
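The hunk above drops the CompactSymbolTableWriter subclass: the scan functor now computes the u4 offset itself and hands plain (hash, delta) pairs to the generic CompactHashtableWriter, so every archived table can share one writer type (presumably what lets the new subgraph_info table reuse the same machinery). Below is a minimal, self-contained sketch of that pattern; GenericWriter and CopyToArchiveLike are stand-in names, not HotSpot code.

#include <cassert>
#include <cstdint>
#include <iostream>
#include <string>
#include <vector>

// Stand-in for the generic writer: it only ever sees (hash, 32-bit offset) pairs.
struct GenericWriter {
  std::vector<std::pair<uint32_t, uint32_t>> entries;
  void add(uint32_t hash, uint32_t offset) { entries.emplace_back(hash, offset); }
};

// Stand-in for the scan functor: it converts each entry to an offset from the
// archive base itself, so no table-specific writer subclass is needed.
struct CopyToArchiveLike {
  GenericWriter* writer;
  const char* base;                       // everything is archived relative to this
  bool operator()(const std::string& value) {
    uint32_t hash = static_cast<uint32_t>(std::hash<std::string>{}(value));
    uintptr_t delta = reinterpret_cast<const char*>(&value) - base;
    assert(delta <= UINT32_MAX && "offset must fit in a u4");
    writer->add(hash, static_cast<uint32_t>(delta));
    return true;                          // keep scanning
  }
};

int main() {
  std::vector<std::string> table = {"java/lang/Object", "main", "<init>"};
  GenericWriter writer;
  CopyToArchiveLike copy{&writer, reinterpret_cast<const char*>(table.data())};
  for (const std::string& s : table) copy(s);   // stand-in for do_scan()
  for (const auto& e : writer.entries)
    std::cout << "hash=" << e.first << " offset=" << e.second << "\n";
  return 0;
}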
@@ -671,10 +659,10 @@ void SymbolTable::copy_shared_symbol_table(CompactSymbolTableWriter* writer) {
 void SymbolTable::write_to_archive() {
   _shared_table.reset();
 
-  int num_buckets = (int)(SymbolTable::the_table()->_items_count / SharedSymbolTableBucketSize);
-  // calculation of num_buckets can result in zero buckets, we need at least one
-  CompactSymbolTableWriter writer(num_buckets > 1 ? num_buckets : 1,
-                                  &MetaspaceShared::stats()->symbol);
+  int num_buckets = CompactHashtableWriter::default_num_buckets(
+      SymbolTable::the_table()->_items_count);
+  CompactHashtableWriter writer(num_buckets,
+                                &MetaspaceShared::stats()->symbol);
   copy_shared_symbol_table(&writer);
   writer.dump(&_shared_table, "symbol");
 
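Each table used to compute its own bucket count; that policy is now centralized in CompactHashtableWriter::default_num_buckets(), whose implementation is not visible in this capture. The sketch below (hypothetical name and constant) only mirrors the per-table logic being deleted: divide the entry count by a target bucket size and never return zero buckets.

#include <cstddef>

// Hypothetical sketch, assuming the centralized helper keeps the old policy:
// roughly items / target-bucket-size, but always at least one bucket.
static int default_num_buckets_sketch(size_t num_entries,
                                      size_t entries_per_bucket = 4) {
  size_t num_buckets = num_entries / entries_per_bucket;
  return num_buckets < 1 ? 1 : static_cast<int>(num_buckets);
}

With that clamp in place, a table dumped with very few entries (or none) still gets a valid one-bucket layout, which is what the deleted comment about zero buckets was guarding against.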
@@ -686,8 +674,8 @@ void SymbolTable::write_to_archive() {
   assert(sym == _shared_table.lookup(name, hash, len), "sanity");
 }
 
-void SymbolTable::serialize(SerializeClosure* soc) {
-  _shared_table.serialize(soc);
+void SymbolTable::serialize_shared_table_header(SerializeClosure* soc) {
+  _shared_table.serialize_header(soc);
 
   if (soc->writing()) {
     // Sanity. Make sure we don't use the shared table at dump time
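The last hunk renames serialize() to serialize_shared_table_header(), which matches a split where only the table's scalar header travels through the SerializeClosure while the bucket and entry arrays already live in the mapped archive. The sketch below is a hypothetical illustration of that split; HeaderStream, do_u4(), and the field names are made up, not HotSpot's API.

#include <cstddef>
#include <cstdint>
#include <vector>

// Hypothetical stand-in for a SerializeClosure that only moves scalar fields.
struct HeaderStream {
  bool writing;                         // true at dump time, false at runtime
  std::vector<uint32_t> data;
  size_t pos = 0;
  void do_u4(uint32_t* p) {
    if (writing) data.push_back(*p);    // dump: append the field to the stream
    else         *p = data[pos++];      // restore: read the field back
  }
};

// Hypothetical shared-table header: counts and an offset, nothing else.
struct SharedTableSketch {
  uint32_t bucket_count = 0;
  uint32_t entry_count = 0;
  uint32_t base_offset = 0;             // where the bucket array starts in the archive

  void serialize_header(HeaderStream* soc) {
    soc->do_u4(&bucket_count);
    soc->do_u4(&entry_count);
    soc->do_u4(&base_offset);
  }
  void reset() { bucket_count = entry_count = base_offset = 0; }
};

Resetting right after writing, as the surrounding code does under soc->writing(), keeps dump-time code from accidentally looking anything up in the half-built shared table.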