8166377: is_compiled_by_jvmci hot in some profiles - improve nmethod compiler type detection

Refactor code to remove the virtual call

Reviewed-by: kvn, twisti
Author: Nils Eliasson
Date:   2016-10-21 20:12:47 +02:00
Parent: 2bc0337093
Commit: 1134c66f5e

18 changed files with 197 additions and 155 deletions
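
Background for the diffs below: nmethod::is_compiled_by_c1/c2/jvmci/shark() used to be virtual calls that also had to NULL-check an AbstractCompiler*, and is_compiled_by_jvmci() showed up hot in some profiles. After this patch every CodeBlob records its CompilerType at construction time, so each query becomes an inline, non-virtual field comparison. A minimal stand-alone sketch of the before/after pattern (OldCompiler, OldBlob and NewBlob are illustrative stand-ins, not the real HotSpot classes):

#include <cstdio>

enum CompilerType { compiler_none, compiler_c1, compiler_c2, compiler_jvmci, compiler_shark };

// Before: the query needs a loaded AbstractCompiler-style object and a virtual call.
struct OldCompiler {
  virtual bool is_jvmci() const = 0;
  virtual ~OldCompiler() {}
};

struct OldBlob {
  OldCompiler* _compiler;                      // may be NULL, e.g. for runtime stubs
  virtual bool is_compiled_by_jvmci() const {  // virtual dispatch on a hot path
    return _compiler != nullptr && _compiler->is_jvmci();
  }
  virtual ~OldBlob() {}
};

// After: the compiler kind is captured once at construction; the query is an
// inline, non-virtual comparison against a const field.
struct NewBlob {
  const CompilerType _type;
  explicit NewBlob(CompilerType t) : _type(t) {}
  bool is_compiled_by_jvmci() const { return _type == compiler_jvmci; }
};

int main() {
  NewBlob blob(compiler_c2);
  std::printf("compiled by jvmci? %d\n", blob.is_compiled_by_jvmci());  // prints 0
  return 0;
}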


@@ -42,7 +42,7 @@
 #include "runtime/sharedRuntime.hpp"
-Compiler::Compiler() : AbstractCompiler(c1) {
+Compiler::Compiler() : AbstractCompiler(compiler_c1) {
 }
 void Compiler::init_c1_runtime() {


@@ -45,6 +45,10 @@
 #include "c1/c1_Runtime1.hpp"
 #endif
+const char* CodeBlob::compiler_name() const {
+  return compilertype2name(_type);
+}
 unsigned int CodeBlob::align_code_offset(int offset) {
   // align the size to CodeEntryAlignment
   return
@@ -65,7 +69,7 @@ unsigned int CodeBlob::allocation_size(CodeBuffer* cb, int header_size) {
   return size;
 }
-CodeBlob::CodeBlob(const char* name, const CodeBlobLayout& layout, int frame_complete_offset, int frame_size, ImmutableOopMapSet* oop_maps, bool caller_must_gc_arguments) :
+CodeBlob::CodeBlob(const char* name, CompilerType type, const CodeBlobLayout& layout, int frame_complete_offset, int frame_size, ImmutableOopMapSet* oop_maps, bool caller_must_gc_arguments) :
   _name(name),
   _size(layout.size()),
   _header_size(layout.header_size()),
@@ -80,7 +84,8 @@ CodeBlob::CodeBlob(const char* name, const CodeBlobLayout& layout, int frame_com
   _data_end(layout.data_end()),
   _relocation_begin(layout.relocation_begin()),
   _relocation_end(layout.relocation_end()),
-  _content_begin(layout.content_begin())
+  _content_begin(layout.content_begin()),
+  _type(type)
 {
   assert(layout.size() == round_to(layout.size(), oopSize), "unaligned size");
   assert(layout.header_size() == round_to(layout.header_size(), oopSize), "unaligned size");
@@ -92,7 +97,7 @@ CodeBlob::CodeBlob(const char* name, const CodeBlobLayout& layout, int frame_com
 #endif // COMPILER1
 }
-CodeBlob::CodeBlob(const char* name, const CodeBlobLayout& layout, CodeBuffer* cb, int frame_complete_offset, int frame_size, OopMapSet* oop_maps, bool caller_must_gc_arguments) :
+CodeBlob::CodeBlob(const char* name, CompilerType type, const CodeBlobLayout& layout, CodeBuffer* cb, int frame_complete_offset, int frame_size, OopMapSet* oop_maps, bool caller_must_gc_arguments) :
   _name(name),
   _size(layout.size()),
   _header_size(layout.header_size()),
@@ -106,7 +111,8 @@ CodeBlob::CodeBlob(const char* name, const CodeBlobLayout& layout, CodeBuffer* c
   _data_end(layout.data_end()),
   _relocation_begin(layout.relocation_begin()),
   _relocation_end(layout.relocation_end()),
-  _content_begin(layout.content_begin())
+  _content_begin(layout.content_begin()),
+  _type(type)
 {
   assert(_size == round_to(_size, oopSize), "unaligned size");
   assert(_header_size == round_to(_header_size, oopSize), "unaligned size");
@@ -123,7 +129,7 @@ CodeBlob::CodeBlob(const char* name, const CodeBlobLayout& layout, CodeBuffer* c
 // Creates a simple CodeBlob. Sets up the size of the different regions.
 RuntimeBlob::RuntimeBlob(const char* name, int header_size, int size, int frame_complete, int locs_size)
-  : CodeBlob(name, CodeBlobLayout((address) this, size, header_size, locs_size, size), frame_complete, 0, NULL, false /* caller_must_gc_arguments */)
+  : CodeBlob(name, compiler_none, CodeBlobLayout((address) this, size, header_size, locs_size, size), frame_complete, 0, NULL, false /* caller_must_gc_arguments */)
 {
   assert(locs_size == round_to(locs_size, oopSize), "unaligned size");
   assert(!UseRelocIndex, "no space allocated for reloc index yet");
@@ -148,7 +154,7 @@ RuntimeBlob::RuntimeBlob(
   int        frame_size,
   OopMapSet* oop_maps,
   bool       caller_must_gc_arguments
-) : CodeBlob(name, CodeBlobLayout((address) this, size, header_size, cb), cb, frame_complete, frame_size, oop_maps, caller_must_gc_arguments) {
+) : CodeBlob(name, compiler_none, CodeBlobLayout((address) this, size, header_size, cb), cb, frame_complete, frame_size, oop_maps, caller_must_gc_arguments) {
   cb->copy_code_and_locs_to(this);
 }


@@ -26,6 +26,7 @@
 #define SHARE_VM_CODE_CODEBLOB_HPP
 #include "asm/codeBuffer.hpp"
+#include "compiler/compilerDefinitions.hpp"
 #include "compiler/oopMap.hpp"
 #include "runtime/frame.hpp"
 #include "runtime/handles.hpp"
@@ -71,7 +72,8 @@ class CodeBlob VALUE_OBJ_CLASS_SPEC {
   friend class CodeCacheDumper;
 protected:
-  const char* _name;
+  const CompilerType _type;        // CompilerType
   int _size;                       // total size of CodeBlob in bytes
   int _header_size;                // size of header (depends on subclass)
   int _frame_complete_offset;      // instruction offsets in [0.._frame_complete_offset) have
@@ -92,9 +94,10 @@ protected:
   ImmutableOopMapSet* _oop_maps;   // OopMap for this CodeBlob
   bool _caller_must_gc_arguments;
   CodeStrings _strings;
-  CodeBlob(const char* name, const CodeBlobLayout& layout, int frame_complete_offset, int frame_size, ImmutableOopMapSet* oop_maps, bool caller_must_gc_arguments);
-  CodeBlob(const char* name, const CodeBlobLayout& layout, CodeBuffer* cb, int frame_complete_offset, int frame_size, OopMapSet* oop_maps, bool caller_must_gc_arguments);
+  const char* _name;
+  CodeBlob(const char* name, CompilerType type, const CodeBlobLayout& layout, int frame_complete_offset, int frame_size, ImmutableOopMapSet* oop_maps, bool caller_must_gc_arguments);
+  CodeBlob(const char* name, CompilerType type, const CodeBlobLayout& layout, CodeBuffer* cb, int frame_complete_offset, int frame_size, OopMapSet* oop_maps, bool caller_must_gc_arguments);
 public:
   // Returns the space needed for CodeBlob
   static unsigned int allocation_size(CodeBuffer* cb, int header_size);
@@ -115,9 +118,11 @@ public:
   virtual bool is_method_handles_adapter_blob() const { return false; }
   virtual bool is_compiled() const { return false; }
-  virtual bool is_compiled_by_c2() const    { return false; }
-  virtual bool is_compiled_by_c1() const    { return false; }
-  virtual bool is_compiled_by_jvmci() const { return false; }
+  inline bool is_compiled_by_c1() const    { return _type == compiler_c1; };
+  inline bool is_compiled_by_c2() const    { return _type == compiler_c2; };
+  inline bool is_compiled_by_jvmci() const { return _type == compiler_jvmci; };
+  inline bool is_compiled_by_shark() const { return _type == compiler_shark; };
+  const char* compiler_name() const;
   // Casting
   nmethod* as_nmethod_or_null() { return is_nmethod() ? (nmethod*) this : NULL; }


@@ -31,14 +31,14 @@
 #include "memory/resourceArea.hpp"
 #include "runtime/mutexLocker.hpp"
-CompiledMethod::CompiledMethod(Method* method, const char* name, const CodeBlobLayout& layout, int frame_complete_offset, int frame_size, ImmutableOopMapSet* oop_maps, bool caller_must_gc_arguments)
-  : CodeBlob(name, layout, frame_complete_offset, frame_size, oop_maps, caller_must_gc_arguments),
+CompiledMethod::CompiledMethod(Method* method, const char* name, CompilerType type, const CodeBlobLayout& layout, int frame_complete_offset, int frame_size, ImmutableOopMapSet* oop_maps, bool caller_must_gc_arguments)
+  : CodeBlob(name, type, layout, frame_complete_offset, frame_size, oop_maps, caller_must_gc_arguments),
   _method(method), _mark_for_deoptimization_status(not_marked) {
   init_defaults();
 }
-CompiledMethod::CompiledMethod(Method* method, const char* name, int size, int header_size, CodeBuffer* cb, int frame_complete_offset, int frame_size, OopMapSet* oop_maps, bool caller_must_gc_arguments)
-  : CodeBlob(name, CodeBlobLayout((address) this, size, header_size, cb), cb, frame_complete_offset, frame_size, oop_maps, caller_must_gc_arguments),
+CompiledMethod::CompiledMethod(Method* method, const char* name, CompilerType type, int size, int header_size, CodeBuffer* cb, int frame_complete_offset, int frame_size, OopMapSet* oop_maps, bool caller_must_gc_arguments)
+  : CodeBlob(name, type, CodeBlobLayout((address) this, size, header_size, cb), cb, frame_complete_offset, frame_size, oop_maps, caller_must_gc_arguments),
   _method(method), _mark_for_deoptimization_status(not_marked) {
   init_defaults();
 }


@@ -164,8 +164,8 @@
   virtual void flush() = 0;
 protected:
-  CompiledMethod(Method* method, const char* name, const CodeBlobLayout& layout, int frame_complete_offset, int frame_size, ImmutableOopMapSet* oop_maps, bool caller_must_gc_arguments);
-  CompiledMethod(Method* method, const char* name, int size, int header_size, CodeBuffer* cb, int frame_complete_offset, int frame_size, OopMapSet* oop_maps, bool caller_must_gc_arguments);
+  CompiledMethod(Method* method, const char* name, CompilerType type, const CodeBlobLayout& layout, int frame_complete_offset, int frame_size, ImmutableOopMapSet* oop_maps, bool caller_must_gc_arguments);
+  CompiledMethod(Method* method, const char* name, CompilerType type, int size, int header_size, CodeBuffer* cb, int frame_complete_offset, int frame_size, OopMapSet* oop_maps, bool caller_must_gc_arguments);
 public:
   virtual bool is_compiled() const { return true; }
@@ -191,12 +191,10 @@
     // will be transformed to zombie immediately
   };
-  virtual AbstractCompiler* compiler() const = 0;
   virtual bool is_in_use() const = 0;
   virtual int comp_level() const = 0;
   virtual int compile_id() const = 0;
   virtual address verified_entry_point() const = 0;
   virtual void log_identity(xmlStream* log) const = 0;
   virtual void log_state_change() const = 0;


@@ -82,32 +82,6 @@
 #endif
-bool nmethod::is_compiled_by_c1() const {
-  if (compiler() == NULL) {
-    return false;
-  }
-  return compiler()->is_c1();
-}
-bool nmethod::is_compiled_by_jvmci() const {
-  if (compiler() == NULL || method() == NULL) return false;  // can happen during debug printing
-  if (is_native_method()) return false;
-  return compiler()->is_jvmci();
-}
-bool nmethod::is_compiled_by_c2() const {
-  if (compiler() == NULL) {
-    return false;
-  }
-  return compiler()->is_c2();
-}
-bool nmethod::is_compiled_by_shark() const {
-  if (compiler() == NULL) {
-    return false;
-  }
-  return compiler()->is_shark();
-}
 //---------------------------------------------------------------------------------
 // NMethod statistics
 // They are printed under various flags, including:
@@ -440,7 +414,6 @@ void nmethod::init_defaults() {
     _scavenge_root_link = NULL;
   }
   _scavenge_root_state = 0;
-  _compiler            = NULL;
 #if INCLUDE_RTM_OPT
   _rtm_state           = NoRTM;
 #endif
@@ -468,7 +441,7 @@ nmethod* nmethod::new_native_nmethod(const methodHandle& method,
     CodeOffsets offsets;
     offsets.set_value(CodeOffsets::Verified_Entry, vep_offset);
     offsets.set_value(CodeOffsets::Frame_Complete, frame_complete);
-    nm = new (native_nmethod_size, CompLevel_none) nmethod(method(), native_nmethod_size,
+    nm = new (native_nmethod_size, CompLevel_none) nmethod(method(), compiler_none, native_nmethod_size,
                                                             compile_id, &offsets,
                                                             code_buffer, frame_size,
                                                             basic_lock_owner_sp_offset,
@@ -518,7 +491,7 @@ nmethod* nmethod::new_nmethod(const methodHandle& method,
       + round_to(debug_info->data_size(), oopSize);
     nm = new (nmethod_size, comp_level)
-    nmethod(method(), nmethod_size, compile_id, entry_bci, offsets,
+    nmethod(method(), compiler->type(), nmethod_size, compile_id, entry_bci, offsets,
             orig_pc_offset, debug_info, dependencies, code_buffer, frame_size,
             oop_maps,
             handler_table,
@@ -569,6 +542,7 @@ nmethod* nmethod::new_nmethod(const methodHandle& method,
 // For native wrappers
 nmethod::nmethod(
   Method* method,
+  CompilerType type,
   int nmethod_size,
   int compile_id,
   CodeOffsets* offsets,
@@ -577,7 +551,7 @@ nmethod::nmethod(
   ByteSize basic_lock_owner_sp_offset,
   ByteSize basic_lock_sp_offset,
   OopMapSet* oop_maps )
-  : CompiledMethod(method, "native nmethod", nmethod_size, sizeof(nmethod), code_buffer, offsets->value(CodeOffsets::Frame_Complete), frame_size, oop_maps, false),
+  : CompiledMethod(method, "native nmethod", type, nmethod_size, sizeof(nmethod), code_buffer, offsets->value(CodeOffsets::Frame_Complete), frame_size, oop_maps, false),
   _native_receiver_sp_offset(basic_lock_owner_sp_offset),
   _native_basic_lock_sp_offset(basic_lock_sp_offset)
 {
@@ -666,6 +640,7 @@ void* nmethod::operator new(size_t size, int nmethod_size, int comp_level) throw
 nmethod::nmethod(
   Method* method,
+  CompilerType type,
   int nmethod_size,
   int compile_id,
   int entry_bci,
@@ -685,7 +660,7 @@ nmethod::nmethod(
   Handle speculation_log
 #endif
   )
-  : CompiledMethod(method, "nmethod", nmethod_size, sizeof(nmethod), code_buffer, offsets->value(CodeOffsets::Frame_Complete), frame_size, oop_maps, false),
+  : CompiledMethod(method, "nmethod", type, nmethod_size, sizeof(nmethod), code_buffer, offsets->value(CodeOffsets::Frame_Complete), frame_size, oop_maps, false),
   _native_receiver_sp_offset(in_ByteSize(-1)),
   _native_basic_lock_sp_offset(in_ByteSize(-1))
 {
@@ -701,7 +676,6 @@ nmethod::nmethod(
     _entry_bci       = entry_bci;
     _compile_id      = compile_id;
     _comp_level      = comp_level;
-    _compiler        = compiler;
     _orig_pc_offset  = orig_pc_offset;
     _hotness_counter = NMethodSweeper::hotness_counter_reset_val();
@@ -803,9 +777,7 @@ void nmethod::log_identity(xmlStream* log) const {
   log->print(" compile_id='%d'", compile_id());
   const char* nm_kind = compile_kind();
   if (nm_kind != NULL) log->print(" compile_kind='%s'", nm_kind);
-  if (compiler() != NULL) {
-    log->print(" compiler='%s'", compiler()->name());
-  }
+  log->print(" compiler='%s'", compiler_name());
   if (TieredCompilation) {
     log->print(" level='%d'", comp_level());
   }


@@ -74,8 +74,6 @@ class nmethod : public CompiledMethod {
   static nmethod* volatile _oops_do_mark_nmethods;
   nmethod* volatile _oops_do_mark_link;
-  AbstractCompiler* _compiler; // The compiler which compiled this nmethod
   // offsets for entry points
   address _entry_point;          // entry point with class check
   address _verified_entry_point; // entry point without class check
@@ -166,6 +164,7 @@ class nmethod : public CompiledMethod {
   // For native wrappers
   nmethod(Method* method,
+          CompilerType type,
           int nmethod_size,
           int compile_id,
           CodeOffsets* offsets,
@@ -177,6 +176,7 @@ class nmethod : public CompiledMethod {
   // Creation support
   nmethod(Method* method,
+          CompilerType type,
           int nmethod_size,
           int compile_id,
           int entry_bci,
@@ -251,18 +251,10 @@ class nmethod : public CompiledMethod {
           ByteSize basic_lock_sp_offset,
           OopMapSet* oop_maps);
-  // accessors
-  AbstractCompiler* compiler() const { return _compiler; }
   // type info
   bool is_nmethod() const { return true; }
   bool is_osr_method() const { return _entry_bci != InvocationEntryBci; }
-  bool is_compiled_by_c1() const;
-  bool is_compiled_by_jvmci() const;
-  bool is_compiled_by_c2() const;
-  bool is_compiled_by_shark() const;
   // boundaries for different parts
   address consts_begin() const { return header_begin() + _consts_offset; }
   address consts_end() const   { return code_begin(); }


@@ -26,6 +26,7 @@
 #define SHARE_VM_COMPILER_ABSTRACTCOMPILER_HPP
 #include "ci/compilerInterface.hpp"
+#include "compiler/compilerDefinitions.hpp"
 #include "compiler/compilerDirectives.hpp"
 typedef void (*initializer)(void);
@@ -82,24 +83,15 @@ class AbstractCompiler : public CHeapObj<mtCompiler> {
   // This thread will initialize the compiler runtime.
   bool should_perform_init();
-  // The (closed set) of concrete compiler classes.
-  enum Type {
-    none,
-    c1,
-    c2,
-    jvmci,
-    shark
-  };
 private:
-  Type _type;
+  const CompilerType _type;
 #if INCLUDE_JVMCI
   CompilerStatistics _stats;
 #endif
 public:
-  AbstractCompiler(Type type) : _type(type), _compiler_state(uninitialized), _num_compiler_threads(0) {}
+  AbstractCompiler(CompilerType type) : _type(type), _compiler_state(uninitialized), _num_compiler_threads(0) {}
   // This function determines the compiler thread that will perform the
   // shutdown of the corresponding compiler runtime.
@@ -157,10 +149,11 @@ class AbstractCompiler : public CHeapObj<mtCompiler> {
   }
   // Compiler type queries.
-  bool is_c1()    { return _type == c1; }
-  bool is_c2()    { return _type == c2; }
-  bool is_jvmci() { return _type == jvmci; }
-  bool is_shark() { return _type == shark; }
+  const bool is_c1()    { return _type == compiler_c1; }
+  const bool is_c2()    { return _type == compiler_c2; }
+  const bool is_jvmci() { return _type == compiler_jvmci; }
+  const bool is_shark() { return _type == compiler_shark; }
+  const CompilerType type() { return _type; }
   // Extra tests to identify trivial methods for the tiered compilation policy.
   virtual bool is_trivial(Method* method) { return false; }


@@ -0,0 +1,34 @@
/*
* Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#include "precompiled.hpp"
#include "compiler/compilerDefinitions.hpp"
const char* compilertype2name_tab[compiler_number_of_types] = {
  "",
  "c1",
  "c2",
  "jvmci",
  "shark"
};


@@ -0,0 +1,109 @@
/*
* Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#ifndef SHARE_VM_COMPILER_COMPILERDEFINITIONS_HPP
#define SHARE_VM_COMPILER_COMPILERDEFINITIONS_HPP
#include "utilities/globalDefinitions.hpp"
// The (closed set) of concrete compiler classes.
enum CompilerType {
  compiler_none,
  compiler_c1,
  compiler_c2,
  compiler_jvmci,
  compiler_shark,
  compiler_number_of_types
};
extern const char* compilertype2name_tab[compiler_number_of_types]; // Map CompilerType to its name
inline const char* compilertype2name(CompilerType t) { return (uint)t < compiler_number_of_types ? compilertype2name_tab[t] : NULL; }
// Handy constants for deciding which compiler mode to use.
enum MethodCompilation {
  InvocationEntryBci = -1     // i.e., not a on-stack replacement compilation
};
// Enumeration to distinguish tiers of compilation
enum CompLevel {
  CompLevel_any               = -1,
  CompLevel_all               = -1,
  CompLevel_none              = 0,  // Interpreter
  CompLevel_simple            = 1,  // C1
  CompLevel_limited_profile   = 2,  // C1, invocation & backedge counters
  CompLevel_full_profile      = 3,  // C1, invocation & backedge counters + mdo
  CompLevel_full_optimization = 4,  // C2, Shark or JVMCI

#if defined(COMPILER2) || defined(SHARK)
  CompLevel_highest_tier      = CompLevel_full_optimization,  // pure C2 and tiered or JVMCI and tiered
#elif defined(COMPILER1)
  CompLevel_highest_tier      = CompLevel_simple,             // pure C1 or JVMCI
#else
  CompLevel_highest_tier      = CompLevel_none,
#endif

#if defined(TIERED)
  CompLevel_initial_compile   = CompLevel_full_profile        // tiered
#elif defined(COMPILER1) || INCLUDE_JVMCI
  CompLevel_initial_compile   = CompLevel_simple              // pure C1 or JVMCI
#elif defined(COMPILER2) || defined(SHARK)
  CompLevel_initial_compile   = CompLevel_full_optimization   // pure C2
#else
  CompLevel_initial_compile   = CompLevel_none
#endif
};
inline bool is_c1_compile(int comp_level) {
  return comp_level > CompLevel_none && comp_level < CompLevel_full_optimization;
}

inline bool is_c2_compile(int comp_level) {
  return comp_level == CompLevel_full_optimization;
}

inline bool is_highest_tier_compile(int comp_level) {
  return comp_level == CompLevel_highest_tier;
}

inline bool is_compile(int comp_level) {
  return is_c1_compile(comp_level) || is_c2_compile(comp_level);
}

// States of Restricted Transactional Memory usage.
enum RTMState {
  NoRTM      = 0x2, // Don't use RTM
  UseRTM     = 0x1, // Use RTM
  ProfileRTM = 0x0  // Use RTM with abort ratio calculation
};
#ifndef INCLUDE_RTM_OPT
#define INCLUDE_RTM_OPT 0
#endif
#if INCLUDE_RTM_OPT
#define RTM_OPT_ONLY(code) code
#else
#define RTM_OPT_ONLY(code)
#endif
#endif // SHARE_VM_COMPILER_COMPILERDEFINITIONS_HPP
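
For reference, the name lookup declared above is just a bounds-checked table index, so CodeBlob::compiler_name() yields "" for compiler_none blobs and NULL only for out-of-range values. A stand-alone sketch of the same behavior (type_name and type_name_tab are illustrative names, not the identifiers added by the patch):

#include <cstdio>

enum CompilerType {
  compiler_none, compiler_c1, compiler_c2, compiler_jvmci, compiler_shark,
  compiler_number_of_types
};

// Same shape as compilertype2name_tab / compilertype2name in the new files above.
static const char* type_name_tab[compiler_number_of_types] = {
  "", "c1", "c2", "jvmci", "shark"
};

static const char* type_name(CompilerType t) {
  return (unsigned)t < compiler_number_of_types ? type_name_tab[t] : nullptr;
}

int main() {
  std::printf("%s\n", type_name(compiler_jvmci));                        // prints "jvmci"
  std::printf("%d\n", type_name(compiler_number_of_types) == nullptr);   // out of range -> prints 1
  return 0;
}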


@@ -37,7 +37,7 @@
 JVMCICompiler* JVMCICompiler::_instance = NULL;
 elapsedTimer JVMCICompiler::_codeInstallTimer;
-JVMCICompiler::JVMCICompiler() : AbstractCompiler(jvmci) {
+JVMCICompiler::JVMCICompiler() : AbstractCompiler(compiler_jvmci) {
   _bootstrapping = false;
   _bootstrap_compilation_request_handled = false;
   _methods_compiled = 0;


@@ -27,6 +27,7 @@
 #include "classfile/vmSymbols.hpp"
 #include "code/compressedStream.hpp"
+#include "compiler/compilerDefinitions.hpp"
 #include "compiler/oopMap.hpp"
 #include "interpreter/invocationCounter.hpp"
 #include "oops/annotations.hpp"


@@ -32,7 +32,7 @@ class C2Compiler : public AbstractCompiler {
   static bool init_c2_runtime();
 public:
-  C2Compiler() : AbstractCompiler(c2) {}
+  C2Compiler() : AbstractCompiler(compiler_c2) {}
   // Name
   const char *name() { return "C2"; }


@@ -171,7 +171,6 @@ Deoptimization::UnrollBlock* Deoptimization::fetch_unroll_info_helper(JavaThread
   assert(thread->deopt_compiled_method() == NULL, "Pending deopt!");
   CompiledMethod* cm = deoptee.cb()->as_compiled_method_or_null();
   thread->set_deopt_compiled_method(cm);
-  bool skip_internal = (cm != NULL) && !cm->is_compiled_by_jvmci();
   if (VerifyStack) {
     thread->validate_frame_layout();
@@ -241,6 +240,7 @@ Deoptimization::UnrollBlock* Deoptimization::fetch_unroll_info_helper(JavaThread
       JRT_BLOCK
         realloc_failures = realloc_objects(thread, &deoptee, objects, THREAD);
       JRT_END
+      bool skip_internal = (cm != NULL) && !cm->is_compiled_by_jvmci();
       reassign_fields(&deoptee, &map, objects, realloc_failures, skip_internal);
 #ifndef PRODUCT
       if (TraceDeoptimization) {
@@ -1651,7 +1651,7 @@ JRT_ENTRY(void, Deoptimization::uncommon_trap_inner(JavaThread* thread, jint tra
     if (TraceDeoptimization) {  // make noise on the tty
       tty->print("Uncommon trap occurred in");
       nm->method()->print_short_name(tty);
-      tty->print(" compiler=%s compile_id=%d", nm->compiler() == NULL ? "" : nm->compiler()->name(), nm->compile_id());
+      tty->print(" compiler=%s compile_id=%d", nm->compiler_name(), nm->compile_id());
 #if INCLUDE_JVMCI
       if (nm->is_nmethod()) {
         oop installedCode = nm->as_nmethod()->jvmci_installed_code();


@@ -686,9 +686,7 @@ void frame::print_on_error(outputStream* st, char* buf, int buflen, bool verbose
       if (cm->is_nmethod()) {
         nmethod* nm = cm->as_nmethod();
         st->print("J %d%s", nm->compile_id(), (nm->is_osr_method() ? "%" : ""));
-        if (nm->compiler() != NULL) {
-          st->print(" %s", nm->compiler()->name());
-        }
+        st->print(" %s", nm->compiler_name());
       }
       m->name_and_sig_as_C_string(buf, buflen);
       st->print(" %s", buf);


@@ -23,7 +23,7 @@
  */
 #include "precompiled.hpp"
-#include "utilities/globalDefinitions.hpp"
+#include "compiler/compilerDefinitions.hpp"
 #if INCLUDE_RTM_OPT


@@ -214,7 +214,6 @@ BasicType name2type(const char* name) {
   return T_ILLEGAL;
 }
 // Map BasicType to size in words
 int type2size[T_CONFLICT+1]={ -1, 0, 0, 0, 1, 1, 1, 2, 1, 1, 1, 2, 1, 1, 0, 1, 1, 1, 1, -1};


@@ -444,13 +444,6 @@ const uint64_t KlassEncodingMetaspaceMax = (uint64_t(max_juint) + 1) << LogKlas
 // Machine dependent stuff
-// States of Restricted Transactional Memory usage.
-enum RTMState {
-  NoRTM      = 0x2, // Don't use RTM
-  UseRTM     = 0x1, // Use RTM
-  ProfileRTM = 0x0  // Use RTM with abort ratio calculation
-};
 // The maximum size of the code cache. Can be overridden by targets.
 #define CODE_CACHE_SIZE_LIMIT (2*G)
 // Allow targets to reduce the default size of the code cache.
@@ -458,15 +451,6 @@ enum RTMState {
 #include CPU_HEADER(globalDefinitions)
-#ifndef INCLUDE_RTM_OPT
-#define INCLUDE_RTM_OPT 0
-#endif
-#if INCLUDE_RTM_OPT
-#define RTM_OPT_ONLY(code) code
-#else
-#define RTM_OPT_ONLY(code)
-#endif
 // To assure the IRIW property on processors that are not multiple copy
 // atomic, sync instructions must be issued between volatile reads to
 // assure their ordering, instead of after volatile stores.
@@ -923,55 +907,6 @@ enum JavaThreadState {
 };
-// Handy constants for deciding which compiler mode to use.
-enum MethodCompilation {
-  InvocationEntryBci = -1     // i.e., not a on-stack replacement compilation
-};
-// Enumeration to distinguish tiers of compilation
-enum CompLevel {
-  CompLevel_any               = -1,
-  CompLevel_all               = -1,
-  CompLevel_none              = 0,  // Interpreter
-  CompLevel_simple            = 1,  // C1
-  CompLevel_limited_profile   = 2,  // C1, invocation & backedge counters
-  CompLevel_full_profile      = 3,  // C1, invocation & backedge counters + mdo
-  CompLevel_full_optimization = 4,  // C2, Shark or JVMCI
-#if defined(COMPILER2) || defined(SHARK)
-  CompLevel_highest_tier      = CompLevel_full_optimization,  // pure C2 and tiered or JVMCI and tiered
-#elif defined(COMPILER1)
-  CompLevel_highest_tier      = CompLevel_simple,             // pure C1 or JVMCI
-#else
-  CompLevel_highest_tier      = CompLevel_none,
-#endif
-#if defined(TIERED)
-  CompLevel_initial_compile   = CompLevel_full_profile        // tiered
-#elif defined(COMPILER1) || INCLUDE_JVMCI
-  CompLevel_initial_compile   = CompLevel_simple              // pure C1 or JVMCI
-#elif defined(COMPILER2) || defined(SHARK)
-  CompLevel_initial_compile   = CompLevel_full_optimization   // pure C2
-#else
-  CompLevel_initial_compile   = CompLevel_none
-#endif
-};
-inline bool is_c1_compile(int comp_level) {
-  return comp_level > CompLevel_none && comp_level < CompLevel_full_optimization;
-}
-inline bool is_c2_compile(int comp_level) {
-  return comp_level == CompLevel_full_optimization;
-}
-inline bool is_highest_tier_compile(int comp_level) {
-  return comp_level == CompLevel_highest_tier;
-}
-inline bool is_compile(int comp_level) {
-  return is_c1_compile(comp_level) || is_c2_compile(comp_level);
-}
 //----------------------------------------------------------------------------------------------------
 // 'Forward' declarations of frequently used classes