8355003: Implement JEP 515: Ahead-of-Time Method Profiling

Co-authored-by: John R Rose <jrose@openjdk.org>
Co-authored-by: Vladimir Ivanov <vlivanov@openjdk.org>
Co-authored-by: Ioi Lam <iklam@openjdk.org>
Co-authored-by: Vladimir Kozlov <kvn@openjdk.org>
Co-authored-by: Aleksey Shipilev <shade@openjdk.org>
Reviewed-by: kvn, ihse, cjplummer, iklam

parent 63d0e7ff11
commit e3f85c961b
@@ -128,7 +128,8 @@ ifneq ($(call check-jvm-feature, cds), true)
       aotCodeCache.cpp \
       classLoaderDataShared.cpp \
       classLoaderExt.cpp \
-      systemDictionaryShared.cpp
+      systemDictionaryShared.cpp \
+      trainingData.cpp

   JVM_EXCLUDE_PATTERNS += cds/
 endif
@@ -35,6 +35,7 @@
 #include "memory/metaspaceClosure.hpp"
 #include "oops/instanceKlass.hpp"
 #include "oops/objArrayKlass.hpp"
+#include "oops/trainingData.hpp"
 #include "utilities/resourceHash.hpp"

 // All the classes that should be included in the AOT cache (in at least the "allocated" state)
@@ -165,6 +166,8 @@ void AOTArtifactFinder::find_artifacts() {
   });

   end_scanning_for_oops();
+
+  TrainingData::cleanup_training_data();
 }

 void AOTArtifactFinder::start_scanning_for_oops() {
@@ -32,10 +32,12 @@
 #include "classfile/systemDictionary.hpp"
 #include "classfile/systemDictionaryShared.hpp"
 #include "classfile/vmClasses.hpp"
+#include "compiler/compilationPolicy.hpp"
 #include "gc/shared/gcVMOperations.hpp"
 #include "memory/resourceArea.hpp"
 #include "oops/instanceKlass.hpp"
 #include "oops/klass.inline.hpp"
+#include "oops/trainingData.hpp"
 #include "runtime/handles.inline.hpp"
 #include "runtime/java.hpp"

@@ -48,6 +50,17 @@ void AOTLinkedClassBulkLoader::serialize(SerializeClosure* soc, bool is_static_a
   AOTLinkedClassTable::get(is_static_archive)->serialize(soc);
 }

+bool AOTLinkedClassBulkLoader::class_preloading_finished() {
+  if (!CDSConfig::is_using_aot_linked_classes()) {
+    return true;
+  } else {
+    // The ConstantPools of preloaded classes have references to other preloaded classes. We don't
+    // want any Java code (including JVMCI compiler) to use these classes until all of them
+    // are loaded.
+    return Atomic::load_acquire(&_all_completed);
+  }
+}
+
 void AOTLinkedClassBulkLoader::load_javabase_classes(JavaThread* current) {
   assert(CDSConfig::is_using_aot_linked_classes(), "sanity");
   load_classes_in_loader(current, AOTLinkedClassCategory::BOOT1, nullptr); // only java.base classes
@@ -70,8 +83,14 @@ void AOTLinkedClassBulkLoader::load_non_javabase_classes(JavaThread* current) {
   _platform_completed = true;

   load_classes_in_loader(current, AOTLinkedClassCategory::APP, SystemDictionary::java_system_loader());

+  if (AOTPrintTrainingInfo) {
+    tty->print_cr("==================== archived_training_data ** after all classes preloaded ====================");
+    TrainingData::print_archived_training_data_on(tty);
+  }
+
   _app_completed = true;
-  _all_completed = true;
+  Atomic::release_store(&_all_completed, true);
 }

 void AOTLinkedClassBulkLoader::load_classes_in_loader(JavaThread* current, AOTLinkedClassCategory class_category, oop class_loader_oop) {
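The `_all_completed` handshake above is a classic release/acquire publication: the loader thread release-stores the flag only after every preloaded class is in place, and `class_preloading_finished()` acquire-loads it before compilers may touch those classes. A minimal standalone sketch of the same pattern, using plain C++11 atomics rather than HotSpot's `Atomic` wrapper:

#include <atomic>

struct Preloader {
  int classes_loaded = 0;                  // stands in for the loaded-class tables
  std::atomic<bool> all_completed{false};

  void finish_loading(int n) {
    classes_loaded = n;                                    // all writes to shared state first...
    all_completed.store(true, std::memory_order_release);  // ...then publish the flag
  }

  bool preloading_finished() const {
    // Pairs with the release store: if we observe true, every earlier write
    // made by the loader thread is visible to this thread.
    return all_completed.load(std::memory_order_acquire);
  }
};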
@@ -394,3 +413,25 @@ bool AOTLinkedClassBulkLoader::is_pending_aot_linked_class(Klass* k) {
     return false;
   }
 }
+
+void AOTLinkedClassBulkLoader::replay_training_at_init(Array<InstanceKlass*>* classes, TRAPS) {
+  if (classes != nullptr) {
+    for (int i = 0; i < classes->length(); i++) {
+      InstanceKlass* ik = classes->at(i);
+      if (ik->has_aot_initialized_mirror() && ik->is_initialized() && !ik->has_init_deps_processed()) {
+        CompilationPolicy::replay_training_at_init(ik, CHECK);
+      }
+    }
+  }
+}
+
+void AOTLinkedClassBulkLoader::replay_training_at_init_for_preloaded_classes(TRAPS) {
+  if (CDSConfig::is_using_aot_linked_classes() && TrainingData::have_data()) {
+    // Only static archive can have training data.
+    AOTLinkedClassTable* table = AOTLinkedClassTable::for_static_archive();
+    replay_training_at_init(table->boot(), CHECK);
+    replay_training_at_init(table->boot2(), CHECK);
+    replay_training_at_init(table->platform(), CHECK);
+    replay_training_at_init(table->app(), CHECK);
+  }
+}
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2024, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2024, 2025, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -55,6 +55,7 @@ class AOTLinkedClassBulkLoader : AllStatic {
                                      const char* category_name, Handle loader, TRAPS);
   static void load_hidden_class(ClassLoaderData* loader_data, InstanceKlass* ik, TRAPS);
   static void init_required_classes_for_loader(Handle class_loader, Array<InstanceKlass*>* classes, TRAPS);
+  static void replay_training_at_init(Array<InstanceKlass*>* classes, TRAPS) NOT_CDS_RETURN;
 public:
   static void serialize(SerializeClosure* soc, bool is_static_archive) NOT_CDS_RETURN;

@@ -63,6 +64,8 @@ public:
   static void finish_loading_javabase_classes(TRAPS) NOT_CDS_RETURN;
   static void exit_on_exception(JavaThread* current);

+  static void replay_training_at_init_for_preloaded_classes(TRAPS) NOT_CDS_RETURN;
+  static bool class_preloading_finished();
   static bool is_pending_aot_linked_class(Klass* k) NOT_CDS_RETURN_(false);
 };
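The NOT_CDS_RETURN / NOT_CDS_RETURN_(false) suffixes above are HotSpot's conditional-compilation stubs: when CDS is compiled out, the declaration itself carries an inline empty body (or one returning the given value), so call sites need no #if guards. An illustrative reconstruction of the idiom (the real macros live in utilities/macros.hpp and differ in detail):

#define INCLUDE_CDS 0                 // pretend CDS is compiled out for this sketch

#if INCLUDE_CDS
#define NOT_CDS_RETURN                /* nothing: plain declaration, defined in a .cpp */
#define NOT_CDS_RETURN_(code)         /* nothing */
#else
#define NOT_CDS_RETURN                {}                 // inline no-op stub
#define NOT_CDS_RETURN_(code)         { return code; }   // inline stub with a result
#endif

struct BulkLoader {
  // Expands to "static bool is_pending_aot_linked_class(void*) { return false; }"
  // when CDS is excluded, so callers compile unchanged.
  static bool is_pending_aot_linked_class(void*) NOT_CDS_RETURN_(false)
};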
@@ -54,9 +54,12 @@
 #include "memory/resourceArea.hpp"
 #include "oops/compressedKlass.inline.hpp"
 #include "oops/instanceKlass.hpp"
+#include "oops/methodCounters.hpp"
+#include "oops/methodData.hpp"
 #include "oops/objArrayKlass.hpp"
 #include "oops/objArrayOop.inline.hpp"
 #include "oops/oopHandle.inline.hpp"
+#include "oops/trainingData.hpp"
 #include "runtime/arguments.hpp"
 #include "runtime/fieldDescriptor.inline.hpp"
 #include "runtime/globals_extension.hpp"
@@ -131,13 +134,27 @@ public:
     size_t field_offset = size_t(bit_offset - _start_idx) * sizeof(address);
     address* ptr_loc = (address*)(_buffered_obj + field_offset);

-    address old_p = *ptr_loc;
+    address old_p_with_tags = *ptr_loc;
+    assert(old_p_with_tags != nullptr, "null ptrs shouldn't have been marked");
+
+    address old_p = MetaspaceClosure::strip_tags(old_p_with_tags);
+    uintx tags = MetaspaceClosure::decode_tags(old_p_with_tags);
     address new_p = _builder->get_buffered_addr(old_p);

-    log_trace(aot)("Ref: [" PTR_FORMAT "] -> " PTR_FORMAT " => " PTR_FORMAT,
-                   p2i(ptr_loc), p2i(old_p), p2i(new_p));
+    bool nulled;
+    if (new_p == nullptr) {
+      // old_p had a FollowMode of set_to_null
+      nulled = true;
+    } else {
+      new_p = MetaspaceClosure::add_tags(new_p, tags);
+      nulled = false;
+    }
+
+    log_trace(aot)("Ref: [" PTR_FORMAT "] -> " PTR_FORMAT " => " PTR_FORMAT " %zu",
+                   p2i(ptr_loc), p2i(old_p) + tags, p2i(new_p), tags);

     ArchivePtrMarker::set_and_mark_pointer(ptr_loc, new_p);
+    ArchiveBuilder::current()->count_relocated_pointer(tags != 0, nulled);
     return true; // keep iterating the bitmap
   }
 };
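strip_tags/decode_tags/add_tags above suggest the usual low-bit pointer-tagging trick: archived metadata pointers are at least word-aligned, so their low bits are free to carry a small tag that must survive relocation. A self-contained sketch of that arithmetic (the mask width is an assumption, not HotSpot's actual encoding):

#include <cassert>
#include <cstdint>

constexpr uintptr_t TAG_MASK = 0x3;   // assumes >= 4-byte-aligned pointers

inline uintptr_t add_tags(uintptr_t p, uintptr_t tags) {
  assert((p & TAG_MASK) == 0 && tags <= TAG_MASK);
  return p | tags;                    // stash the tag in the free low bits
}
inline uintptr_t strip_tags(uintptr_t p) { return p & ~TAG_MASK; }  // real address
inline uintptr_t decode_tags(uintptr_t p) { return p & TAG_MASK; }  // tag only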
@@ -178,6 +195,9 @@ ArchiveBuilder::ArchiveBuilder() :
   _klasses = new (mtClassShared) GrowableArray<Klass*>(4 * K, mtClassShared);
   _symbols = new (mtClassShared) GrowableArray<Symbol*>(256 * K, mtClassShared);
   _entropy_seed = 0x12345678;
+  _relocated_ptr_info._num_ptrs = 0;
+  _relocated_ptr_info._num_tagged_ptrs = 0;
+  _relocated_ptr_info._num_nulled_ptrs = 0;
   assert(_current == nullptr, "must be");
   _current = this;
 }
@@ -437,6 +457,11 @@ bool ArchiveBuilder::gather_one_source_obj(MetaspaceClosure::Ref* ref, bool read
   }
 #endif

+  if (ref->msotype() == MetaspaceObj::MethodDataType) {
+    MethodData* md = (MethodData*)ref->obj();
+    md->clean_method_data(false /* always_clean */);
+  }
+
   assert(p->read_only() == src_info.read_only(), "must be");

   if (created && src_info.should_copy()) {
@@ -534,8 +559,11 @@ ArchiveBuilder::FollowMode ArchiveBuilder::get_follow_mode(MetaspaceClosure::Ref
     // Don't dump existing shared metadata again.
     return point_to_it;
   } else if (ref->msotype() == MetaspaceObj::MethodDataType ||
-             ref->msotype() == MetaspaceObj::MethodCountersType) {
-    return set_to_null;
+             ref->msotype() == MetaspaceObj::MethodCountersType ||
+             ref->msotype() == MetaspaceObj::KlassTrainingDataType ||
+             ref->msotype() == MetaspaceObj::MethodTrainingDataType ||
+             ref->msotype() == MetaspaceObj::CompileTrainingDataType) {
+    return (TrainingData::need_data() || TrainingData::assembling_data()) ? make_a_copy : set_to_null;
   } else if (ref->msotype() == MetaspaceObj::AdapterHandlerEntryType) {
     if (CDSConfig::is_dumping_adapters()) {
       AdapterHandlerEntry* entry = (AdapterHandlerEntry*)ref->obj();
@@ -756,6 +784,10 @@ void ArchiveBuilder::relocate_metaspaceobj_embedded_pointers() {
   aot_log_info(aot)("Relocating embedded pointers in core regions ... ");
   relocate_embedded_pointers(&_rw_src_objs);
   relocate_embedded_pointers(&_ro_src_objs);
+  log_info(cds)("Relocating %zu pointers, %zu tagged, %zu nulled",
+                _relocated_ptr_info._num_ptrs,
+                _relocated_ptr_info._num_tagged_ptrs,
+                _relocated_ptr_info._num_nulled_ptrs);
 }

 #define ADD_COUNT(x) \
@@ -953,6 +985,28 @@ void ArchiveBuilder::make_klasses_shareable() {
     DynamicArchive::make_array_klasses_shareable();
   }
 }

+void ArchiveBuilder::make_training_data_shareable() {
+  auto clean_td = [&] (address& src_obj, SourceObjInfo& info) {
+    if (!is_in_buffer_space(info.buffered_addr())) {
+      return;
+    }
+
+    if (info.msotype() == MetaspaceObj::KlassTrainingDataType ||
+        info.msotype() == MetaspaceObj::MethodTrainingDataType ||
+        info.msotype() == MetaspaceObj::CompileTrainingDataType) {
+      TrainingData* buffered_td = (TrainingData*)info.buffered_addr();
+      buffered_td->remove_unshareable_info();
+    } else if (info.msotype() == MetaspaceObj::MethodDataType) {
+      MethodData* buffered_mdo = (MethodData*)info.buffered_addr();
+      buffered_mdo->remove_unshareable_info();
+    } else if (info.msotype() == MetaspaceObj::MethodCountersType) {
+      MethodCounters* buffered_mc = (MethodCounters*)info.buffered_addr();
+      buffered_mc->remove_unshareable_info();
+    }
+  };
+  _src_obj_table.iterate_all(clean_td);
+}
+
 void ArchiveBuilder::serialize_dynamic_archivable_items(SerializeClosure* soc) {
   SymbolTable::serialize_shared_table_header(soc, false);
   SystemDictionaryShared::serialize_dictionary_headers(soc, false);
@@ -1588,6 +1642,12 @@ void ArchiveBuilder::write_region(FileMapInfo* mapinfo, int region_idx, DumpRegi
   mapinfo->write_region(region_idx, dump_region->base(), dump_region->used(), read_only, allow_exec);
 }

+void ArchiveBuilder::count_relocated_pointer(bool tagged, bool nulled) {
+  _relocated_ptr_info._num_ptrs ++;
+  _relocated_ptr_info._num_tagged_ptrs += tagged ? 1 : 0;
+  _relocated_ptr_info._num_nulled_ptrs += nulled ? 1 : 0;
+}
+
 void ArchiveBuilder::print_region_stats(FileMapInfo *mapinfo, ArchiveHeapInfo* heap_info) {
   // Print statistics of all the regions
   const size_t bitmap_used = mapinfo->region_at(MetaspaceShared::bm)->used();
@@ -238,6 +238,11 @@ private:
   // statistics
   DumpAllocStats _alloc_stats;
   size_t _total_heap_region_size;
+  struct {
+    size_t _num_ptrs;
+    size_t _num_tagged_ptrs;
+    size_t _num_nulled_ptrs;
+  } _relocated_ptr_info;

   void print_region_stats(FileMapInfo *map_info, ArchiveHeapInfo* heap_info);
   void print_bitmap_region_stats(size_t size, size_t total_size);
@@ -258,6 +263,8 @@ public:
     ~OtherROAllocMark();
   };

+  void count_relocated_pointer(bool tagged, bool nulled);
+
 private:
   FollowMode get_follow_mode(MetaspaceClosure::Ref *ref);

@@ -419,6 +426,7 @@ public:
   void relocate_metaspaceobj_embedded_pointers();
   void record_regenerated_object(address orig_src_obj, address regen_src_obj);
   void make_klasses_shareable();
+  void make_training_data_shareable();
   void relocate_to_requested();
   void write_archive(FileMapInfo* mapinfo, ArchiveHeapInfo* heap_info);
   void write_region(FileMapInfo* mapinfo, int region_idx, DumpRegion* dump_region,
@@ -443,7 +451,8 @@ public:

   address get_buffered_addr(address src_addr) const;
   template <typename T> T get_buffered_addr(T src_addr) const {
-    return (T)get_buffered_addr((address)src_addr);
+    CDS_ONLY(return (T)get_buffered_addr((address)src_addr);)
+    NOT_CDS(return nullptr;)
   }

   address get_source_addr(address buffered_addr) const;
@@ -456,7 +465,8 @@ public:
   GrowableArray<Symbol*>* symbols() const { return _symbols; }

   static bool is_active() {
-    return (_current != nullptr);
+    CDS_ONLY(return (_current != nullptr);)
+    NOT_CDS(return false;)
   }

   static ArchiveBuilder* current() {
@@ -532,6 +532,8 @@ bool CDSConfig::check_vm_args_consistency(bool patch_mod_javabase, bool mode_fla
     FLAG_SET_ERGO_IF_DEFAULT(AOTClassLinking, true);
   }

+  setup_compiler_args();
+
   if (AOTClassLinking) {
     // If AOTClassLinking is specified, enable all AOT optimizations by default.
     FLAG_SET_ERGO_IF_DEFAULT(AOTInvokeDynamicLinking, true);
@@ -604,6 +606,28 @@ bool CDSConfig::check_vm_args_consistency(bool patch_mod_javabase, bool mode_fla
   return true;
 }

+void CDSConfig::setup_compiler_args() {
+  // AOT profiles are supported only in the JEP 483 workflow.
+  bool can_dump_profiles = AOTClassLinking && new_aot_flags_used();
+
+  if (is_dumping_preimage_static_archive() && can_dump_profiles) {
+    // JEP 483 workflow -- training
+    FLAG_SET_ERGO_IF_DEFAULT(AOTRecordTraining, true);
+    FLAG_SET_ERGO(AOTReplayTraining, false);
+  } else if (is_dumping_final_static_archive() && can_dump_profiles) {
+    // JEP 483 workflow -- assembly
+    FLAG_SET_ERGO(AOTRecordTraining, false);
+    FLAG_SET_ERGO_IF_DEFAULT(AOTReplayTraining, true);
+  } else if (is_using_archive() && new_aot_flags_used()) {
+    // JEP 483 workflow -- production
+    FLAG_SET_ERGO(AOTRecordTraining, false);
+    FLAG_SET_ERGO_IF_DEFAULT(AOTReplayTraining, true);
+  } else {
+    FLAG_SET_ERGO(AOTReplayTraining, false);
+    FLAG_SET_ERGO(AOTRecordTraining, false);
+  }
+}
+
 void CDSConfig::prepare_for_dumping() {
   assert(CDSConfig::is_dumping_archive(), "sanity");
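setup_compiler_args() keys the AOTRecordTraining/AOTReplayTraining defaults off the three phases of the JEP 483 workflow that JEP 515 extends. Roughly, and with placeholder file and class names (the -XX:AOTMode/-XX:AOTConfiguration/-XX:AOTCache flags come from JEP 483):

# 1. Training: run the app once, recording an AOT configuration (now including method profiles)
java -XX:AOTMode=record -XX:AOTConfiguration=app.aotconf -cp app.jar com.example.App

# 2. Assembly: create the AOT cache from the recorded configuration
java -XX:AOTMode=create -XX:AOTConfiguration=app.aotconf -XX:AOTCache=app.aot -cp app.jar

# 3. Production: start with the cache; archived profiles let the JIT warm up immediately
java -XX:AOTCache=app.aot -cp app.jar com.example.App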
@@ -67,6 +67,7 @@ class CDSConfig : public AllStatic {
   static void check_aotmode_auto_or_on();
   static void check_aotmode_record();
   static void check_aotmode_create();
+  static void setup_compiler_args();
   static void check_unsupported_dumping_module_options();

   // Called after Arguments::apply_ergo() has started
@@ -130,6 +130,23 @@
   product(bool, AOTCacheParallelRelocation, true, DIAGNOSTIC,               \
           "Use parallel relocation code to speed up startup.")              \
                                                                             \
+  /* flags to control training and deployment modes */                      \
+                                                                            \
+  product(bool, AOTRecordTraining, false, DIAGNOSTIC,                       \
+          "Request output of training data for improved deployment.")       \
+                                                                            \
+  product(bool, AOTReplayTraining, false, DIAGNOSTIC,                       \
+          "Read training data, if available, for use in this execution")    \
+                                                                            \
+  product(bool, AOTPrintTrainingInfo, false, DIAGNOSTIC,                    \
+          "Print additional information about training")                    \
+                                                                            \
+  product(bool, AOTVerifyTrainingData, trueInDebug, DIAGNOSTIC,             \
+          "Verify archived training data")                                  \
+                                                                            \
+  product(bool, AOTCompileEagerly, false, DIAGNOSTIC,                       \
+          "Compile methods as soon as possible")                            \
+                                                                            \
   /* AOT Code flags */                                                      \
                                                                             \
   product(bool, AOTAdapterCaching, false, DIAGNOSTIC,                       \
@@ -32,7 +32,9 @@
 #include "oops/instanceMirrorKlass.hpp"
 #include "oops/instanceRefKlass.hpp"
 #include "oops/instanceStackChunkKlass.hpp"
+#include "oops/methodCounters.hpp"
+#include "oops/methodData.hpp"
+#include "oops/trainingData.hpp"
 #include "oops/objArrayKlass.hpp"
 #include "oops/typeArrayKlass.hpp"
 #include "runtime/arguments.hpp"
@@ -60,8 +62,13 @@
   f(InstanceRefKlass) \
   f(InstanceStackChunkKlass) \
   f(Method) \
+  f(MethodData) \
+  f(MethodCounters) \
   f(ObjArrayKlass) \
-  f(TypeArrayKlass)
+  f(TypeArrayKlass) \
+  f(KlassTrainingData) \
+  f(MethodTrainingData) \
+  f(CompileTrainingData)

 class CppVtableInfo {
   intptr_t _vtable_size;
@@ -279,16 +286,11 @@ intptr_t* CppVtables::get_archived_vtable(MetaspaceObj::Type msotype, address ob
   case MetaspaceObj::ConstMethodType:
   case MetaspaceObj::ConstantPoolCacheType:
   case MetaspaceObj::AnnotationsType:
-  case MetaspaceObj::MethodCountersType:
   case MetaspaceObj::RecordComponentType:
   case MetaspaceObj::AdapterHandlerEntryType:
   case MetaspaceObj::AdapterFingerPrintType:
     // These have no vtables.
     break;
-  case MetaspaceObj::MethodDataType:
-    // We don't archive MethodData <-- should have been removed in removed_unsharable_info
-    ShouldNotReachHere();
-    break;
   default:
     for (kind = 0; kind < _num_cloned_vtable_kinds; kind ++) {
       if (vtable_of((Metadata*)obj) == _orig_cpp_vtptrs[kind] ||
@@ -23,6 +23,7 @@
  */

+#include "cds/aotClassLinker.hpp"
 #include "cds/cdsConfig.hpp"
 #include "cds/dumpAllocStats.hpp"
 #include "logging/log.hpp"
 #include "logging/logMessage.hpp"
@@ -118,8 +119,15 @@ void DumpAllocStats::print_stats(int ro_all, int rw_all) {
            _num_indy_cp_entries, _num_indy_cp_entries_archived,
            percent_of(_num_indy_cp_entries_archived, _num_indy_cp_entries),
            _num_indy_cp_entries_reverted);
-  msg.info("Platform loader initiated classes = %5d", AOTClassLinker::num_platform_initiated_classes());
-  msg.info("App loader initiated classes = %5d", AOTClassLinker::num_app_initiated_classes());
+  msg.info("Platform loader initiated classes = %6d", AOTClassLinker::num_platform_initiated_classes());
+  msg.info("App loader initiated classes = %6d", AOTClassLinker::num_app_initiated_classes());
+  msg.info("MethodCounters = %6d (%8d bytes)", _counts[RW][MethodCountersType],
+           _bytes [RW][MethodCountersType]);
+  msg.info("KlassTrainingData = %6d (%8d bytes)", _counts[RW][KlassTrainingDataType],
+           _bytes [RW][KlassTrainingDataType]);
+  msg.info("MethodTrainingData = %6d (%8d bytes)", _counts[RW][MethodTrainingDataType],
+           _bytes [RW][MethodTrainingDataType]);

 }

 #ifdef ASSERT
@@ -60,6 +60,7 @@
 #include "oops/compressedKlass.hpp"
 #include "oops/objArrayOop.hpp"
 #include "oops/oop.inline.hpp"
+#include "oops/trainingData.hpp"
 #include "oops/typeArrayKlass.hpp"
 #include "prims/jvmtiExport.hpp"
 #include "runtime/arguments.hpp"
@@ -231,6 +232,14 @@ void FileMapHeader::populate(FileMapInfo *info, size_t core_region_alignment,
   } else {
     _narrow_klass_pointer_bits = _narrow_klass_shift = -1;
   }
+  _type_profile_level = TypeProfileLevel;
+  _type_profile_args_limit = TypeProfileArgsLimit;
+  _type_profile_parms_limit = TypeProfileParmsLimit;
+  _type_profile_width = TypeProfileWidth;
+  _bci_profile_width = BciProfileWidth;
+  _profile_traps = ProfileTraps;
+  _type_profile_casts = TypeProfileCasts;
+  _spec_trap_limit_extra_entries = SpecTrapLimitExtraEntries;
   _max_heap_size = MaxHeapSize;
   _use_optimized_module_handling = CDSConfig::is_using_optimized_module_handling();
   _has_aot_linked_classes = CDSConfig::is_dumping_aot_linked_classes();
@@ -1923,6 +1932,64 @@ bool FileMapHeader::validate() {
                                           CompactStrings ? "enabled" : "disabled");
     return false;
   }
+  if (TrainingData::have_data()) {
+    if (_type_profile_level != TypeProfileLevel) {
+      MetaspaceShared::report_loading_error("The %s's TypeProfileLevel setting (%d)"
+                                            " does not equal the current TypeProfileLevel setting (%d).", file_type,
+                                            _type_profile_level, TypeProfileLevel);
+      return false;
+    }
+    if (_type_profile_args_limit != TypeProfileArgsLimit) {
+      MetaspaceShared::report_loading_error("The %s's TypeProfileArgsLimit setting (%d)"
+                                            " does not equal the current TypeProfileArgsLimit setting (%d).", file_type,
+                                            _type_profile_args_limit, TypeProfileArgsLimit);
+      return false;
+    }
+    if (_type_profile_parms_limit != TypeProfileParmsLimit) {
+      MetaspaceShared::report_loading_error("The %s's TypeProfileParmsLimit setting (%d)"
+                                            " does not equal the current TypeProfileParmsLimit setting (%d).", file_type,
+                                            _type_profile_parms_limit, TypeProfileParmsLimit);
+      return false;
+    }
+    if (_type_profile_width != TypeProfileWidth) {
+      MetaspaceShared::report_loading_error("The %s's TypeProfileWidth setting (%d)"
+                                            " does not equal the current TypeProfileWidth setting (%d).", file_type,
+                                            (int)_type_profile_width, (int)TypeProfileWidth);
+      return false;
+    }
+    if (_bci_profile_width != BciProfileWidth) {
+      MetaspaceShared::report_loading_error("The %s's BciProfileWidth setting (%d)"
+                                            " does not equal the current BciProfileWidth setting (%d).", file_type,
+                                            (int)_bci_profile_width, (int)BciProfileWidth);
+      return false;
+    }
+    if (_type_profile_casts != TypeProfileCasts) {
+      MetaspaceShared::report_loading_error("The %s's TypeProfileCasts setting (%s)"
+                                            " does not equal the current TypeProfileCasts setting (%s).", file_type,
+                                            _type_profile_casts ? "enabled" : "disabled",
+                                            TypeProfileCasts ? "enabled" : "disabled");
+      return false;
+    }
+    if (_profile_traps != ProfileTraps) {
+      MetaspaceShared::report_loading_error("The %s's ProfileTraps setting (%s)"
+                                            " does not equal the current ProfileTraps setting (%s).", file_type,
+                                            _profile_traps ? "enabled" : "disabled",
+                                            ProfileTraps ? "enabled" : "disabled");
+      return false;
+    }
+    if (_spec_trap_limit_extra_entries != SpecTrapLimitExtraEntries) {
+      MetaspaceShared::report_loading_error("The %s's SpecTrapLimitExtraEntries setting (%d)"
+                                            " does not equal the current SpecTrapLimitExtraEntries setting (%d).", file_type,
+                                            _spec_trap_limit_extra_entries, SpecTrapLimitExtraEntries);
+      return false;
+    }
+  }

   // This must be done after header validation because it might change the
   // header data
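All eight checks have the same shape: an archived copy of a profiling flag must equal its live value, because these flags determine MethodData layout and archived profiles would otherwise be misread. A hypothetical condensation of the repeated pattern (not part of the commit; the reporter below stands in for MetaspaceShared's):

#include <cstdarg>
#include <cstdio>

static const char* file_type = "shared archive file";   // stand-in for the local variable above

static void report_loading_error(const char* fmt, ...) {
  va_list ap;
  va_start(ap, fmt);
  vfprintf(stderr, fmt, ap);   // the real code routes this through HotSpot logging
  va_end(ap);
  fputc('\n', stderr);
}

#define CHECK_ARCHIVED_PROFILE_FLAG(archived, current)                          \
  if ((archived) != (current)) {                                                \
    report_loading_error("The %s's " #current " setting (%d) does not equal "   \
                         "the current " #current " setting (%d).",              \
                         file_type, (int)(archived), (int)(current));           \
    return false;                                                               \
  }

// e.g. CHECK_ARCHIVED_PROFILE_FLAG(_type_profile_level, TypeProfileLevel);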
@@ -145,6 +145,17 @@ private:
   size_t _heap_ptrmap_start_pos; // The first bit in the ptrmap corresponds to this position in the heap.
   size_t _rw_ptrmap_start_pos;   // The first bit in the ptrmap corresponds to this position in the rw region
   size_t _ro_ptrmap_start_pos;   // The first bit in the ptrmap corresponds to this position in the ro region

+  // The following are parameters that affect MethodData layout.
+  uint _type_profile_level;
+  int  _type_profile_args_limit;
+  int  _type_profile_parms_limit;
+  intx _type_profile_width;
+  intx _bci_profile_width;
+  bool _profile_traps;
+  bool _type_profile_casts;
+  int  _spec_trap_limit_extra_entries;
+
   template <typename T> T from_mapped_offset(size_t offset) const {
     return (T)(mapped_base_address() + offset);
   }
@@ -81,6 +81,7 @@
 #include "oops/objArrayOop.hpp"
 #include "oops/oop.inline.hpp"
 #include "oops/oopHandle.hpp"
+#include "oops/trainingData.hpp"
 #include "prims/jvmtiExport.hpp"
 #include "runtime/arguments.hpp"
 #include "runtime/globals.hpp"
@@ -483,6 +484,7 @@ void MetaspaceShared::serialize(SerializeClosure* soc) {
   SystemDictionaryShared::serialize_dictionary_headers(soc);
   AOTLinkedClassBulkLoader::serialize(soc, true);
   FinalImageRecipes::serialize(soc);
+  TrainingData::serialize(soc);
   InstanceMirrorKlass::serialize_offsets(soc);

   // Dump/restore well known classes (pointers)
@@ -569,6 +571,7 @@ public:
     SystemDictionaryShared::dumptime_classes_do(it);
     Universe::metaspace_pointers_do(it);
     vmSymbols::metaspace_pointers_do(it);
+    TrainingData::iterate_roots(it);

     // The above code should find all the symbols that are referenced by the
     // archived classes. We just need to add the extra symbols which
@@ -608,6 +611,9 @@ char* VM_PopulateDumpSharedSpace::dump_read_only_tables(AOTClassLocationConfig*&
   if (CDSConfig::is_dumping_preimage_static_archive()) {
     FinalImageRecipes::record_recipes();
   }

+  TrainingData::dump_training_data();
+
   MetaspaceShared::write_method_handle_intrinsics();

   // Write lambform lines into archive
@@ -673,6 +679,9 @@ void VM_PopulateDumpSharedSpace::doit() {
     LambdaProxyClassDictionary::adjust_dumptime_table();
   }

+  log_info(cds)("Make training data shareable");
+  _builder.make_training_data_shareable();
+
   // The vtable clones contain addresses of the current process.
   // We don't want to write these addresses into the archive.
   CppVtables::zero_archived_vtables();
@@ -791,6 +800,13 @@ void MetaspaceShared::link_shared_classes(TRAPS) {
 void MetaspaceShared::preload_and_dump(TRAPS) {
   CDSConfig::DumperThreadMark dumper_thread_mark(THREAD);
   ResourceMark rm(THREAD);
   HandleMark hm(THREAD);

+  if (CDSConfig::is_dumping_final_static_archive() && AOTPrintTrainingInfo) {
+    tty->print_cr("==================== archived_training_data ** before dumping ====================");
+    TrainingData::print_archived_training_data_on(tty);
+  }
+
   StaticArchiveBuilder builder;
   preload_and_dump_impl(builder, THREAD);
   if (HAS_PENDING_EXCEPTION) {
@@ -956,6 +972,7 @@ void MetaspaceShared::preload_and_dump_impl(StaticArchiveBuilder& builder, TRAPS
   // are implemented by K are not verified.
   link_shared_classes(CHECK);
   log_info(aot)("Rewriting and linking classes: done");
+  TrainingData::init_dumptime_table(CHECK); // captures TrainingDataSetLocker

   if (CDSConfig::is_dumping_regenerated_lambdaform_invokers()) {
     LambdaFormInvokers::regenerate_holder_classes(CHECK);
@@ -1860,6 +1877,8 @@ void MetaspaceShared::initialize_shared_spaces() {
     SystemDictionaryShared::print_shared_archive(tty, false/*dynamic*/);
   }

+  TrainingData::print_archived_training_data_on(tty);
+
   if (AOTCodeCache::is_on_for_use()) {
     tty->print_cr("\n\nAOT Code");
     AOTCodeCache::print_on(tty);
@@ -264,7 +264,11 @@ public:
   // Used by RunTimeSharedDictionary to implement OffsetCompactHashtable::EQUALS
   static inline bool EQUALS(
        const RunTimeClassInfo* value, Symbol* key, int len_unused) {
+#if INCLUDE_CDS
     return (value->klass()->name() == key);
+#else
+    return false;
+#endif
   }
 };
@@ -1159,6 +1159,13 @@ int ciEnv::compile_id() {
 // ciEnv::notice_inlined_method()
 void ciEnv::notice_inlined_method(ciMethod* method) {
   _num_inlined_bytecodes += method->code_size_for_inlining();
+  CompileTrainingData* ctd = task()->training_data();
+  if (ctd != nullptr) {
+    GUARDED_VM_ENTRY({
+      methodHandle mh(Thread::current(), method->get_Method());
+      ctd->notice_inlined_method(task(), mh);
+    });
+  }
 }

 // ------------------------------------------------------------------
@@ -44,6 +44,7 @@ class ciInstanceKlass : public ciKlass {
   friend class ciMethod;
   friend class ciField;
   friend class ciReplay;
+  friend class CompileTrainingData;

 private:
   enum SubklassValue { subklass_unknown, subklass_false, subklass_true };
@@ -36,6 +36,7 @@
 #include "compiler/abstractCompiler.hpp"
 #include "compiler/compilerDefinitions.inline.hpp"
 #include "compiler/compilerOracle.hpp"
+#include "compiler/compileTask.hpp"
 #include "compiler/methodLiveness.hpp"
 #include "interpreter/interpreter.hpp"
 #include "interpreter/linkResolver.hpp"
@@ -47,6 +48,7 @@
 #include "oops/generateOopMap.hpp"
 #include "oops/method.inline.hpp"
 #include "oops/oop.inline.hpp"
+#include "oops/trainingData.hpp"
 #include "prims/methodHandles.hpp"
 #include "runtime/deoptimization.hpp"
 #include "runtime/handles.inline.hpp"
@@ -1148,6 +1150,28 @@ int ciMethod::code_size_for_inlining() {
 // Also some instructions inside the code are excluded from inline
 // heuristic (e.g. post call nop instructions; see InlineSkippedInstructionsCounter)
 int ciMethod::inline_instructions_size() {
+  if (_inline_instructions_size == -1) {
+    if (TrainingData::have_data()) {
+      GUARDED_VM_ENTRY(
+        CompLevel level = static_cast<CompLevel>(CURRENT_ENV->comp_level());
+        methodHandle top_level_mh(Thread::current(), CURRENT_ENV->task()->method());
+        MethodTrainingData* mtd = MethodTrainingData::find(top_level_mh);
+        if (mtd != nullptr) {
+          CompileTrainingData* ctd = mtd->last_toplevel_compile(level);
+          if (ctd != nullptr) {
+            methodHandle mh(Thread::current(), get_Method());
+            MethodTrainingData* this_mtd = MethodTrainingData::find(mh);
+            if (this_mtd != nullptr) {
+              auto r = ctd->ci_records().ciMethod__inline_instructions_size.find(this_mtd);
+              if (r.is_valid()) {
+                _inline_instructions_size = r.result();
+              }
+            }
+          }
+        }
+      );
+    }
+  }
   if (_inline_instructions_size == -1) {
     GUARDED_VM_ENTRY(
       nmethod* code = get_Method()->code();
@@ -1157,6 +1181,14 @@ int ciMethod::inline_instructions_size() {
       } else {
         _inline_instructions_size = 0;
       }
+      if (TrainingData::need_data()) {
+        CompileTrainingData* ctd = CURRENT_ENV->task()->training_data();
+        if (ctd != nullptr) {
+          methodHandle mh(Thread::current(), get_Method());
+          MethodTrainingData* this_mtd = MethodTrainingData::make(mh);
+          ctd->ci_records().ciMethod__inline_instructions_size.append_if_missing(_inline_instructions_size, this_mtd);
+        }
+      }
     );
   }
   return _inline_instructions_size;
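inline_instructions_size() shows the commit's general record/replay pattern: during a training run (need_data()) a ci query's result is appended to a per-compilation table, and during a replayed run (have_data()) the table is consulted before recomputing, so the compiler reproduces its training-run decisions. A toy, self-contained version of that memoization shape (names are illustrative, not HotSpot's):

#include <map>
#include <string>

enum class Mode { Record, Replay };

struct CiRecords {
  Mode mode;
  std::map<std::string, int> table;   // query key -> recorded result

  int query(const std::string& key, int (*compute)()) {
    if (mode == Mode::Replay) {
      auto it = table.find(key);      // replay: prefer the recorded answer
      if (it != table.end()) return it->second;
    }
    int result = compute();           // fall back to computing it now
    if (mode == Mode::Record) {
      table.emplace(key, result);     // record: remember it for future runs
    }
    return result;
  }
};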
@@ -31,6 +31,7 @@
 #include "memory/resourceArea.hpp"
 #include "oops/klass.inline.hpp"
 #include "oops/methodData.inline.hpp"
+#include "oops/trainingData.hpp"
 #include "runtime/deoptimization.hpp"
 #include "utilities/copy.hpp"

@@ -54,6 +55,15 @@ ciMethodData::ciMethodData(MethodData* md)
   _invocation_counter(0),
   _orig() {}

+static bool is_klass_loaded(Klass* k) {
+  if (TrainingData::have_data()) {
+    // If we're running in AOT mode some classes may not be loaded yet
+    return !k->is_instance_klass() || InstanceKlass::cast(k)->is_loaded();
+  }
+  return true;
+}
+
 // Check for entries that reference an unloaded method
 class PrepareExtraDataClosure : public CleanExtraDataClosure {
   MethodData* _mdo;
@@ -68,7 +78,8 @@ public:
   { }

   bool is_live(Method* m) {
-    if (!m->method_holder()->is_loader_alive()) {
+    Klass* holder = m->method_holder();
+    if (holder == nullptr || !holder->is_loader_present_and_alive() || !is_klass_loaded(holder)) {
       return false;
     }
     if (CURRENT_ENV->cached_metadata(m) == nullptr) {
@@ -303,7 +314,7 @@ bool ciMethodData::load_data() {
 void ciReceiverTypeData::translate_receiver_data_from(const ProfileData* data) {
   for (uint row = 0; row < row_limit(); row++) {
     Klass* k = data->as_ReceiverTypeData()->receiver(row);
-    if (k != nullptr) {
+    if (k != nullptr && k->class_loader_data() != nullptr && is_klass_loaded(k)) {
       if (k->is_loader_alive()) {
         ciKlass* klass = CURRENT_ENV->get_klass(k);
         set_receiver(row, klass);
@@ -321,7 +332,7 @@ void ciTypeStackSlotEntries::translate_type_data_from(const TypeStackSlotEntries
   for (int i = 0; i < number_of_entries(); i++) {
     intptr_t k = entries->type(i);
     Klass* klass = (Klass*)klass_part(k);
-    if (klass != nullptr && !klass->is_loader_alive()) {
+    if (klass == nullptr || !klass->is_loader_present_and_alive() || !is_klass_loaded(klass)) {
       // With concurrent class unloading, the MDO could have stale metadata; override it
       TypeStackSlotEntries::set_type(i, TypeStackSlotEntries::with_status((Klass*)nullptr, k));
     } else {
@@ -333,7 +344,7 @@ void ciTypeStackSlotEntries::translate_type_data_from(const TypeStackSlotEntries
 void ciReturnTypeEntry::translate_type_data_from(const ReturnTypeEntry* ret) {
   intptr_t k = ret->type();
   Klass* klass = (Klass*)klass_part(k);
-  if (klass != nullptr && !klass->is_loader_alive()) {
+  if (klass == nullptr || !klass->is_loader_present_and_alive() || !is_klass_loaded(klass)) {
     // With concurrent class unloading, the MDO could have stale metadata; override it
     set_type(ReturnTypeEntry::with_status((Klass*)nullptr, k));
   } else {
@@ -44,10 +44,12 @@
 #include "classfile/javaClasses.inline.hpp"
 #include "classfile/vmClasses.hpp"
 #include "compiler/compiler_globals.hpp"
+#include "compiler/compileTask.hpp"
 #include "gc/shared/collectedHeap.inline.hpp"
 #include "memory/allocation.inline.hpp"
 #include "memory/universe.hpp"
 #include "oops/oop.inline.hpp"
+#include "oops/trainingData.hpp"
 #include "runtime/handles.inline.hpp"
 #include "runtime/signature.hpp"
 #include "utilities/macros.hpp"
@@ -108,7 +110,7 @@ void ciObjectFactory::initialize() {
   // This Arena is long lived and exists in the resource mark of the
   // compiler thread that initializes the initial ciObjectFactory which
   // creates the shared ciObjects that all later ciObjectFactories use.
-  Arena* arena = new (mtCompiler) Arena(mtCompiler, Arena::Tag::tag_cienv);
+  Arena* arena = new (mtCompiler) Arena(mtCompiler);
   ciEnv initial(arena);
   ciEnv* env = ciEnv::current();
   env->_factory->init_shared_objects();
@@ -232,26 +234,40 @@ void ciObjectFactory::remove_symbols() {
 ciObject* ciObjectFactory::get(oop key) {
   ASSERT_IN_VM;

-  assert(Universe::heap()->is_in(key), "must be");
+  Handle keyHandle(Thread::current(), key);
+  assert(Universe::heap()->is_in(keyHandle()), "must be");

-  NonPermObject* &bucket = find_non_perm(key);
+  NonPermObject* &bucket = find_non_perm(keyHandle);
   if (bucket != nullptr) {
     return bucket->object();
   }

   // The ciObject does not yet exist. Create it and insert it
   // into the cache.
-  Handle keyHandle(Thread::current(), key);
   ciObject* new_object = create_new_object(keyHandle());
   assert(keyHandle() == new_object->get_oop(), "must be properly recorded");
   init_ident_of(new_object);
   assert(Universe::heap()->is_in(new_object->get_oop()), "must be");

   // Not a perm-space object.
-  insert_non_perm(bucket, keyHandle(), new_object);
+  insert_non_perm(bucket, keyHandle, new_object);
+  notice_new_object(new_object);
   return new_object;
 }

+void ciObjectFactory::notice_new_object(ciBaseObject* new_object) {
+  if (TrainingData::need_data()) {
+    ciEnv* env = ciEnv::current();
+    if (env->task() != nullptr) {
+      // Note: task will be null during init_compiler_runtime.
+      CompileTrainingData* td = env->task()->training_data();
+      if (td != nullptr) {
+        td->notice_jit_observation(env, new_object);
+      }
+    }
+  }
+}
+
 int ciObjectFactory::metadata_compare(Metadata* const& key, ciMetadata* const& elt) {
   Metadata* value = elt->constant_encoding();
   if (key < value) return -1;
@@ -331,6 +347,7 @@ ciMetadata* ciObjectFactory::get_metadata(Metadata* key) {
     }
     assert(!found, "no double insert");
     _ci_metadata.insert_before(index, new_object);
+    notice_new_object(new_object);
     return new_object;
   }
   return _ci_metadata.at(index)->as_metadata();
@@ -636,12 +653,12 @@ static ciObjectFactory::NonPermObject* emptyBucket = nullptr;
 // Use a small hash table, hashed on the klass of the key.
 // If there is no entry in the cache corresponding to this oop, return
 // the null tail of the bucket into which the oop should be inserted.
-ciObjectFactory::NonPermObject* &ciObjectFactory::find_non_perm(oop key) {
-  assert(Universe::heap()->is_in(key), "must be");
-  ciMetadata* klass = get_metadata(key->klass());
+ciObjectFactory::NonPermObject* &ciObjectFactory::find_non_perm(Handle keyHandle) {
+  assert(Universe::heap()->is_in(keyHandle()), "must be");
+  ciMetadata* klass = get_metadata(keyHandle->klass()); // This may safepoint!
   NonPermObject* *bp = &_non_perm_bucket[(unsigned) klass->hash() % NON_PERM_BUCKETS];
   for (NonPermObject* p; (p = (*bp)) != nullptr; bp = &p->next()) {
-    if (is_equal(p, key)) break;
+    if (is_equal(p, keyHandle())) break;
   }
   return (*bp);
 }
@@ -664,12 +681,12 @@ inline ciObjectFactory::NonPermObject::NonPermObject(ciObjectFactory::NonPermObj
 // ciObjectFactory::insert_non_perm
 //
 // Insert a ciObject into the non-perm table.
-void ciObjectFactory::insert_non_perm(ciObjectFactory::NonPermObject* &where, oop key, ciObject* obj) {
-  assert(Universe::heap()->is_in_or_null(key), "must be");
+void ciObjectFactory::insert_non_perm(ciObjectFactory::NonPermObject* &where, Handle keyHandle, ciObject* obj) {
+  assert(Universe::heap()->is_in_or_null(keyHandle()), "must be");
   assert(&where != &emptyBucket, "must not try to fill empty bucket");
-  NonPermObject* p = new (arena()) NonPermObject(where, key, obj);
-  assert(where == p && is_equal(p, key) && p->object() == obj, "entry must match");
-  assert(find_non_perm(key) == p, "must find the same spot");
+  NonPermObject* p = new (arena()) NonPermObject(where, keyHandle(), obj);
+  assert(where == p && is_equal(p, keyHandle()) && p->object() == obj, "entry must match");
+  assert(find_non_perm(keyHandle) == p, "must find the same spot");
   ++_non_perm_count;
 }
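Why the oop-to-Handle change above matters: get_metadata() can reach a safepoint, and a GC at that safepoint may move the object that a raw oop local points at, leaving the key stale for the rest of find_non_perm()/get(). A Handle is registered with the thread and updated by GC, so keyHandle() always yields the object's current address. That is presumably why the lookup and insert now take the Handle itself rather than a raw oop.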
@@ -37,6 +37,7 @@
 // which ensures that for each oop, at most one ciObject is created.
 // This invariant allows efficient implementation of ciObject.
 class ciObjectFactory : public ArenaObj {
   friend class VMStructs;
+  friend class ciEnv;

 private:
@@ -77,8 +78,8 @@ private:
     return p->object()->get_oop() == key;
   }

-  NonPermObject* &find_non_perm(oop key);
-  void insert_non_perm(NonPermObject* &where, oop key, ciObject* obj);
+  NonPermObject* &find_non_perm(Handle keyHandle);
+  void insert_non_perm(NonPermObject* &where, Handle keyHandle, ciObject* obj);

   void init_ident_of(ciBaseObject* obj);

@@ -106,6 +107,9 @@ public:
   // Get the ciSymbol corresponding to one of the vmSymbols.
   static ciSymbol* vm_symbol_at(vmSymbolID index);

+  // Called on every new object made.
+  void notice_new_object(ciBaseObject* new_object);
+
   // Get the ciMethod representing an unloaded/unfound method.
   ciMethod* get_unloaded_method(ciInstanceKlass* holder,
                                 ciSymbol* name,
@@ -241,6 +241,7 @@ template <
   bool (*EQUALS)(V value, K key, int len)
   >
 class CompactHashtable : public SimpleCompactHashtable {
+  friend class VMStructs;

   V decode(u4 offset) const {
     return DECODE(_base_address, offset);
@@ -659,6 +659,11 @@ bool SystemDictionaryShared::should_be_excluded(Klass* k) {
   } else {
     InstanceKlass* ik = InstanceKlass::cast(k);

+    if (CDSConfig::is_dumping_dynamic_archive() && ik->is_shared()) {
+      // ik is already part of the static archive, so it will never be considered as excluded.
+      return false;
+    }
+
     if (!SafepointSynchronize::is_at_safepoint()) {
       if (!ik->is_linked()) {
         // check_for_exclusion() below doesn't link unlinked classes. We come
@@ -1003,7 +1008,7 @@ void SystemDictionaryShared::copy_linking_constraints_from_preimage(InstanceKlas
 }

 unsigned int SystemDictionaryShared::hash_for_shared_dictionary(address ptr) {
-  if (ArchiveBuilder::is_active()) {
+  if (ArchiveBuilder::is_active() && ArchiveBuilder::current()->is_in_buffer_space(ptr)) {
     uintx offset = ArchiveBuilder::current()->any_to_offset(ptr);
     unsigned int hash = primitive_hash<uintx>(offset);
     DEBUG_ONLY({
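hash_for_shared_dictionary() hashes a buffer-relative offset rather than a raw address: the archive may map anywhere, but an object's offset inside it is stable, so hash codes baked into the archived dictionary stay valid at runtime. A standalone illustration of the idea (the mixing function is a stand-in, not HotSpot's primitive_hash):

#include <cstdint>

// Hash an archived object by its offset from the buffer base, not by its
// run-to-run unstable absolute address.
inline unsigned archived_hash(const char* buffer_base, const void* obj) {
  uintptr_t offset = (uintptr_t)((const char*)obj - buffer_base);
  offset ^= offset >> 16;                    // cheap bit mix
  return (unsigned)(offset * 0x9E3779B9u);   // golden-ratio multiplier
}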
@@ -22,6 +22,7 @@
  *
  */

+#include "cds/aotLinkedClassBulkLoader.hpp"
 #include "code/scopeDesc.hpp"
 #include "compiler/compilationPolicy.hpp"
 #include "compiler/compileBroker.hpp"
@@ -31,6 +32,7 @@
 #include "oops/method.inline.hpp"
 #include "oops/methodData.hpp"
 #include "oops/oop.inline.hpp"
+#include "oops/trainingData.hpp"
 #include "prims/jvmtiExport.hpp"
 #include "runtime/arguments.hpp"
 #include "runtime/deoptimization.hpp"
@@ -50,11 +52,13 @@
 #include "jvmci/jvmci.hpp"
 #endif

-jlong CompilationPolicy::_start_time = 0;
+int64_t CompilationPolicy::_start_time = 0;
 int CompilationPolicy::_c1_count = 0;
 int CompilationPolicy::_c2_count = 0;
 double CompilationPolicy::_increase_threshold_at_ratio = 0;

+CompilationPolicy::TrainingReplayQueue CompilationPolicy::_training_replay_queue;
+
 void compilationPolicy_init() {
   CompilationPolicy::initialize();
 }
@@ -82,10 +86,33 @@ bool CompilationPolicy::must_be_compiled(const methodHandle& m, int comp_level)
          (AlwaysCompileLoopMethods && m->has_loops() && CompileBroker::should_compile_new_jobs()); // eagerly compile loop methods
 }

-void CompilationPolicy::compile_if_required(const methodHandle& m, TRAPS) {
-  if (must_be_compiled(m)) {
-    // This path is unusual, mostly used by the '-Xcomp' stress test mode.
+void CompilationPolicy::maybe_compile_early(const methodHandle& m, TRAPS) {
+  if (m->method_holder()->is_not_initialized()) {
+    // 'is_not_initialized' means not only '!is_initialized', but also that
+    // initialization has not been started yet ('!being_initialized')
+    // Do not force compilation of methods in uninitialized classes.
+    return;
+  }
+  if (!m->is_native() && MethodTrainingData::have_data()) {
+    MethodTrainingData* mtd = MethodTrainingData::find_fast(m);
+    if (mtd == nullptr) {
+      return; // there is no training data recorded for m
+    }
+    CompLevel cur_level = static_cast<CompLevel>(m->highest_comp_level());
+    CompLevel next_level = trained_transition(m, cur_level, mtd, THREAD);
+    if (next_level != cur_level && can_be_compiled(m, next_level) && !CompileBroker::compilation_is_in_queue(m)) {
+      if (PrintTieredEvents) {
+        print_event(FORCE_COMPILE, m(), m(), InvocationEntryBci, next_level);
+      }
+      CompileBroker::compile_method(m, InvocationEntryBci, next_level, 0, CompileTask::Reason_MustBeCompiled, THREAD);
+      if (HAS_PENDING_EXCEPTION) {
+        CLEAR_PENDING_EXCEPTION;
+      }
+    }
+  }
+}
+
+void CompilationPolicy::compile_if_required(const methodHandle& m, TRAPS) {
   if (!THREAD->can_call_java() || THREAD->is_Compiler_thread()) {
     // don't force compilation, resolve was on behalf of compiler
     return;
@@ -100,14 +127,69 @@ void CompilationPolicy::compile_if_required(const methodHandle& m, TRAPS) {
     // even before classes are initialized.
     return;
   }
+
+  if (must_be_compiled(m)) {
+    // This path is unusual, mostly used by the '-Xcomp' stress test mode.
     CompLevel level = initial_compile_level(m);
     if (PrintTieredEvents) {
-      print_event(COMPILE, m(), m(), InvocationEntryBci, level);
+      print_event(FORCE_COMPILE, m(), m(), InvocationEntryBci, level);
     }
     CompileBroker::compile_method(m, InvocationEntryBci, level, 0, CompileTask::Reason_MustBeCompiled, THREAD);
   }
 }

+void CompilationPolicy::replay_training_at_init_impl(InstanceKlass* klass, TRAPS) {
+  if (!klass->has_init_deps_processed()) {
+    ResourceMark rm;
+    log_debug(training)("Replay training: %s", klass->external_name());
+
+    KlassTrainingData* ktd = KlassTrainingData::find(klass);
+    if (ktd != nullptr) {
+      guarantee(ktd->has_holder(), "");
+      ktd->notice_fully_initialized(); // sets klass->has_init_deps_processed bit
+      assert(klass->has_init_deps_processed(), "");
+      if (AOTCompileEagerly) {
+        ktd->iterate_comp_deps([&](CompileTrainingData* ctd) {
+          if (ctd->init_deps_left() == 0) {
+            MethodTrainingData* mtd = ctd->method();
+            if (mtd->has_holder()) {
+              const methodHandle mh(THREAD, const_cast<Method*>(mtd->holder()));
+              CompilationPolicy::maybe_compile_early(mh, THREAD);
+            }
+          }
+        });
+      }
+    }
+  }
+}
+
+void CompilationPolicy::replay_training_at_init(InstanceKlass* klass, TRAPS) {
+  assert(klass->is_initialized(), "");
+  if (TrainingData::have_data() && klass->is_shared()) {
+    _training_replay_queue.push(klass, TrainingReplayQueue_lock, THREAD);
+  }
+}
+
+// For TrainingReplayQueue
+template<>
+void CompilationPolicyUtils::Queue<InstanceKlass>::print_on(outputStream* st) {
+  int pos = 0;
+  for (QueueNode* cur = _head; cur != nullptr; cur = cur->next()) {
+    ResourceMark rm;
+    InstanceKlass* ik = cur->value();
+    st->print_cr("%3d: " INTPTR_FORMAT " %s", ++pos, p2i(ik), ik->external_name());
+  }
+}
+
+void CompilationPolicy::replay_training_at_init_loop(TRAPS) {
+  while (!CompileBroker::is_compilation_disabled_forever()) {
+    InstanceKlass* ik = _training_replay_queue.pop(TrainingReplayQueue_lock, THREAD);
+    if (ik != nullptr) {
+      replay_training_at_init_impl(ik, THREAD);
+    }
+  }
+}
+
 static inline CompLevel adjust_level_for_compilability_query(CompLevel comp_level) {
   if (comp_level == CompLevel_any) {
     if (CompilerConfig::is_c1_only()) {
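replay_training_at_init() decouples class initialization from compilation: the initializing thread only enqueues the class, and a dedicated thread drains the queue via replay_training_at_init_loop(), triggering the recorded compilations. A minimal sketch of such a blocking handoff queue, in standard C++ rather than HotSpot's monitor-based Queue:

#include <condition_variable>
#include <deque>
#include <mutex>

template <typename T>
class ReplayQueue {
  std::mutex _lock;
  std::condition_variable _cv;
  std::deque<T*> _items;

public:
  void push(T* item) {
    {
      std::lock_guard<std::mutex> g(_lock);
      _items.push_back(item);
    }
    _cv.notify_one();                 // wake the replay thread
  }

  T* pop() {                          // blocks until an item arrives
    std::unique_lock<std::mutex> g(_lock);
    _cv.wait(g, [&] { return !_items.empty(); });
    T* item = _items.front();
    _items.pop_front();
    return item;
  }
};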
@@ -122,7 +204,7 @@ static inline CompLevel adjust_level_for_compilability_query(CompLevel comp_leve
 // Returns true if m is allowed to be compiled
 bool CompilationPolicy::can_be_compiled(const methodHandle& m, int comp_level) {
   // allow any levels for WhiteBox
-  assert(WhiteBoxAPI || comp_level == CompLevel_any || is_compile(comp_level), "illegal compilation level");
+  assert(WhiteBoxAPI || comp_level == CompLevel_any || is_compile(comp_level), "illegal compilation level %d", comp_level);

   if (m->is_abstract()) return false;
   if (DontCompileHugeMethods && m->code_size() > HugeMethodLimit) return false;
@@ -322,7 +404,7 @@ double CompilationPolicy::threshold_scale(CompLevel level, int feedback_k) {
   return 1;
 }

-void CompilationPolicy::print_counters(const char* prefix, const Method* m) {
+void CompilationPolicy::print_counters(const char* prefix, Method* m) {
   int invocation_count = m->invocation_count();
   int backedge_count = m->backedge_count();
   MethodData* mdh = m->method_data();
@@ -342,8 +424,36 @@ void CompilationPolicy::print_counters(const char* prefix, Method* m) {
           m->highest_comp_level(), m->highest_osr_comp_level());
 }

+void CompilationPolicy::print_training_data(const char* prefix, Method* method) {
+  methodHandle m(Thread::current(), method);
+  tty->print(" %smtd: ", prefix);
+  MethodTrainingData* mtd = MethodTrainingData::find(m);
+  if (mtd == nullptr) {
+    tty->print("null");
+  } else {
+    MethodData* md = mtd->final_profile();
+    tty->print("mdo=");
+    if (md == nullptr) {
+      tty->print("null");
+    } else {
+      int mdo_invocations = md->invocation_count();
+      int mdo_backedges = md->backedge_count();
+      int mdo_invocations_start = md->invocation_count_start();
+      int mdo_backedges_start = md->backedge_count_start();
+      tty->print("%d(%d), %d(%d)", mdo_invocations, mdo_invocations_start, mdo_backedges, mdo_backedges_start);
+    }
+    CompileTrainingData* ctd = mtd->last_toplevel_compile(CompLevel_full_optimization);
+    tty->print(", deps=");
+    if (ctd == nullptr) {
+      tty->print("null");
+    } else {
+      tty->print("%d", ctd->init_deps_left());
+    }
+  }
+}
+
 // Print an event.
-void CompilationPolicy::print_event(EventType type, const Method* m, const Method* im, int bci, CompLevel level) {
+void CompilationPolicy::print_event(EventType type, Method* m, Method* im, int bci, CompLevel level) {
   bool inlinee_event = m != im;

   ttyLocker tty_lock;
@@ -359,6 +469,9 @@ void CompilationPolicy::print_event(EventType type, Method* m, Method* im, int b
     case COMPILE:
       tty->print("compile");
       break;
+    case FORCE_COMPILE:
+      tty->print("force-compile");
+      break;
     case REMOVE_FROM_QUEUE:
       tty->print("remove-from-queue");
       break;
@@ -424,6 +537,10 @@ void CompilationPolicy::print_event(EventType type, Method* m, Method* im, int b
     if (m->queued_for_compilation()) {
       tty->print("in-queue");
     } else tty->print("idle");
+    print_training_data("", m);
+    if (inlinee_event) {
+      print_training_data("inlinee ", im);
+    }
   }
   tty->print_cr("]");
 }
@@ -617,12 +734,12 @@ void CompilationPolicy::handle_counter_overflow(const methodHandle& method) {
 }

 // Called with the queue locked and with at least one element
-CompileTask* CompilationPolicy::select_task(CompileQueue* compile_queue) {
+CompileTask* CompilationPolicy::select_task(CompileQueue* compile_queue, JavaThread* THREAD) {
   CompileTask *max_blocking_task = nullptr;
   CompileTask *max_task = nullptr;
   Method* max_method = nullptr;

-  jlong t = nanos_to_millis(os::javaTimeNanos());
+  int64_t t = nanos_to_millis(os::javaTimeNanos());
   // Iterate through the queue and find a method with a maximum rate.
   for (CompileTask* task = compile_queue->first(); task != nullptr;) {
     CompileTask* next_task = task->next();
@@ -639,7 +756,7 @@ CompileTask* CompilationPolicy::select_task(CompileQueue* compile_queue, JavaThr
       return task;
     }
     Method* method = task->method();
-    methodHandle mh(Thread::current(), method);
+    methodHandle mh(THREAD, method);
     if (task->can_become_stale() && is_stale(t, TieredCompileTaskTimeout, mh) && !is_old(mh)) {
       if (PrintTieredEvents) {
         print_event(REMOVE_FROM_QUEUE, method, method, task->osr_bci(), (CompLevel) task->comp_level());
@@ -675,7 +792,7 @@ CompileTask* CompilationPolicy::select_task(CompileQueue* compile_queue, JavaThr
     max_method = max_task->method();
   }

-  methodHandle max_method_h(Thread::current(), max_method);
+  methodHandle max_method_h(THREAD, max_method);

   if (max_task != nullptr && max_task->comp_level() == CompLevel_full_profile && TieredStopAtLevel > CompLevel_full_profile &&
       max_method != nullptr && is_method_profiled(max_method_h) && !Arguments::is_compiler_only()) {
@@ -694,7 +811,6 @@ CompileTask* CompilationPolicy::select_task(CompileQueue* compile_queue, JavaThr
       print_event(UPDATE_IN_QUEUE, max_method, max_method, max_task->osr_bci(), (CompLevel)max_task->comp_level());
     }
   }
-
   return max_task;
 }
@@ -717,6 +833,13 @@ nmethod* CompilationPolicy::event(const methodHandle& method, const methodHandle
     print_event(bci == InvocationEntryBci ? CALL : LOOP, method(), inlinee(), bci, comp_level);
   }

+#if INCLUDE_JVMCI
+  if (EnableJVMCI && UseJVMCICompiler &&
+      comp_level == CompLevel_full_optimization CDS_ONLY(&& !AOTLinkedClassBulkLoader::class_preloading_finished())) {
+    return nullptr;
+  }
+#endif
+
   if (comp_level == CompLevel_none &&
       JvmtiExport::can_post_interpreter_events() &&
       THREAD->is_interp_only_mode()) {
@ -817,7 +940,7 @@ void CompilationPolicy::compile(const methodHandle& mh, int bci, CompLevel level
|
||||
}
|
||||
|
||||
// update_rate() is called from select_task() while holding a compile queue lock.
|
||||
void CompilationPolicy::update_rate(jlong t, const methodHandle& method) {
|
||||
void CompilationPolicy::update_rate(int64_t t, const methodHandle& method) {
|
||||
// Skip update if counters are absent.
|
||||
// Can't allocate them since we are holding compile queue lock.
|
||||
if (method->method_counters() == nullptr) return;
|
||||
@ -831,8 +954,8 @@ void CompilationPolicy::update_rate(jlong t, const methodHandle& method) {
|
||||
|
||||
// We don't update the rate if we've just came out of a safepoint.
|
||||
// delta_s is the time since last safepoint in milliseconds.
|
||||
jlong delta_s = t - SafepointTracing::end_of_last_safepoint_ms();
|
||||
jlong delta_t = t - (method->prev_time() != 0 ? method->prev_time() : start_time()); // milliseconds since the last measurement
|
||||
int64_t delta_s = t - SafepointTracing::end_of_last_safepoint_ms();
|
||||
int64_t delta_t = t - (method->prev_time() != 0 ? method->prev_time() : start_time()); // milliseconds since the last measurement
|
||||
// How many events were there since the last time?
|
||||
int event_count = method->invocation_count() + method->backedge_count();
|
||||
int delta_e = event_count - method->prev_event_count();
|
||||
@ -855,9 +978,9 @@ void CompilationPolicy::update_rate(jlong t, const methodHandle& method) {
|
||||
|
||||
// Check if this method has been stale for a given number of milliseconds.
|
||||
// See select_task().
|
||||
bool CompilationPolicy::is_stale(jlong t, jlong timeout, const methodHandle& method) {
|
||||
jlong delta_s = t - SafepointTracing::end_of_last_safepoint_ms();
|
||||
jlong delta_t = t - method->prev_time();
|
||||
bool CompilationPolicy::is_stale(int64_t t, int64_t timeout, const methodHandle& method) {
|
||||
int64_t delta_s = t - SafepointTracing::end_of_last_safepoint_ms();
|
||||
int64_t delta_t = t - method->prev_time();
|
||||
if (delta_t > timeout && delta_s > timeout) {
|
||||
int event_count = method->invocation_count() + method->backedge_count();
|
||||
int delta_e = event_count - method->prev_event_count();
|
||||
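The staleness check is worth restating as a pure function: a method only counts as stale when both the time since its last rate sample and the time since the last safepoint exceed the timeout, and no new invocation or backedge events arrived in that window. A minimal standalone model (plain integers stand in for the HotSpot accessors; the hunk above is cut off right after delta_e, so the "zero new events means stale" conclusion is an assumption of this sketch):

    #include <cstdint>

    // Simplified model of CompilationPolicy::is_stale(). The safepoint term
    // guards against declaring a method stale just because the whole VM was
    // parked in a long safepoint where no events could happen anyway.
    static bool is_stale_model(int64_t now_ms, int64_t timeout_ms,
                               int64_t prev_sample_ms, int64_t last_safepoint_end_ms,
                               int event_count, int prev_event_count) {
      int64_t delta_s = now_ms - last_safepoint_end_ms;
      int64_t delta_t = now_ms - prev_sample_ms;
      if (delta_t > timeout_ms && delta_s > timeout_ms) {
        return (event_count - prev_event_count) == 0;  // no new events: stale
      }
      return false;
    }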
@ -908,13 +1031,12 @@ bool CompilationPolicy::is_method_profiled(const methodHandle& method) {


// Determine if a method is mature.
bool CompilationPolicy::is_mature(Method* method) {
bool CompilationPolicy::is_mature(MethodData* mdo) {
  if (Arguments::is_compiler_only()) {
    // Always report profiles as immature with -Xcomp
    return false;
  }
  methodHandle mh(Thread::current(), method);
  MethodData* mdo = method->method_data();
  methodHandle mh(Thread::current(), mdo->method());
  if (mdo != nullptr) {
    int i = mdo->invocation_count();
    int b = mdo->backedge_count();

@ -931,9 +1053,18 @@ bool CompilationPolicy::should_create_mdo(const methodHandle& method, CompLevel
  if (cur_level != CompLevel_none || force_comp_at_level_simple(method) || CompilationModeFlag::quick_only() || !ProfileInterpreter) {
    return false;
  }

  if (TrainingData::have_data()) {
    MethodTrainingData* mtd = MethodTrainingData::find_fast(method);
    if (mtd != nullptr && mtd->saw_level(CompLevel_full_optimization)) {
      return true;
    }
  }

  if (is_old(method)) {
    return true;
  }

  int i = method->invocation_count();
  int b = method->backedge_count();
  double k = Tier0ProfilingStartPercentage / 100.0;

@ -967,7 +1098,7 @@ void CompilationPolicy::create_mdo(const methodHandle& mh, JavaThread* THREAD) {
  if (mh->method_data() == nullptr) {
    Method::build_profiling_method_data(mh, CHECK_AND_CLEAR);
  }
  if (ProfileInterpreter) {
  if (ProfileInterpreter && THREAD->has_last_Java_frame()) {
    MethodData* mdo = mh->method_data();
    if (mdo != nullptr) {
      frame last_frame = THREAD->last_frame();
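The training-data branch added to should_create_mdo() changes when profiling starts, not just how: a method that reached level 4 during the training run gets its MDO as soon as it is first seen, instead of waiting out the Tier0ProfilingStartPercentage counter heuristic. A rough standalone model of the decision order (the struct fields are stand-ins for the accessors above, and the counter arithmetic is elided):

    // Simplified model: training data short-circuits the counter heuristics.
    struct MdoDecisionModel {
      bool has_training_data;        // TrainingData::have_data()
      bool saw_level4_in_training;   // mtd->saw_level(CompLevel_full_optimization)
      bool is_old;                   // is_old(method)
      bool counters_past_threshold;  // the Tier0ProfilingStartPercentage check
    };

    static bool should_create_mdo_model(const MdoDecisionModel& m) {
      if (m.has_training_data && m.saw_level4_in_training) return true;
      if (m.is_old) return true;
      return m.counters_past_threshold;
    }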
@ -980,7 +1111,136 @@ void CompilationPolicy::create_mdo(const methodHandle& mh, JavaThread* THREAD) {
    }
  }

CompLevel CompilationPolicy::trained_transition_from_none(const methodHandle& method, CompLevel cur_level, MethodTrainingData* mtd, JavaThread* THREAD) {
  precond(mtd != nullptr);
  precond(cur_level == CompLevel_none);

  if (mtd->only_inlined() && !mtd->saw_level(CompLevel_full_optimization)) {
    return CompLevel_none;
  }

  bool training_has_profile = (mtd->final_profile() != nullptr);
  if (mtd->saw_level(CompLevel_full_optimization) && !training_has_profile) {
    return CompLevel_full_profile;
  }

  CompLevel highest_training_level = static_cast<CompLevel>(mtd->highest_top_level());
  switch (highest_training_level) {
    case CompLevel_limited_profile:
    case CompLevel_full_profile:
      return CompLevel_limited_profile;
    case CompLevel_simple:
      return CompLevel_simple;
    case CompLevel_none:
      return CompLevel_none;
    default:
      break;
  }

  // Now handle the case of level 4.
  assert(highest_training_level == CompLevel_full_optimization, "Unexpected compilation level: %d", highest_training_level);
  if (!training_has_profile) {
    // The method was part of a level 4 compile, but we don't have a stored profile,
    // so we need to profile it.
    return CompLevel_full_profile;
  }
  const bool deopt = (static_cast<CompLevel>(method->highest_comp_level()) == CompLevel_full_optimization);
  // If we deopted, then we reprofile
  if (deopt && !is_method_profiled(method)) {
    return CompLevel_full_profile;
  }

  CompileTrainingData* ctd = mtd->last_toplevel_compile(CompLevel_full_optimization);
  assert(ctd != nullptr, "Should have CTD for CompLevel_full_optimization");
  // With SkipTier2IfPossible and all deps satisfied, go to level 4 immediately
  if (SkipTier2IfPossible && ctd->init_deps_left() == 0) {
    if (method->method_data() == nullptr) {
      create_mdo(method, THREAD);
    }
    return CompLevel_full_optimization;
  }

  // Otherwise go to level 2
  return CompLevel_limited_profile;
}


CompLevel CompilationPolicy::trained_transition_from_limited_profile(const methodHandle& method, CompLevel cur_level, MethodTrainingData* mtd, JavaThread* THREAD) {
  precond(mtd != nullptr);
  precond(cur_level == CompLevel_limited_profile);

  // One of the main reasons that we can get here is that we're waiting for the stored C2 code to become ready.

  // But first, check if we have a saved profile
  bool training_has_profile = (mtd->final_profile() != nullptr);
  if (!training_has_profile) {
    return CompLevel_full_profile;
  }

  assert(training_has_profile, "Have to have a profile to be here");
  // Check if the method is ready
  CompileTrainingData* ctd = mtd->last_toplevel_compile(CompLevel_full_optimization);
  if (ctd != nullptr && ctd->init_deps_left() == 0) {
    if (method->method_data() == nullptr) {
      create_mdo(method, THREAD);
    }
    return CompLevel_full_optimization;
  }

  // Otherwise stay at the current level
  return CompLevel_limited_profile;
}


CompLevel CompilationPolicy::trained_transition_from_full_profile(const methodHandle& method, CompLevel cur_level, MethodTrainingData* mtd, JavaThread* THREAD) {
  precond(mtd != nullptr);
  precond(cur_level == CompLevel_full_profile);

  CompLevel highest_training_level = static_cast<CompLevel>(mtd->highest_top_level());
  // We have the method at the full profile level and we also know that it's possibly an important method.
  if (highest_training_level == CompLevel_full_optimization && !mtd->only_inlined()) {
    // Check if it is adequately profiled
    if (is_method_profiled(method)) {
      return CompLevel_full_optimization;
    }
  }

  // Otherwise stay at the current level
  return CompLevel_full_profile;
}

CompLevel CompilationPolicy::trained_transition(const methodHandle& method, CompLevel cur_level, MethodTrainingData* mtd, JavaThread* THREAD) {
  precond(MethodTrainingData::have_data());

  // If there is no training data recorded for this method, bail out.
  if (mtd == nullptr) {
    return cur_level;
  }

  CompLevel next_level = cur_level;
  switch(cur_level) {
    default: break;
    case CompLevel_none:
      next_level = trained_transition_from_none(method, cur_level, mtd, THREAD);
      break;
    case CompLevel_limited_profile:
      next_level = trained_transition_from_limited_profile(method, cur_level, mtd, THREAD);
      break;
    case CompLevel_full_profile:
      next_level = trained_transition_from_full_profile(method, cur_level, mtd, THREAD);
      break;
  }

  // We don't have any special strategies for the C2-only compilation modes, so just fix up the levels for now.
  if (CompilationModeFlag::high_only_quick_internal() && CompLevel_simple < next_level && next_level < CompLevel_full_optimization) {
    return CompLevel_none;
  }
  if (CompilationModeFlag::high_only() && next_level < CompLevel_full_optimization) {
    return CompLevel_none;
  }
  return (cur_level != next_level) ? limit_level(next_level) : cur_level;
}
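Read together, the three trained_transition_from_* functions form a small ladder driven entirely by what the training run recorded. One hypothetical walk-through, assuming the trainer saw the method reach level 4 and archived its final profile:

- At CompLevel_none: highest_top_level() is 4 and final_profile() is present, so if SkipTier2IfPossible is on and ctd->init_deps_left() == 0, the archived profile is installed via create_mdo() and the method goes straight to CompLevel_full_optimization, skipping tiers 2 and 3 entirely.
- If some dependency classes have not initialized yet, the method parks at CompLevel_limited_profile instead; trained_transition_from_limited_profile() re-checks init_deps_left() on later events and promotes to level 4 once the count reaches zero.
- A method the trainer only ever saw inlined (only_inlined(), never a standalone level 4 compile) stays at CompLevel_none, on the theory that its own compilation was never needed before.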
/*
 * Method states:

@ -1022,24 +1282,65 @@ void CompilationPolicy::create_mdo(const methodHandle& mh, JavaThread* THREAD) {

// Common transition function. Given a predicate determines if a method should transition to another level.
template<typename Predicate>
CompLevel CompilationPolicy::common(const methodHandle& method, CompLevel cur_level, bool disable_feedback) {
CompLevel CompilationPolicy::common(const methodHandle& method, CompLevel cur_level, JavaThread* THREAD, bool disable_feedback) {
  CompLevel next_level = cur_level;
  int i = method->invocation_count();
  int b = method->backedge_count();

  if (force_comp_at_level_simple(method)) {
    next_level = CompLevel_simple;
  } else {
    if (is_trivial(method) || method->is_native()) {
  } else if (is_trivial(method) || method->is_native()) {
    // We do not care if there is profiling data for these methods, throw them to the compiler.
    next_level = CompilationModeFlag::disable_intermediate() ? CompLevel_full_optimization : CompLevel_simple;
  } else if (MethodTrainingData::have_data()) {
    MethodTrainingData* mtd = MethodTrainingData::find_fast(method);
    if (mtd == nullptr) {
      // We haven't seen compilations of this method in training. It's either very cold or the behavior changed.
      // Feed it to the standard TF with no profiling delay.
      next_level = standard_transition<Predicate>(method, cur_level, false /*delay_profiling*/, disable_feedback);
    } else {
      next_level = trained_transition(method, cur_level, mtd, THREAD);
      if (cur_level == next_level) {
        // trained_transition() is going to return the same level if no startup/warmup optimizations apply.
        // In order to catch possible pathologies due to behavior change we feed the event to the regular
        // TF but with profiling delay.
        next_level = standard_transition<Predicate>(method, cur_level, true /*delay_profiling*/, disable_feedback);
      }
    }
  } else {
    next_level = standard_transition<Predicate>(method, cur_level, false /*delay_profiling*/, disable_feedback);
  }
  return (next_level != cur_level) ? limit_level(next_level) : next_level;
}


template<typename Predicate>
CompLevel CompilationPolicy::standard_transition(const methodHandle& method, CompLevel cur_level, bool delay_profiling, bool disable_feedback) {
  CompLevel next_level = cur_level;
  switch(cur_level) {
    default: break;
    case CompLevel_none:
      next_level = transition_from_none<Predicate>(method, cur_level, delay_profiling, disable_feedback);
      break;
    case CompLevel_limited_profile:
      next_level = transition_from_limited_profile<Predicate>(method, cur_level, delay_profiling, disable_feedback);
      break;
    case CompLevel_full_profile:
      next_level = transition_from_full_profile<Predicate>(method, cur_level);
      break;
  }
  return next_level;
}
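The delay_profiling path deserves a worked number. When the trained transition declines to act, the event is re-fed to the standard predicates with thresholds stretched by Tier0ProfileDelayFactor (default 100.0) at level 0 or Tier2ProfileDelayFactor (default 250.0) at level 2, per the new flags later in this commit. A minimal model of the scaled predicate (the real apply_scaled also folds in per-method notification masks and queue feedback, which this sketch omits):

    // Simplified model: 'scale' of 1.0 is the normal threshold; a method the
    // trainer saw only as lukewarm must accumulate ~100x the usual events.
    static bool apply_scaled_model(int invocations, int backedges,
                                   double threshold, double scale) {
      return invocations + backedges >= threshold * scale;
    }
    // With threshold = 2000 events and scale = 100.0, profiling starts only
    // after 200000 events instead of 2000.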
template<typename Predicate>
CompLevel CompilationPolicy::transition_from_none(const methodHandle& method, CompLevel cur_level, bool delay_profiling, bool disable_feedback) {
  precond(cur_level == CompLevel_none);
  CompLevel next_level = cur_level;
  int i = method->invocation_count();
  int b = method->backedge_count();
  double scale = delay_profiling ? Tier0ProfileDelayFactor : 1.0;
  // If we were at full profile level, would we switch to full opt?
  if (common<Predicate>(method, CompLevel_full_profile, disable_feedback) == CompLevel_full_optimization) {
  if (transition_from_full_profile<Predicate>(method, CompLevel_full_profile) == CompLevel_full_optimization) {
    next_level = CompLevel_full_optimization;
  } else if (!CompilationModeFlag::disable_intermediate() && Predicate::apply(method, cur_level, i, b)) {
  } else if (!CompilationModeFlag::disable_intermediate() && Predicate::apply_scaled(method, cur_level, i, b, scale)) {
    // C1-generated fully profiled code is about 30% slower than the limited profile
    // code that has only invocation and backedge counters. The observation is that
    // if the C2 queue is large enough we can spend too much time in the fully profiled code

@ -1047,42 +1348,19 @@ CompLevel CompilationPolicy::common(const methodHandle& method, CompLevel cur_le
    // we introduce a feedback on the C2 queue size. If the C2 queue is sufficiently long
    // we choose to compile a limited profiled version and then recompile with full profiling
    // when the load on C2 goes down.
    if (!disable_feedback && CompileBroker::queue_size(CompLevel_full_optimization) >
        Tier3DelayOn * compiler_count(CompLevel_full_optimization)) {
    if (delay_profiling || (!disable_feedback && CompileBroker::queue_size(CompLevel_full_optimization) > Tier3DelayOn * compiler_count(CompLevel_full_optimization))) {
      next_level = CompLevel_limited_profile;
    } else {
      next_level = CompLevel_full_profile;
    }
  }
  break;
case CompLevel_limited_profile:
  if (is_method_profiled(method)) {
    // Special case: we got here because this method was fully profiled in the interpreter.
    next_level = CompLevel_full_optimization;
  } else {
    MethodData* mdo = method->method_data();
    if (mdo != nullptr) {
      if (mdo->would_profile()) {
        if (disable_feedback || (CompileBroker::queue_size(CompLevel_full_optimization) <=
            Tier3DelayOff * compiler_count(CompLevel_full_optimization) &&
            Predicate::apply(method, cur_level, i, b))) {
          next_level = CompLevel_full_profile;
  return next_level;
        }
      } else {
        next_level = CompLevel_full_optimization;
      }
    } else {
      // If there is no MDO we need to profile
      if (disable_feedback || (CompileBroker::queue_size(CompLevel_full_optimization) <=
          Tier3DelayOff * compiler_count(CompLevel_full_optimization) &&
          Predicate::apply(method, cur_level, i, b))) {
        next_level = CompLevel_full_profile;
      }
    }
  }
  break;
case CompLevel_full_profile:
  {

template<typename Predicate>
CompLevel CompilationPolicy::transition_from_full_profile(const methodHandle& method, CompLevel cur_level) {
  precond(cur_level == CompLevel_full_profile);
  CompLevel next_level = cur_level;
  MethodData* mdo = method->method_data();
  if (mdo != nullptr) {
    if (mdo->would_profile() || CompilationModeFlag::disable_intermediate()) {

@ -1095,20 +1373,46 @@ CompLevel CompilationPolicy::common(const methodHandle& method, CompLevel cur_le
      next_level = CompLevel_full_optimization;
    }
  }
}
break;
}
}
}
return (next_level != cur_level) ? limit_level(next_level) : next_level;
  return next_level;
}

template<typename Predicate>
CompLevel CompilationPolicy::transition_from_limited_profile(const methodHandle& method, CompLevel cur_level, bool delay_profiling, bool disable_feedback) {
  precond(cur_level == CompLevel_limited_profile);
  CompLevel next_level = cur_level;
  int i = method->invocation_count();
  int b = method->backedge_count();
  double scale = delay_profiling ? Tier2ProfileDelayFactor : 1.0;
  MethodData* mdo = method->method_data();
  if (mdo != nullptr) {
    if (mdo->would_profile()) {
      if (disable_feedback || (CompileBroker::queue_size(CompLevel_full_optimization) <=
          Tier3DelayOff * compiler_count(CompLevel_full_optimization) &&
          Predicate::apply_scaled(method, cur_level, i, b, scale))) {
        next_level = CompLevel_full_profile;
      }
    } else {
      next_level = CompLevel_full_optimization;
    }
  } else {
    // If there is no MDO we need to profile
    if (disable_feedback || (CompileBroker::queue_size(CompLevel_full_optimization) <=
        Tier3DelayOff * compiler_count(CompLevel_full_optimization) &&
        Predicate::apply_scaled(method, cur_level, i, b, scale))) {
      next_level = CompLevel_full_profile;
    }
  }
  if (next_level == CompLevel_full_profile && is_method_profiled(method)) {
    next_level = CompLevel_full_optimization;
  }
  return next_level;
}


// Determine if a method should be compiled with a normal entry point at a different level.
CompLevel CompilationPolicy::call_event(const methodHandle& method, CompLevel cur_level, Thread* thread) {
  CompLevel osr_level = MIN2((CompLevel) method->highest_osr_comp_level(), common<LoopPredicate>(method, cur_level, true));
  CompLevel next_level = common<CallPredicate>(method, cur_level, is_old(method));
CompLevel CompilationPolicy::call_event(const methodHandle& method, CompLevel cur_level, JavaThread* THREAD) {
  CompLevel osr_level = MIN2((CompLevel) method->highest_osr_comp_level(), common<LoopPredicate>(method, cur_level, THREAD, true));
  CompLevel next_level = common<CallPredicate>(method, cur_level, THREAD, !TrainingData::have_data() && is_old(method));

  // If OSR method level is greater than the regular method level, the levels should be
  // equalized by raising the regular method level in order to avoid OSRs during each

@ -1122,12 +1426,18 @@ CompLevel CompilationPolicy::call_event(const methodHandle& method, CompLevel cu
  } else {
    next_level = MAX2(osr_level, next_level);
  }
#if INCLUDE_JVMCI
  if (EnableJVMCI && UseJVMCICompiler &&
      next_level == CompLevel_full_optimization CDS_ONLY(&& !AOTLinkedClassBulkLoader::class_preloading_finished())) {
    next_level = cur_level;
  }
#endif
  return next_level;
}

// Determine if we should do an OSR compilation of a given method.
CompLevel CompilationPolicy::loop_event(const methodHandle& method, CompLevel cur_level, Thread* thread) {
  CompLevel next_level = common<LoopPredicate>(method, cur_level, true);
CompLevel CompilationPolicy::loop_event(const methodHandle& method, CompLevel cur_level, JavaThread* THREAD) {
  CompLevel next_level = common<LoopPredicate>(method, cur_level, THREAD, true);
  if (cur_level == CompLevel_none) {
    // If there is a live OSR method that means that we deopted to the interpreter
    // for the transition.
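Note the symmetry with the gate added to event() earlier in this file: when JVMCI is the top-tier compiler, any request for level 4 is suppressed until AOTLinkedClassBulkLoader::class_preloading_finished() reports true, because a JVMCI compiler is itself Java code and must not run against partially preloaded classes. Reduced to a predicate (a sketch of the shared shape, not a function from this commit):

    // Simplified shape of the JVMCI gate used in event() and call_event():
    // hold the method at its current level until class preloading completes.
    static bool must_defer_level4(bool jvmci_is_top_tier,
                                  bool preloading_finished) {
      return jvmci_is_top_tier && !preloading_finished;
    }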
@ -1,5 +1,5 @@
/*
 * Copyright (c) 2010, 2024, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2010, 2025, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it

@ -28,8 +28,82 @@
#include "code/nmethod.hpp"
#include "compiler/compileBroker.hpp"
#include "oops/methodData.hpp"
#include "oops/trainingData.hpp"
#include "utilities/globalDefinitions.hpp"

namespace CompilationPolicyUtils {
template<typename T>
class Queue {
  class QueueNode : public CHeapObj<mtCompiler> {
    T* _value;
    QueueNode* _next;
  public:
    QueueNode(T* value, QueueNode* next) : _value(value), _next(next) { }
    T* value() const { return _value; }
    void set_next(QueueNode* next) { _next = next; }
    QueueNode* next() const { return _next; }
  };

  QueueNode* _head;
  QueueNode* _tail;

  void push_unlocked(T* value) {
    QueueNode* n = new QueueNode(value, nullptr);
    if (_tail != nullptr) {
      _tail->set_next(n);
    }
    _tail = n;
    if (_head == nullptr) {
      _head = _tail;
    }
  }
  T* pop_unlocked() {
    QueueNode* n = _head;
    if (_head != nullptr) {
      _head = _head->next();
    }
    if (_head == nullptr) {
      _tail = _head;
    }
    T* value = nullptr;
    if (n != nullptr) {
      value = n->value();
      delete n;
    }
    return value;
  }
public:
  Queue() : _head(nullptr), _tail(nullptr) { }
  void push(T* value, Monitor* lock, TRAPS) {
    MonitorLocker locker(THREAD, lock);
    push_unlocked(value);
    locker.notify_all();
  }

  bool is_empty_unlocked() const { return _head == nullptr; }

  T* pop(Monitor* lock, TRAPS) {
    MonitorLocker locker(THREAD, lock);
    while(is_empty_unlocked() && !CompileBroker::is_compilation_disabled_forever()) {
      locker.wait();
    }
    T* value = pop_unlocked();
    return value;
  }

  T* try_pop(Monitor* lock, TRAPS) {
    MonitorLocker locker(THREAD, lock);
    T* value = nullptr;
    if (!is_empty_unlocked()) {
      value = pop_unlocked();
    }
    return value;
  }

  void print_on(outputStream* st);
};
} // namespace CompilationPolicyUtils

class CompileTask;
class CompileQueue;
/*
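CompilationPolicyUtils::Queue is a deliberately minimal Monitor-guarded FIFO; the one subtlety is that pop() blocks until an element arrives or compilation is disabled forever, in which case it returns nullptr from the empty queue. An illustrative producer/consumer pairing, written as comments since the lock name and call sites here are assumptions for illustration, not lines from this commit:

    // Producer -- a class just finished running <clinit>:
    //   _training_replay_queue.push(ik, TrainingReplayQueue_lock, THREAD);
    //
    // Consumer -- the training replay loop:
    //   while (true) {
    //     InstanceKlass* ik = _training_replay_queue.pop(TrainingReplayQueue_lock, THREAD);
    //     if (ik == nullptr) break;  // compilation disabled forever
    //     replay_training_at_init_impl(ik, THREAD);
    //   }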
@ -173,9 +247,12 @@ class CompilationPolicy : AllStatic {
  friend class CallPredicate;
  friend class LoopPredicate;

  static jlong _start_time;
  typedef CompilationPolicyUtils::Queue<InstanceKlass> TrainingReplayQueue;

  static int64_t _start_time;
  static int _c1_count, _c2_count;
  static double _increase_threshold_at_ratio;
  static TrainingReplayQueue _training_replay_queue;

  // Set carry flags in the counters (in Method* and MDO).
  inline static void handle_counter_overflow(const methodHandle& method);

@ -187,29 +264,45 @@ class CompilationPolicy : AllStatic {
  inline static CompLevel limit_level(CompLevel level);
  // Common transition function. Given a predicate determines if a method should transition to another level.
  template<typename Predicate>
  static CompLevel common(const methodHandle& method, CompLevel cur_level, bool disable_feedback = false);
  static CompLevel common(const methodHandle& method, CompLevel cur_level, JavaThread* THREAD, bool disable_feedback = false);

  template<typename Predicate>
  static CompLevel transition_from_none(const methodHandle& method, CompLevel cur_level, bool delay_profiling, bool disable_feedback);
  template<typename Predicate>
  static CompLevel transition_from_limited_profile(const methodHandle& method, CompLevel cur_level, bool delay_profiling, bool disable_feedback);
  template<typename Predicate>
  static CompLevel transition_from_full_profile(const methodHandle& method, CompLevel cur_level);
  template<typename Predicate>
  static CompLevel standard_transition(const methodHandle& method, CompLevel cur_level, bool delayprof, bool disable_feedback);

  static CompLevel trained_transition_from_none(const methodHandle& method, CompLevel cur_level, MethodTrainingData* mtd, JavaThread* THREAD);
  static CompLevel trained_transition_from_limited_profile(const methodHandle& method, CompLevel cur_level, MethodTrainingData* mtd, JavaThread* THREAD);
  static CompLevel trained_transition_from_full_profile(const methodHandle& method, CompLevel cur_level, MethodTrainingData* mtd, JavaThread* THREAD);
  static CompLevel trained_transition(const methodHandle& method, CompLevel cur_level, MethodTrainingData* mtd, JavaThread* THREAD);

  // Transition functions.
  // call_event determines if a method should be compiled at a different
  // level with a regular invocation entry.
  static CompLevel call_event(const methodHandle& method, CompLevel cur_level, Thread* thread);
  static CompLevel call_event(const methodHandle& method, CompLevel cur_level, JavaThread* THREAD);
  // loop_event checks if a method should be OSR compiled at a different
  // level.
  static CompLevel loop_event(const methodHandle& method, CompLevel cur_level, Thread* thread);
  static void print_counters(const char* prefix, const Method* m);
  static CompLevel loop_event(const methodHandle& method, CompLevel cur_level, JavaThread* THREAD);
  static void print_counters(const char* prefix, Method* m);
  static void print_training_data(const char* prefix, Method* method);
  // Has a method been around long?
  // We don't remove old methods from the compile queue even if they have
  // very low activity (see select_task()).
  inline static bool is_old(const methodHandle& method);
  // Was a given method inactive for a given number of milliseconds.
  // If it is, we would remove it from the queue (see select_task()).
  inline static bool is_stale(jlong t, jlong timeout, const methodHandle& method);
  inline static bool is_stale(int64_t t, int64_t timeout, const methodHandle& method);
  // Compute the weight of the method for the compilation scheduling
  inline static double weight(Method* method);
  // Apply heuristics and return true if x should be compiled before y
  inline static bool compare_methods(Method* x, Method* y);
  // Compute the event rate for a given method. The rate is the number of events (invocations + backedges)
  // per millisecond.
  inline static void update_rate(jlong t, const methodHandle& method);
  inline static void update_rate(int64_t t, const methodHandle& method);
  // Compute threshold scaling coefficient
  inline static double threshold_scale(CompLevel level, int feedback_k);
  // If a method is old enough and is still in the interpreter we would want to

@ -224,8 +317,8 @@ class CompilationPolicy : AllStatic {
  static void set_c1_count(int x) { _c1_count = x; }
  static void set_c2_count(int x) { _c2_count = x; }

  enum EventType { CALL, LOOP, COMPILE, REMOVE_FROM_QUEUE, UPDATE_IN_QUEUE, REPROFILE, MAKE_NOT_ENTRANT };
  static void print_event(EventType type, const Method* m, const Method* im, int bci, CompLevel level);
  enum EventType { CALL, LOOP, COMPILE, FORCE_COMPILE, FORCE_RECOMPILE, REMOVE_FROM_QUEUE, UPDATE_IN_QUEUE, REPROFILE, MAKE_NOT_ENTRANT };
  static void print_event(EventType type, Method* m, Method* im, int bci, CompLevel level);
  // Check if the method can be compiled, change level if necessary
  static void compile(const methodHandle& mh, int bci, CompLevel level, TRAPS);
  // Simple methods are as good being compiled with C1 as C2.

@ -242,21 +335,25 @@ class CompilationPolicy : AllStatic {
                      int bci, CompLevel level, nmethod* nm, TRAPS);

  static void set_increase_threshold_at_ratio() { _increase_threshold_at_ratio = 100 / (100 - (double)IncreaseFirstTierCompileThresholdAt); }
  static void set_start_time(jlong t) { _start_time = t; }
  static jlong start_time() { return _start_time; }
  static void set_start_time(int64_t t) { _start_time = t; }
  static int64_t start_time() { return _start_time; }

  // m must be compiled before executing it
  static bool must_be_compiled(const methodHandle& m, int comp_level = CompLevel_any);
  static void maybe_compile_early(const methodHandle& m, TRAPS);
  static void replay_training_at_init_impl(InstanceKlass* klass, TRAPS);
public:
  static int min_invocations() { return Tier4MinInvocationThreshold; }
  static int c1_count() { return _c1_count; }
  static int c2_count() { return _c2_count; }
  static int compiler_count(CompLevel comp_level);

  // If m must_be_compiled then request a compilation from the CompileBroker.
  // This supports the -Xcomp option.
  static void compile_if_required(const methodHandle& m, TRAPS);

  static void replay_training_at_init(InstanceKlass* klass, TRAPS);
  static void replay_training_at_init_loop(TRAPS);

  // m is allowed to be compiled
  static bool can_be_compiled(const methodHandle& m, int comp_level = CompLevel_any);
  // m is allowed to be osr compiled

@ -269,9 +366,9 @@ public:
  static nmethod* event(const methodHandle& method, const methodHandle& inlinee,
                        int branch_bci, int bci, CompLevel comp_level, nmethod* nm, TRAPS);
  // Select task is called by CompileBroker. We should return a task or nullptr.
  static CompileTask* select_task(CompileQueue* compile_queue);
  static CompileTask* select_task(CompileQueue* compile_queue, JavaThread* THREAD);
  // Tell the runtime if we think a given method is adequately profiled.
  static bool is_mature(Method* method);
  static bool is_mature(MethodData* mdo);
  // Initialize: set compiler thread count
  static void initialize();
  static bool should_not_inline(ciEnv* env, ciMethod* callee);

@ -280,6 +377,7 @@ public:
  static CompLevel initial_compile_level(const methodHandle& method);
  // Return highest level possible
  static CompLevel highest_compile_level();
  static void dump();
};

#endif // SHARE_COMPILER_COMPILATIONPOLICY_HPP

@ -22,6 +22,7 @@
 *
 */

#include "cds/cdsConfig.hpp"
#include "classfile/javaClasses.inline.hpp"
#include "classfile/symbolTable.hpp"
#include "classfile/vmClasses.hpp"

@ -347,6 +348,13 @@ void CompileQueue::add(CompileTask* task) {
    task->log_task_queued();
  }

  if (TrainingData::need_data() && !CDSConfig::is_dumping_final_static_archive()) {
    CompileTrainingData* ctd = CompileTrainingData::make(task);
    if (ctd != nullptr) {
      task->set_training_data(ctd);
    }
  }

  // Notify CompilerThreads that a task is available.
  MethodCompileQueue_lock->notify_all();
}
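This hook in CompileQueue::add() is the recording half of the feature: during a training run (TrainingData::need_data()), every task that enters a compile queue gets a CompileTrainingData attached, except while the final static archive is being assembled, when recording would only observe the assembly process itself. The guard reduces to a two-input predicate:

    // Simplified: record training data only while collecting a training run,
    // never during the final archive assembly phase.
    static bool should_record_task_model(bool need_training_data,
                                         bool dumping_final_static_archive) {
      return need_training_data && !dumping_final_static_archive;
    }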
@ -441,7 +449,7 @@ CompileTask* CompileQueue::get(CompilerThread* thread) {
  CompileTask* task;
  {
    NoSafepointVerifier nsv;
    task = CompilationPolicy::select_task(this);
    task = CompilationPolicy::select_task(this, thread);
    if (task != nullptr) {
      task = task->select_for_compilation();
    }

@ -781,6 +789,10 @@ void CompileBroker::compilation_init(JavaThread* THREAD) {
  _initialized = true;
}

void TrainingReplayThread::training_replay_thread_entry(JavaThread* thread, TRAPS) {
  CompilationPolicy::replay_training_at_init_loop(thread);
}

#if defined(ASSERT) && COMPILER2_OR_JVMCI
// Entry for DeoptimizeObjectsALotThread. The threads are started in
// CompileBroker::init_compiler_threads() iff DeoptimizeObjectsALot is enabled

@ -858,6 +870,9 @@ JavaThread* CompileBroker::make_thread(ThreadType type, jobject thread_handle, C
      new_thread = new DeoptimizeObjectsALotThread();
      break;
#endif // ASSERT
    case training_replay_t:
      new_thread = new TrainingReplayThread();
      break;
    default:
      ShouldNotReachHere();
  }

@ -1015,6 +1030,16 @@ void CompileBroker::init_compiler_threads() {
#endif // defined(ASSERT) && COMPILER2_OR_JVMCI
}

void CompileBroker::init_training_replay() {
  // Ensure any exceptions lead to vm_exit_during_initialization.
  EXCEPTION_MARK;
  if (TrainingData::have_data()) {
    Handle thread_oop = JavaThread::create_system_thread_object("Training replay thread", CHECK);
    jobject thread_handle = JNIHandles::make_local(THREAD, thread_oop());
    make_thread(training_replay_t, thread_handle, nullptr, nullptr, THREAD);
  }
}

void CompileBroker::possibly_add_compiler_threads(JavaThread* THREAD) {

  int old_c2_count = 0, new_c2_count = 0, old_c1_count = 0, new_c1_count = 0;

@ -254,11 +254,13 @@ class CompileBroker: AllStatic {

  enum ThreadType {
    compiler_t,
    deoptimizer_t
    deoptimizer_t,
    training_replay_t
  };

  static JavaThread* make_thread(ThreadType type, jobject thread_oop, CompileQueue* queue, AbstractCompiler* comp, JavaThread* THREAD);
  static void init_compiler_threads();
  static void init_training_replay();
  static void possibly_add_compiler_threads(JavaThread* THREAD);
  static bool compilation_is_prohibited(const methodHandle& method, int osr_bci, int comp_level, bool excluded);

@ -447,4 +449,16 @@ public:
  static void print_heapinfo(outputStream *out, const char* function, size_t granularity);
};

// In order to achieve a maximally fast warmup we attempt to compile important methods as soon as all
// the classes that they depend on are initialized. TrainingReplayThread processes a queue of InstanceKlass*
// that have just finished running their static initializers. We find all the methods that depend on the given class
// and for which the number of remaining dependencies is now zero, and eagerly compile them.
class TrainingReplayThread : public JavaThread {
  static void training_replay_thread_entry(JavaThread* thread, TRAPS);
public:
  TrainingReplayThread() : JavaThread(&training_replay_thread_entry) { }

  bool is_hidden_from_external_view() const { return true; }
};

#endif // SHARE_COMPILER_COMPILEBROKER_HPP
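The class comment above is the core of the warmup strategy, and the countdown it alludes to is simple to model: each recorded level 4 compilation tracks how many of its initialization dependencies have not yet run in the current execution, and becomes eligible for eager replay when that count hits zero. A standalone sketch (std::atomic stands in for HotSpot's Atomic; the real bookkeeping lives behind CompileTrainingData::init_deps_left()):

    #include <atomic>

    // Simplified model: a recorded compilation becomes eligible as soon as
    // the last class it depends on finishes static initialization.
    struct ReplayRecordModel {
      std::atomic<int> init_deps_left;
      // Called once per dependency class that completes <clinit>;
      // returns true exactly when the countdown reaches zero.
      bool dependency_initialized() {
        return init_deps_left.fetch_sub(1) == 1;
      }
    };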
@ -119,6 +119,7 @@ void CompileTask::initialize(int compile_id,
  _nm_total_size = 0;
  _failure_reason = nullptr;
  _failure_reason_on_C_heap = false;
  _training_data = nullptr;
  _arena_bytes = 0;

  _next = nullptr;

@ -31,6 +31,7 @@
#include "memory/allocation.hpp"
#include "utilities/xmlstream.hpp"

class CompileTrainingData;
class DirectiveSet;

JVMCI_ONLY(class JVMCICompileState;)

@ -111,6 +112,7 @@ class CompileTask : public CHeapObj<mtCompiler> {
  const char* _failure_reason;
  // Specifies if _failure_reason is on the C heap.
  bool _failure_reason_on_C_heap;
  CompileTrainingData* _training_data;
  size_t _arena_bytes; // peak size of temporary memory during compilation (e.g. node arenas)

public:

@ -212,6 +214,9 @@ class CompileTask : public CHeapObj<mtCompiler> {
  void set_is_free(bool val) { _is_free = val; }
  bool is_unloaded() const;

  CompileTrainingData* training_data() const { return _training_data; }
  void set_training_data(CompileTrainingData* td) { _training_data = td; }

  // RedefineClasses support
  void metadata_do(MetadataClosure* f);
  void mark_on_stack();

@ -59,7 +59,8 @@ enum CompLevel : s1 {
  CompLevel_simple = 1,          // C1
  CompLevel_limited_profile = 2, // C1, invocation & backedge counters
  CompLevel_full_profile = 3,    // C1, invocation & backedge counters + mdo
  CompLevel_full_optimization = 4 // C2 or JVMCI
  CompLevel_full_optimization = 4, // C2 or JVMCI
  CompLevel_count = 5
};

class CompilationModeFlag : AllStatic {

@ -269,6 +269,17 @@
          "Maximum rate sampling interval (in milliseconds)") \
          range(0, max_intx) \
  \
  product(double, Tier0ProfileDelayFactor, 100.0, DIAGNOSTIC, \
          "Delay profiling/compiling of methods that were " \
          "observed to be lukewarm") \
  \
  product(double, Tier2ProfileDelayFactor, 250.0, DIAGNOSTIC, \
          "Delay profiling of methods that were observed to be lukewarm") \
  \
  product(bool, SkipTier2IfPossible, false, DIAGNOSTIC, \
          "Compile at tier 4 instead of tier 2 in training replay " \
          "mode if possible") \
  \
  product(ccstr, CompilationMode, "default", \
          "Compilation modes: " \
          "default: normal tiered compilation; " \

@ -382,7 +393,6 @@
          "If compilation is stopped with an error, capture diagnostic " \
          "information at the bailout point") \
  \

// end of COMPILER_FLAGS

DECLARE_FLAGS(COMPILER_FLAGS)
@ -205,6 +205,7 @@ class outputStream;
  LOG_TAG(timer) \
  LOG_TAG(tlab) \
  LOG_TAG(tracking) \
  LOG_TAG(training) \
  LOG_TAG(trimnative) /* trim native heap */ \
  LOG_TAG(unload) /* Trace unloading of classes */ \
  LOG_TAG(unmap) \

@ -85,6 +85,13 @@ void* MetaspaceObj::operator new(size_t size, ClassLoaderData* loader_data,
  return Metaspace::allocate(loader_data, word_size, type, /*use_class_space*/ false);
}

// This is used for allocating training data. We are allocating training data in many cases where a GC cannot be triggered.
void* MetaspaceObj::operator new(size_t size, MemTag flags) throw() {
  void* p = AllocateHeap(size, flags, CALLER_PC);
  memset(p, 0, size);
  return p;
}

bool MetaspaceObj::is_valid(const MetaspaceObj* p) {
  // Weed out obvious bogus values first without traversing metaspace
  if ((size_t)p < os::min_page_size()) {

@ -315,6 +315,9 @@ class MetaspaceObj {
  f(Annotations) \
  f(MethodCounters) \
  f(RecordComponent) \
  f(KlassTrainingData) \
  f(MethodTrainingData) \
  f(CompileTrainingData) \
  f(AdapterHandlerEntry) \
  f(AdapterFingerPrint)

@ -354,6 +357,8 @@ class MetaspaceObj {
  void* operator new(size_t size, ClassLoaderData* loader_data,
                     size_t word_size,
                     Type type) throw();
  // This is used for allocating training data. We are allocating training data in many cases where a GC cannot be triggered.
  void* operator new(size_t size, MemTag flags) throw();
  void operator delete(void* p) = delete;

  // Declare a *static* method with the same signature in any subclass of MetaspaceObj

@ -48,6 +48,12 @@ class MetadataFactory : AllStatic {
    return array;
  }

  // This API should be used for TrainingData only.
  template <typename T>
  static Array<T>* new_array_from_c_heap(int length, MemTag flags) {
    return new (length, flags) Array<T>(length);
  }

  template <typename T>
  static void free_array(ClassLoaderData* loader_data, Array<T>* data) {
    if (data != nullptr) {

@ -29,6 +29,7 @@
#include "memory/allocation.hpp"
#include "metaprogramming/enableIf.hpp"
#include "oops/array.hpp"
#include "utilities/debug.hpp"
#include "utilities/globalDefinitions.hpp"
#include "utilities/growableArray.hpp"
#include "utilities/macros.hpp"

@ -104,6 +105,18 @@ public:
//     Symbol* bar() { return (Symbol*) _obj; }
//
// [2] All Array<T> dimensions are statically declared.
//
// Pointer Tagging
//
// All metaspace pointers are at least 4 byte aligned. Therefore, it's possible for
// certain pointers to contain "tags" in their lowest 2 bits.
//
// Ref::obj() clears the tag bits in the return values. As a result, most
// callers who just want to walk a closure of metaspace objects do not need to worry
// about the tag bits.
//
// If you need to use the tags, you can access the tagged pointer with Ref::addr()
// and manipulate its parts with strip_tags(), decode_tags() and add_tags()
class Ref : public CHeapObj<mtMetaspace> {
  Writability _writability;
  address _enclosing_obj;

@ -123,7 +136,7 @@ public:
  virtual ~Ref() {}

  address obj() const {
    return *addr();
    return strip_tags(*addr());
  }

  address* addr() const {

@ -143,12 +156,35 @@ public:
  Ref* next() const { return _next; }
};

// Pointer tagging support
constexpr static uintx TAG_MASK = 0x03;

template <typename T>
static T strip_tags(T ptr_with_tags) {
  uintx n = (uintx)ptr_with_tags;
  return (T)(n & ~TAG_MASK);
}

template <typename T>
static uintx decode_tags(T ptr_with_tags) {
  uintx n = (uintx)ptr_with_tags;
  return (n & TAG_MASK);
}

template <typename T>
static T add_tags(T ptr, uintx tags) {
  uintx n = (uintx)ptr;
  assert((n & TAG_MASK) == 0, "sanity");
  assert(tags <= TAG_MASK, "sanity");
  return (T)(n | tags);
}

private:
  // MSORef -- iterate an instance of MetaspaceObj
  template <class T> class MSORef : public Ref {
    T** _mpp;
    T* dereference() const {
      return *_mpp;
      return strip_tags(*_mpp);
    }
  protected:
    virtual void** mpp() const {

@ -176,7 +212,7 @@ private:
    Array<T>** _mpp;
  protected:
    Array<T>* dereference() const {
      return *_mpp;
      return strip_tags(*_mpp);
    }
    virtual void** mpp() const {
      return (void**)_mpp;
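The tagging helpers are self-contained enough to exercise standalone. A round trip under the documented invariant (metaspace pointers are at least 4-byte aligned, so the low 2 bits are free); uintx is HotSpot's pointer-sized unsigned integer, modeled here with uintptr_t:

    #include <cassert>
    #include <cstdint>

    using uintx = uintptr_t;
    constexpr uintx TAG_MASK = 0x03;

    template <typename T> T strip_tags(T p)        { return (T)((uintx)p & ~TAG_MASK); }
    template <typename T> uintx decode_tags(T p)   { return (uintx)p & TAG_MASK; }
    template <typename T> T add_tags(T p, uintx t) { return (T)((uintx)p | t); }

    int main() {
      alignas(4) static int slot = 42;
      int* tagged = add_tags(&slot, 0x1);  // stash a tag in the low bits
      assert(decode_tags(tagged) == 0x1);  // the tag is recoverable
      assert(*strip_tags(tagged) == 42);   // and the object is intact
    }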
@ -54,6 +54,11 @@ protected:
  NONCOPYABLE(Array);

  inline void* operator new(size_t size, ClassLoaderData* loader_data, int length, TRAPS) throw();
  inline void* operator new(size_t size, ClassLoaderData* loader_data, int length) throw();

  // Work-around -- see JDK-8331086
  inline void* operator new(size_t size, int length, MemTag flags) throw();

  static size_t byte_sizeof(int length, size_t elm_byte_size) {
    return sizeof(Array<T>) + MAX2(length - 1, 0) * elm_byte_size;

@ -37,4 +37,19 @@ inline void* Array<T>::operator new(size_t size, ClassLoaderData* loader_data, i
                                    MetaspaceObj::array_type(sizeof(T)), false, THREAD);
}

template <typename T>
inline void* Array<T>::operator new(size_t size, ClassLoaderData* loader_data, int length) throw() {
  size_t word_size = Array::size(length);
  return (void*) Metaspace::allocate(loader_data, word_size,
                                     MetaspaceObj::array_type(sizeof(T)), false);
}

template <typename T>
inline void* Array<T>::operator new(size_t size, int length, MemTag flags) throw() {
  size = Array::size(length) * BytesPerWord; // Array::size() is in words; convert to bytes once
  void* p = AllocateHeap(size, flags);
  memset(p, 0, size);
  return p;
}

#endif // SHARE_OOPS_ARRAY_INLINE_HPP
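new_array_from_c_heap() pairs with the MemTag-taking operator new above to let TrainingData build Array<T> instances off the C heap, in contexts where a metaspace allocation could not safely trigger a GC. An illustrative call (mtCompiler is one plausible tag; the actual call sites live in trainingData.cpp, outside this excerpt):

    // Illustration only: a zero-initialized Array<int> of length 16,
    // allocated on the C heap rather than in metaspace.
    Array<int>* deps = MetadataFactory::new_array_from_c_heap<int>(16, mtCompiler);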
@ -1327,6 +1327,7 @@ void InstanceKlass::initialize_impl(TRAPS) {
  if (!HAS_PENDING_EXCEPTION) {
    set_initialization_state_and_notify(fully_initialized, CHECK);
    DEBUG_ONLY(vtable().verify(tty, true);)
    CompilationPolicy::replay_training_at_init(this, THREAD);
  }
  else {
    // Step 10 and 11

@ -2647,6 +2648,8 @@ void InstanceKlass::remove_unshareable_info() {
    set_verified_at_dump_time();
  }

  _misc_flags.set_has_init_deps_processed(false);

  Klass::remove_unshareable_info();

  if (SystemDictionaryShared::has_class_failed_verification(this)) {

@ -1118,6 +1118,12 @@ public:
  bool can_be_verified_at_dumptime() const;
  void compute_has_loops_flag_for_methods();
#endif
  bool has_init_deps_processed() const { return _misc_flags.has_init_deps_processed(); }
  void set_has_init_deps_processed() {
    assert(is_initialized(), "");
    assert(!has_init_deps_processed(), "already set"); // one-off action
    _misc_flags.set_has_init_deps_processed(true);
  }

  u2 compute_modifier_flags() const;

@ -68,6 +68,7 @@ class InstanceKlassFlags {
  status(has_been_redefined       , 1 << 2) /* class has been redefined */ \
  status(is_scratch_class         , 1 << 3) /* class is the redefined scratch class */ \
  status(is_marked_dependent      , 1 << 4) /* class is marked as dependent during flushing and deoptimization */ \
  status(has_init_deps_processed  , 1 << 5) /* all init dependencies are processed */ \
  /* end of list */

#define IK_STATUS_ENUM_NAME(name, value) _misc_##name = value,

@ -730,6 +730,7 @@ public:
  virtual MetaspaceObj::Type type() const { return ClassType; }

  inline bool is_loader_alive() const;
  inline bool is_loader_present_and_alive() const;

  void clean_subklass();

@ -59,6 +59,11 @@ inline bool Klass::is_loader_alive() const {
  return class_loader_data()->is_alive();
}

inline bool Klass::is_loader_present_and_alive() const {
  ClassLoaderData* cld = class_loader_data();
  return (cld != nullptr) ? cld->is_alive() : false;
}

inline markWord Klass::prototype_header() const {
  assert(UseCompactObjectHeaders, "only use with compact object headers");
#ifdef _LP64

@ -60,6 +60,7 @@
#include "oops/objArrayOop.inline.hpp"
#include "oops/oop.inline.hpp"
#include "oops/symbol.hpp"
#include "oops/trainingData.hpp"
#include "prims/jvmtiExport.hpp"
#include "prims/methodHandles.hpp"
#include "runtime/atomic.hpp"

@ -408,6 +409,12 @@ void Method::metaspace_pointers_do(MetaspaceClosure* it) {

void Method::remove_unshareable_info() {
  unlink_method();
  if (method_data() != nullptr) {
    method_data()->remove_unshareable_info();
  }
  if (method_counters() != nullptr) {
    method_counters()->remove_unshareable_info();
  }
  if (CDSConfig::is_dumping_adapters() && _adapter != nullptr) {
    _adapter->remove_unshareable_info();
    _adapter = nullptr;

@ -417,6 +424,12 @@ void Method::remove_unshareable_info() {

void Method::restore_unshareable_info(TRAPS) {
  assert(is_method() && is_valid_method(this), "ensure C++ vtable is restored");
  if (method_data() != nullptr) {
    method_data()->restore_unshareable_info(CHECK);
  }
  if (method_counters() != nullptr) {
    method_counters()->restore_unshareable_info(CHECK);
  }
  if (_adapter != nullptr) {
    assert(_adapter->is_linked(), "must be");
    _from_compiled_entry = _adapter->get_c2i_entry();

@ -588,9 +601,43 @@ void Method::print_invocation_count(outputStream* st) {
#endif
}

MethodTrainingData* Method::training_data_or_null() const {
  MethodCounters* mcs = method_counters();
  if (mcs == nullptr) {
    return nullptr;
  } else {
    MethodTrainingData* mtd = mcs->method_training_data();
    if (mtd == mcs->method_training_data_sentinel()) {
      return nullptr;
    }
    return mtd;
  }
}

bool Method::init_training_data(MethodTrainingData* td) {
  MethodCounters* mcs = method_counters();
  if (mcs == nullptr) {
    return false;
  } else {
    return mcs->init_method_training_data(td);
  }
}

bool Method::install_training_method_data(const methodHandle& method) {
  MethodTrainingData* mtd = MethodTrainingData::find(method);
  if (mtd != nullptr && mtd->final_profile() != nullptr) {
    Atomic::replace_if_null(&method->_method_data, mtd->final_profile());
    return true;
  }
  return false;
}

// Build a MethodData* object to hold profiling information collected on this
// method when requested.
void Method::build_profiling_method_data(const methodHandle& method, TRAPS) {
  if (install_training_method_data(method)) {
    return;
  }
  // Do not profile the method if metaspace has hit an OOM previously
  // allocating profiling data. Callers clear pending exception so don't
  // add one here.

@ -1163,6 +1210,12 @@ void Method::unlink_method() {

  clear_method_data();
  clear_method_counters();
  clear_is_not_c1_compilable();
  clear_is_not_c1_osr_compilable();
  clear_is_not_c2_compilable();
  clear_is_not_c2_osr_compilable();
  clear_queued_for_compilation();

  remove_unshareable_flags();
}
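install_training_method_data() is where an archived profile pays off: before building a fresh MethodData (and spending warmup time filling it at tier 3), the archived final_profile() is published into Method::_method_data with a single replace-if-null. A standalone model of the publish step (note that the real function returns true whenever an archived profile exists, even if another thread won the race):

    #include <atomic>

    // Simplified: publish an archived profile only if no MethodData has been
    // installed yet; a concurrent installer winning the race is also fine.
    static bool install_archived_profile_model(std::atomic<void*>& mdo_slot,
                                               void* archived_profile) {
      if (archived_profile == nullptr) return false;  // nothing recorded
      void* expected = nullptr;
      mdo_slot.compare_exchange_strong(expected, archived_profile);
      return true;  // a profile exists; ours or a racer's is now in place
    }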
@ -59,6 +59,7 @@ class LocalVariableTableElement;
|
||||
class AdapterHandlerEntry;
|
||||
class MethodData;
|
||||
class MethodCounters;
|
||||
class MethodTrainingData;
|
||||
class ConstMethod;
|
||||
class InlineTableSizes;
|
||||
class nmethod;
|
||||
@ -313,6 +314,10 @@ class Method : public Metadata {
|
||||
MethodData* method_data() const {
|
||||
return _method_data;
|
||||
}
|
||||
void set_method_data(MethodData* data);
|
||||
|
||||
MethodTrainingData* training_data_or_null() const;
|
||||
bool init_training_data(MethodTrainingData* td);
|
||||
|
||||
// mark an exception handler as entered (used to prune dead catch blocks in C2)
|
||||
void set_exception_handler_entered(int handler_bci);
|
||||
@ -341,7 +346,7 @@ class Method : public Metadata {
|
||||
bool was_never_executed() { return !was_executed_more_than(0); }
|
||||
|
||||
static void build_profiling_method_data(const methodHandle& method, TRAPS);
|
||||
|
||||
static bool install_training_method_data(const methodHandle& method);
|
||||
static MethodCounters* build_method_counters(Thread* current, Method* m);
|
||||
|
||||
inline int interpreter_invocation_count() const;
|
||||
|
@ -22,13 +22,19 @@
|
||||
*
|
||||
*/
|
||||
|
||||
#include "cds/cdsConfig.hpp"
|
||||
#include "compiler/compiler_globals.hpp"
|
||||
#include "compiler/compilerOracle.hpp"
|
||||
#include "memory/metaspaceClosure.hpp"
|
||||
#include "oops/method.hpp"
|
||||
#include "oops/methodCounters.hpp"
|
||||
#include "oops/trainingData.hpp"
|
||||
#include "runtime/handles.inline.hpp"
|
||||
#include "memory/resourceArea.hpp"
|
||||
|
||||
MethodCounters::MethodCounters(const methodHandle& mh) :
|
||||
_method(mh()),
|
||||
_method_training_data(method_training_data_sentinel()),
|
||||
_prev_time(0),
|
||||
_rate(0),
|
||||
_highest_comp_level(0),
|
||||
@ -47,14 +53,21 @@ MethodCounters::MethodCounters(const methodHandle& mh) :
|
||||
_backedge_mask = right_n_bits(CompilerConfig::scaled_freq_log(Tier0BackedgeNotifyFreqLog, scale)) << InvocationCounter::count_shift;
|
||||
}
|
||||
|
||||
#if INCLUDE_CDS
|
||||
MethodCounters::MethodCounters() {
|
||||
// Used by cppVtables.cpp only
|
||||
assert(CDSConfig::is_dumping_static_archive() || UseSharedSpaces, "only for CDS");
|
||||
}
|
||||
#endif
|
||||
|
||||
MethodCounters* MethodCounters::allocate_no_exception(const methodHandle& mh) {
|
||||
ClassLoaderData* loader_data = mh->method_holder()->class_loader_data();
|
||||
return new(loader_data, size(), MetaspaceObj::MethodCountersType) MethodCounters(mh);
|
||||
return new(loader_data, method_counters_size(), MetaspaceObj::MethodCountersType) MethodCounters(mh);
|
||||
}
|
||||
|
||||
MethodCounters* MethodCounters::allocate_with_exception(const methodHandle& mh, TRAPS) {
|
||||
ClassLoaderData* loader_data = mh->method_holder()->class_loader_data();
|
||||
return new(loader_data, size(), MetaspaceObj::MethodCountersType, THREAD) MethodCounters(mh);
|
||||
return new(loader_data, method_counters_size(), MetaspaceObj::MethodCountersType, THREAD) MethodCounters(mh);
|
||||
}
|
||||
|
||||
void MethodCounters::clear_counters() {
|
||||
@ -68,7 +81,47 @@ void MethodCounters::clear_counters() {
|
||||
set_highest_osr_comp_level(0);
|
||||
}
|
||||
|
||||
void MethodCounters::metaspace_pointers_do(MetaspaceClosure* it) {
|
||||
log_trace(aot, training)("Iter(MethodCounters): %p", this);
|
||||
it->push(&_method);
|
||||
it->push(&_method_training_data);
|
||||
}
|
||||
|
||||
#if INCLUDE_CDS
|
||||
void MethodCounters::remove_unshareable_info() {
|
||||
}
|
||||
void MethodCounters::restore_unshareable_info(TRAPS) {
|
||||
_method_training_data = method_training_data_sentinel();
|
||||
}
|
||||
#endif // INCLUDE_CDS
|
||||
|
||||
void MethodCounters::print_on(outputStream* st) const {
|
||||
assert(is_methodCounters(), "should be method counters");
|
||||
st->print("method counters");
|
||||
print_data_on(st);
|
||||
}
|
||||
|
||||
void MethodCounters::print_data_on(outputStream* st) const {
|
||||
ResourceMark rm;
|
||||
st->print_cr(" - invocation_counter: %d carry=%d", _invocation_counter.count(), _invocation_counter.carry());
|
||||
st->print_cr(" - backedge_counter: %d carry=%d", _backedge_counter.count(), _backedge_counter.carry());
|
||||
st->print_cr(" - prev_time: " JLONG_FORMAT, _prev_time);
|
||||
st->print_cr(" - rate: %.3f", _rate);
|
||||
st->print_cr(" - invoke_mask: %d", _invoke_mask);
|
||||
st->print_cr(" - backedge_mask: %d", _backedge_mask);
|
||||
st->print_cr(" - prev_event_count: %d", _prev_event_count);
|
||||
#if COMPILER2_OR_JVMCI
|
||||
st->print_cr(" - interpreter_throwout_count: %u", _interpreter_throwout_count);
|
||||
#endif
|
||||
#if INCLUDE_JVMTI
|
||||
st->print_cr(" - number_of_breakpoints: %u", _number_of_breakpoints);
|
||||
#endif
|
||||
st->print_cr(" - highest_comp_level: %u", _highest_comp_level);
|
||||
st->print_cr(" - highest_osr_comp_level: %u", _highest_osr_comp_level);
|
||||
}
|
||||
|
||||
void MethodCounters::print_value_on(outputStream* st) const {
|
||||
assert(is_methodCounters(), "must be methodCounters");
|
||||
st->print("method counters");
|
||||
print_address_on(st);
|
||||
}
|
||||
|
@ -30,12 +30,25 @@
|
||||
#include "interpreter/invocationCounter.hpp"
|
||||
#include "utilities/align.hpp"
|
||||
|
||||
class MethodCounters : public MetaspaceObj {
|
||||
class MethodTrainingData;
|
||||
|
||||
class MethodCounters : public Metadata {
|
||||
friend class VMStructs;
|
||||
friend class JVMCIVMStructs;
|
||||
|
||||
// Used by CDS. These classes need to access the private default constructor.
|
||||
template <class T> friend class CppVtableTesterA;
|
||||
template <class T> friend class CppVtableTesterB;
|
||||
template <class T> friend class CppVtableCloner;
|
||||
|
||||
private:
|
||||
InvocationCounter _invocation_counter; // Incremented before each activation of the method - used to trigger frequency-based optimizations
|
||||
InvocationCounter _backedge_counter; // Incremented before each backedge taken - used to trigger frequency-based optimizations
|
||||
|
||||
// Back pointer to the Method*
|
||||
Method* _method;
|
||||
|
||||
Metadata* _method_training_data;
|
||||
jlong _prev_time; // Previous time the rate was acquired
|
||||
float _rate; // Events (invocation and backedge counter increments) per millisecond
|
||||
int _invoke_mask; // per-method Tier0InvokeNotifyFreqLog
|
||||
@ -51,20 +64,26 @@ class MethodCounters : public MetaspaceObj {
|
||||
u1 _highest_osr_comp_level; // Same for OSR level
|
||||
|
||||
MethodCounters(const methodHandle& mh);
|
||||
MethodCounters();
|
||||
|
||||
public:
|
||||
virtual bool is_methodCounters() const { return true; }
|
||||
Method* method() const { return _method; }
|
||||
static MethodCounters* allocate_no_exception(const methodHandle& mh);
|
||||
static MethodCounters* allocate_with_exception(const methodHandle& mh, TRAPS);
|
||||
|
||||
DEBUG_ONLY(bool on_stack() { return false; })
|
||||
void deallocate_contents(ClassLoaderData* loader_data) {}
|
||||
|
||||
void metaspace_pointers_do(MetaspaceClosure* it) { return; }
|
||||
|
||||
static int size() {
|
||||
static int method_counters_size() {
|
||||
return align_up((int)sizeof(MethodCounters), wordSize) / wordSize;
|
||||
}
|
||||
virtual int size() const {
|
||||
return method_counters_size();
|
||||
}
|
||||
|
||||
MetaspaceObj::Type type() const { return MethodCountersType; }
|
||||
void metaspace_pointers_do(MetaspaceClosure* iter);
|
||||
|
||||
void clear_counters();
|
||||
|
||||
#if COMPILER2_OR_JVMCI
|
||||
@ -127,7 +146,33 @@ class MethodCounters : public MetaspaceObj {
|
||||
return byte_offset_of(MethodCounters, _backedge_mask);
|
||||
}
|
||||
|
||||
const char* internal_name() const { return "{method counters}"; }
|
||||
virtual const char* internal_name() const { return "{method counters}"; }
|
||||
|
||||
Metadata* method_training_data_sentinel() {
|
||||
return this;
|
||||
}
|
||||
MethodTrainingData* method_training_data() const {
|
||||
return reinterpret_cast<MethodTrainingData*>(_method_training_data);
|
||||
}
|
||||
bool init_method_training_data(MethodTrainingData* td) {
|
||||
MethodTrainingData* cur = method_training_data();
|
||||
if (cur == td) {
|
||||
return true;
|
||||
}
|
||||
if (cur == nullptr || cur == reinterpret_cast<MethodTrainingData*>(method_training_data_sentinel())) {
|
||||
return Atomic::cmpxchg(reinterpret_cast<MethodTrainingData**>(&_method_training_data), cur, td) == cur;
|
||||
}
|
||||
return false;
|
||||
}
|
||||
|
||||
#if INCLUDE_CDS
|
||||
void remove_unshareable_info();
|
||||
void restore_unshareable_info(TRAPS);
|
||||
#endif
|
||||
|
||||
// Printing
|
||||
void print_on (outputStream* st) const;
|
||||
void print_value_on(outputStream* st) const;
|
||||
void print_data_on(outputStream* st) const;
|
||||
};
|
||||
#endif // SHARE_OOPS_METHODCOUNTERS_HPP
|
||||
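The init_method_training_data() helper added above is a one-shot publication protocol: the field moves from the sentinel (or null) to a concrete MethodTrainingData at most once, and racing writers are reconciled with a compare-and-swap. A minimal stand-alone sketch of the same idiom, using std::atomic in place of HotSpot's Atomic:: wrappers (all names below are illustrative, not part of the patch):

    #include <atomic>

    struct TrainingRecord;            // stand-in for MethodTrainingData

    class OneShotCache {
      // this-pointer = sentinel ("never looked up"), nullptr = cached miss,
      // anything else = published value
      std::atomic<TrainingRecord*> _slot{reinterpret_cast<TrainingRecord*>(this)};

     public:
      TrainingRecord* sentinel() { return reinterpret_cast<TrainingRecord*>(this); }

      bool try_publish(TrainingRecord* value) {
        TrainingRecord* cur = _slot.load(std::memory_order_acquire);
        if (cur == value) return true;              // already published
        if (cur == nullptr || cur == sentinel()) {  // still in a replaceable state
          // single CAS attempt, like the Atomic::cmpxchg above: succeed only
          // if nobody published a different value in between
          return _slot.compare_exchange_strong(cur, value);
        }
        return false;                               // a different value won the race
      }
    };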
@ -22,7 +22,9 @@
 *
 */

#include "cds/cdsConfig.hpp"
#include "ci/ciMethodData.hpp"
#include "classfile/systemDictionaryShared.hpp"
#include "classfile/vmSymbols.hpp"
#include "compiler/compilationPolicy.hpp"
#include "compiler/compilerDefinitions.inline.hpp"
@ -319,23 +321,65 @@ void VirtualCallTypeData::post_initialize(BytecodeStream* stream, MethodData* md
  }
}

static bool is_excluded(Klass* k) {
#if INCLUDE_CDS
  if (SafepointSynchronize::is_at_safepoint() &&
      CDSConfig::is_dumping_archive() &&
      CDSConfig::current_thread_is_vm_or_dumper()) {
    if (k->is_instance_klass() && !InstanceKlass::cast(k)->is_loaded()) {
      log_debug(aot, training)("Purged %s from MDO: unloaded class", k->name()->as_C_string());
      return true;
    } else {
      bool excluded = SystemDictionaryShared::should_be_excluded(k);
      if (excluded) {
        log_debug(aot, training)("Purged %s from MDO: excluded class", k->name()->as_C_string());
      }
      return excluded;
    }
  }
#endif
  return false;
}

void TypeStackSlotEntries::clean_weak_klass_links(bool always_clean) {
  for (int i = 0; i < _number_of_entries; i++) {
    intptr_t p = type(i);
    Klass* k = (Klass*)klass_part(p);
    if (k != nullptr && (always_clean || !k->is_loader_alive())) {
    if (k != nullptr) {
      if (!always_clean && k->is_instance_klass() && InstanceKlass::cast(k)->is_not_initialized()) {
        continue; // skip not-yet-initialized classes // TODO: maybe clear the slot instead?
      }
      if (always_clean || !k->is_loader_present_and_alive() || is_excluded(k)) {
        set_type(i, with_status((Klass*)nullptr, p));
      }
    }
  }
}

void TypeStackSlotEntries::metaspace_pointers_do(MetaspaceClosure* it) {
  for (int i = 0; i < _number_of_entries; i++) {
    Klass** k = (Klass**)type_adr(i); // tagged
    it->push(k);
  }
}

void ReturnTypeEntry::clean_weak_klass_links(bool always_clean) {
  intptr_t p = type();
  Klass* k = (Klass*)klass_part(p);
  if (k != nullptr && (always_clean || !k->is_loader_alive())) {
  if (k != nullptr) {
    if (!always_clean && k->is_instance_klass() && InstanceKlass::cast(k)->is_not_initialized()) {
      return; // skip not-yet-initialized classes // TODO: maybe clear the slot instead?
    }
    if (always_clean || !k->is_loader_present_and_alive() || is_excluded(k)) {
      set_type(with_status((Klass*)nullptr, p));
    }
  }
}

void ReturnTypeEntry::metaspace_pointers_do(MetaspaceClosure* it) {
  Klass** k = (Klass**)type_adr(); // tagged
  it->push(k);
}

bool TypeEntriesAtCall::return_profiling_enabled() {
  return MethodData::profile_return();
@ -412,11 +456,23 @@ void VirtualCallTypeData::print_data_on(outputStream* st, const char* extra) con
void ReceiverTypeData::clean_weak_klass_links(bool always_clean) {
  for (uint row = 0; row < row_limit(); row++) {
    Klass* p = receiver(row);
    if (p != nullptr && (always_clean || !p->is_loader_alive())) {
    if (p != nullptr) {
      if (!always_clean && p->is_instance_klass() && InstanceKlass::cast(p)->is_not_initialized()) {
        continue; // skip not-yet-initialized classes // TODO: maybe clear the slot instead?
      }
      if (always_clean || !p->is_loader_present_and_alive() || is_excluded(p)) {
        clear_row(row);
      }
    }
  }
}
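The three clean_weak_klass_links() variants above all make the same per-Klass decision; only the clearing action differs. A hedged sketch of that decision factored into one predicate (should_clear_entry is an illustrative name, not a helper the patch introduces):

    // Returns true when a profiled Klass entry should be wiped from the MDO.
    // Mirrors the logic repeated in TypeStackSlotEntries, ReturnTypeEntry and
    // ReceiverTypeData above; is_excluded() is the dump-time filter defined earlier.
    static bool should_clear_entry(Klass* k, bool always_clean) {
      if (k == nullptr) {
        return false;                       // empty slot, nothing to clear
      }
      if (!always_clean && k->is_instance_klass() &&
          InstanceKlass::cast(k)->is_not_initialized()) {
        return false;                       // keep not-yet-initialized classes
      }
      return always_clean || !k->is_loader_present_and_alive() || is_excluded(k);
    }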
void ReceiverTypeData::metaspace_pointers_do(MetaspaceClosure *it) {
  for (uint row = 0; row < row_limit(); row++) {
    Klass** recv = (Klass**)intptr_at_adr(receiver_cell_index(row));
    it->push(recv);
  }
}

void ReceiverTypeData::print_receiver_data_on(outputStream* st) const {
  uint row;
@ -646,6 +702,11 @@ void ParametersTypeData::print_data_on(outputStream* st, const char* extra) cons
  st->cr();
}

void SpeculativeTrapData::metaspace_pointers_do(MetaspaceClosure* it) {
  Method** m = (Method**)intptr_at_adr(speculative_trap_method);
  it->push(m);
}

void SpeculativeTrapData::print_data_on(outputStream* st, const char* extra) const {
  print_shared(st, "SpeculativeTrapData", extra);
  tab(st);
@ -1223,12 +1284,19 @@ void MethodData::post_initialize(BytecodeStream* stream) {
MethodData::MethodData(const methodHandle& method)
  : _method(method()),
    // Holds Compile_lock
    _extra_data_lock(Mutex::nosafepoint, "MDOExtraData_lock"),
    _compiler_counters(),
    _parameters_type_data_di(parameters_uninitialized) {
  _extra_data_lock = nullptr;
  initialize();
}

#if INCLUDE_CDS
MethodData::MethodData() {
  // Used by cppVtables.cpp only
  assert(CDSConfig::is_dumping_static_archive() || UseSharedSpaces, "only for CDS");
}
#endif

// Reinitialize the storage of an existing MDO at a safepoint. Doing it this way will ensure it's
// not being accessed while the contents are being rewritten.
class VM_ReinitializeMDO: public VM_Operation {
@ -1364,7 +1432,7 @@ void MethodData::init() {
}

bool MethodData::is_mature() const {
  return CompilationPolicy::is_mature(_method);
  return CompilationPolicy::is_mature(const_cast<MethodData*>(this));
}

// Translate a bci to its corresponding data index (di).
@ -1552,7 +1620,8 @@ void MethodData::print_value_on(outputStream* st) const {
}

void MethodData::print_data_on(outputStream* st) const {
  ConditionalMutexLocker ml(extra_data_lock(), !extra_data_lock()->owned_by_self(),
  Mutex* lock = const_cast<MethodData*>(this)->extra_data_lock();
  ConditionalMutexLocker ml(lock, !lock->owned_by_self(),
                            Mutex::_no_safepoint_check_flag);
  ResourceMark rm;
  ProfileData* data = first_data();
@ -1725,8 +1794,26 @@ bool MethodData::profile_parameters_for_method(const methodHandle& m) {
}

void MethodData::metaspace_pointers_do(MetaspaceClosure* it) {
  log_trace(aot)("Iter(MethodData): %p", this);
  log_trace(aot, training)("Iter(MethodData): %p for %p %s", this, _method, _method->name_and_sig_as_C_string());
  it->push(&_method);
  if (_parameters_type_data_di != no_parameters) {
    parameters_type_data()->metaspace_pointers_do(it);
  }
  for (ProfileData* data = first_data(); is_valid(data); data = next_data(data)) {
    data->metaspace_pointers_do(it);
  }
  for (DataLayout* dp = extra_data_base();
       dp < extra_data_limit();
       dp = MethodData::next_extra(dp)) {
    if (dp->tag() == DataLayout::speculative_trap_data_tag) {
      ResourceMark rm;
      SpeculativeTrapData* data = new SpeculativeTrapData(dp);
      data->metaspace_pointers_do(it);
    } else if (dp->tag() == DataLayout::no_tag ||
               dp->tag() == DataLayout::arg_info_data_tag) {
      break;
    }
  }
}
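metaspace_pointers_do() implementations like the one above exist so the CDS archiver can enumerate every embedded metaspace pointer and fix it up when the object moves into the archive buffer. A toy sketch of the visiting side of that contract (the closure below only counts; HotSpot's real MetaspaceClosure marks and relocates, so treat this purely as an illustration of the push(&field) shape):

    // Each push(&field) hands the closure the *address* of a pointer field,
    // so the real closure can rewrite the field in place after relocation.
    class CountingClosure {
      int _count = 0;
     public:
      template <typename T>
      void push(T** slot) {
        if (*slot != nullptr) {
          _count++;   // a real closure would mark and relocate *slot here
        }
      }
      int count() const { return _count; }
    };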
void MethodData::clean_extra_data_helper(DataLayout* dp, int shift, bool reset) {
@ -1758,6 +1845,9 @@ class CleanExtraDataKlassClosure : public CleanExtraDataClosure {
public:
  CleanExtraDataKlassClosure(bool always_clean) : _always_clean(always_clean) {}
  bool is_live(Method* m) {
    if (!_always_clean && m->method_holder()->is_instance_klass() && InstanceKlass::cast(m->method_holder())->is_not_initialized()) {
      return true; // TODO: treat as unloaded instead?
    }
    return !(_always_clean) && m->method_holder()->is_loader_alive();
  }
};
@ -1769,6 +1859,20 @@ public:
  bool is_live(Method* m) { return !m->is_old(); }
};

Mutex* MethodData::extra_data_lock() {
  Mutex* lock = Atomic::load(&_extra_data_lock);
  if (lock == nullptr) {
    // This lock could be acquired while we are holding DumpTimeTable_lock/nosafepoint
    lock = new Mutex(Mutex::nosafepoint-1, "MDOExtraData_lock");
    Mutex* old = Atomic::cmpxchg(&_extra_data_lock, (Mutex*)nullptr, lock);
    if (old != nullptr) {
      // Another thread created the lock before us. Use that lock instead.
      delete lock;
      return old;
    }
  }
  return lock;
}
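extra_data_lock() above replaces an embedded Mutex with one allocated lazily on first use, so archived MethodData carries no live lock at dump time. The allocate-then-CAS-then-delete-on-loss idiom generalizes; a hedged stand-alone version, with std::atomic and std::mutex standing in for HotSpot's Atomic and Mutex:

    #include <atomic>
    #include <mutex>

    class LazyLock {
      std::atomic<std::mutex*> _lock{nullptr};
     public:
      std::mutex* get() {
        std::mutex* lock = _lock.load(std::memory_order_acquire);
        if (lock == nullptr) {
          std::mutex* fresh = new std::mutex();
          std::mutex* expected = nullptr;
          if (!_lock.compare_exchange_strong(expected, fresh)) {
            delete fresh;       // another thread won the race; use its lock
            return expected;    // the CAS loaded the winner's pointer here
          }
          lock = fresh;
        }
        return lock;
      }
      ~LazyLock() { delete _lock.load(); }
    };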
// Remove SpeculativeTrapData entries that reference an unloaded or
// redefined method
@ -1785,7 +1889,7 @@ void MethodData::clean_extra_data(CleanExtraDataClosure* cl) {
      SpeculativeTrapData* data = new SpeculativeTrapData(dp);
      Method* m = data->method();
      assert(m != nullptr, "should have a method");
      if (!cl->is_live(m)) {
      if (is_excluded(m->method_holder()) || !cl->is_live(m)) {
        // "shift" accumulates the number of cells for dead
        // SpeculativeTrapData entries that have been seen so
        // far. Following entries must be shifted left by that many
@ -1889,13 +1993,23 @@ void MethodData::release_C_heap_structures() {
#endif
}

#if INCLUDE_CDS
void MethodData::remove_unshareable_info() {
  _extra_data_lock = nullptr;
}

void MethodData::restore_unshareable_info(TRAPS) {
  //_extra_data_lock = new Mutex(Mutex::nosafepoint, "MDOExtraData_lock");
}
#endif // INCLUDE_CDS

#ifdef ASSERT
void MethodData::check_extra_data_locked() const {
  // Cast const away, just to be able to verify the lock
  // Usually we only want non-const accesses on the lock,
  // so this here is an exception.
  MethodData* self = (MethodData*)this;
  assert(self->extra_data_lock()->owned_by_self(), "must have lock");
  assert(self->extra_data_lock()->owned_by_self() || CDSConfig::is_dumping_archive(), "must have lock");
  assert(!Thread::current()->is_Java_thread() ||
         JavaThread::current()->is_in_no_safepoint_scope(),
         "JavaThread must have NoSafepointVerifier inside lock scope");

@ -29,7 +29,6 @@
#include "interpreter/invocationCounter.hpp"
#include "oops/metadata.hpp"
#include "oops/method.hpp"
#include "oops/oop.hpp"
#include "runtime/atomic.hpp"
#include "runtime/deoptimization.hpp"
#include "runtime/mutex.hpp"
@ -202,6 +201,9 @@ public:
  intptr_t cell_at(int index) const {
    return _cells[index];
  }
  intptr_t* cell_at_adr(int index) const {
    return const_cast<intptr_t*>(&_cells[index]);
  }

  bool set_flag_at(u1 flag_number) {
    const u1 bit = 1 << flag_number;
@ -345,6 +347,10 @@ protected:
    assert(0 <= index && index < cell_count(), "oob");
    return data()->cell_at(index);
  }
  intptr_t* intptr_at_adr(int index) const {
    assert(0 <= index && index < cell_count(), "oob");
    return data()->cell_at_adr(index);
  }
  void set_uint_at(int index, uint value) {
    set_intptr_at(index, (intptr_t) value);
  }
@ -362,12 +368,6 @@ protected:
  int int_at_unchecked(int index) const {
    return (int)data()->cell_at(index);
  }
  void set_oop_at(int index, oop value) {
    set_intptr_at(index, cast_from_oop<intptr_t>(value));
  }
  oop oop_at(int index) const {
    return cast_to_oop(intptr_at(index));
  }

  void set_flag_at(u1 flag_number) {
    data()->set_flag_at(flag_number);
@ -488,6 +488,9 @@ public:
  // GC support
  virtual void clean_weak_klass_links(bool always_clean) {}

  // CDS support
  virtual void metaspace_pointers_do(MetaspaceClosure* it) {}

  // CI translation: ProfileData can represent both MethodDataOop data
  // as well as CIMethodData data. This function is provided for translating
  // an oop in a ProfileData to the ci equivalent. Generally speaking,
@ -853,6 +856,11 @@ public:
    return _pd->intptr_at(type_offset_in_cells(i));
  }

  intptr_t* type_adr(int i) const {
    assert(i >= 0 && i < _number_of_entries, "oob");
    return _pd->intptr_at_adr(type_offset_in_cells(i));
  }

  // set type for entry i
  void set_type(int i, intptr_t k) {
    assert(i >= 0 && i < _number_of_entries, "oob");
@ -874,6 +882,9 @@ public:
  // GC support
  void clean_weak_klass_links(bool always_clean);

  // CDS support
  virtual void metaspace_pointers_do(MetaspaceClosure* it);

  void print_data_on(outputStream* st) const;
};

@ -898,6 +909,10 @@ public:
    return _pd->intptr_at(_base_off);
  }

  intptr_t* type_adr() const {
    return _pd->intptr_at_adr(_base_off);
  }

  void set_type(intptr_t k) {
    _pd->set_intptr_at(_base_off, k);
  }
@ -917,6 +932,9 @@ public:
  // GC support
  void clean_weak_klass_links(bool always_clean);

  // CDS support
  virtual void metaspace_pointers_do(MetaspaceClosure* it);

  void print_data_on(outputStream* st) const;
};

@ -1108,6 +1126,16 @@ public:
    }
  }

  // CDS support
  virtual void metaspace_pointers_do(MetaspaceClosure* it) {
    if (has_arguments()) {
      _args.metaspace_pointers_do(it);
    }
    if (has_return()) {
      _ret.metaspace_pointers_do(it);
    }
  }

  virtual void print_data_on(outputStream* st, const char* extra = nullptr) const;
};

@ -1218,6 +1246,9 @@ public:
  // GC support
  virtual void clean_weak_klass_links(bool always_clean);

  // CDS support
  virtual void metaspace_pointers_do(MetaspaceClosure* it);

  void print_receiver_data_on(outputStream* st) const;
  void print_data_on(outputStream* st, const char* extra = nullptr) const;
};
@ -1383,6 +1414,17 @@ public:
    }
  }

  // CDS support
  virtual void metaspace_pointers_do(MetaspaceClosure* it) {
    ReceiverTypeData::metaspace_pointers_do(it);
    if (has_arguments()) {
      _args.metaspace_pointers_do(it);
    }
    if (has_return()) {
      _ret.metaspace_pointers_do(it);
    }
  }

  virtual void print_data_on(outputStream* st, const char* extra = nullptr) const;
};

@ -1566,10 +1608,6 @@ protected:
    int aindex = index + array_start_off_set;
    return int_at(aindex);
  }
  oop array_oop_at(int index) const {
    int aindex = index + array_start_off_set;
    return oop_at(aindex);
  }
  void array_set_int_at(int index, int value) {
    int aindex = index + array_start_off_set;
    set_int_at(aindex, value);
@ -1782,6 +1820,11 @@ public:
    _parameters.clean_weak_klass_links(always_clean);
  }

  // CDS support
  virtual void metaspace_pointers_do(MetaspaceClosure* it) {
    _parameters.metaspace_pointers_do(it);
  }

  virtual void print_data_on(outputStream* st, const char* extra = nullptr) const;

  static ByteSize stack_slot_offset(int i) {
@ -1852,6 +1895,9 @@ public:
    return cell_offset(speculative_trap_method);
  }

  // CDS support
  virtual void metaspace_pointers_do(MetaspaceClosure* it);

  virtual void print_data_on(outputStream* st, const char* extra = nullptr) const;
};

@ -1962,13 +2008,15 @@ class MethodData : public Metadata {
  // Cached hint for bci_to_dp and bci_to_data
  int _hint_di;

  Mutex _extra_data_lock;
  Mutex* volatile _extra_data_lock;

  MethodData(const methodHandle& method);

  void initialize();

public:
  MethodData();

  static MethodData* allocate(ClassLoaderData* loader_data, const methodHandle& method, TRAPS);

  virtual bool is_methodData() const { return true; }
@ -2266,6 +2314,11 @@ public:
  }
#endif

#if INCLUDE_CDS
  void remove_unshareable_info();
  void restore_unshareable_info(TRAPS);
#endif

  void set_would_profile(bool p) { _would_profile = p ? profile : no_profile; }
  bool would_profile() const     { return _would_profile != no_profile; }

@ -2504,7 +2557,7 @@ public:

  void clean_method_data(bool always_clean);
  void clean_weak_method_links();
  Mutex* extra_data_lock() const { return const_cast<Mutex*>(&_extra_data_lock); }
  Mutex* extra_data_lock();
  void check_extra_data_locked() const NOT_DEBUG_RETURN;
};
src/hotspot/share/oops/trainingData.cpp (new file, 794 lines)
@ -0,0 +1,794 @@
/*
 * Copyright (c) 2025, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "ci/ciEnv.hpp"
#include "ci/ciMetadata.hpp"
#include "cds/cdsConfig.hpp"
#include "cds/metaspaceShared.hpp"
#include "classfile/classLoaderData.hpp"
#include "classfile/compactHashtable.hpp"
#include "classfile/javaClasses.hpp"
#include "classfile/symbolTable.hpp"
#include "classfile/systemDictionaryShared.hpp"
#include "compiler/compileTask.hpp"
#include "memory/metadataFactory.hpp"
#include "memory/metaspaceClosure.hpp"
#include "memory/resourceArea.hpp"
#include "oops/method.hpp"
#include "oops/methodCounters.hpp"
#include "oops/trainingData.hpp"
#include "runtime/arguments.hpp"
#include "runtime/javaThread.inline.hpp"
#include "runtime/jniHandles.inline.hpp"
#include "utilities/growableArray.hpp"

TrainingData::TrainingDataSet TrainingData::_training_data_set(1024, 0x3fffffff);
TrainingData::TrainingDataDictionary TrainingData::_archived_training_data_dictionary;
TrainingData::TrainingDataDictionary TrainingData::_archived_training_data_dictionary_for_dumping;
TrainingData::DumptimeTrainingDataDictionary* TrainingData::_dumptime_training_data_dictionary = nullptr;
int TrainingData::TrainingDataLocker::_lock_mode;
volatile bool TrainingData::TrainingDataLocker::_snapshot = false;

MethodTrainingData::MethodTrainingData() {
  // Used by cppVtables.cpp only
  assert(CDSConfig::is_dumping_static_archive() || UseSharedSpaces, "only for CDS");
}

KlassTrainingData::KlassTrainingData() {
  // Used by cppVtables.cpp only
  assert(CDSConfig::is_dumping_static_archive() || UseSharedSpaces, "only for CDS");
}

CompileTrainingData::CompileTrainingData() : _level(-1), _compile_id(-1) {
  // Used by cppVtables.cpp only
  assert(CDSConfig::is_dumping_static_archive() || UseSharedSpaces, "only for CDS");
}

void TrainingData::initialize() {
  // this is a nop if training modes are not enabled
  if (have_data() || need_data()) {
    // Data structures that we have do not currently support iterative training. So you cannot replay
    // and train at the same time. Going forward we may want to adjust iteration/search to enable that.
    guarantee(have_data() != need_data(), "Iterative training is not supported");
    TrainingDataLocker::initialize();
  }
}

static void verify_archived_entry(TrainingData* td, const TrainingData::Key* k) {
  guarantee(TrainingData::Key::can_compute_cds_hash(k), "");
  TrainingData* td1 = TrainingData::lookup_archived_training_data(k);
  guarantee(td == td1, "");
}

void TrainingData::verify() {
  if (TrainingData::have_data()) {
    archived_training_data_dictionary()->iterate([&](TrainingData* td) {
      if (td->is_KlassTrainingData()) {
        KlassTrainingData* ktd = td->as_KlassTrainingData();
        if (ktd->has_holder() && ktd->holder()->is_loaded()) {
          Key k(ktd->holder());
          verify_archived_entry(td, &k);
        }
        ktd->verify();
      } else if (td->is_MethodTrainingData()) {
        MethodTrainingData* mtd = td->as_MethodTrainingData();
        if (mtd->has_holder() && mtd->holder()->method_holder()->is_loaded()) {
          Key k(mtd->holder());
          verify_archived_entry(td, &k);
        }
        mtd->verify();
      } else if (td->is_CompileTrainingData()) {
        td->as_CompileTrainingData()->verify();
      }
    });
  }
}

MethodTrainingData* MethodTrainingData::make(const methodHandle& method, bool null_if_not_found, bool use_cache) {
  MethodTrainingData* mtd = nullptr;
  if (!have_data() && !need_data()) {
    return mtd;
  }
  // Try grabbing the cached value first.
  // The cached value is stored in MethodCounters and the following are the
  // possible states:
  // 1. Cached value is method_training_data_sentinel().
  //    This is the initial state and requires a full lookup.
  // 2. Cached value is null.
  //    The lookup failed the last time; if we don't plan to create a new TD object,
  //    i.e. null_if_not_found == true, then just return null.
  // 3. Cached value is not null.
  //    Return it; the value of training_data_lookup_failed doesn't matter.
  MethodCounters* mcs = method->method_counters();
  if (mcs != nullptr) {
    mtd = mcs->method_training_data();
    if (mtd != nullptr && mtd != mcs->method_training_data_sentinel()) {
      return mtd;
    }
    if (null_if_not_found && mtd == nullptr) {
      assert(mtd == nullptr, "No training data found");
      return nullptr;
    }
  } else if (use_cache) {
    mcs = Method::build_method_counters(Thread::current(), method());
  }

  TrainingData* td = nullptr;

  Key key(method());
  if (have_data()) {
    td = lookup_archived_training_data(&key);
    if (td != nullptr) {
      mtd = td->as_MethodTrainingData();
    } else {
      mtd = nullptr;
    }
    // Cache the pointer to the MTD in MethodCounters for faster lookup (it could be null if not found)
    method->init_training_data(mtd);
  }

  if (need_data()) {
    TrainingDataLocker l;
    td = training_data_set()->find(&key);
    if (td == nullptr) {
      if (!null_if_not_found) {
        KlassTrainingData* ktd = KlassTrainingData::make(method->method_holder());
        if (ktd == nullptr) {
          return nullptr; // allocation failure
        }
        mtd = MethodTrainingData::allocate(method(), ktd);
        if (mtd == nullptr) {
          return nullptr; // allocation failure
        }
        td = training_data_set()->install(mtd);
        assert(td == mtd, "");
      } else {
        mtd = nullptr;
      }
    } else {
      mtd = td->as_MethodTrainingData();
    }
    // Cache the pointer to the MTD in MethodCounters for faster lookup (it could be null if not found)
    method->init_training_data(mtd);
  }

  return mtd;
}
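MethodTrainingData::make() above is a find-or-create behind the three-state MethodCounters cache. A self-contained toy model of the same protocol (an absent slot plays the sentinel role, a stored nullptr is a remembered miss; all names below are illustrative, not HotSpot API):

    #include <string>
    #include <unordered_map>

    struct Record { std::string name; };

    class TrainingCache {
      // absent key = sentinel state; present-with-nullptr = cached miss
      std::unordered_map<std::string, Record*> _slots;
      std::unordered_map<std::string, Record>  _storage;  // backing "training data set"

     public:
      Record* find_or_create(const std::string& key, bool null_if_not_found) {
        auto slot = _slots.find(key);
        if (slot != _slots.end()) {
          if (slot->second != nullptr) return slot->second;  // state 3: cached hit
          if (null_if_not_found)       return nullptr;       // state 2: cached miss
        }
        // state 1: sentinel -- do the full lookup, optionally creating the record
        auto it = _storage.find(key);
        if (it == _storage.end() && !null_if_not_found) {
          it = _storage.emplace(key, Record{key}).first;     // install new record
        }
        Record* result = (it == _storage.end()) ? nullptr : &it->second;
        _slots[key] = result;                                // cache hit *or* miss
        return result;
      }
    };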
void MethodTrainingData::print_on(outputStream* st, bool name_only) const {
  if (has_holder()) {
    _klass->print_on(st, true);
    st->print(".");
    name()->print_symbol_on(st);
    signature()->print_symbol_on(st);
  }
  if (name_only) {
    return;
  }
  if (!has_holder()) {
    st->print("[SYM]");
  }
  if (_level_mask) {
    st->print(" LM%d", _level_mask);
  }
  st->print(" mc=%p mdo=%p", _final_counters, _final_profile);
}

CompileTrainingData* CompileTrainingData::make(CompileTask* task) {
  int level = task->comp_level();
  int compile_id = task->compile_id();
  Thread* thread = Thread::current();
  methodHandle m(thread, task->method());
  if (m->method_holder() == nullptr) {
    return nullptr; // do not record (dynamically generated method)
  }
  MethodTrainingData* mtd = MethodTrainingData::make(m);
  if (mtd == nullptr) {
    return nullptr; // allocation failure
  }
  mtd->notice_compilation(level);

  TrainingDataLocker l;
  CompileTrainingData* ctd = CompileTrainingData::allocate(mtd, level, compile_id);
  if (ctd != nullptr) {
    CompileTrainingData*& last_ctd = mtd->_last_toplevel_compiles[level - 1];
    if (last_ctd != nullptr) {
      assert(mtd->highest_top_level() >= level, "consistency");
      if (last_ctd->compile_id() < compile_id) {
        last_ctd->clear_init_deps();
        last_ctd = ctd;
      }
    } else {
      last_ctd = ctd;
      mtd->notice_toplevel_compilation(level);
    }
  }
  return ctd;
}


void CompileTrainingData::dec_init_deps_left(KlassTrainingData* ktd) {
  LogStreamHandle(Trace, training) log;
  if (log.is_enabled()) {
    log.print("CTD "); print_on(&log); log.cr();
    log.print("KTD "); ktd->print_on(&log); log.cr();
  }
  assert(ktd != nullptr && ktd->has_holder(), "");
  assert(_init_deps.contains(ktd), "");
  assert(_init_deps_left > 0, "");

  uint init_deps_left1 = Atomic::sub(&_init_deps_left, 1);

  if (log.is_enabled()) {
    uint init_deps_left2 = compute_init_deps_left();
    log.print("init_deps_left: %d (%d)", init_deps_left1, init_deps_left2);
    ktd->print_on(&log, true);
  }
}

uint CompileTrainingData::compute_init_deps_left(bool count_initialized) {
  int left = 0;
  for (int i = 0; i < _init_deps.length(); i++) {
    KlassTrainingData* ktd = _init_deps.at(i);
    // Ignore symbolic refs and already initialized classes (unless explicitly requested).
    if (ktd->has_holder()) {
      InstanceKlass* holder = ktd->holder();
      if (!ktd->holder()->is_initialized() || count_initialized) {
        ++left;
      } else if (holder->defined_by_other_loaders()) {
        Key k(holder);
        if (CDS_ONLY(!Key::can_compute_cds_hash(&k)) NOT_CDS(true)) {
          ++left;
        }
      }
    }
  }
  return left;
}
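The pair dec_init_deps_left()/compute_init_deps_left() above maintains a countdown of class-initialization prerequisites for each recorded compilation; once it reaches zero, every dependency is known to be initialized. A hedged toy version of the same countdown (the class below is illustrative, not HotSpot code, and assumes a single updater per flag):

    #include <atomic>
    #include <vector>

    // Each recorded compilation tracks how many of its prerequisite classes
    // are still uninitialized; initialization events decrement the counter.
    class InitDepCountdown {
      std::vector<bool>     _initialized;  // one flag per dependency
      std::atomic<unsigned> _left;

     public:
      explicit InitDepCountdown(size_t deps)
          : _initialized(deps, false), _left(static_cast<unsigned>(deps)) {}

      // Called when dependency i transitions to "fully initialized".
      void notice_initialized(size_t i) {
        if (!_initialized[i]) {
          _initialized[i] = true;
          _left.fetch_sub(1, std::memory_order_relaxed);  // atomic, like Atomic::sub
        }
      }

      bool ready() const { return _left.load(std::memory_order_relaxed) == 0; }
    };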
void CompileTrainingData::print_on(outputStream* st, bool name_only) const {
  _method->print_on(st, true);
  st->print("#%dL%d", _compile_id, _level);
  if (name_only) {
    return;
  }
  if (_init_deps.length() > 0) {
    if (_init_deps_left > 0) {
      st->print(" udeps=%d", _init_deps_left);
    }
    for (int i = 0, len = _init_deps.length(); i < len; i++) {
      st->print(" dep:");
      _init_deps.at(i)->print_on(st, true);
    }
  }
}

void CompileTrainingData::notice_inlined_method(CompileTask* task,
                                                const methodHandle& method) {
  MethodTrainingData* mtd = MethodTrainingData::make(method);
  if (mtd != nullptr) {
    mtd->notice_compilation(task->comp_level(), true);
  }
}

void CompileTrainingData::notice_jit_observation(ciEnv* env, ciBaseObject* what) {
  // A JIT is starting to look at class k.
  // We could follow the queries that it is making, but it is
  // simpler to assume, conservatively, that the JIT will
  // eventually depend on the initialization state of k.
  CompileTask* task = env->task();
  assert(task != nullptr, "");
  Method* method = task->method();
  InstanceKlass* compiling_klass = method->method_holder();
  if (what->is_metadata()) {
    ciMetadata* md = what->as_metadata();
    if (md->is_loaded() && md->is_instance_klass()) {
      ciInstanceKlass* cik = md->as_instance_klass();

      if (cik->is_initialized()) {
        InstanceKlass* ik = md->as_instance_klass()->get_instanceKlass();
        KlassTrainingData* ktd = KlassTrainingData::make(ik);
        if (ktd == nullptr) {
          // Allocation failure or snapshot in progress
          return;
        }
        // This JIT task is (probably) requesting that ik be initialized,
        // so add him to my _init_deps list.
        TrainingDataLocker l;
        add_init_dep(ktd);
      }
    }
  }
}

void KlassTrainingData::prepare(Visitor& visitor) {
  if (visitor.is_visited(this)) {
    return;
  }
  visitor.visit(this);
  ClassLoaderData* loader_data = nullptr;
  if (_holder != nullptr) {
    loader_data = _holder->class_loader_data();
  } else {
    loader_data = java_lang_ClassLoader::loader_data(SystemDictionary::java_system_loader()); // default CLD
  }
  _comp_deps.prepare(loader_data);
}

void MethodTrainingData::prepare(Visitor& visitor) {
  if (visitor.is_visited(this)) {
    return;
  }
  visitor.visit(this);
  klass()->prepare(visitor);
  if (has_holder()) {
    _final_counters = holder()->method_counters();
    _final_profile  = holder()->method_data();
    assert(_final_profile == nullptr || _final_profile->method() == holder(), "");
  }
  for (int i = 0; i < CompLevel_count - 1; i++) {
    CompileTrainingData* ctd = _last_toplevel_compiles[i];
    if (ctd != nullptr) {
      ctd->prepare(visitor);
    }
  }
}

void CompileTrainingData::prepare(Visitor& visitor) {
  if (visitor.is_visited(this)) {
    return;
  }
  visitor.visit(this);
  method()->prepare(visitor);
  ClassLoaderData* loader_data = _method->klass()->class_loader_data();
  _init_deps.prepare(loader_data);
  _ci_records.prepare(loader_data);
}

KlassTrainingData* KlassTrainingData::make(InstanceKlass* holder, bool null_if_not_found) {
  Key key(holder);
  TrainingData* td = CDS_ONLY(have_data() ? lookup_archived_training_data(&key) :) nullptr;
  KlassTrainingData* ktd = nullptr;
  if (td != nullptr) {
    ktd = td->as_KlassTrainingData();
    guarantee(!ktd->has_holder() || ktd->holder() == holder, "");
    if (ktd->has_holder()) {
      return ktd;
    } else {
      ktd = nullptr;
    }
  }
  if (need_data()) {
    TrainingDataLocker l;
    td = training_data_set()->find(&key);
    if (td == nullptr) {
      if (null_if_not_found) {
        return nullptr;
      }
      ktd = KlassTrainingData::allocate(holder);
      if (ktd == nullptr) {
        return nullptr; // allocation failure
      }
      td = training_data_set()->install(ktd);
      assert(ktd == td, "");
    } else {
      ktd = td->as_KlassTrainingData();
      guarantee(ktd->holder() != nullptr, "null holder");
    }
    assert(ktd != nullptr, "");
    guarantee(ktd->holder() == holder, "");
  }
  return ktd;
}

void KlassTrainingData::print_on(outputStream* st, bool name_only) const {
  if (has_holder()) {
    name()->print_symbol_on(st);
    switch (holder()->init_state()) {
      case InstanceKlass::allocated:            st->print("[A]"); break;
      case InstanceKlass::loaded:               st->print("[D]"); break;
      case InstanceKlass::linked:               st->print("[L]"); break;
      case InstanceKlass::being_initialized:    st->print("[i]"); break;
      case InstanceKlass::fully_initialized:                      break;
      case InstanceKlass::initialization_error: st->print("[E]"); break;
      default: fatal("unknown state: %d", holder()->init_state());
    }
    if (holder()->is_interface()) {
      st->print("I");
    }
  } else {
    st->print("[SYM]");
  }
  if (name_only) {
    return;
  }
  if (_comp_deps.length() > 0) {
    for (int i = 0, len = _comp_deps.length(); i < len; i++) {
      st->print(" dep:");
      _comp_deps.at(i)->print_on(st, true);
    }
  }
}

KlassTrainingData::KlassTrainingData(InstanceKlass* klass) : TrainingData(klass) {
  if (holder() == klass) {
    return; // no change to make
  }

  jobject hmj = _holder_mirror;
  if (hmj != nullptr) { // clear out previous handle, if any
    _holder_mirror = nullptr;
    assert(JNIHandles::is_global_handle(hmj), "");
    JNIHandles::destroy_global(hmj);
  }

  if (klass != nullptr) {
    Handle hm(JavaThread::current(), klass->java_mirror());
    hmj = JNIHandles::make_global(hm);
    Atomic::release_store(&_holder_mirror, hmj);
  }

  Atomic::release_store(&_holder, const_cast<InstanceKlass*>(klass));
  assert(holder() == klass, "");
}

void KlassTrainingData::notice_fully_initialized() {
  ResourceMark rm;
  assert(has_holder(), "");
  assert(holder()->is_initialized(), "wrong state: %s %s",
         holder()->name()->as_C_string(), holder()->init_state_name());

  TrainingDataLocker l; // Not a real lock if we don't collect the data,
                        // that's why we need the atomic decrement below.
  for (int i = 0; i < comp_dep_count(); i++) {
    comp_dep(i)->dec_init_deps_left(this);
  }
  holder()->set_has_init_deps_processed();
}

void TrainingData::init_dumptime_table(TRAPS) {
  precond((!assembling_data() && !need_data()) || need_data() != assembling_data());
  if (assembling_data()) {
    _dumptime_training_data_dictionary = new DumptimeTrainingDataDictionary();
    _archived_training_data_dictionary.iterate([&](TrainingData* record) {
      _dumptime_training_data_dictionary->append(record);
    });
  }
  if (need_data()) {
    _dumptime_training_data_dictionary = new DumptimeTrainingDataDictionary();
    TrainingDataLocker l;
    TrainingDataLocker::snapshot();

    ResourceMark rm;
    Visitor visitor(training_data_set()->size());
    training_data_set()->iterate([&](TrainingData* td) {
      td->prepare(visitor);
      if (!td->is_CompileTrainingData()) {
        _dumptime_training_data_dictionary->append(td);
      }
    });

    if (AOTVerifyTrainingData) {
      training_data_set()->verify();
    }
  }
}

void TrainingData::iterate_roots(MetaspaceClosure* it) {
  if (_dumptime_training_data_dictionary != nullptr) {
    for (int i = 0; i < _dumptime_training_data_dictionary->length(); i++) {
      _dumptime_training_data_dictionary->at(i).metaspace_pointers_do(it);
    }
  }
}

void TrainingData::dump_training_data() {
  if (_dumptime_training_data_dictionary != nullptr) {
    CompactHashtableStats stats;
    _archived_training_data_dictionary_for_dumping.reset();
    CompactHashtableWriter writer(_dumptime_training_data_dictionary->length(), &stats);
    for (int i = 0; i < _dumptime_training_data_dictionary->length(); i++) {
      TrainingData* td = _dumptime_training_data_dictionary->at(i).training_data();
#ifdef ASSERT
      for (int j = i+1; j < _dumptime_training_data_dictionary->length(); j++) {
        TrainingData* td1 = _dumptime_training_data_dictionary->at(j).training_data();
        assert(!TrainingData::Key::equals(td1, td->key(), -1), "conflict");
      }
#endif // ASSERT
      td = ArchiveBuilder::current()->get_buffered_addr(td);
      uint hash = TrainingData::Key::cds_hash(td->key());
      u4 delta = ArchiveBuilder::current()->buffer_to_offset_u4((address)td);
      writer.add(hash, delta);
    }
    writer.dump(&_archived_training_data_dictionary_for_dumping, "training data dictionary");
  }
}

void TrainingData::cleanup_training_data() {
  if (_dumptime_training_data_dictionary != nullptr) {
    ResourceMark rm;
    Visitor visitor(_dumptime_training_data_dictionary->length());
    for (int i = 0; i < _dumptime_training_data_dictionary->length(); i++) {
      TrainingData* td = _dumptime_training_data_dictionary->at(i).training_data();
      td->cleanup(visitor);
    }
    // Throw away all elements with empty keys
    int j = 0;
    for (int i = 0; i < _dumptime_training_data_dictionary->length(); i++) {
      TrainingData* td = _dumptime_training_data_dictionary->at(i).training_data();
      if (td->key()->is_empty()) {
        continue;
      }
      if (i != j) { // no need to copy if it's the same
        _dumptime_training_data_dictionary->at_put(j, td);
      }
      j++;
    }
    _dumptime_training_data_dictionary->trunc_to(j);
  }
}
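cleanup_training_data() above first empties the keys of entries whose classes were excluded from the archive, then compacts the array in place with a classic two-cursor pass. The same idiom in isolation (a generic sketch with standard containers, not the HotSpot GrowableArray API):

    #include <string>
    #include <vector>

    // Two-cursor in-place compaction: keep elements that still have a key,
    // preserving order, then truncate -- mirroring the loop above.
    void compact(std::vector<std::string>& entries) {
      size_t j = 0;                                    // write cursor
      for (size_t i = 0; i < entries.size(); i++) {    // read cursor
        if (entries[i].empty()) {
          continue;                                    // emptied key: drop the entry
        }
        if (i != j) {                                  // no need to copy if it's the same slot
          entries[j] = std::move(entries[i]);
        }
        j++;
      }
      entries.resize(j);                               // trunc_to(j)
    }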
void KlassTrainingData::cleanup(Visitor& visitor) {
  if (visitor.is_visited(this)) {
    return;
  }
  visitor.visit(this);
  if (has_holder()) {
    bool is_excluded = !holder()->is_loaded() || SystemDictionaryShared::check_for_exclusion(holder(), nullptr);
    if (is_excluded) {
      ResourceMark rm;
      log_debug(aot, training)("Cleanup KTD %s", name()->as_klass_external_name());
      _holder = nullptr;
      key()->make_empty();
    }
  }
  for (int i = 0; i < _comp_deps.length(); i++) {
    _comp_deps.at(i)->cleanup(visitor);
  }
}

void MethodTrainingData::cleanup(Visitor& visitor) {
  if (visitor.is_visited(this)) {
    return;
  }
  visitor.visit(this);
  if (has_holder()) {
    if (SystemDictionaryShared::check_for_exclusion(holder()->method_holder(), nullptr)) {
      log_debug(aot, training)("Cleanup MTD %s::%s", name()->as_klass_external_name(), signature()->as_utf8());
      if (_final_profile != nullptr && _final_profile->method() != _holder) {
        log_warning(aot, training)("Stale MDO for %s::%s", name()->as_klass_external_name(), signature()->as_utf8());
      }
      _final_profile = nullptr;
      _final_counters = nullptr;
      _holder = nullptr;
      key()->make_empty();
    }
  }
  for (int i = 0; i < CompLevel_count - 1; i++) {
    CompileTrainingData* ctd = _last_toplevel_compiles[i];
    if (ctd != nullptr) {
      ctd->cleanup(visitor);
    }
  }
}

void KlassTrainingData::verify() {
  for (int i = 0; i < comp_dep_count(); i++) {
    CompileTrainingData* ctd = comp_dep(i);
    if (!ctd->_init_deps.contains(this)) {
      print_on(tty); tty->cr();
      ctd->print_on(tty); tty->cr();
    }
    guarantee(ctd->_init_deps.contains(this), "");
  }
}

void MethodTrainingData::verify() {
  iterate_compiles([](CompileTrainingData* ctd) {
    ctd->verify();

    int init_deps_left1 = ctd->init_deps_left();
    int init_deps_left2 = ctd->compute_init_deps_left();

    if (init_deps_left1 != init_deps_left2) {
      ctd->print_on(tty); tty->cr();
    }
    guarantee(init_deps_left1 == init_deps_left2, "mismatch: %d %d %d",
              init_deps_left1, init_deps_left2, ctd->init_deps_left());
  });
}

void CompileTrainingData::verify() {
  for (int i = 0; i < init_dep_count(); i++) {
    KlassTrainingData* ktd = init_dep(i);
    if (ktd->has_holder() && ktd->holder()->defined_by_other_loaders()) {
      LogStreamHandle(Warning, training) log;
      if (log.is_enabled()) {
        ResourceMark rm;
        log.print("CTD "); print_value_on(&log);
        log.print(" depends on unregistered class %s", ktd->holder()->name()->as_C_string());
      }
    }
    if (!ktd->_comp_deps.contains(this)) {
      print_on(tty); tty->cr();
      ktd->print_on(tty); tty->cr();
    }
    guarantee(ktd->_comp_deps.contains(this), "");
  }
}

void CompileTrainingData::cleanup(Visitor& visitor) {
  if (visitor.is_visited(this)) {
    return;
  }
  visitor.visit(this);
  method()->cleanup(visitor);
}

void TrainingData::serialize(SerializeClosure* soc) {
  if (soc->writing()) {
    _archived_training_data_dictionary_for_dumping.serialize_header(soc);
  } else {
    _archived_training_data_dictionary.serialize_header(soc);
  }
}

class TrainingDataPrinter : StackObj {
  outputStream* _st;
  int _index;
public:
  TrainingDataPrinter(outputStream* st) : _st(st), _index(0) {}
  void do_value(TrainingData* td) {
    const char* type = (td->is_KlassTrainingData()   ? "K" :
                        td->is_MethodTrainingData()  ? "M" :
                        td->is_CompileTrainingData() ? "C" : "?");
    _st->print("%4d: %p %s ", _index++, td, type);
    td->print_on(_st);
    _st->cr();
    if (td->is_KlassTrainingData()) {
      td->as_KlassTrainingData()->iterate_comp_deps([&](CompileTrainingData* ctd) {
        ResourceMark rm;
        _st->print_raw("  C ");
        ctd->print_on(_st);
        _st->cr();
      });
    } else if (td->is_MethodTrainingData()) {
      td->as_MethodTrainingData()->iterate_compiles([&](CompileTrainingData* ctd) {
        ResourceMark rm;
        _st->print_raw("  C ");
        ctd->print_on(_st);
        _st->cr();
      });
    } else if (td->is_CompileTrainingData()) {
      // ?
    }
  }
};

void TrainingData::print_archived_training_data_on(outputStream* st) {
  st->print_cr("Archived TrainingData Dictionary");
  TrainingDataPrinter tdp(st);
  TrainingDataLocker::initialize();
  _archived_training_data_dictionary.iterate(&tdp);
}

void TrainingData::Key::metaspace_pointers_do(MetaspaceClosure *iter) {
  iter->push(const_cast<Metadata**>(&_meta));
}

void TrainingData::metaspace_pointers_do(MetaspaceClosure* iter) {
  _key.metaspace_pointers_do(iter);
}

bool TrainingData::Key::can_compute_cds_hash(const Key* const& k) {
  return k->meta() == nullptr || MetaspaceObj::is_shared(k->meta());
}

uint TrainingData::Key::cds_hash(const Key* const& k) {
  return SystemDictionaryShared::hash_for_shared_dictionary((address)k->meta());
}

TrainingData* TrainingData::lookup_archived_training_data(const Key* k) {
  // For this to work, all components of the key must be in shared metaspace.
  if (!TrainingData::Key::can_compute_cds_hash(k) || _archived_training_data_dictionary.empty()) {
    return nullptr;
  }
  uint hash = TrainingData::Key::cds_hash(k);
  TrainingData* td = _archived_training_data_dictionary.lookup(k, hash, -1 /*unused*/);
  if (td != nullptr) {
    if ((td->is_KlassTrainingData()  && td->as_KlassTrainingData()->has_holder()) ||
        (td->is_MethodTrainingData() && td->as_MethodTrainingData()->has_holder())) {
      return td;
    } else {
      ShouldNotReachHere();
    }
  }
  return nullptr;
}

template <typename T>
void TrainingData::DepList<T>::metaspace_pointers_do(MetaspaceClosure* iter) {
  iter->push(&_deps);
}

void KlassTrainingData::metaspace_pointers_do(MetaspaceClosure* iter) {
  log_trace(aot, training)("Iter(KlassTrainingData): %p", this);
  TrainingData::metaspace_pointers_do(iter);
  _comp_deps.metaspace_pointers_do(iter);
  iter->push(&_holder);
}

void MethodTrainingData::metaspace_pointers_do(MetaspaceClosure* iter) {
  log_trace(aot, training)("Iter(MethodTrainingData): %p", this);
  TrainingData::metaspace_pointers_do(iter);
  iter->push(&_klass);
  iter->push((Method**)&_holder);
  for (int i = 0; i < CompLevel_count - 1; i++) {
    iter->push(&_last_toplevel_compiles[i]);
  }
  iter->push(&_final_profile);
  iter->push(&_final_counters);
}

void CompileTrainingData::metaspace_pointers_do(MetaspaceClosure* iter) {
  log_trace(aot, training)("Iter(CompileTrainingData): %p", this);
  TrainingData::metaspace_pointers_do(iter);
  _init_deps.metaspace_pointers_do(iter);
  _ci_records.metaspace_pointers_do(iter);
  iter->push(&_method);
}

template <typename T>
void TrainingData::DepList<T>::prepare(ClassLoaderData* loader_data) {
  if (_deps == nullptr && _deps_dyn != nullptr) {
    int len = _deps_dyn->length();
    _deps = MetadataFactory::new_array_from_c_heap<T>(len, mtClassShared);
    for (int i = 0; i < len; i++) {
      _deps->at_put(i, _deps_dyn->at(i)); // copy
    }
  }
}

void KlassTrainingData::remove_unshareable_info() {
  TrainingData::remove_unshareable_info();
  _holder_mirror = nullptr;
  _comp_deps.remove_unshareable_info();
}

void MethodTrainingData::remove_unshareable_info() {
  TrainingData::remove_unshareable_info();
  if (_final_counters != nullptr) {
    _final_counters->remove_unshareable_info();
  }
  if (_final_profile != nullptr) {
    _final_profile->remove_unshareable_info();
  }
}

void CompileTrainingData::remove_unshareable_info() {
  TrainingData::remove_unshareable_info();
  _init_deps.remove_unshareable_info();
  _ci_records.remove_unshareable_info();
  _init_deps_left = compute_init_deps_left(true);
}

src/hotspot/share/oops/trainingData.hpp (new file, 825 lines)
@ -0,0 +1,825 @@
/*
 * Copyright (c) 2025, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_OOPS_TRAININGDATA_HPP
#define SHARE_OOPS_TRAININGDATA_HPP

#include "cds/cdsConfig.hpp"
#include "classfile/classLoaderData.hpp"
#include "classfile/compactHashtable.hpp"
#include "compiler/compilerDefinitions.hpp"
#include "compiler/compiler_globals.hpp"
#include "memory/allocation.hpp"
#include "memory/metaspaceClosure.hpp"
#include "oops/instanceKlass.hpp"
#include "oops/method.hpp"
#include "runtime/handles.hpp"
#include "runtime/mutexLocker.hpp"
#include "utilities/resizeableResourceHash.hpp"

class ciEnv;
class ciBaseObject;
class CompileTask;
class CompileTrainingData;
class KlassTrainingData;
class MethodTrainingData;

// Base class for all the training data varieties
class TrainingData : public Metadata {
  friend KlassTrainingData;
  friend MethodTrainingData;
  friend CompileTrainingData;
public:
  // Key is used to insert any TrainingData (TD) object into a hash table. The key is currently a
  // pointer to a metaspace object the TD is associated with. For example,
  // for KlassTrainingData it's an InstanceKlass, for MethodTrainingData it's a Method.
  // The utility of these hash tables is to be able to find a TD object for a given
  // metaspace object.
  class Key {
    mutable Metadata* _meta;
    // These guys can get to my constructors:
    friend TrainingData;
    friend KlassTrainingData;
    friend MethodTrainingData;
    friend CompileTrainingData;

    // The empty key
    Key() : _meta(nullptr) { }
    bool is_empty() const { return _meta == nullptr; }
  public:
    Key(Metadata* meta) : _meta(meta) { }

    static bool can_compute_cds_hash(const Key* const& k);
    static uint cds_hash(const Key* const& k);
    static unsigned hash(const Key* const& k) {
      return primitive_hash(k->meta());
    }
    static bool equals(const Key* const& k1, const Key* const& k2) {
      return k1->meta() == k2->meta();
    }
    static inline bool equals(TrainingData* value, const TrainingData::Key* key, int unused) {
      return equals(value->key(), key);
    }
    int cmp(const Key* that) const {
      auto m1 = this->meta();
      auto m2 = that->meta();
      if (m1 < m2) return -1;
      if (m1 > m2) return +1;
      return 0;
    }
    Metadata* meta() const { return _meta; }
    void metaspace_pointers_do(MetaspaceClosure *iter);
    void make_empty() const { _meta = nullptr; }
  };

  // TrainingDataLocker is used to guard read/write operations on non-MT-safe data structures.
  // It supports recursive locking and a read-only mode (in which case no locks are taken).
  // It is also a part of the TD collection termination protocol (see the "_snapshot" field).
  class TrainingDataLocker {
    static volatile bool _snapshot; // If true we're not allocating new training data
    static int _lock_mode;
    const bool _recursive;
    static void lock() {
#if INCLUDE_CDS
      assert(_lock_mode != 0, "Forgot to call TrainingDataLocker::initialize()");
      if (_lock_mode > 0) {
        TrainingData_lock->lock();
      }
#endif
    }
    static void unlock() {
#if INCLUDE_CDS
      if (_lock_mode > 0) {
        TrainingData_lock->unlock();
      }
#endif
    }
    static bool safely_locked() {
#if INCLUDE_CDS
      assert(_lock_mode != 0, "Forgot to call TrainingDataLocker::initialize()");
      if (_lock_mode > 0) {
        return is_self_locked();
      } else {
        return true;
      }
#else
      return true;
#endif
    }
    static bool is_self_locked() {
      return CDS_ONLY(TrainingData_lock->owned_by_self()) NOT_CDS(false);
    }

  public:
    static void snapshot() {
#if INCLUDE_CDS
      assert_locked();
      _snapshot = true;
#endif
    }
    static bool can_add() {
#if INCLUDE_CDS
      assert_locked();
      return !_snapshot;
#else
      return false;
#endif
    }
    static void initialize() {
#if INCLUDE_CDS
      _lock_mode = need_data() ? +1 : -1; // if -1, we go lock-free
#endif
    }
    static void assert_locked() {
      assert(safely_locked(), "use under TrainingDataLocker");
    }
    static void assert_can_add() {
      assert(can_add(), "Cannot add TrainingData objects");
    }
    TrainingDataLocker() : _recursive(is_self_locked()) {
      if (!_recursive) {
        lock();
      }
    }
    ~TrainingDataLocker() {
      if (!_recursive) {
        unlock();
      }
    }
  };
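TrainingDataLocker above is a recursion-tolerant RAII guard: the constructor records whether the current thread already holds the lock and only locks (and later unlocks) when it does not. A minimal stand-alone rendering of that pattern (illustrative only; HotSpot tracks ownership via Mutex::owned_by_self rather than a separate owner field):

    #include <atomic>
    #include <mutex>
    #include <thread>

    // RAII guard that tolerates re-entry on the same thread without
    // deadlocking, in the style of TrainingDataLocker (one global lock).
    class ReentrantGuard {
      static inline std::mutex _lock;
      static inline std::atomic<std::thread::id> _owner{};  // set while locked
      const bool _recursive;

      static bool is_self_locked() {
        return _owner.load() == std::this_thread::get_id();
      }

     public:
      ReentrantGuard() : _recursive(is_self_locked()) {
        if (!_recursive) {
          _lock.lock();
          _owner.store(std::this_thread::get_id());
        }
      }
      ~ReentrantGuard() {
        if (!_recursive) {
          _owner.store(std::thread::id());   // reset before releasing
          _lock.unlock();
        }
      }
    };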
  // A set of TD objects that we collect during the training run.
  class TrainingDataSet {
    friend TrainingData;
    ResizeableResourceHashtable<const Key*, TrainingData*,
                                AnyObj::C_HEAP, MemTag::mtCompiler,
                                &TrainingData::Key::hash,
                                &TrainingData::Key::equals>
      _table;

  public:
    template<typename... Arg>
    TrainingDataSet(Arg... arg)
      : _table(arg...) {
    }
    TrainingData* find(const Key* key) const {
      TrainingDataLocker::assert_locked();
      if (TrainingDataLocker::can_add()) {
        auto res = _table.get(key);
        return res == nullptr ? nullptr : *res;
      }
      return nullptr;
    }
    bool remove(const Key* key) {
      return _table.remove(key);
    }
    TrainingData* install(TrainingData* td) {
      TrainingDataLocker::assert_locked();
      TrainingDataLocker::assert_can_add();
      auto key = td->key();
      if (key->is_empty()) {
        return td; // unkeyed TD not installed
      }
      bool created = false;
      auto prior = _table.put_if_absent(key, td, &created);
      if (prior == nullptr || *prior == td) {
        return td;
      }
      assert(false, "no pre-existing elements allowed");
      return *prior;
    }
    template<typename Function>
    void iterate(const Function& fn) const { // lambda enabled API
      iterate(const_cast<Function&>(fn));
    }
    template<typename Function>
    void iterate(Function& fn) const { // lambda enabled API
      return _table.iterate_all([&](const TrainingData::Key* k, TrainingData* td) { fn(td); });
    }
    int size() const { return _table.number_of_entries(); }

    void verify() const {
      TrainingDataLocker::assert_locked();
      iterate([&](TrainingData* td) { td->verify(); });
    }
  };
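TrainingDataSet::install() only accepts keyed objects and asserts that no entry already exists for the key, so callers pair the failed find() with the install() inside one TrainingDataLocker scope. A hedged usage sketch of the sequence (mirroring KlassTrainingData::make() in trainingData.cpp; `key` and `holder` are assumed to be in scope):

    // One locker scope covers both the failed lookup and the install, so no
    // other thread can slip a duplicate entry in between.
    {
      TrainingDataLocker l;                        // recursion-safe guard
      TrainingData* td = training_data_set()->find(&key);
      if (td == nullptr && TrainingDataLocker::can_add()) {
        KlassTrainingData* ktd = KlassTrainingData::allocate(holder);
        if (ktd != nullptr) {
          td = training_data_set()->install(ktd);  // returns ktd on success
        }
      }
    }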
  // A widget to ensure that we visit each TD object only once (TD objects can have pointers to
  // other TD objects, and the references are sometimes circular).
  class Visitor {
    ResizeableResourceHashtable<TrainingData*, bool> _visited;
  public:
    Visitor(unsigned size) : _visited(size, 0x3fffffff) { }
    bool is_visited(TrainingData* td) {
      return _visited.contains(td);
    }
    void visit(TrainingData* td) {
      bool created;
      _visited.put_if_absent(td, &created);
    }
  };
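Visitor is the standard visited-set device for traversing a possibly cyclic TD graph: prepare()/cleanup() check is_visited() before recursing, so each node is processed once even when CompileTrainingData and KlassTrainingData point at each other. A generic sketch of the same traversal shape (plain C++, not the HotSpot container API):

    #include <unordered_set>
    #include <vector>

    struct Node {
      std::vector<Node*> deps;   // possibly cyclic
      void process() { /* per-node work */ }
    };

    // Depth-first walk that terminates on cycles, like prepare(Visitor&) above.
    void walk(Node* n, std::unordered_set<Node*>& visited) {
      if (n == nullptr || !visited.insert(n).second) {
        return;                  // already visited (or null): cut the cycle here
      }
      n->process();
      for (Node* d : n->deps) {
        walk(d, visited);
      }
    }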
|
||||
typedef OffsetCompactHashtable<const TrainingData::Key*, TrainingData*, TrainingData::Key::equals> TrainingDataDictionary;
|
||||
private:
|
||||
Key _key;
|
||||
|
||||
// just forward all constructor arguments to the embedded key
|
||||
template<typename... Arg>
|
||||
TrainingData(Arg... arg)
|
||||
: _key(arg...) { }
|
||||
|
||||
// Container for recording TD during training run
|
||||
static TrainingDataSet _training_data_set;
|
||||
// Containter for replaying the training data (read-only, populated from the AOT image)
|
||||
static TrainingDataDictionary _archived_training_data_dictionary;
|
||||
// Container used for writing the AOT image
|
||||
static TrainingDataDictionary _archived_training_data_dictionary_for_dumping;
|
||||
class DumpTimeTrainingDataInfo {
|
||||
TrainingData* _training_data;
|
||||
public:
|
||||
DumpTimeTrainingDataInfo() : DumpTimeTrainingDataInfo(nullptr) {}
|
||||
DumpTimeTrainingDataInfo(TrainingData* training_data) : _training_data(training_data) {}
|
||||
void metaspace_pointers_do(MetaspaceClosure* it) {
|
||||
it->push(&_training_data);
|
||||
}
|
||||
TrainingData* training_data() {
|
||||
return _training_data;
|
||||
}
|
||||
};
|
||||
typedef GrowableArrayCHeap<DumpTimeTrainingDataInfo, mtClassShared> DumptimeTrainingDataDictionary;
|
||||
// A temporary container that is used to accumulate and filter TD during dumping
|
||||
static DumptimeTrainingDataDictionary* _dumptime_training_data_dictionary;
|
||||
|
||||
static TrainingDataSet* training_data_set() { return &_training_data_set; }
|
||||
static TrainingDataDictionary* archived_training_data_dictionary() { return &_archived_training_data_dictionary; }
|
||||
|
||||
 public:
  // Returns the key under which this TD is installed, or else
  // Key::EMPTY if it is not installed.
  const Key* key() const { return &_key; }

  static bool have_data() { return AOTReplayTraining; } // Going to read
  static bool need_data() { return AOTRecordTraining; } // Going to write
  static bool assembling_data() { return have_data() && CDSConfig::is_dumping_final_static_archive() && CDSConfig::is_dumping_aot_linked_classes(); }

  template<typename Function>
  static void iterate(const Function& fn) { iterate(const_cast<Function&>(fn)); }

  template<typename Function>
  static void iterate(Function& fn) { // lambda enabled API
    TrainingDataLocker l;
    if (have_data()) {
      archived_training_data_dictionary()->iterate(fn);
    }
    if (need_data()) {
      training_data_set()->iterate(fn);
    }
  }
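
  // Illustrative example (not part of the source): counting recorded method
  // TDs with the lambda-enabled iterate() API; the lock is taken inside.
  //
  //   int methods = 0;
  //   TrainingData::iterate([&](TrainingData* td) {
  //     if (td->is_MethodTrainingData()) {
  //       methods++;
  //     }
  //   });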

  virtual MethodTrainingData* as_MethodTrainingData() const { return nullptr; }
  virtual KlassTrainingData* as_KlassTrainingData() const { return nullptr; }
  virtual CompileTrainingData* as_CompileTrainingData() const { return nullptr; }
  bool is_MethodTrainingData() const { return as_MethodTrainingData() != nullptr; }
  bool is_KlassTrainingData() const { return as_KlassTrainingData() != nullptr; }
  bool is_CompileTrainingData() const { return as_CompileTrainingData() != nullptr; }

  virtual void prepare(Visitor& visitor) = 0;
  virtual void cleanup(Visitor& visitor) = 0;

  static void initialize() NOT_CDS_RETURN;

  static void verify();

  // Widget for recording dependencies, as an N-to-M graph relation,
  // possibly cyclic.
  template<typename E>
  class DepList : public StackObj {
    GrowableArrayCHeap<E, mtCompiler>* _deps_dyn;
    Array<E>* _deps;
   public:
    DepList() {
      _deps_dyn = nullptr;
      _deps = nullptr;
    }

    int length() const {
      return (_deps_dyn != nullptr ? _deps_dyn->length()
              : _deps != nullptr ? _deps->length()
              : 0);
    }
    E* adr_at(int i) const {
      return (_deps_dyn != nullptr ? _deps_dyn->adr_at(i)
              : _deps != nullptr ? _deps->adr_at(i)
              : nullptr);
    }
    E at(int i) const {
      assert(i >= 0 && i < length(), "oob");
      return *adr_at(i);
    }
    bool append_if_missing(E dep) {
      if (_deps_dyn == nullptr) {
        _deps_dyn = new GrowableArrayCHeap<E, mtCompiler>(10);
        _deps_dyn->append(dep);
        return true;
      } else {
        return _deps_dyn->append_if_missing(dep);
      }
    }
    bool remove_if_existing(E dep) {
      if (_deps_dyn != nullptr) {
        return _deps_dyn->remove_if_existing(dep);
      }
      return false;
    }
    void clear() {
      if (_deps_dyn != nullptr) {
        _deps_dyn->clear();
      }
    }
    void append(E dep) {
      if (_deps_dyn == nullptr) {
        _deps_dyn = new GrowableArrayCHeap<E, mtCompiler>(10);
      }
      _deps_dyn->append(dep);
    }
    bool contains(E dep) {
      for (int i = 0; i < length(); i++) {
        if (dep == at(i)) {
          return true; // found
        }
      }
      return false; // not found
    }

#if INCLUDE_CDS
    void remove_unshareable_info() {
      _deps_dyn = nullptr;
    }
#endif
    void prepare(ClassLoaderData* loader_data);
    void metaspace_pointers_do(MetaspaceClosure *iter);
  };
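
  // Illustrative sketch (not part of the source): recording a dependency edge
  // once and testing for it later; DepList de-duplicates on append.
  //
  //   DepList<KlassTrainingData*> deps;
  //   deps.append_if_missing(ktd);   // first insert returns true
  //   deps.append_if_missing(ktd);   // duplicate insert returns false
  //   assert(deps.contains(ktd) && deps.length() == 1, "one edge only");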

  virtual void metaspace_pointers_do(MetaspaceClosure *iter);

  static void init_dumptime_table(TRAPS);

#if INCLUDE_CDS
  virtual void remove_unshareable_info() {}
  static void iterate_roots(MetaspaceClosure* it);
  static void dump_training_data();
  static void cleanup_training_data();
  static void serialize(SerializeClosure* soc);
  static void print_archived_training_data_on(outputStream* st);
  static TrainingData* lookup_archived_training_data(const Key* k);
#endif

  template<typename TrainingDataType, typename... ArgTypes>
  static TrainingDataType* allocate(ArgTypes... args) {
    assert(need_data() || have_data(), "");
    if (TrainingDataLocker::can_add()) {
      return new (mtClassShared) TrainingDataType(args...);
    }
    return nullptr;
  }
};

// Training data that is associated with an InstanceKlass
class KlassTrainingData : public TrainingData {
  friend TrainingData;
  friend CompileTrainingData;

  // Used by CDS. These classes need to access the private default constructor.
  template <class T> friend class CppVtableTesterA;
  template <class T> friend class CppVtableTesterB;
  template <class T> friend class CppVtableCloner;

  // cross-link to live klass, or null if not loaded or encountered yet
  InstanceKlass* _holder;
  jobject _holder_mirror; // extra link to prevent unloading by GC

  DepList<CompileTrainingData*> _comp_deps; // compiles that depend on me

  KlassTrainingData();
  KlassTrainingData(InstanceKlass* klass);

  int comp_dep_count() const {
    TrainingDataLocker::assert_locked();
    return _comp_deps.length();
  }
  CompileTrainingData* comp_dep(int i) const {
    TrainingDataLocker::assert_locked();
    return _comp_deps.at(i);
  }
  void add_comp_dep(CompileTrainingData* ctd) {
    TrainingDataLocker::assert_locked();
    _comp_deps.append_if_missing(ctd);
  }
  void remove_comp_dep(CompileTrainingData* ctd) {
    TrainingDataLocker::assert_locked();
    _comp_deps.remove_if_existing(ctd);
  }

 public:
  Symbol* name() const {
    precond(has_holder());
    return holder()->name();
  }
  bool has_holder() const { return _holder != nullptr; }
  InstanceKlass* holder() const { return _holder; }

  static KlassTrainingData* make(InstanceKlass* holder,
                                 bool null_if_not_found = false) NOT_CDS_RETURN_(nullptr);
  static KlassTrainingData* find(InstanceKlass* holder) {
    return make(holder, true);
  }
  virtual KlassTrainingData* as_KlassTrainingData() const { return const_cast<KlassTrainingData*>(this); };

  ClassLoaderData* class_loader_data() {
    assert(has_holder(), "");
    return holder()->class_loader_data();
  }
  void notice_fully_initialized() NOT_CDS_RETURN;

  void print_on(outputStream* st, bool name_only) const;
  virtual void print_on(outputStream* st) const { print_on(st, false); }
  virtual void print_value_on(outputStream* st) const { print_on(st, true); }

  virtual void prepare(Visitor& visitor);
  virtual void cleanup(Visitor& visitor) NOT_CDS_RETURN;

  MetaspaceObj::Type type() const {
    return KlassTrainingDataType;
  }

#if INCLUDE_CDS
  virtual void remove_unshareable_info();
#endif

  void metaspace_pointers_do(MetaspaceClosure *iter);

  int size() const {
    return (int)align_metadata_size(align_up(sizeof(KlassTrainingData), BytesPerWord)/BytesPerWord);
  }

  const char* internal_name() const {
    return "{ klass training data }";
  };

  void verify();

  static KlassTrainingData* allocate(InstanceKlass* holder) {
    return TrainingData::allocate<KlassTrainingData>(holder);
  }

  template<typename Function>
  void iterate_comp_deps(Function fn) const { // lambda enabled API
    TrainingDataLocker l;
    for (int i = 0; i < comp_dep_count(); i++) {
      fn(comp_dep(i));
    }
  }
};
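
// Illustrative example (not part of the source): walking the compiles that
// depend on a class with the lambda-enabled API; the lock is taken inside.
//
//   ktd->iterate_comp_deps([&](CompileTrainingData* ctd) {
//     ctd->print_value_on(tty);
//   });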

// Information about particular JIT tasks.
class CompileTrainingData : public TrainingData {
  friend TrainingData;
  friend KlassTrainingData;

  // Used by CDS. These classes need to access the private default constructor.
  template <class T> friend class CppVtableTesterA;
  template <class T> friend class CppVtableTesterB;
  template <class T> friend class CppVtableCloner;

  MethodTrainingData* _method;
  const short _level;
  const int _compile_id;

  // classes that should be initialized before this JIT task runs
  DepList<KlassTrainingData*> _init_deps;
  // Number of uninitialized classes left; when it reaches 0, all deps are satisfied
  volatile int _init_deps_left;

 public:
  // ciRecords is a generic mechanism to memoize CI responses to arbitrary queries. For each function we're interested in we record
  // (return_value, argument_values) tuples in a list. Arguments are allowed to have Metaspace pointers in them.
  class ciRecords {
    template <typename... Ts> class Arguments {
     public:
      bool operator==(const Arguments<>&) const { return true; }
      void metaspace_pointers_do(MetaspaceClosure *iter) { }
    };
    template <typename T, typename... Ts> class Arguments<T, Ts...> {
     private:
      T _first;
      Arguments<Ts...> _remaining;

     public:
      constexpr Arguments(const T& first, const Ts&... remaining) noexcept
        : _first(first), _remaining(remaining...) {}
      constexpr Arguments() noexcept : _first(), _remaining() {}
      bool operator==(const Arguments<T, Ts...>& that) const {
        return _first == that._first && _remaining == that._remaining;
      }
      template<typename U = T, ENABLE_IF(std::is_pointer<U>::value && std::is_base_of<MetaspaceObj, typename std::remove_pointer<U>::type>::value)>
      void metaspace_pointers_do(MetaspaceClosure *iter) {
        iter->push(&_first);
        _remaining.metaspace_pointers_do(iter);
      }
      template<typename U = T, ENABLE_IF(!(std::is_pointer<U>::value && std::is_base_of<MetaspaceObj, typename std::remove_pointer<U>::type>::value))>
      void metaspace_pointers_do(MetaspaceClosure *iter) {
        _remaining.metaspace_pointers_do(iter);
      }
    };

    template <typename ReturnType, typename... Args> class ciMemoizedFunction : public StackObj {
     public:
      class OptionalReturnType {
        bool _valid;
        ReturnType _result;
       public:
        OptionalReturnType(bool valid, const ReturnType& result) : _valid(valid), _result(result) {}
        bool is_valid() const { return _valid; }
        ReturnType result() const { return _result; }
      };
     private:
      typedef Arguments<Args...> ArgumentsType;
      class Record : public MetaspaceObj {
        ReturnType _result;
        ArgumentsType _arguments;
       public:
        Record(const ReturnType& result, const ArgumentsType& arguments) : _result(result), _arguments(arguments) {}
        Record() { }
        ReturnType result() const { return _result; }
        ArgumentsType arguments() const { return _arguments; }
        bool operator==(const Record& that) { return _arguments == that._arguments; }
        void metaspace_pointers_do(MetaspaceClosure *iter) { _arguments.metaspace_pointers_do(iter); }
      };
      DepList<Record> _data;
     public:
      OptionalReturnType find(const Args&... args) {
        ArgumentsType a(args...);
        for (int i = 0; i < _data.length(); i++) {
          if (_data.at(i).arguments() == a) {
            return OptionalReturnType(true, _data.at(i).result());
          }
        }
        return OptionalReturnType(false, ReturnType());
      }
      bool append_if_missing(const ReturnType& result, const Args&... args) {
        return _data.append_if_missing(Record(result, ArgumentsType(args...)));
      }
#if INCLUDE_CDS
      void remove_unshareable_info() { _data.remove_unshareable_info(); }
#endif
      void prepare(ClassLoaderData* loader_data) {
        _data.prepare(loader_data);
      }
      void metaspace_pointers_do(MetaspaceClosure *iter) {
        _data.metaspace_pointers_do(iter);
      }
    };


   public:
    // Record CI answers for the InlineSmallCode heuristic. It is important because the heuristic is non-commutative and we may want to
    // compile methods in a different order than in the training run.
    typedef ciMemoizedFunction<int, MethodTrainingData*> ciMethod__inline_instructions_size_type;
    ciMethod__inline_instructions_size_type ciMethod__inline_instructions_size;
#if INCLUDE_CDS
    void remove_unshareable_info() {
      ciMethod__inline_instructions_size.remove_unshareable_info();
    }
#endif
    void prepare(ClassLoaderData* loader_data) {
      ciMethod__inline_instructions_size.prepare(loader_data);
    }
    void metaspace_pointers_do(MetaspaceClosure *iter) {
      ciMethod__inline_instructions_size.metaspace_pointers_do(iter);
    }
  };
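
  // Illustrative sketch (not part of the source): consulting the memoized CI
  // record before recomputing; compute_inline_instructions_size() below is a
  // hypothetical fallback.
  //
  //   auto cached = ctd->ci_records().ciMethod__inline_instructions_size.find(mtd);
  //   int size = cached.is_valid() ? cached.result()
  //                                : compute_inline_instructions_size(mtd);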

 private:
  ciRecords _ci_records;

  CompileTrainingData();
  CompileTrainingData(MethodTrainingData* mtd,
                      int level,
                      int compile_id)
      : TrainingData(), // empty key
        _method(mtd), _level(level), _compile_id(compile_id), _init_deps_left(0) { }
 public:
  ciRecords& ci_records() { return _ci_records; }
  static CompileTrainingData* make(CompileTask* task) NOT_CDS_RETURN_(nullptr);

  virtual CompileTrainingData* as_CompileTrainingData() const { return const_cast<CompileTrainingData*>(this); };

  MethodTrainingData* method() const { return _method; }

  int level() const { return _level; }

  int compile_id() const { return _compile_id; }

  int init_dep_count() const {
    TrainingDataLocker::assert_locked();
    return _init_deps.length();
  }
  KlassTrainingData* init_dep(int i) const {
    TrainingDataLocker::assert_locked();
    return _init_deps.at(i);
  }
  void add_init_dep(KlassTrainingData* ktd) {
    TrainingDataLocker::assert_locked();
    ktd->add_comp_dep(this);
    _init_deps.append_if_missing(ktd);
  }
  void clear_init_deps() {
    TrainingDataLocker::assert_locked();
    for (int i = 0; i < _init_deps.length(); i++) {
      _init_deps.at(i)->remove_comp_dep(this);
    }
    _init_deps.clear();
  }
  void dec_init_deps_left(KlassTrainingData* ktd);
  int init_deps_left() const {
    return Atomic::load(&_init_deps_left);
  }
  uint compute_init_deps_left(bool count_initialized = false);

  void notice_inlined_method(CompileTask* task, const methodHandle& method) NOT_CDS_RETURN;

  // The JIT looks at classes and objects too and can depend on their state.
  // These simple calls just report the *possibility* of an observation.
  void notice_jit_observation(ciEnv* env, ciBaseObject* what) NOT_CDS_RETURN;

  virtual void prepare(Visitor& visitor);
  virtual void cleanup(Visitor& visitor) NOT_CDS_RETURN;

  void print_on(outputStream* st, bool name_only) const;
  virtual void print_on(outputStream* st) const { print_on(st, false); }
  virtual void print_value_on(outputStream* st) const { print_on(st, true); }

#if INCLUDE_CDS
  virtual void remove_unshareable_info();
#endif

  virtual void metaspace_pointers_do(MetaspaceClosure* iter);
  virtual MetaspaceObj::Type type() const { return CompileTrainingDataType; }

  virtual const char* internal_name() const {
    return "{ compile training data }";
  };

  virtual int size() const {
    return (int)align_metadata_size(align_up(sizeof(CompileTrainingData), BytesPerWord)/BytesPerWord);
  }

  void verify();

  static CompileTrainingData* allocate(MethodTrainingData* mtd, int level, int compile_id) {
    return TrainingData::allocate<CompileTrainingData>(mtd, level, compile_id);
  }
};
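
// Illustrative sketch (not part of the source): the intended init-deps
// protocol at replay time. A recorded compilation is held back until every
// class it depends on has been initialized; the scheduling call below is
// hypothetical.
//
//   {
//     TrainingDataLocker l;
//     ctd->add_init_dep(ktd);           // also links ktd -> ctd as a comp dep
//   }
//   ...
//   if (ctd->init_deps_left() == 0) {
//     replay_compile(ctd);              // all dependencies satisfied
//   }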

// Record information about a method at the time compilation is requested.
class MethodTrainingData : public TrainingData {
  friend TrainingData;
  friend CompileTrainingData;

  // Used by CDS. These classes need to access the private default constructor.
  template <class T> friend class CppVtableTesterA;
  template <class T> friend class CppVtableTesterB;
  template <class T> friend class CppVtableCloner;

  KlassTrainingData* _klass;
  Method* _holder;
  CompileTrainingData* _last_toplevel_compiles[CompLevel_count - 1];
  int _highest_top_level;
  int _level_mask; // bit-set of all possible levels
  bool _was_toplevel;
  // metadata snapshots of final state:
  MethodCounters* _final_counters;
  MethodData* _final_profile;

  MethodTrainingData();
  MethodTrainingData(Method* method, KlassTrainingData* ktd) : TrainingData(method) {
    _klass = ktd;
    _holder = method;
    for (int i = 0; i < CompLevel_count - 1; i++) {
      _last_toplevel_compiles[i] = nullptr;
    }
    _highest_top_level = CompLevel_none;
    _level_mask = 0;
    _was_toplevel = false;
  }

  static int level_mask(int level) {
    return ((level & 0xF) != level ? 0 : 1 << level);
  }
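
  // Illustrative note (not part of the source): level_mask() turns a tier
  // number into a single bit, e.g. level_mask(2) == 1 << 2 == 0b100, so
  // _level_mask accumulates the set of tiers seen; values that don't fit in
  // the low 4 bits map to 0 and are ignored.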

 public:
  KlassTrainingData* klass() const { return _klass; }
  bool has_holder() const { return _holder != nullptr; }
  Method* holder() const { return _holder; }
  bool only_inlined() const { return !_was_toplevel; }
  bool saw_level(CompLevel l) const { return (_level_mask & level_mask(l)) != 0; }
  int highest_top_level() const { return _highest_top_level; }
  MethodData* final_profile() const { return _final_profile; }

  Symbol* name() const {
    precond(has_holder());
    return holder()->name();
  }
  Symbol* signature() const {
    precond(has_holder());
    return holder()->signature();
  }

  CompileTrainingData* last_toplevel_compile(int level) const {
    if (level > CompLevel_none) {
      return _last_toplevel_compiles[level - 1];
    }
    return nullptr;
  }

  void notice_compilation(int level, bool inlined = false) {
    if (!inlined) {
      _was_toplevel = true;
    }
    _level_mask |= level_mask(level);
  }

  void notice_toplevel_compilation(int level) {
    _highest_top_level = MAX2(_highest_top_level, level);
  }

  static MethodTrainingData* make(const methodHandle& method,
                                  bool null_if_not_found = false,
                                  bool use_cache = true) NOT_CDS_RETURN_(nullptr);
  static MethodTrainingData* find_fast(const methodHandle& method) { return make(method, true, true); }
  static MethodTrainingData* find(const methodHandle& method) { return make(method, true, false); }

  virtual MethodTrainingData* as_MethodTrainingData() const {
    return const_cast<MethodTrainingData*>(this);
  };

  void print_on(outputStream* st, bool name_only) const;
  virtual void print_on(outputStream* st) const { print_on(st, false); }
  virtual void print_value_on(outputStream* st) const { print_on(st, true); }

  virtual void prepare(Visitor& visitor);
  virtual void cleanup(Visitor& visitor) NOT_CDS_RETURN;

  template<typename Function>
  void iterate_compiles(Function fn) const { // lambda enabled API
    for (int i = 0; i < CompLevel_count - 1; i++) {
      CompileTrainingData* ctd = _last_toplevel_compiles[i];
      if (ctd != nullptr) {
        fn(ctd);
      }
    }
  }

  virtual void metaspace_pointers_do(MetaspaceClosure* iter);
  virtual MetaspaceObj::Type type() const { return MethodTrainingDataType; }

#if INCLUDE_CDS
  virtual void remove_unshareable_info();
#endif

  virtual int size() const {
    return (int)align_metadata_size(align_up(sizeof(MethodTrainingData), BytesPerWord)/BytesPerWord);
  }

  virtual const char* internal_name() const {
    return "{ method training data }";
  };

  void verify();

  static MethodTrainingData* allocate(Method* m, KlassTrainingData* ktd) {
    return TrainingData::allocate<MethodTrainingData>(m, ktd);
  }
};

#endif // SHARE_OOPS_TRAININGDATA_HPP

@ -32,6 +32,7 @@
#include "logging/logAsyncWriter.hpp"
#include "memory/universe.hpp"
#include "nmt/memTracker.hpp"
#include "oops/trainingData.hpp"
#include "prims/downcallLinker.hpp"
#include "prims/jvmtiExport.hpp"
#include "prims/methodHandles.hpp"
@ -188,6 +189,11 @@ jint init_globals2() {
  }
#endif

  // Initialize TrainingData only if we're recording/replaying
  if (TrainingData::have_data() || TrainingData::need_data()) {
    TrainingData::initialize();
  }

  if (!universe_post_init()) {
    return JNI_ERR;
  }

@ -90,6 +90,8 @@ Monitor* InitCompleted_lock = nullptr;
Monitor* BeforeExit_lock = nullptr;
Monitor* Notify_lock = nullptr;
Mutex* ExceptionCache_lock = nullptr;
Mutex* TrainingData_lock = nullptr;
Monitor* TrainingReplayQueue_lock = nullptr;
#ifndef PRODUCT
Mutex* FullGCALot_lock = nullptr;
#endif

@ -256,6 +258,8 @@ void mutex_init() {

  MUTEX_DEFN(CompiledIC_lock          , PaddedMutex  , nosafepoint); // locks VtableStubs_lock
  MUTEX_DEFN(MethodCompileQueue_lock  , PaddedMonitor, safepoint);
  MUTEX_DEFL(TrainingData_lock        , PaddedMutex  , MethodCompileQueue_lock);
  MUTEX_DEFN(TrainingReplayQueue_lock , PaddedMonitor, safepoint);
  MUTEX_DEFN(CompileStatistics_lock   , PaddedMutex  , safepoint);
  MUTEX_DEFN(DirectivesStack_lock     , PaddedMutex  , nosafepoint);

@ -83,6 +83,8 @@ extern Mutex* Compile_lock; // a lock held when Compilation
extern Monitor* MethodCompileQueue_lock;  // a lock held when method compilations are enqueued, dequeued
extern Monitor* CompileThread_lock;       // a lock held by compile threads during compilation system initialization
extern Monitor* Compilation_lock;         // a lock used to pause compilation
extern Mutex*   TrainingData_lock;        // a lock used when accessing training records
extern Monitor* TrainingReplayQueue_lock; // a lock held when classes are added to/removed from the training replay queue
extern Mutex*   CompileTaskAlloc_lock;    // a lock held when CompileTasks are allocated
extern Mutex*   CompileStatistics_lock;   // a lock held when updating compilation statistics
extern Mutex*   DirectivesStack_lock;     // a lock held when mutating the dirstack and ref counting directives

@ -812,6 +812,11 @@ jint Threads::create_vm(JavaVMInitArgs* args, bool* canTryAgain) {
  // cache the system and platform class loaders
  SystemDictionary::compute_java_loaders(CHECK_JNI_ERR);

  // Initiate replay training processing once preloading is over.
  CompileBroker::init_training_replay();

  AOTLinkedClassBulkLoader::replay_training_at_init_for_preloaded_classes(CHECK_JNI_ERR);

  if (Continuations::enabled()) {
    // Initialize Continuation class now so that failure to create enterSpecial/doYield
    // special nmethods due to limited CodeCache size can be treated as a fatal error at

@ -1024,6 +1024,7 @@
  declare_type(ServiceThread, JavaThread)      \
  declare_type(NotificationThread, JavaThread) \
  declare_type(CompilerThread, JavaThread)     \
  declare_type(TrainingReplayThread, JavaThread) \
  declare_type(StringDedupThread, JavaThread)  \
  declare_type(AttachListenerThread, JavaThread) \
  DEBUG_ONLY(COMPILER2_OR_JVMCI_PRESENT(       \

@ -116,7 +116,8 @@ public class FileMapInfo {
  }

  private static void populateMetadataTypeArray(TypeDataBase db) {
    metadataTypeArray = new Type[9];
    metadataTypeArray = new Type[11];
    // The order needs to match up with CPP_VTABLE_TYPES_DO in src/hotspot/share/cds/cppVtables.cpp

    metadataTypeArray[0] = db.lookupType("ConstantPool");
    metadataTypeArray[1] = db.lookupType("InstanceKlass");
@ -125,8 +126,10 @@ public class FileMapInfo {
    metadataTypeArray[4] = db.lookupType("InstanceRefKlass");
    metadataTypeArray[5] = db.lookupType("InstanceStackChunkKlass");
    metadataTypeArray[6] = db.lookupType("Method");
    metadataTypeArray[7] = db.lookupType("ObjArrayKlass");
    metadataTypeArray[8] = db.lookupType("TypeArrayKlass");
    metadataTypeArray[7] = db.lookupType("MethodData");
    metadataTypeArray[8] = db.lookupType("MethodCounters");
    metadataTypeArray[9] = db.lookupType("ObjArrayKlass");
    metadataTypeArray[10] = db.lookupType("TypeArrayKlass");
  }

  public FileMapHeader getHeader() {

@ -151,6 +151,7 @@ public class Threads {

    if (!VM.getVM().isCore()) {
      virtualConstructor.addMapping("CompilerThread", CompilerThread.class);
      virtualConstructor.addMapping("TrainingReplayThread", HiddenJavaThread.class);
    }

    // These are all the visible JavaThread subclasses that execute java code.

@ -0,0 +1,143 @@
/*
 * Copyright (c) 2025, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

/**
 * @test
 * @summary Sanity test of combinations of the diagnostic flags [+-]AOTRecordTraining and [+-]AOTReplayTraining
 * @requires vm.cds
 * @comment work around JDK-8345635
 * @requires !vm.jvmci.enabled
 * @requires vm.cds.supports.aot.class.linking
 * @requires vm.flagless
 * @library /test/lib /test/setup_aot /test/hotspot/jtreg/runtime/cds/appcds/test-classes
 * @build AOTProfileFlags JavacBenchApp Hello
 * @run driver jdk.test.lib.helpers.ClassFileInstaller -jar app.jar
 *                 JavacBenchApp
 *                 JavacBenchApp$ClassFile
 *                 JavacBenchApp$FileManager
 *                 JavacBenchApp$SourceFile
 * @run driver jdk.test.lib.helpers.ClassFileInstaller -jar hello.jar Hello
 * @run driver AOTProfileFlags
 */

import jdk.test.lib.cds.CDSTestUtils;
import jdk.test.lib.cds.SimpleCDSAppTester;
import jdk.test.lib.helpers.ClassFileInstaller;
import jdk.test.lib.Platform;
import jdk.test.lib.process.OutputAnalyzer;
import jdk.test.lib.process.ProcessTools;

public class AOTProfileFlags {
    public static void testDiagnosticFlags() throws Exception {
        printTestCase("Diagnostic Flags");
        for (int i = 0; i < 2; i++) {
            for (int j = 0; j < 2; j++) {
                SimpleCDSAppTester.of("AOTProfileFlags" + i + "" + j)
                    .addVmArgs("-XX:+UnlockDiagnosticVMOptions",
                               "-XX:" + (i == 0 ? "-" : "+") + "AOTRecordTraining",
                               "-XX:" + (j == 0 ? "-" : "+") + "AOTReplayTraining")
                    .classpath("app.jar")
                    .appCommandLine("JavacBenchApp", "10")
                    .runAOTWorkflow();
            }
        }
    }

    static void trainAndRun(String testName, String trainingFlags, String productionFlags, String errorPattern) throws Exception {
        printTestCase("Flags mismatch " + testName);

        String appJar = ClassFileInstaller.getJarPath("hello.jar");
        String aotConfigFile = "hello.aotconfig";
        String aotCacheFile = "hello.aot";
        String helloClass = "Hello";

        ProcessBuilder pb;
        OutputAnalyzer out;

        // first make sure we have a valid aotConfigFile, recorded with the given trainingFlags
        pb = ProcessTools.createLimitedTestJavaProcessBuilder(
            "-XX:AOTMode=record",
            "-XX:AOTConfiguration=" + aotConfigFile,
            "-XX:+UnlockExperimentalVMOptions",
            trainingFlags,
            "-cp", appJar, helloClass);

        out = CDSTestUtils.executeAndLog(pb, "train");
        out.shouldHaveExitValue(0);

        pb = ProcessTools.createLimitedTestJavaProcessBuilder(
            "-XX:AOTMode=create",
            "-XX:AOTConfiguration=" + aotConfigFile,
            "-XX:AOTCache=" + aotCacheFile,
            "-XX:+UnlockExperimentalVMOptions",
            trainingFlags,
            "-cp", appJar);

        out = CDSTestUtils.executeAndLog(pb, "assemble");
        out.shouldHaveExitValue(0);

        pb = ProcessTools.createLimitedTestJavaProcessBuilder(
            "-XX:AOTCache=" + aotCacheFile,
            "-XX:+UnlockExperimentalVMOptions",
            trainingFlags,
            "-cp", appJar, helloClass);

        out = CDSTestUtils.executeAndLog(pb, "production_success");
        out.shouldNotMatch(errorPattern);
        out.shouldHaveExitValue(0);

        pb = ProcessTools.createLimitedTestJavaProcessBuilder(
            "-XX:AOTCache=" + aotCacheFile,
            "-XX:+UnlockExperimentalVMOptions",
            productionFlags,
            "-cp", appJar, helloClass);

        out = CDSTestUtils.executeAndLog(pb, "production_failure");
        out.shouldMatch(errorPattern);
        out.shouldHaveExitValue(0);
    }

    public static void testFlagsMismatch() throws Exception {
        String errorPattern = ".*Profile.* setting .* does not equal the current .*Profile.* setting.*";
        trainAndRun("TypeProfileLevel", "-XX:TypeProfileLevel=222", "-XX:TypeProfileLevel=111", errorPattern);
        trainAndRun("TypeProfileArgsLimit", "-XX:TypeProfileArgsLimit=2", "-XX:TypeProfileArgsLimit=3", errorPattern);
        trainAndRun("TypeProfileParmsLimit", "-XX:TypeProfileParmsLimit=2", "-XX:TypeProfileParmsLimit=3", errorPattern);
        trainAndRun("TypeProfileWidth", "-XX:TypeProfileWidth=2", "-XX:TypeProfileWidth=3", errorPattern);
        if (Platform.isDebugBuild()) {
            trainAndRun("ProfileTraps", "-XX:+ProfileTraps", "-XX:-ProfileTraps", errorPattern);
            trainAndRun("TypeProfileCasts", "-XX:+TypeProfileCasts", "-XX:-TypeProfileCasts", errorPattern);
        }
        errorPattern = "SpecTrapLimitExtraEntries setting .* does not equal the current SpecTrapLimitExtraEntries setting";
        trainAndRun("SpecTrapLimitExtraEntries", "-XX:SpecTrapLimitExtraEntries=2", "-XX:SpecTrapLimitExtraEntries=3", errorPattern);
    }

    static int testNum = 0;
    static void printTestCase(String s) {
        System.out.println("vvvvvvv TEST CASE " + testNum + ": " + s + " starts here vvvvvvv");
        testNum++;
    }
    public static void main(String... args) throws Exception {
        testDiagnosticFlags();
        testFlagsMismatch();
    }
}