8264805: Remove the experimental Ahead-of-Time Compiler

Reviewed-by: coleenp, erikj, stefank, iignatyev, dholmes, aph, shade, iklam, mchung, iveresov
Vladimir Kozlov 2021-04-27 01:12:18 +00:00
parent 15d4787724
commit 694acedf18
378 changed files with 200 additions and 26970 deletions
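
As a reminder of what is being removed: this is the jaotc-based workflow from JEP 295, where a module is compiled ahead of time into a shared library and loaded by the VM. A minimal sketch, using the same flags that appear in the diffs below (library name and module choice are illustrative):

# Compile a module ahead of time into a shared library (tool removed by this commit).
jaotc --output libjava.base.so --module java.base

# Load the library at run time; UseAOT and AOTLibrary were experimental flags.
java -XX:+UnlockExperimentalVMOptions -XX:AOTLibrary=./libjava.base.so -XX:+PrintAOT -version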


@ -1,5 +1,5 @@
#
# Copyright (c) 2016, 2020, Oracle and/or its affiliates. All rights reserved.
# Copyright (c) 2016, 2021, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it
@ -46,7 +46,7 @@ endif
$(eval $(call ParseKeywordVariable, TEST_OPTS, \
SINGLE_KEYWORDS := JOBS TIMEOUT_FACTOR JCOV JCOV_DIFF_CHANGESET, \
STRING_KEYWORDS := VM_OPTIONS JAVA_OPTIONS AOT_MODULES, \
STRING_KEYWORDS := VM_OPTIONS JAVA_OPTIONS, \
))
# Helper function to propagate TEST_OPTS values.
@ -134,96 +134,6 @@ ifeq ($(GCOV_ENABLED), true)
JTREG_COV_OPTIONS += -e:GCOV_PREFIX="$(GCOV_OUTPUT_DIR)"
endif
################################################################################
# Optionally create AOT libraries for specified modules before running tests.
# Note, this could not be done during JDK build time.
################################################################################
# Parameter 1 is the name of the rule.
#
# Remaining parameters are named arguments.
# MODULE The module to generate a library for
# BIN Output directory in which to put the library
# VM_OPTIONS List of JVM arguments to use when creating library
# OPTIONS_VAR Name of variable to put AOT java options in
# PREREQS_VAR Name of variable to put all AOT prerequisite rule targets in
# for test rules to depend on
#
SetupAotModule = $(NamedParamsMacroTemplate)
define SetupAotModuleBody
$1_AOT_LIB := $$($1_BIN)/$$(call SHARED_LIBRARY,$$($1_MODULE))
$1_AOT_CCLIST := $$(wildcard $$(TOPDIR)/test/hotspot/jtreg/compiler/aot/scripts/$$($1_MODULE)-list.txt)
# Create jaotc flags.
# VM flags which don't affect AOT code generation are filtered out:
# -Xcomp, -XX:+-TieredCompilation
$1_JAOTC_OPTS := \
-J-Xmx4g --info \
$$(addprefix -J, $$(filter-out -Xcomp %TieredCompilation, $$($1_VM_OPTIONS))) \
$$(addprefix --compile-commands$(SPACE), $$($1_AOT_CCLIST)) \
--linker-path $$(LD_JAOTC) \
#
ifneq ($$(filter -ea, $$($1_VM_OPTIONS)), )
$1_JAOTC_OPTS += --compile-with-assertions
endif
ifneq ($$(filter -XX:+VerifyOops, $$($1_VM_OPTIONS)), )
$1_JAOTC_OPTS += -J-Dgraal.AOTVerifyOops=true
endif
$$($1_AOT_LIB): $$(JDK_UNDER_TEST)/release \
$$(call DependOnVariable, $1_JAOTC_OPTS) \
$$(call DependOnVariable, JDK_UNDER_TEST)
$$(call LogWarn, Generating $$(patsubst $$(OUTPUTDIR)/%, %, $$@))
$$(call MakeTargetDir)
$$(call ExecuteWithLog, $$@, \
$$(COV_ENVIRONMENT) \
$$(FIXPATH) $$(JDK_UNDER_TEST)/bin/jaotc \
$$($1_JAOTC_OPTS) --output $$@ --module $$($1_MODULE) \
)
$$(call ExecuteWithLog, $$@.check, ( \
$$(FIXPATH) $$(JDK_UNDER_TEST)/bin/java \
$$($1_VM_OPTIONS) -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions \
-XX:+PrintAOT -XX:+UseAOTStrictLoading \
-XX:AOTLibrary=$$@ -version \
> $$@.verify-aot \
))
$1_AOT_OPTIONS += -XX:+UnlockExperimentalVMOptions
$1_AOT_OPTIONS += -XX:AOTLibrary=$$($1_AOT_LIB)
$1_AOT_TARGETS += $$($1_AOT_LIB)
endef
################################################################################
# Optionally create AOT libraries before running tests.
# Note, this could not be done during JDK build time.
################################################################################
# Parameter 1 is the name of the rule.
#
# Remaining parameters are named arguments.
# MODULES The modules to generate a library for
# VM_OPTIONS List of JVM arguments to use when creating libraries
#
# After calling this, the following variables are defined
# $1_AOT_OPTIONS List of all java options needed to use the AOT libraries
# $1_AOT_TARGETS List of all targets that the test rule will need to depend on
#
SetupAot = $(NamedParamsMacroTemplate)
define SetupAotBody
$$(info Running with AOTd libraries for $$($1_MODULES))
# Put aot libraries in a separate directory so they are not deleted between
# test runs and may be reused between make invocations.
$$(foreach m, $$($1_MODULES), \
$$(eval $$(call SetupAotModule, $1_$$m, \
MODULE := $$m, \
BIN := $$(TEST_SUPPORT_DIR)/aot/$1, \
VM_OPTIONS := $$($1_VM_OPTIONS), \
)) \
$$(eval $1_AOT_OPTIONS += $$($1_$$m_AOT_OPTIONS)) \
$$(eval $1_AOT_TARGETS += $$($1_$$m_AOT_TARGETS)) \
)
endef
################################################################################
# Setup global test running parameters
################################################################################
@ -282,7 +192,6 @@ endif
$(eval $(call SetTestOpt,VM_OPTIONS,JTREG))
$(eval $(call SetTestOpt,JAVA_OPTIONS,JTREG))
$(eval $(call SetTestOpt,AOT_MODULES,JTREG))
$(eval $(call SetTestOpt,JOBS,JTREG))
$(eval $(call SetTestOpt,TIMEOUT_FACTOR,JTREG))
@ -293,7 +202,7 @@ $(eval $(call ParseKeywordVariable, JTREG, \
TEST_MODE ASSERT VERBOSE RETAIN MAX_MEM RUN_PROBLEM_LISTS \
RETRY_COUNT MAX_OUTPUT, \
STRING_KEYWORDS := OPTIONS JAVA_OPTIONS VM_OPTIONS KEYWORDS \
EXTRA_PROBLEM_LISTS AOT_MODULES LAUNCHER_OPTIONS, \
EXTRA_PROBLEM_LISTS LAUNCHER_OPTIONS, \
))
ifneq ($(JTREG), )
@ -305,11 +214,10 @@ endif
$(eval $(call SetTestOpt,VM_OPTIONS,GTEST))
$(eval $(call SetTestOpt,JAVA_OPTIONS,GTEST))
$(eval $(call SetTestOpt,AOT_MODULES,GTEST))
$(eval $(call ParseKeywordVariable, GTEST, \
SINGLE_KEYWORDS := REPEAT, \
STRING_KEYWORDS := OPTIONS VM_OPTIONS JAVA_OPTIONS AOT_MODULES, \
STRING_KEYWORDS := OPTIONS VM_OPTIONS JAVA_OPTIONS, \
))
ifneq ($(GTEST), )
@ -592,14 +500,7 @@ define SetupRunGtestTestBody
$1_GTEST_REPEAT := --gtest_repeat=$$(GTEST_REPEAT)
endif
ifneq ($$(GTEST_AOT_MODULES), )
$$(eval $$(call SetupAot, $1, \
MODULES := $$(GTEST_AOT_MODULES), \
VM_OPTIONS := $$(GTEST_VM_OPTIONS) $$(GTEST_JAVA_OPTIONS), \
))
endif
run-test-$1: pre-run-test $$($1_AOT_TARGETS)
run-test-$1: pre-run-test
$$(call LogWarn)
$$(call LogWarn, Running test '$$($1_TEST)')
$$(call MakeDir, $$($1_TEST_RESULTS_DIR) $$($1_TEST_SUPPORT_DIR))
@ -610,7 +511,7 @@ define SetupRunGtestTestBody
--gtest_output=xml:$$($1_TEST_RESULTS_DIR)/gtest.xml \
--gtest_catch_exceptions=0 \
$$($1_GTEST_REPEAT) $$(GTEST_OPTIONS) $$(GTEST_VM_OPTIONS) \
$$(GTEST_JAVA_OPTIONS) $$($1_AOT_OPTIONS) \
$$(GTEST_JAVA_OPTIONS) \
> >($(TEE) $$($1_TEST_RESULTS_DIR)/gtest.txt) \
&& $$(ECHO) $$$$? > $$($1_EXITCODE) \
|| $$(ECHO) $$$$? > $$($1_EXITCODE) \
@ -934,17 +835,6 @@ define SetupRunJtregTestBody
endif
endif
ifneq ($$(JTREG_AOT_MODULES), )
$$(eval $$(call SetupAot, $1, \
MODULES := $$(JTREG_AOT_MODULES), \
VM_OPTIONS := $$(JTREG_VM_OPTIONS) $$(JTREG_JAVA_OPTIONS), \
))
endif
ifneq ($$($1_AOT_OPTIONS), )
$1_JTREG_BASIC_OPTIONS += -vmoptions:"$$($1_AOT_OPTIONS)"
endif
clean-workdir-$1:
$$(RM) -r $$($1_TEST_SUPPORT_DIR)
@ -979,7 +869,7 @@ define SetupRunJtregTestBody
done
endif
run-test-$1: pre-run-test clean-workdir-$1 $$($1_AOT_TARGETS)
run-test-$1: pre-run-test clean-workdir-$1
$$(call LogWarn)
$$(call LogWarn, Running test '$$($1_TEST)')
$$(call MakeDir, $$($1_TEST_RESULTS_DIR) $$($1_TEST_SUPPORT_DIR) \
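
The hunks above also retire the AOT_MODULES keyword from TEST_OPTS, JTREG, and GTEST, which let the test makefiles AOT-compile modules before a run. A sketch of how that was typically invoked (test selector and module are illustrative):

# Removed by this commit: pre-compile java.base with jaotc before running tier1.
make test TEST=tier1 TEST_OPTS="AOT_MODULES=java.base"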


@ -1,5 +1,5 @@
#
# Copyright (c) 2017, 2020, Oracle and/or its affiliates. All rights reserved.
# Copyright (c) 2017, 2021, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it
@ -222,25 +222,6 @@ ifeq ($(MEMORY_SIZE), )
MEMORY_SIZE := 1024
endif
# Setup LD for AOT support
ifneq ($(DEVKIT_HOME), )
ifeq ($(OPENJDK_TARGET_OS), windows)
LD_JAOTC := $(DEVKIT_HOME)/VC/bin/x64/link.exe
LIBRARY_PREFIX :=
SHARED_LIBRARY_SUFFIX := .dll
else ifeq ($(OPENJDK_TARGET_OS), linux)
LD_JAOTC := $(DEVKIT_HOME)/bin/ld
LIBRARY_PREFIX := lib
SHARED_LIBRARY_SUFFIX := .so
else ifeq ($(OPENJDK_TARGET_OS), macosx)
LD_JAOTC := $(DEVKIT_HOME)/Xcode.app/Contents/Developer/Toolchains/XcodeDefault.xctoolchain/usr/bin/ld
LIBRARY_PREFIX := lib
SHARED_LIBRARY_SUFFIX := .dylib
endif
else
LD := ld
endif
ifneq ($(wildcard $(JDK_IMAGE_DIR)/template.xml), )
TEST_OPTS_JCOV := true
JCOV_IMAGE_DIR := $(JDK_IMAGE_DIR)
@ -286,9 +267,6 @@ $(call CreateNewSpec, $(NEW_SPEC), \
OPENJDK_TARGET_CPU_ENDIAN := $(OPENJDK_TARGET_CPU_ENDIAN), \
NUM_CORES := $(NUM_CORES), \
MEMORY_SIZE := $(MEMORY_SIZE), \
LD_JAOTC := $(LD_JAOTC), \
LIBRARY_PREFIX := $(LIBRARY_PREFIX), \
SHARED_LIBRARY_SUFFIX := $(SHARED_LIBRARY_SUFFIX), \
include $(TOPDIR)/make/RunTestsPrebuiltSpec.gmk, \
TEST_OPTS_JCOV := $(TEST_OPTS_JCOV), \
$(CUSTOM_NEW_SPEC_LINE), \


@ -44,7 +44,7 @@
m4_define(jvm_features_valid, m4_normalize( \
ifdef([custom_jvm_features_valid], custom_jvm_features_valid) \
\
aot cds compiler1 compiler2 dtrace epsilongc g1gc graal jfr jni-check \
cds compiler1 compiler2 dtrace epsilongc g1gc graal jfr jni-check \
jvmci jvmti link-time-opt management minimal nmt opt-size parallelgc \
serialgc services shenandoahgc static-build vm-structs zero zgc \
))
@ -55,7 +55,6 @@ m4_define(jvm_features_deprecated, m4_normalize(
))
# Feature descriptions
m4_define(jvm_feature_desc_aot, [enable ahead of time compilation (AOT)])
m4_define(jvm_feature_desc_cds, [enable class data sharing (CDS)])
m4_define(jvm_feature_desc_compiler1, [enable hotspot compiler C1])
m4_define(jvm_feature_desc_compiler2, [enable hotspot compiler C2])
@ -94,7 +93,6 @@ AC_DEFUN_ONCE([JVM_FEATURES_PARSE_OPTIONS],
# For historical reasons, some jvm features have their own, shorter names.
# Keep those as aliases for the --enable-jvm-feature-* style arguments.
UTIL_ALIASED_ARG_ENABLE(aot, --enable-jvm-feature-aot)
UTIL_ALIASED_ARG_ENABLE(cds, --enable-jvm-feature-cds)
UTIL_ALIASED_ARG_ENABLE(dtrace, --enable-jvm-feature-dtrace)
@ -229,34 +227,6 @@ AC_DEFUN([JVM_FEATURES_CHECK_AVAILABILITY],
fi
])
###############################################################################
# Check if the feature 'aot' is available on this platform.
#
AC_DEFUN_ONCE([JVM_FEATURES_CHECK_AOT],
[
JVM_FEATURES_CHECK_AVAILABILITY(aot, [
AC_MSG_CHECKING([if platform is supported by AOT])
# AOT is only available where JVMCI is available since it requires JVMCI.
if test "x$OPENJDK_TARGET_CPU" = "xx86_64"; then
AC_MSG_RESULT([yes])
elif test "x$OPENJDK_TARGET_OS-$OPENJDK_TARGET_CPU" = "xlinux-aarch64"; then
AC_MSG_RESULT([yes])
else
AC_MSG_RESULT([no, $OPENJDK_TARGET_OS-$OPENJDK_TARGET_CPU])
AVAILABLE=false
fi
AC_MSG_CHECKING([if AOT source code is present])
if test -e "${TOPDIR}/src/jdk.internal.vm.compiler" && \
test -e "${TOPDIR}/src/jdk.aot"; then
AC_MSG_RESULT([yes])
else
AC_MSG_RESULT([no, missing src/jdk.internal.vm.compiler or src/jdk.aot])
AVAILABLE=false
fi
])
])
###############################################################################
# Check if the feature 'cds' is available on this platform.
#
@ -440,7 +410,6 @@ AC_DEFUN_ONCE([JVM_FEATURES_PREPARE_PLATFORM],
# The checks below should add unavailable features to
# JVM_FEATURES_PLATFORM_UNAVAILABLE.
JVM_FEATURES_CHECK_AOT
JVM_FEATURES_CHECK_CDS
JVM_FEATURES_CHECK_DTRACE
JVM_FEATURES_CHECK_GRAAL
@ -476,7 +445,7 @@ AC_DEFUN([JVM_FEATURES_PREPARE_VARIANT],
elif test "x$variant" = "xcore"; then
JVM_FEATURES_VARIANT_UNAVAILABLE="cds minimal zero"
elif test "x$variant" = "xzero"; then
JVM_FEATURES_VARIANT_UNAVAILABLE="aot cds compiler1 compiler2 \
JVM_FEATURES_VARIANT_UNAVAILABLE="cds compiler1 compiler2 \
graal jvmci minimal zgc"
else
JVM_FEATURES_VARIANT_UNAVAILABLE="minimal zero"
@ -484,9 +453,9 @@ AC_DEFUN([JVM_FEATURES_PREPARE_VARIANT],
# Check which features should be off by default for this JVM variant.
if test "x$variant" = "xclient"; then
JVM_FEATURES_VARIANT_FILTER="aot compiler2 graal jvmci link-time-opt opt-size"
JVM_FEATURES_VARIANT_FILTER="compiler2 graal jvmci link-time-opt opt-size"
elif test "x$variant" = "xminimal"; then
JVM_FEATURES_VARIANT_FILTER="aot cds compiler2 dtrace epsilongc g1gc \
JVM_FEATURES_VARIANT_FILTER="cds compiler2 dtrace epsilongc g1gc \
graal jfr jni-check jvmci jvmti management nmt parallelgc services \
shenandoahgc vm-structs zgc"
if test "x$OPENJDK_TARGET_CPU" = xarm ; then
@ -497,7 +466,7 @@ AC_DEFUN([JVM_FEATURES_PREPARE_VARIANT],
link-time-opt"
fi
elif test "x$variant" = "xcore"; then
JVM_FEATURES_VARIANT_FILTER="aot compiler1 compiler2 graal jvmci \
JVM_FEATURES_VARIANT_FILTER="compiler1 compiler2 graal jvmci \
link-time-opt opt-size"
elif test "x$variant" = "xzero"; then
JVM_FEATURES_VARIANT_FILTER="jfr link-time-opt opt-size"
@ -574,10 +543,6 @@ AC_DEFUN([JVM_FEATURES_VERIFY],
variant=$1
# Verify that dependencies are met for inter-feature relations.
if JVM_FEATURES_IS_ACTIVE(aot) && ! JVM_FEATURES_IS_ACTIVE(graal); then
AC_MSG_ERROR([Specified JVM feature 'aot' requires feature 'graal' for variant '$variant'])
fi
if JVM_FEATURES_IS_ACTIVE(graal) && ! JVM_FEATURES_IS_ACTIVE(jvmci); then
AC_MSG_ERROR([Specified JVM feature 'graal' requires feature 'jvmci' for variant '$variant'])
fi
@ -597,9 +562,6 @@ AC_DEFUN([JVM_FEATURES_VERIFY],
# For backwards compatibility, disable a feature "globally" if one variant
# is missing the feature.
if ! JVM_FEATURES_IS_ACTIVE(aot); then
ENABLE_AOT="false"
fi
if ! JVM_FEATURES_IS_ACTIVE(cds); then
ENABLE_CDS="false"
fi
@ -630,7 +592,6 @@ AC_DEFUN_ONCE([JVM_FEATURES_SETUP],
# For backwards compatibility, tentatively enable these features "globally",
# and disable them in JVM_FEATURES_VERIFY if a variant is found that are
# missing any of them.
ENABLE_AOT="true"
ENABLE_CDS="true"
INCLUDE_GRAAL="true"
INCLUDE_JVMCI="true"
@ -669,7 +630,6 @@ AC_DEFUN_ONCE([JVM_FEATURES_SETUP],
AC_SUBST(JVM_FEATURES_zero)
AC_SUBST(JVM_FEATURES_custom)
AC_SUBST(ENABLE_AOT)
AC_SUBST(INCLUDE_GRAAL)
AC_SUBST(INCLUDE_JVMCI)
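
The configure changes above retire the 'aot' JVM feature along with its short alias. Until this commit it could be toggled like any other JVM feature; a sketch of the removed options (the jib profiles further down stop passing the first form):

# Removed by this commit: both forms disabled the 'aot' JVM feature.
bash configure --disable-jvm-feature-aot
bash configure --disable-aot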


@ -524,9 +524,6 @@ CPP := @CPP@
# The linker can be gcc or ld on unix systems, or link.exe on windows systems.
LD := @LD@
# Linker used by the jaotc tool for AOT compilation.
LD_JAOTC:=@LD_JAOTC@
# Xcode SDK path
SDKROOT:=@SDKROOT@
@ -766,7 +763,6 @@ TAR_INCLUDE_PARAM:=@TAR_INCLUDE_PARAM@
TAR_SUPPORTS_TRANSFORM:=@TAR_SUPPORTS_TRANSFORM@
# Build setup
ENABLE_AOT:=@ENABLE_AOT@
USE_EXTERNAL_LIBJPEG:=@USE_EXTERNAL_LIBJPEG@
USE_EXTERNAL_LIBGIF:=@USE_EXTERNAL_LIBGIF@
USE_EXTERNAL_LIBZ:=@USE_EXTERNAL_LIBZ@


@ -664,18 +664,12 @@ AC_DEFUN_ONCE([TOOLCHAIN_DETECT_TOOLCHAIN_CORE],
UTIL_LOOKUP_TOOLCHAIN_PROGS(LD, link)
TOOLCHAIN_VERIFY_LINK_BINARY(LD)
LDCXX="$LD"
# jaotc, being a Windows program, expects the linker to be supplied with the exe
# suffix, but without fixpath
LD_JAOTC="${LD##$FIXPATH }"
else
# All other toolchains use the compiler to link.
LD="$CC"
LDCXX="$CXX"
# jaotc expects 'ld' as the linker rather than the compiler.
UTIL_LOOKUP_TOOLCHAIN_PROGS(LD_JAOTC, ld)
fi
AC_SUBST(LD)
AC_SUBST(LD_JAOTC)
# FIXME: it should be CXXLD, according to standard (cf CXXCPP)
AC_SUBST(LDCXX)


@ -70,11 +70,6 @@ ifeq ($(INCLUDE_GRAAL), false)
MODULES_FILTER += jdk.internal.vm.compiler.management
endif
# Filter out aot specific modules if aot is disabled
ifeq ($(ENABLE_AOT), false)
MODULES_FILTER += jdk.aot
endif
# jpackage is only on windows, macosx, and linux
ifeq ($(call isTargetOs, windows macosx linux), false)
MODULES_FILTER += jdk.jpackage


@ -1,5 +1,5 @@
#
# Copyright (c) 2014, 2020, Oracle and/or its affiliates. All rights reserved.
# Copyright (c) 2014, 2021, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it
@ -44,7 +44,6 @@ LANGTOOLS_MODULES= \
# These modules require buildtools-hotspot to process for gensrc
HOTSPOT_MODULES= \
jdk.aot \
jdk.hotspot.agent \
jdk.internal.vm.ci \
jdk.internal.vm.compiler \


@ -251,7 +251,6 @@ var getJibProfilesCommon = function (input, data) {
configure_args: concat("--enable-jtreg-failure-handler",
"--with-exclude-translations=de,es,fr,it,ko,pt_BR,sv,ca,tr,cs,sk,ja_JP_A,ja_JP_HA,ja_JP_HI,ja_JP_I,zh_TW,zh_HK",
"--disable-manpages",
"--disable-jvm-feature-aot",
"--disable-jvm-feature-graal",
"--disable-jvm-feature-shenandoahgc",
versionArgs(input, common))


@ -1,5 +1,5 @@
#
# Copyright (c) 2014, 2020, Oracle and/or its affiliates. All rights reserved.
# Copyright (c) 2014, 2021, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it
@ -61,7 +61,6 @@ BOOT_MODULES= \
# should carefully be considered if it should be upgradeable or not.
UPGRADEABLE_PLATFORM_MODULES= \
java.compiler \
jdk.aot \
jdk.internal.vm.compiler \
jdk.internal.vm.compiler.management \
#


@ -129,14 +129,6 @@ ifneq ($(call check-jvm-feature, nmt), true)
memTracker.cpp nmtDCmd.cpp mallocSiteTable.cpp threadStackTracker.cpp
endif
ifneq ($(call check-jvm-feature, aot), true)
JVM_CFLAGS_FEATURES += -DINCLUDE_AOT=0
JVM_EXCLUDE_FILES += \
compiledIC_aot_x86_64.cpp compiledIC_aot_aarch64.cpp \
compilerRuntime.cpp aotCodeHeap.cpp aotCompiledMethod.cpp \
aotLoader.cpp compiledIC_aot.cpp
endif
ifneq ($(call check-jvm-feature, g1gc), true)
JVM_CFLAGS_FEATURES += -DINCLUDE_G1GC=0
JVM_EXCLUDE_PATTERNS += gc/g1


@ -1,5 +1,5 @@
#
# Copyright (c) 2016, 2020, Oracle and/or its affiliates. All rights reserved.
# Copyright (c) 2016, 2021, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it
@ -111,7 +111,6 @@ ifeq ($(call isTargetOs, windows), true)
-relativeSrcInclude hotspot \
-hidePath .hg \
-hidePath .jcheck \
-hidePath jdk.aot \
-hidePath jdk.hotspot.agent \
-hidePath jdk.internal.vm.ci \
-hidePath jdk.internal.vm.compiler \


@ -1,54 +0,0 @@
#
# Copyright (c) 2020, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License version 2 only, as
# published by the Free Software Foundation. Oracle designates this
# particular file as subject to the "Classpath" exception as provided
# by Oracle in the LICENSE file that accompanied this code.
#
# This code is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
# version 2 for more details (a copy is included in the LICENSE file that
# accompanied this code).
#
# You should have received a copy of the GNU General Public License version
# 2 along with this work; if not, write to the Free Software Foundation,
# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
# or visit www.oracle.com if you need additional information or have any
# questions.
#
# -parameters provides method's parameters information in class file,
# JVMCI compilers make use of that information for various sanity checks.
# Don't use Indy strings concatenation to have good JAOTC startup performance.
# The exports are needed since JVMCI is dynamically exported (see
# jdk.vm.ci.services.internal.ReflectionAccessJDK::openJVMCITo).
JAVAC_FLAGS += -parameters -XDstringConcat=inline \
--add-exports jdk.internal.vm.ci/jdk.vm.ci.aarch64=jdk.internal.vm.compiler,jdk.aot \
--add-exports jdk.internal.vm.ci/jdk.vm.ci.amd64=jdk.internal.vm.compiler,jdk.aot \
--add-exports jdk.internal.vm.ci/jdk.vm.ci.code=jdk.internal.vm.compiler,jdk.aot \
--add-exports jdk.internal.vm.ci/jdk.vm.ci.code.site=jdk.internal.vm.compiler,jdk.aot \
--add-exports jdk.internal.vm.ci/jdk.vm.ci.code.stack=jdk.internal.vm.compiler,jdk.aot \
--add-exports jdk.internal.vm.ci/jdk.vm.ci.common=jdk.internal.vm.compiler,jdk.aot \
--add-exports jdk.internal.vm.ci/jdk.vm.ci.hotspot=jdk.internal.vm.compiler,jdk.aot \
--add-exports jdk.internal.vm.ci/jdk.vm.ci.hotspot.aarch64=jdk.internal.vm.compiler,jdk.aot \
--add-exports jdk.internal.vm.ci/jdk.vm.ci.hotspot.amd64=jdk.internal.vm.compiler,jdk.aot \
--add-exports jdk.internal.vm.ci/jdk.vm.ci.meta=jdk.internal.vm.compiler,jdk.aot \
--add-exports jdk.internal.vm.ci/jdk.vm.ci.runtime=jdk.internal.vm.compiler,jdk.aot \
#
EXCLUDES += \
jdk.tools.jaotc.test
#
## WORKAROUND jdk.aot source structure issue
AOT_MODULESOURCEPATH := $(MODULESOURCEPATH) \
$(subst /$(MODULE)/,/*/, $(filter-out %processor/src, \
$(wildcard $(TOPDIR)/src/$(MODULE)/share/classes/*/src)))
MODULESOURCEPATH := $(call PathList, $(AOT_MODULESOURCEPATH))


@ -1,54 +0,0 @@
#
# Copyright (c) 2016, 2020, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License version 2 only, as
# published by the Free Software Foundation. Oracle designates this
# particular file as subject to the "Classpath" exception as provided
# by Oracle in the LICENSE file that accompanied this code.
#
# This code is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
# version 2 for more details (a copy is included in the LICENSE file that
# accompanied this code).
#
# You should have received a copy of the GNU General Public License version
# 2 along with this work; if not, write to the Free Software Foundation,
# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
# or visit www.oracle.com if you need additional information or have any
# questions.
#
include LauncherCommon.gmk
# The JVMCI exports are needed since JVMCI is normally dynamically exported
# (see jdk.vm.ci.services.internal.ReflectionAccessJDK::openJVMCITo).
$(eval $(call SetupBuildLauncher, jaotc, \
MAIN_CLASS := jdk.tools.jaotc.Main, \
EXTRA_JAVA_ARGS := -XX:+UnlockExperimentalVMOptions -XX:+EnableJVMCI \
--add-exports=jdk.internal.vm.ci/jdk.vm.ci.aarch64=$(call CommaList, jdk.internal.vm.compiler jdk.aot) \
--add-exports=jdk.internal.vm.ci/jdk.vm.ci.amd64=$(call CommaList, jdk.internal.vm.compiler jdk.aot) \
--add-exports=jdk.internal.vm.ci/jdk.vm.ci.code=$(call CommaList, jdk.internal.vm.compiler jdk.aot) \
--add-exports=jdk.internal.vm.ci/jdk.vm.ci.code.site=$(call CommaList, jdk.internal.vm.compiler jdk.aot) \
--add-exports=jdk.internal.vm.ci/jdk.vm.ci.code.stack=$(call CommaList, jdk.internal.vm.compiler jdk.aot) \
--add-exports=jdk.internal.vm.ci/jdk.vm.ci.common=$(call CommaList, jdk.internal.vm.compiler jdk.aot) \
--add-exports=jdk.internal.vm.ci/jdk.vm.ci.hotspot=$(call CommaList, jdk.internal.vm.compiler jdk.aot) \
, \
JAVA_ARGS := --add-exports=jdk.internal.vm.ci/jdk.vm.ci.hotspot.aarch64=$(call CommaList, jdk.internal.vm.compiler jdk.aot) \
--add-exports=jdk.internal.vm.ci/jdk.vm.ci.hotspot.amd64=$(call CommaList, jdk.internal.vm.compiler jdk.aot) \
--add-exports=jdk.internal.vm.ci/jdk.vm.ci.meta=$(call CommaList, jdk.internal.vm.compiler jdk.aot) \
--add-exports=jdk.internal.vm.ci/jdk.vm.ci.runtime=$(call CommaList, jdk.internal.vm.compiler jdk.aot) \
-XX:+UnlockExperimentalVMOptions -XX:+UseAOT \
-XX:+CalculateClassFingerprint \
-Djvmci.UseProfilingInformation=false \
-Dgraal.UseExceptionProbability=false \
-Djvmci.Compiler=graal \
--add-modules ALL-DEFAULT \
, \
))


@ -1,5 +1,5 @@
#
# Copyright (c) 2020, Oracle and/or its affiliates. All rights reserved.
# Copyright (c) 2020, 2021, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it
@ -39,7 +39,6 @@ JAVAC_FLAGS += -parameters -XDstringConcat=inline \
EXCLUDES += \
jdk.internal.vm.compiler.collections.test \
jdk.tools.jaotc.test \
org.graalvm.compiler.api.directives.test \
org.graalvm.compiler.api.test \
org.graalvm.compiler.asm.aarch64.test \


@ -1360,7 +1360,7 @@ source %{
// r27 is not allocatable when compressed oops is on and heapbase is not
// zero, compressed klass pointers doesn't use r27 after JDK-8234794
if (UseCompressedOops && (CompressedOops::ptrs_base() != NULL || UseAOT)) {
if (UseCompressedOops && (CompressedOops::ptrs_base() != NULL)) {
_NO_SPECIAL_REG32_mask.Remove(OptoReg::as_OptoReg(r27->as_VMReg()));
_NO_SPECIAL_REG_mask.SUBTRACT(_HEAPBASE_REG_mask);
_NO_SPECIAL_PTR_REG_mask.SUBTRACT(_HEAPBASE_REG_mask);


@ -1,5 +1,5 @@
/*
* Copyright (c) 2000, 2019, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2000, 2021, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2014, Red Hat Inc. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@ -74,7 +74,6 @@ friend class ArrayCopyStub;
// call stub: CompiledStaticCall::to_interp_stub_size() +
// CompiledStaticCall::to_trampoline_stub_size()
_call_stub_size = 13 * NativeInstruction::instruction_size,
_call_aot_stub_size = 0,
_exception_handler_size = DEBUG_ONLY(1*K) NOT_DEBUG(175),
_deopt_handler_size = 7 * NativeInstruction::instruction_size
};


@ -1411,7 +1411,7 @@ void LIRGenerator::volatile_field_load(LIR_Address* address, LIR_Opr result,
// membar it's possible for a simple Dekker test to fail if loads
// use LD;DMB but stores use STLR. This can happen if C2 compiles
// the stores in one method and C1 compiles the loads in another.
if (!CompilerConfig::is_c1_only_no_aot_or_jvmci()) {
if (!CompilerConfig::is_c1_only_no_jvmci()) {
__ membar();
}
__ volatile_load_mem_reg(address, result, info);


@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2019, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2021, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2014, 2018, Red Hat Inc. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@ -60,15 +60,6 @@ address CompiledStaticCall::emit_to_interp_stub(CodeBuffer &cbuf, address mark)
// static stub relocation stores the instruction address of the call
__ relocate(static_stub_Relocation::spec(mark));
#if INCLUDE_AOT
// Don't create a Metadata reloc if we're generating immutable PIC.
if (cbuf.immutable_PIC()) {
__ movptr(rmethod, 0);
__ movptr(rscratch1, 0);
__ br(rscratch1);
} else
#endif
{
__ emit_static_call_stub();
}
@ -96,63 +87,8 @@ int CompiledStaticCall::reloc_to_interp_stub() {
return 4; // 3 in emit_to_interp_stub + 1 in emit_call
}
#if INCLUDE_AOT
#define __ _masm.
void CompiledStaticCall::emit_to_aot_stub(CodeBuffer &cbuf, address mark) {
if (!UseAOT) {
return;
}
// Stub is fixed up when the corresponding call is converted from
// calling compiled code to calling aot code.
// mov r, imm64_aot_code_address
// jmp r
if (mark == NULL) {
mark = cbuf.insts_mark(); // Get mark within main instrs section.
}
// Note that the code buffer's insts_mark is always relative to insts.
// That's why we must use the macroassembler to generate a stub.
MacroAssembler _masm(&cbuf);
address base =
__ start_a_stub(to_aot_stub_size());
guarantee(base != NULL, "out of space");
// Static stub relocation stores the instruction address of the call.
__ relocate(static_stub_Relocation::spec(mark, true /* is_aot */));
// Load destination AOT code address.
__ movptr(rscratch1, 0); // address is zapped till fixup time.
// This is recognized as unresolved by relocs/nativeinst/ic code.
__ br(rscratch1);
assert(__ pc() - base <= to_aot_stub_size(), "wrong stub size");
// Update current stubs pointer and restore insts_end.
__ end_a_stub();
}
#undef __
int CompiledStaticCall::to_aot_stub_size() {
if (UseAOT) {
return 5 * 4; // movz; movk; movk; movk; br
} else {
return 0;
}
}
// Relocation entries for call stub, compiled java to aot.
int CompiledStaticCall::reloc_to_aot_stub() {
if (UseAOT) {
return 5 * 4; // movz; movk; movk; movk; br
} else {
return 0;
}
}
#endif // INCLUDE_AOT
void CompiledDirectStaticCall::set_to_interpreted(const methodHandle& callee, address entry) {
address stub = find_stub(false /* is_aot */);
address stub = find_stub();
guarantee(stub != NULL, "stub not found");
if (TraceICs) {
@ -188,10 +124,8 @@ void CompiledDirectStaticCall::set_stub_to_clean(static_stub_Relocation* static_
NativeMovConstReg* method_holder
= nativeMovConstReg_at(stub + NativeInstruction::instruction_size);
method_holder->set_data(0);
if (!static_stub->is_aot()) {
NativeJump* jump = nativeJump_at(method_holder->next_instruction_address());
jump->set_jump_destination((address)-1);
}
NativeJump* jump = nativeJump_at(method_holder->next_instruction_address());
jump->set_jump_destination((address)-1);
}
//-----------------------------------------------------------------------------
@ -204,7 +138,7 @@ void CompiledDirectStaticCall::verify() {
_call->verify_alignment();
// Verify stub.
address stub = find_stub(false /* is_aot */);
address stub = find_stub();
assert(stub != NULL, "no stub found for static call");
// Creation also verifies the object.
NativeMovConstReg* method_holder


@ -1,104 +0,0 @@
/*
* Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2018, Red Hat Inc. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
#include "aot/compiledIC_aot.hpp"
#include "code/codeCache.hpp"
#include "memory/resourceArea.hpp"
#include "memory/universe.hpp"
void CompiledDirectStaticCall::set_to_far(const methodHandle& callee, address entry) {
if (TraceICs) {
ResourceMark rm;
tty->print_cr("CompiledDirectStaticCall@" INTPTR_FORMAT ": set_to_far %s",
p2i(instruction_address()),
callee->name_and_sig_as_C_string());
}
set_destination_mt_safe(entry);
}
void CompiledPltStaticCall::set_to_interpreted(const methodHandle& callee, address entry) {
address stub = find_stub();
guarantee(stub != NULL, "stub not found");
if (TraceICs) {
ResourceMark rm;
tty->print_cr("CompiledPltStaticCall@" INTPTR_FORMAT ": set_to_interpreted %s",
p2i(instruction_address()),
callee->name_and_sig_as_C_string());
}
// Creation also verifies the object.
NativeLoadGot* method_loader = nativeLoadGot_at(stub);
NativeGotJump* jump = nativeGotJump_at(method_loader->next_instruction_address());
intptr_t data = method_loader->data();
address destination = jump->destination();
assert(data == 0 || data == (intptr_t)callee(),
"a) MT-unsafe modification of inline cache");
assert(destination == (address)Universe::non_oop_word()
|| destination == entry,
"b) MT-unsafe modification of inline cache");
// Update stub.
method_loader->set_data((intptr_t)callee());
jump->set_jump_destination(entry);
// Update jump to call.
set_destination_mt_safe(stub);
}
#ifdef NEVER_CALLED
void CompiledPltStaticCall::set_stub_to_clean(static_stub_Relocation* static_stub) {
// Reset stub.
address stub = static_stub->addr();
assert(stub != NULL, "stub not found");
assert(CompiledICLocker::is_safe(stub), "mt unsafe call");
// Creation also verifies the object.
NativeLoadGot* method_loader = nativeLoadGot_at(stub);
NativeGotJump* jump = nativeGotJump_at(method_loader->next_instruction_address());
method_loader->set_data(0);
jump->set_jump_destination((address)-1);
}
#endif
#ifndef PRODUCT
void CompiledPltStaticCall::verify() {
// Verify call.
_call->verify();
#ifdef ASSERT
CodeBlob *cb = CodeCache::find_blob_unsafe((address) _call);
assert(cb && cb->is_aot(), "CompiledPltStaticCall can only be used on AOTCompiledMethod");
#endif
// Verify stub.
address stub = find_stub();
assert(stub != NULL, "no stub found for static call");
// Creation also verifies the object.
NativeLoadGot* method_loader = nativeLoadGot_at(stub);
NativeGotJump* jump = nativeGotJump_at(method_loader->next_instruction_address());
// Verify state.
assert(is_clean() || is_call_to_compiled() || is_call_to_interpreted(), "sanity check");
}
#endif // !PRODUCT


@ -51,7 +51,7 @@ void LIR_OpShenandoahCompareAndSwap::emit_code(LIR_Assembler* masm) {
ShenandoahBarrierSet::assembler()->cmpxchg_oop(masm->masm(), addr, cmpval, newval, /*acquire*/ true, /*release*/ true, /*is_cae*/ false, result);
if (CompilerConfig::is_c1_only_no_aot_or_jvmci()) {
if (CompilerConfig::is_c1_only_no_jvmci()) {
// The membar here is necessary to prevent reordering between the
// release store in the CAS above and a subsequent volatile load.
// However for tiered compilation C1 inserts a full barrier before


@ -1084,7 +1084,7 @@ public:
address trampoline_call(Address entry, CodeBuffer* cbuf = NULL);
static bool far_branches() {
return ReservedCodeCacheSize > branch_range || UseAOT;
return ReservedCodeCacheSize > branch_range;
}
// Jumps that can reach anywhere in the code cache.


@ -476,16 +476,9 @@ bool NativeInstruction::is_stop() {
void NativeJump::patch_verified_entry(address entry, address verified_entry, address dest) {
assert(dest == SharedRuntime::get_handle_wrong_method_stub(), "expected fixed destination of patch");
#ifdef ASSERT
// This may be the temporary nmethod generated while we're AOT
// compiling. Such an nmethod doesn't begin with a NOP but with an ADRP.
if (! (CalculateClassFingerprint && UseAOT && is_adrp_at(verified_entry))) {
assert(nativeInstruction_at(verified_entry)->is_jump_or_nop()
|| nativeInstruction_at(verified_entry)->is_sigill_zombie_not_entrant(),
"Aarch64 cannot replace non-jump with jump");
}
#endif
assert(nativeInstruction_at(verified_entry)->is_jump_or_nop()
|| nativeInstruction_at(verified_entry)->is_sigill_zombie_not_entrant(),
"Aarch64 cannot replace non-jump with jump");
// Patch this nmethod atomically.
if (Assembler::reachable_from_branch_at(verified_entry, dest)) {


@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2020, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2021, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2014, 2018, Red Hat Inc. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@ -230,16 +230,6 @@ public:
return is_call_at(return_address - NativeCall::return_address_offset);
}
#if INCLUDE_AOT
// Return true iff a call from instr to target is out of range.
// Used for calls from JIT- to AOT-compiled code.
static bool is_far_call(address instr, address target) {
// On AArch64 we use trampolines which can reach anywhere in the
// address space, so calls are never out of range.
return false;
}
#endif
// MT-safe patching of a call instruction.
static void insert(address code_pos, address entry);


@ -594,7 +594,7 @@ void SharedRuntime::gen_i2c_adapter(MacroAssembler *masm,
__ ldr(rscratch1, Address(rmethod, in_bytes(Method::from_compiled_offset())));
#if INCLUDE_JVMCI
if (EnableJVMCI || UseAOT) {
if (EnableJVMCI) {
// check if this call should be routed towards a specific entry point
__ ldr(rscratch2, Address(rthread, in_bytes(JavaThread::jvmci_alternate_call_target_offset())));
Label no_alternative_target;
@ -2177,7 +2177,7 @@ void SharedRuntime::generate_deopt_blob() {
// Setup code generation tools
int pad = 0;
#if INCLUDE_JVMCI
if (EnableJVMCI || UseAOT) {
if (EnableJVMCI) {
pad += 512; // Increase the buffer size when compiling for JVMCI
}
#endif
@ -2252,7 +2252,7 @@ void SharedRuntime::generate_deopt_blob() {
int implicit_exception_uncommon_trap_offset = 0;
int uncommon_trap_offset = 0;
if (EnableJVMCI || UseAOT) {
if (EnableJVMCI) {
implicit_exception_uncommon_trap_offset = __ pc() - start;
__ ldr(lr, Address(rthread, in_bytes(JavaThread::jvmci_implicit_exception_pc_offset())));
@ -2378,7 +2378,7 @@ void SharedRuntime::generate_deopt_blob() {
__ reset_last_Java_frame(false);
#if INCLUDE_JVMCI
if (EnableJVMCI || UseAOT) {
if (EnableJVMCI) {
__ bind(after_fetch_unroll_info_call);
}
#endif
@ -2541,7 +2541,7 @@ void SharedRuntime::generate_deopt_blob() {
_deopt_blob = DeoptimizationBlob::create(&buffer, oop_maps, 0, exception_offset, reexecute_offset, frame_size_in_words);
_deopt_blob->set_unpack_with_exception_in_tls_offset(exception_in_tls_offset);
#if INCLUDE_JVMCI
if (EnableJVMCI || UseAOT) {
if (EnableJVMCI) {
_deopt_blob->set_uncommon_trap_offset(uncommon_trap_offset);
_deopt_blob->set_implicit_exception_uncommon_trap_offset(implicit_exception_uncommon_trap_offset);
}


@ -502,7 +502,7 @@ address TemplateInterpreterGenerator::generate_deopt_entry_for(TosState state,
#if INCLUDE_JVMCI
// Check if we need to take lock at entry of synchronized method. This can
// only occur on method entry so emit it only for vtos with step 0.
if ((EnableJVMCI || UseAOT) && state == vtos && step == 0) {
if (EnableJVMCI && state == vtos && step == 0) {
Label L;
__ ldrb(rscratch1, Address(rthread, JavaThread::pending_monitorenter_offset()));
__ cbz(rscratch1, L);


@ -2425,7 +2425,7 @@ void TemplateTable::getfield_or_static(int byte_no, bool is_static, RewriteContr
// membar it's possible for a simple Dekker test to fail if loads
// use LDR;DMB but stores use STLR. This can happen if C2 compiles
// the stores in one method and we interpret the loads in another.
if (!CompilerConfig::is_c1_or_interpreter_only_no_aot_or_jvmci()){
if (!CompilerConfig::is_c1_or_interpreter_only_no_jvmci()){
Label notVolatile;
__ tbz(raw_flags, ConstantPoolCacheEntry::is_volatile_shift, notVolatile);
__ membar(MacroAssembler::AnyAny);
@ -3028,7 +3028,7 @@ void TemplateTable::fast_accessfield(TosState state)
// membar it's possible for a simple Dekker test to fail if loads
// use LDR;DMB but stores use STLR. This can happen if C2 compiles
// the stores in one method and we interpret the loads in another.
if (!CompilerConfig::is_c1_or_interpreter_only_no_aot_or_jvmci()) {
if (!CompilerConfig::is_c1_or_interpreter_only_no_jvmci()) {
Label notVolatile;
__ tbz(r3, ConstantPoolCacheEntry::is_volatile_shift, notVolatile);
__ membar(MacroAssembler::AnyAny);
@ -3090,7 +3090,7 @@ void TemplateTable::fast_xaccess(TosState state)
// membar it's possible for a simple Dekker test to fail if loads
// use LDR;DMB but stores use STLR. This can happen if C2 compiles
// the stores in one method and we interpret the loads in another.
if (!CompilerConfig::is_c1_or_interpreter_only_no_aot_or_jvmci()) {
if (!CompilerConfig::is_c1_or_interpreter_only_no_jvmci()) {
Label notVolatile;
__ ldrw(r3, Address(r2, in_bytes(ConstantPoolCache::base_offset() +
ConstantPoolCacheEntry::flags_offset())));


@ -1,5 +1,5 @@
/*
* Copyright (c) 2008, 2019, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2008, 2021, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -53,7 +53,6 @@
enum {
_call_stub_size = 16,
_call_aot_stub_size = 0,
_exception_handler_size = PRODUCT_ONLY(68) NOT_PRODUCT(68+60),
_deopt_handler_size = 16
};


@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2019, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2021, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -102,7 +102,7 @@ int CompiledStaticCall::to_interp_stub_size() {
}
void CompiledDirectStaticCall::set_to_interpreted(const methodHandle& callee, address entry) {
address stub = find_stub(/*is_aot*/ false);
address stub = find_stub();
guarantee(stub != NULL, "stub not found");
if (TraceICs) {
@ -149,7 +149,7 @@ void CompiledDirectStaticCall::verify() {
_call->verify_alignment();
// Verify stub.
address stub = find_stub(/*is_aot*/ false);
address stub = find_stub();
assert(stub != NULL, "no stub found for static call");
// Creation also verifies the object.
NativeMovConstReg* method_holder = nativeMovConstReg_at(stub);


@ -1,5 +1,5 @@
/*
* Copyright (c) 2000, 2019, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2000, 2021, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2012, 2015 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@ -62,7 +62,6 @@
enum {
_static_call_stub_size = 4 * BytesPerInstWord + MacroAssembler::b64_patchable_size, // or smaller
_call_stub_size = _static_call_stub_size + MacroAssembler::trampoline_stub_size, // or smaller
_call_aot_stub_size = 0,
_exception_handler_size = MacroAssembler::b64_patchable_size, // or smaller
_deopt_handler_size = MacroAssembler::bl64_patchable_size
};
@ -70,11 +69,7 @@ enum {
// '_static_call_stub_size' is only used on ppc (see LIR_Assembler::emit_static_call_stub()
// in c1_LIRAssembler_ppc.cpp. The other, shared getters are defined in c1_LIRAssembler.hpp
static int static_call_stub_size() {
if (UseAOT) {
return _static_call_stub_size + _call_aot_stub_size;
} else {
return _static_call_stub_size;
}
return _static_call_stub_size;
}
#endif // CPU_PPC_C1_LIRASSEMBLER_PPC_HPP


@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2019, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2021, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2012, 2015 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@ -164,7 +164,7 @@ int CompiledStaticCall::reloc_to_interp_stub() {
}
void CompiledDirectStaticCall::set_to_interpreted(const methodHandle& callee, address entry) {
address stub = find_stub(/*is_aot*/ false);
address stub = find_stub();
guarantee(stub != NULL, "stub not found");
if (TraceICs) {
@ -210,7 +210,7 @@ void CompiledDirectStaticCall::verify() {
_call->verify_alignment();
// Verify stub.
address stub = find_stub(/*is_aot*/ false);
address stub = find_stub();
assert(stub != NULL, "no stub found for static call");
// Creation also verifies the object.
NativeMovConstReg* method_holder = nativeMovConstReg_at(stub + IC_pos_in_java_to_interp_stub);


@ -1,5 +1,5 @@
/*
* Copyright (c) 2016, 2019, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2016, 2021, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2016 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@ -47,7 +47,6 @@
enum {
_call_stub_size = 512, // See Compile::MAX_stubs_size and CompiledStaticCall::emit_to_interp_stub.
_call_aot_stub_size = 0,
_exception_handler_size = DEBUG_ONLY(1*K) NOT_DEBUG(128),
_deopt_handler_size = DEBUG_ONLY(1*K) NOT_DEBUG(64)
};


@ -1,5 +1,5 @@
/*
* Copyright (c) 2016, 2019, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2016, 2021, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2016, 2019 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@ -92,7 +92,7 @@ int CompiledStaticCall::reloc_to_interp_stub() {
}
void CompiledDirectStaticCall::set_to_interpreted(const methodHandle& callee, address entry) {
address stub = find_stub(/*is_aot*/ false);
address stub = find_stub();
guarantee(stub != NULL, "stub not found");
if (TraceICs) {
@ -137,7 +137,7 @@ void CompiledDirectStaticCall::verify() {
_call->verify_alignment();
// Verify stub.
address stub = find_stub(/*is_aot*/ false);
address stub = find_stub();
assert(stub != NULL, "no stub found for static call");
// Creation also verifies the object.
NativeMovConstReg* method_holder = nativeMovConstReg_at(stub + NativeCall::get_IC_pos_in_java_to_interp_stub());


@ -2919,23 +2919,13 @@ void LIR_Assembler::emit_static_call_stub() {
// make sure that the displacement word of the call ends up word aligned
__ align(BytesPerWord, __ offset() + NativeMovConstReg::instruction_size + NativeCall::displacement_offset);
__ relocate(static_stub_Relocation::spec(call_pc, false /* is_aot */));
__ relocate(static_stub_Relocation::spec(call_pc));
__ mov_metadata(rbx, (Metadata*)NULL);
// must be set to -1 at code generation time
assert(((__ offset() + 1) % BytesPerWord) == 0, "must be aligned");
// On 64bit this will die since it will take a movq & jmp, must be only a jmp
__ jump(RuntimeAddress(__ pc()));
if (UseAOT) {
// Trampoline to aot code
__ relocate(static_stub_Relocation::spec(call_pc, true /* is_aot */));
#ifdef _LP64
__ mov64(rax, CONST64(0)); // address is zapped till fixup time.
#else
__ movl(rax, 0xdeadffff); // address is zapped till fixup time.
#endif
__ jmp(rax);
}
assert(__ offset() - start <= call_stub_size(), "stub too big");
__ end_a_stub();
}


@ -1,5 +1,5 @@
/*
* Copyright (c) 2000, 2019, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2000, 2021, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -48,7 +48,6 @@
enum {
_call_stub_size = NOT_LP64(15) LP64_ONLY(28),
_call_aot_stub_size = NOT_LP64(7) LP64_ONLY(12),
_exception_handler_size = DEBUG_ONLY(1*K) NOT_DEBUG(175),
_deopt_handler_size = NOT_LP64(10) LP64_ONLY(17)
};


@ -336,7 +336,7 @@ void C1_MacroAssembler::build_frame(int frame_size_in_bytes, int bang_size_in_by
mov(rbp, rsp);
}
#if !defined(_LP64) && defined(COMPILER2)
if (UseSSE < 2 && !CompilerConfig::is_c1_only_no_aot_or_jvmci()) {
if (UseSSE < 2 && !CompilerConfig::is_c1_only_no_jvmci()) {
// c2 leaves fpu stack dirty. Clean it on entry
empty_FPU_stack();
}


@ -723,7 +723,7 @@ OopMapSet* Runtime1::generate_handle_exception(StubID id, StubAssembler *sasm) {
}
#if !defined(_LP64) && defined(COMPILER2)
if (UseSSE < 2 && !CompilerConfig::is_c1_only_no_aot_or_jvmci()) {
if (UseSSE < 2 && !CompilerConfig::is_c1_only_no_jvmci()) {
// C2 can leave the fpu stack dirty
__ empty_FPU_stack();
}


@ -1,122 +0,0 @@
/*
* Copyright (c) 2016, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
#include "precompiled.hpp"
#include "aot/compiledIC_aot.hpp"
#include "code/codeCache.hpp"
#include "memory/resourceArea.hpp"
void CompiledDirectStaticCall::set_to_far(const methodHandle& callee, address entry) {
address stub = find_stub(true /* is_far */);
guarantee(stub != NULL, "stub not found");
if (TraceICs) {
ResourceMark rm;
tty->print_cr("CompiledDirectStaticCall@" INTPTR_FORMAT ": set_to_far %s",
p2i(instruction_address()),
callee->name_and_sig_as_C_string());
}
// Creation also verifies the object.
// mov rax,imm_aot_addr
// jmp rax
NativeMovConstReg* destination_holder = nativeMovConstReg_at(stub);
#ifdef ASSERT
// read the value once
intptr_t data = destination_holder->data();
assert(data == 0 || data == (intptr_t)entry,
"MT-unsafe modification of inline cache");
#endif
// Update stub.
destination_holder->set_data((intptr_t)entry);
// Update jump to call.
set_destination_mt_safe(stub);
}
void CompiledPltStaticCall::set_to_interpreted(const methodHandle& callee, address entry) {
address stub = find_stub();
guarantee(stub != NULL, "stub not found");
if (TraceICs) {
ResourceMark rm;
tty->print_cr("CompiledPltStaticCall@" INTPTR_FORMAT ": set_to_interpreted %s",
p2i(instruction_address()),
callee->name_and_sig_as_C_string());
}
// Creation also verifies the object.
NativeLoadGot* method_loader = nativeLoadGot_at(stub);
NativeGotJump* jump = nativeGotJump_at(method_loader->next_instruction_address());
intptr_t data = method_loader->data();
address destination = jump->destination();
assert(data == 0 || data == (intptr_t)callee(),
"a) MT-unsafe modification of inline cache");
assert(destination == (address)-1 || destination == entry,
"b) MT-unsafe modification of inline cache");
// Update stub.
method_loader->set_data((intptr_t)callee());
jump->set_jump_destination(entry);
// Update jump to call.
set_destination_mt_safe(stub);
}
#ifdef NEVER_CALLED
void CompiledPltStaticCall::set_stub_to_clean(static_stub_Relocation* static_stub) {
// Reset stub.
address stub = static_stub->addr();
assert(stub != NULL, "stub not found");
assert(CompiledICLocker::is_safe(stub), "mt unsafe call");
// Creation also verifies the object.
NativeLoadGot* method_loader = nativeLoadGot_at(stub);
NativeGotJump* jump = nativeGotJump_at(method_loader->next_instruction_address());
method_loader->set_data(0);
jump->set_jump_destination((address)-1);
}
#endif
#ifndef PRODUCT
void CompiledPltStaticCall::verify() {
// Verify call.
_call->verify();
#ifdef ASSERT
CodeBlob *cb = CodeCache::find_blob_unsafe((address) _call);
assert(cb && cb->is_aot(), "CompiledPltStaticCall can only be used on AOTCompiledMethod");
#endif
// Verify stub.
address stub = find_stub();
assert(stub != NULL, "no stub found for static call");
// Creation also verifies the object.
NativeLoadGot* method_loader = nativeLoadGot_at(stub);
NativeGotJump* jump = nativeGotJump_at(method_loader->next_instruction_address());
// Verify state.
assert(is_clean() || is_call_to_compiled() || is_call_to_interpreted(), "sanity check");
}
#endif // !PRODUCT


@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2019, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2021, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -54,7 +54,7 @@ address CompiledStaticCall::emit_to_interp_stub(CodeBuffer &cbuf, address mark)
return NULL; // CodeBuffer::expand failed.
}
// Static stub relocation stores the instruction address of the call.
__ relocate(static_stub_Relocation::spec(mark, false), Assembler::imm_operand);
__ relocate(static_stub_Relocation::spec(mark), Assembler::imm_operand);
// Static stub relocation also tags the Method* in the code-stream.
__ mov_metadata(rbx, (Metadata*) NULL); // Method is zapped till fixup time.
// This is recognized as unresolved by relocs/nativeinst/ic code.
@ -83,68 +83,8 @@ int CompiledStaticCall::reloc_to_interp_stub() {
return 4; // 3 in emit_to_interp_stub + 1 in emit_call
}
#if INCLUDE_AOT
#define __ _masm.
void CompiledStaticCall::emit_to_aot_stub(CodeBuffer &cbuf, address mark) {
if (!UseAOT) {
return;
}
// Stub is fixed up when the corresponding call is converted from
// calling compiled code to calling aot code.
// movq rax, imm64_aot_code_address
// jmp rax
if (mark == NULL) {
mark = cbuf.insts_mark(); // Get mark within main instrs section.
}
// Note that the code buffer's insts_mark is always relative to insts.
// That's why we must use the macroassembler to generate a stub.
MacroAssembler _masm(&cbuf);
address base =
__ start_a_stub(to_aot_stub_size());
guarantee(base != NULL, "out of space");
// Static stub relocation stores the instruction address of the call.
__ relocate(static_stub_Relocation::spec(mark, true /* is_aot */), Assembler::imm_operand);
// Load destination AOT code address.
#ifdef _LP64
__ mov64(rax, CONST64(0)); // address is zapped till fixup time.
#else
__ movl(rax, 0); // address is zapped till fixup time.
#endif
// This is recognized as unresolved by relocs/nativeinst/ic code.
__ jmp(rax);
assert(__ pc() - base <= to_aot_stub_size(), "wrong stub size");
// Update current stubs pointer and restore insts_end.
__ end_a_stub();
}
#undef __
int CompiledStaticCall::to_aot_stub_size() {
if (UseAOT) {
return NOT_LP64(7) // movl; jmp
LP64_ONLY(12); // movq (1+1+8); jmp (2)
} else {
return 0;
}
}
// Relocation entries for call stub, compiled java to aot.
int CompiledStaticCall::reloc_to_aot_stub() {
if (UseAOT) {
return 2; // 1 in emit_to_aot_stub + 1 in emit_call
} else {
return 0;
}
}
#endif // INCLUDE_AOT
void CompiledDirectStaticCall::set_to_interpreted(const methodHandle& callee, address entry) {
address stub = find_stub(false /* is_aot */);
address stub = find_stub();
guarantee(stub != NULL, "stub not found");
if (TraceICs) {
@ -175,10 +115,8 @@ void CompiledDirectStaticCall::set_stub_to_clean(static_stub_Relocation* static_
// Creation also verifies the object.
NativeMovConstReg* method_holder = nativeMovConstReg_at(stub);
method_holder->set_data(0);
if (!static_stub->is_aot()) {
NativeJump* jump = nativeJump_at(method_holder->next_instruction_address());
jump->set_jump_destination((address)-1);
}
NativeJump* jump = nativeJump_at(method_holder->next_instruction_address());
jump->set_jump_destination((address)-1);
}
@ -193,11 +131,11 @@ void CompiledDirectStaticCall::verify() {
#ifdef ASSERT
CodeBlob *cb = CodeCache::find_blob_unsafe((address) _call);
assert(cb && !cb->is_aot(), "CompiledDirectStaticCall cannot be used on AOTCompiledMethod");
assert(cb != NULL, "sanity");
#endif
// Verify stub.
address stub = find_stub(false /* is_aot */);
address stub = find_stub();
assert(stub != NULL, "no stub found for static call");
// Creation also verifies the object.
NativeMovConstReg* method_holder = nativeMovConstReg_at(stub);


@ -70,7 +70,7 @@ const bool CCallingConventionRequiresIntsAsLongs = false;
#endif
#if INCLUDE_JVMCI
#define COMPRESSED_CLASS_POINTERS_DEPENDS_ON_COMPRESSED_OOPS (EnableJVMCI || UseAOT)
#define COMPRESSED_CLASS_POINTERS_DEPENDS_ON_COMPRESSED_OOPS EnableJVMCI
#else
#define COMPRESSED_CLASS_POINTERS_DEPENDS_ON_COMPRESSED_OOPS false
#endif


@ -198,13 +198,6 @@ class NativeCall: public NativeInstruction {
nativeCall_at(instr)->destination() == target;
}
#if INCLUDE_AOT
static bool is_far_call(address instr, address target) {
intptr_t disp = target - (instr + sizeof(int32_t));
return !Assembler::is_simm32(disp);
}
#endif
// MT-safe patching of a call instruction.
static void insert(address code_pos, address entry);


@ -859,7 +859,7 @@ void SharedRuntime::gen_i2c_adapter(MacroAssembler *masm,
__ movptr(r11, Address(rbx, in_bytes(Method::from_compiled_offset())));
#if INCLUDE_JVMCI
if (EnableJVMCI || UseAOT) {
if (EnableJVMCI) {
// check if this call should be routed towards a specific entry point
__ cmpptr(Address(r15_thread, in_bytes(JavaThread::jvmci_alternate_call_target_offset())), 0);
Label no_alternative_target;
@ -2648,7 +2648,7 @@ void SharedRuntime::generate_deopt_blob() {
pad += 1024;
}
#if INCLUDE_JVMCI
if (EnableJVMCI || UseAOT) {
if (EnableJVMCI) {
pad += 512; // Increase the buffer size when compiling for JVMCI
}
#endif
@ -2722,7 +2722,7 @@ void SharedRuntime::generate_deopt_blob() {
int implicit_exception_uncommon_trap_offset = 0;
int uncommon_trap_offset = 0;
if (EnableJVMCI || UseAOT) {
if (EnableJVMCI) {
implicit_exception_uncommon_trap_offset = __ pc() - start;
__ pushptr(Address(r15_thread, in_bytes(JavaThread::jvmci_implicit_exception_pc_offset())));
@ -2837,7 +2837,7 @@ void SharedRuntime::generate_deopt_blob() {
__ reset_last_Java_frame(false);
#if INCLUDE_JVMCI
if (EnableJVMCI || UseAOT) {
if (EnableJVMCI) {
__ bind(after_fetch_unroll_info_call);
}
#endif
@ -3000,7 +3000,7 @@ void SharedRuntime::generate_deopt_blob() {
_deopt_blob = DeoptimizationBlob::create(&buffer, oop_maps, 0, exception_offset, reexecute_offset, frame_size_in_words);
_deopt_blob->set_unpack_with_exception_in_tls_offset(exception_in_tls_offset);
#if INCLUDE_JVMCI
-   if (EnableJVMCI || UseAOT) {
+   if (EnableJVMCI) {
_deopt_blob->set_uncommon_trap_offset(uncommon_trap_offset);
_deopt_blob->set_implicit_exception_uncommon_trap_offset(implicit_exception_uncommon_trap_offset);
}

View File

@ -261,7 +261,7 @@ address TemplateInterpreterGenerator::generate_deopt_entry_for(TosState state, i
#if INCLUDE_JVMCI
// Check if we need to take lock at entry of synchronized method. This can
// only occur on method entry so emit it only for vtos with step 0.
-   if ((EnableJVMCI || UseAOT) && state == vtos && step == 0) {
+   if (EnableJVMCI && state == vtos && step == 0) {
Label L;
__ cmpb(Address(thread, JavaThread::pending_monitorenter_offset()), 0);
__ jcc(Assembler::zero, L);

View File

@ -2229,9 +2229,6 @@ encode %{
ciEnv::current()->record_failure("CodeCache is full");
return;
}
- #if INCLUDE_AOT
-     CompiledStaticCall::emit_to_aot_stub(cbuf, mark);
- #endif
}
%}

View File

@ -130,9 +130,6 @@ static FILETIME process_kernel_time;
#if defined(USE_VECTORED_EXCEPTION_HANDLING)
PVOID topLevelVectoredExceptionHandler = NULL;
LPTOP_LEVEL_EXCEPTION_FILTER previousUnhandledExceptionFilter = NULL;
- #elif INCLUDE_AOT
- PVOID topLevelVectoredExceptionHandler = NULL;
- LONG WINAPI topLevelVectoredExceptionFilter(struct _EXCEPTION_POINTERS* exceptionInfo);
#endif
// save DLL module handle, used by GetModuleFileName
@ -153,7 +150,7 @@ BOOL WINAPI DllMain(HINSTANCE hinst, DWORD reason, LPVOID reserved) {
if (ForceTimeHighResolution) {
timeEndPeriod(1L);
}
- #if defined(USE_VECTORED_EXCEPTION_HANDLING) || INCLUDE_AOT
+ #if defined(USE_VECTORED_EXCEPTION_HANDLING)
if (topLevelVectoredExceptionHandler != NULL) {
RemoveVectoredExceptionHandler(topLevelVectoredExceptionHandler);
topLevelVectoredExceptionHandler = NULL;
@ -2684,7 +2681,7 @@ LONG WINAPI topLevelExceptionFilter(struct _EXCEPTION_POINTERS* exceptionInfo) {
return EXCEPTION_CONTINUE_SEARCH;
}
- #if defined(USE_VECTORED_EXCEPTION_HANDLING) || INCLUDE_AOT
+ #if defined(USE_VECTORED_EXCEPTION_HANDLING)
LONG WINAPI topLevelVectoredExceptionFilter(struct _EXCEPTION_POINTERS* exceptionInfo) {
PEXCEPTION_RECORD exceptionRecord = exceptionInfo->ExceptionRecord;
#if defined(_M_ARM64)
@ -2700,9 +2697,7 @@ LONG WINAPI topLevelVectoredExceptionFilter(struct _EXCEPTION_POINTERS* exceptio
return topLevelExceptionFilter(exceptionInfo);
}
- // Handle the case where we get an implicit exception in AOT generated
- // code. AOT DLL's loaded are not registered for structured exceptions.
- // If the exception occurred in the codeCache or AOT code, pass control
+ // If the exception occurred in the codeCache, pass control
// to our normal exception handler.
CodeBlob* cb = CodeCache::find_blob(pc);
if (cb != NULL) {
@ -4151,14 +4146,6 @@ jint os::init_2(void) {
#if defined(USE_VECTORED_EXCEPTION_HANDLING)
topLevelVectoredExceptionHandler = AddVectoredExceptionHandler(1, topLevelVectoredExceptionFilter);
previousUnhandledExceptionFilter = SetUnhandledExceptionFilter(topLevelUnhandledExceptionFilter);
- #elif INCLUDE_AOT
-   // If AOT is enabled we need to install a vectored exception handler
-   // in order to forward implicit exceptions from code in AOT
-   // generated DLLs. This is necessary since these DLLs are not
-   // registered for structured exceptions like codecache methods are.
-   if (AOTLibrary != NULL && (UseAOT || FLAG_IS_DEFAULT(UseAOT))) {
-     topLevelVectoredExceptionHandler = AddVectoredExceptionHandler( 1, topLevelVectoredExceptionFilter);
-   }
#endif
// for debugging float code generation bugs

File diff suppressed because it is too large

View File

@ -1,310 +0,0 @@
/*
* Copyright (c) 2016, 2020, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
#ifndef SHARE_AOT_AOTCODEHEAP_HPP
#define SHARE_AOT_AOTCODEHEAP_HPP
#include "aot/aotCompiledMethod.hpp"
#include "classfile/symbolTable.hpp"
#include "metaprogramming/integralConstant.hpp"
#include "oops/metadata.hpp"
#include "oops/method.hpp"
enum CodeState {
not_set = 0, // _aot field is not set yet
in_use = 1, // _aot field is set to corresponding AOTCompiledMethod
invalid = 2 // AOT code is invalidated because dependencies failed
};
typedef struct {
AOTCompiledMethod* _aot;
CodeState _state; // State change cases: not_set->in_use, not_set->invalid
} CodeToAMethod;
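// ---------------------------------------------------------------------------
// [Editor's sketch, not part of the original file] The CodeState/CodeToAMethod
// pair above is a write-once state table: entries start as not_set and move
// exactly once, to in_use or invalid. A minimal standalone model of that idiom
// (illustrative names, not HotSpot API):
#include <cassert>

enum SketchState { sketch_not_set = 0, sketch_in_use, sketch_invalid };

struct SketchEntry {
  void*       _method;  // stands in for AOTCompiledMethod*
  SketchState _state;
};

// Returns false if another caller already decided this entry.
inline bool sketch_publish(SketchEntry& e, void* m, SketchState target) {
  assert(target != sketch_not_set);
  if (e._state != sketch_not_set) return false;  // not_set is the only mutable state
  e._method = (target == sketch_in_use) ? m : nullptr;
  e._state  = target;
  return true;
}
// ---------------------------------------------------------------------------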
class ClassLoaderData;
class AOTClass {
public:
ClassLoaderData* _classloader;
};
typedef struct {
int _name_offset;
int _code_offset;
int _meta_offset;
int _metadata_got_offset;
int _metadata_got_size;
int _code_id;
} AOTMethodOffsets;
typedef struct {
const char* _name;
address _code;
aot_metadata* _meta;
jlong* _state_adr;
address _metadata_table;
int _metadata_size;
} AOTMethodData;
typedef struct {
int _got_index;
int _class_id;
int _compiled_methods_offset;
int _dependent_methods_offset;
uint64_t _fingerprint;
} AOTKlassData;
typedef struct {
int _version;
int _class_count;
int _method_count;
int _klasses_got_size;
int _metadata_got_size;
int _oop_got_size;
int _jvm_version_offset;
enum {
AOT_SHARED_VERSION = 1
};
} AOTHeader;
typedef struct {
enum { CONFIG_SIZE = 7 * jintSize + 9 };
// 7 int values
int _config_size;
int _narrowOopShift;
int _narrowKlassShift;
int _contendedPaddingWidth;
int _objectAlignment;
int _codeSegmentSize;
int _gc;
// byte[9] array maps to the boolean values here
bool _debug_VM;
bool _useCompressedOops;
bool _useCompressedClassPointers;
bool _useTLAB;
bool _useBiasedLocking;
bool _tieredAOT;
bool _enableContended;
bool _restrictContended;
bool _omitAssertions;
} AOTConfiguration;
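// ---------------------------------------------------------------------------
// [Editor's sketch, not part of the original file] CONFIG_SIZE above works out
// to 7 * jintSize + 9 = 7 * 4 + 9 = 37 bytes: the seven int fields followed by
// the nine one-byte booleans. The loader compares each recorded value against
// the running VM; a minimal model of such a check (hypothetical name, not the
// real verify_flag signature):
#include <cstdio>

inline bool sketch_verify_int_flag(int recorded, int current, const char* name) {
  if (recorded != current) {
    std::fprintf(stderr, "AOT library built with %s=%d, but VM runs with %d\n",
                 name, recorded, current);
    return false;  // the real loader marks the whole library invalid
  }
  return true;
}
// ---------------------------------------------------------------------------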
class AOTLib : public CHeapObj<mtCode> {
static bool _narrow_oop_shift_initialized;
static int _narrow_oop_shift;
static int _narrow_klass_shift;
bool _valid;
void* _dl_handle;
const int _dso_id;
const char* _name;
// VM configuration during AOT compilation
AOTConfiguration* _config;
AOTHeader* _header;
void handle_config_error(const char* format, ...) ATTRIBUTE_PRINTF(2, 3);
public:
AOTLib(void* handle, const char* name, int dso_id);
virtual ~AOTLib();
static int narrow_oop_shift() { return _narrow_oop_shift; }
static int narrow_klass_shift() { return _narrow_klass_shift; }
static bool narrow_oop_shift_initialized() { return _narrow_oop_shift_initialized; }
bool is_valid() const {
return _valid;
}
const char* name() const {
return _name;
}
void* dl_handle() const {
return _dl_handle;
}
int id() const {
return _dso_id;
}
AOTHeader* header() const {
return _header;
}
AOTConfiguration* config() const {
return _config;
}
void verify_config();
void verify_flag(bool aot_flag, bool flag, const char* name);
void verify_flag(int aot_flag, int flag, const char* name);
address load_symbol(const char *name);
};
class AOTCodeHeap : public CodeHeap {
AOTLib* _lib;
int _aot_id;
int _class_count;
int _method_count;
AOTClass* _classes;
CodeToAMethod* _code_to_aot;
address _code_space;
address _code_segments;
jlong* _method_state;
// Collect metaspace info: names -> address in .got section
const char* _metaspace_names;
address _method_metadata;
address _methods_offsets;
address _klasses_offsets;
address _dependencies;
Metadata** _klasses_got;
Metadata** _metadata_got;
oop* _oop_got;
int _klasses_got_size;
int _metadata_got_size;
int _oop_got_size;
// Collect stubs info
int* _stubs_offsets;
bool _lib_symbols_initialized;
void adjust_boundaries(AOTCompiledMethod* method) {
char* low = (char*)method->code_begin();
if (low < low_boundary()) {
_memory.set_low_boundary(low);
_memory.set_low(low);
}
char* high = (char *)method->code_end();
if (high > high_boundary()) {
_memory.set_high_boundary(high);
_memory.set_high(high);
}
assert(_method_count > 0, "methods count should be set already");
}
void register_stubs();
void link_shared_runtime_symbols();
void link_stub_routines_symbols();
void link_os_symbols();
void link_graal_runtime_symbols();
void link_global_lib_symbols();
void link_klass(const Klass* klass);
void link_known_klasses();
void publish_aot(const methodHandle& mh, AOTMethodData* method_data, int code_id);
AOTCompiledMethod* next_in_use_at(int index) const;
// Find klass in SystemDictionary for aot metadata.
static Klass* lookup_klass(const char* name, int len, const Method* method, Thread* THREAD);
public:
AOTCodeHeap(AOTLib* lib);
virtual ~AOTCodeHeap();
AOTCompiledMethod* find_aot(address p) const;
virtual void* find_start(void* p) const;
virtual CodeBlob* find_blob_unsafe(void* start) const;
virtual void* first() const;
virtual void* next(void *p) const;
AOTKlassData* find_klass(InstanceKlass* ik);
bool load_klass_data(InstanceKlass* ik, Thread* thread);
Klass* get_klass_from_got(const char* klass_name, int klass_len, const Method* method);
bool is_dependent_method(Klass* dependee, AOTCompiledMethod* aot);
void mark_evol_dependent_methods(InstanceKlass* dependee);
const char* get_name_at(int offset) {
return _metaspace_names + offset;
}
void oops_do(OopClosure* f);
void metadata_do(MetadataClosure* f);
void got_metadata_do(MetadataClosure* f);
#ifdef ASSERT
bool got_contains(Metadata **p) {
return (p >= &_metadata_got[0] && p < &_metadata_got[_metadata_got_size]) ||
(p >= &_klasses_got[0] && p < &_klasses_got[_klasses_got_size]);
}
#endif
int dso_id() const { return _lib->id(); }
int aot_id() const { return _aot_id; }
int method_count() { return _method_count; }
AOTCompiledMethod* get_code_desc_at_index(int index) {
if (index < _method_count && _code_to_aot[index]._state == in_use) {
AOTCompiledMethod* m = _code_to_aot[index]._aot;
assert(m != NULL, "AOT method should be set");
if (!m->is_runtime_stub()) {
return m;
}
}
return NULL;
}
static Method* find_method(Klass* klass, Thread* thread, const char* method_name);
void cleanup_inline_caches();
DEBUG_ONLY( int verify_icholder_relocations(); )
void alive_methods_do(void f(CompiledMethod* nm));
#ifndef PRODUCT
static int klasses_seen;
static int aot_klasses_found;
static int aot_klasses_fp_miss;
static int aot_klasses_cl_miss;
static int aot_methods_found;
static void print_statistics();
#endif
bool reconcile_dynamic_invoke(AOTCompiledMethod* caller, InstanceKlass* holder, int index, Method* adapter_method, Klass *appendix_klass);
private:
AOTKlassData* find_klass(const char* name);
void sweep_dependent_methods(int* indexes, int methods_cnt);
void sweep_dependent_methods(AOTKlassData* klass_data);
void sweep_dependent_methods(InstanceKlass* ik);
void sweep_method(AOTCompiledMethod* aot);
bool reconcile_dynamic_klass(AOTCompiledMethod *caller, InstanceKlass* holder, int index, Klass *dyno, const char *descriptor1, const char *descriptor2 = NULL);
bool reconcile_dynamic_method(AOTCompiledMethod *caller, InstanceKlass* holder, int index, Method *adapter_method);
};
#endif // SHARE_AOT_AOTCODEHEAP_HPP

View File

@ -1,441 +0,0 @@
/*
* Copyright (c) 2016, 2021, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
#include "precompiled.hpp"
#include "aot/aotCodeHeap.hpp"
#include "aot/aotLoader.hpp"
#include "aot/compiledIC_aot.hpp"
#include "code/codeCache.hpp"
#include "code/compiledIC.hpp"
#include "code/nativeInst.hpp"
#include "compiler/compilerOracle.hpp"
#include "gc/shared/cardTableBarrierSet.hpp"
#include "gc/shared/collectedHeap.hpp"
#include "oops/klass.inline.hpp"
#include "oops/method.inline.hpp"
#include "runtime/frame.inline.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/java.hpp"
#include "runtime/orderAccess.hpp"
#include "runtime/os.hpp"
#include "runtime/safepointVerifiers.hpp"
#include "runtime/sharedRuntime.hpp"
#include "utilities/sizes.hpp"
#include "utilities/xmlstream.hpp"
#include <stdio.h>
#if 0
static void metadata_oops_do(Metadata** metadata_begin, Metadata **metadata_end, OopClosure* f) {
// Visit the metadata/oops section
for (Metadata** p = metadata_begin; p < metadata_end; p++) {
Metadata* m = *p;
intptr_t meta = (intptr_t)m;
if ((meta & 1) == 1) {
// already resolved
m = (Metadata*)(meta & ~1);
} else {
continue;
}
assert(Metaspace::contains(m), "");
if (m->is_method()) {
m = ((Method*)m)->method_holder();
}
assert(m->is_klass(), "must be");
oop o = ((Klass*)m)->klass_holder();
if (o != NULL) {
f->do_oop(&o);
}
}
}
#endif
address* AOTCompiledMethod::orig_pc_addr(const frame* fr) {
return (address*) ((address)fr->unextended_sp() + _meta->orig_pc_offset());
}
oop AOTCompiledMethod::oop_at(int index) const {
if (index == 0) { // 0 is reserved
return NULL;
}
Metadata** entry = _metadata_got + (index - 1);
intptr_t meta = (intptr_t)*entry;
if ((meta & 1) == 1) {
// already resolved
Klass* k = (Klass*)(meta & ~1);
return k->java_mirror();
}
// The entry is a string which we need to resolve.
const char* meta_name = _heap->get_name_at((int)meta);
int klass_len = Bytes::get_Java_u2((address)meta_name);
const char* klass_name = meta_name + 2;
// Quick check the current method's holder.
Klass* k = _method->method_holder();
ResourceMark rm; // for signature_name()
if (strncmp(k->signature_name(), klass_name, klass_len) != 0) { // Does not match?
// Search klass in got cells in DSO which have this compiled method.
k = _heap->get_klass_from_got(klass_name, klass_len, _method);
}
int method_name_len = Bytes::get_Java_u2((address)klass_name + klass_len);
guarantee(method_name_len == 0, "only klass is expected here");
meta = ((intptr_t)k) | 1;
*entry = (Metadata*)meta; // Should be atomic on x64
return k->java_mirror();
}
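// ---------------------------------------------------------------------------
// [Editor's sketch, not part of the original file] The GOT cells read above use
// the pointer's low bit as a "resolved" tag: an untagged value is an offset
// into the metadata name table, a tagged value is the resolved pointer with
// bit 0 set. One aligned store flips a cell from name to pointer, which is
// what the "Should be atomic on x64" comments rely on. The idiom in isolation:
#include <cstdint>

inline bool sketch_is_resolved(std::intptr_t cell) { return (cell & 1) == 1; }
inline void* sketch_decode(std::intptr_t cell) { return (void*)(cell & ~(std::intptr_t)1); }
inline std::intptr_t sketch_encode(void* p) { return (std::intptr_t)p | 1; }
// ---------------------------------------------------------------------------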
Metadata* AOTCompiledMethod::metadata_at(int index) const {
if (index == 0) { // 0 is reserved
return NULL;
}
assert(index - 1 < _metadata_size, "");
{
Metadata** entry = _metadata_got + (index - 1);
intptr_t meta = (intptr_t)*entry;
if ((meta & 1) == 1) {
// already resolved
Metadata *m = (Metadata*)(meta & ~1);
return m;
}
// The entry is a string which we need to resolve.
const char* meta_name = _heap->get_name_at((int)meta);
int klass_len = Bytes::get_Java_u2((address)meta_name);
const char* klass_name = meta_name + 2;
// Quick check the current method's holder.
Klass* k = _method->method_holder();
bool klass_matched = true;
ResourceMark rm; // for signature_name() and find_method()
if (strncmp(k->signature_name(), klass_name, klass_len) != 0) { // Does not match?
// Search klass in got cells in DSO which have this compiled method.
k = _heap->get_klass_from_got(klass_name, klass_len, _method);
klass_matched = false;
}
int method_name_len = Bytes::get_Java_u2((address)klass_name + klass_len);
if (method_name_len == 0) { // Array or Klass name only?
meta = ((intptr_t)k) | 1;
*entry = (Metadata*)meta; // Should be atomic on x64
return (Metadata*)k;
} else { // Method
// Quick check the current method's name.
Method* m = _method;
int signature_len = Bytes::get_Java_u2((address)klass_name + klass_len + 2 + method_name_len);
int full_len = 2 + klass_len + 2 + method_name_len + 2 + signature_len;
if (!klass_matched || memcmp(_name, meta_name, full_len) != 0) { // Does not match?
Thread* thread = Thread::current();
const char* method_name = klass_name + klass_len;
m = AOTCodeHeap::find_method(k, thread, method_name);
}
meta = ((intptr_t)m) | 1;
*entry = (Metadata*)meta; // Should be atomic on x64
return (Metadata*)m;
}
}
ShouldNotReachHere(); return NULL;
}
void AOTCompiledMethod::do_unloading(bool unloading_occurred) {
unload_nmethod_caches(unloading_occurred);
}
bool AOTCompiledMethod::make_not_entrant_helper(int new_state) {
NoSafepointVerifier nsv;
{
// Enter critical section. Does not block for safepoint.
MutexLocker pl(CompiledMethod_lock, Mutex::_no_safepoint_check_flag);
if (*_state_adr == new_state) {
// another thread already performed this transition so nothing
// to do, but return false to indicate this.
return false;
}
// Change state
OrderAccess::storestore();
*_state_adr = new_state;
// Log the transition once
log_state_change();
#if COMPILER1_OR_COMPILER2
// Remain non-entrant forever
if (new_state == not_entrant && method() != NULL) {
method()->set_aot_code(NULL);
}
#endif // COMPILER1_OR_COMPILER2
// Remove AOTCompiledMethod from method.
if (method() != NULL) {
method()->unlink_code(this);
}
} // leave critical region under CompiledMethod_lock
if (TraceCreateZombies) {
ResourceMark m;
const char *new_state_str = (new_state == not_entrant) ? "not entrant" : "not used";
tty->print_cr("aot method <" INTPTR_FORMAT "> %s code made %s", p2i(this), this->method() ? this->method()->name_and_sig_as_C_string() : "null", new_state_str);
}
return true;
}
bool AOTCompiledMethod::make_entrant() {
#if COMPILER1_OR_COMPILER2
assert(!method()->is_old(), "reviving evolved method!");
NoSafepointVerifier nsv;
{
// Enter critical section. Does not block for safepoint.
MutexLocker pl(CompiledMethod_lock, Mutex::_no_safepoint_check_flag);
if (*_state_adr == in_use || *_state_adr == not_entrant) {
// another thread already performed this transition so nothing
// to do, but return false to indicate this.
return false;
}
// Change state
OrderAccess::storestore();
*_state_adr = in_use;
// Log the transition once
log_state_change();
} // leave critical region under CompiledMethod_lock
if (TraceCreateZombies) {
ResourceMark m;
tty->print_cr("aot method <" INTPTR_FORMAT "> %s code made entrant", p2i(this), this->method() ? this->method()->name_and_sig_as_C_string() : "null");
}
return true;
#else
return false;
#endif // COMPILER1_OR_COMPILER2
}
// Iterate over metadata calling this function. Used by RedefineClasses
// Copied from nmethod::metadata_do
void AOTCompiledMethod::metadata_do(MetadataClosure* f) {
address low_boundary = verified_entry_point();
{
// Visit all immediate references that are embedded in the instruction stream.
RelocIterator iter(this, low_boundary);
while (iter.next()) {
if (iter.type() == relocInfo::metadata_type ) {
metadata_Relocation* r = iter.metadata_reloc();
// In this metadata, we must only follow those metadatas directly embedded in
// the code. Other metadatas (oop_index>0) are seen as part of
// the metadata section below.
assert(1 == (r->metadata_is_immediate()) +
(r->metadata_addr() >= metadata_begin() && r->metadata_addr() < metadata_end()),
"metadata must be found in exactly one place");
if (r->metadata_is_immediate() && r->metadata_value() != NULL) {
Metadata* md = r->metadata_value();
if (md != _method) f->do_metadata(md);
}
} else if (iter.type() == relocInfo::virtual_call_type) {
ResourceMark rm;
// Check compiledIC holders associated with this nmethod
CompiledIC *ic = CompiledIC_at(&iter);
if (ic->is_icholder_call()) {
CompiledICHolder* cichk = ic->cached_icholder();
f->do_metadata(cichk->holder_metadata());
f->do_metadata(cichk->holder_klass());
} else {
// Get Klass* or NULL (if value is -1) from GOT cell of virtual call PLT stub.
Metadata* ic_oop = ic->cached_metadata();
if (ic_oop != NULL) {
f->do_metadata(ic_oop);
}
}
} else if (iter.type() == relocInfo::static_call_type ||
iter.type() == relocInfo::opt_virtual_call_type) {
// Check Method* in AOT c2i stub for other calls.
Metadata* meta = (Metadata*)nativeLoadGot_at(nativePltCall_at(iter.addr())->plt_c2i_stub())->data();
if (meta != NULL) {
f->do_metadata(meta);
}
}
}
}
// Visit the metadata section
for (Metadata** p = metadata_begin(); p < metadata_end(); p++) {
Metadata* m = *p;
intptr_t meta = (intptr_t)m;
if ((meta & 1) == 1) {
// already resolved
m = (Metadata*)(meta & ~1);
} else {
continue;
}
assert(Metaspace::contains(m), "");
f->do_metadata(m);
}
// Visit metadata not embedded in the other places.
if (_method != NULL) f->do_metadata(_method);
}
void AOTCompiledMethod::print() const {
print_on(tty, "AOTCompiledMethod");
}
void AOTCompiledMethod::print_on(outputStream* st) const {
print_on(st, "AOTCompiledMethod");
}
// Print out more verbose output usually for a newly created aot method.
void AOTCompiledMethod::print_on(outputStream* st, const char* msg) const {
if (st != NULL) {
ttyLocker ttyl;
st->print("%7d ", (int) tty->time_stamp().milliseconds());
st->print("%4d ", _aot_id); // print compilation number
st->print(" aot[%2d]", _heap->dso_id());
// Stubs have _method == NULL
if (_method == NULL) {
st->print(" %s", _name);
} else {
ResourceMark m;
st->print(" %s", _method->name_and_sig_as_C_string());
}
if (Verbose) {
st->print(" entry at " INTPTR_FORMAT, p2i(_code));
}
if (msg != NULL) {
st->print(" %s", msg);
}
st->cr();
}
}
void AOTCompiledMethod::print_value_on(outputStream* st) const {
st->print("AOTCompiledMethod ");
print_on(st, NULL);
}
// Print a short set of xml attributes to identify this aot method. The
// output should be embedded in some other element.
void AOTCompiledMethod::log_identity(xmlStream* log) const {
log->print(" aot_id='%d'", _aot_id);
log->print(" aot='%2d'", _heap->dso_id());
}
void AOTCompiledMethod::log_state_change() const {
if (LogCompilation) {
ResourceMark m;
if (xtty != NULL) {
ttyLocker ttyl; // keep the following output all in one block
if (*_state_adr == not_entrant) {
xtty->begin_elem("make_not_entrant thread='" UINTX_FORMAT "'",
os::current_thread_id());
} else if (*_state_adr == not_used) {
xtty->begin_elem("make_not_used thread='" UINTX_FORMAT "'",
os::current_thread_id());
} else if (*_state_adr == in_use) {
xtty->begin_elem("make_entrant thread='" UINTX_FORMAT "'",
os::current_thread_id());
}
log_identity(xtty);
xtty->stamp();
xtty->end_elem();
}
}
if (PrintCompilation) {
ResourceMark m;
if (*_state_adr == not_entrant) {
print_on(tty, "made not entrant");
} else if (*_state_adr == not_used) {
print_on(tty, "made not used");
} else if (*_state_adr == in_use) {
print_on(tty, "made entrant");
}
}
}
NativeInstruction* PltNativeCallWrapper::get_load_instruction(virtual_call_Relocation* r) const {
return nativeLoadGot_at(_call->plt_load_got());
}
void PltNativeCallWrapper::verify_resolve_call(address dest) const {
CodeBlob* db = CodeCache::find_blob_unsafe(dest);
if (db == NULL) {
assert(dest == _call->plt_resolve_call(), "sanity");
}
}
void PltNativeCallWrapper::set_to_interpreted(const methodHandle& method, CompiledICInfo& info) {
assert(!info.to_aot(), "only for nmethod");
CompiledPltStaticCall* csc = CompiledPltStaticCall::at(instruction_address());
csc->set_to_interpreted(method, info.entry());
}
NativeCallWrapper* AOTCompiledMethod::call_wrapper_at(address call) const {
return new PltNativeCallWrapper((NativePltCall*) call);
}
NativeCallWrapper* AOTCompiledMethod::call_wrapper_before(address return_pc) const {
return new PltNativeCallWrapper(nativePltCall_before(return_pc));
}
CompiledStaticCall* AOTCompiledMethod::compiledStaticCall_at(Relocation* call_site) const {
return CompiledPltStaticCall::at(call_site);
}
CompiledStaticCall* AOTCompiledMethod::compiledStaticCall_at(address call_site) const {
return CompiledPltStaticCall::at(call_site);
}
CompiledStaticCall* AOTCompiledMethod::compiledStaticCall_before(address return_addr) const {
return CompiledPltStaticCall::before(return_addr);
}
address AOTCompiledMethod::call_instruction_address(address pc) const {
NativePltCall* pltcall = nativePltCall_before(pc);
return pltcall->instruction_address();
}
void AOTCompiledMethod::clear_inline_caches() {
assert(SafepointSynchronize::is_at_safepoint(), "cleaning of IC's only allowed at safepoint");
if (is_zombie()) {
return;
}
ResourceMark rm;
RelocIterator iter(this);
while (iter.next()) {
iter.reloc()->clear_inline_cache();
if (iter.type() == relocInfo::opt_virtual_call_type) {
CompiledIC* cic = CompiledIC_at(&iter);
assert(cic->is_clean(), "!");
nativePltCall_at(iter.addr())->set_stub_to_clean();
}
}
}

View File

@ -1,323 +0,0 @@
/*
* Copyright (c) 2016, 2021, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
#ifndef SHARE_AOT_AOTCOMPILEDMETHOD_HPP
#define SHARE_AOT_AOTCOMPILEDMETHOD_HPP
#include "code/codeCache.hpp"
#include "code/compiledIC.hpp"
#include "code/compiledMethod.hpp"
#include "code/pcDesc.hpp"
#include "code/relocInfo.hpp"
class AOTCodeHeap;
class aot_metadata {
private:
int _size;
int _code_size;
int _entry;
int _verified_entry;
int _exception_handler_offset;
int _deopt_handler_offset;
int _deopt_mh_handler_offset;
int _stubs_offset;
int _frame_size;
// location in frame (offset from sp) where deopt can store the original
// pc during a deopt.
int _orig_pc_offset;
int _unsafe_access;
int _pc_desc_begin;
int _scopes_begin;
int _reloc_begin;
int _exception_table_begin;
int _nul_chk_table_begin;
int _oopmap_begin;
address at_offset(size_t offset) const { return ((address) this) + offset; }
public:
int code_size() const { return _code_size; }
int frame_size() const { return _frame_size / HeapWordSize; }
PcDesc *scopes_pcs_begin() const { return (PcDesc *) at_offset(_pc_desc_begin); }
PcDesc *scopes_pcs_end() const { return (PcDesc *) at_offset(_scopes_begin); }
address scopes_data_begin() const { return at_offset(_scopes_begin); }
address scopes_data_end() const { return at_offset(_reloc_begin); }
relocInfo* relocation_begin() const { return (relocInfo*) at_offset(_reloc_begin); }
relocInfo* relocation_end() const { return (relocInfo*) at_offset(_exception_table_begin); }
address handler_table_begin () const { return at_offset(_exception_table_begin); }
address handler_table_end() const { return at_offset(_nul_chk_table_begin); }
address nul_chk_table_begin() const { return at_offset(_nul_chk_table_begin); }
address nul_chk_table_end() const { return at_offset(_oopmap_begin); }
ImmutableOopMapSet* oopmap_set() const { return (ImmutableOopMapSet*) at_offset(_oopmap_begin); }
address consts_begin() const { return at_offset(_size); }
address consts_end() const { return at_offset(_size); }
int stub_offset() const { return _stubs_offset; }
int entry_offset() const { return _entry; }
int verified_entry_offset() const { return _verified_entry; }
int exception_handler_offset() const { return _exception_handler_offset; }
int deopt_handler_offset() const { return _deopt_handler_offset; }
int deopt_mh_handler_offset() const { return _deopt_mh_handler_offset; }
int orig_pc_offset() const { return _orig_pc_offset; }
int handler_table_size() const { return handler_table_end() - handler_table_begin(); }
int nul_chk_table_size() const { return nul_chk_table_end() - nul_chk_table_begin(); }
bool has_unsafe_access() const { return _unsafe_access != 0; }
};
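// ---------------------------------------------------------------------------
// [Editor's sketch, not part of the original file] aot_metadata above is a
// header at the front of one contiguous blob: every section accessor is just
// "(address)this + offset", and each section ends where the next one begins.
// A minimal model of that layout (illustrative names):
#include <cstddef>

struct SketchBlobHeader {
  int _pc_desc_begin;   // byte offsets from the start of this header
  int _scopes_begin;
  int _reloc_begin;

  const char* at_offset(std::size_t offset) const {
    return reinterpret_cast<const char*>(this) + offset;
  }
  const char* pc_descs_begin() const { return at_offset(_pc_desc_begin); }
  const char* pc_descs_end()   const { return at_offset(_scopes_begin); }  // next section starts here
};
// ---------------------------------------------------------------------------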
/*
* Use this for AOTCompiledMethods since a lot of the fields in CodeBlob get the same
* value when they come from AOT. code_begin == content_begin, etc... */
class AOTCompiledMethodLayout : public CodeBlobLayout {
public:
AOTCompiledMethodLayout(address code_begin, address code_end, address relocation_begin, address relocation_end) :
CodeBlobLayout(
code_begin, // code_begin
code_end, // code_end
code_begin, // content_begin
code_end, // content_end
code_end, // data_end
relocation_begin, // relocation_begin
relocation_end
) {
}
};
class AOTCompiledMethod : public CompiledMethod, public CHeapObj<mtCode> {
private:
address _code;
aot_metadata* _meta;
Metadata** _metadata_got;
jlong* _state_adr; // Address of cell to indicate aot method state (in_use or not_entrant)
AOTCodeHeap* _heap; // code heap which has this method
const char* _name; // For a stub: "AOT Stub<name>",
// For nmethod: "<u2_size>Ljava/lang/ThreadGroup;<u2_size>addUnstarted<u2_size>()V"
const int _metadata_size; // size of _metadata_got
const int _aot_id;
const int _method_index;
oop _oop; // method()->method_holder()->klass_holder()
address* orig_pc_addr(const frame* fr);
bool make_not_entrant_helper(int new_state);
public:
using CHeapObj<mtCode>::operator new;
using CHeapObj<mtCode>::operator delete;
int method_index() const { return _method_index; }
void set_oop(oop o) { _oop = o; }
AOTCompiledMethod(address code, Method* method, aot_metadata* meta, address metadata_got, int metadata_size, jlong* state_adr, AOTCodeHeap* heap, const char* name, int method_index, int aot_id) :
CompiledMethod(method, name, compiler_jvmci, // AOT code is generated by JVMCI compiler
AOTCompiledMethodLayout(code, code + meta->code_size(), (address) meta->relocation_begin(), (address) meta->relocation_end()),
0 /* frame_complete_offset */, meta->frame_size() /* frame_size */, meta->oopmap_set(), false /* caller_must_gc_arguments */),
_code(code),
_meta(meta),
_metadata_got((Metadata**) metadata_got),
_state_adr(state_adr),
_heap(heap),
_name(name),
_metadata_size(metadata_size),
_aot_id(aot_id),
_method_index(method_index) {
_is_far_code = CodeCache::is_far_target(code) ||
CodeCache::is_far_target(code + meta->code_size());
_exception_cache = NULL;
_scopes_data_begin = (address) _meta->scopes_data_begin();
_deopt_handler_begin = (address) _code + _meta->deopt_handler_offset();
if (_meta->deopt_mh_handler_offset() != -1) {
_deopt_mh_handler_begin = (address) _code + _meta->deopt_mh_handler_offset();
} else {
_deopt_mh_handler_begin = (address) this;
}
_pc_desc_container.reset_to(scopes_pcs_begin());
// Mark the AOTCompiledMethod as in_use
*_state_adr = nmethod::in_use;
set_has_unsafe_access(_meta->has_unsafe_access());
_oop = NULL;
}
virtual bool is_aot() const { return true; }
virtual bool is_runtime_stub() const { return is_aot_runtime_stub(); }
virtual bool is_compiled() const { return !is_aot_runtime_stub(); }
virtual bool is_locked_by_vm() const { return false; }
int state() const { return *_state_adr; }
// Non-virtual for speed
bool _is_alive() const { return state() < unloaded; }
virtual bool is_zombie() const { return state() == zombie; }
virtual bool is_unloaded() const { return state() == unloaded; }
virtual bool is_not_entrant() const { return state() == not_entrant ||
state() == not_used; }
virtual bool is_alive() const { return _is_alive(); }
virtual bool is_in_use() const { return state() == in_use; }
virtual bool is_unloading() { return false; }
address exception_begin() const { return (address) _code + _meta->exception_handler_offset(); }
virtual const char* name() const { return _name; }
virtual int compile_id() const { return _aot_id; }
void print_on(outputStream* st) const;
void print_on(outputStream* st, const char* msg) const;
void print() const;
virtual void print_value_on(outputStream *stream) const;
virtual void print_block_comment(outputStream *stream, address block_begin) const { }
virtual void verify() {}
virtual int comp_level() const { return CompLevel_aot; }
virtual address verified_entry_point() const { return _code + _meta->verified_entry_offset(); }
virtual void log_identity(xmlStream* stream) const;
virtual void log_state_change() const;
virtual bool make_entrant();
virtual bool make_not_entrant() { return make_not_entrant_helper(not_entrant); }
virtual bool make_not_used() { return make_not_entrant_helper(not_used); }
virtual address entry_point() const { return _code + _meta->entry_offset(); }
virtual bool make_zombie() { ShouldNotReachHere(); return false; }
virtual bool is_osr_method() const { return false; }
virtual int osr_entry_bci() const { ShouldNotReachHere(); return -1; }
// AOT compiled methods do not get into zombie state
virtual bool can_convert_to_zombie() { return false; }
virtual bool is_dependent_on_method(Method* dependee) { return true; }
virtual void clear_inline_caches();
virtual void print_pcs() {}
virtual address scopes_data_end() const { return _meta->scopes_data_end(); }
virtual oop oop_at(int index) const;
virtual Metadata* metadata_at(int index) const;
virtual PcDesc* scopes_pcs_begin() const { return _meta->scopes_pcs_begin(); }
virtual PcDesc* scopes_pcs_end() const { return _meta->scopes_pcs_end(); }
virtual address handler_table_begin() const { return _meta->handler_table_begin(); }
virtual address handler_table_end() const { return _meta->handler_table_end(); }
virtual address nul_chk_table_begin() const { return _meta->nul_chk_table_begin(); }
virtual address nul_chk_table_end() const { return _meta->nul_chk_table_end(); }
virtual address consts_begin() const { return _meta->consts_begin(); }
virtual address consts_end() const { return _meta->consts_end(); }
virtual address stub_begin() const { return code_begin() + _meta->stub_offset(); }
virtual address stub_end() const { return code_end(); }
virtual oop* oop_addr_at(int index) const { ShouldNotReachHere(); return NULL; }
virtual Metadata** metadata_addr_at(int index) const { ShouldNotReachHere(); return NULL; }
// Accessor/mutator for the original pc of a frame before a frame was deopted.
address get_original_pc(const frame* fr) { return *orig_pc_addr(fr); }
void set_original_pc(const frame* fr, address pc) { *orig_pc_addr(fr) = pc; }
virtual void metadata_do(MetadataClosure* f);
bool metadata_got_contains(Metadata **p) {
return p >= &_metadata_got[0] && p < &_metadata_got[_metadata_size];
}
Metadata** metadata_begin() const { return &_metadata_got[0] ; }
Metadata** metadata_end() const { return &_metadata_got[_metadata_size] ; }
const char* compile_kind() const { return "AOT"; }
int get_state() const {
return (int) (*_state_adr);
}
// inlined and non-virtual for AOTCodeHeap::oops_do
void do_oops(OopClosure* f) {
assert(_is_alive(), "");
if (_oop != NULL) {
f->do_oop(&_oop);
}
#if 0
metadata_oops_do(metadata_begin(), metadata_end(), f);
#endif
}
virtual void do_unloading(bool unloading_occurred);
protected:
// AOT compiled methods are not flushed
void flush() {};
NativeCallWrapper* call_wrapper_at(address call) const;
NativeCallWrapper* call_wrapper_before(address return_pc) const;
address call_instruction_address(address pc) const;
CompiledStaticCall* compiledStaticCall_at(Relocation* call_site) const;
CompiledStaticCall* compiledStaticCall_at(address addr) const;
CompiledStaticCall* compiledStaticCall_before(address addr) const;
private:
bool is_aot_runtime_stub() const { return _method == NULL; }
};
class PltNativeCallWrapper: public NativeCallWrapper {
private:
NativePltCall* _call;
public:
PltNativeCallWrapper(NativePltCall* call) : _call(call) {}
virtual address destination() const { return _call->destination(); }
virtual address instruction_address() const { return _call->instruction_address(); }
virtual address next_instruction_address() const { return _call->next_instruction_address(); }
virtual address return_address() const { return _call->return_address(); }
virtual address get_resolve_call_stub(bool is_optimized) const { return _call->plt_resolve_call(); }
virtual void set_destination_mt_safe(address dest) { _call->set_destination_mt_safe(dest); }
virtual void set_to_interpreted(const methodHandle& method, CompiledICInfo& info);
virtual void verify() const { _call->verify(); }
virtual void verify_resolve_call(address dest) const;
virtual bool is_call_to_interpreted(address dest) const { return (dest == _call->plt_c2i_stub()); }
// TODO: assume for now that patching of aot code (got cell) is safe.
virtual bool is_safe_for_patching() const { return true; }
virtual NativeInstruction* get_load_instruction(virtual_call_Relocation* r) const;
virtual void *get_data(NativeInstruction* instruction) const {
return (void*)((NativeLoadGot*) instruction)->data();
}
virtual void set_data(NativeInstruction* instruction, intptr_t data) {
((NativeLoadGot*) instruction)->set_data(data);
}
};
#endif // SHARE_AOT_AOTCOMPILEDMETHOD_HPP
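// ---------------------------------------------------------------------------
// [Editor's sketch, not part of the original file] PltNativeCallWrapper above
// retargets calls by rewriting the data slot a PLT stub loads from, instead of
// patching instructions; that is why set_destination_mt_safe and set_data are
// plain data stores. A standalone model using an atomic function-pointer slot:
#include <atomic>
#include <cstdio>

void sketch_c2i_stub() { std::puts("interpreted via c2i stub"); }
void sketch_compiled() { std::puts("compiled entry"); }

std::atomic<void (*)()> sketch_got_cell{sketch_c2i_stub};

void sketch_plt_call() { sketch_got_cell.load(std::memory_order_acquire)(); }

int main() {
  sketch_plt_call();                                                  // via stub
  sketch_got_cell.store(sketch_compiled, std::memory_order_release);  // "set_destination_mt_safe"
  sketch_plt_call();                                                  // now direct
  return 0;
}
// ---------------------------------------------------------------------------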

View File

@ -1,335 +0,0 @@
/*
* Copyright (c) 2016, 2021, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
#include "precompiled.hpp"
#include "aot/aotCodeHeap.hpp"
#include "aot/aotLoader.inline.hpp"
#include "classfile/javaClasses.hpp"
#include "jvm.h"
#include "jvmci/jvmci.hpp"
#include "memory/allocation.inline.hpp"
#include "memory/resourceArea.hpp"
#include "oops/compressedOops.hpp"
#include "oops/method.hpp"
#include "prims/jvmtiExport.hpp"
#include "runtime/arguments.hpp"
#include "runtime/globals_extension.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/os.inline.hpp"
#include "runtime/registerMap.hpp"
#include "runtime/timerTrace.hpp"
GrowableArray<AOTCodeHeap*>* AOTLoader::_heaps = new(ResourceObj::C_HEAP, mtCode) GrowableArray<AOTCodeHeap*> (2, mtCode);
GrowableArray<AOTLib*>* AOTLoader::_libraries = new(ResourceObj::C_HEAP, mtCode) GrowableArray<AOTLib*> (2, mtCode);
// Iterate over all AOT CodeHeaps
#define FOR_ALL_AOT_HEAPS(heap) for (GrowableArrayIterator<AOTCodeHeap*> heap = heaps()->begin(); heap != heaps()->end(); ++heap)
// Iterate over all AOT Libraries
#define FOR_ALL_AOT_LIBRARIES(lib) for (GrowableArrayIterator<AOTLib*> lib = libraries()->begin(); lib != libraries()->end(); ++lib)
void AOTLoader::load_for_klass(InstanceKlass* ik, Thread* thread) {
if (ik->is_hidden() || ik->is_unsafe_anonymous()) {
// don't even bother
return;
}
if (UseAOT) {
// We allow hotswap to be enabled after the onload phase, but not breakpoints
assert(!JvmtiExport::can_post_breakpoint(), "AOT should have been disabled.");
FOR_ALL_AOT_HEAPS(heap) {
(*heap)->load_klass_data(ik, thread);
}
}
}
uint64_t AOTLoader::get_saved_fingerprint(InstanceKlass* ik) {
assert(UseAOT, "called only when AOT is enabled");
if (ik->is_hidden() || ik->is_unsafe_anonymous()) {
// don't even bother
return 0;
}
FOR_ALL_AOT_HEAPS(heap) {
AOTKlassData* klass_data = (*heap)->find_klass(ik);
if (klass_data != NULL) {
return klass_data->_fingerprint;
}
}
return 0;
}
void AOTLoader::oops_do(OopClosure* f) {
if (UseAOT) {
FOR_ALL_AOT_HEAPS(heap) {
(*heap)->oops_do(f);
}
}
}
void AOTLoader::metadata_do(MetadataClosure* f) {
if (UseAOT) {
FOR_ALL_AOT_HEAPS(heap) {
(*heap)->metadata_do(f);
}
}
}
void AOTLoader::mark_evol_dependent_methods(InstanceKlass* dependee) {
if (UseAOT) {
FOR_ALL_AOT_HEAPS(heap) {
(*heap)->mark_evol_dependent_methods(dependee);
}
}
}
/**
* List of core modules for which we search for shared libraries.
*/
static const char* modules[] = {
"java.base",
"java.logging",
"jdk.compiler",
"jdk.internal.vm.ci",
"jdk.internal.vm.compiler"
};
void AOTLoader::initialize() {
TraceTime timer("AOT initialization", TRACETIME_LOG(Info, aot, startuptime));
if (FLAG_IS_DEFAULT(UseAOT) && AOTLibrary != NULL) {
// Don't need to set UseAOT on command line when AOTLibrary is specified
FLAG_SET_DEFAULT(UseAOT, true);
}
if (UseAOT) {
// EagerInitialization is not compatible with AOT
if (EagerInitialization) {
if (PrintAOT) {
warning("EagerInitialization is not compatible with AOT (switching AOT off)");
}
FLAG_SET_DEFAULT(UseAOT, false);
return;
}
if (JvmtiExport::can_post_breakpoint()) {
if (PrintAOT) {
warning("JVMTI capability to post breakpoint is not compatible with AOT (switching AOT off)");
}
FLAG_SET_DEFAULT(UseAOT, false);
return;
}
// -Xint is not compatible with AOT
if (Arguments::is_interpreter_only()) {
if (PrintAOT) {
warning("-Xint is not compatible with AOT (switching AOT off)");
}
FLAG_SET_DEFAULT(UseAOT, false);
return;
}
#ifdef _WINDOWS
const char pathSep = ';';
#else
const char pathSep = ':';
#endif
// Scan the AOTLibrary option.
if (AOTLibrary != NULL) {
const int len = (int)strlen(AOTLibrary);
char* cp = NEW_C_HEAP_ARRAY(char, len+1, mtCode);
memcpy(cp, AOTLibrary, len);
cp[len] = '\0';
char* end = cp + len;
while (cp < end) {
const char* name = cp;
while ((*cp) != '\0' && (*cp) != '\n' && (*cp) != ',' && (*cp) != pathSep) cp++;
cp[0] = '\0'; // Terminate name
cp++;
load_library(name, true);
}
}
// Load well-known AOT libraries from the Java installation directory.
const char* home = Arguments::get_java_home();
const char* file_separator = os::file_separator();
for (int i = 0; i < (int) (sizeof(modules) / sizeof(const char*)); i++) {
char library[JVM_MAXPATHLEN];
jio_snprintf(library, sizeof(library), "%s%slib%slib%s%s%s%s", home, file_separator, file_separator, modules[i], UseCompressedOops ? "-coop" : "", UseG1GC ? "" : "-nong1", os::dll_file_extension());
load_library(library, false);
}
}
}
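// ---------------------------------------------------------------------------
// [Editor's sketch, not part of the original file] The jio_snprintf above
// assembles per-module library paths such as
//   <java.home>/lib/libjava.base-coop.so   (compressed oops, G1)
//   <java.home>/lib/libjava.base-nong1.so  (no compressed oops, other GC)
// A plain-snprintf model of the same string assembly (POSIX ".so" suffix
// assumed for brevity; the real code uses os::dll_file_extension()):
#include <cstdio>

inline void sketch_library_path(char* buf, std::size_t len, const char* home,
                                const char* module, bool coop, bool g1) {
  std::snprintf(buf, len, "%s/lib/lib%s%s%s.so",
                home, module, coop ? "-coop" : "", g1 ? "" : "-nong1");
}
// ---------------------------------------------------------------------------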
void AOTLoader::universe_init() {
if (UseAOT && libraries_count() > 0) {
// Shifts are static values which are initialized to 0 until java heap initialization.
// AOT libs are loaded before the heap is initialized, so the shift values are not set yet.
// That is okay, since the ObjectAlignmentInBytes flag, which determines the shift value, is set before AOT libs are loaded.
// AOT sets the shift values during heap and metaspace initialization.
// Check the shift values here to make sure they did not change.
if (UseCompressedOops && AOTLib::narrow_oop_shift_initialized()) {
int oop_shift = CompressedOops::shift();
FOR_ALL_AOT_LIBRARIES(lib) {
(*lib)->verify_flag((*lib)->config()->_narrowOopShift, oop_shift, "CompressedOops::shift");
}
if (UseCompressedClassPointers) { // It is set only if UseCompressedOops is set
int klass_shift = CompressedKlassPointers::shift();
FOR_ALL_AOT_LIBRARIES(lib) {
(*lib)->verify_flag((*lib)->config()->_narrowKlassShift, klass_shift, "CompressedKlassPointers::shift");
}
}
}
// Create heaps for all valid libraries
FOR_ALL_AOT_LIBRARIES(lib) {
if ((*lib)->is_valid()) {
AOTCodeHeap* heap = new AOTCodeHeap(*lib);
{
MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
add_heap(heap);
CodeCache::add_heap(heap);
}
} else {
// Unload invalid libraries
os::dll_unload((*lib)->dl_handle());
}
}
}
if (heaps_count() == 0) {
if (FLAG_IS_DEFAULT(UseAOT)) {
FLAG_SET_DEFAULT(UseAOT, false);
}
}
}
// Set the shift values for compressed oops and classes based on the first AOT library config.
// AOTLoader::universe_init(), which is called later, will check the shift values again to make sure nobody changed them.
// This code is not executed during a CDS dump because that runs in interpreter-only mode, and AOT is disabled in that mode.
void AOTLoader::set_narrow_oop_shift() {
// This method is called from Universe::initialize_heap().
if (UseAOT && libraries_count() > 0 &&
UseCompressedOops && AOTLib::narrow_oop_shift_initialized()) {
if (CompressedOops::shift() == 0) {
// 0 is a valid shift value for a small heap, but we can safely increase it
// at this point, while nobody has used it yet.
CompressedOops::set_shift(AOTLib::narrow_oop_shift());
}
}
}
void AOTLoader::set_narrow_klass_shift() {
// This method is called from Metaspace::set_narrow_klass_base_and_shift().
if (UseAOT && libraries_count() > 0 &&
UseCompressedOops && AOTLib::narrow_oop_shift_initialized() &&
UseCompressedClassPointers) {
if (CompressedKlassPointers::shift() == 0) {
CompressedKlassPointers::set_shift(AOTLib::narrow_klass_shift());
}
}
}
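// ---------------------------------------------------------------------------
// [Editor's sketch, not part of the original file] The two functions above use
// an adopt-if-unset handshake: if the VM has not picked a shift yet (still 0),
// adopt the value recorded in the AOT library; universe_init() later verifies
// that the final shift still matches. A minimal model (illustrative name):
inline bool sketch_adopt_or_check(int* vm_shift, int recorded_shift) {
  if (*vm_shift == 0) {  // nobody has used the shift yet: safe to adopt
    *vm_shift = recorded_shift;
    return true;
  }
  return *vm_shift == recorded_shift;  // otherwise it must already match
}
// ---------------------------------------------------------------------------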
void AOTLoader::load_library(const char* name, bool exit_on_error) {
// Skip library if a library with the same name is already loaded.
const int file_separator = *os::file_separator();
const char* start = strrchr(name, file_separator);
const char* new_name = (start == NULL) ? name : (start + 1);
FOR_ALL_AOT_LIBRARIES(lib) {
const char* lib_name = (*lib)->name();
start = strrchr(lib_name, file_separator);
const char* old_name = (start == NULL) ? lib_name : (start + 1);
if (strcmp(old_name, new_name) == 0) {
if (PrintAOT) {
warning("AOT library %s is already loaded as %s.", name, lib_name);
}
return;
}
}
char ebuf[1024];
void* handle = os::dll_load(name, ebuf, sizeof ebuf);
if (handle == NULL) {
if (exit_on_error) {
tty->print_cr("error opening file: %s", ebuf);
vm_exit(1);
}
return;
}
const int dso_id = libraries_count() + 1;
AOTLib* lib = new AOTLib(handle, name, dso_id);
if (!lib->is_valid()) {
delete lib;
os::dll_unload(handle);
return;
}
add_library(lib);
}
#ifndef PRODUCT
void AOTLoader::print_statistics() {
{ ttyLocker ttyl;
tty->print_cr("--- AOT Statistics ---");
tty->print_cr("AOT libraries loaded: %d", heaps_count());
AOTCodeHeap::print_statistics();
}
}
#endif
bool AOTLoader::reconcile_dynamic_invoke(InstanceKlass* holder, int index, Method* adapter_method, Klass* appendix_klass) {
if (!UseAOT) {
return true;
}
JavaThread* thread = JavaThread::current();
ResourceMark rm(thread);
RegisterMap map(thread, false);
frame caller_frame = thread->last_frame().sender(&map); // Skip stub
CodeBlob* caller_cb = caller_frame.cb();
guarantee(caller_cb != NULL && caller_cb->is_compiled(), "must be called from compiled method");
CompiledMethod* cm = caller_cb->as_compiled_method();
if (!cm->is_aot()) {
return true;
}
AOTCompiledMethod* aot = (AOTCompiledMethod*)cm;
AOTCodeHeap* caller_heap = NULL;
FOR_ALL_AOT_HEAPS(heap) {
if ((*heap)->contains_blob(aot)) {
caller_heap = *heap;
break;
}
}
guarantee(caller_heap != NULL, "CodeHeap not found");
bool success = caller_heap->reconcile_dynamic_invoke(aot, holder, index, adapter_method, appendix_klass);
vmassert(success || thread->last_frame().sender(&map).is_deoptimized_frame(), "caller not deoptimized on failure");
return success;
}
// This should be called very early during startup before any of the AOTed methods that use boxes can deoptimize.
// Deoptimization machinery expects the caches to be present and populated.
void AOTLoader::initialize_box_caches(TRAPS) {
if (!UseAOT || libraries_count() == 0) {
return;
}
TraceTime timer("AOT initialization of box caches", TRACETIME_LOG(Info, aot, startuptime));
JVMCI::ensure_box_caches_initialized(CHECK);
}

View File

@ -1,74 +0,0 @@
/*
* Copyright (c) 2016, 2019, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
#ifndef SHARE_AOT_AOTLOADER_HPP
#define SHARE_AOT_AOTLOADER_HPP
#include "runtime/globals_extension.hpp"
#include "runtime/handles.hpp"
class AOTCodeHeap;
class AOTCompiledMethod;
class AOTLib;
class CodeBlob;
template <class T> class GrowableArray;
class InstanceKlass;
class JavaThread;
class Metadata;
class OopClosure;
class AOTLoader {
private:
#if INCLUDE_AOT
static GrowableArray<AOTCodeHeap*>* _heaps;
static GrowableArray<AOTLib*>* _libraries;
#endif
static void load_library(const char* name, bool exit_on_error);
public:
#if INCLUDE_AOT
static GrowableArray<AOTCodeHeap*>* heaps();
static GrowableArray<AOTLib*>* libraries();
static int heaps_count();
static int libraries_count();
static void add_heap(AOTCodeHeap *heap);
static void add_library(AOTLib *lib);
#endif
static void initialize() NOT_AOT({ FLAG_SET_ERGO(UseAOT, false); });
static void universe_init() NOT_AOT_RETURN;
static void set_narrow_oop_shift() NOT_AOT_RETURN;
static void set_narrow_klass_shift() NOT_AOT_RETURN;
static void load_for_klass(InstanceKlass* ik, Thread* thread) NOT_AOT_RETURN;
static uint64_t get_saved_fingerprint(InstanceKlass* ik) NOT_AOT({ return 0; });
static void oops_do(OopClosure* f) NOT_AOT_RETURN;
static void metadata_do(MetadataClosure* f) NOT_AOT_RETURN;
static void mark_evol_dependent_methods(InstanceKlass* dependee) NOT_AOT_RETURN;
static void initialize_box_caches(TRAPS) NOT_AOT_RETURN;
NOT_PRODUCT( static void print_statistics() NOT_AOT_RETURN; )
static bool reconcile_dynamic_invoke(InstanceKlass* holder, int index, Method* adapter_method, Klass *appendix_klass) NOT_AOT({ return true; });
};
#endif // SHARE_AOT_AOTLOADER_HPP
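// ---------------------------------------------------------------------------
// [Editor's sketch, not part of the original file] The NOT_AOT/NOT_AOT_RETURN
// annotations above come from HotSpot's utilities/macros.hpp: with AOT built
// in they expand to nothing (the out-of-line definitions apply), and without
// it they expand to inline stub bodies. A simplified model of the pattern
// (SKETCH-suffixed names are illustrative):
#if INCLUDE_AOT
  #define NOT_AOT_SKETCH(code)        /* real definition elsewhere */
  #define NOT_AOT_RETURN_SKETCH       /* declaration only */
#else
  #define NOT_AOT_SKETCH(code) code   // inline fallback body
  #define NOT_AOT_RETURN_SKETCH {}    // empty inline body
#endif

class SketchLoader {
 public:
  static void universe_init() NOT_AOT_RETURN_SKETCH;
  static bool reconcile() NOT_AOT_SKETCH({ return true; });
};
// ---------------------------------------------------------------------------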

View File

@ -1,39 +0,0 @@
/*
* Copyright (c) 2016, 2019, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
#ifndef SHARE_AOT_AOTLOADER_INLINE_HPP
#define SHARE_AOT_AOTLOADER_INLINE_HPP
#include "aot/aotLoader.hpp"
#include "utilities/growableArray.hpp"
#if INCLUDE_AOT
GrowableArray<AOTCodeHeap*>* AOTLoader::heaps() { return _heaps; }
GrowableArray<AOTLib*>* AOTLoader::libraries() { return _libraries; }
int AOTLoader::heaps_count() { return heaps()->length(); }
int AOTLoader::libraries_count() { return libraries()->length(); }
void AOTLoader::add_heap(AOTCodeHeap *heap) { heaps()->append(heap); }
void AOTLoader::add_library(AOTLib *lib) { libraries()->append(lib); }
#endif
#endif // SHARE_AOT_AOTLOADER_INLINE_HPP

View File

@ -1,37 +0,0 @@
/*
* Copyright (c) 2016, 2017, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
#include "precompiled.hpp"
#include "aot/compiledIC_aot.hpp"
bool CompiledPltStaticCall::is_call_to_interpreted() const {
// It is a call to interpreted code if it calls a stub. Hence, the destination
// must be in the stub part of the nmethod that contains the call.
return destination() == _call->plt_c2i_stub();
}
address CompiledPltStaticCall::find_stub() {
// It is static NativePltCall. Return c2i stub address.
return _call->plt_c2i_stub();
}

View File

@ -1,83 +0,0 @@
/*
* Copyright (c) 2016, 2019, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
#ifndef SHARE_AOT_COMPILEDIC_AOT_HPP
#define SHARE_AOT_COMPILEDIC_AOT_HPP
#include "code/compiledIC.hpp"
#include "code/nativeInst.hpp"
#include "interpreter/linkResolver.hpp"
#include "oops/compiledICHolder.hpp"
class CompiledPltStaticCall: public CompiledStaticCall {
friend class CompiledIC;
friend class PltNativeCallWrapper;
// Also used by CompiledIC
void set_to_interpreted(const methodHandle& callee, address entry);
address instruction_address() const { return _call->instruction_address(); }
void set_destination_mt_safe(address dest) { _call->set_destination_mt_safe(dest); }
NativePltCall* _call;
CompiledPltStaticCall(NativePltCall* call) : _call(call) {}
public:
inline static CompiledPltStaticCall* before(address return_addr) {
CompiledPltStaticCall* st = new CompiledPltStaticCall(nativePltCall_before(return_addr));
st->verify();
return st;
}
static inline CompiledPltStaticCall* at(address native_call) {
CompiledPltStaticCall* st = new CompiledPltStaticCall(nativePltCall_at(native_call));
st->verify();
return st;
}
static inline CompiledPltStaticCall* at(Relocation* call_site) {
return at(call_site->addr());
}
// Delegation
address destination() const { return _call->destination(); }
virtual bool is_call_to_interpreted() const;
// Stub support
address find_stub();
static void set_stub_to_clean(static_stub_Relocation* static_stub);
// Misc.
void print() PRODUCT_RETURN;
void verify() PRODUCT_RETURN;
protected:
virtual address resolve_call_stub() const { return _call->plt_resolve_call(); }
virtual void set_to_far(const methodHandle& callee, address entry) { set_to_compiled(entry); }
virtual const char* name() const { return "CompiledPltStaticCall"; }
};
#endif // SHARE_AOT_COMPILEDIC_AOT_HPP

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2020, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2021, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -406,10 +406,6 @@ class CodeBuffer: public StackObj {
address _last_insn; // used to merge consecutive memory barriers, loads or stores.
#if INCLUDE_AOT
bool _immutable_PIC;
#endif
#ifndef PRODUCT
CodeStrings _code_strings;
bool _collect_comments; // Indicate if we need to collect block comments at all.
@ -426,9 +422,6 @@ class CodeBuffer: public StackObj {
_oop_recorder = NULL;
_overflow_arena = NULL;
_last_insn = NULL;
#if INCLUDE_AOT
_immutable_PIC = false;
#endif
#ifndef PRODUCT
_decode_begin = NULL;
@ -675,13 +668,6 @@ class CodeBuffer: public StackObj {
// Log a little info about section usage in the CodeBuffer
void log_section_sizes(const char* name);
#if INCLUDE_AOT
// True if this is a code buffer used for immutable PIC, i.e. AOT
// compilation.
bool immutable_PIC() { return _immutable_PIC; }
void set_immutable_PIC(bool pic) { _immutable_PIC = pic; }
#endif
#ifndef PRODUCT
public:
// Printing / Decoding

View File

@ -266,7 +266,7 @@ class Compilation: public StackObj {
// will compilation make optimistic assumptions that might lead to
// deoptimization and that the runtime will account for?
bool is_optimistic() {
return CompilerConfig::is_c1_only_no_aot_or_jvmci() && !is_profiling() &&
return CompilerConfig::is_c1_only_no_jvmci() && !is_profiling() &&
(RangeCheckElimination || UseLoopInvariantCodeMotion) &&
method()->method_data()->trap_count(Deoptimization::Reason_none) == 0;
}
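Read as a plain predicate, the simplified condition is a four-way conjunction; a self-contained restatement with the inputs passed in as parameters instead of read from compiler state (an illustration only):

// Sketch: C1 alone (no JVMCI), not profiling, an optimistic optimization
// enabled, and no Reason_none deoptimization traps recorded yet.
static bool is_optimistic_sketch(bool c1_only_no_jvmci, bool profiling,
                                 bool rce_or_licm, int none_trap_count) {
  return c1_only_no_jvmci && !profiling && rce_or_licm && none_trap_count == 0;
}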

View File

@ -482,7 +482,7 @@ void LIR_Assembler::emit_call(LIR_OpJavaCall* op) {
#if defined(IA32) && defined(COMPILER2)
// C2 leaves the fpu stack dirty; clean it
if (UseSSE < 2 && !CompilerConfig::is_c1_only_no_aot_or_jvmci()) {
if (UseSSE < 2 && !CompilerConfig::is_c1_only_no_jvmci()) {
int i;
for ( i = 1; i <= 7 ; i++ ) {
ffree(i);

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2000, 2019, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2000, 2021, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -259,11 +259,7 @@ class LIR_Assembler: public CompilationResourceObj {
public:
static int call_stub_size() {
if (UseAOT) {
return _call_stub_size + _call_aot_stub_size;
} else {
return _call_stub_size;
}
return _call_stub_size;
}
static int exception_handler_size() {

View File

@ -461,7 +461,7 @@ void LIRGenerator::klass2reg_with_patching(LIR_Opr r, ciMetadata* obj, CodeEmitI
/* C2 relies on constant pool entries being resolved (ciTypeFlow), so if tiered compilation
* is active and the class hasn't yet been resolved we need to emit a patch that resolves
* the class. */
if ((!CompilerConfig::is_c1_only_no_aot_or_jvmci() && need_resolve) || !obj->is_loaded() || PatchALot) {
if ((!CompilerConfig::is_c1_only_no_jvmci() && need_resolve) || !obj->is_loaded() || PatchALot) {
assert(info != NULL, "info must be set if class is not loaded");
__ klass2reg_patch(NULL, r, info);
} else {
@ -661,7 +661,7 @@ void LIRGenerator::monitor_exit(LIR_Opr object, LIR_Opr lock, LIR_Opr new_hdr, L
void LIRGenerator::print_if_not_loaded(const NewInstance* new_instance) {
if (PrintNotLoaded && !new_instance->klass()->is_loaded()) {
tty->print_cr(" ###class not loaded at new bci %d", new_instance->printable_bci());
} else if (PrintNotLoaded && (!CompilerConfig::is_c1_only_no_aot_or_jvmci() && new_instance->is_unresolved())) {
} else if (PrintNotLoaded && (!CompilerConfig::is_c1_only_no_jvmci() && new_instance->is_unresolved())) {
tty->print_cr(" ###class not resolved at new bci %d", new_instance->printable_bci());
}
}

View File

@ -23,7 +23,6 @@
*/
#include "precompiled.hpp"
#include "jvm.h"
#include "aot/aotLoader.hpp"
#include "classfile/classFileParser.hpp"
#include "classfile/classFileStream.hpp"
#include "classfile/classLoader.hpp"
@ -5354,25 +5353,6 @@ InstanceKlass* ClassFileParser::create_instance_klass(bool changed_by_loadhook,
assert(_klass == ik, "invariant");
if (ik->should_store_fingerprint()) {
ik->store_fingerprint(_stream->compute_fingerprint());
}
ik->set_has_passed_fingerprint_check(false);
if (UseAOT && ik->supers_have_passed_fingerprint_checks()) {
uint64_t aot_fp = AOTLoader::get_saved_fingerprint(ik);
uint64_t fp = ik->has_stored_fingerprint() ? ik->get_stored_fingerprint() : _stream->compute_fingerprint();
if (aot_fp != 0 && aot_fp == fp) {
// This class matches with a class saved in an AOT library
ik->set_has_passed_fingerprint_check(true);
} else {
ResourceMark rm;
log_info(class, fingerprint)("%s : expected = " PTR64_FORMAT " actual = " PTR64_FORMAT,
ik->external_name(), aot_fp, _stream->compute_fingerprint());
}
}
return ik;
}

View File

@ -75,12 +75,3 @@ const ClassFileStream* ClassFileStream::clone() const {
need_verify(),
from_boot_loader_modules_image());
}
uint64_t ClassFileStream::compute_fingerprint() const {
int classfile_size = length();
int classfile_crc = ClassLoader::crc32(0, (const char*)buffer(), length());
uint64_t fingerprint = (uint64_t(classfile_size) << 32) | uint64_t(uint32_t(classfile_crc));
assert(fingerprint != 0, "must not be zero");
return fingerprint;
}
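The removed fingerprint packed two 32-bit values into one 64-bit word; a standalone sketch of the same layout using plain <cstdint> types, with no HotSpot dependencies:

#include <cstdint>

// Upper 32 bits: class file size; lower 32 bits: CRC32 of its bytes.
// A non-empty class file can therefore never produce zero, which is what
// the removed assert relied on.
uint64_t fingerprint_sketch(uint32_t classfile_size, uint32_t classfile_crc) {
  return (uint64_t(classfile_size) << 32) | uint64_t(classfile_crc);
}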

View File

@ -24,7 +24,6 @@
#include "precompiled.hpp"
#include "jvm.h"
#include "aot/aotLoader.hpp"
#include "cds/heapShared.hpp"
#include "classfile/classFileParser.hpp"
#include "classfile/classFileStream.hpp"
@ -1230,21 +1229,6 @@ void SystemDictionary::load_shared_class_misc(InstanceKlass* ik, ClassLoaderData
// notify a class loaded from shared object
ClassLoadingService::notify_class_loaded(ik, true /* shared class */);
ik->set_has_passed_fingerprint_check(false);
if (UseAOT && ik->supers_have_passed_fingerprint_checks()) {
uint64_t aot_fp = AOTLoader::get_saved_fingerprint(ik);
uint64_t cds_fp = ik->get_stored_fingerprint();
if (aot_fp != 0 && aot_fp == cds_fp) {
// This class matches with a class saved in an AOT library
ik->set_has_passed_fingerprint_check(true);
} else {
if (log_is_enabled(Info, class, fingerprint)) {
ResourceMark rm;
log_info(class, fingerprint)("%s : expected = " PTR64_FORMAT " actual = " PTR64_FORMAT, ik->external_name(), aot_fp, cds_fp);
}
}
}
}
#endif // INCLUDE_CDS

View File

@ -44,8 +44,7 @@ struct CodeBlobType {
MethodProfiled = 1, // Execution level 2 and 3 (profiled) nmethods
NonNMethod = 2, // Non-nmethods like Buffers, Adapters and Runtime Stubs
All = 3, // All types (No code cache segmentation)
AOT = 4, // AOT methods
NumTypes = 5 // Number of CodeBlobTypes
NumTypes = 4 // Number of CodeBlobTypes
};
};
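With the AOT segment gone, heap selection reduces to the three real segments; a sketch of the assumed mapping (MethodNonProfiled = 0 is implied by the surrounding enum but not shown in this hunk):

// Sketch: choose a code heap for a blob after AOT removal.
int heap_for_sketch(bool is_nmethod, bool is_profiled) {
  if (!is_nmethod) return CodeBlobType::NonNMethod;      // buffers, adapters, stubs
  return is_profiled ? CodeBlobType::MethodProfiled      // tiers 2 and 3
                     : CodeBlobType::MethodNonProfiled;  // tiers 1 and 4
}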
@ -54,10 +53,6 @@ struct CodeBlobType {
// Subtypes are:
// CompiledMethod : Compiled Java methods (includes methods that call to native code)
// nmethod : JIT Compiled Java methods
// AOTCompiledMethod : AOT Compiled Java methods - Not in the CodeCache!
// AOTCompiledMethod objects are allocated in the C-Heap, the code they
// point to is allocated in the AOTCodeHeap which is in the C-Heap as
// well (i.e. it's the memory where the shared library was loaded to)
// RuntimeBlob : Non-compiled method code; generated glue code
// BufferBlob : Used for non-relocatable code such as interpreter, stubroutines, etc.
// AdapterBlob : Used to hold C2I/I2C adapters
@ -71,17 +66,12 @@ struct CodeBlobType {
// UncommonTrapBlob : Used to handle uncommon traps
//
//
// Layout (all except AOTCompiledMethod) : continuous in the CodeCache
// Layout : contiguous in the CodeCache
// - header
// - relocation
// - content space
// - instruction space
// - data space
//
// Layout (AOTCompiledMethod) : in the C-Heap
// - header -\
// ... |
// - code <-/
class CodeBlobLayout;
@ -145,7 +135,6 @@ public:
virtual bool is_adapter_blob() const { return false; }
virtual bool is_vtable_blob() const { return false; }
virtual bool is_method_handles_adapter_blob() const { return false; }
virtual bool is_aot() const { return false; }
virtual bool is_compiled() const { return false; }
inline bool is_compiled_by_c1() const { return _type == compiler_c1; };
@ -246,7 +235,6 @@ public:
#ifndef PRODUCT
void set_strings(CodeStrings& strings) {
assert(!is_aot(), "invalid on aot");
_strings.copy(strings);
}
#endif

View File

@ -24,7 +24,6 @@
#include "precompiled.hpp"
#include "jvm_io.h"
#include "aot/aotLoader.hpp"
#include "code/codeBlob.hpp"
#include "code/codeCache.hpp"
#include "code/codeHeapState.hpp"
@ -683,7 +682,6 @@ void CodeCache::metadata_do(MetadataClosure* f) {
while(iter.next()) {
iter.method()->metadata_do(f);
}
AOTLoader::metadata_do(f);
}
int CodeCache::alignment_unit() {
@ -972,11 +970,6 @@ void codeCache_init() {
CodeCache::initialize();
}
void AOTLoader_init() {
// Load AOT libraries and add AOT code heaps.
AOTLoader::initialize();
}
//------------------------------------------------------------------------------------------------
int CodeCache::number_of_nmethods_with_dependencies() {
@ -1038,15 +1031,6 @@ CompiledMethod* CodeCache::find_compiled(void* start) {
return (CompiledMethod*)cb;
}
bool CodeCache::is_far_target(address target) {
#if INCLUDE_AOT
return NativeCall::is_far_call(_low_bound, target) ||
NativeCall::is_far_call(_high_bound, target);
#else
return false;
#endif
}
#if INCLUDE_JVMTI
// RedefineClasses support for unloading nmethods that are dependent on "old" methods.
// We don't really expect this table to grow very large. If it does, it can become a hashtable.
@ -1096,11 +1080,6 @@ void CodeCache::old_nmethods_do(MetadataClosure* f) {
// Just marks the methods in this class as needing deoptimization
void CodeCache::mark_for_evol_deoptimization(InstanceKlass* dependee) {
assert(SafepointSynchronize::is_at_safepoint(), "Can only do this at a safepoint!");
// Mark dependent AOT nmethods, which are only found via the class redefined.
// TODO: add dependencies to aotCompiledMethod's metadata section so this isn't
// needed.
AOTLoader::mark_evol_dependent_methods(dependee);
}

View File

@ -205,9 +205,6 @@ class CodeCache : AllStatic {
static address high_bound() { return _high_bound; }
static address high_bound(int code_blob_type);
// Have to use far call instructions to call this pc.
static bool is_far_target(address pc);
// Profiling
static size_t capacity();
static size_t unallocated_capacity(int code_blob_type);
@ -229,7 +226,6 @@ class CodeCache : AllStatic {
static bool code_blob_type_accepts_compiled(int type) {
bool result = type == CodeBlobType::All || type <= CodeBlobType::MethodProfiled;
AOT_ONLY( result = result || type == CodeBlobType::AOT; )
return result;
}

View File

@ -425,7 +425,7 @@ bool CompiledIC::set_to_monomorphic(CompiledICInfo& info) {
// transitions are mt_safe
Thread *thread = Thread::current();
if (info.to_interpreter() || info.to_aot()) {
if (info.to_interpreter()) {
// Call to interpreter
if (info.is_optimized() && is_optimized()) {
assert(is_clean(), "unsafe IC path");
@ -439,9 +439,8 @@ bool CompiledIC::set_to_monomorphic(CompiledICInfo& info) {
if (TraceICs) {
ResourceMark rm(thread);
tty->print_cr ("IC@" INTPTR_FORMAT ": monomorphic to %s: %s",
tty->print_cr ("IC@" INTPTR_FORMAT ": monomorphic to interpreter: %s",
p2i(instruction_address()),
(info.to_aot() ? "aot" : "interpreter"),
method->print_value_string());
}
} else {
@ -541,19 +540,13 @@ void CompiledIC::compute_monomorphic_entry(const methodHandle& method,
entry = method_code->entry_point();
}
}
bool far_c2a = entry != NULL && caller_is_nmethod && method_code->is_far_code();
if (entry != NULL && !far_c2a) {
// Call to near compiled code (nmethod or aot).
if (entry != NULL) {
// Call to near compiled code.
info.set_compiled_entry(entry, is_optimized ? NULL : receiver_klass, is_optimized);
} else {
if (is_optimized) {
if (far_c2a) {
// Call to aot code from nmethod.
info.set_aot_entry(entry, method());
} else {
// Use stub entry
info.set_interpreter_entry(method()->get_c2i_entry(), method());
}
// Use stub entry
info.set_interpreter_entry(method()->get_c2i_entry(), method());
} else {
// Use icholder entry
assert(method_code == NULL || method_code->is_compiled(), "must be compiled");
@ -614,13 +607,6 @@ bool CompiledDirectStaticCall::is_call_to_interpreted() const {
return cm->stub_contains(destination());
}
bool CompiledDirectStaticCall::is_call_to_far() const {
// It is a call to an aot method if it calls to a stub. Hence, the destination
// must be in the stub part of the nmethod that contains the call
CodeBlob* desc = CodeCache::find_blob(instruction_address());
return desc->as_compiled_method()->stub_contains(destination());
}
void CompiledStaticCall::set_to_compiled(address entry) {
if (TraceICs) {
ResourceMark rm;
@ -645,11 +631,6 @@ void CompiledStaticCall::set(const StaticCallInfo& info) {
if (info._to_interpreter) {
// Call to interpreted code
set_to_interpreted(info.callee(), info.entry());
#if INCLUDE_AOT
} else if (info._to_aot) {
// Call to far code
set_to_far(info.callee(), info.entry());
#endif
} else {
set_to_compiled(info.entry());
}
@ -661,12 +642,6 @@ void CompiledStaticCall::compute_entry(const methodHandle& m, bool caller_is_nme
CompiledMethod* m_code = m->code();
info._callee = m;
if (m_code != NULL && m_code->is_in_use()) {
if (caller_is_nmethod && m_code->is_far_code()) {
// Call to far aot code from nmethod.
info._to_aot = true;
} else {
info._to_aot = false;
}
info._to_interpreter = false;
info._entry = m_code->verified_entry_point();
} else {
@ -678,18 +653,18 @@ void CompiledStaticCall::compute_entry(const methodHandle& m, bool caller_is_nme
}
}
address CompiledDirectStaticCall::find_stub_for(address instruction, bool is_aot) {
address CompiledDirectStaticCall::find_stub_for(address instruction) {
// Find reloc. information containing this call-site
RelocIterator iter((nmethod*)NULL, instruction);
while (iter.next()) {
if (iter.addr() == instruction) {
switch(iter.type()) {
case relocInfo::static_call_type:
return iter.static_call_reloc()->static_stub(is_aot);
return iter.static_call_reloc()->static_stub();
// We check here for opt_virtual_call_type, since we reuse the code
// from the CompiledIC implementation
case relocInfo::opt_virtual_call_type:
return iter.opt_virtual_call_reloc()->static_stub(is_aot);
return iter.opt_virtual_call_reloc()->static_stub();
case relocInfo::poll_type:
case relocInfo::poll_return_type: // A safepoint can't overlap a call.
default:
@ -700,8 +675,8 @@ address CompiledDirectStaticCall::find_stub_for(address instruction, bool is_aot
return NULL;
}
address CompiledDirectStaticCall::find_stub(bool is_aot) {
return CompiledDirectStaticCall::find_stub_for(instruction_address(), is_aot);
address CompiledDirectStaticCall::find_stub() {
return CompiledDirectStaticCall::find_stub_for(instruction_address());
}
address CompiledDirectStaticCall::resolve_call_stub() const {
@ -734,8 +709,6 @@ void CompiledDirectStaticCall::print() {
tty->print("clean");
} else if (is_call_to_compiled()) {
tty->print("compiled");
} else if (is_call_to_far()) {
tty->print("far");
} else if (is_call_to_interpreted()) {
tty->print("interpreted");
}
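After these changes a monomorphic target has exactly two shapes; a minimal state sketch (a plain struct with invented names, for illustration only):

// Sketch: the reduced target states once _to_aot is gone.
struct ICTargetSketch {
  bool    to_interpreter;  // true  -> entry is a c2i stub (interpreter)
                           // false -> entry is a verified compiled entry point
  address entry;
};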

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2019, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2021, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -84,7 +84,6 @@ class CompiledICInfo : public StackObj {
bool _is_icholder; // Is the cached value a CompiledICHolder*
bool _is_optimized; // it is an optimized virtual call (i.e., can be statically bound)
bool _to_interpreter; // Call it to interpreter
bool _to_aot; // Call it to aot code
bool _release_icholder;
public:
address entry() const { return _entry; }
@ -99,13 +98,11 @@ class CompiledICInfo : public StackObj {
}
bool is_optimized() const { return _is_optimized; }
bool to_interpreter() const { return _to_interpreter; }
bool to_aot() const { return _to_aot; }
void set_compiled_entry(address entry, Klass* klass, bool is_optimized) {
_entry = entry;
_cached_value = (void*)klass;
_to_interpreter = false;
_to_aot = false;
_is_icholder = false;
_is_optimized = is_optimized;
_release_icholder = false;
@ -115,17 +112,6 @@ class CompiledICInfo : public StackObj {
_entry = entry;
_cached_value = (void*)method;
_to_interpreter = true;
_to_aot = false;
_is_icholder = false;
_is_optimized = true;
_release_icholder = false;
}
void set_aot_entry(address entry, Method* method) {
_entry = entry;
_cached_value = (void*)method;
_to_interpreter = false;
_to_aot = true;
_is_icholder = false;
_is_optimized = true;
_release_icholder = false;
@ -135,14 +121,13 @@ class CompiledICInfo : public StackObj {
_entry = entry;
_cached_value = (void*)icholder;
_to_interpreter = true;
_to_aot = false;
_is_icholder = true;
_is_optimized = false;
_release_icholder = true;
}
CompiledICInfo(): _entry(NULL), _cached_value(NULL), _is_icholder(false),
_is_optimized(false), _to_interpreter(false), _to_aot(false), _release_icholder(false) {
_is_optimized(false), _to_interpreter(false), _release_icholder(false) {
}
~CompiledICInfo() {
// In rare cases the info is computed but not used, so release any
@ -341,7 +326,6 @@ class StaticCallInfo {
address _entry; // Entrypoint
methodHandle _callee; // Callee (used when calling interpreter)
bool _to_interpreter; // call to interpreted method (otherwise compiled)
bool _to_aot; // call to aot method (otherwise compiled)
friend class CompiledStaticCall;
friend class CompiledDirectStaticCall;
@ -358,9 +342,6 @@ class CompiledStaticCall : public ResourceObj {
static int to_interp_stub_size();
static int to_trampoline_stub_size();
static int reloc_to_interp_stub();
static void emit_to_aot_stub(CodeBuffer &cbuf, address mark = NULL);
static int to_aot_stub_size();
static int reloc_to_aot_stub();
// Compute entry point given a method
static void compute_entry(const methodHandle& m, bool caller_is_nmethod, StaticCallInfo& info);
@ -386,9 +367,6 @@ public:
protected:
virtual address resolve_call_stub() const = 0;
virtual void set_destination_mt_safe(address dest) = 0;
#if INCLUDE_AOT
virtual void set_to_far(const methodHandle& callee, address entry) = 0;
#endif
virtual void set_to_interpreted(const methodHandle& callee, address entry) = 0;
virtual const char* name() const = 0;
@ -405,9 +383,6 @@ private:
void verify_mt_safe(const methodHandle& callee, address entry,
NativeMovConstReg* method_holder,
NativeJump* jump) PRODUCT_RETURN;
#if INCLUDE_AOT
void set_to_far(const methodHandle& callee, address entry);
#endif
address instruction_address() const { return _call->instruction_address(); }
void set_destination_mt_safe(address dest) { _call->set_destination_mt_safe(dest); }
@ -437,11 +412,10 @@ private:
// State
virtual bool is_call_to_interpreted() const;
bool is_call_to_far() const;
// Stub support
static address find_stub_for(address instruction, bool is_aot);
address find_stub(bool is_aot);
static address find_stub_for(address instruction);
address find_stub();
static void set_stub_to_clean(static_stub_Relocation* static_stub);
// Misc.

View File

@ -72,7 +72,6 @@ CompiledMethod::CompiledMethod(Method* method, const char* name, CompilerType ty
void CompiledMethod::init_defaults() {
{ // avoid uninitialized fields, even for short time periods
_is_far_code = false;
_scopes_data_begin = NULL;
_deopt_handler_begin = NULL;
_deopt_mh_handler_begin = NULL;

View File

@ -152,9 +152,6 @@ protected:
MarkForDeoptimizationStatus _mark_for_deoptimization_status; // Used for stack deoptimization
bool _is_far_code; // Code is far from CodeCache.
// Have to use far call instructions to call it from code in CodeCache.
// set during construction
unsigned int _has_unsafe_access:1; // May fault due to unsafe access.
unsigned int _has_method_handle_invokes:1; // Has this method MethodHandle invokes?
@ -334,8 +331,6 @@ public:
const char* state() const;
bool is_far_code() const { return _is_far_code; }
bool inlinecache_check_contains(address addr) const {
return (addr >= code_begin() && addr < verified_entry_point());
}

View File

@ -439,7 +439,6 @@ void nmethod::init_defaults() {
_stack_traversal_mark = 0;
_load_reported = false; // jvmti state
_unload_reported = false;
_is_far_code = false; // nmethods are located in CodeCache
#ifdef ASSERT
_oops_are_stale = false;
@ -2309,7 +2308,6 @@ nmethodLocker::nmethodLocker(address pc) {
// should pass zombie_ok == true.
void nmethodLocker::lock_nmethod(CompiledMethod* cm, bool zombie_ok) {
if (cm == NULL) return;
if (cm->is_aot()) return; // FIXME: Revisit once _lock_count is added to aot_method
nmethod* nm = cm->as_nmethod();
Atomic::inc(&nm->_lock_count);
assert(zombie_ok || !nm->is_zombie(), "cannot lock a zombie method: %p", nm);
@ -2317,7 +2315,6 @@ void nmethodLocker::lock_nmethod(CompiledMethod* cm, bool zombie_ok) {
void nmethodLocker::unlock_nmethod(CompiledMethod* cm) {
if (cm == NULL) return;
if (cm->is_aot()) return; // FIXME: Revisit once _lock_count is added to aot_method
nmethod* nm = cm->as_nmethod();
Atomic::dec(&nm->_lock_count);
assert(nm->_lock_count >= 0, "unmatched nmethod lock/unlock");
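With the AOT early return gone, nmethodLocker pins every CompiledMethod unconditionally; typical scoped use, assuming the usual RAII constructor/destructor pairing:

{
  nmethodLocker nml(pc);  // Atomic::inc of the containing nmethod's _lock_count
  // ... the nmethod cannot be flushed while nml is in scope ...
}                         // destructor unlocks, Atomic::dec of _lock_count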
@ -2465,11 +2462,11 @@ void nmethod::verify_scopes() {
verify_interrupt_point(iter.addr());
break;
case relocInfo::opt_virtual_call_type:
stub = iter.opt_virtual_call_reloc()->static_stub(false);
stub = iter.opt_virtual_call_reloc()->static_stub();
verify_interrupt_point(iter.addr());
break;
case relocInfo::static_call_type:
stub = iter.static_call_reloc()->static_stub(false);
stub = iter.static_call_reloc()->static_stub();
//verify_interrupt_point(iter.addr());
break;
case relocInfo::runtime_call_type:
@ -3409,28 +3406,11 @@ public:
}
virtual void set_destination_mt_safe(address dest) {
#if INCLUDE_AOT
if (UseAOT) {
CodeBlob* callee = CodeCache::find_blob(dest);
CompiledMethod* cm = callee->as_compiled_method_or_null();
if (cm != NULL && cm->is_far_code()) {
// Temporary fix, see JDK-8143106
CompiledDirectStaticCall* csc = CompiledDirectStaticCall::at(instruction_address());
csc->set_to_far(methodHandle(Thread::current(), cm->method()), dest);
return;
}
}
#endif
_call->set_destination_mt_safe(dest);
}
virtual void set_to_interpreted(const methodHandle& method, CompiledICInfo& info) {
CompiledDirectStaticCall* csc = CompiledDirectStaticCall::at(instruction_address());
#if INCLUDE_AOT
if (info.to_aot()) {
csc->set_to_far(method, info.entry());
} else
#endif
{
csc->set_to_interpreted(method, info.entry());
}

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2020, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2021, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -415,18 +415,14 @@ void static_stub_Relocation::pack_data_to(CodeSection* dest) {
short* p = (short*) dest->locs_end();
CodeSection* insts = dest->outer()->insts();
normalize_address(_static_call, insts);
jint is_aot = _is_aot ? 1 : 0;
p = pack_2_ints_to(p, scaled_offset(_static_call, insts->start()), is_aot);
p = pack_1_int_to(p, scaled_offset(_static_call, insts->start()));
dest->set_locs_end((relocInfo*) p);
}
void static_stub_Relocation::unpack_data() {
address base = binding()->section_start(CodeBuffer::SECT_INSTS);
jint offset;
jint is_aot;
unpack_2_ints(offset, is_aot);
jint offset = unpack_1_int();
_static_call = address_from_scaled_offset(offset, base);
_is_aot = (is_aot == 1);
}
void trampoline_stub_Relocation::pack_data_to(CodeSection* dest ) {
@ -648,14 +644,14 @@ bool opt_virtual_call_Relocation::clear_inline_cache() {
return set_to_clean_no_ic_refill(icache);
}
address opt_virtual_call_Relocation::static_stub(bool is_aot) {
address opt_virtual_call_Relocation::static_stub() {
// search for the static stub that points back to this static call
address static_call_addr = addr();
RelocIterator iter(code());
while (iter.next()) {
if (iter.type() == relocInfo::static_stub_type) {
static_stub_Relocation* stub_reloc = iter.static_stub_reloc();
if (stub_reloc->static_call() == static_call_addr && stub_reloc->is_aot() == is_aot) {
if (stub_reloc->static_call() == static_call_addr) {
return iter.addr();
}
}
@ -689,14 +685,14 @@ bool static_call_Relocation::clear_inline_cache() {
}
address static_call_Relocation::static_stub(bool is_aot) {
address static_call_Relocation::static_stub() {
// search for the static stub that points back to this static call
address static_call_addr = addr();
RelocIterator iter(code());
while (iter.next()) {
if (iter.type() == relocInfo::static_stub_type) {
static_stub_Relocation* stub_reloc = iter.static_stub_reloc();
if (stub_reloc->static_call() == static_call_addr && stub_reloc->is_aot() == is_aot) {
if (stub_reloc->static_call() == static_call_addr) {
return iter.addr();
}
}

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2020, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2021, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -1066,7 +1066,7 @@ class opt_virtual_call_Relocation : public CallRelocation {
bool clear_inline_cache();
// find the matching static_stub
address static_stub(bool is_aot);
address static_stub();
};
@ -1098,24 +1098,23 @@ class static_call_Relocation : public CallRelocation {
bool clear_inline_cache();
// find the matching static_stub
address static_stub(bool is_aot);
address static_stub();
};
class static_stub_Relocation : public Relocation {
public:
static RelocationHolder spec(address static_call, bool is_aot = false) {
static RelocationHolder spec(address static_call) {
RelocationHolder rh = newHolder();
new(rh) static_stub_Relocation(static_call, is_aot);
new(rh) static_stub_Relocation(static_call);
return rh;
}
private:
address _static_call; // location of corresponding static_call
bool _is_aot; // trampoline to aot code
static_stub_Relocation(address static_call, bool is_aot)
static_stub_Relocation(address static_call)
: Relocation(relocInfo::static_stub_type),
_static_call(static_call), _is_aot(is_aot) { }
_static_call(static_call) { }
friend class RelocIterator;
static_stub_Relocation() : Relocation(relocInfo::static_stub_type) { }
@ -1124,7 +1123,6 @@ class static_stub_Relocation : public Relocation {
bool clear_inline_cache();
address static_call() { return _static_call; }
bool is_aot() { return _is_aot; }
// data is packed as a scaled offset in "1_int" format: [c] or [Cc]
void pack_data_to(CodeSection* dest);

View File

@ -235,8 +235,6 @@ public:
scale *= threshold_scaling;
}
switch(cur_level) {
case CompLevel_aot:
return b >= Tier3AOTBackEdgeThreshold * scale;
case CompLevel_none:
case CompLevel_limited_profile:
return b >= Tier3BackEdgeThreshold * scale;
@ -250,10 +248,6 @@ public:
static bool apply(int i, int b, CompLevel cur_level, const methodHandle& method) {
double k = 1;
switch(cur_level) {
case CompLevel_aot: {
k = CompilationModeFlag::disable_intermediate() ? 1 : CompilationPolicy::threshold_scale(CompLevel_full_profile, Tier3LoadFeedback);
break;
}
case CompLevel_none:
// Fall through
case CompLevel_limited_profile: {
@ -279,9 +273,6 @@ public:
scale *= threshold_scaling;
}
switch(cur_level) {
case CompLevel_aot:
return (i >= Tier3AOTInvocationThreshold * scale) ||
(i >= Tier3AOTMinInvocationThreshold * scale && i + b >= Tier3AOTCompileThreshold * scale);
case CompLevel_none:
case CompLevel_limited_profile:
return (i >= Tier3InvocationThreshold * scale) ||
@ -297,10 +288,6 @@ public:
static bool apply(int i, int b, CompLevel cur_level, const methodHandle& method) {
double k = 1;
switch(cur_level) {
case CompLevel_aot: {
k = CompilationModeFlag::disable_intermediate() ? 1 : CompilationPolicy::threshold_scale(CompLevel_full_profile, Tier3LoadFeedback);
break;
}
case CompLevel_none:
case CompLevel_limited_profile: {
k = CompilationPolicy::threshold_scale(CompLevel_full_profile, Tier3LoadFeedback);
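For reference, the call predicate that survives is the classic two-part test; a self-contained sketch with the Tier3* flag values passed in as parameters (an illustration, not the actual flag plumbing):

// i = invocation count, b = backedge count.
static bool tier3_call_predicate_sketch(int i, int b, double scale,
                                        int invocation_threshold,
                                        int min_invocation_threshold,
                                        int compile_threshold) {
  return (i >= invocation_threshold * scale) ||
         (i >= min_invocation_threshold * scale &&
          i + b >= compile_threshold * scale);
}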
@ -520,8 +507,8 @@ bool CompilationPolicy::verify_level(CompLevel level) {
return false;
}
// AOT and interpreter levels are always valid.
if (level == CompLevel_aot || level == CompLevel_none) {
// Interpreter level is always valid.
if (level == CompLevel_none) {
return true;
}
if (CompilationModeFlag::normal()) {
@ -759,7 +746,7 @@ void CompilationPolicy::compile(const methodHandle& mh, int bci, CompLevel level
if (level == CompLevel_none) {
if (mh->has_compiled_code()) {
// Happens when we switch from AOT to interpreter to profile.
// Happens when we switch to interpreter to profile.
MutexLocker ml(Compile_lock);
NoSafepointVerifier nsv;
if (mh->has_compiled_code()) {
@ -773,24 +760,6 @@ void CompilationPolicy::compile(const methodHandle& mh, int bci, CompLevel level
}
return;
}
if (level == CompLevel_aot) {
if (mh->has_aot_code()) {
if (PrintTieredEvents) {
print_event(COMPILE, mh(), mh(), bci, level);
}
MutexLocker ml(Compile_lock);
NoSafepointVerifier nsv;
if (mh->has_aot_code() && mh->code() != mh->aot_code()) {
mh->aot_code()->make_entrant();
if (mh->has_compiled_code()) {
mh->code()->make_not_entrant();
}
MutexLocker pl(CompiledMethod_lock, Mutex::_no_safepoint_check_flag);
Method::set_code(mh, mh->aot_code());
}
}
return;
}
if (!CompilationModeFlag::disable_intermediate()) {
// Check if the method can be compiled. If it cannot be compiled with C1, continue profiling
@ -1038,16 +1007,6 @@ CompLevel CompilationPolicy::common(const methodHandle& method, CompLevel cur_le
} else {
switch(cur_level) {
default: break;
case CompLevel_aot:
// If we were at full profile level, would we switch to full opt?
if (common<Predicate>(method, CompLevel_full_profile, disable_feedback) == CompLevel_full_optimization) {
next_level = CompLevel_full_optimization;
} else if (disable_feedback || (CompileBroker::queue_size(CompLevel_full_optimization) <=
Tier3DelayOff * compiler_count(CompLevel_full_optimization) &&
Predicate::apply(i, b, cur_level, method))) {
next_level = CompilationModeFlag::disable_intermediate() ? CompLevel_none : CompLevel_full_profile;
}
break;
case CompLevel_none:
// If we were at full profile level, would we switch to full opt?
if (common<Predicate>(method, CompLevel_full_profile, disable_feedback) == CompLevel_full_optimization) {
@ -1152,26 +1111,6 @@ CompLevel CompilationPolicy::loop_event(const methodHandle& method, CompLevel cu
return next_level;
}
bool CompilationPolicy::maybe_switch_to_aot(const methodHandle& mh, CompLevel cur_level, CompLevel next_level, Thread* thread) {
if (UseAOT) {
if (cur_level == CompLevel_full_profile || cur_level == CompLevel_none) {
// If the current level is full profile or interpreter and we're switching to any other level,
// reactivate the AOT code first so that we won't waste time overprofiling.
compile(mh, InvocationEntryBci, CompLevel_aot, thread);
// Fall through for JIT compilation.
}
if (next_level == CompLevel_limited_profile && cur_level != CompLevel_aot && mh->has_aot_code()) {
// If the next level is limited profile, use the aot code (if there is any),
// since it's essentially the same thing.
compile(mh, InvocationEntryBci, CompLevel_aot, thread);
// No need to JIT, we're done.
return true;
}
}
return false;
}
// Handle the invocation event.
void CompilationPolicy::method_invocation_event(const methodHandle& mh, const methodHandle& imh,
CompLevel level, CompiledMethod* nm, TRAPS) {
@ -1180,10 +1119,6 @@ void CompilationPolicy::method_invocation_event(const methodHandle& mh, const me
}
CompLevel next_level = call_event(mh, level, THREAD);
if (next_level != level) {
if (maybe_switch_to_aot(mh, level, next_level, THREAD)) {
// No JITting necessary
return;
}
if (is_compilation_enabled() && !CompileBroker::compilation_is_in_queue(mh)) {
compile(mh, InvocationEntryBci, next_level, THREAD);
}
@ -1214,14 +1149,7 @@ void CompilationPolicy::method_back_branch_event(const methodHandle& mh, const m
// enough calls.
CompLevel cur_level, next_level;
if (mh() != imh()) { // If there is an enclosing method
if (level == CompLevel_aot) {
// Recompile the enclosing method to prevent infinite OSRs. Stay at AOT level while it's compiling.
if (max_osr_level != CompLevel_none && !CompileBroker::compilation_is_in_queue(mh)) {
CompLevel enclosing_level = limit_level(CompLevel_full_profile);
compile(mh, InvocationEntryBci, enclosing_level, THREAD);
}
} else {
// Current loop event level is not AOT
{
guarantee(nm != NULL, "Should have nmethod here");
cur_level = comp_level(mh());
next_level = call_event(mh, cur_level, THREAD);
@ -1253,7 +1181,7 @@ void CompilationPolicy::method_back_branch_event(const methodHandle& mh, const m
next_level = CompLevel_full_profile;
}
if (cur_level != next_level) {
if (!maybe_switch_to_aot(mh, cur_level, next_level, THREAD) && !CompileBroker::compilation_is_in_queue(mh)) {
if (!CompileBroker::compilation_is_in_queue(mh)) {
compile(mh, InvocationEntryBci, next_level, THREAD);
}
}
@ -1262,7 +1190,7 @@ void CompilationPolicy::method_back_branch_event(const methodHandle& mh, const m
cur_level = comp_level(mh());
next_level = call_event(mh, cur_level, THREAD);
if (next_level != cur_level) {
if (!maybe_switch_to_aot(mh, cur_level, next_level, THREAD) && !CompileBroker::compilation_is_in_queue(mh)) {
if (!CompileBroker::compilation_is_in_queue(mh)) {
compile(mh, InvocationEntryBci, next_level, THREAD);
}
}

View File

@ -210,8 +210,6 @@ class CompilationPolicy : AllStatic {
// Is method profiled enough?
static bool is_method_profiled(const methodHandle& method);
static bool maybe_switch_to_aot(const methodHandle& mh, CompLevel cur_level, CompLevel next_level, Thread* thread);
static void set_c1_count(int x) { _c1_count = x; }
static void set_c2_count(int x) { _c2_count = x; }

View File

@ -167,9 +167,6 @@ void CompilerConfig::set_client_emulation_mode_flags() {
#if INCLUDE_JVMCI
FLAG_SET_ERGO(EnableJVMCI, false);
FLAG_SET_ERGO(UseJVMCICompiler, false);
#endif
#if INCLUDE_AOT
FLAG_SET_ERGO(UseAOT, false);
#endif
if (FLAG_IS_DEFAULT(NeverActAsServerClassMachine)) {
FLAG_SET_ERGO(NeverActAsServerClassMachine, true);
@ -205,7 +202,6 @@ void CompilerConfig::set_client_emulation_mode_flags() {
bool CompilerConfig::is_compilation_mode_selected() {
return !FLAG_IS_DEFAULT(TieredCompilation) ||
!FLAG_IS_DEFAULT(TieredStopAtLevel) ||
!FLAG_IS_DEFAULT(UseAOT) ||
!FLAG_IS_DEFAULT(CompilationMode)
JVMCI_ONLY(|| !FLAG_IS_DEFAULT(EnableJVMCI)
|| !FLAG_IS_DEFAULT(UseJVMCICompiler));
@ -276,14 +272,6 @@ void CompilerConfig::set_legacy_emulation_flags() {
FLAG_SET_ERGO(Tier4BackEdgeThreshold, osr_threshold);
FLAG_SET_ERGO(Tier0ProfilingStartPercentage, InterpreterProfilePercentage);
}
#if INCLUDE_AOT
if (UseAOT) {
FLAG_SET_ERGO(Tier3AOTInvocationThreshold, threshold);
FLAG_SET_ERGO(Tier3AOTMinInvocationThreshold, threshold);
FLAG_SET_ERGO(Tier3AOTCompileThreshold, threshold);
FLAG_SET_ERGO(Tier3AOTBackEdgeThreshold, CompilerConfig::is_c1_only() ? osr_threshold : osr_profile_threshold);
}
#endif
} else {
// Normal tiered mode, ignore legacy flags
}
@ -332,23 +320,6 @@ void CompilerConfig::set_compilation_policy_flags() {
FLAG_SET_DEFAULT(Tier0ProfilingStartPercentage, 33);
}
#if INCLUDE_AOT
if (UseAOT) {
if (FLAG_IS_DEFAULT(Tier3AOTInvocationThreshold)) {
FLAG_SET_DEFAULT(Tier3AOTInvocationThreshold, 200);
}
if (FLAG_IS_DEFAULT(Tier3AOTMinInvocationThreshold)) {
FLAG_SET_DEFAULT(Tier3AOTMinInvocationThreshold, 100);
}
if (FLAG_IS_DEFAULT(Tier3AOTCompileThreshold)) {
FLAG_SET_DEFAULT(Tier3AOTCompileThreshold, 2000);
}
if (FLAG_IS_DEFAULT(Tier3AOTBackEdgeThreshold)) {
FLAG_SET_DEFAULT(Tier3AOTBackEdgeThreshold, 2000);
}
}
#endif
if (FLAG_IS_DEFAULT(Tier4InvocationThreshold)) {
FLAG_SET_DEFAULT(Tier4InvocationThreshold, 5000);
}

View File

@ -55,9 +55,8 @@ enum MethodCompilation {
// Enumeration to distinguish tiers of compilation
enum CompLevel {
CompLevel_any = -2, // Used for querying the state
CompLevel_all = -2, // Used for changing the state
CompLevel_aot = -1,
CompLevel_any = -1, // Used for querying the state
CompLevel_all = -1, // Used for changing the state
CompLevel_none = 0, // Interpreter
CompLevel_simple = 1, // C1
CompLevel_limited_profile = 2, // C1, invocation & backedge counters
@ -138,18 +137,16 @@ public:
constexpr static bool has_c2() { return COMPILER2_PRESENT(true) NOT_COMPILER2(false); }
constexpr static bool has_jvmci() { return JVMCI_ONLY(true) NOT_JVMCI(false); }
constexpr static bool has_tiered() { return has_c1() && (has_c2() || has_jvmci()); }
constexpr static bool has_aot() { return AOT_ONLY(true) NOT_AOT(false); }
static bool is_aot() { return AOT_ONLY(has_aot() && UseAOT) NOT_AOT(false); }
static bool is_jvmci_compiler() { return JVMCI_ONLY(has_jvmci() && UseJVMCICompiler) NOT_JVMCI(false); }
static bool is_jvmci() { return JVMCI_ONLY(has_jvmci() && EnableJVMCI) NOT_JVMCI(false); }
static bool is_interpreter_only();
// is_*_only() functions describe situations in which the JVM is in one way or another
// forced to use a particular compiler or their combination. The constraint functions
// deliberately ignore the fact that there may also be AOT methods and methods installed
// deliberately ignore the fact that there may also be methods installed
// through JVMCI (where the JVMCI compiler was invoked not through the broker). Be sure
// to check for those (using is_jvmci() and is_aot()) in situations where it matters.
// to check for those (using is_jvmci()) in situations where it matters.
//
// Is the JVM in a configuration that permits only c1-compiled methods (level 1,2,3)?
@ -163,13 +160,13 @@ public:
return false;
}
static bool is_c1_or_interpreter_only_no_aot_or_jvmci() {
static bool is_c1_or_interpreter_only_no_jvmci() {
assert(is_jvmci_compiler() && is_jvmci() || !is_jvmci_compiler(), "JVMCI compiler implies enabled JVMCI");
return !is_aot() && !is_jvmci() && (is_interpreter_only() || is_c1_only());
return !is_jvmci() && (is_interpreter_only() || is_c1_only());
}
static bool is_c1_only_no_aot_or_jvmci() {
return is_c1_only() && !is_aot() && !is_jvmci();
static bool is_c1_only_no_jvmci() {
return is_c1_only() && !is_jvmci();
}
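A hypothetical caller, to show what the rename buys: the AOT clause disappears but the guard means the same thing:

// Sketch: guard a C1-only assumption without a separate AOT check.
if (CompilerConfig::is_c1_only_no_jvmci()) {
  // every compiled method in this process came from C1
}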
// Is the JVM in a configuration that permits only c1-compiled methods at level 1?

View File

@ -190,33 +190,6 @@
"Back edge threshold at which tier 3 OSR compilation is invoked") \
range(0, max_jint) \
\
product(intx, Tier3AOTInvocationThreshold, 10000, \
"Compile if number of method invocations crosses this " \
"threshold if coming from AOT; " \
"with CompilationMode=high-only|high-only-quick-internal, " \
"determines when to transition from AOT to interpreter") \
range(0, max_jint) \
\
product(intx, Tier3AOTMinInvocationThreshold, 1000, \
"Minimum number of invocations to compile at tier 3 if coming from AOT; " \
"with CompilationMode=high-only|high-only-quick-internal, " \
"determines when to transition from AOT to interpreter") \
range(0, max_jint) \
\
product(intx, Tier3AOTCompileThreshold, 15000, \
"Threshold at which tier 3 compilation is invoked (invocation " \
"minimum must be satisfied) if coming from AOT; " \
"with CompilationMode=high-only|high-only-quick-internal, " \
"determines when to transition from AOT to interpreter") \
range(0, max_jint) \
\
product(intx, Tier3AOTBackEdgeThreshold, 120000, \
"Back edge threshold at which tier 3 OSR compilation is invoked " \
"if coming from AOT; " \
"with CompilationMode=high-only|high-only-quick-internal, " \
"determines when to transition from AOT to interpreter") \
range(0, max_jint) \
\
product(intx, Tier4InvocationThreshold, 5000, \
"Compile if number of method invocations crosses this " \
"threshold") \

View File

@ -881,23 +881,9 @@ void Disassembler::decode(CodeBlob* cb, outputStream* st) {
decode_env env(cb, st);
env.output()->print_cr("--------------------------------------------------------------------------------");
if (cb->is_aot()) {
env.output()->print("A ");
if (cb->is_compiled()) {
CompiledMethod* cm = (CompiledMethod*)cb;
env.output()->print("%d ",cm->compile_id());
cm->method()->method_holder()->name()->print_symbol_on(env.output());
env.output()->print(".");
cm->method()->name()->print_symbol_on(env.output());
cm->method()->signature()->print_symbol_on(env.output());
} else {
env.output()->print_cr("%s", cb->name());
}
} else {
env.output()->print("Decoding CodeBlob");
if (cb->name() != NULL) {
env.output()->print(", name: %s,", cb->name());
}
env.output()->print("Decoding CodeBlob");
if (cb->name() != NULL) {
env.output()->print(", name: %s,", cb->name());
}
env.output()->print_cr(" at [" PTR_FORMAT ", " PTR_FORMAT "] " JLONG_FORMAT " bytes", p2i(cb->code_begin()), p2i(cb->code_end()), ((jlong)(cb->code_end() - cb->code_begin())));

View File

@ -63,7 +63,6 @@ G1GCPhaseTimes::G1GCPhaseTimes(STWGCTimer* gc_timer, uint max_gc_threads) :
// Root scanning phases
_gc_par_phases[ThreadRoots] = new WorkerDataArray<double>("ThreadRoots", "Thread Roots (ms):", max_gc_threads);
_gc_par_phases[CLDGRoots] = new WorkerDataArray<double>("CLDGRoots", "CLDG Roots (ms):", max_gc_threads);
AOT_ONLY(_gc_par_phases[AOTCodeRoots] = new WorkerDataArray<double>("AOTCodeRoots", "AOT Root Scan (ms):", max_gc_threads);)
_gc_par_phases[CMRefRoots] = new WorkerDataArray<double>("CMRefRoots", "CM RefProcessor Roots (ms):", max_gc_threads);
for (auto id : EnumRange<OopStorageSet::StrongId>()) {

View File

@ -50,7 +50,6 @@ class G1GCPhaseTimes : public CHeapObj<mtGC> {
ExtRootScan,
ThreadRoots,
CLDGRoots,
AOT_ONLY(AOTCodeRoots COMMA)
CMRefRoots,
// For every strong OopStorage there will be one element in this enum,
// starting with StrongOopStorageSetRoots.

View File

@ -23,7 +23,6 @@
*/
#include "precompiled.hpp"
#include "aot/aotLoader.hpp"
#include "classfile/classLoaderDataGraph.hpp"
#include "classfile/stringTable.hpp"
#include "code/codeCache.hpp"
@ -199,15 +198,6 @@ void G1RootProcessor::process_vm_roots(G1RootClosures* closures,
uint worker_id) {
OopClosure* strong_roots = closures->strong_oops();
#if INCLUDE_AOT
if (_process_strong_tasks.try_claim_task(G1RP_PS_aot_oops_do)) {
if (UseAOT) {
G1GCParPhaseTimesTracker x(phase_times, G1GCPhaseTimes::AOTCodeRoots, worker_id);
AOTLoader::oops_do(strong_roots);
}
}
#endif
for (auto id : EnumRange<OopStorageSet::StrongId>()) {
G1GCPhaseTimes::GCParPhases phase = G1GCPhaseTimes::strong_oopstorage_phase(id);
G1GCParPhaseTimesTracker x(phase_times, phase, worker_id);

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2015, 2020, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2015, 2021, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -55,7 +55,6 @@ class G1RootProcessor : public StackObj {
enum G1H_process_roots_tasks {
G1RP_PS_ClassLoaderDataGraph_oops_do,
G1RP_PS_CodeCache_oops_do,
AOT_ONLY(G1RP_PS_aot_oops_do COMMA)
G1RP_PS_refProcessor_oops_do,
// Leave this one last.
G1RP_PS_NumElements

View File

@ -23,7 +23,6 @@
*/
#include "precompiled.hpp"
#include "aot/aotLoader.hpp"
#include "classfile/classLoaderDataGraph.hpp"
#include "classfile/javaClasses.inline.hpp"
#include "classfile/stringTable.hpp"
@ -1985,7 +1984,6 @@ static void mark_from_roots_work(ParallelRootType::Value root_type, uint worker_
case ParallelRootType::code_cache:
// Do not treat nmethods as strong roots for mark/sweep, since we can unload them.
//ScavengableNMethods::scavengable_nmethods_do(CodeBlobToOopClosure(&mark_and_push_closure));
AOTLoader::oops_do(&mark_and_push_closure);
break;
case ParallelRootType::sentinel:
@ -2192,7 +2190,6 @@ class PSAdjustTask final : public AbstractGangTask {
enum PSAdjustSubTask {
PSAdjustSubTask_code_cache,
PSAdjustSubTask_aot,
PSAdjustSubTask_old_ref_process,
PSAdjustSubTask_young_ref_process,
@ -2236,9 +2233,6 @@ public:
CodeBlobToOopClosure adjust_code(&adjust, CodeBlobToOopClosure::FixRelocations);
CodeCache::blobs_do(&adjust_code);
}
if (_sub_tasks.try_claim_task(PSAdjustSubTask_aot)) {
AOT_ONLY(AOTLoader::oops_do(&adjust);)
}
if (_sub_tasks.try_claim_task(PSAdjustSubTask_old_ref_process)) {
PSParallelCompact::ref_processor()->weak_oops_do(&adjust);
}

View File

@ -23,7 +23,6 @@
*/
#include "precompiled.hpp"
#include "aot/aotLoader.hpp"
#include "classfile/classLoaderDataGraph.hpp"
#include "classfile/stringTable.hpp"
#include "code/codeCache.hpp"
@ -104,7 +103,6 @@ static void scavenge_roots_work(ParallelRootType::Value root_type, uint worker_i
{
MarkingCodeBlobClosure code_closure(&roots_to_old_closure, CodeBlobToOopClosure::FixRelocations);
ScavengableNMethods::nmethods_do(&code_closure);
AOTLoader::oops_do(&roots_closure);
}
break;

View File

@ -23,7 +23,6 @@
*/
#include "precompiled.hpp"
#include "aot/aotLoader.hpp"
#include "classfile/classLoaderDataGraph.hpp"
#include "classfile/symbolTable.hpp"
#include "classfile/stringTable.hpp"
@ -809,11 +808,6 @@ void GenCollectedHeap::process_roots(ScanningOption so,
Threads::oops_do(strong_roots, roots_from_code_p);
#if INCLUDE_AOT
if (UseAOT) {
AOTLoader::oops_do(strong_roots);
}
#endif
OopStorageSet::strong_oops_do(strong_roots);
if (so & SO_ScavengeCodeCache) {

View File

@ -157,14 +157,6 @@ void ShenandoahArguments::initialize() {
FLAG_SET_DEFAULT(ClassUnloadingWithConcurrentMark, false);
}
// AOT is not supported yet
if (UseAOT) {
if (!FLAG_IS_DEFAULT(UseAOT)) {
warning("Shenandoah does not support AOT at this moment, disabling UseAOT");
}
FLAG_SET_DEFAULT(UseAOT, false);
}
// TLAB sizing policy makes resizing decisions before each GC cycle. It averages
// historical data, assigning more recent data the weight according to TLABAllocationWeight.
// Current default is good for generational collectors that run frequent young GCs.

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2014, 2020, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2014, 2021, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -23,7 +23,6 @@
*/
#include "precompiled.hpp"
#include "aot/aotLoader.hpp"
#include "classfile/classLoaderDataGraph.hpp"
#include "classfile/stringTable.hpp"
#include "gc/shared/oopStorage.inline.hpp"
@ -72,7 +71,6 @@ void RootSetClosure<Delegate>::process() {
// We don't follow code blob oops, because they have misaligned oops.
Threads::oops_do(this, NULL);
OopStorageSet::strong_oops_do(this);
AOTLoader::oops_do(this);
}
template class RootSetClosure<BFSClosure>;

View File

@ -23,7 +23,6 @@
*/
#include "precompiled.hpp"
#include "aot/aotLoader.hpp"
#include "classfile/classLoaderDataGraph.hpp"
#include "classfile/stringTable.hpp"
#include "gc/shared/oopStorage.inline.hpp"
@ -101,7 +100,6 @@ class ReferenceToRootClosure : public StackObj {
bool do_cldg_roots();
bool do_oop_storage_roots();
bool do_string_table_roots();
bool do_aot_loader_roots();
bool do_roots();
@ -150,13 +148,6 @@ bool ReferenceToRootClosure::do_oop_storage_roots() {
return false;
}
bool ReferenceToRootClosure::do_aot_loader_roots() {
assert(!complete(), "invariant");
ReferenceLocateClosure rcl(_callback, OldObjectRoot::_aot, OldObjectRoot::_type_undetermined, NULL);
AOTLoader::oops_do(&rcl);
return rcl.complete();
}
bool ReferenceToRootClosure::do_roots() {
assert(!complete(), "invariant");
assert(OldObjectRoot::_system_undetermined == _info._system, "invariant");
@ -172,11 +163,6 @@ bool ReferenceToRootClosure::do_roots() {
return true;
}
if (do_aot_loader_roots()) {
_complete = true;
return true;
}
return false;
}

View File

@ -57,8 +57,6 @@ const char* OldObjectRoot::system_description(System system) {
return "Class Loader Data";
case _code_cache:
return "Code Cache";
case _aot:
return "AOT";
#if INCLUDE_JVMCI
case _jvmci:
return "JVMCI";

View File

@ -39,7 +39,6 @@ class OldObjectRoot : public AllStatic {
_strong_oop_storage_set_last = _strong_oop_storage_set_first + EnumRange<OopStorageSet::StrongId>().size() - 1,
_class_loader_data,
_code_cache,
_aot,
JVMCI_ONLY(_jvmci COMMA)
_number_of_systems
};

View File

@ -1,281 +0,0 @@
/*
* Copyright (c) 2016, 2021, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
#include "precompiled.hpp"
#include "aot/aotLoader.hpp"
#include "classfile/stringTable.hpp"
#include "classfile/symbolTable.hpp"
#include "classfile/systemDictionary.hpp"
#include "classfile/vmSymbols.hpp"
#include "compiler/compilationPolicy.hpp"
#include "interpreter/linkResolver.hpp"
#include "jvmci/compilerRuntime.hpp"
#include "oops/cpCache.inline.hpp"
#include "oops/klass.inline.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/deoptimization.hpp"
#include "runtime/frame.inline.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/interfaceSupport.inline.hpp"
#include "runtime/vframe.inline.hpp"
#include "utilities/sizes.hpp"
// Resolve and allocate String
JRT_BLOCK_ENTRY(void, CompilerRuntime::resolve_string_by_symbol(JavaThread* current, void* string_result, const char* name))
JRT_BLOCK
oop str = *(oop*)string_result; // Is it resolved already?
if (str == NULL) { // Do resolution
// First 2 bytes of name contain the length (number of bytes).
int len = Bytes::get_Java_u2((address)name);
name += 2;
TempNewSymbol sym = SymbolTable::new_symbol(name, len);
str = StringTable::intern(sym, CHECK);
assert(java_lang_String::is_instance(str), "must be string");
*(oop*)string_result = str; // Store result
}
assert(str != NULL, "Should be allocated!");
current->set_vm_result(str);
JRT_BLOCK_END
JRT_END
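The removed stubs passed names as a 2-byte length prefix followed by the raw bytes (Bytes::get_Java_u2 reads big-endian); a dependency-free sketch of that framing:

// Sketch: decode a u2-length-prefixed, non-NUL-terminated name.
static const char* read_u2_prefixed_name(const char* p, int& len) {
  len = ((unsigned char)p[0] << 8) | (unsigned char)p[1];  // big-endian u2
  return p + 2;                                            // name bytes follow
}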
Klass* CompilerRuntime::resolve_klass_helper(const char* name, int len, TRAPS) {
JavaThread* current = THREAD->as_Java_thread();
ResourceMark rm(current);
// last java frame on stack (which includes native call frames)
RegisterMap cbl_map(current, false);
// Skip stub
frame caller_frame = current->last_frame().sender(&cbl_map);
CodeBlob* caller_cb = caller_frame.cb();
guarantee(caller_cb != NULL && caller_cb->is_compiled(), "must be called from compiled method");
CompiledMethod* caller_nm = caller_cb->as_compiled_method_or_null();
methodHandle caller(current, caller_nm->method());
// Use class loader of aot method.
Handle loader(current, caller->method_holder()->class_loader());
Handle protection_domain(current, caller->method_holder()->protection_domain());
TempNewSymbol sym = SymbolTable::new_symbol(name, len);
if (sym != NULL && Signature::has_envelope(sym)) {
// Ignore wrapping L and ;
sym = Signature::strip_envelope(sym);
}
if (sym == NULL) {
return NULL;
}
Klass* k = SystemDictionary::resolve_or_fail(sym, loader, protection_domain, true, CHECK_NULL);
return k;
}
// Resolve Klass
JRT_BLOCK_ENTRY(Klass*, CompilerRuntime::resolve_klass_by_symbol(JavaThread* current, Klass** klass_result, const char* name))
Klass* k = NULL;
JRT_BLOCK
k = *klass_result; // Is it resolved already?
if (k == NULL) { // Do resolution
// First 2 bytes of name contain the length (number of bytes).
int len = Bytes::get_Java_u2((address)name);
name += 2;
k = CompilerRuntime::resolve_klass_helper(name, len, CHECK_NULL);
*klass_result = k; // Store result
}
JRT_BLOCK_END
assert(k != NULL, " Should be loaded!");
return k;
JRT_END
Method* CompilerRuntime::resolve_method_helper(Klass* klass, const char* method_name, int method_name_len,
const char* signature_name, int signature_name_len) {
Method* m = NULL;
TempNewSymbol name_symbol = SymbolTable::probe(method_name, method_name_len);
TempNewSymbol signature_symbol = SymbolTable::probe(signature_name, signature_name_len);
if (name_symbol != NULL && signature_symbol != NULL) {
if (name_symbol == vmSymbols::object_initializer_name() ||
name_symbol == vmSymbols::class_initializer_name()) {
// Never search superclasses for constructors
if (klass->is_instance_klass()) {
m = InstanceKlass::cast(klass)->find_method(name_symbol, signature_symbol);
}
} else {
m = klass->lookup_method(name_symbol, signature_symbol);
if (m == NULL && klass->is_instance_klass()) {
m = InstanceKlass::cast(klass)->lookup_method_in_ordered_interfaces(name_symbol, signature_symbol);
}
}
}
return m;
}
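A hypothetical call, to make the probe-only contract concrete: both symbols must already exist in the SymbolTable (the helper probes and never interns), so a miss simply returns NULL:

// "toString" has length 8; "()Ljava/lang/String;" has length 20.
Method* m = CompilerRuntime::resolve_method_helper(
    klass, "toString", 8, "()Ljava/lang/String;", 20);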
JRT_BLOCK_ENTRY(void, CompilerRuntime::resolve_dynamic_invoke(JavaThread* current, oop* appendix_result))
JRT_BLOCK
{
ResourceMark rm(current);
vframeStream vfst(current, true); // Do not skip any javaCalls
assert(!vfst.at_end(), "Java frame must exist");
methodHandle caller(current, vfst.method());
InstanceKlass* holder = caller->method_holder();
int bci = vfst.bci();
Bytecode_invoke bytecode(caller, bci);
int index = bytecode.index();
// Make sure it's resolved first
CallInfo callInfo;
constantPoolHandle cp(current, holder->constants());
ConstantPoolCacheEntry* cp_cache_entry = cp->cache()->entry_at(cp->decode_cpcache_index(index, true));
Bytecodes::Code invoke_code = bytecode.invoke_code();
if (!cp_cache_entry->is_resolved(invoke_code)) {
LinkResolver::resolve_invoke(callInfo, Handle(), cp, index, invoke_code, CHECK);
if (bytecode.is_invokedynamic()) {
cp_cache_entry->set_dynamic_call(cp, callInfo);
} else {
cp_cache_entry->set_method_handle(cp, callInfo);
}
vmassert(cp_cache_entry->is_resolved(invoke_code), "sanity");
}
Handle appendix(current, cp_cache_entry->appendix_if_resolved(cp));
Klass *appendix_klass = appendix.is_null() ? NULL : appendix->klass();
methodHandle adapter_method(current, cp_cache_entry->f1_as_method());
InstanceKlass *adapter_klass = adapter_method->method_holder();
if (appendix_klass != NULL && appendix_klass->is_instance_klass()) {
vmassert(InstanceKlass::cast(appendix_klass)->is_initialized(), "sanity");
}
if (!adapter_klass->is_initialized()) {
// Force initialization of adapter class
adapter_klass->initialize(CHECK);
// Double-check that it was really initialized,
// because we could be doing a recursive call
// from inside <clinit>.
}
int cpi = cp_cache_entry->constant_pool_index();
if (!AOTLoader::reconcile_dynamic_invoke(holder, cpi, adapter_method(),
appendix_klass)) {
return;
}
*appendix_result = appendix();
current->set_vm_result(appendix());
}
JRT_BLOCK_END
JRT_END
JRT_BLOCK_ENTRY(MethodCounters*, CompilerRuntime::resolve_method_by_symbol_and_load_counters(JavaThread* current, MethodCounters** counters_result, Klass* klass, const char* data))
MethodCounters* c = *counters_result; // Is it resolved already?
JRT_BLOCK
if (c == NULL) { // Do resolution
// Get method name and its length
int method_name_len = Bytes::get_Java_u2((address)data);
data += sizeof(u2);
const char* method_name = data;
data += method_name_len;
// Get signature and its length
int signature_name_len = Bytes::get_Java_u2((address)data);
data += sizeof(u2);
const char* signature_name = data;
assert(klass != NULL, "Klass parameter must not be null");
Method* m = resolve_method_helper(klass, method_name, method_name_len, signature_name, signature_name_len);
assert(m != NULL, "Method must resolve successfully");
// Create method counters immediately to avoid check at runtime.
c = m->get_method_counters(current);
if (c == NULL) {
THROW_MSG_NULL(vmSymbols::java_lang_OutOfMemoryError(), "Cannot allocate method counters");
}
*counters_result = c;
}
JRT_BLOCK_END
return c;
JRT_END
// Resolve and initialize Klass
JRT_BLOCK_ENTRY(Klass*, CompilerRuntime::initialize_klass_by_symbol(JavaThread* current, Klass** klass_result, const char* name))
Klass* k = NULL;
JRT_BLOCK
k = klass_result[0]; // Is it initialized already?
if (k == NULL) { // Do initialization
k = klass_result[1]; // Is it resolved already?
if (k == NULL) { // Do resolution
// First 2 bytes of name contain the length (number of bytes).
int len = Bytes::get_Java_u2((address)name);
const char *cname = name + 2;
k = CompilerRuntime::resolve_klass_helper(cname, len, CHECK_NULL);
klass_result[1] = k; // Store resolved result
}
Klass* k0 = klass_result[0]; // Is it initialized already?
if (k0 == NULL && k != NULL && k->is_instance_klass()) {
// Force initialization of instance class
InstanceKlass::cast(k)->initialize(CHECK_NULL);
// Double-check that it was really initialized,
// because we could be doing a recursive call
// from inside <clinit>.
if (InstanceKlass::cast(k)->is_initialized()) {
klass_result[0] = k; // Store initialized result
}
}
}
JRT_BLOCK_END
assert(k != NULL, "Should be loaded!");
return k;
JRT_END
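// Illustrative sketch (hypothetical types, not JDK code) of the two-slot,
// publish-on-completion cache that klass_result implements above: slot 1
// caches the resolved pointer, slot 0 is published only after initialization
// completed, so a recursive call from inside <clinit> never mistakes a
// half-initialized class for a finished one.
struct Klass { bool initialized = false; };
static Klass the_klass;                                  // stand-in universe
static Klass* resolve_klass()            { return &the_klass; }
static void   initialize_klass(Klass* k) { k->initialized = true; }
static Klass* get_or_initialize(Klass* slots[2]) {
  if (slots[0] != nullptr) return slots[0];              // fully initialized
  if (slots[1] == nullptr) slots[1] = resolve_klass();   // cache resolution
  initialize_klass(slots[1]);                            // may recurse via <clinit>
  if (slots[1]->initialized) slots[0] = slots[1];        // publish completion
  return slots[1];
}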
JRT_BLOCK_ENTRY(void, CompilerRuntime::invocation_event(JavaThread* current, MethodCounters* counters))
JRT_BLOCK
methodHandle mh(current, counters->method());
RegisterMap map(current, false);
// Compute the enclosing method
frame fr = current->last_frame().sender(&map);
CompiledMethod* cm = fr.cb()->as_compiled_method_or_null();
assert(cm != NULL && cm->is_compiled(), "Sanity check");
methodHandle emh(current, cm->method());
CompilationPolicy::event(emh, mh, InvocationEntryBci, InvocationEntryBci, CompLevel_aot, cm, CHECK);
JRT_BLOCK_END
JRT_END
JRT_BLOCK_ENTRY(void, CompilerRuntime::backedge_event(JavaThread* current, MethodCounters* counters, int branch_bci, int target_bci))
assert(branch_bci != InvocationEntryBci && target_bci != InvocationEntryBci, "Wrong bci");
assert(target_bci <= branch_bci, "Expected a back edge");
JRT_BLOCK
methodHandle mh(current, counters->method());
RegisterMap map(current, false);
// Compute the enclosing method
frame fr = current->last_frame().sender(&map);
CompiledMethod* cm = fr.cb()->as_compiled_method_or_null();
assert(cm != NULL && cm->is_compiled(), "Sanity check");
methodHandle emh(current, cm->method());
nmethod* osr_nm = CompilationPolicy::event(emh, mh, branch_bci, target_bci, CompLevel_aot, cm, CHECK);
if (osr_nm != NULL) {
Deoptimization::deoptimize_frame(current, fr.id());
}
JRT_BLOCK_END
JRT_END


@ -1,51 +0,0 @@
/*
* Copyright (c) 2016, 2021, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
#ifndef SHARE_JVMCI_COMPILERRUNTIME_HPP
#define SHARE_JVMCI_COMPILERRUNTIME_HPP
#include "memory/allocation.hpp"
#include "memory/resourceArea.hpp"
#include "oops/klass.hpp"
#include "oops/method.hpp"
#include "utilities/exceptions.hpp"
class CompilerRuntime : AllStatic {
public:
// Resolves klass for AOT-compiled method.
static Klass* resolve_klass_helper(const char* name, int len, TRAPS);
// Resolves method for AOT-compiled method.
static Method* resolve_method_helper(Klass* klass, const char* method_name, int method_name_len,
const char* signature_name, int signature_name_len);
// Resolution methods for AOT-compiled code.
static void resolve_string_by_symbol(JavaThread* current, void* string_result, const char* name);
static void resolve_dynamic_invoke(JavaThread* current, oop* appendix_result);
static Klass* resolve_klass_by_symbol(JavaThread* current, Klass** klass_result, const char* name);
static Klass* initialize_klass_by_symbol(JavaThread* current, Klass** klass_result, const char* name);
static MethodCounters* resolve_method_by_symbol_and_load_counters(JavaThread* current, MethodCounters** counters_result, Klass* klass_hint, const char* data);
static void invocation_event(JavaThread* current, MethodCounters* counters);
static void backedge_event(JavaThread* current, MethodCounters* counters, int branch_bci, int target_bci);
};
#endif // SHARE_JVMCI_COMPILERRUNTIME_HPP


@ -172,70 +172,6 @@ OopMap* CodeInstaller::create_oop_map(JVMCIObject debug_info, JVMCI_TRAPS) {
return map;
}
#if INCLUDE_AOT
AOTOopRecorder::AOTOopRecorder(CodeInstaller* code_inst, Arena* arena, bool deduplicate) : OopRecorder(arena, deduplicate) {
_code_inst = code_inst;
_meta_refs = new GrowableArray<jobject>();
}
int AOTOopRecorder::nr_meta_refs() const {
return _meta_refs->length();
}
jobject AOTOopRecorder::meta_element(int pos) const {
return _meta_refs->at(pos);
}
int AOTOopRecorder::find_index(Metadata* h) {
JavaThread* THREAD = JavaThread::current();
JVMCIEnv* JVMCIENV = _code_inst->jvmci_env();
int oldCount = metadata_count();
int index = this->OopRecorder::find_index(h);
int newCount = metadata_count();
if (oldCount == newCount) {
// found a match
return index;
}
vmassert(index + 1 == newCount, "must be last");
JVMCIKlassHandle klass(THREAD);
JVMCIObject result;
guarantee(h != NULL,
"If DebugInformationRecorder::describe_scope passes NULL, oldCount == newCount must hold.");
if (h->is_klass()) {
klass = (Klass*) h;
result = JVMCIENV->get_jvmci_type(klass, JVMCI_CATCH);
} else if (h->is_method()) {
Method* method = (Method*) h;
methodHandle mh(THREAD, method);
result = JVMCIENV->get_jvmci_method(mh, JVMCI_CATCH);
}
jobject ref = JVMCIENV->get_jobject(result);
record_meta_ref(ref, index);
return index;
}
int AOTOopRecorder::find_index(jobject h) {
if (h == NULL) {
return 0;
}
oop javaMirror = JNIHandles::resolve(h);
Klass* klass = java_lang_Class::as_Klass(javaMirror);
return find_index(klass);
}
void AOTOopRecorder::record_meta_ref(jobject o, int index) {
assert(index > 0, "must be 1..n");
index -= 1; // reduce by one to convert to array index
assert(index == _meta_refs->length(), "must be last");
_meta_refs->append(o);
}
#endif // INCLUDE_AOT
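// Worked example (hypothetical sequence) of the 1-based OopRecorder
// convention record_meta_ref relies on -- index 0 is reserved for NULL,
// so entry i lands in _meta_refs slot i-1:
//   find_index(klass_A)       -> 1, _meta_refs->at(0) == ref(klass_A)
//   find_index(method_B)      -> 2, _meta_refs->at(1) == ref(method_B)
//   find_index(klass_A)       -> 1 again (deduplicated, nothing appended)
//   find_index((jobject)NULL) -> 0 (reserved, never recorded)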
void* CodeInstaller::record_metadata_reference(CodeSection* section, address dest, JVMCIObject constant, JVMCI_TRAPS) {
/*
* This method needs to return a raw (untyped) pointer, since the value of a pointer to the base
@ -538,69 +474,6 @@ void CodeInstaller::initialize_dependencies(JVMCIObject compiled_code, OopRecord
}
}
#if INCLUDE_AOT
RelocBuffer::~RelocBuffer() {
FREE_C_HEAP_ARRAY(char, _buffer);
}
address RelocBuffer::begin() const {
if (_buffer != NULL) {
return (address) _buffer;
}
return (address) _static_buffer;
}
void RelocBuffer::set_size(size_t bytes) {
assert(bytes <= _size, "can't grow in size!");
_size = bytes;
}
void RelocBuffer::ensure_size(size_t bytes) {
assert(_buffer == NULL, "can only be used once");
assert(_size == 0, "can only be used once");
if (bytes >= RelocBuffer::stack_size) {
_buffer = NEW_C_HEAP_ARRAY(char, bytes, mtJVMCI);
}
_size = bytes;
}
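// RelocBuffer above is a small-buffer optimization: relocation data up to
// stack_size (1024) bytes lives in the inline array, larger sections spill
// to a C-heap block freed by the destructor. A self-contained sketch of the
// same idiom in standard C++ (not the JDK types):
#include <cstddef>
#include <cstdlib>
class SmallBuffer {
  static const size_t kInlineSize = 1024;
  size_t _size = 0;
  char   _inline_buf[kInlineSize];
  char*  _heap_buf = nullptr;              // used only for large requests
public:
  ~SmallBuffer() { std::free(_heap_buf); } // free(nullptr) is a no-op
  void ensure_size(size_t bytes) {         // call once, before writing
    if (bytes >= kInlineSize) {
      _heap_buf = static_cast<char*>(std::malloc(bytes));
    }
    _size = bytes;
  }
  char*  begin()      { return _heap_buf != nullptr ? _heap_buf : _inline_buf; }
  size_t size() const { return _size; }
};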
JVMCI::CodeInstallResult CodeInstaller::gather_metadata(JVMCIObject target, JVMCIObject compiled_code, CodeMetadata& metadata, JVMCI_TRAPS) {
assert(JVMCIENV->is_hotspot(), "AOT code is executed only in HotSpot mode");
CodeBuffer buffer("JVMCI Compiler CodeBuffer for Metadata");
AOTOopRecorder* recorder = new AOTOopRecorder(this, &_arena, true);
initialize_dependencies(compiled_code, recorder, JVMCI_CHECK_OK);
metadata.set_oop_recorder(recorder);
// Get instructions and constants CodeSections early because we need them.
_instructions = buffer.insts();
_constants = buffer.consts();
buffer.set_immutable_PIC(_immutable_pic_compilation);
initialize_fields(target, compiled_code, JVMCI_CHECK_OK);
JVMCI::CodeInstallResult result = initialize_buffer(buffer, false, JVMCI_CHECK_OK);
if (result != JVMCI::ok) {
return result;
}
_debug_recorder->pcs_size(); // create the sentinel record
assert(_debug_recorder->pcs_length() >= 2, "must be at least 2");
metadata.set_pc_desc(_debug_recorder->pcs(), _debug_recorder->pcs_length());
metadata.set_scopes(_debug_recorder->stream()->buffer(), _debug_recorder->data_size());
metadata.set_exception_table(&_exception_handler_table);
metadata.set_implicit_exception_table(&_implicit_exception_table);
RelocBuffer* reloc_buffer = metadata.get_reloc_buffer();
reloc_buffer->ensure_size(buffer.total_relocation_size());
size_t size = (size_t) buffer.copy_relocations_to(reloc_buffer->begin(), (CodeBuffer::csize_t) reloc_buffer->size(), true);
reloc_buffer->set_size(size);
return JVMCI::ok;
}
#endif // INCLUDE_AOT
// constructor used to create a method
JVMCI::CodeInstallResult CodeInstaller::install(JVMCICompiler* compiler,
JVMCIObject target,
@ -619,9 +492,6 @@ JVMCI::CodeInstallResult CodeInstaller::install(JVMCICompiler* compiler,
// Get instructions and constants CodeSections early because we need them.
_instructions = buffer.insts();
_constants = buffer.consts();
#if INCLUDE_AOT
buffer.set_immutable_PIC(_immutable_pic_compilation);
#endif
initialize_fields(target, compiled_code, JVMCI_CHECK_OK);
JVMCI::CodeInstallResult result = initialize_buffer(buffer, true, JVMCI_CHECK_OK);
@ -741,9 +611,8 @@ void CodeInstaller::initialize_fields(JVMCIObject target, JVMCIObject compiled_c
}
int CodeInstaller::estimate_stubs_size(JVMCI_TRAPS) {
// Estimate the number of static and aot call stubs that might be emitted.
// Estimate the number of static call stubs that might be emitted.
int static_call_stubs = 0;
int aot_call_stubs = 0;
int trampoline_stubs = 0;
JVMCIObjectArray sites = this->sites();
for (int i = 0; i < JVMCIENV->get_length(sites); i++) {
@ -771,22 +640,10 @@ int CodeInstaller::estimate_stubs_size(JVMCI_TRAPS) {
}
}
}
#if INCLUDE_AOT
if (UseAOT && jvmci_env()->isa_site_Call(site)) {
JVMCIObject target = jvmci_env()-> get_site_Call_target(site);
if (!jvmci_env()->isa_HotSpotForeignCallTarget(target)) {
// Add far aot trampolines.
aot_call_stubs++;
}
}
#endif
}
}
int size = static_call_stubs * CompiledStaticCall::to_interp_stub_size();
size += trampoline_stubs * CompiledStaticCall::to_trampoline_stub_size();
#if INCLUDE_AOT
size += aot_call_stubs * CompiledStaticCall::to_aot_stub_size();
#endif
return size;
}
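// The estimate is plain arithmetic over the stub counts; this change drops
// the aot_call_stubs term. Worked example with hypothetical per-stub sizes
// (the real to_interp/to_trampoline/to_aot sizes are platform-specific):
//   10 static call stubs * 24 bytes = 240
//    4 trampoline stubs  * 16 bytes =  64
//    2 aot call stubs    * 32 bytes =  64   <- term removed here
//   total reserved stub space       = 368 bytes before, 304 after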
@ -1276,10 +1133,6 @@ void CodeInstaller::site_Call(CodeBuffer& buffer, jint pc_offset, JVMCIObject si
if (foreign_call.is_non_null()) {
jlong foreign_call_destination = jvmci_env()->get_HotSpotForeignCallTarget_address(foreign_call);
if (_immutable_pic_compilation) {
// Use fake short distance during PIC compilation.
foreign_call_destination = (jlong)(_instructions->start() + pc_offset);
}
CodeInstaller::pd_relocate_ForeignCall(inst, foreign_call_destination, JVMCI_CHECK);
} else { // method != NULL
if (debug_info.is_null()) {
@ -1292,10 +1145,6 @@ void CodeInstaller::site_Call(CodeBuffer& buffer, jint pc_offset, JVMCIObject si
// Need a static call stub for transitions from compiled to interpreted.
CompiledStaticCall::emit_to_interp_stub(buffer, _instructions->start() + pc_offset);
}
#if INCLUDE_AOT
// Trampoline to far aot code.
CompiledStaticCall::emit_to_aot_stub(buffer, _instructions->start() + pc_offset);
#endif
}
_next_call_type = INVOKE_INVALID;
@ -1319,25 +1168,11 @@ void CodeInstaller::site_DataPatch(CodeBuffer& buffer, jint pc_offset, JVMCIObje
const char* to_string = JVMCIENV->as_utf8_string(string);
JVMCI_THROW_MSG(IllegalArgumentException, err_msg("Direct object constant reached the backend: %s", to_string));
}
if (!_immutable_pic_compilation) {
// Do not patch during PIC compilation.
pd_patch_OopConstant(pc_offset, constant, JVMCI_CHECK);
}
pd_patch_OopConstant(pc_offset, constant, JVMCI_CHECK);
} else if (jvmci_env()->isa_IndirectHotSpotObjectConstantImpl(constant)) {
if (!_immutable_pic_compilation) {
// Do not patch during PIC compilation.
pd_patch_OopConstant(pc_offset, constant, JVMCI_CHECK);
}
pd_patch_OopConstant(pc_offset, constant, JVMCI_CHECK);
} else if (jvmci_env()->isa_HotSpotMetaspaceConstantImpl(constant)) {
if (!_immutable_pic_compilation) {
pd_patch_MetaspaceConstant(pc_offset, constant, JVMCI_CHECK);
}
#if INCLUDE_AOT
} else if (jvmci_env()->isa_HotSpotSentinelConstant(constant)) {
if (!_immutable_pic_compilation) {
JVMCI_ERROR("sentinel constant not supported for normal compiles: %s", jvmci_env()->klass_name(constant));
}
#endif
pd_patch_MetaspaceConstant(pc_offset, constant, JVMCI_CHECK);
} else {
JVMCI_ERROR("unknown constant type in data patch: %s", jvmci_env()->klass_name(constant));
}


@ -1,5 +1,5 @@
/*
* Copyright (c) 2011, 2020, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2011, 2021, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -30,42 +30,6 @@
#include "jvmci/jvmci.hpp"
#include "jvmci/jvmciEnv.hpp"
#if INCLUDE_AOT
class RelocBuffer : public StackObj {
enum { stack_size = 1024 };
public:
RelocBuffer() : _size(0), _buffer(0) {}
~RelocBuffer();
void ensure_size(size_t bytes);
void set_size(size_t bytes);
address begin() const;
size_t size() const { return _size; }
private:
size_t _size;
char _static_buffer[stack_size];
char *_buffer;
};
class CodeInstaller;
class AOTOopRecorder : public OopRecorder {
public:
AOTOopRecorder(CodeInstaller* code_inst, Arena* arena = NULL, bool deduplicate = false);
virtual int find_index(Metadata* h);
virtual int find_index(jobject h);
int nr_meta_refs() const;
jobject meta_element(int pos) const;
private:
void record_meta_ref(jobject ref, int index);
GrowableArray<jobject>* _meta_refs;
CodeInstaller* _code_inst;
};
#endif // INCLUDE_AOT
class CodeMetadata {
public:
CodeMetadata() {}
@ -78,11 +42,6 @@ public:
u_char* get_scopes_desc() const { return _scopes_desc; }
int get_scopes_size() const { return _nr_scopes_desc; }
#if INCLUDE_AOT
RelocBuffer* get_reloc_buffer() { return &_reloc_buffer; }
AOTOopRecorder* get_oop_recorder() { return _oop_recorder; }
#endif
ExceptionHandlerTable* get_exception_table() { return _exception_table; }
ImplicitExceptionTable* get_implicit_exception_table() { return _implicit_exception_table; }
@ -97,12 +56,6 @@ public:
_nr_scopes_desc = size;
}
#if INCLUDE_AOT
void set_oop_recorder(AOTOopRecorder* recorder) {
_oop_recorder = recorder;
}
#endif
void set_exception_table(ExceptionHandlerTable* table) {
_exception_table = table;
}
@ -119,10 +72,6 @@ private:
u_char* _scopes_desc;
int _nr_scopes_desc;
#if INCLUDE_AOT
RelocBuffer _reloc_buffer;
AOTOopRecorder* _oop_recorder;
#endif
ExceptionHandlerTable* _exception_table;
ImplicitExceptionTable* _implicit_exception_table;
};
@ -202,8 +151,6 @@ private:
ImplicitExceptionTable _implicit_exception_table;
bool _has_auto_box;
bool _immutable_pic_compilation; // Installer is called for Immutable PIC compilation.
static ConstantOopWriteValue* _oop_null_scope_value;
static ConstantIntValue* _int_m1_scope_value;
static ConstantIntValue* _int_0_scope_value;
@ -231,15 +178,11 @@ private:
public:
CodeInstaller(JVMCIEnv* jvmci_env, bool immutable_pic_compilation) :
CodeInstaller(JVMCIEnv* jvmci_env) :
_arena(mtJVMCI),
_jvmci_env(jvmci_env),
_has_auto_box(false),
_immutable_pic_compilation(immutable_pic_compilation) {}
_has_auto_box(false) {}
#if INCLUDE_AOT
JVMCI::CodeInstallResult gather_metadata(JVMCIObject target, JVMCIObject compiled_code, CodeMetadata& metadata, JVMCI_TRAPS);
#endif
JVMCI::CodeInstallResult install(JVMCICompiler* compiler,
JVMCIObject target,
JVMCIObject compiled_code,


@ -57,7 +57,7 @@ JVMCICompiler* JVMCICompiler::instance(bool require_non_null, TRAPS) {
// Initialization
void JVMCICompiler::initialize() {
assert(!CompilerConfig::is_c1_or_interpreter_only_no_aot_or_jvmci(), "JVMCI is launched, it's not c1/interpreter only mode");
assert(!CompilerConfig::is_c1_or_interpreter_only_no_jvmci(), "JVMCI is launched, it's not c1/interpreter only mode");
if (!UseCompiler || !EnableJVMCI || !UseJVMCICompiler || !should_perform_init()) {
return;
}


@ -876,9 +876,8 @@ C2V_VMENTRY_0(jint, installCode, (JNIEnv *env, jobject, jobject target, jobject
JVMCICompiler* compiler = JVMCICompiler::instance(true, CHECK_JNI_ERR);
TraceTime install_time("installCode", JVMCICompiler::codeInstallTimer(!thread->is_Compiler_thread()));
bool is_immutable_PIC = JVMCIENV->get_HotSpotCompiledCode_isImmutablePIC(compiled_code_handle) > 0;
CodeInstaller installer(JVMCIENV, is_immutable_PIC);
CodeInstaller installer(JVMCIENV);
JVMCI::CodeInstallResult result = installer.install(compiler,
target_handle,
compiled_code_handle,
@ -920,85 +919,7 @@ C2V_VMENTRY_0(jint, installCode, (JNIEnv *env, jobject, jobject target, jobject
C2V_END
C2V_VMENTRY_0(jint, getMetadata, (JNIEnv *env, jobject, jobject target, jobject compiled_code, jobject metadata))
#if INCLUDE_AOT
HandleMark hm(THREAD);
assert(JVMCIENV->is_hotspot(), "AOT code is executed only in HotSpot mode");
JVMCIObject target_handle = JVMCIENV->wrap(target);
JVMCIObject compiled_code_handle = JVMCIENV->wrap(compiled_code);
JVMCIObject metadata_handle = JVMCIENV->wrap(metadata);
CodeMetadata code_metadata;
CodeInstaller installer(JVMCIENV, true /* immutable PIC compilation */);
JVMCI::CodeInstallResult result = installer.gather_metadata(target_handle, compiled_code_handle, code_metadata, JVMCI_CHECK_0);
if (result != JVMCI::ok) {
return result;
}
if (code_metadata.get_nr_pc_desc() > 0) {
int size = sizeof(PcDesc) * code_metadata.get_nr_pc_desc();
JVMCIPrimitiveArray array = JVMCIENV->new_byteArray(size, JVMCI_CHECK_(JVMCI::cache_full));
JVMCIENV->copy_bytes_from((jbyte*) code_metadata.get_pc_desc(), array, 0, size);
HotSpotJVMCI::HotSpotMetaData::set_pcDescBytes(JVMCIENV, metadata_handle, array);
}
if (code_metadata.get_scopes_size() > 0) {
int size = code_metadata.get_scopes_size();
JVMCIPrimitiveArray array = JVMCIENV->new_byteArray(size, JVMCI_CHECK_(JVMCI::cache_full));
JVMCIENV->copy_bytes_from((jbyte*) code_metadata.get_scopes_desc(), array, 0, size);
HotSpotJVMCI::HotSpotMetaData::set_scopesDescBytes(JVMCIENV, metadata_handle, array);
}
RelocBuffer* reloc_buffer = code_metadata.get_reloc_buffer();
int size = (int) reloc_buffer->size();
JVMCIPrimitiveArray array = JVMCIENV->new_byteArray(size, JVMCI_CHECK_(JVMCI::cache_full));
JVMCIENV->copy_bytes_from((jbyte*) reloc_buffer->begin(), array, 0, size);
HotSpotJVMCI::HotSpotMetaData::set_relocBytes(JVMCIENV, metadata_handle, array);
const OopMapSet* oopMapSet = installer.oopMapSet();
{
ResourceMark mark;
ImmutableOopMapBuilder builder(oopMapSet);
int size = builder.heap_size();
JVMCIPrimitiveArray array = JVMCIENV->new_byteArray(size, JVMCI_CHECK_(JVMCI::cache_full));
builder.generate_into((address) HotSpotJVMCI::resolve(array)->byte_at_addr(0));
HotSpotJVMCI::HotSpotMetaData::set_oopMaps(JVMCIENV, metadata_handle, array);
}
AOTOopRecorder* recorder = code_metadata.get_oop_recorder();
int nr_meta_refs = recorder->nr_meta_refs();
JVMCIObjectArray metadataArray = JVMCIENV->new_Object_array(nr_meta_refs, JVMCI_CHECK_(JVMCI::cache_full));
for (int i = 0; i < nr_meta_refs; ++i) {
jobject element = recorder->meta_element(i);
if (element == NULL) {
return JVMCI::cache_full;
}
JVMCIENV->put_object_at(metadataArray, i, JVMCIENV->wrap(element));
}
HotSpotJVMCI::HotSpotMetaData::set_metadata(JVMCIENV, metadata_handle, metadataArray);
ExceptionHandlerTable* handler = code_metadata.get_exception_table();
int table_size = handler->size_in_bytes();
JVMCIPrimitiveArray exceptionArray = JVMCIENV->new_byteArray(table_size, JVMCI_CHECK_(JVMCI::cache_full));
if (table_size > 0) {
handler->copy_bytes_to((address) HotSpotJVMCI::resolve(exceptionArray)->byte_at_addr(0));
}
HotSpotJVMCI::HotSpotMetaData::set_exceptionBytes(JVMCIENV, metadata_handle, exceptionArray);
ImplicitExceptionTable* implicit = code_metadata.get_implicit_exception_table();
int implicit_table_size = implicit->size_in_bytes();
JVMCIPrimitiveArray implicitExceptionArray = JVMCIENV->new_byteArray(implicit_table_size, JVMCI_CHECK_(JVMCI::cache_full));
if (implicit_table_size > 0) {
implicit->copy_bytes_to((address) HotSpotJVMCI::resolve(implicitExceptionArray)->byte_at_addr(0), implicit_table_size);
}
HotSpotJVMCI::HotSpotMetaData::set_implicitExceptionBytes(JVMCIENV, metadata_handle, implicitExceptionArray);
return result;
#else
JVMCI_THROW_MSG_0(InternalError, "unimplemented");
#endif
C2V_END
C2V_VMENTRY(void, resetCompilationStatistics, (JNIEnv* env, jobject))
@ -1645,16 +1566,7 @@ C2V_VMENTRY_0(jint, methodDataProfileDataSize, (JNIEnv* env, jobject, jlong meta
C2V_END
C2V_VMENTRY_0(jlong, getFingerprint, (JNIEnv* env, jobject, jlong metaspace_klass))
#if INCLUDE_AOT
Klass *k = (Klass*) (address) metaspace_klass;
if (k->is_instance_klass()) {
return InstanceKlass::cast(k)->get_stored_fingerprint();
} else {
return 0;
}
#else
JVMCI_THROW_MSG_0(InternalError, "unimplemented");
#endif
C2V_END
C2V_VMENTRY_NULL(jobject, getHostClass, (JNIEnv* env, jobject, jobject jvmci_type))


@ -1,45 +0,0 @@
/*
* Copyright (c) 2016, 2019, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
#ifndef SHARE_JVMCI_VMSTRUCTS_COMPILER_RUNTIME_HPP
#define SHARE_JVMCI_VMSTRUCTS_COMPILER_RUNTIME_HPP
#if INCLUDE_AOT
#include "jvmci/compilerRuntime.hpp"
#define VM_ADDRESSES_COMPILER_RUNTIME(declare_address, declare_preprocessor_address, declare_function) \
declare_function(CompilerRuntime::resolve_dynamic_invoke) \
declare_function(CompilerRuntime::resolve_string_by_symbol) \
declare_function(CompilerRuntime::resolve_klass_by_symbol) \
declare_function(CompilerRuntime::resolve_method_by_symbol_and_load_counters) \
declare_function(CompilerRuntime::initialize_klass_by_symbol) \
declare_function(CompilerRuntime::invocation_event) \
declare_function(CompilerRuntime::backedge_event)
#else // INCLUDE_AOT
#define VM_ADDRESSES_COMPILER_RUNTIME(declare_address, declare_preprocessor_address, declare_function)
#endif // INCLUDE_AOT
#endif // SHARE_JVMCI_VMSTRUCTS_COMPILER_RUNTIME_HPP
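// Each declare_function entry exported a CompilerRuntime entry point into
// JVMCIVMStructs::localHotSpotVMAddresses so jaotc-generated code could link
// against it by name. Simplified sketch of the expansion (illustrative, not
// the exact generator macro):
//   declare_function(CompilerRuntime::invocation_event)
//     => { "CompilerRuntime::invocation_event",
//          CAST_FROM_FN_PTR(void*, &CompilerRuntime::invocation_event) }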


@ -29,7 +29,6 @@
#include "jvmci/jvmciCodeInstaller.hpp"
#include "jvmci/jvmciCompilerToVM.hpp"
#include "jvmci/jvmciRuntime.hpp"
#include "jvmci/vmStructs_compiler_runtime.hpp"
#include "jvmci/vmStructs_jvmci.hpp"
#include "oops/klassVtable.hpp"
#include "oops/objArrayKlass.hpp"
@ -233,7 +232,6 @@
JVMTI_ONLY(nonstatic_field(MethodCounters, _number_of_breakpoints, u2)) \
nonstatic_field(MethodCounters, _invocation_counter, InvocationCounter) \
nonstatic_field(MethodCounters, _backedge_counter, InvocationCounter) \
AOT_ONLY(nonstatic_field(MethodCounters, _method, Method*)) \
\
nonstatic_field(MethodData, _size, int) \
nonstatic_field(MethodData, _method, Method*) \
@ -870,9 +868,6 @@ VMAddressEntry JVMCIVMStructs::localHotSpotVMAddresses[] = {
VM_ADDRESSES(GENERATE_VM_ADDRESS_ENTRY,
GENERATE_PREPROCESSOR_VM_ADDRESS_ENTRY,
GENERATE_VM_FUNCTION_ENTRY)
VM_ADDRESSES_COMPILER_RUNTIME(GENERATE_VM_ADDRESS_ENTRY,
GENERATE_PREPROCESSOR_VM_ADDRESS_ENTRY,
GENERATE_VM_FUNCTION_ENTRY)
VM_ADDRESSES_OS(GENERATE_VM_ADDRESS_ENTRY,
GENERATE_PREPROCESSOR_VM_ADDRESS_ENTRY,
GENERATE_VM_FUNCTION_ENTRY)


@ -36,7 +36,6 @@
LOG_TAG(age) \
LOG_TAG(alloc) \
LOG_TAG(annotation) \
LOG_TAG(aot) \
LOG_TAG(arguments) \
LOG_TAG(attach) \
LOG_TAG(barrier) \


@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2019, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2021, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -171,16 +171,7 @@ class CodeHeap : public CHeapObj<mtCode> {
// Containment means "contained in committed space".
bool contains(const void* p) const { return low() <= p && p < high(); }
bool contains_blob(const CodeBlob* blob) const {
// AOT CodeBlobs (i.e. AOTCompiledMethod) objects aren't allocated in the AOTCodeHeap but on the C-Heap.
// Only the code they are pointing to is located in the AOTCodeHeap. All other CodeBlobs are allocated
// directly in their corresponding CodeHeap with their code appended to the actual C++ object.
// So all CodeBlobs except AOTCompiledMethod are contiguous in memory with their data and code while
// AOTCompiledMethod and their code/data is distributed in the C-Heap. This means we can use the
// address of a CodeBlob object in order to locate it in its heap while we have to use the address
// of the actual code an AOTCompiledMethod object is pointing to in order to locate it.
// Notice that for an ordinary CodeBlob with code size zero, code_begin() may point beyond the object!
const void* start = AOT_ONLY( (code_blob_type() == CodeBlobType::AOT) ? blob->code_begin() : ) (void*)blob;
return contains(start);
return contains((void*)blob);
}
virtual void* find_start(void* p) const; // returns the block containing p or NULL


@ -23,7 +23,6 @@
*/
#include "precompiled.hpp"
#include "aot/aotLoader.hpp"
#include "cds/metaspaceShared.hpp"
#include "classfile/classLoaderData.hpp"
#include "gc/shared/collectedHeap.hpp"
