commit 8f528fba93
Merge

--- a/.hgtags
+++ b/.hgtags
@@ -460,7 +460,8 @@ a2008587c13fa05fa2dbfcb09fe987576fbedfd1 jdk-10+32
 bbd692ad4fa300ecca7939ffbe3b1d5e52a28cc6 jdk-10+33
 89deac44e51517841491ba86ff44aa82a5ca96b3 jdk-10+34
 d8c634b016c628622c9abbdc6bf50509e5dedbec jdk-10+35
-cb54a299aa91419cb7caef3992592e7b22488163 jdk-10+36
+0ee20aad71c4f33c426372b4c8bcc1235ce2ec08 jdk-11+0
+959f2f7cbaa6d2ee45d50029744efb219721576c jdk-10+36
 4f830b447edf04fb4a52151a5ad44d9bb60723cd jdk-10+37
 e569e83139fdfbecfeb3cd9014d560917787f158 jdk-10+38
 5b834ec962366e00d4445352a999a3ac14e26f64 jdk-10+39
@@ -637,7 +637,7 @@ else
   # Declare dependencies between hotspot-<variant>* targets
   $(foreach v, $(JVM_VARIANTS), \
       $(eval hotspot-$v: hotspot-$v-gensrc hotspot-$v-libs) \
-      $(eval hotspot-$v-libs: hotspot-$v-gensrc) \
+      $(eval hotspot-$v-libs: hotspot-$v-gensrc java.base-copy) \
   )

   hotspot-ide-project: hotspot exploded-image
@@ -691,8 +691,9 @@ else
   jdk.jdwp.agent-libs: jdk.jdwp.agent-gensrc

   # The swing beans need to have java base properly generated to avoid errors
-  # in javadoc.
-  java.desktop-gensrc-src: java.base-gensrc
+  # in javadoc. The X11 wrappers need the java.base include files to have been
+  # copied and processed.
+  java.desktop-gensrc-src: java.base-gensrc java.base-copy

   # The annotation processing for jdk.internal.vm.ci and jdk.internal.vm.compiler
   # needs classes from the current JDK.
@@ -55,6 +55,7 @@ OPENJDK_TARGET_CPU_ARCH := @OPENJDK_BUILD_CPU_ARCH@
 OPENJDK_TARGET_CPU_BITS := @OPENJDK_BUILD_CPU_BITS@
 OPENJDK_TARGET_CPU_ENDIAN := @OPENJDK_BUILD_CPU_ENDIAN@
 OPENJDK_TARGET_CPU_LEGACY := @OPENJDK_BUILD_CPU_LEGACY@
+OPENJDK_TARGET_OS_INCLUDE_SUBDIR := @OPENJDK_BUILD_OS_INCLUDE_SUBDIR@

 HOTSPOT_TARGET_OS := @HOTSPOT_BUILD_OS@
 HOTSPOT_TARGET_OS_TYPE := @HOTSPOT_BUILD_OS_TYPE@
@@ -1162,9 +1162,7 @@ AC_DEFUN([FLAGS_SETUP_COMPILER_FLAGS_FOR_JDK_HELPER],
   # Setup some hard coded includes
   $2COMMON_CCXXFLAGS_JDK="[$]$2COMMON_CCXXFLAGS_JDK \
       -I\$(SUPPORT_OUTPUTDIR)/modules_include/java.base \
-      -I${TOPDIR}/src/java.base/share/native/include \
-      -I${TOPDIR}/src/java.base/$OPENJDK_$1_OS/native/include \
-      -I${TOPDIR}/src/java.base/$OPENJDK_$1_OS_TYPE/native/include \
+      -I\$(SUPPORT_OUTPUTDIR)/modules_include/java.base/\$(OPENJDK_TARGET_OS_INCLUDE_SUBDIR) \
       -I${TOPDIR}/src/java.base/share/native/libjava \
       -I${TOPDIR}/src/java.base/$OPENJDK_$1_OS_TYPE/native/libjava \
       -I${TOPDIR}/src/hotspot/share/include \
@@ -886,6 +886,8 @@ JAVA
 BOOT_JDK
 JAVA_CHECK
 JAVAC_CHECK
+VERSION_CLASSFILE_MINOR
+VERSION_CLASSFILE_MAJOR
 VENDOR_VERSION_STRING
 VERSION_DATE
 VERSION_IS_GA
@@ -970,6 +972,7 @@ JDK_VARIANT
 USERNAME
 TOPDIR
 PATH_SEP
+OPENJDK_BUILD_OS_INCLUDE_SUBDIR
 HOTSPOT_BUILD_CPU_DEFINE
 HOTSPOT_BUILD_CPU_ARCH
 HOTSPOT_BUILD_CPU
@@ -980,6 +983,7 @@ OPENJDK_BUILD_CPU_OSARCH
 OPENJDK_BUILD_CPU_ISADIR
 OPENJDK_BUILD_CPU_LEGACY_LIB
 OPENJDK_BUILD_CPU_LEGACY
+OPENJDK_TARGET_OS_INCLUDE_SUBDIR
 HOTSPOT_TARGET_CPU_DEFINE
 HOTSPOT_TARGET_CPU_ARCH
 HOTSPOT_TARGET_CPU
@@ -16318,6 +16322,14 @@ $as_echo "$COMPILE_TYPE" >&6; }
 fi


+# For historical reasons, the OS include directories have odd names.
+OPENJDK_TARGET_OS_INCLUDE_SUBDIR="$OPENJDK_TARGET_OS"
+if test "x$OPENJDK_TARGET_OS" = "xwindows"; then
+  OPENJDK_TARGET_OS_INCLUDE_SUBDIR="win32"
+elif test "x$OPENJDK_TARGET_OS" = "xmacosx"; then
+  OPENJDK_TARGET_OS_INCLUDE_SUBDIR="darwin"
+fi
+


 # Also store the legacy naming of the cpu.
@@ -16469,6 +16481,14 @@ $as_echo "$COMPILE_TYPE" >&6; }
 fi


+# For historical reasons, the OS include directories have odd names.
+OPENJDK_BUILD_OS_INCLUDE_SUBDIR="$OPENJDK_TARGET_OS"
+if test "x$OPENJDK_TARGET_OS" = "xwindows"; then
+  OPENJDK_BUILD_OS_INCLUDE_SUBDIR="win32"
+elif test "x$OPENJDK_TARGET_OS" = "xmacosx"; then
+  OPENJDK_BUILD_OS_INCLUDE_SUBDIR="darwin"
+fi
+



@@ -25524,6 +25544,10 @@ fi
   VENDOR_VERSION_STRING="$with_vendor_version_string"
 fi

+# We could define --with flags for these, if really needed
+VERSION_CLASSFILE_MAJOR="$DEFAULT_VERSION_CLASSFILE_MAJOR"
+VERSION_CLASSFILE_MINOR="$DEFAULT_VERSION_CLASSFILE_MINOR"
+
 { $as_echo "$as_me:${as_lineno-$LINENO}: checking for version string" >&5
 $as_echo_n "checking for version string... " >&6; }
 { $as_echo "$as_me:${as_lineno-$LINENO}: result: $VERSION_STRING" >&5
@@ -25545,6 +25569,9 @@ $as_echo "$VERSION_STRING" >&6; }



+
+
+
 ###############################################################################
 #
 # Setup BootJDK, used to bootstrap the build.
@@ -52817,9 +52844,7 @@ fi
   # Setup some hard coded includes
   COMMON_CCXXFLAGS_JDK="$COMMON_CCXXFLAGS_JDK \
       -I\$(SUPPORT_OUTPUTDIR)/modules_include/java.base \
-      -I${TOPDIR}/src/java.base/share/native/include \
-      -I${TOPDIR}/src/java.base/$OPENJDK_TARGET_OS/native/include \
-      -I${TOPDIR}/src/java.base/$OPENJDK_TARGET_OS_TYPE/native/include \
+      -I\$(SUPPORT_OUTPUTDIR)/modules_include/java.base/\$(OPENJDK_TARGET_OS_INCLUDE_SUBDIR) \
       -I${TOPDIR}/src/java.base/share/native/libjava \
       -I${TOPDIR}/src/java.base/$OPENJDK_TARGET_OS_TYPE/native/libjava \
       -I${TOPDIR}/src/hotspot/share/include \
@@ -53700,9 +53725,7 @@ fi
   # Setup some hard coded includes
   OPENJDK_BUILD_COMMON_CCXXFLAGS_JDK="$OPENJDK_BUILD_COMMON_CCXXFLAGS_JDK \
       -I\$(SUPPORT_OUTPUTDIR)/modules_include/java.base \
-      -I${TOPDIR}/src/java.base/share/native/include \
-      -I${TOPDIR}/src/java.base/$OPENJDK_BUILD_OS/native/include \
-      -I${TOPDIR}/src/java.base/$OPENJDK_BUILD_OS_TYPE/native/include \
+      -I\$(SUPPORT_OUTPUTDIR)/modules_include/java.base/\$(OPENJDK_TARGET_OS_INCLUDE_SUBDIR) \
       -I${TOPDIR}/src/java.base/share/native/libjava \
       -I${TOPDIR}/src/java.base/$OPENJDK_BUILD_OS_TYPE/native/libjava \
       -I${TOPDIR}/src/hotspot/share/include \
@@ -381,6 +381,10 @@ AC_DEFUN_ONCE([JDKVER_SETUP_JDK_VERSION_NUMBERS],
     VENDOR_VERSION_STRING="$with_vendor_version_string"
   fi

+  # We could define --with flags for these, if really needed
+  VERSION_CLASSFILE_MAJOR="$DEFAULT_VERSION_CLASSFILE_MAJOR"
+  VERSION_CLASSFILE_MINOR="$DEFAULT_VERSION_CLASSFILE_MINOR"
+
   AC_MSG_CHECKING([for version string])
   AC_MSG_RESULT([$VERSION_STRING])

@@ -398,4 +402,7 @@ AC_DEFUN_ONCE([JDKVER_SETUP_JDK_VERSION_NUMBERS],
   AC_SUBST(VERSION_IS_GA)
   AC_SUBST(VERSION_DATE)
   AC_SUBST(VENDOR_VERSION_STRING)
+  AC_SUBST(VERSION_CLASSFILE_MAJOR)
+  AC_SUBST(VERSION_CLASSFILE_MINOR)
+
 ])
@@ -478,6 +478,14 @@ AC_DEFUN([PLATFORM_SETUP_LEGACY_VARS_HELPER],
   fi
   AC_SUBST(HOTSPOT_$1_CPU_DEFINE)

+  # For historical reasons, the OS include directories have odd names.
+  OPENJDK_$1_OS_INCLUDE_SUBDIR="$OPENJDK_TARGET_OS"
+  if test "x$OPENJDK_TARGET_OS" = "xwindows"; then
+    OPENJDK_$1_OS_INCLUDE_SUBDIR="win32"
+  elif test "x$OPENJDK_TARGET_OS" = "xmacosx"; then
+    OPENJDK_$1_OS_INCLUDE_SUBDIR="darwin"
+  fi
+  AC_SUBST(OPENJDK_$1_OS_INCLUDE_SUBDIR)
 ])

 AC_DEFUN([PLATFORM_SET_RELEASE_FILE_OS_VALUES],
@@ -1,5 +1,5 @@
 #
-# Copyright (c) 2011, 2017, Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2011, 2018, Oracle and/or its affiliates. All rights reserved.
 # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 #
 # This code is free software; you can redistribute it and/or modify it
@@ -78,6 +78,7 @@ OPENJDK_TARGET_CPU_ISADIR:=@OPENJDK_TARGET_CPU_ISADIR@
 OPENJDK_TARGET_CPU_LEGACY:=@OPENJDK_TARGET_CPU_LEGACY@
 OPENJDK_TARGET_CPU_LEGACY_LIB:=@OPENJDK_TARGET_CPU_LEGACY_LIB@
 OPENJDK_TARGET_CPU_OSARCH:=@OPENJDK_TARGET_CPU_OSARCH@
+OPENJDK_TARGET_OS_INCLUDE_SUBIDR:=@OPENJDK_TARGET_OS_INCLUDE_SUBDIR@

 HOTSPOT_TARGET_OS := @HOTSPOT_TARGET_OS@
 HOTSPOT_TARGET_OS_TYPE := @HOTSPOT_TARGET_OS_TYPE@
@@ -100,6 +101,8 @@ OPENJDK_BUILD_CPU_ARCH:=@OPENJDK_BUILD_CPU_ARCH@
 OPENJDK_BUILD_CPU_BITS:=@OPENJDK_BUILD_CPU_BITS@
 OPENJDK_BUILD_CPU_ENDIAN:=@OPENJDK_BUILD_CPU_ENDIAN@

+OPENJDK_BUILD_OS_INCLUDE_SUBIDR:=@OPENJDK_TARGET_OS_INCLUDE_SUBDIR@
+
 # Target platform value in ModuleTarget class file attribute.
 OPENJDK_MODULE_TARGET_PLATFORM:=@OPENJDK_MODULE_TARGET_PLATFORM@

@@ -189,6 +192,10 @@ VERSION_DATE := @VERSION_DATE@
 # Vendor version string
 VENDOR_VERSION_STRING := @VENDOR_VERSION_STRING@

+# Class-file version
+VERSION_CLASSFILE_MAJOR := @VERSION_CLASSFILE_MAJOR@
+VERSION_CLASSFILE_MINOR := @VERSION_CLASSFILE_MINOR@
+
 # Convenience CFLAGS settings for passing version information into native programs.
 VERSION_CFLAGS := \
     -DVERSION_FEATURE=$(VERSION_FEATURE) \
@@ -204,6 +211,8 @@ VERSION_CFLAGS := \
     -DVERSION_SPECIFICATION='"$(VERSION_SPECIFICATION)"' \
     -DVERSION_DATE='"$(VERSION_DATE)"' \
     -DVENDOR_VERSION_STRING='"$(VENDOR_VERSION_STRING)"' \
+    -DVERSION_CLASSFILE_MAJOR=$(VERSION_CLASSFILE_MAJOR) \
+    -DVERSION_CLASSFILE_MINOR=$(VERSION_CLASSFILE_MINOR) \
     #

 ifneq ($(COMPANY_NAME),)
@@ -604,6 +613,8 @@ INTERIM_LANGTOOLS_BASE_MODULES := java.compiler jdk.compiler jdk.javadoc
 INTERIM_LANGTOOLS_MODULES := $(addsuffix .interim, $(INTERIM_LANGTOOLS_BASE_MODULES))
 INTERIM_LANGTOOLS_ADD_EXPORTS := \
     --add-exports java.base/sun.reflect.annotation=jdk.compiler.interim \
+    --add-exports java.base/jdk.internal.jmod=jdk.compiler.interim \
+    --add-exports java.base/jdk.internal.misc=jdk.compiler.interim \
     #
 INTERIM_LANGTOOLS_MODULES_COMMA := $(strip $(subst $(SPACE),$(COMMA),$(strip \
     $(INTERIM_LANGTOOLS_MODULES))))
@@ -30,6 +30,8 @@ DEFAULT_VERSION_INTERIM=0
 DEFAULT_VERSION_UPDATE=0
 DEFAULT_VERSION_PATCH=0
 DEFAULT_VERSION_DATE=2018-03-20
+DEFAULT_VERSION_CLASSFILE_MAJOR=55 # "`$EXPR $DEFAULT_VERSION_FEATURE + 44`"
+DEFAULT_VERSION_CLASSFILE_MINOR=0

 LAUNCHER_NAME=openjdk
 PRODUCT_NAME=OpenJDK
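The `$EXPR $DEFAULT_VERSION_FEATURE + 44` note in the added line records the invariant that the class-file major version tracks the feature release plus 44 (45 for JDK 1.1, 53 for JDK 9, 54 for JDK 10). A minimal sketch of that arithmetic, assuming only the invariant itself:

#include <cassert>

// Illustrative only: class-file major version = feature release + 44.
constexpr int classfile_major(int feature) { return feature + 44; }

int main() {
  assert(classfile_major(10) == 54);
  assert(classfile_major(11) == 55);  // DEFAULT_VERSION_CLASSFILE_MAJOR above
  return 0;
}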
@@ -69,7 +69,7 @@ $(eval $(call SetupJavaCompiler,GENERATE_OLDBYTECODE, \
 $(eval $(call SetupJavaCompiler,GENERATE_JDKBYTECODE, \
     JVM := $(JAVA_JAVAC), \
     JAVAC := $(NEW_JAVAC), \
-    FLAGS := -source 10 -target 10 --doclint-format html5 \
+    FLAGS := -source 11 -target 11 --doclint-format html5 \
         -encoding ascii -XDignore.symbol.file=true $(JAVAC_WARNINGS), \
     SERVER_DIR := $(SJAVAC_SERVER_DIR), \
     SERVER_JVM := $(SJAVAC_SERVER_JAVA)))
@@ -79,7 +79,7 @@ $(eval $(call SetupJavaCompiler,GENERATE_JDKBYTECODE, \
 $(eval $(call SetupJavaCompiler,GENERATE_JDKBYTECODE_NOWARNINGS, \
     JVM := $(JAVA_JAVAC), \
     JAVAC := $(NEW_JAVAC), \
-    FLAGS := -source 10 -target 10 \
+    FLAGS := -source 11 -target 11 \
         -encoding ascii -XDignore.symbol.file=true $(DISABLE_WARNINGS), \
     SERVER_DIR := $(SJAVAC_SERVER_DIR), \
     SERVER_JVM := $(SJAVAC_SERVER_JAVA)))
@@ -829,7 +829,7 @@ var getJibProfilesDependencies = function (input, common) {
         jtreg: {
             server: "javare",
             revision: "4.2",
-            build_number: "b10",
+            build_number: "b11",
             checksum_file: "MD5_VALUES",
             file: "jtreg_bin-4.2.zip",
             environment_name: "JT_HOME",
@@ -24,6 +24,7 @@
 #

 include CopyCommon.gmk
+include TextFileProcessing.gmk

 $(eval $(call IncludeCustomExtension, copy/Copy-java.base.gmk))

@@ -244,3 +245,16 @@ ifeq ($(ENABLE_LIBFFI_BUNDLING), true)
 endif

 ################################################################################
+# Generate classfile_constants.h
+
+$(eval $(call SetupTextFileProcessing, CREATE_CLASSFILE_CONSTANTS_H, \
+    SOURCE_FILES := $(TOPDIR)/src/java.base/share/native/include/classfile_constants.h.template, \
+    OUTPUT_FILE := $(SUPPORT_OUTPUTDIR)/modules_include/java.base/classfile_constants.h, \
+    REPLACEMENTS := \
+        @@VERSION_CLASSFILE_MAJOR@@ => $(VERSION_CLASSFILE_MAJOR) ; \
+        @@VERSION_CLASSFILE_MINOR@@ => $(VERSION_CLASSFILE_MINOR) ; , \
+))
+
+TARGETS += $(CREATE_CLASSFILE_CONSTANTS_H)
+
+################################################################################
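The SetupTextFileProcessing call above rewrites the two @@...@@ tokens in classfile_constants.h.template at build time. A sketch of what the processed region of the generated header might look like for JDK 11 — the macro names are an assumption here, since the template's contents are not part of this diff:

/* Hypothetical excerpt of the generated classfile_constants.h. */
#define JVM_CLASSFILE_MAJOR_VERSION 55  /* from @@VERSION_CLASSFILE_MAJOR@@ */
#define JVM_CLASSFILE_MINOR_VERSION 0   /* from @@VERSION_CLASSFILE_MINOR@@ */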
@@ -39,20 +39,12 @@ ifneq ($(wildcard $(INCLUDE_SOURCE_DIR)/*), )
   $(eval $(call SetupCopyFiles, COPY_EXPORTED_INCLUDE, \
       SRC := $(INCLUDE_SOURCE_DIR), \
       DEST := $(INCLUDE_TARGET_DIR), \
-      FILES := $(shell $(FIND) $(INCLUDE_SOURCE_DIR) -type f), \
+      FILES := $(filter %.h, $(call CacheFind, $(INCLUDE_SOURCE_DIR))), \
   ))

   TARGETS += $(COPY_EXPORTED_INCLUDE)
 endif

-# For historical reasons, the OS include directories have odd names.
-INCLUDE_TARGET_OS_SUBDIR := $(OPENJDK_TARGET_OS)
-ifeq ($(OPENJDK_TARGET_OS), windows)
-  INCLUDE_TARGET_OS_SUBDIR := win32
-else ifeq ($(OPENJDK_TARGET_OS), macosx)
-  INCLUDE_TARGET_OS_SUBDIR := darwin
-endif
-
 # Use the most specific of OS and OS_TYPE.
 INCLUDE_SOURCE_OS_DIR := $(TOPDIR)/src/$(MODULE)/$(OPENJDK_TARGET_OS)/native/include
 ifeq ($(wildcard $(INCLUDE_SOURCE_OS_DIR)/*), )
@@ -62,8 +54,8 @@ endif
 ifneq ($(wildcard $(INCLUDE_SOURCE_OS_DIR)/*), )
   $(eval $(call SetupCopyFiles, COPY_EXPORTED_INCLUDE_OS, \
       SRC := $(INCLUDE_SOURCE_OS_DIR), \
-      DEST := $(INCLUDE_TARGET_DIR)/$(INCLUDE_TARGET_OS_SUBDIR), \
-      FILES := $(shell $(FIND) $(INCLUDE_SOURCE_OS_DIR) -type f), \
+      DEST := $(INCLUDE_TARGET_DIR)/$(OPENJDK_TARGET_OS_INCLUDE_SUBDIR), \
+      FILES := $(filter %.h, $(call CacheFind, $(INCLUDE_SOURCE_OS_DIR))), \
   ))

   TARGETS += $(COPY_EXPORTED_INCLUDE_OS)
@@ -92,10 +92,10 @@ ifneq ($(COMPILE_TYPE), cross)
 endif

 SIZER_CFLAGS := \
-    -I${TOPDIR}/src/hotspot/share/include \
-    -I${TOPDIR}/src/hotspot/os/$(HOTSPOT_TARGET_OS_TYPE)/include \
-    -I$(TOPDIR)/src/java.base/share/native/include \
-    -I$(TOPDIR)/src/java.base/$(OPENJDK_TARGET_OS_TYPE)/native/include \
+    -I$(TOPDIR)/src/hotspot/share/include \
+    -I$(TOPDIR)/src/hotspot/os/$(HOTSPOT_TARGET_OS_TYPE)/include \
+    -I$(SUPPORT_OUTPUTDIR)/modules_include/java.base \
+    -I$(SUPPORT_OUTPUTDIR)/modules_include/java.base/$(OPENJDK_TARGET_OS_INCLUDE_SUBDIR) \
     -I$(TOPDIR)/src/java.base/share/native/libjava \
    -I$(TOPDIR)/src/java.base/$(OPENJDK_TARGET_OS_TYPE)/native/libjava \
    -I$(TOPDIR)/src/java.desktop/$(OPENJDK_TARGET_OS_TYPE)/native/common/awt \
@@ -59,8 +59,8 @@ JVM_CFLAGS_INCLUDES += \
     -I$(TOPDIR)/src/hotspot/share/precompiled \
     -I$(TOPDIR)/src/hotspot/share/include \
     -I$(TOPDIR)/src/hotspot/os/$(HOTSPOT_TARGET_OS_TYPE)/include \
-    -I$(TOPDIR)/src/java.base/share/native/include \
-    -I$(TOPDIR)/src/java.base/$(OPENJDK_TARGET_OS_TYPE)/native/include \
+    -I$(SUPPORT_OUTPUTDIR)/modules_include/java.base \
+    -I$(SUPPORT_OUTPUTDIR)/modules_include/java.base/$(OPENJDK_TARGET_OS_INCLUDE_SUBDIR) \
     -I$(TOPDIR)/src/java.base/share/native/libjimage \
     #

@@ -258,7 +258,6 @@ SUNWprivate_1.1 {
 		Java_jdk_internal_reflect_NativeConstructorAccessorImpl_newInstance0;
 		Java_jdk_internal_reflect_NativeMethodAccessorImpl_invoke0;
 		Java_jdk_internal_reflect_Reflection_getCallerClass__;
-		Java_jdk_internal_reflect_Reflection_getCallerClass__I;
 		Java_jdk_internal_reflect_Reflection_getClassAccessFlags;
 		Java_jdk_internal_misc_VM_latestUserDefinedLoader0;
 		Java_jdk_internal_misc_VM_getuid;
@@ -27,7 +27,6 @@ text: .text%Java_java_io_FileDescriptor_initIDs;
 text: .text%Java_java_io_FileOutputStream_initIDs;
 text: .text%Java_java_lang_System_setIn0;
 text: .text%Java_sun_reflect_Reflection_getCallerClass__;
-text: .text%Java_sun_reflect_Reflection_getCallerClass__I;
 text: .text%Java_java_lang_Class_forName0;
 text: .text%Java_java_lang_Object_getClass;
 text: .text%Java_sun_reflect_Reflection_getClassAccessFlags;
@@ -26,7 +26,6 @@ text: .text%Java_java_io_FileDescriptor_initIDs;
 text: .text%Java_java_io_FileOutputStream_initIDs;
 text: .text%Java_java_lang_System_setIn0;
 text: .text%Java_sun_reflect_Reflection_getCallerClass__;
-text: .text%Java_sun_reflect_Reflection_getCallerClass__I;
 text: .text%Java_java_lang_Class_forName0;
 text: .text%Java_java_lang_String_intern;
 text: .text%Java_java_lang_StringUTF16_isBigEndian;
@@ -27,7 +27,6 @@ text: .text%Java_java_io_FileDescriptor_initIDs;
 text: .text%Java_java_io_FileOutputStream_initIDs;
 text: .text%Java_java_lang_System_setIn0;
 text: .text%Java_sun_reflect_Reflection_getCallerClass__;
-text: .text%Java_sun_reflect_Reflection_getCallerClass__I;
 text: .text%Java_java_lang_Class_forName0;
 text: .text%Java_java_lang_String_intern;
 text: .text%Java_java_lang_StringUTF16_isBigEndian;
@@ -5844,8 +5844,8 @@ operand immPollPage()
 operand immByteMapBase()
 %{
   // Get base of card map
-  predicate((jbyte*)n->get_ptr() ==
-      ((CardTableModRefBS*)(Universe::heap()->barrier_set()))->byte_map_base);
+  predicate(Universe::heap()->barrier_set()->is_a(BarrierSet::CardTableModRef) &&
+      (jbyte*)n->get_ptr() == ((CardTableModRefBS*)(Universe::heap()->barrier_set()))->byte_map_base);
   match(ConP);

   op_cost(0);
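The new predicate guards the downcast: the ConP constant is only compared against byte_map_base once the heap's barrier set is known to be the card-table variant. A standalone C++ sketch of that check-then-cast pattern — the class names here are stand-ins, not HotSpot's real hierarchy:

struct BarrierSet {
  enum Name { CardTableModRef, Other };
  Name kind;
  bool is_a(Name n) const { return kind == n; }
};
struct CardTableBS : BarrierSet { const signed char* byte_map_base = nullptr; };

bool matches_byte_map_base(const BarrierSet* bs, const signed char* p) {
  // Guard first: the static_cast below is only meaningful when the
  // dynamic type really is the card-table variant.
  return bs->is_a(BarrierSet::CardTableModRef) &&
         p == static_cast<const CardTableBS*>(bs)->byte_map_base;
}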
@@ -848,7 +848,7 @@ public:
   // architecture. In debug mode we shrink it in order to test
   // trampolines, but not so small that branches in the interpreter
   // are out of range.
-  static const unsigned long branch_range = INCLUDE_JVMCI ? 128 * M : NOT_DEBUG(128 * M) DEBUG_ONLY(2 * M);
+  static const unsigned long branch_range = NOT_DEBUG(128 * M) DEBUG_ONLY(2 * M);

   static bool reachable_from_branch_at(address branch, address target) {
     return uabs(target - branch) < branch_range;
@@ -71,6 +71,13 @@ int CompiledStaticCall::to_interp_stub_size() {
   return 7 * NativeInstruction::instruction_size;
 }

+int CompiledStaticCall::to_trampoline_stub_size() {
+  // Somewhat pessimistically, we count 3 instructions here (although
+  // there are only two) because we sometimes emit an alignment nop.
+  // Trampoline stubs are always word aligned.
+  return 3 * NativeInstruction::instruction_size + wordSize;
+}
+
 // Relocation entries for call stub, compiled java to interpreter.
 int CompiledStaticCall::reloc_to_interp_stub() {
   return 4; // 3 in emit_to_interp_stub + 1 in emit_call
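Assuming AArch64's usual instruction size of 4 bytes and a 64-bit word size of 8 (neither value appears in this diff), the pessimistic estimate works out to 20 bytes: up to three instruction slots, two real instructions plus a possible alignment nop, followed by the 8-byte destination word.

// Illustrative arithmetic only; 4 and 8 are assumed platform values.
static_assert(3 * 4 + 8 == 20, "pessimistic trampoline stub size in bytes");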
@@ -109,7 +109,7 @@ void CodeInstaller::pd_relocate_ForeignCall(NativeInstruction* inst, jlong forei
   TRACE_jvmci_3("relocating (foreign call) at " PTR_FORMAT, p2i(inst));
 }

-void CodeInstaller::pd_relocate_JavaMethod(Handle hotspot_method, jint pc_offset, TRAPS) {
+void CodeInstaller::pd_relocate_JavaMethod(CodeBuffer &cbuf, Handle hotspot_method, jint pc_offset, TRAPS) {
 #ifdef ASSERT
   Method* method = NULL;
   // we need to check, this might also be an unresolved method
@@ -124,22 +124,22 @@ void CodeInstaller::pd_relocate_JavaMethod(Handle hotspot_method, jint pc_offset
     case INVOKEINTERFACE: {
       assert(method == NULL || !method->is_static(), "cannot call static method with invokeinterface");
       NativeCall* call = nativeCall_at(_instructions->start() + pc_offset);
-      call->set_destination(SharedRuntime::get_resolve_virtual_call_stub());
       _instructions->relocate(call->instruction_address(), virtual_call_Relocation::spec(_invoke_mark_pc));
+      call->trampoline_jump(cbuf, SharedRuntime::get_resolve_virtual_call_stub());
       break;
     }
     case INVOKESTATIC: {
       assert(method == NULL || method->is_static(), "cannot call non-static method with invokestatic");
       NativeCall* call = nativeCall_at(_instructions->start() + pc_offset);
-      call->set_destination(SharedRuntime::get_resolve_static_call_stub());
       _instructions->relocate(call->instruction_address(), relocInfo::static_call_type);
+      call->trampoline_jump(cbuf, SharedRuntime::get_resolve_static_call_stub());
       break;
     }
     case INVOKESPECIAL: {
       assert(method == NULL || !method->is_static(), "cannot call static method with invokespecial");
       NativeCall* call = nativeCall_at(_instructions->start() + pc_offset);
-      call->set_destination(SharedRuntime::get_resolve_opt_virtual_call_stub());
       _instructions->relocate(call->instruction_address(), relocInfo::opt_virtual_call_type);
+      call->trampoline_jump(cbuf, SharedRuntime::get_resolve_opt_virtual_call_stub());
       break;
     }
     default:
@@ -801,7 +801,7 @@ address MacroAssembler::emit_trampoline_stub(int insts_call_instruction_offset,
   assert(is_NativeCallTrampolineStub_at(stub_start_addr), "doesn't look like a trampoline");

   end_a_stub();
-  return stub;
+  return stub_start_addr;
 }

 address MacroAssembler::ic_call(address entry, jint method_index) {
@@ -367,3 +367,24 @@ void NativeCallTrampolineStub::set_destination(address new_destination) {
   set_ptr_at(data_offset, new_destination);
   OrderAccess::release();
 }
+
+// Generate a trampoline for a branch to dest. If there's no need for a
+// trampoline, simply patch the call directly to dest.
+address NativeCall::trampoline_jump(CodeBuffer &cbuf, address dest) {
+  MacroAssembler a(&cbuf);
+  address stub = NULL;
+
+  if (a.far_branches()
+      && ! is_NativeCallTrampolineStub_at(instruction_address() + displacement())) {
+    stub = a.emit_trampoline_stub(instruction_address() - cbuf.insts()->start(), dest);
+  }
+
+  if (stub == NULL) {
+    // If we generated no stub, patch this call directly to dest.
+    // This will happen if we don't need far branches or if there
+    // already was a trampoline.
+    set_destination(dest);
+  }
+
+  return stub;
+}
@@ -61,7 +61,7 @@ class NativeInstruction VALUE_OBJ_CLASS_SPEC {
     return uint_at(0);
   }

-  bool is_blr() const { return (encoding() & 0xfffffc1f) == 0xd63f0000; }
+  bool is_blr() const { return (encoding() & 0xff9ffc1f) == 0xd61f0000; } // blr(register) or br(register)
   bool is_adr_aligned() const { return (encoding() & 0xff000000) == 0x10000000; } // adr Xn, <label>, where label is aligned to 4 bytes (address of instruction).

   inline bool is_nop();
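The widened mask can be checked by hand: br Xn encodes as 0xd61f0000 | Rn<<5 and blr Xn as 0xd63f0000 | Rn<<5, differing only in bit 21, and 0xff9ffc1f clears bits 22:21 together with the Rn field in bits 9:5. A small sketch verifying the two encodings named in the new comment:

#include <cstdint>

constexpr bool is_blr_or_br(uint32_t insn) {
  return (insn & 0xff9ffc1f) == 0xd61f0000;  // new predicate from the hunk
}
static_assert(is_blr_or_br(0xd61f0000u | (17u << 5)), "br  x17 matches");
static_assert(is_blr_or_br(0xd63f0000u | (17u << 5)), "blr x17 matches");
// The old mask kept bit 21, so a plain br was rejected:
static_assert((0xd61f0000u & 0xfffffc1f) != 0xd63f0000u, "old test missed br");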
@@ -143,8 +143,9 @@ inline NativeInstruction* nativeInstruction_at(uint32_t *address) {
 }

 inline NativeCall* nativeCall_at(address address);
-// The NativeCall is an abstraction for accessing/manipulating native call imm32/rel32off
-// instructions (used to manipulate inline caches, primitive & dll calls, etc.).
+// The NativeCall is an abstraction for accessing/manipulating native
+// call instructions (used to manipulate inline caches, primitive &
+// DSO calls, etc.).

 class NativeCall: public NativeInstruction {
  public:
@@ -155,7 +156,6 @@ class NativeCall: public NativeInstruction {
     return_address_offset = 4
   };

-  enum { cache_line_size = BytesPerWord }; // conservative estimate!
   address instruction_address() const { return addr_at(instruction_offset); }
   address next_instruction_address() const { return addr_at(return_address_offset); }
   int displacement() const { return (int_at(displacement_offset) << 6) >> 4; }
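The displacement() context line decodes the 26-bit branch immediate: shifting left by 6 parks imm26 (bits 25:0) against the sign bit, and the arithmetic shift right by 4 sign-extends it while leaving a net scale of 4 bytes per unit. A sketch of the same decode, using an unsigned left shift so the overflow stays well defined; the net effect mirrors the expression in the hunk:

#include <cassert>
#include <cstdint>

int decode_branch_offset(int32_t insn) {
  // imm26 << 6 reaches bit 31; the arithmetic >> 4 sign-extends and scales by 4.
  return (int32_t)((uint32_t)insn << 6) >> 4;
}

int main() {
  assert(decode_branch_offset(1) == 4);                    // +1 unit = +4 bytes
  assert(decode_branch_offset(0x02000000) == -134217728);  // sign bit set: -128 MiB
  return 0;
}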
@@ -206,6 +206,7 @@ class NativeCall: public NativeInstruction {
   void set_destination_mt_safe(address dest, bool assert_lock = true);

   address get_trampoline();
+  address trampoline_jump(CodeBuffer &cbuf, address dest);
 };

 inline NativeCall* nativeCall_at(address address) {
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved.
  * Copyright (c) 2014, Red Hat Inc. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
@@ -35,4 +35,10 @@
   format_width = 0
 };
+
+ public:
+
+  // This platform has no oops in the code that are not also
+  // listed in the oop section.
+  static bool mustIterateImmediateOopsInCode() { return false; }

 #endif // CPU_AARCH64_VM_RELOCINFO_AARCH64_HPP
@@ -3404,7 +3404,6 @@ void TemplateTable::_new() {
   Label done;
   Label initialize_header;
   Label initialize_object; // including clearing the fields
-  Label allocate_shared;

   __ get_cpool_and_tags(r4, r0);
   // Make sure the class we're about to instantiate has been resolved.
@@ -3433,18 +3432,24 @@ void TemplateTable::_new() {
   // test to see if it has a finalizer or is malformed in some way
   __ tbnz(r3, exact_log2(Klass::_lh_instance_slow_path_bit), slow_case);

-  // Allocate the instance
-  // 1) Try to allocate in the TLAB
-  // 2) if fail and the object is large allocate in the shared Eden
-  // 3) if the above fails (or is not applicable), go to a slow case
-  //    (creates a new TLAB, etc.)
+  // Allocate the instance:
+  //  If TLAB is enabled:
+  //    Try to allocate in the TLAB.
+  //    If fails, go to the slow path.
+  //  Else If inline contiguous allocations are enabled:
+  //    Try to allocate in eden.
+  //    If fails due to heap end, go to slow path.
+  //
+  //  If TLAB is enabled OR inline contiguous is enabled:
+  //    Initialize the allocation.
+  //    Exit.
+  //
+  //  Go to slow path.
   const bool allow_shared_alloc =
     Universe::heap()->supports_inline_contig_alloc();

   if (UseTLAB) {
-    __ tlab_allocate(r0, r3, 0, noreg, r1,
-                     allow_shared_alloc ? allocate_shared : slow_case);
+    __ tlab_allocate(r0, r3, 0, noreg, r1, slow_case);

     if (ZeroTLAB) {
       // the fields have been already cleared
@@ -3453,19 +3458,19 @@ void TemplateTable::_new() {
       // initialize both the header and fields
       __ b(initialize_object);
     }
+  } else {
+    // Allocation in the shared Eden, if allowed.
+    //
+    // r3: instance size in bytes
+    if (allow_shared_alloc) {
+      __ eden_allocate(r0, r3, 0, r10, slow_case);
+      __ incr_allocated_bytes(rthread, r3, 0, rscratch1);
+    }
   }

-  // Allocation in the shared Eden, if allowed.
-  //
-  // r3: instance size in bytes
-  if (allow_shared_alloc) {
-    __ bind(allocate_shared);
-
-    __ eden_allocate(r0, r3, 0, r10, slow_case);
-    __ incr_allocated_bytes(rthread, r3, 0, rscratch1);
-  }
-
-  if (UseTLAB || Universe::heap()->supports_inline_contig_alloc()) {
+  // If UseTLAB or allow_shared_alloc are true, the object is created above and
+  // there is an initialize need. Otherwise, skip and go to the slow path.
+  if (UseTLAB || allow_shared_alloc) {
     // The object is initialized before the header. If the object size is
     // zero, go directly to the header initialization.
     __ bind(initialize_object);
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2008, 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2008, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -32,4 +32,10 @@
   format_width = 0
 };
+
+ public:
+
+  // This platform has no oops in the code that are not also
+  // listed in the oop section.
+  static bool mustIterateImmediateOopsInCode() { return false; }

 #endif // CPU_ARM_VM_RELOCINFO_ARM_HPP
@@ -5604,12 +5604,17 @@ void MacroAssembler::zap_from_to(Register low, int before, Register high, int af

 #endif // !PRODUCT

-SkipIfEqualZero::SkipIfEqualZero(MacroAssembler* masm, Register temp, const bool* flag_addr) : _masm(masm), _label() {
+void SkipIfEqualZero::skip_to_label_if_equal_zero(MacroAssembler* masm, Register temp,
+                                                  const bool* flag_addr, Label& label) {
   int simm16_offset = masm->load_const_optimized(temp, (address)flag_addr, R0, true);
   assert(sizeof(bool) == 1, "PowerPC ABI");
   masm->lbz(temp, simm16_offset, temp);
   masm->cmpwi(CCR0, temp, 0);
-  masm->beq(CCR0, _label);
+  masm->beq(CCR0, label);
+}
+
+SkipIfEqualZero::SkipIfEqualZero(MacroAssembler* masm, Register temp, const bool* flag_addr) : _masm(masm), _label() {
+  skip_to_label_if_equal_zero(masm, temp, flag_addr, _label);
 }

 SkipIfEqualZero::~SkipIfEqualZero() {
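The refactor splits the branch-emitting logic out of the RAII constructor into a static helper so a caller can supply its own target label (the dtrace-probe hunk later in this diff passes Ldone) instead of being limited to the label bound by the object's destructor. A stand-in sketch of that design split, not HotSpot code:

struct Label {};
struct Asm { void jump_if_zero(const bool*, Label&) { /* emit branch */ } };

class SkipIfZero {
 public:
  // New static form: the caller chooses where the skip lands.
  static void skip_to_label_if_zero(Asm* a, const bool* flag, Label& target) {
    a->jump_if_zero(flag, target);
  }
  // Old RAII form, now a thin wrapper: skips to its own label, which the
  // destructor binds at the end of the guarded region.
  SkipIfZero(Asm* a, const bool* flag) { skip_to_label_if_zero(a, flag, _label); }
  ~SkipIfZero() { /* bind _label here */ }
 private:
  Label _label;
};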
@@ -980,6 +980,8 @@ class SkipIfEqualZero : public StackObj {
  public:
   // 'Temp' is a temp register that this object can use (and trash).
   explicit SkipIfEqualZero(MacroAssembler*, Register temp, const bool* flag_addr);
+  static void skip_to_label_if_equal_zero(MacroAssembler*, Register temp,
+                                          const bool* flag_addr, Label& label);
   ~SkipIfEqualZero();
 };

@@ -1,6 +1,6 @@
 /*
- * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
- * Copyright (c) 2012, 2013 SAP SE. All rights reserved.
+ * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2012, 2018 SAP SE. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -43,4 +43,10 @@
 #endif
 };
+
+ public:
+
+  // This platform has no oops in the code that are not also
+  // listed in the oop section.
+  static bool mustIterateImmediateOopsInCode() { return false; }

 #endif // CPU_PPC_VM_RELOCINFO_PPC_HPP
@@ -3637,10 +3637,7 @@ void TemplateTable::_new() {
   transition(vtos, atos);

   Label Lslow_case,
-        Ldone,
-        Linitialize_header,
-        Lallocate_shared,
-        Linitialize_object;  // Including clearing the fields.
+        Ldone;

   const Register RallocatedObject = R17_tos,
                  RinstanceKlass = R9_ARG7,
@@ -3651,8 +3648,6 @@ void TemplateTable::_new() {
                  Rtags = R3_ARG1,
                  Rindex = R5_ARG3;

-  const bool allow_shared_alloc = Universe::heap()->supports_inline_contig_alloc();
-
   // --------------------------------------------------------------------------
   // Check if fast case is possible.

@@ -3661,6 +3656,8 @@ void TemplateTable::_new() {
   // Load index of constant pool entry.
   __ get_2_byte_integer_at_bcp(1, Rindex, InterpreterMacroAssembler::Unsigned);

+  // Note: compared to other architectures, PPC's implementation always goes
+  // to the slow path if TLAB is used and fails.
   if (UseTLAB) {
     // Make sure the class we're about to instantiate has been resolved
     // This is done before loading instanceKlass to be consistent with the order
@@ -3690,8 +3687,7 @@ void TemplateTable::_new() {
     // Fast case:
     // Allocate the instance.
     // 1) Try to allocate in the TLAB.
-    // 2) If fail, and the TLAB is not full enough to discard, allocate in the shared Eden.
-    // 3) If the above fails (or is not applicable), go to a slow case (creates a new TLAB, etc.).
+    // 2) If the above fails (or is not applicable), go to a slow case (creates a new TLAB, etc.).

     Register RoldTopValue = RallocatedObject; // Object will be allocated here if it fits.
     Register RnewTopValue = R6_ARG4;
@@ -3705,53 +3701,13 @@ void TemplateTable::_new() {

     // If there is enough space, we do not CAS and do not clear.
     __ cmpld(CCR0, RnewTopValue, RendValue);
-    __ bgt(CCR0, allow_shared_alloc ? Lallocate_shared : Lslow_case);
+    __ bgt(CCR0, Lslow_case);

     __ std(RnewTopValue, in_bytes(JavaThread::tlab_top_offset()), R16_thread);

-    if (ZeroTLAB) {
-      // The fields have already been cleared.
-      __ b(Linitialize_header);
-    } else {
-      // Initialize both the header and fields.
-      __ b(Linitialize_object);
-    }
-
-    // Fall through: TLAB was too small.
-    if (allow_shared_alloc) {
-      Register RtlabWasteLimitValue = R10_ARG8;
-      Register RfreeValue = RnewTopValue;
-
-      __ bind(Lallocate_shared);
-      // Check if tlab should be discarded (refill_waste_limit >= free).
-      __ ld(RtlabWasteLimitValue, in_bytes(JavaThread::tlab_refill_waste_limit_offset()), R16_thread);
-      __ subf(RfreeValue, RoldTopValue, RendValue);
-      __ srdi(RfreeValue, RfreeValue, LogHeapWordSize); // in dwords
-      __ cmpld(CCR0, RtlabWasteLimitValue, RfreeValue);
-      __ bge(CCR0, Lslow_case);
-
-      // Increment waste limit to prevent getting stuck on this slow path.
-      __ add_const_optimized(RtlabWasteLimitValue, RtlabWasteLimitValue, ThreadLocalAllocBuffer::refill_waste_limit_increment());
-      __ std(RtlabWasteLimitValue, in_bytes(JavaThread::tlab_refill_waste_limit_offset()), R16_thread);
-    }
-    // else: No allocation in the shared eden. // fallthru: __ b(Lslow_case);
-  }
-  // else: Always go the slow path.
-
-  // --------------------------------------------------------------------------
-  // slow case
-  __ bind(Lslow_case);
-  call_VM(R17_tos, CAST_FROM_FN_PTR(address, InterpreterRuntime::_new), Rcpool, Rindex);
-
-  if (UseTLAB) {
-    __ b(Ldone);
-    // --------------------------------------------------------------------------
-    // Init1: Zero out newly allocated memory.
-
-    if (!ZeroTLAB || allow_shared_alloc) {
-      // Clear object fields.
-      __ bind(Linitialize_object);
-
+    if (!ZeroTLAB) {
+      // --------------------------------------------------------------------------
+      // Init1: Zero out newly allocated memory.
       // Initialize remaining object fields.
       Register Rbase = Rtags;
       __ addi(Rinstance_size, Rinstance_size, 7 - (int)sizeof(oopDesc));
@@ -3760,13 +3716,10 @@ void TemplateTable::_new() {

       // Clear out object skipping header. Takes also care of the zero length case.
       __ clear_memory_doubleword(Rbase, Rinstance_size);
-      // fallthru: __ b(Linitialize_header);
     }

     // --------------------------------------------------------------------------
     // Init2: Initialize the header: mark, klass
-    __ bind(Linitialize_header);
-
     // Init mark.
     if (UseBiasedLocking) {
       __ ld(Rscratch, in_bytes(Klass::prototype_header_offset()), RinstanceKlass);
|
|||||||
__ store_klass(RallocatedObject, RinstanceKlass, Rscratch); // klass (last for cms)
|
__ store_klass(RallocatedObject, RinstanceKlass, Rscratch); // klass (last for cms)
|
||||||
|
|
||||||
// Check and trigger dtrace event.
|
// Check and trigger dtrace event.
|
||||||
{
|
SkipIfEqualZero::skip_to_label_if_equal_zero(_masm, Rscratch, &DTraceAllocProbes, Ldone);
|
||||||
SkipIfEqualZero skip_if(_masm, Rscratch, &DTraceAllocProbes);
|
__ push(atos);
|
||||||
__ push(atos);
|
__ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_object_alloc));
|
||||||
__ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_object_alloc));
|
__ pop(atos);
|
||||||
__ pop(atos);
|
|
||||||
}
|
__ b(Ldone);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// --------------------------------------------------------------------------
|
||||||
|
// slow case
|
||||||
|
__ bind(Lslow_case);
|
||||||
|
call_VM(R17_tos, CAST_FROM_FN_PTR(address, InterpreterRuntime::_new), Rcpool, Rindex);
|
||||||
|
|
||||||
// continue
|
// continue
|
||||||
__ bind(Ldone);
|
__ bind(Ldone);
|
||||||
|
|
||||||
|
@@ -1,6 +1,6 @@
 /*
- * Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved.
- * Copyright (c) 2016 SAP SE. All rights reserved.
+ * Copyright (c) 2016, 2018, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2016, 2018 SAP SE. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -110,6 +110,10 @@
   pcrel_data_format = 3 // Relocation is for the target data of a pc-relative instruction.
 };

+  // This platform has no oops in the code that are not also
+  // listed in the oop section.
+  static bool mustIterateImmediateOopsInCode() { return false; }
+
 // Store the new target address into an oop_Relocation cell, if any.
 // Return indication if update happened.
 static bool update_oop_pool(address begin, address end, address newTarget, CodeBlob* cb);
@@ -73,6 +73,11 @@ address CompiledStaticCall::emit_to_interp_stub(CodeBuffer &cbuf, address mark)
 }
 #undef __

+int CompiledStaticCall::to_trampoline_stub_size() {
+  // SPARC doesn't use trampolines.
+  return 0;
+}
+
 int CompiledStaticCall::to_interp_stub_size() {
   // This doesn't need to be accurate but it must be larger or equal to
   // the real size of the stub.
@@ -115,7 +115,7 @@ void CodeInstaller::pd_relocate_ForeignCall(NativeInstruction* inst, jlong forei
   TRACE_jvmci_3("relocating (foreign call) at " PTR_FORMAT, p2i(inst));
 }

-void CodeInstaller::pd_relocate_JavaMethod(Handle hotspot_method, jint pc_offset, TRAPS) {
+void CodeInstaller::pd_relocate_JavaMethod(CodeBuffer &, Handle hotspot_method, jint pc_offset, TRAPS) {
 #ifdef ASSERT
   Method* method = NULL;
   // we need to check, this might also be an unresolved method
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -38,6 +38,11 @@
   format_width = 1
 };

+ public:
+
+  // This platform has no oops in the code that are not also
+  // listed in the oop section.
+  static bool mustIterateImmediateOopsInCode() { return false; }
+
 //Reconciliation History
 // 1.3 97/10/15 15:38:36 relocInfo_i486.hpp
@ -3259,11 +3259,19 @@ void TemplateTable::_new() {
|
|||||||
__ br(Assembler::notZero, false, Assembler::pn, slow_case);
|
__ br(Assembler::notZero, false, Assembler::pn, slow_case);
|
||||||
__ delayed()->nop();
|
__ delayed()->nop();
|
||||||
|
|
||||||
// allocate the instance
|
// Allocate the instance:
|
||||||
// 1) Try to allocate in the TLAB
|
// If TLAB is enabled:
|
||||||
// 2) if fail, and the TLAB is not full enough to discard, allocate in the shared Eden
|
// Try to allocate in the TLAB.
|
||||||
// 3) if the above fails (or is not applicable), go to a slow case
|
// If fails, go to the slow path.
|
||||||
// (creates a new TLAB, etc.)
|
// Else If inline contiguous allocations are enabled:
|
||||||
|
// Try to allocate in eden.
|
||||||
|
// If fails due to heap end, go to slow path.
|
||||||
|
//
|
||||||
|
// If TLAB is enabled OR inline contiguous is enabled:
|
||||||
|
// Initialize the allocation.
|
||||||
|
// Exit.
|
||||||
|
//
|
||||||
|
// Go to slow path.
|
||||||
|
|
||||||
const bool allow_shared_alloc =
|
const bool allow_shared_alloc =
|
||||||
Universe::heap()->supports_inline_contig_alloc();
|
Universe::heap()->supports_inline_contig_alloc();
|
||||||
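The rewritten comment makes TLAB and shared-eden allocation alternatives chosen up front, rather than a fallback chain with an inline TLAB refill. The same policy as C++-style pseudocode (all helper names are invented for this sketch):

    // Sketch of the new fast-path policy in TemplateTable::_new().
    HeapWord* allocate_instance(size_t size) {
      if (UseTLAB) {
        HeapWord* obj = tlab_try_allocate(size);  // bump pointer in the thread's TLAB
        if (obj == NULL) return slow_path(size);  // no inline refill attempt anymore
        return initialize(obj, size);
      }
      if (inline_contig_alloc_supported()) {
        HeapWord* obj = eden_try_allocate(size);  // CAS on the shared eden top
        if (obj == NULL) return slow_path(size);  // heap end reached
        return initialize(obj, size);
      }
      return slow_path(size);                     // runtime refills the TLAB, may GC
    }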
@@ -3291,61 +3299,43 @@ void TemplateTable::_new() {
     }
     __ delayed()->st_ptr(RnewTopValue, G2_thread, in_bytes(JavaThread::tlab_top_offset()));

+    // Allocation does not fit in the TLAB.
+    __ ba_short(slow_case);
+  } else {
+    // Allocation in the shared Eden
     if (allow_shared_alloc) {
-      // Check if tlab should be discarded (refill_waste_limit >= free)
-      __ ld_ptr(G2_thread, in_bytes(JavaThread::tlab_refill_waste_limit_offset()), RtlabWasteLimitValue);
-      __ sub(RendValue, RoldTopValue, RfreeValue);
-      __ srlx(RfreeValue, LogHeapWordSize, RfreeValue);
-      __ cmp_and_brx_short(RtlabWasteLimitValue, RfreeValue, Assembler::greaterEqualUnsigned, Assembler::pt, slow_case); // tlab waste is small
-
-      // increment waste limit to prevent getting stuck on this slow path
-      if (Assembler::is_simm13(ThreadLocalAllocBuffer::refill_waste_limit_increment())) {
-        __ add(RtlabWasteLimitValue, ThreadLocalAllocBuffer::refill_waste_limit_increment(), RtlabWasteLimitValue);
-      } else {
-        // set64 does not use the temp register if the given constant is 32 bit. So
-        // we can just use any register; using G0 results in ignoring of the upper 32 bit
-        // of that value.
-        __ set64(ThreadLocalAllocBuffer::refill_waste_limit_increment(), G4_scratch, G0);
-        __ add(RtlabWasteLimitValue, G4_scratch, RtlabWasteLimitValue);
-      }
-      __ st_ptr(RtlabWasteLimitValue, G2_thread, in_bytes(JavaThread::tlab_refill_waste_limit_offset()));
-    } else {
-      // No allocation in the shared eden.
-      __ ba_short(slow_case);
-    }
-  }
-
-  // Allocation in the shared Eden
-  if (allow_shared_alloc) {
-    Register RoldTopValue = G1_scratch;
-    Register RtopAddr = G3_scratch;
-    Register RnewTopValue = RallocatedObject;
-    Register RendValue = Rscratch;
+      Register RoldTopValue = G1_scratch;
+      Register RtopAddr = G3_scratch;
+      Register RnewTopValue = RallocatedObject;
+      Register RendValue = Rscratch;

-    __ set((intptr_t)Universe::heap()->top_addr(), RtopAddr);
+      __ set((intptr_t)Universe::heap()->top_addr(), RtopAddr);

-    Label retry;
-    __ bind(retry);
-    __ set((intptr_t)Universe::heap()->end_addr(), RendValue);
-    __ ld_ptr(RendValue, 0, RendValue);
-    __ ld_ptr(RtopAddr, 0, RoldTopValue);
-    __ add(RoldTopValue, Roffset, RnewTopValue);
+      Label retry;
+      __ bind(retry);
+      __ set((intptr_t)Universe::heap()->end_addr(), RendValue);
+      __ ld_ptr(RendValue, 0, RendValue);
+      __ ld_ptr(RtopAddr, 0, RoldTopValue);
+      __ add(RoldTopValue, Roffset, RnewTopValue);

-    // RnewTopValue contains the top address after the new object
-    // has been allocated.
-    __ cmp_and_brx_short(RnewTopValue, RendValue, Assembler::greaterUnsigned, Assembler::pn, slow_case);
+      // RnewTopValue contains the top address after the new object
+      // has been allocated.
+      __ cmp_and_brx_short(RnewTopValue, RendValue, Assembler::greaterUnsigned, Assembler::pn, slow_case);

-    __ cas_ptr(RtopAddr, RoldTopValue, RnewTopValue);
+      __ cas_ptr(RtopAddr, RoldTopValue, RnewTopValue);

-    // if someone beat us on the allocation, try again, otherwise continue
-    __ cmp_and_brx_short(RoldTopValue, RnewTopValue, Assembler::notEqual, Assembler::pn, retry);
+      // if someone beat us on the allocation, try again, otherwise continue
+      __ cmp_and_brx_short(RoldTopValue, RnewTopValue, Assembler::notEqual, Assembler::pn, retry);

-    // bump total bytes allocated by this thread
-    // RoldTopValue and RtopAddr are dead, so can use G1 and G3
-    __ incr_allocated_bytes(Roffset, G1_scratch, G3_scratch);
+      // bump total bytes allocated by this thread
+      // RoldTopValue and RtopAddr are dead, so can use G1 and G3
+      __ incr_allocated_bytes(Roffset, G1_scratch, G3_scratch);
+    }
   }

-  if (UseTLAB || Universe::heap()->supports_inline_contig_alloc()) {
+  // If UseTLAB or allow_shared_alloc are true, the object is created above and
+  // there is an initialize need. Otherwise, skip and go to the slow path.
+  if (UseTLAB || allow_shared_alloc) {
     // clear object fields
     __ bind(initialize_object);
     __ deccc(Roffset, sizeof(oopDesc));
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -3167,6 +3167,89 @@ void Assembler::nop(int i) {
     return;
   }

+  if (UseAddressNop && VM_Version::is_zx()) {
+    //
+    // Using multi-bytes nops "0x0F 0x1F [address]" for ZX
+    //  1: 0x90
+    //  2: 0x66 0x90
+    //  3: 0x66 0x66 0x90 (don't use "0x0F 0x1F 0x00" - need patching safe padding)
+    //  4: 0x0F 0x1F 0x40 0x00
+    //  5: 0x0F 0x1F 0x44 0x00 0x00
+    //  6: 0x66 0x0F 0x1F 0x44 0x00 0x00
+    //  7: 0x0F 0x1F 0x80 0x00 0x00 0x00 0x00
+    //  8: 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00
+    //  9: 0x66 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00
+    // 10: 0x66 0x66 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00
+    // 11: 0x66 0x66 0x66 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00
+
+    // The rest coding is ZX specific - don't use consecutive address nops
+
+    // 12: 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00 0x66 0x66 0x66 0x90
+    // 13: 0x66 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00 0x66 0x66 0x66 0x90
+    // 14: 0x66 0x66 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00 0x66 0x66 0x66 0x90
+    // 15: 0x66 0x66 0x66 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00 0x66 0x66 0x66 0x90
+
+    while (i >= 15) {
+      // For ZX don't generate consecutive address nops (mix with regular nops)
+      i -= 15;
+      emit_int8(0x66);   // size prefix
+      emit_int8(0x66);   // size prefix
+      emit_int8(0x66);   // size prefix
+      addr_nop_8();
+      emit_int8(0x66);   // size prefix
+      emit_int8(0x66);   // size prefix
+      emit_int8(0x66);   // size prefix
+      emit_int8((unsigned char)0x90);
+                         // nop
+    }
+    switch (i) {
+      case 14:
+        emit_int8(0x66); // size prefix
+      case 13:
+        emit_int8(0x66); // size prefix
+      case 12:
+        addr_nop_8();
+        emit_int8(0x66); // size prefix
+        emit_int8(0x66); // size prefix
+        emit_int8(0x66); // size prefix
+        emit_int8((unsigned char)0x90);
+                         // nop
+        break;
+      case 11:
+        emit_int8(0x66); // size prefix
+      case 10:
+        emit_int8(0x66); // size prefix
+      case 9:
+        emit_int8(0x66); // size prefix
+      case 8:
+        addr_nop_8();
+        break;
+      case 7:
+        addr_nop_7();
+        break;
+      case 6:
+        emit_int8(0x66); // size prefix
+      case 5:
+        addr_nop_5();
+        break;
+      case 4:
+        addr_nop_4();
+        break;
+      case 3:
+        // Don't use "0x0F 0x1F 0x00" - need patching safe padding
+        emit_int8(0x66); // size prefix
+      case 2:
+        emit_int8(0x66); // size prefix
+      case 1:
+        emit_int8((unsigned char)0x90);
+                         // nop
+        break;
+      default:
+        assert(i == 0, " ");
+    }
+    return;
+  }

   // Using nops with size prefixes "0x66 0x90".
   // From AMD Optimization Guide:
   //  1: 0x90
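Since the table covers every length from 1 to 15 bytes, any pad amount decomposes into at most one 15-byte pattern per loop iteration plus a single tail encoding. A sketch of how an emitter typically consumes Assembler::nop(i) for alignment (the helper itself is illustrative):

    // Pad the current code position up to a 16-byte boundary in one call;
    // nop(pad) then picks the encodings from the table above.
    void align16(Assembler* masm, uintptr_t pc) {
      int pad = (int)(-pc & 15);  // bytes to the next 16-byte boundary
      if (pad > 0) {
        masm->nop(pad);           // e.g. pad == 9 emits 66 0F 1F 84 00 00 00 00 00
      }
    }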
@@ -73,6 +73,11 @@ int CompiledStaticCall::to_interp_stub_size() {
     LP64_ONLY(15);  // movq (1+1+8); jmp (1+4)
 }

+int CompiledStaticCall::to_trampoline_stub_size() {
+  // x86 doesn't use trampolines.
+  return 0;
+}
+
 // Relocation entries for call stub, compiled java to interpreter.
 int CompiledStaticCall::reloc_to_interp_stub() {
   return 4; // 3 in emit_to_interp_stub + 1 in emit_call
@@ -144,7 +144,7 @@ void CodeInstaller::pd_relocate_ForeignCall(NativeInstruction* inst, jlong forei
   TRACE_jvmci_3("relocating (foreign call) at " PTR_FORMAT, p2i(inst));
 }

-void CodeInstaller::pd_relocate_JavaMethod(Handle hotspot_method, jint pc_offset, TRAPS) {
+void CodeInstaller::pd_relocate_JavaMethod(CodeBuffer &, Handle hotspot_method, jint pc_offset, TRAPS) {
 #ifdef ASSERT
   Method* method = NULL;
   // we need to check, this might also be an unresolved method
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -40,4 +40,10 @@
 #endif
 };

+ public:
+
+  // Instruct loadConP of x86_64.ad places oops in code that are not also
+  // listed in the oop section.
+  static bool mustIterateImmediateOopsInCode() { return true; }
+
 #endif // CPU_X86_VM_RELOCINFO_X86_HPP
@@ -433,7 +433,7 @@ class StubGenerator: public StubCodeGenerator {


   //----------------------------------------------------------------------------------------------------
-  // Support for jint Atomic::xchg(jint exchange_value, volatile jint* dest)
+  // Support for int32_t Atomic::xchg(int32_t exchange_value, volatile int32_t* dest)
   //
   // xchg exists as far back as 8086, lock needed for MP only
   // Stack layout immediately after call:
@@ -611,8 +611,8 @@ class StubGenerator: public StubCodeGenerator {
     return start;
   }

-  // Support for jbyte atomic::atomic_cmpxchg(jbyte exchange_value, volatile jbyte* dest,
-  //                                          jbyte compare_value)
+  // Support for int8_t atomic::atomic_cmpxchg(int8_t exchange_value, volatile int8_t* dest,
+  //                                           int8_t compare_value)
   //
   // Arguments :
   //    c_rarg0: exchange_value
@@ -637,9 +637,9 @@ class StubGenerator: public StubCodeGenerator {
     return start;
   }

-  // Support for jlong atomic::atomic_cmpxchg(jlong exchange_value,
-  //                                          volatile jlong* dest,
-  //                                          jlong compare_value)
+  // Support for int64_t atomic::atomic_cmpxchg(int64_t exchange_value,
+  //                                            volatile int64_t* dest,
+  //                                            int64_t compare_value)
   // Arguments :
   //    c_rarg0: exchange_value
   //    c_rarg1: dest
@@ -694,8 +694,8 @@ class StubGenerator: public StubCodeGenerator {
   // Result:
   //    *dest += add_value
   //    return *dest;
-  address generate_atomic_add_ptr() {
-    StubCodeMark mark(this, "StubRoutines", "atomic_add_ptr");
+  address generate_atomic_add_long() {
+    StubCodeMark mark(this, "StubRoutines", "atomic_add_long");
     address start = __ pc();

     __ movptr(rax, c_rarg0); // Copy to eax we need a return value anyhow
@@ -5015,14 +5015,14 @@ class StubGenerator: public StubCodeGenerator {
     StubRoutines::_catch_exception_entry = generate_catch_exception();

     // atomic calls
     StubRoutines::_atomic_xchg_entry         = generate_atomic_xchg();
     StubRoutines::_atomic_xchg_long_entry    = generate_atomic_xchg_long();
     StubRoutines::_atomic_cmpxchg_entry      = generate_atomic_cmpxchg();
     StubRoutines::_atomic_cmpxchg_byte_entry = generate_atomic_cmpxchg_byte();
     StubRoutines::_atomic_cmpxchg_long_entry = generate_atomic_cmpxchg_long();
     StubRoutines::_atomic_add_entry          = generate_atomic_add();
-    StubRoutines::_atomic_add_ptr_entry      = generate_atomic_add_ptr();
+    StubRoutines::_atomic_add_long_entry     = generate_atomic_add_long();
     StubRoutines::_fence_entry               = generate_orderaccess_fence();

     // platform dependent
     StubRoutines::x86::_get_previous_fp_entry = generate_get_previous_fp();
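The renamed stub's contract is a sequentially consistent 64-bit fetch-and-add returning the updated value; generate_atomic_add_ptr() already produced exactly that on LP64, so the new name matches what the stub does. The semantics expressed with a compiler builtin instead of the generated lock xadd sequence (a sketch only):

    #include <stdint.h>

    // What StubRoutines::atomic_add_long_entry() provides, in portable form.
    int64_t atomic_add_long(int64_t add_value, volatile int64_t* dest) {
      // *dest += add_value; return *dest;  -- performed atomically
      return __atomic_add_fetch(dest, add_value, __ATOMIC_SEQ_CST);
    }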
@@ -3869,7 +3869,6 @@ void TemplateTable::_new() {
   Label done;
   Label initialize_header;
   Label initialize_object;  // including clearing the fields
-  Label allocate_shared;

   __ get_cpool_and_tags(rcx, rax);

@@ -3895,12 +3894,19 @@ void TemplateTable::_new() {
   __ testl(rdx, Klass::_lh_instance_slow_path_bit);
   __ jcc(Assembler::notZero, slow_case);

+  // Allocate the instance:
+  //  If TLAB is enabled:
+  //    Try to allocate in the TLAB.
+  //    If fails, go to the slow path.
+  //  Else If inline contiguous allocations are enabled:
+  //    Try to allocate in eden.
+  //    If fails due to heap end, go to slow path.
   //
-  // Allocate the instance
-  // 1) Try to allocate in the TLAB
-  // 2) if fail and the object is large allocate in the shared Eden
-  // 3) if the above fails (or is not applicable), go to a slow case
-  // (creates a new TLAB, etc.)
+  //  If TLAB is enabled OR inline contiguous is enabled:
+  //    Initialize the allocation.
+  //    Exit.
+  //
+  //  Go to slow path.

   const bool allow_shared_alloc =
     Universe::heap()->supports_inline_contig_alloc();
@@ -3916,7 +3922,7 @@ void TemplateTable::_new() {
     __ movptr(rax, Address(thread, in_bytes(JavaThread::tlab_top_offset())));
     __ lea(rbx, Address(rax, rdx, Address::times_1));
     __ cmpptr(rbx, Address(thread, in_bytes(JavaThread::tlab_end_offset())));
-    __ jcc(Assembler::above, allow_shared_alloc ? allocate_shared : slow_case);
+    __ jcc(Assembler::above, slow_case);
     __ movptr(Address(thread, in_bytes(JavaThread::tlab_top_offset())), rbx);
     if (ZeroTLAB) {
       // the fields have been already cleared
@@ -3925,40 +3931,40 @@ void TemplateTable::_new() {
       // initialize both the header and fields
       __ jmp(initialize_object);
     }
-  }
-
-  // Allocation in the shared Eden, if allowed.
-  //
-  // rdx: instance size in bytes
-  if (allow_shared_alloc) {
-    __ bind(allocate_shared);
-
-    ExternalAddress heap_top((address)Universe::heap()->top_addr());
-    ExternalAddress heap_end((address)Universe::heap()->end_addr());
-
-    Label retry;
-    __ bind(retry);
-    __ movptr(rax, heap_top);
-    __ lea(rbx, Address(rax, rdx, Address::times_1));
-    __ cmpptr(rbx, heap_end);
-    __ jcc(Assembler::above, slow_case);
-
-    // Compare rax, with the top addr, and if still equal, store the new
-    // top addr in rbx, at the address of the top addr pointer. Sets ZF if was
-    // equal, and clears it otherwise. Use lock prefix for atomicity on MPs.
-    //
-    // rax,: object begin
-    // rbx,: object end
-    // rdx: instance size in bytes
-    __ locked_cmpxchgptr(rbx, heap_top);
-
-    // if someone beat us on the allocation, try again, otherwise continue
-    __ jcc(Assembler::notEqual, retry);
-
-    __ incr_allocated_bytes(thread, rdx, 0);
+  } else {
+    // Allocation in the shared Eden, if allowed.
+    //
+    // rdx: instance size in bytes
+    if (allow_shared_alloc) {
+      ExternalAddress heap_top((address)Universe::heap()->top_addr());
+      ExternalAddress heap_end((address)Universe::heap()->end_addr());
+
+      Label retry;
+      __ bind(retry);
+      __ movptr(rax, heap_top);
+      __ lea(rbx, Address(rax, rdx, Address::times_1));
+      __ cmpptr(rbx, heap_end);
+      __ jcc(Assembler::above, slow_case);
+
+      // Compare rax, with the top addr, and if still equal, store the new
+      // top addr in rbx, at the address of the top addr pointer. Sets ZF if was
+      // equal, and clears it otherwise. Use lock prefix for atomicity on MPs.
+      //
+      // rax,: object begin
+      // rbx,: object end
+      // rdx: instance size in bytes
+      __ locked_cmpxchgptr(rbx, heap_top);
+
+      // if someone beat us on the allocation, try again, otherwise continue
+      __ jcc(Assembler::notEqual, retry);
+
+      __ incr_allocated_bytes(thread, rdx, 0);
+    }
   }

-  if (UseTLAB || Universe::heap()->supports_inline_contig_alloc()) {
+  // If UseTLAB or allow_shared_alloc are true, the object is created above and
+  // there is an initialize need. Otherwise, skip and go to the slow path.
+  if (UseTLAB || allow_shared_alloc) {
     // The object is initialized before the header. If the object size is
     // zero, go directly to the header initialization.
     __ bind(initialize_object);
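The shared-eden path above is a standard CAS bump-pointer loop, with locked_cmpxchgptr playing the part of the compare-exchange. The same logic as self-contained C++ (top and end stand in for Universe::heap()->top_addr() and end_addr()):

    #include <atomic>
    #include <cstddef>

    // Sketch of the eden fast path: advance the shared top pointer by size,
    // retrying if another thread advanced it first.
    char* eden_allocate(std::atomic<char*>* top, char* end, std::size_t size) {
      for (;;) {
        char* old_top = top->load();
        char* new_top = old_top + size;
        if (new_top > end) {
          return nullptr;  // heap end reached: caller takes the slow path
        }
        if (top->compare_exchange_weak(old_top, new_top)) {
          return old_top;  // the old top is the start of the new object
        }
        // someone beat us on the allocation: try again
      }
    }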
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -628,6 +628,11 @@ void VM_Version::get_processor_features() {
   if (UseSSE < 1)
     _features &= ~CPU_SSE;

+  // Since AVX instructions are slower than SSE on some ZX cpus, force UseAVX=0.
+  if (is_zx() && ((cpu_family() == 6) || (cpu_family() == 7))) {
+    UseAVX = 0;
+  }
+
   // first try initial setting and detect what we can support
   int use_avx_limit = 0;
   if (UseAVX > 0) {
|
|||||||
// UseXmmRegToRegMoveAll == true --> movaps(xmm, xmm), movapd(xmm, xmm).
|
// UseXmmRegToRegMoveAll == true --> movaps(xmm, xmm), movapd(xmm, xmm).
|
||||||
// UseXmmRegToRegMoveAll == false --> movss(xmm, xmm), movsd(xmm, xmm).
|
// UseXmmRegToRegMoveAll == false --> movss(xmm, xmm), movsd(xmm, xmm).
|
||||||
|
|
||||||
|
|
||||||
|
if (is_zx()) { // ZX cpus specific settings
|
||||||
|
if (FLAG_IS_DEFAULT(UseStoreImmI16)) {
|
||||||
|
UseStoreImmI16 = false; // don't use it on ZX cpus
|
||||||
|
}
|
||||||
|
if ((cpu_family() == 6) || (cpu_family() == 7)) {
|
||||||
|
if (FLAG_IS_DEFAULT(UseAddressNop)) {
|
||||||
|
// Use it on all ZX cpus
|
||||||
|
UseAddressNop = true;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if (FLAG_IS_DEFAULT(UseXmmLoadAndClearUpper)) {
|
||||||
|
UseXmmLoadAndClearUpper = true; // use movsd on all ZX cpus
|
||||||
|
}
|
||||||
|
if (FLAG_IS_DEFAULT(UseXmmRegToRegMoveAll)) {
|
||||||
|
if (supports_sse3()) {
|
||||||
|
UseXmmRegToRegMoveAll = true; // use movaps, movapd on new ZX cpus
|
||||||
|
} else {
|
||||||
|
UseXmmRegToRegMoveAll = false;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if (((cpu_family() == 6) || (cpu_family() == 7)) && supports_sse3()) { // new ZX cpus
|
||||||
|
#ifdef COMPILER2
|
||||||
|
if (FLAG_IS_DEFAULT(MaxLoopPad)) {
|
||||||
|
// For new ZX cpus do the next optimization:
|
||||||
|
// don't align the beginning of a loop if there are enough instructions
|
||||||
|
// left (NumberOfLoopInstrToAlign defined in c2_globals.hpp)
|
||||||
|
// in current fetch line (OptoLoopAlignment) or the padding
|
||||||
|
// is big (> MaxLoopPad).
|
||||||
|
// Set MaxLoopPad to 11 for new ZX cpus to reduce number of
|
||||||
|
// generated NOP instructions. 11 is the largest size of one
|
||||||
|
// address NOP instruction '0F 1F' (see Assembler::nop(i)).
|
||||||
|
MaxLoopPad = 11;
|
||||||
|
}
|
||||||
|
#endif // COMPILER2
|
||||||
|
if (FLAG_IS_DEFAULT(UseXMMForArrayCopy)) {
|
||||||
|
UseXMMForArrayCopy = true; // use SSE2 movq on new ZX cpus
|
||||||
|
}
|
||||||
|
if (supports_sse4_2()) { // new ZX cpus
|
||||||
|
if (FLAG_IS_DEFAULT(UseUnalignedLoadStores)) {
|
||||||
|
UseUnalignedLoadStores = true; // use movdqu on newest ZX cpus
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if (supports_sse4_2()) {
|
||||||
|
if (FLAG_IS_DEFAULT(UseSSE42Intrinsics)) {
|
||||||
|
FLAG_SET_DEFAULT(UseSSE42Intrinsics, true);
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
if (UseSSE42Intrinsics && !FLAG_IS_DEFAULT(UseAESIntrinsics)) {
|
||||||
|
warning("SSE4.2 intrinsics require SSE4.2 instructions or higher. Intrinsics will be disabled.");
|
||||||
|
}
|
||||||
|
FLAG_SET_DEFAULT(UseSSE42Intrinsics, false);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if (FLAG_IS_DEFAULT(AllocatePrefetchInstr) && supports_3dnow_prefetch()) {
|
||||||
|
FLAG_SET_DEFAULT(AllocatePrefetchInstr, 3);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
if( is_amd() ) { // AMD cpus specific settings
|
if( is_amd() ) { // AMD cpus specific settings
|
||||||
if( supports_sse2() && FLAG_IS_DEFAULT(UseAddressNop) ) {
|
if( supports_sse2() && FLAG_IS_DEFAULT(UseAddressNop) ) {
|
||||||
// Use it on new AMD cpus starting from Opteron.
|
// Use it on new AMD cpus starting from Opteron.
|
||||||
@ -1374,6 +1439,14 @@ void VM_Version::get_processor_features() {
|
|||||||
#endif
|
#endif
|
||||||
}
|
}
|
||||||
|
|
||||||
|
if (is_zx() && ((cpu_family() == 6) || (cpu_family() == 7)) && supports_sse4_2()) {
|
||||||
|
#ifdef COMPILER2
|
||||||
|
if (FLAG_IS_DEFAULT(UseFPUForSpilling)) {
|
||||||
|
FLAG_SET_DEFAULT(UseFPUForSpilling, true);
|
||||||
|
}
|
||||||
|
#endif
|
||||||
|
}
|
||||||
|
|
||||||
#ifdef _LP64
|
#ifdef _LP64
|
||||||
// Prefetch settings
|
// Prefetch settings
|
||||||
|
|
||||||
|
@ -1,5 +1,5 @@
|
|||||||
/*
|
/*
|
||||||
* Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
|
* Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved.
|
||||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||||
*
|
*
|
||||||
* This code is free software; you can redistribute it and/or modify it
|
* This code is free software; you can redistribute it and/or modify it
|
||||||
@ -305,6 +305,9 @@ protected:
|
|||||||
enum Extended_Family {
|
enum Extended_Family {
|
||||||
// AMD
|
// AMD
|
||||||
CPU_FAMILY_AMD_11H = 0x11,
|
CPU_FAMILY_AMD_11H = 0x11,
|
||||||
|
// ZX
|
||||||
|
CPU_FAMILY_ZX_CORE_F6 = 6,
|
||||||
|
CPU_FAMILY_ZX_CORE_F7 = 7,
|
||||||
// Intel
|
// Intel
|
||||||
CPU_FAMILY_INTEL_CORE = 6,
|
CPU_FAMILY_INTEL_CORE = 6,
|
||||||
CPU_MODEL_NEHALEM = 0x1e,
|
CPU_MODEL_NEHALEM = 0x1e,
|
||||||
@ -549,6 +552,16 @@ protected:
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// ZX features.
|
||||||
|
if (is_zx()) {
|
||||||
|
if (_cpuid_info.ext_cpuid1_ecx.bits.lzcnt_intel != 0)
|
||||||
|
result |= CPU_LZCNT;
|
||||||
|
// for ZX, ecx.bits.misalignsse bit (bit 8) indicates support for prefetchw
|
||||||
|
if (_cpuid_info.ext_cpuid1_ecx.bits.misalignsse != 0) {
|
||||||
|
result |= CPU_3DNOW_PREFETCH;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
return result;
|
return result;
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -657,6 +670,7 @@ public:
|
|||||||
static bool is_P6() { return cpu_family() >= 6; }
|
static bool is_P6() { return cpu_family() >= 6; }
|
||||||
static bool is_amd() { assert_is_initialized(); return _cpuid_info.std_vendor_name_0 == 0x68747541; } // 'htuA'
|
static bool is_amd() { assert_is_initialized(); return _cpuid_info.std_vendor_name_0 == 0x68747541; } // 'htuA'
|
||||||
static bool is_intel() { assert_is_initialized(); return _cpuid_info.std_vendor_name_0 == 0x756e6547; } // 'uneG'
|
static bool is_intel() { assert_is_initialized(); return _cpuid_info.std_vendor_name_0 == 0x756e6547; } // 'uneG'
|
||||||
|
static bool is_zx() { assert_is_initialized(); return (_cpuid_info.std_vendor_name_0 == 0x746e6543) || (_cpuid_info.std_vendor_name_0 == 0x68532020); } // 'tneC'||'hS '
|
||||||
static bool is_atom_family() { return ((cpu_family() == 0x06) && ((extended_cpu_model() == 0x36) || (extended_cpu_model() == 0x37) || (extended_cpu_model() == 0x4D))); } //Silvermont and Centerton
|
static bool is_atom_family() { return ((cpu_family() == 0x06) && ((extended_cpu_model() == 0x36) || (extended_cpu_model() == 0x37) || (extended_cpu_model() == 0x4D))); } //Silvermont and Centerton
|
||||||
static bool is_knights_family() { return ((cpu_family() == 0x06) && ((extended_cpu_model() == 0x57) || (extended_cpu_model() == 0x85))); } // Xeon Phi 3200/5200/7200 and Future Xeon Phi
|
static bool is_knights_family() { return ((cpu_family() == 0x06) && ((extended_cpu_model() == 0x57) || (extended_cpu_model() == 0x85))); } // Xeon Phi 3200/5200/7200 and Future Xeon Phi
|
||||||
|
|
||||||
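The magic constants in is_zx() are the first four bytes of the CPUID vendor string read as a little-endian dword: "CentaurHauls" gives 0x746e6543 ('tneC') and "  Shanghai  " gives 0x68532020 ('hS  '). A self-contained check of the arithmetic:

    #include <stdint.h>
    #include <string.h>

    // CPUID leaf 0 returns the vendor string in EBX:EDX:ECX; EBX holds the
    // first four ASCII bytes, which read back as a little-endian integer.
    uint32_t first_vendor_dword(const char* vendor) {
      uint32_t ebx;
      memcpy(&ebx, vendor, 4);  // 'C' 'e' 'n' 't' -> 0x746e6543
      return ebx;
    }
    // first_vendor_dword("CentaurHauls") == 0x746e6543
    // first_vendor_dword("  Shanghai  ") == 0x68532020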
@@ -680,6 +694,15 @@ public:
     }
   } else if (is_amd()) {
     result = (_cpuid_info.ext_cpuid8_ecx.bits.cores_per_cpu + 1);
+  } else if (is_zx()) {
+    bool supports_topology = supports_processor_topology();
+    if (supports_topology) {
+      result = _cpuid_info.tpl_cpuidB1_ebx.bits.logical_cpus /
+               _cpuid_info.tpl_cpuidB0_ebx.bits.logical_cpus;
+    }
+    if (!supports_topology || result == 0) {
+      result = (_cpuid_info.dcp_cpuid4_eax.bits.cores_per_cpu + 1);
+    }
   }
   return result;
 }
@@ -688,6 +711,8 @@ public:
   uint result = 1;
   if (is_intel() && supports_processor_topology()) {
     result = _cpuid_info.tpl_cpuidB0_ebx.bits.logical_cpus;
+  } else if (is_zx() && supports_processor_topology()) {
+    result = _cpuid_info.tpl_cpuidB0_ebx.bits.logical_cpus;
   } else if (_cpuid_info.std_cpuid1_edx.bits.ht != 0) {
     if (cpu_family() >= 0x17) {
       result = _cpuid_info.ext_cpuid1E_ebx.bits.threads_per_core + 1;
@@ -705,6 +730,8 @@ public:
     result = (_cpuid_info.dcp_cpuid4_ebx.bits.L1_line_size + 1);
   } else if (is_amd()) {
     result = _cpuid_info.ext_cpuid5_ecx.bits.L1_line_size;
+  } else if (is_zx()) {
+    result = (_cpuid_info.dcp_cpuid4_ebx.bits.L1_line_size + 1);
   }
   if (result < 32) // not defined ?
     result = 32;   // 32 bytes by default on x86 and other x64
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2018, Oracle and/or its affiliates. All rights reserved.
  * Copyright 2007 Red Hat, Inc.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
@@ -34,4 +34,8 @@
   format_width = 1
 };

+ public:
+
+  static bool mustIterateImmediateOopsInCode() { return true; }
+
 #endif // CPU_ZERO_VM_RELOCINFO_ZERO_HPP
@@ -258,7 +258,7 @@ class StubGenerator: public StubCodeGenerator {
     StubRoutines::_atomic_cmpxchg_byte_entry = ShouldNotCallThisStub();
     StubRoutines::_atomic_cmpxchg_long_entry = ShouldNotCallThisStub();
     StubRoutines::_atomic_add_entry          = ShouldNotCallThisStub();
-    StubRoutines::_atomic_add_ptr_entry      = ShouldNotCallThisStub();
+    StubRoutines::_atomic_add_long_entry     = ShouldNotCallThisStub();
     StubRoutines::_fence_entry               = ShouldNotCallThisStub();
   }

@@ -132,8 +132,8 @@ inline T Atomic::PlatformCmpxchg<8>::operator()(T exchange_value,

 extern "C" {
   // defined in bsd_x86.s
-  jlong _Atomic_cmpxchg_long(jlong, volatile jlong*, jlong, bool);
-  void _Atomic_move_long(const volatile jlong* src, volatile jlong* dst);
+  int64_t _Atomic_cmpxchg_long(int64_t, volatile int64_t*, int64_t, bool);
+  void _Atomic_move_long(const volatile int64_t* src, volatile int64_t* dst);
 }

 template<>
@@ -143,15 +143,15 @@ inline T Atomic::PlatformCmpxchg<8>::operator()(T exchange_value,
                                                 T compare_value,
                                                 cmpxchg_memory_order order) const {
   STATIC_ASSERT(8 == sizeof(T));
-  return cmpxchg_using_helper<jlong>(_Atomic_cmpxchg_long, exchange_value, dest, compare_value);
+  return cmpxchg_using_helper<int64_t>(_Atomic_cmpxchg_long, exchange_value, dest, compare_value);
 }

 template<>
 template<typename T>
 inline T Atomic::PlatformLoad<8>::operator()(T const volatile* src) const {
   STATIC_ASSERT(8 == sizeof(T));
-  volatile jlong dest;
-  _Atomic_move_long(reinterpret_cast<const volatile jlong*>(src), reinterpret_cast<volatile jlong*>(&dest));
+  volatile int64_t dest;
+  _Atomic_move_long(reinterpret_cast<const volatile int64_t*>(src), reinterpret_cast<volatile int64_t*>(&dest));
   return PrimitiveConversions::cast<T>(dest);
 }

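Re-instantiating cmpxchg_using_helper with int64_t instead of jlong changes no behavior; the helper only bit-casts T to the C function's width, calls it, and casts the result back. The pattern in isolation (memcpy stands in for PrimitiveConversions::cast; the typedef is an assumption of the sketch):

    #include <stdint.h>
    #include <string.h>

    typedef int64_t cmpxchg64_fn(int64_t exchange, volatile int64_t* dest, int64_t compare);

    // Adapt a fixed-width C helper to a templated atomic API.
    template <typename T>
    T cmpxchg_via_helper(cmpxchg64_fn* fn, T exchange, volatile T* dest, T compare) {
      static_assert(sizeof(T) == sizeof(int64_t), "helper is 64 bits wide");
      int64_t e, c, r;
      memcpy(&e, &exchange, sizeof(e));  // bit-preserving casts
      memcpy(&c, &compare, sizeof(c));
      r = fn(e, reinterpret_cast<volatile int64_t*>(dest), c);
      T result;
      memcpy(&result, &r, sizeof(result));
      return result;
    }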
@@ -160,7 +160,7 @@ template<typename T>
 inline void Atomic::PlatformStore<8>::operator()(T store_value,
                                                  T volatile* dest) const {
   STATIC_ASSERT(8 == sizeof(T));
-  _Atomic_move_long(reinterpret_cast<const volatile jlong*>(&store_value), reinterpret_cast<volatile jlong*>(dest));
+  _Atomic_move_long(reinterpret_cast<const volatile int64_t*>(&store_value), reinterpret_cast<volatile int64_t*>(dest));
 }

 #endif // AMD64
@@ -633,10 +633,10 @@ mmx_acs_CopyLeft:
        ret


-        # Support for jlong Atomic::cmpxchg(jlong exchange_value,
-        #                                   volatile jlong* dest,
-        #                                   jlong compare_value,
-        #                                   bool is_MP)
+        # Support for int64_t Atomic::cmpxchg(int64_t exchange_value,
+        #                                     volatile int64_t* dest,
+        #                                     int64_t compare_value,
+        #                                     bool is_MP)
         #
         .p2align 4,,15
         ELF_TYPE(_Atomic_cmpxchg_long,@function)
@@ -658,8 +658,8 @@ SYMBOL(_Atomic_cmpxchg_long):
        ret


-        # Support for jlong Atomic::load and Atomic::store.
-        # void _Atomic_move_long(const volatile jlong* src, volatile jlong* dst)
+        # Support for int64_t Atomic::load and Atomic::store.
+        # void _Atomic_move_long(const volatile int64_t* src, volatile int64_t* dst)
         .p2align 4,,15
         ELF_TYPE(_Atomic_move_long,@function)
 SYMBOL(_Atomic_move_long):
@@ -265,8 +265,8 @@ template<>
 template<typename T>
 inline T Atomic::PlatformLoad<8>::operator()(T const volatile* src) const {
   STATIC_ASSERT(8 == sizeof(T));
-  volatile jlong dest;
-  os::atomic_copy64(reinterpret_cast<const volatile jlong*>(src), reinterpret_cast<volatile jlong*>(&dest));
+  volatile int64_t dest;
+  os::atomic_copy64(reinterpret_cast<const volatile int64_t*>(src), reinterpret_cast<volatile int64_t*>(&dest));
   return PrimitiveConversions::cast<T>(dest);
 }

@@ -275,7 +275,7 @@ template<typename T>
 inline void Atomic::PlatformStore<8>::operator()(T store_value,
                                                  T volatile* dest) const {
   STATIC_ASSERT(8 == sizeof(T));
-  os::atomic_copy64(reinterpret_cast<const volatile jlong*>(&store_value), reinterpret_cast<volatile jlong*>(dest));
+  os::atomic_copy64(reinterpret_cast<const volatile int64_t*>(&store_value), reinterpret_cast<volatile int64_t*>(dest));
 }

 #endif // OS_CPU_BSD_ZERO_VM_ATOMIC_BSD_ZERO_HPP
@@ -50,7 +50,7 @@ template<typename T>
 inline T Atomic::PlatformLoad<8>::operator()(T const volatile* src) const {
   STATIC_ASSERT(8 == sizeof(T));
   return PrimitiveConversions::cast<T>(
-    (*os::atomic_load_long_func)(reinterpret_cast<const volatile jlong*>(src)));
+    (*os::atomic_load_long_func)(reinterpret_cast<const volatile int64_t*>(src)));
 }

 template<>
@@ -59,7 +59,7 @@ inline void Atomic::PlatformStore<8>::operator()(T store_value,
                                                  T volatile* dest) const {
   STATIC_ASSERT(8 == sizeof(T));
   (*os::atomic_store_long_func)(
-    PrimitiveConversions::cast<jlong>(store_value), reinterpret_cast<volatile jlong*>(dest));
+    PrimitiveConversions::cast<int64_t>(store_value), reinterpret_cast<volatile int64_t*>(dest));
 }
 #endif

@@ -103,7 +103,7 @@ inline D Atomic::PlatformAdd<4>::add_and_fetch(I add_value, D volatile* dest) co
     : "memory");
   return val;
 #else
-  return add_using_helper<jint>(os::atomic_add_func, add_value, dest);
+  return add_using_helper<int32_t>(os::atomic_add_func, add_value, dest);
 #endif
 }

@@ -146,7 +146,7 @@ inline T Atomic::PlatformXchg<4>::operator()(T exchange_value,
     : "memory");
   return old_val;
 #else
-  return xchg_using_helper<jint>(os::atomic_xchg_func, exchange_value, dest);
+  return xchg_using_helper<int32_t>(os::atomic_xchg_func, exchange_value, dest);
 #endif
 }

@@ -178,17 +178,17 @@ struct Atomic::PlatformCmpxchg<1> : Atomic::CmpxchgByteUsingInt {};

 #ifndef AARCH64

-inline jint reorder_cmpxchg_func(jint exchange_value,
-                                 jint volatile* dest,
-                                 jint compare_value) {
+inline int32_t reorder_cmpxchg_func(int32_t exchange_value,
+                                    int32_t volatile* dest,
+                                    int32_t compare_value) {
   // Warning: Arguments are swapped to avoid moving them for kernel call
   return (*os::atomic_cmpxchg_func)(compare_value, exchange_value, dest);
 }

-inline jlong reorder_cmpxchg_long_func(jlong exchange_value,
-                                       jlong volatile* dest,
-                                       jlong compare_value) {
-  assert(VM_Version::supports_cx8(), "Atomic compare and exchange jlong not supported on this architecture!");
+inline int64_t reorder_cmpxchg_long_func(int64_t exchange_value,
+                                         int64_t volatile* dest,
+                                         int64_t compare_value) {
+  assert(VM_Version::supports_cx8(), "Atomic compare and exchange int64_t not supported on this architecture!");
   // Warning: Arguments are swapped to avoid moving them for kernel call
   return (*os::atomic_cmpxchg_long_func)(compare_value, exchange_value, dest);
 }
@@ -221,7 +221,7 @@ inline T Atomic::PlatformCmpxchg<4>::operator()(T exchange_value,
     : "memory");
   return rv;
 #else
-  return cmpxchg_using_helper<jint>(reorder_cmpxchg_func, exchange_value, dest, compare_value);
+  return cmpxchg_using_helper<int32_t>(reorder_cmpxchg_func, exchange_value, dest, compare_value);
 #endif
 }

@@ -251,7 +251,7 @@ inline T Atomic::PlatformCmpxchg<8>::operator()(T exchange_value,
     : "memory");
   return rv;
 #else
-  return cmpxchg_using_helper<jlong>(reorder_cmpxchg_long_func, exchange_value, dest, compare_value);
+  return cmpxchg_using_helper<int64_t>(reorder_cmpxchg_long_func, exchange_value, dest, compare_value);
 #endif
 }

@@ -598,11 +598,11 @@ void os::print_register_info(outputStream *st, const void *context) {

 #ifndef AARCH64

-typedef jlong cmpxchg_long_func_t(jlong, jlong, volatile jlong*);
+typedef int64_t cmpxchg_long_func_t(int64_t, int64_t, volatile int64_t*);

 cmpxchg_long_func_t* os::atomic_cmpxchg_long_func = os::atomic_cmpxchg_long_bootstrap;

-jlong os::atomic_cmpxchg_long_bootstrap(jlong compare_value, jlong exchange_value, volatile jlong* dest) {
+int64_t os::atomic_cmpxchg_long_bootstrap(int64_t compare_value, int64_t exchange_value, volatile int64_t* dest) {
   // try to use the stub:
   cmpxchg_long_func_t* func = CAST_TO_FN_PTR(cmpxchg_long_func_t*, StubRoutines::atomic_cmpxchg_long_entry());

@@ -612,16 +612,16 @@ jlong os::atomic_cmpxchg_long_bootstrap(jlong compare_value, jlong exchange_valu
   }
   assert(Threads::number_of_threads() == 0, "for bootstrap only");

-  jlong old_value = *dest;
+  int64_t old_value = *dest;
   if (old_value == compare_value)
     *dest = exchange_value;
   return old_value;
 }
-typedef jlong load_long_func_t(const volatile jlong*);
+typedef int64_t load_long_func_t(const volatile int64_t*);

 load_long_func_t* os::atomic_load_long_func = os::atomic_load_long_bootstrap;

-jlong os::atomic_load_long_bootstrap(const volatile jlong* src) {
+int64_t os::atomic_load_long_bootstrap(const volatile int64_t* src) {
   // try to use the stub:
   load_long_func_t* func = CAST_TO_FN_PTR(load_long_func_t*, StubRoutines::atomic_load_long_entry());

@@ -631,15 +631,15 @@ jlong os::atomic_load_long_bootstrap(const volatile jlong* src) {
   }
   assert(Threads::number_of_threads() == 0, "for bootstrap only");

-  jlong old_value = *src;
+  int64_t old_value = *src;
   return old_value;
 }

-typedef void store_long_func_t(jlong, volatile jlong*);
+typedef void store_long_func_t(int64_t, volatile int64_t*);

 store_long_func_t* os::atomic_store_long_func = os::atomic_store_long_bootstrap;

-void os::atomic_store_long_bootstrap(jlong val, volatile jlong* dest) {
+void os::atomic_store_long_bootstrap(int64_t val, volatile int64_t* dest) {
   // try to use the stub:
   store_long_func_t* func = CAST_TO_FN_PTR(store_long_func_t*, StubRoutines::atomic_store_long_entry());

@@ -652,11 +652,11 @@ void os::atomic_store_long_bootstrap(jlong val, volatile jlong* dest) {
   *dest = val;
 }

-typedef jint atomic_add_func_t(jint add_value, volatile jint *dest);
+typedef int32_t atomic_add_func_t(int32_t add_value, volatile int32_t *dest);

 atomic_add_func_t * os::atomic_add_func = os::atomic_add_bootstrap;

-jint os::atomic_add_bootstrap(jint add_value, volatile jint *dest) {
+int32_t os::atomic_add_bootstrap(int32_t add_value, volatile int32_t *dest) {
   atomic_add_func_t * func = CAST_TO_FN_PTR(atomic_add_func_t*,
                                             StubRoutines::atomic_add_entry());
   if (func != NULL) {
@@ -664,16 +664,16 @@ jint os::atomic_add_bootstrap(jint add_value, volatile jint *dest) {
     return (*func)(add_value, dest);
   }

-  jint old_value = *dest;
+  int32_t old_value = *dest;
   *dest = old_value + add_value;
   return (old_value + add_value);
 }

-typedef jint atomic_xchg_func_t(jint exchange_value, volatile jint *dest);
+typedef int32_t atomic_xchg_func_t(int32_t exchange_value, volatile int32_t *dest);

 atomic_xchg_func_t * os::atomic_xchg_func = os::atomic_xchg_bootstrap;

-jint os::atomic_xchg_bootstrap(jint exchange_value, volatile jint *dest) {
+int32_t os::atomic_xchg_bootstrap(int32_t exchange_value, volatile int32_t *dest) {
   atomic_xchg_func_t * func = CAST_TO_FN_PTR(atomic_xchg_func_t*,
                                              StubRoutines::atomic_xchg_entry());
   if (func != NULL) {
@@ -681,16 +681,16 @@ jint os::atomic_xchg_bootstrap(jint exchange_value, volatile jint *dest) {
     return (*func)(exchange_value, dest);
   }

-  jint old_value = *dest;
+  int32_t old_value = *dest;
   *dest = exchange_value;
   return (old_value);
 }

-typedef jint cmpxchg_func_t(jint, jint, volatile jint*);
+typedef int32_t cmpxchg_func_t(int32_t, int32_t, volatile int32_t*);

 cmpxchg_func_t* os::atomic_cmpxchg_func = os::atomic_cmpxchg_bootstrap;

-jint os::atomic_cmpxchg_bootstrap(jint compare_value, jint exchange_value, volatile jint* dest) {
+int32_t os::atomic_cmpxchg_bootstrap(int32_t compare_value, int32_t exchange_value, volatile int32_t* dest) {
   // try to use the stub:
   cmpxchg_func_t* func = CAST_TO_FN_PTR(cmpxchg_func_t*, StubRoutines::atomic_cmpxchg_entry());

@@ -700,7 +700,7 @@ jint os::atomic_cmpxchg_bootstrap(jint compare_value, jint exchange_value, volat
   }
   assert(Threads::number_of_threads() == 0, "for bootstrap only");

-  jint old_value = *dest;
+  int32_t old_value = *dest;
   if (old_value == compare_value)
     *dest = exchange_value;
   return old_value;
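Every *_bootstrap function above follows the same self-patching function-pointer idiom: the pointer starts out aimed at the bootstrap, which installs the generated stub once StubRoutines provides one, and otherwise performs a plain read-modify-write that is safe only while the VM is still single-threaded (hence the Threads::number_of_threads() == 0 assert). The idiom in isolation, with lookup_stub() standing in for the StubRoutines query:

    #include <stdint.h>
    #include <stddef.h>

    typedef int32_t add_func_t(int32_t add_value, volatile int32_t* dest);

    static add_func_t* lookup_stub() { return NULL; }  // stand-in: real code asks StubRoutines
    static int32_t add_bootstrap(int32_t add_value, volatile int32_t* dest);

    add_func_t* atomic_add_func = add_bootstrap;  // patched on first successful lookup

    static int32_t add_bootstrap(int32_t add_value, volatile int32_t* dest) {
      add_func_t* func = lookup_stub();
      if (func != NULL) {
        atomic_add_func = func;  // later callers go straight to the stub
        return (*func)(add_value, dest);
      }
      // Pre-stub fallback: not atomic, valid only while single-threaded.
      int32_t old_value = *dest;
      *dest = old_value + add_value;
      return old_value + add_value;
    }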
@ -45,35 +45,35 @@
|
|||||||
static bool register_code_area(char *low, char *high) { return true; }
|
static bool register_code_area(char *low, char *high) { return true; }
|
||||||
|
|
||||||
#ifndef AARCH64
|
#ifndef AARCH64
|
||||||
static jlong (*atomic_cmpxchg_long_func)(jlong compare_value,
|
static int64_t (*atomic_cmpxchg_long_func)(int64_t compare_value,
|
||||||
jlong exchange_value,
|
int64_t exchange_value,
|
||||||
volatile jlong *dest);
|
volatile int64_t *dest);
|
||||||
|
|
||||||
static jlong (*atomic_load_long_func)(const volatile jlong*);
|
static int64_t (*atomic_load_long_func)(const volatile int64_t*);
|
||||||
|
|
||||||
static void (*atomic_store_long_func)(jlong, volatile jlong*);
|
static void (*atomic_store_long_func)(int64_t, volatile int64_t*);
|
||||||
|
|
||||||
static jint (*atomic_add_func)(jint add_value, volatile jint *dest);
|
static int32_t (*atomic_add_func)(int32_t add_value, volatile int32_t *dest);
|
||||||
|
|
||||||
static jint (*atomic_xchg_func)(jint exchange_value, volatile jint *dest);
|
static int32_t (*atomic_xchg_func)(int32_t exchange_value, volatile int32_t *dest);
|
||||||
|
|
||||||
static jint (*atomic_cmpxchg_func)(jint compare_value,
|
static int32_t (*atomic_cmpxchg_func)(int32_t compare_value,
|
||||||
jint exchange_value,
|
int32_t exchange_value,
|
||||||
volatile jint *dest);
|
volatile int32_t *dest);
|
||||||
|
|
||||||
static jlong atomic_cmpxchg_long_bootstrap(jlong, jlong, volatile jlong*);
|
static int64_t atomic_cmpxchg_long_bootstrap(int64_t, int64_t, volatile int64_t*);
|
||||||
|
|
||||||
static jlong atomic_load_long_bootstrap(const volatile jlong*);
|
static int64_t atomic_load_long_bootstrap(const volatile int64_t*);
|
||||||
|
|
||||||
static void atomic_store_long_bootstrap(jlong, volatile jlong*);
|
static void atomic_store_long_bootstrap(int64_t, volatile int64_t*);
|
||||||
|
|
||||||
static jint atomic_add_bootstrap(jint add_value, volatile jint *dest);
|
static int32_t atomic_add_bootstrap(int32_t add_value, volatile int32_t *dest);
|
||||||
|
|
||||||
static jint atomic_xchg_bootstrap(jint exchange_value, volatile jint *dest);
|
static int32_t atomic_xchg_bootstrap(int32_t exchange_value, volatile int32_t *dest);
|
||||||
|
|
||||||
static jint atomic_cmpxchg_bootstrap(jint compare_value,
|
static int32_t atomic_cmpxchg_bootstrap(int32_t compare_value,
|
||||||
jint exchange_value,
|
int32_t exchange_value,
|
||||||
volatile jint *dest);
|
volatile int32_t *dest);
|
||||||
#endif // !AARCH64
|
#endif // !AARCH64
|
||||||
|
|
||||||
#endif // OS_CPU_LINUX_ARM_VM_OS_LINUX_ARM_HPP
|
#endif // OS_CPU_LINUX_ARM_VM_OS_LINUX_ARM_HPP
|
||||||
|
@ -1,5 +1,5 @@
|
|||||||
/*
|
/*
|
||||||
* Copyright (c) 1999, 2010, Oracle and/or its affiliates. All rights reserved.
|
* Copyright (c) 1999, 2017, Oracle and/or its affiliates. All rights reserved.
|
||||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||||
*
|
*
|
||||||
* This code is free software; you can redistribute it and/or modify it
|
* This code is free software; you can redistribute it and/or modify it
|
||||||
@ -28,15 +28,15 @@
|
|||||||
//
|
//
|
||||||
// NOTE: we are back in class os here, not Linux
|
// NOTE: we are back in class os here, not Linux
|
||||||
//
|
//
|
||||||
static jint (*atomic_xchg_func) (jint, volatile jint*);
|
static int32_t (*atomic_xchg_func) (int32_t, volatile int32_t*);
|
||||||
static jint (*atomic_cmpxchg_func) (jint, volatile jint*, jint);
|
static int32_t (*atomic_cmpxchg_func) (int32_t, volatile int32_t*, int32_t);
|
||||||
static jlong (*atomic_cmpxchg_long_func)(jlong, volatile jlong*, jlong);
|
static int64_t (*atomic_cmpxchg_long_func)(int64_t, volatile int64_t*, int64_t);
|
||||||
static jint (*atomic_add_func) (jint, volatile jint*);
|
static int32_t (*atomic_add_func) (int32_t, volatile int32_t*);
|
||||||
|
|
||||||
static jint atomic_xchg_bootstrap (jint, volatile jint*);
|
static int32_t atomic_xchg_bootstrap (int32_t, volatile int32_t*);
|
||||||
static jint atomic_cmpxchg_bootstrap (jint, volatile jint*, jint);
|
static int32_t atomic_cmpxchg_bootstrap (int32_t, volatile int32_t*, int32_t);
|
||||||
static jlong atomic_cmpxchg_long_bootstrap(jlong, volatile jlong*, jlong);
|
static int64_t atomic_cmpxchg_long_bootstrap(int64_t, volatile int64_t*, int64_t);
|
||||||
static jint atomic_add_bootstrap (jint, volatile jint*);
|
static int32_t atomic_add_bootstrap (int32_t, volatile int32_t*);
|
||||||
|
|
||||||
static void setup_fpu() {}
|
static void setup_fpu() {}
|
||||||
|
|
||||||
|
@ -133,8 +133,8 @@ inline T Atomic::PlatformCmpxchg<8>::operator()(T exchange_value,
|
|||||||
|
|
||||||
extern "C" {
|
extern "C" {
|
||||||
// defined in linux_x86.s
|
// defined in linux_x86.s
|
||||||
jlong _Atomic_cmpxchg_long(jlong, volatile jlong*, jlong);
|
int64_t _Atomic_cmpxchg_long(int64_t, volatile int64_t*, int64_t);
|
||||||
void _Atomic_move_long(const volatile jlong* src, volatile jlong* dst);
|
void _Atomic_move_long(const volatile int64_t* src, volatile int64_t* dst);
|
||||||
}
|
}
|
||||||
|
|
||||||
template<>
|
template<>
|
||||||
@ -144,15 +144,15 @@ inline T Atomic::PlatformCmpxchg<8>::operator()(T exchange_value,
|
|||||||
T compare_value,
|
T compare_value,
|
||||||
cmpxchg_memory_order order) const {
|
cmpxchg_memory_order order) const {
|
||||||
STATIC_ASSERT(8 == sizeof(T));
|
STATIC_ASSERT(8 == sizeof(T));
|
||||||
return cmpxchg_using_helper<jlong>(_Atomic_cmpxchg_long, exchange_value, dest, compare_value);
|
return cmpxchg_using_helper<int64_t>(_Atomic_cmpxchg_long, exchange_value, dest, compare_value);
|
||||||
}
|
}
|
||||||
|
|
||||||
template<>
|
template<>
|
||||||
template<typename T>
|
template<typename T>
|
||||||
inline T Atomic::PlatformLoad<8>::operator()(T const volatile* src) const {
|
inline T Atomic::PlatformLoad<8>::operator()(T const volatile* src) const {
|
||||||
STATIC_ASSERT(8 == sizeof(T));
|
STATIC_ASSERT(8 == sizeof(T));
|
||||||
volatile jlong dest;
|
volatile int64_t dest;
|
||||||
_Atomic_move_long(reinterpret_cast<const volatile jlong*>(src), reinterpret_cast<volatile jlong*>(&dest));
|
_Atomic_move_long(reinterpret_cast<const volatile int64_t*>(src), reinterpret_cast<volatile int64_t*>(&dest));
|
||||||
return PrimitiveConversions::cast<T>(dest);
|
return PrimitiveConversions::cast<T>(dest);
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -161,7 +161,7 @@ template<typename T>
|
|||||||
inline void Atomic::PlatformStore<8>::operator()(T store_value,
|
inline void Atomic::PlatformStore<8>::operator()(T store_value,
|
||||||
T volatile* dest) const {
|
T volatile* dest) const {
|
||||||
STATIC_ASSERT(8 == sizeof(T));
|
STATIC_ASSERT(8 == sizeof(T));
|
||||||
_Atomic_move_long(reinterpret_cast<const volatile jlong*>(&store_value), reinterpret_cast<volatile jlong*>(dest));
|
_Atomic_move_long(reinterpret_cast<const volatile int64_t*>(&store_value), reinterpret_cast<volatile int64_t*>(dest));
|
||||||
}
|
}
|
||||||
|
|
||||||
#endif // AMD64
|
#endif // AMD64
|
||||||
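The two hunks above route 64-bit loads and stores on 32-bit x86 through the assembly helper _Atomic_move_long instead of a plain pointer dereference, because the compiler may otherwise split an 8-byte access into two 4-byte moves. A minimal sketch of the same guarantee, using a GCC builtin as a stand-in for the helper (the builtin-based body is an assumption for illustration, not HotSpot code):

#include <stdint.h>

// Stand-in for _Atomic_move_long from linux_x86.s: perform the whole 8-byte
// transfer as one atomic operation (the real helper uses fild/fistp).
static inline void atomic_move_long(const volatile int64_t* src,
                                    volatile int64_t* dst) {
  // __atomic_load_n emits a single 8-byte atomic load on i486+, matching
  // what the hand-written assembly guarantees.
  *dst = __atomic_load_n(src, __ATOMIC_SEQ_CST);
}

int main() {
  volatile int64_t shared = 0x0123456789abcdefLL;
  volatile int64_t copy = 0;
  atomic_move_long(&shared, &copy);
  return copy == shared ? 0 : 1;
}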
@ -30,67 +30,6 @@

 // Implementation of class atomic

-#ifdef ARM
-
-/*
- * __kernel_cmpxchg
- *
- * Atomically store newval in *ptr if *ptr is equal to oldval for user space.
- * Return zero if *ptr was changed or non-zero if no exchange happened.
- * The C flag is also set if *ptr was changed to allow for assembly
- * optimization in the calling code.
- *
- */
-
-typedef int (__kernel_cmpxchg_t)(int oldval, int newval, volatile int *ptr);
-#define __kernel_cmpxchg (*(__kernel_cmpxchg_t *) 0xffff0fc0)
-
-
-
-/* Perform an atomic compare and swap: if the current value of `*PTR'
-   is OLDVAL, then write NEWVAL into `*PTR'.  Return the contents of
-   `*PTR' before the operation.*/
-static inline int arm_compare_and_swap(int newval,
-                                       volatile int *ptr,
-                                       int oldval) {
-  for (;;) {
-      int prev = *ptr;
-      if (prev != oldval)
-        return prev;
-
-      if (__kernel_cmpxchg (prev, newval, ptr) == 0)
-        // Success.
-        return prev;
-
-      // We failed even though prev == oldval.  Try again.
-    }
-}
-
-/* Atomically add an int to memory. */
-static inline int arm_add_and_fetch(int add_value, volatile int *ptr) {
-  for (;;) {
-      // Loop until a __kernel_cmpxchg succeeds.
-
-      int prev = *ptr;
-
-      if (__kernel_cmpxchg (prev, prev + add_value, ptr) == 0)
-        return prev + add_value;
-    }
-}
-
-/* Atomically write VALUE into `*PTR' and returns the previous
-   contents of `*PTR'. */
-static inline int arm_lock_test_and_set(int newval, volatile int *ptr) {
-  for (;;) {
-      // Loop until a __kernel_cmpxchg succeeds.
-      int prev = *ptr;
-
-      if (__kernel_cmpxchg (prev, newval, ptr) == 0)
-        return prev;
-    }
-}
-#endif // ARM
-
 template<size_t byte_size>
 struct Atomic::PlatformAdd
   : Atomic::AddAndFetch<Atomic::PlatformAdd<byte_size> >
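The removed block bounced every linux-zero ARM atomic through the kernel helper page mapped at 0xffff0fc0; after this change the port relies on the GCC __sync builtins unconditionally. A short sketch (not HotSpot code) of what the deleted arm_compare_and_swap loop reduces to once the builtin is used directly:

// __sync_val_compare_and_swap returns the value observed at *ptr, which
// equals oldval exactly when the swap happened: the same contract the
// __kernel_cmpxchg retry loop implemented by hand.
static inline int compare_and_swap(volatile int* ptr, int oldval, int newval) {
  return __sync_val_compare_and_swap(ptr, oldval, newval);
}

int main() {
  volatile int v = 1;
  int prev = compare_and_swap(&v, 1, 2);  // succeeds: prev == 1, v == 2
  int miss = compare_and_swap(&v, 1, 3);  // fails:    miss == 2, v stays 2
  return (prev == 1 && miss == 2 && v == 2) ? 0 : 1;
}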
@ -105,11 +44,7 @@ inline D Atomic::PlatformAdd<4>::add_and_fetch(I add_value, D volatile* dest) co
|
|||||||
STATIC_ASSERT(4 == sizeof(I));
|
STATIC_ASSERT(4 == sizeof(I));
|
||||||
STATIC_ASSERT(4 == sizeof(D));
|
STATIC_ASSERT(4 == sizeof(D));
|
||||||
|
|
||||||
#ifdef ARM
|
|
||||||
return add_using_helper<int>(arm_add_and_fetch, add_value, dest);
|
|
||||||
#else
|
|
||||||
return __sync_add_and_fetch(dest, add_value);
|
return __sync_add_and_fetch(dest, add_value);
|
||||||
#endif // ARM
|
|
||||||
}
|
}
|
||||||
|
|
||||||
template<>
|
template<>
|
||||||
@ -117,7 +52,6 @@ template<typename I, typename D>
|
|||||||
inline D Atomic::PlatformAdd<8>::add_and_fetch(I add_value, D volatile* dest) const {
|
inline D Atomic::PlatformAdd<8>::add_and_fetch(I add_value, D volatile* dest) const {
|
||||||
STATIC_ASSERT(8 == sizeof(I));
|
STATIC_ASSERT(8 == sizeof(I));
|
||||||
STATIC_ASSERT(8 == sizeof(D));
|
STATIC_ASSERT(8 == sizeof(D));
|
||||||
|
|
||||||
return __sync_add_and_fetch(dest, add_value);
|
return __sync_add_and_fetch(dest, add_value);
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -126,9 +60,6 @@ template<typename T>
|
|||||||
inline T Atomic::PlatformXchg<4>::operator()(T exchange_value,
|
inline T Atomic::PlatformXchg<4>::operator()(T exchange_value,
|
||||||
T volatile* dest) const {
|
T volatile* dest) const {
|
||||||
STATIC_ASSERT(4 == sizeof(T));
|
STATIC_ASSERT(4 == sizeof(T));
|
||||||
#ifdef ARM
|
|
||||||
return xchg_using_helper<int>(arm_lock_test_and_set, exchange_value, dest);
|
|
||||||
#else
|
|
||||||
// __sync_lock_test_and_set is a bizarrely named atomic exchange
|
// __sync_lock_test_and_set is a bizarrely named atomic exchange
|
||||||
// operation. Note that some platforms only support this with the
|
// operation. Note that some platforms only support this with the
|
||||||
// limitation that the only valid value to store is the immediate
|
// limitation that the only valid value to store is the immediate
|
||||||
@ -140,7 +71,6 @@ inline T Atomic::PlatformXchg<4>::operator()(T exchange_value,
|
|||||||
// barrier.
|
// barrier.
|
||||||
__sync_synchronize();
|
__sync_synchronize();
|
||||||
return result;
|
return result;
|
||||||
#endif // ARM
|
|
||||||
}
|
}
|
||||||
|
|
||||||
template<>
|
template<>
|
||||||
@ -164,11 +94,7 @@ inline T Atomic::PlatformCmpxchg<4>::operator()(T exchange_value,
|
|||||||
T compare_value,
|
T compare_value,
|
||||||
cmpxchg_memory_order order) const {
|
cmpxchg_memory_order order) const {
|
||||||
STATIC_ASSERT(4 == sizeof(T));
|
STATIC_ASSERT(4 == sizeof(T));
|
||||||
#ifdef ARM
|
|
||||||
return cmpxchg_using_helper<int>(arm_compare_and_swap, exchange_value, dest, compare_value);
|
|
||||||
#else
|
|
||||||
return __sync_val_compare_and_swap(dest, compare_value, exchange_value);
|
return __sync_val_compare_and_swap(dest, compare_value, exchange_value);
|
||||||
#endif // ARM
|
|
||||||
}
|
}
|
||||||
|
|
||||||
template<>
|
template<>
|
||||||
@ -185,8 +111,8 @@ template<>
|
|||||||
template<typename T>
|
template<typename T>
|
||||||
inline T Atomic::PlatformLoad<8>::operator()(T const volatile* src) const {
|
inline T Atomic::PlatformLoad<8>::operator()(T const volatile* src) const {
|
||||||
STATIC_ASSERT(8 == sizeof(T));
|
STATIC_ASSERT(8 == sizeof(T));
|
||||||
volatile jlong dest;
|
volatile int64_t dest;
|
||||||
os::atomic_copy64(reinterpret_cast<const volatile jlong*>(src), reinterpret_cast<volatile jlong*>(&dest));
|
os::atomic_copy64(reinterpret_cast<const volatile int64_t*>(src), reinterpret_cast<volatile int64_t*>(&dest));
|
||||||
return PrimitiveConversions::cast<T>(dest);
|
return PrimitiveConversions::cast<T>(dest);
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -195,7 +121,7 @@ template<typename T>
|
|||||||
inline void Atomic::PlatformStore<8>::operator()(T store_value,
|
inline void Atomic::PlatformStore<8>::operator()(T store_value,
|
||||||
T volatile* dest) const {
|
T volatile* dest) const {
|
||||||
STATIC_ASSERT(8 == sizeof(T));
|
STATIC_ASSERT(8 == sizeof(T));
|
||||||
os::atomic_copy64(reinterpret_cast<const volatile jlong*>(&store_value), reinterpret_cast<volatile jlong*>(dest));
|
os::atomic_copy64(reinterpret_cast<const volatile int64_t*>(&store_value), reinterpret_cast<volatile int64_t*>(dest));
|
||||||
}
|
}
|
||||||
|
|
||||||
#endif // OS_CPU_LINUX_ZERO_VM_ATOMIC_LINUX_ZERO_HPP
|
#endif // OS_CPU_LINUX_ZERO_VM_ATOMIC_LINUX_ZERO_HPP
|
||||||
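The PlatformXchg hunk above keeps the commented __sync_lock_test_and_set path: despite its name, that builtin is an atomic exchange, but GCC only guarantees acquire-barrier semantics for it, so the code follows it with __sync_synchronize() to honor the full-barrier contract of HotSpot's Atomic operations. A self-contained sketch of the idiom (not HotSpot code):

static inline int atomic_xchg_full_fence(int exchange_value, volatile int* dest) {
  int result = __sync_lock_test_and_set(dest, exchange_value);
  __sync_synchronize();  // upgrade acquire-only semantics to a full barrier
  return result;
}

int main() {
  volatile int v = 5;
  int old = atomic_xchg_full_fence(9, &v);
  return (old == 5 && v == 9) ? 0 : 1;
}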
@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1999, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2017, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it

@ -28,15 +28,15 @@
 //
 // NOTE: we are back in class os here, not Solaris
 //
-static jint  (*atomic_xchg_func)        (jint,  volatile jint*);
-static jint  (*atomic_cmpxchg_func)     (jint,  volatile jint*,  jint);
-static jlong (*atomic_cmpxchg_long_func)(jlong, volatile jlong*, jlong);
-static jint  (*atomic_add_func)         (jint,  volatile jint*);
+static int32_t (*atomic_xchg_func)        (int32_t, volatile int32_t*);
+static int32_t (*atomic_cmpxchg_func)     (int32_t, volatile int32_t*, int32_t);
+static int64_t (*atomic_cmpxchg_long_func)(int64_t, volatile int64_t*, int64_t);
+static int32_t (*atomic_add_func)         (int32_t, volatile int32_t*);

-static jint  atomic_xchg_bootstrap        (jint,  volatile jint*);
-static jint  atomic_cmpxchg_bootstrap     (jint,  volatile jint*,  jint);
-static jlong atomic_cmpxchg_long_bootstrap(jlong, volatile jlong*, jlong);
-static jint  atomic_add_bootstrap         (jint,  volatile jint*);
+static int32_t atomic_xchg_bootstrap        (int32_t, volatile int32_t*);
+static int32_t atomic_cmpxchg_bootstrap     (int32_t, volatile int32_t*, int32_t);
+static int64_t atomic_cmpxchg_long_bootstrap(int64_t, volatile int64_t*, int64_t);
+static int32_t atomic_add_bootstrap         (int32_t, volatile int32_t*);

 static void setup_fpu() {}
@ -28,16 +28,16 @@
 // For Sun Studio - implementation is in solaris_x86_64.il.

 extern "C" {
-  jint _Atomic_add(jint add_value, volatile jint* dest);
-  jlong _Atomic_add_long(jlong add_value, volatile jlong* dest);
+  int32_t _Atomic_add(int32_t add_value, volatile int32_t* dest);
+  int64_t _Atomic_add_long(int64_t add_value, volatile int64_t* dest);

-  jint _Atomic_xchg(jint exchange_value, volatile jint* dest);
-  jbyte _Atomic_cmpxchg_byte(jbyte exchange_value, volatile jbyte* dest,
-                             jbyte compare_value);
-  jint _Atomic_cmpxchg(jint exchange_value, volatile jint* dest,
-                       jint compare_value);
-  jlong _Atomic_cmpxchg_long(jlong exchange_value, volatile jlong* dest,
-                             jlong compare_value);
+  int32_t _Atomic_xchg(int32_t exchange_value, volatile int32_t* dest);
+  int8_t  _Atomic_cmpxchg_byte(int8_t exchange_value, volatile int8_t* dest,
+                               int8_t compare_value);
+  int32_t _Atomic_cmpxchg(int32_t exchange_value, volatile int32_t* dest,
+                          int32_t compare_value);
+  int64_t _Atomic_cmpxchg_long(int64_t exchange_value, volatile int64_t* dest,
+                               int64_t compare_value);
 }

 template<size_t byte_size>

@ -55,8 +55,8 @@ inline D Atomic::PlatformAdd<4>::add_and_fetch(I add_value, D volatile* dest) co
   STATIC_ASSERT(4 == sizeof(I));
   STATIC_ASSERT(4 == sizeof(D));
   return PrimitiveConversions::cast<D>(
-    _Atomic_add(PrimitiveConversions::cast<jint>(add_value),
-                reinterpret_cast<jint volatile*>(dest)));
+    _Atomic_add(PrimitiveConversions::cast<int32_t>(add_value),
+                reinterpret_cast<int32_t volatile*>(dest)));
 }

 // Not using add_using_helper; see comment for cmpxchg.

@ -66,8 +66,8 @@ inline D Atomic::PlatformAdd<8>::add_and_fetch(I add_value, D volatile* dest) co
   STATIC_ASSERT(8 == sizeof(I));
   STATIC_ASSERT(8 == sizeof(D));
   return PrimitiveConversions::cast<D>(
-    _Atomic_add_long(PrimitiveConversions::cast<jlong>(add_value),
-                     reinterpret_cast<jlong volatile*>(dest)));
+    _Atomic_add_long(PrimitiveConversions::cast<int64_t>(add_value),
+                     reinterpret_cast<int64_t volatile*>(dest)));
 }

 template<>

@ -76,11 +76,11 @@ inline T Atomic::PlatformXchg<4>::operator()(T exchange_value,
                                              T volatile* dest) const {
   STATIC_ASSERT(4 == sizeof(T));
   return PrimitiveConversions::cast<T>(
-    _Atomic_xchg(PrimitiveConversions::cast<jint>(exchange_value),
-                 reinterpret_cast<jint volatile*>(dest)));
+    _Atomic_xchg(PrimitiveConversions::cast<int32_t>(exchange_value),
+                 reinterpret_cast<int32_t volatile*>(dest)));
 }

-extern "C" jlong _Atomic_xchg_long(jlong exchange_value, volatile jlong* dest);
+extern "C" int64_t _Atomic_xchg_long(int64_t exchange_value, volatile int64_t* dest);

 template<>
 template<typename T>

@ -88,8 +88,8 @@ inline T Atomic::PlatformXchg<8>::operator()(T exchange_value,
                                              T volatile* dest) const {
   STATIC_ASSERT(8 == sizeof(T));
   return PrimitiveConversions::cast<T>(
-    _Atomic_xchg_long(PrimitiveConversions::cast<jlong>(exchange_value),
-                      reinterpret_cast<jlong volatile*>(dest)));
+    _Atomic_xchg_long(PrimitiveConversions::cast<int64_t>(exchange_value),
+                      reinterpret_cast<int64_t volatile*>(dest)));
 }

 // Not using cmpxchg_using_helper here, because some configurations of

@ -106,9 +106,9 @@ inline T Atomic::PlatformCmpxchg<1>::operator()(T exchange_value,
                                                 cmpxchg_memory_order order) const {
   STATIC_ASSERT(1 == sizeof(T));
   return PrimitiveConversions::cast<T>(
-    _Atomic_cmpxchg_byte(PrimitiveConversions::cast<jbyte>(exchange_value),
-                         reinterpret_cast<jbyte volatile*>(dest),
-                         PrimitiveConversions::cast<jbyte>(compare_value)));
+    _Atomic_cmpxchg_byte(PrimitiveConversions::cast<int8_t>(exchange_value),
+                         reinterpret_cast<int8_t volatile*>(dest),
+                         PrimitiveConversions::cast<int8_t>(compare_value)));
 }

 template<>

@ -119,9 +119,9 @@ inline T Atomic::PlatformCmpxchg<4>::operator()(T exchange_value,
                                                 cmpxchg_memory_order order) const {
   STATIC_ASSERT(4 == sizeof(T));
   return PrimitiveConversions::cast<T>(
-    _Atomic_cmpxchg(PrimitiveConversions::cast<jint>(exchange_value),
-                    reinterpret_cast<jint volatile*>(dest),
-                    PrimitiveConversions::cast<jint>(compare_value)));
+    _Atomic_cmpxchg(PrimitiveConversions::cast<int32_t>(exchange_value),
+                    reinterpret_cast<int32_t volatile*>(dest),
+                    PrimitiveConversions::cast<int32_t>(compare_value)));
 }

 template<>

@ -132,9 +132,9 @@ inline T Atomic::PlatformCmpxchg<8>::operator()(T exchange_value,
                                                 cmpxchg_memory_order order) const {
   STATIC_ASSERT(8 == sizeof(T));
   return PrimitiveConversions::cast<T>(
-    _Atomic_cmpxchg_long(PrimitiveConversions::cast<jlong>(exchange_value),
-                         reinterpret_cast<jlong volatile*>(dest),
-                         PrimitiveConversions::cast<jlong>(compare_value)));
+    _Atomic_cmpxchg_long(PrimitiveConversions::cast<int64_t>(exchange_value),
+                         reinterpret_cast<int64_t volatile*>(dest),
+                         PrimitiveConversions::cast<int64_t>(compare_value)));
 }

 #endif // OS_CPU_SOLARIS_X86_VM_ATOMIC_SOLARIS_X86_HPP
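The Solaris hunks above lean on PrimitiveConversions::cast to funnel arbitrary 4- or 8-byte template types into the fixed-width C entry points. A rough self-contained stand-in for what such a cast does (an assumption about the mechanism, not the HotSpot implementation): reinterpret the bits of one type as another of the same size without undefined behavior.

#include <stdint.h>
#include <string.h>

template <typename To, typename From>
To bit_cast(From from) {
  static_assert(sizeof(To) == sizeof(From), "must be the same size");
  To to;
  memcpy(&to, &from, sizeof(To));  // compilers fold this to a plain move
  return to;
}

int main() {
  float f = 1.0f;
  int32_t bits = bit_cast<int32_t>(f);  // IEEE-754 pattern 0x3f800000
  return bits == 0x3f800000 ? 0 : 1;
}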
@ -904,12 +904,12 @@ void os::Solaris::init_thread_fpu_state(void) {
 // until initialization is complete.
 // TODO - replace with .il implementation when compiler supports it.

-typedef jint  xchg_func_t        (jint,  volatile jint*);
-typedef jint  cmpxchg_func_t     (jint,  volatile jint*,  jint);
-typedef jlong cmpxchg_long_func_t(jlong, volatile jlong*, jlong);
-typedef jint  add_func_t         (jint,  volatile jint*);
+typedef int32_t xchg_func_t        (int32_t, volatile int32_t*);
+typedef int32_t cmpxchg_func_t     (int32_t, volatile int32_t*, int32_t);
+typedef int64_t cmpxchg_long_func_t(int64_t, volatile int64_t*, int64_t);
+typedef int32_t add_func_t         (int32_t, volatile int32_t*);

-jint os::atomic_xchg_bootstrap(jint exchange_value, volatile jint* dest) {
+int32_t os::atomic_xchg_bootstrap(int32_t exchange_value, volatile int32_t* dest) {
   // try to use the stub:
   xchg_func_t* func = CAST_TO_FN_PTR(xchg_func_t*, StubRoutines::atomic_xchg_entry());

@ -919,12 +919,12 @@ jint os::atomic_xchg_bootstrap(jint exchange_value, volatile jint* dest) {
   }
   assert(Threads::number_of_threads() == 0, "for bootstrap only");

-  jint old_value = *dest;
+  int32_t old_value = *dest;
   *dest = exchange_value;
   return old_value;
 }

-jint os::atomic_cmpxchg_bootstrap(jint exchange_value, volatile jint* dest, jint compare_value) {
+int32_t os::atomic_cmpxchg_bootstrap(int32_t exchange_value, volatile int32_t* dest, int32_t compare_value) {
   // try to use the stub:
   cmpxchg_func_t* func = CAST_TO_FN_PTR(cmpxchg_func_t*, StubRoutines::atomic_cmpxchg_entry());

@ -934,13 +934,13 @@ jint os::atomic_cmpxchg_bootstrap(jint exchange_value, volatile jint* dest, jint
   }
   assert(Threads::number_of_threads() == 0, "for bootstrap only");

-  jint old_value = *dest;
+  int32_t old_value = *dest;
   if (old_value == compare_value)
     *dest = exchange_value;
   return old_value;
 }

-jlong os::atomic_cmpxchg_long_bootstrap(jlong exchange_value, volatile jlong* dest, jlong compare_value) {
+int64_t os::atomic_cmpxchg_long_bootstrap(int64_t exchange_value, volatile int64_t* dest, int64_t compare_value) {
   // try to use the stub:
   cmpxchg_long_func_t* func = CAST_TO_FN_PTR(cmpxchg_long_func_t*, StubRoutines::atomic_cmpxchg_long_entry());

@ -950,13 +950,13 @@ jlong os::atomic_cmpxchg_long_bootstrap(jlong exchange_value, volatile jlong* de
   }
   assert(Threads::number_of_threads() == 0, "for bootstrap only");

-  jlong old_value = *dest;
+  int64_t old_value = *dest;
   if (old_value == compare_value)
     *dest = exchange_value;
   return old_value;
 }

-jint os::atomic_add_bootstrap(jint add_value, volatile jint* dest) {
+int32_t os::atomic_add_bootstrap(int32_t add_value, volatile int32_t* dest) {
   // try to use the stub:
   add_func_t* func = CAST_TO_FN_PTR(add_func_t*, StubRoutines::atomic_add_entry());
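The bootstrap functions above all follow one idiom: before the JIT-generated stubs exist, a function pointer targets a plain C fallback; the first call that finds a generated stub installs it, and later calls go straight to the stub. A simplified sketch of the pattern (names and the single-threaded setting are assumptions for illustration):

#include <stdint.h>

typedef int32_t xchg_func_t(int32_t, volatile int32_t*);

static xchg_func_t* atomic_xchg_stub = nullptr;  // set once stubs are generated

static int32_t atomic_xchg_bootstrap(int32_t exchange_value, volatile int32_t* dest);
static xchg_func_t* atomic_xchg_func = atomic_xchg_bootstrap;

static int32_t atomic_xchg_bootstrap(int32_t exchange_value, volatile int32_t* dest) {
  if (atomic_xchg_stub != nullptr) {
    atomic_xchg_func = atomic_xchg_stub;  // patch the pointer for future calls
    return (*atomic_xchg_stub)(exchange_value, dest);
  }
  // The non-atomic fallback is safe only while the VM is still
  // single-threaded, which is what the "for bootstrap only" assert checks.
  int32_t old_value = *dest;
  *dest = exchange_value;
  return old_value;
}

int main() {
  volatile int32_t v = 7;
  return (atomic_xchg_func(3, &v) == 7 && v == 3) ? 0 : 1;
}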
@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1999, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2017, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it

@ -31,15 +31,15 @@
 #ifdef AMD64
 static void setup_fpu() {}
 #else
-static jint  (*atomic_xchg_func)        (jint,  volatile jint*);
-static jint  (*atomic_cmpxchg_func)     (jint,  volatile jint*,  jint);
-static jlong (*atomic_cmpxchg_long_func)(jlong, volatile jlong*, jlong);
-static jint  (*atomic_add_func)         (jint,  volatile jint*);
+static int32_t (*atomic_xchg_func)        (int32_t, volatile int32_t*);
+static int32_t (*atomic_cmpxchg_func)     (int32_t, volatile int32_t*, int32_t);
+static int64_t (*atomic_cmpxchg_long_func)(int64_t, volatile int64_t*, int64_t);
+static int32_t (*atomic_add_func)         (int32_t, volatile int32_t*);

-static jint  atomic_xchg_bootstrap        (jint,  volatile jint*);
-static jint  atomic_cmpxchg_bootstrap     (jint,  volatile jint*,  jint);
-static jlong atomic_cmpxchg_long_bootstrap(jlong, volatile jlong*, jlong);
-static jint  atomic_add_bootstrap         (jint,  volatile jint*);
+static int32_t atomic_xchg_bootstrap        (int32_t, volatile int32_t*);
+static int32_t atomic_cmpxchg_bootstrap     (int32_t, volatile int32_t*, int32_t);
+static int64_t atomic_cmpxchg_long_bootstrap(int64_t, volatile int64_t*, int64_t);
+static int32_t atomic_add_bootstrap         (int32_t, volatile int32_t*);

 static void setup_fpu();
 #endif // AMD64
@ -54,13 +54,13 @@ struct Atomic::PlatformAdd
 template<>
 template<typename I, typename D>
 inline D Atomic::PlatformAdd<4>::add_and_fetch(I add_value, D volatile* dest) const {
-  return add_using_helper<jint>(os::atomic_add_func, add_value, dest);
+  return add_using_helper<int32_t>(os::atomic_add_func, add_value, dest);
 }

 template<>
 template<typename I, typename D>
 inline D Atomic::PlatformAdd<8>::add_and_fetch(I add_value, D volatile* dest) const {
-  return add_using_helper<intptr_t>(os::atomic_add_ptr_func, add_value, dest);
+  return add_using_helper<int64_t>(os::atomic_add_long_func, add_value, dest);
 }

 #define DEFINE_STUB_XCHG(ByteSize, StubType, StubName) \

@ -72,8 +72,8 @@ inline D Atomic::PlatformAdd<8>::add_and_fetch(I add_value, D volatile* dest) co
     return xchg_using_helper<StubType>(StubName, exchange_value, dest); \
   }

-DEFINE_STUB_XCHG(4, jint, os::atomic_xchg_func)
-DEFINE_STUB_XCHG(8, jlong, os::atomic_xchg_long_func)
+DEFINE_STUB_XCHG(4, int32_t, os::atomic_xchg_func)
+DEFINE_STUB_XCHG(8, int64_t, os::atomic_xchg_long_func)

 #undef DEFINE_STUB_XCHG

@ -88,9 +88,9 @@ DEFINE_STUB_XCHG(8, jlong, os::atomic_xchg_long_func)
     return cmpxchg_using_helper<StubType>(StubName, exchange_value, dest, compare_value); \
   }

-DEFINE_STUB_CMPXCHG(1, jbyte, os::atomic_cmpxchg_byte_func)
-DEFINE_STUB_CMPXCHG(4, jint, os::atomic_cmpxchg_func)
-DEFINE_STUB_CMPXCHG(8, jlong, os::atomic_cmpxchg_long_func)
+DEFINE_STUB_CMPXCHG(1, int8_t, os::atomic_cmpxchg_byte_func)
+DEFINE_STUB_CMPXCHG(4, int32_t, os::atomic_cmpxchg_func)
+DEFINE_STUB_CMPXCHG(8, int64_t, os::atomic_cmpxchg_long_func)

 #undef DEFINE_STUB_CMPXCHG

@ -162,10 +162,10 @@ inline T Atomic::PlatformCmpxchg<8>::operator()(T exchange_value,
                                                 T compare_value,
                                                 cmpxchg_memory_order order) const {
   STATIC_ASSERT(8 == sizeof(T));
-  jint ex_lo  = (jint)exchange_value;
-  jint ex_hi  = *( ((jint*)&exchange_value) + 1 );
-  jint cmp_lo = (jint)compare_value;
-  jint cmp_hi = *( ((jint*)&compare_value) + 1 );
+  int32_t ex_lo  = (int32_t)exchange_value;
+  int32_t ex_hi  = *( ((int32_t*)&exchange_value) + 1 );
+  int32_t cmp_lo = (int32_t)compare_value;
+  int32_t cmp_hi = *( ((int32_t*)&compare_value) + 1 );
   __asm {
     push ebx
     push edi
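The last hunk above feeds a 64-bit operand to 32-bit Windows' cmpxchg8b as two 32-bit halves (edx:eax for the compare value, ecx:ebx for the exchange value). A small illustration of the lo/hi split, assuming a little-endian x86 layout as the surrounding code does:

#include <stdint.h>

int main() {
  int64_t value = 0x1122334455667788LL;
  int32_t lo = (int32_t)value;             // 0x55667788, low half
  int32_t hi = *(((int32_t*)&value) + 1);  // 0x11223344, high half
  // Recombining the halves restores the original operand.
  int64_t rebuilt = ((int64_t)(uint32_t)lo) | ((int64_t)hi << 32);
  return rebuilt == value ? 0 : 1;
}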
@ -218,17 +218,17 @@ void os::initialize_thread(Thread* thr) {

 // Atomics and Stub Functions

-typedef jint      xchg_func_t            (jint, volatile jint*);
-typedef intptr_t  xchg_long_func_t       (jlong, volatile jlong*);
-typedef jint      cmpxchg_func_t         (jint, volatile jint*, jint);
-typedef jbyte     cmpxchg_byte_func_t    (jbyte, volatile jbyte*, jbyte);
-typedef jlong     cmpxchg_long_func_t    (jlong, volatile jlong*, jlong);
-typedef jint      add_func_t             (jint, volatile jint*);
-typedef intptr_t  add_ptr_func_t         (intptr_t, volatile intptr_t*);
+typedef int32_t   xchg_func_t            (int32_t, volatile int32_t*);
+typedef int64_t   xchg_long_func_t       (int64_t, volatile int64_t*);
+typedef int32_t   cmpxchg_func_t         (int32_t, volatile int32_t*, int32_t);
+typedef int8_t    cmpxchg_byte_func_t    (int8_t, volatile int8_t*, int8_t);
+typedef int64_t   cmpxchg_long_func_t    (int64_t, volatile int64_t*, int64_t);
+typedef int32_t   add_func_t             (int32_t, volatile int32_t*);
+typedef int64_t   add_long_func_t        (int64_t, volatile int64_t*);

 #ifdef AMD64

-jint os::atomic_xchg_bootstrap(jint exchange_value, volatile jint* dest) {
+int32_t os::atomic_xchg_bootstrap(int32_t exchange_value, volatile int32_t* dest) {
   // try to use the stub:
   xchg_func_t* func = CAST_TO_FN_PTR(xchg_func_t*, StubRoutines::atomic_xchg_entry());

@ -238,12 +238,12 @@ jint os::atomic_xchg_bootstrap(jint exchange_value, volatile jint* dest) {
   }
   assert(Threads::number_of_threads() == 0, "for bootstrap only");

-  jint old_value = *dest;
+  int32_t old_value = *dest;
   *dest = exchange_value;
   return old_value;
 }

-intptr_t os::atomic_xchg_long_bootstrap(jlong exchange_value, volatile jlong* dest) {
+int64_t os::atomic_xchg_long_bootstrap(int64_t exchange_value, volatile int64_t* dest) {
   // try to use the stub:
   xchg_long_func_t* func = CAST_TO_FN_PTR(xchg_long_func_t*, StubRoutines::atomic_xchg_long_entry());

@ -253,13 +253,13 @@ intptr_t os::atomic_xchg_long_bootstrap(jlong exchange_value, volatile jlong* de
   }
   assert(Threads::number_of_threads() == 0, "for bootstrap only");

-  intptr_t old_value = *dest;
+  int64_t old_value = *dest;
   *dest = exchange_value;
   return old_value;
 }


-jint os::atomic_cmpxchg_bootstrap(jint exchange_value, volatile jint* dest, jint compare_value) {
+int32_t os::atomic_cmpxchg_bootstrap(int32_t exchange_value, volatile int32_t* dest, int32_t compare_value) {
   // try to use the stub:
   cmpxchg_func_t* func = CAST_TO_FN_PTR(cmpxchg_func_t*, StubRoutines::atomic_cmpxchg_entry());

@ -269,13 +269,13 @@ jint os::atomic_cmpxchg_bootstrap(jint exchange_value, volatile jint* dest, jint
   }
   assert(Threads::number_of_threads() == 0, "for bootstrap only");

-  jint old_value = *dest;
+  int32_t old_value = *dest;
   if (old_value == compare_value)
     *dest = exchange_value;
   return old_value;
 }

-jbyte os::atomic_cmpxchg_byte_bootstrap(jbyte exchange_value, volatile jbyte* dest, jbyte compare_value) {
+int8_t os::atomic_cmpxchg_byte_bootstrap(int8_t exchange_value, volatile int8_t* dest, int8_t compare_value) {
   // try to use the stub:
   cmpxchg_byte_func_t* func = CAST_TO_FN_PTR(cmpxchg_byte_func_t*, StubRoutines::atomic_cmpxchg_byte_entry());

@ -285,7 +285,7 @@ jbyte os::atomic_cmpxchg_byte_bootstrap(jbyte exchange_value, volatile jbyte* de
   }
   assert(Threads::number_of_threads() == 0, "for bootstrap only");

-  jbyte old_value = *dest;
+  int8_t old_value = *dest;
   if (old_value == compare_value)
     *dest = exchange_value;
   return old_value;

@ -293,7 +293,7 @@ jbyte os::atomic_cmpxchg_byte_bootstrap(jbyte exchange_value, volatile jbyte* de

 #endif // AMD64

-jlong os::atomic_cmpxchg_long_bootstrap(jlong exchange_value, volatile jlong* dest, jlong compare_value) {
+int64_t os::atomic_cmpxchg_long_bootstrap(int64_t exchange_value, volatile int64_t* dest, int64_t compare_value) {
   // try to use the stub:
   cmpxchg_long_func_t* func = CAST_TO_FN_PTR(cmpxchg_long_func_t*, StubRoutines::atomic_cmpxchg_long_entry());

@ -303,7 +303,7 @@ jlong os::atomic_cmpxchg_long_bootstrap(jlong exchange_value, volatile jlong* de
   }
   assert(Threads::number_of_threads() == 0, "for bootstrap only");

-  jlong old_value = *dest;
+  int64_t old_value = *dest;
   if (old_value == compare_value)
     *dest = exchange_value;
   return old_value;

@ -311,7 +311,7 @@ jlong os::atomic_cmpxchg_long_bootstrap(jlong exchange_value, volatile jlong* de

 #ifdef AMD64

-jint os::atomic_add_bootstrap(jint add_value, volatile jint* dest) {
+int32_t os::atomic_add_bootstrap(int32_t add_value, volatile int32_t* dest) {
   // try to use the stub:
   add_func_t* func = CAST_TO_FN_PTR(add_func_t*, StubRoutines::atomic_add_entry());

@ -324,12 +324,12 @@ jint os::atomic_add_bootstrap(jint add_value, volatile jint* dest) {
   return (*dest) += add_value;
 }

-intptr_t os::atomic_add_ptr_bootstrap(intptr_t add_value, volatile intptr_t* dest) {
+int64_t os::atomic_add_long_bootstrap(int64_t add_value, volatile int64_t* dest) {
   // try to use the stub:
-  add_ptr_func_t* func = CAST_TO_FN_PTR(add_ptr_func_t*, StubRoutines::atomic_add_ptr_entry());
+  add_long_func_t* func = CAST_TO_FN_PTR(add_long_func_t*, StubRoutines::atomic_add_long_entry());

   if (func != NULL) {
-    os::atomic_add_ptr_func = func;
+    os::atomic_add_long_func = func;
     return (*func)(add_value, dest);
   }
   assert(Threads::number_of_threads() == 0, "for bootstrap only");

@ -342,7 +342,7 @@ xchg_long_func_t* os::atomic_xchg_long_func = os::atomic_xchg_long_bootstr
 cmpxchg_func_t*      os::atomic_cmpxchg_func      = os::atomic_cmpxchg_bootstrap;
 cmpxchg_byte_func_t* os::atomic_cmpxchg_byte_func = os::atomic_cmpxchg_byte_bootstrap;
 add_func_t*          os::atomic_add_func          = os::atomic_add_bootstrap;
-add_ptr_func_t*      os::atomic_add_ptr_func      = os::atomic_add_ptr_bootstrap;
+add_long_func_t*     os::atomic_add_long_func     = os::atomic_add_long_bootstrap;

 #endif // AMD64
@ -29,32 +29,32 @@
 // NOTE: we are back in class os here, not win32
 //
 #ifdef AMD64
-static jint     (*atomic_xchg_func)          (jint, volatile jint*);
-static intptr_t (*atomic_xchg_long_func)     (jlong, volatile jlong*);
+static int32_t  (*atomic_xchg_func)          (int32_t, volatile int32_t*);
+static int64_t  (*atomic_xchg_long_func)     (int64_t, volatile int64_t*);

-static jint     (*atomic_cmpxchg_func)       (jint, volatile jint*, jint);
-static jbyte    (*atomic_cmpxchg_byte_func)  (jbyte, volatile jbyte*, jbyte);
-static jlong    (*atomic_cmpxchg_long_func)  (jlong, volatile jlong*, jlong);
+static int32_t  (*atomic_cmpxchg_func)       (int32_t, volatile int32_t*, int32_t);
+static int8_t   (*atomic_cmpxchg_byte_func)  (int8_t, volatile int8_t*, int8_t);
+static int64_t  (*atomic_cmpxchg_long_func)  (int64_t, volatile int64_t*, int64_t);

-static jint     (*atomic_add_func)           (jint, volatile jint*);
-static intptr_t (*atomic_add_ptr_func)       (intptr_t, volatile intptr_t*);
+static int32_t  (*atomic_add_func)           (int32_t, volatile int32_t*);
+static int64_t  (*atomic_add_long_func)      (int64_t, volatile int64_t*);

-static jint     atomic_xchg_bootstrap        (jint, volatile jint*);
-static intptr_t atomic_xchg_long_bootstrap   (jlong, volatile jlong*);
+static int32_t  atomic_xchg_bootstrap        (int32_t, volatile int32_t*);
+static int64_t  atomic_xchg_long_bootstrap   (int64_t, volatile int64_t*);

-static jint     atomic_cmpxchg_bootstrap     (jint, volatile jint*, jint);
-static jbyte    atomic_cmpxchg_byte_bootstrap(jbyte, volatile jbyte*, jbyte);
+static int32_t  atomic_cmpxchg_bootstrap     (int32_t, volatile int32_t*, int32_t);
+static int8_t   atomic_cmpxchg_byte_bootstrap(int8_t, volatile int8_t*, int8_t);
 #else

-static jlong    (*atomic_cmpxchg_long_func)  (jlong, volatile jlong*, jlong);
+static int64_t  (*atomic_cmpxchg_long_func)  (int64_t, volatile int64_t*, int64_t);

 #endif // AMD64

-static jlong    atomic_cmpxchg_long_bootstrap(jlong, volatile jlong*, jlong);
+static int64_t  atomic_cmpxchg_long_bootstrap(int64_t, volatile int64_t*, int64_t);

 #ifdef AMD64
-static jint     atomic_add_bootstrap         (jint, volatile jint*);
-static intptr_t atomic_add_ptr_bootstrap     (intptr_t, volatile intptr_t*);
+static int32_t  atomic_add_bootstrap         (int32_t, volatile int32_t*);
+static int64_t  atomic_add_long_bootstrap    (int64_t, volatile int64_t*);
 #endif // AMD64

 static void setup_fpu();
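The running theme of the os_cpu hunks above is swapping the JNI typedefs (jint, jbyte, jlong) for the fixed-width <stdint.h> types in the atomic bootstrap machinery. The rename is ABI-neutral only because those typedefs already have exactly these widths on every HotSpot target. A compile-time check documenting that assumption (the stand-in typedefs are illustrative; real code gets the JNI types from jni_md.h):

#include <stdint.h>

typedef int jint_stand_in;               // assumption: 32-bit on all targets
typedef signed char jbyte_stand_in;      // assumption: 8-bit
typedef long long jlong_stand_in;        // assumption: 64-bit

static_assert(sizeof(jint_stand_in)  == sizeof(int32_t), "jint is 32 bits");
static_assert(sizeof(jbyte_stand_in) == sizeof(int8_t),  "jbyte is 8 bits");
static_assert(sizeof(jlong_stand_in) == sizeof(int64_t), "jlong is 64 bits");

int main() { return 0; }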
@ -86,8 +86,6 @@

 #define JAVA_CLASSFILE_MAGIC              0xCAFEBABE
 #define JAVA_MIN_SUPPORTED_VERSION        45
-#define JAVA_MAX_SUPPORTED_VERSION        54
-#define JAVA_MAX_SUPPORTED_MINOR_VERSION  0

 // Used for two backward compatibility reasons:
 // - to check for new additions to the class file format in JDK1.5

@ -110,6 +108,8 @@

 #define JAVA_10_VERSION                   54

+#define JAVA_11_VERSION                   55
+
 void ClassFileParser::set_class_bad_constant_seen(short bad_constant) {
   assert((bad_constant == 19 || bad_constant == 20) && _major_version >= JAVA_9_VERSION,
          "Unexpected bad constant pool entry");

@ -4642,11 +4642,11 @@ static bool has_illegal_visibility(jint flags) {
 }

 static bool is_supported_version(u2 major, u2 minor){
-  const u2 max_version = JAVA_MAX_SUPPORTED_VERSION;
+  const u2 max_version = JVM_CLASSFILE_MAJOR_VERSION;
   return (major >= JAVA_MIN_SUPPORTED_VERSION) &&
          (major <= max_version) &&
          ((major != max_version) ||
-          (minor <= JAVA_MAX_SUPPORTED_MINOR_VERSION));
+          (minor <= JVM_CLASSFILE_MINOR_VERSION));
 }

 void ClassFileParser::verify_legal_field_modifiers(jint flags,

@ -5808,8 +5808,8 @@ void ClassFileParser::parse_stream(const ClassFileStream* const stream,
                        _class_name->as_C_string(),
                        _major_version,
                        _minor_version,
-                       JAVA_MAX_SUPPORTED_VERSION,
-                       JAVA_MAX_SUPPORTED_MINOR_VERSION);
+                       JVM_CLASSFILE_MAJOR_VERSION,
+                       JVM_CLASSFILE_MINOR_VERSION);
     return;
   }
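The classFileParser.cpp hunks above retire the hard-coded JAVA_MAX_SUPPORTED_VERSION constants in favor of the shared JVM_CLASSFILE_MAJOR_VERSION/JVM_CLASSFILE_MINOR_VERSION macros and add JAVA_11_VERSION (55). A worked, self-contained version of the acceptance predicate, with the JDK 11 values substituted:

#include <stdint.h>
typedef uint16_t u2;

static bool is_supported_version(u2 major, u2 minor) {
  const u2 min_major = 45;  // JAVA_MIN_SUPPORTED_VERSION, i.e. JDK 1.1
  const u2 max_major = 55;  // JVM_CLASSFILE_MAJOR_VERSION for JDK 11
  const u2 max_minor = 0;   // JVM_CLASSFILE_MINOR_VERSION
  return (major >= min_major) &&
         (major <= max_major) &&
         // at the newest major version, only minor 0 is accepted
         ((major != max_major) || (minor <= max_minor));
}

int main() {
  // 53.0 (JDK 9) and 55.0 (JDK 11) pass; 55.1 and 56.0 are rejected.
  return (is_supported_version(53, 0) && is_supported_version(55, 0) &&
          !is_supported_version(55, 1) && !is_supported_version(56, 0)) ? 0 : 1;
}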
@ -802,6 +802,14 @@ void ClassLoader::setup_boot_search_path(const char *class_path) {
   int end = 0;
   bool set_base_piece = true;

+#if INCLUDE_CDS
+  if (DumpSharedSpaces) {
+    if (!Arguments::has_jimage()) {
+      vm_exit_during_initialization("CDS is not supported in exploded JDK build", NULL);
+    }
+  }
+#endif
+
   // Iterate over class path entries
   for (int start = 0; start < len; start = end) {
     while (class_path[end] && class_path[end] != os::path_separator()[0]) {
@ -26,6 +26,7 @@
 #include "classfile/bytecodeAssembler.hpp"
 #include "classfile/defaultMethods.hpp"
 #include "classfile/symbolTable.hpp"
+#include "classfile/systemDictionary.hpp"
 #include "logging/log.hpp"
 #include "logging/logStream.hpp"
 #include "memory/allocation.hpp"

@ -683,10 +684,11 @@ class FindMethodsByErasedSig : public HierarchyVisitor<FindMethodsByErasedSig> {
   Symbol* _method_name;
   Symbol* _method_signature;
   StatefulMethodFamily*  _family;
+  bool _cur_class_is_interface;

  public:
-  FindMethodsByErasedSig(Symbol* name, Symbol* signature) :
-      _method_name(name), _method_signature(signature),
+  FindMethodsByErasedSig(Symbol* name, Symbol* signature, bool is_interf) :
+      _method_name(name), _method_signature(signature), _cur_class_is_interface(is_interf),
       _family(NULL) {}

   void get_discovered_family(MethodFamily** family) {

@ -709,14 +711,17 @@ class FindMethodsByErasedSig : public HierarchyVisitor<FindMethodsByErasedSig> {
     InstanceKlass* iklass = current_class();

     Method* m = iklass->find_method(_method_name, _method_signature);
-    // private interface methods are not candidates for default methods
-    // invokespecial to private interface methods doesn't use default method logic
-    // private class methods are not candidates for default methods,
-    // private methods do not override default methods, so need to perform
-    // default method inheritance without including private methods
-    // The overpasses are your supertypes' errors, we do not include them
-    // future: take access controls into account for superclass methods
-    if (m != NULL && !m->is_static() && !m->is_overpass() && !m->is_private()) {
+    // Private interface methods are not candidates for default methods.
+    // invokespecial to private interface methods doesn't use default method logic.
+    // Private class methods are not candidates for default methods.
+    // Private methods do not override default methods, so need to perform
+    // default method inheritance without including private methods.
+    // The overpasses are your supertypes' errors, we do not include them.
+    // Non-public methods in java.lang.Object are not candidates for default
+    // methods.
+    // Future: take access controls into account for superclass methods
+    if (m != NULL && !m->is_static() && !m->is_overpass() && !m->is_private() &&
+        (!_cur_class_is_interface || !SystemDictionary::is_nonpublic_Object_method(m))) {
       if (_family == NULL) {
         _family = new StatefulMethodFamily();
       }

@ -726,8 +731,8 @@ class FindMethodsByErasedSig : public HierarchyVisitor<FindMethodsByErasedSig> {
         scope->add_mark(restorer);
       } else {
         // This is the rule that methods in classes "win" (bad word) over
-        // methods in interfaces. This works because of single inheritance
-        // private methods in classes do not "win", they will be found
+        // methods in interfaces. This works because of single inheritance.
+        // Private methods in classes do not "win", they will be found
         // first on searching, but overriding for invokevirtual needs
         // to find default method candidates for the same signature
         _family->set_target_if_empty(m);

@ -745,10 +750,10 @@ static void create_defaults_and_exceptions(

 static void generate_erased_defaults(
      InstanceKlass* klass, GrowableArray<EmptyVtableSlot*>* empty_slots,
-     EmptyVtableSlot* slot, TRAPS) {
+     EmptyVtableSlot* slot, bool is_intf, TRAPS) {

   // sets up a set of methods with the same exact erased signature
-  FindMethodsByErasedSig visitor(slot->name(), slot->signature());
+  FindMethodsByErasedSig visitor(slot->name(), slot->signature(), is_intf);
   visitor.run(klass);

   MethodFamily* family;

@ -817,7 +822,7 @@ void DefaultMethods::generate_default_methods(
         slot->print_on(&ls);
         ls.cr();
       }
-      generate_erased_defaults(klass, empty_slots, slot, klass->is_interface(), CHECK);
+      generate_erased_defaults(klass, empty_slots, slot, klass->is_interface(), CHECK);
     }
   log_debug(defaultmethods)("Creating defaults and overpasses...");
   create_defaults_and_exceptions(empty_slots, klass, CHECK);
@ -654,6 +654,12 @@ public:
   static bool is_platform_class_loader(oop class_loader);
   static void clear_invoke_method_table();

+  // Returns TRUE if the method is a non-public member of class java.lang.Object.
+  static bool is_nonpublic_Object_method(Method* m) {
+    assert(m != NULL, "Unexpected NULL Method*");
+    return !m->is_public() && m->method_holder() == SystemDictionary::Object_klass();
+  }
+
 protected:
   static InstanceKlass* find_shared_class(Symbol* class_name);
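The new SystemDictionary::is_nonpublic_Object_method helper backs the defaultMethods.cpp guard above: when the class being visited is an interface, a method that matches a non-public java.lang.Object member (clone and finalize are protected) must not become a default-method candidate. A self-contained analog of the combined check (a modeled sketch, not the HotSpot types):

#include <string>
#include <set>

// Model of the non-public java.lang.Object members the guard screens out.
static bool is_nonpublic_object_method(const std::string& name) {
  static const std::set<std::string> nonpublic = { "clone", "finalize" };
  return nonpublic.count(name) != 0;
}

static bool is_default_candidate(const std::string& name,
                                 bool is_private,
                                 bool cur_class_is_interface) {
  return !is_private &&
         (!cur_class_is_interface || !is_nonpublic_object_method(name));
}

int main() {
  // An interface re-declaring clone() gets no default-method treatment for
  // it, while an ordinary method such as run() still qualifies.
  return (!is_default_candidate("clone", false, true) &&
          is_default_candidate("run", false, true)) ? 0 : 1;
}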
@ -997,8 +997,8 @@
   do_name(     montgomerySquare_name,           "implMontgomerySquare")                                  \
   do_signature(montgomerySquare_signature,      "([I[IIJ[I)[I")                                          \
                                                                                                          \
-  do_class(java_util_ArraysSupport, "java/util/ArraysSupport")                                           \
-  do_intrinsic(_vectorizedMismatch, java_util_ArraysSupport, vectorizedMismatch_name, vectorizedMismatch_signature, F_S) \
+  do_class(jdk_internal_util_ArraysSupport, "jdk/internal/util/ArraysSupport")                           \
+  do_intrinsic(_vectorizedMismatch, jdk_internal_util_ArraysSupport, vectorizedMismatch_name, vectorizedMismatch_signature, F_S) \
   do_name(vectorizedMismatch_name, "vectorizedMismatch")                                                 \
   do_signature(vectorizedMismatch_signature, "(Ljava/lang/Object;JLjava/lang/Object;JII)I")              \
                                                                                                          \
@ -344,6 +344,7 @@ class CompiledStaticCall : public ResourceObj {
   // Code
   static address emit_to_interp_stub(CodeBuffer &cbuf, address mark = NULL);
   static int to_interp_stub_size();
+  static int to_trampoline_stub_size();
   static int reloc_to_interp_stub();
   static void emit_to_aot_stub(CodeBuffer &cbuf, address mark = NULL);
   static int to_aot_stub_size();
@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it

@ -1485,16 +1485,18 @@ bool nmethod::do_unloading_scopes(BoolObjectClosure* is_alive, bool unloading_oc

 bool nmethod::do_unloading_oops(address low_boundary, BoolObjectClosure* is_alive, bool unloading_occurred) {
   // Compiled code
-  {
-    RelocIterator iter(this, low_boundary);
-    while (iter.next()) {
-      if (iter.type() == relocInfo::oop_type) {
-        if (unload_if_dead_at(&iter, is_alive, unloading_occurred)) {
-          return true;
+
+  // Prevent extra code cache walk for platforms that don't have immediate oops.
+  if (relocInfo::mustIterateImmediateOopsInCode()) {
+    RelocIterator iter(this, low_boundary);
+    while (iter.next()) {
+      if (iter.type() == relocInfo::oop_type) {
+        if (unload_if_dead_at(&iter, is_alive, unloading_occurred)) {
+          return true;
+        }
       }
     }
   }
-  }

   return do_unloading_scopes(is_alive, unloading_occurred);
 }

@ -1584,18 +1586,21 @@ void nmethod::oops_do(OopClosure* f, bool allow_zombie) {
     // (See comment above.)
   }

-  RelocIterator iter(this, low_boundary);
+  // Prevent extra code cache walk for platforms that don't have immediate oops.
+  if (relocInfo::mustIterateImmediateOopsInCode()) {
+    RelocIterator iter(this, low_boundary);

     while (iter.next()) {
       if (iter.type() == relocInfo::oop_type ) {
         oop_Relocation* r = iter.oop_reloc();
         // In this loop, we must only follow those oops directly embedded in
         // the code.  Other oops (oop_index>0) are seen as part of scopes_oops.
         assert(1 == (r->oop_is_immediate()) +
                (r->oop_addr() >= oops_begin() && r->oop_addr() < oops_end()),
                "oop must be found in exactly one place");
         if (r->oop_is_immediate() && r->oop_value() != NULL) {
           f->do_oop(r->oop_addr());
+        }
       }
     }
   }

@ -1620,7 +1625,7 @@ bool nmethod::test_set_oops_do_mark() {
   assert(nmethod::oops_do_marking_is_active(), "oops_do_marking_prologue must be called");
   if (_oops_do_mark_link == NULL) {
     // Claim this nmethod for this thread to mark.
-    if (Atomic::cmpxchg(NMETHOD_SENTINEL, &_oops_do_mark_link, (nmethod*)NULL) == NULL) {
+    if (Atomic::replace_if_null(NMETHOD_SENTINEL, &_oops_do_mark_link)) {
      // Atomically append this nmethod (now claimed) to the head of the list:
       nmethod* observed_mark_nmethods = _oops_do_mark_nmethods;
       for (;;) {
@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it

@ -910,6 +910,10 @@ class oop_Relocation : public DataRelocation {
   }
   // an oop in the instruction stream
   static RelocationHolder spec_for_immediate() {
+    // If no immediate oops are generated, we can skip some walks over nmethods.
+    // Assert that they don't get generated accidently!
+    assert(relocInfo::mustIterateImmediateOopsInCode(),
+           "Must return true so we will search for oops as roots etc. in the code.");
     const int oop_index = 0;
     const int offset = 0;    // if you want an offset, use the oop pool
     RelocationHolder rh = newHolder();
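The relocInfo and nmethod hunks together introduce a capability predicate: ports that never embed oops directly in generated code report mustIterateImmediateOopsInCode() as false, and the expensive RelocIterator walks in oops_do() and do_unloading_oops() are skipped entirely. A sketch of the pattern with modeled names (the constant-false platform value is an assumption for illustration):

struct reloc_info_like {
  // e.g. false on ports that always load oops from a constant pool
  static bool must_iterate_immediate_oops_in_code() { return false; }
};

template <typename RelocInfo, typename Walker>
void walk_code_oops_if_needed(Walker walk) {
  if (RelocInfo::must_iterate_immediate_oops_in_code()) {
    walk();  // only pay for the code-cache scan where immediate oops exist
  }
}

int main() {
  int walks = 0;
  walk_code_oops_if_needed<reloc_info_like>([&] { ++walks; });
  return walks == 0 ? 0 : 1;  // predicate is false, so no walk happened
}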
@ -77,7 +77,7 @@ GCTaskTimeStamp* GCTaskThread::time_stamp_at(uint index) {
   if (_time_stamps == NULL) {
     // We allocate the _time_stamps array lazily since logging can be enabled dynamically
     GCTaskTimeStamp* time_stamps = NEW_C_HEAP_ARRAY(GCTaskTimeStamp, GCTaskTimeStampEntries, mtGC);
-    if (Atomic::cmpxchg(time_stamps, &_time_stamps, (GCTaskTimeStamp*)NULL) != NULL) {
+    if (!Atomic::replace_if_null(time_stamps, &_time_stamps)) {
       // Someone already setup the time stamps
       FREE_C_HEAP_ARRAY(GCTaskTimeStamp, time_stamps);
     }
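Both this hunk and the nmethod::test_set_oops_do_mark change replace the noisy cmpxchg-against-NULL idiom with the new Atomic::replace_if_null wrapper: publish a value only if the slot is still NULL, and report whether this thread won the race. A standard-library sketch of the same semantics (signature modeled on the call sites, not copied from HotSpot):

#include <atomic>

template <typename T>
bool replace_if_null(T* value, std::atomic<T*>* dest) {
  T* expected = nullptr;
  // Succeeds, and stores `value`, only when *dest was still nullptr.
  return dest->compare_exchange_strong(expected, value);
}

int main() {
  std::atomic<int*> slot{nullptr};
  int a = 1, b = 2;
  bool first  = replace_if_null(&a, &slot);  // true: slot was null
  bool second = replace_if_null(&b, &slot);  // false: lost the race, slot keeps &a
  return (first && !second && slot.load() == &a) ? 0 : 1;
}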
@ -317,23 +317,18 @@ JVM_NewArray(JNIEnv *env, jclass eltClass, jint length);
 JNIEXPORT jobject JNICALL
 JVM_NewMultiArray(JNIEnv *env, jclass eltClass, jintArray dim);

-/*
- * java.lang.Class and java.lang.ClassLoader
- */
-
-#define JVM_CALLER_DEPTH -1

 /*
  * Returns the immediate caller class of the native method invoking
  * JVM_GetCallerClass.  The Method.invoke and other frames due to
  * reflection machinery are skipped.
  *
- * The depth parameter must be -1 (JVM_DEPTH).  The caller is expected
- * to be marked with sun.reflect.CallerSensitive.  The JVM will throw
- * an error if it is not marked propertly.
+ * The caller is expected to be marked with
+ * jdk.internal.reflect.CallerSensitive.  The JVM will throw an
+ * error if it is not marked properly.
  */
 JNIEXPORT jclass JNICALL
-JVM_GetCallerClass(JNIEnv *env, int depth);
+JVM_GetCallerClass(JNIEnv *env);


 /*
|
@@ -699,6 +699,7 @@ int CodeInstaller::estimate_stubs_size(TRAPS) {
   // Estimate the number of static and aot call stubs that might be emitted.
   int static_call_stubs = 0;
   int aot_call_stubs = 0;
+  int trampoline_stubs = 0;
   objArrayOop sites = this->sites();
   for (int i = 0; i < sites->length(); i++) {
     oop site = sites->obj_at(i);
@@ -710,8 +711,18 @@ int CodeInstaller::estimate_stubs_size(TRAPS) {
         JVMCI_ERROR_0("expected Integer id, got %s", id_obj->klass()->signature_name());
       }
       jint id = id_obj->int_field(java_lang_boxing_object::value_offset_in_bytes(T_INT));
-      if (id == INVOKESTATIC || id == INVOKESPECIAL) {
+      switch (id) {
+        case INVOKEINTERFACE:
+        case INVOKEVIRTUAL:
+          trampoline_stubs++;
+          break;
+        case INVOKESTATIC:
+        case INVOKESPECIAL:
           static_call_stubs++;
+          trampoline_stubs++;
+          break;
+        default:
+          break;
       }
     }
   }
@@ -726,6 +737,7 @@ int CodeInstaller::estimate_stubs_size(TRAPS) {
     }
   }
   int size = static_call_stubs * CompiledStaticCall::to_interp_stub_size();
+  size += trampoline_stubs * CompiledStaticCall::to_trampoline_stub_size();
 #if INCLUDE_AOT
   size += aot_call_stubs * CompiledStaticCall::to_aot_stub_size();
 #endif
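Taken together, these three hunks grow the stub-size estimate from "static call stubs only" to "static call stubs plus one trampoline stub per counted call". A rough sketch of the resulting arithmetic; the constants are invented for illustration, since the real per-stub sizes come from platform-specific CompiledStaticCall code:

// Hypothetical per-stub byte sizes; the real values are platform-dependent.
const int kToInterpStubSize   = 16;
const int kTrampolineStubSize = 8;

// Mirrors the shape of estimate_stubs_size after the patch (AOT part omitted).
int estimated_stub_bytes(int static_call_stubs, int trampoline_stubs) {
  int size = static_call_stubs * kToInterpStubSize;  // compiled->interpreted transitions
  size += trampoline_stubs * kTrampolineStubSize;    // new: trampolines for counted calls
  return size;
}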
@@ -1171,7 +1183,7 @@ void CodeInstaller::site_Call(CodeBuffer& buffer, jint pc_offset, Handle site, T
   }
 
   TRACE_jvmci_3("method call");
-  CodeInstaller::pd_relocate_JavaMethod(hotspot_method, pc_offset, CHECK);
+  CodeInstaller::pd_relocate_JavaMethod(buffer, hotspot_method, pc_offset, CHECK);
   if (_next_call_type == INVOKESTATIC || _next_call_type == INVOKESPECIAL) {
     // Need a static call stub for transitions from compiled to interpreted.
     CompiledStaticCall::emit_to_interp_stub(buffer, _instructions->start() + pc_offset);
@@ -1282,4 +1294,3 @@ void CodeInstaller::site_Mark(CodeBuffer& buffer, jint pc_offset, Handle site, T
     }
   }
 }
-
@@ -185,7 +185,7 @@ private:
   void pd_patch_MetaspaceConstant(int pc_offset, Handle constant, TRAPS);
   void pd_patch_DataSectionReference(int pc_offset, int data_offset, TRAPS);
   void pd_relocate_ForeignCall(NativeInstruction* inst, jlong foreign_call_destination, TRAPS);
-  void pd_relocate_JavaMethod(Handle method, jint pc_offset, TRAPS);
+  void pd_relocate_JavaMethod(CodeBuffer &cbuf, Handle method, jint pc_offset, TRAPS);
   void pd_relocate_poll(address pc, jint mark, TRAPS);
 
   objArrayOop sites() { return (objArrayOop) JNIHandles::resolve(_sites_handle); }
@@ -749,8 +749,13 @@ C2V_VMENTRY(jobject, findUniqueConcreteMethod, (JNIEnv *, jobject, jobject jvmci
 C2V_END
 
 C2V_VMENTRY(jobject, getImplementor, (JNIEnv *, jobject, jobject jvmci_type))
-  InstanceKlass* klass = (InstanceKlass*) CompilerToVM::asKlass(jvmci_type);
-  oop implementor = CompilerToVM::get_jvmci_type(klass->implementor(), CHECK_NULL);
+  Klass* klass = CompilerToVM::asKlass(jvmci_type);
+  if (!klass->is_interface()) {
+    THROW_MSG_0(vmSymbols::java_lang_IllegalArgumentException(),
+        err_msg("Expected interface type, got %s", klass->external_name()));
+  }
+  InstanceKlass* iklass = InstanceKlass::cast(klass);
+  oop implementor = CompilerToVM::get_jvmci_type(iklass->implementor(), CHECK_NULL);
   return JNIHandles::make_local(THREAD, implementor);
 C2V_END
 
@@ -989,8 +994,12 @@ C2V_VMENTRY(jboolean, hasFinalizableSubclass,(JNIEnv *, jobject, jobject jvmci_t
 C2V_END
 
 C2V_VMENTRY(jobject, getClassInitializer, (JNIEnv *, jobject, jobject jvmci_type))
-  InstanceKlass* klass = (InstanceKlass*) CompilerToVM::asKlass(jvmci_type);
-  oop result = CompilerToVM::get_jvmci_method(klass->class_initializer(), CHECK_NULL);
+  Klass* klass = CompilerToVM::asKlass(jvmci_type);
+  if (!klass->is_instance_klass()) {
+    return NULL;
+  }
+  InstanceKlass* iklass = InstanceKlass::cast(klass);
+  oop result = CompilerToVM::get_jvmci_method(iklass->class_initializer(), CHECK_NULL);
   return JNIHandles::make_local(THREAD, result);
 C2V_END
 
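Both hunks replace an unchecked C-style downcast with an explicit kind test before InstanceKlass::cast. A minimal sketch of that check-then-cast shape, with placeholder types standing in for the Klass hierarchy (this is not HotSpot's real class layout):

#include <cstddef>

// Placeholder hierarchy; HotSpot's real Klass/InstanceKlass differ.
struct Klass {
  virtual ~Klass() {}
  virtual bool is_instance_klass() const { return false; }
};
struct InstanceKlass : Klass {
  bool is_instance_klass() const override { return true; }
};

// Before the patch: a blind (InstanceKlass*) cast, which is undefined
// behavior when the argument is, say, an array klass. After: test, then
// cast, and handle the mismatch explicitly (return NULL or throw, as the
// two hunks above do).
InstanceKlass* as_instance_klass_or_null(Klass* k) {
  if (k == NULL || !k->is_instance_klass()) {
    return NULL;
  }
  return static_cast<InstanceKlass*>(k);
}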
@@ -177,6 +177,7 @@ void ConstantPoolCacheEntry::set_direct_or_vtable_call(Bytecodes::Code invoke_co
       // instruction somehow links to a non-interface method (in Object).
       // In that case, the method has no itable index and must be invoked as a virtual.
       // Set a flag to keep track of this corner case.
+      assert(method->is_public(), "Calling non-public method in Object with invokeinterface");
       change_to_virtual = true;
 
       // ...and fall through as if we were handling invokevirtual:
@@ -86,13 +86,14 @@ void klassVtable::compute_vtable_size_and_num_mirandas(
 
   GrowableArray<Method*> new_mirandas(20);
   // compute the number of miranda methods that must be added to the end
-  get_mirandas(&new_mirandas, all_mirandas, super, methods, NULL, local_interfaces);
+  get_mirandas(&new_mirandas, all_mirandas, super, methods, NULL, local_interfaces,
+               class_flags.is_interface());
   *num_new_mirandas = new_mirandas.length();
 
   // Interfaces do not need interface methods in their vtables
   // This includes miranda methods and during later processing, default methods
   if (!class_flags.is_interface()) {
     vtable_length += *num_new_mirandas * vtableEntry::size();
   }
 
   if (Universe::is_bootstrapping() && vtable_length == 0) {
@@ -454,8 +455,13 @@ bool klassVtable::update_inherited_vtable(InstanceKlass* klass, const methodHand
     } else {
       super_method = method_at(i);
     }
-    // Check if method name matches
-    if (super_method->name() == name && super_method->signature() == signature) {
+    // Check if method name matches.  Ignore match if klass is an interface and the
+    // matching method is a non-public java.lang.Object method.  (See JVMS 5.4.3.4)
+    // This is safe because the method at this slot should never get invoked.
+    // (TBD: put in a method to throw NoSuchMethodError if this slot is ever used.)
+    if (super_method->name() == name && super_method->signature() == signature &&
+        (!_klass->is_interface() ||
+         !SystemDictionary::is_nonpublic_Object_method(super_method))) {
 
       // get super_klass for method_holder for the found method
       InstanceKlass* super_klass = super_method->method_holder();
@@ -713,7 +719,7 @@ bool klassVtable::is_miranda_entry_at(int i) {
   if (mhk->is_interface()) {
     assert(m->is_public(), "should be public");
     assert(ik()->implements_interface(method_holder) , "this class should implement the interface");
-    if (is_miranda(m, ik()->methods(), ik()->default_methods(), ik()->super())) {
+    if (is_miranda(m, ik()->methods(), ik()->default_methods(), ik()->super(), klass()->is_interface())) {
       return true;
     }
   }
@@ -738,7 +744,10 @@ bool klassVtable::is_miranda_entry_at(int i) {
 // During the first run, the current instanceKlass has not yet been
 // created, the superclasses and superinterfaces do have instanceKlasses
 // but may not have vtables, the default_methods list is empty, no overpasses.
-// This is seen by default method creation.
+// Default method generation uses the all_mirandas array as the starter set for
+// maximally-specific default method calculation.  So, for both classes and
+// interfaces, it is necessary that the first pass will find all non-private
+// interface instance methods, whether or not they are concrete.
 //
 // Pass 2: recalculated during vtable initialization: only include abstract methods.
 // The goal of pass 2 is to walk through the superinterfaces to see if any of
@@ -772,7 +781,8 @@ bool klassVtable::is_miranda_entry_at(int i) {
 // Part of the Miranda Rights in the US mean that if you do not have
 // an attorney one will be appointed for you.
 bool klassVtable::is_miranda(Method* m, Array<Method*>* class_methods,
-                             Array<Method*>* default_methods, const Klass* super) {
+                             Array<Method*>* default_methods, const Klass* super,
+                             bool is_interface) {
   if (m->is_static() || m->is_private() || m->is_overpass()) {
     return false;
   }
@@ -800,8 +810,11 @@ bool klassVtable::is_miranda(Method* m, Array<Method*>* class_methods,
 
   for (const Klass* cursuper = super; cursuper != NULL; cursuper = cursuper->super())
   {
-    if (InstanceKlass::cast(cursuper)->find_local_method(name, signature,
-          Klass::find_overpass, Klass::skip_static, Klass::skip_private) != NULL) {
+    Method* found_mth = InstanceKlass::cast(cursuper)->find_local_method(name, signature,
+          Klass::find_overpass, Klass::skip_static, Klass::skip_private);
+    // Ignore non-public methods in java.lang.Object if klass is an interface.
+    if (found_mth != NULL && (!is_interface ||
+        !SystemDictionary::is_nonpublic_Object_method(found_mth))) {
       return false;
     }
   }
@@ -820,7 +833,7 @@ bool klassVtable::is_miranda(Method* m, Array<Method*>* class_methods,
 void klassVtable::add_new_mirandas_to_lists(
     GrowableArray<Method*>* new_mirandas, GrowableArray<Method*>* all_mirandas,
     Array<Method*>* current_interface_methods, Array<Method*>* class_methods,
-    Array<Method*>* default_methods, const Klass* super) {
+    Array<Method*>* default_methods, const Klass* super, bool is_interface) {
 
   // iterate thru the current interface's methods to see if any is a miranda
   int num_methods = current_interface_methods->length();
@@ -839,7 +852,7 @@ void klassVtable::add_new_mirandas_to_lists(
     }
 
     if (!is_duplicate) { // we don't want duplicate miranda entries in the vtable
-      if (is_miranda(im, class_methods, default_methods, super)) { // is it a miranda at all?
+      if (is_miranda(im, class_methods, default_methods, super, is_interface)) { // is it a miranda at all?
         const InstanceKlass *sk = InstanceKlass::cast(super);
         // check if it is a duplicate of a super's miranda
         if (sk->lookup_method_in_all_interfaces(im->name(), im->signature(), Klass::find_defaults) == NULL) {
@@ -858,7 +871,8 @@ void klassVtable::get_mirandas(GrowableArray<Method*>* new_mirandas,
                                const Klass* super,
                                Array<Method*>* class_methods,
                                Array<Method*>* default_methods,
-                               Array<Klass*>* local_interfaces) {
+                               Array<Klass*>* local_interfaces,
+                               bool is_interface) {
   assert((new_mirandas->length() == 0) , "current mirandas must be 0");
 
   // iterate thru the local interfaces looking for a miranda
@@ -867,7 +881,7 @@ void klassVtable::get_mirandas(GrowableArray<Method*>* new_mirandas,
     InstanceKlass *ik = InstanceKlass::cast(local_interfaces->at(i));
     add_new_mirandas_to_lists(new_mirandas, all_mirandas,
                               ik->methods(), class_methods,
-                              default_methods, super);
+                              default_methods, super, is_interface);
     // iterate thru each local's super interfaces
     Array<Klass*>* super_ifs = ik->transitive_interfaces();
     int num_super_ifs = super_ifs->length();
@@ -875,7 +889,7 @@ void klassVtable::get_mirandas(GrowableArray<Method*>* new_mirandas,
       InstanceKlass *sik = InstanceKlass::cast(super_ifs->at(j));
       add_new_mirandas_to_lists(new_mirandas, all_mirandas,
                                 sik->methods(), class_methods,
-                                default_methods, super);
+                                default_methods, super, is_interface);
     }
   }
 }
@@ -888,7 +902,8 @@ void klassVtable::get_mirandas(GrowableArray<Method*>* new_mirandas,
 int klassVtable::fill_in_mirandas(int initialized) {
   GrowableArray<Method*> mirandas(20);
   get_mirandas(&mirandas, NULL, ik()->super(), ik()->methods(),
-               ik()->default_methods(), ik()->local_interfaces());
+               ik()->default_methods(), ik()->local_interfaces(),
+               klass()->is_interface());
   for (int i = 0; i < mirandas.length(); i++) {
     if (log_develop_is_enabled(Trace, vtables)) {
       Method* meth = mirandas.at(i);
@@ -144,21 +144,24 @@ class klassVtable VALUE_OBJ_CLASS_SPEC {
   bool is_miranda_entry_at(int i);
   int fill_in_mirandas(int initialized);
   static bool is_miranda(Method* m, Array<Method*>* class_methods,
-                         Array<Method*>* default_methods, const Klass* super);
+                         Array<Method*>* default_methods, const Klass* super,
+                         bool is_interface);
   static void add_new_mirandas_to_lists(
       GrowableArray<Method*>* new_mirandas,
       GrowableArray<Method*>* all_mirandas,
       Array<Method*>* current_interface_methods,
       Array<Method*>* class_methods,
       Array<Method*>* default_methods,
-      const Klass* super);
+      const Klass* super,
+      bool is_interface);
   static void get_mirandas(
       GrowableArray<Method*>* new_mirandas,
       GrowableArray<Method*>* all_mirandas,
       const Klass* super,
      Array<Method*>* class_methods,
       Array<Method*>* default_methods,
-      Array<Klass*>* local_interfaces);
+      Array<Klass*>* local_interfaces,
+      bool is_interface);
   void verify_against(outputStream* st, klassVtable* vt, int index);
   inline InstanceKlass* ik() const;
   // When loading a class from CDS archive at run time, and no class redefinition
@@ -446,7 +446,7 @@ MethodCounters* Method::build_method_counters(Method* m, TRAPS) {
 
 bool Method::init_method_counters(MethodCounters* counters) {
   // Try to install a pointer to MethodCounters, return true on success.
-  return Atomic::cmpxchg(counters, &_method_counters, (MethodCounters*)NULL) == NULL;
+  return Atomic::replace_if_null(counters, &_method_counters);
 }
 
 void Method::cleanup_inline_caches() {
@@ -678,17 +678,9 @@ JVM_END
 // Misc. class handling ///////////////////////////////////////////////////////////
 
 
-JVM_ENTRY(jclass, JVM_GetCallerClass(JNIEnv* env, int depth))
+JVM_ENTRY(jclass, JVM_GetCallerClass(JNIEnv* env))
   JVMWrapper("JVM_GetCallerClass");
 
-  // Pre-JDK 8 and early builds of JDK 8 don't have a CallerSensitive annotation; or
-  // sun.reflect.Reflection.getCallerClass with a depth parameter is provided
-  // temporarily for existing code to use until a replacement API is defined.
-  if (SystemDictionary::reflect_CallerSensitive_klass() == NULL || depth != JVM_CALLER_DEPTH) {
-    Klass* k = thread->security_get_caller_class(depth);
-    return (k == NULL) ? NULL : (jclass) JNIHandles::make_local(env, k->java_mirror());
-  }
-
   // Getting the class of the caller frame.
   //
   // The call stack at this point looks something like this:
@@ -127,7 +127,7 @@ JvmtiRawMonitor::is_valid() {
 
 int JvmtiRawMonitor::SimpleEnter (Thread * Self) {
   for (;;) {
-    if (Atomic::cmpxchg(Self, &_owner, (void*)NULL) == NULL) {
+    if (Atomic::replace_if_null(Self, &_owner)) {
       return OS_OK ;
     }
 
@@ -139,7 +139,7 @@ int JvmtiRawMonitor::SimpleEnter (Thread * Self) {
     Node._next  = _EntryList ;
     _EntryList  = &Node ;
     OrderAccess::fence() ;
-    if (_owner == NULL && Atomic::cmpxchg(Self, &_owner, (void*)NULL) == NULL) {
+    if (_owner == NULL && Atomic::replace_if_null(Self, &_owner)) {
       _EntryList = Node._next ;
       RawMonitor_lock->unlock() ;
       return OS_OK ;
@@ -1727,10 +1727,16 @@ WB_END
 
 WB_ENTRY(jboolean, WB_IsCDSIncludedInVmBuild(JNIEnv* env))
 #if INCLUDE_CDS
+# ifdef _LP64
+  if (!UseCompressedOops || !UseCompressedClassPointers) {
+    // On 64-bit VMs, CDS is supported only with compressed oops/pointers
+    return false;
+  }
+# endif // _LP64
   return true;
 #else
   return false;
-#endif
+#endif // INCLUDE_CDS
 WB_END
 
 
@@ -204,7 +204,9 @@ SystemProperty::SystemProperty(const char* key, const char* value, bool writeabl
   _writeable = writeable;
 }
 
-AgentLibrary::AgentLibrary(const char* name, const char* options, bool is_absolute_path, void* os_lib) {
+AgentLibrary::AgentLibrary(const char* name, const char* options,
+               bool is_absolute_path, void* os_lib,
+               bool instrument_lib) {
   _name = AllocateHeap(strlen(name)+1, mtArguments);
   strcpy(_name, name);
   if (options == NULL) {
@@ -218,6 +220,7 @@ AgentLibrary::AgentLibrary(const char* name, const char* options, bool is_absolu
   _next = NULL;
   _state = agent_invalid;
   _is_static_lib = false;
+  _is_instrument_lib = instrument_lib;
 }
 
 // Check if head of 'option' matches 'name', and sets 'tail' to the remaining
@@ -294,6 +297,10 @@ void Arguments::add_init_agent(const char* name, char* options, bool absolute_pa
   _agentList.add(new AgentLibrary(name, options, absolute_path, NULL));
 }
 
+void Arguments::add_instrument_agent(const char* name, char* options, bool absolute_path) {
+  _agentList.add(new AgentLibrary(name, options, absolute_path, NULL, true));
+}
+
 // Late-binding agents not started via arguments
 void Arguments::add_loaded_agent(AgentLibrary *agentLib) {
   _agentList.add(agentLib);
@@ -501,7 +508,7 @@ static SpecialFlag const special_jvm_flags[] = {
   { "MaxRAMFraction",           JDK_Version::jdk(10), JDK_Version::undefined(), JDK_Version::undefined() },
   { "MinRAMFraction",           JDK_Version::jdk(10), JDK_Version::undefined(), JDK_Version::undefined() },
   { "InitialRAMFraction",       JDK_Version::jdk(10), JDK_Version::undefined(), JDK_Version::undefined() },
-  { "UseMembar",                JDK_Version::jdk(10), JDK_Version::jdk(11), JDK_Version::jdk(12) },
+  { "UseMembar",                JDK_Version::jdk(10), JDK_Version::undefined(), JDK_Version::undefined() },
   { "FastTLABRefill",           JDK_Version::jdk(10), JDK_Version::jdk(11), JDK_Version::jdk(12) },
   { "SafepointSpinBeforeYield", JDK_Version::jdk(10), JDK_Version::jdk(11), JDK_Version::jdk(12) },
   { "DeferThrSuspendLoopCount", JDK_Version::jdk(10), JDK_Version::jdk(11), JDK_Version::jdk(12) },
@@ -678,6 +685,14 @@ static bool lookup_special_flag(const char *flag_name, size_t skip_index) {
   return false;
 }
 
+// Verifies the correctness of the entries in the special_jvm_flags table.
+// If there is a semantic error (i.e. a bug in the table) such as the obsoletion
+// version being earlier than the deprecation version, then a warning is issued
+// and verification fails - by returning false. If it is detected that the table
+// is out of date, with respect to the current version, then a warning is issued
+// but verification does not fail. This allows the VM to operate when the version
+// is first updated, without needing to update all the impacted flags at the
+// same time.
 static bool verify_special_jvm_flags() {
   bool success = true;
   for (size_t i = 0; special_jvm_flags[i].name != NULL; i++) {
@@ -714,7 +729,6 @@ static bool verify_special_jvm_flags() {
       if (!version_less_than(JDK_Version::current(), flag.obsolete_in)) {
         if (Flag::find_flag(flag.name) != NULL) {
           warning("Global variable for obsolete special flag entry \"%s\" should be removed", flag.name);
-          success = false;
         }
       }
     }
@@ -724,7 +738,6 @@ static bool verify_special_jvm_flags() {
       if (!version_less_than(JDK_Version::current(), flag.expired_in)) {
         if (Flag::find_flag(flag.name) != NULL) {
           warning("Global variable for expired flag entry \"%s\" should be removed", flag.name);
-          success = false;
         }
       }
     }
@@ -2795,7 +2808,7 @@ jint Arguments::parse_each_vm_init_arg(const JavaVMInitArgs* args, bool* patch_m
       size_t length = strlen(tail) + 1;
       char *options = NEW_C_HEAP_ARRAY(char, length, mtArguments);
       jio_snprintf(options, length, "%s", tail);
-      add_init_agent("instrument", options, false);
+      add_instrument_agent("instrument", options, false);
       // java agents need module java.instrument
       if (!create_numbered_property("jdk.module.addmods", "java.instrument", addmods_count++)) {
         return JNI_ENOMEM;
@@ -142,6 +142,7 @@ public:
   void*  _os_lib;
   bool   _is_absolute_path;
   bool   _is_static_lib;
+  bool   _is_instrument_lib;
   AgentState _state;
   AgentLibrary* _next;
 
@@ -154,13 +155,15 @@ public:
   void set_os_lib(void* os_lib) { _os_lib = os_lib; }
   AgentLibrary* next() const { return _next; }
   bool is_static_lib() const { return _is_static_lib; }
+  bool is_instrument_lib() const { return _is_instrument_lib; }
   void set_static_lib(bool is_static_lib) { _is_static_lib = is_static_lib; }
   bool valid() { return (_state == agent_valid); }
   void set_valid() { _state = agent_valid; }
   void set_invalid() { _state = agent_invalid; }
 
   // Constructor
-  AgentLibrary(const char* name, const char* options, bool is_absolute_path, void* os_lib);
+  AgentLibrary(const char* name, const char* options, bool is_absolute_path,
+               void* os_lib, bool instrument_lib=false);
 };
 
 // maintain an order of entry list of AgentLibrary
@@ -337,6 +340,7 @@ class Arguments : AllStatic {
   // -agentlib and -agentpath arguments
   static AgentLibraryList _agentList;
   static void add_init_agent(const char* name, char* options, bool absolute_path);
+  static void add_instrument_agent(const char* name, char* options, bool absolute_path);
 
   // Late-binding agents not started via arguments
   static void add_loaded_agent(AgentLibrary *agentLib);
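Note the `instrument_lib=false` default on the new constructor parameter: every existing four-argument call site keeps compiling unchanged, and only the new `add_instrument_agent` path passes `true`. A tiny sketch of that source-compatibility pattern (placeholder class, not the real AgentLibrary):

struct AgentSketch {
  bool instrument;
  // Defaulted trailing parameter: callers written before the parameter
  // existed still compile and get the old behavior (false).
  AgentSketch(const char* /*name*/, bool instrument_lib = false)
      : instrument(instrument_lib) {}
};

int main() {
  AgentSketch plain("jdwp");              // pre-existing call shape
  AgentSketch instr("instrument", true);  // new call shape opts in
  return (plain.instrument == false && instr.instrument == true) ? 0 : 1;
}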
@@ -45,8 +45,8 @@ enum cmpxchg_memory_order {
 
 class Atomic : AllStatic {
  public:
-  // Atomic operations on jlong types are not available on all 32-bit
-  // platforms. If atomic ops on jlongs are defined here they must only
+  // Atomic operations on int64 types are not available on all 32-bit
+  // platforms. If atomic ops on int64 are defined here they must only
   // be used from code that verifies they are available at runtime and
   // can provide an alternative action if not - see supports_cx8() for
   // a means to test availability.
@@ -639,16 +639,16 @@ struct Atomic::AddImpl<
 //
 // Use the ATOMIC_SHORT_PAIR macro (see macros.hpp) to get the desired alignment.
 template<>
-struct Atomic::AddImpl<jshort, jshort> VALUE_OBJ_CLASS_SPEC {
-  jshort operator()(jshort add_value, jshort volatile* dest) const {
+struct Atomic::AddImpl<short, short> VALUE_OBJ_CLASS_SPEC {
+  short operator()(short add_value, short volatile* dest) const {
 #ifdef VM_LITTLE_ENDIAN
     assert((intx(dest) & 0x03) == 0x02, "wrong alignment");
-    jint new_value = Atomic::add(add_value << 16, (volatile jint*)(dest-1));
+    int new_value = Atomic::add(add_value << 16, (volatile int*)(dest-1));
 #else
     assert((intx(dest) & 0x03) == 0x00, "wrong alignment");
-    jint new_value = Atomic::add(add_value << 16, (volatile jint*)(dest));
+    int new_value = Atomic::add(add_value << 16, (volatile int*)(dest));
 #endif
-    return (jshort)(new_value >> 16); // preserves sign
+    return (short)(new_value >> 16); // preserves sign
   }
 };
 
@@ -807,7 +807,7 @@ inline T Atomic::CmpxchgByteUsingInt::operator()(T exchange_value,
   do {
     // value to swap in matches current value ...
     uint32_t new_value = cur;
-    // ... except for the one jbyte we want to update
+    // ... except for the one byte we want to update
     reinterpret_cast<uint8_t*>(&new_value)[offset] = canon_exchange_value;
 
     uint32_t res = cmpxchg(new_value, aligned_dest, cur, order);
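The AddImpl<short, short> specialization is worth unpacking: platforms without a 16-bit atomic add update the short by doing a 32-bit atomic add of `add_value << 16` on the enclosing, suitably aligned int, so only the high half changes (any carry falls off the top of the word), and the result is recovered with a sign-preserving arithmetic shift. A standalone model of the arithmetic, without atomics (the high-half layout matches the VM_LITTLE_ENDIAN branch above; unsigned casts are used to keep the demo well defined):

#include <cassert>
#include <cstdint>

// The short lives in the high half of the 32-bit word; adding
// (value << 16) touches only that half, and the carry out of bit 31
// is simply discarded. The real code performs this add atomically.
int16_t add_to_high_half(int32_t* word, int16_t add_value) {
  *word = int32_t(uint32_t(*word) + (uint32_t(uint16_t(add_value)) << 16));
  return int16_t(*word >> 16);  // arithmetic shift preserves the sign
}

int main() {
  int32_t word = int32_t(uint32_t(uint16_t(-2)) << 16);  // high half -2, low half 0
  assert(add_to_high_half(&word, 5) == 3);               // -2 + 5 == 3
  assert(uint16_t(word) == 0);                           // low half untouched
  return 0;
}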
@@ -47,6 +47,7 @@ jobject JNIHandles::make_local(oop obj) {
   } else {
     Thread* thread = Thread::current();
     assert(Universe::heap()->is_in_reserved(obj), "sanity check");
+    assert(!current_thread_in_native(), "must not be in native");
     return thread->active_handles()->allocate_handle(obj);
   }
 }
@@ -59,6 +60,8 @@ jobject JNIHandles::make_local(Thread* thread, oop obj) {
     return NULL;                // ignore null handles
   } else {
     assert(Universe::heap()->is_in_reserved(obj), "sanity check");
+    assert(thread->is_Java_thread(), "not a Java thread");
+    assert(!current_thread_in_native(), "must not be in native");
     return thread->active_handles()->allocate_handle(obj);
   }
 }
@@ -70,6 +73,7 @@ jobject JNIHandles::make_local(JNIEnv* env, oop obj) {
   } else {
     JavaThread* thread = JavaThread::thread_from_jni_environment(env);
     assert(Universe::heap()->is_in_reserved(obj), "sanity check");
+    assert(!current_thread_in_native(), "must not be in native");
     return thread->active_handles()->allocate_handle(obj);
   }
 }
@@ -77,6 +81,7 @@ jobject JNIHandles::make_local(JNIEnv* env, oop obj) {
 
 jobject JNIHandles::make_global(Handle obj) {
   assert(!Universe::heap()->is_gc_active(), "can't extend the root set during GC");
+  assert(!current_thread_in_native(), "must not be in native");
   jobject res = NULL;
   if (!obj.is_null()) {
     // ignore null handles
@@ -93,6 +98,7 @@ jobject JNIHandles::make_global(Handle obj) {
 
 jobject JNIHandles::make_weak_global(Handle obj) {
   assert(!Universe::heap()->is_gc_active(), "can't extend the root set during GC");
+  assert(!current_thread_in_native(), "must not be in native");
   jobject res = NULL;
   if (!obj.is_null()) {
     // ignore null handles
@@ -265,6 +271,13 @@ void JNIHandles::verify() {
   weak_oops_do(&verify_handle);
 }
 
+// This method is implemented here to avoid circular includes between
+// jniHandles.hpp and thread.hpp.
+bool JNIHandles::current_thread_in_native() {
+  Thread* thread = Thread::current();
+  return (thread->is_Java_thread() &&
+          JavaThread::current()->thread_state() == _thread_in_native);
+}
+
 
 void jni_handles_init() {
@@ -48,6 +48,10 @@ class JNIHandles : AllStatic {
   template<bool external_guard> inline static oop resolve_impl(jobject handle);
   template<bool external_guard> static oop resolve_jweak(jweak handle);
 
+  // This method is not inlined in order to avoid circular includes between
+  // this header file and thread.hpp.
+  static bool current_thread_in_native();
+
  public:
   // Low tag bit in jobject used to distinguish a jweak.  jweak is
   // type equivalent to jobject, but there are places where we need to
@@ -230,6 +234,7 @@ inline oop JNIHandles::guard_value(oop value) {
 template<bool external_guard>
 inline oop JNIHandles::resolve_impl(jobject handle) {
   assert(handle != NULL, "precondition");
+  assert(!current_thread_in_native(), "must not be in native");
   oop result;
   if (is_jweak(handle)) {       // Unlikely
     result = resolve_jweak<external_guard>(handle);
@@ -467,7 +467,7 @@ void Monitor::ILock(Thread * Self) {
   OrderAccess::fence();
 
   // Optional optimization ... try barging on the inner lock
-  if ((NativeMonitorFlags & 32) && Atomic::cmpxchg(ESelf, &_OnDeck, (ParkEvent*)NULL) == NULL) {
+  if ((NativeMonitorFlags & 32) && Atomic::replace_if_null(ESelf, &_OnDeck)) {
     goto OnDeck_LOOP;
   }
 
@@ -574,7 +574,7 @@ void Monitor::IUnlock(bool RelaxAssert) {
   // Unlike a normal lock, however, the exiting thread "locks" OnDeck,
   // picks a successor and marks that thread as OnDeck.  That successor
   // thread will then clear OnDeck once it eventually acquires the outer lock.
-  if (Atomic::cmpxchg((ParkEvent*)_LBIT, &_OnDeck, (ParkEvent*)NULL) != NULL) {
+  if (!Atomic::replace_if_null((ParkEvent*)_LBIT, &_OnDeck)) {
     return;
   }
 
@@ -421,7 +421,7 @@ void ObjectMonitor::enter(TRAPS) {
 int ObjectMonitor::TryLock(Thread * Self) {
   void * own = _owner;
   if (own != NULL) return 0;
-  if (Atomic::cmpxchg(Self, &_owner, (void*)NULL) == NULL) {
+  if (Atomic::replace_if_null(Self, &_owner)) {
     // Either guarantee _recursions == 0 or set _recursions = 0.
     assert(_recursions == 0, "invariant");
     assert(_owner == Self, "invariant");
@@ -529,7 +529,7 @@ void ObjectMonitor::EnterI(TRAPS) {
   if ((SyncFlags & 16) == 0 && nxt == NULL && _EntryList == NULL) {
     // Try to assume the role of responsible thread for the monitor.
     // CONSIDER: ST vs CAS vs { if (Responsible==null) Responsible=Self }
-    Atomic::cmpxchg(Self, &_Responsible, (Thread*)NULL);
+    Atomic::replace_if_null(Self, &_Responsible);
   }
 
   // The lock might have been released while this thread was occupied queueing
@@ -553,7 +553,7 @@ void ObjectMonitor::EnterI(TRAPS) {
     assert(_owner != Self, "invariant");
 
     if ((SyncFlags & 2) && _Responsible == NULL) {
-      Atomic::cmpxchg(Self, &_Responsible, (Thread*)NULL);
+      Atomic::replace_if_null(Self, &_Responsible);
     }
 
     // park self
@@ -1007,7 +1007,7 @@ void ObjectMonitor::exit(bool not_suspended, TRAPS) {
       // to reacquire the lock the responsibility for ensuring succession
       // falls to the new owner.
       //
-      if (Atomic::cmpxchg(THREAD, &_owner, (void*)NULL) != NULL) {
+      if (!Atomic::replace_if_null(THREAD, &_owner)) {
         return;
       }
       TEVENT(Exit - Reacquired);
@@ -1032,7 +1032,7 @@ void ObjectMonitor::exit(bool not_suspended, TRAPS) {
       // B.  If the elements forming the EntryList|cxq are TSM
       //     we could simply unpark() the lead thread and return
       //     without having set _succ.
-      if (Atomic::cmpxchg(THREAD, &_owner, (void*)NULL) != NULL) {
+      if (!Atomic::replace_if_null(THREAD, &_owner)) {
        TEVENT(Inflated exit - reacquired succeeded);
        return;
      }
@@ -1714,7 +1714,7 @@ void ObjectMonitor::INotify(Thread * Self) {
     ObjectWaiter * tail = _cxq;
     if (tail == NULL) {
       iterator->_next = NULL;
-      if (Atomic::cmpxchg(iterator, &_cxq, (ObjectWaiter*)NULL) == NULL) {
+      if (Atomic::replace_if_null(iterator, &_cxq)) {
         break;
       }
     } else {
@@ -103,7 +103,7 @@ void SafepointSynchronize::begin() {
 
   int nof_threads = Threads::number_of_threads();
 
-  log_debug(safepoint)("Safepoint synchronization initiated. (%d)", nof_threads);
+  log_debug(safepoint)("Safepoint synchronization initiated. (%d threads)", nof_threads);
 
   RuntimeService::record_safepoint_begin();
 
@@ -407,9 +407,7 @@ void SafepointSynchronize::begin() {
   // Update the count of active JNI critical regions
   GCLocker::set_jni_lock_count(_current_jni_active_count);
 
-  if (log_is_enabled(Debug, safepoint)) {
-    log_debug(safepoint)("Entering safepoint region: %s", VMThread::vm_safepoint_description());
-  }
+  log_info(safepoint)("Entering safepoint region: %s", VMThread::vm_safepoint_description());
 
   RuntimeService::record_safepoint_synchronized();
   if (PrintSafepointStatistics) {
@@ -496,14 +494,14 @@ void SafepointSynchronize::end() {
       cur_state->restart(); // TSS _running
       SafepointMechanism::disarm_local_poll(current); // release store, local state -> polling page
     }
-    log_debug(safepoint)("Leaving safepoint region");
+    log_info(safepoint)("Leaving safepoint region");
   } else {
     // Set to not synchronized, so the threads will not go into the signal_thread_blocked method
     // when they get restarted.
     _state = _not_synchronized;
     OrderAccess::fence();
 
-    log_debug(safepoint)("Leaving safepoint region");
+    log_info(safepoint)("Leaving safepoint region");
 
     // Start suspended threads
     jtiwh.rewind();
@@ -62,12 +62,11 @@ address StubRoutines::_verify_oop_subroutine_entry          = NULL;
 address StubRoutines::_atomic_xchg_entry                    = NULL;
 address StubRoutines::_atomic_xchg_long_entry               = NULL;
 address StubRoutines::_atomic_store_entry                   = NULL;
-address StubRoutines::_atomic_store_ptr_entry               = NULL;
 address StubRoutines::_atomic_cmpxchg_entry                 = NULL;
 address StubRoutines::_atomic_cmpxchg_byte_entry            = NULL;
 address StubRoutines::_atomic_cmpxchg_long_entry            = NULL;
 address StubRoutines::_atomic_add_entry                     = NULL;
-address StubRoutines::_atomic_add_ptr_entry                 = NULL;
+address StubRoutines::_atomic_add_long_entry                = NULL;
 address StubRoutines::_fence_entry                          = NULL;
 address StubRoutines::_d2i_wrapper                          = NULL;
 address StubRoutines::_d2l_wrapper                          = NULL;
@@ -103,12 +103,11 @@ class StubRoutines: AllStatic {
   static address _atomic_xchg_entry;
   static address _atomic_xchg_long_entry;
   static address _atomic_store_entry;
-  static address _atomic_store_ptr_entry;
   static address _atomic_cmpxchg_entry;
   static address _atomic_cmpxchg_byte_entry;
   static address _atomic_cmpxchg_long_entry;
   static address _atomic_add_entry;
-  static address _atomic_add_ptr_entry;
+  static address _atomic_add_long_entry;
   static address _fence_entry;
   static address _d2i_wrapper;
   static address _d2l_wrapper;
@@ -277,12 +276,11 @@ class StubRoutines: AllStatic {
   static address atomic_xchg_entry()         { return _atomic_xchg_entry; }
   static address atomic_xchg_long_entry()    { return _atomic_xchg_long_entry; }
   static address atomic_store_entry()        { return _atomic_store_entry; }
-  static address atomic_store_ptr_entry()    { return _atomic_store_ptr_entry; }
   static address atomic_cmpxchg_entry()      { return _atomic_cmpxchg_entry; }
   static address atomic_cmpxchg_byte_entry() { return _atomic_cmpxchg_byte_entry; }
   static address atomic_cmpxchg_long_entry() { return _atomic_cmpxchg_long_entry; }
   static address atomic_add_entry()          { return _atomic_add_entry; }
-  static address atomic_add_ptr_entry()      { return _atomic_add_ptr_entry; }
+  static address atomic_add_long_entry()     { return _atomic_add_long_entry; }
   static address fence_entry()               { return _fence_entry; }
 
   static address d2i_wrapper()               { return _d2i_wrapper; }
@@ -238,8 +238,7 @@ bool ObjectSynchronizer::quick_enter(oop obj, Thread * Self,
     // and last are the inflated Java Monitor (ObjectMonitor) checks.
     lock->set_displaced_header(markOopDesc::unused_mark());
 
-    if (owner == NULL &&
-        Atomic::cmpxchg(Self, &(m->_owner), (void*)NULL) == NULL) {
+    if (owner == NULL && Atomic::replace_if_null(Self, &(m->_owner))) {
       assert(m->_recursions == 0, "invariant");
       assert(m->_owner == Self, "invariant");
       return true;
@@ -4039,9 +4039,16 @@ static OnLoadEntry_t lookup_on_load(AgentLibrary* agent,
     }
     if (library == NULL) {
       const char *sub_msg = " on the library path, with error: ";
-      size_t len = strlen(msg) + strlen(name) + strlen(sub_msg) + strlen(ebuf) + 1;
+      const char *sub_msg2 = "\nModule java.instrument may be missing from runtime image.";
+
+      size_t len = strlen(msg) + strlen(name) + strlen(sub_msg) +
+                   strlen(ebuf) + strlen(sub_msg2) + 1;
       char *buf = NEW_C_HEAP_ARRAY(char, len, mtThread);
-      jio_snprintf(buf, len, "%s%s%s%s", msg, name, sub_msg, ebuf);
+      if (!agent->is_instrument_lib()) {
+        jio_snprintf(buf, len, "%s%s%s%s", msg, name, sub_msg, ebuf);
+      } else {
+        jio_snprintf(buf, len, "%s%s%s%s%s", msg, name, sub_msg, ebuf, sub_msg2);
+      }
       // If we can't find the agent, exit.
       vm_exit_during_initialization(buf, NULL);
       FREE_C_HEAP_ARRAY(char, buf);
@@ -147,7 +147,7 @@ MallocSite* MallocSiteTable::lookup_or_add(const NativeCallStack& key, size_t* b
     if (entry == NULL) return NULL;
 
     // swap in the head
-    if (Atomic::cmpxchg(entry, &_table[index], (MallocSiteHashtableEntry*)NULL) == NULL) {
+    if (Atomic::replace_if_null(entry, &_table[index])) {
       return entry->data();
     }
 
@@ -259,5 +259,5 @@ void MallocSiteTable::AccessLock::exclusiveLock() {
 }
 
 bool MallocSiteHashtableEntry::atomic_insert(MallocSiteHashtableEntry* entry) {
-  return Atomic::cmpxchg(entry, &_next, (MallocSiteHashtableEntry*)NULL) == NULL;
+  return Atomic::replace_if_null(entry, &_next);
 }
@@ -628,7 +628,7 @@ void BitMap::init_pop_count_table() {
     table[i] = num_set_bits(i);
   }
 
-  if (Atomic::cmpxchg(table, &_pop_count_table, (BitMap::idx_t*)NULL) != NULL) {
+  if (!Atomic::replace_if_null(table, &_pop_count_table)) {
     guarantee(_pop_count_table != NULL, "invariant");
     FREE_C_HEAP_ARRAY(idx_t, table);
   }
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2008, 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2008, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -66,6 +66,8 @@ class LinuxFileStore
         }
 
         // step 2: find mount point
+        List<UnixMountEntry> procMountsEntries =
+            fs.getMountEntries("/proc/mounts");
         UnixPath parent = path.getParent();
         while (parent != null) {
             UnixFileAttributes attrs = null;
@@ -74,16 +76,23 @@ class LinuxFileStore
             } catch (UnixException x) {
                 x.rethrowAsIOException(parent);
             }
-            if (attrs.dev() != dev())
-                break;
+            if (attrs.dev() != dev()) {
+                // step 3: lookup mounted file systems (use /proc/mounts to
+                // ensure we find the file system even when not in /etc/mtab)
+                byte[] dir = path.asByteArray();
+                for (UnixMountEntry entry : procMountsEntries) {
+                    if (Arrays.equals(dir, entry.dir()))
+                        return entry;
+                }
+            }
             path = parent;
             parent = parent.getParent();
         }
 
-        // step 3: lookup mounted file systems (use /proc/mounts to ensure we
-        // find the file system even when not in /etc/mtab)
+        // step 3: lookup mounted file systems (use /proc/mounts to
+        // ensure we find the file system even when not in /etc/mtab)
         byte[] dir = path.asByteArray();
-        for (UnixMountEntry entry: fs.getMountEntries("/proc/mounts")) {
+        for (UnixMountEntry entry : procMountsEntries) {
             if (Arrays.equals(dir, entry.dir()))
                 return entry;
         }