diff --git a/.jcheck/conf b/.jcheck/conf
index 18228df5dfe46..b374bde90a538 100644
--- a/.jcheck/conf
+++ b/.jcheck/conf
@@ -15,7 +15,7 @@ version=0
domain=openjdk.org
[checks "whitespace"]
-files=.*\.cpp|.*\.hpp|.*\.c|.*\.h|.*\.java|.*\.cc|.*\.hh|.*\.m|.*\.mm|.*\.md|.*\.gmk|.*\.m4|.*\.ac|Makefile
+files=.*\.cpp|.*\.hpp|.*\.c|.*\.h|.*\.java|.*\.cc|.*\.hh|.*\.m|.*\.mm|.*\.md|.*\.properties|.*\.gmk|.*\.m4|.*\.ac|Makefile
ignore-tabs=.*\.gmk|Makefile
[checks "merge"]
diff --git a/doc/building.html b/doc/building.html
index d51e74d1454be..8a0acada254ac 100644
--- a/doc/building.html
+++ b/doc/building.html
@@ -526,7 +526,7 @@
Linux
The basic tooling is provided as part of the core operating system,
but you will most likely need to install developer packages.
For apt-based distributions (Debian, Ubuntu, etc), try this:
-sudo apt-get install build-essential
+sudo apt-get install build-essential autoconf
For rpm-based distributions (Fedora, Red Hat, etc), try this:
sudo yum groupinstall "Development Tools"
For Alpine Linux, aside from basic tooling, install the GNU versions
@@ -2166,15 +2166,26 @@
Using Multiple
configure
from there, e.g.
mkdir build/<name> && cd build/<name> && bash ../../configure
.
Then you can build that configuration using
-make CONF_NAME=<name>
or
-make CONF=<pattern>
, where
-<pattern>
is a substring matching one or several
-configurations, e.g. CONF=debug
. The special empty pattern
-(CONF=
) will match all available configuration, so
-make CONF= hotspot
will build the hotspot
-target for all configurations. Alternatively, you can execute
-make
in the configuration directory, e.g.
-cd build/<name> && make
.
+make CONF=<selector>
, where
+<selector>
is interpreted as follows:
+
+- If
<selector>
exactly matches the name of a
+configuration, this and only this configuration will be selected.
+- If
<selector>
matches (i.e. is a substring of)
+the names of several configurations, then all these configurations will
+be selected.
+- If
<selector>
is empty (i.e. CONF=
),
+then all configurations will be selected.
+- If
<selector>
begins with !
, then
+all configurations not matching the string following
+!
will be selected.
+
+A more specialized version, CONF_NAME=<name>
also
+exists, which will only match if the given <name>
+exactly matches a single configuration.
+Alternatively, you can execute make
in the configuration
+directory, e.g. cd build/<name> && make
.
Handling Reconfigurations
If you update the repository and part of the configure script has
changed, the build system will force you to re-run
diff --git a/doc/building.md b/doc/building.md
index 9d928a3924557..ed8a06693551d 100644
--- a/doc/building.md
+++ b/doc/building.md
@@ -349,7 +349,7 @@ will most likely need to install developer packages.
For apt-based distributions (Debian, Ubuntu, etc), try this:
```
-sudo apt-get install build-essential
+sudo apt-get install build-essential autoconf
```
For rpm-based distributions (Fedora, Red Hat, etc), try this:
@@ -1952,12 +1952,25 @@ configuration with the name `<name>`. Alternatively, you can create a directory
under `build` and run `configure` from there, e.g. `mkdir build/<name> && cd
build/<name> && bash ../../configure`.
-Then you can build that configuration using `make CONF_NAME=<name>` or `make
-CONF=<pattern>`, where `<pattern>` is a substring matching one or several
-configurations, e.g. `CONF=debug`. The special empty pattern (`CONF=`) will
-match *all* available configuration, so `make CONF= hotspot` will build the
-`hotspot` target for all configurations. Alternatively, you can execute `make`
-in the configuration directory, e.g. `cd build/<name> && make`.
+Then you can build that configuration using `make CONF=<selector>`, where
+`<selector>` is interpreted as follows:
+
+* If `<selector>` exactly matches the name of a configuration, this and only
+  this configuration will be selected.
+* If `<selector>` matches (i.e. is a substring of) the names of several
+  configurations, then all these configurations will be selected.
+* If `<selector>` is empty (i.e. `CONF=`), then all configurations will be
+  selected.
+* If `<selector>` begins with `!`, then all configurations **not** matching the
+  string following `!` will be selected.
+
+A more specialized version, `CONF_NAME=<name>` also exists, which will only
+match if the given `<name>` exactly matches a single configuration.
+
+Alternatively, you can execute `make` in the configuration directory, e.g. `cd
+build/<name> && make`.
+
### Handling Reconfigurations
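As an illustration of the selector rules documented above (the configuration names used here are hypothetical, not taken from the patch), typical invocations look like this:

```
# Exact match: selects only the configuration named linux-x64-server-release
make CONF=linux-x64-server-release images

# Substring match: selects every configuration whose name contains "debug"
make CONF=debug hotspot

# Empty selector: selects all configurations
make CONF= clean

# Negated selector: selects all configurations NOT containing "debug"
make CONF='!debug' images
```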
diff --git a/make/Global.gmk b/make/Global.gmk
index e5e76b475b941..1df6c5fb6bc4b 100644
--- a/make/Global.gmk
+++ b/make/Global.gmk
@@ -87,10 +87,9 @@ help:
$(info $(_) # (gensrc, java, copy, libs, launchers, gendata))
$(info )
$(info Make control variables)
- $(info $(_) CONF= # Build all configurations (note, assignment is empty))
- $(info $(_) CONF= # Build the configuration(s) with a name matching)
- $(info $(_) # )
- $(info $(_) CONF_NAME= # Build the configuration with exactly the )
+ $(info $(_) CONF= # Select which configuration(s) to build)
+ $(info $(_) CONF= # Select all configurations (note, assignment is empty))
+ $(info $(_) CONF_NAME= # Select the configuration with the name )
$(info $(_) SPEC= # Build the configuration given by the spec file)
$(info $(_) LOG= # Change the log level from warn to )
$(info $(_) # Available log levels are:)
diff --git a/make/Hsdis.gmk b/make/Hsdis.gmk
index 7496a3a2cf1b4..6de0e628a5288 100644
--- a/make/Hsdis.gmk
+++ b/make/Hsdis.gmk
@@ -1,5 +1,5 @@
#
-# Copyright (c) 2020, 2023, Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2020, 2024, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it
@@ -38,7 +38,7 @@ HSDIS_OUTPUT_DIR := $(SUPPORT_OUTPUTDIR)/hsdis
REAL_HSDIS_NAME := hsdis-$(OPENJDK_TARGET_CPU_LEGACY_LIB)$(SHARED_LIBRARY_SUFFIX)
BUILT_HSDIS_LIB := $(HSDIS_OUTPUT_DIR)/$(REAL_HSDIS_NAME)
-HSDIS_TOOLCHAIN := TOOLCHAIN_DEFAULT
+HSDIS_LINK_TYPE := C
HSDIS_TOOLCHAIN_CFLAGS := $(CFLAGS_JDKLIB)
HSDIS_TOOLCHAIN_LDFLAGS := $(LDFLAGS_JDKLIB)
@@ -59,8 +59,8 @@ endif
ifeq ($(HSDIS_BACKEND), llvm)
# Use C++ instead of C
+ HSDIS_LINK_TYPE := C++
HSDIS_TOOLCHAIN_CFLAGS := $(CXXFLAGS_JDKLIB)
- HSDIS_TOOLCHAIN := TOOLCHAIN_LINK_CXX
ifeq ($(call isTargetOs, linux), true)
LLVM_OS := pc-linux-gnu
@@ -91,14 +91,11 @@ ifeq ($(HSDIS_BACKEND), binutils)
endif
endif
- $(eval $(call DefineNativeToolchain, TOOLCHAIN_MINGW, \
- CC := $(MINGW_BASE)-gcc, \
- LD := $(MINGW_BASE)-ld, \
- OBJCOPY := $(MINGW_BASE)-objcopy, \
- RC := $(RC), \
- SYSROOT_CFLAGS := --sysroot=$(MINGW_SYSROOT), \
- SYSROOT_LDFLAGS := --sysroot=$(MINGW_SYSROOT), \
- ))
+ BUILD_HSDIS_CC := $(MINGW_BASE)-gcc
+ BUILD_HSDIS_LD := $(MINGW_BASE)-ld
+ BUILD_HSDIS_OBJCOPY := $(MINGW_BASE)-objcopy
+ BUILD_HSDIS_SYSROOT_CFLAGS := --sysroot=$(MINGW_SYSROOT)
+ BUILD_HSDIS_SYSROOT_LDFLAGS := --sysroot=$(MINGW_SYSROOT)
MINGW_SYSROOT_LIB_PATH := $(MINGW_SYSROOT)/mingw/lib
ifeq ($(wildcard $(MINGW_SYSROOT_LIB_PATH)), )
@@ -122,8 +119,8 @@ ifeq ($(HSDIS_BACKEND), binutils)
TOOLCHAIN_TYPE := gcc
OPENJDK_TARGET_OS := linux
+ OPENJDK_TARGET_OS_TYPE := unix
CC_OUT_OPTION := -o$(SPACE)
- LD_OUT_OPTION := -o$(SPACE)
GENDEPS_FLAGS := -MMD -MF
CFLAGS_DEBUG_SYMBOLS := -g
DISABLED_WARNINGS :=
@@ -131,7 +128,6 @@ ifeq ($(HSDIS_BACKEND), binutils)
CFLAGS_WARNINGS_ARE_ERRORS := -Werror
SHARED_LIBRARY_FLAGS := -shared
- HSDIS_TOOLCHAIN := TOOLCHAIN_MINGW
HSDIS_TOOLCHAIN_CFLAGS :=
HSDIS_TOOLCHAIN_LDFLAGS := -L$(MINGW_GCC_LIB_PATH) -L$(MINGW_SYSROOT_LIB_PATH)
MINGW_DLLCRT := $(MINGW_SYSROOT_LIB_PATH)/dllcrt2.o
@@ -144,9 +140,9 @@ endif
$(eval $(call SetupJdkLibrary, BUILD_HSDIS, \
NAME := hsdis, \
+ LINK_TYPE := $(HSDIS_LINK_TYPE), \
SRC := $(TOPDIR)/src/utils/hsdis/$(HSDIS_BACKEND), \
EXTRA_HEADER_DIRS := $(TOPDIR)/src/utils/hsdis, \
- TOOLCHAIN := $(HSDIS_TOOLCHAIN), \
OUTPUT_DIR := $(HSDIS_OUTPUT_DIR), \
OBJECT_DIR := $(HSDIS_OUTPUT_DIR), \
DISABLED_WARNINGS_gcc := undef format-nonliteral sign-compare, \
diff --git a/make/InitSupport.gmk b/make/InitSupport.gmk
index 31c80e2f7267f..4b14c4f9ad951 100644
--- a/make/InitSupport.gmk
+++ b/make/InitSupport.gmk
@@ -1,5 +1,5 @@
#
-# Copyright (c) 2011, 2022, Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2011, 2024, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it
@@ -202,8 +202,14 @@ ifeq ($(HAS_SPEC),)
matching_confs := $$(strip $$(all_confs))
else
# Otherwise select those that contain the given CONF string
- matching_confs := $$(strip $$(foreach var, $$(all_confs), \
- $$(if $$(findstring $$(CONF), $$(var)), $$(var))))
+ ifeq ($$(patsubst !%,,$$(CONF)),)
+ # A CONF starting with ! means we should negate the search term
+ matching_confs := $$(strip $$(foreach var, $$(all_confs), \
+ $$(if $$(findstring $$(subst !,,$$(CONF)), $$(var)), ,$$(var))))
+ else
+ matching_confs := $$(strip $$(foreach var, $$(all_confs), \
+ $$(if $$(findstring $$(CONF), $$(var)), $$(var))))
+ endif
ifneq ($$(filter $$(CONF), $$(matching_confs)), )
# If we found an exact match, use that
matching_confs := $$(CONF)
@@ -421,8 +427,9 @@ else # $(HAS_SPEC)=true
# Cleanup after a compare build
define CleanupCompareBuild
- # If running with a COMPARE_BUILD patch, reverse-apply it
- $(if $(COMPARE_BUILD_PATCH), cd $(topdir) && $(PATCH) -R -p1 < $(COMPARE_BUILD_PATCH))
+ # If running with a COMPARE_BUILD patch, reverse-apply it, but continue
+ # even if that fails (can happen with removed files).
+ $(if $(COMPARE_BUILD_PATCH), cd $(topdir) && $(PATCH) -R -p1 < $(COMPARE_BUILD_PATCH) || true)
# Move this build away and restore the original build
$(MKDIR) -p $(topdir)/build/compare-build
$(MV) $(OUTPUTDIR) $(COMPARE_BUILD_OUTPUTDIR)
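A minimal standalone sketch of the negated-CONF selection introduced in the InitSupport.gmk hunk above, written with single `$` instead of the `$$` escaping needed inside the define, and using two hypothetical configuration names:

```make
# Hypothetical configuration list; a CONF starting with '!' negates the match.
all_confs := linux-x64 linux-x64-debug
CONF := !debug

ifeq ($(patsubst !%,,$(CONF)),)
  # Keep only configurations NOT containing the term after '!'
  matching_confs := $(strip $(foreach var, $(all_confs), \
      $(if $(findstring $(subst !,,$(CONF)), $(var)), ,$(var))))
else
  # Keep configurations containing the term
  matching_confs := $(strip $(foreach var, $(all_confs), \
      $(if $(findstring $(CONF), $(var)), $(var))))
endif

# Evaluating this file prints: matching_confs = linux-x64
$(info matching_confs = $(matching_confs))

.PHONY: demo
demo: ;
```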
diff --git a/make/autoconf/build-aux/pkg.m4 b/make/autoconf/build-aux/pkg.m4
index 5f4b22bb27f05..ddb685e9bc35f 100644
--- a/make/autoconf/build-aux/pkg.m4
+++ b/make/autoconf/build-aux/pkg.m4
@@ -25,7 +25,7 @@
# questions.
#
-#
+#
# Copyright © 2004 Scott James Remnant .
#
# This program is free software; you can redistribute it and/or modify
@@ -54,18 +54,18 @@ AC_DEFUN([PKG_PROG_PKG_CONFIG],
m4_pattern_allow([^PKG_CONFIG(_PATH)?$])
AC_ARG_VAR([PKG_CONFIG], [path to pkg-config utility])dnl
if test "x$ac_cv_env_PKG_CONFIG_set" != "xset"; then
- AC_PATH_TOOL([PKG_CONFIG], [pkg-config])
+ AC_PATH_TOOL([PKG_CONFIG], [pkg-config])
fi
if test -n "$PKG_CONFIG"; then
- _pkg_min_version=m4_default([$1], [0.9.0])
- AC_MSG_CHECKING([pkg-config is at least version $_pkg_min_version])
- if $PKG_CONFIG --atleast-pkgconfig-version $_pkg_min_version; then
- AC_MSG_RESULT([yes])
- else
- AC_MSG_RESULT([no])
- PKG_CONFIG=""
- fi
-
+ _pkg_min_version=m4_default([$1], [0.9.0])
+ AC_MSG_CHECKING([pkg-config is at least version $_pkg_min_version])
+ if $PKG_CONFIG --atleast-pkgconfig-version $_pkg_min_version; then
+ AC_MSG_RESULT([yes])
+ else
+ AC_MSG_RESULT([no])
+ PKG_CONFIG=""
+ fi
+
fi[]dnl
])# PKG_PROG_PKG_CONFIG
@@ -97,7 +97,7 @@ m4_define([_PKG_CONFIG],
elif test -n "$PKG_CONFIG"; then
PKG_CHECK_EXISTS([$3],
[pkg_cv_[]$1=`$PKG_CONFIG --[]$2 "$3" 2>/dev/null`],
- [pkg_failed=yes])
+ [pkg_failed=yes])
else
pkg_failed=untried
fi[]dnl
@@ -143,14 +143,14 @@ See the pkg-config man page for more details.])
if test $pkg_failed = yes; then
_PKG_SHORT_ERRORS_SUPPORTED
if test $_pkg_short_errors_supported = yes; then
- $1[]_PKG_ERRORS=`$PKG_CONFIG --short-errors --print-errors "$2" 2>&1`
- else
- $1[]_PKG_ERRORS=`$PKG_CONFIG --print-errors "$2" 2>&1`
+ $1[]_PKG_ERRORS=`$PKG_CONFIG --short-errors --print-errors "$2" 2>&1`
+ else
+ $1[]_PKG_ERRORS=`$PKG_CONFIG --print-errors "$2" 2>&1`
fi
- # Put the nasty error message in config.log where it belongs
- echo "$$1[]_PKG_ERRORS" >&AS_MESSAGE_LOG_FD
+ # Put the nasty error message in config.log where it belongs
+ echo "$$1[]_PKG_ERRORS" >&AS_MESSAGE_LOG_FD
- ifelse([$4], , [AC_MSG_ERROR(dnl
+ ifelse([$4], , [AC_MSG_ERROR(dnl
[Package requirements ($2) were not met:
$$1_PKG_ERRORS
@@ -160,10 +160,10 @@ installed software in a non-standard prefix.
_PKG_TEXT
])],
- [AC_MSG_RESULT([no])
+ [AC_MSG_RESULT([no])
$4])
elif test $pkg_failed = untried; then
- ifelse([$4], , [AC_MSG_FAILURE(dnl
+ ifelse([$4], , [AC_MSG_FAILURE(dnl
[The pkg-config script could not be found or is too old. Make sure it
is in your PATH or set the PKG_CONFIG environment variable to the full
path to pkg-config.
@@ -171,11 +171,11 @@ path to pkg-config.
_PKG_TEXT
To get pkg-config, see .])],
- [$4])
+ [$4])
else
- $1[]_CFLAGS=$pkg_cv_[]$1[]_CFLAGS
- $1[]_LIBS=$pkg_cv_[]$1[]_LIBS
+ $1[]_CFLAGS=$pkg_cv_[]$1[]_CFLAGS
+ $1[]_LIBS=$pkg_cv_[]$1[]_LIBS
AC_MSG_RESULT([yes])
- ifelse([$3], , :, [$3])
+ ifelse([$3], , :, [$3])
fi[]dnl
])# PKG_CHECK_MODULES
diff --git a/make/autoconf/buildjdk-spec.gmk.template b/make/autoconf/buildjdk-spec.gmk.template
index 993ed50390210..924389b94e8b0 100644
--- a/make/autoconf/buildjdk-spec.gmk.template
+++ b/make/autoconf/buildjdk-spec.gmk.template
@@ -1,5 +1,5 @@
#
-# Copyright (c) 2015, 2023, Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2015, 2024, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it
@@ -40,6 +40,7 @@ LDCXX := @BUILD_LDCXX@
AS := @BUILD_AS@
NM := @BUILD_NM@
AR := @BUILD_AR@
+LIB := @BUILD_LIB@
OBJCOPY := @BUILD_OBJCOPY@
STRIP := @BUILD_STRIP@
SYSROOT_CFLAGS := @BUILD_SYSROOT_CFLAGS@
diff --git a/make/autoconf/flags-cflags.m4 b/make/autoconf/flags-cflags.m4
index e95f32f4f7d6f..efc8025a074cf 100644
--- a/make/autoconf/flags-cflags.m4
+++ b/make/autoconf/flags-cflags.m4
@@ -40,7 +40,6 @@ AC_DEFUN([FLAGS_SETUP_SHARED_LIBS],
SET_EXECUTABLE_ORIGIN='-Wl,-rpath,\$$ORIGIN[$]1'
SET_SHARED_LIBRARY_ORIGIN="-Wl,-z,origin $SET_EXECUTABLE_ORIGIN"
SET_SHARED_LIBRARY_NAME='-Wl,-soname=[$]1'
- SET_SHARED_LIBRARY_MAPFILE='-Wl,-version-script=[$]1'
elif test "x$TOOLCHAIN_TYPE" = xclang; then
if test "x$OPENJDK_TARGET_OS" = xmacosx; then
@@ -49,7 +48,6 @@ AC_DEFUN([FLAGS_SETUP_SHARED_LIBS],
SET_EXECUTABLE_ORIGIN='-Wl,-rpath,@loader_path$(or [$]1,/.)'
SET_SHARED_LIBRARY_ORIGIN="$SET_EXECUTABLE_ORIGIN"
SET_SHARED_LIBRARY_NAME='-Wl,-install_name,@rpath/[$]1'
- SET_SHARED_LIBRARY_MAPFILE='-Wl,-exported_symbols_list,[$]1'
elif test "x$OPENJDK_TARGET_OS" = xaix; then
# Linking is different on aix
@@ -57,14 +55,12 @@ AC_DEFUN([FLAGS_SETUP_SHARED_LIBS],
SET_EXECUTABLE_ORIGIN=""
SET_SHARED_LIBRARY_ORIGIN=''
SET_SHARED_LIBRARY_NAME=''
- SET_SHARED_LIBRARY_MAPFILE=''
else
# Default works for linux, might work on other platforms as well.
SHARED_LIBRARY_FLAGS='-shared'
SET_EXECUTABLE_ORIGIN='-Wl,-rpath,\$$ORIGIN[$]1'
SET_SHARED_LIBRARY_NAME='-Wl,-soname=[$]1'
- SET_SHARED_LIBRARY_MAPFILE='-Wl,-version-script=[$]1'
# arm specific settings
if test "x$OPENJDK_TARGET_CPU" = "xarm"; then
@@ -80,20 +76,17 @@ AC_DEFUN([FLAGS_SETUP_SHARED_LIBS],
SET_EXECUTABLE_ORIGIN=""
SET_SHARED_LIBRARY_ORIGIN=''
SET_SHARED_LIBRARY_NAME=''
- SET_SHARED_LIBRARY_MAPFILE=''
elif test "x$TOOLCHAIN_TYPE" = xmicrosoft; then
SHARED_LIBRARY_FLAGS="-dll"
SET_EXECUTABLE_ORIGIN=''
SET_SHARED_LIBRARY_ORIGIN=''
SET_SHARED_LIBRARY_NAME=''
- SET_SHARED_LIBRARY_MAPFILE='-def:[$]1'
fi
AC_SUBST(SET_EXECUTABLE_ORIGIN)
AC_SUBST(SET_SHARED_LIBRARY_ORIGIN)
AC_SUBST(SET_SHARED_LIBRARY_NAME)
- AC_SUBST(SET_SHARED_LIBRARY_MAPFILE)
AC_SUBST(SHARED_LIBRARY_FLAGS)
])
@@ -122,6 +115,11 @@ AC_DEFUN([FLAGS_SETUP_DEBUG_SYMBOLS],
# Add debug prefix map gcc system include paths, as they cause
# non-deterministic debug paths depending on gcc path location.
DEBUG_PREFIX_MAP_GCC_INCLUDE_PATHS
+
+ # Add debug prefix map for OUTPUTDIR to handle the scenario when
+ # it is not located within WORKSPACE_ROOT
+ outputdir_slash="${OUTPUTDIR%/}/"
+ DEBUG_PREFIX_CFLAGS="$DEBUG_PREFIX_CFLAGS -fdebug-prefix-map=${outputdir_slash}="
]
)
fi
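For illustration only (the path is hypothetical), the fragment above maps an output directory that lives outside the workspace to a relative debug prefix:

```
outputdir_slash="${OUTPUTDIR%/}/"      # e.g. /home/user/builds/jdk-linux-x64/
DEBUG_PREFIX_CFLAGS="$DEBUG_PREFIX_CFLAGS -fdebug-prefix-map=${outputdir_slash}="
# With the hypothetical OUTPUTDIR above, this adds:
#   -fdebug-prefix-map=/home/user/builds/jdk-linux-x64/=
# so paths under the output directory are recorded as relative in the debug info.
```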
diff --git a/make/autoconf/flags-ldflags.m4 b/make/autoconf/flags-ldflags.m4
index 195c1d341595f..58bc4a44bfbdf 100644
--- a/make/autoconf/flags-ldflags.m4
+++ b/make/autoconf/flags-ldflags.m4
@@ -1,5 +1,5 @@
#
-# Copyright (c) 2011, 2023, Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2011, 2024, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it
@@ -93,7 +93,7 @@ AC_DEFUN([FLAGS_SETUP_LDFLAGS_HELPER],
BASIC_LDFLAGS_JVM_ONLY="-Wl,-lC_r -bbigtoc"
elif test "x$TOOLCHAIN_TYPE" = xmicrosoft; then
- BASIC_LDFLAGS="-nologo -opt:ref"
+ BASIC_LDFLAGS="-opt:ref"
BASIC_LDFLAGS_JDK_ONLY="-incremental:no"
BASIC_LDFLAGS_JVM_ONLY="-opt:icf,8 -subsystem:windows"
fi
diff --git a/make/autoconf/flags-other.m4 b/make/autoconf/flags-other.m4
index 7e2521ffef3b0..8d4d405b07639 100644
--- a/make/autoconf/flags-other.m4
+++ b/make/autoconf/flags-other.m4
@@ -1,5 +1,5 @@
#
-# Copyright (c) 2011, 2023, Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2011, 2024, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it
@@ -33,9 +33,6 @@ AC_DEFUN([FLAGS_SETUP_ARFLAGS],
# FIXME: figure out if we should select AR flags depending on OS or toolchain.
if test "x$OPENJDK_TARGET_OS" = xaix; then
ARFLAGS="-X64"
- elif test "x$OPENJDK_TARGET_OS" = xwindows; then
- # lib.exe is used as AR to create static libraries.
- ARFLAGS="-nologo -NODEFAULTLIB:MSVCRT"
else
ARFLAGS=""
fi
@@ -43,6 +40,18 @@ AC_DEFUN([FLAGS_SETUP_ARFLAGS],
AC_SUBST(ARFLAGS)
])
+AC_DEFUN([FLAGS_SETUP_LIBFLAGS],
+[
+ # LIB is used to create static libraries on Windows
+ if test "x$OPENJDK_TARGET_OS" = xwindows; then
+ LIBFLAGS="-nodefaultlib:msvcrt"
+ else
+ LIBFLAGS=""
+ fi
+
+ AC_SUBST(LIBFLAGS)
+])
+
AC_DEFUN([FLAGS_SETUP_STRIPFLAGS],
[
## Setup strip.
diff --git a/make/autoconf/flags.m4 b/make/autoconf/flags.m4
index 8c029f7d2f58f..147382f398eed 100644
--- a/make/autoconf/flags.m4
+++ b/make/autoconf/flags.m4
@@ -1,5 +1,5 @@
#
-# Copyright (c) 2011, 2023, Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2011, 2024, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it
@@ -364,24 +364,12 @@ AC_DEFUN([FLAGS_SETUP_TOOLCHAIN_CONTROL],
if test "x$TOOLCHAIN_TYPE" = xmicrosoft; then
CC_OUT_OPTION=-Fo
- LD_OUT_OPTION=-out:
- AR_OUT_OPTION=-out:
else
# The option used to specify the target .o,.a or .so file.
# When compiling, how to specify the to be created object file.
CC_OUT_OPTION='-o$(SPACE)'
- # When linking, how to specify the output
- LD_OUT_OPTION='-o$(SPACE)'
- # When archiving, how to specify the destination static archive.
- if test "x$OPENJDK_TARGET_OS" = xmacosx; then
- AR_OUT_OPTION='-r -cs$(SPACE)'
- else
- AR_OUT_OPTION='-rcs$(SPACE)'
- fi
fi
AC_SUBST(CC_OUT_OPTION)
- AC_SUBST(LD_OUT_OPTION)
- AC_SUBST(AR_OUT_OPTION)
# Generate make dependency files
if test "x$TOOLCHAIN_TYPE" = xgcc; then
@@ -423,6 +411,7 @@ AC_DEFUN([FLAGS_SETUP_FLAGS],
FLAGS_SETUP_LDFLAGS
FLAGS_SETUP_ARFLAGS
+ FLAGS_SETUP_LIBFLAGS
FLAGS_SETUP_STRIPFLAGS
FLAGS_SETUP_RCFLAGS
FLAGS_SETUP_NMFLAGS
diff --git a/make/autoconf/spec.gmk.template b/make/autoconf/spec.gmk.template
index e9d53fcd77a56..863a51eeb4afa 100644
--- a/make/autoconf/spec.gmk.template
+++ b/make/autoconf/spec.gmk.template
@@ -498,8 +498,6 @@ COMPILER_COMMAND_FILE_FLAG := @COMPILER_COMMAND_FILE_FLAG@
COMPILER_BINDCMD_FILE_FLAG := @COMPILER_BINDCMD_FILE_FLAG@
CC_OUT_OPTION := @CC_OUT_OPTION@
-LD_OUT_OPTION := @LD_OUT_OPTION@
-AR_OUT_OPTION := @AR_OUT_OPTION@
# Flags used for overriding the default opt setting for a C/C++ source file.
C_O_FLAG_HIGHEST_JVM := @C_O_FLAG_HIGHEST_JVM@
@@ -604,10 +602,10 @@ BUILD_SYSROOT_LDFLAGS := @BUILD_SYSROOT_LDFLAGS@
AS := @AS@
-# AR is used to create a static library (is ar in unix, lib.exe in windows)
AR := @AR@
ARFLAGS := @ARFLAGS@
-
+LIB := @LIB@
+LIBFLAGS := @LIBFLAGS@
NM := @NM@
NMFLAGS := @NMFLAGS@
STRIP := @STRIP@
@@ -619,10 +617,6 @@ INSTALL_NAME_TOOL := @INSTALL_NAME_TOOL@
METAL := @METAL@
METALLIB := @METALLIB@
-# Options to linker to specify a mapfile.
-# (Note absence of := assignment, because we do not want to evaluate the macro body here)
-SET_SHARED_LIBRARY_MAPFILE = @SET_SHARED_LIBRARY_MAPFILE@
-
#
# Options for generating debug symbols
COMPILE_WITH_DEBUG_SYMBOLS := @COMPILE_WITH_DEBUG_SYMBOLS@
diff --git a/make/autoconf/toolchain.m4 b/make/autoconf/toolchain.m4
index 7a24815d163f5..6a29529c5c5f5 100644
--- a/make/autoconf/toolchain.m4
+++ b/make/autoconf/toolchain.m4
@@ -732,11 +732,10 @@ AC_DEFUN_ONCE([TOOLCHAIN_DETECT_TOOLCHAIN_CORE],
AC_SUBST(AS)
#
- # Setup the archiver (AR)
+ # Setup tools for creating static libraries (AR/LIB)
#
if test "x$TOOLCHAIN_TYPE" = xmicrosoft; then
- # The corresponding ar tool is lib.exe (used to create static libraries)
- UTIL_LOOKUP_TOOLCHAIN_PROGS(AR, lib)
+ UTIL_LOOKUP_TOOLCHAIN_PROGS(LIB, lib)
elif test "x$TOOLCHAIN_TYPE" = xgcc; then
UTIL_LOOKUP_TOOLCHAIN_PROGS(AR, ar gcc-ar)
else
diff --git a/make/common/MakeBase.gmk b/make/common/MakeBase.gmk
index 252d9dd50da68..3858b652ee65c 100644
--- a/make/common/MakeBase.gmk
+++ b/make/common/MakeBase.gmk
@@ -1,5 +1,5 @@
#
-# Copyright (c) 2011, 2022, Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2011, 2024, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it
@@ -152,6 +152,10 @@ define SetupLogging
endif
endif
+ ifneq ($$(findstring $$(LOG_LEVEL), debug trace),)
+ SHELL := $$(SHELL) -x
+ endif
+
ifeq ($$(LOG_LEVEL), trace)
SHELL_NO_RECURSE := $$(SHELL)
# Shell redefinition trick inspired by http://www.cmcrossroads.com/ask-mr-make/6535-tracing-rule-execution-in-gnu-make
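A brief usage illustration (the targets shown are the usual top-level ones): with the change above, the recipe shell is invoked with `-x` when the log level is raised, so every executed command is echoed into the build log.

```
# Recipes now run under 'sh -x' at these log levels:
make LOG=debug images
make LOG=trace hotspot
```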
diff --git a/make/common/NativeCompilation.gmk b/make/common/NativeCompilation.gmk
index 68d1dba27ffcc..13b0318b4c776 100644
--- a/make/common/NativeCompilation.gmk
+++ b/make/common/NativeCompilation.gmk
@@ -1,5 +1,5 @@
#
-# Copyright (c) 2011, 2023, Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2011, 2024, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it
@@ -23,9 +23,11 @@
# questions.
#
-# When you read this source. Remember that $(sort ...) has the side effect
-# of removing duplicates. It is actually this side effect that is
-# desired whenever sort is used below!
+################################################################################
+# This is the top-level entry point for our native compilation and linking.
+# It contains the SetupNativeCompilation macro, but is supported by helper
+# macros in the make/common/native directory.
+################################################################################
ifndef _NATIVE_COMPILATION_GMK
_NATIVE_COMPILATION_GMK := 1
@@ -34,463 +36,14 @@ ifeq ($(_MAKEBASE_GMK), )
$(error You must include MakeBase.gmk prior to including NativeCompilation.gmk)
endif
-################################################################################
-# Create exported symbols file for static libraries
-################################################################################
-
-# get the exported symbols from mapfiles and if there
-# is no mapfile, get them from the archive
-define GetSymbols
- $(RM) $$(@D)/$$(basename $$(@F)).symbols; \
- if [ ! -z $$($1_MAPFILE) -a -e $$($1_MAPFILE) ]; then \
- $(ECHO) "Getting symbols from mapfile $$($1_MAPFILE)"; \
- $(AWK) '/global:/','/local:/' $$($1_MAPFILE) | \
- $(SED) -e 's/#.*//;s/global://;s/local://;s/\;//;s/^[ ]*/_/;/^_$$$$/d' | \
- $(EGREP) -v "JNI_OnLoad|JNI_OnUnload|Agent_OnLoad|Agent_OnUnload|Agent_OnAttach" > \
- $$(@D)/$$(basename $$(@F)).symbols || true; \
- $(NM) $(NMFLAGS) $$($1_TARGET) | $(GREP) " T " | \
- $(EGREP) "JNI_OnLoad|JNI_OnUnload|Agent_OnLoad|Agent_OnUnload|Agent_OnAttach" | \
- $(CUT) -d ' ' -f 3 >> $$(@D)/$$(basename $$(@F)).symbols || true;\
- else \
- $(ECHO) "Getting symbols from nm"; \
- $(NM) $(NMFLAGS) -m $$($1_TARGET) | $(GREP) "__TEXT" | \
- $(EGREP) -v "non-external|private extern|__TEXT,__eh_frame" | \
- $(SED) -e 's/.* //' > $$(@D)/$$(basename $$(@F)).symbols; \
- fi
-endef
-
-################################################################################
-# Creates a recipe that creates a compile_commands.json fragment. Remove any
-# occurrences of FIXPATH programs from the command to show the actual invocation.
-#
-# Param 1: Name of file to create
-# Param 2: Working directory
-# Param 3: Source file
-# Param 4: Compile command
-################################################################################
-define WriteCompileCommandsFragment
- $(call LogInfo, Creating compile commands fragment for $(notdir $3))
- $(call MakeDir, $(dir $1))
- $(call WriteFile,{ \
- "directory": "$(strip $(call FixPath, $2))"$(COMMA) \
- "file": "$(strip $(call FixPath, $3))"$(COMMA) \
- "command": "$(strip $(subst $(DQUOTE),\$(DQUOTE),$(subst \,\\,\
- $(subst $(FIXPATH),,$(call FixPath, $4)))))" \
- }$(COMMA), \
- $1)
-endef
-
-################################################################################
-# Define a native toolchain configuration that can be used by
-# SetupNativeCompilation calls
-#
-# Parameter 1 is the name of the toolchain definition
-#
-# Remaining parameters are named arguments:
-# EXTENDS - Optional parent definition to get defaults from
-# CC - The C compiler
-# CXX - The C++ compiler
-# LD - The Linker
-# AR - Static linker
-# AS - Assembler
-# MT - Windows MT tool
-# RC - Windows RC tool
-# OBJCOPY - The objcopy tool for debug symbol handling
-# STRIP - The tool to use for stripping debug symbols
-# SYSROOT_CFLAGS - Compiler flags for using the specific sysroot
-# SYSROOT_LDFLAGS - Linker flags for using the specific sysroot
-DefineNativeToolchain = $(NamedParamsMacroTemplate)
-define DefineNativeToolchainBody
- # If extending another definition, get default values from that,
- # otherwise, nothing more needs to be done as variable assignments
- # already happened in NamedParamsMacroTemplate.
- ifneq ($$($1_EXTENDS), )
- $$(call SetIfEmpty, $1_CC, $$($$($1_EXTENDS)_CC))
- $$(call SetIfEmpty, $1_CXX, $$($$($1_EXTENDS)_CXX))
- $$(call SetIfEmpty, $1_LD, $$($$($1_EXTENDS)_LD))
- $$(call SetIfEmpty, $1_AR, $$($$($1_EXTENDS)_AR))
- $$(call SetIfEmpty, $1_AS, $$($$($1_EXTENDS)_AS))
- $$(call SetIfEmpty, $1_MT, $$($$($1_EXTENDS)_MT))
- $$(call SetIfEmpty, $1_RC, $$($$($1_EXTENDS)_RC))
- $$(call SetIfEmpty, $1_OBJCOPY, $$($$($1_EXTENDS)_OBJCOPY))
- $$(call SetIfEmpty, $1_STRIP, $$($$($1_EXTENDS)_STRIP))
- $$(call SetIfEmpty, $1_SYSROOT_CFLAGS, $$($$($1_EXTENDS)_SYSROOT_CFLAGS))
- $$(call SetIfEmpty, $1_SYSROOT_LDFLAGS, $$($$($1_EXTENDS)_SYSROOT_LDFLAGS))
- endif
-endef
-
-# Create a default toolchain with the main compiler and linker
-$(eval $(call DefineNativeToolchain, TOOLCHAIN_DEFAULT, \
- CC := $(CC), \
- CXX := $(CXX), \
- LD := $(LD), \
- AR := $(AR), \
- AS := $(AS), \
- MT := $(MT), \
- RC := $(RC), \
- OBJCOPY := $(OBJCOPY), \
- STRIP := $(STRIP), \
- SYSROOT_CFLAGS := $(SYSROOT_CFLAGS), \
- SYSROOT_LDFLAGS := $(SYSROOT_LDFLAGS), \
-))
-
-# Create a toolchain where linking is done with the C++ linker
-$(eval $(call DefineNativeToolchain, TOOLCHAIN_LINK_CXX, \
- EXTENDS := TOOLCHAIN_DEFAULT, \
- LD := $(LDCXX), \
-))
-
-# Create a toolchain with the BUILD compiler, used for build tools that
-# are to be run during the build.
-$(eval $(call DefineNativeToolchain, TOOLCHAIN_BUILD, \
- CC := $(BUILD_CC), \
- CXX := $(BUILD_CXX), \
- LD := $(BUILD_LD), \
- AR := $(BUILD_AR), \
- AS := $(BUILD_AS), \
- OBJCOPY := $(BUILD_OBJCOPY), \
- STRIP := $(BUILD_STRIP), \
- SYSROOT_CFLAGS := $(BUILD_SYSROOT_CFLAGS), \
- SYSROOT_LDFLAGS := $(BUILD_SYSROOT_LDFLAGS), \
-))
-
-# BUILD toolchain with the C++ linker
-$(eval $(call DefineNativeToolchain, TOOLCHAIN_BUILD_LINK_CXX, \
- EXTENDS := TOOLCHAIN_BUILD, \
- LD := $(BUILD_LDCXX), \
-))
-
-################################################################################
-
-# Extensions of files handled by this macro.
-NATIVE_SOURCE_EXTENSIONS := %.S %.c %.cpp %.cc %.m %.mm
-
-# Replaces native source extensions with the object file extension in a string.
-# Param 1: the string containing source file names with extensions
-# The surrounding strip is needed to keep additional whitespace out
-define replace_with_obj_extension
-$(strip \
- $(foreach extension, $(NATIVE_SOURCE_EXTENSIONS), \
- $(patsubst $(extension),%$(OBJ_SUFFIX), $(filter $(extension), $1))) \
-)
-endef
-
-# This pattern is used to transform the output of the microsoft CL compiler
-# into a make syntax dependency file (.d)
-WINDOWS_SHOWINCLUDE_SED_PATTERN := \
- -e '/^Note: including file:/!d' \
- -e 's|Note: including file: *||' \
- -e 's|\r||g' \
- -e 's|\\|/|g' \
- -e 's|^\([a-zA-Z]\):|$(WINENV_PREFIX)/\1|g' \
- -e '\|$(TOPDIR)|I !d' \
- -e 's|$$$$| \\|g' \
- #
-
-# This pattern is used to transform a dependency file (.d) to a list
-# of make targets for dependent files (.d.targets)
-DEPENDENCY_TARGET_SED_PATTERN := \
- -e 's/\#.*//' \
- -e 's/^[^:]*: *//' \
- -e 's/ *\\$$$$//' \
- -e 's/^[ ]*//' \
- -e '/^$$$$/ d' \
- -e 's/$$$$/ :/' \
- #
-
-################################################################################
-# When absolute paths are not allowed in the output, and the compiler does not
-# support any options to avoid it, we need to rewrite compile commands to use
-# relative paths. By doing this, the __FILE__ macro will resolve to relative
-# paths. The relevant input paths on the command line are the -I flags and the
-# path to the source file itself.
-#
-# The macro MakeCommandRelative is used to rewrite the command line like this:
-# 'CD $(WORKSPACE_ROOT) && '
-# and changes all paths in cmd to be relative to the workspace root. This only
-# works properly if the build dir is inside the workspace root. If it's not,
-# relative paths are still calculated, but depending on the distance between the
-# dirs, paths in the build dir may end up as essentially absolute anyway.
-#
-# The fix-deps-file macro is used to adjust the contents of the generated make
-# dependency files to contain paths compatible with make.
-#
-REWRITE_PATHS_RELATIVE = false
-ifeq ($(ALLOW_ABSOLUTE_PATHS_IN_OUTPUT)-$(FILE_MACRO_CFLAGS), false-)
- REWRITE_PATHS_RELATIVE = true
-endif
-
-# CCACHE_BASEDIR needs fix-deps-file as makefiles use absolute filenames for
-# object files while CCACHE_BASEDIR will make ccache relativize all paths for
-# its compiler. The compiler then produces relative dependency files.
-# make does not know a relative and absolute filename is the same so it will
-# ignore such dependencies. This only applies when the OUTPUTDIR is inside
-# the WORKSPACE_ROOT.
-ifneq ($(CCACHE), )
- ifneq ($(filter $(WORKSPACE_ROOT)/%, $(OUTPUTDIR)), )
- REWRITE_PATHS_RELATIVE = true
- endif
-endif
-
-ifeq ($(REWRITE_PATHS_RELATIVE), true)
- # Need to handle -I flags as both '-Ifoo' and '-I foo'.
- MakeCommandRelative = \
- $(CD) $(WORKSPACE_ROOT) && \
- $(foreach o, $1, \
- $(if $(filter $(WORKSPACE_ROOT)/% $(OUTPUTDIR)/%, $o), \
- $(call RelativePath, $o, $(WORKSPACE_ROOT)) \
- , \
- $(if $(filter -I$(WORKSPACE_ROOT)/%, $o), \
- -I$(call RelativePath, $(patsubst -I%, %, $o), $(WORKSPACE_ROOT)) \
- , \
- $o \
- ) \
- ) \
- )
-
- # When compiling with relative paths, the deps file may come out with relative
- # paths, and that path may start with './'. First remove any leading ./, then
- # add WORKSPACE_ROOT to any line not starting with /, while allowing for
- # leading spaces. There may also be multiple entries on the same line, so start
- # with splitting such lines.
- # Non GNU sed (BSD on macosx) cannot substitute in literal \n using regex.
- # Instead use a bash escaped literal newline. To avoid having unmatched quotes
- # ruin the ability for an editor to properly syntax highlight this file, define
- # that newline sequence as a separate variable and add the closing quote behind
- # a comment.
- sed_newline := \'$$'\n''#'
- define fix-deps-file
- $(SED) \
- -e 's|\([^ ]\) \{1,\}\([^\\:]\)|\1 \\$(sed_newline) \2|g' \
- $1.tmp \
- | $(SED) \
- -e 's|^\([ ]*\)\./|\1|' \
- -e '/^[ ]*[^/ ]/s|^\([ ]*\)|\1$(WORKSPACE_ROOT)/|' \
- > $1
- endef
-else
- # By default the MakeCommandRelative macro does nothing.
- MakeCommandRelative = $1
-
- # No adjustment is needed.
- define fix-deps-file
- $(MV) $1.tmp $1
- endef
-endif
-
-################################################################################
-# GetEntitlementsFile
-# Find entitlements file for executable when signing on macosx. If no
-# specialized file is found, returns the default file.
-# $1 Executable to find entitlements file for.
-ENTITLEMENTS_DIR := $(TOPDIR)/make/data/macosxsigning
-ifeq ($(MACOSX_CODESIGN_MODE), debug)
- CODESIGN_PLIST_SUFFIX := -debug
-else
- CODESIGN_PLIST_SUFFIX :=
-endif
-DEFAULT_ENTITLEMENTS_FILE := $(ENTITLEMENTS_DIR)/default$(CODESIGN_PLIST_SUFFIX).plist
-
-GetEntitlementsFile = \
- $(foreach f, $(ENTITLEMENTS_DIR)/$(strip $(notdir $1))$(CODESIGN_PLIST_SUFFIX).plist, \
- $(if $(wildcard $f), $f, $(DEFAULT_ENTITLEMENTS_FILE)) \
- )
+include native/CompileFile.gmk
+include native/DebugSymbols.gmk
+include native/Flags.gmk
+include native/Link.gmk
+include native/LinkMicrosoft.gmk
+include native/Paths.gmk
################################################################################
-# Create the recipe needed to compile a single native source file.
-#
-# Parameter 1 is the name of the rule, based on the name of the library/
-# program being build and the name of the source code file, e.g.
-# BUILD_LIBFOO_fooMain.cpp.
-#
-# Remaining parameters are named arguments:
-# FILE - The full path of the source file to compiler
-# BASE - The name of the rule for the entire binary to build ($1)
-#
-SetupCompileNativeFile = $(NamedParamsMacroTemplate)
-define SetupCompileNativeFileBody
- $1_FILENAME := $$(notdir $$($1_FILE))
-
- # The target file to be generated.
- $1_OBJ := $$($$($1_BASE)_OBJECT_DIR)/$$(call replace_with_obj_extension, \
- $$($1_FILENAME))
-
- # Generate the corresponding compile_commands.json fragment.
- $1_OBJ_JSON = $$(MAKESUPPORT_OUTPUTDIR)/compile-commands/$$(subst /,_,$$(subst \
- $$(OUTPUTDIR)/,,$$($1_OBJ))).json
- $$($1_BASE)_ALL_OBJS_JSON += $$($1_OBJ_JSON)
-
- # Only continue if this object file hasn't been processed already. This lets
- # the first found source file override any other with the same name.
- ifeq ($$($1_OBJ_PROCESSED), )
- $1_OBJ_PROCESSED := true
- # This is the definite source file to use for $1_FILENAME.
- $1_SRC_FILE := $$($1_FILE)
-
- ifeq ($$($1_OPTIMIZATION), )
- $1_OPT_CFLAGS := $$($$($1_BASE)_OPT_CFLAGS)
- $1_OPT_CXXFLAGS := $$($$($1_BASE)_OPT_CXXFLAGS)
- else
- ifeq ($$($1_OPTIMIZATION), NONE)
- $1_OPT_CFLAGS := $(C_O_FLAG_NONE)
- $1_OPT_CXXFLAGS := $(CXX_O_FLAG_NONE)
- else ifeq ($$($1_OPTIMIZATION), LOW)
- $1_OPT_CFLAGS := $(C_O_FLAG_NORM)
- $1_OPT_CXXFLAGS := $(CXX_O_FLAG_NORM)
- else ifeq ($$($1_OPTIMIZATION), HIGH)
- $1_OPT_CFLAGS := $(C_O_FLAG_HI)
- $1_OPT_CXXFLAGS := $(CXX_O_FLAG_HI)
- else ifeq ($$($1_OPTIMIZATION), HIGHEST)
- $1_OPT_CFLAGS := $(C_O_FLAG_HIGHEST)
- $1_OPT_CXXFLAGS := $(CXX_O_FLAG_HIGHEST)
- else ifeq ($$($1_OPTIMIZATION), HIGHEST_JVM)
- $1_OPT_CFLAGS := $(C_O_FLAG_HIGHEST_JVM)
- $1_OPT_CXXFLAGS := $(CXX_O_FLAG_HIGHEST_JVM)
- else ifeq ($$($1_OPTIMIZATION), SIZE)
- $1_OPT_CFLAGS := $(C_O_FLAG_SIZE)
- $1_OPT_CXXFLAGS := $(CXX_O_FLAG_SIZE)
- else
- $$(error Unknown value for file OPTIMIZATION: $$($1_OPTIMIZATION))
- endif
- endif
-
- ifneq ($$($$($1_BASE)_PRECOMPILED_HEADER), )
- ifeq ($$(filter $$($1_FILENAME), $$($$($1_BASE)_PRECOMPILED_HEADER_EXCLUDE)), )
- $1_USE_PCH_FLAGS := $$($$($1_BASE)_USE_PCH_FLAGS)
- endif
- endif
-
- ifneq ($(DISABLE_WARNING_PREFIX), )
- $1_WARNINGS_FLAGS := $$(addprefix $(DISABLE_WARNING_PREFIX), \
- $$($$($1_BASE)_DISABLED_WARNINGS_$(TOOLCHAIN_TYPE)_$$($1_FILENAME)) \
- $$($$($1_BASE)_DISABLED_WARNINGS_$(TOOLCHAIN_TYPE)_$(OPENJDK_TARGET_OS)_$$($1_FILENAME)))
- endif
-
- $1_BASE_CFLAGS := $$($$($1_BASE)_CFLAGS) $$($$($1_BASE)_EXTRA_CFLAGS) \
- $$($$($1_BASE)_SYSROOT_CFLAGS)
- $1_BASE_CXXFLAGS := $$($$($1_BASE)_CXXFLAGS) $$($$($1_BASE)_EXTRA_CXXFLAGS) \
- $$($$($1_BASE)_SYSROOT_CFLAGS) $$($1_EXTRA_CXXFLAGS)
- $1_BASE_ASFLAGS := $$($$($1_BASE)_ASFLAGS) $$($$($1_BASE)_EXTRA_ASFLAGS)
-
- ifneq ($$(filter %.c, $$($1_FILENAME)), )
- # Compile as a C file
- $1_CFLAGS += $$($1_WARNINGS_FLAGS)
- $1_FLAGS := $(CFLAGS_CCACHE) $$($1_USE_PCH_FLAGS) $$($1_BASE_CFLAGS) \
- $$($1_OPT_CFLAGS) $$($1_CFLAGS) -c
- $1_COMPILER := $$($$($1_BASE)_CC)
- else ifneq ($$(filter %.m, $$($1_FILENAME)), )
- # Compile as an Objective-C file
- $1_CFLAGS += $$($1_WARNINGS_FLAGS)
- $1_FLAGS := -x objective-c $(CFLAGS_CCACHE) $$($1_USE_PCH_FLAGS) \
- $$($1_BASE_CFLAGS) $$($1_OPT_CFLAGS) $$($1_CFLAGS) -c
- $1_COMPILER := $$($$($1_BASE)_CC)
- else ifneq ($$(filter %.S, $$($1_FILENAME)), )
- # Compile as preprocessed assembler file
- $1_FLAGS := $(BASIC_ASFLAGS) $$($1_BASE_ASFLAGS)
- $1_COMPILER := $(AS)
-
- # gcc or clang assembly files must contain an appropriate relative .file
- # path for reproducible builds.
- ifneq ($(findstring $(TOOLCHAIN_TYPE), gcc clang), )
- # If no absolute paths allowed, work out relative source file path
- # for assembly .file substitution, otherwise use full file path
- ifeq ($(ALLOW_ABSOLUTE_PATHS_IN_OUTPUT), false)
- $1_REL_ASM_SRC := $$(call RelativePath, $$($1_FILE), $(WORKSPACE_ROOT))
- else
- $1_REL_ASM_SRC := $$($1_FILE)
- endif
- $1_FLAGS := $$($1_FLAGS) -DASSEMBLY_SRC_FILE='"$$($1_REL_ASM_SRC)"' \
- -include $(TOPDIR)/make/data/autoheaders/assemblyprefix.h
- endif
- else ifneq ($$(filter %.cpp %.cc %.mm, $$($1_FILENAME)), )
- # Compile as a C++ or Objective-C++ file
- $1_CXXFLAGS += $$($1_WARNINGS_FLAGS)
- $1_FLAGS := $(CFLAGS_CCACHE) $$($1_USE_PCH_FLAGS) $$($1_BASE_CXXFLAGS) \
- $$($1_OPT_CXXFLAGS) $$($1_CXXFLAGS) -c
- $1_COMPILER := $$($$($1_BASE)_CXX)
- else
- $$(error Internal error in NativeCompilation.gmk: no compiler for file $$($1_FILENAME))
- endif
-
- # And this is the dependency file for this obj file.
- $1_DEPS_FILE := $$(patsubst %$(OBJ_SUFFIX),%.d,$$($1_OBJ))
- # The dependency target file lists all dependencies as empty targets to
- # avoid make error "No rule to make target" for removed files
- $1_DEPS_TARGETS_FILE := $$(patsubst %$(OBJ_SUFFIX),%.d.targets,$$($1_OBJ))
-
- # Only try to load individual dependency information files if the global
- # file hasn't been loaded (could happen if make was interrupted).
- ifneq ($$($$($1_BASE)_DEPS_FILE_LOADED), true)
- # Include previously generated dependency information. (if it exists)
- -include $$($1_DEPS_FILE)
- -include $$($1_DEPS_TARGETS_FILE)
- endif
-
- ifneq ($$(strip $$($1_CFLAGS) $$($1_CXXFLAGS) $$($1_OPTIMIZATION)), )
- $1_VARDEPS := $$($1_CFLAGS) $$($1_CXXFLAGS) $$($1_OPTIMIZATION)
- $1_VARDEPS_FILE := $$(call DependOnVariable, $1_VARDEPS, $$($1_OBJ).vardeps)
- endif
-
- $1_OBJ_DEPS := $$($1_SRC_FILE) $$($$($1_BASE)_COMPILE_VARDEPS_FILE) \
- $$($$($1_BASE)_EXTRA_DEPS) $$($1_VARDEPS_FILE)
- $1_COMPILE_OPTIONS := $$($1_FLAGS) $(CC_OUT_OPTION)$$($1_OBJ) $$($1_SRC_FILE)
- # For reproducible builds with gcc and clang ensure random symbol generation is
- # seeded deterministically
- ifneq ($(findstring $(TOOLCHAIN_TYPE), gcc clang), )
- $1_COMPILE_OPTIONS += -frandom-seed="$$($1_FILENAME)"
- endif
-
- $$($1_OBJ_JSON): $$($1_OBJ_DEPS)
- $$(call WriteCompileCommandsFragment, $$@, $$(PWD), $$($1_SRC_FILE), \
- $$($1_COMPILER) $$($1_COMPILE_OPTIONS))
-
- $$($1_OBJ): $$($1_OBJ_DEPS) | $$($$($1_BASE)_BUILD_INFO)
- $$(call LogInfo, Compiling $$($1_FILENAME) (for $$($$($1_BASE)_BASENAME)))
- $$(call MakeDir, $$(@D))
- ifneq ($(TOOLCHAIN_TYPE), microsoft)
- $$(call ExecuteWithLog, $$@, $$(call MakeCommandRelative, \
- $$($1_COMPILER) $$(GENDEPS_FLAGS) \
- $$(addsuffix .tmp, $$($1_DEPS_FILE)) \
- $$($1_COMPILE_OPTIONS)))
- ifneq ($$($1_DEPS_FILE), )
- $$(call fix-deps-file, $$($1_DEPS_FILE))
- # Create a dependency target file from the dependency file.
- # Solution suggested by:
- # http://make.mad-scientist.net/papers/advanced-auto-dependency-generation/
- $(SED) $(DEPENDENCY_TARGET_SED_PATTERN) $$($1_DEPS_FILE) \
- > $$($1_DEPS_TARGETS_FILE)
- endif
- else
- # The Visual Studio compiler lacks a feature for generating make
- # dependencies, but by setting -showIncludes, all included files are
- # printed. These are filtered out and parsed into make dependences.
- #
- # Keep as much as possible on one execution line for best performance
- # on Windows. No need to save exit code from compilation since
- # pipefail is always active on Windows.
- ifeq ($$(filter %.S, $$($1_FILENAME)), )
- $$(call ExecuteWithLog, $$@, $$(call MakeCommandRelative, \
- $$($1_COMPILER) -showIncludes $$($1_COMPILE_OPTIONS))) \
- | $(TR) -d '\r' | $(GREP) -v -e "^Note: including file:" \
- -e "^$$($1_FILENAME)$$$$" || test "$$$$?" = "1" ; \
- $(ECHO) $$@: \\ > $$($1_DEPS_FILE) ; \
- $(SED) $(WINDOWS_SHOWINCLUDE_SED_PATTERN) $$($1_OBJ).log \
- | $(SORT) -u >> $$($1_DEPS_FILE) ; \
- $(ECHO) >> $$($1_DEPS_FILE) ; \
- $(SED) $(DEPENDENCY_TARGET_SED_PATTERN) $$($1_DEPS_FILE) > $$($1_DEPS_TARGETS_FILE)
- else
- # For assembler calls just create empty dependency lists
- $$(call ExecuteWithLog, $$@, $$(call MakeCommandRelative, \
- $$($1_COMPILER) $$($1_FLAGS) \
- $(CC_OUT_OPTION)$$($1_OBJ) -Ta $$($1_SRC_FILE))) \
- | $(TR) -d '\r' | $(GREP) -v -e "Assembling:" || test "$$$$?" = "1" ; \
- $(ECHO) > $$($1_DEPS_FILE) ; \
- $(ECHO) > $$($1_DEPS_TARGETS_FILE)
- endif
- endif
- endif
-endef
-
# Setup make rules for creating a native binary (a shared library or an
# executable).
#
@@ -501,7 +54,8 @@ endef
# NAME The base name for the resulting binary, excluding decorations (like *.exe)
# TYPE Type of binary (EXECUTABLE, LIBRARY or STATIC_LIBRARY). Default is LIBRARY.
# SUFFIX Override the default suffix for the output file
-# TOOLCHAIN Name of toolchain setup to use. Defaults to TOOLCHAIN_DEFAULT.
+# TARGET_TYPE The type to target, BUILD or TARGET. Defaults to TARGET.
+# LINK_TYPE The language to use for the linker, C or C++. Defaults to C.
# SRC one or more directory roots to scan for C/C++ files.
# CFLAGS the compiler flags to be used, used both for C and C++.
# CXXFLAGS the compiler flags to be used for c++, if set overrides CFLAGS.
@@ -519,7 +73,8 @@ endef
# used both for C and C++.
# LIBS__ the libraries to link to for the specified target
# OS and toolchain, used both for C and C++.
-# ARFLAGS the archiver flags to be used
+# ARFLAGS the archiver flags to be used on unix platforms
+# LIBFLAGS the flags for the lib tool used on windows
# OBJECT_DIR the directory where we store the object files
# OUTPUT_DIR the directory where the resulting binary is put
# SYMBOLS_DIR the directory where the debug symbols are put, defaults to OUTPUT_DIR
@@ -534,11 +89,18 @@ endef
# VERSIONINFO_RESOURCE Input file for RC. Setting this implies that RC will be run
# RCFLAGS flags for RC.
# EMBED_MANIFEST if true, embed manifest on Windows.
-# MAPFILE mapfile
-# USE_MAPFILE_FOR_SYMBOLS if true and this is a STATIC_BUILD, just copy the
-# mapfile for the output symbols file
-# CC the compiler to use, default is $(CC)
-# LD the linker to use, default is $(LD)
+# CC the C compiler to use
+# CXX the C++ compiler to use
+# LD the Linker to use
+# AR the static linker to use
+# LIB the Windows lib tool to use for creating static libraries
+# AS the assembler to use
+# MT the Windows MT tool to use
+# RC the Windows RC tool to use
+# OBJCOPY the objcopy tool for debug symbol handling
+# STRIP the tool to use for stripping debug symbols
+# SYSROOT_CFLAGS the compiler flags for using the specific sysroot
+# SYSROOT_LDFLAGS the linker flags for using the specific sysroot
# OPTIMIZATION sets optimization level to NONE, LOW, HIGH, HIGHEST, HIGHEST_JVM, SIZE
# DISABLED_WARNINGS_ Disable the given warnings for the specified toolchain
# DISABLED_WARNINGS__ Disable the given warnings for the specified
@@ -573,9 +135,122 @@ endef
# TARGET_DEPS All prerequisites for the target calculated by the macro
# ALL_OBJS All object files
# IMPORT_LIBRARY The import library created for a shared library on Windows
+#
SetupNativeCompilation = $(NamedParamsMacroTemplate)
define SetupNativeCompilationBody
+ # When reading this code, note that macros named Setup are just setting
+ # variables, and macros called Create are setting up rules to create
+ # files. Macros starting with any other verb are more complicated, and can do
+ # all of the above, and also call directly to the shell.
+
+ ###
+ ### Prepare for compilation and linking
+ ###
+
+ $$(eval $$(call VerifyArguments,$1))
+
+ # Setup variables for the rest of this macro to work with
+ $$(eval $$(call SetupBasicVariables,$1))
+
+ # Setup the toolchain to be used
+ $$(eval $$(call SetupToolchain,$1))
+
+ # Find all source files to compile and determine the output object file names
+ $$(eval $$(call SetupSourceFiles,$1))
+ $$(eval $$(call SetupOutputFiles,$1))
+
+ # Setup CFLAGS/CXXFLAGS based on warnings, optimizations, extra flags etc.
+ $$(eval $$(call SetupCompilerFlags,$1))
+
+ # Machinery needed for the build to function properly
+ $$(eval $$(call SetupBuildSystemSupport,$1))
+
+ $$(eval $$(call RemoveSuperfluousOutputFiles,$1))
+
+ # Need to make sure TARGET is first on list before starting to create files
+ $1 := $$($1_TARGET)
+
+ # Have make print information about the library when we start compiling
+ $$(eval $$(call PrintStartInfo,$1))
+
+ ###
+ ### Compile all native source code files
+ ###
+
+ # Create a PCH, if requested
+ $$(eval $$(call CreatePrecompiledHeader,$1))
+
+ # Now call CreateCompiledNativeFile for each source file we are going to compile.
+ $$(foreach file, $$($1_SRCS), \
+ $$(eval $$(call CreateCompiledNativeFile,$1_$$(notdir $$(file)),\
+ FILE := $$(file), \
+ BASE := $1, \
+ )) \
+ )
+
+ ifeq ($(call isTargetOs, windows), true)
+ # On windows we need to create a resource file
+ $$(eval $$(call CreateWindowsResourceFile,$1))
+ endif
+
+ # Setup a library-wide dependency file from individual object file dependency
+ # files, and import it in the makefile.
+ $$(eval $$(call CreateDependencyFile,$1))
+ $$(eval $$(call ImportDependencyFile,$1))
+
+ ###
+ ### Link the object files into a native output library/executable
+ ###
+
+ # Handle native debug symbols
+ $$(eval $$(call CreateDebugSymbols,$1))
+
+ # Prepare for linking
+ $$(eval $$(call SetupLinkerFlags,$1))
+ ifneq ($(TOOLCHAIN_TYPE), microsoft)
+ $$(eval $$(call SetupLinking,$1))
+ endif
+
+ $$(eval $$(call SetupObjectFileList,$1))
+
+ # Link the individually compiled files into a single unit
+ ifneq ($(TOOLCHAIN_TYPE), microsoft)
+ $$(eval $$(call CreateLinkedResult,$1))
+ else
+ $$(eval $$(call CreateLinkedResultMicrosoft,$1))
+ endif
+
+ ifeq ($(GENERATE_COMPILE_COMMANDS_ONLY), true)
+ # Override all targets (this is a hack)
+ $1 := $$($1_ALL_OBJS_JSON)
+ endif
+endef
+
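As a sketch of how the reworked macro is invoked with the new TARGET_TYPE/LINK_TYPE arguments (the library name and paths are hypothetical; see the real use in Hsdis.gmk above for a patch-provided example), using only arguments documented in the parameter list:

```make
# Hypothetical invocation of SetupNativeCompilation with the new arguments.
$(eval $(call SetupNativeCompilation, BUILD_LIBDEMO, \
    NAME := demo, \
    TYPE := LIBRARY, \
    LINK_TYPE := C++, \
    TARGET_TYPE := TARGET, \
    SRC := $(TOPDIR)/src/demo/native, \
    CFLAGS := $(CFLAGS_JDKLIB), \
    LDFLAGS := $(LDFLAGS_JDKLIB), \
    OUTPUT_DIR := $(SUPPORT_OUTPUTDIR)/demo, \
    OBJECT_DIR := $(SUPPORT_OUTPUTDIR)/demo, \
))
```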
+################################################################################
+# Verify that user passed arguments are valid
+define VerifyArguments
+ ifneq ($$($1_NAME), $(basename $$($1_NAME)))
+ $$(error NAME must not contain any directory path in $1)
+ endif
+ ifneq ($(findstring $$($1_SUFFIX), $$($1_NAME)), )
+ $$(error NAME should be specified without suffix: $$($1_SUFFIX) in $1)
+ endif
+ ifneq ($(findstring $$($1_PREFIX), $$($1_NAME)), )
+ $$(error NAME should be specified without prefix: $$($1_PREFIX) in $1)
+ endif
+ ifeq ($$($1_OUTPUT_DIR), )
+ $$(error OUTPUT_DIR is missing in $1)
+ endif
+ ifneq ($$($1_MANIFEST), )
+ ifeq ($$($1_MANIFEST_VERSION), )
+ $$(error If MANIFEST is provided, then MANIFEST_VERSION is required in $1)
+ endif
+ endif
+endef
+################################################################################
+# Setup basic variables
+define SetupBasicVariables
# If type is unspecified, default to LIBRARY
ifeq ($$($1_TYPE), )
$1_TYPE := LIBRARY
@@ -589,8 +264,6 @@ define SetupNativeCompilationBody
endif
endif
- $$(call SetIfEmpty, $1_COMPILE_WITH_DEBUG_SYMBOLS, $$(COMPILE_WITH_DEBUG_SYMBOLS))
-
# STATIC_LIBS is set from Main.gmk when building static versions of certain
# native libraries.
ifeq ($(STATIC_LIBS), true)
@@ -600,15 +273,6 @@ define SetupNativeCompilationBody
# jmods.
$1_OBJECT_DIR := $$($1_OBJECT_DIR)/static
$1_OUTPUT_DIR := $$($1_OBJECT_DIR)
- # For release builds where debug symbols are configured to be moved to
- # separate debuginfo files, disable debug symbols for static libs instead.
- # We don't currently support this configuration and we don't want symbol
- # information in release builds unless explicitly asked to provide it.
- ifeq ($(DEBUG_LEVEL), release)
- ifeq ($(COPY_DEBUG_SYMBOLS), true)
- $1_COMPILE_WITH_DEBUG_SYMBOLS := false
- endif
- endif
endif
ifeq ($$($1_TYPE), EXECUTABLE)
@@ -629,248 +293,53 @@ define SetupNativeCompilationBody
endif
endif
- ifneq ($$($1_NAME), $(basename $$($1_NAME)))
- $$(error NAME must not contain any directory path in $1)
- endif
- ifneq ($(findstring $$($1_SUFFIX), $$($1_NAME)), )
- $$(error NAME should be specified without suffix: $$($1_SUFFIX) in $1)
- endif
- ifneq ($(findstring $$($1_PREFIX), $$($1_NAME)), )
- $$(error NAME should be specified without prefix: $$($1_PREFIX) in $1)
- endif
- ifeq ($$($1_OUTPUT_DIR), )
- $$(error OUTPUT_DIR is missing in $1)
- endif
- ifneq ($$($1_MANIFEST), )
- ifeq ($$($1_MANIFEST_VERSION), )
- $$(error If MANIFEST is provided, then MANIFEST_VERSION is required in $1)
- endif
- endif
-
$1_BASENAME := $$($1_PREFIX)$$($1_NAME)$$($1_SUFFIX)
$1_TARGET := $$($1_OUTPUT_DIR)/$$($1_BASENAME)
$1_NOSUFFIX := $$($1_PREFIX)$$($1_NAME)
$1_SAFE_NAME := $$(strip $$(subst /,_, $1))
+endef
-# Need to make sure TARGET is first on list
- $1 := $$($1_TARGET)
-
- # Setup the toolchain to be used
- $$(call SetIfEmpty, $1_TOOLCHAIN, TOOLCHAIN_DEFAULT)
- $$(call SetIfEmpty, $1_CC, $$($$($1_TOOLCHAIN)_CC))
- $$(call SetIfEmpty, $1_CXX, $$($$($1_TOOLCHAIN)_CXX))
- $$(call SetIfEmpty, $1_LD, $$($$($1_TOOLCHAIN)_LD))
- $$(call SetIfEmpty, $1_AR, $$($$($1_TOOLCHAIN)_AR))
- $$(call SetIfEmpty, $1_AS, $$($$($1_TOOLCHAIN)_AS))
- $$(call SetIfEmpty, $1_MT, $$($$($1_TOOLCHAIN)_MT))
- $$(call SetIfEmpty, $1_RC, $$($$($1_TOOLCHAIN)_RC))
- $$(call SetIfEmpty, $1_OBJCOPY, $$($$($1_TOOLCHAIN)_OBJCOPY))
- $$(call SetIfEmpty, $1_STRIP, $$($$($1_TOOLCHAIN)_STRIP))
- $$(call SetIfEmpty, $1_SYSROOT_CFLAGS, $$($$($1_TOOLCHAIN)_SYSROOT_CFLAGS))
- $$(call SetIfEmpty, $1_SYSROOT_LDFLAGS, $$($$($1_TOOLCHAIN)_SYSROOT_LDFLAGS))
-
- $$(foreach d, $$($1_SRC), $$(if $$(wildcard $$d), , \
- $$(error SRC specified to SetupNativeCompilation $1 contains missing directory $$d)))
-
- $1_SRCS_RAW := $$(call FindFiles, $$($1_SRC))
- # Order src files according to the order of the src dirs
- $1_SRCS := $$(foreach d, $$($1_SRC), $$(filter $$d%, $$($1_SRCS_RAW)))
- $1_SRCS := $$(filter $$(NATIVE_SOURCE_EXTENSIONS), $$($1_SRCS))
- # Extract the C/C++ files.
- ifneq ($$($1_EXCLUDE_PATTERNS), )
- # We must not match the exclude pattern against the src root(s).
- $1_SRCS_WITHOUT_ROOTS := $$($1_SRCS)
- $$(foreach i, $$($1_SRC), $$(eval $1_SRCS_WITHOUT_ROOTS := $$(patsubst \
- $$i/%,%, $$($1_SRCS_WITHOUT_ROOTS))))
- $1_ALL_EXCLUDE_FILES := $$(call containing, $$($1_EXCLUDE_PATTERNS), \
- $$($1_SRCS_WITHOUT_ROOTS))
- endif
- ifneq ($$($1_EXCLUDE_FILES), )
- $1_ALL_EXCLUDE_FILES += $$($1_EXCLUDE_FILES)
- endif
- ifneq ($$($1_ALL_EXCLUDE_FILES), )
- $1_EXCLUDE_FILES_PAT := $$($1_ALL_EXCLUDE_FILES) \
- $$(foreach i, $$($1_SRC), $$(addprefix $$i/, $$($1_ALL_EXCLUDE_FILES)))
- $1_EXCLUDE_FILES_PAT := $$(addprefix %, $$($1_EXCLUDE_FILES_PAT))
- $1_SRCS := $$(filter-out $$($1_EXCLUDE_FILES_PAT), $$($1_SRCS))
- endif
- ifneq ($$($1_INCLUDE_FILES), )
- $1_INCLUDE_FILES_PAT := $$(foreach i, $$($1_SRC), $$(addprefix $$i/, $$($1_INCLUDE_FILES)))
- $1_SRCS := $$(filter $$($1_INCLUDE_FILES_PAT), $$($1_SRCS))
- endif
- # There can be only a single bin dir root, no need to foreach over the roots.
- $1_BINS := $$(wildcard $$($1_OBJECT_DIR)/*$(OBJ_SUFFIX))
- # Now we have a list of all c/c++ files to compile: $$($1_SRCS)
- # and we have a list of all existing object files: $$($1_BINS)
-
- # Prepend the source/bin path to the filter expressions. Then do the filtering.
- ifneq ($$($1_INCLUDES), )
- $1_SRC_INCLUDES := $$(foreach i, $$($1_SRC), $$(addprefix $$i/, $$(addsuffix /%, $$($1_INCLUDES))))
- $1_SRCS := $$(filter $$($1_SRC_INCLUDES), $$($1_SRCS))
- endif
- ifneq ($$($1_EXCLUDES), )
- $1_SRC_EXCLUDES := $$(addsuffix /%, $$($1_EXCLUDES))
- $1_SRC_EXCLUDES += $$(foreach i, $$($1_SRC), $$(addprefix $$i/, $$(addsuffix /%, $$($1_EXCLUDES))))
- $1_SRCS := $$(filter-out $$($1_SRC_EXCLUDES), $$($1_SRCS))
- endif
-
- $1_SRCS += $$($1_EXTRA_FILES)
-
- ifeq ($$($1_SRCS), )
- $$(error No sources found for $1 when looking inside the dirs $$($1_SRC))
- endif
-
- ifeq ($$($1_TYPE), EXECUTABLE)
- ifeq ($(UBSAN_ENABLED), true)
- # We need to set the default options for UBSan. This needs to be included in every executable.
- # Rather than copy and paste code to everything with a main function, we add an additional
- # source file to every executable that exports __ubsan_default_options.
- ifneq ($$(filter %.cpp %.cc, $$($1_SRCS)), )
- $1_SRCS += $(TOPDIR)/make/data/ubsan/ubsan_default_options.cpp
- else
- $1_SRCS += $(TOPDIR)/make/data/ubsan/ubsan_default_options.c
- endif
- endif
- endif
-
- # Calculate the expected output from compiling the sources
- $1_EXPECTED_OBJS_FILENAMES := $$(call replace_with_obj_extension, $$(notdir $$($1_SRCS)))
- $1_EXPECTED_OBJS := $$(addprefix $$($1_OBJECT_DIR)/, $$($1_EXPECTED_OBJS_FILENAMES))
- # Are there too many object files on disk? Perhaps because some source file was removed?
- $1_SUPERFLOUS_OBJS := $$(sort $$(filter-out $$($1_EXPECTED_OBJS), $$($1_BINS)))
- # Clean out the superfluous object files.
- ifneq ($$($1_SUPERFLUOUS_OBJS), )
- $$(shell $(RM) -f $$($1_SUPERFLUOUS_OBJS))
- endif
- # Sort to remove duplicates and provide a reproducible order on the input files to the linker.
- $1_ALL_OBJS := $$(sort $$($1_EXPECTED_OBJS) $$($1_EXTRA_OBJECT_FILES))
- ifeq ($(STATIC_LIBS), true)
- # Exclude the object files that match with $1_STATIC_LIB_EXCLUDE_OBJS.
- ifneq ($$($1_STATIC_LIB_EXCLUDE_OBJS), )
- $1_ALL_OBJS := $$(call not-containing, $$($1_STATIC_LIB_EXCLUDE_OBJS), $$($1_ALL_OBJS))
+################################################################################
+# Setup the toolchain variables
+define SetupToolchain
+ ifeq ($$($1_TARGET_TYPE), BUILD)
+ $$(call SetIfEmpty, $1_CC, $(BUILD_CC))
+ $$(call SetIfEmpty, $1_CXX, $(BUILD_CXX))
+ $$(call SetIfEmpty, $1_AR, $(BUILD_AR))
+ $$(call SetIfEmpty, $1_LIB, $(BUILD_LIB))
+ $$(call SetIfEmpty, $1_AS, $(BUILD_AS))
+ $$(call SetIfEmpty, $1_OBJCOPY, $(BUILD_OBJCOPY))
+ $$(call SetIfEmpty, $1_STRIP, $(BUILD_STRIP))
+ $$(call SetIfEmpty, $1_SYSROOT_CFLAGS, $(BUILD_SYSROOT_CFLAGS))
+ $$(call SetIfEmpty, $1_SYSROOT_LDFLAGS, $(BUILD_SYSROOT_LDFLAGS))
+ ifeq ($$($1_LINK_TYPE), C++)
+ $$(call SetIfEmpty, $1_LD, $(BUILD_LDCXX))
+ else
+ $$(call SetIfEmpty, $1_LD, $(BUILD_LD))
endif
- endif
-
- # Pickup extra OPENJDK_TARGET_OS_TYPE, OPENJDK_TARGET_OS, TOOLCHAIN_TYPE and
- # OPENJDK_TARGET_OS plus OPENJDK_TARGET_CPU pair dependent variables for CFLAGS.
- $1_EXTRA_CFLAGS := $$($1_CFLAGS_$(OPENJDK_TARGET_OS_TYPE)) $$($1_CFLAGS_$(OPENJDK_TARGET_OS)) \
- $$($1_CFLAGS_$(TOOLCHAIN_TYPE)) \
- $$($1_CFLAGS_$(OPENJDK_TARGET_OS)_$(OPENJDK_TARGET_CPU))
-
- ifneq ($(DEBUG_LEVEL), release)
- # Pickup extra debug dependent variables for CFLAGS
- $1_EXTRA_CFLAGS += $$($1_CFLAGS_debug)
- $1_EXTRA_CFLAGS += $$($1_CFLAGS_$(OPENJDK_TARGET_OS_TYPE)_debug)
- $1_EXTRA_CFLAGS += $$($1_CFLAGS_$(OPENJDK_TARGET_OS)_debug)
- $1_EXTRA_CFLAGS += $$($1_CFLAGS_$(OPENJDK_TARGET_OS)_$(OPENJDK_TARGET_CPU)_debug)
- else
- $1_EXTRA_CFLAGS += $$($1_CFLAGS_release)
- $1_EXTRA_CFLAGS += $$($1_CFLAGS_$(OPENJDK_TARGET_OS_TYPE)_release)
- $1_EXTRA_CFLAGS += $$($1_CFLAGS_$(OPENJDK_TARGET_OS)_release)
- $1_EXTRA_CFLAGS += $$($1_CFLAGS_$(OPENJDK_TARGET_OS)_$(OPENJDK_TARGET_CPU)_release)
- endif
- ifeq ($(STATIC_LIBS), true)
- $1_EXTRA_CFLAGS += $$(STATIC_LIBS_CFLAGS)
- endif
-
- # Pickup extra OPENJDK_TARGET_OS_TYPE, OPENJDK_TARGET_OS and/or TOOLCHAIN_TYPE
- # dependent variables for CXXFLAGS.
- $1_EXTRA_CXXFLAGS := $$($1_CXXFLAGS_$(OPENJDK_TARGET_OS_TYPE)) $$($1_CXXFLAGS_$(OPENJDK_TARGET_OS)) \
- $$($1_CXXFLAGS_$(TOOLCHAIN_TYPE))
-
- ifneq ($(DEBUG_LEVEL), release)
- # Pickup extra debug dependent variables for CXXFLAGS
- $1_EXTRA_CXXFLAGS += $$($1_CXXFLAGS_debug)
- $1_EXTRA_CXXFLAGS += $$($1_CXXFLAGS_$(OPENJDK_TARGET_OS_TYPE)_debug)
- $1_EXTRA_CXXFLAGS += $$($1_CXXFLAGS_$(OPENJDK_TARGET_OS)_debug)
else
- $1_EXTRA_CXXFLAGS += $$($1_CXXFLAGS_release)
- $1_EXTRA_CXXFLAGS += $$($1_CXXFLAGS_$(OPENJDK_TARGET_OS_TYPE)_release)
- $1_EXTRA_CXXFLAGS += $$($1_CXXFLAGS_$(OPENJDK_TARGET_OS)_release)
- endif
- ifeq ($(STATIC_LIBS), true)
- $1_EXTRA_CXXFLAGS += $$(STATIC_LIB_CFLAGS)
- endif
-
- # If no C++ flags are explicitly set, default to using the C flags.
- # After that, we can set additional C++ flags that should not interfere
- # with the mechanism for copying the C flags by default.
- ifeq ($$($1_CXXFLAGS), )
- $1_CXXFLAGS := $$($1_CFLAGS)
- endif
- ifeq ($$(strip $$($1_EXTRA_CXXFLAGS)), )
- $1_EXTRA_CXXFLAGS := $$($1_EXTRA_CFLAGS)
- endif
-
- ifeq ($$($1_COMPILE_WITH_DEBUG_SYMBOLS), true)
- $1_EXTRA_CFLAGS += $$(CFLAGS_DEBUG_SYMBOLS)
- $1_EXTRA_CXXFLAGS += $$(CFLAGS_DEBUG_SYMBOLS)
- $1_EXTRA_ASFLAGS += $$(ASFLAGS_DEBUG_SYMBOLS)
- endif
-
- # Pass the library name for static JNI library naming
- ifeq ($$($1_TYPE), STATIC_LIBRARY)
- $1_EXTRA_CFLAGS += -DLIBRARY_NAME=$$($1_NAME)
- $1_EXTRA_CXXFLAGS += -DLIBRARY_NAME=$$($1_NAME)
- endif
-
- # Pick up disabled warnings, if possible on this platform.
- ifneq ($(DISABLE_WARNING_PREFIX), )
- $1_EXTRA_CFLAGS += $$(addprefix $(DISABLE_WARNING_PREFIX), \
- $$(DISABLED_WARNINGS) \
- $$(DISABLED_WARNINGS_C) \
- $$($1_DISABLED_WARNINGS_$(TOOLCHAIN_TYPE)) \
- $$($1_DISABLED_WARNINGS_C_$(TOOLCHAIN_TYPE)) \
- $$($1_DISABLED_WARNINGS_$(TOOLCHAIN_TYPE)_$(OPENJDK_TARGET_OS)) \
- $$($1_DISABLED_WARNINGS_C_$(TOOLCHAIN_TYPE)_$(OPENJDK_TARGET_OS)))
- $1_EXTRA_CXXFLAGS += $$(addprefix $(DISABLE_WARNING_PREFIX), \
- $$(DISABLED_WARNINGS) \
- $$(DISABLED_WARNINGS_CXX) \
- $$($1_DISABLED_WARNINGS_$(TOOLCHAIN_TYPE)) \
- $$($1_DISABLED_WARNINGS_CXX_$(TOOLCHAIN_TYPE)) \
- $$($1_DISABLED_WARNINGS_$(TOOLCHAIN_TYPE)_$(OPENJDK_TARGET_OS)) \
- $$($1_DISABLED_WARNINGS_CXX_$(TOOLCHAIN_TYPE)_$(OPENJDK_TARGET_OS)))
- endif
-
- # Check if warnings should be considered errors.
- # Pick first binary and toolchain specific, then binary specific, then general setting.
- ifeq ($$($1_WARNINGS_AS_ERRORS_$(TOOLCHAIN_TYPE)), )
- ifeq ($$($1_WARNINGS_AS_ERRORS), )
- $1_WARNINGS_AS_ERRORS_$(TOOLCHAIN_TYPE) := $$(WARNINGS_AS_ERRORS)
+ $$(call SetIfEmpty, $1_CC, $(CC))
+ $$(call SetIfEmpty, $1_CXX, $(CXX))
+ $$(call SetIfEmpty, $1_AR, $(AR))
+ $$(call SetIfEmpty, $1_LIB, $(LIB))
+ $$(call SetIfEmpty, $1_AS, $(AS))
+ $$(call SetIfEmpty, $1_MT, $(MT))
+ $$(call SetIfEmpty, $1_RC, $(RC))
+ $$(call SetIfEmpty, $1_OBJCOPY, $(OBJCOPY))
+ $$(call SetIfEmpty, $1_STRIP, $(STRIP))
+ $$(call SetIfEmpty, $1_SYSROOT_CFLAGS, $(SYSROOT_CFLAGS))
+ $$(call SetIfEmpty, $1_SYSROOT_LDFLAGS, $(SYSROOT_LDFLAGS))
+ ifeq ($$($1_LINK_TYPE), C++)
+ $$(call SetIfEmpty, $1_LD, $(LDCXX))
else
- $1_WARNINGS_AS_ERRORS_$(TOOLCHAIN_TYPE) := $$($1_WARNINGS_AS_ERRORS)
+ $$(call SetIfEmpty, $1_LD, $(LD))
endif
endif
+endef
- ifeq ($$($1_WARNINGS_AS_ERRORS_$(TOOLCHAIN_TYPE)), true)
- $1_EXTRA_CFLAGS += $(CFLAGS_WARNINGS_ARE_ERRORS)
- $1_EXTRA_CXXFLAGS += $(CFLAGS_WARNINGS_ARE_ERRORS)
- endif
-
- ifeq (NONE, $$($1_OPTIMIZATION))
- $1_OPT_CFLAGS := $(C_O_FLAG_NONE)
- $1_OPT_CXXFLAGS := $(CXX_O_FLAG_NONE)
- else ifeq (LOW, $$($1_OPTIMIZATION))
- $1_OPT_CFLAGS := $(C_O_FLAG_NORM)
- $1_OPT_CXXFLAGS := $(CXX_O_FLAG_NORM)
- else ifeq (HIGH, $$($1_OPTIMIZATION))
- $1_OPT_CFLAGS := $(C_O_FLAG_HI)
- $1_OPT_CXXFLAGS := $(CXX_O_FLAG_HI)
- else ifeq (HIGHEST, $$($1_OPTIMIZATION))
- $1_OPT_CFLAGS := $(C_O_FLAG_HIGHEST)
- $1_OPT_CXXFLAGS := $(CXX_O_FLAG_HIGHEST)
- else ifeq (HIGHEST_JVM, $$($1_OPTIMIZATION))
- $1_OPT_CFLAGS := $(C_O_FLAG_HIGHEST_JVM)
- $1_OPT_CXXFLAGS := $(CXX_O_FLAG_HIGHEST_JVM)
- else ifeq (SIZE, $$($1_OPTIMIZATION))
- $1_OPT_CFLAGS := $(C_O_FLAG_SIZE)
- $1_OPT_CXXFLAGS := $(CXX_O_FLAG_SIZE)
- else ifneq (, $$($1_OPTIMIZATION))
- $$(error Unknown value for OPTIMIZATION: $$($1_OPTIMIZATION))
- endif
-
- $1_BUILD_INFO := $$($1_OBJECT_DIR)/_build-info.marker
-
+################################################################################
+# Setup machinery needed by the build system
+define SetupBuildSystemSupport
# Track variable changes for all variables that affect the compilation command
# lines for all object files in this setup. This includes at least all the
# variables used in the call to add_native_source below.
@@ -879,87 +348,19 @@ define SetupNativeCompilationBody
$$($1_CC) $$($1_CXX) $$($1_AS) $$($1_ASFLAGS)
$1_COMPILE_VARDEPS_FILE := $$(call DependOnVariable, $1_COMPILE_VARDEPS, \
$$($1_OBJECT_DIR)/$$($1_NOSUFFIX).comp.vardeps)
+endef
- ifneq ($$($1_PRECOMPILED_HEADER), )
- ifeq ($(USE_PRECOMPILED_HEADER), true)
- ifeq ($(TOOLCHAIN_TYPE), microsoft)
- $1_PCH_FILE := $$($1_OBJECT_DIR)/$1.pch
- $1_GENERATED_PCH_SRC := $$($1_OBJECT_DIR)/$1_pch.cpp
- $1_GENERATED_PCH_OBJ := $$($1_OBJECT_DIR)/$1_pch$(OBJ_SUFFIX)
-
- $$(eval $$(call SetupCompileNativeFile, $1_$$(notdir $$($1_GENERATED_PCH_SRC)), \
- FILE := $$($1_GENERATED_PCH_SRC), \
- BASE := $1, \
- EXTRA_CXXFLAGS := -Fp$$($1_PCH_FILE) -Yc$$(notdir $$($1_PRECOMPILED_HEADER)), \
- ))
-
- $1_USE_PCH_FLAGS := \
- -Fp$$($1_PCH_FILE) -Yu$$(notdir $$($1_PRECOMPILED_HEADER))
-
- $$($1_ALL_OBJS): $$($1_GENERATED_PCH_OBJ)
-
- # Explicitly add the pch obj file first to ease comparing to old
- # hotspot build.
- $1_ALL_OBJS := $$($1_GENERATED_PCH_OBJ) $$($1_ALL_OBJS)
-
- $$($1_GENERATED_PCH_SRC):
- $(ECHO) "#include \"$$(notdir $$($1_PRECOMPILED_HEADER))\"" > $$@
-
- else ifneq ($(findstring $(TOOLCHAIN_TYPE), gcc clang), )
- ifeq ($(TOOLCHAIN_TYPE), gcc)
- $1_PCH_FILE := $$($1_OBJECT_DIR)/precompiled/$$(notdir $$($1_PRECOMPILED_HEADER)).gch
- $1_USE_PCH_FLAGS := -I$$($1_OBJECT_DIR)/precompiled
- else ifeq ($(TOOLCHAIN_TYPE), clang)
- $1_PCH_FILE := $$($1_OBJECT_DIR)/precompiled/$$(notdir $$($1_PRECOMPILED_HEADER)).pch
- $1_USE_PCH_FLAGS := -include-pch $$($1_PCH_FILE)
- endif
- $1_PCH_DEPS_FILE := $$($1_PCH_FILE).d
- $1_PCH_DEPS_TARGETS_FILE := $$($1_PCH_FILE).d.targets
-
- -include $$($1_PCH_DEPS_FILE)
- -include $$($1_PCH_DEPS_TARGETS_FILE)
-
- $1_PCH_COMMAND := $$($1_CC) $$($1_CFLAGS) $$($1_EXTRA_CFLAGS) $$($1_SYSROOT_CFLAGS) \
- $$($1_OPT_CFLAGS) -x c++-header -c $(GENDEPS_FLAGS) \
- $$(addsuffix .tmp, $$($1_PCH_DEPS_FILE))
-
- $$($1_PCH_FILE): $$($1_PRECOMPILED_HEADER) $$($1_COMPILE_VARDEPS_FILE)
- $$(call LogInfo, Generating precompiled header)
- $$(call MakeDir, $$(@D))
- $$(call ExecuteWithLog, $$@, $$(call MakeCommandRelative, \
- $$($1_PCH_COMMAND) $$< -o $$@))
- $$(call fix-deps-file, $$($1_PCH_DEPS_FILE))
- $(SED) $(DEPENDENCY_TARGET_SED_PATTERN) $$($1_PCH_DEPS_FILE) \
- > $$($1_PCH_DEPS_TARGETS_FILE)
-
- $$($1_ALL_OBJS): $$($1_PCH_FILE)
-
- # Generate the corresponding compile_commands.json fragment.
- $1_PCH_FILE_JSON := $$(MAKESUPPORT_OUTPUTDIR)/compile-commands/$$(subst /,_,$$(subst \
- $$(OUTPUTDIR)/,,$$($1_PCH_FILE))).json
- $1_ALL_OBJS_JSON += $$($1_PCH_FILE_JSON)
-
- $$($1_PCH_FILE_JSON): $$($1_PRECOMPILED_HEADER) $$($1_COMPILE_VARDEPS_FILE)
- $$(call WriteCompileCommandsFragment, $$@, $$(PWD), $$<, \
- $$($1_PCH_COMMAND) $$< -o $$($1_PCH_FILE))
- endif
- endif
- endif
-
- # Now call SetupCompileNativeFile for each source file we are going to compile.
- $$(foreach file, $$($1_SRCS), \
- $$(eval $$(call SetupCompileNativeFile, $1_$$(notdir $$(file)),\
- FILE := $$(file), \
- BASE := $1, \
- )) \
- )
-
+################################################################################
+# Have make print information about the library when we start compiling
+define PrintStartInfo
# Setup rule for printing progress info when compiling source files.
# This is a rough heuristic and may not always print accurate information.
# The $1_BUILD_INFO and $1_BUILD_INFO_DEPS variables are used in
# TestFilesCompilation.gmk.
$$(call SetIfEmpty, $1_BUILD_INFO_LOG_MACRO, LogWarn)
$1_BUILD_INFO_DEPS := $$($1_SRCS) $$($1_COMPILE_VARDEPS_FILE)
+ $1_BUILD_INFO := $$($1_OBJECT_DIR)/_build-info.marker
+
$$($1_BUILD_INFO): $$($1_BUILD_INFO_DEPS)
ifeq ($$(wildcard $$($1_TARGET)), )
$$(call $$($1_BUILD_INFO_LOG_MACRO), \
@@ -973,47 +374,12 @@ define SetupNativeCompilationBody
$$(if $$(filter %.vardeps, $$?), due to makefile changes))))
endif
$(TOUCH) $$@
+endef
- # On windows we need to create a resource file
- ifeq ($(call isTargetOs, windows), true)
- ifneq ($$($1_VERSIONINFO_RESOURCE), )
- $1_RES := $$($1_OBJECT_DIR)/$$($1_BASENAME).res
- $1_RES_DEPS_FILE := $$($1_RES).d
- $1_RES_DEPS_TARGETS_FILE := $$($1_RES).d.targets
- -include $$($1_RES_DEPS_FILE)
- -include $$($1_RES_DEPS_TARGETS_FILE)
-
- $1_RES_VARDEPS := $$($1_RC) $$($1_RCFLAGS)
- $1_RES_VARDEPS_FILE := $$(call DependOnVariable, $1_RES_VARDEPS, \
- $$($1_RES).vardeps)
-
- $$($1_RES): $$($1_VERSIONINFO_RESOURCE) $$($1_RES_VARDEPS_FILE)
- $$(call LogInfo, Compiling resource $$(notdir $$($1_VERSIONINFO_RESOURCE)) (for $$($1_BASENAME)))
- $$(call MakeDir, $$(@D) $$($1_OBJECT_DIR))
- $$(call ExecuteWithLog, $$@, $$(call MakeCommandRelative, \
- $$($1_RC) $$($1_RCFLAGS) $$($1_SYSROOT_CFLAGS) $(CC_OUT_OPTION)$$@ \
- $$($1_VERSIONINFO_RESOURCE) 2>&1 ))
- # Windows RC compiler does not support -showIncludes, so we mis-use CL
- # for this. Filter out RC specific arguments that are unknown to CL.
- # For some unknown reason, in this case CL actually outputs the show
- # includes to stderr so need to redirect it to hide the output from the
- # main log.
- $$(call ExecuteWithLog, $$($1_RES_DEPS_FILE)$(OBJ_SUFFIX), \
- $$($1_CC) $$(filter-out -l%, $$($1_RCFLAGS)) \
- $$($1_SYSROOT_CFLAGS) -showIncludes -nologo -TC \
- $(CC_OUT_OPTION)$$($1_RES_DEPS_FILE)$(OBJ_SUFFIX) -P -Fi$$($1_RES_DEPS_FILE).pp \
- $$($1_VERSIONINFO_RESOURCE)) 2>&1 \
- | $(TR) -d '\r' | $(GREP) -v -e "^Note: including file:" \
- -e "^$$(notdir $$($1_VERSIONINFO_RESOURCE))$$$$" || test "$$$$?" = "1" ; \
- $(ECHO) $$($1_RES): \\ > $$($1_RES_DEPS_FILE) ; \
- $(SED) $(WINDOWS_SHOWINCLUDE_SED_PATTERN) $$($1_RES_DEPS_FILE)$(OBJ_SUFFIX).log \
- >> $$($1_RES_DEPS_FILE) ; \
- $(ECHO) >> $$($1_RES_DEPS_FILE) ;\
- $(SED) $(DEPENDENCY_TARGET_SED_PATTERN) $$($1_RES_DEPS_FILE) \
- > $$($1_RES_DEPS_TARGETS_FILE)
- endif
- endif
-
+################################################################################
+# Setup a library-wide dependency file from individual object file dependency
+# files
+define CreateDependencyFile
# Create a rule to collect all the individual make dependency files into a
# single makefile.
$1_DEPS_FILE := $$($1_OBJECT_DIR)/$1.d
@@ -1031,7 +397,11 @@ define SetupNativeCompilationBody
$(MV) $$@.tmp $$@
$1 += $$($1_DEPS_FILE)
+endef
+################################################################################
+# Import the dependency file into the makefile
+define ImportDependencyFile
# The include must be on the .old file, which represents the state from the
# previous invocation of make. The file being included must not have a rule
# defined for it as otherwise make will think it has to run the rule before
@@ -1041,328 +411,6 @@ define SetupNativeCompilationBody
$1_DEPS_FILE_LOADED := true
-include $$($1_DEPS_FILE).old
endif
-
- ifneq ($(DISABLE_MAPFILES), true)
- $1_REAL_MAPFILE := $$($1_MAPFILE)
- endif
-
- # Pickup extra OPENJDK_TARGET_OS_TYPE, OPENJDK_TARGET_OS and TOOLCHAIN_TYPE
- # dependent variables for LDFLAGS and LIBS, and additionally the pair dependent
- # TOOLCHAIN_TYPE plus OPENJDK_TARGET_OS
- $1_EXTRA_LDFLAGS += $$($1_LDFLAGS_$(OPENJDK_TARGET_OS_TYPE)) $$($1_LDFLAGS_$(OPENJDK_TARGET_OS)) \
- $$($1_LDFLAGS_$(TOOLCHAIN_TYPE)) $$($1_LDFLAGS_$(TOOLCHAIN_TYPE)_$(OPENJDK_TARGET_OS))
- $1_EXTRA_LIBS += $$($1_LIBS_$(OPENJDK_TARGET_OS_TYPE)) $$($1_LIBS_$(OPENJDK_TARGET_OS)) \
- $$($1_LIBS_$(TOOLCHAIN_TYPE)) $$($1_LIBS_$(TOOLCHAIN_TYPE)_$(OPENJDK_TARGET_OS))
-
- ifneq ($$($1_REAL_MAPFILE), )
- $1_EXTRA_LDFLAGS += $(call SET_SHARED_LIBRARY_MAPFILE,$$($1_REAL_MAPFILE))
- endif
-
- ifneq ($$($1_COPY_DEBUG_SYMBOLS), false)
- $1_COPY_DEBUG_SYMBOLS := $(COPY_DEBUG_SYMBOLS)
- endif
-
- ifneq ($$($1_ZIP_EXTERNAL_DEBUG_SYMBOLS), false)
- $1_ZIP_EXTERNAL_DEBUG_SYMBOLS := $(ZIP_EXTERNAL_DEBUG_SYMBOLS)
- endif
-
- ifeq ($$($1_COPY_DEBUG_SYMBOLS), true)
- ifneq ($$($1_DEBUG_SYMBOLS), false)
- $$(call SetIfEmpty, $1_SYMBOLS_DIR, $$($1_OUTPUT_DIR))
- # Only copy debug symbols for dynamic libraries and programs.
- ifneq ($$($1_TYPE), STATIC_LIBRARY)
- # Generate debuginfo files.
- ifeq ($(call isTargetOs, windows), true)
- $1_EXTRA_LDFLAGS += -debug "-pdb:$$($1_SYMBOLS_DIR)/$$($1_BASENAME).pdb" \
- "-map:$$($1_SYMBOLS_DIR)/$$($1_BASENAME).map"
- ifeq ($(SHIP_DEBUG_SYMBOLS), public)
- $1_EXTRA_LDFLAGS += "-pdbstripped:$$($1_SYMBOLS_DIR)/$$($1_BASENAME).stripped.pdb"
- endif
- $1_DEBUGINFO_FILES := $$($1_SYMBOLS_DIR)/$$($1_BASENAME).pdb \
- $$($1_SYMBOLS_DIR)/$$($1_BASENAME).map
-
- else ifeq ($(call isTargetOs, linux), true)
- $1_DEBUGINFO_FILES := $$($1_SYMBOLS_DIR)/$$($1_NOSUFFIX).debuginfo
- # Setup the command line creating debuginfo files, to be run after linking.
- # It cannot be run separately since it updates the original target file
- # Creating the debuglink is done in another command rather than all at once
- # so we can run it after strip is called, since strip can sometimes mangle the
- # embedded debuglink, which we want to avoid.
- $1_CREATE_DEBUGINFO_CMDS := \
- $$($1_OBJCOPY) --only-keep-debug $$($1_TARGET) $$($1_DEBUGINFO_FILES) $$(NEWLINE)
- $1_CREATE_DEBUGLINK_CMDS := $(CD) $$($1_SYMBOLS_DIR) && \
- $$($1_OBJCOPY) --add-gnu-debuglink=$$($1_DEBUGINFO_FILES) $$($1_TARGET)
-
- else ifeq ($(call isTargetOs, aix), true)
- # AIX does not provide the equivalent of OBJCOPY to extract debug symbols,
- # so we copy the compiled object with symbols to the .debuginfo file, which
- # happens prior to the STRIP_CMD on the original target object file.
- $1_DEBUGINFO_FILES := $$($1_SYMBOLS_DIR)/$$($1_NOSUFFIX).debuginfo
- $1_CREATE_DEBUGINFO_CMDS := $(CP) $$($1_TARGET) $$($1_DEBUGINFO_FILES)
-
- else ifeq ($(call isTargetOs, macosx), true)
- $1_DEBUGINFO_FILES := \
- $$($1_SYMBOLS_DIR)/$$($1_BASENAME).dSYM/Contents/Info.plist \
- $$($1_SYMBOLS_DIR)/$$($1_BASENAME).dSYM/Contents/Resources/DWARF/$$($1_BASENAME)
- $1_CREATE_DEBUGINFO_CMDS := \
- $(DSYMUTIL) --out $$($1_SYMBOLS_DIR)/$$($1_BASENAME).dSYM $$($1_TARGET)
- endif
-
- # Since the link rule creates more than one file that we want to track,
- # we have to use some tricks to get make to cooperate. To properly
- # trigger downstream dependants of $$($1_DEBUGINFO_FILES), we must have
- # a recipe in the rule below. To avoid rerunning the recipe every time
- # have it touch the target. If a debuginfo file is deleted by something
- # external, explicitly delete the TARGET to trigger a rebuild of both.
- ifneq ($$(wildcard $$($1_DEBUGINFO_FILES)), $$($1_DEBUGINFO_FILES))
- $$(call LogDebug, Deleting $$($1_BASENAME) because debuginfo files are missing)
- $$(shell $(RM) $$($1_TARGET))
- endif
- $$($1_DEBUGINFO_FILES): $$($1_TARGET)
- $$(if $$(CORRECT_FUNCTION_IN_RECIPE_EVALUATION), \
- $$(if $$(wildcard $$@), , $$(error $$@ was not created for $$<)) \
- )
- $(TOUCH) $$@
-
- $1 += $$($1_DEBUGINFO_FILES)
-
- ifeq ($$($1_ZIP_EXTERNAL_DEBUG_SYMBOLS), true)
- ifeq ($(call isTargetOs, windows), true)
- $1_DEBUGINFO_ZIP := $$($1_SYMBOLS_DIR)/$$($1_BASENAME).diz
- else
- $1_DEBUGINFO_ZIP := $$($1_SYMBOLS_DIR)/$$($1_NOSUFFIX).diz
- endif
- $1 += $$($1_DEBUGINFO_ZIP)
-
- # The dependency on TARGET is needed for debuginfo files
- # to be rebuilt properly.
- $$($1_DEBUGINFO_ZIP): $$($1_DEBUGINFO_FILES) $$($1_TARGET)
- $(CD) $$($1_SYMBOLS_DIR) && \
- $(ZIPEXE) -q -r $$@ $$(subst $$($1_SYMBOLS_DIR)/,, $$($1_DEBUGINFO_FILES))
-
- endif
- endif # !STATIC_LIBRARY
- endif # $1_DEBUG_SYMBOLS != false
- endif # COPY_DEBUG_SYMBOLS
-
- # Unless specifically set, stripping should only happen if symbols are also
- # being copied.
- $$(call SetIfEmpty, $1_STRIP_SYMBOLS, $$($1_COPY_DEBUG_SYMBOLS))
-
- ifneq ($$($1_STRIP_SYMBOLS), false)
- ifneq ($$($1_STRIP), )
- # Default to using the global STRIPFLAGS. Allow for overriding with an empty value
- $1_STRIPFLAGS ?= $(STRIPFLAGS)
- $1_STRIP_CMD := $$($1_STRIP) $$($1_STRIPFLAGS) $$($1_TARGET)
- endif
- endif
-
- $1_LD_OBJ_ARG := $$($1_ALL_OBJS)
-
- # If there are many object files, use an @-file...
- ifneq ($$(word 17, $$($1_ALL_OBJS)), )
- $1_OBJ_FILE_LIST := $$($1_OBJECT_DIR)/_$1_objectfilenames.txt
- ifneq ($(COMPILER_COMMAND_FILE_FLAG), )
- $1_LD_OBJ_ARG := $(COMPILER_COMMAND_FILE_FLAG)$$($1_OBJ_FILE_LIST)
- else
- # ...except for toolchains which don't support them.
- $1_LD_OBJ_ARG := `cat $$($1_OBJ_FILE_LIST)`
- endif
-
- # If we are building static library, 'AR' on macosx/aix may not support @-file.
- ifeq ($$($1_TYPE), STATIC_LIBRARY)
- ifeq ($(call isTargetOs, macosx aix), true)
- $1_LD_OBJ_ARG := `cat $$($1_OBJ_FILE_LIST)`
- endif
- endif
- endif
-
- # Unfortunately the @-file trick does not work reliably when using clang.
- # Clang does not propagate the @-file parameter to the ld sub process, but
- # instead puts the full content on the command line. At least the llvm ld
- # does not even support an @-file.
- #
- # When linking a large amount of object files, we risk hitting the limit
- # of the command line length even on posix systems if the path length of
- # the output dir is very long due to our use of absolute paths. To
- # mitigate this, use paths relative to the output dir when linking over
- # 500 files with clang and the output dir path is deep.
- ifneq ($$(word 500, $$($1_ALL_OBJS)), )
- ifeq ($$(TOOLCHAIN_TYPE), clang)
- # There is no strlen function in make, but checking path depth is a
- # reasonable approximation.
- ifneq ($$(word 10, $$(subst /, ,$$(OUTPUTDIR))), )
- $1_LINK_OBJS_RELATIVE := true
- $1_ALL_OBJS_RELATIVE := $$(patsubst $$(OUTPUTDIR)/%, %, $$($1_ALL_OBJS))
- endif
- endif
- endif
-
- ifeq ($$($1_TYPE), STATIC_LIBRARY)
- # Include partial linking when building the static library with clang on linux.
- ifeq ($(call isTargetOs, linux), true)
- ifneq ($(findstring $(TOOLCHAIN_TYPE), clang), )
- $1_ENABLE_PARTIAL_LINKING := true
- endif
- endif
-
- $1_VARDEPS := $$($1_AR) $$(ARFLAGS) $$($1_ARFLAGS) $$($1_LIBS) \
- $$($1_EXTRA_LIBS)
- ifeq ($$($1_ENABLE_PARTIAL_LINKING), true)
- $1_VARDEPS += $$($1_LD) $$($1_SYSROOT_LDFLAGS)
- endif
- $1_VARDEPS_FILE := $$(call DependOnVariable, $1_VARDEPS, \
- $$($1_OBJECT_DIR)/$$($1_NOSUFFIX).vardeps)
-
- # Generating a static library, ie object file archive.
- ifeq ($(STATIC_BUILD), true)
- ifeq ($$($1_USE_MAPFILE_FOR_SYMBOLS), true)
- STATIC_MAPFILE_DEP := $$($1_MAPFILE)
- endif
- endif
-
- $1_TARGET_DEPS := $$($1_ALL_OBJS) $$($1_RES) $$($1_VARDEPS_FILE) $$(STATIC_MAPFILE_DEP)
-
- $1_AR_OBJ_ARG := $$($1_LD_OBJ_ARG)
- # With clang on linux, partial linking is enabled and 'AR' takes the output
- # object from the partial linking step.
- ifeq ($$($1_ENABLE_PARTIAL_LINKING), true)
- $1_TARGET_RELOCATABLE := $$($1_OBJECT_DIR)/$$($1_PREFIX)$$($1_NAME)_relocatable$(OBJ_SUFFIX)
- $1_AR_OBJ_ARG := $$($1_TARGET_RELOCATABLE)
- endif
-
- $$($1_TARGET): $$($1_TARGET_DEPS)
- ifneq ($$($1_OBJ_FILE_LIST), )
- ifeq ($$($1_LINK_OBJS_RELATIVE), true)
- $$(eval $$(call ListPathsSafely, $1_ALL_OBJS_RELATIVE, $$($1_OBJ_FILE_LIST)))
- else
- $$(eval $$(call ListPathsSafely, $1_ALL_OBJS, $$($1_OBJ_FILE_LIST)))
- endif
- endif
- $$(call LogInfo, Building static library $$($1_BASENAME))
- $$(call MakeDir, $$($1_OUTPUT_DIR) $$($1_SYMBOLS_DIR))
- # Do partial linking.
- ifeq ($$($1_ENABLE_PARTIAL_LINKING), true)
- $$(call ExecuteWithLog, $$($1_OBJECT_DIR)/$$($1_SAFE_NAME)_partial_link, \
- $(if $$($1_LINK_OBJS_RELATIVE), $$(CD) $$(OUTPUTDIR) ; ) \
- $$($1_LD) $(LDFLAGS_CXX_PARTIAL_LINKING) $$($1_SYSROOT_LDFLAGS) \
- $(LD_OUT_OPTION)$$($1_TARGET_RELOCATABLE) \
- $$($1_LD_OBJ_ARG))
- endif
- $$(call ExecuteWithLog, $$($1_OBJECT_DIR)/$$($1_SAFE_NAME)_link, \
- $(if $$($1_LINK_OBJS_RELATIVE), $$(CD) $$(OUTPUTDIR) ; ) \
- $$($1_AR) $$(ARFLAGS) $$($1_ARFLAGS) $(AR_OUT_OPTION)$$($1_TARGET) $$($1_AR_OBJ_ARG) \
- $$($1_RES))
- ifeq ($(STATIC_BUILD), true)
- ifeq ($$($1_USE_MAPFILE_FOR_SYMBOLS), true)
- $(CP) $$($1_MAPFILE) $$(@D)/$$(basename $$(@F)).symbols
- else
- $(GetSymbols)
- endif
- endif
- else
- # A shared dynamic library or an executable binary has been specified
- ifeq ($$($1_TYPE), LIBRARY)
- # Generating a dynamic library.
- $1_EXTRA_LDFLAGS += $$(call SET_SHARED_LIBRARY_NAME,$$($1_BASENAME))
-
- # Create loadmap on AIX. Helps in diagnosing some problems.
- ifneq ($(COMPILER_BINDCMD_FILE_FLAG), )
- $1_EXTRA_LDFLAGS += $(COMPILER_BINDCMD_FILE_FLAG)$$($1_OBJECT_DIR)/$$($1_NOSUFFIX).loadmap
- endif
- endif
-
- ifeq ($(call isTargetOs, windows), true)
- ifeq ($$($1_EMBED_MANIFEST), true)
- $1_EXTRA_LDFLAGS += -manifest:embed
- endif
-
- $1_IMPORT_LIBRARY := $$($1_OBJECT_DIR)/$$($1_NAME).lib
- $1_EXTRA_LDFLAGS += "-implib:$$($1_IMPORT_LIBRARY)"
- ifeq ($$($1_TYPE), LIBRARY)
- # To properly trigger downstream dependants of the import library, just as
- # for debug files, we must have a recipe in the rule. To avoid rerunning
- # the recipe every time have it touch the target. If an import library
- # file is deleted by something external, explicitly delete the target to
- # trigger a rebuild of both.
- ifneq ($$(wildcard $$($1_IMPORT_LIBRARY)), $$($1_IMPORT_LIBRARY))
- $$(call LogDebug, Deleting $$($1_BASENAME) because import library is missing)
- $$(shell $(RM) $$($1_TARGET))
- endif
- $$($1_IMPORT_LIBRARY): $$($1_TARGET)
- $(TOUCH) $$@
-
- $1 += $$($1_IMPORT_LIBRARY)
- endif
- endif
-
- $1_VARDEPS := $$($1_LD) $$($1_SYSROOT_LDFLAGS) $$($1_LDFLAGS) $$($1_EXTRA_LDFLAGS) \
- $$($1_LIBS) $$($1_EXTRA_LIBS) $$($1_MT) \
- $$($1_CREATE_DEBUGINFO_CMDS) $$($1_MANIFEST_VERSION) \
- $$($1_STRIP_CMD) $$($1_CREATE_DEBUGLINK_CMDS)
- $1_VARDEPS_FILE := $$(call DependOnVariable, $1_VARDEPS, \
- $$($1_OBJECT_DIR)/$$($1_NOSUFFIX).vardeps)
-
- $1_TARGET_DEPS := $$($1_ALL_OBJS) $$($1_RES) $$($1_MANIFEST) \
- $$($1_REAL_MAPFILE) $$($1_VARDEPS_FILE)
-
- $$($1_TARGET): $$($1_TARGET_DEPS)
- ifneq ($$($1_OBJ_FILE_LIST), )
- ifeq ($$($1_LINK_OBJS_RELATIVE), true)
- $$(eval $$(call ListPathsSafely, $1_ALL_OBJS_RELATIVE, $$($1_OBJ_FILE_LIST)))
- else
- $$(eval $$(call ListPathsSafely, $1_ALL_OBJS, $$($1_OBJ_FILE_LIST)))
- endif
- endif
- # Keep as much as possible on one execution line for best performance
- # on Windows
- $$(call LogInfo, Linking $$($1_BASENAME))
- $$(call MakeDir, $$($1_OUTPUT_DIR) $$($1_SYMBOLS_DIR))
- ifeq ($(call isTargetOs, windows), true)
-
- $$(call ExecuteWithLog, $$($1_OBJECT_DIR)/$$($1_SAFE_NAME)_link, \
- $$($1_LD) $$($1_LDFLAGS) $$($1_EXTRA_LDFLAGS) $$($1_SYSROOT_LDFLAGS) \
- $(LD_OUT_OPTION)$$($1_TARGET) $$($1_LD_OBJ_ARG) $$($1_RES) \
- $$($1_LIBS) $$($1_EXTRA_LIBS)) \
- | $(GREP) -v "^ Creating library .*\.lib and object .*\.exp" || \
- test "$$$$?" = "1" ; \
- $$($1_CREATE_DEBUGINFO_CMDS)
- $$($1_STRIP_CMD)
- $$($1_CREATE_DEBUGLINK_CMDS)
- ifeq ($(call isBuildOsEnv, windows.wsl2), true)
- $$(CHMOD) +x $$($1_TARGET)
- endif
- else
- $$(call ExecuteWithLog, $$($1_OBJECT_DIR)/$$($1_SAFE_NAME)_link, \
- $$(if $$($1_LINK_OBJS_RELATIVE), $$(CD) $$(OUTPUTDIR) ; ) \
- $$($1_LD) $$($1_LDFLAGS) $$($1_EXTRA_LDFLAGS) $$($1_SYSROOT_LDFLAGS) \
- $(LD_OUT_OPTION)$$($1_TARGET) $$($1_LD_OBJ_ARG) $$($1_RES) \
- $$($1_LIBS) $$($1_EXTRA_LIBS)) ; \
- $$($1_CREATE_DEBUGINFO_CMDS)
- $$($1_STRIP_CMD)
- $$($1_CREATE_DEBUGLINK_CMDS)
- endif
- ifeq ($(call isTargetOs, windows), true)
- ifneq ($$($1_MANIFEST), )
- $$($1_MT) -nologo -manifest $$($1_MANIFEST) -identity:"$$($1_NAME).exe, version=$$($1_MANIFEST_VERSION)" -outputresource:$$@;#1
- endif
- endif
- # On macosx, optionally run codesign on every binary.
- # Remove signature explicitly first to avoid warnings if the linker
- # added a default adhoc signature.
- ifeq ($(MACOSX_CODESIGN_MODE), hardened)
- $(CODESIGN) --remove-signature $$@
- $(CODESIGN) -f -s "$(MACOSX_CODESIGN_IDENTITY)" --timestamp --options runtime \
- --entitlements $$(call GetEntitlementsFile, $$@) $$@
- else ifeq ($(MACOSX_CODESIGN_MODE), debug)
- $(CODESIGN) --remove-signature $$@
- $(CODESIGN) -f -s - --entitlements $$(call GetEntitlementsFile, $$@) $$@
- endif
- endif
-
- ifeq ($(GENERATE_COMPILE_COMMANDS_ONLY), true)
- $1 := $$($1_ALL_OBJS_JSON)
- endif
endef
endif # _NATIVE_COMPILATION_GMK
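The SetupToolchain macro introduced above reduces toolchain selection to SetIfEmpty-style defaulting: each per-binary tool variable is filled in from either the BUILD_* toolchain or the target toolchain, unless the caller has already set it, and LINK_TYPE := C++ switches the linker to LDCXX. Below is a minimal standalone sketch of that defaulting pattern; it is not part of the patch, and the local SetIfEmpty stand-in, the compiler paths and the BUILD_FOO/BUILD_BAR prefixes are all illustrative.

# Illustrative stand-in for the real SetIfEmpty helper used by SetupToolchain
# (defined elsewhere in the build system): assign a value to a variable only
# if the variable is currently empty.
SetIfEmpty = $(if $($(strip $1)),,$(eval $(strip $1) := $2))

# Hypothetical toolchain values.
BUILD_CC := /usr/bin/cc-for-buildjdk
CC := /usr/bin/cc-for-target

# A binary marked TARGET_TYPE := BUILD gets the build toolchain by default...
BUILD_FOO_TARGET_TYPE := BUILD
ifeq ($(BUILD_FOO_TARGET_TYPE), BUILD)
  $(call SetIfEmpty, BUILD_FOO_CC, $(BUILD_CC))
else
  $(call SetIfEmpty, BUILD_FOO_CC, $(CC))
endif

# ...while an explicitly set compiler is left alone.
BUILD_BAR_CC := /opt/special/cc
$(call SetIfEmpty, BUILD_BAR_CC, $(CC))

$(info BUILD_FOO_CC = $(BUILD_FOO_CC))  # /usr/bin/cc-for-buildjdk
$(info BUILD_BAR_CC = $(BUILD_BAR_CC))  # /opt/special/cc

all: ;

Run with make -f, this prints the build-toolchain compiler for BUILD_FOO and leaves the explicit BUILD_BAR_CC override untouched, which is the precedence SetupToolchain gives its callers.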
diff --git a/make/common/TestFilesCompilation.gmk b/make/common/TestFilesCompilation.gmk
index d97d0e6c697e7..626eb058f0a1b 100644
--- a/make/common/TestFilesCompilation.gmk
+++ b/make/common/TestFilesCompilation.gmk
@@ -1,5 +1,5 @@
#
-# Copyright (c) 2015, 2023, Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2015, 2024, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it
@@ -103,6 +103,7 @@ define SetupTestFilesCompilationBody
$$(eval $$(call SetupNativeCompilation, BUILD_TEST_$$(name), \
NAME := $$(unprefixed_name), \
TYPE := $$($1_COMPILATION_TYPE), \
+ LINK_TYPE := $(if $$(filter %.cpp, $$(file)), C++, C), \
EXTRA_FILES := $$(file) $$($1_EXTRA_FILES), \
OBJECT_DIR := $$($1_OUTPUT_DIR)/support/$$(name), \
OUTPUT_DIR := $$($1_OUTPUT_DIR)/$$($1_OUTPUT_SUBDIR), \
@@ -113,7 +114,6 @@ define SetupTestFilesCompilationBody
DISABLED_WARNINGS_clang := undef format-nonliteral \
missing-field-initializers sometimes-uninitialized, \
LIBS := $$($1_LIBS_$$(name)), \
- TOOLCHAIN := $(if $$(filter %.cpp, $$(file)), TOOLCHAIN_LINK_CXX, TOOLCHAIN_DEFAULT), \
OPTIMIZATION := $$(if $$($1_OPTIMIZATION_$$(name)),$$($1_OPTIMIZATION_$$(name)),LOW), \
COPY_DEBUG_SYMBOLS := $$($1_COPY_DEBUG_SYMBOLS), \
STRIP_SYMBOLS := $$(if $$($1_STRIP_SYMBOLS_$$(name)),$$($1_STRIP_SYMBOLS_$$(name)),false), \
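The new LINK_TYPE argument above is derived from the extension of the test source file with a $(filter)/$(if) one-liner, replacing the earlier TOOLCHAIN := TOOLCHAIN_LINK_CXX selection. A tiny standalone sketch of that idiom; the link_type_for helper and the file names are made up.

# Keep only .cpp words; a non-empty result means "link as C++".
link_type_for = $(if $(filter %.cpp, $1), C++, C)

$(info exeFoo.c   -> $(call link_type_for, exeFoo.c))    # C
$(info libBar.cpp -> $(call link_type_for, libBar.cpp))  # C++

all: ;

SetupToolchain then consumes the resulting C or C++ value to pick LD or LDCXX for the binary.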
diff --git a/make/common/native/CompileFile.gmk b/make/common/native/CompileFile.gmk
new file mode 100644
index 0000000000000..a9384fb0cf509
--- /dev/null
+++ b/make/common/native/CompileFile.gmk
@@ -0,0 +1,351 @@
+#
+# Copyright (c) 2011, 2024, Oracle and/or its affiliates. All rights reserved.
+# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+#
+# This code is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License version 2 only, as
+# published by the Free Software Foundation. Oracle designates this
+# particular file as subject to the "Classpath" exception as provided
+# by Oracle in the LICENSE file that accompanied this code.
+#
+# This code is distributed in the hope that it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+# version 2 for more details (a copy is included in the LICENSE file that
+# accompanied this code).
+#
+# You should have received a copy of the GNU General Public License version
+# 2 along with this work; if not, write to the Free Software Foundation,
+# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+#
+# Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+# or visit www.oracle.com if you need additional information or have any
+# questions.
+#
+
+################################################################################
+# This file contains functionality related to compiling a single native source
+# file (C, C++ or Objective-C) into an object file. It also harbours related
+# functionality for generating PCH (precompiled headers) and Windows resource
+# files.
+
+################################################################################
+# Creates a recipe that creates a compile_commands.json fragment. Remove any
+# occurrences of FIXPATH programs from the command to show the actual invocation.
+#
+# Param 1: Name of file to create
+# Param 2: Working directory
+# Param 3: Source file
+# Param 4: Compile command
+################################################################################
+define WriteCompileCommandsFragment
+ $(call LogInfo, Creating compile commands fragment for $(notdir $3))
+ $(call MakeDir, $(dir $1))
+ $(call WriteFile,{ \
+ "directory": "$(strip $(call FixPath, $2))"$(COMMA) \
+ "file": "$(strip $(call FixPath, $3))"$(COMMA) \
+ "command": "$(strip $(subst $(DQUOTE),\$(DQUOTE),$(subst \,\\,\
+ $(subst $(FIXPATH),,$(call FixPath, $4)))))" \
+ }$(COMMA), \
+ $1)
+endef
+
+################################################################################
+# Extensions of files handled by this macro.
+NATIVE_SOURCE_EXTENSIONS := %.S %.c %.cpp %.cc %.m %.mm
+
+# Replaces native source extensions with the object file extension in a string.
+# Param 1: the string containing source file names with extensions
+# The surrounding strip is needed to keep additional whitespace out
+define replace_with_obj_extension
+$(strip \
+ $(foreach extension, $(NATIVE_SOURCE_EXTENSIONS), \
+ $(patsubst $(extension),%$(OBJ_SUFFIX), $(filter $(extension), $1))) \
+)
+endef
+
+################################################################################
+# This pattern is used to transform the output of the Microsoft CL compiler
+# into a make-syntax dependency file (.d).
+WINDOWS_SHOWINCLUDE_SED_PATTERN := \
+ -e '/^Note: including file:/!d' \
+ -e 's|Note: including file: *||' \
+ -e 's|\r||g' \
+ -e 's|\\|/|g' \
+ -e 's|^\([a-zA-Z]\):|$(WINENV_PREFIX)/\1|g' \
+ -e '\|$(TOPDIR)|I !d' \
+ -e 's|$$$$| \\|g' \
+ #
+
+################################################################################
+# This pattern is used to transform a dependency file (.d) to a list
+# of make targets for dependent files (.d.targets)
+DEPENDENCY_TARGET_SED_PATTERN := \
+ -e 's/\#.*//' \
+ -e 's/^[^:]*: *//' \
+ -e 's/ *\\$$$$//' \
+ -e 's/^[ ]*//' \
+ -e '/^$$$$/ d' \
+ -e 's/$$$$/ :/' \
+ #
+
+################################################################################
+# Create the recipe needed to compile a single native source file.
+#
+# Parameter 1 is the name of the rule, based on the name of the library/
+# program being built and the name of the source code file, e.g.
+# BUILD_LIBFOO_fooMain.cpp.
+#
+# Remaining parameters are named arguments:
+#   FILE - The full path of the source file to compile
+#   BASE - The name of the rule for the entire binary to build ($1)
+#
+CreateCompiledNativeFile = $(NamedParamsMacroTemplate)
+define CreateCompiledNativeFileBody
+ $1_FILENAME := $$(notdir $$($1_FILE))
+
+ # The target file to be generated.
+ $1_OBJ := $$($$($1_BASE)_OBJECT_DIR)/$$(call replace_with_obj_extension, \
+ $$($1_FILENAME))
+
+ # Generate the corresponding compile_commands.json fragment.
+ $1_OBJ_JSON = $$(MAKESUPPORT_OUTPUTDIR)/compile-commands/$$(subst /,_,$$(subst \
+ $$(OUTPUTDIR)/,,$$($1_OBJ))).json
+ $$($1_BASE)_ALL_OBJS_JSON += $$($1_OBJ_JSON)
+
+ # Only continue if this object file hasn't been processed already. This lets
+ # the first found source file override any other with the same name.
+ ifeq ($$($1_OBJ_PROCESSED), )
+ $1_OBJ_PROCESSED := true
+    # This is the definitive source file to use for $1_FILENAME.
+ $1_SRC_FILE := $$($1_FILE)
+
+ $$(eval $$(call SetupCompileFileFlags,$1,$$($1_BASE)))
+
+ ifneq ($$(filter %.c, $$($1_FILENAME)), )
+ # Compile as a C file
+ $1_CFLAGS += $$($1_WARNINGS_FLAGS)
+ $1_FLAGS := $(CFLAGS_CCACHE) $$($1_USE_PCH_FLAGS) $$($1_BASE_CFLAGS) \
+ $$($1_OPT_CFLAGS) $$($1_CFLAGS) -c
+ $1_COMPILER := $$($$($1_BASE)_CC)
+ else ifneq ($$(filter %.m, $$($1_FILENAME)), )
+ # Compile as an Objective-C file
+ $1_CFLAGS += $$($1_WARNINGS_FLAGS)
+ $1_FLAGS := -x objective-c $(CFLAGS_CCACHE) $$($1_USE_PCH_FLAGS) \
+ $$($1_BASE_CFLAGS) $$($1_OPT_CFLAGS) $$($1_CFLAGS) -c
+ $1_COMPILER := $$($$($1_BASE)_CC)
+ else ifneq ($$(filter %.S, $$($1_FILENAME)), )
+ # Compile as preprocessed assembler file
+ $1_FLAGS := $(BASIC_ASFLAGS) $$($1_BASE_ASFLAGS)
+ $1_COMPILER := $(AS)
+
+ # gcc or clang assembly files must contain an appropriate relative .file
+ # path for reproducible builds.
+ ifneq ($(findstring $(TOOLCHAIN_TYPE), gcc clang), )
+ # If no absolute paths allowed, work out relative source file path
+ # for assembly .file substitution, otherwise use full file path
+ ifeq ($(ALLOW_ABSOLUTE_PATHS_IN_OUTPUT), false)
+ $1_REL_ASM_SRC := $$(call RelativePath, $$($1_FILE), $(WORKSPACE_ROOT))
+ else
+ $1_REL_ASM_SRC := $$($1_FILE)
+ endif
+ $1_FLAGS := $$($1_FLAGS) -DASSEMBLY_SRC_FILE='"$$($1_REL_ASM_SRC)"' \
+ -include $(TOPDIR)/make/data/autoheaders/assemblyprefix.h
+ endif
+ else ifneq ($$(filter %.cpp %.cc %.mm, $$($1_FILENAME)), )
+ # Compile as a C++ or Objective-C++ file
+ $1_CXXFLAGS += $$($1_WARNINGS_FLAGS)
+ $1_FLAGS := $(CFLAGS_CCACHE) $$($1_USE_PCH_FLAGS) $$($1_BASE_CXXFLAGS) \
+ $$($1_OPT_CXXFLAGS) $$($1_CXXFLAGS) -c
+ $1_COMPILER := $$($$($1_BASE)_CXX)
+ else
+ $$(error Internal error in NativeCompilation.gmk: no compiler for file $$($1_FILENAME))
+ endif
+
+ # And this is the dependency file for this obj file.
+ $1_DEPS_FILE := $$(patsubst %$(OBJ_SUFFIX),%.d,$$($1_OBJ))
+ # The dependency target file lists all dependencies as empty targets to
+ # avoid make error "No rule to make target" for removed files
+ $1_DEPS_TARGETS_FILE := $$(patsubst %$(OBJ_SUFFIX),%.d.targets,$$($1_OBJ))
+
+ # Only try to load individual dependency information files if the global
+ # file hasn't been loaded (could happen if make was interrupted).
+ ifneq ($$($$($1_BASE)_DEPS_FILE_LOADED), true)
+ # Include previously generated dependency information. (if it exists)
+ -include $$($1_DEPS_FILE)
+ -include $$($1_DEPS_TARGETS_FILE)
+ endif
+
+ ifneq ($$(strip $$($1_CFLAGS) $$($1_CXXFLAGS) $$($1_OPTIMIZATION)), )
+ $1_VARDEPS := $$($1_CFLAGS) $$($1_CXXFLAGS) $$($1_OPTIMIZATION)
+ $1_VARDEPS_FILE := $$(call DependOnVariable, $1_VARDEPS, $$($1_OBJ).vardeps)
+ endif
+
+ $1_OBJ_DEPS := $$($1_SRC_FILE) $$($$($1_BASE)_COMPILE_VARDEPS_FILE) \
+ $$($$($1_BASE)_EXTRA_DEPS) $$($1_VARDEPS_FILE)
+ $1_COMPILE_OPTIONS := $$($1_FLAGS) $(CC_OUT_OPTION)$$($1_OBJ) $$($1_SRC_FILE)
+ # For reproducible builds with gcc and clang ensure random symbol generation is
+ # seeded deterministically
+ ifneq ($(findstring $(TOOLCHAIN_TYPE), gcc clang), )
+ $1_COMPILE_OPTIONS += -frandom-seed="$$($1_FILENAME)"
+ endif
+
+ $$($1_OBJ_JSON): $$($1_OBJ_DEPS)
+ $$(call WriteCompileCommandsFragment, $$@, $$(PWD), $$($1_SRC_FILE), \
+ $$($1_COMPILER) $$($1_COMPILE_OPTIONS))
+
+ $$($1_OBJ): $$($1_OBJ_DEPS) | $$($$($1_BASE)_BUILD_INFO)
+ $$(call LogInfo, Compiling $$($1_FILENAME) (for $$($$($1_BASE)_BASENAME)))
+ $$(call MakeDir, $$(@D))
+ ifneq ($(TOOLCHAIN_TYPE), microsoft)
+ $$(call ExecuteWithLog, $$@, $$(call MakeCommandRelative, \
+ $$($1_COMPILER) $$(GENDEPS_FLAGS) \
+ $$(addsuffix .tmp, $$($1_DEPS_FILE)) \
+ $$($1_COMPILE_OPTIONS)))
+ ifneq ($$($1_DEPS_FILE), )
+ $$(call fix-deps-file, $$($1_DEPS_FILE))
+ # Create a dependency target file from the dependency file.
+ # Solution suggested by:
+ # http://make.mad-scientist.net/papers/advanced-auto-dependency-generation/
+ $(SED) $(DEPENDENCY_TARGET_SED_PATTERN) $$($1_DEPS_FILE) \
+ > $$($1_DEPS_TARGETS_FILE)
+ endif
+ else
+ # The Visual Studio compiler lacks a feature for generating make
+ # dependencies, but by setting -showIncludes, all included files are
+      # printed. These are filtered out and parsed into make dependencies.
+ #
+ # Keep as much as possible on one execution line for best performance
+ # on Windows. No need to save exit code from compilation since
+ # pipefail is always active on Windows.
+ ifeq ($$(filter %.S, $$($1_FILENAME)), )
+ $$(call ExecuteWithLog, $$@, $$(call MakeCommandRelative, \
+ $$($1_COMPILER) -showIncludes $$($1_COMPILE_OPTIONS))) \
+ | $(TR) -d '\r' | $(GREP) -v -e "^Note: including file:" \
+ -e "^$$($1_FILENAME)$$$$" || test "$$$$?" = "1" ; \
+ $(ECHO) $$@: \\ > $$($1_DEPS_FILE) ; \
+ $(SED) $(WINDOWS_SHOWINCLUDE_SED_PATTERN) $$($1_OBJ).log \
+ | $(SORT) -u >> $$($1_DEPS_FILE) ; \
+ $(ECHO) >> $$($1_DEPS_FILE) ; \
+ $(SED) $(DEPENDENCY_TARGET_SED_PATTERN) $$($1_DEPS_FILE) > $$($1_DEPS_TARGETS_FILE)
+ else
+ # For assembler calls just create empty dependency lists
+ $$(call ExecuteWithLog, $$@, $$(call MakeCommandRelative, \
+ $$($1_COMPILER) $$($1_FLAGS) \
+ $(CC_OUT_OPTION)$$($1_OBJ) -Ta $$($1_SRC_FILE))) \
+ | $(TR) -d '\r' | $(GREP) -v -e "Assembling:" || test "$$$$?" = "1" ; \
+ $(ECHO) > $$($1_DEPS_FILE) ; \
+ $(ECHO) > $$($1_DEPS_TARGETS_FILE)
+ endif
+ endif
+ endif
+endef
+
+################################################################################
+define CreatePrecompiledHeader
+ ifneq ($$($1_PRECOMPILED_HEADER), )
+ ifeq ($(USE_PRECOMPILED_HEADER), true)
+ ifeq ($(TOOLCHAIN_TYPE), microsoft)
+ $1_PCH_FILE := $$($1_OBJECT_DIR)/$1.pch
+ $1_GENERATED_PCH_SRC := $$($1_OBJECT_DIR)/$1_pch.cpp
+ $1_GENERATED_PCH_OBJ := $$($1_OBJECT_DIR)/$1_pch$(OBJ_SUFFIX)
+
+ $$(eval $$(call CreateCompiledNativeFile, $1_$$(notdir $$($1_GENERATED_PCH_SRC)), \
+ FILE := $$($1_GENERATED_PCH_SRC), \
+ BASE := $1, \
+ EXTRA_CXXFLAGS := -Fp$$($1_PCH_FILE) -Yc$$(notdir $$($1_PRECOMPILED_HEADER)), \
+ ))
+
+ $1_USE_PCH_FLAGS := \
+ -Fp$$($1_PCH_FILE) -Yu$$(notdir $$($1_PRECOMPILED_HEADER))
+
+ $$($1_ALL_OBJS): $$($1_GENERATED_PCH_OBJ)
+
+ # Explicitly add the pch obj file first to ease comparing to old
+ # hotspot build.
+ $1_ALL_OBJS := $$($1_GENERATED_PCH_OBJ) $$($1_ALL_OBJS)
+
+ $$($1_GENERATED_PCH_SRC):
+ $(ECHO) "#include \"$$(notdir $$($1_PRECOMPILED_HEADER))\"" > $$@
+
+ else ifneq ($(findstring $(TOOLCHAIN_TYPE), gcc clang), )
+ ifeq ($(TOOLCHAIN_TYPE), gcc)
+ $1_PCH_FILE := $$($1_OBJECT_DIR)/precompiled/$$(notdir $$($1_PRECOMPILED_HEADER)).gch
+ $1_USE_PCH_FLAGS := -I$$($1_OBJECT_DIR)/precompiled
+ else ifeq ($(TOOLCHAIN_TYPE), clang)
+ $1_PCH_FILE := $$($1_OBJECT_DIR)/precompiled/$$(notdir $$($1_PRECOMPILED_HEADER)).pch
+ $1_USE_PCH_FLAGS := -include-pch $$($1_PCH_FILE)
+ endif
+ $1_PCH_DEPS_FILE := $$($1_PCH_FILE).d
+ $1_PCH_DEPS_TARGETS_FILE := $$($1_PCH_FILE).d.targets
+
+ -include $$($1_PCH_DEPS_FILE)
+ -include $$($1_PCH_DEPS_TARGETS_FILE)
+
+ $1_PCH_COMMAND := $$($1_CC) $$($1_CFLAGS) $$($1_EXTRA_CFLAGS) $$($1_SYSROOT_CFLAGS) \
+ $$($1_OPT_CFLAGS) -x c++-header -c $(GENDEPS_FLAGS) \
+ $$(addsuffix .tmp, $$($1_PCH_DEPS_FILE))
+
+ $$($1_PCH_FILE): $$($1_PRECOMPILED_HEADER) $$($1_COMPILE_VARDEPS_FILE)
+ $$(call LogInfo, Generating precompiled header)
+ $$(call MakeDir, $$(@D))
+ $$(call ExecuteWithLog, $$@, $$(call MakeCommandRelative, \
+ $$($1_PCH_COMMAND) $$< -o $$@))
+ $$(call fix-deps-file, $$($1_PCH_DEPS_FILE))
+ $(SED) $(DEPENDENCY_TARGET_SED_PATTERN) $$($1_PCH_DEPS_FILE) \
+ > $$($1_PCH_DEPS_TARGETS_FILE)
+
+ $$($1_ALL_OBJS): $$($1_PCH_FILE)
+
+ # Generate the corresponding compile_commands.json fragment.
+ $1_PCH_FILE_JSON := $$(MAKESUPPORT_OUTPUTDIR)/compile-commands/$$(subst /,_,$$(subst \
+ $$(OUTPUTDIR)/,,$$($1_PCH_FILE))).json
+ $1_ALL_OBJS_JSON += $$($1_PCH_FILE_JSON)
+
+ $$($1_PCH_FILE_JSON): $$($1_PRECOMPILED_HEADER) $$($1_COMPILE_VARDEPS_FILE)
+ $$(call WriteCompileCommandsFragment, $$@, $$(PWD), $$<, \
+ $$($1_PCH_COMMAND) $$< -o $$($1_PCH_FILE))
+ endif
+ endif
+ endif
+endef
+
+################################################################################
+define CreateWindowsResourceFile
+ ifneq ($$($1_VERSIONINFO_RESOURCE), )
+ $1_RES := $$($1_OBJECT_DIR)/$$($1_BASENAME).res
+ $1_RES_DEPS_FILE := $$($1_RES).d
+ $1_RES_DEPS_TARGETS_FILE := $$($1_RES).d.targets
+ -include $$($1_RES_DEPS_FILE)
+ -include $$($1_RES_DEPS_TARGETS_FILE)
+
+ $1_RES_VARDEPS := $$($1_RC) $$($1_RCFLAGS)
+ $1_RES_VARDEPS_FILE := $$(call DependOnVariable, $1_RES_VARDEPS, \
+ $$($1_RES).vardeps)
+
+ $$($1_RES): $$($1_VERSIONINFO_RESOURCE) $$($1_RES_VARDEPS_FILE)
+ $$(call LogInfo, Compiling resource $$(notdir $$($1_VERSIONINFO_RESOURCE)) (for $$($1_BASENAME)))
+ $$(call MakeDir, $$(@D) $$($1_OBJECT_DIR))
+ $$(call ExecuteWithLog, $$@, $$(call MakeCommandRelative, \
+ $$($1_RC) $$($1_RCFLAGS) $$($1_SYSROOT_CFLAGS) $(CC_OUT_OPTION)$$@ \
+ $$($1_VERSIONINFO_RESOURCE) 2>&1 ))
+ # Windows RC compiler does not support -showIncludes, so we mis-use CL
+ # for this. Filter out RC specific arguments that are unknown to CL.
+ # For some unknown reason, in this case CL actually outputs the show
+      # includes to stderr so we need to redirect it to hide the output from the
+ # main log.
+ $$(call ExecuteWithLog, $$($1_RES_DEPS_FILE)$(OBJ_SUFFIX), \
+ $$($1_CC) $$(filter-out -l%, $$($1_RCFLAGS)) \
+ $$($1_SYSROOT_CFLAGS) -showIncludes -nologo -TC \
+ $(CC_OUT_OPTION)$$($1_RES_DEPS_FILE)$(OBJ_SUFFIX) -P -Fi$$($1_RES_DEPS_FILE).pp \
+ $$($1_VERSIONINFO_RESOURCE)) 2>&1 \
+ | $(TR) -d '\r' | $(GREP) -v -e "^Note: including file:" \
+ -e "^$$(notdir $$($1_VERSIONINFO_RESOURCE))$$$$" || test "$$$$?" = "1" ; \
+ $(ECHO) $$($1_RES): \\ > $$($1_RES_DEPS_FILE) ; \
+ $(SED) $(WINDOWS_SHOWINCLUDE_SED_PATTERN) $$($1_RES_DEPS_FILE)$(OBJ_SUFFIX).log \
+ >> $$($1_RES_DEPS_FILE) ; \
+ $(ECHO) >> $$($1_RES_DEPS_FILE) ;\
+ $(SED) $(DEPENDENCY_TARGET_SED_PATTERN) $$($1_RES_DEPS_FILE) \
+ > $$($1_RES_DEPS_TARGETS_FILE)
+ endif
+endef
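CreateCompiledNativeFile above emits one .d dependency file per object and derives a .d.targets file from it with DEPENDENCY_TARGET_SED_PATTERN, following the auto-dependency-generation technique cited in its comments. A small worked illustration, with made-up file names, of what that pair contains and why the empty targets matter:

all: ;

# Hypothetical content of a compiler-generated foo.d (ordinary make syntax):
#
#   build/foo.o: src/foo.c src/foo.h src/util.h
#
# DEPENDENCY_TARGET_SED_PATTERN rewrites it into foo.d.targets, where every
# prerequisite is restated as an empty target:
src/foo.c src/foo.h src/util.h :
# With those empty targets loaded, deleting e.g. src/util.h no longer aborts
# the build with "No rule to make target 'src/util.h'"; build/foo.o is merely
# considered out of date and is recompiled, which in turn regenerates foo.d.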
diff --git a/make/common/native/DebugSymbols.gmk b/make/common/native/DebugSymbols.gmk
new file mode 100644
index 0000000000000..9f49f5e1d5292
--- /dev/null
+++ b/make/common/native/DebugSymbols.gmk
@@ -0,0 +1,118 @@
+#
+# Copyright (c) 2011, 2024, Oracle and/or its affiliates. All rights reserved.
+# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+#
+# This code is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License version 2 only, as
+# published by the Free Software Foundation. Oracle designates this
+# particular file as subject to the "Classpath" exception as provided
+# by Oracle in the LICENSE file that accompanied this code.
+#
+# This code is distributed in the hope that it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+# version 2 for more details (a copy is included in the LICENSE file that
+# accompanied this code).
+#
+# You should have received a copy of the GNU General Public License version
+# 2 along with this work; if not, write to the Free Software Foundation,
+# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+#
+# Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+# or visit www.oracle.com if you need additional information or have any
+# questions.
+#
+
+################################################################################
+# This file contains functionality related to native debug symbol handling.
+
+################################################################################
+define CreateDebugSymbols
+ ifneq ($$($1_COPY_DEBUG_SYMBOLS), false)
+ $1_COPY_DEBUG_SYMBOLS := $(COPY_DEBUG_SYMBOLS)
+ endif
+
+ ifneq ($$($1_ZIP_EXTERNAL_DEBUG_SYMBOLS), false)
+ $1_ZIP_EXTERNAL_DEBUG_SYMBOLS := $(ZIP_EXTERNAL_DEBUG_SYMBOLS)
+ endif
+
+ ifeq ($$($1_COPY_DEBUG_SYMBOLS), true)
+ ifneq ($$($1_DEBUG_SYMBOLS), false)
+ $$(call SetIfEmpty, $1_SYMBOLS_DIR, $$($1_OUTPUT_DIR))
+ # Only copy debug symbols for dynamic libraries and programs.
+ ifneq ($$($1_TYPE), STATIC_LIBRARY)
+ # Generate debuginfo files.
+ ifeq ($(call isTargetOs, windows), true)
+ $1_EXTRA_LDFLAGS += -debug "-pdb:$$($1_SYMBOLS_DIR)/$$($1_BASENAME).pdb" \
+ "-map:$$($1_SYMBOLS_DIR)/$$($1_BASENAME).map"
+ ifeq ($(SHIP_DEBUG_SYMBOLS), public)
+ $1_EXTRA_LDFLAGS += "-pdbstripped:$$($1_SYMBOLS_DIR)/$$($1_BASENAME).stripped.pdb"
+ endif
+ $1_DEBUGINFO_FILES := $$($1_SYMBOLS_DIR)/$$($1_BASENAME).pdb \
+ $$($1_SYMBOLS_DIR)/$$($1_BASENAME).map
+
+ else ifeq ($(call isTargetOs, linux), true)
+ $1_DEBUGINFO_FILES := $$($1_SYMBOLS_DIR)/$$($1_NOSUFFIX).debuginfo
+ # Setup the command line creating debuginfo files, to be run after linking.
+ # It cannot be run separately since it updates the original target file
+ # Creating the debuglink is done in another command rather than all at once
+ # so we can run it after strip is called, since strip can sometimes mangle the
+ # embedded debuglink, which we want to avoid.
+ $1_CREATE_DEBUGINFO_CMDS := \
+ $$($1_OBJCOPY) --only-keep-debug $$($1_TARGET) $$($1_DEBUGINFO_FILES) && \
+ $$(CHMOD) -x $$($1_DEBUGINFO_FILES)
+ $1_CREATE_DEBUGLINK_CMDS := $(CD) $$($1_SYMBOLS_DIR) && \
+ $$($1_OBJCOPY) --add-gnu-debuglink=$$($1_DEBUGINFO_FILES) $$($1_TARGET)
+
+ else ifeq ($(call isTargetOs, aix), true)
+ # AIX does not provide the equivalent of OBJCOPY to extract debug symbols,
+ # so we copy the compiled object with symbols to the .debuginfo file, which
+ # happens prior to the STRIP_CMD on the original target object file.
+ $1_DEBUGINFO_FILES := $$($1_SYMBOLS_DIR)/$$($1_NOSUFFIX).debuginfo
+ $1_CREATE_DEBUGINFO_CMDS := $(CP) $$($1_TARGET) $$($1_DEBUGINFO_FILES)
+
+ else ifeq ($(call isTargetOs, macosx), true)
+ $1_DEBUGINFO_FILES := \
+ $$($1_SYMBOLS_DIR)/$$($1_BASENAME).dSYM/Contents/Info.plist \
+ $$($1_SYMBOLS_DIR)/$$($1_BASENAME).dSYM/Contents/Resources/DWARF/$$($1_BASENAME)
+ $1_CREATE_DEBUGINFO_CMDS := \
+ $(DSYMUTIL) --out $$($1_SYMBOLS_DIR)/$$($1_BASENAME).dSYM $$($1_TARGET)
+ endif
+
+ # Since the link rule creates more than one file that we want to track,
+ # we have to use some tricks to get make to cooperate. To properly
+ # trigger downstream dependants of $$($1_DEBUGINFO_FILES), we must have
+ # a recipe in the rule below. To avoid rerunning the recipe every time
+ # have it touch the target. If a debuginfo file is deleted by something
+ # external, explicitly delete the TARGET to trigger a rebuild of both.
+ ifneq ($$(wildcard $$($1_DEBUGINFO_FILES)), $$($1_DEBUGINFO_FILES))
+ $$(call LogDebug, Deleting $$($1_BASENAME) because debuginfo files are missing)
+ $$(shell $(RM) $$($1_TARGET))
+ endif
+ $$($1_DEBUGINFO_FILES): $$($1_TARGET)
+ $$(if $$(CORRECT_FUNCTION_IN_RECIPE_EVALUATION), \
+ $$(if $$(wildcard $$@), , $$(error $$@ was not created for $$<)) \
+ )
+ $(TOUCH) $$@
+
+ $1 += $$($1_DEBUGINFO_FILES)
+
+ ifeq ($$($1_ZIP_EXTERNAL_DEBUG_SYMBOLS), true)
+ ifeq ($(call isTargetOs, windows), true)
+ $1_DEBUGINFO_ZIP := $$($1_SYMBOLS_DIR)/$$($1_BASENAME).diz
+ else
+ $1_DEBUGINFO_ZIP := $$($1_SYMBOLS_DIR)/$$($1_NOSUFFIX).diz
+ endif
+ $1 += $$($1_DEBUGINFO_ZIP)
+
+ # The dependency on TARGET is needed for debuginfo files
+ # to be rebuilt properly.
+ $$($1_DEBUGINFO_ZIP): $$($1_DEBUGINFO_FILES) $$($1_TARGET)
+ $(CD) $$($1_SYMBOLS_DIR) && \
+ $(ZIPEXE) -q -r $$@ $$(subst $$($1_SYMBOLS_DIR)/,, $$($1_DEBUGINFO_FILES))
+
+ endif
+ endif # !STATIC_LIBRARY
+ endif # $1_DEBUG_SYMBOLS != false
+ endif # COPY_DEBUG_SYMBOLS
+endef
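On Linux, CreateDebugSymbols above deliberately splits debug-symbol handling into two objcopy steps so that strip runs in between and cannot mangle the embedded debuglink. The sketch below only illustrates that ordering: the paths are hypothetical, the bare objcopy/strip/chmod names stand in for the configured tool variables, and the real macro additionally cd's into the symbols directory before adding the debuglink.

# Hypothetical paths standing in for $1_TARGET / $1_DEBUGINFO_FILES.
TARGET := build/lib/libfoo.so
DEBUGINFO := build/symbols/libfoo.debuginfo

# Right after linking: extract the debug info and drop the executable bit
# on the .debuginfo file.
CREATE_DEBUGINFO_CMDS := objcopy --only-keep-debug $(TARGET) $(DEBUGINFO) && \
    chmod -x $(DEBUGINFO)

# After strip, so strip cannot corrupt the debuglink section.
CREATE_DEBUGLINK_CMDS := objcopy --add-gnu-debuglink=$(DEBUGINFO) $(TARGET)

$(info after link: $(CREATE_DEBUGINFO_CMDS))
$(info then:       strip $(TARGET))
$(info finally:    $(CREATE_DEBUGLINK_CMDS))

all: ;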
diff --git a/make/common/native/Flags.gmk b/make/common/native/Flags.gmk
new file mode 100644
index 0000000000000..213312047a4ff
--- /dev/null
+++ b/make/common/native/Flags.gmk
@@ -0,0 +1,225 @@
+#
+# Copyright (c) 2011, 2024, Oracle and/or its affiliates. All rights reserved.
+# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+#
+# This code is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License version 2 only, as
+# published by the Free Software Foundation. Oracle designates this
+# particular file as subject to the "Classpath" exception as provided
+# by Oracle in the LICENSE file that accompanied this code.
+#
+# This code is distributed in the hope that it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+# version 2 for more details (a copy is included in the LICENSE file that
+# accompanied this code).
+#
+# You should have received a copy of the GNU General Public License version
+# 2 along with this work; if not, write to the Free Software Foundation,
+# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+#
+# Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+# or visit www.oracle.com if you need additional information or have any
+# questions.
+#
+
+################################################################################
+# This file contains functionality related to setting up compiler and linker
+# flags, based on more abstract descriptions of the compilation, such as the
+# optimization level.
+
+################################################################################
+# $1 is the prefix of the file to be compiled
+# $2 is the prefix of the library, i.e. $$($1_BASE)
+define SetupCompileFileFlags
+ ifeq ($$($1_OPTIMIZATION), )
+ $1_OPT_CFLAGS := $$($2_OPT_CFLAGS)
+ $1_OPT_CXXFLAGS := $$($2_OPT_CXXFLAGS)
+ else
+ ifeq ($$($1_OPTIMIZATION), NONE)
+ $1_OPT_CFLAGS := $(C_O_FLAG_NONE)
+ $1_OPT_CXXFLAGS := $(CXX_O_FLAG_NONE)
+ else ifeq ($$($1_OPTIMIZATION), LOW)
+ $1_OPT_CFLAGS := $(C_O_FLAG_NORM)
+ $1_OPT_CXXFLAGS := $(CXX_O_FLAG_NORM)
+ else ifeq ($$($1_OPTIMIZATION), HIGH)
+ $1_OPT_CFLAGS := $(C_O_FLAG_HI)
+ $1_OPT_CXXFLAGS := $(CXX_O_FLAG_HI)
+ else ifeq ($$($1_OPTIMIZATION), HIGHEST)
+ $1_OPT_CFLAGS := $(C_O_FLAG_HIGHEST)
+ $1_OPT_CXXFLAGS := $(CXX_O_FLAG_HIGHEST)
+ else ifeq ($$($1_OPTIMIZATION), HIGHEST_JVM)
+ $1_OPT_CFLAGS := $(C_O_FLAG_HIGHEST_JVM)
+ $1_OPT_CXXFLAGS := $(CXX_O_FLAG_HIGHEST_JVM)
+ else ifeq ($$($1_OPTIMIZATION), SIZE)
+ $1_OPT_CFLAGS := $(C_O_FLAG_SIZE)
+ $1_OPT_CXXFLAGS := $(CXX_O_FLAG_SIZE)
+ else
+ $$(error Unknown value for file OPTIMIZATION: $$($1_OPTIMIZATION))
+ endif
+ endif
+
+ ifneq ($$($2_PRECOMPILED_HEADER), )
+ ifeq ($$(filter $$($1_FILENAME), $$($2_PRECOMPILED_HEADER_EXCLUDE)), )
+ $1_USE_PCH_FLAGS := $$($2_USE_PCH_FLAGS)
+ endif
+ endif
+
+ ifneq ($(DISABLE_WARNING_PREFIX), )
+ $1_WARNINGS_FLAGS := $$(addprefix $(DISABLE_WARNING_PREFIX), \
+ $$($2_DISABLED_WARNINGS_$(TOOLCHAIN_TYPE)_$$($1_FILENAME)) \
+ $$($2_DISABLED_WARNINGS_$(TOOLCHAIN_TYPE)_$(OPENJDK_TARGET_OS)_$$($1_FILENAME)))
+ endif
+
+ $1_BASE_CFLAGS := $$($2_CFLAGS) $$($2_EXTRA_CFLAGS) \
+ $$($2_SYSROOT_CFLAGS)
+ $1_BASE_CXXFLAGS := $$($2_CXXFLAGS) $$($2_EXTRA_CXXFLAGS) \
+ $$($2_SYSROOT_CFLAGS) $$($1_EXTRA_CXXFLAGS)
+ $1_BASE_ASFLAGS := $$($2_ASFLAGS) $$($2_EXTRA_ASFLAGS)
+endef
+
+################################################################################
+define SetupCompilerFlags
+ # Pickup extra OPENJDK_TARGET_OS_TYPE, OPENJDK_TARGET_OS, TOOLCHAIN_TYPE and
+ # OPENJDK_TARGET_OS plus OPENJDK_TARGET_CPU pair dependent variables for CFLAGS.
+ $1_EXTRA_CFLAGS := $$($1_CFLAGS_$(OPENJDK_TARGET_OS_TYPE)) $$($1_CFLAGS_$(OPENJDK_TARGET_OS)) \
+ $$($1_CFLAGS_$(TOOLCHAIN_TYPE)) \
+ $$($1_CFLAGS_$(OPENJDK_TARGET_OS)_$(OPENJDK_TARGET_CPU))
+
+ ifneq ($(DEBUG_LEVEL), release)
+ # Pickup extra debug dependent variables for CFLAGS
+ $1_EXTRA_CFLAGS += $$($1_CFLAGS_debug)
+ $1_EXTRA_CFLAGS += $$($1_CFLAGS_$(OPENJDK_TARGET_OS_TYPE)_debug)
+ $1_EXTRA_CFLAGS += $$($1_CFLAGS_$(OPENJDK_TARGET_OS)_debug)
+ $1_EXTRA_CFLAGS += $$($1_CFLAGS_$(OPENJDK_TARGET_OS)_$(OPENJDK_TARGET_CPU)_debug)
+ else
+ $1_EXTRA_CFLAGS += $$($1_CFLAGS_release)
+ $1_EXTRA_CFLAGS += $$($1_CFLAGS_$(OPENJDK_TARGET_OS_TYPE)_release)
+ $1_EXTRA_CFLAGS += $$($1_CFLAGS_$(OPENJDK_TARGET_OS)_release)
+ $1_EXTRA_CFLAGS += $$($1_CFLAGS_$(OPENJDK_TARGET_OS)_$(OPENJDK_TARGET_CPU)_release)
+ endif
+ ifeq ($(STATIC_LIBS), true)
+ $1_EXTRA_CFLAGS += $$(STATIC_LIBS_CFLAGS)
+ endif
+
+ # Pickup extra OPENJDK_TARGET_OS_TYPE, OPENJDK_TARGET_OS and/or TOOLCHAIN_TYPE
+ # dependent variables for CXXFLAGS.
+ $1_EXTRA_CXXFLAGS := $$($1_CXXFLAGS_$(OPENJDK_TARGET_OS_TYPE)) $$($1_CXXFLAGS_$(OPENJDK_TARGET_OS)) \
+ $$($1_CXXFLAGS_$(TOOLCHAIN_TYPE))
+
+ ifneq ($(DEBUG_LEVEL), release)
+ # Pickup extra debug dependent variables for CXXFLAGS
+ $1_EXTRA_CXXFLAGS += $$($1_CXXFLAGS_debug)
+ $1_EXTRA_CXXFLAGS += $$($1_CXXFLAGS_$(OPENJDK_TARGET_OS_TYPE)_debug)
+ $1_EXTRA_CXXFLAGS += $$($1_CXXFLAGS_$(OPENJDK_TARGET_OS)_debug)
+ else
+ $1_EXTRA_CXXFLAGS += $$($1_CXXFLAGS_release)
+ $1_EXTRA_CXXFLAGS += $$($1_CXXFLAGS_$(OPENJDK_TARGET_OS_TYPE)_release)
+ $1_EXTRA_CXXFLAGS += $$($1_CXXFLAGS_$(OPENJDK_TARGET_OS)_release)
+ endif
+ ifeq ($(STATIC_LIBS), true)
+ $1_EXTRA_CXXFLAGS += $$(STATIC_LIB_CFLAGS)
+ endif
+
+ # If no C++ flags are explicitly set, default to using the C flags.
+ # After that, we can set additional C++ flags that should not interfere
+ # with the mechanism for copying the C flags by default.
+ ifeq ($$($1_CXXFLAGS), )
+ $1_CXXFLAGS := $$($1_CFLAGS)
+ endif
+ ifeq ($$(strip $$($1_EXTRA_CXXFLAGS)), )
+ $1_EXTRA_CXXFLAGS := $$($1_EXTRA_CFLAGS)
+ endif
+
+ $$(call SetIfEmpty, $1_COMPILE_WITH_DEBUG_SYMBOLS, $$(COMPILE_WITH_DEBUG_SYMBOLS))
+
+ ifeq ($(STATIC_LIBS), true)
+ # For release builds where debug symbols are configured to be moved to
+ # separate debuginfo files, disable debug symbols for static libs instead.
+ # We don't currently support this configuration and we don't want symbol
+ # information in release builds unless explicitly asked to provide it.
+ ifeq ($(DEBUG_LEVEL), release)
+ ifeq ($(COPY_DEBUG_SYMBOLS), true)
+ $1_COMPILE_WITH_DEBUG_SYMBOLS := false
+ endif
+ endif
+ endif
+
+ ifeq ($$($1_COMPILE_WITH_DEBUG_SYMBOLS), true)
+ $1_EXTRA_CFLAGS += $$(CFLAGS_DEBUG_SYMBOLS)
+ $1_EXTRA_CXXFLAGS += $$(CFLAGS_DEBUG_SYMBOLS)
+ $1_EXTRA_ASFLAGS += $$(ASFLAGS_DEBUG_SYMBOLS)
+ endif
+
+ # Pass the library name for static JNI library naming
+ ifeq ($$($1_TYPE), STATIC_LIBRARY)
+ $1_EXTRA_CFLAGS += -DLIBRARY_NAME=$$($1_NAME)
+ $1_EXTRA_CXXFLAGS += -DLIBRARY_NAME=$$($1_NAME)
+ endif
+
+ # Pick up disabled warnings, if possible on this platform.
+ ifneq ($(DISABLE_WARNING_PREFIX), )
+ $1_EXTRA_CFLAGS += $$(addprefix $(DISABLE_WARNING_PREFIX), \
+ $$(DISABLED_WARNINGS) \
+ $$(DISABLED_WARNINGS_C) \
+ $$($1_DISABLED_WARNINGS_$(TOOLCHAIN_TYPE)) \
+ $$($1_DISABLED_WARNINGS_C_$(TOOLCHAIN_TYPE)) \
+ $$($1_DISABLED_WARNINGS_$(TOOLCHAIN_TYPE)_$(OPENJDK_TARGET_OS)) \
+ $$($1_DISABLED_WARNINGS_C_$(TOOLCHAIN_TYPE)_$(OPENJDK_TARGET_OS)))
+ $1_EXTRA_CXXFLAGS += $$(addprefix $(DISABLE_WARNING_PREFIX), \
+ $$(DISABLED_WARNINGS) \
+ $$(DISABLED_WARNINGS_CXX) \
+ $$($1_DISABLED_WARNINGS_$(TOOLCHAIN_TYPE)) \
+ $$($1_DISABLED_WARNINGS_CXX_$(TOOLCHAIN_TYPE)) \
+ $$($1_DISABLED_WARNINGS_$(TOOLCHAIN_TYPE)_$(OPENJDK_TARGET_OS)) \
+ $$($1_DISABLED_WARNINGS_CXX_$(TOOLCHAIN_TYPE)_$(OPENJDK_TARGET_OS)))
+ endif
+
+ # Check if warnings should be considered errors.
+ # Pick first binary and toolchain specific, then binary specific, then general setting.
+ ifeq ($$($1_WARNINGS_AS_ERRORS_$(TOOLCHAIN_TYPE)), )
+ ifeq ($$($1_WARNINGS_AS_ERRORS), )
+ $1_WARNINGS_AS_ERRORS_$(TOOLCHAIN_TYPE) := $$(WARNINGS_AS_ERRORS)
+ else
+ $1_WARNINGS_AS_ERRORS_$(TOOLCHAIN_TYPE) := $$($1_WARNINGS_AS_ERRORS)
+ endif
+ endif
+
+ ifeq ($$($1_WARNINGS_AS_ERRORS_$(TOOLCHAIN_TYPE)), true)
+ $1_EXTRA_CFLAGS += $(CFLAGS_WARNINGS_ARE_ERRORS)
+ $1_EXTRA_CXXFLAGS += $(CFLAGS_WARNINGS_ARE_ERRORS)
+ endif
+
+ ifeq (NONE, $$($1_OPTIMIZATION))
+ $1_OPT_CFLAGS := $(C_O_FLAG_NONE)
+ $1_OPT_CXXFLAGS := $(CXX_O_FLAG_NONE)
+ else ifeq (LOW, $$($1_OPTIMIZATION))
+ $1_OPT_CFLAGS := $(C_O_FLAG_NORM)
+ $1_OPT_CXXFLAGS := $(CXX_O_FLAG_NORM)
+ else ifeq (HIGH, $$($1_OPTIMIZATION))
+ $1_OPT_CFLAGS := $(C_O_FLAG_HI)
+ $1_OPT_CXXFLAGS := $(CXX_O_FLAG_HI)
+ else ifeq (HIGHEST, $$($1_OPTIMIZATION))
+ $1_OPT_CFLAGS := $(C_O_FLAG_HIGHEST)
+ $1_OPT_CXXFLAGS := $(CXX_O_FLAG_HIGHEST)
+ else ifeq (HIGHEST_JVM, $$($1_OPTIMIZATION))
+ $1_OPT_CFLAGS := $(C_O_FLAG_HIGHEST_JVM)
+ $1_OPT_CXXFLAGS := $(CXX_O_FLAG_HIGHEST_JVM)
+ else ifeq (SIZE, $$($1_OPTIMIZATION))
+ $1_OPT_CFLAGS := $(C_O_FLAG_SIZE)
+ $1_OPT_CXXFLAGS := $(CXX_O_FLAG_SIZE)
+ else ifneq (, $$($1_OPTIMIZATION))
+ $$(error Unknown value for OPTIMIZATION: $$($1_OPTIMIZATION))
+ endif
+endef
+
+################################################################################
+define SetupLinkerFlags
+ # Pickup extra OPENJDK_TARGET_OS_TYPE, OPENJDK_TARGET_OS and TOOLCHAIN_TYPE
+ # dependent variables for LDFLAGS and LIBS, and additionally the pair dependent
+ # TOOLCHAIN_TYPE plus OPENJDK_TARGET_OS
+ $1_EXTRA_LDFLAGS += $$($1_LDFLAGS_$(OPENJDK_TARGET_OS_TYPE)) $$($1_LDFLAGS_$(OPENJDK_TARGET_OS)) \
+ $$($1_LDFLAGS_$(TOOLCHAIN_TYPE)) $$($1_LDFLAGS_$(TOOLCHAIN_TYPE)_$(OPENJDK_TARGET_OS))
+ $1_EXTRA_LIBS += $$($1_LIBS_$(OPENJDK_TARGET_OS_TYPE)) $$($1_LIBS_$(OPENJDK_TARGET_OS)) \
+ $$($1_LIBS_$(TOOLCHAIN_TYPE)) $$($1_LIBS_$(TOOLCHAIN_TYPE)_$(OPENJDK_TARGET_OS))
+endef
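SetupCompileFileFlags above lets an individual source file override the binary-wide OPTIMIZATION level, falling back to the flags SetupCompilerFlags computed when no per-file value is given. A minimal sketch of that two-level lookup; the -O values and the LIBFOO/hot.c names are placeholders, since the real C_O_FLAG_* variables are toolchain-specific and come from configure.

# Placeholder values for the toolchain-specific optimization flags.
C_O_FLAG_NORM := -O2
C_O_FLAG_HI := -O3

# Binary-wide result, as SetupCompilerFlags would compute for OPTIMIZATION := LOW.
LIBFOO_OPT_CFLAGS := $(C_O_FLAG_NORM)

# Per-file override for one hot source file (hypothetical names).
LIBFOO_hot.c_OPTIMIZATION := HIGH

ifeq ($(LIBFOO_hot.c_OPTIMIZATION), )
  LIBFOO_hot.c_OPT_CFLAGS := $(LIBFOO_OPT_CFLAGS)
else ifeq ($(LIBFOO_hot.c_OPTIMIZATION), HIGH)
  LIBFOO_hot.c_OPT_CFLAGS := $(C_O_FLAG_HI)
endif

$(info hot.c:   $(LIBFOO_hot.c_OPT_CFLAGS))  # -O3
$(info default: $(LIBFOO_OPT_CFLAGS))        # -O2

all: ;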
diff --git a/make/common/native/Link.gmk b/make/common/native/Link.gmk
new file mode 100644
index 0000000000000..fb23152d4fb9b
--- /dev/null
+++ b/make/common/native/Link.gmk
@@ -0,0 +1,182 @@
+#
+# Copyright (c) 2011, 2024, Oracle and/or its affiliates. All rights reserved.
+# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+#
+# This code is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License version 2 only, as
+# published by the Free Software Foundation. Oracle designates this
+# particular file as subject to the "Classpath" exception as provided
+# by Oracle in the LICENSE file that accompanied this code.
+#
+# This code is distributed in the hope that it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+# version 2 for more details (a copy is included in the LICENSE file that
+# accompanied this code).
+#
+# You should have received a copy of the GNU General Public License version
+# 2 along with this work; if not, write to the Free Software Foundation,
+# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+#
+# Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+# or visit www.oracle.com if you need additional information or have any
+# questions.
+#
+
+################################################################################
+# This file contains functionality related to linking a native binary;
+# creating either a dynamic library, a static library or an executable.
+
+################################################################################
+# GetEntitlementsFile
+# Find entitlements file for executable when signing on macosx. If no
+# specialized file is found, returns the default file.
+# This macro might be called from custom makefiles.
+# $1 Executable to find entitlements file for.
+ENTITLEMENTS_DIR := $(TOPDIR)/make/data/macosxsigning
+ifeq ($(MACOSX_CODESIGN_MODE), debug)
+ CODESIGN_PLIST_SUFFIX := -debug
+else
+ CODESIGN_PLIST_SUFFIX :=
+endif
+DEFAULT_ENTITLEMENTS_FILE := $(ENTITLEMENTS_DIR)/default$(CODESIGN_PLIST_SUFFIX).plist
+
+GetEntitlementsFile = \
+ $(foreach f, $(ENTITLEMENTS_DIR)/$(strip $(notdir $1))$(CODESIGN_PLIST_SUFFIX).plist, \
+ $(if $(wildcard $f), $f, $(DEFAULT_ENTITLEMENTS_FILE)) \
+ )
+
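A usage sketch (the binary path is hypothetical; only the basename matters to the macro):

  # Resolves to make/data/macosxsigning/java.plist (java-debug.plist when
  # MACOSX_CODESIGN_MODE is debug) if such a file exists, and to the default
  # plist otherwise.
  EXAMPLE_ENTITLEMENTS := $(call GetEntitlementsFile, \
      $(SUPPORT_OUTPUTDIR)/native/java.base/java_objs/java)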
+################################################################################
+define SetupLinking
+ # Unless specifically set, stripping should only happen if symbols are also
+ # being copied.
+ $$(call SetIfEmpty, $1_STRIP_SYMBOLS, $$($1_COPY_DEBUG_SYMBOLS))
+
+ ifneq ($$($1_STRIP_SYMBOLS), false)
+ # Default to using the global STRIPFLAGS. Allow for overriding with an
+ # empty value
+ $1_STRIPFLAGS ?= $(STRIPFLAGS)
+ $1_STRIP_CMD := $$($1_STRIP) $$($1_STRIPFLAGS) $$($1_TARGET)
+ endif
+endef
+
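A sketch of the per-binary overrides this honours (the library is hypothetical; COPY_DEBUG_SYMBOLS, STRIP_SYMBOLS and STRIPFLAGS are existing parameters):

  $(eval $(call SetupJdkLibrary, BUILD_LIBSAMPLE, \
      NAME := sample, \
      COPY_DEBUG_SYMBOLS := false, \
      STRIP_SYMBOLS := true, \
  ))
  # Here stripping still happens even though no debug symbols are copied, using
  # the global STRIPFLAGS since no per-binary STRIPFLAGS was given.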
+################################################################################
+define CreateLinkedResult
+ ifeq ($$($1_TYPE), STATIC_LIBRARY)
+ $$(eval $$(call CreateStaticLibrary,$1))
+ else
+ $$(eval $$(call CreateDynamicLibraryOrExecutable,$1))
+ endif
+endef
+
+################################################################################
+define CreateStaticLibrary
+ # Include partial linking when building the static library with clang on linux
+ ifeq ($(call isTargetOs, linux), true)
+ ifneq ($(findstring $(TOOLCHAIN_TYPE), clang), )
+ $1_ENABLE_PARTIAL_LINKING := true
+ endif
+ endif
+
+ $1_VARDEPS := $$($1_AR) $$(ARFLAGS) $$($1_ARFLAGS) $$($1_LIBS) \
+ $$($1_EXTRA_LIBS)
+ ifeq ($$($1_ENABLE_PARTIAL_LINKING), true)
+ $1_VARDEPS += $$($1_LD) $$($1_SYSROOT_LDFLAGS)
+ endif
+ $1_VARDEPS_FILE := $$(call DependOnVariable, $1_VARDEPS, \
+ $$($1_OBJECT_DIR)/$$($1_NOSUFFIX).vardeps)
+
+ $1_TARGET_DEPS := $$($1_ALL_OBJS) $$($1_VARDEPS_FILE)
+
+ $1_AR_OBJ_ARG := $$($1_LD_OBJ_ARG)
+ # With clang on linux, partial linking is enabled and 'AR' takes the output
+ # object from the partial linking step.
+ ifeq ($$($1_ENABLE_PARTIAL_LINKING), true)
+ $1_TARGET_RELOCATABLE := $$($1_OBJECT_DIR)/$$($1_PREFIX)$$($1_NAME)_relocatable$(OBJ_SUFFIX)
+ $1_AR_OBJ_ARG := $$($1_TARGET_RELOCATABLE)
+ endif
+
+ $$($1_TARGET): $$($1_TARGET_DEPS)
+ ifneq ($$($1_OBJ_FILE_LIST), )
+ ifeq ($$($1_LINK_OBJS_RELATIVE), true)
+ $$(eval $$(call ListPathsSafely, $1_ALL_OBJS_RELATIVE, $$($1_OBJ_FILE_LIST)))
+ else
+ $$(eval $$(call ListPathsSafely, $1_ALL_OBJS, $$($1_OBJ_FILE_LIST)))
+ endif
+ endif
+ $$(call LogInfo, Building static library $$($1_BASENAME))
+ $$(call MakeDir, $$($1_OUTPUT_DIR) $$($1_SYMBOLS_DIR))
+ # Do partial linking.
+ ifeq ($$($1_ENABLE_PARTIAL_LINKING), true)
+ $$(call ExecuteWithLog, $$($1_OBJECT_DIR)/$$($1_SAFE_NAME)_partial_link, \
+ $(if $$($1_LINK_OBJS_RELATIVE), $$(CD) $$(OUTPUTDIR) ; ) \
+ $$($1_LD) $(LDFLAGS_CXX_PARTIAL_LINKING) $$($1_SYSROOT_LDFLAGS) \
+ -o $$($1_TARGET_RELOCATABLE) \
+ $$($1_LD_OBJ_ARG))
+ endif
+ $$(call ExecuteWithLog, $$($1_OBJECT_DIR)/$$($1_SAFE_NAME)_link, \
+ $(if $$($1_LINK_OBJS_RELATIVE), $$(CD) $$(OUTPUTDIR) ; ) \
+ $$($1_AR) $$(ARFLAGS) $$($1_ARFLAGS) -r -cs $$($1_TARGET) \
+ $$($1_AR_OBJ_ARG) $$($1_RES))
+ ifeq ($(STATIC_BUILD), true)
+ $(RM) $$(@D)/$$(basename $$(@F)).symbols; \
+ $(ECHO) "Getting symbols from nm"; \
+ $(NM) $(NMFLAGS) -m $$($1_TARGET) | $(GREP) "__TEXT" | \
+ $(EGREP) -v "non-external|private extern|__TEXT,__eh_frame" | \
+ $(SED) -e 's/.* //' > $$(@D)/$$(basename $$(@F)).symbols
+ endif
+endef
+
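With clang on linux the recipe above boils down to a two-step sequence, roughly as sketched below (names are hypothetical and the exact flags come from LDFLAGS_CXX_PARTIAL_LINKING, so treat the spelling as an assumption):

  #   ld <partial-linking flags> -o objs/libfoo_relocatable.o foo1.o foo2.o ...
  #   ar -r -cs libfoo.a objs/libfoo_relocatable.o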
+################################################################################
+define CreateDynamicLibraryOrExecutable
+ # A shared dynamic library or an executable binary has been specified
+ ifeq ($$($1_TYPE), LIBRARY)
+ # Generating a dynamic library.
+ $1_EXTRA_LDFLAGS += $$(call SET_SHARED_LIBRARY_NAME,$$($1_BASENAME))
+
+ # Create loadmap on AIX. Helps in diagnosing some problems.
+ ifneq ($(COMPILER_BINDCMD_FILE_FLAG), )
+ $1_EXTRA_LDFLAGS += $(COMPILER_BINDCMD_FILE_FLAG)$$($1_OBJECT_DIR)/$$($1_NOSUFFIX).loadmap
+ endif
+ endif
+
+ $1_VARDEPS := $$($1_LD) $$($1_SYSROOT_LDFLAGS) $$($1_LDFLAGS) \
+ $$($1_EXTRA_LDFLAGS) $$($1_LIBS) $$($1_EXTRA_LIBS) \
+ $$($1_CREATE_DEBUGINFO_CMDS) $$($1_STRIP_CMD) $$($1_CREATE_DEBUGLINK_CMDS)
+ $1_VARDEPS_FILE := $$(call DependOnVariable, $1_VARDEPS, \
+ $$($1_OBJECT_DIR)/$$($1_NOSUFFIX).vardeps)
+
+ $1_TARGET_DEPS := $$($1_ALL_OBJS) $$($1_VARDEPS_FILE)
+
+ $$($1_TARGET): $$($1_TARGET_DEPS)
+ ifneq ($$($1_OBJ_FILE_LIST), )
+ ifeq ($$($1_LINK_OBJS_RELATIVE), true)
+ $$(eval $$(call ListPathsSafely, $1_ALL_OBJS_RELATIVE, $$($1_OBJ_FILE_LIST)))
+ else
+ $$(eval $$(call ListPathsSafely, $1_ALL_OBJS, $$($1_OBJ_FILE_LIST)))
+ endif
+ endif
+ $$(call LogInfo, Linking $$($1_BASENAME))
+ $$(call MakeDir, $$($1_OUTPUT_DIR) $$($1_SYMBOLS_DIR))
+ $$(call ExecuteWithLog, $$($1_OBJECT_DIR)/$$($1_SAFE_NAME)_link, \
+ $$(if $$($1_LINK_OBJS_RELATIVE), $$(CD) $$(OUTPUTDIR) ; ) \
+ $$($1_LD) $$($1_LDFLAGS) $$($1_EXTRA_LDFLAGS) \
+ $$($1_SYSROOT_LDFLAGS) -o $$($1_TARGET) $$($1_LD_OBJ_ARG) \
+ $$($1_LIBS) $$($1_EXTRA_LIBS))
+ $$($1_CREATE_DEBUGINFO_CMDS)
+ $$($1_STRIP_CMD)
+ $$($1_CREATE_DEBUGLINK_CMDS)
+ # On macosx, optionally run codesign on every binary.
+ # Remove signature explicitly first to avoid warnings if the linker
+ # added a default adhoc signature.
+ ifeq ($(MACOSX_CODESIGN_MODE), hardened)
+ $(CODESIGN) --remove-signature $$@
+ $(CODESIGN) -f -s "$(MACOSX_CODESIGN_IDENTITY)" --timestamp \
+ --options runtime --entitlements \
+ $$(call GetEntitlementsFile, $$@) $$@
+ else ifeq ($(MACOSX_CODESIGN_MODE), debug)
+ $(CODESIGN) --remove-signature $$@
+ $(CODESIGN) -f -s - --entitlements \
+ $$(call GetEntitlementsFile, $$@) $$@
+ endif
+endef
diff --git a/make/common/native/LinkMicrosoft.gmk b/make/common/native/LinkMicrosoft.gmk
new file mode 100644
index 0000000000000..f998bf3d117f1
--- /dev/null
+++ b/make/common/native/LinkMicrosoft.gmk
@@ -0,0 +1,112 @@
+#
+# Copyright (c) 2011, 2024, Oracle and/or its affiliates. All rights reserved.
+# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+#
+# This code is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License version 2 only, as
+# published by the Free Software Foundation. Oracle designates this
+# particular file as subject to the "Classpath" exception as provided
+# by Oracle in the LICENSE file that accompanied this code.
+#
+# This code is distributed in the hope that it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+# version 2 for more details (a copy is included in the LICENSE file that
+# accompanied this code).
+#
+# You should have received a copy of the GNU General Public License version
+# 2 along with this work; if not, write to the Free Software Foundation,
+# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+#
+# Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+# or visit www.oracle.com if you need additional information or have any
+# questions.
+#
+
+################################################################################
+# This file contains functionality related to linking a native binary with the
+# Microsoft toolchain; creating either a dynamic library, a static library or
+# an executable.
+
+################################################################################
+define CreateLinkedResultMicrosoft
+ ifeq ($$($1_TYPE), STATIC_LIBRARY)
+ $$(eval $$(call CreateStaticLibraryMicrosoft,$1))
+ else
+ $$(eval $$(call CreateDynamicLibraryOrExecutableMicrosoft,$1))
+ endif
+endef
+
+################################################################################
+define CreateStaticLibraryMicrosoft
+ $1_VARDEPS := $$($1_LIB) $$(LIBFLAGS) $$($1_LIBFLAGS) $$($1_LIBS) \
+ $$($1_EXTRA_LIBS)
+ $1_VARDEPS_FILE := $$(call DependOnVariable, $1_VARDEPS, \
+ $$($1_OBJECT_DIR)/$$($1_NOSUFFIX).vardeps)
+
+ $$($1_TARGET): $$($1_ALL_OBJS) $$($1_RES) $$($1_VARDEPS_FILE)
+ ifneq ($$($1_OBJ_FILE_LIST), )
+ $$(eval $$(call ListPathsSafely, $1_ALL_OBJS, $$($1_OBJ_FILE_LIST)))
+ endif
+ $$(call LogInfo, Building static library $$($1_BASENAME))
+ $$(call MakeDir, $$($1_OUTPUT_DIR) $$($1_SYMBOLS_DIR))
+ $$(call ExecuteWithLog, $$($1_OBJECT_DIR)/$$($1_SAFE_NAME)_link, \
+ $$($1_LIB) -nologo $$(LIBFLAGS) $$($1_LIBFLAGS) -out:$$($1_TARGET) \
+ $$($1_LD_OBJ_ARG) $$($1_RES))
+endef
+
+################################################################################
+define CreateDynamicLibraryOrExecutableMicrosoft
+ ifeq ($$($1_EMBED_MANIFEST), true)
+ $1_EXTRA_LDFLAGS += -manifest:embed
+ endif
+
+ $1_IMPORT_LIBRARY := $$($1_OBJECT_DIR)/$$($1_NAME).lib
+ $1_EXTRA_LDFLAGS += "-implib:$$($1_IMPORT_LIBRARY)"
+
+ ifeq ($$($1_TYPE), LIBRARY)
+    # To properly trigger downstream dependents of the import library, just as
+    # for debug files, we must have a recipe in the rule. To avoid rerunning
+    # the recipe every time, have it touch the target. If an import library
+ # file is deleted by something external, explicitly delete the target to
+ # trigger a rebuild of both.
+ ifneq ($$(wildcard $$($1_IMPORT_LIBRARY)), $$($1_IMPORT_LIBRARY))
+ $$(call LogDebug, Deleting $$($1_BASENAME) because import library is missing)
+ $$(shell $(RM) $$($1_TARGET))
+ endif
+ $$($1_IMPORT_LIBRARY): $$($1_TARGET)
+ $(TOUCH) $$@
+
+ $1 += $$($1_IMPORT_LIBRARY)
+ endif
+
+ $1_VARDEPS := $$($1_LD) $$($1_SYSROOT_LDFLAGS) $$($1_LDFLAGS) \
+ $$($1_EXTRA_LDFLAGS) $$($1_LIBS) $$($1_EXTRA_LIBS) $$($1_MT) \
+ $$($1_MANIFEST_VERSION)
+
+ $1_VARDEPS_FILE := $$(call DependOnVariable, $1_VARDEPS, \
+ $$($1_OBJECT_DIR)/$$($1_NOSUFFIX).vardeps)
+
+ $1_TARGET_DEPS := $$($1_ALL_OBJS) $$($1_RES) $$($1_MANIFEST) \
+ $$($1_VARDEPS_FILE)
+
+ $$($1_TARGET): $$($1_TARGET_DEPS)
+ ifneq ($$($1_OBJ_FILE_LIST), )
+ $$(eval $$(call ListPathsSafely, $1_ALL_OBJS, $$($1_OBJ_FILE_LIST)))
+ endif
+ $$(call LogInfo, Linking $$($1_BASENAME))
+ $$(call MakeDir, $$($1_OUTPUT_DIR) $$($1_SYMBOLS_DIR))
+ $$(call ExecuteWithLog, $$($1_OBJECT_DIR)/$$($1_SAFE_NAME)_link, \
+ $$($1_LD) -nologo $$($1_LDFLAGS) $$($1_EXTRA_LDFLAGS) \
+ $$($1_SYSROOT_LDFLAGS) -out:$$($1_TARGET) $$($1_LD_OBJ_ARG) \
+ $$($1_RES) $$($1_LIBS) $$($1_EXTRA_LIBS)) \
+ | $(GREP) -v "^ Creating library .*\.lib and object .*\.exp" || \
+ test "$$$$?" = "1"
+ ifeq ($(call isBuildOsEnv, windows.wsl2), true)
+ $$(CHMOD) +x $$($1_TARGET)
+ endif
+ ifneq ($$($1_MANIFEST), )
+ $$($1_MT) -nologo -manifest $$($1_MANIFEST) \
+ -identity:"$$($1_NAME).exe, version=$$($1_MANIFEST_VERSION)" \
+ -outputresource:$$@;#1
+ endif
+endef
diff --git a/make/common/native/Paths.gmk b/make/common/native/Paths.gmk
new file mode 100644
index 0000000000000..67aa61d86e968
--- /dev/null
+++ b/make/common/native/Paths.gmk
@@ -0,0 +1,247 @@
+#
+# Copyright (c) 2011, 2024, Oracle and/or its affiliates. All rights reserved.
+# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+#
+# This code is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License version 2 only, as
+# published by the Free Software Foundation. Oracle designates this
+# particular file as subject to the "Classpath" exception as provided
+# by Oracle in the LICENSE file that accompanied this code.
+#
+# This code is distributed in the hope that it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+# version 2 for more details (a copy is included in the LICENSE file that
+# accompanied this code).
+#
+# You should have received a copy of the GNU General Public License version
+# 2 along with this work; if not, write to the Free Software Foundation,
+# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+#
+# Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+# or visit www.oracle.com if you need additional information or have any
+# questions.
+#
+
+################################################################################
+# This file contains functionality related to handling paths for source files
+# and object files. This is complicated by the fact that we usually, but not
+# always, use absolute instead of relative paths. It is further complicated
+# by the fact that not all tools allow inputting large lists of files as
+# "@-files", which we normally use to avoid hitting command line length limits.
+# Finally this file contains functionality for locating all source code files
+# that should be included in the compilation.
+
+################################################################################
+# When absolute paths are not allowed in the output, and the compiler does not
+# support any options to avoid it, we need to rewrite compile commands to use
+# relative paths. By doing this, the __FILE__ macro will resolve to relative
+# paths. The relevant input paths on the command line are the -I flags and the
+# path to the source file itself.
+#
+# The macro MakeCommandRelative is used to rewrite the command line by
+# prefixing it with 'CD $(WORKSPACE_ROOT) && ' and by changing all paths in
+# the command to be relative to the workspace root. This only
+# works properly if the build dir is inside the workspace root. If it's not,
+# relative paths are still calculated, but depending on the distance between the
+# dirs, paths in the build dir may end up as essentially absolute anyway.
+#
+# The fix-deps-file macro is used to adjust the contents of the generated make
+# dependency files to contain paths compatible with make.
+#
+REWRITE_PATHS_RELATIVE = false
+ifeq ($(ALLOW_ABSOLUTE_PATHS_IN_OUTPUT)-$(FILE_MACRO_CFLAGS), false-)
+ REWRITE_PATHS_RELATIVE = true
+endif
+
+# CCACHE_BASEDIR needs fix-deps-file as makefiles use absolute filenames for
+# object files while CCACHE_BASEDIR will make ccache relativize all paths for
+# its compiler. The compiler then produces relative dependency files.
+# make does not know that a relative and an absolute filename refer to the
+# same file, so it will ignore such dependencies. This only applies when the
+# OUTPUTDIR is inside the WORKSPACE_ROOT.
+ifneq ($(CCACHE), )
+ ifneq ($(filter $(WORKSPACE_ROOT)/%, $(OUTPUTDIR)), )
+ REWRITE_PATHS_RELATIVE = true
+ endif
+endif
+
+ifeq ($(REWRITE_PATHS_RELATIVE), true)
+ # Need to handle -I flags as both '-Ifoo' and '-I foo'.
+ MakeCommandRelative = \
+ $(CD) $(WORKSPACE_ROOT) && \
+ $(foreach o, $1, \
+ $(if $(filter $(WORKSPACE_ROOT)/% $(OUTPUTDIR)/%, $o), \
+ $(call RelativePath, $o, $(WORKSPACE_ROOT)) \
+ , \
+ $(if $(filter -I$(WORKSPACE_ROOT)/%, $o), \
+ -I$(call RelativePath, $(patsubst -I%, %, $o), $(WORKSPACE_ROOT)) \
+ , \
+ $o \
+ ) \
+ ) \
+ )
+
+  # When compiling with relative paths, the deps file may come out with relative
+  # paths, and those paths may start with './'. First remove any leading ./, then
+ # add WORKSPACE_ROOT to any line not starting with /, while allowing for
+ # leading spaces. There may also be multiple entries on the same line, so start
+ # with splitting such lines.
+ # Non GNU sed (BSD on macosx) cannot substitute in literal \n using regex.
+ # Instead use a bash escaped literal newline. To avoid having unmatched quotes
+ # ruin the ability for an editor to properly syntax highlight this file, define
+ # that newline sequence as a separate variable and add the closing quote behind
+ # a comment.
+ sed_newline := \'$$'\n''#'
+ define fix-deps-file
+ $(SED) \
+ -e 's|\([^ ]\) \{1,\}\([^\\:]\)|\1 \\$(sed_newline) \2|g' \
+ $1.tmp \
+ | $(SED) \
+ -e 's|^\([ ]*\)\./|\1|' \
+ -e '/^[ ]*[^/ ]/s|^\([ ]*\)|\1$(WORKSPACE_ROOT)/|' \
+ > $1
+ endef
+else
+ # By default the MakeCommandRelative macro does nothing.
+ MakeCommandRelative = $1
+
+ # No adjustment is needed.
+ define fix-deps-file
+ $(MV) $1.tmp $1
+ endef
+endif
+
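A concrete illustration of the rewrite, with hypothetical paths (WORKSPACE_ROOT is /src/jdk and OUTPUTDIR is /src/jdk/build/linux-x64):

  # $(call MakeCommandRelative, /usr/bin/gcc \
  #     -I/src/jdk/src/java.base/share/native/libjava \
  #     -c /src/jdk/src/java.base/share/native/libjava/jni_util.c \
  #     -o /src/jdk/build/linux-x64/support/native/java.base/libjava/jni_util.o)
  # expands to roughly:
  #   cd /src/jdk && /usr/bin/gcc -Isrc/java.base/share/native/libjava \
  #     -c src/java.base/share/native/libjava/jni_util.c \
  #     -o build/linux-x64/support/native/java.base/libjava/jni_util.o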
+################################################################################
+define SetupSourceFiles
+ $$(foreach d, $$($1_SRC), $$(if $$(wildcard $$d), , \
+ $$(error SRC specified to SetupNativeCompilation $1 contains missing directory $$d)))
+
+ $1_SRCS_RAW := $$(call FindFiles, $$($1_SRC))
+ # Order src files according to the order of the src dirs
+ $1_SRCS := $$(foreach d, $$($1_SRC), $$(filter $$d%, $$($1_SRCS_RAW)))
+  # Extract the C/C++ files.
+  $1_SRCS := $$(filter $$(NATIVE_SOURCE_EXTENSIONS), $$($1_SRCS))
+ ifneq ($$($1_EXCLUDE_PATTERNS), )
+ # We must not match the exclude pattern against the src root(s).
+ $1_SRCS_WITHOUT_ROOTS := $$($1_SRCS)
+ $$(foreach i, $$($1_SRC), $$(eval $1_SRCS_WITHOUT_ROOTS := $$(patsubst \
+ $$i/%,%, $$($1_SRCS_WITHOUT_ROOTS))))
+ $1_ALL_EXCLUDE_FILES := $$(call containing, $$($1_EXCLUDE_PATTERNS), \
+ $$($1_SRCS_WITHOUT_ROOTS))
+ endif
+ ifneq ($$($1_EXCLUDE_FILES), )
+ $1_ALL_EXCLUDE_FILES += $$($1_EXCLUDE_FILES)
+ endif
+ ifneq ($$($1_ALL_EXCLUDE_FILES), )
+ $1_EXCLUDE_FILES_PAT := $$($1_ALL_EXCLUDE_FILES) \
+ $$(foreach i, $$($1_SRC), $$(addprefix $$i/, $$($1_ALL_EXCLUDE_FILES)))
+ $1_EXCLUDE_FILES_PAT := $$(addprefix %, $$($1_EXCLUDE_FILES_PAT))
+ $1_SRCS := $$(filter-out $$($1_EXCLUDE_FILES_PAT), $$($1_SRCS))
+ endif
+ ifneq ($$($1_INCLUDE_FILES), )
+ $1_INCLUDE_FILES_PAT := $$(foreach i, $$($1_SRC), $$(addprefix $$i/, $$($1_INCLUDE_FILES)))
+ $1_SRCS := $$(filter $$($1_INCLUDE_FILES_PAT), $$($1_SRCS))
+ endif
+ # Now we have a list of all c/c++ files to compile: $$($1_SRCS)
+
+ # Prepend the source/bin path to the filter expressions. Then do the filtering.
+ ifneq ($$($1_INCLUDES), )
+ $1_SRC_INCLUDES := $$(foreach i, $$($1_SRC), $$(addprefix $$i/, $$(addsuffix /%, $$($1_INCLUDES))))
+ $1_SRCS := $$(filter $$($1_SRC_INCLUDES), $$($1_SRCS))
+ endif
+ ifneq ($$($1_EXCLUDES), )
+ $1_SRC_EXCLUDES := $$(addsuffix /%, $$($1_EXCLUDES))
+ $1_SRC_EXCLUDES += $$(foreach i, $$($1_SRC), $$(addprefix $$i/, $$(addsuffix /%, $$($1_EXCLUDES))))
+ $1_SRCS := $$(filter-out $$($1_SRC_EXCLUDES), $$($1_SRCS))
+ endif
+
+ $1_SRCS += $$($1_EXTRA_FILES)
+
+ ifeq ($$($1_SRCS), )
+ $$(error No sources found for $1 when looking inside the dirs $$($1_SRC))
+ endif
+
+ ifeq ($$($1_TYPE), EXECUTABLE)
+ ifeq ($(UBSAN_ENABLED), true)
+ # We need to set the default options for UBSan. This needs to be included in every executable.
+ # Rather than copy and paste code to everything with a main function, we add an additional
+ # source file to every executable that exports __ubsan_default_options.
+ ifneq ($$(filter %.cpp %.cc, $$($1_SRCS)), )
+ $1_SRCS += $(TOPDIR)/make/data/ubsan/ubsan_default_options.cpp
+ else
+ $1_SRCS += $(TOPDIR)/make/data/ubsan/ubsan_default_options.c
+ endif
+ endif
+ endif
+endef
+
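A sketch of the filtering arguments this block consumes (the library and file names are made up; the parameter names are the existing API):

  # EXCLUDES drops whole subdirectories under each SRC root, EXCLUDE_FILES
  # drops individual files, and EXCLUDE_PATTERNS drops any file whose
  # root-relative path contains the given substring.
  $(eval $(call SetupJdkLibrary, BUILD_LIBDEMO, \
      NAME := demo, \
      EXCLUDES := windows bsd, \
      EXCLUDE_FILES := LegacyCodec.c, \
      EXCLUDE_PATTERNS := _unsupported, \
  ))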
+################################################################################
+define SetupOutputFiles
+ # Calculate the expected output from compiling the sources
+ $1_EXPECTED_OBJS_FILENAMES := $$(call replace_with_obj_extension, $$(notdir $$($1_SRCS)))
+ $1_EXPECTED_OBJS := $$(addprefix $$($1_OBJECT_DIR)/, $$($1_EXPECTED_OBJS_FILENAMES))
+ # Sort to remove duplicates and provide a reproducible order on the input files to the linker.
+ $1_ALL_OBJS := $$(sort $$($1_EXPECTED_OBJS) $$($1_EXTRA_OBJECT_FILES))
+ ifeq ($(STATIC_LIBS), true)
+ # Exclude the object files that match with $1_STATIC_LIB_EXCLUDE_OBJS.
+ ifneq ($$($1_STATIC_LIB_EXCLUDE_OBJS), )
+ $1_ALL_OBJS := $$(call not-containing, $$($1_STATIC_LIB_EXCLUDE_OBJS), $$($1_ALL_OBJS))
+ endif
+ endif
+endef
+
+################################################################################
+define RemoveSuperfluousOutputFiles
+ # Are there too many object files on disk? Perhaps because some source file was removed?
+ $1_BINS := $$(wildcard $$($1_OBJECT_DIR)/*$(OBJ_SUFFIX))
+  $1_SUPERFLUOUS_OBJS := $$(sort $$(filter-out $$($1_EXPECTED_OBJS), $$($1_BINS)))
+ # Clean out the superfluous object files.
+ ifneq ($$($1_SUPERFLUOUS_OBJS), )
+ $$(shell $(RM) -f $$($1_SUPERFLUOUS_OBJS))
+ endif
+endef
+
+################################################################################
+define SetupObjectFileList
+ $1_LD_OBJ_ARG := $$($1_ALL_OBJS)
+
+ # If there are many object files, use an @-file...
+ ifneq ($$(word 17, $$($1_ALL_OBJS)), )
+ $1_OBJ_FILE_LIST := $$($1_OBJECT_DIR)/_$1_objectfilenames.txt
+ ifneq ($(COMPILER_COMMAND_FILE_FLAG), )
+ $1_LD_OBJ_ARG := $(COMPILER_COMMAND_FILE_FLAG)$$($1_OBJ_FILE_LIST)
+ else
+ # ...except for toolchains which don't support them.
+ $1_LD_OBJ_ARG := `cat $$($1_OBJ_FILE_LIST)`
+ endif
+
+  # When building a static library, 'AR' on macosx/aix may not support @-files.
+ ifeq ($$($1_TYPE), STATIC_LIBRARY)
+ ifeq ($(call isTargetOs, macosx aix), true)
+ $1_LD_OBJ_ARG := `cat $$($1_OBJ_FILE_LIST)`
+ endif
+ endif
+ endif
+
+ # Unfortunately the @-file trick does not work reliably when using clang.
+ # Clang does not propagate the @-file parameter to the ld sub process, but
+ # instead puts the full content on the command line. At least the llvm ld
+ # does not even support an @-file.
+ #
+ # When linking a large amount of object files, we risk hitting the limit
+ # of the command line length even on posix systems if the path length of
+ # the output dir is very long due to our use of absolute paths. To
+ # mitigate this, use paths relative to the output dir when linking over
+ # 500 files with clang and the output dir path is deep.
+ ifneq ($$(word 500, $$($1_ALL_OBJS)), )
+ ifeq ($$(TOOLCHAIN_TYPE), clang)
+ # There is no strlen function in make, but checking path depth is a
+ # reasonable approximation.
+ ifneq ($$(word 10, $$(subst /, ,$$(OUTPUTDIR))), )
+ $1_LINK_OBJS_RELATIVE := true
+ $1_ALL_OBJS_RELATIVE := $$(patsubst $$(OUTPUTDIR)/%, %, $$($1_ALL_OBJS))
+ endif
+ endif
+ endif
+endef
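An illustration of the resulting linker argument, using hypothetical names (the `@` prefix stands in for whatever COMPILER_COMMAND_FILE_FLAG is on the toolchain):

  # With more than 16 objects, $1_LD_OBJ_ARG becomes something like
  #   @<outputdir>/support/native/.../libdemo/objs/_BUILD_LIBDEMO_objectfilenames.txt
  # while toolchains without @-file support, and 'ar' on macosx/aix, instead get
  #   `cat <outputdir>/support/native/.../libdemo/objs/_BUILD_LIBDEMO_objectfilenames.txt`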
diff --git a/make/conf/jib-profiles.js b/make/conf/jib-profiles.js
index 45a1e6528b358..af164624877a4 100644
--- a/make/conf/jib-profiles.js
+++ b/make/conf/jib-profiles.js
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2015, 2023, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2015, 2024, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -1181,12 +1181,6 @@ var getJibProfilesDependencies = function (input, common) {
revision: (input.build_cpu == "x64" ? "Xcode11.3.1-MacOSX10.15+1.2" : devkit_platform_revisions[devkit_platform])
},
- cups: {
- organization: common.organization,
- ext: "tar.gz",
- revision: "1.0118+1.0"
- },
-
jtreg: {
server: "jpg",
product: "jtreg",
diff --git a/make/data/hotspot-symbols/symbols-aix b/make/data/hotspot-symbols/symbols-aix
deleted file mode 100644
index 1d32104e8a1ed..0000000000000
--- a/make/data/hotspot-symbols/symbols-aix
+++ /dev/null
@@ -1,26 +0,0 @@
-#
-# Copyright (c) 2016, 2022, Oracle and/or its affiliates. All rights reserved.
-# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
-#
-# This code is free software; you can redistribute it and/or modify it
-# under the terms of the GNU General Public License version 2 only, as
-# published by the Free Software Foundation.
-#
-# This code is distributed in the hope that it will be useful, but WITHOUT
-# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
-# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
-# version 2 for more details (a copy is included in the LICENSE file that
-# accompanied this code).
-#
-# You should have received a copy of the GNU General Public License version
-# 2 along with this work; if not, write to the Free Software Foundation,
-# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
-#
-# Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
-# or visit www.oracle.com if you need additional information or have any
-# questions.
-#
-
-JVM_handle_aix_signal
-numa_error
-numa_warn
diff --git a/make/data/hotspot-symbols/symbols-aix-debug b/make/data/hotspot-symbols/symbols-aix-debug
deleted file mode 100644
index 10887ab2b61fb..0000000000000
--- a/make/data/hotspot-symbols/symbols-aix-debug
+++ /dev/null
@@ -1,26 +0,0 @@
-#
-# Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved.
-# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
-#
-# This code is free software; you can redistribute it and/or modify it
-# under the terms of the GNU General Public License version 2 only, as
-# published by the Free Software Foundation.
-#
-# This code is distributed in the hope that it will be useful, but WITHOUT
-# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
-# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
-# version 2 for more details (a copy is included in the LICENSE file that
-# accompanied this code).
-#
-# You should have received a copy of the GNU General Public License version
-# 2 along with this work; if not, write to the Free Software Foundation,
-# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
-#
-# Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
-# or visit www.oracle.com if you need additional information or have any
-# questions.
-#
-
-JVM_AccessVMBooleanFlag
-JVM_AccessVMIntFlag
-JVM_VMBreakPoint
diff --git a/make/data/hotspot-symbols/symbols-linux b/make/data/hotspot-symbols/symbols-linux
deleted file mode 100644
index d1f258297d82d..0000000000000
--- a/make/data/hotspot-symbols/symbols-linux
+++ /dev/null
@@ -1,27 +0,0 @@
-#
-# Copyright (c) 2016, 2022, Oracle and/or its affiliates. All rights reserved.
-# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
-#
-# This code is free software; you can redistribute it and/or modify it
-# under the terms of the GNU General Public License version 2 only, as
-# published by the Free Software Foundation.
-#
-# This code is distributed in the hope that it will be useful, but WITHOUT
-# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
-# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
-# version 2 for more details (a copy is included in the LICENSE file that
-# accompanied this code).
-#
-# You should have received a copy of the GNU General Public License version
-# 2 along with this work; if not, write to the Free Software Foundation,
-# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
-#
-# Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
-# or visit www.oracle.com if you need additional information or have any
-# questions.
-#
-
-JVM_handle_linux_signal
-JVM_IsUseContainerSupport
-numa_error
-numa_warn
diff --git a/make/data/hotspot-symbols/symbols-macosx b/make/data/hotspot-symbols/symbols-macosx
deleted file mode 100644
index d0243562b67f9..0000000000000
--- a/make/data/hotspot-symbols/symbols-macosx
+++ /dev/null
@@ -1,24 +0,0 @@
-#
-# Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved.
-# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
-#
-# This code is free software; you can redistribute it and/or modify it
-# under the terms of the GNU General Public License version 2 only, as
-# published by the Free Software Foundation.
-#
-# This code is distributed in the hope that it will be useful, but WITHOUT
-# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
-# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
-# version 2 for more details (a copy is included in the LICENSE file that
-# accompanied this code).
-#
-# You should have received a copy of the GNU General Public License version
-# 2 along with this work; if not, write to the Free Software Foundation,
-# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
-#
-# Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
-# or visit www.oracle.com if you need additional information or have any
-# questions.
-#
-
-JVM_handle_bsd_signal
diff --git a/make/data/hotspot-symbols/symbols-shared b/make/data/hotspot-symbols/symbols-shared
deleted file mode 100644
index c5b13ef1ee867..0000000000000
--- a/make/data/hotspot-symbols/symbols-shared
+++ /dev/null
@@ -1,35 +0,0 @@
-#
-# Copyright (c) 2016, 2023, Oracle and/or its affiliates. All rights reserved.
-# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
-#
-# This code is free software; you can redistribute it and/or modify it
-# under the terms of the GNU General Public License version 2 only, as
-# published by the Free Software Foundation.
-#
-# This code is distributed in the hope that it will be useful, but WITHOUT
-# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
-# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
-# version 2 for more details (a copy is included in the LICENSE file that
-# accompanied this code).
-#
-# You should have received a copy of the GNU General Public License version
-# 2 along with this work; if not, write to the Free Software Foundation,
-# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
-#
-# Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
-# or visit www.oracle.com if you need additional information or have any
-# questions.
-#
-
-AsyncGetCallTrace
-jio_fprintf
-jio_printf
-jio_snprintf
-jio_vfprintf
-jio_vsnprintf
-JNI_CreateJavaVM
-JNI_GetCreatedJavaVMs
-JNI_GetDefaultJavaVMInitArgs
-JVM_IsForeignLinkerSupported
-JVM_FindClassFromBootLoader
-JVM_InitAgentProperties
diff --git a/make/data/hotspot-symbols/symbols-unix b/make/data/hotspot-symbols/symbols-unix
deleted file mode 100644
index fbb82a11facb7..0000000000000
--- a/make/data/hotspot-symbols/symbols-unix
+++ /dev/null
@@ -1,233 +0,0 @@
-#
-# Copyright (c) 2016, 2023, Oracle and/or its affiliates. All rights reserved.
-# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
-#
-# This code is free software; you can redistribute it and/or modify it
-# under the terms of the GNU General Public License version 2 only, as
-# published by the Free Software Foundation.
-#
-# This code is distributed in the hope that it will be useful, but WITHOUT
-# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
-# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
-# version 2 for more details (a copy is included in the LICENSE file that
-# accompanied this code).
-#
-# You should have received a copy of the GNU General Public License version
-# 2 along with this work; if not, write to the Free Software Foundation,
-# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
-#
-# Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
-# or visit www.oracle.com if you need additional information or have any
-# questions.
-#
-
-JVM_ActiveProcessorCount
-JVM_AreNestMates
-JVM_ArrayCopy
-JVM_AssertionStatusDirectives
-JVM_BeforeHalt
-JVM_CallStackWalk
-JVM_Clone
-JVM_ConstantPoolGetClassAt
-JVM_ConstantPoolGetClassAtIfLoaded
-JVM_ConstantPoolGetClassRefIndexAt
-JVM_ConstantPoolGetDoubleAt
-JVM_ConstantPoolGetFieldAt
-JVM_ConstantPoolGetFieldAtIfLoaded
-JVM_ConstantPoolGetFloatAt
-JVM_ConstantPoolGetIntAt
-JVM_ConstantPoolGetLongAt
-JVM_ConstantPoolGetMemberRefInfoAt
-JVM_ConstantPoolGetMethodAt
-JVM_ConstantPoolGetMethodAtIfLoaded
-JVM_ConstantPoolGetNameAndTypeRefIndexAt
-JVM_ConstantPoolGetNameAndTypeRefInfoAt
-JVM_ConstantPoolGetSize
-JVM_ConstantPoolGetStringAt
-JVM_ConstantPoolGetTagAt
-JVM_ConstantPoolGetUTF8At
-JVM_CurrentCarrierThread
-JVM_CurrentThread
-JVM_SetCurrentThread
-JVM_CurrentTimeMillis
-JVM_DefineClass
-JVM_DefineClassWithSource
-JVM_DesiredAssertionStatus
-JVM_DumpAllStacks
-JVM_DumpClassListToFile
-JVM_DumpDynamicArchive
-JVM_DumpThreads
-JVM_ExpandStackFrameInfo
-JVM_FillInStackTrace
-JVM_FindClassFromCaller
-JVM_FindClassFromClass
-JVM_FindLibraryEntry
-JVM_FindLoadedClass
-JVM_FindPrimitiveClass
-JVM_FindSignal
-JVM_FreeMemory
-JVM_GC
-JVM_GetAllThreads
-JVM_GetAndClearReferencePendingList
-JVM_GetArrayElement
-JVM_GetArrayLength
-JVM_GetCallerClass
-JVM_GetClassAccessFlags
-JVM_GetClassAnnotations
-JVM_GetClassConstantPool
-JVM_GetClassContext
-JVM_GetClassCPEntriesCount
-JVM_GetClassCPTypes
-JVM_GetClassDeclaredConstructors
-JVM_GetClassDeclaredFields
-JVM_GetClassDeclaredMethods
-JVM_GetClassFieldsCount
-JVM_GetClassFileVersion
-JVM_GetClassInterfaces
-JVM_GetClassMethodsCount
-JVM_GetClassModifiers
-JVM_GetClassNameUTF
-JVM_GetClassSignature
-JVM_GetClassSigners
-JVM_GetClassTypeAnnotations
-JVM_GetCPClassNameUTF
-JVM_GetCPFieldClassNameUTF
-JVM_GetCPFieldModifiers
-JVM_GetCPFieldNameUTF
-JVM_GetCPFieldSignatureUTF
-JVM_GetCPMethodClassNameUTF
-JVM_GetCPMethodModifiers
-JVM_GetCPMethodNameUTF
-JVM_GetCPMethodSignatureUTF
-JVM_GetDeclaredClasses
-JVM_GetDeclaringClass
-JVM_GetEnclosingMethodInfo
-JVM_GetExtendedNPEMessage
-JVM_GetFieldIxModifiers
-JVM_GetFieldTypeAnnotations
-JVM_GetInheritedAccessControlContext
-JVM_GetManagement
-JVM_GetMethodIxArgsSize
-JVM_GetMethodIxByteCode
-JVM_GetMethodIxByteCodeLength
-JVM_GetMethodIxExceptionIndexes
-JVM_GetMethodIxExceptionsCount
-JVM_GetMethodIxExceptionTableEntry
-JVM_GetMethodIxExceptionTableLength
-JVM_GetMethodIxLocalsCount
-JVM_GetMethodIxMaxStack
-JVM_GetMethodIxModifiers
-JVM_GetMethodIxNameUTF
-JVM_GetMethodIxSignatureUTF
-JVM_GetMethodParameters
-JVM_GetMethodTypeAnnotations
-JVM_GetNanoTimeAdjustment
-JVM_GetNestHost
-JVM_GetNestMembers
-JVM_GetNextThreadIdOffset
-JVM_GetPermittedSubclasses
-JVM_GetPrimitiveArrayElement
-JVM_GetProperties
-JVM_GetProtectionDomain
-JVM_GetRandomSeedForDumping
-JVM_GetRecordComponents
-JVM_GetSimpleBinaryName
-JVM_GetStackAccessControlContext
-JVM_GetSystemPackage
-JVM_GetSystemPackages
-JVM_GetTemporaryDirectory
-JVM_GetVmArguments
-JVM_Halt
-JVM_HasReferencePendingList
-JVM_HoldsLock
-JVM_GetStackTrace
-JVM_IHashCode
-JVM_InitClassName
-JVM_InitStackTraceElement
-JVM_InitStackTraceElementArray
-JVM_InitializeFromArchive
-JVM_InternString
-JVM_Interrupt
-JVM_InvokeMethod
-JVM_IsArrayClass
-JVM_IsCDSDumpingEnabled
-JVM_IsConstructorIx
-JVM_IsDumpingClassList
-JVM_IsFinalizationEnabled
-JVM_IsHiddenClass
-JVM_IsInterface
-JVM_IsPreviewEnabled
-JVM_IsContinuationsSupported
-JVM_IsPrimitiveClass
-JVM_IsRecord
-JVM_IsSameClassPackage
-JVM_IsSharingEnabled
-JVM_IsSupportedJNIVersion
-JVM_IsVMGeneratedMethodIx
-JVM_LatestUserDefinedLoader
-JVM_LoadZipLibrary
-JVM_LoadLibrary
-JVM_LookupDefineClass
-JVM_LookupLambdaProxyClassFromArchive
-JVM_LogLambdaFormInvoker
-JVM_MaxMemory
-JVM_MaxObjectInspectionAge
-JVM_MonitorNotify
-JVM_MonitorNotifyAll
-JVM_MonitorWait
-JVM_MoreStackWalk
-JVM_NanoTime
-JVM_NativePath
-JVM_NewArray
-JVM_NewInstanceFromConstructor
-JVM_NewMultiArray
-JVM_PhantomReferenceRefersTo
-JVM_PrintWarningAtDynamicAgentLoad
-JVM_RaiseSignal
-JVM_RawMonitorCreate
-JVM_RawMonitorDestroy
-JVM_RawMonitorEnter
-JVM_RawMonitorExit
-JVM_ReferenceClear
-JVM_ReferenceRefersTo
-JVM_RegisterContinuationMethods
-JVM_RegisterLambdaProxyClassForArchiving
-JVM_RegisterSignal
-JVM_ReleaseUTF
-JVM_ReportFinalizationComplete
-JVM_SetArrayElement
-JVM_SetClassSigners
-JVM_SetNativeThreadName
-JVM_SetPrimitiveArrayElement
-JVM_SetStackWalkContinuation
-JVM_SetThreadPriority
-JVM_SleepNanos
-JVM_StartThread
-JVM_TotalMemory
-JVM_UnloadLibrary
-JVM_WaitForReferencePendingList
-JVM_Yield
-
-# Module related API's
-JVM_AddModuleExports
-JVM_AddModuleExportsToAll
-JVM_AddModuleExportsToAllUnnamed
-JVM_AddReadsModule
-JVM_DefineArchivedModules
-JVM_DefineModule
-JVM_SetBootLoaderUnnamedModule
-
-# Virtual thread notifications for JVMTI
-JVM_VirtualThreadStart
-JVM_VirtualThreadEnd
-JVM_VirtualThreadMount
-JVM_VirtualThreadUnmount
-JVM_VirtualThreadHideFrames
-JVM_VirtualThreadDisableSuspend
-
-# Scoped values
-JVM_EnsureMaterializedForStackWalk_func
-JVM_FindScopedValueBindings
-JVM_ScopedValueCache
-JVM_SetScopedValueCache
-#
diff --git a/make/data/hotspot-symbols/version-script.txt b/make/data/hotspot-symbols/version-script.txt
new file mode 100644
index 0000000000000..29578bf7cb2e1
--- /dev/null
+++ b/make/data/hotspot-symbols/version-script.txt
@@ -0,0 +1,11 @@
+SUNWprivate_1.1 {
+ global:
+ *;
+
+ local:
+ __bss_start;
+ _edata;
+ _end;
+ _fini;
+ _init;
+};
diff --git a/make/hotspot/gensrc/GensrcAdlc.gmk b/make/hotspot/gensrc/GensrcAdlc.gmk
index bb356476847ac..f9e09706141fd 100644
--- a/make/hotspot/gensrc/GensrcAdlc.gmk
+++ b/make/hotspot/gensrc/GensrcAdlc.gmk
@@ -1,5 +1,5 @@
#
-# Copyright (c) 2013, 2023, Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2013, 2024, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it
@@ -45,7 +45,6 @@ ifeq ($(call check-jvm-feature, compiler2), true)
ADLC_CFLAGS := -qnortti -qeh -q64 -DAIX
endif
else ifeq ($(call isBuildOs, windows), true)
- ADLC_LDFLAGS += -nologo
ADLC_CFLAGS := -nologo -EHsc
ADLC_CFLAGS_WARNINGS := -W3 -D_CRT_SECURE_NO_WARNINGS
endif
@@ -72,7 +71,8 @@ ifeq ($(call check-jvm-feature, compiler2), true)
$(eval $(call SetupNativeCompilation, BUILD_ADLC, \
NAME := adlc, \
TYPE := EXECUTABLE, \
- TOOLCHAIN := TOOLCHAIN_BUILD_LINK_CXX, \
+ TARGET_TYPE := BUILD, \
+ LINK_TYPE := C++, \
SRC := $(TOPDIR)/src/hotspot/share/adlc, \
EXTRA_FILES := $(TOPDIR)/src/hotspot/share/opto/opcodes.cpp, \
CFLAGS := $(ADLC_CFLAGS) $(ADLC_CFLAGS_WARNINGS), \
diff --git a/make/hotspot/lib/CompileGtest.gmk b/make/hotspot/lib/CompileGtest.gmk
index 0d17f7a3be562..a50d1ffac9eb4 100644
--- a/make/hotspot/lib/CompileGtest.gmk
+++ b/make/hotspot/lib/CompileGtest.gmk
@@ -1,5 +1,5 @@
#
-# Copyright (c) 2016, 2023, Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2016, 2024, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it
@@ -50,7 +50,7 @@ endif
$(eval $(call SetupJdkLibrary, BUILD_GTEST_LIBGTEST, \
NAME := gtest, \
TYPE := STATIC_LIBRARY, \
- TOOLCHAIN := TOOLCHAIN_LINK_CXX, \
+ LINK_TYPE := C++, \
OUTPUT_DIR := $(JVM_OUTPUTDIR)/libgtest, \
OBJECT_DIR := $(JVM_OUTPUTDIR)/libgtest/objs, \
SRC := \
@@ -75,25 +75,11 @@ $(eval $(call SetupJdkLibrary, BUILD_GTEST_LIBGTEST, \
TARGETS += $(BUILD_GTEST_LIBGTEST)
################################################################################
-
-ifeq ($(call isTargetOs, windows), true)
- GTEST_JVM_MAPFILE := $(JVM_MAPFILE)
-else
- GTEST_JVM_MAPFILE := $(JVM_OUTPUTDIR)/gtest/mapfile
-
- $(JVM_OUTPUTDIR)/gtest/symbols: $(JVM_OUTPUTDIR)/symbols
- $(call MakeDir, $(@D))
- ( $(CAT) $< ; echo "runUnitTests" ) > $@
-
- $(GTEST_JVM_MAPFILE): $(JVM_OUTPUTDIR)/gtest/symbols
- $(call create-mapfile)
-endif
-
# Additional disabled warnings are due to code in the test source.
$(eval $(call SetupJdkLibrary, BUILD_GTEST_LIBJVM, \
NAME := jvm, \
- TOOLCHAIN := TOOLCHAIN_LINK_CXX, \
+ LINK_TYPE := C++, \
OUTPUT_DIR := $(JVM_OUTPUTDIR)/gtest, \
OBJECT_DIR := $(JVM_OUTPUTDIR)/gtest/objs, \
SRC := $(GTEST_TEST_SRC), \
@@ -123,8 +109,6 @@ $(eval $(call SetupJdkLibrary, BUILD_GTEST_LIBJVM, \
LIBS_unix := -lgtest, \
LIBS_windows := $(JVM_OUTPUTDIR)/libgtest/gtest.lib, \
OPTIMIZATION := $(JVM_OPTIMIZATION), \
- MAPFILE := $(GTEST_JVM_MAPFILE), \
- USE_MAPFILE_FOR_SYMBOLS := true, \
COPY_DEBUG_SYMBOLS := $(GTEST_COPY_DEBUG_SYMBOLS), \
ZIP_EXTERNAL_DEBUG_SYMBOLS := false, \
STRIP_SYMBOLS := false, \
@@ -134,14 +118,19 @@ $(eval $(call SetupJdkLibrary, BUILD_GTEST_LIBJVM, \
$(BUILD_GTEST_LIBJVM) : $(BUILD_GTEST_LIBGTEST)
+ifeq ($(call isTargetOs, windows), true)
+ $(BUILD_GTEST_LIBJVM_TARGET): $(WIN_EXPORT_FILE)
+endif
+
+
TARGETS += $(BUILD_GTEST_LIBJVM)
################################################################################
$(eval $(call SetupJdkExecutable, BUILD_GTEST_LAUNCHER, \
- TOOLCHAIN := TOOLCHAIN_LINK_CXX, \
NAME := gtestLauncher, \
TYPE := EXECUTABLE, \
+ LINK_TYPE := C++, \
OUTPUT_DIR := $(JVM_OUTPUTDIR)/gtest, \
EXTRA_FILES := $(GTEST_LAUNCHER_SRC), \
OBJECT_DIR := $(JVM_OUTPUTDIR)/gtest/launcher-objs, \
diff --git a/make/hotspot/lib/CompileJvm.gmk b/make/hotspot/lib/CompileJvm.gmk
index 3393d9e00aa25..69cd80f5171c8 100644
--- a/make/hotspot/lib/CompileJvm.gmk
+++ b/make/hotspot/lib/CompileJvm.gmk
@@ -1,5 +1,5 @@
#
-# Copyright (c) 2013, 2023, Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2013, 2024, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it
@@ -33,7 +33,6 @@ include lib/JvmFlags.gmk
# Setup compilation of the main Hotspot native library (libjvm).
JVM_OUTPUTDIR := $(JVM_VARIANT_OUTPUTDIR)/libjvm
-JVM_MAPFILE := $(JVM_OUTPUTDIR)/mapfile
################################################################################
# Platform independent setup
@@ -146,12 +145,28 @@ $(call FillFindCache, $(JVM_SRC_DIRS))
# operator new.
LIBJVM_STATIC_EXCLUDE_OBJS := operator_new.o
+ifeq ($(call isTargetOs, windows), true)
+ ifeq ($(STATIC_LIBS), true)
+ WIN_EXPORT_FILE := $(JVM_OUTPUTDIR)/static-win-exports.def
+ else
+ WIN_EXPORT_FILE := $(JVM_OUTPUTDIR)/win-exports.def
+ endif
+
+ JVM_LDFLAGS += -def:$(WIN_EXPORT_FILE)
+endif
+
+ifeq ($(call isTargetOs, linux), true)
+ HOTSPOT_VERSION_SCRIPT := $(TOPDIR)/make/data/hotspot-symbols/version-script.txt
+
+ JVM_LDFLAGS += -Wl,--exclude-libs,ALL -Wl,-version-script=$(HOTSPOT_VERSION_SCRIPT)
+endif
+
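As a sanity check (a workflow suggestion, not part of the build logic), the combined effect of the version script and --exclude-libs can be inspected on the finished library:

  #   nm -D --defined-only <outputdir>/support/modules_libs/java.base/server/libjvm.so
  # should still list the JVM_*/JNI_* entry points, while _init, _fini, _edata,
  # _end and __bss_start stay local and symbols pulled in from static archives
  # are no longer re-exported.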
################################################################################
# Now set up the actual compilation of the main hotspot native library
$(eval $(call SetupJdkLibrary, BUILD_LIBJVM, \
NAME := jvm, \
- TOOLCHAIN := TOOLCHAIN_LINK_CXX, \
+ LINK_TYPE := C++, \
OUTPUT_DIR := $(JVM_LIB_OUTPUTDIR), \
SRC := $(JVM_SRC_DIRS), \
EXCLUDES := $(JVM_EXCLUDES), \
@@ -169,6 +184,7 @@ $(eval $(call SetupJdkLibrary, BUILD_LIBJVM, \
DISABLED_WARNINGS_gcc_jvmciCodeInstaller.cpp := stringop-overflow, \
DISABLED_WARNINGS_gcc_jvmtiTagMap.cpp := stringop-overflow, \
DISABLED_WARNINGS_gcc_postaloc.cpp := address, \
+ DISABLED_WARNINGS_gcc_shenandoahLock.cpp := stringop-overflow, \
DISABLED_WARNINGS_gcc_synchronizer.cpp := stringop-overflow, \
DISABLED_WARNINGS_clang := $(DISABLED_WARNINGS_clang), \
DISABLED_WARNINGS_clang_arguments.cpp := missing-field-initializers, \
@@ -194,8 +210,6 @@ $(eval $(call SetupJdkLibrary, BUILD_LIBJVM, \
LIBS := $(JVM_LIBS), \
OPTIMIZATION := $(JVM_OPTIMIZATION), \
OBJECT_DIR := $(JVM_OUTPUTDIR)/objs, \
- MAPFILE := $(JVM_MAPFILE), \
- USE_MAPFILE_FOR_SYMBOLS := true, \
STRIPFLAGS := $(JVM_STRIPFLAGS), \
EMBED_MANIFEST := true, \
RC_FILEDESC := $(HOTSPOT_VM_DISTRO) $(OPENJDK_TARGET_CPU_BITS)-Bit $(JVM_VARIANT) VM, \
@@ -204,11 +218,47 @@ $(eval $(call SetupJdkLibrary, BUILD_LIBJVM, \
STATIC_LIB_EXCLUDE_OBJS := $(LIBJVM_STATIC_EXCLUDE_OBJS), \
))
+ifeq ($(call isTargetOs, windows), true)
+ # The following lines create a list of vftable symbols to be filtered out of
+ # the symbol file. Removing this line causes the linker to complain about too
+ # many (> 64K) symbols, so the _guess_ is that this line is here to keep down
+ # the number of exported symbols below that limit.
+ #
+ # Some usages of C++ lambdas require the vftable symbol of classes that use
+ # the lambda type as a template parameter. The usage of those classes won't
+ # link if their vftable symbols are removed. That's why there's an exception
+ # for vftable symbols containing the string 'lambda'.
+ #
+ # A very simple example of a lambda usage that fails if the lambda vftable
+ # symbols are missing in the symbol file:
+ #
+  #   #include <functional>
+  #   std::function<void()> f = [](){}
+ FILTER_SYMBOLS_AWK_SCRIPT := \
+ '{ \
+ if ($$7 ~ /\?\?_7.*@@6B@/ && $$7 !~ /type_info/ && $$7 !~ /lambda/) print " " $$7; \
+ }'
+
+ # A more correct solution would be to send BUILD_LIBJVM_ALL_OBJS instead of
+ # cd && *.obj, but this will result in very long command lines, which could be
+ # problematic.
+ $(WIN_EXPORT_FILE): $(BUILD_LIBJVM_ALL_OBJS)
+ $(call LogInfo, Generating list of symbols to export from object files)
+ $(call MakeDir, $(@D))
+ $(ECHO) "EXPORTS" > $@.tmp
+ $(CD) $(BUILD_LIBJVM_OBJECT_DIR) && \
+ $(DUMPBIN) -symbols *$(OBJ_SUFFIX) | $(AWK) $(FILTER_SYMBOLS_AWK_SCRIPT) | $(SORT) -u >> $@.tmp
+ $(RM) $@
+ $(MV) $@.tmp $@
+
+ $(BUILD_LIBJVM_TARGET): $(WIN_EXPORT_FILE)
+endif
+
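The generated file is a plain module-definition file; a sketch of its shape, with made-up symbol names:

  # EXPORTS
  #  ??_7JavaThread@@6B@
  #  ??_7Method@@6B@
  #  ...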
# Always recompile abstract_vm_version.cpp if libjvm needs to be relinked. This ensures
# that the internal vm version is updated as it relies on __DATE__ and __TIME__
# macros.
ABSTRACT_VM_VERSION_OBJ := $(JVM_OUTPUTDIR)/objs/abstract_vm_version$(OBJ_SUFFIX)
-$(ABSTRACT_VM_VERSION_OBJ): $(filter-out $(ABSTRACT_VM_VERSION_OBJ) $(JVM_MAPFILE), \
+$(ABSTRACT_VM_VERSION_OBJ): $(filter-out $(ABSTRACT_VM_VERSION_OBJ), \
$(BUILD_LIBJVM_TARGET_DEPS))
ifneq ($(GENERATE_COMPILE_COMMANDS_ONLY), true)
@@ -236,11 +286,6 @@ endif
# 1540-1090 : (I) The destructor of "..." might not be called.
# 1540-1639 : (I) The behavior of long type bit fields has changed ...
-# Include mapfile generation. It relies on BUILD_LIBJVM_ALL_OBJS which is only
-# defined after the above call to BUILD_LIBJVM. Mapfile will be generated
-# after all object files are built, but before the jvm library is linked.
-include lib/JvmMapfile.gmk
-
TARGETS += $(BUILD_LIBJVM)
################################################################################
diff --git a/make/hotspot/lib/JvmMapfile.gmk b/make/hotspot/lib/JvmMapfile.gmk
deleted file mode 100644
index b2199e7d17c6f..0000000000000
--- a/make/hotspot/lib/JvmMapfile.gmk
+++ /dev/null
@@ -1,176 +0,0 @@
-#
-# Copyright (c) 2013, 2023, Oracle and/or its affiliates. All rights reserved.
-# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
-#
-# This code is free software; you can redistribute it and/or modify it
-# under the terms of the GNU General Public License version 2 only, as
-# published by the Free Software Foundation. Oracle designates this
-# particular file as subject to the "Classpath" exception as provided
-# by Oracle in the LICENSE file that accompanied this code.
-#
-# This code is distributed in the hope that it will be useful, but WITHOUT
-# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
-# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
-# version 2 for more details (a copy is included in the LICENSE file that
-# accompanied this code).
-#
-# You should have received a copy of the GNU General Public License version
-# 2 along with this work; if not, write to the Free Software Foundation,
-# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
-#
-# Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
-# or visit www.oracle.com if you need additional information or have any
-# questions.
-#
-
-$(eval $(call IncludeCustomExtension, hotspot/lib/JvmMapfile.gmk))
-
-################################################################################
-# Combine a list of static symbols
-
-ifeq ($(call And, $(call isTargetOs, windows) $(call isTargetCpu, x86_64)), false)
- # On Windows x86_64, we should not have any symbols at all, since that
- # results in duplicate warnings from the linker (JDK-8043491).
- SYMBOLS_SRC += $(TOPDIR)/make/data/hotspot-symbols/symbols-shared
-endif
-
-ifeq ($(call isTargetOsType, unix), true)
- SYMBOLS_SRC += $(TOPDIR)/make/data/hotspot-symbols/symbols-unix
-endif
-
-ifneq ($(wildcard $(TOPDIR)/make/data/hotspot-symbols/symbols-$(OPENJDK_TARGET_OS)), )
- SYMBOLS_SRC += $(TOPDIR)/make/data/hotspot-symbols/symbols-$(OPENJDK_TARGET_OS)
-endif
-
-ifneq ($(findstring debug, $(DEBUG_LEVEL)), )
- ifneq ($(wildcard $(TOPDIR)/make/data/hotspot-symbols/symbols-$(OPENJDK_TARGET_OS)-debug), )
- SYMBOLS_SRC += $(TOPDIR)/make/data/hotspot-symbols/symbols-$(OPENJDK_TARGET_OS)-debug
- endif
-endif
-
-################################################################################
-# Create a dynamic list of symbols from the built object files. This is highly
-# platform dependent.
-
-ifeq ($(call isTargetOs, linux), true)
- DUMP_SYMBOLS_CMD := $(NM) $(NMFLAGS) --defined-only *$(OBJ_SUFFIX)
- ifneq ($(FILTER_SYMBOLS_PATTERN), )
- FILTER_SYMBOLS_PATTERN := $(FILTER_SYMBOLS_PATTERN)|
- endif
- FILTER_SYMBOLS_PATTERN := $(FILTER_SYMBOLS_PATTERN)^_ZTV|^gHotSpotVM|^UseSharedSpaces$$
- FILTER_SYMBOLS_PATTERN := $(FILTER_SYMBOLS_PATTERN)|^_ZN9Arguments17SharedArchivePathE$$
- FILTER_SYMBOLS_AWK_SCRIPT := \
- '{ \
- if ($$3 ~ /$(FILTER_SYMBOLS_PATTERN)/) print $$3; \
- }'
-
-else ifeq ($(call isTargetOs, macosx), true)
- # nm on macosx prints out "warning: nm: no name list" to stderr for
- # files without symbols. Hide this, even at the expense of hiding real errors.
- DUMP_SYMBOLS_CMD := $(NM) $(NMFLAGS) -Uj *$(OBJ_SUFFIX) 2> /dev/null
- ifneq ($(FILTER_SYMBOLS_PATTERN), )
- FILTER_SYMBOLS_PATTERN := $(FILTER_SYMBOLS_PATTERN)|
- endif
- FILTER_SYMBOLS_PATTERN := $(FILTER_SYMBOLS_PATTERN)^_ZTV|^gHotSpotVM
- FILTER_SYMBOLS_AWK_SCRIPT := \
- '{ \
- if ($$3 ~ /$(FILTER_SYMBOLS_PATTERN)/) print $$3; \
- }'
-
-# NOTE: The script is from the old build. It is broken and finds no symbols.
-# The script below might be what was intended, but it fails to link with tons
-# of 'cannot export hidden symbol vtable for X'.
-# '{ if ($$1 ~ /^__ZTV/ || $$1 ~ /^_gHotSpotVM/) print substr($$1, 2) }'
-else ifeq ($(call isTargetOs, aix), true)
- # NOTE: The old build had the solution below. This should to be fixed in
- # configure instead.
-
- # On AIX we have to prevent that we pick up the 'nm' version from the GNU binutils
- # which may be installed under /opt/freeware/bin. So better use an absolute path here!
- # NM=/usr/bin/nm
-
- DUMP_SYMBOLS_CMD := $(NM) $(NMFLAGS) -B -C *$(OBJ_SUFFIX)
- FILTER_SYMBOLS_AWK_SCRIPT := \
- '{ \
- if (($$2="d" || $$2="D") && ($$3 ~ /^__vft/ || $$3 ~ /^gHotSpotVM/)) print $$3; \
- if ($$3 ~ /^UseSharedSpaces$$/) print $$3; \
- if ($$3 ~ /^SharedArchivePath__9Arguments$$/) print $$3; \
- }'
-
-else ifeq ($(call isTargetOs, windows), true)
- DUMP_SYMBOLS_CMD := $(DUMPBIN) -symbols *$(OBJ_SUFFIX)
-
- # The following lines create a list of vftable symbols to be filtered out of
- # the mapfile. Removing this line causes the linker to complain about too many
- # (> 64K) symbols, so the _guess_ is that this line is here to keep down the
- # number of exported symbols below that limit.
- #
- # Some usages of C++ lambdas require the vftable symbol of classes that use
- # the lambda type as a template parameter. The usage of those classes won't
- # link if their vftable symbols are removed. That's why there's an exception
- # for vftable symbols containing the string 'lambda'.
- #
- # A very simple example of a lambda usage that fails if the lambda vftable
- # symbols are missing in the mapfile:
- #
-  #   #include <functional>
-  #   std::function<void()> f = [](){}
-
- FILTER_SYMBOLS_AWK_SCRIPT := \
- '{ \
- if ($$7 ~ /\?\?_7.*@@6B@/ && $$7 !~ /type_info/ && $$7 !~ /lambda/) print $$7; \
- }'
-
-else
- $(error Unknown target OS $(OPENJDK_TARGET_OS) in JvmMapfile.gmk)
-endif
-
-# A more correct solution would be to send BUILD_LIBJVM_ALL_OBJS instead of
-# cd && *.o, but this will result in very long command lines, which is
-# problematic on some platforms.
-$(JVM_OUTPUTDIR)/symbols-objects: $(BUILD_LIBJVM_ALL_OBJS)
- $(call LogInfo, Generating symbol list from object files)
- $(CD) $(JVM_OUTPUTDIR)/objs && \
- $(DUMP_SYMBOLS_CMD) | $(AWK) $(FILTER_SYMBOLS_AWK_SCRIPT) | $(SORT) -u > $@
-
-SYMBOLS_SRC += $(JVM_OUTPUTDIR)/symbols-objects
-
-################################################################################
-# Now concatenate all symbol lists into a single file and remove comments.
-
-$(JVM_OUTPUTDIR)/symbols: $(SYMBOLS_SRC)
- $(SED) -e '/^#/d' $^ > $@
-
-################################################################################
-# Finally convert the symbol list into a platform-specific mapfile
-
-ifeq ($(call isTargetOs, macosx), true)
- # On macosx, we need to add a leading underscore
- define create-mapfile-work
- $(AWK) '{ if ($$0 ~ ".") { print " _" $$0 } }' < $^ > $@.tmp
- endef
-else ifeq ($(call isTargetOs, windows), true)
- # On windows, add an 'EXPORTS' header
- define create-mapfile-work
- $(ECHO) "EXPORTS" > $@.tmp
- $(AWK) '{ if ($$0 ~ ".") { print " " $$0 } }' < $^ >> $@.tmp
- endef
-else
- # Assume standard linker script
- define create-mapfile-work
- $(PRINTF) "SUNWprivate_1.1 { \n global: \n" > $@.tmp
- $(AWK) '{ if ($$0 ~ ".") { print " " $$0 ";" } }' < $^ >> $@.tmp
- $(PRINTF) " local: \n *; \n }; \n" >> $@.tmp
- endef
-endif
-
-define create-mapfile
- $(call LogInfo, Creating mapfile)
- $(call MakeDir, $(@D))
- $(call create-mapfile-work)
- $(RM) $@
- $(MV) $@.tmp $@
-endef
-
-$(JVM_MAPFILE): $(JVM_OUTPUTDIR)/symbols
- $(call create-mapfile)
diff --git a/make/modules/java.base/Lib.gmk b/make/modules/java.base/Lib.gmk
index 924cb8aae268b..54050d0798626 100644
--- a/make/modules/java.base/Lib.gmk
+++ b/make/modules/java.base/Lib.gmk
@@ -1,5 +1,5 @@
#
-# Copyright (c) 2011, 2023, Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2011, 2024, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it
@@ -242,7 +242,7 @@ endif
ifeq ($(call isTargetOs, linux)+$(call isTargetCpu, x86_64)+$(INCLUDE_COMPILER2)+$(filter $(TOOLCHAIN_TYPE), gcc), true+true+true+gcc)
$(eval $(call SetupJdkLibrary, BUILD_LIB_SIMD_SORT, \
NAME := simdsort, \
- TOOLCHAIN := TOOLCHAIN_LINK_CXX, \
+ LINK_TYPE := C++, \
OPTIMIZATION := HIGH, \
CFLAGS := $(CFLAGS_JDKLIB), \
CXXFLAGS := $(CXXFLAGS_JDKLIB) -std=c++17, \
diff --git a/make/modules/java.base/lib/CoreLibraries.gmk b/make/modules/java.base/lib/CoreLibraries.gmk
index 8904c39449e29..b27013536f8e3 100644
--- a/make/modules/java.base/lib/CoreLibraries.gmk
+++ b/make/modules/java.base/lib/CoreLibraries.gmk
@@ -1,5 +1,5 @@
#
-# Copyright (c) 2011, 2023, Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2011, 2024, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it
@@ -115,7 +115,7 @@ TARGETS += $(BUILD_LIBZIP)
$(eval $(call SetupJdkLibrary, BUILD_LIBJIMAGE, \
NAME := jimage, \
- TOOLCHAIN := TOOLCHAIN_LINK_CXX, \
+ LINK_TYPE := C++, \
OPTIMIZATION := LOW, \
CFLAGS := $(CFLAGS_JDKLIB), \
CXXFLAGS := $(CXXFLAGS_JDKLIB), \
diff --git a/make/modules/java.desktop/Lib.gmk b/make/modules/java.desktop/Lib.gmk
index be1ac3f1fb8a2..cb831faebf62c 100644
--- a/make/modules/java.desktop/Lib.gmk
+++ b/make/modules/java.desktop/Lib.gmk
@@ -1,5 +1,5 @@
#
-# Copyright (c) 2011, 2022, Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2011, 2024, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it
@@ -51,13 +51,14 @@ ifeq ($(call isTargetOs, aix), false)
-DUSE_PLATFORM_MIDI_IN=TRUE \
#
+ LIBJSOUND_LINK_TYPE := C
ifeq ($(call isTargetOs, macosx), true)
- LIBJSOUND_TOOLCHAIN := TOOLCHAIN_LINK_CXX
+ LIBJSOUND_LINK_TYPE := C++
endif
$(eval $(call SetupJdkLibrary, BUILD_LIBJSOUND, \
NAME := jsound, \
- TOOLCHAIN := $(LIBJSOUND_TOOLCHAIN), \
+ LINK_TYPE := $(LIBJSOUND_LINK_TYPE), \
OPTIMIZATION := LOW, \
CFLAGS := $(CFLAGS_JDKLIB) \
$(LIBJSOUND_CFLAGS), \
diff --git a/make/modules/java.desktop/lib/Awt2dLibraries.gmk b/make/modules/java.desktop/lib/Awt2dLibraries.gmk
index e274005e60741..aaf98d088fdad 100644
--- a/make/modules/java.desktop/lib/Awt2dLibraries.gmk
+++ b/make/modules/java.desktop/lib/Awt2dLibraries.gmk
@@ -1,5 +1,5 @@
#
-# Copyright (c) 2011, 2023, Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2011, 2024, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it
@@ -506,8 +506,10 @@ else
# noexcept-type required for GCC 7 builds. Not required for GCC 8+.
# expansion-to-defined required for GCC 9 builds. Not required for GCC 10+.
# maybe-uninitialized required for GCC 8 builds. Not required for GCC 9+.
+ # calloc-transposed-args required for GCC 14 builds. (fixed upstream in Harfbuzz 032c931e1c0cfb20f18e5acb8ba005775242bd92)
HARFBUZZ_DISABLED_WARNINGS_CXX_gcc := class-memaccess noexcept-type \
- expansion-to-defined dangling-reference maybe-uninitialized
+ expansion-to-defined dangling-reference maybe-uninitialized \
+ calloc-transposed-args
HARFBUZZ_DISABLED_WARNINGS_clang := missing-field-initializers range-loop-analysis
HARFBUZZ_DISABLED_WARNINGS_microsoft := 4267 4244
@@ -560,9 +562,9 @@ LIBFONTMANAGER_CFLAGS += $(X_CFLAGS) -DLE_STANDALONE -DHEADLESS
# libawt_xawt). See JDK-8196516 for details.
$(eval $(call SetupJdkLibrary, BUILD_LIBFONTMANAGER, \
NAME := fontmanager, \
+ LINK_TYPE := C++, \
EXCLUDE_FILES := $(LIBFONTMANAGER_EXCLUDE_FILES) \
AccelGlyphCache.c, \
- TOOLCHAIN := TOOLCHAIN_LINK_CXX, \
CFLAGS := $(CFLAGS_JDKLIB) $(LIBFONTMANAGER_CFLAGS), \
CXXFLAGS := $(CXXFLAGS_JDKLIB) $(LIBFONTMANAGER_CFLAGS), \
OPTIMIZATION := $(LIBFONTMANAGER_OPTIMIZATION), \
diff --git a/make/modules/jdk.hotspot.agent/Lib.gmk b/make/modules/jdk.hotspot.agent/Lib.gmk
index ebdbfecd461f9..6d85061583781 100644
--- a/make/modules/jdk.hotspot.agent/Lib.gmk
+++ b/make/modules/jdk.hotspot.agent/Lib.gmk
@@ -45,16 +45,16 @@ else ifeq ($(call isTargetOs, windows), true)
endif
endif
-SA_TOOLCHAIN := $(TOOLCHAIN_DEFAULT)
+SA_LINK_TYPE := C
ifeq ($(call isTargetOs, linux), true)
- SA_TOOLCHAIN := TOOLCHAIN_LINK_CXX
+ SA_LINK_TYPE := C++
endif
################################################################################
$(eval $(call SetupJdkLibrary, BUILD_LIBSA, \
NAME := saproc, \
- TOOLCHAIN := $(SA_TOOLCHAIN), \
+ LINK_TYPE := $(SA_LINK_TYPE), \
OPTIMIZATION := HIGH, \
DISABLED_WARNINGS_gcc := sign-compare, \
DISABLED_WARNINGS_gcc_ps_core.c := pointer-arith, \
diff --git a/make/modules/jdk.internal.le/Lib.gmk b/make/modules/jdk.internal.le/Lib.gmk
index 75a2446cc5a18..85550e3cc1d8e 100644
--- a/make/modules/jdk.internal.le/Lib.gmk
+++ b/make/modules/jdk.internal.le/Lib.gmk
@@ -1,5 +1,5 @@
#
-# Copyright (c) 2015, 2023, Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2015, 2024, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it
@@ -31,7 +31,7 @@ ifeq ($(call isTargetOs, linux macosx windows), true)
$(eval $(call SetupJdkLibrary, BUILD_LIBLE, \
NAME := le, \
- TOOLCHAIN := TOOLCHAIN_LINK_CXX, \
+ LINK_TYPE := C++, \
OPTIMIZATION := LOW, \
CFLAGS := $(CXXFLAGS_JDKLIB), \
LDFLAGS := $(LDFLAGS_JDKLIB), \
diff --git a/make/modules/jdk.jpackage/Lib.gmk b/make/modules/jdk.jpackage/Lib.gmk
index 1d3e27e8a6b77..58e40d772e135 100644
--- a/make/modules/jdk.jpackage/Lib.gmk
+++ b/make/modules/jdk.jpackage/Lib.gmk
@@ -1,5 +1,5 @@
#
-# Copyright (c) 2018, 2023, Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2018, 2024, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it
@@ -32,13 +32,13 @@ include LauncherCommon.gmk
ifeq ($(call isTargetOs, linux), true)
JPACKAGE_APPLAUNCHER_SRC := \
$(call FindSrcDirsForComponent, jdk.jpackage, applauncher)
- JPACKAGE_APPLAUNCHER_TOOLCHAIN := TOOLCHAIN_DEFAULT
+ JPACKAGE_APPLAUNCHER_LINK_TYPE := C
JPACKAGE_APPLAUNCHER_INCLUDE_FILES := %.c
else
JPACKAGE_APPLAUNCHER_SRC := \
$(call FindSrcDirsForComponent, jdk.jpackage, applauncher) \
$(call FindSrcDirsForComponent, jdk.jpackage, common)
- JPACKAGE_APPLAUNCHER_TOOLCHAIN := TOOLCHAIN_LINK_CXX
+ JPACKAGE_APPLAUNCHER_LINK_TYPE := C++
endif
@@ -59,11 +59,11 @@ JPACKAGE_APPLAUNCHER_INCLUDES := $(addprefix -I, $(JPACKAGE_APPLAUNCHER_SRC))
# Output app launcher executable in resources dir, and symbols in the object dir
$(eval $(call SetupJdkExecutable, BUILD_JPACKAGE_APPLAUNCHEREXE, \
NAME := jpackageapplauncher, \
+ LINK_TYPE := $(JPACKAGE_APPLAUNCHER_LINK_TYPE), \
OUTPUT_DIR := $(JPACKAGE_OUTPUT_DIR), \
SYMBOLS_DIR := $(SUPPORT_OUTPUTDIR)/native/$(MODULE)/jpackageapplauncher, \
SRC := $(JPACKAGE_APPLAUNCHER_SRC), \
INCLUDE_FILES := $(JPACKAGE_APPLAUNCHER_INCLUDE_FILES), \
- TOOLCHAIN := $(JPACKAGE_APPLAUNCHER_TOOLCHAIN), \
OPTIMIZATION := LOW, \
DISABLED_WARNINGS_clang_LinuxPackage.c := format-nonliteral, \
DISABLED_WARNINGS_clang_JvmLauncherLib.c := format-nonliteral, \
@@ -103,7 +103,7 @@ ifeq ($(call isTargetOs, linux), true)
SYMBOLS_DIR := $(SUPPORT_OUTPUTDIR)/native/$(MODULE)/libjpackageapplauncheraux, \
SRC := $(JPACKAGE_LIBAPPLAUNCHER_SRC), \
EXCLUDE_FILES := LinuxLauncher.c LinuxPackage.c, \
- TOOLCHAIN := TOOLCHAIN_LINK_CXX, \
+ LINK_TYPE := C++, \
OPTIMIZATION := LOW, \
DISABLED_WARNINGS_clang_JvmLauncherLib.c := format-nonliteral, \
DISABLED_WARNINGS_clang_tstrings.cpp := format-nonliteral, \
@@ -177,10 +177,10 @@ ifeq ($(call isTargetOs, windows), true)
# Build non-console version of launcher
$(eval $(call SetupJdkExecutable, BUILD_JPACKAGE_APPLAUNCHERWEXE, \
NAME := jpackageapplauncherw, \
+ LINK_TYPE := $(BUILD_JPACKAGE_APPLAUNCHEREXE_LINK_TYPE), \
OUTPUT_DIR := $(JPACKAGE_OUTPUT_DIR), \
SYMBOLS_DIR := $(SUPPORT_OUTPUTDIR)/native/$(MODULE)/jpackageapplauncherw, \
SRC := $(BUILD_JPACKAGE_APPLAUNCHEREXE_SRC), \
- TOOLCHAIN := $(BUILD_JPACKAGE_APPLAUNCHEREXE_TOOLCHAIN), \
OPTIMIZATION := $(BUILD_JPACKAGE_APPLAUNCHEREXE_OPTIMIZATION), \
CXXFLAGS := $(BUILD_JPACKAGE_APPLAUNCHEREXE_CXXFLAGS), \
CXXFLAGS_windows := $(BUILD_JPACKAGE_APPLAUNCHEREXE_CXXFLAGS_windows) -DJP_LAUNCHERW, \
diff --git a/make/scripts/compare.sh b/make/scripts/compare.sh
index a395e0cd850ca..44c700c48957e 100644
--- a/make/scripts/compare.sh
+++ b/make/scripts/compare.sh
@@ -1,6 +1,6 @@
#!/bin/bash
#
-# Copyright (c) 2012, 2022, Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2012, 2024, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it
@@ -60,13 +60,15 @@ else
STAT_PRINT_SIZE="-c %s"
fi
-COMPARE_EXCEPTIONS_INCLUDE="$TOPDIR/make/scripts/compare_exceptions.sh.incl"
-if [ ! -e "$COMPARE_EXCEPTIONS_INCLUDE" ]; then
- echo "Error: Cannot locate the exceptions file, it should have been here: $COMPARE_EXCEPTIONS_INCLUDE"
- exit 1
+
+if [ "$OPENJDK_TARGET_OS" = "windows" ]; then
+ # We ship a pdb file inside a published zip. Such files can never be built
+ # reproducibly, so ignore it.
+ ACCEPTED_JARZIP_CONTENTS="/modules_libs/java.security.jgss/w2k_lsa_auth.dll.pdb"
+elif [ "$OPENJDK_TARGET_OS" = "macosx" ]; then
+ # Due to signing, we can never get a byte-by-byte identical build on macOS
+ STRIP_TESTS_BEFORE_COMPARE="true"
fi
-# Include exception definitions
-. "$COMPARE_EXCEPTIONS_INCLUDE"
################################################################################
#
@@ -117,35 +119,6 @@ diff_text() {
TMP=$($DIFF $THIS_FILE $OTHER_FILE)
- if test "x$SUFFIX" = "xclass"; then
- if [ "$NAME" = "SystemModules\$all.class" ] \
- || [ "$NAME" = "SystemModules\$default.class" ]; then
- # The SystemModules\$*.classes are not comparable as they contain the
- # module hashes which would require a whole other level of
- # reproducible builds to get reproducible. There is also random
- # order of map initialization.
- TMP=""
- elif [ "$NAME" = "module-info.class" ]; then
- # The module-info.class have several issues with random ordering of
- # elements in HashSets.
- MODULES_CLASS_FILTER="$SED \
- -e 's/,$//' \
- -e 's/;$//' \
- -e 's/^ *[0-9]*://' \
- -e 's/#[0-9]* */#/' \
- -e 's/ *\/\// \/\//' \
- -e 's/aload *[0-9]*/aload X/' \
- -e 's/ldc_w/ldc /' \
- | $SORT \
- "
- $JAVAP -c -constants -l -p "${OTHER_FILE}" \
- | eval "$MODULES_CLASS_FILTER" > ${OTHER_FILE}.javap &
- $JAVAP -c -constants -l -p "${THIS_FILE}" \
- | eval "$MODULES_CLASS_FILTER" > ${THIS_FILE}.javap &
- wait
- TMP=$($DIFF ${OTHER_FILE}.javap ${THIS_FILE}.javap)
- fi
- fi
if test -n "$TMP"; then
echo Files $OTHER_FILE and $THIS_FILE differ
@@ -312,75 +285,60 @@ compare_file_types() {
}
################################################################################
-# Compare the rest of the files
+# Find all files to compare and separate them into different categories
-compare_general_files() {
+locate_files() {
THIS_DIR=$1
- OTHER_DIR=$2
- WORK_DIR=$3
+ TEMP_DIR=$COMPARE_ROOT/support
+ $MKDIR -p $TEMP_DIR
- GENERAL_FILES=$(cd $THIS_DIR && $FIND . -type f ! -name "*.so" ! -name "*.jar" \
- ! -name "*.zip" ! -name "*.debuginfo" ! -name "*.dylib" ! -name "jexec" \
- ! -name "modules" ! -name "ct.sym" ! -name "*.diz" ! -name "*.dll" \
- ! -name "*.cpl" ! -name "*.pdb" ! -name "*.exp" ! -name "*.ilk" \
- ! -name "*.lib" ! -name "*.jmod" ! -name "*.exe" \
- ! -name "*.obj" ! -name "*.o" ! -name "jspawnhelper" ! -name "*.a" \
- ! -name "*.tar.gz" ! -name "gtestLauncher" \
- ! -name "*.map" \
- | $GREP -v "./bin/" | $SORT | $FILTER)
+ ALL_FILES_PATH=$TEMP_DIR/all_files.txt
+ cd $THIS_DIR && $FIND . -type f | $SORT | $FILTER > $ALL_FILES_PATH
- echo Other files with binary differences...
- for f in $GENERAL_FILES
- do
- # Skip all files in test/*/native
- if [[ "$f" == */native/* ]]; then
- continue
- fi
- if [ -e $OTHER_DIR/$f ]; then
- SUFFIX="${f##*.}"
- if [ "$(basename $f)" = "release" ]; then
- # In release file, ignore differences in source rev numbers
- OTHER_FILE=$WORK_DIR/$f.other
- THIS_FILE=$WORK_DIR/$f.this
- $MKDIR -p $(dirname $OTHER_FILE)
- $MKDIR -p $(dirname $THIS_FILE)
- RELEASE_FILTER="$SED -e 's/SOURCE=".*"/SOURCE=/g'"
- $CAT $OTHER_DIR/$f | eval "$RELEASE_FILTER" > $OTHER_FILE
- $CAT $THIS_DIR/$f | eval "$RELEASE_FILTER" > $THIS_FILE
- elif [ "$SUFFIX" = "svg" ]; then
- # GraphViz has non-determinism when generating svg files
- OTHER_FILE=$WORK_DIR/$f.other
- THIS_FILE=$WORK_DIR/$f.this
- $MKDIR -p $(dirname $OTHER_FILE) $(dirname $THIS_FILE)
- SVG_FILTER="$SED \
- -e 's/edge[0-9][0-9]*/edgeX/g'
- "
- $CAT $OTHER_DIR/$f | eval "$SVG_FILTER" > $OTHER_FILE
- $CAT $THIS_DIR/$f | eval "$SVG_FILTER" > $THIS_FILE
- elif [ "$SUFFIX" = "jar_contents" ]; then
- # The jar_contents files may have some lines in random order
- OTHER_FILE=$WORK_DIR/$f.other
- THIS_FILE=$WORK_DIR/$f.this
- $MKDIR -p $(dirname $OTHER_FILE) $(dirname $THIS_FILE)
- $RM $OTHER_FILE $THIS_FILE
- $CAT $OTHER_DIR/$f | $SORT > $OTHER_FILE
- $CAT $THIS_DIR/$f | $SORT > $THIS_FILE
- else
- OTHER_FILE=$OTHER_DIR/$f
- THIS_FILE=$THIS_DIR/$f
- fi
- DIFF_OUT=$($DIFF $OTHER_FILE $THIS_FILE 2>&1)
- if [ -n "$DIFF_OUT" ]; then
- echo $f
- REGRESSIONS=true
- if [ "$SHOW_DIFFS" = "true" ]; then
- echo "$DIFF_OUT"
- fi
- fi
- fi
- done
+ ZIP_FILES_PATH=$TEMP_DIR/zip_files.txt
+ ZIP_FILTER="-e '\.zip$' -e '\.tar.gz$'"
+ $CAT "$ALL_FILES_PATH" | eval $GREP $ZIP_FILTER > $ZIP_FILES_PATH
+
+ JMOD_FILES_PATH=$TEMP_DIR/jmod_files.txt
+ JMOD_FILTER="-e '\.jmod$'"
+ $CAT "$ALL_FILES_PATH" | eval $GREP $JMOD_FILTER > $JMOD_FILES_PATH
+
+ JAR_FILES_PATH=$TEMP_DIR/jar_files.txt
+ JAR_FILTER="-e '\.jar$' -e '\.war$' -e '/module$'"
+ $CAT "$ALL_FILES_PATH" | eval $GREP $JAR_FILTER > $JAR_FILES_PATH
+
+ LIB_FILES_PATH=$TEMP_DIR/lib_files.txt
+ LIB_FILTER="-e '\.dylib$' -e '/lib.*\.so$' -e '\.dll$' -e '\.obj$' -e '\.o$' -e '\.a$' -e '\.cpl$'"
+ # On macOS, filter out the dSYM debug symbol files. They are identically named
+ # .dylib files that reside under a *.dSYM directory.
+ LIB_EXCLUDE="-e '/lib.*\.dSYM/'"
+ $CAT "$ALL_FILES_PATH" | eval $GREP $LIB_FILTER | eval $GREP -v $LIB_EXCLUDE > $LIB_FILES_PATH
+ DEBUG_FILES_PATH=$TEMP_DIR/debug_files.txt
+ DEBUG_FILTER="-e '\.dSYM/' -e '\.debuginfo$' -e '\.diz$' -e '\.pdb$' -e '\.map$'"
+ $CAT "$ALL_FILES_PATH" | eval $GREP $DEBUG_FILTER > $DEBUG_FILES_PATH
+ EXEC_FILES_PATH=$TEMP_DIR/exec_files.txt
+ if [ "$OPENJDK_TARGET_OS" = "windows" ]; then
+ EXEC_FILTER="-e '\.exe$'"
+ $CAT "$ALL_FILES_PATH" | eval $GREP $EXEC_FILTER > $EXEC_FILES_PATH
+ else
+ # Find all files with the executable bit set
+ cd $THIS_DIR && $FIND . -type f -perm -100 | $SORT | $FILTER > $EXEC_FILES_PATH
+ fi
+
+ OTHER_FILES_PATH=$TEMP_DIR/other_files.txt
+ ACCOUNTED_FILES_PATH=$TEMP_DIR/accounted_files.txt
+ $CAT $ZIP_FILES_PATH $JMOD_FILES_PATH $JAR_FILES_PATH $LIB_FILES_PATH $DEBUG_FILES_PATH $EXEC_FILES_PATH > $ACCOUNTED_FILES_PATH
+ $CAT $ACCOUNTED_FILES_PATH $ALL_FILES_PATH | $SORT | $UNIQ -u > $OTHER_FILES_PATH
+
+ ALL_ZIP_FILES=$($CAT $ZIP_FILES_PATH)
+ ALL_JMOD_FILES=$($CAT $JMOD_FILES_PATH)
+ ALL_JAR_FILES=$($CAT $JAR_FILES_PATH)
+ ALL_LIB_FILES=$($CAT $LIB_FILES_PATH)
+ ALL_DEBUG_FILES=$($CAT $DEBUG_FILES_PATH)
+ ALL_EXEC_FILES=$($CAT $EXEC_FILES_PATH)
+ ALL_OTHER_FILES=$($CAT $OTHER_FILES_PATH)
}
################################################################################
@@ -450,12 +408,14 @@ compare_zip_file() {
if [ -n "$ONLY_OTHER" ]; then
echo " Only OTHER $ZIP_FILE contains:"
echo "$ONLY_OTHER" | sed "s|Only in $OTHER_UNZIPDIR| |"g | sed 's|: |/|g'
+ REGRESSIONS=true
return_value=1
fi
if [ -n "$ONLY_THIS" ]; then
echo " Only THIS $ZIP_FILE contains:"
echo "$ONLY_THIS" | sed "s|Only in $THIS_UNZIPDIR| |"g | sed 's|: |/|g'
+ REGRESSIONS=true
return_value=1
fi
@@ -484,6 +444,7 @@ compare_zip_file() {
done
if [ -s "$WORK_DIR/$ZIP_FILE.diffs" ]; then
+ REGRESSIONS=true
return_value=1
echo " Differing files in $ZIP_FILE"
$CAT $WORK_DIR/$ZIP_FILE.diffs | $GREP 'differ$' | cut -f 2 -d ' ' | \
@@ -508,6 +469,7 @@ compare_zip_file() {
compare_bin_file $THIS_UNZIPDIR $OTHER_UNZIPDIR $WORK_DIR/$ZIP_FILE.bin \
$file
if [ "$?" != "0" ]; then
+ REGRESSIONS=true
return_value=1
fi
done
@@ -547,12 +509,14 @@ compare_jmod_file() {
if [ -n "$ONLY_OTHER" ]; then
echo " Only OTHER $JMOD_FILE contains:"
echo "$ONLY_OTHER" | sed "s|^>| |"g | sed 's|: |/|g'
+ REGRESSIONS=true
return_value=1
fi
if [ -n "$ONLY_THIS" ]; then
echo " Only THIS $JMOD_FILE contains:"
echo "$ONLY_THIS" | sed "s|^<| |"g | sed 's|: |/|g'
+ REGRESSIONS=true
return_value=1
fi
@@ -567,19 +531,18 @@ compare_all_zip_files() {
OTHER_DIR=$2
WORK_DIR=$3
- ZIPS=$(cd $THIS_DIR && $FIND . -type f -name "*.zip" -o -name "*.tar.gz" \
- | $SORT | $FILTER )
+ locate_files $THIS_DIR
- if [ -n "$ZIPS" ]; then
+ if [ -n "$ALL_ZIP_FILES" ]; then
echo Zip/tar.gz files...
return_value=0
- for f in $ZIPS; do
+ for f in $ALL_ZIP_FILES; do
if [ -f "$OTHER_DIR/$f" ]; then
compare_zip_file $THIS_DIR $OTHER_DIR $WORK_DIR $f
if [ "$?" != "0" ]; then
- return_value=1
REGRESSIONS=true
+ return_value=1
fi
fi
done
@@ -596,18 +559,18 @@ compare_all_jmod_files() {
OTHER_DIR=$2
WORK_DIR=$3
- JMODS=$(cd $THIS_DIR && $FIND . -type f -name "*.jmod" | $SORT | $FILTER )
+ locate_files $THIS_DIR
- if [ -n "$JMODS" ]; then
+ if [ -n "$ALL_JMOD_FILES" ]; then
echo Jmod files...
return_value=0
- for f in $JMODS; do
+ for f in $ALL_JMOD_FILES; do
if [ -f "$OTHER_DIR/$f" ]; then
compare_jmod_file $THIS_DIR $OTHER_DIR $WORK_DIR $f
if [ "$?" != "0" ]; then
- return_value=1
REGRESSIONS=true
+ return_value=1
fi
fi
done
@@ -624,20 +587,18 @@ compare_all_jar_files() {
OTHER_DIR=$2
WORK_DIR=$3
- # TODO filter?
- ZIPS=$(cd $THIS_DIR && $FIND . -type f -name "*.jar" -o -name "*.war" \
- -o -name "modules" | $SORT | $FILTER)
+ locate_files $THIS_DIR
- if [ -n "$ZIPS" ]; then
+ if [ -n "$ALL_JAR_FILES" ]; then
echo Jar files...
return_value=0
- for f in $ZIPS; do
+ for f in $ALL_JAR_FILES; do
if [ -f "$OTHER_DIR/$f" ]; then
compare_zip_file $THIS_DIR $OTHER_DIR $WORK_DIR $f
if [ "$?" != "0" ]; then
- return_value=1
REGRESSIONS=true
+ return_value=1
fi
fi
done
@@ -699,14 +660,16 @@ compare_bin_file() {
unset _NT_SYMBOL_PATH
if [ "$(uname -o)" = "Cygwin" ]; then
THIS=$(cygpath -msa $THIS)
- OTHER=$(cygpath -msa $OTHER)
+ if [ -n "$OTHER" ]; then
+ OTHER=$(cygpath -msa $OTHER)
+ fi
fi
# Build an _NT_SYMBOL_PATH that contains all known locations for
# pdb files.
PDB_DIRS="$(ls -d \
{$OTHER,$THIS}/support/modules_{cmds,libs}/{*,*/*} \
{$OTHER,$THIS}/support/native/jdk.jpackage/* \
- )"
+ 2> /dev/null )"
export _NT_SYMBOL_PATH="$(echo $PDB_DIRS | tr ' ' ';')"
fi
@@ -1047,23 +1010,16 @@ compare_all_libs() {
OTHER_DIR=$2
WORK_DIR=$3
- LIBS=$(cd $THIS_DIR && $FIND . -type f \( -name 'lib*.so' -o -name '*.dylib' \
- -o -name '*.dll' -o -name '*.obj' -o -name '*.o' -o -name '*.a' \
- -o -name '*.cpl' \) | $SORT | $FILTER)
-
- # On macos, filter out the dSYM debug symbols files as they are also
- # named *.dylib.
- if [ "$OPENJDK_TARGET_OS" = "macosx" ]; then
- LIBS=$(echo "$LIBS" | $GREP -v '\.dSYM/')
- fi
+ locate_files $THIS_DIR
- if [ -n "$LIBS" ]; then
+ if [ -n "$ALL_LIB_FILES" ]; then
echo Libraries...
print_binary_diff_header
- for l in $LIBS; do
+ for l in $ALL_LIB_FILES; do
if [ -f "$OTHER_DIR/$l" ]; then
compare_bin_file $THIS_DIR $OTHER_DIR $WORK_DIR $l
if [ "$?" != "0" ]; then
+ REGRESSIONS=true
return_value=1
fi
fi
@@ -1081,33 +1037,16 @@ compare_all_execs() {
OTHER_DIR=$2
WORK_DIR=$3
- if [ "$OPENJDK_TARGET_OS" = "windows" ]; then
- EXECS=$(cd $THIS_DIR && $FIND . -type f -name '*.exe' | $SORT | $FILTER)
- else
- EXECS=$(cd $THIS_DIR && $FIND . -name db -prune -o -type f -perm -100 \! \
- \( -name '*.so' -o -name '*.dylib' -o -name '*.dll' -o -name '*.cgi' \
- -o -name '*.jar' -o -name '*.diz' -o -name 'jcontrol' -o -name '*.properties' \
- -o -name '*.data' -o -name '*.bfc' -o -name '*.src' -o -name '*.txt' \
- -o -name '*.cfg' -o -name 'meta-index' -o -name '*.properties.ja' \
- -o -name '*.xml' -o -name '*.html' -o -name '*.png' -o -name 'README' \
- -o -name '*.zip' -o -name '*.jimage' -o -name '*.java' -o -name '*.mf' \
- -o -name '*.jpg' -o -name '*.wsdl' -o -name '*.js' -o -name '*.sh' \
- -o -name '*.bat' -o -name '*LICENSE' -o -name '*.d' -o -name '*store' \
- -o -name 'blocked' -o -name '*certs' -o -name '*.ttf' \
- -o -name '*.jfc' -o -name '*.dat' -o -name 'release' -o -name '*.dir'\
- -o -name '*.sym' -o -name '*.idl' -o -name '*.h' -o -name '*.access' \
- -o -name '*.template' -o -name '*.policy' -o -name '*.security' \
- -o -name 'COPYRIGHT' -o -name '*.1' -o -name '*.debuginfo' \
- -o -name 'classlist' \) | $SORT | $FILTER)
- fi
-
- if [ -n "$EXECS" ]; then
+ locate_files $THIS_DIR
+
+ if [ -n "$ALL_EXEC_FILES" ]; then
echo Executables...
print_binary_diff_header
- for e in $EXECS; do
+ for e in $ALL_EXEC_FILES; do
if [ -f "$OTHER_DIR/$e" ]; then
compare_bin_file $THIS_DIR $OTHER_DIR $WORK_DIR $e
if [ "$?" != "0" ]; then
+ REGRESSIONS=true
return_value=1
fi
fi
@@ -1117,6 +1056,95 @@ compare_all_execs() {
return $return_value
}
+################################################################################
+# Compare native debug symbol files
+
+compare_all_debug_files() {
+ THIS_DIR=$1
+ OTHER_DIR=$2
+ WORK_DIR=$3
+
+ locate_files $THIS_DIR
+
+ echo Debug symbol files with binary differences...
+ for f in $ALL_DEBUG_FILES
+ do
+ if [ -e $OTHER_DIR/$f ]; then
+ SUFFIX="${f##*.}"
+ if [ "$SUFFIX" = "pdb" ]; then
+ # pdb files are never reproducible
+ DIFF_OUT=""
+ else
+ OTHER_FILE=$OTHER_DIR/$f
+ THIS_FILE=$THIS_DIR/$f
+ DIFF_OUT=$($DIFF $OTHER_FILE $THIS_FILE 2>&1)
+ fi
+
+ if [ -n "$DIFF_OUT" ]; then
+ echo $f
+ REGRESSIONS=true
+ if [ "$SHOW_DIFFS" = "true" ]; then
+ echo "$DIFF_OUT"
+ fi
+ fi
+ fi
+ done
+}
+
+################################################################################
+# Compare the rest of the files
+
+compare_all_other_files() {
+ THIS_DIR=$1
+ OTHER_DIR=$2
+ WORK_DIR=$3
+
+ locate_files $THIS_DIR
+
+ echo Other files with binary differences...
+ for f in $ALL_OTHER_FILES
+ do
+ # Skip all files in test/*/native
+ if [[ "$f" == */native/* ]]; then
+ continue
+ fi
+ if [ -e $OTHER_DIR/$f ]; then
+ SUFFIX="${f##*.}"
+ if [ "$(basename $f)" = "release" ]; then
+ # In release file, ignore differences in source rev numbers
+ OTHER_FILE=$WORK_DIR/$f.other
+ THIS_FILE=$WORK_DIR/$f.this
+ $MKDIR -p $(dirname $OTHER_FILE)
+ $MKDIR -p $(dirname $THIS_FILE)
+ RELEASE_FILTER="$SED -e 's/SOURCE=".*"/SOURCE=/g'"
+ $CAT $OTHER_DIR/$f | eval "$RELEASE_FILTER" > $OTHER_FILE
+ $CAT $THIS_DIR/$f | eval "$RELEASE_FILTER" > $THIS_FILE
+ elif [ "$SUFFIX" = "jar_contents" ]; then
+ # The jar_contents files are generated by the build and may have
+ # some lines in random order. They are only included for demos,
+ # which they shouldn't really...
+ OTHER_FILE=$WORK_DIR/$f.other
+ THIS_FILE=$WORK_DIR/$f.this
+ $MKDIR -p $(dirname $OTHER_FILE) $(dirname $THIS_FILE)
+ $RM $OTHER_FILE $THIS_FILE
+ $CAT $OTHER_DIR/$f | $SORT > $OTHER_FILE
+ $CAT $THIS_DIR/$f | $SORT > $THIS_FILE
+ else
+ OTHER_FILE=$OTHER_DIR/$f
+ THIS_FILE=$THIS_DIR/$f
+ fi
+ DIFF_OUT=$($DIFF $OTHER_FILE $THIS_FILE 2>&1)
+ if [ -n "$DIFF_OUT" ]; then
+ echo $f
+ REGRESSIONS=true
+ if [ "$SHOW_DIFFS" = "true" ]; then
+ echo "$DIFF_OUT"
+ fi
+ fi
+ fi
+ done
+}
+
################################################################################
# Initiate configuration
@@ -1515,22 +1543,31 @@ fi
if [ "$CMP_GENERAL" = "true" ]; then
if [ -n "$THIS_JDK" ] && [ -n "$OTHER_JDK" ]; then
echo -n "JDK "
- compare_general_files $THIS_JDK $OTHER_JDK $COMPARE_ROOT/jdk
+ compare_all_other_files $THIS_JDK $OTHER_JDK $COMPARE_ROOT/jdk
+ echo -n "JDK "
+ compare_all_debug_files $THIS_JDK $OTHER_JDK $COMPARE_ROOT/jdk
fi
if [ -n "$THIS_JDK_BUNDLE" ] && [ -n "$OTHER_JDK_BUNDLE" ]; then
echo -n "JDK Bundle "
- compare_general_files $THIS_JDK_BUNDLE $OTHER_JDK_BUNDLE $COMPARE_ROOT/jdk-bundle
+ compare_all_other_files $THIS_JDK_BUNDLE $OTHER_JDK_BUNDLE $COMPARE_ROOT/jdk-bundle
+ echo -n "JDK Bundle "
+ compare_all_debug_files $THIS_JDK_BUNDLE $OTHER_JDK_BUNDLE $COMPARE_ROOT/jdk-bundle
fi
if [ -n "$THIS_DOCS" ] && [ -n "$OTHER_DOCS" ]; then
echo -n "Docs "
- compare_general_files $THIS_DOCS $OTHER_DOCS $COMPARE_ROOT/docs
+ compare_all_other_files $THIS_DOCS $OTHER_DOCS $COMPARE_ROOT/docs
+ echo -n "Docs "
+ compare_all_debug_files $THIS_DOCS $OTHER_DOCS $COMPARE_ROOT/docs
fi
if [ -n "$THIS_TEST" ] && [ -n "$OTHER_TEST" ]; then
echo -n "Test "
- compare_general_files $THIS_TEST $OTHER_TEST $COMPARE_ROOT/test
+ compare_all_other_files $THIS_TEST $OTHER_TEST $COMPARE_ROOT/test
+ echo -n "Test "
+ compare_all_debug_files $THIS_TEST $OTHER_TEST $COMPARE_ROOT/test
fi
if [ -n "$THIS_BASE_DIR" ] && [ -n "$OTHER_BASE_DIR" ]; then
- compare_general_files $THIS_BASE_DIR $OTHER_BASE_DIR $COMPARE_ROOT/base_dir
+ compare_all_other_files $THIS_BASE_DIR $OTHER_BASE_DIR $COMPARE_ROOT/base_dir
+ compare_all_debug_files $THIS_BASE_DIR $OTHER_BASE_DIR $COMPARE_ROOT/base_dir
fi
fi
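
As a side note on the locate_files helper introduced above: the "other" category
is computed by set subtraction. Every categorized file appears twice in the
concatenated stream (once in the accounted list, once in the full list of files)
and is therefore dropped by 'uniq -u', while uncategorized files appear only once
and survive. A minimal standalone sketch of the same idea, using hypothetical
file names:

  printf '%s\n' a.zip b.so c.txt | sort > all.txt         # every file found
  printf '%s\n' a.zip b.so | sort > accounted.txt         # files matched by some category
  cat accounted.txt all.txt | sort | uniq -u              # prints only: c.txt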
diff --git a/make/scripts/compare_exceptions.sh.incl b/make/scripts/compare_exceptions.sh.incl
index cfbfeeb5be4f9..e69de29bb2d1d 100644
--- a/make/scripts/compare_exceptions.sh.incl
+++ b/make/scripts/compare_exceptions.sh.incl
@@ -1,65 +0,0 @@
-#!/bin/bash
-#
-# Copyright (c) 2012, 2023, Oracle and/or its affiliates. All rights reserved.
-# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
-#
-# This code is free software; you can redistribute it and/or modify it
-# under the terms of the GNU General Public License version 2 only, as
-# published by the Free Software Foundation.
-#
-# This code is distributed in the hope that it will be useful, but WITHOUT
-# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
-# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
-# version 2 for more details (a copy is included in the LICENSE file that
-# accompanied this code).
-#
-# You should have received a copy of the GNU General Public License version
-# 2 along with this work; if not, write to the Free Software Foundation,
-# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
-#
-# Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
-# or visit www.oracle.com if you need additional information or have any
-# questions.
-#
-
-# This script is not to be run as stand-alone, it should be included from
-# compare.sh.
-
-##########################################################################################
-# Check that we are run via inclusion from compare.sh and not as stand-alone.
-if [ -z "$COMPARE_EXCEPTIONS_INCLUDE" ]; then
- echo "Error: This script should not be run as stand-alone. It is included by compare.sh"
- exit 1
-fi
-
-##########################################################################################
-# Diff exceptions
-
-if [ "$OPENJDK_TARGET_OS" = "linux" ]; then
- if [ "$USE_PRECOMPILED_HEADER" = "true" ]; then
- ACCEPTED_BIN_DIFF="
- ./lib/server/libjvm.so
- ./hotspot/gtest/server/libjvm.so
- "
- STRIP_BEFORE_COMPARE="
- ./hotspot/gtest/server/libjvm.so
- "
- fi
-elif [ "$OPENJDK_TARGET_OS" = "windows" ]; then
- SKIP_BIN_DIFF="true"
- SKIP_FULLDUMP_DIFF="true"
- ACCEPTED_JARZIP_CONTENTS="
- /modules_libs/java.security.jgss/w2k_lsa_auth.dll.pdb
- /modules_libs/java.security.jgss/w2k_lsa_auth.dll.map
- /modules_libs/java.security.jgss/w2k_lsa_auth.dll
- "
-elif [ "$OPENJDK_TARGET_OS" = "macosx" ]; then
- ACCEPTED_BIN_DIFF="
- ./lib/libawt_lwawt.dylib
- ./lib/libosxapp.dylib
- ./lib/libosxui.dylib
- ./lib/server/libjvm.dylib
- ./hotspot/gtest/server/libjvm.dylib
- "
- STRIP_TESTS_BEFORE_COMPARE="true"
-fi
diff --git a/src/demo/share/java2d/J2DBench/Makefile b/src/demo/share/java2d/J2DBench/Makefile
index 04b0818a2c35b..edc4494e131de 100644
--- a/src/demo/share/java2d/J2DBench/Makefile
+++ b/src/demo/share/java2d/J2DBench/Makefile
@@ -29,6 +29,23 @@
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
+
+ifndef SOURCE
+export SOURCE := 7
+endif
+ifndef TARGET
+export TARGET := 7
+endif
+ifndef JAVAC
+export JAVAC := javac
+endif
+ifndef JAVA
+export JAVA := java
+endif
+ifndef JAR
+export JAR := jar
+endif
+
SOURCEPATH=src
CLASSES=build
DIST=dist
@@ -80,18 +97,18 @@ SCM_DIRs = .hg .svn CVS RCS SCCS Codemgr_wsdata deleted_files
all: mkdirs J2DBench.jar J2DAnalyzer.jar
run: mkdirs J2DBench.jar
- java -jar $(DIST)/J2DBench.jar
+ $(JAVA) -jar $(DIST)/J2DBench.jar
analyze: mkdirs J2DAnalyzer.jar
- java -jar $(DIST)/J2DAnalyzer.jar
+ $(JAVA) -jar $(DIST)/J2DAnalyzer.jar
J2DBench.jar: \
$(J2DBENCH_CLASSES) $(J2DBENCH_RESOURCES) \
$(CLASSES)/j2dbench.manifest
- jar cvmf $(CLASSES)/j2dbench.manifest $(DIST)/J2DBench.jar -C $(CLASSES) j2dbench
+ $(JAR) cvmf $(CLASSES)/j2dbench.manifest $(DIST)/J2DBench.jar -C $(CLASSES) j2dbench
J2DAnalyzer.jar: $(J2DANALYZER_CLASSES) $(CLASSES)/j2danalyzer.manifest
- jar cvmf $(CLASSES)/j2danalyzer.manifest \
+ $(JAR) cvmf $(CLASSES)/j2danalyzer.manifest \
$(DIST)/J2DAnalyzer.jar -C $(CLASSES) j2dbench/report
$(CLASSES)/j2dbench/tests/iio/images: $(RESOURCES)/images
@@ -120,7 +137,7 @@ $(CLASSES):
mkdirs: $(DIST) $(CLASSES)
$(CLASSES)/j2dbench/%.class: $(SOURCEPATH)/j2dbench/%.java
- javac -g:none -source 1.7 -target 1.7 -d $(CLASSES) -sourcepath $(SOURCEPATH) $<
+ $(JAVAC) -g:none -source $(SOURCE) -target $(TARGET) -d $(CLASSES) -sourcepath $(SOURCEPATH) $<
clean:
rm -rf $(CLASSES)
diff --git a/src/demo/share/java2d/J2DBench/README b/src/demo/share/java2d/J2DBench/README
index 3b9f25c13f14e..513c984a6555f 100644
--- a/src/demo/share/java2d/J2DBench/README
+++ b/src/demo/share/java2d/J2DBench/README
@@ -23,6 +23,9 @@ The benchmark requires at least jdk1.4 to compile and run. Note that
source/target is set to 1.7 in the makefile and build.xml, because of
support in jdk 14 compiler. To check compatibility with jdk1.4 you can
use "-source 1.4 -target 1.4" options and jdk1.7.
+You can use the TARGET/SOURCE variables of the makefile, or -Dtarget/-Dsource for ant, to set them up for your convenience.
+Similarly, you can set JAVA/JAVAC/JAR for the makefile, or -Djava/-Djavac for ant, to select a different java/javac than the one on your PATH.
+Unfortunately you cannot set jar in ant, but ant should honor JAVA_HOME.
-----------------------------------------------------------------------
How To Compile
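
As an illustration of the overrides described above (not part of the patch
itself; the JDK path below is only a placeholder), the new Makefile variables
can be supplied directly on the make command line:

  make JAVAC=/opt/jdk-21/bin/javac JAR=/opt/jdk-21/bin/jar SOURCE=8 TARGET=8 all
  make JAVA=/opt/jdk-21/bin/java run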
diff --git a/src/demo/share/java2d/J2DBench/build.xml b/src/demo/share/java2d/J2DBench/build.xml
index 7b202946cf145..415c315899eac 100644
--- a/src/demo/share/java2d/J2DBench/build.xml
+++ b/src/demo/share/java2d/J2DBench/build.xml
@@ -39,6 +39,27 @@
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
@@ -49,13 +70,14 @@
-
+
@@ -64,6 +86,7 @@
description="run J2DAnalyzer" >
diff --git a/src/hotspot/cpu/aarch64/aarch64.ad b/src/hotspot/cpu/aarch64/aarch64.ad
index f22af58f40ab5..a07ff041c487a 100644
--- a/src/hotspot/cpu/aarch64/aarch64.ad
+++ b/src/hotspot/cpu/aarch64/aarch64.ad
@@ -2205,14 +2205,14 @@ void MachUEPNode::format(PhaseRegAlloc* ra_, outputStream* st) const
{
st->print_cr("# MachUEPNode");
if (UseCompressedClassPointers) {
- st->print_cr("\tldrw rscratch1, j_rarg0 + oopDesc::klass_offset_in_bytes()]\t# compressed klass");
- if (CompressedKlassPointers::shift() != 0) {
- st->print_cr("\tdecode_klass_not_null rscratch1, rscratch1");
- }
+ st->print_cr("\tldrw rscratch1, [j_rarg0 + oopDesc::klass_offset_in_bytes()]\t# compressed klass");
+ st->print_cr("\tldrw r10, [rscratch2 + CompiledICData::speculated_klass_offset()]\t# compressed klass");
+ st->print_cr("\tcmpw rscratch1, r10");
} else {
- st->print_cr("\tldr rscratch1, j_rarg0 + oopDesc::klass_offset_in_bytes()]\t# compressed klass");
+ st->print_cr("\tldr rscratch1, [j_rarg0 + oopDesc::klass_offset_in_bytes()]\t# compressed klass");
+ st->print_cr("\tldr r10, [rscratch2 + CompiledICData::speculated_klass_offset()]\t# compressed klass");
+ st->print_cr("\tcmp rscratch1, r10");
}
- st->print_cr("\tcmp r0, rscratch1\t # Inline cache check");
st->print_cr("\tbne, SharedRuntime::_ic_miss_stub");
}
#endif
@@ -2221,14 +2221,7 @@ void MachUEPNode::emit(CodeBuffer& cbuf, PhaseRegAlloc* ra_) const
{
// This is the unverified entry point.
C2_MacroAssembler _masm(&cbuf);
-
- __ cmp_klass(j_rarg0, rscratch2, rscratch1);
- Label skip;
- // TODO
- // can we avoid this skip and still use a reloc?
- __ br(Assembler::EQ, skip);
- __ far_jump(RuntimeAddress(SharedRuntime::get_ic_miss_stub()));
- __ bind(skip);
+ __ ic_check(InteriorEntryAlignment);
}
uint MachUEPNode::size(PhaseRegAlloc* ra_) const
@@ -2582,7 +2575,7 @@ Assembler::Condition to_assembler_cond(BoolTest::mask cond) {
}
// Binary src (Replicate con)
-bool is_valid_sve_arith_imm_pattern(Node* n, Node* m) {
+static bool is_valid_sve_arith_imm_pattern(Node* n, Node* m) {
if (n == nullptr || m == nullptr) {
return false;
}
@@ -2623,7 +2616,7 @@ bool is_valid_sve_arith_imm_pattern(Node* n, Node* m) {
// (XorV src (Replicate m1))
// (XorVMask src (MaskAll m1))
-bool is_vector_bitwise_not_pattern(Node* n, Node* m) {
+static bool is_vector_bitwise_not_pattern(Node* n, Node* m) {
if (n != nullptr && m != nullptr) {
return (n->Opcode() == Op_XorV || n->Opcode() == Op_XorVMask) &&
VectorNode::is_all_ones_vector(m);
@@ -3715,7 +3708,7 @@ encode %{
cbuf.shared_stub_to_interp_for(_method, call - cbuf.insts_begin());
} else {
// Emit stub for static call
- address stub = CompiledStaticCall::emit_to_interp_stub(cbuf, call);
+ address stub = CompiledDirectCall::emit_to_interp_stub(cbuf, call);
if (stub == nullptr) {
ciEnv::current()->record_failure("CodeCache is full");
return;
@@ -8289,7 +8282,7 @@ instruct membar_acquire() %{
ins_cost(VOLATILE_REF_COST);
format %{ "membar_acquire\n\t"
- "dmb ishld" %}
+ "dmb ish" %}
ins_encode %{
__ block_comment("membar_acquire");
@@ -8343,12 +8336,11 @@ instruct membar_release() %{
ins_cost(VOLATILE_REF_COST);
format %{ "membar_release\n\t"
- "dmb ishst\n\tdmb ishld" %}
+ "dmb ish" %}
ins_encode %{
__ block_comment("membar_release");
- __ membar(Assembler::StoreStore);
- __ membar(Assembler::LoadStore);
+ __ membar(Assembler::LoadStore|Assembler::StoreStore);
%}
ins_pipe(pipe_serial);
%}
@@ -16441,13 +16433,12 @@ instruct branchLoopEnd(cmpOp cmp, rFlagsReg cr, label lbl)
instruct cmpFastLock(rFlagsReg cr, iRegP object, iRegP box, iRegPNoSp tmp, iRegPNoSp tmp2, iRegPNoSp tmp3)
%{
+ predicate(LockingMode != LM_LIGHTWEIGHT);
match(Set cr (FastLock object box));
effect(TEMP tmp, TEMP tmp2, TEMP tmp3);
- // TODO
- // identify correct cost
ins_cost(5 * INSN_COST);
- format %{ "fastlock $object,$box\t! kills $tmp,$tmp2" %}
+ format %{ "fastlock $object,$box\t! kills $tmp,$tmp2,$tmp3" %}
ins_encode %{
__ fast_lock($object$$Register, $box$$Register, $tmp$$Register, $tmp2$$Register, $tmp3$$Register);
@@ -16458,6 +16449,7 @@ instruct cmpFastLock(rFlagsReg cr, iRegP object, iRegP box, iRegPNoSp tmp, iRegP
instruct cmpFastUnlock(rFlagsReg cr, iRegP object, iRegP box, iRegPNoSp tmp, iRegPNoSp tmp2)
%{
+ predicate(LockingMode != LM_LIGHTWEIGHT);
match(Set cr (FastUnlock object box));
effect(TEMP tmp, TEMP tmp2);
@@ -16471,6 +16463,37 @@ instruct cmpFastUnlock(rFlagsReg cr, iRegP object, iRegP box, iRegPNoSp tmp, iRe
ins_pipe(pipe_serial);
%}
+instruct cmpFastLockLightweight(rFlagsReg cr, iRegP object, iRegP box, iRegPNoSp tmp, iRegPNoSp tmp2)
+%{
+ predicate(LockingMode == LM_LIGHTWEIGHT);
+ match(Set cr (FastLock object box));
+ effect(TEMP tmp, TEMP tmp2);
+
+ ins_cost(5 * INSN_COST);
+ format %{ "fastlock $object,$box\t! kills $tmp,$tmp2" %}
+
+ ins_encode %{
+ __ fast_lock_lightweight($object$$Register, $box$$Register, $tmp$$Register, $tmp2$$Register);
+ %}
+
+ ins_pipe(pipe_serial);
+%}
+
+instruct cmpFastUnlockLightweight(rFlagsReg cr, iRegP object, iRegP box, iRegPNoSp tmp, iRegPNoSp tmp2)
+%{
+ predicate(LockingMode == LM_LIGHTWEIGHT);
+ match(Set cr (FastUnlock object box));
+ effect(TEMP tmp, TEMP tmp2);
+
+ ins_cost(5 * INSN_COST);
+ format %{ "fastunlock $object,$box\t! kills $tmp, $tmp2" %}
+
+ ins_encode %{
+ __ fast_unlock_lightweight($object$$Register, $box$$Register, $tmp$$Register, $tmp2$$Register);
+ %}
+
+ ins_pipe(pipe_serial);
+%}
// ============================================================================
// Safepoint Instructions
diff --git a/src/hotspot/cpu/aarch64/ad_encode.m4 b/src/hotspot/cpu/aarch64/ad_encode.m4
index e6c87cf5b0559..4897998d8709e 100644
--- a/src/hotspot/cpu/aarch64/ad_encode.m4
+++ b/src/hotspot/cpu/aarch64/ad_encode.m4
@@ -19,7 +19,7 @@ dnl Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
dnl or visit www.oracle.com if you need additional information or have any
dnl questions.
dnl
-dnl
+dnl
dnl Process this file with m4 ad_encode.m4 to generate the load/store
dnl patterns used in aarch64.ad.
dnl
@@ -90,4 +90,3 @@ STORE(vRegD,strd,Float,,8)
loadStore(_masm, &MacroAssembler::strb, zr, $mem->opcode(),
as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 1);
%}
-
diff --git a/src/hotspot/cpu/aarch64/assembler_aarch64.cpp b/src/hotspot/cpu/aarch64/assembler_aarch64.cpp
index c7b867a4207d9..76f88764416e3 100644
--- a/src/hotspot/cpu/aarch64/assembler_aarch64.cpp
+++ b/src/hotspot/cpu/aarch64/assembler_aarch64.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2023, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2024, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2014, 2020 Red Hat Inc. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@@ -118,10 +118,6 @@ extern "C" {
else
Disassembler::decode((address)start, (address)start + len);
}
-
- JNIEXPORT void das1(uintptr_t insn) {
- das(insn, 1);
- }
}
#define __ as->
diff --git a/src/hotspot/cpu/aarch64/c1_LIRAssembler_aarch64.cpp b/src/hotspot/cpu/aarch64/c1_LIRAssembler_aarch64.cpp
index b83d618506298..9bd8c6b8e9f88 100644
--- a/src/hotspot/cpu/aarch64/c1_LIRAssembler_aarch64.cpp
+++ b/src/hotspot/cpu/aarch64/c1_LIRAssembler_aarch64.cpp
@@ -53,7 +53,6 @@
#endif
NEEDS_CLEANUP // remove this definitions ?
-const Register IC_Klass = rscratch2; // where the IC klass is cached
const Register SYNC_header = r0; // synchronization header
const Register SHIFT_count = r0; // where count for shift operations must be
@@ -293,27 +292,7 @@ void LIR_Assembler::osr_entry() {
// inline cache check; done before the frame is built.
int LIR_Assembler::check_icache() {
- Register receiver = FrameMap::receiver_opr->as_register();
- Register ic_klass = IC_Klass;
- int start_offset = __ offset();
- __ inline_cache_check(receiver, ic_klass);
-
- // if icache check fails, then jump to runtime routine
- // Note: RECEIVER must still contain the receiver!
- Label dont;
- __ br(Assembler::EQ, dont);
- __ far_jump(RuntimeAddress(SharedRuntime::get_ic_miss_stub()));
-
- // We align the verified entry point unless the method body
- // (including its inline cache check) will fit in a single 64-byte
- // icache line.
- if (! method()->is_accessor() || __ offset() - start_offset > 4 * 4) {
- // force alignment after the cache check.
- __ align(CodeEntryAlignment);
- }
-
- __ bind(dont);
- return start_offset;
+ return __ ic_check(CodeEntryAlignment);
}
void LIR_Assembler::clinit_barrier(ciMethod* method) {
@@ -1230,7 +1209,7 @@ void LIR_Assembler::emit_alloc_array(LIR_OpAllocArray* op) {
len,
tmp1,
tmp2,
- arrayOopDesc::header_size(op->type()),
+ arrayOopDesc::base_offset_in_bytes(op->type()),
array_element_size(op->type()),
op->klass()->as_register(),
*op->stub()->entry());
@@ -2042,7 +2021,7 @@ void LIR_Assembler::emit_static_call_stub() {
__ relocate(static_stub_Relocation::spec(call_pc));
__ emit_static_call_stub();
- assert(__ offset() - start + CompiledStaticCall::to_trampoline_stub_size()
+ assert(__ offset() - start + CompiledDirectCall::to_trampoline_stub_size()
<= call_stub_size(), "stub too big");
__ end_a_stub();
}
diff --git a/src/hotspot/cpu/aarch64/c1_LIRAssembler_aarch64.hpp b/src/hotspot/cpu/aarch64/c1_LIRAssembler_aarch64.hpp
index 43ec189255f9c..ef1b5fe2703e6 100644
--- a/src/hotspot/cpu/aarch64/c1_LIRAssembler_aarch64.hpp
+++ b/src/hotspot/cpu/aarch64/c1_LIRAssembler_aarch64.hpp
@@ -71,8 +71,8 @@ friend class ArrayCopyStub;
void deoptimize_trap(CodeEmitInfo *info);
enum {
- // call stub: CompiledStaticCall::to_interp_stub_size() +
- // CompiledStaticCall::to_trampoline_stub_size()
+ // call stub: CompiledDirectCall::to_interp_stub_size() +
+ // CompiledDirectCall::to_trampoline_stub_size()
_call_stub_size = 13 * NativeInstruction::instruction_size,
_exception_handler_size = DEBUG_ONLY(1*K) NOT_DEBUG(175),
_deopt_handler_size = 7 * NativeInstruction::instruction_size
diff --git a/src/hotspot/cpu/aarch64/c1_MacroAssembler_aarch64.cpp b/src/hotspot/cpu/aarch64/c1_MacroAssembler_aarch64.cpp
index d3a746178f14e..e48d64d90696c 100644
--- a/src/hotspot/cpu/aarch64/c1_MacroAssembler_aarch64.cpp
+++ b/src/hotspot/cpu/aarch64/c1_MacroAssembler_aarch64.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1999, 2023, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2024, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2014, 2021, Red Hat Inc. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@@ -80,12 +80,12 @@ int C1_MacroAssembler::lock_object(Register hdr, Register obj, Register disp_hdr
br(Assembler::NE, slow_case);
}
- // Load object header
- ldr(hdr, Address(obj, hdr_offset));
if (LockingMode == LM_LIGHTWEIGHT) {
lightweight_lock(obj, hdr, temp, rscratch2, slow_case);
} else if (LockingMode == LM_LEGACY) {
Label done;
+ // Load object header
+ ldr(hdr, Address(obj, hdr_offset));
// and mark it as unlocked
orr(hdr, hdr, markWord::unlocked_value);
// save unlocked object header into the displaced header location on the stack
@@ -144,11 +144,6 @@ void C1_MacroAssembler::unlock_object(Register hdr, Register obj, Register disp_
verify_oop(obj);
if (LockingMode == LM_LIGHTWEIGHT) {
- ldr(hdr, Address(obj, oopDesc::mark_offset_in_bytes()));
- // We cannot use tbnz here, the target might be too far away and cannot
- // be encoded.
- tst(hdr, markWord::monitor_value);
- br(Assembler::NE, slow_case);
lightweight_unlock(obj, hdr, temp, rscratch2, slow_case);
} else if (LockingMode == LM_LEGACY) {
// test if object header is pointing to the displaced header, and if so, restore
@@ -193,6 +188,12 @@ void C1_MacroAssembler::initialize_header(Register obj, Register klass, Register
if (len->is_valid()) {
strw(len, Address(obj, arrayOopDesc::length_offset_in_bytes()));
+ int base_offset = arrayOopDesc::length_offset_in_bytes() + BytesPerInt;
+ if (!is_aligned(base_offset, BytesPerWord)) {
+ assert(is_aligned(base_offset, BytesPerInt), "must be 4-byte aligned");
+ // Clear gap/first 4 bytes following the length field.
+ strw(zr, Address(obj, base_offset));
+ }
} else if (UseCompressedClassPointers) {
store_klass_gap(obj, zr);
}
@@ -271,7 +272,7 @@ void C1_MacroAssembler::initialize_object(Register obj, Register klass, Register
verify_oop(obj);
}
-void C1_MacroAssembler::allocate_array(Register obj, Register len, Register t1, Register t2, int header_size, int f, Register klass, Label& slow_case) {
+void C1_MacroAssembler::allocate_array(Register obj, Register len, Register t1, Register t2, int base_offset_in_bytes, int f, Register klass, Label& slow_case) {
assert_different_registers(obj, len, t1, t2, klass);
// determine alignment mask
@@ -284,7 +285,7 @@ void C1_MacroAssembler::allocate_array(Register obj, Register len, Register t1,
const Register arr_size = t2; // okay to be the same
// align object end
- mov(arr_size, (int32_t)header_size * BytesPerWord + MinObjAlignmentInBytesMask);
+ mov(arr_size, (int32_t)base_offset_in_bytes + MinObjAlignmentInBytesMask);
add(arr_size, arr_size, len, ext::uxtw, f);
andr(arr_size, arr_size, ~MinObjAlignmentInBytesMask);
@@ -292,8 +293,11 @@ void C1_MacroAssembler::allocate_array(Register obj, Register len, Register t1,
initialize_header(obj, klass, len, t1, t2);
+ // Align-up to word boundary, because we clear the 4 bytes potentially
+ // following the length field in initialize_header().
+ int base_offset = align_up(base_offset_in_bytes, BytesPerWord);
// clear rest of allocated space
- initialize_body(obj, arr_size, header_size * BytesPerWord, t1, t2);
+ initialize_body(obj, arr_size, base_offset, t1, t2);
if (Compilation::current()->bailed_out()) {
return;
}
@@ -308,17 +312,6 @@ void C1_MacroAssembler::allocate_array(Register obj, Register len, Register t1,
verify_oop(obj);
}
-
-void C1_MacroAssembler::inline_cache_check(Register receiver, Register iCache) {
- verify_oop(receiver);
- // explicit null check not needed since load from [klass_offset] causes a trap
- // check against inline cache
- assert(!MacroAssembler::needs_explicit_null_check(oopDesc::klass_offset_in_bytes()), "must add explicit null check");
-
- cmp_klass(receiver, iCache, rscratch1);
-}
-
-
void C1_MacroAssembler::build_frame(int framesize, int bang_size_in_bytes) {
assert(bang_size_in_bytes >= framesize, "stack bang size incorrect");
// Make sure there is enough stack space for this method's activation.
diff --git a/src/hotspot/cpu/aarch64/c1_MacroAssembler_aarch64.hpp b/src/hotspot/cpu/aarch64/c1_MacroAssembler_aarch64.hpp
index 4aa6206aa6073..d210c21d12b8f 100644
--- a/src/hotspot/cpu/aarch64/c1_MacroAssembler_aarch64.hpp
+++ b/src/hotspot/cpu/aarch64/c1_MacroAssembler_aarch64.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1999, 2023, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2024, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2014, 2021, Red Hat Inc. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@@ -100,7 +100,7 @@ using MacroAssembler::null_check;
// header_size: size of object header in words
// f : element scale factor
// slow_case : exit to slow case implementation if fast allocation fails
- void allocate_array(Register obj, Register len, Register t, Register t2, int header_size, int f, Register klass, Label& slow_case);
+ void allocate_array(Register obj, Register len, Register t, Register t2, int base_offset_in_bytes, int f, Register klass, Label& slow_case);
int rsp_offset() const { return _rsp_offset; }
void set_rsp_offset(int n) { _rsp_offset = n; }
diff --git a/src/hotspot/cpu/aarch64/c1_Runtime1_aarch64.cpp b/src/hotspot/cpu/aarch64/c1_Runtime1_aarch64.cpp
index d2f4744a04914..63a32e714e365 100644
--- a/src/hotspot/cpu/aarch64/c1_Runtime1_aarch64.cpp
+++ b/src/hotspot/cpu/aarch64/c1_Runtime1_aarch64.cpp
@@ -38,7 +38,6 @@
#include "interpreter/interpreter.hpp"
#include "memory/universe.hpp"
#include "nativeInst_aarch64.hpp"
-#include "oops/compiledICHolder.hpp"
#include "oops/oop.inline.hpp"
#include "prims/jvmtiExport.hpp"
#include "register_aarch64.hpp"
diff --git a/src/hotspot/cpu/aarch64/c2_MacroAssembler_aarch64.cpp b/src/hotspot/cpu/aarch64/c2_MacroAssembler_aarch64.cpp
index 8910fba97a558..7e3ceb1f02029 100644
--- a/src/hotspot/cpu/aarch64/c2_MacroAssembler_aarch64.cpp
+++ b/src/hotspot/cpu/aarch64/c2_MacroAssembler_aarch64.cpp
@@ -32,6 +32,7 @@
#include "opto/output.hpp"
#include "opto/subnode.hpp"
#include "runtime/stubRoutines.hpp"
+#include "utilities/globalDefinitions.hpp"
#ifdef PRODUCT
#define BLOCK_COMMENT(str) /* nothing */
@@ -55,6 +56,7 @@ void C2_MacroAssembler::fast_lock(Register objectReg, Register boxReg, Register
Label object_has_monitor;
Label count, no_count;
+ assert(LockingMode != LM_LIGHTWEIGHT, "lightweight locking should use fast_lock_lightweight");
assert_different_registers(oop, box, tmp, disp_hdr);
// Load markWord from object into displaced_header.
@@ -73,7 +75,8 @@ void C2_MacroAssembler::fast_lock(Register objectReg, Register boxReg, Register
if (LockingMode == LM_MONITOR) {
tst(oop, oop); // Set NE to indicate 'failure' -> take slow-path. We know that oop != 0.
b(cont);
- } else if (LockingMode == LM_LEGACY) {
+ } else {
+ assert(LockingMode == LM_LEGACY, "must be");
// Set tmp to be (markWord of object | UNLOCK_VALUE).
orr(tmp, disp_hdr, markWord::unlocked_value);
@@ -102,10 +105,6 @@ void C2_MacroAssembler::fast_lock(Register objectReg, Register boxReg, Register
ands(tmp/*==0?*/, disp_hdr, tmp); // Sets flags for result
str(tmp/*==0, perhaps*/, Address(box, BasicLock::displaced_header_offset_in_bytes()));
b(cont);
- } else {
- assert(LockingMode == LM_LIGHTWEIGHT, "must be");
- lightweight_lock(oop, disp_hdr, tmp, tmp3Reg, no_count);
- b(count);
}
// Handle existing monitor.
@@ -119,14 +118,13 @@ void C2_MacroAssembler::fast_lock(Register objectReg, Register boxReg, Register
cmpxchg(tmp, zr, rthread, Assembler::xword, /*acquire*/ true,
/*release*/ true, /*weak*/ false, tmp3Reg); // Sets flags for result
- if (LockingMode != LM_LIGHTWEIGHT) {
- // Store a non-null value into the box to avoid looking like a re-entrant
- // lock. The fast-path monitor unlock code checks for
- // markWord::monitor_value so use markWord::unused_mark which has the
- // relevant bit set, and also matches ObjectSynchronizer::enter.
- mov(tmp, (address)markWord::unused_mark().value());
- str(tmp, Address(box, BasicLock::displaced_header_offset_in_bytes()));
- }
+ // Store a non-null value into the box to avoid looking like a re-entrant
+ // lock. The fast-path monitor unlock code checks for
+ // markWord::monitor_value so use markWord::unused_mark which has the
+ // relevant bit set, and also matches ObjectSynchronizer::enter.
+ mov(tmp, (address)markWord::unused_mark().value());
+ str(tmp, Address(box, BasicLock::displaced_header_offset_in_bytes()));
+
br(Assembler::EQ, cont); // CAS success means locking succeeded
cmp(tmp3Reg, rthread);
@@ -157,6 +155,7 @@ void C2_MacroAssembler::fast_unlock(Register objectReg, Register boxReg, Registe
Label object_has_monitor;
Label count, no_count;
+ assert(LockingMode != LM_LIGHTWEIGHT, "lightweight locking should use fast_unlock_lightweight");
assert_different_registers(oop, box, tmp, disp_hdr);
if (LockingMode == LM_LEGACY) {
@@ -175,7 +174,8 @@ void C2_MacroAssembler::fast_unlock(Register objectReg, Register boxReg, Registe
if (LockingMode == LM_MONITOR) {
tst(oop, oop); // Set NE to indicate 'failure' -> take slow-path. We know that oop != 0.
b(cont);
- } else if (LockingMode == LM_LEGACY) {
+ } else {
+ assert(LockingMode == LM_LEGACY, "must be");
// Check if it is still a light weight lock, this is is true if we
// see the stack address of the basicLock in the markWord of the
// object.
@@ -183,10 +183,6 @@ void C2_MacroAssembler::fast_unlock(Register objectReg, Register boxReg, Registe
cmpxchg(oop, box, disp_hdr, Assembler::xword, /*acquire*/ false,
/*release*/ true, /*weak*/ false, tmp);
b(cont);
- } else {
- assert(LockingMode == LM_LIGHTWEIGHT, "must be");
- lightweight_unlock(oop, tmp, box, disp_hdr, no_count);
- b(count);
}
assert(oopDesc::mark_offset_in_bytes() == 0, "offset of _mark is not 0");
@@ -196,19 +192,6 @@ void C2_MacroAssembler::fast_unlock(Register objectReg, Register boxReg, Registe
STATIC_ASSERT(markWord::monitor_value <= INT_MAX);
add(tmp, tmp, -(int)markWord::monitor_value); // monitor
- if (LockingMode == LM_LIGHTWEIGHT) {
- // If the owner is anonymous, we need to fix it -- in an outline stub.
- Register tmp2 = disp_hdr;
- ldr(tmp2, Address(tmp, ObjectMonitor::owner_offset()));
- // We cannot use tbnz here, the target might be too far away and cannot
- // be encoded.
- tst(tmp2, (uint64_t)ObjectMonitor::ANONYMOUS_OWNER);
- C2HandleAnonOMOwnerStub* stub = new (Compile::current()->comp_arena()) C2HandleAnonOMOwnerStub(tmp, tmp2);
- Compile::current()->output()->add_stub(stub);
- br(Assembler::NE, stub->entry());
- bind(stub->continuation());
- }
-
ldr(disp_hdr, Address(tmp, ObjectMonitor::recursions_offset()));
Label notRecursive;
@@ -241,6 +224,262 @@ void C2_MacroAssembler::fast_unlock(Register objectReg, Register boxReg, Registe
bind(no_count);
}
+void C2_MacroAssembler::fast_lock_lightweight(Register obj, Register t1,
+ Register t2, Register t3) {
+ assert(LockingMode == LM_LIGHTWEIGHT, "must be");
+ assert_different_registers(obj, t1, t2, t3);
+
+ // Handle inflated monitor.
+ Label inflated;
+ // Finish fast lock successfully. MUST branch to this label with flag == EQ
+ Label locked;
+ // Finish fast lock unsuccessfully. MUST branch to this label with flag == NE
+ Label slow_path;
+
+ if (DiagnoseSyncOnValueBasedClasses != 0) {
+ load_klass(t1, obj);
+ ldrw(t1, Address(t1, Klass::access_flags_offset()));
+ tstw(t1, JVM_ACC_IS_VALUE_BASED_CLASS);
+ br(Assembler::NE, slow_path);
+ }
+
+ const Register t1_mark = t1;
+
+ { // Lightweight locking
+
+ // Push lock to the lock stack and finish successfully. MUST branch to with flag == EQ
+ Label push;
+
+ const Register t2_top = t2;
+ const Register t3_t = t3;
+
+ // Check if lock-stack is full.
+ ldrw(t2_top, Address(rthread, JavaThread::lock_stack_top_offset()));
+ cmpw(t2_top, (unsigned)LockStack::end_offset() - 1);
+ br(Assembler::GT, slow_path);
+
+ // Check if recursive.
+ subw(t3_t, t2_top, oopSize);
+ ldr(t3_t, Address(rthread, t3_t));
+ cmp(obj, t3_t);
+ br(Assembler::EQ, push);
+
+ // Relaxed normal load to check for monitor. Optimization for monitor case.
+ ldr(t1_mark, Address(obj, oopDesc::mark_offset_in_bytes()));
+ tbnz(t1_mark, exact_log2(markWord::monitor_value), inflated);
+
+ // Not inflated
+ assert(oopDesc::mark_offset_in_bytes() == 0, "required to avoid a lea");
+
+ // Try to lock. Transition lock-bits 0b01 => 0b00
+ orr(t1_mark, t1_mark, markWord::unlocked_value);
+ eor(t3_t, t1_mark, markWord::unlocked_value);
+ cmpxchg(/*addr*/ obj, /*expected*/ t1_mark, /*new*/ t3_t, Assembler::xword,
+ /*acquire*/ true, /*release*/ false, /*weak*/ false, noreg);
+ br(Assembler::NE, slow_path);
+
+ bind(push);
+ // After successful lock, push object on lock-stack.
+ str(obj, Address(rthread, t2_top));
+ addw(t2_top, t2_top, oopSize);
+ strw(t2_top, Address(rthread, JavaThread::lock_stack_top_offset()));
+ b(locked);
+ }
+
+ { // Handle inflated monitor.
+ bind(inflated);
+
+ // mark contains the tagged ObjectMonitor*.
+ const Register t1_tagged_monitor = t1_mark;
+ const uintptr_t monitor_tag = markWord::monitor_value;
+ const Register t2_owner_addr = t2;
+ const Register t3_owner = t3;
+
+ // Compute owner address.
+ lea(t2_owner_addr, Address(t1_tagged_monitor, (in_bytes(ObjectMonitor::owner_offset()) - monitor_tag)));
+
+ // CAS owner (null => current thread).
+ cmpxchg(t2_owner_addr, zr, rthread, Assembler::xword, /*acquire*/ true,
+ /*release*/ false, /*weak*/ false, t3_owner);
+ br(Assembler::EQ, locked);
+
+ // Check if recursive.
+ cmp(t3_owner, rthread);
+ br(Assembler::NE, slow_path);
+
+ // Recursive.
+ increment(Address(t1_tagged_monitor, in_bytes(ObjectMonitor::recursions_offset()) - monitor_tag), 1);
+ }
+
+ bind(locked);
+ increment(Address(rthread, JavaThread::held_monitor_count_offset()));
+
+#ifdef ASSERT
+ // Check that locked label is reached with Flags == EQ.
+ Label flag_correct;
+ br(Assembler::EQ, flag_correct);
+ stop("Fast Lock Flag != EQ");
+#endif
+
+ bind(slow_path);
+#ifdef ASSERT
+ // Check that slow_path label is reached with Flags == NE.
+ br(Assembler::NE, flag_correct);
+ stop("Fast Lock Flag != NE");
+ bind(flag_correct);
+#endif
+ // C2 uses the value of Flags (NE vs EQ) to determine the continuation.
+}
+
+void C2_MacroAssembler::fast_unlock_lightweight(Register obj, Register t1, Register t2,
+ Register t3) {
+ assert(LockingMode == LM_LIGHTWEIGHT, "must be");
+ assert_different_registers(obj, t1, t2, t3);
+
+ // Handle inflated monitor.
+ Label inflated, inflated_load_monitor;
+ // Finish fast unlock successfully. MUST branch to this label with flag == EQ
+ Label unlocked;
+ // Finish fast unlock unsuccessfully. MUST branch to this label with flag == NE
+ Label slow_path;
+
+ const Register t1_mark = t1;
+ const Register t2_top = t2;
+ const Register t3_t = t3;
+
+ { // Lightweight unlock
+
+ // Check if obj is top of lock-stack.
+ ldrw(t2_top, Address(rthread, JavaThread::lock_stack_top_offset()));
+ subw(t2_top, t2_top, oopSize);
+ ldr(t3_t, Address(rthread, t2_top));
+ cmp(obj, t3_t);
+ // Top of lock stack was not obj. Must be monitor.
+ br(Assembler::NE, inflated_load_monitor);
+
+ // Pop lock-stack.
+ DEBUG_ONLY(str(zr, Address(rthread, t2_top));)
+ strw(t2_top, Address(rthread, JavaThread::lock_stack_top_offset()));
+
+ // Check if recursive.
+ subw(t3_t, t2_top, oopSize);
+ ldr(t3_t, Address(rthread, t3_t));
+ cmp(obj, t3_t);
+ br(Assembler::EQ, unlocked);
+
+ // Not recursive.
+ // Load Mark.
+ ldr(t1_mark, Address(obj, oopDesc::mark_offset_in_bytes()));
+
+ // Check header for monitor (0b10).
+ tbnz(t1_mark, exact_log2(markWord::monitor_value), inflated);
+
+ // Try to unlock. Transition lock bits 0b00 => 0b01
+ assert(oopDesc::mark_offset_in_bytes() == 0, "required to avoid lea");
+ orr(t3_t, t1_mark, markWord::unlocked_value);
+ cmpxchg(/*addr*/ obj, /*expected*/ t1_mark, /*new*/ t3_t, Assembler::xword,
+ /*acquire*/ false, /*release*/ true, /*weak*/ false, noreg);
+ br(Assembler::EQ, unlocked);
+
+ // Compare and exchange failed.
+ // Restore lock-stack and handle the unlock in runtime.
+ DEBUG_ONLY(str(obj, Address(rthread, t2_top));)
+ addw(t2_top, t2_top, oopSize);
+ str(t2_top, Address(rthread, JavaThread::lock_stack_top_offset()));
+ b(slow_path);
+ }
+
+
+ { // Handle inflated monitor.
+ bind(inflated_load_monitor);
+ ldr(t1_mark, Address(obj, oopDesc::mark_offset_in_bytes()));
+#ifdef ASSERT
+ tbnz(t1_mark, exact_log2(markWord::monitor_value), inflated);
+ stop("Fast Unlock not monitor");
+#endif
+
+ bind(inflated);
+
+#ifdef ASSERT
+ Label check_done;
+ subw(t2_top, t2_top, oopSize);
+ cmpw(t2_top, in_bytes(JavaThread::lock_stack_base_offset()));
+ br(Assembler::LT, check_done);
+ ldr(t3_t, Address(rthread, t2_top));
+ cmp(obj, t3_t);
+ br(Assembler::NE, inflated);
+ stop("Fast Unlock lock on stack");
+ bind(check_done);
+#endif
+
+ // mark contains the tagged ObjectMonitor*.
+ const Register t1_monitor = t1_mark;
+ const uintptr_t monitor_tag = markWord::monitor_value;
+
+ // Untag the monitor.
+ sub(t1_monitor, t1_mark, monitor_tag);
+
+ const Register t2_recursions = t2;
+ Label not_recursive;
+
+ // Check if recursive.
+ ldr(t2_recursions, Address(t1_monitor, ObjectMonitor::recursions_offset()));
+ cbz(t2_recursions, not_recursive);
+
+ // Recursive unlock.
+ sub(t2_recursions, t2_recursions, 1u);
+ str(t2_recursions, Address(t1_monitor, ObjectMonitor::recursions_offset()));
+ // Set flag == EQ
+ cmp(t2_recursions, t2_recursions);
+ b(unlocked);
+
+ bind(not_recursive);
+
+ Label release;
+ const Register t2_owner_addr = t2;
+
+ // Compute owner address.
+ lea(t2_owner_addr, Address(t1_monitor, ObjectMonitor::owner_offset()));
+
+ // Check if the entry lists are empty.
+ ldr(rscratch1, Address(t1_monitor, ObjectMonitor::EntryList_offset()));
+ ldr(t3_t, Address(t1_monitor, ObjectMonitor::cxq_offset()));
+ orr(rscratch1, rscratch1, t3_t);
+ cmp(rscratch1, zr);
+ br(Assembler::EQ, release);
+
+ // The owner may be anonymous and we removed the last obj entry in
+ // the lock-stack. This loses the information about the owner.
+ // Write the thread to the owner field so the runtime knows the owner.
+ str(rthread, Address(t2_owner_addr));
+ b(slow_path);
+
+ bind(release);
+ // Set owner to null.
+ // Release to satisfy the JMM
+ stlr(zr, t2_owner_addr);
+ }
+
+ bind(unlocked);
+ decrement(Address(rthread, JavaThread::held_monitor_count_offset()));
+
+#ifdef ASSERT
+ // Check that unlocked label is reached with Flags == EQ.
+ Label flag_correct;
+ br(Assembler::EQ, flag_correct);
+ stop("Fast Unlock Flag != EQ");
+#endif
+
+ bind(slow_path);
+#ifdef ASSERT
+ // Check that slow_path label is reached with Flags == NE.
+ br(Assembler::NE, flag_correct);
+ stop("Fast Unlock Flag != NE");
+ bind(flag_correct);
+#endif
+ // C2 uses the value of Flags (NE vs EQ) to determine the continuation.
+}
+
// Search for str1 in str2 and return index or -1
// Clobbers: rscratch1, rscratch2, rflags. May also clobber v0-v1, when icnt1==-1.
void C2_MacroAssembler::string_indexof(Register str2, Register str1,
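The control flow of `fast_unlock_lightweight` above is easiest to read as a decision tree over the thread's lock-stack and the object's mark word. The standalone C++ sketch below models that flow under simplified assumptions: a `std::vector` stands in for the fixed-size lock-stack, a `std::atomic` for the mark word, and the `UnlockResult` enum is invented for the model. It is not HotSpot code, just an illustration of the branches the assembly takes.

```cpp
#include <atomic>
#include <cstdint>
#include <vector>

constexpr uintptr_t UNLOCKED_BIT = 0b01;   // lock bits 0b01: unlocked
constexpr uintptr_t MONITOR_BIT  = 0b10;   // lock bits 0b10: inflated monitor

enum class UnlockResult { Unlocked, RecursiveUnlock, InflatedMonitor, SlowPath };

UnlockResult fast_unlock_model(std::vector<uintptr_t>& lock_stack,
                               std::atomic<uintptr_t>& mark_word,
                               uintptr_t obj) {
  // Top of lock-stack was not obj: the lock must be an inflated monitor.
  if (lock_stack.empty() || lock_stack.back() != obj) {
    return UnlockResult::InflatedMonitor;
  }
  lock_stack.pop_back();                                // pop lock-stack

  // If obj is still on the stack, the unlock is recursive and we are done.
  if (!lock_stack.empty() && lock_stack.back() == obj) {
    return UnlockResult::RecursiveUnlock;
  }

  uintptr_t mark = mark_word.load(std::memory_order_relaxed);
  if (mark & MONITOR_BIT) {
    return UnlockResult::InflatedMonitor;               // inflated in the meantime
  }

  // Try to transition the lock bits 0b00 (fast-locked) => 0b01 (unlocked).
  uintptr_t unlocked = mark | UNLOCKED_BIT;
  if (mark_word.compare_exchange_strong(mark, unlocked,
                                        std::memory_order_release)) {
    return UnlockResult::Unlocked;
  }

  // CAS failed: restore the lock-stack entry and defer to the runtime.
  lock_stack.push_back(obj);
  return UnlockResult::SlowPath;
}
```

The inflated-monitor arm of the real code (recursions counter, entry-list check, anonymous owner handling) is intentionally left out of the sketch.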
diff --git a/src/hotspot/cpu/aarch64/c2_MacroAssembler_aarch64.hpp b/src/hotspot/cpu/aarch64/c2_MacroAssembler_aarch64.hpp
index dfa7d88cb93fe..1481f975020c9 100644
--- a/src/hotspot/cpu/aarch64/c2_MacroAssembler_aarch64.hpp
+++ b/src/hotspot/cpu/aarch64/c2_MacroAssembler_aarch64.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2020, 2023, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2020, 2024, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -36,9 +36,11 @@
public:
// Code used by cmpFastLock and cmpFastUnlock mach instructions in .ad file.
- // See full description in macroAssembler_aarch64.cpp.
void fast_lock(Register object, Register box, Register tmp, Register tmp2, Register tmp3);
void fast_unlock(Register object, Register box, Register tmp, Register tmp2);
+ // Code used by cmpFastLockLightweight and cmpFastUnlockLightweight mach instructions in .ad file.
+ void fast_lock_lightweight(Register object, Register t1, Register t2, Register t3);
+ void fast_unlock_lightweight(Register object, Register t1, Register t2, Register t3);
void string_compare(Register str1, Register str2,
Register cnt1, Register cnt2, Register result,
diff --git a/src/hotspot/cpu/aarch64/compiledIC_aarch64.cpp b/src/hotspot/cpu/aarch64/compiledIC_aarch64.cpp
index c58ff8828bce6..23c08f11d1a8b 100644
--- a/src/hotspot/cpu/aarch64/compiledIC_aarch64.cpp
+++ b/src/hotspot/cpu/aarch64/compiledIC_aarch64.cpp
@@ -26,7 +26,6 @@
#include "precompiled.hpp"
#include "asm/macroAssembler.inline.hpp"
#include "code/compiledIC.hpp"
-#include "code/icBuffer.hpp"
#include "code/nmethod.hpp"
#include "logging/log.hpp"
#include "memory/resourceArea.hpp"
@@ -36,7 +35,7 @@
// ----------------------------------------------------------------------------
#define __ _masm.
-address CompiledStaticCall::emit_to_interp_stub(CodeBuffer &cbuf, address mark) {
+address CompiledDirectCall::emit_to_interp_stub(CodeBuffer &cbuf, address mark) {
precond(cbuf.stubs()->start() != badAddress);
precond(cbuf.stubs()->end() != badAddress);
@@ -71,11 +70,11 @@ address CompiledStaticCall::emit_to_interp_stub(CodeBuffer &cbuf, address mark)
}
#undef __
-int CompiledStaticCall::to_interp_stub_size() {
+int CompiledDirectCall::to_interp_stub_size() {
return MacroAssembler::static_call_stub_size();
}
-int CompiledStaticCall::to_trampoline_stub_size() {
+int CompiledDirectCall::to_trampoline_stub_size() {
// Somewhat pessimistically, we count 3 instructions here (although
// there are only two) because we sometimes emit an alignment nop.
// Trampoline stubs are always word aligned.
@@ -83,21 +82,14 @@ int CompiledStaticCall::to_trampoline_stub_size() {
}
// Relocation entries for call stub, compiled java to interpreter.
-int CompiledStaticCall::reloc_to_interp_stub() {
+int CompiledDirectCall::reloc_to_interp_stub() {
return 4; // 3 in emit_to_interp_stub + 1 in emit_call
}
-void CompiledDirectStaticCall::set_to_interpreted(const methodHandle& callee, address entry) {
+void CompiledDirectCall::set_to_interpreted(const methodHandle& callee, address entry) {
address stub = find_stub();
guarantee(stub != nullptr, "stub not found");
- {
- ResourceMark rm;
- log_trace(inlinecache)("CompiledDirectStaticCall@" INTPTR_FORMAT ": set_to_interpreted %s",
- p2i(instruction_address()),
- callee->name_and_sig_as_C_string());
- }
-
// Creation also verifies the object.
NativeMovConstReg* method_holder
= nativeMovConstReg_at(stub + NativeInstruction::instruction_size);
@@ -115,7 +107,7 @@ void CompiledDirectStaticCall::set_to_interpreted(const methodHandle& callee, ad
set_destination_mt_safe(stub);
}
-void CompiledDirectStaticCall::set_stub_to_clean(static_stub_Relocation* static_stub) {
+void CompiledDirectCall::set_stub_to_clean(static_stub_Relocation* static_stub) {
// Reset stub.
address stub = static_stub->addr();
assert(stub != nullptr, "stub not found");
@@ -132,7 +124,7 @@ void CompiledDirectStaticCall::set_stub_to_clean(static_stub_Relocation* static_
// Non-product mode code
#ifndef PRODUCT
-void CompiledDirectStaticCall::verify() {
+void CompiledDirectCall::verify() {
// Verify call.
_call->verify();
_call->verify_alignment();
diff --git a/src/hotspot/cpu/aarch64/frame_aarch64.cpp b/src/hotspot/cpu/aarch64/frame_aarch64.cpp
index c5b2ff8a4c01c..8d0fa8895d15c 100644
--- a/src/hotspot/cpu/aarch64/frame_aarch64.cpp
+++ b/src/hotspot/cpu/aarch64/frame_aarch64.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2023, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2024, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2014, 2020, Red Hat Inc. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@@ -678,7 +678,7 @@ static void printbc(Method *m, intptr_t bcx) {
printf("%s : %s ==> %s\n", m->name_and_sig_as_C_string(), buf, name);
}
-void internal_pf(uintptr_t sp, uintptr_t fp, uintptr_t pc, uintptr_t bcx) {
+static void internal_pf(uintptr_t sp, uintptr_t fp, uintptr_t pc, uintptr_t bcx) {
if (! fp)
return;
diff --git a/src/hotspot/cpu/aarch64/gc/g1/g1BarrierSetAssembler_aarch64.cpp b/src/hotspot/cpu/aarch64/gc/g1/g1BarrierSetAssembler_aarch64.cpp
index 42081d422c869..427987da97141 100644
--- a/src/hotspot/cpu/aarch64/gc/g1/g1BarrierSetAssembler_aarch64.cpp
+++ b/src/hotspot/cpu/aarch64/gc/g1/g1BarrierSetAssembler_aarch64.cpp
@@ -28,8 +28,8 @@
#include "gc/g1/g1BarrierSetAssembler.hpp"
#include "gc/g1/g1BarrierSetRuntime.hpp"
#include "gc/g1/g1CardTable.hpp"
+#include "gc/g1/g1HeapRegion.hpp"
#include "gc/g1/g1ThreadLocalData.hpp"
-#include "gc/g1/heapRegion.hpp"
#include "gc/shared/collectedHeap.hpp"
#include "interpreter/interp_masm.hpp"
#include "runtime/javaThread.hpp"
diff --git a/src/hotspot/cpu/aarch64/globals_aarch64.hpp b/src/hotspot/cpu/aarch64/globals_aarch64.hpp
index 13f2e4b61b9a4..293cc6eb0d0c6 100644
--- a/src/hotspot/cpu/aarch64/globals_aarch64.hpp
+++ b/src/hotspot/cpu/aarch64/globals_aarch64.hpp
@@ -127,8 +127,6 @@ define_pd_global(intx, InlineSmallCode, 1000);
range(1, 99) \
product(ccstr, UseBranchProtection, "none", \
"Branch Protection to use: none, standard, pac-ret") \
- product(bool, AlwaysMergeDMB, false, DIAGNOSTIC, \
- "Always merge DMB instructions in code emission") \
// end of ARCH_FLAGS
diff --git a/src/hotspot/cpu/aarch64/icBuffer_aarch64.cpp b/src/hotspot/cpu/aarch64/icBuffer_aarch64.cpp
deleted file mode 100644
index bd8cfc42600e2..0000000000000
--- a/src/hotspot/cpu/aarch64/icBuffer_aarch64.cpp
+++ /dev/null
@@ -1,82 +0,0 @@
-/*
- * Copyright (c) 1997, 2023, Oracle and/or its affiliates. All rights reserved.
- * Copyright (c) 2014, Red Hat Inc. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#include "precompiled.hpp"
-#include "asm/macroAssembler.hpp"
-#include "asm/macroAssembler.inline.hpp"
-#include "code/icBuffer.hpp"
-#include "gc/shared/collectedHeap.inline.hpp"
-#include "interpreter/bytecodes.hpp"
-#include "memory/resourceArea.hpp"
-#include "nativeInst_aarch64.hpp"
-#include "oops/oop.inline.hpp"
-
-int InlineCacheBuffer::ic_stub_code_size() {
- return (MacroAssembler::far_branches() ? 6 : 4) * NativeInstruction::instruction_size;
-}
-
-#define __ masm->
-
-void InlineCacheBuffer::assemble_ic_buffer_code(address code_begin, void* cached_value, address entry_point) {
- ResourceMark rm;
- CodeBuffer code(code_begin, ic_stub_code_size());
- MacroAssembler* masm = new MacroAssembler(&code);
- // note: even though the code contains an embedded value, we do not need reloc info
- // because
- // (1) the value is old (i.e., doesn't matter for scavenges)
- // (2) these ICStubs are removed *before* a GC happens, so the roots disappear
- // assert(cached_value == nullptr || cached_oop->is_perm(), "must be perm oop");
-
- address start = __ pc();
- Label l;
- __ ldr(rscratch2, l);
- int jump_code_size = __ far_jump(ExternalAddress(entry_point));
- // IC stub code size is not expected to vary depending on target address.
- // We use NOPs to make the [ldr + far_jump + nops + int64] stub size equal to ic_stub_code_size.
- for (int size = NativeInstruction::instruction_size + jump_code_size + 8;
- size < ic_stub_code_size(); size += NativeInstruction::instruction_size) {
- __ nop();
- }
- __ bind(l);
- assert((uintptr_t)__ pc() % wordSize == 0, "");
- __ emit_int64((int64_t)cached_value);
- // Only need to invalidate the 1st two instructions - not the whole ic stub
- ICache::invalidate_range(code_begin, InlineCacheBuffer::ic_stub_code_size());
- assert(__ pc() - start == ic_stub_code_size(), "must be");
-}
-
-address InlineCacheBuffer::ic_buffer_entry_point(address code_begin) {
- NativeMovConstReg* move = nativeMovConstReg_at(code_begin); // creation also verifies the object
- NativeJump* jump = nativeJump_at(code_begin + 4);
- return jump->jump_destination();
-}
-
-
-void* InlineCacheBuffer::ic_buffer_cached_value(address code_begin) {
- // The word containing the cached value is at the end of this IC buffer
- uintptr_t *p = (uintptr_t *)(code_begin + ic_stub_code_size() - wordSize);
- void* o = (void*)*p;
- return o;
-}
diff --git a/src/hotspot/cpu/aarch64/immediate_aarch64.cpp b/src/hotspot/cpu/aarch64/immediate_aarch64.cpp
index 3d87fde2b5bcd..7caafc19fbd31 100644
--- a/src/hotspot/cpu/aarch64/immediate_aarch64.cpp
+++ b/src/hotspot/cpu/aarch64/immediate_aarch64.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2023, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2023, 2024, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2014, 2020, Red Hat Inc. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@@ -53,7 +53,7 @@ struct li_pair {
static struct li_pair InverseLITable[LI_TABLE_SIZE];
// comparator to sort entries in the inverse table
-int compare_immediate_pair(const void *i1, const void *i2)
+static int compare_immediate_pair(const void *i1, const void *i2)
{
struct li_pair *li1 = (struct li_pair *)i1;
struct li_pair *li2 = (struct li_pair *)i2;
@@ -142,7 +142,7 @@ static inline uint32_t uimm(uint32_t val, int hi, int lo)
// result
// a bit string containing count copies of input bit string
//
-uint64_t replicate(uint64_t bits, int nbits, int count)
+static uint64_t replicate(uint64_t bits, int nbits, int count)
{
assert(count > 0, "must be");
assert(nbits > 0, "must be");
@@ -231,8 +231,8 @@ uint64_t replicate(uint64_t bits, int nbits, int count)
// For historical reasons the implementation of this function is much
// more convoluted than is really necessary.
-int expandLogicalImmediate(uint32_t immN, uint32_t immr,
- uint32_t imms, uint64_t &bimm)
+static int expandLogicalImmediate(uint32_t immN, uint32_t immr,
+ uint32_t imms, uint64_t &bimm)
{
int len; // ought to be <= 6
uint32_t levels; // 6 bits
@@ -446,4 +446,3 @@ uint32_t encoding_for_fp_immediate(float immediate)
res = (s << 7) | (r << 4) | f;
return res;
}
-
diff --git a/src/hotspot/cpu/aarch64/interp_masm_aarch64.cpp b/src/hotspot/cpu/aarch64/interp_masm_aarch64.cpp
index 69a61e281f352..b5625b7fc6134 100644
--- a/src/hotspot/cpu/aarch64/interp_masm_aarch64.cpp
+++ b/src/hotspot/cpu/aarch64/interp_masm_aarch64.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2003, 2023, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2024, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2014, 2020, Red Hat Inc. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@@ -701,7 +701,6 @@ void InterpreterMacroAssembler::lock_object(Register lock_reg)
}
if (LockingMode == LM_LIGHTWEIGHT) {
- ldr(tmp, Address(obj_reg, oopDesc::mark_offset_in_bytes()));
lightweight_lock(obj_reg, tmp, tmp2, tmp3, slow_case);
b(count);
} else if (LockingMode == LM_LEGACY) {
@@ -818,22 +817,6 @@ void InterpreterMacroAssembler::unlock_object(Register lock_reg)
if (LockingMode == LM_LIGHTWEIGHT) {
Label slow_case;
-
- // Check for non-symmetric locking. This is allowed by the spec and the interpreter
- // must handle it.
- Register tmp = rscratch1;
- // First check for lock-stack underflow.
- ldrw(tmp, Address(rthread, JavaThread::lock_stack_top_offset()));
- cmpw(tmp, (unsigned)LockStack::start_offset());
- br(Assembler::LE, slow_case);
- // Then check if the top of the lock-stack matches the unlocked object.
- subw(tmp, tmp, oopSize);
- ldr(tmp, Address(rthread, tmp));
- cmpoop(tmp, obj_reg);
- br(Assembler::NE, slow_case);
-
- ldr(header_reg, Address(obj_reg, oopDesc::mark_offset_in_bytes()));
- tbnz(header_reg, exact_log2(markWord::monitor_value), slow_case);
lightweight_unlock(obj_reg, header_reg, swap_reg, tmp_reg, slow_case);
b(count);
bind(slow_case);
diff --git a/src/hotspot/cpu/aarch64/macroAssembler_aarch64.cpp b/src/hotspot/cpu/aarch64/macroAssembler_aarch64.cpp
index 124af3bafbe3a..e88e7ff1f6273 100644
--- a/src/hotspot/cpu/aarch64/macroAssembler_aarch64.cpp
+++ b/src/hotspot/cpu/aarch64/macroAssembler_aarch64.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2023, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2024, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2014, 2021, Red Hat Inc. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@@ -23,12 +23,11 @@
*
*/
-#include
-
#include "precompiled.hpp"
#include "asm/assembler.hpp"
#include "asm/assembler.inline.hpp"
#include "ci/ciEnv.hpp"
+#include "code/compiledIC.hpp"
#include "compiler/compileTask.hpp"
#include "compiler/disassembler.hpp"
#include "compiler/oopMap.hpp"
@@ -55,6 +54,7 @@
#include "runtime/jniHandles.inline.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubRoutines.hpp"
+#include "utilities/globalDefinitions.hpp"
#include "utilities/powerOfTwo.hpp"
#ifdef COMPILER1
#include "c1/c1_LIRAssembler.hpp"
@@ -66,6 +66,8 @@
#include "opto/output.hpp"
#endif
+#include
+
#ifdef PRODUCT
#define BLOCK_COMMENT(str) /* nothing */
#else
@@ -965,7 +967,7 @@ int MacroAssembler::max_trampoline_stub_size() {
}
void MacroAssembler::emit_static_call_stub() {
- // CompiledDirectStaticCall::set_to_interpreted knows the
+ // CompiledDirectCall::set_to_interpreted knows the
// exact layout of this stub.
isb();
@@ -995,10 +997,51 @@ address MacroAssembler::ic_call(address entry, jint method_index) {
// address const_ptr = long_constant((jlong)Universe::non_oop_word());
// uintptr_t offset;
// ldr_constant(rscratch2, const_ptr);
- movptr(rscratch2, (uintptr_t)Universe::non_oop_word());
+ movptr(rscratch2, (intptr_t)Universe::non_oop_word());
return trampoline_call(Address(entry, rh));
}
+int MacroAssembler::ic_check_size() {
+ if (target_needs_far_branch(CAST_FROM_FN_PTR(address, SharedRuntime::get_ic_miss_stub()))) {
+ return NativeInstruction::instruction_size * 7;
+ } else {
+ return NativeInstruction::instruction_size * 5;
+ }
+}
+
+int MacroAssembler::ic_check(int end_alignment) {
+ Register receiver = j_rarg0;
+ Register data = rscratch2;
+ Register tmp1 = rscratch1;
+ Register tmp2 = r10;
+
+ // The UEP of a code blob ensures that the VEP is padded. However, the padding of the UEP is placed
+ // before the inline cache check, so we don't have to execute any nop instructions when dispatching
+ // through the UEP, yet we can ensure that the VEP is aligned appropriately. That's why we align
+ // before the inline cache check here, and not after
+ align(end_alignment, offset() + ic_check_size());
+
+ int uep_offset = offset();
+
+ if (UseCompressedClassPointers) {
+ ldrw(tmp1, Address(receiver, oopDesc::klass_offset_in_bytes()));
+ ldrw(tmp2, Address(data, CompiledICData::speculated_klass_offset()));
+ cmpw(tmp1, tmp2);
+ } else {
+ ldr(tmp1, Address(receiver, oopDesc::klass_offset_in_bytes()));
+ ldr(tmp2, Address(data, CompiledICData::speculated_klass_offset()));
+ cmp(tmp1, tmp2);
+ }
+
+ Label dont;
+ br(Assembler::EQ, dont);
+ far_jump(RuntimeAddress(SharedRuntime::get_ic_miss_stub()));
+ bind(dont);
+ assert((offset() % end_alignment) == 0, "Misaligned verified entry point");
+
+ return uep_offset;
+}
+
// Implementation of call_VM versions
void MacroAssembler::call_VM(Register oop_result,
@@ -1100,7 +1143,14 @@ void MacroAssembler::get_vm_result_2(Register metadata_result, Register java_thr
}
void MacroAssembler::align(int modulus) {
- while (offset() % modulus != 0) nop();
+ align(modulus, offset());
+}
+
+// Ensure that the code at target bytes offset from the current offset() is aligned
+// according to modulus.
+void MacroAssembler::align(int modulus, int target) {
+ int delta = target - offset();
+ while ((offset() + delta) % modulus != 0) nop();
}
void MacroAssembler::post_call_nop() {
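The new two-argument `align(modulus, target)` pads with nops until a future offset, not the current one, is aligned; `ic_check()` uses it so that the verified entry point emitted after the inline cache check lands on the requested boundary. A minimal model, assuming 4-byte instructions and plain integer offsets rather than HotSpot types:

```cpp
// Hypothetical model of align(int modulus, int target).
struct AsmModel {
  int current = 0;                            // current code offset in bytes

  void emit_nop() { current += 4; }           // AArch64 instructions are 4 bytes

  void align(int modulus, int target) {
    // delta is the distance from here to the spot that must end up aligned;
    // each emitted nop pushes both the current offset and that spot forward.
    int delta = target - current;
    while ((current + delta) % modulus != 0) emit_nop();
  }
};
```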
@@ -1197,7 +1247,7 @@ void MacroAssembler::lookup_interface_method(Register recv_klass,
}
// Look up the method for a megamorphic invokeinterface call in a single pass over itable:
-// - check recv_klass (actual object class) is a subtype of resolved_klass from CompiledICHolder
+// - check recv_klass (actual object class) is a subtype of resolved_klass from CompiledICData
// - find a holder_klass (class that implements the method) vtable offset and get the method from vtable by index
// The target method is determined by <intf_klass, itable_index>.
// The receiver klass is in recv_klass.
@@ -2066,21 +2116,14 @@ void MacroAssembler::membar(Membar_mask_bits order_constraint) {
address last = code()->last_insn();
if (last != nullptr && nativeInstruction_at(last)->is_Membar() && prev == last) {
NativeMembar *bar = NativeMembar_at(prev);
- // Don't promote DMB ST|DMB LD to DMB (a full barrier) because
- // doing so would introduce a StoreLoad which the caller did not
- // intend
- if (AlwaysMergeDMB || bar->get_kind() == order_constraint
- || bar->get_kind() == AnyAny
- || order_constraint == AnyAny) {
- // We are merging two memory barrier instructions. On AArch64 we
- // can do this simply by ORing them together.
- bar->set_kind(bar->get_kind() | order_constraint);
- BLOCK_COMMENT("merged membar");
- return;
- }
+ // We are merging two memory barrier instructions. On AArch64 we
+ // can do this simply by ORing them together.
+ bar->set_kind(bar->get_kind() | order_constraint);
+ BLOCK_COMMENT("merged membar");
+ } else {
+ code()->set_last_insn(pc());
+ dmb(Assembler::barrier(order_constraint));
}
- code()->set_last_insn(pc());
- dmb(Assembler::barrier(order_constraint));
}
bool MacroAssembler::try_merge_ldst(Register rt, const Address &adr, size_t size_in_bytes, bool is_store) {
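The merging path in `membar()` above relies on barrier kinds being bit masks, so ORing the kinds of two adjacent DMB instructions produces a single barrier that is at least as strong as executing both. A toy illustration; the bit values are invented for the model, not the Assembler encodings:

```cpp
#include <cstdint>
#include <cstdio>

constexpr uint32_t BARRIER_LOAD  = 0b01;               // orders loads
constexpr uint32_t BARRIER_STORE = 0b10;               // orders stores
constexpr uint32_t BARRIER_ANY   = BARRIER_LOAD | BARRIER_STORE;

// Merging two back-to-back barriers: the union of their constraints.
uint32_t merge_barriers(uint32_t prev_kind, uint32_t next_kind) {
  return prev_kind | next_kind;
}

int main() {
  std::printf("%u\n", merge_barriers(BARRIER_LOAD, BARRIER_STORE)); // 3 == BARRIER_ANY
  return 0;
}
```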
@@ -6339,97 +6382,122 @@ void MacroAssembler::double_move(VMRegPair src, VMRegPair dst, Register tmp) {
}
// Implements lightweight-locking.
-// Branches to slow upon failure to lock the object, with ZF cleared.
-// Falls through upon success with ZF set.
//
// - obj: the object to be locked
-// - hdr: the header, already loaded from obj, will be destroyed
-// - t1, t2: temporary registers, will be destroyed
-void MacroAssembler::lightweight_lock(Register obj, Register hdr, Register t1, Register t2, Label& slow) {
+// - t1, t2, t3: temporary registers, will be destroyed
+// - slow: branched to if locking fails, absolute offset may be larger than 32KB (imm14 encoding).
+void MacroAssembler::lightweight_lock(Register obj, Register t1, Register t2, Register t3, Label& slow) {
assert(LockingMode == LM_LIGHTWEIGHT, "only used with new lightweight locking");
- assert_different_registers(obj, hdr, t1, t2, rscratch1);
-
- // Check if we would have space on lock-stack for the object.
- ldrw(t1, Address(rthread, JavaThread::lock_stack_top_offset()));
- cmpw(t1, (unsigned)LockStack::end_offset() - 1);
- br(Assembler::GT, slow);
-
- // Load (object->mark() | 1) into hdr
- orr(hdr, hdr, markWord::unlocked_value);
- // Clear lock-bits, into t2
- eor(t2, hdr, markWord::unlocked_value);
- // Try to swing header from unlocked to locked
- // Clobbers rscratch1 when UseLSE is false
- cmpxchg(/*addr*/ obj, /*expected*/ hdr, /*new*/ t2, Assembler::xword,
- /*acquire*/ true, /*release*/ true, /*weak*/ false, t1);
+ assert_different_registers(obj, t1, t2, t3, rscratch1);
+
+ Label push;
+ const Register top = t1;
+ const Register mark = t2;
+ const Register t = t3;
+
+ // Preload the markWord. It is important that this is the first
+ // instruction emitted as it is part of C1's null check semantics.
+ ldr(mark, Address(obj, oopDesc::mark_offset_in_bytes()));
+
+ // Check if the lock-stack is full.
+ ldrw(top, Address(rthread, JavaThread::lock_stack_top_offset()));
+ cmpw(top, (unsigned)LockStack::end_offset());
+ br(Assembler::GE, slow);
+
+ // Check for recursion.
+ subw(t, top, oopSize);
+ ldr(t, Address(rthread, t));
+ cmp(obj, t);
+ br(Assembler::EQ, push);
+
+ // Check header for monitor (0b10).
+ tst(mark, markWord::monitor_value);
+ br(Assembler::NE, slow);
+
+ // Try to lock. Transition lock bits 0b01 => 0b00
+ assert(oopDesc::mark_offset_in_bytes() == 0, "required to avoid lea");
+ orr(mark, mark, markWord::unlocked_value);
+ eor(t, mark, markWord::unlocked_value);
+ cmpxchg(/*addr*/ obj, /*expected*/ mark, /*new*/ t, Assembler::xword,
+ /*acquire*/ true, /*release*/ false, /*weak*/ false, noreg);
br(Assembler::NE, slow);
- // After successful lock, push object on lock-stack
- ldrw(t1, Address(rthread, JavaThread::lock_stack_top_offset()));
- str(obj, Address(rthread, t1));
- addw(t1, t1, oopSize);
- strw(t1, Address(rthread, JavaThread::lock_stack_top_offset()));
+ bind(push);
+ // After successful lock, push object on lock-stack.
+ str(obj, Address(rthread, top));
+ addw(top, top, oopSize);
+ strw(top, Address(rthread, JavaThread::lock_stack_top_offset()));
}
// Implements lightweight-unlocking.
-// Branches to slow upon failure, with ZF cleared.
-// Falls through upon success, with ZF set.
//
// - obj: the object to be unlocked
-// - hdr: the (pre-loaded) header of the object
-// - t1, t2: temporary registers
-void MacroAssembler::lightweight_unlock(Register obj, Register hdr, Register t1, Register t2, Label& slow) {
+// - t1, t2, t3: temporary registers
+// - slow: branched to if unlocking fails, absolute offset may be larger than 32KB (imm14 encoding).
+void MacroAssembler::lightweight_unlock(Register obj, Register t1, Register t2, Register t3, Label& slow) {
assert(LockingMode == LM_LIGHTWEIGHT, "only used with new lightweight locking");
- assert_different_registers(obj, hdr, t1, t2, rscratch1);
+ // cmpxchg clobbers rscratch1.
+ assert_different_registers(obj, t1, t2, t3, rscratch1);
#ifdef ASSERT
{
- // The following checks rely on the fact that LockStack is only ever modified by
- // its owning thread, even if the lock got inflated concurrently; removal of LockStack
- // entries after inflation will happen delayed in that case.
-
// Check for lock-stack underflow.
Label stack_ok;
ldrw(t1, Address(rthread, JavaThread::lock_stack_top_offset()));
cmpw(t1, (unsigned)LockStack::start_offset());
- br(Assembler::GT, stack_ok);
+ br(Assembler::GE, stack_ok);
STOP("Lock-stack underflow");
bind(stack_ok);
}
- {
- // Check if the top of the lock-stack matches the unlocked object.
- Label tos_ok;
- subw(t1, t1, oopSize);
- ldr(t1, Address(rthread, t1));
- cmpoop(t1, obj);
- br(Assembler::EQ, tos_ok);
- STOP("Top of lock-stack does not match the unlocked object");
- bind(tos_ok);
- }
- {
- // Check that hdr is fast-locked.
- Label hdr_ok;
- tst(hdr, markWord::lock_mask_in_place);
- br(Assembler::EQ, hdr_ok);
- STOP("Header is not fast-locked");
- bind(hdr_ok);
- }
#endif
- // Load the new header (unlocked) into t1
- orr(t1, hdr, markWord::unlocked_value);
+ Label unlocked, push_and_slow;
+ const Register top = t1;
+ const Register mark = t2;
+ const Register t = t3;
- // Try to swing header from locked to unlocked
- // Clobbers rscratch1 when UseLSE is false
- cmpxchg(obj, hdr, t1, Assembler::xword,
- /*acquire*/ true, /*release*/ true, /*weak*/ false, t2);
+ // Check if obj is top of lock-stack.
+ ldrw(top, Address(rthread, JavaThread::lock_stack_top_offset()));
+ subw(top, top, oopSize);
+ ldr(t, Address(rthread, top));
+ cmp(obj, t);
br(Assembler::NE, slow);
- // After successful unlock, pop object from lock-stack
- ldrw(t1, Address(rthread, JavaThread::lock_stack_top_offset()));
- subw(t1, t1, oopSize);
+ // Pop lock-stack.
+ DEBUG_ONLY(str(zr, Address(rthread, top));)
+ strw(top, Address(rthread, JavaThread::lock_stack_top_offset()));
+
+ // Check if recursive.
+ subw(t, top, oopSize);
+ ldr(t, Address(rthread, t));
+ cmp(obj, t);
+ br(Assembler::EQ, unlocked);
+
+ // Not recursive. Check header for monitor (0b10).
+ ldr(mark, Address(obj, oopDesc::mark_offset_in_bytes()));
+ tbnz(mark, log2i_exact(markWord::monitor_value), push_and_slow);
+
#ifdef ASSERT
- str(zr, Address(rthread, t1));
+ // Check header not unlocked (0b01).
+ Label not_unlocked;
+ tbz(mark, log2i_exact(markWord::unlocked_value), not_unlocked);
+ stop("lightweight_unlock already unlocked");
+ bind(not_unlocked);
#endif
- strw(t1, Address(rthread, JavaThread::lock_stack_top_offset()));
+
+ // Try to unlock. Transition lock bits 0b00 => 0b01
+ assert(oopDesc::mark_offset_in_bytes() == 0, "required to avoid lea");
+ orr(t, mark, markWord::unlocked_value);
+ cmpxchg(obj, mark, t, Assembler::xword,
+ /*acquire*/ false, /*release*/ true, /*weak*/ false, noreg);
+ br(Assembler::EQ, unlocked);
+
+ bind(push_and_slow);
+ // Restore lock-stack and handle the unlock in runtime.
+ DEBUG_ONLY(str(obj, Address(rthread, top));)
+ addw(top, top, oopSize);
+ strw(top, Address(rthread, JavaThread::lock_stack_top_offset()));
+ b(slow);
+
+ bind(unlocked);
}
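Mirroring the unlock sketch earlier, the fast path of the reworked `lightweight_lock` above checks lock-stack capacity, then recursion, then the monitor bit, and finally attempts a CAS that clears the unlocked bit before pushing `obj` onto the lock-stack. A simplified standalone model; the container, the constants and the capacity are assumptions of the model, not HotSpot definitions:

```cpp
#include <atomic>
#include <cstdint>
#include <vector>

constexpr uintptr_t UNLOCKED_BIT = 0b01;
constexpr uintptr_t MONITOR_BIT  = 0b10;
constexpr size_t    LOCK_STACK_CAPACITY = 8;           // assumed for the model

bool fast_lock_model(std::vector<uintptr_t>& lock_stack,
                     std::atomic<uintptr_t>& mark_word,
                     uintptr_t obj) {
  if (lock_stack.size() >= LOCK_STACK_CAPACITY) {
    return false;                                      // lock-stack full: slow path
  }
  if (!lock_stack.empty() && lock_stack.back() == obj) {
    lock_stack.push_back(obj);                         // recursive fast-lock: just push
    return true;
  }
  uintptr_t mark = mark_word.load(std::memory_order_relaxed);
  if (mark & MONITOR_BIT) {
    return false;                                      // already inflated: slow path
  }
  // Try to transition the lock bits 0b01 (unlocked) => 0b00 (fast-locked).
  uintptr_t expected = mark | UNLOCKED_BIT;
  uintptr_t desired  = expected & ~UNLOCKED_BIT;
  if (!mark_word.compare_exchange_strong(expected, desired,
                                         std::memory_order_acquire)) {
    return false;                                      // contended or changed: slow path
  }
  lock_stack.push_back(obj);                           // record the owner on the lock-stack
  return true;
}
```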
diff --git a/src/hotspot/cpu/aarch64/macroAssembler_aarch64.hpp b/src/hotspot/cpu/aarch64/macroAssembler_aarch64.hpp
index e92e0ee6aa934..dad7ec4d4975e 100644
--- a/src/hotspot/cpu/aarch64/macroAssembler_aarch64.hpp
+++ b/src/hotspot/cpu/aarch64/macroAssembler_aarch64.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2023, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2024, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2014, 2021, Red Hat Inc. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@@ -720,6 +720,7 @@ class MacroAssembler: public Assembler {
// Alignment
void align(int modulus);
+ void align(int modulus, int target);
// nop
void post_call_nop();
@@ -1247,6 +1248,8 @@ class MacroAssembler: public Assembler {
// Emit the CompiledIC call idiom
address ic_call(address entry, jint method_index = 0);
+ static int ic_check_size();
+ int ic_check(int end_alignment);
public:
@@ -1599,8 +1602,8 @@ class MacroAssembler: public Assembler {
// Code for java.lang.Thread::onSpinWait() intrinsic.
void spin_wait();
- void lightweight_lock(Register obj, Register hdr, Register t1, Register t2, Label& slow);
- void lightweight_unlock(Register obj, Register hdr, Register t1, Register t2, Label& slow);
+ void lightweight_lock(Register obj, Register t1, Register t2, Register t3, Label& slow);
+ void lightweight_unlock(Register obj, Register t1, Register t2, Register t3, Label& slow);
private:
// Check the current thread doesn't need a cross modify fence.
diff --git a/src/hotspot/cpu/aarch64/sharedRuntime_aarch64.cpp b/src/hotspot/cpu/aarch64/sharedRuntime_aarch64.cpp
index 82da734611693..97a10afde7ab2 100644
--- a/src/hotspot/cpu/aarch64/sharedRuntime_aarch64.cpp
+++ b/src/hotspot/cpu/aarch64/sharedRuntime_aarch64.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2003, 2023, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2024, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2014, 2021, Red Hat Inc. All rights reserved.
* Copyright (c) 2021, Azul Systems, Inc. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
@@ -30,7 +30,6 @@
#include "code/codeCache.hpp"
#include "code/compiledIC.hpp"
#include "code/debugInfoRec.hpp"
-#include "code/icBuffer.hpp"
#include "code/vtableStubs.hpp"
#include "compiler/oopMap.hpp"
#include "gc/shared/barrierSetAssembler.hpp"
@@ -39,7 +38,6 @@
#include "logging/log.hpp"
#include "memory/resourceArea.hpp"
#include "nativeInst_aarch64.hpp"
-#include "oops/compiledICHolder.hpp"
#include "oops/klass.inline.hpp"
#include "oops/method.inline.hpp"
#include "prims/methodHandles.hpp"
@@ -740,9 +738,7 @@ AdapterHandlerEntry* SharedRuntime::generate_i2c2i_adapters(MacroAssembler *masm
address c2i_unverified_entry = __ pc();
Label skip_fixup;
- Label ok;
-
- Register holder = rscratch2;
+ Register data = rscratch2;
Register receiver = j_rarg0;
Register tmp = r10; // A call-clobbered register not used for arg passing
@@ -757,17 +753,12 @@ AdapterHandlerEntry* SharedRuntime::generate_i2c2i_adapters(MacroAssembler *masm
{
__ block_comment("c2i_unverified_entry {");
- __ load_klass(rscratch1, receiver);
- __ ldr(tmp, Address(holder, CompiledICHolder::holder_klass_offset()));
- __ cmp(rscratch1, tmp);
- __ ldr(rmethod, Address(holder, CompiledICHolder::holder_metadata_offset()));
- __ br(Assembler::EQ, ok);
- __ far_jump(RuntimeAddress(SharedRuntime::get_ic_miss_stub()));
-
- __ bind(ok);
// Method might have been compiled since the call site was patched to
// interpreted; if that is the case treat it as a miss so we can get
// the call site corrected.
+ __ ic_check(1 /* end_alignment */);
+ __ ldr(rmethod, Address(data, CompiledICData::speculated_method_offset()));
+
__ ldr(rscratch1, Address(rmethod, in_bytes(Method::code_offset())));
__ cbz(rscratch1, skip_fixup);
__ far_jump(RuntimeAddress(SharedRuntime::get_ic_miss_stub()));
@@ -1118,7 +1109,7 @@ static void gen_continuation_enter(MacroAssembler* masm,
__ b(exit);
CodeBuffer* cbuf = masm->code_section()->outer();
- address stub = CompiledStaticCall::emit_to_interp_stub(*cbuf, tr_call);
+ address stub = CompiledDirectCall::emit_to_interp_stub(*cbuf, tr_call);
if (stub == nullptr) {
fatal("CodeCache is full at gen_continuation_enter");
}
@@ -1183,7 +1174,7 @@ static void gen_continuation_enter(MacroAssembler* masm,
}
CodeBuffer* cbuf = masm->code_section()->outer();
- address stub = CompiledStaticCall::emit_to_interp_stub(*cbuf, tr_call);
+ address stub = CompiledDirectCall::emit_to_interp_stub(*cbuf, tr_call);
if (stub == nullptr) {
fatal("CodeCache is full at gen_continuation_enter");
}
@@ -1539,25 +1530,15 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
// restoring them except rfp. rfp is the only callee save register
// as far as the interpreter and the compiler(s) are concerned.
-
- const Register ic_reg = rscratch2;
const Register receiver = j_rarg0;
- Label hit;
Label exception_pending;
- assert_different_registers(ic_reg, receiver, rscratch1);
+ assert_different_registers(receiver, rscratch1);
__ verify_oop(receiver);
- __ cmp_klass(receiver, ic_reg, rscratch1);
- __ br(Assembler::EQ, hit);
-
- __ far_jump(RuntimeAddress(SharedRuntime::get_ic_miss_stub()));
+ __ ic_check(8 /* end_alignment */);
// Verified entry point must be aligned
- __ align(8);
-
- __ bind(hit);
-
int vep_offset = ((intptr_t)__ pc()) - start;
// If we have to make this method not-entrant we'll overwrite its
@@ -1815,7 +1796,6 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
__ br(Assembler::NE, slow_path_lock);
} else {
assert(LockingMode == LM_LIGHTWEIGHT, "must be");
- __ ldr(swap_reg, Address(obj_reg, oopDesc::mark_offset_in_bytes()));
__ lightweight_lock(obj_reg, swap_reg, tmp, lock_tmp, slow_path_lock);
}
__ bind(count);
@@ -1958,8 +1938,6 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
__ decrement(Address(rthread, JavaThread::held_monitor_count_offset()));
} else {
assert(LockingMode == LM_LIGHTWEIGHT, "");
- __ ldr(old_hdr, Address(obj_reg, oopDesc::mark_offset_in_bytes()));
- __ tbnz(old_hdr, exact_log2(markWord::monitor_value), slow_path_unlock);
__ lightweight_unlock(obj_reg, old_hdr, swap_reg, lock_tmp, slow_path_unlock);
__ decrement(Address(rthread, JavaThread::held_monitor_count_offset()));
}
diff --git a/src/hotspot/cpu/aarch64/upcallLinker_aarch64.cpp b/src/hotspot/cpu/aarch64/upcallLinker_aarch64.cpp
index 14e9764457508..28ec07815be5c 100644
--- a/src/hotspot/cpu/aarch64/upcallLinker_aarch64.cpp
+++ b/src/hotspot/cpu/aarch64/upcallLinker_aarch64.cpp
@@ -245,9 +245,13 @@ address UpcallLinker::make_upcall_stub(jobject receiver, Method* entry,
__ mov_metadata(rmethod, entry);
__ str(rmethod, Address(rthread, JavaThread::callee_target_offset())); // just in case callee is deoptimized
+ __ push_cont_fastpath(rthread);
+
__ ldr(rscratch1, Address(rmethod, Method::from_compiled_offset()));
__ blr(rscratch1);
+ __ pop_cont_fastpath(rthread);
+
// return value shuffle
if (!needs_return_buffer) {
#ifdef ASSERT
diff --git a/src/hotspot/cpu/aarch64/vm_version_aarch64.cpp b/src/hotspot/cpu/aarch64/vm_version_aarch64.cpp
index b53e427649781..18f310c746cd4 100644
--- a/src/hotspot/cpu/aarch64/vm_version_aarch64.cpp
+++ b/src/hotspot/cpu/aarch64/vm_version_aarch64.cpp
@@ -191,9 +191,6 @@ void VM_Version::initialize() {
if (FLAG_IS_DEFAULT(UseSIMDForMemoryOps)) {
FLAG_SET_DEFAULT(UseSIMDForMemoryOps, true);
}
- if (FLAG_IS_DEFAULT(AlwaysMergeDMB)) {
- FLAG_SET_DEFAULT(AlwaysMergeDMB, true);
- }
}
// Cortex A53
diff --git a/src/hotspot/cpu/aarch64/vm_version_aarch64.hpp b/src/hotspot/cpu/aarch64/vm_version_aarch64.hpp
index 0a85d339a552f..6883dc0d93e16 100644
--- a/src/hotspot/cpu/aarch64/vm_version_aarch64.hpp
+++ b/src/hotspot/cpu/aarch64/vm_version_aarch64.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2023, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2024, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2014, 2020, Red Hat Inc. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@@ -169,6 +169,7 @@ enum Ampere_CPU_Model {
// Aarch64 supports fast class initialization checks
static bool supports_fast_class_init_checks() { return true; }
constexpr static bool supports_stack_watermark_barrier() { return true; }
+ constexpr static bool supports_recursive_lightweight_locking() { return true; }
static void get_compatible_board(char *buf, int buflen);
diff --git a/src/hotspot/cpu/aarch64/vtableStubs_aarch64.cpp b/src/hotspot/cpu/aarch64/vtableStubs_aarch64.cpp
index c895ff5cc0ec1..2bb53d16a3c97 100644
--- a/src/hotspot/cpu/aarch64/vtableStubs_aarch64.cpp
+++ b/src/hotspot/cpu/aarch64/vtableStubs_aarch64.cpp
@@ -26,10 +26,10 @@
#include "precompiled.hpp"
#include "asm/assembler.inline.hpp"
#include "asm/macroAssembler.inline.hpp"
+#include "code/compiledIC.hpp"
#include "code/vtableStubs.hpp"
#include "interp_masm_aarch64.hpp"
#include "memory/resourceArea.hpp"
-#include "oops/compiledICHolder.hpp"
#include "oops/instanceKlass.hpp"
#include "oops/klassVtable.hpp"
#include "runtime/sharedRuntime.hpp"
@@ -168,22 +168,22 @@ VtableStub* VtableStubs::create_itable_stub(int itable_index) {
assert(VtableStub::receiver_location() == j_rarg0->as_VMReg(), "receiver expected in j_rarg0");
// Entry arguments:
- // rscratch2: CompiledICHolder
+ // rscratch2: CompiledICData
// j_rarg0: Receiver
// This stub is called from compiled code which has no callee-saved registers,
// so all registers except arguments are free at this point.
const Register recv_klass_reg = r10;
- const Register holder_klass_reg = r16; // declaring interface klass (DECC)
+ const Register holder_klass_reg = r16; // declaring interface klass (DEFC)
const Register resolved_klass_reg = r17; // resolved interface klass (REFC)
const Register temp_reg = r11;
const Register temp_reg2 = r15;
- const Register icholder_reg = rscratch2;
+ const Register icdata_reg = rscratch2;
Label L_no_such_interface;
- __ ldr(resolved_klass_reg, Address(icholder_reg, CompiledICHolder::holder_klass_offset()));
- __ ldr(holder_klass_reg, Address(icholder_reg, CompiledICHolder::holder_metadata_offset()));
+ __ ldr(resolved_klass_reg, Address(icdata_reg, CompiledICData::itable_refc_klass_offset()));
+ __ ldr(holder_klass_reg, Address(icdata_reg, CompiledICData::itable_defc_klass_offset()));
start_pc = __ pc();
diff --git a/src/hotspot/cpu/arm/arm.ad b/src/hotspot/cpu/arm/arm.ad
index 6b18e76e6d7eb..1a833b08c4cf4 100644
--- a/src/hotspot/cpu/arm/arm.ad
+++ b/src/hotspot/cpu/arm/arm.ad
@@ -869,12 +869,7 @@ uint BoxLockNode::size(PhaseRegAlloc *ra_) const {
#define R_RTEMP "R_R12"
void MachUEPNode::format( PhaseRegAlloc *ra_, outputStream *st ) const {
st->print_cr("\nUEP:");
- if (UseCompressedClassPointers) {
- st->print_cr("\tLDR_w " R_RTEMP ",[R_R0 + oopDesc::klass_offset_in_bytes]\t! Inline cache check");
- st->print_cr("\tdecode_klass " R_RTEMP);
- } else {
- st->print_cr("\tLDR " R_RTEMP ",[R_R0 + oopDesc::klass_offset_in_bytes]\t! Inline cache check");
- }
+ st->print_cr("\tLDR " R_RTEMP ",[R_R0 + oopDesc::klass_offset_in_bytes]\t! Inline cache check");
st->print_cr("\tCMP " R_RTEMP ",R_R8" );
st->print ("\tB.NE SharedRuntime::handle_ic_miss_stub");
}
@@ -882,13 +877,7 @@ void MachUEPNode::format( PhaseRegAlloc *ra_, outputStream *st ) const {
void MachUEPNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
C2_MacroAssembler _masm(&cbuf);
- Register iCache = reg_to_register_object(Matcher::inline_cache_reg_encode());
- assert(iCache == Ricklass, "should be");
- Register receiver = R0;
-
- __ load_klass(Rtemp, receiver);
- __ cmp(Rtemp, iCache);
- __ jump(SharedRuntime::get_ic_miss_stub(), relocInfo::runtime_call_type, noreg, ne);
+ __ ic_check(InteriorEntryAlignment);
}
uint MachUEPNode::size(PhaseRegAlloc *ra_) const {
@@ -1241,7 +1230,7 @@ encode %{
emit_call_reloc(cbuf, as_MachCall(), $meth, rspec);
// Emit stubs for static call.
- address stub = CompiledStaticCall::emit_to_interp_stub(cbuf);
+ address stub = CompiledDirectCall::emit_to_interp_stub(cbuf);
if (stub == nullptr) {
ciEnv::current()->record_failure("CodeCache is full");
return;
diff --git a/src/hotspot/cpu/arm/c1_LIRAssembler_arm.cpp b/src/hotspot/cpu/arm/c1_LIRAssembler_arm.cpp
index 999309c02258d..688790f07e548 100644
--- a/src/hotspot/cpu/arm/c1_LIRAssembler_arm.cpp
+++ b/src/hotspot/cpu/arm/c1_LIRAssembler_arm.cpp
@@ -161,10 +161,7 @@ void LIR_Assembler::osr_entry() {
int LIR_Assembler::check_icache() {
- Register receiver = LIR_Assembler::receiverOpr()->as_register();
- int offset = __ offset();
- __ inline_cache_check(receiver, Ricklass);
- return offset;
+ return __ ic_check(CodeEntryAlignment);
}
void LIR_Assembler::clinit_barrier(ciMethod* method) {
@@ -971,7 +968,7 @@ void LIR_Assembler::emit_alloc_array(LIR_OpAllocArray* op) {
op->tmp1()->as_register(),
op->tmp2()->as_register(),
op->tmp3()->as_register(),
- arrayOopDesc::header_size(op->type()),
+ arrayOopDesc::base_offset_in_bytes(op->type()),
type2aelembytes(op->type()),
op->klass()->as_register(),
*op->stub()->entry());
@@ -1950,7 +1947,7 @@ void LIR_Assembler::emit_static_call_stub() {
__ relocate(static_stub_Relocation::spec(call_pc));
// If not a single instruction, NativeMovConstReg::next_instruction_address()
// must jump over the whole following ldr_literal.
- // (See CompiledStaticCall::set_to_interpreted())
+ // (See CompiledDirectCall::set_to_interpreted())
#ifdef ASSERT
address ldr_site = __ pc();
#endif
diff --git a/src/hotspot/cpu/arm/c1_MacroAssembler_arm.cpp b/src/hotspot/cpu/arm/c1_MacroAssembler_arm.cpp
index c09e54e0e57ad..d9d042bb2e4e7 100644
--- a/src/hotspot/cpu/arm/c1_MacroAssembler_arm.cpp
+++ b/src/hotspot/cpu/arm/c1_MacroAssembler_arm.cpp
@@ -43,16 +43,6 @@
// arm [macro]assembler) and used with care in the other C1 specific
// files.
-void C1_MacroAssembler::inline_cache_check(Register receiver, Register iCache) {
- Label verified;
- load_klass(Rtemp, receiver);
- cmp(Rtemp, iCache);
- b(verified, eq); // jump over alignment no-ops
- jump(SharedRuntime::get_ic_miss_stub(), relocInfo::runtime_call_type);
- align(CodeEntryAlignment);
- bind(verified);
-}
-
void C1_MacroAssembler::build_frame(int frame_size_in_bytes, int bang_size_in_bytes) {
assert(bang_size_in_bytes >= frame_size_in_bytes, "stack bang size incorrect");
assert((frame_size_in_bytes % StackAlignmentInBytes) == 0, "frame size should be aligned");
diff --git a/src/hotspot/cpu/arm/c1_Runtime1_arm.cpp b/src/hotspot/cpu/arm/c1_Runtime1_arm.cpp
index 62faa6170833b..9862a074a687f 100644
--- a/src/hotspot/cpu/arm/c1_Runtime1_arm.cpp
+++ b/src/hotspot/cpu/arm/c1_Runtime1_arm.cpp
@@ -37,7 +37,6 @@
#include "interpreter/interpreter.hpp"
#include "memory/universe.hpp"
#include "nativeInst_arm.hpp"
-#include "oops/compiledICHolder.hpp"
#include "oops/oop.inline.hpp"
#include "prims/jvmtiExport.hpp"
#include "register_arm.hpp"
diff --git a/src/hotspot/cpu/arm/compiledIC_arm.cpp b/src/hotspot/cpu/arm/compiledIC_arm.cpp
index 2d4187b7d6c6a..71389d2353d66 100644
--- a/src/hotspot/cpu/arm/compiledIC_arm.cpp
+++ b/src/hotspot/cpu/arm/compiledIC_arm.cpp
@@ -25,7 +25,6 @@
#include "precompiled.hpp"
#include "asm/macroAssembler.inline.hpp"
#include "code/compiledIC.hpp"
-#include "code/icBuffer.hpp"
#include "code/nativeInst.hpp"
#include "code/nmethod.hpp"
#include "logging/log.hpp"
@@ -37,7 +36,7 @@
#if COMPILER2_OR_JVMCI
#define __ _masm.
// emit call stub, compiled java to interpreter
-address CompiledStaticCall::emit_to_interp_stub(CodeBuffer &cbuf, address mark) {
+address CompiledDirectCall::emit_to_interp_stub(CodeBuffer &cbuf, address mark) {
// Stub is fixed up when the corresponding call is converted from calling
// compiled code to calling interpreted code.
// set (empty), R9
@@ -59,7 +58,7 @@ address CompiledStaticCall::emit_to_interp_stub(CodeBuffer &cbuf, address mark)
InlinedMetadata object_literal(nullptr);
// single instruction, see NativeMovConstReg::next_instruction_address() in
- // CompiledStaticCall::set_to_interpreted()
+ // CompiledDirectCall::set_to_interpreted()
__ ldr_literal(Rmethod, object_literal);
__ set_inst_mark(); // Who uses this?
@@ -87,32 +86,25 @@ address CompiledStaticCall::emit_to_interp_stub(CodeBuffer &cbuf, address mark)
#undef __
// Relocation entries for call stub, compiled java to interpreter.
-int CompiledStaticCall::reloc_to_interp_stub() {
+int CompiledDirectCall::reloc_to_interp_stub() {
return 10; // 4 in emit_to_interp_stub + 1 in Java_Static_Call
}
#endif // COMPILER2_OR_JVMCI
-int CompiledStaticCall::to_trampoline_stub_size() {
+int CompiledDirectCall::to_trampoline_stub_size() {
// ARM doesn't use trampolines.
return 0;
}
// size of C2 call stub, compiled java to interpreter
-int CompiledStaticCall::to_interp_stub_size() {
+int CompiledDirectCall::to_interp_stub_size() {
return 8 * NativeInstruction::instruction_size;
}
-void CompiledDirectStaticCall::set_to_interpreted(const methodHandle& callee, address entry) {
+void CompiledDirectCall::set_to_interpreted(const methodHandle& callee, address entry) {
address stub = find_stub();
guarantee(stub != nullptr, "stub not found");
- {
- ResourceMark rm;
- log_trace(inlinecache)("CompiledDirectStaticCall@" INTPTR_FORMAT ": set_to_interpreted %s",
- p2i(instruction_address()),
- callee->name_and_sig_as_C_string());
- }
-
// Creation also verifies the object.
NativeMovConstReg* method_holder = nativeMovConstReg_at(stub);
NativeJump* jump = nativeJump_at(method_holder->next_instruction_address());
@@ -128,7 +120,7 @@ void CompiledDirectStaticCall::set_to_interpreted(const methodHandle& callee, ad
set_destination_mt_safe(stub);
}
-void CompiledDirectStaticCall::set_stub_to_clean(static_stub_Relocation* static_stub) {
+void CompiledDirectCall::set_stub_to_clean(static_stub_Relocation* static_stub) {
// Reset stub.
address stub = static_stub->addr();
assert(stub != nullptr, "stub not found");
@@ -144,7 +136,7 @@ void CompiledDirectStaticCall::set_stub_to_clean(static_stub_Relocation* static_
// Non-product mode code
#ifndef PRODUCT
-void CompiledDirectStaticCall::verify() {
+void CompiledDirectCall::verify() {
// Verify call.
_call->verify();
_call->verify_alignment();
diff --git a/src/hotspot/cpu/arm/gc/g1/g1BarrierSetAssembler_arm.cpp b/src/hotspot/cpu/arm/gc/g1/g1BarrierSetAssembler_arm.cpp
index 3aa71dca8cbf0..6d724c750aa34 100644
--- a/src/hotspot/cpu/arm/gc/g1/g1BarrierSetAssembler_arm.cpp
+++ b/src/hotspot/cpu/arm/gc/g1/g1BarrierSetAssembler_arm.cpp
@@ -29,8 +29,8 @@
#include "gc/g1/g1BarrierSetRuntime.hpp"
#include "gc/g1/g1ThreadLocalData.hpp"
#include "gc/g1/g1CardTable.hpp"
+#include "gc/g1/g1HeapRegion.hpp"
#include "gc/g1/g1ThreadLocalData.hpp"
-#include "gc/g1/heapRegion.hpp"
#include "interpreter/interp_masm.hpp"
#include "runtime/javaThread.hpp"
#include "runtime/sharedRuntime.hpp"
diff --git a/src/hotspot/cpu/arm/icBuffer_arm.cpp b/src/hotspot/cpu/arm/icBuffer_arm.cpp
deleted file mode 100644
index e3a1c148ec6a0..0000000000000
--- a/src/hotspot/cpu/arm/icBuffer_arm.cpp
+++ /dev/null
@@ -1,65 +0,0 @@
-/*
- * Copyright (c) 2008, 2016, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#include "precompiled.hpp"
-#include "asm/assembler.inline.hpp"
-#include "code/icBuffer.hpp"
-#include "gc/shared/collectedHeap.inline.hpp"
-#include "interpreter/bytecodes.hpp"
-#include "memory/resourceArea.hpp"
-#include "nativeInst_arm.hpp"
-#include "oops/oop.inline.hpp"
-
-#define __ masm->
-
-int InlineCacheBuffer::ic_stub_code_size() {
- return (4 * Assembler::InstructionSize);
-}
-
-void InlineCacheBuffer::assemble_ic_buffer_code(address code_begin, void* cached_value, address entry_point) {
- ResourceMark rm;
- CodeBuffer code(code_begin, ic_stub_code_size());
- MacroAssembler* masm = new MacroAssembler(&code);
-
- InlinedAddress oop_literal((address) cached_value);
- __ ldr_literal(Ricklass, oop_literal);
- // FIXME: OK to remove reloc here?
- __ patchable_jump(entry_point, relocInfo::runtime_call_type, Rtemp);
- __ bind_literal(oop_literal);
- __ flush();
-}
-
-address InlineCacheBuffer::ic_buffer_entry_point(address code_begin) {
- address jump_address;
- jump_address = code_begin + NativeInstruction::instruction_size;
- NativeJump* jump = nativeJump_at(jump_address);
- return jump->jump_destination();
-}
-
-void* InlineCacheBuffer::ic_buffer_cached_value(address code_begin) {
- NativeMovConstReg* move = nativeMovConstReg_at(code_begin);
- return (void*)move->data();
-}
-
-#undef __
diff --git a/src/hotspot/cpu/arm/macroAssembler_arm.cpp b/src/hotspot/cpu/arm/macroAssembler_arm.cpp
index b827e69d02233..99d619bddb55a 100644
--- a/src/hotspot/cpu/arm/macroAssembler_arm.cpp
+++ b/src/hotspot/cpu/arm/macroAssembler_arm.cpp
@@ -28,6 +28,7 @@
#include "asm/assembler.inline.hpp"
#include "asm/macroAssembler.hpp"
#include "ci/ciEnv.hpp"
+#include "code/compiledIC.hpp"
#include "code/nativeInst.hpp"
#include "compiler/disassembler.hpp"
#include "gc/shared/barrierSet.hpp"
@@ -297,11 +298,13 @@ Address MacroAssembler::receiver_argument_address(Register params_base, Register
return Address(tmp, -Interpreter::stackElementSize);
}
+void MacroAssembler::align(int modulus, int target) {
+ int delta = target - offset();
+ while ((offset() + delta) % modulus != 0) nop();
+}
void MacroAssembler::align(int modulus) {
- while (offset() % modulus != 0) {
- nop();
- }
+ align(modulus, offset());
}
int MacroAssembler::set_last_Java_frame(Register last_java_sp,
@@ -1860,3 +1863,31 @@ void MacroAssembler::lightweight_unlock(Register obj, Register t1, Register t2,
// Fallthrough: success
}
+
+int MacroAssembler::ic_check_size() {
+ return NativeInstruction::instruction_size * 7;
+}
+
+int MacroAssembler::ic_check(int end_alignment) {
+ Register receiver = j_rarg0;
+ Register tmp1 = R4;
+ Register tmp2 = R5;
+
+ // The UEP of a code blob ensures that the VEP is padded. However, the padding of the UEP is placed
+ // before the inline cache check, so we don't have to execute any nop instructions when dispatching
+ // through the UEP, yet we can ensure that the VEP is aligned appropriately. That's why we align
+ // before the inline cache check here, and not after
+ align(end_alignment, offset() + ic_check_size());
+
+ int uep_offset = offset();
+
+ ldr(tmp1, Address(receiver, oopDesc::klass_offset_in_bytes()));
+ ldr(tmp2, Address(Ricklass, CompiledICData::speculated_klass_offset()));
+ cmp(tmp1, tmp2);
+
+ Label dont;
+ b(dont, eq);
+ jump(SharedRuntime::get_ic_miss_stub(), relocInfo::runtime_call_type);
+ bind(dont);
+ return uep_offset;
+}
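Conceptually, the `ic_check()` added for both AArch64 and ARM does one thing: compare the receiver's klass with the klass speculated in the `CompiledICData` and branch to the IC miss stub on mismatch, with the compare placed in the UEP so that the verified entry point behind it stays aligned. A hypothetical model of that decision; the types below are stand-ins, not HotSpot classes:

```cpp
struct Klass;                                          // opaque in this sketch

struct CompiledICDataModel { const Klass* speculated_klass; };
struct ReceiverModel       { const Klass* klass; };

enum class Dispatch { VerifiedEntry, IcMissStub };

inline Dispatch ic_check_model(const ReceiverModel& receiver,
                               const CompiledICDataModel& data) {
  // Klass match: fall through to the verified entry point.
  // Mismatch: the generated code far-jumps to SharedRuntime's IC miss stub.
  return (receiver.klass == data.speculated_klass) ? Dispatch::VerifiedEntry
                                                   : Dispatch::IcMissStub;
}
```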
diff --git a/src/hotspot/cpu/arm/macroAssembler_arm.hpp b/src/hotspot/cpu/arm/macroAssembler_arm.hpp
index d9e49ab986c3a..691c8fa70ee8b 100644
--- a/src/hotspot/cpu/arm/macroAssembler_arm.hpp
+++ b/src/hotspot/cpu/arm/macroAssembler_arm.hpp
@@ -221,6 +221,7 @@ class MacroAssembler: public Assembler {
inline bool ignore_non_patchable_relocations() { return true; }
void align(int modulus);
+ void align(int modulus, int target);
// Support for VM calls
//
@@ -1077,6 +1078,9 @@ class MacroAssembler: public Assembler {
void safepoint_poll(Register tmp1, Label& slow_path);
void get_polling_page(Register dest);
void read_polling_page(Register dest, relocInfo::relocType rtype);
+
+ static int ic_check_size();
+ int ic_check(int end_alignment);
};
diff --git a/src/hotspot/cpu/arm/nativeInst_arm_32.cpp b/src/hotspot/cpu/arm/nativeInst_arm_32.cpp
index 23ee01d335264..6a4062f29b3ba 100644
--- a/src/hotspot/cpu/arm/nativeInst_arm_32.cpp
+++ b/src/hotspot/cpu/arm/nativeInst_arm_32.cpp
@@ -25,7 +25,6 @@
#include "precompiled.hpp"
#include "asm/assembler.inline.hpp"
#include "code/codeCache.hpp"
-#include "code/icBuffer.hpp"
#include "memory/resourceArea.hpp"
#include "nativeInst_arm.hpp"
#include "oops/oop.inline.hpp"
diff --git a/src/hotspot/cpu/arm/nativeInst_arm_32.hpp b/src/hotspot/cpu/arm/nativeInst_arm_32.hpp
index 7006d7709813a..15b57188730df 100644
--- a/src/hotspot/cpu/arm/nativeInst_arm_32.hpp
+++ b/src/hotspot/cpu/arm/nativeInst_arm_32.hpp
@@ -385,7 +385,7 @@ class NativeMovConstReg: public NativeInstruction {
}
void set_pc_relative_offset(address addr, address pc);
address next_instruction_address() const {
- // NOTE: CompiledStaticCall::set_to_interpreted() calls this but
+ // NOTE: CompiledDirectCall::set_to_interpreted() calls this but
// are restricted to single-instruction ldr. No need to jump over
// several instructions.
assert(is_ldr_literal(), "Should only use single-instructions load");
diff --git a/src/hotspot/cpu/arm/sharedRuntime_arm.cpp b/src/hotspot/cpu/arm/sharedRuntime_arm.cpp
index 716c7b7575e9c..3792fab082ba6 100644
--- a/src/hotspot/cpu/arm/sharedRuntime_arm.cpp
+++ b/src/hotspot/cpu/arm/sharedRuntime_arm.cpp
@@ -24,15 +24,14 @@
#include "precompiled.hpp"
#include "asm/assembler.inline.hpp"
+#include "code/compiledIC.hpp"
#include "code/debugInfoRec.hpp"
-#include "code/icBuffer.hpp"
#include "code/vtableStubs.hpp"
#include "compiler/oopMap.hpp"
#include "gc/shared/barrierSetAssembler.hpp"
#include "interpreter/interpreter.hpp"
#include "logging/log.hpp"
#include "memory/resourceArea.hpp"
-#include "oops/compiledICHolder.hpp"
#include "oops/klass.inline.hpp"
#include "prims/methodHandles.hpp"
#include "runtime/jniHandles.hpp"
@@ -626,12 +625,9 @@ AdapterHandlerEntry* SharedRuntime::generate_i2c2i_adapters(MacroAssembler *masm
Label skip_fixup;
const Register receiver = R0;
const Register holder_klass = Rtemp; // XXX should be OK for C2 but not 100% sure
- const Register receiver_klass = R4;
- __ load_klass(receiver_klass, receiver);
- __ ldr(holder_klass, Address(Ricklass, CompiledICHolder::holder_klass_offset()));
- __ ldr(Rmethod, Address(Ricklass, CompiledICHolder::holder_metadata_offset()));
- __ cmp(receiver_klass, holder_klass);
+ __ ic_check(1 /* end_alignment */);
+ __ ldr(Rmethod, Address(Ricklass, CompiledICData::speculated_method_offset()));
__ ldr(Rtemp, Address(Rmethod, Method::code_offset()), eq);
__ cmp(Rtemp, 0, eq);
@@ -819,21 +815,14 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
// Unverified entry point
address start = __ pc();
- // Inline cache check, same as in C1_MacroAssembler::inline_cache_check()
const Register receiver = R0; // see receiverOpr()
- __ load_klass(Rtemp, receiver);
- __ cmp(Rtemp, Ricklass);
- Label verified;
-
- __ b(verified, eq); // jump over alignment no-ops too
- __ jump(SharedRuntime::get_ic_miss_stub(), relocInfo::runtime_call_type, Rtemp);
- __ align(CodeEntryAlignment);
+ __ verify_oop(receiver);
+ // Inline cache check
+ __ ic_check(CodeEntryAlignment /* end_alignment */);
// Verified entry point
- __ bind(verified);
int vep_offset = __ pc() - start;
-
if ((InlineObjectHash && method->intrinsic_id() == vmIntrinsics::_hashCode) || (method->intrinsic_id() == vmIntrinsics::_identityHashCode)) {
// Object.hashCode, System.identityHashCode can pull the hashCode from the header word
// instead of doing a full VM transition once it's been computed.
diff --git a/src/hotspot/cpu/arm/vtableStubs_arm.cpp b/src/hotspot/cpu/arm/vtableStubs_arm.cpp
index 539e288f63fb2..1229b5073f506 100644
--- a/src/hotspot/cpu/arm/vtableStubs_arm.cpp
+++ b/src/hotspot/cpu/arm/vtableStubs_arm.cpp
@@ -25,10 +25,10 @@
#include "precompiled.hpp"
#include "asm/assembler.inline.hpp"
#include "asm/macroAssembler.inline.hpp"
+#include "code/compiledIC.hpp"
#include "code/vtableStubs.hpp"
#include "interp_masm_arm.hpp"
#include "memory/resourceArea.hpp"
-#include "oops/compiledICHolder.hpp"
#include "oops/instanceKlass.hpp"
#include "oops/klassVtable.hpp"
#include "oops/klass.inline.hpp"
@@ -160,7 +160,7 @@ VtableStub* VtableStubs::create_itable_stub(int itable_index) {
__ load_klass(Rclass, R0);
// Receiver subtype check against REFC.
- __ ldr(Rintf, Address(Ricklass, CompiledICHolder::holder_klass_offset()));
+ __ ldr(Rintf, Address(Ricklass, CompiledICData::itable_refc_klass_offset()));
__ lookup_interface_method(// inputs: rec. class, interface, itable index
Rclass, Rintf, noreg,
// outputs: temp reg1, temp reg2
@@ -171,7 +171,7 @@ VtableStub* VtableStubs::create_itable_stub(int itable_index) {
start_pc = __ pc();
// Get Method* and entry point for compiler
- __ ldr(Rintf, Address(Ricklass, CompiledICHolder::holder_metadata_offset()));
+ __ ldr(Rintf, Address(Ricklass, CompiledICData::itable_defc_klass_offset()));
__ lookup_interface_method(// inputs: rec. class, interface, itable index
Rclass, Rintf, itable_index,
// outputs: temp reg1, temp reg2, temp reg3
diff --git a/src/hotspot/cpu/ppc/assembler_ppc.inline.hpp b/src/hotspot/cpu/ppc/assembler_ppc.inline.hpp
index 47b681ce26be4..d78dec964cbb0 100644
--- a/src/hotspot/cpu/ppc/assembler_ppc.inline.hpp
+++ b/src/hotspot/cpu/ppc/assembler_ppc.inline.hpp
@@ -451,7 +451,7 @@ inline void Assembler::bcctrl(int boint, int biint, int bhint, relocInfo::relocT
// helper function for b
inline bool Assembler::is_within_range_of_b(address a, address pc) {
- // Guard against illegal branch targets, e.g. -1 (see CompiledStaticCall and ad-file).
+ // Guard against illegal branch targets, e.g. -1 (see CompiledDirectCall and ad-file).
if ((((uint64_t)a) & 0x3) != 0) return false;
const int range = 1 << (29-6); // li field is from bit 6 to bit 29.
@@ -465,7 +465,7 @@ inline bool Assembler::is_within_range_of_b(address a, address pc) {
// helper functions for bcxx.
inline bool Assembler::is_within_range_of_bcxx(address a, address pc) {
- // Guard against illegal branch targets, e.g. -1 (see CompiledStaticCall and ad-file).
+ // Guard against illegal branch targets, e.g. -1 (see CompiledDirectCall and ad-file).
if ((((uint64_t)a) & 0x3) != 0) return false;
const int range = 1 << (29-16); // bd field is from bit 16 to bit 29.
diff --git a/src/hotspot/cpu/ppc/c1_CodeStubs_ppc.cpp b/src/hotspot/cpu/ppc/c1_CodeStubs_ppc.cpp
index 1a00c9ad268c7..dc70c73d4b330 100644
--- a/src/hotspot/cpu/ppc/c1_CodeStubs_ppc.cpp
+++ b/src/hotspot/cpu/ppc/c1_CodeStubs_ppc.cpp
@@ -456,6 +456,9 @@ void ArrayCopyStub::emit_code(LIR_Assembler* ce) {
__ extsw(R7_ARG5, length()->as_register());
ce->emit_static_call_stub();
+ if (ce->compilation()->bailed_out()) {
+ return; // CodeCache is full
+ }
bool success = ce->emit_trampoline_stub_for_call(SharedRuntime::get_resolve_static_call_stub());
if (!success) { return; }
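The added bailout check matters because `emit_static_call_stub()` can fail when the code cache is full; once the compilation has bailed out, no further stubs should be emitted for this node. A minimal sketch of the pattern, with stand-in types:

```
#include <cstdio>

// Stand-in for the compilation object that records a bailout.
struct Compilation {
  bool bailed_out = false;
  void bailout(const char* reason) { bailed_out = true; std::printf("bailout: %s\n", reason); }
};

void emit_static_call_stub(Compilation& c, bool code_cache_full) {
  if (code_cache_full) c.bailout("CodeCache is full");
  // ... otherwise emit the stub ...
}

bool emit_trampoline_stub(Compilation&) { return true; }

void emit_array_copy_stub(Compilation& c, bool code_cache_full) {
  emit_static_call_stub(c, code_cache_full);
  if (c.bailed_out) {
    return;                   // the early return this hunk adds
  }
  if (!emit_trampoline_stub(c)) {
    return;                   // the pre-existing failure check
  }
  // ... emit the call and the rest of the stub ...
}

int main() {
  Compilation ok, full;
  emit_array_copy_stub(ok, false);
  emit_array_copy_stub(full, true);
}
```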
diff --git a/src/hotspot/cpu/ppc/c1_LIRAssembler_ppc.cpp b/src/hotspot/cpu/ppc/c1_LIRAssembler_ppc.cpp
index d316c2b3db2be..3ae35949b2148 100644
--- a/src/hotspot/cpu/ppc/c1_LIRAssembler_ppc.cpp
+++ b/src/hotspot/cpu/ppc/c1_LIRAssembler_ppc.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2000, 2023, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2024, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2012, 2023 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@@ -77,9 +77,7 @@ int LIR_Assembler::initial_frame_size_in_bytes() const {
// we fetch the class of the receiver and compare it with the cached class.
// If they do not match we jump to slow case.
int LIR_Assembler::check_icache() {
- int offset = __ offset();
- __ inline_cache_check(R3_ARG1, R19_inline_cache_reg);
- return offset;
+ return __ ic_check(CodeEntryAlignment);
}
void LIR_Assembler::clinit_barrier(ciMethod* method) {
@@ -2300,7 +2298,7 @@ void LIR_Assembler::emit_alloc_array(LIR_OpAllocArray* op) {
op->tmp1()->as_register(),
op->tmp2()->as_register(),
op->tmp3()->as_register(),
- arrayOopDesc::header_size(op->type()),
+ arrayOopDesc::base_offset_in_bytes(op->type()),
type2aelembytes(op->type()),
op->klass()->as_register(),
*op->stub()->entry());
diff --git a/src/hotspot/cpu/ppc/c1_MacroAssembler_ppc.cpp b/src/hotspot/cpu/ppc/c1_MacroAssembler_ppc.cpp
index 577dcae25f4bc..ba0187d0363ca 100644
--- a/src/hotspot/cpu/ppc/c1_MacroAssembler_ppc.cpp
+++ b/src/hotspot/cpu/ppc/c1_MacroAssembler_ppc.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1999, 2023, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2024, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2012, 2018 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@@ -40,29 +40,6 @@
#include "utilities/macros.hpp"
#include "utilities/powerOfTwo.hpp"
-void C1_MacroAssembler::inline_cache_check(Register receiver, Register iCache) {
- const Register temp_reg = R12_scratch2;
- Label Lmiss;
-
- verify_oop(receiver, FILE_AND_LINE);
- load_klass_check_null(temp_reg, receiver, &Lmiss);
-
- if (TrapBasedICMissChecks && TrapBasedNullChecks) {
- trap_ic_miss_check(temp_reg, iCache);
- } else {
- Label Lok;
- cmpd(CCR0, temp_reg, iCache);
- beq(CCR0, Lok);
- bind(Lmiss);
- //load_const_optimized(temp_reg, SharedRuntime::get_ic_miss_stub(), R0);
- calculate_address_from_global_toc(temp_reg, SharedRuntime::get_ic_miss_stub(), true, true, false);
- mtctr(temp_reg);
- bctr();
- align(32, 12);
- bind(Lok);
- }
-}
-
void C1_MacroAssembler::explicit_null_check(Register base) {
Unimplemented();
@@ -333,7 +310,7 @@ void C1_MacroAssembler::allocate_array(
Register t1, // temp register
Register t2, // temp register
Register t3, // temp register
- int hdr_size, // object header size in words
+ int base_offset_in_bytes, // elements offset in bytes
int elt_size, // element size in bytes
Register klass, // object klass
Label& slow_case // continuation point if fast allocation fails
@@ -365,7 +342,7 @@ void C1_MacroAssembler::allocate_array(
sldi(t1, len, log2_elt_size);
arr_len_in_bytes = t1;
}
- addi(arr_size, arr_len_in_bytes, hdr_size * wordSize + MinObjAlignmentInBytesMask); // Add space for header & alignment.
+ addi(arr_size, arr_len_in_bytes, base_offset_in_bytes + MinObjAlignmentInBytesMask); // Add space for header & alignment.
clrrdi(arr_size, arr_size, LogMinObjAlignmentInBytes); // Align array size.
// Allocate space & initialize header.
@@ -375,8 +352,18 @@ void C1_MacroAssembler::allocate_array(
// Initialize body.
const Register base = t2;
const Register index = t3;
- addi(base, obj, hdr_size * wordSize); // compute address of first element
- addi(index, arr_size, -(hdr_size * wordSize)); // compute index = number of bytes to clear
+ addi(base, obj, base_offset_in_bytes); // compute address of first element
+ addi(index, arr_size, -(base_offset_in_bytes)); // compute index = number of bytes to clear
+
+ // Zero first 4 bytes, if start offset is not word aligned.
+ if (!is_aligned(base_offset_in_bytes, BytesPerWord)) {
+ assert(is_aligned(base_offset_in_bytes, BytesPerInt), "must be 4-byte aligned");
+ li(t1, 0);
+ stw(t1, 0, base);
+ addi(base, base, BytesPerInt);
+ // Note: initialize_body will align index down, no need to correct it here.
+ }
+
initialize_body(base, index);
if (CURRENT_ENV->dtrace_alloc_probes()) {
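Passing `base_offset_in_bytes` instead of a header size in words lets the allocation math cope with element bases that are not word aligned. The size computation the generated code performs is, in plain C++ (the 8-byte alignment and the example base offsets are assumptions about a common 64-bit layout):

```
#include <cstdint>

constexpr int kMinObjAlignmentInBytes     = 8;  // assumption: HotSpot's usual value
constexpr int kMinObjAlignmentInBytesMask = kMinObjAlignmentInBytes - 1;

// arr_size = align_up(base_offset_in_bytes + len * elt_size, MinObjAlignmentInBytes),
// computed exactly as the assembly does: (x + mask) & ~mask.
constexpr int64_t array_size_in_bytes(int base_offset_in_bytes, int64_t len, int elt_size) {
  return (base_offset_in_bytes + len * elt_size + kMinObjAlignmentInBytesMask)
         & ~int64_t(kMinObjAlignmentInBytesMask);
}

// A 16-byte base (mark word, compressed klass, length field) stays word aligned;
// a 20-byte base (uncompressed klass pointer) does not, but the size math still works.
static_assert(array_size_in_bytes(16, 10, 4) == 56, "10 ints after a 16-byte base");
static_assert(array_size_in_bytes(20, 10, 4) == 64, "10 ints after a 20-byte base");

int main() { return 0; }
```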
diff --git a/src/hotspot/cpu/ppc/c1_MacroAssembler_ppc.hpp b/src/hotspot/cpu/ppc/c1_MacroAssembler_ppc.hpp
index 5fa19d5fd5dad..ab31431e67d9c 100644
--- a/src/hotspot/cpu/ppc/c1_MacroAssembler_ppc.hpp
+++ b/src/hotspot/cpu/ppc/c1_MacroAssembler_ppc.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1999, 2023, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2024, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2012, 2015 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@@ -80,7 +80,7 @@
Register t1, // temp register
Register t2, // temp register
Register t3, // temp register
- int hdr_size, // object header size in words
+ int base_offset_in_bytes, // elements offset in bytes
int elt_size, // element size in bytes
Register klass, // object klass
Label& slow_case // continuation point if fast allocation fails
diff --git a/src/hotspot/cpu/ppc/c1_Runtime1_ppc.cpp b/src/hotspot/cpu/ppc/c1_Runtime1_ppc.cpp
index 2ba6a6bca4e03..63914c5d1cb93 100644
--- a/src/hotspot/cpu/ppc/c1_Runtime1_ppc.cpp
+++ b/src/hotspot/cpu/ppc/c1_Runtime1_ppc.cpp
@@ -34,7 +34,6 @@
#include "gc/shared/cardTableBarrierSet.hpp"
#include "interpreter/interpreter.hpp"
#include "nativeInst_ppc.hpp"
-#include "oops/compiledICHolder.hpp"
#include "oops/oop.inline.hpp"
#include "prims/jvmtiExport.hpp"
#include "register_ppc.hpp"
diff --git a/src/hotspot/cpu/ppc/compiledIC_ppc.cpp b/src/hotspot/cpu/ppc/compiledIC_ppc.cpp
index 54f9cfa936797..355ac4815d551 100644
--- a/src/hotspot/cpu/ppc/compiledIC_ppc.cpp
+++ b/src/hotspot/cpu/ppc/compiledIC_ppc.cpp
@@ -26,7 +26,6 @@
#include "precompiled.hpp"
#include "asm/macroAssembler.inline.hpp"
#include "code/compiledIC.hpp"
-#include "code/icBuffer.hpp"
#include "code/nmethod.hpp"
#include "memory/resourceArea.hpp"
#include "runtime/mutexLocker.hpp"
@@ -37,7 +36,7 @@
// ----------------------------------------------------------------------------
-// A PPC CompiledDirectStaticCall looks like this:
+// A PPC CompiledDirectCall looks like this:
//
// >>>> consts
//
@@ -79,7 +78,7 @@
const int IC_pos_in_java_to_interp_stub = 8;
#define __ _masm.
-address CompiledStaticCall::emit_to_interp_stub(CodeBuffer &cbuf, address mark/* = nullptr*/) {
+address CompiledDirectCall::emit_to_interp_stub(CodeBuffer &cbuf, address mark/* = nullptr*/) {
#ifdef COMPILER2
if (mark == nullptr) {
// Get the mark within main instrs section which is set to the address of the call.
@@ -91,7 +90,7 @@ address CompiledStaticCall::emit_to_interp_stub(CodeBuffer &cbuf, address mark/*
MacroAssembler _masm(&cbuf);
// Start the stub.
- address stub = __ start_a_stub(CompiledStaticCall::to_interp_stub_size());
+ address stub = __ start_a_stub(CompiledDirectCall::to_interp_stub_size());
if (stub == nullptr) {
return nullptr; // CodeCache is full
}
@@ -135,7 +134,7 @@ address CompiledStaticCall::emit_to_interp_stub(CodeBuffer &cbuf, address mark/*
// FIXME: Assert that the stub can be identified and patched.
// Java_to_interp_stub_size should be good.
- assert((__ offset() - stub_start_offset) <= CompiledStaticCall::to_interp_stub_size(),
+ assert((__ offset() - stub_start_offset) <= CompiledDirectCall::to_interp_stub_size(),
"should be good size");
assert(!is_NativeCallTrampolineStub_at(__ addr_at(stub_start_offset)),
"must not confuse java_to_interp with trampoline stubs");
@@ -153,27 +152,20 @@ address CompiledStaticCall::emit_to_interp_stub(CodeBuffer &cbuf, address mark/*
// Size of java_to_interp stub, this doesn't need to be accurate but it must
// be larger or equal to the real size of the stub.
// Used for optimization in Compile::Shorten_branches.
-int CompiledStaticCall::to_interp_stub_size() {
+int CompiledDirectCall::to_interp_stub_size() {
return 12 * BytesPerInstWord;
}
// Relocation entries for call stub, compiled java to interpreter.
// Used for optimization in Compile::Shorten_branches.
-int CompiledStaticCall::reloc_to_interp_stub() {
+int CompiledDirectCall::reloc_to_interp_stub() {
return 5;
}
-void CompiledDirectStaticCall::set_to_interpreted(const methodHandle& callee, address entry) {
+void CompiledDirectCall::set_to_interpreted(const methodHandle& callee, address entry) {
address stub = find_stub();
guarantee(stub != nullptr, "stub not found");
- {
- ResourceMark rm;
- log_trace(inlinecache)("CompiledDirectStaticCall@" INTPTR_FORMAT ": set_to_interpreted %s",
- p2i(instruction_address()),
- callee->name_and_sig_as_C_string());
- }
-
// Creation also verifies the object.
NativeMovConstReg* method_holder = nativeMovConstReg_at(stub + IC_pos_in_java_to_interp_stub);
NativeJump* jump = nativeJump_at(method_holder->next_instruction_address());
@@ -188,7 +180,7 @@ void CompiledDirectStaticCall::set_to_interpreted(const methodHandle& callee, ad
set_destination_mt_safe(stub);
}
-void CompiledDirectStaticCall::set_stub_to_clean(static_stub_Relocation* static_stub) {
+void CompiledDirectCall::set_stub_to_clean(static_stub_Relocation* static_stub) {
// Reset stub.
address stub = static_stub->addr();
assert(stub != nullptr, "stub not found");
@@ -204,7 +196,7 @@ void CompiledDirectStaticCall::set_stub_to_clean(static_stub_Relocation* static_
// Non-product mode code
#ifndef PRODUCT
-void CompiledDirectStaticCall::verify() {
+void CompiledDirectCall::verify() {
// Verify call.
_call->verify();
_call->verify_alignment();
diff --git a/src/hotspot/cpu/ppc/gc/g1/g1BarrierSetAssembler_ppc.cpp b/src/hotspot/cpu/ppc/gc/g1/g1BarrierSetAssembler_ppc.cpp
index cd2fd355bbb97..ab520162d350e 100644
--- a/src/hotspot/cpu/ppc/gc/g1/g1BarrierSetAssembler_ppc.cpp
+++ b/src/hotspot/cpu/ppc/gc/g1/g1BarrierSetAssembler_ppc.cpp
@@ -30,9 +30,9 @@
#include "gc/g1/g1BarrierSetRuntime.hpp"
#include "gc/g1/g1CardTable.hpp"
#include "gc/g1/g1DirtyCardQueue.hpp"
+#include "gc/g1/g1HeapRegion.hpp"
#include "gc/g1/g1SATBMarkQueueSet.hpp"
#include "gc/g1/g1ThreadLocalData.hpp"
-#include "gc/g1/heapRegion.hpp"
#include "interpreter/interp_masm.hpp"
#include "runtime/jniHandles.hpp"
#include "runtime/sharedRuntime.hpp"
diff --git a/src/hotspot/cpu/ppc/globals_ppc.hpp b/src/hotspot/cpu/ppc/globals_ppc.hpp
index f46ca5db3b7df..a2a94c178fb8b 100644
--- a/src/hotspot/cpu/ppc/globals_ppc.hpp
+++ b/src/hotspot/cpu/ppc/globals_ppc.hpp
@@ -60,7 +60,7 @@ define_pd_global(bool, VMContinuations, true);
// Use large code-entry alignment.
define_pd_global(uintx, CodeCacheSegmentSize, 128);
-define_pd_global(intx, CodeEntryAlignment, 128);
+define_pd_global(intx, CodeEntryAlignment, 64);
define_pd_global(intx, OptoLoopAlignment, 16);
define_pd_global(intx, InlineSmallCode, 1500);
diff --git a/src/hotspot/cpu/ppc/icBuffer_ppc.cpp b/src/hotspot/cpu/ppc/icBuffer_ppc.cpp
deleted file mode 100644
index 4157a5b0fd788..0000000000000
--- a/src/hotspot/cpu/ppc/icBuffer_ppc.cpp
+++ /dev/null
@@ -1,69 +0,0 @@
-/*
- * Copyright (c) 2000, 2015, Oracle and/or its affiliates. All rights reserved.
- * Copyright (c) 2012, 2013 SAP SE. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#include "precompiled.hpp"
-#include "asm/assembler.inline.hpp"
-#include "code/icBuffer.hpp"
-#include "gc/shared/collectedHeap.inline.hpp"
-#include "interpreter/bytecodes.hpp"
-#include "memory/resourceArea.hpp"
-#include "nativeInst_ppc.hpp"
-#include "oops/oop.inline.hpp"
-
-#define __ masm.
-
-int InlineCacheBuffer::ic_stub_code_size() {
- return MacroAssembler::load_const_size + MacroAssembler::b64_patchable_size;
-}
-
-void InlineCacheBuffer::assemble_ic_buffer_code(address code_begin, void* cached_value, address entry_point) {
- ResourceMark rm;
- CodeBuffer code(code_begin, ic_stub_code_size());
- MacroAssembler masm(&code);
- // Note: even though the code contains an embedded metadata, we do not need reloc info
- // because
- // (1) the metadata is old (i.e., doesn't matter for scavenges)
- // (2) these ICStubs are removed *before* a GC happens, so the roots disappear.
-
- // Load the oop ...
- __ load_const(R19_method, (address) cached_value, R0);
- // ... and jump to entry point.
- __ b64_patchable((address) entry_point, relocInfo::none);
-
- __ flush();
-}
-
-address InlineCacheBuffer::ic_buffer_entry_point(address code_begin) {
- NativeMovConstReg* move = nativeMovConstReg_at(code_begin); // creation also verifies the object
- NativeJump* jump = nativeJump_at(move->next_instruction_address());
- return jump->jump_destination();
-}
-
-void* InlineCacheBuffer::ic_buffer_cached_value(address code_begin) {
- NativeMovConstReg* move = nativeMovConstReg_at(code_begin); // creation also verifies the object
- void* o = (void*)move->data();
- return o;
-}
-
diff --git a/src/hotspot/cpu/ppc/macroAssembler_ppc.cpp b/src/hotspot/cpu/ppc/macroAssembler_ppc.cpp
index b9d1cdb19ac9d..fe19cf0350020 100644
--- a/src/hotspot/cpu/ppc/macroAssembler_ppc.cpp
+++ b/src/hotspot/cpu/ppc/macroAssembler_ppc.cpp
@@ -25,6 +25,7 @@
#include "precompiled.hpp"
#include "asm/macroAssembler.inline.hpp"
+#include "code/compiledIC.hpp"
#include "compiler/disassembler.hpp"
#include "gc/shared/collectedHeap.inline.hpp"
#include "gc/shared/barrierSet.hpp"
@@ -1195,6 +1196,81 @@ void MacroAssembler::post_call_nop() {
assert(is_post_call_nop(*(int*)(pc() - 4)), "post call not not found");
}
+int MacroAssembler::ic_check_size() {
+ bool implicit_null_checks_available = ImplicitNullChecks && os::zero_page_read_protected(),
+ use_fast_receiver_null_check = implicit_null_checks_available || TrapBasedNullChecks,
+ use_trap_based_null_check = !implicit_null_checks_available && TrapBasedNullChecks;
+
+ int num_ins;
+ if (use_fast_receiver_null_check && TrapBasedICMissChecks) {
+ num_ins = 3;
+ if (use_trap_based_null_check) num_ins += 1;
+ } else {
+ num_ins = 7;
+ if (!implicit_null_checks_available) num_ins += 2;
+ }
+ return num_ins * BytesPerInstWord;
+}
+
+int MacroAssembler::ic_check(int end_alignment) {
+ bool implicit_null_checks_available = ImplicitNullChecks && os::zero_page_read_protected(),
+ use_fast_receiver_null_check = implicit_null_checks_available || TrapBasedNullChecks,
+ use_trap_based_null_check = !implicit_null_checks_available && TrapBasedNullChecks;
+
+ Register receiver = R3_ARG1;
+ Register data = R19_inline_cache_reg;
+ Register tmp1 = R11_scratch1;
+ Register tmp2 = R12_scratch2;
+
+ // The UEP of a code blob ensures that the VEP is padded. However, the padding of the UEP is placed
+ // before the inline cache check, so we don't have to execute any nop instructions when dispatching
+ // through the UEP, yet we can ensure that the VEP is aligned appropriately. That's why we align
+ // before the inline cache check here, and not after
+ align(end_alignment, end_alignment, end_alignment - ic_check_size());
+
+ int uep_offset = offset();
+
+ if (use_fast_receiver_null_check && TrapBasedICMissChecks) {
+ // Fast version which uses SIGTRAP
+
+ if (use_trap_based_null_check) {
+ trap_null_check(receiver);
+ }
+ if (UseCompressedClassPointers) {
+ lwz(tmp1, oopDesc::klass_offset_in_bytes(), receiver);
+ } else {
+ ld(tmp1, oopDesc::klass_offset_in_bytes(), receiver);
+ }
+ ld(tmp2, in_bytes(CompiledICData::speculated_klass_offset()), data);
+ trap_ic_miss_check(tmp1, tmp2);
+
+ } else {
+ // Slower version which doesn't use SIGTRAP
+
+ // Load stub address using toc (fixed instruction size, unlike load_const_optimized)
+ calculate_address_from_global_toc(tmp1, SharedRuntime::get_ic_miss_stub(),
+ true, true, false); // 2 instructions
+ mtctr(tmp1);
+
+ if (!implicit_null_checks_available) {
+ cmpdi(CCR0, receiver, 0);
+ beqctr(CCR0);
+ }
+ if (UseCompressedClassPointers) {
+ lwz(tmp1, oopDesc::klass_offset_in_bytes(), receiver);
+ } else {
+ ld(tmp1, oopDesc::klass_offset_in_bytes(), receiver);
+ }
+ ld(tmp2, in_bytes(CompiledICData::speculated_klass_offset()), data);
+ cmpd(CCR0, tmp1, tmp2);
+ bnectr(CCR0);
+ }
+
+ assert((offset() % end_alignment) == 0, "Misaligned verified entry point");
+
+ return uep_offset;
+}
+
void MacroAssembler::call_VM_base(Register oop_result,
Register last_java_sp,
address entry_point,
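The comment in `ic_check()` captures the key layout decision: alignment nops are emitted before the unverified entry point rather than between it and the verified entry point, so dispatch through the UEP never executes padding while the VEP still ends up aligned. A standalone sketch of the arithmetic (the concrete sizes are illustrative):

```
// Pad so that the *end* of the inline-cache check, i.e. the verified entry point,
// lands on an end_alignment boundary; the unverified entry point starts right
// after the padding, so no nops are executed on the UEP path.
constexpr int pad_before_ic_check(int current_offset, int ic_check_size, int end_alignment) {
  int over = (current_offset + ic_check_size) % end_alignment;
  return over == 0 ? 0 : end_alignment - over;
}

// Example with the values this patch uses on PPC: CodeEntryAlignment = 64 and a
// 16-byte (4 instruction) SIGTRAP-based check including the trap-based null check.
static_assert((4 + pad_before_ic_check(4, 16, 64) + 16) % 64 == 0, "VEP aligned");
static_assert(pad_before_ic_check(48, 16, 64) == 0, "already aligned: no padding");

int main() { return 0; }
```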
diff --git a/src/hotspot/cpu/ppc/macroAssembler_ppc.hpp b/src/hotspot/cpu/ppc/macroAssembler_ppc.hpp
index cddc8b92fa09a..ec370a450ac35 100644
--- a/src/hotspot/cpu/ppc/macroAssembler_ppc.hpp
+++ b/src/hotspot/cpu/ppc/macroAssembler_ppc.hpp
@@ -367,6 +367,9 @@ class MacroAssembler: public Assembler {
Register toc);
#endif
+ static int ic_check_size();
+ int ic_check(int end_alignment);
+
protected:
// It is imperative that all calls into the VM are handled via the
diff --git a/src/hotspot/cpu/ppc/ppc.ad b/src/hotspot/cpu/ppc/ppc.ad
index 783edf727b3bc..110c6b9b66860 100644
--- a/src/hotspot/cpu/ppc/ppc.ad
+++ b/src/hotspot/cpu/ppc/ppc.ad
@@ -1978,42 +1978,7 @@ void MachUEPNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
// This is the unverified entry point.
C2_MacroAssembler _masm(&cbuf);
- // Inline_cache contains a klass.
- Register ic_klass = as_Register(Matcher::inline_cache_reg_encode());
- Register receiver_klass = R12_scratch2; // tmp
-
- assert_different_registers(ic_klass, receiver_klass, R11_scratch1, R3_ARG1);
- assert(R11_scratch1 == R11, "need prologue scratch register");
-
- // Check for nullptr argument if we don't have implicit null checks.
- if (!ImplicitNullChecks || !os::zero_page_read_protected()) {
- if (TrapBasedNullChecks) {
- __ trap_null_check(R3_ARG1);
- } else {
- Label valid;
- __ cmpdi(CCR0, R3_ARG1, 0);
- __ bne_predict_taken(CCR0, valid);
- // We have a null argument, branch to ic_miss_stub.
- __ b64_patchable((address)SharedRuntime::get_ic_miss_stub(),
- relocInfo::runtime_call_type);
- __ bind(valid);
- }
- }
- // Assume argument is not nullptr, load klass from receiver.
- __ load_klass(receiver_klass, R3_ARG1);
-
- if (TrapBasedICMissChecks) {
- __ trap_ic_miss_check(receiver_klass, ic_klass);
- } else {
- Label valid;
- __ cmpd(CCR0, receiver_klass, ic_klass);
- __ beq_predict_taken(CCR0, valid);
- // We have an unexpected klass, branch to ic_miss_stub.
- __ b64_patchable((address)SharedRuntime::get_ic_miss_stub(),
- relocInfo::runtime_call_type);
- __ bind(valid);
- }
-
+ __ ic_check(CodeEntryAlignment);
// Argument is valid and klass is as expected, continue.
}
@@ -2062,7 +2027,10 @@ int HandlerImpl::emit_exception_handler(CodeBuffer &cbuf) {
C2_MacroAssembler _masm(&cbuf);
address base = __ start_a_stub(size_exception_handler());
- if (base == nullptr) return 0; // CodeBuffer::expand failed
+ if (base == nullptr) {
+ ciEnv::current()->record_failure("CodeCache is full");
+ return 0; // CodeBuffer::expand failed
+ }
int offset = __ offset();
__ b64_patchable((address)OptoRuntime::exception_blob()->content_begin(),
@@ -2079,7 +2047,10 @@ int HandlerImpl::emit_deopt_handler(CodeBuffer& cbuf) {
C2_MacroAssembler _masm(&cbuf);
address base = __ start_a_stub(size_deopt_handler());
- if (base == nullptr) return 0; // CodeBuffer::expand failed
+ if (base == nullptr) {
+ ciEnv::current()->record_failure("CodeCache is full");
+ return 0; // CodeBuffer::expand failed
+ }
int offset = __ offset();
__ bl64_patchable((address)SharedRuntime::deopt_blob()->unpack(),
@@ -2801,15 +2772,16 @@ encode %{
intptr_t val = $src$$constant;
relocInfo::relocType constant_reloc = $src->constant_reloc(); // src
address const_toc_addr;
+ RelocationHolder r; // Initializes type to none.
if (constant_reloc == relocInfo::oop_type) {
// Create an oop constant and a corresponding relocation.
- AddressLiteral a = __ allocate_oop_address((jobject)val);
+ AddressLiteral a = __ constant_oop_address((jobject)val);
const_toc_addr = __ address_constant((address)a.value(), RelocationHolder::none);
- __ relocate(a.rspec());
+ r = a.rspec();
} else if (constant_reloc == relocInfo::metadata_type) {
+ // Notify OOP recorder (don't need the relocation)
AddressLiteral a = __ constant_metadata_address((Metadata *)val);
const_toc_addr = __ address_constant((address)a.value(), RelocationHolder::none);
- __ relocate(a.rspec());
} else {
// Create a non-oop constant, no relocation needed.
const_toc_addr = __ long_constant((jlong)$src$$constant);
@@ -2819,6 +2791,7 @@ encode %{
ciEnv::current()->record_out_of_memory_failure();
return;
}
+ __ relocate(r); // If set above.
// Get the constant's TOC offset.
toc_offset = __ offset_to_method_toc(const_toc_addr);
@@ -2832,15 +2805,16 @@ encode %{
intptr_t val = $src$$constant;
relocInfo::relocType constant_reloc = $src->constant_reloc(); // src
address const_toc_addr;
+ RelocationHolder r; // Initializes type to none.
if (constant_reloc == relocInfo::oop_type) {
// Create an oop constant and a corresponding relocation.
- AddressLiteral a = __ allocate_oop_address((jobject)val);
+ AddressLiteral a = __ constant_oop_address((jobject)val);
const_toc_addr = __ address_constant((address)a.value(), RelocationHolder::none);
- __ relocate(a.rspec());
+ r = a.rspec();
} else if (constant_reloc == relocInfo::metadata_type) {
+ // Notify OOP recorder (don't need the relocation)
AddressLiteral a = __ constant_metadata_address((Metadata *)val);
const_toc_addr = __ address_constant((address)a.value(), RelocationHolder::none);
- __ relocate(a.rspec());
} else { // non-oop pointers, e.g. card mark base, heap top
// Create a non-oop constant, no relocation needed.
const_toc_addr = __ long_constant((jlong)$src$$constant);
@@ -2850,6 +2824,7 @@ encode %{
ciEnv::current()->record_out_of_memory_failure();
return;
}
+ __ relocate(r); // If set above.
// Get the constant's TOC offset.
const int toc_offset = __ offset_to_method_toc(const_toc_addr);
// Store the toc offset of the constant.
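In both TOC-constant encodings only the oop case still records a relocation, and it is now applied after the out-of-memory check on the TOC slot instead of before it, so a bailout no longer leaves a relocation referring to a constant that was never emitted. The ordering pattern, abstracted away from HotSpot types:

```
#include <optional>
#include <cstdio>

struct Reloc { const char* kind; };

// Stand-in: reserving a constant-pool slot can fail (returns nullptr).
const void* reserve_constant_slot(bool out_of_memory) {
  static const long slot = 0;              // stands in for a TOC entry
  return out_of_memory ? nullptr : &slot;
}

void apply_relocation(const Reloc& r) { std::printf("relocate: %s\n", r.kind); }

void emit_toc_constant(bool is_oop, bool out_of_memory) {
  std::optional<Reloc> pending;            // "RelocationHolder r" in the patch
  if (is_oop) {
    pending = Reloc{"oop"};
  }
  const void* slot = reserve_constant_slot(out_of_memory);
  if (slot == nullptr) {
    return;                                // record_out_of_memory_failure(); no relocation emitted
  }
  if (pending) {
    apply_relocation(*pending);            // "__ relocate(r); // If set above."
  }
  // ... emit the load from the TOC slot ...
}

int main() {
  emit_toc_constant(true, false);  // relocation applied
  emit_toc_constant(true, true);   // bails out before any relocation
}
```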
@@ -3452,7 +3427,7 @@ encode %{
__ bl(__ pc()); // Emits a relocation.
// The stub for call to interpreter.
- address stub = CompiledStaticCall::emit_to_interp_stub(cbuf);
+ address stub = CompiledDirectCall::emit_to_interp_stub(cbuf);
if (stub == nullptr) {
ciEnv::current()->record_failure("CodeCache is full");
return;
@@ -3507,7 +3482,7 @@ encode %{
// Create the nodes for loading the IC from the TOC.
loadConLNodesTuple loadConLNodes_IC =
- loadConLNodesTuple_create(ra_, n_toc, new immLOper((jlong)Universe::non_oop_word()),
+ loadConLNodesTuple_create(ra_, n_toc, new immLOper((jlong) Universe::non_oop_word()),
OptoReg::Name(R19_H_num), OptoReg::Name(R19_num));
// Create the call node.
diff --git a/src/hotspot/cpu/ppc/sharedRuntime_ppc.cpp b/src/hotspot/cpu/ppc/sharedRuntime_ppc.cpp
index ebe918785edc0..5a080adc7a9fa 100644
--- a/src/hotspot/cpu/ppc/sharedRuntime_ppc.cpp
+++ b/src/hotspot/cpu/ppc/sharedRuntime_ppc.cpp
@@ -27,7 +27,6 @@
#include "asm/macroAssembler.inline.hpp"
#include "code/debugInfoRec.hpp"
#include "code/compiledIC.hpp"
-#include "code/icBuffer.hpp"
#include "code/vtableStubs.hpp"
#include "frame_ppc.hpp"
#include "compiler/oopMap.hpp"
@@ -35,7 +34,6 @@
#include "interpreter/interpreter.hpp"
#include "interpreter/interp_masm.hpp"
#include "memory/resourceArea.hpp"
-#include "oops/compiledICHolder.hpp"
#include "oops/klass.inline.hpp"
#include "prims/methodHandles.hpp"
#include "runtime/continuation.hpp"
@@ -1174,8 +1172,8 @@ AdapterHandlerEntry* SharedRuntime::generate_i2c2i_adapters(MacroAssembler *masm
BLOCK_COMMENT("c2i unverified entry");
c2i_unverified_entry = __ pc();
- // inline_cache contains a compiledICHolder
- const Register ic = R19_method;
+ // inline_cache contains a CompiledICData
+ const Register ic = R19_inline_cache_reg;
const Register ic_klass = R11_scratch1;
const Register receiver_klass = R12_scratch2;
const Register code = R21_tmp1;
@@ -1186,45 +1184,10 @@ AdapterHandlerEntry* SharedRuntime::generate_i2c2i_adapters(MacroAssembler *masm
Label call_interpreter;
- assert(!MacroAssembler::needs_explicit_null_check(oopDesc::klass_offset_in_bytes()),
- "klass offset should reach into any page");
- // Check for null argument if we don't have implicit null checks.
- if (!ImplicitNullChecks || !os::zero_page_read_protected()) {
- if (TrapBasedNullChecks) {
- __ trap_null_check(R3_ARG1);
- } else {
- Label valid;
- __ cmpdi(CCR0, R3_ARG1, 0);
- __ bne_predict_taken(CCR0, valid);
- // We have a null argument, branch to ic_miss_stub.
- __ b64_patchable((address)SharedRuntime::get_ic_miss_stub(),
- relocInfo::runtime_call_type);
- __ BIND(valid);
- }
- }
- // Assume argument is not null, load klass from receiver.
- __ load_klass(receiver_klass, R3_ARG1);
-
- __ ld(ic_klass, CompiledICHolder::holder_klass_offset(), ic);
-
- if (TrapBasedICMissChecks) {
- __ trap_ic_miss_check(receiver_klass, ic_klass);
- } else {
- Label valid;
- __ cmpd(CCR0, receiver_klass, ic_klass);
- __ beq_predict_taken(CCR0, valid);
- // We have an unexpected klass, branch to ic_miss_stub.
- __ b64_patchable((address)SharedRuntime::get_ic_miss_stub(),
- relocInfo::runtime_call_type);
- __ BIND(valid);
- }
-
+ __ ic_check(4 /* end_alignment */);
+ __ ld(R19_method, CompiledICData::speculated_method_offset(), ic);
// Argument is valid and klass is as expected, continue.
- // Extract method from inline cache, verified entry point needs it.
- __ ld(R19_method, CompiledICHolder::holder_metadata_offset(), ic);
- assert(R19_method == ic, "the inline cache register is dead here");
-
__ ld(code, method_(code));
__ cmpdi(CCR0, code, 0);
__ ld(ientry, method_(interpreter_entry)); // preloaded
@@ -1798,7 +1761,7 @@ static void gen_continuation_enter(MacroAssembler* masm,
// static stub for the call above
CodeBuffer* cbuf = masm->code_section()->outer();
- stub = CompiledStaticCall::emit_to_interp_stub(*cbuf, c2i_call_pc);
+ stub = CompiledDirectCall::emit_to_interp_stub(*cbuf, c2i_call_pc);
guarantee(stub != nullptr, "no space for static stub");
}
@@ -1891,7 +1854,7 @@ static void gen_continuation_enter(MacroAssembler* masm,
// static stub for the call above
CodeBuffer* cbuf = masm->code_section()->outer();
- stub = CompiledStaticCall::emit_to_interp_stub(*cbuf, call_pc);
+ stub = CompiledDirectCall::emit_to_interp_stub(*cbuf, call_pc);
guarantee(stub != nullptr, "no space for static stub");
}
@@ -2188,7 +2151,6 @@ nmethod *SharedRuntime::generate_native_wrapper(MacroAssembler *masm,
intptr_t frame_done_pc;
intptr_t oopmap_pc;
- Label ic_miss;
Label handle_pending_exception;
Register r_callers_sp = R21;
@@ -2212,19 +2174,9 @@ nmethod *SharedRuntime::generate_native_wrapper(MacroAssembler *masm,
// Check ic: object class == cached class?
if (!method_is_static) {
- Register ic = R19_inline_cache_reg;
- Register receiver_klass = r_temp_1;
-
- __ cmpdi(CCR0, R3_ARG1, 0);
- __ beq(CCR0, ic_miss);
- __ verify_oop(R3_ARG1, FILE_AND_LINE);
- __ load_klass(receiver_klass, R3_ARG1);
-
- __ cmpd(CCR0, receiver_klass, ic);
- __ bne(CCR0, ic_miss);
+ __ ic_check(4 /* end_alignment */);
}
-
// Generate the Verified Entry Point (VEP).
// --------------------------------------------------------------------------
vep_start_pc = (intptr_t)__ pc();
@@ -2704,16 +2656,6 @@ nmethod *SharedRuntime::generate_native_wrapper(MacroAssembler *masm,
__ b64_patchable((address)StubRoutines::forward_exception_entry(),
relocInfo::runtime_call_type);
- // Handler for a cache miss (out-of-line).
- // --------------------------------------------------------------------------
-
- if (!method_is_static) {
- __ bind(ic_miss);
-
- __ b64_patchable((address)SharedRuntime::get_ic_miss_stub(),
- relocInfo::runtime_call_type);
- }
-
// Done.
// --------------------------------------------------------------------------
diff --git a/src/hotspot/cpu/ppc/upcallLinker_ppc.cpp b/src/hotspot/cpu/ppc/upcallLinker_ppc.cpp
index a1c8f46ce2d57..b60fd4f16d163 100644
--- a/src/hotspot/cpu/ppc/upcallLinker_ppc.cpp
+++ b/src/hotspot/cpu/ppc/upcallLinker_ppc.cpp
@@ -243,10 +243,14 @@ address UpcallLinker::make_upcall_stub(jobject receiver, Method* entry,
__ load_const_optimized(R19_method, (intptr_t)entry);
__ std(R19_method, in_bytes(JavaThread::callee_target_offset()), R16_thread);
+ __ push_cont_fastpath();
+
__ ld(call_target_address, in_bytes(Method::from_compiled_offset()), R19_method);
__ mtctr(call_target_address);
__ bctrl();
+ __ pop_cont_fastpath();
+
// return value shuffle
if (!needs_return_buffer) {
// CallArranger can pick a return type that goes in the same reg for both CCs.
diff --git a/src/hotspot/cpu/ppc/vtableStubs_ppc_64.cpp b/src/hotspot/cpu/ppc/vtableStubs_ppc_64.cpp
index fe4eb3df8f12f..28ba04d833bed 100644
--- a/src/hotspot/cpu/ppc/vtableStubs_ppc_64.cpp
+++ b/src/hotspot/cpu/ppc/vtableStubs_ppc_64.cpp
@@ -25,10 +25,10 @@
#include "precompiled.hpp"
#include "asm/macroAssembler.inline.hpp"
+#include "code/compiledIC.hpp"
#include "code/vtableStubs.hpp"
#include "interp_masm_ppc.hpp"
#include "memory/resourceArea.hpp"
-#include "oops/compiledICHolder.hpp"
#include "oops/instanceKlass.hpp"
#include "oops/klass.inline.hpp"
#include "oops/klassVtable.hpp"
@@ -181,13 +181,13 @@ VtableStub* VtableStubs::create_itable_stub(int itable_index) {
__ load_klass_check_null(rcvr_klass, R3_ARG1);
// Receiver subtype check against REFC.
- __ ld(interface, CompiledICHolder::holder_klass_offset(), R19_method);
+ __ ld(interface, CompiledICData::itable_refc_klass_offset(), R19_method);
__ lookup_interface_method(rcvr_klass, interface, noreg,
R0, tmp1, tmp2,
L_no_such_interface, /*return_method=*/ false);
// Get Method* and entrypoint for compiler
- __ ld(interface, CompiledICHolder::holder_metadata_offset(), R19_method);
+ __ ld(interface, CompiledICData::itable_defc_klass_offset(), R19_method);
__ lookup_interface_method(rcvr_klass, interface, itable_index,
R19_method, tmp1, tmp2,
L_no_such_interface, /*return_method=*/ true);
diff --git a/src/hotspot/cpu/riscv/c1_LIRAssembler_riscv.cpp b/src/hotspot/cpu/riscv/c1_LIRAssembler_riscv.cpp
index 0bbf3771a04bc..5d0fa3fad3cec 100644
--- a/src/hotspot/cpu/riscv/c1_LIRAssembler_riscv.cpp
+++ b/src/hotspot/cpu/riscv/c1_LIRAssembler_riscv.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2000, 2023, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2024, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2014, 2020, Red Hat Inc. All rights reserved.
* Copyright (c) 2020, 2023, Huawei Technologies Co., Ltd. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
@@ -51,7 +51,6 @@
#endif
NEEDS_CLEANUP // remove this definitions ?
-const Register IC_Klass = t1; // where the IC klass is cached
const Register SYNC_header = x10; // synchronization header
const Register SHIFT_count = x10; // where count for shift operations must be
@@ -265,26 +264,7 @@ void LIR_Assembler::osr_entry() {
// inline cache check; done before the frame is built.
int LIR_Assembler::check_icache() {
- Register receiver = FrameMap::receiver_opr->as_register();
- Register ic_klass = IC_Klass;
- int start_offset = __ offset();
- Label dont;
- __ inline_cache_check(receiver, ic_klass, dont);
-
- // if icache check fails, then jump to runtime routine
- // Note: RECEIVER must still contain the receiver!
- __ far_jump(RuntimeAddress(SharedRuntime::get_ic_miss_stub()));
-
- // We align the verified entry point unless the method body
- // (including its inline cache check) will fit in a single 64-byte
- // icache line.
- if (!method()->is_accessor() || __ offset() - start_offset > 4 * 4) {
- // force alignment after the cache check.
- __ align(CodeEntryAlignment);
- }
-
- __ bind(dont);
- return start_offset;
+ return __ ic_check(CodeEntryAlignment);
}
void LIR_Assembler::jobject2reg(jobject o, Register reg) {
@@ -1040,7 +1020,7 @@ void LIR_Assembler::emit_alloc_array(LIR_OpAllocArray* op) {
len,
tmp1,
tmp2,
- arrayOopDesc::header_size(op->type()),
+ arrayOopDesc::base_offset_in_bytes(op->type()),
array_element_size(op->type()),
op->klass()->as_register(),
*op->stub()->entry());
@@ -1398,7 +1378,7 @@ void LIR_Assembler::emit_static_call_stub() {
__ relocate(static_stub_Relocation::spec(call_pc));
__ emit_static_call_stub();
- assert(__ offset() - start + CompiledStaticCall::to_trampoline_stub_size()
+ assert(__ offset() - start + CompiledDirectCall::to_trampoline_stub_size()
<= call_stub_size(), "stub too big");
__ end_a_stub();
}
diff --git a/src/hotspot/cpu/riscv/c1_LIRAssembler_riscv.hpp b/src/hotspot/cpu/riscv/c1_LIRAssembler_riscv.hpp
index b088498e6fc08..ce23213776c08 100644
--- a/src/hotspot/cpu/riscv/c1_LIRAssembler_riscv.hpp
+++ b/src/hotspot/cpu/riscv/c1_LIRAssembler_riscv.hpp
@@ -68,7 +68,7 @@ friend class ArrayCopyStub;
enum {
// See emit_static_call_stub for detail
- // CompiledStaticCall::to_interp_stub_size() (14) + CompiledStaticCall::to_trampoline_stub_size() (1 + 3 + address)
+ // CompiledDirectCall::to_interp_stub_size() (14) + CompiledDirectCall::to_trampoline_stub_size() (1 + 3 + address)
_call_stub_size = 14 * NativeInstruction::instruction_size +
(NativeInstruction::instruction_size + NativeCallTrampolineStub::instruction_size),
// See emit_exception_handler for detail
diff --git a/src/hotspot/cpu/riscv/c1_MacroAssembler_riscv.cpp b/src/hotspot/cpu/riscv/c1_MacroAssembler_riscv.cpp
index 6c1dce0de1598..770dd6a9d0f37 100644
--- a/src/hotspot/cpu/riscv/c1_MacroAssembler_riscv.cpp
+++ b/src/hotspot/cpu/riscv/c1_MacroAssembler_riscv.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1999, 2023, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2024, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2014, Red Hat Inc. All rights reserved.
* Copyright (c) 2020, 2022, Huawei Technologies Co., Ltd. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
@@ -280,7 +280,7 @@ void C1_MacroAssembler::initialize_object(Register obj, Register klass, Register
verify_oop(obj);
}
-void C1_MacroAssembler::allocate_array(Register obj, Register len, Register tmp1, Register tmp2, int header_size, int f, Register klass, Label& slow_case) {
+void C1_MacroAssembler::allocate_array(Register obj, Register len, Register tmp1, Register tmp2, int base_offset_in_bytes, int f, Register klass, Label& slow_case) {
assert_different_registers(obj, len, tmp1, tmp2, klass);
// determine alignment mask
@@ -292,7 +292,7 @@ void C1_MacroAssembler::allocate_array(Register obj, Register len, Register tmp1
const Register arr_size = tmp2; // okay to be the same
// align object end
- mv(arr_size, (int32_t)header_size * BytesPerWord + MinObjAlignmentInBytesMask);
+ mv(arr_size, (int32_t)base_offset_in_bytes + MinObjAlignmentInBytesMask);
shadd(arr_size, len, arr_size, t0, f);
andi(arr_size, arr_size, ~(uint)MinObjAlignmentInBytesMask);
@@ -300,9 +300,20 @@ void C1_MacroAssembler::allocate_array(Register obj, Register len, Register tmp1
initialize_header(obj, klass, len, tmp1, tmp2);
+ // Clear leading 4 bytes, if necessary.
+ // TODO: This could perhaps go into initialize_body() and also clear the leading 4 bytes
+ // for non-array objects, thereby replacing the klass-gap clearing code in initialize_header().
+ int base_offset = base_offset_in_bytes;
+ if (!is_aligned(base_offset, BytesPerWord)) {
+ assert(is_aligned(base_offset, BytesPerInt), "must be 4-byte aligned");
+ sw(zr, Address(obj, base_offset));
+ base_offset += BytesPerInt;
+ }
+ assert(is_aligned(base_offset, BytesPerWord), "must be word-aligned");
+
// clear rest of allocated space
const Register len_zero = len;
- initialize_body(obj, arr_size, header_size * BytesPerWord, len_zero);
+ initialize_body(obj, arr_size, base_offset, len_zero);
membar(MacroAssembler::StoreStore);
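The new 4-byte store covers the case where the element base is only int aligned, so that `initialize_body()` can then run purely on word-aligned, word-sized stores. In plain C++ terms (word size and the example offset are assumptions about a 64-bit layout):

```
#include <cassert>
#include <cstdint>
#include <cstring>

constexpr int kBytesPerWord = 8;   // assumption: 64-bit word size
constexpr int kBytesPerInt  = 4;

// If the element base is only int aligned (e.g. a 20-byte base offset), zero that
// int explicitly, then let the word-wise clearing loop start on a word boundary.
void clear_array_body(uint8_t* obj, int base_offset_in_bytes, int body_size_in_bytes) {
  int offset = base_offset_in_bytes;
  if (offset % kBytesPerWord != 0) {
    assert(offset % kBytesPerInt == 0 && "must be 4-byte aligned");
    std::memset(obj + offset, 0, kBytesPerInt);     // the new sw(zr, Address(obj, base_offset))
    offset += kBytesPerInt;
    body_size_in_bytes -= kBytesPerInt;
  }
  std::memset(obj + offset, 0, body_size_in_bytes); // word-wise initialize_body(...)
}

int main() {
  uint8_t raw[64];
  std::memset(raw, 0xff, sizeof raw);
  clear_array_body(raw, 20, 40);       // 10 ints after a 20-byte base
  for (int i = 20; i < 60; i++) assert(raw[i] == 0);
}
```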
@@ -314,15 +325,6 @@ void C1_MacroAssembler::allocate_array(Register obj, Register len, Register tmp1
verify_oop(obj);
}
-void C1_MacroAssembler::inline_cache_check(Register receiver, Register iCache, Label &L) {
- verify_oop(receiver);
- // explicit null check not needed since load from [klass_offset] causes a trap
- // check against inline cache
- assert(!MacroAssembler::needs_explicit_null_check(oopDesc::klass_offset_in_bytes()), "must add explicit null check");
- assert_different_registers(receiver, iCache, t0, t2);
- cmp_klass(receiver, iCache, t0, t2 /* call-clobbered t2 as a tmp */, L);
-}
-
void C1_MacroAssembler::build_frame(int framesize, int bang_size_in_bytes) {
assert(bang_size_in_bytes >= framesize, "stack bang size incorrect");
// Make sure there is enough stack space for this method's activation.
diff --git a/src/hotspot/cpu/riscv/c1_MacroAssembler_riscv.hpp b/src/hotspot/cpu/riscv/c1_MacroAssembler_riscv.hpp
index b737a438511c8..2d7f8d7485d4f 100644
--- a/src/hotspot/cpu/riscv/c1_MacroAssembler_riscv.hpp
+++ b/src/hotspot/cpu/riscv/c1_MacroAssembler_riscv.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1999, 2023, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2024, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2014, 2015, Red Hat Inc. All rights reserved.
* Copyright (c) 2020, 2022, Huawei Technologies Co., Ltd. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
@@ -101,7 +101,7 @@ using MacroAssembler::null_check;
// header_size: size of object header in words
// f : element scale factor
// slow_case : exit to slow case implementation if fast allocation fails
- void allocate_array(Register obj, Register len, Register tmp1, Register tmp2, int header_size, int f, Register klass, Label& slow_case);
+ void allocate_array(Register obj, Register len, Register tmp1, Register tmp2, int base_offset_in_bytes, int f, Register klass, Label& slow_case);
int rsp_offset() const { return _rsp_offset; }
diff --git a/src/hotspot/cpu/riscv/c1_Runtime1_riscv.cpp b/src/hotspot/cpu/riscv/c1_Runtime1_riscv.cpp
index b76163a30841d..9fa8939837a85 100644
--- a/src/hotspot/cpu/riscv/c1_Runtime1_riscv.cpp
+++ b/src/hotspot/cpu/riscv/c1_Runtime1_riscv.cpp
@@ -37,7 +37,6 @@
#include "interpreter/interpreter.hpp"
#include "memory/universe.hpp"
#include "nativeInst_riscv.hpp"
-#include "oops/compiledICHolder.hpp"
#include "oops/oop.inline.hpp"
#include "prims/jvmtiExport.hpp"
#include "register_riscv.hpp"
diff --git a/src/hotspot/cpu/riscv/compiledIC_riscv.cpp b/src/hotspot/cpu/riscv/compiledIC_riscv.cpp
index e29dee56de8d8..fdb2bcb06ff97 100644
--- a/src/hotspot/cpu/riscv/compiledIC_riscv.cpp
+++ b/src/hotspot/cpu/riscv/compiledIC_riscv.cpp
@@ -27,7 +27,6 @@
#include "precompiled.hpp"
#include "asm/macroAssembler.inline.hpp"
#include "code/compiledIC.hpp"
-#include "code/icBuffer.hpp"
#include "code/nmethod.hpp"
#include "logging/log.hpp"
#include "memory/resourceArea.hpp"
@@ -37,7 +36,7 @@
// ----------------------------------------------------------------------------
#define __ _masm.
-address CompiledStaticCall::emit_to_interp_stub(CodeBuffer &cbuf, address mark) {
+address CompiledDirectCall::emit_to_interp_stub(CodeBuffer &cbuf, address mark) {
precond(cbuf.stubs()->start() != badAddress);
precond(cbuf.stubs()->end() != badAddress);
// Stub is fixed up when the corresponding call is converted from
@@ -69,11 +68,11 @@ address CompiledStaticCall::emit_to_interp_stub(CodeBuffer &cbuf, address mark)
}
#undef __
-int CompiledStaticCall::to_interp_stub_size() {
+int CompiledDirectCall::to_interp_stub_size() {
return MacroAssembler::static_call_stub_size();
}
-int CompiledStaticCall::to_trampoline_stub_size() {
+int CompiledDirectCall::to_trampoline_stub_size() {
// Somewhat pessimistically, we count 4 instructions here (although
// there are only 3) because we sometimes emit an alignment nop.
// Trampoline stubs are always word aligned.
@@ -81,21 +80,14 @@ int CompiledStaticCall::to_trampoline_stub_size() {
}
// Relocation entries for call stub, compiled java to interpreter.
-int CompiledStaticCall::reloc_to_interp_stub() {
+int CompiledDirectCall::reloc_to_interp_stub() {
return 4; // 3 in emit_to_interp_stub + 1 in emit_call
}
-void CompiledDirectStaticCall::set_to_interpreted(const methodHandle& callee, address entry) {
+void CompiledDirectCall::set_to_interpreted(const methodHandle& callee, address entry) {
address stub = find_stub();
guarantee(stub != nullptr, "stub not found");
- {
- ResourceMark rm;
- log_trace(inlinecache)("CompiledDirectStaticCall@" INTPTR_FORMAT ": set_to_interpreted %s",
- p2i(instruction_address()),
- callee->name_and_sig_as_C_string());
- }
-
// Creation also verifies the object.
NativeMovConstReg* method_holder
= nativeMovConstReg_at(stub);
@@ -112,7 +104,7 @@ void CompiledDirectStaticCall::set_to_interpreted(const methodHandle& callee, ad
set_destination_mt_safe(stub);
}
-void CompiledDirectStaticCall::set_stub_to_clean(static_stub_Relocation* static_stub) {
+void CompiledDirectCall::set_stub_to_clean(static_stub_Relocation* static_stub) {
// Reset stub.
address stub = static_stub->addr();
assert(stub != nullptr, "stub not found");
@@ -129,7 +121,7 @@ void CompiledDirectStaticCall::set_stub_to_clean(static_stub_Relocation* static_
// Non-product mode code
#ifndef PRODUCT
-void CompiledDirectStaticCall::verify() {
+void CompiledDirectCall::verify() {
// Verify call.
_call->verify();
_call->verify_alignment();
diff --git a/src/hotspot/cpu/riscv/gc/g1/g1BarrierSetAssembler_riscv.cpp b/src/hotspot/cpu/riscv/gc/g1/g1BarrierSetAssembler_riscv.cpp
index 5d0b0fb472934..fa7df32d7e944 100644
--- a/src/hotspot/cpu/riscv/gc/g1/g1BarrierSetAssembler_riscv.cpp
+++ b/src/hotspot/cpu/riscv/gc/g1/g1BarrierSetAssembler_riscv.cpp
@@ -29,8 +29,8 @@
#include "gc/g1/g1BarrierSetAssembler.hpp"
#include "gc/g1/g1BarrierSetRuntime.hpp"
#include "gc/g1/g1CardTable.hpp"
+#include "gc/g1/g1HeapRegion.hpp"
#include "gc/g1/g1ThreadLocalData.hpp"
-#include "gc/g1/heapRegion.hpp"
#include "gc/shared/collectedHeap.hpp"
#include "interpreter/interp_masm.hpp"
#include "runtime/javaThread.hpp"
diff --git a/src/hotspot/cpu/riscv/globalDefinitions_riscv.hpp b/src/hotspot/cpu/riscv/globalDefinitions_riscv.hpp
index e368bbdc9141f..68cd51ece5f70 100644
--- a/src/hotspot/cpu/riscv/globalDefinitions_riscv.hpp
+++ b/src/hotspot/cpu/riscv/globalDefinitions_riscv.hpp
@@ -50,6 +50,9 @@ const bool CCallingConventionRequiresIntsAsLongs = false;
#define USE_POINTERS_TO_REGISTER_IMPL_ARRAY
+// auipc useable for all cc -> cc calls and jumps
+#define CODE_CACHE_SIZE_LIMIT ((2*G)-(2*K))
+
// The expected size in bytes of a cache line.
#define DEFAULT_CACHE_LINE_SIZE 64
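The new 2 GB minus 2 KB cap appears to match the reach of an `auipc`-based pc-relative sequence: the upper 20 bits cover roughly plus or minus 2 GB and the signed 12-bit low part costs about 2 KB at the positive end. A quick standalone check of that arithmetic (my reading of the encoding, not something stated in the patch):

```
#include <cstdint>

constexpr int64_t K = 1024;
constexpr int64_t G = 1024 * 1024 * 1024;

// auipc adds a sign-extended 20-bit immediate shifted left by 12 to the pc;
// the following jalr/load adds a sign-extended 12-bit immediate on top.
constexpr int64_t auipc_max = (int64_t(1) << 31) - 4096;
constexpr int64_t auipc_min = -(int64_t(1) << 31);
constexpr int64_t lo12_max  = 2047;
constexpr int64_t lo12_min  = -2048;

constexpr int64_t max_forward_reach  = auipc_max + lo12_max;      //  2^31 - 2049
constexpr int64_t max_backward_reach = -(auipc_min + lo12_min);   //  2^31 + 2048

// If the code cache is at most 2G - 2K bytes, any displacement between two
// addresses inside it is at most (2G - 2K) - 1 bytes, which is exactly the
// largest forward offset an auipc pair can encode.
static_assert((2 * G - 2 * K) - 1 <= max_forward_reach,  "forward cc -> cc reach");
static_assert((2 * G - 2 * K) - 1 <= max_backward_reach, "backward cc -> cc reach");

int main() { return 0; }
```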
diff --git a/src/hotspot/cpu/riscv/icBuffer_riscv.cpp b/src/hotspot/cpu/riscv/icBuffer_riscv.cpp
deleted file mode 100644
index ab904817816fc..0000000000000
--- a/src/hotspot/cpu/riscv/icBuffer_riscv.cpp
+++ /dev/null
@@ -1,78 +0,0 @@
-/*
- * Copyright (c) 1997, 2023, Oracle and/or its affiliates. All rights reserved.
- * Copyright (c) 2014, Red Hat Inc. All rights reserved.
- * Copyright (c) 2020, 2023, Huawei Technologies Co., Ltd. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#include "precompiled.hpp"
-#include "asm/macroAssembler.hpp"
-#include "asm/macroAssembler.inline.hpp"
-#include "code/icBuffer.hpp"
-#include "gc/shared/collectedHeap.inline.hpp"
-#include "interpreter/bytecodes.hpp"
-#include "memory/resourceArea.hpp"
-#include "nativeInst_riscv.hpp"
-#include "oops/oop.inline.hpp"
-
-int InlineCacheBuffer::ic_stub_code_size() {
- // 6: auipc + ld + auipc + jalr + address(2 * instruction_size)
- return 6 * NativeInstruction::instruction_size;
-}
-
-#define __ masm->
-
-void InlineCacheBuffer::assemble_ic_buffer_code(address code_begin, void* cached_value, address entry_point) {
- assert_cond(code_begin != nullptr && entry_point != nullptr);
- ResourceMark rm;
- CodeBuffer code(code_begin, ic_stub_code_size());
- MacroAssembler* masm = new MacroAssembler(&code);
- // Note: even though the code contains an embedded value, we do not need reloc info
- // because
- // (1) the value is old (i.e., doesn't matter for scavenges)
- // (2) these ICStubs are removed *before* a GC happens, so the roots disappear
-
- address start = __ pc();
- Label l;
- __ ld(t1, l);
- __ far_jump(ExternalAddress(entry_point));
- __ align(wordSize);
- __ bind(l);
- __ emit_int64((intptr_t)cached_value);
- // Only need to invalidate the 1st two instructions - not the whole ic stub
- ICache::invalidate_range(code_begin, InlineCacheBuffer::ic_stub_code_size());
- assert(__ pc() - start == ic_stub_code_size(), "must be");
-}
-
-address InlineCacheBuffer::ic_buffer_entry_point(address code_begin) {
- NativeMovConstReg* move = nativeMovConstReg_at(code_begin); // creation also verifies the object
- NativeJump* jump = nativeJump_at(move->next_instruction_address());
- return jump->jump_destination();
-}
-
-
-void* InlineCacheBuffer::ic_buffer_cached_value(address code_begin) {
- // The word containing the cached value is at the end of this IC buffer
- uintptr_t *p = (uintptr_t *)(code_begin + ic_stub_code_size() - wordSize);
- void* o = (void*)*p;
- return o;
-}
diff --git a/src/hotspot/cpu/riscv/macroAssembler_riscv.cpp b/src/hotspot/cpu/riscv/macroAssembler_riscv.cpp
index ce336c16aa718..96e07319e843f 100644
--- a/src/hotspot/cpu/riscv/macroAssembler_riscv.cpp
+++ b/src/hotspot/cpu/riscv/macroAssembler_riscv.cpp
@@ -27,6 +27,7 @@
#include "precompiled.hpp"
#include "asm/assembler.hpp"
#include "asm/assembler.inline.hpp"
+#include "code/compiledIC.hpp"
#include "compiler/disassembler.hpp"
#include "gc/shared/barrierSet.hpp"
#include "gc/shared/barrierSetAssembler.hpp"
@@ -634,8 +635,8 @@ void MacroAssembler::unimplemented(const char* what) {
}
void MacroAssembler::emit_static_call_stub() {
- IncompressibleRegion ir(this); // Fixed length: see CompiledStaticCall::to_interp_stub_size().
- // CompiledDirectStaticCall::set_to_interpreted knows the
+ IncompressibleRegion ir(this); // Fixed length: see CompiledDirectCall::to_interp_stub_size().
+ // CompiledDirectCall::set_to_interpreted knows the
// exact layout of this stub.
mov_metadata(xmethod, (Metadata*)nullptr);
@@ -2542,7 +2543,7 @@ void MacroAssembler::lookup_interface_method(Register recv_klass,
}
// Look up the method for a megamorphic invokeinterface call in a single pass over itable:
-// - check recv_klass (actual object class) is a subtype of resolved_klass from CompiledICHolder
+// - check recv_klass (actual object class) is a subtype of resolved_klass from CompiledICData
// - find a holder_klass (class that implements the method) vtable offset and get the method from vtable by index
// The target method is determined by .
// The receiver klass is in recv_klass.
@@ -3542,6 +3543,48 @@ address MacroAssembler::ic_call(address entry, jint method_index) {
return trampoline_call(Address(entry, rh));
}
+int MacroAssembler::ic_check_size() {
+ // No compressed (RVC) instructions: ic_check() emits this sequence inside an IncompressibleRegion, so the size computed below is exact.
+ return (NativeInstruction::instruction_size * (2 /* 2 loads */ + 1 /* branch */)) +
+ far_branch_size();
+}
+
+int MacroAssembler::ic_check(int end_alignment) {
+ IncompressibleRegion ir(this);
+ Register receiver = j_rarg0;
+ Register data = t1;
+
+ Register tmp1 = t0; // t0 always scratch
+ // t2 is saved on call, thus should have been saved before this check.
+ // Hence we can clobber it.
+ Register tmp2 = t2;
+
+ // The UEP of a code blob ensures that the VEP is padded. However, the padding of the UEP is placed
+ // before the inline cache check, so we don't have to execute any nop instructions when dispatching
+ // through the UEP, yet we can ensure that the VEP is aligned appropriately. That's why we align
+ // before the inline cache check here, and not after
+ align(end_alignment, ic_check_size());
+ int uep_offset = offset();
+
+ if (UseCompressedClassPointers) {
+ lwu(tmp1, Address(receiver, oopDesc::klass_offset_in_bytes()));
+ lwu(tmp2, Address(data, CompiledICData::speculated_klass_offset()));
+ } else {
+ ld(tmp1, Address(receiver, oopDesc::klass_offset_in_bytes()));
+ ld(tmp2, Address(data, CompiledICData::speculated_klass_offset()));
+ }
+
+ Label ic_hit;
+ beq(tmp1, tmp2, ic_hit);
+ // Note: far_jump is not fixed-size.
+ // If it ever generates a movptr here, the alignment/size will be off.
+ far_jump(RuntimeAddress(SharedRuntime::get_ic_miss_stub()));
+ bind(ic_hit);
+
+ assert((offset() % end_alignment) == 0, "Misaligned verified entry point.");
+ return uep_offset;
+}
+
// Emit a trampoline stub for a call to a target which is too far away.
//
// code sequences:
diff --git a/src/hotspot/cpu/riscv/macroAssembler_riscv.hpp b/src/hotspot/cpu/riscv/macroAssembler_riscv.hpp
index d283654e6e179..63cfb22855180 100644
--- a/src/hotspot/cpu/riscv/macroAssembler_riscv.hpp
+++ b/src/hotspot/cpu/riscv/macroAssembler_riscv.hpp
@@ -1193,7 +1193,10 @@ class MacroAssembler: public Assembler {
//
// Return: the call PC or null if CodeCache is full.
address trampoline_call(Address entry);
+
address ic_call(address entry, jint method_index = 0);
+ static int ic_check_size();
+ int ic_check(int end_alignment = NativeInstruction::instruction_size);
// Support for memory inc/dec
// n.b. increment/decrement calls with an Address destination will
diff --git a/src/hotspot/cpu/riscv/riscv.ad b/src/hotspot/cpu/riscv/riscv.ad
index a6f0959942414..10a80cd094024 100644
--- a/src/hotspot/cpu/riscv/riscv.ad
+++ b/src/hotspot/cpu/riscv/riscv.ad
@@ -1808,14 +1808,13 @@ void MachUEPNode::format(PhaseRegAlloc* ra_, outputStream* st) const
assert_cond(st != nullptr);
st->print_cr("# MachUEPNode");
if (UseCompressedClassPointers) {
- st->print_cr("\tlwu t0, [j_rarg0, oopDesc::klass_offset_in_bytes()]\t# compressed klass");
- if (CompressedKlassPointers::shift() != 0) {
- st->print_cr("\tdecode_klass_not_null t0, t0");
- }
+ st->print_cr("\tlwu t0, [j_rarg0 + oopDesc::klass_offset_in_bytes()]\t# compressed klass");
+ st->print_cr("\tlwu t2, [t1 + CompiledICData::speculated_klass_offset()]\t# compressed klass");
} else {
- st->print_cr("\tld t0, [j_rarg0, oopDesc::klass_offset_in_bytes()]\t# compressed klass");
+ st->print_cr("\tld t0, [j_rarg0 + oopDesc::klass_offset_in_bytes()]\t# klass");
+ st->print_cr("\tld t2, [t1 + CompiledICData::speculated_klass_offset()]\t# klass");
}
- st->print_cr("\tbeq t0, t1, ic_hit");
+ st->print_cr("\tbeq t0, t2, ic_hit");
st->print_cr("\tj, SharedRuntime::_ic_miss_stub\t # Inline cache check");
st->print_cr("\tic_hit:");
}
@@ -1825,15 +1824,11 @@ void MachUEPNode::emit(CodeBuffer& cbuf, PhaseRegAlloc* ra_) const
{
// This is the unverified entry point.
C2_MacroAssembler _masm(&cbuf);
+ __ ic_check(CodeEntryAlignment);
- Label skip;
- __ cmp_klass(j_rarg0, t1, t0, t2 /* call-clobbered t2 as a tmp */, skip);
- __ far_jump(RuntimeAddress(SharedRuntime::get_ic_miss_stub()));
- __ bind(skip);
-
- // These NOPs are critical so that verified entry point is properly
- // 4 bytes aligned for patching by NativeJump::patch_verified_entry()
- __ align(NativeInstruction::instruction_size);
+ // Verified entry point must be properly 4 bytes aligned for patching by NativeJump::patch_verified_entry().
+ // ic_check() aligns to CodeEntryAlignment >= InteriorEntryAlignment(min 16) > NativeInstruction::instruction_size(4).
+ assert(((__ offset()) % CodeEntryAlignment) == 0, "Misaligned verified entry point");
}
uint MachUEPNode::size(PhaseRegAlloc* ra_) const
@@ -2402,7 +2397,7 @@ encode %{
cbuf.shared_stub_to_interp_for(_method, call - cbuf.insts_begin());
} else {
// Emit stub for static call
- address stub = CompiledStaticCall::emit_to_interp_stub(cbuf, call);
+ address stub = CompiledDirectCall::emit_to_interp_stub(cbuf, call);
if (stub == nullptr) {
ciEnv::current()->record_failure("CodeCache is full");
return;
diff --git a/src/hotspot/cpu/riscv/sharedRuntime_riscv.cpp b/src/hotspot/cpu/riscv/sharedRuntime_riscv.cpp
index 9f04e20ea3b73..7435b552d15de 100644
--- a/src/hotspot/cpu/riscv/sharedRuntime_riscv.cpp
+++ b/src/hotspot/cpu/riscv/sharedRuntime_riscv.cpp
@@ -29,7 +29,6 @@
#include "asm/macroAssembler.inline.hpp"
#include "code/compiledIC.hpp"
#include "code/debugInfoRec.hpp"
-#include "code/icBuffer.hpp"
#include "code/vtableStubs.hpp"
#include "compiler/oopMap.hpp"
#include "gc/shared/barrierSetAssembler.hpp"
@@ -38,7 +37,6 @@
#include "logging/log.hpp"
#include "memory/resourceArea.hpp"
#include "nativeInst_riscv.hpp"
-#include "oops/compiledICHolder.hpp"
#include "oops/klass.inline.hpp"
#include "oops/method.inline.hpp"
#include "prims/methodHandles.hpp"
@@ -622,10 +620,8 @@ AdapterHandlerEntry* SharedRuntime::generate_i2c2i_adapters(MacroAssembler *masm
address c2i_unverified_entry = __ pc();
Label skip_fixup;
- Label ok;
-
- const Register holder = t1;
const Register receiver = j_rarg0;
+ const Register data = t1;
const Register tmp = t2; // A call-clobbered register not used for arg passing
// -------------------------------------------------------------------------
@@ -639,16 +635,10 @@ AdapterHandlerEntry* SharedRuntime::generate_i2c2i_adapters(MacroAssembler *masm
{
__ block_comment("c2i_unverified_entry {");
- __ load_klass(t0, receiver, tmp);
- __ ld(tmp, Address(holder, CompiledICHolder::holder_klass_offset()));
- __ ld(xmethod, Address(holder, CompiledICHolder::holder_metadata_offset()));
- __ beq(t0, tmp, ok);
- __ far_jump(RuntimeAddress(SharedRuntime::get_ic_miss_stub()));
- __ bind(ok);
- // Method might have been compiled since the call site was patched to
- // interpreted; if that is the case treat it as a miss so we can get
- // the call site corrected.
+ __ ic_check();
+ __ ld(xmethod, Address(data, CompiledICData::speculated_method_offset()));
+
__ ld(t0, Address(xmethod, in_bytes(Method::code_offset())));
__ beqz(t0, skip_fixup);
__ far_jump(RuntimeAddress(SharedRuntime::get_ic_miss_stub()));
@@ -985,7 +975,7 @@ static void gen_continuation_enter(MacroAssembler* masm,
__ j(exit);
CodeBuffer* cbuf = masm->code_section()->outer();
- address stub = CompiledStaticCall::emit_to_interp_stub(*cbuf, tr_call);
+ address stub = CompiledDirectCall::emit_to_interp_stub(*cbuf, tr_call);
if (stub == nullptr) {
fatal("CodeCache is full at gen_continuation_enter");
}
@@ -1051,7 +1041,7 @@ static void gen_continuation_enter(MacroAssembler* masm,
}
CodeBuffer* cbuf = masm->code_section()->outer();
- address stub = CompiledStaticCall::emit_to_interp_stub(*cbuf, tr_call);
+ address stub = CompiledDirectCall::emit_to_interp_stub(*cbuf, tr_call);
if (stub == nullptr) {
fatal("CodeCache is full at gen_continuation_enter");
}
@@ -1425,19 +1415,10 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
const Register ic_reg = t1;
const Register receiver = j_rarg0;
- Label hit;
- Label exception_pending;
-
__ verify_oop(receiver);
- assert_different_registers(ic_reg, receiver, t0, t2);
- __ cmp_klass(receiver, ic_reg, t0, t2 /* call-clobbered t2 as a tmp */, hit);
-
- __ far_jump(RuntimeAddress(SharedRuntime::get_ic_miss_stub()));
+ assert_different_registers(receiver, t0, t1);
- // Verified entry point must be aligned
- __ align(8);
-
- __ bind(hit);
+ __ ic_check();
int vep_offset = ((intptr_t)__ pc()) - start;
@@ -1872,6 +1853,7 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
__ leave();
// Any exception pending?
+ Label exception_pending;
__ ld(t0, Address(xthread, in_bytes(Thread::pending_exception_offset())));
__ bnez(t0, exception_pending);
diff --git a/src/hotspot/cpu/riscv/stubGenerator_riscv.cpp b/src/hotspot/cpu/riscv/stubGenerator_riscv.cpp
index 4bd33d08f8928..bbdafb922cc46 100644
--- a/src/hotspot/cpu/riscv/stubGenerator_riscv.cpp
+++ b/src/hotspot/cpu/riscv/stubGenerator_riscv.cpp
@@ -4809,6 +4809,348 @@ class StubGenerator: public StubCodeGenerator {
return (address) start;
}
+
+ // ------------------------ SHA-1 intrinsic ------------------------
+
+ // K't =
+ // 5a827999, 0 <= t <= 19
+ // 6ed9eba1, 20 <= t <= 39
+ // 8f1bbcdc, 40 <= t <= 59
+ // ca62c1d6, 60 <= t <= 79
+ void sha1_prepare_k(Register cur_k, int round) {
+ assert(round >= 0 && round < 80, "must be");
+
+ static const int64_t ks[] = {0x5a827999, 0x6ed9eba1, 0x8f1bbcdc, 0xca62c1d6};
+ if ((round % 20) == 0) {
+ __ mv(cur_k, ks[round/20]);
+ }
+ }
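For reference, the constant schedule above in scalar form: one of four round constants, switched every 20 rounds, which is why the stub only reloads cur_k when round % 20 == 0. A minimal sketch, not code from the patch:

```c++
#include <cstdint>

// Scalar model of sha1_prepare_k (illustrative only).
static uint32_t sha1_k_ref(int t) {
  static const uint32_t K[4] = { 0x5a827999, 0x6ed9eba1, 0x8f1bbcdc, 0xca62c1d6 };
  return K[t / 20];   // constant changes only at t = 0, 20, 40, 60
}
```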
+
+ // W't =
+ // M't, 0 <= t <= 15
+ // ROTL'1(W't-3 ^ W't-8 ^ W't-14 ^ W't-16), 16 <= t <= 79
+ void sha1_prepare_w(Register cur_w, Register ws[], Register buf, int round) {
+ assert(round >= 0 && round < 80, "must be");
+
+ if (round < 16) {
+ // in the first 16 rounds, every register in ws[] contains two W't values, e.g.
+ // in ws[0], high part contains W't-0, low part contains W't-1,
+ // in ws[1], high part contains W't-2, low part contains W't-3,
+ // ...
+ // in ws[7], high part contains W't-14, low part contains W't-15.
+
+ if ((round % 2) == 0) {
+ __ ld(ws[round/2], Address(buf, (round/2) * 8));
+ // reverse bytes, as SHA-1 is defined in big-endian.
+ __ revb(ws[round/2], ws[round/2]);
+ __ srli(cur_w, ws[round/2], 32);
+ } else {
+ __ mv(cur_w, ws[round/2]);
+ }
+
+ return;
+ }
+
+ if ((round % 2) == 0) {
+ int idx = 16;
+ // W't = ROTL'1(W't-3 ^ W't-8 ^ W't-14 ^ W't-16), 16 <= t <= 79
+ __ srli(t1, ws[(idx-8)/2], 32);
+ __ xorr(t0, ws[(idx-3)/2], t1);
+
+ __ srli(t1, ws[(idx-14)/2], 32);
+ __ srli(cur_w, ws[(idx-16)/2], 32);
+ __ xorr(cur_w, cur_w, t1);
+
+ __ xorr(cur_w, cur_w, t0);
+ __ rolw_imm(cur_w, cur_w, 1, t0);
+
+ // copy the cur_w value to ws[8].
+ // now, valid w't values are at:
+ // w0: ws[0]'s lower 32 bits
+ // w1 ~ w14: ws[1] ~ ws[7]
+ // w15: ws[8]'s higher 32 bits
+ __ slli(ws[idx/2], cur_w, 32);
+
+ return;
+ }
+
+ int idx = 17;
+ // W't = ROTL'1(W't-3 ^ W't-8 ^ W't-14 ^ W't-16), 16 <= t <= 79
+ __ srli(t1, ws[(idx-3)/2], 32);
+ __ xorr(t0, t1, ws[(idx-8)/2]);
+
+ __ xorr(cur_w, ws[(idx-16)/2], ws[(idx-14)/2]);
+
+ __ xorr(cur_w, cur_w, t0);
+ __ rolw_imm(cur_w, cur_w, 1, t0);
+
+ // copy the cur_w value to ws[8]
+ __ zero_extend(cur_w, cur_w, 32);
+ __ orr(ws[idx/2], ws[idx/2], cur_w);
+
+ // shift the w't registers, so they start from ws[0] again.
+ // now, valid w't values are at:
+ // w0 ~ w15: ws[0] ~ ws[7]
+ Register ws_0 = ws[0];
+ for (int i = 0; i < 16/2; i++) {
+ ws[i] = ws[i+1];
+ }
+ ws[8] = ws_0;
+ }
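The packed-register bookkeeping above implements the standard SHA-1 message schedule. A scalar sketch of that schedule, for cross-checking only; the stub keeps two W values per 64-bit register instead of an 80-entry array:

```c++
#include <cstdint>

// W[t] = M[t] for t < 16, otherwise ROTL1(W[t-3] ^ W[t-8] ^ W[t-14] ^ W[t-16]).
static inline uint32_t rotl1(uint32_t v) { return (v << 1) | (v >> 31); }

static void sha1_schedule_ref(const uint32_t m[16], uint32_t w[80]) {
  for (int t = 0; t < 16; t++) {
    w[t] = m[t];   // message words already byte-swapped, cf. the revb above
  }
  for (int t = 16; t < 80; t++) {
    w[t] = rotl1(w[t - 3] ^ w[t - 8] ^ w[t - 14] ^ w[t - 16]);
  }
}
```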
+
+ // f't(x, y, z) =
+ // Ch(x, y, z) = (x & y) ^ (~x & z) , 0 <= t <= 19
+ // Parity(x, y, z) = x ^ y ^ z , 20 <= t <= 39
+ // Maj(x, y, z) = (x & y) ^ (x & z) ^ (y & z) , 40 <= t <= 59
+ // Parity(x, y, z) = x ^ y ^ z , 60 <= t <= 79
+ void sha1_f(Register dst, Register x, Register y, Register z, int round) {
+ assert(round >= 0 && round < 80, "must be");
+ assert_different_registers(dst, x, y, z, t0, t1);
+
+ if (round < 20) {
+ // (x & y) ^ (~x & z)
+ __ andr(t0, x, y);
+ __ andn(dst, z, x);
+ __ xorr(dst, dst, t0);
+ } else if (round >= 40 && round < 60) {
+ // (x & y) ^ (x & z) ^ (y & z)
+ __ andr(t0, x, y);
+ __ andr(t1, x, z);
+ __ andr(dst, y, z);
+ __ xorr(dst, dst, t0);
+ __ xorr(dst, dst, t1);
+ } else {
+ // x ^ y ^ z
+ __ xorr(dst, x, y);
+ __ xorr(dst, dst, z);
+ }
+ }
+
+ // T = ROTL'5(a) + f't(b, c, d) + e + K't + W't
+ // e = d
+ // d = c
+ // c = ROTL'30(b)
+ // b = a
+ // a = T
+ void sha1_process_round(Register a, Register b, Register c, Register d, Register e,
+ Register cur_k, Register cur_w, Register tmp, int round) {
+ assert(round >= 0 && round < 80, "must be");
+ assert_different_registers(a, b, c, d, e, cur_w, cur_k, tmp, t0);
+
+ // T = ROTL'5(a) + f't(b, c, d) + e + K't + W't
+
+ // cur_w will be recalculated at the beginning of each round,
+ // so we can reuse it as a temp register here.
+ Register tmp2 = cur_w;
+
+ // reuse e as a temporary register, as we will mv a new value into it later
+ Register tmp3 = e;
+ __ add(tmp2, cur_k, tmp2);
+ __ add(tmp3, tmp3, tmp2);
+ __ rolw_imm(tmp2, a, 5, t0);
+
+ sha1_f(tmp, b, c, d, round);
+
+ __ add(tmp2, tmp2, tmp);
+ __ add(tmp2, tmp2, tmp3);
+
+ // e = d
+ // d = c
+ // c = ROTL'30(b)
+ // b = a
+ // a = T
+ __ mv(e, d);
+ __ mv(d, c);
+
+ __ rolw_imm(c, b, 30);
+ __ mv(b, a);
+ __ mv(a, tmp2);
+ }
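The register reuse above (tmp2 aliases cur_w, tmp3 aliases e) can obscure the data flow, so here is the same round update in scalar form per FIPS 180-4. A sketch for cross-checking, not part of the stub:

```c++
#include <cstdint>

static inline uint32_t rotl32(uint32_t v, int s) { return (v << s) | (v >> (32 - s)); }

// One SHA-1 round: T = ROTL5(a) + f_t(b, c, d) + e + K_t + W_t, then rotate a..e.
static void sha1_round_ref(uint32_t& a, uint32_t& b, uint32_t& c, uint32_t& d, uint32_t& e,
                           uint32_t k, uint32_t w, int t) {
  uint32_t f;
  if (t < 20)      f = (b & c) ^ (~b & d);           // Ch
  else if (t < 40) f = b ^ c ^ d;                    // Parity
  else if (t < 60) f = (b & c) ^ (b & d) ^ (c & d);  // Maj
  else             f = b ^ c ^ d;                    // Parity
  uint32_t T = rotl32(a, 5) + f + e + k + w;
  e = d;
  d = c;
  c = rotl32(b, 30);
  b = a;
  a = T;
}
```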
+
+ // H(i)0 = a + H(i-1)0
+ // H(i)1 = b + H(i-1)1
+ // H(i)2 = c + H(i-1)2
+ // H(i)3 = d + H(i-1)3
+ // H(i)4 = e + H(i-1)4
+ void sha1_calculate_im_hash(Register a, Register b, Register c, Register d, Register e,
+ Register prev_ab, Register prev_cd, Register prev_e) {
+ assert_different_registers(a, b, c, d, e, prev_ab, prev_cd, prev_e);
+
+ __ add(a, a, prev_ab);
+ __ srli(prev_ab, prev_ab, 32);
+ __ add(b, b, prev_ab);
+
+ __ add(c, c, prev_cd);
+ __ srli(prev_cd, prev_cd, 32);
+ __ add(d, d, prev_cd);
+
+ __ add(e, e, prev_e);
+ }
+
+ void sha1_preserve_prev_abcde(Register a, Register b, Register c, Register d, Register e,
+ Register prev_ab, Register prev_cd, Register prev_e) {
+ assert_different_registers(a, b, c, d, e, prev_ab, prev_cd, prev_e, t0);
+
+ __ slli(t0, b, 32);
+ __ zero_extend(prev_ab, a, 32);
+ __ orr(prev_ab, prev_ab, t0);
+
+ __ slli(t0, d, 32);
+ __ zero_extend(prev_cd, c, 32);
+ __ orr(prev_cd, prev_cd, t0);
+
+ __ mv(prev_e, e);
+ }
+
+ // Intrinsic for:
+ // void sun.security.provider.SHA.implCompress0(byte[] buf, int ofs)
+ // void sun.security.provider.DigestBase.implCompressMultiBlock0(byte[] b, int ofs, int limit)
+ //
+ // Arguments:
+ //
+ // Inputs:
+ // c_rarg0: byte[] src array + offset
+ // c_rarg1: int[] SHA.state
+ // - - - - - - below are only for implCompressMultiBlock0 - - - - - -
+ // c_rarg2: int offset
+ // c_rarg3: int limit
+ //
+ // Outputs:
+ // - - - - - - below are only for implCompressMultiBlock0 - - - - - -
+ // c_rarg0: int offset, when (multi_block == true)
+ //
+ address generate_sha1_implCompress(bool multi_block, const char *name) {
+ __ align(CodeEntryAlignment);
+ StubCodeMark mark(this, "StubRoutines", name);
+
+ address start = __ pc();
+ __ enter();
+
+ RegSet saved_regs = RegSet::range(x18, x27);
+ if (multi_block) {
+ // use x9 as src below.
+ saved_regs += RegSet::of(x9);
+ }
+ __ push_reg(saved_regs, sp);
+
+ // c_rarg0 - c_rarg3: x10 - x13
+ Register buf = c_rarg0;
+ Register state = c_rarg1;
+ Register offset = c_rarg2;
+ Register limit = c_rarg3;
+ // use src to contain the original start point of the array.
+ Register src = x9;
+
+ if (multi_block) {
+ __ sub(limit, limit, offset);
+ __ add(limit, limit, buf);
+ __ sub(src, buf, offset);
+ }
+
+ // [args-reg]: x14 - x17
+ // [temp-reg]: x28 - x31
+ // [saved-reg]: x18 - x27
+
+ // h0/1/2/3/4
+ const Register a = x14, b = x15, c = x16, d = x17, e = x28;
+ // w0, w1, ... w15
+ // put two adjacent w's in one register:
+ // one in the high word part, the other in the low word part.
+ // Depending on the round (even or odd), the w't values reside in different items of ws[].
+ // w0 ~ w15, either reside in
+ // ws[0] ~ ws[7], where
+ // w0 at higher 32 bits of ws[0],
+ // w1 at lower 32 bits of ws[0],
+ // ...
+ // w14 at higher 32 bits of ws[7],
+ // w15 at lower 32 bits of ws[7].
+ // or, reside in
+ // w0: ws[0]'s lower 32 bits
+ // w1 ~ w14: ws[1] ~ ws[7]
+ // w15: ws[8]'s higher 32 bits
+ Register ws[9] = {x29, x30, x31, x18,
+ x19, x20, x21, x22,
+ x23}; // auxiliary register for calculating w's value
+ // current k't's value
+ const Register cur_k = x24;
+ // current w't's value
+ const Register cur_w = x25;
+ // values of a, b, c, d, e in the previous round
+ const Register prev_ab = x26, prev_cd = x27;
+ const Register prev_e = offset; // reuse offset/c_rarg2
+
+ // load the 5 state words into a, b, c, d, e.
+ //
+ // To minimize the number of memory operations, we apply the following
+ // optimization: read the 4-byte state values (a/b/c/d) in pairs, each
+ // pair with a single ld, and split them into two registers.
+ //
+ // As the core SHA-1 algorithm works on 32-bit words, the code below
+ // does not care about the contents of the higher 32 bits of
+ // a/b/c/d/e. Based on this observation, we can apply a further
+ // optimization: simply ignore the higher 32 bits of a/c/e, rather
+ // than setting the higher 32 bits of a/c/e to zero explicitly
+ // with extra instructions.
+ __ ld(a, Address(state, 0));
+ __ srli(b, a, 32);
+ __ ld(c, Address(state, 8));
+ __ srli(d, c, 32);
+ __ lw(e, Address(state, 16));
+
+ Label L_sha1_loop;
+ if (multi_block) {
+ __ BIND(L_sha1_loop);
+ }
+
+ sha1_preserve_prev_abcde(a, b, c, d, e, prev_ab, prev_cd, prev_e);
+
+ for (int round = 0; round < 80; round++) {
+ // prepare K't value
+ sha1_prepare_k(cur_k, round);
+
+ // prepare W't value
+ sha1_prepare_w(cur_w, ws, buf, round);
+
+ // one round process
+ sha1_process_round(a, b, c, d, e, cur_k, cur_w, t2, round);
+ }
+
+ // compute the intermediate hash value
+ sha1_calculate_im_hash(a, b, c, d, e, prev_ab, prev_cd, prev_e);
+
+ if (multi_block) {
+ int64_t block_bytes = 16 * 4;
+ __ addi(buf, buf, block_bytes);
+
+ __ bge(limit, buf, L_sha1_loop, true);
+ }
+
+ // store back the state.
+ __ zero_extend(a, a, 32);
+ __ slli(b, b, 32);
+ __ orr(a, a, b);
+ __ sd(a, Address(state, 0));
+ __ zero_extend(c, c, 32);
+ __ slli(d, d, 32);
+ __ orr(c, c, d);
+ __ sd(c, Address(state, 8));
+ __ sw(e, Address(state, 16));
+
+ // return offset
+ if (multi_block) {
+ __ sub(c_rarg0, buf, src);
+ }
+
+ __ pop_reg(saved_regs, sp);
+
+ __ leave();
+ __ ret();
+
+ return (address) start;
+ }
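The multi_block bookkeeping (the limit adjustment, the bge back-branch, and the final sub) implements the same contract as DigestBase.implCompressMultiBlock0: consume 64-byte blocks while the block start is at or below limit and return the updated offset. A scalar sketch of that contract, assuming c_rarg0 arrives as array + ofs as the header comment states; sha1_compress_block stands in for the 80-round loop and is not defined here:

```c++
#include <cstdint>

// Illustrative model of the multi_block path, not VM code.
static int sha1_multiblock_ref(const uint8_t* array, int ofs, int limit, uint32_t state[5]) {
  const uint8_t* buf = array + ofs;     // c_rarg0 on entry (src array + offset)
  const uint8_t* end = array + limit;   // the 'limit' register after the sub/add above
  do {
    // sha1_compress_block(state, buf); // one 64-byte block
    buf += 64;                          // addi(buf, buf, block_bytes)
  } while (buf <= end);                 // bge(limit, buf, L_sha1_loop)
  (void) state;
  return (int)(buf - array);            // returned offset: sub(c_rarg0, buf, src)
}
```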
+
+
+
#ifdef COMPILER2
static const int64_t right_2_bits = right_n_bits(2);
@@ -5273,6 +5615,11 @@ static const int64_t right_3_bits = right_n_bits(3);
StubRoutines::_chacha20Block = generate_chacha20Block();
}
+ if (UseSHA1Intrinsics) {
+ StubRoutines::_sha1_implCompress = generate_sha1_implCompress(false, "sha1_implCompress");
+ StubRoutines::_sha1_implCompressMB = generate_sha1_implCompress(true, "sha1_implCompressMB");
+ }
+
#endif // COMPILER2_OR_JVMCI
}
diff --git a/src/hotspot/cpu/riscv/stubRoutines_riscv.hpp b/src/hotspot/cpu/riscv/stubRoutines_riscv.hpp
index 7c604e8c11cc2..90a7e0967b240 100644
--- a/src/hotspot/cpu/riscv/stubRoutines_riscv.hpp
+++ b/src/hotspot/cpu/riscv/stubRoutines_riscv.hpp
@@ -39,7 +39,7 @@ enum platform_dependent_constants {
// simply increase sizes if too small (assembler will crash if too small)
_initial_stubs_code_size = 10000,
_continuation_stubs_code_size = 2000,
- _compiler_stubs_code_size = 15000 ZGC_ONLY(+5000),
+ _compiler_stubs_code_size = 25000 ZGC_ONLY(+5000),
_final_stubs_code_size = 20000 ZGC_ONLY(+10000)
};
diff --git a/src/hotspot/cpu/riscv/upcallLinker_riscv.cpp b/src/hotspot/cpu/riscv/upcallLinker_riscv.cpp
index 142d9d636938d..8a6557dde93a1 100644
--- a/src/hotspot/cpu/riscv/upcallLinker_riscv.cpp
+++ b/src/hotspot/cpu/riscv/upcallLinker_riscv.cpp
@@ -267,9 +267,13 @@ address UpcallLinker::make_upcall_stub(jobject receiver, Method* entry,
__ mov_metadata(xmethod, entry);
__ sd(xmethod, Address(xthread, JavaThread::callee_target_offset())); // just in case callee is deoptimized
+ __ push_cont_fastpath(xthread);
+
__ ld(t0, Address(xmethod, Method::from_compiled_offset()));
__ jalr(t0);
+ __ pop_cont_fastpath(xthread);
+
// return value shuffle
if (!needs_return_buffer) {
#ifdef ASSERT
diff --git a/src/hotspot/cpu/riscv/vm_version_riscv.cpp b/src/hotspot/cpu/riscv/vm_version_riscv.cpp
index 390ba51ee4f34..e1711dc5592f9 100644
--- a/src/hotspot/cpu/riscv/vm_version_riscv.cpp
+++ b/src/hotspot/cpu/riscv/vm_version_riscv.cpp
@@ -149,16 +149,6 @@ void VM_Version::initialize() {
FLAG_SET_DEFAULT(UseAESCTRIntrinsics, false);
}
- if (UseSHA1Intrinsics) {
- warning("Intrinsics for SHA-1 crypto hash functions not available on this CPU.");
- FLAG_SET_DEFAULT(UseSHA1Intrinsics, false);
- }
-
- if (UseSHA3Intrinsics) {
- warning("Intrinsics for SHA3-224, SHA3-256, SHA3-384 and SHA3-512 crypto hash functions not available on this CPU.");
- FLAG_SET_DEFAULT(UseSHA3Intrinsics, false);
- }
-
if (UseCRC32Intrinsics) {
warning("CRC32 intrinsics are not available on this CPU.");
FLAG_SET_DEFAULT(UseCRC32Intrinsics, false);
@@ -260,11 +250,8 @@ void VM_Version::initialize() {
// NOTE: Make sure codes dependent on UseRVV are put after c2_initialize(),
// as there are extra checks inside it which could disable UseRVV
// in some situations.
- if (UseZvkn && !UseRVV) {
- FLAG_SET_DEFAULT(UseZvkn, false);
- warning("Cannot enable Zvkn on cpu without RVV support.");
- }
+ // ChaCha20
if (UseRVV) {
if (FLAG_IS_DEFAULT(UseChaCha20Intrinsics)) {
FLAG_SET_DEFAULT(UseChaCha20Intrinsics, true);
@@ -276,29 +263,65 @@ void VM_Version::initialize() {
FLAG_SET_DEFAULT(UseChaCha20Intrinsics, false);
}
- if (!UseZvkn && UseSHA) {
- warning("SHA instructions are not available on this CPU");
- FLAG_SET_DEFAULT(UseSHA, false);
- } else if (UseZvkn && FLAG_IS_DEFAULT(UseSHA)) {
+ // SHA's
+ if (FLAG_IS_DEFAULT(UseSHA)) {
FLAG_SET_DEFAULT(UseSHA, true);
}
- if (!UseSHA) {
+ // SHA-1 (no RVV required).
+ if (UseSHA) {
+ if (FLAG_IS_DEFAULT(UseSHA1Intrinsics)) {
+ FLAG_SET_DEFAULT(UseSHA1Intrinsics, true);
+ }
+ } else if (UseSHA1Intrinsics) {
+ warning("Intrinsics for SHA-1 crypto hash functions not available on this CPU.");
+ FLAG_SET_DEFAULT(UseSHA1Intrinsics, false);
+ }
+
+ // UseZvkn (depends on RVV) and SHA-2.
+ if (UseZvkn && !UseRVV) {
+ FLAG_SET_DEFAULT(UseZvkn, false);
+ warning("Cannot enable Zvkn on cpu without RVV support.");
+ }
+ // SHA-2, depends on Zvkn.
+ if (UseSHA) {
+ if (UseZvkn) {
+ if (FLAG_IS_DEFAULT(UseSHA256Intrinsics)) {
+ FLAG_SET_DEFAULT(UseSHA256Intrinsics, true);
+ }
+ if (FLAG_IS_DEFAULT(UseSHA512Intrinsics)) {
+ FLAG_SET_DEFAULT(UseSHA512Intrinsics, true);
+ }
+ } else {
+ if (UseSHA256Intrinsics) {
+ warning("Intrinsics for SHA-224 and SHA-256 crypto hash functions not available on this CPU, UseZvkn needed.");
+ FLAG_SET_DEFAULT(UseSHA256Intrinsics, false);
+ }
+ if (UseSHA512Intrinsics) {
+ warning("Intrinsics for SHA-384 and SHA-512 crypto hash functions not available on this CPU, UseZvkn needed.");
+ FLAG_SET_DEFAULT(UseSHA512Intrinsics, false);
+ }
+ }
+ } else {
if (UseSHA256Intrinsics) {
- warning("Intrinsics for SHA-224 and SHA-256 crypto hash functions not available on this CPU, UseZvkn needed.");
+ warning("Intrinsics for SHA-224 and SHA-256 crypto hash functions not available on this CPU, as UseSHA disabled.");
FLAG_SET_DEFAULT(UseSHA256Intrinsics, false);
}
if (UseSHA512Intrinsics) {
- warning("Intrinsics for SHA-384 and SHA-512 crypto hash functions not available on this CPU, UseZvkn needed.");
+ warning("Intrinsics for SHA-384 and SHA-512 crypto hash functions not available on this CPU, as UseSHA disabled.");
FLAG_SET_DEFAULT(UseSHA512Intrinsics, false);
}
- } else {
- if (FLAG_IS_DEFAULT(UseSHA256Intrinsics)) {
- FLAG_SET_DEFAULT(UseSHA256Intrinsics, true);
- }
- if (FLAG_IS_DEFAULT(UseSHA512Intrinsics)) {
- FLAG_SET_DEFAULT(UseSHA512Intrinsics, true);
- }
+ }
+
+ // SHA-3
+ if (UseSHA3Intrinsics) {
+ warning("Intrinsics for SHA3-224, SHA3-256, SHA3-384 and SHA3-512 crypto hash functions not available on this CPU.");
+ FLAG_SET_DEFAULT(UseSHA3Intrinsics, false);
+ }
+
+ // UseSHA
+ if (!(UseSHA1Intrinsics || UseSHA256Intrinsics || UseSHA3Intrinsics || UseSHA512Intrinsics)) {
+ FLAG_SET_DEFAULT(UseSHA, false);
}
}
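Summarizing the dependencies established above: SHA-1 needs only UseSHA, SHA-256/512 additionally need Zvkn (and thus RVV), SHA-3 stays off, and UseSHA is cleared again when no SHA intrinsic ends up enabled. A hypothetical helper, illustrative only; the authoritative logic is the flag code above:

```c++
// Not VM code; the lowercase parameters mirror the corresponding VM flags.
static bool sha_defaults_summary(bool use_sha, bool use_rvv, bool use_zvkn,
                                 bool& sha1, bool& sha2, bool& sha3) {
  sha1 = use_sha;                        // scalar SHA-1 stub, no RVV needed
  sha2 = use_sha && use_rvv && use_zvkn; // SHA-256/512 intrinsics require Zvkn
  sha3 = false;                          // no SHA-3 intrinsics on riscv
  return sha1 || sha2 || sha3;           // resulting value of UseSHA
}
```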
diff --git a/src/hotspot/cpu/riscv/vtableStubs_riscv.cpp b/src/hotspot/cpu/riscv/vtableStubs_riscv.cpp
index 9d08796681f3f..5d945dbc32309 100644
--- a/src/hotspot/cpu/riscv/vtableStubs_riscv.cpp
+++ b/src/hotspot/cpu/riscv/vtableStubs_riscv.cpp
@@ -27,10 +27,10 @@
#include "precompiled.hpp"
#include "asm/assembler.inline.hpp"
#include "asm/macroAssembler.inline.hpp"
+#include "code/compiledIC.hpp"
#include "code/vtableStubs.hpp"
#include "interp_masm_riscv.hpp"
#include "memory/resourceArea.hpp"
-#include "oops/compiledICHolder.hpp"
#include "oops/instanceKlass.hpp"
#include "oops/klassVtable.hpp"
#include "runtime/sharedRuntime.hpp"
@@ -171,22 +171,22 @@ VtableStub* VtableStubs::create_itable_stub(int itable_index) {
assert(VtableStub::receiver_location() == j_rarg0->as_VMReg(), "receiver expected in j_rarg0");
// Entry arguments:
- // t1: CompiledICHolder
+ // t1: CompiledICData
// j_rarg0: Receiver
// This stub is called from compiled code which has no callee-saved registers,
// so all registers except arguments are free at this point.
const Register recv_klass_reg = x18;
- const Register holder_klass_reg = x19; // declaring interface klass (DECC)
+ const Register holder_klass_reg = x19; // declaring interface klass (DEFC)
const Register resolved_klass_reg = x30; // resolved interface klass (REFC)
const Register temp_reg = x28;
const Register temp_reg2 = x29;
- const Register icholder_reg = t1;
+ const Register icdata_reg = t1;
Label L_no_such_interface;
- __ ld(resolved_klass_reg, Address(icholder_reg, CompiledICHolder::holder_klass_offset()));
- __ ld(holder_klass_reg, Address(icholder_reg, CompiledICHolder::holder_metadata_offset()));
+ __ ld(resolved_klass_reg, Address(icdata_reg, CompiledICData::itable_refc_klass_offset()));
+ __ ld(holder_klass_reg, Address(icdata_reg, CompiledICData::itable_defc_klass_offset()));
start_pc = __ pc();
diff --git a/src/hotspot/cpu/s390/assembler_s390.hpp b/src/hotspot/cpu/s390/assembler_s390.hpp
index 9bb143001b944..91cc7e611bfd1 100644
--- a/src/hotspot/cpu/s390/assembler_s390.hpp
+++ b/src/hotspot/cpu/s390/assembler_s390.hpp
@@ -107,7 +107,7 @@ class RelAddr {
static bool is_in_range_of_RelAddr(address target, address pc, bool shortForm) {
// Guard against illegal branch targets, e.g. -1. Occurrences in
- // CompiledStaticCall and ad-file. Do not assert (it's a test
+ // CompiledDirectCall and ad-file. Do not assert (it's a test
// function!). Just return false in case of illegal operands.
if ((((uint64_t)target) & 0x0001L) != 0) return false;
if ((((uint64_t)pc) & 0x0001L) != 0) return false;
diff --git a/src/hotspot/cpu/s390/c1_CodeStubs_s390.cpp b/src/hotspot/cpu/s390/c1_CodeStubs_s390.cpp
index 200f7ee978dcb..b7f1d3605681a 100644
--- a/src/hotspot/cpu/s390/c1_CodeStubs_s390.cpp
+++ b/src/hotspot/cpu/s390/c1_CodeStubs_s390.cpp
@@ -1,6 +1,6 @@
/*
- * Copyright (c) 2016, 2023, Oracle and/or its affiliates. All rights reserved.
- * Copyright (c) 2016, 2018 SAP SE. All rights reserved.
+ * Copyright (c) 2016, 2024, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2016, 2024 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -428,6 +428,7 @@ void ArrayCopyStub::emit_code(LIR_Assembler* ce) {
"must be aligned");
ce->emit_static_call_stub();
+ CHECK_BAILOUT();
// Prepend each BRASL with a nop.
__ relocate(relocInfo::static_call_type);
diff --git a/src/hotspot/cpu/s390/c1_LIRAssembler_s390.cpp b/src/hotspot/cpu/s390/c1_LIRAssembler_s390.cpp
index 355c66047c1b2..503440a5fcc01 100644
--- a/src/hotspot/cpu/s390/c1_LIRAssembler_s390.cpp
+++ b/src/hotspot/cpu/s390/c1_LIRAssembler_s390.cpp
@@ -76,10 +76,7 @@ int LIR_Assembler::initial_frame_size_in_bytes() const {
// We fetch the class of the receiver and compare it with the cached class.
// If they do not match we jump to the slow case.
int LIR_Assembler::check_icache() {
- Register receiver = receiverOpr()->as_register();
- int offset = __ offset();
- __ inline_cache_check(receiver, Z_inline_cache);
- return offset;
+ return __ ic_check(CodeEntryAlignment);
}
void LIR_Assembler::clinit_barrier(ciMethod* method) {
@@ -2385,7 +2382,7 @@ void LIR_Assembler::emit_alloc_array(LIR_OpAllocArray* op) {
op->len()->as_register(),
op->tmp1()->as_register(),
op->tmp2()->as_register(),
- arrayOopDesc::header_size(op->type()),
+ arrayOopDesc::base_offset_in_bytes(op->type()),
type2aelembytes(op->type()),
op->klass()->as_register(),
*op->stub()->entry());
diff --git a/src/hotspot/cpu/s390/c1_LIRAssembler_s390.hpp b/src/hotspot/cpu/s390/c1_LIRAssembler_s390.hpp
index 229216ef20d44..c8815f3a729a4 100644
--- a/src/hotspot/cpu/s390/c1_LIRAssembler_s390.hpp
+++ b/src/hotspot/cpu/s390/c1_LIRAssembler_s390.hpp
@@ -45,7 +45,7 @@
}
enum {
- _call_stub_size = 512, // See Compile::MAX_stubs_size and CompiledStaticCall::emit_to_interp_stub.
+ _call_stub_size = 512, // See Compile::MAX_stubs_size and CompiledDirectCall::emit_to_interp_stub.
_exception_handler_size = DEBUG_ONLY(1*K) NOT_DEBUG(128),
_deopt_handler_size = DEBUG_ONLY(1*K) NOT_DEBUG(64)
};
diff --git a/src/hotspot/cpu/s390/c1_MacroAssembler_s390.cpp b/src/hotspot/cpu/s390/c1_MacroAssembler_s390.cpp
index 40edca6559aa4..58bdcee5d5f8f 100644
--- a/src/hotspot/cpu/s390/c1_MacroAssembler_s390.cpp
+++ b/src/hotspot/cpu/s390/c1_MacroAssembler_s390.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016, 2023, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2016, 2024, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2016, 2023 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@@ -40,31 +40,6 @@
#include "runtime/stubRoutines.hpp"
#include "utilities/macros.hpp"
-void C1_MacroAssembler::inline_cache_check(Register receiver, Register iCache) {
- Label ic_miss, ic_hit;
- verify_oop(receiver, FILE_AND_LINE);
- int klass_offset = oopDesc::klass_offset_in_bytes();
-
- if (!ImplicitNullChecks || MacroAssembler::needs_explicit_null_check(klass_offset)) {
- if (VM_Version::has_CompareBranch()) {
- z_cgij(receiver, 0, Assembler::bcondEqual, ic_miss);
- } else {
- z_ltgr(receiver, receiver);
- z_bre(ic_miss);
- }
- }
-
- compare_klass_ptr(iCache, klass_offset, receiver, false);
- z_bre(ic_hit);
-
- // If icache check fails, then jump to runtime routine.
- // Note: RECEIVER must still contain the receiver!
- load_const_optimized(Z_R1_scratch, AddressLiteral(SharedRuntime::get_ic_miss_stub()));
- z_br(Z_R1_scratch);
- align(CodeEntryAlignment);
- bind(ic_hit);
-}
-
void C1_MacroAssembler::explicit_null_check(Register base) {
ShouldNotCallThis(); // unused
}
@@ -296,7 +271,7 @@ void C1_MacroAssembler::allocate_array(
Register len, // array length
Register t1, // temp register
Register t2, // temp register
- int hdr_size, // object header size in words
+ int base_offset_in_bytes, // elements offset in bytes
int elt_size, // element size in bytes
Register klass, // object klass
Label& slow_case // Continuation point if fast allocation fails.
@@ -322,8 +297,8 @@ void C1_MacroAssembler::allocate_array(
case 8: z_sllg(arr_size, len, 3); break;
default: ShouldNotReachHere();
}
- add2reg(arr_size, hdr_size * wordSize + MinObjAlignmentInBytesMask); // Add space for header & alignment.
- z_nill(arr_size, (~MinObjAlignmentInBytesMask) & 0xffff); // Align array size.
+ add2reg(arr_size, base_offset_in_bytes + MinObjAlignmentInBytesMask); // Add space for header & alignment.
+ z_nill(arr_size, (~MinObjAlignmentInBytesMask) & 0xffff); // Align array size.
try_allocate(obj, arr_size, 0, t1, slow_case);
@@ -333,9 +308,9 @@ void C1_MacroAssembler::allocate_array(
Label done;
Register object_fields = t1;
Register Rzero = Z_R1_scratch;
- z_aghi(arr_size, -(hdr_size * BytesPerWord));
+ z_aghi(arr_size, -base_offset_in_bytes);
z_bre(done); // Jump if size of fields is zero.
- z_la(object_fields, hdr_size * BytesPerWord, obj);
+ z_la(object_fields, base_offset_in_bytes, obj);
z_xgr(Rzero, Rzero);
initialize_body(object_fields, arr_size, Rzero);
bind(done);
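The size computation above (add2reg followed by z_nill) is the usual align-up of element bytes plus the element base offset. A minimal arithmetic sketch, not VM code:

```c++
// arr_size = align_up(len * elt_size + base_offset_in_bytes, MinObjAlignmentInBytes)
static int array_alloc_size_ref(int base_offset_in_bytes, int len, int elt_size, int align_mask) {
  int arr_size = (len * elt_size) + base_offset_in_bytes + align_mask;  // add2reg(...)
  return arr_size & ~align_mask;                                        // z_nill(...)
}
```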
diff --git a/src/hotspot/cpu/s390/c1_MacroAssembler_s390.hpp b/src/hotspot/cpu/s390/c1_MacroAssembler_s390.hpp
index 7a4f76af1546e..c77258509e1a5 100644
--- a/src/hotspot/cpu/s390/c1_MacroAssembler_s390.hpp
+++ b/src/hotspot/cpu/s390/c1_MacroAssembler_s390.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016, 2023, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2016, 2024, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2016, 2023 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@@ -86,7 +86,7 @@
Register len, // array length
Register t1, // temp register
Register t2, // temp register
- int hdr_size, // object header size in words
+ int base_offset_in_bytes, // elements offset in bytes
int elt_size, // element size in bytes
Register klass, // object klass
Label& slow_case // Continuation point if fast allocation fails.
diff --git a/src/hotspot/cpu/s390/c1_Runtime1_s390.cpp b/src/hotspot/cpu/s390/c1_Runtime1_s390.cpp
index 257148827be4e..decb3a1cafc31 100644
--- a/src/hotspot/cpu/s390/c1_Runtime1_s390.cpp
+++ b/src/hotspot/cpu/s390/c1_Runtime1_s390.cpp
@@ -35,7 +35,6 @@
#include "interpreter/interpreter.hpp"
#include "memory/universe.hpp"
#include "nativeInst_s390.hpp"
-#include "oops/compiledICHolder.hpp"
#include "oops/oop.inline.hpp"
#include "prims/jvmtiExport.hpp"
#include "register_s390.hpp"
diff --git a/src/hotspot/cpu/s390/compiledIC_s390.cpp b/src/hotspot/cpu/s390/compiledIC_s390.cpp
index 7ea90c1de7c69..3adcfbc85f185 100644
--- a/src/hotspot/cpu/s390/compiledIC_s390.cpp
+++ b/src/hotspot/cpu/s390/compiledIC_s390.cpp
@@ -26,7 +26,6 @@
#include "precompiled.hpp"
#include "asm/macroAssembler.inline.hpp"
#include "code/compiledIC.hpp"
-#include "code/icBuffer.hpp"
#include "code/nmethod.hpp"
#include "memory/resourceArea.hpp"
#include "runtime/mutexLocker.hpp"
@@ -40,7 +39,7 @@
#undef __
#define __ _masm.
-address CompiledStaticCall::emit_to_interp_stub(CodeBuffer &cbuf, address mark/* = nullptr*/) {
+address CompiledDirectCall::emit_to_interp_stub(CodeBuffer &cbuf, address mark/* = nullptr*/) {
#ifdef COMPILER2
// Stub is fixed up when the corresponding call is converted from calling
// compiled code to calling interpreted code.
@@ -54,7 +53,7 @@ address CompiledStaticCall::emit_to_interp_stub(CodeBuffer &cbuf, address mark/*
// That's why we must use the macroassembler to generate a stub.
MacroAssembler _masm(&cbuf);
- address stub = __ start_a_stub(CompiledStaticCall::to_interp_stub_size());
+ address stub = __ start_a_stub(CompiledDirectCall::to_interp_stub_size());
if (stub == nullptr) {
return nullptr; // CodeBuffer::expand failed.
}
@@ -81,27 +80,20 @@ address CompiledStaticCall::emit_to_interp_stub(CodeBuffer &cbuf, address mark/*
#undef __
-int CompiledStaticCall::to_interp_stub_size() {
+int CompiledDirectCall::to_interp_stub_size() {
return 2 * MacroAssembler::load_const_from_toc_size() +
2; // branch
}
// Relocation entries for call stub, compiled java to interpreter.
-int CompiledStaticCall::reloc_to_interp_stub() {
+int CompiledDirectCall::reloc_to_interp_stub() {
return 5; // 4 in emit_java_to_interp + 1 in Java_Static_Call
}
-void CompiledDirectStaticCall::set_to_interpreted(const methodHandle& callee, address entry) {
+void CompiledDirectCall::set_to_interpreted(const methodHandle& callee, address entry) {
address stub = find_stub();
guarantee(stub != nullptr, "stub not found");
- {
- ResourceMark rm;
- log_trace(inlinecache)("CompiledDirectStaticCall@" INTPTR_FORMAT ": set_to_interpreted %s",
- p2i(instruction_address()),
- callee->name_and_sig_as_C_string());
- }
-
// Creation also verifies the object.
NativeMovConstReg* method_holder = nativeMovConstReg_at(stub + NativeCall::get_IC_pos_in_java_to_interp_stub());
NativeJump* jump = nativeJump_at(method_holder->next_instruction_address());
@@ -115,7 +107,7 @@ void CompiledDirectStaticCall::set_to_interpreted(const methodHandle& callee, ad
set_destination_mt_safe(stub);
}
-void CompiledDirectStaticCall::set_stub_to_clean(static_stub_Relocation* static_stub) {
+void CompiledDirectCall::set_stub_to_clean(static_stub_Relocation* static_stub) {
// Reset stub.
address stub = static_stub->addr();
assert(stub != nullptr, "stub not found");
@@ -131,7 +123,7 @@ void CompiledDirectStaticCall::set_stub_to_clean(static_stub_Relocation* static_
#ifndef PRODUCT
-void CompiledDirectStaticCall::verify() {
+void CompiledDirectCall::verify() {
// Verify call.
_call->verify();
_call->verify_alignment();
diff --git a/src/hotspot/cpu/s390/gc/g1/g1BarrierSetAssembler_s390.cpp b/src/hotspot/cpu/s390/gc/g1/g1BarrierSetAssembler_s390.cpp
index 3ed99f68c475c..8ce9305a865e5 100644
--- a/src/hotspot/cpu/s390/gc/g1/g1BarrierSetAssembler_s390.cpp
+++ b/src/hotspot/cpu/s390/gc/g1/g1BarrierSetAssembler_s390.cpp
@@ -31,9 +31,9 @@
#include "gc/g1/g1BarrierSetAssembler.hpp"
#include "gc/g1/g1BarrierSetRuntime.hpp"
#include "gc/g1/g1DirtyCardQueue.hpp"
+#include "gc/g1/g1HeapRegion.hpp"
#include "gc/g1/g1SATBMarkQueueSet.hpp"
#include "gc/g1/g1ThreadLocalData.hpp"
-#include "gc/g1/heapRegion.hpp"
#include "interpreter/interp_masm.hpp"
#include "runtime/jniHandles.hpp"
#include "runtime/sharedRuntime.hpp"
diff --git a/src/hotspot/cpu/s390/icBuffer_s390.cpp b/src/hotspot/cpu/s390/icBuffer_s390.cpp
deleted file mode 100644
index 0dc936d6fad0c..0000000000000
--- a/src/hotspot/cpu/s390/icBuffer_s390.cpp
+++ /dev/null
@@ -1,65 +0,0 @@
-/*
- * Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved.
- * Copyright (c) 2016 SAP SE. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#include "precompiled.hpp"
-#include "asm/macroAssembler.inline.hpp"
-#include "code/icBuffer.hpp"
-#include "gc/shared/collectedHeap.inline.hpp"
-#include "interpreter/bytecodes.hpp"
-#include "memory/resourceArea.hpp"
-#include "nativeInst_s390.hpp"
-#include "oops/oop.inline.hpp"
-
-#define __ masm.
-
-int InlineCacheBuffer::ic_stub_code_size() {
- return MacroAssembler::load_const_size() + Assembler::z_brul_size();
-}
-
-void InlineCacheBuffer::assemble_ic_buffer_code(address code_begin, void* cached_oop, address entry_point) {
- ResourceMark rm;
- CodeBuffer code(code_begin, ic_stub_code_size());
- MacroAssembler masm(&code);
- // Note: even though the code contains an embedded oop, we do not need reloc info
- // because
- // (1) the oop is old (i.e., doesn't matter for scavenges)
- // (2) these ICStubs are removed *before* a GC happens, so the roots disappear.
-
- // Load the oop,
- __ load_const(Z_method, (address) cached_oop); // inline cache reg = Z_method
- // and do a tail-call (pc-relative).
- __ z_brul((address) entry_point);
- __ flush();
-}
-
-address InlineCacheBuffer::ic_buffer_entry_point(address code_begin) {
- NativeMovConstReg* move = nativeMovConstReg_at(code_begin); // Creation also verifies the object.
- return MacroAssembler::get_target_addr_pcrel(move->next_instruction_address());
-}
-
-void* InlineCacheBuffer::ic_buffer_cached_value(address code_begin) {
- NativeMovConstReg* move = nativeMovConstReg_at(code_begin); // Creation also verifies the object.
- return (void*)move->data();
-}
diff --git a/src/hotspot/cpu/s390/macroAssembler_s390.cpp b/src/hotspot/cpu/s390/macroAssembler_s390.cpp
index 14fc07ec00794..0226d494c8958 100644
--- a/src/hotspot/cpu/s390/macroAssembler_s390.cpp
+++ b/src/hotspot/cpu/s390/macroAssembler_s390.cpp
@@ -26,6 +26,7 @@
#include "precompiled.hpp"
#include "asm/codeBuffer.hpp"
#include "asm/macroAssembler.inline.hpp"
+#include "code/compiledIC.hpp"
#include "compiler/disassembler.hpp"
#include "gc/shared/barrierSet.hpp"
#include "gc/shared/barrierSetAssembler.hpp"
@@ -1097,7 +1098,13 @@ void MacroAssembler::clear_mem(const Address& addr, unsigned int size) {
}
void MacroAssembler::align(int modulus) {
- while (offset() % modulus != 0) z_nop();
+ align(modulus, offset());
+}
+
+void MacroAssembler::align(int modulus, int target) {
+ assert(((modulus % 2 == 0) && (target % 2 == 0)), "needs to be even");
+ int delta = target - offset();
+ while ((offset() + delta) % modulus != 0) z_nop();
}
// Special version for non-relocateable code if required alignment
@@ -2150,6 +2157,45 @@ void MacroAssembler::call_VM_leaf_base(address entry_point) {
call_VM_leaf_base(entry_point, allow_relocation);
}
+int MacroAssembler::ic_check_size() {
+ return 30 + (ImplicitNullChecks ? 0 : 6);
+}
+
+int MacroAssembler::ic_check(int end_alignment) {
+ Register R2_receiver = Z_ARG1;
+ Register R0_scratch = Z_R0_scratch;
+ Register R1_scratch = Z_R1_scratch;
+ Register R9_data = Z_inline_cache;
+ Label success, failure;
+
+ // The UEP of a code blob ensures that the VEP is padded. However, the padding of the UEP is placed
+ // before the inline cache check, so we don't have to execute any nop instructions when dispatching
+ // through the UEP, yet we can ensure that the VEP is aligned appropriately. That's why we align
+ // before the inline cache check here, and not after it.
+ align(end_alignment, offset() + ic_check_size());
+
+ int uep_offset = offset();
+ if (!ImplicitNullChecks) {
+ z_cgij(R2_receiver, 0, Assembler::bcondEqual, failure);
+ }
+
+ if (UseCompressedClassPointers) {
+ z_llgf(R1_scratch, Address(R2_receiver, oopDesc::klass_offset_in_bytes()));
+ } else {
+ z_lg(R1_scratch, Address(R2_receiver, oopDesc::klass_offset_in_bytes()));
+ }
+ z_cg(R1_scratch, Address(R9_data, in_bytes(CompiledICData::speculated_klass_offset())));
+ z_bre(success);
+
+ bind(failure);
+ load_const(R1_scratch, AddressLiteral(SharedRuntime::get_ic_miss_stub()));
+ z_br(R1_scratch);
+ bind(success);
+
+ assert((offset() % end_alignment) == 0, "Misaligned verified entry point, offset() = %d, end_alignment = %d", offset(), end_alignment);
+ return uep_offset;
+}
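Conceptually, the emitted sequence compares the receiver's klass with the speculated klass stored in the call site's CompiledICData and falls through to the verified entry point on a hit, branching to SharedRuntime's ic_miss stub otherwise (a null receiver also takes the miss path when ImplicitNullChecks is off). A minimal model of that dispatch decision, not VM code; names are placeholders:

```c++
// Illustrative only.
struct ICDataModel { const void* speculated_klass; };

static const void* ic_dispatch_ref(const void* receiver_klass, const ICDataModel& data,
                                   const void* verified_entry, const void* ic_miss_stub) {
  return (receiver_klass == data.speculated_klass) ? verified_entry : ic_miss_stub;
}
```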
+
void MacroAssembler::call_VM_base(Register oop_result,
Register last_java_sp,
address entry_point,
diff --git a/src/hotspot/cpu/s390/macroAssembler_s390.hpp b/src/hotspot/cpu/s390/macroAssembler_s390.hpp
index bf14b42e2d1b3..924583abdf563 100644
--- a/src/hotspot/cpu/s390/macroAssembler_s390.hpp
+++ b/src/hotspot/cpu/s390/macroAssembler_s390.hpp
@@ -257,6 +257,7 @@ class MacroAssembler: public Assembler {
// nop padding
void align(int modulus);
+ void align(int modulus, int target);
void align_address(int modulus);
//
@@ -566,6 +567,9 @@ class MacroAssembler: public Assembler {
// Get the pc where the last call will return to. Returns _last_calls_return_pc.
inline address last_calls_return_pc();
+ static int ic_check_size();
+ int ic_check(int end_alignment);
+
private:
static bool is_call_far_patchable_variant0_at(address instruction_addr); // Dynamic TOC: load target addr from CP and call.
static bool is_call_far_patchable_variant2_at(address instruction_addr); // PC-relative call, prefixed with NOPs.
diff --git a/src/hotspot/cpu/s390/s390.ad b/src/hotspot/cpu/s390/s390.ad
index fa53c73269196..5fcb885cdc3ab 100644
--- a/src/hotspot/cpu/s390/s390.ad
+++ b/src/hotspot/cpu/s390/s390.ad
@@ -1,6 +1,6 @@
//
// Copyright (c) 2017, 2024, Oracle and/or its affiliates. All rights reserved.
-// Copyright (c) 2017, 2022 SAP SE. All rights reserved.
+// Copyright (c) 2017, 2024 SAP SE. All rights reserved.
// DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
//
// This code is free software; you can redistribute it and/or modify it
@@ -1341,51 +1341,9 @@ void MachUEPNode::format(PhaseRegAlloc *ra_, outputStream *os) const {
#endif
void MachUEPNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
+ // This is the unverified entry point.
C2_MacroAssembler _masm(&cbuf);
- const int ic_miss_offset = 2;
-
- // Inline_cache contains a klass.
- Register ic_klass = as_Register(Matcher::inline_cache_reg_encode());
- // ARG1 is the receiver oop.
- Register R2_receiver = Z_ARG1;
- int klass_offset = oopDesc::klass_offset_in_bytes();
- AddressLiteral icmiss(SharedRuntime::get_ic_miss_stub());
- Register R1_ic_miss_stub_addr = Z_R1_scratch;
-
- // Null check of receiver.
- // This is the null check of the receiver that actually should be
- // done in the caller. It's here because in case of implicit null
- // checks we get it for free.
- assert(!MacroAssembler::needs_explicit_null_check(oopDesc::klass_offset_in_bytes()),
- "second word in oop should not require explicit null check.");
- if (!ImplicitNullChecks) {
- Label valid;
- if (VM_Version::has_CompareBranch()) {
- __ z_cgij(R2_receiver, 0, Assembler::bcondNotEqual, valid);
- } else {
- __ z_ltgr(R2_receiver, R2_receiver);
- __ z_bre(valid);
- }
- // The ic_miss_stub will handle the null pointer exception.
- __ load_const_optimized(R1_ic_miss_stub_addr, icmiss);
- __ z_br(R1_ic_miss_stub_addr);
- __ bind(valid);
- }
-
- // Check whether this method is the proper implementation for the class of
- // the receiver (ic miss check).
- {
- Label valid;
- // Compare cached class against klass from receiver.
- // This also does an implicit null check!
- __ compare_klass_ptr(ic_klass, klass_offset, R2_receiver, false);
- __ z_bre(valid);
- // The inline cache points to the wrong method. Call the
- // ic_miss_stub to find the proper method.
- __ load_const_optimized(R1_ic_miss_stub_addr, icmiss);
- __ z_br(R1_ic_miss_stub_addr);
- __ bind(valid);
- }
+ __ ic_check(CodeEntryAlignment);
}
uint MachUEPNode::size(PhaseRegAlloc *ra_) const {
@@ -1447,6 +1405,7 @@ int HandlerImpl::emit_exception_handler(CodeBuffer &cbuf) {
address base = __ start_a_stub(size_exception_handler());
if (base == nullptr) {
+ ciEnv::current()->record_failure("CodeCache is full");
return 0; // CodeBuffer::expand failed
}
@@ -1468,6 +1427,7 @@ int HandlerImpl::emit_deopt_handler(CodeBuffer& cbuf) {
address base = __ start_a_stub(size_deopt_handler());
if (base == nullptr) {
+ ciEnv::current()->record_failure("CodeCache is full");
return 0; // CodeBuffer::expand failed
}
@@ -2146,7 +2106,7 @@ encode %{
assert(__ inst_mark() != nullptr, "emit_call_reloc must set_inst_mark()");
if (_method) { // Emit stub for static call.
- address stub = CompiledStaticCall::emit_to_interp_stub(cbuf);
+ address stub = CompiledDirectCall::emit_to_interp_stub(cbuf);
if (stub == nullptr) {
ciEnv::current()->record_failure("CodeCache is full");
return;
diff --git a/src/hotspot/cpu/s390/sharedRuntime_s390.cpp b/src/hotspot/cpu/s390/sharedRuntime_s390.cpp
index ed1795cfa339f..11e1e617d8e3a 100644
--- a/src/hotspot/cpu/s390/sharedRuntime_s390.cpp
+++ b/src/hotspot/cpu/s390/sharedRuntime_s390.cpp
@@ -26,8 +26,8 @@
#include "precompiled.hpp"
#include "asm/macroAssembler.inline.hpp"
#include "code/debugInfoRec.hpp"
-#include "code/icBuffer.hpp"
#include "code/vtableStubs.hpp"
+#include "code/compiledIC.hpp"
#include "compiler/oopMap.hpp"
#include "gc/shared/barrierSetAssembler.hpp"
#include "gc/shared/gcLocker.hpp"
@@ -35,7 +35,6 @@
#include "interpreter/interp_masm.hpp"
#include "memory/resourceArea.hpp"
#include "nativeInst_s390.hpp"
-#include "oops/compiledICHolder.hpp"
#include "oops/klass.inline.hpp"
#include "prims/methodHandles.hpp"
#include "registerSaver_s390.hpp"
@@ -1500,17 +1499,15 @@ nmethod *SharedRuntime::generate_native_wrapper(MacroAssembler *masm,
unsigned int wrapper_FrameDone;
unsigned int wrapper_CRegsSet;
Label handle_pending_exception;
- Label ic_miss;
//---------------------------------------------------------------------
// Unverified entry point (UEP)
//---------------------------------------------------------------------
- wrapper_UEPStart = __ offset();
// check ic: object class <-> cached class
- if (!method_is_static) __ nmethod_UEP(ic_miss);
- // Fill with nops (alignment of verified entry point).
- __ align(CodeEntryAlignment);
+ if (!method_is_static) {
+ wrapper_UEPStart = __ ic_check(CodeEntryAlignment /* end_alignment */);
+ }
//---------------------------------------------------------------------
// Verified entry point (VEP)
@@ -2026,13 +2023,7 @@ nmethod *SharedRuntime::generate_native_wrapper(MacroAssembler *masm,
__ restore_return_pc();
__ z_br(Z_R1_scratch);
- //---------------------------------------------------------------------
- // Handler for a cache miss (out-of-line)
- //---------------------------------------------------------------------
- __ call_ic_miss_handler(ic_miss, 0x77, 0, Z_R1_scratch);
__ flush();
-
-
//////////////////////////////////////////////////////////////////////
// end of code generation
//////////////////////////////////////////////////////////////////////
@@ -2318,9 +2309,6 @@ AdapterHandlerEntry* SharedRuntime::generate_i2c2i_adapters(MacroAssembler *masm
Label skip_fixup;
{
Label ic_miss;
- const int klass_offset = oopDesc::klass_offset_in_bytes();
- const int holder_klass_offset = in_bytes(CompiledICHolder::holder_klass_offset());
- const int holder_metadata_offset = in_bytes(CompiledICHolder::holder_metadata_offset());
// Out-of-line call to ic_miss handler.
__ call_ic_miss_handler(ic_miss, 0x11, 0, Z_R1_scratch);
@@ -2329,27 +2317,11 @@ AdapterHandlerEntry* SharedRuntime::generate_i2c2i_adapters(MacroAssembler *masm
__ align(CodeEntryAlignment);
c2i_unverified_entry = __ pc();
- // Check the pointers.
- if (!ImplicitNullChecks || MacroAssembler::needs_explicit_null_check(klass_offset)) {
- __ z_ltgr(Z_ARG1, Z_ARG1);
- __ z_bre(ic_miss);
- }
- __ verify_oop(Z_ARG1, FILE_AND_LINE);
-
- // Check ic: object class <-> cached class
- // Compress cached class for comparison. That's more efficient.
- if (UseCompressedClassPointers) {
- __ z_lg(Z_R11, holder_klass_offset, Z_method); // Z_R11 is overwritten a few instructions down anyway.
- __ compare_klass_ptr(Z_R11, klass_offset, Z_ARG1, false); // Cached class can't be zero.
- } else {
- __ z_clc(klass_offset, sizeof(void *)-1, Z_ARG1, holder_klass_offset, Z_method);
- }
- __ z_brne(ic_miss); // Cache miss: call runtime to handle this.
-
+ __ ic_check(2);
+ __ z_lg(Z_method, Address(Z_inline_cache, CompiledICData::speculated_method_offset()));
// This def MUST MATCH code in gen_c2i_adapter!
const Register code = Z_R11;
- __ z_lg(Z_method, holder_metadata_offset, Z_method);
__ load_and_test_long(Z_R0, method_(code));
__ z_brne(ic_miss); // Cache miss: call runtime to handle this.
diff --git a/src/hotspot/cpu/s390/vtableStubs_s390.cpp b/src/hotspot/cpu/s390/vtableStubs_s390.cpp
index 5a79369ceab47..573c23d796708 100644
--- a/src/hotspot/cpu/s390/vtableStubs_s390.cpp
+++ b/src/hotspot/cpu/s390/vtableStubs_s390.cpp
@@ -1,6 +1,6 @@
/*
* Copyright (c) 2016, 2023, Oracle and/or its affiliates. All rights reserved.
- * Copyright (c) 2016, 2021 SAP SE. All rights reserved.
+ * Copyright (c) 2016, 2023 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -25,10 +25,10 @@
#include "precompiled.hpp"
#include "asm/macroAssembler.inline.hpp"
+#include "code/compiledIC.hpp"
#include "code/vtableStubs.hpp"
#include "interp_masm_s390.hpp"
#include "memory/resourceArea.hpp"
-#include "oops/compiledICHolder.hpp"
#include "oops/instanceKlass.hpp"
#include "oops/klass.inline.hpp"
#include "oops/klassVtable.hpp"
@@ -197,12 +197,12 @@ VtableStub* VtableStubs::create_itable_stub(int itable_index) {
__ load_klass(rcvr_klass, Z_ARG1);
// Receiver subtype check against REFC.
- __ z_lg(interface, Address(Z_method, CompiledICHolder::holder_klass_offset()));
+ __ z_lg(interface, Address(Z_method, CompiledICData::itable_refc_klass_offset()));
__ lookup_interface_method(rcvr_klass, interface, noreg,
noreg, Z_R1, no_such_interface, /*return_method=*/ false);
// Get Method* and entrypoint for compiler
- __ z_lg(interface, Address(Z_method, CompiledICHolder::holder_metadata_offset()));
+ __ z_lg(interface, Address(Z_method, CompiledICData::itable_defc_klass_offset()));
__ lookup_interface_method(rcvr_klass, interface, itable_index,
Z_method, Z_R1, no_such_interface, /*return_method=*/ true);
diff --git a/src/hotspot/cpu/x86/c1_LIRAssembler_x86.cpp b/src/hotspot/cpu/x86/c1_LIRAssembler_x86.cpp
index ff0726840d30a..c279e3073af87 100644
--- a/src/hotspot/cpu/x86/c1_LIRAssembler_x86.cpp
+++ b/src/hotspot/cpu/x86/c1_LIRAssembler_x86.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2000, 2023, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2024, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -72,7 +72,6 @@ static jlong *double_signflip_pool = double_quadword(&fp_signmask_pool[4*2], (jl
NEEDS_CLEANUP // remove this definitions ?
-const Register IC_Klass = rax; // where the IC klass is cached
const Register SYNC_header = rax; // synchronization header
const Register SHIFT_count = rcx; // where count for shift operations must be
@@ -336,23 +335,7 @@ void LIR_Assembler::osr_entry() {
// inline cache check; done before the frame is built.
int LIR_Assembler::check_icache() {
- Register receiver = FrameMap::receiver_opr->as_register();
- Register ic_klass = IC_Klass;
- const int ic_cmp_size = LP64_ONLY(10) NOT_LP64(9);
- const bool do_post_padding = VerifyOops || UseCompressedClassPointers;
- if (!do_post_padding) {
- // insert some nops so that the verified entry point is aligned on CodeEntryAlignment
- __ align(CodeEntryAlignment, __ offset() + ic_cmp_size);
- }
- int offset = __ offset();
- __ inline_cache_check(receiver, IC_Klass);
- assert(__ offset() % CodeEntryAlignment == 0 || do_post_padding, "alignment must be correct");
- if (do_post_padding) {
- // force alignment after the cache check.
- // It's been verified to be aligned if !VerifyOops
- __ align(CodeEntryAlignment);
- }
- return offset;
+ return __ ic_check(CodeEntryAlignment);
}
void LIR_Assembler::clinit_barrier(ciMethod* method) {
@@ -1635,7 +1618,7 @@ void LIR_Assembler::emit_alloc_array(LIR_OpAllocArray* op) {
len,
tmp1,
tmp2,
- arrayOopDesc::header_size(op->type()),
+ arrayOopDesc::base_offset_in_bytes(op->type()),
array_element_size(op->type()),
op->klass()->as_register(),
*op->stub()->entry());
diff --git a/src/hotspot/cpu/x86/c1_LIRGenerator_x86.cpp b/src/hotspot/cpu/x86/c1_LIRGenerator_x86.cpp
index b6a27abf0f37e..7088cf33cf646 100644
--- a/src/hotspot/cpu/x86/c1_LIRGenerator_x86.cpp
+++ b/src/hotspot/cpu/x86/c1_LIRGenerator_x86.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2005, 2023, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2005, 2024, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -1207,9 +1207,10 @@ void LIRGenerator::do_vectorizedMismatch(Intrinsic* x) {
__ move(result_reg, result);
}
+#ifndef _LP64
// _i2l, _i2f, _i2d, _l2i, _l2f, _l2d, _f2i, _f2l, _f2d, _d2i, _d2l, _d2f
// _i2b, _i2c, _i2s
-LIR_Opr fixed_register_for(BasicType type) {
+static LIR_Opr fixed_register_for(BasicType type) {
switch (type) {
case T_FLOAT: return FrameMap::fpu0_float_opr;
case T_DOUBLE: return FrameMap::fpu0_double_opr;
@@ -1218,6 +1219,7 @@ LIR_Opr fixed_register_for(BasicType type) {
default: ShouldNotReachHere(); return LIR_OprFact::illegalOpr;
}
}
+#endif
void LIRGenerator::do_Convert(Convert* x) {
#ifdef _LP64
diff --git a/src/hotspot/cpu/x86/c1_MacroAssembler_x86.cpp b/src/hotspot/cpu/x86/c1_MacroAssembler_x86.cpp
index 78361a305aeeb..caca3a1528261 100644
--- a/src/hotspot/cpu/x86/c1_MacroAssembler_x86.cpp
+++ b/src/hotspot/cpu/x86/c1_MacroAssembler_x86.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1999, 2023, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2024, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -25,6 +25,7 @@
#include "precompiled.hpp"
#include "c1/c1_MacroAssembler.hpp"
#include "c1/c1_Runtime1.hpp"
+#include "code/compiledIC.hpp"
#include "compiler/compilerDefinitions.inline.hpp"
#include "gc/shared/barrierSet.hpp"
#include "gc/shared/barrierSetAssembler.hpp"
@@ -34,10 +35,12 @@
#include "oops/arrayOop.hpp"
#include "oops/markWord.hpp"
#include "runtime/basicLock.hpp"
+#include "runtime/globals.hpp"
#include "runtime/os.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubRoutines.hpp"
#include "utilities/checkedCast.hpp"
+#include "utilities/globalDefinitions.hpp"
int C1_MacroAssembler::lock_object(Register hdr, Register obj, Register disp_hdr, Register tmp, Label& slow_case) {
const int aligned_mask = BytesPerWord -1;
@@ -60,9 +63,6 @@ int C1_MacroAssembler::lock_object(Register hdr, Register obj, Register disp_hdr
jcc(Assembler::notZero, slow_case);
}
- // Load object header
- movptr(hdr, Address(obj, hdr_offset));
-
if (LockingMode == LM_LIGHTWEIGHT) {
#ifdef _LP64
const Register thread = r15_thread;
@@ -73,6 +73,8 @@ int C1_MacroAssembler::lock_object(Register hdr, Register obj, Register disp_hdr
lightweight_lock(obj, hdr, thread, tmp, slow_case);
} else if (LockingMode == LM_LEGACY) {
Label done;
+ // Load object header
+ movptr(hdr, Address(obj, hdr_offset));
// and mark it as unlocked
orptr(hdr, markWord::unlocked_value);
// save unlocked object header into the displaced header location on the stack
@@ -134,9 +136,14 @@ void C1_MacroAssembler::unlock_object(Register hdr, Register obj, Register disp_
verify_oop(obj);
if (LockingMode == LM_LIGHTWEIGHT) {
- movptr(disp_hdr, Address(obj, hdr_offset));
- andptr(disp_hdr, ~(int32_t)markWord::lock_mask_in_place);
- lightweight_unlock(obj, disp_hdr, hdr, slow_case);
+#ifdef _LP64
+ lightweight_unlock(obj, disp_hdr, r15_thread, hdr, slow_case);
+#else
+ // This relies on the implementation of lightweight_unlock being able to handle
+ // that the reg_rax and thread Register parameters may alias each other.
+ get_thread(disp_hdr);
+ lightweight_unlock(obj, disp_hdr, disp_hdr, hdr, slow_case);
+#endif
} else if (LockingMode == LM_LEGACY) {
// test if object header is pointing to the displaced header, and if so, restore
// the displaced header in the object - if the object header is not pointing to
@@ -179,6 +186,15 @@ void C1_MacroAssembler::initialize_header(Register obj, Register klass, Register
if (len->is_valid()) {
movl(Address(obj, arrayOopDesc::length_offset_in_bytes()), len);
+#ifdef _LP64
+ int base_offset = arrayOopDesc::length_offset_in_bytes() + BytesPerInt;
+ if (!is_aligned(base_offset, BytesPerWord)) {
+ assert(is_aligned(base_offset, BytesPerInt), "must be 4-byte aligned");
+ // Clear gap/first 4 bytes following the length field.
+ xorl(t1, t1);
+ movl(Address(obj, base_offset), t1);
+ }
+#endif
}
#ifdef _LP64
else if (UseCompressedClassPointers) {
@@ -262,7 +278,7 @@ void C1_MacroAssembler::initialize_object(Register obj, Register klass, Register
verify_oop(obj);
}
-void C1_MacroAssembler::allocate_array(Register obj, Register len, Register t1, Register t2, int header_size, Address::ScaleFactor f, Register klass, Label& slow_case) {
+void C1_MacroAssembler::allocate_array(Register obj, Register len, Register t1, Register t2, int base_offset_in_bytes, Address::ScaleFactor f, Register klass, Label& slow_case) {
assert(obj == rax, "obj must be in rax, for cmpxchg");
assert_different_registers(obj, len, t1, t2, klass);
@@ -275,7 +291,7 @@ void C1_MacroAssembler::allocate_array(Register obj, Register len, Register t1,
const Register arr_size = t2; // okay to be the same
// align object end
- movptr(arr_size, header_size * BytesPerWord + MinObjAlignmentInBytesMask);
+ movptr(arr_size, base_offset_in_bytes + MinObjAlignmentInBytesMask);
lea(arr_size, Address(arr_size, len, f));
andptr(arr_size, ~MinObjAlignmentInBytesMask);
@@ -285,7 +301,10 @@ void C1_MacroAssembler::allocate_array(Register obj, Register len, Register t1,
// clear rest of allocated space
const Register len_zero = len;
- initialize_body(obj, arr_size, header_size * BytesPerWord, len_zero);
+ // Align up to the word boundary, because we clear the 4 bytes potentially
+ // following the length field in initialize_header().
+ int base_offset = align_up(base_offset_in_bytes, BytesPerWord);
+ initialize_body(obj, arr_size, base_offset, len_zero);
if (CURRENT_ENV->dtrace_alloc_probes()) {
assert(obj == rax, "must be");
@@ -295,30 +314,6 @@ void C1_MacroAssembler::allocate_array(Register obj, Register len, Register t1,
verify_oop(obj);
}
-
-
-void C1_MacroAssembler::inline_cache_check(Register receiver, Register iCache) {
- verify_oop(receiver);
- // explicit null check not needed since load from [klass_offset] causes a trap
- // check against inline cache
- assert(!MacroAssembler::needs_explicit_null_check(oopDesc::klass_offset_in_bytes()), "must add explicit null check");
- int start_offset = offset();
-
- if (UseCompressedClassPointers) {
- load_klass(rscratch1, receiver, rscratch2);
- cmpptr(rscratch1, iCache);
- } else {
- cmpptr(iCache, Address(receiver, oopDesc::klass_offset_in_bytes()));
- }
- // if icache check fails, then jump to runtime routine
- // Note: RECEIVER must still contain the receiver!
- jump_cc(Assembler::notEqual,
- RuntimeAddress(SharedRuntime::get_ic_miss_stub()));
- const int ic_cmp_size = LP64_ONLY(10) NOT_LP64(9);
- assert(UseCompressedClassPointers || offset() - start_offset == ic_cmp_size, "check alignment in emit_method_entry");
-}
-
-
void C1_MacroAssembler::build_frame(int frame_size_in_bytes, int bang_size_in_bytes) {
assert(bang_size_in_bytes >= frame_size_in_bytes, "stack bang size incorrect");
// Make sure there is enough stack space for this method's activation.
diff --git a/src/hotspot/cpu/x86/c1_MacroAssembler_x86.hpp b/src/hotspot/cpu/x86/c1_MacroAssembler_x86.hpp
index b3593feb05640..ae340e64fb737 100644
--- a/src/hotspot/cpu/x86/c1_MacroAssembler_x86.hpp
+++ b/src/hotspot/cpu/x86/c1_MacroAssembler_x86.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1999, 2023, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2024, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -89,7 +89,7 @@
// header_size: size of object header in words
// f : element scale factor
// slow_case : exit to slow case implementation if fast allocation fails
- void allocate_array(Register obj, Register len, Register t, Register t2, int header_size, Address::ScaleFactor f, Register klass, Label& slow_case);
+ void allocate_array(Register obj, Register len, Register t, Register t2, int base_offset_in_bytes, Address::ScaleFactor f, Register klass, Label& slow_case);
int rsp_offset() const { return _rsp_offset; }
void set_rsp_offset(int n) { _rsp_offset = n; }
diff --git a/src/hotspot/cpu/x86/c1_Runtime1_x86.cpp b/src/hotspot/cpu/x86/c1_Runtime1_x86.cpp
index 8b56f464f2739..2c24c0c2cfb17 100644
--- a/src/hotspot/cpu/x86/c1_Runtime1_x86.cpp
+++ b/src/hotspot/cpu/x86/c1_Runtime1_x86.cpp
@@ -38,7 +38,6 @@
#include "interpreter/interpreter.hpp"
#include "memory/universe.hpp"
#include "nativeInst_x86.hpp"
-#include "oops/compiledICHolder.hpp"
#include "oops/oop.inline.hpp"
#include "prims/jvmtiExport.hpp"
#include "register_x86.hpp"
diff --git a/src/hotspot/cpu/x86/c2_CodeStubs_x86.cpp b/src/hotspot/cpu/x86/c2_CodeStubs_x86.cpp
index b9b4e8af02c5f..6dc8d14064ad2 100644
--- a/src/hotspot/cpu/x86/c2_CodeStubs_x86.cpp
+++ b/src/hotspot/cpu/x86/c2_CodeStubs_x86.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2020, 2023, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2020, 2024, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -73,26 +73,74 @@ void C2EntryBarrierStub::emit(C2_MacroAssembler& masm) {
__ jmp(continuation(), false /* maybe_short */);
}
-#ifdef _LP64
-int C2HandleAnonOMOwnerStub::max_size() const {
- // Max size of stub has been determined by testing with 0, in which case
- // C2CodeStubList::emit() will throw an assertion and report the actual size that
- // is needed.
- return DEBUG_ONLY(36) NOT_DEBUG(21);
+int C2FastUnlockLightweightStub::max_size() const {
+ return 128;
}
-void C2HandleAnonOMOwnerStub::emit(C2_MacroAssembler& masm) {
- __ bind(entry());
- Register mon = monitor();
- Register t = tmp();
- __ movptr(Address(mon, OM_OFFSET_NO_MONITOR_VALUE_TAG(owner)), r15_thread);
- __ subl(Address(r15_thread, JavaThread::lock_stack_top_offset()), oopSize);
+void C2FastUnlockLightweightStub::emit(C2_MacroAssembler& masm) {
+ assert(_t == rax, "must be");
+
+ Label restore_held_monitor_count_and_slow_path;
+
+ { // Restore lock-stack and handle the unlock in runtime.
+
+ __ bind(_push_and_slow_path);
#ifdef ASSERT
- __ movl(t, Address(r15_thread, JavaThread::lock_stack_top_offset()));
- __ movptr(Address(r15_thread, t), 0);
+ // The obj entry was only cleared in debug builds.
+ __ movl(_t, Address(_thread, JavaThread::lock_stack_top_offset()));
+ __ movptr(Address(_thread, _t), _obj);
#endif
- __ jmp(continuation());
-}
+ __ addl(Address(_thread, JavaThread::lock_stack_top_offset()), oopSize);
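+ // Fall through into restore_held_monitor_count_and_slow_path below.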
+ }
+
+ { // Restore held monitor count and slow path.
+
+ __ bind(restore_held_monitor_count_and_slow_path);
+ // Restore held monitor count.
+ __ increment(Address(_thread, JavaThread::held_monitor_count_offset()));
+ // increment will always result in ZF = 0 (no overflows).
+ __ jmp(slow_path_continuation());
+ }
+
+ { // Handle monitor medium path.
+
+ __ bind(_check_successor);
+
+ Label fix_zf_and_unlocked;
+ const Register monitor = _mark;
+
+#ifndef _LP64
+ __ jmpb(restore_held_monitor_count_and_slow_path);
+#else // _LP64
+ // successor null check.
+ __ cmpptr(Address(monitor, OM_OFFSET_NO_MONITOR_VALUE_TAG(succ)), NULL_WORD);
+ __ jccb(Assembler::equal, restore_held_monitor_count_and_slow_path);
+
+ // Release lock.
+ __ movptr(Address(monitor, OM_OFFSET_NO_MONITOR_VALUE_TAG(owner)), NULL_WORD);
+
+ // Fence.
+ // Instead of MFENCE we use a dummy locked add of 0 to the top-of-stack.
+ __ lock(); __ addl(Address(rsp, 0), 0);
+
+ // Recheck successor.
+ __ cmpptr(Address(monitor, OM_OFFSET_NO_MONITOR_VALUE_TAG(succ)), NULL_WORD);
+ // A successor was observed after the release and fence: the monitor has been handed off.
+ __ jccb(Assembler::notEqual, fix_zf_and_unlocked);
+
+ // Try to relock; if it fails, the monitor has been handed over.
+ // TODO: Caveat, this may fail due to deflation, which does not
+ // handle the monitor handoff. It currently works only because
+ // of the responsible thread.
+ __ xorptr(rax, rax);
+ __ lock(); __ cmpxchgptr(_thread, Address(monitor, OM_OFFSET_NO_MONITOR_VALUE_TAG(owner)));
+ __ jccb (Assembler::equal, restore_held_monitor_count_and_slow_path);
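+ // The CAS failed: the monitor has been handed over, so fall through and report unlocked.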
#endif
+ __ bind(fix_zf_and_unlocked);
+ __ xorl(rax, rax);
+ __ jmp(unlocked_continuation());
+ }
+}
+
#undef __
diff --git a/src/hotspot/cpu/x86/c2_MacroAssembler_x86.cpp b/src/hotspot/cpu/x86/c2_MacroAssembler_x86.cpp
index 7512a366e7ea0..b6ecde62af655 100644
--- a/src/hotspot/cpu/x86/c2_MacroAssembler_x86.cpp
+++ b/src/hotspot/cpu/x86/c2_MacroAssembler_x86.cpp
@@ -33,9 +33,13 @@
#include "opto/output.hpp"
#include "opto/opcodes.hpp"
#include "opto/subnode.hpp"
+#include "runtime/globals.hpp"
#include "runtime/objectMonitor.hpp"
#include "runtime/stubRoutines.hpp"
#include "utilities/checkedCast.hpp"
+#include "utilities/globalDefinitions.hpp"
+#include "utilities/powerOfTwo.hpp"
+#include "utilities/sizes.hpp"
#ifdef PRODUCT
#define BLOCK_COMMENT(str) /* nothing */
@@ -554,6 +558,7 @@ void C2_MacroAssembler::fast_lock(Register objReg, Register boxReg, Register tmp
RTMLockingCounters* stack_rtm_counters,
Metadata* method_data,
bool use_rtm, bool profile_rtm) {
+ assert(LockingMode != LM_LIGHTWEIGHT, "lightweight locking should use fast_lock_lightweight");
// Ensure the register assignments are disjoint
assert(tmpReg == rax, "");
@@ -605,7 +610,8 @@ void C2_MacroAssembler::fast_lock(Register objReg, Register boxReg, Register tmp
if (LockingMode == LM_MONITOR) {
// Clear ZF so that we take the slow path at the DONE label. objReg is known to be not 0.
testptr(objReg, objReg);
- } else if (LockingMode == LM_LEGACY) {
+ } else {
+ assert(LockingMode == LM_LEGACY, "must be");
// Attempt stack-locking ...
orptr (tmpReg, markWord::unlocked_value);
movptr(Address(boxReg, 0), tmpReg); // Anticipate successful CAS
@@ -620,10 +626,6 @@ void C2_MacroAssembler::fast_lock(Register objReg, Register boxReg, Register tmp
// Next instruction set ZFlag == 1 (Success) if difference is less then one page.
andptr(tmpReg, (int32_t) (NOT_LP64(0xFFFFF003) LP64_ONLY(7 - (int)os::vm_page_size())) );
movptr(Address(boxReg, 0), tmpReg);
- } else {
- assert(LockingMode == LM_LIGHTWEIGHT, "");
- lightweight_lock(objReg, tmpReg, thread, scrReg, NO_COUNT);
- jmp(COUNT);
}
jmp(DONE_LABEL);
@@ -754,6 +756,7 @@ void C2_MacroAssembler::fast_lock(Register objReg, Register boxReg, Register tmp
// Xcheck:jni is enabled.
void C2_MacroAssembler::fast_unlock(Register objReg, Register boxReg, Register tmpReg, bool use_rtm) {
+ assert(LockingMode != LM_LIGHTWEIGHT, "lightweight locking should use fast_unlock_lightweight");
assert(boxReg == rax, "");
assert_different_registers(objReg, boxReg, tmpReg);
@@ -784,23 +787,6 @@ void C2_MacroAssembler::fast_unlock(Register objReg, Register boxReg, Register t
}
// It's inflated.
- if (LockingMode == LM_LIGHTWEIGHT) {
- // If the owner is ANONYMOUS, we need to fix it - in an outline stub.
- testb(Address(tmpReg, OM_OFFSET_NO_MONITOR_VALUE_TAG(owner)), (int32_t) ObjectMonitor::ANONYMOUS_OWNER);
-#ifdef _LP64
- if (!Compile::current()->output()->in_scratch_emit_size()) {
- C2HandleAnonOMOwnerStub* stub = new (Compile::current()->comp_arena()) C2HandleAnonOMOwnerStub(tmpReg, boxReg);
- Compile::current()->output()->add_stub(stub);
- jcc(Assembler::notEqual, stub->entry());
- bind(stub->continuation());
- } else
-#endif
- {
- // We can't easily implement this optimization on 32 bit because we don't have a thread register.
- // Call the slow-path instead.
- jcc(Assembler::notEqual, NO_COUNT);
- }
- }
#if INCLUDE_RTM_OPT
if (use_rtm) {
@@ -922,19 +908,14 @@ void C2_MacroAssembler::fast_unlock(Register objReg, Register boxReg, Register t
jmpb (DONE_LABEL);
#endif
- if (LockingMode != LM_MONITOR) {
+ if (LockingMode == LM_LEGACY) {
bind (Stacked);
- if (LockingMode == LM_LIGHTWEIGHT) {
- mov(boxReg, tmpReg);
- lightweight_unlock(objReg, boxReg, tmpReg, NO_COUNT);
- jmp(COUNT);
- } else if (LockingMode == LM_LEGACY) {
- movptr(tmpReg, Address (boxReg, 0)); // re-fetch
- lock();
- cmpxchgptr(tmpReg, Address(objReg, oopDesc::mark_offset_in_bytes())); // Uses RAX which is box
- }
+ movptr(tmpReg, Address (boxReg, 0)); // re-fetch
+ lock();
+ cmpxchgptr(tmpReg, Address(objReg, oopDesc::mark_offset_in_bytes())); // Uses RAX which is box
// Intentional fall-thru into DONE_LABEL
}
+
bind(DONE_LABEL);
// ZFlag == 1 count in fast path
@@ -955,6 +936,247 @@ void C2_MacroAssembler::fast_unlock(Register objReg, Register boxReg, Register t
bind(NO_COUNT);
}
+void C2_MacroAssembler::fast_lock_lightweight(Register obj, Register box, Register rax_reg,
+ Register t, Register thread) {
+ assert(LockingMode == LM_LIGHTWEIGHT, "must be");
+ assert(rax_reg == rax, "Used for CAS");
+ assert_different_registers(obj, box, rax_reg, t, thread);
+
+ // Handle inflated monitor.
+ Label inflated;
+ // Finish fast lock successfully. ZF value is irrelevant.
+ Label locked;
+ // Finish fast lock unsuccessfully. MUST jump with ZF == 0
+ Label slow_path;
+
+ if (DiagnoseSyncOnValueBasedClasses != 0) {
+ load_klass(rax_reg, obj, t);
+ movl(rax_reg, Address(rax_reg, Klass::access_flags_offset()));
+ testl(rax_reg, JVM_ACC_IS_VALUE_BASED_CLASS);
+ jcc(Assembler::notZero, slow_path);
+ }
+
+ const Register mark = t;
+
+ { // Lightweight Lock
+
+ Label push;
+
+ const Register top = box;
+
+ // Load the mark.
+ movptr(mark, Address(obj, oopDesc::mark_offset_in_bytes()));
+
+ // Prefetch top.
+ movl(top, Address(thread, JavaThread::lock_stack_top_offset()));
+
+ // Check for monitor (0b10).
+ testptr(mark, markWord::monitor_value);
+ jcc(Assembler::notZero, inflated);
+
+ // Check if lock-stack is full.
+ cmpl(top, LockStack::end_offset() - 1);
+ jcc(Assembler::greater, slow_path);
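+ // Comparing against end_offset() - 1 with 'greater' (rather than 'greaterEqual') guarantees ZF == 0 when branching to slow_path.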
+
+ // Check if recursive.
+ cmpptr(obj, Address(thread, top, Address::times_1, -oopSize));
+ jccb(Assembler::equal, push);
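+ // obj is already on top of the lock-stack: push it again to record the recursion.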
+
+ // Try to lock. Transition lock bits 0b01 => 0b00
+ movptr(rax_reg, mark);
+ orptr(rax_reg, markWord::unlocked_value);
+ andptr(mark, ~(int32_t)markWord::unlocked_value);
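+ // rax_reg holds the expected unlocked mark (0b01), mark the desired locked mark (0b00).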
+ lock(); cmpxchgptr(mark, Address(obj, oopDesc::mark_offset_in_bytes()));
+ jcc(Assembler::notEqual, slow_path);
+
+ bind(push);
+ // After successful lock, push object on lock-stack.
+ movptr(Address(thread, top), obj);
+ addl(Address(thread, JavaThread::lock_stack_top_offset()), oopSize);
+ jmpb(locked);
+ }
+
+ { // Handle inflated monitor.
+ bind(inflated);
+
+ const Register tagged_monitor = mark;
+
+ // CAS owner (null => current thread).
+ xorptr(rax_reg, rax_reg);
+ lock(); cmpxchgptr(thread, Address(tagged_monitor, OM_OFFSET_NO_MONITOR_VALUE_TAG(owner)));
+ jccb(Assembler::equal, locked);
+
+ // Check if recursive.
+ cmpptr(thread, rax_reg);
+ jccb(Assembler::notEqual, slow_path);
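+ // The failed CAS left the current owner in rax_reg; if it is this thread, this is a recursive enter.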
+
+ // Recursive.
+ increment(Address(tagged_monitor, OM_OFFSET_NO_MONITOR_VALUE_TAG(recursions)));
+ }
+
+ bind(locked);
+ increment(Address(thread, JavaThread::held_monitor_count_offset()));
+ // Set ZF = 1
+ xorl(rax_reg, rax_reg);
+
+#ifdef ASSERT
+ // Check that locked label is reached with ZF set.
+ Label zf_correct;
+ jccb(Assembler::zero, zf_correct);
+ stop("Fast Lock ZF != 1");
+#endif
+
+ bind(slow_path);
+#ifdef ASSERT
+ // Check that slow_path label is reached with ZF not set.
+ jccb(Assembler::notZero, zf_correct);
+ stop("Fast Lock ZF != 0");
+ bind(zf_correct);
+#endif
+ // C2 uses the value of ZF to determine the continuation.
+}
+
+void C2_MacroAssembler::fast_unlock_lightweight(Register obj, Register reg_rax, Register t, Register thread) {
+ assert(LockingMode == LM_LIGHTWEIGHT, "must be");
+ assert(reg_rax == rax, "Used for CAS");
+ assert_different_registers(obj, reg_rax, t);
+
+ // Handle inflated monitor.
+ Label inflated, inflated_check_lock_stack;
+ // Finish fast unlock successfully. MUST jump with ZF == 1
+ Label unlocked;
+
+ // Assume success.
+ decrement(Address(thread, JavaThread::held_monitor_count_offset()));
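+ // The slow-path stub re-increments this count if the unlock does not succeed.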
+
+ const Register mark = t;
+ const Register top = reg_rax;
+
+ Label dummy;
+ C2FastUnlockLightweightStub* stub = nullptr;
+
+ if (!Compile::current()->output()->in_scratch_emit_size()) {
+ stub = new (Compile::current()->comp_arena()) C2FastUnlockLightweightStub(obj, mark, reg_rax, thread);
+ Compile::current()->output()->add_stub(stub);
+ }
+
+ Label& push_and_slow_path = stub == nullptr ? dummy : stub->push_and_slow_path();
+ Label& check_successor = stub == nullptr ? dummy : stub->check_successor();
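+ // During scratch emission (size estimation) there is no stub; the branches target the local dummy label instead.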
+
+ { // Lightweight Unlock
+
+ // Load top.
+ movl(top, Address(thread, JavaThread::lock_stack_top_offset()));
+
+ // Prefetch mark.
+ movptr(mark, Address(obj, oopDesc::mark_offset_in_bytes()));
+
+ // Check if obj is top of lock-stack.
+ cmpptr(obj, Address(thread, top, Address::times_1, -oopSize));
+ // Top of lock stack was not obj. Must be monitor.
+ jcc(Assembler::notEqual, inflated_check_lock_stack);
+
+ // Pop lock-stack.
+ DEBUG_ONLY(movptr(Address(thread, top, Address::times_1, -oopSize), 0);)
+ subl(Address(thread, JavaThread::lock_stack_top_offset()), oopSize);
+
+ // Check if recursive.
+ cmpptr(obj, Address(thread, top, Address::times_1, -2 * oopSize));
+ jcc(Assembler::equal, unlocked);
+
+ // We elide the monitor check, let the CAS fail instead.
+
+ // Try to unlock. Transition lock bits 0b00 => 0b01
+ movptr(reg_rax, mark);
+ andptr(reg_rax, ~(int32_t)markWord::lock_mask);
+ orptr(mark, markWord::unlocked_value);
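+ // reg_rax holds the expected locked mark (0b00), mark the desired unlocked mark (0b01).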
+ lock(); cmpxchgptr(mark, Address(obj, oopDesc::mark_offset_in_bytes()));
+ jcc(Assembler::notEqual, push_and_slow_path);
+ jmp(unlocked);
+ }
+
+
+ { // Handle inflated monitor.
+ bind(inflated_check_lock_stack);
+#ifdef ASSERT
+ Label check_done;
+ subl(top, oopSize);
+ cmpl(top, in_bytes(JavaThread::lock_stack_base_offset()));
+ jcc(Assembler::below, check_done);
+ cmpptr(obj, Address(thread, top));
+ jccb(Assembler::notEqual, inflated_check_lock_stack);
+ stop("Fast Unlock lock on stack");
+ bind(check_done);
+ testptr(mark, markWord::monitor_value);
+ jccb(Assembler::notZero, inflated);
+ stop("Fast Unlock not monitor");
+#endif
+
+ bind(inflated);
+
+ // mark contains the tagged ObjectMonitor*.
+ const Register monitor = mark;
+
+#ifndef _LP64
+ // Check if recursive.
+ xorptr(reg_rax, reg_rax);
+ orptr(reg_rax, Address(monitor, OM_OFFSET_NO_MONITOR_VALUE_TAG(recursions)));
+ jcc(Assembler::notZero, check_successor);
+
+ // Check if the entry lists are empty.
+ movptr(reg_rax, Address(monitor, OM_OFFSET_NO_MONITOR_VALUE_TAG(EntryList)));
+ orptr(reg_rax, Address(monitor, OM_OFFSET_NO_MONITOR_VALUE_TAG(cxq)));
+ jcc(Assembler::notZero, check_successor);
+
+ // Release lock.
+ movptr(Address(monitor, OM_OFFSET_NO_MONITOR_VALUE_TAG(owner)), NULL_WORD);
+#else // _LP64
+ Label recursive;
+
+ // Check if recursive.
+ cmpptr(Address(monitor, OM_OFFSET_NO_MONITOR_VALUE_TAG(recursions)), 0);
+ jccb(Assembler::notEqual, recursive);
+
+ // Check if the entry lists are empty.
+ movptr(reg_rax, Address(monitor, OM_OFFSET_NO_MONITOR_VALUE_TAG(cxq)));
+ orptr(reg_rax, Address(monitor, OM_OFFSET_NO_MONITOR_VALUE_TAG(EntryList)));
+ jcc(Assembler::notZero, check_successor);
+
+ // Release lock.
+ movptr(Address(monitor, OM_OFFSET_NO_MONITOR_VALUE_TAG(owner)), NULL_WORD);
+ jmpb(unlocked);
+
+ // Recursive unlock.
+ bind(recursive);
+ decrement(Address(monitor, OM_OFFSET_NO_MONITOR_VALUE_TAG(recursions)));
+ xorl(t, t);
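+ // xorl sets ZF = 1 so the fall-through into the unlocked path reports success.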
+#endif
+ }
+
+ bind(unlocked);
+ if (stub != nullptr) {
+ bind(stub->unlocked_continuation());
+ }
+
+#ifdef ASSERT
+ // Check that unlocked label is reached with ZF set.
+ Label zf_correct;
+ jccb(Assembler::zero, zf_correct);
+ stop("Fast Unlock ZF != 1");
+#endif
+
+ if (stub != nullptr) {
+ bind(stub->slow_path_continuation());
+ }
+#ifdef ASSERT
+ // Check that stub->continuation() label is reached with ZF not set.
+ jccb(Assembler::notZero, zf_correct);
+ stop("Fast Unlock ZF != 0");
+ bind(zf_correct);
+#endif
+ // C2 uses the value of ZF to determine the continuation.
+}
+
//-------------------------------------------------------------------------------------------
// Generic instructions support for use in .ad files C2 code generation
diff --git a/src/hotspot/cpu/x86/c2_MacroAssembler_x86.hpp b/src/hotspot/cpu/x86/c2_MacroAssembler_x86.hpp
index 151f2148372d5..26f7fb44aa939 100644
--- a/src/hotspot/cpu/x86/c2_MacroAssembler_x86.hpp
+++ b/src/hotspot/cpu/x86/c2_MacroAssembler_x86.hpp
@@ -43,6 +43,10 @@
bool use_rtm, bool profile_rtm);
void fast_unlock(Register obj, Register box, Register tmp, bool use_rtm);
+ void fast_lock_lightweight(Register obj, Register box, Register rax_reg,
+ Register t, Register thread);
+ void fast_unlock_lightweight(Register obj, Register reg_rax, Register t, Register thread);
+
#if INCLUDE_RTM_OPT
void rtm_counters_update(Register abort_status, Register rtm_counters);
void branch_on_random_using_rdtsc(Register tmp, Register scr, int count, Label& brLabel);
diff --git a/src/hotspot/cpu/x86/compiledIC_x86.cpp b/src/hotspot/cpu/x86/compiledIC_x86.cpp
index 8fc001039fbd3..95b41f62b6aab 100644
--- a/src/hotspot/cpu/x86/compiledIC_x86.cpp
+++ b/src/hotspot/cpu/x86/compiledIC_x86.cpp
@@ -26,7 +26,6 @@
#include "asm/macroAssembler.inline.hpp"
#include "code/codeCache.hpp"
#include "code/compiledIC.hpp"
-#include "code/icBuffer.hpp"
#include "code/nmethod.hpp"
#include "logging/log.hpp"
#include "memory/resourceArea.hpp"
@@ -36,7 +35,7 @@
// ----------------------------------------------------------------------------
#define __ _masm.
-address CompiledStaticCall::emit_to_interp_stub(CodeBuffer &cbuf, address mark) {
+address CompiledDirectCall::emit_to_interp_stub(CodeBuffer &cbuf, address mark) {
// Stub is fixed up when the corresponding call is converted from
// calling compiled code to calling interpreted code.
// movq rbx, 0
@@ -66,32 +65,25 @@ address CompiledStaticCall::emit_to_interp_stub(CodeBuffer &cbuf, address mark)
}
#undef __
-int CompiledStaticCall::to_interp_stub_size() {
+int CompiledDirectCall::to_interp_stub_size() {
return NOT_LP64(10) // movl; jmp
LP64_ONLY(15); // movq (1+1+8); jmp (1+4)
}
-int CompiledStaticCall::to_trampoline_stub_size() {
+int CompiledDirectCall::to_trampoline_stub_size() {
// x86 doesn't use trampolines.
return 0;
}
// Relocation entries for call stub, compiled java to interpreter.
-int CompiledStaticCall::reloc_to_interp_stub() {
+int CompiledDirectCall::reloc_to_interp_stub() {
return 4; // 3 in emit_to_interp_stub + 1 in emit_call
}
-void CompiledDirectStaticCall::set_to_interpreted(const methodHandle& callee, address entry) {
+void CompiledDirectCall::set_to_interpreted(const methodHandle& callee, address entry) {
address stub = find_stub();
guarantee(stub != nullptr, "stub not found");
- {
- ResourceMark rm;
- log_trace(inlinecache)("CompiledDirectStaticCall@" INTPTR_FORMAT ": set_to_interpreted %s",
- p2i(instruction_address()),
- callee->name_and_sig_as_C_string());
- }
-
// Creation also verifies the object.
NativeMovConstReg* method_holder = nativeMovConstReg_at(stub);
NativeJump* jump = nativeJump_at(method_holder->next_instruction_address());
@@ -105,7 +97,7 @@ void CompiledDirectStaticCall::set_to_interpreted(const methodHandle& callee, ad
set_destination_mt_safe(stub);
}
-void CompiledDirectStaticCall::set_stub_to_clean(static_stub_Relocation* static_stub) {
+void CompiledDirectCall::set_stub_to_clean(static_stub_Relocation* static_stub) {
assert(CompiledICLocker::is_safe(static_stub->addr()), "mt unsafe call");
// Reset stub.
address stub = static_stub->addr();
@@ -122,7 +114,7 @@ void CompiledDirectStaticCall::set_stub_to_clean(static_stub_Relocation* static_
// Non-product mode code
#ifndef PRODUCT
-void CompiledDirectStaticCall::verify() {
+void CompiledDirectCall::verify() {
// Verify call.
_call->verify();
_call->verify_alignment();
diff --git a/src/hotspot/cpu/x86/gc/g1/g1BarrierSetAssembler_x86.cpp b/src/hotspot/cpu/x86/gc/g1/g1BarrierSetAssembler_x86.cpp
index f609846f00d6c..f9f77c23f14ca 100644
--- a/src/hotspot/cpu/x86/gc/g1/g1BarrierSetAssembler_x86.cpp
+++ b/src/hotspot/cpu/x86/gc/g1/g1BarrierSetAssembler_x86.cpp
@@ -28,8 +28,8 @@
#include "gc/g1/g1BarrierSetAssembler.hpp"
#include "gc/g1/g1BarrierSetRuntime.hpp"
#include "gc/g1/g1CardTable.hpp"
+#include "gc/g1/g1HeapRegion.hpp"
#include "gc/g1/g1ThreadLocalData.hpp"
-#include "gc/g1/heapRegion.hpp"
#include "interpreter/interp_masm.hpp"
#include "runtime/sharedRuntime.hpp"
#include "utilities/debug.hpp"
diff --git a/src/hotspot/cpu/x86/icBuffer_x86.cpp b/src/hotspot/cpu/x86/icBuffer_x86.cpp
deleted file mode 100644
index af374b5741659..0000000000000
--- a/src/hotspot/cpu/x86/icBuffer_x86.cpp
+++ /dev/null
@@ -1,95 +0,0 @@
-/*
- * Copyright (c) 1997, 2023, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#include "precompiled.hpp"
-#include "asm/macroAssembler.hpp"
-#include "asm/macroAssembler.inline.hpp"
-#include "code/icBuffer.hpp"
-#include "gc/shared/collectedHeap.inline.hpp"
-#include "interpreter/bytecodes.hpp"
-#include "memory/resourceArea.hpp"
-#include "nativeInst_x86.hpp"
-#include "oops/oop.inline.hpp"
-
-int InlineCacheBuffer::ic_stub_code_size() {
- // Worst case, if destination is not a near call:
- // lea rax, lit1
- // lea scratch, lit2
- // jmp scratch
-
- // Best case
- // lea rax, lit1
- // jmp lit2
-
- int best = NativeMovConstReg::instruction_size + NativeJump::instruction_size;
- int worst = 2 * NativeMovConstReg::instruction_size + 3;
- return MAX2(best, worst);
-}
-
-
-
-void InlineCacheBuffer::assemble_ic_buffer_code(address code_begin, void* cached_value, address entry_point) {
- ResourceMark rm;
- CodeBuffer code(code_begin, ic_stub_code_size());
- MacroAssembler* masm = new MacroAssembler(&code);
- // note: even though the code contains an embedded value, we do not need reloc info
- // because
- // (1) the value is old (i.e., doesn't matter for scavenges)
- // (2) these ICStubs are removed *before* a GC happens, so the roots disappear
- // assert(cached_value == nullptr || cached_oop->is_perm(), "must be perm oop");
- masm->lea(rax, AddressLiteral((address) cached_value, relocInfo::metadata_type));
- masm->jump(ExternalAddress(entry_point));
-}
-
-
-address InlineCacheBuffer::ic_buffer_entry_point(address code_begin) {
- NativeMovConstReg* move = nativeMovConstReg_at(code_begin); // creation also verifies the object
- address jmp = move->next_instruction_address();
- NativeInstruction* ni = nativeInstruction_at(jmp);
- if (ni->is_jump()) {
- NativeJump* jump = nativeJump_at(jmp);
- return jump->jump_destination();
- } else {
- assert(ni->is_far_jump(), "unexpected instruction");
- NativeFarJump* jump = nativeFarJump_at(jmp);
- return jump->jump_destination();
- }
-}
-
-
-void* InlineCacheBuffer::ic_buffer_cached_value(address code_begin) {
- // creation also verifies the object
- NativeMovConstReg* move = nativeMovConstReg_at(code_begin);
- // Verifies the jump
- address jmp = move->next_instruction_address();
- NativeInstruction* ni = nativeInstruction_at(jmp);
- if (ni->is_jump()) {
- NativeJump* jump = nativeJump_at(jmp);
- } else {
- assert(ni->is_far_jump(), "unexpected instruction");
- NativeFarJump* jump = nativeFarJump_at(jmp);
- }
- void* o = (void*)move->data();
- return o;
-}
diff --git a/src/hotspot/cpu/x86/interp_masm_x86.cpp b/src/hotspot/cpu/x86/interp_masm_x86.cpp
index f5f83ae21f475..33570f3155b15 100644
--- a/src/hotspot/cpu/x86/interp_masm_x86.cpp
+++ b/src/hotspot/cpu/x86/interp_masm_x86.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2023, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2024, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -1192,8 +1192,6 @@ void InterpreterMacroAssembler::lock_object(Register lock_reg) {
const Register thread = lock_reg;
get_thread(thread);
#endif
- // Load object header, prepare for CAS from unlocked to locked.
- movptr(swap_reg, Address(obj_reg, oopDesc::mark_offset_in_bytes()));
lightweight_lock(obj_reg, swap_reg, thread, tmp_reg, slow_case);
} else if (LockingMode == LM_LEGACY) {
// Load immediate 1 into swap_reg %rax
@@ -1311,20 +1309,13 @@ void InterpreterMacroAssembler::unlock_object(Register lock_reg) {
if (LockingMode == LM_LIGHTWEIGHT) {
#ifdef _LP64
- const Register thread = r15_thread;
+ lightweight_unlock(obj_reg, swap_reg, r15_thread, header_reg, slow_case);
#else
- const Register thread = header_reg;
- get_thread(thread);
+ // This relies on lightweight_unlock being able to handle the case where
+ // the reg_rax and thread Register parameters alias each other.
+ get_thread(swap_reg);
+ lightweight_unlock(obj_reg, swap_reg, swap_reg, header_reg, slow_case);
#endif
- // Handle unstructured locking.
- Register tmp = swap_reg;
- movl(tmp, Address(thread, JavaThread::lock_stack_top_offset()));
- cmpptr(obj_reg, Address(thread, tmp, Address::times_1, -oopSize));
- jcc(Assembler::notEqual, slow_case);
- // Try to swing header from locked to unlocked.
- movptr(swap_reg, Address(obj_reg, oopDesc::mark_offset_in_bytes()));
- andptr(swap_reg, ~(int32_t)markWord::lock_mask_in_place);
- lightweight_unlock(obj_reg, swap_reg, header_reg, slow_case);
} else if (LockingMode == LM_LEGACY) {
// Load the old header from BasicLock structure
movptr(header_reg, Address(swap_reg,
diff --git a/src/hotspot/cpu/x86/macroAssembler_x86.cpp b/src/hotspot/cpu/x86/macroAssembler_x86.cpp
index ba4b089c7aa6e..f0e7a08dd5f2a 100644
--- a/src/hotspot/cpu/x86/macroAssembler_x86.cpp
+++ b/src/hotspot/cpu/x86/macroAssembler_x86.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2023, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2024, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -25,6 +25,7 @@
#include "precompiled.hpp"
#include "asm/assembler.hpp"
#include "asm/assembler.inline.hpp"
+#include "code/compiledIC.hpp"
#include "compiler/compiler_globals.hpp"
#include "compiler/disassembler.hpp"
#include "crc32c.h"
@@ -1341,13 +1342,45 @@ void MacroAssembler::ic_call(address entry, jint method_index) {
RelocationHolder rh = virtual_call_Relocation::spec(pc(), method_index);
#ifdef _LP64
// Needs full 64-bit immediate for later patching.
- mov64(rax, (intptr_t)Universe::non_oop_word());
+ mov64(rax, (int64_t)Universe::non_oop_word());
#else
movptr(rax, (intptr_t)Universe::non_oop_word());
#endif
call(AddressLiteral(entry, rh));
}
+int MacroAssembler::ic_check_size() {
+ return LP64_ONLY(14) NOT_LP64(12);
+}
+
+int MacroAssembler::ic_check(int end_alignment) {
+ Register receiver = LP64_ONLY(j_rarg0) NOT_LP64(rcx);
+ Register data = rax;
+ Register temp = LP64_ONLY(rscratch1) NOT_LP64(rbx);
+
+ // The UEP of a code blob ensures that the VEP is padded. However, the padding of the UEP is placed
+ // before the inline cache check, so we don't have to execute any nop instructions when dispatching
+ // through the UEP, yet we can ensure that the VEP is aligned appropriately. That's why we align
+ // before the inline cache check here, and not after it.
+ align(end_alignment, offset() + ic_check_size());
+
+ int uep_offset = offset();
+
+ if (UseCompressedClassPointers) {
+ movl(temp, Address(receiver, oopDesc::klass_offset_in_bytes()));
+ cmpl(temp, Address(data, CompiledICData::speculated_klass_offset()));
+ } else {
+ movptr(temp, Address(receiver, oopDesc::klass_offset_in_bytes()));
+ cmpptr(temp, Address(data, CompiledICData::speculated_klass_offset()));
+ }
+
+ // if inline cache check fails, then jump to runtime routine
+ jump_cc(Assembler::notEqual, RuntimeAddress(SharedRuntime::get_ic_miss_stub()));
+ assert((offset() % end_alignment) == 0, "Misaligned verified entry point");
+
+ return uep_offset;
+}
+
void MacroAssembler::emit_static_call_stub() {
// Static stub relocation also tags the Method* in the code-stream.
mov_metadata(rbx, (Metadata*) nullptr); // Method is zapped till fixup time.
@@ -4087,8 +4120,9 @@ static void restore_xmm_register(MacroAssembler* masm, int offset, XMMRegister r
}
}
-int register_section_sizes(RegSet gp_registers, XMMRegSet xmm_registers, bool save_fpu,
- int& gp_area_size, int& fp_area_size, int& xmm_area_size) {
+static int register_section_sizes(RegSet gp_registers, XMMRegSet xmm_registers,
+ bool save_fpu, int& gp_area_size,
+ int& fp_area_size, int& xmm_area_size) {
gp_area_size = align_up(gp_registers.size() * Register::max_slots_per_register * VMRegImpl::stack_slot_size,
StackAlignmentInBytes);
@@ -4354,7 +4388,7 @@ void MacroAssembler::lookup_interface_method(Register recv_klass,
}
// Look up the method for a megamorphic invokeinterface call in a single pass over itable:
-// - check recv_klass (actual object class) is a subtype of resolved_klass from CompiledICHolder
+// - check recv_klass (actual object class) is a subtype of resolved_klass from CompiledICData
// - find a holder_klass (class that implements the method) vtable offset and get the method from vtable by index
// The target method is determined by .
// The receiver klass is in recv_klass.
@@ -9877,68 +9911,116 @@ void MacroAssembler::check_stack_alignment(Register sp, const char* msg, unsigne
}
// Implements lightweight-locking.
-// Branches to slow upon failure to lock the object, with ZF cleared.
-// Falls through upon success with unspecified ZF.
//
// obj: the object to be locked
-// hdr: the (pre-loaded) header of the object, must be rax
+// reg_rax: rax
// thread: the thread which attempts to lock obj
// tmp: a temporary register
-void MacroAssembler::lightweight_lock(Register obj, Register hdr, Register thread, Register tmp, Label& slow) {
- assert(hdr == rax, "header must be in rax for cmpxchg");
- assert_different_registers(obj, hdr, thread, tmp);
-
- // First we need to check if the lock-stack has room for pushing the object reference.
- // Note: we subtract 1 from the end-offset so that we can do a 'greater' comparison, instead
- // of 'greaterEqual' below, which readily clears the ZF. This makes C2 code a little simpler and
- // avoids one branch.
- cmpl(Address(thread, JavaThread::lock_stack_top_offset()), LockStack::end_offset() - 1);
- jcc(Assembler::greater, slow);
-
- // Now we attempt to take the fast-lock.
- // Clear lock_mask bits (locked state).
- andptr(hdr, ~(int32_t)markWord::lock_mask_in_place);
- movptr(tmp, hdr);
- // Set unlocked_value bit.
- orptr(hdr, markWord::unlocked_value);
- lock();
- cmpxchgptr(tmp, Address(obj, oopDesc::mark_offset_in_bytes()));
+void MacroAssembler::lightweight_lock(Register obj, Register reg_rax, Register thread, Register tmp, Label& slow) {
+ assert(reg_rax == rax, "");
+ assert_different_registers(obj, reg_rax, thread, tmp);
+
+ Label push;
+ const Register top = tmp;
+
+ // Preload the markWord. It is important that this is the first
+ // instruction emitted as it is part of C1's null check semantics.
+ movptr(reg_rax, Address(obj, oopDesc::mark_offset_in_bytes()));
+
+ // Load top.
+ movl(top, Address(thread, JavaThread::lock_stack_top_offset()));
+
+ // Check if the lock-stack is full.
+ cmpl(top, LockStack::end_offset());
+ jcc(Assembler::greaterEqual, slow);
+
+ // Check for recursion.
+ cmpptr(obj, Address(thread, top, Address::times_1, -oopSize));
+ jcc(Assembler::equal, push);
+
+ // Check header for monitor (0b10).
+ testptr(reg_rax, markWord::monitor_value);
+ jcc(Assembler::notZero, slow);
+
+ // Try to lock. Transition lock bits 0b01 => 0b00
+ movptr(tmp, reg_rax);
+ andptr(tmp, ~(int32_t)markWord::unlocked_value);
+ orptr(reg_rax, markWord::unlocked_value);
+ lock(); cmpxchgptr(tmp, Address(obj, oopDesc::mark_offset_in_bytes()));
jcc(Assembler::notEqual, slow);
- // If successful, push object to lock-stack.
- movl(tmp, Address(thread, JavaThread::lock_stack_top_offset()));
- movptr(Address(thread, tmp), obj);
- incrementl(tmp, oopSize);
- movl(Address(thread, JavaThread::lock_stack_top_offset()), tmp);
+ // Restore top, CAS clobbers register.
+ movl(top, Address(thread, JavaThread::lock_stack_top_offset()));
+
+ bind(push);
+ // After successful lock, push object on lock-stack.
+ movptr(Address(thread, top), obj);
+ incrementl(top, oopSize);
+ movl(Address(thread, JavaThread::lock_stack_top_offset()), top);
}
// Implements lightweight-unlocking.
-// Branches to slow upon failure, with ZF cleared.
-// Falls through upon success, with unspecified ZF.
//
// obj: the object to be unlocked
-// hdr: the (pre-loaded) header of the object, must be rax
+// reg_rax: rax
+// thread: the thread
// tmp: a temporary register
-void MacroAssembler::lightweight_unlock(Register obj, Register hdr, Register tmp, Label& slow) {
- assert(hdr == rax, "header must be in rax for cmpxchg");
- assert_different_registers(obj, hdr, tmp);
-
- // Mark-word must be lock_mask now, try to swing it back to unlocked_value.
- movptr(tmp, hdr); // The expected old value
- orptr(tmp, markWord::unlocked_value);
- lock();
- cmpxchgptr(tmp, Address(obj, oopDesc::mark_offset_in_bytes()));
+//
+// x86_32 Note: reg_rax and thread may alias each other due to limited register
+ // availability.
+void MacroAssembler::lightweight_unlock(Register obj, Register reg_rax, Register thread, Register tmp, Label& slow) {
+ assert(reg_rax == rax, "");
+ assert_different_registers(obj, reg_rax, tmp);
+ LP64_ONLY(assert_different_registers(obj, reg_rax, thread, tmp);)
+
+ Label unlocked, push_and_slow;
+ const Register top = tmp;
+
+ // Check if obj is top of lock-stack.
+ movl(top, Address(thread, JavaThread::lock_stack_top_offset()));
+ cmpptr(obj, Address(thread, top, Address::times_1, -oopSize));
jcc(Assembler::notEqual, slow);
- // Pop the lock object from the lock-stack.
-#ifdef _LP64
- const Register thread = r15_thread;
-#else
- const Register thread = rax;
- get_thread(thread);
-#endif
+
+ // Pop lock-stack.
+ DEBUG_ONLY(movptr(Address(thread, top, Address::times_1, -oopSize), 0);)
subl(Address(thread, JavaThread::lock_stack_top_offset()), oopSize);
+
+ // Check if recursive.
+ cmpptr(obj, Address(thread, top, Address::times_1, -2 * oopSize));
+ jcc(Assembler::equal, unlocked);
+
+ // Not recursive. Check header for monitor (0b10).
+ movptr(reg_rax, Address(obj, oopDesc::mark_offset_in_bytes()));
+ testptr(reg_rax, markWord::monitor_value);
+ jcc(Assembler::notZero, push_and_slow);
+
+#ifdef ASSERT
+ // Check header not unlocked (0b01).
+ Label not_unlocked;
+ testptr(reg_rax, markWord::unlocked_value);
+ jcc(Assembler::zero, not_unlocked);
+ stop("lightweight_unlock already unlocked");
+ bind(not_unlocked);
+#endif
+
+ // Try to unlock. Transition lock bits 0b00 => 0b01
+ movptr(tmp, reg_rax);
+ orptr(tmp, markWord::unlocked_value);
+ lock(); cmpxchgptr(tmp, Address(obj, oopDesc::mark_offset_in_bytes()));
+ jcc(Assembler::equal, unlocked);
+
+ bind(push_and_slow);
+ // Restore lock-stack and handle the unlock in runtime.
+ if (thread == reg_rax) {
+ // On x86_32 we may lose the thread.
+ get_thread(thread);
+ }
#ifdef ASSERT
- movl(tmp, Address(thread, JavaThread::lock_stack_top_offset()));
- movptr(Address(thread, tmp), 0);
+ movl(top, Address(thread, JavaThread::lock_stack_top_offset()));
+ movptr(Address(thread, top), obj);
#endif
+ addl(Address(thread, JavaThread::lock_stack_top_offset()), oopSize);
+ jmp(slow);
+
+ bind(unlocked);
}
diff --git a/src/hotspot/cpu/x86/macroAssembler_x86.hpp b/src/hotspot/cpu/x86/macroAssembler_x86.hpp
index 4b30168452796..4789b63decc6c 100644
--- a/src/hotspot/cpu/x86/macroAssembler_x86.hpp
+++ b/src/hotspot/cpu/x86/macroAssembler_x86.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2023, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2024, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -896,6 +896,8 @@ class MacroAssembler: public Assembler {
// Emit the CompiledIC call idiom
void ic_call(address entry, jint method_index = 0);
+ static int ic_check_size();
+ int ic_check(int end_alignment);
void emit_static_call_stub();
@@ -2031,8 +2033,8 @@ class MacroAssembler: public Assembler {
void check_stack_alignment(Register sp, const char* msg, unsigned bias = 0, Register tmp = noreg);
- void lightweight_lock(Register obj, Register hdr, Register thread, Register tmp, Label& slow);
- void lightweight_unlock(Register obj, Register hdr, Register tmp, Label& slow);
+ void lightweight_lock(Register obj, Register reg_rax, Register thread, Register tmp, Label& slow);
+ void lightweight_unlock(Register obj, Register reg_rax, Register thread, Register tmp, Label& slow);
};
/**
diff --git a/src/hotspot/cpu/x86/peephole_x86_64.cpp b/src/hotspot/cpu/x86/peephole_x86_64.cpp
index 8c956aeb05393..92a29490edaf8 100644
--- a/src/hotspot/cpu/x86/peephole_x86_64.cpp
+++ b/src/hotspot/cpu/x86/peephole_x86_64.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2022, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2022, 2024, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -33,8 +33,8 @@
// lea d, [s1 + s2] and
// mov d, s1; shl d, s2 into
// lea d, [s1 << s2] with s2 = 1, 2, 3
-bool lea_coalesce_helper(Block* block, int block_index, PhaseCFG* cfg_, PhaseRegAlloc* ra_,
- MachNode* (*new_root)(), uint inst0_rule, bool imm) {
+static bool lea_coalesce_helper(Block* block, int block_index, PhaseCFG* cfg_, PhaseRegAlloc* ra_,
+ MachNode* (*new_root)(), uint inst0_rule, bool imm) {
MachNode* inst0 = block->get_node(block_index)->as_Mach();
assert(inst0->rule() == inst0_rule, "sanity");
@@ -136,7 +136,7 @@ bool lea_coalesce_helper(Block* block, int block_index, PhaseCFG* cfg_, PhaseReg
// This helper func takes a condition and returns the flags that need to be set for the condition
// It uses the same flags as the test instruction, so if the e.g. the overflow bit is required,
// this func returns clears_overflow, as that is what the test instruction does and what the downstream path expects
-juint map_condition_to_required_test_flags(Assembler::Condition condition) {
+static juint map_condition_to_required_test_flags(Assembler::Condition condition) {
switch (condition) {
case Assembler::Condition::zero: // Same value as equal
case Assembler::Condition::notZero: // Same value as notEqual
diff --git a/src/hotspot/cpu/x86/sharedRuntime_x86_32.cpp b/src/hotspot/cpu/x86/sharedRuntime_x86_32.cpp
index 571160523cbe4..febc1b2c3b143 100644
--- a/src/hotspot/cpu/x86/sharedRuntime_x86_32.cpp
+++ b/src/hotspot/cpu/x86/sharedRuntime_x86_32.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2003, 2023, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2024, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -25,8 +25,8 @@
#include "precompiled.hpp"
#include "asm/macroAssembler.hpp"
#include "asm/macroAssembler.inline.hpp"
+#include "code/compiledIC.hpp"
#include "code/debugInfoRec.hpp"
-#include "code/icBuffer.hpp"
#include "code/nativeInst.hpp"
#include "code/vtableStubs.hpp"
#include "compiler/oopMap.hpp"
@@ -36,7 +36,6 @@
#include "interpreter/interpreter.hpp"
#include "logging/log.hpp"
#include "memory/resourceArea.hpp"
-#include "oops/compiledICHolder.hpp"
#include "oops/klass.inline.hpp"
#include "prims/methodHandles.hpp"
#include "runtime/jniHandles.hpp"
@@ -944,25 +943,18 @@ AdapterHandlerEntry* SharedRuntime::generate_i2c2i_adapters(MacroAssembler *masm
address c2i_unverified_entry = __ pc();
Label skip_fixup;
- Register holder = rax;
+ Register data = rax;
Register receiver = rcx;
Register temp = rbx;
{
-
- Label missed;
- __ movptr(temp, Address(receiver, oopDesc::klass_offset_in_bytes()));
- __ cmpptr(temp, Address(holder, CompiledICHolder::holder_klass_offset()));
- __ movptr(rbx, Address(holder, CompiledICHolder::holder_metadata_offset()));
- __ jcc(Assembler::notEqual, missed);
+ __ ic_check(1 /* end_alignment */);
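+ // ic_check jumps to the ic-miss stub when the receiver klass does not match the speculated klass in the CompiledICData.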
+ __ movptr(rbx, Address(data, CompiledICData::speculated_method_offset()));
// Method might have been compiled since the call site was patched to
// interpreted if that is the case treat it as a miss so we can get
// the call site corrected.
__ cmpptr(Address(rbx, in_bytes(Method::code_offset())), NULL_WORD);
__ jcc(Assembler::equal, skip_fixup);
-
- __ bind(missed);
- __ jump(RuntimeAddress(SharedRuntime::get_ic_miss_stub()));
}
address c2i_entry = __ pc();
@@ -1449,23 +1441,12 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
// as far as the interpreter and the compiler(s) are concerned.
- const Register ic_reg = rax;
const Register receiver = rcx;
- Label hit;
Label exception_pending;
__ verify_oop(receiver);
- __ cmpptr(ic_reg, Address(receiver, oopDesc::klass_offset_in_bytes()));
- __ jcc(Assembler::equal, hit);
-
- __ jump(RuntimeAddress(SharedRuntime::get_ic_miss_stub()));
-
// verified entry must be aligned for code patching.
- // and the first 5 bytes must be in the same cache line
- // if we align at 8 then we will be sure 5 bytes are in the same line
- __ align(8);
-
- __ bind(hit);
+ __ ic_check(8 /* end_alignment */);
int vep_offset = ((intptr_t)__ pc()) - start;
@@ -1713,8 +1694,6 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
__ jcc(Assembler::notEqual, slow_path_lock);
} else {
assert(LockingMode == LM_LIGHTWEIGHT, "must be");
- // Load object header
- __ movptr(swap_reg, Address(obj_reg, oopDesc::mark_offset_in_bytes()));
__ lightweight_lock(obj_reg, swap_reg, thread, lock_reg, slow_path_lock);
}
__ bind(count_mon);
@@ -1872,9 +1851,7 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
__ dec_held_monitor_count();
} else {
assert(LockingMode == LM_LIGHTWEIGHT, "must be");
- __ movptr(swap_reg, Address(obj_reg, oopDesc::mark_offset_in_bytes()));
- __ andptr(swap_reg, ~(int32_t)markWord::lock_mask_in_place);
- __ lightweight_unlock(obj_reg, swap_reg, lock_reg, slow_path_unlock);
+ __ lightweight_unlock(obj_reg, swap_reg, thread, lock_reg, slow_path_unlock);
__ dec_held_monitor_count();
}
diff --git a/src/hotspot/cpu/x86/sharedRuntime_x86_64.cpp b/src/hotspot/cpu/x86/sharedRuntime_x86_64.cpp
index cab50e85ec51c..c666f982d0f52 100644
--- a/src/hotspot/cpu/x86/sharedRuntime_x86_64.cpp
+++ b/src/hotspot/cpu/x86/sharedRuntime_x86_64.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2003, 2023, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2024, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -30,7 +30,6 @@
#include "asm/macroAssembler.inline.hpp"
#include "code/compiledIC.hpp"
#include "code/debugInfoRec.hpp"
-#include "code/icBuffer.hpp"
#include "code/nativeInst.hpp"
#include "code/vtableStubs.hpp"
#include "compiler/oopMap.hpp"
@@ -42,7 +41,6 @@
#include "logging/log.hpp"
#include "memory/resourceArea.hpp"
#include "memory/universe.hpp"
-#include "oops/compiledICHolder.hpp"
#include "oops/klass.inline.hpp"
#include "oops/method.inline.hpp"
#include "prims/methodHandles.hpp"
@@ -1000,20 +998,14 @@ AdapterHandlerEntry* SharedRuntime::generate_i2c2i_adapters(MacroAssembler *masm
address c2i_unverified_entry = __ pc();
Label skip_fixup;
- Label ok;
- Register holder = rax;
+ Register data = rax;
Register receiver = j_rarg0;
Register temp = rbx;
{
- __ load_klass(temp, receiver, rscratch1);
- __ cmpptr(temp, Address(holder, CompiledICHolder::holder_klass_offset()));
- __ movptr(rbx, Address(holder, CompiledICHolder::holder_metadata_offset()));
- __ jcc(Assembler::equal, ok);
- __ jump(RuntimeAddress(SharedRuntime::get_ic_miss_stub()));
-
- __ bind(ok);
+ __ ic_check(1 /* end_alignment */);
+ __ movptr(rbx, Address(data, CompiledICData::speculated_method_offset()));
// Method might have been compiled since the call site was patched to
// interpreted if that is the case treat it as a miss so we can get
// the call site corrected.
@@ -1450,7 +1442,7 @@ static void gen_continuation_enter(MacroAssembler* masm,
__ align(BytesPerWord, __ offset() + NativeCall::displacement_offset);
// Emit stub for static call
CodeBuffer* cbuf = masm->code_section()->outer();
- address stub = CompiledStaticCall::emit_to_interp_stub(*cbuf, __ pc());
+ address stub = CompiledDirectCall::emit_to_interp_stub(*cbuf, __ pc());
if (stub == nullptr) {
fatal("CodeCache is full at gen_continuation_enter");
}
@@ -1487,7 +1479,7 @@ static void gen_continuation_enter(MacroAssembler* masm,
// Emit stub for static call
CodeBuffer* cbuf = masm->code_section()->outer();
- address stub = CompiledStaticCall::emit_to_interp_stub(*cbuf, __ pc());
+ address stub = CompiledDirectCall::emit_to_interp_stub(*cbuf, __ pc());
if (stub == nullptr) {
fatal("CodeCache is full at gen_continuation_enter");
}
@@ -1883,25 +1875,13 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
// restoring them except rbp. rbp is the only callee save register
// as far as the interpreter and the compiler(s) are concerned.
-
- const Register ic_reg = rax;
const Register receiver = j_rarg0;
- Label hit;
Label exception_pending;
- assert_different_registers(ic_reg, receiver, rscratch1, rscratch2);
+ assert_different_registers(receiver, rscratch1, rscratch2);
__ verify_oop(receiver);
- __ load_klass(rscratch1, receiver, rscratch2);
- __ cmpq(ic_reg, rscratch1);
- __ jcc(Assembler::equal, hit);
-
- __ jump(RuntimeAddress(SharedRuntime::get_ic_miss_stub()));
-
- // Verified entry point must be aligned
- __ align(8);
-
- __ bind(hit);
+ __ ic_check(8 /* end_alignment */);
int vep_offset = ((intptr_t)__ pc()) - start;
@@ -2190,8 +2170,6 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
__ jcc(Assembler::notEqual, slow_path_lock);
} else {
assert(LockingMode == LM_LIGHTWEIGHT, "must be");
- // Load object header
- __ movptr(swap_reg, Address(obj_reg, oopDesc::mark_offset_in_bytes()));
__ lightweight_lock(obj_reg, swap_reg, r15_thread, rscratch1, slow_path_lock);
}
__ bind(count_mon);
@@ -2334,9 +2312,7 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
__ dec_held_monitor_count();
} else {
assert(LockingMode == LM_LIGHTWEIGHT, "must be");
- __ movptr(swap_reg, Address(obj_reg, oopDesc::mark_offset_in_bytes()));
- __ andptr(swap_reg, ~(int32_t)markWord::lock_mask_in_place);
- __ lightweight_unlock(obj_reg, swap_reg, lock_reg, slow_path_unlock);
+ __ lightweight_unlock(obj_reg, swap_reg, r15_thread, lock_reg, slow_path_unlock);
__ dec_held_monitor_count();
}
diff --git a/src/hotspot/cpu/x86/stubRoutines_x86.cpp b/src/hotspot/cpu/x86/stubRoutines_x86.cpp
index 3be83eed9d22f..bc1cbdbba26b5 100644
--- a/src/hotspot/cpu/x86/stubRoutines_x86.cpp
+++ b/src/hotspot/cpu/x86/stubRoutines_x86.cpp
@@ -279,7 +279,7 @@ uint32_t _crc32c_pow_2k_table[TILL_CYCLE]; // because _crc32c_pow_2k_table[TILL_
// A. Kadatch and B. Jenkins / Everything we know about CRC but afraid to forget September 3, 2010 8
// Listing 1: Multiplication of normalized polynomials
// "a" and "b" occupy D least significant bits.
-uint32_t crc32c_multiply(uint32_t a, uint32_t b) {
+static uint32_t crc32c_multiply(uint32_t a, uint32_t b) {
uint32_t product = 0;
uint32_t b_pow_x_table[D + 1]; // b_pow_x_table[k] = (b * x**k) mod P
b_pow_x_table[0] = b;
@@ -303,7 +303,7 @@ uint32_t crc32c_multiply(uint32_t a, uint32_t b) {
#undef P
// A. Kadatch and B. Jenkins / Everything we know about CRC but afraid to forget September 3, 2010 9
-void crc32c_init_pow_2k(void) {
+static void crc32c_init_pow_2k(void) {
// _crc32c_pow_2k_table(0) =
// x^(2^k) mod P(x) = x mod P(x) = x
// Since we are operating on a reflected values
@@ -318,7 +318,7 @@ void crc32c_init_pow_2k(void) {
}
// x^N mod P(x)
-uint32_t crc32c_f_pow_n(uint32_t n) {
+static uint32_t crc32c_f_pow_n(uint32_t n) {
// result = 1 (polynomial)
uint32_t one, result = 0x80000000, i = 0;
diff --git a/src/hotspot/cpu/x86/upcallLinker_x86_64.cpp b/src/hotspot/cpu/x86/upcallLinker_x86_64.cpp
index 89e7a466264b9..7b9d49dd46140 100644
--- a/src/hotspot/cpu/x86/upcallLinker_x86_64.cpp
+++ b/src/hotspot/cpu/x86/upcallLinker_x86_64.cpp
@@ -300,8 +300,12 @@ address UpcallLinker::make_upcall_stub(jobject receiver, Method* entry,
__ mov_metadata(rbx, entry);
__ movptr(Address(r15_thread, JavaThread::callee_target_offset()), rbx); // just in case callee is deoptimized
+ __ push_cont_fastpath();
+
__ call(Address(rbx, Method::from_compiled_offset()));
+ __ pop_cont_fastpath();
+
// return value shuffle
if (!needs_return_buffer) {
#ifdef ASSERT
diff --git a/src/hotspot/cpu/x86/vm_version_x86.cpp b/src/hotspot/cpu/x86/vm_version_x86.cpp
index 2412c053106e7..f8213a2539f04 100644
--- a/src/hotspot/cpu/x86/vm_version_x86.cpp
+++ b/src/hotspot/cpu/x86/vm_version_x86.cpp
@@ -809,7 +809,7 @@ void VM_Version::get_processor_features() {
_stepping = cpu_stepping();
if (cpu_family() > 4) { // it supports CPUID
- _features = feature_flags(); // These can be changed by VM settings
+ _features = _cpuid_info.feature_flags(); // These can be changed by VM settings
_cpu_features = _features; // Preserve features
// Logical processors are only available on P4s and above,
// and only if hyperthreading is available.
@@ -2891,13 +2891,13 @@ int64_t VM_Version::maximum_qualified_cpu_frequency(void) {
return _max_qualified_cpu_frequency;
}
-uint64_t VM_Version::feature_flags() {
+uint64_t VM_Version::CpuidInfo::feature_flags() const {
uint64_t result = 0;
- if (_cpuid_info.std_cpuid1_edx.bits.cmpxchg8 != 0)
+ if (std_cpuid1_edx.bits.cmpxchg8 != 0)
result |= CPU_CX8;
- if (_cpuid_info.std_cpuid1_edx.bits.cmov != 0)
+ if (std_cpuid1_edx.bits.cmov != 0)
result |= CPU_CMOV;
- if (_cpuid_info.std_cpuid1_edx.bits.clflush != 0)
+ if (std_cpuid1_edx.bits.clflush != 0)
result |= CPU_FLUSH;
#ifdef _LP64
// clflush should always be available on x86_64
@@ -2905,158 +2905,158 @@ uint64_t VM_Version::feature_flags() {
// to flush the code cache.
assert ((result & CPU_FLUSH) != 0, "clflush should be available");
#endif
- if (_cpuid_info.std_cpuid1_edx.bits.fxsr != 0 || (is_amd_family() &&
- _cpuid_info.ext_cpuid1_edx.bits.fxsr != 0))
+ if (std_cpuid1_edx.bits.fxsr != 0 || (is_amd_family() &&
+ ext_cpuid1_edx.bits.fxsr != 0))
result |= CPU_FXSR;
// HT flag is set for multi-core processors also.
if (threads_per_core() > 1)
result |= CPU_HT;
- if (_cpuid_info.std_cpuid1_edx.bits.mmx != 0 || (is_amd_family() &&
- _cpuid_info.ext_cpuid1_edx.bits.mmx != 0))
+ if (std_cpuid1_edx.bits.mmx != 0 || (is_amd_family() &&
+ ext_cpuid1_edx.bits.mmx != 0))
result |= CPU_MMX;
- if (_cpuid_info.std_cpuid1_edx.bits.sse != 0)
+ if (std_cpuid1_edx.bits.sse != 0)
result |= CPU_SSE;
- if (_cpuid_info.std_cpuid1_edx.bits.sse2 != 0)
+ if (std_cpuid1_edx.bits.sse2 != 0)
result |= CPU_SSE2;
- if (_cpuid_info.std_cpuid1_ecx.bits.sse3 != 0)
+ if (std_cpuid1_ecx.bits.sse3 != 0)
result |= CPU_SSE3;
- if (_cpuid_info.std_cpuid1_ecx.bits.ssse3 != 0)
+ if (std_cpuid1_ecx.bits.ssse3 != 0)
result |= CPU_SSSE3;
- if (_cpuid_info.std_cpuid1_ecx.bits.sse4_1 != 0)
+ if (std_cpuid1_ecx.bits.sse4_1 != 0)
result |= CPU_SSE4_1;
- if (_cpuid_info.std_cpuid1_ecx.bits.sse4_2 != 0)
+ if (std_cpuid1_ecx.bits.sse4_2 != 0)
result |= CPU_SSE4_2;
- if (_cpuid_info.std_cpuid1_ecx.bits.popcnt != 0)
+ if (std_cpuid1_ecx.bits.popcnt != 0)
result |= CPU_POPCNT;
- if (_cpuid_info.std_cpuid1_ecx.bits.avx != 0 &&
- _cpuid_info.std_cpuid1_ecx.bits.osxsave != 0 &&
- _cpuid_info.xem_xcr0_eax.bits.sse != 0 &&
- _cpuid_info.xem_xcr0_eax.bits.ymm != 0) {
+ if (std_cpuid1_ecx.bits.avx != 0 &&
+ std_cpuid1_ecx.bits.osxsave != 0 &&
+ xem_xcr0_eax.bits.sse != 0 &&
+ xem_xcr0_eax.bits.ymm != 0) {
result |= CPU_AVX;
result |= CPU_VZEROUPPER;
- if (_cpuid_info.std_cpuid1_ecx.bits.f16c != 0)
+ if (std_cpuid1_ecx.bits.f16c != 0)
result |= CPU_F16C;
- if (_cpuid_info.sef_cpuid7_ebx.bits.avx2 != 0)
+ if (sef_cpuid7_ebx.bits.avx2 != 0)
result |= CPU_AVX2;
- if (_cpuid_info.sef_cpuid7_ebx.bits.avx512f != 0 &&
- _cpuid_info.xem_xcr0_eax.bits.opmask != 0 &&
- _cpuid_info.xem_xcr0_eax.bits.zmm512 != 0 &&
- _cpuid_info.xem_xcr0_eax.bits.zmm32 != 0) {
+ if (sef_cpuid7_ebx.bits.avx512f != 0 &&
+ xem_xcr0_eax.bits.opmask != 0 &&
+ xem_xcr0_eax.bits.zmm512 != 0 &&
+ xem_xcr0_eax.bits.zmm32 != 0) {
result |= CPU_AVX512F;
- if (_cpuid_info.sef_cpuid7_ebx.bits.avx512cd != 0)
+ if (sef_cpuid7_ebx.bits.avx512cd != 0)
result |= CPU_AVX512CD;
- if (_cpuid_info.sef_cpuid7_ebx.bits.avx512dq != 0)
+ if (sef_cpuid7_ebx.bits.avx512dq != 0)
result |= CPU_AVX512DQ;
- if (_cpuid_info.sef_cpuid7_ebx.bits.avx512ifma != 0)
+ if (sef_cpuid7_ebx.bits.avx512ifma != 0)
result |= CPU_AVX512_IFMA;
- if (_cpuid_info.sef_cpuid7_ebx.bits.avx512pf != 0)
+ if (sef_cpuid7_ebx.bits.avx512pf != 0)
result |= CPU_AVX512PF;
- if (_cpuid_info.sef_cpuid7_ebx.bits.avx512er != 0)
+ if (sef_cpuid7_ebx.bits.avx512er != 0)
result |= CPU_AVX512ER;
- if (_cpuid_info.sef_cpuid7_ebx.bits.avx512bw != 0)
+ if (sef_cpuid7_ebx.bits.avx512bw != 0)
result |= CPU_AVX512BW;
- if (_cpuid_info.sef_cpuid7_ebx.bits.avx512vl != 0)
+ if (sef_cpuid7_ebx.bits.avx512vl != 0)
result |= CPU_AVX512VL;
- if (_cpuid_info.sef_cpuid7_ecx.bits.avx512_vpopcntdq != 0)
+ if (sef_cpuid7_ecx.bits.avx512_vpopcntdq != 0)
result |= CPU_AVX512_VPOPCNTDQ;
- if (_cpuid_info.sef_cpuid7_ecx.bits.avx512_vpclmulqdq != 0)
+ if (sef_cpuid7_ecx.bits.avx512_vpclmulqdq != 0)
result |= CPU_AVX512_VPCLMULQDQ;
- if (_cpuid_info.sef_cpuid7_ecx.bits.vaes != 0)
+ if (sef_cpuid7_ecx.bits.vaes != 0)
result |= CPU_AVX512_VAES;
- if (_cpuid_info.sef_cpuid7_ecx.bits.gfni != 0)
+ if (sef_cpuid7_ecx.bits.gfni != 0)
result |= CPU_GFNI;
- if (_cpuid_info.sef_cpuid7_ecx.bits.avx512_vnni != 0)
+ if (sef_cpuid7_ecx.bits.avx512_vnni != 0)
result |= CPU_AVX512_VNNI;
- if (_cpuid_info.sef_cpuid7_ecx.bits.avx512_bitalg != 0)
+ if (sef_cpuid7_ecx.bits.avx512_bitalg != 0)
result |= CPU_AVX512_BITALG;
- if (_cpuid_info.sef_cpuid7_ecx.bits.avx512_vbmi != 0)
+ if (sef_cpuid7_ecx.bits.avx512_vbmi != 0)
result |= CPU_AVX512_VBMI;
- if (_cpuid_info.sef_cpuid7_ecx.bits.avx512_vbmi2 != 0)
+ if (sef_cpuid7_ecx.bits.avx512_vbmi2 != 0)
result |= CPU_AVX512_VBMI2;
}
}
- if (_cpuid_info.std_cpuid1_ecx.bits.hv != 0)
+ if (std_cpuid1_ecx.bits.hv != 0)
result |= CPU_HV;
- if (_cpuid_info.sef_cpuid7_ebx.bits.bmi1 != 0)
+ if (sef_cpuid7_ebx.bits.bmi1 != 0)
result |= CPU_BMI1;
- if (_cpuid_info.std_cpuid1_edx.bits.tsc != 0)
+ if (std_cpuid1_edx.bits.tsc != 0)
result |= CPU_TSC;
- if (_cpuid_info.ext_cpuid7_edx.bits.tsc_invariance != 0)
+ if (ext_cpuid7_edx.bits.tsc_invariance != 0)
result |= CPU_TSCINV_BIT;
- if (_cpuid_info.std_cpuid1_ecx.bits.aes != 0)
+ if (std_cpuid1_ecx.bits.aes != 0)
result |= CPU_AES;
- if (_cpuid_info.sef_cpuid7_ebx.bits.erms != 0)
+ if (sef_cpuid7_ebx.bits.erms != 0)
result |= CPU_ERMS;
- if (_cpuid_info.sef_cpuid7_edx.bits.fast_short_rep_mov != 0)
+ if (sef_cpuid7_edx.bits.fast_short_rep_mov != 0)
result |= CPU_FSRM;
- if (_cpuid_info.std_cpuid1_ecx.bits.clmul != 0)
+ if (std_cpuid1_ecx.bits.clmul != 0)
result |= CPU_CLMUL;
- if (_cpuid_info.sef_cpuid7_ebx.bits.rtm != 0)
+ if (sef_cpuid7_ebx.bits.rtm != 0)
result |= CPU_RTM;
- if (_cpuid_info.sef_cpuid7_ebx.bits.adx != 0)
+ if (sef_cpuid7_ebx.bits.adx != 0)
result |= CPU_ADX;
- if (_cpuid_info.sef_cpuid7_ebx.bits.bmi2 != 0)
+ if (sef_cpuid7_ebx.bits.bmi2 != 0)
result |= CPU_BMI2;
- if (_cpuid_info.sef_cpuid7_ebx.bits.sha != 0)
+ if (sef_cpuid7_ebx.bits.sha != 0)
result |= CPU_SHA;
- if (_cpuid_info.std_cpuid1_ecx.bits.fma != 0)
+ if (std_cpuid1_ecx.bits.fma != 0)
result |= CPU_FMA;
- if (_cpuid_info.sef_cpuid7_ebx.bits.clflushopt != 0)
+ if (sef_cpuid7_ebx.bits.clflushopt != 0)
result |= CPU_FLUSHOPT;
- if (_cpuid_info.ext_cpuid1_edx.bits.rdtscp != 0)
+ if (ext_cpuid1_edx.bits.rdtscp != 0)
result |= CPU_RDTSCP;
- if (_cpuid_info.sef_cpuid7_ecx.bits.rdpid != 0)
+ if (sef_cpuid7_ecx.bits.rdpid != 0)
result |= CPU_RDPID;
// AMD|Hygon features.
if (is_amd_family()) {
- if ((_cpuid_info.ext_cpuid1_edx.bits.tdnow != 0) ||
- (_cpuid_info.ext_cpuid1_ecx.bits.prefetchw != 0))
+ if ((ext_cpuid1_edx.bits.tdnow != 0) ||
+ (ext_cpuid1_ecx.bits.prefetchw != 0))
result |= CPU_3DNOW_PREFETCH;
- if (_cpuid_info.ext_cpuid1_ecx.bits.lzcnt != 0)
+ if (ext_cpuid1_ecx.bits.lzcnt != 0)
result |= CPU_LZCNT;
- if (_cpuid_info.ext_cpuid1_ecx.bits.sse4a != 0)
+ if (ext_cpuid1_ecx.bits.sse4a != 0)
result |= CPU_SSE4A;
}
// Intel features.
if (is_intel()) {
- if (_cpuid_info.ext_cpuid1_ecx.bits.lzcnt != 0) {
+ if (ext_cpuid1_ecx.bits.lzcnt != 0) {
result |= CPU_LZCNT;
}
- if (_cpuid_info.ext_cpuid1_ecx.bits.prefetchw != 0) {
+ if (ext_cpuid1_ecx.bits.prefetchw != 0) {
result |= CPU_3DNOW_PREFETCH;
}
- if (_cpuid_info.sef_cpuid7_ebx.bits.clwb != 0) {
+ if (sef_cpuid7_ebx.bits.clwb != 0) {
result |= CPU_CLWB;
}
- if (_cpuid_info.sef_cpuid7_edx.bits.serialize != 0)
+ if (sef_cpuid7_edx.bits.serialize != 0)
result |= CPU_SERIALIZE;
}
// ZX features.
if (is_zx()) {
- if (_cpuid_info.ext_cpuid1_ecx.bits.lzcnt != 0) {
+ if (ext_cpuid1_ecx.bits.lzcnt != 0) {
result |= CPU_LZCNT;
}
- if (_cpuid_info.ext_cpuid1_ecx.bits.prefetchw != 0) {
+ if (ext_cpuid1_ecx.bits.prefetchw != 0) {
result |= CPU_3DNOW_PREFETCH;
}
}
// Protection key features.
- if (_cpuid_info.sef_cpuid7_ecx.bits.pku != 0) {
+ if (sef_cpuid7_ecx.bits.pku != 0) {
result |= CPU_PKU;
}
- if (_cpuid_info.sef_cpuid7_ecx.bits.ospke != 0) {
+ if (sef_cpuid7_ecx.bits.ospke != 0) {
result |= CPU_OSPKE;
}
// Control flow enforcement (CET) features.
- if (_cpuid_info.sef_cpuid7_ecx.bits.cet_ss != 0) {
+ if (sef_cpuid7_ecx.bits.cet_ss != 0) {
result |= CPU_CET_SS;
}
- if (_cpuid_info.sef_cpuid7_edx.bits.cet_ibt != 0) {
+ if (sef_cpuid7_edx.bits.cet_ibt != 0) {
result |= CPU_CET_IBT;
}
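The refactored feature_flags() above reads the CPUID bit fields directly as a CpuidInfo member and folds them into a single CPU_* bitmask. A minimal sketch of that pattern, with made-up flag values and a trimmed-down bit-field struct rather than HotSpot's real CpuidInfo:

```
#include <cstdint>

// Illustrative only: a simplified version of the CPU_* bitmask pattern used by
// feature_flags() above. Flag values and field names are made up for the sketch.
enum : uint64_t {
  CPU_AVX512F  = 1ULL << 0,
  CPU_AVX512CD = 1ULL << 1,
  CPU_BMI2     = 1ULL << 2
};

struct CpuidBits {            // stand-in for the sef_cpuid7_ebx.bits fields
  unsigned avx512f  : 1;
  unsigned avx512cd : 1;
  unsigned bmi2     : 1;
};

static uint64_t feature_flags(const CpuidBits& bits) {
  uint64_t result = 0;
  if (bits.avx512f != 0)  result |= CPU_AVX512F;
  if (bits.avx512cd != 0) result |= CPU_AVX512CD;  // only meaningful alongside AVX512F
  if (bits.bmi2 != 0)     result |= CPU_BMI2;
  return result;
}

// Query side: callers test individual capabilities against the mask.
static bool supports_avx512cd(uint64_t features) {
  return (features & CPU_AVX512CD) != 0;
}
```

Callers then test individual capabilities with a mask, which is what the supports_* accessors in vm_version_x86.hpp do.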
diff --git a/src/hotspot/cpu/x86/vm_version_x86.hpp b/src/hotspot/cpu/x86/vm_version_x86.hpp
index cfc16acabc674..03596a6e4468c 100644
--- a/src/hotspot/cpu/x86/vm_version_x86.hpp
+++ b/src/hotspot/cpu/x86/vm_version_x86.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2023, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2024, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -428,7 +428,8 @@ class VM_Version : public Abstract_VM_Version {
//
// The info block is laid out in subblocks of 4 dwords corresponding to
// eax, ebx, ecx and edx, whether or not they contain anything useful.
- struct CpuidInfo {
+ class CpuidInfo {
+ public:
// cpuid function 0
uint32_t std_max_function;
uint32_t std_vendor_name_0;
@@ -522,6 +523,31 @@ class VM_Version : public Abstract_VM_Version {
// Space to save zmm registers after signal handle
int zmm_save[16*4]; // Save zmm0, zmm7, zmm8, zmm31
+
+ uint64_t feature_flags() const;
+
+ // Asserts
+ void assert_is_initialized() const {
+ assert(std_cpuid1_eax.bits.family != 0, "VM_Version not initialized");
+ }
+
+ // Extractors
+ uint32_t extended_cpu_family() const {
+ uint32_t result = std_cpuid1_eax.bits.family;
+ result += std_cpuid1_eax.bits.ext_family;
+ return result;
+ }
+
+ uint32_t extended_cpu_model() const {
+ uint32_t result = std_cpuid1_eax.bits.model;
+ result |= std_cpuid1_eax.bits.ext_model << 4;
+ return result;
+ }
+
+ uint32_t cpu_stepping() const {
+ uint32_t result = std_cpuid1_eax.bits.stepping;
+ return result;
+ }
};
private:
@@ -529,23 +555,6 @@ class VM_Version : public Abstract_VM_Version {
static CpuidInfo _cpuid_info;
// Extractors and predicates
- static uint32_t extended_cpu_family() {
- uint32_t result = _cpuid_info.std_cpuid1_eax.bits.family;
- result += _cpuid_info.std_cpuid1_eax.bits.ext_family;
- return result;
- }
-
- static uint32_t extended_cpu_model() {
- uint32_t result = _cpuid_info.std_cpuid1_eax.bits.model;
- result |= _cpuid_info.std_cpuid1_eax.bits.ext_model << 4;
- return result;
- }
-
- static uint32_t cpu_stepping() {
- uint32_t result = _cpuid_info.std_cpuid1_eax.bits.stepping;
- return result;
- }
-
static uint logical_processor_count() {
uint result = threads_per_core();
return result;
@@ -553,7 +562,6 @@ class VM_Version : public Abstract_VM_Version {
static bool compute_has_intel_jcc_erratum();
- static uint64_t feature_flags();
static bool os_supports_avx_vectors();
static void get_processor_features();
@@ -594,11 +602,6 @@ class VM_Version : public Abstract_VM_Version {
// Override Abstract_VM_Version implementation
static void print_platform_virtualization_info(outputStream*);
- // Asserts
- static void assert_is_initialized() {
- assert(_cpuid_info.std_cpuid1_eax.bits.family != 0, "VM_Version not initialized");
- }
-
//
// Processor family:
// 3 - 386
@@ -614,6 +617,10 @@ class VM_Version : public Abstract_VM_Version {
// processors. Use the feature test functions below to
// determine whether a particular instruction is supported.
//
+ static void assert_is_initialized() { _cpuid_info.assert_is_initialized(); }
+ static uint32_t extended_cpu_family() { return _cpuid_info.extended_cpu_family(); }
+ static uint32_t extended_cpu_model() { return _cpuid_info.extended_cpu_model(); }
+ static uint32_t cpu_stepping() { return _cpuid_info.cpu_stepping(); }
static int cpu_family() { return _cpu;}
static bool is_P6() { return cpu_family() >= 6; }
static bool is_amd() { assert_is_initialized(); return _cpuid_info.std_vendor_name_0 == 0x68747541; } // 'htuA'
@@ -770,6 +777,10 @@ class VM_Version : public Abstract_VM_Version {
return true;
}
+ constexpr static bool supports_recursive_lightweight_locking() {
+ return true;
+ }
+
// For AVX CPUs only. f16c support is disabled if UseAVX == 0.
static bool supports_float16() {
return supports_f16c() || supports_avx512vl();
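The extractors moved into CpuidInfo combine the base and extended family/model fields from CPUID leaf 1: the extended family is added to the base family, and the extended model forms the high nibble of the model. A self-contained sketch of that arithmetic, using a hypothetical bit-field layout and example values (not taken from any specific CPU):

```
#include <cstdint>
#include <cstdio>

// Hypothetical leaf-1 EAX layout, mirroring the role of std_cpuid1_eax.bits.
struct Leaf1Eax {
  uint32_t stepping   : 4;
  uint32_t model      : 4;
  uint32_t family     : 4;
  uint32_t proc_type  : 2;
  uint32_t reserved1  : 2;
  uint32_t ext_model  : 4;
  uint32_t ext_family : 8;
  uint32_t reserved2  : 4;
};

static uint32_t extended_cpu_family(const Leaf1Eax& e) {
  return e.family + e.ext_family;        // extended family is additive
}

static uint32_t extended_cpu_model(const Leaf1Eax& e) {
  return e.model | (e.ext_model << 4);   // extended model is the high nibble
}

int main() {
  // Example values: family=0x6, ext_family=0, model=0xA, ext_model=0x8.
  Leaf1Eax e{1, 0xA, 6, 0, 0, 8, 0, 0};
  std::printf("family=0x%x model=0x%x stepping=%u\n",
              (unsigned)extended_cpu_family(e),
              (unsigned)extended_cpu_model(e),
              (unsigned)e.stepping);
  return 0;
}
```

For the example values this prints family 0x6 and model 0x8A, i.e. a model number larger than a single nibble can hold.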
diff --git a/src/hotspot/cpu/x86/vtableStubs_x86_32.cpp b/src/hotspot/cpu/x86/vtableStubs_x86_32.cpp
index 0e78e0274d7f2..398f2e37eb5cc 100644
--- a/src/hotspot/cpu/x86/vtableStubs_x86_32.cpp
+++ b/src/hotspot/cpu/x86/vtableStubs_x86_32.cpp
@@ -24,10 +24,10 @@
#include "precompiled.hpp"
#include "asm/macroAssembler.hpp"
+#include "code/compiledIC.hpp"
#include "code/vtableStubs.hpp"
#include "interp_masm_x86.hpp"
#include "memory/resourceArea.hpp"
-#include "oops/compiledICHolder.hpp"
#include "oops/instanceKlass.hpp"
#include "oops/klassVtable.hpp"
#include "runtime/sharedRuntime.hpp"
@@ -176,21 +176,21 @@ VtableStub* VtableStubs::create_itable_stub(int itable_index) {
#endif /* PRODUCT */
// Entry arguments:
- // rax: CompiledICHolder
+ // rax: CompiledICData
// rcx: Receiver
// Most registers are in use; we'll use rax, rbx, rcx, rdx, rsi, rdi
// (If we need to make rsi, rdi callee-save, do a push/pop here.)
const Register recv_klass_reg = rsi;
- const Register holder_klass_reg = rax; // declaring interface klass (DECC)
+ const Register holder_klass_reg = rax; // declaring interface klass (DEFC)
const Register resolved_klass_reg = rdi; // resolved interface klass (REFC)
const Register temp_reg = rdx;
const Register method = rbx;
- const Register icholder_reg = rax;
+ const Register icdata_reg = rax;
const Register receiver = rcx;
- __ movptr(resolved_klass_reg, Address(icholder_reg, CompiledICHolder::holder_klass_offset()));
- __ movptr(holder_klass_reg, Address(icholder_reg, CompiledICHolder::holder_metadata_offset()));
+ __ movptr(resolved_klass_reg, Address(icdata_reg, CompiledICData::itable_refc_klass_offset()));
+ __ movptr(holder_klass_reg, Address(icdata_reg, CompiledICData::itable_defc_klass_offset()));
Label L_no_such_interface;
diff --git a/src/hotspot/cpu/x86/vtableStubs_x86_64.cpp b/src/hotspot/cpu/x86/vtableStubs_x86_64.cpp
index f162a651183f9..158d6f9c6922b 100644
--- a/src/hotspot/cpu/x86/vtableStubs_x86_64.cpp
+++ b/src/hotspot/cpu/x86/vtableStubs_x86_64.cpp
@@ -24,10 +24,10 @@
#include "precompiled.hpp"
#include "asm/macroAssembler.hpp"
+#include "code/compiledIC.hpp"
#include "code/vtableStubs.hpp"
#include "interp_masm_x86.hpp"
#include "memory/resourceArea.hpp"
-#include "oops/compiledICHolder.hpp"
#include "oops/instanceKlass.hpp"
#include "oops/klassVtable.hpp"
#include "runtime/sharedRuntime.hpp"
@@ -168,21 +168,21 @@ VtableStub* VtableStubs::create_itable_stub(int itable_index) {
#endif // PRODUCT
// Entry arguments:
- // rax: CompiledICHolder
+ // rax: CompiledICData
// j_rarg0: Receiver
// Most registers are in use; we'll use rax, rbx, r10, r11
// (various calling sequences use r[cd]x, r[sd]i, r[89]; stay away from them)
const Register recv_klass_reg = r10;
- const Register holder_klass_reg = rax; // declaring interface klass (DECC)
+ const Register holder_klass_reg = rax; // declaring interface klass (DEFC)
const Register resolved_klass_reg = r14; // resolved interface klass (REFC)
const Register temp_reg = r11;
const Register temp_reg2 = r13;
const Register method = rbx;
- const Register icholder_reg = rax;
+ const Register icdata_reg = rax;
- __ movptr(resolved_klass_reg, Address(icholder_reg, CompiledICHolder::holder_klass_offset()));
- __ movptr(holder_klass_reg, Address(icholder_reg, CompiledICHolder::holder_metadata_offset()));
+ __ movptr(resolved_klass_reg, Address(icdata_reg, CompiledICData::itable_refc_klass_offset()));
+ __ movptr(holder_klass_reg, Address(icdata_reg, CompiledICData::itable_defc_klass_offset()));
Label L_no_such_interface;
diff --git a/src/hotspot/cpu/x86/x86.ad b/src/hotspot/cpu/x86/x86.ad
index a31548eb8c3f9..6df02d280bcef 100644
--- a/src/hotspot/cpu/x86/x86.ad
+++ b/src/hotspot/cpu/x86/x86.ad
@@ -1358,7 +1358,7 @@ int HandlerImpl::emit_deopt_handler(CodeBuffer& cbuf) {
return offset;
}
-Assembler::Width widthForType(BasicType bt) {
+static Assembler::Width widthForType(BasicType bt) {
if (bt == T_BYTE) {
return Assembler::B;
} else if (bt == T_SHORT) {
diff --git a/src/hotspot/cpu/x86/x86_32.ad b/src/hotspot/cpu/x86/x86_32.ad
index 9aa0051043575..2fe655a576778 100644
--- a/src/hotspot/cpu/x86/x86_32.ad
+++ b/src/hotspot/cpu/x86/x86_32.ad
@@ -504,7 +504,7 @@ void emit_cmpfp_fixup(MacroAssembler& _masm) {
__ bind(exit);
}
-void emit_cmpfp3(MacroAssembler& _masm, Register dst) {
+static void emit_cmpfp3(MacroAssembler& _masm, Register dst) {
Label done;
__ movl(dst, -1);
__ jcc(Assembler::parity, done);
@@ -1383,24 +1383,12 @@ void MachUEPNode::format( PhaseRegAlloc *ra_, outputStream* st ) const {
void MachUEPNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
MacroAssembler masm(&cbuf);
-#ifdef ASSERT
- uint insts_size = cbuf.insts_size();
-#endif
- masm.cmpptr(rax, Address(rcx, oopDesc::klass_offset_in_bytes()));
- masm.jump_cc(Assembler::notEqual,
- RuntimeAddress(SharedRuntime::get_ic_miss_stub()));
- /* WARNING these NOPs are critical so that verified entry point is properly
- aligned for patching by NativeJump::patch_verified_entry() */
- int nops_cnt = 2;
- if( !OptoBreakpoint ) // Leave space for int3
- nops_cnt += 1;
- masm.nop(nops_cnt);
-
- assert(cbuf.insts_size() - insts_size == size(ra_), "checking code size of inline cache node");
+ masm.ic_check(CodeEntryAlignment);
}
uint MachUEPNode::size(PhaseRegAlloc *ra_) const {
- return OptoBreakpoint ? 11 : 12;
+ return MachNode::size(ra_); // too many variables; just compute it
+ // the hard way
}
@@ -1842,7 +1830,7 @@ encode %{
cbuf.shared_stub_to_interp_for(_method, cbuf.insts()->mark_off());
} else {
// Emit stubs for static call.
- address stub = CompiledStaticCall::emit_to_interp_stub(cbuf, mark);
+ address stub = CompiledDirectCall::emit_to_interp_stub(cbuf, mark);
if (stub == nullptr) {
ciEnv::current()->record_failure("CodeCache is full");
return;
@@ -13776,7 +13764,7 @@ instruct cmpFastLockRTM(eFlagsReg cr, eRegP object, eBXRegP box, eAXRegI tmp, eD
%}
instruct cmpFastLock(eFlagsReg cr, eRegP object, eBXRegP box, eAXRegI tmp, eRegP scr, eRegP thread) %{
- predicate(!Compile::current()->use_rtm());
+ predicate(LockingMode != LM_LIGHTWEIGHT && !Compile::current()->use_rtm());
match(Set cr (FastLock object box));
effect(TEMP tmp, TEMP scr, USE_KILL box, TEMP thread);
ins_cost(300);
@@ -13790,6 +13778,7 @@ instruct cmpFastLock(eFlagsReg cr, eRegP object, eBXRegP box, eAXRegI tmp, eRegP
%}
instruct cmpFastUnlock(eFlagsReg cr, eRegP object, eAXRegP box, eRegP tmp ) %{
+ predicate(LockingMode != LM_LIGHTWEIGHT);
match(Set cr (FastUnlock object box));
effect(TEMP tmp, USE_KILL box);
ins_cost(300);
@@ -13800,6 +13789,32 @@ instruct cmpFastUnlock(eFlagsReg cr, eRegP object, eAXRegP box, eRegP tmp ) %{
ins_pipe(pipe_slow);
%}
+instruct cmpFastLockLightweight(eFlagsReg cr, eRegP object, eBXRegP box, eAXRegI eax_reg, eRegP tmp, eRegP thread) %{
+ predicate(LockingMode == LM_LIGHTWEIGHT);
+ match(Set cr (FastLock object box));
+ effect(TEMP eax_reg, TEMP tmp, USE_KILL box, TEMP thread);
+ ins_cost(300);
+ format %{ "FASTLOCK $object,$box\t! kills $box,$eax_reg,$tmp" %}
+ ins_encode %{
+ __ get_thread($thread$$Register);
+ __ fast_lock_lightweight($object$$Register, $box$$Register, $eax_reg$$Register, $tmp$$Register, $thread$$Register);
+ %}
+ ins_pipe(pipe_slow);
+%}
+
+instruct cmpFastUnlockLightweight(eFlagsReg cr, eRegP object, eAXRegP eax_reg, eRegP tmp, eRegP thread) %{
+ predicate(LockingMode == LM_LIGHTWEIGHT);
+ match(Set cr (FastUnlock object eax_reg));
+ effect(TEMP tmp, USE_KILL eax_reg, TEMP thread);
+ ins_cost(300);
+ format %{ "FASTUNLOCK $object,$eax_reg\t! kills $eax_reg,$tmp" %}
+ ins_encode %{
+ __ get_thread($thread$$Register);
+ __ fast_unlock_lightweight($object$$Register, $eax_reg$$Register, $tmp$$Register, $thread$$Register);
+ %}
+ ins_pipe(pipe_slow);
+%}
+
instruct mask_all_evexL_LT32(kReg dst, eRegL src) %{
predicate(Matcher::vector_length(n) <= 32);
match(Set dst (MaskAll src));
diff --git a/src/hotspot/cpu/x86/x86_64.ad b/src/hotspot/cpu/x86/x86_64.ad
index a248daaa1917b..d43929efd3ec5 100644
--- a/src/hotspot/cpu/x86/x86_64.ad
+++ b/src/hotspot/cpu/x86/x86_64.ad
@@ -519,7 +519,7 @@ int CallDynamicJavaDirectNode::compute_padding(int current_offset) const
}
// This could be in MacroAssembler but it's fairly C2 specific
-void emit_cmpfp_fixup(MacroAssembler& _masm) {
+static void emit_cmpfp_fixup(MacroAssembler& _masm) {
Label exit;
__ jccb(Assembler::noParity, exit);
__ pushf();
@@ -539,7 +539,7 @@ void emit_cmpfp_fixup(MacroAssembler& _masm) {
__ bind(exit);
}
-void emit_cmpfp3(MacroAssembler& _masm, Register dst) {
+static void emit_cmpfp3(MacroAssembler& _masm, Register dst) {
Label done;
__ movl(dst, -1);
__ jcc(Assembler::parity, done);
@@ -558,10 +558,10 @@ void emit_cmpfp3(MacroAssembler& _masm, Register dst) {
// je #
// |-jz -> a | b # a & b
// | -> a #
-void emit_fp_min_max(MacroAssembler& _masm, XMMRegister dst,
- XMMRegister a, XMMRegister b,
- XMMRegister xmmt, Register rt,
- bool min, bool single) {
+static void emit_fp_min_max(MacroAssembler& _masm, XMMRegister dst,
+ XMMRegister a, XMMRegister b,
+ XMMRegister xmmt, Register rt,
+ bool min, bool single) {
Label nan, zero, below, above, done;
@@ -1472,40 +1472,19 @@ void MachUEPNode::format(PhaseRegAlloc* ra_, outputStream* st) const
{
if (UseCompressedClassPointers) {
st->print_cr("movl rscratch1, [j_rarg0 + oopDesc::klass_offset_in_bytes()]\t# compressed klass");
- st->print_cr("\tdecode_klass_not_null rscratch1, rscratch1");
- st->print_cr("\tcmpq rax, rscratch1\t # Inline cache check");
+ st->print_cr("\tcmpl rscratch1, [rax + CompiledICData::speculated_klass_offset()]\t # Inline cache check");
} else {
- st->print_cr("\tcmpq rax, [j_rarg0 + oopDesc::klass_offset_in_bytes()]\t"
- "# Inline cache check");
+ st->print_cr("movq rscratch1, [j_rarg0 + oopDesc::klass_offset_in_bytes()]\t# compressed klass");
+ st->print_cr("\tcmpq rscratch1, [rax + CompiledICData::speculated_klass_offset()]\t # Inline cache check");
}
st->print_cr("\tjne SharedRuntime::_ic_miss_stub");
- st->print_cr("\tnop\t# nops to align entry point");
}
#endif
void MachUEPNode::emit(CodeBuffer& cbuf, PhaseRegAlloc* ra_) const
{
MacroAssembler masm(&cbuf);
- uint insts_size = cbuf.insts_size();
- if (UseCompressedClassPointers) {
- masm.load_klass(rscratch1, j_rarg0, rscratch2);
- masm.cmpptr(rax, rscratch1);
- } else {
- masm.cmpptr(rax, Address(j_rarg0, oopDesc::klass_offset_in_bytes()));
- }
-
- masm.jump_cc(Assembler::notEqual, RuntimeAddress(SharedRuntime::get_ic_miss_stub()));
-
- /* WARNING these NOPs are critical so that verified entry point is properly
- 4 bytes aligned for patching by NativeJump::patch_verified_entry() */
- int nops_cnt = 4 - ((cbuf.insts_size() - insts_size) & 0x3);
- if (OptoBreakpoint) {
- // Leave space for int3
- nops_cnt -= 1;
- }
- nops_cnt &= 0x3; // Do not add nops if code is aligned.
- if (nops_cnt > 0)
- masm.nop(nops_cnt);
+ masm.ic_check(InteriorEntryAlignment);
}
uint MachUEPNode::size(PhaseRegAlloc* ra_) const
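Both MachUEPNode::emit bodies now delegate to MacroAssembler::ic_check, which performs the comparison spelled out in the format output above: load the receiver's klass and compare it with the klass speculated in the CompiledICData, branching to the inline-cache miss stub on mismatch. A hedged sketch of that control flow in plain C++ (types and names here are placeholders, not HotSpot's):

```
// Illustrative sketch of the inline-cache check emitted at the unverified
// entry point. Names and layout are placeholders, not the HotSpot classes.
struct Klass {};
struct Object             { Klass* klass; };
struct CompiledICDataStub { Klass* speculated_klass; };

enum class Entry { VerifiedEntry, IcMissStub };

Entry unverified_entry_check(const Object* receiver,
                             const CompiledICDataStub* ic_data) {
  // Compare the receiver's klass with the klass speculated at the call site.
  if (receiver->klass != ic_data->speculated_klass) {
    return Entry::IcMissStub;    // re-resolve the call, then retry
  }
  return Entry::VerifiedEntry;   // fall through to the verified entry point
}
```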
@@ -1840,7 +1819,7 @@ encode %{
cbuf.shared_stub_to_interp_for(_method, call_offset);
} else {
// Emit stubs for static call.
- address stub = CompiledStaticCall::emit_to_interp_stub(cbuf, mark);
+ address stub = CompiledDirectCall::emit_to_interp_stub(cbuf, mark);
if (stub == nullptr) {
ciEnv::current()->record_failure("CodeCache is full");
return;
@@ -4480,7 +4459,7 @@ instruct loadD(regD dst, memory mem)
// max = java.lang.Math.max(float a, float b)
instruct maxF_reg(legRegF dst, legRegF a, legRegF b, legRegF tmp, legRegF atmp, legRegF btmp) %{
- predicate(UseAVX > 0 && !SuperWord::is_reduction(n));
+ predicate(UseAVX > 0 && !VLoopReductions::is_reduction(n));
match(Set dst (MaxF a b));
effect(USE a, USE b, TEMP tmp, TEMP atmp, TEMP btmp);
format %{ "maxF $dst, $a, $b \t! using tmp, atmp and btmp as TEMP" %}
@@ -4491,7 +4470,7 @@ instruct maxF_reg(legRegF dst, legRegF a, legRegF b, legRegF tmp, legRegF atmp,
%}
instruct maxF_reduction_reg(legRegF dst, legRegF a, legRegF b, legRegF xmmt, rRegI tmp, rFlagsReg cr) %{
- predicate(UseAVX > 0 && SuperWord::is_reduction(n));
+ predicate(UseAVX > 0 && VLoopReductions::is_reduction(n));
match(Set dst (MaxF a b));
effect(USE a, USE b, TEMP xmmt, TEMP tmp, KILL cr);
@@ -4505,7 +4484,7 @@ instruct maxF_reduction_reg(legRegF dst, legRegF a, legRegF b, legRegF xmmt, rRe
// max = java.lang.Math.max(double a, double b)
instruct maxD_reg(legRegD dst, legRegD a, legRegD b, legRegD tmp, legRegD atmp, legRegD btmp) %{
- predicate(UseAVX > 0 && !SuperWord::is_reduction(n));
+ predicate(UseAVX > 0 && !VLoopReductions::is_reduction(n));
match(Set dst (MaxD a b));
effect(USE a, USE b, TEMP atmp, TEMP btmp, TEMP tmp);
format %{ "maxD $dst, $a, $b \t! using tmp, atmp and btmp as TEMP" %}
@@ -4516,7 +4495,7 @@ instruct maxD_reg(legRegD dst, legRegD a, legRegD b, legRegD tmp, legRegD atmp,
%}
instruct maxD_reduction_reg(legRegD dst, legRegD a, legRegD b, legRegD xmmt, rRegL tmp, rFlagsReg cr) %{
- predicate(UseAVX > 0 && SuperWord::is_reduction(n));
+ predicate(UseAVX > 0 && VLoopReductions::is_reduction(n));
match(Set dst (MaxD a b));
effect(USE a, USE b, TEMP xmmt, TEMP tmp, KILL cr);
@@ -4530,7 +4509,7 @@ instruct maxD_reduction_reg(legRegD dst, legRegD a, legRegD b, legRegD xmmt, rRe
// min = java.lang.Math.min(float a, float b)
instruct minF_reg(legRegF dst, legRegF a, legRegF b, legRegF tmp, legRegF atmp, legRegF btmp) %{
- predicate(UseAVX > 0 && !SuperWord::is_reduction(n));
+ predicate(UseAVX > 0 && !VLoopReductions::is_reduction(n));
match(Set dst (MinF a b));
effect(USE a, USE b, TEMP tmp, TEMP atmp, TEMP btmp);
format %{ "minF $dst, $a, $b \t! using tmp, atmp and btmp as TEMP" %}
@@ -4541,7 +4520,7 @@ instruct minF_reg(legRegF dst, legRegF a, legRegF b, legRegF tmp, legRegF atmp,
%}
instruct minF_reduction_reg(legRegF dst, legRegF a, legRegF b, legRegF xmmt, rRegI tmp, rFlagsReg cr) %{
- predicate(UseAVX > 0 && SuperWord::is_reduction(n));
+ predicate(UseAVX > 0 && VLoopReductions::is_reduction(n));
match(Set dst (MinF a b));
effect(USE a, USE b, TEMP xmmt, TEMP tmp, KILL cr);
@@ -4555,7 +4534,7 @@ instruct minF_reduction_reg(legRegF dst, legRegF a, legRegF b, legRegF xmmt, rRe
// min = java.lang.Math.min(double a, double b)
instruct minD_reg(legRegD dst, legRegD a, legRegD b, legRegD tmp, legRegD atmp, legRegD btmp) %{
- predicate(UseAVX > 0 && !SuperWord::is_reduction(n));
+ predicate(UseAVX > 0 && !VLoopReductions::is_reduction(n));
match(Set dst (MinD a b));
effect(USE a, USE b, TEMP tmp, TEMP atmp, TEMP btmp);
format %{ "minD $dst, $a, $b \t! using tmp, atmp and btmp as TEMP" %}
@@ -4566,7 +4545,7 @@ instruct minD_reg(legRegD dst, legRegD a, legRegD b, legRegD tmp, legRegD atmp,
%}
instruct minD_reduction_reg(legRegD dst, legRegD a, legRegD b, legRegD xmmt, rRegL tmp, rFlagsReg cr) %{
- predicate(UseAVX > 0 && SuperWord::is_reduction(n));
+ predicate(UseAVX > 0 && VLoopReductions::is_reduction(n));
match(Set dst (MinD a b));
effect(USE a, USE b, TEMP xmmt, TEMP tmp, KILL cr);
@@ -12404,7 +12383,7 @@ instruct cmpFastLockRTM(rFlagsReg cr, rRegP object, rbx_RegP box, rax_RegI tmp,
%}
instruct cmpFastLock(rFlagsReg cr, rRegP object, rbx_RegP box, rax_RegI tmp, rRegP scr) %{
- predicate(!Compile::current()->use_rtm());
+ predicate(LockingMode != LM_LIGHTWEIGHT && !Compile::current()->use_rtm());
match(Set cr (FastLock object box));
effect(TEMP tmp, TEMP scr, USE_KILL box);
ins_cost(300);
@@ -12417,6 +12396,7 @@ instruct cmpFastLock(rFlagsReg cr, rRegP object, rbx_RegP box, rax_RegI tmp, rRe
%}
instruct cmpFastUnlock(rFlagsReg cr, rRegP object, rax_RegP box, rRegP tmp) %{
+ predicate(LockingMode != LM_LIGHTWEIGHT);
match(Set cr (FastUnlock object box));
effect(TEMP tmp, USE_KILL box);
ins_cost(300);
@@ -12427,6 +12407,30 @@ instruct cmpFastUnlock(rFlagsReg cr, rRegP object, rax_RegP box, rRegP tmp) %{
ins_pipe(pipe_slow);
%}
+instruct cmpFastLockLightweight(rFlagsReg cr, rRegP object, rbx_RegP box, rax_RegI rax_reg, rRegP tmp) %{
+ predicate(LockingMode == LM_LIGHTWEIGHT);
+ match(Set cr (FastLock object box));
+ effect(TEMP rax_reg, TEMP tmp, USE_KILL box);
+ ins_cost(300);
+ format %{ "fastlock $object,$box\t! kills $box,$rax_reg,$tmp" %}
+ ins_encode %{
+ __ fast_lock_lightweight($object$$Register, $box$$Register, $rax_reg$$Register, $tmp$$Register, r15_thread);
+ %}
+ ins_pipe(pipe_slow);
+%}
+
+instruct cmpFastUnlockLightweight(rFlagsReg cr, rRegP object, rax_RegP rax_reg, rRegP tmp) %{
+ predicate(LockingMode == LM_LIGHTWEIGHT);
+ match(Set cr (FastUnlock object rax_reg));
+ effect(TEMP tmp, USE_KILL rax_reg);
+ ins_cost(300);
+ format %{ "fastunlock $object,$rax_reg\t! kills $rax_reg,$tmp" %}
+ ins_encode %{
+ __ fast_unlock_lightweight($object$$Register, $rax_reg$$Register, $tmp$$Register, r15_thread);
+ %}
+ ins_pipe(pipe_slow);
+%}
+
// ============================================================================
// Safepoint Instructions
diff --git a/src/hotspot/cpu/zero/compiledIC_zero.cpp b/src/hotspot/cpu/zero/compiledIC_zero.cpp
index b0564643af080..24153aeacc5e1 100644
--- a/src/hotspot/cpu/zero/compiledIC_zero.cpp
+++ b/src/hotspot/cpu/zero/compiledIC_zero.cpp
@@ -25,7 +25,6 @@
#include "precompiled.hpp"
#include "code/codeCache.hpp"
#include "code/compiledIC.hpp"
-#include "code/icBuffer.hpp"
#include "code/nmethod.hpp"
#include "code/vtableStubs.hpp"
#include "interpreter/interpreter.hpp"
@@ -43,27 +42,27 @@
// ----------------------------------------------------------------------------
-address CompiledStaticCall::emit_to_interp_stub(CodeBuffer &cbuf, address mark) {
+address CompiledDirectCall::emit_to_interp_stub(CodeBuffer &cbuf, address mark) {
ShouldNotReachHere(); // Only needed for COMPILER2.
return nullptr;
}
-int CompiledStaticCall::to_interp_stub_size() {
+int CompiledDirectCall::to_interp_stub_size() {
ShouldNotReachHere(); // Only needed for COMPILER2.
return 0;
}
// Relocation entries for call stub, compiled java to interpreter.
-int CompiledStaticCall::reloc_to_interp_stub() {
+int CompiledDirectCall::reloc_to_interp_stub() {
ShouldNotReachHere(); // Only needed for COMPILER2.
return 0;
}
-void CompiledDirectStaticCall::set_to_interpreted(const methodHandle& callee, address entry) {
+void CompiledDirectCall::set_to_interpreted(const methodHandle& callee, address entry) {
ShouldNotReachHere(); // Only needed for COMPILER2.
}
-void CompiledDirectStaticCall::set_stub_to_clean(static_stub_Relocation* static_stub) {
+void CompiledDirectCall::set_stub_to_clean(static_stub_Relocation* static_stub) {
ShouldNotReachHere(); // Only needed for COMPILER2.
}
@@ -71,7 +70,7 @@ void CompiledDirectStaticCall::set_stub_to_clean(static_stub_Relocation* static_
// Non-product mode code.
#ifndef PRODUCT
-void CompiledDirectStaticCall::verify() {
+void CompiledDirectCall::verify() {
ShouldNotReachHere(); // Only needed for COMPILER2.
}
diff --git a/src/hotspot/cpu/zero/icBuffer_zero.cpp b/src/hotspot/cpu/zero/icBuffer_zero.cpp
deleted file mode 100644
index adde916a4c4ad..0000000000000
--- a/src/hotspot/cpu/zero/icBuffer_zero.cpp
+++ /dev/null
@@ -1,56 +0,0 @@
-/*
- * Copyright (c) 2003, 2023, Oracle and/or its affiliates. All rights reserved.
- * Copyright 2007 Red Hat, Inc.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#include "precompiled.hpp"
-#include "asm/assembler.inline.hpp"
-#include "code/icBuffer.hpp"
-#include "gc/shared/collectedHeap.inline.hpp"
-#include "interpreter/bytecodes.hpp"
-#include "memory/resourceArea.hpp"
-#include "nativeInst_zero.hpp"
-#include "oops/oop.inline.hpp"
-
-int InlineCacheBuffer::ic_stub_code_size() {
- // NB set this once the functions below are implemented
- return 4;
-}
-
-void InlineCacheBuffer::assemble_ic_buffer_code(address code_begin,
- void* cached_oop,
- address entry_point) {
- // NB ic_stub_code_size() must return the size of the code we generate
- ShouldNotCallThis();
-}
-
-address InlineCacheBuffer::ic_buffer_entry_point(address code_begin) {
- // NB ic_stub_code_size() must return the size of the code we generate
- ShouldNotCallThis();
- return nullptr;
-}
-
-void* InlineCacheBuffer::ic_buffer_cached_value(address code_begin) {
- ShouldNotCallThis();
- return nullptr;
-}
diff --git a/src/hotspot/cpu/zero/sharedRuntime_zero.cpp b/src/hotspot/cpu/zero/sharedRuntime_zero.cpp
index 4244b5817db98..986cee685123b 100644
--- a/src/hotspot/cpu/zero/sharedRuntime_zero.cpp
+++ b/src/hotspot/cpu/zero/sharedRuntime_zero.cpp
@@ -26,10 +26,8 @@
#include "precompiled.hpp"
#include "asm/assembler.inline.hpp"
#include "code/debugInfoRec.hpp"
-#include "code/icBuffer.hpp"
#include "code/vtableStubs.hpp"
#include "interpreter/interpreter.hpp"
-#include "oops/compiledICHolder.hpp"
#include "runtime/interfaceSupport.inline.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/vframeArray.hpp"
diff --git a/src/hotspot/os/aix/os_aix.cpp b/src/hotspot/os/aix/os_aix.cpp
index e701e0aef6082..0c1c0dbc6dcb1 100644
--- a/src/hotspot/os/aix/os_aix.cpp
+++ b/src/hotspot/os/aix/os_aix.cpp
@@ -29,7 +29,6 @@
// no precompiled headers
#include "classfile/vmSymbols.hpp"
-#include "code/icBuffer.hpp"
#include "code/vtableStubs.hpp"
#include "compiler/compileBroker.hpp"
#include "interpreter/interpreter.hpp"
@@ -1128,10 +1127,9 @@ bool os::dll_address_to_library_name(address addr, char* buf,
return true;
}
-void *os::dll_load(const char *filename, char *ebuf, int ebuflen) {
+static void* dll_load_library(const char *filename, char *ebuf, int ebuflen) {
log_info(os)("attempting shared library load of %s", filename);
-
if (ebuf && ebuflen > 0) {
ebuf[0] = '\0';
ebuf[ebuflen - 1] = '\0';
@@ -1179,6 +1177,26 @@ void *os::dll_load(const char *filename, char *ebuf, int ebuflen) {
}
return nullptr;
}
+// Load the library named <filename>.
+// If <filename> ends in .so and loading fails, retry with the extension changed to .a.
+void *os::dll_load(const char *filename, char *ebuf, int ebuflen) {
+ void* result = nullptr;
+ char* const file_path = strdup(filename);
+ char* const pointer_to_dot = strrchr(file_path, '.');
+ const char old_extension[] = ".so";
+ const char new_extension[] = ".a";
+ STATIC_ASSERT(sizeof(old_extension) >= sizeof(new_extension));
+ // First try to load the existing file.
+ result = dll_load_library(filename, ebuf, ebuflen);
+ // If the load fails, we retry with the extension changed to .a, but only for .so files.
+ // Shared objects in .so format don't have member designations (braces); those only apply to archives with members.
+ if (result == nullptr && pointer_to_dot != nullptr && strcmp(pointer_to_dot, old_extension) == 0) {
+ snprintf(pointer_to_dot, sizeof(old_extension), "%s", new_extension);
+ result = dll_load_library(file_path, ebuf, ebuflen);
+ }
+ FREE_C_HEAP_ARRAY(char, file_path);
+ return result;
+}
void os::print_dll_info(outputStream *st) {
st->print_cr("Dynamic libraries:");
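The new AIX os::dll_load wrapper retries a failed .so load with the .a suffix, since AIX libraries are commonly shipped as archives. A standalone sketch of the extension-swap logic, using standard library calls instead of the HotSpot allocation macros above (the loader callable stands in for dll_load_library):

```
#include <cstring>
#include <string>

// Sketch of the ".so" -> ".a" retry performed by os::dll_load on AIX.
template <typename Loader>
void* load_with_aix_fallback(const char* filename, Loader loader) {
  void* result = loader(filename);
  if (result != nullptr) {
    return result;
  }
  const char* dot = std::strrchr(filename, '.');
  if (dot != nullptr && std::strcmp(dot, ".so") == 0) {
    // Replace the trailing ".so" with ".a" and try again.
    std::string fallback(filename, dot - filename);
    fallback += ".a";
    result = loader(fallback.c_str());
  }
  return result;
}
```

For instance, load_with_aix_fallback("libfoo.so", [](const char* p) { return dlopen(p, RTLD_LAZY); }) would fall back to libfoo.a when the .so cannot be loaded.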
@@ -2609,56 +2627,6 @@ jlong os::seek_to_file_offset(int fd, jlong offset) {
return (jlong)::lseek(fd, (off_t)offset, SEEK_SET);
}
-// Map a block of memory.
-char* os::pd_map_memory(int fd, const char* file_name, size_t file_offset,
- char *addr, size_t bytes, bool read_only,
- bool allow_exec) {
- int prot;
- int flags = MAP_PRIVATE;
-
- if (read_only) {
- prot = PROT_READ;
- flags = MAP_SHARED;
- } else {
- prot = PROT_READ | PROT_WRITE;
- flags = MAP_PRIVATE;
- }
-
- if (allow_exec) {
- prot |= PROT_EXEC;
- }
-
- if (addr != nullptr) {
- flags |= MAP_FIXED;
- }
-
- // Allow anonymous mappings if 'fd' is -1.
- if (fd == -1) {
- flags |= MAP_ANONYMOUS;
- }
-
- char* mapped_address = (char*)::mmap(addr, (size_t)bytes, prot, flags,
- fd, file_offset);
- if (mapped_address == MAP_FAILED) {
- return nullptr;
- }
- return mapped_address;
-}
-
-// Remap a block of memory.
-char* os::pd_remap_memory(int fd, const char* file_name, size_t file_offset,
- char *addr, size_t bytes, bool read_only,
- bool allow_exec) {
- // same as map_memory() on this OS
- return os::map_memory(fd, file_name, file_offset, addr, bytes, read_only,
- allow_exec);
-}
-
-// Unmap a block of memory.
-bool os::pd_unmap_memory(char* addr, size_t bytes) {
- return munmap(addr, bytes) == 0;
-}
-
// current_thread_cpu_time(bool) and thread_cpu_time(Thread*, bool)
// are used by JVM M&M and JVMTI to get user+sys or user CPU time
// of a thread.
diff --git a/src/hotspot/os/aix/os_perf_aix.cpp b/src/hotspot/os/aix/os_perf_aix.cpp
index e1719df48c331..b5ae1a6a725a5 100644
--- a/src/hotspot/os/aix/os_perf_aix.cpp
+++ b/src/hotspot/os/aix/os_perf_aix.cpp
@@ -1,6 +1,6 @@
/*
- * Copyright (c) 2012, 2023, Oracle and/or its affiliates. All rights reserved.
- * Copyright (c) 2022, IBM Corp.
+ * Copyright (c) 2012, 2024, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2022, 2024, IBM Corp.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -87,6 +87,7 @@ static bool read_psinfo(const u_longlong_t& pid, psinfo_t& psinfo) {
}
len = fread(&psinfo, 1, sizeof(psinfo_t), fp);
+ fclose(fp);
return len == sizeof(psinfo_t);
}
diff --git a/src/hotspot/os/bsd/os_bsd.cpp b/src/hotspot/os/bsd/os_bsd.cpp
index 42a0b9c083239..0e67228de7b91 100644
--- a/src/hotspot/os/bsd/os_bsd.cpp
+++ b/src/hotspot/os/bsd/os_bsd.cpp
@@ -24,7 +24,6 @@
// no precompiled headers
#include "classfile/vmSymbols.hpp"
-#include "code/icBuffer.hpp"
#include "code/vtableStubs.hpp"
#include "compiler/compileBroker.hpp"
#include "compiler/disassembler.hpp"
@@ -1269,7 +1268,8 @@ void * os::dll_load(const char *filename, char *ebuf, int ebuflen) {
}
#endif // !__APPLE__
-int _print_dll_info_cb(const char * name, address base_address, address top_address, void * param) {
+static int _print_dll_info_cb(const char * name, address base_address,
+ address top_address, void * param) {
outputStream * out = (outputStream *) param;
out->print_cr(INTPTR_FORMAT " \t%s", (intptr_t)base_address, name);
return 0;
@@ -2345,53 +2345,6 @@ jlong os::seek_to_file_offset(int fd, jlong offset) {
return (jlong)::lseek(fd, (off_t)offset, SEEK_SET);
}
-// Map a block of memory.
-char* os::pd_map_memory(int fd, const char* file_name, size_t file_offset,
- char *addr, size_t bytes, bool read_only,
- bool allow_exec) {
- int prot;
- int flags;
-
- if (read_only) {
- prot = PROT_READ;
- flags = MAP_SHARED;
- } else {
- prot = PROT_READ | PROT_WRITE;
- flags = MAP_PRIVATE;
- }
-
- if (allow_exec) {
- prot |= PROT_EXEC;
- }
-
- if (addr != nullptr) {
- flags |= MAP_FIXED;
- }
-
- char* mapped_address = (char*)mmap(addr, (size_t)bytes, prot, flags,
- fd, file_offset);
- if (mapped_address == MAP_FAILED) {
- return nullptr;
- }
- return mapped_address;
-}
-
-
-// Remap a block of memory.
-char* os::pd_remap_memory(int fd, const char* file_name, size_t file_offset,
- char *addr, size_t bytes, bool read_only,
- bool allow_exec) {
- // same as map_memory() on this OS
- return os::map_memory(fd, file_name, file_offset, addr, bytes, read_only,
- allow_exec);
-}
-
-
-// Unmap a block of memory.
-bool os::pd_unmap_memory(char* addr, size_t bytes) {
- return munmap(addr, bytes) == 0;
-}
-
// current_thread_cpu_time(bool) and thread_cpu_time(Thread*, bool)
// are used by JVM M&M and JVMTI to get user+sys or user CPU time
// of a thread.
diff --git a/src/hotspot/os/linux/hugepages.cpp b/src/hotspot/os/linux/hugepages.cpp
index 54e6c1adb7a97..b71593487cf8a 100644
--- a/src/hotspot/os/linux/hugepages.cpp
+++ b/src/hotspot/os/linux/hugepages.cpp
@@ -311,6 +311,19 @@ ExplicitHugePageSupport HugePages::_explicit_hugepage_support;
THPSupport HugePages::_thp_support;
ShmemTHPSupport HugePages::_shmem_thp_support;
+size_t HugePages::thp_pagesize_fallback() {
+ // Older kernels won't publish the THP page size. Fall back to the default explicit huge page size,
+ // since that is likely to be the THP page size as well; skip this if that page size is considered
+ // too large, to avoid excessive alignment waste. If the explicit huge page size is unknown, use an educated guess.
+ if (thp_pagesize() != 0) {
+ return thp_pagesize();
+ }
+ if (supports_explicit_hugepages()) {
+ return MIN2(default_explicit_hugepage_size(), 16 * M);
+ }
+ return 2 * M;
+}
+
void HugePages::initialize() {
_explicit_hugepage_support.scan_os();
_thp_support.scan_os();
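HugePages::thp_pagesize_fallback() is a three-step heuristic: trust the kernel-reported THP size when available, otherwise reuse the explicit huge page size capped at 16M, otherwise assume 2M. The same decision as a pure function, with the inputs passed in explicitly instead of read from the scanned OS state (an assumption made so the example is self-contained):

```
#include <algorithm>
#include <cstddef>

// Heuristic mirroring HugePages::thp_pagesize_fallback() above.
// All inputs are supplied by the caller here, not queried from the OS.
size_t thp_pagesize_fallback(size_t reported_thp_pagesize,
                             bool   supports_explicit_hugepages,
                             size_t default_explicit_hugepage_size) {
  const size_t M = 1024 * 1024;
  if (reported_thp_pagesize != 0) {
    return reported_thp_pagesize;            // the kernel told us; use it
  }
  if (supports_explicit_hugepages) {
    // Likely equal to the THP size, but capped to limit alignment waste.
    return std::min(default_explicit_hugepage_size, 16 * M);
  }
  return 2 * M;                              // educated guess
}
```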
diff --git a/src/hotspot/os/linux/hugepages.hpp b/src/hotspot/os/linux/hugepages.hpp
index 2e61fabc5a507..efd27c55fd60f 100644
--- a/src/hotspot/os/linux/hugepages.hpp
+++ b/src/hotspot/os/linux/hugepages.hpp
@@ -138,6 +138,7 @@ class HugePages : public AllStatic {
static bool supports_thp() { return thp_mode() == THPMode::madvise || thp_mode() == THPMode::always; }
static THPMode thp_mode() { return _thp_support.mode(); }
static size_t thp_pagesize() { return _thp_support.pagesize(); }
+ static size_t thp_pagesize_fallback();
static bool supports_shmem_thp() { return _shmem_thp_support.is_enabled(); }
static ShmemTHPMode shmem_thp_mode() { return _shmem_thp_support.mode(); }
diff --git a/src/hotspot/os/linux/os_linux.cpp b/src/hotspot/os/linux/os_linux.cpp
index f02ca95be5593..c51aeb0ae17c1 100644
--- a/src/hotspot/os/linux/os_linux.cpp
+++ b/src/hotspot/os/linux/os_linux.cpp
@@ -25,7 +25,6 @@
// no precompiled headers
#include "classfile/vmSymbols.hpp"
-#include "code/icBuffer.hpp"
#include "code/vtableStubs.hpp"
#include "compiler/compileBroker.hpp"
#include "compiler/disassembler.hpp"
@@ -86,6 +85,8 @@
#endif
// put OS-includes here
+# include <ctype.h>
+# include <sys/utsname.h>
# include
# include
# include
@@ -343,6 +344,29 @@ static void next_line(FILE *f) {
} while (c != '\n' && c != EOF);
}
+void os::Linux::kernel_version(long* major, long* minor) {
+ *major = -1;
+ *minor = -1;
+
+ struct utsname buffer;
+ int ret = uname(&buffer);
+ if (ret != 0) {
+ log_warning(os)("uname(2) failed to get kernel version: %s", os::errno_name(ret));
+ return;
+ }
+
+ char* walker = buffer.release;
+ long* set_v = major;
+ while (*minor == -1 && walker != nullptr) {
+ if (isdigit(walker[0])) {
+ *set_v = strtol(walker, &walker, 10);
+ set_v = minor;
+ } else {
+ ++walker;
+ }
+ }
+}
+
bool os::Linux::get_tick_information(CPUPerfTicks* pticks, int which_logical_cpu) {
FILE* fh;
uint64_t userTicks, niceTicks, systemTicks, idleTicks;
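os::Linux::kernel_version walks the uname release string and extracts the first two runs of digits as the major and minor version. The sketch below applies the same walk to a literal string so it can be exercised without calling uname; it bounds the loop with the string terminator, which is an adjustment for the standalone form rather than a copy of the HotSpot loop:

```
#include <cctype>
#include <cstdio>
#include <cstdlib>

// Parse "major.minor..." out of a kernel release string such as "6.9.1-arch1".
// Mirrors the walker loop in os::Linux::kernel_version(); -1 means "not found".
static void parse_kernel_version(const char* release, long* major, long* minor) {
  *major = -1;
  *minor = -1;
  const char* walker = release;
  long* set_v = major;
  while (*minor == -1 && *walker != '\0') {
    if (std::isdigit(static_cast<unsigned char>(*walker))) {
      char* end = nullptr;
      *set_v = std::strtol(walker, &end, 10);
      walker = end;
      set_v = minor;        // the next run of digits becomes the minor version
    } else {
      ++walker;
    }
  }
}

int main() {
  long major, minor;
  parse_kernel_version("6.9.1-arch1", &major, &minor);
  std::printf("major=%ld minor=%ld\n", major, minor);  // prints major=6 minor=9
  return 0;
}
```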
@@ -3936,8 +3960,12 @@ void os::Linux::large_page_init() {
// In THP mode:
// - os::large_page_size() is the *THP page size*
// - os::pagesizes() has two members, the THP page size and the system page size
- assert(HugePages::thp_pagesize() > 0, "Missing OS info");
_large_page_size = HugePages::thp_pagesize();
+ if (_large_page_size == 0) {
+ log_info(pagesize) ("Cannot determine THP page size (kernel < 4.10 ?)");
+ _large_page_size = HugePages::thp_pagesize_fallback();
+ log_info(pagesize) ("Assuming THP page size to be: " EXACTFMT " (heuristics)", EXACTFMTARGS(_large_page_size));
+ }
_page_sizes.add(_large_page_size);
_page_sizes.add(os::vm_page_size());
// +UseTransparentHugePages implies +UseLargePages
@@ -5064,51 +5092,6 @@ jlong os::seek_to_file_offset(int fd, jlong offset) {
return (jlong)::lseek(fd, (off_t)offset, SEEK_SET);
}
-// Map a block of memory.
-char* os::pd_map_memory(int fd, const char* file_name, size_t file_offset,
- char *addr, size_t bytes, bool read_only,
- bool allow_exec) {
- int prot;
- int flags = MAP_PRIVATE;
-
- if (read_only) {
- prot = PROT_READ;
- } else {
- prot = PROT_READ | PROT_WRITE;
- }
-
- if (allow_exec) {
- prot |= PROT_EXEC;
- }
-
- if (addr != nullptr) {
- flags |= MAP_FIXED;
- }
-
- char* mapped_address = (char*)mmap(addr, (size_t)bytes, prot, flags,
- fd, file_offset);
- if (mapped_address == MAP_FAILED) {
- return nullptr;
- }
- return mapped_address;
-}
-
-
-// Remap a block of memory.
-char* os::pd_remap_memory(int fd, const char* file_name, size_t file_offset,
- char *addr, size_t bytes, bool read_only,
- bool allow_exec) {
- // same as map_memory() on this OS
- return os::map_memory(fd, file_name, file_offset, addr, bytes, read_only,
- allow_exec);
-}
-
-
-// Unmap a block of memory.
-bool os::pd_unmap_memory(char* addr, size_t bytes) {
- return munmap(addr, bytes) == 0;
-}
-
static jlong slow_thread_cpu_time(Thread *thread, bool user_sys_cpu_time);
static jlong fast_cpu_time(Thread *thread) {
diff --git a/src/hotspot/os/linux/os_linux.hpp b/src/hotspot/os/linux/os_linux.hpp
index 4b2ccf8e370db..6b902e8280244 100644
--- a/src/hotspot/os/linux/os_linux.hpp
+++ b/src/hotspot/os/linux/os_linux.hpp
@@ -93,6 +93,8 @@ class os::Linux {
bool has_steal_ticks;
};
+ static void kernel_version(long* major, long* minor);
+
// which_logical_cpu=-1 returns accumulated ticks for all cpus.
static bool get_tick_information(CPUPerfTicks* pticks, int which_logical_cpu);
static bool _stack_is_executable;
diff --git a/src/hotspot/os/linux/systemMemoryBarrier_linux.cpp b/src/hotspot/os/linux/systemMemoryBarrier_linux.cpp
index 446449a40e094..892d825b40cdc 100644
--- a/src/hotspot/os/linux/systemMemoryBarrier_linux.cpp
+++ b/src/hotspot/os/linux/systemMemoryBarrier_linux.cpp
@@ -24,7 +24,7 @@
#include "precompiled.hpp"
#include "logging/log.hpp"
-#include "runtime/os.hpp"
+#include "os_linux.hpp"
#include "utilities/debug.hpp"
#include "utilities/systemMemoryBarrier.hpp"
@@ -61,6 +61,18 @@ static long membarrier(int cmd, unsigned int flags, int cpu_id) {
}
bool LinuxSystemMemoryBarrier::initialize() {
+#if defined(RISCV)
+// The RISC-V port was introduced in kernel 4.4.
+// Kernel 4.4 also made MEMBARRIER private expedited mandatory.
+// However, RISC-V did not actually support it until kernel 6.9.
+ long major, minor;
+ os::Linux::kernel_version(&major, &minor);
+ if (!(major > 6 || (major == 6 && minor >= 9))) {
+ log_info(os)("Linux kernel %ld.%ld does not support MEMBARRIER PRIVATE_EXPEDITED on RISC-V.",
+ major, minor);
+ return false;
+ }
+#endif
long ret = membarrier(MEMBARRIER_CMD_QUERY, 0, 0);
if (ret < 0) {
log_info(os)("MEMBARRIER_CMD_QUERY unsupported");
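On RISC-V the barrier is only enabled for kernels 6.9 and newer, using the major/minor pair from os::Linux::kernel_version. The version predicate is the usual "greater major, or equal major and at-least minor" shape; a small sketch with a few illustrative checks:

```
#include <cassert>

// True if (major, minor) is at least (req_major, req_minor).
// Same shape as the check guarding MEMBARRIER on RISC-V above.
static bool kernel_at_least(long major, long minor, long req_major, long req_minor) {
  return major > req_major || (major == req_major && minor >= req_minor);
}

int main() {
  assert(kernel_at_least(6, 9, 6, 9));    // exactly 6.9: supported
  assert(kernel_at_least(7, 0, 6, 9));    // newer major: supported
  assert(!kernel_at_least(6, 8, 6, 9));   // 6.8: barrier stays disabled
  assert(!kernel_at_least(5, 15, 6, 9));  // older LTS kernel: disabled
  return 0;
}
```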
diff --git a/src/hotspot/os/posix/os_posix.cpp b/src/hotspot/os/posix/os_posix.cpp
index 339cc475b34ae..39a6779b3fc04 100644
--- a/src/hotspot/os/posix/os_posix.cpp
+++ b/src/hotspot/os/posix/os_posix.cpp
@@ -2031,3 +2031,53 @@ void os::die() {
const char* os::file_separator() { return "/"; }
const char* os::line_separator() { return "\n"; }
const char* os::path_separator() { return ":"; }
+
+// Map file into memory; uses mmap().
+// Notes:
+// - if caller specifies addr, MAP_FIXED is used. That means existing
+// mappings will be replaced.
+// - The file descriptor must be valid (to create anonymous mappings, use
+// os::reserve_memory()).
+// Returns address to mapped memory, nullptr on error
+char* os::pd_map_memory(int fd, const char* unused,
+ size_t file_offset, char *addr, size_t bytes,
+ bool read_only, bool allow_exec) {
+
+ assert(fd != -1, "Specify a valid file descriptor");
+
+ int prot;
+ int flags = MAP_PRIVATE;
+
+ if (read_only) {
+ prot = PROT_READ;
+ } else {
+ prot = PROT_READ | PROT_WRITE;
+ }
+
+ if (allow_exec) {
+ prot |= PROT_EXEC;
+ }
+
+ if (addr != nullptr) {
+ flags |= MAP_FIXED;
+ }
+
+ char* mapped_address = (char*)mmap(addr, (size_t)bytes, prot, flags,
+ fd, file_offset);
+ if (mapped_address == MAP_FAILED) {
+ return nullptr;
+ }
+
+ // If we did specify an address, and the mapping succeeded, it should
+ // have returned that address since we specify MAP_FIXED
+ assert(addr == nullptr || addr == mapped_address,
+ "mmap+MAP_FIXED returned " PTR_FORMAT ", expected " PTR_FORMAT,
+ p2i(mapped_address), p2i(addr));
+
+ return mapped_address;
+}
+
+// Unmap a block of memory. Uses munmap.
+bool os::pd_unmap_memory(char* addr, size_t bytes) {
+ return munmap(addr, bytes) == 0;
+}
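The consolidated POSIX os::pd_map_memory documents its contract in the comments above: the descriptor must be valid, read_only selects PROT_READ, and a non-null addr implies MAP_FIXED, which replaces existing mappings. A hedged usage sketch of the simplest case, mapping an existing file read-only through plain POSIX calls rather than the HotSpot wrapper; the file path is only an example:

```
#include <sys/mman.h>
#include <sys/stat.h>
#include <fcntl.h>
#include <unistd.h>
#include <cstdio>

// Map an existing file read-only, the simplest case handled by pd_map_memory().
// Error handling is minimal; this is a sketch, not production code.
int main() {
  const char* path = "/etc/hostname";          // any small readable file
  int fd = open(path, O_RDONLY);
  if (fd < 0) { perror("open"); return 1; }

  struct stat st;
  if (fstat(fd, &st) != 0 || st.st_size == 0) { close(fd); return 1; }

  // read_only => PROT_READ; no addr hint => the kernel chooses, no MAP_FIXED.
  void* mapped = mmap(nullptr, (size_t)st.st_size, PROT_READ, MAP_PRIVATE, fd, 0);
  if (mapped == MAP_FAILED) { perror("mmap"); close(fd); return 1; }

  std::printf("first byte: %c\n", ((const char*)mapped)[0]);

  munmap(mapped, (size_t)st.st_size);          // pd_unmap_memory() equivalent
  close(fd);
  return 0;
}
```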
diff --git a/src/hotspot/os/posix/signals_posix.cpp b/src/hotspot/os/posix/signals_posix.cpp
index eaadb36731518..6a958f8903b8e 100644
--- a/src/hotspot/os/posix/signals_posix.cpp
+++ b/src/hotspot/os/posix/signals_posix.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2020, 2023, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2020, 2024, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -340,7 +340,7 @@ static const struct {
////////////////////////////////////////////////////////////////////////////////
// sun.misc.Signal and BREAK_SIGNAL support
-void jdk_misc_signal_init() {
+static void jdk_misc_signal_init() {
// Initialize signal structures
::memset((void*)pending_signals, 0, sizeof(pending_signals));
@@ -380,7 +380,7 @@ int os::signal_wait() {
////////////////////////////////////////////////////////////////////////////////
// signal chaining support
-struct sigaction* get_chained_signal_action(int sig) {
+static struct sigaction* get_chained_signal_action(int sig) {
struct sigaction *actp = nullptr;
if (libjsig_is_loaded) {
@@ -1245,7 +1245,7 @@ int os::get_signal_number(const char* signal_name) {
return -1;
}
-void set_signal_handler(int sig) {
+static void set_signal_handler(int sig) {
// Check for overwrite.
struct sigaction oldAct;
sigaction(sig, (struct sigaction*)nullptr, &oldAct);
@@ -1292,7 +1292,7 @@ void set_signal_handler(int sig) {
// install signal handlers for signals that HotSpot needs to
// handle in order to support Java-level exception handling.
-void install_signal_handlers() {
+static void install_signal_handlers() {
// signal-chaining
typedef void (*signal_setting_t)();
signal_setting_t begin_signal_setting = nullptr;
@@ -1723,7 +1723,7 @@ static void SR_handler(int sig, siginfo_t* siginfo, void* context) {
errno = old_errno;
}
-int SR_initialize() {
+static int SR_initialize() {
struct sigaction act;
char *s;
// Get signal number to use for suspend/resume
diff --git a/src/hotspot/os/windows/os_windows.cpp b/src/hotspot/os/windows/os_windows.cpp
index 3613edfc7d9e6..2ddebc9bf6dee 100644
--- a/src/hotspot/os/windows/os_windows.cpp
+++ b/src/hotspot/os/windows/os_windows.cpp
@@ -27,7 +27,6 @@
// no precompiled headers
#include "classfile/vmSymbols.hpp"
#include "code/codeCache.hpp"
-#include "code/icBuffer.hpp"
#include "code/nativeInst.hpp"
#include "code/vtableStubs.hpp"
#include "compiler/compileBroker.hpp"
@@ -5168,22 +5167,6 @@ char* os::pd_map_memory(int fd, const char* file_name, size_t file_offset,
return base;
}
-
-// Remap a block of memory.
-char* os::pd_remap_memory(int fd, const char* file_name, size_t file_offset,
- char *addr, size_t bytes, bool read_only,
- bool allow_exec) {
- // This OS does not allow existing memory maps to be remapped so we
- // would have to unmap the memory before we remap it.
-
- // Because there is a small window between unmapping memory and mapping
- // it in again with different protections, CDS archives are mapped RW
- // on windows, so this function isn't called.
- ShouldNotReachHere();
- return nullptr;
-}
-
-
// Unmap a block of memory.
// Returns true=success, otherwise false.
diff --git a/src/hotspot/os_cpu/aix_ppc/os_aix_ppc.cpp b/src/hotspot/os_cpu/aix_ppc/os_aix_ppc.cpp
index 5e0086521aad9..242042d4247aa 100644
--- a/src/hotspot/os_cpu/aix_ppc/os_aix_ppc.cpp
+++ b/src/hotspot/os_cpu/aix_ppc/os_aix_ppc.cpp
@@ -28,7 +28,6 @@
#include "asm/assembler.inline.hpp"
#include "classfile/vmSymbols.hpp"
#include "code/codeCache.hpp"
-#include "code/icBuffer.hpp"
#include "code/vtableStubs.hpp"
#include "interpreter/interpreter.hpp"
#include "jvm.h"
diff --git a/src/hotspot/os_cpu/bsd_aarch64/copy_bsd_aarch64.S b/src/hotspot/os_cpu/bsd_aarch64/copy_bsd_aarch64.S
index 7b286820a9a8f..187cd20ddbdad 100644
--- a/src/hotspot/os_cpu/bsd_aarch64/copy_bsd_aarch64.S
+++ b/src/hotspot/os_cpu/bsd_aarch64/copy_bsd_aarch64.S
@@ -28,6 +28,11 @@
.global CFUNC(_Copy_conjoint_words)
.global CFUNC(_Copy_disjoint_words)
+#ifdef __APPLE__
+ .private_extern CFUNC(_Copy_conjoint_words)
+ .private_extern CFUNC(_Copy_disjoint_words)
+#endif
+
s .req x0
d .req x1
count .req x2
diff --git a/src/hotspot/os_cpu/bsd_aarch64/os_bsd_aarch64.cpp b/src/hotspot/os_cpu/bsd_aarch64/os_bsd_aarch64.cpp
index fbd7c4eccd403..4750ed8805644 100644
--- a/src/hotspot/os_cpu/bsd_aarch64/os_bsd_aarch64.cpp
+++ b/src/hotspot/os_cpu/bsd_aarch64/os_bsd_aarch64.cpp
@@ -29,7 +29,6 @@
#include "classfile/classLoader.hpp"
#include "classfile/vmSymbols.hpp"
#include "code/codeCache.hpp"
-#include "code/icBuffer.hpp"
#include "code/vtableStubs.hpp"
#include "interpreter/interpreter.hpp"
#include "jvm.h"
diff --git a/src/hotspot/os_cpu/bsd_aarch64/safefetch_bsd_aarch64.S b/src/hotspot/os_cpu/bsd_aarch64/safefetch_bsd_aarch64.S
index 34d7b8e34a739..b9b6df9b23aa0 100644
--- a/src/hotspot/os_cpu/bsd_aarch64/safefetch_bsd_aarch64.S
+++ b/src/hotspot/os_cpu/bsd_aarch64/safefetch_bsd_aarch64.S
@@ -1,6 +1,6 @@
/*
* Copyright (c) 2022 SAP SE. All rights reserved.
- * Copyright (c) 2022, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2022, 2024, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -39,6 +39,15 @@
.global SYMBOL(_SafeFetch32_fault)
.global SYMBOL(_SafeFetch32_continuation)
+#ifdef __APPLE__
+ .private_extern SYMBOL(SafeFetchN_impl)
+ .private_extern SYMBOL(_SafeFetchN_fault)
+ .private_extern SYMBOL(_SafeFetchN_continuation)
+ .private_extern SYMBOL(SafeFetch32_impl)
+ .private_extern SYMBOL(_SafeFetch32_fault)
+ .private_extern SYMBOL(_SafeFetch32_continuation)
+#endif
+
# Support for int SafeFetch32(int* address, int defaultval);
#
# x0 : address
diff --git a/src/hotspot/os_cpu/bsd_x86/bsd_x86_32.S b/src/hotspot/os_cpu/bsd_x86/bsd_x86_32.S
index 02231040e15bd..5cad379df3f2b 100644
--- a/src/hotspot/os_cpu/bsd_x86/bsd_x86_32.S
+++ b/src/hotspot/os_cpu/bsd_x86/bsd_x86_32.S
@@ -1,5 +1,5 @@
#
-# Copyright (c) 2004, 2022, Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2004, 2024, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it
@@ -32,6 +32,7 @@
#endif
.globl SYMBOL(fixcw)
+ .globl SYMBOL(SpinPause)
# NOTE WELL! The _Copy functions are called directly
# from server-compiler-generated code via CallLeafNoFP,
@@ -50,6 +51,20 @@
.globl SYMBOL(_Atomic_cmpxchg_long)
.globl SYMBOL(_Atomic_move_long)
+#ifdef __APPLE__
+ .private_extern SYMBOL(fixcw)
+ .private_extern SYMBOL(SpinPause)
+ .private_extern SYMBOL(_Copy_arrayof_conjoint_bytes)
+ .private_extern SYMBOL(_Copy_conjoint_jshorts_atomic)
+ .private_extern SYMBOL(_Copy_arrayof_conjoint_jshorts)
+ .private_extern SYMBOL(_Copy_conjoint_jints_atomic)
+ .private_extern SYMBOL(_Copy_arrayof_conjoint_jints)
+ .private_extern SYMBOL(_Copy_conjoint_jlongs_atomic)
+ .private_extern SYMBOL(_mmx_Copy_arrayof_conjoint_jshorts)
+ .private_extern SYMBOL(_Atomic_cmpxchg_long)
+ .private_extern SYMBOL(_Atomic_move_long)
+#endif
+
.text
# Support for void os::Solaris::init_thread_fpu_state() in os_solaris_i486.cpp
@@ -62,7 +77,6 @@ SYMBOL(fixcw):
popl %eax
ret
- .globl SYMBOL(SpinPause)
ELF_TYPE(SpinPause,@function)
.p2align 4,,15
SYMBOL(SpinPause):
diff --git a/src/hotspot/os_cpu/bsd_x86/bsd_x86_64.S b/src/hotspot/os_cpu/bsd_x86/bsd_x86_64.S
index 95cea3bf2a3d3..5e2addc4e6f43 100644
--- a/src/hotspot/os_cpu/bsd_x86/bsd_x86_64.S
+++ b/src/hotspot/os_cpu/bsd_x86/bsd_x86_64.S
@@ -1,5 +1,5 @@
-#
-# Copyright (c) 2004, 2013, Oracle and/or its affiliates. All rights reserved.
+#
+# Copyright (c) 2004, 2024, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it
@@ -31,22 +31,33 @@
#endif
# NOTE WELL! The _Copy functions are called directly
- # from server-compiler-generated code via CallLeafNoFP,
- # which means that they *must* either not use floating
- # point or use it in the same manner as does the server
- # compiler.
-
+ # from server-compiler-generated code via CallLeafNoFP,
+ # which means that they *must* either not use floating
+ # point or use it in the same manner as does the server
+ # compiler.
+
+ .globl SYMBOL(SpinPause)
.globl SYMBOL(_Copy_arrayof_conjoint_bytes)
- .globl SYMBOL(_Copy_arrayof_conjoint_jshorts)
+ .globl SYMBOL(_Copy_arrayof_conjoint_jshorts)
.globl SYMBOL(_Copy_conjoint_jshorts_atomic)
.globl SYMBOL(_Copy_arrayof_conjoint_jints)
.globl SYMBOL(_Copy_conjoint_jints_atomic)
.globl SYMBOL(_Copy_arrayof_conjoint_jlongs)
.globl SYMBOL(_Copy_conjoint_jlongs_atomic)
- .text
+#ifdef __APPLE__
+ .private_extern SYMBOL(SpinPause)
+ .private_extern SYMBOL(_Copy_arrayof_conjoint_bytes)
+ .private_extern SYMBOL(_Copy_arrayof_conjoint_jshorts)
+ .private_extern SYMBOL(_Copy_conjoint_jshorts_atomic)
+ .private_extern SYMBOL(_Copy_arrayof_conjoint_jints)
+ .private_extern SYMBOL(_Copy_conjoint_jints_atomic)
+ .private_extern SYMBOL(_Copy_arrayof_conjoint_jlongs)
+ .private_extern SYMBOL(_Copy_conjoint_jlongs_atomic)
+#endif
+
+ .text
- .globl SYMBOL(SpinPause)
.p2align 4,,15
ELF_TYPE(SpinPause,@function)
SYMBOL(SpinPause):
@@ -63,7 +74,7 @@ SYMBOL(SpinPause):
# rdx - count, treated as ssize_t
#
.p2align 4,,15
- ELF_TYPE(_Copy_arrayof_conjoint_bytes,@function)
+ ELF_TYPE(_Copy_arrayof_conjoint_bytes,@function)
SYMBOL(_Copy_arrayof_conjoint_bytes):
movq %rdx,%r8 # byte count
shrq $3,%rdx # qword count
@@ -71,7 +82,7 @@ SYMBOL(_Copy_arrayof_conjoint_bytes):
leaq -1(%rdi,%r8,1),%rax # from + bcount*1 - 1
jbe acb_CopyRight
cmpq %rax,%rsi
- jbe acb_CopyLeft
+ jbe acb_CopyLeft
acb_CopyRight:
leaq -8(%rdi,%rdx,8),%rax # from + qcount*8 - 8
leaq -8(%rsi,%rdx,8),%rcx # to + qcount*8 - 8
@@ -165,8 +176,8 @@ acb_CopyLeft:
# rdx - count, treated as ssize_t
#
.p2align 4,,15
- ELF_TYPE(_Copy_arrayof_conjoint_jshorts,@function)
- ELF_TYPE(_Copy_conjoint_jshorts_atomic,@function)
+ ELF_TYPE(_Copy_arrayof_conjoint_jshorts,@function)
+ ELF_TYPE(_Copy_conjoint_jshorts_atomic,@function)
SYMBOL(_Copy_arrayof_conjoint_jshorts):
SYMBOL(_Copy_conjoint_jshorts_atomic):
movq %rdx,%r8 # word count
@@ -175,7 +186,7 @@ SYMBOL(_Copy_conjoint_jshorts_atomic):
leaq -2(%rdi,%r8,2),%rax # from + wcount*2 - 2
jbe acs_CopyRight
cmpq %rax,%rsi
- jbe acs_CopyLeft
+ jbe acs_CopyLeft
acs_CopyRight:
leaq -8(%rdi,%rdx,8),%rax # from + qcount*8 - 8
leaq -8(%rsi,%rdx,8),%rcx # to + qcount*8 - 8
@@ -255,8 +266,8 @@ acs_CopyLeft:
# rdx - count, treated as ssize_t
#
.p2align 4,,15
- ELF_TYPE(_Copy_arrayof_conjoint_jints,@function)
- ELF_TYPE(_Copy_conjoint_jints_atomic,@function)
+ ELF_TYPE(_Copy_arrayof_conjoint_jints,@function)
+ ELF_TYPE(_Copy_conjoint_jints_atomic,@function)
SYMBOL(_Copy_arrayof_conjoint_jints):
SYMBOL(_Copy_conjoint_jints_atomic):
movq %rdx,%r8 # dword count
@@ -265,7 +276,7 @@ SYMBOL(_Copy_conjoint_jints_atomic):
leaq -4(%rdi,%r8,4),%rax # from + dcount*4 - 4
jbe aci_CopyRight
cmpq %rax,%rsi
- jbe aci_CopyLeft
+ jbe aci_CopyLeft
aci_CopyRight:
leaq -8(%rdi,%rdx,8),%rax # from + qcount*8 - 8
leaq -8(%rsi,%rdx,8),%rcx # to + qcount*8 - 8
@@ -334,15 +345,15 @@ aci_CopyLeft:
# rdx - count, treated as ssize_t
#
.p2align 4,,15
- ELF_TYPE(_Copy_arrayof_conjoint_jlongs,@function)
- ELF_TYPE(_Copy_conjoint_jlongs_atomic,@function)
+ ELF_TYPE(_Copy_arrayof_conjoint_jlongs,@function)
+ ELF_TYPE(_Copy_conjoint_jlongs_atomic,@function)
SYMBOL(_Copy_arrayof_conjoint_jlongs):
SYMBOL(_Copy_conjoint_jlongs_atomic):
cmpq %rdi,%rsi
leaq -8(%rdi,%rdx,8),%rax # from + count*8 - 8
jbe acl_CopyRight
cmpq %rax,%rsi
- jbe acl_CopyLeft
+ jbe acl_CopyLeft
acl_CopyRight:
leaq -8(%rsi,%rdx,8),%rcx # to + count*8 - 8
negq %rdx
diff --git a/src/hotspot/os_cpu/bsd_x86/os_bsd_x86.cpp b/src/hotspot/os_cpu/bsd_x86/os_bsd_x86.cpp
index 37b92bc7ffd48..c73e83996ff57 100644
--- a/src/hotspot/os_cpu/bsd_x86/os_bsd_x86.cpp
+++ b/src/hotspot/os_cpu/bsd_x86/os_bsd_x86.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1999, 2023, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2024, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -26,7 +26,6 @@
#include "asm/macroAssembler.hpp"
#include "classfile/vmSymbols.hpp"
#include "code/codeCache.hpp"
-#include "code/icBuffer.hpp"
#include "code/vtableStubs.hpp"
#include "interpreter/interpreter.hpp"
#include "jvm.h"
@@ -351,7 +350,7 @@ frame os::get_sender_for_C_frame(frame* fr) {
return frame(fr->sender_sp(), fr->link(), fr->sender_pc());
}
-intptr_t* _get_previous_fp() {
+static intptr_t* _get_previous_fp() {
#if defined(__clang__) || defined(__llvm__)
intptr_t **ebp;
__asm__("mov %%" SPELL_REG_FP ", %0":"=r"(ebp));
diff --git a/src/hotspot/os_cpu/bsd_x86/safefetch_bsd_x86_64.S b/src/hotspot/os_cpu/bsd_x86/safefetch_bsd_x86_64.S
index 2a75f3dac94b3..1697f6f03b581 100644
--- a/src/hotspot/os_cpu/bsd_x86/safefetch_bsd_x86_64.S
+++ b/src/hotspot/os_cpu/bsd_x86/safefetch_bsd_x86_64.S
@@ -1,6 +1,6 @@
#
# Copyright (c) 2022 SAP SE. All rights reserved.
-# Copyright (c) 2022, Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2022, 2024, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it
@@ -38,13 +38,22 @@
.globl SYMBOL(_SafeFetch32_continuation)
.globl SYMBOL(_SafeFetchN_continuation)
+#ifdef __APPLE__
+ .private_extern SYMBOL(SafeFetch32_impl)
+ .private_extern SYMBOL(SafeFetchN_impl)
+ .private_extern SYMBOL(_SafeFetch32_fault)
+ .private_extern SYMBOL(_SafeFetchN_fault)
+ .private_extern SYMBOL(_SafeFetch32_continuation)
+ .private_extern SYMBOL(_SafeFetchN_continuation)
+#endif
+
.text
# Support for int SafeFetch32(int* address, int defaultval);
#
# %rdi : address
# %esi : defaultval
- ELF_TYPE(SafeFetch32_impl,@function)
+ ELF_TYPE(SafeFetch32_impl,@function)
SYMBOL(SafeFetch32_impl:)
SYMBOL(_SafeFetch32_fault:)
movl (%rdi), %eax
diff --git a/src/hotspot/os_cpu/bsd_zero/os_bsd_zero.cpp b/src/hotspot/os_cpu/bsd_zero/os_bsd_zero.cpp
index 012f85ac0ff4a..0fc9484ce23ef 100644
--- a/src/hotspot/os_cpu/bsd_zero/os_bsd_zero.cpp
+++ b/src/hotspot/os_cpu/bsd_zero/os_bsd_zero.cpp
@@ -27,7 +27,6 @@
#include "asm/assembler.inline.hpp"
#include "atomic_bsd_zero.hpp"
#include "classfile/vmSymbols.hpp"
-#include "code/icBuffer.hpp"
#include "code/vtableStubs.hpp"
#include "interpreter/interpreter.hpp"
#include "jvm.h"
diff --git a/src/hotspot/os_cpu/linux_aarch64/atomic_linux_aarch64.S b/src/hotspot/os_cpu/linux_aarch64/atomic_linux_aarch64.S
index 4621e44ca3c47..e67206a9d497f 100644
--- a/src/hotspot/os_cpu/linux_aarch64/atomic_linux_aarch64.S
+++ b/src/hotspot/os_cpu/linux_aarch64/atomic_linux_aarch64.S
@@ -24,6 +24,7 @@
.text
.globl aarch64_atomic_fetch_add_8_default_impl
+ .hidden aarch64_atomic_fetch_add_8_default_impl
.align 5
aarch64_atomic_fetch_add_8_default_impl:
#ifdef __ARM_FEATURE_ATOMICS
@@ -40,6 +41,7 @@ aarch64_atomic_fetch_add_8_default_impl:
ret
.globl aarch64_atomic_fetch_add_4_default_impl
+ .hidden aarch64_atomic_fetch_add_4_default_impl
.align 5
aarch64_atomic_fetch_add_4_default_impl:
#ifdef __ARM_FEATURE_ATOMICS
@@ -56,6 +58,7 @@ aarch64_atomic_fetch_add_4_default_impl:
ret
.global aarch64_atomic_fetch_add_8_relaxed_default_impl
+ .hidden aarch64_atomic_fetch_add_8_relaxed_default_impl
.align 5
aarch64_atomic_fetch_add_8_relaxed_default_impl:
#ifdef __ARM_FEATURE_ATOMICS
@@ -71,6 +74,7 @@ aarch64_atomic_fetch_add_8_relaxed_default_impl:
ret
.global aarch64_atomic_fetch_add_4_relaxed_default_impl
+ .hidden aarch64_atomic_fetch_add_4_relaxed_default_impl
.align 5
aarch64_atomic_fetch_add_4_relaxed_default_impl:
#ifdef __ARM_FEATURE_ATOMICS
@@ -86,6 +90,7 @@ aarch64_atomic_fetch_add_4_relaxed_default_impl:
ret
.globl aarch64_atomic_xchg_4_default_impl
+ .hidden aarch64_atomic_xchg_4_default_impl
.align 5
aarch64_atomic_xchg_4_default_impl:
#ifdef __ARM_FEATURE_ATOMICS
@@ -101,6 +106,7 @@ aarch64_atomic_xchg_4_default_impl:
ret
.globl aarch64_atomic_xchg_8_default_impl
+ .hidden aarch64_atomic_xchg_8_default_impl
.align 5
aarch64_atomic_xchg_8_default_impl:
#ifdef __ARM_FEATURE_ATOMICS
@@ -116,6 +122,7 @@ aarch64_atomic_xchg_8_default_impl:
ret
.globl aarch64_atomic_cmpxchg_1_default_impl
+ .hidden aarch64_atomic_cmpxchg_1_default_impl
.align 5
aarch64_atomic_cmpxchg_1_default_impl:
#ifdef __ARM_FEATURE_ATOMICS
@@ -136,6 +143,7 @@ aarch64_atomic_cmpxchg_1_default_impl:
ret
.globl aarch64_atomic_cmpxchg_4_default_impl
+ .hidden aarch64_atomic_cmpxchg_4_default_impl
.align 5
aarch64_atomic_cmpxchg_4_default_impl:
#ifdef __ARM_FEATURE_ATOMICS
@@ -155,6 +163,7 @@ aarch64_atomic_cmpxchg_4_default_impl:
ret
.globl aarch64_atomic_cmpxchg_8_default_impl
+ .hidden aarch64_atomic_cmpxchg_8_default_impl
.align 5
aarch64_atomic_cmpxchg_8_default_impl:
#ifdef __ARM_FEATURE_ATOMICS
@@ -174,6 +183,7 @@ aarch64_atomic_cmpxchg_8_default_impl:
ret
.globl aarch64_atomic_cmpxchg_4_release_default_impl
+ .hidden aarch64_atomic_cmpxchg_4_release_default_impl
.align 5
aarch64_atomic_cmpxchg_4_release_default_impl:
#ifdef __ARM_FEATURE_ATOMICS
@@ -191,6 +201,7 @@ aarch64_atomic_cmpxchg_4_release_default_impl:
ret
.globl aarch64_atomic_cmpxchg_8_release_default_impl
+ .hidden aarch64_atomic_cmpxchg_8_release_default_impl
.align 5
aarch64_atomic_cmpxchg_8_release_default_impl:
#ifdef __ARM_FEATURE_ATOMICS
@@ -208,6 +219,7 @@ aarch64_atomic_cmpxchg_8_release_default_impl:
ret
.globl aarch64_atomic_cmpxchg_4_seq_cst_default_impl
+ .hidden aarch64_atomic_cmpxchg_4_seq_cst_default_impl
.align 5
aarch64_atomic_cmpxchg_4_seq_cst_default_impl:
#ifdef __ARM_FEATURE_ATOMICS
@@ -225,6 +237,7 @@ aarch64_atomic_cmpxchg_4_seq_cst_default_impl:
ret
.globl aarch64_atomic_cmpxchg_8_seq_cst_default_impl
+ .hidden aarch64_atomic_cmpxchg_8_seq_cst_default_impl
.align 5
aarch64_atomic_cmpxchg_8_seq_cst_default_impl:
#ifdef __ARM_FEATURE_ATOMICS
@@ -242,6 +255,7 @@ aarch64_atomic_cmpxchg_8_seq_cst_default_impl:
ret
.globl aarch64_atomic_cmpxchg_1_relaxed_default_impl
+.hidden aarch64_atomic_cmpxchg_1_relaxed_default_impl
.align 5
aarch64_atomic_cmpxchg_1_relaxed_default_impl:
#ifdef __ARM_FEATURE_ATOMICS
@@ -260,6 +274,7 @@ aarch64_atomic_cmpxchg_1_relaxed_default_impl:
ret
.globl aarch64_atomic_cmpxchg_4_relaxed_default_impl
+ .hidden aarch64_atomic_cmpxchg_4_relaxed_default_impl
.align 5
aarch64_atomic_cmpxchg_4_relaxed_default_impl:
#ifdef __ARM_FEATURE_ATOMICS
@@ -277,6 +292,7 @@ aarch64_atomic_cmpxchg_4_relaxed_default_impl:
ret
.globl aarch64_atomic_cmpxchg_8_relaxed_default_impl
+ .hidden aarch64_atomic_cmpxchg_8_relaxed_default_impl
.align 5
aarch64_atomic_cmpxchg_8_relaxed_default_impl:
#ifdef __ARM_FEATURE_ATOMICS
diff --git a/src/hotspot/os_cpu/linux_aarch64/copy_linux_aarch64.S b/src/hotspot/os_cpu/linux_aarch64/copy_linux_aarch64.S
index 4b8ed597c59fc..ade867ace016b 100644
--- a/src/hotspot/os_cpu/linux_aarch64/copy_linux_aarch64.S
+++ b/src/hotspot/os_cpu/linux_aarch64/copy_linux_aarch64.S
@@ -24,6 +24,9 @@
.global _Copy_conjoint_words
.global _Copy_disjoint_words
+ .hidden _Copy_conjoint_words
+ .hidden _Copy_disjoint_words
+
s .req x0
d .req x1
count .req x2
diff --git a/src/hotspot/os_cpu/linux_aarch64/os_linux_aarch64.cpp b/src/hotspot/os_cpu/linux_aarch64/os_linux_aarch64.cpp
index 4835eb9405a1b..3698896abb78a 100644
--- a/src/hotspot/os_cpu/linux_aarch64/os_linux_aarch64.cpp
+++ b/src/hotspot/os_cpu/linux_aarch64/os_linux_aarch64.cpp
@@ -27,7 +27,6 @@
#include "asm/macroAssembler.hpp"
#include "classfile/vmSymbols.hpp"
#include "code/codeCache.hpp"
-#include "code/icBuffer.hpp"
#include "code/vtableStubs.hpp"
#include "code/nativeInst.hpp"
#include "interpreter/interpreter.hpp"
diff --git a/src/hotspot/os_cpu/linux_aarch64/safefetch_linux_aarch64.S b/src/hotspot/os_cpu/linux_aarch64/safefetch_linux_aarch64.S
index fcb7e62e6d5e9..cfbd8f45f285f 100644
--- a/src/hotspot/os_cpu/linux_aarch64/safefetch_linux_aarch64.S
+++ b/src/hotspot/os_cpu/linux_aarch64/safefetch_linux_aarch64.S
@@ -1,6 +1,6 @@
/*
* Copyright (c) 2022 SAP SE. All rights reserved.
- * Copyright (c) 2022, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2022, 2024, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -30,6 +30,13 @@
.globl _SafeFetch32_fault
.globl _SafeFetch32_continuation
+ .hidden SafeFetchN_impl
+ .hidden _SafeFetchN_fault
+ .hidden _SafeFetchN_continuation
+ .hidden SafeFetch32_impl
+ .hidden _SafeFetch32_fault
+ .hidden _SafeFetch32_continuation
+
# Support for int SafeFetch32(int* address, int defaultval);
#
# x0 : address
diff --git a/src/hotspot/os_cpu/linux_aarch64/threadLS_linux_aarch64.S b/src/hotspot/os_cpu/linux_aarch64/threadLS_linux_aarch64.S
index ac60d6aa94168..f9f5aab2a6bd8 100644
--- a/src/hotspot/os_cpu/linux_aarch64/threadLS_linux_aarch64.S
+++ b/src/hotspot/os_cpu/linux_aarch64/threadLS_linux_aarch64.S
@@ -25,22 +25,23 @@
// Clobber x1, flags.
// All other registers are preserved,
- .global _ZN10JavaThread25aarch64_get_thread_helperEv
- .type _ZN10JavaThread25aarch64_get_thread_helperEv, %function
+ .global _ZN10JavaThread25aarch64_get_thread_helperEv
+ .hidden _ZN10JavaThread25aarch64_get_thread_helperEv
+ .type _ZN10JavaThread25aarch64_get_thread_helperEv, %function
_ZN10JavaThread25aarch64_get_thread_helperEv:
- hint #0x19 // paciasp
- stp x29, x30, [sp, -16]!
- adrp x0, :tlsdesc:_ZN6Thread12_thr_currentE
- ldr x1, [x0, #:tlsdesc_lo12:_ZN6Thread12_thr_currentE]
- add x0, x0, :tlsdesc_lo12:_ZN6Thread12_thr_currentE
- .tlsdesccall _ZN6Thread12_thr_currentE
- blr x1
- mrs x1, tpidr_el0
- add x0, x1, x0
- ldr x0, [x0]
- ldp x29, x30, [sp], 16
- hint #0x1d // autiasp
- ret
+ hint #0x19 // paciasp
+ stp x29, x30, [sp, -16]!
+ adrp x0, :tlsdesc:_ZN6Thread12_thr_currentE
+ ldr x1, [x0, #:tlsdesc_lo12:_ZN6Thread12_thr_currentE]
+ add x0, x0, :tlsdesc_lo12:_ZN6Thread12_thr_currentE
+ .tlsdesccall _ZN6Thread12_thr_currentE
+ blr x1
+ mrs x1, tpidr_el0
+ add x0, x1, x0
+ ldr x0, [x0]
+ ldp x29, x30, [sp], 16
+ hint #0x1d // autiasp
+ ret
- .size _ZN10JavaThread25aarch64_get_thread_helperEv, .-_ZN10JavaThread25aarch64_get_thread_helperEv
+ .size _ZN10JavaThread25aarch64_get_thread_helperEv, .-_ZN10JavaThread25aarch64_get_thread_helperEv
diff --git a/src/hotspot/os_cpu/linux_arm/linux_arm_32.S b/src/hotspot/os_cpu/linux_arm/linux_arm_32.S
index eb560d8f0c78b..ad88c58ce78ce 100644
--- a/src/hotspot/os_cpu/linux_arm/linux_arm_32.S
+++ b/src/hotspot/os_cpu/linux_arm/linux_arm_32.S
@@ -1,5 +1,5 @@
#
-# Copyright (c) 2008, 2022, Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2008, 2024, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it
@@ -23,38 +23,46 @@
# NOTE WELL! The _Copy functions are called directly
- # from server-compiler-generated code via CallLeafNoFP,
- # which means that they *must* either not use floating
- # point or use it in the same manner as does the server
- # compiler.
+ # from server-compiler-generated code via CallLeafNoFP,
+ # which means that they *must* either not use floating
+ # point or use it in the same manner as does the server
+ # compiler.
- .globl _Copy_conjoint_bytes
- .type _Copy_conjoint_bytes, %function
+ .globl SpinPause
+ .hidden SpinPause
+ .type SpinPause, %function
.globl _Copy_arrayof_conjoint_bytes
- .type _Copy_arrayof_conjoint_bytes, %function
- .globl _Copy_disjoint_words
- .type _Copy_disjoint_words, %function
- .globl _Copy_conjoint_words
- .type _Copy_conjoint_words, %function
+ .hidden _Copy_arrayof_conjoint_bytes
+ .type _Copy_arrayof_conjoint_bytes, %function
+ .globl _Copy_disjoint_words
+ .hidden _Copy_disjoint_words
+ .type _Copy_disjoint_words, %function
+ .globl _Copy_conjoint_words
+ .hidden _Copy_conjoint_words
+ .type _Copy_conjoint_words, %function
.globl _Copy_conjoint_jshorts_atomic
- .type _Copy_conjoint_jshorts_atomic, %function
- .globl _Copy_arrayof_conjoint_jshorts
- .type _Copy_arrayof_conjoint_jshorts, %function
+ .hidden _Copy_conjoint_jshorts_atomic
+ .type _Copy_conjoint_jshorts_atomic, %function
+ .globl _Copy_arrayof_conjoint_jshorts
+ .hidden _Copy_arrayof_conjoint_jshorts
+ .type _Copy_arrayof_conjoint_jshorts, %function
.globl _Copy_conjoint_jints_atomic
- .type _Copy_conjoint_jints_atomic, %function
+ .hidden _Copy_conjoint_jints_atomic
+ .type _Copy_conjoint_jints_atomic, %function
.globl _Copy_arrayof_conjoint_jints
- .type _Copy_arrayof_conjoint_jints, %function
- .globl _Copy_conjoint_jlongs_atomic
- .type _Copy_conjoint_jlongs_atomic, %function
- .globl _Copy_arrayof_conjoint_jlongs
- .type _Copy_arrayof_conjoint_jlongs, %function
+ .hidden _Copy_arrayof_conjoint_jints
+ .type _Copy_arrayof_conjoint_jints, %function
+ .globl _Copy_conjoint_jlongs_atomic
+ .hidden _Copy_conjoint_jlongs_atomic
+ .type _Copy_conjoint_jlongs_atomic, %function
+ .globl _Copy_arrayof_conjoint_jlongs
+ .hidden _Copy_arrayof_conjoint_jlongs
+ .type _Copy_arrayof_conjoint_jlongs, %function
from .req r0
to .req r1
- .text
- .globl SpinPause
- .type SpinPause, %function
+ .text
SpinPause:
bx LR
@@ -70,7 +78,7 @@ _Copy_arrayof_conjoint_bytes:
# size_t count)
_Copy_disjoint_words:
stmdb sp!, {r3 - r9, ip}
-
+
cmp r2, #0
beq disjoint_words_finish
@@ -81,17 +89,17 @@ _Copy_disjoint_words:
.align 3
dw_f2b_loop_32:
subs r2, #32
- blt dw_f2b_loop_32_finish
+ blt dw_f2b_loop_32_finish
ldmia from!, {r3 - r9, ip}
nop
- pld [from]
+ pld [from]
stmia to!, {r3 - r9, ip}
bgt dw_f2b_loop_32
dw_f2b_loop_32_finish:
addlts r2, #32
beq disjoint_words_finish
cmp r2, #16
- blt disjoint_words_small
+ blt disjoint_words_small
ldmia from!, {r3 - r6}
subge r2, r2, #16
stmia to!, {r3 - r6}
@@ -116,8 +124,8 @@ disjoint_words_finish:
_Copy_conjoint_words:
stmdb sp!, {r3 - r9, ip}
- cmp r2, #0
- beq conjoint_words_finish
+ cmp r2, #0
+ beq conjoint_words_finish
pld [from, #0]
cmp r2, #12
@@ -129,17 +137,17 @@ _Copy_conjoint_words:
.align 3
cw_f2b_loop_32:
subs r2, #32
- blt cw_f2b_loop_32_finish
+ blt cw_f2b_loop_32_finish
ldmia from!, {r3 - r9, ip}
nop
- pld [from]
+ pld [from]
stmia to!, {r3 - r9, ip}
bgt cw_f2b_loop_32
cw_f2b_loop_32_finish:
addlts r2, #32
beq conjoint_words_finish
cmp r2, #16
- blt conjoint_words_small
+ blt conjoint_words_small
ldmia from!, {r3 - r6}
subge r2, r2, #16
stmia to!, {r3 - r6}
@@ -154,7 +162,7 @@ conjoint_words_small:
strgt r9, [to], #4
b conjoint_words_finish
- # Src and dest overlap, copy in a descending order
+ # Src and dest overlap, copy in a descending order
cw_b2f_copy:
add from, r2
pld [from, #-32]
@@ -162,17 +170,17 @@ cw_b2f_copy:
.align 3
cw_b2f_loop_32:
subs r2, #32
- blt cw_b2f_loop_32_finish
+ blt cw_b2f_loop_32_finish
ldmdb from!, {r3-r9,ip}
nop
- pld [from, #-32]
+ pld [from, #-32]
stmdb to!, {r3-r9,ip}
bgt cw_b2f_loop_32
cw_b2f_loop_32_finish:
addlts r2, #32
beq conjoint_words_finish
cmp r2, #16
- blt cw_b2f_copy_small
+ blt cw_b2f_copy_small
ldmdb from!, {r3 - r6}
subge r2, r2, #16
stmdb to!, {r3 - r6}
@@ -196,8 +204,8 @@ conjoint_words_finish:
_Copy_conjoint_jshorts_atomic:
stmdb sp!, {r3 - r9, ip}
- cmp r2, #0
- beq conjoint_shorts_finish
+ cmp r2, #0
+ beq conjoint_shorts_finish
subs r3, to, from
cmphi r2, r3
@@ -210,11 +218,11 @@ _Copy_conjoint_jshorts_atomic:
ands r3, from, #3
bne cs_f2b_src_u
- # Aligned source address
+ # Aligned source address
.align 3
cs_f2b_loop_32:
subs r2, #32
- blt cs_f2b_loop_32_finish
+ blt cs_f2b_loop_32_finish
ldmia from!, {r3 - r9, ip}
nop
pld [from]
@@ -244,14 +252,14 @@ cs_f2b_4:
strgth r5, [to], #2
b conjoint_shorts_finish
- # Destination not aligned
+ # Destination not aligned
cs_f2b_dest_u:
ldrh r3, [from], #2
subs r2, #2
strh r3, [to], #2
beq conjoint_shorts_finish
- # Check to see if source is not aligned ether
+        # Check to see if source is not aligned either
ands r3, from, #3
beq cs_f2b_loop_32
@@ -259,11 +267,11 @@ cs_f2b_src_u:
cmp r2, #16
blt cs_f2b_8_u
- # Load 2 first bytes to r7 and make src ptr word aligned
+ # Load 2 first bytes to r7 and make src ptr word aligned
bic from, #3
ldr r7, [from], #4
- # Destination aligned, source not
+ # Destination aligned, source not
mov r8, r2, lsr #4
.align 3
cs_f2b_16_u_loop:
@@ -306,7 +314,7 @@ cs_f2b_4_u:
strgth r5, [to], #2
b conjoint_shorts_finish
- # Src and dest overlap, copy in a descending order
+ # Src and dest overlap, copy in a descending order
cs_b2f_copy:
add from, r2
pld [from, #-32]
@@ -319,7 +327,7 @@ cs_b2f_copy:
.align 3
cs_b2f_loop_32:
subs r2, #32
- blt cs_b2f_loop_32_finish
+ blt cs_b2f_loop_32_finish
ldmdb from!, {r3-r9,ip}
nop
pld [from, #-32]
@@ -359,16 +367,16 @@ cs_b2f_all_copy:
strgth r5, [to, #-2]!
b conjoint_shorts_finish
- # Destination not aligned
+ # Destination not aligned
cs_b2f_dest_u:
ldrh r3, [from, #-2]!
strh r3, [to, #-2]!
sub r2, #2
- # Check source alignment as well
+ # Check source alignment as well
ands r3, from, #3
beq cs_b2f_loop_32
- # Source not aligned
+ # Source not aligned
cs_b2f_src_u:
bic from, #3
.align 3
@@ -393,7 +401,7 @@ cs_b2f_16_loop_u:
cs_b2f_16_loop_u_finished:
addlts r2, #16
ldr r3, [from]
- cmp r2, #10
+ cmp r2, #10
blt cs_b2f_2_u_loop
ldmdb from!, {r4 - r5}
mov r6, r4, lsr #16
@@ -402,7 +410,7 @@ cs_b2f_16_loop_u_finished:
orr r7, r7, r3, lsl #16
stmdb to!, {r6-r7}
sub r2, #8
- .align 3
+ .align 3
cs_b2f_2_u_loop:
subs r2, #2
ldrh r3, [from], #-2
@@ -426,7 +434,7 @@ _Copy_arrayof_conjoint_jshorts:
_Copy_conjoint_jints_atomic:
_Copy_arrayof_conjoint_jints:
swi 0x9f0001
-
+
# Support for void Copy::conjoint_jlongs_atomic(jlong* from,
# jlong* to,
# size_t count)
@@ -434,8 +442,8 @@ _Copy_conjoint_jlongs_atomic:
_Copy_arrayof_conjoint_jlongs:
stmdb sp!, {r3 - r9, ip}
- cmp r2, #0
- beq conjoint_longs_finish
+ cmp r2, #0
+ beq conjoint_longs_finish
pld [from, #0]
cmp r2, #24
@@ -447,10 +455,10 @@ _Copy_arrayof_conjoint_jlongs:
.align 3
cl_f2b_loop_32:
subs r2, #32
- blt cl_f2b_loop_32_finish
+ blt cl_f2b_loop_32_finish
ldmia from!, {r3 - r9, ip}
nop
- pld [from]
+ pld [from]
stmia to!, {r3 - r9, ip}
bgt cl_f2b_loop_32
cl_f2b_loop_32_finish:
@@ -458,21 +466,21 @@ cl_f2b_loop_32_finish:
beq conjoint_longs_finish
conjoint_longs_small:
cmp r2, #16
- blt cl_f2b_copy_8
- bgt cl_f2b_copy_24
+ blt cl_f2b_copy_8
+ bgt cl_f2b_copy_24
ldmia from!, {r3 - r6}
stmia to!, {r3 - r6}
- b conjoint_longs_finish
+ b conjoint_longs_finish
cl_f2b_copy_8:
ldmia from!, {r3 - r4}
stmia to!, {r3 - r4}
b conjoint_longs_finish
cl_f2b_copy_24:
- ldmia from!, {r3 - r8}
+ ldmia from!, {r3 - r8}
stmia to!, {r3 - r8}
b conjoint_longs_finish
- # Src and dest overlap, copy in a descending order
+ # Src and dest overlap, copy in a descending order
cl_b2f_copy:
add from, r2
pld [from, #-32]
@@ -480,31 +488,29 @@ cl_b2f_copy:
.align 3
cl_b2f_loop_32:
subs r2, #32
- blt cl_b2f_loop_32_finish
+ blt cl_b2f_loop_32_finish
ldmdb from!, {r3 - r9, ip}
nop
- pld [from]
+ pld [from]
stmdb to!, {r3 - r9, ip}
bgt cl_b2f_loop_32
cl_b2f_loop_32_finish:
addlts r2, #32
beq conjoint_longs_finish
cmp r2, #16
- blt cl_b2f_copy_8
- bgt cl_b2f_copy_24
+ blt cl_b2f_copy_8
+ bgt cl_b2f_copy_24
ldmdb from!, {r3 - r6}
stmdb to!, {r3 - r6}
b conjoint_longs_finish
cl_b2f_copy_8:
- ldmdb from!, {r3 - r4}
+ ldmdb from!, {r3 - r4}
stmdb to!, {r3 - r4}
b conjoint_longs_finish
cl_b2f_copy_24:
- ldmdb from!, {r3 - r8}
+ ldmdb from!, {r3 - r8}
stmdb to!, {r3 - r8}
conjoint_longs_finish:
ldmia sp!, {r3 - r9, ip}
bx lr
-
-
diff --git a/src/hotspot/os_cpu/linux_arm/os_linux_arm.cpp b/src/hotspot/os_cpu/linux_arm/os_linux_arm.cpp
index 86e8ed25618c1..551270588438e 100644
--- a/src/hotspot/os_cpu/linux_arm/os_linux_arm.cpp
+++ b/src/hotspot/os_cpu/linux_arm/os_linux_arm.cpp
@@ -25,7 +25,6 @@
// no precompiled headers
#include "asm/assembler.inline.hpp"
#include "classfile/vmSymbols.hpp"
-#include "code/icBuffer.hpp"
#include "code/vtableStubs.hpp"
#include "interpreter/interpreter.hpp"
#include "jvm.h"
diff --git a/src/hotspot/os_cpu/linux_arm/safefetch_linux_arm.S b/src/hotspot/os_cpu/linux_arm/safefetch_linux_arm.S
index 5196b199f05f6..07e90fa3079f2 100644
--- a/src/hotspot/os_cpu/linux_arm/safefetch_linux_arm.S
+++ b/src/hotspot/os_cpu/linux_arm/safefetch_linux_arm.S
@@ -1,6 +1,6 @@
/*
* Copyright (c) 2022 SAP SE. All rights reserved.
- * Copyright (c) 2022, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2022, 2024, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -26,6 +26,11 @@
.globl SafeFetch32_impl
.globl _SafeFetch32_fault
.globl _SafeFetch32_continuation
+
+ .hidden SafeFetch32_impl
+ .hidden _SafeFetch32_fault
+ .hidden _SafeFetch32_continuation
+
.type SafeFetch32_impl, %function
# Support for int SafeFetch32(int* address, int defaultval);
diff --git a/src/hotspot/os_cpu/linux_ppc/os_linux_ppc.cpp b/src/hotspot/os_cpu/linux_ppc/os_linux_ppc.cpp
index b570e3b6d7f12..0b666f29c312b 100644
--- a/src/hotspot/os_cpu/linux_ppc/os_linux_ppc.cpp
+++ b/src/hotspot/os_cpu/linux_ppc/os_linux_ppc.cpp
@@ -28,7 +28,6 @@
#include "asm/assembler.inline.hpp"
#include "classfile/vmSymbols.hpp"
#include "code/codeCache.hpp"
-#include "code/icBuffer.hpp"
#include "code/vtableStubs.hpp"
#include "interpreter/interpreter.hpp"
#include "jvm.h"
diff --git a/src/hotspot/os_cpu/linux_ppc/safefetch_linux_ppc.S b/src/hotspot/os_cpu/linux_ppc/safefetch_linux_ppc.S
index c8d20cc1b4328..8c96edf01b4d0 100644
--- a/src/hotspot/os_cpu/linux_ppc/safefetch_linux_ppc.S
+++ b/src/hotspot/os_cpu/linux_ppc/safefetch_linux_ppc.S
@@ -1,6 +1,6 @@
/*
* Copyright (c) 2022 SAP SE. All rights reserved.
- * Copyright (c) 2022, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2022, 2024, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -30,6 +30,13 @@
.globl _SafeFetch32_fault
.globl _SafeFetch32_continuation
+ .hidden SafeFetchN_impl
+ .hidden _SafeFetchN_fault
+ .hidden _SafeFetchN_continuation
+ .hidden SafeFetch32_impl
+ .hidden _SafeFetch32_fault
+ .hidden _SafeFetch32_continuation
+
# Support for int SafeFetch32(int* address, int defaultval);
#
# r3 : address
diff --git a/src/hotspot/os_cpu/linux_riscv/os_linux_riscv.cpp b/src/hotspot/os_cpu/linux_riscv/os_linux_riscv.cpp
index 282467bc9e096..9f13e2bdd2cb7 100644
--- a/src/hotspot/os_cpu/linux_riscv/os_linux_riscv.cpp
+++ b/src/hotspot/os_cpu/linux_riscv/os_linux_riscv.cpp
@@ -27,7 +27,6 @@
#include "asm/macroAssembler.hpp"
#include "classfile/vmSymbols.hpp"
#include "code/codeCache.hpp"
-#include "code/icBuffer.hpp"
#include "code/nativeInst.hpp"
#include "code/vtableStubs.hpp"
#include "interpreter/interpreter.hpp"
@@ -39,6 +38,7 @@
#include "prims/jvm_misc.hpp"
#include "runtime/arguments.hpp"
#include "runtime/frame.inline.hpp"
+#include "runtime/globals.hpp"
#include "runtime/interfaceSupport.inline.hpp"
#include "runtime/java.hpp"
#include "runtime/javaCalls.hpp"
@@ -406,6 +406,14 @@ static inline void atomic_copy64(const volatile void *src, volatile void *dst) {
extern "C" {
int SpinPause() {
+ if (UseZihintpause) {
+ // PAUSE is encoded as a FENCE instruction with pred=W, succ=0, fm=0, rd=x0, and rs1=x0.
+ // To do: __asm__ volatile("pause " : : : );
+ // Since we're currently not passing '-march=..._zihintpause' to the compiler,
+ // it will not recognize the "pause" instruction, hence the hard-coded instruction.
+ __asm__ volatile(".word 0x0100000f " : : : );
+ return 1;
+ }
return 0;
}
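A minimal standalone sketch (not JDK code; the check program and its names are illustrative assumptions) that decodes the hard-coded word 0x0100000f used in the SpinPause hunk above, confirming it is the Zihintpause PAUSE hint, i.e. a FENCE with fm=0, pred=W, succ=0, rs1=x0 and rd=x0, exactly as the comment states:

#include <cassert>
#include <cstdint>
#include <cstdio>

int main() {
  const uint32_t insn   = 0x0100000f;           // word emitted by SpinPause
  const uint32_t opcode = insn & 0x7f;          // bits [6:0]
  const uint32_t rd     = (insn >> 7)  & 0x1f;  // bits [11:7]
  const uint32_t funct3 = (insn >> 12) & 0x7;   // bits [14:12]
  const uint32_t rs1    = (insn >> 15) & 0x1f;  // bits [19:15]
  const uint32_t succ   = (insn >> 20) & 0xf;   // bits [23:20]
  const uint32_t pred   = (insn >> 24) & 0xf;   // bits [27:24]
  const uint32_t fm     = (insn >> 28) & 0xf;   // bits [31:28]
  assert(opcode == 0x0f && funct3 == 0);        // MISC-MEM / FENCE
  assert(fm == 0 && pred == 0x1 && succ == 0);  // pred = W only, succ = 0
  assert(rs1 == 0 && rd == 0);                  // both x0
  printf("0x%08x decodes to the PAUSE hint (FENCE pred=W, succ=0)\n", insn);
  return 0;
}

Building this with any C++ compiler and running it exits cleanly, which matches the comment's claim that the literal encodes the pause hint without requiring '-march=..._zihintpause'.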
diff --git a/src/hotspot/os_cpu/linux_riscv/safefetch_linux_riscv.S b/src/hotspot/os_cpu/linux_riscv/safefetch_linux_riscv.S
index ecf0bac6f9e78..150df7567bdb8 100644
--- a/src/hotspot/os_cpu/linux_riscv/safefetch_linux_riscv.S
+++ b/src/hotspot/os_cpu/linux_riscv/safefetch_linux_riscv.S
@@ -1,6 +1,6 @@
/*
* Copyright (c) 2022 SAP SE. All rights reserved.
- * Copyright (c) 2022, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2022, 2024, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -30,6 +30,13 @@
.globl _SafeFetch32_fault
.globl _SafeFetch32_continuation
+ .hidden SafeFetchN_impl
+ .hidden _SafeFetchN_fault
+ .hidden _SafeFetchN_continuation
+ .hidden SafeFetch32_impl
+ .hidden _SafeFetch32_fault
+ .hidden _SafeFetch32_continuation
+
# Support for int SafeFetch32(int* address, int defaultval);
#
# x10 (a0) : address
diff --git a/src/hotspot/os_cpu/linux_s390/os_linux_s390.cpp b/src/hotspot/os_cpu/linux_s390/os_linux_s390.cpp
index 033ea14ead6a4..5aa65e705d9ed 100644
--- a/src/hotspot/os_cpu/linux_s390/os_linux_s390.cpp
+++ b/src/hotspot/os_cpu/linux_s390/os_linux_s390.cpp
@@ -28,7 +28,6 @@
// no precompiled headers
#include "asm/assembler.inline.hpp"
#include "classfile/vmSymbols.hpp"
-#include "code/icBuffer.hpp"
#include "code/nativeInst.hpp"
#include "code/vtableStubs.hpp"
#include "compiler/disassembler.hpp"
diff --git a/src/hotspot/os_cpu/linux_s390/safefetch_linux_s390.S b/src/hotspot/os_cpu/linux_s390/safefetch_linux_s390.S
index 47fe82f5a278b..43d50c798e534 100644
--- a/src/hotspot/os_cpu/linux_s390/safefetch_linux_s390.S
+++ b/src/hotspot/os_cpu/linux_s390/safefetch_linux_s390.S
@@ -1,6 +1,6 @@
/*
* Copyright (c) 2022 SAP SE. All rights reserved.
- * Copyright (c) 2022, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2022, 2024, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -30,6 +30,13 @@
.globl _SafeFetch32_fault
.globl _SafeFetch32_continuation
+ .hidden SafeFetchN_impl
+ .hidden _SafeFetchN_fault
+ .hidden _SafeFetchN_continuation
+ .hidden SafeFetch32_impl
+ .hidden _SafeFetch32_fault
+ .hidden _SafeFetch32_continuation
+
# Support for int SafeFetch32(int* address, int defaultval);
#
# r2 : address
diff --git a/src/hotspot/os_cpu/linux_x86/linux_x86_32.S b/src/hotspot/os_cpu/linux_x86/linux_x86_32.S
index 344358172defd..e23cd2b9164ae 100644
--- a/src/hotspot/os_cpu/linux_x86/linux_x86_32.S
+++ b/src/hotspot/os_cpu/linux_x86/linux_x86_32.S
@@ -1,5 +1,5 @@
#
-# Copyright (c) 2004, 2022, Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2004, 2024, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it
@@ -21,28 +21,41 @@
# questions.
#
+ .globl SpinPause
# NOTE WELL! The _Copy functions are called directly
- # from server-compiler-generated code via CallLeafNoFP,
- # which means that they *must* either not use floating
- # point or use it in the same manner as does the server
- # compiler.
+ # from server-compiler-generated code via CallLeafNoFP,
+ # which means that they *must* either not use floating
+ # point or use it in the same manner as does the server
+ # compiler.
.globl _Copy_arrayof_conjoint_bytes
.globl _Copy_conjoint_jshorts_atomic
- .globl _Copy_arrayof_conjoint_jshorts
+ .globl _Copy_arrayof_conjoint_jshorts
.globl _Copy_conjoint_jints_atomic
.globl _Copy_arrayof_conjoint_jints
- .globl _Copy_conjoint_jlongs_atomic
- .globl _mmx_Copy_arrayof_conjoint_jshorts
+ .globl _Copy_conjoint_jlongs_atomic
+ .globl _mmx_Copy_arrayof_conjoint_jshorts
.globl _Atomic_cmpxchg_long
.globl _Atomic_move_long
- .text
+ .hidden SpinPause
- .globl SpinPause
- .type SpinPause,@function
+ .hidden _Copy_arrayof_conjoint_bytes
+ .hidden _Copy_conjoint_jshorts_atomic
+ .hidden _Copy_arrayof_conjoint_jshorts
+ .hidden _Copy_conjoint_jints_atomic
+ .hidden _Copy_arrayof_conjoint_jints
+ .hidden _Copy_conjoint_jlongs_atomic
+ .hidden _mmx_Copy_arrayof_conjoint_jshorts
+
+ .hidden _Atomic_cmpxchg_long
+ .hidden _Atomic_move_long
+
+ .text
+
+ .type SpinPause,@function
.p2align 4,,15
SpinPause:
rep
@@ -55,7 +68,7 @@ SpinPause:
# size_t count)
#
.p2align 4,,15
- .type _Copy_arrayof_conjoint_bytes,@function
+ .type _Copy_arrayof_conjoint_bytes,@function
_Copy_arrayof_conjoint_bytes:
pushl %esi
movl 4+12(%esp),%ecx # count
@@ -115,7 +128,7 @@ acb_CopyLeft:
jbe 2f # <= 32 dwords
rep; smovl
jmp 4f
- .space 8
+ .space 8
2: subl %esi,%edi
.p2align 4,,15
3: movl (%esi),%edx
@@ -131,7 +144,7 @@ acb_CopyLeft:
addl $3,%esi
6: movb (%esi),%dl
movb %dl,(%edi,%esi,1)
- subl $1,%esi
+ subl $1,%esi
subl $1,%ecx
jnz 6b
7: cld
@@ -143,7 +156,7 @@ acb_CopyLeft:
# void* to,
# size_t count)
.p2align 4,,15
- .type _Copy_conjoint_jshorts_atomic,@function
+ .type _Copy_conjoint_jshorts_atomic,@function
_Copy_conjoint_jshorts_atomic:
pushl %esi
movl 4+12(%esp),%ecx # count
@@ -230,7 +243,7 @@ cs_CopyLeft:
# void* to,
# size_t count)
.p2align 4,,15
- .type _Copy_arrayof_conjoint_jshorts,@function
+ .type _Copy_arrayof_conjoint_jshorts,@function
_Copy_arrayof_conjoint_jshorts:
pushl %esi
movl 4+12(%esp),%ecx # count
@@ -307,8 +320,8 @@ acs_CopyLeft:
# Equivalent to
# arrayof_conjoint_jints
.p2align 4,,15
- .type _Copy_conjoint_jints_atomic,@function
- .type _Copy_arrayof_conjoint_jints,@function
+ .type _Copy_conjoint_jints_atomic,@function
+ .type _Copy_arrayof_conjoint_jints,@function
_Copy_conjoint_jints_atomic:
_Copy_arrayof_conjoint_jints:
pushl %esi
@@ -384,7 +397,7 @@ ci_CopyLeft:
# }
*/
.p2align 4,,15
- .type _Copy_conjoint_jlongs_atomic,@function
+ .type _Copy_conjoint_jlongs_atomic,@function
_Copy_conjoint_jlongs_atomic:
movl 4+8(%esp),%ecx # count
movl 4+0(%esp),%eax # from
@@ -413,7 +426,7 @@ cla_CopyLeft:
# void* to,
# size_t count)
.p2align 4,,15
- .type _mmx_Copy_arrayof_conjoint_jshorts,@function
+ .type _mmx_Copy_arrayof_conjoint_jshorts,@function
_mmx_Copy_arrayof_conjoint_jshorts:
pushl %esi
movl 4+12(%esp),%ecx
@@ -465,8 +478,8 @@ mmx_acs_CopyRight:
cmpl $16,%ecx
jge 4b
emms
- testl %ecx,%ecx
- ja 1b
+ testl %ecx,%ecx
+ ja 1b
5: andl $1,%eax
je 7f
6: movw (%esi),%dx
@@ -511,7 +524,7 @@ mmx_acs_CopyLeft:
# jlong exchange_value)
#
.p2align 4,,15
- .type _Atomic_cmpxchg_long,@function
+ .type _Atomic_cmpxchg_long,@function
_Atomic_cmpxchg_long:
# 8(%esp) : return PC
pushl %ebx # 4(%esp) : old %ebx
@@ -530,7 +543,7 @@ _Atomic_cmpxchg_long:
# Support for jlong Atomic::load and Atomic::store.
# void _Atomic_move_long(const volatile jlong* src, volatile jlong* dst)
.p2align 4,,15
- .type _Atomic_move_long,@function
+ .type _Atomic_move_long,@function
_Atomic_move_long:
movl 4(%esp), %eax # src
fildll (%eax)
diff --git a/src/hotspot/os_cpu/linux_x86/linux_x86_64.S b/src/hotspot/os_cpu/linux_x86/linux_x86_64.S
index 89d98cb583786..65580a194afab 100644
--- a/src/hotspot/os_cpu/linux_x86/linux_x86_64.S
+++ b/src/hotspot/os_cpu/linux_x86/linux_x86_64.S
@@ -1,5 +1,5 @@
-#
-# Copyright (c) 2004, 2013, Oracle and/or its affiliates. All rights reserved.
+#
+# Copyright (c) 2004, 2024, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it
@@ -21,24 +21,34 @@
# questions.
#
+ .globl SpinPause
# NOTE WELL! The _Copy functions are called directly
- # from server-compiler-generated code via CallLeafNoFP,
- # which means that they *must* either not use floating
- # point or use it in the same manner as does the server
- # compiler.
-
+ # from server-compiler-generated code via CallLeafNoFP,
+ # which means that they *must* either not use floating
+ # point or use it in the same manner as does the server
+ # compiler.
+
.globl _Copy_arrayof_conjoint_bytes
- .globl _Copy_arrayof_conjoint_jshorts
+ .globl _Copy_arrayof_conjoint_jshorts
.globl _Copy_conjoint_jshorts_atomic
.globl _Copy_arrayof_conjoint_jints
.globl _Copy_conjoint_jints_atomic
.globl _Copy_arrayof_conjoint_jlongs
.globl _Copy_conjoint_jlongs_atomic
- .text
+ .hidden SpinPause
+
+ .hidden _Copy_arrayof_conjoint_bytes
+ .hidden _Copy_arrayof_conjoint_jshorts
+ .hidden _Copy_conjoint_jshorts_atomic
+ .hidden _Copy_arrayof_conjoint_jints
+ .hidden _Copy_conjoint_jints_atomic
+ .hidden _Copy_arrayof_conjoint_jlongs
+ .hidden _Copy_conjoint_jlongs_atomic
+
+ .text
- .globl SpinPause
.align 16
.type SpinPause,@function
SpinPause:
@@ -55,7 +65,7 @@ SpinPause:
# rdx - count, treated as ssize_t
#
.p2align 4,,15
- .type _Copy_arrayof_conjoint_bytes,@function
+ .type _Copy_arrayof_conjoint_bytes,@function
_Copy_arrayof_conjoint_bytes:
movq %rdx,%r8 # byte count
shrq $3,%rdx # qword count
@@ -63,7 +73,7 @@ _Copy_arrayof_conjoint_bytes:
leaq -1(%rdi,%r8,1),%rax # from + bcount*1 - 1
jbe acb_CopyRight
cmpq %rax,%rsi
- jbe acb_CopyLeft
+ jbe acb_CopyLeft
acb_CopyRight:
leaq -8(%rdi,%rdx,8),%rax # from + qcount*8 - 8
leaq -8(%rsi,%rdx,8),%rcx # to + qcount*8 - 8
@@ -157,8 +167,8 @@ acb_CopyLeft:
# rdx - count, treated as ssize_t
#
.p2align 4,,15
- .type _Copy_arrayof_conjoint_jshorts,@function
- .type _Copy_conjoint_jshorts_atomic,@function
+ .type _Copy_arrayof_conjoint_jshorts,@function
+ .type _Copy_conjoint_jshorts_atomic,@function
_Copy_arrayof_conjoint_jshorts:
_Copy_conjoint_jshorts_atomic:
movq %rdx,%r8 # word count
@@ -167,7 +177,7 @@ _Copy_conjoint_jshorts_atomic:
leaq -2(%rdi,%r8,2),%rax # from + wcount*2 - 2
jbe acs_CopyRight
cmpq %rax,%rsi
- jbe acs_CopyLeft
+ jbe acs_CopyLeft
acs_CopyRight:
leaq -8(%rdi,%rdx,8),%rax # from + qcount*8 - 8
leaq -8(%rsi,%rdx,8),%rcx # to + qcount*8 - 8
@@ -247,8 +257,8 @@ acs_CopyLeft:
# rdx - count, treated as ssize_t
#
.p2align 4,,15
- .type _Copy_arrayof_conjoint_jints,@function
- .type _Copy_conjoint_jints_atomic,@function
+ .type _Copy_arrayof_conjoint_jints,@function
+ .type _Copy_conjoint_jints_atomic,@function
_Copy_arrayof_conjoint_jints:
_Copy_conjoint_jints_atomic:
movq %rdx,%r8 # dword count
@@ -257,7 +267,7 @@ _Copy_conjoint_jints_atomic:
leaq -4(%rdi,%r8,4),%rax # from + dcount*4 - 4
jbe aci_CopyRight
cmpq %rax,%rsi
- jbe aci_CopyLeft
+ jbe aci_CopyLeft
aci_CopyRight:
leaq -8(%rdi,%rdx,8),%rax # from + qcount*8 - 8
leaq -8(%rsi,%rdx,8),%rcx # to + qcount*8 - 8
@@ -326,15 +336,15 @@ aci_CopyLeft:
# rdx - count, treated as ssize_t
#
.p2align 4,,15
- .type _Copy_arrayof_conjoint_jlongs,@function
- .type _Copy_conjoint_jlongs_atomic,@function
+ .type _Copy_arrayof_conjoint_jlongs,@function
+ .type _Copy_conjoint_jlongs_atomic,@function
_Copy_arrayof_conjoint_jlongs:
_Copy_conjoint_jlongs_atomic:
cmpq %rdi,%rsi
leaq -8(%rdi,%rdx,8),%rax # from + count*8 - 8
jbe acl_CopyRight
cmpq %rax,%rsi
- jbe acl_CopyLeft
+ jbe acl_CopyLeft
acl_CopyRight:
leaq -8(%rsi,%rdx,8),%rcx # to + count*8 - 8
negq %rdx
diff --git a/src/hotspot/os_cpu/linux_x86/os_linux_x86.cpp b/src/hotspot/os_cpu/linux_x86/os_linux_x86.cpp
index b211330409d59..4dcaedf71da8c 100644
--- a/src/hotspot/os_cpu/linux_x86/os_linux_x86.cpp
+++ b/src/hotspot/os_cpu/linux_x86/os_linux_x86.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1999, 2023, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2024, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -26,7 +26,6 @@
#include "asm/macroAssembler.hpp"
#include "classfile/vmSymbols.hpp"
#include "code/codeCache.hpp"
-#include "code/icBuffer.hpp"
#include "code/vtableStubs.hpp"
#include "interpreter/interpreter.hpp"
#include "jvm.h"
@@ -165,7 +164,7 @@ frame os::get_sender_for_C_frame(frame* fr) {
return frame(fr->sender_sp(), fr->link(), fr->sender_pc());
}
-intptr_t* _get_previous_fp() {
+static intptr_t* _get_previous_fp() {
#if defined(__clang__)
intptr_t **ebp;
__asm__ __volatile__ ("mov %%" SPELL_REG_FP ", %0":"=r"(ebp):);
diff --git a/src/hotspot/os_cpu/linux_x86/safefetch_linux_x86_32.S b/src/hotspot/os_cpu/linux_x86/safefetch_linux_x86_32.S
index 492b1207db6e2..54775cb7e8ede 100644
--- a/src/hotspot/os_cpu/linux_x86/safefetch_linux_x86_32.S
+++ b/src/hotspot/os_cpu/linux_x86/safefetch_linux_x86_32.S
@@ -1,6 +1,6 @@
#
# Copyright (c) 2022 SAP SE. All rights reserved.
-# Copyright (c) 2022, Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2022, 2024, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it
@@ -25,7 +25,11 @@
.globl _SafeFetch32_fault
.globl _SafeFetch32_continuation
- .text
+ .hidden SafeFetch32_impl
+ .hidden _SafeFetch32_fault
+ .hidden _SafeFetch32_continuation
+
+ .text
# Support for int SafeFetch32(int* address, int defaultval);
#
diff --git a/src/hotspot/os_cpu/linux_x86/safefetch_linux_x86_64.S b/src/hotspot/os_cpu/linux_x86/safefetch_linux_x86_64.S
index 617851e8327d4..1937e71708897 100644
--- a/src/hotspot/os_cpu/linux_x86/safefetch_linux_x86_64.S
+++ b/src/hotspot/os_cpu/linux_x86/safefetch_linux_x86_64.S
@@ -1,6 +1,6 @@
#
# Copyright (c) 2022 SAP SE. All rights reserved.
-# Copyright (c) 2022, Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2022, 2024, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it
@@ -28,7 +28,14 @@
.globl _SafeFetch32_continuation
.globl _SafeFetchN_continuation
- .text
+ .hidden SafeFetch32_impl
+ .hidden SafeFetchN_impl
+ .hidden _SafeFetch32_fault
+ .hidden _SafeFetchN_fault
+ .hidden _SafeFetch32_continuation
+ .hidden _SafeFetchN_continuation
+
+ .text
# Support for int SafeFetch32(int* address, int defaultval);
diff --git a/src/hotspot/os_cpu/linux_zero/os_linux_zero.cpp b/src/hotspot/os_cpu/linux_zero/os_linux_zero.cpp
index 1ce73588524c1..d593c46d15d91 100644
--- a/src/hotspot/os_cpu/linux_zero/os_linux_zero.cpp
+++ b/src/hotspot/os_cpu/linux_zero/os_linux_zero.cpp
@@ -27,7 +27,6 @@
#include "asm/assembler.inline.hpp"
#include "atomic_linux_zero.hpp"
#include "classfile/vmSymbols.hpp"
-#include "code/icBuffer.hpp"
#include "code/vtableStubs.hpp"
#include "interpreter/interpreter.hpp"
#include "jvm.h"
diff --git a/src/hotspot/os_cpu/windows_aarch64/os_windows_aarch64.cpp b/src/hotspot/os_cpu/windows_aarch64/os_windows_aarch64.cpp
index 46f718a9cd0f5..78e98609b6bdc 100644
--- a/src/hotspot/os_cpu/windows_aarch64/os_windows_aarch64.cpp
+++ b/src/hotspot/os_cpu/windows_aarch64/os_windows_aarch64.cpp
@@ -27,7 +27,6 @@
#include "asm/macroAssembler.hpp"
#include "classfile/vmSymbols.hpp"
#include "code/codeCache.hpp"
-#include "code/icBuffer.hpp"
#include "code/vtableStubs.hpp"
#include "code/nativeInst.hpp"
#include "interpreter/interpreter.hpp"
diff --git a/src/hotspot/os_cpu/windows_x86/os_windows_x86.cpp b/src/hotspot/os_cpu/windows_x86/os_windows_x86.cpp
index 4e18334315a37..7e0814c014bec 100644
--- a/src/hotspot/os_cpu/windows_x86/os_windows_x86.cpp
+++ b/src/hotspot/os_cpu/windows_x86/os_windows_x86.cpp
@@ -25,7 +25,6 @@
// no precompiled headers
#include "asm/macroAssembler.hpp"
#include "classfile/vmSymbols.hpp"
-#include "code/icBuffer.hpp"
#include "code/vtableStubs.hpp"
#include "interpreter/interpreter.hpp"
#include "jvm.h"
diff --git a/src/hotspot/share/adlc/archDesc.cpp b/src/hotspot/share/adlc/archDesc.cpp
index d27bf0865608a..93fa7451dc0b9 100644
--- a/src/hotspot/share/adlc/archDesc.cpp
+++ b/src/hotspot/share/adlc/archDesc.cpp
@@ -1,5 +1,5 @@
//
-// Copyright (c) 1997, 2023, Oracle and/or its affiliates. All rights reserved.
+// Copyright (c) 1997, 2024, Oracle and/or its affiliates. All rights reserved.
// DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
//
// This code is free software; you can redistribute it and/or modify it
@@ -24,6 +24,7 @@
// archDesc.cpp - Internal format for architecture definition
+#include <unordered_set>
#include "adlc.hpp"
static FILE *errfile = stderr;
@@ -684,6 +685,98 @@ bool ArchDesc::verify() {
return true;
}
+class MarkUsageFormClosure : public FormClosure {
+private:
+ ArchDesc* _ad;
+ std::unordered_set