Compare commits

...

148 Commits

Author SHA1 Message Date
rwestrel
2d1b109605 reviews 2025-06-12 17:33:14 +02:00
rwestrel
c04cefbc06 Merge branch 'master' into JDK-8356708 2025-06-12 17:14:01 +02:00
Roland Westrelin
c13cecdf4b
Update src/hotspot/share/opto/loopnode.cpp
Co-authored-by: Emanuel Peter <emanuel.peter@oracle.com>
2025-06-12 17:12:58 +02:00
Roland Westrelin
e476afde44
Update src/hotspot/share/opto/loopnode.cpp
Co-authored-by: Roberto Castañeda Lozano <robcasloz@users.noreply.github.com>
2025-06-12 17:12:38 +02:00
Roland Westrelin
817a391b34
Update src/hotspot/share/opto/loopnode.cpp
Co-authored-by: Roberto Castañeda Lozano <robcasloz@users.noreply.github.com>
2025-06-12 17:12:27 +02:00
Afshin Zafari
fae9c7a3f0 8351661: NMT: VMATree should support separate call-stacks for reserve and commit operations
Reviewed-by: gziemski, jsjolen
2025-06-12 14:46:39 +00:00
Emanuel Peter
dd68829017 8347273: C2: VerifyIterativeGVN for Ideal and Identity
Reviewed-by: chagedorn, mhaessig
2025-06-12 14:19:08 +00:00
Emanuel Peter
b85fe02be5 8358600: Template-Framework Library: Template for TestFramework test class
Reviewed-by: chagedorn, mhaessig
2025-06-12 14:12:14 +00:00
Radim Vansa
e18277b470 8352075: Perf regression accessing fields
Reviewed-by: coleenp, iklam, jsjolen
2025-06-12 12:29:15 +00:00
Rohitash Kumar
e5ce5c57c8 8357959: (bf) ByteBuffer.allocateDirect initialization can result in large TTSP spikes
Reviewed-by: shade, alanb
2025-06-12 12:23:42 +00:00
kabutz
91fdd72c97 8355726: LinkedBlockingDeque fixes and improvements
Reviewed-by: vklang, dl
2025-06-12 11:44:04 +00:00
Marc Chevalier
b6ec93b038 8359121: C2: Region added by vectorizedMismatch intrinsic can survive as a dead node after IGVN
Reviewed-by: thartmann, chagedorn
2025-06-12 11:40:31 +00:00
Anjian Wen
65e63b6ab4 8359218: RISC-V: Only enable CRC32 intrinsic when AvoidUnalignedAccess == false
Reviewed-by: fyang, fjiang
2025-06-12 10:44:47 +00:00
Johannes Bechberger
3f0fef2c9c 8359135: New test TestCPUTimeSampleThrottling fails intermittently
Reviewed-by: mdoerr
2025-06-12 08:54:21 +00:00
SendaoYan
3e0ef832cc 8359083: Test jdkCheckHtml.java should report SkippedException rather than report fails when miss tidy
Reviewed-by: hannesw
2025-06-12 08:18:00 +00:00
SendaoYan
7b7136b4ec 8359181: Error messages generated by configure --help after 8301197
Reviewed-by: erikj, ihse
2025-06-12 08:10:27 +00:00
SendaoYan
5886ef728f 8359182: Use @requires instead of SkippedException for MaxPath.java
Reviewed-by: bpb, bchristi
2025-06-12 07:51:29 +00:00
Matthias Baesken
d7aa349820 8357826: Avoid running some jtreg tests when asan is configured
Reviewed-by: sspitsyn, amitkumar, lmesnik, syan, lucy, cjplummer
2025-06-12 07:08:39 +00:00
Ioi Lam
3b32f6a8ec 8344556: [Graal] compiler/intrinsics/bmi/* fail when AOTCache cannot be loaded
Reviewed-by: dnsimon, kvn
2025-06-12 00:41:39 +00:00
Serguei Spitsyn
8f73357004 8358815: Exception event spec has stale reference to catch_klass parameter
Reviewed-by: cjplummer, alanb
2025-06-11 18:51:54 +00:00
Calvin Cheung
429158218b 8357382: runtime/cds/appcds/aotClassLinking/BulkLoaderTest.java#aot fails with Xcomp and C1
Reviewed-by: iklam, kvn
2025-06-11 18:10:34 +00:00
Mohamed Issa
ef4cbec6fb 8358556: Assert when running with -XX:-UseLibmIntrinsic
Reviewed-by: sviswanathan, kvn
2025-06-11 17:47:03 +00:00
Naoto Sato
e9216efefc 8358734: Remove JavaTimeSupplementary resource bundles
Reviewed-by: jlu, joehw, iris
2025-06-11 16:04:26 +00:00
Alan Bateman
e5196fc24d 8358764: (sc) SocketChannel.close when thread blocked in read causes connection to be reset (win)
Reviewed-by: jpai, vyazici
2025-06-11 14:09:45 +00:00
Jatin Bhateja
c98dffa186 8357982: Fix several failing BMI tests with -XX:+UseAPX
Reviewed-by: epeter, sviswanathan
2025-06-11 13:48:56 +00:00
Matthias Baesken
7d7fc69355 8357570: [macOS] os::Bsd::available_memory() might return too low values
Reviewed-by: clanger, mdoerr, lucy
2025-06-11 13:32:57 +00:00
Anton Artemov
42ab8fcfb9 8265754: Move suspend/resume API from HandshakeState
Reviewed-by: coleenp, dholmes, pchilanomate
2025-06-11 12:42:57 +00:00
Benoît Maillard
bf7d40d048 8356751: IGV: clean up redundant field _should_send_method
Co-authored-by: Manuel Hässig <mhaessig@openjdk.org>
Reviewed-by: mhaessig, thartmann, dfenacci
2025-06-11 11:08:38 +00:00
Khalid Boulanouare
5ae32c4c86 8352149: Test java/awt/Frame/MultiScreenTest.java fails: Window list is empty
Reviewed-by: aivanov, abhiscxk
2025-06-11 10:25:28 +00:00
Martin Doerr
56ce70c5df 8359165: AIX build broken after 8358799
Reviewed-by: kbarrett, jkern
2025-06-11 08:28:48 +00:00
Martin Doerr
abc76c6b5b 8359126: [AIX] new test TestImplicitNullChecks.java fails
Reviewed-by: rcastanedalo, dbriemann
2025-06-11 08:28:31 +00:00
Rajan Halade
9586817cea 8359170: Add 2 TLS and 2 CS Sectigo roots
Reviewed-by: mullan
2025-06-10 21:59:29 +00:00
Albert Mingkun Yang
38b877e941 8358294: Remove unnecessary GenAlignment
Reviewed-by: iwalulya, tschatzl
2025-06-10 20:10:19 +00:00
Alex Menkov
8f487d26c0 8358577: Test serviceability/jvmti/thread/GetCurrentContendedMonitor/contmon01/contmon01.java failed: unexpexcted monitor object
Reviewed-by: cjplummer, syan, sspitsyn
2025-06-10 19:05:08 +00:00
Calvin Cheung
500a3a2d0a 8358799: Refactor os::jvm_path()
Reviewed-by: dholmes, jsjolen
2025-06-10 16:20:33 +00:00
Roland Westrelin
a2f99fd88b 8354383: C2: enable sinking of Type nodes out of loop
Reviewed-by: chagedorn, thartmann
2025-06-10 14:19:19 +00:00
Daniel Fuchs
0582bd290d 8357639: DigestEchoClient fails intermittently due to: java.io.IOException: Data received while in pool
Reviewed-by: djelinski
2025-06-10 11:01:50 +00:00
Varada M
3ff83ec49e 8358159: Empty mode/padding in cipher transformations
Reviewed-by: amitkumar, valeriep
2025-06-10 08:17:52 +00:00
Benoît Maillard
7c9c8ba363 8356780: PhaseMacroExpand::_has_locks is unused
Reviewed-by: mhaessig, chagedorn, kvn, mchevalier
2025-06-10 07:27:10 +00:00
Aleksey Shipilev
ca7b885873 8358749: Fix input checks in Vector API intrinsics
Co-authored-by: Vladimir Ivanov <vlivanov@openjdk.org>
Reviewed-by: vlivanov, sviswanathan
2025-06-10 06:15:13 +00:00
Matthias Bläsing
92be7821f5 8353950: Clipboard interaction on Windows is unstable
8332271: Reading data from the clipboard from multiple threads crashes the JVM

Reviewed-by: abhiscxk, dnguyen
2025-06-10 00:21:18 +00:00
David Holmes
bcf860703d 8355792: Remove expired flags in JDK 26
Reviewed-by: coleenp, kvn
2025-06-09 22:25:20 +00:00
Ioi Lam
d186dacdb7 8357591: Re-enable CDS test cases for jvmci after JDK-8345826
Reviewed-by: dholmes, kvn
2025-06-09 21:54:55 +00:00
David Holmes
ef45c8154c 8346237: Obsolete the UseOprofile flag
Reviewed-by: coleenp, kvn
2025-06-09 20:59:30 +00:00
Justin Lu
cd9b1bc820 8358426: Improve lazy computation in Locale
Reviewed-by: naoto, liach
2025-06-09 20:49:33 +00:00
Naoto Sato
fcb68ea22d 8358626: Emit UTF-8 CLDR resources
Reviewed-by: erikj, vyazici
2025-06-09 19:03:21 +00:00
Coleen Phillimore
eb256deb80 8358326: Use oopFactory array allocation
Reviewed-by: fparain, stefank
2025-06-09 18:33:00 +00:00
Magnus Ihse Bursie
156187accc 8356978: Convert unicode sequences in Java source code to UTF-8
Co-authored-by: Alexey Ivanov <aivanov@openjdk.org>
Reviewed-by: naoto, prr, joehw
2025-06-09 17:58:49 +00:00
kieran-farrell
a377773fa7 8358617: java/net/HttpURLConnection/HttpURLConnectionExpectContinueTest.java fails with 403 due to system proxies
Reviewed-by: dfuchs
2025-06-09 17:39:39 +00:00
Jiangli Zhou
cae1fd3385 8357632: CDS test failures on static JDK
Reviewed-by: ccheung, dholmes
2025-06-09 16:08:18 +00:00
Phil Race
eb8ee8bdc7 8358731: Remove jdk.internal.access.JavaAWTAccess.java
Reviewed-by: dfuchs, serb
2025-06-09 16:01:18 +00:00
Alexander Zvegintsev
2103dc15cb 8358452: JNI exception pending in Java_sun_awt_screencast_ScreencastHelper_remoteDesktopKeyImpl of screencast_pipewire.c:1214 (ID: 51119)
Reviewed-by: psadhukhan, serb, aivanov, avu
2025-06-09 13:35:01 +00:00
Joel Sikström
1c72b350e4 8357053: ZGC: Improved utility for ZPageAge
Co-authored-by: Axel Boldt-Christmas <aboldtch@openjdk.org>
Reviewed-by: sjohanss, stefank
2025-06-09 09:03:12 +00:00
Per Minborg
52338c94f6 8358520: Improve lazy computation in BreakIteratorResourceBundle and related classes
Reviewed-by: naoto, jlu
2025-06-09 07:00:51 +00:00
Roberto Castañeda Lozano
91f12600d2 8345067: C2: enable implicit null checks for ZGC reads
Reviewed-by: aboldtch, kvn, epeter
2025-06-09 06:23:17 +00:00
Daniel Skantz
6c616c71ec 8357822: C2: Multiple string optimization tests are no longer testing string concatenation optimizations
Reviewed-by: rcastanedalo, epeter
2025-06-09 06:11:05 +00:00
Kim Barrett
e94ad551c6 8342639: Global operator new in adlc has wrong exception spec
Reviewed-by: kvn, mdoerr
2025-06-07 20:34:34 +00:00
Rajan Halade
d735255919 8345414: Google CAInterop test failures
Reviewed-by: weijun
Backport-of: 8e9ba788ae04a9a617a393709bf2c51a0c157206
2025-06-06 21:35:21 +00:00
Stuart Marks
d024f58e61 8358809: Improve link to stdin.encoding from java.lang.IO
Reviewed-by: naoto
2025-06-06 20:07:43 +00:00
Alexandre Iline
026975a1aa 8358721: Update JCov for class file version 70
Reviewed-by: iris, alanb, erikj
2025-06-06 15:05:43 +00:00
Vicente Romero
8adb052b46 8341778: Some javac tests ignore the result of JavacTask::call
Reviewed-by: shade
2025-06-06 14:11:27 +00:00
Hamlin Li
9658cecde3 8358685: [TEST] AOTLoggingTag.java failed with missing log message
Reviewed-by: iklam, shade
2025-06-06 13:59:17 +00:00
Fernando Guallini
b2e7cda6a0 8358171: Additional code coverage for PEM API
Reviewed-by: ascarpino
2025-06-06 09:53:25 +00:00
Harald Eilertsen
65fda5c02a 8358593: Add ucontext accessors for *BSD on Aarch64
Co-authored-by: Greg Lewis <glewis@eyesbeyond.com>
Co-authored-by: Kurt Miller <bsdkurt@gmail.com>
Reviewed-by: aph
2025-06-06 08:16:37 +00:00
Benoît Maillard
d1b788005b 8357951: Remove the IdealLoopTree* loop parameter from PhaseIdealLoop::loop_iv_phi
Reviewed-by: thartmann, mhaessig
2025-06-06 08:16:15 +00:00
Volkan Yazici
bb2611ad43 8357993: Use "stdin.encoding" for reading System.in with InputStreamReader/Scanner [hotspot]
Reviewed-by: cjplummer, sspitsyn
2025-06-06 06:53:10 +00:00
Volkan Yazici
e918a59b1d 8357821: Revert incorrectly named JavaLangAccess::unchecked* methods
Reviewed-by: pminborg
2025-06-06 06:26:09 +00:00
Amit Kumar
28acca609b 8358653: [s390] Clean up comments regarding frame manager
Reviewed-by: mdoerr
2025-06-06 03:50:06 +00:00
Jaikiran Pai
029e3bf8f5 8349914: ZipFile::entries and ZipFile::getInputStream not consistent with each other when there are duplicate entries
Co-authored-by: Lance Andersen <lancea@openjdk.org>
Reviewed-by: lancea
2025-06-06 02:07:51 +00:00
Anthony Scarpino
78158f30ae 8358099: PEM spec updates
Reviewed-by: mullan
2025-06-05 22:13:24 +00:00
Archie Cobbs
c793de989f 8350212: Track source end positions of declarations that support @SuppressWarnings
Co-authored-by: Jan Lahoda <jlahoda@openjdk.org>
Reviewed-by: mcimadamore
2025-06-05 21:57:33 +00:00
Andrey Turbanov
15178aa298 8357688: Remove unnecessary List.get before remove in PopupFactory
Reviewed-by: azvegint, kizune, serb
2025-06-05 20:19:53 +00:00
Christian Stein
fe3be498b8 8357141: Update to use jtreg 7.5.2
Reviewed-by: erikj, ihse, iris
2025-06-05 17:30:01 +00:00
Cesar Soares Lucas
62fde68708 8357396: Refactor nmethod::make_not_entrant to use Enum instead of "const char*"
Reviewed-by: mhaessig, shade
2025-06-05 16:43:29 +00:00
Nizar Benalla
af87035b71 8355746: Start of release updates for JDK 26
8355748: Add SourceVersion.RELEASE_26
8355751: Add source 26 and target 26 to javac

Co-authored-by: Joe Darcy <darcy@openjdk.org>
Reviewed-by: iris, coleenp, darcy
2025-06-05 16:01:32 +00:00
Patricio Chilano Mateo
c59e44a7aa 8357914: TestEmptyBootstrapMethodsAttr.java fails when run with TEST_THREAD_FACTORY=Virtual
Reviewed-by: lmesnik, dholmes, sspitsyn, syan
2025-06-05 15:02:02 +00:00
Dmitry Chuyko
23f1d4f9a9 8337666: AArch64: SHA3 GPR intrinsic
Reviewed-by: aph
2025-06-05 14:28:27 +00:00
Erik Gahlin
33ed7c1842 8358689: test/micro/org/openjdk/bench/java/net/SocketEventOverhead.java does not build after JDK-8351594
Reviewed-by: alanb
2025-06-05 13:08:48 +00:00
Viktor Klang
782bbca439 8358633: Test ThreadPoolExecutorTest::testTimedInvokeAnyNullTimeUnit is broken by JDK-8347491
Reviewed-by: alanb
2025-06-05 12:04:57 +00:00
Erik Gahlin
6cdfd36ac8 8358590: JFR: Include min and max in MethodTiming event
Reviewed-by: mgronlun
2025-06-05 11:42:31 +00:00
Erik Gahlin
eb770a060a 8351594: JFR: Rate-limited sampling of Java events
Reviewed-by: mgronlun, alanb
2025-06-05 11:36:08 +00:00
Nizar Benalla
c5daf89053 8349369: test/docs/jdk/javadoc/doccheck/checks/jdkCheckLinks.java did not report on missing man page files
Reviewed-by: hannesw
2025-06-05 11:05:52 +00:00
Nizar Benalla
bd08932d5b 8356633: Incorrect use of {@link} in jdk.jshell
Reviewed-by: rgiulietti, vyazici
2025-06-05 10:31:23 +00:00
Markus Grönlund
d450e341c7 8357962: JFR Cooperative Sampling reveals inconsistent interpreter frames as part of JVMTI PopFrame
Reviewed-by: dholmes, eosterlund
2025-06-05 10:14:41 +00:00
Magnus Ihse Bursie
66feb490bd 8358543: Remove CommentChecker.java and DirDiff.java
Reviewed-by: erikj
2025-06-05 09:30:44 +00:00
Aleksey Shipilev
dc949003de 8358588: ThreadSnapshot.ThreadLock should be static nested class
Reviewed-by: alanb, sspitsyn, amenkov
2025-06-05 09:02:23 +00:00
Johannes Bechberger
ace70a6d6a 8358666: [REDO] Implement JEP 509: JFR CPU-Time Profiling
Reviewed-by: mgronlun
2025-06-05 08:18:18 +00:00
Dingli Zhang
48b97ac0e0 8358634: RISC-V: Fix several broken documentation web-links
Reviewed-by: fyang
2025-06-05 07:34:48 +00:00
Jaikiran Pai
08023481ed 8358558: (zipfs) Reorder the listing of "accessMode" property in the ZIP file system's documentation
Reviewed-by: dfuchs, vyazici, alanb, lancea
2025-06-05 04:24:05 +00:00
Vladimir Kozlov
849655a145 8358632: [asan] reports heap-buffer-overflow in AOTCodeCache::copy_bytes
Reviewed-by: vlivanov, iveresov
2025-06-05 03:25:46 +00:00
Hannes Greule
575806c0e5 8358078: javap crashes with NPE on preview class file
Reviewed-by: liach
2025-06-05 01:41:21 +00:00
David Holmes
8f8b367ae3 8350029: Illegal invokespecial interface not caught by verification
Reviewed-by: coleenp, matsaave
2025-06-05 00:35:26 +00:00
Markus Grönlund
9186cc7310 8358628: [BACKOUT] 8342818: Implement JEP 509: JFR CPU-Time Profiling
Reviewed-by: pchilanomate, dholmes
2025-06-04 23:55:18 +00:00
Magnus Ihse Bursie
b787ff6def 8358538: Update GHA Windows runner to 2025
Reviewed-by: shade
2025-06-04 23:19:33 +00:00
Johannes Bechberger
5b27e9c2df 8342818: Implement JEP 509: JFR CPU-Time Profiling
Reviewed-by: mgronlun, mdoerr, pchilanomate, apangin, shade
2025-06-04 22:08:58 +00:00
Aleksey Shipilev
3cf3e4bbec 8358339: Handle MethodCounters::_method backlinks after JDK-8355003
Reviewed-by: coleenp, kvn, iveresov
2025-06-04 21:32:29 +00:00
Joe Darcy
77c110c309 8357000: Write overview documentation for start of release changes
Reviewed-by: erikj, iris, ihse, dholmes
2025-06-04 20:03:48 +00:00
Ian Graves
901144ee0d 8358217: jdk/incubator/vector/PreferredSpeciesTest.java#id0 failures - expected [128] but found [256]
Co-authored-by: Paul Sandoz <psandoz@openjdk.org>
Co-authored-by: Jaikiran Pai <jpai@openjdk.org>
Reviewed-by: syan, psandoz
2025-06-04 19:46:30 +00:00
Justin Lu
8f821175cc 8358170: Repurpose testCompat in test/jdk/java/util/TimeZone/Bug8167143.java
Reviewed-by: naoto
2025-06-04 18:46:31 +00:00
Matthew Donovan
5ed246d17d 8357592: Update output parsing in test/jdk/sun/security/tools/jarsigner/compatibility/Compatibility.java
Reviewed-by: rhalade
2025-06-04 18:07:07 +00:00
Sergey Bylokhov
8939acc8ab 8358057: Update validation of ICC_Profile header data
Reviewed-by: honkar
2025-06-04 17:53:17 +00:00
Ashutosh Mehra
fd0ab04367 8358330: AsmRemarks and DbgStrings clear() method may not get called before their destructor
Reviewed-by: kvn
2025-06-04 16:52:38 +00:00
Justin Lu
8a79ac8863 8358449: Locale.getISOCountries does not specify the returned set is unmodifiable
Reviewed-by: naoto
2025-06-04 16:40:22 +00:00
Stuart Marks
ef47635d5a 8358015: Fix SequencedMap sequenced view method specifications
Reviewed-by: jpai, bchristi
2025-06-04 16:14:31 +00:00
Stefan Karlsson
c909216446 8357443: ZGC: Optimize old page iteration in remap remembered phase
Reviewed-by: aboldtch, eosterlund
2025-06-04 14:56:20 +00:00
Aleksey Shipilev
4e314cb9e0 8356000: C1/C2-only modes use 2 compiler threads on low CPU count machines
Reviewed-by: kvn, dfenacci, galder
2025-06-04 14:21:34 +00:00
Matias Saavedra Silva
a2723d91df 8345347: Test runtime/cds/TestDefaultArchiveLoading.java should accept VM flags or be marked as flagless
Reviewed-by: lmesnik, stefank, ccheung
2025-06-04 14:16:20 +00:00
Igor Veresov
ae1892fb0f 8358003: KlassTrainingData initializer reads garbage holder
Reviewed-by: coleenp, shade, vlivanov
2025-06-04 14:07:49 +00:00
Tom Shull
0352477ff5 8357660: [JVMCI] Add support for retrieving all BootstrapMethodInvocations directly from ConstantPool
Reviewed-by: dnsimon, yzheng
2025-06-04 13:50:36 +00:00
Erik Gahlin
a653ff4893 8358536: jdk/jfr/api/consumer/TestRecordingFileWrite.java times out
Reviewed-by: mgronlun
2025-06-04 13:39:31 +00:00
Emanuel Peter
248341d372 8344942: Template-Based Testing Framework
Co-authored-by: Tobias Hartmann <thartmann@openjdk.org>
Co-authored-by: Tobias Holenstein <tholenstein@openjdk.org>
Co-authored-by: Theo Weidmann <tweidmann@openjdk.org>
Co-authored-by: Roberto Castañeda Lozano <rcastanedalo@openjdk.org>
Co-authored-by: Christian Hagedorn <chagedorn@openjdk.org>
Co-authored-by: Manuel Hässig <mhaessig@openjdk.org>
Reviewed-by: chagedorn, mhaessig, rcastanedalo
2025-06-04 13:16:24 +00:00
Archie Cobbs
09ec4de74d 8358066: Non-ascii package names gives compilation error "import requires canonical name"
Reviewed-by: jlahoda, naoto
2025-06-04 12:56:18 +00:00
Robbin Ehn
dc961609f8 8356159: RISC-V: Add Zabha
Reviewed-by: fyang, fjiang
2025-06-04 12:43:23 +00:00
Alan Bateman
7838321b74 8358496: Concurrent reading from Socket with timeout executes sequentially
Reviewed-by: dfuchs
2025-06-04 09:52:45 +00:00
Sean Coffey
42f48a39e8 8350689: Turn on timestamp and thread metadata by default for java.security.debug
Reviewed-by: mullan
2025-06-04 09:41:51 +00:00
Matthias Baesken
cd16b68962 8357155: [asan] ZGC does not work (x86_64 and ppc64)
Co-authored-by: Axel Boldt-Christmas <aboldtch@openjdk.org>
Reviewed-by: mdoerr, aboldtch
2025-06-04 09:06:46 +00:00
Martin Doerr
ab23500034 8354636: [PPC64] Clean up comments regarding frame manager
Reviewed-by: amitkumar, rrich
2025-06-04 08:31:37 +00:00
He-Pin(kerr)
f141674d16 8347491: IllegalArgumentationException thrown by ThreadPoolExecutor doesn't have a useful message
Reviewed-by: vklang, liach, pminborg
2025-06-04 08:28:29 +00:00
Markus Grönlund
b6d60280e7 8358429: JFR: minimize the time the Threads_lock is held for sampling
Reviewed-by: egahlin
2025-06-04 08:20:48 +00:00
Markus Grönlund
955bfcd550 8357671: JFR: Remove JfrTraceIdEpoch synchronizing
Reviewed-by: egahlin
2025-06-04 08:19:24 +00:00
Magnus Ihse Bursie
edf92721c2 8356977: UTF-8 cleanups
Reviewed-by: naoto, prr
2025-06-04 08:10:42 +00:00
Jaikiran Pai
b5cfd76c04 8358456: ZipFile.getInputStream(ZipEntry) throws unspecified IllegalArgumentException
Reviewed-by: lancea
2025-06-04 06:38:06 +00:00
Aleksey Shipilev
683319f25c 8357798: ReverseOrderListView uses Boolean boxes after JDK-8356080
Reviewed-by: liach, smarks
2025-06-04 06:04:05 +00:00
Aleksey Shipilev
b918dc84ec 8357434: x86: Simplify Interpreter::profile_taken_branch
Reviewed-by: kvn, vlivanov
2025-06-04 06:02:49 +00:00
Alan Bateman
f17b2bc06a 8356870: HotSpotDiagnosticMXBean.dumpThreads and jcmd Thread.dump_to_file updates
Reviewed-by: sspitsyn, kevinw
2025-06-04 04:10:10 +00:00
Vladimir Kozlov
ebd85288ce 8358289: [asan] runtime/cds/appcds/aotCode/AOTCodeFlags.java reports heap-buffer-overflow in ArchiveBuilder
Reviewed-by: shade, iklam, asmehra
2025-06-04 02:14:17 +00:00
Anjian Wen
939521b8e4 8358105: RISC-V: Optimize interpreter profile updates
Reviewed-by: fjiang, fyang
2025-06-04 02:03:22 +00:00
Cesar Soares Lucas
2345065166 8357600: Patch nmethod flushing message to include more details
Reviewed-by: shade, kvn
2025-06-03 23:39:32 +00:00
Naoto Sato
9c74d54514 8358158: test/jdk/java/io/Console/CharsetTest.java failing with NoClassDefFoundError: jtreg/SkippedException
Reviewed-by: joehw, jlu, iris
2025-06-03 23:28:00 +00:00
Daniel Gredler
939753579b 8356803: Test TextLayout/TestControls fails on windows & linux: line and paragraph separator show non-zero advance
8356812: Create an automated version of TextLayout/TestControls

Reviewed-by: prr, honkar
2025-06-03 23:27:44 +00:00
Alex Menkov
da49fa5e15 8354460: Streaming output for attach API should be turned on by default
Reviewed-by: sspitsyn, kevinw
2025-06-03 20:47:17 +00:00
Cesar Soares Lucas
704b5990a7 8358534: Bailout in Conv2B::Ideal when type of cmp input is not supported
Reviewed-by: shade
2025-06-03 20:15:20 +00:00
Tom Shull
e235b61a8b 8357987: [JVMCI] Add support for retrieving all methods of a ResolvedJavaType
Reviewed-by: dnsimon, yzheng, never
2025-06-03 19:38:58 +00:00
Magnus Ihse Bursie
a44a470052 8358515: make cmp-baseline is broken after JDK-8349665
Reviewed-by: erikj
2025-06-03 19:23:29 +00:00
Stefan Lobbenmeier
cc11b7d1f5 8356128: Correct documentation for --linux-package-deps
Reviewed-by: asemenyuk, almatvee
2025-06-03 19:22:52 +00:00
Alex Menkov
406f1bc5b9 8357650: ThreadSnapshot to take snapshot of thread for thread dumps
Co-authored-by: Alan Bateman <alanb@openjdk.org>
Co-authored-by: Alex Menkov <amenkov@openjdk.org>
Reviewed-by: sspitsyn, kevinw
2025-06-03 18:46:55 +00:00
Daniel D. Daugherty
e984fa7997 8358539: ProblemList jdk/jfr/api/consumer/TestRecordingFileWrite.java
Reviewed-by: ayang, bpb
2025-06-03 18:41:05 +00:00
Alisen Chung
461cb84277 8345538: Robot.mouseMove doesn't clamp bounds on macOS when trying to move mouse off screen
Reviewed-by: honkar, prr
2025-06-03 18:02:47 +00:00
Chris Plummer
c382da5798 8358178: Some nsk/jdi tests should be run with includevirtualthreads=y even though they pass without
Reviewed-by: sspitsyn, lmesnik
2025-06-03 17:19:31 +00:00
Larry Cable
44d62c8e21 8358077: sun.tools.attach.VirtualMachineImpl::checkCatchesAndSendQuitTo on Linux leaks file handles after JDK-8327114
Reviewed-by: kevinw, sspitsyn, syan
2025-06-03 17:13:22 +00:00
Jatin Bhateja
d7e58ac480 8351635: C2 ROR/ROL: assert failed: Long constant expected
Reviewed-by: thartmann, chagedorn
2025-06-03 17:00:54 +00:00
Brian Burkhalter
57862005f9 8354450: A File should be invalid if an element of its name sequence ends with a space
Reviewed-by: alanb
2025-06-03 16:32:12 +00:00
Justin Lu
04c15466f6 8358095: Cleanup tests with explicit locale provider set to only CLDR
Reviewed-by: bpb, naoto
2025-06-03 16:13:14 +00:00
Patricio Chilano Mateo
16e120b008 8357910: LoaderConstraintsTest.java fails when run with TEST_THREAD_FACTORY=Virtual
Reviewed-by: dholmes, coleenp
2025-06-03 16:12:53 +00:00
Erik Gahlin
d7def20afa 8358448: JFR: Incorrect time unit for MethodTiming event
Reviewed-by: mgronlun, ayang
2025-06-03 16:02:14 +00:00
Brian Burkhalter
4604c86d2f 8357425: (fs) SecureDirectoryStream setPermissions should use fchmodat
Reviewed-by: alanb
2025-06-03 15:43:26 +00:00
Michael McMahon
b6f827ef05 8348986: Improve coverage of enhanced exception messages
Reviewed-by: dfuchs
2025-06-03 15:36:29 +00:00
Igor Veresov
01f01b6f7b 8358283: Inconsistent failure mode for MetaspaceObj::operator new(size_t, MemTag)
Reviewed-by: kvn, kbarrett
2025-06-03 15:31:07 +00:00
756 changed files with 28912 additions and 7760 deletions

View File

@@ -63,7 +63,7 @@ env:
jobs: jobs:
build-windows: build-windows:
name: build name: build
runs-on: windows-2019 runs-on: windows-2025
defaults: defaults:
run: run:
shell: bash shell: bash
@@ -102,7 +102,7 @@ jobs:
id: toolchain-check id: toolchain-check
run: | run: |
set +e set +e
'/c/Program Files (x86)/Microsoft Visual Studio/2019/Enterprise/vc/auxiliary/build/vcvars64.bat' -vcvars_ver=${{ inputs.msvc-toolset-version }} '/c/Program Files/Microsoft Visual Studio/2022/Enterprise/vc/auxiliary/build/vcvars64.bat' -vcvars_ver=${{ inputs.msvc-toolset-version }}
if [ $? -eq 0 ]; then if [ $? -eq 0 ]; then
echo "Toolchain is already installed" echo "Toolchain is already installed"
echo "toolchain-installed=true" >> $GITHUB_OUTPUT echo "toolchain-installed=true" >> $GITHUB_OUTPUT
@@ -115,7 +115,7 @@ jobs:
run: | run: |
# Run Visual Studio Installer # Run Visual Studio Installer
'/c/Program Files (x86)/Microsoft Visual Studio/Installer/vs_installer.exe' \ '/c/Program Files (x86)/Microsoft Visual Studio/Installer/vs_installer.exe' \
modify --quiet --installPath 'C:\Program Files (x86)\Microsoft Visual Studio\2019\Enterprise' \ modify --quiet --installPath 'C:\Program Files\Microsoft Visual Studio\2022\Enterprise' \
--add Microsoft.VisualStudio.Component.VC.${{ inputs.msvc-toolset-version }}.${{ inputs.msvc-toolset-architecture }} --add Microsoft.VisualStudio.Component.VC.${{ inputs.msvc-toolset-version }}.${{ inputs.msvc-toolset-architecture }}
if: steps.toolchain-check.outputs.toolchain-installed != 'true' if: steps.toolchain-check.outputs.toolchain-installed != 'true'

View File

@@ -310,7 +310,7 @@ jobs:
uses: ./.github/workflows/build-windows.yml uses: ./.github/workflows/build-windows.yml
with: with:
platform: windows-x64 platform: windows-x64
msvc-toolset-version: '14.29' msvc-toolset-version: '14.43'
msvc-toolset-architecture: 'x86.x64' msvc-toolset-architecture: 'x86.x64'
configure-arguments: ${{ github.event.inputs.configure-arguments }} configure-arguments: ${{ github.event.inputs.configure-arguments }}
make-arguments: ${{ github.event.inputs.make-arguments }} make-arguments: ${{ github.event.inputs.make-arguments }}
@@ -322,7 +322,7 @@ jobs:
uses: ./.github/workflows/build-windows.yml uses: ./.github/workflows/build-windows.yml
with: with:
platform: windows-aarch64 platform: windows-aarch64
msvc-toolset-version: '14.29' msvc-toolset-version: '14.43'
msvc-toolset-architecture: 'arm64' msvc-toolset-architecture: 'arm64'
make-target: 'hotspot' make-target: 'hotspot'
extra-conf-options: '--openjdk-target=aarch64-unknown-cygwin' extra-conf-options: '--openjdk-target=aarch64-unknown-cygwin'
@@ -393,5 +393,5 @@ jobs:
with: with:
platform: windows-x64 platform: windows-x64
bootjdk-platform: windows-x64 bootjdk-platform: windows-x64
runs-on: windows-2019 runs-on: windows-2025
debug-suffix: -debug debug-suffix: -debug

View File

@@ -1,7 +1,7 @@
[general] [general]
project=jdk project=jdk
jbs=JDK jbs=JDK
version=25 version=26
[checks] [checks]
error=author,committer,reviewers,merge,issues,executable,symlink,message,hg-tag,whitespace,problemlists,copyright error=author,committer,reviewers,merge,issues,executable,symlink,message,hg-tag,whitespace,problemlists,copyright

View File

@@ -0,0 +1,127 @@
<!DOCTYPE html>
<html xmlns="http://www.w3.org/1999/xhtml" lang="" xml:lang="">
<head>
<meta charset="utf-8" />
<meta name="generator" content="pandoc" />
<meta name="viewport" content="width=device-width, initial-scale=1.0, user-scalable=yes" />
<title>Explanation of start of release changes</title>
<style>
code{white-space: pre-wrap;}
span.smallcaps{font-variant: small-caps;}
div.columns{display: flex; gap: min(4vw, 1.5em);}
div.column{flex: auto; overflow-x: auto;}
div.hanging-indent{margin-left: 1.5em; text-indent: -1.5em;}
/* The extra [class] is a hack that increases specificity enough to
override a similar rule in reveal.js */
ul.task-list[class]{list-style: none;}
ul.task-list li input[type="checkbox"] {
font-size: inherit;
width: 0.8em;
margin: 0 0.8em 0.2em -1.6em;
vertical-align: middle;
}
.display.math{display: block; text-align: center; margin: 0.5rem auto;}
</style>
<link rel="stylesheet" href="../make/data/docs-resources/resources/jdk-default.css" />
<!--[if lt IE 9]>
<script src="//cdnjs.cloudflare.com/ajax/libs/html5shiv/3.7.3/html5shiv-printshiv.min.js"></script>
<![endif]-->
</head>
<body>
<header id="title-block-header">
<h1 class="title">Explanation of start of release changes</h1>
</header>
<nav id="TOC" role="doc-toc">
<ul>
<li><a href="#overview" id="toc-overview">Overview</a></li>
<li><a href="#details-and-file-updates"
id="toc-details-and-file-updates">Details and file updates</a>
<ul>
<li><a href="#meta-data-files" id="toc-meta-data-files">Meta-data
files</a></li>
<li><a href="#src-files" id="toc-src-files"><code>src</code>
files</a></li>
<li><a href="#test-files" id="toc-test-files"><code>test</code>
files</a></li>
</ul></li>
</ul>
</nav>
<h2 id="overview">Overview</h2>
<p>The start of release changes, the changes that turn JDK <em>N</em>
into JDK (<em>N</em>+1), are primarily small updates to various files
along with new files to store symbol information to allow
<code>javac --release N ...</code> to run on JDK (<em>N</em>+1).</p>
<p>The updates include changes to files holding meta-data about the
release, files under the <code>src</code> directory for API and tooling
updates, and incidental updates under the <code>test</code>
directory.</p>
<h2 id="details-and-file-updates">Details and file updates</h2>
<p>As a matter of policy, there are a number of semantically distinct
concepts which get incremented separately at the start of a new
release:</p>
<ul>
<li>Feature value of <code>Runtime.version()</code></li>
<li>Highest source version modeled by
<code>javax.lang.model.SourceVersion</code></li>
<li>Highest class file format major version recognized by the
platform</li>
<li>Highest
<code>-source</code>/<code>-target</code>/<code>--release</code>
argument recognized by <code>javac</code> and related tools</li>
</ul>
<p>The expected file updates are listed below. Additional files may need
to be updated for a particular release.</p>
<h3 id="meta-data-files">Meta-data files</h3>
<ul>
<li><code>jcheck/conf</code>: update meta-data used by
<code>jcheck</code> and the Skara tooling</li>
<li><code>make/conf/version-numbers.conf</code>: update to meta-data
used in the build</li>
</ul>
<h3 id="src-files"><code>src</code> files</h3>
<ul>
<li><code>src/hotspot/share/classfile/classFileParser.cpp</code>: add a
<code>#define</code> for the new version</li>
<li><code>src/java.base/share/classes/java/lang/classfile/ClassFile.java</code>:
add a constant for the new class file format version</li>
<li><code>src/java.base/share/classes/java/lang/reflect/ClassFileFormatVersion.java</code>:
add an <code>enum</code> constant for the new class file format
version</li>
<li><code>src/java.compiler/share/classes/javax/lang/model/SourceVersion.java</code>:
add an <code>enum</code> constant for the new source version</li>
<li><code>src/java.compiler/share/classes/javax/lang/model/util/*</code>
visitors: Update <code>@SupportedSourceVersion</code> annotations to
latest value. Note this update is done in lieu of introducing another
set of visitors for each Java SE release.</li>
<li><code>src/jdk.compiler/share/classes/com/sun/tools/javac/code/Source.java</code>:
add an <code>enum</code> constant for the new source version internal to
<code>javac</code></li>
<li><code>src/jdk.compiler/share/classes/com/sun/tools/javac/jvm/ClassFile.java</code>:
add an <code>enum</code> constant for the new class file format version
internal to <code>javac</code></li>
<li><code>src/jdk.compiler/share/classes/com/sun/tools/javac/jvm/Target.java</code>:
add an <code>enum</code> constant for the new target version internal to
<code>javac</code></li>
<li><code>src/jdk.compiler/share/classes/com/sun/tools/javac/processing/PrintingProcessor.java</code>:
update printing processor to support the new source version</li>
<li>The symbol information for <code>--release</code> is stored as new
text files in the <code>src/jdk.compiler/share/data/symbols</code>
directory, one file per module. The README file in that directory
contains directions on how to create the files.</li>
</ul>
<h3 id="test-files"><code>test</code> files</h3>
<ul>
<li><code>test/langtools/tools/javac/api/TestGetSourceVersions.java</code>:
add new <code>SourceVersion</code> constant to test matrix.</li>
<li><code>test/langtools/tools/javac/classfiles/ClassVersionChecker.java</code>:
add new enum constant for the new class file version</li>
<li><code>test/langtools/tools/javac/lib/JavacTestingAbstractProcessor.java</code>:
update annotation processor extended by <code>javac</code> tests to
cover the new source version</li>
<li><code>test/langtools/tools/javac/preview/classReaderTest/Client.nopreview.out</code>
and
<code>test/langtools/tools/javac/preview/classReaderTest/Client.preview.out</code>:
update expected messages for preview errors and warnings</li>
</ul>
</body>
</html>

View File

@ -0,0 +1,68 @@
% Explanation of start of release changes
## Overview
The start of release changes, the changes that turn JDK _N_ into JDK
(_N_+1), are primarily small updates to various files along with new files to
store symbol information to allow `javac --release N ...` to run on
JDK (_N_+1).
The updates include changes to files holding meta-data about the
release, files under the `src` directory for API and tooling updates,
and incidental updates under the `test` directory.
## Details and file updates
As a matter of policy, there are a number of semantically distinct
concepts which get incremented separately at the start of a new
release:
* Feature value of `Runtime.version()`
* Highest source version modeled by `javax.lang.model.SourceVersion`
* Highest class file format major version recognized by the platform
* Highest `-source`/`-target`/`--release` argument recognized by
`javac` and related tools
The expected file updates are listed below. Additional files may need
to be updated for a particular release.
### Meta-data files
* `jcheck/conf`: update meta-data used by `jcheck` and the Skara tooling
* `make/conf/version-numbers.conf`: update to meta-data used in the build
### `src` files
* `src/hotspot/share/classfile/classFileParser.cpp`: add a `#define`
for the new version
* `src/java.base/share/classes/java/lang/classfile/ClassFile.java`:
add a constant for the new class file format version
* `src/java.base/share/classes/java/lang/reflect/ClassFileFormatVersion.java`:
add an `enum` constant for the new class file format version
* `src/java.compiler/share/classes/javax/lang/model/SourceVersion.java`:
add an `enum` constant for the new source version
* `src/java.compiler/share/classes/javax/lang/model/util/*` visitors: Update
`@SupportedSourceVersion` annotations to latest value. Note this update
is done in lieu of introducing another set of visitors for each Java
SE release.
* `src/jdk.compiler/share/classes/com/sun/tools/javac/code/Source.java`:
add an `enum` constant for the new source version internal to `javac`
* `src/jdk.compiler/share/classes/com/sun/tools/javac/jvm/ClassFile.java`:
add an `enum` constant for the new class file format version internal to `javac`
* `src/jdk.compiler/share/classes/com/sun/tools/javac/jvm/Target.java`:
add an `enum` constant for the new target version internal to `javac`
* `src/jdk.compiler/share/classes/com/sun/tools/javac/processing/PrintingProcessor.java`:
update printing processor to support the new source version
* The symbol information for `--release` is stored as new text files in the
`src/jdk.compiler/share/data/symbols` directory, one file per
module. The README file in that directory contains directions on how
to create the files.
### `test` files
* `test/langtools/tools/javac/api/TestGetSourceVersions.java`: add new `SourceVersion` constant to test matrix.
* `test/langtools/tools/javac/classfiles/ClassVersionChecker.java`: add new enum constant for the new class file version
* `test/langtools/tools/javac/lib/JavacTestingAbstractProcessor.java`:
update annotation processor extended by `javac` tests to cover the new source version
* `test/langtools/tools/javac/preview/classReaderTest/Client.nopreview.out` and `test/langtools/tools/javac/preview/classReaderTest/Client.preview.out`: update expected messages for preview errors and warnings

View File

@ -110,7 +110,18 @@ reconfigure:
CUSTOM_CONFIG_DIR="$(CUSTOM_CONFIG_DIR)" \ CUSTOM_CONFIG_DIR="$(CUSTOM_CONFIG_DIR)" \
$(RECONFIGURE_COMMAND) ) $(RECONFIGURE_COMMAND) )
.PHONY: print-modules print-targets print-tests print-configuration reconfigure # Create files that are needed to run most targets in Main.gmk
create-make-helpers:
( cd $(TOPDIR) && \
$(MAKE) $(MAKE_ARGS) -j 1 -f make/GenerateFindTests.gmk \
$(USER_MAKE_VARS) )
( cd $(TOPDIR) && \
$(MAKE) $(MAKE_ARGS) -j 1 -f make/Main.gmk $(USER_MAKE_VARS) \
UPDATE_MODULE_DEPS=true NO_RECIPES=true \
create-main-targets-include )
.PHONY: print-modules print-targets print-tests print-configuration \
reconfigure create-make-helpers
############################################################################## ##############################################################################
# The main target. This will delegate all other targets into Main.gmk. # The main target. This will delegate all other targets into Main.gmk.
@ -130,7 +141,7 @@ TARGET_DESCRIPTION := target$(if $(word 2, $(MAIN_TARGETS)),s) \
# variables are explicitly propagated using $(USER_MAKE_VARS). # variables are explicitly propagated using $(USER_MAKE_VARS).
main: MAKEOVERRIDES := main: MAKEOVERRIDES :=
main: $(INIT_TARGETS) main: $(INIT_TARGETS) create-make-helpers
ifneq ($(SEQUENTIAL_TARGETS)$(PARALLEL_TARGETS), ) ifneq ($(SEQUENTIAL_TARGETS)$(PARALLEL_TARGETS), )
$(call RotateLogFiles) $(call RotateLogFiles)
$(ECHO) "Building $(TARGET_DESCRIPTION)" $(BUILD_LOG_PIPE_SIMPLE) $(ECHO) "Building $(TARGET_DESCRIPTION)" $(BUILD_LOG_PIPE_SIMPLE)
@ -142,12 +153,7 @@ main: $(INIT_TARGETS)
$(SEQUENTIAL_TARGETS) ) $(SEQUENTIAL_TARGETS) )
# We might have cleaned away essential files, recreate them. # We might have cleaned away essential files, recreate them.
( cd $(TOPDIR) && \ ( cd $(TOPDIR) && \
$(MAKE) $(MAKE_ARGS) -j 1 -f make/GenerateFindTests.gmk \ $(MAKE) $(MAKE_ARGS) -j 1 -f make/Init.gmk create-make-helpers )
$(USER_MAKE_VARS) )
( cd $(TOPDIR) && \
$(MAKE) $(MAKE_ARGS) -j 1 -f make/Main.gmk $(USER_MAKE_VARS) \
UPDATE_MODULE_DEPS=true NO_RECIPES=true \
create-main-targets-include )
endif endif
ifneq ($(PARALLEL_TARGETS), ) ifneq ($(PARALLEL_TARGETS), )
$(call PrepareFailureLogs) $(call PrepareFailureLogs)

View File

@ -423,14 +423,6 @@ bootcycle-images:
ifneq ($(COMPILE_TYPE), cross) ifneq ($(COMPILE_TYPE), cross)
$(call LogWarn, Boot cycle build step 2: Building a new JDK image using previously built image) $(call LogWarn, Boot cycle build step 2: Building a new JDK image using previously built image)
$(call MakeDir, $(OUTPUTDIR)/bootcycle-build) $(call MakeDir, $(OUTPUTDIR)/bootcycle-build)
# We need to create essential files for the bootcycle spec dir
( cd $(TOPDIR) && \
$(MAKE) $(MAKE_ARGS) -f make/GenerateFindTests.gmk \
SPEC=$(BOOTCYCLE_SPEC))
( cd $(TOPDIR) && \
$(MAKE) $(MAKE_ARGS) -f $(TOPDIR)/make/Main.gmk \
SPEC=$(BOOTCYCLE_SPEC) UPDATE_MODULE_DEPS=true NO_RECIPES=true \
create-main-targets-include )
+$(MAKE) $(MAKE_ARGS) -f $(TOPDIR)/make/Init.gmk PARALLEL_TARGETS=$(BOOTCYCLE_TARGET) \ +$(MAKE) $(MAKE_ARGS) -f $(TOPDIR)/make/Init.gmk PARALLEL_TARGETS=$(BOOTCYCLE_TARGET) \
LOG_PREFIX="[bootcycle] " JOBS= SPEC=$(BOOTCYCLE_SPEC) main LOG_PREFIX="[bootcycle] " JOBS= SPEC=$(BOOTCYCLE_SPEC) main
else else

View File

@ -50,7 +50,8 @@ include $(TOPDIR)/make/Global.gmk
# Targets provided by Init.gmk. # Targets provided by Init.gmk.
ALL_INIT_TARGETS := print-modules print-targets print-configuration \ ALL_INIT_TARGETS := print-modules print-targets print-configuration \
print-tests reconfigure pre-compare-build post-compare-build print-tests reconfigure pre-compare-build post-compare-build \
create-make-helpers
# CALLED_TARGETS is the list of targets that the user provided, # CALLED_TARGETS is the list of targets that the user provided,
# or "default" if unspecified. # or "default" if unspecified.

View File

@ -1,6 +1,6 @@
#!/bin/bash #!/bin/bash
# #
# Copyright (c) 2012, 2023, Oracle and/or its affiliates. All rights reserved. # Copyright (c) 2012, 2025, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
# #
# This code is free software; you can redistribute it and/or modify it # This code is free software; you can redistribute it and/or modify it
@ -366,7 +366,7 @@ EOT
# Print additional help, e.g. a list of toolchains and JVM features. # Print additional help, e.g. a list of toolchains and JVM features.
# This must be done by the autoconf script. # This must be done by the autoconf script.
( CONFIGURE_PRINT_ADDITIONAL_HELP=true . $generated_script PRINTF=printf ) ( CONFIGURE_PRINT_ADDITIONAL_HELP=true . $generated_script PRINTF=printf ECHO=echo )
cat <<EOT cat <<EOT

View File

@ -28,7 +28,7 @@
################################################################################ ################################################################################
# Minimum supported versions # Minimum supported versions
JTREG_MINIMUM_VERSION=7.5.1 JTREG_MINIMUM_VERSION=7.5.2
GTEST_MINIMUM_VERSION=1.14.0 GTEST_MINIMUM_VERSION=1.14.0
################################################################################ ################################################################################

View File

@ -26,7 +26,7 @@
# Versions and download locations for dependencies used by GitHub Actions (GHA) # Versions and download locations for dependencies used by GitHub Actions (GHA)
GTEST_VERSION=1.14.0 GTEST_VERSION=1.14.0
JTREG_VERSION=7.5.1+1 JTREG_VERSION=7.5.2+1
LINUX_X64_BOOT_JDK_EXT=tar.gz LINUX_X64_BOOT_JDK_EXT=tar.gz
LINUX_X64_BOOT_JDK_URL=https://download.java.net/java/GA/jdk24/1f9ff9062db4449d8ca828c504ffae90/36/GPL/openjdk-24_linux-x64_bin.tar.gz LINUX_X64_BOOT_JDK_URL=https://download.java.net/java/GA/jdk24/1f9ff9062db4449d8ca828c504ffae90/36/GPL/openjdk-24_linux-x64_bin.tar.gz

View File

@ -1174,9 +1174,9 @@ var getJibProfilesDependencies = function (input, common) {
jtreg: { jtreg: {
server: "jpg", server: "jpg",
product: "jtreg", product: "jtreg",
version: "7.5.1", version: "7.5.2",
build_number: "1", build_number: "1",
file: "bundles/jtreg-7.5.1+1.zip", file: "bundles/jtreg-7.5.2+1.zip",
environment_name: "JT_HOME", environment_name: "JT_HOME",
environment_path: input.get("jtreg", "home_path") + "/bin", environment_path: input.get("jtreg", "home_path") + "/bin",
configure_args: "--with-jtreg=" + input.get("jtreg", "home_path"), configure_args: "--with-jtreg=" + input.get("jtreg", "home_path"),
@ -1192,8 +1192,8 @@ var getJibProfilesDependencies = function (input, common) {
server: "jpg", server: "jpg",
product: "jcov", product: "jcov",
version: "3.0", version: "3.0",
build_number: "1", build_number: "3",
file: "bundles/jcov-3.0+1.zip", file: "bundles/jcov-3.0+3.zip",
environment_name: "JCOV_HOME", environment_name: "JCOV_HOME",
}, },

View File

@ -26,17 +26,17 @@
# Default version, product, and vendor information to use, # Default version, product, and vendor information to use,
# unless overridden by configure # unless overridden by configure
DEFAULT_VERSION_FEATURE=25 DEFAULT_VERSION_FEATURE=26
DEFAULT_VERSION_INTERIM=0 DEFAULT_VERSION_INTERIM=0
DEFAULT_VERSION_UPDATE=0 DEFAULT_VERSION_UPDATE=0
DEFAULT_VERSION_PATCH=0 DEFAULT_VERSION_PATCH=0
DEFAULT_VERSION_EXTRA1=0 DEFAULT_VERSION_EXTRA1=0
DEFAULT_VERSION_EXTRA2=0 DEFAULT_VERSION_EXTRA2=0
DEFAULT_VERSION_EXTRA3=0 DEFAULT_VERSION_EXTRA3=0
DEFAULT_VERSION_DATE=2025-09-16 DEFAULT_VERSION_DATE=2026-03-17
DEFAULT_VERSION_CLASSFILE_MAJOR=69 # "`$EXPR $DEFAULT_VERSION_FEATURE + 44`" DEFAULT_VERSION_CLASSFILE_MAJOR=70 # "`$EXPR $DEFAULT_VERSION_FEATURE + 44`"
DEFAULT_VERSION_CLASSFILE_MINOR=0 DEFAULT_VERSION_CLASSFILE_MINOR=0
DEFAULT_VERSION_DOCS_API_SINCE=11 DEFAULT_VERSION_DOCS_API_SINCE=11
DEFAULT_ACCEPTABLE_BOOT_VERSIONS="24 25" DEFAULT_ACCEPTABLE_BOOT_VERSIONS="24 25 26"
DEFAULT_JDK_SOURCE_TARGET_VERSION=25 DEFAULT_JDK_SOURCE_TARGET_VERSION=26
DEFAULT_PROMOTED_VERSION_PRE=ea DEFAULT_PROMOTED_VERSION_PRE=ea

View File

@ -46,6 +46,8 @@ CLDR_GEN_DONE := $(GENSRC_DIR)/_cldr-gensrc.marker
TZ_DATA_DIR := $(MODULE_SRC)/share/data/tzdata TZ_DATA_DIR := $(MODULE_SRC)/share/data/tzdata
ZONENAME_TEMPLATE := $(MODULE_SRC)/share/classes/java/time/format/ZoneName.java.template ZONENAME_TEMPLATE := $(MODULE_SRC)/share/classes/java/time/format/ZoneName.java.template
# The `-utf8` option is used even for US English, as some names
# may contain non-ASCII characters, such as “Türkiye”.
$(CLDR_GEN_DONE): $(wildcard $(CLDR_DATA_DIR)/dtd/*.dtd) \ $(CLDR_GEN_DONE): $(wildcard $(CLDR_DATA_DIR)/dtd/*.dtd) \
$(wildcard $(CLDR_DATA_DIR)/main/en*.xml) \ $(wildcard $(CLDR_DATA_DIR)/main/en*.xml) \
$(wildcard $(CLDR_DATA_DIR)/supplemental/*.xml) \ $(wildcard $(CLDR_DATA_DIR)/supplemental/*.xml) \
@ -61,7 +63,8 @@ $(CLDR_GEN_DONE): $(wildcard $(CLDR_DATA_DIR)/dtd/*.dtd) \
-basemodule \ -basemodule \
-year $(COPYRIGHT_YEAR) \ -year $(COPYRIGHT_YEAR) \
-zntempfile $(ZONENAME_TEMPLATE) \ -zntempfile $(ZONENAME_TEMPLATE) \
-tzdatadir $(TZ_DATA_DIR)) -tzdatadir $(TZ_DATA_DIR) \
-utf8)
$(TOUCH) $@ $(TOUCH) $@
TARGETS += $(CLDR_GEN_DONE) TARGETS += $(CLDR_GEN_DONE)

View File

@ -45,7 +45,8 @@ $(CLDR_GEN_DONE): $(wildcard $(CLDR_DATA_DIR)/dtd/*.dtd) \
-baselocales "en-US" \ -baselocales "en-US" \
-year $(COPYRIGHT_YEAR) \ -year $(COPYRIGHT_YEAR) \
-o $(GENSRC_DIR) \ -o $(GENSRC_DIR) \
-tzdatadir $(TZ_DATA_DIR)) -tzdatadir $(TZ_DATA_DIR) \
-utf8)
$(TOUCH) $@ $(TOUCH) $@
TARGETS += $(CLDR_GEN_DONE) TARGETS += $(CLDR_GEN_DONE)

View File

@ -187,22 +187,18 @@ public class HelloWorld {
new Run("none", "Hello from Cupertino") new Run("none", "Hello from Cupertino")
}), }),
new Paragraph("title", new Run[] { new Paragraph("title", new Run[] {
new Run("none", "\u53F0\u5317\u554F\u5019\u60A8\u0021") new Run("none", "台北問候您!")
}), }),
new Paragraph("title", new Run[] { new Paragraph("title", new Run[] {
new Run("none", "\u0391\u03B8\u03B7\u03BD\u03B1\u03B9\u0020" // Greek new Run("none", "Αθηναι ασπαζονται υμας!") // Greek
+ "\u03B1\u03C3\u03C0\u03B1\u03B6\u03BF\u03BD"
+ "\u03C4\u03B1\u03B9\u0020\u03C5\u03BC\u03B1"
+ "\u03C2\u0021")
}), }),
new Paragraph("title", new Run[] { new Paragraph("title", new Run[] {
new Run("none", "\u6771\u4eac\u304b\u3089\u4eca\u65e5\u306f") new Run("none", "東京から今日は")
}), }),
new Paragraph("title", new Run[] { new Paragraph("title", new Run[] {
new Run("none", "\u05e9\u05dc\u05d5\u05dd \u05de\u05d9\u05e8\u05d5" new Run("none", "שלום מירושלים")
+ "\u05e9\u05dc\u05d9\u05dd")
}), }),
new Paragraph("title", new Run[] { new Paragraph("title", new Run[] {
new Run("none", "\u0633\u0644\u0627\u0645") new Run("none", "سلام")
}), }; }), };
} }

View File

@ -3921,6 +3921,10 @@ ins_attrib ins_alignment(4); // Required alignment attribute (must
// compute_padding() function must be // compute_padding() function must be
// provided for the instruction // provided for the instruction
// Whether this node is expanded during code emission into a sequence of
// instructions and the first instruction can perform an implicit null check.
ins_attrib ins_is_late_expanded_null_check_candidate(false);
//----------OPERANDS----------------------------------------------------------- //----------OPERANDS-----------------------------------------------------------
// Operand definitions must precede instruction definitions for correct parsing // Operand definitions must precede instruction definitions for correct parsing
// in the ADLC because operands constitute user defined types which are used in // in the ADLC because operands constitute user defined types which are used in

View File

@ -106,6 +106,13 @@ instruct zLoadP(iRegPNoSp dst, memory8 mem, rFlagsReg cr)
match(Set dst (LoadP mem)); match(Set dst (LoadP mem));
predicate(UseZGC && !needs_acquiring_load(n) && n->as_Load()->barrier_data() != 0); predicate(UseZGC && !needs_acquiring_load(n) && n->as_Load()->barrier_data() != 0);
effect(TEMP dst, KILL cr); effect(TEMP dst, KILL cr);
// The main load is a candidate to implement implicit null checks, as long as
// legitimize_address() does not require a preceding lea instruction to
// materialize the memory operand. The absence of a preceding lea instruction
// is guaranteed for immLoffset8 memory operands, because these do not lead to
// out-of-range offsets (see definition of immLoffset8). Fortunately,
// immLoffset8 memory operands are the most common ones in practice.
ins_is_late_expanded_null_check_candidate(opnd_array(1)->opcode() == INDOFFL8);
ins_cost(4 * INSN_COST); ins_cost(4 * INSN_COST);
@ -117,7 +124,11 @@ instruct zLoadP(iRegPNoSp dst, memory8 mem, rFlagsReg cr)
// Fix up any out-of-range offsets. // Fix up any out-of-range offsets.
assert_different_registers(rscratch2, as_Register($mem$$base)); assert_different_registers(rscratch2, as_Register($mem$$base));
assert_different_registers(rscratch2, $dst$$Register); assert_different_registers(rscratch2, $dst$$Register);
ref_addr = __ legitimize_address(ref_addr, 8, rscratch2); int size = 8;
assert(!this->is_late_expanded_null_check_candidate() ||
!MacroAssembler::legitimize_address_requires_lea(ref_addr, size),
"an instruction that can be used for implicit null checking should emit the candidate memory access first");
ref_addr = __ legitimize_address(ref_addr, size, rscratch2);
} }
__ ldr($dst$$Register, ref_addr); __ ldr($dst$$Register, ref_addr);
z_load_barrier(masm, this, ref_addr, $dst$$Register, rscratch1); z_load_barrier(masm, this, ref_addr, $dst$$Register, rscratch1);

View File

@ -1,5 +1,5 @@
/* /*
* Copyright (c) 2000, 2024, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2000, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2015, 2019, Red Hat Inc. All rights reserved. * Copyright (c) 2015, 2019, Red Hat Inc. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* *
@ -95,6 +95,8 @@ define_pd_global(intx, InlineSmallCode, 1000);
"Use simplest and shortest implementation for array equals") \ "Use simplest and shortest implementation for array equals") \
product(bool, UseSIMDForBigIntegerShiftIntrinsics, true, \ product(bool, UseSIMDForBigIntegerShiftIntrinsics, true, \
"Use SIMD instructions for left/right shift of BigInteger") \ "Use SIMD instructions for left/right shift of BigInteger") \
product(bool, UseSIMDForSHA3Intrinsic, true, \
"Use SIMD SHA3 instructions for SHA3 intrinsic") \
product(bool, AvoidUnalignedAccesses, false, \ product(bool, AvoidUnalignedAccesses, false, \
"Avoid generating unaligned memory accesses") \ "Avoid generating unaligned memory accesses") \
product(bool, UseLSE, false, \ product(bool, UseLSE, false, \

View File

@ -129,16 +129,21 @@ class MacroAssembler: public Assembler {
a.lea(this, r); a.lea(this, r);
} }
// Whether materializing the given address for a LDR/STR requires an
// additional lea instruction.
static bool legitimize_address_requires_lea(const Address &a, int size) {
return a.getMode() == Address::base_plus_offset &&
!Address::offset_ok_for_immed(a.offset(), exact_log2(size));
}
/* Sometimes we get misaligned loads and stores, usually from Unsafe /* Sometimes we get misaligned loads and stores, usually from Unsafe
accesses, and these can exceed the offset range. */ accesses, and these can exceed the offset range. */
Address legitimize_address(const Address &a, int size, Register scratch) { Address legitimize_address(const Address &a, int size, Register scratch) {
if (a.getMode() == Address::base_plus_offset) { if (legitimize_address_requires_lea(a, size)) {
if (! Address::offset_ok_for_immed(a.offset(), exact_log2(size))) { block_comment("legitimize_address {");
block_comment("legitimize_address {"); lea(scratch, a);
lea(scratch, a); block_comment("} legitimize_address");
block_comment("} legitimize_address"); return Address(scratch);
return Address(scratch);
}
} }
return a; return a;
} }
@ -323,6 +328,27 @@ class MacroAssembler: public Assembler {
extr(Rd, Rn, Rn, imm); extr(Rd, Rn, Rn, imm);
} }
inline void rolw(Register Rd, Register Rn, unsigned imm) {
extrw(Rd, Rn, Rn, (32 - imm));
}
inline void rol(Register Rd, Register Rn, unsigned imm) {
extr(Rd, Rn, Rn, (64 - imm));
}
using Assembler::rax1;
using Assembler::eor3;
inline void rax1(Register Rd, Register Rn, Register Rm) {
eor(Rd, Rn, Rm, ROR, 63); // Rd = Rn ^ rol(Rm, 1)
}
inline void eor3(Register Rd, Register Rn, Register Rm, Register Rk) {
assert(Rd != Rn, "Use tmp register");
eor(Rd, Rm, Rk);
eor(Rd, Rd, Rn);
}
inline void sxtbw(Register Rd, Register Rn) { inline void sxtbw(Register Rd, Register Rn) {
sbfmw(Rd, Rn, 0, 7); sbfmw(Rd, Rn, 0, 7);
} }

View File

@ -7081,6 +7081,366 @@ class StubGenerator: public StubCodeGenerator {
return start; return start;
} }
// Chi step of a Keccak-f[1600] round on one 5-lane plane held in GPRs:
// a[i] ^= (~a[i+1]) & a[i+2], indices mod 5 — the "bit clear and XOR"
// pattern, hence the name. The terms for a0..a2 are staged in
// tmp0..tmp2 and applied only after the terms for a3 and a4 have been
// computed, so that every term reads the pre-chi values of a0 and a1.
void bcax5(Register a0, Register a1, Register a2, Register a3, Register a4,
           Register tmp0, Register tmp1, Register tmp2) {
  __ bic(tmp0, a2, a1); // for a0
  __ bic(tmp1, a3, a2); // for a1
  __ bic(tmp2, a4, a3); // for a2
  __ eor(a2, a2, tmp2);
  __ bic(tmp2, a0, a4); // for a3
  __ eor(a3, a3, tmp2);
  __ bic(tmp2, a1, a0); // for a4
  __ eor(a0, a0, tmp0);
  __ eor(a1, a1, tmp1);
  __ eor(a4, a4, tmp2);
}
// Emits one round of the Keccak-f[1600] permutation computed entirely in
// general-purpose registers: theta (column parities c0..c4, then d0..d4
// mixed into every lane), rho+pi (the rotate-and-relocate chain), chi
// (via bcax5) and iota (XOR of the round constant into lane a0).
//
// a0..a24 hold the 25 state lanes. rc points at the current round
// constant and is post-incremented by 8 here. can_use_fp / can_use_r18
// say whether rfp / r18_tls may be borrowed as extra scratch for theta;
// when either is unavailable, a4/a9 are spilled to the stack instead and
// their registers double as tmp3/tmp4 until restored near the end of
// theta (just before the d4 term is applied to a4/a9).
void keccak_round_gpr(bool can_use_fp, bool can_use_r18, Register rc,
    Register a0, Register a1, Register a2, Register a3, Register a4,
    Register a5, Register a6, Register a7, Register a8, Register a9,
    Register a10, Register a11, Register a12, Register a13, Register a14,
    Register a15, Register a16, Register a17, Register a18, Register a19,
    Register a20, Register a21, Register a22, Register a23, Register a24,
    Register tmp0, Register tmp1, Register tmp2) {
  // theta: column parities.
  __ eor3(tmp1, a4, a9, a14);
  __ eor3(tmp0, tmp1, a19, a24); // tmp0 = a4^a9^a14^a19^a24 = c4
  __ eor3(tmp2, a1, a6, a11);
  __ eor3(tmp1, tmp2, a16, a21); // tmp1 = a1^a6^a11^a16^a21 = c1
  __ rax1(tmp2, tmp0, tmp1); // d0
  {
    // Theta needs two more scratch registers than the caller provides.
    Register tmp3, tmp4;
    if (can_use_fp && can_use_r18) {
      tmp3 = rfp;
      tmp4 = r18_tls;
    } else {
      // Spill a4/a9 and reuse their registers; restored below before
      // the d4 term touches a4/a9.
      tmp3 = a4;
      tmp4 = a9;
      __ stp(tmp3, tmp4, __ pre(sp, -16));
    }
    __ eor3(tmp3, a0, a5, a10);
    __ eor3(tmp4, tmp3, a15, a20); // tmp4 = a0^a5^a10^a15^a20 = c0
    __ eor(a0, a0, tmp2);
    __ eor(a5, a5, tmp2);
    __ eor(a10, a10, tmp2);
    __ eor(a15, a15, tmp2);
    __ eor(a20, a20, tmp2); // d0(tmp2)
    __ eor3(tmp3, a2, a7, a12);
    __ eor3(tmp2, tmp3, a17, a22); // tmp2 = a2^a7^a12^a17^a22 = c2
    __ rax1(tmp3, tmp4, tmp2); // d1
    __ eor(a1, a1, tmp3);
    __ eor(a6, a6, tmp3);
    __ eor(a11, a11, tmp3);
    __ eor(a16, a16, tmp3);
    __ eor(a21, a21, tmp3); // d1(tmp3)
    __ rax1(tmp3, tmp2, tmp0); // d3
    __ eor3(tmp2, a3, a8, a13);
    __ eor3(tmp0, tmp2, a18, a23); // tmp0 = a3^a8^a13^a18^a23 = c3
    __ eor(a3, a3, tmp3);
    __ eor(a8, a8, tmp3);
    __ eor(a13, a13, tmp3);
    __ eor(a18, a18, tmp3);
    __ eor(a23, a23, tmp3);
    __ rax1(tmp2, tmp1, tmp0); // d2
    __ eor(a2, a2, tmp2);
    __ eor(a7, a7, tmp2);
    __ eor(a12, a12, tmp2);
    __ rax1(tmp0, tmp0, tmp4); // d4
    if (!can_use_fp || !can_use_r18) {
      // Restore the spilled a4/a9 before d4 is applied to them.
      __ ldp(tmp3, tmp4, __ post(sp, 16));
    }
    __ eor(a17, a17, tmp2);
    __ eor(a22, a22, tmp2);
    __ eor(a4, a4, tmp0);
    __ eor(a9, a9, tmp0);
    __ eor(a14, a14, tmp0);
    __ eor(a19, a19, tmp0);
    __ eor(a24, a24, tmp0);
  }
  // rho + pi: rotate each lane and move it to its new position. The new
  // a7 (rol(old a10, 3)) is parked in tmp0 until a7's old value has
  // been consumed by the chain.
  __ rol(tmp0, a10, 3);
  __ rol(a10, a1, 1);
  __ rol(a1, a6, 44);
  __ rol(a6, a9, 20);
  __ rol(a9, a22, 61);
  __ rol(a22, a14, 39);
  __ rol(a14, a20, 18);
  __ rol(a20, a2, 62);
  __ rol(a2, a12, 43);
  __ rol(a12, a13, 25);
  __ rol(a13, a19, 8);
  __ rol(a19, a23, 56);
  __ rol(a23, a15, 41);
  __ rol(a15, a4, 27);
  __ rol(a4, a24, 14);
  __ rol(a24, a21, 2);
  __ rol(a21, a8, 55);
  __ rol(a8, a16, 45);
  __ rol(a16, a5, 36);
  __ rol(a5, a3, 28);
  __ rol(a3, a18, 21);
  __ rol(a18, a17, 15);
  __ rol(a17, a11, 10);
  __ rol(a11, a7, 6);
  __ mov(a7, tmp0); // a7 = rol(old a10, 3)

  // chi: applied plane by plane.
  bcax5(a0, a1, a2, a3, a4, tmp0, tmp1, tmp2);
  bcax5(a5, a6, a7, a8, a9, tmp0, tmp1, tmp2);
  bcax5(a10, a11, a12, a13, a14, tmp0, tmp1, tmp2);
  bcax5(a15, a16, a17, a18, a19, tmp0, tmp1, tmp2);
  bcax5(a20, a21, a22, a23, a24, tmp0, tmp1, tmp2);

  // iota: XOR the next round constant into lane (0,0); rc advances by 8.
  __ ldr(tmp1, __ post(rc, 8));
  __ eor(a0, a0, tmp1);
}
// Arguments:
//
// Inputs:
// c_rarg0 - byte[] source+offset
// c_rarg1 - byte[] SHA.state
// c_rarg2 - int block_size
// c_rarg3 - int offset
// c_rarg4 - int limit
//
// GPR-only (non-SIMD) SHA3/SHAKE compression stub: XORs one input block
// into the 25-lane Keccak state, held entirely in general-purpose
// registers, then runs the 24 rounds of Keccak-f[1600] via
// keccak_round_gpr(). The multi-block variant loops while
// offset + block_size <= limit and returns the updated offset in
// c_rarg0. The block size (72/104/136/144/168 bytes) selects how many
// lanes of input are absorbed per block.
address generate_sha3_implCompress_gpr(StubGenStubId stub_id) {
  // Single- vs multi-block behaviour is selected by the stub id.
  bool multi_block;
  switch (stub_id) {
  case sha3_implCompress_id:
    multi_block = false;
    break;
  case sha3_implCompressMB_id:
    multi_block = true;
    break;
  default:
    ShouldNotReachHere();
  }

  // The 24 Keccak-f[1600] round constants (consumed by the iota step).
  static const uint64_t round_consts[24] = {
    0x0000000000000001L, 0x0000000000008082L, 0x800000000000808AL,
    0x8000000080008000L, 0x000000000000808BL, 0x0000000080000001L,
    0x8000000080008081L, 0x8000000000008009L, 0x000000000000008AL,
    0x0000000000000088L, 0x0000000080008009L, 0x000000008000000AL,
    0x000000008000808BL, 0x800000000000008BL, 0x8000000000008089L,
    0x8000000000008003L, 0x8000000000008002L, 0x8000000000000080L,
    0x000000000000800AL, 0x800000008000000AL, 0x8000000080008081L,
    0x8000000000008080L, 0x0000000080000001L, 0x8000000080008008L
  };

  __ align(CodeEntryAlignment);
  StubCodeMark mark(this, stub_id);
  address start = __ pc();

  Register buf = c_rarg0;
  Register state = c_rarg1;
  Register block_size = c_rarg2;
  Register ofs = c_rarg3;
  Register limit = c_rarg4;

  // use r3.r17,r19..r28 to keep a0..a24.
  // a0..a24 are respective locals from SHA3.java
  Register a0 = r25,
      a1 = r26,
      a2 = r27,
      a3 = r3,
      a4 = r4,
      a5 = r5,
      a6 = r6,
      a7 = r7,
      a8 = rscratch1, // r8
      a9 = rscratch2, // r9
      a10 = r10,
      a11 = r11,
      a12 = r12,
      a13 = r13,
      a14 = r14,
      a15 = r15,
      a16 = r16,
      a17 = r17,
      a18 = r28,
      a19 = r19,
      a20 = r20,
      a21 = r21,
      a22 = r22,
      a23 = r23,
      a24 = r24;

  // tmp0..tmp2 alias the argument registers; the argument values they
  // clobber are saved on the stack below and reloaded where needed.
  Register tmp0 = block_size, tmp1 = buf, tmp2 = state, tmp3 = r30;

  Label sha3_loop, rounds24_preloop, loop_body;
  Label sha3_512_or_sha3_384, shake128;

  bool can_use_r18 = false;
#ifndef R18_RESERVED
  can_use_r18 = true;
#endif
  bool can_use_fp = !PreserveFramePointer;

  __ enter();

  // save almost all yet unsaved gpr registers on stack
  __ str(block_size, __ pre(sp, -128));
  if (multi_block) {
    __ stpw(ofs, limit, Address(sp, 8));
  }
  // 8 bytes at sp+16 will be used to keep buf
  __ stp(r19, r20, Address(sp, 32));
  __ stp(r21, r22, Address(sp, 48));
  __ stp(r23, r24, Address(sp, 64));
  __ stp(r25, r26, Address(sp, 80));
  __ stp(r27, r28, Address(sp, 96));
  if (can_use_r18 && can_use_fp) {
    __ stp(r18_tls, state, Address(sp, 112));
  } else {
    __ str(state, Address(sp, 112));
  }

  // begin sha3 calculations: loading a0..a24 from state array
  __ ldp(a0, a1, state);
  __ ldp(a2, a3, Address(state, 16));
  __ ldp(a4, a5, Address(state, 32));
  __ ldp(a6, a7, Address(state, 48));
  __ ldp(a8, a9, Address(state, 64));
  __ ldp(a10, a11, Address(state, 80));
  __ ldp(a12, a13, Address(state, 96));
  __ ldp(a14, a15, Address(state, 112));
  __ ldp(a16, a17, Address(state, 128));
  __ ldp(a18, a19, Address(state, 144));
  __ ldp(a20, a21, Address(state, 160));
  __ ldp(a22, a23, Address(state, 176));
  __ ldr(a24, Address(state, 192));

  __ BIND(sha3_loop);

  // load input: absorb the first 7 lanes (56 bytes), common to all
  // supported block sizes, then dispatch on block_size bits for the rest.
  __ ldp(tmp3, tmp2, __ post(buf, 16));
  __ eor(a0, a0, tmp3);
  __ eor(a1, a1, tmp2);
  __ ldp(tmp3, tmp2, __ post(buf, 16));
  __ eor(a2, a2, tmp3);
  __ eor(a3, a3, tmp2);
  __ ldp(tmp3, tmp2, __ post(buf, 16));
  __ eor(a4, a4, tmp3);
  __ eor(a5, a5, tmp2);
  __ ldr(tmp3, __ post(buf, 8));
  __ eor(a6, a6, tmp3);

  // block_size == 72, SHA3-512; block_size == 104, SHA3-384
  __ tbz(block_size, 7, sha3_512_or_sha3_384);

  // block_size >= 136: absorb lanes 7..16.
  __ ldp(tmp3, tmp2, __ post(buf, 16));
  __ eor(a7, a7, tmp3);
  __ eor(a8, a8, tmp2);
  __ ldp(tmp3, tmp2, __ post(buf, 16));
  __ eor(a9, a9, tmp3);
  __ eor(a10, a10, tmp2);
  __ ldp(tmp3, tmp2, __ post(buf, 16));
  __ eor(a11, a11, tmp3);
  __ eor(a12, a12, tmp2);
  __ ldp(tmp3, tmp2, __ post(buf, 16));
  __ eor(a13, a13, tmp3);
  __ eor(a14, a14, tmp2);
  __ ldp(tmp3, tmp2, __ post(buf, 16));
  __ eor(a15, a15, tmp3);
  __ eor(a16, a16, tmp2);

  // block_size == 136, bit4 == 0 and bit5 == 0, SHA3-256 or SHAKE256
  __ andw(tmp2, block_size, 48);
  __ cbzw(tmp2, rounds24_preloop);
  __ tbnz(block_size, 5, shake128);
  // block_size == 144, bit5 == 0, SHA3-224
  __ ldr(tmp3, __ post(buf, 8));
  __ eor(a17, a17, tmp3);
  __ b(rounds24_preloop);

  __ BIND(shake128);
  __ ldp(tmp3, tmp2, __ post(buf, 16));
  __ eor(a17, a17, tmp3);
  __ eor(a18, a18, tmp2);
  __ ldp(tmp3, tmp2, __ post(buf, 16));
  __ eor(a19, a19, tmp3);
  __ eor(a20, a20, tmp2);
  __ b(rounds24_preloop); // block_size == 168, SHAKE128

  __ BIND(sha3_512_or_sha3_384);
  __ ldp(tmp3, tmp2, __ post(buf, 16));
  __ eor(a7, a7, tmp3);
  __ eor(a8, a8, tmp2);
  __ tbz(block_size, 5, rounds24_preloop); // SHA3-512

  // SHA3-384
  __ ldp(tmp3, tmp2, __ post(buf, 16));
  __ eor(a9, a9, tmp3);
  __ eor(a10, a10, tmp2);
  __ ldp(tmp3, tmp2, __ post(buf, 16));
  __ eor(a11, a11, tmp3);
  __ eor(a12, a12, tmp2);

  __ BIND(rounds24_preloop);
  // All usable GPRs hold state lanes or scratch, so the 24-iteration
  // round counter is kept in an FP register: 24.0 counting down by 1.0,
  // every intermediate value exactly representable in a float.
  __ fmovs(v0, 24.0); // float loop counter,
  __ fmovs(v1, 1.0); // exact representation

  __ str(buf, Address(sp, 16));
  __ lea(tmp3, ExternalAddress((address) round_consts));

  __ BIND(loop_body);
  keccak_round_gpr(can_use_fp, can_use_r18, tmp3,
      a0, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12,
      a13, a14, a15, a16, a17, a18, a19, a20, a21, a22, a23, a24,
      tmp0, tmp1, tmp2);
  __ fsubs(v0, v0, v1);
  __ fcmps(v0, 0.0);
  __ br(__ NE, loop_body);

  if (multi_block) {
    // Advance offset by one block; loop while offset <= limit,
    // otherwise fall through and return the new offset.
    __ ldrw(block_size, sp); // block_size
    __ ldpw(tmp2, tmp1, Address(sp, 8)); // offset, limit
    __ addw(tmp2, tmp2, block_size);
    __ cmpw(tmp2, tmp1);
    __ strw(tmp2, Address(sp, 8)); // store offset in case we're jumping
    __ ldr(buf, Address(sp, 16)); // restore buf in case we're jumping
    __ br(Assembler::LE, sha3_loop);
    __ movw(c_rarg0, tmp2); // return offset
  }
  if (can_use_fp && can_use_r18) {
    __ ldp(r18_tls, state, Address(sp, 112));
  } else {
    __ ldr(state, Address(sp, 112));
  }

  // save calculated sha3 state
  __ stp(a0, a1, Address(state));
  __ stp(a2, a3, Address(state, 16));
  __ stp(a4, a5, Address(state, 32));
  __ stp(a6, a7, Address(state, 48));
  __ stp(a8, a9, Address(state, 64));
  __ stp(a10, a11, Address(state, 80));
  __ stp(a12, a13, Address(state, 96));
  __ stp(a14, a15, Address(state, 112));
  __ stp(a16, a17, Address(state, 128));
  __ stp(a18, a19, Address(state, 144));
  __ stp(a20, a21, Address(state, 160));
  __ stp(a22, a23, Address(state, 176));
  __ str(a24, Address(state, 192));

  // restore required registers from stack
  __ ldp(r19, r20, Address(sp, 32));
  __ ldp(r21, r22, Address(sp, 48));
  __ ldp(r23, r24, Address(sp, 64));
  __ ldp(r25, r26, Address(sp, 80));
  __ ldp(r27, r28, Address(sp, 96));
  if (can_use_fp && can_use_r18) {
    __ add(rfp, sp, 128); // leave() will copy rfp to sp below
  } // else no need to recalculate rfp, since it wasn't changed

  __ leave();
  __ ret(lr);

  return start;
}
/** /**
* Arguments: * Arguments:
* *
@ -11512,9 +11872,15 @@ class StubGenerator: public StubCodeGenerator {
StubRoutines::_sha512_implCompressMB = generate_sha512_implCompress(StubGenStubId::sha512_implCompressMB_id); StubRoutines::_sha512_implCompressMB = generate_sha512_implCompress(StubGenStubId::sha512_implCompressMB_id);
} }
if (UseSHA3Intrinsics) { if (UseSHA3Intrinsics) {
StubRoutines::_sha3_implCompress = generate_sha3_implCompress(StubGenStubId::sha3_implCompress_id);
StubRoutines::_double_keccak = generate_double_keccak(); StubRoutines::_double_keccak = generate_double_keccak();
StubRoutines::_sha3_implCompressMB = generate_sha3_implCompress(StubGenStubId::sha3_implCompressMB_id); if (UseSIMDForSHA3Intrinsic) {
StubRoutines::_sha3_implCompress = generate_sha3_implCompress(StubGenStubId::sha3_implCompress_id);
StubRoutines::_sha3_implCompressMB = generate_sha3_implCompress(StubGenStubId::sha3_implCompressMB_id);
} else {
StubRoutines::_sha3_implCompress = generate_sha3_implCompress_gpr(StubGenStubId::sha3_implCompress_id);
StubRoutines::_sha3_implCompressMB = generate_sha3_implCompress_gpr(StubGenStubId::sha3_implCompressMB_id);
}
} }
if (UsePoly1305Intrinsics) { if (UsePoly1305Intrinsics) {

View File

@ -1893,6 +1893,7 @@ void TemplateInterpreterGenerator::generate_throw_exception() {
Interpreter::_remove_activation_preserving_args_entry = __ pc(); Interpreter::_remove_activation_preserving_args_entry = __ pc();
__ empty_expression_stack(); __ empty_expression_stack();
__ restore_bcp(); // We could have returned from deoptimizing this frame, so restore rbcp.
// Set the popframe_processing bit in pending_popframe_condition // Set the popframe_processing bit in pending_popframe_condition
// indicating that we are currently handling popframe, so that // indicating that we are currently handling popframe, so that
// call_VMs that may happen later do not trigger new popframe // call_VMs that may happen later do not trigger new popframe

View File

@ -379,7 +379,7 @@ void VM_Version::initialize() {
FLAG_SET_DEFAULT(UseSHA3Intrinsics, true); FLAG_SET_DEFAULT(UseSHA3Intrinsics, true);
} }
} }
} else if (UseSHA3Intrinsics) { } else if (UseSHA3Intrinsics && UseSIMDForSHA3Intrinsic) {
warning("Intrinsics for SHA3-224, SHA3-256, SHA3-384 and SHA3-512 crypto hash functions not available on this CPU."); warning("Intrinsics for SHA3-224, SHA3-256, SHA3-384 and SHA3-512 crypto hash functions not available on this CPU.");
FLAG_SET_DEFAULT(UseSHA3Intrinsics, false); FLAG_SET_DEFAULT(UseSHA3Intrinsics, false);
} }

View File

@ -93,10 +93,15 @@ static size_t probe_valid_max_address_bit() {
size_t ZPlatformAddressOffsetBits() { size_t ZPlatformAddressOffsetBits() {
static const size_t valid_max_address_offset_bits = probe_valid_max_address_bit() + 1; static const size_t valid_max_address_offset_bits = probe_valid_max_address_bit() + 1;
const size_t max_address_offset_bits = valid_max_address_offset_bits - 3; const size_t max_address_offset_bits = valid_max_address_offset_bits - 3;
#ifdef ADDRESS_SANITIZER
// The max supported value is 44 because of other internal data structures.
return MIN2(valid_max_address_offset_bits, (size_t)44);
#else
const size_t min_address_offset_bits = max_address_offset_bits - 2; const size_t min_address_offset_bits = max_address_offset_bits - 2;
const size_t address_offset = ZGlobalsPointers::min_address_offset_request(); const size_t address_offset = ZGlobalsPointers::min_address_offset_request();
const size_t address_offset_bits = log2i_exact(address_offset); const size_t address_offset_bits = log2i_exact(address_offset);
return clamp(address_offset_bits, min_address_offset_bits, max_address_offset_bits); return clamp(address_offset_bits, min_address_offset_bits, max_address_offset_bits);
#endif
} }
size_t ZPlatformAddressHeapBaseShift() { size_t ZPlatformAddressHeapBaseShift() {

View File

@ -141,6 +141,7 @@ instruct zLoadP(iRegPdst dst, memoryAlg4 mem, flagsRegCR0 cr0)
%{ %{
match(Set dst (LoadP mem)); match(Set dst (LoadP mem));
effect(TEMP_DEF dst, KILL cr0); effect(TEMP_DEF dst, KILL cr0);
ins_is_late_expanded_null_check_candidate(true);
ins_cost(MEMORY_REF_COST); ins_cost(MEMORY_REF_COST);
predicate((UseZGC && n->as_Load()->barrier_data() != 0) predicate((UseZGC && n->as_Load()->barrier_data() != 0)
@ -160,6 +161,7 @@ instruct zLoadP_acq(iRegPdst dst, memoryAlg4 mem, flagsRegCR0 cr0)
%{ %{
match(Set dst (LoadP mem)); match(Set dst (LoadP mem));
effect(TEMP_DEF dst, KILL cr0); effect(TEMP_DEF dst, KILL cr0);
ins_is_late_expanded_null_check_candidate(true);
ins_cost(3 * MEMORY_REF_COST); ins_cost(3 * MEMORY_REF_COST);
// Predicate on instruction order is implicitly present due to the predicate of the cheaper zLoadP operation // Predicate on instruction order is implicitly present due to the predicate of the cheaper zLoadP operation

View File

@ -4036,6 +4036,10 @@ ins_attrib ins_field_cbuf_insts_offset(-1);
ins_attrib ins_field_load_ic_hi_node(0); ins_attrib ins_field_load_ic_hi_node(0);
ins_attrib ins_field_load_ic_node(0); ins_attrib ins_field_load_ic_node(0);
// Whether this node is expanded during code emission into a sequence of
// instructions and the first instruction can perform an implicit null check.
ins_attrib ins_is_late_expanded_null_check_candidate(false);
//----------OPERANDS----------------------------------------------------------- //----------OPERANDS-----------------------------------------------------------
// Operand definitions must precede instruction definitions for correct // Operand definitions must precede instruction definitions for correct
// parsing in the ADLC because operands constitute user defined types // parsing in the ADLC because operands constitute user defined types

View File

@ -523,7 +523,7 @@ constexpr FloatRegister F11_ARG11 = F11; // volatile
constexpr FloatRegister F12_ARG12 = F12; // volatile constexpr FloatRegister F12_ARG12 = F12; // volatile
constexpr FloatRegister F13_ARG13 = F13; // volatile constexpr FloatRegister F13_ARG13 = F13; // volatile
// Register declarations to be used in frame manager assembly code. // Register declarations to be used in template interpreter assembly code.
// Use only non-volatile registers in order to keep values across C-calls. // Use only non-volatile registers in order to keep values across C-calls.
constexpr Register R14_bcp = R14; constexpr Register R14_bcp = R14;
constexpr Register R15_esp = R15; // slot below top of expression stack for ld/st with update constexpr Register R15_esp = R15; // slot below top of expression stack for ld/st with update
@ -533,7 +533,7 @@ constexpr Register R17_tos = R17; // The interpreter's top of (expres
constexpr Register R18_locals = R18; // address of first param slot (receiver). constexpr Register R18_locals = R18; // address of first param slot (receiver).
constexpr Register R19_method = R19; // address of current method constexpr Register R19_method = R19; // address of current method
// Temporary registers to be used within frame manager. We can use // Temporary registers to be used within template interpreter. We can use
// the non-volatiles because the call stub has saved them. // the non-volatiles because the call stub has saved them.
// Use only non-volatile registers in order to keep values across C-calls. // Use only non-volatile registers in order to keep values across C-calls.
constexpr Register R21_tmp1 = R21; constexpr Register R21_tmp1 = R21;

View File

@ -2935,7 +2935,7 @@ static void push_skeleton_frames(MacroAssembler* masm, bool deopt,
__ cmpdi(CR0, number_of_frames_reg, 0); __ cmpdi(CR0, number_of_frames_reg, 0);
__ bne(CR0, loop); __ bne(CR0, loop);
// Get the return address pointing into the frame manager. // Get the return address pointing into the template interpreter.
__ ld(R0, 0, pcs_reg); __ ld(R0, 0, pcs_reg);
// Store it in the top interpreter frame. // Store it in the top interpreter frame.
__ std(R0, _abi0(lr), R1_SP); __ std(R0, _abi0(lr), R1_SP);

View File

@ -86,7 +86,7 @@ class StubGenerator: public StubCodeGenerator {
// R10 - thread : Thread* // R10 - thread : Thread*
// //
address generate_call_stub(address& return_address) { address generate_call_stub(address& return_address) {
// Setup a new c frame, copy java arguments, call frame manager or // Setup a new c frame, copy java arguments, call template interpreter or
// native_entry, and process result. // native_entry, and process result.
StubGenStubId stub_id = StubGenStubId::call_stub_id; StubGenStubId stub_id = StubGenStubId::call_stub_id;
@ -215,11 +215,10 @@ class StubGenerator: public StubCodeGenerator {
} }
{ {
BLOCK_COMMENT("Call frame manager or native entry."); BLOCK_COMMENT("Call template interpreter or native entry.");
// Call frame manager or native entry.
assert_different_registers(r_arg_entry, r_top_of_arguments_addr, r_arg_method, r_arg_thread); assert_different_registers(r_arg_entry, r_top_of_arguments_addr, r_arg_method, r_arg_thread);
// Register state on entry to frame manager / native entry: // Register state on entry to template interpreter / native entry:
// //
// tos - intptr_t* sender tos (prepushed) Lesp = (SP) + copied_arguments_offset - 8 // tos - intptr_t* sender tos (prepushed) Lesp = (SP) + copied_arguments_offset - 8
// R19_method - Method // R19_method - Method
@ -242,7 +241,7 @@ class StubGenerator: public StubCodeGenerator {
// Set R15_prev_state to 0 for simplifying checks in callee. // Set R15_prev_state to 0 for simplifying checks in callee.
__ load_const_optimized(R25_templateTableBase, (address)Interpreter::dispatch_table((TosState)0), R0); __ load_const_optimized(R25_templateTableBase, (address)Interpreter::dispatch_table((TosState)0), R0);
// Stack on entry to frame manager / native entry: // Stack on entry to template interpreter / native entry:
// //
// F0 [TOP_IJAVA_FRAME_ABI] // F0 [TOP_IJAVA_FRAME_ABI]
// alignment (optional) // alignment (optional)
@ -262,7 +261,7 @@ class StubGenerator: public StubCodeGenerator {
__ mr(R21_sender_SP, R1_SP); __ mr(R21_sender_SP, R1_SP);
// Do a light-weight C-call here, r_arg_entry holds the address // Do a light-weight C-call here, r_arg_entry holds the address
// of the interpreter entry point (frame manager or native entry) // of the interpreter entry point (template interpreter or native entry)
// and save runtime-value of LR in return_address. // and save runtime-value of LR in return_address.
assert(r_arg_entry != tos && r_arg_entry != R19_method && r_arg_entry != R16_thread, assert(r_arg_entry != tos && r_arg_entry != R19_method && r_arg_entry != R16_thread,
"trashed r_arg_entry"); "trashed r_arg_entry");
@ -270,11 +269,10 @@ class StubGenerator: public StubCodeGenerator {
} }
{ {
BLOCK_COMMENT("Returned from frame manager or native entry."); BLOCK_COMMENT("Returned from template interpreter or native entry.");
// Returned from frame manager or native entry.
// Now pop frame, process result, and return to caller. // Now pop frame, process result, and return to caller.
// Stack on exit from frame manager / native entry: // Stack on exit from template interpreter / native entry:
// //
// F0 [ABI] // F0 [ABI]
// ... // ...
@ -295,7 +293,7 @@ class StubGenerator: public StubCodeGenerator {
Register r_cr = R12_scratch2; Register r_cr = R12_scratch2;
// Reload some volatile registers which we've spilled before the call // Reload some volatile registers which we've spilled before the call
// to frame manager / native entry. // to template interpreter / native entry.
// Access all locals via frame pointer, because we know nothing about // Access all locals via frame pointer, because we know nothing about
// the topmost frame's size. // the topmost frame's size.
__ ld(r_entryframe_fp, _abi0(callers_sp), R1_SP); // restore after call __ ld(r_entryframe_fp, _abi0(callers_sp), R1_SP); // restore after call

View File

@ -961,81 +961,239 @@ protected:
#undef INSN #undef INSN
enum Aqrl {relaxed = 0b00, rl = 0b01, aq = 0b10, aqrl = 0b11}; enum Aqrl {relaxed = 0b00, rl = 0b01, aq = 0b10, aqrl = 0b11};
#define INSN(NAME, op, funct3, funct7) \ private:
void NAME(Register Rd, Register Rs1, Register Rs2, Aqrl memory_order = aqrl) { \
unsigned insn = 0; \ enum AmoWidthFunct3 : uint8_t {
patch((address)&insn, 6, 0, op); \ AMO_WIDTH_BYTE = 0b000, // Zabha extension
patch((address)&insn, 14, 12, funct3); \ AMO_WIDTH_HALFWORD = 0b001, // Zabha extension
patch_reg((address)&insn, 7, Rd); \ AMO_WIDTH_WORD = 0b010,
patch_reg((address)&insn, 15, Rs1); \ AMO_WIDTH_DOUBLEWORD = 0b011,
patch_reg((address)&insn, 20, Rs2); \ AMO_WIDTH_QUADWORD = 0b100,
patch((address)&insn, 31, 27, funct7); \ // 0b101 to 0b111 are reserved
patch((address)&insn, 26, 25, memory_order); \ };
emit(insn); \
enum AmoOperationFunct5 : uint8_t {
AMO_ADD = 0b00000,
AMO_SWAP = 0b00001,
AMO_LR = 0b00010,
AMO_SC = 0b00011,
AMO_XOR = 0b00100,
AMO_OR = 0b01000,
AMO_AND = 0b01100,
AMO_MIN = 0b10000,
AMO_MAX = 0b10100,
AMO_MINU = 0b11000,
AMO_MAXU = 0b11100,
AMO_CAS = 0b00101 // Zacas
};
static constexpr uint32_t OP_AMO_MAJOR = 0b0101111;
template <AmoOperationFunct5 funct5, AmoWidthFunct3 width>
void amo_base(Register Rd, Register Rs1, uint8_t Rs2, Aqrl memory_order = aqrl) {
assert(width > AMO_WIDTH_HALFWORD || UseZabha, "Must be");
assert(funct5 != AMO_CAS || UseZacas, "Must be");
unsigned insn = 0;
patch((address)&insn, 6, 0, OP_AMO_MAJOR);
patch_reg((address)&insn, 7, Rd);
patch((address)&insn, 14, 12, width);
patch_reg((address)&insn, 15, Rs1);
patch((address)&insn, 24, 20, Rs2);
patch((address)&insn, 26, 25, memory_order);
patch((address)&insn, 31, 27, funct5);
emit(insn);
} }
INSN(amoswap_w, 0b0101111, 0b010, 0b00001); template <AmoOperationFunct5 funct5, AmoWidthFunct3 width>
INSN(amoadd_w, 0b0101111, 0b010, 0b00000); void amo_base(Register Rd, Register Rs1, Register Rs2, Aqrl memory_order = aqrl) {
INSN(amoxor_w, 0b0101111, 0b010, 0b00100); amo_base<funct5, width>(Rd, Rs1, Rs2->raw_encoding(), memory_order);
INSN(amoand_w, 0b0101111, 0b010, 0b01100);
INSN(amoor_w, 0b0101111, 0b010, 0b01000);
INSN(amomin_w, 0b0101111, 0b010, 0b10000);
INSN(amomax_w, 0b0101111, 0b010, 0b10100);
INSN(amominu_w, 0b0101111, 0b010, 0b11000);
INSN(amomaxu_w, 0b0101111, 0b010, 0b11100);
INSN(amoswap_d, 0b0101111, 0b011, 0b00001);
INSN(amoadd_d, 0b0101111, 0b011, 0b00000);
INSN(amoxor_d, 0b0101111, 0b011, 0b00100);
INSN(amoand_d, 0b0101111, 0b011, 0b01100);
INSN(amoor_d, 0b0101111, 0b011, 0b01000);
INSN(amomin_d, 0b0101111, 0b011, 0b10000);
INSN(amomax_d , 0b0101111, 0b011, 0b10100);
INSN(amominu_d, 0b0101111, 0b011, 0b11000);
INSN(amomaxu_d, 0b0101111, 0b011, 0b11100);
INSN(amocas_w, 0b0101111, 0b010, 0b00101);
INSN(amocas_d, 0b0101111, 0b011, 0b00101);
#undef INSN
enum operand_size { int8, int16, int32, uint32, int64 };
#define INSN(NAME, op, funct3, funct7) \
void NAME(Register Rd, Register Rs1, Aqrl memory_order = relaxed) { \
unsigned insn = 0; \
uint32_t val = memory_order & 0x3; \
patch((address)&insn, 6, 0, op); \
patch((address)&insn, 14, 12, funct3); \
patch_reg((address)&insn, 7, Rd); \
patch_reg((address)&insn, 15, Rs1); \
patch((address)&insn, 25, 20, 0b00000); \
patch((address)&insn, 31, 27, funct7); \
patch((address)&insn, 26, 25, val); \
emit(insn); \
} }
INSN(lr_w, 0b0101111, 0b010, 0b00010); public:
INSN(lr_d, 0b0101111, 0b011, 0b00010);
#undef INSN void amoadd_b(Register Rd, Register Rs1, Register Rs2, Aqrl memory_order = aqrl) {
amo_base<AMO_ADD, AMO_WIDTH_BYTE>(Rd, Rs1, Rs2, memory_order);
#define INSN(NAME, op, funct3, funct7) \
void NAME(Register Rd, Register Rs1, Register Rs2, Aqrl memory_order = relaxed) { \
unsigned insn = 0; \
uint32_t val = memory_order & 0x3; \
patch((address)&insn, 6, 0, op); \
patch((address)&insn, 14, 12, funct3); \
patch_reg((address)&insn, 7, Rd); \
patch_reg((address)&insn, 15, Rs2); \
patch_reg((address)&insn, 20, Rs1); \
patch((address)&insn, 31, 27, funct7); \
patch((address)&insn, 26, 25, val); \
emit(insn); \
} }
INSN(sc_w, 0b0101111, 0b010, 0b00011); void amoadd_h(Register Rd, Register Rs1, Register Rs2, Aqrl memory_order = aqrl) {
INSN(sc_d, 0b0101111, 0b011, 0b00011); amo_base<AMO_ADD, AMO_WIDTH_HALFWORD>(Rd, Rs1, Rs2, memory_order);
#undef INSN }
void amoadd_w(Register Rd, Register Rs1, Register Rs2, Aqrl memory_order = aqrl) {
amo_base<AMO_ADD, AMO_WIDTH_WORD>(Rd, Rs1, Rs2, memory_order);
}
void amoadd_d(Register Rd, Register Rs1, Register Rs2, Aqrl memory_order = aqrl) {
amo_base<AMO_ADD, AMO_WIDTH_DOUBLEWORD>(Rd, Rs1, Rs2, memory_order);
}
void amoswap_b(Register Rd, Register Rs1, Register Rs2, Aqrl memory_order = aqrl) {
amo_base<AMO_SWAP, AMO_WIDTH_BYTE>(Rd, Rs1, Rs2, memory_order);
}
void amoswap_h(Register Rd, Register Rs1, Register Rs2, Aqrl memory_order = aqrl) {
amo_base<AMO_SWAP, AMO_WIDTH_HALFWORD>(Rd, Rs1, Rs2, memory_order);
}
void amoswap_w(Register Rd, Register Rs1, Register Rs2, Aqrl memory_order = aqrl) {
amo_base<AMO_SWAP, AMO_WIDTH_WORD>(Rd, Rs1, Rs2, memory_order);
}
void amoswap_d(Register Rd, Register Rs1, Register Rs2, Aqrl memory_order = aqrl) {
amo_base<AMO_SWAP, AMO_WIDTH_DOUBLEWORD>(Rd, Rs1, Rs2, memory_order);
}
void amoxor_b(Register Rd, Register Rs1, Register Rs2, Aqrl memory_order = aqrl) {
amo_base<AMO_XOR, AMO_WIDTH_BYTE>(Rd, Rs1, Rs2, memory_order);
}
void amoxor_h(Register Rd, Register Rs1, Register Rs2, Aqrl memory_order = aqrl) {
amo_base<AMO_XOR, AMO_WIDTH_HALFWORD>(Rd, Rs1, Rs2, memory_order);
}
void amoxor_w(Register Rd, Register Rs1, Register Rs2, Aqrl memory_order = aqrl) {
amo_base<AMO_XOR, AMO_WIDTH_WORD>(Rd, Rs1, Rs2, memory_order);
}
void amoxor_d(Register Rd, Register Rs1, Register Rs2, Aqrl memory_order = aqrl) {
amo_base<AMO_XOR, AMO_WIDTH_DOUBLEWORD>(Rd, Rs1, Rs2, memory_order);
}
void amoor_b(Register Rd, Register Rs1, Register Rs2, Aqrl memory_order = aqrl) {
amo_base<AMO_OR, AMO_WIDTH_BYTE>(Rd, Rs1, Rs2, memory_order);
}
void amoor_h(Register Rd, Register Rs1, Register Rs2, Aqrl memory_order = aqrl) {
amo_base<AMO_OR, AMO_WIDTH_HALFWORD>(Rd, Rs1, Rs2, memory_order);
}
void amoor_w(Register Rd, Register Rs1, Register Rs2, Aqrl memory_order = aqrl) {
amo_base<AMO_OR, AMO_WIDTH_WORD>(Rd, Rs1, Rs2, memory_order);
}
void amoor_d(Register Rd, Register Rs1, Register Rs2, Aqrl memory_order = aqrl) {
amo_base<AMO_OR, AMO_WIDTH_DOUBLEWORD>(Rd, Rs1, Rs2, memory_order);
}
void amoand_b(Register Rd, Register Rs1, Register Rs2, Aqrl memory_order = aqrl) {
amo_base<AMO_AND, AMO_WIDTH_BYTE>(Rd, Rs1, Rs2, memory_order);
}
void amoand_h(Register Rd, Register Rs1, Register Rs2, Aqrl memory_order = aqrl) {
amo_base<AMO_AND, AMO_WIDTH_HALFWORD>(Rd, Rs1, Rs2, memory_order);
}
void amoand_w(Register Rd, Register Rs1, Register Rs2, Aqrl memory_order = aqrl) {
amo_base<AMO_AND, AMO_WIDTH_WORD>(Rd, Rs1, Rs2, memory_order);
}
void amoand_d(Register Rd, Register Rs1, Register Rs2, Aqrl memory_order = aqrl) {
amo_base<AMO_AND, AMO_WIDTH_DOUBLEWORD>(Rd, Rs1, Rs2, memory_order);
}
void amomin_b(Register Rd, Register Rs1, Register Rs2, Aqrl memory_order = aqrl) {
amo_base<AMO_MIN, AMO_WIDTH_BYTE>(Rd, Rs1, Rs2, memory_order);
}
void amomin_h(Register Rd, Register Rs1, Register Rs2, Aqrl memory_order = aqrl) {
amo_base<AMO_MIN, AMO_WIDTH_HALFWORD>(Rd, Rs1, Rs2, memory_order);
}
void amomin_w(Register Rd, Register Rs1, Register Rs2, Aqrl memory_order = aqrl) {
amo_base<AMO_MIN, AMO_WIDTH_WORD>(Rd, Rs1, Rs2, memory_order);
}
void amomin_d(Register Rd, Register Rs1, Register Rs2, Aqrl memory_order = aqrl) {
amo_base<AMO_MIN, AMO_WIDTH_DOUBLEWORD>(Rd, Rs1, Rs2, memory_order);
}
void amominu_b(Register Rd, Register Rs1, Register Rs2, Aqrl memory_order = aqrl) {
amo_base<AMO_MINU, AMO_WIDTH_BYTE>(Rd, Rs1, Rs2, memory_order);
}
void amominu_h(Register Rd, Register Rs1, Register Rs2, Aqrl memory_order = aqrl) {
amo_base<AMO_MINU, AMO_WIDTH_HALFWORD>(Rd, Rs1, Rs2, memory_order);
}
void amominu_w(Register Rd, Register Rs1, Register Rs2, Aqrl memory_order = aqrl) {
amo_base<AMO_MINU, AMO_WIDTH_WORD>(Rd, Rs1, Rs2, memory_order);
}
void amominu_d(Register Rd, Register Rs1, Register Rs2, Aqrl memory_order = aqrl) {
amo_base<AMO_MINU, AMO_WIDTH_DOUBLEWORD>(Rd, Rs1, Rs2, memory_order);
}
void amomax_b(Register Rd, Register Rs1, Register Rs2, Aqrl memory_order = aqrl) {
amo_base<AMO_MAX, AMO_WIDTH_BYTE>(Rd, Rs1, Rs2, memory_order);
}
void amomax_h(Register Rd, Register Rs1, Register Rs2, Aqrl memory_order = aqrl) {
amo_base<AMO_MAX, AMO_WIDTH_HALFWORD>(Rd, Rs1, Rs2, memory_order);
}
void amomax_w(Register Rd, Register Rs1, Register Rs2, Aqrl memory_order = aqrl) {
amo_base<AMO_MAX, AMO_WIDTH_WORD>(Rd, Rs1, Rs2, memory_order);
}
void amomax_d(Register Rd, Register Rs1, Register Rs2, Aqrl memory_order = aqrl) {
amo_base<AMO_MAX, AMO_WIDTH_DOUBLEWORD>(Rd, Rs1, Rs2, memory_order);
}
void amomaxu_b(Register Rd, Register Rs1, Register Rs2, Aqrl memory_order = aqrl) {
amo_base<AMO_MAXU, AMO_WIDTH_BYTE>(Rd, Rs1, Rs2, memory_order);
}
void amomaxu_h(Register Rd, Register Rs1, Register Rs2, Aqrl memory_order = aqrl) {
amo_base<AMO_MAXU, AMO_WIDTH_HALFWORD>(Rd, Rs1, Rs2, memory_order);
}
void amomaxu_w(Register Rd, Register Rs1, Register Rs2, Aqrl memory_order = aqrl) {
amo_base<AMO_MAXU, AMO_WIDTH_WORD>(Rd, Rs1, Rs2, memory_order);
}
void amomaxu_d(Register Rd, Register Rs1, Register Rs2, Aqrl memory_order = aqrl) {
amo_base<AMO_MAXU, AMO_WIDTH_DOUBLEWORD>(Rd, Rs1, Rs2, memory_order);
}
protected:
void lr_w(Register Rd, Register Rs1, Aqrl memory_order = aqrl) {
amo_base<AMO_LR, AMO_WIDTH_WORD>(Rd, Rs1, 0, memory_order);
}
void lr_d(Register Rd, Register Rs1, Aqrl memory_order = aqrl) {
amo_base<AMO_LR, AMO_WIDTH_DOUBLEWORD>(Rd, Rs1, 0, memory_order);
}
void sc_w(Register Rd, Register Rs1, Register Rs2, Aqrl memory_order = aqrl) {
amo_base<AMO_SC, AMO_WIDTH_WORD>(Rd, Rs1, Rs2, memory_order);
}
void sc_d(Register Rd, Register Rs1, Register Rs2, Aqrl memory_order = aqrl) {
amo_base<AMO_SC, AMO_WIDTH_DOUBLEWORD>(Rd, Rs1, Rs2, memory_order);
}
void amocas_b(Register Rd, Register Rs1, Register Rs2, Aqrl memory_order = aqrl) {
amo_base<AMO_CAS, AMO_WIDTH_BYTE>(Rd, Rs1, Rs2, memory_order);
}
void amocas_h(Register Rd, Register Rs1, Register Rs2, Aqrl memory_order = aqrl) {
amo_base<AMO_CAS, AMO_WIDTH_HALFWORD>(Rd, Rs1, Rs2, memory_order);
}
void amocas_w(Register Rd, Register Rs1, Register Rs2, Aqrl memory_order = aqrl) {
amo_base<AMO_CAS, AMO_WIDTH_WORD>(Rd, Rs1, Rs2, memory_order);
}
void amocas_d(Register Rd, Register Rs1, Register Rs2, Aqrl memory_order = aqrl) {
amo_base<AMO_CAS, AMO_WIDTH_DOUBLEWORD>(Rd, Rs1, Rs2, memory_order);
}
public:
enum operand_size { int8, int16, int32, uint32, int64 };
// Immediate Instruction // Immediate Instruction
#define INSN(NAME, op, funct3) \ #define INSN(NAME, op, funct3) \

View File

@ -96,6 +96,7 @@ instruct zLoadP(iRegPNoSp dst, memory mem, iRegPNoSp tmp, rFlagsReg cr)
match(Set dst (LoadP mem)); match(Set dst (LoadP mem));
predicate(UseZGC && n->as_Load()->barrier_data() != 0); predicate(UseZGC && n->as_Load()->barrier_data() != 0);
effect(TEMP dst, TEMP tmp, KILL cr); effect(TEMP dst, TEMP tmp, KILL cr);
ins_is_late_expanded_null_check_candidate(true);
ins_cost(4 * DEFAULT_COST); ins_cost(4 * DEFAULT_COST);

View File

@ -107,6 +107,7 @@ define_pd_global(intx, InlineSmallCode, 1000);
product(bool, UseZfh, false, DIAGNOSTIC, "Use Zfh instructions") \ product(bool, UseZfh, false, DIAGNOSTIC, "Use Zfh instructions") \
product(bool, UseZfhmin, false, DIAGNOSTIC, "Use Zfhmin instructions") \ product(bool, UseZfhmin, false, DIAGNOSTIC, "Use Zfhmin instructions") \
product(bool, UseZacas, false, EXPERIMENTAL, "Use Zacas instructions") \ product(bool, UseZacas, false, EXPERIMENTAL, "Use Zacas instructions") \
product(bool, UseZabha, false, EXPERIMENTAL, "Use UseZabha instructions") \
product(bool, UseZcb, false, EXPERIMENTAL, "Use Zcb instructions") \ product(bool, UseZcb, false, EXPERIMENTAL, "Use Zcb instructions") \
product(bool, UseZic64b, false, EXPERIMENTAL, "Use Zic64b instructions") \ product(bool, UseZic64b, false, EXPERIMENTAL, "Use Zic64b instructions") \
product(bool, UseZicbom, false, EXPERIMENTAL, "Use Zicbom instructions") \ product(bool, UseZicbom, false, EXPERIMENTAL, "Use Zicbom instructions") \

View File

@ -955,47 +955,29 @@ void InterpreterMacroAssembler::set_mdp_data_at(Register mdp_in,
void InterpreterMacroAssembler::increment_mdp_data_at(Register mdp_in, void InterpreterMacroAssembler::increment_mdp_data_at(Register mdp_in,
int constant, int constant) {
bool decrement) { increment_mdp_data_at(mdp_in, noreg, constant);
increment_mdp_data_at(mdp_in, noreg, constant, decrement);
} }
void InterpreterMacroAssembler::increment_mdp_data_at(Register mdp_in, void InterpreterMacroAssembler::increment_mdp_data_at(Register mdp_in,
Register reg, Register index,
int constant, int constant) {
bool decrement) {
assert(ProfileInterpreter, "must be profiling interpreter"); assert(ProfileInterpreter, "must be profiling interpreter");
// %%% this does 64bit counters at best it is wasting space
// at worst it is a rare bug when counters overflow
assert_different_registers(t1, t0, mdp_in, reg); assert_different_registers(t1, t0, mdp_in, index);
Address addr1(mdp_in, constant); Address addr1(mdp_in, constant);
Address addr2(t1, 0); Address addr2(t1, 0);
Address &addr = addr1; Address &addr = addr1;
if (reg != noreg) { if (index != noreg) {
la(t1, addr1); la(t1, addr1);
add(t1, t1, reg); add(t1, t1, index);
addr = addr2; addr = addr2;
} }
if (decrement) { ld(t0, addr);
ld(t0, addr); addi(t0, t0, DataLayout::counter_increment);
subi(t0, t0, DataLayout::counter_increment); sd(t0, addr);
Label L;
bltz(t0, L); // skip store if counter underflow
sd(t0, addr);
bind(L);
} else {
assert(DataLayout::counter_increment == 1,
"flow-free idiom only works with 1");
ld(t0, addr);
addi(t0, t0, DataLayout::counter_increment);
Label L;
blez(t0, L); // skip store if counter overflow
sd(t0, addr);
bind(L);
}
} }
void InterpreterMacroAssembler::set_mdp_flag_at(Register mdp_in, void InterpreterMacroAssembler::set_mdp_flag_at(Register mdp_in,

View File

@ -233,11 +233,8 @@ class InterpreterMacroAssembler: public MacroAssembler {
void verify_method_data_pointer(); void verify_method_data_pointer();
void set_mdp_data_at(Register mdp_in, int constant, Register value); void set_mdp_data_at(Register mdp_in, int constant, Register value);
void increment_mdp_data_at(Address data, bool decrement = false); void increment_mdp_data_at(Register mdp_in, int constant);
void increment_mdp_data_at(Register mdp_in, int constant, void increment_mdp_data_at(Register mdp_in, Register index, int constant);
bool decrement = false);
void increment_mdp_data_at(Register mdp_in, Register reg, int constant,
bool decrement = false);
void increment_mask_and_jump(Address counter_addr, void increment_mask_and_jump(Address counter_addr,
int increment, Address mask, int increment, Address mask,
Register tmp1, Register tmp2, Register tmp1, Register tmp2,

View File

@ -3798,7 +3798,7 @@ void MacroAssembler::cmpxchg_obj_header(Register oldv, Register newv, Register o
void MacroAssembler::load_reserved(Register dst, void MacroAssembler::load_reserved(Register dst,
Register addr, Register addr,
enum operand_size size, Assembler::operand_size size,
Assembler::Aqrl acquire) { Assembler::Aqrl acquire) {
switch (size) { switch (size) {
case int64: case int64:
@ -3819,15 +3819,15 @@ void MacroAssembler::load_reserved(Register dst,
void MacroAssembler::store_conditional(Register dst, void MacroAssembler::store_conditional(Register dst,
Register new_val, Register new_val,
Register addr, Register addr,
enum operand_size size, Assembler::operand_size size,
Assembler::Aqrl release) { Assembler::Aqrl release) {
switch (size) { switch (size) {
case int64: case int64:
sc_d(dst, new_val, addr, release); sc_d(dst, addr, new_val, release);
break; break;
case int32: case int32:
case uint32: case uint32:
sc_w(dst, new_val, addr, release); sc_w(dst, addr, new_val, release);
break; break;
default: default:
ShouldNotReachHere(); ShouldNotReachHere();
@ -3836,7 +3836,7 @@ void MacroAssembler::store_conditional(Register dst,
void MacroAssembler::cmpxchg_narrow_value_helper(Register addr, Register expected, Register new_val, void MacroAssembler::cmpxchg_narrow_value_helper(Register addr, Register expected, Register new_val,
enum operand_size size, Assembler::operand_size size,
Register shift, Register mask, Register aligned_addr) { Register shift, Register mask, Register aligned_addr) {
assert(size == int8 || size == int16, "unsupported operand size"); assert(size == int8 || size == int16, "unsupported operand size");
@ -3866,10 +3866,11 @@ void MacroAssembler::cmpxchg_narrow_value_helper(Register addr, Register expecte
// which are forced to work with 4-byte aligned address. // which are forced to work with 4-byte aligned address.
void MacroAssembler::cmpxchg_narrow_value(Register addr, Register expected, void MacroAssembler::cmpxchg_narrow_value(Register addr, Register expected,
Register new_val, Register new_val,
enum operand_size size, Assembler::operand_size size,
Assembler::Aqrl acquire, Assembler::Aqrl release, Assembler::Aqrl acquire, Assembler::Aqrl release,
Register result, bool result_as_bool, Register result, bool result_as_bool,
Register tmp1, Register tmp2, Register tmp3) { Register tmp1, Register tmp2, Register tmp3) {
assert(!(UseZacas && UseZabha), "Use amocas");
assert_different_registers(addr, expected, new_val, result, tmp1, tmp2, tmp3, t0, t1); assert_different_registers(addr, expected, new_val, result, tmp1, tmp2, tmp3, t0, t1);
Register scratch0 = t0, aligned_addr = t1; Register scratch0 = t0, aligned_addr = t1;
@ -3902,13 +3903,13 @@ void MacroAssembler::cmpxchg_narrow_value(Register addr, Register expected,
notr(scratch1, mask); notr(scratch1, mask);
bind(retry); bind(retry);
lr_w(result, aligned_addr, acquire); load_reserved(result, aligned_addr, operand_size::int32, acquire);
andr(scratch0, result, mask); andr(scratch0, result, mask);
bne(scratch0, expected, fail); bne(scratch0, expected, fail);
andr(scratch0, result, scratch1); // scratch1 is ~mask andr(scratch0, result, scratch1); // scratch1 is ~mask
orr(scratch0, scratch0, new_val); orr(scratch0, scratch0, new_val);
sc_w(scratch0, scratch0, aligned_addr, release); store_conditional(scratch0, scratch0, aligned_addr, operand_size::int32, release);
bnez(scratch0, retry); bnez(scratch0, retry);
} }
@ -3940,10 +3941,11 @@ void MacroAssembler::cmpxchg_narrow_value(Register addr, Register expected,
// failed. // failed.
void MacroAssembler::weak_cmpxchg_narrow_value(Register addr, Register expected, void MacroAssembler::weak_cmpxchg_narrow_value(Register addr, Register expected,
Register new_val, Register new_val,
enum operand_size size, Assembler::operand_size size,
Assembler::Aqrl acquire, Assembler::Aqrl release, Assembler::Aqrl acquire, Assembler::Aqrl release,
Register result, Register result,
Register tmp1, Register tmp2, Register tmp3) { Register tmp1, Register tmp2, Register tmp3) {
assert(!(UseZacas && UseZabha), "Use amocas");
assert_different_registers(addr, expected, new_val, result, tmp1, tmp2, tmp3, t0, t1); assert_different_registers(addr, expected, new_val, result, tmp1, tmp2, tmp3, t0, t1);
Register scratch0 = t0, aligned_addr = t1; Register scratch0 = t0, aligned_addr = t1;
@ -3974,13 +3976,13 @@ void MacroAssembler::weak_cmpxchg_narrow_value(Register addr, Register expected,
} else { } else {
notr(scratch1, mask); notr(scratch1, mask);
lr_w(result, aligned_addr, acquire); load_reserved(result, aligned_addr, operand_size::int32, acquire);
andr(scratch0, result, mask); andr(scratch0, result, mask);
bne(scratch0, expected, fail); bne(scratch0, expected, fail);
andr(scratch0, result, scratch1); // scratch1 is ~mask andr(scratch0, result, scratch1); // scratch1 is ~mask
orr(scratch0, scratch0, new_val); orr(scratch0, scratch0, new_val);
sc_w(scratch0, scratch0, aligned_addr, release); store_conditional(scratch0, scratch0, aligned_addr, operand_size::int32, release);
bnez(scratch0, fail); bnez(scratch0, fail);
} }
@ -3997,10 +3999,10 @@ void MacroAssembler::weak_cmpxchg_narrow_value(Register addr, Register expected,
void MacroAssembler::cmpxchg(Register addr, Register expected, void MacroAssembler::cmpxchg(Register addr, Register expected,
Register new_val, Register new_val,
enum operand_size size, Assembler::operand_size size,
Assembler::Aqrl acquire, Assembler::Aqrl release, Assembler::Aqrl acquire, Assembler::Aqrl release,
Register result, bool result_as_bool) { Register result, bool result_as_bool) {
assert(size != int8 && size != int16, "unsupported operand size"); assert((UseZacas && UseZabha) || (size != int8 && size != int16), "unsupported operand size");
assert_different_registers(addr, t0); assert_different_registers(addr, t0);
assert_different_registers(expected, t0); assert_different_registers(expected, t0);
assert_different_registers(new_val, t0); assert_different_registers(new_val, t0);
@ -4058,10 +4060,10 @@ void MacroAssembler::cmpxchg(Register addr, Register expected,
void MacroAssembler::weak_cmpxchg(Register addr, Register expected, void MacroAssembler::weak_cmpxchg(Register addr, Register expected,
Register new_val, Register new_val,
enum operand_size size, Assembler::operand_size size,
Assembler::Aqrl acquire, Assembler::Aqrl release, Assembler::Aqrl acquire, Assembler::Aqrl release,
Register result) { Register result) {
assert((UseZacas && UseZabha) || (size != int8 && size != int16), "unsupported operand size");
assert_different_registers(addr, t0); assert_different_registers(addr, t0);
assert_different_registers(expected, t0); assert_different_registers(expected, t0);
assert_different_registers(new_val, t0); assert_different_registers(new_val, t0);
@ -4134,7 +4136,7 @@ ATOMIC_XCHGU(xchgalwu, xchgalw)
#undef ATOMIC_XCHGU #undef ATOMIC_XCHGU
void MacroAssembler::atomic_cas(Register prev, Register newv, Register addr, void MacroAssembler::atomic_cas(Register prev, Register newv, Register addr,
enum operand_size size, Assembler::Aqrl acquire, Assembler::Aqrl release) { Assembler::operand_size size, Assembler::Aqrl acquire, Assembler::Aqrl release) {
switch (size) { switch (size) {
case int64: case int64:
amocas_d(prev, addr, newv, (Assembler::Aqrl)(acquire | release)); amocas_d(prev, addr, newv, (Assembler::Aqrl)(acquire | release));
@ -4146,6 +4148,12 @@ void MacroAssembler::atomic_cas(Register prev, Register newv, Register addr,
amocas_w(prev, addr, newv, (Assembler::Aqrl)(acquire | release)); amocas_w(prev, addr, newv, (Assembler::Aqrl)(acquire | release));
zext(prev, prev, 32); zext(prev, prev, 32);
break; break;
case int16:
amocas_h(prev, addr, newv, (Assembler::Aqrl)(acquire | release));
break;
case int8:
amocas_b(prev, addr, newv, (Assembler::Aqrl)(acquire | release));
break;
default: default:
ShouldNotReachHere(); ShouldNotReachHere();
} }

View File

@ -666,7 +666,7 @@ class MacroAssembler: public Assembler {
// We try to follow risc-v asm menomics. // We try to follow risc-v asm menomics.
// But as we don't layout a reachable GOT, // But as we don't layout a reachable GOT,
// we often need to resort to movptr, li <48imm>. // we often need to resort to movptr, li <48imm>.
// https://github.com/riscv-non-isa/riscv-asm-manual/blob/master/riscv-asm.md // https://github.com/riscv-non-isa/riscv-asm-manual/blob/main/src/asm-manual.adoc
// Hotspot only use the standard calling convention using x1/ra. // Hotspot only use the standard calling convention using x1/ra.
// The alternative calling convection using x5/t0 is not used. // The alternative calling convection using x5/t0 is not used.
@ -1187,26 +1187,26 @@ public:
void cmpxchgptr(Register oldv, Register newv, Register addr, Register tmp, Label &succeed, Label *fail); void cmpxchgptr(Register oldv, Register newv, Register addr, Register tmp, Label &succeed, Label *fail);
void cmpxchg(Register addr, Register expected, void cmpxchg(Register addr, Register expected,
Register new_val, Register new_val,
enum operand_size size, Assembler::operand_size size,
Assembler::Aqrl acquire, Assembler::Aqrl release, Assembler::Aqrl acquire, Assembler::Aqrl release,
Register result, bool result_as_bool = false); Register result, bool result_as_bool = false);
void weak_cmpxchg(Register addr, Register expected, void weak_cmpxchg(Register addr, Register expected,
Register new_val, Register new_val,
enum operand_size size, Assembler::operand_size size,
Assembler::Aqrl acquire, Assembler::Aqrl release, Assembler::Aqrl acquire, Assembler::Aqrl release,
Register result); Register result);
void cmpxchg_narrow_value_helper(Register addr, Register expected, Register new_val, void cmpxchg_narrow_value_helper(Register addr, Register expected, Register new_val,
enum operand_size size, Assembler::operand_size size,
Register shift, Register mask, Register aligned_addr); Register shift, Register mask, Register aligned_addr);
void cmpxchg_narrow_value(Register addr, Register expected, void cmpxchg_narrow_value(Register addr, Register expected,
Register new_val, Register new_val,
enum operand_size size, Assembler::operand_size size,
Assembler::Aqrl acquire, Assembler::Aqrl release, Assembler::Aqrl acquire, Assembler::Aqrl release,
Register result, bool result_as_bool, Register result, bool result_as_bool,
Register tmp1, Register tmp2, Register tmp3); Register tmp1, Register tmp2, Register tmp3);
void weak_cmpxchg_narrow_value(Register addr, Register expected, void weak_cmpxchg_narrow_value(Register addr, Register expected,
Register new_val, Register new_val,
enum operand_size size, Assembler::operand_size size,
Assembler::Aqrl acquire, Assembler::Aqrl release, Assembler::Aqrl acquire, Assembler::Aqrl release,
Register result, Register result,
Register tmp1, Register tmp2, Register tmp3); Register tmp1, Register tmp2, Register tmp3);
@ -1223,7 +1223,7 @@ public:
void atomic_xchgwu(Register prev, Register newv, Register addr); void atomic_xchgwu(Register prev, Register newv, Register addr);
void atomic_xchgalwu(Register prev, Register newv, Register addr); void atomic_xchgalwu(Register prev, Register newv, Register addr);
void atomic_cas(Register prev, Register newv, Register addr, enum operand_size size, void atomic_cas(Register prev, Register newv, Register addr, Assembler::operand_size size,
Assembler::Aqrl acquire = Assembler::relaxed, Assembler::Aqrl release = Assembler::relaxed); Assembler::Aqrl acquire = Assembler::relaxed, Assembler::Aqrl release = Assembler::relaxed);
// Emit a far call/jump. Only invalidates the tmp register which // Emit a far call/jump. Only invalidates the tmp register which
@ -1636,8 +1636,8 @@ private:
int bitset_to_regs(unsigned int bitset, unsigned char* regs); int bitset_to_regs(unsigned int bitset, unsigned char* regs);
Address add_memory_helper(const Address dst, Register tmp); Address add_memory_helper(const Address dst, Register tmp);
void load_reserved(Register dst, Register addr, enum operand_size size, Assembler::Aqrl acquire); void load_reserved(Register dst, Register addr, Assembler::operand_size size, Assembler::Aqrl acquire);
void store_conditional(Register dst, Register new_val, Register addr, enum operand_size size, Assembler::Aqrl release); void store_conditional(Register dst, Register new_val, Register addr, Assembler::operand_size size, Assembler::Aqrl release);
public: public:
void lightweight_lock(Register basic_lock, Register obj, Register tmp1, Register tmp2, Register tmp3, Label& slow); void lightweight_lock(Register basic_lock, Register obj, Register tmp1, Register tmp2, Register tmp3, Label& slow);

View File

@ -2304,42 +2304,6 @@ encode %{
} }
%} %}
enc_class riscv_enc_cmpxchgw(iRegINoSp res, memory mem, iRegI oldval, iRegI newval) %{
__ cmpxchg(as_Register($mem$$base), $oldval$$Register, $newval$$Register, Assembler::int32,
/*acquire*/ Assembler::relaxed, /*release*/ Assembler::rl, $res$$Register,
/*result as bool*/ true);
%}
enc_class riscv_enc_cmpxchgn(iRegINoSp res, memory mem, iRegI oldval, iRegI newval) %{
__ cmpxchg(as_Register($mem$$base), $oldval$$Register, $newval$$Register, Assembler::uint32,
/*acquire*/ Assembler::relaxed, /*release*/ Assembler::rl, $res$$Register,
/*result as bool*/ true);
%}
enc_class riscv_enc_cmpxchg(iRegINoSp res, memory mem, iRegL oldval, iRegL newval) %{
__ cmpxchg(as_Register($mem$$base), $oldval$$Register, $newval$$Register, Assembler::int64,
/*acquire*/ Assembler::relaxed, /*release*/ Assembler::rl, $res$$Register,
/*result as bool*/ true);
%}
enc_class riscv_enc_cmpxchgw_acq(iRegINoSp res, memory mem, iRegI oldval, iRegI newval) %{
__ cmpxchg(as_Register($mem$$base), $oldval$$Register, $newval$$Register, Assembler::int32,
/*acquire*/ Assembler::aq, /*release*/ Assembler::rl, $res$$Register,
/*result as bool*/ true);
%}
enc_class riscv_enc_cmpxchgn_acq(iRegINoSp res, memory mem, iRegI oldval, iRegI newval) %{
__ cmpxchg(as_Register($mem$$base), $oldval$$Register, $newval$$Register, Assembler::uint32,
/*acquire*/ Assembler::aq, /*release*/ Assembler::rl, $res$$Register,
/*result as bool*/ true);
%}
enc_class riscv_enc_cmpxchg_acq(iRegINoSp res, memory mem, iRegL oldval, iRegL newval) %{
__ cmpxchg(as_Register($mem$$base), $oldval$$Register, $newval$$Register, Assembler::int64,
/*acquire*/ Assembler::aq, /*release*/ Assembler::rl, $res$$Register,
/*result as bool*/ true);
%}
// compare and branch instruction encodings // compare and branch instruction encodings
enc_class riscv_enc_j(label lbl) %{ enc_class riscv_enc_j(label lbl) %{
@ -2655,6 +2619,10 @@ ins_attrib ins_alignment(4); // Required alignment attribute (must
// compute_padding() function must be // compute_padding() function must be
// provided for the instruction // provided for the instruction
// Whether this node is expanded during code emission into a sequence of
// instructions and the first instruction can perform an implicit null check.
ins_attrib ins_is_late_expanded_null_check_candidate(false);
//----------OPERANDS----------------------------------------------------------- //----------OPERANDS-----------------------------------------------------------
// Operand definitions must precede instruction definitions for correct parsing // Operand definitions must precede instruction definitions for correct parsing
// in the ADLC because operands constitute user defined types which are used in // in the ADLC because operands constitute user defined types which are used in
@ -5250,18 +5218,20 @@ instruct prefetchalloc( memory mem ) %{
// standard CompareAndSwapX when we are using barriers // standard CompareAndSwapX when we are using barriers
// these have higher priority than the rules selected by a predicate // these have higher priority than the rules selected by a predicate
instruct compareAndSwapB(iRegINoSp res, indirect mem, iRegI_R12 oldval, iRegI_R13 newval, instruct compareAndSwapB_narrow(iRegINoSp res, indirect mem, iRegI_R12 oldval, iRegI_R13 newval,
iRegINoSp tmp1, iRegINoSp tmp2, iRegINoSp tmp3, rFlagsReg cr) iRegINoSp tmp1, iRegINoSp tmp2, iRegINoSp tmp3, rFlagsReg cr)
%{ %{
predicate(!UseZabha || !UseZacas);
match(Set res (CompareAndSwapB mem (Binary oldval newval))); match(Set res (CompareAndSwapB mem (Binary oldval newval)));
ins_cost(LOAD_COST + STORE_COST + ALU_COST * 10 + BRANCH_COST * 4); ins_cost(2 * VOLATILE_REF_COST);
effect(TEMP_DEF res, USE_KILL oldval, USE_KILL newval, TEMP tmp1, TEMP tmp2, TEMP tmp3, KILL cr); effect(TEMP_DEF res, USE_KILL oldval, USE_KILL newval, TEMP tmp1, TEMP tmp2, TEMP tmp3, KILL cr);
format %{ format %{
"cmpxchg $mem, $oldval, $newval\t# (byte) if $mem == $oldval then $mem <-- $newval\n\t" "cmpxchg $mem, $oldval, $newval\t# (byte) if $mem == $oldval then $mem <-- $newval\n\t"
"mv $res, $res == $oldval\t# $res <-- ($res == $oldval ? 1 : 0), #@compareAndSwapB" "mv $res, $res == $oldval\t# $res <-- ($res == $oldval ? 1 : 0), #@compareAndSwapB_narrow"
%} %}
ins_encode %{ ins_encode %{
@ -5273,18 +5243,42 @@ instruct compareAndSwapB(iRegINoSp res, indirect mem, iRegI_R12 oldval, iRegI_R1
ins_pipe(pipe_slow); ins_pipe(pipe_slow);
%} %}
instruct compareAndSwapS(iRegINoSp res, indirect mem, iRegI_R12 oldval, iRegI_R13 newval, instruct compareAndSwapB(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval)
iRegINoSp tmp1, iRegINoSp tmp2, iRegINoSp tmp3, rFlagsReg cr)
%{ %{
predicate(UseZabha && UseZacas);
match(Set res (CompareAndSwapB mem (Binary oldval newval)));
ins_cost(2 * VOLATILE_REF_COST);
format %{
"cmpxchg $mem, $oldval, $newval\t# (byte) if $mem == $oldval then $mem <-- $newval\n\t"
"mv $res, $res == $oldval\t# $res <-- ($res == $oldval ? 1 : 0), #@compareAndSwapB"
%}
ins_encode %{
__ cmpxchg(as_Register($mem$$base), $oldval$$Register, $newval$$Register, Assembler::int8,
Assembler::relaxed /* acquire */, Assembler::rl /* release */, $res$$Register,
true /* result as bool */);
%}
ins_pipe(pipe_slow);
%}
instruct compareAndSwapS_narrow(iRegINoSp res, indirect mem, iRegI_R12 oldval, iRegI_R13 newval,
iRegINoSp tmp1, iRegINoSp tmp2, iRegINoSp tmp3, rFlagsReg cr)
%{
predicate(!UseZabha || !UseZacas);
match(Set res (CompareAndSwapS mem (Binary oldval newval))); match(Set res (CompareAndSwapS mem (Binary oldval newval)));
ins_cost(LOAD_COST + STORE_COST + ALU_COST * 11 + BRANCH_COST * 4); ins_cost(2 * VOLATILE_REF_COST);
effect(TEMP_DEF res, USE_KILL oldval, USE_KILL newval, TEMP tmp1, TEMP tmp2, TEMP tmp3, KILL cr); effect(TEMP_DEF res, USE_KILL oldval, USE_KILL newval, TEMP tmp1, TEMP tmp2, TEMP tmp3, KILL cr);
format %{ format %{
"cmpxchg $mem, $oldval, $newval\t# (short) if $mem == $oldval then $mem <-- $newval\n\t" "cmpxchg $mem, $oldval, $newval\t# (short) if $mem == $oldval then $mem <-- $newval\n\t"
"mv $res, $res == $oldval\t# $res <-- ($res == $oldval ? 1 : 0), #@compareAndSwapS" "mv $res, $res == $oldval\t# $res <-- ($res == $oldval ? 1 : 0), #@compareAndSwapS_narrow"
%} %}
ins_encode %{ ins_encode %{
@ -5296,18 +5290,44 @@ instruct compareAndSwapS(iRegINoSp res, indirect mem, iRegI_R12 oldval, iRegI_R1
ins_pipe(pipe_slow); ins_pipe(pipe_slow);
%} %}
instruct compareAndSwapS(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval)
%{
predicate(UseZabha && UseZacas);
match(Set res (CompareAndSwapS mem (Binary oldval newval)));
ins_cost(2 * VOLATILE_REF_COST);
format %{
"cmpxchg $mem, $oldval, $newval\t# (short) if $mem == $oldval then $mem <-- $newval\n\t"
"mv $res, $res == $oldval\t# $res <-- ($res == $oldval ? 1 : 0), #@compareAndSwapS"
%}
ins_encode %{
__ cmpxchg(as_Register($mem$$base), $oldval$$Register, $newval$$Register, Assembler::int16,
Assembler::relaxed /* acquire */, Assembler::rl /* release */, $res$$Register,
true /* result as bool */);
%}
ins_pipe(pipe_slow);
%}
instruct compareAndSwapI(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval) instruct compareAndSwapI(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval)
%{ %{
match(Set res (CompareAndSwapI mem (Binary oldval newval))); match(Set res (CompareAndSwapI mem (Binary oldval newval)));
ins_cost(LOAD_COST + STORE_COST + ALU_COST * 6 + BRANCH_COST * 4); ins_cost(2 * VOLATILE_REF_COST);
format %{ format %{
"cmpxchg $mem, $oldval, $newval\t# (int) if $mem == $oldval then $mem <-- $newval\n\t" "cmpxchg $mem, $oldval, $newval\t# (int) if $mem == $oldval then $mem <-- $newval\n\t"
"mv $res, $res == $oldval\t# $res <-- ($res == $oldval ? 1 : 0), #@compareAndSwapI" "mv $res, $res == $oldval\t# $res <-- ($res == $oldval ? 1 : 0), #@compareAndSwapI"
%} %}
ins_encode(riscv_enc_cmpxchgw(res, mem, oldval, newval)); ins_encode %{
__ cmpxchg(as_Register($mem$$base), $oldval$$Register, $newval$$Register, Assembler::int32,
/*acquire*/ Assembler::relaxed, /*release*/ Assembler::rl, $res$$Register,
/*result as bool*/ true);
%}
ins_pipe(pipe_slow); ins_pipe(pipe_slow);
%} %}
@ -5316,14 +5336,18 @@ instruct compareAndSwapL(iRegINoSp res, indirect mem, iRegL oldval, iRegL newval
%{ %{
match(Set res (CompareAndSwapL mem (Binary oldval newval))); match(Set res (CompareAndSwapL mem (Binary oldval newval)));
ins_cost(LOAD_COST + STORE_COST + ALU_COST * 6 + BRANCH_COST * 4); ins_cost(2 * VOLATILE_REF_COST);
format %{ format %{
"cmpxchg $mem, $oldval, $newval\t# (long) if $mem == $oldval then $mem <-- $newval\n\t" "cmpxchg $mem, $oldval, $newval\t# (long) if $mem == $oldval then $mem <-- $newval\n\t"
"mv $res, $res == $oldval\t# $res <-- ($res == $oldval ? 1 : 0), #@compareAndSwapL" "mv $res, $res == $oldval\t# $res <-- ($res == $oldval ? 1 : 0), #@compareAndSwapL"
%} %}
ins_encode(riscv_enc_cmpxchg(res, mem, oldval, newval)); ins_encode %{
__ cmpxchg(as_Register($mem$$base), $oldval$$Register, $newval$$Register, Assembler::int64,
/*acquire*/ Assembler::relaxed, /*release*/ Assembler::rl, $res$$Register,
/*result as bool*/ true);
%}
ins_pipe(pipe_slow); ins_pipe(pipe_slow);
%} %}
@ -5334,14 +5358,18 @@ instruct compareAndSwapP(iRegINoSp res, indirect mem, iRegP oldval, iRegP newval
match(Set res (CompareAndSwapP mem (Binary oldval newval))); match(Set res (CompareAndSwapP mem (Binary oldval newval)));
ins_cost(LOAD_COST + STORE_COST + ALU_COST * 6 + BRANCH_COST * 4); ins_cost(2 * VOLATILE_REF_COST);
format %{ format %{
"cmpxchg $mem, $oldval, $newval\t# (ptr) if $mem == $oldval then $mem <-- $newval\n\t" "cmpxchg $mem, $oldval, $newval\t# (ptr) if $mem == $oldval then $mem <-- $newval\n\t"
"mv $res, $res == $oldval\t# $res <-- ($res == $oldval ? 1 : 0), #@compareAndSwapP" "mv $res, $res == $oldval\t# $res <-- ($res == $oldval ? 1 : 0), #@compareAndSwapP"
%} %}
ins_encode(riscv_enc_cmpxchg(res, mem, oldval, newval)); ins_encode %{
__ cmpxchg(as_Register($mem$$base), $oldval$$Register, $newval$$Register, Assembler::int64,
/*acquire*/ Assembler::relaxed, /*release*/ Assembler::rl, $res$$Register,
/*result as bool*/ true);
%}
ins_pipe(pipe_slow); ins_pipe(pipe_slow);
%} %}
@ -5349,35 +5377,40 @@ instruct compareAndSwapP(iRegINoSp res, indirect mem, iRegP oldval, iRegP newval
instruct compareAndSwapN(iRegINoSp res, indirect mem, iRegN oldval, iRegN newval) instruct compareAndSwapN(iRegINoSp res, indirect mem, iRegN oldval, iRegN newval)
%{ %{
predicate(n->as_LoadStore()->barrier_data() == 0); predicate(n->as_LoadStore()->barrier_data() == 0);
match(Set res (CompareAndSwapN mem (Binary oldval newval))); match(Set res (CompareAndSwapN mem (Binary oldval newval)));
ins_cost(LOAD_COST + STORE_COST + ALU_COST * 8 + BRANCH_COST * 4); ins_cost(2 * VOLATILE_REF_COST);
format %{ format %{
"cmpxchg $mem, $oldval, $newval\t# (narrow oop) if $mem == $oldval then $mem <-- $newval\n\t" "cmpxchg $mem, $oldval, $newval\t# (narrow oop) if $mem == $oldval then $mem <-- $newval\n\t"
"mv $res, $res == $oldval\t# $res <-- ($res == $oldval ? 1 : 0), #@compareAndSwapN" "mv $res, $res == $oldval\t# $res <-- ($res == $oldval ? 1 : 0), #@compareAndSwapN"
%} %}
ins_encode(riscv_enc_cmpxchgn(res, mem, oldval, newval)); ins_encode %{
__ cmpxchg(as_Register($mem$$base), $oldval$$Register, $newval$$Register, Assembler::uint32,
/*acquire*/ Assembler::relaxed, /*release*/ Assembler::rl, $res$$Register,
/*result as bool*/ true);
%}
ins_pipe(pipe_slow); ins_pipe(pipe_slow);
%} %}
// alternative CompareAndSwapX when we are eliding barriers // alternative CompareAndSwapX when we are eliding barriers
instruct compareAndSwapBAcq(iRegINoSp res, indirect mem, iRegI_R12 oldval, iRegI_R13 newval, instruct compareAndSwapBAcq_narrow(iRegINoSp res, indirect mem, iRegI_R12 oldval, iRegI_R13 newval,
iRegINoSp tmp1, iRegINoSp tmp2, iRegINoSp tmp3, rFlagsReg cr) iRegINoSp tmp1, iRegINoSp tmp2, iRegINoSp tmp3, rFlagsReg cr)
%{ %{
predicate(needs_acquiring_load_reserved(n)); predicate((!UseZabha || !UseZacas) && needs_acquiring_load_reserved(n));
match(Set res (CompareAndSwapB mem (Binary oldval newval))); match(Set res (CompareAndSwapB mem (Binary oldval newval)));
ins_cost(LOAD_COST + STORE_COST + ALU_COST * 10 + BRANCH_COST * 4); ins_cost(2 * VOLATILE_REF_COST);
effect(TEMP_DEF res, KILL cr, USE_KILL oldval, USE_KILL newval, TEMP tmp1, TEMP tmp2, TEMP tmp3); effect(TEMP_DEF res, KILL cr, USE_KILL oldval, USE_KILL newval, TEMP tmp1, TEMP tmp2, TEMP tmp3);
format %{ format %{
"cmpxchg_acq $mem, $oldval, $newval\t# (byte) if $mem == $oldval then $mem <-- $newval\n\t" "cmpxchg_acq $mem, $oldval, $newval\t# (byte) if $mem == $oldval then $mem <-- $newval\n\t"
"mv $res, $res == $oldval\t# $res <-- ($res == $oldval ? 1 : 0), #@compareAndSwapBAcq" "mv $res, $res == $oldval\t# $res <-- ($res == $oldval ? 1 : 0), #@compareAndSwapBAcq_narrow"
%} %}
ins_encode %{ ins_encode %{
@ -5389,20 +5422,42 @@ instruct compareAndSwapBAcq(iRegINoSp res, indirect mem, iRegI_R12 oldval, iRegI
ins_pipe(pipe_slow); ins_pipe(pipe_slow);
%} %}
instruct compareAndSwapSAcq(iRegINoSp res, indirect mem, iRegI_R12 oldval, iRegI_R13 newval, instruct compareAndSwapBAcq(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval)
iRegINoSp tmp1, iRegINoSp tmp2, iRegINoSp tmp3, rFlagsReg cr)
%{ %{
predicate(needs_acquiring_load_reserved(n)); predicate((UseZabha && UseZacas) && needs_acquiring_load_reserved(n));
match(Set res (CompareAndSwapB mem (Binary oldval newval)));
ins_cost(2 * VOLATILE_REF_COST);
format %{
"cmpxchg $mem, $oldval, $newval\t# (byte) if $mem == $oldval then $mem <-- $newval\n\t"
"mv $res, $res == $oldval\t# $res <-- ($res == $oldval ? 1 : 0), #@compareAndSwapBAcq"
%}
ins_encode %{
__ cmpxchg(as_Register($mem$$base), $oldval$$Register, $newval$$Register, Assembler::int8,
Assembler::aq /* acquire */, Assembler::rl /* release */, $res$$Register,
true /* result as bool */);
%}
ins_pipe(pipe_slow);
%}
instruct compareAndSwapSAcq_narrow(iRegINoSp res, indirect mem, iRegI_R12 oldval, iRegI_R13 newval,
iRegINoSp tmp1, iRegINoSp tmp2, iRegINoSp tmp3, rFlagsReg cr)
%{
predicate((!UseZabha || !UseZacas) && needs_acquiring_load_reserved(n));
match(Set res (CompareAndSwapS mem (Binary oldval newval))); match(Set res (CompareAndSwapS mem (Binary oldval newval)));
ins_cost(LOAD_COST + STORE_COST + ALU_COST * 11 + BRANCH_COST * 4); ins_cost(2 * VOLATILE_REF_COST);
effect(TEMP_DEF res, KILL cr, USE_KILL oldval, USE_KILL newval, TEMP tmp1, TEMP tmp2, TEMP tmp3); effect(TEMP_DEF res, KILL cr, USE_KILL oldval, USE_KILL newval, TEMP tmp1, TEMP tmp2, TEMP tmp3);
format %{ format %{
"cmpxchg_acq $mem, $oldval, $newval\t# (short) if $mem == $oldval then $mem <-- $newval\n\t" "cmpxchg_acq $mem, $oldval, $newval\t# (short) if $mem == $oldval then $mem <-- $newval\n\t"
"mv $res, $res == $oldval\t# $res <-- ($res == $oldval ? 1 : 0), #@compareAndSwapSAcq" "mv $res, $res == $oldval\t# $res <-- ($res == $oldval ? 1 : 0), #@compareAndSwapSAcq_narrow"
%} %}
ins_encode %{ ins_encode %{
@ -5414,20 +5469,46 @@ instruct compareAndSwapSAcq(iRegINoSp res, indirect mem, iRegI_R12 oldval, iRegI
ins_pipe(pipe_slow); ins_pipe(pipe_slow);
%} %}
instruct compareAndSwapSAcq(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval)
%{
predicate((UseZabha && UseZacas) && needs_acquiring_load_reserved(n));
match(Set res (CompareAndSwapS mem (Binary oldval newval)));
ins_cost(2 * VOLATILE_REF_COST);
format %{
"cmpxchg $mem, $oldval, $newval\t# (short) if $mem == $oldval then $mem <-- $newval\n\t"
"mv $res, $res == $oldval\t# $res <-- ($res == $oldval ? 1 : 0), #@compareAndSwapSAcq"
%}
ins_encode %{
__ cmpxchg(as_Register($mem$$base), $oldval$$Register, $newval$$Register, Assembler::int16,
Assembler::aq /* acquire */, Assembler::rl /* release */, $res$$Register,
true /* result as bool */);
%}
ins_pipe(pipe_slow);
%}
instruct compareAndSwapIAcq(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval) instruct compareAndSwapIAcq(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval)
%{ %{
predicate(needs_acquiring_load_reserved(n)); predicate(needs_acquiring_load_reserved(n));
match(Set res (CompareAndSwapI mem (Binary oldval newval))); match(Set res (CompareAndSwapI mem (Binary oldval newval)));
ins_cost(LOAD_COST + STORE_COST + ALU_COST * 6 + BRANCH_COST * 4); ins_cost(2 * VOLATILE_REF_COST);
format %{ format %{
"cmpxchg_acq $mem, $oldval, $newval\t# (int) if $mem == $oldval then $mem <-- $newval\n\t" "cmpxchg_acq $mem, $oldval, $newval\t# (int) if $mem == $oldval then $mem <-- $newval\n\t"
"mv $res, $res == $oldval\t# $res <-- ($res == $oldval ? 1 : 0), #@compareAndSwapIAcq" "mv $res, $res == $oldval\t# $res <-- ($res == $oldval ? 1 : 0), #@compareAndSwapIAcq"
%} %}
ins_encode(riscv_enc_cmpxchgw_acq(res, mem, oldval, newval)); ins_encode %{
__ cmpxchg(as_Register($mem$$base), $oldval$$Register, $newval$$Register, Assembler::int32,
/*acquire*/ Assembler::aq, /*release*/ Assembler::rl, $res$$Register,
/*result as bool*/ true);
%}
ins_pipe(pipe_slow); ins_pipe(pipe_slow);
%} %}
@ -5438,14 +5519,18 @@ instruct compareAndSwapLAcq(iRegINoSp res, indirect mem, iRegL oldval, iRegL new
match(Set res (CompareAndSwapL mem (Binary oldval newval))); match(Set res (CompareAndSwapL mem (Binary oldval newval)));
ins_cost(LOAD_COST + STORE_COST + ALU_COST * 6 + BRANCH_COST * 4); ins_cost(2 * VOLATILE_REF_COST);
format %{ format %{
"cmpxchg_acq $mem, $oldval, $newval\t# (long) if $mem == $oldval then $mem <-- $newval\n\t" "cmpxchg_acq $mem, $oldval, $newval\t# (long) if $mem == $oldval then $mem <-- $newval\n\t"
"mv $res, $res == $oldval\t# $res <-- ($res == $oldval ? 1 : 0), #@compareAndSwapLAcq" "mv $res, $res == $oldval\t# $res <-- ($res == $oldval ? 1 : 0), #@compareAndSwapLAcq"
%} %}
ins_encode(riscv_enc_cmpxchg_acq(res, mem, oldval, newval)); ins_encode %{
__ cmpxchg(as_Register($mem$$base), $oldval$$Register, $newval$$Register, Assembler::int64,
/*acquire*/ Assembler::aq, /*release*/ Assembler::rl, $res$$Register,
/*result as bool*/ true);
%}
ins_pipe(pipe_slow); ins_pipe(pipe_slow);
%} %}
@ -5456,14 +5541,18 @@ instruct compareAndSwapPAcq(iRegINoSp res, indirect mem, iRegP oldval, iRegP new
match(Set res (CompareAndSwapP mem (Binary oldval newval))); match(Set res (CompareAndSwapP mem (Binary oldval newval)));
ins_cost(LOAD_COST + STORE_COST + ALU_COST * 6 + BRANCH_COST * 4); ins_cost(2 * VOLATILE_REF_COST);
format %{ format %{
"cmpxchg_acq $mem, $oldval, $newval\t# (ptr) if $mem == $oldval then $mem <-- $newval\n\t" "cmpxchg_acq $mem, $oldval, $newval\t# (ptr) if $mem == $oldval then $mem <-- $newval\n\t"
"mv $res, $res == $oldval\t# $res <-- ($res == $oldval ? 1 : 0), #@compareAndSwapPAcq" "mv $res, $res == $oldval\t# $res <-- ($res == $oldval ? 1 : 0), #@compareAndSwapPAcq"
%} %}
ins_encode(riscv_enc_cmpxchg_acq(res, mem, oldval, newval)); ins_encode %{
__ cmpxchg(as_Register($mem$$base), $oldval$$Register, $newval$$Register, Assembler::int64,
/*acquire*/ Assembler::aq, /*release*/ Assembler::rl, $res$$Register,
/*result as bool*/ true);
%}
ins_pipe(pipe_slow); ins_pipe(pipe_slow);
%} %}
@ -5474,14 +5563,18 @@ instruct compareAndSwapNAcq(iRegINoSp res, indirect mem, iRegN oldval, iRegN new
match(Set res (CompareAndSwapN mem (Binary oldval newval))); match(Set res (CompareAndSwapN mem (Binary oldval newval)));
ins_cost(LOAD_COST + STORE_COST + ALU_COST * 8 + BRANCH_COST * 4); ins_cost(2 * VOLATILE_REF_COST);
format %{ format %{
"cmpxchg_acq $mem, $oldval, $newval\t# (narrow oop) if $mem == $oldval then $mem <-- $newval\n\t" "cmpxchg_acq $mem, $oldval, $newval\t# (narrow oop) if $mem == $oldval then $mem <-- $newval\n\t"
"mv $res, $res == $oldval\t# $res <-- ($res == $oldval ? 1 : 0), #@compareAndSwapNAcq" "mv $res, $res == $oldval\t# $res <-- ($res == $oldval ? 1 : 0), #@compareAndSwapNAcq"
%} %}
ins_encode(riscv_enc_cmpxchgn_acq(res, mem, oldval, newval)); ins_encode %{
__ cmpxchg(as_Register($mem$$base), $oldval$$Register, $newval$$Register, Assembler::uint32,
/*acquire*/ Assembler::aq, /*release*/ Assembler::rl, $res$$Register,
/*result as bool*/ true);
%}
ins_pipe(pipe_slow); ins_pipe(pipe_slow);
%} %}
@ -5492,17 +5585,19 @@ instruct compareAndSwapNAcq(iRegINoSp res, indirect mem, iRegN oldval, iRegN new
// no trailing StoreLoad barrier emitted by C2. Unfortunately we // no trailing StoreLoad barrier emitted by C2. Unfortunately we
// can't check the type of memory ordering here, so we always emit a // can't check the type of memory ordering here, so we always emit a
// sc_d(w) with rl bit set. // sc_d(w) with rl bit set.
instruct compareAndExchangeB(iRegINoSp res, indirect mem, iRegI_R12 oldval, iRegI_R13 newval, instruct compareAndExchangeB_narrow(iRegINoSp res, indirect mem, iRegI_R12 oldval, iRegI_R13 newval,
iRegINoSp tmp1, iRegINoSp tmp2, iRegINoSp tmp3, rFlagsReg cr) iRegINoSp tmp1, iRegINoSp tmp2, iRegINoSp tmp3, rFlagsReg cr)
%{ %{
predicate(!UseZabha || !UseZacas);
match(Set res (CompareAndExchangeB mem (Binary oldval newval))); match(Set res (CompareAndExchangeB mem (Binary oldval newval)));
ins_cost(LOAD_COST + STORE_COST + BRANCH_COST * 3 + ALU_COST * 5); ins_cost(2 * VOLATILE_REF_COST);
effect(TEMP_DEF res, KILL cr, USE_KILL oldval, USE_KILL newval, TEMP tmp1, TEMP tmp2, TEMP tmp3); effect(TEMP_DEF res, KILL cr, USE_KILL oldval, USE_KILL newval, TEMP tmp1, TEMP tmp2, TEMP tmp3);
format %{ format %{
"cmpxchg $res = $mem, $oldval, $newval\t# (byte, weak) if $mem == $oldval then $mem <-- $newval, #@compareAndExchangeB" "cmpxchg $res = $mem, $oldval, $newval\t# (byte, weak) if $mem == $oldval then $mem <-- $newval, #@compareAndExchangeB_narrow"
%} %}
ins_encode %{ ins_encode %{
@ -5514,17 +5609,39 @@ instruct compareAndExchangeB(iRegINoSp res, indirect mem, iRegI_R12 oldval, iReg
ins_pipe(pipe_slow); ins_pipe(pipe_slow);
%} %}
instruct compareAndExchangeS(iRegINoSp res, indirect mem, iRegI_R12 oldval, iRegI_R13 newval, instruct compareAndExchangeB(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval)
iRegINoSp tmp1, iRegINoSp tmp2, iRegINoSp tmp3, rFlagsReg cr)
%{ %{
predicate(UseZabha && UseZacas);
match(Set res (CompareAndExchangeB mem (Binary oldval newval)));
ins_cost(2 * VOLATILE_REF_COST);
format %{
"cmpxchg $res = $mem, $oldval, $newval\t# (byte, weak) if $mem == $oldval then $mem <-- $newval, #@compareAndExchangeB"
%}
ins_encode %{
__ cmpxchg(as_Register($mem$$base), $oldval$$Register, $newval$$Register, Assembler::int8,
/*acquire*/ Assembler::relaxed, /*release*/ Assembler::rl, $res$$Register);
%}
ins_pipe(pipe_slow);
%}
instruct compareAndExchangeS_narrow(iRegINoSp res, indirect mem, iRegI_R12 oldval, iRegI_R13 newval,
iRegINoSp tmp1, iRegINoSp tmp2, iRegINoSp tmp3, rFlagsReg cr)
%{
predicate(!UseZabha || !UseZacas);
match(Set res (CompareAndExchangeS mem (Binary oldval newval))); match(Set res (CompareAndExchangeS mem (Binary oldval newval)));
ins_cost(LOAD_COST + STORE_COST + BRANCH_COST * 3 + ALU_COST * 6); ins_cost(2 * VOLATILE_REF_COST);
effect(TEMP_DEF res, KILL cr, USE_KILL oldval, USE_KILL newval, TEMP tmp1, TEMP tmp2, TEMP tmp3); effect(TEMP_DEF res, KILL cr, USE_KILL oldval, USE_KILL newval, TEMP tmp1, TEMP tmp2, TEMP tmp3);
format %{ format %{
"cmpxchg $res = $mem, $oldval, $newval\t# (short, weak) if $mem == $oldval then $mem <-- $newval, #@compareAndExchangeS" "cmpxchg $res = $mem, $oldval, $newval\t# (short, weak) if $mem == $oldval then $mem <-- $newval, #@compareAndExchangeS_narrow"
%} %}
ins_encode %{ ins_encode %{
@ -5536,13 +5653,31 @@ instruct compareAndExchangeS(iRegINoSp res, indirect mem, iRegI_R12 oldval, iReg
ins_pipe(pipe_slow); ins_pipe(pipe_slow);
%} %}
instruct compareAndExchangeS(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval)
%{
predicate(UseZabha && UseZacas);
match(Set res (CompareAndExchangeS mem (Binary oldval newval)));
ins_cost(2 * VOLATILE_REF_COST);
format %{
"cmpxchg $res = $mem, $oldval, $newval\t# (short, weak) if $mem == $oldval then $mem <-- $newval, #@compareAndExchangeS"
%}
ins_encode %{
__ cmpxchg(as_Register($mem$$base), $oldval$$Register, $newval$$Register, Assembler::int16,
/*acquire*/ Assembler::relaxed, /*release*/ Assembler::rl, $res$$Register);
%}
ins_pipe(pipe_slow);
%}
instruct compareAndExchangeI(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval) instruct compareAndExchangeI(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval)
%{ %{
match(Set res (CompareAndExchangeI mem (Binary oldval newval))); match(Set res (CompareAndExchangeI mem (Binary oldval newval)));
ins_cost(LOAD_COST + STORE_COST + BRANCH_COST * 3 + ALU_COST); ins_cost(2 * VOLATILE_REF_COST);
effect(TEMP_DEF res);
format %{ format %{
"cmpxchg $res = $mem, $oldval, $newval\t# (int, weak) if $mem == $oldval then $mem <-- $newval, #@compareAndExchangeI" "cmpxchg $res = $mem, $oldval, $newval\t# (int, weak) if $mem == $oldval then $mem <-- $newval, #@compareAndExchangeI"
@ -5560,9 +5695,7 @@ instruct compareAndExchangeL(iRegLNoSp res, indirect mem, iRegL oldval, iRegL ne
%{ %{
match(Set res (CompareAndExchangeL mem (Binary oldval newval))); match(Set res (CompareAndExchangeL mem (Binary oldval newval)));
ins_cost(LOAD_COST + STORE_COST + BRANCH_COST * 3 + ALU_COST); ins_cost(2 * VOLATILE_REF_COST);
effect(TEMP_DEF res);
format %{ format %{
"cmpxchg $res = $mem, $oldval, $newval\t# (long, weak) if $mem == $oldval then $mem <-- $newval, #@compareAndExchangeL" "cmpxchg $res = $mem, $oldval, $newval\t# (long, weak) if $mem == $oldval then $mem <-- $newval, #@compareAndExchangeL"
@ -5579,11 +5712,10 @@ instruct compareAndExchangeL(iRegLNoSp res, indirect mem, iRegL oldval, iRegL ne
instruct compareAndExchangeN(iRegNNoSp res, indirect mem, iRegN oldval, iRegN newval) instruct compareAndExchangeN(iRegNNoSp res, indirect mem, iRegN oldval, iRegN newval)
%{ %{
predicate(n->as_LoadStore()->barrier_data() == 0); predicate(n->as_LoadStore()->barrier_data() == 0);
match(Set res (CompareAndExchangeN mem (Binary oldval newval))); match(Set res (CompareAndExchangeN mem (Binary oldval newval)));
ins_cost(LOAD_COST + STORE_COST + BRANCH_COST * 3 + ALU_COST * 3); ins_cost(2 * VOLATILE_REF_COST);
effect(TEMP_DEF res);
format %{ format %{
"cmpxchg $res = $mem, $oldval, $newval\t# (narrow oop, weak) if $mem == $oldval then $mem <-- $newval, #@compareAndExchangeN" "cmpxchg $res = $mem, $oldval, $newval\t# (narrow oop, weak) if $mem == $oldval then $mem <-- $newval, #@compareAndExchangeN"
@ -5600,11 +5732,10 @@ instruct compareAndExchangeN(iRegNNoSp res, indirect mem, iRegN oldval, iRegN ne
instruct compareAndExchangeP(iRegPNoSp res, indirect mem, iRegP oldval, iRegP newval) instruct compareAndExchangeP(iRegPNoSp res, indirect mem, iRegP oldval, iRegP newval)
%{ %{
predicate(n->as_LoadStore()->barrier_data() == 0); predicate(n->as_LoadStore()->barrier_data() == 0);
match(Set res (CompareAndExchangeP mem (Binary oldval newval))); match(Set res (CompareAndExchangeP mem (Binary oldval newval)));
ins_cost(LOAD_COST + STORE_COST + BRANCH_COST * 3 + ALU_COST); ins_cost(2 * VOLATILE_REF_COST);
effect(TEMP_DEF res);
format %{ format %{
"cmpxchg $res = $mem, $oldval, $newval\t# (ptr, weak) if $mem == $oldval then $mem <-- $newval, #@compareAndExchangeP" "cmpxchg $res = $mem, $oldval, $newval\t# (ptr, weak) if $mem == $oldval then $mem <-- $newval, #@compareAndExchangeP"
@ -5618,19 +5749,19 @@ instruct compareAndExchangeP(iRegPNoSp res, indirect mem, iRegP oldval, iRegP ne
ins_pipe(pipe_slow); ins_pipe(pipe_slow);
%} %}
instruct compareAndExchangeBAcq(iRegINoSp res, indirect mem, iRegI_R12 oldval, iRegI_R13 newval, instruct compareAndExchangeBAcq_narrow(iRegINoSp res, indirect mem, iRegI_R12 oldval, iRegI_R13 newval,
iRegINoSp tmp1, iRegINoSp tmp2, iRegINoSp tmp3, rFlagsReg cr) iRegINoSp tmp1, iRegINoSp tmp2, iRegINoSp tmp3, rFlagsReg cr)
%{ %{
predicate(needs_acquiring_load_reserved(n)); predicate((!UseZabha || !UseZacas) && needs_acquiring_load_reserved(n));
match(Set res (CompareAndExchangeB mem (Binary oldval newval))); match(Set res (CompareAndExchangeB mem (Binary oldval newval)));
ins_cost(LOAD_COST + STORE_COST + BRANCH_COST * 3 + ALU_COST * 5); ins_cost(2 * VOLATILE_REF_COST);
effect(TEMP_DEF res, KILL cr, USE_KILL oldval, USE_KILL newval, TEMP tmp1, TEMP tmp2, TEMP tmp3); effect(TEMP_DEF res, KILL cr, USE_KILL oldval, USE_KILL newval, TEMP tmp1, TEMP tmp2, TEMP tmp3);
format %{ format %{
"cmpxchg_acq $res = $mem, $oldval, $newval\t# (byte, weak) if $mem == $oldval then $mem <-- $newval, #@compareAndExchangeBAcq" "cmpxchg_acq $res = $mem, $oldval, $newval\t# (byte, weak) if $mem == $oldval then $mem <-- $newval, #@compareAndExchangeBAcq_narrow"
%} %}
ins_encode %{ ins_encode %{
@ -5642,19 +5773,39 @@ instruct compareAndExchangeBAcq(iRegINoSp res, indirect mem, iRegI_R12 oldval, i
ins_pipe(pipe_slow); ins_pipe(pipe_slow);
%} %}
instruct compareAndExchangeSAcq(iRegINoSp res, indirect mem, iRegI_R12 oldval, iRegI_R13 newval, instruct compareAndExchangeBAcq(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval)
iRegINoSp tmp1, iRegINoSp tmp2, iRegINoSp tmp3, rFlagsReg cr)
%{ %{
predicate(needs_acquiring_load_reserved(n)); predicate((UseZabha && UseZacas) && needs_acquiring_load_reserved(n));
match(Set res (CompareAndExchangeB mem (Binary oldval newval)));
ins_cost(2 * VOLATILE_REF_COST);
format %{
"cmpxchg_acq $res = $mem, $oldval, $newval\t# (byte, weak) if $mem == $oldval then $mem <-- $newval, #@compareAndExchangeBAcq"
%}
ins_encode %{
__ cmpxchg(as_Register($mem$$base), $oldval$$Register, $newval$$Register, Assembler::int8,
/*acquire*/ Assembler::aq, /*release*/ Assembler::rl, $res$$Register);
%}
ins_pipe(pipe_slow);
%}
instruct compareAndExchangeSAcq_narrow(iRegINoSp res, indirect mem, iRegI_R12 oldval, iRegI_R13 newval,
iRegINoSp tmp1, iRegINoSp tmp2, iRegINoSp tmp3, rFlagsReg cr)
%{
predicate((!UseZabha || !UseZacas) && needs_acquiring_load_reserved(n));
match(Set res (CompareAndExchangeS mem (Binary oldval newval))); match(Set res (CompareAndExchangeS mem (Binary oldval newval)));
ins_cost(LOAD_COST + STORE_COST + BRANCH_COST * 3 + ALU_COST * 6); ins_cost(2 * VOLATILE_REF_COST);
effect(TEMP_DEF res, KILL cr, USE_KILL oldval, USE_KILL newval, TEMP tmp1, TEMP tmp2, TEMP tmp3); effect(TEMP_DEF res, KILL cr, USE_KILL oldval, USE_KILL newval, TEMP tmp1, TEMP tmp2, TEMP tmp3);
format %{ format %{
"cmpxchg_acq $res = $mem, $oldval, $newval\t# (short, weak) if $mem == $oldval then $mem <-- $newval, #@compareAndExchangeSAcq" "cmpxchg_acq $res = $mem, $oldval, $newval\t# (short, weak) if $mem == $oldval then $mem <-- $newval, #@compareAndExchangeSAcq_narrow"
%} %}
ins_encode %{ ins_encode %{
@ -5666,15 +5817,33 @@ instruct compareAndExchangeSAcq(iRegINoSp res, indirect mem, iRegI_R12 oldval, i
ins_pipe(pipe_slow); ins_pipe(pipe_slow);
%} %}
instruct compareAndExchangeSAcq(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval)
%{
predicate((UseZabha && UseZacas) && needs_acquiring_load_reserved(n));
match(Set res (CompareAndExchangeS mem (Binary oldval newval)));
ins_cost(2 * VOLATILE_REF_COST);
format %{
"cmpxchg_acq $res = $mem, $oldval, $newval\t# (short, weak) if $mem == $oldval then $mem <-- $newval, #@compareAndExchangeSAcq"
%}
ins_encode %{
__ cmpxchg(as_Register($mem$$base), $oldval$$Register, $newval$$Register, Assembler::int16,
/*acquire*/ Assembler::aq, /*release*/ Assembler::rl, $res$$Register);
%}
ins_pipe(pipe_slow);
%}
instruct compareAndExchangeIAcq(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval) instruct compareAndExchangeIAcq(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval)
%{ %{
predicate(needs_acquiring_load_reserved(n)); predicate(needs_acquiring_load_reserved(n));
match(Set res (CompareAndExchangeI mem (Binary oldval newval))); match(Set res (CompareAndExchangeI mem (Binary oldval newval)));
ins_cost(LOAD_COST + STORE_COST + BRANCH_COST * 3 + ALU_COST); ins_cost(2 * VOLATILE_REF_COST);
effect(TEMP_DEF res);
format %{ format %{
"cmpxchg_acq $res = $mem, $oldval, $newval\t# (int, weak) if $mem == $oldval then $mem <-- $newval, #@compareAndExchangeIAcq" "cmpxchg_acq $res = $mem, $oldval, $newval\t# (int, weak) if $mem == $oldval then $mem <-- $newval, #@compareAndExchangeIAcq"
@ -5694,9 +5863,7 @@ instruct compareAndExchangeLAcq(iRegLNoSp res, indirect mem, iRegL oldval, iRegL
match(Set res (CompareAndExchangeL mem (Binary oldval newval))); match(Set res (CompareAndExchangeL mem (Binary oldval newval)));
ins_cost(LOAD_COST + STORE_COST + BRANCH_COST * 3 + ALU_COST); ins_cost(2 * VOLATILE_REF_COST);
effect(TEMP_DEF res);
format %{ format %{
"cmpxchg_acq $res = $mem, $oldval, $newval\t# (long, weak) if $mem == $oldval then $mem <-- $newval, #@compareAndExchangeLAcq" "cmpxchg_acq $res = $mem, $oldval, $newval\t# (long, weak) if $mem == $oldval then $mem <-- $newval, #@compareAndExchangeLAcq"
@ -5716,9 +5883,7 @@ instruct compareAndExchangeNAcq(iRegNNoSp res, indirect mem, iRegN oldval, iRegN
match(Set res (CompareAndExchangeN mem (Binary oldval newval))); match(Set res (CompareAndExchangeN mem (Binary oldval newval)));
ins_cost(LOAD_COST + STORE_COST + BRANCH_COST * 3 + ALU_COST); ins_cost(2 * VOLATILE_REF_COST);
effect(TEMP_DEF res);
format %{ format %{
"cmpxchg_acq $res = $mem, $oldval, $newval\t# (narrow oop, weak) if $mem == $oldval then $mem <-- $newval, #@compareAndExchangeNAcq" "cmpxchg_acq $res = $mem, $oldval, $newval\t# (narrow oop, weak) if $mem == $oldval then $mem <-- $newval, #@compareAndExchangeNAcq"
@ -5738,9 +5903,7 @@ instruct compareAndExchangePAcq(iRegPNoSp res, indirect mem, iRegP oldval, iRegP
match(Set res (CompareAndExchangeP mem (Binary oldval newval))); match(Set res (CompareAndExchangeP mem (Binary oldval newval)));
ins_cost(LOAD_COST + STORE_COST + BRANCH_COST * 3 + ALU_COST); ins_cost(2 * VOLATILE_REF_COST);
effect(TEMP_DEF res);
format %{ format %{
"cmpxchg_acq $res = $mem, $oldval, $newval\t# (ptr, weak) if $mem == $oldval then $mem <-- $newval, #@compareAndExchangePAcq" "cmpxchg_acq $res = $mem, $oldval, $newval\t# (ptr, weak) if $mem == $oldval then $mem <-- $newval, #@compareAndExchangePAcq"
@ -5754,18 +5917,20 @@ instruct compareAndExchangePAcq(iRegPNoSp res, indirect mem, iRegP oldval, iRegP
ins_pipe(pipe_slow); ins_pipe(pipe_slow);
%} %}
instruct weakCompareAndSwapB(iRegINoSp res, indirect mem, iRegI_R12 oldval, iRegI_R13 newval, instruct weakCompareAndSwapB_narrow(iRegINoSp res, indirect mem, iRegI_R12 oldval, iRegI_R13 newval,
iRegINoSp tmp1, iRegINoSp tmp2, iRegINoSp tmp3, rFlagsReg cr) iRegINoSp tmp1, iRegINoSp tmp2, iRegINoSp tmp3, rFlagsReg cr)
%{ %{
predicate(!UseZabha || !UseZacas);
match(Set res (WeakCompareAndSwapB mem (Binary oldval newval))); match(Set res (WeakCompareAndSwapB mem (Binary oldval newval)));
ins_cost(LOAD_COST + STORE_COST + BRANCH_COST * 2 + ALU_COST * 6); ins_cost(2 * VOLATILE_REF_COST);
effect(TEMP_DEF res, KILL cr, USE_KILL oldval, USE_KILL newval, TEMP tmp1, TEMP tmp2, TEMP tmp3); effect(TEMP_DEF res, KILL cr, USE_KILL oldval, USE_KILL newval, TEMP tmp1, TEMP tmp2, TEMP tmp3);
format %{ format %{
"weak_cmpxchg $mem, $oldval, $newval\t# (byte, weak) if $mem == $oldval then $mem <-- $newval\n\t" "weak_cmpxchg $mem, $oldval, $newval\t# (byte, weak) if $mem == $oldval then $mem <-- $newval\n\t"
"# $res == 1 when success, #@weakCompareAndSwapB" "# $res == 1 when success, #@weakCompareAndSwapB_narrow"
%} %}
ins_encode %{ ins_encode %{
@ -5777,18 +5942,41 @@ instruct weakCompareAndSwapB(iRegINoSp res, indirect mem, iRegI_R12 oldval, iReg
ins_pipe(pipe_slow); ins_pipe(pipe_slow);
%} %}
instruct weakCompareAndSwapS(iRegINoSp res, indirect mem, iRegI_R12 oldval, iRegI_R13 newval, instruct weakCompareAndSwapB(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval)
iRegINoSp tmp1, iRegINoSp tmp2, iRegINoSp tmp3, rFlagsReg cr)
%{ %{
predicate(UseZabha && UseZacas);
match(Set res (WeakCompareAndSwapB mem (Binary oldval newval)));
ins_cost(2 * VOLATILE_REF_COST);
format %{
"weak_cmpxchg $mem, $oldval, $newval\t# (byte, weak) if $mem == $oldval then $mem <-- $newval\n\t"
"# $res == 1 when success, #@weakCompareAndSwapB"
%}
ins_encode %{
__ weak_cmpxchg(as_Register($mem$$base), $oldval$$Register, $newval$$Register, Assembler::int8,
/*acquire*/ Assembler::relaxed, /*release*/ Assembler::rl, $res$$Register);
%}
ins_pipe(pipe_slow);
%}
instruct weakCompareAndSwapS_narrow(iRegINoSp res, indirect mem, iRegI_R12 oldval, iRegI_R13 newval,
iRegINoSp tmp1, iRegINoSp tmp2, iRegINoSp tmp3, rFlagsReg cr)
%{
predicate(!UseZabha || !UseZacas);
match(Set res (WeakCompareAndSwapS mem (Binary oldval newval))); match(Set res (WeakCompareAndSwapS mem (Binary oldval newval)));
ins_cost(LOAD_COST + STORE_COST + BRANCH_COST * 2 + ALU_COST * 7); ins_cost(2 * VOLATILE_REF_COST);
effect(TEMP_DEF res, KILL cr, USE_KILL oldval, USE_KILL newval, TEMP tmp1, TEMP tmp2, TEMP tmp3); effect(TEMP_DEF res, KILL cr, USE_KILL oldval, USE_KILL newval, TEMP tmp1, TEMP tmp2, TEMP tmp3);
format %{ format %{
"weak_cmpxchg $mem, $oldval, $newval\t# (short, weak) if $mem == $oldval then $mem <-- $newval\n\t" "weak_cmpxchg $mem, $oldval, $newval\t# (short, weak) if $mem == $oldval then $mem <-- $newval\n\t"
"# $res == 1 when success, #@weakCompareAndSwapS" "# $res == 1 when success, #@weakCompareAndSwapS_narrow"
%} %}
ins_encode %{ ins_encode %{
@ -5800,11 +5988,32 @@ instruct weakCompareAndSwapS(iRegINoSp res, indirect mem, iRegI_R12 oldval, iReg
ins_pipe(pipe_slow); ins_pipe(pipe_slow);
%} %}
instruct weakCompareAndSwapS(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval)
%{
predicate(UseZabha && UseZacas);
match(Set res (WeakCompareAndSwapS mem (Binary oldval newval)));
ins_cost(2 * VOLATILE_REF_COST);
format %{
"weak_cmpxchg $mem, $oldval, $newval\t# (short, weak) if $mem == $oldval then $mem <-- $newval\n\t"
"# $res == 1 when success, #@weakCompareAndSwapS"
%}
ins_encode %{
__ weak_cmpxchg(as_Register($mem$$base), $oldval$$Register, $newval$$Register, Assembler::int16,
/*acquire*/ Assembler::relaxed, /*release*/ Assembler::rl, $res$$Register);
%}
ins_pipe(pipe_slow);
%}
instruct weakCompareAndSwapI(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval) instruct weakCompareAndSwapI(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval)
%{ %{
match(Set res (WeakCompareAndSwapI mem (Binary oldval newval))); match(Set res (WeakCompareAndSwapI mem (Binary oldval newval)));
ins_cost(LOAD_COST + STORE_COST + BRANCH_COST * 2 + ALU_COST * 2); ins_cost(2 * VOLATILE_REF_COST);
format %{ format %{
"weak_cmpxchg $mem, $oldval, $newval\t# (int, weak) if $mem == $oldval then $mem <-- $newval\n\t" "weak_cmpxchg $mem, $oldval, $newval\t# (int, weak) if $mem == $oldval then $mem <-- $newval\n\t"
@ -5823,7 +6032,7 @@ instruct weakCompareAndSwapL(iRegINoSp res, indirect mem, iRegL oldval, iRegL ne
%{ %{
match(Set res (WeakCompareAndSwapL mem (Binary oldval newval))); match(Set res (WeakCompareAndSwapL mem (Binary oldval newval)));
ins_cost(LOAD_COST + STORE_COST + BRANCH_COST * 2 + ALU_COST * 2); ins_cost(2 * VOLATILE_REF_COST);
format %{ format %{
"weak_cmpxchg $mem, $oldval, $newval\t# (long, weak) if $mem == $oldval then $mem <-- $newval\n\t" "weak_cmpxchg $mem, $oldval, $newval\t# (long, weak) if $mem == $oldval then $mem <-- $newval\n\t"
@ -5841,9 +6050,10 @@ instruct weakCompareAndSwapL(iRegINoSp res, indirect mem, iRegL oldval, iRegL ne
instruct weakCompareAndSwapN(iRegINoSp res, indirect mem, iRegN oldval, iRegN newval) instruct weakCompareAndSwapN(iRegINoSp res, indirect mem, iRegN oldval, iRegN newval)
%{ %{
predicate(n->as_LoadStore()->barrier_data() == 0); predicate(n->as_LoadStore()->barrier_data() == 0);
match(Set res (WeakCompareAndSwapN mem (Binary oldval newval))); match(Set res (WeakCompareAndSwapN mem (Binary oldval newval)));
ins_cost(LOAD_COST + STORE_COST + BRANCH_COST * 2 + ALU_COST * 4); ins_cost(2 * VOLATILE_REF_COST);
format %{ format %{
"weak_cmpxchg $mem, $oldval, $newval\t# (narrow oop, weak) if $mem == $oldval then $mem <-- $newval\n\t" "weak_cmpxchg $mem, $oldval, $newval\t# (narrow oop, weak) if $mem == $oldval then $mem <-- $newval\n\t"
@ -5861,9 +6071,10 @@ instruct weakCompareAndSwapN(iRegINoSp res, indirect mem, iRegN oldval, iRegN ne
instruct weakCompareAndSwapP(iRegINoSp res, indirect mem, iRegP oldval, iRegP newval) instruct weakCompareAndSwapP(iRegINoSp res, indirect mem, iRegP oldval, iRegP newval)
%{ %{
predicate(n->as_LoadStore()->barrier_data() == 0); predicate(n->as_LoadStore()->barrier_data() == 0);
match(Set res (WeakCompareAndSwapP mem (Binary oldval newval))); match(Set res (WeakCompareAndSwapP mem (Binary oldval newval)));
ins_cost(LOAD_COST + STORE_COST + BRANCH_COST * 2 + ALU_COST * 2); ins_cost(2 * VOLATILE_REF_COST);
format %{ format %{
"weak_cmpxchg $mem, $oldval, $newval\t# (ptr, weak) if $mem == $oldval then $mem <-- $newval\n\t" "weak_cmpxchg $mem, $oldval, $newval\t# (ptr, weak) if $mem == $oldval then $mem <-- $newval\n\t"
@ -5878,20 +6089,20 @@ instruct weakCompareAndSwapP(iRegINoSp res, indirect mem, iRegP oldval, iRegP ne
ins_pipe(pipe_slow); ins_pipe(pipe_slow);
%} %}
instruct weakCompareAndSwapBAcq(iRegINoSp res, indirect mem, iRegI_R12 oldval, iRegI_R13 newval, instruct weakCompareAndSwapBAcq_narrow(iRegINoSp res, indirect mem, iRegI_R12 oldval, iRegI_R13 newval,
iRegINoSp tmp1, iRegINoSp tmp2, iRegINoSp tmp3, rFlagsReg cr) iRegINoSp tmp1, iRegINoSp tmp2, iRegINoSp tmp3, rFlagsReg cr)
%{ %{
predicate(needs_acquiring_load_reserved(n)); predicate((!UseZabha || !UseZacas) && needs_acquiring_load_reserved(n));
match(Set res (WeakCompareAndSwapB mem (Binary oldval newval))); match(Set res (WeakCompareAndSwapB mem (Binary oldval newval)));
ins_cost(LOAD_COST + STORE_COST + BRANCH_COST * 2 + ALU_COST * 6); ins_cost(2 * VOLATILE_REF_COST);
effect(TEMP_DEF res, KILL cr, USE_KILL oldval, USE_KILL newval, TEMP tmp1, TEMP tmp2, TEMP tmp3); effect(TEMP_DEF res, KILL cr, USE_KILL oldval, USE_KILL newval, TEMP tmp1, TEMP tmp2, TEMP tmp3);
format %{ format %{
"weak_cmpxchg_acq $mem, $oldval, $newval\t# (byte, weak) if $mem == $oldval then $mem <-- $newval\n\t" "weak_cmpxchg_acq $mem, $oldval, $newval\t# (byte, weak) if $mem == $oldval then $mem <-- $newval\n\t"
"# $res == 1 when success, #@weakCompareAndSwapBAcq" "# $res == 1 when success, #@weakCompareAndSwapBAcq_narrow"
%} %}
ins_encode %{ ins_encode %{
@ -5903,20 +6114,41 @@ instruct weakCompareAndSwapBAcq(iRegINoSp res, indirect mem, iRegI_R12 oldval, i
ins_pipe(pipe_slow); ins_pipe(pipe_slow);
%} %}
instruct weakCompareAndSwapSAcq(iRegINoSp res, indirect mem, iRegI_R12 oldval, iRegI_R13 newval, instruct weakCompareAndSwapBAcq(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval)
iRegINoSp tmp1, iRegINoSp tmp2, iRegINoSp tmp3, rFlagsReg cr)
%{ %{
predicate(needs_acquiring_load_reserved(n)); predicate((UseZabha && UseZacas) && needs_acquiring_load_reserved(n));
match(Set res (WeakCompareAndSwapB mem (Binary oldval newval)));
ins_cost(2 * VOLATILE_REF_COST);
format %{
"weak_cmpxchg_acq $mem, $oldval, $newval\t# (byte, weak) if $mem == $oldval then $mem <-- $newval\n\t"
"# $res == 1 when success, #@weakCompareAndSwapBAcq"
%}
ins_encode %{
__ weak_cmpxchg(as_Register($mem$$base), $oldval$$Register, $newval$$Register, Assembler::int8,
/*acquire*/ Assembler::aq, /*release*/ Assembler::rl, $res$$Register);
%}
ins_pipe(pipe_slow);
%}
instruct weakCompareAndSwapSAcq_narrow(iRegINoSp res, indirect mem, iRegI_R12 oldval, iRegI_R13 newval,
iRegINoSp tmp1, iRegINoSp tmp2, iRegINoSp tmp3, rFlagsReg cr)
%{
predicate((!UseZabha || !UseZacas) && needs_acquiring_load_reserved(n));
match(Set res (WeakCompareAndSwapS mem (Binary oldval newval))); match(Set res (WeakCompareAndSwapS mem (Binary oldval newval)));
ins_cost(LOAD_COST + STORE_COST + BRANCH_COST * 2 + ALU_COST * 7); ins_cost(2 * VOLATILE_REF_COST);
effect(TEMP_DEF res, KILL cr, USE_KILL oldval, USE_KILL newval, TEMP tmp1, TEMP tmp2, TEMP tmp3); effect(TEMP_DEF res, KILL cr, USE_KILL oldval, USE_KILL newval, TEMP tmp1, TEMP tmp2, TEMP tmp3);
format %{ format %{
"weak_cmpxchg_acq $mem, $oldval, $newval\t# (short, weak) if $mem == $oldval then $mem <-- $newval\n\t" "weak_cmpxchg_acq $mem, $oldval, $newval\t# (short, weak) if $mem == $oldval then $mem <-- $newval\n\t"
"# $res == 1 when success, #@weakCompareAndSwapSAcq" "# $res == 1 when success, #@weakCompareAndSwapSAcq_narrow"
%} %}
ins_encode %{ ins_encode %{
@ -5928,13 +6160,34 @@ instruct weakCompareAndSwapSAcq(iRegINoSp res, indirect mem, iRegI_R12 oldval, i
ins_pipe(pipe_slow); ins_pipe(pipe_slow);
%} %}
instruct weakCompareAndSwapSAcq(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval)
%{
predicate((UseZabha && UseZacas) && needs_acquiring_load_reserved(n));
match(Set res (WeakCompareAndSwapS mem (Binary oldval newval)));
ins_cost(2 * VOLATILE_REF_COST);
format %{
"weak_cmpxchg_acq $mem, $oldval, $newval\t# (short, weak) if $mem == $oldval then $mem <-- $newval\n\t"
"# $res == 1 when success, #@weakCompareAndSwapSAcq"
%}
ins_encode %{
__ weak_cmpxchg(as_Register($mem$$base), $oldval$$Register, $newval$$Register, Assembler::int16,
/*acquire*/ Assembler::aq, /*release*/ Assembler::rl, $res$$Register);
%}
ins_pipe(pipe_slow);
%}
instruct weakCompareAndSwapIAcq(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval) instruct weakCompareAndSwapIAcq(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval)
%{ %{
predicate(needs_acquiring_load_reserved(n)); predicate(needs_acquiring_load_reserved(n));
match(Set res (WeakCompareAndSwapI mem (Binary oldval newval))); match(Set res (WeakCompareAndSwapI mem (Binary oldval newval)));
ins_cost(LOAD_COST + STORE_COST + BRANCH_COST * 2 + ALU_COST * 2); ins_cost(2 * VOLATILE_REF_COST);
format %{ format %{
"weak_cmpxchg_acq $mem, $oldval, $newval\t# (int, weak) if $mem == $oldval then $mem <-- $newval\n\t" "weak_cmpxchg_acq $mem, $oldval, $newval\t# (int, weak) if $mem == $oldval then $mem <-- $newval\n\t"
@ -5955,7 +6208,7 @@ instruct weakCompareAndSwapLAcq(iRegINoSp res, indirect mem, iRegL oldval, iRegL
match(Set res (WeakCompareAndSwapL mem (Binary oldval newval))); match(Set res (WeakCompareAndSwapL mem (Binary oldval newval)));
ins_cost(LOAD_COST + STORE_COST + BRANCH_COST * 2 + ALU_COST * 2); ins_cost(2 * VOLATILE_REF_COST);
format %{ format %{
"weak_cmpxchg_acq $mem, $oldval, $newval\t# (long, weak) if $mem == $oldval then $mem <-- $newval\n\t" "weak_cmpxchg_acq $mem, $oldval, $newval\t# (long, weak) if $mem == $oldval then $mem <-- $newval\n\t"
@ -5976,7 +6229,7 @@ instruct weakCompareAndSwapNAcq(iRegINoSp res, indirect mem, iRegN oldval, iRegN
match(Set res (WeakCompareAndSwapN mem (Binary oldval newval))); match(Set res (WeakCompareAndSwapN mem (Binary oldval newval)));
ins_cost(LOAD_COST + STORE_COST + BRANCH_COST * 2 + ALU_COST * 4); ins_cost(2 * VOLATILE_REF_COST);
format %{ format %{
"weak_cmpxchg_acq $mem, $oldval, $newval\t# (narrow oop, weak) if $mem == $oldval then $mem <-- $newval\n\t" "weak_cmpxchg_acq $mem, $oldval, $newval\t# (narrow oop, weak) if $mem == $oldval then $mem <-- $newval\n\t"
@ -5997,7 +6250,7 @@ instruct weakCompareAndSwapPAcq(iRegINoSp res, indirect mem, iRegP oldval, iRegP
match(Set res (WeakCompareAndSwapP mem (Binary oldval newval))); match(Set res (WeakCompareAndSwapP mem (Binary oldval newval)));
ins_cost(LOAD_COST + STORE_COST + BRANCH_COST * 2 + ALU_COST * 2); ins_cost(2 * VOLATILE_REF_COST);
format %{ format %{
"weak_cmpxchg_acq $mem, $oldval, $newval\t# (ptr, weak) if $mem == $oldval then $mem <-- $newval\n\t" "weak_cmpxchg_acq $mem, $oldval, $newval\t# (ptr, weak) if $mem == $oldval then $mem <-- $newval\n\t"

View File

@ -1646,6 +1646,7 @@ void TemplateInterpreterGenerator::generate_throw_exception() {
Interpreter::_remove_activation_preserving_args_entry = __ pc(); Interpreter::_remove_activation_preserving_args_entry = __ pc();
__ empty_expression_stack(); __ empty_expression_stack();
__ restore_bcp(); // We could have returned from deoptimizing this frame, so restore rbcp.
// Set the popframe_processing bit in pending_popframe_condition // Set the popframe_processing bit in pending_popframe_condition
// indicating that we are currently handling popframe, so that // indicating that we are currently handling popframe, so that
// call_VMs that may happen later do not trigger new popframe // call_VMs that may happen later do not trigger new popframe

View File

@ -203,15 +203,15 @@ void VM_Version::common_initialize() {
} }
} }
// Misc Intrinsics could depend on RVV // Misc Intrinsics that could depend on RVV.
if (UseZba || UseRVV) { if (!AvoidUnalignedAccesses && (UseZba || UseRVV)) {
if (FLAG_IS_DEFAULT(UseCRC32Intrinsics)) { if (FLAG_IS_DEFAULT(UseCRC32Intrinsics)) {
FLAG_SET_DEFAULT(UseCRC32Intrinsics, true); FLAG_SET_DEFAULT(UseCRC32Intrinsics, true);
} }
} else { } else {
if (!FLAG_IS_DEFAULT(UseCRC32Intrinsics)) { if (!FLAG_IS_DEFAULT(UseCRC32Intrinsics)) {
warning("CRC32 intrinsic requires Zba or RVV instructions (not available on this CPU)"); warning("CRC32 intrinsic are not available on this CPU.");
} }
FLAG_SET_DEFAULT(UseCRC32Intrinsics, false); FLAG_SET_DEFAULT(UseCRC32Intrinsics, false);
} }

View File

@ -221,13 +221,13 @@ class VM_Version : public Abstract_VM_Version {
FLAG_SET_DEFAULT(UseExtension, true); \ FLAG_SET_DEFAULT(UseExtension, true); \
} \ } \
// https://github.com/riscv/riscv-profiles/blob/main/profiles.adoc#rva20-profiles // https://github.com/riscv/riscv-profiles/blob/main/src/profiles.adoc#rva20-profiles
#define RV_USE_RVA20U64 \ #define RV_USE_RVA20U64 \
RV_ENABLE_EXTENSION(UseRVC) \ RV_ENABLE_EXTENSION(UseRVC) \
static void useRVA20U64Profile(); static void useRVA20U64Profile();
// https://github.com/riscv/riscv-profiles/blob/main/profiles.adoc#rva22-profiles // https://github.com/riscv/riscv-profiles/blob/main/src/profiles.adoc#rva22-profiles
#define RV_USE_RVA22U64 \ #define RV_USE_RVA22U64 \
RV_ENABLE_EXTENSION(UseRVC) \ RV_ENABLE_EXTENSION(UseRVC) \
RV_ENABLE_EXTENSION(UseZba) \ RV_ENABLE_EXTENSION(UseZba) \
@ -241,7 +241,7 @@ class VM_Version : public Abstract_VM_Version {
static void useRVA22U64Profile(); static void useRVA22U64Profile();
// https://github.com/riscv/riscv-profiles/blob/main/rva23-profile.adoc#rva23u64-profile // https://github.com/riscv/riscv-profiles/blob/main/src/rva23-profile.adoc#rva23u64-profile
#define RV_USE_RVA23U64 \ #define RV_USE_RVA23U64 \
RV_ENABLE_EXTENSION(UseRVC) \ RV_ENABLE_EXTENSION(UseRVC) \
RV_ENABLE_EXTENSION(UseRVV) \ RV_ENABLE_EXTENSION(UseRVV) \

View File

@ -410,7 +410,7 @@
// C2I adapter frames: // C2I adapter frames:
// //
// STACK (interpreted called from compiled, on entry to frame manager): // STACK (interpreted called from compiled, on entry to template interpreter):
// //
// [TOP_C2I_FRAME] // [TOP_C2I_FRAME]
// [JIT_FRAME] // [JIT_FRAME]

View File

@ -1,5 +1,5 @@
/* /*
* Copyright (c) 2016, 2024, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2016, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2016, 2023 SAP SE. All rights reserved. * Copyright (c) 2016, 2023 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* *
@ -414,7 +414,7 @@ constexpr FloatRegister Z_FARG2 = Z_F2;
constexpr FloatRegister Z_FARG3 = Z_F4; constexpr FloatRegister Z_FARG3 = Z_F4;
constexpr FloatRegister Z_FARG4 = Z_F6; constexpr FloatRegister Z_FARG4 = Z_F6;
// Register declarations to be used in frame manager assembly code. // Register declarations to be used in template interpreter assembly code.
// Use only non-volatile registers in order to keep values across C-calls. // Use only non-volatile registers in order to keep values across C-calls.
// Register to cache the integer value on top of the operand stack. // Register to cache the integer value on top of the operand stack.
@ -439,7 +439,7 @@ constexpr Register Z_bcp = Z_R13;
// Bytecode which is dispatched (short lived!). // Bytecode which is dispatched (short lived!).
constexpr Register Z_bytecode = Z_R14; constexpr Register Z_bytecode = Z_R14;
// Temporary registers to be used within frame manager. We can use // Temporary registers to be used within template interpreter. We can use
// the nonvolatile ones because the call stub has saved them. // the nonvolatile ones because the call stub has saved them.
// Use only non-volatile registers in order to keep values across C-calls. // Use only non-volatile registers in order to keep values across C-calls.
constexpr Register Z_tmp_1 = Z_R10; constexpr Register Z_tmp_1 = Z_R10;

View File

@ -118,7 +118,7 @@ ExceptionBlob* OptoRuntime::generate_exception_blob() {
__ z_lgr(Z_SP, saved_sp); __ z_lgr(Z_SP, saved_sp);
// [Z_RET] isn't null was possible in hotspot5 but not in sapjvm6. // [Z_RET] isn't null was possible in hotspot5 but not in sapjvm6.
// C2I adapter extensions are now removed by a resize in the frame manager // C2I adapter extensions are now removed by a resize in the template interpreter
// (unwind_initial_activation_pending_exception). // (unwind_initial_activation_pending_exception).
#ifdef ASSERT #ifdef ASSERT
__ z_ltgr(handle_exception, handle_exception); __ z_ltgr(handle_exception, handle_exception);

View File

@ -2139,7 +2139,7 @@ static address gen_c2i_adapter(MacroAssembler *masm,
Register value = Z_R12; Register value = Z_R12;
// Remember the senderSP so we can pop the interpreter arguments off of the stack. // Remember the senderSP so we can pop the interpreter arguments off of the stack.
// In addition, frame manager expects initial_caller_sp in Z_R10. // In addition, template interpreter expects initial_caller_sp in Z_R10.
__ z_lgr(sender_SP, Z_SP); __ z_lgr(sender_SP, Z_SP);
// This should always fit in 14 bit immediate. // This should always fit in 14 bit immediate.

View File

@ -115,7 +115,7 @@ class StubGenerator: public StubCodeGenerator {
// [SP+176] - thread : Thread* // [SP+176] - thread : Thread*
// //
address generate_call_stub(address& return_address) { address generate_call_stub(address& return_address) {
// Set up a new C frame, copy Java arguments, call frame manager // Set up a new C frame, copy Java arguments, call template interpreter
// or native_entry, and process result. // or native_entry, and process result.
StubGenStubId stub_id = StubGenStubId::call_stub_id; StubGenStubId stub_id = StubGenStubId::call_stub_id;
@ -272,10 +272,10 @@ class StubGenerator: public StubCodeGenerator {
BLOCK_COMMENT("call {"); BLOCK_COMMENT("call {");
{ {
// Call frame manager or native entry. // Call template interpreter or native entry.
// //
// Register state on entry to frame manager / native entry: // Register state on entry to template interpreter / native entry:
// //
// Z_ARG1 = r_top_of_arguments_addr - intptr_t *sender tos (prepushed) // Z_ARG1 = r_top_of_arguments_addr - intptr_t *sender tos (prepushed)
// Lesp = (SP) + copied_arguments_offset - 8 // Lesp = (SP) + copied_arguments_offset - 8
@ -290,7 +290,7 @@ class StubGenerator: public StubCodeGenerator {
__ z_lgr(Z_esp, r_top_of_arguments_addr); __ z_lgr(Z_esp, r_top_of_arguments_addr);
// //
// Stack on entry to frame manager / native entry: // Stack on entry to template interpreter / native entry:
// //
// F0 [TOP_IJAVA_FRAME_ABI] // F0 [TOP_IJAVA_FRAME_ABI]
// [outgoing Java arguments] // [outgoing Java arguments]
@ -300,7 +300,7 @@ class StubGenerator: public StubCodeGenerator {
// //
// Do a light-weight C-call here, r_new_arg_entry holds the address // Do a light-weight C-call here, r_new_arg_entry holds the address
// of the interpreter entry point (frame manager or native entry) // of the interpreter entry point (template interpreter or native entry)
// and save runtime-value of return_pc in return_address // and save runtime-value of return_pc in return_address
// (call by reference argument). // (call by reference argument).
return_address = __ call_stub(r_new_arg_entry); return_address = __ call_stub(r_new_arg_entry);
@ -309,11 +309,11 @@ class StubGenerator: public StubCodeGenerator {
{ {
BLOCK_COMMENT("restore registers {"); BLOCK_COMMENT("restore registers {");
// Returned from frame manager or native entry. // Returned from template interpreter or native entry.
// Now pop frame, process result, and return to caller. // Now pop frame, process result, and return to caller.
// //
// Stack on exit from frame manager / native entry: // Stack on exit from template interpreter / native entry:
// //
// F0 [ABI] // F0 [ABI]
// ... // ...
@ -330,7 +330,7 @@ class StubGenerator: public StubCodeGenerator {
__ pop_frame(); __ pop_frame();
// Reload some volatile registers which we've spilled before the call // Reload some volatile registers which we've spilled before the call
// to frame manager / native entry. // to template interpreter / native entry.
// Access all locals via frame pointer, because we know nothing about // Access all locals via frame pointer, because we know nothing about
// the topmost frame's size. // the topmost frame's size.
__ z_lg(r_arg_result_addr, result_address_offset, r_entryframe_fp); __ z_lg(r_arg_result_addr, result_address_offset, r_entryframe_fp);

View File

@ -1217,7 +1217,7 @@ void TemplateInterpreterGenerator::generate_fixed_frame(bool native_call) {
// Various method entries // Various method entries
// Math function, frame manager must set up an interpreter state, etc. // Math function, template interpreter must set up an interpreter state, etc.
address TemplateInterpreterGenerator::generate_math_entry(AbstractInterpreter::MethodKind kind) { address TemplateInterpreterGenerator::generate_math_entry(AbstractInterpreter::MethodKind kind) {
// Decide what to do: Use same platform specific instructions and runtime calls as compilers. // Decide what to do: Use same platform specific instructions and runtime calls as compilers.

View File

@ -30,11 +30,15 @@
size_t ZPointerLoadShift; size_t ZPointerLoadShift;
size_t ZPlatformAddressOffsetBits() { size_t ZPlatformAddressOffsetBits() {
#ifdef ADDRESS_SANITIZER
return 44;
#else
const size_t min_address_offset_bits = 42; // 4TB const size_t min_address_offset_bits = 42; // 4TB
const size_t max_address_offset_bits = 44; // 16TB const size_t max_address_offset_bits = 44; // 16TB
const size_t address_offset = ZGlobalsPointers::min_address_offset_request(); const size_t address_offset = ZGlobalsPointers::min_address_offset_request();
const size_t address_offset_bits = log2i_exact(address_offset); const size_t address_offset_bits = log2i_exact(address_offset);
return clamp(address_offset_bits, min_address_offset_bits, max_address_offset_bits); return clamp(address_offset_bits, min_address_offset_bits, max_address_offset_bits);
#endif
} }
size_t ZPlatformAddressHeapBaseShift() { size_t ZPlatformAddressHeapBaseShift() {

View File

@ -118,6 +118,10 @@ instruct zLoadP(rRegP dst, memory mem, rFlagsReg cr)
predicate(UseZGC && n->as_Load()->barrier_data() != 0); predicate(UseZGC && n->as_Load()->barrier_data() != 0);
match(Set dst (LoadP mem)); match(Set dst (LoadP mem));
effect(TEMP dst, KILL cr); effect(TEMP dst, KILL cr);
// The main load is a candidate to implement implicit null checks. The
// barrier's slow path includes an identical reload, which does not need to be
// registered in the exception table because it is dominated by the main one.
ins_is_late_expanded_null_check_candidate(true);
ins_cost(125); ins_cost(125);

View File

@ -1355,25 +1355,15 @@ void InterpreterMacroAssembler::update_mdp_for_ret(Register return_bci) {
} }
void InterpreterMacroAssembler::profile_taken_branch(Register mdp, void InterpreterMacroAssembler::profile_taken_branch(Register mdp) {
Register bumped_count) {
if (ProfileInterpreter) { if (ProfileInterpreter) {
Label profile_continue; Label profile_continue;
// If no method data exists, go to profile_continue. // If no method data exists, go to profile_continue.
// Otherwise, assign to mdp
test_method_data_pointer(mdp, profile_continue); test_method_data_pointer(mdp, profile_continue);
// We are taking a branch. Increment the taken count. // We are taking a branch. Increment the taken count.
// We inline increment_mdp_data_at to return bumped_count in a register increment_mdp_data_at(mdp, in_bytes(JumpData::taken_offset()));
//increment_mdp_data_at(mdp, in_bytes(JumpData::taken_offset()));
Address data(mdp, in_bytes(JumpData::taken_offset()));
movptr(bumped_count, data);
assert(DataLayout::counter_increment == 1,
"flow-free idiom only works with 1");
addptr(bumped_count, DataLayout::counter_increment);
sbbptr(bumped_count, 0);
movptr(data, bumped_count); // Store back out
// The method data pointer needs to be updated to reflect the new target. // The method data pointer needs to be updated to reflect the new target.
update_mdp_by_offset(mdp, in_bytes(JumpData::displacement_offset())); update_mdp_by_offset(mdp, in_bytes(JumpData::displacement_offset()));
@ -1389,7 +1379,7 @@ void InterpreterMacroAssembler::profile_not_taken_branch(Register mdp) {
// If no method data exists, go to profile_continue. // If no method data exists, go to profile_continue.
test_method_data_pointer(mdp, profile_continue); test_method_data_pointer(mdp, profile_continue);
// We are taking a branch. Increment the not taken count. // We are not taking a branch. Increment the not taken count.
increment_mdp_data_at(mdp, in_bytes(BranchData::not_taken_offset())); increment_mdp_data_at(mdp, in_bytes(BranchData::not_taken_offset()));
// The method data pointer needs to be updated to correspond to // The method data pointer needs to be updated to correspond to

View File

@ -236,7 +236,7 @@ class InterpreterMacroAssembler: public MacroAssembler {
void update_mdp_by_constant(Register mdp_in, int constant); void update_mdp_by_constant(Register mdp_in, int constant);
void update_mdp_for_ret(Register return_bci); void update_mdp_for_ret(Register return_bci);
void profile_taken_branch(Register mdp, Register bumped_count); void profile_taken_branch(Register mdp);
void profile_not_taken_branch(Register mdp); void profile_not_taken_branch(Register mdp);
void profile_call(Register mdp); void profile_call(Register mdp);
void profile_final_call(Register mdp); void profile_final_call(Register mdp);

View File

@ -1441,6 +1441,7 @@ void TemplateInterpreterGenerator::generate_throw_exception() {
Interpreter::_remove_activation_preserving_args_entry = __ pc(); Interpreter::_remove_activation_preserving_args_entry = __ pc();
__ empty_expression_stack(); __ empty_expression_stack();
__ restore_bcp(); // We could have returned from deoptimizing this frame, so restore rbcp.
// Set the popframe_processing bit in pending_popframe_condition // Set the popframe_processing bit in pending_popframe_condition
// indicating that we are currently handling popframe, so that // indicating that we are currently handling popframe, so that
// call_VMs that may happen later do not trigger new popframe // call_VMs that may happen later do not trigger new popframe

View File

@ -465,13 +465,19 @@ address TemplateInterpreterGenerator::generate_math_entry(AbstractInterpreter::M
__ call_VM_leaf0(CAST_FROM_FN_PTR(address, SharedRuntime::dtan)); __ call_VM_leaf0(CAST_FROM_FN_PTR(address, SharedRuntime::dtan));
} }
} else if (kind == Interpreter::java_lang_math_tanh) { } else if (kind == Interpreter::java_lang_math_tanh) {
assert(StubRoutines::dtanh() != nullptr, "not initialized"); if (StubRoutines::dtanh() != nullptr) {
__ movdbl(xmm0, Address(rsp, wordSize)); __ movdbl(xmm0, Address(rsp, wordSize));
__ call(RuntimeAddress(CAST_FROM_FN_PTR(address, StubRoutines::dtanh()))); __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, StubRoutines::dtanh())));
} else {
return nullptr; // Fallback to default implementation
}
} else if (kind == Interpreter::java_lang_math_cbrt) { } else if (kind == Interpreter::java_lang_math_cbrt) {
assert(StubRoutines::dcbrt() != nullptr, "not initialized"); if (StubRoutines::dcbrt() != nullptr) {
__ movdbl(xmm0, Address(rsp, wordSize)); __ movdbl(xmm0, Address(rsp, wordSize));
__ call(RuntimeAddress(CAST_FROM_FN_PTR(address, StubRoutines::dcbrt()))); __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, StubRoutines::dcbrt())));
} else {
return nullptr; // Fallback to default implementation
}
} else if (kind == Interpreter::java_lang_math_abs) { } else if (kind == Interpreter::java_lang_math_abs) {
assert(StubRoutines::x86::double_sign_mask() != nullptr, "not initialized"); assert(StubRoutines::x86::double_sign_mask() != nullptr, "not initialized");
__ movdbl(xmm0, Address(rsp, wordSize)); __ movdbl(xmm0, Address(rsp, wordSize));

View File

@ -1687,8 +1687,7 @@ void TemplateTable::float_cmp(bool is_float, int unordered_result) {
void TemplateTable::branch(bool is_jsr, bool is_wide) { void TemplateTable::branch(bool is_jsr, bool is_wide) {
__ get_method(rcx); // rcx holds method __ get_method(rcx); // rcx holds method
__ profile_taken_branch(rax, rbx); // rax holds updated MDP, rbx __ profile_taken_branch(rax); // rax holds updated MDP
// holds bumped taken count
const ByteSize be_offset = MethodCounters::backedge_counter_offset() + const ByteSize be_offset = MethodCounters::backedge_counter_offset() +
InvocationCounter::counter_offset(); InvocationCounter::counter_offset();
@ -1739,7 +1738,6 @@ void TemplateTable::branch(bool is_jsr, bool is_wide) {
if (UseLoopCounter) { if (UseLoopCounter) {
// increment backedge counter for backward branches // increment backedge counter for backward branches
// rax: MDO // rax: MDO
// rbx: MDO bumped taken-count
// rcx: method // rcx: method
// rdx: target offset // rdx: target offset
// r13: target bcp // r13: target bcp

View File

@ -2055,6 +2055,10 @@ ins_attrib ins_alignment(1); // Required alignment attribute (must
// compute_padding() function must be // compute_padding() function must be
// provided for the instruction // provided for the instruction
// Whether this node is expanded during code emission into a sequence of
// instructions and the first instruction can perform an implicit null check.
ins_attrib ins_is_late_expanded_null_check_candidate(false);
//----------OPERANDS----------------------------------------------------------- //----------OPERANDS-----------------------------------------------------------
// Operand definitions must precede instruction definitions for correct parsing // Operand definitions must precede instruction definitions for correct parsing
// in the ADLC because operands constitute user defined types which are used in // in the ADLC because operands constitute user defined types which are used in
@ -10527,7 +10531,8 @@ instruct xorI_rReg_im1_ndd(rRegI dst, rRegI src, immI_M1 imm)
// Xor Register with Immediate // Xor Register with Immediate
instruct xorI_rReg_imm(rRegI dst, immI src, rFlagsReg cr) instruct xorI_rReg_imm(rRegI dst, immI src, rFlagsReg cr)
%{ %{
predicate(!UseAPX); // Strict predicate check to make selection of xorI_rReg_im1 cost agnostic if immI src is -1.
predicate(!UseAPX && n->in(2)->bottom_type()->is_int()->get_con() != -1);
match(Set dst (XorI dst src)); match(Set dst (XorI dst src));
effect(KILL cr); effect(KILL cr);
flag(PD::Flag_sets_sign_flag, PD::Flag_sets_zero_flag, PD::Flag_sets_parity_flag, PD::Flag_clears_overflow_flag, PD::Flag_clears_carry_flag); flag(PD::Flag_sets_sign_flag, PD::Flag_sets_zero_flag, PD::Flag_sets_parity_flag, PD::Flag_clears_overflow_flag, PD::Flag_clears_carry_flag);
@ -10541,7 +10546,8 @@ instruct xorI_rReg_imm(rRegI dst, immI src, rFlagsReg cr)
instruct xorI_rReg_rReg_imm_ndd(rRegI dst, rRegI src1, immI src2, rFlagsReg cr) instruct xorI_rReg_rReg_imm_ndd(rRegI dst, rRegI src1, immI src2, rFlagsReg cr)
%{ %{
predicate(UseAPX); // Strict predicate check to make selection of xorI_rReg_im1_ndd cost agnostic if immI src2 is -1.
predicate(UseAPX && n->in(2)->bottom_type()->is_int()->get_con() != -1);
match(Set dst (XorI src1 src2)); match(Set dst (XorI src1 src2));
effect(KILL cr); effect(KILL cr);
flag(PD::Flag_sets_sign_flag, PD::Flag_sets_zero_flag, PD::Flag_sets_parity_flag, PD::Flag_clears_overflow_flag, PD::Flag_clears_carry_flag); flag(PD::Flag_sets_sign_flag, PD::Flag_sets_zero_flag, PD::Flag_sets_parity_flag, PD::Flag_clears_overflow_flag, PD::Flag_clears_carry_flag);
@ -10559,6 +10565,7 @@ instruct xorI_rReg_mem_imm_ndd(rRegI dst, memory src1, immI src2, rFlagsReg cr)
predicate(UseAPX); predicate(UseAPX);
match(Set dst (XorI (LoadI src1) src2)); match(Set dst (XorI (LoadI src1) src2));
effect(KILL cr); effect(KILL cr);
ins_cost(150);
flag(PD::Flag_sets_sign_flag, PD::Flag_sets_zero_flag, PD::Flag_sets_parity_flag, PD::Flag_clears_overflow_flag, PD::Flag_clears_carry_flag); flag(PD::Flag_sets_sign_flag, PD::Flag_sets_zero_flag, PD::Flag_sets_parity_flag, PD::Flag_clears_overflow_flag, PD::Flag_clears_carry_flag);
format %{ "exorl $dst, $src1, $src2\t# int ndd" %} format %{ "exorl $dst, $src1, $src2\t# int ndd" %}
@ -11201,7 +11208,8 @@ instruct xorL_rReg_im1_ndd(rRegL dst,rRegL src, immL_M1 imm)
// Xor Register with Immediate // Xor Register with Immediate
instruct xorL_rReg_imm(rRegL dst, immL32 src, rFlagsReg cr) instruct xorL_rReg_imm(rRegL dst, immL32 src, rFlagsReg cr)
%{ %{
predicate(!UseAPX); // Strict predicate check to make selection of xorL_rReg_im1 cost agnostic if immL32 src is -1.
predicate(!UseAPX && n->in(2)->bottom_type()->is_long()->get_con() != -1L);
match(Set dst (XorL dst src)); match(Set dst (XorL dst src));
effect(KILL cr); effect(KILL cr);
flag(PD::Flag_sets_sign_flag, PD::Flag_sets_zero_flag, PD::Flag_sets_parity_flag, PD::Flag_clears_overflow_flag, PD::Flag_clears_carry_flag); flag(PD::Flag_sets_sign_flag, PD::Flag_sets_zero_flag, PD::Flag_sets_parity_flag, PD::Flag_clears_overflow_flag, PD::Flag_clears_carry_flag);
@ -11215,7 +11223,8 @@ instruct xorL_rReg_imm(rRegL dst, immL32 src, rFlagsReg cr)
instruct xorL_rReg_rReg_imm(rRegL dst, rRegL src1, immL32 src2, rFlagsReg cr) instruct xorL_rReg_rReg_imm(rRegL dst, rRegL src1, immL32 src2, rFlagsReg cr)
%{ %{
predicate(UseAPX); // Strict predicate check to make selection of xorL_rReg_im1_ndd cost agnostic if immL32 src2 is -1.
predicate(UseAPX && n->in(2)->bottom_type()->is_long()->get_con() != -1L);
match(Set dst (XorL src1 src2)); match(Set dst (XorL src1 src2));
effect(KILL cr); effect(KILL cr);
flag(PD::Flag_sets_sign_flag, PD::Flag_sets_zero_flag, PD::Flag_sets_parity_flag, PD::Flag_clears_overflow_flag, PD::Flag_clears_carry_flag); flag(PD::Flag_sets_sign_flag, PD::Flag_sets_zero_flag, PD::Flag_sets_parity_flag, PD::Flag_clears_overflow_flag, PD::Flag_clears_carry_flag);
@ -11234,6 +11243,7 @@ instruct xorL_rReg_mem_imm(rRegL dst, memory src1, immL32 src2, rFlagsReg cr)
match(Set dst (XorL (LoadL src1) src2)); match(Set dst (XorL (LoadL src1) src2));
effect(KILL cr); effect(KILL cr);
flag(PD::Flag_sets_sign_flag, PD::Flag_sets_zero_flag, PD::Flag_sets_parity_flag, PD::Flag_clears_overflow_flag, PD::Flag_clears_carry_flag); flag(PD::Flag_sets_sign_flag, PD::Flag_sets_zero_flag, PD::Flag_sets_parity_flag, PD::Flag_clears_overflow_flag, PD::Flag_clears_carry_flag);
ins_cost(150);
format %{ "exorq $dst, $src1, $src2\t# long ndd" %} format %{ "exorq $dst, $src1, $src2\t# long ndd" %}
ins_encode %{ ins_encode %{

View File

@ -1261,69 +1261,6 @@ void os::pd_print_cpu_info(outputStream* st, char* buf, size_t buflen) {
// Nothing to do beyond of what os::print_cpu_info() does. // Nothing to do beyond of what os::print_cpu_info() does.
} }
static char saved_jvm_path[MAXPATHLEN] = {0};
// Find the full path to the current module, libjvm.so.
void os::jvm_path(char *buf, jint buflen) {
// Error checking.
if (buflen < MAXPATHLEN) {
assert(false, "must use a large-enough buffer");
buf[0] = '\0';
return;
}
// Lazy resolve the path to current module.
if (saved_jvm_path[0] != 0) {
strcpy(buf, saved_jvm_path);
return;
}
Dl_info dlinfo;
int ret = dladdr(CAST_FROM_FN_PTR(void *, os::jvm_path), &dlinfo);
assert(ret != 0, "cannot locate libjvm");
char* rp = os::realpath((char *)dlinfo.dli_fname, buf, buflen);
assert(rp != nullptr, "error in realpath(): maybe the 'path' argument is too long?");
// If executing unit tests we require JAVA_HOME to point to the real JDK.
if (Arguments::executing_unit_tests()) {
// Look for JAVA_HOME in the environment.
char* java_home_var = ::getenv("JAVA_HOME");
if (java_home_var != nullptr && java_home_var[0] != 0) {
// Check the current module name "libjvm.so".
const char* p = strrchr(buf, '/');
if (p == nullptr) {
return;
}
assert(strstr(p, "/libjvm") == p, "invalid library name");
stringStream ss(buf, buflen);
rp = os::realpath(java_home_var, buf, buflen);
if (rp == nullptr) {
return;
}
assert((int)strlen(buf) < buflen, "Ran out of buffer room");
ss.print("%s/lib", buf);
if (0 == access(buf, F_OK)) {
// Use current module name "libjvm.so"
ss.print("/%s/libjvm%s", Abstract_VM_Version::vm_variant(), JNI_LIB_SUFFIX);
assert(strcmp(buf + strlen(buf) - strlen(JNI_LIB_SUFFIX), JNI_LIB_SUFFIX) == 0,
"buf has been truncated");
} else {
// Go back to path of .so
rp = os::realpath((char *)dlinfo.dli_fname, buf, buflen);
if (rp == nullptr) {
return;
}
}
}
}
strncpy(saved_jvm_path, buf, sizeof(saved_jvm_path));
saved_jvm_path[sizeof(saved_jvm_path) - 1] = '\0';
}
//////////////////////////////////////////////////////////////////////////////// ////////////////////////////////////////////////////////////////////////////////
// Virtual Memory // Virtual Memory

View File

@ -154,7 +154,8 @@ julong os::Bsd::available_memory() {
assert(kerr == KERN_SUCCESS, assert(kerr == KERN_SUCCESS,
"host_statistics64 failed - check mach_host_self() and count"); "host_statistics64 failed - check mach_host_self() and count");
if (kerr == KERN_SUCCESS) { if (kerr == KERN_SUCCESS) {
available = vmstat.free_count * os::vm_page_size(); // free_count is just a lowerbound, other page categories can be freed too and make memory available
available = (vmstat.free_count + vmstat.inactive_count + vmstat.purgeable_count) * os::vm_page_size();
} }
#endif #endif
return available; return available;
@ -1482,83 +1483,6 @@ void os::print_memory_info(outputStream* st) {
st->cr(); st->cr();
} }
static char saved_jvm_path[MAXPATHLEN] = {0};
// Find the full path to the current module, libjvm
void os::jvm_path(char *buf, jint buflen) {
// Error checking.
if (buflen < MAXPATHLEN) {
assert(false, "must use a large-enough buffer");
buf[0] = '\0';
return;
}
// Lazy resolve the path to current module.
if (saved_jvm_path[0] != 0) {
strcpy(buf, saved_jvm_path);
return;
}
char dli_fname[MAXPATHLEN];
dli_fname[0] = '\0';
bool ret = dll_address_to_library_name(
CAST_FROM_FN_PTR(address, os::jvm_path),
dli_fname, sizeof(dli_fname), nullptr);
assert(ret, "cannot locate libjvm");
char *rp = nullptr;
if (ret && dli_fname[0] != '\0') {
rp = os::realpath(dli_fname, buf, buflen);
}
if (rp == nullptr) {
return;
}
// If executing unit tests we require JAVA_HOME to point to the real JDK.
if (Arguments::executing_unit_tests()) {
// Look for JAVA_HOME in the environment.
char* java_home_var = ::getenv("JAVA_HOME");
if (java_home_var != nullptr && java_home_var[0] != 0) {
// Check the current module name "libjvm"
const char* p = strrchr(buf, '/');
assert(strstr(p, "/libjvm") == p, "invalid library name");
stringStream ss(buf, buflen);
rp = os::realpath(java_home_var, buf, buflen);
if (rp == nullptr) {
return;
}
assert((int)strlen(buf) < buflen, "Ran out of buffer space");
// Add the appropriate library and JVM variant subdirs
ss.print("%s/lib/%s", buf, Abstract_VM_Version::vm_variant());
if (0 != access(buf, F_OK)) {
ss.reset();
ss.print("%s/lib", buf);
}
// If the path exists within JAVA_HOME, add the JVM library name
// to complete the path to JVM being overridden. Otherwise fallback
// to the path to the current library.
if (0 == access(buf, F_OK)) {
// Use current module name "libjvm"
ss.print("/libjvm%s", JNI_LIB_SUFFIX);
assert(strcmp(buf + strlen(buf) - strlen(JNI_LIB_SUFFIX), JNI_LIB_SUFFIX) == 0,
"buf has been truncated");
} else {
// Fall back to path of current library
rp = os::realpath(dli_fname, buf, buflen);
if (rp == nullptr) {
return;
}
}
}
}
strncpy(saved_jvm_path, buf, MAXPATHLEN);
saved_jvm_path[MAXPATHLEN - 1] = '\0';
}
//////////////////////////////////////////////////////////////////////////////// ////////////////////////////////////////////////////////////////////////////////
// Virtual Memory // Virtual Memory

View File

@ -1,5 +1,5 @@
/* /*
* Copyright (c) 2005, 2024, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2005, 2025, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* *
* This code is free software; you can redistribute it and/or modify it * This code is free software; you can redistribute it and/or modify it
@ -35,9 +35,6 @@
range, \ range, \
constraint) \ constraint) \
\ \
product(bool, UseOprofile, false, \
"(Deprecated) enable support for Oprofile profiler") \
\
product(bool, UseTransparentHugePages, false, \ product(bool, UseTransparentHugePages, false, \
"Use MADV_HUGEPAGE for large pages") \ "Use MADV_HUGEPAGE for large pages") \
\ \

View File

@ -2746,118 +2746,9 @@ void os::get_summary_cpu_info(char* cpuinfo, size_t length) {
#endif #endif
} }
static char saved_jvm_path[MAXPATHLEN] = {0};
// Find the full path to the current module, libjvm.so
void os::jvm_path(char *buf, jint buflen) {
// Error checking.
if (buflen < MAXPATHLEN) {
assert(false, "must use a large-enough buffer");
buf[0] = '\0';
return;
}
// Lazy resolve the path to current module.
if (saved_jvm_path[0] != 0) {
strcpy(buf, saved_jvm_path);
return;
}
char dli_fname[MAXPATHLEN];
dli_fname[0] = '\0';
bool ret = dll_address_to_library_name(
CAST_FROM_FN_PTR(address, os::jvm_path),
dli_fname, sizeof(dli_fname), nullptr);
assert(ret, "cannot locate libjvm");
char *rp = nullptr;
if (ret && dli_fname[0] != '\0') {
rp = os::realpath(dli_fname, buf, buflen);
}
if (rp == nullptr) {
return;
}
// If executing unit tests we require JAVA_HOME to point to the real JDK.
if (Arguments::executing_unit_tests()) {
// Look for JAVA_HOME in the environment.
char* java_home_var = ::getenv("JAVA_HOME");
if (java_home_var != nullptr && java_home_var[0] != 0) {
// Check the current module name "libjvm.so".
const char* p = strrchr(buf, '/');
if (p == nullptr) {
return;
}
assert(strstr(p, "/libjvm") == p, "invalid library name");
stringStream ss(buf, buflen);
rp = os::realpath(java_home_var, buf, buflen);
if (rp == nullptr) {
return;
}
assert((int)strlen(buf) < buflen, "Ran out of buffer room");
ss.print("%s/lib", buf);
if (0 == access(buf, F_OK)) {
// Use current module name "libjvm.so"
ss.print("/%s/libjvm%s", Abstract_VM_Version::vm_variant(), JNI_LIB_SUFFIX);
assert(strcmp(buf + strlen(buf) - strlen(JNI_LIB_SUFFIX), JNI_LIB_SUFFIX) == 0,
"buf has been truncated");
} else {
// Go back to path of .so
rp = os::realpath(dli_fname, buf, buflen);
if (rp == nullptr) {
return;
}
}
}
}
strncpy(saved_jvm_path, buf, MAXPATHLEN);
saved_jvm_path[MAXPATHLEN - 1] = '\0';
}
//////////////////////////////////////////////////////////////////////////////// ////////////////////////////////////////////////////////////////////////////////
// Virtual Memory // Virtual Memory
// Rationale behind this function:
// current (Mon Apr 25 20:12:18 MSD 2005) oprofile drops samples without executable
// mapping for address (see lookup_dcookie() in the kernel module), thus we cannot get
// samples for JITted code. Here we create private executable mapping over the code cache
// and then we can use standard (well, almost, as mapping can change) way to provide
// info for the reporting script by storing timestamp and location of symbol
void linux_wrap_code(char* base, size_t size) {
static volatile jint cnt = 0;
static_assert(sizeof(off_t) == 8, "Expected Large File Support in this file");
if (!UseOprofile) {
return;
}
char buf[PATH_MAX+1];
int num = Atomic::add(&cnt, 1);
snprintf(buf, sizeof(buf), "%s/hs-vm-%d-%d",
os::get_temp_directory(), os::current_process_id(), num);
unlink(buf);
int fd = ::open(buf, O_CREAT | O_RDWR, S_IRWXU);
if (fd != -1) {
off_t rv = ::lseek(fd, size-2, SEEK_SET);
if (rv != (off_t)-1) {
if (::write(fd, "", 1) == 1) {
mmap(base, size,
PROT_READ|PROT_WRITE|PROT_EXEC,
MAP_PRIVATE|MAP_FIXED|MAP_NORESERVE, fd, 0);
}
}
::close(fd);
unlink(buf);
}
}
static bool recoverable_mmap_error(int err) { static bool recoverable_mmap_error(int err) {
// See if the error is one we can let the caller handle. This // See if the error is one we can let the caller handle. This
// list of errno values comes from JBS-6843484. I can't find a // list of errno values comes from JBS-6843484. I can't find a

View File

@ -59,6 +59,7 @@
#ifdef AIX #ifdef AIX
#include "loadlib_aix.hpp" #include "loadlib_aix.hpp"
#include "os_aix.hpp" #include "os_aix.hpp"
#include "porting_aix.hpp"
#endif #endif
#ifdef LINUX #ifdef LINUX
#include "os_linux.hpp" #include "os_linux.hpp"
@ -1060,6 +1061,95 @@ bool os::same_files(const char* file1, const char* file2) {
return is_same; return is_same;
} }
static char saved_jvm_path[MAXPATHLEN] = {0};
// Find the full path to the current module, libjvm.so
void os::jvm_path(char *buf, jint buflen) {
// Error checking.
if (buflen < MAXPATHLEN) {
assert(false, "must use a large-enough buffer");
buf[0] = '\0';
return;
}
// Lazy resolve the path to current module.
if (saved_jvm_path[0] != 0) {
strcpy(buf, saved_jvm_path);
return;
}
const char* fname;
#ifdef AIX
Dl_info dlinfo;
int ret = dladdr(CAST_FROM_FN_PTR(void *, os::jvm_path), &dlinfo);
assert(ret != 0, "cannot locate libjvm");
if (ret == 0) {
return;
}
fname = dlinfo.dli_fname;
#else
char dli_fname[MAXPATHLEN];
dli_fname[0] = '\0';
bool ret = dll_address_to_library_name(
CAST_FROM_FN_PTR(address, os::jvm_path),
dli_fname, sizeof(dli_fname), nullptr);
assert(ret, "cannot locate libjvm");
if (!ret) {
return;
}
fname = dli_fname;
#endif // AIX
char* rp = nullptr;
if (fname[0] != '\0') {
rp = os::realpath(fname, buf, buflen);
}
if (rp == nullptr) {
return;
}
// If executing unit tests we require JAVA_HOME to point to the real JDK.
if (Arguments::executing_unit_tests()) {
// Look for JAVA_HOME in the environment.
char* java_home_var = ::getenv("JAVA_HOME");
if (java_home_var != nullptr && java_home_var[0] != 0) {
// Check the current module name "libjvm.so".
const char* p = strrchr(buf, '/');
if (p == nullptr) {
return;
}
assert(strstr(p, "/libjvm") == p, "invalid library name");
stringStream ss(buf, buflen);
rp = os::realpath(java_home_var, buf, buflen);
if (rp == nullptr) {
return;
}
assert((int)strlen(buf) < buflen, "Ran out of buffer room");
ss.print("%s/lib", buf);
// If the path exists within JAVA_HOME, add the VM variant directory and JVM
// library name to complete the path to JVM being overridden. Otherwise fallback
// to the path to the current library.
if (0 == access(buf, F_OK)) {
// Use current module name "libjvm.so"
ss.print("/%s/libjvm%s", Abstract_VM_Version::vm_variant(), JNI_LIB_SUFFIX);
assert(strcmp(buf + strlen(buf) - strlen(JNI_LIB_SUFFIX), JNI_LIB_SUFFIX) == 0,
"buf has been truncated");
} else {
// Go back to path of .so
rp = os::realpath(fname, buf, buflen);
if (rp == nullptr) {
return;
}
}
}
}
strncpy(saved_jvm_path, buf, MAXPATHLEN);
saved_jvm_path[MAXPATHLEN - 1] = '\0';
}
// Called when creating the thread. The minimum stack sizes have already been calculated // Called when creating the thread. The minimum stack sizes have already been calculated
size_t os::Posix::get_initial_stack_size(ThreadType thr_type, size_t req_stack_size) { size_t os::Posix::get_initial_stack_size(ThreadType thr_type, size_t req_stack_size) {
size_t stack_size; size_t stack_size;

View File

@ -1505,6 +1505,14 @@ bool PosixSignals::is_sig_ignored(int sig) {
} }
} }
void* PosixSignals::get_signal_handler_for_signal(int sig) {
struct sigaction oact;
if (sigaction(sig, (struct sigaction*)nullptr, &oact) == -1) {
return nullptr;
}
return get_signal_handler(&oact);
}
static void signal_sets_init() { static void signal_sets_init() {
sigemptyset(&preinstalled_sigs); sigemptyset(&preinstalled_sigs);

View File

@ -52,6 +52,8 @@ public:
static bool is_sig_ignored(int sig); static bool is_sig_ignored(int sig);
static void* get_signal_handler_for_signal(int sig);
static void hotspot_sigmask(Thread* thread); static void hotspot_sigmask(Thread* thread);
static void print_signal_handler(outputStream* st, int sig, char* buf, size_t buflen); static void print_signal_handler(outputStream* st, int sig, char* buf, size_t buflen);

View File

@ -81,14 +81,12 @@
#endif #endif
#define SPELL_REG_SP "sp" #define SPELL_REG_SP "sp"
#define SPELL_REG_FP "fp"
#ifdef __APPLE__ #ifdef __APPLE__
// see darwin-xnu/osfmk/mach/arm/_structs.h // see darwin-xnu/osfmk/mach/arm/_structs.h
// 10.5 UNIX03 member name prefixes // 10.5 UNIX03 member name prefixes
#define DU3_PREFIX(s, m) __ ## s.__ ## m #define DU3_PREFIX(s, m) __ ## s.__ ## m
#endif
#define context_x uc_mcontext->DU3_PREFIX(ss,x) #define context_x uc_mcontext->DU3_PREFIX(ss,x)
#define context_fp uc_mcontext->DU3_PREFIX(ss,fp) #define context_fp uc_mcontext->DU3_PREFIX(ss,fp)
@ -97,6 +95,31 @@
#define context_pc uc_mcontext->DU3_PREFIX(ss,pc) #define context_pc uc_mcontext->DU3_PREFIX(ss,pc)
#define context_cpsr uc_mcontext->DU3_PREFIX(ss,cpsr) #define context_cpsr uc_mcontext->DU3_PREFIX(ss,cpsr)
#define context_esr uc_mcontext->DU3_PREFIX(es,esr) #define context_esr uc_mcontext->DU3_PREFIX(es,esr)
#endif
#ifdef __FreeBSD__
# define context_x uc_mcontext.mc_gpregs.gp_x
# define context_fp context_x[REG_FP]
# define context_lr uc_mcontext.mc_gpregs.gp_lr
# define context_sp uc_mcontext.mc_gpregs.gp_sp
# define context_pc uc_mcontext.mc_gpregs.gp_elr
#endif
#ifdef __NetBSD__
# define context_x uc_mcontext.__gregs
# define context_fp uc_mcontext.__gregs[_REG_FP]
# define context_lr uc_mcontext.__gregs[_REG_LR]
# define context_sp uc_mcontext.__gregs[_REG_SP]
# define context_pc uc_mcontext.__gregs[_REG_ELR]
#endif
#ifdef __OpenBSD__
# define context_x sc_x
# define context_fp sc_x[REG_FP]
# define context_lr sc_lr
# define context_sp sc_sp
# define context_pc sc_elr
#endif
#define REG_BCP context_x[22] #define REG_BCP context_x[22]
@ -497,9 +520,11 @@ int os::extra_bang_size_in_bytes() {
return 0; return 0;
} }
#ifdef __APPLE__
void os::current_thread_enable_wx(WXMode mode) { void os::current_thread_enable_wx(WXMode mode) {
pthread_jit_write_protect_np(mode == WXExec); pthread_jit_write_protect_np(mode == WXExec);
} }
#endif
static inline void atomic_copy64(const volatile void *src, volatile void *dst) { static inline void atomic_copy64(const volatile void *src, volatile void *dst) {
*(jlong *) dst = *(const jlong *) src; *(jlong *) dst = *(const jlong *) src;

View File

@ -481,7 +481,3 @@ int get_legal_text(FileBuff &fbuf, char **legal_text)
*legal_text = legal_start; *legal_text = legal_start;
return (int) (legal_end - legal_start); return (int) (legal_end - legal_start);
} }
void *operator new( size_t size, int, const char *, int ) throw() {
return ::operator new( size );
}

View File

@ -1626,6 +1626,8 @@ void ArchDesc::declareClasses(FILE *fp) {
while (attr != nullptr) { while (attr != nullptr) {
if (strcmp (attr->_ident, "ins_is_TrapBasedCheckNode") == 0) { if (strcmp (attr->_ident, "ins_is_TrapBasedCheckNode") == 0) {
fprintf(fp, " virtual bool is_TrapBasedCheckNode() const { return %s; }\n", attr->_val); fprintf(fp, " virtual bool is_TrapBasedCheckNode() const { return %s; }\n", attr->_val);
} else if (strcmp (attr->_ident, "ins_is_late_expanded_null_check_candidate") == 0) {
fprintf(fp, " virtual bool is_late_expanded_null_check_candidate() const { return %s; }\n", attr->_val);
} else if (strcmp (attr->_ident, "ins_cost") != 0 && } else if (strcmp (attr->_ident, "ins_cost") != 0 &&
strncmp(attr->_ident, "ins_field_", 10) != 0 && strncmp(attr->_ident, "ins_field_", 10) != 0 &&
// Must match function in node.hpp: return type bool, no prefix "ins_". // Must match function in node.hpp: return type bool, no prefix "ins_".

View File

@ -29,6 +29,7 @@
#include "compiler/disassembler.hpp" #include "compiler/disassembler.hpp"
#include "logging/log.hpp" #include "logging/log.hpp"
#include "oops/klass.inline.hpp" #include "oops/klass.inline.hpp"
#include "oops/methodCounters.hpp"
#include "oops/methodData.hpp" #include "oops/methodData.hpp"
#include "oops/oop.inline.hpp" #include "oops/oop.inline.hpp"
#include "runtime/icache.hpp" #include "runtime/icache.hpp"
@ -537,6 +538,9 @@ void CodeBuffer::finalize_oop_references(const methodHandle& mh) {
if (m->is_methodData()) { if (m->is_methodData()) {
m = ((MethodData*)m)->method(); m = ((MethodData*)m)->method();
} }
if (m->is_methodCounters()) {
m = ((MethodCounters*)m)->method();
}
if (m->is_method()) { if (m->is_method()) {
m = ((Method*)m)->method_holder(); m = ((Method*)m)->method_holder();
} }
@ -561,6 +565,9 @@ void CodeBuffer::finalize_oop_references(const methodHandle& mh) {
if (m->is_methodData()) { if (m->is_methodData()) {
m = ((MethodData*)m)->method(); m = ((MethodData*)m)->method();
} }
if (m->is_methodCounters()) {
m = ((MethodCounters*)m)->method();
}
if (m->is_method()) { if (m->is_method()) {
m = ((Method*)m)->method_holder(); m = ((Method*)m)->method_holder();
} }
@ -1099,7 +1106,8 @@ CHeapString::~CHeapString() {
// offset is a byte offset into an instruction stream (CodeBuffer, CodeBlob or // offset is a byte offset into an instruction stream (CodeBuffer, CodeBlob or
// other memory buffer) and remark is a string (comment). // other memory buffer) and remark is a string (comment).
// //
AsmRemarks::AsmRemarks() : _remarks(new AsmRemarkCollection()) { AsmRemarks::AsmRemarks() {
init();
assert(_remarks != nullptr, "Allocation failure!"); assert(_remarks != nullptr, "Allocation failure!");
} }
@ -1107,6 +1115,10 @@ AsmRemarks::~AsmRemarks() {
assert(_remarks == nullptr, "Must 'clear()' before deleting!"); assert(_remarks == nullptr, "Must 'clear()' before deleting!");
} }
void AsmRemarks::init() {
_remarks = new AsmRemarkCollection();
}
const char* AsmRemarks::insert(uint offset, const char* remstr) { const char* AsmRemarks::insert(uint offset, const char* remstr) {
precond(remstr != nullptr); precond(remstr != nullptr);
return _remarks->insert(offset, remstr); return _remarks->insert(offset, remstr);
@ -1151,7 +1163,8 @@ uint AsmRemarks::print(uint offset, outputStream* strm) const {
// Acting as interface to reference counted collection of (debug) strings used // Acting as interface to reference counted collection of (debug) strings used
// in the code generated, and thus requiring a fixed address. // in the code generated, and thus requiring a fixed address.
// //
DbgStrings::DbgStrings() : _strings(new DbgStringCollection()) { DbgStrings::DbgStrings() {
init();
assert(_strings != nullptr, "Allocation failure!"); assert(_strings != nullptr, "Allocation failure!");
} }
@ -1159,6 +1172,10 @@ DbgStrings::~DbgStrings() {
assert(_strings == nullptr, "Must 'clear()' before deleting!"); assert(_strings == nullptr, "Must 'clear()' before deleting!");
} }
void DbgStrings::init() {
_strings = new DbgStringCollection();
}
const char* DbgStrings::insert(const char* dbgstr) { const char* DbgStrings::insert(const char* dbgstr) {
const char* str = _strings->lookup(dbgstr); const char* str = _strings->lookup(dbgstr);
return str != nullptr ? str : _strings->insert(dbgstr); return str != nullptr ? str : _strings->insert(dbgstr);

View File

@ -426,6 +426,8 @@ class AsmRemarks {
AsmRemarks(); AsmRemarks();
~AsmRemarks(); ~AsmRemarks();
void init();
const char* insert(uint offset, const char* remstr); const char* insert(uint offset, const char* remstr);
bool is_empty() const; bool is_empty() const;
@ -452,6 +454,8 @@ class DbgStrings {
DbgStrings(); DbgStrings();
~DbgStrings(); ~DbgStrings();
void init();
const char* insert(const char* dbgstr); const char* insert(const char* dbgstr);
bool is_empty() const; bool is_empty() const;

View File

@ -818,7 +818,7 @@ JRT_ENTRY(void, Runtime1::deoptimize(JavaThread* current, jint trap_request))
Deoptimization::DeoptReason reason = Deoptimization::trap_request_reason(trap_request); Deoptimization::DeoptReason reason = Deoptimization::trap_request_reason(trap_request);
if (action == Deoptimization::Action_make_not_entrant) { if (action == Deoptimization::Action_make_not_entrant) {
if (nm->make_not_entrant("C1 deoptimize")) { if (nm->make_not_entrant(nmethod::ChangeReason::C1_deoptimize)) {
if (reason == Deoptimization::Reason_tenured) { if (reason == Deoptimization::Reason_tenured) {
MethodData* trap_mdo = Deoptimization::get_method_data(current, method, true /*create_if_missing*/); MethodData* trap_mdo = Deoptimization::get_method_data(current, method, true /*create_if_missing*/);
if (trap_mdo != nullptr) { if (trap_mdo != nullptr) {
@ -1110,7 +1110,7 @@ JRT_ENTRY(void, Runtime1::patch_code(JavaThread* current, C1StubId stub_id ))
// safepoint, but if it's still alive then make it not_entrant. // safepoint, but if it's still alive then make it not_entrant.
nmethod* nm = CodeCache::find_nmethod(caller_frame.pc()); nmethod* nm = CodeCache::find_nmethod(caller_frame.pc());
if (nm != nullptr) { if (nm != nullptr) {
nm->make_not_entrant("C1 code patch"); nm->make_not_entrant(nmethod::ChangeReason::C1_codepatch);
} }
Deoptimization::deoptimize_frame(current, caller_frame.id()); Deoptimization::deoptimize_frame(current, caller_frame.id());
@ -1358,7 +1358,7 @@ void Runtime1::patch_code(JavaThread* current, C1StubId stub_id) {
// Make sure the nmethod is invalidated, i.e. made not entrant. // Make sure the nmethod is invalidated, i.e. made not entrant.
nmethod* nm = CodeCache::find_nmethod(caller_frame.pc()); nmethod* nm = CodeCache::find_nmethod(caller_frame.pc());
if (nm != nullptr) { if (nm != nullptr) {
nm->make_not_entrant("C1 deoptimize for patching"); nm->make_not_entrant(nmethod::ChangeReason::C1_deoptimize_for_patching);
} }
} }
@ -1486,7 +1486,7 @@ JRT_ENTRY(void, Runtime1::predicate_failed_trap(JavaThread* current))
nmethod* nm = CodeCache::find_nmethod(caller_frame.pc()); nmethod* nm = CodeCache::find_nmethod(caller_frame.pc());
assert (nm != nullptr, "no more nmethod?"); assert (nm != nullptr, "no more nmethod?");
nm->make_not_entrant("C1 predicate failed trap"); nm->make_not_entrant(nmethod::ChangeReason::C1_predicate_failed_trap);
methodHandle m(current, nm->method()); methodHandle m(current, nm->method());
MethodData* mdo = m->method_data(); MethodData* mdo = m->method_data();

View File

@ -110,12 +110,24 @@ const char* CDSConfig::default_archive_path() {
// before CDSConfig::ergo_initialize() is called. // before CDSConfig::ergo_initialize() is called.
assert(_cds_ergo_initialize_started, "sanity"); assert(_cds_ergo_initialize_started, "sanity");
if (_default_archive_path == nullptr) { if (_default_archive_path == nullptr) {
char jvm_path[JVM_MAXPATHLEN];
os::jvm_path(jvm_path, sizeof(jvm_path));
char *end = strrchr(jvm_path, *os::file_separator());
if (end != nullptr) *end = '\0';
stringStream tmp; stringStream tmp;
tmp.print("%s%sclasses", jvm_path, os::file_separator()); if (is_vm_statically_linked()) {
// It's easier to form the path using JAVA_HOME as os::jvm_path
// gives the path to the launcher executable on static JDK.
const char* subdir = WINDOWS_ONLY("bin") NOT_WINDOWS("lib");
tmp.print("%s%s%s%s%s%sclasses",
Arguments::get_java_home(), os::file_separator(),
subdir, os::file_separator(),
Abstract_VM_Version::vm_variant(), os::file_separator());
} else {
// Assume .jsa is in the same directory where libjvm resides on
// non-static JDK.
char jvm_path[JVM_MAXPATHLEN];
os::jvm_path(jvm_path, sizeof(jvm_path));
char *end = strrchr(jvm_path, *os::file_separator());
if (end != nullptr) *end = '\0';
tmp.print("%s%sclasses", jvm_path, os::file_separator());
}
#ifdef _LP64 #ifdef _LP64
if (!UseCompressedOops) { if (!UseCompressedOops) {
tmp.print_raw("_nocoops"); tmp.print_raw("_nocoops");

View File

@ -802,7 +802,7 @@ class CompileReplay : public StackObj {
// Make sure the existence of a prior compile doesn't stop this one // Make sure the existence of a prior compile doesn't stop this one
nmethod* nm = (entry_bci != InvocationEntryBci) ? method->lookup_osr_nmethod_for(entry_bci, comp_level, true) : method->code(); nmethod* nm = (entry_bci != InvocationEntryBci) ? method->lookup_osr_nmethod_for(entry_bci, comp_level, true) : method->code();
if (nm != nullptr) { if (nm != nullptr) {
nm->make_not_entrant("CI replay"); nm->make_not_entrant(nmethod::ChangeReason::CI_replay);
} }
replay_state = this; replay_state = this;
CompileBroker::compile_method(methodHandle(THREAD, method), entry_bci, comp_level, CompileBroker::compile_method(methodHandle(THREAD, method), entry_bci, comp_level,

View File

@ -154,6 +154,8 @@
#define JAVA_25_VERSION 69 #define JAVA_25_VERSION 69
#define JAVA_26_VERSION 70
void ClassFileParser::set_class_bad_constant_seen(short bad_constant) { void ClassFileParser::set_class_bad_constant_seen(short bad_constant) {
assert((bad_constant == JVM_CONSTANT_Module || assert((bad_constant == JVM_CONSTANT_Module ||
bad_constant == JVM_CONSTANT_Package) && _major_version >= JAVA_9_VERSION, bad_constant == JVM_CONSTANT_Package) && _major_version >= JAVA_9_VERSION,
@ -3738,6 +3740,7 @@ void ClassFileParser::apply_parsed_class_metadata(
_cp->set_pool_holder(this_klass); _cp->set_pool_holder(this_klass);
this_klass->set_constants(_cp); this_klass->set_constants(_cp);
this_klass->set_fieldinfo_stream(_fieldinfo_stream); this_klass->set_fieldinfo_stream(_fieldinfo_stream);
this_klass->set_fieldinfo_search_table(_fieldinfo_search_table);
this_klass->set_fields_status(_fields_status); this_klass->set_fields_status(_fields_status);
this_klass->set_methods(_methods); this_klass->set_methods(_methods);
this_klass->set_inner_classes(_inner_classes); this_klass->set_inner_classes(_inner_classes);
@ -3747,6 +3750,8 @@ void ClassFileParser::apply_parsed_class_metadata(
this_klass->set_permitted_subclasses(_permitted_subclasses); this_klass->set_permitted_subclasses(_permitted_subclasses);
this_klass->set_record_components(_record_components); this_klass->set_record_components(_record_components);
DEBUG_ONLY(FieldInfoStream::validate_search_table(_cp, _fieldinfo_stream, _fieldinfo_search_table));
// Delay the setting of _local_interfaces and _transitive_interfaces until after // Delay the setting of _local_interfaces and _transitive_interfaces until after
// initialize_supers() in fill_instance_klass(). It is because the _local_interfaces could // initialize_supers() in fill_instance_klass(). It is because the _local_interfaces could
// be shared with _transitive_interfaces and _transitive_interfaces may be shared with // be shared with _transitive_interfaces and _transitive_interfaces may be shared with
@ -5054,6 +5059,7 @@ void ClassFileParser::fill_instance_klass(InstanceKlass* ik,
// note that is not safe to use the fields in the parser from this point on // note that is not safe to use the fields in the parser from this point on
assert(nullptr == _cp, "invariant"); assert(nullptr == _cp, "invariant");
assert(nullptr == _fieldinfo_stream, "invariant"); assert(nullptr == _fieldinfo_stream, "invariant");
assert(nullptr == _fieldinfo_search_table, "invariant");
assert(nullptr == _fields_status, "invariant"); assert(nullptr == _fields_status, "invariant");
assert(nullptr == _methods, "invariant"); assert(nullptr == _methods, "invariant");
assert(nullptr == _inner_classes, "invariant"); assert(nullptr == _inner_classes, "invariant");
@ -5274,6 +5280,7 @@ ClassFileParser::ClassFileParser(ClassFileStream* stream,
_super_klass(), _super_klass(),
_cp(nullptr), _cp(nullptr),
_fieldinfo_stream(nullptr), _fieldinfo_stream(nullptr),
_fieldinfo_search_table(nullptr),
_fields_status(nullptr), _fields_status(nullptr),
_methods(nullptr), _methods(nullptr),
_inner_classes(nullptr), _inner_classes(nullptr),
@ -5350,6 +5357,7 @@ void ClassFileParser::clear_class_metadata() {
// deallocated if classfile parsing returns an error. // deallocated if classfile parsing returns an error.
_cp = nullptr; _cp = nullptr;
_fieldinfo_stream = nullptr; _fieldinfo_stream = nullptr;
_fieldinfo_search_table = nullptr;
_fields_status = nullptr; _fields_status = nullptr;
_methods = nullptr; _methods = nullptr;
_inner_classes = nullptr; _inner_classes = nullptr;
@ -5372,6 +5380,7 @@ ClassFileParser::~ClassFileParser() {
if (_fieldinfo_stream != nullptr) { if (_fieldinfo_stream != nullptr) {
MetadataFactory::free_array<u1>(_loader_data, _fieldinfo_stream); MetadataFactory::free_array<u1>(_loader_data, _fieldinfo_stream);
} }
MetadataFactory::free_array<u1>(_loader_data, _fieldinfo_search_table);
if (_fields_status != nullptr) { if (_fields_status != nullptr) {
MetadataFactory::free_array<FieldStatus>(_loader_data, _fields_status); MetadataFactory::free_array<FieldStatus>(_loader_data, _fields_status);
@ -5772,6 +5781,7 @@ void ClassFileParser::post_process_parsed_stream(const ClassFileStream* const st
_fieldinfo_stream = _fieldinfo_stream =
FieldInfoStream::create_FieldInfoStream(_temp_field_info, _java_fields_count, FieldInfoStream::create_FieldInfoStream(_temp_field_info, _java_fields_count,
injected_fields_count, loader_data(), CHECK); injected_fields_count, loader_data(), CHECK);
_fieldinfo_search_table = FieldInfoStream::create_search_table(_cp, _fieldinfo_stream, _loader_data, CHECK);
_fields_status = _fields_status =
MetadataFactory::new_array<FieldStatus>(_loader_data, _temp_field_info->length(), MetadataFactory::new_array<FieldStatus>(_loader_data, _temp_field_info->length(),
FieldStatus(0), CHECK); FieldStatus(0), CHECK);

View File

@ -1,5 +1,5 @@
/* /*
* Copyright (c) 1997, 2024, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 1997, 2025, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* *
* This code is free software; you can redistribute it and/or modify it * This code is free software; you can redistribute it and/or modify it
@ -123,6 +123,7 @@ class ClassFileParser {
const InstanceKlass* _super_klass; const InstanceKlass* _super_klass;
ConstantPool* _cp; ConstantPool* _cp;
Array<u1>* _fieldinfo_stream; Array<u1>* _fieldinfo_stream;
Array<u1>* _fieldinfo_search_table;
Array<FieldStatus>* _fields_status; Array<FieldStatus>* _fields_status;
Array<Method*>* _methods; Array<Method*>* _methods;
Array<u2>* _inner_classes; Array<u2>* _inner_classes;

View File

@ -301,7 +301,7 @@ void FieldLayout::reconstruct_layout(const InstanceKlass* ik, bool& has_instance
BasicType last_type; BasicType last_type;
int last_offset = -1; int last_offset = -1;
while (ik != nullptr) { while (ik != nullptr) {
for (AllFieldStream fs(ik->fieldinfo_stream(), ik->constants()); !fs.done(); fs.next()) { for (AllFieldStream fs(ik); !fs.done(); fs.next()) {
BasicType type = Signature::basic_type(fs.signature()); BasicType type = Signature::basic_type(fs.signature());
// distinction between static and non-static fields is missing // distinction between static and non-static fields is missing
if (fs.access_flags().is_static()) continue; if (fs.access_flags().is_static()) continue;
@ -461,7 +461,7 @@ void FieldLayout::print(outputStream* output, bool is_static, const InstanceKlas
bool found = false; bool found = false;
const InstanceKlass* ik = super; const InstanceKlass* ik = super;
while (!found && ik != nullptr) { while (!found && ik != nullptr) {
for (AllFieldStream fs(ik->fieldinfo_stream(), ik->constants()); !fs.done(); fs.next()) { for (AllFieldStream fs(ik); !fs.done(); fs.next()) {
if (fs.offset() == b->offset()) { if (fs.offset() == b->offset()) {
output->print_cr(" @%d \"%s\" %s %d/%d %s", output->print_cr(" @%d \"%s\" %s %d/%d %s",
b->offset(), b->offset(),

View File

@ -967,6 +967,13 @@ void java_lang_Class::fixup_mirror(Klass* k, TRAPS) {
Array<u1>* new_fis = FieldInfoStream::create_FieldInfoStream(fields, java_fields, injected_fields, k->class_loader_data(), CHECK); Array<u1>* new_fis = FieldInfoStream::create_FieldInfoStream(fields, java_fields, injected_fields, k->class_loader_data(), CHECK);
ik->set_fieldinfo_stream(new_fis); ik->set_fieldinfo_stream(new_fis);
MetadataFactory::free_array<u1>(k->class_loader_data(), old_stream); MetadataFactory::free_array<u1>(k->class_loader_data(), old_stream);
Array<u1>* old_table = ik->fieldinfo_search_table();
Array<u1>* search_table = FieldInfoStream::create_search_table(ik->constants(), new_fis, k->class_loader_data(), CHECK);
ik->set_fieldinfo_search_table(search_table);
MetadataFactory::free_array<u1>(k->class_loader_data(), old_table);
DEBUG_ONLY(FieldInfoStream::validate_search_table(ik->constants(), new_fis, search_table));
} }
} }
@ -1872,7 +1879,7 @@ ByteSize java_lang_Thread::thread_id_offset() {
} }
oop java_lang_Thread::park_blocker(oop java_thread) { oop java_lang_Thread::park_blocker(oop java_thread) {
return java_thread->obj_field(_park_blocker_offset); return java_thread->obj_field_access<MO_RELAXED>(_park_blocker_offset);
} }
oop java_lang_Thread::async_get_stack_trace(oop java_thread, TRAPS) { oop java_lang_Thread::async_get_stack_trace(oop java_thread, TRAPS) {

View File

@ -48,41 +48,50 @@ VerificationType VerificationType::from_tag(u1 tag) {
} }
} }
bool VerificationType::resolve_and_check_assignability(InstanceKlass* klass, Symbol* name, // Potentially resolve the target class and from class, and check whether the from class is assignable
Symbol* from_name, bool from_field_is_protected, bool from_is_array, bool from_is_object, TRAPS) { // to the target class. The current_klass is the class being verified - it could also be the target in some
// cases, and otherwise is needed to obtain the correct classloader for resolving the other classes.
bool VerificationType::resolve_and_check_assignability(InstanceKlass* current_klass, Symbol* target_name, Symbol* from_name,
bool from_field_is_protected, bool from_is_array,
bool from_is_object, bool* target_is_interface, TRAPS) {
HandleMark hm(THREAD); HandleMark hm(THREAD);
Klass* this_class; Klass* target_klass;
if (klass->is_hidden() && klass->name() == name) { if (current_klass->is_hidden() && current_klass->name() == target_name) {
this_class = klass; target_klass = current_klass;
} else { } else {
this_class = SystemDictionary::resolve_or_fail( target_klass = SystemDictionary::resolve_or_fail(
name, Handle(THREAD, klass->class_loader()), true, CHECK_false); target_name, Handle(THREAD, current_klass->class_loader()), true, CHECK_false);
if (log_is_enabled(Debug, class, resolve)) { if (log_is_enabled(Debug, class, resolve)) {
Verifier::trace_class_resolution(this_class, klass); Verifier::trace_class_resolution(target_klass, current_klass);
} }
} }
if (this_class->is_interface() && (!from_field_is_protected || bool is_intf = target_klass->is_interface();
if (target_is_interface != nullptr) {
*target_is_interface = is_intf;
}
if (is_intf && (!from_field_is_protected ||
from_name != vmSymbols::java_lang_Object())) { from_name != vmSymbols::java_lang_Object())) {
// If we are not trying to access a protected field or method in // If we are not trying to access a protected field or method in
// java.lang.Object then, for arrays, we only allow assignability // java.lang.Object then, for arrays, we only allow assignability
// to interfaces java.lang.Cloneable and java.io.Serializable. // to interfaces java.lang.Cloneable and java.io.Serializable.
// Otherwise, we treat interfaces as java.lang.Object. // Otherwise, we treat interfaces as java.lang.Object.
return !from_is_array || return !from_is_array ||
this_class == vmClasses::Cloneable_klass() || target_klass == vmClasses::Cloneable_klass() ||
this_class == vmClasses::Serializable_klass(); target_klass == vmClasses::Serializable_klass();
} else if (from_is_object) { } else if (from_is_object) {
Klass* from_class; Klass* from_klass;
if (klass->is_hidden() && klass->name() == from_name) { if (current_klass->is_hidden() && current_klass->name() == from_name) {
from_class = klass; from_klass = current_klass;
} else { } else {
from_class = SystemDictionary::resolve_or_fail( from_klass = SystemDictionary::resolve_or_fail(
from_name, Handle(THREAD, klass->class_loader()), true, CHECK_false); from_name, Handle(THREAD, current_klass->class_loader()), true, CHECK_false);
if (log_is_enabled(Debug, class, resolve)) { if (log_is_enabled(Debug, class, resolve)) {
Verifier::trace_class_resolution(from_class, klass); Verifier::trace_class_resolution(from_klass, current_klass);
} }
} }
return from_class->is_subclass_of(this_class); return from_klass->is_subclass_of(target_klass);
} }
return false; return false;
@ -90,8 +99,8 @@ bool VerificationType::resolve_and_check_assignability(InstanceKlass* klass, Sym
bool VerificationType::is_reference_assignable_from( bool VerificationType::is_reference_assignable_from(
const VerificationType& from, ClassVerifier* context, const VerificationType& from, ClassVerifier* context,
bool from_field_is_protected, TRAPS) const { bool from_field_is_protected, bool* this_is_interface, TRAPS) const {
InstanceKlass* klass = context->current_class();
if (from.is_null()) { if (from.is_null()) {
// null is assignable to any reference // null is assignable to any reference
return true; return true;
@ -109,7 +118,7 @@ bool VerificationType::is_reference_assignable_from(
#if INCLUDE_CDS #if INCLUDE_CDS
if (CDSConfig::is_dumping_archive()) { if (CDSConfig::is_dumping_archive()) {
bool skip_assignability_check = false; bool skip_assignability_check = false;
SystemDictionaryShared::add_verification_constraint(klass, SystemDictionaryShared::add_verification_constraint(context->current_class(),
name(), from.name(), from_field_is_protected, from.is_array(), name(), from.name(), from_field_is_protected, from.is_array(),
from.is_object(), &skip_assignability_check); from.is_object(), &skip_assignability_check);
if (skip_assignability_check) { if (skip_assignability_check) {
@ -119,8 +128,9 @@ bool VerificationType::is_reference_assignable_from(
} }
} }
#endif #endif
return resolve_and_check_assignability(klass, name(), from.name(), return resolve_and_check_assignability(context->current_class(), name(), from.name(),
from_field_is_protected, from.is_array(), from.is_object(), THREAD); from_field_is_protected, from.is_array(),
from.is_object(), this_is_interface, THREAD);
} else if (is_array() && from.is_array()) { } else if (is_array() && from.is_array()) {
VerificationType comp_this = get_component(context); VerificationType comp_this = get_component(context);
VerificationType comp_from = from.get_component(context); VerificationType comp_from = from.get_component(context);

View File

@ -1,5 +1,5 @@
/* /*
* Copyright (c) 2003, 2024, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2003, 2025, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* *
* This code is free software; you can redistribute it and/or modify it * This code is free software; you can redistribute it and/or modify it
@ -288,7 +288,7 @@ class VerificationType {
if (is_reference() && from.is_reference()) { if (is_reference() && from.is_reference()) {
return is_reference_assignable_from(from, context, return is_reference_assignable_from(from, context,
from_field_is_protected, from_field_is_protected,
THREAD); nullptr, THREAD);
} else { } else {
return false; return false;
} }
@ -327,17 +327,24 @@ class VerificationType {
void print_on(outputStream* st) const; void print_on(outputStream* st) const;
private: bool is_reference_assignable_from(const VerificationType& from, ClassVerifier* context,
bool from_field_is_protected, bool* this_is_interface, TRAPS) const;
bool is_reference_assignable_from( static bool resolve_and_check_assignability(InstanceKlass* current_klass, Symbol* target_name,
const VerificationType&, ClassVerifier*, bool from_field_is_protected,
TRAPS) const;
public:
static bool resolve_and_check_assignability(InstanceKlass* klass, Symbol* name,
Symbol* from_name, bool from_field_is_protected, Symbol* from_name, bool from_field_is_protected,
bool from_is_array, bool from_is_object, bool from_is_array, bool from_is_object,
TRAPS) {
return resolve_and_check_assignability(current_klass, target_name, from_name, from_field_is_protected,
from_is_array, from_is_object, nullptr, THREAD);
}
private:
static bool resolve_and_check_assignability(InstanceKlass* current_klass, Symbol* target_name,
Symbol* from_name, bool from_field_is_protected,
bool from_is_array, bool from_is_object,
bool* target_is_interface,
TRAPS); TRAPS);
}; };
#endif // SHARE_CLASSFILE_VERIFICATIONTYPE_HPP #endif // SHARE_CLASSFILE_VERIFICATIONTYPE_HPP

View File

@ -2891,26 +2891,43 @@ void ClassVerifier::verify_invoke_instructions(
"Illegal call to internal method"); "Illegal call to internal method");
return; return;
} }
} else if (opcode == Bytecodes::_invokespecial }
&& !is_same_or_direct_interface(current_class(), current_type(), ref_class_type) // invokespecial, when not <init>, must be to a method in the current class, a direct superinterface,
&& !ref_class_type.equals(VerificationType::reference_type( // or any superclass (including Object).
current_class()->super()->name()))) { else if (opcode == Bytecodes::_invokespecial
bool subtype = false; && !is_same_or_direct_interface(current_class(), current_type(), ref_class_type)
bool have_imr_indirect = cp->tag_at(index).value() == JVM_CONSTANT_InterfaceMethodref; && !ref_class_type.equals(VerificationType::reference_type(current_class()->super()->name()))) {
subtype = ref_class_type.is_assignable_from(
current_type(), this, false, CHECK_VERIFY(this));
if (!subtype) {
verify_error(ErrorContext::bad_code(bci),
"Bad invokespecial instruction: "
"current class isn't assignable to reference class.");
return;
} else if (have_imr_indirect) {
verify_error(ErrorContext::bad_code(bci),
"Bad invokespecial instruction: "
"interface method reference is in an indirect superinterface.");
return;
}
// We know it is not current class, direct superinterface or immediate superclass. That means it
// could be:
// - a totally unrelated class or interface
// - an indirect superinterface
// - an indirect superclass (including Object)
// We use the assignability test to see if it is a superclass, or else an interface, and keep track
// of the latter. Note that subtype can be true if we are dealing with an interface that is not actually
// implemented as assignability treats all interfaces as Object.
bool is_interface = false; // This can only be set true if the assignability check will return true
// and we loaded the class. For any other "true" returns (e.g. same class
// or Object) we either can't get here (same class already excluded above)
// or we know it is not an interface (i.e. Object).
bool subtype = ref_class_type.is_reference_assignable_from(current_type(), this, false,
&is_interface, CHECK_VERIFY(this));
if (!subtype) { // Totally unrelated class
verify_error(ErrorContext::bad_code(bci),
"Bad invokespecial instruction: "
"current class isn't assignable to reference class.");
return;
} else {
// Indirect superclass (including Object), indirect interface, or unrelated interface.
// Any interface use is an error.
if (is_interface) {
verify_error(ErrorContext::bad_code(bci),
"Bad invokespecial instruction: "
"interface method to invoke is not in a direct superinterface.");
return;
}
}
} }
// Get the verification types for the method's arguments. // Get the verification types for the method's arguments.

View File

@ -289,8 +289,6 @@ bool vmIntrinsics::disabled_by_jvm_flags(vmIntrinsics::ID id) {
case vmIntrinsics::_dsin: case vmIntrinsics::_dsin:
case vmIntrinsics::_dcos: case vmIntrinsics::_dcos:
case vmIntrinsics::_dtan: case vmIntrinsics::_dtan:
case vmIntrinsics::_dtanh:
case vmIntrinsics::_dcbrt:
case vmIntrinsics::_dlog: case vmIntrinsics::_dlog:
case vmIntrinsics::_dexp: case vmIntrinsics::_dexp:
case vmIntrinsics::_dpow: case vmIntrinsics::_dpow:
@ -316,6 +314,13 @@ bool vmIntrinsics::disabled_by_jvm_flags(vmIntrinsics::ID id) {
case vmIntrinsics::_fmaF: case vmIntrinsics::_fmaF:
if (!InlineMathNatives || !UseFMA) return true; if (!InlineMathNatives || !UseFMA) return true;
break; break;
case vmIntrinsics::_dtanh:
case vmIntrinsics::_dcbrt:
if (!InlineMathNatives || !InlineIntrinsics) return true;
#if defined(AMD64) && (defined(COMPILER1) || defined(COMPILER2))
if (!UseLibmIntrinsic) return true;
#endif
break;
case vmIntrinsics::_floatToFloat16: case vmIntrinsics::_floatToFloat16:
case vmIntrinsics::_float16ToFloat: case vmIntrinsics::_float16ToFloat:
if (!InlineIntrinsics) return true; if (!InlineIntrinsics) return true;

View File

@ -742,6 +742,12 @@ class SerializeClosure;
template(jdk_internal_vm_ThreadDumper, "jdk/internal/vm/ThreadDumper") \ template(jdk_internal_vm_ThreadDumper, "jdk/internal/vm/ThreadDumper") \
template(dumpThreads_name, "dumpThreads") \ template(dumpThreads_name, "dumpThreads") \
template(dumpThreadsToJson_name, "dumpThreadsToJson") \ template(dumpThreadsToJson_name, "dumpThreadsToJson") \
template(jdk_internal_vm_ThreadSnapshot, "jdk/internal/vm/ThreadSnapshot") \
template(jdk_internal_vm_ThreadLock, "jdk/internal/vm/ThreadSnapshot$ThreadLock") \
template(jdk_internal_vm_ThreadLock_signature, "Ljdk/internal/vm/ThreadSnapshot$ThreadLock;") \
template(jdk_internal_vm_ThreadLock_array, "[Ljdk/internal/vm/ThreadSnapshot$ThreadLock;") \
template(java_lang_StackTraceElement_of_name, "of") \
template(java_lang_StackTraceElement_of_signature, "([Ljava/lang/StackTraceElement;)[Ljava/lang/StackTraceElement;") \
\ \
/* jcmd Thread.vthread_scheduler and Thread.vthread_pollers */ \ /* jcmd Thread.vthread_scheduler and Thread.vthread_pollers */ \
template(jdk_internal_vm_JcmdVThreadCommands, "jdk/internal/vm/JcmdVThreadCommands") \ template(jdk_internal_vm_JcmdVThreadCommands, "jdk/internal/vm/JcmdVThreadCommands") \

View File

@ -460,18 +460,9 @@ AOTCodeCache* AOTCodeCache::open_for_dump() {
} }
void copy_bytes(const char* from, address to, uint size) { void copy_bytes(const char* from, address to, uint size) {
assert(size > 0, "sanity"); assert((int)size > 0, "sanity");
bool by_words = true; memcpy(to, from, size);
if ((size > 2 * HeapWordSize) && (((intptr_t)from | (intptr_t)to) & (HeapWordSize - 1)) == 0) { log_trace(aot, codecache)("Copied %d bytes from " INTPTR_FORMAT " to " INTPTR_FORMAT, size, p2i(from), p2i(to));
// Use wordwise copies if possible:
Copy::disjoint_words((HeapWord*)from,
(HeapWord*)to,
((size_t)size + HeapWordSize-1) / HeapWordSize);
} else {
by_words = false;
Copy::conjoint_jbytes(from, to, (size_t)size);
}
log_trace(aot, codecache)("Copied %d bytes as %s from " INTPTR_FORMAT " to " INTPTR_FORMAT, size, (by_words ? "HeapWord" : "bytes"), p2i(from), p2i(to));
} }
AOTCodeReader::AOTCodeReader(AOTCodeCache* cache, AOTCodeEntry* entry) { AOTCodeReader::AOTCodeReader(AOTCodeCache* cache, AOTCodeEntry* entry) {
@ -915,26 +906,22 @@ CodeBlob* AOTCodeReader::compile_code_blob(const char* name, int entry_offset_co
oop_maps = read_oop_map_set(); oop_maps = read_oop_map_set();
} }
#ifndef PRODUCT
AsmRemarks asm_remarks;
read_asm_remarks(asm_remarks);
DbgStrings dbg_strings;
read_dbg_strings(dbg_strings);
#endif // PRODUCT
CodeBlob* code_blob = CodeBlob::create(archived_blob, CodeBlob* code_blob = CodeBlob::create(archived_blob,
stored_name, stored_name,
reloc_data, reloc_data,
oop_maps oop_maps
#ifndef PRODUCT
, asm_remarks
, dbg_strings
#endif
); );
if (code_blob == nullptr) { // no space left in CodeCache if (code_blob == nullptr) { // no space left in CodeCache
return nullptr; return nullptr;
} }
#ifndef PRODUCT
code_blob->asm_remarks().init();
read_asm_remarks(code_blob->asm_remarks());
code_blob->dbg_strings().init();
read_dbg_strings(code_blob->dbg_strings());
#endif // PRODUCT
fix_relocations(code_blob); fix_relocations(code_blob);
// Read entries offsets // Read entries offsets

View File

@ -281,10 +281,6 @@ CodeBlob* CodeBlob::create(CodeBlob* archived_blob,
const char* name, const char* name,
address archived_reloc_data, address archived_reloc_data,
ImmutableOopMapSet* archived_oop_maps ImmutableOopMapSet* archived_oop_maps
#ifndef PRODUCT
, AsmRemarks& archived_asm_remarks
, DbgStrings& archived_dbg_strings
#endif // PRODUCT
) )
{ {
ThreadInVMfromUnknown __tiv; // get to VM state in case we block on CodeCache_lock ThreadInVMfromUnknown __tiv; // get to VM state in case we block on CodeCache_lock
@ -303,13 +299,6 @@ CodeBlob* CodeBlob::create(CodeBlob* archived_blob,
archived_oop_maps); archived_oop_maps);
assert(blob != nullptr, "sanity check"); assert(blob != nullptr, "sanity check");
#ifndef PRODUCT
blob->use_remarks(archived_asm_remarks);
archived_asm_remarks.clear();
blob->use_strings(archived_dbg_strings);
archived_dbg_strings.clear();
#endif // PRODUCT
// Flush the code block // Flush the code block
ICache::invalidate_range(blob->code_begin(), blob->code_size()); ICache::invalidate_range(blob->code_begin(), blob->code_size());
CodeCache::commit(blob); // Count adapters CodeCache::commit(blob); // Count adapters

View File

@ -318,12 +318,7 @@ public:
static CodeBlob* create(CodeBlob* archived_blob, static CodeBlob* create(CodeBlob* archived_blob,
const char* name, const char* name,
address archived_reloc_data, address archived_reloc_data,
ImmutableOopMapSet* archived_oop_maps ImmutableOopMapSet* archived_oop_maps);
#ifndef PRODUCT
, AsmRemarks& archived_asm_remarks
, DbgStrings& archived_dbg_strings
#endif // PRODUCT
);
}; };
//---------------------------------------------------------------------------------------------------- //----------------------------------------------------------------------------------------------------

View File

@ -1361,7 +1361,7 @@ void CodeCache::make_marked_nmethods_deoptimized() {
while(iter.next()) { while(iter.next()) {
nmethod* nm = iter.method(); nmethod* nm = iter.method();
if (nm->is_marked_for_deoptimization() && !nm->has_been_deoptimized() && nm->can_be_deoptimized()) { if (nm->is_marked_for_deoptimization() && !nm->has_been_deoptimized() && nm->can_be_deoptimized()) {
nm->make_not_entrant("marked for deoptimization"); nm->make_not_entrant(nmethod::ChangeReason::marked_for_deoptimization);
nm->make_deoptimized(); nm->make_deoptimized();
} }
} }

View File

@ -788,6 +788,8 @@ class CheckClass : public MetadataClosure {
klass = ((Method*)md)->method_holder(); klass = ((Method*)md)->method_holder();
} else if (md->is_methodData()) { } else if (md->is_methodData()) {
klass = ((MethodData*)md)->method()->method_holder(); klass = ((MethodData*)md)->method()->method_holder();
} else if (md->is_methodCounters()) {
klass = ((MethodCounters*)md)->method()->method_holder();
} else { } else {
md->print(); md->print();
ShouldNotReachHere(); ShouldNotReachHere();
@ -1973,14 +1975,12 @@ void nmethod::invalidate_osr_method() {
} }
} }
void nmethod::log_state_change(const char* reason) const { void nmethod::log_state_change(ChangeReason change_reason) const {
assert(reason != nullptr, "Must provide a reason");
if (LogCompilation) { if (LogCompilation) {
if (xtty != nullptr) { if (xtty != nullptr) {
ttyLocker ttyl; // keep the following output all in one block ttyLocker ttyl; // keep the following output all in one block
xtty->begin_elem("make_not_entrant thread='%zu' reason='%s'", xtty->begin_elem("make_not_entrant thread='%zu' reason='%s'",
os::current_thread_id(), reason); os::current_thread_id(), change_reason_to_string(change_reason));
log_identity(xtty); log_identity(xtty);
xtty->stamp(); xtty->stamp();
xtty->end_elem(); xtty->end_elem();
@ -1989,7 +1989,7 @@ void nmethod::log_state_change(const char* reason) const {
ResourceMark rm; ResourceMark rm;
stringStream ss(NEW_RESOURCE_ARRAY(char, 256), 256); stringStream ss(NEW_RESOURCE_ARRAY(char, 256), 256);
ss.print("made not entrant: %s", reason); ss.print("made not entrant: %s", change_reason_to_string(change_reason));
CompileTask::print_ul(this, ss.freeze()); CompileTask::print_ul(this, ss.freeze());
if (PrintCompilation) { if (PrintCompilation) {
@ -2004,9 +2004,7 @@ void nmethod::unlink_from_method() {
} }
// Invalidate code // Invalidate code
bool nmethod::make_not_entrant(const char* reason) { bool nmethod::make_not_entrant(ChangeReason change_reason) {
assert(reason != nullptr, "Must provide a reason");
// This can be called while the system is already at a safepoint which is ok // This can be called while the system is already at a safepoint which is ok
NoSafepointVerifier nsv; NoSafepointVerifier nsv;
@ -2075,7 +2073,7 @@ bool nmethod::make_not_entrant(const char* reason) {
assert(success, "Transition can't fail"); assert(success, "Transition can't fail");
// Log the transition once // Log the transition once
log_state_change(reason); log_state_change(change_reason);
// Remove nmethod from method. // Remove nmethod from method.
unlink_from_method(); unlink_from_method();
@ -2143,10 +2141,19 @@ void nmethod::purge(bool unregister_nmethod) {
// completely deallocate this method // completely deallocate this method
Events::log_nmethod_flush(Thread::current(), "flushing %s nmethod " INTPTR_FORMAT, is_osr_method() ? "osr" : "", p2i(this)); Events::log_nmethod_flush(Thread::current(), "flushing %s nmethod " INTPTR_FORMAT, is_osr_method() ? "osr" : "", p2i(this));
log_debug(codecache)("*flushing %s nmethod %3d/" INTPTR_FORMAT ". Live blobs:" UINT32_FORMAT
"/Free CodeCache:%zuKb", LogTarget(Debug, codecache) lt;
is_osr_method() ? "osr" : "",_compile_id, p2i(this), CodeCache::blob_count(), if (lt.is_enabled()) {
CodeCache::unallocated_capacity(CodeCache::get_code_blob_type(this))/1024); ResourceMark rm;
LogStream ls(lt);
const char* method_name = method()->name()->as_C_string();
const size_t codecache_capacity = CodeCache::capacity()/1024;
const size_t codecache_free_space = CodeCache::unallocated_capacity(CodeCache::get_code_blob_type(this))/1024;
ls.print("Flushing nmethod %6d/" INTPTR_FORMAT ", level=%d, osr=%d, cold=%d, epoch=" UINT64_FORMAT ", cold_count=" UINT64_FORMAT ". "
"Cache capacity: %zuKb, free space: %zuKb. method %s (%s)",
_compile_id, p2i(this), _comp_level, is_osr_method(), is_cold(), _gc_epoch, CodeCache::cold_gc_count(),
codecache_capacity, codecache_free_space, method_name, compiler_name());
}
// We need to deallocate any ExceptionCache data. // We need to deallocate any ExceptionCache data.
// Note that we do not need to grab the nmethod lock for this, it // Note that we do not need to grab the nmethod lock for this, it

View File

@ -471,6 +471,85 @@ class nmethod : public CodeBlob {
void oops_do_set_strong_done(nmethod* old_head); void oops_do_set_strong_done(nmethod* old_head);
public: public:
enum class ChangeReason : u1 {
C1_codepatch,
C1_deoptimize,
C1_deoptimize_for_patching,
C1_predicate_failed_trap,
CI_replay,
JVMCI_invalidate_nmethod,
JVMCI_invalidate_nmethod_mirror,
JVMCI_materialize_virtual_object,
JVMCI_new_installation,
JVMCI_register_method,
JVMCI_replacing_with_new_code,
JVMCI_reprofile,
marked_for_deoptimization,
missing_exception_handler,
not_used,
OSR_invalidation_back_branch,
OSR_invalidation_for_compiling_with_C1,
OSR_invalidation_of_lower_level,
set_native_function,
uncommon_trap,
whitebox_deoptimization,
zombie,
};
static const char* change_reason_to_string(ChangeReason change_reason) {
switch (change_reason) {
case ChangeReason::C1_codepatch:
return "C1 code patch";
case ChangeReason::C1_deoptimize:
return "C1 deoptimized";
case ChangeReason::C1_deoptimize_for_patching:
return "C1 deoptimize for patching";
case ChangeReason::C1_predicate_failed_trap:
return "C1 predicate failed trap";
case ChangeReason::CI_replay:
return "CI replay";
case ChangeReason::JVMCI_invalidate_nmethod:
return "JVMCI invalidate nmethod";
case ChangeReason::JVMCI_invalidate_nmethod_mirror:
return "JVMCI invalidate nmethod mirror";
case ChangeReason::JVMCI_materialize_virtual_object:
return "JVMCI materialize virtual object";
case ChangeReason::JVMCI_new_installation:
return "JVMCI new installation";
case ChangeReason::JVMCI_register_method:
return "JVMCI register method";
case ChangeReason::JVMCI_replacing_with_new_code:
return "JVMCI replacing with new code";
case ChangeReason::JVMCI_reprofile:
return "JVMCI reprofile";
case ChangeReason::marked_for_deoptimization:
return "marked for deoptimization";
case ChangeReason::missing_exception_handler:
return "missing exception handler";
case ChangeReason::not_used:
return "not used";
case ChangeReason::OSR_invalidation_back_branch:
return "OSR invalidation back branch";
case ChangeReason::OSR_invalidation_for_compiling_with_C1:
return "OSR invalidation for compiling with C1";
case ChangeReason::OSR_invalidation_of_lower_level:
return "OSR invalidation of lower level";
case ChangeReason::set_native_function:
return "set native function";
case ChangeReason::uncommon_trap:
return "uncommon trap";
case ChangeReason::whitebox_deoptimization:
return "whitebox deoptimization";
case ChangeReason::zombie:
return "zombie";
default: {
assert(false, "Unhandled reason");
return "Unknown";
}
}
}
// create nmethod with entry_bci // create nmethod with entry_bci
static nmethod* new_nmethod(const methodHandle& method, static nmethod* new_nmethod(const methodHandle& method,
int compile_id, int compile_id,
@ -633,8 +712,8 @@ public:
// alive. It is used when an uncommon trap happens. Returns true // alive. It is used when an uncommon trap happens. Returns true
// if this thread changed the state of the nmethod or false if // if this thread changed the state of the nmethod or false if
// another thread performed the transition. // another thread performed the transition.
bool make_not_entrant(const char* reason); bool make_not_entrant(ChangeReason change_reason);
bool make_not_used() { return make_not_entrant("not used"); } bool make_not_used() { return make_not_entrant(ChangeReason::not_used); }
bool is_marked_for_deoptimization() const { return deoptimization_status() != not_marked; } bool is_marked_for_deoptimization() const { return deoptimization_status() != not_marked; }
bool has_been_deoptimized() const { return deoptimization_status() == deoptimize_done; } bool has_been_deoptimized() const { return deoptimization_status() == deoptimize_done; }
@ -947,7 +1026,7 @@ public:
// Logging // Logging
void log_identity(xmlStream* log) const; void log_identity(xmlStream* log) const;
void log_new_nmethod() const; void log_new_nmethod() const;
void log_state_change(const char* reason) const; void log_state_change(ChangeReason change_reason) const;
// Prints block-level comments, including nmethod specific block labels: // Prints block-level comments, including nmethod specific block labels:
void print_nmethod_labels(outputStream* stream, address block_begin, bool print_section_labels=true) const; void print_nmethod_labels(outputStream* stream, address block_begin, bool print_section_labels=true) const;

View File

@ -550,6 +550,7 @@ void CompilationPolicy::initialize() {
int count = CICompilerCount; int count = CICompilerCount;
bool c1_only = CompilerConfig::is_c1_only(); bool c1_only = CompilerConfig::is_c1_only();
bool c2_only = CompilerConfig::is_c2_or_jvmci_compiler_only(); bool c2_only = CompilerConfig::is_c2_or_jvmci_compiler_only();
int min_count = (c1_only || c2_only) ? 1 : 2;
#ifdef _LP64 #ifdef _LP64
// Turn on ergonomic compiler count selection // Turn on ergonomic compiler count selection
@ -560,7 +561,7 @@ void CompilationPolicy::initialize() {
// Simple log n seems to grow too slowly for tiered, try something faster: log n * log log n // Simple log n seems to grow too slowly for tiered, try something faster: log n * log log n
int log_cpu = log2i(os::active_processor_count()); int log_cpu = log2i(os::active_processor_count());
int loglog_cpu = log2i(MAX2(log_cpu, 1)); int loglog_cpu = log2i(MAX2(log_cpu, 1));
count = MAX2(log_cpu * loglog_cpu * 3 / 2, 2); count = MAX2(log_cpu * loglog_cpu * 3 / 2, min_count);
// Make sure there is enough space in the code cache to hold all the compiler buffers // Make sure there is enough space in the code cache to hold all the compiler buffers
size_t c1_size = 0; size_t c1_size = 0;
#ifdef COMPILER1 #ifdef COMPILER1
@ -574,7 +575,7 @@ void CompilationPolicy::initialize() {
int max_count = (ReservedCodeCacheSize - (CodeCacheMinimumUseSpace DEBUG_ONLY(* 3))) / (int)buffer_size; int max_count = (ReservedCodeCacheSize - (CodeCacheMinimumUseSpace DEBUG_ONLY(* 3))) / (int)buffer_size;
if (count > max_count) { if (count > max_count) {
// Lower the compiler count such that all buffers fit into the code cache // Lower the compiler count such that all buffers fit into the code cache
count = MAX2(max_count, c1_only ? 1 : 2); count = MAX2(max_count, min_count);
} }
FLAG_SET_ERGO(CICompilerCount, count); FLAG_SET_ERGO(CICompilerCount, count);
} }
@ -593,9 +594,10 @@ void CompilationPolicy::initialize() {
#endif #endif
if (c1_only) { if (c1_only) {
// No C2 compiler thread required // No C2 compiler threads are needed
set_c1_count(count); set_c1_count(count);
} else if (c2_only) { } else if (c2_only) {
// No C1 compiler threads are needed
set_c2_count(count); set_c2_count(count);
} else { } else {
#if INCLUDE_JVMCI #if INCLUDE_JVMCI
@ -613,6 +615,9 @@ void CompilationPolicy::initialize() {
} }
assert(count == c1_count() + c2_count(), "inconsistent compiler thread count"); assert(count == c1_count() + c2_count(), "inconsistent compiler thread count");
set_increase_threshold_at_ratio(); set_increase_threshold_at_ratio();
} else {
// Interpreter mode creates no compilers
FLAG_SET_ERGO(CICompilerCount, 0);
} }
set_start_time(nanos_to_millis(os::javaTimeNanos())); set_start_time(nanos_to_millis(os::javaTimeNanos()));
} }
@ -919,7 +924,7 @@ void CompilationPolicy::compile(const methodHandle& mh, int bci, CompLevel level
nmethod* osr_nm = mh->lookup_osr_nmethod_for(bci, CompLevel_simple, false); nmethod* osr_nm = mh->lookup_osr_nmethod_for(bci, CompLevel_simple, false);
if (osr_nm != nullptr && osr_nm->comp_level() > CompLevel_simple) { if (osr_nm != nullptr && osr_nm->comp_level() > CompLevel_simple) {
// Invalidate the existing OSR nmethod so that a compile at CompLevel_simple is permitted. // Invalidate the existing OSR nmethod so that a compile at CompLevel_simple is permitted.
osr_nm->make_not_entrant("OSR invalidation for compiling with C1"); osr_nm->make_not_entrant(nmethod::ChangeReason::OSR_invalidation_for_compiling_with_C1);
} }
compile(mh, bci, CompLevel_simple, THREAD); compile(mh, bci, CompLevel_simple, THREAD);
} }
@ -1511,7 +1516,7 @@ void CompilationPolicy::method_back_branch_event(const methodHandle& mh, const m
int osr_bci = nm->is_osr_method() ? nm->osr_entry_bci() : InvocationEntryBci; int osr_bci = nm->is_osr_method() ? nm->osr_entry_bci() : InvocationEntryBci;
print_event(MAKE_NOT_ENTRANT, mh(), mh(), osr_bci, level); print_event(MAKE_NOT_ENTRANT, mh(), mh(), osr_bci, level);
} }
nm->make_not_entrant("OSR invalidation, back branch"); nm->make_not_entrant(nmethod::ChangeReason::OSR_invalidation_back_branch);
} }
} }
// Fix up next_level if necessary to avoid deopts // Fix up next_level if necessary to avoid deopts

View File

@ -98,15 +98,15 @@ void ParallelArguments::initialize() {
FullGCForwarding::initialize_flags(heap_reserved_size_bytes()); FullGCForwarding::initialize_flags(heap_reserved_size_bytes());
} }
// The alignment used for boundary between young gen and old gen // The alignment used for spaces in young gen and old gen
static size_t default_gen_alignment() { static size_t default_space_alignment() {
return 64 * K * HeapWordSize; return 64 * K * HeapWordSize;
} }
void ParallelArguments::initialize_alignments() { void ParallelArguments::initialize_alignments() {
// Initialize card size before initializing alignments // Initialize card size before initializing alignments
CardTable::initialize_card_size(); CardTable::initialize_card_size();
SpaceAlignment = GenAlignment = default_gen_alignment(); SpaceAlignment = default_space_alignment();
HeapAlignment = compute_heap_alignment(); HeapAlignment = compute_heap_alignment();
} }
@ -123,9 +123,8 @@ void ParallelArguments::initialize_heap_flags_and_sizes() {
// Can a page size be something else than a power of two? // Can a page size be something else than a power of two?
assert(is_power_of_2((intptr_t)page_sz), "must be a power of 2"); assert(is_power_of_2((intptr_t)page_sz), "must be a power of 2");
size_t new_alignment = align_up(page_sz, GenAlignment); size_t new_alignment = align_up(page_sz, SpaceAlignment);
if (new_alignment != GenAlignment) { if (new_alignment != SpaceAlignment) {
GenAlignment = new_alignment;
SpaceAlignment = new_alignment; SpaceAlignment = new_alignment;
// Redo everything from the start // Redo everything from the start
initialize_heap_flags_and_sizes_one_pass(); initialize_heap_flags_and_sizes_one_pass();

View File

@ -29,10 +29,8 @@
void ParallelInitLogger::print_heap() { void ParallelInitLogger::print_heap() {
log_info_p(gc, init)("Alignments:" log_info_p(gc, init)("Alignments:"
" Space " EXACTFMT "," " Space " EXACTFMT ","
" Generation " EXACTFMT ","
" Heap " EXACTFMT, " Heap " EXACTFMT,
EXACTFMTARGS(SpaceAlignment), EXACTFMTARGS(SpaceAlignment),
EXACTFMTARGS(GenAlignment),
EXACTFMTARGS(HeapAlignment)); EXACTFMTARGS(HeapAlignment));
GCInitLogger::print_heap(); GCInitLogger::print_heap();
} }

View File

@ -69,8 +69,8 @@ jint ParallelScavengeHeap::initialize() {
initialize_reserved_region(heap_rs); initialize_reserved_region(heap_rs);
// Layout the reserved space for the generations. // Layout the reserved space for the generations.
ReservedSpace old_rs = heap_rs.first_part(MaxOldSize, GenAlignment); ReservedSpace old_rs = heap_rs.first_part(MaxOldSize, SpaceAlignment);
ReservedSpace young_rs = heap_rs.last_part(MaxOldSize, GenAlignment); ReservedSpace young_rs = heap_rs.last_part(MaxOldSize, SpaceAlignment);
assert(young_rs.size() == MaxNewSize, "Didn't reserve all of the heap"); assert(young_rs.size() == MaxNewSize, "Didn't reserve all of the heap");
PSCardTable* card_table = new PSCardTable(_reserved); PSCardTable* card_table = new PSCardTable(_reserved);
@ -107,7 +107,7 @@ jint ParallelScavengeHeap::initialize() {
new PSAdaptiveSizePolicy(eden_capacity, new PSAdaptiveSizePolicy(eden_capacity,
initial_promo_size, initial_promo_size,
young_gen()->to_space()->capacity_in_bytes(), young_gen()->to_space()->capacity_in_bytes(),
GenAlignment, SpaceAlignment,
max_gc_pause_sec, max_gc_pause_sec,
GCTimeRatio GCTimeRatio
); );

View File

@ -41,7 +41,7 @@ PSOldGen::PSOldGen(ReservedSpace rs, size_t initial_size, size_t min_size,
_min_gen_size(min_size), _min_gen_size(min_size),
_max_gen_size(max_size) _max_gen_size(max_size)
{ {
initialize(rs, initial_size, GenAlignment); initialize(rs, initial_size, SpaceAlignment);
} }
void PSOldGen::initialize(ReservedSpace rs, size_t initial_size, size_t alignment) { void PSOldGen::initialize(ReservedSpace rs, size_t initial_size, size_t alignment) {

View File

@ -47,7 +47,7 @@ PSYoungGen::PSYoungGen(ReservedSpace rs, size_t initial_size, size_t min_size, s
_from_counters(nullptr), _from_counters(nullptr),
_to_counters(nullptr) _to_counters(nullptr)
{ {
initialize(rs, initial_size, GenAlignment); initialize(rs, initial_size, SpaceAlignment);
} }
void PSYoungGen::initialize_virtual_space(ReservedSpace rs, void PSYoungGen::initialize_virtual_space(ReservedSpace rs,
@ -746,7 +746,7 @@ size_t PSYoungGen::available_to_live() {
} }
size_t delta_in_bytes = unused_committed + delta_in_survivor; size_t delta_in_bytes = unused_committed + delta_in_survivor;
delta_in_bytes = align_down(delta_in_bytes, GenAlignment); delta_in_bytes = align_down(delta_in_bytes, SpaceAlignment);
return delta_in_bytes; return delta_in_bytes;
} }

View File

@ -188,8 +188,8 @@ jint SerialHeap::initialize() {
initialize_reserved_region(heap_rs); initialize_reserved_region(heap_rs);
ReservedSpace young_rs = heap_rs.first_part(MaxNewSize, GenAlignment); ReservedSpace young_rs = heap_rs.first_part(MaxNewSize, SpaceAlignment);
ReservedSpace old_rs = heap_rs.last_part(MaxNewSize, GenAlignment); ReservedSpace old_rs = heap_rs.last_part(MaxNewSize, SpaceAlignment);
_rem_set = new CardTableRS(_reserved); _rem_set = new CardTableRS(_reserved);
_rem_set->initialize(young_rs.base(), old_rs.base()); _rem_set->initialize(young_rs.base(), old_rs.base());

View File

@ -35,7 +35,7 @@ extern size_t SpaceAlignment;
class GCArguments { class GCArguments {
protected: protected:
// Initialize HeapAlignment, SpaceAlignment, and extra alignments (E.g. GenAlignment) // Initialize HeapAlignment, SpaceAlignment
virtual void initialize_alignments() = 0; virtual void initialize_alignments() = 0;
virtual void initialize_heap_flags_and_sizes(); virtual void initialize_heap_flags_and_sizes();
virtual void initialize_size_info(); virtual void initialize_size_info();

View File

@ -42,17 +42,15 @@ size_t MaxOldSize = 0;
// See more in JDK-8346005 // See more in JDK-8346005
size_t OldSize = ScaleForWordSize(4*M); size_t OldSize = ScaleForWordSize(4*M);
size_t GenAlignment = 0;
size_t GenArguments::conservative_max_heap_alignment() { return (size_t)Generation::GenGrain; } size_t GenArguments::conservative_max_heap_alignment() { return (size_t)Generation::GenGrain; }
static size_t young_gen_size_lower_bound() { static size_t young_gen_size_lower_bound() {
// The young generation must be aligned and have room for eden + two survivors // The young generation must be aligned and have room for eden + two survivors
return align_up(3 * SpaceAlignment, GenAlignment); return 3 * SpaceAlignment;
} }
static size_t old_gen_size_lower_bound() { static size_t old_gen_size_lower_bound() {
return align_up(SpaceAlignment, GenAlignment); return SpaceAlignment;
} }
size_t GenArguments::scale_by_NewRatio_aligned(size_t base_size, size_t alignment) { size_t GenArguments::scale_by_NewRatio_aligned(size_t base_size, size_t alignment) {
@ -69,23 +67,20 @@ static size_t bound_minus_alignment(size_t desired_size,
void GenArguments::initialize_alignments() { void GenArguments::initialize_alignments() {
// Initialize card size before initializing alignments // Initialize card size before initializing alignments
CardTable::initialize_card_size(); CardTable::initialize_card_size();
SpaceAlignment = GenAlignment = (size_t)Generation::GenGrain; SpaceAlignment = (size_t)Generation::GenGrain;
HeapAlignment = compute_heap_alignment(); HeapAlignment = compute_heap_alignment();
} }
void GenArguments::initialize_heap_flags_and_sizes() { void GenArguments::initialize_heap_flags_and_sizes() {
GCArguments::initialize_heap_flags_and_sizes(); GCArguments::initialize_heap_flags_and_sizes();
assert(GenAlignment != 0, "Generation alignment not set up properly"); assert(SpaceAlignment != 0, "Generation alignment not set up properly");
assert(HeapAlignment >= GenAlignment, assert(HeapAlignment >= SpaceAlignment,
"HeapAlignment: %zu less than GenAlignment: %zu", "HeapAlignment: %zu less than SpaceAlignment: %zu",
HeapAlignment, GenAlignment); HeapAlignment, SpaceAlignment);
assert(GenAlignment % SpaceAlignment == 0, assert(HeapAlignment % SpaceAlignment == 0,
"GenAlignment: %zu not aligned by SpaceAlignment: %zu", "HeapAlignment: %zu not aligned by SpaceAlignment: %zu",
GenAlignment, SpaceAlignment); HeapAlignment, SpaceAlignment);
assert(HeapAlignment % GenAlignment == 0,
"HeapAlignment: %zu not aligned by GenAlignment: %zu",
HeapAlignment, GenAlignment);
// All generational heaps have a young gen; handle those flags here // All generational heaps have a young gen; handle those flags here
@ -106,7 +101,7 @@ void GenArguments::initialize_heap_flags_and_sizes() {
// Make sure NewSize allows an old generation to fit even if set on the command line // Make sure NewSize allows an old generation to fit even if set on the command line
if (FLAG_IS_CMDLINE(NewSize) && NewSize >= InitialHeapSize) { if (FLAG_IS_CMDLINE(NewSize) && NewSize >= InitialHeapSize) {
size_t revised_new_size = bound_minus_alignment(NewSize, InitialHeapSize, GenAlignment); size_t revised_new_size = bound_minus_alignment(NewSize, InitialHeapSize, SpaceAlignment);
log_warning(gc, ergo)("NewSize (%zuk) is equal to or greater than initial heap size (%zuk). A new " log_warning(gc, ergo)("NewSize (%zuk) is equal to or greater than initial heap size (%zuk). A new "
"NewSize of %zuk will be used to accomodate an old generation.", "NewSize of %zuk will be used to accomodate an old generation.",
NewSize/K, InitialHeapSize/K, revised_new_size/K); NewSize/K, InitialHeapSize/K, revised_new_size/K);
@ -115,8 +110,8 @@ void GenArguments::initialize_heap_flags_and_sizes() {
// Now take the actual NewSize into account. We will silently increase NewSize // Now take the actual NewSize into account. We will silently increase NewSize
// if the user specified a smaller or unaligned value. // if the user specified a smaller or unaligned value.
size_t bounded_new_size = bound_minus_alignment(NewSize, MaxHeapSize, GenAlignment); size_t bounded_new_size = bound_minus_alignment(NewSize, MaxHeapSize, SpaceAlignment);
bounded_new_size = MAX2(smallest_new_size, align_down(bounded_new_size, GenAlignment)); bounded_new_size = MAX2(smallest_new_size, align_down(bounded_new_size, SpaceAlignment));
if (bounded_new_size != NewSize) { if (bounded_new_size != NewSize) {
FLAG_SET_ERGO(NewSize, bounded_new_size); FLAG_SET_ERGO(NewSize, bounded_new_size);
} }
@ -125,7 +120,7 @@ void GenArguments::initialize_heap_flags_and_sizes() {
if (!FLAG_IS_DEFAULT(MaxNewSize)) { if (!FLAG_IS_DEFAULT(MaxNewSize)) {
if (MaxNewSize >= MaxHeapSize) { if (MaxNewSize >= MaxHeapSize) {
// Make sure there is room for an old generation // Make sure there is room for an old generation
size_t smaller_max_new_size = MaxHeapSize - GenAlignment; size_t smaller_max_new_size = MaxHeapSize - SpaceAlignment;
if (FLAG_IS_CMDLINE(MaxNewSize)) { if (FLAG_IS_CMDLINE(MaxNewSize)) {
log_warning(gc, ergo)("MaxNewSize (%zuk) is equal to or greater than the entire " log_warning(gc, ergo)("MaxNewSize (%zuk) is equal to or greater than the entire "
"heap (%zuk). A new max generation size of %zuk will be used.", "heap (%zuk). A new max generation size of %zuk will be used.",
@ -137,8 +132,8 @@ void GenArguments::initialize_heap_flags_and_sizes() {
} }
} else if (MaxNewSize < NewSize) { } else if (MaxNewSize < NewSize) {
FLAG_SET_ERGO(MaxNewSize, NewSize); FLAG_SET_ERGO(MaxNewSize, NewSize);
} else if (!is_aligned(MaxNewSize, GenAlignment)) { } else if (!is_aligned(MaxNewSize, SpaceAlignment)) {
FLAG_SET_ERGO(MaxNewSize, align_down(MaxNewSize, GenAlignment)); FLAG_SET_ERGO(MaxNewSize, align_down(MaxNewSize, SpaceAlignment));
} }
} }
@ -166,13 +161,13 @@ void GenArguments::initialize_heap_flags_and_sizes() {
// exceed it. Adjust New/OldSize as necessary. // exceed it. Adjust New/OldSize as necessary.
size_t calculated_size = NewSize + OldSize; size_t calculated_size = NewSize + OldSize;
double shrink_factor = (double) MaxHeapSize / calculated_size; double shrink_factor = (double) MaxHeapSize / calculated_size;
size_t smaller_new_size = align_down((size_t)(NewSize * shrink_factor), GenAlignment); size_t smaller_new_size = align_down((size_t)(NewSize * shrink_factor), SpaceAlignment);
FLAG_SET_ERGO(NewSize, MAX2(young_gen_size_lower_bound(), smaller_new_size)); FLAG_SET_ERGO(NewSize, MAX2(young_gen_size_lower_bound(), smaller_new_size));
// OldSize is already aligned because above we aligned MaxHeapSize to // OldSize is already aligned because above we aligned MaxHeapSize to
// HeapAlignment, and we just made sure that NewSize is aligned to // HeapAlignment, and we just made sure that NewSize is aligned to
// GenAlignment. In initialize_flags() we verified that HeapAlignment // SpaceAlignment. In initialize_flags() we verified that HeapAlignment
// is a multiple of GenAlignment. // is a multiple of SpaceAlignment.
OldSize = MaxHeapSize - NewSize; OldSize = MaxHeapSize - NewSize;
} else { } else {
FLAG_SET_ERGO(MaxHeapSize, align_up(NewSize + OldSize, HeapAlignment)); FLAG_SET_ERGO(MaxHeapSize, align_up(NewSize + OldSize, HeapAlignment));
@ -200,7 +195,7 @@ void GenArguments::initialize_size_info() {
// Determine maximum size of the young generation. // Determine maximum size of the young generation.
if (FLAG_IS_DEFAULT(MaxNewSize)) { if (FLAG_IS_DEFAULT(MaxNewSize)) {
max_young_size = scale_by_NewRatio_aligned(MaxHeapSize, GenAlignment); max_young_size = scale_by_NewRatio_aligned(MaxHeapSize, SpaceAlignment);
// Bound the maximum size by NewSize below (since it historically // Bound the maximum size by NewSize below (since it historically
// would have been NewSize and because the NewRatio calculation could // would have been NewSize and because the NewRatio calculation could
// yield a size that is too small) and bound it by MaxNewSize above. // yield a size that is too small) and bound it by MaxNewSize above.
@ -229,18 +224,18 @@ void GenArguments::initialize_size_info() {
// If NewSize is set on the command line, we should use it as // If NewSize is set on the command line, we should use it as
// the initial size, but make sure it is within the heap bounds. // the initial size, but make sure it is within the heap bounds.
initial_young_size = initial_young_size =
MIN2(max_young_size, bound_minus_alignment(NewSize, InitialHeapSize, GenAlignment)); MIN2(max_young_size, bound_minus_alignment(NewSize, InitialHeapSize, SpaceAlignment));
MinNewSize = bound_minus_alignment(initial_young_size, MinHeapSize, GenAlignment); MinNewSize = bound_minus_alignment(initial_young_size, MinHeapSize, SpaceAlignment);
} else { } else {
// For the case where NewSize is not set on the command line, use // For the case where NewSize is not set on the command line, use
// NewRatio to size the initial generation size. Use the current // NewRatio to size the initial generation size. Use the current
// NewSize as the floor, because if NewRatio is overly large, the resulting // NewSize as the floor, because if NewRatio is overly large, the resulting
// size can be too small. // size can be too small.
initial_young_size = initial_young_size =
clamp(scale_by_NewRatio_aligned(InitialHeapSize, GenAlignment), NewSize, max_young_size); clamp(scale_by_NewRatio_aligned(InitialHeapSize, SpaceAlignment), NewSize, max_young_size);
// Derive MinNewSize from MinHeapSize // Derive MinNewSize from MinHeapSize
MinNewSize = MIN2(scale_by_NewRatio_aligned(MinHeapSize, GenAlignment), initial_young_size); MinNewSize = MIN2(scale_by_NewRatio_aligned(MinHeapSize, SpaceAlignment), initial_young_size);
} }
} }
@ -252,7 +247,7 @@ void GenArguments::initialize_size_info() {
// The maximum old size can be determined from the maximum young // The maximum old size can be determined from the maximum young
// and maximum heap size since no explicit flags exist // and maximum heap size since no explicit flags exist
// for setting the old generation maximum. // for setting the old generation maximum.
MaxOldSize = MAX2(MaxHeapSize - max_young_size, GenAlignment); MaxOldSize = MAX2(MaxHeapSize - max_young_size, SpaceAlignment);
MinOldSize = MIN3(MaxOldSize, MinOldSize = MIN3(MaxOldSize,
InitialHeapSize - initial_young_size, InitialHeapSize - initial_young_size,
MinHeapSize - MinNewSize); MinHeapSize - MinNewSize);
@ -315,10 +310,10 @@ void GenArguments::assert_flags() {
assert(NewSize >= MinNewSize, "Ergonomics decided on a too small young gen size"); assert(NewSize >= MinNewSize, "Ergonomics decided on a too small young gen size");
assert(NewSize <= MaxNewSize, "Ergonomics decided on incompatible initial and maximum young gen sizes"); assert(NewSize <= MaxNewSize, "Ergonomics decided on incompatible initial and maximum young gen sizes");
assert(FLAG_IS_DEFAULT(MaxNewSize) || MaxNewSize < MaxHeapSize, "Ergonomics decided on incompatible maximum young gen and heap sizes"); assert(FLAG_IS_DEFAULT(MaxNewSize) || MaxNewSize < MaxHeapSize, "Ergonomics decided on incompatible maximum young gen and heap sizes");
assert(NewSize % GenAlignment == 0, "NewSize alignment"); assert(NewSize % SpaceAlignment == 0, "NewSize alignment");
assert(FLAG_IS_DEFAULT(MaxNewSize) || MaxNewSize % GenAlignment == 0, "MaxNewSize alignment"); assert(FLAG_IS_DEFAULT(MaxNewSize) || MaxNewSize % SpaceAlignment == 0, "MaxNewSize alignment");
assert(OldSize + NewSize <= MaxHeapSize, "Ergonomics decided on incompatible generation and heap sizes"); assert(OldSize + NewSize <= MaxHeapSize, "Ergonomics decided on incompatible generation and heap sizes");
assert(OldSize % GenAlignment == 0, "OldSize alignment"); assert(OldSize % SpaceAlignment == 0, "OldSize alignment");
} }
void GenArguments::assert_size_info() { void GenArguments::assert_size_info() {
@ -327,19 +322,19 @@ void GenArguments::assert_size_info() {
assert(MaxNewSize < MaxHeapSize, "Ergonomics decided on incompatible maximum young and heap sizes"); assert(MaxNewSize < MaxHeapSize, "Ergonomics decided on incompatible maximum young and heap sizes");
assert(MinNewSize <= NewSize, "Ergonomics decided on incompatible minimum and initial young gen sizes"); assert(MinNewSize <= NewSize, "Ergonomics decided on incompatible minimum and initial young gen sizes");
assert(NewSize <= MaxNewSize, "Ergonomics decided on incompatible initial and maximum young gen sizes"); assert(NewSize <= MaxNewSize, "Ergonomics decided on incompatible initial and maximum young gen sizes");
assert(MinNewSize % GenAlignment == 0, "_min_young_size alignment"); assert(MinNewSize % SpaceAlignment == 0, "_min_young_size alignment");
assert(NewSize % GenAlignment == 0, "_initial_young_size alignment"); assert(NewSize % SpaceAlignment == 0, "_initial_young_size alignment");
assert(MaxNewSize % GenAlignment == 0, "MaxNewSize alignment"); assert(MaxNewSize % SpaceAlignment == 0, "MaxNewSize alignment");
assert(MinNewSize <= bound_minus_alignment(MinNewSize, MinHeapSize, GenAlignment), assert(MinNewSize <= bound_minus_alignment(MinNewSize, MinHeapSize, SpaceAlignment),
"Ergonomics made minimum young generation larger than minimum heap"); "Ergonomics made minimum young generation larger than minimum heap");
assert(NewSize <= bound_minus_alignment(NewSize, InitialHeapSize, GenAlignment), assert(NewSize <= bound_minus_alignment(NewSize, InitialHeapSize, SpaceAlignment),
"Ergonomics made initial young generation larger than initial heap"); "Ergonomics made initial young generation larger than initial heap");
assert(MaxNewSize <= bound_minus_alignment(MaxNewSize, MaxHeapSize, GenAlignment), assert(MaxNewSize <= bound_minus_alignment(MaxNewSize, MaxHeapSize, SpaceAlignment),
"Ergonomics made maximum young generation lager than maximum heap"); "Ergonomics made maximum young generation lager than maximum heap");
assert(MinOldSize <= OldSize, "Ergonomics decided on incompatible minimum and initial old gen sizes"); assert(MinOldSize <= OldSize, "Ergonomics decided on incompatible minimum and initial old gen sizes");
assert(OldSize <= MaxOldSize, "Ergonomics decided on incompatible initial and maximum old gen sizes"); assert(OldSize <= MaxOldSize, "Ergonomics decided on incompatible initial and maximum old gen sizes");
assert(MaxOldSize % GenAlignment == 0, "MaxOldSize alignment"); assert(MaxOldSize % SpaceAlignment == 0, "MaxOldSize alignment");
assert(OldSize % GenAlignment == 0, "OldSize alignment"); assert(OldSize % SpaceAlignment == 0, "OldSize alignment");
assert(MaxHeapSize <= (MaxNewSize + MaxOldSize), "Total maximum heap sizes must be sum of generation maximum sizes"); assert(MaxHeapSize <= (MaxNewSize + MaxOldSize), "Total maximum heap sizes must be sum of generation maximum sizes");
assert(MinNewSize + MinOldSize <= MinHeapSize, "Minimum generation sizes exceed minimum heap size"); assert(MinNewSize + MinOldSize <= MinHeapSize, "Minimum generation sizes exceed minimum heap size");
assert(NewSize + OldSize == InitialHeapSize, "Initial generation sizes should match initial heap size"); assert(NewSize + OldSize == InitialHeapSize, "Initial generation sizes should match initial heap size");

View File

@ -35,8 +35,6 @@ extern size_t MaxOldSize;
extern size_t OldSize; extern size_t OldSize;
extern size_t GenAlignment;
class GenArguments : public GCArguments { class GenArguments : public GCArguments {
friend class TestGenCollectorPolicy; // Testing friend class TestGenCollectorPolicy; // Testing
private: private:

View File

@ -23,6 +23,7 @@
#include "gc/z/zAllocator.hpp" #include "gc/z/zAllocator.hpp"
#include "gc/z/zObjectAllocator.hpp" #include "gc/z/zObjectAllocator.hpp"
#include "gc/z/zPageAge.inline.hpp"
ZAllocatorEden* ZAllocator::_eden; ZAllocatorEden* ZAllocator::_eden;
ZAllocatorForRelocation* ZAllocator::_relocation[ZAllocator::_relocation_allocators]; ZAllocatorForRelocation* ZAllocator::_relocation[ZAllocator::_relocation_allocators];
@ -47,7 +48,7 @@ ZPageAge ZAllocatorForRelocation::install() {
for (uint i = 0; i < ZAllocator::_relocation_allocators; ++i) { for (uint i = 0; i < ZAllocator::_relocation_allocators; ++i) {
if (_relocation[i] == nullptr) { if (_relocation[i] == nullptr) {
_relocation[i] = this; _relocation[i] = this;
return static_cast<ZPageAge>(i + 1); return to_zpageage(i + 1);
} }
} }

View File

@ -1,5 +1,5 @@
/* /*
* Copyright (c) 2021, 2024, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2021, 2025, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* *
* This code is free software; you can redistribute it and/or modify it * This code is free software; you can redistribute it and/or modify it
@ -35,7 +35,7 @@ class ZPage;
class ZAllocator { class ZAllocator {
public: public:
static constexpr uint _relocation_allocators = static_cast<uint>(ZPageAge::old); static constexpr uint _relocation_allocators = ZPageAgeCount - 1;
protected: protected:
ZObjectAllocator _object_allocator; ZObjectAllocator _object_allocator;

View File

@ -1,5 +1,5 @@
/* /*
* Copyright (c) 2021, 2023, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2021, 2025, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* *
* This code is free software; you can redistribute it and/or modify it * This code is free software; you can redistribute it and/or modify it
@ -28,13 +28,14 @@
#include "gc/z/zAddress.inline.hpp" #include "gc/z/zAddress.inline.hpp"
#include "gc/z/zHeap.hpp" #include "gc/z/zHeap.hpp"
#include "gc/z/zPageAge.inline.hpp"
inline ZAllocatorEden* ZAllocator::eden() { inline ZAllocatorEden* ZAllocator::eden() {
return _eden; return _eden;
} }
inline ZAllocatorForRelocation* ZAllocator::relocation(ZPageAge page_age) { inline ZAllocatorForRelocation* ZAllocator::relocation(ZPageAge page_age) {
return _relocation[static_cast<uint>(page_age) - 1]; return _relocation[untype(page_age - 1)];
} }
inline ZAllocatorForRelocation* ZAllocator::old() { inline ZAllocatorForRelocation* ZAllocator::old() {

View File

@ -41,6 +41,7 @@
#include "gc/z/zHeap.inline.hpp" #include "gc/z/zHeap.inline.hpp"
#include "gc/z/zJNICritical.hpp" #include "gc/z/zJNICritical.hpp"
#include "gc/z/zMark.inline.hpp" #include "gc/z/zMark.inline.hpp"
#include "gc/z/zPageAge.inline.hpp"
#include "gc/z/zPageAllocator.hpp" #include "gc/z/zPageAllocator.hpp"
#include "gc/z/zRelocationSet.inline.hpp" #include "gc/z/zRelocationSet.inline.hpp"
#include "gc/z/zRelocationSetSelector.inline.hpp" #include "gc/z/zRelocationSetSelector.inline.hpp"
@ -699,11 +700,10 @@ uint ZGenerationYoung::compute_tenuring_threshold(ZRelocationSetSelectorStats st
uint last_populated_age = 0; uint last_populated_age = 0;
size_t last_populated_live = 0; size_t last_populated_live = 0;
for (uint i = 0; i <= ZPageAgeMax; ++i) { for (ZPageAge age : ZPageAgeRange()) {
const ZPageAge age = static_cast<ZPageAge>(i);
const size_t young_live = stats.small(age).live() + stats.medium(age).live() + stats.large(age).live(); const size_t young_live = stats.small(age).live() + stats.medium(age).live() + stats.large(age).live();
if (young_live > 0) { if (young_live > 0) {
last_populated_age = i; last_populated_age = untype(age);
last_populated_live = young_live; last_populated_live = young_live;
if (young_live_last > 0) { if (young_live_last > 0) {
young_life_expectancy_sum += double(young_live) / double(young_live_last); young_life_expectancy_sum += double(young_live) / double(young_live_last);
@ -842,8 +842,8 @@ void ZGenerationYoung::mark_start() {
// Retire allocating pages // Retire allocating pages
ZAllocator::eden()->retire_pages(); ZAllocator::eden()->retire_pages();
for (ZPageAge i = ZPageAge::survivor1; i <= ZPageAge::survivor14; i = static_cast<ZPageAge>(static_cast<uint>(i) + 1)) { for (ZPageAge age : ZPageAgeRangeSurvivor) {
ZAllocator::relocation(i)->retire_pages(); ZAllocator::relocation(age)->retire_pages();
} }
// Reset allocated/reclaimed/used statistics // Reset allocated/reclaimed/used statistics
@ -948,6 +948,14 @@ void ZGenerationYoung::register_with_remset(ZPage* page) {
_remembered.register_found_old(page); _remembered.register_found_old(page);
} }
ZRemembered* ZGenerationYoung::remembered() {
return &_remembered;
}
void ZGenerationYoung::remap_current_remset(ZRemsetTableIterator* iter) {
_remembered.remap_current(iter);
}
ZGenerationTracer* ZGenerationYoung::jfr_tracer() { ZGenerationTracer* ZGenerationYoung::jfr_tracer() {
return &_jfr_tracer; return &_jfr_tracer;
} }
@ -1435,7 +1443,7 @@ typedef ClaimingCLDToOopClosure<ClassLoaderData::_claim_none> ZRemapCLDClosure;
class ZRemapYoungRootsTask : public ZTask { class ZRemapYoungRootsTask : public ZTask {
private: private:
ZGenerationPagesParallelIterator _old_pages_parallel_iterator; ZRemsetTableIterator _remset_table_iterator;
ZRootsIteratorAllColored _roots_colored; ZRootsIteratorAllColored _roots_colored;
ZRootsIteratorAllUncolored _roots_uncolored; ZRootsIteratorAllUncolored _roots_uncolored;
@ -1449,7 +1457,7 @@ private:
public: public:
ZRemapYoungRootsTask(ZPageTable* page_table, ZPageAllocator* page_allocator) ZRemapYoungRootsTask(ZPageTable* page_table, ZPageAllocator* page_allocator)
: ZTask("ZRemapYoungRootsTask"), : ZTask("ZRemapYoungRootsTask"),
_old_pages_parallel_iterator(page_table, ZGenerationId::old, page_allocator), _remset_table_iterator(ZGeneration::young()->remembered(), false /* previous */),
_roots_colored(ZGenerationIdOptional::old), _roots_colored(ZGenerationIdOptional::old),
_roots_uncolored(ZGenerationIdOptional::old), _roots_uncolored(ZGenerationIdOptional::old),
_cl_colored(), _cl_colored(),
@ -1472,11 +1480,8 @@ public:
{ {
ZStatTimerWorker timer(ZSubPhaseConcurrentRemapRememberedOld); ZStatTimerWorker timer(ZSubPhaseConcurrentRemapRememberedOld);
_old_pages_parallel_iterator.do_pages([&](ZPage* page) { // Visit all object fields that potentially pointing into young generation
// Visit all object fields that potentially pointing into young generation ZGeneration::young()->remap_current_remset(&_remset_table_iterator);
page->oops_do_current_remembered(ZBarrier::load_barrier_on_oop_field);
return true;
});
} }
} }
}; };

View File

@ -191,6 +191,7 @@ class ZGenerationYoung : public ZGeneration {
friend class VM_ZMarkStartYoung; friend class VM_ZMarkStartYoung;
friend class VM_ZMarkStartYoungAndOld; friend class VM_ZMarkStartYoungAndOld;
friend class VM_ZRelocateStartYoung; friend class VM_ZRelocateStartYoung;
friend class ZRemapYoungRootsTask;
friend class ZYoungTypeSetter; friend class ZYoungTypeSetter;
private: private:
@ -219,6 +220,8 @@ private:
void pause_relocate_start(); void pause_relocate_start();
void concurrent_relocate(); void concurrent_relocate();
ZRemembered* remembered();
public: public:
ZGenerationYoung(ZPageTable* page_table, ZGenerationYoung(ZPageTable* page_table,
const ZForwardingTable* old_forwarding_table, const ZForwardingTable* old_forwarding_table,
@ -252,6 +255,9 @@ public:
// Register old pages with remembered set // Register old pages with remembered set
void register_with_remset(ZPage* page); void register_with_remset(ZPage* page);
// Remap the oops of the current remembered set
void remap_current_remset(ZRemsetTableIterator* iter);
// Serviceability // Serviceability
ZGenerationTracer* jfr_tracer(); ZGenerationTracer* jfr_tracer();

View File

@ -1,5 +1,5 @@
/* /*
* Copyright (c) 2021, 2023, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2021, 2025, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* *
* This code is free software; you can redistribute it and/or modify it * This code is free software; you can redistribute it and/or modify it
@ -24,6 +24,7 @@
#ifndef SHARE_GC_Z_ZPAGEAGE_HPP #ifndef SHARE_GC_Z_ZPAGEAGE_HPP
#define SHARE_GC_Z_ZPAGEAGE_HPP #define SHARE_GC_Z_ZPAGEAGE_HPP
#include "utilities/enumIterator.hpp"
#include "utilities/globalDefinitions.hpp" #include "utilities/globalDefinitions.hpp"
enum class ZPageAge : uint8_t { enum class ZPageAge : uint8_t {
@ -45,6 +46,19 @@ enum class ZPageAge : uint8_t {
old old
}; };
constexpr uint ZPageAgeMax = static_cast<uint>(ZPageAge::old); constexpr uint ZPageAgeCount = static_cast<uint>(ZPageAge::old) + 1;
constexpr ZPageAge ZPageAgeLastPlusOne = static_cast<ZPageAge>(ZPageAgeCount);
ENUMERATOR_RANGE(ZPageAge,
ZPageAge::eden,
ZPageAge::old);
using ZPageAgeRange = EnumRange<ZPageAge>;
constexpr ZPageAgeRange ZPageAgeRangeEden = ZPageAgeRange::create<ZPageAge::eden, ZPageAge::survivor1>();
constexpr ZPageAgeRange ZPageAgeRangeYoung = ZPageAgeRange::create<ZPageAge::eden, ZPageAge::old>();
constexpr ZPageAgeRange ZPageAgeRangeSurvivor = ZPageAgeRange::create<ZPageAge::survivor1, ZPageAge::old>();
constexpr ZPageAgeRange ZPageAgeRangeRelocation = ZPageAgeRange::create<ZPageAge::survivor1, ZPageAgeLastPlusOne>();
constexpr ZPageAgeRange ZPageAgeRangeOld = ZPageAgeRange::create<ZPageAge::old, ZPageAgeLastPlusOne>();
#endif // SHARE_GC_Z_ZPAGEAGE_HPP #endif // SHARE_GC_Z_ZPAGEAGE_HPP

Some files were not shown because too many files have changed in this diff Show More