openjdk/src/hotspot/cpu/x86/gc/z/z_x86_64.ad

//
// Copyright (c) 2015, 2024, Oracle and/or its affiliates. All rights reserved.
// DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
//
// This code is free software; you can redistribute it and/or modify it
// under the terms of the GNU General Public License version 2 only, as
// published by the Free Software Foundation.
//
// This code is distributed in the hope that it will be useful, but WITHOUT
// ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
// FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
// version 2 for more details (a copy is included in the LICENSE file that
// accompanied this code).
//
// You should have received a copy of the GNU General Public License version
// 2 along with this work; if not, write to the Free Software Foundation,
// Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
//
// Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
// or visit www.oracle.com if you need additional information or have any
// questions.
//
source_hpp %{

#include "gc/shared/gc_globals.hpp"
#include "gc/z/c2/zBarrierSetC2.hpp"
#include "gc/z/zThreadLocalData.hpp"

%}

source %{

#include "c2_intelJccErratum_x86.hpp"
#include "gc/z/zBarrierSetAssembler.hpp"
static void z_color(MacroAssembler* masm, const MachNode* node, Register ref) {
  __ relocate(barrier_Relocation::spec(), ZBarrierRelocationFormatLoadGoodBeforeShl);
  __ shlq(ref, barrier_Relocation::unpatched);
  __ orq_imm32(ref, barrier_Relocation::unpatched);
  __ relocate(barrier_Relocation::spec(), ZBarrierRelocationFormatStoreGoodAfterOr);
}

static void z_uncolor(MacroAssembler* masm, const MachNode* node, Register ref) {
  __ relocate(barrier_Relocation::spec(), ZBarrierRelocationFormatLoadGoodBeforeShl);
  __ shrq(ref, barrier_Relocation::unpatched);
}
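
// Weak and phantom loads must check the mark-good color before the reference
// can be exposed: test the (patched) mark-bad mask first, branch to the slow
// path if any bad bit is set, and only then uncolor.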
static void z_keep_alive_load_barrier(MacroAssembler* masm, const MachNode* node, Address ref_addr, Register ref) {
  __ Assembler::testl(ref, barrier_Relocation::unpatched);
  __ relocate(barrier_Relocation::spec(), ZBarrierRelocationFormatMarkBadAfterTest);
  ZLoadBarrierStubC2* const stub = ZLoadBarrierStubC2::create(node, ref_addr, ref);
  __ jcc(Assembler::notEqual, *stub->entry());
  z_uncolor(masm, node, ref);
  __ bind(*stub->continuation());
}
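
// For strong loads the uncoloring shift doubles as the barrier check: shrq
// leaves CF holding the last color bit shifted out and sets ZF for a null
// reference, so the (patched) jcc(above) only branches to the slow path when
// the reference is non-null and the load-good bit is clear.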
static void z_load_barrier(MacroAssembler* masm, const MachNode* node, Address ref_addr, Register ref) {
  Assembler::InlineSkippedInstructionsCounter skipped_counter(masm);
  const bool on_non_strong =
      ((node->barrier_data() & ZBarrierWeak) != 0) ||
      ((node->barrier_data() & ZBarrierPhantom) != 0);

  if (on_non_strong) {
    z_keep_alive_load_barrier(masm, node, ref_addr, ref);
    return;
  }

  z_uncolor(masm, node, ref);
  if (node->barrier_data() == ZBarrierElided) {
    return;
  }

  ZLoadBarrierStubC2* const stub = ZLoadBarrierStubC2::create(node, ref_addr, ref);
  {
    IntelJccErratumAlignment intel_alignment(masm, 6);
    __ jcc(Assembler::above, *stub->entry());
  }
  __ bind(*stub->continuation());
}
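
// Stores must color the new value before it is written. With an elided
// barrier the coloring is done inline; otherwise ZBarrierSetAssembler emits
// the fast-path store check, with a stub as the slow path.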
static void z_store_barrier(MacroAssembler* masm, const MachNode* node, Address ref_addr, Register rnew_zaddress, Register rnew_zpointer, bool is_atomic) {
  Assembler::InlineSkippedInstructionsCounter skipped_counter(masm);
  if (node->barrier_data() == ZBarrierElided) {
    if (rnew_zaddress != noreg) {
      // noreg means null; no need to color
      __ movptr(rnew_zpointer, rnew_zaddress);
      z_color(masm, node, rnew_zpointer);
    }
  } else {
    bool is_native = (node->barrier_data() & ZBarrierNative) != 0;
    bool is_nokeepalive = (node->barrier_data() & ZBarrierNoKeepalive) != 0;
    ZStoreBarrierStubC2* const stub = ZStoreBarrierStubC2::create(node, ref_addr, rnew_zaddress, rnew_zpointer, is_native, is_atomic, is_nokeepalive);
    ZBarrierSetAssembler* bs_asm = ZBarrierSet::assembler();
    bs_asm->store_barrier_fast(masm, ref_addr, rnew_zaddress, rnew_zpointer, true /* in_nmethod */, is_atomic, *stub->entry(), *stub->continuation());
  }
}

%}
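
// cmpxchgq implicitly uses rax for the compared (old) value, so the new value
// needs a register class that excludes rax.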
operand no_rax_RegP()
%{
  constraint(ALLOC_IN_RC(ptr_no_rax_reg));
  match(RegP);
  match(rbx_RegP);
  match(rsi_RegP);
  match(rdi_RegP);

  format %{ %}
  interface(REG_INTER);
%}
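
// All instructions below kill the condition flags, since the barrier code
// tests and shifts the pointer's color bits. The store forms also reserve a
// TEMP register to hold the colored version of the value being written.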

// Load Pointer
instruct zLoadP(rRegP dst, memory mem, rFlagsReg cr)
%{
  predicate(UseZGC && n->as_Load()->barrier_data() != 0);
  match(Set dst (LoadP mem));
  effect(TEMP dst, KILL cr);
  // The main load is a candidate to implement implicit null checks. The
  // barrier's slow path includes an identical reload, which does not need to
  // be registered in the exception table because it is dominated by the main
  // one.
  ins_is_late_expanded_null_check_candidate(true);

  ins_cost(125);
  format %{ "movq $dst, $mem" %}
  ins_encode %{
    __ movptr($dst$$Register, $mem$$Address);
    z_load_barrier(masm, this, $mem$$Address, $dst$$Register);
  %}
  ins_pipe(ialu_reg_mem);
%}

// Load Pointer and Null Check
instruct zLoadPNullCheck(rFlagsReg cr, memory op, immP0 zero)
%{
  predicate(UseZGC && n->in(1)->as_Load()->barrier_data() != 0);
  match(Set cr (CmpP (LoadP op) zero));

  ins_cost(500); // XXX
  format %{ "testq $op, 0xffffffffffff0000\t# ptr" %}
  ins_encode %{
    // A null pointer has all address bits 0. The 32-bit immediate mask is
    // sign-extended by testq to cover all address bits of the colored
    // pointer, so the test sets ZF iff the address is 0.
    __ testq($op$$Address, ZBarrierSetAssembler::ZPointerAddressMask);
  %}
  ins_pipe(ialu_cr_reg_imm);
%}

// Store Pointer
instruct zStoreP(memory mem, any_RegP src, rRegP tmp, rFlagsReg cr)
%{
  predicate(UseZGC && n->as_Store()->barrier_data() != 0);
  match(Set mem (StoreP mem src));
  effect(TEMP tmp, KILL cr);

  ins_cost(125); // XXX
  format %{ "movq $mem, $src\t# ptr" %}
  ins_encode %{
    z_store_barrier(masm, this, $mem$$Address, $src$$Register, $tmp$$Register, false /* is_atomic */);
    __ movq($mem$$Address, $tmp$$Register);
  %}
  ins_pipe(ialu_mem_reg);
%}

// Store Null Pointer
instruct zStorePNull(memory mem, immP0 zero, rRegP tmp, rFlagsReg cr)
%{
  predicate(UseZGC && n->as_Store()->barrier_data() != 0);
  match(Set mem (StoreP mem zero));
  effect(TEMP tmp, KILL cr);

  ins_cost(125); // XXX
  format %{ "movq $mem, 0\t# ptr" %}
  ins_encode %{
    z_store_barrier(masm, this, $mem$$Address, noreg, $tmp$$Register, false /* is_atomic */);
    // Store a colored null - the barrier code above does not need to color
    __ movq($mem$$Address, barrier_Relocation::unpatched);
    // The relocation can't be placed fully after the mov, as that would be the
    // beginning of a random subsequent instruction, which violates assumptions
    // made by unrelated code. Hence the end() - 1.
    __ code_section()->relocate(__ code_section()->end() - 1, barrier_Relocation::spec(), ZBarrierRelocationFormatStoreGoodAfterMov);
  %}
  ins_pipe(ialu_mem_reg);
%}
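
// The atomic operations compare colored pointers: the expected (old) value is
// colored before the cmpxchg, and where the old value is returned it is
// uncolored afterwards, while the store barrier colors the new value into
// $tmp.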
instruct zCompareAndExchangeP(indirect mem, no_rax_RegP newval, rRegP tmp, rax_RegP oldval, rFlagsReg cr) %{
  match(Set oldval (CompareAndExchangeP mem (Binary oldval newval)));
  predicate(UseZGC && n->as_LoadStore()->barrier_data() != 0);
  effect(TEMP tmp, KILL cr);

  format %{ "lock\n\t"
            "cmpxchgq $newval, $mem" %}

  ins_encode %{
    assert_different_registers($oldval$$Register, $mem$$Register);
    assert_different_registers($oldval$$Register, $newval$$Register);

    const Address mem_addr = Address($mem$$Register, 0);
    z_store_barrier(masm, this, mem_addr, $newval$$Register, $tmp$$Register, true /* is_atomic */);
    z_color(masm, this, $oldval$$Register);
    __ lock();
    __ cmpxchgptr($tmp$$Register, mem_addr);
    z_uncolor(masm, this, $oldval$$Register);
  %}

  ins_pipe(pipe_cmpxchg);
%}

instruct zCompareAndSwapP(rRegI res, indirect mem, rRegP newval, rRegP tmp, rax_RegP oldval, rFlagsReg cr) %{
  match(Set res (CompareAndSwapP mem (Binary oldval newval)));
  match(Set res (WeakCompareAndSwapP mem (Binary oldval newval)));
  predicate(UseZGC && n->as_LoadStore()->barrier_data() != 0);
  effect(TEMP tmp, KILL oldval, KILL cr);

  format %{ "lock\n\t"
            "cmpxchgq $newval, $mem\n\t"
            "setcc $res \t# emits sete + movzbl or setzue for APX" %}

  ins_encode %{
    assert_different_registers($oldval$$Register, $mem$$Register);

    const Address mem_addr = Address($mem$$Register, 0);
    z_store_barrier(masm, this, mem_addr, $newval$$Register, $tmp$$Register, true /* is_atomic */);
    z_color(masm, this, $oldval$$Register);
    __ lock();
    __ cmpxchgptr($tmp$$Register, mem_addr);
    __ setcc(Assembler::equal, $res$$Register);
  %}

  ins_pipe(pipe_cmpxchg);
%}

instruct zXChgP(indirect mem, rRegP newval, rRegP tmp, rFlagsReg cr) %{
  match(Set newval (GetAndSetP mem newval));
  predicate(UseZGC && n->as_LoadStore()->barrier_data() != 0);
  effect(TEMP tmp, KILL cr);

  format %{ "xchgq $newval, $mem" %}

  ins_encode %{
    assert_different_registers($mem$$Register, $newval$$Register);

    const Address mem_addr = Address($mem$$Register, 0);
    z_store_barrier(masm, this, mem_addr, $newval$$Register, $tmp$$Register, true /* is_atomic */);
    __ movptr($newval$$Register, $tmp$$Register);
    __ xchgptr($newval$$Register, mem_addr);
    z_uncolor(masm, this, $newval$$Register);
  %}

  ins_pipe(pipe_cmpxchg);
%}