//
// Copyright (c) 2003, 2025, Oracle and/or its affiliates. All rights reserved.
// Copyright (c) 2014, 2024, Red Hat, Inc. All rights reserved.
// DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
//
// This code is free software; you can redistribute it and/or modify it
// under the terms of the GNU General Public License version 2 only, as
// published by the Free Software Foundation.
//
// This code is distributed in the hope that it will be useful, but WITHOUT
// ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
// FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
// version 2 for more details (a copy is included in the LICENSE file that
// accompanied this code).
//
// You should have received a copy of the GNU General Public License version
// 2 along with this work; if not, write to the Free Software Foundation,
// Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
//
// Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
// or visit www.oracle.com if you need additional information or have any
// questions.
//
//
// AArch64 Architecture Description File
//----------REGISTER DEFINITION BLOCK------------------------------------------
// This information is used by the matcher and the register allocator to
// describe individual registers and classes of registers within the target
// architecture.
register %{
//----------Architecture Description Register Definitions----------------------
// General Registers
// "reg_def" name ( register save type, C convention save type,
// ideal register type, encoding );
// Register Save Types:
//
// NS = No-Save: The register allocator assumes that these registers
// can be used without saving upon entry to the method, &
// that they do not need to be saved at call sites.
//
// SOC = Save-On-Call: The register allocator assumes that these registers
// can be used without saving upon entry to the method,
// but that they must be saved at call sites.
//
// SOE = Save-On-Entry: The register allocator assumes that these registers
// must be saved before using them upon entry to the
// method, but they do not need to be saved at call
// sites.
//
// AS = Always-Save: The register allocator assumes that these registers
// must be saved before using them upon entry to the
// method, & that they must be saved at call sites.
//
// Ideal Register Type is used to determine how to save & restore a
// register. Op_RegI will get spilled with LoadI/StoreI, Op_RegP will get
// spilled with LoadP/StoreP. If the register supports both, use Op_RegI.
//
// The encoding number is the actual bit-pattern placed into the opcodes.
// We must define the 64 bit int registers in two 32 bit halves, the
// real lower register and a virtual upper half register. Upper halves
// are used by the register allocator but are not actually supplied as
// operands to memory ops.
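//
// For example, the entry
//
// reg_def R19 ( SOC, SOE, Op_RegI, 19, r19->as_VMReg() );
//
// below declares the lower half of r19 as save-on-call for Java code
// but save-on-entry under the C calling convention, with ideal type
// Op_RegI and encoding 19.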
//
// We follow the C1 compiler in classifying registers, as regards Java
// usage, as
//
// r0-r7,r10-r26 volatile (caller save)
// r27-r31 system (no save, no allocate)
// r8-r9 non-allocatable (so we can use them as scratch regs)
//
// We don't use any callee-save registers because this makes it
// difficult to de-optimise a frame (see the comment in the x86
// implementation of Deoptimization::unwind_callee_save_values).
//
// General Registers
reg_def R0 ( SOC, SOC, Op_RegI, 0, r0->as_VMReg() );
reg_def R0_H ( SOC, SOC, Op_RegI, 0, r0->as_VMReg()->next() );
reg_def R1 ( SOC, SOC, Op_RegI, 1, r1->as_VMReg() );
reg_def R1_H ( SOC, SOC, Op_RegI, 1, r1->as_VMReg()->next() );
reg_def R2 ( SOC, SOC, Op_RegI, 2, r2->as_VMReg() );
reg_def R2_H ( SOC, SOC, Op_RegI, 2, r2->as_VMReg()->next() );
reg_def R3 ( SOC, SOC, Op_RegI, 3, r3->as_VMReg() );
reg_def R3_H ( SOC, SOC, Op_RegI, 3, r3->as_VMReg()->next() );
reg_def R4 ( SOC, SOC, Op_RegI, 4, r4->as_VMReg() );
reg_def R4_H ( SOC, SOC, Op_RegI, 4, r4->as_VMReg()->next() );
reg_def R5 ( SOC, SOC, Op_RegI, 5, r5->as_VMReg() );
reg_def R5_H ( SOC, SOC, Op_RegI, 5, r5->as_VMReg()->next() );
reg_def R6 ( SOC, SOC, Op_RegI, 6, r6->as_VMReg() );
reg_def R6_H ( SOC, SOC, Op_RegI, 6, r6->as_VMReg()->next() );
reg_def R7 ( SOC, SOC, Op_RegI, 7, r7->as_VMReg() );
reg_def R7_H ( SOC, SOC, Op_RegI, 7, r7->as_VMReg()->next() );
reg_def R8 ( NS, SOC, Op_RegI, 8, r8->as_VMReg() ); // rscratch1, non-allocatable
reg_def R8_H ( NS, SOC, Op_RegI, 8, r8->as_VMReg()->next() );
reg_def R9 ( NS, SOC, Op_RegI, 9, r9->as_VMReg() ); // rscratch2, non-allocatable
reg_def R9_H ( NS, SOC, Op_RegI, 9, r9->as_VMReg()->next() );
reg_def R10 ( SOC, SOC, Op_RegI, 10, r10->as_VMReg() );
reg_def R10_H ( SOC, SOC, Op_RegI, 10, r10->as_VMReg()->next());
reg_def R11 ( SOC, SOC, Op_RegI, 11, r11->as_VMReg() );
reg_def R11_H ( SOC, SOC, Op_RegI, 11, r11->as_VMReg()->next());
reg_def R12 ( SOC, SOC, Op_RegI, 12, r12->as_VMReg() );
reg_def R12_H ( SOC, SOC, Op_RegI, 12, r12->as_VMReg()->next());
reg_def R13 ( SOC, SOC, Op_RegI, 13, r13->as_VMReg() );
reg_def R13_H ( SOC, SOC, Op_RegI, 13, r13->as_VMReg()->next());
reg_def R14 ( SOC, SOC, Op_RegI, 14, r14->as_VMReg() );
reg_def R14_H ( SOC, SOC, Op_RegI, 14, r14->as_VMReg()->next());
reg_def R15 ( SOC, SOC, Op_RegI, 15, r15->as_VMReg() );
reg_def R15_H ( SOC, SOC, Op_RegI, 15, r15->as_VMReg()->next());
reg_def R16 ( SOC, SOC, Op_RegI, 16, r16->as_VMReg() );
reg_def R16_H ( SOC, SOC, Op_RegI, 16, r16->as_VMReg()->next());
reg_def R17 ( SOC, SOC, Op_RegI, 17, r17->as_VMReg() );
reg_def R17_H ( SOC, SOC, Op_RegI, 17, r17->as_VMReg()->next());
reg_def R18 ( SOC, SOC, Op_RegI, 18, r18_tls->as_VMReg() );
reg_def R18_H ( SOC, SOC, Op_RegI, 18, r18_tls->as_VMReg()->next());
reg_def R19 ( SOC, SOE, Op_RegI, 19, r19->as_VMReg() );
reg_def R19_H ( SOC, SOE, Op_RegI, 19, r19->as_VMReg()->next());
reg_def R20 ( SOC, SOE, Op_RegI, 20, r20->as_VMReg() ); // caller esp
reg_def R20_H ( SOC, SOE, Op_RegI, 20, r20->as_VMReg()->next());
reg_def R21 ( SOC, SOE, Op_RegI, 21, r21->as_VMReg() );
reg_def R21_H ( SOC, SOE, Op_RegI, 21, r21->as_VMReg()->next());
reg_def R22 ( SOC, SOE, Op_RegI, 22, r22->as_VMReg() );
reg_def R22_H ( SOC, SOE, Op_RegI, 22, r22->as_VMReg()->next());
reg_def R23 ( SOC, SOE, Op_RegI, 23, r23->as_VMReg() );
reg_def R23_H ( SOC, SOE, Op_RegI, 23, r23->as_VMReg()->next());
reg_def R24 ( SOC, SOE, Op_RegI, 24, r24->as_VMReg() );
reg_def R24_H ( SOC, SOE, Op_RegI, 24, r24->as_VMReg()->next());
reg_def R25 ( SOC, SOE, Op_RegI, 25, r25->as_VMReg() );
reg_def R25_H ( SOC, SOE, Op_RegI, 25, r25->as_VMReg()->next());
reg_def R26 ( SOC, SOE, Op_RegI, 26, r26->as_VMReg() );
reg_def R26_H ( SOC, SOE, Op_RegI, 26, r26->as_VMReg()->next());
reg_def R27 ( SOC, SOE, Op_RegI, 27, r27->as_VMReg() ); // heapbase
reg_def R27_H ( SOC, SOE, Op_RegI, 27, r27->as_VMReg()->next());
reg_def R28 ( NS, SOE, Op_RegI, 28, r28->as_VMReg() ); // thread
reg_def R28_H ( NS, SOE, Op_RegI, 28, r28->as_VMReg()->next());
reg_def R29 ( NS, NS, Op_RegI, 29, r29->as_VMReg() ); // fp
reg_def R29_H ( NS, NS, Op_RegI, 29, r29->as_VMReg()->next());
reg_def R30 ( NS, NS, Op_RegI, 30, r30->as_VMReg() ); // lr
reg_def R30_H ( NS, NS, Op_RegI, 30, r30->as_VMReg()->next());
reg_def R31 ( NS, NS, Op_RegI, 31, r31_sp->as_VMReg() ); // sp
reg_def R31_H ( NS, NS, Op_RegI, 31, r31_sp->as_VMReg()->next());
// ----------------------------
// Float/Double/Vector Registers
// ----------------------------
// Double Registers
// The rules of ADL require that double registers be defined in pairs.
// Each pair must be two 32-bit values, but not necessarily a pair of
// single float registers. In each pair, ADLC-assigned register numbers
// must be adjacent, with the lower number even. Finally, when the
// CPU stores such a register pair to memory, the word associated with
// the lower ADLC-assigned number must be stored to the lower address.
// AArch64 has 32 floating-point registers. Each NEON register is 128
// bits wide and can hold, for example, a vector of 4 * 32 bit floats
// or 2 * 64 bit doubles. We currently only use the first float or
// double element of the vector.
// For Java use, float registers v0-v15 are always save on call (even
// though the platform ABI treats v8-v15 as callee save). Float
// registers v16-v31 are SOC as per the platform spec.
// For SVE vector registers, we simply extend vector register size to 8
// 'logical' slots. This is nominally 256 bits but it actually covers
// all possible 'physical' SVE vector register lengths from 128 ~ 2048
// bits. The 'physical' SVE vector register length is detected during
// startup, so the register allocator is able to identify the correct
// number of bytes needed for an SVE spill/unspill.
// Note that a vector register with 4 slots denotes a 128-bit NEON
// register allowing it to be distinguished from the corresponding SVE
// vector register when the SVE vector length is 128 bits.
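//
// For example, V0/V0_H below form the even-aligned pair backing a
// double held in v0, while V0 through V0_K supply the four 32-bit
// slots of a 128-bit NEON value held in v0.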
reg_def V0 ( SOC, SOC, Op_RegF, 0, v0->as_VMReg() );
reg_def V0_H ( SOC, SOC, Op_RegF, 0, v0->as_VMReg()->next() );
reg_def V0_J ( SOC, SOC, Op_RegF, 0, v0->as_VMReg()->next(2) );
reg_def V0_K ( SOC, SOC, Op_RegF, 0, v0->as_VMReg()->next(3) );
reg_def V1 ( SOC, SOC, Op_RegF, 1, v1->as_VMReg() );
reg_def V1_H ( SOC, SOC, Op_RegF, 1, v1->as_VMReg()->next() );
reg_def V1_J ( SOC, SOC, Op_RegF, 1, v1->as_VMReg()->next(2) );
reg_def V1_K ( SOC, SOC, Op_RegF, 1, v1->as_VMReg()->next(3) );
reg_def V2 ( SOC, SOC, Op_RegF, 2, v2->as_VMReg() );
reg_def V2_H ( SOC, SOC, Op_RegF, 2, v2->as_VMReg()->next() );
reg_def V2_J ( SOC, SOC, Op_RegF, 2, v2->as_VMReg()->next(2) );
reg_def V2_K ( SOC, SOC, Op_RegF, 2, v2->as_VMReg()->next(3) );
reg_def V3 ( SOC, SOC, Op_RegF, 3, v3->as_VMReg() );
reg_def V3_H ( SOC, SOC, Op_RegF, 3, v3->as_VMReg()->next() );
reg_def V3_J ( SOC, SOC, Op_RegF, 3, v3->as_VMReg()->next(2) );
reg_def V3_K ( SOC, SOC, Op_RegF, 3, v3->as_VMReg()->next(3) );
reg_def V4 ( SOC, SOC, Op_RegF, 4, v4->as_VMReg() );
reg_def V4_H ( SOC, SOC, Op_RegF, 4, v4->as_VMReg()->next() );
reg_def V4_J ( SOC, SOC, Op_RegF, 4, v4->as_VMReg()->next(2) );
reg_def V4_K ( SOC, SOC, Op_RegF, 4, v4->as_VMReg()->next(3) );
reg_def V5 ( SOC, SOC, Op_RegF, 5, v5->as_VMReg() );
reg_def V5_H ( SOC, SOC, Op_RegF, 5, v5->as_VMReg()->next() );
reg_def V5_J ( SOC, SOC, Op_RegF, 5, v5->as_VMReg()->next(2) );
reg_def V5_K ( SOC, SOC, Op_RegF, 5, v5->as_VMReg()->next(3) );
reg_def V6 ( SOC, SOC, Op_RegF, 6, v6->as_VMReg() );
reg_def V6_H ( SOC, SOC, Op_RegF, 6, v6->as_VMReg()->next() );
reg_def V6_J ( SOC, SOC, Op_RegF, 6, v6->as_VMReg()->next(2) );
reg_def V6_K ( SOC, SOC, Op_RegF, 6, v6->as_VMReg()->next(3) );
reg_def V7 ( SOC, SOC, Op_RegF, 7, v7->as_VMReg() );
reg_def V7_H ( SOC, SOC, Op_RegF, 7, v7->as_VMReg()->next() );
reg_def V7_J ( SOC, SOC, Op_RegF, 7, v7->as_VMReg()->next(2) );
reg_def V7_K ( SOC, SOC, Op_RegF, 7, v7->as_VMReg()->next(3) );
reg_def V8 ( SOC, SOE, Op_RegF, 8, v8->as_VMReg() );
reg_def V8_H ( SOC, SOE, Op_RegF, 8, v8->as_VMReg()->next() );
reg_def V8_J ( SOC, SOC, Op_RegF, 8, v8->as_VMReg()->next(2) );
reg_def V8_K ( SOC, SOC, Op_RegF, 8, v8->as_VMReg()->next(3) );
reg_def V9 ( SOC, SOE, Op_RegF, 9, v9->as_VMReg() );
reg_def V9_H ( SOC, SOE, Op_RegF, 9, v9->as_VMReg()->next() );
reg_def V9_J ( SOC, SOC, Op_RegF, 9, v9->as_VMReg()->next(2) );
reg_def V9_K ( SOC, SOC, Op_RegF, 9, v9->as_VMReg()->next(3) );
reg_def V10 ( SOC, SOE, Op_RegF, 10, v10->as_VMReg() );
reg_def V10_H ( SOC, SOE, Op_RegF, 10, v10->as_VMReg()->next() );
reg_def V10_J ( SOC, SOC, Op_RegF, 10, v10->as_VMReg()->next(2) );
reg_def V10_K ( SOC, SOC, Op_RegF, 10, v10->as_VMReg()->next(3) );
reg_def V11 ( SOC, SOE, Op_RegF, 11, v11->as_VMReg() );
reg_def V11_H ( SOC, SOE, Op_RegF, 11, v11->as_VMReg()->next() );
reg_def V11_J ( SOC, SOC, Op_RegF, 11, v11->as_VMReg()->next(2) );
reg_def V11_K ( SOC, SOC, Op_RegF, 11, v11->as_VMReg()->next(3) );
reg_def V12 ( SOC, SOE, Op_RegF, 12, v12->as_VMReg() );
reg_def V12_H ( SOC, SOE, Op_RegF, 12, v12->as_VMReg()->next() );
reg_def V12_J ( SOC, SOC, Op_RegF, 12, v12->as_VMReg()->next(2) );
reg_def V12_K ( SOC, SOC, Op_RegF, 12, v12->as_VMReg()->next(3) );
reg_def V13 ( SOC, SOE, Op_RegF, 13, v13->as_VMReg() );
reg_def V13_H ( SOC, SOE, Op_RegF, 13, v13->as_VMReg()->next() );
reg_def V13_J ( SOC, SOC, Op_RegF, 13, v13->as_VMReg()->next(2) );
reg_def V13_K ( SOC, SOC, Op_RegF, 13, v13->as_VMReg()->next(3) );
reg_def V14 ( SOC, SOE, Op_RegF, 14, v14->as_VMReg() );
reg_def V14_H ( SOC, SOE, Op_RegF, 14, v14->as_VMReg()->next() );
reg_def V14_J ( SOC, SOC, Op_RegF, 14, v14->as_VMReg()->next(2) );
reg_def V14_K ( SOC, SOC, Op_RegF, 14, v14->as_VMReg()->next(3) );
reg_def V15 ( SOC, SOE, Op_RegF, 15, v15->as_VMReg() );
reg_def V15_H ( SOC, SOE, Op_RegF, 15, v15->as_VMReg()->next() );
reg_def V15_J ( SOC, SOC, Op_RegF, 15, v15->as_VMReg()->next(2) );
reg_def V15_K ( SOC, SOC, Op_RegF, 15, v15->as_VMReg()->next(3) );
reg_def V16 ( SOC, SOC, Op_RegF, 16, v16->as_VMReg() );
reg_def V16_H ( SOC, SOC, Op_RegF, 16, v16->as_VMReg()->next() );
reg_def V16_J ( SOC, SOC, Op_RegF, 16, v16->as_VMReg()->next(2) );
reg_def V16_K ( SOC, SOC, Op_RegF, 16, v16->as_VMReg()->next(3) );
reg_def V17 ( SOC, SOC, Op_RegF, 17, v17->as_VMReg() );
reg_def V17_H ( SOC, SOC, Op_RegF, 17, v17->as_VMReg()->next() );
reg_def V17_J ( SOC, SOC, Op_RegF, 17, v17->as_VMReg()->next(2) );
reg_def V17_K ( SOC, SOC, Op_RegF, 17, v17->as_VMReg()->next(3) );
reg_def V18 ( SOC, SOC, Op_RegF, 18, v18->as_VMReg() );
reg_def V18_H ( SOC, SOC, Op_RegF, 18, v18->as_VMReg()->next() );
reg_def V18_J ( SOC, SOC, Op_RegF, 18, v18->as_VMReg()->next(2) );
reg_def V18_K ( SOC, SOC, Op_RegF, 18, v18->as_VMReg()->next(3) );
reg_def V19 ( SOC, SOC, Op_RegF, 19, v19->as_VMReg() );
reg_def V19_H ( SOC, SOC, Op_RegF, 19, v19->as_VMReg()->next() );
reg_def V19_J ( SOC, SOC, Op_RegF, 19, v19->as_VMReg()->next(2) );
reg_def V19_K ( SOC, SOC, Op_RegF, 19, v19->as_VMReg()->next(3) );
reg_def V20 ( SOC, SOC, Op_RegF, 20, v20->as_VMReg() );
reg_def V20_H ( SOC, SOC, Op_RegF, 20, v20->as_VMReg()->next() );
reg_def V20_J ( SOC, SOC, Op_RegF, 20, v20->as_VMReg()->next(2) );
reg_def V20_K ( SOC, SOC, Op_RegF, 20, v20->as_VMReg()->next(3) );
reg_def V21 ( SOC, SOC, Op_RegF, 21, v21->as_VMReg() );
reg_def V21_H ( SOC, SOC, Op_RegF, 21, v21->as_VMReg()->next() );
reg_def V21_J ( SOC, SOC, Op_RegF, 21, v21->as_VMReg()->next(2) );
reg_def V21_K ( SOC, SOC, Op_RegF, 21, v21->as_VMReg()->next(3) );
reg_def V22 ( SOC, SOC, Op_RegF, 22, v22->as_VMReg() );
reg_def V22_H ( SOC, SOC, Op_RegF, 22, v22->as_VMReg()->next() );
reg_def V22_J ( SOC, SOC, Op_RegF, 22, v22->as_VMReg()->next(2) );
reg_def V22_K ( SOC, SOC, Op_RegF, 22, v22->as_VMReg()->next(3) );
reg_def V23 ( SOC, SOC, Op_RegF, 23, v23->as_VMReg() );
reg_def V23_H ( SOC, SOC, Op_RegF, 23, v23->as_VMReg()->next() );
reg_def V23_J ( SOC, SOC, Op_RegF, 23, v23->as_VMReg()->next(2) );
reg_def V23_K ( SOC, SOC, Op_RegF, 23, v23->as_VMReg()->next(3) );
reg_def V24 ( SOC, SOC, Op_RegF, 24, v24->as_VMReg() );
reg_def V24_H ( SOC, SOC, Op_RegF, 24, v24->as_VMReg()->next() );
reg_def V24_J ( SOC, SOC, Op_RegF, 24, v24->as_VMReg()->next(2) );
reg_def V24_K ( SOC, SOC, Op_RegF, 24, v24->as_VMReg()->next(3) );
reg_def V25 ( SOC, SOC, Op_RegF, 25, v25->as_VMReg() );
reg_def V25_H ( SOC, SOC, Op_RegF, 25, v25->as_VMReg()->next() );
reg_def V25_J ( SOC, SOC, Op_RegF, 25, v25->as_VMReg()->next(2) );
reg_def V25_K ( SOC, SOC, Op_RegF, 25, v25->as_VMReg()->next(3) );
reg_def V26 ( SOC, SOC, Op_RegF, 26, v26->as_VMReg() );
reg_def V26_H ( SOC, SOC, Op_RegF, 26, v26->as_VMReg()->next() );
reg_def V26_J ( SOC, SOC, Op_RegF, 26, v26->as_VMReg()->next(2) );
reg_def V26_K ( SOC, SOC, Op_RegF, 26, v26->as_VMReg()->next(3) );
reg_def V27 ( SOC, SOC, Op_RegF, 27, v27->as_VMReg() );
reg_def V27_H ( SOC, SOC, Op_RegF, 27, v27->as_VMReg()->next() );
reg_def V27_J ( SOC, SOC, Op_RegF, 27, v27->as_VMReg()->next(2) );
reg_def V27_K ( SOC, SOC, Op_RegF, 27, v27->as_VMReg()->next(3) );
reg_def V28 ( SOC, SOC, Op_RegF, 28, v28->as_VMReg() );
reg_def V28_H ( SOC, SOC, Op_RegF, 28, v28->as_VMReg()->next() );
reg_def V28_J ( SOC, SOC, Op_RegF, 28, v28->as_VMReg()->next(2) );
reg_def V28_K ( SOC, SOC, Op_RegF, 28, v28->as_VMReg()->next(3) );
reg_def V29 ( SOC, SOC, Op_RegF, 29, v29->as_VMReg() );
reg_def V29_H ( SOC, SOC, Op_RegF, 29, v29->as_VMReg()->next() );
reg_def V29_J ( SOC, SOC, Op_RegF, 29, v29->as_VMReg()->next(2) );
reg_def V29_K ( SOC, SOC, Op_RegF, 29, v29->as_VMReg()->next(3) );
reg_def V30 ( SOC, SOC, Op_RegF, 30, v30->as_VMReg() );
reg_def V30_H ( SOC, SOC, Op_RegF, 30, v30->as_VMReg()->next() );
reg_def V30_J ( SOC, SOC, Op_RegF, 30, v30->as_VMReg()->next(2) );
reg_def V30_K ( SOC, SOC, Op_RegF, 30, v30->as_VMReg()->next(3) );
reg_def V31 ( SOC, SOC, Op_RegF, 31, v31->as_VMReg() );
reg_def V31_H ( SOC, SOC, Op_RegF, 31, v31->as_VMReg()->next() );
reg_def V31_J ( SOC, SOC, Op_RegF, 31, v31->as_VMReg()->next(2) );
reg_def V31_K ( SOC, SOC, Op_RegF, 31, v31->as_VMReg()->next(3) );
// ----------------------------
// SVE Predicate Registers
// ----------------------------
reg_def P0 (SOC, SOC, Op_RegVectMask, 0, p0->as_VMReg());
reg_def P1 (SOC, SOC, Op_RegVectMask, 1, p1->as_VMReg());
reg_def P2 (SOC, SOC, Op_RegVectMask, 2, p2->as_VMReg());
reg_def P3 (SOC, SOC, Op_RegVectMask, 3, p3->as_VMReg());
reg_def P4 (SOC, SOC, Op_RegVectMask, 4, p4->as_VMReg());
reg_def P5 (SOC, SOC, Op_RegVectMask, 5, p5->as_VMReg());
reg_def P6 (SOC, SOC, Op_RegVectMask, 6, p6->as_VMReg());
reg_def P7 (SOC, SOC, Op_RegVectMask, 7, p7->as_VMReg());
reg_def P8 (SOC, SOC, Op_RegVectMask, 8, p8->as_VMReg());
reg_def P9 (SOC, SOC, Op_RegVectMask, 9, p9->as_VMReg());
reg_def P10 (SOC, SOC, Op_RegVectMask, 10, p10->as_VMReg());
reg_def P11 (SOC, SOC, Op_RegVectMask, 11, p11->as_VMReg());
reg_def P12 (SOC, SOC, Op_RegVectMask, 12, p12->as_VMReg());
reg_def P13 (SOC, SOC, Op_RegVectMask, 13, p13->as_VMReg());
reg_def P14 (SOC, SOC, Op_RegVectMask, 14, p14->as_VMReg());
reg_def P15 (SOC, SOC, Op_RegVectMask, 15, p15->as_VMReg());
// ----------------------------
// Special Registers
// ----------------------------
// The AArch64 CPSR status flag register is not directly accessible as
// an instruction operand. The FPSR status flag register is a system
// register which can be written/read using MSR/MRS but again does not
// appear as an operand (a code identifying the FPSR occurs as an
// immediate value in the instruction).
reg_def RFLAGS(SOC, SOC, 0, 32, VMRegImpl::Bad());
// Specify priority of register selection within phases of register
// allocation. Highest priority is first. A useful heuristic is to
// give registers a low priority when they are required by machine
// instructions, like EAX and EDX on I486, and choose no-save registers
// before save-on-call, & save-on-call before save-on-entry. Registers
// which participate in fixed calling sequences should come last.
// Registers which are used as pairs must fall on an even boundary.
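// Hence chunk0 below lists the volatile scratch/temp registers first,
// then the argument registers, then the save-on-entry registers, and
// finally the non-allocatable system registers.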
alloc_class chunk0(
// volatiles
R10, R10_H,
R11, R11_H,
R12, R12_H,
R13, R13_H,
R14, R14_H,
R15, R15_H,
R16, R16_H,
R17, R17_H,
R18, R18_H,
// arg registers
R0, R0_H,
R1, R1_H,
R2, R2_H,
R3, R3_H,
R4, R4_H,
R5, R5_H,
R6, R6_H,
R7, R7_H,
// non-volatiles
R19, R19_H,
R20, R20_H,
R21, R21_H,
R22, R22_H,
R23, R23_H,
R24, R24_H,
R25, R25_H,
R26, R26_H,
// non-allocatable registers
R27, R27_H, // heapbase
R28, R28_H, // thread
R29, R29_H, // fp
R30, R30_H, // lr
R31, R31_H, // sp
R8, R8_H, // rscratch1
R9, R9_H, // rscratch2
);
alloc_class chunk1(
// no save
V16, V16_H, V16_J, V16_K,
V17, V17_H, V17_J, V17_K,
V18, V18_H, V18_J, V18_K,
V19, V19_H, V19_J, V19_K,
V20, V20_H, V20_J, V20_K,
V21, V21_H, V21_J, V21_K,
V22, V22_H, V22_J, V22_K,
V23, V23_H, V23_J, V23_K,
V24, V24_H, V24_J, V24_K,
V25, V25_H, V25_J, V25_K,
V26, V26_H, V26_J, V26_K,
V27, V27_H, V27_J, V27_K,
V28, V28_H, V28_J, V28_K,
V29, V29_H, V29_J, V29_K,
V30, V30_H, V30_J, V30_K,
V31, V31_H, V31_J, V31_K,
// arg registers
V0, V0_H, V0_J, V0_K,
V1, V1_H, V1_J, V1_K,
V2, V2_H, V2_J, V2_K,
V3, V3_H, V3_J, V3_K,
V4, V4_H, V4_J, V4_K,
V5, V5_H, V5_J, V5_K,
V6, V6_H, V6_J, V6_K,
V7, V7_H, V7_J, V7_K,
// non-volatiles
V8, V8_H, V8_J, V8_K,
V9, V9_H, V9_J, V9_K,
V10, V10_H, V10_J, V10_K,
V11, V11_H, V11_J, V11_K,
V12, V12_H, V12_J, V12_K,
V13, V13_H, V13_J, V13_K,
V14, V14_H, V14_J, V14_K,
V15, V15_H, V15_J, V15_K,
);
alloc_class chunk2 (
// Governing predicates for load/store and arithmetic
P0,
P1,
P2,
P3,
P4,
P5,
P6,
// Extra predicates
P8,
P9,
P10,
P11,
P12,
P13,
P14,
P15,
// Preserved for all-true predicate
P7,
);
alloc_class chunk3(RFLAGS);
//----------Architecture Description Register Classes--------------------------
// Several register classes are automatically defined based upon information in
// this architecture description.
// 1) reg_class inline_cache_reg ( /* as def'd in frame section */ )
// 2) reg_class stack_slots( /* one chunk of stack-based "registers" */ )
//
// Class for all 32 bit general purpose registers
reg_class all_reg32(
R0,
R1,
R2,
R3,
R4,
R5,
R6,
R7,
R10,
R11,
R12,
R13,
R14,
R15,
R16,
R17,
R18,
R19,
R20,
R21,
R22,
R23,
R24,
R25,
R26,
R27,
R28,
R29,
R30,
R31
);
// Class for all 32 bit integer registers (excluding SP which
// will never be used as an integer register)
reg_class any_reg32 %{
return _ANY_REG32_mask;
%}
// Singleton class for R0 int register
reg_class int_r0_reg(R0);
// Singleton class for R2 int register
reg_class int_r2_reg(R2);
// Singleton class for R3 int register
reg_class int_r3_reg(R3);
// Singleton class for R4 int register
reg_class int_r4_reg(R4);
// Singleton class for R31 int register
reg_class int_r31_reg(R31);
// Class for all 64 bit general purpose registers
reg_class all_reg(
R0, R0_H,
R1, R1_H,
R2, R2_H,
R3, R3_H,
R4, R4_H,
R5, R5_H,
R6, R6_H,
R7, R7_H,
R10, R10_H,
R11, R11_H,
R12, R12_H,
R13, R13_H,
R14, R14_H,
R15, R15_H,
R16, R16_H,
R17, R17_H,
R18, R18_H,
R19, R19_H,
R20, R20_H,
R21, R21_H,
R22, R22_H,
R23, R23_H,
R24, R24_H,
R25, R25_H,
R26, R26_H,
R27, R27_H,
R28, R28_H,
R29, R29_H,
R30, R30_H,
R31, R31_H
);
// Class for all long integer registers (including SP)
reg_class any_reg %{
return _ANY_REG_mask;
%}
// Class for non-allocatable 32 bit registers
reg_class non_allocatable_reg32(
#ifdef R18_RESERVED
// See comment in register_aarch64.hpp
R18, // tls on Windows
#endif
R28, // thread
R30, // lr
R31 // sp
);
// Class for non-allocatable 64 bit registers
reg_class non_allocatable_reg(
#ifdef R18_RESERVED
// See comment in register_aarch64.hpp
R18, R18_H, // tls on Windows, platform register on macOS
#endif
R28, R28_H, // thread
R30, R30_H, // lr
R31, R31_H // sp
);
// Class for all non-special integer registers
reg_class no_special_reg32 %{
return _NO_SPECIAL_REG32_mask;
%}
// Class for all non-special long integer registers
reg_class no_special_reg %{
return _NO_SPECIAL_REG_mask;
%}
// Class for 64 bit register r0
reg_class r0_reg(
R0, R0_H
);
// Class for 64 bit register r1
reg_class r1_reg(
R1, R1_H
);
// Class for 64 bit register r2
reg_class r2_reg(
R2, R2_H
);
// Class for 64 bit register r3
reg_class r3_reg(
R3, R3_H
);
// Class for 64 bit register r4
reg_class r4_reg(
R4, R4_H
);
// Class for 64 bit register r5
reg_class r5_reg(
R5, R5_H
);
// Class for 64 bit register r10
reg_class r10_reg(
R10, R10_H
);
// Class for 64 bit register r11
reg_class r11_reg(
R11, R11_H
);
// Class for method register
reg_class method_reg(
R12, R12_H
);
// Class for thread register
reg_class thread_reg(
R28, R28_H
);
// Class for frame pointer register
reg_class fp_reg(
R29, R29_H
);
// Class for link register
reg_class lr_reg(
R30, R30_H
);
// Class for long sp register
reg_class sp_reg(
R31, R31_H
);
// Class for all pointer registers
reg_class ptr_reg %{
return _PTR_REG_mask;
%}
// Class for all non_special pointer registers
reg_class no_special_ptr_reg %{
return _NO_SPECIAL_PTR_REG_mask;
%}
// Class for all non_special pointer registers (excluding rfp)
reg_class no_special_no_rfp_ptr_reg %{
return _NO_SPECIAL_NO_RFP_PTR_REG_mask;
%}
// Class for all float registers
reg_class float_reg(
V0,
V1,
V2,
V3,
V4,
V5,
V6,
V7,
V8,
V9,
V10,
V11,
V12,
V13,
V14,
V15,
V16,
V17,
V18,
V19,
V20,
V21,
V22,
V23,
V24,
V25,
V26,
V27,
V28,
V29,
V30,
V31
);
// Double precision float registers have virtual `high halves' that
// are needed by the allocator.
// Class for all double registers
reg_class double_reg(
V0, V0_H,
V1, V1_H,
V2, V2_H,
V3, V3_H,
V4, V4_H,
V5, V5_H,
V6, V6_H,
V7, V7_H,
V8, V8_H,
V9, V9_H,
V10, V10_H,
V11, V11_H,
V12, V12_H,
V13, V13_H,
V14, V14_H,
V15, V15_H,
V16, V16_H,
V17, V17_H,
V18, V18_H,
V19, V19_H,
V20, V20_H,
V21, V21_H,
V22, V22_H,
V23, V23_H,
V24, V24_H,
V25, V25_H,
V26, V26_H,
V27, V27_H,
V28, V28_H,
V29, V29_H,
V30, V30_H,
V31, V31_H
);
// Class for all SVE vector registers.
reg_class vectora_reg (
V0, V0_H, V0_J, V0_K,
V1, V1_H, V1_J, V1_K,
V2, V2_H, V2_J, V2_K,
V3, V3_H, V3_J, V3_K,
V4, V4_H, V4_J, V4_K,
V5, V5_H, V5_J, V5_K,
V6, V6_H, V6_J, V6_K,
V7, V7_H, V7_J, V7_K,
V8, V8_H, V8_J, V8_K,
V9, V9_H, V9_J, V9_K,
V10, V10_H, V10_J, V10_K,
V11, V11_H, V11_J, V11_K,
V12, V12_H, V12_J, V12_K,
V13, V13_H, V13_J, V13_K,
V14, V14_H, V14_J, V14_K,
V15, V15_H, V15_J, V15_K,
V16, V16_H, V16_J, V16_K,
V17, V17_H, V17_J, V17_K,
V18, V18_H, V18_J, V18_K,
V19, V19_H, V19_J, V19_K,
V20, V20_H, V20_J, V20_K,
V21, V21_H, V21_J, V21_K,
V22, V22_H, V22_J, V22_K,
V23, V23_H, V23_J, V23_K,
V24, V24_H, V24_J, V24_K,
V25, V25_H, V25_J, V25_K,
V26, V26_H, V26_J, V26_K,
V27, V27_H, V27_J, V27_K,
V28, V28_H, V28_J, V28_K,
V29, V29_H, V29_J, V29_K,
V30, V30_H, V30_J, V30_K,
V31, V31_H, V31_J, V31_K,
);
// Class for all 64bit vector registers
reg_class vectord_reg(
V0, V0_H,
V1, V1_H,
V2, V2_H,
V3, V3_H,
V4, V4_H,
V5, V5_H,
V6, V6_H,
V7, V7_H,
V8, V8_H,
V9, V9_H,
V10, V10_H,
V11, V11_H,
V12, V12_H,
V13, V13_H,
V14, V14_H,
V15, V15_H,
V16, V16_H,
V17, V17_H,
V18, V18_H,
V19, V19_H,
V20, V20_H,
V21, V21_H,
V22, V22_H,
V23, V23_H,
V24, V24_H,
V25, V25_H,
V26, V26_H,
V27, V27_H,
V28, V28_H,
V29, V29_H,
V30, V30_H,
V31, V31_H
);
// Class for all 128bit vector registers
reg_class vectorx_reg(
V0, V0_H, V0_J, V0_K,
V1, V1_H, V1_J, V1_K,
V2, V2_H, V2_J, V2_K,
V3, V3_H, V3_J, V3_K,
V4, V4_H, V4_J, V4_K,
V5, V5_H, V5_J, V5_K,
V6, V6_H, V6_J, V6_K,
V7, V7_H, V7_J, V7_K,
V8, V8_H, V8_J, V8_K,
V9, V9_H, V9_J, V9_K,
V10, V10_H, V10_J, V10_K,
V11, V11_H, V11_J, V11_K,
V12, V12_H, V12_J, V12_K,
V13, V13_H, V13_J, V13_K,
V14, V14_H, V14_J, V14_K,
V15, V15_H, V15_J, V15_K,
V16, V16_H, V16_J, V16_K,
V17, V17_H, V17_J, V17_K,
V18, V18_H, V18_J, V18_K,
V19, V19_H, V19_J, V19_K,
V20, V20_H, V20_J, V20_K,
V21, V21_H, V21_J, V21_K,
V22, V22_H, V22_J, V22_K,
V23, V23_H, V23_J, V23_K,
V24, V24_H, V24_J, V24_K,
V25, V25_H, V25_J, V25_K,
V26, V26_H, V26_J, V26_K,
V27, V27_H, V27_J, V27_K,
V28, V28_H, V28_J, V28_K,
V29, V29_H, V29_J, V29_K,
V30, V30_H, V30_J, V30_K,
V31, V31_H, V31_J, V31_K
);
// Class for 128 bit register v0
reg_class v0_reg(
V0, V0_H
);
// Class for 128 bit register v1
reg_class v1_reg(
V1, V1_H
);
// Class for 128 bit register v2
reg_class v2_reg(
V2, V2_H
);
// Class for 128 bit register v3
reg_class v3_reg(
V3, V3_H
);
// Class for 128 bit register v4
reg_class v4_reg(
V4, V4_H
);
// Class for 128 bit register v5
reg_class v5_reg(
V5, V5_H
);
// Class for 128 bit register v6
reg_class v6_reg(
V6, V6_H
);
// Class for 128 bit register v7
reg_class v7_reg(
V7, V7_H
);
// Class for 128 bit register v8
reg_class v8_reg(
V8, V8_H
);
// Class for 128 bit register v9
reg_class v9_reg(
V9, V9_H
);
// Class for 128 bit register v10
reg_class v10_reg(
V10, V10_H
);
// Class for 128 bit register v11
reg_class v11_reg(
V11, V11_H
);
// Class for 128 bit register v12
reg_class v12_reg(
V12, V12_H
);
// Class for 128 bit register v13
reg_class v13_reg(
V13, V13_H
);
// Class for 128 bit register v14
reg_class v14_reg(
V14, V14_H
);
// Class for 128 bit register v15
reg_class v15_reg(
V15, V15_H
);
// Class for 128 bit register v16
reg_class v16_reg(
V16, V16_H
);
// Class for 128 bit register v17
reg_class v17_reg(
V17, V17_H
);
// Class for 128 bit register v18
reg_class v18_reg(
V18, V18_H
);
// Class for 128 bit register v19
reg_class v19_reg(
V19, V19_H
);
// Class for 128 bit register v20
reg_class v20_reg(
V20, V20_H
);
// Class for 128 bit register v21
reg_class v21_reg(
V21, V21_H
);
// Class for 128 bit register v22
reg_class v22_reg(
V22, V22_H
);
// Class for 128 bit register v23
reg_class v23_reg(
V23, V23_H
);
// Class for 128 bit register v24
reg_class v24_reg(
V24, V24_H
);
// Class for 128 bit register v25
reg_class v25_reg(
V25, V25_H
);
// Class for 128 bit register v26
reg_class v26_reg(
V26, V26_H
);
// Class for 128 bit register v27
reg_class v27_reg(
V27, V27_H
);
// Class for 128 bit register v28
reg_class v28_reg(
V28, V28_H
);
// Class for 128 bit register v29
reg_class v29_reg(
V29, V29_H
);
// Class for 128 bit register v30
reg_class v30_reg(
V30, V30_H
);
// Class for 128 bit register v31
reg_class v31_reg(
V31, V31_H
);
// Class for all SVE predicate registers.
reg_class pr_reg (
P0,
P1,
P2,
P3,
P4,
P5,
P6,
// P7, non-allocatable, preserved with all elements preset to TRUE.
P8,
P9,
P10,
P11,
P12,
P13,
P14,
P15
);
// Class for SVE governing predicate registers, which are used
// to determine the active elements of a predicated instruction.
reg_class gov_pr (
P0,
P1,
P2,
P3,
P4,
P5,
P6,
// P7, non-allocatable, preserved with all elements preset to TRUE.
);
reg_class p0_reg(P0);
reg_class p1_reg(P1);
// Singleton class for condition codes
reg_class int_flags(RFLAGS);
%}
//----------DEFINITION BLOCK---------------------------------------------------
// Define name --> value mappings to inform the ADLC of an integer valued name
// Current support includes integer values in the range [0, 0x7FFFFFFF]
// Format:
// int_def <name> ( <int_value>, <expression>);
// Generated Code in ad_<arch>.hpp
// #define <name> (<expression>)
// // value == <int_value>
// Generated code in ad_<arch>.cpp adlc_verification()
// assert( <name> == <int_value>, "Expect (<expression>) to equal <int_value>");
//
// We follow the ppc-aix port in using a simple cost model which ranks
// register operations as cheap, memory ops as more expensive and
// branches as most expensive. The first two have a low as well as a
// normal cost. A huge cost appears to be a way of saying don't do
// something.
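// For example, the INSN_COST entry below generates
//
// #define INSN_COST (100)
//
// in ad_aarch64.hpp, together with a matching adlc_verification()
// assert in ad_aarch64.cpp.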
definitions %{
// The default cost (of a register move instruction).
int_def INSN_COST ( 100, 100);
int_def BRANCH_COST ( 200, 2 * INSN_COST);
int_def CALL_COST ( 200, 2 * INSN_COST);
int_def VOLATILE_REF_COST ( 1000, 10 * INSN_COST);
%}
//----------SOURCE BLOCK-------------------------------------------------------
// This is a block of C++ code which provides values, functions, and
// definitions necessary in the rest of the architecture description
source_hpp %{
#include "asm/macroAssembler.hpp"
#include "gc/shared/barrierSetAssembler.hpp"
#include "gc/shared/cardTable.hpp"
#include "gc/shared/cardTableBarrierSet.hpp"
#include "gc/shared/collectedHeap.hpp"
#include "opto/addnode.hpp"
#include "opto/convertnode.hpp"
#include "runtime/objectMonitor.hpp"
extern RegMask _ANY_REG32_mask;
extern RegMask _ANY_REG_mask;
extern RegMask _PTR_REG_mask;
extern RegMask _NO_SPECIAL_REG32_mask;
extern RegMask _NO_SPECIAL_REG_mask;
extern RegMask _NO_SPECIAL_PTR_REG_mask;
extern RegMask _NO_SPECIAL_NO_RFP_PTR_REG_mask;
class CallStubImpl {
//--------------------------------------------------------------
//---< Used for optimization in Compile::shorten_branches >---
//--------------------------------------------------------------
public:
// Size of call trampoline stub.
static uint size_call_trampoline() {
return 0; // no call trampolines on this platform
}
// number of relocations needed by a call trampoline stub
static uint reloc_call_trampoline() {
return 0; // no call trampolines on this platform
}
};
class HandlerImpl {
public:
static int emit_exception_handler(C2_MacroAssembler *masm);
static int emit_deopt_handler(C2_MacroAssembler* masm);
static uint size_exception_handler() {
return MacroAssembler::far_codestub_branch_size();
}
static uint size_deopt_handler() {
// count one adr and one far branch instruction
return NativeInstruction::instruction_size + MacroAssembler::far_codestub_branch_size();
}
};
class Node::PD {
public:
enum NodeFlags {
_last_flag = Node::_last_flag
};
};
bool is_CAS(int opcode, bool maybe_volatile);
// predicates controlling emit of ldr<x>/ldar<x> and associated dmb
bool unnecessary_acquire(const Node *barrier);
bool needs_acquiring_load(const Node *load);
// predicates controlling emit of str<x>/stlr<x> and associated dmbs
bool unnecessary_release(const Node *barrier);
bool unnecessary_volatile(const Node *barrier);
bool needs_releasing_store(const Node *store);
// predicate controlling translation of CompareAndSwapX
bool needs_acquiring_load_exclusive(const Node *load);
// predicate controlling addressing modes
bool size_fits_all_mem_uses(AddPNode* addp, int shift);
// Convert BoolTest condition to Assembler condition.
// Replicate the logic of cmpOpOper::ccode() and cmpOpUOper::ccode().
Assembler::Condition to_assembler_cond(BoolTest::mask cond);
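// A minimal sketch of that mapping (illustrative only, covering the
// common cases):
//
// case BoolTest::eq: return Assembler::EQ;
// case BoolTest::ne: return Assembler::NE;
// case BoolTest::lt: return Assembler::LT;
// case BoolTest::ge: return Assembler::GE;
// case BoolTest::gt: return Assembler::GT;
// case BoolTest::le: return Assembler::LE;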
%}
source %{
void PhaseOutput::pd_perform_mach_node_analysis() {
}
int MachNode::pd_alignment_required() const {
return 1;
}
int MachNode::compute_padding(int current_offset) const {
return 0;
}
// Derived RegMasks with conditionally allocatable registers
RegMask _ANY_REG32_mask;
RegMask _ANY_REG_mask;
RegMask _PTR_REG_mask;
RegMask _NO_SPECIAL_REG32_mask;
RegMask _NO_SPECIAL_REG_mask;
RegMask _NO_SPECIAL_PTR_REG_mask;
RegMask _NO_SPECIAL_NO_RFP_PTR_REG_mask;
void reg_mask_init() {
// We derive the RegMasks below from the ones auto-generated from the
// adlc register classes, so that the AArch64 rheapbase (r27) and rfp
// (r29) registers can be conditionally reserved.
_ANY_REG32_mask = _ALL_REG32_mask;
_ANY_REG32_mask.Remove(OptoReg::as_OptoReg(r31_sp->as_VMReg()));
_ANY_REG_mask = _ALL_REG_mask;
_PTR_REG_mask = _ALL_REG_mask;
_NO_SPECIAL_REG32_mask = _ALL_REG32_mask;
_NO_SPECIAL_REG32_mask.SUBTRACT(_NON_ALLOCATABLE_REG32_mask);
_NO_SPECIAL_REG_mask = _ALL_REG_mask;
_NO_SPECIAL_REG_mask.SUBTRACT(_NON_ALLOCATABLE_REG_mask);
_NO_SPECIAL_PTR_REG_mask = _ALL_REG_mask;
_NO_SPECIAL_PTR_REG_mask.SUBTRACT(_NON_ALLOCATABLE_REG_mask);
// r27 is not allocatable when compressed oops is on and the heap base
// is not zero; compressed klass pointers don't use r27 after JDK-8234794.
if (UseCompressedOops && (CompressedOops::base() != nullptr)) {
_NO_SPECIAL_REG32_mask.Remove(OptoReg::as_OptoReg(r27->as_VMReg()));
_NO_SPECIAL_REG_mask.Remove(OptoReg::as_OptoReg(r27->as_VMReg()));
_NO_SPECIAL_PTR_REG_mask.Remove(OptoReg::as_OptoReg(r27->as_VMReg()));
}
// r29 is not allocatable when PreserveFramePointer is on
if (PreserveFramePointer) {
_NO_SPECIAL_REG32_mask.Remove(OptoReg::as_OptoReg(r29->as_VMReg()));
_NO_SPECIAL_REG_mask.Remove(OptoReg::as_OptoReg(r29->as_VMReg()));
_NO_SPECIAL_PTR_REG_mask.Remove(OptoReg::as_OptoReg(r29->as_VMReg()));
}
_NO_SPECIAL_NO_RFP_PTR_REG_mask = _NO_SPECIAL_PTR_REG_mask;
_NO_SPECIAL_NO_RFP_PTR_REG_mask.Remove(OptoReg::as_OptoReg(r29->as_VMReg()));
}
// Optimization of volatile gets and puts
// -------------------------------------
//
// AArch64 has ldar<x> and stlr<x> instructions which we can safely
// use to implement volatile reads and writes. For a volatile read
// we simply need
//
// ldar<x>
//
// and for a volatile write we need
//
// stlr<x>
//
// Alternatively, we can implement them by pairing a normal
// load/store with a memory barrier. For a volatile read we need
//
// ldr<x>
// dmb ishld
//
// for a volatile write
//
// dmb ish
// str<x>
// dmb ish
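//
// As an illustrative sketch (registers and field offset here are
// hypothetical), a read of a field declared 'volatile int f' at
// offset f_off in the object pointed at by x1 becomes
//
// add x8, x1, #f_off // ldar accepts no immediate offset
// ldar w0, [x8]
//
// under the first strategy, or
//
// ldr w0, [x1, #f_off]
// dmb ishld
//
// under the second.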
//
// We can also use ldaxr and stlxr to implement compare and swap (CAS)
// sequences. These are normally translated to an instruction
// sequence like the following
//
// dmb ish
// retry:
// ldxr<x> rval, raddr
// cmp rval, rold
// b.ne done
// stlxr<x> rval, rnew, raddr
// cbnz rval, retry
// done:
// cset r0, eq
// dmb ishld
//
// Note that the exclusive store is already using an stlxr
// instruction. That is required to ensure visibility to other
// threads of the exclusive write (assuming it succeeds) before that
// of any subsequent writes.
//
// The following instruction sequence is an improvement on the above
//
// retry:
// ldaxr<x> rval, raddr
// cmp rval, rold
// b.ne done
// stlxr<x> rval, rnew, raddr
// cbnz rval, retry
// done:
// cset r0, eq
//
// We don't need the leading dmb ish since the stlxr guarantees
// visibility of prior writes in the case that the swap is
// successful. Crucially we don't have to worry about the case where
// the swap is not successful since no valid program should be
// relying on visibility of prior changes by the attempting thread
// in the case where the CAS fails.
//
// Similarly, we don't need the trailing dmb ishld if we substitute
// an ldaxr instruction since that will provide all the guarantees we
// require regarding observation of changes made by other threads
// before any change to the CAS address observed by the load.
//
// In order to generate the desired instruction sequence we need to
// be able to identify specific 'signature' ideal graph node
// sequences which i) occur as a translation of volatile reads,
// writes or CAS operations and ii) do not occur through any other
// translation or graph transformation. We can then provide
// alternative adlc matching rules which translate these node
// sequences to the desired machine code sequences. Selection of the
// alternative rules can be implemented by predicates which identify
// the relevant node sequences.
//
// The ideal graph generator translates a volatile read to the node
// sequence
//
// LoadX[mo_acquire]
// MemBarAcquire
//
// As a special case when using the compressed oops optimization we
// may also see this variant
//
// LoadN[mo_acquire]
// DecodeN
// MemBarAcquire
//
// A volatile write is translated to the node sequence
//
// MemBarRelease
// StoreX[mo_release] {CardMark}-optional
// MemBarVolatile
//
// n.b. the above node patterns are generated with a strict
// 'signature' configuration of input and output dependencies (see
// the predicates below for exact details). The card mark may be as
// simple as a few extra nodes or, in a few GC configurations, may
// include more complex control flow between the leading and
// trailing memory barriers. However, whatever the card mark
// configuration these signatures are unique to translated volatile
// reads/stores -- they will not appear as a result of any other
// bytecode translation or inlining nor as a consequence of
// optimizing transforms.
//
// We also want to catch inlined unsafe volatile gets and puts and
// be able to implement them using either ldar<x>/stlr<x> or some
// combination of ldr<x>/str<x> and dmb instructions.
//
// Inlined unsafe volatile puts manifest as a minor variant of the
// normal volatile put node sequence containing an extra cpuorder
// membar
//
// MemBarRelease
// MemBarCPUOrder
// StoreX[mo_release] {CardMark}-optional
// MemBarCPUOrder
// MemBarVolatile
//
// n.b. as an aside, a cpuorder membar is not itself subject to
// matching and translation by adlc rules. However, the rule
// predicates need to detect its presence in order to correctly
// select the desired adlc rules.
//
// Inlined unsafe volatile gets manifest as a slightly different
// node sequence to a normal volatile get because of the
// introduction of some CPUOrder memory barriers to bracket the
// Load. However, the same basic skeleton of a LoadX feeding a
// MemBarAcquire, possibly through an optional DecodeN, is still
// present
//
// MemBarCPUOrder
// || \\
// MemBarCPUOrder LoadX[mo_acquire]
// || |
// || {DecodeN} optional
// || /
// MemBarAcquire
//
// In this case the acquire membar does not directly depend on the
// load. However, we can be sure that the load is generated from an
// inlined unsafe volatile get if we see it dependent on this unique
// sequence of membar nodes. Similarly, given an acquire membar we
// can know that it was added because of an inlined unsafe volatile
// get if it is fed and feeds a cpuorder membar and if its feed
// membar also feeds an acquiring load.
//
// Finally an inlined (Unsafe) CAS operation is translated to the
// following ideal graph
//
// MemBarRelease
// MemBarCPUOrder
// CompareAndSwapX {CardMark}-optional
// MemBarCPUOrder
// MemBarAcquire
//
// So, where we can identify these volatile read and write
// signatures we can choose to plant either of the above two code
// sequences. For a volatile read we can simply plant a normal
// ldr<x> and translate the MemBarAcquire to a dmb. However, we can
// also choose to inhibit translation of the MemBarAcquire and
// inhibit planting of the ldr<x>, instead planting an ldar<x>.
//
// When we recognise a volatile store signature we can choose to
// plant a dmb ish as a translation for the MemBarRelease, a
// normal str<x> and then a dmb ish for the MemBarVolatile.
// Alternatively, we can inhibit translation of the MemBarRelease
// and MemBarVolatile and instead plant a simple stlr<x>
// instruction.
//
// When we recognise a CAS signature we can choose to plant a dmb
// ish as a translation for the MemBarRelease, the conventional
// macro-instruction sequence for the CompareAndSwap node (which
// uses ldxr<x>) and then a dmb ishld for the MemBarAcquire.
// Alternatively, we can elide generation of the dmb instructions
// and plant the alternative CompareAndSwap macro-instruction
// sequence (which uses ldaxr<x>).
//
// Of course, the above only applies when we see these signature
// configurations. We still want to plant dmb instructions in any
// other cases where we may see a MemBarAcquire, MemBarRelease or
// MemBarVolatile. For example, at the end of a constructor which
// writes final/volatile fields we will see a MemBarRelease
// instruction and this needs a 'dmb ish' lest we risk the
// constructed object being visible without making the
// final/volatile field writes visible.
//
// n.b. the translation rules below which rely on detection of the
// volatile signatures and insert ldar<x> or stlr<x> are failsafe.
// If we see anything other than the signature configurations we
// always just translate the loads and stores to ldr<x> and str<x>
// and translate acquire, release and volatile membars to the
// relevant dmb instructions.
//
// is_CAS(int opcode, bool maybe_volatile)
//
// return true if opcode is one of the possible CompareAndSwapX
// (or GetAndSetX/GetAndAddX) values; for the CompareAndExchangeX
// and WeakCompareAndSwapX variants return maybe_volatile; otherwise
// return false.
bool is_CAS(int opcode, bool maybe_volatile)
{
switch(opcode) {
// We handle these
case Op_CompareAndSwapI:
case Op_CompareAndSwapL:
case Op_CompareAndSwapP:
case Op_CompareAndSwapN:
case Op_ShenandoahCompareAndSwapP:
case Op_ShenandoahCompareAndSwapN:
case Op_CompareAndSwapB:
case Op_CompareAndSwapS:
case Op_GetAndSetI:
case Op_GetAndSetL:
case Op_GetAndSetP:
case Op_GetAndSetN:
case Op_GetAndAddI:
case Op_GetAndAddL:
return true;
case Op_CompareAndExchangeI:
case Op_CompareAndExchangeN:
case Op_CompareAndExchangeB:
case Op_CompareAndExchangeS:
case Op_CompareAndExchangeL:
case Op_CompareAndExchangeP:
case Op_WeakCompareAndSwapB:
case Op_WeakCompareAndSwapS:
case Op_WeakCompareAndSwapI:
case Op_WeakCompareAndSwapL:
case Op_WeakCompareAndSwapP:
case Op_WeakCompareAndSwapN:
case Op_ShenandoahWeakCompareAndSwapP:
case Op_ShenandoahWeakCompareAndSwapN:
case Op_ShenandoahCompareAndExchangeP:
case Op_ShenandoahCompareAndExchangeN:
return maybe_volatile;
default:
return false;
}
}
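// For example, is_CAS(Op_CompareAndSwapI, false) is true, while
// is_CAS(Op_CompareAndExchangeI, false) is false: the exchange and
// weak forms only count as CAS when maybe_volatile is true.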
// predicates controlling emit of ldr<x>/ldar<x>
bool unnecessary_acquire(const Node *barrier)
{
assert(barrier->is_MemBar(), "expecting a membar");
MemBarNode* mb = barrier->as_MemBar();
if (mb->trailing_load()) {
return true;
}
if (mb->trailing_load_store()) {
Node* load_store = mb->in(MemBarNode::Precedent);
assert(load_store->is_LoadStore(), "unexpected graph shape");
return is_CAS(load_store->Opcode(), true);
}
return false;
}
bool needs_acquiring_load(const Node *n)
{
assert(n->is_Load(), "expecting a load");
LoadNode *ld = n->as_Load();
return ld->is_acquire();
}
bool unnecessary_release(const Node *n)
{
assert((n->is_MemBar() &&
n->Opcode() == Op_MemBarRelease),
"expecting a release membar");
MemBarNode *barrier = n->as_MemBar();
if (!barrier->leading()) {
return false;
} else {
Node* trailing = barrier->trailing_membar();
MemBarNode* trailing_mb = trailing->as_MemBar();
assert(trailing_mb->trailing(), "Not a trailing membar?");
assert(trailing_mb->leading_membar() == n, "inconsistent leading/trailing membars");
Node* mem = trailing_mb->in(MemBarNode::Precedent);
if (mem->is_Store()) {
assert(mem->as_Store()->is_release(), "");
assert(trailing_mb->Opcode() == Op_MemBarVolatile, "");
return true;
} else {
assert(mem->is_LoadStore(), "");
assert(trailing_mb->Opcode() == Op_MemBarAcquire, "");
return is_CAS(mem->Opcode(), true);
}
}
}
bool unnecessary_volatile(const Node *n)
{
// assert n->is_MemBar();
MemBarNode *mbvol = n->as_MemBar();
bool release = mbvol->trailing_store();
assert(!release || (mbvol->in(MemBarNode::Precedent)->is_Store() && mbvol->in(MemBarNode::Precedent)->as_Store()->is_release()), "");
#ifdef ASSERT
if (release) {
Node* leading = mbvol->leading_membar();
assert(leading->Opcode() == Op_MemBarRelease, "");
assert(leading->as_MemBar()->leading_store(), "");
assert(leading->as_MemBar()->trailing_membar() == mbvol, "");
}
#endif
return release;
}
// predicates controlling emit of str<x>/stlr<x>
bool needs_releasing_store(const Node *n)
{
// assert n->is_Store();
StoreNode *st = n->as_Store();
return st->trailing_membar() != nullptr;
}
// predicate controlling translation of CAS
//
// returns true if CAS needs to use an acquiring load otherwise false
bool needs_acquiring_load_exclusive(const Node *n)
{
assert(is_CAS(n->Opcode(), true), "expecting a compare and swap");
LoadStoreNode* ldst = n->as_LoadStore();
if (is_CAS(n->Opcode(), false)) {
assert(ldst->trailing_membar() != nullptr, "expected trailing membar");
} else {
return ldst->trailing_membar() != nullptr;
}
// a strong CAS always has a trailing membar (asserted above), so we
// can just return true here
return true;
}
#define __ masm->
// advance declarations for helper functions to convert register
// indices to register objects
// the ad file has to provide implementations of certain methods
// expected by the generic code
//
// REQUIRED FUNCTIONALITY
//=============================================================================
// !!!!! Special hack to get all types of calls to specify the byte offset
// from the start of the call to the point where the return address
// will point.
int MachCallStaticJavaNode::ret_addr_offset()
{
// call should be a simple bl
int off = 4;
return off;
}
int MachCallDynamicJavaNode::ret_addr_offset()
{
return 16; // movz, movk, movk, bl
}
int MachCallRuntimeNode::ret_addr_offset() {
// for generated stubs the call will be
// bl(addr)
// or with far branches
// bl(trampoline_stub)
// for real runtime callouts it will be six instructions
// see aarch64_enc_java_to_runtime
// adr(rscratch2, retaddr)
// str(rscratch2, Address(rthread, JavaThread::last_Java_pc_offset()));
// lea(rscratch1, RuntimeAddress(addr))
// blr(rscratch1)
CodeBlob *cb = CodeCache::find_blob(_entry_point);
if (cb) {
return 1 * NativeInstruction::instruction_size;
} else {
return 6 * NativeInstruction::instruction_size;
}
}
//=============================================================================
#ifndef PRODUCT
void MachBreakpointNode::format(PhaseRegAlloc *ra_, outputStream *st) const {
st->print("BREAKPOINT");
}
#endif
void MachBreakpointNode::emit(C2_MacroAssembler *masm, PhaseRegAlloc *ra_) const {
__ brk(0);
}
uint MachBreakpointNode::size(PhaseRegAlloc *ra_) const {
return MachNode::size(ra_);
}
//=============================================================================
#ifndef PRODUCT
void MachNopNode::format(PhaseRegAlloc*, outputStream* st) const {
st->print("nop \t# %d bytes pad for loops and calls", _count);
}
#endif
void MachNopNode::emit(C2_MacroAssembler *masm, PhaseRegAlloc*) const {
for (int i = 0; i < _count; i++) {
__ nop();
}
}
uint MachNopNode::size(PhaseRegAlloc*) const {
return _count * NativeInstruction::instruction_size;
}
//=============================================================================
const RegMask& MachConstantBaseNode::_out_RegMask = RegMask::Empty;
int ConstantTable::calculate_table_base_offset() const {
return 0; // absolute addressing, no offset
}
bool MachConstantBaseNode::requires_postalloc_expand() const { return false; }
void MachConstantBaseNode::postalloc_expand(GrowableArray <Node *> *nodes, PhaseRegAlloc *ra_) {
ShouldNotReachHere();
}
void MachConstantBaseNode::emit(C2_MacroAssembler* masm, PhaseRegAlloc* ra_) const {
// Empty encoding
}
uint MachConstantBaseNode::size(PhaseRegAlloc* ra_) const {
return 0;
}
#ifndef PRODUCT
void MachConstantBaseNode::format(PhaseRegAlloc* ra_, outputStream* st) const {
st->print("-- \t// MachConstantBaseNode (empty encoding)");
}
#endif
#ifndef PRODUCT
void MachPrologNode::format(PhaseRegAlloc *ra_, outputStream *st) const {
Compile* C = ra_->C;
int framesize = C->output()->frame_slots() << LogBytesPerInt;
if (C->output()->need_stack_bang(framesize))
st->print("# stack bang size=%d\n\t", framesize);
if (VM_Version::use_rop_protection()) {
st->print("ldr zr, [lr]\n\t");
st->print("paciaz\n\t");
}
if (framesize < ((1 << 9) + 2 * wordSize)) {
st->print("sub sp, sp, #%d\n\t", framesize);
st->print("stp rfp, lr, [sp, #%d]", framesize - 2 * wordSize);
if (PreserveFramePointer) st->print("\n\tadd rfp, sp, #%d", framesize - 2 * wordSize);
} else {
st->print("stp lr, rfp, [sp, #%d]!\n\t", -(2 * wordSize));
if (PreserveFramePointer) st->print("mov rfp, sp\n\t");
st->print("mov rscratch1, #%d\n\t", framesize - 2 * wordSize);
st->print("sub sp, sp, rscratch1");
}
if (C->stub_function() == nullptr) {
st->print("\n\t");
st->print("ldr rscratch1, [guard]\n\t");
st->print("dmb ishld\n\t");
st->print("ldr rscratch2, [rthread, #thread_disarmed_guard_value_offset]\n\t");
st->print("cmp rscratch1, rscratch2\n\t");
st->print("b.eq skip");
st->print("\n\t");
st->print("blr #nmethod_entry_barrier_stub\n\t");
st->print("b skip\n\t");
st->print("guard: int\n\t");
st->print("\n\t");
st->print("skip:\n\t");
}
}
#endif
void MachPrologNode::emit(C2_MacroAssembler *masm, PhaseRegAlloc *ra_) const {
Compile* C = ra_->C;
// n.b. frame size includes space for return pc and rfp
const int framesize = C->output()->frame_size_in_bytes();
// insert a nop at the start of the prolog so we can patch in a
// branch if we need to invalidate the method later
__ nop();
if (C->clinit_barrier_on_entry()) {
assert(!C->method()->holder()->is_not_initialized(), "initialization should have been started");
Label L_skip_barrier;
__ mov_metadata(rscratch2, C->method()->holder()->constant_encoding());
__ clinit_barrier(rscratch2, rscratch1, &L_skip_barrier);
__ far_jump(RuntimeAddress(SharedRuntime::get_handle_wrong_method_stub()));
__ bind(L_skip_barrier);
}
if (C->max_vector_size() > 0) {
__ reinitialize_ptrue();
}
int bangsize = C->output()->bang_size_in_bytes();
if (C->output()->need_stack_bang(bangsize))
__ generate_stack_overflow_check(bangsize);
__ build_frame(framesize);
if (C->stub_function() == nullptr) {
BarrierSetAssembler* bs = BarrierSet::barrier_set()->barrier_set_assembler();
// Dummy labels for just measuring the code size
Label dummy_slow_path;
Label dummy_continuation;
Label dummy_guard;
Label* slow_path = &dummy_slow_path;
Label* continuation = &dummy_continuation;
Label* guard = &dummy_guard;
if (!Compile::current()->output()->in_scratch_emit_size()) {
// Use real labels from actual stub when not emitting code for the purpose of measuring its size
C2EntryBarrierStub* stub = new (Compile::current()->comp_arena()) C2EntryBarrierStub();
Compile::current()->output()->add_stub(stub);
slow_path = &stub->entry();
continuation = &stub->continuation();
guard = &stub->guard();
}
// In the C2 code, we move the non-hot part of nmethod entry barriers out-of-line to a stub.
bs->nmethod_entry_barrier(masm, slow_path, continuation, guard);
}
if (VerifyStackAtCalls) {
Unimplemented();
}
C->output()->set_frame_complete(__ offset());
if (C->has_mach_constant_base_node()) {
// NOTE: We set the table base offset here because users might be
// emitted before MachConstantBaseNode.
ConstantTable& constant_table = C->output()->constant_table();
constant_table.set_table_base_offset(constant_table.calculate_table_base_offset());
}
}
uint MachPrologNode::size(PhaseRegAlloc* ra_) const
{
return MachNode::size(ra_); // too many variables; just compute it
// the hard way
}
int MachPrologNode::reloc() const
{
return 0;
}
//=============================================================================
#ifndef PRODUCT
void MachEpilogNode::format(PhaseRegAlloc *ra_, outputStream *st) const {
Compile* C = ra_->C;
int framesize = C->output()->frame_slots() << LogBytesPerInt;
st->print("# pop frame %d\n\t",framesize);
if (framesize == 0) {
st->print("ldp lr, rfp, [sp],#%d\n\t", (2 * wordSize));
} else if (framesize < ((1 << 9) + 2 * wordSize)) {
st->print("ldp lr, rfp, [sp,#%d]\n\t", framesize - 2 * wordSize);
st->print("add sp, sp, #%d\n\t", framesize);
} else {
st->print("mov rscratch1, #%d\n\t", framesize - 2 * wordSize);
st->print("add sp, sp, rscratch1\n\t");
st->print("ldp lr, rfp, [sp],#%d\n\t", (2 * wordSize));
}
if (VM_Version::use_rop_protection()) {
st->print("autiaz\n\t");
st->print("ldr zr, [lr]\n\t");
}
if (do_polling() && C->is_method_compilation()) {
st->print("# test polling word\n\t");
st->print("ldr rscratch1, [rthread],#%d\n\t", in_bytes(JavaThread::polling_word_offset()));
st->print("cmp sp, rscratch1\n\t");
st->print("bhi #slow_path");
}
}
#endif
void MachEpilogNode::emit(C2_MacroAssembler *masm, PhaseRegAlloc *ra_) const {
Compile* C = ra_->C;
int framesize = C->output()->frame_slots() << LogBytesPerInt;
__ remove_frame(framesize);
if (StackReservedPages > 0 && C->has_reserved_stack_access()) {
__ reserved_stack_check();
}
if (do_polling() && C->is_method_compilation()) {
Label dummy_label;
Label* code_stub = &dummy_label;
if (!C->output()->in_scratch_emit_size()) {
C2SafepointPollStub* stub = new (C->comp_arena()) C2SafepointPollStub(__ offset());
C->output()->add_stub(stub);
code_stub = &stub->entry();
}
__ relocate(relocInfo::poll_return_type);
__ safepoint_poll(*code_stub, true /* at_return */, false /* acquire */, true /* in_nmethod */);
}
}
uint MachEpilogNode::size(PhaseRegAlloc *ra_) const {
// Variable size. Determine dynamically.
return MachNode::size(ra_);
}
int MachEpilogNode::reloc() const {
// Return number of relocatable values contained in this instruction.
return 1; // 1 for polling page.
}
const Pipeline * MachEpilogNode::pipeline() const {
return MachNode::pipeline_class();
}
//=============================================================================
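// rc_class maps an OptoReg name onto the register class used by
// MachSpillCopyNode::implementation below. OptoReg slot numbering is
// linear: the general-register slots come first, then the float and
// predicate slots, and everything at or beyond the stack base is
// rc_stack.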
static enum RC rc_class(OptoReg::Name reg) {
if (reg == OptoReg::Bad) {
return rc_bad;
}
// we have 32 int registers * 2 halves
int slots_of_int_registers = Register::number_of_registers * Register::max_slots_per_register;
if (reg < slots_of_int_registers) {
return rc_int;
}
// we have 32 float registers * 8 halves
int slots_of_float_registers = FloatRegister::number_of_registers * FloatRegister::max_slots_per_register;
if (reg < slots_of_int_registers + slots_of_float_registers) {
return rc_float;
}
int slots_of_predicate_registers = PRegister::number_of_registers * PRegister::max_slots_per_register;
if (reg < slots_of_int_registers + slots_of_float_registers + slots_of_predicate_registers) {
return rc_predicate;
}
// Between predicate regs & stack is the flags.
assert(OptoReg::is_stack(reg), "blow up if spilling flags");
return rc_stack;
}
uint MachSpillCopyNode::implementation(C2_MacroAssembler *masm, PhaseRegAlloc *ra_, bool do_size, outputStream *st) const {
Compile* C = ra_->C;
// Get registers to move.
OptoReg::Name src_hi = ra_->get_reg_second(in(1));
OptoReg::Name src_lo = ra_->get_reg_first(in(1));
OptoReg::Name dst_hi = ra_->get_reg_second(this);
OptoReg::Name dst_lo = ra_->get_reg_first(this);
enum RC src_hi_rc = rc_class(src_hi);
enum RC src_lo_rc = rc_class(src_lo);
enum RC dst_hi_rc = rc_class(dst_hi);
enum RC dst_lo_rc = rc_class(dst_lo);
assert(src_lo != OptoReg::Bad && dst_lo != OptoReg::Bad, "must move at least 1 register");
if (src_hi != OptoReg::Bad && !bottom_type()->isa_vectmask()) {
assert((src_lo&1)==0 && src_lo+1==src_hi &&
(dst_lo&1)==0 && dst_lo+1==dst_hi,
"expected aligned-adjacent pairs");
}
if (src_lo == dst_lo && src_hi == dst_hi) {
return 0; // Self copy, no move.
}
bool is64 = (src_lo & 1) == 0 && src_lo + 1 == src_hi &&
(dst_lo & 1) == 0 && dst_lo + 1 == dst_hi;
int src_offset = ra_->reg2offset(src_lo);
int dst_offset = ra_->reg2offset(dst_lo);
if (bottom_type()->isa_vect() && !bottom_type()->isa_vectmask()) {
uint ireg = ideal_reg();
if (ireg == Op_VecA && masm) {
int sve_vector_reg_size_in_bytes = Matcher::scalable_vector_reg_size(T_BYTE);
if (src_lo_rc == rc_stack && dst_lo_rc == rc_stack) {
// stack->stack
__ spill_copy_sve_vector_stack_to_stack(src_offset, dst_offset,
sve_vector_reg_size_in_bytes);
} else if (src_lo_rc == rc_float && dst_lo_rc == rc_stack) {
__ spill_sve_vector(as_FloatRegister(Matcher::_regEncode[src_lo]), ra_->reg2offset(dst_lo),
sve_vector_reg_size_in_bytes);
} else if (src_lo_rc == rc_stack && dst_lo_rc == rc_float) {
__ unspill_sve_vector(as_FloatRegister(Matcher::_regEncode[dst_lo]), ra_->reg2offset(src_lo),
sve_vector_reg_size_in_bytes);
} else if (src_lo_rc == rc_float && dst_lo_rc == rc_float) {
__ sve_orr(as_FloatRegister(Matcher::_regEncode[dst_lo]),
as_FloatRegister(Matcher::_regEncode[src_lo]),
as_FloatRegister(Matcher::_regEncode[src_lo]));
} else {
ShouldNotReachHere();
}
} else if (masm) {
assert(ireg == Op_VecD || ireg == Op_VecX, "must be 64 bit or 128 bit vector");
assert((src_lo_rc != rc_int && dst_lo_rc != rc_int), "sanity");
if (src_lo_rc == rc_stack && dst_lo_rc == rc_stack) {
// stack->stack
assert((src_offset & 7) == 0 && (dst_offset & 7) == 0, "unaligned stack offset");
if (ireg == Op_VecD) {
__ unspill(rscratch1, true, src_offset);
__ spill(rscratch1, true, dst_offset);
} else {
__ spill_copy128(src_offset, dst_offset);
}
} else if (src_lo_rc == rc_float && dst_lo_rc == rc_float) {
__ mov(as_FloatRegister(Matcher::_regEncode[dst_lo]),
ireg == Op_VecD ? __ T8B : __ T16B,
as_FloatRegister(Matcher::_regEncode[src_lo]));
} else if (src_lo_rc == rc_float && dst_lo_rc == rc_stack) {
__ spill(as_FloatRegister(Matcher::_regEncode[src_lo]),
ireg == Op_VecD ? __ D : __ Q,
ra_->reg2offset(dst_lo));
} else if (src_lo_rc == rc_stack && dst_lo_rc == rc_float) {
__ unspill(as_FloatRegister(Matcher::_regEncode[dst_lo]),
ireg == Op_VecD ? __ D : __ Q,
ra_->reg2offset(src_lo));
} else {
ShouldNotReachHere();
}
}
} else if (masm) {
switch (src_lo_rc) {
case rc_int:
if (dst_lo_rc == rc_int) { // gpr --> gpr copy
if (is64) {
__ mov(as_Register(Matcher::_regEncode[dst_lo]),
as_Register(Matcher::_regEncode[src_lo]));
} else {
__ movw(as_Register(Matcher::_regEncode[dst_lo]),
as_Register(Matcher::_regEncode[src_lo]));
}
} else if (dst_lo_rc == rc_float) { // gpr --> fpr copy
if (is64) {
__ fmovd(as_FloatRegister(Matcher::_regEncode[dst_lo]),
as_Register(Matcher::_regEncode[src_lo]));
} else {
__ fmovs(as_FloatRegister(Matcher::_regEncode[dst_lo]),
as_Register(Matcher::_regEncode[src_lo]));
}
} else { // gpr --> stack spill
assert(dst_lo_rc == rc_stack, "spill to bad register class");
__ spill(as_Register(Matcher::_regEncode[src_lo]), is64, dst_offset);
}
break;
case rc_float:
if (dst_lo_rc == rc_int) { // fpr --> gpr copy
if (is64) {
__ fmovd(as_Register(Matcher::_regEncode[dst_lo]),
as_FloatRegister(Matcher::_regEncode[src_lo]));
} else {
__ fmovs(as_Register(Matcher::_regEncode[dst_lo]),
as_FloatRegister(Matcher::_regEncode[src_lo]));
}
} else if (dst_lo_rc == rc_float) { // fpr --> fpr copy
if (is64) {
__ fmovd(as_FloatRegister(Matcher::_regEncode[dst_lo]),
as_FloatRegister(Matcher::_regEncode[src_lo]));
} else {
__ fmovs(as_FloatRegister(Matcher::_regEncode[dst_lo]),
as_FloatRegister(Matcher::_regEncode[src_lo]));
}
} else { // fpr --> stack spill
assert(dst_lo_rc == rc_stack, "spill to bad register class");
__ spill(as_FloatRegister(Matcher::_regEncode[src_lo]),
is64 ? __ D : __ S, dst_offset);
}
break;
case rc_stack:
if (dst_lo_rc == rc_int) { // stack --> gpr load
__ unspill(as_Register(Matcher::_regEncode[dst_lo]), is64, src_offset);
} else if (dst_lo_rc == rc_float) { // stack --> fpr load
__ unspill(as_FloatRegister(Matcher::_regEncode[dst_lo]),
is64 ? __ D : __ S, src_offset);
} else if (dst_lo_rc == rc_predicate) {
__ unspill_sve_predicate(as_PRegister(Matcher::_regEncode[dst_lo]), ra_->reg2offset(src_lo),
Matcher::scalable_vector_reg_size(T_BYTE) >> 3);
} else { // stack --> stack copy
assert(dst_lo_rc == rc_stack, "spill to bad register class");
if (ideal_reg() == Op_RegVectMask) {
__ spill_copy_sve_predicate_stack_to_stack(src_offset, dst_offset,
Matcher::scalable_vector_reg_size(T_BYTE) >> 3);
} else {
__ unspill(rscratch1, is64, src_offset);
__ spill(rscratch1, is64, dst_offset);
}
}
break;
case rc_predicate:
if (dst_lo_rc == rc_predicate) {
__ sve_mov(as_PRegister(Matcher::_regEncode[dst_lo]), as_PRegister(Matcher::_regEncode[src_lo]));
} else if (dst_lo_rc == rc_stack) {
__ spill_sve_predicate(as_PRegister(Matcher::_regEncode[src_lo]), ra_->reg2offset(dst_lo),
Matcher::scalable_vector_reg_size(T_BYTE) >> 3);
} else {
assert(false, "bad src and dst rc_class combination.");
ShouldNotReachHere();
}
break;
default:
assert(false, "bad rc_class for spill");
ShouldNotReachHere();
}
}
if (st) {
st->print("spill ");
if (src_lo_rc == rc_stack) {
st->print("[sp, #%d] -> ", ra_->reg2offset(src_lo));
} else {
st->print("%s -> ", Matcher::regName[src_lo]);
}
if (dst_lo_rc == rc_stack) {
st->print("[sp, #%d]", ra_->reg2offset(dst_lo));
} else {
st->print("%s", Matcher::regName[dst_lo]);
}
if (bottom_type()->isa_vect() && !bottom_type()->isa_vectmask()) {
int vsize = 0;
switch (ideal_reg()) {
case Op_VecD:
vsize = 64;
break;
case Op_VecX:
vsize = 128;
break;
case Op_VecA:
vsize = Matcher::scalable_vector_reg_size(T_BYTE) * 8;
break;
default:
assert(false, "bad register type for spill");
ShouldNotReachHere();
}
st->print("\t# vector spill size = %d", vsize);
} else if (ideal_reg() == Op_RegVectMask) {
assert(Matcher::supports_scalable_vector(), "bad register type for spill");
int vsize = Matcher::scalable_predicate_reg_slots() * 32;
st->print("\t# predicate spill size = %d", vsize);
} else {
st->print("\t# spill size = %d", is64 ? 64 : 32);
}
}
return 0;
}
#ifndef PRODUCT
void MachSpillCopyNode::format(PhaseRegAlloc *ra_, outputStream *st) const {
if (!ra_)
st->print("N%d = SpillCopy(N%d)", _idx, in(1)->_idx);
else
implementation(nullptr, ra_, false, st);
}
#endif
void MachSpillCopyNode::emit(C2_MacroAssembler *masm, PhaseRegAlloc *ra_) const {
implementation(masm, ra_, false, nullptr);
}
uint MachSpillCopyNode::size(PhaseRegAlloc *ra_) const {
return MachNode::size(ra_);
}
//=============================================================================
#ifndef PRODUCT
void BoxLockNode::format(PhaseRegAlloc *ra_, outputStream *st) const {
int offset = ra_->reg2offset(in_RegMask(0).find_first_elem());
int reg = ra_->get_reg_first(this);
st->print("add %s, rsp, #%d]\t# box lock",
Matcher::regName[reg], offset);
}
#endif
void BoxLockNode::emit(C2_MacroAssembler *masm, PhaseRegAlloc *ra_) const {
int offset = ra_->reg2offset(in_RegMask(0).find_first_elem());
int reg = ra_->get_encode(this);
// This add will handle any 24-bit signed offset. 24 bits allows an
// 8 megabyte stack frame.
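  // Illustrative sketch: an offset too large for one add/sub immediate
  // is split into two adds, e.g. for offset 0x12345:
  //   add dst, sp, #0x12000   // 12-bit immediate, LSL #12
  //   add dst, dst, #0x345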
__ add(as_Register(reg), sp, offset);
}
uint BoxLockNode::size(PhaseRegAlloc *ra_) const {
// BoxLockNode is not a MachNode, so we can't just call MachNode::size(ra_).
int offset = ra_->reg2offset(in_RegMask(0).find_first_elem());
if (Assembler::operand_valid_for_add_sub_immediate(offset)) {
return NativeInstruction::instruction_size;
} else {
return 2 * NativeInstruction::instruction_size;
}
}
//=============================================================================
#ifndef PRODUCT
void MachUEPNode::format(PhaseRegAlloc* ra_, outputStream* st) const
{
st->print_cr("# MachUEPNode");
if (UseCompressedClassPointers) {
st->print_cr("\tldrw rscratch1, [j_rarg0 + oopDesc::klass_offset_in_bytes()]\t# compressed klass");
st->print_cr("\tldrw r10, [rscratch2 + CompiledICData::speculated_klass_offset()]\t# compressed klass");
st->print_cr("\tcmpw rscratch1, r10");
} else {
st->print_cr("\tldr rscratch1, [j_rarg0 + oopDesc::klass_offset_in_bytes()]\t# compressed klass");
st->print_cr("\tldr r10, [rscratch2 + CompiledICData::speculated_klass_offset()]\t# compressed klass");
st->print_cr("\tcmp rscratch1, r10");
}
st->print_cr("\tbne, SharedRuntime::_ic_miss_stub");
}
#endif
void MachUEPNode::emit(C2_MacroAssembler* masm, PhaseRegAlloc* ra_) const
{
__ ic_check(InteriorEntryAlignment);
}
uint MachUEPNode::size(PhaseRegAlloc* ra_) const
{
return MachNode::size(ra_);
}
// REQUIRED EMIT CODE
//=============================================================================
// Emit exception handler code.
int HandlerImpl::emit_exception_handler(C2_MacroAssembler* masm)
{
// mov rscratch1 #exception_blob_entry_point
// br rscratch1
// Note that the code buffer's insts_mark is always relative to insts.
// That's why we must use the macroassembler to generate a handler.
address base = __ start_a_stub(size_exception_handler());
if (base == nullptr) {
ciEnv::current()->record_failure("CodeCache is full");
return 0; // CodeBuffer::expand failed
}
int offset = __ offset();
__ far_jump(RuntimeAddress(OptoRuntime::exception_blob()->entry_point()));
assert(__ offset() - offset <= (int) size_exception_handler(), "overflow");
__ end_a_stub();
return offset;
}
// Emit deopt handler code.
int HandlerImpl::emit_deopt_handler(C2_MacroAssembler* masm)
{
// Note that the code buffer's insts_mark is always relative to insts.
// That's why we must use the macroassembler to generate a handler.
address base = __ start_a_stub(size_deopt_handler());
if (base == nullptr) {
ciEnv::current()->record_failure("CodeCache is full");
return 0; // CodeBuffer::expand failed
}
int offset = __ offset();
__ adr(lr, __ pc());
__ far_jump(RuntimeAddress(SharedRuntime::deopt_blob()->unpack()));
assert(__ offset() - offset == (int) size_deopt_handler(), "overflow");
__ end_a_stub();
return offset;
}
// REQUIRED MATCHER CODE
//=============================================================================
bool Matcher::match_rule_supported(int opcode) {
if (!has_match_rule(opcode))
return false;
switch (opcode) {
case Op_OnSpinWait:
return VM_Version::supports_on_spin_wait();
case Op_CacheWB:
case Op_CacheWBPreSync:
case Op_CacheWBPostSync:
if (!VM_Version::supports_data_cache_line_flush()) {
return false;
}
break;
case Op_ExpandBits:
case Op_CompressBits:
if (!VM_Version::supports_svebitperm()) {
return false;
}
break;
case Op_FmaF:
case Op_FmaD:
case Op_FmaVF:
case Op_FmaVD:
if (!UseFMA) {
return false;
}
break;
case Op_FmaHF:
// UseFMA flag also needs to be checked along with FEAT_FP16
if (!UseFMA || !is_feat_fp16_supported()) {
return false;
}
break;
case Op_AddHF:
case Op_SubHF:
case Op_MulHF:
case Op_DivHF:
case Op_MinHF:
case Op_MaxHF:
case Op_SqrtHF:
// Half-precision floating point scalar operations require FEAT_FP16
// to be available. FEAT_FP16 is enabled if both "fphp" and "asimdhp"
// features are supported.
if (!is_feat_fp16_supported()) {
return false;
}
break;
}
return true; // Per default match rules are supported.
}
const RegMask* Matcher::predicate_reg_mask(void) {
return &_PR_REG_mask;
}
bool Matcher::supports_vector_calling_convention(void) {
return EnableVectorSupport;
}
OptoRegPair Matcher::vector_return_value(uint ideal_reg) {
assert(EnableVectorSupport, "sanity");
int lo = V0_num;
int hi = V0_H_num;
if (ideal_reg == Op_VecX || ideal_reg == Op_VecA) {
hi = V0_K_num;
}
return OptoRegPair(hi, lo);
}
// Is this branch offset short enough that a short branch can be used?
//
// NOTE: If the platform does not provide any short branch variants, then
// this method should return false for offset 0.
bool Matcher::is_short_branch_offset(int rule, int br_size, int offset) {
// The passed offset is relative to address of the branch.
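  // The +-32768 bound tracks the most restrictive short branch form,
  // tbz/tbnz, whose 14-bit signed immediate (scaled by 4) reaches
  // about +-32KB from the branch instruction.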
return (-32768 <= offset && offset < 32768);
}
// Vector width in bytes.
int Matcher::vector_width_in_bytes(BasicType bt) {
  // MaxVectorSize should have been set based on the detected SVE maximum vector register size.
int size = MIN2((UseSVE > 0) ? (int)FloatRegister::sve_vl_max : (int)FloatRegister::neon_vl, (int)MaxVectorSize);
  // Require at least 2 elements per vector
  if (size < 2*type2aelembytes(bt)) size = 0;
  // and a total vector width of at least 4 bytes
  if (size < 4) size = 0;
return size;
}
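// Worked example (illustrative): with UseSVE == 0 and MaxVectorSize == 16,
// vector_width_in_bytes(T_INT) is MIN2(16, 16) = 16 bytes, so
// max_vector_size(T_INT) below yields 16 / 4 = 4 lanes.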
// Limits on vector size (number of elements) loaded into vector.
int Matcher::max_vector_size(const BasicType bt) {
return vector_width_in_bytes(bt)/type2aelembytes(bt);
}
int Matcher::min_vector_size(const BasicType bt) {
int max_size = max_vector_size(bt);
  // By default the minimum vector size corresponds to 8 bytes.
int size = 8 / type2aelembytes(bt);
if (bt == T_BYTE) {
// To support vector api shuffle/rearrange.
size = 4;
} else if (bt == T_BOOLEAN) {
// To support vector api load/store mask.
size = 2;
}
if (size < 2) size = 2;
return MIN2(size, max_size);
}
int Matcher::max_vector_size_auto_vectorization(const BasicType bt) {
return Matcher::max_vector_size(bt);
}
// Actual max scalable vector register length.
int Matcher::scalable_vector_reg_size(const BasicType bt) {
return Matcher::max_vector_size(bt);
}
// Vector ideal reg.
uint Matcher::vector_ideal_reg(int len) {
if (UseSVE > 0 && FloatRegister::neon_vl < len && len <= FloatRegister::sve_vl_max) {
return Op_VecA;
}
switch(len) {
// For 16-bit/32-bit mask vector, reuse VecD.
case 2:
case 4:
case 8: return Op_VecD;
case 16: return Op_VecX;
}
ShouldNotReachHere();
return 0;
}
MachOper* Matcher::pd_specialize_generic_vector_operand(MachOper* generic_opnd, uint ideal_reg, bool is_temp) {
assert(Matcher::is_generic_vector(generic_opnd), "not generic");
switch (ideal_reg) {
case Op_VecA: return new vecAOper();
case Op_VecD: return new vecDOper();
case Op_VecX: return new vecXOper();
}
ShouldNotReachHere();
return nullptr;
}
bool Matcher::is_reg2reg_move(MachNode* m) {
return false;
}
bool Matcher::is_generic_vector(MachOper* opnd) {
return opnd->opcode() == VREG;
}
// Return whether or not this register is ever used as an argument.
// This function is used on startup to build the trampoline stubs in
// generateOptoStub. Registers not mentioned will be killed by the VM
// call in the trampoline, and arguments in those registers will not be
// available to the callee.
bool Matcher::can_be_java_arg(int reg)
{
return
reg == R0_num || reg == R0_H_num ||
reg == R1_num || reg == R1_H_num ||
reg == R2_num || reg == R2_H_num ||
reg == R3_num || reg == R3_H_num ||
reg == R4_num || reg == R4_H_num ||
reg == R5_num || reg == R5_H_num ||
reg == R6_num || reg == R6_H_num ||
reg == R7_num || reg == R7_H_num ||
reg == V0_num || reg == V0_H_num ||
reg == V1_num || reg == V1_H_num ||
reg == V2_num || reg == V2_H_num ||
reg == V3_num || reg == V3_H_num ||
reg == V4_num || reg == V4_H_num ||
reg == V5_num || reg == V5_H_num ||
reg == V6_num || reg == V6_H_num ||
reg == V7_num || reg == V7_H_num;
}
bool Matcher::is_spillable_arg(int reg)
{
return can_be_java_arg(reg);
}
uint Matcher::int_pressure_limit()
{
// JDK-8183543: When taking the number of available registers as int
// register pressure threshold, the jtreg test:
// test/hotspot/jtreg/compiler/regalloc/TestC2IntPressure.java
// failed due to C2 compilation failure with
// "COMPILE SKIPPED: failed spill-split-recycle sanity check".
//
// A derived pointer is live at CallNode and then is flagged by RA
  // as a spilled LRG. Spilling heuristics (Spill-USE) explicitly skip
  // derived pointers and finally fail to spill after reaching the maximum
  // number of iterations. Lowering the default pressure threshold to
// (_NO_SPECIAL_REG32_mask.Size() minus 1) forces CallNode to become
// a high register pressure area of the code so that split_DEF can
// generate DefinitionSpillCopy for the derived pointer.
uint default_int_pressure_threshold = _NO_SPECIAL_REG32_mask.Size() - 1;
if (!PreserveFramePointer) {
    // When PreserveFramePointer is off, the frame pointer is allocatable,
    // but unlike other SOC registers, it is excluded from
    // fatproj's mask because its save type is No-Save. Decrease by 1 to
    // ensure high pressure at fatproj when PreserveFramePointer is off.
// See check_pressure_at_fatproj().
default_int_pressure_threshold--;
}
return (INTPRESSURE == -1) ? default_int_pressure_threshold : INTPRESSURE;
}
uint Matcher::float_pressure_limit()
{
// _FLOAT_REG_mask is generated by adlc from the float_reg register class.
return (FLOATPRESSURE == -1) ? _FLOAT_REG_mask.Size() : FLOATPRESSURE;
}
bool Matcher::use_asm_for_ldiv_by_con(jlong divisor) {
return false;
}
RegMask Matcher::divI_proj_mask() {
ShouldNotReachHere();
return RegMask();
}
// Register for MODI projection of divmodI.
RegMask Matcher::modI_proj_mask() {
ShouldNotReachHere();
return RegMask();
}
// Register for DIVL projection of divmodL.
RegMask Matcher::divL_proj_mask() {
ShouldNotReachHere();
return RegMask();
}
// Register for MODL projection of divmodL.
RegMask Matcher::modL_proj_mask() {
ShouldNotReachHere();
return RegMask();
}
const RegMask Matcher::method_handle_invoke_SP_save_mask() {
return FP_REG_mask();
}
bool size_fits_all_mem_uses(AddPNode* addp, int shift) {
for (DUIterator_Fast imax, i = addp->fast_outs(imax); i < imax; i++) {
Node* u = addp->fast_out(i);
if (u->is_LoadStore()) {
// On AArch64, LoadStoreNodes (i.e. compare and swap
// instructions) only take register indirect as an operand, so
// any attempt to use an AddPNode as an input to a LoadStoreNode
// must fail.
return false;
}
if (u->is_Mem()) {
int opsize = u->as_Mem()->memory_size();
assert(opsize > 0, "unexpected memory operand size");
      if (opsize != (1<<shift)) {
return false;
}
}
}
return true;
}
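// For example (a sketch): if an AddP with shift == 2 feeds both an ldrw
// (4-byte access) and an ldrb (1-byte access), the scaled-index mode only
// fits the 4-byte use, so this returns false and the address is computed
// into a register instead.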
// Convert BoolTest condition to Assembler condition.
// Replicate the logic of cmpOpOper::ccode() and cmpOpUOper::ccode().
Assembler::Condition to_assembler_cond(BoolTest::mask cond) {
Assembler::Condition result;
switch(cond) {
case BoolTest::eq:
result = Assembler::EQ; break;
case BoolTest::ne:
result = Assembler::NE; break;
case BoolTest::le:
result = Assembler::LE; break;
case BoolTest::ge:
result = Assembler::GE; break;
case BoolTest::lt:
result = Assembler::LT; break;
case BoolTest::gt:
result = Assembler::GT; break;
case BoolTest::ule:
result = Assembler::LS; break;
case BoolTest::uge:
result = Assembler::HS; break;
case BoolTest::ult:
result = Assembler::LO; break;
case BoolTest::ugt:
result = Assembler::HI; break;
case BoolTest::overflow:
result = Assembler::VS; break;
case BoolTest::no_overflow:
result = Assembler::VC; break;
default:
ShouldNotReachHere();
return Assembler::Condition(-1);
}
// Check conversion
if (cond & BoolTest::unsigned_compare) {
assert(cmpOpUOper((BoolTest::mask)((int)cond & ~(BoolTest::unsigned_compare))).ccode() == result, "Invalid conversion");
} else {
assert(cmpOpOper(cond).ccode() == result, "Invalid conversion");
}
return result;
}
// Binary src (Replicate con)
static bool is_valid_sve_arith_imm_pattern(Node* n, Node* m) {
if (n == nullptr || m == nullptr) {
return false;
}
if (UseSVE == 0 || m->Opcode() != Op_Replicate) {
return false;
}
Node* imm_node = m->in(1);
if (!imm_node->is_Con()) {
return false;
}
const Type* t = imm_node->bottom_type();
if (!(t->isa_int() || t->isa_long())) {
return false;
}
switch (n->Opcode()) {
case Op_AndV:
case Op_OrV:
case Op_XorV: {
Assembler::SIMD_RegVariant T = Assembler::elemType_to_regVariant(Matcher::vector_element_basic_type(n));
uint64_t value = t->isa_long() ? (uint64_t)imm_node->get_long() : (uint64_t)imm_node->get_int();
return Assembler::operand_valid_for_sve_logical_immediate(Assembler::regVariant_to_elemBits(T), value);
}
case Op_AddVB:
return (imm_node->get_int() <= 255 && imm_node->get_int() >= -255);
case Op_AddVS:
case Op_AddVI:
return Assembler::operand_valid_for_sve_add_sub_immediate((int64_t)imm_node->get_int());
case Op_AddVL:
return Assembler::operand_valid_for_sve_add_sub_immediate(imm_node->get_long());
default:
return false;
}
}
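// For example (a sketch): (AndV src (Replicate 0xff)) over int elements
// is accepted because 0xff is encodable as an SVE logical immediate,
// letting the constant fold into the instruction rather than being
// materialized in a vector register.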
// (XorV src (Replicate m1))
// (XorVMask src (MaskAll m1))
static bool is_vector_bitwise_not_pattern(Node* n, Node* m) {
if (n != nullptr && m != nullptr) {
return (n->Opcode() == Op_XorV || n->Opcode() == Op_XorVMask) &&
VectorNode::is_all_ones_vector(m);
}
return false;
}
// Should the matcher clone input 'm' of node 'n'?
bool Matcher::pd_clone_node(Node* n, Node* m, Matcher::MStack& mstack) {
if (is_vshift_con_pattern(n, m) ||
is_vector_bitwise_not_pattern(n, m) ||
is_valid_sve_arith_imm_pattern(n, m) ||
is_encode_and_store_pattern(n, m)) {
mstack.push(m, Visit);
return true;
}
return false;
}
// Should the Matcher clone shifts on addressing modes, expecting them
// to be subsumed into complex addressing expressions or compute them
// into registers?
bool Matcher::pd_clone_address_expressions(AddPNode* m, Matcher::MStack& mstack, VectorSet& address_visited) {
// Loads and stores with indirect memory input (e.g., volatile loads and
// stores) do not subsume the input into complex addressing expressions. If
// the addressing expression is input to at least one such load or store, do
// not clone the addressing expression. Query needs_acquiring_load and
// needs_releasing_store as a proxy for indirect memory input, as it is not
// possible to directly query for indirect memory input at this stage.
for (DUIterator_Fast imax, i = m->fast_outs(imax); i < imax; i++) {
Node* n = m->fast_out(i);
if (n->is_Load() && needs_acquiring_load(n)) {
return false;
}
if (n->is_Store() && needs_releasing_store(n)) {
return false;
}
}
if (clone_base_plus_offset_address(m, mstack, address_visited)) {
return true;
}
Node *off = m->in(AddPNode::Offset);
if (off->Opcode() == Op_LShiftL && off->in(2)->is_Con() &&
size_fits_all_mem_uses(m, off->in(2)->get_int()) &&
// Are there other uses besides address expressions?
!is_visited(off)) {
address_visited.set(off->_idx); // Flag as address_visited
mstack.push(off->in(2), Visit);
Node *conv = off->in(1);
if (conv->Opcode() == Op_ConvI2L &&
// Are there other uses besides address expressions?
!is_visited(conv)) {
address_visited.set(conv->_idx); // Flag as address_visited
mstack.push(conv->in(1), Pre_Visit);
} else {
mstack.push(conv, Pre_Visit);
}
address_visited.test_set(m->_idx); // Flag as address_visited
mstack.push(m->in(AddPNode::Address), Pre_Visit);
mstack.push(m->in(AddPNode::Base), Pre_Visit);
return true;
} else if (off->Opcode() == Op_ConvI2L &&
// Are there other uses besides address expressions?
!is_visited(off)) {
address_visited.test_set(m->_idx); // Flag as address_visited
address_visited.set(off->_idx); // Flag as address_visited
mstack.push(off->in(1), Pre_Visit);
mstack.push(m->in(AddPNode::Address), Pre_Visit);
mstack.push(m->in(AddPNode::Base), Pre_Visit);
return true;
}
return false;
}
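// For example (illustrative): cloning the shift lets
//   (AddP base (LShiftL (ConvI2L idx) 2))
// fold into a single scaled-index access such as
//   ldr w0, [base, w_idx, sxtw #2]
// instead of first computing the shifted index into a register.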
#define MOV_VOLATILE(REG, BASE, INDEX, SCALE, DISP, SCRATCH, INSN) \
{ \
guarantee(INDEX == -1, "mode not permitted for volatile"); \
guarantee(DISP == 0, "mode not permitted for volatile"); \
guarantee(SCALE == 0, "mode not permitted for volatile"); \
__ INSN(REG, as_Register(BASE)); \
}
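// The guarantees above reflect an architectural constraint: the
// ldar/stlr family only supports plain base-register addressing, with
// no index, scale, or displacement.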
static Address mem2address(int opcode, Register base, int index, int size, int disp)
{
Address::extend scale;
// Hooboy, this is fugly. We need a way to communicate to the
// encoder that the index needs to be sign extended, so we have to
// enumerate all the cases.
switch (opcode) {
case INDINDEXSCALEDI2L:
case INDINDEXSCALEDI2LN:
case INDINDEXI2L:
case INDINDEXI2LN:
scale = Address::sxtw(size);
break;
default:
scale = Address::lsl(size);
}
if (index == -1) {
return Address(base, disp);
} else {
assert(disp == 0, "unsupported address mode: disp = %d", disp);
return Address(base, as_Register(index), scale);
}
}
typedef void (MacroAssembler::* mem_insn)(Register Rt, const Address &adr);
typedef void (MacroAssembler::* mem_insn2)(Register Rt, Register adr);
typedef void (MacroAssembler::* mem_float_insn)(FloatRegister Rt, const Address &adr);
typedef void (MacroAssembler::* mem_vector_insn)(FloatRegister Rt,
MacroAssembler::SIMD_RegVariant T, const Address &adr);
// Used for all non-volatile memory accesses. The use of
// $mem->opcode() to discover whether this pattern uses a sign-extended
// index is something of a kludge.
static void loadStore(C2_MacroAssembler* masm, mem_insn insn,
Register reg, int opcode,
Register base, int index, int scale, int disp,
int size_in_memory)
{
Address addr = mem2address(opcode, base, index, scale, disp);
if (addr.getMode() == Address::base_plus_offset) {
/* Fix up any out-of-range offsets. */
assert_different_registers(rscratch1, base);
assert_different_registers(rscratch1, reg);
addr = __ legitimize_address(addr, size_in_memory, rscratch1);
}
(masm->*insn)(reg, addr);
}
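// Illustrative example of the fixup: an 8-byte str with offset 257 fits
// neither the scaled unsigned 12-bit form (offset must be a multiple of
// 8) nor the signed 9-bit unscaled form (range -256..255), so
// legitimize_address first materializes the address using rscratch1
// (a sketch; see MacroAssembler::legitimize_address).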
static void loadStore(C2_MacroAssembler* masm, mem_float_insn insn,
FloatRegister reg, int opcode,
Register base, int index, int size, int disp,
int size_in_memory)
{
Address::extend scale;
switch (opcode) {
case INDINDEXSCALEDI2L:
case INDINDEXSCALEDI2LN:
scale = Address::sxtw(size);
break;
default:
scale = Address::lsl(size);
}
if (index == -1) {
// Fix up any out-of-range offsets.
assert_different_registers(rscratch1, base);
Address addr = Address(base, disp);
addr = __ legitimize_address(addr, size_in_memory, rscratch1);
(masm->*insn)(reg, addr);
} else {
assert(disp == 0, "unsupported address mode: disp = %d", disp);
(masm->*insn)(reg, Address(base, as_Register(index), scale));
}
}
static void loadStore(C2_MacroAssembler* masm, mem_vector_insn insn,
FloatRegister reg, MacroAssembler::SIMD_RegVariant T,
int opcode, Register base, int index, int size, int disp)
{
if (index == -1) {
(masm->*insn)(reg, T, Address(base, disp));
} else {
assert(disp == 0, "unsupported address mode");
(masm->*insn)(reg, T, Address(base, as_Register(index), Address::lsl(size)));
}
}
%}
//----------ENCODING BLOCK-----------------------------------------------------
// This block specifies the encoding classes used by the compiler to
// output byte streams. Encoding classes are parameterized macros
// used by Machine Instruction Nodes in order to generate the bit
// encoding of the instruction. Operands specify their base encoding
// interface with the interface keyword. Four interfaces are
// currently supported: REG_INTER, CONST_INTER, MEMORY_INTER, and
// COND_INTER. REG_INTER causes an operand to generate a function
// which returns its register number when queried. CONST_INTER causes
// an operand to generate a function which returns the value of the
// constant when queried. MEMORY_INTER causes an operand to generate
// four functions which return the Base Register, the Index Register,
// the Scale Value, and the Offset Value of the operand when queried.
// COND_INTER causes an operand to generate six functions which return
// the encoding code (i.e. the encoding bits for the instruction)
// associated with each basic boolean condition for a conditional
// instruction.
//
// Instructions specify two basic values for encoding. Again, a
// function is available to check if the constant displacement is an
// oop. They use the ins_encode keyword to specify their encoding
// classes (which must be a sequence of enc_class names, and their
// parameters, specified in the encoding block), and they use the
// opcode keyword to specify, in order, their primary, secondary, and
// tertiary opcode. Only the opcode sections which a particular
// instruction needs for encoding need to be specified.
encode %{
// Build emit functions for each basic byte or larger field in the
// Intel encoding scheme (opcode, rm, sib, immediate), and call them
// from C++ code in the enc_class source block. Emit functions will
// live in the main source block for now. In future, we can
// generalize this by adding a syntax that specifies the sizes of
// fields in an order, so that the adlc can build the emit functions
// automagically
// catch all for unimplemented encodings
enc_class enc_unimplemented %{
__ unimplemented("C2 catch all");
%}
// BEGIN Non-volatile memory access
// This encoding class is generated automatically from ad_encode.m4.
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
enc_class aarch64_enc_ldrsbw(iRegI dst, memory1 mem) %{
Register dst_reg = as_Register($dst$$reg);
loadStore(masm, &MacroAssembler::ldrsbw, dst_reg, $mem->opcode(),
as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 1);
%}
// This encoding class is generated automatically from ad_encode.m4.
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
enc_class aarch64_enc_ldrsb(iRegI dst, memory1 mem) %{
Register dst_reg = as_Register($dst$$reg);
loadStore(masm, &MacroAssembler::ldrsb, dst_reg, $mem->opcode(),
as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 1);
%}
// This encoding class is generated automatically from ad_encode.m4.
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
enc_class aarch64_enc_ldrb(iRegI dst, memory1 mem) %{
Register dst_reg = as_Register($dst$$reg);
loadStore(masm, &MacroAssembler::ldrb, dst_reg, $mem->opcode(),
as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 1);
%}
// This encoding class is generated automatically from ad_encode.m4.
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
enc_class aarch64_enc_ldrb(iRegL dst, memory1 mem) %{
Register dst_reg = as_Register($dst$$reg);
loadStore(masm, &MacroAssembler::ldrb, dst_reg, $mem->opcode(),
as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 1);
%}
// This encoding class is generated automatically from ad_encode.m4.
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
enc_class aarch64_enc_ldrshw(iRegI dst, memory2 mem) %{
Register dst_reg = as_Register($dst$$reg);
loadStore(masm, &MacroAssembler::ldrshw, dst_reg, $mem->opcode(),
as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 2);
%}
// This encoding class is generated automatically from ad_encode.m4.
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
enc_class aarch64_enc_ldrsh(iRegI dst, memory2 mem) %{
Register dst_reg = as_Register($dst$$reg);
loadStore(masm, &MacroAssembler::ldrsh, dst_reg, $mem->opcode(),
as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 2);
%}
// This encoding class is generated automatically from ad_encode.m4.
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
enc_class aarch64_enc_ldrh(iRegI dst, memory2 mem) %{
Register dst_reg = as_Register($dst$$reg);
loadStore(masm, &MacroAssembler::ldrh, dst_reg, $mem->opcode(),
as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 2);
%}
// This encoding class is generated automatically from ad_encode.m4.
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
enc_class aarch64_enc_ldrh(iRegL dst, memory2 mem) %{
Register dst_reg = as_Register($dst$$reg);
loadStore(masm, &MacroAssembler::ldrh, dst_reg, $mem->opcode(),
as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 2);
%}
// This encoding class is generated automatically from ad_encode.m4.
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
enc_class aarch64_enc_ldrw(iRegI dst, memory4 mem) %{
Register dst_reg = as_Register($dst$$reg);
loadStore(masm, &MacroAssembler::ldrw, dst_reg, $mem->opcode(),
as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 4);
%}
// This encoding class is generated automatically from ad_encode.m4.
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
enc_class aarch64_enc_ldrw(iRegL dst, memory4 mem) %{
Register dst_reg = as_Register($dst$$reg);
loadStore(masm, &MacroAssembler::ldrw, dst_reg, $mem->opcode(),
as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 4);
%}
// This encoding class is generated automatically from ad_encode.m4.
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
enc_class aarch64_enc_ldrsw(iRegL dst, memory4 mem) %{
Register dst_reg = as_Register($dst$$reg);
loadStore(masm, &MacroAssembler::ldrsw, dst_reg, $mem->opcode(),
as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 4);
%}
// This encoding class is generated automatically from ad_encode.m4.
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
enc_class aarch64_enc_ldr(iRegL dst, memory8 mem) %{
Register dst_reg = as_Register($dst$$reg);
loadStore(masm, &MacroAssembler::ldr, dst_reg, $mem->opcode(),
as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 8);
%}
// This encoding class is generated automatically from ad_encode.m4.
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
enc_class aarch64_enc_ldrs(vRegF dst, memory4 mem) %{
FloatRegister dst_reg = as_FloatRegister($dst$$reg);
loadStore(masm, &MacroAssembler::ldrs, dst_reg, $mem->opcode(),
as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 4);
%}
// This encoding class is generated automatically from ad_encode.m4.
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
enc_class aarch64_enc_ldrd(vRegD dst, memory8 mem) %{
FloatRegister dst_reg = as_FloatRegister($dst$$reg);
loadStore(masm, &MacroAssembler::ldrd, dst_reg, $mem->opcode(),
as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 8);
%}
// This encoding class is generated automatically from ad_encode.m4.
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
enc_class aarch64_enc_strb(iRegI src, memory1 mem) %{
Register src_reg = as_Register($src$$reg);
loadStore(masm, &MacroAssembler::strb, src_reg, $mem->opcode(),
as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 1);
%}
// This encoding class is generated automatically from ad_encode.m4.
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
enc_class aarch64_enc_strb0(memory1 mem) %{
loadStore(masm, &MacroAssembler::strb, zr, $mem->opcode(),
as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 1);
%}
// This encoding class is generated automatically from ad_encode.m4.
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
enc_class aarch64_enc_strh(iRegI src, memory2 mem) %{
Register src_reg = as_Register($src$$reg);
loadStore(masm, &MacroAssembler::strh, src_reg, $mem->opcode(),
as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 2);
%}
// This encoding class is generated automatically from ad_encode.m4.
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
enc_class aarch64_enc_strh0(memory2 mem) %{
loadStore(masm, &MacroAssembler::strh, zr, $mem->opcode(),
as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 2);
%}
// This encoding class is generated automatically from ad_encode.m4.
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
enc_class aarch64_enc_strw(iRegI src, memory4 mem) %{
Register src_reg = as_Register($src$$reg);
loadStore(masm, &MacroAssembler::strw, src_reg, $mem->opcode(),
as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 4);
%}
// This encoding class is generated automatically from ad_encode.m4.
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
enc_class aarch64_enc_strw0(memory4 mem) %{
loadStore(masm, &MacroAssembler::strw, zr, $mem->opcode(),
as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 4);
%}
// This encoding class is generated automatically from ad_encode.m4.
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
enc_class aarch64_enc_str(iRegL src, memory8 mem) %{
Register src_reg = as_Register($src$$reg);
// we sometimes get asked to store the stack pointer into the
// current thread -- we cannot do that directly on AArch64
if (src_reg == r31_sp) {
assert(as_Register($mem$$base) == rthread, "unexpected store for sp");
__ mov(rscratch2, sp);
src_reg = rscratch2;
}
loadStore(masm, &MacroAssembler::str, src_reg, $mem->opcode(),
as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 8);
%}
// This encoding class is generated automatically from ad_encode.m4.
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
enc_class aarch64_enc_str0(memory8 mem) %{
loadStore(masm, &MacroAssembler::str, zr, $mem->opcode(),
as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 8);
%}
// This encoding class is generated automatically from ad_encode.m4.
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
enc_class aarch64_enc_strs(vRegF src, memory4 mem) %{
FloatRegister src_reg = as_FloatRegister($src$$reg);
loadStore(masm, &MacroAssembler::strs, src_reg, $mem->opcode(),
as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 4);
%}
// This encoding class is generated automatically from ad_encode.m4.
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
enc_class aarch64_enc_strd(vRegD src, memory8 mem) %{
FloatRegister src_reg = as_FloatRegister($src$$reg);
loadStore(masm, &MacroAssembler::strd, src_reg, $mem->opcode(),
as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 8);
%}
// This encoding class is generated automatically from ad_encode.m4.
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
enc_class aarch64_enc_strb0_ordered(memory4 mem) %{
__ membar(Assembler::StoreStore);
loadStore(masm, &MacroAssembler::strb, zr, $mem->opcode(),
as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 1);
%}
// END Non-volatile memory access
// Vector loads and stores
enc_class aarch64_enc_ldrvH(vReg dst, memory mem) %{
FloatRegister dst_reg = as_FloatRegister($dst$$reg);
loadStore(masm, &MacroAssembler::ldr, dst_reg, MacroAssembler::H,
$mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
%}
enc_class aarch64_enc_ldrvS(vReg dst, memory mem) %{
FloatRegister dst_reg = as_FloatRegister($dst$$reg);
loadStore(masm, &MacroAssembler::ldr, dst_reg, MacroAssembler::S,
$mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
%}
enc_class aarch64_enc_ldrvD(vReg dst, memory mem) %{
FloatRegister dst_reg = as_FloatRegister($dst$$reg);
loadStore(masm, &MacroAssembler::ldr, dst_reg, MacroAssembler::D,
$mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
%}
enc_class aarch64_enc_ldrvQ(vReg dst, memory mem) %{
FloatRegister dst_reg = as_FloatRegister($dst$$reg);
loadStore(masm, &MacroAssembler::ldr, dst_reg, MacroAssembler::Q,
$mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
%}
enc_class aarch64_enc_strvH(vReg src, memory mem) %{
FloatRegister src_reg = as_FloatRegister($src$$reg);
loadStore(masm, &MacroAssembler::str, src_reg, MacroAssembler::H,
$mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
%}
enc_class aarch64_enc_strvS(vReg src, memory mem) %{
FloatRegister src_reg = as_FloatRegister($src$$reg);
loadStore(masm, &MacroAssembler::str, src_reg, MacroAssembler::S,
$mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
%}
enc_class aarch64_enc_strvD(vReg src, memory mem) %{
FloatRegister src_reg = as_FloatRegister($src$$reg);
loadStore(masm, &MacroAssembler::str, src_reg, MacroAssembler::D,
$mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
%}
enc_class aarch64_enc_strvQ(vReg src, memory mem) %{
FloatRegister src_reg = as_FloatRegister($src$$reg);
loadStore(masm, &MacroAssembler::str, src_reg, MacroAssembler::Q,
$mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp);
%}
// volatile loads and stores
enc_class aarch64_enc_stlrb(iRegI src, memory mem) %{
MOV_VOLATILE(as_Register($src$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
rscratch1, stlrb);
%}
enc_class aarch64_enc_stlrb0(memory mem) %{
MOV_VOLATILE(zr, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
rscratch1, stlrb);
%}
enc_class aarch64_enc_stlrh(iRegI src, memory mem) %{
MOV_VOLATILE(as_Register($src$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
rscratch1, stlrh);
%}
enc_class aarch64_enc_stlrh0(memory mem) %{
MOV_VOLATILE(zr, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
rscratch1, stlrh);
%}
enc_class aarch64_enc_stlrw(iRegI src, memory mem) %{
MOV_VOLATILE(as_Register($src$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
rscratch1, stlrw);
%}
enc_class aarch64_enc_stlrw0(memory mem) %{
MOV_VOLATILE(zr, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
rscratch1, stlrw);
%}
enc_class aarch64_enc_ldarsbw(iRegI dst, memory mem) %{
Register dst_reg = as_Register($dst$$reg);
MOV_VOLATILE(dst_reg, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
rscratch1, ldarb);
__ sxtbw(dst_reg, dst_reg);
%}
enc_class aarch64_enc_ldarsb(iRegL dst, memory mem) %{
Register dst_reg = as_Register($dst$$reg);
MOV_VOLATILE(dst_reg, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
rscratch1, ldarb);
__ sxtb(dst_reg, dst_reg);
%}
enc_class aarch64_enc_ldarbw(iRegI dst, memory mem) %{
MOV_VOLATILE(as_Register($dst$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
rscratch1, ldarb);
%}
enc_class aarch64_enc_ldarb(iRegL dst, memory mem) %{
MOV_VOLATILE(as_Register($dst$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
rscratch1, ldarb);
%}
enc_class aarch64_enc_ldarshw(iRegI dst, memory mem) %{
Register dst_reg = as_Register($dst$$reg);
MOV_VOLATILE(dst_reg, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
rscratch1, ldarh);
__ sxthw(dst_reg, dst_reg);
%}
enc_class aarch64_enc_ldarsh(iRegL dst, memory mem) %{
Register dst_reg = as_Register($dst$$reg);
MOV_VOLATILE(dst_reg, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
rscratch1, ldarh);
__ sxth(dst_reg, dst_reg);
%}
enc_class aarch64_enc_ldarhw(iRegI dst, memory mem) %{
MOV_VOLATILE(as_Register($dst$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
rscratch1, ldarh);
%}
enc_class aarch64_enc_ldarh(iRegL dst, memory mem) %{
MOV_VOLATILE(as_Register($dst$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
rscratch1, ldarh);
%}
enc_class aarch64_enc_ldarw(iRegI dst, memory mem) %{
MOV_VOLATILE(as_Register($dst$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
rscratch1, ldarw);
%}
enc_class aarch64_enc_ldarw(iRegL dst, memory mem) %{
MOV_VOLATILE(as_Register($dst$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
rscratch1, ldarw);
%}
enc_class aarch64_enc_ldar(iRegL dst, memory mem) %{
MOV_VOLATILE(as_Register($dst$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
rscratch1, ldar);
%}
enc_class aarch64_enc_fldars(vRegF dst, memory mem) %{
MOV_VOLATILE(rscratch1, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
rscratch1, ldarw);
__ fmovs(as_FloatRegister($dst$$reg), rscratch1);
%}
enc_class aarch64_enc_fldard(vRegD dst, memory mem) %{
MOV_VOLATILE(rscratch1, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
rscratch1, ldar);
__ fmovd(as_FloatRegister($dst$$reg), rscratch1);
%}
enc_class aarch64_enc_stlr(iRegL src, memory mem) %{
Register src_reg = as_Register($src$$reg);
// we sometimes get asked to store the stack pointer into the
// current thread -- we cannot do that directly on AArch64
if (src_reg == r31_sp) {
assert(as_Register($mem$$base) == rthread, "unexpected store for sp");
__ mov(rscratch2, sp);
src_reg = rscratch2;
}
MOV_VOLATILE(src_reg, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
rscratch1, stlr);
%}
enc_class aarch64_enc_stlr0(memory mem) %{
MOV_VOLATILE(zr, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
rscratch1, stlr);
%}
enc_class aarch64_enc_fstlrs(vRegF src, memory mem) %{
{
FloatRegister src_reg = as_FloatRegister($src$$reg);
__ fmovs(rscratch2, src_reg);
}
MOV_VOLATILE(rscratch2, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
rscratch1, stlrw);
%}
enc_class aarch64_enc_fstlrd(vRegD src, memory mem) %{
{
FloatRegister src_reg = as_FloatRegister($src$$reg);
__ fmovd(rscratch2, src_reg);
}
MOV_VOLATILE(rscratch2, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp,
rscratch1, stlr);
%}
// synchronized read/update encodings
enc_class aarch64_enc_ldaxr(iRegL dst, memory8 mem) %{
Register dst_reg = as_Register($dst$$reg);
Register base = as_Register($mem$$base);
int index = $mem$$index;
int scale = $mem$$scale;
int disp = $mem$$disp;
if (index == -1) {
if (disp != 0) {
__ lea(rscratch1, Address(base, disp));
__ ldaxr(dst_reg, rscratch1);
} else {
// TODO
// should we ever get anything other than this case?
__ ldaxr(dst_reg, base);
}
} else {
Register index_reg = as_Register(index);
if (disp == 0) {
__ lea(rscratch1, Address(base, index_reg, Address::lsl(scale)));
__ ldaxr(dst_reg, rscratch1);
} else {
__ lea(rscratch1, Address(base, disp));
__ lea(rscratch1, Address(rscratch1, index_reg, Address::lsl(scale)));
__ ldaxr(dst_reg, rscratch1);
}
}
%}
enc_class aarch64_enc_stlxr(iRegLNoSp src, memory8 mem) %{
Register src_reg = as_Register($src$$reg);
Register base = as_Register($mem$$base);
int index = $mem$$index;
int scale = $mem$$scale;
int disp = $mem$$disp;
if (index == -1) {
if (disp != 0) {
__ lea(rscratch2, Address(base, disp));
__ stlxr(rscratch1, src_reg, rscratch2);
} else {
// TODO
// should we ever get anything other than this case?
__ stlxr(rscratch1, src_reg, base);
}
} else {
Register index_reg = as_Register(index);
if (disp == 0) {
__ lea(rscratch2, Address(base, index_reg, Address::lsl(scale)));
__ stlxr(rscratch1, src_reg, rscratch2);
} else {
__ lea(rscratch2, Address(base, disp));
__ lea(rscratch2, Address(rscratch2, index_reg, Address::lsl(scale)));
__ stlxr(rscratch1, src_reg, rscratch2);
}
}
__ cmpw(rscratch1, zr);
%}
enc_class aarch64_enc_cmpxchg(memory mem, iRegLNoSp oldval, iRegLNoSp newval) %{
guarantee($mem$$index == -1 && $mem$$disp == 0, "impossible encoding");
__ cmpxchg($mem$$base$$Register, $oldval$$Register, $newval$$Register,
Assembler::xword, /*acquire*/ false, /*release*/ true,
/*weak*/ false, noreg);
%}
enc_class aarch64_enc_cmpxchgw(memory mem, iRegINoSp oldval, iRegINoSp newval) %{
guarantee($mem$$index == -1 && $mem$$disp == 0, "impossible encoding");
__ cmpxchg($mem$$base$$Register, $oldval$$Register, $newval$$Register,
Assembler::word, /*acquire*/ false, /*release*/ true,
/*weak*/ false, noreg);
%}
enc_class aarch64_enc_cmpxchgs(memory mem, iRegINoSp oldval, iRegINoSp newval) %{
guarantee($mem$$index == -1 && $mem$$disp == 0, "impossible encoding");
__ cmpxchg($mem$$base$$Register, $oldval$$Register, $newval$$Register,
Assembler::halfword, /*acquire*/ false, /*release*/ true,
/*weak*/ false, noreg);
%}
enc_class aarch64_enc_cmpxchgb(memory mem, iRegINoSp oldval, iRegINoSp newval) %{
guarantee($mem$$index == -1 && $mem$$disp == 0, "impossible encoding");
__ cmpxchg($mem$$base$$Register, $oldval$$Register, $newval$$Register,
Assembler::byte, /*acquire*/ false, /*release*/ true,
/*weak*/ false, noreg);
%}
// The only difference between aarch64_enc_cmpxchg and
// aarch64_enc_cmpxchg_acq is that we use load-acquire in the
// CompareAndSwap sequence to serve as a barrier on acquiring a
// lock.
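  // A sketch of the sequence the non-acquire variant typically expands
  // to when LSE atomics are unavailable (with LSE it is a single cas):
  //   retry:
  //     ldxr   tmp, [addr]
  //     cmp    tmp, oldval
  //     b.ne   done
  //     stlxr  w_status, newval, [addr]   // release store
  //     cbnz   w_status, retry
  //   done:
  // The acquire variants replace ldxr with ldaxr.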
enc_class aarch64_enc_cmpxchg_acq(memory mem, iRegLNoSp oldval, iRegLNoSp newval) %{
guarantee($mem$$index == -1 && $mem$$disp == 0, "impossible encoding");
__ cmpxchg($mem$$base$$Register, $oldval$$Register, $newval$$Register,
Assembler::xword, /*acquire*/ true, /*release*/ true,
/*weak*/ false, noreg);
%}
enc_class aarch64_enc_cmpxchgw_acq(memory mem, iRegINoSp oldval, iRegINoSp newval) %{
guarantee($mem$$index == -1 && $mem$$disp == 0, "impossible encoding");
__ cmpxchg($mem$$base$$Register, $oldval$$Register, $newval$$Register,
Assembler::word, /*acquire*/ true, /*release*/ true,
/*weak*/ false, noreg);
%}
enc_class aarch64_enc_cmpxchgs_acq(memory mem, iRegINoSp oldval, iRegINoSp newval) %{
guarantee($mem$$index == -1 && $mem$$disp == 0, "impossible encoding");
__ cmpxchg($mem$$base$$Register, $oldval$$Register, $newval$$Register,
Assembler::halfword, /*acquire*/ true, /*release*/ true,
/*weak*/ false, noreg);
%}
enc_class aarch64_enc_cmpxchgb_acq(memory mem, iRegINoSp oldval, iRegINoSp newval) %{
guarantee($mem$$index == -1 && $mem$$disp == 0, "impossible encoding");
__ cmpxchg($mem$$base$$Register, $oldval$$Register, $newval$$Register,
Assembler::byte, /*acquire*/ true, /*release*/ true,
/*weak*/ false, noreg);
%}
// auxiliary used for CompareAndSwapX to set result register
enc_class aarch64_enc_cset_eq(iRegINoSp res) %{
Register res_reg = as_Register($res$$reg);
__ cset(res_reg, Assembler::EQ);
%}
// prefetch encodings
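  // PSTL1KEEP = prefetch for store, into the L1 cache, temporal (keep).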
enc_class aarch64_enc_prefetchw(memory mem) %{
Register base = as_Register($mem$$base);
int index = $mem$$index;
int scale = $mem$$scale;
int disp = $mem$$disp;
if (index == -1) {
// Fix up any out-of-range offsets.
assert_different_registers(rscratch1, base);
Address addr = Address(base, disp);
addr = __ legitimize_address(addr, 8, rscratch1);
__ prfm(addr, PSTL1KEEP);
} else {
Register index_reg = as_Register(index);
if (disp == 0) {
__ prfm(Address(base, index_reg, Address::lsl(scale)), PSTL1KEEP);
} else {
__ lea(rscratch1, Address(base, disp));
__ prfm(Address(rscratch1, index_reg, Address::lsl(scale)), PSTL1KEEP);
}
}
%}
// mov encodings
enc_class aarch64_enc_movw_imm(iRegI dst, immI src) %{
uint32_t con = (uint32_t)$src$$constant;
Register dst_reg = as_Register($dst$$reg);
if (con == 0) {
__ movw(dst_reg, zr);
} else {
__ movw(dst_reg, con);
}
%}
enc_class aarch64_enc_mov_imm(iRegL dst, immL src) %{
Register dst_reg = as_Register($dst$$reg);
uint64_t con = (uint64_t)$src$$constant;
if (con == 0) {
__ mov(dst_reg, zr);
} else {
__ mov(dst_reg, con);
}
%}
enc_class aarch64_enc_mov_p(iRegP dst, immP src) %{
Register dst_reg = as_Register($dst$$reg);
address con = (address)$src$$constant;
if (con == nullptr || con == (address)1) {
ShouldNotReachHere();
} else {
relocInfo::relocType rtype = $src->constant_reloc();
if (rtype == relocInfo::oop_type) {
__ movoop(dst_reg, (jobject)con);
} else if (rtype == relocInfo::metadata_type) {
__ mov_metadata(dst_reg, (Metadata*)con);
} else {
assert(rtype == relocInfo::none, "unexpected reloc type");
if (! __ is_valid_AArch64_address(con) ||
con < (address)(uintptr_t)os::vm_page_size()) {
__ mov(dst_reg, con);
} else {
uint64_t offset;
__ adrp(dst_reg, con, offset);
__ add(dst_reg, dst_reg, offset);
}
}
}
%}
enc_class aarch64_enc_mov_p0(iRegP dst, immP0 src) %{
Register dst_reg = as_Register($dst$$reg);
__ mov(dst_reg, zr);
%}
enc_class aarch64_enc_mov_p1(iRegP dst, immP_1 src) %{
Register dst_reg = as_Register($dst$$reg);
__ mov(dst_reg, (uint64_t)1);
%}
enc_class aarch64_enc_mov_byte_map_base(iRegP dst, immByteMapBase src) %{
__ load_byte_map_base($dst$$Register);
%}
enc_class aarch64_enc_mov_n(iRegN dst, immN src) %{
Register dst_reg = as_Register($dst$$reg);
address con = (address)$src$$constant;
if (con == nullptr) {
ShouldNotReachHere();
} else {
relocInfo::relocType rtype = $src->constant_reloc();
assert(rtype == relocInfo::oop_type, "unexpected reloc type");
__ set_narrow_oop(dst_reg, (jobject)con);
}
%}
enc_class aarch64_enc_mov_n0(iRegN dst, immN0 src) %{
Register dst_reg = as_Register($dst$$reg);
__ mov(dst_reg, zr);
%}
enc_class aarch64_enc_mov_nk(iRegN dst, immNKlass src) %{
Register dst_reg = as_Register($dst$$reg);
address con = (address)$src$$constant;
if (con == nullptr) {
ShouldNotReachHere();
} else {
relocInfo::relocType rtype = $src->constant_reloc();
assert(rtype == relocInfo::metadata_type, "unexpected reloc type");
__ set_narrow_klass(dst_reg, (Klass *)con);
}
%}
// arithmetic encodings
enc_class aarch64_enc_addsubw_imm(iRegI dst, iRegI src1, immIAddSub src2) %{
Register dst_reg = as_Register($dst$$reg);
Register src_reg = as_Register($src1$$reg);
int32_t con = (int32_t)$src2$$constant;
// add has primary == 0, subtract has primary == 1
if ($primary) { con = -con; }
if (con < 0) {
__ subw(dst_reg, src_reg, -con);
} else {
__ addw(dst_reg, src_reg, con);
}
%}
enc_class aarch64_enc_addsub_imm(iRegL dst, iRegL src1, immLAddSub src2) %{
Register dst_reg = as_Register($dst$$reg);
Register src_reg = as_Register($src1$$reg);
int32_t con = (int32_t)$src2$$constant;
// add has primary == 0, subtract has primary == 1
if ($primary) { con = -con; }
if (con < 0) {
__ sub(dst_reg, src_reg, -con);
} else {
__ add(dst_reg, src_reg, con);
}
%}
enc_class aarch64_enc_divw(iRegI dst, iRegI src1, iRegI src2) %{
Register dst_reg = as_Register($dst$$reg);
Register src1_reg = as_Register($src1$$reg);
Register src2_reg = as_Register($src2$$reg);
__ corrected_idivl(dst_reg, src1_reg, src2_reg, false, rscratch1);
%}
enc_class aarch64_enc_div(iRegI dst, iRegI src1, iRegI src2) %{
Register dst_reg = as_Register($dst$$reg);
Register src1_reg = as_Register($src1$$reg);
Register src2_reg = as_Register($src2$$reg);
__ corrected_idivq(dst_reg, src1_reg, src2_reg, false, rscratch1);
%}
enc_class aarch64_enc_modw(iRegI dst, iRegI src1, iRegI src2) %{
Register dst_reg = as_Register($dst$$reg);
Register src1_reg = as_Register($src1$$reg);
Register src2_reg = as_Register($src2$$reg);
__ corrected_idivl(dst_reg, src1_reg, src2_reg, true, rscratch1);
%}
enc_class aarch64_enc_mod(iRegI dst, iRegI src1, iRegI src2) %{
Register dst_reg = as_Register($dst$$reg);
Register src1_reg = as_Register($src1$$reg);
Register src2_reg = as_Register($src2$$reg);
__ corrected_idivq(dst_reg, src1_reg, src2_reg, true, rscratch1);
%}
// compare instruction encodings
enc_class aarch64_enc_cmpw(iRegI src1, iRegI src2) %{
Register reg1 = as_Register($src1$$reg);
Register reg2 = as_Register($src2$$reg);
__ cmpw(reg1, reg2);
%}
enc_class aarch64_enc_cmpw_imm_addsub(iRegI src1, immIAddSub src2) %{
Register reg = as_Register($src1$$reg);
int32_t val = $src2$$constant;
if (val >= 0) {
__ subsw(zr, reg, val);
} else {
__ addsw(zr, reg, -val);
}
%}
enc_class aarch64_enc_cmpw_imm(iRegI src1, immI src2) %{
Register reg1 = as_Register($src1$$reg);
uint32_t val = (uint32_t)$src2$$constant;
__ movw(rscratch1, val);
__ cmpw(reg1, rscratch1);
%}
enc_class aarch64_enc_cmp(iRegL src1, iRegL src2) %{
Register reg1 = as_Register($src1$$reg);
Register reg2 = as_Register($src2$$reg);
__ cmp(reg1, reg2);
%}
enc_class aarch64_enc_cmp_imm_addsub(iRegL src1, immL12 src2) %{
Register reg = as_Register($src1$$reg);
int64_t val = $src2$$constant;
if (val >= 0) {
__ subs(zr, reg, val);
} else if (val != -val) {
__ adds(zr, reg, -val);
} else {
// aargh, Long.MIN_VALUE is a special case
__ orr(rscratch1, zr, (uint64_t)val);
__ subs(zr, reg, rscratch1);
}
%}
enc_class aarch64_enc_cmp_imm(iRegL src1, immL src2) %{
Register reg1 = as_Register($src1$$reg);
uint64_t val = (uint64_t)$src2$$constant;
__ mov(rscratch1, val);
__ cmp(reg1, rscratch1);
%}
enc_class aarch64_enc_cmpp(iRegP src1, iRegP src2) %{
Register reg1 = as_Register($src1$$reg);
Register reg2 = as_Register($src2$$reg);
__ cmp(reg1, reg2);
%}
enc_class aarch64_enc_cmpn(iRegN src1, iRegN src2) %{
Register reg1 = as_Register($src1$$reg);
Register reg2 = as_Register($src2$$reg);
__ cmpw(reg1, reg2);
%}
enc_class aarch64_enc_testp(iRegP src) %{
Register reg = as_Register($src$$reg);
__ cmp(reg, zr);
%}
enc_class aarch64_enc_testn(iRegN src) %{
Register reg = as_Register($src$$reg);
__ cmpw(reg, zr);
%}
enc_class aarch64_enc_b(label lbl) %{
Label *L = $lbl$$label;
__ b(*L);
%}
enc_class aarch64_enc_br_con(cmpOp cmp, label lbl) %{
Label *L = $lbl$$label;
__ br ((Assembler::Condition)$cmp$$cmpcode, *L);
%}
enc_class aarch64_enc_br_conU(cmpOpU cmp, label lbl) %{
Label *L = $lbl$$label;
__ br ((Assembler::Condition)$cmp$$cmpcode, *L);
%}
enc_class aarch64_enc_partial_subtype_check(iRegP sub, iRegP super, iRegP temp, iRegP result)
%{
Register sub_reg = as_Register($sub$$reg);
Register super_reg = as_Register($super$$reg);
Register temp_reg = as_Register($temp$$reg);
Register result_reg = as_Register($result$$reg);
Label miss;
__ check_klass_subtype_slow_path(sub_reg, super_reg, temp_reg, result_reg,
nullptr, &miss,
/*set_cond_codes:*/ true);
if ($primary) {
__ mov(result_reg, zr);
}
__ bind(miss);
%}
enc_class aarch64_enc_java_static_call(method meth) %{
address addr = (address)$meth$$method;
address call;
if (!_method) {
// A call to a runtime wrapper, e.g. new, new_typeArray_Java, uncommon_trap.
call = __ trampoline_call(Address(addr, relocInfo::runtime_call_type));
if (call == nullptr) {
ciEnv::current()->record_failure("CodeCache is full");
return;
}
} else if (_method->intrinsic_id() == vmIntrinsicID::_ensureMaterializedForStackWalk) {
// The NOP here is purely to ensure that eliding a call to
// JVM_EnsureMaterializedForStackWalk doesn't change the code size.
__ nop();
__ block_comment("call JVM_EnsureMaterializedForStackWalk (elided)");
} else {
int method_index = resolved_method_index(masm);
RelocationHolder rspec = _optimized_virtual ? opt_virtual_call_Relocation::spec(method_index)
: static_call_Relocation::spec(method_index);
call = __ trampoline_call(Address(addr, rspec));
if (call == nullptr) {
ciEnv::current()->record_failure("CodeCache is full");
return;
}
if (CodeBuffer::supports_shared_stubs() && _method->can_be_statically_bound()) {
// Calls of the same statically bound method can share
// a stub to the interpreter.
__ code()->shared_stub_to_interp_for(_method, call - __ begin());
} else {
// Emit stub for static call
address stub = CompiledDirectCall::emit_to_interp_stub(masm, call);
if (stub == nullptr) {
ciEnv::current()->record_failure("CodeCache is full");
return;
}
}
}
__ post_call_nop();
    // Only non-uncommon_trap calls need to reinitialize ptrue.
if (Compile::current()->max_vector_size() > 0 && uncommon_trap_request() == 0) {
__ reinitialize_ptrue();
}
%}
enc_class aarch64_enc_java_dynamic_call(method meth) %{
int method_index = resolved_method_index(masm);
address call = __ ic_call((address)$meth$$method, method_index);
if (call == nullptr) {
ciEnv::current()->record_failure("CodeCache is full");
return;
}
__ post_call_nop();
if (Compile::current()->max_vector_size() > 0) {
__ reinitialize_ptrue();
}
%}
enc_class aarch64_enc_call_epilog() %{
if (VerifyStackAtCalls) {
// Check that stack depth is unchanged: find magic cookie on stack
__ call_Unimplemented();
}
%}
enc_class aarch64_enc_java_to_runtime(method meth) %{
// Some calls to generated routines (arraycopy code) are scheduled by
// C2 as runtime calls. If so, we can reach them with a direct branch
// (they will be in a reachable segment); otherwise we have to use a
// blr, which first loads the absolute address into a register.
address entry = (address)$meth$$method;
CodeBlob *cb = CodeCache::find_blob(entry);
if (cb) {
address call = __ trampoline_call(Address(entry, relocInfo::runtime_call_type));
if (call == nullptr) {
ciEnv::current()->record_failure("CodeCache is full");
return;
}
__ post_call_nop();
} else {
Label retaddr;
// Make the anchor frame walkable
__ adr(rscratch2, retaddr);
__ str(rscratch2, Address(rthread, JavaThread::last_Java_pc_offset()));
__ lea(rscratch1, RuntimeAddress(entry));
__ blr(rscratch1);
__ bind(retaddr);
__ post_call_nop();
}
if (Compile::current()->max_vector_size() > 0) {
__ reinitialize_ptrue();
}
%}
enc_class aarch64_enc_rethrow() %{
__ far_jump(RuntimeAddress(OptoRuntime::rethrow_stub()));
%}
enc_class aarch64_enc_ret() %{
#ifdef ASSERT
if (Compile::current()->max_vector_size() > 0) {
__ verify_ptrue();
}
#endif
__ ret(lr);
%}
enc_class aarch64_enc_tail_call(iRegP jump_target) %{
Register target_reg = as_Register($jump_target$$reg);
__ br(target_reg);
%}
enc_class aarch64_enc_tail_jmp(iRegP jump_target) %{
Register target_reg = as_Register($jump_target$$reg);
// exception oop should be in r0
// ret addr has been popped into lr
// callee expects it in r3
__ mov(r3, lr);
__ br(target_reg);
%}
%}
//----------FRAME--------------------------------------------------------------
// Definition of frame structure and management information.
//
// S T A C K L A Y O U T Allocators stack-slot number
// | (to get allocators register number
// G Owned by | | v add OptoReg::stack0())
// r CALLER | |
// o | +--------+ pad to even-align allocators stack-slot
// w V | pad0 | numbers; owned by CALLER
// t -----------+--------+----> Matcher::_in_arg_limit, unaligned
// h ^ | in | 5
// | | args | 4 Holes in incoming args owned by SELF
// | | | | 3
// | | +--------+
// V | | old out| Empty on Intel, window on Sparc
// | old |preserve| Must be even aligned.
// | SP-+--------+----> Matcher::_old_SP, even aligned
// | | in | 3 area for Intel ret address
// Owned by |preserve| Empty on Sparc.
// SELF +--------+
// | | pad2 | 2 pad to align old SP
// | +--------+ 1
// | | locks | 0
// | +--------+----> OptoReg::stack0(), even aligned
// | | pad1 | 11 pad to align new SP
// | +--------+
// | | | 10
// | | spills | 9 spills
// V | | 8 (pad0 slot for callee)
// -----------+--------+----> Matcher::_out_arg_limit, unaligned
// ^ | out | 7
// | | args | 6 Holes in outgoing args owned by CALLEE
// Owned by +--------+
// CALLEE | new out| 6 Empty on Intel, window on Sparc
// | new |preserve| Must be even-aligned.
// | SP-+--------+----> Matcher::_new_SP, even aligned
// | | |
//
// Note 1: Only region 8-11 is determined by the allocator. Region 0-5 is
// known from SELF's arguments and the Java calling convention.
// Region 6-7 is determined per call site.
// Note 2: If the calling convention leaves holes in the incoming argument
// area, those holes are owned by SELF. Holes in the outgoing area
// are owned by the CALLEE. Holes should not be necessary in the
// incoming area, as the Java calling convention is completely under
// the control of the AD file. Doubles can be sorted and packed to
// avoid holes. Holes in the outgoing arguments may be necessary for
// varargs C calling conventions.
// Note 3: Region 0-3 is even aligned, with pad2 as needed. Region 3-5 is
// even aligned with pad0 as needed.
// Region 6 is even aligned. Region 6-7 is NOT even aligned
// (the latter is true on Intel, but is it false on AArch64?).
// Region 6-11 is even aligned; it may be padded out more so that
// the region from SP to FP meets the minimum stack alignment.
// Note 4: For I2C adapters, the incoming FP may not meet the minimum stack
// alignment. Region 11, pad1, may be dynamically extended so that
// SP meets the minimum alignment.
frame %{
// These three registers define part of the calling convention
// between compiled code and the interpreter.
// Inline Cache Register or Method for I2C.
inline_cache_reg(R12);
// Number of stack slots consumed by locking an object
sync_stack_slots(2);
// Compiled code's Frame Pointer
frame_pointer(R31);
// Interpreter stores its frame pointer in a register which is
// stored to the stack by I2CAdaptors.
// I2CAdaptors convert from interpreted java to compiled java.
interpreter_frame_pointer(R29);
// Stack alignment requirement
stack_alignment(StackAlignmentInBytes); // Alignment size in bytes (128-bit -> 16 bytes)
// Number of outgoing stack slots killed above the out_preserve_stack_slots
// for calls to C. Supports the var-args backing area for register parms.
varargs_C_out_slots_killed(frame::arg_reg_save_area_bytes/BytesPerInt);
// The after-PROLOG location of the return address. Location of
// return address specifies a type (REG or STACK) and a number
// representing the register number (i.e. use a register name) or
// stack slot.
// Ret Addr is on stack in slot 0 if no locks or verification or alignment.
// Otherwise, it is above the locks and verification slot and alignment word.
// TODO: this may well be correct, but we need to check why the "- 2" is there;
// the ppc port uses 0, but we definitely need to allow for fixed_slots,
// which folds in the space used for monitors
return_addr(STACK - 2 +
align_up((Compile::current()->in_preserve_stack_slots() +
Compile::current()->fixed_slots()),
stack_alignment_in_slots()));
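// Worked example (values illustrative): with 16-byte stack alignment,
// stack_alignment_in_slots() == 4; if in_preserve_stack_slots() == 4 and
// fixed_slots() == 2, then align_up(4 + 2, 4) == 8 and the return address
// sits at stack slot (-2 + 8) == 6.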
// Location of compiled Java return values. Same as C for now.
return_value
%{
// TODO do we allow ideal_reg == Op_RegN???
assert(ideal_reg >= Op_RegI && ideal_reg <= Op_RegL,
"only return normal values");
static const int lo[Op_RegL + 1] = { // enum name
0, // Op_Node
0, // Op_Set
R0_num, // Op_RegN
R0_num, // Op_RegI
R0_num, // Op_RegP
V0_num, // Op_RegF
V0_num, // Op_RegD
R0_num // Op_RegL
};
static const int hi[Op_RegL + 1] = { // enum name
0, // Op_Node
0, // Op_Set
OptoReg::Bad, // Op_RegN
OptoReg::Bad, // Op_RegI
R0_H_num, // Op_RegP
OptoReg::Bad, // Op_RegF
V0_H_num, // Op_RegD
R0_H_num // Op_RegL
};
return OptoRegPair(hi[ideal_reg], lo[ideal_reg]);
%}
%}
//----------ATTRIBUTES---------------------------------------------------------
//----------Operand Attributes-------------------------------------------------
op_attrib op_cost(1); // Required cost attribute
//----------Instruction Attributes---------------------------------------------
ins_attrib ins_cost(INSN_COST); // Required cost attribute
ins_attrib ins_size(32); // Required size attribute (in bits)
ins_attrib ins_short_branch(0); // Required flag: is this instruction
// a non-matching short branch variant
// of some long branch?
ins_attrib ins_alignment(4); // Required alignment attribute (must
// be a power of 2) specifies the
// alignment that some part of the
// instruction (not necessarily the
// start) requires. If > 1, a
// compute_padding() function must be
// provided for the instruction
// Whether this node is expanded during code emission into a sequence of
// instructions and the first instruction can perform an implicit null check.
ins_attrib ins_is_late_expanded_null_check_candidate(false);
//----------OPERANDS-----------------------------------------------------------
// Operand definitions must precede instruction definitions for correct parsing
// in the ADLC because operands constitute user defined types which are used in
// instruction definitions.
//----------Simple Operands----------------------------------------------------
// Integer operands 32 bit
// 32 bit immediate
operand immI()
%{
match(ConI);
op_cost(0);
format %{ %}
interface(CONST_INTER);
%}
// 32 bit zero
operand immI0()
%{
predicate(n->get_int() == 0);
match(ConI);
op_cost(0);
format %{ %}
interface(CONST_INTER);
%}
// 32 bit unit increment
operand immI_1()
%{
predicate(n->get_int() == 1);
match(ConI);
op_cost(0);
format %{ %}
interface(CONST_INTER);
%}
// 32 bit unit decrement
operand immI_M1()
%{
predicate(n->get_int() == -1);
match(ConI);
op_cost(0);
format %{ %}
interface(CONST_INTER);
%}
// Shift values for add/sub extension shift
operand immIExt()
%{
predicate(0 <= n->get_int() && (n->get_int() <= 4));
match(ConI);
op_cost(0);
format %{ %}
interface(CONST_INTER);
%}
operand immI_gt_1()
%{
predicate(n->get_int() > 1);
match(ConI);
op_cost(0);
format %{ %}
interface(CONST_INTER);
%}
operand immI_le_4()
%{
predicate(n->get_int() <= 4);
match(ConI);
op_cost(0);
format %{ %}
interface(CONST_INTER);
%}
operand immI_16()
%{
predicate(n->get_int() == 16);
match(ConI);
op_cost(0);
format %{ %}
interface(CONST_INTER);
%}
operand immI_24()
%{
predicate(n->get_int() == 24);
match(ConI);
op_cost(0);
format %{ %}
interface(CONST_INTER);
%}
operand immI_32()
%{
predicate(n->get_int() == 32);
match(ConI);
op_cost(0);
format %{ %}
interface(CONST_INTER);
%}
operand immI_48()
%{
predicate(n->get_int() == 48);
match(ConI);
op_cost(0);
format %{ %}
interface(CONST_INTER);
%}
operand immI_56()
%{
predicate(n->get_int() == 56);
match(ConI);
op_cost(0);
format %{ %}
interface(CONST_INTER);
%}
operand immI_255()
%{
predicate(n->get_int() == 255);
match(ConI);
op_cost(0);
format %{ %}
interface(CONST_INTER);
%}
operand immI_65535()
%{
predicate(n->get_int() == 65535);
match(ConI);
op_cost(0);
format %{ %}
interface(CONST_INTER);
%}
operand immI_positive()
%{
predicate(n->get_int() > 0);
match(ConI);
op_cost(0);
format %{ %}
interface(CONST_INTER);
%}
// BoolTest condition for signed compare
operand immI_cmp_cond()
%{
predicate(!Matcher::is_unsigned_booltest_pred(n->get_int()));
match(ConI);
op_cost(0);
format %{ %}
interface(CONST_INTER);
%}
// BoolTest condition for unsigned compare
operand immI_cmpU_cond()
%{
predicate(Matcher::is_unsigned_booltest_pred(n->get_int()));
match(ConI);
op_cost(0);
format %{ %}
interface(CONST_INTER);
%}
operand immL_255()
%{
predicate(n->get_long() == 255L);
match(ConL);
op_cost(0);
format %{ %}
interface(CONST_INTER);
%}
operand immL_65535()
%{
predicate(n->get_long() == 65535L);
match(ConL);
op_cost(0);
format %{ %}
interface(CONST_INTER);
%}
operand immL_4294967295()
%{
predicate(n->get_long() == 4294967295L);
match(ConL);
op_cost(0);
format %{ %}
interface(CONST_INTER);
%}
operand immL_bitmask()
%{
predicate((n->get_long() != 0)
&& ((n->get_long() & 0xc000000000000000l) == 0)
&& is_power_of_2(n->get_long() + 1));
match(ConL);
op_cost(0);
format %{ %}
interface(CONST_INTER);
%}
operand immI_bitmask()
%{
predicate((n->get_int() != 0)
&& ((n->get_int() & 0xc0000000) == 0)
&& is_power_of_2(n->get_int() + 1));
match(ConI);
op_cost(0);
format %{ %}
interface(CONST_INTER);
%}
operand immL_positive_bitmaskI()
%{
predicate((n->get_long() != 0)
&& ((julong)n->get_long() < 0x80000000ULL)
&& is_power_of_2(n->get_long() + 1));
match(ConL);
op_cost(0);
format %{ %}
interface(CONST_INTER);
%}
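// Example values for the three bitmask operands above (a sketch, derived
// from the predicates): immL_bitmask matches contiguous low-order masks of
// the form 2^n - 1 with the top two bits clear, e.g. 0xff, 0xffff,
// 0x3fffffffffffffff; immI_bitmask matches the 32 bit analogues up to
// 0x3fffffff; immL_positive_bitmaskI further requires the mask to fit in
// 31 bits, so 0x7fffffff is the largest match.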
// Scale values for scaled offset addressing modes (up to long but not quad)
operand immIScale()
%{
predicate(0 <= n->get_int() && (n->get_int() <= 3));
match(ConI);
op_cost(0);
format %{ %}
interface(CONST_INTER);
%}
// 5 bit signed integer
operand immI5()
%{
predicate(Assembler::is_simm(n->get_int(), 5));
match(ConI);
op_cost(0);
format %{ %}
interface(CONST_INTER);
%}
// 7 bit unsigned integer
operand immIU7()
%{
predicate(Assembler::is_uimm(n->get_int(), 7));
match(ConI);
op_cost(0);
format %{ %}
interface(CONST_INTER);
%}
// Offset for scaled or unscaled immediate loads and stores
operand immIOffset()
%{
predicate(Address::offset_ok_for_immed(n->get_int(), 0));
match(ConI);
op_cost(0);
format %{ %}
interface(CONST_INTER);
%}
operand immIOffset1()
%{
predicate(Address::offset_ok_for_immed(n->get_int(), 0));
match(ConI);
op_cost(0);
format %{ %}
interface(CONST_INTER);
%}
operand immIOffset2()
%{
predicate(Address::offset_ok_for_immed(n->get_int(), 1));
match(ConI);
op_cost(0);
format %{ %}
interface(CONST_INTER);
%}
operand immIOffset4()
%{
predicate(Address::offset_ok_for_immed(n->get_int(), 2));
match(ConI);
op_cost(0);
format %{ %}
interface(CONST_INTER);
%}
operand immIOffset8()
%{
predicate(Address::offset_ok_for_immed(n->get_int(), 3));
match(ConI);
op_cost(0);
format %{ %}
interface(CONST_INTER);
%}
operand immIOffset16()
%{
predicate(Address::offset_ok_for_immed(n->get_int(), 4));
match(ConI);
op_cost(0);
format %{ %}
interface(CONST_INTER);
%}
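// For reference (a sketch): Address::offset_ok_for_immed(offset, shift)
// follows the usual AArch64 load/store immediate encodings. An offset is
// accepted either as a 9-bit signed unscaled immediate (-256..255, the
// LDUR/STUR form) or as a 12-bit unsigned immediate scaled by the access
// size, e.g. for shift == 3 (8-byte accesses) any multiple of 8 in
// 0..32760 (the LDR/STR form).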
operand immLOffset()
%{
predicate(n->get_long() >= -256 && n->get_long() <= 65520);
match(ConL);
op_cost(0);
format %{ %}
interface(CONST_INTER);
%}
operand immLoffset1()
%{
predicate(Address::offset_ok_for_immed(n->get_long(), 0));
match(ConL);
op_cost(0);
format %{ %}
interface(CONST_INTER);
%}
operand immLoffset2()
%{
predicate(Address::offset_ok_for_immed(n->get_long(), 1));
match(ConL);
op_cost(0);
format %{ %}
interface(CONST_INTER);
%}
operand immLoffset4()
%{
predicate(Address::offset_ok_for_immed(n->get_long(), 2));
match(ConL);
op_cost(0);
format %{ %}
interface(CONST_INTER);
%}
operand immLoffset8()
%{
predicate(Address::offset_ok_for_immed(n->get_long(), 3));
match(ConL);
op_cost(0);
format %{ %}
interface(CONST_INTER);
%}
operand immLoffset16()
%{
predicate(Address::offset_ok_for_immed(n->get_long(), 4));
match(ConL);
op_cost(0);
format %{ %}
interface(CONST_INTER);
%}
// 5 bit signed long integer
operand immL5()
%{
predicate(Assembler::is_simm(n->get_long(), 5));
match(ConL);
op_cost(0);
format %{ %}
interface(CONST_INTER);
%}
// 7 bit unsigned long integer
operand immLU7()
%{
predicate(Assembler::is_uimm(n->get_long(), 7));
match(ConL);
op_cost(0);
format %{ %}
interface(CONST_INTER);
%}
// 8 bit signed value.
operand immI8()
%{
predicate(n->get_int() <= 127 && n->get_int() >= -128);
match(ConI);
op_cost(0);
format %{ %}
interface(CONST_INTER);
%}
// 8 bit signed value (simm8), or #simm8 LSL 8.
operand immI8_shift8()
%{
predicate((n->get_int() <= 127 && n->get_int() >= -128) ||
(n->get_int() <= 32512 && n->get_int() >= -32768 && (n->get_int() & 0xff) == 0));
match(ConI);
op_cost(0);
format %{ %}
interface(CONST_INTER);
%}
// 8 bit signed value (simm8), or #simm8 LSL 8.
operand immL8_shift8()
%{
predicate((n->get_long() <= 127 && n->get_long() >= -128) ||
(n->get_long() <= 32512 && n->get_long() >= -32768 && (n->get_long() & 0xff) == 0));
match(ConL);
op_cost(0);
format %{ %}
interface(CONST_INTER);
%}
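// Example values for the two shift8 operands above (illustrative): -128..127
// match directly as #simm8, while multiples of 256 such as 0x100, 0x7f00 and
// -32768 match as #simm8, LSL #8, the two forms accepted by instructions
// that take an optionally shifted 8-bit immediate.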
// 8 bit integer valid for vector add sub immediate
operand immBAddSubV()
%{
predicate(n->get_int() <= 255 && n->get_int() >= -255);
match(ConI);
op_cost(0);
format %{ %}
interface(CONST_INTER);
%}
// 32 bit integer valid for add sub immediate
operand immIAddSub()
%{
predicate(Assembler::operand_valid_for_add_sub_immediate((int64_t)n->get_int()));
match(ConI);
op_cost(0);
format %{ %}
interface(CONST_INTER);
%}
// 32 bit integer valid for vector add sub immediate
operand immIAddSubV()
%{
predicate(Assembler::operand_valid_for_sve_add_sub_immediate((int64_t)n->get_int()));
match(ConI);
op_cost(0);
format %{ %}
interface(CONST_INTER);
%}
// Unsigned integers valid for logical immediates: 8 and 16 bit element
// widths (SVE) and 32 bit (scalar)
operand immBLog()
%{
predicate(Assembler::operand_valid_for_sve_logical_immediate(BitsPerByte, (uint64_t)n->get_int()));
match(ConI);
op_cost(0);
format %{ %}
interface(CONST_INTER);
%}
operand immSLog()
%{
predicate(Assembler::operand_valid_for_sve_logical_immediate(BitsPerShort, (uint64_t)n->get_int()));
match(ConI);
op_cost(0);
format %{ %}
interface(CONST_INTER);
%}
operand immILog()
%{
predicate(Assembler::operand_valid_for_logical_immediate(/*is32*/true, (uint64_t)n->get_int()));
match(ConI);
op_cost(0);
format %{ %}
interface(CONST_INTER);
%}
// Integer operands 64 bit
// 64 bit immediate
operand immL()
%{
match(ConL);
op_cost(0);
format %{ %}
interface(CONST_INTER);
%}
// 64 bit zero
operand immL0()
%{
predicate(n->get_long() == 0);
match(ConL);
op_cost(0);
format %{ %}
interface(CONST_INTER);
%}
// 64 bit unit decrement
operand immL_M1()
%{
predicate(n->get_long() == -1);
match(ConL);
op_cost(0);
format %{ %}
interface(CONST_INTER);
%}
// 64 bit integer valid for add sub immediate
operand immLAddSub()
%{
predicate(Assembler::operand_valid_for_add_sub_immediate(n->get_long()));
match(ConL);
op_cost(0);
format %{ %}
interface(CONST_INTER);
%}
// 64 bit integer valid for addv subv immediate
operand immLAddSubV()
%{
predicate(Assembler::operand_valid_for_sve_add_sub_immediate(n->get_long()));
match(ConL);
op_cost(0);
format %{ %}
interface(CONST_INTER);
%}
// 64 bit integer valid for logical immediate
operand immLLog()
%{
predicate(Assembler::operand_valid_for_logical_immediate(/*is32*/false, (uint64_t)n->get_long()));
match(ConL);
op_cost(0);
format %{ %}
interface(CONST_INTER);
%}
// Long Immediate: low 32-bit mask
operand immL_32bits()
%{
predicate(n->get_long() == 0xFFFFFFFFL);
match(ConL);
op_cost(0);
format %{ %}
interface(CONST_INTER);
%}
// Pointer operands
// Pointer Immediate
operand immP()
%{
match(ConP);
op_cost(0);
format %{ %}
interface(CONST_INTER);
%}
// nullptr Pointer Immediate
operand immP0()
%{
predicate(n->get_ptr() == 0);
match(ConP);
op_cost(0);
format %{ %}
interface(CONST_INTER);
%}
// Pointer Immediate One
// this is used in object initialization (initial object header)
operand immP_1()
%{
predicate(n->get_ptr() == 1);
match(ConP);
op_cost(0);
format %{ %}
interface(CONST_INTER);
%}
// Card Table Byte Map Base
operand immByteMapBase()
%{
// Get base of card map
predicate(BarrierSet::barrier_set()->is_a(BarrierSet::CardTableBarrierSet) &&
SHENANDOAHGC_ONLY(!BarrierSet::barrier_set()->is_a(BarrierSet::ShenandoahBarrierSet) &&)
(CardTable::CardValue*)n->get_ptr() == ((CardTableBarrierSet*)(BarrierSet::barrier_set()))->card_table()->byte_map_base());
match(ConP);
op_cost(0);
format %{ %}
interface(CONST_INTER);
%}
// Float and Double operands
// Double Immediate
operand immD()
%{
match(ConD);
op_cost(0);
format %{ %}
interface(CONST_INTER);
%}
// Double Immediate: +0.0d
operand immD0()
%{
predicate(jlong_cast(n->getd()) == 0);
match(ConD);
op_cost(0);
format %{ %}
interface(CONST_INTER);
%}
// Double constant encodable as an 8-bit packed floating-point immediate.
operand immDPacked()
%{
predicate(Assembler::operand_valid_for_float_immediate(n->getd()));
match(ConD);
op_cost(0);
format %{ %}
interface(CONST_INTER);
%}
// Float Immediate
operand immF()
%{
match(ConF);
op_cost(0);
format %{ %}
interface(CONST_INTER);
%}
// Float Immediate: +0.0f.
operand immF0()
%{
predicate(jint_cast(n->getf()) == 0);
match(ConF);
op_cost(0);
format %{ %}
interface(CONST_INTER);
%}
// Half Float (FP16) Immediate
operand immH()
%{
match(ConH);
op_cost(0);
format %{ %}
interface(CONST_INTER);
%}
// Float constant encodable as an 8-bit packed floating-point immediate.
operand immFPacked()
%{
predicate(Assembler::operand_valid_for_float_immediate((double)n->getf()));
match(ConF);
op_cost(0);
format %{ %}
interface(CONST_INTER);
%}
// Narrow pointer operands
// Narrow Pointer Immediate
operand immN()
%{
match(ConN);
op_cost(0);
format %{ %}
interface(CONST_INTER);
%}
// Narrow nullptr Pointer Immediate
operand immN0()
%{
predicate(n->get_narrowcon() == 0);
match(ConN);
op_cost(0);
format %{ %}
interface(CONST_INTER);
%}
operand immNKlass()
%{
match(ConNKlass);
op_cost(0);
format %{ %}
interface(CONST_INTER);
%}
// Integer 32 bit Register Operands
// Integer 32 bit Register (excludes SP)
operand iRegI()
%{
constraint(ALLOC_IN_RC(any_reg32));
match(RegI);
match(iRegINoSp);
op_cost(0);
format %{ %}
interface(REG_INTER);
%}
// Integer 32 bit Register not Special
operand iRegINoSp()
%{
constraint(ALLOC_IN_RC(no_special_reg32));
match(RegI);
op_cost(0);
format %{ %}
interface(REG_INTER);
%}
// Integer 64 bit Register Operands
// Integer 64 bit Register (includes SP)
operand iRegL()
%{
constraint(ALLOC_IN_RC(any_reg));
match(RegL);
match(iRegLNoSp);
op_cost(0);
format %{ %}
interface(REG_INTER);
%}
// Integer 64 bit Register not Special
operand iRegLNoSp()
%{
constraint(ALLOC_IN_RC(no_special_reg));
match(RegL);
match(iRegL_R0);
op_cost(0);
format %{ %}
interface(REG_INTER);
%}
// Pointer Register Operands
// Pointer Register
operand iRegP()
%{
constraint(ALLOC_IN_RC(ptr_reg));
match(RegP);
match(iRegPNoSp);
match(iRegP_R0);
//match(iRegP_R2);
//match(iRegP_R4);
match(iRegP_R5);
match(thread_RegP);
op_cost(0);
format %{ %}
interface(REG_INTER);
%}
// Pointer 64 bit Register not Special
operand iRegPNoSp()
%{
constraint(ALLOC_IN_RC(no_special_ptr_reg));
match(RegP);
// match(iRegP);
// match(iRegP_R0);
// match(iRegP_R2);
// match(iRegP_R4);
// match(iRegP_R5);
// match(thread_RegP);
op_cost(0);
format %{ %}
interface(REG_INTER);
%}
// This operand is not allowed to use rfp even if
// rfp is not used to hold the frame pointer.
operand iRegPNoSpNoRfp()
%{
constraint(ALLOC_IN_RC(no_special_no_rfp_ptr_reg));
match(RegP);
match(iRegPNoSp);
op_cost(0);
format %{ %}
interface(REG_INTER);
%}
// Pointer 64 bit Register R0 only
operand iRegP_R0()
%{
constraint(ALLOC_IN_RC(r0_reg));
match(RegP);
// match(iRegP);
match(iRegPNoSp);
op_cost(0);
format %{ %}
interface(REG_INTER);
%}
// Pointer 64 bit Register R1 only
operand iRegP_R1()
%{
constraint(ALLOC_IN_RC(r1_reg));
match(RegP);
// match(iRegP);
match(iRegPNoSp);
op_cost(0);
format %{ %}
interface(REG_INTER);
%}
// Pointer 64 bit Register R2 only
operand iRegP_R2()
%{
constraint(ALLOC_IN_RC(r2_reg));
match(RegP);
// match(iRegP);
match(iRegPNoSp);
op_cost(0);
format %{ %}
interface(REG_INTER);
%}
// Pointer 64 bit Register R3 only
operand iRegP_R3()
%{
constraint(ALLOC_IN_RC(r3_reg));
match(RegP);
// match(iRegP);
match(iRegPNoSp);
op_cost(0);
format %{ %}
interface(REG_INTER);
%}
// Pointer 64 bit Register R4 only
operand iRegP_R4()
%{
constraint(ALLOC_IN_RC(r4_reg));
match(RegP);
// match(iRegP);
match(iRegPNoSp);
op_cost(0);
format %{ %}
interface(REG_INTER);
%}
// Pointer 64 bit Register R5 only
operand iRegP_R5()
%{
constraint(ALLOC_IN_RC(r5_reg));
match(RegP);
// match(iRegP);
match(iRegPNoSp);
op_cost(0);
format %{ %}
interface(REG_INTER);
%}
// Pointer 64 bit Register R10 only
operand iRegP_R10()
%{
constraint(ALLOC_IN_RC(r10_reg));
match(RegP);
// match(iRegP);
match(iRegPNoSp);
op_cost(0);
format %{ %}
interface(REG_INTER);
%}
// Long 64 bit Register R0 only
operand iRegL_R0()
%{
constraint(ALLOC_IN_RC(r0_reg));
match(RegL);
match(iRegLNoSp);
op_cost(0);
format %{ %}
interface(REG_INTER);
%}
// Long 64 bit Register R11 only
operand iRegL_R11()
%{
constraint(ALLOC_IN_RC(r11_reg));
match(RegL);
match(iRegLNoSp);
op_cost(0);
format %{ %}
interface(REG_INTER);
%}
// Register R0 only
operand iRegI_R0()
%{
constraint(ALLOC_IN_RC(int_r0_reg));
match(RegI);
match(iRegINoSp);
op_cost(0);
format %{ %}
interface(REG_INTER);
%}
// Register R2 only
operand iRegI_R2()
%{
constraint(ALLOC_IN_RC(int_r2_reg));
match(RegI);
match(iRegINoSp);
op_cost(0);
format %{ %}
interface(REG_INTER);
%}
// Register R3 only
operand iRegI_R3()
%{
constraint(ALLOC_IN_RC(int_r3_reg));
match(RegI);
match(iRegINoSp);
op_cost(0);
format %{ %}
interface(REG_INTER);
%}
// Register R4 only
operand iRegI_R4()
%{
constraint(ALLOC_IN_RC(int_r4_reg));
match(RegI);
match(iRegINoSp);
op_cost(0);
format %{ %}
interface(REG_INTER);
%}
// Narrow Pointer Register Operands
// Narrow Pointer Register
operand iRegN()
%{
constraint(ALLOC_IN_RC(any_reg32));
match(RegN);
match(iRegNNoSp);
op_cost(0);
format %{ %}
interface(REG_INTER);
%}
// Narrow Pointer Register not Special
operand iRegNNoSp()
%{
constraint(ALLOC_IN_RC(no_special_reg32));
match(RegN);
op_cost(0);
format %{ %}
interface(REG_INTER);
%}
// Float Register
// Float register operands
operand vRegF()
%{
constraint(ALLOC_IN_RC(float_reg));
match(RegF);
op_cost(0);
format %{ %}
interface(REG_INTER);
%}
// Double Register
// Double register operands
operand vRegD()
%{
constraint(ALLOC_IN_RC(double_reg));
match(RegD);
op_cost(0);
format %{ %}
interface(REG_INTER);
%}
// Generic vector class. This will be used for
// all vector operands, including NEON and SVE.
operand vReg()
%{
constraint(ALLOC_IN_RC(dynamic));
match(VecA);
match(VecD);
match(VecX);
op_cost(0);
format %{ %}
interface(REG_INTER);
%}
operand vecA()
%{
constraint(ALLOC_IN_RC(vectora_reg));
match(VecA);
op_cost(0);
format %{ %}
interface(REG_INTER);
%}
operand vecD()
%{
constraint(ALLOC_IN_RC(vectord_reg));
match(VecD);
op_cost(0);
format %{ %}
interface(REG_INTER);
%}
operand vecX()
%{
constraint(ALLOC_IN_RC(vectorx_reg));
match(VecX);
op_cost(0);
format %{ %}
interface(REG_INTER);
%}
operand vRegD_V0()
%{
constraint(ALLOC_IN_RC(v0_reg));
match(RegD);
op_cost(0);
format %{ %}
interface(REG_INTER);
%}
operand vRegD_V1()
%{
constraint(ALLOC_IN_RC(v1_reg));
match(RegD);
op_cost(0);
format %{ %}
interface(REG_INTER);
%}
operand vRegD_V2()
%{
constraint(ALLOC_IN_RC(v2_reg));
match(RegD);
op_cost(0);
format %{ %}
interface(REG_INTER);
%}
operand vRegD_V3()
%{
constraint(ALLOC_IN_RC(v3_reg));
match(RegD);
op_cost(0);
format %{ %}
interface(REG_INTER);
%}
operand vRegD_V4()
%{
constraint(ALLOC_IN_RC(v4_reg));
match(RegD);
op_cost(0);
format %{ %}
interface(REG_INTER);
%}
operand vRegD_V5()
%{
constraint(ALLOC_IN_RC(v5_reg));
match(RegD);
op_cost(0);
format %{ %}
interface(REG_INTER);
%}
operand vRegD_V6()
%{
constraint(ALLOC_IN_RC(v6_reg));
match(RegD);
op_cost(0);
format %{ %}
interface(REG_INTER);
%}
operand vRegD_V7()
%{
constraint(ALLOC_IN_RC(v7_reg));
match(RegD);
op_cost(0);
format %{ %}
interface(REG_INTER);
%}
operand vRegD_V12()
%{
constraint(ALLOC_IN_RC(v12_reg));
match(RegD);
op_cost(0);
format %{ %}
interface(REG_INTER);
%}
operand vRegD_V13()
%{
constraint(ALLOC_IN_RC(v13_reg));
match(RegD);
op_cost(0);
format %{ %}
interface(REG_INTER);
%}
operand pReg()
%{
constraint(ALLOC_IN_RC(pr_reg));
match(RegVectMask);
match(pRegGov);
op_cost(0);
format %{ %}
interface(REG_INTER);
%}
operand pRegGov()
%{
constraint(ALLOC_IN_RC(gov_pr));
match(RegVectMask);
match(pReg);
op_cost(0);
format %{ %}
interface(REG_INTER);
%}
operand pRegGov_P0()
%{
constraint(ALLOC_IN_RC(p0_reg));
match(RegVectMask);
op_cost(0);
format %{ %}
interface(REG_INTER);
%}
operand pRegGov_P1()
%{
constraint(ALLOC_IN_RC(p1_reg));
match(RegVectMask);
op_cost(0);
format %{ %}
interface(REG_INTER);
%}
// Flags register, used as output of signed compare instructions
// note that on AArch64 we also use this register as the output for
// floating point compare instructions (CmpF CmpD). this ensures
// that ordered inequality tests use GT, GE, LT or LE none of which
// pass through cases where the result is unordered i.e. one or both
// inputs to the compare is a NaN. this means that the ideal code can
// replace e.g. a GT with an LE and not end up capturing the NaN case
// (where the comparison should always fail). EQ and NE tests are
// always generated in ideal code so that unordered folds into the NE
// case, matching the behaviour of AArch64 NE.
//
// This differs from x86 where the outputs of FP compares use a
// special FP flags registers and where compares based on this
// register are distinguished into ordered inequalities (cmpOpUCF) and
// EQ/NEQ tests (cmpOpUCF2). x86 has to special case the latter tests
// to explicitly handle the unordered case in branches. x86 also has
// to include extra CMoveX rules to accept a cmpOpUCF input.
operand rFlagsReg()
%{
constraint(ALLOC_IN_RC(int_flags));
match(RegFlags);
op_cost(0);
format %{ "RFLAGS" %}
interface(REG_INTER);
%}
// Flags register, used as output of unsigned compare instructions
operand rFlagsRegU()
%{
constraint(ALLOC_IN_RC(int_flags));
match(RegFlags);
op_cost(0);
format %{ "RFLAGSU" %}
interface(REG_INTER);
%}
// Special Registers
// Method Register
operand inline_cache_RegP(iRegP reg)
%{
constraint(ALLOC_IN_RC(method_reg)); // inline_cache_reg
match(reg);
match(iRegPNoSp);
op_cost(0);
format %{ %}
interface(REG_INTER);
%}
// Thread Register
operand thread_RegP(iRegP reg)
%{
constraint(ALLOC_IN_RC(thread_reg)); // thread_reg
match(reg);
op_cost(0);
format %{ %}
interface(REG_INTER);
%}
//----------Memory Operands----------------------------------------------------
operand indirect(iRegP reg)
%{
constraint(ALLOC_IN_RC(ptr_reg));
match(reg);
op_cost(0);
format %{ "[$reg]" %}
interface(MEMORY_INTER) %{
base($reg);
index(0xffffffff);
scale(0x0);
disp(0x0);
%}
%}
operand indIndexScaledI2L(iRegP reg, iRegI ireg, immIScale scale)
%{
constraint(ALLOC_IN_RC(ptr_reg));
predicate(size_fits_all_mem_uses(n->as_AddP(), n->in(AddPNode::Offset)->in(2)->get_int()));
match(AddP reg (LShiftL (ConvI2L ireg) scale));
op_cost(0);
format %{ "$reg, $ireg sxtw($scale), 0, I2L" %}
interface(MEMORY_INTER) %{
base($reg);
index($ireg);
scale($scale);
disp(0x0);
%}
%}
operand indIndexScaled(iRegP reg, iRegL lreg, immIScale scale)
%{
constraint(ALLOC_IN_RC(ptr_reg));
predicate(size_fits_all_mem_uses(n->as_AddP(), n->in(AddPNode::Offset)->in(2)->get_int()));
match(AddP reg (LShiftL lreg scale));
op_cost(0);
format %{ "$reg, $lreg lsl($scale)" %}
interface(MEMORY_INTER) %{
base($reg);
index($lreg);
scale($scale);
disp(0x0);
%}
%}
operand indIndexI2L(iRegP reg, iRegI ireg)
%{
constraint(ALLOC_IN_RC(ptr_reg));
match(AddP reg (ConvI2L ireg));
op_cost(0);
format %{ "$reg, $ireg, 0, I2L" %}
interface(MEMORY_INTER) %{
base($reg);
index($ireg);
scale(0x0);
disp(0x0);
%}
%}
operand indIndex(iRegP reg, iRegL lreg)
%{
constraint(ALLOC_IN_RC(ptr_reg));
match(AddP reg lreg);
op_cost(0);
format %{ "$reg, $lreg" %}
interface(MEMORY_INTER) %{
base($reg);
index($lreg);
scale(0x0);
disp(0x0);
%}
%}
operand indOffI1(iRegP reg, immIOffset1 off)
%{
constraint(ALLOC_IN_RC(ptr_reg));
match(AddP reg off);
op_cost(0);
format %{ "[$reg, $off]" %}
interface(MEMORY_INTER) %{
base($reg);
index(0xffffffff);
scale(0x0);
disp($off);
%}
%}
operand indOffI2(iRegP reg, immIOffset2 off)
%{
constraint(ALLOC_IN_RC(ptr_reg));
match(AddP reg off);
op_cost(0);
format %{ "[$reg, $off]" %}
interface(MEMORY_INTER) %{
base($reg);
index(0xffffffff);
scale(0x0);
disp($off);
%}
%}
operand indOffI4(iRegP reg, immIOffset4 off)
%{
constraint(ALLOC_IN_RC(ptr_reg));
match(AddP reg off);
op_cost(0);
format %{ "[$reg, $off]" %}
interface(MEMORY_INTER) %{
base($reg);
index(0xffffffff);
scale(0x0);
disp($off);
%}
%}
operand indOffI8(iRegP reg, immIOffset8 off)
%{
constraint(ALLOC_IN_RC(ptr_reg));
match(AddP reg off);
op_cost(0);
format %{ "[$reg, $off]" %}
interface(MEMORY_INTER) %{
base($reg);
index(0xffffffff);
scale(0x0);
disp($off);
%}
%}
operand indOffI16(iRegP reg, immIOffset16 off)
%{
constraint(ALLOC_IN_RC(ptr_reg));
match(AddP reg off);
op_cost(0);
format %{ "[$reg, $off]" %}
interface(MEMORY_INTER) %{
base($reg);
index(0xffffffff);
scale(0x0);
disp($off);
%}
%}
operand indOffL1(iRegP reg, immLoffset1 off)
%{
constraint(ALLOC_IN_RC(ptr_reg));
match(AddP reg off);
op_cost(0);
format %{ "[$reg, $off]" %}
interface(MEMORY_INTER) %{
base($reg);
index(0xffffffff);
scale(0x0);
disp($off);
%}
%}
operand indOffL2(iRegP reg, immLoffset2 off)
%{
constraint(ALLOC_IN_RC(ptr_reg));
match(AddP reg off);
op_cost(0);
format %{ "[$reg, $off]" %}
interface(MEMORY_INTER) %{
base($reg);
index(0xffffffff);
scale(0x0);
disp($off);
%}
%}
operand indOffL4(iRegP reg, immLoffset4 off)
%{
constraint(ALLOC_IN_RC(ptr_reg));
match(AddP reg off);
op_cost(0);
format %{ "[$reg, $off]" %}
interface(MEMORY_INTER) %{
base($reg);
index(0xffffffff);
scale(0x0);
disp($off);
%}
%}
operand indOffL8(iRegP reg, immLoffset8 off)
%{
constraint(ALLOC_IN_RC(ptr_reg));
match(AddP reg off);
op_cost(0);
format %{ "[$reg, $off]" %}
interface(MEMORY_INTER) %{
base($reg);
index(0xffffffff);
scale(0x0);
disp($off);
%}
%}
operand indOffL16(iRegP reg, immLoffset16 off)
%{
constraint(ALLOC_IN_RC(ptr_reg));
match(AddP reg off);
op_cost(0);
format %{ "[$reg, $off]" %}
interface(MEMORY_INTER) %{
base($reg);
index(0xffffffff);
scale(0x0);
disp($off);
%}
%}
operand indirectX2P(iRegL reg)
%{
constraint(ALLOC_IN_RC(ptr_reg));
match(CastX2P reg);
op_cost(0);
format %{ "[$reg]\t# long -> ptr" %}
interface(MEMORY_INTER) %{
base($reg);
index(0xffffffff);
scale(0x0);
disp(0x0);
%}
%}
operand indOffX2P(iRegL reg, immLOffset off)
%{
constraint(ALLOC_IN_RC(ptr_reg));
match(AddP (CastX2P reg) off);
op_cost(0);
format %{ "[$reg, $off]\t# long -> ptr" %}
interface(MEMORY_INTER) %{
base($reg);
index(0xffffffff);
scale(0x0);
disp($off);
%}
%}
operand indirectN(iRegN reg)
%{
predicate(CompressedOops::shift() == 0);
constraint(ALLOC_IN_RC(ptr_reg));
match(DecodeN reg);
op_cost(0);
format %{ "[$reg]\t# narrow" %}
interface(MEMORY_INTER) %{
base($reg);
index(0xffffffff);
scale(0x0);
disp(0x0);
%}
%}
operand indIndexScaledI2LN(iRegN reg, iRegI ireg, immIScale scale)
%{
predicate(CompressedOops::shift() == 0 && size_fits_all_mem_uses(n->as_AddP(), n->in(AddPNode::Offset)->in(2)->get_int()));
constraint(ALLOC_IN_RC(ptr_reg));
match(AddP (DecodeN reg) (LShiftL (ConvI2L ireg) scale));
op_cost(0);
format %{ "$reg, $ireg sxtw($scale), 0, I2L\t# narrow" %}
interface(MEMORY_INTER) %{
base($reg);
index($ireg);
scale($scale);
disp(0x0);
%}
%}
operand indIndexScaledN(iRegN reg, iRegL lreg, immIScale scale)
%{
predicate(CompressedOops::shift() == 0 && size_fits_all_mem_uses(n->as_AddP(), n->in(AddPNode::Offset)->in(2)->get_int()));
constraint(ALLOC_IN_RC(ptr_reg));
match(AddP (DecodeN reg) (LShiftL lreg scale));
op_cost(0);
format %{ "$reg, $lreg lsl($scale)\t# narrow" %}
interface(MEMORY_INTER) %{
base($reg);
index($lreg);
scale($scale);
disp(0x0);
%}
%}
operand indIndexI2LN(iRegN reg, iRegI ireg)
%{
predicate(CompressedOops::shift() == 0);
constraint(ALLOC_IN_RC(ptr_reg));
match(AddP (DecodeN reg) (ConvI2L ireg));
op_cost(0);
format %{ "$reg, $ireg, 0, I2L\t# narrow" %}
interface(MEMORY_INTER) %{
base($reg);
index($ireg);
scale(0x0);
disp(0x0);
%}
%}
operand indIndexN(iRegN reg, iRegL lreg)
%{
predicate(CompressedOops::shift() == 0);
constraint(ALLOC_IN_RC(ptr_reg));
match(AddP (DecodeN reg) lreg);
op_cost(0);
format %{ "$reg, $lreg\t# narrow" %}
interface(MEMORY_INTER) %{
base($reg);
index($lreg);
scale(0x0);
disp(0x0);
%}
%}
operand indOffIN(iRegN reg, immIOffset off)
%{
predicate(CompressedOops::shift() == 0);
constraint(ALLOC_IN_RC(ptr_reg));
match(AddP (DecodeN reg) off);
op_cost(0);
format %{ "[$reg, $off]\t# narrow" %}
interface(MEMORY_INTER) %{
base($reg);
index(0xffffffff);
scale(0x0);
disp($off);
%}
%}
operand indOffLN(iRegN reg, immLOffset off)
%{
predicate(CompressedOops::shift() == 0);
constraint(ALLOC_IN_RC(ptr_reg));
match(AddP (DecodeN reg) off);
op_cost(0);
format %{ "[$reg, $off]\t# narrow" %}
interface(MEMORY_INTER) %{
base($reg);
index(0xffffffff);
scale(0x0);
disp($off);
%}
%}
//----------Special Memory Operands--------------------------------------------
// Stack Slot Operand - This operand is used for loading and storing temporary
// values on the stack where a match requires a value to
// flow through memory.
operand stackSlotP(sRegP reg)
%{
constraint(ALLOC_IN_RC(stack_slots));
op_cost(100);
// No match rule because this operand is only generated in matching
// match(RegP);
format %{ "[$reg]" %}
interface(MEMORY_INTER) %{
base(0x1e); // SP
index(0x0); // No Index
scale(0x0); // No Scale
disp($reg); // Stack Offset
%}
%}
operand stackSlotI(sRegI reg)
%{
constraint(ALLOC_IN_RC(stack_slots));
// No match rule because this operand is only generated in matching
// match(RegI);
format %{ "[$reg]" %}
interface(MEMORY_INTER) %{
base(0x1e); // SP
index(0x0); // No Index
scale(0x0); // No Scale
disp($reg); // Stack Offset
%}
%}
operand stackSlotF(sRegF reg)
%{
constraint(ALLOC_IN_RC(stack_slots));
// No match rule because this operand is only generated in matching
// match(RegF);
format %{ "[$reg]" %}
interface(MEMORY_INTER) %{
base(0x1e); // SP
index(0x0); // No Index
scale(0x0); // No Scale
disp($reg); // Stack Offset
%}
%}
operand stackSlotD(sRegD reg)
%{
constraint(ALLOC_IN_RC(stack_slots));
// No match rule because this operand is only generated in matching
// match(RegD);
format %{ "[$reg]" %}
interface(MEMORY_INTER) %{
base(0x1e); // SP
index(0x0); // No Index
scale(0x0); // No Scale
disp($reg); // Stack Offset
%}
%}
operand stackSlotL(sRegL reg)
%{
constraint(ALLOC_IN_RC(stack_slots));
// No match rule because this operand is only generated in matching
// match(RegL);
format %{ "[$reg]" %}
interface(MEMORY_INTER) %{
base(0x1e); // SP
index(0x0); // No Index
scale(0x0); // No Scale
disp($reg); // Stack Offset
%}
%}
// Operands for expressing Control Flow
// NOTE: Label is a predefined operand which should not be redefined in
// the AD file. It is generically handled within the ADLC.
//----------Conditional Branch Operands----------------------------------------
// Comparison Op - This is the operation of the comparison, and is limited to
// the following set of codes:
// L (<), LE (<=), G (>), GE (>=), E (==), NE (!=)
//
// Other attributes of the comparison, such as unsignedness, are specified
// by the comparison instruction that sets a condition code flags register.
// That result is represented by a flags operand whose subtype is appropriate
// to the unsignedness (etc.) of the comparison.
//
// Later, the instruction which matches both the Comparison Op (a Bool) and
// the flags (produced by the Cmp) specifies the coding of the comparison op
// by matching a specific subtype of Bool operand below, such as cmpOpU.
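// Illustrative flow (a sketch): for Java source like "if (a < b)" over ints,
// the ideal graph contains (If (Bool (CmpI a b) [lt])). The CmpI matches a
// compare rule producing rFlagsReg, the Bool matches the cmpOp operand below
// (whose COND_INTER maps "lt" to encoding 0xb), and the branch rule combines
// the two into a single conditional branch, e.g. "b.lt <label>".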
// used for signed integral comparisons and fp comparisons
operand cmpOp()
%{
match(Bool);
format %{ "" %}
interface(COND_INTER) %{
equal(0x0, "eq");
not_equal(0x1, "ne");
less(0xb, "lt");
greater_equal(0xa, "ge");
less_equal(0xd, "le");
greater(0xc, "gt");
overflow(0x6, "vs");
no_overflow(0x7, "vc");
%}
%}
// used for unsigned integral comparisons
operand cmpOpU()
%{
match(Bool);
format %{ "" %}
interface(COND_INTER) %{
equal(0x0, "eq");
not_equal(0x1, "ne");
less(0x3, "lo");
greater_equal(0x2, "hs");
less_equal(0x9, "ls");
greater(0x8, "hi");
overflow(0x6, "vs");
no_overflow(0x7, "vc");
%}
%}
// used for certain integral comparisons which can be
// converted to cbxx or tbxx instructions
operand cmpOpEqNe()
%{
match(Bool);
op_cost(0);
predicate(n->as_Bool()->_test._test == BoolTest::ne
|| n->as_Bool()->_test._test == BoolTest::eq);
format %{ "" %}
interface(COND_INTER) %{
equal(0x0, "eq");
not_equal(0x1, "ne");
less(0xb, "lt");
greater_equal(0xa, "ge");
less_equal(0xd, "le");
greater(0xc, "gt");
overflow(0x6, "vs");
no_overflow(0x7, "vc");
%}
%}
// used for certain integral comparisons which can be
// converted to cbxx or tbxx instructions
operand cmpOpLtGe()
%{
match(Bool);
op_cost(0);
predicate(n->as_Bool()->_test._test == BoolTest::lt
|| n->as_Bool()->_test._test == BoolTest::ge);
format %{ "" %}
interface(COND_INTER) %{
equal(0x0, "eq");
not_equal(0x1, "ne");
less(0xb, "lt");
greater_equal(0xa, "ge");
less_equal(0xd, "le");
greater(0xc, "gt");
overflow(0x6, "vs");
no_overflow(0x7, "vc");
%}
%}
// used for certain unsigned integral comparisons which can be
// converted to cbxx or tbxx instructions
operand cmpOpUEqNeLeGt()
%{
match(Bool);
op_cost(0);
predicate(n->as_Bool()->_test._test == BoolTest::eq ||
n->as_Bool()->_test._test == BoolTest::ne ||
n->as_Bool()->_test._test == BoolTest::le ||
n->as_Bool()->_test._test == BoolTest::gt);
format %{ "" %}
interface(COND_INTER) %{
equal(0x0, "eq");
not_equal(0x1, "ne");
less(0x3, "lo");
greater_equal(0x2, "hs");
less_equal(0x9, "ls");
greater(0x8, "hi");
overflow(0x6, "vs");
no_overflow(0x7, "vc");
%}
%}
// Special operand allowing long args to int ops to be truncated for free
operand iRegL2I(iRegL reg) %{
op_cost(0);
match(ConvL2I reg);
format %{ "l2i($reg)" %}
interface(REG_INTER);
%}
operand iRegL2P(iRegL reg) %{
op_cost(0);
match(CastX2P reg);
format %{ "l2p($reg)" %}
interface(REG_INTER);
%}
opclass vmem2(indirect, indIndex, indOffI2, indOffL2);
opclass vmem4(indirect, indIndex, indOffI4, indOffL4);
opclass vmem8(indirect, indIndex, indOffI8, indOffL8);
opclass vmem16(indirect, indIndex, indOffI16, indOffL16);
//----------OPERAND CLASSES----------------------------------------------------
// Operand Classes are groups of operands that are used to simplify
// instruction definitions by not requiring the AD writer to specify
// separate instructions for every form of operand when the
// instruction accepts multiple operand types with the same basic
// encoding and format. The classic case of this is memory operands.
// The memory opclasses below define the read/write locations for
// load/store instruction defs; we can turn a memory op into an Address.
opclass memory1(indirect, indIndexScaled, indIndexScaledI2L, indIndexI2L, indIndex, indOffI1, indOffL1,
indirectN, indIndexScaledN, indIndexScaledI2LN, indIndexI2LN, indIndexN, indirectX2P, indOffX2P);
opclass memory2(indirect, indIndexScaled, indIndexScaledI2L, indIndexI2L, indIndex, indOffI2, indOffL2,
indirectN, indIndexScaledN, indIndexScaledI2LN, indIndexI2LN, indIndexN, indirectX2P, indOffX2P);
opclass memory4(indirect, indIndexScaled, indIndexScaledI2L, indIndexI2L, indIndex, indOffI4, indOffL4,
indirectN, indIndexScaledN, indIndexScaledI2LN, indIndexI2LN, indIndexN, indOffIN, indOffLN, indirectX2P, indOffX2P);
opclass memory8(indirect, indIndexScaled, indIndexScaledI2L, indIndexI2L, indIndex, indOffI8, indOffL8,
indirectN, indIndexScaledN, indIndexScaledI2LN, indIndexI2LN, indIndexN, indOffIN, indOffLN, indirectX2P, indOffX2P);
// All of the memory operands. For the pipeline description.
opclass memory(indirect, indIndexScaled, indIndexScaledI2L, indIndexI2L, indIndex,
indOffI1, indOffL1, indOffI2, indOffL2, indOffI4, indOffL4, indOffI8, indOffL8,
indirectN, indIndexScaledN, indIndexScaledI2LN, indIndexI2LN, indIndexN, indOffIN, indOffLN, indirectX2P, indOffX2P);
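// Illustrative use (a sketch): one rule such as the loadI rule later in this
// file, instruct loadI(iRegINoSp dst, memory4 mem) with
// match(Set dst (LoadI mem)), covers [reg], [reg, reg], [reg, reg, lsl #s]
// and [reg, #off] addressing shapes through the single memory4 opclass,
// instead of needing one rule per addressing mode.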
// iRegIorL2I is used for src inputs in rules for 32 bit int (I)
// operations. It allows the src to be either an iRegI or a (ConvL2I
// iRegL). In the latter case the l2i normally planted for a ConvL2I
// can be elided because the 32-bit instruction will just employ the
// lower 32 bits anyway.
//
// n.b. this does not elide all L2I conversions. If the truncated
// value is consumed by more than one operation then the ConvL2I
// cannot be bundled into the consuming nodes, so an l2i gets planted
// (actually a movw $dst $src) and the downstream instructions consume
// the result of the l2i as an iRegI input. That's a shame since the
// movw is actually redundant, but it's not too costly.
opclass iRegIorL2I(iRegI, iRegL2I);
opclass iRegPorL2P(iRegP, iRegL2P);
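// Example (a sketch): for an ideal subtree (AddI src1 (ConvL2I x)), an add
// rule taking iRegIorL2I can match the long register x directly and emit
// "addw w0, w1, w2" on the low 32 bits, with no separate truncating movw.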
//----------PIPELINE-----------------------------------------------------------
// Rules which define the behavior of the target architecture's pipeline.
// For specific pipelines, e.g. A53, define the stages of that pipeline:
//pipe_desc(ISS, EX1, EX2, WR);
#define ISS S0
#define EX1 S1
#define EX2 S2
#define WR S3
pipeline %{
attributes %{
// ARM instructions are of fixed length
fixed_size_instructions; // Fixed size instructions
max_instructions_per_bundle = 4; // A53 = 2, A57 = 4
// ARM instructions come in 32-bit word units
instruction_unit_size = 4; // An instruction is 4 bytes long
instruction_fetch_unit_size = 64; // The processor fetches one line
instruction_fetch_units = 1; // of 64 bytes
// List of nop instructions
nops( MachNop );
%}
// We don't use an actual pipeline model so don't care about resources
// or description. We do use pipeline classes to introduce fixed
// latencies.
//----------RESOURCES----------------------------------------------------------
// Resources are the functional units available to the machine
resources( INS0, INS1, INS01 = INS0 | INS1,
ALU0, ALU1, ALU = ALU0 | ALU1,
MAC,
DIV,
BRANCH,
LDST,
NEON_FP);
//----------PIPELINE DESCRIPTION-----------------------------------------------
// Pipeline Description specifies the stages in the machine's pipeline
// Define the pipeline as a generic 6 stage pipeline
pipe_desc(S0, S1, S2, S3, S4, S5);
//----------PIPELINE CLASSES---------------------------------------------------
// Pipeline Classes describe the stages in which input and output are
// referenced by the hardware pipeline.
pipe_class fp_dop_reg_reg_s(vRegF dst, vRegF src1, vRegF src2)
%{
single_instruction;
src1 : S1(read);
src2 : S2(read);
dst : S5(write);
INS01 : ISS;
NEON_FP : S5;
%}
pipe_class fp_dop_reg_reg_d(vRegD dst, vRegD src1, vRegD src2)
%{
single_instruction;
src1 : S1(read);
src2 : S2(read);
dst : S5(write);
INS01 : ISS;
NEON_FP : S5;
%}
pipe_class fp_uop_s(vRegF dst, vRegF src)
%{
single_instruction;
src : S1(read);
dst : S5(write);
INS01 : ISS;
NEON_FP : S5;
%}
pipe_class fp_uop_d(vRegD dst, vRegD src)
%{
single_instruction;
src : S1(read);
dst : S5(write);
INS01 : ISS;
NEON_FP : S5;
%}
pipe_class fp_d2f(vRegF dst, vRegD src)
%{
single_instruction;
src : S1(read);
dst : S5(write);
INS01 : ISS;
NEON_FP : S5;
%}
pipe_class fp_f2d(vRegD dst, vRegF src)
%{
single_instruction;
src : S1(read);
dst : S5(write);
INS01 : ISS;
NEON_FP : S5;
%}
pipe_class fp_f2i(iRegINoSp dst, vRegF src)
%{
single_instruction;
src : S1(read);
dst : S5(write);
INS01 : ISS;
NEON_FP : S5;
%}
pipe_class fp_f2l(iRegLNoSp dst, vRegF src)
%{
single_instruction;
src : S1(read);
dst : S5(write);
INS01 : ISS;
NEON_FP : S5;
%}
pipe_class fp_i2f(vRegF dst, iRegIorL2I src)
%{
single_instruction;
src : S1(read);
dst : S5(write);
INS01 : ISS;
NEON_FP : S5;
%}
pipe_class fp_l2f(vRegF dst, iRegL src)
%{
single_instruction;
src : S1(read);
dst : S5(write);
INS01 : ISS;
NEON_FP : S5;
%}
pipe_class fp_d2i(iRegINoSp dst, vRegD src)
%{
single_instruction;
src : S1(read);
dst : S5(write);
INS01 : ISS;
NEON_FP : S5;
%}
pipe_class fp_d2l(iRegLNoSp dst, vRegD src)
%{
single_instruction;
src : S1(read);
dst : S5(write);
INS01 : ISS;
NEON_FP : S5;
%}
pipe_class fp_i2d(vRegD dst, iRegIorL2I src)
%{
single_instruction;
src : S1(read);
dst : S5(write);
INS01 : ISS;
NEON_FP : S5;
%}
pipe_class fp_l2d(vRegD dst, iRegIorL2I src)
%{
single_instruction;
src : S1(read);
dst : S5(write);
INS01 : ISS;
NEON_FP : S5;
%}
pipe_class fp_div_s(vRegF dst, vRegF src1, vRegF src2)
%{
single_instruction;
src1 : S1(read);
src2 : S2(read);
dst : S5(write);
INS0 : ISS;
NEON_FP : S5;
%}
pipe_class fp_div_d(vRegD dst, vRegD src1, vRegD src2)
%{
single_instruction;
src1 : S1(read);
src2 : S2(read);
dst : S5(write);
INS0 : ISS;
NEON_FP : S5;
%}
pipe_class fp_cond_reg_reg_s(vRegF dst, vRegF src1, vRegF src2, rFlagsReg cr)
%{
single_instruction;
cr : S1(read);
src1 : S1(read);
src2 : S1(read);
dst : S3(write);
INS01 : ISS;
NEON_FP : S3;
%}
pipe_class fp_cond_reg_reg_d(vRegD dst, vRegD src1, vRegD src2, rFlagsReg cr)
%{
single_instruction;
cr : S1(read);
src1 : S1(read);
src2 : S1(read);
dst : S3(write);
INS01 : ISS;
NEON_FP : S3;
%}
pipe_class fp_imm_s(vRegF dst)
%{
single_instruction;
dst : S3(write);
INS01 : ISS;
NEON_FP : S3;
%}
pipe_class fp_imm_d(vRegD dst)
%{
single_instruction;
dst : S3(write);
INS01 : ISS;
NEON_FP : S3;
%}
pipe_class fp_load_constant_s(vRegF dst)
%{
single_instruction;
dst : S4(write);
INS01 : ISS;
NEON_FP : S4;
%}
pipe_class fp_load_constant_d(vRegD dst)
%{
single_instruction;
dst : S4(write);
INS01 : ISS;
NEON_FP : S4;
%}
//------- Integer ALU operations --------------------------
// Integer ALU reg-reg operation
// Operands needed in EX1, result generated in EX2
// Eg. ADD x0, x1, x2
pipe_class ialu_reg_reg(iRegI dst, iRegI src1, iRegI src2)
%{
single_instruction;
dst : EX2(write);
src1 : EX1(read);
src2 : EX1(read);
INS01 : ISS; // Dual issue as instruction 0 or 1
ALU : EX2;
%}
// Integer ALU reg-reg operation with constant shift
// Shifted register must be available in LATE_ISS instead of EX1
// Eg. ADD x0, x1, x2, LSL #2
pipe_class ialu_reg_reg_shift(iRegI dst, iRegI src1, iRegI src2, immI shift)
%{
single_instruction;
dst : EX2(write);
src1 : EX1(read);
src2 : ISS(read);
INS01 : ISS;
ALU : EX2;
%}
// Integer ALU reg operation with constant shift
// Eg. LSL x0, x1, #shift
pipe_class ialu_reg_shift(iRegI dst, iRegI src1)
%{
single_instruction;
dst : EX2(write);
src1 : ISS(read);
INS01 : ISS;
ALU : EX2;
%}
// Integer ALU reg-reg operation with variable shift
// Both operands must be available in LATE_ISS instead of EX1
// Result is available in EX1 instead of EX2
// Eg. LSLV x0, x1, x2
pipe_class ialu_reg_reg_vshift(iRegI dst, iRegI src1, iRegI src2)
%{
single_instruction;
dst : EX1(write);
src1 : ISS(read);
src2 : ISS(read);
INS01 : ISS;
ALU : EX1;
%}
// Integer ALU reg-reg operation with extract
// As for _vshift above, but result generated in EX2
// Eg. EXTR x0, x1, x2, #N
pipe_class ialu_reg_reg_extr(iRegI dst, iRegI src1, iRegI src2)
%{
single_instruction;
dst : EX2(write);
src1 : ISS(read);
src2 : ISS(read);
INS1 : ISS; // Can only dual issue as Instruction 1
ALU : EX1;
%}
// Integer ALU reg operation
// Eg. NEG x0, x1
pipe_class ialu_reg(iRegI dst, iRegI src)
%{
single_instruction;
dst : EX2(write);
src : EX1(read);
INS01 : ISS;
ALU : EX2;
%}
// Integer ALU reg-immediate operation
// Eg. ADD x0, x1, #N
pipe_class ialu_reg_imm(iRegI dst, iRegI src1)
%{
single_instruction;
dst : EX2(write);
src1 : EX1(read);
INS01 : ISS;
ALU : EX2;
%}
// Integer ALU immediate operation (no source operands)
// Eg. MOV x0, #N
pipe_class ialu_imm(iRegI dst)
%{
single_instruction;
dst : EX1(write);
INS01 : ISS;
ALU : EX1;
%}
//------- Compare operation -------------------------------
// Compare reg-reg
// Eg. CMP x0, x1
pipe_class icmp_reg_reg(rFlagsReg cr, iRegI op1, iRegI op2)
%{
single_instruction;
// fixed_latency(16);
cr : EX2(write);
op1 : EX1(read);
op2 : EX1(read);
INS01 : ISS;
ALU : EX2;
%}
// Compare reg-immediate
// Eg. CMP x0, #N
pipe_class icmp_reg_imm(rFlagsReg cr, iRegI op1)
%{
single_instruction;
// fixed_latency(16);
cr : EX2(write);
op1 : EX1(read);
INS01 : ISS;
ALU : EX2;
%}
//------- Conditional instructions ------------------------
// Conditional no operands
// Eg. CSINC x0, zr, zr, <cond>
pipe_class icond_none(iRegI dst, rFlagsReg cr)
%{
single_instruction;
cr : EX1(read);
dst : EX2(write);
INS01 : ISS;
ALU : EX2;
%}
// Conditional 2 operand
// EG. CSEL X0, X1, X2, <cond>
pipe_class icond_reg_reg(iRegI dst, iRegI src1, iRegI src2, rFlagsReg cr)
%{
single_instruction;
cr : EX1(read);
src1 : EX1(read);
src2 : EX1(read);
dst : EX2(write);
INS01 : ISS;
ALU : EX2;
%}
// Conditional 1 operand
// Eg. CSEL X0, X1, zr, <cond>
pipe_class icond_reg(iRegI dst, iRegI src, rFlagsReg cr)
%{
single_instruction;
cr : EX1(read);
src : EX1(read);
dst : EX2(write);
INS01 : ISS;
ALU : EX2;
%}
//------- Multiply pipeline operations --------------------
// Multiply reg-reg
// Eg. MUL w0, w1, w2
pipe_class imul_reg_reg(iRegI dst, iRegI src1, iRegI src2)
%{
single_instruction;
dst : WR(write);
src1 : ISS(read);
src2 : ISS(read);
INS01 : ISS;
MAC : WR;
%}
// Multiply accumulate
// Eg. MADD w0, w1, w2, w3
pipe_class imac_reg_reg(iRegI dst, iRegI src1, iRegI src2, iRegI src3)
%{
single_instruction;
dst : WR(write);
src1 : ISS(read);
src2 : ISS(read);
src3 : ISS(read);
INS01 : ISS;
MAC : WR;
%}
// Multiply reg-reg (64 bit)
// Eg. MUL x0, x1, x2
pipe_class lmul_reg_reg(iRegI dst, iRegI src1, iRegI src2)
%{
single_instruction;
fixed_latency(3); // Maximum latency for 64 bit mul
dst : WR(write);
src1 : ISS(read);
src2 : ISS(read);
INS01 : ISS;
MAC : WR;
%}
// Multiply accumulate (64 bit)
// Eg. MADD x0, x1, x2, x3
pipe_class lmac_reg_reg(iRegI dst, iRegI src1, iRegI src2, iRegI src3)
%{
single_instruction;
fixed_latency(3); // Maximum latency for 64 bit mul
dst : WR(write);
src1 : ISS(read);
src2 : ISS(read);
src3 : ISS(read);
INS01 : ISS;
MAC : WR;
%}
//------- Divide pipeline operations --------------------
// Eg. SDIV w0, w1, w2
pipe_class idiv_reg_reg(iRegI dst, iRegI src1, iRegI src2)
%{
single_instruction;
fixed_latency(8); // Maximum latency for 32 bit divide
dst : WR(write);
src1 : ISS(read);
src2 : ISS(read);
INS0 : ISS; // Can only dual issue as instruction 0
DIV : WR;
%}
// Eg. SDIV x0, x1, x2
pipe_class ldiv_reg_reg(iRegI dst, iRegI src1, iRegI src2)
%{
single_instruction;
fixed_latency(16); // Maximum latency for 64 bit divide
dst : WR(write);
src1 : ISS(read);
src2 : ISS(read);
INS0 : ISS; // Can only dual issue as instruction 0
DIV : WR;
%}
//------- Load pipeline operations ------------------------
// Load - prefetch
// Eg. PRFM <mem>
pipe_class iload_prefetch(memory mem)
%{
single_instruction;
mem : ISS(read);
INS01 : ISS;
LDST : WR;
%}
// Load - reg, mem
// Eg. LDR x0, <mem>
pipe_class iload_reg_mem(iRegI dst, memory mem)
%{
single_instruction;
dst : WR(write);
mem : ISS(read);
INS01 : ISS;
LDST : WR;
%}
// Load - reg, reg
// Eg. LDR x0, [sp, x1]
pipe_class iload_reg_reg(iRegI dst, iRegI src)
%{
single_instruction;
dst : WR(write);
src : ISS(read);
INS01 : ISS;
LDST : WR;
%}
//------- Store pipeline operations -----------------------
// Store - zr, mem
// Eg. STR zr, <mem>
pipe_class istore_mem(memory mem)
%{
single_instruction;
mem : ISS(read);
INS01 : ISS;
LDST : WR;
%}
// Store - reg, mem
// Eg. STR x0, <mem>
pipe_class istore_reg_mem(iRegI src, memory mem)
%{
single_instruction;
mem : ISS(read);
src : EX2(read);
INS01 : ISS;
LDST : WR;
%}
// Store - reg, reg
// Eg. STR x0, [sp, x1]
pipe_class istore_reg_reg(iRegI dst, iRegI src)
%{
single_instruction;
dst : ISS(read);
src : EX2(read);
INS01 : ISS;
LDST : WR;
%}
//------- Branch pipeline operations ----------------------
// Branch
pipe_class pipe_branch()
%{
single_instruction;
INS01 : ISS;
BRANCH : EX1;
%}
// Conditional branch
pipe_class pipe_branch_cond(rFlagsReg cr)
%{
single_instruction;
cr : EX1(read);
INS01 : ISS;
BRANCH : EX1;
%}
// Compare & Branch
// EG. CBZ/CBNZ
pipe_class pipe_cmp_branch(iRegI op1)
%{
single_instruction;
op1 : EX1(read);
INS01 : ISS;
BRANCH : EX1;
%}
//------- Synchronisation operations ----------------------
// Any operation requiring serialization.
// EG. DMB/Atomic Ops/Load Acquire/Str Release
pipe_class pipe_serial()
%{
single_instruction;
force_serialization;
fixed_latency(16);
INS01 : ISS(2); // Cannot dual issue with any other instruction
LDST : WR;
%}
// Generic big/slow expanded idiom - also serialized
pipe_class pipe_slow()
%{
instruction_count(10);
multiple_bundles;
force_serialization;
fixed_latency(16);
INS01 : ISS(2); // Cannot dual issue with any other instruction
LDST : WR;
%}
// Empty pipeline class
pipe_class pipe_class_empty()
%{
single_instruction;
fixed_latency(0);
%}
// Default pipeline class.
pipe_class pipe_class_default()
%{
single_instruction;
fixed_latency(2);
%}
// Pipeline class for compares.
pipe_class pipe_class_compare()
%{
single_instruction;
fixed_latency(16);
%}
// Pipeline class for memory operations.
pipe_class pipe_class_memory()
%{
single_instruction;
fixed_latency(16);
%}
// Pipeline class for call.
pipe_class pipe_class_call()
%{
single_instruction;
fixed_latency(100);
%}
// Define the class for the Nop node.
define %{
MachNop = pipe_class_empty;
%}
%}
//----------INSTRUCTIONS-------------------------------------------------------
//
// match -- States which machine-independent subtree may be replaced
// by this instruction.
// ins_cost -- The estimated cost of this instruction is used by instruction
// selection to identify a minimum cost tree of machine
// instructions that matches a tree of machine-independent
// instructions.
// format -- A string providing the disassembly for this instruction.
// The value of an instruction's operand may be inserted
// by referring to it with a '$' prefix.
// opcode -- Three instruction opcodes may be provided. These are referred
// to within an encode class as $primary, $secondary, and $tertiary
// respectively. The primary opcode is commonly used to
// indicate the type of machine instruction, while secondary
// and tertiary are often used for prefix options or addressing
// modes.
// ins_encode -- A list of encode classes with parameters. The encode class
// name must have been defined in an 'enc_class' specification
// in the encode section of the architecture description.
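// For orientation, the pieces above combine as in the following minimal
// sketch (illustrative only; the real integer add rules appear later in
// this file):
//
//   instruct addI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
//     match(Set dst (AddI src1 src2));
//     ins_cost(INSN_COST);
//     format %{ "addw  $dst, $src1, $src2" %}
//     ins_encode %{
//       __ addw(as_Register($dst$$reg),
//               as_Register($src1$$reg),
//               as_Register($src2$$reg));
//     %}
//     ins_pipe(ialu_reg_reg);
//   %}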
// ============================================================================
// Memory (Load/Store) Instructions
// Load Instructions
// Load Byte (8 bit signed)
instruct loadB(iRegINoSp dst, memory1 mem)
%{
match(Set dst (LoadB mem));
predicate(!needs_acquiring_load(n));
ins_cost(4 * INSN_COST);
format %{ "ldrsbw $dst, $mem\t# byte" %}
ins_encode(aarch64_enc_ldrsbw(dst, mem));
ins_pipe(iload_reg_mem);
%}
// Load Byte (8 bit signed) into long
instruct loadB2L(iRegLNoSp dst, memory1 mem)
%{
match(Set dst (ConvI2L (LoadB mem)));
predicate(!needs_acquiring_load(n->in(1)));
ins_cost(4 * INSN_COST);
format %{ "ldrsb $dst, $mem\t# byte" %}
ins_encode(aarch64_enc_ldrsb(dst, mem));
ins_pipe(iload_reg_mem);
%}
// Load Byte (8 bit unsigned)
instruct loadUB(iRegINoSp dst, memory1 mem)
%{
match(Set dst (LoadUB mem));
predicate(!needs_acquiring_load(n));
ins_cost(4 * INSN_COST);
format %{ "ldrbw $dst, $mem\t# byte" %}
ins_encode(aarch64_enc_ldrb(dst, mem));
ins_pipe(iload_reg_mem);
%}
// Load Byte (8 bit unsigned) into long
instruct loadUB2L(iRegLNoSp dst, memory1 mem)
%{
match(Set dst (ConvI2L (LoadUB mem)));
predicate(!needs_acquiring_load(n->in(1)));
ins_cost(4 * INSN_COST);
format %{ "ldrb $dst, $mem\t# byte" %}
ins_encode(aarch64_enc_ldrb(dst, mem));
ins_pipe(iload_reg_mem);
%}
// Load Short (16 bit signed)
instruct loadS(iRegINoSp dst, memory2 mem)
%{
match(Set dst (LoadS mem));
predicate(!needs_acquiring_load(n));
ins_cost(4 * INSN_COST);
format %{ "ldrshw $dst, $mem\t# short" %}
ins_encode(aarch64_enc_ldrshw(dst, mem));
ins_pipe(iload_reg_mem);
%}
// Load Short (16 bit signed) into long
instruct loadS2L(iRegLNoSp dst, memory2 mem)
%{
match(Set dst (ConvI2L (LoadS mem)));
predicate(!needs_acquiring_load(n->in(1)));
ins_cost(4 * INSN_COST);
format %{ "ldrsh $dst, $mem\t# short" %}
ins_encode(aarch64_enc_ldrsh(dst, mem));
ins_pipe(iload_reg_mem);
%}
// Load Char (16 bit unsigned)
instruct loadUS(iRegINoSp dst, memory2 mem)
%{
match(Set dst (LoadUS mem));
predicate(!needs_acquiring_load(n));
ins_cost(4 * INSN_COST);
format %{ "ldrh $dst, $mem\t# short" %}
ins_encode(aarch64_enc_ldrh(dst, mem));
ins_pipe(iload_reg_mem);
%}
// Load Short/Char (16 bit unsigned) into long
instruct loadUS2L(iRegLNoSp dst, memory2 mem)
%{
match(Set dst (ConvI2L (LoadUS mem)));
predicate(!needs_acquiring_load(n->in(1)));
ins_cost(4 * INSN_COST);
format %{ "ldrh $dst, $mem\t# short" %}
ins_encode(aarch64_enc_ldrh(dst, mem));
ins_pipe(iload_reg_mem);
%}
// Load Integer (32 bit signed)
instruct loadI(iRegINoSp dst, memory4 mem)
%{
match(Set dst (LoadI mem));
predicate(!needs_acquiring_load(n));
ins_cost(4 * INSN_COST);
format %{ "ldrw $dst, $mem\t# int" %}
ins_encode(aarch64_enc_ldrw(dst, mem));
ins_pipe(iload_reg_mem);
%}
// Load Integer (32 bit signed) into long
instruct loadI2L(iRegLNoSp dst, memory4 mem)
%{
match(Set dst (ConvI2L (LoadI mem)));
predicate(!needs_acquiring_load(n->in(1)));
ins_cost(4 * INSN_COST);
format %{ "ldrsw $dst, $mem\t# int" %}
ins_encode(aarch64_enc_ldrsw(dst, mem));
ins_pipe(iload_reg_mem);
%}
// Load Integer (32 bit unsigned) into long
instruct loadUI2L(iRegLNoSp dst, memory4 mem, immL_32bits mask)
%{
match(Set dst (AndL (ConvI2L (LoadI mem)) mask));
predicate(!needs_acquiring_load(n->in(1)->in(1)->as_Load()));
ins_cost(4 * INSN_COST);
format %{ "ldrw $dst, $mem\t# int" %}
ins_encode(aarch64_enc_ldrw(dst, mem));
ins_pipe(iload_reg_mem);
%}
// Load Long (64 bit signed)
instruct loadL(iRegLNoSp dst, memory8 mem)
%{
match(Set dst (LoadL mem));
predicate(!needs_acquiring_load(n));
ins_cost(4 * INSN_COST);
format %{ "ldr $dst, $mem\t# int" %}
ins_encode(aarch64_enc_ldr(dst, mem));
ins_pipe(iload_reg_mem);
%}
// Load Range
instruct loadRange(iRegINoSp dst, memory4 mem)
%{
match(Set dst (LoadRange mem));
ins_cost(4 * INSN_COST);
format %{ "ldrw $dst, $mem\t# range" %}
ins_encode(aarch64_enc_ldrw(dst, mem));
ins_pipe(iload_reg_mem);
%}
// Load Pointer
instruct loadP(iRegPNoSp dst, memory8 mem)
%{
match(Set dst (LoadP mem));
predicate(!needs_acquiring_load(n) && (n->as_Load()->barrier_data() == 0));
ins_cost(4 * INSN_COST);
format %{ "ldr $dst, $mem\t# ptr" %}
ins_encode(aarch64_enc_ldr(dst, mem));
ins_pipe(iload_reg_mem);
%}
// Load Compressed Pointer
instruct loadN(iRegNNoSp dst, memory4 mem)
%{
match(Set dst (LoadN mem));
predicate(!needs_acquiring_load(n) && n->as_Load()->barrier_data() == 0);
ins_cost(4 * INSN_COST);
format %{ "ldrw $dst, $mem\t# compressed ptr" %}
ins_encode(aarch64_enc_ldrw(dst, mem));
ins_pipe(iload_reg_mem);
%}
// Load Klass Pointer
instruct loadKlass(iRegPNoSp dst, memory8 mem)
%{
match(Set dst (LoadKlass mem));
predicate(!needs_acquiring_load(n));
ins_cost(4 * INSN_COST);
format %{ "ldr $dst, $mem\t# class" %}
ins_encode(aarch64_enc_ldr(dst, mem));
ins_pipe(iload_reg_mem);
%}
// Load Narrow Klass Pointer
instruct loadNKlass(iRegNNoSp dst, memory4 mem)
%{
match(Set dst (LoadNKlass mem));
predicate(!needs_acquiring_load(n) && !UseCompactObjectHeaders);
ins_cost(4 * INSN_COST);
format %{ "ldrw $dst, $mem\t# compressed class ptr" %}
ins_encode(aarch64_enc_ldrw(dst, mem));
ins_pipe(iload_reg_mem);
%}
instruct loadNKlassCompactHeaders(iRegNNoSp dst, memory4 mem)
%{
match(Set dst (LoadNKlass mem));
predicate(!needs_acquiring_load(n) && UseCompactObjectHeaders);
ins_cost(4 * INSN_COST);
format %{
"ldrw $dst, $mem\t# compressed class ptr, shifted\n\t"
"lsrw $dst, $dst, markWord::klass_shift_at_offset"
%}
ins_encode %{
// inlined aarch64_enc_ldrw
loadStore(masm, &MacroAssembler::ldrw, $dst$$Register, $mem->opcode(),
as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 4);
__ lsrw($dst$$Register, $dst$$Register, markWord::klass_shift_at_offset);
%}
ins_pipe(iload_reg_mem);
%}
// Load Float
instruct loadF(vRegF dst, memory4 mem)
%{
match(Set dst (LoadF mem));
predicate(!needs_acquiring_load(n));
ins_cost(4 * INSN_COST);
format %{ "ldrs $dst, $mem\t# float" %}
ins_encode( aarch64_enc_ldrs(dst, mem) );
ins_pipe(pipe_class_memory);
%}
// Load Double
instruct loadD(vRegD dst, memory8 mem)
%{
match(Set dst (LoadD mem));
predicate(!needs_acquiring_load(n));
ins_cost(4 * INSN_COST);
format %{ "ldrd $dst, $mem\t# double" %}
ins_encode( aarch64_enc_ldrd(dst, mem) );
ins_pipe(pipe_class_memory);
%}
// Load Int Constant
instruct loadConI(iRegINoSp dst, immI src)
%{
match(Set dst src);
ins_cost(INSN_COST);
format %{ "mov $dst, $src\t# int" %}
ins_encode( aarch64_enc_movw_imm(dst, src) );
ins_pipe(ialu_imm);
%}
// Load Long Constant
instruct loadConL(iRegLNoSp dst, immL src)
%{
match(Set dst src);
ins_cost(INSN_COST);
format %{ "mov $dst, $src\t# long" %}
ins_encode( aarch64_enc_mov_imm(dst, src) );
ins_pipe(ialu_imm);
%}
// Load Pointer Constant
instruct loadConP(iRegPNoSp dst, immP con)
%{
match(Set dst con);
ins_cost(INSN_COST * 4);
format %{ "mov $dst, $con\t# ptr" %}
ins_encode(aarch64_enc_mov_p(dst, con));
ins_pipe(ialu_imm);
%}
// Load Null Pointer Constant
instruct loadConP0(iRegPNoSp dst, immP0 con)
%{
match(Set dst con);
ins_cost(INSN_COST);
format %{ "mov $dst, $con\t# nullptr ptr" %}
ins_encode(aarch64_enc_mov_p0(dst, con));
ins_pipe(ialu_imm);
%}
// Load Pointer Constant One
instruct loadConP1(iRegPNoSp dst, immP_1 con)
%{
match(Set dst con);
ins_cost(INSN_COST);
format %{ "mov $dst, $con\t# nullptr ptr" %}
ins_encode(aarch64_enc_mov_p1(dst, con));
ins_pipe(ialu_imm);
%}
// Load Byte Map Base Constant
instruct loadByteMapBase(iRegPNoSp dst, immByteMapBase con)
%{
match(Set dst con);
ins_cost(INSN_COST);
format %{ "adr $dst, $con\t# Byte Map Base" %}
ins_encode(aarch64_enc_mov_byte_map_base(dst, con));
ins_pipe(ialu_imm);
%}
// Load Narrow Pointer Constant
instruct loadConN(iRegNNoSp dst, immN con)
%{
match(Set dst con);
ins_cost(INSN_COST * 4);
format %{ "mov $dst, $con\t# compressed ptr" %}
ins_encode(aarch64_enc_mov_n(dst, con));
ins_pipe(ialu_imm);
%}
// Load Narrow Null Pointer Constant
instruct loadConN0(iRegNNoSp dst, immN0 con)
%{
match(Set dst con);
ins_cost(INSN_COST);
format %{ "mov $dst, $con\t# compressed nullptr ptr" %}
ins_encode(aarch64_enc_mov_n0(dst, con));
ins_pipe(ialu_imm);
%}
// Load Narrow Klass Constant
instruct loadConNKlass(iRegNNoSp dst, immNKlass con)
%{
match(Set dst con);
ins_cost(INSN_COST);
format %{ "mov $dst, $con\t# compressed klass ptr" %}
ins_encode(aarch64_enc_mov_nk(dst, con));
ins_pipe(ialu_imm);
%}
// Load Packed Float Constant
instruct loadConF_packed(vRegF dst, immFPacked con) %{
match(Set dst con);
ins_cost(INSN_COST * 4);
format %{ "fmovs $dst, $con"%}
ins_encode %{
__ fmovs(as_FloatRegister($dst$$reg), (double)$con$$constant);
%}
ins_pipe(fp_imm_s);
%}
// Load Float Constant
instruct loadConF(vRegF dst, immF con) %{
match(Set dst con);
ins_cost(INSN_COST * 4);
format %{ "ldrs $dst, [$constantaddress]\t# load from constant table: float=$con" %}
ins_encode %{
__ ldrs(as_FloatRegister($dst$$reg), $constantaddress($con));
%}
ins_pipe(fp_load_constant_s);
%}
// Load Packed Double Constant
instruct loadConD_packed(vRegD dst, immDPacked con) %{
match(Set dst con);
ins_cost(INSN_COST);
format %{ "fmovd $dst, $con"%}
ins_encode %{
__ fmovd(as_FloatRegister($dst$$reg), $con$$constant);
%}
ins_pipe(fp_imm_d);
%}
// Load Double Constant
instruct loadConD(vRegD dst, immD con) %{
match(Set dst con);
ins_cost(INSN_COST * 5);
format %{ "ldrd $dst, [$constantaddress]\t# load from constant table: double=$con" %}
ins_encode %{
__ ldrd(as_FloatRegister($dst$$reg), $constantaddress($con));
%}
ins_pipe(fp_load_constant_d);
%}
// Load Half Float Constant
// The "ldr" instruction loads a 32-bit word from the constant pool into a
// 32-bit register but only the bottom half will be populated and the top
// 16 bits are zero.
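// For example, the half-float constant 1.0 occupies its table slot as
// 0x00003C00 (IEEE 754 binary16 1.0 is 0x3C00), which is exactly what
// $dst holds after the ldrs.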
instruct loadConH(vRegF dst, immH con) %{
match(Set dst con);
format %{ "ldrs $dst, [$constantaddress]\t# load from constant table: half float=$con" %}
ins_encode %{
__ ldrs(as_FloatRegister($dst$$reg), $constantaddress($con));
%}
ins_pipe(fp_load_constant_s);
%}
// Store Instructions
// Store Byte
instruct storeB(iRegIorL2I src, memory1 mem)
%{
match(Set mem (StoreB mem src));
predicate(!needs_releasing_store(n));
ins_cost(INSN_COST);
format %{ "strb $src, $mem\t# byte" %}
ins_encode(aarch64_enc_strb(src, mem));
ins_pipe(istore_reg_mem);
%}
instruct storeimmB0(immI0 zero, memory1 mem)
%{
match(Set mem (StoreB mem zero));
predicate(!needs_releasing_store(n));
ins_cost(INSN_COST);
format %{ "strb rscractch2, $mem\t# byte" %}
ins_encode(aarch64_enc_strb0(mem));
ins_pipe(istore_mem);
%}
// Store Char/Short
instruct storeC(iRegIorL2I src, memory2 mem)
%{
match(Set mem (StoreC mem src));
predicate(!needs_releasing_store(n));
ins_cost(INSN_COST);
format %{ "strh $src, $mem\t# short" %}
ins_encode(aarch64_enc_strh(src, mem));
ins_pipe(istore_reg_mem);
%}
instruct storeimmC0(immI0 zero, memory2 mem)
%{
match(Set mem (StoreC mem zero));
predicate(!needs_releasing_store(n));
ins_cost(INSN_COST);
format %{ "strh zr, $mem\t# short" %}
ins_encode(aarch64_enc_strh0(mem));
ins_pipe(istore_mem);
%}
// Store Integer
instruct storeI(iRegIorL2I src, memory4 mem)
%{
match(Set mem (StoreI mem src));
predicate(!needs_releasing_store(n));
ins_cost(INSN_COST);
format %{ "strw $src, $mem\t# int" %}
ins_encode(aarch64_enc_strw(src, mem));
ins_pipe(istore_reg_mem);
%}
instruct storeimmI0(immI0 zero, memory4 mem)
%{
match(Set mem (StoreI mem zero));
predicate(!needs_releasing_store(n));
ins_cost(INSN_COST);
format %{ "strw zr, $mem\t# int" %}
ins_encode(aarch64_enc_strw0(mem));
ins_pipe(istore_mem);
%}
// Store Long (64 bit signed)
instruct storeL(iRegL src, memory8 mem)
%{
match(Set mem (StoreL mem src));
predicate(!needs_releasing_store(n));
ins_cost(INSN_COST);
format %{ "str $src, $mem\t# int" %}
ins_encode(aarch64_enc_str(src, mem));
ins_pipe(istore_reg_mem);
%}
// Store Long (64 bit signed)
instruct storeimmL0(immL0 zero, memory8 mem)
%{
match(Set mem (StoreL mem zero));
predicate(!needs_releasing_store(n));
ins_cost(INSN_COST);
format %{ "str zr, $mem\t# int" %}
ins_encode(aarch64_enc_str0(mem));
ins_pipe(istore_mem);
%}
// Store Pointer
instruct storeP(iRegP src, memory8 mem)
%{
match(Set mem (StoreP mem src));
predicate(!needs_releasing_store(n) && n->as_Store()->barrier_data() == 0);
ins_cost(INSN_COST);
format %{ "str $src, $mem\t# ptr" %}
ins_encode(aarch64_enc_str(src, mem));
ins_pipe(istore_reg_mem);
%}
// Store Pointer
instruct storeimmP0(immP0 zero, memory8 mem)
%{
match(Set mem (StoreP mem zero));
predicate(!needs_releasing_store(n) && n->as_Store()->barrier_data() == 0);
ins_cost(INSN_COST);
format %{ "str zr, $mem\t# ptr" %}
ins_encode(aarch64_enc_str0(mem));
ins_pipe(istore_mem);
%}
// Store Compressed Pointer
instruct storeN(iRegN src, memory4 mem)
%{
match(Set mem (StoreN mem src));
predicate(!needs_releasing_store(n) && n->as_Store()->barrier_data() == 0);
ins_cost(INSN_COST);
format %{ "strw $src, $mem\t# compressed ptr" %}
ins_encode(aarch64_enc_strw(src, mem));
ins_pipe(istore_reg_mem);
%}
instruct storeImmN0(immN0 zero, memory4 mem)
%{
match(Set mem (StoreN mem zero));
predicate(!needs_releasing_store(n) && n->as_Store()->barrier_data() == 0);
ins_cost(INSN_COST);
format %{ "strw zr, $mem\t# compressed ptr" %}
ins_encode(aarch64_enc_strw0(mem));
ins_pipe(istore_mem);
%}
// Store Float
instruct storeF(vRegF src, memory4 mem)
%{
match(Set mem (StoreF mem src));
predicate(!needs_releasing_store(n));
ins_cost(INSN_COST);
format %{ "strs $src, $mem\t# float" %}
ins_encode( aarch64_enc_strs(src, mem) );
ins_pipe(pipe_class_memory);
%}
// TODO
// implement storeImmF0 and storeFImmPacked
// Store Double
instruct storeD(vRegD src, memory8 mem)
%{
match(Set mem (StoreD mem src));
predicate(!needs_releasing_store(n));
ins_cost(INSN_COST);
format %{ "strd $src, $mem\t# double" %}
ins_encode( aarch64_enc_strd(src, mem) );
ins_pipe(pipe_class_memory);
%}
// Store Compressed Klass Pointer
instruct storeNKlass(iRegN src, memory4 mem)
%{
predicate(!needs_releasing_store(n));
match(Set mem (StoreNKlass mem src));
ins_cost(INSN_COST);
format %{ "strw $src, $mem\t# compressed klass ptr" %}
ins_encode(aarch64_enc_strw(src, mem));
ins_pipe(istore_reg_mem);
%}
// TODO
// implement storeImmD0 and storeDImmPacked
// prefetch instructions
// Must be safe to execute with invalid address (cannot fault).
instruct prefetchalloc( memory8 mem ) %{
match(PrefetchAllocation mem);
ins_cost(INSN_COST);
format %{ "prfm $mem, PSTL1KEEP\t# Prefetch into level 1 cache write keep" %}
ins_encode( aarch64_enc_prefetchw(mem) );
ins_pipe(iload_prefetch);
%}
// ---------------- volatile loads and stores ----------------
// Load Byte (8 bit signed)
instruct loadB_volatile(iRegINoSp dst, /* sync_memory*/indirect mem)
%{
match(Set dst (LoadB mem));
ins_cost(VOLATILE_REF_COST);
format %{ "ldarsb $dst, $mem\t# byte" %}
ins_encode(aarch64_enc_ldarsb(dst, mem));
ins_pipe(pipe_serial);
%}
// Load Byte (8 bit signed) into long
instruct loadB2L_volatile(iRegLNoSp dst, /* sync_memory*/indirect mem)
%{
match(Set dst (ConvI2L (LoadB mem)));
ins_cost(VOLATILE_REF_COST);
format %{ "ldarsb $dst, $mem\t# byte" %}
ins_encode(aarch64_enc_ldarsb(dst, mem));
ins_pipe(pipe_serial);
%}
// Load Byte (8 bit unsigned)
instruct loadUB_volatile(iRegINoSp dst, /* sync_memory*/indirect mem)
%{
match(Set dst (LoadUB mem));
ins_cost(VOLATILE_REF_COST);
format %{ "ldarb $dst, $mem\t# byte" %}
ins_encode(aarch64_enc_ldarb(dst, mem));
ins_pipe(pipe_serial);
%}
// Load Byte (8 bit unsigned) into long
instruct loadUB2L_volatile(iRegLNoSp dst, /* sync_memory*/indirect mem)
%{
match(Set dst (ConvI2L (LoadUB mem)));
ins_cost(VOLATILE_REF_COST);
format %{ "ldarb $dst, $mem\t# byte" %}
ins_encode(aarch64_enc_ldarb(dst, mem));
ins_pipe(pipe_serial);
%}
// Load Short (16 bit signed)
instruct loadS_volatile(iRegINoSp dst, /* sync_memory*/indirect mem)
%{
match(Set dst (LoadS mem));
ins_cost(VOLATILE_REF_COST);
format %{ "ldarshw $dst, $mem\t# short" %}
ins_encode(aarch64_enc_ldarshw(dst, mem));
ins_pipe(pipe_serial);
%}
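// Load Char (16 bit unsigned)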
instruct loadUS_volatile(iRegINoSp dst, /* sync_memory*/indirect mem)
%{
match(Set dst (LoadUS mem));
ins_cost(VOLATILE_REF_COST);
format %{ "ldarhw $dst, $mem\t# short" %}
ins_encode(aarch64_enc_ldarhw(dst, mem));
ins_pipe(pipe_serial);
%}
// Load Short/Char (16 bit unsigned) into long
instruct loadUS2L_volatile(iRegLNoSp dst, /* sync_memory*/indirect mem)
%{
match(Set dst (ConvI2L (LoadUS mem)));
ins_cost(VOLATILE_REF_COST);
format %{ "ldarh $dst, $mem\t# short" %}
ins_encode(aarch64_enc_ldarh(dst, mem));
ins_pipe(pipe_serial);
%}
// Load Short (16 bit signed) into long
instruct loadS2L_volatile(iRegLNoSp dst, /* sync_memory*/indirect mem)
%{
match(Set dst (ConvI2L (LoadS mem)));
ins_cost(VOLATILE_REF_COST);
format %{ "ldarh $dst, $mem\t# short" %}
ins_encode(aarch64_enc_ldarsh(dst, mem));
ins_pipe(pipe_serial);
%}
// Load Integer (32 bit signed)
instruct loadI_volatile(iRegINoSp dst, /* sync_memory*/indirect mem)
%{
match(Set dst (LoadI mem));
ins_cost(VOLATILE_REF_COST);
format %{ "ldarw $dst, $mem\t# int" %}
ins_encode(aarch64_enc_ldarw(dst, mem));
ins_pipe(pipe_serial);
%}
// Load Integer (32 bit unsigned) into long
instruct loadUI2L_volatile(iRegLNoSp dst, /* sync_memory*/indirect mem, immL_32bits mask)
%{
match(Set dst (AndL (ConvI2L (LoadI mem)) mask));
ins_cost(VOLATILE_REF_COST);
format %{ "ldarw $dst, $mem\t# int" %}
ins_encode(aarch64_enc_ldarw(dst, mem));
ins_pipe(pipe_serial);
%}
// Load Long (64 bit signed)
instruct loadL_volatile(iRegLNoSp dst, /* sync_memory*/indirect mem)
%{
match(Set dst (LoadL mem));
ins_cost(VOLATILE_REF_COST);
format %{ "ldar $dst, $mem\t# int" %}
ins_encode(aarch64_enc_ldar(dst, mem));
ins_pipe(pipe_serial);
%}
// Load Pointer
instruct loadP_volatile(iRegPNoSp dst, /* sync_memory*/indirect mem)
%{
match(Set dst (LoadP mem));
predicate(n->as_Load()->barrier_data() == 0);
ins_cost(VOLATILE_REF_COST);
format %{ "ldar $dst, $mem\t# ptr" %}
ins_encode(aarch64_enc_ldar(dst, mem));
ins_pipe(pipe_serial);
%}
// Load Compressed Pointer
instruct loadN_volatile(iRegNNoSp dst, /* sync_memory*/indirect mem)
%{
match(Set dst (LoadN mem));
predicate(n->as_Load()->barrier_data() == 0);
ins_cost(VOLATILE_REF_COST);
format %{ "ldarw $dst, $mem\t# compressed ptr" %}
ins_encode(aarch64_enc_ldarw(dst, mem));
ins_pipe(pipe_serial);
%}
// Load Float
instruct loadF_volatile(vRegF dst, /* sync_memory*/indirect mem)
%{
match(Set dst (LoadF mem));
ins_cost(VOLATILE_REF_COST);
format %{ "ldars $dst, $mem\t# float" %}
ins_encode( aarch64_enc_fldars(dst, mem) );
ins_pipe(pipe_serial);
%}
// Load Double
instruct loadD_volatile(vRegD dst, /* sync_memory*/indirect mem)
%{
match(Set dst (LoadD mem));
ins_cost(VOLATILE_REF_COST);
format %{ "ldard $dst, $mem\t# double" %}
ins_encode( aarch64_enc_fldard(dst, mem) );
ins_pipe(pipe_serial);
%}
// Store Byte
instruct storeB_volatile(iRegIorL2I src, /* sync_memory*/indirect mem)
%{
match(Set mem (StoreB mem src));
ins_cost(VOLATILE_REF_COST);
format %{ "stlrb $src, $mem\t# byte" %}
ins_encode(aarch64_enc_stlrb(src, mem));
ins_pipe(pipe_class_memory);
%}
instruct storeimmB0_volatile(immI0 zero, /* sync_memory*/indirect mem)
%{
match(Set mem (StoreB mem zero));
ins_cost(VOLATILE_REF_COST);
format %{ "stlrb zr, $mem\t# byte" %}
ins_encode(aarch64_enc_stlrb0(mem));
ins_pipe(pipe_class_memory);
%}
// Store Char/Short
instruct storeC_volatile(iRegIorL2I src, /* sync_memory*/indirect mem)
%{
match(Set mem (StoreC mem src));
ins_cost(VOLATILE_REF_COST);
format %{ "stlrh $src, $mem\t# short" %}
ins_encode(aarch64_enc_stlrh(src, mem));
ins_pipe(pipe_class_memory);
%}
instruct storeimmC0_volatile(immI0 zero, /* sync_memory*/indirect mem)
%{
match(Set mem (StoreC mem zero));
ins_cost(VOLATILE_REF_COST);
format %{ "stlrh zr, $mem\t# short" %}
ins_encode(aarch64_enc_stlrh0(mem));
ins_pipe(pipe_class_memory);
%}
// Store Integer
instruct storeI_volatile(iRegIorL2I src, /* sync_memory*/indirect mem)
%{
match(Set mem (StoreI mem src));
ins_cost(VOLATILE_REF_COST);
format %{ "stlrw $src, $mem\t# int" %}
ins_encode(aarch64_enc_stlrw(src, mem));
ins_pipe(pipe_class_memory);
%}
instruct storeimmI0_volatile(immI0 zero, /* sync_memory*/indirect mem)
%{
match(Set mem (StoreI mem zero));
ins_cost(VOLATILE_REF_COST);
format %{ "stlrw zr, $mem\t# int" %}
ins_encode(aarch64_enc_stlrw0(mem));
ins_pipe(pipe_class_memory);
%}
// Store Long (64 bit signed)
instruct storeL_volatile(iRegL src, /* sync_memory*/indirect mem)
%{
match(Set mem (StoreL mem src));
ins_cost(VOLATILE_REF_COST);
format %{ "stlr $src, $mem\t# int" %}
ins_encode(aarch64_enc_stlr(src, mem));
ins_pipe(pipe_class_memory);
%}
instruct storeimmL0_volatile(immL0 zero, /* sync_memory*/indirect mem)
%{
match(Set mem (StoreL mem zero));
ins_cost(VOLATILE_REF_COST);
format %{ "stlr zr, $mem\t# int" %}
ins_encode(aarch64_enc_stlr0(mem));
ins_pipe(pipe_class_memory);
%}
// Store Pointer
instruct storeP_volatile(iRegP src, /* sync_memory*/indirect mem)
%{
match(Set mem (StoreP mem src));
predicate(n->as_Store()->barrier_data() == 0);
ins_cost(VOLATILE_REF_COST);
format %{ "stlr $src, $mem\t# ptr" %}
ins_encode(aarch64_enc_stlr(src, mem));
ins_pipe(pipe_class_memory);
%}
instruct storeimmP0_volatile(immP0 zero, /* sync_memory*/indirect mem)
%{
match(Set mem (StoreP mem zero));
predicate(n->as_Store()->barrier_data() == 0);
ins_cost(VOLATILE_REF_COST);
format %{ "stlr zr, $mem\t# ptr" %}
ins_encode(aarch64_enc_stlr0(mem));
ins_pipe(pipe_class_memory);
%}
// Store Compressed Pointer
instruct storeN_volatile(iRegN src, /* sync_memory*/indirect mem)
%{
match(Set mem (StoreN mem src));
predicate(n->as_Store()->barrier_data() == 0);
ins_cost(VOLATILE_REF_COST);
format %{ "stlrw $src, $mem\t# compressed ptr" %}
ins_encode(aarch64_enc_stlrw(src, mem));
ins_pipe(pipe_class_memory);
%}
instruct storeimmN0_volatile(immN0 zero, /* sync_memory*/indirect mem)
%{
match(Set mem (StoreN mem zero));
predicate(n->as_Store()->barrier_data() == 0);
ins_cost(VOLATILE_REF_COST);
format %{ "stlrw zr, $mem\t# compressed ptr" %}
ins_encode(aarch64_enc_stlrw0(mem));
ins_pipe(pipe_class_memory);
%}
// Store Float
instruct storeF_volatile(vRegF src, /* sync_memory*/indirect mem)
%{
match(Set mem (StoreF mem src));
ins_cost(VOLATILE_REF_COST);
format %{ "stlrs $src, $mem\t# float" %}
ins_encode( aarch64_enc_fstlrs(src, mem) );
ins_pipe(pipe_class_memory);
%}
// TODO
// implement storeImmF0 and storeFImmPacked
// Store Double
instruct storeD_volatile(vRegD src, /* sync_memory*/indirect mem)
%{
match(Set mem (StoreD mem src));
ins_cost(VOLATILE_REF_COST);
format %{ "stlrd $src, $mem\t# double" %}
ins_encode( aarch64_enc_fstlrd(src, mem) );
ins_pipe(pipe_class_memory);
%}
// ---------------- end of volatile loads and stores ----------------
instruct cacheWB(indirect addr)
%{
predicate(VM_Version::supports_data_cache_line_flush());
match(CacheWB addr);
ins_cost(100);
format %{"cache wb $addr" %}
ins_encode %{
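// CacheWB is only ever generated with a bare base address (no index,
// zero displacement); the asserts below check that before handing the
// base register to cache_wb.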
assert($addr->index_position() < 0, "should be");
assert($addr$$disp == 0, "should be");
__ cache_wb(Address($addr$$base$$Register, 0));
%}
ins_pipe(pipe_slow); // XXX
%}
instruct cacheWBPreSync()
%{
predicate(VM_Version::supports_data_cache_line_flush());
match(CacheWBPreSync);
ins_cost(100);
format %{"cache wb presync" %}
ins_encode %{
__ cache_wbsync(true);
%}
ins_pipe(pipe_slow); // XXX
%}
instruct cacheWBPostSync()
%{
predicate(VM_Version::supports_data_cache_line_flush());
match(CacheWBPostSync);
ins_cost(100);
format %{"cache wb postsync" %}
ins_encode %{
__ cache_wbsync(false);
%}
ins_pipe(pipe_slow); // XXX
%}
// ============================================================================
// BSWAP Instructions
instruct bytes_reverse_int(iRegINoSp dst, iRegIorL2I src) %{
match(Set dst (ReverseBytesI src));
ins_cost(INSN_COST);
format %{ "revw $dst, $src" %}
ins_encode %{
__ revw(as_Register($dst$$reg), as_Register($src$$reg));
%}
ins_pipe(ialu_reg);
%}
instruct bytes_reverse_long(iRegLNoSp dst, iRegL src) %{
match(Set dst (ReverseBytesL src));
ins_cost(INSN_COST);
format %{ "rev $dst, $src" %}
ins_encode %{
__ rev(as_Register($dst$$reg), as_Register($src$$reg));
%}
ins_pipe(ialu_reg);
%}
instruct bytes_reverse_unsigned_short(iRegINoSp dst, iRegIorL2I src) %{
match(Set dst (ReverseBytesUS src));
ins_cost(INSN_COST);
format %{ "rev16w $dst, $src" %}
ins_encode %{
__ rev16w(as_Register($dst$$reg), as_Register($src$$reg));
%}
ins_pipe(ialu_reg);
%}
instruct bytes_reverse_short(iRegINoSp dst, iRegIorL2I src) %{
match(Set dst (ReverseBytesS src));
ins_cost(INSN_COST);
format %{ "rev16w $dst, $src\n\t"
"sbfmw $dst, $dst, #0, #15" %}
ins_encode %{
__ rev16w(as_Register($dst$$reg), as_Register($src$$reg));
__ sbfmw(as_Register($dst$$reg), as_Register($dst$$reg), 0U, 15U);
%}
ins_pipe(ialu_reg);
%}
// ============================================================================
// Zero Count Instructions
instruct countLeadingZerosI(iRegINoSp dst, iRegIorL2I src) %{
match(Set dst (CountLeadingZerosI src));
ins_cost(INSN_COST);
format %{ "clzw $dst, $src" %}
ins_encode %{
__ clzw(as_Register($dst$$reg), as_Register($src$$reg));
%}
ins_pipe(ialu_reg);
%}
instruct countLeadingZerosL(iRegINoSp dst, iRegL src) %{
match(Set dst (CountLeadingZerosL src));
ins_cost(INSN_COST);
format %{ "clz $dst, $src" %}
ins_encode %{
__ clz(as_Register($dst$$reg), as_Register($src$$reg));
%}
ins_pipe(ialu_reg);
%}
instruct countTrailingZerosI(iRegINoSp dst, iRegIorL2I src) %{
match(Set dst (CountTrailingZerosI src));
ins_cost(INSN_COST * 2);
format %{ "rbitw $dst, $src\n\t"
"clzw $dst, $dst" %}
ins_encode %{
__ rbitw(as_Register($dst$$reg), as_Register($src$$reg));
__ clzw(as_Register($dst$$reg), as_Register($dst$$reg));
%}
ins_pipe(ialu_reg);
%}
instruct countTrailingZerosL(iRegINoSp dst, iRegL src) %{
match(Set dst (CountTrailingZerosL src));
ins_cost(INSN_COST * 2);
format %{ "rbit $dst, $src\n\t"
"clz $dst, $dst" %}
ins_encode %{
__ rbit(as_Register($dst$$reg), as_Register($src$$reg));
__ clz(as_Register($dst$$reg), as_Register($dst$$reg));
%}
ins_pipe(ialu_reg);
%}
//---------- Population Count Instructions -------------------------------------
//
instruct popCountI(iRegINoSp dst, iRegIorL2I src, vRegF tmp) %{
match(Set dst (PopCountI src));
effect(TEMP tmp);
ins_cost(INSN_COST * 13);
format %{ "fmovs $tmp, $src\t# vector (1S)\n\t"
"cnt $tmp, $tmp\t# vector (8B)\n\t"
"addv $tmp, $tmp\t# vector (8B)\n\t"
"mov $dst, $tmp\t# vector (1D)" %}
ins_encode %{
__ fmovs($tmp$$FloatRegister, $src$$Register);
__ cnt($tmp$$FloatRegister, __ T8B, $tmp$$FloatRegister);
__ addv($tmp$$FloatRegister, __ T8B, $tmp$$FloatRegister);
__ mov($dst$$Register, $tmp$$FloatRegister, __ D, 0);
%}
ins_pipe(pipe_class_default);
%}
instruct popCountI_mem(iRegINoSp dst, memory4 mem, vRegF tmp) %{
match(Set dst (PopCountI (LoadI mem)));
effect(TEMP tmp);
ins_cost(INSN_COST * 13);
format %{ "ldrs $tmp, $mem\n\t"
"cnt $tmp, $tmp\t# vector (8B)\n\t"
"addv $tmp, $tmp\t# vector (8B)\n\t"
"mov $dst, $tmp\t# vector (1D)" %}
ins_encode %{
FloatRegister tmp_reg = as_FloatRegister($tmp$$reg);
loadStore(masm, &MacroAssembler::ldrs, tmp_reg, $mem->opcode(),
as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 4);
__ cnt($tmp$$FloatRegister, __ T8B, $tmp$$FloatRegister);
__ addv($tmp$$FloatRegister, __ T8B, $tmp$$FloatRegister);
__ mov($dst$$Register, $tmp$$FloatRegister, __ D, 0);
%}
ins_pipe(pipe_class_default);
%}
// Note: Long.bitCount(long) returns an int.
instruct popCountL(iRegINoSp dst, iRegL src, vRegD tmp) %{
match(Set dst (PopCountL src));
effect(TEMP tmp);
ins_cost(INSN_COST * 13);
format %{ "mov $tmp, $src\t# vector (1D)\n\t"
"cnt $tmp, $tmp\t# vector (8B)\n\t"
"addv $tmp, $tmp\t# vector (8B)\n\t"
"mov $dst, $tmp\t# vector (1D)" %}
ins_encode %{
__ mov($tmp$$FloatRegister, __ D, 0, $src$$Register);
__ cnt($tmp$$FloatRegister, __ T8B, $tmp$$FloatRegister);
__ addv($tmp$$FloatRegister, __ T8B, $tmp$$FloatRegister);
__ mov($dst$$Register, $tmp$$FloatRegister, __ D, 0);
%}
ins_pipe(pipe_class_default);
%}
instruct popCountL_mem(iRegINoSp dst, memory8 mem, vRegD tmp) %{
match(Set dst (PopCountL (LoadL mem)));
effect(TEMP tmp);
ins_cost(INSN_COST * 13);
format %{ "ldrd $tmp, $mem\n\t"
"cnt $tmp, $tmp\t# vector (8B)\n\t"
"addv $tmp, $tmp\t# vector (8B)\n\t"
"mov $dst, $tmp\t# vector (1D)" %}
ins_encode %{
FloatRegister tmp_reg = as_FloatRegister($tmp$$reg);
loadStore(masm, &MacroAssembler::ldrd, tmp_reg, $mem->opcode(),
as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 8);
__ cnt($tmp$$FloatRegister, __ T8B, $tmp$$FloatRegister);
__ addv($tmp$$FloatRegister, __ T8B, $tmp$$FloatRegister);
__ mov($dst$$Register, $tmp$$FloatRegister, __ D, 0);
%}
ins_pipe(pipe_class_default);
%}
// ============================================================================
// VerifyVectorAlignment Instruction
instruct verify_vector_alignment(iRegP addr, immL_positive_bitmaskI mask, rFlagsReg cr) %{
match(Set addr (VerifyVectorAlignment addr mask));
effect(KILL cr);
format %{ "verify_vector_alignment $addr $mask \t! verify alignment" %}
ins_encode %{
Label Lskip;
// check if masked bits of addr are zero
__ tst($addr$$Register, $mask$$constant);
__ br(Assembler::EQ, Lskip);
__ stop("verify_vector_alignment found a misaligned vector memory access");
__ bind(Lskip);
%}
ins_pipe(pipe_slow);
%}
// ============================================================================
// MemBar Instruction
instruct load_fence() %{
match(LoadFence);
ins_cost(VOLATILE_REF_COST);
format %{ "load_fence" %}
ins_encode %{
__ membar(Assembler::LoadLoad|Assembler::LoadStore);
%}
ins_pipe(pipe_serial);
%}
instruct unnecessary_membar_acquire() %{
predicate(unnecessary_acquire(n));
match(MemBarAcquire);
ins_cost(0);
format %{ "membar_acquire (elided)" %}
ins_encode %{
__ block_comment("membar_acquire (elided)");
%}
ins_pipe(pipe_class_empty);
%}
instruct membar_acquire() %{
match(MemBarAcquire);
ins_cost(VOLATILE_REF_COST);
format %{ "membar_acquire\n\t"
"dmb ishld" %}
ins_encode %{
__ block_comment("membar_acquire");
__ membar(Assembler::LoadLoad|Assembler::LoadStore);
%}
ins_pipe(pipe_serial);
%}
instruct membar_acquire_lock() %{
match(MemBarAcquireLock);
ins_cost(VOLATILE_REF_COST);
format %{ "membar_acquire_lock (elided)" %}
ins_encode %{
__ block_comment("membar_acquire_lock (elided)");
%}
ins_pipe(pipe_serial);
%}
instruct store_fence() %{
match(StoreFence);
ins_cost(VOLATILE_REF_COST);
format %{ "store_fence" %}
ins_encode %{
__ membar(Assembler::LoadStore|Assembler::StoreStore);
%}
ins_pipe(pipe_serial);
%}
instruct unnecessary_membar_release() %{
predicate(unnecessary_release(n));
match(MemBarRelease);
ins_cost(0);
format %{ "membar_release (elided)" %}
ins_encode %{
__ block_comment("membar_release (elided)");
%}
ins_pipe(pipe_serial);
%}
instruct membar_release() %{
match(MemBarRelease);
ins_cost(VOLATILE_REF_COST);
format %{ "membar_release\n\t"
"dmb ishst\n\tdmb ishld" %}
ins_encode %{
__ block_comment("membar_release");
// These will be merged if AlwaysMergeDMB is enabled.
__ membar(Assembler::StoreStore);
__ membar(Assembler::LoadStore);
%}
ins_pipe(pipe_serial);
%}
instruct membar_storestore() %{
match(MemBarStoreStore);
match(StoreStoreFence);
ins_cost(VOLATILE_REF_COST);
format %{ "MEMBAR-store-store" %}
ins_encode %{
__ membar(Assembler::StoreStore);
%}
ins_pipe(pipe_serial);
%}
instruct membar_release_lock() %{
match(MemBarReleaseLock);
ins_cost(VOLATILE_REF_COST);
format %{ "membar_release_lock (elided)" %}
ins_encode %{
__ block_comment("membar_release_lock (elided)");
%}
ins_pipe(pipe_serial);
%}
instruct unnecessary_membar_volatile() %{
predicate(unnecessary_volatile(n));
match(MemBarVolatile);
ins_cost(0);
format %{ "membar_volatile (elided)" %}
ins_encode %{
__ block_comment("membar_volatile (elided)");
%}
ins_pipe(pipe_serial);
%}
instruct membar_volatile() %{
match(MemBarVolatile);
ins_cost(VOLATILE_REF_COST*100);
format %{ "membar_volatile\n\t"
"dmb ish"%}
ins_encode %{
__ block_comment("membar_volatile");
__ membar(Assembler::StoreLoad);
%}
ins_pipe(pipe_serial);
%}
// ============================================================================
// Cast/Convert Instructions
instruct castX2P(iRegPNoSp dst, iRegL src) %{
match(Set dst (CastX2P src));
ins_cost(INSN_COST);
format %{ "mov $dst, $src\t# long -> ptr" %}
ins_encode %{
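// longs and pointers share the general register file, so the move is
// elided when the allocator has already placed src and dst in the same
// register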
if ($dst$$reg != $src$$reg) {
__ mov(as_Register($dst$$reg), as_Register($src$$reg));
}
%}
ins_pipe(ialu_reg);
%}
instruct castP2X(iRegLNoSp dst, iRegP src) %{
match(Set dst (CastP2X src));
ins_cost(INSN_COST);
format %{ "mov $dst, $src\t# ptr -> long" %}
ins_encode %{
if ($dst$$reg != $src$$reg) {
__ mov(as_Register($dst$$reg), as_Register($src$$reg));
}
%}
ins_pipe(ialu_reg);
%}
// Convert oop into int for vectors alignment masking
instruct convP2I(iRegINoSp dst, iRegP src) %{
match(Set dst (ConvL2I (CastP2X src)));
ins_cost(INSN_COST);
format %{ "movw $dst, $src\t# ptr -> int" %}
ins_encode %{
__ movw($dst$$Register, $src$$Register);
%}
ins_pipe(ialu_reg);
%}
// Convert compressed oop into int for vectors alignment masking
// in case of 32bit oops (heap < 4Gb).
instruct convN2I(iRegINoSp dst, iRegN src)
%{
predicate(CompressedOops::shift() == 0);
match(Set dst (ConvL2I (CastP2X (DecodeN src))));
ins_cost(INSN_COST);
format %{ "mov dst, $src\t# compressed ptr -> int" %}
ins_encode %{
__ movw($dst$$Register, $src$$Register);
%}
ins_pipe(ialu_reg);
%}
// Convert oop pointer into compressed form
instruct encodeHeapOop(iRegNNoSp dst, iRegP src, rFlagsReg cr) %{
predicate(n->bottom_type()->make_ptr()->ptr() != TypePtr::NotNull);
match(Set dst (EncodeP src));
effect(KILL cr);
ins_cost(INSN_COST * 3);
format %{ "encode_heap_oop $dst, $src" %}
ins_encode %{
Register s = $src$$Register;
Register d = $dst$$Register;
__ encode_heap_oop(d, s);
%}
ins_pipe(ialu_reg);
%}
instruct encodeHeapOop_not_null(iRegNNoSp dst, iRegP src, rFlagsReg cr) %{
predicate(n->bottom_type()->make_ptr()->ptr() == TypePtr::NotNull);
match(Set dst (EncodeP src));
ins_cost(INSN_COST * 3);
format %{ "encode_heap_oop_not_null $dst, $src" %}
ins_encode %{
__ encode_heap_oop_not_null($dst$$Register, $src$$Register);
%}
ins_pipe(ialu_reg);
%}
instruct decodeHeapOop(iRegPNoSp dst, iRegN src, rFlagsReg cr) %{
predicate(n->bottom_type()->is_ptr()->ptr() != TypePtr::NotNull &&
n->bottom_type()->is_ptr()->ptr() != TypePtr::Constant);
match(Set dst (DecodeN src));
ins_cost(INSN_COST * 3);
format %{ "decode_heap_oop $dst, $src" %}
ins_encode %{
Register s = $src$$Register;
Register d = $dst$$Register;
__ decode_heap_oop(d, s);
%}
ins_pipe(ialu_reg);
%}
instruct decodeHeapOop_not_null(iRegPNoSp dst, iRegN src, rFlagsReg cr) %{
predicate(n->bottom_type()->is_ptr()->ptr() == TypePtr::NotNull ||
n->bottom_type()->is_ptr()->ptr() == TypePtr::Constant);
match(Set dst (DecodeN src));
ins_cost(INSN_COST * 3);
format %{ "decode_heap_oop_not_null $dst, $src" %}
ins_encode %{
Register s = $src$$Register;
Register d = $dst$$Register;
__ decode_heap_oop_not_null(d, s);
%}
ins_pipe(ialu_reg);
%}
// n.b. AArch64 implementations of encode_klass_not_null and
// decode_klass_not_null do not modify the flags register so, unlike
// Intel, we don't kill CR as a side effect here
instruct encodeKlass_not_null(iRegNNoSp dst, iRegP src) %{
match(Set dst (EncodePKlass src));
ins_cost(INSN_COST * 3);
format %{ "encode_klass_not_null $dst,$src" %}
ins_encode %{
Register src_reg = as_Register($src$$reg);
Register dst_reg = as_Register($dst$$reg);
__ encode_klass_not_null(dst_reg, src_reg);
%}
ins_pipe(ialu_reg);
%}
instruct decodeKlass_not_null(iRegPNoSp dst, iRegN src) %{
match(Set dst (DecodeNKlass src));
ins_cost(INSN_COST * 3);
format %{ "decode_klass_not_null $dst,$src" %}
ins_encode %{
Register src_reg = as_Register($src$$reg);
Register dst_reg = as_Register($dst$$reg);
if (dst_reg != src_reg) {
__ decode_klass_not_null(dst_reg, src_reg);
} else {
__ decode_klass_not_null(dst_reg);
}
%}
ins_pipe(ialu_reg);
%}
instruct checkCastPP(iRegPNoSp dst)
%{
match(Set dst (CheckCastPP dst));
size(0);
format %{ "# checkcastPP of $dst" %}
ins_encode(/* empty encoding */);
ins_pipe(pipe_class_empty);
%}
instruct castPP(iRegPNoSp dst)
%{
match(Set dst (CastPP dst));
size(0);
format %{ "# castPP of $dst" %}
ins_encode(/* empty encoding */);
ins_pipe(pipe_class_empty);
%}
instruct castII(iRegI dst)
%{
predicate(VerifyConstraintCasts == 0);
match(Set dst (CastII dst));
size(0);
format %{ "# castII of $dst" %}
ins_encode(/* empty encoding */);
ins_cost(0);
ins_pipe(pipe_class_empty);
%}
instruct castII_checked(iRegI dst, rFlagsReg cr)
%{
predicate(VerifyConstraintCasts > 0);
match(Set dst (CastII dst));
effect(KILL cr);
format %{ "# castII_checked of $dst" %}
ins_encode %{
__ verify_int_in_range(_idx, bottom_type()->is_int(), $dst$$Register, rscratch1);
%}
ins_pipe(pipe_slow);
%}
instruct castLL(iRegL dst)
%{
predicate(VerifyConstraintCasts == 0);
match(Set dst (CastLL dst));
size(0);
format %{ "# castLL of $dst" %}
ins_encode(/* empty encoding */);
ins_cost(0);
ins_pipe(pipe_class_empty);
%}
instruct castLL_checked(iRegL dst, rFlagsReg cr)
%{
predicate(VerifyConstraintCasts > 0);
match(Set dst (CastLL dst));
effect(KILL cr);
format %{ "# castLL_checked of $dst" %}
ins_encode %{
__ verify_long_in_range(_idx, bottom_type()->is_long(), $dst$$Register, rscratch1);
%}
ins_pipe(pipe_slow);
%}
instruct castHH(vRegF dst)
%{
match(Set dst (CastHH dst));
size(0);
format %{ "# castHH of $dst" %}
ins_encode(/* empty encoding */);
ins_cost(0);
ins_pipe(pipe_class_empty);
%}
instruct castFF(vRegF dst)
%{
match(Set dst (CastFF dst));
size(0);
format %{ "# castFF of $dst" %}
ins_encode(/* empty encoding */);
ins_cost(0);
ins_pipe(pipe_class_empty);
%}
instruct castDD(vRegD dst)
%{
match(Set dst (CastDD dst));
size(0);
format %{ "# castDD of $dst" %}
ins_encode(/* empty encoding */);
ins_cost(0);
ins_pipe(pipe_class_empty);
%}
instruct castVV(vReg dst)
%{
match(Set dst (CastVV dst));
size(0);
format %{ "# castVV of $dst" %}
ins_encode(/* empty encoding */);
ins_cost(0);
ins_pipe(pipe_class_empty);
%}
instruct castVVMask(pRegGov dst)
%{
match(Set dst (CastVV dst));
size(0);
format %{ "# castVV of $dst" %}
ins_encode(/* empty encoding */);
ins_cost(0);
ins_pipe(pipe_class_empty);
%}
// ============================================================================
// Atomic operation instructions
//
// Standard CompareAndSwapX when we are using barriers.
// These have higher priority than the rules selected by a predicate.
// XXX No flag versions for CompareAndSwap{I,L,P,N} because the matcher
// can't match them.
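//
// Each CAS rule below expands to a MacroAssembler::cmpxchg call followed,
// via aarch64_enc_cset_eq, by a cset of the EQ flag. A representative
// shape (sketch only -- operand size and the acquire flag vary per rule):
//
//   __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
//              Assembler::word, /*acquire*/ false, /*release*/ true,
//              /*weak*/ false, noreg);
//   __ cset($res$$Register, Assembler::EQ);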
instruct compareAndSwapB(iRegINoSp res, indirect mem, iRegINoSp oldval, iRegINoSp newval, rFlagsReg cr) %{
match(Set res (CompareAndSwapB mem (Binary oldval newval)));
ins_cost(2 * VOLATILE_REF_COST);
effect(KILL cr);
format %{
"cmpxchgb $mem, $oldval, $newval\t# (int) if $mem == $oldval then $mem <-- $newval"
"cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
%}
ins_encode(aarch64_enc_cmpxchgb(mem, oldval, newval),
aarch64_enc_cset_eq(res));
ins_pipe(pipe_slow);
%}
instruct compareAndSwapS(iRegINoSp res, indirect mem, iRegINoSp oldval, iRegINoSp newval, rFlagsReg cr) %{
match(Set res (CompareAndSwapS mem (Binary oldval newval)));
ins_cost(2 * VOLATILE_REF_COST);
effect(KILL cr);
format %{
"cmpxchgs $mem, $oldval, $newval\t# (int) if $mem == $oldval then $mem <-- $newval"
"cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
%}
ins_encode(aarch64_enc_cmpxchgs(mem, oldval, newval),
aarch64_enc_cset_eq(res));
ins_pipe(pipe_slow);
%}
instruct compareAndSwapI(iRegINoSp res, indirect mem, iRegINoSp oldval, iRegINoSp newval, rFlagsReg cr) %{
match(Set res (CompareAndSwapI mem (Binary oldval newval)));
ins_cost(2 * VOLATILE_REF_COST);
effect(KILL cr);
format %{
"cmpxchgw $mem, $oldval, $newval\t# (int) if $mem == $oldval then $mem <-- $newval"
"cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
%}
ins_encode(aarch64_enc_cmpxchgw(mem, oldval, newval),
aarch64_enc_cset_eq(res));
ins_pipe(pipe_slow);
%}
instruct compareAndSwapL(iRegINoSp res, indirect mem, iRegLNoSp oldval, iRegLNoSp newval, rFlagsReg cr) %{
match(Set res (CompareAndSwapL mem (Binary oldval newval)));
ins_cost(2 * VOLATILE_REF_COST);
effect(KILL cr);
format %{
"cmpxchg $mem, $oldval, $newval\t# (long) if $mem == $oldval then $mem <-- $newval"
"cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
%}
ins_encode(aarch64_enc_cmpxchg(mem, oldval, newval),
aarch64_enc_cset_eq(res));
ins_pipe(pipe_slow);
%}
instruct compareAndSwapP(iRegINoSp res, indirect mem, iRegP oldval, iRegP newval, rFlagsReg cr) %{
match(Set res (CompareAndSwapP mem (Binary oldval newval)));
predicate(n->as_LoadStore()->barrier_data() == 0);
ins_cost(2 * VOLATILE_REF_COST);
effect(KILL cr);
format %{
"cmpxchg $mem, $oldval, $newval\t# (ptr) if $mem == $oldval then $mem <-- $newval"
"cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
%}
ins_encode(aarch64_enc_cmpxchg(mem, oldval, newval),
aarch64_enc_cset_eq(res));
ins_pipe(pipe_slow);
%}
instruct compareAndSwapN(iRegINoSp res, indirect mem, iRegNNoSp oldval, iRegNNoSp newval, rFlagsReg cr) %{
match(Set res (CompareAndSwapN mem (Binary oldval newval)));
predicate(n->as_LoadStore()->barrier_data() == 0);
ins_cost(2 * VOLATILE_REF_COST);
effect(KILL cr);
format %{
"cmpxchgw $mem, $oldval, $newval\t# (narrow oop) if $mem == $oldval then $mem <-- $newval"
"cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
%}
ins_encode(aarch64_enc_cmpxchgw(mem, oldval, newval),
aarch64_enc_cset_eq(res));
ins_pipe(pipe_slow);
%}
// alternative CompareAndSwapX when we are eliding barriers
instruct compareAndSwapBAcq(iRegINoSp res, indirect mem, iRegINoSp oldval, iRegINoSp newval, rFlagsReg cr) %{
predicate(needs_acquiring_load_exclusive(n));
match(Set res (CompareAndSwapB mem (Binary oldval newval)));
ins_cost(VOLATILE_REF_COST);
effect(KILL cr);
format %{
"cmpxchgb_acq $mem, $oldval, $newval\t# (int) if $mem == $oldval then $mem <-- $newval"
"cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
%}
ins_encode(aarch64_enc_cmpxchgb_acq(mem, oldval, newval),
aarch64_enc_cset_eq(res));
ins_pipe(pipe_slow);
%}
instruct compareAndSwapSAcq(iRegINoSp res, indirect mem, iRegINoSp oldval, iRegINoSp newval, rFlagsReg cr) %{
predicate(needs_acquiring_load_exclusive(n));
match(Set res (CompareAndSwapS mem (Binary oldval newval)));
ins_cost(VOLATILE_REF_COST);
effect(KILL cr);
format %{
"cmpxchgs_acq $mem, $oldval, $newval\t# (int) if $mem == $oldval then $mem <-- $newval"
"cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
%}
ins_encode(aarch64_enc_cmpxchgs_acq(mem, oldval, newval),
aarch64_enc_cset_eq(res));
ins_pipe(pipe_slow);
%}
instruct compareAndSwapIAcq(iRegINoSp res, indirect mem, iRegINoSp oldval, iRegINoSp newval, rFlagsReg cr) %{
predicate(needs_acquiring_load_exclusive(n));
match(Set res (CompareAndSwapI mem (Binary oldval newval)));
ins_cost(VOLATILE_REF_COST);
effect(KILL cr);
format %{
"cmpxchgw_acq $mem, $oldval, $newval\t# (int) if $mem == $oldval then $mem <-- $newval"
"cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
%}
ins_encode(aarch64_enc_cmpxchgw_acq(mem, oldval, newval),
aarch64_enc_cset_eq(res));
ins_pipe(pipe_slow);
%}
instruct compareAndSwapLAcq(iRegINoSp res, indirect mem, iRegLNoSp oldval, iRegLNoSp newval, rFlagsReg cr) %{
predicate(needs_acquiring_load_exclusive(n));
match(Set res (CompareAndSwapL mem (Binary oldval newval)));
ins_cost(VOLATILE_REF_COST);
effect(KILL cr);
format %{
"cmpxchg_acq $mem, $oldval, $newval\t# (long) if $mem == $oldval then $mem <-- $newval"
"cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
%}
ins_encode(aarch64_enc_cmpxchg_acq(mem, oldval, newval),
aarch64_enc_cset_eq(res));
ins_pipe(pipe_slow);
%}
instruct compareAndSwapPAcq(iRegINoSp res, indirect mem, iRegP oldval, iRegP newval, rFlagsReg cr) %{
predicate(needs_acquiring_load_exclusive(n) && (n->as_LoadStore()->barrier_data() == 0));
match(Set res (CompareAndSwapP mem (Binary oldval newval)));
ins_cost(VOLATILE_REF_COST);
effect(KILL cr);
format %{
"cmpxchg_acq $mem, $oldval, $newval\t# (ptr) if $mem == $oldval then $mem <-- $newval"
"cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
%}
ins_encode(aarch64_enc_cmpxchg_acq(mem, oldval, newval),
aarch64_enc_cset_eq(res));
ins_pipe(pipe_slow);
%}
instruct compareAndSwapNAcq(iRegINoSp res, indirect mem, iRegNNoSp oldval, iRegNNoSp newval, rFlagsReg cr) %{
predicate(needs_acquiring_load_exclusive(n) && n->as_LoadStore()->barrier_data() == 0);
match(Set res (CompareAndSwapN mem (Binary oldval newval)));
ins_cost(VOLATILE_REF_COST);
effect(KILL cr);
format %{
"cmpxchgw_acq $mem, $oldval, $newval\t# (narrow oop) if $mem == $oldval then $mem <-- $newval"
"cset $res, EQ\t# $res <-- (EQ ? 1 : 0)"
%}
ins_encode(aarch64_enc_cmpxchgw_acq(mem, oldval, newval),
aarch64_enc_cset_eq(res));
ins_pipe(pipe_slow);
%}
// ---------------------------------------------------------------------
// BEGIN This section of the file is automatically generated. Do not edit --------------
// Sundry CAS operations. Note that release is always true,
// regardless of the memory ordering of the CAS. This is because we
// need the volatile case to be sequentially consistent but there is
// no trailing StoreLoad barrier emitted by C2. Unfortunately we
// can't check the type of memory ordering here, so we always emit a
// STLXR.
// This section is generated from cas.m4
// This pattern is generated automatically from cas.m4.
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct compareAndExchangeB(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval, rFlagsReg cr) %{
match(Set res (CompareAndExchangeB mem (Binary oldval newval)));
ins_cost(2 * VOLATILE_REF_COST);
effect(TEMP_DEF res, KILL cr);
format %{
"cmpxchgb $res = $mem, $oldval, $newval\t# (byte, weak) if $mem == $oldval then $mem <-- $newval"
%}
ins_encode %{
__ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
Assembler::byte, /*acquire*/ false, /*release*/ true,
/*weak*/ false, $res$$Register);
__ sxtbw($res$$Register, $res$$Register);
%}
ins_pipe(pipe_slow);
%}
// This pattern is generated automatically from cas.m4.
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct compareAndExchangeS(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval, rFlagsReg cr) %{
match(Set res (CompareAndExchangeS mem (Binary oldval newval)));
ins_cost(2 * VOLATILE_REF_COST);
effect(TEMP_DEF res, KILL cr);
format %{
"cmpxchgs $res = $mem, $oldval, $newval\t# (short, weak) if $mem == $oldval then $mem <-- $newval"
%}
ins_encode %{
__ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
Assembler::halfword, /*acquire*/ false, /*release*/ true,
/*weak*/ false, $res$$Register);
__ sxthw($res$$Register, $res$$Register);
%}
ins_pipe(pipe_slow);
%}
// This pattern is generated automatically from cas.m4.
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct compareAndExchangeI(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval, rFlagsReg cr) %{
match(Set res (CompareAndExchangeI mem (Binary oldval newval)));
ins_cost(2 * VOLATILE_REF_COST);
effect(TEMP_DEF res, KILL cr);
format %{
"cmpxchgw $res = $mem, $oldval, $newval\t# (int, weak) if $mem == $oldval then $mem <-- $newval"
%}
ins_encode %{
__ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
Assembler::word, /*acquire*/ false, /*release*/ true,
/*weak*/ false, $res$$Register);
%}
ins_pipe(pipe_slow);
%}
// This pattern is generated automatically from cas.m4.
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct compareAndExchangeL(iRegLNoSp res, indirect mem, iRegL oldval, iRegL newval, rFlagsReg cr) %{
match(Set res (CompareAndExchangeL mem (Binary oldval newval)));
ins_cost(2 * VOLATILE_REF_COST);
effect(TEMP_DEF res, KILL cr);
format %{
"cmpxchg $res = $mem, $oldval, $newval\t# (long, weak) if $mem == $oldval then $mem <-- $newval"
%}
ins_encode %{
__ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
Assembler::xword, /*acquire*/ false, /*release*/ true,
/*weak*/ false, $res$$Register);
%}
ins_pipe(pipe_slow);
%}
// This pattern is generated automatically from cas.m4.
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct compareAndExchangeN(iRegNNoSp res, indirect mem, iRegN oldval, iRegN newval, rFlagsReg cr) %{
predicate(n->as_LoadStore()->barrier_data() == 0);
match(Set res (CompareAndExchangeN mem (Binary oldval newval)));
ins_cost(2 * VOLATILE_REF_COST);
effect(TEMP_DEF res, KILL cr);
format %{
"cmpxchgw $res = $mem, $oldval, $newval\t# (narrow oop, weak) if $mem == $oldval then $mem <-- $newval"
%}
ins_encode %{
__ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
Assembler::word, /*acquire*/ false, /*release*/ true,
/*weak*/ false, $res$$Register);
%}
ins_pipe(pipe_slow);
%}
// This pattern is generated automatically from cas.m4.
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct compareAndExchangeP(iRegPNoSp res, indirect mem, iRegP oldval, iRegP newval, rFlagsReg cr) %{
predicate(n->as_LoadStore()->barrier_data() == 0);
match(Set res (CompareAndExchangeP mem (Binary oldval newval)));
ins_cost(2 * VOLATILE_REF_COST);
effect(TEMP_DEF res, KILL cr);
format %{
"cmpxchg $res = $mem, $oldval, $newval\t# (ptr, weak) if $mem == $oldval then $mem <-- $newval"
%}
ins_encode %{
__ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
Assembler::xword, /*acquire*/ false, /*release*/ true,
/*weak*/ false, $res$$Register);
%}
ins_pipe(pipe_slow);
%}
// This pattern is generated automatically from cas.m4.
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct compareAndExchangeBAcq(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval, rFlagsReg cr) %{
predicate(needs_acquiring_load_exclusive(n));
match(Set res (CompareAndExchangeB mem (Binary oldval newval)));
ins_cost(VOLATILE_REF_COST);
effect(TEMP_DEF res, KILL cr);
format %{
"cmpxchgb_acq $res = $mem, $oldval, $newval\t# (byte, weak) if $mem == $oldval then $mem <-- $newval"
%}
ins_encode %{
__ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
Assembler::byte, /*acquire*/ true, /*release*/ true,
/*weak*/ false, $res$$Register);
__ sxtbw($res$$Register, $res$$Register);
%}
ins_pipe(pipe_slow);
%}
// This pattern is generated automatically from cas.m4.
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct compareAndExchangeSAcq(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval, rFlagsReg cr) %{
predicate(needs_acquiring_load_exclusive(n));
match(Set res (CompareAndExchangeS mem (Binary oldval newval)));
ins_cost(VOLATILE_REF_COST);
effect(TEMP_DEF res, KILL cr);
format %{
"cmpxchgs_acq $res = $mem, $oldval, $newval\t# (short, weak) if $mem == $oldval then $mem <-- $newval"
%}
ins_encode %{
__ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
Assembler::halfword, /*acquire*/ true, /*release*/ true,
/*weak*/ false, $res$$Register);
__ sxthw($res$$Register, $res$$Register);
%}
ins_pipe(pipe_slow);
%}
// This pattern is generated automatically from cas.m4.
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct compareAndExchangeIAcq(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval, rFlagsReg cr) %{
predicate(needs_acquiring_load_exclusive(n));
match(Set res (CompareAndExchangeI mem (Binary oldval newval)));
ins_cost(VOLATILE_REF_COST);
effect(TEMP_DEF res, KILL cr);
format %{
"cmpxchgw_acq $res = $mem, $oldval, $newval\t# (int, weak) if $mem == $oldval then $mem <-- $newval"
%}
ins_encode %{
__ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
Assembler::word, /*acquire*/ true, /*release*/ true,
/*weak*/ false, $res$$Register);
%}
ins_pipe(pipe_slow);
%}
// This pattern is generated automatically from cas.m4.
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct compareAndExchangeLAcq(iRegLNoSp res, indirect mem, iRegL oldval, iRegL newval, rFlagsReg cr) %{
predicate(needs_acquiring_load_exclusive(n));
match(Set res (CompareAndExchangeL mem (Binary oldval newval)));
ins_cost(VOLATILE_REF_COST);
effect(TEMP_DEF res, KILL cr);
format %{
"cmpxchg_acq $res = $mem, $oldval, $newval\t# (long, weak) if $mem == $oldval then $mem <-- $newval"
%}
ins_encode %{
__ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
Assembler::xword, /*acquire*/ true, /*release*/ true,
/*weak*/ false, $res$$Register);
%}
ins_pipe(pipe_slow);
%}
// This pattern is generated automatically from cas.m4.
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct compareAndExchangeNAcq(iRegNNoSp res, indirect mem, iRegN oldval, iRegN newval, rFlagsReg cr) %{
predicate(needs_acquiring_load_exclusive(n) && n->as_LoadStore()->barrier_data() == 0);
match(Set res (CompareAndExchangeN mem (Binary oldval newval)));
ins_cost(VOLATILE_REF_COST);
effect(TEMP_DEF res, KILL cr);
format %{
"cmpxchgw_acq $res = $mem, $oldval, $newval\t# (narrow oop, weak) if $mem == $oldval then $mem <-- $newval"
%}
ins_encode %{
__ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
Assembler::word, /*acquire*/ true, /*release*/ true,
/*weak*/ false, $res$$Register);
%}
ins_pipe(pipe_slow);
%}
// This pattern is generated automatically from cas.m4.
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct compareAndExchangePAcq(iRegPNoSp res, indirect mem, iRegP oldval, iRegP newval, rFlagsReg cr) %{
predicate(needs_acquiring_load_exclusive(n) && (n->as_LoadStore()->barrier_data() == 0));
match(Set res (CompareAndExchangeP mem (Binary oldval newval)));
ins_cost(VOLATILE_REF_COST);
effect(TEMP_DEF res, KILL cr);
format %{
"cmpxchg_acq $res = $mem, $oldval, $newval\t# (ptr, weak) if $mem == $oldval then $mem <-- $newval"
%}
ins_encode %{
__ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
Assembler::xword, /*acquire*/ true, /*release*/ true,
/*weak*/ false, $res$$Register);
%}
ins_pipe(pipe_slow);
%}
// This pattern is generated automatically from cas.m4.
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct weakCompareAndSwapB(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval, rFlagsReg cr) %{
match(Set res (WeakCompareAndSwapB mem (Binary oldval newval)));
ins_cost(2 * VOLATILE_REF_COST);
effect(KILL cr);
format %{
"cmpxchgb $res = $mem, $oldval, $newval\t# (byte, weak) if $mem == $oldval then $mem <-- $newval"
"csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)"
%}
ins_encode %{
__ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
Assembler::byte, /*acquire*/ false, /*release*/ true,
/*weak*/ true, noreg);
__ csetw($res$$Register, Assembler::EQ);
%}
ins_pipe(pipe_slow);
%}
// This pattern is generated automatically from cas.m4.
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct weakCompareAndSwapS(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval, rFlagsReg cr) %{
match(Set res (WeakCompareAndSwapS mem (Binary oldval newval)));
ins_cost(2 * VOLATILE_REF_COST);
effect(KILL cr);
format %{
"cmpxchgs $res = $mem, $oldval, $newval\t# (short, weak) if $mem == $oldval then $mem <-- $newval"
"csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)"
%}
ins_encode %{
__ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
Assembler::halfword, /*acquire*/ false, /*release*/ true,
/*weak*/ true, noreg);
__ csetw($res$$Register, Assembler::EQ);
%}
ins_pipe(pipe_slow);
%}
// This pattern is generated automatically from cas.m4.
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct weakCompareAndSwapI(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval, rFlagsReg cr) %{
match(Set res (WeakCompareAndSwapI mem (Binary oldval newval)));
ins_cost(2 * VOLATILE_REF_COST);
effect(KILL cr);
format %{
"cmpxchgw $res = $mem, $oldval, $newval\t# (int, weak) if $mem == $oldval then $mem <-- $newval"
"csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)"
%}
ins_encode %{
__ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
Assembler::word, /*acquire*/ false, /*release*/ true,
/*weak*/ true, noreg);
__ csetw($res$$Register, Assembler::EQ);
%}
ins_pipe(pipe_slow);
%}
// This pattern is generated automatically from cas.m4.
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct weakCompareAndSwapL(iRegINoSp res, indirect mem, iRegL oldval, iRegL newval, rFlagsReg cr) %{
match(Set res (WeakCompareAndSwapL mem (Binary oldval newval)));
ins_cost(2 * VOLATILE_REF_COST);
effect(KILL cr);
format %{
"cmpxchg $res = $mem, $oldval, $newval\t# (long, weak) if $mem == $oldval then $mem <-- $newval"
"csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)"
%}
ins_encode %{
__ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
Assembler::xword, /*acquire*/ false, /*release*/ true,
/*weak*/ true, noreg);
__ csetw($res$$Register, Assembler::EQ);
%}
ins_pipe(pipe_slow);
%}
// This pattern is generated automatically from cas.m4.
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct weakCompareAndSwapN(iRegINoSp res, indirect mem, iRegN oldval, iRegN newval, rFlagsReg cr) %{
predicate(n->as_LoadStore()->barrier_data() == 0);
match(Set res (WeakCompareAndSwapN mem (Binary oldval newval)));
ins_cost(2 * VOLATILE_REF_COST);
effect(KILL cr);
format %{
"cmpxchgw $res = $mem, $oldval, $newval\t# (narrow oop, weak) if $mem == $oldval then $mem <-- $newval"
"csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)"
%}
ins_encode %{
__ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
Assembler::word, /*acquire*/ false, /*release*/ true,
/*weak*/ true, noreg);
__ csetw($res$$Register, Assembler::EQ);
%}
ins_pipe(pipe_slow);
%}
// This pattern is generated automatically from cas.m4.
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct weakCompareAndSwapP(iRegINoSp res, indirect mem, iRegP oldval, iRegP newval, rFlagsReg cr) %{
predicate(n->as_LoadStore()->barrier_data() == 0);
match(Set res (WeakCompareAndSwapP mem (Binary oldval newval)));
ins_cost(2 * VOLATILE_REF_COST);
effect(KILL cr);
format %{
"cmpxchg $res = $mem, $oldval, $newval\t# (ptr, weak) if $mem == $oldval then $mem <-- $newval"
"csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)"
%}
ins_encode %{
__ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
Assembler::xword, /*acquire*/ false, /*release*/ true,
/*weak*/ true, noreg);
__ csetw($res$$Register, Assembler::EQ);
%}
ins_pipe(pipe_slow);
%}
// This pattern is generated automatically from cas.m4.
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct weakCompareAndSwapBAcq(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval, rFlagsReg cr) %{
predicate(needs_acquiring_load_exclusive(n));
match(Set res (WeakCompareAndSwapB mem (Binary oldval newval)));
ins_cost(VOLATILE_REF_COST);
effect(KILL cr);
format %{
"cmpxchgb_acq $res = $mem, $oldval, $newval\t# (byte, weak) if $mem == $oldval then $mem <-- $newval"
"csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)"
%}
ins_encode %{
__ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
Assembler::byte, /*acquire*/ true, /*release*/ true,
/*weak*/ true, noreg);
__ csetw($res$$Register, Assembler::EQ);
%}
ins_pipe(pipe_slow);
%}
// This pattern is generated automatically from cas.m4.
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct weakCompareAndSwapSAcq(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval, rFlagsReg cr) %{
predicate(needs_acquiring_load_exclusive(n));
match(Set res (WeakCompareAndSwapS mem (Binary oldval newval)));
ins_cost(VOLATILE_REF_COST);
effect(KILL cr);
format %{
"cmpxchgs_acq $res = $mem, $oldval, $newval\t# (short, weak) if $mem == $oldval then $mem <-- $newval"
"csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)"
%}
ins_encode %{
__ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
Assembler::halfword, /*acquire*/ true, /*release*/ true,
/*weak*/ true, noreg);
__ csetw($res$$Register, Assembler::EQ);
%}
ins_pipe(pipe_slow);
%}
// This pattern is generated automatically from cas.m4.
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct weakCompareAndSwapIAcq(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval, rFlagsReg cr) %{
predicate(needs_acquiring_load_exclusive(n));
match(Set res (WeakCompareAndSwapI mem (Binary oldval newval)));
ins_cost(VOLATILE_REF_COST);
effect(KILL cr);
format %{
"cmpxchgw_acq $res = $mem, $oldval, $newval\t# (int, weak) if $mem == $oldval then $mem <-- $newval"
"csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)"
%}
ins_encode %{
__ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
Assembler::word, /*acquire*/ true, /*release*/ true,
/*weak*/ true, noreg);
__ csetw($res$$Register, Assembler::EQ);
%}
ins_pipe(pipe_slow);
%}
// This pattern is generated automatically from cas.m4.
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct weakCompareAndSwapLAcq(iRegINoSp res, indirect mem, iRegL oldval, iRegL newval, rFlagsReg cr) %{
predicate(needs_acquiring_load_exclusive(n));
match(Set res (WeakCompareAndSwapL mem (Binary oldval newval)));
ins_cost(VOLATILE_REF_COST);
effect(KILL cr);
format %{
"cmpxchg_acq $res = $mem, $oldval, $newval\t# (long, weak) if $mem == $oldval then $mem <-- $newval"
"csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)"
%}
ins_encode %{
__ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
Assembler::xword, /*acquire*/ true, /*release*/ true,
/*weak*/ true, noreg);
__ csetw($res$$Register, Assembler::EQ);
%}
ins_pipe(pipe_slow);
%}
// This pattern is generated automatically from cas.m4.
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct weakCompareAndSwapNAcq(iRegINoSp res, indirect mem, iRegN oldval, iRegN newval, rFlagsReg cr) %{
predicate(needs_acquiring_load_exclusive(n) && n->as_LoadStore()->barrier_data() == 0);
match(Set res (WeakCompareAndSwapN mem (Binary oldval newval)));
ins_cost(VOLATILE_REF_COST);
effect(KILL cr);
format %{
"cmpxchgw_acq $res = $mem, $oldval, $newval\t# (narrow oop, weak) if $mem == $oldval then $mem <-- $newval"
"csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)"
%}
ins_encode %{
__ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
Assembler::word, /*acquire*/ true, /*release*/ true,
/*weak*/ true, noreg);
__ csetw($res$$Register, Assembler::EQ);
%}
ins_pipe(pipe_slow);
%}
// This pattern is generated automatically from cas.m4.
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct weakCompareAndSwapPAcq(iRegINoSp res, indirect mem, iRegP oldval, iRegP newval, rFlagsReg cr) %{
predicate(needs_acquiring_load_exclusive(n) && (n->as_LoadStore()->barrier_data() == 0));
match(Set res (WeakCompareAndSwapP mem (Binary oldval newval)));
ins_cost(VOLATILE_REF_COST);
effect(KILL cr);
format %{
"cmpxchg_acq $res = $mem, $oldval, $newval\t# (ptr, weak) if $mem == $oldval then $mem <-- $newval"
"csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)"
%}
ins_encode %{
__ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register,
Assembler::xword, /*acquire*/ true, /*release*/ true,
/*weak*/ true, noreg);
__ csetw($res$$Register, Assembler::EQ);
%}
ins_pipe(pipe_slow);
%}
// END This section of the file is automatically generated. Do not edit --------------
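// n.b. for illustration only: the weak CAS rules above are what C2
// selects for VarHandle weak compare-and-set operations;
// weakCompareAndSetPlain maps to the plain WeakCompareAndSwap* nodes,
// and weakCompareAndSet to the *Acq forms when an acquiring
// load-exclusive is needed. A minimal Java sketch (class and field
// names are ours):
//
//   import java.lang.invoke.MethodHandles;
//   import java.lang.invoke.VarHandle;
//
//   class Counter {
//     volatile int value;
//     static final VarHandle VALUE;
//     static {
//       try {
//         VALUE = MethodHandles.lookup()
//                              .findVarHandle(Counter.class, "value", int.class);
//       } catch (ReflectiveOperationException e) {
//         throw new ExceptionInInitializerError(e);
//       }
//     }
//     // compiles to a WeakCompareAndSwapI; the boolean result is
//     // materialised by the csetw in the rules above
//     boolean tryBump(int oldv) {
//       return VALUE.weakCompareAndSetPlain(this, oldv, oldv + 1);
//     }
//   }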
// ---------------------------------------------------------------------
instruct get_and_setI(indirect mem, iRegI newv, iRegINoSp prev) %{
match(Set prev (GetAndSetI mem newv));
ins_cost(2 * VOLATILE_REF_COST);
format %{ "atomic_xchgw $prev, $newv, [$mem]" %}
ins_encode %{
__ atomic_xchgw($prev$$Register, $newv$$Register, as_Register($mem$$base));
%}
ins_pipe(pipe_serial);
%}
instruct get_and_setL(indirect mem, iRegL newv, iRegLNoSp prev) %{
match(Set prev (GetAndSetL mem newv));
ins_cost(2 * VOLATILE_REF_COST);
format %{ "atomic_xchg $prev, $newv, [$mem]" %}
ins_encode %{
__ atomic_xchg($prev$$Register, $newv$$Register, as_Register($mem$$base));
%}
ins_pipe(pipe_serial);
%}
instruct get_and_setN(indirect mem, iRegN newv, iRegINoSp prev) %{
predicate(n->as_LoadStore()->barrier_data() == 0);
match(Set prev (GetAndSetN mem newv));
ins_cost(2 * VOLATILE_REF_COST);
format %{ "atomic_xchgw $prev, $newv, [$mem]" %}
ins_encode %{
__ atomic_xchgw($prev$$Register, $newv$$Register, as_Register($mem$$base));
%}
ins_pipe(pipe_serial);
%}
instruct get_and_setP(indirect mem, iRegP newv, iRegPNoSp prev) %{
predicate(n->as_LoadStore()->barrier_data() == 0);
match(Set prev (GetAndSetP mem newv));
ins_cost(2 * VOLATILE_REF_COST);
format %{ "atomic_xchg $prev, $newv, [$mem]" %}
ins_encode %{
__ atomic_xchg($prev$$Register, $newv$$Register, as_Register($mem$$base));
%}
ins_pipe(pipe_serial);
%}
instruct get_and_setIAcq(indirect mem, iRegI newv, iRegINoSp prev) %{
predicate(needs_acquiring_load_exclusive(n));
match(Set prev (GetAndSetI mem newv));
ins_cost(VOLATILE_REF_COST);
format %{ "atomic_xchgw_acq $prev, $newv, [$mem]" %}
ins_encode %{
__ atomic_xchgalw($prev$$Register, $newv$$Register, as_Register($mem$$base));
%}
ins_pipe(pipe_serial);
%}
instruct get_and_setLAcq(indirect mem, iRegL newv, iRegLNoSp prev) %{
predicate(needs_acquiring_load_exclusive(n));
match(Set prev (GetAndSetL mem newv));
ins_cost(VOLATILE_REF_COST);
format %{ "atomic_xchg_acq $prev, $newv, [$mem]" %}
ins_encode %{
__ atomic_xchgal($prev$$Register, $newv$$Register, as_Register($mem$$base));
%}
ins_pipe(pipe_serial);
%}
instruct get_and_setNAcq(indirect mem, iRegN newv, iRegINoSp prev) %{
predicate(needs_acquiring_load_exclusive(n) && n->as_LoadStore()->barrier_data() == 0);
match(Set prev (GetAndSetN mem newv));
ins_cost(VOLATILE_REF_COST);
format %{ "atomic_xchgw_acq $prev, $newv, [$mem]" %}
ins_encode %{
__ atomic_xchgalw($prev$$Register, $newv$$Register, as_Register($mem$$base));
%}
ins_pipe(pipe_serial);
%}
instruct get_and_setPAcq(indirect mem, iRegP newv, iRegPNoSp prev) %{
predicate(needs_acquiring_load_exclusive(n) && (n->as_LoadStore()->barrier_data() == 0));
match(Set prev (GetAndSetP mem newv));
ins_cost(VOLATILE_REF_COST);
format %{ "atomic_xchg_acq $prev, $newv, [$mem]" %}
ins_encode %{
__ atomic_xchgal($prev$$Register, $newv$$Register, as_Register($mem$$base));
%}
ins_pipe(pipe_serial);
%}
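// n.b. for illustration only: the GetAndSet rules above correspond to
// the atomic exchange operations in java.util.concurrent.atomic and to
// VarHandle.getAndSet; oop exchanges select the P (or, with compressed
// oops, N) forms when the GC attaches no barrier data. A minimal sketch:
//
//   import java.util.concurrent.atomic.AtomicReference;
//
//   static String swap(AtomicReference<String> ref, String next) {
//     return ref.getAndSet(next);   // GetAndSetP / GetAndSetN
//   }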
instruct get_and_addL(indirect mem, iRegLNoSp newval, iRegL incr) %{
match(Set newval (GetAndAddL mem incr));
ins_cost(2 * VOLATILE_REF_COST + 1);
format %{ "get_and_addL $newval, [$mem], $incr" %}
ins_encode %{
__ atomic_add($newval$$Register, $incr$$Register, as_Register($mem$$base));
%}
ins_pipe(pipe_serial);
%}
instruct get_and_addL_no_res(indirect mem, Universe dummy, iRegL incr) %{
predicate(n->as_LoadStore()->result_not_used());
match(Set dummy (GetAndAddL mem incr));
ins_cost(2 * VOLATILE_REF_COST);
format %{ "get_and_addL [$mem], $incr" %}
ins_encode %{
__ atomic_add(noreg, $incr$$Register, as_Register($mem$$base));
%}
ins_pipe(pipe_serial);
%}
instruct get_and_addLi(indirect mem, iRegLNoSp newval, immLAddSub incr) %{
match(Set newval (GetAndAddL mem incr));
ins_cost(2 * VOLATILE_REF_COST + 1);
format %{ "get_and_addL $newval, [$mem], $incr" %}
ins_encode %{
__ atomic_add($newval$$Register, $incr$$constant, as_Register($mem$$base));
%}
ins_pipe(pipe_serial);
%}
instruct get_and_addLi_no_res(indirect mem, Universe dummy, immLAddSub incr) %{
predicate(n->as_LoadStore()->result_not_used());
match(Set dummy (GetAndAddL mem incr));
ins_cost(2 * VOLATILE_REF_COST);
format %{ "get_and_addL [$mem], $incr" %}
ins_encode %{
__ atomic_add(noreg, $incr$$constant, as_Register($mem$$base));
%}
ins_pipe(pipe_serial);
%}
instruct get_and_addI(indirect mem, iRegINoSp newval, iRegIorL2I incr) %{
match(Set newval (GetAndAddI mem incr));
ins_cost(2 * VOLATILE_REF_COST + 1);
format %{ "get_and_addI $newval, [$mem], $incr" %}
ins_encode %{
__ atomic_addw($newval$$Register, $incr$$Register, as_Register($mem$$base));
%}
ins_pipe(pipe_serial);
%}
instruct get_and_addI_no_res(indirect mem, Universe dummy, iRegIorL2I incr) %{
predicate(n->as_LoadStore()->result_not_used());
match(Set dummy (GetAndAddI mem incr));
ins_cost(2 * VOLATILE_REF_COST);
format %{ "get_and_addI [$mem], $incr" %}
ins_encode %{
__ atomic_addw(noreg, $incr$$Register, as_Register($mem$$base));
%}
ins_pipe(pipe_serial);
%}
instruct get_and_addIi(indirect mem, iRegINoSp newval, immIAddSub incr) %{
match(Set newval (GetAndAddI mem incr));
ins_cost(2 * VOLATILE_REF_COST + 1);
format %{ "get_and_addI $newval, [$mem], $incr" %}
ins_encode %{
__ atomic_addw($newval$$Register, $incr$$constant, as_Register($mem$$base));
%}
ins_pipe(pipe_serial);
%}
instruct get_and_addIi_no_res(indirect mem, Universe dummy, immIAddSub incr) %{
predicate(n->as_LoadStore()->result_not_used());
match(Set dummy (GetAndAddI mem incr));
ins_cost(2 * VOLATILE_REF_COST);
format %{ "get_and_addI [$mem], $incr" %}
ins_encode %{
__ atomic_addw(noreg, $incr$$constant, as_Register($mem$$base));
%}
ins_pipe(pipe_serial);
%}
instruct get_and_addLAcq(indirect mem, iRegLNoSp newval, iRegL incr) %{
predicate(needs_acquiring_load_exclusive(n));
match(Set newval (GetAndAddL mem incr));
ins_cost(VOLATILE_REF_COST + 1);
format %{ "get_and_addL_acq $newval, [$mem], $incr" %}
ins_encode %{
__ atomic_addal($newval$$Register, $incr$$Register, as_Register($mem$$base));
%}
ins_pipe(pipe_serial);
%}
instruct get_and_addL_no_resAcq(indirect mem, Universe dummy, iRegL incr) %{
predicate(n->as_LoadStore()->result_not_used() && needs_acquiring_load_exclusive(n));
match(Set dummy (GetAndAddL mem incr));
ins_cost(VOLATILE_REF_COST);
format %{ "get_and_addL_acq [$mem], $incr" %}
ins_encode %{
__ atomic_addal(noreg, $incr$$Register, as_Register($mem$$base));
%}
ins_pipe(pipe_serial);
%}
instruct get_and_addLiAcq(indirect mem, iRegLNoSp newval, immLAddSub incr) %{
predicate(needs_acquiring_load_exclusive(n));
match(Set newval (GetAndAddL mem incr));
ins_cost(VOLATILE_REF_COST + 1);
format %{ "get_and_addL_acq $newval, [$mem], $incr" %}
ins_encode %{
__ atomic_addal($newval$$Register, $incr$$constant, as_Register($mem$$base));
%}
ins_pipe(pipe_serial);
%}
instruct get_and_addLi_no_resAcq(indirect mem, Universe dummy, immLAddSub incr) %{
predicate(n->as_LoadStore()->result_not_used() && needs_acquiring_load_exclusive(n));
match(Set dummy (GetAndAddL mem incr));
ins_cost(VOLATILE_REF_COST);
format %{ "get_and_addL_acq [$mem], $incr" %}
ins_encode %{
__ atomic_addal(noreg, $incr$$constant, as_Register($mem$$base));
%}
ins_pipe(pipe_serial);
%}
instruct get_and_addIAcq(indirect mem, iRegINoSp newval, iRegIorL2I incr) %{
predicate(needs_acquiring_load_exclusive(n));
match(Set newval (GetAndAddI mem incr));
ins_cost(VOLATILE_REF_COST + 1);
format %{ "get_and_addI_acq $newval, [$mem], $incr" %}
ins_encode %{
__ atomic_addalw($newval$$Register, $incr$$Register, as_Register($mem$$base));
%}
ins_pipe(pipe_serial);
%}
instruct get_and_addI_no_resAcq(indirect mem, Universe dummy, iRegIorL2I incr) %{
predicate(n->as_LoadStore()->result_not_used() && needs_acquiring_load_exclusive(n));
match(Set dummy (GetAndAddI mem incr));
ins_cost(VOLATILE_REF_COST);
format %{ "get_and_addI_acq [$mem], $incr" %}
ins_encode %{
__ atomic_addalw(noreg, $incr$$Register, as_Register($mem$$base));
%}
ins_pipe(pipe_serial);
%}
instruct get_and_addIiAcq(indirect mem, iRegINoSp newval, immIAddSub incr) %{
predicate(needs_acquiring_load_exclusive(n));
match(Set newval (GetAndAddI mem incr));
ins_cost(VOLATILE_REF_COST + 1);
format %{ "get_and_addI_acq $newval, [$mem], $incr" %}
ins_encode %{
__ atomic_addalw($newval$$Register, $incr$$constant, as_Register($mem$$base));
%}
ins_pipe(pipe_serial);
%}
instruct get_and_addIi_no_resAcq(indirect mem, Universe dummy, immIAddSub incr) %{
predicate(n->as_LoadStore()->result_not_used() && needs_acquiring_load_exclusive(n));
match(Set dummy (GetAndAddI mem incr));
ins_cost(VOLATILE_REF_COST);
format %{ "get_and_addI_acq [$mem], $incr" %}
ins_encode %{
__ atomic_addalw(noreg, $incr$$constant, as_Register($mem$$base));
%}
ins_pipe(pipe_serial);
%}
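// n.b. for illustration only: the GetAndAdd rules above correspond to
// AtomicInteger/AtomicLong getAndAdd/addAndGet and VarHandle.getAndAdd;
// when the fetched value is discarded, result_not_used() is true and the
// *_no_res variants pass noreg instead of producing a result. A sketch:
//
//   import java.util.concurrent.atomic.AtomicLong;
//
//   static void hit(AtomicLong c)  { c.getAndAdd(1L); }        // get_and_addLi_no_res
//   static long next(AtomicLong c) { return c.getAndAdd(1L); } // get_and_addLi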
// Manifest a CmpU result in an integer register.
// (src1 < src2) ? -1 : ((src1 > src2) ? 1 : 0)
instruct cmpU3_reg_reg(iRegINoSp dst, iRegI src1, iRegI src2, rFlagsReg flags)
%{
match(Set dst (CmpU3 src1 src2));
effect(KILL flags);
ins_cost(INSN_COST * 3);
format %{
"cmpw $src1, $src2\n\t"
"csetw $dst, ne\n\t"
"cnegw $dst, lo\t# CmpU3(reg)"
%}
ins_encode %{
__ cmpw($src1$$Register, $src2$$Register);
__ csetw($dst$$Register, Assembler::NE);
__ cnegw($dst$$Register, $dst$$Register, Assembler::LO);
%}
ins_pipe(pipe_class_default);
%}
instruct cmpU3_reg_imm(iRegINoSp dst, iRegI src1, immIAddSub src2, rFlagsReg flags)
%{
match(Set dst (CmpU3 src1 src2));
effect(KILL flags);
ins_cost(INSN_COST * 3);
format %{
"subsw zr, $src1, $src2\n\t"
"csetw $dst, ne\n\t"
"cnegw $dst, lo\t# CmpU3(imm)"
%}
ins_encode %{
__ subsw(zr, $src1$$Register, (int32_t)$src2$$constant);
__ csetw($dst$$Register, Assembler::NE);
__ cnegw($dst$$Register, $dst$$Register, Assembler::LO);
%}
ins_pipe(pipe_class_default);
%}
// Manifest a CmpUL result in an integer register.
// (src1 < src2) ? -1 : ((src1 > src2) ? 1 : 0)
instruct cmpUL3_reg_reg(iRegINoSp dst, iRegL src1, iRegL src2, rFlagsReg flags)
%{
match(Set dst (CmpUL3 src1 src2));
effect(KILL flags);
ins_cost(INSN_COST * 3);
format %{
"cmp $src1, $src2\n\t"
"csetw $dst, ne\n\t"
"cnegw $dst, lo\t# CmpUL3(reg)"
%}
ins_encode %{
__ cmp($src1$$Register, $src2$$Register);
__ csetw($dst$$Register, Assembler::NE);
__ cnegw($dst$$Register, $dst$$Register, Assembler::LO);
%}
ins_pipe(pipe_class_default);
%}
instruct cmpUL3_reg_imm(iRegINoSp dst, iRegL src1, immLAddSub src2, rFlagsReg flags)
%{
match(Set dst (CmpUL3 src1 src2));
effect(KILL flags);
ins_cost(INSN_COST * 3);
format %{
"subs zr, $src1, $src2\n\t"
"csetw $dst, ne\n\t"
"cnegw $dst, lo\t# CmpUL3(imm)"
%}
ins_encode %{
__ subs(zr, $src1$$Register, (int32_t)$src2$$constant);
__ csetw($dst$$Register, Assembler::NE);
__ cnegw($dst$$Register, $dst$$Register, Assembler::LO);
%}
ins_pipe(pipe_class_default);
%}
// Manifest a CmpL result in an integer register.
// (src1 < src2) ? -1 : ((src1 > src2) ? 1 : 0)
instruct cmpL3_reg_reg(iRegINoSp dst, iRegL src1, iRegL src2, rFlagsReg flags)
%{
match(Set dst (CmpL3 src1 src2));
effect(KILL flags);
ins_cost(INSN_COST * 3);
format %{
"cmp $src1, $src2\n\t"
"csetw $dst, ne\n\t"
"cnegw $dst, lt\t# CmpL3(reg)"
%}
ins_encode %{
__ cmp($src1$$Register, $src2$$Register);
__ csetw($dst$$Register, Assembler::NE);
__ cnegw($dst$$Register, $dst$$Register, Assembler::LT);
%}
ins_pipe(pipe_class_default);
%}
instruct cmpL3_reg_imm(iRegINoSp dst, iRegL src1, immLAddSub src2, rFlagsReg flags)
%{
match(Set dst (CmpL3 src1 src2));
effect(KILL flags);
ins_cost(INSN_COST * 3);
format %{
"subs zr, $src1, $src2\n\t"
"csetw $dst, ne\n\t"
"cnegw $dst, lt\t# CmpL3(imm)"
%}
ins_encode %{
__ subs(zr, $src1$$Register, (int32_t)$src2$$constant);
__ csetw($dst$$Register, Assembler::NE);
__ cnegw($dst$$Register, $dst$$Register, Assembler::LT);
%}
ins_pipe(pipe_class_default);
%}
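// n.b. for illustration only: CmpL3 originates from the lcmp bytecode
// (and shapes such as Long.compare), while CmpU3/CmpUL3 correspond to
// the compareUnsigned library methods; all yield -1, 0 or 1. A sketch:
//
//   static int cmp(long a, long b)   { return Long.compare(a, b); }            // CmpL3
//   static int ucmp(int a, int b)    { return Integer.compareUnsigned(a, b); } // CmpU3
//   static int ulcmp(long a, long b) { return Long.compareUnsigned(a, b); }    // CmpUL3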
// ============================================================================
// Conditional Move Instructions
// n.b. we have identical rules for both a signed compare op (cmpOp)
// and an unsigned compare op (cmpOpU). it would be nice if we could
// define an op class which merged both inputs and use it to type the
// argument to a single rule. unfortunately this fails because the
// opclass does not live up to the COND_INTER interface of its
// component operands. When the generic code tries to negate the
// operand it ends up running the generic MachOper::negate method
// which throws a ShouldNotHappen. So, we have to provide two flavours
// of each rule, one for a cmpOp and a second for a cmpOpU (sigh).
instruct cmovI_reg_reg(cmpOp cmp, rFlagsReg cr, iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
match(Set dst (CMoveI (Binary cmp cr) (Binary src1 src2)));
ins_cost(INSN_COST * 2);
format %{ "cselw $dst, $src2, $src1 $cmp\t# signed, int" %}
ins_encode %{
__ cselw(as_Register($dst$$reg),
as_Register($src2$$reg),
as_Register($src1$$reg),
(Assembler::Condition)$cmp$$cmpcode);
%}
ins_pipe(icond_reg_reg);
%}
instruct cmovUI_reg_reg(cmpOpU cmp, rFlagsRegU cr, iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
match(Set dst (CMoveI (Binary cmp cr) (Binary src1 src2)));
ins_cost(INSN_COST * 2);
format %{ "cselw $dst, $src2, $src1 $cmp\t# unsigned, int" %}
ins_encode %{
__ cselw(as_Register($dst$$reg),
as_Register($src2$$reg),
as_Register($src1$$reg),
(Assembler::Condition)$cmp$$cmpcode);
%}
ins_pipe(icond_reg_reg);
%}
// special cases where one arg is zero
// n.b. this is selected in preference to the rule above because it
// avoids loading constant 0 into a source register
// TODO
// we ought to be able to cull one of these variants, as the ideal
// transforms ought always to order the zero consistently (to left or right?)
instruct cmovI_zero_reg(cmpOp cmp, rFlagsReg cr, iRegINoSp dst, immI0 zero, iRegIorL2I src) %{
match(Set dst (CMoveI (Binary cmp cr) (Binary zero src)));
ins_cost(INSN_COST * 2);
format %{ "cselw $dst, $src, zr $cmp\t# signed, int" %}
ins_encode %{
__ cselw(as_Register($dst$$reg),
as_Register($src$$reg),
zr,
(Assembler::Condition)$cmp$$cmpcode);
%}
ins_pipe(icond_reg);
%}
instruct cmovUI_zero_reg(cmpOpU cmp, rFlagsRegU cr, iRegINoSp dst, immI0 zero, iRegIorL2I src) %{
match(Set dst (CMoveI (Binary cmp cr) (Binary zero src)));
ins_cost(INSN_COST * 2);
format %{ "cselw $dst, $src, zr $cmp\t# unsigned, int" %}
ins_encode %{
__ cselw(as_Register($dst$$reg),
as_Register($src$$reg),
zr,
(Assembler::Condition)$cmp$$cmpcode);
%}
ins_pipe(icond_reg);
%}
instruct cmovI_reg_zero(cmpOp cmp, rFlagsReg cr, iRegINoSp dst, iRegIorL2I src, immI0 zero) %{
match(Set dst (CMoveI (Binary cmp cr) (Binary src zero)));
ins_cost(INSN_COST * 2);
format %{ "cselw $dst, zr, $src $cmp\t# signed, int" %}
ins_encode %{
__ cselw(as_Register($dst$$reg),
zr,
as_Register($src$$reg),
(Assembler::Condition)$cmp$$cmpcode);
%}
ins_pipe(icond_reg);
%}
instruct cmovUI_reg_zero(cmpOpU cmp, rFlagsRegU cr, iRegINoSp dst, iRegIorL2I src, immI0 zero) %{
match(Set dst (CMoveI (Binary cmp cr) (Binary src zero)));
ins_cost(INSN_COST * 2);
format %{ "cselw $dst, zr, $src $cmp\t# unsigned, int" %}
ins_encode %{
__ cselw(as_Register($dst$$reg),
zr,
as_Register($src$$reg),
(Assembler::Condition)$cmp$$cmpcode);
%}
ins_pipe(icond_reg);
%}
// special case for creating a boolean 0 or 1
// n.b. this is selected in preference to the rule above because it
// avoids loading constants 0 and 1 into a source register
instruct cmovI_reg_zero_one(cmpOp cmp, rFlagsReg cr, iRegINoSp dst, immI0 zero, immI_1 one) %{
match(Set dst (CMoveI (Binary cmp cr) (Binary one zero)));
ins_cost(INSN_COST * 2);
format %{ "csincw $dst, zr, zr $cmp\t# signed, int" %}
ins_encode %{
// equivalently
// cset(as_Register($dst$$reg),
// negate_condition((Assembler::Condition)$cmp$$cmpcode));
__ csincw(as_Register($dst$$reg),
zr,
zr,
(Assembler::Condition)$cmp$$cmpcode);
%}
ins_pipe(icond_none);
%}
instruct cmovUI_reg_zero_one(cmpOpU cmp, rFlagsRegU cr, iRegINoSp dst, immI0 zero, immI_1 one) %{
match(Set dst (CMoveI (Binary cmp cr) (Binary one zero)));
ins_cost(INSN_COST * 2);
format %{ "csincw $dst, zr, zr $cmp\t# unsigned, int" %}
ins_encode %{
// equivalently
// cset(as_Register($dst$$reg),
// negate_condition((Assembler::Condition)$cmp$$cmpcode));
__ csincw(as_Register($dst$$reg),
zr,
zr,
(Assembler::Condition)$cmp$$cmpcode);
%}
ins_pipe(icond_none);
%}
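// n.b. for illustration only: C2's conditional-move conversion turns
// short branch diamonds into CMove nodes, so the zero/one special case
// above covers boolean-producing ternaries such as (a sketch):
//
//   static int isNeg(int a) { return (a < 0) ? 1 : 0; }   // csincw dst, zr, zr, cond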
instruct cmovL_reg_reg(cmpOp cmp, rFlagsReg cr, iRegLNoSp dst, iRegL src1, iRegL src2) %{
match(Set dst (CMoveL (Binary cmp cr) (Binary src1 src2)));
ins_cost(INSN_COST * 2);
format %{ "csel $dst, $src2, $src1 $cmp\t# signed, long" %}
ins_encode %{
__ csel(as_Register($dst$$reg),
as_Register($src2$$reg),
as_Register($src1$$reg),
(Assembler::Condition)$cmp$$cmpcode);
%}
ins_pipe(icond_reg_reg);
%}
instruct cmovUL_reg_reg(cmpOpU cmp, rFlagsRegU cr, iRegLNoSp dst, iRegL src1, iRegL src2) %{
match(Set dst (CMoveL (Binary cmp cr) (Binary src1 src2)));
ins_cost(INSN_COST * 2);
format %{ "csel $dst, $src2, $src1 $cmp\t# unsigned, long" %}
ins_encode %{
__ csel(as_Register($dst$$reg),
as_Register($src2$$reg),
as_Register($src1$$reg),
(Assembler::Condition)$cmp$$cmpcode);
%}
ins_pipe(icond_reg_reg);
%}
// special cases where one arg is zero
instruct cmovL_reg_zero(cmpOp cmp, rFlagsReg cr, iRegLNoSp dst, iRegL src, immL0 zero) %{
match(Set dst (CMoveL (Binary cmp cr) (Binary src zero)));
ins_cost(INSN_COST * 2);
format %{ "csel $dst, zr, $src $cmp\t# signed, long" %}
ins_encode %{
__ csel(as_Register($dst$$reg),
zr,
as_Register($src$$reg),
(Assembler::Condition)$cmp$$cmpcode);
%}
ins_pipe(icond_reg);
%}
instruct cmovUL_reg_zero(cmpOpU cmp, rFlagsRegU cr, iRegLNoSp dst, iRegL src, immL0 zero) %{
match(Set dst (CMoveL (Binary cmp cr) (Binary src zero)));
ins_cost(INSN_COST * 2);
format %{ "csel $dst, zr, $src $cmp\t# unsigned, long" %}
ins_encode %{
__ csel(as_Register($dst$$reg),
zr,
as_Register($src$$reg),
(Assembler::Condition)$cmp$$cmpcode);
%}
ins_pipe(icond_reg);
%}
instruct cmovL_zero_reg(cmpOp cmp, rFlagsReg cr, iRegLNoSp dst, immL0 zero, iRegL src) %{
match(Set dst (CMoveL (Binary cmp cr) (Binary zero src)));
ins_cost(INSN_COST * 2);
format %{ "csel $dst, $src, zr $cmp\t# signed, long" %}
ins_encode %{
__ csel(as_Register($dst$$reg),
as_Register($src$$reg),
zr,
(Assembler::Condition)$cmp$$cmpcode);
%}
ins_pipe(icond_reg);
%}
instruct cmovUL_zero_reg(cmpOpU cmp, rFlagsRegU cr, iRegLNoSp dst, immL0 zero, iRegL src) %{
match(Set dst (CMoveL (Binary cmp cr) (Binary zero src)));
ins_cost(INSN_COST * 2);
format %{ "csel $dst, $src, zr $cmp\t# unsigned, long" %}
ins_encode %{
__ csel(as_Register($dst$$reg),
as_Register($src$$reg),
zr,
(Assembler::Condition)$cmp$$cmpcode);
%}
ins_pipe(icond_reg);
%}
instruct cmovP_reg_reg(cmpOp cmp, rFlagsReg cr, iRegPNoSp dst, iRegP src1, iRegP src2) %{
match(Set dst (CMoveP (Binary cmp cr) (Binary src1 src2)));
ins_cost(INSN_COST * 2);
format %{ "csel $dst, $src2, $src1 $cmp\t# signed, ptr" %}
ins_encode %{
__ csel(as_Register($dst$$reg),
as_Register($src2$$reg),
as_Register($src1$$reg),
(Assembler::Condition)$cmp$$cmpcode);
%}
ins_pipe(icond_reg_reg);
%}
instruct cmovUP_reg_reg(cmpOpU cmp, rFlagsRegU cr, iRegPNoSp dst, iRegP src1, iRegP src2) %{
match(Set dst (CMoveP (Binary cmp cr) (Binary src1 src2)));
ins_cost(INSN_COST * 2);
format %{ "csel $dst, $src2, $src1 $cmp\t# unsigned, ptr" %}
ins_encode %{
__ csel(as_Register($dst$$reg),
as_Register($src2$$reg),
as_Register($src1$$reg),
(Assembler::Condition)$cmp$$cmpcode);
%}
ins_pipe(icond_reg_reg);
%}
// special cases where one arg is zero
instruct cmovP_reg_zero(cmpOp cmp, rFlagsReg cr, iRegPNoSp dst, iRegP src, immP0 zero) %{
match(Set dst (CMoveP (Binary cmp cr) (Binary src zero)));
ins_cost(INSN_COST * 2);
format %{ "csel $dst, zr, $src $cmp\t# signed, ptr" %}
ins_encode %{
__ csel(as_Register($dst$$reg),
zr,
as_Register($src$$reg),
(Assembler::Condition)$cmp$$cmpcode);
%}
ins_pipe(icond_reg);
%}
instruct cmovUP_reg_zero(cmpOpU cmp, rFlagsRegU cr, iRegPNoSp dst, iRegP src, immP0 zero) %{
match(Set dst (CMoveP (Binary cmp cr) (Binary src zero)));
ins_cost(INSN_COST * 2);
format %{ "csel $dst, zr, $src $cmp\t# unsigned, ptr" %}
ins_encode %{
__ csel(as_Register($dst$$reg),
zr,
as_Register($src$$reg),
(Assembler::Condition)$cmp$$cmpcode);
%}
ins_pipe(icond_reg);
%}
instruct cmovP_zero_reg(cmpOp cmp, rFlagsReg cr, iRegPNoSp dst, immP0 zero, iRegP src) %{
match(Set dst (CMoveP (Binary cmp cr) (Binary zero src)));
ins_cost(INSN_COST * 2);
format %{ "csel $dst, $src, zr $cmp\t# signed, ptr" %}
ins_encode %{
__ csel(as_Register($dst$$reg),
as_Register($src$$reg),
zr,
(Assembler::Condition)$cmp$$cmpcode);
%}
ins_pipe(icond_reg);
%}
instruct cmovUP_zero_reg(cmpOpU cmp, rFlagsRegU cr, iRegPNoSp dst, immP0 zero, iRegP src) %{
match(Set dst (CMoveP (Binary cmp cr) (Binary zero src)));
ins_cost(INSN_COST * 2);
format %{ "csel $dst, $src, zr $cmp\t# unsigned, ptr" %}
ins_encode %{
__ csel(as_Register($dst$$reg),
as_Register($src$$reg),
zr,
(Assembler::Condition)$cmp$$cmpcode);
%}
ins_pipe(icond_reg);
%}
instruct cmovN_reg_reg(cmpOp cmp, rFlagsReg cr, iRegNNoSp dst, iRegN src1, iRegN src2) %{
match(Set dst (CMoveN (Binary cmp cr) (Binary src1 src2)));
ins_cost(INSN_COST * 2);
format %{ "cselw $dst, $src2, $src1 $cmp\t# signed, compressed ptr" %}
ins_encode %{
__ cselw(as_Register($dst$$reg),
as_Register($src2$$reg),
as_Register($src1$$reg),
(Assembler::Condition)$cmp$$cmpcode);
%}
ins_pipe(icond_reg_reg);
%}
instruct cmovUN_reg_reg(cmpOpU cmp, rFlagsRegU cr, iRegNNoSp dst, iRegN src1, iRegN src2) %{
match(Set dst (CMoveN (Binary cmp cr) (Binary src1 src2)));
ins_cost(INSN_COST * 2);
format %{ "cselw $dst, $src2, $src1 $cmp\t# unsigned, compressed ptr" %}
ins_encode %{
__ cselw(as_Register($dst$$reg),
as_Register($src2$$reg),
as_Register($src1$$reg),
(Assembler::Condition)$cmp$$cmpcode);
%}
ins_pipe(icond_reg_reg);
%}
// special cases where one arg is zero
instruct cmovN_reg_zero(cmpOp cmp, rFlagsReg cr, iRegNNoSp dst, iRegN src, immN0 zero) %{
match(Set dst (CMoveN (Binary cmp cr) (Binary src zero)));
ins_cost(INSN_COST * 2);
format %{ "cselw $dst, zr, $src $cmp\t# signed, compressed ptr" %}
ins_encode %{
__ cselw(as_Register($dst$$reg),
zr,
as_Register($src$$reg),
(Assembler::Condition)$cmp$$cmpcode);
%}
ins_pipe(icond_reg);
%}
instruct cmovUN_reg_zero(cmpOpU cmp, rFlagsRegU cr, iRegNNoSp dst, iRegN src, immN0 zero) %{
match(Set dst (CMoveN (Binary cmp cr) (Binary src zero)));
ins_cost(INSN_COST * 2);
format %{ "cselw $dst, zr, $src $cmp\t# unsigned, compressed ptr" %}
ins_encode %{
__ cselw(as_Register($dst$$reg),
zr,
as_Register($src$$reg),
(Assembler::Condition)$cmp$$cmpcode);
%}
ins_pipe(icond_reg);
%}
instruct cmovN_zero_reg(cmpOp cmp, rFlagsReg cr, iRegNNoSp dst, immN0 zero, iRegN src) %{
match(Set dst (CMoveN (Binary cmp cr) (Binary zero src)));
ins_cost(INSN_COST * 2);
format %{ "cselw $dst, $src, zr $cmp\t# signed, compressed ptr" %}
ins_encode %{
__ cselw(as_Register($dst$$reg),
as_Register($src$$reg),
zr,
(Assembler::Condition)$cmp$$cmpcode);
%}
ins_pipe(icond_reg);
%}
instruct cmovUN_zero_reg(cmpOpU cmp, rFlagsRegU cr, iRegNNoSp dst, immN0 zero, iRegN src) %{
match(Set dst (CMoveN (Binary cmp cr) (Binary zero src)));
ins_cost(INSN_COST * 2);
format %{ "cselw $dst, $src, zr $cmp\t# unsigned, compressed ptr" %}
ins_encode %{
__ cselw(as_Register($dst$$reg),
as_Register($src$$reg),
zr,
(Assembler::Condition)$cmp$$cmpcode);
%}
ins_pipe(icond_reg);
%}
instruct cmovF_reg(cmpOp cmp, rFlagsReg cr, vRegF dst, vRegF src1, vRegF src2)
%{
match(Set dst (CMoveF (Binary cmp cr) (Binary src1 src2)));
ins_cost(INSN_COST * 3);
format %{ "fcsels $dst, $src1, $src2, $cmp\t# signed cmove float" %}
ins_encode %{
Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
__ fcsels(as_FloatRegister($dst$$reg),
as_FloatRegister($src2$$reg),
as_FloatRegister($src1$$reg),
cond);
%}
ins_pipe(fp_cond_reg_reg_s);
%}
instruct cmovUF_reg(cmpOpU cmp, rFlagsRegU cr, vRegF dst, vRegF src1, vRegF src2)
%{
match(Set dst (CMoveF (Binary cmp cr) (Binary src1 src2)));
ins_cost(INSN_COST * 3);
format %{ "fcsels $dst, $src1, $src2, $cmp\t# unsigned cmove float" %}
ins_encode %{
Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
__ fcsels(as_FloatRegister($dst$$reg),
as_FloatRegister($src2$$reg),
as_FloatRegister($src1$$reg),
cond);
%}
ins_pipe(fp_cond_reg_reg_s);
%}
instruct cmovD_reg(cmpOp cmp, rFlagsReg cr, vRegD dst, vRegD src1, vRegD src2)
%{
match(Set dst (CMoveD (Binary cmp cr) (Binary src1 src2)));
ins_cost(INSN_COST * 3);
format %{ "fcseld $dst, $src1, $src2, $cmp\t# signed cmove double" %}
ins_encode %{
Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
__ fcseld(as_FloatRegister($dst$$reg),
as_FloatRegister($src2$$reg),
as_FloatRegister($src1$$reg),
cond);
%}
ins_pipe(fp_cond_reg_reg_d);
%}
instruct cmovUD_reg(cmpOpU cmp, rFlagsRegU cr, vRegD dst, vRegD src1, vRegD src2)
%{
match(Set dst (CMoveD (Binary cmp cr) (Binary src1 src2)));
ins_cost(INSN_COST * 3);
format %{ "fcseld $dst, $src1, $src2, $cmp\t# unsigned cmove double" %}
ins_encode %{
Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
__ fcseld(as_FloatRegister($dst$$reg),
as_FloatRegister($src2$$reg),
as_FloatRegister($src1$$reg),
cond);
%}
ins_pipe(fp_cond_reg_reg_d);
%}
// ============================================================================
// Arithmetic Instructions
//
// Integer Addition
// TODO
// these currently employ operations which do not set CR and hence are
// not flagged as killing CR. We would like to isolate the cases where
// we want to set flags from those where we don't; we still need to
// work out how to do that.
instruct addI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
match(Set dst (AddI src1 src2));
ins_cost(INSN_COST);
format %{ "addw $dst, $src1, $src2" %}
ins_encode %{
__ addw(as_Register($dst$$reg),
as_Register($src1$$reg),
as_Register($src2$$reg));
%}
ins_pipe(ialu_reg_reg);
%}
instruct addI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immIAddSub src2) %{
match(Set dst (AddI src1 src2));
ins_cost(INSN_COST);
format %{ "addw $dst, $src1, $src2" %}
// use opcode to indicate that this is an add not a sub
opcode(0x0);
ins_encode(aarch64_enc_addsubw_imm(dst, src1, src2));
ins_pipe(ialu_reg_imm);
%}
instruct addI_reg_imm_i2l(iRegINoSp dst, iRegL src1, immIAddSub src2) %{
match(Set dst (AddI (ConvL2I src1) src2));
ins_cost(INSN_COST);
format %{ "addw $dst, $src1, $src2" %}
// use opcode to indicate that this is an add not a sub
opcode(0x0);
ins_encode(aarch64_enc_addsubw_imm(dst, src1, src2));
ins_pipe(ialu_reg_imm);
%}
// Pointer Addition
instruct addP_reg_reg(iRegPNoSp dst, iRegPorL2P src1, iRegL src2) %{
match(Set dst (AddP src1 src2));
ins_cost(INSN_COST);
format %{ "add $dst, $src1, $src2\t# ptr" %}
ins_encode %{
__ add(as_Register($dst$$reg),
as_Register($src1$$reg),
as_Register($src2$$reg));
%}
ins_pipe(ialu_reg_reg);
%}
instruct addP_reg_reg_ext(iRegPNoSp dst, iRegPorL2P src1, iRegIorL2I src2) %{
match(Set dst (AddP src1 (ConvI2L src2)));
ins_cost(1.9 * INSN_COST);
format %{ "add $dst, $src1, $src2, sxtw\t# ptr" %}
ins_encode %{
__ add(as_Register($dst$$reg),
as_Register($src1$$reg),
as_Register($src2$$reg), ext::sxtw);
%}
ins_pipe(ialu_reg_reg);
%}
instruct addP_reg_reg_lsl(iRegPNoSp dst, iRegPorL2P src1, iRegL src2, immIScale scale) %{
match(Set dst (AddP src1 (LShiftL src2 scale)));
ins_cost(1.9 * INSN_COST);
format %{ "add $dst, $src1, $src2, LShiftL $scale\t# ptr" %}
ins_encode %{
__ lea(as_Register($dst$$reg),
Address(as_Register($src1$$reg), as_Register($src2$$reg),
Address::lsl($scale$$constant)));
%}
ins_pipe(ialu_reg_reg_shift);
%}
instruct addP_reg_reg_ext_shift(iRegPNoSp dst, iRegPorL2P src1, iRegIorL2I src2, immIScale scale) %{
match(Set dst (AddP src1 (LShiftL (ConvI2L src2) scale)));
ins_cost(1.9 * INSN_COST);
format %{ "add $dst, $src1, $src2, I2L $scale\t# ptr" %}
ins_encode %{
__ lea(as_Register($dst$$reg),
Address(as_Register($src1$$reg), as_Register($src2$$reg),
Address::sxtw($scale$$constant)));
%}
ins_pipe(ialu_reg_reg_shift);
%}
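// n.b. for illustration only: the AddP shapes above arise in array
// addressing, where the element address is the base plus a scaled,
// sign-extended index. A sketch:
//
//   static int elem(int[] a, int i) { return a[i]; }
//
// The address of a[i] is a + <header> + ((long) i << 2), which the rule
// above folds into a single lea with a sxtw-scaled register offset.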
instruct lshift_ext(iRegLNoSp dst, iRegIorL2I src, immI scale, rFlagsReg cr) %{
match(Set dst (LShiftL (ConvI2L src) scale));
ins_cost(INSN_COST);
format %{ "sbfiz $dst, $src, $scale & 63, -$scale & 63\t" %}
ins_encode %{
__ sbfiz(as_Register($dst$$reg),
as_Register($src$$reg),
$scale$$constant & 63, MIN2(32, (int)((-$scale$$constant) & 63)));
%}
ins_pipe(ialu_reg_shift);
%}
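// n.b. for illustration only: lshift_ext matches a sign-extended int
// shifted left. A sketch:
//
//   static long scale(int i) { return (long) i << 3; }   // sbfiz dst, i, 3, 32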
// Pointer Immediate Addition
// n.b. this needs to be more expensive than using an indirect memory
// operand
instruct addP_reg_imm(iRegPNoSp dst, iRegPorL2P src1, immLAddSub src2) %{
match(Set dst (AddP src1 src2));
ins_cost(INSN_COST);
format %{ "add $dst, $src1, $src2\t# ptr" %}
// use opcode to indicate that this is an add not a sub
opcode(0x0);
ins_encode( aarch64_enc_addsub_imm(dst, src1, src2) );
ins_pipe(ialu_reg_imm);
%}
// Long Addition
instruct addL_reg_reg(iRegLNoSp dst, iRegL src1, iRegL src2) %{
match(Set dst (AddL src1 src2));
ins_cost(INSN_COST);
format %{ "add $dst, $src1, $src2" %}
ins_encode %{
__ add(as_Register($dst$$reg),
as_Register($src1$$reg),
as_Register($src2$$reg));
%}
ins_pipe(ialu_reg_reg);
%}
// Long Immediate Addition. No constant pool entries required.
instruct addL_reg_imm(iRegLNoSp dst, iRegL src1, immLAddSub src2) %{
match(Set dst (AddL src1 src2));
ins_cost(INSN_COST);
format %{ "add $dst, $src1, $src2" %}
// use opcode to indicate that this is an add not a sub
opcode(0x0);
ins_encode( aarch64_enc_addsub_imm(dst, src1, src2) );
ins_pipe(ialu_reg_imm);
%}
// Integer Subtraction
instruct subI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
match(Set dst (SubI src1 src2));
ins_cost(INSN_COST);
format %{ "subw $dst, $src1, $src2" %}
ins_encode %{
__ subw(as_Register($dst$$reg),
as_Register($src1$$reg),
as_Register($src2$$reg));
%}
ins_pipe(ialu_reg_reg);
%}
// Immediate Subtraction
instruct subI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immIAddSub src2) %{
match(Set dst (SubI src1 src2));
ins_cost(INSN_COST);
format %{ "subw $dst, $src1, $src2" %}
// use opcode to indicate that this is a sub not an add
opcode(0x1);
ins_encode(aarch64_enc_addsubw_imm(dst, src1, src2));
ins_pipe(ialu_reg_imm);
%}
// Long Subtraction
instruct subL_reg_reg(iRegLNoSp dst, iRegL src1, iRegL src2) %{
match(Set dst (SubL src1 src2));
ins_cost(INSN_COST);
format %{ "sub $dst, $src1, $src2" %}
ins_encode %{
__ sub(as_Register($dst$$reg),
as_Register($src1$$reg),
as_Register($src2$$reg));
%}
ins_pipe(ialu_reg_reg);
%}
// Long Immediate Subtraction. No constant pool entries required.
instruct subL_reg_imm(iRegLNoSp dst, iRegL src1, immLAddSub src2) %{
match(Set dst (SubL src1 src2));
ins_cost(INSN_COST);
format %{ "sub $dst, $src1, $src2" %}
// use opcode to indicate that this is a sub not an add
opcode(0x1);
ins_encode( aarch64_enc_addsub_imm(dst, src1, src2) );
ins_pipe(ialu_reg_imm);
%}
// Integer Negation (special case for sub)
instruct negI_reg(iRegINoSp dst, iRegIorL2I src, immI0 zero, rFlagsReg cr) %{
match(Set dst (SubI zero src));
ins_cost(INSN_COST);
format %{ "negw $dst, $src\t# int" %}
ins_encode %{
__ negw(as_Register($dst$$reg),
as_Register($src$$reg));
%}
ins_pipe(ialu_reg);
%}
// Long Negation
instruct negL_reg(iRegLNoSp dst, iRegL src, immL0 zero, rFlagsReg cr) %{
match(Set dst (SubL zero src));
ins_cost(INSN_COST);
format %{ "neg $dst, $src\t# long" %}
ins_encode %{
__ neg(as_Register($dst$$reg),
as_Register($src$$reg));
%}
ins_pipe(ialu_reg);
%}
// Integer Multiply
instruct mulI(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
match(Set dst (MulI src1 src2));
ins_cost(INSN_COST * 3);
format %{ "mulw $dst, $src1, $src2" %}
ins_encode %{
__ mulw(as_Register($dst$$reg),
as_Register($src1$$reg),
as_Register($src2$$reg));
%}
ins_pipe(imul_reg_reg);
%}
instruct smulI(iRegLNoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
match(Set dst (MulL (ConvI2L src1) (ConvI2L src2)));
ins_cost(INSN_COST * 3);
format %{ "smull $dst, $src1, $src2" %}
ins_encode %{
__ smull(as_Register($dst$$reg),
as_Register($src1$$reg),
as_Register($src2$$reg));
%}
ins_pipe(imul_reg_reg);
%}
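// n.b. for illustration only: smulI matches a full 64-bit product of
// two sign-extended ints without an explicit widening step. A sketch:
//
//   static long prod(int a, int b) { return (long) a * b; }   // smull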
// Long Multiply
instruct mulL(iRegLNoSp dst, iRegL src1, iRegL src2) %{
match(Set dst (MulL src1 src2));
ins_cost(INSN_COST * 5);
format %{ "mul $dst, $src1, $src2" %}
ins_encode %{
__ mul(as_Register($dst$$reg),
as_Register($src1$$reg),
as_Register($src2$$reg));
%}
ins_pipe(lmul_reg_reg);
%}
instruct mulHiL_rReg(iRegLNoSp dst, iRegL src1, iRegL src2, rFlagsReg cr)
%{
match(Set dst (MulHiL src1 src2));
ins_cost(INSN_COST * 7);
format %{ "smulh $dst, $src1, $src2\t# mulhi" %}
ins_encode %{
__ smulh(as_Register($dst$$reg),
as_Register($src1$$reg),
as_Register($src2$$reg));
%}
ins_pipe(lmul_reg_reg);
%}
instruct umulHiL_rReg(iRegLNoSp dst, iRegL src1, iRegL src2, rFlagsReg cr)
%{
match(Set dst (UMulHiL src1 src2));
ins_cost(INSN_COST * 7);
format %{ "umulh $dst, $src1, $src2\t# umulhi" %}
ins_encode %{
__ umulh(as_Register($dst$$reg),
as_Register($src1$$reg),
as_Register($src2$$reg));
%}
ins_pipe(lmul_reg_reg);
%}
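// n.b. for illustration only: MulHiL and UMulHiL are produced for the
// Math.multiplyHigh and Math.unsignedMultiplyHigh intrinsics. A sketch:
//
//   static long hi(long a, long b)  { return Math.multiplyHigh(a, b); }          // smulh
//   static long uhi(long a, long b) { return Math.unsignedMultiplyHigh(a, b); }  // umulh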
// Combined Integer Multiply & Add/Sub
instruct maddI(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, iRegIorL2I src3) %{
match(Set dst (AddI src3 (MulI src1 src2)));
ins_cost(INSN_COST * 3);
format %{ "maddw $dst, $src1, $src2, $src3" %}
ins_encode %{
__ maddw(as_Register($dst$$reg),
as_Register($src1$$reg),
as_Register($src2$$reg),
as_Register($src3$$reg));
%}
ins_pipe(imac_reg_reg);
%}
instruct msubI(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, iRegIorL2I src3) %{
match(Set dst (SubI src3 (MulI src1 src2)));
ins_cost(INSN_COST * 3);
format %{ "msubw $dst, $src1, $src2, $src3" %}
ins_encode %{
__ msubw(as_Register($dst$$reg),
as_Register($src1$$reg),
as_Register($src2$$reg),
as_Register($src3$$reg));
%}
ins_pipe(imac_reg_reg);
%}
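// n.b. for illustration only: the multiply-accumulate rules above fuse
// a multiply feeding an add or a subtract. A sketch:
//
//   static int mac(int a, int b, int c) { return c + a * b; }   // maddw
//   static int msc(int a, int b, int c) { return c - a * b; }   // msubw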
// Combined Integer Multiply & Neg
instruct mnegI(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI0 zero) %{
match(Set dst (MulI (SubI zero src1) src2));
ins_cost(INSN_COST * 3);
format %{ "mnegw $dst, $src1, $src2" %}
ins_encode %{
__ mnegw(as_Register($dst$$reg),
as_Register($src1$$reg),
as_Register($src2$$reg));
%}
ins_pipe(imac_reg_reg);
%}
// Combined Long Multiply & Add/Sub
instruct maddL(iRegLNoSp dst, iRegL src1, iRegL src2, iRegL src3) %{
match(Set dst (AddL src3 (MulL src1 src2)));
ins_cost(INSN_COST * 5);
format %{ "madd $dst, $src1, $src2, $src3" %}
ins_encode %{
__ madd(as_Register($dst$$reg),
as_Register($src1$$reg),
as_Register($src2$$reg),
as_Register($src3$$reg));
%}
ins_pipe(lmac_reg_reg);
%}
instruct msubL(iRegLNoSp dst, iRegL src1, iRegL src2, iRegL src3) %{
match(Set dst (SubL src3 (MulL src1 src2)));
ins_cost(INSN_COST * 5);
format %{ "msub $dst, $src1, $src2, $src3" %}
ins_encode %{
__ msub(as_Register($dst$$reg),
as_Register($src1$$reg),
as_Register($src2$$reg),
as_Register($src3$$reg));
%}
ins_pipe(lmac_reg_reg);
%}
// Combined Long Multiply & Neg
instruct mnegL(iRegLNoSp dst, iRegL src1, iRegL src2, immL0 zero) %{
match(Set dst (MulL (SubL zero src1) src2));
ins_cost(INSN_COST * 5);
format %{ "mneg $dst, $src1, $src2" %}
ins_encode %{
__ mneg(as_Register($dst$$reg),
as_Register($src1$$reg),
as_Register($src2$$reg));
%}
ins_pipe(lmac_reg_reg);
%}
// Combined Signed Integer Multiply & Add/Sub/Neg into Long
instruct smaddL(iRegLNoSp dst, iRegIorL2I src1, iRegIorL2I src2, iRegLNoSp src3) %{
match(Set dst (AddL src3 (MulL (ConvI2L src1) (ConvI2L src2))));
ins_cost(INSN_COST * 3);
format %{ "smaddl $dst, $src1, $src2, $src3" %}
ins_encode %{
__ smaddl(as_Register($dst$$reg),
as_Register($src1$$reg),
as_Register($src2$$reg),
as_Register($src3$$reg));
%}
ins_pipe(imac_reg_reg);
%}
instruct smsubL(iRegLNoSp dst, iRegIorL2I src1, iRegIorL2I src2, iRegLNoSp src3) %{
match(Set dst (SubL src3 (MulL (ConvI2L src1) (ConvI2L src2))));
ins_cost(INSN_COST * 3);
format %{ "smsubl $dst, $src1, $src2, $src3" %}
ins_encode %{
__ smsubl(as_Register($dst$$reg),
as_Register($src1$$reg),
as_Register($src2$$reg),
as_Register($src3$$reg));
%}
ins_pipe(imac_reg_reg);
%}
instruct smnegL(iRegLNoSp dst, iRegIorL2I src1, iRegIorL2I src2, immL0 zero) %{
match(Set dst (MulL (SubL zero (ConvI2L src1)) (ConvI2L src2)));
ins_cost(INSN_COST * 3);
format %{ "smnegl $dst, $src1, $src2" %}
ins_encode %{
__ smnegl(as_Register($dst$$reg),
as_Register($src1$$reg),
as_Register($src2$$reg));
%}
ins_pipe(imac_reg_reg);
%}
// Combined Multiply-Add Shorts into Integer (dst = src1 * src2 + src3 * src4)
instruct muladdS2I(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, iRegIorL2I src3, iRegIorL2I src4) %{
match(Set dst (MulAddS2I (Binary src1 src2) (Binary src3 src4)));
ins_cost(INSN_COST * 5);
format %{ "mulw rscratch1, $src1, $src2\n\t"
"maddw $dst, $src3, $src4, rscratch1" %}
ins_encode %{
__ mulw(rscratch1, as_Register($src1$$reg), as_Register($src2$$reg));
__ maddw(as_Register($dst$$reg), as_Register($src3$$reg), as_Register($src4$$reg), rscratch1); %}
ins_pipe(imac_reg_reg);
%}
// Integer Divide
instruct divI(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
match(Set dst (DivI src1 src2));
ins_cost(INSN_COST * 19);
format %{ "sdivw $dst, $src1, $src2" %}
ins_encode(aarch64_enc_divw(dst, src1, src2));
ins_pipe(idiv_reg_reg);
%}
// Long Divide
instruct divL(iRegLNoSp dst, iRegL src1, iRegL src2) %{
match(Set dst (DivL src1 src2));
ins_cost(INSN_COST * 35);
format %{ "sdiv $dst, $src1, $src2" %}
ins_encode(aarch64_enc_div(dst, src1, src2));
ins_pipe(ldiv_reg_reg);
%}
// Integer Remainder
instruct modI(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
match(Set dst (ModI src1 src2));
ins_cost(INSN_COST * 22);
format %{ "sdivw rscratch1, $src1, $src2\n\t"
"msubw $dst, rscratch1, $src2, $src1" %}
ins_encode(aarch64_enc_modw(dst, src1, src2));
ins_pipe(idiv_reg_reg);
%}
// Long Remainder
instruct modL(iRegLNoSp dst, iRegL src1, iRegL src2) %{
match(Set dst (ModL src1 src2));
ins_cost(INSN_COST * 38);
format %{ "sdiv rscratch1, $src1, $src2\n\t"
"msub $dst, rscratch1, $src2, $src1" %}
ins_encode(aarch64_enc_mod(dst, src1, src2));
ins_pipe(ldiv_reg_reg);
%}
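// n.b. AArch64 has no integer remainder instruction, so ModI/ModL
// expand to a division followed by a multiply-subtract, as the formats
// above show. A sketch:
//
//   static int rem(int a, int b) { return a % b; }
//   // sdivw rscratch1, a, b
//   // msubw dst, rscratch1, b, a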
// Unsigned Integer Divide
instruct UdivI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
match(Set dst (UDivI src1 src2));
ins_cost(INSN_COST * 19);
format %{ "udivw $dst, $src1, $src2" %}
ins_encode %{
__ udivw($dst$$Register, $src1$$Register, $src2$$Register);
%}
ins_pipe(idiv_reg_reg);
%}
// Unsigned Long Divide
instruct UdivL_reg_reg(iRegLNoSp dst, iRegL src1, iRegL src2) %{
match(Set dst (UDivL src1 src2));
ins_cost(INSN_COST * 35);
format %{ "udiv $dst, $src1, $src2" %}
ins_encode %{
__ udiv($dst$$Register, $src1$$Register, $src2$$Register);
%}
ins_pipe(ldiv_reg_reg);
%}
// Unsigned Integer Remainder
instruct UmodI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
match(Set dst (UModI src1 src2));
ins_cost(INSN_COST * 22);
format %{ "udivw rscratch1, $src1, $src2\n\t"
"msubw $dst, rscratch1, $src2, $src1" %}
ins_encode %{
__ udivw(rscratch1, $src1$$Register, $src2$$Register);
__ msubw($dst$$Register, rscratch1, $src2$$Register, $src1$$Register);
%}
ins_pipe(idiv_reg_reg);
%}
// Unsigned Long Remainder
instruct UModL_reg_reg(iRegLNoSp dst, iRegL src1, iRegL src2) %{
match(Set dst (UModL src1 src2));
ins_cost(INSN_COST * 38);
format %{ "udiv rscratch1, $src1, $src2\n\t"
"msub $dst, rscratch1, $src2, $src1" %}
ins_encode %{
__ udiv(rscratch1, $src1$$Register, $src2$$Register);
__ msub($dst$$Register, rscratch1, $src2$$Register, $src1$$Register);
%}
ins_pipe(ldiv_reg_reg);
%}
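// n.b. for illustration only: the UDiv/UMod rules above correspond to
// the divideUnsigned/remainderUnsigned library methods. A sketch:
//
//   static int udiv(int a, int b) { return Integer.divideUnsigned(a, b); }     // udivw
//   static int urem(int a, int b) { return Integer.remainderUnsigned(a, b); }  // udivw + msubw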
// Integer Shifts
// Shift Left Register
instruct lShiftI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
match(Set dst (LShiftI src1 src2));
ins_cost(INSN_COST * 2);
format %{ "lslvw $dst, $src1, $src2" %}
ins_encode %{
__ lslvw(as_Register($dst$$reg),
as_Register($src1$$reg),
as_Register($src2$$reg));
%}
ins_pipe(ialu_reg_reg_vshift);
%}
// Shift Left Immediate
instruct lShiftI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immI src2) %{
match(Set dst (LShiftI src1 src2));
ins_cost(INSN_COST);
format %{ "lslw $dst, $src1, ($src2 & 0x1f)" %}
ins_encode %{
__ lslw(as_Register($dst$$reg),
as_Register($src1$$reg),
$src2$$constant & 0x1f);
%}
ins_pipe(ialu_reg_shift);
%}
// Shift Right Logical Register
instruct urShiftI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
match(Set dst (URShiftI src1 src2));
ins_cost(INSN_COST * 2);
format %{ "lsrvw $dst, $src1, $src2" %}
ins_encode %{
__ lsrvw(as_Register($dst$$reg),
as_Register($src1$$reg),
as_Register($src2$$reg));
%}
ins_pipe(ialu_reg_reg_vshift);
%}
// Shift Right Logical Immediate
instruct urShiftI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immI src2) %{
match(Set dst (URShiftI src1 src2));
ins_cost(INSN_COST);
format %{ "lsrw $dst, $src1, ($src2 & 0x1f)" %}
ins_encode %{
__ lsrw(as_Register($dst$$reg),
as_Register($src1$$reg),
$src2$$constant & 0x1f);
%}
ins_pipe(ialu_reg_shift);
%}
// Shift Right Arithmetic Register
instruct rShiftI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
match(Set dst (RShiftI src1 src2));
ins_cost(INSN_COST * 2);
format %{ "asrvw $dst, $src1, $src2" %}
ins_encode %{
__ asrvw(as_Register($dst$$reg),
as_Register($src1$$reg),
as_Register($src2$$reg));
%}
ins_pipe(ialu_reg_reg_vshift);
%}
// Shift Right Arithmetic Immediate
instruct rShiftI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immI src2) %{
match(Set dst (RShiftI src1 src2));
ins_cost(INSN_COST);
format %{ "asrw $dst, $src1, ($src2 & 0x1f)" %}
ins_encode %{
__ asrw(as_Register($dst$$reg),
as_Register($src1$$reg),
$src2$$constant & 0x1f);
%}
ins_pipe(ialu_reg_shift);
%}
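// n.b. Java masks shift counts (to 5 bits for int, 6 bits for long),
// which is why the immediate rules mask with 0x1f/0x3f and why the
// register forms need no extra and: lslvw &c. already take the count
// modulo the register size. A sketch:
//
//   static int sh(int a, int n) { return a >>> n; }   // lsrvw, count taken mod 32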
// Combined Int Mask and Right Shift (using UBFM)
// TODO
// Long Shifts
// Shift Left Register
instruct lShiftL_reg_reg(iRegLNoSp dst, iRegL src1, iRegIorL2I src2) %{
match(Set dst (LShiftL src1 src2));
ins_cost(INSN_COST * 2);
format %{ "lslv $dst, $src1, $src2" %}
ins_encode %{
__ lslv(as_Register($dst$$reg),
as_Register($src1$$reg),
as_Register($src2$$reg));
%}
ins_pipe(ialu_reg_reg_vshift);
%}
// Shift Left Immediate
instruct lShiftL_reg_imm(iRegLNoSp dst, iRegL src1, immI src2) %{
match(Set dst (LShiftL src1 src2));
ins_cost(INSN_COST);
format %{ "lsl $dst, $src1, ($src2 & 0x3f)" %}
ins_encode %{
__ lsl(as_Register($dst$$reg),
as_Register($src1$$reg),
$src2$$constant & 0x3f);
%}
ins_pipe(ialu_reg_shift);
%}
// Shift Right Logical Register
instruct urShiftL_reg_reg(iRegLNoSp dst, iRegL src1, iRegIorL2I src2) %{
match(Set dst (URShiftL src1 src2));
ins_cost(INSN_COST * 2);
format %{ "lsrv $dst, $src1, $src2" %}
ins_encode %{
__ lsrv(as_Register($dst$$reg),
as_Register($src1$$reg),
as_Register($src2$$reg));
%}
ins_pipe(ialu_reg_reg_vshift);
%}
// Shift Right Logical Immediate
instruct urShiftL_reg_imm(iRegLNoSp dst, iRegL src1, immI src2) %{
match(Set dst (URShiftL src1 src2));
ins_cost(INSN_COST);
format %{ "lsr $dst, $src1, ($src2 & 0x3f)" %}
ins_encode %{
__ lsr(as_Register($dst$$reg),
as_Register($src1$$reg),
$src2$$constant & 0x3f);
%}
ins_pipe(ialu_reg_shift);
%}
// A special-case pattern for card table stores.
instruct urShiftP_reg_imm(iRegLNoSp dst, iRegP src1, immI src2) %{
match(Set dst (URShiftL (CastP2X src1) src2));
ins_cost(INSN_COST);
format %{ "lsr $dst, p2x($src1), ($src2 & 0x3f)" %}
ins_encode %{
__ lsr(as_Register($dst$$reg),
as_Register($src1$$reg),
$src2$$constant & 0x3f);
%}
ins_pipe(ialu_reg_shift);
%}
// Shift Right Arithmetic Register
instruct rShiftL_reg_reg(iRegLNoSp dst, iRegL src1, iRegIorL2I src2) %{
match(Set dst (RShiftL src1 src2));
ins_cost(INSN_COST * 2);
format %{ "asrv $dst, $src1, $src2" %}
ins_encode %{
__ asrv(as_Register($dst$$reg),
as_Register($src1$$reg),
as_Register($src2$$reg));
%}
ins_pipe(ialu_reg_reg_vshift);
%}
// Shift Right Arithmetic Immediate
instruct rShiftL_reg_imm(iRegLNoSp dst, iRegL src1, immI src2) %{
match(Set dst (RShiftL src1 src2));
ins_cost(INSN_COST);
format %{ "asr $dst, $src1, ($src2 & 0x3f)" %}
ins_encode %{
__ asr(as_Register($dst$$reg),
as_Register($src1$$reg),
$src2$$constant & 0x3f);
%}
ins_pipe(ialu_reg_shift);
%}
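// n.b. for illustration only: the generated section below covers
// bitwise complement idioms; javac compiles ~x to an xor with -1, so
// for example (a sketch):
//
//   static int not(int a)          { return ~a; }      // eonw dst, a, zr
//   static int clear(int a, int b) { return a & ~b; }  // bicw dst, a, b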
// BEGIN This section of the file is automatically generated. Do not edit --------------
// This section is generated from aarch64_ad.m4
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct regL_not_reg(iRegLNoSp dst,
iRegL src1, immL_M1 m1,
rFlagsReg cr) %{
match(Set dst (XorL src1 m1));
ins_cost(INSN_COST);
format %{ "eon $dst, $src1, zr" %}
ins_encode %{
__ eon(as_Register($dst$$reg),
as_Register($src1$$reg),
zr,
Assembler::LSL, 0);
%}
ins_pipe(ialu_reg);
%}
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct regI_not_reg(iRegINoSp dst,
iRegIorL2I src1, immI_M1 m1,
rFlagsReg cr) %{
match(Set dst (XorI src1 m1));
ins_cost(INSN_COST);
format %{ "eonw $dst, $src1, zr" %}
ins_encode %{
__ eonw(as_Register($dst$$reg),
as_Register($src1$$reg),
zr,
Assembler::LSL, 0);
%}
ins_pipe(ialu_reg);
%}
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct NegI_reg_URShift_reg(iRegINoSp dst,
immI0 zero, iRegIorL2I src1, immI src2) %{
match(Set dst (SubI zero (URShiftI src1 src2)));
ins_cost(1.9 * INSN_COST);
format %{ "negw $dst, $src1, LSR $src2" %}
ins_encode %{
__ negw(as_Register($dst$$reg), as_Register($src1$$reg),
Assembler::LSR, $src2$$constant & 0x1f);
%}
ins_pipe(ialu_reg_shift);
%}
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct NegI_reg_RShift_reg(iRegINoSp dst,
immI0 zero, iRegIorL2I src1, immI src2) %{
match(Set dst (SubI zero (RShiftI src1 src2)));
ins_cost(1.9 * INSN_COST);
format %{ "negw $dst, $src1, ASR $src2" %}
ins_encode %{
__ negw(as_Register($dst$$reg), as_Register($src1$$reg),
Assembler::ASR, $src2$$constant & 0x1f);
%}
ins_pipe(ialu_reg_shift);
%}
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct NegI_reg_LShift_reg(iRegINoSp dst,
immI0 zero, iRegIorL2I src1, immI src2) %{
match(Set dst (SubI zero (LShiftI src1 src2)));
ins_cost(1.9 * INSN_COST);
format %{ "negw $dst, $src1, LSL $src2" %}
ins_encode %{
__ negw(as_Register($dst$$reg), as_Register($src1$$reg),
Assembler::LSL, $src2$$constant & 0x1f);
%}
ins_pipe(ialu_reg_shift);
%}
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct NegL_reg_URShift_reg(iRegLNoSp dst,
immL0 zero, iRegL src1, immI src2) %{
match(Set dst (SubL zero (URShiftL src1 src2)));
ins_cost(1.9 * INSN_COST);
format %{ "neg $dst, $src1, LSR $src2" %}
ins_encode %{
__ neg(as_Register($dst$$reg), as_Register($src1$$reg),
Assembler::LSR, $src2$$constant & 0x3f);
%}
ins_pipe(ialu_reg_shift);
%}
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct NegL_reg_RShift_reg(iRegLNoSp dst,
immL0 zero, iRegL src1, immI src2) %{
match(Set dst (SubL zero (RShiftL src1 src2)));
ins_cost(1.9 * INSN_COST);
format %{ "neg $dst, $src1, ASR $src2" %}
ins_encode %{
__ neg(as_Register($dst$$reg), as_Register($src1$$reg),
Assembler::ASR, $src2$$constant & 0x3f);
%}
ins_pipe(ialu_reg_shift);
%}
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct NegL_reg_LShift_reg(iRegLNoSp dst,
immL0 zero, iRegL src1, immI src2) %{
match(Set dst (SubL zero (LShiftL src1 src2)));
ins_cost(1.9 * INSN_COST);
format %{ "neg $dst, $src1, LSL $src2" %}
ins_encode %{
__ neg(as_Register($dst$$reg), as_Register($src1$$reg),
Assembler::LSL, $src2$$constant & 0x3f);
%}
ins_pipe(ialu_reg_shift);
%}
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct AndI_reg_not_reg(iRegINoSp dst,
iRegIorL2I src1, iRegIorL2I src2, immI_M1 m1) %{
match(Set dst (AndI src1 (XorI src2 m1)));
ins_cost(INSN_COST);
format %{ "bicw $dst, $src1, $src2" %}
ins_encode %{
__ bicw(as_Register($dst$$reg),
as_Register($src1$$reg),
as_Register($src2$$reg),
Assembler::LSL, 0);
%}
ins_pipe(ialu_reg_reg);
%}
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct AndL_reg_not_reg(iRegLNoSp dst,
iRegL src1, iRegL src2, immL_M1 m1) %{
match(Set dst (AndL src1 (XorL src2 m1)));
ins_cost(INSN_COST);
format %{ "bic $dst, $src1, $src2" %}
ins_encode %{
__ bic(as_Register($dst$$reg),
as_Register($src1$$reg),
as_Register($src2$$reg),
Assembler::LSL, 0);
%}
ins_pipe(ialu_reg_reg);
%}
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct OrI_reg_not_reg(iRegINoSp dst,
iRegIorL2I src1, iRegIorL2I src2, immI_M1 m1) %{
match(Set dst (OrI src1 (XorI src2 m1)));
ins_cost(INSN_COST);
format %{ "ornw $dst, $src1, $src2" %}
ins_encode %{
__ ornw(as_Register($dst$$reg),
as_Register($src1$$reg),
as_Register($src2$$reg),
Assembler::LSL, 0);
%}
ins_pipe(ialu_reg_reg);
%}
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct OrL_reg_not_reg(iRegLNoSp dst,
iRegL src1, iRegL src2, immL_M1 m1) %{
match(Set dst (OrL src1 (XorL src2 m1)));
ins_cost(INSN_COST);
format %{ "orn $dst, $src1, $src2" %}
ins_encode %{
__ orn(as_Register($dst$$reg),
as_Register($src1$$reg),
as_Register($src2$$reg),
Assembler::LSL, 0);
%}
ins_pipe(ialu_reg_reg);
%}
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct XorI_reg_not_reg(iRegINoSp dst,
iRegIorL2I src1, iRegIorL2I src2, immI_M1 m1) %{
match(Set dst (XorI m1 (XorI src2 src1)));
ins_cost(INSN_COST);
format %{ "eonw $dst, $src1, $src2" %}
ins_encode %{
__ eonw(as_Register($dst$$reg),
as_Register($src1$$reg),
as_Register($src2$$reg),
Assembler::LSL, 0);
%}
ins_pipe(ialu_reg_reg);
%}
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct XorL_reg_not_reg(iRegLNoSp dst,
iRegL src1, iRegL src2, immL_M1 m1) %{
match(Set dst (XorL m1 (XorL src2 src1)));
ins_cost(INSN_COST);
format %{ "eon $dst, $src1, $src2" %}
ins_encode %{
__ eon(as_Register($dst$$reg),
as_Register($src1$$reg),
as_Register($src2$$reg),
Assembler::LSL, 0);
%}
ins_pipe(ialu_reg_reg);
%}
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// val & (-1 ^ (val >>> shift)) ==> bicw
instruct AndI_reg_URShift_not_reg(iRegINoSp dst,
iRegIorL2I src1, iRegIorL2I src2,
immI src3, immI_M1 src4) %{
match(Set dst (AndI src1 (XorI(URShiftI src2 src3) src4)));
ins_cost(1.9 * INSN_COST);
format %{ "bicw $dst, $src1, $src2, LSR $src3" %}
ins_encode %{
__ bicw(as_Register($dst$$reg),
as_Register($src1$$reg),
as_Register($src2$$reg),
Assembler::LSR,
$src3$$constant & 0x1f);
%}
ins_pipe(ialu_reg_reg_shift);
%}
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// val & (-1 ^ (val >>> shift)) ==> bic
instruct AndL_reg_URShift_not_reg(iRegLNoSp dst,
iRegL src1, iRegL src2,
immI src3, immL_M1 src4) %{
match(Set dst (AndL src1 (XorL(URShiftL src2 src3) src4)));
ins_cost(1.9 * INSN_COST);
format %{ "bic $dst, $src1, $src2, LSR $src3" %}
ins_encode %{
__ bic(as_Register($dst$$reg),
as_Register($src1$$reg),
as_Register($src2$$reg),
Assembler::LSR,
$src3$$constant & 0x3f);
%}
ins_pipe(ialu_reg_reg_shift);
%}
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// val & (-1 ^ (val >> shift)) ==> bicw
instruct AndI_reg_RShift_not_reg(iRegINoSp dst,
iRegIorL2I src1, iRegIorL2I src2,
immI src3, immI_M1 src4) %{
match(Set dst (AndI src1 (XorI(RShiftI src2 src3) src4)));
ins_cost(1.9 * INSN_COST);
format %{ "bicw $dst, $src1, $src2, ASR $src3" %}
ins_encode %{
__ bicw(as_Register($dst$$reg),
as_Register($src1$$reg),
as_Register($src2$$reg),
Assembler::ASR,
$src3$$constant & 0x1f);
%}
ins_pipe(ialu_reg_reg_shift);
%}
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// val & (-1 ^ (val >> shift)) ==> bic
instruct AndL_reg_RShift_not_reg(iRegLNoSp dst,
iRegL src1, iRegL src2,
immI src3, immL_M1 src4) %{
match(Set dst (AndL src1 (XorL(RShiftL src2 src3) src4)));
ins_cost(1.9 * INSN_COST);
format %{ "bic $dst, $src1, $src2, ASR $src3" %}
ins_encode %{
__ bic(as_Register($dst$$reg),
as_Register($src1$$reg),
as_Register($src2$$reg),
Assembler::ASR,
$src3$$constant & 0x3f);
%}
ins_pipe(ialu_reg_reg_shift);
%}
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// val & (-1 ^ (val ror shift)) ==> bicw
instruct AndI_reg_RotateRight_not_reg(iRegINoSp dst,
iRegIorL2I src1, iRegIorL2I src2,
immI src3, immI_M1 src4) %{
match(Set dst (AndI src1 (XorI(RotateRight src2 src3) src4)));
ins_cost(1.9 * INSN_COST);
format %{ "bicw $dst, $src1, $src2, ROR $src3" %}
ins_encode %{
__ bicw(as_Register($dst$$reg),
as_Register($src1$$reg),
as_Register($src2$$reg),
Assembler::ROR,
$src3$$constant & 0x1f);
%}
ins_pipe(ialu_reg_reg_shift);
%}
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// val & (-1 ^ (val ror shift)) ==> bic
instruct AndL_reg_RotateRight_not_reg(iRegLNoSp dst,
iRegL src1, iRegL src2,
immI src3, immL_M1 src4) %{
match(Set dst (AndL src1 (XorL(RotateRight src2 src3) src4)));
ins_cost(1.9 * INSN_COST);
format %{ "bic $dst, $src1, $src2, ROR $src3" %}
ins_encode %{
__ bic(as_Register($dst$$reg),
as_Register($src1$$reg),
as_Register($src2$$reg),
Assembler::ROR,
$src3$$constant & 0x3f);
%}
ins_pipe(ialu_reg_reg_shift);
%}
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// val & (-1 ^ (val << shift)) ==> bicw
instruct AndI_reg_LShift_not_reg(iRegINoSp dst,
iRegIorL2I src1, iRegIorL2I src2,
immI src3, immI_M1 src4) %{
match(Set dst (AndI src1 (XorI(LShiftI src2 src3) src4)));
ins_cost(1.9 * INSN_COST);
format %{ "bicw $dst, $src1, $src2, LSL $src3" %}
ins_encode %{
__ bicw(as_Register($dst$$reg),
as_Register($src1$$reg),
as_Register($src2$$reg),
Assembler::LSL,
$src3$$constant & 0x1f);
%}
ins_pipe(ialu_reg_reg_shift);
%}
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// val & (-1 ^ (val << shift)) ==> bic
instruct AndL_reg_LShift_not_reg(iRegLNoSp dst,
iRegL src1, iRegL src2,
immI src3, immL_M1 src4) %{
match(Set dst (AndL src1 (XorL(LShiftL src2 src3) src4)));
ins_cost(1.9 * INSN_COST);
format %{ "bic $dst, $src1, $src2, LSL $src3" %}
ins_encode %{
__ bic(as_Register($dst$$reg),
as_Register($src1$$reg),
as_Register($src2$$reg),
Assembler::LSL,
$src3$$constant & 0x3f);
%}
ins_pipe(ialu_reg_reg_shift);
%}
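// A hedged Java sketch of the shape the bic/bicw rules above are meant to
// catch: an AND with the complement of a constant-shifted value, where the
// complement again appears as "x ^ -1" (hypothetical method name):
//
//   static int bicExample(int a, int b) {
//     return a & ~(b >>> 3);   // AndI a (XorI (URShiftI b 3) -1) ==> bicw a, b, LSR #3
//   }
//
// The shift amount must be a compile-time constant for the shifted-register
// form; a variable shift is emitted as separate instructions.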
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// val ^ (-1 ^ (val >>> shift)) ==> eonw
instruct XorI_reg_URShift_not_reg(iRegINoSp dst,
iRegIorL2I src1, iRegIorL2I src2,
immI src3, immI_M1 src4) %{
match(Set dst (XorI src4 (XorI(URShiftI src2 src3) src1)));
ins_cost(1.9 * INSN_COST);
format %{ "eonw $dst, $src1, $src2, LSR $src3" %}
ins_encode %{
__ eonw(as_Register($dst$$reg),
as_Register($src1$$reg),
as_Register($src2$$reg),
Assembler::LSR,
$src3$$constant & 0x1f);
%}
ins_pipe(ialu_reg_reg_shift);
%}
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// val ^ (-1 ^ (val >>> shift)) ==> eon
instruct XorL_reg_URShift_not_reg(iRegLNoSp dst,
iRegL src1, iRegL src2,
immI src3, immL_M1 src4) %{
match(Set dst (XorL src4 (XorL(URShiftL src2 src3) src1)));
ins_cost(1.9 * INSN_COST);
format %{ "eon $dst, $src1, $src2, LSR $src3" %}
ins_encode %{
__ eon(as_Register($dst$$reg),
as_Register($src1$$reg),
as_Register($src2$$reg),
Assembler::LSR,
$src3$$constant & 0x3f);
%}
ins_pipe(ialu_reg_reg_shift);
%}
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// val ^ (-1 ^ (val >> shift)) ==> eonw
instruct XorI_reg_RShift_not_reg(iRegINoSp dst,
iRegIorL2I src1, iRegIorL2I src2,
immI src3, immI_M1 src4) %{
match(Set dst (XorI src4 (XorI(RShiftI src2 src3) src1)));
ins_cost(1.9 * INSN_COST);
format %{ "eonw $dst, $src1, $src2, ASR $src3" %}
ins_encode %{
__ eonw(as_Register($dst$$reg),
as_Register($src1$$reg),
as_Register($src2$$reg),
Assembler::ASR,
$src3$$constant & 0x1f);
%}
ins_pipe(ialu_reg_reg_shift);
%}
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// val ^ (-1 ^ (val >> shift)) ==> eon
instruct XorL_reg_RShift_not_reg(iRegLNoSp dst,
iRegL src1, iRegL src2,
immI src3, immL_M1 src4) %{
match(Set dst (XorL src4 (XorL(RShiftL src2 src3) src1)));
ins_cost(1.9 * INSN_COST);
format %{ "eon $dst, $src1, $src2, ASR $src3" %}
ins_encode %{
__ eon(as_Register($dst$$reg),
as_Register($src1$$reg),
as_Register($src2$$reg),
Assembler::ASR,
$src3$$constant & 0x3f);
%}
ins_pipe(ialu_reg_reg_shift);
%}
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// val ^ (-1 ^ (val ror shift)) ==> eonw
instruct XorI_reg_RotateRight_not_reg(iRegINoSp dst,
iRegIorL2I src1, iRegIorL2I src2,
immI src3, immI_M1 src4) %{
match(Set dst (XorI src4 (XorI(RotateRight src2 src3) src1)));
ins_cost(1.9 * INSN_COST);
format %{ "eonw $dst, $src1, $src2, ROR $src3" %}
ins_encode %{
__ eonw(as_Register($dst$$reg),
as_Register($src1$$reg),
as_Register($src2$$reg),
Assembler::ROR,
$src3$$constant & 0x1f);
%}
ins_pipe(ialu_reg_reg_shift);
%}
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// val ^ (-1 ^ (val ror shift)) ==> eon
instruct XorL_reg_RotateRight_not_reg(iRegLNoSp dst,
iRegL src1, iRegL src2,
immI src3, immL_M1 src4) %{
match(Set dst (XorL src4 (XorL(RotateRight src2 src3) src1)));
ins_cost(1.9 * INSN_COST);
format %{ "eon $dst, $src1, $src2, ROR $src3" %}
ins_encode %{
__ eon(as_Register($dst$$reg),
as_Register($src1$$reg),
as_Register($src2$$reg),
Assembler::ROR,
$src3$$constant & 0x3f);
%}
ins_pipe(ialu_reg_reg_shift);
%}
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// val ^ (-1 ^ (val << shift)) ==> eonw
instruct XorI_reg_LShift_not_reg(iRegINoSp dst,
iRegIorL2I src1, iRegIorL2I src2,
immI src3, immI_M1 src4) %{
match(Set dst (XorI src4 (XorI(LShiftI src2 src3) src1)));
ins_cost(1.9 * INSN_COST);
format %{ "eonw $dst, $src1, $src2, LSL $src3" %}
ins_encode %{
__ eonw(as_Register($dst$$reg),
as_Register($src1$$reg),
as_Register($src2$$reg),
Assembler::LSL,
$src3$$constant & 0x1f);
%}
ins_pipe(ialu_reg_reg_shift);
%}
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// val ^ (-1 ^ (val << shift)) ==> eon
instruct XorL_reg_LShift_not_reg(iRegLNoSp dst,
iRegL src1, iRegL src2,
immI src3, immL_M1 src4) %{
match(Set dst (XorL src4 (XorL(LShiftL src2 src3) src1)));
ins_cost(1.9 * INSN_COST);
format %{ "eon $dst, $src1, $src2, LSL $src3" %}
ins_encode %{
__ eon(as_Register($dst$$reg),
as_Register($src1$$reg),
as_Register($src2$$reg),
Assembler::LSL,
$src3$$constant & 0x3f);
%}
ins_pipe(ialu_reg_reg_shift);
%}
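// A hedged Java sketch for the eon-with-shift rules above (hypothetical
// method name); "a ^ ~(b << 7)" is algebraically "-1 ^ ((b << 7) ^ a)",
// which is the tree these rules match:
//
//   static long eonShiftExample(long a, long b) {
//     return a ^ ~(b << 7);    // ==> eon a, b, LSL #7
//   }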
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// val | (-1 ^ (val >>> shift)) ==> ornw
instruct OrI_reg_URShift_not_reg(iRegINoSp dst,
iRegIorL2I src1, iRegIorL2I src2,
immI src3, immI_M1 src4) %{
match(Set dst (OrI src1 (XorI(URShiftI src2 src3) src4)));
ins_cost(1.9 * INSN_COST);
format %{ "ornw $dst, $src1, $src2, LSR $src3" %}
ins_encode %{
__ ornw(as_Register($dst$$reg),
as_Register($src1$$reg),
as_Register($src2$$reg),
Assembler::LSR,
$src3$$constant & 0x1f);
%}
ins_pipe(ialu_reg_reg_shift);
%}
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// val | (-1 ^ (val >>> shift)) ==> orn
instruct OrL_reg_URShift_not_reg(iRegLNoSp dst,
iRegL src1, iRegL src2,
immI src3, immL_M1 src4) %{
match(Set dst (OrL src1 (XorL(URShiftL src2 src3) src4)));
ins_cost(1.9 * INSN_COST);
format %{ "orn $dst, $src1, $src2, LSR $src3" %}
ins_encode %{
__ orn(as_Register($dst$$reg),
as_Register($src1$$reg),
as_Register($src2$$reg),
Assembler::LSR,
$src3$$constant & 0x3f);
%}
ins_pipe(ialu_reg_reg_shift);
%}
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// val | (-1 ^ (val >> shift)) ==> ornw
instruct OrI_reg_RShift_not_reg(iRegINoSp dst,
iRegIorL2I src1, iRegIorL2I src2,
immI src3, immI_M1 src4) %{
match(Set dst (OrI src1 (XorI(RShiftI src2 src3) src4)));
ins_cost(1.9 * INSN_COST);
format %{ "ornw $dst, $src1, $src2, ASR $src3" %}
ins_encode %{
__ ornw(as_Register($dst$$reg),
as_Register($src1$$reg),
as_Register($src2$$reg),
Assembler::ASR,
$src3$$constant & 0x1f);
%}
ins_pipe(ialu_reg_reg_shift);
%}
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// val | (-1 ^ (val >> shift)) ==> orn
instruct OrL_reg_RShift_not_reg(iRegLNoSp dst,
iRegL src1, iRegL src2,
immI src3, immL_M1 src4) %{
match(Set dst (OrL src1 (XorL(RShiftL src2 src3) src4)));
ins_cost(1.9 * INSN_COST);
format %{ "orn $dst, $src1, $src2, ASR $src3" %}
ins_encode %{
__ orn(as_Register($dst$$reg),
as_Register($src1$$reg),
as_Register($src2$$reg),
Assembler::ASR,
$src3$$constant & 0x3f);
%}
ins_pipe(ialu_reg_reg_shift);
%}
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// val | (-1 ^ (val ror shift)) ==> ornw
instruct OrI_reg_RotateRight_not_reg(iRegINoSp dst,
iRegIorL2I src1, iRegIorL2I src2,
immI src3, immI_M1 src4) %{
match(Set dst (OrI src1 (XorI(RotateRight src2 src3) src4)));
ins_cost(1.9 * INSN_COST);
format %{ "ornw $dst, $src1, $src2, ROR $src3" %}
ins_encode %{
__ ornw(as_Register($dst$$reg),
as_Register($src1$$reg),
as_Register($src2$$reg),
Assembler::ROR,
$src3$$constant & 0x1f);
%}
ins_pipe(ialu_reg_reg_shift);
%}
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// val | (-1 ^ (val ror shift)) ==> orn
instruct OrL_reg_RotateRight_not_reg(iRegLNoSp dst,
iRegL src1, iRegL src2,
immI src3, immL_M1 src4) %{
match(Set dst (OrL src1 (XorL(RotateRight src2 src3) src4)));
ins_cost(1.9 * INSN_COST);
format %{ "orn $dst, $src1, $src2, ROR $src3" %}
ins_encode %{
__ orn(as_Register($dst$$reg),
as_Register($src1$$reg),
as_Register($src2$$reg),
Assembler::ROR,
$src3$$constant & 0x3f);
%}
ins_pipe(ialu_reg_reg_shift);
%}
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// val | (-1 ^ (val << shift)) ==> ornw
instruct OrI_reg_LShift_not_reg(iRegINoSp dst,
iRegIorL2I src1, iRegIorL2I src2,
immI src3, immI_M1 src4) %{
match(Set dst (OrI src1 (XorI(LShiftI src2 src3) src4)));
ins_cost(1.9 * INSN_COST);
format %{ "ornw $dst, $src1, $src2, LSL $src3" %}
ins_encode %{
__ ornw(as_Register($dst$$reg),
as_Register($src1$$reg),
as_Register($src2$$reg),
Assembler::LSL,
$src3$$constant & 0x1f);
%}
ins_pipe(ialu_reg_reg_shift);
%}
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// val | (-1 ^ (val << shift)) ==> orn
instruct OrL_reg_LShift_not_reg(iRegLNoSp dst,
iRegL src1, iRegL src2,
immI src3, immL_M1 src4) %{
match(Set dst (OrL src1 (XorL(LShiftL src2 src3) src4)));
ins_cost(1.9 * INSN_COST);
format %{ "orn $dst, $src1, $src2, LSL $src3" %}
ins_encode %{
__ orn(as_Register($dst$$reg),
as_Register($src1$$reg),
as_Register($src2$$reg),
Assembler::LSL,
$src3$$constant & 0x3f);
%}
ins_pipe(ialu_reg_reg_shift);
%}
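// A hedged Java sketch for the orn-with-shift rules above (hypothetical
// method name):
//
//   static int ornShiftExample(int a, int b) {
//     return a | ~(b >> 5);    // OrI a (XorI (RShiftI b 5) -1) ==> ornw a, b, ASR #5
//   }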
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct AndI_reg_URShift_reg(iRegINoSp dst,
iRegIorL2I src1, iRegIorL2I src2,
immI src3) %{
match(Set dst (AndI src1 (URShiftI src2 src3)));
ins_cost(1.9 * INSN_COST);
format %{ "andw $dst, $src1, $src2, LSR $src3" %}
ins_encode %{
__ andw(as_Register($dst$$reg),
as_Register($src1$$reg),
as_Register($src2$$reg),
Assembler::LSR,
$src3$$constant & 0x1f);
%}
ins_pipe(ialu_reg_reg_shift);
%}
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct AndL_reg_URShift_reg(iRegLNoSp dst,
iRegL src1, iRegL src2,
immI src3) %{
match(Set dst (AndL src1 (URShiftL src2 src3)));
ins_cost(1.9 * INSN_COST);
format %{ "andr $dst, $src1, $src2, LSR $src3" %}
ins_encode %{
__ andr(as_Register($dst$$reg),
as_Register($src1$$reg),
as_Register($src2$$reg),
Assembler::LSR,
$src3$$constant & 0x3f);
%}
ins_pipe(ialu_reg_reg_shift);
%}
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct AndI_reg_RShift_reg(iRegINoSp dst,
iRegIorL2I src1, iRegIorL2I src2,
immI src3) %{
match(Set dst (AndI src1 (RShiftI src2 src3)));
ins_cost(1.9 * INSN_COST);
format %{ "andw $dst, $src1, $src2, ASR $src3" %}
ins_encode %{
__ andw(as_Register($dst$$reg),
as_Register($src1$$reg),
as_Register($src2$$reg),
Assembler::ASR,
$src3$$constant & 0x1f);
%}
ins_pipe(ialu_reg_reg_shift);
%}
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct AndL_reg_RShift_reg(iRegLNoSp dst,
iRegL src1, iRegL src2,
immI src3) %{
match(Set dst (AndL src1 (RShiftL src2 src3)));
ins_cost(1.9 * INSN_COST);
format %{ "andr $dst, $src1, $src2, ASR $src3" %}
ins_encode %{
__ andr(as_Register($dst$$reg),
as_Register($src1$$reg),
as_Register($src2$$reg),
Assembler::ASR,
$src3$$constant & 0x3f);
%}
ins_pipe(ialu_reg_reg_shift);
%}
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct AndI_reg_LShift_reg(iRegINoSp dst,
iRegIorL2I src1, iRegIorL2I src2,
immI src3) %{
match(Set dst (AndI src1 (LShiftI src2 src3)));
ins_cost(1.9 * INSN_COST);
format %{ "andw $dst, $src1, $src2, LSL $src3" %}
ins_encode %{
__ andw(as_Register($dst$$reg),
as_Register($src1$$reg),
as_Register($src2$$reg),
Assembler::LSL,
$src3$$constant & 0x1f);
%}
ins_pipe(ialu_reg_reg_shift);
%}
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct AndL_reg_LShift_reg(iRegLNoSp dst,
iRegL src1, iRegL src2,
immI src3) %{
match(Set dst (AndL src1 (LShiftL src2 src3)));
ins_cost(1.9 * INSN_COST);
format %{ "andr $dst, $src1, $src2, LSL $src3" %}
ins_encode %{
__ andr(as_Register($dst$$reg),
as_Register($src1$$reg),
as_Register($src2$$reg),
Assembler::LSL,
$src3$$constant & 0x3f);
%}
ins_pipe(ialu_reg_reg_shift);
%}
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct AndI_reg_RotateRight_reg(iRegINoSp dst,
iRegIorL2I src1, iRegIorL2I src2,
immI src3) %{
match(Set dst (AndI src1 (RotateRight src2 src3)));
ins_cost(1.9 * INSN_COST);
format %{ "andw $dst, $src1, $src2, ROR $src3" %}
ins_encode %{
__ andw(as_Register($dst$$reg),
as_Register($src1$$reg),
as_Register($src2$$reg),
Assembler::ROR,
$src3$$constant & 0x1f);
%}
ins_pipe(ialu_reg_reg_shift);
%}
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct AndL_reg_RotateRight_reg(iRegLNoSp dst,
iRegL src1, iRegL src2,
immI src3) %{
match(Set dst (AndL src1 (RotateRight src2 src3)));
ins_cost(1.9 * INSN_COST);
format %{ "andr $dst, $src1, $src2, ROR $src3" %}
ins_encode %{
__ andr(as_Register($dst$$reg),
as_Register($src1$$reg),
as_Register($src2$$reg),
Assembler::ROR,
$src3$$constant & 0x3f);
%}
ins_pipe(ialu_reg_reg_shift);
%}
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct XorI_reg_URShift_reg(iRegINoSp dst,
iRegIorL2I src1, iRegIorL2I src2,
immI src3) %{
match(Set dst (XorI src1 (URShiftI src2 src3)));
ins_cost(1.9 * INSN_COST);
format %{ "eorw $dst, $src1, $src2, LSR $src3" %}
ins_encode %{
__ eorw(as_Register($dst$$reg),
as_Register($src1$$reg),
as_Register($src2$$reg),
Assembler::LSR,
$src3$$constant & 0x1f);
%}
ins_pipe(ialu_reg_reg_shift);
%}
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct XorL_reg_URShift_reg(iRegLNoSp dst,
iRegL src1, iRegL src2,
immI src3) %{
match(Set dst (XorL src1 (URShiftL src2 src3)));
ins_cost(1.9 * INSN_COST);
format %{ "eor $dst, $src1, $src2, LSR $src3" %}
ins_encode %{
__ eor(as_Register($dst$$reg),
as_Register($src1$$reg),
as_Register($src2$$reg),
Assembler::LSR,
$src3$$constant & 0x3f);
%}
ins_pipe(ialu_reg_reg_shift);
%}
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct XorI_reg_RShift_reg(iRegINoSp dst,
iRegIorL2I src1, iRegIorL2I src2,
immI src3) %{
match(Set dst (XorI src1 (RShiftI src2 src3)));
ins_cost(1.9 * INSN_COST);
format %{ "eorw $dst, $src1, $src2, ASR $src3" %}
ins_encode %{
__ eorw(as_Register($dst$$reg),
as_Register($src1$$reg),
as_Register($src2$$reg),
Assembler::ASR,
$src3$$constant & 0x1f);
%}
ins_pipe(ialu_reg_reg_shift);
%}
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct XorL_reg_RShift_reg(iRegLNoSp dst,
iRegL src1, iRegL src2,
immI src3) %{
match(Set dst (XorL src1 (RShiftL src2 src3)));
ins_cost(1.9 * INSN_COST);
format %{ "eor $dst, $src1, $src2, ASR $src3" %}
ins_encode %{
__ eor(as_Register($dst$$reg),
as_Register($src1$$reg),
as_Register($src2$$reg),
Assembler::ASR,
$src3$$constant & 0x3f);
%}
ins_pipe(ialu_reg_reg_shift);
%}
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct XorI_reg_LShift_reg(iRegINoSp dst,
iRegIorL2I src1, iRegIorL2I src2,
immI src3) %{
match(Set dst (XorI src1 (LShiftI src2 src3)));
ins_cost(1.9 * INSN_COST);
format %{ "eorw $dst, $src1, $src2, LSL $src3" %}
ins_encode %{
__ eorw(as_Register($dst$$reg),
as_Register($src1$$reg),
as_Register($src2$$reg),
Assembler::LSL,
$src3$$constant & 0x1f);
%}
ins_pipe(ialu_reg_reg_shift);
%}
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct XorL_reg_LShift_reg(iRegLNoSp dst,
iRegL src1, iRegL src2,
immI src3) %{
match(Set dst (XorL src1 (LShiftL src2 src3)));
ins_cost(1.9 * INSN_COST);
format %{ "eor $dst, $src1, $src2, LSL $src3" %}
ins_encode %{
__ eor(as_Register($dst$$reg),
as_Register($src1$$reg),
as_Register($src2$$reg),
Assembler::LSL,
$src3$$constant & 0x3f);
%}
ins_pipe(ialu_reg_reg_shift);
%}
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct XorI_reg_RotateRight_reg(iRegINoSp dst,
iRegIorL2I src1, iRegIorL2I src2,
immI src3) %{
match(Set dst (XorI src1 (RotateRight src2 src3)));
ins_cost(1.9 * INSN_COST);
format %{ "eorw $dst, $src1, $src2, ROR $src3" %}
ins_encode %{
__ eorw(as_Register($dst$$reg),
as_Register($src1$$reg),
as_Register($src2$$reg),
Assembler::ROR,
$src3$$constant & 0x1f);
%}
ins_pipe(ialu_reg_reg_shift);
%}
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct XorL_reg_RotateRight_reg(iRegLNoSp dst,
iRegL src1, iRegL src2,
immI src3) %{
match(Set dst (XorL src1 (RotateRight src2 src3)));
ins_cost(1.9 * INSN_COST);
format %{ "eor $dst, $src1, $src2, ROR $src3" %}
ins_encode %{
__ eor(as_Register($dst$$reg),
as_Register($src1$$reg),
as_Register($src2$$reg),
Assembler::ROR,
$src3$$constant & 0x3f);
%}
ins_pipe(ialu_reg_reg_shift);
%}
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct OrI_reg_URShift_reg(iRegINoSp dst,
iRegIorL2I src1, iRegIorL2I src2,
immI src3) %{
match(Set dst (OrI src1 (URShiftI src2 src3)));
ins_cost(1.9 * INSN_COST);
format %{ "orrw $dst, $src1, $src2, LSR $src3" %}
ins_encode %{
__ orrw(as_Register($dst$$reg),
as_Register($src1$$reg),
as_Register($src2$$reg),
Assembler::LSR,
$src3$$constant & 0x1f);
%}
ins_pipe(ialu_reg_reg_shift);
%}
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct OrL_reg_URShift_reg(iRegLNoSp dst,
iRegL src1, iRegL src2,
immI src3) %{
match(Set dst (OrL src1 (URShiftL src2 src3)));
ins_cost(1.9 * INSN_COST);
format %{ "orr $dst, $src1, $src2, LSR $src3" %}
ins_encode %{
__ orr(as_Register($dst$$reg),
as_Register($src1$$reg),
as_Register($src2$$reg),
Assembler::LSR,
$src3$$constant & 0x3f);
%}
ins_pipe(ialu_reg_reg_shift);
%}
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct OrI_reg_RShift_reg(iRegINoSp dst,
iRegIorL2I src1, iRegIorL2I src2,
immI src3) %{
match(Set dst (OrI src1 (RShiftI src2 src3)));
ins_cost(1.9 * INSN_COST);
format %{ "orrw $dst, $src1, $src2, ASR $src3" %}
ins_encode %{
__ orrw(as_Register($dst$$reg),
as_Register($src1$$reg),
as_Register($src2$$reg),
Assembler::ASR,
$src3$$constant & 0x1f);
%}
ins_pipe(ialu_reg_reg_shift);
%}
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct OrL_reg_RShift_reg(iRegLNoSp dst,
iRegL src1, iRegL src2,
immI src3) %{
match(Set dst (OrL src1 (RShiftL src2 src3)));
ins_cost(1.9 * INSN_COST);
format %{ "orr $dst, $src1, $src2, ASR $src3" %}
ins_encode %{
__ orr(as_Register($dst$$reg),
as_Register($src1$$reg),
as_Register($src2$$reg),
Assembler::ASR,
$src3$$constant & 0x3f);
%}
ins_pipe(ialu_reg_reg_shift);
%}
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct OrI_reg_LShift_reg(iRegINoSp dst,
iRegIorL2I src1, iRegIorL2I src2,
immI src3) %{
match(Set dst (OrI src1 (LShiftI src2 src3)));
ins_cost(1.9 * INSN_COST);
format %{ "orrw $dst, $src1, $src2, LSL $src3" %}
ins_encode %{
__ orrw(as_Register($dst$$reg),
as_Register($src1$$reg),
as_Register($src2$$reg),
Assembler::LSL,
$src3$$constant & 0x1f);
%}
ins_pipe(ialu_reg_reg_shift);
%}
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct OrL_reg_LShift_reg(iRegLNoSp dst,
iRegL src1, iRegL src2,
immI src3) %{
match(Set dst (OrL src1 (LShiftL src2 src3)));
ins_cost(1.9 * INSN_COST);
format %{ "orr $dst, $src1, $src2, LSL $src3" %}
ins_encode %{
__ orr(as_Register($dst$$reg),
as_Register($src1$$reg),
as_Register($src2$$reg),
Assembler::LSL,
$src3$$constant & 0x3f);
%}
ins_pipe(ialu_reg_reg_shift);
%}
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct OrI_reg_RotateRight_reg(iRegINoSp dst,
iRegIorL2I src1, iRegIorL2I src2,
immI src3) %{
match(Set dst (OrI src1 (RotateRight src2 src3)));
ins_cost(1.9 * INSN_COST);
format %{ "orrw $dst, $src1, $src2, ROR $src3" %}
ins_encode %{
__ orrw(as_Register($dst$$reg),
as_Register($src1$$reg),
as_Register($src2$$reg),
Assembler::ROR,
$src3$$constant & 0x1f);
%}
ins_pipe(ialu_reg_reg_shift);
%}
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct OrL_reg_RotateRight_reg(iRegLNoSp dst,
iRegL src1, iRegL src2,
immI src3) %{
match(Set dst (OrL src1 (RotateRight src2 src3)));
ins_cost(1.9 * INSN_COST);
format %{ "orr $dst, $src1, $src2, ROR $src3" %}
ins_encode %{
__ orr(as_Register($dst$$reg),
as_Register($src1$$reg),
as_Register($src2$$reg),
Assembler::ROR,
$src3$$constant & 0x3f);
%}
ins_pipe(ialu_reg_reg_shift);
%}
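// The rules above fold a constant shift or rotate of the second operand
// into the logical instruction's shifted-register form, saving a separate
// shift instruction. A hedged Java sketch (hypothetical method names):
//
//   static int andShiftExample(int a, int b) {
//     return a & (b >>> 5);                 // ==> andw a, b, LSR #5
//   }
//
//   static long orRotateExample(long a, long b) {
//     return a | Long.rotateRight(b, 11);   // ==> orr a, b, ROR #11
//   }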
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct AddI_reg_URShift_reg(iRegINoSp dst,
iRegIorL2I src1, iRegIorL2I src2,
immI src3) %{
match(Set dst (AddI src1 (URShiftI src2 src3)));
ins_cost(1.9 * INSN_COST);
format %{ "addw $dst, $src1, $src2, LSR $src3" %}
ins_encode %{
__ addw(as_Register($dst$$reg),
as_Register($src1$$reg),
as_Register($src2$$reg),
Assembler::LSR,
$src3$$constant & 0x1f);
%}
ins_pipe(ialu_reg_reg_shift);
%}
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct AddL_reg_URShift_reg(iRegLNoSp dst,
iRegL src1, iRegL src2,
immI src3) %{
match(Set dst (AddL src1 (URShiftL src2 src3)));
ins_cost(1.9 * INSN_COST);
format %{ "add $dst, $src1, $src2, LSR $src3" %}
ins_encode %{
__ add(as_Register($dst$$reg),
as_Register($src1$$reg),
as_Register($src2$$reg),
Assembler::LSR,
$src3$$constant & 0x3f);
%}
ins_pipe(ialu_reg_reg_shift);
%}
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct AddI_reg_RShift_reg(iRegINoSp dst,
iRegIorL2I src1, iRegIorL2I src2,
immI src3) %{
match(Set dst (AddI src1 (RShiftI src2 src3)));
ins_cost(1.9 * INSN_COST);
format %{ "addw $dst, $src1, $src2, ASR $src3" %}
ins_encode %{
__ addw(as_Register($dst$$reg),
as_Register($src1$$reg),
as_Register($src2$$reg),
Assembler::ASR,
$src3$$constant & 0x1f);
%}
ins_pipe(ialu_reg_reg_shift);
%}
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct AddL_reg_RShift_reg(iRegLNoSp dst,
iRegL src1, iRegL src2,
immI src3) %{
match(Set dst (AddL src1 (RShiftL src2 src3)));
ins_cost(1.9 * INSN_COST);
format %{ "add $dst, $src1, $src2, ASR $src3" %}
ins_encode %{
__ add(as_Register($dst$$reg),
as_Register($src1$$reg),
as_Register($src2$$reg),
Assembler::ASR,
$src3$$constant & 0x3f);
%}
ins_pipe(ialu_reg_reg_shift);
%}
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct AddI_reg_LShift_reg(iRegINoSp dst,
iRegIorL2I src1, iRegIorL2I src2,
immI src3) %{
match(Set dst (AddI src1 (LShiftI src2 src3)));
ins_cost(1.9 * INSN_COST);
format %{ "addw $dst, $src1, $src2, LSL $src3" %}
ins_encode %{
__ addw(as_Register($dst$$reg),
as_Register($src1$$reg),
as_Register($src2$$reg),
Assembler::LSL,
$src3$$constant & 0x1f);
%}
ins_pipe(ialu_reg_reg_shift);
%}
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct AddL_reg_LShift_reg(iRegLNoSp dst,
iRegL src1, iRegL src2,
immI src3) %{
match(Set dst (AddL src1 (LShiftL src2 src3)));
ins_cost(1.9 * INSN_COST);
format %{ "add $dst, $src1, $src2, LSL $src3" %}
ins_encode %{
__ add(as_Register($dst$$reg),
as_Register($src1$$reg),
as_Register($src2$$reg),
Assembler::LSL,
$src3$$constant & 0x3f);
%}
ins_pipe(ialu_reg_reg_shift);
%}
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct SubI_reg_URShift_reg(iRegINoSp dst,
iRegIorL2I src1, iRegIorL2I src2,
immI src3) %{
match(Set dst (SubI src1 (URShiftI src2 src3)));
ins_cost(1.9 * INSN_COST);
format %{ "subw $dst, $src1, $src2, LSR $src3" %}
ins_encode %{
__ subw(as_Register($dst$$reg),
as_Register($src1$$reg),
as_Register($src2$$reg),
Assembler::LSR,
$src3$$constant & 0x1f);
%}
ins_pipe(ialu_reg_reg_shift);
%}
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct SubL_reg_URShift_reg(iRegLNoSp dst,
iRegL src1, iRegL src2,
immI src3) %{
match(Set dst (SubL src1 (URShiftL src2 src3)));
ins_cost(1.9 * INSN_COST);
format %{ "sub $dst, $src1, $src2, LSR $src3" %}
ins_encode %{
__ sub(as_Register($dst$$reg),
as_Register($src1$$reg),
as_Register($src2$$reg),
Assembler::LSR,
$src3$$constant & 0x3f);
%}
ins_pipe(ialu_reg_reg_shift);
%}
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct SubI_reg_RShift_reg(iRegINoSp dst,
iRegIorL2I src1, iRegIorL2I src2,
immI src3) %{
match(Set dst (SubI src1 (RShiftI src2 src3)));
ins_cost(1.9 * INSN_COST);
format %{ "subw $dst, $src1, $src2, ASR $src3" %}
ins_encode %{
__ subw(as_Register($dst$$reg),
as_Register($src1$$reg),
as_Register($src2$$reg),
Assembler::ASR,
$src3$$constant & 0x1f);
%}
ins_pipe(ialu_reg_reg_shift);
%}
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct SubL_reg_RShift_reg(iRegLNoSp dst,
iRegL src1, iRegL src2,
immI src3) %{
match(Set dst (SubL src1 (RShiftL src2 src3)));
ins_cost(1.9 * INSN_COST);
format %{ "sub $dst, $src1, $src2, ASR $src3" %}
ins_encode %{
__ sub(as_Register($dst$$reg),
as_Register($src1$$reg),
as_Register($src2$$reg),
Assembler::ASR,
$src3$$constant & 0x3f);
%}
ins_pipe(ialu_reg_reg_shift);
%}
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct SubI_reg_LShift_reg(iRegINoSp dst,
iRegIorL2I src1, iRegIorL2I src2,
immI src3) %{
match(Set dst (SubI src1 (LShiftI src2 src3)));
ins_cost(1.9 * INSN_COST);
format %{ "subw $dst, $src1, $src2, LSL $src3" %}
ins_encode %{
__ subw(as_Register($dst$$reg),
as_Register($src1$$reg),
as_Register($src2$$reg),
Assembler::LSL,
$src3$$constant & 0x1f);
%}
ins_pipe(ialu_reg_reg_shift);
%}
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct SubL_reg_LShift_reg(iRegLNoSp dst,
iRegL src1, iRegL src2,
immI src3) %{
match(Set dst (SubL src1 (LShiftL src2 src3)));
ins_cost(1.9 * INSN_COST);
format %{ "sub $dst, $src1, $src2, LSL $src3" %}
ins_encode %{
__ sub(as_Register($dst$$reg),
as_Register($src1$$reg),
as_Register($src2$$reg),
Assembler::LSL,
$src3$$constant & 0x3f);
%}
ins_pipe(ialu_reg_reg_shift);
%}
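// Likewise for add/sub: a constant shift of the second operand folds into
// the shifted-register form. A hedged Java sketch (hypothetical names):
//
//   static int addShiftExample(int a, int b) {
//     return a + (b << 3);     // ==> addw a, b, LSL #3
//   }
//
//   static long subShiftExample(long a, long b) {
//     return a - (b >>> 17);   // ==> sub a, b, LSR #17
//   }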
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// Shift Left followed by Shift Right.
// This idiom is used by the compiler for the i2b bytecode etc.
instruct sbfmL(iRegLNoSp dst, iRegL src, immI lshift_count, immI rshift_count)
%{
match(Set dst (RShiftL (LShiftL src lshift_count) rshift_count));
ins_cost(INSN_COST * 2);
format %{ "sbfm $dst, $src, $rshift_count - $lshift_count, #63 - $lshift_count" %}
ins_encode %{
int lshift = $lshift_count$$constant & 63;
int rshift = $rshift_count$$constant & 63;
int s = 63 - lshift;
int r = (rshift - lshift) & 63;
__ sbfm(as_Register($dst$$reg),
as_Register($src$$reg),
r, s);
%}
ins_pipe(ialu_reg_shift);
%}
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// Shift Left followed by Shift Right.
// This idiom is used by the compiler for the i2b bytecode etc.
instruct sbfmwI(iRegINoSp dst, iRegIorL2I src, immI lshift_count, immI rshift_count)
%{
match(Set dst (RShiftI (LShiftI src lshift_count) rshift_count));
ins_cost(INSN_COST * 2);
format %{ "sbfmw $dst, $src, $rshift_count - $lshift_count, #31 - $lshift_count" %}
ins_encode %{
int lshift = $lshift_count$$constant & 31;
int rshift = $rshift_count$$constant & 31;
int s = 31 - lshift;
int r = (rshift - lshift) & 31;
__ sbfmw(as_Register($dst$$reg),
as_Register($src$$reg),
r, s);
%}
ins_pipe(ialu_reg_shift);
%}
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// Shift Left followed by Shift Right.
// This idiom is used by the compiler for the i2b bytecode etc.
instruct ubfmL(iRegLNoSp dst, iRegL src, immI lshift_count, immI rshift_count)
%{
match(Set dst (URShiftL (LShiftL src lshift_count) rshift_count));
ins_cost(INSN_COST * 2);
format %{ "ubfm $dst, $src, $rshift_count - $lshift_count, #63 - $lshift_count" %}
ins_encode %{
int lshift = $lshift_count$$constant & 63;
int rshift = $rshift_count$$constant & 63;
int s = 63 - lshift;
int r = (rshift - lshift) & 63;
__ ubfm(as_Register($dst$$reg),
as_Register($src$$reg),
r, s);
%}
ins_pipe(ialu_reg_shift);
%}
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// Shift Left followed by Shift Right.
// This idiom is used by the compiler for the i2b bytecode etc.
instruct ubfmwI(iRegINoSp dst, iRegIorL2I src, immI lshift_count, immI rshift_count)
%{
match(Set dst (URShiftI (LShiftI src lshift_count) rshift_count));
ins_cost(INSN_COST * 2);
format %{ "ubfmw $dst, $src, $rshift_count - $lshift_count, #31 - $lshift_count" %}
ins_encode %{
int lshift = $lshift_count$$constant & 31;
int rshift = $rshift_count$$constant & 31;
int s = 31 - lshift;
int r = (rshift - lshift) & 31;
__ ubfmw(as_Register($dst$$reg),
as_Register($src$$reg),
r, s);
%}
ins_pipe(ialu_reg_shift);
%}
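// A hedged Java sketch of where the sbfm/ubfm rules above come from: the
// compiler lowers narrowing conversions such as i2b to a left shift
// followed by a right shift, e.g. "(byte) x" becomes "(x << 24) >> 24" in
// the ideal graph (hypothetical method names):
//
//   static int i2bExample(int x) {
//     return (byte) x;           // (x << 24) >> 24  ==> sbfmw (sxtb)
//   }
//
//   static int uxthExample(int x) {
//     return (x << 16) >>> 16;   // ==> ubfmw (uxth)
//   }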
// Bitfield extract with shift & mask
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct ubfxwI(iRegINoSp dst, iRegIorL2I src, immI rshift, immI_bitmask mask)
%{
match(Set dst (AndI (URShiftI src rshift) mask));
// Make sure we are not going to exceed what ubfxw can do.
predicate((exact_log2(n->in(2)->get_int() + 1) + (n->in(1)->in(2)->get_int() & 31)) <= (31 + 1));
ins_cost(INSN_COST);
format %{ "ubfxw $dst, $src, $rshift, $mask" %}
ins_encode %{
int rshift = $rshift$$constant & 31;
intptr_t mask = $mask$$constant;
int width = exact_log2(mask+1);
__ ubfxw(as_Register($dst$$reg),
as_Register($src$$reg), rshift, width);
%}
ins_pipe(ialu_reg_shift);
%}
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct ubfxL(iRegLNoSp dst, iRegL src, immI rshift, immL_bitmask mask)
%{
match(Set dst (AndL (URShiftL src rshift) mask));
// Make sure we are not going to exceed what ubfx can do.
predicate((exact_log2_long(n->in(2)->get_long() + 1) + (n->in(1)->in(2)->get_int() & 63)) <= (63 + 1));
ins_cost(INSN_COST);
format %{ "ubfx $dst, $src, $rshift, $mask" %}
ins_encode %{
int rshift = $rshift$$constant & 63;
intptr_t mask = $mask$$constant;
int width = exact_log2_long(mask+1);
__ ubfx(as_Register($dst$$reg),
as_Register($src$$reg), rshift, width);
%}
ins_pipe(ialu_reg_shift);
%}
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// We can use ubfx when extending an AndI with a mask, as long as the mask
// is positive. We know it is because immI_bitmask guarantees it.
instruct ubfxIConvI2L(iRegLNoSp dst, iRegIorL2I src, immI rshift, immI_bitmask mask)
%{
match(Set dst (ConvI2L (AndI (URShiftI src rshift) mask)));
// Make sure we are not going to exceed what ubfxw can do.
predicate((exact_log2(n->in(1)->in(2)->get_int() + 1) + (n->in(1)->in(1)->in(2)->get_int() & 31)) <= (31 + 1));
ins_cost(INSN_COST * 2);
format %{ "ubfx $dst, $src, $rshift, $mask" %}
ins_encode %{
int rshift = $rshift$$constant & 31;
intptr_t mask = $mask$$constant;
int width = exact_log2(mask+1);
__ ubfx(as_Register($dst$$reg),
as_Register($src$$reg), rshift, width);
%}
ins_pipe(ialu_reg_shift);
%}
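// A hedged Java sketch for the ubfx rules above: an unsigned right shift
// followed by an AND with a low-bit mask extracts a contiguous bitfield,
// as long as shift + field width fit in the register (which is what the
// predicates check). Hypothetical method name:
//
//   static int ubfxExample(int x) {
//     return (x >>> 7) & 0x3f;   // 6-bit field at bit 7 ==> ubfxw dst, x, #7, #6
//   }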
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// We can use ubfiz when masking by a positive number and then left shifting the result.
// We know that the mask is positive because immI_bitmask guarantees it.
instruct ubfizwI(iRegINoSp dst, iRegIorL2I src, immI lshift, immI_bitmask mask)
%{
match(Set dst (LShiftI (AndI src mask) lshift));
predicate((exact_log2(n->in(1)->in(2)->get_int() + 1) + (n->in(2)->get_int() & 31)) <= (31 + 1));
ins_cost(INSN_COST);
format %{ "ubfizw $dst, $src, $lshift, $mask" %}
ins_encode %{
int lshift = $lshift$$constant & 31;
intptr_t mask = $mask$$constant;
int width = exact_log2(mask+1);
__ ubfizw(as_Register($dst$$reg),
as_Register($src$$reg), lshift, width);
%}
ins_pipe(ialu_reg_shift);
%}
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// We can use ubfiz when masking by a positive number and then left shifting the result.
// We know that the mask is positive because immL_bitmask guarantees it.
instruct ubfizL(iRegLNoSp dst, iRegL src, immI lshift, immL_bitmask mask)
%{
match(Set dst (LShiftL (AndL src mask) lshift));
predicate((exact_log2_long(n->in(1)->in(2)->get_long() + 1) + (n->in(2)->get_int() & 63)) <= (63 + 1));
ins_cost(INSN_COST);
format %{ "ubfiz $dst, $src, $lshift, $mask" %}
ins_encode %{
int lshift = $lshift$$constant & 63;
intptr_t mask = $mask$$constant;
int width = exact_log2_long(mask+1);
__ ubfiz(as_Register($dst$$reg),
as_Register($src$$reg), lshift, width);
%}
ins_pipe(ialu_reg_shift);
%}
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// We can use ubfiz when masking by a positive number and then left shifting the result.
// We know that the mask is positive because immI_bitmask guarantees it.
instruct ubfizwIConvI2L(iRegLNoSp dst, iRegIorL2I src, immI lshift, immI_bitmask mask)
%{
match(Set dst (ConvI2L (LShiftI (AndI src mask) lshift)));
predicate((exact_log2(n->in(1)->in(1)->in(2)->get_int() + 1) + (n->in(1)->in(2)->get_int() & 31)) <= 31);
ins_cost(INSN_COST);
format %{ "ubfizw $dst, $src, $lshift, $mask" %}
ins_encode %{
int lshift = $lshift$$constant & 31;
intptr_t mask = $mask$$constant;
int width = exact_log2(mask+1);
__ ubfizw(as_Register($dst$$reg),
as_Register($src$$reg), lshift, width);
%}
ins_pipe(ialu_reg_shift);
%}
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// We can use ubfiz when masking by a positive number and then left shifting the result.
// We know that the mask is positive because immL_positive_bitmaskI guarantees it.
instruct ubfizLConvL2I(iRegINoSp dst, iRegL src, immI lshift, immL_positive_bitmaskI mask)
%{
match(Set dst (ConvL2I (LShiftL (AndL src mask) lshift)));
predicate((exact_log2_long(n->in(1)->in(1)->in(2)->get_long() + 1) + (n->in(1)->in(2)->get_int() & 63)) <= 31);
ins_cost(INSN_COST);
format %{ "ubfiz $dst, $src, $lshift, $mask" %}
ins_encode %{
int lshift = $lshift$$constant & 63;
intptr_t mask = $mask$$constant;
int width = exact_log2_long(mask+1);
__ ubfiz(as_Register($dst$$reg),
as_Register($src$$reg), lshift, width);
%}
ins_pipe(ialu_reg_shift);
%}
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// If there is a convert I to L block between an AndI and a LShiftL, we can also match ubfiz
instruct ubfizIConvI2L(iRegLNoSp dst, iRegIorL2I src, immI lshift, immI_bitmask mask)
%{
match(Set dst (LShiftL (ConvI2L (AndI src mask)) lshift));
predicate((exact_log2(n->in(1)->in(1)->in(2)->get_int() + 1) + (n->in(2)->get_int() & 63)) <= (63 + 1));
ins_cost(INSN_COST);
format %{ "ubfiz $dst, $src, $lshift, $mask" %}
ins_encode %{
int lshift = $lshift$$constant & 63;
intptr_t mask = $mask$$constant;
int width = exact_log2(mask+1);
__ ubfiz(as_Register($dst$$reg),
as_Register($src$$reg), lshift, width);
%}
ins_pipe(ialu_reg_shift);
%}
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// If there is a convert L to I block between an AndL and a LShiftI, we can also match ubfiz
instruct ubfizLConvL2Ix(iRegINoSp dst, iRegL src, immI lshift, immL_positive_bitmaskI mask)
%{
match(Set dst (LShiftI (ConvL2I (AndL src mask)) lshift));
predicate((exact_log2_long(n->in(1)->in(1)->in(2)->get_long() + 1) + (n->in(2)->get_int() & 31)) <= 31);
ins_cost(INSN_COST);
format %{ "ubfiz $dst, $src, $lshift, $mask" %}
ins_encode %{
int lshift = $lshift$$constant & 31;
intptr_t mask = $mask$$constant;
int width = exact_log2(mask+1);
__ ubfiz(as_Register($dst$$reg),
as_Register($src$$reg), lshift, width);
%}
ins_pipe(ialu_reg_shift);
%}
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
// We can skip the int-to-long conversion after an AND with a small bitmask
instruct ubfizIConvI2LAndI(iRegLNoSp dst, iRegI src, immI_bitmask msk)
%{
match(Set dst (ConvI2L (AndI src msk)));
ins_cost(INSN_COST);
format %{ "ubfiz $dst, $src, 0, exact_log2($msk + 1) " %}
ins_encode %{
__ ubfiz(as_Register($dst$$reg), as_Register($src$$reg), 0, exact_log2($msk$$constant + 1));
%}
ins_pipe(ialu_reg_shift);
%}
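// A hedged Java sketch for the ubfiz rules above: masking to a low-bit
// field and then shifting left inserts the field at the shift position,
// and a widening conversion around the mask can be absorbed as well
// (hypothetical method names):
//
//   static int ubfizExample(int x) {
//     return (x & 0x3f) << 7;     // ==> ubfizw dst, x, #7, #6
//   }
//
//   static long convExample(int x) {
//     return (long) (x & 0xff);   // ConvI2L (AndI x 0xff) ==> ubfiz dst, x, #0, #8
//   }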
// Rotations
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct extrOrL(iRegLNoSp dst, iRegL src1, iRegL src2, immI lshift, immI rshift, rFlagsReg cr)
%{
match(Set dst (OrL (LShiftL src1 lshift) (URShiftL src2 rshift)));
predicate(0 == (((n->in(1)->in(2)->get_int() & 63) + (n->in(2)->in(2)->get_int() & 63)) & 63));
ins_cost(INSN_COST);
format %{ "extr $dst, $src1, $src2, #$rshift" %}
ins_encode %{
__ extr(as_Register($dst$$reg), as_Register($src1$$reg), as_Register($src2$$reg),
$rshift$$constant & 63);
%}
ins_pipe(ialu_reg_reg_extr);
%}
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct extrOrI(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI lshift, immI rshift, rFlagsReg cr)
%{
match(Set dst (OrI (LShiftI src1 lshift) (URShiftI src2 rshift)));
predicate(0 == (((n->in(1)->in(2)->get_int() & 31) + (n->in(2)->in(2)->get_int() & 31)) & 31));
ins_cost(INSN_COST);
format %{ "extr $dst, $src1, $src2, #$rshift" %}
ins_encode %{
__ extrw(as_Register($dst$$reg), as_Register($src1$$reg), as_Register($src2$$reg),
$rshift$$constant & 31);
%}
ins_pipe(ialu_reg_reg_extr);
%}
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct extrAddL(iRegLNoSp dst, iRegL src1, iRegL src2, immI lshift, immI rshift, rFlagsReg cr)
%{
match(Set dst (AddL (LShiftL src1 lshift) (URShiftL src2 rshift)));
predicate(0 == (((n->in(1)->in(2)->get_int() & 63) + (n->in(2)->in(2)->get_int() & 63)) & 63));
ins_cost(INSN_COST);
format %{ "extr $dst, $src1, $src2, #$rshift" %}
ins_encode %{
__ extr(as_Register($dst$$reg), as_Register($src1$$reg), as_Register($src2$$reg),
$rshift$$constant & 63);
%}
ins_pipe(ialu_reg_reg_extr);
%}
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct extrAddI(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI lshift, immI rshift, rFlagsReg cr)
%{
match(Set dst (AddI (LShiftI src1 lshift) (URShiftI src2 rshift)));
predicate(0 == (((n->in(1)->in(2)->get_int() & 31) + (n->in(2)->in(2)->get_int() & 31)) & 31));
ins_cost(INSN_COST);
format %{ "extr $dst, $src1, $src2, #$rshift" %}
ins_encode %{
__ extrw(as_Register($dst$$reg), as_Register($src1$$reg), as_Register($src2$$reg),
$rshift$$constant & 31);
%}
ins_pipe(ialu_reg_reg_extr);
%}
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct rorI_imm(iRegINoSp dst, iRegI src, immI shift)
%{
match(Set dst (RotateRight src shift));
ins_cost(INSN_COST);
format %{ "ror $dst, $src, $shift" %}
ins_encode %{
__ extrw(as_Register($dst$$reg), as_Register($src$$reg), as_Register($src$$reg),
$shift$$constant & 0x1f);
%}
ins_pipe(ialu_reg_reg_vshift);
%}
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct rorL_imm(iRegLNoSp dst, iRegL src, immI shift)
%{
match(Set dst (RotateRight src shift));
ins_cost(INSN_COST);
format %{ "ror $dst, $src, $shift" %}
ins_encode %{
__ extr(as_Register($dst$$reg), as_Register($src$$reg), as_Register($src$$reg),
$shift$$constant & 0x3f);
%}
ins_pipe(ialu_reg_reg_vshift);
%}
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct rorI_reg(iRegINoSp dst, iRegI src, iRegI shift)
%{
match(Set dst (RotateRight src shift));
ins_cost(INSN_COST);
format %{ "ror $dst, $src, $shift" %}
ins_encode %{
__ rorvw(as_Register($dst$$reg), as_Register($src$$reg), as_Register($shift$$reg));
%}
ins_pipe(ialu_reg_reg_vshift);
%}
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct rorL_reg(iRegLNoSp dst, iRegL src, iRegI shift)
%{
match(Set dst (RotateRight src shift));
ins_cost(INSN_COST);
format %{ "ror $dst, $src, $shift" %}
ins_encode %{
__ rorv(as_Register($dst$$reg), as_Register($src$$reg), as_Register($shift$$reg));
%}
ins_pipe(ialu_reg_reg_vshift);
%}
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct rolI_reg(iRegINoSp dst, iRegI src, iRegI shift)
%{
match(Set dst (RotateLeft src shift));
ins_cost(INSN_COST);
format %{ "rol $dst, $src, $shift" %}
ins_encode %{
__ subw(rscratch1, zr, as_Register($shift$$reg));
__ rorvw(as_Register($dst$$reg), as_Register($src$$reg), rscratch1);
%}
ins_pipe(ialu_reg_reg_vshift);
%}
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct rolL_reg(iRegLNoSp dst, iRegL src, iRegI shift)
%{
match(Set dst (RotateLeft src shift));
ins_cost(INSN_COST);
format %{ "rol $dst, $src, $shift" %}
ins_encode %{
__ subw(rscratch1, zr, as_Register($shift$$reg));
__ rorv(as_Register($dst$$reg), as_Register($src$$reg), rscratch1);
%}
ins_pipe(ialu_reg_reg_vshift);
%}
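// A hedged Java sketch for the rotate/extract rules above: extr
// concatenates its two sources and pulls out a register-width field, so a
// constant rotate is extr with both sources the same, and an OR of
// complementary constant shifts is extr of two different sources. AArch64
// has no rotate-left instruction, so a variable rotate-left negates the
// count and uses rorv (hypothetical method names):
//
//   static long extrExample(long a, long b) {
//     return (a << 53) | (b >>> 11);     // 53 + 11 == 64 ==> extr dst, a, b, #11
//   }
//
//   static int rorExample(int x) {
//     return Integer.rotateRight(x, 13); // ==> extrw dst, x, x, #13
//   }
//
//   static long rolExample(long x, int n) {
//     return Long.rotateLeft(x, n);      // ==> neg + rorv
//   }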
// Add/subtract (extended)
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct AddExtI(iRegLNoSp dst, iRegL src1, iRegIorL2I src2, rFlagsReg cr)
%{
match(Set dst (AddL src1 (ConvI2L src2)));
ins_cost(INSN_COST);
format %{ "add $dst, $src1, $src2, sxtw" %}
ins_encode %{
__ add(as_Register($dst$$reg), as_Register($src1$$reg),
as_Register($src2$$reg), ext::sxtw);
%}
ins_pipe(ialu_reg_reg);
%}
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct SubExtI(iRegLNoSp dst, iRegL src1, iRegIorL2I src2, rFlagsReg cr)
%{
match(Set dst (SubL src1 (ConvI2L src2)));
ins_cost(INSN_COST);
format %{ "sub $dst, $src1, $src2, sxtw" %}
ins_encode %{
__ sub(as_Register($dst$$reg), as_Register($src1$$reg),
as_Register($src2$$reg), ext::sxtw);
%}
ins_pipe(ialu_reg_reg);
%}
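// A hedged Java sketch for the AddExtI/SubExtI rules above: mixing long
// and int arithmetic leaves a ConvI2L on the int operand, which folds
// into the instruction's sign-extending register form (hypothetical name):
//
//   static long addSxtwExample(long a, int b) {
//     return a + b;    // AddL a (ConvI2L b) ==> add dst, a, b, sxtw
//   }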
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct AddExtI_sxth(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_16 lshift, immI_16 rshift, rFlagsReg cr)
%{
match(Set dst (AddI src1 (RShiftI (LShiftI src2 lshift) rshift)));
ins_cost(INSN_COST);
format %{ "add $dst, $src1, $src2, sxth" %}
ins_encode %{
__ add(as_Register($dst$$reg), as_Register($src1$$reg),
as_Register($src2$$reg), ext::sxth);
%}
ins_pipe(ialu_reg_reg);
%}
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct AddExtI_sxtb(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_24 lshift, immI_24 rshift, rFlagsReg cr)
%{
match(Set dst (AddI src1 (RShiftI (LShiftI src2 lshift) rshift)));
ins_cost(INSN_COST);
format %{ "add $dst, $src1, $src2, sxtb" %}
ins_encode %{
__ add(as_Register($dst$$reg), as_Register($src1$$reg),
as_Register($src2$$reg), ext::sxtb);
%}
ins_pipe(ialu_reg_reg);
%}
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct AddExtI_uxtb(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_24 lshift, immI_24 rshift, rFlagsReg cr)
%{
match(Set dst (AddI src1 (URShiftI (LShiftI src2 lshift) rshift)));
ins_cost(INSN_COST);
format %{ "add $dst, $src1, $src2, uxtb" %}
ins_encode %{
__ add(as_Register($dst$$reg), as_Register($src1$$reg),
as_Register($src2$$reg), ext::uxtb);
%}
ins_pipe(ialu_reg_reg);
%}
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct AddExtL_sxth(iRegLNoSp dst, iRegL src1, iRegL src2, immI_48 lshift, immI_48 rshift, rFlagsReg cr)
%{
match(Set dst (AddL src1 (RShiftL (LShiftL src2 lshift) rshift)));
ins_cost(INSN_COST);
format %{ "add $dst, $src1, $src2, sxth" %}
ins_encode %{
__ add(as_Register($dst$$reg), as_Register($src1$$reg),
as_Register($src2$$reg), ext::sxth);
%}
ins_pipe(ialu_reg_reg);
%}
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct AddExtL_sxtw(iRegLNoSp dst, iRegL src1, iRegL src2, immI_32 lshift, immI_32 rshift, rFlagsReg cr)
%{
match(Set dst (AddL src1 (RShiftL (LShiftL src2 lshift) rshift)));
ins_cost(INSN_COST);
format %{ "add $dst, $src1, $src2, sxtw" %}
ins_encode %{
__ add(as_Register($dst$$reg), as_Register($src1$$reg),
as_Register($src2$$reg), ext::sxtw);
%}
ins_pipe(ialu_reg_reg);
%}
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct AddExtL_sxtb(iRegLNoSp dst, iRegL src1, iRegL src2, immI_56 lshift, immI_56 rshift, rFlagsReg cr)
%{
match(Set dst (AddL src1 (RShiftL (LShiftL src2 lshift) rshift)));
ins_cost(INSN_COST);
format %{ "add $dst, $src1, $src2, sxtb" %}
ins_encode %{
__ add(as_Register($dst$$reg), as_Register($src1$$reg),
as_Register($src2$$reg), ext::sxtb);
%}
ins_pipe(ialu_reg_reg);
%}
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct AddExtL_uxtb(iRegLNoSp dst, iRegL src1, iRegL src2, immI_56 lshift, immI_56 rshift, rFlagsReg cr)
%{
match(Set dst (AddL src1 (URShiftL (LShiftL src2 lshift) rshift)));
ins_cost(INSN_COST);
format %{ "add $dst, $src1, $src2, uxtb" %}
ins_encode %{
__ add(as_Register($dst$$reg), as_Register($src1$$reg),
as_Register($src2$$reg), ext::uxtb);
%}
ins_pipe(ialu_reg_reg);
%}
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct AddExtI_uxtb_and(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_255 mask, rFlagsReg cr)
%{
match(Set dst (AddI src1 (AndI src2 mask)));
ins_cost(INSN_COST);
format %{ "addw $dst, $src1, $src2, uxtb" %}
ins_encode %{
__ addw(as_Register($dst$$reg), as_Register($src1$$reg),
as_Register($src2$$reg), ext::uxtb);
%}
ins_pipe(ialu_reg_reg);
%}
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct AddExtI_uxth_and(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_65535 mask, rFlagsReg cr)
%{
match(Set dst (AddI src1 (AndI src2 mask)));
ins_cost(INSN_COST);
format %{ "addw $dst, $src1, $src2, uxth" %}
ins_encode %{
__ addw(as_Register($dst$$reg), as_Register($src1$$reg),
as_Register($src2$$reg), ext::uxth);
%}
ins_pipe(ialu_reg_reg);
%}
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct AddExtL_uxtb_and(iRegLNoSp dst, iRegL src1, iRegL src2, immL_255 mask, rFlagsReg cr)
%{
match(Set dst (AddL src1 (AndL src2 mask)));
ins_cost(INSN_COST);
format %{ "add $dst, $src1, $src2, uxtb" %}
ins_encode %{
__ add(as_Register($dst$$reg), as_Register($src1$$reg),
as_Register($src2$$reg), ext::uxtb);
%}
ins_pipe(ialu_reg_reg);
%}
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct AddExtL_uxth_and(iRegLNoSp dst, iRegL src1, iRegL src2, immL_65535 mask, rFlagsReg cr)
%{
match(Set dst (AddL src1 (AndL src2 mask)));
ins_cost(INSN_COST);
format %{ "add $dst, $src1, $src2, uxth" %}
ins_encode %{
__ add(as_Register($dst$$reg), as_Register($src1$$reg),
as_Register($src2$$reg), ext::uxth);
%}
ins_pipe(ialu_reg_reg);
%}
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct AddExtL_uxtw_and(iRegLNoSp dst, iRegL src1, iRegL src2, immL_4294967295 mask, rFlagsReg cr)
%{
match(Set dst (AddL src1 (AndL src2 mask)));
ins_cost(INSN_COST);
format %{ "add $dst, $src1, $src2, uxtw" %}
ins_encode %{
__ add(as_Register($dst$$reg), as_Register($src1$$reg),
as_Register($src2$$reg), ext::uxtw);
%}
ins_pipe(ialu_reg_reg);
%}
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct SubExtI_uxtb_and(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_255 mask, rFlagsReg cr)
%{
match(Set dst (SubI src1 (AndI src2 mask)));
ins_cost(INSN_COST);
format %{ "subw $dst, $src1, $src2, uxtb" %}
ins_encode %{
__ subw(as_Register($dst$$reg), as_Register($src1$$reg),
as_Register($src2$$reg), ext::uxtb);
%}
ins_pipe(ialu_reg_reg);
%}
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct SubExtI_uxth_and(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_65535 mask, rFlagsReg cr)
%{
match(Set dst (SubI src1 (AndI src2 mask)));
ins_cost(INSN_COST);
format %{ "subw $dst, $src1, $src2, uxth" %}
ins_encode %{
__ subw(as_Register($dst$$reg), as_Register($src1$$reg),
as_Register($src2$$reg), ext::uxth);
%}
ins_pipe(ialu_reg_reg);
%}
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct SubExtL_uxtb_and(iRegLNoSp dst, iRegL src1, iRegL src2, immL_255 mask, rFlagsReg cr)
%{
match(Set dst (SubL src1 (AndL src2 mask)));
ins_cost(INSN_COST);
format %{ "sub $dst, $src1, $src2, uxtb" %}
ins_encode %{
__ sub(as_Register($dst$$reg), as_Register($src1$$reg),
as_Register($src2$$reg), ext::uxtb);
%}
ins_pipe(ialu_reg_reg);
%}
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct SubExtL_uxth_and(iRegLNoSp dst, iRegL src1, iRegL src2, immL_65535 mask, rFlagsReg cr)
%{
match(Set dst (SubL src1 (AndL src2 mask)));
ins_cost(INSN_COST);
format %{ "sub $dst, $src1, $src2, uxth" %}
ins_encode %{
__ sub(as_Register($dst$$reg), as_Register($src1$$reg),
as_Register($src2$$reg), ext::uxth);
%}
ins_pipe(ialu_reg_reg);
%}
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct SubExtL_uxtw_and(iRegLNoSp dst, iRegL src1, iRegL src2, immL_4294967295 mask, rFlagsReg cr)
%{
match(Set dst (SubL src1 (AndL src2 mask)));
ins_cost(INSN_COST);
format %{ "sub $dst, $src1, $src2, uxtw" %}
ins_encode %{
__ sub(as_Register($dst$$reg), as_Register($src1$$reg),
as_Register($src2$$reg), ext::uxtw);
%}
ins_pipe(ialu_reg_reg);
%}
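// A hedged Java sketch for the mask-based extend rules above: an AND with
// 0xff / 0xffff / 0xffffffffL is a zero-extension, which folds into the
// extended-register form of add/sub (hypothetical method names):
//
//   static int addUxtbExample(int a, int b) {
//     return a + (b & 0xff);          // ==> addw dst, a, b, uxtb
//   }
//
//   static long subUxtwExample(long a, long b) {
//     return a - (b & 0xffffffffL);   // ==> sub dst, a, b, uxtw
//   }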
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct AddExtL_sxtb_shift(iRegLNoSp dst, iRegL src1, iRegL src2, immIExt lshift2, immI_56 lshift1, immI_56 rshift1, rFlagsReg cr)
%{
match(Set dst (AddL src1 (LShiftL (RShiftL (LShiftL src2 lshift1) rshift1) lshift2)));
ins_cost(1.9 * INSN_COST);
format %{ "add $dst, $src1, $src2, sxtb #lshift2" %}
ins_encode %{
__ add(as_Register($dst$$reg), as_Register($src1$$reg),
as_Register($src2$$reg), ext::sxtb, ($lshift2$$constant));
%}
ins_pipe(ialu_reg_reg_shift);
%}
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct AddExtL_sxth_shift(iRegLNoSp dst, iRegL src1, iRegL src2, immIExt lshift2, immI_48 lshift1, immI_48 rshift1, rFlagsReg cr)
%{
match(Set dst (AddL src1 (LShiftL (RShiftL (LShiftL src2 lshift1) rshift1) lshift2)));
ins_cost(1.9 * INSN_COST);
format %{ "add $dst, $src1, $src2, sxth #lshift2" %}
ins_encode %{
__ add(as_Register($dst$$reg), as_Register($src1$$reg),
as_Register($src2$$reg), ext::sxth, ($lshift2$$constant));
%}
ins_pipe(ialu_reg_reg_shift);
%}
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct AddExtL_sxtw_shift(iRegLNoSp dst, iRegL src1, iRegL src2, immIExt lshift2, immI_32 lshift1, immI_32 rshift1, rFlagsReg cr)
%{
match(Set dst (AddL src1 (LShiftL (RShiftL (LShiftL src2 lshift1) rshift1) lshift2)));
ins_cost(1.9 * INSN_COST);
format %{ "add $dst, $src1, $src2, sxtw #lshift2" %}
ins_encode %{
__ add(as_Register($dst$$reg), as_Register($src1$$reg),
as_Register($src2$$reg), ext::sxtw, ($lshift2$$constant));
%}
ins_pipe(ialu_reg_reg_shift);
%}
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct SubExtL_sxtb_shift(iRegLNoSp dst, iRegL src1, iRegL src2, immIExt lshift2, immI_56 lshift1, immI_56 rshift1, rFlagsReg cr)
%{
match(Set dst (SubL src1 (LShiftL (RShiftL (LShiftL src2 lshift1) rshift1) lshift2)));
ins_cost(1.9 * INSN_COST);
format %{ "sub $dst, $src1, $src2, sxtb #lshift2" %}
ins_encode %{
__ sub(as_Register($dst$$reg), as_Register($src1$$reg),
as_Register($src2$$reg), ext::sxtb, ($lshift2$$constant));
%}
ins_pipe(ialu_reg_reg_shift);
%}
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct SubExtL_sxth_shift(iRegLNoSp dst, iRegL src1, iRegL src2, immIExt lshift2, immI_48 lshift1, immI_48 rshift1, rFlagsReg cr)
%{
match(Set dst (SubL src1 (LShiftL (RShiftL (LShiftL src2 lshift1) rshift1) lshift2)));
ins_cost(1.9 * INSN_COST);
format %{ "sub $dst, $src1, $src2, sxth #lshift2" %}
ins_encode %{
__ sub(as_Register($dst$$reg), as_Register($src1$$reg),
as_Register($src2$$reg), ext::sxth, ($lshift2$$constant));
%}
ins_pipe(ialu_reg_reg_shift);
%}
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct SubExtL_sxtw_shift(iRegLNoSp dst, iRegL src1, iRegL src2, immIExt lshift2, immI_32 lshift1, immI_32 rshift1, rFlagsReg cr)
%{
match(Set dst (SubL src1 (LShiftL (RShiftL (LShiftL src2 lshift1) rshift1) lshift2)));
ins_cost(1.9 * INSN_COST);
format %{ "sub $dst, $src1, $src2, sxtw #lshift2" %}
ins_encode %{
__ sub(as_Register($dst$$reg), as_Register($src1$$reg),
as_Register($src2$$reg), ext::sxtw, ($lshift2$$constant));
%}
ins_pipe(ialu_reg_reg_shift);
%}
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct AddExtI_sxtb_shift(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immIExt lshift2, immI_24 lshift1, immI_24 rshift1, rFlagsReg cr)
%{
match(Set dst (AddI src1 (LShiftI (RShiftI (LShiftI src2 lshift1) rshift1) lshift2)));
ins_cost(1.9 * INSN_COST);
format %{ "addw $dst, $src1, $src2, sxtb #lshift2" %}
ins_encode %{
__ addw(as_Register($dst$$reg), as_Register($src1$$reg),
as_Register($src2$$reg), ext::sxtb, ($lshift2$$constant));
%}
ins_pipe(ialu_reg_reg_shift);
%}
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct AddExtI_sxth_shift(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immIExt lshift2, immI_16 lshift1, immI_16 rshift1, rFlagsReg cr)
%{
match(Set dst (AddI src1 (LShiftI (RShiftI (LShiftI src2 lshift1) rshift1) lshift2)));
ins_cost(1.9 * INSN_COST);
format %{ "addw $dst, $src1, $src2, sxth #lshift2" %}
ins_encode %{
__ addw(as_Register($dst$$reg), as_Register($src1$$reg),
as_Register($src2$$reg), ext::sxth, ($lshift2$$constant));
%}
ins_pipe(ialu_reg_reg_shift);
%}
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct SubExtI_sxtb_shift(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immIExt lshift2, immI_24 lshift1, immI_24 rshift1, rFlagsReg cr)
%{
match(Set dst (SubI src1 (LShiftI (RShiftI (LShiftI src2 lshift1) rshift1) lshift2)));
ins_cost(1.9 * INSN_COST);
format %{ "subw $dst, $src1, $src2, sxtb #lshift2" %}
ins_encode %{
__ subw(as_Register($dst$$reg), as_Register($src1$$reg),
as_Register($src2$$reg), ext::sxtb, ($lshift2$$constant));
%}
ins_pipe(ialu_reg_reg_shift);
%}
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct SubExtI_sxth_shift(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immIExt lshift2, immI_16 lshift1, immI_16 rshift1, rFlagsReg cr)
%{
match(Set dst (SubI src1 (LShiftI (RShiftI (LShiftI src2 lshift1) rshift1) lshift2)));
ins_cost(1.9 * INSN_COST);
format %{ "subw $dst, $src1, $src2, sxth #lshift2" %}
ins_encode %{
__ subw(as_Register($dst$$reg), as_Register($src1$$reg),
as_Register($src2$$reg), ext::sxth, ($lshift2$$constant));
%}
ins_pipe(ialu_reg_reg_shift);
%}
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct AddExtI_shift(iRegLNoSp dst, iRegL src1, iRegIorL2I src2, immIExt lshift, rFlagsReg cr)
%{
match(Set dst (AddL src1 (LShiftL (ConvI2L src2) lshift)));
ins_cost(1.9 * INSN_COST);
format %{ "add $dst, $src1, $src2, sxtw #lshift" %}
ins_encode %{
__ add(as_Register($dst$$reg), as_Register($src1$$reg),
as_Register($src2$$reg), ext::sxtw, ($lshift$$constant));
%}
ins_pipe(ialu_reg_reg_shift);
%}
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct SubExtI_shift(iRegLNoSp dst, iRegL src1, iRegIorL2I src2, immIExt lshift, rFlagsReg cr)
%{
match(Set dst (SubL src1 (LShiftL (ConvI2L src2) lshift)));
ins_cost(1.9 * INSN_COST);
format %{ "sub $dst, $src1, $src2, sxtw #lshift" %}
ins_encode %{
__ sub(as_Register($dst$$reg), as_Register($src1$$reg),
as_Register($src2$$reg), ext::sxtw, ($lshift$$constant));
%}
ins_pipe(ialu_reg_reg_shift);
%}
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct AddExtL_uxtb_and_shift(iRegLNoSp dst, iRegL src1, iRegL src2, immL_255 mask, immIExt lshift, rFlagsReg cr)
%{
match(Set dst (AddL src1 (LShiftL (AndL src2 mask) lshift)));
ins_cost(1.9 * INSN_COST);
format %{ "add $dst, $src1, $src2, uxtb #lshift" %}
ins_encode %{
__ add(as_Register($dst$$reg), as_Register($src1$$reg),
as_Register($src2$$reg), ext::uxtb, ($lshift$$constant));
%}
ins_pipe(ialu_reg_reg_shift);
%}
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct AddExtL_uxth_and_shift(iRegLNoSp dst, iRegL src1, iRegL src2, immL_65535 mask, immIExt lshift, rFlagsReg cr)
%{
match(Set dst (AddL src1 (LShiftL (AndL src2 mask) lshift)));
ins_cost(1.9 * INSN_COST);
format %{ "add $dst, $src1, $src2, uxth #lshift" %}
ins_encode %{
__ add(as_Register($dst$$reg), as_Register($src1$$reg),
as_Register($src2$$reg), ext::uxth, ($lshift$$constant));
%}
ins_pipe(ialu_reg_reg_shift);
%}
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct AddExtL_uxtw_and_shift(iRegLNoSp dst, iRegL src1, iRegL src2, immL_4294967295 mask, immIExt lshift, rFlagsReg cr)
%{
match(Set dst (AddL src1 (LShiftL (AndL src2 mask) lshift)));
ins_cost(1.9 * INSN_COST);
format %{ "add $dst, $src1, $src2, uxtw #lshift" %}
ins_encode %{
__ add(as_Register($dst$$reg), as_Register($src1$$reg),
as_Register($src2$$reg), ext::uxtw, ($lshift$$constant));
%}
ins_pipe(ialu_reg_reg_shift);
%}
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct SubExtL_uxtb_and_shift(iRegLNoSp dst, iRegL src1, iRegL src2, immL_255 mask, immIExt lshift, rFlagsReg cr)
%{
match(Set dst (SubL src1 (LShiftL (AndL src2 mask) lshift)));
ins_cost(1.9 * INSN_COST);
format %{ "sub $dst, $src1, $src2, uxtb #lshift" %}
ins_encode %{
__ sub(as_Register($dst$$reg), as_Register($src1$$reg),
as_Register($src2$$reg), ext::uxtb, ($lshift$$constant));
%}
ins_pipe(ialu_reg_reg_shift);
%}
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct SubExtL_uxth_and_shift(iRegLNoSp dst, iRegL src1, iRegL src2, immL_65535 mask, immIExt lshift, rFlagsReg cr)
%{
match(Set dst (SubL src1 (LShiftL (AndL src2 mask) lshift)));
ins_cost(1.9 * INSN_COST);
format %{ "sub $dst, $src1, $src2, uxth #lshift" %}
ins_encode %{
__ sub(as_Register($dst$$reg), as_Register($src1$$reg),
as_Register($src2$$reg), ext::uxth, ($lshift$$constant));
%}
ins_pipe(ialu_reg_reg_shift);
%}
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct SubExtL_uxtw_and_shift(iRegLNoSp dst, iRegL src1, iRegL src2, immL_4294967295 mask, immIExt lshift, rFlagsReg cr)
%{
match(Set dst (SubL src1 (LShiftL (AndL src2 mask) lshift)));
ins_cost(1.9 * INSN_COST);
format %{ "sub $dst, $src1, $src2, uxtw #lshift" %}
ins_encode %{
__ sub(as_Register($dst$$reg), as_Register($src1$$reg),
as_Register($src2$$reg), ext::uxtw, ($lshift$$constant));
%}
ins_pipe(ialu_reg_reg_shift);
%}
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct AddExtI_uxtb_and_shift(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_255 mask, immIExt lshift, rFlagsReg cr)
%{
match(Set dst (AddI src1 (LShiftI (AndI src2 mask) lshift)));
ins_cost(1.9 * INSN_COST);
format %{ "addw $dst, $src1, $src2, uxtb #lshift" %}
ins_encode %{
__ addw(as_Register($dst$$reg), as_Register($src1$$reg),
as_Register($src2$$reg), ext::uxtb, ($lshift$$constant));
%}
ins_pipe(ialu_reg_reg_shift);
%}
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct AddExtI_uxth_and_shift(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_65535 mask, immIExt lshift, rFlagsReg cr)
%{
match(Set dst (AddI src1 (LShiftI (AndI src2 mask) lshift)));
ins_cost(1.9 * INSN_COST);
format %{ "addw $dst, $src1, $src2, uxth #lshift" %}
ins_encode %{
__ addw(as_Register($dst$$reg), as_Register($src1$$reg),
as_Register($src2$$reg), ext::uxth, ($lshift$$constant));
%}
ins_pipe(ialu_reg_reg_shift);
%}
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct SubExtI_uxtb_and_shift(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_255 mask, immIExt lshift, rFlagsReg cr)
%{
match(Set dst (SubI src1 (LShiftI (AndI src2 mask) lshift)));
ins_cost(1.9 * INSN_COST);
format %{ "subw $dst, $src1, $src2, uxtb #lshift" %}
ins_encode %{
__ subw(as_Register($dst$$reg), as_Register($src1$$reg),
as_Register($src2$$reg), ext::uxtb, ($lshift$$constant));
%}
ins_pipe(ialu_reg_reg_shift);
%}
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct SubExtI_uxth_and_shift(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_65535 mask, immIExt lshift, rFlagsReg cr)
%{
match(Set dst (SubI src1 (LShiftI (AndI src2 mask) lshift)));
ins_cost(1.9 * INSN_COST);
format %{ "subw $dst, $src1, $src2, uxth #lshift" %}
ins_encode %{
__ subw(as_Register($dst$$reg), as_Register($src1$$reg),
as_Register($src2$$reg), ext::uxth, ($lshift$$constant));
%}
ins_pipe(ialu_reg_reg_shift);
%}
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct cmovI_reg_reg_lt(iRegINoSp dst, iRegI src1, iRegI src2, rFlagsReg cr)
%{
effect(DEF dst, USE src1, USE src2, USE cr);
ins_cost(INSN_COST * 2);
format %{ "cselw $dst, $src1, $src2 lt\t" %}
ins_encode %{
__ cselw($dst$$Register,
$src1$$Register,
$src2$$Register,
Assembler::LT);
%}
ins_pipe(icond_reg_reg);
%}
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct cmovI_reg_reg_gt(iRegINoSp dst, iRegI src1, iRegI src2, rFlagsReg cr)
%{
effect(DEF dst, USE src1, USE src2, USE cr);
ins_cost(INSN_COST * 2);
format %{ "cselw $dst, $src1, $src2 gt\t" %}
ins_encode %{
__ cselw($dst$$Register,
$src1$$Register,
$src2$$Register,
Assembler::GT);
%}
ins_pipe(icond_reg_reg);
%}
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct cmovI_reg_imm0_lt(iRegINoSp dst, iRegI src1, rFlagsReg cr)
%{
effect(DEF dst, USE src1, USE cr);
ins_cost(INSN_COST * 2);
format %{ "cselw $dst, $src1, zr lt\t" %}
ins_encode %{
__ cselw($dst$$Register,
$src1$$Register,
zr,
Assembler::LT);
%}
ins_pipe(icond_reg);
%}
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct cmovI_reg_imm0_gt(iRegINoSp dst, iRegI src1, rFlagsReg cr)
%{
effect(DEF dst, USE src1, USE cr);
ins_cost(INSN_COST * 2);
format %{ "cselw $dst, $src1, zr gt\t" %}
ins_encode %{
__ cselw($dst$$Register,
$src1$$Register,
zr,
Assembler::GT);
%}
ins_pipe(icond_reg);
%}
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct cmovI_reg_imm1_le(iRegINoSp dst, iRegI src1, rFlagsReg cr)
%{
effect(DEF dst, USE src1, USE cr);
ins_cost(INSN_COST * 2);
format %{ "csincw $dst, $src1, zr le\t" %}
ins_encode %{
__ csincw($dst$$Register,
$src1$$Register,
zr,
Assembler::LE);
%}
ins_pipe(icond_reg);
%}
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct cmovI_reg_imm1_gt(iRegINoSp dst, iRegI src1, rFlagsReg cr)
%{
effect(DEF dst, USE src1, USE cr);
ins_cost(INSN_COST * 2);
format %{ "csincw $dst, $src1, zr gt\t" %}
ins_encode %{
__ csincw($dst$$Register,
$src1$$Register,
zr,
Assembler::GT);
%}
ins_pipe(icond_reg);
%}
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct cmovI_reg_immM1_lt(iRegINoSp dst, iRegI src1, rFlagsReg cr)
%{
effect(DEF dst, USE src1, USE cr);
ins_cost(INSN_COST * 2);
format %{ "csinvw $dst, $src1, zr lt\t" %}
ins_encode %{
__ csinvw($dst$$Register,
$src1$$Register,
zr,
Assembler::LT);
%}
ins_pipe(icond_reg);
%}
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct cmovI_reg_immM1_ge(iRegINoSp dst, iRegI src1, rFlagsReg cr)
%{
effect(DEF dst, USE src1, USE cr);
ins_cost(INSN_COST * 2);
format %{ "csinvw $dst, $src1, zr ge\t" %}
ins_encode %{
__ csinvw($dst$$Register,
$src1$$Register,
zr,
Assembler::GE);
%}
ins_pipe(icond_reg);
%}
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct minI_reg_imm0(iRegINoSp dst, iRegIorL2I src, immI0 imm)
%{
match(Set dst (MinI src imm));
ins_cost(INSN_COST * 3);
expand %{
rFlagsReg cr;
compI_reg_imm0(cr, src);
cmovI_reg_imm0_lt(dst, src, cr);
%}
%}
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct minI_imm0_reg(iRegINoSp dst, immI0 imm, iRegIorL2I src)
%{
match(Set dst (MinI imm src));
ins_cost(INSN_COST * 3);
expand %{
rFlagsReg cr;
compI_reg_imm0(cr, src);
cmovI_reg_imm0_lt(dst, src, cr);
%}
%}
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct minI_reg_imm1(iRegINoSp dst, iRegIorL2I src, immI_1 imm)
%{
match(Set dst (MinI src imm));
ins_cost(INSN_COST * 3);
expand %{
rFlagsReg cr;
compI_reg_imm0(cr, src);
cmovI_reg_imm1_le(dst, src, cr);
%}
%}
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct minI_imm1_reg(iRegINoSp dst, immI_1 imm, iRegIorL2I src)
%{
match(Set dst (MinI imm src));
ins_cost(INSN_COST * 3);
expand %{
rFlagsReg cr;
compI_reg_imm0(cr, src);
cmovI_reg_imm1_le(dst, src, cr);
%}
%}
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct minI_reg_immM1(iRegINoSp dst, iRegIorL2I src, immI_M1 imm)
%{
match(Set dst (MinI src imm));
ins_cost(INSN_COST * 3);
expand %{
rFlagsReg cr;
compI_reg_imm0(cr, src);
cmovI_reg_immM1_lt(dst, src, cr);
%}
%}
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct minI_immM1_reg(iRegINoSp dst, immI_M1 imm, iRegIorL2I src)
%{
match(Set dst (MinI imm src));
ins_cost(INSN_COST * 3);
expand %{
rFlagsReg cr;
compI_reg_imm0(cr, src);
cmovI_reg_immM1_lt(dst, src, cr);
%}
%}
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct maxI_reg_imm0(iRegINoSp dst, iRegIorL2I src, immI0 imm)
%{
match(Set dst (MaxI src imm));
ins_cost(INSN_COST * 3);
expand %{
rFlagsReg cr;
compI_reg_imm0(cr, src);
cmovI_reg_imm0_gt(dst, src, cr);
%}
%}
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct maxI_imm0_reg(iRegINoSp dst, immI0 imm, iRegIorL2I src)
%{
match(Set dst (MaxI imm src));
ins_cost(INSN_COST * 3);
expand %{
rFlagsReg cr;
compI_reg_imm0(cr, src);
cmovI_reg_imm0_gt(dst, src, cr);
%}
%}
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct maxI_reg_imm1(iRegINoSp dst, iRegIorL2I src, immI_1 imm)
%{
match(Set dst (MaxI src imm));
ins_cost(INSN_COST * 3);
expand %{
rFlagsReg cr;
compI_reg_imm0(cr, src);
cmovI_reg_imm1_gt(dst, src, cr);
%}
%}
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct maxI_imm1_reg(iRegINoSp dst, immI_1 imm, iRegIorL2I src)
%{
match(Set dst (MaxI imm src));
ins_cost(INSN_COST * 3);
expand %{
rFlagsReg cr;
compI_reg_imm0(cr, src);
cmovI_reg_imm1_gt(dst, src, cr);
%}
%}
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct maxI_reg_immM1(iRegINoSp dst, iRegIorL2I src, immI_M1 imm)
%{
match(Set dst (MaxI src imm));
ins_cost(INSN_COST * 3);
expand %{
rFlagsReg cr;
compI_reg_imm0(cr, src);
cmovI_reg_immM1_ge(dst, src, cr);
%}
%}
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct maxI_immM1_reg(iRegINoSp dst, immI_M1 imm, iRegIorL2I src)
%{
match(Set dst (MaxI imm src));
ins_cost(INSN_COST * 3);
expand %{
rFlagsReg cr;
compI_reg_imm0(cr, src);
cmovI_reg_immM1_ge(dst, src, cr);
%}
%}
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct bits_reverse_I(iRegINoSp dst, iRegIorL2I src)
%{
match(Set dst (ReverseI src));
ins_cost(INSN_COST);
format %{ "rbitw $dst, $src" %}
ins_encode %{
__ rbitw($dst$$Register, $src$$Register);
%}
ins_pipe(ialu_reg);
%}
// This pattern is automatically generated from aarch64_ad.m4
// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
instruct bits_reverse_L(iRegLNoSp dst, iRegL src)
%{
match(Set dst (ReverseL src));
ins_cost(INSN_COST);
format %{ "rbit $dst, $src" %}
ins_encode %{
__ rbit($dst$$Register, $src$$Register);
%}
ins_pipe(ialu_reg);
%}
// END This section of the file is automatically generated. Do not edit --------------
// ============================================================================
// Floating Point Arithmetic Instructions
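// n.b. The scalar half-precision (HF) patterns below carry no predicates;
// their availability is assumed to be gated during matching (via
// Matcher::match_rule_supported) on FEAT_FP16 support rather than here.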
instruct addHF_reg_reg(vRegF dst, vRegF src1, vRegF src2) %{
match(Set dst (AddHF src1 src2));
format %{ "faddh $dst, $src1, $src2" %}
ins_encode %{
__ faddh($dst$$FloatRegister,
$src1$$FloatRegister,
$src2$$FloatRegister);
%}
ins_pipe(fp_dop_reg_reg_s);
%}
instruct addF_reg_reg(vRegF dst, vRegF src1, vRegF src2) %{
match(Set dst (AddF src1 src2));
ins_cost(INSN_COST * 5);
format %{ "fadds $dst, $src1, $src2" %}
ins_encode %{
__ fadds(as_FloatRegister($dst$$reg),
as_FloatRegister($src1$$reg),
as_FloatRegister($src2$$reg));
%}
ins_pipe(fp_dop_reg_reg_s);
%}
instruct addD_reg_reg(vRegD dst, vRegD src1, vRegD src2) %{
match(Set dst (AddD src1 src2));
ins_cost(INSN_COST * 5);
format %{ "faddd $dst, $src1, $src2" %}
ins_encode %{
__ faddd(as_FloatRegister($dst$$reg),
as_FloatRegister($src1$$reg),
as_FloatRegister($src2$$reg));
%}
ins_pipe(fp_dop_reg_reg_d);
%}
instruct subHF_reg_reg(vRegF dst, vRegF src1, vRegF src2) %{
match(Set dst (SubHF src1 src2));
format %{ "fsubh $dst, $src1, $src2" %}
ins_encode %{
__ fsubh($dst$$FloatRegister,
$src1$$FloatRegister,
$src2$$FloatRegister);
%}
ins_pipe(fp_dop_reg_reg_s);
%}
instruct subF_reg_reg(vRegF dst, vRegF src1, vRegF src2) %{
match(Set dst (SubF src1 src2));
ins_cost(INSN_COST * 5);
format %{ "fsubs $dst, $src1, $src2" %}
ins_encode %{
__ fsubs(as_FloatRegister($dst$$reg),
as_FloatRegister($src1$$reg),
as_FloatRegister($src2$$reg));
%}
ins_pipe(fp_dop_reg_reg_s);
%}
instruct subD_reg_reg(vRegD dst, vRegD src1, vRegD src2) %{
match(Set dst (SubD src1 src2));
ins_cost(INSN_COST * 5);
format %{ "fsubd $dst, $src1, $src2" %}
ins_encode %{
__ fsubd(as_FloatRegister($dst$$reg),
as_FloatRegister($src1$$reg),
as_FloatRegister($src2$$reg));
%}
ins_pipe(fp_dop_reg_reg_d);
%}
instruct mulHF_reg_reg(vRegF dst, vRegF src1, vRegF src2) %{
match(Set dst (MulHF src1 src2));
format %{ "fmulh $dst, $src1, $src2" %}
ins_encode %{
__ fmulh($dst$$FloatRegister,
$src1$$FloatRegister,
$src2$$FloatRegister);
%}
ins_pipe(fp_dop_reg_reg_s);
%}
instruct mulF_reg_reg(vRegF dst, vRegF src1, vRegF src2) %{
match(Set dst (MulF src1 src2));
ins_cost(INSN_COST * 6);
format %{ "fmuls $dst, $src1, $src2" %}
ins_encode %{
__ fmuls(as_FloatRegister($dst$$reg),
as_FloatRegister($src1$$reg),
as_FloatRegister($src2$$reg));
%}
ins_pipe(fp_dop_reg_reg_s);
%}
instruct mulD_reg_reg(vRegD dst, vRegD src1, vRegD src2) %{
match(Set dst (MulD src1 src2));
ins_cost(INSN_COST * 6);
format %{ "fmuld $dst, $src1, $src2" %}
ins_encode %{
__ fmuld(as_FloatRegister($dst$$reg),
as_FloatRegister($src1$$reg),
as_FloatRegister($src2$$reg));
%}
ins_pipe(fp_dop_reg_reg_d);
%}
// src1 * src2 + src3 (half-precision float)
instruct maddHF_reg_reg(vRegF dst, vRegF src1, vRegF src2, vRegF src3) %{
match(Set dst (FmaHF src3 (Binary src1 src2)));
format %{ "fmaddh $dst, $src1, $src2, $src3" %}
ins_encode %{
assert(UseFMA, "Needs FMA instructions support.");
__ fmaddh($dst$$FloatRegister,
$src1$$FloatRegister,
$src2$$FloatRegister,
$src3$$FloatRegister);
%}
ins_pipe(pipe_class_default);
%}
// src1 * src2 + src3
instruct maddF_reg_reg(vRegF dst, vRegF src1, vRegF src2, vRegF src3) %{
match(Set dst (FmaF src3 (Binary src1 src2)));
format %{ "fmadds $dst, $src1, $src2, $src3" %}
ins_encode %{
assert(UseFMA, "Needs FMA instructions support.");
__ fmadds(as_FloatRegister($dst$$reg),
as_FloatRegister($src1$$reg),
as_FloatRegister($src2$$reg),
as_FloatRegister($src3$$reg));
%}
ins_pipe(pipe_class_default);
%}
// src1 * src2 + src3
instruct maddD_reg_reg(vRegD dst, vRegD src1, vRegD src2, vRegD src3) %{
match(Set dst (FmaD src3 (Binary src1 src2)));
format %{ "fmaddd $dst, $src1, $src2, $src3" %}
ins_encode %{
assert(UseFMA, "Needs FMA instructions support.");
__ fmaddd(as_FloatRegister($dst$$reg),
as_FloatRegister($src1$$reg),
as_FloatRegister($src2$$reg),
as_FloatRegister($src3$$reg));
%}
ins_pipe(pipe_class_default);
%}
// src1 * (-src2) + src3
// "(-src1) * src2 + src3" has been idealized to "src2 * (-src1) + src3"
instruct msubF_reg_reg(vRegF dst, vRegF src1, vRegF src2, vRegF src3) %{
match(Set dst (FmaF src3 (Binary src1 (NegF src2))));
format %{ "fmsubs $dst, $src1, $src2, $src3" %}
ins_encode %{
assert(UseFMA, "Needs FMA instructions support.");
__ fmsubs(as_FloatRegister($dst$$reg),
as_FloatRegister($src1$$reg),
as_FloatRegister($src2$$reg),
as_FloatRegister($src3$$reg));
%}
ins_pipe(pipe_class_default);
%}
// src1 * (-src2) + src3
// "(-src1) * src2 + src3" has been idealized to "src2 * (-src1) + src3"
instruct msubD_reg_reg(vRegD dst, vRegD src1, vRegD src2, vRegD src3) %{
match(Set dst (FmaD src3 (Binary src1 (NegD src2))));
format %{ "fmsubd $dst, $src1, $src2, $src3" %}
ins_encode %{
assert(UseFMA, "Needs FMA instructions support.");
__ fmsubd(as_FloatRegister($dst$$reg),
as_FloatRegister($src1$$reg),
as_FloatRegister($src2$$reg),
as_FloatRegister($src3$$reg));
%}
ins_pipe(pipe_class_default);
%}
// src1 * (-src2) - src3
// "(-src1) * src2 - src3" has been idealized to "src2 * (-src1) - src3"
instruct mnaddF_reg_reg(vRegF dst, vRegF src1, vRegF src2, vRegF src3) %{
match(Set dst (FmaF (NegF src3) (Binary src1 (NegF src2))));
format %{ "fnmadds $dst, $src1, $src2, $src3" %}
ins_encode %{
assert(UseFMA, "Needs FMA instructions support.");
__ fnmadds(as_FloatRegister($dst$$reg),
as_FloatRegister($src1$$reg),
as_FloatRegister($src2$$reg),
as_FloatRegister($src3$$reg));
%}
ins_pipe(pipe_class_default);
%}
// src1 * (-src2) - src3
// "(-src1) * src2 - src3" has been idealized to "src2 * (-src1) - src3"
instruct mnaddD_reg_reg(vRegD dst, vRegD src1, vRegD src2, vRegD src3) %{
match(Set dst (FmaD (NegD src3) (Binary src1 (NegD src2))));
format %{ "fnmaddd $dst, $src1, $src2, $src3" %}
ins_encode %{
assert(UseFMA, "Needs FMA instructions support.");
__ fnmaddd(as_FloatRegister($dst$$reg),
as_FloatRegister($src1$$reg),
as_FloatRegister($src2$$reg),
as_FloatRegister($src3$$reg));
%}
ins_pipe(pipe_class_default);
%}
// src1 * src2 - src3
instruct mnsubF_reg_reg(vRegF dst, vRegF src1, vRegF src2, vRegF src3, immF0 zero) %{
match(Set dst (FmaF (NegF src3) (Binary src1 src2)));
format %{ "fnmsubs $dst, $src1, $src2, $src3" %}
ins_encode %{
assert(UseFMA, "Needs FMA instructions support.");
__ fnmsubs(as_FloatRegister($dst$$reg),
as_FloatRegister($src1$$reg),
as_FloatRegister($src2$$reg),
as_FloatRegister($src3$$reg));
%}
ins_pipe(pipe_class_default);
%}
// src1 * src2 - src3
instruct mnsubD_reg_reg(vRegD dst, vRegD src1, vRegD src2, vRegD src3, immD0 zero) %{
match(Set dst (FmaD (NegD src3) (Binary src1 src2)));
format %{ "fnmsubd $dst, $src1, $src2, $src3" %}
ins_encode %{
assert(UseFMA, "Needs FMA instructions support.");
// n.b. the emitted insn is fnmsubd; the assembler entry point is named fnmsub
__ fnmsub(as_FloatRegister($dst$$reg),
as_FloatRegister($src1$$reg),
as_FloatRegister($src2$$reg),
as_FloatRegister($src3$$reg));
%}
ins_pipe(pipe_class_default);
%}
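// Summary of the scalar FMA selections above, in AArch64 terms:
//   fmadd  d, n, m, a  =>  d =  a + n*m
//   fmsub  d, n, m, a  =>  d =  a - n*m
//   fnmadd d, n, m, a  =>  d = -a - n*m
//   fnmsub d, n, m, a  =>  d = -a + n*m
// The match rules simply fold the NegF/NegD nodes of the ideal graph into
// the sign of the accumulator and/or multiplier operand.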
// Math.max(HH)H (half-precision float)
instruct maxHF_reg_reg(vRegF dst, vRegF src1, vRegF src2) %{
match(Set dst (MaxHF src1 src2));
format %{ "fmaxh $dst, $src1, $src2" %}
ins_encode %{
__ fmaxh($dst$$FloatRegister,
$src1$$FloatRegister,
$src2$$FloatRegister);
%}
ins_pipe(fp_dop_reg_reg_s);
%}
// Math.min(HH)H (half-precision float)
instruct minHF_reg_reg(vRegF dst, vRegF src1, vRegF src2) %{
match(Set dst (MinHF src1 src2));
format %{ "fminh $dst, $src1, $src2" %}
ins_encode %{
__ fminh($dst$$FloatRegister,
$src1$$FloatRegister,
$src2$$FloatRegister);
%}
ins_pipe(fp_dop_reg_reg_s);
%}
// Math.max(FF)F
instruct maxF_reg_reg(vRegF dst, vRegF src1, vRegF src2) %{
match(Set dst (MaxF src1 src2));
format %{ "fmaxs $dst, $src1, $src2" %}
ins_encode %{
__ fmaxs(as_FloatRegister($dst$$reg),
as_FloatRegister($src1$$reg),
as_FloatRegister($src2$$reg));
%}
ins_pipe(fp_dop_reg_reg_s);
%}
// Math.min(FF)F
instruct minF_reg_reg(vRegF dst, vRegF src1, vRegF src2) %{
match(Set dst (MinF src1 src2));
format %{ "fmins $dst, $src1, $src2" %}
ins_encode %{
__ fmins(as_FloatRegister($dst$$reg),
as_FloatRegister($src1$$reg),
as_FloatRegister($src2$$reg));
%}
ins_pipe(fp_dop_reg_reg_s);
%}
// Math.max(DD)D
instruct maxD_reg_reg(vRegD dst, vRegD src1, vRegD src2) %{
match(Set dst (MaxD src1 src2));
format %{ "fmaxd $dst, $src1, $src2" %}
ins_encode %{
__ fmaxd(as_FloatRegister($dst$$reg),
as_FloatRegister($src1$$reg),
as_FloatRegister($src2$$reg));
%}
ins_pipe(fp_dop_reg_reg_d);
%}
// Math.min(DD)D
instruct minD_reg_reg(vRegD dst, vRegD src1, vRegD src2) %{
match(Set dst (MinD src1 src2));
format %{ "fmind $dst, $src1, $src2" %}
ins_encode %{
__ fmind(as_FloatRegister($dst$$reg),
as_FloatRegister($src1$$reg),
as_FloatRegister($src2$$reg));
%}
ins_pipe(fp_dop_reg_reg_d);
%}
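// n.b. fmin/fmax propagate NaN if either input is NaN and order -0.0 below
// +0.0, which matches the semantics of Math.min/Math.max, so no fix-up
// sequence is needed around them.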
instruct divHF_reg_reg(vRegF dst, vRegF src1, vRegF src2) %{
match(Set dst (DivHF src1 src2));
format %{ "fdivh $dst, $src1, $src2" %}
ins_encode %{
__ fdivh($dst$$FloatRegister,
$src1$$FloatRegister,
$src2$$FloatRegister);
%}
ins_pipe(fp_div_s);
%}
instruct divF_reg_reg(vRegF dst, vRegF src1, vRegF src2) %{
match(Set dst (DivF src1 src2));
ins_cost(INSN_COST * 18);
format %{ "fdivs $dst, $src1, $src2" %}
ins_encode %{
__ fdivs(as_FloatRegister($dst$$reg),
as_FloatRegister($src1$$reg),
as_FloatRegister($src2$$reg));
%}
ins_pipe(fp_div_s);
%}
instruct divD_reg_reg(vRegD dst, vRegD src1, vRegD src2) %{
match(Set dst (DivD src1 src2));
ins_cost(INSN_COST * 32);
format %{ "fdivd $dst, $src1, $src2" %}
ins_encode %{
__ fdivd(as_FloatRegister($dst$$reg),
as_FloatRegister($src1$$reg),
as_FloatRegister($src2$$reg));
%}
ins_pipe(fp_div_d);
%}
instruct negF_reg_reg(vRegF dst, vRegF src) %{
match(Set dst (NegF src));
ins_cost(INSN_COST * 3);
format %{ "fneg $dst, $src" %}
ins_encode %{
__ fnegs(as_FloatRegister($dst$$reg),
as_FloatRegister($src$$reg));
%}
ins_pipe(fp_uop_s);
%}
instruct negD_reg_reg(vRegD dst, vRegD src) %{
match(Set dst (NegD src));
ins_cost(INSN_COST * 3);
format %{ "fnegd $dst, $src" %}
ins_encode %{
__ fnegd(as_FloatRegister($dst$$reg),
as_FloatRegister($src$$reg));
%}
ins_pipe(fp_uop_d);
%}
instruct absI_reg(iRegINoSp dst, iRegIorL2I src, rFlagsReg cr)
%{
match(Set dst (AbsI src));
effect(KILL cr);
ins_cost(INSN_COST * 2);
format %{ "cmpw $src, zr\n\t"
"cnegw $dst, $src, Assembler::LT\t# int abs"
%}
ins_encode %{
__ cmpw(as_Register($src$$reg), zr);
__ cnegw(as_Register($dst$$reg), as_Register($src$$reg), Assembler::LT);
%}
ins_pipe(pipe_class_default);
%}
instruct absL_reg(iRegLNoSp dst, iRegL src, rFlagsReg cr)
%{
match(Set dst (AbsL src));
effect(KILL cr);
ins_cost(INSN_COST * 2);
format %{ "cmp $src, zr\n\t"
"cneg $dst, $src, Assembler::LT\t# long abs"
%}
ins_encode %{
__ cmp(as_Register($src$$reg), zr);
__ cneg(as_Register($dst$$reg), as_Register($src$$reg), Assembler::LT);
%}
ins_pipe(pipe_class_default);
%}
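// n.b. in the two integer abs rules above, cneg(dst, src, LT) is csneg with
// src in both selected positions: after the compare against zr it yields
// dst = (src < 0) ? -src : src, a branchless two-instruction abs.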
instruct absF_reg(vRegF dst, vRegF src) %{
match(Set dst (AbsF src));
ins_cost(INSN_COST * 3);
format %{ "fabss $dst, $src" %}
ins_encode %{
__ fabss(as_FloatRegister($dst$$reg),
as_FloatRegister($src$$reg));
%}
ins_pipe(fp_uop_s);
%}
instruct absD_reg(vRegD dst, vRegD src) %{
match(Set dst (AbsD src));
ins_cost(INSN_COST * 3);
format %{ "fabsd $dst, $src" %}
ins_encode %{
__ fabsd(as_FloatRegister($dst$$reg),
as_FloatRegister($src$$reg));
%}
ins_pipe(fp_uop_d);
%}
instruct absdF_reg(vRegF dst, vRegF src1, vRegF src2) %{
match(Set dst (AbsF (SubF src1 src2)));
ins_cost(INSN_COST * 3);
format %{ "fabds $dst, $src1, $src2" %}
ins_encode %{
__ fabds(as_FloatRegister($dst$$reg),
as_FloatRegister($src1$$reg),
as_FloatRegister($src2$$reg));
%}
ins_pipe(fp_uop_s);
%}
instruct absdD_reg(vRegD dst, vRegD src1, vRegD src2) %{
match(Set dst (AbsD (SubD src1 src2)));
ins_cost(INSN_COST * 3);
format %{ "fabdd $dst, $src1, $src2" %}
ins_encode %{
__ fabdd(as_FloatRegister($dst$$reg),
as_FloatRegister($src1$$reg),
as_FloatRegister($src2$$reg));
%}
ins_pipe(fp_uop_d);
%}
instruct sqrtD_reg(vRegD dst, vRegD src) %{
match(Set dst (SqrtD src));
ins_cost(INSN_COST * 50);
format %{ "fsqrtd $dst, $src" %}
ins_encode %{
__ fsqrtd(as_FloatRegister($dst$$reg),
as_FloatRegister($src$$reg));
%}
ins_pipe(fp_div_d);
%}
instruct sqrtF_reg(vRegF dst, vRegF src) %{
match(Set dst (SqrtF src));
ins_cost(INSN_COST * 50);
format %{ "fsqrts $dst, $src" %}
ins_encode %{
__ fsqrts(as_FloatRegister($dst$$reg),
as_FloatRegister($src$$reg));
%}
ins_pipe(fp_div_s);
%}
instruct sqrtHF_reg(vRegF dst, vRegF src) %{
match(Set dst (SqrtHF src));
format %{ "fsqrth $dst, $src" %}
ins_encode %{
__ fsqrth($dst$$FloatRegister,
$src$$FloatRegister);
%}
ins_pipe(fp_div_s);
%}
// Math.rint, floor, ceil
instruct roundD_reg(vRegD dst, vRegD src, immI rmode) %{
match(Set dst (RoundDoubleMode src rmode));
format %{ "frint $dst, $src, $rmode" %}
ins_encode %{
switch ($rmode$$constant) {
case RoundDoubleModeNode::rmode_rint:
__ frintnd(as_FloatRegister($dst$$reg),
as_FloatRegister($src$$reg));
break;
case RoundDoubleModeNode::rmode_floor:
__ frintmd(as_FloatRegister($dst$$reg),
as_FloatRegister($src$$reg));
break;
case RoundDoubleModeNode::rmode_ceil:
__ frintpd(as_FloatRegister($dst$$reg),
as_FloatRegister($src$$reg));
break;
}
%}
ins_pipe(fp_uop_d);
%}
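// n.b. the three RoundDoubleMode flavours map directly onto hardware
// rounding modes: frintn (ties-to-even) implements Math.rint, frintm
// (towards minus infinity) implements floor, and frintp (towards plus
// infinity) implements ceil.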
instruct copySignD_reg(vRegD dst, vRegD src1, vRegD src2, vRegD zero) %{
match(Set dst (CopySignD src1 (Binary src2 zero)));
effect(TEMP_DEF dst, USE src1, USE src2, USE zero);
format %{ "CopySignD $dst $src1 $src2" %}
ins_encode %{
FloatRegister dst = as_FloatRegister($dst$$reg),
src1 = as_FloatRegister($src1$$reg),
src2 = as_FloatRegister($src2$$reg),
zero = as_FloatRegister($zero$$reg);
__ fnegd(dst, zero);
__ bsl(dst, __ T8B, src2, src1);
%}
ins_pipe(fp_uop_d);
%}
instruct copySignF_reg(vRegF dst, vRegF src1, vRegF src2) %{
match(Set dst (CopySignF src1 src2));
effect(TEMP_DEF dst, USE src1, USE src2);
format %{ "CopySignF $dst $src1 $src2" %}
ins_encode %{
FloatRegister dst = as_FloatRegister($dst$$reg),
src1 = as_FloatRegister($src1$$reg),
src2 = as_FloatRegister($src2$$reg);
__ movi(dst, __ T2S, 0x80, 24);
__ bsl(dst, __ T8B, src2, src1);
%}
ins_pipe(fp_uop_d);
%}
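// In both CopySign rules the destination is first loaded with a mask that
// has only the sign bit set per lane: fnegd of a register holding +0.0
// yields 0x8000000000000000, and movi(T2S, 0x80, 24) yields 0x80000000 per
// lane. The bsl that follows then takes the sign bit from $src2 and every
// other bit from $src1.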
instruct signumD_reg(vRegD dst, vRegD src, vRegD zero, vRegD one) %{
match(Set dst (SignumD src (Binary zero one)));
effect(TEMP_DEF dst, USE src, USE zero, USE one);
format %{ "signumD $dst, $src" %}
ins_encode %{
FloatRegister src = as_FloatRegister($src$$reg),
dst = as_FloatRegister($dst$$reg),
zero = as_FloatRegister($zero$$reg),
one = as_FloatRegister($one$$reg);
__ facgtd(dst, src, zero); // dst=0 for +-0.0 and NaN. 0xFFF..F otherwise
__ ushrd(dst, dst, 1); // dst=0 for +-0.0 and NaN. 0x7FF..F otherwise
// Bit selection instruction gets bit from "one" for each enabled bit in
// "dst", otherwise gets a bit from "src". For "src" that contains +-0.0 or
// NaN the whole "src" will be copied because "dst" is zero. For all other
// "src" values dst is 0x7FF..F, which means only the sign bit is copied
// from "src", and all other bits are copied from 1.0.
__ bsl(dst, __ T8B, one, src);
%}
ins_pipe(fp_uop_d);
%}
instruct signumF_reg(vRegF dst, vRegF src, vRegF zero, vRegF one) %{
match(Set dst (SignumF src (Binary zero one)));
effect(TEMP_DEF dst, USE src, USE zero, USE one);
format %{ "signumF $dst, $src" %}
ins_encode %{
FloatRegister src = as_FloatRegister($src$$reg),
dst = as_FloatRegister($dst$$reg),
zero = as_FloatRegister($zero$$reg),
one = as_FloatRegister($one$$reg);
__ facgts(dst, src, zero); // dst=0 for +-0.0 and NaN. 0xFFF..F otherwise
__ ushr(dst, __ T2S, dst, 1); // dst=0 for +-0.0 and NaN. 0x7FF..F otherwise
// Bit selection instruction gets bit from "one" for each enabled bit in
// "dst", otherwise gets a bit from "src". For "src" that contains +-0.0 or
// NaN the whole "src" will be copied because "dst" is zero. For all other
// "src" values dst is 0x7FF..F, which means only the sign bit is copied
// from "src", and all other bits are copied from 1.0.
__ bsl(dst, __ T8B, one, src);
%}
ins_pipe(fp_uop_d);
%}
instruct onspinwait() %{
match(OnSpinWait);
ins_cost(INSN_COST);
format %{ "onspinwait" %}
ins_encode %{
__ spin_wait();
%}
ins_pipe(pipe_class_empty);
%}
// ============================================================================
// Logical Instructions
// Integer Logical Instructions
// And Instructions
instruct andI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, rFlagsReg cr) %{
match(Set dst (AndI src1 src2));
format %{ "andw $dst, $src1, $src2\t# int" %}
ins_cost(INSN_COST);
ins_encode %{
__ andw(as_Register($dst$$reg),
as_Register($src1$$reg),
as_Register($src2$$reg));
%}
ins_pipe(ialu_reg_reg);
%}
instruct andI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immILog src2, rFlagsReg cr) %{
match(Set dst (AndI src1 src2));
format %{ "andsw $dst, $src1, $src2\t# int" %}
ins_cost(INSN_COST);
ins_encode %{
__ andw(as_Register($dst$$reg),
as_Register($src1$$reg),
(uint64_t)($src2$$constant));
%}
ins_pipe(ialu_reg_imm);
%}
// Or Instructions
instruct orI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
match(Set dst (OrI src1 src2));
format %{ "orrw $dst, $src1, $src2\t# int" %}
ins_cost(INSN_COST);
ins_encode %{
__ orrw(as_Register($dst$$reg),
as_Register($src1$$reg),
as_Register($src2$$reg));
%}
ins_pipe(ialu_reg_reg);
%}
instruct orI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immILog src2) %{
match(Set dst (OrI src1 src2));
format %{ "orrw $dst, $src1, $src2\t# int" %}
ins_cost(INSN_COST);
ins_encode %{
__ orrw(as_Register($dst$$reg),
as_Register($src1$$reg),
(uint64_t)($src2$$constant));
%}
ins_pipe(ialu_reg_imm);
%}
// Xor Instructions
instruct xorI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{
match(Set dst (XorI src1 src2));
format %{ "eorw $dst, $src1, $src2\t# int" %}
ins_cost(INSN_COST);
ins_encode %{
__ eorw(as_Register($dst$$reg),
as_Register($src1$$reg),
as_Register($src2$$reg));
%}
ins_pipe(ialu_reg_reg);
%}
instruct xorI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immILog src2) %{
match(Set dst (XorI src1 src2));
format %{ "eorw $dst, $src1, $src2\t# int" %}
ins_cost(INSN_COST);
ins_encode %{
__ eorw(as_Register($dst$$reg),
as_Register($src1$$reg),
(uint64_t)($src2$$constant));
%}
ins_pipe(ialu_reg_imm);
%}
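// n.b. the immILog/immLLog operands above only accept values encodable as
// AArch64 bitmask immediates; any other constant is materialized into a
// register and picked up by the reg_reg forms instead.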
// Long Logical Instructions
// TODO
instruct andL_reg_reg(iRegLNoSp dst, iRegL src1, iRegL src2, rFlagsReg cr) %{
match(Set dst (AndL src1 src2));
format %{ "and $dst, $src1, $src2\t# int" %}
ins_cost(INSN_COST);
ins_encode %{
__ andr(as_Register($dst$$reg),
as_Register($src1$$reg),
as_Register($src2$$reg));
%}
ins_pipe(ialu_reg_reg);
%}
instruct andL_reg_imm(iRegLNoSp dst, iRegL src1, immLLog src2, rFlagsReg cr) %{
match(Set dst (AndL src1 src2));
format %{ "and $dst, $src1, $src2\t# int" %}
ins_cost(INSN_COST);
ins_encode %{
__ andr(as_Register($dst$$reg),
as_Register($src1$$reg),
(uint64_t)($src2$$constant));
%}
ins_pipe(ialu_reg_imm);
%}
// Or Instructions
instruct orL_reg_reg(iRegLNoSp dst, iRegL src1, iRegL src2) %{
match(Set dst (OrL src1 src2));
format %{ "orr $dst, $src1, $src2\t# int" %}
ins_cost(INSN_COST);
ins_encode %{
__ orr(as_Register($dst$$reg),
as_Register($src1$$reg),
as_Register($src2$$reg));
%}
ins_pipe(ialu_reg_reg);
%}
instruct orL_reg_imm(iRegLNoSp dst, iRegL src1, immLLog src2) %{
match(Set dst (OrL src1 src2));
format %{ "orr $dst, $src1, $src2\t# int" %}
ins_cost(INSN_COST);
ins_encode %{
__ orr(as_Register($dst$$reg),
as_Register($src1$$reg),
(uint64_t)($src2$$constant));
%}
ins_pipe(ialu_reg_imm);
%}
// Xor Instructions
instruct xorL_reg_reg(iRegLNoSp dst, iRegL src1, iRegL src2) %{
match(Set dst (XorL src1 src2));
format %{ "eor $dst, $src1, $src2\t# int" %}
ins_cost(INSN_COST);
ins_encode %{
__ eor(as_Register($dst$$reg),
as_Register($src1$$reg),
as_Register($src2$$reg));
%}
ins_pipe(ialu_reg_reg);
%}
instruct xorL_reg_imm(iRegLNoSp dst, iRegL src1, immLLog src2) %{
match(Set dst (XorL src1 src2));
ins_cost(INSN_COST);
format %{ "eor $dst, $src1, $src2\t# int" %}
ins_encode %{
__ eor(as_Register($dst$$reg),
as_Register($src1$$reg),
(uint64_t)($src2$$constant));
%}
ins_pipe(ialu_reg_imm);
%}
instruct convI2L_reg_reg(iRegLNoSp dst, iRegIorL2I src)
%{
match(Set dst (ConvI2L src));
ins_cost(INSN_COST);
format %{ "sxtw $dst, $src\t# i2l" %}
ins_encode %{
__ sbfm($dst$$Register, $src$$Register, 0, 31);
%}
ins_pipe(ialu_reg_shift);
%}
// This pattern occurs in big-integer (java.math.BigInteger) arithmetic.
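// e.g. "long u = ((long) x) & 0xFFFFFFFFL" collapses to the single ubfm
// below, which zero-extends the low 32 bits in one instruction instead of
// a sign extend followed by an and.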
instruct convUI2L_reg_reg(iRegLNoSp dst, iRegIorL2I src, immL_32bits mask)
%{
match(Set dst (AndL (ConvI2L src) mask));
ins_cost(INSN_COST);
format %{ "ubfm $dst, $src, 0, 31\t# ui2l" %}
ins_encode %{
__ ubfm($dst$$Register, $src$$Register, 0, 31);
%}
ins_pipe(ialu_reg_shift);
%}
instruct convL2I_reg(iRegINoSp dst, iRegL src) %{
match(Set dst (ConvL2I src));
ins_cost(INSN_COST);
format %{ "movw $dst, $src \t// l2i" %}
ins_encode %{
__ movw(as_Register($dst$$reg), as_Register($src$$reg));
%}
ins_pipe(ialu_reg);
%}
instruct convD2F_reg(vRegF dst, vRegD src) %{
match(Set dst (ConvD2F src));
ins_cost(INSN_COST * 5);
format %{ "fcvtd $dst, $src \t// d2f" %}
ins_encode %{
__ fcvtd(as_FloatRegister($dst$$reg), as_FloatRegister($src$$reg));
%}
ins_pipe(fp_d2f);
%}
instruct convF2D_reg(vRegD dst, vRegF src) %{
match(Set dst (ConvF2D src));
ins_cost(INSN_COST * 5);
format %{ "fcvts $dst, $src \t// f2d" %}
ins_encode %{
__ fcvts(as_FloatRegister($dst$$reg), as_FloatRegister($src$$reg));
%}
ins_pipe(fp_f2d);
%}
instruct convF2I_reg_reg(iRegINoSp dst, vRegF src) %{
match(Set dst (ConvF2I src));
ins_cost(INSN_COST * 5);
format %{ "fcvtzsw $dst, $src \t// f2i" %}
ins_encode %{
__ fcvtzsw(as_Register($dst$$reg), as_FloatRegister($src$$reg));
%}
ins_pipe(fp_f2i);
%}
instruct convF2L_reg_reg(iRegLNoSp dst, vRegF src) %{
match(Set dst (ConvF2L src));
ins_cost(INSN_COST * 5);
format %{ "fcvtzs $dst, $src \t// f2l" %}
ins_encode %{
__ fcvtzs(as_Register($dst$$reg), as_FloatRegister($src$$reg));
%}
ins_pipe(fp_f2l);
%}
instruct convF2HF_reg_reg(iRegINoSp dst, vRegF src, vRegF tmp) %{
match(Set dst (ConvF2HF src));
format %{ "fcvt $tmp, $src\t# convert single to half precision\n\t"
"smov $dst, $tmp\t# move result from $tmp to $dst"
%}
effect(TEMP tmp);
ins_encode %{
__ flt_to_flt16($dst$$Register, $src$$FloatRegister, $tmp$$FloatRegister);
%}
ins_pipe(pipe_slow);
%}
instruct convHF2F_reg_reg(vRegF dst, iRegINoSp src, vRegF tmp) %{
match(Set dst (ConvHF2F src));
format %{ "mov $tmp, $src\t# move source from $src to $tmp\n\t"
"fcvt $dst, $tmp\t# convert half to single precision"
%}
effect(TEMP tmp);
ins_encode %{
__ flt16_to_flt($dst$$FloatRegister, $src$$Register, $tmp$$FloatRegister);
%}
ins_pipe(pipe_slow);
%}
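// n.b. ConvF2HF/ConvHF2F implement Float.floatToFloat16 and
// Float.float16ToFloat; the value is staged through an FP temporary because
// fcvt operates on SIMD/FP registers while the Java float16 bits live in a
// general register.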
instruct convI2F_reg_reg(vRegF dst, iRegIorL2I src) %{
match(Set dst (ConvI2F src));
ins_cost(INSN_COST * 5);
format %{ "scvtfws $dst, $src \t// i2f" %}
ins_encode %{
__ scvtfws(as_FloatRegister($dst$$reg), as_Register($src$$reg));
%}
ins_pipe(fp_i2f);
%}
instruct convL2F_reg_reg(vRegF dst, iRegL src) %{
match(Set dst (ConvL2F src));
ins_cost(INSN_COST * 5);
format %{ "scvtfs $dst, $src \t// l2f" %}
ins_encode %{
__ scvtfs(as_FloatRegister($dst$$reg), as_Register($src$$reg));
%}
ins_pipe(fp_l2f);
%}
instruct convD2I_reg_reg(iRegINoSp dst, vRegD src) %{
match(Set dst (ConvD2I src));
ins_cost(INSN_COST * 5);
format %{ "fcvtzdw $dst, $src \t// d2i" %}
ins_encode %{
__ fcvtzdw(as_Register($dst$$reg), as_FloatRegister($src$$reg));
%}
ins_pipe(fp_d2i);
%}
instruct convD2L_reg_reg(iRegLNoSp dst, vRegD src) %{
match(Set dst (ConvD2L src));
ins_cost(INSN_COST * 5);
format %{ "fcvtzd $dst, $src \t// d2l" %}
ins_encode %{
__ fcvtzd(as_Register($dst$$reg), as_FloatRegister($src$$reg));
%}
ins_pipe(fp_d2l);
%}
instruct convI2D_reg_reg(vRegD dst, iRegIorL2I src) %{
match(Set dst (ConvI2D src));
ins_cost(INSN_COST * 5);
format %{ "scvtfwd $dst, $src \t// i2d" %}
ins_encode %{
__ scvtfwd(as_FloatRegister($dst$$reg), as_Register($src$$reg));
%}
ins_pipe(fp_i2d);
%}
instruct convL2D_reg_reg(vRegD dst, iRegL src) %{
match(Set dst (ConvL2D src));
ins_cost(INSN_COST * 5);
format %{ "scvtfd $dst, $src \t// l2d" %}
ins_encode %{
__ scvtfd(as_FloatRegister($dst$$reg), as_Register($src$$reg));
%}
ins_pipe(fp_l2d);
%}
instruct round_double_reg(iRegLNoSp dst, vRegD src, vRegD ftmp, rFlagsReg cr)
%{
match(Set dst (RoundD src));
effect(TEMP_DEF dst, TEMP ftmp, KILL cr);
format %{ "java_round_double $dst,$src"%}
ins_encode %{
__ java_round_double($dst$$Register, as_FloatRegister($src$$reg),
as_FloatRegister($ftmp$$reg));
%}
ins_pipe(pipe_slow);
%}
instruct round_float_reg(iRegINoSp dst, vRegF src, vRegF ftmp, rFlagsReg cr)
%{
match(Set dst (RoundF src));
effect(TEMP_DEF dst, TEMP ftmp, KILL cr);
format %{ "java_round_float $dst,$src"%}
ins_encode %{
__ java_round_float($dst$$Register, as_FloatRegister($src$$reg),
as_FloatRegister($ftmp$$reg));
%}
ins_pipe(pipe_slow);
%}
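// n.b. Math.round rounds ties towards positive infinity, which no single
// frint rounding mode provides (frinta rounds ties away from zero), hence
// the macro-assembler helpers and the FP temporary above.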
// stack <-> reg and reg <-> reg shuffles with no conversion
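// (these correspond to Float.floatToRawIntBits, Double.doubleToRawLongBits
// and their inverses; the bit pattern is moved unchanged)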
instruct MoveF2I_stack_reg(iRegINoSp dst, stackSlotF src) %{
match(Set dst (MoveF2I src));
effect(DEF dst, USE src);
ins_cost(4 * INSN_COST);
format %{ "ldrw $dst, $src\t# MoveF2I_stack_reg" %}
ins_encode %{
__ ldrw($dst$$Register, Address(sp, $src$$disp));
%}
ins_pipe(iload_reg_reg);
%}
instruct MoveI2F_stack_reg(vRegF dst, stackSlotI src) %{
match(Set dst (MoveI2F src));
effect(DEF dst, USE src);
ins_cost(4 * INSN_COST);
format %{ "ldrs $dst, $src\t# MoveI2F_stack_reg" %}
ins_encode %{
__ ldrs(as_FloatRegister($dst$$reg), Address(sp, $src$$disp));
%}
ins_pipe(pipe_class_memory);
%}
instruct MoveD2L_stack_reg(iRegLNoSp dst, stackSlotD src) %{
match(Set dst (MoveD2L src));
effect(DEF dst, USE src);
ins_cost(4 * INSN_COST);
format %{ "ldr $dst, $src\t# MoveD2L_stack_reg" %}
ins_encode %{
__ ldr($dst$$Register, Address(sp, $src$$disp));
%}
ins_pipe(iload_reg_reg);
%}
instruct MoveL2D_stack_reg(vRegD dst, stackSlotL src) %{
match(Set dst (MoveL2D src));
effect(DEF dst, USE src);
ins_cost(4 * INSN_COST);
format %{ "ldrd $dst, $src\t# MoveL2D_stack_reg" %}
ins_encode %{
__ ldrd(as_FloatRegister($dst$$reg), Address(sp, $src$$disp));
%}
ins_pipe(pipe_class_memory);
%}
instruct MoveF2I_reg_stack(stackSlotI dst, vRegF src) %{
match(Set dst (MoveF2I src));
effect(DEF dst, USE src);
ins_cost(INSN_COST);
format %{ "strs $src, $dst\t# MoveF2I_reg_stack" %}
ins_encode %{
__ strs(as_FloatRegister($src$$reg), Address(sp, $dst$$disp));
%}
ins_pipe(pipe_class_memory);
%}
instruct MoveI2F_reg_stack(stackSlotF dst, iRegI src) %{
match(Set dst (MoveI2F src));
effect(DEF dst, USE src);
ins_cost(INSN_COST);
format %{ "strw $src, $dst\t# MoveI2F_reg_stack" %}
ins_encode %{
__ strw($src$$Register, Address(sp, $dst$$disp));
%}
ins_pipe(istore_reg_reg);
%}
instruct MoveD2L_reg_stack(stackSlotL dst, vRegD src) %{
match(Set dst (MoveD2L src));
effect(DEF dst, USE src);
ins_cost(INSN_COST);
format %{ "strd $dst, $src\t# MoveD2L_reg_stack" %}
ins_encode %{
__ strd(as_FloatRegister($src$$reg), Address(sp, $dst$$disp));
%}
ins_pipe(pipe_class_memory);
%}
instruct MoveL2D_reg_stack(stackSlotD dst, iRegL src) %{
match(Set dst (MoveL2D src));
effect(DEF dst, USE src);
ins_cost(INSN_COST);
format %{ "str $src, $dst\t# MoveL2D_reg_stack" %}
ins_encode %{
__ str($src$$Register, Address(sp, $dst$$disp));
%}
ins_pipe(istore_reg_reg);
%}
instruct MoveF2I_reg_reg(iRegINoSp dst, vRegF src) %{
match(Set dst (MoveF2I src));
effect(DEF dst, USE src);
ins_cost(INSN_COST);
format %{ "fmovs $dst, $src\t# MoveF2I_reg_reg" %}
ins_encode %{
__ fmovs($dst$$Register, as_FloatRegister($src$$reg));
%}
ins_pipe(fp_f2i);
%}
instruct MoveI2F_reg_reg(vRegF dst, iRegI src) %{
match(Set dst (MoveI2F src));
effect(DEF dst, USE src);
ins_cost(INSN_COST);
format %{ "fmovs $dst, $src\t# MoveI2F_reg_reg" %}
ins_encode %{
__ fmovs(as_FloatRegister($dst$$reg), $src$$Register);
%}
ins_pipe(fp_i2f);
%}
instruct MoveD2L_reg_reg(iRegLNoSp dst, vRegD src) %{
match(Set dst (MoveD2L src));
effect(DEF dst, USE src);
ins_cost(INSN_COST);
format %{ "fmovd $dst, $src\t# MoveD2L_reg_reg" %}
ins_encode %{
__ fmovd($dst$$Register, as_FloatRegister($src$$reg));
%}
ins_pipe(fp_d2l);
%}
instruct MoveL2D_reg_reg(vRegD dst, iRegL src) %{
match(Set dst (MoveL2D src));
effect(DEF dst, USE src);
ins_cost(INSN_COST);
format %{ "fmovd $dst, $src\t# MoveL2D_reg_reg" %}
ins_encode %{
__ fmovd(as_FloatRegister($dst$$reg), $src$$Register);
%}
ins_pipe(fp_l2d);
%}
// ============================================================================
// Clearing of an array
instruct clearArray_reg_reg(iRegL_R11 cnt, iRegP_R10 base, Universe dummy, rFlagsReg cr)
%{
match(Set dummy (ClearArray cnt base));
effect(USE_KILL cnt, USE_KILL base, KILL cr);
ins_cost(4 * INSN_COST);
format %{ "ClearArray $cnt, $base" %}
ins_encode %{
address tpc = __ zero_words($base$$Register, $cnt$$Register);
if (tpc == nullptr) {
ciEnv::current()->record_failure("CodeCache is full");
return;
}
%}
ins_pipe(pipe_class_memory);
%}
instruct clearArray_imm_reg(immL cnt, iRegP_R10 base, iRegL_R11 temp, Universe dummy, rFlagsReg cr)
%{
predicate((uint64_t)n->in(2)->get_long()
< (uint64_t)(BlockZeroingLowLimit >> LogBytesPerWord));
match(Set dummy (ClearArray cnt base));
effect(TEMP temp, USE_KILL base, KILL cr);
ins_cost(4 * INSN_COST);
format %{ "ClearArray $cnt, $base" %}
ins_encode %{
address tpc = __ zero_words($base$$Register, (uint64_t)$cnt$$constant);
if (tpc == nullptr) {
ciEnv::current()->record_failure("CodeCache is full");
return;
}
%}
ins_pipe(pipe_class_memory);
%}
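// n.b. the immediate form above is only selected when the constant word
// count is below BlockZeroingLowLimit (scaled from bytes to words by the
// predicate); longer or variable lengths take the register form.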
// ============================================================================
// Overflow Math Instructions
instruct overflowAddI_reg_reg(rFlagsReg cr, iRegIorL2I op1, iRegIorL2I op2)
%{
match(Set cr (OverflowAddI op1 op2));
format %{ "cmnw $op1, $op2\t# overflow check int" %}
ins_cost(INSN_COST);
ins_encode %{
__ cmnw($op1$$Register, $op2$$Register);
%}
ins_pipe(icmp_reg_reg);
%}
instruct overflowAddI_reg_imm(rFlagsReg cr, iRegIorL2I op1, immIAddSub op2)
%{
match(Set cr (OverflowAddI op1 op2));
format %{ "cmnw $op1, $op2\t# overflow check int" %}
ins_cost(INSN_COST);
ins_encode %{
__ cmnw($op1$$Register, $op2$$constant);
%}
ins_pipe(icmp_reg_imm);
%}
instruct overflowAddL_reg_reg(rFlagsReg cr, iRegL op1, iRegL op2)
%{
match(Set cr (OverflowAddL op1 op2));
format %{ "cmn $op1, $op2\t# overflow check long" %}
ins_cost(INSN_COST);
ins_encode %{
__ cmn($op1$$Register, $op2$$Register);
%}
ins_pipe(icmp_reg_reg);
%}
instruct overflowAddL_reg_imm(rFlagsReg cr, iRegL op1, immLAddSub op2)
%{
match(Set cr (OverflowAddL op1 op2));
format %{ "adds zr, $op1, $op2\t# overflow check long" %}
ins_cost(INSN_COST);
ins_encode %{
__ adds(zr, $op1$$Register, $op2$$constant);
%}
ins_pipe(icmp_reg_imm);
%}
instruct overflowSubI_reg_reg(rFlagsReg cr, iRegIorL2I op1, iRegIorL2I op2)
%{
match(Set cr (OverflowSubI op1 op2));
format %{ "cmpw $op1, $op2\t# overflow check int" %}
ins_cost(INSN_COST);
ins_encode %{
__ cmpw($op1$$Register, $op2$$Register);
%}
ins_pipe(icmp_reg_reg);
%}
instruct overflowSubI_reg_imm(rFlagsReg cr, iRegIorL2I op1, immIAddSub op2)
%{
match(Set cr (OverflowSubI op1 op2));
format %{ "cmpw $op1, $op2\t# overflow check int" %}
ins_cost(INSN_COST);
ins_encode %{
__ cmpw($op1$$Register, $op2$$constant);
%}
ins_pipe(icmp_reg_imm);
%}
instruct overflowSubL_reg_reg(rFlagsReg cr, iRegL op1, iRegL op2)
%{
match(Set cr (OverflowSubL op1 op2));
format %{ "cmp $op1, $op2\t# overflow check long" %}
ins_cost(INSN_COST);
ins_encode %{
__ cmp($op1$$Register, $op2$$Register);
%}
ins_pipe(icmp_reg_reg);
%}
instruct overflowSubL_reg_imm(rFlagsReg cr, iRegL op1, immLAddSub op2)
%{
match(Set cr (OverflowSubL op1 op2));
format %{ "cmp $op1, $op2\t# overflow check long" %}
ins_cost(INSN_COST);
ins_encode %{
__ subs(zr, $op1$$Register, $op2$$constant);
%}
ins_pipe(icmp_reg_imm);
%}
instruct overflowNegI_reg(rFlagsReg cr, immI0 zero, iRegIorL2I op1)
%{
match(Set cr (OverflowSubI zero op1));
format %{ "cmpw zr, $op1\t# overflow check int" %}
ins_cost(INSN_COST);
ins_encode %{
__ cmpw(zr, $op1$$Register);
%}
ins_pipe(icmp_reg_imm);
%}
instruct overflowNegL_reg(rFlagsReg cr, immI0 zero, iRegL op1)
%{
match(Set cr (OverflowSubL zero op1));
format %{ "cmp zr, $op1\t# overflow check long" %}
ins_cost(INSN_COST);
ins_encode %{
__ cmp(zr, $op1$$Register);
%}
ins_pipe(icmp_reg_imm);
%}
instruct overflowMulI_reg(rFlagsReg cr, iRegIorL2I op1, iRegIorL2I op2)
%{
match(Set cr (OverflowMulI op1 op2));
format %{ "smull rscratch1, $op1, $op2\t# overflow check int\n\t"
"cmp rscratch1, rscratch1, sxtw\n\t"
"movw rscratch1, #0x80000000\n\t"
"cselw rscratch1, rscratch1, zr, NE\n\t"
"cmpw rscratch1, #1" %}
ins_cost(5 * INSN_COST);
ins_encode %{
__ smull(rscratch1, $op1$$Register, $op2$$Register);
__ subs(zr, rscratch1, rscratch1, ext::sxtw); // NE => overflow
__ movw(rscratch1, 0x80000000); // Develop 0 (EQ),
__ cselw(rscratch1, rscratch1, zr, Assembler::NE); // or 0x80000000 (NE)
__ cmpw(rscratch1, 1); // 0x80000000 - 1 => VS
%}
ins_pipe(pipe_slow);
%}
instruct overflowMulI_reg_branch(cmpOp cmp, iRegIorL2I op1, iRegIorL2I op2, label labl, rFlagsReg cr)
%{
match(If cmp (OverflowMulI op1 op2));
predicate(n->in(1)->as_Bool()->_test._test == BoolTest::overflow
|| n->in(1)->as_Bool()->_test._test == BoolTest::no_overflow);
effect(USE labl, KILL cr);
format %{ "smull rscratch1, $op1, $op2\t# overflow check int\n\t"
"cmp rscratch1, rscratch1, sxtw\n\t"
"b$cmp $labl" %}
ins_cost(3 * INSN_COST); // Branch is rarely taken, so cost it as a single INSN_COST
ins_encode %{
Label* L = $labl$$label;
Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
__ smull(rscratch1, $op1$$Register, $op2$$Register);
__ subs(zr, rscratch1, rscratch1, ext::sxtw); // NE => overflow
__ br(cond == Assembler::VS ? Assembler::NE : Assembler::EQ, *L);
%}
ins_pipe(pipe_serial);
%}
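// n.b. in the two OverflowMulI rules above, the 32x32->64 bit smull product
// fits in an int iff it equals its own low word sign-extended, hence
// "subs zr, rscratch1, rscratch1, sxtw" (NE => overflow). The
// movw/cselw/cmpw tail of the non-branching form exists only to translate
// that NE result into the V flag that generic overflow consumers test.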
instruct overflowMulL_reg(rFlagsReg cr, iRegL op1, iRegL op2)
%{
match(Set cr (OverflowMulL op1 op2));
format %{ "mul rscratch1, $op1, $op2\t#overflow check long\n\t"
"smulh rscratch2, $op1, $op2\n\t"
"cmp rscratch2, rscratch1, ASR #63\n\t"
"movw rscratch1, #0x80000000\n\t"
"cselw rscratch1, rscratch1, zr, NE\n\t"
"cmpw rscratch1, #1" %}
ins_cost(6 * INSN_COST);
ins_encode %{
__ mul(rscratch1, $op1$$Register, $op2$$Register); // Result bits 0..63
__ smulh(rscratch2, $op1$$Register, $op2$$Register); // Result bits 64..127
__ cmp(rscratch2, rscratch1, Assembler::ASR, 63); // Top is pure sign ext
__ movw(rscratch1, 0x80000000); // Develop 0 (EQ),
__ cselw(rscratch1, rscratch1, zr, Assembler::NE); // or 0x80000000 (NE)
__ cmpw(rscratch1, 1); // 0x80000000 - 1 => VS
%}
ins_pipe(pipe_slow);
%}
instruct overflowMulL_reg_branch(cmpOp cmp, iRegL op1, iRegL op2, label labl, rFlagsReg cr)
%{
match(If cmp (OverflowMulL op1 op2));
predicate(n->in(1)->as_Bool()->_test._test == BoolTest::overflow
|| n->in(1)->as_Bool()->_test._test == BoolTest::no_overflow);
effect(USE labl, KILL cr);
format %{ "mul rscratch1, $op1, $op2\t#overflow check long\n\t"
"smulh rscratch2, $op1, $op2\n\t"
"cmp rscratch2, rscratch1, ASR #63\n\t"
"b$cmp $labl" %}
ins_cost(4 * INSN_COST); // Branch is rarely taken, so cost it as a single INSN_COST
ins_encode %{
Label* L = $labl$$label;
Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
__ mul(rscratch1, $op1$$Register, $op2$$Register); // Result bits 0..63
__ smulh(rscratch2, $op1$$Register, $op2$$Register); // Result bits 64..127
__ cmp(rscratch2, rscratch1, Assembler::ASR, 63); // Top is pure sign ext
__ br(cond == Assembler::VS ? Assembler::NE : Assembler::EQ, *L);
%}
ins_pipe(pipe_serial);
%}
// ============================================================================
// Compare Instructions
instruct compI_reg_reg(rFlagsReg cr, iRegI op1, iRegI op2)
%{
match(Set cr (CmpI op1 op2));
effect(DEF cr, USE op1, USE op2);
ins_cost(INSN_COST);
format %{ "cmpw $op1, $op2" %}
ins_encode(aarch64_enc_cmpw(op1, op2));
ins_pipe(icmp_reg_reg);
%}
instruct compI_reg_immI0(rFlagsReg cr, iRegI op1, immI0 zero)
%{
match(Set cr (CmpI op1 zero));
effect(DEF cr, USE op1);
ins_cost(INSN_COST);
format %{ "cmpw $op1, 0" %}
ins_encode(aarch64_enc_cmpw_imm_addsub(op1, zero));
ins_pipe(icmp_reg_imm);
%}
instruct compI_reg_immIAddSub(rFlagsReg cr, iRegI op1, immIAddSub op2)
%{
match(Set cr (CmpI op1 op2));
effect(DEF cr, USE op1);
ins_cost(INSN_COST);
format %{ "cmpw $op1, $op2" %}
ins_encode(aarch64_enc_cmpw_imm_addsub(op1, op2));
ins_pipe(icmp_reg_imm);
%}
instruct compI_reg_immI(rFlagsReg cr, iRegI op1, immI op2)
%{
match(Set cr (CmpI op1 op2));
effect(DEF cr, USE op1);
ins_cost(INSN_COST * 2);
format %{ "cmpw $op1, $op2" %}
ins_encode(aarch64_enc_cmpw_imm(op1, op2));
ins_pipe(icmp_reg_imm);
%}
// Unsigned compare instructions; really the same as signed compare,
// except they should only be used to feed an If or a CMovI which takes a
// cmpOpU.
instruct compU_reg_reg(rFlagsRegU cr, iRegI op1, iRegI op2)
%{
match(Set cr (CmpU op1 op2));
effect(DEF cr, USE op1, USE op2);
ins_cost(INSN_COST);
format %{ "cmpw $op1, $op2\t# unsigned" %}
ins_encode(aarch64_enc_cmpw(op1, op2));
ins_pipe(icmp_reg_reg);
%}
instruct compU_reg_immI0(rFlagsRegU cr, iRegI op1, immI0 zero)
%{
match(Set cr (CmpU op1 zero));
effect(DEF cr, USE op1);
ins_cost(INSN_COST);
format %{ "cmpw $op1, #0\t# unsigned" %}
ins_encode(aarch64_enc_cmpw_imm_addsub(op1, zero));
ins_pipe(icmp_reg_imm);
%}
instruct compU_reg_immIAddSub(rFlagsRegU cr, iRegI op1, immIAddSub op2)
%{
match(Set cr (CmpU op1 op2));
effect(DEF cr, USE op1);
ins_cost(INSN_COST);
format %{ "cmpw $op1, $op2\t# unsigned" %}
ins_encode(aarch64_enc_cmpw_imm_addsub(op1, op2));
ins_pipe(icmp_reg_imm);
%}
instruct compU_reg_immI(rFlagsRegU cr, iRegI op1, immI op2)
%{
match(Set cr (CmpU op1 op2));
effect(DEF cr, USE op1);
ins_cost(INSN_COST * 2);
format %{ "cmpw $op1, $op2\t# unsigned" %}
ins_encode(aarch64_enc_cmpw_imm(op1, op2));
ins_pipe(icmp_reg_imm);
%}
instruct compL_reg_reg(rFlagsReg cr, iRegL op1, iRegL op2)
%{
match(Set cr (CmpL op1 op2));
effect(DEF cr, USE op1, USE op2);
ins_cost(INSN_COST);
format %{ "cmp $op1, $op2" %}
ins_encode(aarch64_enc_cmp(op1, op2));
ins_pipe(icmp_reg_reg);
%}
instruct compL_reg_immL0(rFlagsReg cr, iRegL op1, immL0 zero)
%{
match(Set cr (CmpL op1 zero));
effect(DEF cr, USE op1);
ins_cost(INSN_COST);
format %{ "tst $op1" %}
ins_encode(aarch64_enc_cmp_imm_addsub(op1, zero));
ins_pipe(icmp_reg_imm);
%}
instruct compL_reg_immLAddSub(rFlagsReg cr, iRegL op1, immLAddSub op2)
%{
match(Set cr (CmpL op1 op2));
effect(DEF cr, USE op1);
ins_cost(INSN_COST);
format %{ "cmp $op1, $op2" %}
ins_encode(aarch64_enc_cmp_imm_addsub(op1, op2));
ins_pipe(icmp_reg_imm);
%}
instruct compL_reg_immL(rFlagsReg cr, iRegL op1, immL op2)
%{
match(Set cr (CmpL op1 op2));
effect(DEF cr, USE op1);
ins_cost(INSN_COST * 2);
format %{ "cmp $op1, $op2" %}
ins_encode(aarch64_enc_cmp_imm(op1, op2));
ins_pipe(icmp_reg_imm);
%}
instruct compUL_reg_reg(rFlagsRegU cr, iRegL op1, iRegL op2)
%{
match(Set cr (CmpUL op1 op2));
effect(DEF cr, USE op1, USE op2);
ins_cost(INSN_COST);
format %{ "cmp $op1, $op2" %}
ins_encode(aarch64_enc_cmp(op1, op2));
ins_pipe(icmp_reg_reg);
%}
instruct compUL_reg_immL0(rFlagsRegU cr, iRegL op1, immL0 zero)
%{
match(Set cr (CmpUL op1 zero));
effect(DEF cr, USE op1);
ins_cost(INSN_COST);
format %{ "tst $op1" %}
ins_encode(aarch64_enc_cmp_imm_addsub(op1, zero));
ins_pipe(icmp_reg_imm);
%}
instruct compUL_reg_immLAddSub(rFlagsRegU cr, iRegL op1, immLAddSub op2)
%{
match(Set cr (CmpUL op1 op2));
effect(DEF cr, USE op1);
ins_cost(INSN_COST);
format %{ "cmp $op1, $op2" %}
ins_encode(aarch64_enc_cmp_imm_addsub(op1, op2));
ins_pipe(icmp_reg_imm);
%}
instruct compUL_reg_immL(rFlagsRegU cr, iRegL op1, immL op2)
%{
match(Set cr (CmpUL op1 op2));
effect(DEF cr, USE op1);
ins_cost(INSN_COST * 2);
format %{ "cmp $op1, $op2" %}
ins_encode(aarch64_enc_cmp_imm(op1, op2));
ins_pipe(icmp_reg_imm);
%}
instruct compP_reg_reg(rFlagsRegU cr, iRegP op1, iRegP op2)
%{
match(Set cr (CmpP op1 op2));
effect(DEF cr, USE op1, USE op2);
ins_cost(INSN_COST);
format %{ "cmp $op1, $op2\t // ptr" %}
ins_encode(aarch64_enc_cmpp(op1, op2));
ins_pipe(icmp_reg_reg);
%}
instruct compN_reg_reg(rFlagsRegU cr, iRegN op1, iRegN op2)
%{
match(Set cr (CmpN op1 op2));
effect(DEF cr, USE op1, USE op2);
ins_cost(INSN_COST);
format %{ "cmp $op1, $op2\t // compressed ptr" %}
ins_encode(aarch64_enc_cmpn(op1, op2));
ins_pipe(icmp_reg_reg);
%}
instruct testP_reg(rFlagsRegU cr, iRegP op1, immP0 zero)
%{
match(Set cr (CmpP op1 zero));
effect(DEF cr, USE op1, USE zero);
ins_cost(INSN_COST);
format %{ "cmp $op1, 0\t // ptr" %}
ins_encode(aarch64_enc_testp(op1));
ins_pipe(icmp_reg_imm);
%}
instruct testN_reg(rFlagsRegU cr, iRegN op1, immN0 zero)
%{
match(Set cr (CmpN op1 zero));
effect(DEF cr, USE op1, USE zero);
ins_cost(INSN_COST);
format %{ "cmp $op1, 0\t // compressed ptr" %}
ins_encode(aarch64_enc_testn(op1));
ins_pipe(icmp_reg_imm);
%}
// FP comparisons
//
// n.b. CmpF/CmpD set a normal flags reg which then gets compared
// using normal cmpOp. See declaration of rFlagsReg for details.
instruct compF_reg_reg(rFlagsReg cr, vRegF src1, vRegF src2)
%{
match(Set cr (CmpF src1 src2));
ins_cost(3 * INSN_COST);
format %{ "fcmps $src1, $src2" %}
ins_encode %{
__ fcmps(as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
%}
ins_pipe(pipe_class_compare);
%}
instruct compF_reg_zero(rFlagsReg cr, vRegF src1, immF0 src2)
%{
match(Set cr (CmpF src1 src2));
ins_cost(3 * INSN_COST);
format %{ "fcmps $src1, 0.0" %}
ins_encode %{
__ fcmps(as_FloatRegister($src1$$reg), 0.0);
%}
ins_pipe(pipe_class_compare);
%}
// FROM HERE
instruct compD_reg_reg(rFlagsReg cr, vRegD src1, vRegD src2)
%{
match(Set cr (CmpD src1 src2));
ins_cost(3 * INSN_COST);
format %{ "fcmpd $src1, $src2" %}
ins_encode %{
__ fcmpd(as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg));
%}
ins_pipe(pipe_class_compare);
%}
instruct compD_reg_zero(rFlagsReg cr, vRegD src1, immD0 src2)
%{
match(Set cr (CmpD src1 src2));
ins_cost(3 * INSN_COST);
format %{ "fcmpd $src1, 0.0" %}
ins_encode %{
__ fcmpd(as_FloatRegister($src1$$reg), 0.0);
%}
ins_pipe(pipe_class_compare);
%}
instruct compF3_reg_reg(iRegINoSp dst, vRegF src1, vRegF src2, rFlagsReg cr)
%{
match(Set dst (CmpF3 src1 src2));
effect(KILL cr);
ins_cost(5 * INSN_COST);
format %{ "fcmps $src1, $src2\n\t"
"csinvw($dst, zr, zr, eq\n\t"
"csnegw($dst, $dst, $dst, lt)"
%}
ins_encode %{
Label done;
FloatRegister s1 = as_FloatRegister($src1$$reg);
FloatRegister s2 = as_FloatRegister($src2$$reg);
Register d = as_Register($dst$$reg);
__ fcmps(s1, s2);
// installs 0 if EQ else -1
__ csinvw(d, zr, zr, Assembler::EQ);
// keeps -1 if less or unordered else installs 1
__ csnegw(d, d, d, Assembler::LT);
__ bind(done);
%}
ins_pipe(pipe_class_default);
%}
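// For reference, a minimal C++ sketch (not part of this file) of the
// three-way result computed by the csinv/csneg pair above. On AArch64 an
// unordered fcmp (i.e. a NaN operand) satisfies the LT condition, so NaN
// compares as "less":
//
//   int fcmp3(float a, float b) {
//     if (a == b) return 0;   // EQ: csinvw installs 0, csnegw negates 0 to 0
//     if (a > b)  return 1;   // GT: csnegw negates the -1 to +1
//     return -1;              // LT or unordered: csnegw keeps the -1
//   }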
instruct compD3_reg_reg(iRegINoSp dst, vRegD src1, vRegD src2, rFlagsReg cr)
%{
match(Set dst (CmpD3 src1 src2));
effect(KILL cr);
ins_cost(5 * INSN_COST);
format %{ "fcmpd $src1, $src2\n\t"
"csinvw($dst, zr, zr, eq\n\t"
"csnegw($dst, $dst, $dst, lt)"
%}
ins_encode %{
Label done;
FloatRegister s1 = as_FloatRegister($src1$$reg);
FloatRegister s2 = as_FloatRegister($src2$$reg);
Register d = as_Register($dst$$reg);
__ fcmpd(s1, s2);
// installs 0 if EQ else -1
__ csinvw(d, zr, zr, Assembler::EQ);
// keeps -1 if less or unordered else installs 1
__ csnegw(d, d, d, Assembler::LT);
__ bind(done);
%}
ins_pipe(pipe_class_default);
%}
instruct compF3_reg_immF0(iRegINoSp dst, vRegF src1, immF0 zero, rFlagsReg cr)
%{
match(Set dst (CmpF3 src1 zero));
effect(KILL cr);
ins_cost(5 * INSN_COST);
format %{ "fcmps $src1, 0.0\n\t"
"csinvw($dst, zr, zr, eq\n\t"
"csnegw($dst, $dst, $dst, lt)"
%}
ins_encode %{
Label done;
FloatRegister s1 = as_FloatRegister($src1$$reg);
Register d = as_Register($dst$$reg);
__ fcmps(s1, 0.0);
// installs 0 if EQ else -1
__ csinvw(d, zr, zr, Assembler::EQ);
// keeps -1 if less or unordered else installs 1
__ csnegw(d, d, d, Assembler::LT);
__ bind(done);
%}
ins_pipe(pipe_class_default);
%}
instruct compD3_reg_immD0(iRegINoSp dst, vRegD src1, immD0 zero, rFlagsReg cr)
%{
match(Set dst (CmpD3 src1 zero));
effect(KILL cr);
ins_cost(5 * INSN_COST);
format %{ "fcmpd $src1, 0.0\n\t"
"csinvw($dst, zr, zr, eq\n\t"
"csnegw($dst, $dst, $dst, lt)"
%}
ins_encode %{
Label done;
FloatRegister s1 = as_FloatRegister($src1$$reg);
Register d = as_Register($dst$$reg);
__ fcmpd(s1, 0.0);
// installs 0 if EQ else -1
__ csinvw(d, zr, zr, Assembler::EQ);
// keeps -1 if less or unordered else installs 1
__ csnegw(d, d, d, Assembler::LT);
__ bind(done);
%}
ins_pipe(pipe_class_default);
%}
instruct cmpLTMask_reg_reg(iRegINoSp dst, iRegIorL2I p, iRegIorL2I q, rFlagsReg cr)
%{
match(Set dst (CmpLTMask p q));
effect(KILL cr);
ins_cost(3 * INSN_COST);
format %{ "cmpw $p, $q\t# cmpLTMask\n\t"
"csetw $dst, lt\n\t"
"subw $dst, zr, $dst"
%}
ins_encode %{
__ cmpw(as_Register($p$$reg), as_Register($q$$reg));
__ csetw(as_Register($dst$$reg), Assembler::LT);
__ subw(as_Register($dst$$reg), zr, as_Register($dst$$reg));
%}
ins_pipe(ialu_reg_reg);
%}
instruct cmpLTMask_reg_zero(iRegINoSp dst, iRegIorL2I src, immI0 zero, rFlagsReg cr)
%{
match(Set dst (CmpLTMask src zero));
effect(KILL cr);
ins_cost(INSN_COST);
format %{ "asrw $dst, $src, #31\t# cmpLTMask0" %}
ins_encode %{
__ asrw(as_Register($dst$$reg), as_Register($src$$reg), 31);
%}
ins_pipe(ialu_reg_shift);
%}
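// In C++ terms the two rules above compute (a sketch, not part of this file):
//
//   int32_t lt_mask(int32_t p, int32_t q) { return -(int32_t)(p < q); } // csetw + subw
//   int32_t lt_mask0(int32_t s)           { return s >> 31; }           // asrw smears the
//                                                                       // sign bit everywhere
//
// The second form relies on arithmetic right shift of a negative value,
// which C++20 guarantees and which asrw implements directly.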
// ============================================================================
// Max and Min
// Like compI_reg_reg or compI_reg_immI0 but without a match rule and without the second zero parameter.
instruct compI_reg_imm0(rFlagsReg cr, iRegI src)
%{
effect(DEF cr, USE src);
ins_cost(INSN_COST);
format %{ "cmpw $src, 0" %}
ins_encode %{
__ cmpw($src$$Register, 0);
%}
ins_pipe(icmp_reg_imm);
%}
instruct minI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2)
%{
match(Set dst (MinI src1 src2));
ins_cost(INSN_COST * 3);
expand %{
rFlagsReg cr;
compI_reg_reg(cr, src1, src2);
cmovI_reg_reg_lt(dst, src1, src2, cr);
%}
%}
instruct maxI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2)
%{
match(Set dst (MaxI src1 src2));
ins_cost(INSN_COST * 3);
expand %{
rFlagsReg cr;
compI_reg_reg(cr, src1, src2);
cmovI_reg_reg_gt(dst, src1, src2, cr);
%}
%}
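// Both rules expand to a compare followed by a conditional select, roughly
// "cmpw src1, src2; cselw dst, src1, src2, lt/gt" -- the branch-free
// equivalent of the obvious C++ (a sketch):
//
//   int min(int a, int b) { return a < b ? a : b; }
//   int max(int a, int b) { return a > b ? a : b; }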
// ============================================================================
// Branch Instructions
// Direct Branch.
instruct branch(label lbl)
%{
match(Goto);
effect(USE lbl);
ins_cost(BRANCH_COST);
format %{ "b $lbl" %}
ins_encode(aarch64_enc_b(lbl));
ins_pipe(pipe_branch);
%}
// Conditional Near Branch
instruct branchCon(cmpOp cmp, rFlagsReg cr, label lbl)
%{
// Same match rule as `branchConFar'.
match(If cmp cr);
effect(USE lbl);
ins_cost(BRANCH_COST);
// If set to 1 this indicates that the current instruction is a
// short variant of a long branch. This avoids using this
// instruction in first-pass matching. It will then only be used in
// the `Shorten_branches' pass.
// ins_short_branch(1);
format %{ "b$cmp $lbl" %}
ins_encode(aarch64_enc_br_con(cmp, lbl));
ins_pipe(pipe_branch_cond);
%}
// Conditional Near Branch Unsigned
instruct branchConU(cmpOpU cmp, rFlagsRegU cr, label lbl)
%{
// Same match rule as `branchConFar'.
match(If cmp cr);
effect(USE lbl);
ins_cost(BRANCH_COST);
// If set to 1 this indicates that the current instruction is a
// short variant of a long branch. This avoids using this
// instruction in first-pass matching. It will then only be used in
// the `Shorten_branches' pass.
// ins_short_branch(1);
format %{ "b$cmp $lbl\t# unsigned" %}
ins_encode(aarch64_enc_br_conU(cmp, lbl));
ins_pipe(pipe_branch_cond);
%}
// Make use of CBZ and CBNZ. These instructions, as well as being
// shorter than (cmp; branch), have the additional benefit of not
// killing the flags.
instruct cmpI_imm0_branch(cmpOpEqNe cmp, iRegIorL2I op1, immI0 op2, label labl, rFlagsReg cr) %{
match(If cmp (CmpI op1 op2));
effect(USE labl);
ins_cost(BRANCH_COST);
format %{ "cbw$cmp $op1, $labl" %}
ins_encode %{
Label* L = $labl$$label;
Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
if (cond == Assembler::EQ)
__ cbzw($op1$$Register, *L);
else
__ cbnzw($op1$$Register, *L);
%}
ins_pipe(pipe_cmp_branch);
%}
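// A sketch of the saving: for a source-level test such as
//
//   if (i == 0) goto L;
//
// the rule above emits the single instruction "cbzw $op1, L" instead of
// "cmpw $op1, #0; b.eq L", and NZCV is left untouched for any neighboring
// flags consumer.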
instruct cmpL_imm0_branch(cmpOpEqNe cmp, iRegL op1, immL0 op2, label labl, rFlagsReg cr) %{
match(If cmp (CmpL op1 op2));
effect(USE labl);
ins_cost(BRANCH_COST);
format %{ "cb$cmp $op1, $labl" %}
ins_encode %{
Label* L = $labl$$label;
Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
if (cond == Assembler::EQ)
__ cbz($op1$$Register, *L);
else
__ cbnz($op1$$Register, *L);
%}
ins_pipe(pipe_cmp_branch);
%}
instruct cmpP_imm0_branch(cmpOpEqNe cmp, iRegP op1, immP0 op2, label labl, rFlagsReg cr) %{
match(If cmp (CmpP op1 op2));
effect(USE labl);
ins_cost(BRANCH_COST);
format %{ "cb$cmp $op1, $labl" %}
ins_encode %{
Label* L = $labl$$label;
Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
if (cond == Assembler::EQ)
__ cbz($op1$$Register, *L);
else
__ cbnz($op1$$Register, *L);
%}
ins_pipe(pipe_cmp_branch);
%}
instruct cmpN_imm0_branch(cmpOpEqNe cmp, iRegN op1, immN0 op2, label labl, rFlagsReg cr) %{
match(If cmp (CmpN op1 op2));
effect(USE labl);
ins_cost(BRANCH_COST);
format %{ "cbw$cmp $op1, $labl" %}
ins_encode %{
Label* L = $labl$$label;
Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
if (cond == Assembler::EQ)
__ cbzw($op1$$Register, *L);
else
__ cbnzw($op1$$Register, *L);
%}
ins_pipe(pipe_cmp_branch);
%}
instruct cmpP_narrowOop_imm0_branch(cmpOpEqNe cmp, iRegN oop, immP0 zero, label labl, rFlagsReg cr) %{
match(If cmp (CmpP (DecodeN oop) zero));
effect(USE labl);
ins_cost(BRANCH_COST);
format %{ "cb$cmp $oop, $labl" %}
ins_encode %{
Label* L = $labl$$label;
Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
if (cond == Assembler::EQ)
__ cbzw($oop$$Register, *L);
else
__ cbnzw($oop$$Register, *L);
%}
ins_pipe(pipe_cmp_branch);
%}
instruct cmpUI_imm0_branch(cmpOpUEqNeLeGt cmp, iRegIorL2I op1, immI0 op2, label labl) %{
match(If cmp (CmpU op1 op2));
effect(USE labl);
ins_cost(BRANCH_COST);
format %{ "cbw$cmp $op1, $labl" %}
ins_encode %{
Label* L = $labl$$label;
Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
if (cond == Assembler::EQ || cond == Assembler::LS) {
__ cbzw($op1$$Register, *L);
} else {
assert(cond == Assembler::NE || cond == Assembler::HI, "unexpected condition");
__ cbnzw($op1$$Register, *L);
}
%}
ins_pipe(pipe_cmp_branch);
%}
instruct cmpUL_imm0_branch(cmpOpUEqNeLeGt cmp, iRegL op1, immL0 op2, label labl) %{
match(If cmp (CmpUL op1 op2));
effect(USE labl);
ins_cost(BRANCH_COST);
format %{ "cb$cmp $op1, $labl" %}
ins_encode %{
Label* L = $labl$$label;
Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
if (cond == Assembler::EQ || cond == Assembler::LS) {
__ cbz($op1$$Register, *L);
} else {
assert(cond == Assembler::NE || cond == Assembler::HI, "unexpected condition");
__ cbnz($op1$$Register, *L);
}
%}
ins_pipe(pipe_cmp_branch);
%}
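// The LS/HI handling above rests on a simple unsigned identity: for any
// unsigned x,
//
//   x <= 0  <=>  x == 0   (so LS lowers to cbz/cbzw)
//   x >  0  <=>  x != 0   (so HI lowers to cbnz/cbnzw)
//
// which is why cmpOpUEqNeLeGt can share the zero-register compare-and-branch
// encodings with EQ/NE.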
// Test bit and Branch
// Patterns for short (< 32KiB) variants
instruct cmpL_branch_sign(cmpOpLtGe cmp, iRegL op1, immL0 op2, label labl) %{
match(If cmp (CmpL op1 op2));
effect(USE labl);
ins_cost(BRANCH_COST);
format %{ "cb$cmp $op1, $labl # long" %}
ins_encode %{
Label* L = $labl$$label;
Assembler::Condition cond =
((Assembler::Condition)$cmp$$cmpcode == Assembler::LT) ? Assembler::NE : Assembler::EQ;
__ tbr(cond, $op1$$Register, 63, *L);
%}
ins_pipe(pipe_cmp_branch);
ins_short_branch(1);
%}
instruct cmpI_branch_sign(cmpOpLtGe cmp, iRegIorL2I op1, immI0 op2, label labl) %{
match(If cmp (CmpI op1 op2));
effect(USE labl);
ins_cost(BRANCH_COST);
format %{ "cb$cmp $op1, $labl # int" %}
ins_encode %{
Label* L = $labl$$label;
Assembler::Condition cond =
((Assembler::Condition)$cmp$$cmpcode == Assembler::LT) ? Assembler::NE : Assembler::EQ;
__ tbr(cond, $op1$$Register, 31, *L);
%}
ins_pipe(pipe_cmp_branch);
ins_short_branch(1);
%}
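// These sign tests use the fact that for a two's-complement integer the
// "< 0" / ">= 0" outcome is exactly the top bit, e.g. in C++ (a sketch):
//
//   bool is_neg(int64_t x) { return ((uint64_t)x >> 63) != 0; }  // tbnz x, #63
//
// so LT becomes tbnz on bit 63 (or 31 for int) and GE becomes tbz.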
instruct cmpL_branch_bit(cmpOpEqNe cmp, iRegL op1, immL op2, immL0 op3, label labl) %{
match(If cmp (CmpL (AndL op1 op2) op3));
predicate(is_power_of_2((julong)n->in(2)->in(1)->in(2)->get_long()));
effect(USE labl);
ins_cost(BRANCH_COST);
format %{ "tb$cmp $op1, $op2, $labl" %}
ins_encode %{
Label* L = $labl$$label;
Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
int bit = exact_log2_long($op2$$constant);
__ tbr(cond, $op1$$Register, bit, *L);
%}
ins_pipe(pipe_cmp_branch);
ins_short_branch(1);
%}
instruct cmpI_branch_bit(cmpOpEqNe cmp, iRegIorL2I op1, immI op2, immI0 op3, label labl) %{
match(If cmp (CmpI (AndI op1 op2) op3));
predicate(is_power_of_2((juint)n->in(2)->in(1)->in(2)->get_int()));
effect(USE labl);
ins_cost(BRANCH_COST);
format %{ "tb$cmp $op1, $op2, $labl" %}
ins_encode %{
Label* L = $labl$$label;
Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
int bit = exact_log2((juint)$op2$$constant);
__ tbr(cond, $op1$$Register, bit, *L);
%}
ins_pipe(pipe_cmp_branch);
ins_short_branch(1);
%}
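// The predicates above accept only single-bit masks; the encoding then
// recovers the bit index from the constant. A C++ sketch of the helpers'
// behavior (assuming GCC/Clang builtins):
//
//   bool is_power_of_2(uint64_t m) { return m != 0 && (m & (m - 1)) == 0; }
//   int  exact_log2(uint64_t m)    { return __builtin_ctzll(m); }  // bit index
//
// so, for example, "(x & 0x40) == 0" turns into "tbz x, #6, L".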
// And far variants
instruct far_cmpL_branch_sign(cmpOpLtGe cmp, iRegL op1, immL0 op2, label labl) %{
match(If cmp (CmpL op1 op2));
effect(USE labl);
ins_cost(BRANCH_COST);
format %{ "cb$cmp $op1, $labl # long" %}
ins_encode %{
Label* L = $labl$$label;
Assembler::Condition cond =
((Assembler::Condition)$cmp$$cmpcode == Assembler::LT) ? Assembler::NE : Assembler::EQ;
__ tbr(cond, $op1$$Register, 63, *L, /*far*/true);
%}
ins_pipe(pipe_cmp_branch);
%}
instruct far_cmpI_branch_sign(cmpOpLtGe cmp, iRegIorL2I op1, immI0 op2, label labl) %{
match(If cmp (CmpI op1 op2));
effect(USE labl);
ins_cost(BRANCH_COST);
format %{ "cb$cmp $op1, $labl # int" %}
ins_encode %{
Label* L = $labl$$label;
Assembler::Condition cond =
((Assembler::Condition)$cmp$$cmpcode == Assembler::LT) ? Assembler::NE : Assembler::EQ;
__ tbr(cond, $op1$$Register, 31, *L, /*far*/true);
%}
ins_pipe(pipe_cmp_branch);
%}
instruct far_cmpL_branch_bit(cmpOpEqNe cmp, iRegL op1, immL op2, immL0 op3, label labl) %{
match(If cmp (CmpL (AndL op1 op2) op3));
predicate(is_power_of_2((julong)n->in(2)->in(1)->in(2)->get_long()));
effect(USE labl);
ins_cost(BRANCH_COST);
format %{ "tb$cmp $op1, $op2, $labl" %}
ins_encode %{
Label* L = $labl$$label;
Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
int bit = exact_log2_long($op2$$constant);
__ tbr(cond, $op1$$Register, bit, *L, /*far*/true);
%}
ins_pipe(pipe_cmp_branch);
%}
instruct far_cmpI_branch_bit(cmpOpEqNe cmp, iRegIorL2I op1, immI op2, immI0 op3, label labl) %{
match(If cmp (CmpI (AndI op1 op2) op3));
predicate(is_power_of_2((juint)n->in(2)->in(1)->in(2)->get_int()));
effect(USE labl);
ins_cost(BRANCH_COST);
format %{ "tb$cmp $op1, $op2, $labl" %}
ins_encode %{
Label* L = $labl$$label;
Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode;
int bit = exact_log2((juint)$op2$$constant);
__ tbr(cond, $op1$$Register, bit, *L, /*far*/true);
%}
ins_pipe(pipe_cmp_branch);
%}
// Test bits
instruct cmpL_and(cmpOp cmp, iRegL op1, immL op2, immL0 op3, rFlagsReg cr) %{
match(Set cr (CmpL (AndL op1 op2) op3));
predicate(Assembler::operand_valid_for_logical_immediate
(/*is_32*/false, n->in(1)->in(2)->get_long()));
ins_cost(INSN_COST);
format %{ "tst $op1, $op2 # long" %}
ins_encode %{
__ tst($op1$$Register, $op2$$constant);
%}
ins_pipe(ialu_reg_reg);
%}
instruct cmpI_and(cmpOp cmp, iRegIorL2I op1, immI op2, immI0 op3, rFlagsReg cr) %{
match(Set cr (CmpI (AndI op1 op2) op3));
predicate(Assembler::operand_valid_for_logical_immediate
(/*is_32*/true, n->in(1)->in(2)->get_int()));
ins_cost(INSN_COST);
format %{ "tst $op1, $op2 # int" %}
ins_encode %{
__ tstw($op1$$Register, $op2$$constant);
%}
ins_pipe(ialu_reg_reg);
%}
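// Whether a constant can use the immediate forms above is decided by the
// AArch64 bitmask-immediate encoding: the value must be a replicated,
// rotated run of contiguous ones. For instance 0x00ff00ff00ff00ff (a
// replicated 16-bit element) encodes, while an arbitrary constant such as
// 0x12345678 does not and must fall back to the register-register rules
// below.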
instruct cmpL_and_reg(cmpOp cmp, iRegL op1, iRegL op2, immL0 op3, rFlagsReg cr) %{
match(Set cr (CmpL (AndL op1 op2) op3));
ins_cost(INSN_COST);
format %{ "tst $op1, $op2 # long" %}
ins_encode %{
__ tst($op1$$Register, $op2$$Register);
%}
ins_pipe(ialu_reg_reg);
%}
instruct cmpI_and_reg(cmpOp cmp, iRegIorL2I op1, iRegIorL2I op2, immI0 op3, rFlagsReg cr) %{
match(Set cr (CmpI (AndI op1 op2) op3));
ins_cost(INSN_COST);
format %{ "tstw $op1, $op2 # int" %}
ins_encode %{
__ tstw($op1$$Register, $op2$$Register);
%}
ins_pipe(ialu_reg_reg);
%}
// Conditional Far Branch
// Conditional Far Branch Unsigned
// TODO: fixme
// counted loop end branch near
instruct branchLoopEnd(cmpOp cmp, rFlagsReg cr, label lbl)
%{
match(CountedLoopEnd cmp cr);
effect(USE lbl);
ins_cost(BRANCH_COST);
// short variant.
// ins_short_branch(1);
format %{ "b$cmp $lbl \t// counted loop end" %}
ins_encode(aarch64_enc_br_con(cmp, lbl));
ins_pipe(pipe_branch);
%}
// counted loop end branch far
// TODO: fixme
// ============================================================================
// inlined locking and unlocking
instruct cmpFastLock(rFlagsReg cr, iRegP object, iRegP box, iRegPNoSp tmp, iRegPNoSp tmp2, iRegPNoSp tmp3)
%{
predicate(LockingMode != LM_LIGHTWEIGHT);
match(Set cr (FastLock object box));
effect(TEMP tmp, TEMP tmp2, TEMP tmp3);
ins_cost(5 * INSN_COST);
format %{ "fastlock $object,$box\t! kills $tmp,$tmp2,$tmp3" %}
ins_encode %{
__ fast_lock($object$$Register, $box$$Register, $tmp$$Register, $tmp2$$Register, $tmp3$$Register);
%}
ins_pipe(pipe_serial);
%}
instruct cmpFastUnlock(rFlagsReg cr, iRegP object, iRegP box, iRegPNoSp tmp, iRegPNoSp tmp2)
%{
predicate(LockingMode != LM_LIGHTWEIGHT);
match(Set cr (FastUnlock object box));
effect(TEMP tmp, TEMP tmp2);
ins_cost(5 * INSN_COST);
format %{ "fastunlock $object,$box\t! kills $tmp, $tmp2" %}
ins_encode %{
__ fast_unlock($object$$Register, $box$$Register, $tmp$$Register, $tmp2$$Register);
%}
ins_pipe(pipe_serial);
%}
instruct cmpFastLockLightweight(rFlagsReg cr, iRegP object, iRegP box, iRegPNoSp tmp, iRegPNoSp tmp2, iRegPNoSp tmp3)
%{
predicate(LockingMode == LM_LIGHTWEIGHT);
match(Set cr (FastLock object box));
effect(TEMP tmp, TEMP tmp2, TEMP tmp3);
ins_cost(5 * INSN_COST);
format %{ "fastlock $object,$box\t! kills $tmp,$tmp2,$tmp3" %}
ins_encode %{
__ fast_lock_lightweight($object$$Register, $box$$Register, $tmp$$Register, $tmp2$$Register, $tmp3$$Register);
%}
ins_pipe(pipe_serial);
%}
instruct cmpFastUnlockLightweight(rFlagsReg cr, iRegP object, iRegP box, iRegPNoSp tmp, iRegPNoSp tmp2, iRegPNoSp tmp3)
%{
predicate(LockingMode == LM_LIGHTWEIGHT);
match(Set cr (FastUnlock object box));
effect(TEMP tmp, TEMP tmp2, TEMP tmp3);
ins_cost(5 * INSN_COST);
format %{ "fastunlock $object,$box\t! kills $tmp, $tmp2, $tmp3" %}
ins_encode %{
__ fast_unlock_lightweight($object$$Register, $box$$Register, $tmp$$Register, $tmp2$$Register, $tmp3$$Register);
%}
ins_pipe(pipe_serial);
%}
// ============================================================================
// Safepoint Instructions
// TODO
// provide a near and far version of this code
instruct safePoint(rFlagsReg cr, iRegP poll)
%{
match(SafePoint poll);
effect(KILL cr);
format %{
"ldrw zr, [$poll]\t# Safepoint: poll for GC"
%}
ins_encode %{
__ read_polling_page(as_Register($poll$$reg), relocInfo::poll_type);
%}
ins_pipe(pipe_serial); // ins_pipe(iload_reg_mem);
%}
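// A sketch of the mechanism, assuming HotSpot's usual guard-page scheme: the
// poll is a load whose result is discarded (the destination is zr). While
// the VM has the page armed the load faults, and the signal handler parks
// the thread at the safepoint. In C++ terms, roughly:
//
//   *(volatile int32_t*)poll_addr;   // read and discard; faults when armed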
// ============================================================================
// Procedure Call/Return Instructions
// Call Java Static Instruction
instruct CallStaticJavaDirect(method meth)
%{
match(CallStaticJava);
effect(USE meth);
ins_cost(CALL_COST);
format %{ "call,static $meth \t// ==> " %}
ins_encode(aarch64_enc_java_static_call(meth),
aarch64_enc_call_epilog);
ins_pipe(pipe_class_call);
%}
// TO HERE
// Call Java Dynamic Instruction
instruct CallDynamicJavaDirect(method meth)
%{
match(CallDynamicJava);
effect(USE meth);
ins_cost(CALL_COST);
format %{ "CALL,dynamic $meth \t// ==> " %}
ins_encode(aarch64_enc_java_dynamic_call(meth),
aarch64_enc_call_epilog);
ins_pipe(pipe_class_call);
%}
// Call Runtime Instruction
instruct CallRuntimeDirect(method meth)
%{
match(CallRuntime);
effect(USE meth);
ins_cost(CALL_COST);
format %{ "CALL, runtime $meth" %}
ins_encode( aarch64_enc_java_to_runtime(meth) );
ins_pipe(pipe_class_call);
%}
// Call Runtime Instruction
instruct CallLeafDirect(method meth)
%{
match(CallLeaf);
effect(USE meth);
ins_cost(CALL_COST);
format %{ "CALL, runtime leaf $meth" %}
ins_encode( aarch64_enc_java_to_runtime(meth) );
ins_pipe(pipe_class_call);
%}
// Call Runtime Instruction without safepoint and with vector arguments
instruct CallLeafDirectVector(method meth)
%{
match(CallLeafVector);
effect(USE meth);
ins_cost(CALL_COST);
format %{ "CALL, runtime leaf vector $meth" %}
ins_encode(aarch64_enc_java_to_runtime(meth));
ins_pipe(pipe_class_call);
%}
// Call Runtime Instruction
instruct CallLeafNoFPDirect(method meth)
%{
match(CallLeafNoFP);
effect(USE meth);
ins_cost(CALL_COST);
format %{ "CALL, runtime leaf nofp $meth" %}
ins_encode( aarch64_enc_java_to_runtime(meth) );
ins_pipe(pipe_class_call);
%}
// Tail Call; Jump from runtime stub to Java code.
// Also known as an 'interprocedural jump'.
// Target of jump will eventually return to caller.
// TailJump below removes the return address.
// Don't use rfp for 'jump_target' because a MachEpilogNode has already been
// emitted just above the TailCall which has reset rfp to the caller state.
instruct TailCalljmpInd(iRegPNoSpNoRfp jump_target, inline_cache_RegP method_ptr)
%{
match(TailCall jump_target method_ptr);
ins_cost(CALL_COST);
format %{ "br $jump_target\t# $method_ptr holds method" %}
ins_encode(aarch64_enc_tail_call(jump_target));
ins_pipe(pipe_class_call);
%}
instruct TailjmpInd(iRegPNoSpNoRfp jump_target, iRegP_R0 ex_oop)
%{
match(TailJump jump_target ex_oop);
ins_cost(CALL_COST);
format %{ "br $jump_target\t# $ex_oop holds exception oop" %}
ins_encode(aarch64_enc_tail_jmp(jump_target));
ins_pipe(pipe_class_call);
%}
// Forward exception.
instruct ForwardExceptionjmp()
%{
match(ForwardException);
ins_cost(CALL_COST);
format %{ "b forward_exception_stub" %}
ins_encode %{
__ far_jump(RuntimeAddress(StubRoutines::forward_exception_entry()));
%}
ins_pipe(pipe_class_call);
%}
// Create exception oop: created by stack-crawling runtime code.
// Created exception is now available to this handler, and is set up
// just prior to jumping to this handler. No code emitted.
// TODO check
// should ex_oop be in r0? intel uses rax, ppc cannot use r0 so uses rarg1
instruct CreateException(iRegP_R0 ex_oop)
%{
match(Set ex_oop (CreateEx));
format %{ " -- \t// exception oop; no code emitted" %}
size(0);
ins_encode( /*empty*/ );
ins_pipe(pipe_class_empty);
%}
// Rethrow exception: The exception oop will come in the first
// argument position. Then JUMP (not call) to the rethrow stub code.
instruct RethrowException() %{
match(Rethrow);
ins_cost(CALL_COST);
format %{ "b rethrow_stub" %}
ins_encode( aarch64_enc_rethrow() );
ins_pipe(pipe_class_call);
%}
// Return Instruction
// epilog node loads ret address into lr as part of frame pop
instruct Ret()
%{
match(Return);
format %{ "ret\t// return register" %}
ins_encode( aarch64_enc_ret() );
ins_pipe(pipe_branch);
%}
// Die now.
instruct ShouldNotReachHere() %{
match(Halt);
ins_cost(CALL_COST);
format %{ "ShouldNotReachHere" %}
ins_encode %{
if (is_reachable()) {
const char* str = __ code_string(_halt_reason);
__ stop(str);
}
%}
ins_pipe(pipe_class_default);
%}
// ============================================================================
// Partial Subtype Check
//
// Search the subklass's secondary supers array for an instance of the
// superklass. Set a hidden
// internal cache on a hit (cache is checked with exposed code in
// gen_subtype_check()). Return NZ for a miss or zero for a hit. The
// encoding ALSO sets flags.
instruct partialSubtypeCheck(iRegP_R4 sub, iRegP_R0 super, iRegP_R2 temp, iRegP_R5 result, rFlagsReg cr)
%{
match(Set result (PartialSubtypeCheck sub super));
predicate(!UseSecondarySupersTable);
effect(KILL cr, KILL temp);
ins_cost(20 * INSN_COST); // slightly larger than the next version
format %{ "partialSubtypeCheck $result, $sub, $super" %}
ins_encode(aarch64_enc_partial_subtype_check(sub, super, temp, result));
opcode(0x1); // Force zero of result reg on hit
ins_pipe(pipe_class_memory);
%}
// Two versions of partialSubtypeCheck, both used when we need to
// search for a super class in the secondary supers array. The first
// is used when we don't know _a priori_ the class being searched
// for. The second, far more common, is used when we do know: this is
// used for instanceof, checkcast, and any case where C2 can determine
// it by constant propagation.
instruct partialSubtypeCheckVarSuper(iRegP_R4 sub, iRegP_R0 super, vRegD_V0 vtemp, iRegP_R5 result,
iRegP_R1 tempR1, iRegP_R2 tempR2, iRegP_R3 tempR3,
rFlagsReg cr)
%{
match(Set result (PartialSubtypeCheck sub super));
predicate(UseSecondarySupersTable);
effect(KILL cr, TEMP tempR1, TEMP tempR2, TEMP tempR3, TEMP vtemp);
ins_cost(10 * INSN_COST); // slightly larger than the next version
format %{ "partialSubtypeCheck $result, $sub, $super" %}
ins_encode %{
__ lookup_secondary_supers_table_var($sub$$Register, $super$$Register,
$tempR1$$Register, $tempR2$$Register, $tempR3$$Register,
$vtemp$$FloatRegister,
$result$$Register, /*L_success*/nullptr);
%}
ins_pipe(pipe_class_memory);
%}
instruct partialSubtypeCheckConstSuper(iRegP_R4 sub, iRegP_R0 super_reg, immP super_con, vRegD_V0 vtemp, iRegP_R5 result,
iRegP_R1 tempR1, iRegP_R2 tempR2, iRegP_R3 tempR3,
rFlagsReg cr)
%{
match(Set result (PartialSubtypeCheck sub (Binary super_reg super_con)));
predicate(UseSecondarySupersTable);
effect(KILL cr, TEMP tempR1, TEMP tempR2, TEMP tempR3, TEMP vtemp);
ins_cost(5 * INSN_COST); // smaller than the next version
format %{ "partialSubtypeCheck $result, $sub, $super_reg, $super_con" %}
ins_encode %{
bool success = false;
u1 super_klass_slot = ((Klass*)$super_con$$constant)->hash_slot();
if (InlineSecondarySupersTest) {
success =
__ lookup_secondary_supers_table_const($sub$$Register, $super_reg$$Register,
$tempR1$$Register, $tempR2$$Register, $tempR3$$Register,
$vtemp$$FloatRegister,
$result$$Register,
super_klass_slot);
} else {
address call = __ trampoline_call(RuntimeAddress(StubRoutines::lookup_secondary_supers_table_stub(super_klass_slot)));
success = (call != nullptr);
}
if (!success) {
ciEnv::current()->record_failure("CodeCache is full");
return;
}
%}
ins_pipe(pipe_class_memory);
%}
// Intrinsics for String.compareTo()
instruct string_compareU(iRegP_R1 str1, iRegI_R2 cnt1, iRegP_R3 str2, iRegI_R4 cnt2,
iRegI_R0 result, iRegP_R10 tmp1, iRegL_R11 tmp2, rFlagsReg cr)
%{
predicate((UseSVE == 0) && (((StrCompNode*)n)->encoding() == StrIntrinsicNode::UU));
match(Set result (StrComp (Binary str1 cnt1) (Binary str2 cnt2)));
effect(KILL tmp1, KILL tmp2, USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2, KILL cr);
format %{ "String Compare $str1,$cnt1,$str2,$cnt2 -> $result # KILL $tmp1" %}
ins_encode %{
// Count is in 8-bit bytes; non-Compact chars are 16 bits.
__ string_compare($str1$$Register, $str2$$Register,
$cnt1$$Register, $cnt2$$Register, $result$$Register,
$tmp1$$Register, $tmp2$$Register,
fnoreg, fnoreg, fnoreg, pnoreg, pnoreg, StrIntrinsicNode::UU);
%}
ins_pipe(pipe_class_memory);
%}
instruct string_compareL(iRegP_R1 str1, iRegI_R2 cnt1, iRegP_R3 str2, iRegI_R4 cnt2,
iRegI_R0 result, iRegP_R10 tmp1, iRegL_R11 tmp2, rFlagsReg cr)
%{
predicate((UseSVE == 0) && (((StrCompNode*)n)->encoding() == StrIntrinsicNode::LL));
match(Set result (StrComp (Binary str1 cnt1) (Binary str2 cnt2)));
effect(KILL tmp1, KILL tmp2, USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2, KILL cr);
format %{ "String Compare $str1,$cnt1,$str2,$cnt2 -> $result # KILL $tmp1" %}
ins_encode %{
__ string_compare($str1$$Register, $str2$$Register,
$cnt1$$Register, $cnt2$$Register, $result$$Register,
$tmp1$$Register, $tmp2$$Register,
fnoreg, fnoreg, fnoreg, pnoreg, pnoreg, StrIntrinsicNode::LL);
%}
ins_pipe(pipe_class_memory);
%}
instruct string_compareUL(iRegP_R1 str1, iRegI_R2 cnt1, iRegP_R3 str2, iRegI_R4 cnt2,
iRegI_R0 result, iRegP_R10 tmp1, iRegL_R11 tmp2,
vRegD_V0 vtmp1, vRegD_V1 vtmp2, vRegD_V2 vtmp3, rFlagsReg cr)
%{
predicate((UseSVE == 0) && (((StrCompNode*)n)->encoding() == StrIntrinsicNode::UL));
match(Set result (StrComp (Binary str1 cnt1) (Binary str2 cnt2)));
effect(KILL tmp1, KILL tmp2, KILL vtmp1, KILL vtmp2, KILL vtmp3,
USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2, KILL cr);
format %{ "String Compare $str1,$cnt1,$str2,$cnt2 -> $result # KILL $tmp1, $tmp2, $vtmp1, $vtmp2, $vtmp3" %}
ins_encode %{
__ string_compare($str1$$Register, $str2$$Register,
$cnt1$$Register, $cnt2$$Register, $result$$Register,
$tmp1$$Register, $tmp2$$Register,
$vtmp1$$FloatRegister, $vtmp2$$FloatRegister,
$vtmp3$$FloatRegister, pnoreg, pnoreg, StrIntrinsicNode::UL);
%}
ins_pipe(pipe_class_memory);
%}
instruct string_compareLU(iRegP_R1 str1, iRegI_R2 cnt1, iRegP_R3 str2, iRegI_R4 cnt2,
iRegI_R0 result, iRegP_R10 tmp1, iRegL_R11 tmp2,
vRegD_V0 vtmp1, vRegD_V1 vtmp2, vRegD_V2 vtmp3, rFlagsReg cr)
%{
predicate((UseSVE == 0) && (((StrCompNode*)n)->encoding() == StrIntrinsicNode::LU));
match(Set result (StrComp (Binary str1 cnt1) (Binary str2 cnt2)));
effect(KILL tmp1, KILL tmp2, KILL vtmp1, KILL vtmp2, KILL vtmp3,
USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2, KILL cr);
format %{ "String Compare $str1,$cnt1,$str2,$cnt2 -> $result # KILL $tmp1, $tmp2, $vtmp1, $vtmp2, $vtmp3" %}
ins_encode %{
__ string_compare($str1$$Register, $str2$$Register,
$cnt1$$Register, $cnt2$$Register, $result$$Register,
$tmp1$$Register, $tmp2$$Register,
$vtmp1$$FloatRegister, $vtmp2$$FloatRegister,
$vtmp3$$FloatRegister, pnoreg, pnoreg, StrIntrinsicNode::LU);
%}
ins_pipe(pipe_class_memory);
%}
// Note that Z registers alias the corresponding NEON registers; we declare the vector operands of
// these string_compare variants as NEON register type for convenience so that the prototype of
// string_compare can be shared with all variants.
instruct string_compareLL_sve(iRegP_R1 str1, iRegI_R2 cnt1, iRegP_R3 str2, iRegI_R4 cnt2,
iRegI_R0 result, iRegP_R10 tmp1, iRegL_R11 tmp2,
vRegD_V0 vtmp1, vRegD_V1 vtmp2, pRegGov_P0 pgtmp1,
pRegGov_P1 pgtmp2, rFlagsReg cr)
%{
predicate((UseSVE > 0) && (((StrCompNode*)n)->encoding() == StrIntrinsicNode::LL));
match(Set result (StrComp (Binary str1 cnt1) (Binary str2 cnt2)));
effect(TEMP tmp1, TEMP tmp2, TEMP vtmp1, TEMP vtmp2, TEMP pgtmp1, TEMP pgtmp2,
USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2, KILL cr);
format %{ "String Compare $str1,$cnt1,$str2,$cnt2 -> $result # USE sve" %}
ins_encode %{
// Count is in 8-bit bytes; non-Compact chars are 16 bits.
__ string_compare($str1$$Register, $str2$$Register,
$cnt1$$Register, $cnt2$$Register, $result$$Register,
$tmp1$$Register, $tmp2$$Register,
$vtmp1$$FloatRegister, $vtmp2$$FloatRegister, fnoreg,
as_PRegister($pgtmp1$$reg), as_PRegister($pgtmp2$$reg),
StrIntrinsicNode::LL);
%}
ins_pipe(pipe_class_memory);
%}
instruct string_compareLU_sve(iRegP_R1 str1, iRegI_R2 cnt1, iRegP_R3 str2, iRegI_R4 cnt2,
iRegI_R0 result, iRegP_R10 tmp1, iRegL_R11 tmp2,
vRegD_V0 vtmp1, vRegD_V1 vtmp2, pRegGov_P0 pgtmp1,
pRegGov_P1 pgtmp2, rFlagsReg cr)
%{
predicate((UseSVE > 0) && (((StrCompNode*)n)->encoding() == StrIntrinsicNode::LU));
match(Set result (StrComp (Binary str1 cnt1) (Binary str2 cnt2)));
effect(TEMP tmp1, TEMP tmp2, TEMP vtmp1, TEMP vtmp2, TEMP pgtmp1, TEMP pgtmp2,
USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2, KILL cr);
format %{ "String Compare $str1,$cnt1,$str2,$cnt2 -> $result # USE sve" %}
ins_encode %{
// Count is in 8-bit bytes; non-Compact chars are 16 bits.
__ string_compare($str1$$Register, $str2$$Register,
$cnt1$$Register, $cnt2$$Register, $result$$Register,
$tmp1$$Register, $tmp2$$Register,
$vtmp1$$FloatRegister, $vtmp2$$FloatRegister, fnoreg,
as_PRegister($pgtmp1$$reg), as_PRegister($pgtmp2$$reg),
StrIntrinsicNode::LU);
%}
ins_pipe(pipe_class_memory);
%}
instruct string_compareUL_sve(iRegP_R1 str1, iRegI_R2 cnt1, iRegP_R3 str2, iRegI_R4 cnt2,
iRegI_R0 result, iRegP_R10 tmp1, iRegL_R11 tmp2,
vRegD_V0 vtmp1, vRegD_V1 vtmp2, pRegGov_P0 pgtmp1,
pRegGov_P1 pgtmp2, rFlagsReg cr)
%{
predicate((UseSVE > 0) && (((StrCompNode*)n)->encoding() == StrIntrinsicNode::UL));
match(Set result (StrComp (Binary str1 cnt1) (Binary str2 cnt2)));
effect(TEMP tmp1, TEMP tmp2, TEMP vtmp1, TEMP vtmp2, TEMP pgtmp1, TEMP pgtmp2,
USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2, KILL cr);
format %{ "String Compare $str1,$cnt1,$str2,$cnt2 -> $result # USE sve" %}
ins_encode %{
// Count is in 8-bit bytes; non-Compact chars are 16 bits.
__ string_compare($str1$$Register, $str2$$Register,
$cnt1$$Register, $cnt2$$Register, $result$$Register,
$tmp1$$Register, $tmp2$$Register,
$vtmp1$$FloatRegister, $vtmp2$$FloatRegister, fnoreg,
as_PRegister($pgtmp1$$reg), as_PRegister($pgtmp2$$reg),
StrIntrinsicNode::UL);
%}
ins_pipe(pipe_class_memory);
%}
instruct string_compareUU_sve(iRegP_R1 str1, iRegI_R2 cnt1, iRegP_R3 str2, iRegI_R4 cnt2,
iRegI_R0 result, iRegP_R10 tmp1, iRegL_R11 tmp2,
vRegD_V0 vtmp1, vRegD_V1 vtmp2, pRegGov_P0 pgtmp1,
pRegGov_P1 pgtmp2, rFlagsReg cr)
%{
predicate((UseSVE > 0) && (((StrCompNode*)n)->encoding() == StrIntrinsicNode::UU));
match(Set result (StrComp (Binary str1 cnt1) (Binary str2 cnt2)));
effect(TEMP tmp1, TEMP tmp2, TEMP vtmp1, TEMP vtmp2, TEMP pgtmp1, TEMP pgtmp2,
USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2, KILL cr);
format %{ "String Compare $str1,$cnt1,$str2,$cnt2 -> $result # USE sve" %}
ins_encode %{
// Count is in 8-bit bytes; non-Compact chars are 16 bits.
__ string_compare($str1$$Register, $str2$$Register,
$cnt1$$Register, $cnt2$$Register, $result$$Register,
$tmp1$$Register, $tmp2$$Register,
$vtmp1$$FloatRegister, $vtmp2$$FloatRegister, fnoreg,
as_PRegister($pgtmp1$$reg), as_PRegister($pgtmp2$$reg),
StrIntrinsicNode::UU);
%}
ins_pipe(pipe_class_memory);
%}
instruct string_indexofUU(iRegP_R1 str1, iRegI_R4 cnt1, iRegP_R3 str2, iRegI_R2 cnt2,
iRegI_R0 result, iRegINoSp tmp1, iRegINoSp tmp2,
iRegINoSp tmp3, iRegINoSp tmp4, iRegINoSp tmp5, iRegINoSp tmp6,
vRegD_V0 vtmp0, vRegD_V1 vtmp1, rFlagsReg cr)
%{
predicate(((StrIndexOfNode*)n)->encoding() == StrIntrinsicNode::UU);
match(Set result (StrIndexOf (Binary str1 cnt1) (Binary str2 cnt2)));
effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2,
TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP tmp4, TEMP tmp5, TEMP tmp6,
TEMP vtmp0, TEMP vtmp1, KILL cr);
format %{ "String IndexOf $str1,$cnt1,$str2,$cnt2 -> $result (UU) "
"# KILL $str1 $cnt1 $str2 $cnt2 $tmp1 $tmp2 $tmp3 $tmp4 $tmp5 $tmp6 V0-V1 cr" %}
ins_encode %{
__ string_indexof($str1$$Register, $str2$$Register,
$cnt1$$Register, $cnt2$$Register,
$tmp1$$Register, $tmp2$$Register,
$tmp3$$Register, $tmp4$$Register,
$tmp5$$Register, $tmp6$$Register,
-1, $result$$Register, StrIntrinsicNode::UU);
%}
ins_pipe(pipe_class_memory);
%}
instruct string_indexofLL(iRegP_R1 str1, iRegI_R4 cnt1, iRegP_R3 str2, iRegI_R2 cnt2,
iRegI_R0 result, iRegINoSp tmp1, iRegINoSp tmp2, iRegINoSp tmp3,
iRegINoSp tmp4, iRegINoSp tmp5, iRegINoSp tmp6,
vRegD_V0 vtmp0, vRegD_V1 vtmp1, rFlagsReg cr)
%{
predicate(((StrIndexOfNode*)n)->encoding() == StrIntrinsicNode::LL);
match(Set result (StrIndexOf (Binary str1 cnt1) (Binary str2 cnt2)));
effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2,
TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP tmp4, TEMP tmp5, TEMP tmp6,
TEMP vtmp0, TEMP vtmp1, KILL cr);
format %{ "String IndexOf $str1,$cnt1,$str2,$cnt2 -> $result (LL) "
"# KILL $str1 $cnt1 $str2 $cnt2 $tmp1 $tmp2 $tmp3 $tmp4 $tmp5 $tmp6 V0-V1 cr" %}
ins_encode %{
__ string_indexof($str1$$Register, $str2$$Register,
$cnt1$$Register, $cnt2$$Register,
$tmp1$$Register, $tmp2$$Register,
$tmp3$$Register, $tmp4$$Register,
$tmp5$$Register, $tmp6$$Register,
-1, $result$$Register, StrIntrinsicNode::LL);
%}
ins_pipe(pipe_class_memory);
%}
instruct string_indexofUL(iRegP_R1 str1, iRegI_R4 cnt1, iRegP_R3 str2, iRegI_R2 cnt2,
iRegI_R0 result, iRegINoSp tmp1, iRegINoSp tmp2,iRegINoSp tmp3,
iRegINoSp tmp4, iRegINoSp tmp5, iRegINoSp tmp6,
vRegD_V0 vtmp0, vRegD_V1 vtmp1, rFlagsReg cr)
%{
predicate(((StrIndexOfNode*)n)->encoding() == StrIntrinsicNode::UL);
match(Set result (StrIndexOf (Binary str1 cnt1) (Binary str2 cnt2)));
effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2,
TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP tmp4, TEMP tmp5,
TEMP tmp6, TEMP vtmp0, TEMP vtmp1, KILL cr);
format %{ "String IndexOf $str1,$cnt1,$str2,$cnt2 -> $result (UL) "
"# KILL $str1 cnt1 $str2 $cnt2 $tmp1 $tmp2 $tmp3 $tmp4 $tmp5 $tmp6 V0-V1 cr" %}
ins_encode %{
__ string_indexof($str1$$Register, $str2$$Register,
$cnt1$$Register, $cnt2$$Register,
$tmp1$$Register, $tmp2$$Register,
$tmp3$$Register, $tmp4$$Register,
$tmp5$$Register, $tmp6$$Register,
-1, $result$$Register, StrIntrinsicNode::UL);
%}
ins_pipe(pipe_class_memory);
%}
instruct string_indexof_conUU(iRegP_R1 str1, iRegI_R4 cnt1, iRegP_R3 str2,
immI_le_4 int_cnt2, iRegI_R0 result, iRegINoSp tmp1,
iRegINoSp tmp2, iRegINoSp tmp3, iRegINoSp tmp4, rFlagsReg cr)
%{
predicate(((StrIndexOfNode*)n)->encoding() == StrIntrinsicNode::UU);
match(Set result (StrIndexOf (Binary str1 cnt1) (Binary str2 int_cnt2)));
effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt1,
TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP tmp4, KILL cr);
format %{ "String IndexOf $str1,$cnt1,$str2,$int_cnt2 -> $result (UU) "
"# KILL $str1 $cnt1 $str2 $tmp1 $tmp2 $tmp3 $tmp4 cr" %}
ins_encode %{
int icnt2 = (int)$int_cnt2$$constant;
__ string_indexof($str1$$Register, $str2$$Register,
$cnt1$$Register, zr,
$tmp1$$Register, $tmp2$$Register,
$tmp3$$Register, $tmp4$$Register, zr, zr,
icnt2, $result$$Register, StrIntrinsicNode::UU);
%}
ins_pipe(pipe_class_memory);
%}
instruct string_indexof_conLL(iRegP_R1 str1, iRegI_R4 cnt1, iRegP_R3 str2,
immI_le_4 int_cnt2, iRegI_R0 result, iRegINoSp tmp1,
iRegINoSp tmp2, iRegINoSp tmp3, iRegINoSp tmp4, rFlagsReg cr)
%{
predicate(((StrIndexOfNode*)n)->encoding() == StrIntrinsicNode::LL);
match(Set result (StrIndexOf (Binary str1 cnt1) (Binary str2 int_cnt2)));
effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt1,
TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP tmp4, KILL cr);
format %{ "String IndexOf $str1,$cnt1,$str2,$int_cnt2 -> $result (LL) "
"# KILL $str1 $cnt1 $str2 $tmp1 $tmp2 $tmp3 $tmp4 cr" %}
ins_encode %{
int icnt2 = (int)$int_cnt2$$constant;
__ string_indexof($str1$$Register, $str2$$Register,
$cnt1$$Register, zr,
$tmp1$$Register, $tmp2$$Register,
$tmp3$$Register, $tmp4$$Register, zr, zr,
icnt2, $result$$Register, StrIntrinsicNode::LL);
%}
ins_pipe(pipe_class_memory);
%}
instruct string_indexof_conUL(iRegP_R1 str1, iRegI_R4 cnt1, iRegP_R3 str2,
immI_1 int_cnt2, iRegI_R0 result, iRegINoSp tmp1,
iRegINoSp tmp2, iRegINoSp tmp3, iRegINoSp tmp4, rFlagsReg cr)
%{
predicate(((StrIndexOfNode*)n)->encoding() == StrIntrinsicNode::UL);
match(Set result (StrIndexOf (Binary str1 cnt1) (Binary str2 int_cnt2)));
effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt1,
TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP tmp4, KILL cr);
format %{ "String IndexOf $str1,$cnt1,$str2,$int_cnt2 -> $result (UL) "
"# KILL $str1 $cnt1 $str2 $tmp1 $tmp2 $tmp3 $tmp4 cr" %}
ins_encode %{
int icnt2 = (int)$int_cnt2$$constant;
__ string_indexof($str1$$Register, $str2$$Register,
$cnt1$$Register, zr,
$tmp1$$Register, $tmp2$$Register,
$tmp3$$Register, $tmp4$$Register, zr, zr,
icnt2, $result$$Register, StrIntrinsicNode::UL);
%}
ins_pipe(pipe_class_memory);
%}
instruct string_indexof_char(iRegP_R1 str1, iRegI_R2 cnt1, iRegI_R3 ch,
iRegI_R0 result, iRegINoSp tmp1, iRegINoSp tmp2,
iRegINoSp tmp3, rFlagsReg cr)
%{
match(Set result (StrIndexOfChar (Binary str1 cnt1) ch));
predicate((UseSVE == 0) && (((StrIndexOfCharNode*)n)->encoding() == StrIntrinsicNode::U));
effect(USE_KILL str1, USE_KILL cnt1, USE_KILL ch,
TEMP tmp1, TEMP tmp2, TEMP tmp3, KILL cr);
format %{ "StringUTF16 IndexOf char[] $str1,$cnt1,$ch -> $result" %}
ins_encode %{
__ string_indexof_char($str1$$Register, $cnt1$$Register, $ch$$Register,
$result$$Register, $tmp1$$Register, $tmp2$$Register,
$tmp3$$Register);
%}
ins_pipe(pipe_class_memory);
%}
instruct stringL_indexof_char(iRegP_R1 str1, iRegI_R2 cnt1, iRegI_R3 ch,
iRegI_R0 result, iRegINoSp tmp1, iRegINoSp tmp2,
iRegINoSp tmp3, rFlagsReg cr)
%{
match(Set result (StrIndexOfChar (Binary str1 cnt1) ch));
predicate((UseSVE == 0) && (((StrIndexOfCharNode*)n)->encoding() == StrIntrinsicNode::L));
effect(USE_KILL str1, USE_KILL cnt1, USE_KILL ch,
TEMP tmp1, TEMP tmp2, TEMP tmp3, KILL cr);
format %{ "StringLatin1 IndexOf char[] $str1,$cnt1,$ch -> $result" %}
ins_encode %{
__ stringL_indexof_char($str1$$Register, $cnt1$$Register, $ch$$Register,
$result$$Register, $tmp1$$Register, $tmp2$$Register,
$tmp3$$Register);
%}
ins_pipe(pipe_class_memory);
%}
instruct stringL_indexof_char_sve(iRegP_R1 str1, iRegI_R2 cnt1, iRegI_R3 ch,
iRegI_R0 result, vecA ztmp1, vecA ztmp2,
pRegGov pgtmp, pReg ptmp, rFlagsReg cr) %{
predicate(UseSVE > 0 && ((StrIndexOfCharNode*)n)->encoding() == StrIntrinsicNode::L);
match(Set result (StrIndexOfChar (Binary str1 cnt1) ch));
effect(TEMP ztmp1, TEMP ztmp2, TEMP pgtmp, TEMP ptmp, KILL cr);
format %{ "StringLatin1 IndexOf char[] $str1,$cnt1,$ch -> $result # use sve" %}
ins_encode %{
__ string_indexof_char_sve($str1$$Register, $cnt1$$Register, $ch$$Register,
$result$$Register, $ztmp1$$FloatRegister,
$ztmp2$$FloatRegister, $pgtmp$$PRegister,
$ptmp$$PRegister, true /* isL */);
%}
ins_pipe(pipe_class_memory);
%}
instruct stringU_indexof_char_sve(iRegP_R1 str1, iRegI_R2 cnt1, iRegI_R3 ch,
iRegI_R0 result, vecA ztmp1, vecA ztmp2,
pRegGov pgtmp, pReg ptmp, rFlagsReg cr) %{
predicate(UseSVE > 0 && ((StrIndexOfCharNode*)n)->encoding() == StrIntrinsicNode::U);
match(Set result (StrIndexOfChar (Binary str1 cnt1) ch));
effect(TEMP ztmp1, TEMP ztmp2, TEMP pgtmp, TEMP ptmp, KILL cr);
format %{ "StringUTF16 IndexOf char[] $str1,$cnt1,$ch -> $result # use sve" %}
ins_encode %{
__ string_indexof_char_sve($str1$$Register, $cnt1$$Register, $ch$$Register,
$result$$Register, $ztmp1$$FloatRegister,
$ztmp2$$FloatRegister, $pgtmp$$PRegister,
$ptmp$$PRegister, false /* isL */);
%}
ins_pipe(pipe_class_memory);
%}
instruct string_equalsL(iRegP_R1 str1, iRegP_R3 str2, iRegI_R4 cnt,
iRegI_R0 result, rFlagsReg cr)
%{
predicate(((StrEqualsNode*)n)->encoding() == StrIntrinsicNode::LL);
match(Set result (StrEquals (Binary str1 str2) cnt));
effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt, KILL cr);
format %{ "String Equals $str1,$str2,$cnt -> $result" %}
ins_encode %{
// Count is in 8-bit bytes; non-Compact chars are 16 bits.
__ string_equals($str1$$Register, $str2$$Register,
$result$$Register, $cnt$$Register);
%}
ins_pipe(pipe_class_memory);
%}
instruct array_equalsB(iRegP_R1 ary1, iRegP_R2 ary2, iRegI_R0 result,
iRegP_R3 tmp1, iRegP_R4 tmp2, iRegP_R5 tmp3,
vRegD_V0 vtmp0, vRegD_V1 vtmp1, vRegD_V2 vtmp2, vRegD_V3 vtmp3,
vRegD_V4 vtmp4, vRegD_V5 vtmp5, vRegD_V6 vtmp6, vRegD_V7 vtmp7,
iRegP_R10 tmp, rFlagsReg cr)
%{
predicate(((AryEqNode*)n)->encoding() == StrIntrinsicNode::LL);
match(Set result (AryEq ary1 ary2));
effect(KILL tmp, USE_KILL ary1, USE_KILL ary2, TEMP tmp1, TEMP tmp2, TEMP tmp3,
TEMP vtmp0, TEMP vtmp1, TEMP vtmp2, TEMP vtmp3, TEMP vtmp4, TEMP vtmp5,
TEMP vtmp6, TEMP vtmp7, KILL cr);
format %{ "Array Equals $ary1,ary2 -> $result # KILL $ary1 $ary2 $tmp $tmp1 $tmp2 $tmp3 V0-V7 cr" %}
ins_encode %{
address tpc = __ arrays_equals($ary1$$Register, $ary2$$Register,
$tmp1$$Register, $tmp2$$Register, $tmp3$$Register,
$result$$Register, $tmp$$Register, 1);
if (tpc == nullptr) {
ciEnv::current()->record_failure("CodeCache is full");
return;
}
%}
ins_pipe(pipe_class_memory);
%}
instruct array_equalsC(iRegP_R1 ary1, iRegP_R2 ary2, iRegI_R0 result,
iRegP_R3 tmp1, iRegP_R4 tmp2, iRegP_R5 tmp3,
vRegD_V0 vtmp0, vRegD_V1 vtmp1, vRegD_V2 vtmp2, vRegD_V3 vtmp3,
vRegD_V4 vtmp4, vRegD_V5 vtmp5, vRegD_V6 vtmp6, vRegD_V7 vtmp7,
iRegP_R10 tmp, rFlagsReg cr)
%{
predicate(((AryEqNode*)n)->encoding() == StrIntrinsicNode::UU);
match(Set result (AryEq ary1 ary2));
effect(KILL tmp, USE_KILL ary1, USE_KILL ary2, TEMP tmp1, TEMP tmp2, TEMP tmp3,
TEMP vtmp0, TEMP vtmp1, TEMP vtmp2, TEMP vtmp3, TEMP vtmp4, TEMP vtmp5,
TEMP vtmp6, TEMP vtmp7, KILL cr);
format %{ "Array Equals $ary1,ary2 -> $result # KILL $ary1 $ary2 $tmp $tmp1 $tmp2 $tmp3 V0-V7 cr" %}
ins_encode %{
address tpc = __ arrays_equals($ary1$$Register, $ary2$$Register,
$tmp1$$Register, $tmp2$$Register, $tmp3$$Register,
$result$$Register, $tmp$$Register, 2);
if (tpc == nullptr) {
ciEnv::current()->record_failure("CodeCache is full");
return;
}
%}
ins_pipe(pipe_class_memory);
%}
instruct arrays_hashcode(iRegP_R1 ary, iRegI_R2 cnt, iRegI_R0 result, immI basic_type,
vRegD_V0 vtmp0, vRegD_V1 vtmp1, vRegD_V2 vtmp2, vRegD_V3 vtmp3,
vRegD_V4 vtmp4, vRegD_V5 vtmp5, vRegD_V6 vtmp6, vRegD_V7 vtmp7,
vRegD_V12 vtmp8, vRegD_V13 vtmp9, rFlagsReg cr)
%{
match(Set result (VectorizedHashCode (Binary ary cnt) (Binary result basic_type)));
effect(TEMP vtmp0, TEMP vtmp1, TEMP vtmp2, TEMP vtmp3, TEMP vtmp4, TEMP vtmp5, TEMP vtmp6,
TEMP vtmp7, TEMP vtmp8, TEMP vtmp9, USE_KILL ary, USE_KILL cnt, USE basic_type, KILL cr);
format %{ "Array HashCode array[] $ary,$cnt,$result,$basic_type -> $result // KILL all" %}
ins_encode %{
address tpc = __ arrays_hashcode($ary$$Register, $cnt$$Register, $result$$Register,
$vtmp3$$FloatRegister, $vtmp2$$FloatRegister,
$vtmp1$$FloatRegister, $vtmp0$$FloatRegister,
$vtmp4$$FloatRegister, $vtmp5$$FloatRegister,
$vtmp6$$FloatRegister, $vtmp7$$FloatRegister,
$vtmp8$$FloatRegister, $vtmp9$$FloatRegister,
(BasicType)$basic_type$$constant);
if (tpc == nullptr) {
ciEnv::current()->record_failure("CodeCache is full");
return;
}
%}
ins_pipe(pipe_class_memory);
%}
instruct count_positives(iRegP_R1 ary1, iRegI_R2 len, iRegI_R0 result, rFlagsReg cr)
%{
match(Set result (CountPositives ary1 len));
effect(USE_KILL ary1, USE_KILL len, KILL cr);
format %{ "count positives byte[] $ary1,$len -> $result" %}
ins_encode %{
address tpc = __ count_positives($ary1$$Register, $len$$Register, $result$$Register);
if (tpc == nullptr) {
ciEnv::current()->record_failure("CodeCache is full");
return;
}
%}
ins_pipe( pipe_slow );
%}
// fast char[] to byte[] compression
instruct string_compress(iRegP_R2 src, iRegP_R1 dst, iRegI_R3 len,
vRegD_V0 vtmp0, vRegD_V1 vtmp1, vRegD_V2 vtmp2,
vRegD_V3 vtmp3, vRegD_V4 vtmp4, vRegD_V5 vtmp5,
iRegI_R0 result, rFlagsReg cr)
%{
match(Set result (StrCompressedCopy src (Binary dst len)));
effect(TEMP vtmp0, TEMP vtmp1, TEMP vtmp2, TEMP vtmp3, TEMP vtmp4, TEMP vtmp5,
USE_KILL src, USE_KILL dst, USE len, KILL cr);
format %{ "String Compress $src,$dst,$len -> $result # KILL $src $dst V0-V5 cr" %}
ins_encode %{
__ char_array_compress($src$$Register, $dst$$Register, $len$$Register,
$result$$Register, $vtmp0$$FloatRegister, $vtmp1$$FloatRegister,
$vtmp2$$FloatRegister, $vtmp3$$FloatRegister,
$vtmp4$$FloatRegister, $vtmp5$$FloatRegister);
%}
ins_pipe(pipe_slow);
%}
// fast byte[] to char[] inflation
instruct string_inflate(Universe dummy, iRegP_R0 src, iRegP_R1 dst, iRegI_R2 len, iRegP_R3 tmp,
vRegD_V0 vtmp0, vRegD_V1 vtmp1, vRegD_V2 vtmp2, vRegD_V3 vtmp3,
vRegD_V4 vtmp4, vRegD_V5 vtmp5, vRegD_V6 vtmp6, rFlagsReg cr)
%{
match(Set dummy (StrInflatedCopy src (Binary dst len)));
effect(TEMP vtmp0, TEMP vtmp1, TEMP vtmp2, TEMP vtmp3,
TEMP vtmp4, TEMP vtmp5, TEMP vtmp6, TEMP tmp,
USE_KILL src, USE_KILL dst, USE_KILL len, KILL cr);
format %{ "String Inflate $src,$dst # KILL $tmp $src $dst $len V0-V6 cr" %}
ins_encode %{
address tpc = __ byte_array_inflate($src$$Register, $dst$$Register, $len$$Register,
$vtmp0$$FloatRegister, $vtmp1$$FloatRegister,
$vtmp2$$FloatRegister, $tmp$$Register);
if (tpc == nullptr) {
ciEnv::current()->record_failure("CodeCache is full");
return;
}
%}
ins_pipe(pipe_class_memory);
%}
// encode char[] to byte[] in ISO_8859_1
instruct encode_iso_array(iRegP_R2 src, iRegP_R1 dst, iRegI_R3 len,
vRegD_V0 vtmp0, vRegD_V1 vtmp1, vRegD_V2 vtmp2,
vRegD_V3 vtmp3, vRegD_V4 vtmp4, vRegD_V5 vtmp5,
iRegI_R0 result, rFlagsReg cr)
%{
predicate(!((EncodeISOArrayNode*)n)->is_ascii());
match(Set result (EncodeISOArray src (Binary dst len)));
effect(USE_KILL src, USE_KILL dst, USE len, KILL vtmp0, KILL vtmp1,
KILL vtmp2, KILL vtmp3, KILL vtmp4, KILL vtmp5, KILL cr);
format %{ "Encode ISO array $src,$dst,$len -> $result # KILL $src $dst V0-V5 cr" %}
ins_encode %{
__ encode_iso_array($src$$Register, $dst$$Register, $len$$Register,
$result$$Register, false,
$vtmp0$$FloatRegister, $vtmp1$$FloatRegister,
$vtmp2$$FloatRegister, $vtmp3$$FloatRegister,
$vtmp4$$FloatRegister, $vtmp5$$FloatRegister);
%}
ins_pipe(pipe_class_memory);
%}
instruct encode_ascii_array(iRegP_R2 src, iRegP_R1 dst, iRegI_R3 len,
vRegD_V0 vtmp0, vRegD_V1 vtmp1, vRegD_V2 vtmp2,
vRegD_V3 vtmp3, vRegD_V4 vtmp4, vRegD_V5 vtmp5,
iRegI_R0 result, rFlagsReg cr)
%{
predicate(((EncodeISOArrayNode*)n)->is_ascii());
match(Set result (EncodeISOArray src (Binary dst len)));
effect(USE_KILL src, USE_KILL dst, USE len, KILL vtmp0, KILL vtmp1,
KILL vtmp2, KILL vtmp3, KILL vtmp4, KILL vtmp5, KILL cr);
format %{ "Encode ASCII array $src,$dst,$len -> $result # KILL $src $dst V0-V5 cr" %}
ins_encode %{
__ encode_iso_array($src$$Register, $dst$$Register, $len$$Register,
$result$$Register, true,
$vtmp0$$FloatRegister, $vtmp1$$FloatRegister,
$vtmp2$$FloatRegister, $vtmp3$$FloatRegister,
$vtmp4$$FloatRegister, $vtmp5$$FloatRegister);
%}
ins_pipe(pipe_class_memory);
%}
//----------------------------- CompressBits/ExpandBits ------------------------
instruct compressBitsI_reg(iRegINoSp dst, iRegIorL2I src, iRegIorL2I mask,
vRegF tdst, vRegF tsrc, vRegF tmask) %{
match(Set dst (CompressBits src mask));
effect(TEMP tdst, TEMP tsrc, TEMP tmask);
format %{ "mov $tsrc, $src\n\t"
"mov $tmask, $mask\n\t"
"bext $tdst, $tsrc, $tmask\n\t"
"mov $dst, $tdst"
%}
ins_encode %{
__ mov($tsrc$$FloatRegister, __ S, 0, $src$$Register);
__ mov($tmask$$FloatRegister, __ S, 0, $mask$$Register);
__ sve_bext($tdst$$FloatRegister, __ S, $tsrc$$FloatRegister, $tmask$$FloatRegister);
__ mov($dst$$Register, $tdst$$FloatRegister, __ S, 0);
%}
ins_pipe(pipe_slow);
%}
instruct compressBitsI_memcon(iRegINoSp dst, memory4 mem, immI mask,
vRegF tdst, vRegF tsrc, vRegF tmask) %{
match(Set dst (CompressBits (LoadI mem) mask));
effect(TEMP tdst, TEMP tsrc, TEMP tmask);
format %{ "ldrs $tsrc, $mem\n\t"
"ldrs $tmask, $mask\n\t"
"bext $tdst, $tsrc, $tmask\n\t"
"mov $dst, $tdst"
%}
ins_encode %{
loadStore(masm, &MacroAssembler::ldrs, $tsrc$$FloatRegister, $mem->opcode(),
as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 4);
__ ldrs($tmask$$FloatRegister, $constantaddress($mask));
__ sve_bext($tdst$$FloatRegister, __ S, $tsrc$$FloatRegister, $tmask$$FloatRegister);
__ mov($dst$$Register, $tdst$$FloatRegister, __ S, 0);
%}
ins_pipe(pipe_slow);
%}
instruct compressBitsL_reg(iRegLNoSp dst, iRegL src, iRegL mask,
vRegD tdst, vRegD tsrc, vRegD tmask) %{
match(Set dst (CompressBits src mask));
effect(TEMP tdst, TEMP tsrc, TEMP tmask);
format %{ "mov $tsrc, $src\n\t"
"mov $tmask, $mask\n\t"
"bext $tdst, $tsrc, $tmask\n\t"
"mov $dst, $tdst"
%}
ins_encode %{
__ mov($tsrc$$FloatRegister, __ D, 0, $src$$Register);
__ mov($tmask$$FloatRegister, __ D, 0, $mask$$Register);
__ sve_bext($tdst$$FloatRegister, __ D, $tsrc$$FloatRegister, $tmask$$FloatRegister);
__ mov($dst$$Register, $tdst$$FloatRegister, __ D, 0);
%}
ins_pipe(pipe_slow);
%}
instruct compressBitsL_memcon(iRegLNoSp dst, memory8 mem, immL mask,
vRegF tdst, vRegF tsrc, vRegF tmask) %{
match(Set dst (CompressBits (LoadL mem) mask));
effect(TEMP tdst, TEMP tsrc, TEMP tmask);
format %{ "ldrd $tsrc, $mem\n\t"
"ldrd $tmask, $mask\n\t"
"bext $tdst, $tsrc, $tmask\n\t"
"mov $dst, $tdst"
%}
ins_encode %{
loadStore(masm, &MacroAssembler::ldrd, $tsrc$$FloatRegister, $mem->opcode(),
as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 8);
__ ldrd($tmask$$FloatRegister, $constantaddress($mask));
__ sve_bext($tdst$$FloatRegister, __ D, $tsrc$$FloatRegister, $tmask$$FloatRegister);
__ mov($dst$$Register, $tdst$$FloatRegister, __ D, 0);
%}
ins_pipe(pipe_slow);
%}
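// A scalar C++ reference (a sketch, not part of this file) for the bit
// compress that sve_bext performs on the single lane used above: the bits
// of src selected by mask are packed, in order, into the low bits of the
// result (PEXT-style):
//
//   uint64_t bext(uint64_t src, uint64_t mask) {
//     uint64_t res = 0;
//     for (uint64_t out = 1; mask != 0; out <<= 1) {
//       if (src & mask & -mask) res |= out;  // lowest remaining mask bit set in src?
//       mask &= mask - 1;                    // clear that mask bit
//     }
//     return res;
//   }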
instruct expandBitsI_reg(iRegINoSp dst, iRegIorL2I src, iRegIorL2I mask,
vRegF tdst, vRegF tsrc, vRegF tmask) %{
match(Set dst (ExpandBits src mask));
effect(TEMP tdst, TEMP tsrc, TEMP tmask);
format %{ "mov $tsrc, $src\n\t"
"mov $tmask, $mask\n\t"
"bdep $tdst, $tsrc, $tmask\n\t"
"mov $dst, $tdst"
%}
ins_encode %{
__ mov($tsrc$$FloatRegister, __ S, 0, $src$$Register);
__ mov($tmask$$FloatRegister, __ S, 0, $mask$$Register);
__ sve_bdep($tdst$$FloatRegister, __ S, $tsrc$$FloatRegister, $tmask$$FloatRegister);
__ mov($dst$$Register, $tdst$$FloatRegister, __ S, 0);
%}
ins_pipe(pipe_slow);
%}
instruct expandBitsI_memcon(iRegINoSp dst, memory4 mem, immI mask,
vRegF tdst, vRegF tsrc, vRegF tmask) %{
match(Set dst (ExpandBits (LoadI mem) mask));
effect(TEMP tdst, TEMP tsrc, TEMP tmask);
format %{ "ldrs $tsrc, $mem\n\t"
"ldrs $tmask, $mask\n\t"
"bdep $tdst, $tsrc, $tmask\n\t"
"mov $dst, $tdst"
%}
ins_encode %{
loadStore(masm, &MacroAssembler::ldrs, $tsrc$$FloatRegister, $mem->opcode(),
as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 4);
__ ldrs($tmask$$FloatRegister, $constantaddress($mask));
__ sve_bdep($tdst$$FloatRegister, __ S, $tsrc$$FloatRegister, $tmask$$FloatRegister);
__ mov($dst$$Register, $tdst$$FloatRegister, __ S, 0);
%}
ins_pipe(pipe_slow);
%}
instruct expandBitsL_reg(iRegLNoSp dst, iRegL src, iRegL mask,
vRegD tdst, vRegD tsrc, vRegD tmask) %{
match(Set dst (ExpandBits src mask));
effect(TEMP tdst, TEMP tsrc, TEMP tmask);
format %{ "mov $tsrc, $src\n\t"
"mov $tmask, $mask\n\t"
"bdep $tdst, $tsrc, $tmask\n\t"
"mov $dst, $tdst"
%}
ins_encode %{
__ mov($tsrc$$FloatRegister, __ D, 0, $src$$Register);
__ mov($tmask$$FloatRegister, __ D, 0, $mask$$Register);
__ sve_bdep($tdst$$FloatRegister, __ D, $tsrc$$FloatRegister, $tmask$$FloatRegister);
__ mov($dst$$Register, $tdst$$FloatRegister, __ D, 0);
%}
ins_pipe(pipe_slow);
%}
instruct expandBitsL_memcon(iRegINoSp dst, memory8 mem, immL mask,
vRegF tdst, vRegF tsrc, vRegF tmask) %{
match(Set dst (ExpandBits (LoadL mem) mask));
effect(TEMP tdst, TEMP tsrc, TEMP tmask);
format %{ "ldrd $tsrc, $mem\n\t"
"ldrd $tmask, $mask\n\t"
"bdep $tdst, $tsrc, $tmask\n\t"
"mov $dst, $tdst"
%}
ins_encode %{
loadStore(masm, &MacroAssembler::ldrd, $tsrc$$FloatRegister, $mem->opcode(),
as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp, 8);
__ ldrd($tmask$$FloatRegister, $constantaddress($mask));
__ sve_bdep($tdst$$FloatRegister, __ D, $tsrc$$FloatRegister, $tmask$$FloatRegister);
__ mov($dst$$Register, $tdst$$FloatRegister, __ D, 0);
%}
ins_pipe(pipe_slow);
%}
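// And the inverse for sve_bdep: the low bits of src are scattered, in order,
// to the bit positions selected by mask (PDEP-style). A scalar C++ sketch:
//
//   uint64_t bdep(uint64_t src, uint64_t mask) {
//     uint64_t res = 0;
//     for (uint64_t in = 1; mask != 0; in <<= 1) {
//       if (src & in) res |= mask & -mask;   // place next src bit at lowest mask bit
//       mask &= mask - 1;                    // clear that mask bit
//     }
//     return res;
//   }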
//----------------------------- Reinterpret ----------------------------------
// Reinterpret a half-precision float value in a floating point register to a general purpose register
instruct reinterpretHF2S(iRegINoSp dst, vRegF src) %{
match(Set dst (ReinterpretHF2S src));
format %{ "reinterpretHF2S $dst, $src" %}
ins_encode %{
__ smov($dst$$Register, $src$$FloatRegister, __ H, 0);
%}
ins_pipe(pipe_slow);
%}
// Reinterpret a half-precision float value in a general purpose register to a floating point register
instruct reinterpretS2HF(vRegF dst, iRegINoSp src) %{
match(Set dst (ReinterpretS2HF src));
format %{ "reinterpretS2HF $dst, $src" %}
ins_encode %{
__ mov($dst$$FloatRegister, __ H, 0, $src$$Register);
%}
ins_pipe(pipe_slow);
%}
// Without this optimization, ReinterpretS2HF (ConvF2HF src) would result in the following
// instructions (the first two are for ConvF2HF and the last instruction is for ReinterpretS2HF) -
// fcvt $tmp1_fpr, $src_fpr // Convert float to half-precision float
// mov $tmp2_gpr, $tmp1_fpr // Move half-precision float in FPR to a GPR
// mov $dst_fpr, $tmp2_gpr // Move the result from a GPR to an FPR
// The move from FPR to GPR in ConvF2HF and the move from GPR to FPR in ReinterpretS2HF
// can be omitted in this pattern, resulting in -
// fcvt $dst, $src // Convert float to half-precision float
instruct convF2HFAndS2HF(vRegF dst, vRegF src)
%{
match(Set dst (ReinterpretS2HF (ConvF2HF src)));
format %{ "convF2HFAndS2HF $dst, $src" %}
ins_encode %{
__ fcvtsh($dst$$FloatRegister, $src$$FloatRegister);
%}
ins_pipe(pipe_slow);
%}
// Without this optimization, ConvHF2F (ReinterpretHF2S src) would result in the following
// instructions (the first one is for ReinterpretHF2S and the last two are for ConvHF2F) -
// mov $tmp1_gpr, $src_fpr // Move the half-precision float from an FPR to a GPR
// mov $tmp2_fpr, $tmp1_gpr // Move the same value from GPR to an FPR
// fcvt $dst_fpr, $tmp2_fpr // Convert the half-precision float to 32-bit float
// The move from FPR to GPR in ReinterpretHF2S and the move from GPR to FPR in ConvHF2F
// can be omitted as the input (src) is already in an FPR required for the fcvths instruction
// resulting in -
// fcvt $dst, $src // Convert half-precision float to a 32-bit float
instruct convHF2SAndHF2F(vRegF dst, vRegF src)
%{
match(Set dst (ConvHF2F (ReinterpretHF2S src)));
format %{ "convHF2SAndHF2F $dst, $src" %}
ins_encode %{
__ fcvths($dst$$FloatRegister, $src$$FloatRegister);
%}
ins_pipe(pipe_slow);
%}
// ============================================================================
// This name is KNOWN by the ADLC and cannot be changed.
// The ADLC forces a 'TypeRawPtr::BOTTOM' output type
// for this guy.
instruct tlsLoadP(thread_RegP dst)
%{
match(Set dst (ThreadLocal));
ins_cost(0);
format %{ " -- \t// $dst=Thread::current(), empty" %}
size(0);
ins_encode( /*empty*/ );
ins_pipe(pipe_class_empty);
%}

//----------PEEPHOLE RULES-----------------------------------------------------
// These must follow all instruction definitions, as they use the names
// defined in the instruction definitions.
//
// peepmatch ( root_instr_name [preceding_instruction]* );
//
// peepconstraint ( instruction_number.operand_name relational_op
//                  instruction_number.operand_name [, ...] );
// // instruction numbers are zero-based using left-to-right order in peepmatch
//
// peepreplace ( instr_name ( [instruction_number.operand_name]* ) );
// // provide an instruction_number.operand_name for each operand that appears
// // in the replacement instruction's match rule
//
// ---------VM FLAGS---------------------------------------------------------
//
// All peephole optimizations can be turned off using -XX:-OptoPeephole
//
// Each peephole rule is given an identifying number starting with zero and
// increasing by one in the order seen by the parser. An individual peephole
// can be enabled, and all others disabled, by using -XX:OptoPeepholeAt=#
// on the command-line.
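// For example, running with -XX:OptoPeepholeAt=2 leaves only the peephole
// rule numbered 2 enabled (rules are numbered in parser order, see above).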
//
// ---------CURRENT LIMITATIONS----------------------------------------------
//
// Only matches adjacent instructions in the same basic block
// Only equality constraints
// Only constraints between operands, not (0.dest_reg == RAX_enc)
// Only one replacement instruction
//
// ---------EXAMPLE----------------------------------------------------------
//
// // pertinent parts of existing instructions in architecture description
// instruct movI(iRegINoSp dst, iRegI src)
// %{
// match(Set dst (CopyI src));
// %}
//
// instruct incI_iReg(iRegINoSp dst, immI1 src, rFlagsReg cr)
// %{
// match(Set dst (AddI dst src));
// effect(KILL cr);
// %}
//
// // Change (inc mov) to lea
// peephole %{
// // increment preceded by register-register move
// peepmatch ( incI_iReg movI );
// // require that the destination register of the increment
// // match the destination register of the move
// peepconstraint ( 0.dst == 1.dst );
// // construct a replacement instruction that sets
// // the destination to ( move's source register + one )
// peepreplace ( leaI_iReg_immI( 0.dst 1.src 0.src ) );
// %}
//
// The implementation no longer uses movX instructions, since the
// machine-independent system no longer uses CopyX nodes.
//
// peephole
// %{
// peepmatch (incI_iReg movI);
// peepconstraint (0.dst == 1.dst);
// peepreplace (leaI_iReg_immI(0.dst 1.src 0.src));
// %}
// peephole
// %{
// peepmatch (decI_iReg movI);
// peepconstraint (0.dst == 1.dst);
// peepreplace (leaI_iReg_immI(0.dst 1.src 0.src));
// %}
// peephole
// %{
// peepmatch (addI_iReg_imm movI);
// peepconstraint (0.dst == 1.dst);
// peepreplace (leaI_iReg_immI(0.dst 1.src 0.src));
// %}
// peephole
// %{
// peepmatch (incL_iReg movL);
// peepconstraint (0.dst == 1.dst);
// peepreplace (leaL_iReg_immL(0.dst 1.src 0.src));
// %}
// peephole
// %{
// peepmatch (decL_iReg movL);
// peepconstraint (0.dst == 1.dst);
// peepreplace (leaL_iReg_immL(0.dst 1.src 0.src));
// %}
// peephole
// %{
// peepmatch (addL_iReg_imm movL);
// peepconstraint (0.dst == 1.dst);
// peepreplace (leaL_iReg_immL(0.dst 1.src 0.src));
// %}
// peephole
// %{
// peepmatch (addP_iReg_imm movP);
// peepconstraint (0.dst == 1.dst);
// peepreplace (leaP_iReg_imm(0.dst 1.src 0.src));
// %}
// // Change load of spilled value to only a spill
// instruct storeI(memory mem, iRegI src)
// %{
// match(Set mem (StoreI mem src));
// %}
//
// instruct loadI(iRegINoSp dst, memory mem)
// %{
// match(Set dst (LoadI mem));
// %}
//

//----------SMARTSPILL RULES---------------------------------------------------
// These must follow all instruction definitions, as they use the names
// defined in the instruction definitions.
// Local Variables:
// mode: c++
// End: