//
// Copyright (c) 2011, 2025, Oracle and/or its affiliates. All rights reserved.
// Copyright (c) 2012, 2025 SAP SE. All rights reserved.
// DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
//
// This code is free software; you can redistribute it and/or modify it
// under the terms of the GNU General Public License version 2 only, as
// published by the Free Software Foundation.
//
// This code is distributed in the hope that it will be useful, but WITHOUT
// ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
// FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
// version 2 for more details (a copy is included in the LICENSE file that
// accompanied this code).
//
// You should have received a copy of the GNU General Public License version
// 2 along with this work; if not, write to the Free Software Foundation,
// Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
//
// Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
// or visit www.oracle.com if you need additional information or have any
// questions.
//
//

//
// PPC64 Architecture Description File
//

//----------REGISTER DEFINITION BLOCK------------------------------------------
// This information is used by the matcher and the register allocator to
// describe individual registers and classes of registers within the target
// architecture.
register %{
//----------Architecture Description Register Definitions----------------------
// General Registers
// "reg_def" name (register save type, C convention save type,
// ideal register type, encoding);
//
// Register Save Types:
//
// NS = No-Save: The register allocator assumes that these registers
// can be used without saving upon entry to the method, &
// that they do not need to be saved at call sites.
//
// SOC = Save-On-Call: The register allocator assumes that these registers
// can be used without saving upon entry to the method,
// but that they must be saved at call sites.
// These are called "volatiles" on ppc.
//
// SOE = Save-On-Entry: The register allocator assumes that these registers
// must be saved before using them upon entry to the
// method, but they do not need to be saved at call
// sites.
// These are called "nonvolatiles" on ppc.
//
// AS = Always-Save: The register allocator assumes that these registers
// must be saved before using them upon entry to the
// method, & that they must be saved at call sites.
//
// Ideal Register Type is used to determine how to save & restore a
// register. Op_RegI will get spilled with LoadI/StoreI, Op_RegP will get
// spilled with LoadP/StoreP. If the register supports both, use Op_RegI.
//
// The encoding number is the actual bit-pattern placed into the opcodes.
//
// PPC64 register definitions, based on the 64-bit PowerPC ELF ABI
// Supplement Version 1.7 as of 2003-10-29.
//
// For each 64-bit register we must define two registers: the register
// itself, e.g. R3, and a corresponding virtual other (32-bit-)'half',
// e.g. R3_H, which is needed by the allocator, but is not used
// for stores, loads, etc.
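//
// Illustrative example (restating the rule above): a 64-bit value in R3
// is modeled by the allocator as the adjacent pair (R3, R3_H). Only R3
// names the physical register in loads and stores; R3_H merely reserves
// the second 32-bit slot of the 64-bit value.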

// ----------------------------
// Integer/Long Registers
// ----------------------------

// PPC64 has 32 64-bit integer registers.

// types: v = volatile, nv = non-volatile, s = system
  reg_def R0 ( SOC, SOC, Op_RegI, 0, R0->as_VMReg() ); // v used in prologs
  reg_def R0_H ( SOC, SOC, Op_RegI, 99, R0->as_VMReg()->next() );
  reg_def R1 ( NS, NS, Op_RegI, 1, R1->as_VMReg() ); // s SP
  reg_def R1_H ( NS, NS, Op_RegI, 99, R1->as_VMReg()->next() );
  reg_def R2 ( SOC, SOC, Op_RegI, 2, R2->as_VMReg() ); // v TOC
  reg_def R2_H ( SOC, SOC, Op_RegI, 99, R2->as_VMReg()->next() );
  reg_def R3 ( SOC, SOC, Op_RegI, 3, R3->as_VMReg() ); // v iarg1 & iret
  reg_def R3_H ( SOC, SOC, Op_RegI, 99, R3->as_VMReg()->next() );
  reg_def R4 ( SOC, SOC, Op_RegI, 4, R4->as_VMReg() ); // v iarg2
  reg_def R4_H ( SOC, SOC, Op_RegI, 99, R4->as_VMReg()->next() );
  reg_def R5 ( SOC, SOC, Op_RegI, 5, R5->as_VMReg() ); // v iarg3
  reg_def R5_H ( SOC, SOC, Op_RegI, 99, R5->as_VMReg()->next() );
  reg_def R6 ( SOC, SOC, Op_RegI, 6, R6->as_VMReg() ); // v iarg4
  reg_def R6_H ( SOC, SOC, Op_RegI, 99, R6->as_VMReg()->next() );
  reg_def R7 ( SOC, SOC, Op_RegI, 7, R7->as_VMReg() ); // v iarg5
  reg_def R7_H ( SOC, SOC, Op_RegI, 99, R7->as_VMReg()->next() );
  reg_def R8 ( SOC, SOC, Op_RegI, 8, R8->as_VMReg() ); // v iarg6
  reg_def R8_H ( SOC, SOC, Op_RegI, 99, R8->as_VMReg()->next() );
  reg_def R9 ( SOC, SOC, Op_RegI, 9, R9->as_VMReg() ); // v iarg7
  reg_def R9_H ( SOC, SOC, Op_RegI, 99, R9->as_VMReg()->next() );
  reg_def R10 ( SOC, SOC, Op_RegI, 10, R10->as_VMReg() ); // v iarg8
  reg_def R10_H( SOC, SOC, Op_RegI, 99, R10->as_VMReg()->next());
  reg_def R11 ( SOC, SOC, Op_RegI, 11, R11->as_VMReg() ); // v ENV / scratch
  reg_def R11_H( SOC, SOC, Op_RegI, 99, R11->as_VMReg()->next());
  reg_def R12 ( SOC, SOC, Op_RegI, 12, R12->as_VMReg() ); // v scratch
  reg_def R12_H( SOC, SOC, Op_RegI, 99, R12->as_VMReg()->next());
  reg_def R13 ( NS, NS, Op_RegI, 13, R13->as_VMReg() ); // s system thread id
  reg_def R13_H( NS, NS, Op_RegI, 99, R13->as_VMReg()->next());
  reg_def R14 ( SOC, SOE, Op_RegI, 14, R14->as_VMReg() ); // nv
  reg_def R14_H( SOC, SOE, Op_RegI, 99, R14->as_VMReg()->next());
  reg_def R15 ( SOC, SOE, Op_RegI, 15, R15->as_VMReg() ); // nv
  reg_def R15_H( SOC, SOE, Op_RegI, 99, R15->as_VMReg()->next());
  reg_def R16 ( SOC, SOE, Op_RegI, 16, R16->as_VMReg() ); // nv
  reg_def R16_H( SOC, SOE, Op_RegI, 99, R16->as_VMReg()->next());
  reg_def R17 ( SOC, SOE, Op_RegI, 17, R17->as_VMReg() ); // nv
  reg_def R17_H( SOC, SOE, Op_RegI, 99, R17->as_VMReg()->next());
  reg_def R18 ( SOC, SOE, Op_RegI, 18, R18->as_VMReg() ); // nv
  reg_def R18_H( SOC, SOE, Op_RegI, 99, R18->as_VMReg()->next());
  reg_def R19 ( SOC, SOE, Op_RegI, 19, R19->as_VMReg() ); // nv
  reg_def R19_H( SOC, SOE, Op_RegI, 99, R19->as_VMReg()->next());
  reg_def R20 ( SOC, SOE, Op_RegI, 20, R20->as_VMReg() ); // nv
  reg_def R20_H( SOC, SOE, Op_RegI, 99, R20->as_VMReg()->next());
  reg_def R21 ( SOC, SOE, Op_RegI, 21, R21->as_VMReg() ); // nv
  reg_def R21_H( SOC, SOE, Op_RegI, 99, R21->as_VMReg()->next());
  reg_def R22 ( SOC, SOE, Op_RegI, 22, R22->as_VMReg() ); // nv
  reg_def R22_H( SOC, SOE, Op_RegI, 99, R22->as_VMReg()->next());
  reg_def R23 ( SOC, SOE, Op_RegI, 23, R23->as_VMReg() ); // nv
  reg_def R23_H( SOC, SOE, Op_RegI, 99, R23->as_VMReg()->next());
  reg_def R24 ( SOC, SOE, Op_RegI, 24, R24->as_VMReg() ); // nv
  reg_def R24_H( SOC, SOE, Op_RegI, 99, R24->as_VMReg()->next());
  reg_def R25 ( SOC, SOE, Op_RegI, 25, R25->as_VMReg() ); // nv
  reg_def R25_H( SOC, SOE, Op_RegI, 99, R25->as_VMReg()->next());
  reg_def R26 ( SOC, SOE, Op_RegI, 26, R26->as_VMReg() ); // nv
  reg_def R26_H( SOC, SOE, Op_RegI, 99, R26->as_VMReg()->next());
  reg_def R27 ( SOC, SOE, Op_RegI, 27, R27->as_VMReg() ); // nv
  reg_def R27_H( SOC, SOE, Op_RegI, 99, R27->as_VMReg()->next());
  reg_def R28 ( SOC, SOE, Op_RegI, 28, R28->as_VMReg() ); // nv
  reg_def R28_H( SOC, SOE, Op_RegI, 99, R28->as_VMReg()->next());
  reg_def R29 ( SOC, SOE, Op_RegI, 29, R29->as_VMReg() ); // nv
  reg_def R29_H( SOC, SOE, Op_RegI, 99, R29->as_VMReg()->next());
  reg_def R30 ( SOC, SOE, Op_RegI, 30, R30->as_VMReg() ); // nv
  reg_def R30_H( SOC, SOE, Op_RegI, 99, R30->as_VMReg()->next());
  reg_def R31 ( SOC, SOE, Op_RegI, 31, R31->as_VMReg() ); // nv
  reg_def R31_H( SOC, SOE, Op_RegI, 99, R31->as_VMReg()->next());


// ----------------------------
// Float/Double Registers
// ----------------------------

// Double Registers
// The rules of ADL require that double registers be defined in pairs.
// Each pair must be two 32-bit values, but not necessarily a pair of
// single float registers. In each pair, ADLC-assigned register numbers
// must be adjacent, with the lower number even. Finally, when the
// CPU stores such a register pair to memory, the word associated with
// the lower ADLC-assigned number must be stored to the lower address.

// PPC64 has 32 64-bit floating-point registers. Each can store a single
// or double precision floating-point value.

// types: v = volatile, nv = non-volatile, s = system
  reg_def F0 ( SOC, SOC, Op_RegF, 0, F0->as_VMReg() ); // v scratch
  reg_def F0_H ( SOC, SOC, Op_RegF, 99, F0->as_VMReg()->next() );
  reg_def F1 ( SOC, SOC, Op_RegF, 1, F1->as_VMReg() ); // v farg1 & fret
  reg_def F1_H ( SOC, SOC, Op_RegF, 99, F1->as_VMReg()->next() );
  reg_def F2 ( SOC, SOC, Op_RegF, 2, F2->as_VMReg() ); // v farg2
  reg_def F2_H ( SOC, SOC, Op_RegF, 99, F2->as_VMReg()->next() );
  reg_def F3 ( SOC, SOC, Op_RegF, 3, F3->as_VMReg() ); // v farg3
  reg_def F3_H ( SOC, SOC, Op_RegF, 99, F3->as_VMReg()->next() );
  reg_def F4 ( SOC, SOC, Op_RegF, 4, F4->as_VMReg() ); // v farg4
  reg_def F4_H ( SOC, SOC, Op_RegF, 99, F4->as_VMReg()->next() );
  reg_def F5 ( SOC, SOC, Op_RegF, 5, F5->as_VMReg() ); // v farg5
  reg_def F5_H ( SOC, SOC, Op_RegF, 99, F5->as_VMReg()->next() );
  reg_def F6 ( SOC, SOC, Op_RegF, 6, F6->as_VMReg() ); // v farg6
  reg_def F6_H ( SOC, SOC, Op_RegF, 99, F6->as_VMReg()->next() );
  reg_def F7 ( SOC, SOC, Op_RegF, 7, F7->as_VMReg() ); // v farg7
  reg_def F7_H ( SOC, SOC, Op_RegF, 99, F7->as_VMReg()->next() );
  reg_def F8 ( SOC, SOC, Op_RegF, 8, F8->as_VMReg() ); // v farg8
  reg_def F8_H ( SOC, SOC, Op_RegF, 99, F8->as_VMReg()->next() );
  reg_def F9 ( SOC, SOC, Op_RegF, 9, F9->as_VMReg() ); // v farg9
  reg_def F9_H ( SOC, SOC, Op_RegF, 99, F9->as_VMReg()->next() );
  reg_def F10 ( SOC, SOC, Op_RegF, 10, F10->as_VMReg() ); // v farg10
  reg_def F10_H( SOC, SOC, Op_RegF, 99, F10->as_VMReg()->next());
  reg_def F11 ( SOC, SOC, Op_RegF, 11, F11->as_VMReg() ); // v farg11
  reg_def F11_H( SOC, SOC, Op_RegF, 99, F11->as_VMReg()->next());
  reg_def F12 ( SOC, SOC, Op_RegF, 12, F12->as_VMReg() ); // v farg12
  reg_def F12_H( SOC, SOC, Op_RegF, 99, F12->as_VMReg()->next());
  reg_def F13 ( SOC, SOC, Op_RegF, 13, F13->as_VMReg() ); // v farg13
  reg_def F13_H( SOC, SOC, Op_RegF, 99, F13->as_VMReg()->next());
  reg_def F14 ( SOC, SOE, Op_RegF, 14, F14->as_VMReg() ); // nv
  reg_def F14_H( SOC, SOE, Op_RegF, 99, F14->as_VMReg()->next());
  reg_def F15 ( SOC, SOE, Op_RegF, 15, F15->as_VMReg() ); // nv
  reg_def F15_H( SOC, SOE, Op_RegF, 99, F15->as_VMReg()->next());
  reg_def F16 ( SOC, SOE, Op_RegF, 16, F16->as_VMReg() ); // nv
  reg_def F16_H( SOC, SOE, Op_RegF, 99, F16->as_VMReg()->next());
  reg_def F17 ( SOC, SOE, Op_RegF, 17, F17->as_VMReg() ); // nv
  reg_def F17_H( SOC, SOE, Op_RegF, 99, F17->as_VMReg()->next());
  reg_def F18 ( SOC, SOE, Op_RegF, 18, F18->as_VMReg() ); // nv
  reg_def F18_H( SOC, SOE, Op_RegF, 99, F18->as_VMReg()->next());
  reg_def F19 ( SOC, SOE, Op_RegF, 19, F19->as_VMReg() ); // nv
  reg_def F19_H( SOC, SOE, Op_RegF, 99, F19->as_VMReg()->next());
  reg_def F20 ( SOC, SOE, Op_RegF, 20, F20->as_VMReg() ); // nv
  reg_def F20_H( SOC, SOE, Op_RegF, 99, F20->as_VMReg()->next());
  reg_def F21 ( SOC, SOE, Op_RegF, 21, F21->as_VMReg() ); // nv
  reg_def F21_H( SOC, SOE, Op_RegF, 99, F21->as_VMReg()->next());
  reg_def F22 ( SOC, SOE, Op_RegF, 22, F22->as_VMReg() ); // nv
  reg_def F22_H( SOC, SOE, Op_RegF, 99, F22->as_VMReg()->next());
  reg_def F23 ( SOC, SOE, Op_RegF, 23, F23->as_VMReg() ); // nv
  reg_def F23_H( SOC, SOE, Op_RegF, 99, F23->as_VMReg()->next());
  reg_def F24 ( SOC, SOE, Op_RegF, 24, F24->as_VMReg() ); // nv
  reg_def F24_H( SOC, SOE, Op_RegF, 99, F24->as_VMReg()->next());
  reg_def F25 ( SOC, SOE, Op_RegF, 25, F25->as_VMReg() ); // nv
  reg_def F25_H( SOC, SOE, Op_RegF, 99, F25->as_VMReg()->next());
  reg_def F26 ( SOC, SOE, Op_RegF, 26, F26->as_VMReg() ); // nv
  reg_def F26_H( SOC, SOE, Op_RegF, 99, F26->as_VMReg()->next());
  reg_def F27 ( SOC, SOE, Op_RegF, 27, F27->as_VMReg() ); // nv
  reg_def F27_H( SOC, SOE, Op_RegF, 99, F27->as_VMReg()->next());
  reg_def F28 ( SOC, SOE, Op_RegF, 28, F28->as_VMReg() ); // nv
  reg_def F28_H( SOC, SOE, Op_RegF, 99, F28->as_VMReg()->next());
  reg_def F29 ( SOC, SOE, Op_RegF, 29, F29->as_VMReg() ); // nv
  reg_def F29_H( SOC, SOE, Op_RegF, 99, F29->as_VMReg()->next());
  reg_def F30 ( SOC, SOE, Op_RegF, 30, F30->as_VMReg() ); // nv
  reg_def F30_H( SOC, SOE, Op_RegF, 99, F30->as_VMReg()->next());
  reg_def F31 ( SOC, SOE, Op_RegF, 31, F31->as_VMReg() ); // nv
  reg_def F31_H( SOC, SOE, Op_RegF, 99, F31->as_VMReg()->next());

// ----------------------------
// Special Registers
// ----------------------------

// Condition Codes Flag Registers

// PPC64 has 8 condition code "registers" which are all contained
// in the CR register.

// types: v = volatile, nv = non-volatile, s = system
  reg_def CR0(SOC, SOC, Op_RegFlags, 0, CR0->as_VMReg()); // v
  reg_def CR1(SOC, SOC, Op_RegFlags, 1, CR1->as_VMReg()); // v
  reg_def CR2(SOC, SOC, Op_RegFlags, 2, CR2->as_VMReg()); // nv
  reg_def CR3(SOC, SOC, Op_RegFlags, 3, CR3->as_VMReg()); // nv
  reg_def CR4(SOC, SOC, Op_RegFlags, 4, CR4->as_VMReg()); // nv
  reg_def CR5(SOC, SOC, Op_RegFlags, 5, CR5->as_VMReg()); // v
  reg_def CR6(SOC, SOC, Op_RegFlags, 6, CR6->as_VMReg()); // v
  reg_def CR7(SOC, SOC, Op_RegFlags, 7, CR7->as_VMReg()); // v

// Special registers of PPC64

  reg_def SR_XER( SOC, SOC, Op_RegP, 0, SR_XER->as_VMReg()); // v
  reg_def SR_LR( SOC, SOC, Op_RegP, 1, SR_LR->as_VMReg()); // v
  reg_def SR_CTR( SOC, SOC, Op_RegP, 2, SR_CTR->as_VMReg()); // v
  reg_def SR_VRSAVE( SOC, SOC, Op_RegP, 3, SR_VRSAVE->as_VMReg()); // v
  reg_def SR_SPEFSCR(SOC, SOC, Op_RegP, 4, SR_SPEFSCR->as_VMReg()); // v
  reg_def SR_PPR( SOC, SOC, Op_RegP, 5, SR_PPR->as_VMReg()); // v

// ----------------------------
// Vector-Scalar Registers
// ----------------------------
// 1st 32 VSRs are aliases for the FPRs which are already defined above.
  reg_def VSR0 (SOC, SOC, Op_RegF, 0, VMRegImpl::Bad());
  reg_def VSR0_H (SOC, SOC, Op_RegF, 0, VMRegImpl::Bad());
  reg_def VSR0_J (SOC, SOC, Op_RegF, 0, VMRegImpl::Bad());
  reg_def VSR0_K (SOC, SOC, Op_RegF, 0, VMRegImpl::Bad());

  reg_def VSR1 (SOC, SOC, Op_RegF, 1, VMRegImpl::Bad());
  reg_def VSR1_H (SOC, SOC, Op_RegF, 1, VMRegImpl::Bad());
  reg_def VSR1_J (SOC, SOC, Op_RegF, 1, VMRegImpl::Bad());
  reg_def VSR1_K (SOC, SOC, Op_RegF, 1, VMRegImpl::Bad());

  reg_def VSR2 (SOC, SOC, Op_RegF, 2, VMRegImpl::Bad());
  reg_def VSR2_H (SOC, SOC, Op_RegF, 2, VMRegImpl::Bad());
  reg_def VSR2_J (SOC, SOC, Op_RegF, 2, VMRegImpl::Bad());
  reg_def VSR2_K (SOC, SOC, Op_RegF, 2, VMRegImpl::Bad());

  reg_def VSR3 (SOC, SOC, Op_RegF, 3, VMRegImpl::Bad());
  reg_def VSR3_H (SOC, SOC, Op_RegF, 3, VMRegImpl::Bad());
  reg_def VSR3_J (SOC, SOC, Op_RegF, 3, VMRegImpl::Bad());
  reg_def VSR3_K (SOC, SOC, Op_RegF, 3, VMRegImpl::Bad());

  reg_def VSR4 (SOC, SOC, Op_RegF, 4, VMRegImpl::Bad());
  reg_def VSR4_H (SOC, SOC, Op_RegF, 4, VMRegImpl::Bad());
  reg_def VSR4_J (SOC, SOC, Op_RegF, 4, VMRegImpl::Bad());
  reg_def VSR4_K (SOC, SOC, Op_RegF, 4, VMRegImpl::Bad());

  reg_def VSR5 (SOC, SOC, Op_RegF, 5, VMRegImpl::Bad());
  reg_def VSR5_H (SOC, SOC, Op_RegF, 5, VMRegImpl::Bad());
  reg_def VSR5_J (SOC, SOC, Op_RegF, 5, VMRegImpl::Bad());
  reg_def VSR5_K (SOC, SOC, Op_RegF, 5, VMRegImpl::Bad());

  reg_def VSR6 (SOC, SOC, Op_RegF, 6, VMRegImpl::Bad());
  reg_def VSR6_H (SOC, SOC, Op_RegF, 6, VMRegImpl::Bad());
  reg_def VSR6_J (SOC, SOC, Op_RegF, 6, VMRegImpl::Bad());
  reg_def VSR6_K (SOC, SOC, Op_RegF, 6, VMRegImpl::Bad());

  reg_def VSR7 (SOC, SOC, Op_RegF, 7, VMRegImpl::Bad());
  reg_def VSR7_H (SOC, SOC, Op_RegF, 7, VMRegImpl::Bad());
  reg_def VSR7_J (SOC, SOC, Op_RegF, 7, VMRegImpl::Bad());
  reg_def VSR7_K (SOC, SOC, Op_RegF, 7, VMRegImpl::Bad());

  reg_def VSR8 (SOC, SOC, Op_RegF, 8, VMRegImpl::Bad());
  reg_def VSR8_H (SOC, SOC, Op_RegF, 8, VMRegImpl::Bad());
  reg_def VSR8_J (SOC, SOC, Op_RegF, 8, VMRegImpl::Bad());
  reg_def VSR8_K (SOC, SOC, Op_RegF, 8, VMRegImpl::Bad());

  reg_def VSR9 (SOC, SOC, Op_RegF, 9, VMRegImpl::Bad());
  reg_def VSR9_H (SOC, SOC, Op_RegF, 9, VMRegImpl::Bad());
  reg_def VSR9_J (SOC, SOC, Op_RegF, 9, VMRegImpl::Bad());
  reg_def VSR9_K (SOC, SOC, Op_RegF, 9, VMRegImpl::Bad());

  reg_def VSR10 (SOC, SOC, Op_RegF, 10, VMRegImpl::Bad());
  reg_def VSR10_H(SOC, SOC, Op_RegF, 10, VMRegImpl::Bad());
  reg_def VSR10_J(SOC, SOC, Op_RegF, 10, VMRegImpl::Bad());
  reg_def VSR10_K(SOC, SOC, Op_RegF, 10, VMRegImpl::Bad());

  reg_def VSR11 (SOC, SOC, Op_RegF, 11, VMRegImpl::Bad());
  reg_def VSR11_H(SOC, SOC, Op_RegF, 11, VMRegImpl::Bad());
  reg_def VSR11_J(SOC, SOC, Op_RegF, 11, VMRegImpl::Bad());
  reg_def VSR11_K(SOC, SOC, Op_RegF, 11, VMRegImpl::Bad());

  reg_def VSR12 (SOC, SOC, Op_RegF, 12, VMRegImpl::Bad());
  reg_def VSR12_H(SOC, SOC, Op_RegF, 12, VMRegImpl::Bad());
  reg_def VSR12_J(SOC, SOC, Op_RegF, 12, VMRegImpl::Bad());
  reg_def VSR12_K(SOC, SOC, Op_RegF, 12, VMRegImpl::Bad());

  reg_def VSR13 (SOC, SOC, Op_RegF, 13, VMRegImpl::Bad());
  reg_def VSR13_H(SOC, SOC, Op_RegF, 13, VMRegImpl::Bad());
  reg_def VSR13_J(SOC, SOC, Op_RegF, 13, VMRegImpl::Bad());
  reg_def VSR13_K(SOC, SOC, Op_RegF, 13, VMRegImpl::Bad());

  reg_def VSR14 (SOC, SOC, Op_RegF, 14, VMRegImpl::Bad());
  reg_def VSR14_H(SOC, SOC, Op_RegF, 14, VMRegImpl::Bad());
  reg_def VSR14_J(SOC, SOC, Op_RegF, 14, VMRegImpl::Bad());
  reg_def VSR14_K(SOC, SOC, Op_RegF, 14, VMRegImpl::Bad());

  reg_def VSR15 (SOC, SOC, Op_RegF, 15, VMRegImpl::Bad());
  reg_def VSR15_H(SOC, SOC, Op_RegF, 15, VMRegImpl::Bad());
  reg_def VSR15_J(SOC, SOC, Op_RegF, 15, VMRegImpl::Bad());
  reg_def VSR15_K(SOC, SOC, Op_RegF, 15, VMRegImpl::Bad());

  reg_def VSR16 (SOC, SOC, Op_RegF, 16, VMRegImpl::Bad());
  reg_def VSR16_H(SOC, SOC, Op_RegF, 16, VMRegImpl::Bad());
  reg_def VSR16_J(SOC, SOC, Op_RegF, 16, VMRegImpl::Bad());
  reg_def VSR16_K(SOC, SOC, Op_RegF, 16, VMRegImpl::Bad());

  reg_def VSR17 (SOC, SOC, Op_RegF, 17, VMRegImpl::Bad());
  reg_def VSR17_H(SOC, SOC, Op_RegF, 17, VMRegImpl::Bad());
  reg_def VSR17_J(SOC, SOC, Op_RegF, 17, VMRegImpl::Bad());
  reg_def VSR17_K(SOC, SOC, Op_RegF, 17, VMRegImpl::Bad());

  reg_def VSR18 (SOC, SOC, Op_RegF, 18, VMRegImpl::Bad());
  reg_def VSR18_H(SOC, SOC, Op_RegF, 18, VMRegImpl::Bad());
  reg_def VSR18_J(SOC, SOC, Op_RegF, 18, VMRegImpl::Bad());
  reg_def VSR18_K(SOC, SOC, Op_RegF, 18, VMRegImpl::Bad());

  reg_def VSR19 (SOC, SOC, Op_RegF, 19, VMRegImpl::Bad());
  reg_def VSR19_H(SOC, SOC, Op_RegF, 19, VMRegImpl::Bad());
  reg_def VSR19_J(SOC, SOC, Op_RegF, 19, VMRegImpl::Bad());
  reg_def VSR19_K(SOC, SOC, Op_RegF, 19, VMRegImpl::Bad());

  reg_def VSR20 (SOC, SOC, Op_RegF, 20, VMRegImpl::Bad());
  reg_def VSR20_H(SOC, SOC, Op_RegF, 20, VMRegImpl::Bad());
  reg_def VSR20_J(SOC, SOC, Op_RegF, 20, VMRegImpl::Bad());
  reg_def VSR20_K(SOC, SOC, Op_RegF, 20, VMRegImpl::Bad());

  reg_def VSR21 (SOC, SOC, Op_RegF, 21, VMRegImpl::Bad());
  reg_def VSR21_H(SOC, SOC, Op_RegF, 21, VMRegImpl::Bad());
  reg_def VSR21_J(SOC, SOC, Op_RegF, 21, VMRegImpl::Bad());
  reg_def VSR21_K(SOC, SOC, Op_RegF, 21, VMRegImpl::Bad());

  reg_def VSR22 (SOC, SOC, Op_RegF, 22, VMRegImpl::Bad());
  reg_def VSR22_H(SOC, SOC, Op_RegF, 22, VMRegImpl::Bad());
  reg_def VSR22_J(SOC, SOC, Op_RegF, 22, VMRegImpl::Bad());
  reg_def VSR22_K(SOC, SOC, Op_RegF, 22, VMRegImpl::Bad());

  reg_def VSR23 (SOC, SOC, Op_RegF, 23, VMRegImpl::Bad());
  reg_def VSR23_H(SOC, SOC, Op_RegF, 23, VMRegImpl::Bad());
  reg_def VSR23_J(SOC, SOC, Op_RegF, 23, VMRegImpl::Bad());
  reg_def VSR23_K(SOC, SOC, Op_RegF, 23, VMRegImpl::Bad());

  reg_def VSR24 (SOC, SOC, Op_RegF, 24, VMRegImpl::Bad());
  reg_def VSR24_H(SOC, SOC, Op_RegF, 24, VMRegImpl::Bad());
  reg_def VSR24_J(SOC, SOC, Op_RegF, 24, VMRegImpl::Bad());
  reg_def VSR24_K(SOC, SOC, Op_RegF, 24, VMRegImpl::Bad());

  reg_def VSR25 (SOC, SOC, Op_RegF, 25, VMRegImpl::Bad());
  reg_def VSR25_H(SOC, SOC, Op_RegF, 25, VMRegImpl::Bad());
  reg_def VSR25_J(SOC, SOC, Op_RegF, 25, VMRegImpl::Bad());
  reg_def VSR25_K(SOC, SOC, Op_RegF, 25, VMRegImpl::Bad());

  reg_def VSR26 (SOC, SOC, Op_RegF, 26, VMRegImpl::Bad());
  reg_def VSR26_H(SOC, SOC, Op_RegF, 26, VMRegImpl::Bad());
  reg_def VSR26_J(SOC, SOC, Op_RegF, 26, VMRegImpl::Bad());
  reg_def VSR26_K(SOC, SOC, Op_RegF, 26, VMRegImpl::Bad());

  reg_def VSR27 (SOC, SOC, Op_RegF, 27, VMRegImpl::Bad());
  reg_def VSR27_H(SOC, SOC, Op_RegF, 27, VMRegImpl::Bad());
  reg_def VSR27_J(SOC, SOC, Op_RegF, 27, VMRegImpl::Bad());
  reg_def VSR27_K(SOC, SOC, Op_RegF, 27, VMRegImpl::Bad());

  reg_def VSR28 (SOC, SOC, Op_RegF, 28, VMRegImpl::Bad());
  reg_def VSR28_H(SOC, SOC, Op_RegF, 28, VMRegImpl::Bad());
  reg_def VSR28_J(SOC, SOC, Op_RegF, 28, VMRegImpl::Bad());
  reg_def VSR28_K(SOC, SOC, Op_RegF, 28, VMRegImpl::Bad());

  reg_def VSR29 (SOC, SOC, Op_RegF, 29, VMRegImpl::Bad());
  reg_def VSR29_H(SOC, SOC, Op_RegF, 29, VMRegImpl::Bad());
  reg_def VSR29_J(SOC, SOC, Op_RegF, 29, VMRegImpl::Bad());
  reg_def VSR29_K(SOC, SOC, Op_RegF, 29, VMRegImpl::Bad());

  reg_def VSR30 (SOC, SOC, Op_RegF, 30, VMRegImpl::Bad());
  reg_def VSR30_H(SOC, SOC, Op_RegF, 30, VMRegImpl::Bad());
  reg_def VSR30_J(SOC, SOC, Op_RegF, 30, VMRegImpl::Bad());
  reg_def VSR30_K(SOC, SOC, Op_RegF, 30, VMRegImpl::Bad());

  reg_def VSR31 (SOC, SOC, Op_RegF, 31, VMRegImpl::Bad());
  reg_def VSR31_H(SOC, SOC, Op_RegF, 31, VMRegImpl::Bad());
  reg_def VSR31_J(SOC, SOC, Op_RegF, 31, VMRegImpl::Bad());
  reg_def VSR31_K(SOC, SOC, Op_RegF, 31, VMRegImpl::Bad());

// 2nd 32 VSRs are aliases for the VRs which are only defined here.
  reg_def VSR32 (SOC, SOC, Op_RegF, 32, VSR32->as_VMReg() );
  reg_def VSR32_H(SOC, SOC, Op_RegF, 32, VSR32->as_VMReg()->next() );
  reg_def VSR32_J(SOC, SOC, Op_RegF, 32, VSR32->as_VMReg()->next(2));
  reg_def VSR32_K(SOC, SOC, Op_RegF, 32, VSR32->as_VMReg()->next(3));

  reg_def VSR33 (SOC, SOC, Op_RegF, 33, VSR33->as_VMReg() );
  reg_def VSR33_H(SOC, SOC, Op_RegF, 33, VSR33->as_VMReg()->next() );
  reg_def VSR33_J(SOC, SOC, Op_RegF, 33, VSR33->as_VMReg()->next(2));
  reg_def VSR33_K(SOC, SOC, Op_RegF, 33, VSR33->as_VMReg()->next(3));

  reg_def VSR34 (SOC, SOC, Op_RegF, 34, VSR34->as_VMReg() );
  reg_def VSR34_H(SOC, SOC, Op_RegF, 34, VSR34->as_VMReg()->next() );
  reg_def VSR34_J(SOC, SOC, Op_RegF, 34, VSR34->as_VMReg()->next(2));
  reg_def VSR34_K(SOC, SOC, Op_RegF, 34, VSR34->as_VMReg()->next(3));

  reg_def VSR35 (SOC, SOC, Op_RegF, 35, VSR35->as_VMReg() );
  reg_def VSR35_H(SOC, SOC, Op_RegF, 35, VSR35->as_VMReg()->next() );
  reg_def VSR35_J(SOC, SOC, Op_RegF, 35, VSR35->as_VMReg()->next(2));
  reg_def VSR35_K(SOC, SOC, Op_RegF, 35, VSR35->as_VMReg()->next(3));

  reg_def VSR36 (SOC, SOC, Op_RegF, 36, VSR36->as_VMReg() );
  reg_def VSR36_H(SOC, SOC, Op_RegF, 36, VSR36->as_VMReg()->next() );
  reg_def VSR36_J(SOC, SOC, Op_RegF, 36, VSR36->as_VMReg()->next(2));
  reg_def VSR36_K(SOC, SOC, Op_RegF, 36, VSR36->as_VMReg()->next(3));

  reg_def VSR37 (SOC, SOC, Op_RegF, 37, VSR37->as_VMReg() );
  reg_def VSR37_H(SOC, SOC, Op_RegF, 37, VSR37->as_VMReg()->next() );
  reg_def VSR37_J(SOC, SOC, Op_RegF, 37, VSR37->as_VMReg()->next(2));
  reg_def VSR37_K(SOC, SOC, Op_RegF, 37, VSR37->as_VMReg()->next(3));

  reg_def VSR38 (SOC, SOC, Op_RegF, 38, VSR38->as_VMReg() );
  reg_def VSR38_H(SOC, SOC, Op_RegF, 38, VSR38->as_VMReg()->next() );
  reg_def VSR38_J(SOC, SOC, Op_RegF, 38, VSR38->as_VMReg()->next(2));
  reg_def VSR38_K(SOC, SOC, Op_RegF, 38, VSR38->as_VMReg()->next(3));

  reg_def VSR39 (SOC, SOC, Op_RegF, 39, VSR39->as_VMReg() );
  reg_def VSR39_H(SOC, SOC, Op_RegF, 39, VSR39->as_VMReg()->next() );
  reg_def VSR39_J(SOC, SOC, Op_RegF, 39, VSR39->as_VMReg()->next(2));
  reg_def VSR39_K(SOC, SOC, Op_RegF, 39, VSR39->as_VMReg()->next(3));

  reg_def VSR40 (SOC, SOC, Op_RegF, 40, VSR40->as_VMReg() );
  reg_def VSR40_H(SOC, SOC, Op_RegF, 40, VSR40->as_VMReg()->next() );
  reg_def VSR40_J(SOC, SOC, Op_RegF, 40, VSR40->as_VMReg()->next(2));
  reg_def VSR40_K(SOC, SOC, Op_RegF, 40, VSR40->as_VMReg()->next(3));

  reg_def VSR41 (SOC, SOC, Op_RegF, 41, VSR41->as_VMReg() );
  reg_def VSR41_H(SOC, SOC, Op_RegF, 41, VSR41->as_VMReg()->next() );
  reg_def VSR41_J(SOC, SOC, Op_RegF, 41, VSR41->as_VMReg()->next(2));
  reg_def VSR41_K(SOC, SOC, Op_RegF, 41, VSR41->as_VMReg()->next(3));

  reg_def VSR42 (SOC, SOC, Op_RegF, 42, VSR42->as_VMReg() );
  reg_def VSR42_H(SOC, SOC, Op_RegF, 42, VSR42->as_VMReg()->next() );
  reg_def VSR42_J(SOC, SOC, Op_RegF, 42, VSR42->as_VMReg()->next(2));
  reg_def VSR42_K(SOC, SOC, Op_RegF, 42, VSR42->as_VMReg()->next(3));

  reg_def VSR43 (SOC, SOC, Op_RegF, 43, VSR43->as_VMReg() );
  reg_def VSR43_H(SOC, SOC, Op_RegF, 43, VSR43->as_VMReg()->next() );
  reg_def VSR43_J(SOC, SOC, Op_RegF, 43, VSR43->as_VMReg()->next(2));
  reg_def VSR43_K(SOC, SOC, Op_RegF, 43, VSR43->as_VMReg()->next(3));

  reg_def VSR44 (SOC, SOC, Op_RegF, 44, VSR44->as_VMReg() );
  reg_def VSR44_H(SOC, SOC, Op_RegF, 44, VSR44->as_VMReg()->next() );
  reg_def VSR44_J(SOC, SOC, Op_RegF, 44, VSR44->as_VMReg()->next(2));
  reg_def VSR44_K(SOC, SOC, Op_RegF, 44, VSR44->as_VMReg()->next(3));

  reg_def VSR45 (SOC, SOC, Op_RegF, 45, VSR45->as_VMReg() );
  reg_def VSR45_H(SOC, SOC, Op_RegF, 45, VSR45->as_VMReg()->next() );
  reg_def VSR45_J(SOC, SOC, Op_RegF, 45, VSR45->as_VMReg()->next(2));
  reg_def VSR45_K(SOC, SOC, Op_RegF, 45, VSR45->as_VMReg()->next(3));

  reg_def VSR46 (SOC, SOC, Op_RegF, 46, VSR46->as_VMReg() );
  reg_def VSR46_H(SOC, SOC, Op_RegF, 46, VSR46->as_VMReg()->next() );
  reg_def VSR46_J(SOC, SOC, Op_RegF, 46, VSR46->as_VMReg()->next(2));
  reg_def VSR46_K(SOC, SOC, Op_RegF, 46, VSR46->as_VMReg()->next(3));

  reg_def VSR47 (SOC, SOC, Op_RegF, 47, VSR47->as_VMReg() );
  reg_def VSR47_H(SOC, SOC, Op_RegF, 47, VSR47->as_VMReg()->next() );
  reg_def VSR47_J(SOC, SOC, Op_RegF, 47, VSR47->as_VMReg()->next(2));
  reg_def VSR47_K(SOC, SOC, Op_RegF, 47, VSR47->as_VMReg()->next(3));

  reg_def VSR48 (SOC, SOC, Op_RegF, 48, VSR48->as_VMReg() );
  reg_def VSR48_H(SOC, SOC, Op_RegF, 48, VSR48->as_VMReg()->next() );
  reg_def VSR48_J(SOC, SOC, Op_RegF, 48, VSR48->as_VMReg()->next(2));
  reg_def VSR48_K(SOC, SOC, Op_RegF, 48, VSR48->as_VMReg()->next(3));

  reg_def VSR49 (SOC, SOC, Op_RegF, 49, VSR49->as_VMReg() );
  reg_def VSR49_H(SOC, SOC, Op_RegF, 49, VSR49->as_VMReg()->next() );
  reg_def VSR49_J(SOC, SOC, Op_RegF, 49, VSR49->as_VMReg()->next(2));
  reg_def VSR49_K(SOC, SOC, Op_RegF, 49, VSR49->as_VMReg()->next(3));

  reg_def VSR50 (SOC, SOC, Op_RegF, 50, VSR50->as_VMReg() );
  reg_def VSR50_H(SOC, SOC, Op_RegF, 50, VSR50->as_VMReg()->next() );
  reg_def VSR50_J(SOC, SOC, Op_RegF, 50, VSR50->as_VMReg()->next(2));
  reg_def VSR50_K(SOC, SOC, Op_RegF, 50, VSR50->as_VMReg()->next(3));

  reg_def VSR51 (SOC, SOC, Op_RegF, 51, VSR51->as_VMReg() );
  reg_def VSR51_H(SOC, SOC, Op_RegF, 51, VSR51->as_VMReg()->next() );
  reg_def VSR51_J(SOC, SOC, Op_RegF, 51, VSR51->as_VMReg()->next(2));
  reg_def VSR51_K(SOC, SOC, Op_RegF, 51, VSR51->as_VMReg()->next(3));

  reg_def VSR52 (SOC, SOE, Op_RegF, 52, VSR52->as_VMReg() );
  reg_def VSR52_H(SOC, SOE, Op_RegF, 52, VSR52->as_VMReg()->next() );
  reg_def VSR52_J(SOC, SOE, Op_RegF, 52, VSR52->as_VMReg()->next(2));
  reg_def VSR52_K(SOC, SOE, Op_RegF, 52, VSR52->as_VMReg()->next(3));

  reg_def VSR53 (SOC, SOE, Op_RegF, 53, VSR53->as_VMReg() );
  reg_def VSR53_H(SOC, SOE, Op_RegF, 53, VSR53->as_VMReg()->next() );
  reg_def VSR53_J(SOC, SOE, Op_RegF, 53, VSR53->as_VMReg()->next(2));
  reg_def VSR53_K(SOC, SOE, Op_RegF, 53, VSR53->as_VMReg()->next(3));

  reg_def VSR54 (SOC, SOE, Op_RegF, 54, VSR54->as_VMReg() );
  reg_def VSR54_H(SOC, SOE, Op_RegF, 54, VSR54->as_VMReg()->next() );
  reg_def VSR54_J(SOC, SOE, Op_RegF, 54, VSR54->as_VMReg()->next(2));
  reg_def VSR54_K(SOC, SOE, Op_RegF, 54, VSR54->as_VMReg()->next(3));

  reg_def VSR55 (SOC, SOE, Op_RegF, 55, VSR55->as_VMReg() );
  reg_def VSR55_H(SOC, SOE, Op_RegF, 55, VSR55->as_VMReg()->next() );
  reg_def VSR55_J(SOC, SOE, Op_RegF, 55, VSR55->as_VMReg()->next(2));
  reg_def VSR55_K(SOC, SOE, Op_RegF, 55, VSR55->as_VMReg()->next(3));

  reg_def VSR56 (SOC, SOE, Op_RegF, 56, VSR56->as_VMReg() );
  reg_def VSR56_H(SOC, SOE, Op_RegF, 56, VSR56->as_VMReg()->next() );
  reg_def VSR56_J(SOC, SOE, Op_RegF, 56, VSR56->as_VMReg()->next(2));
  reg_def VSR56_K(SOC, SOE, Op_RegF, 56, VSR56->as_VMReg()->next(3));

  reg_def VSR57 (SOC, SOE, Op_RegF, 57, VSR57->as_VMReg() );
  reg_def VSR57_H(SOC, SOE, Op_RegF, 57, VSR57->as_VMReg()->next() );
  reg_def VSR57_J(SOC, SOE, Op_RegF, 57, VSR57->as_VMReg()->next(2));
  reg_def VSR57_K(SOC, SOE, Op_RegF, 57, VSR57->as_VMReg()->next(3));

  reg_def VSR58 (SOC, SOE, Op_RegF, 58, VSR58->as_VMReg() );
  reg_def VSR58_H(SOC, SOE, Op_RegF, 58, VSR58->as_VMReg()->next() );
  reg_def VSR58_J(SOC, SOE, Op_RegF, 58, VSR58->as_VMReg()->next(2));
  reg_def VSR58_K(SOC, SOE, Op_RegF, 58, VSR58->as_VMReg()->next(3));

  reg_def VSR59 (SOC, SOE, Op_RegF, 59, VSR59->as_VMReg() );
  reg_def VSR59_H(SOC, SOE, Op_RegF, 59, VSR59->as_VMReg()->next() );
  reg_def VSR59_J(SOC, SOE, Op_RegF, 59, VSR59->as_VMReg()->next(2));
  reg_def VSR59_K(SOC, SOE, Op_RegF, 59, VSR59->as_VMReg()->next(3));

  reg_def VSR60 (SOC, SOE, Op_RegF, 60, VSR60->as_VMReg() );
  reg_def VSR60_H(SOC, SOE, Op_RegF, 60, VSR60->as_VMReg()->next() );
  reg_def VSR60_J(SOC, SOE, Op_RegF, 60, VSR60->as_VMReg()->next(2));
  reg_def VSR60_K(SOC, SOE, Op_RegF, 60, VSR60->as_VMReg()->next(3));

  reg_def VSR61 (SOC, SOE, Op_RegF, 61, VSR61->as_VMReg() );
  reg_def VSR61_H(SOC, SOE, Op_RegF, 61, VSR61->as_VMReg()->next() );
  reg_def VSR61_J(SOC, SOE, Op_RegF, 61, VSR61->as_VMReg()->next(2));
  reg_def VSR61_K(SOC, SOE, Op_RegF, 61, VSR61->as_VMReg()->next(3));

  reg_def VSR62 (SOC, SOE, Op_RegF, 62, VSR62->as_VMReg() );
  reg_def VSR62_H(SOC, SOE, Op_RegF, 62, VSR62->as_VMReg()->next() );
  reg_def VSR62_J(SOC, SOE, Op_RegF, 62, VSR62->as_VMReg()->next(2));
  reg_def VSR62_K(SOC, SOE, Op_RegF, 62, VSR62->as_VMReg()->next(3));

  reg_def VSR63 (SOC, SOE, Op_RegF, 63, VSR63->as_VMReg() );
  reg_def VSR63_H(SOC, SOE, Op_RegF, 63, VSR63->as_VMReg()->next() );
  reg_def VSR63_J(SOC, SOE, Op_RegF, 63, VSR63->as_VMReg()->next(2));
  reg_def VSR63_K(SOC, SOE, Op_RegF, 63, VSR63->as_VMReg()->next(3));

// ----------------------------
// Specify priority of register selection within phases of register
// allocation. Highest priority is first. A useful heuristic is to
// give registers a low priority when they are required by machine
// instructions, like EAX and EDX on I486, and choose no-save registers
// before save-on-call, & save-on-call before save-on-entry. Registers
// which participate in fixed calling sequences should come last.
// Registers which are used as pairs must fall on an even boundary.

// It's worth about 1% on SPEC geomean to get this right.

// Chunk0, chunk1, and chunk2 form the MachRegisterNumbers enumeration
// in adGlobals_ppc.hpp which defines the <register>_num values, e.g.
// R3_num. Therefore, R3_num may not be (and in reality is not)
// the same as R3->encoding()! Furthermore, we cannot make any
// assumptions on ordering, e.g. R3_num may be less than R2_num.
// Additionally, the function
//   static enum RC rc_class(OptoReg::Name reg)
// maps a given <register>_num value to its chunk type (except for flags)
// and its current implementation relies on chunk0 and chunk1 having a
// size of 64 each.
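//
// Illustrative sketch of that size dependency (assumed shape only, not
// the actual code generated into ad_ppc.cpp):
//   if (reg < 64)  return rc_int;    // chunk0: integer register halves
//   if (reg < 128) return rc_float;  // chunk1: float register halves
//   ...                              // remaining chunks: vectors, flags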

// If you change this allocation class, please have a look at the
// default values for the parameters RoundRobinIntegerRegIntervalStart
// and RoundRobinFloatRegIntervalStart

alloc_class chunk0 (
  // Chunk0 contains *all* 64 integer register halves.

  // "non-volatile" registers
  R14, R14_H,
  R15, R15_H,
  R17, R17_H,
  R18, R18_H,
  R19, R19_H,
  R20, R20_H,
  R21, R21_H,
  R22, R22_H,
  R23, R23_H,
  R24, R24_H,
  R25, R25_H,
  R26, R26_H,
  R27, R27_H,
  R28, R28_H,
  R29, R29_H,
  R30, R30_H,
  R31, R31_H,

  // scratch/special registers
  R11, R11_H,
  R12, R12_H,

  // argument registers
  R10, R10_H,
  R9, R9_H,
  R8, R8_H,
  R7, R7_H,
  R6, R6_H,
  R5, R5_H,
  R4, R4_H,
  R3, R3_H,

  // special registers, not available for allocation
  R16, R16_H, // R16_thread
  R13, R13_H, // system thread id
  R2, R2_H, // may be used for TOC
  R1, R1_H, // SP
  R0, R0_H // R0 (scratch)
);

// If you change this allocation class, please have a look at the
// default values for the parameters RoundRobinIntegerRegIntervalStart
// and RoundRobinFloatRegIntervalStart

alloc_class chunk1 (
  // Chunk1 contains *all* 64 floating-point register halves.

  // scratch register
  F0, F0_H,

  // argument registers
  F13, F13_H,
  F12, F12_H,
  F11, F11_H,
  F10, F10_H,
  F9, F9_H,
  F8, F8_H,
  F7, F7_H,
  F6, F6_H,
  F5, F5_H,
  F4, F4_H,
  F3, F3_H,
  F2, F2_H,
  F1, F1_H,

  // non-volatile registers
  F14, F14_H,
  F15, F15_H,
  F16, F16_H,
  F17, F17_H,
  F18, F18_H,
  F19, F19_H,
  F20, F20_H,
  F21, F21_H,
  F22, F22_H,
  F23, F23_H,
  F24, F24_H,
  F25, F25_H,
  F26, F26_H,
  F27, F27_H,
  F28, F28_H,
  F29, F29_H,
  F30, F30_H,
  F31, F31_H
);

alloc_class chunk2 (
  VSR0 , VSR0_H , VSR0_J , VSR0_K ,
  VSR1 , VSR1_H , VSR1_J , VSR1_K ,
  VSR2 , VSR2_H , VSR2_J , VSR2_K ,
  VSR3 , VSR3_H , VSR3_J , VSR3_K ,
  VSR4 , VSR4_H , VSR4_J , VSR4_K ,
  VSR5 , VSR5_H , VSR5_J , VSR5_K ,
  VSR6 , VSR6_H , VSR6_J , VSR6_K ,
  VSR7 , VSR7_H , VSR7_J , VSR7_K ,
  VSR8 , VSR8_H , VSR8_J , VSR8_K ,
  VSR9 , VSR9_H , VSR9_J , VSR9_K ,
  VSR10, VSR10_H, VSR10_J, VSR10_K,
  VSR11, VSR11_H, VSR11_J, VSR11_K,
  VSR12, VSR12_H, VSR12_J, VSR12_K,
  VSR13, VSR13_H, VSR13_J, VSR13_K,
  VSR14, VSR14_H, VSR14_J, VSR14_K,
  VSR15, VSR15_H, VSR15_J, VSR15_K,
  VSR16, VSR16_H, VSR16_J, VSR16_K,
  VSR17, VSR17_H, VSR17_J, VSR17_K,
  VSR18, VSR18_H, VSR18_J, VSR18_K,
  VSR19, VSR19_H, VSR19_J, VSR19_K,
  VSR20, VSR20_H, VSR20_J, VSR20_K,
  VSR21, VSR21_H, VSR21_J, VSR21_K,
  VSR22, VSR22_H, VSR22_J, VSR22_K,
  VSR23, VSR23_H, VSR23_J, VSR23_K,
  VSR24, VSR24_H, VSR24_J, VSR24_K,
  VSR25, VSR25_H, VSR25_J, VSR25_K,
  VSR26, VSR26_H, VSR26_J, VSR26_K,
  VSR27, VSR27_H, VSR27_J, VSR27_K,
  VSR28, VSR28_H, VSR28_J, VSR28_K,
  VSR29, VSR29_H, VSR29_J, VSR29_K,
  VSR30, VSR30_H, VSR30_J, VSR30_K,
  VSR31, VSR31_H, VSR31_J, VSR31_K,
  VSR32, VSR32_H, VSR32_J, VSR32_K,
  VSR33, VSR33_H, VSR33_J, VSR33_K,
  VSR34, VSR34_H, VSR34_J, VSR34_K,
  VSR35, VSR35_H, VSR35_J, VSR35_K,
  VSR36, VSR36_H, VSR36_J, VSR36_K,
  VSR37, VSR37_H, VSR37_J, VSR37_K,
  VSR38, VSR38_H, VSR38_J, VSR38_K,
  VSR39, VSR39_H, VSR39_J, VSR39_K,
  VSR40, VSR40_H, VSR40_J, VSR40_K,
  VSR41, VSR41_H, VSR41_J, VSR41_K,
  VSR42, VSR42_H, VSR42_J, VSR42_K,
  VSR43, VSR43_H, VSR43_J, VSR43_K,
  VSR44, VSR44_H, VSR44_J, VSR44_K,
  VSR45, VSR45_H, VSR45_J, VSR45_K,
  VSR46, VSR46_H, VSR46_J, VSR46_K,
  VSR47, VSR47_H, VSR47_J, VSR47_K,
  VSR48, VSR48_H, VSR48_J, VSR48_K,
  VSR49, VSR49_H, VSR49_J, VSR49_K,
  VSR50, VSR50_H, VSR50_J, VSR50_K,
  VSR51, VSR51_H, VSR51_J, VSR51_K,
  VSR52, VSR52_H, VSR52_J, VSR52_K,
  VSR53, VSR53_H, VSR53_J, VSR53_K,
  VSR54, VSR54_H, VSR54_J, VSR54_K,
  VSR55, VSR55_H, VSR55_J, VSR55_K,
  VSR56, VSR56_H, VSR56_J, VSR56_K,
  VSR57, VSR57_H, VSR57_J, VSR57_K,
  VSR58, VSR58_H, VSR58_J, VSR58_K,
  VSR59, VSR59_H, VSR59_J, VSR59_K,
  VSR60, VSR60_H, VSR60_J, VSR60_K,
  VSR61, VSR61_H, VSR61_J, VSR61_K,
  VSR62, VSR62_H, VSR62_J, VSR62_K,
  VSR63, VSR63_H, VSR63_J, VSR63_K
);

alloc_class chunk3 (
  // Chunk3 contains *all* 8 condition code registers.
  CR0,
  CR1,
  CR2,
  CR3,
  CR4,
  CR5,
  CR6,
  CR7
);

alloc_class chunk4 (
  // special registers
  // These registers are not allocated, but used for nodes generated by postalloc expand.
  SR_XER,
  SR_LR,
  SR_CTR,
  SR_VRSAVE,
  SR_SPEFSCR,
  SR_PPR
);

//-------Architecture Description Register Classes-----------------------

// Several register classes are automatically defined based upon
// information in this architecture description.

// 1) reg_class inline_cache_reg ( as defined in frame section )
// 2) reg_class stack_slots( /* one chunk of stack-based "registers" */ )
//

// ----------------------------
// 32 Bit Register Classes
// ----------------------------

// We specify registers twice, once as read/write, and once read-only.
// We use the read-only registers for source operands. With this, we
// can include preset read only registers in this class, as a hard-coded
// '0'-register. (We used to simulate this on ppc.)

// 32 bit registers that can be read and written i.e. these registers
// can be dest (or src) of normal instructions.
reg_class bits32_reg_rw(
  /*R0*/  // R0
  /*R1*/  // SP
  R2,     // TOC
  R3,
  R4,
  R5,
  R6,
  R7,
  R8,
  R9,
  R10,
  R11,
  R12,
  /*R13*/ // system thread id
  R14,
  R15,
  /*R16*/ // R16_thread
  R17,
  R18,
  R19,
  R20,
  R21,
  R22,
  R23,
  R24,
  R25,
  R26,
  R27,
  R28,
  /*R29,*/ // global TOC
  R30,
  R31
);

// 32 bit registers that can only be read i.e. these registers can
// only be src of all instructions.
reg_class bits32_reg_ro(
  /*R0*/  // R0
  /*R1*/  // SP
  R2,     // TOC
  R3,
  R4,
  R5,
  R6,
  R7,
  R8,
  R9,
  R10,
  R11,
  R12,
  /*R13*/ // system thread id
  R14,
  R15,
  /*R16*/ // R16_thread
  R17,
  R18,
  R19,
  R20,
  R21,
  R22,
  R23,
  R24,
  R25,
  R26,
  R27,
  R28,
  /*R29,*/
  R30,
  R31
);

reg_class rscratch1_bits32_reg(R11);
reg_class rscratch2_bits32_reg(R12);
reg_class rarg1_bits32_reg(R3);
reg_class rarg2_bits32_reg(R4);
reg_class rarg3_bits32_reg(R5);
reg_class rarg4_bits32_reg(R6);

// ----------------------------
// 64 Bit Register Classes
// ----------------------------
// 64-bit build means 64-bit pointers means hi/lo pairs

reg_class rscratch1_bits64_reg(R11_H, R11);
reg_class rscratch2_bits64_reg(R12_H, R12);
reg_class rarg1_bits64_reg(R3_H, R3);
reg_class rarg2_bits64_reg(R4_H, R4);
reg_class rarg3_bits64_reg(R5_H, R5);
reg_class rarg4_bits64_reg(R6_H, R6);
reg_class rarg5_bits64_reg(R7_H, R7);
reg_class rarg6_bits64_reg(R8_H, R8);
// Thread register, 'written' by tlsLoadP, see there.
reg_class thread_bits64_reg(R16_H, R16);

reg_class r19_bits64_reg(R19_H, R19);

// 64 bit registers that can be read and written i.e. these registers
// can be dest (or src) of normal instructions.
reg_class bits64_reg_rw(
  /*R0_H, R0*/   // R0
  /*R1_H, R1*/   // SP
  R2_H, R2,      // TOC
  R3_H, R3,
  R4_H, R4,
  R5_H, R5,
  R6_H, R6,
  R7_H, R7,
  R8_H, R8,
  R9_H, R9,
  R10_H, R10,
  R11_H, R11,
  R12_H, R12,
  /*R13_H, R13*/ // system thread id
  R14_H, R14,
  R15_H, R15,
  /*R16_H, R16*/ // R16_thread
  R17_H, R17,
  R18_H, R18,
  R19_H, R19,
  R20_H, R20,
  R21_H, R21,
  R22_H, R22,
  R23_H, R23,
  R24_H, R24,
  R25_H, R25,
  R26_H, R26,
  R27_H, R27,
  R28_H, R28,
  /*R29_H, R29,*/
  R30_H, R30,
  R31_H, R31
);

// 64 bit registers used excluding r2, r11 and r12
// Used to hold the TOC to avoid collisions with expanded LeafCall which uses
// r2, r11 and r12 internally.
reg_class bits64_reg_leaf_call(
  /*R0_H, R0*/   // R0
  /*R1_H, R1*/   // SP
  /*R2_H, R2*/   // TOC
  R3_H, R3,
  R4_H, R4,
  R5_H, R5,
  R6_H, R6,
  R7_H, R7,
  R8_H, R8,
  R9_H, R9,
  R10_H, R10,
  /*R11_H, R11*/
  /*R12_H, R12*/
  /*R13_H, R13*/ // system thread id
  R14_H, R14,
  R15_H, R15,
  /*R16_H, R16*/ // R16_thread
  R17_H, R17,
  R18_H, R18,
  R19_H, R19,
  R20_H, R20,
  R21_H, R21,
  R22_H, R22,
  R23_H, R23,
  R24_H, R24,
  R25_H, R25,
  R26_H, R26,
  R27_H, R27,
  R28_H, R28,
  /*R29_H, R29,*/
  R30_H, R30,
  R31_H, R31
);

// Used to hold the TOC to avoid collisions with expanded DynamicCall
// which uses r19 as inline cache internally and expanded LeafCall which uses
// r2, r11 and r12 internally.
reg_class bits64_constant_table_base(
  /*R0_H, R0*/   // R0
  /*R1_H, R1*/   // SP
  /*R2_H, R2*/   // TOC
  R3_H, R3,
  R4_H, R4,
  R5_H, R5,
  R6_H, R6,
  R7_H, R7,
  R8_H, R8,
  R9_H, R9,
  R10_H, R10,
  /*R11_H, R11*/
  /*R12_H, R12*/
  /*R13_H, R13*/ // system thread id
  R14_H, R14,
  R15_H, R15,
  /*R16_H, R16*/ // R16_thread
  R17_H, R17,
  R18_H, R18,
  /*R19_H, R19*/
  R20_H, R20,
  R21_H, R21,
  R22_H, R22,
  R23_H, R23,
  R24_H, R24,
  R25_H, R25,
  R26_H, R26,
  R27_H, R27,
  R28_H, R28,
  /*R29_H, R29,*/
  R30_H, R30,
  R31_H, R31
);

// 64 bit registers that can only be read i.e. these registers can
// only be src of all instructions.
reg_class bits64_reg_ro(
  /*R0_H, R0*/   // R0
  R1_H, R1,
  R2_H, R2,      // TOC
  R3_H, R3,
  R4_H, R4,
  R5_H, R5,
  R6_H, R6,
  R7_H, R7,
  R8_H, R8,
  R9_H, R9,
  R10_H, R10,
  R11_H, R11,
  R12_H, R12,
  /*R13_H, R13*/ // system thread id
  R14_H, R14,
  R15_H, R15,
  R16_H, R16,    // R16_thread
  R17_H, R17,
  R18_H, R18,
  R19_H, R19,
  R20_H, R20,
  R21_H, R21,
  R22_H, R22,
  R23_H, R23,
  R24_H, R24,
  R25_H, R25,
  R26_H, R26,
  R27_H, R27,
  R28_H, R28,
  /*R29_H, R29,*/ // TODO: let allocator handle TOC!!
  R30_H, R30,
  R31_H, R31
);


// ----------------------------
// Special Class for Condition Code Flags Register

reg_class int_flags(
  /*CR0*/ // scratch
  /*CR1*/ // scratch
  /*CR2*/ // nv!
  /*CR3*/ // nv!
  /*CR4*/ // nv!
  CR5,
  CR6,
  CR7
);

reg_class int_flags_ro(
  CR0,
  CR1,
  CR2,
  CR3,
  CR4,
  CR5,
  CR6,
  CR7
);

reg_class int_flags_CR0(CR0);
reg_class int_flags_CR1(CR1);
reg_class int_flags_CR6(CR6);
reg_class ctr_reg(SR_CTR);

// ----------------------------
// Float Register Classes
// ----------------------------

reg_class flt_reg(
  F0,
  F1,
  F2,
  F3,
  F4,
  F5,
  F6,
  F7,
  F8,
  F9,
  F10,
  F11,
  F12,
  F13,
  F14, // nv!
  F15, // nv!
  F16, // nv!
  F17, // nv!
  F18, // nv!
  F19, // nv!
  F20, // nv!
  F21, // nv!
  F22, // nv!
  F23, // nv!
  F24, // nv!
  F25, // nv!
  F26, // nv!
  F27, // nv!
  F28, // nv!
  F29, // nv!
  F30, // nv!
  F31  // nv!
);

// Double precision float registers have virtual `high halves' that
// are needed by the allocator.
reg_class dbl_reg(
  F0, F0_H,
  F1, F1_H,
  F2, F2_H,
  F3, F3_H,
  F4, F4_H,
  F5, F5_H,
  F6, F6_H,
  F7, F7_H,
  F8, F8_H,
  F9, F9_H,
  F10, F10_H,
  F11, F11_H,
  F12, F12_H,
  F13, F13_H,
  F14, F14_H, // nv!
  F15, F15_H, // nv!
  F16, F16_H, // nv!
  F17, F17_H, // nv!
  F18, F18_H, // nv!
  F19, F19_H, // nv!
  F20, F20_H, // nv!
  F21, F21_H, // nv!
  F22, F22_H, // nv!
  F23, F23_H, // nv!
  F24, F24_H, // nv!
  F25, F25_H, // nv!
  F26, F26_H, // nv!
  F27, F27_H, // nv!
  F28, F28_H, // nv!
  F29, F29_H, // nv!
  F30, F30_H, // nv!
  F31, F31_H  // nv!
);

// ----------------------------
// Vector-Scalar Register Class
// ----------------------------

reg_class vs_reg(
  VSR32, VSR32_H, VSR32_J, VSR32_K,
  VSR33, VSR33_H, VSR33_J, VSR33_K,
  VSR34, VSR34_H, VSR34_J, VSR34_K,
  VSR35, VSR35_H, VSR35_J, VSR35_K,
  VSR36, VSR36_H, VSR36_J, VSR36_K,
  VSR37, VSR37_H, VSR37_J, VSR37_K,
  VSR38, VSR38_H, VSR38_J, VSR38_K,
  VSR39, VSR39_H, VSR39_J, VSR39_K,
  VSR40, VSR40_H, VSR40_J, VSR40_K,
  VSR41, VSR41_H, VSR41_J, VSR41_K,
  VSR42, VSR42_H, VSR42_J, VSR42_K,
  VSR43, VSR43_H, VSR43_J, VSR43_K,
  VSR44, VSR44_H, VSR44_J, VSR44_K,
  VSR45, VSR45_H, VSR45_J, VSR45_K,
  VSR46, VSR46_H, VSR46_J, VSR46_K,
  VSR47, VSR47_H, VSR47_J, VSR47_K,
  VSR48, VSR48_H, VSR48_J, VSR48_K,
  VSR49, VSR49_H, VSR49_J, VSR49_K,
  VSR50, VSR50_H, VSR50_J, VSR50_K,
  VSR51, VSR51_H, VSR51_J, VSR51_K,
  VSR52, VSR52_H, VSR52_J, VSR52_K, // non-volatile
  VSR53, VSR53_H, VSR53_J, VSR53_K, // non-volatile
  VSR54, VSR54_H, VSR54_J, VSR54_K, // non-volatile
  VSR55, VSR55_H, VSR55_J, VSR55_K, // non-volatile
  VSR56, VSR56_H, VSR56_J, VSR56_K, // non-volatile
  VSR57, VSR57_H, VSR57_J, VSR57_K, // non-volatile
  VSR58, VSR58_H, VSR58_J, VSR58_K, // non-volatile
  VSR59, VSR59_H, VSR59_J, VSR59_K, // non-volatile
  VSR60, VSR60_H, VSR60_J, VSR60_K, // non-volatile
  VSR61, VSR61_H, VSR61_J, VSR61_K, // non-volatile
  VSR62, VSR62_H, VSR62_J, VSR62_K, // non-volatile
  VSR63, VSR63_H, VSR63_J, VSR63_K  // non-volatile
);

%}

//----------DEFINITION BLOCK---------------------------------------------------
// Define name --> value mappings to inform the ADLC of an integer valued name
// Current support includes integer values in the range [0, 0x7FFFFFFF]
// Format:
// int_def <name> ( <int_value>, <expression>);
// Generated Code in ad_<arch>.hpp
// #define <name> (<expression>)
// // value == <int_value>
// Generated code in ad_<arch>.cpp adlc_verification()
// assert( <name> == <int_value>, "Expect (<expression>) to equal <int_value>");
//
definitions %{
  // The default cost (of an ALU instruction).
  int_def DEFAULT_COST_LOW ( 30, 30);
  int_def DEFAULT_COST ( 100, 100);
  int_def HUGE_COST (1000000, 1000000);

  // Memory refs
  int_def MEMORY_REF_COST_LOW ( 200, DEFAULT_COST * 2);
  int_def MEMORY_REF_COST ( 300, DEFAULT_COST * 3);

  // Branches are even more expensive.
  int_def BRANCH_COST ( 900, DEFAULT_COST * 9);
  int_def CALL_COST ( 1300, DEFAULT_COST * 13);
%}
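
// Worked instance of the int_def format documented above (assuming the
// generated header is named ad_ppc.hpp): int_def DEFAULT_COST(100, 100)
// yields
//   #define DEFAULT_COST (100)
// and adlc_verification() asserts that the expression still equals 100.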


//----------SOURCE BLOCK-------------------------------------------------------
// This is a block of C++ code which provides values, functions, and
// definitions necessary in the rest of the architecture description.
source_hpp %{
// Header information of the source block.
// Method declarations/definitions which are used outside
// the ad-scope can conveniently be defined here.
//
// To keep related declarations/definitions/uses close together,
// we switch between source %{ }% and source_hpp %{ }% freely as needed.

#include "opto/convertnode.hpp"

// Returns true if Node n is followed by a MemBar node that
// will do an acquire. If so, this node must not do the acquire
// operation.
bool followed_by_acquire(const Node *n);
%}

source %{

#include "opto/c2_CodeStubs.hpp"
#include "oops/klass.inline.hpp"

void PhaseOutput::pd_perform_mach_node_analysis() {
}

int MachNode::pd_alignment_required() const {
  return 1;
}

int MachNode::compute_padding(int current_offset) const {
  return 0;
}

// Should the matcher clone input 'm' of node 'n'?
bool Matcher::pd_clone_node(Node* n, Node* m, Matcher::MStack& mstack) {
  if (is_encode_and_store_pattern(n, m)) {
    mstack.push(m, Visit);
    return true;
  }
  return false;
}

// Should the Matcher clone shifts on addressing modes, expecting them
// to be subsumed into complex addressing expressions or compute them
// into registers?
bool Matcher::pd_clone_address_expressions(AddPNode* m, Matcher::MStack& mstack, VectorSet& address_visited) {
  return clone_base_plus_offset_address(m, mstack, address_visited);
}

// Optimize load-acquire.
//
// Check if acquire is unnecessary due to following operation that does
// acquire anyways.
// Walk the pattern:
//
//    n: Load.acq
//         |
//    MemBarAcquire
//     |         |
//  Proj(ctrl)  Proj(mem)
//     |         |
//    MemBarRelease/Volatile
//
bool followed_by_acquire(const Node *load) {
  assert(load->is_Load(), "So far implemented only for loads.");

  // Find MemBarAcquire.
  const Node *mba = nullptr;
  for (DUIterator_Fast imax, i = load->fast_outs(imax); i < imax; i++) {
    const Node *out = load->fast_out(i);
    if (out->Opcode() == Op_MemBarAcquire) {
      if (out->in(0) == load) continue; // Skip control edge, membar should be found via precedence edge.
      mba = out;
      break;
    }
  }
  if (!mba) return false;

  // Find following MemBar node.
  //
  // The following node must be reachable by control AND memory
  // edge to assure no other operations are in between the two nodes.
  //
  // So first get the Proj node, mem_proj, to use it to iterate forward.
  Node *mem_proj = nullptr;
  for (DUIterator_Fast imax, i = mba->fast_outs(imax); i < imax; i++) {
    mem_proj = mba->fast_out(i); // Runs out of bounds and asserts if Proj not found.
    assert(mem_proj->is_Proj(), "only projections here");
    ProjNode *proj = mem_proj->as_Proj();
    if (proj->_con == TypeFunc::Memory &&
        !Compile::current()->node_arena()->contains(mem_proj)) // Unmatched old-space only
      break;
  }
  assert(mem_proj->as_Proj()->_con == TypeFunc::Memory, "Graph broken");

  // Search MemBar behind Proj. If there are other memory operations
  // behind the Proj we lost.
  for (DUIterator_Fast jmax, j = mem_proj->fast_outs(jmax); j < jmax; j++) {
    Node *x = mem_proj->fast_out(j);
    // Proj might have an edge to a store or load node which precedes the membar.
    if (x->is_Mem()) return false;

    // On PPC64 release and volatile are implemented by an instruction
    // that also has acquire semantics. I.e. there is no need for an
    // acquire before these.
    int xop = x->Opcode();
    if (xop == Op_MemBarRelease || xop == Op_MemBarVolatile) {
      // Make sure we're not missing Call/Phi/MergeMem by checking
      // control edges. The control edge must directly lead back
      // to the MemBarAcquire.
      Node *ctrl_proj = x->in(0);
      if (ctrl_proj->is_Proj() && ctrl_proj->in(0) == mba) {
        return true;
      }
    }
  }

  return false;
}

#define __ masm->

// Tertiary op of a LoadP or StoreP encoding.
#define REGP_OP true

// ****************************************************************************

// REQUIRED FUNCTIONALITY

// !!!!! Special hack to get all types of calls to specify the byte offset
// from the start of the call to the point where the return address
// will point.

// PPC port: Removed use of lazy constant construct.

int MachCallStaticJavaNode::ret_addr_offset() {
  // It's only a single branch-and-link instruction.
  return 4;
}

int MachCallDynamicJavaNode::ret_addr_offset() {
  // Offset is 4 with postalloc expanded calls (bl is one instruction). We use
  // postalloc expanded calls if we use inline caches and do not update method data.
  if (UseInlineCaches) return 4;

  int vtable_index = this->_vtable_index;
  if (vtable_index < 0) {
    // Must be invalid_vtable_index, not nonvirtual_vtable_index.
    assert(vtable_index == Method::invalid_vtable_index, "correct sentinel value");
    return 12;
  } else {
    return 24 + MacroAssembler::instr_size_for_decode_klass_not_null();
  }
}

int MachCallRuntimeNode::ret_addr_offset() {
  if (rule() == CallRuntimeDirect_rule) {
    // CallRuntimeDirectNode uses call_c.
#if defined(ABI_ELFv2)
    return 28;
#else
    return 40;
#endif
  }
  assert(rule() == CallLeafDirect_rule, "unexpected node with rule %u", rule());
  // CallLeafDirectNode uses bl.
  return 4;
}
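
// (Background, not stated in this file: on the ELFv1 ABI call_c typically
// dispatches through a function descriptor and saves/restores the TOC
// around the call, which accounts for the longer 40-byte sequence there.)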

//=============================================================================

// condition code conversions

static int cc_to_boint(int cc) {
  return Assembler::bcondCRbiIs0 | (cc & 8);
}

static int cc_to_inverse_boint(int cc) {
  return Assembler::bcondCRbiIs0 | (8-(cc & 8));
}

static int cc_to_biint(int cc, int flags_reg) {
  return (flags_reg << 2) | (cc & 3);
}
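
// Worked example (derived from the code above): cc_to_biint(2, 1), i.e.
// condition bit 2 within flags register CR1, yields (1 << 2) | 2 == 6,
// so the BI operand selects bit 6 of the condition register.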

//=============================================================================

// Compute padding required for nodes which need alignment. The padding
// is the number of bytes (not instructions) which will be inserted before
// the instruction. The padding must match the size of a NOP instruction.

// Add nop if a prefixed (two-word) instruction is going to cross a 64-byte boundary.
// (See Section 1.6 of Power ISA Version 3.1)
static int compute_prefix_padding(int current_offset) {
  assert(PowerArchitecturePPC64 >= 10 && (CodeEntryAlignment & 63) == 0,
         "Code buffer must be aligned to a multiple of 64 bytes");
  if (is_aligned(current_offset + BytesPerInstWord, 64)) {
    return BytesPerInstWord;
  }
  return 0;
}
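
// Worked example (derived from the code above): at current_offset == 60 a
// two-word prefixed instruction would occupy bytes [60, 68) and cross the
// 64-byte boundary, so one nop (BytesPerInstWord == 4) is inserted first;
// at offset 56 it ends exactly at the boundary and no padding is needed.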

int loadConI32Node::compute_padding(int current_offset) const {
  return compute_prefix_padding(current_offset);
}

int loadConL34Node::compute_padding(int current_offset) const {
  return compute_prefix_padding(current_offset);
}

int addI_reg_imm32Node::compute_padding(int current_offset) const {
  return compute_prefix_padding(current_offset);
}

int addL_reg_imm34Node::compute_padding(int current_offset) const {
  return compute_prefix_padding(current_offset);
}

int addP_reg_imm34Node::compute_padding(int current_offset) const {
  return compute_prefix_padding(current_offset);
}

int cmprb_Whitespace_reg_reg_prefixedNode::compute_padding(int current_offset) const {
  return compute_prefix_padding(current_offset);
}


//=============================================================================

// Emit an interrupt that is caught by the debugger (for debugging compiler).
void emit_break(C2_MacroAssembler *masm) {
  __ illtrap();
}

#ifndef PRODUCT
void MachBreakpointNode::format(PhaseRegAlloc *ra_, outputStream *st) const {
  st->print("BREAKPOINT");
}
#endif

void MachBreakpointNode::emit(C2_MacroAssembler *masm, PhaseRegAlloc *ra_) const {
  emit_break(masm);
}

uint MachBreakpointNode::size(PhaseRegAlloc *ra_) const {
  return MachNode::size(ra_);
}

//=============================================================================

void emit_nop(C2_MacroAssembler *masm) {
  __ nop();
}

static inline void emit_long(C2_MacroAssembler *masm, int value) {
  *((int*)(__ pc())) = value;
  __ set_inst_end(__ pc() + BytesPerInstWord);
}

//=============================================================================

%} // interrupt source

source_hpp %{ // Header information of the source block.

//--------------------------------------------------------------
//---< Used for optimization in Compile::Shorten_branches >---
//--------------------------------------------------------------

class C2_MacroAssembler;

class CallStubImpl {

 public:

  // Emit call stub, compiled java to interpreter.
  static void emit_trampoline_stub(C2_MacroAssembler *masm, int destination_toc_offset, int insts_call_instruction_offset);

  // Size of call trampoline stub.
  // This doesn't need to be accurate to the byte, but it
  // must be larger than or equal to the real size of the stub.
  static uint size_call_trampoline() {
    return MacroAssembler::trampoline_stub_size;
  }

  // number of relocations needed by a call trampoline stub
  static uint reloc_call_trampoline() {
    return 5;
  }

};

%} // end source_hpp

source %{

// Emit a trampoline stub for a call to a target which is too far away.
//
// code sequences:
//
// call-site:
//   branch-and-link to <destination> or <trampoline stub>
//
// Related trampoline stub for this call-site in the stub section:
//   load the call target from the constant pool
//   branch via CTR (LR/link still points to the call-site above)

void CallStubImpl::emit_trampoline_stub(C2_MacroAssembler *masm, int destination_toc_offset, int insts_call_instruction_offset) {
  address stub = __ emit_trampoline_stub(destination_toc_offset, insts_call_instruction_offset);
  if (stub == nullptr) {
    ciEnv::current()->record_out_of_memory_failure();
  }
}

//=============================================================================

// Emit an inline branch-and-link call and a related trampoline stub.
//
// code sequences:
//
// call-site:
//   branch-and-link to <destination> or <trampoline stub>
//
// Related trampoline stub for this call-site in the stub section:
//   load the call target from the constant pool
//   branch via CTR (LR/link still points to the call-site above)
//

typedef struct {
  int insts_call_instruction_offset;
  int ret_addr_offset;
} EmitCallOffsets;

// Emit a branch-and-link instruction that branches to a trampoline.
// - Remember the offset of the branch-and-link instruction.
// - Add a relocation at the branch-and-link instruction.
// - Emit a branch-and-link.
// - Remember the return pc offset.
EmitCallOffsets emit_call_with_trampoline_stub(C2_MacroAssembler *masm, address entry_point, relocInfo::relocType rtype) {
  EmitCallOffsets offsets = { -1, -1 };
  const int start_offset = __ offset();
  offsets.insts_call_instruction_offset = __ offset();

  // No entry point given, use the current pc.
  if (entry_point == nullptr) entry_point = __ pc();

  // Put the entry point as a constant into the constant pool.
  const address entry_point_toc_addr = __ address_constant(entry_point, RelocationHolder::none);
  if (entry_point_toc_addr == nullptr) {
    ciEnv::current()->record_out_of_memory_failure();
    return offsets;
  }
  const int entry_point_toc_offset = __ offset_to_method_toc(entry_point_toc_addr);

  // Emit the trampoline stub which will be related to the branch-and-link below.
  CallStubImpl::emit_trampoline_stub(masm, entry_point_toc_offset, offsets.insts_call_instruction_offset);
  if (ciEnv::current()->failing()) { return offsets; } // Code cache may be full.
  __ relocate(rtype);

  // Note: At this point we do not have the address of the trampoline
  // stub, and the entry point might be too far away for bl, so __ pc()
  // serves as dummy and the bl will be patched later.
  __ bl((address) __ pc());

  offsets.ret_addr_offset = __ offset() - start_offset;

  return offsets;
}

//=============================================================================

// Factory for creating loadConL* nodes for large/small constant pool.

static inline jlong replicate_immF(float con) {
  // Replicate float con 2 times and pack into vector.
  int val = *((int*)&con);
  jlong lval = val;
  lval = (lval << 32) | (lval & 0xFFFFFFFFl);
  return lval;
}
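
// Example: replicate_immF(1.0f) takes the IEEE-754 bit pattern 0x3F800000 and
// returns 0x3F8000003F800000, i.e. the 32-bit value duplicated into both
// halves of a 64-bit word.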

//=============================================================================

const RegMask& MachConstantBaseNode::_out_RegMask = BITS64_CONSTANT_TABLE_BASE_mask();
int ConstantTable::calculate_table_base_offset() const {
  return 0; // absolute addressing, no offset
}

bool MachConstantBaseNode::requires_postalloc_expand() const { return true; }
void MachConstantBaseNode::postalloc_expand(GrowableArray <Node *> *nodes, PhaseRegAlloc *ra_) {
  iRegPdstOper *op_dst = new iRegPdstOper();
  MachNode *m1 = new loadToc_hiNode();
  MachNode *m2 = new loadToc_loNode();

  m1->add_req(nullptr);
  m2->add_req(nullptr, m1);
  m1->_opnds[0] = op_dst;
  m2->_opnds[0] = op_dst;
  m2->_opnds[1] = op_dst;
  ra_->set_pair(m1->_idx, ra_->get_reg_second(this), ra_->get_reg_first(this));
  ra_->set_pair(m2->_idx, ra_->get_reg_second(this), ra_->get_reg_first(this));
  nodes->push(m1);
  nodes->push(m2);
}

void MachConstantBaseNode::emit(C2_MacroAssembler* masm, PhaseRegAlloc* ra_) const {
  // Is postalloc expanded.
  ShouldNotReachHere();
}

uint MachConstantBaseNode::size(PhaseRegAlloc* ra_) const {
  return 0;
}

#ifndef PRODUCT
void MachConstantBaseNode::format(PhaseRegAlloc* ra_, outputStream* st) const {
  st->print("-- \t// MachConstantBaseNode (empty encoding)");
}
#endif

//=============================================================================

#ifndef PRODUCT
void MachPrologNode::format(PhaseRegAlloc *ra_, outputStream *st) const {
  Compile* C = ra_->C;
  const long framesize = C->output()->frame_slots() << LogBytesPerInt;

  st->print("PROLOG\n\t");
  if (C->output()->need_stack_bang(framesize)) {
    st->print("stack_overflow_check\n\t");
  }

  if (!false /* TODO: PPC port C->is_frameless_method()*/) {
    st->print("save return pc\n\t");
    st->print("push frame %ld\n\t", -framesize);
  }

  if (C->stub_function() == nullptr) {
    st->print("nmethod entry barrier\n\t");
  }
}
#endif

void MachPrologNode::emit(C2_MacroAssembler *masm, PhaseRegAlloc *ra_) const {
  Compile* C = ra_->C;

  const long framesize = C->output()->frame_size_in_bytes();
  assert(framesize % (2 * wordSize) == 0, "must preserve 2*wordSize alignment");

  const bool method_is_frameless = false /* TODO: PPC port C->is_frameless_method()*/;

  const Register return_pc = R20; // Must match return_addr() in frame section.
  const Register callers_sp = R21;
  const Register push_frame_temp = R22;
  const Register toc_temp = R23;
  assert_different_registers(R11, return_pc, callers_sp, push_frame_temp, toc_temp);

  if (method_is_frameless) {
    // Add nop at beginning of all frameless methods to prevent any
    // oop instructions from getting overwritten by make_not_entrant
    // (patching attempt would fail).
    __ nop();
  } else {
    // Get return pc.
    __ mflr(return_pc);
  }

  if (C->clinit_barrier_on_entry()) {
    assert(!C->method()->holder()->is_not_initialized(), "initialization should have been started");

    Label L_skip_barrier;
    Register klass = toc_temp;

    // Notify OOP recorder (don't need the relocation)
    AddressLiteral md = __ constant_metadata_address(C->method()->holder()->constant_encoding());
    __ load_const_optimized(klass, md.value(), R0);
    __ clinit_barrier(klass, R16_thread, &L_skip_barrier /*L_fast_path*/);

    __ load_const_optimized(klass, SharedRuntime::get_handle_wrong_method_stub(), R0);
    __ mtctr(klass);
    __ bctr();

    __ bind(L_skip_barrier);
  }

  // Calls to C2R adapters often do not accept exceptional returns.
  // We require that their callers must bang for them. But be
  // careful, because some VM calls (such as call site linkage) can
  // use several kilobytes of stack. The stack safety zone should
  // account for that. See bugs 4446381, 4468289, 4497237.

  int bangsize = C->output()->bang_size_in_bytes();
  assert(bangsize >= framesize || bangsize <= 0, "stack bang size incorrect");
  if (C->output()->need_stack_bang(bangsize)) {
    // Unfortunately we cannot use the function provided in
    // assembler.cpp as we have to emulate the pipes. The code of
    // generate_stack_overflow_check() is therefore inlined here; see
    // assembler.cpp for some illuminative comments.
    const int page_size = os::vm_page_size();
    int bang_end = StackOverflow::stack_shadow_zone_size();

    // This is how far the previous frame's stack banging extended.
    const int bang_end_safe = bang_end;

    if (bangsize > page_size) {
      bang_end += bangsize;
    }

    int bang_offset = bang_end_safe;

    while (bang_offset <= bang_end) {
      // Need at least one stack bang at end of shadow zone.

      // This code is copied from assembler_ppc.cpp,
      // bang_stack_with_offset - see there for comments.

      // Stack grows down, caller passes positive offset.
      assert(bang_offset > 0, "must bang with positive offset");

      long stdoffset = -bang_offset;

      if (Assembler::is_simm(stdoffset, 16)) {
        // Signed 16 bit offset, a simple std is ok.
        if (UseLoadInstructionsForStackBangingPPC64) {
          __ ld(R0, (int)(signed short)stdoffset, R1_SP);
        } else {
          __ std(R0, (int)(signed short)stdoffset, R1_SP);
        }
      } else if (Assembler::is_simm(stdoffset, 31)) {
        // Use largeoffset calculations for addis & ld/std.
        const int hi = MacroAssembler::largeoffset_si16_si16_hi(stdoffset);
        const int lo = MacroAssembler::largeoffset_si16_si16_lo(stdoffset);

        Register tmp = R11;
        __ addis(tmp, R1_SP, hi);
        if (UseLoadInstructionsForStackBangingPPC64) {
          __ ld(R0, lo, tmp);
        } else {
          __ std(R0, lo, tmp);
        }
      } else {
        ShouldNotReachHere();
      }

      bang_offset += page_size;
    }
    // R11 trashed
  } // C->output()->need_stack_bang(bangsize)

  unsigned int bytes = (unsigned int)framesize;
  long offset = Assembler::align_addr(bytes, frame::alignment_in_bytes);
  ciMethod *currMethod = C->method();

  if (!method_is_frameless) {
    // Get callers sp.
    __ mr(callers_sp, R1_SP);

    // Push method's frame, modifies SP.
    assert(Assembler::is_uimm(framesize, 32U), "wrong type");
    // The ABI is already accounted for in 'framesize' via the
    // 'out_preserve' area.
    Register tmp = push_frame_temp;
    // Inlined code of push_frame((unsigned int)framesize, push_frame_temp).
    if (Assembler::is_simm(-offset, 16)) {
      __ stdu(R1_SP, -offset, R1_SP);
    } else {
      long x = -offset;
      // Inlined load_const(tmp, -offset).
      __ lis( tmp, (int)((signed short)(((x >> 32) & 0xffff0000) >> 16)));
      __ ori( tmp, tmp, ((x >> 32) & 0x0000ffff));
      __ sldi(tmp, tmp, 32);
      __ oris(tmp, tmp, (x & 0xffff0000) >> 16);
      __ ori( tmp, tmp, (x & 0x0000ffff));

      __ stdux(R1_SP, R1_SP, tmp);
    }
  }
#if 0 // TODO: PPC port
  // For testing large constant pools, emit a lot of constants to constant pool.
  // "Randomize" const_size.
  if (ConstantsALot) {
    const int num_consts = const_size();
    for (int i = 0; i < num_consts; i++) {
      __ long_constant(0xB0B5B00BBABE);
    }
  }
#endif
  if (!method_is_frameless) {
    // Save return pc.
    __ std(return_pc, _abi0(lr), callers_sp);
  }

  if (C->stub_function() == nullptr) {
    BarrierSetAssembler* bs = BarrierSet::barrier_set()->barrier_set_assembler();
    bs->nmethod_entry_barrier(masm, push_frame_temp);
  }

  C->output()->set_frame_complete(__ offset());
}

uint MachPrologNode::size(PhaseRegAlloc *ra_) const {
  // Variable size. Determine dynamically.
  return MachNode::size(ra_);
}

int MachPrologNode::reloc() const {
  // Return number of relocatable values contained in this instruction.
  return 1; // 1 reloc entry for load_const(toc).
}

//=============================================================================

#ifndef PRODUCT
void MachEpilogNode::format(PhaseRegAlloc *ra_, outputStream *st) const {
  Compile* C = ra_->C;

  st->print("EPILOG\n\t");
  st->print("restore return pc\n\t");
  st->print("pop frame\n\t");

  if (do_polling() && C->is_method_compilation()) {
    st->print("safepoint poll\n\t");
  }
}
#endif

void MachEpilogNode::emit(C2_MacroAssembler *masm, PhaseRegAlloc *ra_) const {
  Compile* C = ra_->C;

  const long framesize = ((long)C->output()->frame_slots()) << LogBytesPerInt;
  assert(framesize >= 0, "negative frame-size?");

  const bool method_needs_polling = do_polling() && C->is_method_compilation();
  const bool method_is_frameless = false /* TODO: PPC port C->is_frameless_method()*/;
  const Register return_pc = R31; // Must survive C-call to enable_stack_reserved_zone().
  const Register temp = R12;

  if (!method_is_frameless) {
    // Restore return pc relative to callers' sp.
    __ ld(return_pc, ((int)framesize) + _abi0(lr), R1_SP);
    // Move return pc to LR.
    __ mtlr(return_pc);
    // Pop frame (fixed frame-size).
    __ addi(R1_SP, R1_SP, (int)framesize);
  }

  if (StackReservedPages > 0 && C->has_reserved_stack_access()) {
    __ reserved_stack_check(return_pc);
  }

  if (method_needs_polling) {
    Label dummy_label;
    Label* code_stub = &dummy_label;
    if (!UseSIGTRAP && !C->output()->in_scratch_emit_size()) {
      C2SafepointPollStub* stub = new (C->comp_arena()) C2SafepointPollStub(__ offset());
      C->output()->add_stub(stub);
      code_stub = &stub->entry();
      __ relocate(relocInfo::poll_return_type);
    }
    __ safepoint_poll(*code_stub, temp, true /* at_return */, true /* in_nmethod */);
  }
}

uint MachEpilogNode::size(PhaseRegAlloc *ra_) const {
  // Variable size. Determine dynamically.
  return MachNode::size(ra_);
}

int MachEpilogNode::reloc() const {
  // Return number of relocatable values contained in this instruction.
  return 1; // 1 for load_from_polling_page.
}

const Pipeline * MachEpilogNode::pipeline() const {
  return MachNode::pipeline_class();
}

// =============================================================================

// Figure out which register class each belongs in: rc_int, rc_float, rc_vs or
// rc_stack.
enum RC { rc_bad, rc_int, rc_float, rc_vs, rc_stack };

static enum RC rc_class(OptoReg::Name reg) {
  // Return the register class for the given register. The given register
  // reg is a <register>_num value, which is an index into the MachRegisterNumbers
  // enumeration in adGlobals_ppc.hpp.

  if (reg == OptoReg::Bad) return rc_bad;

  // We have 64 integer register halves, starting at index 0.
  STATIC_ASSERT((int)ConcreteRegisterImpl::max_gpr == (int)MachRegisterNumbers::F0_num);
  if (reg < ConcreteRegisterImpl::max_gpr) return rc_int;

  // We have 64 floating-point register halves, starting at index 64.
  STATIC_ASSERT((int)ConcreteRegisterImpl::max_fpr == (int)MachRegisterNumbers::VSR0_num);
  if (reg < ConcreteRegisterImpl::max_fpr) return rc_float;

  // We have 64 vector-scalar registers, starting at index 128.
  STATIC_ASSERT((int)ConcreteRegisterImpl::max_vsr == (int)MachRegisterNumbers::CR0_num);
  if (reg < ConcreteRegisterImpl::max_vsr) return rc_vs;

  // Condition and special purpose registers are not allocated. We only accept stack from here.
  assert(OptoReg::is_stack(reg), "what else is it?");
  return rc_stack;
}

static int ld_st_helper(C2_MacroAssembler *masm, const char *op_str, uint opcode, int reg, int offset,
                        bool do_print, Compile* C, outputStream *st) {

  assert(opcode == Assembler::LD_OPCODE ||
         opcode == Assembler::STD_OPCODE ||
         opcode == Assembler::LWZ_OPCODE ||
         opcode == Assembler::STW_OPCODE ||
         opcode == Assembler::LFD_OPCODE ||
         opcode == Assembler::STFD_OPCODE ||
         opcode == Assembler::LFS_OPCODE ||
         opcode == Assembler::STFS_OPCODE,
         "opcode not supported");

  if (masm) {
    int d =
      (Assembler::LD_OPCODE == opcode || Assembler::STD_OPCODE == opcode) ?
        Assembler::ds(offset+0 /* TODO: PPC port C->frame_slots_sp_bias_in_bytes()*/)
      : Assembler::d1(offset+0 /* TODO: PPC port C->frame_slots_sp_bias_in_bytes()*/); // Makes no difference in opt build.
    emit_long(masm, opcode | Assembler::rt(Matcher::_regEncode[reg]) | d | Assembler::ra(R1_SP));
  }
#ifndef PRODUCT
  else if (do_print) {
    st->print("%-7s %s, [R1_SP + #%d+%d] \t// spill copy",
              op_str,
              Matcher::regName[reg],
              offset, 0 /* TODO: PPC port C->frame_slots_sp_bias_in_bytes()*/);
  }
#endif
  return 4; // size
}
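
// ld and std are DS-form instructions: the low two bits of their displacement
// field serve as opcode extension, so the effective offset must be a multiple
// of 4 (encoded via Assembler::ds above). The word and float variants are
// D-form and take a plain signed 16-bit displacement (Assembler::d1).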

uint MachSpillCopyNode::implementation(C2_MacroAssembler *masm, PhaseRegAlloc *ra_, bool do_size, outputStream *st) const {
  Compile* C = ra_->C;

  // Get registers to move.
  OptoReg::Name src_hi = ra_->get_reg_second(in(1));
  OptoReg::Name src_lo = ra_->get_reg_first(in(1));
  OptoReg::Name dst_hi = ra_->get_reg_second(this);
  OptoReg::Name dst_lo = ra_->get_reg_first(this);

  enum RC src_hi_rc = rc_class(src_hi);
  enum RC src_lo_rc = rc_class(src_lo);
  enum RC dst_hi_rc = rc_class(dst_hi);
  enum RC dst_lo_rc = rc_class(dst_lo);

  assert(src_lo != OptoReg::Bad && dst_lo != OptoReg::Bad, "must move at least 1 register");
  if (src_hi != OptoReg::Bad)
    assert((src_lo&1)==0 && src_lo+1==src_hi &&
           (dst_lo&1)==0 && dst_lo+1==dst_hi,
           "expected aligned-adjacent pairs");
  // Generate spill code!
  int size = 0;

  if (src_lo == dst_lo && src_hi == dst_hi)
    return size; // Self copy, no move.

  if (bottom_type()->isa_vect() != nullptr && ideal_reg() == Op_VecX) {
    // Memory->Memory Spill.
    if (src_lo_rc == rc_stack && dst_lo_rc == rc_stack) {
      int src_offset = ra_->reg2offset(src_lo);
      int dst_offset = ra_->reg2offset(dst_lo);
      if (masm) {
        __ ld(R0, src_offset, R1_SP);
        __ std(R0, dst_offset, R1_SP);
        __ ld(R0, src_offset+8, R1_SP);
        __ std(R0, dst_offset+8, R1_SP);
      }
      size += 16;
    }
    // VectorSRegister->Memory Spill.
    else if (src_lo_rc == rc_vs && dst_lo_rc == rc_stack) {
      VectorSRegister Rsrc = as_VectorSRegister(Matcher::_regEncode[src_lo]);
      int dst_offset = ra_->reg2offset(dst_lo);
      if (PowerArchitecturePPC64 >= 9) {
        if (is_aligned(dst_offset, 16)) {
          if (masm) {
            __ stxv(Rsrc, dst_offset, R1_SP); // matches storeV16_Power9
          }
          size += 4;
        } else {
          // Other alignment can be used by Vector API (VectorPayload in rearrangeOp,
          // observed with VectorRearrangeTest.java on Power9).
          if (masm) {
            __ addi(R0, R1_SP, dst_offset);
            __ stxvx(Rsrc, R0); // matches storeV16_Power9 (regarding element ordering)
          }
          size += 8;
        }
      } else {
        if (masm) {
          __ addi(R0, R1_SP, dst_offset);
          __ stxvd2x(Rsrc, R0); // matches storeV16_Power8
        }
        size += 8;
      }
    }
    // Memory->VectorSRegister Spill.
    else if (src_lo_rc == rc_stack && dst_lo_rc == rc_vs) {
      VectorSRegister Rdst = as_VectorSRegister(Matcher::_regEncode[dst_lo]);
      int src_offset = ra_->reg2offset(src_lo);
      if (PowerArchitecturePPC64 >= 9) {
        if (is_aligned(src_offset, 16)) {
          if (masm) {
            __ lxv(Rdst, src_offset, R1_SP);
          }
          size += 4;
        } else {
          if (masm) {
            __ addi(R0, R1_SP, src_offset);
            __ lxvx(Rdst, R0);
          }
          size += 8;
        }
      } else {
        if (masm) {
          __ addi(R0, R1_SP, src_offset);
          __ lxvd2x(Rdst, R0);
        }
        size += 8;
      }
    }
    // VectorSRegister->VectorSRegister.
    else if (src_lo_rc == rc_vs && dst_lo_rc == rc_vs) {
      VectorSRegister Rsrc = as_VectorSRegister(Matcher::_regEncode[src_lo]);
      VectorSRegister Rdst = as_VectorSRegister(Matcher::_regEncode[dst_lo]);
      if (masm) {
        __ xxlor(Rdst, Rsrc, Rsrc);
      }
      size += 4;
    }
    else {
      ShouldNotReachHere(); // No VSR spill.
    }
    return size;
  }

  // --------------------------------------
  // Memory->Memory Spill. Use R0 to hold the value.
  if (src_lo_rc == rc_stack && dst_lo_rc == rc_stack) {
    int src_offset = ra_->reg2offset(src_lo);
    int dst_offset = ra_->reg2offset(dst_lo);
    if (src_hi != OptoReg::Bad) {
      assert(src_hi_rc==rc_stack && dst_hi_rc==rc_stack,
             "expected same type of move for high parts");
      size += ld_st_helper(masm, "LD ", Assembler::LD_OPCODE, R0_num, src_offset, !do_size, C, st);
      if (!masm && !do_size) st->print("\n\t");
      size += ld_st_helper(masm, "STD ", Assembler::STD_OPCODE, R0_num, dst_offset, !do_size, C, st);
    } else {
      size += ld_st_helper(masm, "LWZ ", Assembler::LWZ_OPCODE, R0_num, src_offset, !do_size, C, st);
      if (!masm && !do_size) st->print("\n\t");
      size += ld_st_helper(masm, "STW ", Assembler::STW_OPCODE, R0_num, dst_offset, !do_size, C, st);
    }
    return size;
  }

  // --------------------------------------
  // Check for float->int copy; requires a trip through memory.
  if (src_lo_rc == rc_float && dst_lo_rc == rc_int) {
    Unimplemented();
  }

  // --------------------------------------
  // Check for integer reg-reg copy.
  if (src_lo_rc == rc_int && dst_lo_rc == rc_int) {
    Register Rsrc = as_Register(Matcher::_regEncode[src_lo]);
    Register Rdst = as_Register(Matcher::_regEncode[dst_lo]);
    size = (Rsrc != Rdst) ? 4 : 0;

    if (masm) {
      if (size) {
        __ mr(Rdst, Rsrc);
      }
    }
#ifndef PRODUCT
    else if (!do_size) {
      if (size) {
        st->print("%-7s %s, %s \t// spill copy", "MR", Matcher::regName[dst_lo], Matcher::regName[src_lo]);
      } else {
        st->print("%-7s %s, %s \t// spill copy", "MR-NOP", Matcher::regName[dst_lo], Matcher::regName[src_lo]);
      }
    }
#endif
    return size;
  }

  // Check for integer store.
  if (src_lo_rc == rc_int && dst_lo_rc == rc_stack) {
    int dst_offset = ra_->reg2offset(dst_lo);
    if (src_hi != OptoReg::Bad) {
      assert(src_hi_rc==rc_int && dst_hi_rc==rc_stack,
             "expected same type of move for high parts");
      size += ld_st_helper(masm, "STD ", Assembler::STD_OPCODE, src_lo, dst_offset, !do_size, C, st);
    } else {
      size += ld_st_helper(masm, "STW ", Assembler::STW_OPCODE, src_lo, dst_offset, !do_size, C, st);
    }
    return size;
  }

  // Check for integer load.
  if (dst_lo_rc == rc_int && src_lo_rc == rc_stack) {
    int src_offset = ra_->reg2offset(src_lo);
    if (src_hi != OptoReg::Bad) {
      assert(dst_hi_rc==rc_int && src_hi_rc==rc_stack,
             "expected same type of move for high parts");
      size += ld_st_helper(masm, "LD ", Assembler::LD_OPCODE, dst_lo, src_offset, !do_size, C, st);
    } else {
      size += ld_st_helper(masm, "LWZ ", Assembler::LWZ_OPCODE, dst_lo, src_offset, !do_size, C, st);
    }
    return size;
  }

  // Check for float reg-reg copy.
  if (src_lo_rc == rc_float && dst_lo_rc == rc_float) {
    if (masm) {
      FloatRegister Rsrc = as_FloatRegister(Matcher::_regEncode[src_lo]);
      FloatRegister Rdst = as_FloatRegister(Matcher::_regEncode[dst_lo]);
      __ fmr(Rdst, Rsrc);
    }
#ifndef PRODUCT
    else if (!do_size) {
      st->print("%-7s %s, %s \t// spill copy", "FMR", Matcher::regName[dst_lo], Matcher::regName[src_lo]);
    }
#endif
    return 4;
  }

  // Check for float store.
  if (src_lo_rc == rc_float && dst_lo_rc == rc_stack) {
    int dst_offset = ra_->reg2offset(dst_lo);
    if (src_hi != OptoReg::Bad) {
      assert(src_hi_rc==rc_float && dst_hi_rc==rc_stack,
             "expected same type of move for high parts");
      size += ld_st_helper(masm, "STFD", Assembler::STFD_OPCODE, src_lo, dst_offset, !do_size, C, st);
    } else {
      size += ld_st_helper(masm, "STFS", Assembler::STFS_OPCODE, src_lo, dst_offset, !do_size, C, st);
    }
    return size;
  }

  // Check for float load.
  if (dst_lo_rc == rc_float && src_lo_rc == rc_stack) {
    int src_offset = ra_->reg2offset(src_lo);
    if (src_hi != OptoReg::Bad) {
      assert(dst_hi_rc==rc_float && src_hi_rc==rc_stack,
             "expected same type of move for high parts");
      size += ld_st_helper(masm, "LFD ", Assembler::LFD_OPCODE, dst_lo, src_offset, !do_size, C, st);
    } else {
      size += ld_st_helper(masm, "LFS ", Assembler::LFS_OPCODE, dst_lo, src_offset, !do_size, C, st);
    }
    return size;
  }

  // --------------------------------------------------------------------
  // Check for hi bits still needing moving. Only happens for misaligned
  // arguments to native calls.
  if (src_hi == dst_hi)
    return size; // Self copy; no move.

  assert(src_hi_rc != rc_bad && dst_hi_rc != rc_bad, "src_hi & dst_hi cannot be Bad");
  ShouldNotReachHere(); // Unimplemented
  return 0;
}

#ifndef PRODUCT
void MachSpillCopyNode::format(PhaseRegAlloc *ra_, outputStream *st) const {
  if (!ra_)
    st->print("N%d = SpillCopy(N%d)", _idx, in(1)->_idx);
  else
    implementation(nullptr, ra_, false, st);
}
#endif

void MachSpillCopyNode::emit(C2_MacroAssembler *masm, PhaseRegAlloc *ra_) const {
  implementation(masm, ra_, false, nullptr);
}

uint MachSpillCopyNode::size(PhaseRegAlloc *ra_) const {
  return implementation(nullptr, ra_, true, nullptr);
}

#ifndef PRODUCT
void MachNopNode::format(PhaseRegAlloc *ra_, outputStream *st) const {
  st->print("NOP \t// %d nops to pad for loops or prefixed instructions.", _count);
}
#endif

void MachNopNode::emit(C2_MacroAssembler *masm, PhaseRegAlloc *) const {
  // _count contains the number of nops needed for padding.
  for (int i = 0; i < _count; i++) {
    __ nop();
  }
}

uint MachNopNode::size(PhaseRegAlloc *ra_) const {
  return _count * 4;
}

#ifndef PRODUCT
void BoxLockNode::format(PhaseRegAlloc *ra_, outputStream *st) const {
  int offset = ra_->reg2offset(in_RegMask(0).find_first_elem());
  char reg_str[128];
  ra_->dump_register(this, reg_str, sizeof(reg_str));
  st->print("ADDI %s, SP, %d \t// box node", reg_str, offset);
}
#endif

void BoxLockNode::emit(C2_MacroAssembler *masm, PhaseRegAlloc *ra_) const {
  int offset = ra_->reg2offset(in_RegMask(0).find_first_elem());
  int reg = ra_->get_encode(this);

  if (Assembler::is_simm(offset, 16)) {
    __ addi(as_Register(reg), R1, offset);
  } else {
    ShouldNotReachHere();
  }
}

uint BoxLockNode::size(PhaseRegAlloc *ra_) const {
  // BoxLockNode is not a MachNode, so we can't just call MachNode::size(ra_).
  return 4;
}

#ifndef PRODUCT
void MachUEPNode::format(PhaseRegAlloc *ra_, outputStream *st) const {
  st->print_cr("---- MachUEPNode ----");
  st->print_cr("...");
}
#endif

void MachUEPNode::emit(C2_MacroAssembler *masm, PhaseRegAlloc *ra_) const {
  // This is the unverified entry point.
  __ ic_check(CodeEntryAlignment);
  // Argument is valid and klass is as expected, continue.
}

uint MachUEPNode::size(PhaseRegAlloc *ra_) const {
  // Variable size. Determine dynamically.
  return MachNode::size(ra_);
}

//=============================================================================

%} // interrupt source

source_hpp %{ // Header information of the source block.

class HandlerImpl {

 public:

  static int emit_exception_handler(C2_MacroAssembler *masm);
  static int emit_deopt_handler(C2_MacroAssembler* masm);

  static uint size_exception_handler() {
    // The exception_handler is a b64_patchable.
    return MacroAssembler::b64_patchable_size;
  }

  static uint size_deopt_handler() {
    // The deopt_handler is a bl64_patchable.
    return MacroAssembler::bl64_patchable_size;
  }

};

class Node::PD {
 public:
  enum NodeFlags {
    _last_flag = Node::_last_flag
  };
};

%} // end source_hpp

source %{

int HandlerImpl::emit_exception_handler(C2_MacroAssembler *masm) {
  address base = __ start_a_stub(size_exception_handler());
  if (base == nullptr) {
    ciEnv::current()->record_failure("CodeCache is full");
    return 0; // CodeBuffer::expand failed
  }

  int offset = __ offset();
  __ b64_patchable((address)OptoRuntime::exception_blob()->content_begin(),
                   relocInfo::runtime_call_type);
  assert(__ offset() - offset == (int)size_exception_handler(), "must be fixed size");
  __ end_a_stub();

  return offset;
}

// The deopt_handler is like the exception handler, but it calls to
// the deoptimization blob instead of jumping to the exception blob.
int HandlerImpl::emit_deopt_handler(C2_MacroAssembler* masm) {
  address base = __ start_a_stub(size_deopt_handler());
  if (base == nullptr) {
    ciEnv::current()->record_failure("CodeCache is full");
    return 0; // CodeBuffer::expand failed
  }

  int offset = __ offset();
  __ bl64_patchable((address)SharedRuntime::deopt_blob()->unpack(),
                    relocInfo::runtime_call_type);
  assert(__ offset() - offset == (int) size_deopt_handler(), "must be fixed size");
  __ end_a_stub();

  return offset;
}

//=============================================================================

// Use a frame slots bias for frameless methods if accessing the stack.
static int frame_slots_bias(int reg_enc, PhaseRegAlloc* ra_) {
  if (as_Register(reg_enc) == R1_SP) {
    return 0; // TODO: PPC port ra_->C->frame_slots_sp_bias_in_bytes();
  }
  return 0;
}

bool Matcher::match_rule_supported(int opcode) {
  if (!has_match_rule(opcode)) {
    return false; // no match rule present
  }

  switch (opcode) {
  case Op_CountLeadingZerosI:
  case Op_CountLeadingZerosL:
    return UseCountLeadingZerosInstructionsPPC64;
  case Op_CountTrailingZerosI:
  case Op_CountTrailingZerosL:
    return (UseCountLeadingZerosInstructionsPPC64 || UseCountTrailingZerosInstructionsPPC64);
  case Op_PopCountI:
  case Op_PopCountL:
    return UsePopCountInstruction;
  case Op_ConvF2HF:
  case Op_ConvHF2F:
    return VM_Version::supports_float16();
  case Op_AddVB:
  case Op_AddVS:
  case Op_AddVI:
  case Op_AddVF:
  case Op_AddVD:
  case Op_SubVB:
  case Op_SubVS:
  case Op_SubVI:
  case Op_SubVF:
  case Op_SubVD:
  case Op_MulVS:
  case Op_MulVF:
  case Op_MulVD:
  case Op_DivVF:
  case Op_DivVD:
  case Op_AbsVF:
  case Op_AbsVD:
  case Op_NegVF:
  case Op_NegVD:
  case Op_SqrtVF:
  case Op_SqrtVD:
  case Op_AddVL:
  case Op_SubVL:
  case Op_MulVI:
  case Op_RoundDoubleModeV:
  case Op_MinV:
  case Op_MaxV:
  case Op_AndV:
  case Op_OrV:
  case Op_XorV:
  case Op_AddReductionVI:
  case Op_MulReductionVI:
  case Op_AndReductionV:
  case Op_OrReductionV:
  case Op_XorReductionV:
  case Op_MinReductionV:
  case Op_MaxReductionV:
    return SuperwordUseVSX;
  case Op_PopCountVI:
  case Op_PopCountVL:
    return (SuperwordUseVSX && UsePopCountInstruction);
  case Op_CountLeadingZerosV:
    return SuperwordUseVSX && UseCountLeadingZerosInstructionsPPC64;
  case Op_CountTrailingZerosV:
    return SuperwordUseVSX && UseCountTrailingZerosInstructionsPPC64;
  case Op_FmaF:
  case Op_FmaD:
    return UseFMA;
  case Op_FmaVF:
  case Op_FmaVD:
    return (SuperwordUseVSX && UseFMA);

  case Op_Digit:
    return vmIntrinsics::is_intrinsic_available(vmIntrinsics::_isDigit);
  case Op_LowerCase:
    return vmIntrinsics::is_intrinsic_available(vmIntrinsics::_isLowerCase);
  case Op_UpperCase:
    return vmIntrinsics::is_intrinsic_available(vmIntrinsics::_isUpperCase);
  case Op_Whitespace:
    return vmIntrinsics::is_intrinsic_available(vmIntrinsics::_isWhitespace);

  case Op_CacheWB:
  case Op_CacheWBPreSync:
  case Op_CacheWBPostSync:
    return VM_Version::supports_data_cache_line_flush();
  }

  return true; // By default match rules are supported.
}

bool Matcher::match_rule_supported_auto_vectorization(int opcode, int vlen, BasicType bt) {
  return match_rule_supported_vector(opcode, vlen, bt);
}

bool Matcher::match_rule_supported_vector(int opcode, int vlen, BasicType bt) {
  if (!match_rule_supported(opcode) || !vector_size_supported(bt, vlen)) {
    return false;
  }
  // Special cases
  switch (opcode) {
  // Reductions only support INT at the moment.
  case Op_AddReductionVI:
  case Op_MulReductionVI:
  case Op_AndReductionV:
  case Op_OrReductionV:
  case Op_XorReductionV:
  case Op_MinReductionV:
  case Op_MaxReductionV:
    return bt == T_INT;
  // MaxV, MinV need types == INT || LONG.
  case Op_MaxV:
  case Op_MinV:
    return bt == T_INT || bt == T_LONG;
  }
  return true; // By default match rules are supported.
}

bool Matcher::match_rule_supported_vector_masked(int opcode, int vlen, BasicType bt) {
  return false;
}

bool Matcher::vector_needs_partial_operations(Node* node, const TypeVect* vt) {
  return false;
}

bool Matcher::vector_rearrange_requires_load_shuffle(BasicType elem_bt, int vlen) {
  return false;
}

const RegMask* Matcher::predicate_reg_mask(void) {
  return nullptr;
}

// Vector calling convention not yet implemented.
bool Matcher::supports_vector_calling_convention(void) {
  return false;
}

OptoRegPair Matcher::vector_return_value(uint ideal_reg) {
  Unimplemented();
  return OptoRegPair(0, 0);
}

// Vector width in bytes.
int Matcher::vector_width_in_bytes(BasicType bt) {
  if (SuperwordUseVSX) {
    assert(MaxVectorSize == 16, "");
    return 16;
  } else {
    assert(MaxVectorSize == 8, "");
    return 8;
  }
}

// Vector ideal reg.
uint Matcher::vector_ideal_reg(int size) {
  if (SuperwordUseVSX) {
    assert(MaxVectorSize == 16 && size == 16, "");
    return Op_VecX;
  } else {
    assert(MaxVectorSize == 8 && size == 8, "");
    return Op_RegL;
  }
}

// Limits on vector size (number of elements) loaded into vector.
int Matcher::max_vector_size(const BasicType bt) {
  assert(is_java_primitive(bt), "only primitive type vectors");
  return vector_width_in_bytes(bt)/type2aelembytes(bt);
}
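
// Example: with SuperwordUseVSX the vector width is 16 bytes, so
// max_vector_size(T_INT) is 16/4 = 4 elements and max_vector_size(T_BYTE)
// is 16; without VSX the width is 8 bytes and the element counts halve.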

int Matcher::min_vector_size(const BasicType bt) {
  return max_vector_size(bt); // Same as max.
}

int Matcher::max_vector_size_auto_vectorization(const BasicType bt) {
  return Matcher::max_vector_size(bt);
}

int Matcher::scalable_vector_reg_size(const BasicType bt) {
  return -1;
}

// RETURNS: whether this branch offset is short enough that a short
// branch can be used.
//
// If the platform does not provide any short branch variants, then
// this method should return `false' for offset 0.
//
// `Compile::Fill_buffer' will decide on the basis of this information
// whether to do the pass `Compile::Shorten_branches' at all.
//
// And `Compile::Shorten_branches' will decide on the basis of this
// information whether to replace particular branch sites by short
// ones.
bool Matcher::is_short_branch_offset(int rule, int br_size, int offset) {
  // Is the offset within the range of a ppc64 pc relative branch?
  bool b;

  const int safety_zone = 3 * BytesPerInstWord;
  b = Assembler::is_simm((offset<0 ? offset-safety_zone : offset+safety_zone),
                         29 - 16 + 1 + 2);
  return b;
}
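
// The short form is a conditional branch (bc) whose BD displacement occupies
// instruction bits 16..29, i.e. 29 - 16 + 1 = 14 bits; the implicit word
// alignment contributes 2 more bits, giving a signed 16-bit byte offset and a
// reach of +/-32 KiB, reduced here by a 3-instruction safety zone.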

/* TODO: PPC port
// Make a new machine dependent decode node (with its operands).
MachTypeNode *Matcher::make_decode_node() {
  assert(CompressedOops::base() == nullptr && CompressedOops::shift() == 0,
         "This method is only implemented for unscaled cOops mode so far");
  MachTypeNode *decode = new decodeN_unscaledNode();
  decode->set_opnd_array(0, new iRegPdstOper());
  decode->set_opnd_array(1, new iRegNsrcOper());
  return decode;
}
*/

MachOper* Matcher::pd_specialize_generic_vector_operand(MachOper* original_opnd, uint ideal_reg, bool is_temp) {
  ShouldNotReachHere(); // generic vector operands not supported
  return nullptr;
}

bool Matcher::is_reg2reg_move(MachNode* m) {
  ShouldNotReachHere(); // generic vector operands not supported
  return false;
}

bool Matcher::is_generic_vector(MachOper* opnd) {
  ShouldNotReachHere(); // generic vector operands not supported
  return false;
}

// Return whether or not this register is ever used as an argument. This
// function is used on startup to build the trampoline stubs in generateOptoStub.
// Registers not mentioned will be killed by the VM call in the trampoline, and
// arguments in those registers will not be available to the callee.
bool Matcher::can_be_java_arg(int reg) {
  // We must include the virtual halves in order to get STDs and LDs
  // instead of STWs and LWs in the trampoline stubs.

  if (   reg == R3_num || reg == R3_H_num
      || reg == R4_num || reg == R4_H_num
      || reg == R5_num || reg == R5_H_num
      || reg == R6_num || reg == R6_H_num
      || reg == R7_num || reg == R7_H_num
      || reg == R8_num || reg == R8_H_num
      || reg == R9_num || reg == R9_H_num
      || reg == R10_num || reg == R10_H_num)
    return true;

  if (   reg == F1_num || reg == F1_H_num
      || reg == F2_num || reg == F2_H_num
      || reg == F3_num || reg == F3_H_num
      || reg == F4_num || reg == F4_H_num
      || reg == F5_num || reg == F5_H_num
      || reg == F6_num || reg == F6_H_num
      || reg == F7_num || reg == F7_H_num
      || reg == F8_num || reg == F8_H_num
      || reg == F9_num || reg == F9_H_num
      || reg == F10_num || reg == F10_H_num
      || reg == F11_num || reg == F11_H_num
      || reg == F12_num || reg == F12_H_num
      || reg == F13_num || reg == F13_H_num)
    return true;

  return false;
}

bool Matcher::is_spillable_arg(int reg) {
  return can_be_java_arg(reg);
}

uint Matcher::int_pressure_limit()
{
  return (INTPRESSURE == -1) ? 26 : INTPRESSURE;
}

uint Matcher::float_pressure_limit()
{
  return (FLOATPRESSURE == -1) ? 28 : FLOATPRESSURE;
}

bool Matcher::use_asm_for_ldiv_by_con(jlong divisor) {
  return false;
}

// Register for DIVI projection of divmodI.
RegMask Matcher::divI_proj_mask() {
  ShouldNotReachHere();
  return RegMask();
}

// Register for MODI projection of divmodI.
RegMask Matcher::modI_proj_mask() {
  ShouldNotReachHere();
  return RegMask();
}

// Register for DIVL projection of divmodL.
RegMask Matcher::divL_proj_mask() {
  ShouldNotReachHere();
  return RegMask();
}

// Register for MODL projection of divmodL.
RegMask Matcher::modL_proj_mask() {
  ShouldNotReachHere();
  return RegMask();
}

const RegMask Matcher::method_handle_invoke_SP_save_mask() {
  return RegMask();
}

%}

//----------ENCODING BLOCK-----------------------------------------------------
// This block specifies the encoding classes used by the compiler to output
// byte streams. Encoding classes are parameterized macros used by
// Machine Instruction Nodes in order to generate the bit encoding of the
// instruction. Operands specify their base encoding interface with the
// interface keyword. Four interfaces are currently supported:
// REG_INTER, CONST_INTER, MEMORY_INTER, & COND_INTER. REG_INTER causes an
// operand to generate a function which returns its register number when
// queried. CONST_INTER causes an operand to generate a function which
// returns the value of the constant when queried. MEMORY_INTER causes an
// operand to generate four functions which return the Base Register, the
// Index Register, the Scale Value, and the Offset Value of the operand when
// queried. COND_INTER causes an operand to generate six functions which
// return the encoding code (i.e., the encoding bits for the instruction)
// associated with each basic boolean condition for a conditional instruction.
//
// Instructions specify two basic values for encoding. Again, a function
// is available to check if the constant displacement is an oop. They use the
// ins_encode keyword to specify their encoding classes (which must be
// a sequence of enc_class names, and their parameters, specified in
// the encoding block), and they use the
// opcode keyword to specify, in order, their primary, secondary, and
// tertiary opcode. Only the opcode sections which a particular instruction
// needs for encoding need to be specified.
encode %{
  enc_class enc_unimplemented %{
    __ unimplemented("Unimplemented mach node encoding in AD file.", 13);
  %}

  enc_class enc_untested %{
#ifdef ASSERT
    __ untested("Untested mach node encoding in AD file.");
#else
#endif
  %}

  enc_class enc_lbz(iRegIdst dst, memory mem) %{
    int Idisp = $mem$$disp + frame_slots_bias($mem$$base, ra_);
    __ lbz($dst$$Register, Idisp, $mem$$base$$Register);
  %}

  // Load acquire.
  enc_class enc_lbz_ac(iRegIdst dst, memory mem) %{
    int Idisp = $mem$$disp + frame_slots_bias($mem$$base, ra_);
    __ lbz($dst$$Register, Idisp, $mem$$base$$Register);
    __ twi_0($dst$$Register);
    __ isync();
  %}

  enc_class enc_lhz(iRegIdst dst, memory mem) %{
    int Idisp = $mem$$disp + frame_slots_bias($mem$$base, ra_);
    __ lhz($dst$$Register, Idisp, $mem$$base$$Register);
  %}

  // Load acquire.
  enc_class enc_lhz_ac(iRegIdst dst, memory mem) %{
    int Idisp = $mem$$disp + frame_slots_bias($mem$$base, ra_);
    __ lhz($dst$$Register, Idisp, $mem$$base$$Register);
    __ twi_0($dst$$Register);
    __ isync();
  %}

  enc_class enc_lwz(iRegIdst dst, memory mem) %{
    int Idisp = $mem$$disp + frame_slots_bias($mem$$base, ra_);
    __ lwz($dst$$Register, Idisp, $mem$$base$$Register);
  %}

  // Load acquire.
  enc_class enc_lwz_ac(iRegIdst dst, memory mem) %{
    int Idisp = $mem$$disp + frame_slots_bias($mem$$base, ra_);
    __ lwz($dst$$Register, Idisp, $mem$$base$$Register);
    __ twi_0($dst$$Register);
    __ isync();
  %}

  enc_class enc_ld(iRegLdst dst, memoryAlg4 mem) %{
    int Idisp = $mem$$disp + frame_slots_bias($mem$$base, ra_);
    // Operand 'ds' requires 4-alignment.
    assert((Idisp & 0x3) == 0, "unaligned offset");
    __ ld($dst$$Register, Idisp, $mem$$base$$Register);
  %}

  // Load acquire.
  enc_class enc_ld_ac(iRegLdst dst, memoryAlg4 mem) %{
    int Idisp = $mem$$disp + frame_slots_bias($mem$$base, ra_);
    // Operand 'ds' requires 4-alignment.
    assert((Idisp & 0x3) == 0, "unaligned offset");
    __ ld($dst$$Register, Idisp, $mem$$base$$Register);
    __ twi_0($dst$$Register);
    __ isync();
  %}

  enc_class enc_lfd(RegF dst, memory mem) %{
    int Idisp = $mem$$disp + frame_slots_bias($mem$$base, ra_);
    __ lfd($dst$$FloatRegister, Idisp, $mem$$base$$Register);
  %}

  enc_class enc_load_long_constL(iRegLdst dst, immL src, iRegLdst toc) %{
    int toc_offset = 0;

    address const_toc_addr;
    // Create a non-oop constant, no relocation needed.
    // If it is an IC, it has a virtual_call_Relocation.
    const_toc_addr = __ long_constant((jlong)$src$$constant);
    if (const_toc_addr == nullptr) {
      ciEnv::current()->record_out_of_memory_failure();
      return;
    }

    // Get the constant's TOC offset.
    toc_offset = __ offset_to_method_toc(const_toc_addr);

    // Keep the current instruction offset in mind.
    ((loadConLNode*)this)->_cbuf_insts_offset = __ offset();

    __ ld($dst$$Register, toc_offset, $toc$$Register);
  %}

  enc_class enc_load_long_constL_hi(iRegLdst dst, iRegLdst toc, immL src) %{
    if (!ra_->C->output()->in_scratch_emit_size()) {
      address const_toc_addr;
      // Create a non-oop constant, no relocation needed.
      // If it is an IC, it has a virtual_call_Relocation.
      const_toc_addr = __ long_constant((jlong)$src$$constant);
      if (const_toc_addr == nullptr) {
        ciEnv::current()->record_out_of_memory_failure();
        return;
      }

      // Get the constant's TOC offset.
      const int toc_offset = __ offset_to_method_toc(const_toc_addr);
      // Store the toc offset of the constant.
      ((loadConL_hiNode*)this)->_const_toc_offset = toc_offset;

      // Also keep the current instruction offset in mind.
      ((loadConL_hiNode*)this)->_cbuf_insts_offset = __ offset();
    }

    __ addis($dst$$Register, $toc$$Register, MacroAssembler::largeoffset_si16_si16_hi(_const_toc_offset));
  %}

%} // encode

source %{

typedef struct {
  loadConL_hiNode *_large_hi;
  loadConL_loNode *_large_lo;
  loadConLNode *_small;
  MachNode *_last;
} loadConLNodesTuple;

loadConLNodesTuple loadConLNodesTuple_create(PhaseRegAlloc *ra_, Node *toc, immLOper *immSrc,
                                             OptoReg::Name reg_second, OptoReg::Name reg_first) {
  loadConLNodesTuple nodes;

  const bool large_constant_pool = true; // TODO: PPC port C->cfg()->_consts_size > 4000;
  if (large_constant_pool) {
    // Create new nodes.
    loadConL_hiNode *m1 = new loadConL_hiNode();
    loadConL_loNode *m2 = new loadConL_loNode();

    // inputs for new nodes
    m1->add_req(nullptr, toc);
    m2->add_req(nullptr, m1);

    // operands for new nodes
    m1->_opnds[0] = new iRegLdstOper(); // dst
    m1->_opnds[1] = immSrc; // src
    m1->_opnds[2] = new iRegPdstOper(); // toc
    m2->_opnds[0] = new iRegLdstOper(); // dst
    m2->_opnds[1] = immSrc; // src
    m2->_opnds[2] = new iRegLdstOper(); // base

    // Initialize ins_attrib TOC fields.
    m1->_const_toc_offset = -1;
    m2->_const_toc_offset_hi_node = m1;

    // Initialize ins_attrib instruction offset.
    m1->_cbuf_insts_offset = -1;

    // register allocation for new nodes
    ra_->set_pair(m1->_idx, reg_second, reg_first);
    ra_->set_pair(m2->_idx, reg_second, reg_first);

    // Create result.
    nodes._large_hi = m1;
    nodes._large_lo = m2;
    nodes._small = nullptr;
    nodes._last = nodes._large_lo;
    assert(m2->bottom_type()->isa_long(), "must be long");
  } else {
    loadConLNode *m2 = new loadConLNode();

    // inputs for new nodes
    m2->add_req(nullptr, toc);

    // operands for new nodes
    m2->_opnds[0] = new iRegLdstOper(); // dst
    m2->_opnds[1] = immSrc; // src
    m2->_opnds[2] = new iRegPdstOper(); // toc

    // Initialize ins_attrib instruction offset.
    m2->_cbuf_insts_offset = -1;

    // register allocation for new nodes
    ra_->set_pair(m2->_idx, reg_second, reg_first);

    // Create result.
    nodes._large_hi = nullptr;
    nodes._large_lo = nullptr;
    nodes._small = m2;
    nodes._last = nodes._small;
    assert(m2->bottom_type()->isa_long(), "must be long");
  }

  return nodes;
}
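
// The large-constant-pool variant materializes the constant in two steps: the
// loadConL_hi node adds the high 16 bits of the TOC offset with addis (see
// enc_load_long_constL_hi above), and its companion loadConL_lo node then
// loads using the remaining low 16 bits, so TOC offsets beyond the +/-32 KiB
// reach of a single ld displacement still work. The small variant is a single
// TOC-relative ld.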
|
|
|
|
typedef struct {
|
|
loadConL_hiNode *_large_hi;
|
|
loadConL_loNode *_large_lo;
|
|
mtvsrdNode *_moved;
|
|
xxspltdNode *_replicated;
|
|
loadConLNode *_small;
|
|
MachNode *_last;
|
|
} loadConLReplicatedNodesTuple;
|
|
|
|
loadConLReplicatedNodesTuple loadConLReplicatedNodesTuple_create(Compile *C, PhaseRegAlloc *ra_, Node *toc, immLOper *immSrc,
|
|
vecXOper *dst, immI_0Oper *zero,
|
|
OptoReg::Name reg_second, OptoReg::Name reg_first,
|
|
OptoReg::Name reg_vec_second, OptoReg::Name reg_vec_first) {
|
|
loadConLReplicatedNodesTuple nodes;
|
|
|
|
const bool large_constant_pool = true; // TODO: PPC port C->cfg()->_consts_size > 4000;
|
|
if (large_constant_pool) {
|
|
// Create new nodes.
|
|
loadConL_hiNode *m1 = new loadConL_hiNode();
|
|
loadConL_loNode *m2 = new loadConL_loNode();
|
|
mtvsrdNode *m3 = new mtvsrdNode();
|
|
xxspltdNode *m4 = new xxspltdNode();
|
|
|
|
// inputs for new nodes
|
|
m1->add_req(nullptr, toc);
|
|
m2->add_req(nullptr, m1);
|
|
m3->add_req(nullptr, m2);
|
|
m4->add_req(nullptr, m3);
|
|
|
|
// operands for new nodes
|
|
m1->_opnds[0] = new iRegLdstOper(); // dst
|
|
m1->_opnds[1] = immSrc; // src
|
|
m1->_opnds[2] = new iRegPdstOper(); // toc
|
|
|
|
m2->_opnds[0] = new iRegLdstOper(); // dst
|
|
m2->_opnds[1] = immSrc; // src
|
|
m2->_opnds[2] = new iRegLdstOper(); // base
|
|
|
|
m3->_opnds[0] = new vecXOper(); // dst
|
|
m3->_opnds[1] = new iRegLdstOper(); // src
|
|
|
|
m4->_opnds[0] = new vecXOper(); // dst
|
|
m4->_opnds[1] = new vecXOper(); // src
|
|
m4->_opnds[2] = zero;
|
|
|
|
// Initialize ins_attrib TOC fields.
|
|
m1->_const_toc_offset = -1;
|
|
m2->_const_toc_offset_hi_node = m1;
|
|
|
|
// Initialize ins_attrib instruction offset.
|
|
m1->_cbuf_insts_offset = -1;
|
|
|
|
// register allocation for new nodes
|
|
ra_->set_pair(m1->_idx, reg_second, reg_first);
|
|
ra_->set_pair(m2->_idx, reg_second, reg_first);
|
|
ra_->set1(m3->_idx, reg_second);
|
|
ra_->set2(m3->_idx, reg_vec_first);
|
|
ra_->set_pair(m4->_idx, reg_vec_second, reg_vec_first);
|
|
|
|
// Create result.
|
|
nodes._large_hi = m1;
|
|
nodes._large_lo = m2;
|
|
nodes._moved = m3;
|
|
nodes._replicated = m4;
|
|
nodes._small = nullptr;
|
|
nodes._last = nodes._replicated;
|
|
assert(m2->bottom_type()->isa_long(), "must be long");
|
|
} else {
|
|
loadConLNode *m2 = new loadConLNode();
|
|
mtvsrdNode *m3 = new mtvsrdNode();
|
|
xxspltdNode *m4 = new xxspltdNode();
|
|
|
|
// inputs for new nodes
|
|
m2->add_req(nullptr, toc);
|
|
|
|
// operands for new nodes
|
|
m2->_opnds[0] = new iRegLdstOper(); // dst
|
|
m2->_opnds[1] = immSrc; // src
|
|
m2->_opnds[2] = new iRegPdstOper(); // toc
|
|
|
|
m3->_opnds[0] = new vecXOper(); // dst
|
|
m3->_opnds[1] = new iRegLdstOper(); // src
|
|
|
|
m4->_opnds[0] = new vecXOper(); // dst
|
|
m4->_opnds[1] = new vecXOper(); // src
|
|
m4->_opnds[2] = zero;
|
|
|
|
// Initialize ins_attrib instruction offset.
|
|
m2->_cbuf_insts_offset = -1;
|
|
ra_->set1(m3->_idx, reg_second);
|
|
ra_->set2(m3->_idx, reg_vec_first);
|
|
ra_->set_pair(m4->_idx, reg_vec_second, reg_vec_first);
|
|
|
|
// register allocation for new nodes
|
|
ra_->set_pair(m2->_idx, reg_second, reg_first);
|
|
|
|
// Create result.
|
|
nodes._large_hi = nullptr;
|
|
nodes._large_lo = nullptr;
|
|
nodes._small = m2;
|
|
nodes._moved = m3;
|
|
nodes._replicated = m4;
|
|
nodes._last = nodes._replicated;
|
|
assert(m2->bottom_type()->isa_long(), "must be long");
|
|
}
|
|
|
|
return nodes;
|
|
}
|
|
|
|
%} // source
|
|
|
|
encode %{
|
|
// Postalloc expand emitter for loading a long constant from the method's TOC.
|
|
// Enc_class needed as consttanttablebase is not supported by postalloc
|
|
// expand.
|
|
enc_class postalloc_expand_load_long_constant(iRegLdst dst, immL src, iRegLdst toc) %{
|
|
// Create new nodes.
|
|
loadConLNodesTuple loadConLNodes =
|
|
loadConLNodesTuple_create(ra_, n_toc, op_src,
|
|
ra_->get_reg_second(this), ra_->get_reg_first(this));
|
|
|
|
// Push new nodes.
|
|
if (loadConLNodes._large_hi) nodes->push(loadConLNodes._large_hi);
|
|
if (loadConLNodes._last) nodes->push(loadConLNodes._last);
|
|
|
|
// some asserts
|
|
assert(nodes->length() >= 1, "must have created at least 1 node");
|
|
assert(loadConLNodes._last->bottom_type()->isa_long(), "must be long");
|
|
%}
|
|
|
|
enc_class enc_load_long_constP(iRegLdst dst, immP src, iRegLdst toc) %{
|
|
int toc_offset = 0;
|
|
|
|
intptr_t val = $src$$constant;
|
|
relocInfo::relocType constant_reloc = $src->constant_reloc(); // src
|
|
address const_toc_addr;
|
|
RelocationHolder r; // Initializes type to none.
|
|
if (constant_reloc == relocInfo::oop_type) {
|
|
// Create an oop constant and a corresponding relocation.
|
|
AddressLiteral a = __ constant_oop_address((jobject)val);
|
|
const_toc_addr = __ address_constant((address)a.value(), RelocationHolder::none);
|
|
r = a.rspec();
|
|
} else if (constant_reloc == relocInfo::metadata_type) {
|
|
// Notify OOP recorder (don't need the relocation)
|
|
AddressLiteral a = __ constant_metadata_address((Metadata *)val);
|
|
const_toc_addr = __ address_constant((address)a.value(), RelocationHolder::none);
|
|
} else {
|
|
// Create a non-oop constant, no relocation needed.
|
|
const_toc_addr = __ long_constant((jlong)$src$$constant);
|
|
}
|
|
|
|
if (const_toc_addr == nullptr) {
|
|
ciEnv::current()->record_out_of_memory_failure();
|
|
return;
|
|
}
|
|
__ relocate(r); // If set above.
|
|
// Get the constant's TOC offset.
|
|
toc_offset = __ offset_to_method_toc(const_toc_addr);
|
|
|
|
__ ld($dst$$Register, toc_offset, $toc$$Register);
|
|
%}
|
|
|
|
enc_class enc_load_long_constP_hi(iRegLdst dst, immP src, iRegLdst toc) %{
|
|
if (!ra_->C->output()->in_scratch_emit_size()) {
|
|
intptr_t val = $src$$constant;
|
|
relocInfo::relocType constant_reloc = $src->constant_reloc(); // src
|
|
address const_toc_addr;
|
|
RelocationHolder r; // Initializes type to none.
|
|
if (constant_reloc == relocInfo::oop_type) {
|
|
// Create an oop constant and a corresponding relocation.
|
|
AddressLiteral a = __ constant_oop_address((jobject)val);
|
|
const_toc_addr = __ address_constant((address)a.value(), RelocationHolder::none);
|
|
r = a.rspec();
|
|
} else if (constant_reloc == relocInfo::metadata_type) {
|
|
// Notify OOP recorder (don't need the relocation)
|
|
AddressLiteral a = __ constant_metadata_address((Metadata *)val);
|
|
const_toc_addr = __ address_constant((address)a.value(), RelocationHolder::none);
|
|
} else { // non-oop pointers, e.g. card mark base, heap top
|
|
// Create a non-oop constant, no relocation needed.
|
|
const_toc_addr = __ long_constant((jlong)$src$$constant);
|
|
}
|
|
|
|
if (const_toc_addr == nullptr) {
|
|
ciEnv::current()->record_out_of_memory_failure();
|
|
return;
|
|
}
|
|
__ relocate(r); // If set above.
|
|
// Get the constant's TOC offset.
|
|
const int toc_offset = __ offset_to_method_toc(const_toc_addr);
|
|
// Store the toc offset of the constant.
|
|
((loadConP_hiNode*)this)->_const_toc_offset = toc_offset;
|
|
}
|
|
|
|
__ addis($dst$$Register, $toc$$Register, MacroAssembler::largeoffset_si16_si16_hi(_const_toc_offset));
|
|
%}
|
|
|
|
// Postalloc expand emitter for loading a ptr constant from the method's TOC.
|
|
// Enc_class needed as consttanttablebase is not supported by postalloc
|
|
// expand.
|
|
enc_class postalloc_expand_load_ptr_constant(iRegPdst dst, immP src, iRegLdst toc) %{
|
|
const bool large_constant_pool = true; // TODO: PPC port C->cfg()->_consts_size > 4000;
|
|
if (large_constant_pool) {
|
|
// Create new nodes.
|
|
loadConP_hiNode *m1 = new loadConP_hiNode();
|
|
loadConP_loNode *m2 = new loadConP_loNode();
|
|
|
|
// inputs for new nodes
|
|
m1->add_req(nullptr, n_toc);
|
|
m2->add_req(nullptr, m1);
|
|
|
|
// operands for new nodes
|
|
m1->_opnds[0] = new iRegPdstOper(); // dst
|
|
m1->_opnds[1] = op_src; // src
|
|
m1->_opnds[2] = new iRegPdstOper(); // toc
|
|
m2->_opnds[0] = new iRegPdstOper(); // dst
|
|
m2->_opnds[1] = op_src; // src
|
|
m2->_opnds[2] = new iRegLdstOper(); // base
|
|
|
|
// Initialize ins_attrib TOC fields.
|
|
m1->_const_toc_offset = -1;
|
|
m2->_const_toc_offset_hi_node = m1;
|
|
|
|
// Register allocation for new nodes.
|
|
ra_->set_pair(m1->_idx, ra_->get_reg_second(this), ra_->get_reg_first(this));
|
|
ra_->set_pair(m2->_idx, ra_->get_reg_second(this), ra_->get_reg_first(this));
|
|
|
|
nodes->push(m1);
|
|
nodes->push(m2);
|
|
assert(m2->bottom_type()->isa_ptr(), "must be ptr");
|
|
} else {
|
|
loadConPNode *m2 = new loadConPNode();
|
|
|
|
// inputs for new nodes
|
|
m2->add_req(nullptr, n_toc);
|
|
|
|
// operands for new nodes
|
|
m2->_opnds[0] = new iRegPdstOper(); // dst
|
|
m2->_opnds[1] = op_src; // src
|
|
m2->_opnds[2] = new iRegPdstOper(); // toc
|
|
|
|
// Register allocation for new nodes.
|
|
ra_->set_pair(m2->_idx, ra_->get_reg_second(this), ra_->get_reg_first(this));
|
|
|
|
nodes->push(m2);
|
|
assert(m2->bottom_type()->isa_ptr(), "must be ptr");
|
|
}
|
|
%}
|
|
|
|
  // Enc_class needed as constanttablebase is not supported by postalloc
  // expand.
  enc_class postalloc_expand_load_float_constant(regF dst, immF src, iRegLdst toc) %{
    bool large_constant_pool = true; // TODO: PPC port C->cfg()->_consts_size > 4000;

    MachNode *m2;
    if (large_constant_pool) {
      m2 = new loadConFCompNode();
    } else {
      m2 = new loadConFNode();
    }
    // inputs for new nodes
    m2->add_req(nullptr, n_toc);

    // operands for new nodes
    m2->_opnds[0] = op_dst;
    m2->_opnds[1] = op_src;
    m2->_opnds[2] = new iRegPdstOper(); // constanttablebase

    // register allocation for new nodes
    ra_->set_pair(m2->_idx, ra_->get_reg_second(this), ra_->get_reg_first(this));
    nodes->push(m2);
  %}

  // Enc_class needed as constanttablebase is not supported by postalloc
  // expand.
  enc_class postalloc_expand_load_double_constant(regD dst, immD src, iRegLdst toc) %{
    bool large_constant_pool = true; // TODO: PPC port C->cfg()->_consts_size > 4000;

    MachNode *m2;
    if (large_constant_pool) {
      m2 = new loadConDCompNode();
    } else {
      m2 = new loadConDNode();
    }
    // inputs for new nodes
    m2->add_req(nullptr, n_toc);

    // operands for new nodes
    m2->_opnds[0] = op_dst;
    m2->_opnds[1] = op_src;
    m2->_opnds[2] = new iRegPdstOper(); // constanttablebase

    // register allocation for new nodes
    ra_->set_pair(m2->_idx, ra_->get_reg_second(this), ra_->get_reg_first(this));
    nodes->push(m2);
  %}

  enc_class enc_stw(iRegIsrc src, memory mem) %{
    int Idisp = $mem$$disp + frame_slots_bias($mem$$base, ra_);
    __ stw($src$$Register, Idisp, $mem$$base$$Register);
  %}

  enc_class enc_std(iRegIsrc src, memoryAlg4 mem) %{
    int Idisp = $mem$$disp + frame_slots_bias($mem$$base, ra_);
    // Operand 'ds' requires 4-alignment.
    assert((Idisp & 0x3) == 0, "unaligned offset");
    __ std($src$$Register, Idisp, $mem$$base$$Register);
  %}

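  // Note on the alignment assert above: std/ld use the DS instruction
  // form, whose 16-bit displacement field reuses its low 2 bits as
  // opcode bits, so only 4-byte-aligned displacements are encodable
  // (e.g. a displacement of 0x1004 is fine, 0x1005 is not).
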
  enc_class enc_stfs(RegF src, memory mem) %{
    int Idisp = $mem$$disp + frame_slots_bias($mem$$base, ra_);
    __ stfs($src$$FloatRegister, Idisp, $mem$$base$$Register);
  %}

  enc_class enc_stfd(RegF src, memory mem) %{
    int Idisp = $mem$$disp + frame_slots_bias($mem$$base, ra_);
    __ stfd($src$$FloatRegister, Idisp, $mem$$base$$Register);
  %}

  enc_class postalloc_expand_encode_oop(iRegNdst dst, iRegPdst src, flagsReg crx) %{
    // use isel instruction with Power 7
    cmpP_reg_imm16Node *n_compare  = new cmpP_reg_imm16Node();
    encodeP_subNode    *n_sub_base = new encodeP_subNode();
    encodeP_shiftNode  *n_shift    = new encodeP_shiftNode();
    cond_set_0_oopNode *n_cond_set = new cond_set_0_oopNode();

    n_compare->add_req(n_region, n_src);
    n_compare->_opnds[0] = op_crx;
    n_compare->_opnds[1] = op_src;
    n_compare->_opnds[2] = new immL16Oper(0);

    n_sub_base->add_req(n_region, n_src);
    n_sub_base->_opnds[0] = op_dst;
    n_sub_base->_opnds[1] = op_src;
    n_sub_base->_bottom_type = _bottom_type;

    n_shift->add_req(n_region, n_sub_base);
    n_shift->_opnds[0] = op_dst;
    n_shift->_opnds[1] = op_dst;
    n_shift->_bottom_type = _bottom_type;

    n_cond_set->add_req(n_region, n_compare, n_shift);
    n_cond_set->_opnds[0] = op_dst;
    n_cond_set->_opnds[1] = op_crx;
    n_cond_set->_opnds[2] = op_dst;
    n_cond_set->_bottom_type = _bottom_type;

    ra_->set_pair(n_compare->_idx, ra_->get_reg_second(n_crx), ra_->get_reg_first(n_crx));
    ra_->set_pair(n_sub_base->_idx, ra_->get_reg_second(this), ra_->get_reg_first(this));
    ra_->set_pair(n_shift->_idx, ra_->get_reg_second(this), ra_->get_reg_first(this));
    ra_->set_pair(n_cond_set->_idx, ra_->get_reg_second(this), ra_->get_reg_first(this));

    nodes->push(n_compare);
    nodes->push(n_sub_base);
    nodes->push(n_shift);
    nodes->push(n_cond_set);

    assert(!(ra_->is_oop(this)), "sanity"); // This is not supposed to be GC'ed.
  %}

  enc_class postalloc_expand_encode_oop_not_null(iRegNdst dst, iRegPdst src) %{

    encodeP_subNode *n1 = new encodeP_subNode();
    n1->add_req(n_region, n_src);
    n1->_opnds[0] = op_dst;
    n1->_opnds[1] = op_src;
    n1->_bottom_type = _bottom_type;

    encodeP_shiftNode *n2 = new encodeP_shiftNode();
    n2->add_req(n_region, n1);
    n2->_opnds[0] = op_dst;
    n2->_opnds[1] = op_dst;
    n2->_bottom_type = _bottom_type;
    ra_->set_pair(n1->_idx, ra_->get_reg_second(this), ra_->get_reg_first(this));
    ra_->set_pair(n2->_idx, ra_->get_reg_second(this), ra_->get_reg_first(this));

    nodes->push(n1);
    nodes->push(n2);
    assert(!(ra_->is_oop(this)), "sanity"); // This is not supposed to be GC'ed.
  %}

  enc_class postalloc_expand_decode_oop(iRegPdst dst, iRegNsrc src, flagsReg crx) %{
    decodeN_shiftNode *n_shift   = new decodeN_shiftNode();
    cmpN_reg_imm0Node *n_compare = new cmpN_reg_imm0Node();

    n_compare->add_req(n_region, n_src);
    n_compare->_opnds[0] = op_crx;
    n_compare->_opnds[1] = op_src;
    n_compare->_opnds[2] = new immN_0Oper(TypeNarrowOop::NULL_PTR);

    n_shift->add_req(n_region, n_src);
    n_shift->_opnds[0] = op_dst;
    n_shift->_opnds[1] = op_src;
    n_shift->_bottom_type = _bottom_type;

    // use isel instruction with Power 7
    decodeN_addNode *n_add_base = new decodeN_addNode();
    n_add_base->add_req(n_region, n_shift);
    n_add_base->_opnds[0] = op_dst;
    n_add_base->_opnds[1] = op_dst;
    n_add_base->_bottom_type = _bottom_type;

    cond_set_0_ptrNode *n_cond_set = new cond_set_0_ptrNode();
    n_cond_set->add_req(n_region, n_compare, n_add_base);
    n_cond_set->_opnds[0] = op_dst;
    n_cond_set->_opnds[1] = op_crx;
    n_cond_set->_opnds[2] = op_dst;
    n_cond_set->_bottom_type = _bottom_type;

    assert(ra_->is_oop(this) == true, "A decodeN node must produce an oop!");
    ra_->set_oop(n_cond_set, true);

    ra_->set_pair(n_shift->_idx, ra_->get_reg_second(this), ra_->get_reg_first(this));
    ra_->set_pair(n_compare->_idx, ra_->get_reg_second(n_crx), ra_->get_reg_first(n_crx));
    ra_->set_pair(n_add_base->_idx, ra_->get_reg_second(this), ra_->get_reg_first(this));
    ra_->set_pair(n_cond_set->_idx, ra_->get_reg_second(this), ra_->get_reg_first(this));

    nodes->push(n_compare);
    nodes->push(n_shift);
    nodes->push(n_add_base);
    nodes->push(n_cond_set);

  %}

  enc_class postalloc_expand_decode_oop_not_null(iRegPdst dst, iRegNsrc src) %{
    decodeN_shiftNode *n1 = new decodeN_shiftNode();
    n1->add_req(n_region, n_src);
    n1->_opnds[0] = op_dst;
    n1->_opnds[1] = op_src;
    n1->_bottom_type = _bottom_type;

    decodeN_addNode *n2 = new decodeN_addNode();
    n2->add_req(n_region, n1);
    n2->_opnds[0] = op_dst;
    n2->_opnds[1] = op_dst;
    n2->_bottom_type = _bottom_type;
    ra_->set_pair(n1->_idx, ra_->get_reg_second(this), ra_->get_reg_first(this));
    ra_->set_pair(n2->_idx, ra_->get_reg_second(this), ra_->get_reg_first(this));

    assert(ra_->is_oop(this) == true, "A decodeN node must produce an oop!");
    ra_->set_oop(n2, true);

    nodes->push(n1);
    nodes->push(n2);
  %}


  // This enc_class is needed so that scheduler gets proper
  // input mapping for latency computation.
  enc_class enc_andc(iRegIdst dst, iRegIsrc src1, iRegIsrc src2) %{
    __ andc($dst$$Register, $src1$$Register, $src2$$Register);
  %}

  enc_class enc_convI2B_regI__cmove(iRegIdst dst, iRegIsrc src, flagsReg crx, immI16 zero, immI16 notzero) %{
    Label done;
    __ cmpwi($crx$$CondRegister, $src$$Register, 0);
    __ li($dst$$Register, $zero$$constant);
    __ beq($crx$$CondRegister, done);
    __ li($dst$$Register, $notzero$$constant);
    __ bind(done);
  %}

  enc_class enc_convP2B_regP__cmove(iRegIdst dst, iRegPsrc src, flagsReg crx, immI16 zero, immI16 notzero) %{
    Label done;
    __ cmpdi($crx$$CondRegister, $src$$Register, 0);
    __ li($dst$$Register, $zero$$constant);
    __ beq($crx$$CondRegister, done);
    __ li($dst$$Register, $notzero$$constant);
    __ bind(done);
  %}

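  // Both cmove encodings above emit the same shape of compare-and-branch
  // sequence (sketch for the integer variant; the pointer variant uses
  // cmpdi instead of cmpwi):
  //   cmpwi crx, Rsrc, 0
  //   li    Rdst, zero
  //   beq   crx, done
  //   li    Rdst, notzero
  //   done:
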
  enc_class enc_cmove_bso_stackSlotL(iRegLdst dst, flagsRegSrc crx, stackSlotL mem) %{
    int Idisp = $mem$$disp + frame_slots_bias($mem$$base, ra_);
    Label done;
    __ bso($crx$$CondRegister, done);
    __ ld($dst$$Register, Idisp, $mem$$base$$Register);
    __ bind(done);
  %}

  enc_class enc_cmove_bso_reg(iRegLdst dst, flagsRegSrc crx, regD src) %{
    Label done;
    __ bso($crx$$CondRegister, done);
    __ mffprd($dst$$Register, $src$$FloatRegister);
    __ bind(done);
  %}

  enc_class enc_bc(flagsRegSrc crx, cmpOp cmp, Label lbl) %{
    Label d;   // dummy
    __ bind(d);
    Label* p = ($lbl$$label);
    // `p' is `nullptr' when this encoding class is used only to
    // determine the size of the encoded instruction.
    Label& l = (nullptr == p) ? d : *(p);
    int cc = $cmp$$cmpcode;
    int flags_reg = $crx$$reg;
    assert((Assembler::bcondCRbiIs1 & ~Assembler::bcondCRbiIs0) == 8, "check encoding");
    int bhint = Assembler::bhintNoHint;

    if (UseStaticBranchPredictionForUncommonPathsPPC64) {
      if (_prob <= PROB_NEVER) {
        bhint = Assembler::bhintIsNotTaken;
      } else if (_prob >= PROB_ALWAYS) {
        bhint = Assembler::bhintIsTaken;
      }
    }

    __ bc(Assembler::add_bhint_to_boint(bhint, cc_to_boint(cc)),
          cc_to_biint(cc, flags_reg),
          l);
  %}

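  // With UseStaticBranchPredictionForUncommonPathsPPC64, a branch whose
  // probability is at most PROB_NEVER gets the "not taken" static hint
  // and one at least PROB_ALWAYS gets the "taken" hint; disassemblers
  // typically render these hint bits as '-' and '+' suffixes (e.g. beq-).
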
  enc_class enc_bc_far(flagsRegSrc crx, cmpOp cmp, Label lbl) %{
    // The scheduler doesn't know about branch shortening, so we set the opcode
    // to ppc64Opcode_bc in order to hide this detail from the scheduler.
    Label d;   // dummy
    __ bind(d);
    Label* p = ($lbl$$label);
    // `p' is `nullptr' when this encoding class is used only to
    // determine the size of the encoded instruction.
    Label& l = (nullptr == p) ? d : *(p);
    int cc = $cmp$$cmpcode;
    int flags_reg = $crx$$reg;
    int bhint = Assembler::bhintNoHint;

    if (UseStaticBranchPredictionForUncommonPathsPPC64) {
      if (_prob <= PROB_NEVER) {
        bhint = Assembler::bhintIsNotTaken;
      } else if (_prob >= PROB_ALWAYS) {
        bhint = Assembler::bhintIsTaken;
      }
    }

    // Tell the conditional far branch to optimize itself when being relocated.
    __ bc_far(Assembler::add_bhint_to_boint(bhint, cc_to_boint(cc)),
              cc_to_biint(cc, flags_reg),
              l,
              MacroAssembler::bc_far_optimize_on_relocate);
  %}

  // Postalloc expand emitter for loading a replicated float constant from
  // the method's TOC.
  // Enc_class needed as constanttablebase is not supported by postalloc
  // expand.
  enc_class postalloc_expand_load_replF_constant(iRegLdst dst, immF src, iRegLdst toc) %{
    // Create new nodes.

    // Make an operand with the bit pattern to load as float.
    immLOper *op_repl = new immLOper((jlong)replicate_immF(op_src->constantF()));

    loadConLNodesTuple loadConLNodes =
      loadConLNodesTuple_create(ra_, n_toc, op_repl,
                                ra_->get_reg_second(this), ra_->get_reg_first(this));

    // Push new nodes.
    if (loadConLNodes._large_hi) nodes->push(loadConLNodes._large_hi);
    if (loadConLNodes._last)     nodes->push(loadConLNodes._last);

    assert(nodes->length() >= 1, "must have created at least 1 node");
    assert(loadConLNodes._last->bottom_type()->isa_long(), "must be long");
  %}

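  // replicate_immF, as the name suggests, yields the 32-bit float pattern
  // duplicated into both halves of a long; e.g. 1.0f (0x3F800000) becomes
  // 0x3F8000003F800000, so one 64-bit TOC load provides two packed copies.
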
  enc_class postalloc_expand_load_replF_constant_vsx(vecX dst, immF src, iRegLdst toc, iRegLdst tmp) %{
    // Create new nodes.

    // Make an operand with the bit pattern to load as float.
    immLOper *op_repl = new immLOper((jlong)replicate_immF(op_src->constantF()));
    immI_0Oper *op_zero = new immI_0Oper(0);

    loadConLReplicatedNodesTuple loadConLNodes =
      loadConLReplicatedNodesTuple_create(C, ra_, n_toc, op_repl, op_dst, op_zero,
                                          ra_->get_reg_second(n_tmp), ra_->get_reg_first(n_tmp),
                                          ra_->get_reg_second(this), ra_->get_reg_first(this));

    // Push new nodes.
    if (loadConLNodes._large_hi) { nodes->push(loadConLNodes._large_hi); }
    if (loadConLNodes._large_lo) { nodes->push(loadConLNodes._large_lo); }
    if (loadConLNodes._moved)    { nodes->push(loadConLNodes._moved); }
    if (loadConLNodes._last)     { nodes->push(loadConLNodes._last); }

    assert(nodes->length() >= 1, "must have created at least 1 node");
  %}

  // This enc_class is needed so that scheduler gets proper
  // input mapping for latency computation.
  enc_class enc_poll(immI dst, iRegLdst poll) %{
    // Fake operand dst needed for PPC scheduler.
    assert($dst$$constant == 0x0, "dst must be 0x0");

    // Mark the code position where the load from the safepoint
    // polling page was emitted as relocInfo::poll_type.
    __ relocate(relocInfo::poll_type);
    __ load_from_polling_page($poll$$Register);
  %}

  // A Java static call or a runtime call.
  //
  // Branch-and-link relative to a trampoline.
  // The trampoline loads the target address and does a long branch to there.
  // In case we call java, the trampoline branches to an interpreter_stub
  // which loads the inline cache and the real call target from the constant pool.
  //
  // This basically looks like this:
  //
  // >>>> consts      -+  -+
  //                    |   |- offset1
  // [call target1]     | <-+
  // [IC cache]         |- offset2
  // [call target2]  <--+
  //
  // <<<< consts
  // >>>> insts
  //
  // bl offset16               -+  -+    ??? // How many bits available?
  //                             |   |
  // <<<< insts                  |   |
  // >>>> stubs                  |   |
  //                             |   |- trampoline_stub_Reloc
  // trampoline stub:            | <-+
  //   r2 = toc                  |
  //   r2 = [r2 + offset1]       |    // Load call target1 from const section
  //   mtctr r2                  |
  //   bctr                      |- static_stub_Reloc
  // comp_to_interp_stub:   <---+
  //   r1 = toc
  //   ICreg = [r1 + IC_offset]       // Load IC from const section
  //   r1    = [r1 + offset2]         // Load call target2 from const section
  //   mtctr r1
  //   bctr
  //
  // <<<< stubs
  //
  // The call instruction in the code either
  // - Branches directly to a compiled method if the offset is encodable in instruction.
  // - Branches to the trampoline stub if the offset to the compiled method is not encodable.
  // - Branches to the compiled_to_interp stub if the target is interpreted.
  //
  // Further there are three relocations from the loads to the constants in
  // the constant section.
  //
  // Usage of r1 and r2 in the stubs allows us to distinguish them.
  enc_class enc_java_static_call(method meth) %{
    address entry_point = (address)$meth$$method;

    if (!_method) {
      // A call to a runtime wrapper, e.g. new, new_typeArray_Java, uncommon_trap.
      emit_call_with_trampoline_stub(masm, entry_point, relocInfo::runtime_call_type);
      if (ciEnv::current()->failing()) { return; } // Code cache may be full.
    } else {
      // Remember the offset, not the address.
      const int start_offset = __ offset();

      // The trampoline stub.
      // No entry point given, use the current pc.
      // Make sure branch fits into
      if (entry_point == nullptr) entry_point = __ pc();

      // Put the entry point as a constant into the constant pool.
      const address entry_point_toc_addr = __ address_constant(entry_point, RelocationHolder::none);
      if (entry_point_toc_addr == nullptr) {
        ciEnv::current()->record_out_of_memory_failure();
        return;
      }
      const int entry_point_toc_offset = __ offset_to_method_toc(entry_point_toc_addr);

      // Emit the trampoline stub which will be related to the branch-and-link below.
      CallStubImpl::emit_trampoline_stub(masm, entry_point_toc_offset, start_offset);
      if (ciEnv::current()->failing()) { return; } // Code cache may be full.
      int method_index = resolved_method_index(masm);
      __ relocate(_optimized_virtual ? opt_virtual_call_Relocation::spec(method_index)
                                     : static_call_Relocation::spec(method_index));

      // The real call.
      // Note: At this point we do not have the address of the trampoline
      // stub, and the entry point might be too far away for bl, so __ pc()
      // serves as dummy and the bl will be patched later.
      __ set_inst_mark();
      __ bl(__ pc());  // Emits a relocation.

      // The stub for call to interpreter.
      address stub = CompiledDirectCall::emit_to_interp_stub(masm);
      __ clear_inst_mark();
      if (stub == nullptr) {
        ciEnv::current()->record_failure("CodeCache is full");
        return;
      }
    }
    __ post_call_nop();
  %}

  // Second node of expanded dynamic call - the call.
  enc_class enc_java_dynamic_call_sched(method meth) %{
    if (!ra_->C->output()->in_scratch_emit_size()) {
      // Create a call trampoline stub for the given method.
      const address entry_point = !($meth$$method) ? nullptr : (address)$meth$$method;
      const address entry_point_const = __ address_constant(entry_point, RelocationHolder::none);
      if (entry_point_const == nullptr) {
        ciEnv::current()->record_out_of_memory_failure();
        return;
      }
      const int entry_point_const_toc_offset = __ offset_to_method_toc(entry_point_const);
      CallStubImpl::emit_trampoline_stub(masm, entry_point_const_toc_offset, __ offset());
      if (ra_->C->env()->failing()) { return; } // Code cache may be full.

      // Build relocation at call site with ic position as data.
      assert((_load_ic_hi_node != nullptr && _load_ic_node == nullptr) ||
             (_load_ic_hi_node == nullptr && _load_ic_node != nullptr),
             "must have one, but can't have both");
      assert((_load_ic_hi_node != nullptr && _load_ic_hi_node->_cbuf_insts_offset != -1) ||
             (_load_ic_node != nullptr && _load_ic_node->_cbuf_insts_offset != -1),
             "must contain instruction offset");
      const int virtual_call_oop_addr_offset = _load_ic_hi_node != nullptr
        ? _load_ic_hi_node->_cbuf_insts_offset
        : _load_ic_node->_cbuf_insts_offset;
      const address virtual_call_oop_addr = __ addr_at(virtual_call_oop_addr_offset);
      assert(MacroAssembler::is_load_const_from_method_toc_at(virtual_call_oop_addr),
             "should be load from TOC");
      int method_index = resolved_method_index(masm);
      __ relocate(virtual_call_Relocation::spec(virtual_call_oop_addr, method_index));
    }

    // At this point I do not have the address of the trampoline stub,
    // and the entry point might be too far away for bl. Pc() serves
    // as dummy and bl will be patched later.
    __ bl((address) __ pc());
    __ post_call_nop();
  %}

  // postalloc expand emitter for virtual calls.
  enc_class postalloc_expand_java_dynamic_call_sched(method meth, iRegLdst toc) %{

    // Create the nodes for loading the IC from the TOC.
    loadConLNodesTuple loadConLNodes_IC =
      loadConLNodesTuple_create(ra_, n_toc, new immLOper((jlong) Universe::non_oop_word()),
                                OptoReg::Name(R19_H_num), OptoReg::Name(R19_num));

    // Create the call node.
    CallDynamicJavaDirectSchedNode *call = new CallDynamicJavaDirectSchedNode();
    call->_method_handle_invoke = _method_handle_invoke;
    call->_vtable_index = _vtable_index;
    call->_method = _method;
    call->_optimized_virtual = _optimized_virtual;
    call->_tf = _tf;
    call->_entry_point = _entry_point;
    call->_cnt = _cnt;
    call->_guaranteed_safepoint = true;
    call->_oop_map = _oop_map;
    call->_jvms = _jvms;
    call->_jvmadj = _jvmadj;
    call->_has_ea_local_in_scope = _has_ea_local_in_scope;
    call->_in_rms = _in_rms;
    call->_nesting = _nesting;
    call->_override_symbolic_info = _override_symbolic_info;
    call->_arg_escape = _arg_escape;

    // New call needs all inputs of old call.
    // Req...
    for (uint i = 0; i < req(); ++i) {
      // The expanded node does not need toc any more.
      // Add the inline cache constant here instead. This expresses that the
      // register of the inline cache must be live at the call.
      // Otherwise we would have to adapt JVMState by -1.
      if (i == mach_constant_base_node_input()) {
        call->add_req(loadConLNodes_IC._last);
      } else {
        call->add_req(in(i));
      }
    }
    // ...as well as prec
    for (uint i = req(); i < len(); ++i) {
      call->add_prec(in(i));
    }

    // Remember nodes loading the inline cache into r19.
    call->_load_ic_hi_node = loadConLNodes_IC._large_hi;
    call->_load_ic_node    = loadConLNodes_IC._small;

    // Operands for new nodes.
    call->_opnds[0] = _opnds[0];
    call->_opnds[1] = _opnds[1];

    // Only the inline cache is associated with a register.
    assert(Matcher::inline_cache_reg() == OptoReg::Name(R19_num), "ic reg should be R19");

    // Push new nodes.
    if (loadConLNodes_IC._large_hi) nodes->push(loadConLNodes_IC._large_hi);
    if (loadConLNodes_IC._last)     nodes->push(loadConLNodes_IC._last);
    nodes->push(call);
  %}

  // Compound version of call dynamic
  // Toc is only passed so that it can be used in ins_encode statement.
  // In the code we have to use $constanttablebase.
  enc_class enc_java_dynamic_call(method meth, iRegLdst toc) %{
    int start_offset = __ offset();

    Register Rtoc = (ra_) ? $constanttablebase : R2_TOC;

    int vtable_index = this->_vtable_index;
    if (vtable_index < 0) {
      // Must be invalid_vtable_index, not nonvirtual_vtable_index.
      assert(vtable_index == Method::invalid_vtable_index, "correct sentinel value");
      Register ic_reg = as_Register(Matcher::inline_cache_reg_encode());

      // Virtual call relocation will point to ic load.
      address virtual_call_meta_addr = __ pc();
      // Load a clear inline cache.
      AddressLiteral empty_ic((address) Universe::non_oop_word());
      bool success = __ load_const_from_method_toc(ic_reg, empty_ic, Rtoc, /*fixed_size*/ true);
      if (!success) {
        ciEnv::current()->record_out_of_memory_failure();
        return;
      }
      // CALL to fixup routine. Fixup routine uses ScopeDesc info
      // to determine who we intended to call.
      __ relocate(virtual_call_Relocation::spec(virtual_call_meta_addr));
      emit_call_with_trampoline_stub(masm, (address)$meth$$method, relocInfo::none);
      if (ciEnv::current()->failing()) { return; } // Code cache may be full.
      assert(((MachCallDynamicJavaNode*)this)->ret_addr_offset() == __ offset() - start_offset,
             "Fix constant in ret_addr_offset(), expected %d", __ offset() - start_offset);
    } else {
      assert(!UseInlineCaches, "expect vtable calls only if not using ICs");
      // Go through the vtable. Get the receiver klass. The receiver has
      // already been checked for non-null. If we go through a C2I adapter,
      // the interpreter expects the method in R19_method.

      __ load_klass(R11_scratch1, R3);

      int entry_offset = in_bytes(Klass::vtable_start_offset()) + vtable_index * vtableEntry::size_in_bytes();
      int v_off = entry_offset + in_bytes(vtableEntry::method_offset());
      __ li(R19_method, v_off);
      __ ldx(R19_method/*method*/, R19_method/*method offset*/, R11_scratch1/*class*/);
      // NOTE: for vtable dispatches, the vtable entry will never be
      // null. However it may very well end up in handle_wrong_method
      // if the method is abstract for the particular class.
      __ ld(R11_scratch1, in_bytes(Method::from_compiled_offset()), R19_method);
      // Call target. Either compiled code or C2I adapter.
      __ mtctr(R11_scratch1);
      __ bctrl();
      assert(((MachCallDynamicJavaNode*)this)->ret_addr_offset() == __ offset() - start_offset,
             "Fix constant in ret_addr_offset(), expected %d", __ offset() - start_offset);
    }
    __ post_call_nop();
  %}

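  // Dispatch sketch for the vtable path above: the method is loaded from
  // receiver_klass + vtable_start_offset + vtable_index * vtableEntry::size_in_bytes()
  // + method_offset, then control transfers through
  // *(R19_method + Method::from_compiled_offset()) via mtctr/bctrl.
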
  // A runtime call.
  enc_class enc_java_to_runtime_call(method meth) %{
    const address start_pc = __ pc();

#if defined(ABI_ELFv2)
    address entry = !($meth$$method) ? nullptr : (address)$meth$$method;
    __ call_c(entry, relocInfo::runtime_call_type);
    __ post_call_nop();
#else
    // The function we're going to call.
    FunctionDescriptor fdtemp;
    const FunctionDescriptor* fd = !($meth$$method) ? &fdtemp : (FunctionDescriptor*)$meth$$method;

    Register Rtoc = R12_scratch2;
    // Calculate the method's TOC.
    __ calculate_address_from_global_toc(Rtoc, __ method_toc());
    // Put entry, env, toc into the constant pool; this needs up to 3 constant
    // pool entries. call_c_using_toc will optimize the call.
    bool success = __ call_c_using_toc(fd, relocInfo::runtime_call_type, Rtoc);
    if (!success) {
      ciEnv::current()->record_out_of_memory_failure();
      return;
    }
    __ post_call_nop();
#endif

    // Check the ret_addr_offset.
    assert(((MachCallRuntimeNode*)this)->ret_addr_offset() == __ last_calls_return_pc() - start_pc,
           "Fix constant in ret_addr_offset()");
  %}

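  // Background for the #else path above: under the ELFv1 ABI a C function
  // pointer designates a function descriptor (entry point, TOC, environment
  // pointer), which is why up to three constant pool entries are needed,
  // whereas ELFv2 branches to the entry point directly.
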
  // Move to ctr for leaf call.
  // This enc_class is needed so that scheduler gets proper
  // input mapping for latency computation.
  enc_class enc_leaf_call_mtctr(iRegLsrc src) %{
    __ mtctr($src$$Register);
  %}

  // Postalloc expand emitter for runtime leaf calls.
  enc_class postalloc_expand_java_to_runtime_call(method meth, iRegLdst toc) %{
    loadConLNodesTuple loadConLNodes_Entry;
#if defined(ABI_ELFv2)
    jlong entry_address = (jlong) this->entry_point();
    assert(entry_address, "need address here");
    loadConLNodes_Entry = loadConLNodesTuple_create(ra_, n_toc, new immLOper(entry_address),
                                                    OptoReg::Name(R12_H_num), OptoReg::Name(R12_num));
#else
    // Get the struct that describes the function we are about to call.
    FunctionDescriptor* fd = (FunctionDescriptor*) this->entry_point();
    assert(fd, "need fd here");
    jlong entry_address = (jlong) fd->entry();
    // new nodes
    loadConLNodesTuple loadConLNodes_Env;
    loadConLNodesTuple loadConLNodes_Toc;

    // Create nodes and operands for loading the entry point.
    loadConLNodes_Entry = loadConLNodesTuple_create(ra_, n_toc, new immLOper(entry_address),
                                                    OptoReg::Name(R12_H_num), OptoReg::Name(R12_num));


    // Create nodes and operands for loading the env pointer.
    if (fd->env() != nullptr) {
      loadConLNodes_Env = loadConLNodesTuple_create(ra_, n_toc, new immLOper((jlong) fd->env()),
                                                    OptoReg::Name(R11_H_num), OptoReg::Name(R11_num));
    } else {
      loadConLNodes_Env._large_hi = nullptr;
      loadConLNodes_Env._large_lo = nullptr;
      loadConLNodes_Env._small    = nullptr;
      loadConLNodes_Env._last = new loadConL16Node();
      loadConLNodes_Env._last->_opnds[0] = new iRegLdstOper();
      loadConLNodes_Env._last->_opnds[1] = new immL16Oper(0);
      ra_->set_pair(loadConLNodes_Env._last->_idx, OptoReg::Name(R11_H_num), OptoReg::Name(R11_num));
    }

    // Create nodes and operands for loading the TOC pointer.
    loadConLNodes_Toc = loadConLNodesTuple_create(ra_, n_toc, new immLOper((jlong) fd->toc()),
                                                  OptoReg::Name(R2_H_num), OptoReg::Name(R2_num));
#endif // ABI_ELFv2
    // mtctr node
    MachNode *mtctr = new CallLeafDirect_mtctrNode();

    assert(loadConLNodes_Entry._last != nullptr, "entry must exist");
    mtctr->add_req(nullptr, loadConLNodes_Entry._last);

    mtctr->_opnds[0] = new iRegLdstOper();
    mtctr->_opnds[1] = new iRegLdstOper();

    // call node
    MachCallLeafNode *call = new CallLeafDirectNode();

    call->_opnds[0] = _opnds[0];
    call->_opnds[1] = new methodOper((intptr_t) entry_address); // May get set later.

    // Make the new call node look like the old one.
    call->_name = _name;
    call->_tf = _tf;
    call->_entry_point = _entry_point;
    call->_cnt = _cnt;
    call->_guaranteed_safepoint = false;
    call->_oop_map = _oop_map;
    guarantee(!_jvms, "You must clone the jvms and adapt the offsets by fix_jvms().");
    call->_jvms = nullptr;
    call->_jvmadj = _jvmadj;
    call->_in_rms = _in_rms;
    call->_nesting = _nesting;

    // New call needs all inputs of old call.
    // Req...
    for (uint i = 0; i < req(); ++i) {
      if (i != mach_constant_base_node_input()) {
        call->add_req(in(i));
      }
    }

    // These must be required edges, as the registers are live up to
    // the call. Else the constants are handled as kills.
    call->add_req(mtctr);
#if !defined(ABI_ELFv2)
    call->add_req(loadConLNodes_Env._last);
    call->add_req(loadConLNodes_Toc._last);
#endif

    // ...as well as prec
    for (uint i = req(); i < len(); ++i) {
      call->add_prec(in(i));
    }

    // registers
    ra_->set1(mtctr->_idx, OptoReg::Name(SR_CTR_num));

    // Insert the new nodes.
    if (loadConLNodes_Entry._large_hi) nodes->push(loadConLNodes_Entry._large_hi);
    if (loadConLNodes_Entry._last)     nodes->push(loadConLNodes_Entry._last);
#if !defined(ABI_ELFv2)
    if (loadConLNodes_Env._large_hi) nodes->push(loadConLNodes_Env._large_hi);
    if (loadConLNodes_Env._last)     nodes->push(loadConLNodes_Env._last);
    if (loadConLNodes_Toc._large_hi) nodes->push(loadConLNodes_Toc._large_hi);
    if (loadConLNodes_Toc._last)     nodes->push(loadConLNodes_Toc._last);
#endif
    nodes->push(mtctr);
    nodes->push(call);
  %}
%}

//----------FRAME--------------------------------------------------------------
// Definition of frame structure and management information.

frame %{
  // These two registers define part of the calling convention between
  // compiled code and the interpreter.

  // Inline Cache Register or method for I2C.
  inline_cache_reg(R19); // R19_method

  // Optional: name the operand used by cisc-spilling to access
  // [stack_pointer + offset].
  cisc_spilling_operand_name(indOffset);

  // Number of stack slots consumed by a Monitor enter.
  sync_stack_slots((frame::jit_monitor_size / VMRegImpl::stack_slot_size));

  // Compiled code's Frame Pointer.
  frame_pointer(R1); // R1_SP

  // Interpreter stores its frame pointer in a register which is
  // stored to the stack by I2CAdaptors. I2CAdaptors convert from
  // interpreted java to compiled java.
  //
  // R14_state holds pointer to caller's cInterpreter.
  interpreter_frame_pointer(R14); // R14_state

  stack_alignment(frame::alignment_in_bytes);

  // Number of outgoing stack slots killed above the
  // out_preserve_stack_slots for calls to C. Supports the var-args
  // backing area for register parms.
  //
  varargs_C_out_slots_killed(((frame::native_abi_reg_args_size - frame::jit_out_preserve_size) / VMRegImpl::stack_slot_size));

  // The after-PROLOG location of the return address. Location of
  // return address specifies a type (REG or STACK) and a number
  // representing the register number (i.e. - use a register name) or
  // stack slot.
  //
  // A: Link register is stored in stack slot ...
  // M: ... but it's in the caller's frame according to PPC-64 ABI.
  // J: Therefore, we make sure that the link register is also in R11_scratch1
  //    at the end of the prolog.
  // B: We use R20, now.
  //return_addr(REG R20);

  // G: After reading the comments made by all the luminaries on their
  //    failure to tell the compiler where the return address really is,
  //    I hardly dare to try myself. However, I'm convinced it's in slot
  //    4, which apparently works and saves us some spills.
  return_addr(STACK 4);

  // Location of native (C/C++) and interpreter return values. This
  // is specified to be the same as Java. In the 32-bit VM, long
  // values are actually returned from native calls in O0:O1 and
  // returned to the interpreter in I0:I1. The copying to and from
  // the register pairs is done by the appropriate call and epilog
  // opcodes. This simplifies the register allocator.
  c_return_value %{
    assert((ideal_reg >= Op_RegI && ideal_reg <= Op_RegL) ||
           (ideal_reg == Op_RegN && CompressedOops::base() == nullptr && CompressedOops::shift() == 0),
           "only return normal values");
    // enum names from opcodes.hpp: Op_Node Op_Set Op_RegN Op_RegI Op_RegP Op_RegF Op_RegD Op_RegL
    static int typeToRegLo[Op_RegL+1] = { 0, 0, R3_num, R3_num, R3_num, F1_num, F1_num, R3_num };
    static int typeToRegHi[Op_RegL+1] = { 0, 0, OptoReg::Bad, R3_H_num, R3_H_num, OptoReg::Bad, F1_H_num, R3_H_num };
    return OptoRegPair(typeToRegHi[ideal_reg], typeToRegLo[ideal_reg]);
  %}

  // Location of compiled Java return values. Same as C.
  return_value %{
    assert((ideal_reg >= Op_RegI && ideal_reg <= Op_RegL) ||
           (ideal_reg == Op_RegN && CompressedOops::base() == nullptr && CompressedOops::shift() == 0),
           "only return normal values");
    // enum names from opcodes.hpp: Op_Node Op_Set Op_RegN Op_RegI Op_RegP Op_RegF Op_RegD Op_RegL
    static int typeToRegLo[Op_RegL+1] = { 0, 0, R3_num, R3_num, R3_num, F1_num, F1_num, R3_num };
    static int typeToRegHi[Op_RegL+1] = { 0, 0, OptoReg::Bad, R3_H_num, R3_H_num, OptoReg::Bad, F1_H_num, R3_H_num };
    return OptoRegPair(typeToRegHi[ideal_reg], typeToRegLo[ideal_reg]);
  %}
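
  // For example, per the tables above an Op_RegL result lives in R3 (with
  // virtual high half R3_H), while Op_RegF and Op_RegD results live in F1.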
%}


//----------ATTRIBUTES---------------------------------------------------------

//----------Operand Attributes-------------------------------------------------
op_attrib op_cost(1); // Required cost attribute.

//----------Instruction Attributes---------------------------------------------

// Cost attribute. Required.
ins_attrib ins_cost(DEFAULT_COST);

// Is this instruction a non-matching short branch variant of some
// long branch? Not required.
ins_attrib ins_short_branch(0);

ins_attrib ins_is_TrapBasedCheckNode(true);

// Number of constants.
// This instruction uses the given number of constants
// (optional attribute).
// This is needed to determine in time whether the constant pool will
// exceed 4000 entries. Before postalloc_expand the overall number of constants
// is determined. It's also used to compute the constant pool size
// in Output().
ins_attrib ins_num_consts(0);

// Required alignment attribute (must be a power of 2) specifies the
// alignment that some part of the instruction (not necessarily the
// start) requires. If > 1, a compute_padding() function must be
// provided for the instruction.
ins_attrib ins_alignment(1);

// Enforce/prohibit rematerializations.
// - If an instruction is attributed with 'ins_cannot_rematerialize(true)'
//   then rematerialization of that instruction is prohibited and the
//   instruction's value will be spilled if necessary.
//   Causes MachNode::rematerialize() to return false.
// - If an instruction is attributed with 'ins_should_rematerialize(true)'
//   then rematerialization should be enforced and a copy of the instruction
//   should be inserted if possible; rematerialization is not guaranteed.
//   Note: this may result in rematerializations in front of every use.
//   Allows MachNode::rematerialize() to return true.
// (optional attribute)
ins_attrib ins_cannot_rematerialize(false);
ins_attrib ins_should_rematerialize(false);

// Instruction has variable size depending on alignment.
ins_attrib ins_variable_size_depending_on_alignment(false);

// Instruction is a nop.
ins_attrib ins_is_nop(false);

// Instruction is mapped to a MachIfFastLock node (instead of MachFastLock).
ins_attrib ins_use_mach_if_fast_lock_node(false);

// Field for the toc offset of a constant.
//
// This is needed if the toc offset is not encodable as an immediate in
// the PPC load instruction. If so, the upper (hi) bits of the offset are
// added to the toc, and from this a load with immediate is performed.
// With postalloc expand, we get two nodes that require the same offset
// but which don't know about each other. The offset is only known
// when the constant is added to the constant pool during emitting.
// It is generated in the 'hi'-node adding the upper bits, and saved
// in this node. The 'lo'-node has a link to the 'hi'-node and reads
// the offset from there when it gets encoded.
ins_attrib ins_field_const_toc_offset(0);
ins_attrib ins_field_const_toc_offset_hi_node(0);

// A field that can hold the instruction's offset in the code buffer.
// Set in the node's emitter.
ins_attrib ins_field_cbuf_insts_offset(-1);

// Fields for referencing a call's load-IC-node.
// If the toc offset can not be encoded as an immediate in a load, we
// use two nodes.
ins_attrib ins_field_load_ic_hi_node(0);
ins_attrib ins_field_load_ic_node(0);

// Whether this node is expanded during code emission into a sequence of
// instructions and the first instruction can perform an implicit null check.
ins_attrib ins_is_late_expanded_null_check_candidate(false);

//----------OPERANDS-----------------------------------------------------------
// Operand definitions must precede instruction definitions for correct
// parsing in the ADLC because operands constitute user defined types
// which are used in instruction definitions.
//
// Formats are generated automatically for constants and base registers.

operand vecX() %{
  constraint(ALLOC_IN_RC(vs_reg));
  match(VecX);

  format %{ %}
  interface(REG_INTER);
%}

//----------Simple Operands----------------------------------------------------
// Immediate Operands

// Integer Immediate: 32-bit
operand immI() %{
  match(ConI);
  op_cost(40);
  format %{ %}
  interface(CONST_INTER);
%}

operand immI8() %{
  predicate(Assembler::is_simm(n->get_int(), 8));
  op_cost(0);
  match(ConI);
  format %{ %}
  interface(CONST_INTER);
%}

// Integer Immediate: 16-bit
operand immI16() %{
  predicate(Assembler::is_simm(n->get_int(), 16));
  op_cost(0);
  match(ConI);
  format %{ %}
  interface(CONST_INTER);
%}

// Integer Immediate: 32-bit, where lowest 16 bits are 0x0000.
operand immIhi16() %{
  predicate(((n->get_int() & 0xffff0000) != 0) && ((n->get_int() & 0xffff) == 0));
  match(ConI);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

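// For example, 0x12340000 matches immIhi16 and can be materialized with a
// single lis (addis from zero): lis Rdst, 0x1234.
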
// Integer Immediate: 32-bit immediate for prefixed addi and load/store.
operand immI32() %{
  predicate(PowerArchitecturePPC64 >= 10);
  op_cost(0);
  match(ConI);
  format %{ %}
  interface(CONST_INTER);
%}

operand immInegpow2() %{
  predicate(is_power_of_2(-(juint)(n->get_int())));
  match(ConI);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

operand immIpow2minus1() %{
  predicate(is_power_of_2((juint)(n->get_int()) + 1u));
  match(ConI);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

operand immIpowerOf2() %{
  predicate(is_power_of_2((juint)(n->get_int())));
  match(ConI);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Unsigned Integer Immediate: the values 0-31
operand uimmI5() %{
  predicate(Assembler::is_uimm(n->get_int(), 5));
  match(ConI);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Unsigned Integer Immediate: 6-bit
operand uimmI6() %{
  predicate(Assembler::is_uimm(n->get_int(), 6));
  match(ConI);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Unsigned Integer Immediate: 6-bit int, 32 or greater
operand uimmI6_ge32() %{
  predicate(Assembler::is_uimm(n->get_int(), 6) && n->get_int() >= 32);
  match(ConI);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Unsigned Integer Immediate: 15-bit
operand uimmI15() %{
  predicate(Assembler::is_uimm(n->get_int(), 15));
  match(ConI);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Unsigned Integer Immediate: 16-bit
operand uimmI16() %{
  predicate(Assembler::is_uimm(n->get_int(), 16));
  match(ConI);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// constant 'int 0'.
operand immI_0() %{
  predicate(n->get_int() == 0);
  match(ConI);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// constant 'int 1'.
operand immI_1() %{
  predicate(n->get_int() == 1);
  match(ConI);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// constant 'int -1'.
operand immI_minus1() %{
  predicate(n->get_int() == -1);
  match(ConI);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// int value 16.
operand immI_16() %{
  predicate(n->get_int() == 16);
  match(ConI);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// int value 24.
operand immI_24() %{
  predicate(n->get_int() == 24);
  match(ConI);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Compressed oops constants
// Pointer Immediate
operand immN() %{
  match(ConN);

  op_cost(10);
  format %{ %}
  interface(CONST_INTER);
%}

// nullptr Pointer Immediate
operand immN_0() %{
  predicate(n->get_narrowcon() == 0);
  match(ConN);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Compressed klass constants
operand immNKlass() %{
  match(ConNKlass);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// This operand can be used to avoid matching of an instruct
// with chain rule.
operand immNKlass_NM() %{
  match(ConNKlass);
  predicate(false);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Pointer Immediate: 64-bit
operand immP() %{
  match(ConP);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Operand to avoid match of loadConP.
// This operand can be used to avoid matching of an instruct
// with chain rule.
operand immP_NM() %{
  match(ConP);
  predicate(false);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// constant 'pointer 0'.
operand immP_0() %{
  predicate(n->get_ptr() == 0);
  match(ConP);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// pointer 0x0 or 0x1
operand immP_0or1() %{
  predicate((n->get_ptr() == 0) || (n->get_ptr() == 1));
  match(ConP);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

operand immL() %{
  match(ConL);
  op_cost(40);
  format %{ %}
  interface(CONST_INTER);
%}

operand immLmax30() %{
  predicate((n->get_long() <= 30));
  match(ConL);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Long Immediate: 16-bit
operand immL16() %{
  predicate(Assembler::is_simm(n->get_long(), 16));
  match(ConL);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Long Immediate: 16-bit, 4-aligned
operand immL16Alg4() %{
  predicate(Assembler::is_simm(n->get_long(), 16) && ((n->get_long() & 0x3) == 0));
  match(ConL);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

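// The aligned variants above and below exist because of instruction
// encodings: DS-form loads/stores (ld/std) reuse the low 2 displacement
// bits as opcode bits (hence 4-byte alignment), and DQ-form VSX accesses
// (lxv/stxv) reuse the low 4 bits (hence 16-byte alignment).
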
// Long Immediate: 16-bit, 16-aligned
operand immL16Alg16() %{
  predicate(Assembler::is_simm(n->get_long(), 16) && ((n->get_long() & 0xf) == 0));
  match(ConL);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Long Immediate: 32-bit, where lowest 16 bits are 0x0000.
operand immL32hi16() %{
  predicate(Assembler::is_simm(n->get_long(), 32) && ((n->get_long() & 0xffffL) == 0L));
  match(ConL);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Long Immediate: 32-bit
operand immL32() %{
  predicate(Assembler::is_simm(n->get_long(), 32));
  match(ConL);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Long Immediate: 34-bit, immediate field in prefixed addi and load/store.
operand immL34() %{
  predicate(PowerArchitecturePPC64 >= 10 && Assembler::is_simm(n->get_long(), 34));
  match(ConL);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Long Immediate: 64-bit, where only the highest 16 bits are non-zero.
operand immLhighest16() %{
  predicate((n->get_long() & 0xffff000000000000L) != 0L && (n->get_long() & 0x0000ffffffffffffL) == 0L);
  match(ConL);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

operand immLnegpow2() %{
  predicate(is_power_of_2(-(julong)(n->get_long())));
  match(ConL);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

operand immLpow2minus1() %{
  predicate(is_power_of_2((julong)(n->get_long()) + 1ull));
  match(ConL);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// constant 'long 0'.
operand immL_0() %{
  predicate(n->get_long() == 0L);
  match(ConL);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// constant 'long -1'.
operand immL_minus1() %{
  predicate(n->get_long() == -1L);
  match(ConL);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Long Immediate: low 32-bit mask
operand immL_32bits() %{
  predicate(n->get_long() == 0xFFFFFFFFL);
  match(ConL);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Unsigned Long Immediate: 16-bit
operand uimmL16() %{
  predicate(Assembler::is_uimm(n->get_long(), 16));
  match(ConL);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Float Immediate
operand immF() %{
  match(ConF);
  op_cost(40);
  format %{ %}
  interface(CONST_INTER);
%}

// Float Immediate: +0.0f.
operand immF_0() %{
  predicate(jint_cast(n->getf()) == 0);
  match(ConF);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Double Immediate
operand immD() %{
  match(ConD);
  op_cost(40);
  format %{ %}
  interface(CONST_INTER);
%}

// Double Immediate: +0.0d.
operand immD_0() %{
  predicate(jlong_cast(n->getd()) == 0);
  match(ConD);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Integer Register Operands
// Integer Destination Register
// See definition of reg_class bits32_reg_rw.
operand iRegIdst() %{
  constraint(ALLOC_IN_RC(bits32_reg_rw));
  match(RegI);
  match(rscratch1RegI);
  match(rscratch2RegI);
  match(rarg1RegI);
  match(rarg2RegI);
  match(rarg3RegI);
  match(rarg4RegI);
  format %{ %}
  interface(REG_INTER);
%}

// Integer Source Register
// See definition of reg_class bits32_reg_ro.
operand iRegIsrc() %{
  constraint(ALLOC_IN_RC(bits32_reg_ro));
  match(RegI);
  match(rscratch1RegI);
  match(rscratch2RegI);
  match(rarg1RegI);
  match(rarg2RegI);
  match(rarg3RegI);
  match(rarg4RegI);
  format %{ %}
  interface(REG_INTER);
%}

operand rscratch1RegI() %{
  constraint(ALLOC_IN_RC(rscratch1_bits32_reg));
  match(iRegIdst);
  format %{ %}
  interface(REG_INTER);
%}

operand rscratch2RegI() %{
  constraint(ALLOC_IN_RC(rscratch2_bits32_reg));
  match(iRegIdst);
  format %{ %}
  interface(REG_INTER);
%}

operand rarg1RegI() %{
  constraint(ALLOC_IN_RC(rarg1_bits32_reg));
  match(iRegIdst);
  format %{ %}
  interface(REG_INTER);
%}

operand rarg2RegI() %{
  constraint(ALLOC_IN_RC(rarg2_bits32_reg));
  match(iRegIdst);
  format %{ %}
  interface(REG_INTER);
%}

operand rarg3RegI() %{
  constraint(ALLOC_IN_RC(rarg3_bits32_reg));
  match(iRegIdst);
  format %{ %}
  interface(REG_INTER);
%}

operand rarg4RegI() %{
  constraint(ALLOC_IN_RC(rarg4_bits32_reg));
  match(iRegIdst);
  format %{ %}
  interface(REG_INTER);
%}

operand rarg1RegL() %{
  constraint(ALLOC_IN_RC(rarg1_bits64_reg));
  match(iRegLdst);
  format %{ %}
  interface(REG_INTER);
%}

// Pointer Destination Register
// See definition of reg_class bits64_reg_rw.
operand iRegPdst() %{
  constraint(ALLOC_IN_RC(bits64_reg_rw));
  match(RegP);
  match(rscratch1RegP);
  match(rscratch2RegP);
  match(rarg1RegP);
  match(rarg2RegP);
  match(rarg3RegP);
  match(rarg4RegP);
  format %{ %}
  interface(REG_INTER);
%}

// Pointer Destination Register
// Operand not using r11 and r12 (killed in epilog).
operand iRegPdstNoScratch() %{
  constraint(ALLOC_IN_RC(bits64_reg_leaf_call));
  match(RegP);
  match(rarg1RegP);
  match(rarg2RegP);
  match(rarg3RegP);
  match(rarg4RegP);
  format %{ %}
  interface(REG_INTER);
%}

// Pointer Source Register
// See definition of reg_class bits64_reg_ro.
operand iRegPsrc() %{
  constraint(ALLOC_IN_RC(bits64_reg_ro));
  match(RegP);
  match(iRegPdst);
  match(rscratch1RegP);
  match(rscratch2RegP);
  match(rarg1RegP);
  match(rarg2RegP);
  match(rarg3RegP);
  match(rarg4RegP);
  match(rarg5RegP);
  match(rarg6RegP);
  match(threadRegP);
  format %{ %}
  interface(REG_INTER);
%}

// Thread operand.
operand threadRegP() %{
  constraint(ALLOC_IN_RC(thread_bits64_reg));
  match(iRegPdst);
  format %{ "R16" %}
  interface(REG_INTER);
%}

operand rscratch1RegP() %{
  constraint(ALLOC_IN_RC(rscratch1_bits64_reg));
  match(iRegPdst);
  format %{ "R11" %}
  interface(REG_INTER);
%}

operand rscratch2RegP() %{
  constraint(ALLOC_IN_RC(rscratch2_bits64_reg));
  match(iRegPdst);
  format %{ %}
  interface(REG_INTER);
%}

operand rarg1RegP() %{
  constraint(ALLOC_IN_RC(rarg1_bits64_reg));
  match(iRegPdst);
  format %{ %}
  interface(REG_INTER);
%}

operand rarg2RegP() %{
  constraint(ALLOC_IN_RC(rarg2_bits64_reg));
  match(iRegPdst);
  format %{ %}
  interface(REG_INTER);
%}

operand rarg3RegP() %{
  constraint(ALLOC_IN_RC(rarg3_bits64_reg));
  match(iRegPdst);
  format %{ %}
  interface(REG_INTER);
%}

operand rarg4RegP() %{
  constraint(ALLOC_IN_RC(rarg4_bits64_reg));
  match(iRegPdst);
  format %{ %}
  interface(REG_INTER);
%}

operand rarg5RegP() %{
  constraint(ALLOC_IN_RC(rarg5_bits64_reg));
  match(iRegPdst);
  format %{ %}
  interface(REG_INTER);
%}

operand rarg6RegP() %{
  constraint(ALLOC_IN_RC(rarg6_bits64_reg));
  match(iRegPdst);
  format %{ %}
  interface(REG_INTER);
%}

operand iRegNsrc() %{
  constraint(ALLOC_IN_RC(bits32_reg_ro));
  match(RegN);
  match(iRegNdst);

  format %{ %}
  interface(REG_INTER);
%}

operand iRegNdst() %{
  constraint(ALLOC_IN_RC(bits32_reg_rw));
  match(RegN);

  format %{ %}
  interface(REG_INTER);
%}

// Long Destination Register
// See definition of reg_class bits64_reg_rw.
operand iRegLdst() %{
  constraint(ALLOC_IN_RC(bits64_reg_rw));
  match(RegL);
  match(rscratch1RegL);
  match(rscratch2RegL);
  format %{ %}
  interface(REG_INTER);
%}

// Long Source Register
// See definition of reg_class bits64_reg_ro.
operand iRegLsrc() %{
  constraint(ALLOC_IN_RC(bits64_reg_ro));
  match(RegL);
  match(iRegLdst);
  match(rscratch1RegL);
  match(rscratch2RegL);
  format %{ %}
  interface(REG_INTER);
%}

// Special operand for ConvL2I.
operand iRegL2Isrc(iRegLsrc reg) %{
  constraint(ALLOC_IN_RC(bits64_reg_ro));
  match(ConvL2I reg);
  format %{ "ConvL2I($reg)" %}
  interface(REG_INTER)
%}

operand rscratch1RegL() %{
  constraint(ALLOC_IN_RC(rscratch1_bits64_reg));
  match(RegL);
  format %{ %}
  interface(REG_INTER);
%}

operand rscratch2RegL() %{
  constraint(ALLOC_IN_RC(rscratch2_bits64_reg));
  match(RegL);
  format %{ %}
  interface(REG_INTER);
%}

// Condition Code Flag Registers
operand flagsReg() %{
  constraint(ALLOC_IN_RC(int_flags));
  match(RegFlags);
  format %{ %}
  interface(REG_INTER);
%}

operand flagsRegSrc() %{
  constraint(ALLOC_IN_RC(int_flags_ro));
  match(RegFlags);
  match(flagsReg);
  match(flagsRegCR0);
  format %{ %}
  interface(REG_INTER);
%}

// Condition Code Flag Register CR0
operand flagsRegCR0() %{
  constraint(ALLOC_IN_RC(int_flags_CR0));
  match(RegFlags);
  format %{ "CR0" %}
  interface(REG_INTER);
%}

operand flagsRegCR1() %{
  constraint(ALLOC_IN_RC(int_flags_CR1));
  match(RegFlags);
  format %{ "CR1" %}
  interface(REG_INTER);
%}

operand flagsRegCR6() %{
  constraint(ALLOC_IN_RC(int_flags_CR6));
  match(RegFlags);
  format %{ "CR6" %}
  interface(REG_INTER);
%}

operand regCTR() %{
  constraint(ALLOC_IN_RC(ctr_reg));
  // RegFlags should work. Introducing a RegSpecial type would cause a
  // lot of changes.
  match(RegFlags);
  format %{ "SR_CTR" %}
  interface(REG_INTER);
%}

operand regD() %{
  constraint(ALLOC_IN_RC(dbl_reg));
  match(RegD);
  format %{ %}
  interface(REG_INTER);
%}

operand regF() %{
  constraint(ALLOC_IN_RC(flt_reg));
  match(RegF);
  format %{ %}
  interface(REG_INTER);
%}

// Special Registers

// Method Register
operand inline_cache_regP(iRegPdst reg) %{
  constraint(ALLOC_IN_RC(r19_bits64_reg)); // inline_cache_reg
  match(reg);
  format %{ %}
  interface(REG_INTER);
%}

// Operands to remove register moves in unscaled mode.
// Match read/write registers with an EncodeP node if neither shift nor add are required.
operand iRegP2N(iRegPsrc reg) %{
  predicate(false /* TODO: PPC port MatchDecodeNodes*/ && CompressedOops::shift() == 0);
  constraint(ALLOC_IN_RC(bits64_reg_ro));
  match(EncodeP reg);
  format %{ "$reg" %}
  interface(REG_INTER)
%}

operand iRegN2P(iRegNsrc reg) %{
  predicate(false /* TODO: PPC port MatchDecodeNodes*/);
  constraint(ALLOC_IN_RC(bits32_reg_ro));
  match(DecodeN reg);
  format %{ "$reg" %}
  interface(REG_INTER)
%}

operand iRegN2P_klass(iRegNsrc reg) %{
  predicate(CompressedKlassPointers::base() == nullptr && CompressedKlassPointers::shift() == 0);
  constraint(ALLOC_IN_RC(bits32_reg_ro));
  match(DecodeNKlass reg);
  format %{ "$reg" %}
  interface(REG_INTER)
%}

//----------Complex Operands---------------------------------------------------
// Indirect Memory Reference
operand indirect(iRegPsrc reg) %{
  constraint(ALLOC_IN_RC(bits64_reg_ro));
  match(reg);
  op_cost(100);
  format %{ "[$reg]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0x0);
    scale(0x0);
    disp(0x0);
  %}
%}

// Indirect with Offset
operand indOffset16(iRegPsrc reg, immL16 offset) %{
  constraint(ALLOC_IN_RC(bits64_reg_ro));
  match(AddP reg offset);
  op_cost(100);
  format %{ "[$reg + $offset]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0x0);
    scale(0x0);
    disp($offset);
  %}
%}

// Indirect with 4-aligned Offset
operand indOffset16Alg4(iRegPsrc reg, immL16Alg4 offset) %{
  constraint(ALLOC_IN_RC(bits64_reg_ro));
  match(AddP reg offset);
  op_cost(100);
  format %{ "[$reg + $offset]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0x0);
    scale(0x0);
    disp($offset);
  %}
%}

// Indirect with 16-aligned Offset
operand indOffset16Alg16(iRegPsrc reg, immL16Alg16 offset) %{
  constraint(ALLOC_IN_RC(bits64_reg_ro));
  match(AddP reg offset);
  op_cost(100);
  format %{ "[$reg + $offset]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0x0);
    scale(0x0);
    disp($offset);
  %}
%}

//----------Complex Operands for Compressed OOPs-------------------------------
// Compressed OOPs with narrow_oop_shift == 0.

// Indirect Memory Reference, compressed OOP
operand indirectNarrow(iRegNsrc reg) %{
  predicate(false /* TODO: PPC port MatchDecodeNodes*/);
  constraint(ALLOC_IN_RC(bits64_reg_ro));
  match(DecodeN reg);
  op_cost(100);
  format %{ "[$reg]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0x0);
    scale(0x0);
    disp(0x0);
  %}
%}

operand indirectNarrow_klass(iRegNsrc reg) %{
  predicate(CompressedKlassPointers::base() == nullptr && CompressedKlassPointers::shift() == 0);
  constraint(ALLOC_IN_RC(bits64_reg_ro));
  match(DecodeNKlass reg);
  op_cost(100);
  format %{ "[$reg]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0x0);
    scale(0x0);
    disp(0x0);
  %}
%}

// Indirect with Offset, compressed OOP
operand indOffset16Narrow(iRegNsrc reg, immL16 offset) %{
  predicate(false /* TODO: PPC port MatchDecodeNodes*/);
  constraint(ALLOC_IN_RC(bits64_reg_ro));
  match(AddP (DecodeN reg) offset);
  op_cost(100);
  format %{ "[$reg + $offset]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0x0);
    scale(0x0);
    disp($offset);
  %}
%}

operand indOffset16Narrow_klass(iRegNsrc reg, immL16 offset) %{
  predicate(CompressedKlassPointers::base() == nullptr && CompressedKlassPointers::shift() == 0);
  constraint(ALLOC_IN_RC(bits64_reg_ro));
  match(AddP (DecodeNKlass reg) offset);
  op_cost(100);
  format %{ "[$reg + $offset]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0x0);
    scale(0x0);
    disp($offset);
  %}
%}

// Indirect with 4-aligned Offset, compressed OOP
operand indOffset16NarrowAlg4(iRegNsrc reg, immL16Alg4 offset) %{
  predicate(false /* TODO: PPC port MatchDecodeNodes*/);
  constraint(ALLOC_IN_RC(bits64_reg_ro));
  match(AddP (DecodeN reg) offset);
  op_cost(100);
  format %{ "[$reg + $offset]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0x0);
    scale(0x0);
    disp($offset);
  %}
%}

operand indOffset16NarrowAlg4_klass(iRegNsrc reg, immL16Alg4 offset) %{
  predicate(CompressedKlassPointers::base() == nullptr && CompressedKlassPointers::shift() == 0);
  constraint(ALLOC_IN_RC(bits64_reg_ro));
  match(AddP (DecodeNKlass reg) offset);
  op_cost(100);
  format %{ "[$reg + $offset]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0x0);
    scale(0x0);
    disp($offset);
  %}
%}

//----------Special Memory Operands--------------------------------------------
|
|
// Stack Slot Operand
|
|
//
|
|
// This operand is used for loading and storing temporary values on
|
|
// the stack where a match requires a value to flow through memory.
|
|
operand stackSlotI(sRegI reg) %{
|
|
constraint(ALLOC_IN_RC(stack_slots));
|
|
op_cost(100);
|
|
//match(RegI);
|
|
format %{ "[sp+$reg]" %}
|
|
interface(MEMORY_INTER) %{
|
|
base(0x1); // R1_SP
|
|
index(0x0);
|
|
scale(0x0);
|
|
disp($reg); // Stack Offset
|
|
%}
|
|
%}
|
|
|
|
operand stackSlotL(sRegL reg) %{
|
|
constraint(ALLOC_IN_RC(stack_slots));
|
|
op_cost(100);
|
|
//match(RegL);
|
|
format %{ "[sp+$reg]" %}
|
|
interface(MEMORY_INTER) %{
|
|
base(0x1); // R1_SP
|
|
index(0x0);
|
|
scale(0x0);
|
|
disp($reg); // Stack Offset
|
|
%}
|
|
%}
|
|
|
|
operand stackSlotP(sRegP reg) %{
|
|
constraint(ALLOC_IN_RC(stack_slots));
|
|
op_cost(100);
|
|
//match(RegP);
|
|
format %{ "[sp+$reg]" %}
|
|
interface(MEMORY_INTER) %{
|
|
base(0x1); // R1_SP
|
|
index(0x0);
|
|
scale(0x0);
|
|
disp($reg); // Stack Offset
|
|
%}
|
|
%}
|
|
|
|
operand stackSlotF(sRegF reg) %{
|
|
constraint(ALLOC_IN_RC(stack_slots));
|
|
op_cost(100);
|
|
//match(RegF);
|
|
format %{ "[sp+$reg]" %}
|
|
interface(MEMORY_INTER) %{
|
|
base(0x1); // R1_SP
|
|
index(0x0);
|
|
scale(0x0);
|
|
disp($reg); // Stack Offset
|
|
%}
|
|
%}
|
|
|
|
operand stackSlotD(sRegD reg) %{
|
|
constraint(ALLOC_IN_RC(stack_slots));
|
|
op_cost(100);
|
|
//match(RegD);
|
|
format %{ "[sp+$reg]" %}
|
|
interface(MEMORY_INTER) %{
|
|
base(0x1); // R1_SP
|
|
index(0x0);
|
|
scale(0x0);
|
|
disp($reg); // Stack Offset
|
|
%}
|
|
%}
|
|
|
|
// Operands for expressing Control Flow
|
|
// NOTE: Label is a predefined operand which should not be redefined in
|
|
// the AD file. It is generically handled within the ADLC.
|
|
|
|
//----------Conditional Branch Operands----------------------------------------
|
|
// Comparison Op
|
|
//
|
|
// This is the operation of the comparison, and is limited to the
|
|
// following set of codes: L (<), LE (<=), G (>), GE (>=), E (==), NE
|
|
// (!=).
|
|
//
|
|
// Other attributes of the comparison, such as unsignedness, are specified
|
|
// by the comparison instruction that sets a condition code flags register.
|
|
// That result is represented by a flags operand whose subtype is appropriate
|
|
// to the unsignedness (etc.) of the comparison.
|
|
//
|
|
// Later, the instruction which matches both the Comparison Op (a Bool) and
|
|
// the flags (produced by the Cmp) specifies the coding of the comparison op
|
|
// by matching a specific subtype of Bool operand below.
|
|
|
|
// When used for floating point comparisons: unordered same as less.
|
|
operand cmpOp() %{
|
|
match(Bool);
|
|
format %{ "" %}
|
|
interface(COND_INTER) %{
|
|
// BO only encodes bit 4 of bcondCRbiIsX, as bits 1-3 are always '100'.
|
|
// BO & BI
|
|
equal(0xA); // 10 10: bcondCRbiIs1 & Condition::equal
|
|
not_equal(0x2); // 00 10: bcondCRbiIs0 & Condition::equal
|
|
less(0x8); // 10 00: bcondCRbiIs1 & Condition::less
|
|
greater_equal(0x0); // 00 00: bcondCRbiIs0 & Condition::less
|
|
less_equal(0x1); // 00 01: bcondCRbiIs0 & Condition::greater
|
|
greater(0x9); // 10 01: bcondCRbiIs1 & Condition::greater
|
|
overflow(0xB); // 10 11: bcondCRbiIs1 & Condition::summary_overflow
|
|
no_overflow(0x3); // 00 11: bcondCRbiIs0 & Condition::summary_overflow
|
|
%}
|
|
%}
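
// Worked example (illustrative): 'equal' is encoded as 0xA, binary '10 10'.
// The first bit pair selects bcondCRbiIs1 (branch if the tested CR bit is 1)
// and the second pair is Condition::equal, i.e. the EQ bit of the condition
// register field. 'not_equal' (0x2, binary '00 10') tests the very same EQ
// bit but selects bcondCRbiIs0, so it branches if the bit is 0.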

//----------OPERAND CLASSES----------------------------------------------------
// Operand Classes are groups of operands that are used to simplify
// instruction definitions by not requiring the AD writer to specify
// separate instructions for every form of operand when the
// instruction accepts multiple operand types with the same basic
// encoding and format. The classic case of this is memory operands.
// Indirect is not included since its use is limited to Compare & Swap.

opclass memory(indirect, indOffset16 /*, indIndex, tlsReference*/, indirectNarrow, indirectNarrow_klass, indOffset16Narrow, indOffset16Narrow_klass);
// Memory operand where offsets are 4-aligned. Required for ld, std.
opclass memoryAlg4(indirect, indOffset16Alg4, indirectNarrow, indOffset16NarrowAlg4, indOffset16NarrowAlg4_klass);
opclass memoryAlg16(indirect, indOffset16Alg16);
opclass indirectMemory(indirect, indirectNarrow);

// Special opclass for I and ConvL2I.
opclass iRegIsrc_iRegL2Isrc(iRegIsrc, iRegL2Isrc);

// Operand classes to match encode and decode. iRegN_P2N is only used
// for storeN. I have never seen an encode node elsewhere.
opclass iRegN_P2N(iRegNsrc, iRegP2N);
opclass iRegP_N2P(iRegPsrc, iRegN2P, iRegN2P_klass);

//----------PIPELINE-----------------------------------------------------------

pipeline %{

// See J.M.Tendler et al. "Power4 system microarchitecture", IBM
// J. Res. & Dev., No. 1, Jan. 2002.

//----------ATTRIBUTES---------------------------------------------------------
attributes %{

  // Power4 instructions are of fixed length.
  fixed_size_instructions;

  // TODO: if `bundle' means number of instructions fetched
  // per cycle, this is 8. If `bundle' means Power4 `group', that is
  // max instructions issued per cycle, this is 5.
  max_instructions_per_bundle = 8;

  // A Power4 instruction is 4 bytes long.
  instruction_unit_size = 4;

  // The Power4 processor fetches 64 bytes...
  instruction_fetch_unit_size = 64;

  // ...in one line
  instruction_fetch_units = 1;

  // Unused, list one so that array generated by adlc is not empty.
  // Aix compiler chokes if _nop_count = 0.
  nops(fxNop);
%}

//----------RESOURCES----------------------------------------------------------
// Resources are the functional units available to the machine
resources(
  PPC_BR,    // branch unit
  PPC_CR,    // condition unit
  PPC_FX1,   // integer arithmetic unit 1
  PPC_FX2,   // integer arithmetic unit 2
  PPC_LDST1, // load/store unit 1
  PPC_LDST2, // load/store unit 2
  PPC_FP1,   // float arithmetic unit 1
  PPC_FP2,   // float arithmetic unit 2
  PPC_LDST = PPC_LDST1 | PPC_LDST2,
  PPC_FX = PPC_FX1 | PPC_FX2,
  PPC_FP = PPC_FP1 | PPC_FP2
);

//----------PIPELINE DESCRIPTION-----------------------------------------------
// Pipeline Description specifies the stages in the machine's pipeline
pipe_desc(
  // Power4 longest pipeline path
  PPC_IF,    // instruction fetch
  PPC_IC,
  //PPC_BP,  // branch prediction
  PPC_D0,    // decode
  PPC_D1,    // decode
  PPC_D2,    // decode
  PPC_D3,    // decode
  PPC_Xfer1,
  PPC_GD,    // group definition
  PPC_MP,    // map
  PPC_ISS,   // issue
  PPC_RF,    // resource fetch
  PPC_EX1,   // execute (all units)
  PPC_EX2,   // execute (FP, LDST)
  PPC_EX3,   // execute (FP, LDST)
  PPC_EX4,   // execute (FP)
  PPC_EX5,   // execute (FP)
  PPC_EX6,   // execute (FP)
  PPC_WB,    // write back
  PPC_Xfer2,
  PPC_CP
);

//----------PIPELINE CLASSES---------------------------------------------------
// Pipeline Classes describe the stages in which input and output are
// referenced by the hardware pipeline.

// Simple pipeline classes.

// Default pipeline class.
pipe_class pipe_class_default() %{
  single_instruction;
  fixed_latency(2);
%}

// Pipeline class for empty instructions.
pipe_class pipe_class_empty() %{
  single_instruction;
  fixed_latency(0);
%}

// Pipeline class for compares.
pipe_class pipe_class_compare() %{
  single_instruction;
  fixed_latency(16);
%}

// Pipeline class for traps.
pipe_class pipe_class_trap() %{
  single_instruction;
  fixed_latency(100);
%}

// Pipeline class for memory operations.
pipe_class pipe_class_memory() %{
  single_instruction;
  fixed_latency(16);
%}

// Pipeline class for call.
pipe_class pipe_class_call() %{
  single_instruction;
  fixed_latency(100);
%}

// Define the class for the Nop node.
define %{
  MachNop = pipe_class_default;
%}

%}

//----------INSTRUCTIONS-------------------------------------------------------

// Naming of instructions:
// opA_operB / opA_operB_operC:
//   Operation 'op' with one or two source operands 'oper'. Result
//   type is A, source operand types are B and C.
//   Iff A == B == C, B and C are left out.
//
// The instructions are ordered according to the following scheme:
//  - loads
//  - load constants
//  - prefetch
//  - store
//  - encode/decode
//  - membar
//  - conditional moves
//  - compare & swap
//  - arithmetic and logic operations
//    * int: Add, Sub, Mul, Div, Mod
//    * int: lShift, arShift, urShift, rot
//    * float: Add, Sub, Mul, Div
//    * and, or, xor ...
//  - register moves: float <-> int, reg <-> stack, repl
//  - cast (high level type cast, XtoP, castPP, castII, not_null etc.)
//  - conv (low level type cast requiring bit changes (sign extend etc.))
//  - compares, range & zero checks.
//  - branches
//  - complex operations, intrinsics, min, max, replicate
//  - lock
//  - Calls
//
// If there are similar instructions with different types they are sorted:
// int before float
// small before big
// signed before unsigned
// e.g., loadS before loadUS before loadI before loadF.


//----------Load/Store Instructions--------------------------------------------

//----------Load Instructions--------------------------------------------------

// Converts byte to int.
// As convB2I_reg, but without match rule. The match rule of convB2I_reg
// reuses the 'amount' operand, but adlc expects that operand specification
// and operands in match rule are equivalent.
instruct convB2I_reg_2(iRegIdst dst, iRegIsrc src) %{
  effect(DEF dst, USE src);
  format %{ "EXTSB $dst, $src \t// byte->int" %}
  size(4);
  ins_encode %{
    __ extsb($dst$$Register, $src$$Register);
  %}
  ins_pipe(pipe_class_default);
%}

instruct loadUB_indirect(iRegIdst dst, indirectMemory mem) %{
  // match-rule, false predicate
  match(Set dst (LoadB mem));
  predicate(false);

  format %{ "LBZ $dst, $mem" %}
  size(4);
  ins_encode( enc_lbz(dst, mem) );
  ins_pipe(pipe_class_memory);
%}

instruct loadUB_indirect_ac(iRegIdst dst, indirectMemory mem) %{
  // match-rule, false predicate
  match(Set dst (LoadB mem));
  predicate(false);

  format %{ "LBZ $dst, $mem\n\t"
            "TWI $dst\n\t"
            "ISYNC" %}
  size(12);
  ins_encode( enc_lbz_ac(dst, mem) );
  ins_pipe(pipe_class_memory);
%}
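
// Note on the "_ac" (acquire) variants above and below: they use the classic
// PowerPC load-acquire idiom. The TWI is encoded so that it never traps, but
// it reads the just-loaded register and thereby creates a dependency on the
// load; the following ISYNC then keeps later instructions from executing
// early. Together they order the load before all subsequent memory accesses,
// at a cost of 3*MEMORY_REF_COST, which is still cheaper than a full SYNC.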

// Load Byte (8bit signed). LoadB = LoadUB + ConvUB2B.
instruct loadB_indirect_Ex(iRegIdst dst, indirectMemory mem) %{
  match(Set dst (LoadB mem));
  predicate(n->as_Load()->is_unordered() || followed_by_acquire(n));
  ins_cost(MEMORY_REF_COST + DEFAULT_COST);
  expand %{
    iRegIdst tmp;
    loadUB_indirect(tmp, mem);
    convB2I_reg_2(dst, tmp);
  %}
%}

instruct loadB_indirect_ac_Ex(iRegIdst dst, indirectMemory mem) %{
  match(Set dst (LoadB mem));
  ins_cost(3*MEMORY_REF_COST + DEFAULT_COST);
  expand %{
    iRegIdst tmp;
    loadUB_indirect_ac(tmp, mem);
    convB2I_reg_2(dst, tmp);
  %}
%}

instruct loadUB_indOffset16(iRegIdst dst, indOffset16 mem) %{
  // match-rule, false predicate
  match(Set dst (LoadB mem));
  predicate(false);

  format %{ "LBZ $dst, $mem" %}
  size(4);
  ins_encode( enc_lbz(dst, mem) );
  ins_pipe(pipe_class_memory);
%}

instruct loadUB_indOffset16_ac(iRegIdst dst, indOffset16 mem) %{
  // match-rule, false predicate
  match(Set dst (LoadB mem));
  predicate(false);

  format %{ "LBZ $dst, $mem\n\t"
            "TWI $dst\n\t"
            "ISYNC" %}
  size(12);
  ins_encode( enc_lbz_ac(dst, mem) );
  ins_pipe(pipe_class_memory);
%}

// Load Byte (8bit signed). LoadB = LoadUB + ConvUB2B.
instruct loadB_indOffset16_Ex(iRegIdst dst, indOffset16 mem) %{
  match(Set dst (LoadB mem));
  predicate(n->as_Load()->is_unordered() || followed_by_acquire(n));
  ins_cost(MEMORY_REF_COST + DEFAULT_COST);

  expand %{
    iRegIdst tmp;
    loadUB_indOffset16(tmp, mem);
    convB2I_reg_2(dst, tmp);
  %}
%}

instruct loadB_indOffset16_ac_Ex(iRegIdst dst, indOffset16 mem) %{
  match(Set dst (LoadB mem));
  ins_cost(3*MEMORY_REF_COST + DEFAULT_COST);

  expand %{
    iRegIdst tmp;
    loadUB_indOffset16_ac(tmp, mem);
    convB2I_reg_2(dst, tmp);
  %}
%}

// Load Unsigned Byte (8bit UNsigned) into an int reg.
instruct loadUB(iRegIdst dst, memory mem) %{
  predicate(n->as_Load()->is_unordered() || followed_by_acquire(n));
  match(Set dst (LoadUB mem));
  ins_cost(MEMORY_REF_COST);

  format %{ "LBZ $dst, $mem \t// byte, zero-extend to int" %}
  size(4);
  ins_encode( enc_lbz(dst, mem) );
  ins_pipe(pipe_class_memory);
%}

// Load Unsigned Byte (8bit UNsigned) acquire.
instruct loadUB_ac(iRegIdst dst, memory mem) %{
  match(Set dst (LoadUB mem));
  ins_cost(3*MEMORY_REF_COST);

  format %{ "LBZ $dst, $mem \t// byte, zero-extend to int, acquire\n\t"
            "TWI $dst\n\t"
            "ISYNC" %}
  size(12);
  ins_encode( enc_lbz_ac(dst, mem) );
  ins_pipe(pipe_class_memory);
%}

// Load Unsigned Byte (8bit UNsigned) into a Long Register.
instruct loadUB2L(iRegLdst dst, memory mem) %{
  match(Set dst (ConvI2L (LoadUB mem)));
  predicate(_kids[0]->_leaf->as_Load()->is_unordered() || followed_by_acquire(_kids[0]->_leaf));
  ins_cost(MEMORY_REF_COST);

  format %{ "LBZ $dst, $mem \t// byte, zero-extend to long" %}
  size(4);
  ins_encode( enc_lbz(dst, mem) );
  ins_pipe(pipe_class_memory);
%}

instruct loadUB2L_ac(iRegLdst dst, memory mem) %{
  match(Set dst (ConvI2L (LoadUB mem)));
  ins_cost(3*MEMORY_REF_COST);

  format %{ "LBZ $dst, $mem \t// byte, zero-extend to long, acquire\n\t"
            "TWI $dst\n\t"
            "ISYNC" %}
  size(12);
  ins_encode( enc_lbz_ac(dst, mem) );
  ins_pipe(pipe_class_memory);
%}

// Load Short (16bit signed)
instruct loadS(iRegIdst dst, memory mem) %{
  match(Set dst (LoadS mem));
  predicate(n->as_Load()->is_unordered() || followed_by_acquire(n));
  ins_cost(MEMORY_REF_COST);

  format %{ "LHA $dst, $mem" %}
  size(4);
  ins_encode %{
    int Idisp = $mem$$disp + frame_slots_bias($mem$$base, ra_);
    __ lha($dst$$Register, Idisp, $mem$$base$$Register);
  %}
  ins_pipe(pipe_class_memory);
%}

// Load Short (16bit signed) acquire.
instruct loadS_ac(iRegIdst dst, memory mem) %{
  match(Set dst (LoadS mem));
  ins_cost(3*MEMORY_REF_COST);

  format %{ "LHA $dst, $mem\t acquire\n\t"
            "TWI $dst\n\t"
            "ISYNC" %}
  size(12);
  ins_encode %{
    int Idisp = $mem$$disp + frame_slots_bias($mem$$base, ra_);
    __ lha($dst$$Register, Idisp, $mem$$base$$Register);
    __ twi_0($dst$$Register);
    __ isync();
  %}
  ins_pipe(pipe_class_memory);
%}

// Load Char (16bit unsigned)
instruct loadUS(iRegIdst dst, memory mem) %{
  match(Set dst (LoadUS mem));
  predicate(n->as_Load()->is_unordered() || followed_by_acquire(n));
  ins_cost(MEMORY_REF_COST);

  format %{ "LHZ $dst, $mem" %}
  size(4);
  ins_encode( enc_lhz(dst, mem) );
  ins_pipe(pipe_class_memory);
%}

// Load Char (16bit unsigned) acquire.
instruct loadUS_ac(iRegIdst dst, memory mem) %{
  match(Set dst (LoadUS mem));
  ins_cost(3*MEMORY_REF_COST);

  format %{ "LHZ $dst, $mem \t// acquire\n\t"
            "TWI $dst\n\t"
            "ISYNC" %}
  size(12);
  ins_encode( enc_lhz_ac(dst, mem) );
  ins_pipe(pipe_class_memory);
%}

// Load Unsigned Short/Char (16bit UNsigned) into a Long Register.
instruct loadUS2L(iRegLdst dst, memory mem) %{
  match(Set dst (ConvI2L (LoadUS mem)));
  predicate(_kids[0]->_leaf->as_Load()->is_unordered() || followed_by_acquire(_kids[0]->_leaf));
  ins_cost(MEMORY_REF_COST);

  format %{ "LHZ $dst, $mem \t// short, zero-extend to long" %}
  size(4);
  ins_encode( enc_lhz(dst, mem) );
  ins_pipe(pipe_class_memory);
%}

// Load Unsigned Short/Char (16bit UNsigned) into a Long Register acquire.
instruct loadUS2L_ac(iRegLdst dst, memory mem) %{
  match(Set dst (ConvI2L (LoadUS mem)));
  ins_cost(3*MEMORY_REF_COST);

  format %{ "LHZ $dst, $mem \t// short, zero-extend to long, acquire\n\t"
            "TWI $dst\n\t"
            "ISYNC" %}
  size(12);
  ins_encode( enc_lhz_ac(dst, mem) );
  ins_pipe(pipe_class_memory);
%}

// Load Integer.
instruct loadI(iRegIdst dst, memory mem) %{
  match(Set dst (LoadI mem));
  predicate(n->as_Load()->is_unordered() || followed_by_acquire(n));
  ins_cost(MEMORY_REF_COST);

  format %{ "LWZ $dst, $mem" %}
  size(4);
  ins_encode( enc_lwz(dst, mem) );
  ins_pipe(pipe_class_memory);
%}

// Load Integer acquire.
instruct loadI_ac(iRegIdst dst, memory mem) %{
  match(Set dst (LoadI mem));
  ins_cost(3*MEMORY_REF_COST);

  format %{ "LWZ $dst, $mem \t// load acquire\n\t"
            "TWI $dst\n\t"
            "ISYNC" %}
  size(12);
  ins_encode( enc_lwz_ac(dst, mem) );
  ins_pipe(pipe_class_memory);
%}

// Match loading integer and casting it to unsigned int in
// long register.
// LoadI + ConvI2L + AndL 0xffffffff.
instruct loadUI2L(iRegLdst dst, memory mem, immL_32bits mask) %{
  match(Set dst (AndL (ConvI2L (LoadI mem)) mask));
  predicate(_kids[0]->_kids[0]->_leaf->as_Load()->is_unordered());
  ins_cost(MEMORY_REF_COST);

  format %{ "LWZ $dst, $mem \t// zero-extend to long" %}
  size(4);
  ins_encode( enc_lwz(dst, mem) );
  ins_pipe(pipe_class_memory);
%}

// Match loading integer and casting it to long.
instruct loadI2L(iRegLdst dst, memoryAlg4 mem) %{
  match(Set dst (ConvI2L (LoadI mem)));
  predicate(_kids[0]->_leaf->as_Load()->is_unordered());
  ins_cost(MEMORY_REF_COST);

  format %{ "LWA $dst, $mem \t// loadI2L" %}
  size(4);
  ins_encode %{
    int Idisp = $mem$$disp + frame_slots_bias($mem$$base, ra_);
    __ lwa($dst$$Register, Idisp, $mem$$base$$Register);
  %}
  ins_pipe(pipe_class_memory);
%}

// Match loading integer and casting it to long - acquire.
instruct loadI2L_ac(iRegLdst dst, memoryAlg4 mem) %{
  match(Set dst (ConvI2L (LoadI mem)));
  ins_cost(3*MEMORY_REF_COST);

  format %{ "LWA $dst, $mem \t// loadI2L acquire\n\t"
            "TWI $dst\n\t"
            "ISYNC" %}
  size(12);
  ins_encode %{
    int Idisp = $mem$$disp + frame_slots_bias($mem$$base, ra_);
    __ lwa($dst$$Register, Idisp, $mem$$base$$Register);
    __ twi_0($dst$$Register);
    __ isync();
  %}
  ins_pipe(pipe_class_memory);
%}

// Load Long - aligned
instruct loadL(iRegLdst dst, memoryAlg4 mem) %{
  match(Set dst (LoadL mem));
  predicate(n->as_Load()->is_unordered() || followed_by_acquire(n));
  ins_cost(MEMORY_REF_COST);

  format %{ "LD $dst, $mem \t// long" %}
  size(4);
  ins_encode( enc_ld(dst, mem) );
  ins_pipe(pipe_class_memory);
%}

// Load Long - aligned acquire.
instruct loadL_ac(iRegLdst dst, memoryAlg4 mem) %{
  match(Set dst (LoadL mem));
  ins_cost(3*MEMORY_REF_COST);

  format %{ "LD $dst, $mem \t// long acquire\n\t"
            "TWI $dst\n\t"
            "ISYNC" %}
  size(12);
  ins_encode( enc_ld_ac(dst, mem) );
  ins_pipe(pipe_class_memory);
%}

// Load Long - UNaligned
instruct loadL_unaligned(iRegLdst dst, memoryAlg4 mem) %{
  match(Set dst (LoadL_unaligned mem));
  // predicate(...) // Unaligned_ac is not needed (and wouldn't make sense).
  ins_cost(MEMORY_REF_COST);

  format %{ "LD $dst, $mem \t// unaligned long" %}
  size(4);
  ins_encode( enc_ld(dst, mem) );
  ins_pipe(pipe_class_memory);
%}

// Load nodes for superwords

// Load Aligned Packed Byte
instruct loadV8(iRegLdst dst, memoryAlg4 mem) %{
  predicate(n->as_LoadVector()->memory_size() == 8);
  match(Set dst (LoadVector mem));
  ins_cost(MEMORY_REF_COST);

  format %{ "LD $dst, $mem \t// load 8-byte Vector" %}
  size(4);
  ins_encode( enc_ld(dst, mem) );
  ins_pipe(pipe_class_memory);
%}

// Load Aligned Packed Byte
// Note: The Power8 instruction loads the contents in a special order in Little Endian mode.
instruct loadV16_Power8(vecX dst, indirect mem) %{
  predicate(n->as_LoadVector()->memory_size() == 16 && PowerArchitecturePPC64 == 8);
  match(Set dst (LoadVector mem));
  ins_cost(MEMORY_REF_COST);

  format %{ "LXVD2X $dst, $mem \t// load 16-byte Vector" %}
  size(4);
  ins_encode %{
    __ lxvd2x($dst$$VectorSRegister, $mem$$Register);
  %}
  ins_pipe(pipe_class_default);
%}

instruct loadV16_Power9(vecX dst, memoryAlg16 mem) %{
  predicate(n->as_LoadVector()->memory_size() == 16 && PowerArchitecturePPC64 >= 9);
  match(Set dst (LoadVector mem));
  ins_cost(MEMORY_REF_COST);

  format %{ "LXV $dst, $mem \t// load 16-byte Vector" %}
  size(4);
  ins_encode %{
    __ lxv($dst$$VectorSRegister, $mem$$disp, $mem$$Register);
  %}
  ins_pipe(pipe_class_default);
%}
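
// Note on the two 16-byte forms: LXVD2X (Power8) only supports indexed
// addressing and, on little-endian systems, delivers the two doublewords of
// the quadword in swapped positions (the "special order" mentioned above).
// LXV (Power9) can use the 16-aligned 16-bit displacement of memoryAlg16 and
// loads the elements in natural order, so no swap fixup is required.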

// Load Range, range = array length (=jint)
instruct loadRange(iRegIdst dst, memory mem) %{
  match(Set dst (LoadRange mem));
  ins_cost(MEMORY_REF_COST);

  format %{ "LWZ $dst, $mem \t// range" %}
  size(4);
  ins_encode( enc_lwz(dst, mem) );
  ins_pipe(pipe_class_memory);
%}

// Load Compressed Pointer
instruct loadN(iRegNdst dst, memory mem) %{
  match(Set dst (LoadN mem));
  predicate((n->as_Load()->is_unordered() || followed_by_acquire(n)) && n->as_Load()->barrier_data() == 0);
  ins_cost(MEMORY_REF_COST);

  format %{ "LWZ $dst, $mem \t// load compressed ptr" %}
  size(4);
  ins_encode( enc_lwz(dst, mem) );
  ins_pipe(pipe_class_memory);
%}

// Load Compressed Pointer acquire.
instruct loadN_ac(iRegNdst dst, memory mem) %{
  match(Set dst (LoadN mem));
  predicate(n->as_Load()->barrier_data() == 0);
  ins_cost(3*MEMORY_REF_COST);

  format %{ "LWZ $dst, $mem \t// load acquire compressed ptr\n\t"
            "TWI $dst\n\t"
            "ISYNC" %}
  size(12);
  ins_encode( enc_lwz_ac(dst, mem) );
  ins_pipe(pipe_class_memory);
%}

// Load Compressed Pointer and decode it if narrow_oop_shift == 0.
instruct loadN2P_unscaled(iRegPdst dst, memory mem) %{
  match(Set dst (DecodeN (LoadN mem)));
  predicate(_kids[0]->_leaf->as_Load()->is_unordered() && CompressedOops::shift() == 0 && _kids[0]->_leaf->as_Load()->barrier_data() == 0);
  ins_cost(MEMORY_REF_COST);

  format %{ "LWZ $dst, $mem \t// DecodeN (unscaled)" %}
  size(4);
  ins_encode( enc_lwz(dst, mem) );
  ins_pipe(pipe_class_memory);
%}

instruct loadN2P_klass_unscaled(iRegPdst dst, memory mem) %{
  match(Set dst (DecodeNKlass (LoadNKlass mem)));
  predicate(CompressedKlassPointers::base() == nullptr && CompressedKlassPointers::shift() == 0 &&
            _kids[0]->_leaf->as_Load()->is_unordered());
  ins_cost(MEMORY_REF_COST);

  format %{ "LWZ $dst, $mem \t// DecodeNKlass (unscaled)" %}
  size(4);
  ins_encode( enc_lwz(dst, mem) );
  ins_pipe(pipe_class_memory);
%}

// Load Pointer
instruct loadP(iRegPdst dst, memoryAlg4 mem) %{
  match(Set dst (LoadP mem));
  predicate((n->as_Load()->is_unordered() || followed_by_acquire(n)) && n->as_Load()->barrier_data() == 0);
  ins_cost(MEMORY_REF_COST);

  format %{ "LD $dst, $mem \t// ptr" %}
  size(4);
  ins_encode( enc_ld(dst, mem) );
  ins_pipe(pipe_class_memory);
%}

// Load Pointer acquire.
instruct loadP_ac(iRegPdst dst, memoryAlg4 mem) %{
  match(Set dst (LoadP mem));
  ins_cost(3*MEMORY_REF_COST);

  predicate(n->as_Load()->barrier_data() == 0);

  format %{ "LD $dst, $mem \t// ptr acquire\n\t"
            "TWI $dst\n\t"
            "ISYNC" %}
  size(12);
  ins_encode( enc_ld_ac(dst, mem) );
  ins_pipe(pipe_class_memory);
%}

// LoadP + CastP2L
instruct loadP2X(iRegLdst dst, memoryAlg4 mem) %{
  match(Set dst (CastP2X (LoadP mem)));
  predicate(_kids[0]->_leaf->as_Load()->is_unordered() && _kids[0]->_leaf->as_Load()->barrier_data() == 0);
  ins_cost(MEMORY_REF_COST);

  format %{ "LD $dst, $mem \t// ptr + p2x" %}
  size(4);
  ins_encode( enc_ld(dst, mem) );
  ins_pipe(pipe_class_memory);
%}

// Load compressed klass pointer.
instruct loadNKlass(iRegNdst dst, memory mem) %{
  match(Set dst (LoadNKlass mem));
  predicate(!UseCompactObjectHeaders);
  ins_cost(MEMORY_REF_COST);

  format %{ "LWZ $dst, $mem \t// compressed klass ptr" %}
  size(4);
  ins_encode( enc_lwz(dst, mem) );
  ins_pipe(pipe_class_memory);
%}

instruct loadNKlassCompactHeaders(iRegNdst dst, memory mem) %{
  match(Set dst (LoadNKlass mem));
  predicate(UseCompactObjectHeaders);
  ins_cost(MEMORY_REF_COST);

  format %{ "load_narrow_klass_compact $dst, $mem \t// compressed class ptr" %}
  size(8);
  ins_encode %{
    assert($mem$$index$$Register == R0, "must not have indexed address: %s[%s]", $mem$$base$$Register.name(), $mem$$index$$Register.name());
    __ load_narrow_klass_compact_c2($dst$$Register, $mem$$base$$Register, $mem$$disp);
  %}
  ins_pipe(pipe_class_memory);
%}

// Load Klass Pointer
instruct loadKlass(iRegPdst dst, memoryAlg4 mem) %{
  match(Set dst (LoadKlass mem));
  ins_cost(MEMORY_REF_COST);

  format %{ "LD $dst, $mem \t// klass ptr" %}
  size(4);
  ins_encode( enc_ld(dst, mem) );
  ins_pipe(pipe_class_memory);
%}

// Load Float
instruct loadF(regF dst, memory mem) %{
  match(Set dst (LoadF mem));
  predicate(n->as_Load()->is_unordered() || followed_by_acquire(n));
  ins_cost(MEMORY_REF_COST);

  format %{ "LFS $dst, $mem" %}
  size(4);
  ins_encode %{
    int Idisp = $mem$$disp + frame_slots_bias($mem$$base, ra_);
    __ lfs($dst$$FloatRegister, Idisp, $mem$$base$$Register);
  %}
  ins_pipe(pipe_class_memory);
%}

// Load Float acquire.
instruct loadF_ac(regF dst, memory mem, flagsRegCR0 cr0) %{
  match(Set dst (LoadF mem));
  effect(TEMP cr0);
  ins_cost(3*MEMORY_REF_COST);

  format %{ "LFS $dst, $mem \t// acquire\n\t"
            "FCMPU cr0, $dst, $dst\n\t"
            "BNE cr0, next\n"
            "next:\n\t"
            "ISYNC" %}
  size(16);
  ins_encode %{
    int Idisp = $mem$$disp + frame_slots_bias($mem$$base, ra_);
    Label next;
    __ lfs($dst$$FloatRegister, Idisp, $mem$$base$$Register);
    __ fcmpu(CR0, $dst$$FloatRegister, $dst$$FloatRegister);
    __ bne(CR0, next);
    __ bind(next);
    __ isync();
  %}
  ins_pipe(pipe_class_memory);
%}
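
// Acquire for float loads: TWI only works on GPRs, so the float and double
// "_ac" variants create the required dependency with a self-compare instead.
// FCMPU sets CR0 from the freshly loaded value, and the conditional branch to
// the immediately following label consumes CR0; whether taken or not it lands
// in the same place, but the ISYNC behind it now depends on the load, giving
// the same load-acquire ordering as in the integer variants.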

// Load Double - aligned
instruct loadD(regD dst, memory mem) %{
  match(Set dst (LoadD mem));
  predicate(n->as_Load()->is_unordered() || followed_by_acquire(n));
  ins_cost(MEMORY_REF_COST);

  format %{ "LFD $dst, $mem" %}
  size(4);
  ins_encode( enc_lfd(dst, mem) );
  ins_pipe(pipe_class_memory);
%}

// Load Double - aligned acquire.
instruct loadD_ac(regD dst, memory mem, flagsRegCR0 cr0) %{
  match(Set dst (LoadD mem));
  effect(TEMP cr0);
  ins_cost(3*MEMORY_REF_COST);

  format %{ "LFD $dst, $mem \t// acquire\n\t"
            "FCMPU cr0, $dst, $dst\n\t"
            "BNE cr0, next\n"
            "next:\n\t"
            "ISYNC" %}
  size(16);
  ins_encode %{
    int Idisp = $mem$$disp + frame_slots_bias($mem$$base, ra_);
    Label next;
    __ lfd($dst$$FloatRegister, Idisp, $mem$$base$$Register);
    __ fcmpu(CR0, $dst$$FloatRegister, $dst$$FloatRegister);
    __ bne(CR0, next);
    __ bind(next);
    __ isync();
  %}
  ins_pipe(pipe_class_memory);
%}

// Load Double - UNaligned
instruct loadD_unaligned(regD dst, memory mem) %{
  match(Set dst (LoadD_unaligned mem));
  // predicate(...) // Unaligned_ac is not needed (and wouldn't make sense).
  ins_cost(MEMORY_REF_COST);

  format %{ "LFD $dst, $mem" %}
  size(4);
  ins_encode( enc_lfd(dst, mem) );
  ins_pipe(pipe_class_memory);
%}

//----------Constants--------------------------------------------------------

// Load MachConstantTableBase: add hi offset to global toc.
// TODO: Handle hidden register r29 in bundler!
instruct loadToc_hi(iRegLdst dst) %{
  effect(DEF dst);
  ins_cost(DEFAULT_COST);

  format %{ "ADDIS $dst, R29, DISP.hi \t// load TOC hi" %}
  size(4);
  ins_encode %{
    __ calculate_address_from_global_toc_hi16only($dst$$Register, __ method_toc());
  %}
  ins_pipe(pipe_class_default);
%}

// Load MachConstantTableBase: add lo offset to global toc.
instruct loadToc_lo(iRegLdst dst, iRegLdst src) %{
  effect(DEF dst, USE src);
  ins_cost(DEFAULT_COST);

  format %{ "ADDI $dst, $src, DISP.lo \t// load TOC lo" %}
  size(4);
  ins_encode %{
    __ calculate_address_from_global_toc_lo16only($dst$$Register, __ method_toc());
  %}
  ins_pipe(pipe_class_default);
%}

// Load 16-bit integer constant 0xssss????
instruct loadConI16(iRegIdst dst, immI16 src) %{
  match(Set dst src);

  format %{ "LI $dst, $src" %}
  size(4);
  ins_encode %{
    __ li($dst$$Register, (int)((short)($src$$constant & 0xFFFF)));
  %}
  ins_pipe(pipe_class_default);
%}

// Load integer constant 0x????0000
instruct loadConIhi16(iRegIdst dst, immIhi16 src) %{
  match(Set dst src);
  ins_cost(DEFAULT_COST);

  format %{ "LIS $dst, $src.hi" %}
  size(4);
  ins_encode %{
    // Lis sign extends 16-bit src then shifts it 16 bits to the left.
    __ lis($dst$$Register, (int)((short)(($src$$constant & 0xFFFF0000) >> 16)));
  %}
  ins_pipe(pipe_class_default);
%}

// Part 2 of loading 32 bit constant: hi16 is in src1 (properly shifted
// and sign extended), this adds the low 16 bits.
instruct loadConI32_lo16(iRegIdst dst, iRegIsrc src1, immI16 src2) %{
  // no match-rule, false predicate
  effect(DEF dst, USE src1, USE src2);
  predicate(false);

  format %{ "ORI $dst, $src1.hi, $src2.lo" %}
  size(4);
  ins_encode %{
    __ ori($dst$$Register, $src1$$Register, ($src2$$constant) & 0xFFFF);
  %}
  ins_pipe(pipe_class_default);
%}

instruct loadConI32(iRegIdst dst, immI32 src) %{
  match(Set dst src);
  // This macro is valid only in Power 10 and up, but adding the following predicate here
  // caused a build error, so we comment it out for now.
  // predicate(PowerArchitecturePPC64 >= 10);
  ins_cost(DEFAULT_COST+1);

  format %{ "PLI $dst, $src" %}
  size(8);
  ins_encode %{
    assert( ((intptr_t)(__ pc()) & 0x3c) != 0x3c, "Bad alignment for prefixed instruction at " INTPTR_FORMAT, (intptr_t)(__ pc()));
    __ pli($dst$$Register, $src$$constant);
  %}
  ins_pipe(pipe_class_default);
  ins_alignment(2);
%}

instruct loadConI_Ex(iRegIdst dst, immI src) %{
  match(Set dst src);
  ins_cost(DEFAULT_COST*2);

  expand %{
    // Would like to use $src$$constant.
    immI16 srcLo %{ _opnds[1]->constant() %}
    // srcHi can be 0000 if srcLo sign-extends to a negative number.
    immIhi16 srcHi %{ _opnds[1]->constant() %}
    iRegIdst tmpI;
    loadConIhi16(tmpI, srcHi);
    loadConI32_lo16(dst, tmpI, srcLo);
  %}
%}
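
// Worked example (illustrative): loading the int constant 0x12345678 with
// loadConI_Ex expands to
//   LIS tmpI, 0x1234        // tmpI = 0x12340000
//   ORI dst,  tmpI, 0x5678  // dst  = 0x12345678
// ORI only ORs in the low 16 bits, so srcHi must already hold the final
// upper halfword produced by LIS.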

// No constant pool entries required.
instruct loadConL16(iRegLdst dst, immL16 src) %{
  match(Set dst src);

  format %{ "LI $dst, $src \t// long" %}
  size(4);
  ins_encode %{
    __ li($dst$$Register, (int)((short) ($src$$constant & 0xFFFF)));
  %}
  ins_pipe(pipe_class_default);
%}

// Load long constant 0xssssssss????0000
instruct loadConL32hi16(iRegLdst dst, immL32hi16 src) %{
  match(Set dst src);
  ins_cost(DEFAULT_COST);

  format %{ "LIS $dst, $src.hi \t// long" %}
  size(4);
  ins_encode %{
    __ lis($dst$$Register, (int)((short)(($src$$constant & 0xFFFF0000) >> 16)));
  %}
  ins_pipe(pipe_class_default);
%}

// To load a 32 bit constant: merge lower 16 bits into already loaded
// high 16 bits.
instruct loadConL32_lo16(iRegLdst dst, iRegLsrc src1, immL16 src2) %{
  // no match-rule, false predicate
  effect(DEF dst, USE src1, USE src2);
  predicate(false);

  format %{ "ORI $dst, $src1, $src2.lo" %}
  size(4);
  ins_encode %{
    __ ori($dst$$Register, $src1$$Register, ($src2$$constant) & 0xFFFF);
  %}
  ins_pipe(pipe_class_default);
%}

// Load 32-bit long constant
instruct loadConL32_Ex(iRegLdst dst, immL32 src) %{
  match(Set dst src);
  ins_cost(DEFAULT_COST*2);

  expand %{
    // Would like to use $src$$constant.
    immL16 srcLo %{ _opnds[1]->constant() /*& 0x0000FFFFL */%}
    // srcHi can be 0000 if srcLo sign-extends to a negative number.
    immL32hi16 srcHi %{ _opnds[1]->constant() /*& 0xFFFF0000L */%}
    iRegLdst tmpL;
    loadConL32hi16(tmpL, srcHi);
    loadConL32_lo16(dst, tmpL, srcLo);
  %}
%}

// Load 34-bit long constant using prefixed addi. No constant pool entries required.
instruct loadConL34(iRegLdst dst, immL34 src) %{
  match(Set dst src);
  // This macro is valid only in Power 10 and up, but adding the following predicate here
  // caused a build error, so we comment it out for now.
  // predicate(PowerArchitecturePPC64 >= 10);
  ins_cost(DEFAULT_COST+1);

  format %{ "PLI $dst, $src \t// long" %}
  size(8);
  ins_encode %{
    assert( ((intptr_t)(__ pc()) & 0x3c) != 0x3c, "Bad alignment for prefixed instruction at " INTPTR_FORMAT, (intptr_t)(__ pc()));
    __ pli($dst$$Register, $src$$constant);
  %}
  ins_pipe(pipe_class_default);
  ins_alignment(2);
%}

// Load long constant 0x????000000000000.
instruct loadConLhighest16_Ex(iRegLdst dst, immLhighest16 src) %{
  match(Set dst src);
  ins_cost(DEFAULT_COST);

  expand %{
    immL32hi16 srcHi %{ _opnds[1]->constant() >> 32 /*& 0xFFFF0000L */%}
    immI shift32 %{ 32 %}
    iRegLdst tmpL;
    loadConL32hi16(tmpL, srcHi);
    lshiftL_regL_immI(dst, tmpL, shift32);
  %}
%}
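
// Worked example (illustrative): for src == 0xABCD000000000000, srcHi is
// 0xABCD0000. LIS materializes the sign-extended value 0xFFFFFFFFABCD0000;
// shifting it left by 32 discards the sign-extension bits and leaves exactly
// 0xABCD000000000000 in dst.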

// Expand node for constant pool load: small offset.
instruct loadConL(iRegLdst dst, immL src, iRegLdst toc) %{
  effect(DEF dst, USE src, USE toc);
  ins_cost(MEMORY_REF_COST);

  ins_num_consts(1);
  // Needed so that CallDynamicJavaDirect can compute the address of this
  // instruction for relocation.
  ins_field_cbuf_insts_offset(int);

  format %{ "LD $dst, offset, $toc \t// load long $src from TOC" %}
  size(4);
  ins_encode( enc_load_long_constL(dst, src, toc) );
  ins_pipe(pipe_class_memory);
%}

// Expand node for constant pool load: large offset.
instruct loadConL_hi(iRegLdst dst, immL src, iRegLdst toc) %{
  effect(DEF dst, USE src, USE toc);
  predicate(false);

  ins_num_consts(1);
  ins_field_const_toc_offset(int);
  // Needed so that CallDynamicJavaDirect can compute the address of this
  // instruction for relocation.
  ins_field_cbuf_insts_offset(int);

  format %{ "ADDIS $dst, $toc, offset \t// load long $src from TOC (hi)" %}
  size(4);
  ins_encode( enc_load_long_constL_hi(dst, toc, src) );
  ins_pipe(pipe_class_default);
%}

// Expand node for constant pool load: large offset.
// No constant pool entries required.
instruct loadConL_lo(iRegLdst dst, immL src, iRegLdst base) %{
  effect(DEF dst, USE src, USE base);
  predicate(false);

  ins_field_const_toc_offset_hi_node(loadConL_hiNode*);

  format %{ "LD $dst, offset, $base \t// load long $src from TOC (lo)" %}
  size(4);
  ins_encode %{
    int offset = ra_->C->output()->in_scratch_emit_size() ? 0 : _const_toc_offset_hi_node->_const_toc_offset;
    __ ld($dst$$Register, MacroAssembler::largeoffset_si16_si16_lo(offset), $base$$Register);
  %}
  ins_pipe(pipe_class_memory);
%}

// Load long constant from constant table. Expand in case an
// offset > 16 bit is needed.
// Adlc adds toc node MachConstantTableBase.
instruct loadConL_Ex(iRegLdst dst, immL src) %{
  match(Set dst src);
  ins_cost(MEMORY_REF_COST);

  format %{ "LD $dst, offset, $constanttablebase\t// load long $src from table, postalloc expanded" %}
  // We cannot inline the enc_class for the expand as that does not support constanttablebase.
  postalloc_expand( postalloc_expand_load_long_constant(dst, src, constanttablebase) );
%}

// Load nullptr as compressed oop.
instruct loadConN0(iRegNdst dst, immN_0 src) %{
  match(Set dst src);
  ins_cost(DEFAULT_COST);

  format %{ "LI $dst, $src \t// compressed ptr" %}
  size(4);
  ins_encode %{
    __ li($dst$$Register, 0);
  %}
  ins_pipe(pipe_class_default);
%}

// Load hi part of compressed oop constant.
instruct loadConN_hi(iRegNdst dst, immN src) %{
  effect(DEF dst, USE src);
  ins_cost(DEFAULT_COST);

  format %{ "LIS $dst, $src \t// narrow oop hi" %}
  size(4);
  ins_encode %{
    __ lis($dst$$Register, 0); // Will get patched.
  %}
  ins_pipe(pipe_class_default);
%}

// Add lo part of compressed oop constant to already loaded hi part.
instruct loadConN_lo(iRegNdst dst, iRegNsrc src1, immN src2) %{
  effect(DEF dst, USE src1, USE src2);
  ins_cost(DEFAULT_COST);

  format %{ "ORI $dst, $src1, $src2 \t// narrow oop lo" %}
  size(4);
  ins_encode %{
    AddressLiteral addrlit = __ constant_oop_address((jobject)$src2$$constant);
    __ relocate(addrlit.rspec(), /*compressed format*/ 1);
    __ ori($dst$$Register, $src1$$Register, 0); // Will get patched.
  %}
  ins_pipe(pipe_class_default);
%}

instruct rldicl(iRegLdst dst, iRegLsrc src, immI16 shift, immI16 mask_begin) %{
  effect(DEF dst, USE src, USE shift, USE mask_begin);

  size(4);
  ins_encode %{
    __ rldicl($dst$$Register, $src$$Register, $shift$$constant, $mask_begin$$constant);
  %}
  ins_pipe(pipe_class_default);
%}

// Needed to postalloc expand loadConN: ConN is loaded as ConI
// leaving the upper 32 bits with sign-extension bits.
// This clears these bits: dst = src & 0xFFFFFFFF.
// TODO: Eventually call this maskN_regN_FFFFFFFF.
instruct clearMs32b(iRegNdst dst, iRegNsrc src) %{
  effect(DEF dst, USE src);
  predicate(false);

  format %{ "MASK $dst, $src, 0xFFFFFFFF" %} // mask
  size(4);
  ins_encode %{
    __ clrldi($dst$$Register, $src$$Register, 0x20);
  %}
  ins_pipe(pipe_class_default);
%}
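
// Illustrative effect of clearMs32b: CLRLDI with a count of 32 zeroes the
// most significant 32 bits, e.g. a sign-extended 0xFFFFFFFF80000000 becomes
// 0x0000000080000000, the proper zero-extended narrow oop bit pattern.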

// Optimize DecodeN for disjoint base.
// Load base of compressed oops into a register
instruct loadBase(iRegLdst dst) %{
  effect(DEF dst);

  format %{ "LoadConst $dst, heapbase" %}
  ins_encode %{
    __ load_const_optimized($dst$$Register, CompressedOops::base(), R0);
  %}
  ins_pipe(pipe_class_default);
%}

// Loading ConN must be postalloc expanded so that edges between
// the nodes are safe. They may not interfere with a safepoint.
// GL TODO: This needs three instructions: better put this into the constant pool.
instruct loadConN_Ex(iRegNdst dst, immN src) %{
  match(Set dst src);
  ins_cost(DEFAULT_COST*2);

  format %{ "LoadN $dst, $src \t// postalloc expanded" %} // mask
  postalloc_expand %{
    MachNode *m1 = new loadConN_hiNode();
    MachNode *m2 = new loadConN_loNode();
    MachNode *m3 = new clearMs32bNode();
    m1->add_req(nullptr);
    m2->add_req(nullptr, m1);
    m3->add_req(nullptr, m2);
    m1->_opnds[0] = op_dst;
    m1->_opnds[1] = op_src;
    m2->_opnds[0] = op_dst;
    m2->_opnds[1] = op_dst;
    m2->_opnds[2] = op_src;
    m3->_opnds[0] = op_dst;
    m3->_opnds[1] = op_dst;
    ra_->set_pair(m1->_idx, ra_->get_reg_second(this), ra_->get_reg_first(this));
    ra_->set_pair(m2->_idx, ra_->get_reg_second(this), ra_->get_reg_first(this));
    ra_->set_pair(m3->_idx, ra_->get_reg_second(this), ra_->get_reg_first(this));
    nodes->push(m1);
    nodes->push(m2);
    nodes->push(m3);
  %}
%}

// We have seen a safepoint between the hi and lo parts, and this node was handled
// as an oop. Therefore this needs a match rule so that build_oop_map knows this is
// not a narrow oop.
instruct loadConNKlass_hi(iRegNdst dst, immNKlass_NM src) %{
  match(Set dst src);
  effect(DEF dst, USE src);
  ins_cost(DEFAULT_COST);

  format %{ "LIS $dst, $src \t// narrow klass hi" %}
  size(4);
  ins_encode %{
    intptr_t Csrc = CompressedKlassPointers::encode((Klass *)$src$$constant);
    __ lis($dst$$Register, (int)(short)((Csrc >> 16) & 0xffff));
  %}
  ins_pipe(pipe_class_default);
%}

// As loadConNKlass_hi this must be recognized as narrow klass, not oop!
instruct loadConNKlass_mask(iRegNdst dst, immNKlass_NM src1, iRegNsrc src2) %{
  match(Set dst src1);
  effect(TEMP src2);
  ins_cost(DEFAULT_COST);

  format %{ "MASK $dst, $src2, 0xFFFFFFFF" %} // mask
  size(4);
  ins_encode %{
    __ clrldi($dst$$Register, $src2$$Register, 0x20);
  %}
  ins_pipe(pipe_class_default);
%}

// This needs a match rule so that build_oop_map knows this is
// not a narrow oop.
instruct loadConNKlass_lo(iRegNdst dst, immNKlass_NM src1, iRegNsrc src2) %{
  match(Set dst src1);
  effect(TEMP src2);
  ins_cost(DEFAULT_COST);

  format %{ "ORI $dst, $src1, $src2 \t// narrow klass lo" %}
  size(4);
  ins_encode %{
    // Notify OOP recorder (don't need the relocation)
    AddressLiteral md = __ constant_metadata_address((Klass*)$src1$$constant);
    intptr_t Csrc = CompressedKlassPointers::encode((Klass*)md.value());
    __ ori($dst$$Register, $src2$$Register, Csrc & 0xffff);
  %}
  ins_pipe(pipe_class_default);
%}

// Loading ConNKlass must be postalloc expanded so that edges between
// the nodes are safe. They may not interfere with a safepoint.
instruct loadConNKlass_Ex(iRegNdst dst, immNKlass src) %{
  match(Set dst src);
  ins_cost(DEFAULT_COST*2);

  format %{ "LoadN $dst, $src \t// postalloc expanded" %} // mask
  postalloc_expand %{
    // Load high bits into register. Sign extended.
    MachNode *m1 = new loadConNKlass_hiNode();
    m1->add_req(nullptr);
    m1->_opnds[0] = op_dst;
    m1->_opnds[1] = op_src;
    ra_->set_pair(m1->_idx, ra_->get_reg_second(this), ra_->get_reg_first(this));
    nodes->push(m1);

    MachNode *m2 = m1;
    if (!Assembler::is_uimm((jlong)CompressedKlassPointers::encode((Klass *)op_src->constant()), 31)) {
      // Value might be 1-extended. Mask out these bits.
      m2 = new loadConNKlass_maskNode();
      m2->add_req(nullptr, m1);
      m2->_opnds[0] = op_dst;
      m2->_opnds[1] = op_src;
      m2->_opnds[2] = op_dst;
      ra_->set_pair(m2->_idx, ra_->get_reg_second(this), ra_->get_reg_first(this));
      nodes->push(m2);
    }

    MachNode *m3 = new loadConNKlass_loNode();
    m3->add_req(nullptr, m2);
    m3->_opnds[0] = op_dst;
    m3->_opnds[1] = op_src;
    m3->_opnds[2] = op_dst;
    ra_->set_pair(m3->_idx, ra_->get_reg_second(this), ra_->get_reg_first(this));
    nodes->push(m3);
  %}
%}

// 0x1 is used in object initialization (initial object header).
// No constant pool entries required.
instruct loadConP0or1(iRegPdst dst, immP_0or1 src) %{
  match(Set dst src);

  format %{ "LI $dst, $src \t// ptr" %}
  size(4);
  ins_encode %{
    __ li($dst$$Register, (int)((short)($src$$constant & 0xFFFF)));
  %}
  ins_pipe(pipe_class_default);
%}

// Expand node for constant pool load: small offset.
// The match rule is needed to generate the correct bottom_type(),
// however this node should never match. The use of predicate is not
// possible since ADLC forbids predicates for chain rules. The higher
// costs do not prevent matching in this case. For that reason the
// operand immP_NM with predicate(false) is used.
instruct loadConP(iRegPdst dst, immP_NM src, iRegLdst toc) %{
  match(Set dst src);
  effect(TEMP toc);

  ins_num_consts(1);

  format %{ "LD $dst, offset, $toc \t// load ptr $src from TOC" %}
  size(4);
  ins_encode( enc_load_long_constP(dst, src, toc) );
  ins_pipe(pipe_class_memory);
%}

// Expand node for constant pool load: large offset.
instruct loadConP_hi(iRegPdst dst, immP_NM src, iRegLdst toc) %{
  effect(DEF dst, USE src, USE toc);
  predicate(false);

  ins_num_consts(1);
  ins_field_const_toc_offset(int);

  format %{ "ADDIS $dst, $toc, offset \t// load ptr $src from TOC (hi)" %}
  size(4);
  ins_encode( enc_load_long_constP_hi(dst, src, toc) );
  ins_pipe(pipe_class_default);
%}

// Expand node for constant pool load: large offset.
instruct loadConP_lo(iRegPdst dst, immP_NM src, iRegLdst base) %{
  match(Set dst src);
  effect(TEMP base);

  ins_field_const_toc_offset_hi_node(loadConP_hiNode*);

  format %{ "LD $dst, offset, $base \t// load ptr $src from TOC (lo)" %}
  size(4);
  ins_encode %{
    int offset = ra_->C->output()->in_scratch_emit_size() ? 0 : _const_toc_offset_hi_node->_const_toc_offset;
    __ ld($dst$$Register, MacroAssembler::largeoffset_si16_si16_lo(offset), $base$$Register);
  %}
  ins_pipe(pipe_class_memory);
%}

// Load pointer constant from constant table. Expand in case an
// offset > 16 bit is needed.
// Adlc adds toc node MachConstantTableBase.
instruct loadConP_Ex(iRegPdst dst, immP src) %{
  match(Set dst src);
  ins_cost(MEMORY_REF_COST);

  // This rule does not use "expand" because then
  // the result type is not known to be an Oop. An ADLC
  // enhancement will be needed to make that work - not worth it!

  // If this instruction rematerializes, it prolongs the live range
  // of the toc node, causing illegal graphs.
  // assert(edge_from_to(_reg_node[reg_lo],def)) fails in verify_good_schedule().
  ins_cannot_rematerialize(true);

  format %{ "LD $dst, offset, $constanttablebase \t// load ptr $src from table, postalloc expanded" %}
  postalloc_expand( postalloc_expand_load_ptr_constant(dst, src, constanttablebase) );
%}

// Expand node for constant pool load: small offset.
instruct loadConF(regF dst, immF src, iRegLdst toc) %{
  effect(DEF dst, USE src, USE toc);
  ins_cost(MEMORY_REF_COST);

  ins_num_consts(1);

  format %{ "LFS $dst, offset, $toc \t// load float $src from TOC" %}
  size(4);
  ins_encode %{
    address float_address = __ float_constant($src$$constant);
    if (float_address == nullptr) {
      ciEnv::current()->record_out_of_memory_failure();
      return;
    }
    __ lfs($dst$$FloatRegister, __ offset_to_method_toc(float_address), $toc$$Register);
  %}
  ins_pipe(pipe_class_memory);
%}

// Expand node for constant pool load: large offset.
instruct loadConFComp(regF dst, immF src, iRegLdst toc) %{
  effect(DEF dst, USE src, USE toc);
  ins_cost(MEMORY_REF_COST);

  ins_num_consts(1);

  format %{ "ADDIS $toc, $toc, offset_hi\n\t"
            "LFS $dst, offset_lo, $toc \t// load float $src from TOC (hi/lo)\n\t"
            "ADDIS $toc, $toc, -offset_hi" %}
  size(12);
  ins_encode %{
    FloatRegister Rdst = $dst$$FloatRegister;
    Register Rtoc = $toc$$Register;
    address float_address = __ float_constant($src$$constant);
    if (float_address == nullptr) {
      ciEnv::current()->record_out_of_memory_failure();
      return;
    }
    int offset = __ offset_to_method_toc(float_address);
    int hi = (offset + (1<<15))>>16;
    int lo = offset - hi * (1<<16);

    __ addis(Rtoc, Rtoc, hi);
    __ lfs(Rdst, lo, Rtoc);
    __ addis(Rtoc, Rtoc, -hi);
  %}
  ins_pipe(pipe_class_memory);
%}
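
// Worked example (illustrative) for the hi/lo split used above: for
// offset == 0x12345, hi == (0x12345 + 0x8000) >> 16 == 0x1 and
// lo == 0x12345 - 0x1 * 0x10000 == 0x2345. ADDIS adds hi << 16 and the load
// supplies lo as its 16-bit displacement, so hi * 0x10000 + lo reconstructs
// the full offset; rounding hi keeps lo within the signed 16-bit range.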

// Adlc adds toc node MachConstantTableBase.
instruct loadConF_Ex(regF dst, immF src) %{
  match(Set dst src);
  ins_cost(MEMORY_REF_COST);

  // See loadConP.
  ins_cannot_rematerialize(true);

  format %{ "LFS $dst, offset, $constanttablebase \t// load $src from table, postalloc expanded" %}
  postalloc_expand( postalloc_expand_load_float_constant(dst, src, constanttablebase) );
%}

// Expand node for constant pool load: small offset.
instruct loadConD(regD dst, immD src, iRegLdst toc) %{
  effect(DEF dst, USE src, USE toc);
  ins_cost(MEMORY_REF_COST);

  ins_num_consts(1);

  format %{ "LFD $dst, offset, $toc \t// load double $src from TOC" %}
  size(4);
  ins_encode %{
    address float_address = __ double_constant($src$$constant);
    if (float_address == nullptr) {
      ciEnv::current()->record_out_of_memory_failure();
      return;
    }
    int offset = __ offset_to_method_toc(float_address);
    __ lfd($dst$$FloatRegister, offset, $toc$$Register);
  %}
  ins_pipe(pipe_class_memory);
%}

// Expand node for constant pool load: large offset.
instruct loadConDComp(regD dst, immD src, iRegLdst toc) %{
  effect(DEF dst, USE src, USE toc);
  ins_cost(MEMORY_REF_COST);

  ins_num_consts(1);

  format %{ "ADDIS $toc, $toc, offset_hi\n\t"
            "LFD $dst, offset_lo, $toc \t// load double $src from TOC (hi/lo)\n\t"
            "ADDIS $toc, $toc, -offset_hi" %}
  size(12);
  ins_encode %{
    FloatRegister Rdst = $dst$$FloatRegister;
    Register Rtoc = $toc$$Register;
    address float_address = __ double_constant($src$$constant);
    if (float_address == nullptr) {
      ciEnv::current()->record_out_of_memory_failure();
      return;
    }
    int offset = __ offset_to_method_toc(float_address);
    int hi = (offset + (1<<15))>>16;
    int lo = offset - hi * (1<<16);

    __ addis(Rtoc, Rtoc, hi);
    __ lfd(Rdst, lo, Rtoc);
    __ addis(Rtoc, Rtoc, -hi);
  %}
  ins_pipe(pipe_class_memory);
%}

// Adlc adds toc node MachConstantTableBase.
instruct loadConD_Ex(regD dst, immD src) %{
  match(Set dst src);
  ins_cost(MEMORY_REF_COST);

  // See loadConP.
  ins_cannot_rematerialize(true);

  format %{ "ConD $dst, offset, $constanttablebase \t// load $src from table, postalloc expanded" %}
  postalloc_expand( postalloc_expand_load_double_constant(dst, src, constanttablebase) );
%}

// Prefetch instructions.
// Must be safe to execute with invalid address (cannot fault).

// Special prefetch versions which use the dcbz instruction.
instruct prefetch_alloc_zero(indirectMemory mem, iRegLsrc src) %{
  match(PrefetchAllocation (AddP mem src));
  predicate(AllocatePrefetchStyle == 3);
  ins_cost(MEMORY_REF_COST);

  format %{ "PREFETCH $mem, 2, $src \t// Prefetch write-many with zero" %}
  size(4);
  ins_encode %{
    __ dcbz($src$$Register, $mem$$base$$Register);
  %}
  ins_pipe(pipe_class_memory);
%}

instruct prefetch_alloc_zero_no_offset(indirectMemory mem) %{
  match(PrefetchAllocation mem);
  predicate(AllocatePrefetchStyle == 3);
  ins_cost(MEMORY_REF_COST);

  format %{ "PREFETCH $mem, 2 \t// Prefetch write-many with zero" %}
  size(4);
  ins_encode %{
    __ dcbz($mem$$base$$Register);
  %}
  ins_pipe(pipe_class_memory);
%}

instruct prefetch_alloc(indirectMemory mem, iRegLsrc src) %{
  match(PrefetchAllocation (AddP mem src));
  predicate(AllocatePrefetchStyle != 3);
  ins_cost(MEMORY_REF_COST);

  format %{ "PREFETCH $mem, 2, $src \t// Prefetch write-many" %}
  size(4);
  ins_encode %{
    __ dcbtst($src$$Register, $mem$$base$$Register);
  %}
  ins_pipe(pipe_class_memory);
%}

instruct prefetch_alloc_no_offset(indirectMemory mem) %{
  match(PrefetchAllocation mem);
  predicate(AllocatePrefetchStyle != 3);
  ins_cost(MEMORY_REF_COST);

  format %{ "PREFETCH $mem, 2 \t// Prefetch write-many" %}
  size(4);
  ins_encode %{
    __ dcbtst($mem$$base$$Register);
  %}
  ins_pipe(pipe_class_memory);
%}
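
// Note: with AllocatePrefetchStyle == 3 the "prefetch" is a DCBZ, which
// establishes the cache line and zeroes it without reading memory; this
// suits allocation prefetch, since the line is about to be written anyway.
// The other styles use DCBTST, a pure prefetch-for-store hint.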
|
|
|
|
//----------Store Instructions-------------------------------------------------
|
|
|
|
// Store Byte
|
|
instruct storeB(memory mem, iRegIsrc src) %{
|
|
match(Set mem (StoreB mem src));
|
|
ins_cost(MEMORY_REF_COST);
|
|
|
|
format %{ "STB $src, $mem \t// byte" %}
|
|
size(4);
|
|
ins_encode %{
|
|
int Idisp = $mem$$disp + frame_slots_bias($mem$$base, ra_);
|
|
__ stb($src$$Register, Idisp, $mem$$base$$Register);
|
|
%}
|
|
ins_pipe(pipe_class_memory);
|
|
%}
|
|
|
|
// Store Char/Short
|
|
instruct storeC(memory mem, iRegIsrc src) %{
|
|
match(Set mem (StoreC mem src));
|
|
ins_cost(MEMORY_REF_COST);
|
|
|
|
format %{ "STH $src, $mem \t// short" %}
|
|
size(4);
|
|
ins_encode %{
|
|
int Idisp = $mem$$disp + frame_slots_bias($mem$$base, ra_);
|
|
__ sth($src$$Register, Idisp, $mem$$base$$Register);
|
|
%}
|
|
ins_pipe(pipe_class_memory);
|
|
%}
|
|
|
|
// Store Integer
|
|
instruct storeI(memory mem, iRegIsrc src) %{
|
|
match(Set mem (StoreI mem src));
|
|
ins_cost(MEMORY_REF_COST);
|
|
|
|
format %{ "STW $src, $mem" %}
|
|
size(4);
|
|
ins_encode( enc_stw(src, mem) );
|
|
ins_pipe(pipe_class_memory);
|
|
%}
|
|
|
|
// ConvL2I + StoreI.
|
|
instruct storeI_convL2I(memory mem, iRegLsrc src) %{
|
|
match(Set mem (StoreI mem (ConvL2I src)));
|
|
ins_cost(MEMORY_REF_COST);
|
|
|
|
format %{ "STW l2i($src), $mem" %}
|
|
size(4);
|
|
ins_encode( enc_stw(src, mem) );
|
|
ins_pipe(pipe_class_memory);
|
|
%}
|
|
|
|
// Store Long
|
|
instruct storeL(memoryAlg4 mem, iRegLsrc src) %{
|
|
match(Set mem (StoreL mem src));
|
|
ins_cost(MEMORY_REF_COST);
|
|
|
|
format %{ "STD $src, $mem \t// long" %}
|
|
size(4);
|
|
ins_encode( enc_std(src, mem) );
|
|
ins_pipe(pipe_class_memory);
|
|
%}
|
|
|
|
// Store super word nodes.
|
|
|
|
// Store Aligned Packed Byte long register to memory
|
|
instruct storeA8B(memoryAlg4 mem, iRegLsrc src) %{
|
|
predicate(n->as_StoreVector()->memory_size() == 8);
|
|
match(Set mem (StoreVector mem src));
|
|
ins_cost(MEMORY_REF_COST);
|
|
|
|
format %{ "STD $mem, $src \t// packed8B" %}
|
|
size(4);
|
|
ins_encode( enc_std(src, mem) );
|
|
ins_pipe(pipe_class_memory);
|
|
%}
|
|
|
|
// Store Packed Byte long register to memory
|
|
// Note: The Power8 instruction stores the contents in a special order in Little Endian mode.
|
|
instruct storeV16_Power8(indirect mem, vecX src) %{
|
|
predicate(n->as_StoreVector()->memory_size() == 16 && PowerArchitecturePPC64 == 8);
|
|
match(Set mem (StoreVector mem src));
|
|
ins_cost(MEMORY_REF_COST);
|
|
|
|
format %{ "STXVD2X $mem, $src \t// store 16-byte Vector" %}
|
|
size(4);
|
|
ins_encode %{
|
|
__ stxvd2x($src$$VectorSRegister, $mem$$Register);
|
|
%}
|
|
ins_pipe(pipe_class_default);
|
|
%}
|
|
|
|
instruct storeV16_Power9(memoryAlg16 mem, vecX src) %{
  predicate(n->as_StoreVector()->memory_size() == 16 && PowerArchitecturePPC64 >= 9);
  match(Set mem (StoreVector mem src));
  ins_cost(MEMORY_REF_COST);

  format %{ "STXV $mem, $src \t// store 16-byte Vector" %}
  size(4);
  ins_encode %{
    __ stxv($src$$VectorSRegister, $mem$$disp, $mem$$Register);
  %}
  ins_pipe(pipe_class_default);
%}

// Reinterpret: only one vector size used: either L or X
instruct reinterpretL(iRegLdst dst) %{
  match(Set dst (VectorReinterpret dst));
  ins_cost(0);
  format %{ "reinterpret $dst" %}
  ins_encode( /*empty*/ );
  ins_pipe(pipe_class_empty);
%}

instruct reinterpretX(vecX dst) %{
  match(Set dst (VectorReinterpret dst));
  ins_cost(0);
  format %{ "reinterpret $dst" %}
  ins_encode( /*empty*/ );
  ins_pipe(pipe_class_empty);
%}

// Store Compressed Oop
instruct storeN(memory dst, iRegN_P2N src) %{
  match(Set dst (StoreN dst src));
  predicate(n->as_Store()->barrier_data() == 0);
  ins_cost(MEMORY_REF_COST);

  format %{ "STW $src, $dst \t// compressed oop" %}
  size(4);
  ins_encode( enc_stw(src, dst) );
  ins_pipe(pipe_class_memory);
%}

// Store Compressed Klass
instruct storeNKlass(memory dst, iRegN_P2N src) %{
  match(Set dst (StoreNKlass dst src));
  ins_cost(MEMORY_REF_COST);

  format %{ "STW $src, $dst \t// compressed klass" %}
  size(4);
  ins_encode( enc_stw(src, dst) );
  ins_pipe(pipe_class_memory);
%}

// Store Pointer
instruct storeP(memoryAlg4 dst, iRegPsrc src) %{
  match(Set dst (StoreP dst src));
  predicate(n->as_Store()->barrier_data() == 0);
  ins_cost(MEMORY_REF_COST);

  format %{ "STD $src, $dst \t// ptr" %}
  size(4);
  ins_encode( enc_std(src, dst) );
  ins_pipe(pipe_class_memory);
%}

// Store Float
instruct storeF(memory mem, regF src) %{
  match(Set mem (StoreF mem src));
  ins_cost(MEMORY_REF_COST);

  format %{ "STFS $src, $mem" %}
  size(4);
  ins_encode( enc_stfs(src, mem) );
  ins_pipe(pipe_class_memory);
%}

// Store Double
instruct storeD(memory mem, regD src) %{
  match(Set mem (StoreD mem src));
  ins_cost(MEMORY_REF_COST);

  format %{ "STFD $src, $mem" %}
  size(4);
  ins_encode( enc_stfd(src, mem) );
  ins_pipe(pipe_class_memory);
%}

// Convert oop pointer into compressed form.

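// For reference, the transformation implemented by the encode/decode nodes
// below is, as a C++ sketch (using the CompressedOops parameters; nullptr
// always maps to narrow oop 0 and vice versa):
//
//   narrowOop encode(oop p) {
//     return (narrowOop)(((uintptr_t)p - (uintptr_t)CompressedOops::base())
//                        >> CompressedOops::shift());
//   }
//   oop decode(narrowOop n) {
//     return (oop)((uintptr_t)CompressedOops::base()
//                  + ((uintptr_t)n << CompressedOops::shift()));
//   }
//
// The instructs below handle the special cases: base == nullptr and/or
// shift == 0 degenerate into a plain shift or a register move.
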
// Nodes for postalloc expand.

// Shift node for expand.
instruct encodeP_shift(iRegNdst dst, iRegNsrc src) %{
  // The match rule is needed to make it a 'MachTypeNode'!
  match(Set dst (EncodeP src));
  predicate(false);

  format %{ "SRDI $dst, $src, 3 \t// encode" %}
  size(4);
  ins_encode %{
    __ srdi($dst$$Register, $src$$Register, CompressedOops::shift() & 0x3f);
  %}
  ins_pipe(pipe_class_default);
%}

// Sub node for expand.
instruct encodeP_sub(iRegPdst dst, iRegPdst src) %{
  // The match rule is needed to make it a 'MachTypeNode'!
  match(Set dst (EncodeP src));
  predicate(false);

  format %{ "SUB $dst, $src, oop_base \t// encode" %}
  ins_encode %{
    __ sub_const_optimized($dst$$Register, $src$$Register, CompressedOops::base(), R0);
  %}
  ins_pipe(pipe_class_default);
%}

// Conditional sub base.
instruct cond_sub_base(iRegNdst dst, flagsRegSrc crx, iRegPsrc src1) %{
  // The match rule is needed to make it a 'MachTypeNode'!
  match(Set dst (EncodeP (Binary crx src1)));
  predicate(false);

  format %{ "BEQ $crx, done\n\t"
            "SUB $dst, $src1, heapbase \t// encode: subtract base if != nullptr\n"
            "done:" %}
  ins_encode %{
    Label done;
    __ beq($crx$$CondRegister, done);
    __ sub_const_optimized($dst$$Register, $src1$$Register, CompressedOops::base(), R0);
    __ bind(done);
  %}
  ins_pipe(pipe_class_default);
%}

// Power7 can use the isel instruction.
instruct cond_set_0_oop(iRegNdst dst, flagsRegSrc crx, iRegPsrc src1) %{
  // The match rule is needed to make it a 'MachTypeNode'!
  match(Set dst (EncodeP (Binary crx src1)));
  predicate(false);

  format %{ "CMOVE $dst, $crx eq, 0, $src1 \t// encode: preserve 0" %}
  size(4);
  ins_encode %{
    // This is a Power7 instruction for which no machine description exists.
    __ isel_0($dst$$Register, $crx$$CondRegister, Assembler::equal, $src1$$Register);
  %}
  ins_pipe(pipe_class_default);
%}

// Disjoint narrow oop base.
instruct encodeP_Disjoint(iRegNdst dst, iRegPsrc src) %{
  match(Set dst (EncodeP src));
  predicate(CompressedOops::base_disjoint());

  format %{ "EXTRDI $dst, $src, #32, #3 \t// encode with disjoint base" %}
  size(4);
  ins_encode %{
    __ rldicl($dst$$Register, $src$$Register, 64-CompressedOops::shift(), 32);
  %}
  ins_pipe(pipe_class_default);
%}

// shift != 0, base != 0
instruct encodeP_Ex(iRegNdst dst, flagsReg crx, iRegPsrc src) %{
  match(Set dst (EncodeP src));
  effect(TEMP crx);
  predicate(n->bottom_type()->make_ptr()->ptr() != TypePtr::NotNull &&
            CompressedOops::shift() != 0 &&
            CompressedOops::base_overlaps());

  format %{ "EncodeP $dst, $crx, $src \t// postalloc expanded" %}
  postalloc_expand( postalloc_expand_encode_oop(dst, src, crx));
%}

// shift != 0, base != 0
instruct encodeP_not_null_Ex(iRegNdst dst, iRegPsrc src) %{
  match(Set dst (EncodeP src));
  predicate(n->bottom_type()->make_ptr()->ptr() == TypePtr::NotNull &&
            CompressedOops::shift() != 0 &&
            CompressedOops::base_overlaps());

  format %{ "EncodeP $dst, $src\t// $src != Null, postalloc expanded" %}
  postalloc_expand( postalloc_expand_encode_oop_not_null(dst, src) );
%}

// shift != 0, base == 0
// TODO: This is the same as encodeP_shift. Merge!
instruct encodeP_not_null_base_null(iRegNdst dst, iRegPsrc src) %{
  match(Set dst (EncodeP src));
  predicate(CompressedOops::shift() != 0 &&
            CompressedOops::base() == nullptr);

  format %{ "SRDI $dst, $src, #3 \t// encodeP, $src != nullptr" %}
  size(4);
  ins_encode %{
    __ srdi($dst$$Register, $src$$Register, CompressedOops::shift() & 0x3f);
  %}
  ins_pipe(pipe_class_default);
%}

// Compressed OOPs with narrow_oop_shift == 0.
// shift == 0, base == 0
instruct encodeP_narrow_oop_shift_0(iRegNdst dst, iRegPsrc src) %{
  match(Set dst (EncodeP src));
  predicate(CompressedOops::shift() == 0);

  format %{ "MR $dst, $src \t// Ptr->Narrow" %}
  // variable size, 0 or 4.
  ins_encode %{
    __ mr_if_needed($dst$$Register, $src$$Register);
  %}
  ins_pipe(pipe_class_default);
%}

// Decode nodes.

// Shift node for expand.
instruct decodeN_shift(iRegPdst dst, iRegPsrc src) %{
  // The match rule is needed to make it a 'MachTypeNode'!
  match(Set dst (DecodeN src));
  predicate(false);

  format %{ "SLDI $dst, $src, #3 \t// DecodeN" %}
  size(4);
  ins_encode %{
    __ sldi($dst$$Register, $src$$Register, CompressedOops::shift());
  %}
  ins_pipe(pipe_class_default);
%}

// Add node for expand.
instruct decodeN_add(iRegPdst dst, iRegPdst src) %{
  // The match rule is needed to make it a 'MachTypeNode'!
  match(Set dst (DecodeN src));
  predicate(false);

  format %{ "ADD $dst, $src, heapbase \t// DecodeN, add oop base" %}
  ins_encode %{
    __ add_const_optimized($dst$$Register, $src$$Register, CompressedOops::base(), R0);
  %}
  ins_pipe(pipe_class_default);
%}

// Conditional add base for expand.
instruct cond_add_base(iRegPdst dst, flagsRegSrc crx, iRegPsrc src) %{
  // The match rule is needed to make it a 'MachTypeNode'!
  // NOTICE that the rule is nonsense - we just have to make sure that:
  // - _matrule->_rChild->_opType == "DecodeN" (see InstructForm::captures_bottom_type() in formssel.cpp)
  // - we have to match 'crx' to avoid an "illegal USE of non-input: flagsReg crx" error in ADLC.
  match(Set dst (DecodeN (Binary crx src)));
  predicate(false);

  format %{ "BEQ $crx, done\n\t"
            "ADD $dst, $src, heapbase \t// DecodeN: add oop base if $src != nullptr\n"
            "done:" %}
  ins_encode %{
    Label done;
    __ beq($crx$$CondRegister, done);
    __ add_const_optimized($dst$$Register, $src$$Register, CompressedOops::base(), R0);
    __ bind(done);
  %}
  ins_pipe(pipe_class_default);
%}

instruct cond_set_0_ptr(iRegPdst dst, flagsRegSrc crx, iRegPsrc src1) %{
  // The match rule is needed to make it a 'MachTypeNode'!
  // NOTICE that the rule is nonsense - we just have to make sure that:
  // - _matrule->_rChild->_opType == "DecodeN" (see InstructForm::captures_bottom_type() in formssel.cpp)
  // - we have to match 'crx' to avoid an "illegal USE of non-input: flagsReg crx" error in ADLC.
  match(Set dst (DecodeN (Binary crx src1)));
  predicate(false);

  format %{ "CMOVE $dst, $crx eq, 0, $src1 \t// decode: preserve 0" %}
  size(4);
  ins_encode %{
    // This is a Power7 instruction for which no machine description exists.
    __ isel_0($dst$$Register, $crx$$CondRegister, Assembler::equal, $src1$$Register);
  %}
  ins_pipe(pipe_class_default);
%}

// shift != 0, base != 0
instruct decodeN_Ex(iRegPdst dst, iRegNsrc src, flagsReg crx) %{
  match(Set dst (DecodeN src));
  predicate((n->bottom_type()->is_oopptr()->ptr() != TypePtr::NotNull &&
             n->bottom_type()->is_oopptr()->ptr() != TypePtr::Constant) &&
            CompressedOops::shift() != 0 &&
            CompressedOops::base() != nullptr);
  ins_cost(4 * DEFAULT_COST); // Should be more expensive than decodeN_Disjoint_isel_Ex.
  effect(TEMP crx);

  format %{ "DecodeN $dst, $src \t// Kills $crx, postalloc expanded" %}
  postalloc_expand( postalloc_expand_decode_oop(dst, src, crx) );
%}

// shift != 0, base == 0
instruct decodeN_nullBase(iRegPdst dst, iRegNsrc src) %{
  match(Set dst (DecodeN src));
  predicate(CompressedOops::shift() != 0 &&
            CompressedOops::base() == nullptr);

  format %{ "SLDI $dst, $src, #3 \t// DecodeN (zerobased)" %}
  size(4);
  ins_encode %{
    __ sldi($dst$$Register, $src$$Register, CompressedOops::shift());
  %}
  ins_pipe(pipe_class_default);
%}

// Optimize DecodeN for disjoint base.
// Shift the narrow oop and OR it into a register that already contains the heap base.
// Base == dst must hold, and is assured by construction in postalloc_expand.
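// Sketch of the merge: with a disjoint base (base bits and shifted narrow
// oop bits occupy non-overlapping ranges), decode reduces to inserting the
// shifted narrow oop into the low bits of a register preloaded with the base:
//
//   dst = heapbase;                        // loadBase
//   rldimi(dst, src, shift, 32 - shift);   // dst |= src << shift
//
// which equals heapbase + (src << shift) because no carries can propagate
// into the base bits.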
instruct decodeN_mergeDisjoint(iRegPdst dst, iRegNsrc src, iRegLsrc base) %{
  match(Set dst (DecodeN src));
  effect(TEMP base);
  predicate(false);

  format %{ "RLDIMI $dst, $src, shift, 32-shift \t// DecodeN (disjoint base)" %}
  size(4);
  ins_encode %{
    __ rldimi($dst$$Register, $src$$Register, CompressedOops::shift(), 32-CompressedOops::shift());
  %}
  ins_pipe(pipe_class_default);
%}

// Optimize DecodeN for disjoint base.
// This node requires only one cycle on the critical path.
// We must postalloc_expand as we cannot express use_def effects where
// the used register is L and the def'ed register P.
instruct decodeN_Disjoint_notNull_Ex(iRegPdst dst, iRegNsrc src) %{
  match(Set dst (DecodeN src));
  effect(TEMP_DEF dst);
  predicate((n->bottom_type()->is_oopptr()->ptr() == TypePtr::NotNull ||
             n->bottom_type()->is_oopptr()->ptr() == TypePtr::Constant) &&
            CompressedOops::base_disjoint());
  ins_cost(DEFAULT_COST);

  format %{ "MOV $dst, heapbase \t\n"
            "RLDIMI $dst, $src, shift, 32-shift \t// decode with disjoint base" %}
  postalloc_expand %{
    loadBaseNode *n1 = new loadBaseNode();
    n1->add_req(nullptr);
    n1->_opnds[0] = op_dst;

    decodeN_mergeDisjointNode *n2 = new decodeN_mergeDisjointNode();
    n2->add_req(n_region, n_src, n1);
    n2->_opnds[0] = op_dst;
    n2->_opnds[1] = op_src;
    n2->_opnds[2] = op_dst;
    n2->_bottom_type = _bottom_type;

    assert(ra_->is_oop(this) == true, "A decodeN node must produce an oop!");
    ra_->set_oop(n2, true);

    ra_->set_pair(n1->_idx, ra_->get_reg_second(this), ra_->get_reg_first(this));
    ra_->set_pair(n2->_idx, ra_->get_reg_second(this), ra_->get_reg_first(this));

    nodes->push(n1);
    nodes->push(n2);
  %}
%}

instruct decodeN_Disjoint_isel_Ex(iRegPdst dst, iRegNsrc src, flagsReg crx) %{
  match(Set dst (DecodeN src));
  effect(TEMP_DEF dst, TEMP crx);
  predicate((n->bottom_type()->is_oopptr()->ptr() != TypePtr::NotNull &&
             n->bottom_type()->is_oopptr()->ptr() != TypePtr::Constant) &&
            CompressedOops::base_disjoint());
  ins_cost(3 * DEFAULT_COST);

  format %{ "DecodeN $dst, $src \t// decode with disjoint base using isel" %}
  postalloc_expand %{
    loadBaseNode *n1 = new loadBaseNode();
    n1->add_req(nullptr);
    n1->_opnds[0] = op_dst;

    cmpN_reg_imm0Node *n_compare = new cmpN_reg_imm0Node();
    n_compare->add_req(n_region, n_src);
    n_compare->_opnds[0] = op_crx;
    n_compare->_opnds[1] = op_src;
    n_compare->_opnds[2] = new immN_0Oper(TypeNarrowOop::NULL_PTR);

    decodeN_mergeDisjointNode *n2 = new decodeN_mergeDisjointNode();
    n2->add_req(n_region, n_src, n1);
    n2->_opnds[0] = op_dst;
    n2->_opnds[1] = op_src;
    n2->_opnds[2] = op_dst;
    n2->_bottom_type = _bottom_type;

    cond_set_0_ptrNode *n_cond_set = new cond_set_0_ptrNode();
    n_cond_set->add_req(n_region, n_compare, n2);
    n_cond_set->_opnds[0] = op_dst;
    n_cond_set->_opnds[1] = op_crx;
    n_cond_set->_opnds[2] = op_dst;
    n_cond_set->_bottom_type = _bottom_type;

    assert(ra_->is_oop(this) == true, "A decodeN node must produce an oop!");
    ra_->set_oop(n_cond_set, true);

    ra_->set_pair(n1->_idx, ra_->get_reg_second(this), ra_->get_reg_first(this));
    ra_->set_pair(n_compare->_idx, ra_->get_reg_second(n_crx), ra_->get_reg_first(n_crx));
    ra_->set_pair(n2->_idx, ra_->get_reg_second(this), ra_->get_reg_first(this));
    ra_->set_pair(n_cond_set->_idx, ra_->get_reg_second(this), ra_->get_reg_first(this));

    nodes->push(n1);
    nodes->push(n_compare);
    nodes->push(n2);
    nodes->push(n_cond_set);
  %}
%}

// src != 0, shift != 0, base != 0
instruct decodeN_notNull_addBase_Ex(iRegPdst dst, iRegNsrc src) %{
  match(Set dst (DecodeN src));
  predicate((n->bottom_type()->is_oopptr()->ptr() == TypePtr::NotNull ||
             n->bottom_type()->is_oopptr()->ptr() == TypePtr::Constant) &&
            CompressedOops::shift() != 0 &&
            CompressedOops::base() != nullptr);
  ins_cost(2 * DEFAULT_COST);

  format %{ "DecodeN $dst, $src \t// $src != nullptr, postalloc expanded" %}
  postalloc_expand( postalloc_expand_decode_oop_not_null(dst, src));
%}

// Compressed OOPs with narrow_oop_shift == 0.
instruct decodeN_unscaled(iRegPdst dst, iRegNsrc src) %{
  match(Set dst (DecodeN src));
  predicate(CompressedOops::shift() == 0);
  ins_cost(DEFAULT_COST);

  format %{ "MR $dst, $src \t// DecodeN (unscaled)" %}
  // variable size, 0 or 4.
  ins_encode %{
    __ mr_if_needed($dst$$Register, $src$$Register);
  %}
  ins_pipe(pipe_class_default);
%}

// Convert compressed oop into int for vectors alignment masking.
instruct decodeN2I_unscaled(iRegIdst dst, iRegNsrc src) %{
  match(Set dst (ConvL2I (CastP2X (DecodeN src))));
  predicate(CompressedOops::shift() == 0);
  ins_cost(DEFAULT_COST);

  format %{ "MR $dst, $src \t// (int)DecodeN (unscaled)" %}
  // variable size, 0 or 4.
  ins_encode %{
    __ mr_if_needed($dst$$Register, $src$$Register);
  %}
  ins_pipe(pipe_class_default);
%}

// Convert klass pointer into compressed form.

// Nodes for postalloc expand.

// Shift node for expand.
instruct encodePKlass_shift(iRegNdst dst, iRegNsrc src) %{
  // The match rule is needed to make it a 'MachTypeNode'!
  match(Set dst (EncodePKlass src));
  predicate(false);

  format %{ "SRDI $dst, $src, 3 \t// encode" %}
  size(4);
  ins_encode %{
    __ srdi($dst$$Register, $src$$Register, CompressedKlassPointers::shift());
  %}
  ins_pipe(pipe_class_default);
%}

// Sub node for expand.
instruct encodePKlass_sub_base(iRegPdst dst, iRegLsrc base, iRegPdst src) %{
  // The match rule is needed to make it a 'MachTypeNode'!
  match(Set dst (EncodePKlass (Binary base src)));
  predicate(false);

  format %{ "SUB $dst, $base, $src \t// encode" %}
  size(4);
  ins_encode %{
    __ subf($dst$$Register, $base$$Register, $src$$Register);
  %}
  ins_pipe(pipe_class_default);
%}

// Disjoint narrow klass base.
instruct encodePKlass_Disjoint(iRegNdst dst, iRegPsrc src) %{
  match(Set dst (EncodePKlass src));
  predicate(false /* TODO: PPC port CompressedKlassPointers::base_disjoint()*/);

  format %{ "EXTRDI $dst, $src, #32, #3 \t// encode with disjoint base" %}
  size(4);
  ins_encode %{
    __ rldicl($dst$$Register, $src$$Register, 64-CompressedKlassPointers::shift(), 32);
  %}
  ins_pipe(pipe_class_default);
%}

// shift != 0, base != 0
instruct encodePKlass_not_null_Ex(iRegNdst dst, iRegLsrc base, iRegPsrc src) %{
  match(Set dst (EncodePKlass (Binary base src)));
  predicate(false);

  format %{ "EncodePKlass $dst, $src\t// $src != Null, postalloc expanded" %}
  postalloc_expand %{
    encodePKlass_sub_baseNode *n1 = new encodePKlass_sub_baseNode();
    n1->add_req(n_region, n_base, n_src);
    n1->_opnds[0] = op_dst;
    n1->_opnds[1] = op_base;
    n1->_opnds[2] = op_src;
    n1->_bottom_type = _bottom_type;

    encodePKlass_shiftNode *n2 = new encodePKlass_shiftNode();
    n2->add_req(n_region, n1);
    n2->_opnds[0] = op_dst;
    n2->_opnds[1] = op_dst;
    n2->_bottom_type = _bottom_type;
    ra_->set_pair(n1->_idx, ra_->get_reg_second(this), ra_->get_reg_first(this));
    ra_->set_pair(n2->_idx, ra_->get_reg_second(this), ra_->get_reg_first(this));

    nodes->push(n1);
    nodes->push(n2);
  %}
%}

// shift != 0, base != 0
instruct encodePKlass_not_null_ExEx(iRegNdst dst, iRegPsrc src) %{
  match(Set dst (EncodePKlass src));
  //predicate(CompressedKlassPointers::shift() != 0 &&
  //          true /* TODO: PPC port CompressedKlassPointers::base_overlaps()*/);

  //format %{ "EncodePKlass $dst, $src\t// $src != Null, postalloc expanded" %}
  ins_cost(DEFAULT_COST*2); // Don't count constant.
  expand %{
    immL baseImm %{ (jlong)(intptr_t)CompressedKlassPointers::base() %}
    iRegLdst base;
    loadConL_Ex(base, baseImm);
    encodePKlass_not_null_Ex(dst, base, src);
  %}
%}

// Decode nodes.

// Shift node for expand.
instruct decodeNKlass_shift(iRegPdst dst, iRegPsrc src) %{
  // The match rule is needed to make it a 'MachTypeNode'!
  match(Set dst (DecodeNKlass src));
  predicate(false);

  format %{ "SLDI $dst, $src, #3 \t// DecodeNKlass" %}
  size(4);
  ins_encode %{
    __ sldi($dst$$Register, $src$$Register, CompressedKlassPointers::shift());
  %}
  ins_pipe(pipe_class_default);
%}

// Add node for expand.

instruct decodeNKlass_add_base(iRegPdst dst, iRegLsrc base, iRegPdst src) %{
  // The match rule is needed to make it a 'MachTypeNode'!
  match(Set dst (DecodeNKlass (Binary base src)));
  predicate(false);

  format %{ "ADD $dst, $base, $src \t// DecodeNKlass, add klass base" %}
  size(4);
  ins_encode %{
    __ add($dst$$Register, $base$$Register, $src$$Register);
  %}
  ins_pipe(pipe_class_default);
%}

// src != 0, shift != 0, base != 0
instruct decodeNKlass_notNull_addBase_Ex(iRegPdst dst, iRegLsrc base, iRegNsrc src) %{
  match(Set dst (DecodeNKlass (Binary base src)));
  //effect(kill src); // We need a register for the immediate result after shifting.
  predicate(false);

  format %{ "DecodeNKlass $dst = $base + ($src << 3) \t// $src != nullptr, postalloc expanded" %}
  postalloc_expand %{
    decodeNKlass_add_baseNode *n1 = new decodeNKlass_add_baseNode();
    n1->add_req(n_region, n_base, n_src);
    n1->_opnds[0] = op_dst;
    n1->_opnds[1] = op_base;
    n1->_opnds[2] = op_src;
    n1->_bottom_type = _bottom_type;

    decodeNKlass_shiftNode *n2 = new decodeNKlass_shiftNode();
    n2->add_req(n_region, n1);
    n2->_opnds[0] = op_dst;
    n2->_opnds[1] = op_dst;
    n2->_bottom_type = _bottom_type;

    ra_->set_pair(n1->_idx, ra_->get_reg_second(this), ra_->get_reg_first(this));
    ra_->set_pair(n2->_idx, ra_->get_reg_second(this), ra_->get_reg_first(this));

    nodes->push(n1);
    nodes->push(n2);
  %}
%}

// src != 0, shift != 0, base != 0
instruct decodeNKlass_notNull_addBase_ExEx(iRegPdst dst, iRegNsrc src) %{
  match(Set dst (DecodeNKlass src));
  // predicate(CompressedKlassPointers::shift() != 0 &&
  //           CompressedKlassPointers::base() != 0);

  //format %{ "DecodeNKlass $dst, $src \t// $src != nullptr, expanded" %}

  ins_cost(DEFAULT_COST*2); // Don't count constant.
  expand %{
    // We add first, then we shift. Like this, we can get along with one register less.
    // But we have to load the base pre-shifted.
    immL baseImm %{ (jlong)((intptr_t)CompressedKlassPointers::base() >> CompressedKlassPointers::shift()) %}
    iRegLdst base;
    loadConL_Ex(base, baseImm);
    decodeNKlass_notNull_addBase_Ex(dst, base, src);
  %}
%}

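// For reference: the pre-shifted base trick above relies on the klass base
// being aligned to the shift, i.e. (base >> shift) << shift == base. Then
//
//   ((base >> shift) + narrow) << shift == base + (narrow << shift)
//
// so the add can be performed on the narrow value first and a single shift
// completes the decode, saving one register.
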
//----------MemBar Instructions-----------------------------------------------
// Memory barrier flavors

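// For orientation, the PPC64 mapping used by the encodings below (the
// MacroAssembler helpers are expected to emit: acquire()/release() ->
// lwsync, fence() -> full (hw)sync):
//
//   LoadFence / MemBarAcquire     -> lwsync (or nothing, if redundant)
//   MemBarRelease / StoreFence    -> lwsync
//   MemBarStoreStore              -> store-store barrier (lwsync suffices)
//   MemBarVolatile                -> sync
//   MemBarCPUOrder                -> nothing (CPUs are self-consistent)
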
instruct membar_acquire() %{
  match(LoadFence);
  ins_cost(4*MEMORY_REF_COST);

  format %{ "MEMBAR-acquire" %}
  size(4);
  ins_encode %{
    __ acquire();
  %}
  ins_pipe(pipe_class_default);
%}

instruct unnecessary_membar_acquire() %{
  match(MemBarAcquire);
  ins_cost(0);

  format %{ " -- \t// redundant MEMBAR-acquire - empty" %}
  size(0);
  ins_encode( /*empty*/ );
  ins_pipe(pipe_class_default);
%}

instruct membar_acquire_lock() %{
  match(MemBarAcquireLock);
  ins_cost(0);

  format %{ " -- \t// redundant MEMBAR-acquire - empty (acquire as part of CAS in prior FastLock)" %}
  size(0);
  ins_encode( /*empty*/ );
  ins_pipe(pipe_class_default);
%}

instruct membar_release() %{
  match(MemBarRelease);
  match(StoreFence);
  ins_cost(4*MEMORY_REF_COST);

  format %{ "MEMBAR-release" %}
  size(4);
  ins_encode %{
    __ release();
  %}
  ins_pipe(pipe_class_default);
%}

instruct membar_storestore() %{
  match(MemBarStoreStore);
  match(StoreStoreFence);
  ins_cost(4*MEMORY_REF_COST);

  format %{ "MEMBAR-store-store" %}
  size(4);
  ins_encode %{
    __ membar(Assembler::StoreStore);
  %}
  ins_pipe(pipe_class_default);
%}

instruct membar_release_lock() %{
  match(MemBarReleaseLock);
  ins_cost(0);

  format %{ " -- \t// redundant MEMBAR-release - empty (release in FastUnlock)" %}
  size(0);
  ins_encode( /*empty*/ );
  ins_pipe(pipe_class_default);
%}

instruct membar_volatile() %{
  match(MemBarVolatile);
  ins_cost(4*MEMORY_REF_COST);

  format %{ "MEMBAR-volatile" %}
  size(4);
  ins_encode %{
    __ fence();
  %}
  ins_pipe(pipe_class_default);
%}

// This optimization is wrong on PPC. The following pattern is not supported:
//  MemBarVolatile
//   ^        ^
//   |        |
// CtrlProj MemProj
//   ^        ^
//   |        |
//   |       Load
//   |
//  MemBarVolatile
//
// The first MemBarVolatile could get optimized out! According to
// Vladimir, this pattern cannot occur on Oracle platforms.
// However, it does occur on PPC64 (because of membars in
// inline_unsafe_load_store).
//
// Add this node again if we find a good solution for inline_unsafe_load_store().
// Don't forget to look at the implementation of post_store_load_barrier again,
// we did other fixes in that method.
//instruct unnecessary_membar_volatile() %{
//  match(MemBarVolatile);
//  predicate(Matcher::post_store_load_barrier(n));
//  ins_cost(0);
//
//  format %{ " -- \t// redundant MEMBAR-volatile - empty" %}
//  size(0);
//  ins_encode( /*empty*/ );
//  ins_pipe(pipe_class_default);
//%}

instruct membar_CPUOrder() %{
  match(MemBarCPUOrder);
  ins_cost(0);

  format %{ " -- \t// MEMBAR-CPUOrder - empty: PPC64 processors are self-consistent." %}
  size(0);
  ins_encode( /*empty*/ );
  ins_pipe(pipe_class_default);
%}

//----------Conditional Move---------------------------------------------------

// Cmove using isel.
instruct cmovI_reg_isel(cmpOp cmp, flagsRegSrc crx, iRegIdst dst, iRegIsrc src) %{
  match(Set dst (CMoveI (Binary cmp crx) (Binary dst src)));
  ins_cost(DEFAULT_COST);

  format %{ "CMOVE $cmp, $crx, $dst, $src\n\t" %}
  size(4);
  ins_encode %{
    // This is a Power7 instruction for which no machine description
    // exists. Anyway, the scheduler should be off on Power7.
    int cc = $cmp$$cmpcode;
    __ isel($dst$$Register, $crx$$CondRegister,
            (Assembler::Condition)(cc & 3), /*invert*/((~cc) & 8), $src$$Register);
  %}
  ins_pipe(pipe_class_default);
%}

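// Note on the cmpcode bit layout assumed by the isel encodings above and
// below: the extraction
//
//   (Assembler::Condition)(cc & 3)   // which CR bit to test (lt/gt/eq)
//   /*invert*/ ((~cc) & 8)           // select on bit set vs. bit clear
//
// reads the low two bits as the CR field bit and bit 3 as the sign of the
// condition (negated conditions select when the CR bit is clear).
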
// Cmove using isel.
instruct cmovL_reg_isel(cmpOp cmp, flagsRegSrc crx, iRegLdst dst, iRegLsrc src) %{
  match(Set dst (CMoveL (Binary cmp crx) (Binary dst src)));
  ins_cost(DEFAULT_COST);

  format %{ "CMOVE $cmp, $crx, $dst, $src\n\t" %}
  size(4);
  ins_encode %{
    // This is a Power7 instruction for which no machine description
    // exists. Anyway, the scheduler should be off on Power7.
    int cc = $cmp$$cmpcode;
    __ isel($dst$$Register, $crx$$CondRegister,
            (Assembler::Condition)(cc & 3), /*invert*/((~cc) & 8), $src$$Register);
  %}
  ins_pipe(pipe_class_default);
%}

// Cmove using isel.
instruct cmovN_reg_isel(cmpOp cmp, flagsRegSrc crx, iRegNdst dst, iRegNsrc src) %{
  match(Set dst (CMoveN (Binary cmp crx) (Binary dst src)));
  ins_cost(DEFAULT_COST);

  format %{ "CMOVE $cmp, $crx, $dst, $src\n\t" %}
  size(4);
  ins_encode %{
    // This is a Power7 instruction for which no machine description
    // exists. Anyway, the scheduler should be off on Power7.
    int cc = $cmp$$cmpcode;
    __ isel($dst$$Register, $crx$$CondRegister,
            (Assembler::Condition)(cc & 3), /*invert*/((~cc) & 8), $src$$Register);
  %}
  ins_pipe(pipe_class_default);
%}

// Cmove using isel.
instruct cmovP_reg_isel(cmpOp cmp, flagsRegSrc crx, iRegPdst dst, iRegPsrc src) %{
  match(Set dst (CMoveP (Binary cmp crx) (Binary dst src)));
  ins_cost(DEFAULT_COST);

  format %{ "CMOVE $cmp, $crx, $dst, $src\n\t" %}
  size(4);
  ins_encode %{
    // This is a Power7 instruction for which no machine description
    // exists. Anyway, the scheduler should be off on Power7.
    int cc = $cmp$$cmpcode;
    __ isel($dst$$Register, $crx$$CondRegister,
            (Assembler::Condition)(cc & 3), /*invert*/((~cc) & 8), $src$$Register);
  %}
  ins_pipe(pipe_class_default);
%}

instruct cmovF_reg(cmpOp cmp, flagsRegSrc crx, regF dst, regF src) %{
  match(Set dst (CMoveF (Binary cmp crx) (Binary dst src)));
  ins_cost(DEFAULT_COST+BRANCH_COST);

  ins_variable_size_depending_on_alignment(true);

  format %{ "CMOVEF $cmp, $crx, $dst, $src\n\t" %}
  // Worst case is branch + move + stop, no stop without scheduler.
  size(8);
  ins_encode %{
    Label done;
    assert((Assembler::bcondCRbiIs1 & ~Assembler::bcondCRbiIs0) == 8, "check encoding");
    // Branch if not (cmp crx).
    __ bc(cc_to_inverse_boint($cmp$$cmpcode), cc_to_biint($cmp$$cmpcode, $crx$$reg), done);
    __ fmr($dst$$FloatRegister, $src$$FloatRegister);
    __ bind(done);
  %}
  ins_pipe(pipe_class_default);
%}

instruct cmovD_reg(cmpOp cmp, flagsRegSrc crx, regD dst, regD src) %{
  match(Set dst (CMoveD (Binary cmp crx) (Binary dst src)));
  ins_cost(DEFAULT_COST+BRANCH_COST);

  ins_variable_size_depending_on_alignment(true);

  format %{ "CMOVEF $cmp, $crx, $dst, $src\n\t" %}
  // Worst case is branch + move + stop, no stop without scheduler.
  size(8);
  ins_encode %{
    Label done;
    assert((Assembler::bcondCRbiIs1 & ~Assembler::bcondCRbiIs0) == 8, "check encoding");
    // Branch if not (cmp crx).
    __ bc(cc_to_inverse_boint($cmp$$cmpcode), cc_to_biint($cmp$$cmpcode, $crx$$reg), done);
    __ fmr($dst$$FloatRegister, $src$$FloatRegister);
    __ bind(done);
  %}
  ins_pipe(pipe_class_default);
%}

//----------Compare-And-Swap---------------------------------------------------

// CompareAndSwap{P,I,L} have more than one output, therefore "CmpI
// (CompareAndSwap ...)" or "If (CmpI (CompareAndSwap ..))" cannot be
// matched.

// Strong versions:

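// All CMPXCHG encodings below expand to a load-reserve/store-conditional
// loop; roughly, for the word case (sketch, simplified from
// MacroAssembler::cmpxchgw):
//
//   retry:
//     lwarx   tmp, 0, addr        // load word and take reservation
//     cmpw    CR0, tmp, expected
//     bne     CR0, fail           // value mismatch: no store
//     stwcx.  new_value, 0, addr  // store iff reservation still held
//     bne     CR0, retry          // lost reservation: try again
//   fail:
//     // boolean result derived from CR0; barriers follow per encoding
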
instruct compareAndSwapB_regP_regI_regI(iRegIdst res, iRegPdst mem_ptr, iRegIsrc src1, iRegIsrc src2, flagsRegCR0 cr0) %{
  match(Set res (CompareAndSwapB mem_ptr (Binary src1 src2)));
  effect(TEMP_DEF res, TEMP cr0); // TEMP_DEF to avoid jump
  format %{ "CMPXCHGB $res, $mem_ptr, $src1, $src2; as bool" %}
  ins_encode %{
    // CmpxchgX sets CR0 to cmpX(src1, src2) and Rres to 'true'/'false'.
    __ cmpxchgb(CR0, R0, $src1$$Register, $src2$$Register, $mem_ptr$$Register,
                MacroAssembler::MemBarNone, MacroAssembler::cmpxchgx_hint_atomic_update(),
                $res$$Register, nullptr, true);
    if (support_IRIW_for_not_multiple_copy_atomic_cpu) {
      __ isync();
    } else {
      __ sync();
    }
  %}
  ins_pipe(pipe_class_default);
%}

instruct compareAndSwapS_regP_regI_regI(iRegIdst res, iRegPdst mem_ptr, iRegIsrc src1, iRegIsrc src2, flagsRegCR0 cr0) %{
  match(Set res (CompareAndSwapS mem_ptr (Binary src1 src2)));
  effect(TEMP_DEF res, TEMP cr0); // TEMP_DEF to avoid jump
  format %{ "CMPXCHGH $res, $mem_ptr, $src1, $src2; as bool" %}
  ins_encode %{
    // CmpxchgX sets CR0 to cmpX(src1, src2) and Rres to 'true'/'false'.
    __ cmpxchgh(CR0, R0, $src1$$Register, $src2$$Register, $mem_ptr$$Register,
                MacroAssembler::MemBarNone, MacroAssembler::cmpxchgx_hint_atomic_update(),
                $res$$Register, nullptr, true);
    if (support_IRIW_for_not_multiple_copy_atomic_cpu) {
      __ isync();
    } else {
      __ sync();
    }
  %}
  ins_pipe(pipe_class_default);
%}

instruct compareAndSwapI_regP_regI_regI(iRegIdst res, iRegPdst mem_ptr, iRegIsrc src1, iRegIsrc src2, flagsRegCR0 cr0) %{
  match(Set res (CompareAndSwapI mem_ptr (Binary src1 src2)));
  effect(TEMP_DEF res, TEMP cr0); // TEMP_DEF to avoid jump
  format %{ "CMPXCHGW $res, $mem_ptr, $src1, $src2; as bool" %}
  ins_encode %{
    // CmpxchgX sets CR0 to cmpX(src1, src2) and Rres to 'true'/'false'.
    __ cmpxchgw(CR0, R0, $src1$$Register, $src2$$Register, $mem_ptr$$Register,
                MacroAssembler::MemBarNone, MacroAssembler::cmpxchgx_hint_atomic_update(),
                $res$$Register, nullptr, true);
    if (support_IRIW_for_not_multiple_copy_atomic_cpu) {
      __ isync();
    } else {
      __ sync();
    }
  %}
  ins_pipe(pipe_class_default);
%}

instruct compareAndSwapN_regP_regN_regN(iRegIdst res, iRegPdst mem_ptr, iRegNsrc src1, iRegNsrc src2, flagsRegCR0 cr0) %{
  match(Set res (CompareAndSwapN mem_ptr (Binary src1 src2)));
  predicate(n->as_LoadStore()->barrier_data() == 0);
  effect(TEMP_DEF res, TEMP cr0); // TEMP_DEF to avoid jump
  format %{ "CMPXCHGW $res, $mem_ptr, $src1, $src2; as bool" %}
  ins_encode %{
    // CmpxchgX sets CR0 to cmpX(src1, src2) and Rres to 'true'/'false'.
    __ cmpxchgw(CR0, R0, $src1$$Register, $src2$$Register, $mem_ptr$$Register,
                MacroAssembler::MemBarNone, MacroAssembler::cmpxchgx_hint_atomic_update(),
                $res$$Register, nullptr, true);
    if (support_IRIW_for_not_multiple_copy_atomic_cpu) {
      __ isync();
    } else {
      __ sync();
    }
  %}
  ins_pipe(pipe_class_default);
%}

instruct compareAndSwapL_regP_regL_regL(iRegIdst res, iRegPdst mem_ptr, iRegLsrc src1, iRegLsrc src2, flagsRegCR0 cr0) %{
  match(Set res (CompareAndSwapL mem_ptr (Binary src1 src2)));
  effect(TEMP_DEF res, TEMP cr0); // TEMP_DEF to avoid jump
  format %{ "CMPXCHGD $res, $mem_ptr, $src1, $src2; as bool" %}
  ins_encode %{
    // CmpxchgX sets CR0 to cmpX(src1, src2) and Rres to 'true'/'false'.
    __ cmpxchgd(CR0, R0, $src1$$Register, $src2$$Register, $mem_ptr$$Register,
                MacroAssembler::MemBarNone, MacroAssembler::cmpxchgx_hint_atomic_update(),
                $res$$Register, nullptr, true);
    if (support_IRIW_for_not_multiple_copy_atomic_cpu) {
      __ isync();
    } else {
      __ sync();
    }
  %}
  ins_pipe(pipe_class_default);
%}

instruct compareAndSwapP_regP_regP_regP(iRegIdst res, iRegPdst mem_ptr, iRegPsrc src1, iRegPsrc src2, flagsRegCR0 cr0) %{
  match(Set res (CompareAndSwapP mem_ptr (Binary src1 src2)));
  effect(TEMP_DEF res, TEMP cr0); // TEMP_DEF to avoid jump
  predicate(n->as_LoadStore()->barrier_data() == 0);
  format %{ "CMPXCHGD $res, $mem_ptr, $src1, $src2; as bool; ptr" %}
  ins_encode %{
    // CmpxchgX sets CR0 to cmpX(src1, src2) and Rres to 'true'/'false'.
    __ cmpxchgd(CR0, R0, $src1$$Register, $src2$$Register, $mem_ptr$$Register,
                MacroAssembler::MemBarNone, MacroAssembler::cmpxchgx_hint_atomic_update(),
                $res$$Register, nullptr, true);
    if (support_IRIW_for_not_multiple_copy_atomic_cpu) {
      __ isync();
    } else {
      __ sync();
    }
  %}
  ins_pipe(pipe_class_default);
%}

// Weak versions:

instruct weakCompareAndSwapB_regP_regI_regI(iRegIdst res, iRegPdst mem_ptr, iRegIsrc src1, iRegIsrc src2, flagsRegCR0 cr0) %{
  match(Set res (WeakCompareAndSwapB mem_ptr (Binary src1 src2)));
  predicate(((CompareAndSwapNode*)n)->order() != MemNode::acquire && ((CompareAndSwapNode*)n)->order() != MemNode::seqcst);
  effect(TEMP_DEF res, TEMP cr0); // TEMP_DEF to avoid jump
  format %{ "weak CMPXCHGB $res, $mem_ptr, $src1, $src2; as bool" %}
  ins_encode %{
    // CmpxchgX sets CR0 to cmpX(src1, src2) and Rres to 'true'/'false'.
    __ cmpxchgb(CR0, R0, $src1$$Register, $src2$$Register, $mem_ptr$$Register,
                MacroAssembler::MemBarNone,
                MacroAssembler::cmpxchgx_hint_atomic_update(), $res$$Register, nullptr, true, /*weak*/ true);
  %}
  ins_pipe(pipe_class_default);
%}

instruct weakCompareAndSwapB_acq_regP_regI_regI(iRegIdst res, iRegPdst mem_ptr, iRegIsrc src1, iRegIsrc src2, flagsRegCR0 cr0) %{
  match(Set res (WeakCompareAndSwapB mem_ptr (Binary src1 src2)));
  predicate((((CompareAndSwapNode*)n)->order() == MemNode::acquire || ((CompareAndSwapNode*)n)->order() == MemNode::seqcst));
  effect(TEMP_DEF res, TEMP cr0); // TEMP_DEF to avoid jump
  format %{ "weak CMPXCHGB acq $res, $mem_ptr, $src1, $src2; as bool" %}
  ins_encode %{
    // CmpxchgX sets CR0 to cmpX(src1, src2) and Rres to 'true'/'false'.
    __ cmpxchgb(CR0, R0, $src1$$Register, $src2$$Register, $mem_ptr$$Register,
                support_IRIW_for_not_multiple_copy_atomic_cpu ? MacroAssembler::MemBarAcq : MacroAssembler::MemBarFenceAfter,
                MacroAssembler::cmpxchgx_hint_atomic_update(), $res$$Register, nullptr, true, /*weak*/ true);
  %}
  ins_pipe(pipe_class_default);
%}

instruct weakCompareAndSwapS_regP_regI_regI(iRegIdst res, iRegPdst mem_ptr, iRegIsrc src1, iRegIsrc src2, flagsRegCR0 cr0) %{
  match(Set res (WeakCompareAndSwapS mem_ptr (Binary src1 src2)));
  predicate(((CompareAndSwapNode*)n)->order() != MemNode::acquire && ((CompareAndSwapNode*)n)->order() != MemNode::seqcst);
  effect(TEMP_DEF res, TEMP cr0); // TEMP_DEF to avoid jump
  format %{ "weak CMPXCHGH $res, $mem_ptr, $src1, $src2; as bool" %}
  ins_encode %{
    // CmpxchgX sets CR0 to cmpX(src1, src2) and Rres to 'true'/'false'.
    __ cmpxchgh(CR0, R0, $src1$$Register, $src2$$Register, $mem_ptr$$Register,
                MacroAssembler::MemBarNone,
                MacroAssembler::cmpxchgx_hint_atomic_update(), $res$$Register, nullptr, true, /*weak*/ true);
  %}
  ins_pipe(pipe_class_default);
%}

instruct weakCompareAndSwapS_acq_regP_regI_regI(iRegIdst res, iRegPdst mem_ptr, iRegIsrc src1, iRegIsrc src2, flagsRegCR0 cr0) %{
  match(Set res (WeakCompareAndSwapS mem_ptr (Binary src1 src2)));
  predicate((((CompareAndSwapNode*)n)->order() == MemNode::acquire || ((CompareAndSwapNode*)n)->order() == MemNode::seqcst));
  effect(TEMP_DEF res, TEMP cr0); // TEMP_DEF to avoid jump
  format %{ "weak CMPXCHGH acq $res, $mem_ptr, $src1, $src2; as bool" %}
  ins_encode %{
    // CmpxchgX sets CR0 to cmpX(src1, src2) and Rres to 'true'/'false'.
    __ cmpxchgh(CR0, R0, $src1$$Register, $src2$$Register, $mem_ptr$$Register,
                support_IRIW_for_not_multiple_copy_atomic_cpu ? MacroAssembler::MemBarAcq : MacroAssembler::MemBarFenceAfter,
                MacroAssembler::cmpxchgx_hint_atomic_update(), $res$$Register, nullptr, true, /*weak*/ true);
  %}
  ins_pipe(pipe_class_default);
%}

instruct weakCompareAndSwapI_regP_regI_regI(iRegIdst res, iRegPdst mem_ptr, iRegIsrc src1, iRegIsrc src2, flagsRegCR0 cr0) %{
  match(Set res (WeakCompareAndSwapI mem_ptr (Binary src1 src2)));
  predicate(((CompareAndSwapNode*)n)->order() != MemNode::acquire && ((CompareAndSwapNode*)n)->order() != MemNode::seqcst);
  effect(TEMP_DEF res, TEMP cr0); // TEMP_DEF to avoid jump
  format %{ "weak CMPXCHGW $res, $mem_ptr, $src1, $src2; as bool" %}
  ins_encode %{
    // CmpxchgX sets CR0 to cmpX(src1, src2) and Rres to 'true'/'false'.
    __ cmpxchgw(CR0, R0, $src1$$Register, $src2$$Register, $mem_ptr$$Register,
                MacroAssembler::MemBarNone,
                MacroAssembler::cmpxchgx_hint_atomic_update(), $res$$Register, nullptr, true, /*weak*/ true);
  %}
  ins_pipe(pipe_class_default);
%}

instruct weakCompareAndSwapI_acq_regP_regI_regI(iRegIdst res, iRegPdst mem_ptr, iRegIsrc src1, iRegIsrc src2, flagsRegCR0 cr0) %{
  match(Set res (WeakCompareAndSwapI mem_ptr (Binary src1 src2)));
  predicate(((CompareAndSwapNode*)n)->order() == MemNode::acquire || ((CompareAndSwapNode*)n)->order() == MemNode::seqcst);
  effect(TEMP_DEF res, TEMP cr0); // TEMP_DEF to avoid jump
  format %{ "weak CMPXCHGW acq $res, $mem_ptr, $src1, $src2; as bool" %}
  ins_encode %{
    // CmpxchgX sets CR0 to cmpX(src1, src2) and Rres to 'true'/'false'.
    // Acquire only needed in successful case. Weak node is allowed to report unsuccessful in additional rare cases and
    // value is never passed to caller.
    __ cmpxchgw(CR0, R0, $src1$$Register, $src2$$Register, $mem_ptr$$Register,
                support_IRIW_for_not_multiple_copy_atomic_cpu ? MacroAssembler::MemBarAcq : MacroAssembler::MemBarFenceAfter,
                MacroAssembler::cmpxchgx_hint_atomic_update(), $res$$Register, nullptr, true, /*weak*/ true);
  %}
  ins_pipe(pipe_class_default);
%}

instruct weakCompareAndSwapN_regP_regN_regN(iRegIdst res, iRegPdst mem_ptr, iRegNsrc src1, iRegNsrc src2, flagsRegCR0 cr0) %{
  match(Set res (WeakCompareAndSwapN mem_ptr (Binary src1 src2)));
  predicate(((CompareAndSwapNode*)n)->order() != MemNode::acquire && ((CompareAndSwapNode*)n)->order() != MemNode::seqcst && n->as_LoadStore()->barrier_data() == 0);
  effect(TEMP_DEF res, TEMP cr0); // TEMP_DEF to avoid jump
  format %{ "weak CMPXCHGW $res, $mem_ptr, $src1, $src2; as bool" %}
  ins_encode %{
    // CmpxchgX sets CR0 to cmpX(src1, src2) and Rres to 'true'/'false'.
    __ cmpxchgw(CR0, R0, $src1$$Register, $src2$$Register, $mem_ptr$$Register,
                MacroAssembler::MemBarNone,
                MacroAssembler::cmpxchgx_hint_atomic_update(), $res$$Register, nullptr, true, /*weak*/ true);
  %}
  ins_pipe(pipe_class_default);
%}

instruct weakCompareAndSwapN_acq_regP_regN_regN(iRegIdst res, iRegPdst mem_ptr, iRegNsrc src1, iRegNsrc src2, flagsRegCR0 cr0) %{
  match(Set res (WeakCompareAndSwapN mem_ptr (Binary src1 src2)));
  predicate((((CompareAndSwapNode*)n)->order() == MemNode::acquire || ((CompareAndSwapNode*)n)->order() == MemNode::seqcst) && n->as_LoadStore()->barrier_data() == 0);
  effect(TEMP_DEF res, TEMP cr0); // TEMP_DEF to avoid jump
  format %{ "weak CMPXCHGW acq $res, $mem_ptr, $src1, $src2; as bool" %}
  ins_encode %{
    // CmpxchgX sets CR0 to cmpX(src1, src2) and Rres to 'true'/'false'.
    // Acquire only needed in successful case. Weak node is allowed to report unsuccessful in additional rare cases and
    // value is never passed to caller.
    __ cmpxchgw(CR0, R0, $src1$$Register, $src2$$Register, $mem_ptr$$Register,
                support_IRIW_for_not_multiple_copy_atomic_cpu ? MacroAssembler::MemBarAcq : MacroAssembler::MemBarFenceAfter,
                MacroAssembler::cmpxchgx_hint_atomic_update(), $res$$Register, nullptr, true, /*weak*/ true);
  %}
  ins_pipe(pipe_class_default);
%}

instruct weakCompareAndSwapL_regP_regL_regL(iRegIdst res, iRegPdst mem_ptr, iRegLsrc src1, iRegLsrc src2, flagsRegCR0 cr0) %{
  match(Set res (WeakCompareAndSwapL mem_ptr (Binary src1 src2)));
  predicate(((CompareAndSwapNode*)n)->order() != MemNode::acquire && ((CompareAndSwapNode*)n)->order() != MemNode::seqcst);
  effect(TEMP_DEF res, TEMP cr0); // TEMP_DEF to avoid jump
  format %{ "weak CMPXCHGD $res, $mem_ptr, $src1, $src2; as bool" %}
  ins_encode %{
    // CmpxchgX sets CR0 to cmpX(src1, src2) and Rres to 'true'/'false'.
    __ cmpxchgd(CR0, R0, $src1$$Register, $src2$$Register, $mem_ptr$$Register,
                MacroAssembler::MemBarNone,
                MacroAssembler::cmpxchgx_hint_atomic_update(), $res$$Register, nullptr, true, /*weak*/ true);
  %}
  ins_pipe(pipe_class_default);
%}

instruct weakCompareAndSwapL_acq_regP_regL_regL(iRegIdst res, iRegPdst mem_ptr, iRegLsrc src1, iRegLsrc src2, flagsRegCR0 cr0) %{
  match(Set res (WeakCompareAndSwapL mem_ptr (Binary src1 src2)));
  predicate(((CompareAndSwapNode*)n)->order() == MemNode::acquire || ((CompareAndSwapNode*)n)->order() == MemNode::seqcst);
  effect(TEMP_DEF res, TEMP cr0); // TEMP_DEF to avoid jump
  format %{ "weak CMPXCHGD acq $res, $mem_ptr, $src1, $src2; as bool" %}
  ins_encode %{
    // CmpxchgX sets CR0 to cmpX(src1, src2) and Rres to 'true'/'false'.
    // Acquire only needed in successful case. Weak node is allowed to report unsuccessful in additional rare cases and
    // value is never passed to caller.
    __ cmpxchgd(CR0, R0, $src1$$Register, $src2$$Register, $mem_ptr$$Register,
                support_IRIW_for_not_multiple_copy_atomic_cpu ? MacroAssembler::MemBarAcq : MacroAssembler::MemBarFenceAfter,
                MacroAssembler::cmpxchgx_hint_atomic_update(), $res$$Register, nullptr, true, /*weak*/ true);
  %}
  ins_pipe(pipe_class_default);
%}

instruct weakCompareAndSwapP_regP_regP_regP(iRegIdst res, iRegPdst mem_ptr, iRegPsrc src1, iRegPsrc src2, flagsRegCR0 cr0) %{
  match(Set res (WeakCompareAndSwapP mem_ptr (Binary src1 src2)));
  predicate((((CompareAndSwapNode*)n)->order() != MemNode::acquire && ((CompareAndSwapNode*)n)->order() != MemNode::seqcst) && n->as_LoadStore()->barrier_data() == 0);
  effect(TEMP_DEF res, TEMP cr0); // TEMP_DEF to avoid jump
  format %{ "weak CMPXCHGD $res, $mem_ptr, $src1, $src2; as bool; ptr" %}
  ins_encode %{
    // CmpxchgX sets CR0 to cmpX(src1, src2) and Rres to 'true'/'false'.
    __ cmpxchgd(CR0, R0, $src1$$Register, $src2$$Register, $mem_ptr$$Register,
                MacroAssembler::MemBarNone,
                MacroAssembler::cmpxchgx_hint_atomic_update(), $res$$Register, nullptr, true, /*weak*/ true);
  %}
  ins_pipe(pipe_class_default);
%}

instruct weakCompareAndSwapP_acq_regP_regP_regP(iRegIdst res, iRegPdst mem_ptr, iRegPsrc src1, iRegPsrc src2, flagsRegCR0 cr0) %{
  match(Set res (WeakCompareAndSwapP mem_ptr (Binary src1 src2)));
  predicate((((CompareAndSwapNode*)n)->order() == MemNode::acquire || ((CompareAndSwapNode*)n)->order() == MemNode::seqcst) && n->as_LoadStore()->barrier_data() == 0);
  effect(TEMP_DEF res, TEMP cr0); // TEMP_DEF to avoid jump
  format %{ "weak CMPXCHGD acq $res, $mem_ptr, $src1, $src2; as bool; ptr" %}
  ins_encode %{
    // CmpxchgX sets CR0 to cmpX(src1, src2) and Rres to 'true'/'false'.
    // Acquire only needed in successful case. Weak node is allowed to report unsuccessful in additional rare cases and
    // value is never passed to caller.
    __ cmpxchgd(CR0, R0, $src1$$Register, $src2$$Register, $mem_ptr$$Register,
                support_IRIW_for_not_multiple_copy_atomic_cpu ? MacroAssembler::MemBarAcq : MacroAssembler::MemBarFenceAfter,
                MacroAssembler::cmpxchgx_hint_atomic_update(), $res$$Register, nullptr, true, /*weak*/ true);
  %}
  ins_pipe(pipe_class_default);
%}

// CompareAndExchange

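// Unlike CompareAndSwap (boolean result), CompareAndExchange returns the
// value found in memory and lets the caller test success, e.g. (sketch):
//
//   jint witness = cae(addr, expected, new_value);
//   bool swapped = (witness == expected);
//
// This is why the encodings below pass $res as the current-value register
// and noreg for the boolean success register of the cmpxchg helpers.
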
instruct compareAndExchangeB_regP_regI_regI(iRegIdst res, iRegPdst mem_ptr, iRegIsrc src1, iRegIsrc src2, flagsRegCR0 cr0) %{
  match(Set res (CompareAndExchangeB mem_ptr (Binary src1 src2)));
  predicate(((CompareAndSwapNode*)n)->order() != MemNode::acquire && ((CompareAndSwapNode*)n)->order() != MemNode::seqcst);
  effect(TEMP_DEF res, TEMP cr0);
  format %{ "CMPXCHGB $res, $mem_ptr, $src1, $src2; as int" %}
  ins_encode %{
    // CmpxchgX sets CR0 to cmpX(src1, src2) and Rres to 'true'/'false'.
    __ cmpxchgb(CR0, $res$$Register, $src1$$Register, $src2$$Register, $mem_ptr$$Register,
                MacroAssembler::MemBarNone, MacroAssembler::cmpxchgx_hint_atomic_update(),
                noreg, nullptr, true);
  %}
  ins_pipe(pipe_class_default);
%}

instruct compareAndExchangeB_acq_regP_regI_regI(iRegIdst res, iRegPdst mem_ptr, iRegIsrc src1, iRegIsrc src2, flagsRegCR0 cr0) %{
  match(Set res (CompareAndExchangeB mem_ptr (Binary src1 src2)));
  predicate((((CompareAndSwapNode*)n)->order() == MemNode::acquire || ((CompareAndSwapNode*)n)->order() == MemNode::seqcst));
  effect(TEMP_DEF res, TEMP cr0);
  format %{ "CMPXCHGB acq $res, $mem_ptr, $src1, $src2; as int" %}
  ins_encode %{
    // CmpxchgX sets CR0 to cmpX(src1, src2) and Rres to 'true'/'false'.
    __ cmpxchgb(CR0, $res$$Register, $src1$$Register, $src2$$Register, $mem_ptr$$Register,
                MacroAssembler::MemBarNone, MacroAssembler::cmpxchgx_hint_atomic_update(),
                noreg, nullptr, true);
    if (support_IRIW_for_not_multiple_copy_atomic_cpu) {
      __ isync();
    } else {
      // isync would be sufficient in case of CompareAndExchangeAcquire, but we currently don't optimize for that.
      __ sync();
    }
  %}
  ins_pipe(pipe_class_default);
%}


instruct compareAndExchangeS_regP_regI_regI(iRegIdst res, iRegPdst mem_ptr, iRegIsrc src1, iRegIsrc src2, flagsRegCR0 cr0) %{
  match(Set res (CompareAndExchangeS mem_ptr (Binary src1 src2)));
  predicate(((CompareAndSwapNode*)n)->order() != MemNode::acquire && ((CompareAndSwapNode*)n)->order() != MemNode::seqcst);
  effect(TEMP_DEF res, TEMP cr0);
  format %{ "CMPXCHGH $res, $mem_ptr, $src1, $src2; as int" %}
  ins_encode %{
    // CmpxchgX sets CR0 to cmpX(src1, src2) and Rres to 'true'/'false'.
    __ cmpxchgh(CR0, $res$$Register, $src1$$Register, $src2$$Register, $mem_ptr$$Register,
                MacroAssembler::MemBarNone, MacroAssembler::cmpxchgx_hint_atomic_update(),
                noreg, nullptr, true);
  %}
  ins_pipe(pipe_class_default);
%}

instruct compareAndExchangeS_acq_regP_regI_regI(iRegIdst res, iRegPdst mem_ptr, iRegIsrc src1, iRegIsrc src2, flagsRegCR0 cr0) %{
  match(Set res (CompareAndExchangeS mem_ptr (Binary src1 src2)));
  predicate((((CompareAndSwapNode*)n)->order() == MemNode::acquire || ((CompareAndSwapNode*)n)->order() == MemNode::seqcst));
  effect(TEMP_DEF res, TEMP cr0);
  format %{ "CMPXCHGH acq $res, $mem_ptr, $src1, $src2; as int" %}
  ins_encode %{
    // CmpxchgX sets CR0 to cmpX(src1, src2) and Rres to 'true'/'false'.
    __ cmpxchgh(CR0, $res$$Register, $src1$$Register, $src2$$Register, $mem_ptr$$Register,
                MacroAssembler::MemBarNone, MacroAssembler::cmpxchgx_hint_atomic_update(),
                noreg, nullptr, true);
    if (support_IRIW_for_not_multiple_copy_atomic_cpu) {
      __ isync();
    } else {
      // isync would be sufficient in case of CompareAndExchangeAcquire, but we currently don't optimize for that.
      __ sync();
    }
  %}
  ins_pipe(pipe_class_default);
%}

instruct compareAndExchangeI_regP_regI_regI(iRegIdst res, iRegPdst mem_ptr, iRegIsrc src1, iRegIsrc src2, flagsRegCR0 cr0) %{
  match(Set res (CompareAndExchangeI mem_ptr (Binary src1 src2)));
  predicate(((CompareAndSwapNode*)n)->order() != MemNode::acquire && ((CompareAndSwapNode*)n)->order() != MemNode::seqcst);
  effect(TEMP_DEF res, TEMP cr0);
  format %{ "CMPXCHGW $res, $mem_ptr, $src1, $src2; as int" %}
  ins_encode %{
    // CmpxchgX sets CR0 to cmpX(src1, src2) and Rres to 'true'/'false'.
    __ cmpxchgw(CR0, $res$$Register, $src1$$Register, $src2$$Register, $mem_ptr$$Register,
                MacroAssembler::MemBarNone, MacroAssembler::cmpxchgx_hint_atomic_update(),
                noreg, nullptr, true);
  %}
  ins_pipe(pipe_class_default);
%}

instruct compareAndExchangeI_acq_regP_regI_regI(iRegIdst res, iRegPdst mem_ptr, iRegIsrc src1, iRegIsrc src2, flagsRegCR0 cr0) %{
  match(Set res (CompareAndExchangeI mem_ptr (Binary src1 src2)));
  predicate(((CompareAndSwapNode*)n)->order() == MemNode::acquire || ((CompareAndSwapNode*)n)->order() == MemNode::seqcst);
  effect(TEMP_DEF res, TEMP cr0);
  format %{ "CMPXCHGW acq $res, $mem_ptr, $src1, $src2; as int" %}
  ins_encode %{
    // CmpxchgX sets CR0 to cmpX(src1, src2) and Rres to 'true'/'false'.
    __ cmpxchgw(CR0, $res$$Register, $src1$$Register, $src2$$Register, $mem_ptr$$Register,
                MacroAssembler::MemBarNone, MacroAssembler::cmpxchgx_hint_atomic_update(),
                noreg, nullptr, true);
    if (support_IRIW_for_not_multiple_copy_atomic_cpu) {
      __ isync();
    } else {
      // isync would be sufficient in case of CompareAndExchangeAcquire, but we currently don't optimize for that.
      __ sync();
    }
  %}
  ins_pipe(pipe_class_default);
%}

instruct compareAndExchangeN_regP_regN_regN(iRegNdst res, iRegPdst mem_ptr, iRegNsrc src1, iRegNsrc src2, flagsRegCR0 cr0) %{
  match(Set res (CompareAndExchangeN mem_ptr (Binary src1 src2)));
  predicate(((CompareAndSwapNode*)n)->order() != MemNode::acquire && ((CompareAndSwapNode*)n)->order() != MemNode::seqcst && n->as_LoadStore()->barrier_data() == 0);
  effect(TEMP_DEF res, TEMP cr0);
  format %{ "CMPXCHGW $res, $mem_ptr, $src1, $src2; as narrow oop" %}
  ins_encode %{
    // CmpxchgX sets CR0 to cmpX(src1, src2) and Rres to 'true'/'false'.
    __ cmpxchgw(CR0, $res$$Register, $src1$$Register, $src2$$Register, $mem_ptr$$Register,
                MacroAssembler::MemBarNone, MacroAssembler::cmpxchgx_hint_atomic_update(),
                noreg, nullptr, true);
  %}
  ins_pipe(pipe_class_default);
%}

instruct compareAndExchangeN_acq_regP_regN_regN(iRegNdst res, iRegPdst mem_ptr, iRegNsrc src1, iRegNsrc src2, flagsRegCR0 cr0) %{
  match(Set res (CompareAndExchangeN mem_ptr (Binary src1 src2)));
  predicate((((CompareAndSwapNode*)n)->order() == MemNode::acquire || ((CompareAndSwapNode*)n)->order() == MemNode::seqcst) && n->as_LoadStore()->barrier_data() == 0);
  effect(TEMP_DEF res, TEMP cr0);
  format %{ "CMPXCHGW acq $res, $mem_ptr, $src1, $src2; as narrow oop" %}
  ins_encode %{
    // CmpxchgX sets CR0 to cmpX(src1, src2) and Rres to 'true'/'false'.
    __ cmpxchgw(CR0, $res$$Register, $src1$$Register, $src2$$Register, $mem_ptr$$Register,
                MacroAssembler::MemBarNone, MacroAssembler::cmpxchgx_hint_atomic_update(),
                noreg, nullptr, true);
    if (support_IRIW_for_not_multiple_copy_atomic_cpu) {
      __ isync();
    } else {
      // isync would be sufficient in case of CompareAndExchangeAcquire, but we currently don't optimize for that.
      __ sync();
    }
  %}
  ins_pipe(pipe_class_default);
%}

instruct compareAndExchangeL_regP_regL_regL(iRegLdst res, iRegPdst mem_ptr, iRegLsrc src1, iRegLsrc src2, flagsRegCR0 cr0) %{
  match(Set res (CompareAndExchangeL mem_ptr (Binary src1 src2)));
  predicate(((CompareAndSwapNode*)n)->order() != MemNode::acquire && ((CompareAndSwapNode*)n)->order() != MemNode::seqcst);
  effect(TEMP_DEF res, TEMP cr0);
  format %{ "CMPXCHGD $res, $mem_ptr, $src1, $src2; as long" %}
  ins_encode %{
    // CmpxchgX sets CR0 to cmpX(src1, src2) and Rres to 'true'/'false'.
    __ cmpxchgd(CR0, $res$$Register, $src1$$Register, $src2$$Register, $mem_ptr$$Register,
                MacroAssembler::MemBarNone, MacroAssembler::cmpxchgx_hint_atomic_update(),
                noreg, nullptr, true);
  %}
  ins_pipe(pipe_class_default);
%}

instruct compareAndExchangeL_acq_regP_regL_regL(iRegLdst res, iRegPdst mem_ptr, iRegLsrc src1, iRegLsrc src2, flagsRegCR0 cr0) %{
  match(Set res (CompareAndExchangeL mem_ptr (Binary src1 src2)));
  predicate(((CompareAndSwapNode*)n)->order() == MemNode::acquire || ((CompareAndSwapNode*)n)->order() == MemNode::seqcst);
  effect(TEMP_DEF res, TEMP cr0);
  format %{ "CMPXCHGD acq $res, $mem_ptr, $src1, $src2; as long" %}
  ins_encode %{
    // CmpxchgX sets CR0 to cmpX(src1, src2) and Rres to 'true'/'false'.
    __ cmpxchgd(CR0, $res$$Register, $src1$$Register, $src2$$Register, $mem_ptr$$Register,
                MacroAssembler::MemBarNone, MacroAssembler::cmpxchgx_hint_atomic_update(),
                noreg, nullptr, true);
    if (support_IRIW_for_not_multiple_copy_atomic_cpu) {
      __ isync();
    } else {
      // isync would be sufficient in case of CompareAndExchangeAcquire, but we currently don't optimize for that.
      __ sync();
    }
  %}
  ins_pipe(pipe_class_default);
%}

instruct compareAndExchangeP_regP_regP_regP(iRegPdst res, iRegPdst mem_ptr, iRegPsrc src1, iRegPsrc src2, flagsRegCR0 cr0) %{
  match(Set res (CompareAndExchangeP mem_ptr (Binary src1 src2)));
  predicate((((CompareAndSwapNode*)n)->order() != MemNode::acquire && ((CompareAndSwapNode*)n)->order() != MemNode::seqcst)
            && n->as_LoadStore()->barrier_data() == 0);
  effect(TEMP_DEF res, TEMP cr0);
  format %{ "CMPXCHGD $res, $mem_ptr, $src1, $src2; as ptr; ptr" %}
  ins_encode %{
    // CmpxchgX sets CR0 to cmpX(src1, src2) and Rres to 'true'/'false'.
    __ cmpxchgd(CR0, $res$$Register, $src1$$Register, $src2$$Register, $mem_ptr$$Register,
                MacroAssembler::MemBarNone, MacroAssembler::cmpxchgx_hint_atomic_update(),
                noreg, nullptr, true);
  %}
  ins_pipe(pipe_class_default);
%}

instruct compareAndExchangeP_acq_regP_regP_regP(iRegPdst res, iRegPdst mem_ptr, iRegPsrc src1, iRegPsrc src2, flagsRegCR0 cr0) %{
  match(Set res (CompareAndExchangeP mem_ptr (Binary src1 src2)));
  predicate((((CompareAndSwapNode*)n)->order() == MemNode::acquire || ((CompareAndSwapNode*)n)->order() == MemNode::seqcst)
            && n->as_LoadStore()->barrier_data() == 0);
  effect(TEMP_DEF res, TEMP cr0);
  format %{ "CMPXCHGD acq $res, $mem_ptr, $src1, $src2; as ptr; ptr" %}
  ins_encode %{
    // CmpxchgX sets CR0 to cmpX(src1, src2) and Rres to 'true'/'false'.
    __ cmpxchgd(CR0, $res$$Register, $src1$$Register, $src2$$Register, $mem_ptr$$Register,
                MacroAssembler::MemBarNone, MacroAssembler::cmpxchgx_hint_atomic_update(),
                noreg, nullptr, true);
    if (support_IRIW_for_not_multiple_copy_atomic_cpu) {
      __ isync();
    } else {
      // isync would be sufficient in case of CompareAndExchangeAcquire, but we currently don't optimize for that.
      __ sync();
    }
  %}
  ins_pipe(pipe_class_default);
%}

// Special RMW

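// The getandadd/getandset helpers used below follow the same
// load-reserve/store-conditional pattern; roughly, for the word case
// (sketch, simplified):
//
//   retry:
//     lwarx   res, 0, addr       // load current value and take reservation
//     add     tmp, res, src      // (getandset: tmp = src)
//     stwcx.  tmp, 0, addr       // store iff reservation still held
//     bne     CR0, retry
//   // res holds the old value; barriers follow as in the encodings below
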
instruct getAndAddB(iRegIdst res, iRegPdst mem_ptr, iRegIsrc src, flagsRegCR0 cr0) %{
|
|
match(Set res (GetAndAddB mem_ptr src));
|
|
effect(TEMP_DEF res, TEMP cr0);
|
|
format %{ "GetAndAddB $res, $mem_ptr, $src" %}
|
|
ins_encode %{
|
|
__ getandaddb($res$$Register, $src$$Register, $mem_ptr$$Register,
|
|
R0, noreg, noreg, MacroAssembler::cmpxchgx_hint_atomic_update());
|
|
if (support_IRIW_for_not_multiple_copy_atomic_cpu) {
|
|
__ isync();
|
|
} else {
|
|
__ sync();
|
|
}
|
|
%}
|
|
ins_pipe(pipe_class_default);
|
|
%}
|
|
|
|
instruct getAndAddS(iRegIdst res, iRegPdst mem_ptr, iRegIsrc src, flagsRegCR0 cr0) %{
|
|
match(Set res (GetAndAddS mem_ptr src));
|
|
effect(TEMP_DEF res, TEMP cr0);
|
|
format %{ "GetAndAddS $res, $mem_ptr, $src" %}
|
|
ins_encode %{
|
|
__ getandaddh($res$$Register, $src$$Register, $mem_ptr$$Register,
|
|
R0, noreg, noreg, MacroAssembler::cmpxchgx_hint_atomic_update());
|
|
if (support_IRIW_for_not_multiple_copy_atomic_cpu) {
|
|
__ isync();
|
|
} else {
|
|
__ sync();
|
|
}
|
|
%}
|
|
ins_pipe(pipe_class_default);
|
|
%}
|
|
|
|
|
|
instruct getAndAddI(iRegIdst res, iRegPdst mem_ptr, iRegIsrc src, flagsRegCR0 cr0) %{
|
|
match(Set res (GetAndAddI mem_ptr src));
|
|
effect(TEMP_DEF res, TEMP cr0);
|
|
format %{ "GetAndAddI $res, $mem_ptr, $src" %}
|
|
ins_encode %{
|
|
__ getandaddw($res$$Register, $src$$Register, $mem_ptr$$Register,
|
|
R0, MacroAssembler::cmpxchgx_hint_atomic_update());
|
|
if (support_IRIW_for_not_multiple_copy_atomic_cpu) {
|
|
__ isync();
|
|
} else {
|
|
__ sync();
|
|
}
|
|
%}
|
|
ins_pipe(pipe_class_default);
|
|
%}
|
|
|
|
instruct getAndAddL(iRegLdst res, iRegPdst mem_ptr, iRegLsrc src, flagsRegCR0 cr0) %{
|
|
match(Set res (GetAndAddL mem_ptr src));
|
|
effect(TEMP_DEF res, TEMP cr0);
|
|
format %{ "GetAndAddL $res, $mem_ptr, $src" %}
|
|
ins_encode %{
|
|
__ getandaddd($res$$Register, $src$$Register, $mem_ptr$$Register,
|
|
R0, MacroAssembler::cmpxchgx_hint_atomic_update());
|
|
if (support_IRIW_for_not_multiple_copy_atomic_cpu) {
|
|
__ isync();
|
|
} else {
|
|
__ sync();
|
|
}
|
|
%}
|
|
ins_pipe(pipe_class_default);
|
|
%}
|
|
|
|
instruct getAndSetB(iRegIdst res, iRegPdst mem_ptr, iRegIsrc src, flagsRegCR0 cr0) %{
  match(Set res (GetAndSetB mem_ptr src));
  effect(TEMP_DEF res, TEMP cr0);
  format %{ "GetAndSetB $res, $mem_ptr, $src" %}
  ins_encode %{
    __ getandsetb($res$$Register, $src$$Register, $mem_ptr$$Register,
                  noreg, noreg, noreg, MacroAssembler::cmpxchgx_hint_atomic_update());
    if (support_IRIW_for_not_multiple_copy_atomic_cpu) {
      __ isync();
    } else {
      __ sync();
    }
  %}
  ins_pipe(pipe_class_default);
%}

instruct getAndSetS(iRegIdst res, iRegPdst mem_ptr, iRegIsrc src, flagsRegCR0 cr0) %{
  match(Set res (GetAndSetS mem_ptr src));
  effect(TEMP_DEF res, TEMP cr0);
  format %{ "GetAndSetS $res, $mem_ptr, $src" %}
  ins_encode %{
    __ getandseth($res$$Register, $src$$Register, $mem_ptr$$Register,
                  noreg, noreg, noreg, MacroAssembler::cmpxchgx_hint_atomic_update());
    if (support_IRIW_for_not_multiple_copy_atomic_cpu) {
      __ isync();
    } else {
      __ sync();
    }
  %}
  ins_pipe(pipe_class_default);
%}

instruct getAndSetI(iRegIdst res, iRegPdst mem_ptr, iRegIsrc src, flagsRegCR0 cr0) %{
  match(Set res (GetAndSetI mem_ptr src));
  effect(TEMP_DEF res, TEMP cr0);
  format %{ "GetAndSetI $res, $mem_ptr, $src" %}
  ins_encode %{
    __ getandsetw($res$$Register, $src$$Register, $mem_ptr$$Register,
                  MacroAssembler::cmpxchgx_hint_atomic_update());
    if (support_IRIW_for_not_multiple_copy_atomic_cpu) {
      __ isync();
    } else {
      __ sync();
    }
  %}
  ins_pipe(pipe_class_default);
%}

instruct getAndSetL(iRegLdst res, iRegPdst mem_ptr, iRegLsrc src, flagsRegCR0 cr0) %{
  match(Set res (GetAndSetL mem_ptr src));
  effect(TEMP_DEF res, TEMP cr0);
  format %{ "GetAndSetL $res, $mem_ptr, $src" %}
  ins_encode %{
    __ getandsetd($res$$Register, $src$$Register, $mem_ptr$$Register,
                  MacroAssembler::cmpxchgx_hint_atomic_update());
    if (support_IRIW_for_not_multiple_copy_atomic_cpu) {
      __ isync();
    } else {
      __ sync();
    }
  %}
  ins_pipe(pipe_class_default);
%}

instruct getAndSetP(iRegPdst res, iRegPdst mem_ptr, iRegPsrc src, flagsRegCR0 cr0) %{
  match(Set res (GetAndSetP mem_ptr src));
  predicate(n->as_LoadStore()->barrier_data() == 0);
  effect(TEMP_DEF res, TEMP cr0);
  format %{ "GetAndSetP $res, $mem_ptr, $src" %}
  ins_encode %{
    __ getandsetd($res$$Register, $src$$Register, $mem_ptr$$Register,
                  MacroAssembler::cmpxchgx_hint_atomic_update());
    if (support_IRIW_for_not_multiple_copy_atomic_cpu) {
      __ isync();
    } else {
      __ sync();
    }
  %}
  ins_pipe(pipe_class_default);
%}

instruct getAndSetN(iRegNdst res, iRegPdst mem_ptr, iRegNsrc src, flagsRegCR0 cr0) %{
  match(Set res (GetAndSetN mem_ptr src));
  predicate(n->as_LoadStore()->barrier_data() == 0);
  effect(TEMP_DEF res, TEMP cr0);
  format %{ "GetAndSetN $res, $mem_ptr, $src" %}
  ins_encode %{
    __ getandsetw($res$$Register, $src$$Register, $mem_ptr$$Register,
                  MacroAssembler::cmpxchgx_hint_atomic_update());
    if (support_IRIW_for_not_multiple_copy_atomic_cpu) {
      __ isync();
    } else {
      __ sync();
    }
  %}
  ins_pipe(pipe_class_default);
%}

//----------Arithmetic Instructions--------------------------------------------
// Addition Instructions

// Register Addition
instruct addI_reg_reg(iRegIdst dst, iRegIsrc_iRegL2Isrc src1, iRegIsrc_iRegL2Isrc src2) %{
  match(Set dst (AddI src1 src2));
  format %{ "ADD $dst, $src1, $src2" %}
  size(4);
  ins_encode %{
    __ add($dst$$Register, $src1$$Register, $src2$$Register);
  %}
  ins_pipe(pipe_class_default);
%}

// Expand does not work with above instruct. (??)
instruct addI_reg_reg_2(iRegIdst dst, iRegIsrc src1, iRegIsrc src2) %{
  // no match-rule
  effect(DEF dst, USE src1, USE src2);
  format %{ "ADD $dst, $src1, $src2" %}
  size(4);
  ins_encode %{
    __ add($dst$$Register, $src1$$Register, $src2$$Register);
  %}
  ins_pipe(pipe_class_default);
%}

instruct tree_addI_addI_addI_reg_reg_Ex(iRegIdst dst, iRegIsrc src1, iRegIsrc src2, iRegIsrc src3, iRegIsrc src4) %{
  match(Set dst (AddI (AddI (AddI src1 src2) src3) src4));
  ins_cost(DEFAULT_COST*3);

  expand %{
    // FIXME: we should do this in the ideal world.
    iRegIdst tmp1;
    iRegIdst tmp2;
    addI_reg_reg(tmp1, src1, src2);
    addI_reg_reg_2(tmp2, src3, src4); // Adlc complains about addI_reg_reg.
    addI_reg_reg(dst, tmp1, tmp2);
  %}
%}

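// Note: the expand above rewrites ((src1 + src2) + src3) + src4 as
// (src1 + src2) + (src3 + src4), shortening the critical dependency chain
// from three serial adds to two, since the first two adds can issue in parallel.
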
// Immediate Addition
instruct addI_reg_imm16(iRegIdst dst, iRegIsrc src1, immI16 src2) %{
  match(Set dst (AddI src1 src2));
  format %{ "ADDI $dst, $src1, $src2" %}
  size(4);
  ins_encode %{
    __ addi($dst$$Register, $src1$$Register, $src2$$constant);
  %}
  ins_pipe(pipe_class_default);
%}

// Immediate Addition with 16-bit shifted operand
instruct addI_reg_immhi16(iRegIdst dst, iRegIsrc src1, immIhi16 src2) %{
  match(Set dst (AddI src1 src2));
  format %{ "ADDIS $dst, $src1, $src2" %}
  size(4);
  ins_encode %{
    __ addis($dst$$Register, $src1$$Register, ($src2$$constant)>>16);
  %}
  ins_pipe(pipe_class_default);
%}

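// Illustration: an immIhi16 constant has a zero low halfword, so e.g.
// "AddI src1 0x12340000" becomes "ADDIS dst, src1, 0x1234" via the >>16 above.
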
// Immediate Addition using prefixed addi
instruct addI_reg_imm32(iRegIdst dst, iRegIsrc src1, immI32 src2) %{
  match(Set dst (AddI src1 src2));
  predicate(PowerArchitecturePPC64 >= 10);
  ins_cost(DEFAULT_COST+1);
  format %{ "PADDI $dst, $src1, $src2" %}
  size(8);
  ins_encode %{
    assert( ((intptr_t)(__ pc()) & 0x3c) != 0x3c, "Bad alignment for prefixed instruction at " INTPTR_FORMAT, (intptr_t)(__ pc()));
    __ paddi($dst$$Register, $src1$$Register, $src2$$constant);
  %}
  ins_pipe(pipe_class_default);
  ins_alignment(2);
%}

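// Note on the assert above: ISA 3.1 forbids a prefixed instruction from
// crossing a 64-byte instruction address boundary. For 4-byte aligned code,
// "pc & 0x3c == 0x3c" means "pc % 64 == 60", the only start address at which
// the 8-byte PADDI would straddle that boundary; ins_alignment(2) rules it out.
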
// Long Addition
instruct addL_reg_reg(iRegLdst dst, iRegLsrc src1, iRegLsrc src2) %{
  match(Set dst (AddL src1 src2));
  format %{ "ADD $dst, $src1, $src2 \t// long" %}
  size(4);
  ins_encode %{
    __ add($dst$$Register, $src1$$Register, $src2$$Register);
  %}
  ins_pipe(pipe_class_default);
%}

// Expand does not work with above instruct. (??)
instruct addL_reg_reg_2(iRegLdst dst, iRegLsrc src1, iRegLsrc src2) %{
  // no match-rule
  effect(DEF dst, USE src1, USE src2);
  format %{ "ADD $dst, $src1, $src2 \t// long" %}
  size(4);
  ins_encode %{
    __ add($dst$$Register, $src1$$Register, $src2$$Register);
  %}
  ins_pipe(pipe_class_default);
%}

instruct tree_addL_addL_addL_reg_reg_Ex(iRegLdst dst, iRegLsrc src1, iRegLsrc src2, iRegLsrc src3, iRegLsrc src4) %{
  match(Set dst (AddL (AddL (AddL src1 src2) src3) src4));
  ins_cost(DEFAULT_COST*3);

  expand %{
    // FIXME: we should do this in the ideal world.
    iRegLdst tmp1;
    iRegLdst tmp2;
    addL_reg_reg(tmp1, src1, src2);
    addL_reg_reg_2(tmp2, src3, src4); // Adlc complains about addL_reg_reg.
    addL_reg_reg(dst, tmp1, tmp2);
  %}
%}

// AddL + ConvL2I.
instruct addI_regL_regL(iRegIdst dst, iRegLsrc src1, iRegLsrc src2) %{
  match(Set dst (ConvL2I (AddL src1 src2)));

  format %{ "ADD $dst, $src1, $src2 \t// long + l2i" %}
  size(4);
  ins_encode %{
    __ add($dst$$Register, $src1$$Register, $src2$$Register);
  %}
  ins_pipe(pipe_class_default);
%}

// No constant pool entries required.
instruct addL_reg_imm16(iRegLdst dst, iRegLsrc src1, immL16 src2) %{
  match(Set dst (AddL src1 src2));

  format %{ "ADDI $dst, $src1, $src2" %}
  size(4);
  ins_encode %{
    __ addi($dst$$Register, $src1$$Register, $src2$$constant);
  %}
  ins_pipe(pipe_class_default);
%}

// Long Immediate Addition with 16-bit shifted operand.
// No constant pool entries required.
instruct addL_reg_immhi16(iRegLdst dst, iRegLsrc src1, immL32hi16 src2) %{
  match(Set dst (AddL src1 src2));

  format %{ "ADDIS $dst, $src1, $src2" %}
  size(4);
  ins_encode %{
    __ addis($dst$$Register, $src1$$Register, ($src2$$constant)>>16);
  %}
  ins_pipe(pipe_class_default);
%}

// Long Immediate Addition using prefixed addi
// No constant pool entries required.
instruct addL_reg_imm34(iRegLdst dst, iRegLsrc src1, immL34 src2) %{
  match(Set dst (AddL src1 src2));
  predicate(PowerArchitecturePPC64 >= 10);
  ins_cost(DEFAULT_COST+1);

  format %{ "PADDI $dst, $src1, $src2" %}
  size(8);
  ins_encode %{
    assert( ((intptr_t)(__ pc()) & 0x3c) != 0x3c, "Bad alignment for prefixed instruction at " INTPTR_FORMAT, (intptr_t)(__ pc()));
    __ paddi($dst$$Register, $src1$$Register, $src2$$constant);
  %}
  ins_pipe(pipe_class_default);
  ins_alignment(2);
%}

// Pointer Register Addition
instruct addP_reg_reg(iRegPdst dst, iRegP_N2P src1, iRegLsrc src2) %{
  match(Set dst (AddP src1 src2));
  format %{ "ADD $dst, $src1, $src2" %}
  size(4);
  ins_encode %{
    __ add($dst$$Register, $src1$$Register, $src2$$Register);
  %}
  ins_pipe(pipe_class_default);
%}

// Pointer Immediate Addition
// No constant pool entries required.
instruct addP_reg_imm16(iRegPdst dst, iRegP_N2P src1, immL16 src2) %{
  match(Set dst (AddP src1 src2));

  format %{ "ADDI $dst, $src1, $src2" %}
  size(4);
  ins_encode %{
    __ addi($dst$$Register, $src1$$Register, $src2$$constant);
  %}
  ins_pipe(pipe_class_default);
%}

// Pointer Immediate Addition with 16-bit shifted operand.
// No constant pool entries required.
instruct addP_reg_immhi16(iRegPdst dst, iRegP_N2P src1, immL32hi16 src2) %{
  match(Set dst (AddP src1 src2));

  format %{ "ADDIS $dst, $src1, $src2" %}
  size(4);
  ins_encode %{
    __ addis($dst$$Register, $src1$$Register, ($src2$$constant)>>16);
  %}
  ins_pipe(pipe_class_default);
%}

// Pointer Immediate Addition using prefixed addi
// No constant pool entries required.
instruct addP_reg_imm34(iRegPdst dst, iRegP_N2P src1, immL34 src2) %{
  match(Set dst (AddP src1 src2));
  predicate(PowerArchitecturePPC64 >= 10);
  ins_cost(DEFAULT_COST+1);

  format %{ "PADDI $dst, $src1, $src2" %}
  size(8);
  ins_encode %{
    assert( ((intptr_t)(__ pc()) & 0x3c) != 0x3c, "Bad alignment for prefixed instruction at " INTPTR_FORMAT, (intptr_t)(__ pc()));
    __ paddi($dst$$Register, $src1$$Register, $src2$$constant);
  %}
  ins_pipe(pipe_class_default);
  ins_alignment(2);
%}

//---------------------
// Subtraction Instructions

// Register Subtraction
instruct subI_reg_reg(iRegIdst dst, iRegIsrc src1, iRegIsrc src2) %{
  match(Set dst (SubI src1 src2));
  format %{ "SUBF $dst, $src2, $src1" %}
  size(4);
  ins_encode %{
    __ subf($dst$$Register, $src2$$Register, $src1$$Register);
  %}
  ins_pipe(pipe_class_default);
%}

// Immediate Subtraction
// The compiler converts "x-c0" into "x+ -c0" (see SubLNode::Ideal).
// Don't try to use addi with - $src2$$constant since it can overflow when $src2$$constant == minI16.

// SubI from constant (using subfic).
instruct subI_imm16_reg(iRegIdst dst, immI16 src1, iRegIsrc src2) %{
  match(Set dst (SubI src1 src2));
  format %{ "SUBI $dst, $src1, $src2" %}

  size(4);
  ins_encode %{
    __ subfic($dst$$Register, $src2$$Register, $src1$$constant);
  %}
  ins_pipe(pipe_class_default);
%}

// Turn the sign-bit of an integer into a 32-bit mask, 0x0...0 for
// positive integers and 0xF...F for negative ones.
instruct signmask32I_regI(iRegIdst dst, iRegIsrc src) %{
  // no match-rule, false predicate
  effect(DEF dst, USE src);
  predicate(false);

  format %{ "SRAWI $dst, $src, #31" %}
  size(4);
  ins_encode %{
    __ srawi($dst$$Register, $src$$Register, 0x1f);
  %}
  ins_pipe(pipe_class_default);
%}

instruct absI_reg_Ex(iRegIdst dst, iRegIsrc src) %{
  match(Set dst (AbsI src));
  ins_cost(DEFAULT_COST*3);

  expand %{
    iRegIdst tmp1;
    iRegIdst tmp2;
    signmask32I_regI(tmp1, src);
    xorI_reg_reg(tmp2, tmp1, src);
    subI_reg_reg(dst, tmp2, tmp1);
  %}
%}

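// Note: this is the classic branchless abs. In C terms (a sketch):
//   int mask = x >> 31;           // SRAWI: 0 for x >= 0, -1 for x < 0
//   int abs  = (x ^ mask) - mask; // XOR + SUBF
// As with Java's Math.abs, abs(min_int) wraps around to min_int.
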
instruct negI_regI(iRegIdst dst, immI_0 zero, iRegIsrc src2) %{
  match(Set dst (SubI zero src2));
  format %{ "NEG $dst, $src2" %}
  size(4);
  ins_encode %{
    __ neg($dst$$Register, $src2$$Register);
  %}
  ins_pipe(pipe_class_default);
%}

// Long subtraction
instruct subL_reg_reg(iRegLdst dst, iRegLsrc src1, iRegLsrc src2) %{
  match(Set dst (SubL src1 src2));
  format %{ "SUBF $dst, $src2, $src1 \t// long" %}
  size(4);
  ins_encode %{
    __ subf($dst$$Register, $src2$$Register, $src1$$Register);
  %}
  ins_pipe(pipe_class_default);
%}

// SubL + convL2I.
instruct subI_regL_regL(iRegIdst dst, iRegLsrc src1, iRegLsrc src2) %{
  match(Set dst (ConvL2I (SubL src1 src2)));

  format %{ "SUBF $dst, $src2, $src1 \t// long + l2i" %}
  size(4);
  ins_encode %{
    __ subf($dst$$Register, $src2$$Register, $src1$$Register);
  %}
  ins_pipe(pipe_class_default);
%}

// Turn the sign-bit of a long into a 64-bit mask, 0x0...0 for
// positive longs and 0xF...F for negative ones.
instruct signmask64I_regL(iRegIdst dst, iRegLsrc src) %{
  // no match-rule, false predicate
  effect(DEF dst, USE src);
  predicate(false);

  format %{ "SRADI $dst, $src, #63" %}
  size(4);
  ins_encode %{
    __ sradi($dst$$Register, $src$$Register, 0x3f);
  %}
  ins_pipe(pipe_class_default);
%}

// Turn the sign-bit of a long into a 64-bit mask, 0x0...0 for
// positive longs and 0xF...F for negative ones.
instruct signmask64L_regL(iRegLdst dst, iRegLsrc src) %{
  // no match-rule, false predicate
  effect(DEF dst, USE src);
  predicate(false);

  format %{ "SRADI $dst, $src, #63" %}
  size(4);
  ins_encode %{
    __ sradi($dst$$Register, $src$$Register, 0x3f);
  %}
  ins_pipe(pipe_class_default);
%}

instruct absL_reg_Ex(iRegLdst dst, iRegLsrc src) %{
  match(Set dst (AbsL src));
  ins_cost(DEFAULT_COST*3);

  expand %{
    iRegLdst tmp1;
    iRegLdst tmp2;
    signmask64L_regL(tmp1, src);
    xorL_reg_reg(tmp2, tmp1, src);
    subL_reg_reg(dst, tmp2, tmp1);
  %}
%}

// Long negation
instruct negL_reg_reg(iRegLdst dst, immL_0 zero, iRegLsrc src2) %{
  match(Set dst (SubL zero src2));
  format %{ "NEG $dst, $src2 \t// long" %}
  size(4);
  ins_encode %{
    __ neg($dst$$Register, $src2$$Register);
  %}
  ins_pipe(pipe_class_default);
%}

// NegL + ConvL2I.
instruct negI_con0_regL(iRegIdst dst, immL_0 zero, iRegLsrc src2) %{
  match(Set dst (ConvL2I (SubL zero src2)));

  format %{ "NEG $dst, $src2 \t// long + l2i" %}
  size(4);
  ins_encode %{
    __ neg($dst$$Register, $src2$$Register);
  %}
  ins_pipe(pipe_class_default);
%}

// Multiplication Instructions
// Integer Multiplication

// Register Multiplication
instruct mulI_reg_reg(iRegIdst dst, iRegIsrc src1, iRegIsrc src2) %{
  match(Set dst (MulI src1 src2));
  ins_cost(DEFAULT_COST);

  format %{ "MULLW $dst, $src1, $src2" %}
  size(4);
  ins_encode %{
    __ mullw($dst$$Register, $src1$$Register, $src2$$Register);
  %}
  ins_pipe(pipe_class_default);
%}

// Immediate Multiplication
instruct mulI_reg_imm16(iRegIdst dst, iRegIsrc src1, immI16 src2) %{
  match(Set dst (MulI src1 src2));
  ins_cost(DEFAULT_COST);

  format %{ "MULLI $dst, $src1, $src2" %}
  size(4);
  ins_encode %{
    __ mulli($dst$$Register, $src1$$Register, $src2$$constant);
  %}
  ins_pipe(pipe_class_default);
%}

instruct mulL_reg_reg(iRegLdst dst, iRegLsrc src1, iRegLsrc src2) %{
  match(Set dst (MulL src1 src2));
  ins_cost(DEFAULT_COST);

  format %{ "MULLD $dst, $src1, $src2 \t// long" %}
  size(4);
  ins_encode %{
    __ mulld($dst$$Register, $src1$$Register, $src2$$Register);
  %}
  ins_pipe(pipe_class_default);
%}

// Multiply high for optimized long division by constant.
instruct mulHighL_reg_reg(iRegLdst dst, iRegLsrc src1, iRegLsrc src2) %{
  match(Set dst (MulHiL src1 src2));
  ins_cost(DEFAULT_COST);

  format %{ "MULHD $dst, $src1, $src2 \t// long" %}
  size(4);
  ins_encode %{
    __ mulhd($dst$$Register, $src1$$Register, $src2$$Register);
  %}
  ins_pipe(pipe_class_default);
%}

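// Note: MULHD returns the high 64 bits of the signed 128-bit product. C2
// requests it (MulHiL) when strength-reducing a long division by a constant
// into a multiply by a fixed-point reciprocal followed by shift corrections.
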
// Immediate Multiplication
instruct mulL_reg_imm16(iRegLdst dst, iRegLsrc src1, immL16 src2) %{
  match(Set dst (MulL src1 src2));
  ins_cost(DEFAULT_COST);

  format %{ "MULLI $dst, $src1, $src2" %}
  size(4);
  ins_encode %{
    __ mulli($dst$$Register, $src1$$Register, $src2$$constant);
  %}
  ins_pipe(pipe_class_default);
%}

// Integer Division with Immediate -1: Negate.
instruct divI_reg_immIvalueMinus1(iRegIdst dst, iRegIsrc src1, immI_minus1 src2) %{
  match(Set dst (DivI src1 src2));
  ins_cost(DEFAULT_COST);

  format %{ "NEG $dst, $src1 \t// /-1" %}
  size(4);
  ins_encode %{
    __ neg($dst$$Register, $src1$$Register);
  %}
  ins_pipe(pipe_class_default);
%}

// Integer Division with constant, but not -1.
// We should be able to improve this by checking the type of src2.
// It might well be that src2 is known to be positive.
instruct divI_reg_regnotMinus1(iRegIdst dst, iRegIsrc src1, iRegIsrc src2) %{
  match(Set dst (DivI src1 src2));
  predicate(n->in(2)->find_int_con(-1) != -1); // src2 is a constant, but not -1
  ins_cost(2*DEFAULT_COST);

  format %{ "DIVW $dst, $src1, $src2 \t// /not-1" %}
  size(4);
  ins_encode %{
    __ divw($dst$$Register, $src1$$Register, $src2$$Register);
  %}
  ins_pipe(pipe_class_default);
%}

instruct cmovI_bne_negI_reg(iRegIdst dst, flagsRegSrc crx, iRegIsrc src1) %{
  effect(USE_DEF dst, USE src1, USE crx);
  predicate(false);

  ins_variable_size_depending_on_alignment(true);

  format %{ "CMOVE $dst, neg($src1), $crx" %}
  // Worst case is branch + move + stop, no stop without scheduler.
  size(8);
  ins_encode %{
    Label done;
    __ bne($crx$$CondRegister, done);
    __ neg($dst$$Register, $src1$$Register);
    __ bind(done);
  %}
  ins_pipe(pipe_class_default);
%}

// Integer Division with Registers not containing constants.
instruct divI_reg_reg_Ex(iRegIdst dst, iRegIsrc src1, iRegIsrc src2) %{
  match(Set dst (DivI src1 src2));
  ins_cost(10*DEFAULT_COST);

  expand %{
    immI16 imm %{ (int)-1 %}
    flagsReg tmp1;
    cmpI_reg_imm16(tmp1, src2, imm);         // check src2 == -1
    divI_reg_regnotMinus1(dst, src1, src2);  // dst = src1 / src2
    cmovI_bne_negI_reg(dst, tmp1, src1);     // cmove dst = neg(src1) if src2 == -1
  %}
%}

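// Note: the -1 special case exists because DIVW leaves the result undefined
// on overflow (min_int / -1), while Java requires min_int / -1 == min_int;
// NEG produces exactly that, since neg(min_int) wraps back to min_int.
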
// Long Division with Immediate -1: Negate.
instruct divL_reg_immLvalueMinus1(iRegLdst dst, iRegLsrc src1, immL_minus1 src2) %{
  match(Set dst (DivL src1 src2));
  ins_cost(DEFAULT_COST);

  format %{ "NEG $dst, $src1 \t// /-1, long" %}
  size(4);
  ins_encode %{
    __ neg($dst$$Register, $src1$$Register);
  %}
  ins_pipe(pipe_class_default);
%}

// Long Division with constant, but not -1.
instruct divL_reg_regnotMinus1(iRegLdst dst, iRegLsrc src1, iRegLsrc src2) %{
  match(Set dst (DivL src1 src2));
  predicate(n->in(2)->find_long_con(-1L) != -1L); // Src2 is a constant, but not -1.
  ins_cost(2*DEFAULT_COST);

  format %{ "DIVD $dst, $src1, $src2 \t// /not-1, long" %}
  size(4);
  ins_encode %{
    __ divd($dst$$Register, $src1$$Register, $src2$$Register);
  %}
  ins_pipe(pipe_class_default);
%}

instruct cmovL_bne_negL_reg(iRegLdst dst, flagsRegSrc crx, iRegLsrc src1) %{
  effect(USE_DEF dst, USE src1, USE crx);
  predicate(false);

  ins_variable_size_depending_on_alignment(true);

  format %{ "CMOVE $dst, neg($src1), $crx" %}
  // Worst case is branch + move + stop, no stop without scheduler.
  size(8);
  ins_encode %{
    Label done;
    __ bne($crx$$CondRegister, done);
    __ neg($dst$$Register, $src1$$Register);
    __ bind(done);
  %}
  ins_pipe(pipe_class_default);
%}

// Long Division with Registers not containing constants.
instruct divL_reg_reg_Ex(iRegLdst dst, iRegLsrc src1, iRegLsrc src2) %{
  match(Set dst (DivL src1 src2));
  ins_cost(10*DEFAULT_COST);

  expand %{
    immL16 imm %{ (int)-1 %}
    flagsReg tmp1;
    cmpL_reg_imm16(tmp1, src2, imm);         // check src2 == -1
    divL_reg_regnotMinus1(dst, src1, src2);  // dst = src1 / src2
    cmovL_bne_negL_reg(dst, tmp1, src1);     // cmove dst = neg(src1) if src2 == -1
  %}
%}

// Integer Remainder with registers.
instruct modI_reg_reg_Ex(iRegIdst dst, iRegIsrc src1, iRegIsrc src2) %{
  match(Set dst (ModI src1 src2));
  ins_cost(10*DEFAULT_COST);

  expand %{
    immI16 imm %{ (int)-1 %}
    flagsReg tmp1;
    iRegIdst tmp2;
    iRegIdst tmp3;
    cmpI_reg_imm16(tmp1, src2, imm);          // check src2 == -1
    divI_reg_regnotMinus1(tmp2, src1, src2);  // tmp2 = src1 / src2
    cmovI_bne_negI_reg(tmp2, tmp1, src1);     // cmove tmp2 = neg(src1) if src2 == -1
    mulI_reg_reg(tmp3, src2, tmp2);           // tmp3 = src2 * tmp2
    subI_reg_reg(dst, src1, tmp3);            // dst = src1 - tmp3
  %}
%}

// Long Remainder with registers
instruct modL_reg_reg_Ex(iRegLdst dst, iRegLsrc src1, iRegLsrc src2) %{
  match(Set dst (ModL src1 src2));
  ins_cost(10*DEFAULT_COST);

  expand %{
    immL16 imm %{ (int)-1 %}
    flagsReg tmp1;
    iRegLdst tmp2;
    iRegLdst tmp3;
    cmpL_reg_imm16(tmp1, src2, imm);          // check src2 == -1
    divL_reg_regnotMinus1(tmp2, src1, src2);  // tmp2 = src1 / src2
    cmovL_bne_negL_reg(tmp2, tmp1, src1);     // cmove tmp2 = neg(src1) if src2 == -1
    mulL_reg_reg(tmp3, src2, tmp2);           // tmp3 = src2 * tmp2
    subL_reg_reg(dst, src1, tmp3);            // dst = src1 - tmp3
  %}
%}

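// Note: the remainder is computed as src1 - (src1 / src2) * src2, which with
// truncating division matches Java's rule that the remainder takes the sign
// of the dividend, e.g. 7 % -3 = 7 - (-2 * -3) = 1.
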
instruct udivI_reg_reg(iRegIdst dst, iRegIsrc src1, iRegIsrc src2) %{
  match(Set dst (UDivI src1 src2));
  format %{ "DIVWU $dst, $src1, $src2" %}
  size(4);
  ins_encode %{
    __ divwu($dst$$Register, $src1$$Register, $src2$$Register);
  %}
  ins_pipe(pipe_class_default);
%}

instruct umodI_reg_reg(iRegIdst dst, iRegIsrc src1, iRegIsrc src2) %{
  match(Set dst (UModI src1 src2));
  expand %{
    iRegIdst tmp1;
    iRegIdst tmp2;
    udivI_reg_reg(tmp1, src1, src2);
    // Compute the lower 32-bit result using signed instructions, as suggested by the ISA.
    // The upper 32 bits will contain garbage.
    mulI_reg_reg(tmp2, src2, tmp1);
    subI_reg_reg(dst, src1, tmp2);
  %}
%}

instruct udivL_reg_reg(iRegLdst dst, iRegLsrc src1, iRegLsrc src2) %{
  match(Set dst (UDivL src1 src2));
  format %{ "DIVDU $dst, $src1, $src2" %}
  size(4);
  ins_encode %{
    __ divdu($dst$$Register, $src1$$Register, $src2$$Register);
  %}
  ins_pipe(pipe_class_default);
%}

instruct umodL_reg_reg(iRegLdst dst, iRegLsrc src1, iRegLsrc src2) %{
  match(Set dst (UModL src1 src2));
  expand %{
    iRegLdst tmp1;
    iRegLdst tmp2;
    udivL_reg_reg(tmp1, src1, src2);
    mulL_reg_reg(tmp2, src2, tmp1);
    subL_reg_reg(dst, src1, tmp2);
  %}
%}

// Integer Shift Instructions

// Register Shift Left

// Clear all but the lowest #mask bits.
// Used to normalize shift amounts in registers.
instruct maskI_reg_imm(iRegIdst dst, iRegIsrc src, uimmI6 mask) %{
  // no match-rule, false predicate
  effect(DEF dst, USE src, USE mask);
  predicate(false);

  format %{ "MASK $dst, $src, $mask \t// clear $mask upper bits" %}
  size(4);
  ins_encode %{
    __ clrldi($dst$$Register, $src$$Register, $mask$$constant);
  %}
  ins_pipe(pipe_class_default);
%}

instruct lShiftI_reg_reg(iRegIdst dst, iRegIsrc src1, iRegIsrc src2) %{
  // no match-rule, false predicate
  effect(DEF dst, USE src1, USE src2);
  predicate(false);

  format %{ "SLW $dst, $src1, $src2" %}
  size(4);
  ins_encode %{
    __ slw($dst$$Register, $src1$$Register, $src2$$Register);
  %}
  ins_pipe(pipe_class_default);
%}

instruct lShiftI_reg_reg_Ex(iRegIdst dst, iRegIsrc src1, iRegIsrc src2) %{
  match(Set dst (LShiftI src1 src2));
  ins_cost(DEFAULT_COST*2);
  expand %{
    uimmI6 mask %{ 0x3b /* clear 59 bits, keep 5 */ %}
    iRegIdst tmpI;
    maskI_reg_imm(tmpI, src2, mask);
    lShiftI_reg_reg(dst, src1, tmpI);
  %}
%}

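// Note: Java defines "x << n" to use only the low 5 bits of n, while SLW
// consumes 6 bits (counts 32..63 yield 0). Masking the count down to 5 bits
// first (clrldi by 0x3b, i.e. clear the upper 59 bits) restores Java semantics.
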
// Register Shift Left Immediate
instruct lShiftI_reg_imm(iRegIdst dst, iRegIsrc src1, immI src2) %{
  match(Set dst (LShiftI src1 src2));

  format %{ "SLWI $dst, $src1, ($src2 & 0x1f)" %}
  size(4);
  ins_encode %{
    __ slwi($dst$$Register, $src1$$Register, ($src2$$constant) & 0x1f);
  %}
  ins_pipe(pipe_class_default);
%}

// AndI with negpow2-constant + LShiftI
instruct lShiftI_andI_immInegpow2_imm5(iRegIdst dst, iRegIsrc src1, immInegpow2 src2, uimmI5 src3) %{
  match(Set dst (LShiftI (AndI src1 src2) src3));
  predicate(UseRotateAndMaskInstructionsPPC64);

  format %{ "RLWINM $dst, lShiftI(AndI($src1, $src2), $src3)" %}
  size(4);
  ins_encode %{
    long src3 = $src3$$constant;
    long maskbits = src3 + log2i_exact(-(juint)$src2$$constant);
    if (maskbits >= 32) {
      __ li($dst$$Register, 0); // addi
    } else {
      __ rlwinm($dst$$Register, $src1$$Register, src3 & 0x1f, 0, (31-maskbits) & 0x1f);
    }
  %}
  ins_pipe(pipe_class_default);
%}

// RShiftI + AndI with negpow2-constant + LShiftI
instruct lShiftI_andI_immInegpow2_rShiftI_imm5(iRegIdst dst, iRegIsrc src1, immInegpow2 src2, uimmI5 src3) %{
  match(Set dst (LShiftI (AndI (RShiftI src1 src3) src2) src3));
  predicate(UseRotateAndMaskInstructionsPPC64);

  format %{ "RLWINM $dst, lShiftI(AndI(RShiftI($src1, $src3), $src2), $src3)" %}
  size(4);
  ins_encode %{
    long src3 = $src3$$constant;
    long maskbits = src3 + log2i_exact(-(juint)$src2$$constant);
    if (maskbits >= 32) {
      __ li($dst$$Register, 0); // addi
    } else {
      __ rlwinm($dst$$Register, $src1$$Register, 0, 0, (31-maskbits) & 0x1f);
    }
  %}
  ins_pipe(pipe_class_default);
%}

instruct lShiftL_regL_regI(iRegLdst dst, iRegLsrc src1, iRegIsrc src2) %{
  // no match-rule, false predicate
  effect(DEF dst, USE src1, USE src2);
  predicate(false);

  format %{ "SLD $dst, $src1, $src2" %}
  size(4);
  ins_encode %{
    __ sld($dst$$Register, $src1$$Register, $src2$$Register);
  %}
  ins_pipe(pipe_class_default);
%}

// Register Shift Left
instruct lShiftL_regL_regI_Ex(iRegLdst dst, iRegLsrc src1, iRegIsrc src2) %{
  match(Set dst (LShiftL src1 src2));
  ins_cost(DEFAULT_COST*2);
  expand %{
    uimmI6 mask %{ 0x3a /* clear 58 bits, keep 6 */ %}
    iRegIdst tmpI;
    maskI_reg_imm(tmpI, src2, mask);
    lShiftL_regL_regI(dst, src1, tmpI);
  %}
%}

// Register Shift Left Immediate
instruct lshiftL_regL_immI(iRegLdst dst, iRegLsrc src1, immI src2) %{
  match(Set dst (LShiftL src1 src2));
  format %{ "SLDI $dst, $src1, ($src2 & 0x3f)" %}
  size(4);
  ins_encode %{
    __ sldi($dst$$Register, $src1$$Register, ($src2$$constant) & 0x3f);
  %}
  ins_pipe(pipe_class_default);
%}

// If we shift more than 32 bits, we need not convert I2L.
instruct lShiftL_regI_immGE32(iRegLdst dst, iRegIsrc src1, uimmI6_ge32 src2) %{
  match(Set dst (LShiftL (ConvI2L src1) src2));
  ins_cost(DEFAULT_COST);

  size(4);
  format %{ "SLDI $dst, i2l($src1), $src2" %}
  ins_encode %{
    __ sldi($dst$$Register, $src1$$Register, ($src2$$constant) & 0x3f);
  %}
  ins_pipe(pipe_class_default);
%}

// Shift a positive int to the left.
// Clrlsldi clears the upper 32 bits and shifts.
instruct scaledPositiveI2L_lShiftL_convI2L_reg_imm6(iRegLdst dst, iRegIsrc src1, uimmI6 src2) %{
  match(Set dst (LShiftL (ConvI2L src1) src2));
  predicate(((ConvI2LNode*)(_kids[0]->_leaf))->type()->is_long()->is_positive_int());

  format %{ "SLDI $dst, i2l(positive_int($src1)), $src2" %}
  size(4);
  ins_encode %{
    __ clrlsldi($dst$$Register, $src1$$Register, 0x20, $src2$$constant);
  %}
  ins_pipe(pipe_class_default);
%}

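// Note: clrlsldi first clears the upper 32 bits, then shifts left. That is
// only correct because the predicate proves the int is non-negative, so the
// ConvI2L sign-extension it replaces is the same as zero-extension.
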
instruct arShiftI_reg_reg(iRegIdst dst, iRegIsrc src1, iRegIsrc src2) %{
  // no match-rule, false predicate
  effect(DEF dst, USE src1, USE src2);
  predicate(false);

  format %{ "SRAW $dst, $src1, $src2" %}
  size(4);
  ins_encode %{
    __ sraw($dst$$Register, $src1$$Register, $src2$$Register);
  %}
  ins_pipe(pipe_class_default);
%}

// Register Arithmetic Shift Right
instruct arShiftI_reg_reg_Ex(iRegIdst dst, iRegIsrc src1, iRegIsrc src2) %{
  match(Set dst (RShiftI src1 src2));
  ins_cost(DEFAULT_COST*2);
  expand %{
    uimmI6 mask %{ 0x3b /* clear 59 bits, keep 5 */ %}
    iRegIdst tmpI;
    maskI_reg_imm(tmpI, src2, mask);
    arShiftI_reg_reg(dst, src1, tmpI);
  %}
%}

// Register Arithmetic Shift Right Immediate
instruct arShiftI_reg_imm(iRegIdst dst, iRegIsrc src1, immI src2) %{
  match(Set dst (RShiftI src1 src2));

  format %{ "SRAWI $dst, $src1, ($src2 & 0x1f)" %}
  size(4);
  ins_encode %{
    __ srawi($dst$$Register, $src1$$Register, ($src2$$constant) & 0x1f);
  %}
  ins_pipe(pipe_class_default);
%}

instruct arShiftL_regL_regI(iRegLdst dst, iRegLsrc src1, iRegIsrc src2) %{
  // no match-rule, false predicate
  effect(DEF dst, USE src1, USE src2);
  predicate(false);

  format %{ "SRAD $dst, $src1, $src2" %}
  size(4);
  ins_encode %{
    __ srad($dst$$Register, $src1$$Register, $src2$$Register);
  %}
  ins_pipe(pipe_class_default);
%}

// Register Shift Right Arithmetic Long
instruct arShiftL_regL_regI_Ex(iRegLdst dst, iRegLsrc src1, iRegIsrc src2) %{
  match(Set dst (RShiftL src1 src2));
  ins_cost(DEFAULT_COST*2);

  expand %{
    uimmI6 mask %{ 0x3a /* clear 58 bits, keep 6 */ %}
    iRegIdst tmpI;
    maskI_reg_imm(tmpI, src2, mask);
    arShiftL_regL_regI(dst, src1, tmpI);
  %}
%}

// Register Shift Right Immediate
instruct arShiftL_regL_immI(iRegLdst dst, iRegLsrc src1, immI src2) %{
  match(Set dst (RShiftL src1 src2));

  format %{ "SRADI $dst, $src1, ($src2 & 0x3f)" %}
  size(4);
  ins_encode %{
    __ sradi($dst$$Register, $src1$$Register, ($src2$$constant) & 0x3f);
  %}
  ins_pipe(pipe_class_default);
%}

// RShiftL + ConvL2I
instruct convL2I_arShiftL_regL_immI(iRegIdst dst, iRegLsrc src1, immI src2) %{
  match(Set dst (ConvL2I (RShiftL src1 src2)));

  format %{ "SRADI $dst, $src1, ($src2 & 0x3f) \t// long + l2i" %}
  size(4);
  ins_encode %{
    __ sradi($dst$$Register, $src1$$Register, ($src2$$constant) & 0x3f);
  %}
  ins_pipe(pipe_class_default);
%}

instruct urShiftI_reg_reg(iRegIdst dst, iRegIsrc src1, iRegIsrc src2) %{
  // no match-rule, false predicate
  effect(DEF dst, USE src1, USE src2);
  predicate(false);

  format %{ "SRW $dst, $src1, $src2" %}
  size(4);
  ins_encode %{
    __ srw($dst$$Register, $src1$$Register, $src2$$Register);
  %}
  ins_pipe(pipe_class_default);
%}

// Register Shift Right
instruct urShiftI_reg_reg_Ex(iRegIdst dst, iRegIsrc src1, iRegIsrc src2) %{
  match(Set dst (URShiftI src1 src2));
  ins_cost(DEFAULT_COST*2);

  expand %{
    uimmI6 mask %{ 0x3b /* clear 59 bits, keep 5 */ %}
    iRegIdst tmpI;
    maskI_reg_imm(tmpI, src2, mask);
    urShiftI_reg_reg(dst, src1, tmpI);
  %}
%}

// Register Shift Right Immediate
instruct urShiftI_reg_imm(iRegIdst dst, iRegIsrc src1, immI src2) %{
  match(Set dst (URShiftI src1 src2));

  format %{ "SRWI $dst, $src1, ($src2 & 0x1f)" %}
  size(4);
  ins_encode %{
    __ srwi($dst$$Register, $src1$$Register, ($src2$$constant) & 0x1f);
  %}
  ins_pipe(pipe_class_default);
%}

instruct urShiftL_regL_regI(iRegLdst dst, iRegLsrc src1, iRegIsrc src2) %{
  // no match-rule, false predicate
  effect(DEF dst, USE src1, USE src2);
  predicate(false);

  format %{ "SRD $dst, $src1, $src2" %}
  size(4);
  ins_encode %{
    __ srd($dst$$Register, $src1$$Register, $src2$$Register);
  %}
  ins_pipe(pipe_class_default);
%}

// Register Shift Right
instruct urShiftL_regL_regI_Ex(iRegLdst dst, iRegLsrc src1, iRegIsrc src2) %{
  match(Set dst (URShiftL src1 src2));
  ins_cost(DEFAULT_COST*2);

  expand %{
    uimmI6 mask %{ 0x3a /* clear 58 bits, keep 6 */ %}
    iRegIdst tmpI;
    maskI_reg_imm(tmpI, src2, mask);
    urShiftL_regL_regI(dst, src1, tmpI);
  %}
%}

// Register Shift Right Immediate
instruct urShiftL_regL_immI(iRegLdst dst, iRegLsrc src1, immI src2) %{
  match(Set dst (URShiftL src1 src2));

  format %{ "SRDI $dst, $src1, ($src2 & 0x3f)" %}
  size(4);
  ins_encode %{
    __ srdi($dst$$Register, $src1$$Register, ($src2$$constant) & 0x3f);
  %}
  ins_pipe(pipe_class_default);
%}

// URShiftL + ConvL2I.
instruct convL2I_urShiftL_regL_immI(iRegIdst dst, iRegLsrc src1, immI src2) %{
  match(Set dst (ConvL2I (URShiftL src1 src2)));

  format %{ "SRDI $dst, $src1, ($src2 & 0x3f) \t// long + l2i" %}
  size(4);
  ins_encode %{
    __ srdi($dst$$Register, $src1$$Register, ($src2$$constant) & 0x3f);
  %}
  ins_pipe(pipe_class_default);
%}

// Register Shift Right Immediate with a CastP2X
instruct shrP_convP2X_reg_imm6(iRegLdst dst, iRegP_N2P src1, uimmI6 src2) %{
  match(Set dst (URShiftL (CastP2X src1) src2));

  format %{ "SRDI $dst, $src1, $src2 \t// Cast ptr $src1 to long and shift" %}
  size(4);
  ins_encode %{
    __ srdi($dst$$Register, $src1$$Register, ($src2$$constant) & 0x3f);
  %}
  ins_pipe(pipe_class_default);
%}

// Bitfield Extract: URShiftI + AndI
instruct andI_urShiftI_regI_immI_immIpow2minus1(iRegIdst dst, iRegIsrc src1, immI src2, immIpow2minus1 src3) %{
  match(Set dst (AndI (URShiftI src1 src2) src3));

  format %{ "EXTRDI $dst, $src1, shift=$src2, mask=$src3 \t// int bitfield extract" %}
  size(4);
  ins_encode %{
    int rshift = ($src2$$constant) & 0x1f;
    int length = log2i_exact((juint)$src3$$constant + 1u);
    if (rshift + length > 32) {
      // if necessary, adjust mask to omit rotated bits.
      length = 32 - rshift;
    }
    __ extrdi($dst$$Register, $src1$$Register, length, 64 - (rshift + length));
  %}
  ins_pipe(pipe_class_default);
%}

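// Worked example: (x >>> 8) & 0xFF gives rshift = 8 and
// length = log2(0xFF + 1) = 8, so we emit "extrdi dst, src1, 8, 48":
// the int occupies bits 32..63 of the 64-bit register, and bits 48..55
// are exactly the extracted field, right-justified into dst.
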
// Bitfield Extract: URShiftL + AndL
instruct andL_urShiftL_regL_immI_immLpow2minus1(iRegLdst dst, iRegLsrc src1, immI src2, immLpow2minus1 src3) %{
  match(Set dst (AndL (URShiftL src1 src2) src3));

  format %{ "EXTRDI $dst, $src1, shift=$src2, mask=$src3 \t// long bitfield extract" %}
  size(4);
  ins_encode %{
    int rshift = ($src2$$constant) & 0x3f;
    int length = log2i_exact((julong)$src3$$constant + 1ull);
    if (rshift + length > 64) {
      // if necessary, adjust mask to omit rotated bits.
      length = 64 - rshift;
    }
    __ extrdi($dst$$Register, $src1$$Register, length, 64 - (rshift + length));
  %}
  ins_pipe(pipe_class_default);
%}

instruct sxtI_reg(iRegIdst dst, iRegIsrc src) %{
  match(Set dst (ConvL2I (ConvI2L src)));

  format %{ "EXTSW $dst, $src \t// int->int" %}
  size(4);
  ins_encode %{
    __ extsw($dst$$Register, $src$$Register);
  %}
  ins_pipe(pipe_class_default);
%}

//----------Rotate Instructions------------------------------------------------

// Rotate Left by 8-bit immediate
instruct rotlI_reg_immi8(iRegIdst dst, iRegIsrc src, immI8 lshift, immI8 rshift) %{
  match(Set dst (OrI (LShiftI src lshift) (URShiftI src rshift)));
  predicate(0 == ((n->in(1)->in(2)->get_int() + n->in(2)->in(2)->get_int()) & 0x1f));

  format %{ "ROTLWI $dst, $src, $lshift" %}
  size(4);
  ins_encode %{
    __ rotlwi($dst$$Register, $src$$Register, $lshift$$constant);
  %}
  ins_pipe(pipe_class_default);
%}

// Rotate Right by 8-bit immediate
instruct rotrI_reg_immi8(iRegIdst dst, iRegIsrc src, immI8 rshift, immI8 lshift) %{
  match(Set dst (OrI (URShiftI src rshift) (LShiftI src lshift)));
  predicate(0 == ((n->in(1)->in(2)->get_int() + n->in(2)->in(2)->get_int()) & 0x1f));

  format %{ "ROTRWI $dst, $src, $rshift" %}
  size(4);
  ins_encode %{
    __ rotrwi($dst$$Register, $src$$Register, $rshift$$constant);
  %}
  ins_pipe(pipe_class_default);
%}

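// Note: these rules recognize the shift/or rotate idiom; the predicate
// requires the two shift counts to sum to 0 mod 32, so e.g.
// (x << 3) | (x >>> 29) matches and collapses into a single ROTLWI by 3.
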
//----------Floating Point Arithmetic Instructions-----------------------------

// Add float single precision
instruct addF_reg_reg(regF dst, regF src1, regF src2) %{
  match(Set dst (AddF src1 src2));

  format %{ "FADDS $dst, $src1, $src2" %}
  size(4);
  ins_encode %{
    __ fadds($dst$$FloatRegister, $src1$$FloatRegister, $src2$$FloatRegister);
  %}
  ins_pipe(pipe_class_default);
%}

// Add float double precision
instruct addD_reg_reg(regD dst, regD src1, regD src2) %{
  match(Set dst (AddD src1 src2));

  format %{ "FADD $dst, $src1, $src2" %}
  size(4);
  ins_encode %{
    __ fadd($dst$$FloatRegister, $src1$$FloatRegister, $src2$$FloatRegister);
  %}
  ins_pipe(pipe_class_default);
%}

// Sub float single precision
instruct subF_reg_reg(regF dst, regF src1, regF src2) %{
  match(Set dst (SubF src1 src2));

  format %{ "FSUBS $dst, $src1, $src2" %}
  size(4);
  ins_encode %{
    __ fsubs($dst$$FloatRegister, $src1$$FloatRegister, $src2$$FloatRegister);
  %}
  ins_pipe(pipe_class_default);
%}

// Sub float double precision
instruct subD_reg_reg(regD dst, regD src1, regD src2) %{
  match(Set dst (SubD src1 src2));
  format %{ "FSUB $dst, $src1, $src2" %}
  size(4);
  ins_encode %{
    __ fsub($dst$$FloatRegister, $src1$$FloatRegister, $src2$$FloatRegister);
  %}
  ins_pipe(pipe_class_default);
%}

// Mul float single precision
instruct mulF_reg_reg(regF dst, regF src1, regF src2) %{
  match(Set dst (MulF src1 src2));
  format %{ "FMULS $dst, $src1, $src2" %}
  size(4);
  ins_encode %{
    __ fmuls($dst$$FloatRegister, $src1$$FloatRegister, $src2$$FloatRegister);
  %}
  ins_pipe(pipe_class_default);
%}

// Mul float double precision
instruct mulD_reg_reg(regD dst, regD src1, regD src2) %{
  match(Set dst (MulD src1 src2));
  format %{ "FMUL $dst, $src1, $src2" %}
  size(4);
  ins_encode %{
    __ fmul($dst$$FloatRegister, $src1$$FloatRegister, $src2$$FloatRegister);
  %}
  ins_pipe(pipe_class_default);
%}

// Div float single precision
instruct divF_reg_reg(regF dst, regF src1, regF src2) %{
  match(Set dst (DivF src1 src2));
  format %{ "FDIVS $dst, $src1, $src2" %}
  size(4);
  ins_encode %{
    __ fdivs($dst$$FloatRegister, $src1$$FloatRegister, $src2$$FloatRegister);
  %}
  ins_pipe(pipe_class_default);
%}

// Div float double precision
instruct divD_reg_reg(regD dst, regD src1, regD src2) %{
  match(Set dst (DivD src1 src2));
  format %{ "FDIV $dst, $src1, $src2" %}
  size(4);
  ins_encode %{
    __ fdiv($dst$$FloatRegister, $src1$$FloatRegister, $src2$$FloatRegister);
  %}
  ins_pipe(pipe_class_default);
%}

// Absolute float single precision
instruct absF_reg(regF dst, regF src) %{
  match(Set dst (AbsF src));
  format %{ "FABS $dst, $src \t// float" %}
  size(4);
  ins_encode %{
    __ fabs($dst$$FloatRegister, $src$$FloatRegister);
  %}
  ins_pipe(pipe_class_default);
%}

// Absolute float double precision
instruct absD_reg(regD dst, regD src) %{
  match(Set dst (AbsD src));
  format %{ "FABS $dst, $src \t// double" %}
  size(4);
  ins_encode %{
    __ fabs($dst$$FloatRegister, $src$$FloatRegister);
  %}
  ins_pipe(pipe_class_default);
%}

instruct negF_reg(regF dst, regF src) %{
  match(Set dst (NegF src));
  format %{ "FNEG $dst, $src \t// float" %}
  size(4);
  ins_encode %{
    __ fneg($dst$$FloatRegister, $src$$FloatRegister);
  %}
  ins_pipe(pipe_class_default);
%}

instruct negD_reg(regD dst, regD src) %{
  match(Set dst (NegD src));
  format %{ "FNEG $dst, $src \t// double" %}
  size(4);
  ins_encode %{
    __ fneg($dst$$FloatRegister, $src$$FloatRegister);
  %}
  ins_pipe(pipe_class_default);
%}

// AbsF + NegF.
instruct negF_absF_reg(regF dst, regF src) %{
  match(Set dst (NegF (AbsF src)));
  format %{ "FNABS $dst, $src \t// float" %}
  size(4);
  ins_encode %{
    __ fnabs($dst$$FloatRegister, $src$$FloatRegister);
  %}
  ins_pipe(pipe_class_default);
%}

// AbsD + NegD.
instruct negD_absD_reg(regD dst, regD src) %{
  match(Set dst (NegD (AbsD src)));
  format %{ "FNABS $dst, $src \t// double" %}
  size(4);
  ins_encode %{
    __ fnabs($dst$$FloatRegister, $src$$FloatRegister);
  %}
  ins_pipe(pipe_class_default);
%}

// Sqrt float double precision
instruct sqrtD_reg(regD dst, regD src) %{
  match(Set dst (SqrtD src));
  format %{ "FSQRT $dst, $src" %}
  size(4);
  ins_encode %{
    __ fsqrt($dst$$FloatRegister, $src$$FloatRegister);
  %}
  ins_pipe(pipe_class_default);
%}

// Single-precision sqrt.
instruct sqrtF_reg(regF dst, regF src) %{
  match(Set dst (SqrtF src));
  ins_cost(DEFAULT_COST);

  format %{ "FSQRTS $dst, $src" %}
  size(4);
  ins_encode %{
    __ fsqrts($dst$$FloatRegister, $src$$FloatRegister);
  %}
  ins_pipe(pipe_class_default);
%}

// Multiply-Accumulate
// src1 * src2 + src3
instruct maddF_reg_reg(regF dst, regF src1, regF src2, regF src3) %{
  match(Set dst (FmaF src3 (Binary src1 src2)));

  format %{ "FMADDS $dst, $src1, $src2, $src3" %}
  size(4);
  ins_encode %{
    assert(UseFMA, "Needs FMA instructions support.");
    __ fmadds($dst$$FloatRegister, $src1$$FloatRegister, $src2$$FloatRegister, $src3$$FloatRegister);
  %}
  ins_pipe(pipe_class_default);
%}

// src1 * src2 + src3
instruct maddD_reg_reg(regD dst, regD src1, regD src2, regD src3) %{
  match(Set dst (FmaD src3 (Binary src1 src2)));

  format %{ "FMADD $dst, $src1, $src2, $src3" %}
  size(4);
  ins_encode %{
    assert(UseFMA, "Needs FMA instructions support.");
    __ fmadd($dst$$FloatRegister, $src1$$FloatRegister, $src2$$FloatRegister, $src3$$FloatRegister);
  %}
  ins_pipe(pipe_class_default);
%}

// src1 * (-src2) + src3 = -(src1*src2-src3)
// "(-src1) * src2 + src3" has been idealized to "src2 * (-src1) + src3"
instruct mnsubF_reg_reg(regF dst, regF src1, regF src2, regF src3) %{
  match(Set dst (FmaF src3 (Binary src1 (NegF src2))));

  format %{ "FNMSUBS $dst, $src1, $src2, $src3" %}
  size(4);
  ins_encode %{
    assert(UseFMA, "Needs FMA instructions support.");
    __ fnmsubs($dst$$FloatRegister, $src1$$FloatRegister, $src2$$FloatRegister, $src3$$FloatRegister);
  %}
  ins_pipe(pipe_class_default);
%}

// src1 * (-src2) + src3 = -(src1*src2-src3)
// "(-src1) * src2 + src3" has been idealized to "src2 * (-src1) + src3"
instruct mnsubD_reg_reg(regD dst, regD src1, regD src2, regD src3) %{
  match(Set dst (FmaD src3 (Binary src1 (NegD src2))));

  format %{ "FNMSUB $dst, $src1, $src2, $src3" %}
  size(4);
  ins_encode %{
    assert(UseFMA, "Needs FMA instructions support.");
    __ fnmsub($dst$$FloatRegister, $src1$$FloatRegister, $src2$$FloatRegister, $src3$$FloatRegister);
  %}
  ins_pipe(pipe_class_default);
%}

// src1 * (-src2) - src3 = -(src1*src2+src3)
// "(-src1) * src2 - src3" has been idealized to "src2 * (-src1) - src3"
instruct mnaddF_reg_reg(regF dst, regF src1, regF src2, regF src3) %{
  match(Set dst (FmaF (NegF src3) (Binary src1 (NegF src2))));

  format %{ "FNMADDS $dst, $src1, $src2, $src3" %}
  size(4);
  ins_encode %{
    assert(UseFMA, "Needs FMA instructions support.");
    __ fnmadds($dst$$FloatRegister, $src1$$FloatRegister, $src2$$FloatRegister, $src3$$FloatRegister);
  %}
  ins_pipe(pipe_class_default);
%}

// src1 * (-src2) - src3 = -(src1*src2+src3)
// "(-src1) * src2 - src3" has been idealized to "src2 * (-src1) - src3"
instruct mnaddD_reg_reg(regD dst, regD src1, regD src2, regD src3) %{
  match(Set dst (FmaD (NegD src3) (Binary src1 (NegD src2))));

  format %{ "FNMADD $dst, $src1, $src2, $src3" %}
  size(4);
  ins_encode %{
    assert(UseFMA, "Needs FMA instructions support.");
    __ fnmadd($dst$$FloatRegister, $src1$$FloatRegister, $src2$$FloatRegister, $src3$$FloatRegister);
  %}
  ins_pipe(pipe_class_default);
%}

// src1 * src2 - src3
instruct msubF_reg_reg(regF dst, regF src1, regF src2, regF src3) %{
  match(Set dst (FmaF (NegF src3) (Binary src1 src2)));

  format %{ "FMSUBS $dst, $src1, $src2, $src3" %}
  size(4);
  ins_encode %{
    assert(UseFMA, "Needs FMA instructions support.");
    __ fmsubs($dst$$FloatRegister, $src1$$FloatRegister, $src2$$FloatRegister, $src3$$FloatRegister);
  %}
  ins_pipe(pipe_class_default);
%}

// src1 * src2 - src3
instruct msubD_reg_reg(regD dst, regD src1, regD src2, regD src3) %{
  match(Set dst (FmaD (NegD src3) (Binary src1 src2)));

  format %{ "FMSUB $dst, $src1, $src2, $src3" %}
  size(4);
  ins_encode %{
    assert(UseFMA, "Needs FMA instructions support.");
    __ fmsub($dst$$FloatRegister, $src1$$FloatRegister, $src2$$FloatRegister, $src3$$FloatRegister);
  %}
  ins_pipe(pipe_class_default);
%}

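// Note: all eight FMA rules above map onto a single fused instruction.
// Java's Math.fma requires the intermediate product not to be rounded before
// the add, which is exactly the fused semantics of fmadd and its
// negated/subtracting variants.
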
//----------Logical Instructions-----------------------------------------------

// And Instructions

// Register And
instruct andI_reg_reg(iRegIdst dst, iRegIsrc src1, iRegIsrc src2) %{
  match(Set dst (AndI src1 src2));
  format %{ "AND $dst, $src1, $src2" %}
  size(4);
  ins_encode %{
    __ andr($dst$$Register, $src1$$Register, $src2$$Register);
  %}
  ins_pipe(pipe_class_default);
%}

// Left shifted Immediate And
instruct andI_reg_immIhi16(iRegIdst dst, iRegIsrc src1, immIhi16 src2, flagsRegCR0 cr0) %{
  match(Set dst (AndI src1 src2));
  effect(KILL cr0);
  format %{ "ANDIS $dst, $src1, $src2.hi" %}
  size(4);
  ins_encode %{
    __ andis_($dst$$Register, $src1$$Register, (int)((unsigned short)(($src2$$constant & 0xFFFF0000) >> 16)));
  %}
  ins_pipe(pipe_class_default);
%}

// Immediate And
instruct andI_reg_uimm16(iRegIdst dst, iRegIsrc src1, uimmI16 src2, flagsRegCR0 cr0) %{
  match(Set dst (AndI src1 src2));
  effect(KILL cr0);

  format %{ "ANDI $dst, $src1, $src2" %}
  size(4);
  ins_encode %{
    // FIXME: avoid andi_ ?
    __ andi_($dst$$Register, $src1$$Register, $src2$$constant);
  %}
  ins_pipe(pipe_class_default);
%}

// Immediate And where the immediate is a negative power of 2.
instruct andI_reg_immInegpow2(iRegIdst dst, iRegIsrc src1, immInegpow2 src2) %{
  match(Set dst (AndI src1 src2));
  format %{ "ANDWI $dst, $src1, $src2" %}
  size(4);
  ins_encode %{
    __ clrrdi($dst$$Register, $src1$$Register, log2i_exact(-(juint)$src2$$constant));
  %}
  ins_pipe(pipe_class_default);
%}

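// Illustration: a negative power of two is a contiguous mask of high bits,
// so e.g. "x & -8" just clears the low 3 bits and becomes
// "clrrdi dst, src1, 3" (3 = log2(8)), with no flags side effect.
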
instruct andI_reg_immIpow2minus1(iRegIdst dst, iRegIsrc src1, immIpow2minus1 src2) %{
  match(Set dst (AndI src1 src2));
  format %{ "ANDWI $dst, $src1, $src2" %}
  size(4);
  ins_encode %{
    __ clrldi($dst$$Register, $src1$$Register, 64 - log2i_exact((juint)$src2$$constant + 1u));
  %}
  ins_pipe(pipe_class_default);
%}

instruct andI_reg_immIpowerOf2(iRegIdst dst, iRegIsrc src1, immIpowerOf2 src2) %{
  match(Set dst (AndI src1 src2));
  predicate(UseRotateAndMaskInstructionsPPC64);
  format %{ "ANDWI $dst, $src1, $src2" %}
  size(4);
  ins_encode %{
    int bitpos = 31 - log2i_exact((juint)$src2$$constant);
    __ rlwinm($dst$$Register, $src1$$Register, 0, bitpos, bitpos);
  %}
  ins_pipe(pipe_class_default);
%}

// Register And Long
instruct andL_reg_reg(iRegLdst dst, iRegLsrc src1, iRegLsrc src2) %{
  match(Set dst (AndL src1 src2));
  ins_cost(DEFAULT_COST);

  format %{ "AND $dst, $src1, $src2 \t// long" %}
  size(4);
  ins_encode %{
    __ andr($dst$$Register, $src1$$Register, $src2$$Register);
  %}
  ins_pipe(pipe_class_default);
%}

// Immediate And long
instruct andL_reg_uimm16(iRegLdst dst, iRegLsrc src1, uimmL16 src2, flagsRegCR0 cr0) %{
  match(Set dst (AndL src1 src2));
  effect(KILL cr0);

  format %{ "ANDI $dst, $src1, $src2 \t// long" %}
  size(4);
  ins_encode %{
    // FIXME: avoid andi_ ?
    __ andi_($dst$$Register, $src1$$Register, $src2$$constant);
  %}
  ins_pipe(pipe_class_default);
%}

// Immediate And Long where the immediate is a negative power of 2.
instruct andL_reg_immLnegpow2(iRegLdst dst, iRegLsrc src1, immLnegpow2 src2) %{
  match(Set dst (AndL src1 src2));
  format %{ "ANDDI $dst, $src1, $src2" %}
  size(4);
  ins_encode %{
    __ clrrdi($dst$$Register, $src1$$Register, log2i_exact(-(julong)$src2$$constant));
  %}
  ins_pipe(pipe_class_default);
%}

instruct andL_reg_immLpow2minus1(iRegLdst dst, iRegLsrc src1, immLpow2minus1 src2) %{
  match(Set dst (AndL src1 src2));
  format %{ "ANDDI $dst, $src1, $src2" %}
  size(4);
  ins_encode %{
    __ clrldi($dst$$Register, $src1$$Register, 64 - log2i_exact((julong)$src2$$constant + 1ull));
  %}
  ins_pipe(pipe_class_default);
%}

// AndL + ConvL2I.
instruct convL2I_andL_reg_immLpow2minus1(iRegIdst dst, iRegLsrc src1, immLpow2minus1 src2) %{
  match(Set dst (ConvL2I (AndL src1 src2)));
  ins_cost(DEFAULT_COST);

  format %{ "ANDDI $dst, $src1, $src2 \t// long + l2i" %}
  size(4);
  ins_encode %{
    __ clrldi($dst$$Register, $src1$$Register, 64 - log2i_exact((julong)$src2$$constant + 1ull));
  %}
  ins_pipe(pipe_class_default);
%}

// Or Instructions

// Register Or
instruct orI_reg_reg(iRegIdst dst, iRegIsrc src1, iRegIsrc src2) %{
  match(Set dst (OrI src1 src2));
  format %{ "OR $dst, $src1, $src2" %}
  size(4);
  ins_encode %{
    __ or_unchecked($dst$$Register, $src1$$Register, $src2$$Register);
  %}
  ins_pipe(pipe_class_default);
%}

// Expand does not work with above instruct. (??)
instruct orI_reg_reg_2(iRegIdst dst, iRegIsrc src1, iRegIsrc src2) %{
  // no match-rule
  effect(DEF dst, USE src1, USE src2);
  format %{ "OR $dst, $src1, $src2" %}
  size(4);
  ins_encode %{
    __ or_unchecked($dst$$Register, $src1$$Register, $src2$$Register);
  %}
  ins_pipe(pipe_class_default);
%}

instruct tree_orI_orI_orI_reg_reg_Ex(iRegIdst dst, iRegIsrc src1, iRegIsrc src2, iRegIsrc src3, iRegIsrc src4) %{
  match(Set dst (OrI (OrI (OrI src1 src2) src3) src4));
  ins_cost(DEFAULT_COST*3);

  expand %{
    // FIXME: we should do this in the ideal world.
    iRegIdst tmp1;
    iRegIdst tmp2;
    orI_reg_reg(tmp1, src1, src2);
    orI_reg_reg_2(tmp2, src3, src4); // Adlc complains about orI_reg_reg.
    orI_reg_reg(dst, tmp1, tmp2);
  %}
%}

// Immediate Or
instruct orI_reg_uimm16(iRegIdst dst, iRegIsrc src1, uimmI16 src2) %{
  match(Set dst (OrI src1 src2));
  format %{ "ORI $dst, $src1, $src2" %}
  size(4);
  ins_encode %{
    __ ori($dst$$Register, $src1$$Register, ($src2$$constant) & 0xFFFF);
  %}
  ins_pipe(pipe_class_default);
%}

// Register Or Long
instruct orL_reg_reg(iRegLdst dst, iRegLsrc src1, iRegLsrc src2) %{
  match(Set dst (OrL src1 src2));
  ins_cost(DEFAULT_COST);

  size(4);
  format %{ "OR $dst, $src1, $src2 \t// long" %}
  ins_encode %{
    __ or_unchecked($dst$$Register, $src1$$Register, $src2$$Register);
  %}
  ins_pipe(pipe_class_default);
%}

// OrL + ConvL2I.
instruct orI_regL_regL(iRegIdst dst, iRegLsrc src1, iRegLsrc src2) %{
  match(Set dst (ConvL2I (OrL src1 src2)));
  ins_cost(DEFAULT_COST);

  format %{ "OR $dst, $src1, $src2 \t// long + l2i" %}
  size(4);
  ins_encode %{
    __ or_unchecked($dst$$Register, $src1$$Register, $src2$$Register);
  %}
  ins_pipe(pipe_class_default);
%}

// Immediate Or long
instruct orL_reg_uimm16(iRegLdst dst, iRegLsrc src1, uimmL16 con) %{
  match(Set dst (OrL src1 con));
  ins_cost(DEFAULT_COST);

  format %{ "ORI $dst, $src1, $con \t// long" %}
  size(4);
  ins_encode %{
    __ ori($dst$$Register, $src1$$Register, ($con$$constant) & 0xFFFF);
  %}
  ins_pipe(pipe_class_default);
%}

// Xor Instructions

// Register Xor
instruct xorI_reg_reg(iRegIdst dst, iRegIsrc src1, iRegIsrc src2) %{
  match(Set dst (XorI src1 src2));
  format %{ "XOR $dst, $src1, $src2" %}
  size(4);
  ins_encode %{
    __ xorr($dst$$Register, $src1$$Register, $src2$$Register);
  %}
  ins_pipe(pipe_class_default);
%}

// Expand does not work with above instruct. (??)
instruct xorI_reg_reg_2(iRegIdst dst, iRegIsrc src1, iRegIsrc src2) %{
  // no match-rule
  effect(DEF dst, USE src1, USE src2);
  format %{ "XOR $dst, $src1, $src2" %}
  size(4);
  ins_encode %{
    __ xorr($dst$$Register, $src1$$Register, $src2$$Register);
  %}
  ins_pipe(pipe_class_default);
%}

instruct tree_xorI_xorI_xorI_reg_reg_Ex(iRegIdst dst, iRegIsrc src1, iRegIsrc src2, iRegIsrc src3, iRegIsrc src4) %{
  match(Set dst (XorI (XorI (XorI src1 src2) src3) src4));
  ins_cost(DEFAULT_COST*3);

  expand %{
    // FIXME: we should do this in the ideal world.
    iRegIdst tmp1;
    iRegIdst tmp2;
    xorI_reg_reg(tmp1, src1, src2);
    xorI_reg_reg_2(tmp2, src3, src4); // Adlc complains about xorI_reg_reg.
    xorI_reg_reg(dst, tmp1, tmp2);
  %}
%}

// Immediate Xor
instruct xorI_reg_uimm16(iRegIdst dst, iRegIsrc src1, uimmI16 src2) %{
  match(Set dst (XorI src1 src2));
  format %{ "XORI $dst, $src1, $src2" %}
  size(4);
  ins_encode %{
    __ xori($dst$$Register, $src1$$Register, $src2$$constant);
  %}
  ins_pipe(pipe_class_default);
%}

// Register Xor Long
instruct xorL_reg_reg(iRegLdst dst, iRegLsrc src1, iRegLsrc src2) %{
  match(Set dst (XorL src1 src2));
  ins_cost(DEFAULT_COST);

  format %{ "XOR $dst, $src1, $src2 \t// long" %}
  size(4);
  ins_encode %{
    __ xorr($dst$$Register, $src1$$Register, $src2$$Register);
  %}
  ins_pipe(pipe_class_default);
%}

// XorL + ConvL2I.
instruct xorI_regL_regL(iRegIdst dst, iRegLsrc src1, iRegLsrc src2) %{
  match(Set dst (ConvL2I (XorL src1 src2)));
  ins_cost(DEFAULT_COST);

  format %{ "XOR $dst, $src1, $src2 \t// long + l2i" %}
  size(4);
  ins_encode %{
    __ xorr($dst$$Register, $src1$$Register, $src2$$Register);
  %}
  ins_pipe(pipe_class_default);
%}

// Immediate Xor Long
instruct xorL_reg_uimm16(iRegLdst dst, iRegLsrc src1, uimmL16 src2) %{
  match(Set dst (XorL src1 src2));
  ins_cost(DEFAULT_COST);

  format %{ "XORI $dst, $src1, $src2 \t// long" %}
  size(4);
  ins_encode %{
    __ xori($dst$$Register, $src1$$Register, $src2$$constant);
  %}
  ins_pipe(pipe_class_default);
%}

instruct notI_reg(iRegIdst dst, iRegIsrc src1, immI_minus1 src2) %{
  match(Set dst (XorI src1 src2));
  ins_cost(DEFAULT_COST);

  format %{ "NOT $dst, $src1 ($src2)" %}
  size(4);
  ins_encode %{
    __ nor($dst$$Register, $src1$$Register, $src1$$Register);
  %}
  ins_pipe(pipe_class_default);
%}

instruct notL_reg(iRegLdst dst, iRegLsrc src1, immL_minus1 src2) %{
  match(Set dst (XorL src1 src2));
  ins_cost(DEFAULT_COST);

  format %{ "NOT $dst, $src1 ($src2) \t// long" %}
  size(4);
  ins_encode %{
    __ nor($dst$$Register, $src1$$Register, $src1$$Register);
  %}
  ins_pipe(pipe_class_default);
%}

// And-complement
|
|
instruct andcI_reg_reg(iRegIdst dst, iRegIsrc src1, immI_minus1 src2, iRegIsrc src3) %{
|
|
match(Set dst (AndI (XorI src1 src2) src3));
|
|
ins_cost(DEFAULT_COST);
|
|
|
|
format %{ "ANDW $dst, xori($src1, $src2), $src3" %}
|
|
size(4);
|
|
ins_encode( enc_andc(dst, src3, src1) );
|
|
ins_pipe(pipe_class_default);
|
|
%}
|
|
|
|
// And-complement
|
|
instruct andcL_reg_reg(iRegLdst dst, iRegLsrc src1, iRegLsrc src2) %{
|
|
// no match-rule, false predicate
|
|
effect(DEF dst, USE src1, USE src2);
|
|
predicate(false);
|
|
|
|
format %{ "ANDC $dst, $src1, $src2" %}
|
|
size(4);
|
|
ins_encode %{
|
|
__ andc($dst$$Register, $src1$$Register, $src2$$Register);
|
|
%}
|
|
ins_pipe(pipe_class_default);
|
|
%}
|
|
|
|
//----------Moves between int/long and float/double----------------------------
|
|
//
|
|
// The following rules move values from int/long registers/stack-locations
|
|
// to float/double registers/stack-locations and vice versa, without doing any
|
|
// conversions. These rules are used to implement the bit-conversion methods
|
|
// of java.lang.Float etc., e.g.
|
|
// int floatToIntBits(float value)
|
|
// float intBitsToFloat(int bits)
|
|
//
|
|
// Notes on the implementation on ppc64:
|
|
// For Power7 and earlier, the rules are limited to those which move between a
|
|
// register and a stack-location, because we always have to go through memory
|
|
// when moving between a float register and an integer register.
|
|
// This restriction is removed in Power8 with the introduction of the mtfprd
|
|
// and mffprd instructions.
|
|
|
|
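// A minimal C++ illustration (comments only, not part of this file's build)
// of the two strategies; the memcpy models the Power7 path through a stack
// slot, while on Power8 a single mtfprd/mffprd moves the bits directly
// between the register files:
//
//   #include <cstdint>
//   #include <cstring>
//
//   int64_t doubleToRawLongBits(double value) {   // cf. java.lang.Double
//     int64_t bits;
//     std::memcpy(&bits, &value, sizeof(bits));   // STFD + LD on Power7
//     return bits;                                // one MFFPRD on Power8
//   }
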
instruct moveL2D_reg(regD dst, iRegLsrc src) %{
  match(Set dst (MoveL2D src));

  format %{ "MTFPRD $dst, $src" %}
  size(4);
  ins_encode %{
    __ mtfprd($dst$$FloatRegister, $src$$Register);
  %}
  ins_pipe(pipe_class_default);
%}

instruct moveI2D_reg(regD dst, iRegIsrc src) %{
  // no match-rule, false predicate
  effect(DEF dst, USE src);
  predicate(false);

  format %{ "MTFPRWA $dst, $src" %}
  size(4);
  ins_encode %{
    __ mtfprwa($dst$$FloatRegister, $src$$Register);
  %}
  ins_pipe(pipe_class_default);
%}

//---------- Chain stack slots between similar types --------

// These are needed so that the rules below can match.

// Load integer from stack slot
instruct stkI_to_regI(iRegIdst dst, stackSlotI src) %{
  match(Set dst src);
  ins_cost(MEMORY_REF_COST);

  format %{ "LWZ $dst, $src" %}
  size(4);
  ins_encode( enc_lwz(dst, src) );
  ins_pipe(pipe_class_memory);
%}

// Store integer to stack slot
instruct regI_to_stkI(stackSlotI dst, iRegIsrc src) %{
  match(Set dst src);
  ins_cost(MEMORY_REF_COST);

  format %{ "STW $src, $dst \t// stk" %}
  size(4);
  ins_encode( enc_stw(src, dst) ); // rs=rt
  ins_pipe(pipe_class_memory);
%}

// Load long from stack slot
instruct stkL_to_regL(iRegLdst dst, stackSlotL src) %{
  match(Set dst src);
  ins_cost(MEMORY_REF_COST);

  format %{ "LD $dst, $src \t// long" %}
  size(4);
  ins_encode( enc_ld(dst, src) );
  ins_pipe(pipe_class_memory);
%}

// Store long to stack slot
instruct regL_to_stkL(stackSlotL dst, iRegLsrc src) %{
  match(Set dst src);
  ins_cost(MEMORY_REF_COST);

  format %{ "STD $src, $dst \t// long" %}
  size(4);
  ins_encode( enc_std(src, dst) ); // rs=rt
  ins_pipe(pipe_class_memory);
%}

//----------Moves between int and float

// Move float value from float stack-location to integer register.
instruct moveF2I_stack_reg(iRegIdst dst, stackSlotF src) %{
  match(Set dst (MoveF2I src));
  ins_cost(MEMORY_REF_COST);

  format %{ "LWZ $dst, $src \t// MoveF2I" %}
  size(4);
  ins_encode( enc_lwz(dst, src) );
  ins_pipe(pipe_class_memory);
%}

// Move float value from float register to integer stack-location.
instruct moveF2I_reg_stack(stackSlotI dst, regF src) %{
  match(Set dst (MoveF2I src));
  ins_cost(MEMORY_REF_COST);

  format %{ "STFS $src, $dst \t// MoveF2I" %}
  size(4);
  ins_encode( enc_stfs(src, dst) );
  ins_pipe(pipe_class_memory);
%}

// Move integer value from integer stack-location to float register.
instruct moveI2F_stack_reg(regF dst, stackSlotI src) %{
  match(Set dst (MoveI2F src));
  ins_cost(MEMORY_REF_COST);

  format %{ "LFS $dst, $src \t// MoveI2F" %}
  size(4);
  ins_encode %{
    int Idisp = $src$$disp + frame_slots_bias($src$$base, ra_);
    __ lfs($dst$$FloatRegister, Idisp, $src$$base$$Register);
  %}
  ins_pipe(pipe_class_memory);
%}

// Move integer value from integer register to float stack-location.
instruct moveI2F_reg_stack(stackSlotF dst, iRegIsrc src) %{
  match(Set dst (MoveI2F src));
  ins_cost(MEMORY_REF_COST);

  format %{ "STW $src, $dst \t// MoveI2F" %}
  size(4);
  ins_encode( enc_stw(src, dst) );
  ins_pipe(pipe_class_memory);
%}


//----------Moves between long and double

// Move double value from double stack-location to long register.
instruct moveD2L_stack_reg(iRegLdst dst, stackSlotD src) %{
  match(Set dst (MoveD2L src));
  ins_cost(MEMORY_REF_COST);
  size(4);
  format %{ "LD $dst, $src \t// MoveD2L" %}
  ins_encode( enc_ld(dst, src) );
  ins_pipe(pipe_class_memory);
%}

// Move double value from double register to long stack-location.
instruct moveD2L_reg_stack(stackSlotL dst, regD src) %{
  match(Set dst (MoveD2L src));
  effect(DEF dst, USE src);
  ins_cost(MEMORY_REF_COST);

  format %{ "STFD $src, $dst \t// MoveD2L" %}
  size(4);
  ins_encode( enc_stfd(src, dst) );
  ins_pipe(pipe_class_memory);
%}


//----------Register Move Instructions-----------------------------------------

// Replicate for Superword

instruct moveReg(iRegLdst dst, iRegIsrc src) %{
  predicate(false);
  effect(DEF dst, USE src);

  format %{ "MR $dst, $src \t// replicate " %}
  // variable size, 0 or 4.
  ins_encode %{
    __ mr_if_needed($dst$$Register, $src$$Register);
  %}
  ins_pipe(pipe_class_default);
%}

//----------Cast instructions (Java-level type cast)---------------------------

// Cast Long to Pointer for unsafe natives.
instruct castX2P(iRegPdst dst, iRegLsrc src) %{
  match(Set dst (CastX2P src));

  format %{ "MR $dst, $src \t// Long->Ptr" %}
  // variable size, 0 or 4.
  ins_encode %{
    __ mr_if_needed($dst$$Register, $src$$Register);
  %}
  ins_pipe(pipe_class_default);
%}

// Cast Pointer to Long for unsafe natives.
instruct castP2X(iRegLdst dst, iRegP_N2P src) %{
  match(Set dst (CastP2X src));

  format %{ "MR $dst, $src \t// Ptr->Long" %}
  // variable size, 0 or 4.
  ins_encode %{
    __ mr_if_needed($dst$$Register, $src$$Register);
  %}
  ins_pipe(pipe_class_default);
%}

instruct castPP(iRegPdst dst) %{
  match(Set dst (CastPP dst));
  format %{ " -- \t// castPP of $dst" %}
  size(0);
  ins_encode( /*empty*/ );
  ins_pipe(pipe_class_default);
%}

instruct castII(iRegIdst dst) %{
  match(Set dst (CastII dst));
  format %{ " -- \t// castII of $dst" %}
  size(0);
  ins_encode( /*empty*/ );
  ins_pipe(pipe_class_default);
%}

instruct castLL(iRegLdst dst) %{
  match(Set dst (CastLL dst));
  format %{ " -- \t// castLL of $dst" %}
  size(0);
  ins_encode( /*empty*/ );
  ins_pipe(pipe_class_default);
%}

instruct castFF(regF dst) %{
  match(Set dst (CastFF dst));
  format %{ " -- \t// castFF of $dst" %}
  size(0);
  ins_encode( /*empty*/ );
  ins_pipe(pipe_class_default);
%}

instruct castDD(regD dst) %{
  match(Set dst (CastDD dst));
  format %{ " -- \t// castDD of $dst" %}
  size(0);
  ins_encode( /*empty*/ );
  ins_pipe(pipe_class_default);
%}

instruct castVV8(iRegLdst dst) %{
  match(Set dst (CastVV dst));
  format %{ " -- \t// castVV of $dst" %}
  size(0);
  ins_encode( /*empty*/ );
  ins_pipe(pipe_class_default);
%}

instruct castVV16(vecX dst) %{
  match(Set dst (CastVV dst));
  format %{ " -- \t// castVV of $dst" %}
  size(0);
  ins_encode( /*empty*/ );
  ins_pipe(pipe_class_default);
%}

instruct checkCastPP(iRegPdst dst) %{
  match(Set dst (CheckCastPP dst));
  format %{ " -- \t// checkcastPP of $dst" %}
  size(0);
  ins_encode( /*empty*/ );
  ins_pipe(pipe_class_default);
%}

//----------Convert instructions-----------------------------------------------

// Convert to boolean.

// int_to_bool(src) : { 1 if src != 0
//                    { 0 else
//
// strategy:
// 1) Count leading zeros of 32 bit-value src,
//    this returns 32 (0b10.0000) iff src == 0 and <32 otherwise.
// 2) Shift 5 bits to the right, result is 0b1 iff src == 0, 0b0 otherwise.
// 3) Xori the result to get 0b1 if src != 0 and 0b0 if src == 0.

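// A minimal C++ sketch of this strategy (comments only, not part of the
// build); count_leading_zeros_32 is a stand-in for the cntlzw instruction:
//
//   #include <cstdint>
//
//   int count_leading_zeros_32(uint32_t x) {         // cntlzw semantics
//     int n = 0;
//     while (n < 32 && !(x & (0x80000000u >> n))) n++;
//     return n;                                      // 32 iff x == 0
//   }
//
//   int int_to_bool(uint32_t src) {
//     return (count_leading_zeros_32(src) >> 5) ^ 1; // 1 iff src != 0
//   }
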
// convI2Bool
instruct convI2Bool_reg__cntlz_Ex(iRegIdst dst, iRegIsrc src) %{
  match(Set dst (Conv2B src));
  predicate(UseCountLeadingZerosInstructionsPPC64);
  ins_cost(DEFAULT_COST);

  expand %{
    immI shiftAmount %{ 0x5 %}
    uimmI16 mask %{ 0x1 %}
    iRegIdst tmp1;
    iRegIdst tmp2;
    countLeadingZerosI(tmp1, src);
    urShiftI_reg_imm(tmp2, tmp1, shiftAmount);
    xorI_reg_uimm16(dst, tmp2, mask);
  %}
%}

instruct convI2Bool_reg__cmove(iRegIdst dst, iRegIsrc src, flagsReg crx) %{
  match(Set dst (Conv2B src));
  effect(TEMP crx);
  predicate(!UseCountLeadingZerosInstructionsPPC64);
  ins_cost(DEFAULT_COST);

  format %{ "CMPWI $crx, $src, #0 \t// convI2B"
            "LI $dst, #0\n\t"
            "BEQ $crx, done\n\t"
            "LI $dst, #1\n"
            "done:" %}
  size(16);
  ins_encode( enc_convI2B_regI__cmove(dst, src, crx, 0x0, 0x1) );
  ins_pipe(pipe_class_compare);
%}

// ConvI2B + XorI
instruct xorI_convI2Bool_reg_immIvalue1__cntlz_Ex(iRegIdst dst, iRegIsrc src, immI_1 mask) %{
  match(Set dst (XorI (Conv2B src) mask));
  predicate(UseCountLeadingZerosInstructionsPPC64);
  ins_cost(DEFAULT_COST);

  expand %{
    immI shiftAmount %{ 0x5 %}
    iRegIdst tmp1;
    countLeadingZerosI(tmp1, src);
    urShiftI_reg_imm(dst, tmp1, shiftAmount);
  %}
%}

instruct xorI_convI2Bool_reg_immIvalue1__cmove(iRegIdst dst, iRegIsrc src, flagsReg crx, immI_1 mask) %{
  match(Set dst (XorI (Conv2B src) mask));
  effect(TEMP crx);
  predicate(!UseCountLeadingZerosInstructionsPPC64);
  ins_cost(DEFAULT_COST);

  format %{ "CMPWI $crx, $src, #0 \t// Xor(convI2B($src), $mask)"
            "LI $dst, #1\n\t"
            "BEQ $crx, done\n\t"
            "LI $dst, #0\n"
            "done:" %}
  size(16);
  ins_encode( enc_convI2B_regI__cmove(dst, src, crx, 0x1, 0x0) );
  ins_pipe(pipe_class_compare);
%}

// AndI 0b0..010..0 + ConvI2B
instruct convI2Bool_andI_reg_immIpowerOf2(iRegIdst dst, iRegIsrc src, immIpowerOf2 mask) %{
  match(Set dst (Conv2B (AndI src mask)));
  predicate(UseRotateAndMaskInstructionsPPC64);
  ins_cost(DEFAULT_COST);

  format %{ "RLWINM $dst, $src, $mask \t// convI2B(AndI($src, $mask))" %}
  size(4);
  ins_encode %{
    __ rlwinm($dst$$Register, $src$$Register, 32 - log2i_exact((juint)($mask$$constant)), 31, 31);
  %}
  ins_pipe(pipe_class_default);
%}

// Convert pointer to boolean.
//
// ptr_to_bool(src) : { 1 if src != 0
//                    { 0 else
//
// strategy:
// 1) Count leading zeros of 64 bit-value src,
//    this returns 64 (0b100.0000) iff src == 0 and <64 otherwise.
// 2) Shift 6 bits to the right, result is 0b1 iff src == 0, 0b0 otherwise.
// 3) Xori the result to get 0b1 if src != 0 and 0b0 if src == 0.

// ConvP2B
instruct convP2Bool_reg__cntlz_Ex(iRegIdst dst, iRegP_N2P src) %{
  match(Set dst (Conv2B src));
  predicate(UseCountLeadingZerosInstructionsPPC64);
  ins_cost(DEFAULT_COST);

  expand %{
    immI shiftAmount %{ 0x6 %}
    uimmI16 mask %{ 0x1 %}
    iRegIdst tmp1;
    iRegIdst tmp2;
    countLeadingZerosP(tmp1, src);
    urShiftI_reg_imm(tmp2, tmp1, shiftAmount);
    xorI_reg_uimm16(dst, tmp2, mask);
  %}
%}

instruct convP2Bool_reg__cmove(iRegIdst dst, iRegP_N2P src, flagsReg crx) %{
  match(Set dst (Conv2B src));
  effect(TEMP crx);
  predicate(!UseCountLeadingZerosInstructionsPPC64);
  ins_cost(DEFAULT_COST);

  format %{ "CMPDI $crx, $src, #0 \t// convP2B"
            "LI $dst, #0\n\t"
            "BEQ $crx, done\n\t"
            "LI $dst, #1\n"
            "done:" %}
  size(16);
  ins_encode( enc_convP2B_regP__cmove(dst, src, crx, 0x0, 0x1) );
  ins_pipe(pipe_class_compare);
%}

// ConvP2B + XorI
instruct xorI_convP2Bool_reg__cntlz_Ex(iRegIdst dst, iRegP_N2P src, immI_1 mask) %{
  match(Set dst (XorI (Conv2B src) mask));
  predicate(UseCountLeadingZerosInstructionsPPC64);
  ins_cost(DEFAULT_COST);

  expand %{
    immI shiftAmount %{ 0x6 %}
    iRegIdst tmp1;
    countLeadingZerosP(tmp1, src);
    urShiftI_reg_imm(dst, tmp1, shiftAmount);
  %}
%}

instruct xorI_convP2Bool_reg_immIvalue1__cmove(iRegIdst dst, iRegP_N2P src, flagsReg crx, immI_1 mask) %{
  match(Set dst (XorI (Conv2B src) mask));
  effect(TEMP crx);
  predicate(!UseCountLeadingZerosInstructionsPPC64);
  ins_cost(DEFAULT_COST);

  format %{ "CMPDI $crx, $src, #0 \t// XorI(convP2B($src), $mask)"
            "LI $dst, #1\n\t"
            "BEQ $crx, done\n\t"
            "LI $dst, #0\n"
            "done:" %}
  size(16);
  ins_encode( enc_convP2B_regP__cmove(dst, src, crx, 0x1, 0x0) );
  ins_pipe(pipe_class_compare);
%}

// if src1 < src2, return -1 else return 0
instruct cmpLTMask_reg_reg_Ex(iRegIdst dst, iRegIsrc src1, iRegIsrc src2) %{
  match(Set dst (CmpLTMask src1 src2));
  ins_cost(DEFAULT_COST*4);

  expand %{
    iRegLdst src1s;
    iRegLdst src2s;
    iRegLdst diff;
    convI2L_reg(src1s, src1); // Ensure proper sign extension.
    convI2L_reg(src2s, src2); // Ensure proper sign extension.
    subL_reg_reg(diff, src1s, src2s);
    // Need to consider >=33 bit result, therefore we need signmaskL.
    signmask64I_regL(dst, diff);
  %}
%}

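// Why the 64-bit sign mask is needed: the 32-bit difference can overflow.
// For example, src1 = 0x80000000 (min_jint) and src2 = 1 give
// src1 - src2 = -2^31 - 1, a 33-bit value; computed in 32 bits it would
// wrap to a positive number and yield the wrong mask. Sign-extending both
// operands and taking the sign of the 64-bit difference is always correct.
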
instruct cmpLTMask_reg_immI0(iRegIdst dst, iRegIsrc src1, immI_0 src2) %{
  match(Set dst (CmpLTMask src1 src2)); // if src1 < src2, return -1 else return 0
  format %{ "SRAWI $dst, $src1, $src2 \t// CmpLTMask" %}
  size(4);
  ins_encode %{
    __ srawi($dst$$Register, $src1$$Register, 0x1f);
  %}
  ins_pipe(pipe_class_default);
%}

//----------Arithmetic Conversion Instructions---------------------------------

// Convert to Byte -- nop
// Convert to Short -- nop

// Convert to Int

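// A Java cast such as (int)(byte) x reaches the matcher as the shift pair
// (x << 24) >> 24, which the rule below folds into a single EXTSB.
// Illustration in C++ (comments only; relies on arithmetic right shift):
//
//   int32_t byte2int(int32_t x) {
//     return (x << 24) >> 24;   // same result as (int32_t)(int8_t) x
//   }
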
instruct convB2I_reg(iRegIdst dst, iRegIsrc src, immI_24 amount) %{
  match(Set dst (RShiftI (LShiftI src amount) amount));
  format %{ "EXTSB $dst, $src \t// byte->int" %}
  size(4);
  ins_encode %{
    __ extsb($dst$$Register, $src$$Register);
  %}
  ins_pipe(pipe_class_default);
%}

instruct extsh(iRegIdst dst, iRegIsrc src) %{
  effect(DEF dst, USE src);

  size(4);
  ins_encode %{
    __ extsh($dst$$Register, $src$$Register);
  %}
  ins_pipe(pipe_class_default);
%}

// LShiftI 16 + RShiftI 16 converts short to int.
instruct convS2I_reg(iRegIdst dst, iRegIsrc src, immI_16 amount) %{
  match(Set dst (RShiftI (LShiftI src amount) amount));
  format %{ "EXTSH $dst, $src \t// short->int" %}
  size(4);
  ins_encode %{
    __ extsh($dst$$Register, $src$$Register);
  %}
  ins_pipe(pipe_class_default);
%}

// ConvL2I + ConvI2L: Sign extend int in long register.
instruct sxtI_L2L_reg(iRegLdst dst, iRegLsrc src) %{
  match(Set dst (ConvI2L (ConvL2I src)));

  format %{ "EXTSW $dst, $src \t// long->long" %}
  size(4);
  ins_encode %{
    __ extsw($dst$$Register, $src$$Register);
  %}
  ins_pipe(pipe_class_default);
%}

instruct convL2I_reg(iRegIdst dst, iRegLsrc src) %{
  match(Set dst (ConvL2I src));
  format %{ "MR $dst, $src \t// long->int" %}
  // variable size, 0 or 4
  ins_encode %{
    __ mr_if_needed($dst$$Register, $src$$Register);
  %}
  ins_pipe(pipe_class_default);
%}

instruct convD2IRaw_regD(regD dst, regD src) %{
  // no match-rule, false predicate
  effect(DEF dst, USE src);
  predicate(false);

  format %{ "FCTIWZ $dst, $src \t// convD2I, $src != NaN" %}
  size(4);
  ins_encode %{
    __ fctiwz($dst$$FloatRegister, $src$$FloatRegister);
  %}
  ins_pipe(pipe_class_default);
%}

instruct cmovI_bso_stackSlotL(iRegIdst dst, flagsRegSrc crx, stackSlotL src) %{
  // no match-rule, false predicate
  effect(DEF dst, USE crx, USE src);
  predicate(false);

  ins_variable_size_depending_on_alignment(true);

  format %{ "cmovI $crx, $dst, $src" %}
  // Worst case is branch + move + stop, no stop without scheduler.
  size(8);
  ins_encode( enc_cmove_bso_stackSlotL(dst, crx, src) );
  ins_pipe(pipe_class_default);
%}

instruct cmovI_bso_reg(iRegIdst dst, flagsRegSrc crx, regD src) %{
  // no match-rule, false predicate
  effect(DEF dst, USE crx, USE src);
  predicate(false);

  ins_variable_size_depending_on_alignment(true);

  format %{ "cmovI $crx, $dst, $src" %}
  // Worst case is branch + move + stop, no stop without scheduler.
  size(8);
  ins_encode( enc_cmove_bso_reg(dst, crx, src) );
  ins_pipe(pipe_class_default);
%}


instruct cmovI_bso_reg_conLvalue0_Ex(iRegIdst dst, flagsRegSrc crx, regD src) %{
  // no match-rule, false predicate
  effect(DEF dst, USE crx, USE src);
  predicate(false);

  format %{ "CmovI $dst, $crx, $src \t// postalloc expanded" %}
  postalloc_expand %{
    //
    // replaces
    //
    //   region  dst  crx  src
    //    \       |    |   /
    //     dst=cmovI_bso_reg_conLvalue0
    //
    // with
    //
    //   region  dst
    //    \      /
    //     dst=loadConI16(0)
    //      |
    //      ^  region  dst  crx  src
    //      |   \       |    |   /
    //      dst=cmovI_bso_reg
    //

    // Create new nodes.
    MachNode *m1 = new loadConI16Node();
    MachNode *m2 = new cmovI_bso_regNode();

    // inputs for new nodes
    m1->add_req(n_region);
    m2->add_req(n_region, n_crx, n_src);

    // precedences for new nodes
    m2->add_prec(m1);

    // operands for new nodes
    m1->_opnds[0] = op_dst;
    m1->_opnds[1] = new immI16Oper(0);

    m2->_opnds[0] = op_dst;
    m2->_opnds[1] = op_crx;
    m2->_opnds[2] = op_src;

    // registers for new nodes
    ra_->set_pair(m1->_idx, ra_->get_reg_second(this), ra_->get_reg_first(this)); // dst
    ra_->set_pair(m2->_idx, ra_->get_reg_second(this), ra_->get_reg_first(this)); // dst

    // Insert new nodes.
    nodes->push(m1);
    nodes->push(m2);
  %}
%}


// Double to Int conversion, NaN is mapped to 0. Special version for Power8.
instruct convD2I_reg_mffprd_ExEx(iRegIdst dst, regD src) %{
  match(Set dst (ConvD2I src));
  ins_cost(DEFAULT_COST);

  expand %{
    regD tmpD;
    flagsReg crx;
    cmpDUnordered_reg_reg(crx, src, src); // Check whether src is NaN.
    convD2IRaw_regD(tmpD, src); // Convert double to int (speculated).
    cmovI_bso_reg_conLvalue0_Ex(dst, crx, tmpD); // Cmove based on NaN check.
  %}
%}

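// The expand above implements Java's (int) cast for double: NaN maps to 0
// and out-of-range values saturate (FCTIWZ). A sketch of the control flow
// (comments only; fctiwz_saturating is a hypothetical stand-in for the
// hardware conversion):
//
//   int32_t convD2I(double src) {
//     if (src != src) return 0;       // unordered self-compare: NaN check
//     return fctiwz_saturating(src);  // speculated raw conversion
//   }
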
instruct convF2IRaw_regF(regF dst, regF src) %{
  // no match-rule, false predicate
  effect(DEF dst, USE src);
  predicate(false);

  format %{ "FCTIWZ $dst, $src \t// convF2I, $src != NaN" %}
  size(4);
  ins_encode %{
    __ fctiwz($dst$$FloatRegister, $src$$FloatRegister);
  %}
  ins_pipe(pipe_class_default);
%}


// Float to Int conversion, NaN is mapped to 0. Special version for Power8.
instruct convF2I_regF_mffprd_ExEx(iRegIdst dst, regF src) %{
  match(Set dst (ConvF2I src));
  ins_cost(DEFAULT_COST);

  expand %{
    regF tmpF;
    flagsReg crx;
    cmpFUnordered_reg_reg(crx, src, src); // Check whether src is NaN.
    convF2IRaw_regF(tmpF, src); // Convert float to int (speculated).
    cmovI_bso_reg_conLvalue0_Ex(dst, crx, tmpF); // Cmove based on NaN check.
  %}
%}

// Convert to Long

instruct convI2L_reg(iRegLdst dst, iRegIsrc src) %{
  match(Set dst (ConvI2L src));
  format %{ "EXTSW $dst, $src \t// int->long" %}
  size(4);
  ins_encode %{
    __ extsw($dst$$Register, $src$$Register);
  %}
  ins_pipe(pipe_class_default);
%}

// Zero-extend: convert unsigned int to long (convUI2L).
instruct zeroExtendL_regI(iRegLdst dst, iRegIsrc src, immL_32bits mask) %{
  match(Set dst (AndL (ConvI2L src) mask));
  ins_cost(DEFAULT_COST);

  format %{ "CLRLDI $dst, $src, #32 \t// zero-extend int to long" %}
  size(4);
  ins_encode %{
    __ clrldi($dst$$Register, $src$$Register, 32);
  %}
  ins_pipe(pipe_class_default);
%}

// Zero-extend: convert unsigned int to long in long register.
instruct zeroExtendL_regL(iRegLdst dst, iRegLsrc src, immL_32bits mask) %{
  match(Set dst (AndL src mask));
  ins_cost(DEFAULT_COST);

  format %{ "CLRLDI $dst, $src, #32 \t// zero-extend int to long" %}
  size(4);
  ins_encode %{
    __ clrldi($dst$$Register, $src$$Register, 32);
  %}
  ins_pipe(pipe_class_default);
%}

instruct convF2LRaw_regF(regF dst, regF src) %{
  // no match-rule, false predicate
  effect(DEF dst, USE src);
  predicate(false);

  format %{ "FCTIDZ $dst, $src \t// convF2L, $src != NaN" %}
  size(4);
  ins_encode %{
    __ fctidz($dst$$FloatRegister, $src$$FloatRegister);
  %}
  ins_pipe(pipe_class_default);
%}

instruct cmovL_bso_stackSlotL(iRegLdst dst, flagsRegSrc crx, stackSlotL src) %{
  // no match-rule, false predicate
  effect(DEF dst, USE crx, USE src);
  predicate(false);

  ins_variable_size_depending_on_alignment(true);

  format %{ "cmovL $crx, $dst, $src" %}
  // Worst case is branch + move + stop, no stop without scheduler.
  size(8);
  ins_encode( enc_cmove_bso_stackSlotL(dst, crx, src) );
  ins_pipe(pipe_class_default);
%}

instruct cmovL_bso_reg(iRegLdst dst, flagsRegSrc crx, regD src) %{
  // no match-rule, false predicate
  effect(DEF dst, USE crx, USE src);
  predicate(false);

  ins_variable_size_depending_on_alignment(true);

  format %{ "cmovL $crx, $dst, $src" %}
  // Worst case is branch + move + stop, no stop without scheduler.
  size(8);
  ins_encode( enc_cmove_bso_reg(dst, crx, src) );
  ins_pipe(pipe_class_default);
%}


instruct cmovL_bso_reg_conLvalue0_Ex(iRegLdst dst, flagsRegSrc crx, regD src) %{
  // no match-rule, false predicate
  effect(DEF dst, USE crx, USE src);
  predicate(false);

  format %{ "CmovL $dst, $crx, $src \t// postalloc expanded" %}
  postalloc_expand %{
    //
    // replaces
    //
    //   region  dst  crx  src
    //    \       |    |   /
    //     dst=cmovL_bso_reg_conLvalue0
    //
    // with
    //
    //   region  dst
    //    \      /
    //     dst=loadConL16(0)
    //      |
    //      ^  region  dst  crx  src
    //      |   \       |    |   /
    //      dst=cmovL_bso_reg
    //

    // Create new nodes.
    MachNode *m1 = new loadConL16Node();
    MachNode *m2 = new cmovL_bso_regNode();

    // inputs for new nodes
    m1->add_req(n_region);
    m2->add_req(n_region, n_crx, n_src);
    m2->add_prec(m1);

    // operands for new nodes
    m1->_opnds[0] = op_dst;
    m1->_opnds[1] = new immL16Oper(0);
    m2->_opnds[0] = op_dst;
    m2->_opnds[1] = op_crx;
    m2->_opnds[2] = op_src;

    // registers for new nodes
    ra_->set_pair(m1->_idx, ra_->get_reg_second(this), ra_->get_reg_first(this)); // dst
    ra_->set_pair(m2->_idx, ra_->get_reg_second(this), ra_->get_reg_first(this)); // dst

    // Insert new nodes.
    nodes->push(m1);
    nodes->push(m2);
  %}
%}


// Float to Long conversion, NaN is mapped to 0. Special version for Power8.
instruct convF2L_reg_mffprd_ExEx(iRegLdst dst, regF src) %{
  match(Set dst (ConvF2L src));
  ins_cost(DEFAULT_COST);

  expand %{
    regF tmpF;
    flagsReg crx;
    cmpFUnordered_reg_reg(crx, src, src); // Check whether src is NaN.
    convF2LRaw_regF(tmpF, src); // Convert float to long (speculated).
    cmovL_bso_reg_conLvalue0_Ex(dst, crx, tmpF); // Cmove based on NaN check.
  %}
%}

instruct convD2LRaw_regD(regD dst, regD src) %{
  // no match-rule, false predicate
  effect(DEF dst, USE src);
  predicate(false);

  format %{ "FCTIDZ $dst, $src \t// convD2L $src != NaN" %}
  size(4);
  ins_encode %{
    __ fctidz($dst$$FloatRegister, $src$$FloatRegister);
  %}
  ins_pipe(pipe_class_default);
%}


// Double to Long conversion, NaN is mapped to 0. Special version for Power8.
instruct convD2L_reg_mffprd_ExEx(iRegLdst dst, regD src) %{
  match(Set dst (ConvD2L src));
  ins_cost(DEFAULT_COST);

  expand %{
    regD tmpD;
    flagsReg crx;
    cmpDUnordered_reg_reg(crx, src, src); // Check whether src is NaN.
    convD2LRaw_regD(tmpD, src); // Convert double to long (speculated).
    cmovL_bso_reg_conLvalue0_Ex(dst, crx, tmpD); // Cmove based on NaN check.
  %}
%}

// Convert to Float

// Placed here as needed in expand.
instruct convL2DRaw_regD(regD dst, regD src) %{
  // no match-rule, false predicate
  effect(DEF dst, USE src);
  predicate(false);

  format %{ "FCFID $dst, $src \t// convL2D" %}
  size(4);
  ins_encode %{
    __ fcfid($dst$$FloatRegister, $src$$FloatRegister);
  %}
  ins_pipe(pipe_class_default);
%}

// Placed here as needed in expand.
instruct convD2F_reg(regF dst, regD src) %{
  match(Set dst (ConvD2F src));
  format %{ "FRSP $dst, $src \t// convD2F" %}
  size(4);
  ins_encode %{
    __ frsp($dst$$FloatRegister, $src$$FloatRegister);
  %}
  ins_pipe(pipe_class_default);
%}

instruct convL2FRaw_regF(regF dst, regD src) %{
  // no match-rule, false predicate
  effect(DEF dst, USE src);
  predicate(false);

  format %{ "FCFIDS $dst, $src \t// convL2F" %}
  size(4);
  ins_encode %{
    __ fcfids($dst$$FloatRegister, $src$$FloatRegister);
  %}
  ins_pipe(pipe_class_default);
%}


// Integer to Float conversion. Special version for Power8.
instruct convI2F_ireg_mtfprd_Ex(regF dst, iRegIsrc src) %{
  match(Set dst (ConvI2F src));
  ins_cost(DEFAULT_COST);

  expand %{
    regD tmpD;
    moveI2D_reg(tmpD, src);
    convL2FRaw_regF(dst, tmpD); // Convert to float.
  %}
%}


// L2F to avoid runtime call. Special version for Power8.
instruct convL2F_ireg_mtfprd_Ex(regF dst, iRegLsrc src) %{
  match(Set dst (ConvL2F src));
  ins_cost(DEFAULT_COST);

  expand %{
    regD tmpD;
    moveL2D_reg(tmpD, src);
    convL2FRaw_regF(dst, tmpD); // Convert to float.
  %}
%}

// Moved up as used in expand.
//instruct convD2F_reg(regF dst, regD src) %{%}

// Convert to Double


// Integer to Double conversion. Special version for Power8.
instruct convI2D_reg_mtfprd_Ex(regD dst, iRegIsrc src) %{
  match(Set dst (ConvI2D src));
  ins_cost(DEFAULT_COST);

  expand %{
    regD tmpD;
    moveI2D_reg(tmpD, src);
    convL2DRaw_regD(dst, tmpD); // Convert to double.
  %}
%}


// Long to Double conversion. Special version for Power8.
instruct convL2D_reg_mtfprd_Ex(regD dst, iRegLsrc src) %{
  match(Set dst (ConvL2D src));
  ins_cost(DEFAULT_COST);

  expand %{
    regD tmpD;
    moveL2D_reg(tmpD, src);
    convL2DRaw_regD(dst, tmpD); // Convert to double.
  %}
%}

instruct convF2D_reg(regD dst, regF src) %{
  match(Set dst (ConvF2D src));
  format %{ "FMR $dst, $src \t// float->double" %}
  // variable size, 0 or 4
  ins_encode %{
    __ fmr_if_needed($dst$$FloatRegister, $src$$FloatRegister);
  %}
  ins_pipe(pipe_class_default);
%}

instruct convF2HF_reg_reg(iRegIdst dst, regF src, regF tmp) %{
  match(Set dst (ConvF2HF src));
  effect(TEMP tmp);
  ins_cost(3 * DEFAULT_COST);
  size(12);
  format %{ "xscvdphp $tmp, $src\t# convert to half precision\n\t"
            "mffprd $dst, $tmp\t# move result from $tmp to $dst\n\t"
            "extsh $dst, $dst\t# make it a proper short"
            %}
  ins_encode %{
    __ f2hf($dst$$Register, $src$$FloatRegister, $tmp$$FloatRegister);
  %}
  ins_pipe(pipe_class_default);
%}

instruct convHF2F_reg_reg(regF dst, iRegIsrc src) %{
  match(Set dst (ConvHF2F src));
  ins_cost(2 * DEFAULT_COST);
  size(8);
  format %{ "mtfprd $dst, $src\t# move source from $src to $dst\n\t"
            "xscvhpdp $dst, $dst\t# convert from half precision"
            %}
  ins_encode %{
    __ hf2f($dst$$FloatRegister, $src$$Register);
  %}
  ins_pipe(pipe_class_default);
%}

//----------Control Flow Instructions------------------------------------------
// Compare Instructions

// Compare Integers
instruct cmpI_reg_reg(flagsReg crx, iRegIsrc src1, iRegIsrc src2) %{
  match(Set crx (CmpI src1 src2));
  size(4);
  format %{ "CMPW $crx, $src1, $src2" %}
  ins_encode %{
    __ cmpw($crx$$CondRegister, $src1$$Register, $src2$$Register);
  %}
  ins_pipe(pipe_class_compare);
%}

instruct cmpI_reg_imm16(flagsReg crx, iRegIsrc src1, immI16 src2) %{
  match(Set crx (CmpI src1 src2));
  format %{ "CMPWI $crx, $src1, $src2" %}
  size(4);
  ins_encode %{
    __ cmpwi($crx$$CondRegister, $src1$$Register, $src2$$constant);
  %}
  ins_pipe(pipe_class_compare);
%}

// (src1 & src2) == 0?
instruct testI_reg_imm(flagsRegCR0 cr0, iRegIsrc src1, uimmI16 src2, immI_0 zero) %{
  match(Set cr0 (CmpI (AndI src1 src2) zero));
  // r0 is killed
  format %{ "ANDI R0, $src1, $src2 \t// BTST int" %}
  size(4);
  ins_encode %{
    __ andi_(R0, $src1$$Register, $src2$$constant);
  %}
  ins_pipe(pipe_class_compare);
%}

instruct cmpL_reg_reg(flagsReg crx, iRegLsrc src1, iRegLsrc src2) %{
  match(Set crx (CmpL src1 src2));
  format %{ "CMPD $crx, $src1, $src2" %}
  size(4);
  ins_encode %{
    __ cmpd($crx$$CondRegister, $src1$$Register, $src2$$Register);
  %}
  ins_pipe(pipe_class_compare);
%}

instruct cmpL_reg_imm16(flagsReg crx, iRegLsrc src1, immL16 src2) %{
  match(Set crx (CmpL src1 src2));
  format %{ "CMPDI $crx, $src1, $src2" %}
  size(4);
  ins_encode %{
    __ cmpdi($crx$$CondRegister, $src1$$Register, $src2$$constant);
  %}
  ins_pipe(pipe_class_compare);
%}

// Added CmpUL for LoopPredicate.
instruct cmpUL_reg_reg(flagsReg crx, iRegLsrc src1, iRegLsrc src2) %{
  match(Set crx (CmpUL src1 src2));
  format %{ "CMPLD $crx, $src1, $src2" %}
  size(4);
  ins_encode %{
    __ cmpld($crx$$CondRegister, $src1$$Register, $src2$$Register);
  %}
  ins_pipe(pipe_class_compare);
%}

instruct cmpUL_reg_imm16(flagsReg crx, iRegLsrc src1, uimmL16 src2) %{
  match(Set crx (CmpUL src1 src2));
  format %{ "CMPLDI $crx, $src1, $src2" %}
  size(4);
  ins_encode %{
    __ cmpldi($crx$$CondRegister, $src1$$Register, $src2$$constant);
  %}
  ins_pipe(pipe_class_compare);
%}

instruct testL_reg_reg(flagsRegCR0 cr0, iRegLsrc src1, iRegLsrc src2, immL_0 zero) %{
  match(Set cr0 (CmpL (AndL src1 src2) zero));
  // r0 is killed
  format %{ "AND R0, $src1, $src2 \t// BTST long" %}
  size(4);
  ins_encode %{
    __ and_(R0, $src1$$Register, $src2$$Register);
  %}
  ins_pipe(pipe_class_compare);
%}

instruct testL_reg_imm(flagsRegCR0 cr0, iRegLsrc src1, uimmL16 src2, immL_0 zero) %{
  match(Set cr0 (CmpL (AndL src1 src2) zero));
  // r0 is killed
  format %{ "ANDI R0, $src1, $src2 \t// BTST long" %}
  size(4);
  ins_encode %{
    __ andi_(R0, $src1$$Register, $src2$$constant);
  %}
  ins_pipe(pipe_class_compare);
%}

// Manifest a CmpL3 result in an integer register.
instruct cmpL3_reg_reg(iRegIdst dst, iRegLsrc src1, iRegLsrc src2, flagsRegCR0 cr0) %{
  match(Set dst (CmpL3 src1 src2));
  effect(KILL cr0);
  ins_cost(DEFAULT_COST * 5);
  size((VM_Version::has_brw() ? 16 : 20));

  format %{ "cmpL3_reg_reg $dst, $src1, $src2" %}

  ins_encode %{
    __ cmpd(CR0, $src1$$Register, $src2$$Register);
    __ set_cmp3($dst$$Register);
  %}
  ins_pipe(pipe_class_default);
%}

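// CmpL3 has the semantics of java.lang.Long.compare; set_cmp3 materializes
// the CR0 result as an integer. Equivalent C++ (comments only):
//
//   int cmpL3(int64_t a, int64_t b) {
//     return a < b ? -1 : (a == b ? 0 : 1);
//   }
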
// Implicit range checks.
// A range check in the ideal world has one of the following shapes:
//  - (If le (CmpU length index)), (IfTrue throw exception)
//  - (If lt (CmpU index length)), (IfFalse throw exception)
//
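// Illustration (comments only, C++-style pseudocode): for a load a[i] the
// bounds check
//
//   if ((uint32_t)i >= (uint32_t)length)  // unsigned compare catches i < 0
//     goto uncommon_trap;
//
// is emitted by the rules below as a single conditional trap instruction
// (tw/twi) instead of a compare-and-branch.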
// Match range check 'If le (CmpU length index)'.
instruct rangeCheck_iReg_uimm15(cmpOp cmp, iRegIsrc src_length, uimmI15 index, label labl) %{
  match(If cmp (CmpU src_length index));
  effect(USE labl);
  predicate(TrapBasedRangeChecks &&
            _kids[0]->_leaf->as_Bool()->_test._test == BoolTest::le &&
            PROB_UNLIKELY(_leaf->as_If()->_prob) >= PROB_ALWAYS &&
            (Matcher::branches_to_uncommon_trap(_leaf)));

  ins_is_TrapBasedCheckNode(true);

  format %{ "TWI $index $cmp $src_length \t// RangeCheck => trap $labl" %}
  size(4);
  ins_encode %{
    if ($cmp$$cmpcode == 0x1 /* less_equal */) {
      __ trap_range_check_le($src_length$$Register, $index$$constant);
    } else {
      // Both successors are uncommon traps, probability is 0.
      // Node got flipped during fixup flow.
      assert($cmp$$cmpcode == 0x9, "must be greater");
      __ trap_range_check_g($src_length$$Register, $index$$constant);
    }
  %}
  ins_pipe(pipe_class_trap);
%}

// Match range check 'If lt (CmpU index length)'.
instruct rangeCheck_iReg_iReg(cmpOp cmp, iRegIsrc src_index, iRegIsrc src_length, label labl) %{
  match(If cmp (CmpU src_index src_length));
  effect(USE labl);
  predicate(TrapBasedRangeChecks &&
            _kids[0]->_leaf->as_Bool()->_test._test == BoolTest::lt &&
            _leaf->as_If()->_prob >= PROB_ALWAYS &&
            (Matcher::branches_to_uncommon_trap(_leaf)));

  ins_is_TrapBasedCheckNode(true);

  format %{ "TW $src_index $cmp $src_length \t// RangeCheck => trap $labl" %}
  size(4);
  ins_encode %{
    if ($cmp$$cmpcode == 0x0 /* greater_equal */) {
      __ trap_range_check_ge($src_index$$Register, $src_length$$Register);
    } else {
      // Both successors are uncommon traps, probability is 0.
      // Node got flipped during fixup flow.
      assert($cmp$$cmpcode == 0x8, "must be less");
      __ trap_range_check_l($src_index$$Register, $src_length$$Register);
    }
  %}
  ins_pipe(pipe_class_trap);
%}

// Match range check 'If lt (CmpU index length)'.
instruct rangeCheck_uimm15_iReg(cmpOp cmp, iRegIsrc src_index, uimmI15 length, label labl) %{
  match(If cmp (CmpU src_index length));
  effect(USE labl);
  predicate(TrapBasedRangeChecks &&
            _kids[0]->_leaf->as_Bool()->_test._test == BoolTest::lt &&
            _leaf->as_If()->_prob >= PROB_ALWAYS &&
            (Matcher::branches_to_uncommon_trap(_leaf)));

  ins_is_TrapBasedCheckNode(true);

  format %{ "TWI $src_index $cmp $length \t// RangeCheck => trap $labl" %}
  size(4);
  ins_encode %{
    if ($cmp$$cmpcode == 0x0 /* greater_equal */) {
      __ trap_range_check_ge($src_index$$Register, $length$$constant);
    } else {
      // Both successors are uncommon traps, probability is 0.
      // Node got flipped during fixup flow.
      assert($cmp$$cmpcode == 0x8, "must be less");
      __ trap_range_check_l($src_index$$Register, $length$$constant);
    }
  %}
  ins_pipe(pipe_class_trap);
%}

instruct compU_reg_reg(flagsReg crx, iRegIsrc src1, iRegIsrc src2) %{
  match(Set crx (CmpU src1 src2));
  format %{ "CMPLW $crx, $src1, $src2 \t// unsigned" %}
  size(4);
  ins_encode %{
    __ cmplw($crx$$CondRegister, $src1$$Register, $src2$$Register);
  %}
  ins_pipe(pipe_class_compare);
%}

instruct compU_reg_uimm16(flagsReg crx, iRegIsrc src1, uimmI16 src2) %{
  match(Set crx (CmpU src1 src2));
  size(4);
  format %{ "CMPLWI $crx, $src1, $src2" %}
  ins_encode %{
    __ cmplwi($crx$$CondRegister, $src1$$Register, $src2$$constant);
  %}
  ins_pipe(pipe_class_compare);
%}

// Implicit zero checks (more implicit null checks).
// No constant pool entries required.
instruct zeroCheckN_iReg_imm0(cmpOp cmp, iRegNsrc value, immN_0 zero, label labl) %{
  match(If cmp (CmpN value zero));
  effect(USE labl);
  predicate(TrapBasedNullChecks &&
            _kids[0]->_leaf->as_Bool()->_test._test == BoolTest::ne &&
            _leaf->as_If()->_prob >= PROB_LIKELY_MAG(4) &&
            Matcher::branches_to_uncommon_trap(_leaf));
  ins_cost(1);

  ins_is_TrapBasedCheckNode(true);

  format %{ "TDI $value $cmp $zero \t// ZeroCheckN => trap $labl" %}
  size(4);
  ins_encode %{
    if ($cmp$$cmpcode == 0xA) {
      __ trap_null_check($value$$Register);
    } else {
      // Both successors are uncommon traps, probability is 0.
      // Node got flipped during fixup flow.
      assert($cmp$$cmpcode == 0x2 , "must be equal(0xA) or notEqual(0x2)");
      __ trap_null_check($value$$Register, Assembler::traptoGreaterThanUnsigned);
    }
  %}
  ins_pipe(pipe_class_trap);
%}

// Compare narrow oops.
instruct cmpN_reg_reg(flagsReg crx, iRegNsrc src1, iRegNsrc src2) %{
  match(Set crx (CmpN src1 src2));

  size(4);
  ins_cost(2);
  format %{ "CMPLW $crx, $src1, $src2 \t// compressed ptr" %}
  ins_encode %{
    __ cmplw($crx$$CondRegister, $src1$$Register, $src2$$Register);
  %}
  ins_pipe(pipe_class_compare);
%}

instruct cmpN_reg_imm0(flagsReg crx, iRegNsrc src1, immN_0 src2) %{
  match(Set crx (CmpN src1 src2));
  // Make this more expensive than zeroCheckN_iReg_imm0.
  ins_cost(2);

  format %{ "CMPLWI $crx, $src1, $src2 \t// compressed ptr" %}
  size(4);
  ins_encode %{
    __ cmplwi($crx$$CondRegister, $src1$$Register, $src2$$constant);
  %}
  ins_pipe(pipe_class_compare);
%}

// Implicit zero checks (more implicit null checks).
// No constant pool entries required.
instruct zeroCheckP_reg_imm0(cmpOp cmp, iRegP_N2P value, immP_0 zero, label labl) %{
  match(If cmp (CmpP value zero));
  effect(USE labl);
  predicate(TrapBasedNullChecks &&
            _kids[0]->_leaf->as_Bool()->_test._test == BoolTest::ne &&
            _leaf->as_If()->_prob >= PROB_LIKELY_MAG(4) &&
            Matcher::branches_to_uncommon_trap(_leaf));
  ins_cost(1); // Should not be cheaper than zeroCheckN.

  ins_is_TrapBasedCheckNode(true);

  format %{ "TDI $value $cmp $zero \t// ZeroCheckP => trap $labl" %}
  size(4);
  ins_encode %{
    if ($cmp$$cmpcode == 0xA) {
      __ trap_null_check($value$$Register);
    } else {
      // Both successors are uncommon traps, probability is 0.
      // Node got flipped during fixup flow.
      assert($cmp$$cmpcode == 0x2 , "must be equal(0xA) or notEqual(0x2)");
      __ trap_null_check($value$$Register, Assembler::traptoGreaterThanUnsigned);
    }
  %}
  ins_pipe(pipe_class_trap);
%}

// Compare Pointers
instruct cmpP_reg_reg(flagsReg crx, iRegP_N2P src1, iRegP_N2P src2) %{
  match(Set crx (CmpP src1 src2));
  format %{ "CMPLD $crx, $src1, $src2 \t// ptr" %}
  size(4);
  ins_encode %{
    __ cmpld($crx$$CondRegister, $src1$$Register, $src2$$Register);
  %}
  ins_pipe(pipe_class_compare);
%}

instruct cmpP_reg_null(flagsReg crx, iRegP_N2P src1, immP_0or1 src2) %{
  match(Set crx (CmpP src1 src2));
  format %{ "CMPLDI $crx, $src1, $src2 \t// ptr" %}
  size(4);
  ins_encode %{
    __ cmpldi($crx$$CondRegister, $src1$$Register, (int)((short)($src2$$constant & 0xFFFF)));
  %}
  ins_pipe(pipe_class_compare);
%}

// Used in postalloc expand.
instruct cmpP_reg_imm16(flagsReg crx, iRegPsrc src1, immL16 src2) %{
  // This match rule prevents reordering of the node before a safepoint.
  // This only makes sense if this instruction is used exclusively
  // for the expansion of EncodeP!
  match(Set crx (CmpP src1 src2));
  predicate(false);

  format %{ "CMPDI $crx, $src1, $src2" %}
  size(4);
  ins_encode %{
    __ cmpdi($crx$$CondRegister, $src1$$Register, $src2$$constant);
  %}
  ins_pipe(pipe_class_compare);
%}

//----------Float Compares----------------------------------------------------

instruct cmpFUnordered_reg_reg(flagsReg crx, regF src1, regF src2) %{
  // Needs a match rule, see cmpDUnordered.
  match(Set crx (CmpF src1 src2));
  // False predicate, shall not be matched.
  predicate(false);

  format %{ "cmpFUrd $crx, $src1, $src2" %}
  size(4);
  ins_encode %{
    __ fcmpu($crx$$CondRegister, $src1$$FloatRegister, $src2$$FloatRegister);
  %}
  ins_pipe(pipe_class_default);
%}

instruct cmov_bns_less(flagsReg crx) %{
  // no match-rule, false predicate
  effect(DEF crx);
  predicate(false);

  ins_variable_size_depending_on_alignment(true);

  format %{ "cmov $crx" %}
  // Worst case is branch + move + stop, no stop without scheduler.
  size(12);
  ins_encode %{
    Label done;
    __ bns($crx$$CondRegister, done); // not unordered -> keep crx
    __ li(R0, 0);
    __ cmpwi($crx$$CondRegister, R0, 1); // unordered -> set crx to 'less'
    __ bind(done);
  %}
  ins_pipe(pipe_class_default);
%}

// Compare floating, generate condition code.
instruct cmpF_reg_reg_Ex(flagsReg crx, regF src1, regF src2) %{
  // FIXME: should we match 'If cmp (CmpF src1 src2))' ??
  //
  // The following code sequence occurs a lot in mpegaudio:
  //
  // block BXX:
  // 0: instruct cmpFUnordered_reg_reg (cmpF_reg_reg-0):
  //    cmpFUrd CR6, F11, F9
  // 4: instruct cmov_bns_less (cmpF_reg_reg-1):
  //    cmov CR6
  // 8: instruct branchConSched:
  //    B_FARle CR6, B56 P=0.500000 C=-1.000000
  match(Set crx (CmpF src1 src2));
  ins_cost(DEFAULT_COST+BRANCH_COST);

  format %{ "CmpF $crx, $src1, $src2 \t// postalloc expanded" %}
  postalloc_expand %{
    //
    // replaces
    //
    //   region  src1  src2
    //    \       |     |
    //     crx=cmpF_reg_reg
    //
    // with
    //
    //   region  src1  src2
    //    \       |     |
    //     crx=cmpFUnordered_reg_reg
    //      |
    //      ^  region
    //      |   \
    //      crx=cmov_bns_less
    //

    // Create new nodes.
    MachNode *m1 = new cmpFUnordered_reg_regNode();
    MachNode *m2 = new cmov_bns_lessNode();

    // inputs for new nodes
    m1->add_req(n_region, n_src1, n_src2);
    m2->add_req(n_region);
    m2->add_prec(m1);

    // operands for new nodes
    m1->_opnds[0] = op_crx;
    m1->_opnds[1] = op_src1;
    m1->_opnds[2] = op_src2;
    m2->_opnds[0] = op_crx;

    // registers for new nodes
    ra_->set_pair(m1->_idx, ra_->get_reg_second(this), ra_->get_reg_first(this)); // crx
    ra_->set_pair(m2->_idx, ra_->get_reg_second(this), ra_->get_reg_first(this)); // crx

    // Insert new nodes.
    nodes->push(m1);
    nodes->push(m2);
  %}
%}

// Compare float, generate -1,0,1
instruct cmpF3_reg_reg(iRegIdst dst, regF src1, regF src2, flagsRegCR0 cr0) %{
  match(Set dst (CmpF3 src1 src2));
  effect(KILL cr0);
  ins_cost(DEFAULT_COST * 6);
  size((VM_Version::has_brw() ? 20 : 24));

  format %{ "cmpF3_reg_reg $dst, $src1, $src2" %}

  ins_encode %{
    __ fcmpu(CR0, $src1$$FloatRegister, $src2$$FloatRegister);
    __ set_cmpu3($dst$$Register, true); // C2 requires unordered to get treated like less
  %}
  ins_pipe(pipe_class_default);
%}

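// CmpF3 generates -1/0/1 with any unordered operand treated like 'less'
// (fcmpl semantics). Equivalent C++ (comments only):
//
//   int cmpF3(float a, float b) {
//     if (a != a || b != b) return -1;  // NaN: unordered -> less
//     return a < b ? -1 : (a == b ? 0 : 1);
//   }
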
instruct cmpDUnordered_reg_reg(flagsReg crx, regD src1, regD src2) %{
  // Needs a match rule so that the ideal opcode is Cmp. This causes gcm to
  // place the node right before the conditional move using it.
  // In jck test api/java_awt/geom/QuadCurve2DFloat/index.html#SetCurveTesttestCase7,
  // compilation of java.awt.geom.RectangularShape::getBounds()Ljava/awt/Rectangle
  // crashed in register allocation when the flags register between cmpDUnordered
  // and a conditional move was supposed to be spilled.
  match(Set crx (CmpD src1 src2));
  // False predicate, shall not be matched.
  predicate(false);

  format %{ "cmpFUrd $crx, $src1, $src2" %}
  size(4);
  ins_encode %{
    __ fcmpu($crx$$CondRegister, $src1$$FloatRegister, $src2$$FloatRegister);
  %}
  ins_pipe(pipe_class_default);
%}

instruct cmpD_reg_reg_Ex(flagsReg crx, regD src1, regD src2) %{
  match(Set crx (CmpD src1 src2));
  ins_cost(DEFAULT_COST+BRANCH_COST);

  format %{ "CmpD $crx, $src1, $src2 \t// postalloc expanded" %}
  postalloc_expand %{
    //
    // replaces
    //
    //   region  src1  src2
    //    \       |     |
    //     crx=cmpD_reg_reg
    //
    // with
    //
    //   region  src1  src2
    //    \       |     |
    //     crx=cmpDUnordered_reg_reg
    //      |
    //      ^  region
    //      |   \
    //      crx=cmov_bns_less
    //

    // create new nodes
    MachNode *m1 = new cmpDUnordered_reg_regNode();
    MachNode *m2 = new cmov_bns_lessNode();

    // inputs for new nodes
    m1->add_req(n_region, n_src1, n_src2);
    m2->add_req(n_region);
    m2->add_prec(m1);

    // operands for new nodes
    m1->_opnds[0] = op_crx;
    m1->_opnds[1] = op_src1;
    m1->_opnds[2] = op_src2;
    m2->_opnds[0] = op_crx;

    // registers for new nodes
    ra_->set_pair(m1->_idx, ra_->get_reg_second(this), ra_->get_reg_first(this)); // crx
    ra_->set_pair(m2->_idx, ra_->get_reg_second(this), ra_->get_reg_first(this)); // crx

    // Insert new nodes.
    nodes->push(m1);
    nodes->push(m2);
  %}
%}

// Compare double, generate -1,0,1
instruct cmpD3_reg_reg(iRegIdst dst, regD src1, regD src2, flagsRegCR0 cr0) %{
  match(Set dst (CmpD3 src1 src2));
  effect(KILL cr0);
  ins_cost(DEFAULT_COST * 6);
  size((VM_Version::has_brw() ? 20 : 24));

  format %{ "cmpD3_reg_reg $dst, $src1, $src2" %}

  ins_encode %{
    __ fcmpu(CR0, $src1$$FloatRegister, $src2$$FloatRegister);
    __ set_cmpu3($dst$$Register, true); // C2 requires unordered to get treated like less
  %}
  ins_pipe(pipe_class_default);
%}

// Compare char
instruct cmprb_Digit_reg_reg(iRegIdst dst, iRegIsrc src1, iRegIsrc src2, flagsReg crx) %{
  match(Set dst (Digit src1));
  effect(TEMP src2, TEMP crx);
  ins_cost(3 * DEFAULT_COST);

  format %{ "LI $src2, 0x3930\n\t"
            "CMPRB $crx, 0, $src1, $src2\n\t"
            "SETB $dst, $crx" %}
  size(12);
  ins_encode %{
    // 0x30: 0, 0x39: 9
    __ li($src2$$Register, 0x3930);
    // compare src1 with ranges 0x30 to 0x39
    __ cmprb($crx$$CondRegister, 0, $src1$$Register, $src2$$Register);
    __ setb($dst$$Register, $crx$$CondRegister);
  %}
  ins_pipe(pipe_class_default);
%}

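// How the CMPRB range operand is packed (illustration): each 16-bit
// halfword in src2 encodes one inclusive byte range as (hi << 8) | lo.
// Above, 0x3930 packs lo = 0x30 ('0') and hi = 0x39 ('9'), so the test is
// 0x30 <= src1 <= 0x39. With the L field set to 1, as in the instructs
// below, two such ranges are tested at once.
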
instruct cmprb_LowerCase_reg_reg(iRegIdst dst, iRegIsrc src1, iRegIsrc src2, flagsReg crx) %{
  match(Set dst (LowerCase src1));
  effect(TEMP src2, TEMP crx);
  ins_cost(12 * DEFAULT_COST);

  format %{ "LI $src2, 0x7A61\n\t"
            "CMPRB $crx, 0, $src1, $src2\n\t"
            "BGT $crx, done\n\t"
            "LIS $src2, (signed short)0xF6DF\n\t"
            "ORI $src2, $src2, 0xFFF8\n\t"
            "CMPRB $crx, 1, $src1, $src2\n\t"
            "BGT $crx, done\n\t"
            "LIS $src2, (signed short)0xAAB5\n\t"
            "ORI $src2, $src2, 0xBABA\n\t"
            "INSRDI $src2, $src2, 32, 0\n\t"
            "CMPEQB $crx, 1, $src1, $src2\n"
            "done:\n\t"
            "SETB $dst, $crx" %}

  size(48);
  ins_encode %{
    Label done;
    // 0x61: a, 0x7A: z
    __ li($src2$$Register, 0x7A61);
    // compare src1 with ranges 0x61 to 0x7A
    __ cmprb($crx$$CondRegister, 0, $src1$$Register, $src2$$Register);
    __ bgt($crx$$CondRegister, done);

    // 0xDF: sharp s, 0xFF: y with diaeresis; 0xF7 (division sign) is not a lower-case letter
    __ lis($src2$$Register, (signed short)0xF6DF);
    __ ori($src2$$Register, $src2$$Register, 0xFFF8);
    // compare src1 with ranges 0xDF to 0xF6 and 0xF8 to 0xFF
    __ cmprb($crx$$CondRegister, 1, $src1$$Register, $src2$$Register);
    __ bgt($crx$$CondRegister, done);

    // 0xAA: feminine ordinal indicator
    // 0xB5: micro sign
    // 0xBA: masculine ordinal indicator
    __ lis($src2$$Register, (signed short)0xAAB5);
    __ ori($src2$$Register, $src2$$Register, 0xBABA);
    __ insrdi($src2$$Register, $src2$$Register, 32, 0);
    // compare src1 with 0xAA, 0xB5, and 0xBA
    __ cmpeqb($crx$$CondRegister, $src1$$Register, $src2$$Register);

    __ bind(done);
    __ setb($dst$$Register, $crx$$CondRegister);
  %}
  ins_pipe(pipe_class_default);
%}

instruct cmprb_UpperCase_reg_reg(iRegIdst dst, iRegIsrc src1, iRegIsrc src2, flagsReg crx) %{
  match(Set dst (UpperCase src1));
  effect(TEMP src2, TEMP crx);
  ins_cost(7 * DEFAULT_COST);

  format %{ "LI $src2, 0x5A41\n\t"
            "CMPRB $crx, 0, $src1, $src2\n\t"
            "BGT $crx, done\n\t"
            "LIS $src2, (signed short)0xD6C0\n\t"
            "ORI $src2, $src2, 0xDED8\n\t"
            "CMPRB $crx, 1, $src1, $src2\n"
            "done:\n\t"
            "SETB $dst, $crx" %}

  size(28);
  ins_encode %{
    Label done;
    // 0x41: A, 0x5A: Z
    __ li($src2$$Register, 0x5A41);
    // compare src1 with a range 0x41 to 0x5A
    __ cmprb($crx$$CondRegister, 0, $src1$$Register, $src2$$Register);
    __ bgt($crx$$CondRegister, done);

    // 0xC0: a with grave, 0xDE: thorn; 0xD7 (multiplication sign) is not an upper-case letter
    __ lis($src2$$Register, (signed short)0xD6C0);
    __ ori($src2$$Register, $src2$$Register, 0xDED8);
    // compare src1 with ranges 0xC0 to 0xD6 and 0xD8 to 0xDE
    __ cmprb($crx$$CondRegister, 1, $src1$$Register, $src2$$Register);

    __ bind(done);
    __ setb($dst$$Register, $crx$$CondRegister);
  %}
  ins_pipe(pipe_class_default);
%}

instruct cmprb_Whitespace_reg_reg(iRegIdst dst, iRegIsrc src1, iRegIsrc src2, flagsReg crx) %{
  match(Set dst (Whitespace src1));
  predicate(PowerArchitecturePPC64 <= 9);
  effect(TEMP src2, TEMP crx);
  ins_cost(4 * DEFAULT_COST);

  format %{ "LI $src2, 0x0D09\n\t"
            "ADDIS $src2, 0x201C\n\t"
            "CMPRB $crx, 1, $src1, $src2\n\t"
            "SETB $dst, $crx" %}
  size(16);
  ins_encode %{
    // 0x09 to 0x0D, 0x1C to 0x20
    __ li($src2$$Register, 0x0D09);
    __ addis($src2$$Register, $src2$$Register, 0x0201C);
    // compare src with ranges 0x09 to 0x0D and 0x1C to 0x20
    __ cmprb($crx$$CondRegister, 1, $src1$$Register, $src2$$Register);
    __ setb($dst$$Register, $crx$$CondRegister);
  %}
  ins_pipe(pipe_class_default);
%}

// Power 10 version, using prefixed addi to load 32-bit constant
instruct cmprb_Whitespace_reg_reg_prefixed(iRegIdst dst, iRegIsrc src1, iRegIsrc src2, flagsReg crx) %{
  match(Set dst (Whitespace src1));
  predicate(PowerArchitecturePPC64 >= 10);
  effect(TEMP src2, TEMP crx);
  ins_cost(3 * DEFAULT_COST);

  format %{ "PLI $src2, 0x201C0D09\n\t"
            "CMPRB $crx, 1, $src1, $src2\n\t"
            "SETB $dst, $crx" %}
  size(16);
  ins_encode %{
    // 0x09 to 0x0D, 0x1C to 0x20
    assert( ((intptr_t)(__ pc()) & 0x3c) != 0x3c, "Bad alignment for prefixed instruction at " INTPTR_FORMAT, (intptr_t)(__ pc()));
    __ pli($src2$$Register, 0x201C0D09);
    // compare src with ranges 0x09 to 0x0D and 0x1C to 0x20
    __ cmprb($crx$$CondRegister, 1, $src1$$Register, $src2$$Register);
    __ setb($dst$$Register, $crx$$CondRegister);
  %}
  ins_pipe(pipe_class_default);
  ins_alignment(2);
%}

//----------Branches---------------------------------------------------------
// Jump

// Direct Branch.
instruct branch(label labl) %{
  match(Goto);
  effect(USE labl);
  ins_cost(BRANCH_COST);

  format %{ "B $labl" %}
  size(4);
  ins_encode %{
    Label d; // dummy
    __ bind(d);
    Label* p = $labl$$label;
    // `p' is `nullptr' when this encoding class is used only to
    // determine the size of the encoded instruction.
    Label& l = (nullptr == p)? d : *(p);
    __ b(l);
  %}
  ins_pipe(pipe_class_default);
%}

// Conditional Near Branch
instruct branchCon(cmpOp cmp, flagsRegSrc crx, label lbl) %{
  // Same match rule as `branchConFar'.
  match(If cmp crx);
  effect(USE lbl);
  ins_cost(BRANCH_COST);

  // If set to 1 this indicates that the current instruction is a
  // short variant of a long branch. This avoids using this
  // instruction in first-pass matching. It will then only be used in
  // the `Shorten_branches' pass.
  ins_short_branch(1);

  format %{ "B$cmp $crx, $lbl" %}
  size(4);
  ins_encode( enc_bc(crx, cmp, lbl) );
  ins_pipe(pipe_class_default);
%}

// This is for cases when the ppc64 `bc' instruction does not
// reach far enough. So we emit a far branch here, which is more
// expensive.
//
// Conditional Far Branch
instruct branchConFar(cmpOp cmp, flagsRegSrc crx, label lbl) %{
  // Same match rule as `branchCon'.
  match(If cmp crx);
  effect(USE crx, USE lbl);
  // Higher cost than `branchCon'.
  ins_cost(5*BRANCH_COST);

  // This is not a short variant of a branch, but the long variant.
  ins_short_branch(0);

  format %{ "B_FAR$cmp $crx, $lbl" %}
  size(8);
  ins_encode( enc_bc_far(crx, cmp, lbl) );
  ins_pipe(pipe_class_default);
%}

instruct branchLoopEnd(cmpOp cmp, flagsRegSrc crx, label labl) %{
  match(CountedLoopEnd cmp crx);
  effect(USE labl);
  ins_cost(BRANCH_COST);

  // short variant.
  ins_short_branch(1);

  format %{ "B$cmp $crx, $labl \t// counted loop end" %}
  size(4);
  ins_encode( enc_bc(crx, cmp, labl) );
  ins_pipe(pipe_class_default);
%}

instruct branchLoopEndFar(cmpOp cmp, flagsRegSrc crx, label labl) %{
  match(CountedLoopEnd cmp crx);
  effect(USE labl);
  ins_cost(BRANCH_COST);

  // Long variant.
  ins_short_branch(0);

  format %{ "B_FAR$cmp $crx, $labl \t// counted loop end" %}
  size(8);
  ins_encode( enc_bc_far(crx, cmp, labl) );
  ins_pipe(pipe_class_default);
%}

// ============================================================================
// Java runtime operations, intrinsics and other complex operations.

// The 2nd slow-half of a subtype check. Scan the subklass's 2ndary superklass
// array for an instance of the superklass. Set a hidden internal cache on a
// hit (cache is checked with exposed code in gen_subtype_check()). Return
// not zero for a miss or zero for a hit. The encoding ALSO sets flags.
//
// GL TODO: Improve this.
// - result should not be a TEMP
// - Add match rule as on sparc avoiding additional Cmp.
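// A C++ sketch of the slow path described above (comments only; the
// accessor names are simplified stand-ins for the real Klass interface):
//
//   int partial_subtype_check(Klass* sub, Klass* super) {
//     Array<Klass*>* secondaries = sub->secondary_supers();
//     for (int i = 0; i < secondaries->length(); i++) {
//       if (secondaries->at(i) == super) {
//         sub->set_secondary_super_cache(super);  // hidden cache on a hit
//         return 0;                               // zero: hit
//       }
//     }
//     return 1;                                   // not zero: miss
//   }
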
instruct partialSubtypeCheck(iRegPdst result, iRegP_N2P subklass, iRegP_N2P superklass,
|
|
iRegPdst tmp_klass, iRegPdst tmp_arrayptr) %{
|
|
match(Set result (PartialSubtypeCheck subklass superklass));
|
|
predicate(!UseSecondarySupersTable);
|
|
effect(TEMP_DEF result, TEMP tmp_klass, TEMP tmp_arrayptr);
|
|
ins_cost(DEFAULT_COST*10);
|
|
|
|
format %{ "PartialSubtypeCheck $result = ($subklass instanceOf $superklass) tmp: $tmp_klass, $tmp_arrayptr" %}
|
|
ins_encode %{
|
|
__ check_klass_subtype_slow_path($subklass$$Register, $superklass$$Register, $tmp_arrayptr$$Register,
|
|
$tmp_klass$$Register, nullptr, $result$$Register);
|
|
%}
|
|
ins_pipe(pipe_class_default);
|
|
%}
|
|
|
|
// Two versions of partialSubtypeCheck, both used when we need to
|
|
// search for a super class in the secondary supers array. The first
|
|
// is used when we don't know _a priori_ the class being searched
|
|
// for. The second, far more common, is used when we do know: this is
|
|
// used for instanceof, checkcast, and any case where C2 can determine
|
|
// it by constant propagation.
|
|
instruct partialSubtypeCheckVarSuper(iRegPsrc sub, iRegPsrc super, iRegPdst result,
|
|
iRegPdst tempR1, iRegPdst tempR2, iRegPdst tempR3, iRegPdst tempR4,
|
|
flagsRegCR0 cr0, regCTR ctr)
|
|
%{
|
|
match(Set result (PartialSubtypeCheck sub super));
|
|
predicate(UseSecondarySupersTable);
|
|
effect(KILL cr0, KILL ctr, TEMP_DEF result, TEMP tempR1, TEMP tempR2, TEMP tempR3, TEMP tempR4);
|
|
|
|
ins_cost(DEFAULT_COST * 10); // slightly larger than the next version
|
|
format %{ "partialSubtypeCheck $result, $sub, $super" %}
|
|
ins_encode %{
|
|
__ lookup_secondary_supers_table_var($sub$$Register, $super$$Register,
|
|
$tempR1$$Register, $tempR2$$Register, $tempR3$$Register, $tempR4$$Register,
|
|
$result$$Register);
|
|
%}
|
|
ins_pipe(pipe_class_memory);
|
|
%}
instruct partialSubtypeCheckConstSuper(rarg3RegP sub, rarg2RegP super_reg, immP super_con, rarg6RegP result,
                                       rarg1RegP tempR1, rarg5RegP tempR2, rarg4RegP tempR3, rscratch1RegP tempR4,
                                       flagsRegCR0 cr0, regCTR ctr)
%{
  match(Set result (PartialSubtypeCheck sub (Binary super_reg super_con)));
  predicate(UseSecondarySupersTable);
  effect(KILL cr0, KILL ctr, TEMP tempR1, TEMP tempR2, TEMP tempR3, TEMP tempR4);

  ins_cost(DEFAULT_COST*8); // smaller than the other version
  format %{ "partialSubtypeCheck $result, $sub, $super_reg" %}

  ins_encode %{
    u1 super_klass_slot = ((Klass*)$super_con$$constant)->hash_slot();
    if (InlineSecondarySupersTest) {
      __ lookup_secondary_supers_table_const($sub$$Register, $super_reg$$Register,
                                             $tempR1$$Register, $tempR2$$Register, $tempR3$$Register, $tempR4$$Register,
                                             $result$$Register, super_klass_slot);
    } else {
      address stub = StubRoutines::lookup_secondary_supers_table_stub(super_klass_slot);
      Register r_stub_addr = $tempR1$$Register;
      __ add_const_optimized(r_stub_addr, R29_TOC, MacroAssembler::offset_to_global_toc(stub), R0);
      __ mtctr(r_stub_addr);
      __ bctrl();
    }
  %}

  ins_pipe(pipe_class_memory);
%}

// inlined locking and unlocking

instruct cmpFastLock(flagsRegCR0 crx, iRegPdst oop, iRegPdst box, iRegPdst tmp1, iRegPdst tmp2) %{
  predicate(LockingMode != LM_LIGHTWEIGHT);
  match(Set crx (FastLock oop box));
  effect(TEMP tmp1, TEMP tmp2);

  format %{ "FASTLOCK $oop, $box, $tmp1, $tmp2" %}
  ins_encode %{
    __ compiler_fast_lock_object($crx$$CondRegister, $oop$$Register, $box$$Register,
                                 $tmp1$$Register, $tmp2$$Register, /*tmp3*/ R0);
    // If locking was successful, crx should indicate 'EQ'.
    // The compiler generates a branch to the runtime call to
    // _complete_monitor_locking_Java for the case where crx is 'NE'.
  %}
  ins_pipe(pipe_class_compare);
%}

instruct cmpFastUnlock(flagsRegCR0 crx, iRegPdst oop, iRegPdst box, iRegPdst tmp1, iRegPdst tmp2, iRegPdst tmp3) %{
  predicate(LockingMode != LM_LIGHTWEIGHT);
  match(Set crx (FastUnlock oop box));
  effect(TEMP tmp1, TEMP tmp2, TEMP tmp3);

  format %{ "FASTUNLOCK $oop, $box, $tmp1, $tmp2, $tmp3" %}
  ins_encode %{
    __ compiler_fast_unlock_object($crx$$CondRegister, $oop$$Register, $box$$Register,
                                   $tmp1$$Register, $tmp2$$Register, $tmp3$$Register);
    // If unlocking was successful, crx should indicate 'EQ'.
    // The compiler generates a branch to the runtime call to
    // _complete_monitor_unlocking_Java for the case where crx is 'NE'.
  %}
  ins_pipe(pipe_class_compare);
%}

instruct cmpFastLockLightweight(flagsRegCR0 crx, iRegPdst oop, iRegPdst box, iRegPdst tmp1, iRegPdst tmp2) %{
  predicate(LockingMode == LM_LIGHTWEIGHT && !UseObjectMonitorTable);
  match(Set crx (FastLock oop box));
  effect(TEMP tmp1, TEMP tmp2);

  format %{ "FASTLOCK $oop, $box, $tmp1, $tmp2" %}
  ins_encode %{
    __ fast_lock_lightweight($crx$$CondRegister, $oop$$Register, $box$$Register,
                             $tmp1$$Register, $tmp2$$Register, noreg /*tmp3*/);
    // If locking was successful, crx should indicate 'EQ'.
    // The compiler generates a branch to the runtime call to
    // _complete_monitor_locking_Java for the case where crx is 'NE'.
  %}
  ins_pipe(pipe_class_compare);
%}

instruct cmpFastLockMonitorTable(flagsRegCR0 crx, iRegPdst oop, iRegPdst box, iRegPdst tmp1, iRegPdst tmp2, iRegPdst tmp3, flagsRegCR1 cr1) %{
  predicate(LockingMode == LM_LIGHTWEIGHT && UseObjectMonitorTable);
  match(Set crx (FastLock oop box));
  effect(TEMP tmp1, TEMP tmp2, TEMP tmp3, KILL cr1);

  format %{ "FASTLOCK $oop, $box, $tmp1, $tmp2, $tmp3" %}
  ins_encode %{
    __ fast_lock_lightweight($crx$$CondRegister, $oop$$Register, $box$$Register,
                             $tmp1$$Register, $tmp2$$Register, $tmp3$$Register);
    // If locking was successful, crx should indicate 'EQ'.
    // The compiler generates a branch to the runtime call to
    // _complete_monitor_locking_Java for the case where crx is 'NE'.
  %}
  ins_pipe(pipe_class_compare);
%}

instruct cmpFastUnlockLightweight(flagsRegCR0 crx, iRegPdst oop, iRegPdst box, iRegPdst tmp1, iRegPdst tmp2, iRegPdst tmp3) %{
  predicate(LockingMode == LM_LIGHTWEIGHT);
  match(Set crx (FastUnlock oop box));
  effect(TEMP tmp1, TEMP tmp2, TEMP tmp3);

  format %{ "FASTUNLOCK $oop, $box, $tmp1, $tmp2, $tmp3" %}
  ins_encode %{
    __ fast_unlock_lightweight($crx$$CondRegister, $oop$$Register, $box$$Register,
                               $tmp1$$Register, $tmp2$$Register, $tmp3$$Register);
    // If unlocking was successful, crx should indicate 'EQ'.
    // The compiler generates a branch to the runtime call to
    // _complete_monitor_unlocking_Java for the case where crx is 'NE'.
  %}
  ins_pipe(pipe_class_compare);
%}

// Align address.
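// Note: masking with a negative power of two rounds the pointer down to that
// alignment; clrrdi implements the AndL by clearing the low log2(-mask) bits
// (e.g. mask == -8 clears the low three bits).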
instruct align_addr(iRegPdst dst, iRegPsrc src, immLnegpow2 mask) %{
  match(Set dst (CastX2P (AndL (CastP2X src) mask)));

  format %{ "ANDDI $dst, $src, $mask \t// next aligned address" %}
  size(4);
  ins_encode %{
    __ clrrdi($dst$$Register, $src$$Register, log2i_exact(-(julong)$mask$$constant));
  %}
  ins_pipe(pipe_class_default);
%}

// Array size computation.
instruct array_size(iRegLdst dst, iRegPsrc end, iRegPsrc start) %{
  match(Set dst (SubL (CastP2X end) (CastP2X start)));

  format %{ "SUB $dst, $end, $start \t// array size in bytes" %}
  size(4);
  ins_encode %{
    __ subf($dst$$Register, $start$$Register, $end$$Register);
  %}
  ins_pipe(pipe_class_default);
%}

// Clear-array with constant short array length. The versions below can use dcbz with cnt > 30.
instruct inlineCallClearArrayShort(immLmax30 cnt, rarg2RegP base, Universe dummy, regCTR ctr) %{
  match(Set dummy (ClearArray cnt base));
  effect(USE_KILL base, KILL ctr);
  ins_cost(2 * MEMORY_REF_COST);

  format %{ "ClearArray $cnt, $base" %}
  ins_encode %{
    __ clear_memory_constlen($base$$Register, $cnt$$constant, R0); // kills base, R0
  %}
  ins_pipe(pipe_class_default);
%}

// Clear-array with constant large array length.
instruct inlineCallClearArrayLarge(immL cnt, rarg2RegP base, Universe dummy, iRegLdst tmp, regCTR ctr) %{
  match(Set dummy (ClearArray cnt base));
  effect(USE_KILL base, TEMP tmp, KILL ctr);
  ins_cost(3 * MEMORY_REF_COST);

  format %{ "ClearArray $cnt, $base \t// KILL $tmp" %}
  ins_encode %{
    __ clear_memory_doubleword($base$$Register, $tmp$$Register, R0, $cnt$$constant); // kills base, R0
  %}
  ins_pipe(pipe_class_default);
%}

// Clear-array with dynamic array length.
instruct inlineCallClearArray(rarg1RegL cnt, rarg2RegP base, Universe dummy, regCTR ctr) %{
  match(Set dummy (ClearArray cnt base));
  effect(USE_KILL cnt, USE_KILL base, KILL ctr);
  ins_cost(4 * MEMORY_REF_COST);

  format %{ "ClearArray $cnt, $base" %}
  ins_encode %{
    __ clear_memory_doubleword($base$$Register, $cnt$$Register, R0); // kills cnt, base, R0
  %}
  ins_pipe(pipe_class_default);
%}

instruct string_compareL(rarg1RegP str1, rarg2RegP str2, rarg3RegI cnt1, rarg4RegI cnt2, iRegIdst result,
                         iRegIdst tmp, regCTR ctr, flagsRegCR0 cr0) %{
  predicate(((StrCompNode*)n)->encoding() == StrIntrinsicNode::LL);
  match(Set result (StrComp (Binary str1 cnt1) (Binary str2 cnt2)));
  effect(TEMP_DEF result, USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2, KILL ctr, KILL cr0, TEMP tmp);
  ins_cost(300);
  format %{ "String Compare byte[] $str1,$cnt1,$str2,$cnt2 -> $result \t// KILL $tmp" %}
  ins_encode %{
    __ string_compare($str1$$Register, $str2$$Register,
                      $cnt1$$Register, $cnt2$$Register,
                      $tmp$$Register,
                      $result$$Register, StrIntrinsicNode::LL);
  %}
  ins_pipe(pipe_class_default);
%}

instruct string_compareU(rarg1RegP str1, rarg2RegP str2, rarg3RegI cnt1, rarg4RegI cnt2, iRegIdst result,
                         iRegIdst tmp, regCTR ctr, flagsRegCR0 cr0) %{
  predicate(((StrCompNode*)n)->encoding() == StrIntrinsicNode::UU);
  match(Set result (StrComp (Binary str1 cnt1) (Binary str2 cnt2)));
  effect(TEMP_DEF result, USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2, KILL ctr, KILL cr0, TEMP tmp);
  ins_cost(300);
  format %{ "String Compare char[] $str1,$cnt1,$str2,$cnt2 -> $result \t// KILL $tmp" %}
  ins_encode %{
    __ string_compare($str1$$Register, $str2$$Register,
                      $cnt1$$Register, $cnt2$$Register,
                      $tmp$$Register,
                      $result$$Register, StrIntrinsicNode::UU);
  %}
  ins_pipe(pipe_class_default);
%}

instruct string_compareLU(rarg1RegP str1, rarg2RegP str2, rarg3RegI cnt1, rarg4RegI cnt2, iRegIdst result,
                          iRegIdst tmp, regCTR ctr, flagsRegCR0 cr0) %{
  predicate(((StrCompNode*)n)->encoding() == StrIntrinsicNode::LU);
  match(Set result (StrComp (Binary str1 cnt1) (Binary str2 cnt2)));
  effect(TEMP_DEF result, USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2, KILL ctr, KILL cr0, TEMP tmp);
  ins_cost(300);
  format %{ "String Compare byte[] $str1,$cnt1,$str2,$cnt2 -> $result \t// KILL $tmp" %}
  ins_encode %{
    __ string_compare($str1$$Register, $str2$$Register,
                      $cnt1$$Register, $cnt2$$Register,
                      $tmp$$Register,
                      $result$$Register, StrIntrinsicNode::LU);
  %}
  ins_pipe(pipe_class_default);
%}
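
// Note: unlike the variants above, the UL version below passes its operands
// swapped (str2/cnt2 first), so the Latin1 string is again the first argument
// to string_compare, as in the LU case.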
instruct string_compareUL(rarg1RegP str1, rarg2RegP str2, rarg3RegI cnt1, rarg4RegI cnt2, iRegIdst result,
                          iRegIdst tmp, regCTR ctr, flagsRegCR0 cr0) %{
  predicate(((StrCompNode*)n)->encoding() == StrIntrinsicNode::UL);
  match(Set result (StrComp (Binary str1 cnt1) (Binary str2 cnt2)));
  effect(TEMP_DEF result, USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2, KILL ctr, KILL cr0, TEMP tmp);
  ins_cost(300);
  format %{ "String Compare byte[] $str1,$cnt1,$str2,$cnt2 -> $result \t// KILL $tmp" %}
  ins_encode %{
    __ string_compare($str2$$Register, $str1$$Register,
                      $cnt2$$Register, $cnt1$$Register,
                      $tmp$$Register,
                      $result$$Register, StrIntrinsicNode::UL);
  %}
  ins_pipe(pipe_class_default);
%}

instruct string_equalsL(rarg1RegP str1, rarg2RegP str2, rarg3RegI cnt, iRegIdst result,
                        iRegIdst tmp, regCTR ctr, flagsRegCR0 cr0) %{
  predicate(((StrEqualsNode*)n)->encoding() == StrIntrinsicNode::LL);
  match(Set result (StrEquals (Binary str1 str2) cnt));
  effect(TEMP_DEF result, USE_KILL str1, USE_KILL str2, USE_KILL cnt, TEMP tmp, KILL ctr, KILL cr0);
  ins_cost(300);
  format %{ "String Equals byte[] $str1,$str2,$cnt -> $result \t// KILL $tmp" %}
  ins_encode %{
    __ array_equals(false, $str1$$Register, $str2$$Register,
                    $cnt$$Register, $tmp$$Register,
                    $result$$Register, true /* byte */);
  %}
  ins_pipe(pipe_class_default);
%}

instruct array_equalsB(rarg1RegP ary1, rarg2RegP ary2, iRegIdst result,
                       iRegIdst tmp1, iRegIdst tmp2, regCTR ctr, flagsRegCR0 cr0, flagsRegCR1 cr1) %{
  predicate(((AryEqNode*)n)->encoding() == StrIntrinsicNode::LL);
  match(Set result (AryEq ary1 ary2));
  effect(TEMP_DEF result, USE_KILL ary1, USE_KILL ary2, TEMP tmp1, TEMP tmp2, KILL ctr, KILL cr0, KILL cr1);
  ins_cost(300);
  format %{ "Array Equals $ary1,$ary2 -> $result \t// KILL $tmp1,$tmp2" %}
  ins_encode %{
    __ array_equals(true, $ary1$$Register, $ary2$$Register,
                    $tmp1$$Register, $tmp2$$Register,
                    $result$$Register, true /* byte */);
  %}
  ins_pipe(pipe_class_default);
%}

instruct array_equalsC(rarg1RegP ary1, rarg2RegP ary2, iRegIdst result,
                       iRegIdst tmp1, iRegIdst tmp2, regCTR ctr, flagsRegCR0 cr0, flagsRegCR1 cr1) %{
  predicate(((AryEqNode*)n)->encoding() == StrIntrinsicNode::UU);
  match(Set result (AryEq ary1 ary2));
  effect(TEMP_DEF result, USE_KILL ary1, USE_KILL ary2, TEMP tmp1, TEMP tmp2, KILL ctr, KILL cr0, KILL cr1);
  ins_cost(300);
  format %{ "Array Equals $ary1,$ary2 -> $result \t// KILL $tmp1,$tmp2" %}
  ins_encode %{
    __ array_equals(true, $ary1$$Register, $ary2$$Register,
                    $tmp1$$Register, $tmp2$$Register,
                    $result$$Register, false /* byte */);
  %}
  ins_pipe(pipe_class_default);
%}
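
// For the constant single-char indexOf variants below, the needle is a
// compile-time constant byte array; for the UTF-16 case the first character
// is assembled from its two bytes, honoring the platform byte order
// (VM_LITTLE_ENDIAN).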
instruct indexOf_imm1_char_U(iRegIdst result, iRegPsrc haystack, iRegIsrc haycnt,
                             immP needleImm, immL offsetImm, immI_1 needlecntImm,
                             iRegIdst tmp1, iRegIdst tmp2,
                             flagsRegCR0 cr0, flagsRegCR1 cr1, regCTR ctr) %{
  match(Set result (StrIndexOf (Binary haystack haycnt) (Binary (AddP needleImm offsetImm) needlecntImm)));
  effect(TEMP tmp1, TEMP tmp2, KILL cr0, KILL cr1, KILL ctr);
  // Required for EA: check if it is still a type_array.
  predicate(((StrIndexOfNode*)n)->encoding() == StrIntrinsicNode::UU);
  ins_cost(150);

  format %{ "String IndexOf CSCL1 $haystack[0..$haycnt], $needleImm+$offsetImm[0..$needlecntImm]"
            "-> $result \t// KILL $haycnt, $tmp1, $tmp2, $cr0, $cr1" %}

  ins_encode %{
    immPOper *needleOper = (immPOper *)$needleImm;
    const TypeOopPtr *t = needleOper->type()->isa_oopptr();
    ciTypeArray* needle_values = t->const_oop()->as_type_array(); // Pointer to live char *
    jchar chr;
#ifdef VM_LITTLE_ENDIAN
    chr = (((jchar)(unsigned char)needle_values->element_value(1).as_byte()) << 8) |
           ((jchar)(unsigned char)needle_values->element_value(0).as_byte());
#else
    chr = (((jchar)(unsigned char)needle_values->element_value(0).as_byte()) << 8) |
           ((jchar)(unsigned char)needle_values->element_value(1).as_byte());
#endif
    __ string_indexof_char($result$$Register,
                           $haystack$$Register, $haycnt$$Register,
                           R0, chr,
                           $tmp1$$Register, $tmp2$$Register, false /*is_byte*/);
  %}
  ins_pipe(pipe_class_compare);
%}

instruct indexOf_imm1_char_L(iRegIdst result, iRegPsrc haystack, iRegIsrc haycnt,
                             immP needleImm, immL offsetImm, immI_1 needlecntImm,
                             iRegIdst tmp1, iRegIdst tmp2,
                             flagsRegCR0 cr0, flagsRegCR1 cr1, regCTR ctr) %{
  match(Set result (StrIndexOf (Binary haystack haycnt) (Binary (AddP needleImm offsetImm) needlecntImm)));
  effect(TEMP tmp1, TEMP tmp2, KILL cr0, KILL cr1, KILL ctr);
  // Required for EA: check if it is still a type_array.
  predicate(((StrIndexOfNode*)n)->encoding() == StrIntrinsicNode::LL);
  ins_cost(150);

  format %{ "String IndexOf CSCL1 $haystack[0..$haycnt], $needleImm+$offsetImm[0..$needlecntImm]"
            "-> $result \t// KILL $haycnt, $tmp1, $tmp2, $cr0, $cr1" %}

  ins_encode %{
    immPOper *needleOper = (immPOper *)$needleImm;
    const TypeOopPtr *t = needleOper->type()->isa_oopptr();
    ciTypeArray* needle_values = t->const_oop()->as_type_array(); // Pointer to live char *
    jchar chr = (jchar)needle_values->element_value(0).as_byte();
    __ string_indexof_char($result$$Register,
                           $haystack$$Register, $haycnt$$Register,
                           R0, chr,
                           $tmp1$$Register, $tmp2$$Register, true /*is_byte*/);
  %}
  ins_pipe(pipe_class_compare);
%}

instruct indexOf_imm1_char_UL(iRegIdst result, iRegPsrc haystack, iRegIsrc haycnt,
                              immP needleImm, immL offsetImm, immI_1 needlecntImm,
                              iRegIdst tmp1, iRegIdst tmp2,
                              flagsRegCR0 cr0, flagsRegCR1 cr1, regCTR ctr) %{
  match(Set result (StrIndexOf (Binary haystack haycnt) (Binary (AddP needleImm offsetImm) needlecntImm)));
  effect(TEMP tmp1, TEMP tmp2, KILL cr0, KILL cr1, KILL ctr);
  // Required for EA: check if it is still a type_array.
  predicate(((StrIndexOfNode*)n)->encoding() == StrIntrinsicNode::UL);
  ins_cost(150);

  format %{ "String IndexOf CSCL1 $haystack[0..$haycnt], $needleImm+$offsetImm[0..$needlecntImm]"
            "-> $result \t// KILL $haycnt, $tmp1, $tmp2, $cr0, $cr1" %}

  ins_encode %{
    immPOper *needleOper = (immPOper *)$needleImm;
    const TypeOopPtr *t = needleOper->type()->isa_oopptr();
    ciTypeArray* needle_values = t->const_oop()->as_type_array(); // Pointer to live char *
    jchar chr = (jchar)needle_values->element_value(0).as_byte();
    __ string_indexof_char($result$$Register,
                           $haystack$$Register, $haycnt$$Register,
                           R0, chr,
                           $tmp1$$Register, $tmp2$$Register, false /*is_byte*/);
  %}
  ins_pipe(pipe_class_compare);
%}

instruct indexOf_imm1_U(iRegIdst result, iRegPsrc haystack, iRegIsrc haycnt,
                        rscratch2RegP needle, immI_1 needlecntImm,
                        iRegIdst tmp1, iRegIdst tmp2,
                        flagsRegCR0 cr0, flagsRegCR1 cr1, regCTR ctr) %{
  match(Set result (StrIndexOf (Binary haystack haycnt) (Binary needle needlecntImm)));
  effect(USE_KILL needle, TEMP tmp1, TEMP tmp2, KILL cr0, KILL cr1, KILL ctr);
  // Required for EA: check if it is still a type_array.
  predicate(((StrIndexOfNode*)n)->encoding() == StrIntrinsicNode::UU &&
            n->in(3)->in(1)->bottom_type()->is_aryptr()->const_oop() &&
            n->in(3)->in(1)->bottom_type()->is_aryptr()->const_oop()->is_type_array());
  ins_cost(180);

  format %{ "String IndexOf SCL1 $haystack[0..$haycnt], $needle[0..$needlecntImm]"
            " -> $result \t// KILL $haycnt, $needle, $tmp1, $tmp2, $cr0, $cr1" %}
  ins_encode %{
    Node *ndl = in(operand_index($needle)); // The node that defines needle.
    ciTypeArray* needle_values = ndl->bottom_type()->is_aryptr()->const_oop()->as_type_array();
    guarantee(needle_values, "sanity");
    jchar chr;
#ifdef VM_LITTLE_ENDIAN
    chr = (((jchar)(unsigned char)needle_values->element_value(1).as_byte()) << 8) |
           ((jchar)(unsigned char)needle_values->element_value(0).as_byte());
#else
    chr = (((jchar)(unsigned char)needle_values->element_value(0).as_byte()) << 8) |
           ((jchar)(unsigned char)needle_values->element_value(1).as_byte());
#endif
    __ string_indexof_char($result$$Register,
                           $haystack$$Register, $haycnt$$Register,
                           R0, chr,
                           $tmp1$$Register, $tmp2$$Register, false /*is_byte*/);
  %}
  ins_pipe(pipe_class_compare);
%}

instruct indexOf_imm1_L(iRegIdst result, iRegPsrc haystack, iRegIsrc haycnt,
                        rscratch2RegP needle, immI_1 needlecntImm,
                        iRegIdst tmp1, iRegIdst tmp2,
                        flagsRegCR0 cr0, flagsRegCR1 cr1, regCTR ctr) %{
  match(Set result (StrIndexOf (Binary haystack haycnt) (Binary needle needlecntImm)));
  effect(USE_KILL needle, TEMP tmp1, TEMP tmp2, KILL cr0, KILL cr1, KILL ctr);
  // Required for EA: check if it is still a type_array.
  predicate(((StrIndexOfNode*)n)->encoding() == StrIntrinsicNode::LL &&
            n->in(3)->in(1)->bottom_type()->is_aryptr()->const_oop() &&
            n->in(3)->in(1)->bottom_type()->is_aryptr()->const_oop()->is_type_array());
  ins_cost(180);

  format %{ "String IndexOf SCL1 $haystack[0..$haycnt], $needle[0..$needlecntImm]"
            " -> $result \t// KILL $haycnt, $needle, $tmp1, $tmp2, $cr0, $cr1" %}
  ins_encode %{
    Node *ndl = in(operand_index($needle)); // The node that defines needle.
    ciTypeArray* needle_values = ndl->bottom_type()->is_aryptr()->const_oop()->as_type_array();
    guarantee(needle_values, "sanity");
    jchar chr = (jchar)needle_values->element_value(0).as_byte();
    __ string_indexof_char($result$$Register,
                           $haystack$$Register, $haycnt$$Register,
                           R0, chr,
                           $tmp1$$Register, $tmp2$$Register, true /*is_byte*/);
  %}
  ins_pipe(pipe_class_compare);
%}

instruct indexOf_imm1_UL(iRegIdst result, iRegPsrc haystack, iRegIsrc haycnt,
                         rscratch2RegP needle, immI_1 needlecntImm,
                         iRegIdst tmp1, iRegIdst tmp2,
                         flagsRegCR0 cr0, flagsRegCR1 cr1, regCTR ctr) %{
  match(Set result (StrIndexOf (Binary haystack haycnt) (Binary needle needlecntImm)));
  effect(USE_KILL needle, TEMP tmp1, TEMP tmp2, KILL cr0, KILL cr1, KILL ctr);
  // Required for EA: check if it is still a type_array.
  predicate(((StrIndexOfNode*)n)->encoding() == StrIntrinsicNode::UL &&
            n->in(3)->in(1)->bottom_type()->is_aryptr()->const_oop() &&
            n->in(3)->in(1)->bottom_type()->is_aryptr()->const_oop()->is_type_array());
  ins_cost(180);

  format %{ "String IndexOf SCL1 $haystack[0..$haycnt], $needle[0..$needlecntImm]"
            " -> $result \t// KILL $haycnt, $needle, $tmp1, $tmp2, $cr0, $cr1" %}
  ins_encode %{
    Node *ndl = in(operand_index($needle)); // The node that defines needle.
    ciTypeArray* needle_values = ndl->bottom_type()->is_aryptr()->const_oop()->as_type_array();
    guarantee(needle_values, "sanity");
    jchar chr = (jchar)needle_values->element_value(0).as_byte();
    __ string_indexof_char($result$$Register,
                           $haystack$$Register, $haycnt$$Register,
                           R0, chr,
                           $tmp1$$Register, $tmp2$$Register, false /*is_byte*/);
  %}
  ins_pipe(pipe_class_compare);
%}

instruct indexOfChar_U(iRegIdst result, iRegPsrc haystack, iRegIsrc haycnt,
                       iRegIsrc ch, iRegIdst tmp1, iRegIdst tmp2,
                       flagsRegCR0 cr0, flagsRegCR1 cr1, regCTR ctr) %{
  match(Set result (StrIndexOfChar (Binary haystack haycnt) ch));
  effect(TEMP tmp1, TEMP tmp2, KILL cr0, KILL cr1, KILL ctr);
  predicate(((StrIndexOfCharNode*)n)->encoding() == StrIntrinsicNode::U);
  ins_cost(180);

  format %{ "StringUTF16 IndexOfChar $haystack[0..$haycnt], $ch"
            " -> $result \t// KILL $haycnt, $tmp1, $tmp2, $cr0, $cr1" %}
  ins_encode %{
    __ string_indexof_char($result$$Register,
                           $haystack$$Register, $haycnt$$Register,
                           $ch$$Register, 0 /* this is not used if the character is already in a register */,
                           $tmp1$$Register, $tmp2$$Register, false /*is_byte*/);
  %}
  ins_pipe(pipe_class_compare);
%}

instruct indexOfChar_L(iRegIdst result, iRegPsrc haystack, iRegIsrc haycnt,
                       iRegIsrc ch, iRegIdst tmp1, iRegIdst tmp2,
                       flagsRegCR0 cr0, flagsRegCR1 cr1, regCTR ctr) %{
  match(Set result (StrIndexOfChar (Binary haystack haycnt) ch));
  effect(TEMP tmp1, TEMP tmp2, KILL cr0, KILL cr1, KILL ctr);
  predicate(((StrIndexOfCharNode*)n)->encoding() == StrIntrinsicNode::L);
  ins_cost(180);

  format %{ "StringLatin1 IndexOfChar $haystack[0..$haycnt], $ch"
            " -> $result \t// KILL $haycnt, $tmp1, $tmp2, $cr0, $cr1" %}
  ins_encode %{
    __ string_indexof_char($result$$Register,
                           $haystack$$Register, $haycnt$$Register,
                           $ch$$Register, 0 /* this is not used if the character is already in a register */,
                           $tmp1$$Register, $tmp2$$Register, true /*is_byte*/);
  %}
  ins_pipe(pipe_class_compare);
%}

instruct indexOf_imm_U(iRegIdst result, iRegPsrc haystack, rscratch1RegI haycnt,
                       iRegPsrc needle, uimmI15 needlecntImm,
                       iRegIdst tmp1, iRegIdst tmp2, iRegIdst tmp3, iRegIdst tmp4, iRegIdst tmp5,
                       flagsRegCR0 cr0, flagsRegCR1 cr1, flagsRegCR6 cr6, regCTR ctr) %{
  match(Set result (StrIndexOf (Binary haystack haycnt) (Binary needle needlecntImm)));
  effect(USE_KILL haycnt, /* better: TDEF haycnt, */ TEMP_DEF result,
         TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP tmp4, TEMP tmp5, KILL cr0, KILL cr1, KILL cr6, KILL ctr);
  // Required for EA: check if it is still a type_array.
  predicate(((StrIndexOfNode*)n)->encoding() == StrIntrinsicNode::UU &&
            n->in(3)->in(1)->bottom_type()->is_aryptr()->const_oop() &&
            n->in(3)->in(1)->bottom_type()->is_aryptr()->const_oop()->is_type_array());
  ins_cost(250);

  format %{ "String IndexOf SCL $haystack[0..$haycnt], $needle[0..$needlecntImm]"
            " -> $result \t// KILL $haycnt, $tmp1, $tmp2, $tmp3, $tmp4, $tmp5, $cr0, $cr1" %}
  ins_encode %{
    Node *ndl = in(operand_index($needle)); // The node that defines needle.
    ciTypeArray* needle_values = ndl->bottom_type()->is_aryptr()->const_oop()->as_type_array();

    __ string_indexof($result$$Register,
                      $haystack$$Register, $haycnt$$Register,
                      $needle$$Register, needle_values, $tmp5$$Register, $needlecntImm$$constant,
                      $tmp1$$Register, $tmp2$$Register, $tmp3$$Register, $tmp4$$Register, StrIntrinsicNode::UU);
  %}
  ins_pipe(pipe_class_compare);
%}

instruct indexOf_imm_L(iRegIdst result, iRegPsrc haystack, rscratch1RegI haycnt,
                       iRegPsrc needle, uimmI15 needlecntImm,
                       iRegIdst tmp1, iRegIdst tmp2, iRegIdst tmp3, iRegIdst tmp4, iRegIdst tmp5,
                       flagsRegCR0 cr0, flagsRegCR1 cr1, flagsRegCR6 cr6, regCTR ctr) %{
  match(Set result (StrIndexOf (Binary haystack haycnt) (Binary needle needlecntImm)));
  effect(USE_KILL haycnt, /* better: TDEF haycnt, */ TEMP_DEF result,
         TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP tmp4, TEMP tmp5, KILL cr0, KILL cr1, KILL cr6, KILL ctr);
  // Required for EA: check if it is still a type_array.
  predicate(((StrIndexOfNode*)n)->encoding() == StrIntrinsicNode::LL &&
            n->in(3)->in(1)->bottom_type()->is_aryptr()->const_oop() &&
            n->in(3)->in(1)->bottom_type()->is_aryptr()->const_oop()->is_type_array());
  ins_cost(250);

  format %{ "String IndexOf SCL $haystack[0..$haycnt], $needle[0..$needlecntImm]"
            " -> $result \t// KILL $haycnt, $tmp1, $tmp2, $tmp3, $tmp4, $tmp5, $cr0, $cr1" %}
  ins_encode %{
    Node *ndl = in(operand_index($needle)); // The node that defines needle.
    ciTypeArray* needle_values = ndl->bottom_type()->is_aryptr()->const_oop()->as_type_array();

    __ string_indexof($result$$Register,
                      $haystack$$Register, $haycnt$$Register,
                      $needle$$Register, needle_values, $tmp5$$Register, $needlecntImm$$constant,
                      $tmp1$$Register, $tmp2$$Register, $tmp3$$Register, $tmp4$$Register, StrIntrinsicNode::LL);
  %}
  ins_pipe(pipe_class_compare);
%}

instruct indexOf_imm_UL(iRegIdst result, iRegPsrc haystack, rscratch1RegI haycnt,
                        iRegPsrc needle, uimmI15 needlecntImm,
                        iRegIdst tmp1, iRegIdst tmp2, iRegIdst tmp3, iRegIdst tmp4, iRegIdst tmp5,
                        flagsRegCR0 cr0, flagsRegCR1 cr1, flagsRegCR6 cr6, regCTR ctr) %{
  match(Set result (StrIndexOf (Binary haystack haycnt) (Binary needle needlecntImm)));
  effect(USE_KILL haycnt, /* better: TDEF haycnt, */ TEMP_DEF result,
         TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP tmp4, TEMP tmp5, KILL cr0, KILL cr1, KILL cr6, KILL ctr);
  // Required for EA: check if it is still a type_array.
  predicate(((StrIndexOfNode*)n)->encoding() == StrIntrinsicNode::UL &&
            n->in(3)->in(1)->bottom_type()->is_aryptr()->const_oop() &&
            n->in(3)->in(1)->bottom_type()->is_aryptr()->const_oop()->is_type_array());
  ins_cost(250);

  format %{ "String IndexOf SCL $haystack[0..$haycnt], $needle[0..$needlecntImm]"
            " -> $result \t// KILL $haycnt, $tmp1, $tmp2, $tmp3, $tmp4, $tmp5, $cr0, $cr1" %}
  ins_encode %{
    Node *ndl = in(operand_index($needle)); // The node that defines needle.
    ciTypeArray* needle_values = ndl->bottom_type()->is_aryptr()->const_oop()->as_type_array();

    __ string_indexof($result$$Register,
                      $haystack$$Register, $haycnt$$Register,
                      $needle$$Register, needle_values, $tmp5$$Register, $needlecntImm$$constant,
                      $tmp1$$Register, $tmp2$$Register, $tmp3$$Register, $tmp4$$Register, StrIntrinsicNode::UL);
  %}
  ins_pipe(pipe_class_compare);
%}

instruct indexOf_U(iRegIdst result, iRegPsrc haystack, rscratch1RegI haycnt, iRegPsrc needle, rscratch2RegI needlecnt,
                   iRegLdst tmp1, iRegLdst tmp2, iRegLdst tmp3, iRegLdst tmp4,
                   flagsRegCR0 cr0, flagsRegCR1 cr1, flagsRegCR6 cr6, regCTR ctr) %{
  match(Set result (StrIndexOf (Binary haystack haycnt) (Binary needle needlecnt)));
  effect(USE_KILL haycnt, USE_KILL needlecnt, /*better: TDEF haycnt, TDEF needlecnt,*/
         TEMP_DEF result,
         TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP tmp4, KILL cr0, KILL cr1, KILL cr6, KILL ctr);
  predicate(((StrIndexOfNode*)n)->encoding() == StrIntrinsicNode::UU);
  ins_cost(300);

  format %{ "String IndexOf $haystack[0..$haycnt], $needle[0..$needlecnt]"
            " -> $result \t// KILL $haycnt, $needlecnt, $tmp1, $tmp2, $tmp3, $tmp4, $cr0, $cr1" %}
  ins_encode %{
    __ string_indexof($result$$Register,
                      $haystack$$Register, $haycnt$$Register,
                      $needle$$Register, nullptr, $needlecnt$$Register, 0, // needlecnt not constant.
                      $tmp1$$Register, $tmp2$$Register, $tmp3$$Register, $tmp4$$Register, StrIntrinsicNode::UU);
  %}
  ins_pipe(pipe_class_compare);
%}

instruct indexOf_L(iRegIdst result, iRegPsrc haystack, rscratch1RegI haycnt, iRegPsrc needle, rscratch2RegI needlecnt,
                   iRegLdst tmp1, iRegLdst tmp2, iRegLdst tmp3, iRegLdst tmp4,
                   flagsRegCR0 cr0, flagsRegCR1 cr1, flagsRegCR6 cr6, regCTR ctr) %{
  match(Set result (StrIndexOf (Binary haystack haycnt) (Binary needle needlecnt)));
  effect(USE_KILL haycnt, USE_KILL needlecnt, /*better: TDEF haycnt, TDEF needlecnt,*/
         TEMP_DEF result,
         TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP tmp4, KILL cr0, KILL cr1, KILL cr6, KILL ctr);
  predicate(((StrIndexOfNode*)n)->encoding() == StrIntrinsicNode::LL);
  ins_cost(300);

  format %{ "String IndexOf $haystack[0..$haycnt], $needle[0..$needlecnt]"
            " -> $result \t// KILL $haycnt, $needlecnt, $tmp1, $tmp2, $tmp3, $tmp4, $cr0, $cr1" %}
  ins_encode %{
    __ string_indexof($result$$Register,
                      $haystack$$Register, $haycnt$$Register,
                      $needle$$Register, nullptr, $needlecnt$$Register, 0, // needlecnt not constant.
                      $tmp1$$Register, $tmp2$$Register, $tmp3$$Register, $tmp4$$Register, StrIntrinsicNode::LL);
  %}
  ins_pipe(pipe_class_compare);
%}

instruct indexOf_UL(iRegIdst result, iRegPsrc haystack, rscratch1RegI haycnt, iRegPsrc needle, rscratch2RegI needlecnt,
                    iRegLdst tmp1, iRegLdst tmp2, iRegLdst tmp3, iRegLdst tmp4,
                    flagsRegCR0 cr0, flagsRegCR1 cr1, flagsRegCR6 cr6, regCTR ctr) %{
  match(Set result (StrIndexOf (Binary haystack haycnt) (Binary needle needlecnt)));
  effect(USE_KILL haycnt, USE_KILL needlecnt, /*better: TDEF haycnt, TDEF needlecnt,*/
         TEMP_DEF result,
         TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP tmp4, KILL cr0, KILL cr1, KILL cr6, KILL ctr);
  predicate(((StrIndexOfNode*)n)->encoding() == StrIntrinsicNode::UL);
  ins_cost(300);

  format %{ "String IndexOf $haystack[0..$haycnt], $needle[0..$needlecnt]"
            " -> $result \t// KILL $haycnt, $needlecnt, $tmp1, $tmp2, $tmp3, $tmp4, $cr0, $cr1" %}
  ins_encode %{
    __ string_indexof($result$$Register,
                      $haystack$$Register, $haycnt$$Register,
                      $needle$$Register, nullptr, $needlecnt$$Register, 0, // needlecnt not constant.
                      $tmp1$$Register, $tmp2$$Register, $tmp3$$Register, $tmp4$$Register, StrIntrinsicNode::UL);
  %}
  ins_pipe(pipe_class_compare);
%}

// char[] to byte[] compression
instruct string_compress(rarg1RegP src, rarg2RegP dst, iRegIsrc len, iRegIdst result, iRegLdst tmp1,
                         iRegLdst tmp2, iRegLdst tmp3, iRegLdst tmp4, iRegLdst tmp5, regCTR ctr, flagsRegCR0 cr0) %{
  match(Set result (StrCompressedCopy src (Binary dst len)));
  effect(TEMP_DEF result, TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP tmp4, TEMP tmp5,
         USE_KILL src, USE_KILL dst, KILL ctr, KILL cr0);
  ins_cost(300);
  format %{ "String Compress $src,$dst,$len -> $result \t// KILL $tmp1, $tmp2, $tmp3, $tmp4, $tmp5" %}
  ins_encode %{
    __ encode_iso_array($src$$Register, $dst$$Register, $len$$Register, $tmp1$$Register, $tmp2$$Register,
                        $tmp3$$Register, $tmp4$$Register, $tmp5$$Register, $result$$Register, false);
  %}
  ins_pipe(pipe_class_default);
%}

// byte[] to char[] inflation
instruct string_inflate(Universe dummy, rarg1RegP src, rarg2RegP dst, iRegIsrc len, iRegLdst tmp1,
                        iRegLdst tmp2, iRegLdst tmp3, iRegLdst tmp4, iRegLdst tmp5, regCTR ctr, flagsRegCR0 cr0) %{
  match(Set dummy (StrInflatedCopy src (Binary dst len)));
  effect(TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP tmp4, TEMP tmp5, USE_KILL src, USE_KILL dst, KILL ctr, KILL cr0);
  ins_cost(300);
  format %{ "String Inflate $src,$dst,$len \t// KILL $tmp1, $tmp2, $tmp3, $tmp4, $tmp5" %}
  ins_encode %{
    Label Ldone;
    __ string_inflate_16($src$$Register, $dst$$Register, $len$$Register, $tmp1$$Register,
                         $tmp2$$Register, $tmp3$$Register, $tmp4$$Register, $tmp5$$Register);
    __ rldicl_($tmp1$$Register, $len$$Register, 0, 64-3); // Remaining characters (len & 7).
    __ beq(CR0, Ldone);
    __ string_inflate($src$$Register, $dst$$Register, $tmp1$$Register, $tmp2$$Register);
    __ bind(Ldone);
  %}
  ins_pipe(pipe_class_default);
%}

// StringCoding.java intrinsics
instruct count_positives(iRegPsrc ary1, iRegIsrc len, iRegIdst result, iRegLdst tmp1, iRegLdst tmp2,
                         regCTR ctr, flagsRegCR0 cr0)
%{
  match(Set result (CountPositives ary1 len));
  effect(TEMP_DEF result, TEMP tmp1, TEMP tmp2, KILL ctr, KILL cr0);
  ins_cost(300);
  format %{ "count positives byte[] $ary1,$len -> $result \t// KILL $tmp1, $tmp2" %}
  ins_encode %{
    __ count_positives($ary1$$Register, $len$$Register, $result$$Register,
                       $tmp1$$Register, $tmp2$$Register);
  %}
  ins_pipe(pipe_class_default);
%}

// encode char[] to byte[] in ISO_8859_1
instruct encode_iso_array(rarg1RegP src, rarg2RegP dst, iRegIsrc len, iRegIdst result, iRegLdst tmp1,
                          iRegLdst tmp2, iRegLdst tmp3, iRegLdst tmp4, iRegLdst tmp5, regCTR ctr, flagsRegCR0 cr0) %{
  predicate(!((EncodeISOArrayNode*)n)->is_ascii());
  match(Set result (EncodeISOArray src (Binary dst len)));
  effect(TEMP_DEF result, TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP tmp4, TEMP tmp5,
         USE_KILL src, USE_KILL dst, KILL ctr, KILL cr0);
  ins_cost(300);
  format %{ "Encode iso array $src,$dst,$len -> $result \t// KILL $tmp1, $tmp2, $tmp3, $tmp4, $tmp5" %}
  ins_encode %{
    __ encode_iso_array($src$$Register, $dst$$Register, $len$$Register, $tmp1$$Register, $tmp2$$Register,
                        $tmp3$$Register, $tmp4$$Register, $tmp5$$Register, $result$$Register, false);
  %}
  ins_pipe(pipe_class_default);
%}

// encode char[] to byte[] in ASCII
instruct encode_ascii_array(rarg1RegP src, rarg2RegP dst, iRegIsrc len, iRegIdst result, iRegLdst tmp1,
                            iRegLdst tmp2, iRegLdst tmp3, iRegLdst tmp4, iRegLdst tmp5, regCTR ctr, flagsRegCR0 cr0) %{
  predicate(((EncodeISOArrayNode*)n)->is_ascii());
  match(Set result (EncodeISOArray src (Binary dst len)));
  effect(TEMP_DEF result, TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP tmp4, TEMP tmp5,
         USE_KILL src, USE_KILL dst, KILL ctr, KILL cr0);
  ins_cost(300);
  format %{ "Encode ascii array $src,$dst,$len -> $result \t// KILL $tmp1, $tmp2, $tmp3, $tmp4, $tmp5" %}
  ins_encode %{
    __ encode_iso_array($src$$Register, $dst$$Register, $len$$Register, $tmp1$$Register, $tmp2$$Register,
                        $tmp3$$Register, $tmp4$$Register, $tmp5$$Register, $result$$Register, true);
  %}
  ins_pipe(pipe_class_default);
%}

//---------- Min/Max Instructions ---------------------------------------------
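
// isel performs a branchless conditional select: dst = CR-bit ? src1 : src2,
// keyed off the compare result in CR0, so min/max need no branch.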

instruct minI_reg_reg_isel(iRegIdst dst, iRegIsrc src1, iRegIsrc src2, flagsRegCR0 cr0) %{
  match(Set dst (MinI src1 src2));
  effect(KILL cr0);
  ins_cost(DEFAULT_COST*2);

  ins_encode %{
    __ cmpw(CR0, $src1$$Register, $src2$$Register);
    __ isel($dst$$Register, CR0, Assembler::less, /*invert*/false, $src1$$Register, $src2$$Register);
  %}
  ins_pipe(pipe_class_default);
%}

instruct maxI_reg_reg_isel(iRegIdst dst, iRegIsrc src1, iRegIsrc src2, flagsRegCR0 cr0) %{
  match(Set dst (MaxI src1 src2));
  effect(KILL cr0);
  ins_cost(DEFAULT_COST*2);

  ins_encode %{
    __ cmpw(CR0, $src1$$Register, $src2$$Register);
    __ isel($dst$$Register, CR0, Assembler::greater, /*invert*/false, $src1$$Register, $src2$$Register);
  %}
  ins_pipe(pipe_class_default);
%}

//---------- Population Count Instructions ------------------------------------

// Popcnt for Power7.
instruct popCountI(iRegIdst dst, iRegIsrc src) %{
  match(Set dst (PopCountI src));
  predicate(UsePopCountInstruction);
  ins_cost(DEFAULT_COST);

  format %{ "POPCNTW $dst, $src" %}
  size(4);
  ins_encode %{
    __ popcntw($dst$$Register, $src$$Register);
  %}
  ins_pipe(pipe_class_default);
%}

// Popcnt for Power7.
instruct popCountL(iRegIdst dst, iRegLsrc src) %{
  predicate(UsePopCountInstruction);
  match(Set dst (PopCountL src));
  ins_cost(DEFAULT_COST);

  format %{ "POPCNTD $dst, $src" %}
  size(4);
  ins_encode %{
    __ popcntd($dst$$Register, $src$$Register);
  %}
  ins_pipe(pipe_class_default);
%}

instruct countLeadingZerosI(iRegIdst dst, iRegIsrc src) %{
  match(Set dst (CountLeadingZerosI src));
  predicate(UseCountLeadingZerosInstructionsPPC64); // See Matcher::match_rule_supported.
  ins_cost(DEFAULT_COST);

  format %{ "CNTLZW $dst, $src" %}
  size(4);
  ins_encode %{
    __ cntlzw($dst$$Register, $src$$Register);
  %}
  ins_pipe(pipe_class_default);
%}

instruct countLeadingZerosL(iRegIdst dst, iRegLsrc src) %{
  match(Set dst (CountLeadingZerosL src));
  predicate(UseCountLeadingZerosInstructionsPPC64); // See Matcher::match_rule_supported.
  ins_cost(DEFAULT_COST);

  format %{ "CNTLZD $dst, $src" %}
  size(4);
  ins_encode %{
    __ cntlzd($dst$$Register, $src$$Register);
  %}
  ins_pipe(pipe_class_default);
%}

instruct countLeadingZerosP(iRegIdst dst, iRegPsrc src) %{
  // no match-rule, false predicate
  effect(DEF dst, USE src);
  predicate(false);

  format %{ "CNTLZD $dst, $src" %}
  size(4);
  ins_encode %{
    __ cntlzd($dst$$Register, $src$$Register);
  %}
  ins_pipe(pipe_class_default);
%}
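
// Count trailing zeros without cnttzw: (src - 1) & ~src has exactly ctz(src)
// low bits set (e.g. src = 0b1000 gives 0b0111), so
// ctz(src) == 32 - cntlzw((src - 1) & ~src); src == 0 yields 32 - cntlzw(-1) == 32.
// The 64-bit expansion below uses the same identity with 64 and cntlzd.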
instruct countTrailingZerosI_Ex(iRegIdst dst, iRegIsrc src) %{
  match(Set dst (CountTrailingZerosI src));
  predicate(UseCountLeadingZerosInstructionsPPC64 && !UseCountTrailingZerosInstructionsPPC64);
  ins_cost(DEFAULT_COST);

  expand %{
    immI16 imm1 %{ (int)-1 %}
    immI16 imm2 %{ (int)32 %}
    immI_minus1 m1 %{ -1 %}
    iRegIdst tmpI1;
    iRegIdst tmpI2;
    iRegIdst tmpI3;
    addI_reg_imm16(tmpI1, src, imm1);
    andcI_reg_reg(tmpI2, src, m1, tmpI1);
    countLeadingZerosI(tmpI3, tmpI2);
    subI_imm16_reg(dst, imm2, tmpI3);
  %}
%}

instruct countTrailingZerosI_cnttzw(iRegIdst dst, iRegIsrc src) %{
  match(Set dst (CountTrailingZerosI src));
  predicate(UseCountTrailingZerosInstructionsPPC64);
  ins_cost(DEFAULT_COST);

  format %{ "CNTTZW $dst, $src" %}
  size(4);
  ins_encode %{
    __ cnttzw($dst$$Register, $src$$Register);
  %}
  ins_pipe(pipe_class_default);
%}

instruct countTrailingZerosL_Ex(iRegIdst dst, iRegLsrc src) %{
  match(Set dst (CountTrailingZerosL src));
  predicate(UseCountLeadingZerosInstructionsPPC64 && !UseCountTrailingZerosInstructionsPPC64);
  ins_cost(DEFAULT_COST);

  expand %{
    immL16 imm1 %{ (long)-1 %}
    immI16 imm2 %{ (int)64 %}
    iRegLdst tmpL1;
    iRegLdst tmpL2;
    iRegIdst tmpL3;
    addL_reg_imm16(tmpL1, src, imm1);
    andcL_reg_reg(tmpL2, tmpL1, src);
    countLeadingZerosL(tmpL3, tmpL2);
    subI_imm16_reg(dst, imm2, tmpL3);
  %}
%}

instruct countTrailingZerosL_cnttzd(iRegIdst dst, iRegLsrc src) %{
  match(Set dst (CountTrailingZerosL src));
  predicate(UseCountTrailingZerosInstructionsPPC64);
  ins_cost(DEFAULT_COST);

  format %{ "CNTTZD $dst, $src" %}
  size(4);
  ins_encode %{
    __ cnttzd($dst$$Register, $src$$Register);
  %}
  ins_pipe(pipe_class_default);
%}

// Expand nodes for byte_reverse_int.
instruct insrwi_a(iRegIdst dst, iRegIsrc src, immI16 pos, immI16 shift) %{
  effect(DEF dst, USE src, USE pos, USE shift);
  predicate(false);

  format %{ "INSRWI $dst, $src, $pos, $shift" %}
  size(4);
  ins_encode %{
    __ insrwi($dst$$Register, $src$$Register, $shift$$constant, $pos$$constant);
  %}
  ins_pipe(pipe_class_default);
%}

// As insrwi_a, but with USE_DEF.
instruct insrwi(iRegIdst dst, iRegIsrc src, immI16 pos, immI16 shift) %{
  effect(USE_DEF dst, USE src, USE pos, USE shift);
  predicate(false);

  format %{ "INSRWI $dst, $src, $pos, $shift" %}
  size(4);
  ins_encode %{
    __ insrwi($dst$$Register, $src$$Register, $shift$$constant, $pos$$constant);
  %}
  ins_pipe(pipe_class_default);
%}

// Just slightly faster than the Java implementation.
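// Reverses src = |a|b|c|d| into dst = |d|c|b|a|: each of the three upper
// bytes is shifted down to the low byte and inserted at its mirrored
// position, then the original low byte d is inserted at the top.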
instruct bytes_reverse_int_Ex(iRegIdst dst, iRegIsrc src) %{
  match(Set dst (ReverseBytesI src));
  predicate(!UseByteReverseInstructions);
  ins_cost(7*DEFAULT_COST);

  expand %{
    immI16 imm24 %{ (int) 24 %}
    immI16 imm16 %{ (int) 16 %}
    immI16 imm8 %{ (int) 8 %}
    immI16 imm4 %{ (int) 4 %}
    immI16 imm0 %{ (int) 0 %}
    iRegLdst tmpI1;
    iRegLdst tmpI2;
    iRegLdst tmpI3;

    urShiftI_reg_imm(tmpI1, src, imm24);
    insrwi_a(dst, tmpI1, imm24, imm8);
    urShiftI_reg_imm(tmpI2, src, imm16);
    insrwi(dst, tmpI2, imm8, imm16);
    urShiftI_reg_imm(tmpI3, src, imm8);
    insrwi(dst, tmpI3, imm8, imm8);
    insrwi(dst, src, imm0, imm8);
  %}
%}

instruct bytes_reverse_int_vec(iRegIdst dst, iRegIsrc src, vecX tmpV) %{
  match(Set dst (ReverseBytesI src));
  predicate(UseVectorByteReverseInstructionsPPC64);
  effect(TEMP tmpV);
  ins_cost(DEFAULT_COST*3);
  size(12);
  format %{ "MTVSRWZ $tmpV, $src\n"
            "\tXXBRW $tmpV, $tmpV\n"
            "\tMFVSRWZ $dst, $tmpV" %}

  ins_encode %{
    __ mtvsrwz($tmpV$$VectorSRegister, $src$$Register);
    __ xxbrw($tmpV$$VectorSRegister, $tmpV$$VectorSRegister);
    __ mfvsrwz($dst$$Register, $tmpV$$VectorSRegister);
  %}
  ins_pipe(pipe_class_default);
%}

instruct bytes_reverse_int(iRegIdst dst, iRegIsrc src) %{
  match(Set dst (ReverseBytesI src));
  predicate(UseByteReverseInstructions);
  ins_cost(DEFAULT_COST);
  size(4);

  format %{ "BRW $dst, $src" %}

  ins_encode %{
    __ brw($dst$$Register, $src$$Register);
  %}
  ins_pipe(pipe_class_default);
%}

instruct bytes_reverse_long_Ex(iRegLdst dst, iRegLsrc src) %{
  match(Set dst (ReverseBytesL src));
  predicate(!UseByteReverseInstructions);
  ins_cost(15*DEFAULT_COST);

  expand %{
    immI16 imm56 %{ (int) 56 %}
    immI16 imm48 %{ (int) 48 %}
    immI16 imm40 %{ (int) 40 %}
    immI16 imm32 %{ (int) 32 %}
    immI16 imm24 %{ (int) 24 %}
    immI16 imm16 %{ (int) 16 %}
    immI16 imm8 %{ (int) 8 %}
    immI16 imm0 %{ (int) 0 %}
    iRegLdst tmpL1;
    iRegLdst tmpL2;
    iRegLdst tmpL3;
    iRegLdst tmpL4;
    iRegLdst tmpL5;
    iRegLdst tmpL6;

    // src   : |a|b|c|d|e|f|g|h|
    rldicl(tmpL1, src, imm8, imm24);     // tmpL1 : | | | |e|f|g|h|a|
    rldicl(tmpL2, tmpL1, imm32, imm24);  // tmpL2 : | | | |a| | | |e|
    rldicl(tmpL3, tmpL2, imm32, imm0);   // tmpL3 : | | | |e| | | |a|
    rldicl(tmpL1, src, imm16, imm24);    // tmpL1 : | | | |f|g|h|a|b|
    rldicl(tmpL2, tmpL1, imm32, imm24);  // tmpL2 : | | | |b| | | |f|
    rldicl(tmpL4, tmpL2, imm40, imm0);   // tmpL4 : | | |f| | | |b| |
    orL_reg_reg(tmpL5, tmpL3, tmpL4);    // tmpL5 : | | |f|e| | |b|a|
    rldicl(tmpL1, src, imm24, imm24);    // tmpL1 : | | | |g|h|a|b|c|
    rldicl(tmpL2, tmpL1, imm32, imm24);  // tmpL2 : | | | |c| | | |g|
    rldicl(tmpL3, tmpL2, imm48, imm0);   // tmpL3 : | |g| | | |c| | |
    rldicl(tmpL1, src, imm32, imm24);    // tmpL1 : | | | |h|a|b|c|d|
    rldicl(tmpL2, tmpL1, imm32, imm24);  // tmpL2 : | | | |d| | | |h|
    rldicl(tmpL4, tmpL2, imm56, imm0);   // tmpL4 : |h| | | |d| | | |
    orL_reg_reg(tmpL6, tmpL3, tmpL4);    // tmpL6 : |h|g| | |d|c| | |
    orL_reg_reg(dst, tmpL5, tmpL6);      // dst   : |h|g|f|e|d|c|b|a|
  %}
%}

instruct bytes_reverse_long_vec(iRegLdst dst, iRegLsrc src, vecX tmpV) %{
  match(Set dst (ReverseBytesL src));
  predicate(UseVectorByteReverseInstructionsPPC64);
  effect(TEMP tmpV);
  ins_cost(DEFAULT_COST*3);
  size(12);
  format %{ "MTVSRD $tmpV, $src\n"
            "\tXXBRD $tmpV, $tmpV\n"
            "\tMFVSRD $dst, $tmpV" %}

  ins_encode %{
    __ mtvsrd($tmpV$$VectorSRegister, $src$$Register);
    __ xxbrd($tmpV$$VectorSRegister, $tmpV$$VectorSRegister);
    __ mfvsrd($dst$$Register, $tmpV$$VectorSRegister);
  %}
  ins_pipe(pipe_class_default);
%}

instruct bytes_reverse_long(iRegLdst dst, iRegLsrc src) %{
  match(Set dst (ReverseBytesL src));
  predicate(UseByteReverseInstructions);
  ins_cost(DEFAULT_COST);
  size(4);

  format %{ "BRD $dst, $src" %}

  ins_encode %{
    __ brd($dst$$Register, $src$$Register);
  %}
  ins_pipe(pipe_class_default);
%}

instruct bytes_reverse_ushort_Ex(iRegIdst dst, iRegIsrc src) %{
  match(Set dst (ReverseBytesUS src));
  predicate(!UseByteReverseInstructions);
  ins_cost(2*DEFAULT_COST);

  expand %{
    immI16 imm16 %{ (int) 16 %}
    immI16 imm8 %{ (int) 8 %}

    urShiftI_reg_imm(dst, src, imm8);
    insrwi(dst, src, imm16, imm8);
  %}
%}

instruct bytes_reverse_ushort(iRegIdst dst, iRegIsrc src) %{
  match(Set dst (ReverseBytesUS src));
  predicate(UseByteReverseInstructions);
  ins_cost(DEFAULT_COST);
  size(4);

  format %{ "BRH $dst, $src" %}

  ins_encode %{
    __ brh($dst$$Register, $src$$Register);
  %}
  ins_pipe(pipe_class_default);
%}

instruct bytes_reverse_short_Ex(iRegIdst dst, iRegIsrc src) %{
  match(Set dst (ReverseBytesS src));
  predicate(!UseByteReverseInstructions);
  ins_cost(3*DEFAULT_COST);

  expand %{
    immI16 imm16 %{ (int) 16 %}
    immI16 imm8 %{ (int) 8 %}
    iRegLdst tmpI1;

    urShiftI_reg_imm(tmpI1, src, imm8);
    insrwi(tmpI1, src, imm16, imm8);
    extsh(dst, tmpI1);
  %}
%}

instruct bytes_reverse_short(iRegIdst dst, iRegIsrc src) %{
  match(Set dst (ReverseBytesS src));
  predicate(UseByteReverseInstructions);
  ins_cost(DEFAULT_COST);
  size(8);

  format %{ "BRH $dst, $src\n\t"
            "EXTSH $dst, $dst" %}

  ins_encode %{
    __ brh($dst$$Register, $src$$Register);
    __ extsh($dst$$Register, $dst$$Register);
  %}
  ins_pipe(pipe_class_default);
%}

// Load Integer reversed byte order
instruct loadI_reversed(iRegIdst dst, indirect mem) %{
  match(Set dst (ReverseBytesI (LoadI mem)));
  predicate(n->in(1)->as_Load()->is_unordered() || followed_by_acquire(n->in(1)));
  ins_cost(MEMORY_REF_COST);

  size(4);
  ins_encode %{
    __ lwbrx($dst$$Register, $mem$$Register);
  %}
  ins_pipe(pipe_class_default);
%}
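
// Acquire variants: the twi_0 (a trap instruction whose condition never
// fires) establishes a dependency on the loaded value; load + dependent
// conditional trap + isync acts as a load-acquire on PPC and is cheaper
// than a full memory barrier.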
instruct loadI_reversed_acquire(iRegIdst dst, indirect mem) %{
  match(Set dst (ReverseBytesI (LoadI mem)));
  ins_cost(2 * MEMORY_REF_COST);

  size(12);
  ins_encode %{
    __ lwbrx($dst$$Register, $mem$$Register);
    __ twi_0($dst$$Register);
    __ isync();
  %}
  ins_pipe(pipe_class_default);
%}

// Load Long - aligned and reversed
instruct loadL_reversed(iRegLdst dst, indirect mem) %{
  match(Set dst (ReverseBytesL (LoadL mem)));
  predicate((n->in(1)->as_Load()->is_unordered() || followed_by_acquire(n->in(1))));
  ins_cost(MEMORY_REF_COST);

  size(4);
  ins_encode %{
    __ ldbrx($dst$$Register, $mem$$Register);
  %}
  ins_pipe(pipe_class_default);
%}

instruct loadL_reversed_acquire(iRegLdst dst, indirect mem) %{
  match(Set dst (ReverseBytesL (LoadL mem)));
  ins_cost(2 * MEMORY_REF_COST);

  size(12);
  ins_encode %{
    __ ldbrx($dst$$Register, $mem$$Register);
    __ twi_0($dst$$Register);
    __ isync();
  %}
  ins_pipe(pipe_class_default);
%}

// Load unsigned short / char reversed byte order
instruct loadUS_reversed(iRegIdst dst, indirect mem) %{
  match(Set dst (ReverseBytesUS (LoadUS mem)));
  predicate(n->in(1)->as_Load()->is_unordered() || followed_by_acquire(n->in(1)));
  ins_cost(MEMORY_REF_COST);

  size(4);
  ins_encode %{
    __ lhbrx($dst$$Register, $mem$$Register);
  %}
  ins_pipe(pipe_class_default);
%}

instruct loadUS_reversed_acquire(iRegIdst dst, indirect mem) %{
  match(Set dst (ReverseBytesUS (LoadUS mem)));
  ins_cost(2 * MEMORY_REF_COST);

  size(12);
  ins_encode %{
    __ lhbrx($dst$$Register, $mem$$Register);
    __ twi_0($dst$$Register);
    __ isync();
  %}
  ins_pipe(pipe_class_default);
%}

// Load short reversed byte order
instruct loadS_reversed(iRegIdst dst, indirect mem) %{
  match(Set dst (ReverseBytesS (LoadS mem)));
  predicate(n->in(1)->as_Load()->is_unordered() || followed_by_acquire(n->in(1)));
  ins_cost(MEMORY_REF_COST + DEFAULT_COST);

  size(8);
  ins_encode %{
    __ lhbrx($dst$$Register, $mem$$Register);
    __ extsh($dst$$Register, $dst$$Register);
  %}
  ins_pipe(pipe_class_default);
%}

instruct loadS_reversed_acquire(iRegIdst dst, indirect mem) %{
  match(Set dst (ReverseBytesS (LoadS mem)));
  ins_cost(2 * MEMORY_REF_COST + DEFAULT_COST);

  size(16);
  ins_encode %{
    __ lhbrx($dst$$Register, $mem$$Register);
    __ twi_0($dst$$Register);
    __ extsh($dst$$Register, $dst$$Register);
    __ isync();
  %}
  ins_pipe(pipe_class_default);
%}

// Store Integer reversed byte order
instruct storeI_reversed(iRegIsrc src, indirect mem) %{
  match(Set mem (StoreI mem (ReverseBytesI src)));
  ins_cost(MEMORY_REF_COST);

  size(4);
  ins_encode %{
    __ stwbrx($src$$Register, $mem$$Register);
  %}
  ins_pipe(pipe_class_default);
%}

// Store Long reversed byte order
instruct storeL_reversed(iRegLsrc src, indirect mem) %{
  match(Set mem (StoreL mem (ReverseBytesL src)));
  ins_cost(MEMORY_REF_COST);

  size(4);
  ins_encode %{
    __ stdbrx($src$$Register, $mem$$Register);
  %}
  ins_pipe(pipe_class_default);
%}

// Store unsigned short / char reversed byte order
instruct storeUS_reversed(iRegIsrc src, indirect mem) %{
  match(Set mem (StoreC mem (ReverseBytesUS src)));
  ins_cost(MEMORY_REF_COST);

  size(4);
  ins_encode %{
    __ sthbrx($src$$Register, $mem$$Register);
  %}
  ins_pipe(pipe_class_default);
%}

// Store short reversed byte order
instruct storeS_reversed(iRegIsrc src, indirect mem) %{
  match(Set mem (StoreC mem (ReverseBytesS src)));
  ins_cost(MEMORY_REF_COST);

  size(4);
  ins_encode %{
    __ sthbrx($src$$Register, $mem$$Register);
  %}
  ins_pipe(pipe_class_default);
%}

instruct mtvsrwz(vecX temp1, iRegIsrc src) %{
  effect(DEF temp1, USE src);

  format %{ "MTVSRWZ $temp1, $src \t// Move to 16-byte register" %}
  size(4);
  ins_encode %{
    __ mtvsrwz($temp1$$VectorSRegister, $src$$Register);
  %}
  ins_pipe(pipe_class_default);
%}

instruct xxspltw(vecX dst, vecX src, immI8 imm1) %{
  effect(DEF dst, USE src, USE imm1);

  format %{ "XXSPLTW $dst, $src, $imm1 \t// Splat word" %}
  size(4);
  ins_encode %{
    __ xxspltw($dst$$VectorSRegister, $src$$VectorSRegister, $imm1$$constant);
  %}
  ins_pipe(pipe_class_default);
%}

instruct xscvdpspn_regF(vecX dst, regF src) %{
  effect(DEF dst, USE src);

  format %{ "XSCVDPSPN $dst, $src \t// Convert scalar single precision to vector single precision" %}
  size(4);
  ins_encode %{
    __ xscvdpspn($dst$$VectorSRegister, $src$$FloatRegister->to_vsr());
  %}
  ins_pipe(pipe_class_default);
%}

//---------- Replicate Vector Instructions ------------------------------------

// Insrdi does replicate if src == dst.
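// E.g. repl32 turns 0x00000000AABBCCDD into 0xAABBCCDDAABBCCDD by inserting
// the low word into the high word.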
instruct repl32(iRegLdst dst) %{
  predicate(false);
  effect(USE_DEF dst);

  format %{ "INSRDI $dst, #0, $dst, #32 \t// replicate" %}
  size(4);
  ins_encode %{
    __ insrdi($dst$$Register, $dst$$Register, 32, 0);
  %}
  ins_pipe(pipe_class_default);
%}

// Insrdi does replicate if src == dst.
instruct repl48(iRegLdst dst) %{
  predicate(false);
  effect(USE_DEF dst);

  format %{ "INSRDI $dst, #0, $dst, #48 \t// replicate" %}
  size(4);
  ins_encode %{
    __ insrdi($dst$$Register, $dst$$Register, 48, 0);
  %}
  ins_pipe(pipe_class_default);
%}

// Insrdi does replicate if src == dst.
instruct repl56(iRegLdst dst) %{
  predicate(false);
  effect(USE_DEF dst);

  format %{ "INSRDI $dst, #0, $dst, #56 \t// replicate" %}
  size(4);
  ins_encode %{
    __ insrdi($dst$$Register, $dst$$Register, 56, 0);
  %}
  ins_pipe(pipe_class_default);
%}

instruct repl8B_reg_Ex(iRegLdst dst, iRegIsrc src) %{
  match(Set dst (Replicate src));
  predicate(n->as_Vector()->length() == 8 &&
            Matcher::vector_element_basic_type(n) == T_BYTE);
  expand %{
    moveReg(dst, src);
    repl56(dst);
    repl48(dst);
    repl32(dst);
  %}
%}

instruct repl8B_immI0(iRegLdst dst, immI_0 zero) %{
  match(Set dst (Replicate zero));
  predicate(n->as_Vector()->length() == 8 &&
            Matcher::vector_element_basic_type(n) == T_BYTE);
  format %{ "LI $dst, #0 \t// replicate8B" %}
  size(4);
  ins_encode %{
    __ li($dst$$Register, (int)((short)($zero$$constant & 0xFFFF)));
  %}
  ins_pipe(pipe_class_default);
%}

instruct repl8B_immIminus1(iRegLdst dst, immI_minus1 src) %{
  match(Set dst (Replicate src));
  predicate(n->as_Vector()->length() == 8 &&
            Matcher::vector_element_basic_type(n) == T_BYTE);
  format %{ "LI $dst, #-1 \t// replicate8B" %}
  size(4);
  ins_encode %{
    __ li($dst$$Register, (int)((short)($src$$constant & 0xFFFF)));
  %}
  ins_pipe(pipe_class_default);
%}

instruct repl16B_reg_Ex(vecX dst, iRegIsrc src) %{
  match(Set dst (Replicate src));
  predicate(n->as_Vector()->length() == 16 &&
            Matcher::vector_element_basic_type(n) == T_BYTE);

  expand %{
    iRegLdst tmpL;
    vecX tmpV;
    immI8 imm1 %{ (int) 1 %}
    moveReg(tmpL, src);
    repl56(tmpL);
    repl48(tmpL);
    mtvsrwz(tmpV, tmpL);
    xxspltw(dst, tmpV, imm1);
  %}
%}
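
// Vectors of all zeros / all ones are generated without a GPR: XXLXOR of a
// register with itself yields 0, XXLEQV of a register with itself yields ~0.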
instruct repl16B_immI0(vecX dst, immI_0 zero) %{
  match(Set dst (Replicate zero));
  predicate(n->as_Vector()->length() == 16 &&
            Matcher::vector_element_basic_type(n) == T_BYTE);

  format %{ "XXLXOR $dst, $zero \t// replicate16B" %}
  size(4);
  ins_encode %{
    __ xxlxor($dst$$VectorSRegister, $dst$$VectorSRegister, $dst$$VectorSRegister);
  %}
  ins_pipe(pipe_class_default);
%}

instruct repl16B_immIminus1(vecX dst, immI_minus1 src) %{
  match(Set dst (Replicate src));
  predicate(n->as_Vector()->length() == 16 &&
            Matcher::vector_element_basic_type(n) == T_BYTE);

  format %{ "XXLEQV $dst, $src \t// replicate16B" %}
  size(4);
  ins_encode %{
    __ xxleqv($dst$$VectorSRegister, $dst$$VectorSRegister, $dst$$VectorSRegister);
  %}
  ins_pipe(pipe_class_default);
%}

instruct repl4S_reg_Ex(iRegLdst dst, iRegIsrc src) %{
  match(Set dst (Replicate src));
  predicate(n->as_Vector()->length() == 4 &&
            Matcher::vector_element_basic_type(n) == T_SHORT);
  expand %{
    moveReg(dst, src);
    repl48(dst);
    repl32(dst);
  %}
%}

instruct repl4S_immI0(iRegLdst dst, immI_0 zero) %{
  match(Set dst (Replicate zero));
  predicate(n->as_Vector()->length() == 4 &&
            Matcher::vector_element_basic_type(n) == T_SHORT);
  format %{ "LI $dst, #0 \t// replicate4S" %}
  size(4);
  ins_encode %{
    __ li($dst$$Register, (int)((short)($zero$$constant & 0xFFFF)));
  %}
  ins_pipe(pipe_class_default);
%}

instruct repl4S_immIminus1(iRegLdst dst, immI_minus1 src) %{
  match(Set dst (Replicate src));
  predicate(n->as_Vector()->length() == 4 &&
            Matcher::vector_element_basic_type(n) == T_SHORT);
  format %{ "LI $dst, -1 \t// replicate4S" %}
  size(4);
  ins_encode %{
    __ li($dst$$Register, (int)((short)($src$$constant & 0xFFFF)));
  %}
  ins_pipe(pipe_class_default);
%}

instruct repl8S_reg_Ex(vecX dst, iRegIsrc src) %{
  match(Set dst (Replicate src));
  predicate(n->as_Vector()->length() == 8 &&
            Matcher::vector_element_basic_type(n) == T_SHORT);

  expand %{
    iRegLdst tmpL;
    vecX tmpV;
    immI8 zero %{ (int) 0 %}
    moveReg(tmpL, src);
    repl48(tmpL);
    repl32(tmpL);
    mtvsrd(tmpV, tmpL);
    xxpermdi(dst, tmpV, tmpV, zero);
  %}
%}

instruct repl8S_immI0(vecX dst, immI_0 zero) %{
  match(Set dst (Replicate zero));
  predicate(n->as_Vector()->length() == 8 &&
            Matcher::vector_element_basic_type(n) == T_SHORT);

  format %{ "XXLXOR $dst, $zero \t// replicate8S" %}
  size(4);
  ins_encode %{
    __ xxlxor($dst$$VectorSRegister, $dst$$VectorSRegister, $dst$$VectorSRegister);
  %}
  ins_pipe(pipe_class_default);
%}

instruct repl8S_immIminus1(vecX dst, immI_minus1 src) %{
  match(Set dst (Replicate src));
  predicate(n->as_Vector()->length() == 8 &&
            Matcher::vector_element_basic_type(n) == T_SHORT);

  format %{ "XXLEQV $dst, $src \t// replicate8S" %}
  size(4);
  ins_encode %{
    __ xxleqv($dst$$VectorSRegister, $dst$$VectorSRegister, $dst$$VectorSRegister);
  %}
  ins_pipe(pipe_class_default);
%}

instruct repl2I_reg_Ex(iRegLdst dst, iRegIsrc src) %{
  match(Set dst (Replicate src));
  predicate(n->as_Vector()->length() == 2 &&
            Matcher::vector_element_basic_type(n) == T_INT);
  ins_cost(2 * DEFAULT_COST);
  expand %{
    moveReg(dst, src);
    repl32(dst);
  %}
%}

instruct repl2I_immI0(iRegLdst dst, immI_0 zero) %{
  match(Set dst (Replicate zero));
  predicate(n->as_Vector()->length() == 2 &&
            Matcher::vector_element_basic_type(n) == T_INT);
  format %{ "LI $dst, #0 \t// replicate2I" %}
  size(4);
  ins_encode %{
    __ li($dst$$Register, (int)((short)($zero$$constant & 0xFFFF)));
  %}
  ins_pipe(pipe_class_default);
%}

instruct repl2I_immIminus1(iRegLdst dst, immI_minus1 src) %{
  match(Set dst (Replicate src));
  predicate(n->as_Vector()->length() == 2 &&
            Matcher::vector_element_basic_type(n) == T_INT);
  format %{ "LI $dst, -1 \t// replicate2I" %}
  size(4);
  ins_encode %{
    __ li($dst$$Register, (int)((short)($src$$constant & 0xFFFF)));
  %}
  ins_pipe(pipe_class_default);
%}

instruct repl4I_reg_Ex(vecX dst, iRegIsrc src) %{
  match(Set dst (Replicate src));
  predicate(n->as_Vector()->length() == 4 &&
            Matcher::vector_element_basic_type(n) == T_INT);
  ins_cost(2 * DEFAULT_COST);

  expand %{
    iRegLdst tmpL;
    vecX tmpV;
    immI8 zero %{ (int) 0 %}
    moveReg(tmpL, src);
    repl32(tmpL);
    mtvsrd(tmpV, tmpL);
    xxpermdi(dst, tmpV, tmpV, zero);
  %}
%}

instruct repl4I_immI0(vecX dst, immI_0 zero) %{
  match(Set dst (Replicate zero));
  predicate(n->as_Vector()->length() == 4 &&
            Matcher::vector_element_basic_type(n) == T_INT);

  format %{ "XXLXOR $dst, $zero \t// replicate4I" %}
  size(4);
  ins_encode %{
    __ xxlxor($dst$$VectorSRegister, $dst$$VectorSRegister, $dst$$VectorSRegister);
  %}
  ins_pipe(pipe_class_default);
%}

instruct repl4I_immIminus1(vecX dst, immI_minus1 src) %{
  match(Set dst (Replicate src));
  predicate(n->as_Vector()->length() == 4 &&
            Matcher::vector_element_basic_type(n) == T_INT);

  format %{ "XXLEQV $dst, $dst, $dst \t// replicate4I" %}
  size(4);
  ins_encode %{
    __ xxleqv($dst$$VectorSRegister, $dst$$VectorSRegister, $dst$$VectorSRegister);
  %}
  ins_pipe(pipe_class_default);
%}

// Move float to int register via stack, replicate.
instruct repl2F_reg_Ex(iRegLdst dst, regF src) %{
  match(Set dst (Replicate src));
  predicate(n->as_Vector()->length() == 2 &&
            Matcher::vector_element_basic_type(n) == T_FLOAT);
  ins_cost(2 * MEMORY_REF_COST + DEFAULT_COST);
  expand %{
    stackSlotL tmpS;
    iRegIdst tmpI;
    moveF2I_reg_stack(tmpS, src);   // Move float to stack.
    moveF2I_stack_reg(tmpI, tmpS);  // Move stack to int reg.
    moveReg(dst, tmpI);             // Move int to long reg.
    repl32(dst);                    // Replicate bitpattern.
  %}
%}

// Replicate scalar constant to packed float values in a Double register.
instruct repl2F_immF_Ex(iRegLdst dst, immF src) %{
  match(Set dst (Replicate src));
  predicate(n->as_Vector()->length() == 2 &&
            Matcher::vector_element_basic_type(n) == T_FLOAT);
  ins_cost(5 * DEFAULT_COST);

  format %{ "LD $dst, offset, $constanttablebase\t// load replicated float $src $src from table, postalloc expanded" %}
  postalloc_expand( postalloc_expand_load_replF_constant(dst, src, constanttablebase) );
%}

// Replicate scalar zero constant to packed float values in a Double register.
instruct repl2F_immF0(iRegLdst dst, immF_0 zero) %{
  match(Set dst (Replicate zero));
  predicate(n->as_Vector()->length() == 2 &&
            Matcher::vector_element_basic_type(n) == T_FLOAT);

  format %{ "LI $dst, #0 \t// replicate2F" %}
  ins_encode %{
    __ li($dst$$Register, 0x0);
  %}
  ins_pipe(pipe_class_default);
%}


//----------Vector Arithmetic Instructions--------------------------------------
|
|
|
|
// Vector Addition Instructions
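
// For illustration (a sketch, not part of the matcher contract): C2's
// superword pass turns a Java loop of the following shape into packed
// AddVI nodes, which vadd4I_reg below matches to a single VADDUWM:
//
//   static void addArrays(int[] a, int[] b, int[] c) {
//     for (int i = 0; i < a.length; i++) {
//       c[i] = a[i] + b[i];   // vectorized to 4 x T_INT lanes per step
//     }
//   }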

instruct vadd16B_reg(vecX dst, vecX src1, vecX src2) %{
  match(Set dst (AddVB src1 src2));
  predicate(n->as_Vector()->length() == 16);
  format %{ "VADDUBM $dst,$src1,$src2\t// add packed16B" %}
  size(4);
  ins_encode %{
    __ vaddubm($dst$$VectorSRegister->to_vr(), $src1$$VectorSRegister->to_vr(), $src2$$VectorSRegister->to_vr());
  %}
  ins_pipe(pipe_class_default);
%}

instruct vadd8S_reg(vecX dst, vecX src1, vecX src2) %{
  match(Set dst (AddVS src1 src2));
  predicate(n->as_Vector()->length() == 8);
  format %{ "VADDUHM $dst,$src1,$src2\t// add packed8S" %}
  size(4);
  ins_encode %{
    __ vadduhm($dst$$VectorSRegister->to_vr(), $src1$$VectorSRegister->to_vr(), $src2$$VectorSRegister->to_vr());
  %}
  ins_pipe(pipe_class_default);
%}

instruct vadd4I_reg(vecX dst, vecX src1, vecX src2) %{
  match(Set dst (AddVI src1 src2));
  predicate(n->as_Vector()->length() == 4);
  format %{ "VADDUWM $dst,$src1,$src2\t// add packed4I" %}
  size(4);
  ins_encode %{
    __ vadduwm($dst$$VectorSRegister->to_vr(), $src1$$VectorSRegister->to_vr(), $src2$$VectorSRegister->to_vr());
  %}
  ins_pipe(pipe_class_default);
%}

instruct vadd4F_reg(vecX dst, vecX src1, vecX src2) %{
  match(Set dst (AddVF src1 src2));
  predicate(n->as_Vector()->length() == 4);
  format %{ "VADDFP $dst,$src1,$src2\t// add packed4F" %}
  size(4);
  ins_encode %{
    __ vaddfp($dst$$VectorSRegister->to_vr(), $src1$$VectorSRegister->to_vr(), $src2$$VectorSRegister->to_vr());
  %}
  ins_pipe(pipe_class_default);
%}

instruct vadd2L_reg(vecX dst, vecX src1, vecX src2) %{
  match(Set dst (AddVL src1 src2));
  predicate(n->as_Vector()->length() == 2);
  format %{ "VADDUDM $dst,$src1,$src2\t// add packed2L" %}
  size(4);
  ins_encode %{
    __ vaddudm($dst$$VectorSRegister->to_vr(), $src1$$VectorSRegister->to_vr(), $src2$$VectorSRegister->to_vr());
  %}
  ins_pipe(pipe_class_default);
%}

instruct vadd2D_reg(vecX dst, vecX src1, vecX src2) %{
  match(Set dst (AddVD src1 src2));
  predicate(n->as_Vector()->length() == 2);
  format %{ "XVADDDP $dst,$src1,$src2\t// add packed2D" %}
  size(4);
  ins_encode %{
    __ xvadddp($dst$$VectorSRegister, $src1$$VectorSRegister, $src2$$VectorSRegister);
  %}
  ins_pipe(pipe_class_default);
%}

// Vector Subtraction Instructions

instruct vsub16B_reg(vecX dst, vecX src1, vecX src2) %{
  match(Set dst (SubVB src1 src2));
  predicate(n->as_Vector()->length() == 16);
  format %{ "VSUBUBM $dst,$src1,$src2\t// sub packed16B" %}
  size(4);
  ins_encode %{
    __ vsububm($dst$$VectorSRegister->to_vr(), $src1$$VectorSRegister->to_vr(), $src2$$VectorSRegister->to_vr());
  %}
  ins_pipe(pipe_class_default);
%}

instruct vsub8S_reg(vecX dst, vecX src1, vecX src2) %{
  match(Set dst (SubVS src1 src2));
  predicate(n->as_Vector()->length() == 8);
  format %{ "VSUBUHM $dst,$src1,$src2\t// sub packed8S" %}
  size(4);
  ins_encode %{
    __ vsubuhm($dst$$VectorSRegister->to_vr(), $src1$$VectorSRegister->to_vr(), $src2$$VectorSRegister->to_vr());
  %}
  ins_pipe(pipe_class_default);
%}

instruct vsub4I_reg(vecX dst, vecX src1, vecX src2) %{
  match(Set dst (SubVI src1 src2));
  predicate(n->as_Vector()->length() == 4);
  format %{ "VSUBUWM $dst,$src1,$src2\t// sub packed4I" %}
  size(4);
  ins_encode %{
    __ vsubuwm($dst$$VectorSRegister->to_vr(), $src1$$VectorSRegister->to_vr(), $src2$$VectorSRegister->to_vr());
  %}
  ins_pipe(pipe_class_default);
%}

instruct vsub4F_reg(vecX dst, vecX src1, vecX src2) %{
  match(Set dst (SubVF src1 src2));
  predicate(n->as_Vector()->length() == 4);
  format %{ "VSUBFP $dst,$src1,$src2\t// sub packed4F" %}
  size(4);
  ins_encode %{
    __ vsubfp($dst$$VectorSRegister->to_vr(), $src1$$VectorSRegister->to_vr(), $src2$$VectorSRegister->to_vr());
  %}
  ins_pipe(pipe_class_default);
%}

instruct vsub2L_reg(vecX dst, vecX src1, vecX src2) %{
  match(Set dst (SubVL src1 src2));
  predicate(n->as_Vector()->length() == 2);
  format %{ "VSUBUDM $dst,$src1,$src2\t// sub packed2L" %}
  size(4);
  ins_encode %{
    __ vsubudm($dst$$VectorSRegister->to_vr(), $src1$$VectorSRegister->to_vr(), $src2$$VectorSRegister->to_vr());
  %}
  ins_pipe(pipe_class_default);
%}

instruct vsub2D_reg(vecX dst, vecX src1, vecX src2) %{
  match(Set dst (SubVD src1 src2));
  predicate(n->as_Vector()->length() == 2);
  format %{ "XVSUBDP $dst,$src1,$src2\t// sub packed2D" %}
  size(4);
  ins_encode %{
    __ xvsubdp($dst$$VectorSRegister, $src1$$VectorSRegister, $src2$$VectorSRegister);
  %}
  ins_pipe(pipe_class_default);
%}

// Vector Multiplication Instructions

instruct vmul8S_reg(vecX dst, vecX src1, vecX src2, vecX tmp) %{
  match(Set dst (MulVS src1 src2));
  predicate(n->as_Vector()->length() == 8);
  effect(TEMP tmp);
  format %{ "VSPLTISH $tmp,0\n\t"
            "VMLADDUHM $dst,$src1,$src2\t// mul packed8S" %}
  size(8);
  ins_encode %{
    // Multiply-low-and-add with a zeroed addend implements the packed multiply.
    __ vspltish($tmp$$VectorSRegister->to_vr(), 0);
    __ vmladduhm($dst$$VectorSRegister->to_vr(), $src1$$VectorSRegister->to_vr(), $src2$$VectorSRegister->to_vr(), $tmp$$VectorSRegister->to_vr());
  %}
  ins_pipe(pipe_class_default);
%}

instruct vmul4I_reg(vecX dst, vecX src1, vecX src2) %{
  match(Set dst (MulVI src1 src2));
  predicate(n->as_Vector()->length() == 4);
  format %{ "VMULUWM $dst,$src1,$src2\t// mul packed4I" %}
  size(4);
  ins_encode %{
    __ vmuluwm($dst$$VectorSRegister->to_vr(), $src1$$VectorSRegister->to_vr(), $src2$$VectorSRegister->to_vr());
  %}
  ins_pipe(pipe_class_default);
%}

instruct vmul4F_reg(vecX dst, vecX src1, vecX src2) %{
  match(Set dst (MulVF src1 src2));
  predicate(n->as_Vector()->length() == 4);
  format %{ "XVMULSP $dst,$src1,$src2\t// mul packed4F" %}
  size(4);
  ins_encode %{
    __ xvmulsp($dst$$VectorSRegister, $src1$$VectorSRegister, $src2$$VectorSRegister);
  %}
  ins_pipe(pipe_class_default);
%}

instruct vmul2D_reg(vecX dst, vecX src1, vecX src2) %{
  match(Set dst (MulVD src1 src2));
  predicate(n->as_Vector()->length() == 2);
  format %{ "XVMULDP $dst,$src1,$src2\t// mul packed2D" %}
  size(4);
  ins_encode %{
    __ xvmuldp($dst$$VectorSRegister, $src1$$VectorSRegister, $src2$$VectorSRegister);
  %}
  ins_pipe(pipe_class_default);
%}

// Vector Division Instructions

instruct vdiv4F_reg(vecX dst, vecX src1, vecX src2) %{
  match(Set dst (DivVF src1 src2));
  predicate(n->as_Vector()->length() == 4);
  format %{ "XVDIVSP $dst,$src1,$src2\t// div packed4F" %}
  size(4);
  ins_encode %{
    __ xvdivsp($dst$$VectorSRegister, $src1$$VectorSRegister, $src2$$VectorSRegister);
  %}
  ins_pipe(pipe_class_default);
%}

instruct vdiv2D_reg(vecX dst, vecX src1, vecX src2) %{
  match(Set dst (DivVD src1 src2));
  predicate(n->as_Vector()->length() == 2);
  format %{ "XVDIVDP $dst,$src1,$src2\t// div packed2D" %}
  size(4);
  ins_encode %{
    __ xvdivdp($dst$$VectorSRegister, $src1$$VectorSRegister, $src2$$VectorSRegister);
  %}
  ins_pipe(pipe_class_default);
%}

// Vector Min / Max Instructions
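
// For illustration (a sketch): the MinV/MaxV nodes matched below arise,
// e.g., when C2 vectorizes a loop over int or long lanes:
//
//   static void minArrays(int[] a, int[] b, int[] c) {
//     for (int i = 0; i < a.length; i++) {
//       c[i] = Math.min(a[i], b[i]);   // becomes MinV with T_INT elements
//     }
//   }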

instruct vmin_reg(vecX dst, vecX src1, vecX src2) %{
  match(Set dst (MinV src1 src2));
  format %{ "VMIN $dst,$src1,$src2\t// vector min" %}
  size(4);
  ins_encode %{
    BasicType bt = Matcher::vector_element_basic_type(this);
    switch (bt) {
      case T_INT:
        __ vminsw($dst$$VectorSRegister->to_vr(), $src1$$VectorSRegister->to_vr(), $src2$$VectorSRegister->to_vr());
        break;
      case T_LONG:
        __ vminsd($dst$$VectorSRegister->to_vr(), $src1$$VectorSRegister->to_vr(), $src2$$VectorSRegister->to_vr());
        break;
      default:
        ShouldNotReachHere();
    }
  %}
  ins_pipe(pipe_class_default);
%}

instruct vmax_reg(vecX dst, vecX src1, vecX src2) %{
  match(Set dst (MaxV src1 src2));
  format %{ "VMAX $dst,$src1,$src2\t// vector max" %}
  size(4);
  ins_encode %{
    BasicType bt = Matcher::vector_element_basic_type(this);
    switch (bt) {
      case T_INT:
        __ vmaxsw($dst$$VectorSRegister->to_vr(), $src1$$VectorSRegister->to_vr(), $src2$$VectorSRegister->to_vr());
        break;
      case T_LONG:
        __ vmaxsd($dst$$VectorSRegister->to_vr(), $src1$$VectorSRegister->to_vr(), $src2$$VectorSRegister->to_vr());
        break;
      default:
        ShouldNotReachHere();
    }
  %}
  ins_pipe(pipe_class_default);
%}

instruct vand(vecX dst, vecX src1, vecX src2) %{
  match(Set dst (AndV src1 src2));
  size(4);
  format %{ "VAND $dst,$src1,$src2\t// and vectors" %}
  ins_encode %{
    __ vand($dst$$VectorSRegister->to_vr(), $src1$$VectorSRegister->to_vr(), $src2$$VectorSRegister->to_vr());
  %}
  ins_pipe(pipe_class_default);
%}

instruct vor(vecX dst, vecX src1, vecX src2) %{
  match(Set dst (OrV src1 src2));
  size(4);
  format %{ "VOR $dst,$src1,$src2\t// or vectors" %}
  ins_encode %{
    __ vor($dst$$VectorSRegister->to_vr(), $src1$$VectorSRegister->to_vr(), $src2$$VectorSRegister->to_vr());
  %}
  ins_pipe(pipe_class_default);
%}

instruct vxor(vecX dst, vecX src1, vecX src2) %{
  match(Set dst (XorV src1 src2));
  size(4);
  format %{ "VXOR $dst,$src1,$src2\t// xor vectors" %}
  ins_encode %{
    __ vxor($dst$$VectorSRegister->to_vr(), $src1$$VectorSRegister->to_vr(), $src2$$VectorSRegister->to_vr());
  %}
  ins_pipe(pipe_class_default);
%}

instruct reductionI_arith_logic(iRegIdst dst, iRegIsrc srcInt, vecX srcVec, vecX tmp1, vecX tmp2) %{
  predicate(Matcher::vector_element_basic_type(n->in(2)) == T_INT);
  match(Set dst (AddReductionVI srcInt srcVec));
  match(Set dst (MulReductionVI srcInt srcVec));
  match(Set dst (AndReductionV srcInt srcVec));
  match(Set dst ( OrReductionV srcInt srcVec));
  match(Set dst (XorReductionV srcInt srcVec));
  effect(TEMP tmp1, TEMP tmp2);
  ins_cost(DEFAULT_COST * 6);
  format %{ "REDUCEI_ARITH_LOGIC // $dst,$srcInt,$srcVec,$tmp1,$tmp2\t// reduce vector int add/mul/and/or/xor" %}
  size(24);
  ins_encode %{
    int opcode = this->ideal_Opcode();
    __ reduceI(opcode, $dst$$Register, $srcInt$$Register, $srcVec$$VectorSRegister->to_vr(),
               $tmp1$$VectorSRegister->to_vr(), $tmp2$$VectorSRegister->to_vr());
  %}
  ins_pipe(pipe_class_default);
%}

instruct reductionI_min_max(iRegIdst dst, iRegIsrc srcInt, vecX srcVec, vecX tmp1, vecX tmp2, flagsRegCR0 cr0) %{
  predicate(Matcher::vector_element_basic_type(n->in(2)) == T_INT);
  match(Set dst (MinReductionV srcInt srcVec));
  match(Set dst (MaxReductionV srcInt srcVec));
  effect(TEMP tmp1, TEMP tmp2, KILL cr0);
  ins_cost(DEFAULT_COST * 7);
  format %{ "REDUCEI_MINMAX // $dst,$srcInt,$srcVec,$tmp1,$tmp2,cr0\t// reduce vector int min/max" %}
  size(28);
  ins_encode %{
    int opcode = this->ideal_Opcode();
    __ reduceI(opcode, $dst$$Register, $srcInt$$Register, $srcVec$$VectorSRegister->to_vr(),
               $tmp1$$VectorSRegister->to_vr(), $tmp2$$VectorSRegister->to_vr());
  %}
  ins_pipe(pipe_class_default);
%}

// Vector Absolute Instructions

instruct vabs4F_reg(vecX dst, vecX src) %{
  match(Set dst (AbsVF src));
  predicate(n->as_Vector()->length() == 4);
  format %{ "XVABSSP $dst,$src\t// absolute packed4F" %}
  size(4);
  ins_encode %{
    __ xvabssp($dst$$VectorSRegister, $src$$VectorSRegister);
  %}
  ins_pipe(pipe_class_default);
%}

instruct vabs2D_reg(vecX dst, vecX src) %{
  match(Set dst (AbsVD src));
  predicate(n->as_Vector()->length() == 2);
  format %{ "XVABSDP $dst,$src\t// absolute packed2D" %}
  size(4);
  ins_encode %{
    __ xvabsdp($dst$$VectorSRegister, $src$$VectorSRegister);
  %}
  ins_pipe(pipe_class_default);
%}

// Round Instructions
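
// For illustration (a sketch): the rmode constant selects the rounding
// intrinsic that produced the node:
//
//   double r = Math.rint(x);   // rmode_rint  -> XVRDPIC (round to nearest even)
//   double f = Math.floor(x);  // rmode_floor -> FRIM    (round toward -infinity)
//   double c = Math.ceil(x);   // rmode_ceil  -> FRIP    (round toward +infinity)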
instruct roundD_reg(regD dst, regD src, immI8 rmode) %{
  match(Set dst (RoundDoubleMode src rmode));
  format %{ "RoundDoubleMode $src,$rmode" %}
  size(4);
  ins_encode %{
    switch ($rmode$$constant) {
      case RoundDoubleModeNode::rmode_rint:
        __ xvrdpic($dst$$FloatRegister->to_vsr(), $src$$FloatRegister->to_vsr());
        break;
      case RoundDoubleModeNode::rmode_floor:
        __ frim($dst$$FloatRegister, $src$$FloatRegister);
        break;
      case RoundDoubleModeNode::rmode_ceil:
        __ frip($dst$$FloatRegister, $src$$FloatRegister);
        break;
      default:
        ShouldNotReachHere();
    }
  %}
  ins_pipe(pipe_class_default);
%}

// Vector Round Instructions
instruct vround2D_reg(vecX dst, vecX src, immI8 rmode) %{
  match(Set dst (RoundDoubleModeV src rmode));
  predicate(n->as_Vector()->length() == 2);
  format %{ "RoundDoubleModeV $src,$rmode" %}
  size(4);
  ins_encode %{
    switch ($rmode$$constant) {
      case RoundDoubleModeNode::rmode_rint:
        __ xvrdpic($dst$$VectorSRegister, $src$$VectorSRegister);
        break;
      case RoundDoubleModeNode::rmode_floor:
        __ xvrdpim($dst$$VectorSRegister, $src$$VectorSRegister);
        break;
      case RoundDoubleModeNode::rmode_ceil:
        __ xvrdpip($dst$$VectorSRegister, $src$$VectorSRegister);
        break;
      default:
        ShouldNotReachHere();
    }
  %}
  ins_pipe(pipe_class_default);
%}

// Vector Negate Instructions

instruct vneg4F_reg(vecX dst, vecX src) %{
  match(Set dst (NegVF src));
  predicate(n->as_Vector()->length() == 4);
  format %{ "XVNEGSP $dst,$src\t// negate packed4F" %}
  size(4);
  ins_encode %{
    __ xvnegsp($dst$$VectorSRegister, $src$$VectorSRegister);
  %}
  ins_pipe(pipe_class_default);
%}

instruct vneg2D_reg(vecX dst, vecX src) %{
  match(Set dst (NegVD src));
  predicate(n->as_Vector()->length() == 2);
  format %{ "XVNEGDP $dst,$src\t// negate packed2D" %}
  size(4);
  ins_encode %{
    __ xvnegdp($dst$$VectorSRegister, $src$$VectorSRegister);
  %}
  ins_pipe(pipe_class_default);
%}

// Vector Square Root Instructions

instruct vsqrt4F_reg(vecX dst, vecX src) %{
  match(Set dst (SqrtVF src));
  predicate(n->as_Vector()->length() == 4);
  format %{ "XVSQRTSP $dst,$src\t// sqrt packed4F" %}
  size(4);
  ins_encode %{
    __ xvsqrtsp($dst$$VectorSRegister, $src$$VectorSRegister);
  %}
  ins_pipe(pipe_class_default);
%}

instruct vsqrt2D_reg(vecX dst, vecX src) %{
  match(Set dst (SqrtVD src));
  predicate(n->as_Vector()->length() == 2);
  format %{ "XVSQRTDP $dst,$src\t// sqrt packed2D" %}
  size(4);
  ins_encode %{
    __ xvsqrtdp($dst$$VectorSRegister, $src$$VectorSRegister);
  %}
  ins_pipe(pipe_class_default);
%}

// Vector Population Count and Zeros Count Instructions
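
// For illustration (a sketch): PopCountVI/PopCountVL show up when C2
// vectorizes Integer.bitCount / Long.bitCount over an array:
//
//   static int totalBits(int[] a) {
//     int sum = 0;
//     for (int i = 0; i < a.length; i++) {
//       sum += Integer.bitCount(a[i]);   // PopCountVI feeding an add reduction
//     }
//     return sum;
//   }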

instruct vpopcnt_reg(vecX dst, vecX src) %{
  match(Set dst (PopCountVI src));
  match(Set dst (PopCountVL src));
  format %{ "VPOPCNT $dst,$src\t// pop count packed" %}
  size(4);
  ins_encode %{
    BasicType bt = Matcher::vector_element_basic_type(this);
    switch (bt) {
      case T_BYTE:
        __ vpopcntb($dst$$VectorSRegister->to_vr(), $src$$VectorSRegister->to_vr());
        break;
      case T_SHORT:
        __ vpopcnth($dst$$VectorSRegister->to_vr(), $src$$VectorSRegister->to_vr());
        break;
      case T_INT:
        __ vpopcntw($dst$$VectorSRegister->to_vr(), $src$$VectorSRegister->to_vr());
        break;
      case T_LONG:
        __ vpopcntd($dst$$VectorSRegister->to_vr(), $src$$VectorSRegister->to_vr());
        break;
      default:
        ShouldNotReachHere();
    }
  %}
  ins_pipe(pipe_class_default);
%}

instruct vcount_leading_zeros_reg(vecX dst, vecX src) %{
  match(Set dst (CountLeadingZerosV src));
  format %{ "VCLZ $dst,$src\t// leading zeros count packed" %}
  size(4);
  ins_encode %{
    BasicType bt = Matcher::vector_element_basic_type(this);
    switch (bt) {
      case T_BYTE:
        __ vclzb($dst$$VectorSRegister->to_vr(), $src$$VectorSRegister->to_vr());
        break;
      case T_SHORT:
        __ vclzh($dst$$VectorSRegister->to_vr(), $src$$VectorSRegister->to_vr());
        break;
      case T_INT:
        __ vclzw($dst$$VectorSRegister->to_vr(), $src$$VectorSRegister->to_vr());
        break;
      case T_LONG:
        __ vclzd($dst$$VectorSRegister->to_vr(), $src$$VectorSRegister->to_vr());
        break;
      default:
        ShouldNotReachHere();
    }
  %}
  ins_pipe(pipe_class_default);
%}

instruct vcount_trailing_zeros_reg(vecX dst, vecX src) %{
  match(Set dst (CountTrailingZerosV src));
  format %{ "VCTZ $dst,$src\t// trailing zeros count packed" %}
  size(4);
  ins_encode %{
    BasicType bt = Matcher::vector_element_basic_type(this);
    switch (bt) {
      case T_BYTE:
        __ vctzb($dst$$VectorSRegister->to_vr(), $src$$VectorSRegister->to_vr());
        break;
      case T_SHORT:
        __ vctzh($dst$$VectorSRegister->to_vr(), $src$$VectorSRegister->to_vr());
        break;
      case T_INT:
        __ vctzw($dst$$VectorSRegister->to_vr(), $src$$VectorSRegister->to_vr());
        break;
      case T_LONG:
        __ vctzd($dst$$VectorSRegister->to_vr(), $src$$VectorSRegister->to_vr());
        break;
      default:
        ShouldNotReachHere();
    }
  %}
  ins_pipe(pipe_class_default);
%}

// --------------------------------- FMA --------------------------------------
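
// For illustration (a sketch): FmaVF/FmaVD are the vectorized forms of
// Math.fma; with UseFMA enabled, a loop such as
//
//   static void fma(float[] a, float[] b, float[] c) {
//     for (int i = 0; i < a.length; i++) {
//       c[i] = Math.fma(a[i], b[i], c[i]);   // c = a * b + c, single rounding
//     }
//   }
//
// maps to the XVMADDASP rule below.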

// src1 * src2 + dst
instruct vfma4F(vecX dst, vecX src1, vecX src2) %{
  match(Set dst (FmaVF dst (Binary src1 src2)));
  predicate(n->as_Vector()->length() == 4);

  format %{ "XVMADDASP $dst, $src1, $src2" %}

  size(4);
  ins_encode %{
    assert(UseFMA, "Needs FMA instructions support.");
    __ xvmaddasp($dst$$VectorSRegister, $src1$$VectorSRegister, $src2$$VectorSRegister);
  %}
  ins_pipe(pipe_class_default);
%}

// src1 * (-src2) + dst
// "(-src1) * src2 + dst" has been idealized to "src2 * (-src1) + dst"
instruct vfma4F_neg1(vecX dst, vecX src1, vecX src2) %{
  match(Set dst (FmaVF dst (Binary src1 (NegVF src2))));
  predicate(n->as_Vector()->length() == 4);

  format %{ "XVNMSUBASP $dst, $src1, $src2" %}

  size(4);
  ins_encode %{
    assert(UseFMA, "Needs FMA instructions support.");
    __ xvnmsubasp($dst$$VectorSRegister, $src1$$VectorSRegister, $src2$$VectorSRegister);
  %}
  ins_pipe(pipe_class_default);
%}

// src1 * src2 - dst
instruct vfma4F_neg2(vecX dst, vecX src1, vecX src2) %{
  match(Set dst (FmaVF (NegVF dst) (Binary src1 src2)));
  predicate(n->as_Vector()->length() == 4);

  format %{ "XVMSUBASP $dst, $src1, $src2" %}

  size(4);
  ins_encode %{
    assert(UseFMA, "Needs FMA instructions support.");
    __ xvmsubasp($dst$$VectorSRegister, $src1$$VectorSRegister, $src2$$VectorSRegister);
  %}
  ins_pipe(pipe_class_default);
%}

// src1 * src2 + dst
instruct vfma2D(vecX dst, vecX src1, vecX src2) %{
  match(Set dst (FmaVD dst (Binary src1 src2)));
  predicate(n->as_Vector()->length() == 2);

  format %{ "XVMADDADP $dst, $src1, $src2" %}

  size(4);
  ins_encode %{
    assert(UseFMA, "Needs FMA instructions support.");
    __ xvmaddadp($dst$$VectorSRegister, $src1$$VectorSRegister, $src2$$VectorSRegister);
  %}
  ins_pipe(pipe_class_default);
%}

// src1 * (-src2) + dst
// "(-src1) * src2 + dst" has been idealized to "src2 * (-src1) + dst"
instruct vfma2D_neg1(vecX dst, vecX src1, vecX src2) %{
  match(Set dst (FmaVD dst (Binary src1 (NegVD src2))));
  predicate(n->as_Vector()->length() == 2);

  format %{ "XVNMSUBADP $dst, $src1, $src2" %}

  size(4);
  ins_encode %{
    assert(UseFMA, "Needs FMA instructions support.");
    __ xvnmsubadp($dst$$VectorSRegister, $src1$$VectorSRegister, $src2$$VectorSRegister);
  %}
  ins_pipe(pipe_class_default);
%}

// src1 * src2 - dst
instruct vfma2D_neg2(vecX dst, vecX src1, vecX src2) %{
  match(Set dst (FmaVD (NegVD dst) (Binary src1 src2)));
  predicate(n->as_Vector()->length() == 2);

  format %{ "XVMSUBADP $dst, $src1, $src2" %}

  size(4);
  ins_encode %{
    assert(UseFMA, "Needs FMA instructions support.");
    __ xvmsubadp($dst$$VectorSRegister, $src1$$VectorSRegister, $src2$$VectorSRegister);
  %}
  ins_pipe(pipe_class_default);
%}

//----------Overflow Math Instructions-----------------------------------------

// Note that we have to make sure that XER.SO is reset before using overflow instructions.
// Simple Overflow operations can be matched by very few instructions (e.g. addExact: xor, and_, bc).
// Seems like only Long intrinsics have an advantage. (The only expensive one is OverflowMulL.)
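
// For illustration (a sketch): OverflowAddL is created for the exact-math
// intrinsics, e.g.
//
//   static long inc(long x, long y) {
//     return Math.addExact(x, y);   // overflow raises ArithmeticException
//   }
//
// The rules below first clear XER.SO (li/mtxer) so the summary-overflow bit
// reflects only the operation being checked.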

instruct overflowAddL_reg_reg(flagsRegCR0 cr0, iRegLsrc op1, iRegLsrc op2) %{
  match(Set cr0 (OverflowAddL op1 op2));

  format %{ "addo_ R0, $op1, $op2\t# overflow check long" %}
  ins_encode %{
    __ li(R0, 0);
    __ mtxer(R0); // clear XER.SO
    __ addo_(R0, $op1$$Register, $op2$$Register);
  %}
  ins_pipe(pipe_class_default);
%}

instruct overflowSubL_reg_reg(flagsRegCR0 cr0, iRegLsrc op1, iRegLsrc op2) %{
  match(Set cr0 (OverflowSubL op1 op2));

  format %{ "subfo_ R0, $op2, $op1\t# overflow check long" %}
  ins_encode %{
    __ li(R0, 0);
    __ mtxer(R0); // clear XER.SO
    __ subfo_(R0, $op2$$Register, $op1$$Register);
  %}
  ins_pipe(pipe_class_default);
%}

instruct overflowNegL_reg(flagsRegCR0 cr0, immL_0 zero, iRegLsrc op2) %{
  match(Set cr0 (OverflowSubL zero op2));

  format %{ "nego_ R0, $op2\t# overflow check long" %}
  ins_encode %{
    __ li(R0, 0);
    __ mtxer(R0); // clear XER.SO
    __ nego_(R0, $op2$$Register);
  %}
  ins_pipe(pipe_class_default);
%}

instruct overflowMulL_reg_reg(flagsRegCR0 cr0, iRegLsrc op1, iRegLsrc op2) %{
  match(Set cr0 (OverflowMulL op1 op2));

  format %{ "mulldo_ R0, $op1, $op2\t# overflow check long" %}
  ins_encode %{
    __ li(R0, 0);
    __ mtxer(R0); // clear XER.SO
    __ mulldo_(R0, $op1$$Register, $op2$$Register);
  %}
  ins_pipe(pipe_class_default);
%}

instruct repl4F_reg_Ex(vecX dst, regF src) %{
  match(Set dst (Replicate src));
  predicate(n->as_Vector()->length() == 4 &&
            Matcher::vector_element_basic_type(n) == T_FLOAT);
  ins_cost(DEFAULT_COST);
  expand %{
    vecX tmpV;
    immI8 zero %{ (int) 0 %}

    xscvdpspn_regF(tmpV, src);
    xxspltw(dst, tmpV, zero);
  %}
%}

instruct repl4F_immF_Ex(vecX dst, immF src, iRegLdst tmp) %{
  match(Set dst (Replicate src));
  predicate(n->as_Vector()->length() == 4 &&
            Matcher::vector_element_basic_type(n) == T_FLOAT);
  effect(TEMP tmp);
  ins_cost(10 * DEFAULT_COST);

  postalloc_expand( postalloc_expand_load_replF_constant_vsx(dst, src, constanttablebase, tmp) );
%}

instruct repl4F_immF0(vecX dst, immF_0 zero) %{
  match(Set dst (Replicate zero));
  predicate(n->as_Vector()->length() == 4 &&
            Matcher::vector_element_basic_type(n) == T_FLOAT);

  format %{ "XXLXOR $dst, $zero \t// replicate4F" %}
  ins_encode %{
    __ xxlxor($dst$$VectorSRegister, $dst$$VectorSRegister, $dst$$VectorSRegister);
  %}
  ins_pipe(pipe_class_default);
%}

instruct repl2D_reg_Ex(vecX dst, regD src) %{
  match(Set dst (Replicate src));
  predicate(n->as_Vector()->length() == 2 &&
            Matcher::vector_element_basic_type(n) == T_DOUBLE);

  format %{ "XXPERMDI $dst, $src, $src, 0 \t// Splat doubleword" %}
  size(4);
  ins_encode %{
    __ xxpermdi($dst$$VectorSRegister, $src$$FloatRegister->to_vsr(), $src$$FloatRegister->to_vsr(), 0);
  %}
  ins_pipe(pipe_class_default);
%}

instruct repl2D_immD0(vecX dst, immD_0 zero) %{
  match(Set dst (Replicate zero));
  predicate(n->as_Vector()->length() == 2 &&
            Matcher::vector_element_basic_type(n) == T_DOUBLE);

  format %{ "XXLXOR $dst, $zero \t// replicate2D" %}
  size(4);
  ins_encode %{
    __ xxlxor($dst$$VectorSRegister, $dst$$VectorSRegister, $dst$$VectorSRegister);
  %}
  ins_pipe(pipe_class_default);
%}

instruct mtvsrd(vecX dst, iRegLsrc src) %{
  predicate(false);
  effect(DEF dst, USE src);

  format %{ "MTVSRD $dst, $src \t// Move to 16-byte register" %}
  size(4);
  ins_encode %{
    __ mtvsrd($dst$$VectorSRegister, $src$$Register);
  %}
  ins_pipe(pipe_class_default);
%}

instruct xxspltd(vecX dst, vecX src, immI8 zero) %{
  effect(DEF dst, USE src, USE zero);

  format %{ "XXSPLATD $dst, $src, $zero \t// Splat doubleword" %}
  size(4);
  ins_encode %{
    __ xxpermdi($dst$$VectorSRegister, $src$$VectorSRegister, $src$$VectorSRegister, $zero$$constant);
  %}
  ins_pipe(pipe_class_default);
%}

instruct xxpermdi(vecX dst, vecX src1, vecX src2, immI8 zero) %{
  effect(DEF dst, USE src1, USE src2, USE zero);

  format %{ "XXPERMDI $dst, $src1, $src2, $zero \t// Splat doubleword" %}
  size(4);
  ins_encode %{
    __ xxpermdi($dst$$VectorSRegister, $src1$$VectorSRegister, $src2$$VectorSRegister, $zero$$constant);
  %}
  ins_pipe(pipe_class_default);
%}

instruct repl2L_reg_Ex(vecX dst, iRegLsrc src) %{
  match(Set dst (Replicate src));
  predicate(n->as_Vector()->length() == 2 &&
            Matcher::vector_element_basic_type(n) == T_LONG);
  expand %{
    vecX tmpV;
    immI8 zero %{ (int) 0 %}
    mtvsrd(tmpV, src);
    xxpermdi(dst, tmpV, tmpV, zero);
  %}
%}

instruct repl2L_immI0(vecX dst, immI_0 zero) %{
  match(Set dst (Replicate zero));
  predicate(n->as_Vector()->length() == 2 &&
            Matcher::vector_element_basic_type(n) == T_LONG);

  format %{ "XXLXOR $dst, $zero \t// replicate2L" %}
  size(4);
  ins_encode %{
    __ xxlxor($dst$$VectorSRegister, $dst$$VectorSRegister, $dst$$VectorSRegister);
  %}
  ins_pipe(pipe_class_default);
%}

instruct repl2L_immIminus1(vecX dst, immI_minus1 src) %{
  match(Set dst (Replicate src));
  predicate(n->as_Vector()->length() == 2 &&
            Matcher::vector_element_basic_type(n) == T_LONG);

  format %{ "XXLEQV $dst, $src \t// replicate2L" %}
  size(4);
  ins_encode %{
    __ xxleqv($dst$$VectorSRegister, $dst$$VectorSRegister, $dst$$VectorSRegister);
  %}
  ins_pipe(pipe_class_default);
%}

// ============================================================================
// Safepoint Instruction

instruct safePoint_poll(iRegPdst poll) %{
  match(SafePoint poll);

  // Adding an effect that kills R0 caused problems, but the effect no
  // longer needs to be mentioned, since R0 is not contained in a reg_class.

  format %{ "LD R0, #0, $poll \t// Safepoint poll for GC" %}
  size(4);
  ins_encode( enc_poll(0x0, poll) );
  ins_pipe(pipe_class_default);
%}

// ============================================================================
// Call Instructions

// Call Java Static Instruction

source %{

#include "runtime/continuation.hpp"

%}

// Schedulable version of call static node.
instruct CallStaticJavaDirect(method meth) %{
  match(CallStaticJava);
  effect(USE meth);
  ins_cost(CALL_COST);

  ins_num_consts(3 /* up to 3 patchable constants: inline cache, 2 call targets. */);

  format %{ "CALL,static $meth \t// ==> " %}
  size((Continuations::enabled() ? 8 : 4));
  ins_encode( enc_java_static_call(meth) );
  ins_pipe(pipe_class_call);
%}

// Call Java Dynamic Instruction

// Used by postalloc expand of CallDynamicJavaDirectSched_Ex (actual call).
// Loading of IC was postalloc expanded. The nodes loading the IC are reachable
// via fields ins_field_load_ic_hi_node and ins_field_load_ic_node.
// The call destination must still be placed in the constant pool.
instruct CallDynamicJavaDirectSched(method meth) %{
  match(CallDynamicJava); // To get all the data fields we need ...
  effect(USE meth);
  predicate(false);       // ... but never match.

  ins_field_load_ic_hi_node(loadConL_hiNode*);
  ins_field_load_ic_node(loadConLNode*);
  ins_num_consts(1 /* 1 patchable constant: call destination */);

  format %{ "BL \t// dynamic $meth ==> " %}
  size((Continuations::enabled() ? 8 : 4));
  ins_encode( enc_java_dynamic_call_sched(meth) );
  ins_pipe(pipe_class_call);
%}

// Schedulable (i.e. postalloc expanded) version of call dynamic java.
// We use postalloc expanded calls if we use inline caches
// and do not update method data.
//
// This instruction has two constants: inline cache (IC) and call destination.
// Loading the inline cache will be postalloc expanded, thus leaving a call with
// one constant.
instruct CallDynamicJavaDirectSched_Ex(method meth) %{
  match(CallDynamicJava);
  effect(USE meth);
  predicate(UseInlineCaches);
  ins_cost(CALL_COST);

  ins_num_consts(2 /* 2 patchable constants: inline cache, call destination. */);

  format %{ "CALL,dynamic $meth \t// postalloc expanded" %}
  postalloc_expand( postalloc_expand_java_dynamic_call_sched(meth, constanttablebase) );
%}

// Compound version of call dynamic java
// We use postalloc expanded calls if we use inline caches
// and do not update method data.
instruct CallDynamicJavaDirect(method meth) %{
  match(CallDynamicJava);
  effect(USE meth);
  predicate(!UseInlineCaches);
  ins_cost(CALL_COST);

  // Enc_java_to_runtime_call needs up to 4 constants (method data oop).
  ins_num_consts(4);

  format %{ "CALL,dynamic $meth \t// ==> " %}
  ins_encode( enc_java_dynamic_call(meth, constanttablebase) );
  ins_pipe(pipe_class_call);
%}

// Call Runtime Instruction

instruct CallRuntimeDirect(method meth) %{
  match(CallRuntime);
  effect(USE meth);
  ins_cost(CALL_COST);

  // Enc_java_to_runtime_call needs up to 3 constants: call target,
  // env for callee, C-toc.
  ins_num_consts(3);

  format %{ "CALL,runtime" %}
  ins_encode( enc_java_to_runtime_call(meth) );
  ins_pipe(pipe_class_call);
%}

// Call Leaf

// Used by postalloc expand of CallLeafDirect_Ex (mtctr).
instruct CallLeafDirect_mtctr(iRegLdst dst, iRegLsrc src) %{
  effect(DEF dst, USE src);

  ins_num_consts(1);

  format %{ "MTCTR $src" %}
  size(4);
  ins_encode( enc_leaf_call_mtctr(src) );
  ins_pipe(pipe_class_default);
%}

// Used by postalloc expand of CallLeafDirect_Ex (actual call).
instruct CallLeafDirect(method meth) %{
  match(CallLeaf);  // To get all the data fields we need ...
  effect(USE meth);
  predicate(false); // but never match.

  format %{ "BCTRL \t// leaf call $meth ==> " %}
  size((Continuations::enabled() ? 8 : 4));
  ins_encode %{
    __ bctrl();
    __ post_call_nop();
  %}
  ins_pipe(pipe_class_call);
%}

// postalloc expand of CallLeafDirect.
// Load address to call from TOC, then bl to it.
instruct CallLeafDirect_Ex(method meth) %{
  match(CallLeaf);
  effect(USE meth);
  ins_cost(CALL_COST);

  // Postalloc_expand_java_to_runtime_call needs up to 3 constants: call target,
  // env for callee, C-toc.
  ins_num_consts(3);

  format %{ "CALL,runtime leaf $meth \t// postalloc expanded" %}
  postalloc_expand( postalloc_expand_java_to_runtime_call(meth, constanttablebase) );
%}

// Call runtime without safepoint - same as CallLeaf.
// postalloc expand of CallLeafNoFPDirect.
// Load address to call from TOC, then bl to it.
instruct CallLeafNoFPDirect_Ex(method meth) %{
  match(CallLeafNoFP);
  effect(USE meth);
  ins_cost(CALL_COST);

  // Enc_java_to_runtime_call needs up to 3 constants: call target,
  // env for callee, C-toc.
  ins_num_consts(3);

  format %{ "CALL,runtime leaf nofp $meth \t// postalloc expanded" %}
  postalloc_expand( postalloc_expand_java_to_runtime_call(meth, constanttablebase) );
%}

// Tail Call; Jump from runtime stub to Java code.
// Also known as an 'interprocedural jump'.
// Target of jump will eventually return to caller.
// TailJump below removes the return address.
instruct TailCalljmpInd(iRegPdstNoScratch jump_target, inline_cache_regP method_ptr) %{
  match(TailCall jump_target method_ptr);
  ins_cost(CALL_COST);

  format %{ "MTCTR $jump_target \t// $method_ptr holds method\n\t"
            "BCTR \t// tail call" %}
  size(8);
  ins_encode %{
    __ mtctr($jump_target$$Register);
    __ bctr();
  %}
  ins_pipe(pipe_class_call);
%}

// Return Instruction
instruct Ret() %{
  match(Return);
  format %{ "BLR \t// branch to link register" %}
  size(4);
  ins_encode %{
    // LR is restored in MachEpilogNode. Just do the RET here.
    __ blr();
  %}
  ins_pipe(pipe_class_default);
%}

// Tail Jump; remove the return address; jump to target.
// TailCall above leaves the return address around.
// TailJump is used in only one place, the rethrow_Java stub (fancy_jump=2).
// ex_oop (Exception Oop) is needed in %o0 at the jump. As there would be a
// "restore" before this instruction (in Epilogue), we need to materialize it
// in %i0.
instruct tailjmpInd(iRegPdstNoScratch jump_target, rarg1RegP ex_oop) %{
  match(TailJump jump_target ex_oop);
  ins_cost(CALL_COST);

  format %{ "LD R4_ARG2 = LR\n\t"
            "MTCTR $jump_target\n\t"
            "BCTR \t// TailJump, exception oop: $ex_oop" %}
  size(12);
  ins_encode %{
    __ ld(R4_ARG2/* issuing pc */, _abi0(lr), R1_SP);
    __ mtctr($jump_target$$Register);
    __ bctr();
  %}
  ins_pipe(pipe_class_call);
%}

// Forward exception.
instruct ForwardExceptionjmp()
%{
  match(ForwardException);
  ins_cost(CALL_COST);

  format %{ "Jmp forward_exception_stub" %}
  ins_encode %{
    __ set_inst_mark();
    __ b64_patchable(StubRoutines::forward_exception_entry(), relocInfo::runtime_call_type);
    __ clear_inst_mark();
  %}
  ins_pipe(pipe_class_call);
%}

// Create exception oop: created by stack-crawling runtime code.
// Created exception is now available to this handler, and is set up
// just prior to jumping to this handler. No code emitted.
instruct CreateException(rarg1RegP ex_oop) %{
  match(Set ex_oop (CreateEx));
  ins_cost(0);

  format %{ " -- \t// exception oop; no code emitted" %}
  size(0);
  ins_encode( /*empty*/ );
  ins_pipe(pipe_class_default);
%}

// Rethrow exception: The exception oop will come in the first
// argument position. Then JUMP (not call) to the rethrow stub code.
instruct RethrowException() %{
  match(Rethrow);
  ins_cost(CALL_COST);

  format %{ "Jmp rethrow_stub" %}
  ins_encode %{
    __ set_inst_mark();
    __ b64_patchable((address)OptoRuntime::rethrow_stub(), relocInfo::runtime_call_type);
    __ clear_inst_mark();
  %}
  ins_pipe(pipe_class_call);
%}

// Die now.
instruct ShouldNotReachHere() %{
  match(Halt);
  ins_cost(CALL_COST);

  format %{ "ShouldNotReachHere" %}
  ins_encode %{
    if (is_reachable()) {
      const char* str = __ code_string(_halt_reason);
      __ stop(str);
    }
  %}
  ins_pipe(pipe_class_default);
%}

// This name is KNOWN by the ADLC and cannot be changed. The ADLC
// forces a 'TypeRawPtr::BOTTOM' output type for this guy.
// Get a DEF on threadRegP, no costs, no encoding, use
// 'ins_should_rematerialize(true)' to avoid spilling.
instruct tlsLoadP(threadRegP dst) %{
  match(Set dst (ThreadLocal));
  ins_cost(0);

  ins_should_rematerialize(true);

  format %{ " -- \t// $dst=Thread::current(), empty" %}
  size(0);
  ins_encode( /*empty*/ );
  ins_pipe(pipe_class_empty);
%}

//---Some PPC specific nodes---------------------------------------------------

// Stop a group.
instruct endGroup() %{
  ins_cost(0);

  ins_is_nop(true);

  format %{ "End Bundle (ori r1, r1, 0)" %}
  size(4);
  ins_encode %{
    __ endgroup();
  %}
  ins_pipe(pipe_class_default);
%}

// Nop instructions

instruct fxNop() %{
  ins_cost(0);

  ins_is_nop(true);

  format %{ "fxNop" %}
  size(4);
  ins_encode %{
    __ nop();
  %}
  ins_pipe(pipe_class_default);
%}

instruct fpNop0() %{
  ins_cost(0);

  ins_is_nop(true);

  format %{ "fpNop0" %}
  size(4);
  ins_encode %{
    __ fpnop0();
  %}
  ins_pipe(pipe_class_default);
%}

instruct fpNop1() %{
  ins_cost(0);

  ins_is_nop(true);

  format %{ "fpNop1" %}
  size(4);
  ins_encode %{
    __ fpnop1();
  %}
  ins_pipe(pipe_class_default);
%}

instruct brNop0() %{
  ins_cost(0);
  size(4);
  format %{ "brNop0" %}
  ins_encode %{
    __ brnop0();
  %}
  ins_is_nop(true);
  ins_pipe(pipe_class_default);
%}

instruct brNop1() %{
  ins_cost(0);

  ins_is_nop(true);

  format %{ "brNop1" %}
  size(4);
  ins_encode %{
    __ brnop1();
  %}
  ins_pipe(pipe_class_default);
%}

instruct brNop2() %{
  ins_cost(0);

  ins_is_nop(true);

  format %{ "brNop2" %}
  size(4);
  ins_encode %{
    __ brnop2();
  %}
  ins_pipe(pipe_class_default);
%}

instruct cacheWB(indirect addr)
%{
  match(CacheWB addr);

  ins_cost(100);
  format %{ "cache writeback, address = $addr" %}
  ins_encode %{
    assert($addr->index_position() < 0, "should be");
    assert($addr$$disp == 0, "should be");
    __ cache_wb(Address($addr$$base$$Register));
  %}
  ins_pipe(pipe_class_default);
%}

instruct cacheWBPreSync()
%{
  match(CacheWBPreSync);

  ins_cost(0);
  format %{ "cache writeback presync" %}
  ins_encode %{
    __ cache_wbsync(true);
  %}
  ins_pipe(pipe_class_default);
%}

instruct cacheWBPostSync()
%{
  match(CacheWBPostSync);

  ins_cost(100);
  format %{ "cache writeback postsync" %}
  ins_encode %{
    __ cache_wbsync(false);
  %}
  ins_pipe(pipe_class_default);
%}

//----------PEEPHOLE RULES-----------------------------------------------------
// These must follow all instruction definitions as they use the names
// defined in the instruction definitions.
//
// peepmatch ( root_instr_name [preceding_instruction]* );
//
// peepconstraint %{
// (instruction_number.operand_name relational_op instruction_number.operand_name
//  [, ...] );
// // instruction numbers are zero-based using left to right order in peepmatch
//
// peepreplace ( instr_name ( [instruction_number.operand_name]* ) );
// // provide an instruction_number.operand_name for each operand that appears
// // in the replacement instruction's match rule
//
// ---------VM FLAGS---------------------------------------------------------
//
// All peephole optimizations can be turned off using -XX:-OptoPeephole
//
// Each peephole rule is given an identifying number starting with zero and
// increasing by one in the order seen by the parser. An individual peephole
// can be enabled, and all others disabled, by using -XX:OptoPeepholeAt=#
// on the command-line.
//
// ---------CURRENT LIMITATIONS----------------------------------------------
//
// Only match adjacent instructions in same basic block
// Only equality constraints
// Only constraints between operands, not (0.dest_reg == EAX_enc)
// Only one replacement instruction
//
// ---------EXAMPLE----------------------------------------------------------
//
// // pertinent parts of existing instructions in architecture description
// instruct movI(eRegI dst, eRegI src) %{
//   match(Set dst (CopyI src));
// %}
//
// instruct incI_eReg(eRegI dst, immI1 src, eFlagsReg cr) %{
//   match(Set dst (AddI dst src));
//   effect(KILL cr);
// %}
//
// // Change (inc mov) to lea
// peephole %{
//   // increment preceded by register-register move
//   peepmatch ( incI_eReg movI );
//   // require that the destination register of the increment
//   // match the destination register of the move
//   peepconstraint ( 0.dst == 1.dst );
//   // construct a replacement instruction that sets
//   // the destination to ( move's source register + one )
//   peepreplace ( leaI_eReg_immI( 0.dst 1.src 0.src ) );
// %}
//
// Implementation no longer uses movX instructions since
// machine-independent system no longer uses CopyX nodes.
//
// peephole %{
//   peepmatch ( incI_eReg movI );
//   peepconstraint ( 0.dst == 1.dst );
//   peepreplace ( leaI_eReg_immI( 0.dst 1.src 0.src ) );
// %}
//
// peephole %{
//   peepmatch ( decI_eReg movI );
//   peepconstraint ( 0.dst == 1.dst );
//   peepreplace ( leaI_eReg_immI( 0.dst 1.src 0.src ) );
// %}
//
// peephole %{
//   peepmatch ( addI_eReg_imm movI );
//   peepconstraint ( 0.dst == 1.dst );
//   peepreplace ( leaI_eReg_immI( 0.dst 1.src 0.src ) );
// %}
//
// peephole %{
//   peepmatch ( addP_eReg_imm movP );
//   peepconstraint ( 0.dst == 1.dst );
//   peepreplace ( leaP_eReg_immI( 0.dst 1.src 0.src ) );
// %}

// // Change load of spilled value to only a spill
// instruct storeI(memory mem, eRegI src) %{
//   match(Set mem (StoreI mem src));
// %}
//
// instruct loadI(eRegI dst, memory mem) %{
//   match(Set dst (LoadI mem));
// %}
//
peephole %{
  peepmatch ( loadI storeI );
  peepconstraint ( 1.src == 0.dst, 1.mem == 0.mem );
  peepreplace ( storeI( 1.mem 1.mem 1.src ) );
%}

peephole %{
  peepmatch ( loadL storeL );
  peepconstraint ( 1.src == 0.dst, 1.mem == 0.mem );
  peepreplace ( storeL( 1.mem 1.mem 1.src ) );
%}

peephole %{
  peepmatch ( loadP storeP );
  peepconstraint ( 1.src == 0.dst, 1.dst == 0.mem );
  peepreplace ( storeP( 1.dst 1.dst 1.src ) );
%}

//----------SMARTSPILL RULES---------------------------------------------------
// These must follow all instruction definitions as they use the names
// defined in the instruction definitions.