;; Machine description for IBM RISC System 6000 (POWER) for GNU C compiler
;; Copyright (C) 1990-2020 Free Software Foundation, Inc.
;; Contributed by Richard Kenner (kenner@vlsi1.ultra.nyu.edu)
;; This file is part of GCC.
;; GCC is free software; you can redistribute it and/or modify it
;; under the terms of the GNU General Public License as published
;; by the Free Software Foundation; either version 3, or (at your
;; option) any later version.
;; GCC is distributed in the hope that it will be useful, but WITHOUT
;; ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
;; or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
;; License for more details.
;; You should have received a copy of the GNU General Public License
;; along with GCC; see the file COPYING3. If not see
;; <http://www.gnu.org/licenses/>.
;;- See file "rtl.def" for documentation on define_insn, match_*, et al.
;;
;; REGNOS
;;
(define_constants
[(FIRST_GPR_REGNO 0)
(STACK_POINTER_REGNUM 1)
(TOC_REGNUM 2)
(STATIC_CHAIN_REGNUM 11)
(HARD_FRAME_POINTER_REGNUM 31)
(LAST_GPR_REGNO 31)
(FIRST_FPR_REGNO 32)
(LAST_FPR_REGNO 63)
(FIRST_ALTIVEC_REGNO 64)
(LAST_ALTIVEC_REGNO 95)
(LR_REGNO 96)
(CTR_REGNO 97)
(CA_REGNO 98)
(ARG_POINTER_REGNUM 99)
(CR0_REGNO 100)
(CR1_REGNO 101)
(CR2_REGNO 102)
(CR3_REGNO 103)
(CR4_REGNO 104)
(CR5_REGNO 105)
(CR6_REGNO 106)
(CR7_REGNO 107)
(MAX_CR_REGNO 107)
(VRSAVE_REGNO 108)
(VSCR_REGNO 109)
(FRAME_POINTER_REGNUM 110)
])
;;
;; UNSPEC usage
;;
(define_c_enum "unspec"
[UNSPEC_PROBE_STACK ; probe stack memory reference
UNSPEC_TOCPTR ; address of a word pointing to the TOC
UNSPEC_TOC ; address of the TOC (more-or-less)
UNSPEC_TOCSLOT ; offset from r1 of toc pointer save slot
UNSPEC_MOVSI_GOT
UNSPEC_FCTIWZ
UNSPEC_FRIM
UNSPEC_FRIN
UNSPEC_FRIP
UNSPEC_FRIZ
UNSPEC_XSRDPI
UNSPEC_LD_MPIC ; load_macho_picbase
UNSPEC_RELD_MPIC ; re-load_macho_picbase
UNSPEC_MPIC_CORRECT ; macho_correct_pic
UNSPEC_TLSGD
UNSPEC_TLSLD
UNSPEC_TLS_GET_ADDR
UNSPEC_MOVESI_FROM_CR
UNSPEC_MOVESI_TO_CR
UNSPEC_TLSDTPREL
UNSPEC_TLSDTPRELHA
UNSPEC_TLSDTPRELLO
UNSPEC_TLSGOTDTPREL
UNSPEC_TLSTPREL
UNSPEC_TLSTPRELHA
UNSPEC_TLSTPRELLO
UNSPEC_TLSGOTTPREL
UNSPEC_TLSTLS
UNSPEC_TLSTLS_PCREL
UNSPEC_FIX_TRUNC_TF ; fadd, rounding towards zero
UNSPEC_STFIWX
UNSPEC_POPCNTB
UNSPEC_FRES
UNSPEC_SP_SET
UNSPEC_SP_TEST
UNSPEC_SYNC
UNSPEC_LWSYNC
UNSPEC_SYNC_OP
UNSPEC_ATOMIC
UNSPEC_CMPXCHG
UNSPEC_XCHG
UNSPEC_AND
UNSPEC_DLMZB
UNSPEC_DLMZB_CR
UNSPEC_DLMZB_STRLEN
UNSPEC_RSQRT
UNSPEC_TOCREL
UNSPEC_MACHOPIC_OFFSET
UNSPEC_BPERM
UNSPEC_COPYSIGN
UNSPEC_PARITY
UNSPEC_CMPB
UNSPEC_FCTIW
UNSPEC_FCTID
UNSPEC_LFIWAX
UNSPEC_LFIWZX
UNSPEC_FCTIWUZ
UNSPEC_NOP
UNSPEC_GRP_END_NOP
UNSPEC_P8V_FMRGOW
UNSPEC_P8V_MTVSRWZ
UNSPEC_P8V_RELOAD_FROM_GPR
UNSPEC_P8V_MTVSRD
UNSPEC_P8V_XXPERMDI
UNSPEC_P8V_RELOAD_FROM_VSX
UNSPEC_ADDG6S
UNSPEC_CDTBCD
UNSPEC_CBCDTD
UNSPEC_DIVE
UNSPEC_DIVEU
UNSPEC_UNPACK_128BIT
UNSPEC_PACK_128BIT
UNSPEC_LSQ
UNSPEC_FUSION_GPR
UNSPEC_STACK_CHECK
UNSPEC_CMPRB
UNSPEC_CMPRB2
UNSPEC_CMPEQB
UNSPEC_ADD_ROUND_TO_ODD
UNSPEC_SUB_ROUND_TO_ODD
UNSPEC_MUL_ROUND_TO_ODD
UNSPEC_DIV_ROUND_TO_ODD
UNSPEC_FMA_ROUND_TO_ODD
UNSPEC_SQRT_ROUND_TO_ODD
UNSPEC_TRUNC_ROUND_TO_ODD
UNSPEC_SIGNBIT
UNSPEC_SF_FROM_SI
UNSPEC_SI_FROM_SF
UNSPEC_PLTSEQ
UNSPEC_PLT16_HA
])
;;
;; UNSPEC_VOLATILE usage
;;
(define_c_enum "unspecv"
[UNSPECV_BLOCK
UNSPECV_LL ; load-locked
UNSPECV_SC ; store-conditional
UNSPECV_PROBE_STACK_RANGE ; probe range of stack addresses
UNSPECV_EH_RR ; eh_reg_restore
UNSPECV_ISYNC ; isync instruction
UNSPECV_MFTB ; move from time base
UNSPECV_DARN ; darn 1 (deliver a random number)
UNSPECV_DARN_32 ; darn 2
UNSPECV_DARN_RAW ; darn 0
UNSPECV_NLGR ; non-local goto receiver
UNSPECV_MFFS ; Move from FPSCR
UNSPECV_MFFSL ; Move from FPSCR light instruction version
UNSPECV_MFFSCRN ; Move from FPSCR float rounding mode
UNSPECV_MFFSCDRN ; Move from FPSCR decimal float rounding mode
UNSPECV_MTFSF ; Move to FPSCR Fields 8 to 15
UNSPECV_MTFSF_HI ; Move to FPSCR Fields 0 to 7
UNSPECV_MTFSB0 ; Set FPSCR Field bit to 0
UNSPECV_MTFSB1 ; Set FPSCR Field bit to 1
UNSPECV_SPLIT_STACK_RETURN ; A camouflaged return
UNSPECV_SPEC_BARRIER ; Speculation barrier
UNSPECV_PLT16_LO
UNSPECV_PLT_PCREL
])
; The three different kinds of epilogue.
(define_enum "epilogue_type" [normal sibcall eh_return])
;; Define an insn type attribute. This is used in function unit delay
;; computations.
(define_attr "type"
"integer,two,three,
add,logical,shift,insert,
mul,halfmul,div,
exts,cntlz,popcnt,isel,
load,store,fpload,fpstore,vecload,vecstore,
cmp,
branch,jmpreg,mfjmpr,mtjmpr,trap,isync,sync,load_l,store_c,
cr_logical,mfcr,mfcrf,mtcr,
fpcompare,fp,fpsimple,dmul,qmul,sdiv,ddiv,ssqrt,dsqrt,
vecsimple,veccomplex,vecdiv,veccmp,veccmpsimple,vecperm,
vecfloat,vecfdiv,vecdouble,mffgpr,mftgpr,crypto,
veclogical,veccmpfx,vecexts,vecmove,
htm,htmsimple,dfp,mma"
(const_string "integer"))
;; What data size does this instruction work on?
;; This is used for insert, mul and others as necessary.
(define_attr "size" "8,16,32,64,128" (const_string "32"))
;; What is the insn_cost for this insn? The target hook can still override
;; this. For optimizing for size the "length" attribute is used instead.
(define_attr "cost" "" (const_int 0))
;; Is this instruction record form ("dot", signed compare to 0, writing CR0)?
;; This is used for add, logical, shift, exts, mul.
(define_attr "dot" "no,yes" (const_string "no"))
;; Does this instruction sign-extend its result?
;; This is used for load insns.
(define_attr "sign_extend" "no,yes" (const_string "no"))
;; Does this cr_logical instruction have three operands? That is, BT != BB.
(define_attr "cr_logical_3op" "no,yes" (const_string "no"))
;; Does this instruction use indexed (that is, reg+reg) addressing?
;; This is used for load and store insns. If operand 0 or 1 is a MEM
;; it is automatically set based on that. If a load or store instruction
;; has fewer than two operands it needs to set this attribute manually
;; or the compiler will crash.
(define_attr "indexed" "no,yes"
(if_then_else (ior (match_operand 0 "indexed_address_mem")
(match_operand 1 "indexed_address_mem"))
(const_string "yes")
(const_string "no")))
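;; For example, "lwz 3,8(4)" uses D-form (reg+displacement) addressing, while
;; "lwzx 3,4,5" is the indexed X-form; the %X output modifier used in the load
;; and store templates below appends the trailing 'x' when the address operand
;; is indexed, so this attribute mirrors what is actually emitted.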
;; Does this instruction use update addressing?
;; This is used for load and store insns. See the comments for "indexed".
(define_attr "update" "no,yes"
(if_then_else (ior (match_operand 0 "update_address_mem")
(match_operand 1 "update_address_mem"))
(const_string "yes")
(const_string "no")))
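;; Update forms write the effective address back into the base register, e.g.
;; "lwzu 3,8(4)" loads from r4+8 and then sets r4 to that address; the %U
;; output modifier appends the 'u' when the memory operand uses update
;; addressing.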
;; Is this instruction using operands[2] as shift amount, and can that be a
;; register?
;; This is used for shift insns.
(define_attr "maybe_var_shift" "no,yes" (const_string "no"))
;; Is this instruction using a shift amount from a register?
;; This is used for shift insns.
(define_attr "var_shift" "no,yes"
(if_then_else (and (eq_attr "type" "shift")
(eq_attr "maybe_var_shift" "yes"))
(if_then_else (match_operand 2 "gpc_reg_operand")
(const_string "yes")
(const_string "no"))
(const_string "no")))
;; Is copying of this instruction disallowed?
(define_attr "cannot_copy" "no,yes" (const_string "no"))
;; Whether an insn is a prefixed insn, and an initial 'p' should be printed
;; before the instruction.  A prefixed instruction has a prefix instruction
;; word that extends the immediate field of the instruction from 12-16 bits to
;; 34 bits.  The macro ASM_OUTPUT_OPCODE emits the leading 'p' for prefixed
;; insns.  The default "length" attribute will also be adjusted to 12 bytes
;; for prefixed insns.
(define_attr "prefixed" "no,yes"
(cond [(ior (match_test "!TARGET_PREFIXED")
(match_test "!NONJUMP_INSN_P (insn)"))
(const_string "no")
(eq_attr "type" "load,fpload,vecload")
(if_then_else (match_test "prefixed_load_p (insn)")
(const_string "yes")
(const_string "no"))
(eq_attr "type" "store,fpstore,vecstore")
(if_then_else (match_test "prefixed_store_p (insn)")
(const_string "yes")
(const_string "no"))
(eq_attr "type" "integer,add")
(if_then_else (match_test "prefixed_paddi_p (insn)")
(const_string "yes")
(const_string "no"))]
(const_string "no")))
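;; For illustration: with prefixed instructions enabled (ISA 3.1 / power10), a
;; load whose displacement does not fit in 16 bits can be emitted as a single
;; prefixed "plwz" instead of an addis/lwz pair.  The prefixed form occupies
;; 8 bytes (prefix word plus instruction word); the 12-byte default length
;; mentioned above leaves room for an alignment nop, since a prefixed
;; instruction may not cross a 64-byte boundary.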
;; Return the number of real hardware instructions in a combined insn. If it
;; is 0, just use the length / 4.
(define_attr "num_insns" "" (const_int 0))
;; If an insn is prefixed, return the maximum number of prefixed instructions
;; in the insn. The macro ADJUST_INSN_LENGTH uses this number to adjust the
;; insn length.
(define_attr "max_prefixed_insns" "" (const_int 1))
;; Length of the instruction (in bytes). This length does not consider the
;; length for prefixed instructions. The macro ADJUST_INSN_LENGTH will adjust
;; the length if there are prefixed instructions.
;;
;; While it might be tempting to use num_insns to calculate the length, that is
;; problematic unless all insn lengths are adjusted to use num_insns (i.e. if
;; num_insns is 0, it falls back to the length, which in turn would need
;; num_insns again and recurse).
(define_attr "length" "" (const_int 4))
;; Processor type -- this attribute must exactly match the processor_type
;; enumeration in rs6000-opts.h.
(define_attr "cpu"
"ppc601,ppc603,ppc604,ppc604e,ppc620,ppc630,
ppc750,ppc7400,ppc7450,
ppc403,ppc405,ppc440,ppc476,
ppc8540,ppc8548,ppce300c2,ppce300c3,ppce500mc,ppce500mc64,ppce5500,ppce6500,
power4,power5,power6,power7,power8,power9,power10,
rs64a,mpccore,cell,ppca2,titan"
(const (symbol_ref "(enum attr_cpu) rs6000_tune")))
;; The ISA we implement.
(define_attr "isa" "any,p5,p6,p7,p7v,p8v,p9v,p9kf,p9tf,p10"
(const_string "any"))
;; Is this alternative enabled for the current CPU/ISA/etc.?
(define_attr "enabled" ""
(cond
[(eq_attr "isa" "any")
(const_int 1)
(and (eq_attr "isa" "p5")
(match_test "TARGET_POPCNTB"))
(const_int 1)
(and (eq_attr "isa" "p6")
(match_test "TARGET_CMPB"))
(const_int 1)
(and (eq_attr "isa" "p7")
(match_test "TARGET_POPCNTD"))
(const_int 1)
(and (eq_attr "isa" "p7v")
(match_test "TARGET_VSX"))
(const_int 1)
(and (eq_attr "isa" "p8v")
(match_test "TARGET_P8_VECTOR"))
(const_int 1)
(and (eq_attr "isa" "p9v")
(match_test "TARGET_P9_VECTOR"))
(const_int 1)
(and (eq_attr "isa" "p9kf")
(match_test "TARGET_FLOAT128_TYPE"))
(const_int 1)
(and (eq_attr "isa" "p9tf")
(match_test "FLOAT128_VECTOR_P (TFmode)"))
(const_int 1)
(and (eq_attr "isa" "p10")
(match_test "TARGET_POWER10"))
(const_int 1)
] (const_int 0)))
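;; For example, in zero_extendqi<mode>2 below the lxsibzx and vextractub
;; alternatives are tagged with isa "p9v", so they are only enabled when
;; TARGET_P9_VECTOR is set; the GPR alternatives stay "any" and are always
;; available.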
;; Is this instruction microcoded on the Cell processor?
; By default, sign-extended loads, record-form ("dot") shifts/extends/multiplies,
; and rotates/shifts by a variable amount are always microcoded.
(define_attr "cell_micro" "not,conditional,always"
(if_then_else (ior (and (eq_attr "type" "shift,exts,mul")
(eq_attr "dot" "yes"))
(and (eq_attr "type" "load")
(eq_attr "sign_extend" "yes"))
(and (eq_attr "type" "shift")
(eq_attr "var_shift" "yes")))
(const_string "always")
(const_string "not")))
(automata_option "ndfa")
(include "rs64.md")
(include "mpc.md")
(include "40x.md")
(include "440.md")
(include "476.md")
(include "601.md")
(include "603.md")
(include "6xx.md")
(include "7xx.md")
(include "7450.md")
(include "8540.md")
(include "e300c2c3.md")
(include "e500mc.md")
(include "e500mc64.md")
(include "e5500.md")
(include "e6500.md")
(include "power4.md")
(include "power5.md")
(include "power6.md")
(include "power7.md")
(include "power8.md")
(include "power9.md")
(include "power10.md")
(include "cell.md")
(include "a2.md")
(include "titan.md")
(include "predicates.md")
(include "constraints.md")
;; Mode iterators
; This mode iterator allows :GPR to be used to indicate the allowable size
; of whole values in GPRs.
(define_mode_iterator GPR [SI (DI "TARGET_POWERPC64")])
; And again, for patterns that need two (potentially) different integer modes.
(define_mode_iterator GPR2 [SI (DI "TARGET_POWERPC64")])
; Any supported integer mode.
(define_mode_iterator INT [QI HI SI DI TI PTI])
; Any supported integer mode that fits in one register.
(define_mode_iterator INT1 [QI HI SI (DI "TARGET_POWERPC64")])
; Integer modes supported in VSX registers with ISA 3.0 instructions
(define_mode_iterator INT_ISA3 [QI HI SI DI])
; Everything we can extend QImode to.
(define_mode_iterator EXTQI [SI (DI "TARGET_POWERPC64")])
; Everything we can extend HImode to.
(define_mode_iterator EXTHI [SI (DI "TARGET_POWERPC64")])
; Everything we can extend SImode to.
(define_mode_iterator EXTSI [(DI "TARGET_POWERPC64")])
; QImode or HImode for small integer moves and small atomic ops
(define_mode_iterator QHI [QI HI])
; QImode, HImode, SImode for fused ops only for GPR loads
(define_mode_iterator QHSI [QI HI SI])
; HImode or SImode for sign extended fusion ops
(define_mode_iterator HSI [HI SI])
; SImode or DImode, even if DImode doesn't fit in GPRs.
(define_mode_iterator SDI [SI DI])
; The size of a pointer. Also, the size of the value that a record-condition
; (one with a '.') will compare; and the size used for arithmetic carries.
(define_mode_iterator P [(SI "TARGET_32BIT") (DI "TARGET_64BIT")])
; Iterator to add PTImode along with TImode (TImode can go in VSX registers,
; PTImode is GPR only)
(define_mode_iterator TI2 [TI PTI])
; Any hardware-supported floating-point mode
(define_mode_iterator FP [
(SF "TARGET_HARD_FLOAT")
(DF "TARGET_HARD_FLOAT")
(TF "TARGET_HARD_FLOAT && TARGET_LONG_DOUBLE_128")
(IF "TARGET_HARD_FLOAT && TARGET_LONG_DOUBLE_128")
(KF "TARGET_FLOAT128_TYPE")
(DD "TARGET_DFP")
(TD "TARGET_DFP")])
; Any fma capable floating-point mode.
(define_mode_iterator FMA_F [
(SF "TARGET_HARD_FLOAT")
(DF "TARGET_HARD_FLOAT || VECTOR_UNIT_VSX_P (DFmode)")
(V4SF "VECTOR_UNIT_ALTIVEC_OR_VSX_P (V4SFmode)")
(V2DF "VECTOR_UNIT_ALTIVEC_OR_VSX_P (V2DFmode)")
(KF "TARGET_FLOAT128_HW && FLOAT128_IEEE_P (KFmode)")
(TF "TARGET_FLOAT128_HW && FLOAT128_IEEE_P (TFmode)")
])
; Floating point move iterators to combine binary and decimal moves
(define_mode_iterator FMOVE32 [SF SD])
(define_mode_iterator FMOVE64 [DF DD])
(define_mode_iterator FMOVE64X [DI DF DD])
(define_mode_iterator FMOVE128 [(TF "TARGET_LONG_DOUBLE_128")
(IF "FLOAT128_IBM_P (IFmode)")
(TD "TARGET_HARD_FLOAT")])
(define_mode_iterator FMOVE128_FPR [(TF "FLOAT128_2REG_P (TFmode)")
(IF "FLOAT128_2REG_P (IFmode)")
(TD "TARGET_HARD_FLOAT")])
; Iterators for 128 bit types for direct move
(define_mode_iterator FMOVE128_GPR [TI
V16QI
V8HI
V4SI
V4SF
V2DI
V2DF
V1TI
(KF "FLOAT128_VECTOR_P (KFmode)")
(TF "FLOAT128_VECTOR_P (TFmode)")])
; Iterator for 128-bit VSX types for pack/unpack
(define_mode_iterator FMOVE128_VSX [V1TI KF])
; Iterators for converting to/from TFmode
(define_mode_iterator IFKF [IF KF])
; Constraints for moving IF/KFmode.
(define_mode_attr IFKF_reg [(IF "d") (KF "wa")])
; Whether a floating-point move is OK; don't allow SD without hardware FP.
(define_mode_attr fmove_ok [(SF "")
(DF "")
(SD "TARGET_HARD_FLOAT")
(DD "")])
; Convert REAL_VALUE to the appropriate bits
(define_mode_attr real_value_to_target [(SF "REAL_VALUE_TO_TARGET_SINGLE")
(DF "REAL_VALUE_TO_TARGET_DOUBLE")
(SD "REAL_VALUE_TO_TARGET_DECIMAL32")
(DD "REAL_VALUE_TO_TARGET_DECIMAL64")])
; Whether 0.0 has an all-zero bit pattern
(define_mode_attr zero_fp [(SF "j")
(DF "j")
(TF "j")
(IF "j")
(KF "j")
(SD "wn")
(DD "wn")
(TD "wn")])
; Definitions for 64-bit VSX
(define_mode_attr f64_vsx [(DF "wa") (DD "wn")])
; Definitions for 64-bit direct move
(define_mode_attr f64_dm [(DF "wa") (DD "d")])
; Definitions for 64-bit use of altivec registers
(define_mode_attr f64_av [(DF "v") (DD "wn")])
; Definitions for 64-bit access to ISA 3.0 (power9) vector registers
(define_mode_attr f64_p9 [(DF "v") (DD "wn")])
; These modes do not fit in integer registers in 32-bit mode.
(define_mode_iterator DIFD [DI DF DD])
; Iterator for reciprocal estimate instructions
(define_mode_iterator RECIPF [SF DF V4SF V2DF])
; SFmode or DFmode.
(define_mode_iterator SFDF [SF DF])
; And again, for when we need two FP modes in a pattern.
(define_mode_iterator SFDF2 [SF DF])
; A generic s/d attribute, for sp/dp for example.
(define_mode_attr sd [(SF "s") (DF "d")
(V4SF "s") (V2DF "d")])
; "s" or nothing, for fmuls/fmul for example.
(define_mode_attr s [(SF "s") (DF "")])
; Iterator for 128-bit floating point that uses the IBM double-double format
(define_mode_iterator IBM128 [(IF "FLOAT128_IBM_P (IFmode)")
(TF "FLOAT128_IBM_P (TFmode)")])
; Iterator for 128-bit floating point that uses IEEE 128-bit float
(define_mode_iterator IEEE128 [(KF "FLOAT128_IEEE_P (KFmode)")
(TF "FLOAT128_IEEE_P (TFmode)")])
; Iterator for 128-bit floating point
(define_mode_iterator FLOAT128 [(KF "TARGET_FLOAT128_TYPE")
(IF "TARGET_FLOAT128_TYPE")
(TF "TARGET_LONG_DOUBLE_128")])
; Iterator for signbit on 64-bit machines with direct move
(define_mode_iterator SIGNBIT [(KF "FLOAT128_VECTOR_P (KFmode)")
(TF "FLOAT128_VECTOR_P (TFmode)")])
; Iterator for ISA 3.0 supported floating point types
(define_mode_iterator FP_ISA3 [SF DF])
; SF/DF constraint for arithmetic on traditional floating point registers
(define_mode_attr Ff [(SF "f") (DF "d") (DI "d")])
; SF/DF constraint for arithmetic on VSX registers using instructions added in
; ISA 2.06 (power7). This includes instructions that normally target DF mode,
; but are used on SFmode, since internally SFmode values are kept in the DFmode
; format.
(define_mode_attr Fv [(SF "wa") (DF "wa") (DI "wa")])
; Which isa is needed for those float instructions?
(define_mode_attr Fisa [(SF "p8v") (DF "*") (DI "*")])
; FRE/FRES support
(define_mode_attr FFRE [(SF "FRES") (DF "FRE")])
; Conditional returns.
(define_code_iterator any_return [return simple_return])
(define_code_attr return_pred [(return "direct_return ()")
(simple_return "1")])
(define_code_attr return_str [(return "") (simple_return "simple_")])
; Logical operators.
(define_code_iterator iorxor [ior xor])
(define_code_iterator and_ior_xor [and ior xor])
; Signed/unsigned variants of ops.
(define_code_iterator any_extend [sign_extend zero_extend])
(define_code_iterator any_fix [fix unsigned_fix])
(define_code_iterator any_float [float unsigned_float])
(define_code_attr u [(sign_extend "")
(zero_extend "u")
(fix "")
(unsigned_fix "u")])
(define_code_attr su [(sign_extend "s")
(zero_extend "u")
(fix "s")
(unsigned_fix "u")
(float "s")
(unsigned_float "u")])
(define_code_attr az [(sign_extend "a")
(zero_extend "z")
(fix "a")
(unsigned_fix "z")
(float "a")
(unsigned_float "z")])
(define_code_attr uns [(fix "")
(unsigned_fix "uns")
(float "")
(unsigned_float "uns")])
; Various instructions that come in SI and DI forms.
; A generic w/d attribute, for things like cmpw/cmpd.
(define_mode_attr wd [(QI "b")
(HI "h")
(SI "w")
(DI "d")
(V16QI "b")
(V8HI "h")
(V4SI "w")
(V2DI "d")
(V1TI "q")
(TI "q")])
;; How many bits in this mode?
(define_mode_attr bits [(QI "8") (HI "16") (SI "32") (DI "64")
(SF "32") (DF "64")])
; DImode bits
(define_mode_attr dbits [(QI "56") (HI "48") (SI "32")])
;; Bitmask for shift instructions
(define_mode_attr hH [(SI "h") (DI "H")])
;; A mode twice the size of the given mode
(define_mode_attr dmode [(SI "di") (DI "ti")])
(define_mode_attr DMODE [(SI "DI") (DI "TI")])
;; Suffix for reload patterns
(define_mode_attr ptrsize [(SI "32bit")
(DI "64bit")])
(define_mode_attr tptrsize [(SI "TARGET_32BIT")
(DI "TARGET_64BIT")])
(define_mode_attr mptrsize [(SI "si")
(DI "di")])
(define_mode_attr ptrload [(SI "lwz")
(DI "ld")])
(define_mode_attr ptrm [(SI "m")
(DI "Y")])
(define_mode_attr rreg [(SF "f")
(DF "wa")
(TF "f")
(TD "f")
(V4SF "wa")
(V2DF "wa")])
(define_mode_attr rreg2 [(SF "f")
(DF "d")])
(define_mode_attr SI_CONVERT_FP [(SF "TARGET_FCFIDS")
(DF "TARGET_FCFID")])
;; Mode iterator for logical operations on 128-bit types
(define_mode_iterator BOOL_128 [TI
PTI
(V16QI "TARGET_ALTIVEC")
(V8HI "TARGET_ALTIVEC")
(V4SI "TARGET_ALTIVEC")
(V4SF "TARGET_ALTIVEC")
(V2DI "TARGET_ALTIVEC")
(V2DF "TARGET_ALTIVEC")
(V1TI "TARGET_ALTIVEC")])
;; For the GPRs we use 3 constraint alternatives for register outputs: two
;; where an input is tied to the output register, and a third where the output
;; register is an early clobber, so we don't have to deal with register
;; overlaps.  For the vector types, we prefer to use the vector registers.  For
;; TImode, allow either.
;; Mode attribute for boolean operation register constraints for output
(define_mode_attr BOOL_REGS_OUTPUT [(TI "&r,r,r,wa,v")
(PTI "&r,r,r")
(V16QI "wa,v,&?r,?r,?r")
(V8HI "wa,v,&?r,?r,?r")
(V4SI "wa,v,&?r,?r,?r")
(V4SF "wa,v,&?r,?r,?r")
(V2DI "wa,v,&?r,?r,?r")
(V2DF "wa,v,&?r,?r,?r")
(V1TI "wa,v,&?r,?r,?r")])
;; Mode attribute for boolean operation register constraints for operand1
(define_mode_attr BOOL_REGS_OP1 [(TI "r,0,r,wa,v")
(PTI "r,0,r")
(V16QI "wa,v,r,0,r")
(V8HI "wa,v,r,0,r")
(V4SI "wa,v,r,0,r")
(V4SF "wa,v,r,0,r")
(V2DI "wa,v,r,0,r")
(V2DF "wa,v,r,0,r")
(V1TI "wa,v,r,0,r")])
;; Mode attribute for boolean operation register constraints for operand2
(define_mode_attr BOOL_REGS_OP2 [(TI "r,r,0,wa,v")
(PTI "r,r,0")
(V16QI "wa,v,r,r,0")
(V8HI "wa,v,r,r,0")
(V4SI "wa,v,r,r,0")
(V4SF "wa,v,r,r,0")
(V2DI "wa,v,r,r,0")
(V2DF "wa,v,r,r,0")
(V1TI "wa,v,r,r,0")])
;; Mode attribute for boolean operation register constraints for operand1
;; for one_cmpl. To simplify things, we repeat the constraint where 0
;; is used for operand1 or operand2
(define_mode_attr BOOL_REGS_UNARY [(TI "r,0,0,wa,v")
(PTI "r,0,0")
(V16QI "wa,v,r,0,0")
(V8HI "wa,v,r,0,0")
(V4SI "wa,v,r,0,0")
(V4SF "wa,v,r,0,0")
(V2DI "wa,v,r,0,0")
(V2DF "wa,v,r,0,0")
(V1TI "wa,v,r,0,0")])
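;; Reading one column across the three attributes gives the constraints of one
;; alternative.  For TImode, alternative 0 is "&r" with inputs "r","r" (an
;; early-clobber GPR destination), alternatives 1 and 2 tie the destination to
;; operand 1 or operand 2 via "0", and alternatives 3 and 4 keep everything in
;; "wa"/"v" vector registers.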
;; Reload iterator for creating the function to allocate a base register to
;; supplement addressing modes.
(define_mode_iterator RELOAD [V16QI V8HI V4SI V2DI V4SF V2DF V1TI
SF SD SI DF DD DI TI PTI KF IF TF
POI PXI])
;; Iterate over smin, smax
(define_code_iterator fp_minmax [smin smax])
(define_code_attr minmax [(smin "min")
(smax "max")])
(define_code_attr SMINMAX [(smin "SMIN")
(smax "SMAX")])
;; Iterator to optimize the following cases:
;; D-form load to FPR register & move to Altivec register
;; Move Altivec register to FPR register and store
(define_mode_iterator ALTIVEC_DFORM [DF
(SF "TARGET_P8_VECTOR")
(DI "TARGET_POWERPC64")])
(include "darwin.md")
;; Start with fixed-point load and store insns. Here we put only the more
;; complex forms. Basic data transfer is done later.
(define_insn "zero_extendqi<mode>2"
[(set (match_operand:EXTQI 0 "gpc_reg_operand" "=r,r,^wa,^v")
(zero_extend:EXTQI (match_operand:QI 1 "reg_or_mem_operand" "m,r,Z,v")))]
""
"@
lbz%U1%X1 %0,%1
rlwinm %0,%1,0,0xff
lxsibzx %x0,%y1
vextractub %0,%1,7"
[(set_attr "type" "load,shift,fpload,vecperm")
(set_attr "isa" "*,*,p9v,p9v")])
(define_insn_and_split "*zero_extendqi<mode>2_dot"
[(set (match_operand:CC 2 "cc_reg_operand" "=x,?y")
(compare:CC (zero_extend:EXTQI (match_operand:QI 1 "gpc_reg_operand" "r,r"))
(const_int 0)))
(clobber (match_scratch:EXTQI 0 "=r,r"))]
""
"@
andi. %0,%1,0xff
#"
"&& reload_completed && cc_reg_not_cr0_operand (operands[2], CCmode)"
[(set (match_dup 0)
(zero_extend:EXTQI (match_dup 1)))
(set (match_dup 2)
(compare:CC (match_dup 0)
(const_int 0)))]
""
[(set_attr "type" "logical")
(set_attr "dot" "yes")
(set_attr "length" "4,8")])
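;; The record-form ("dot") patterns above and below all follow the same scheme:
;; the CC destination prefers CR0 (constraint "x"); if register allocation
;; instead picks another CR field (the "?y" alternative), the template is "#"
;; and the insn is split after reload into the plain arithmetic/logical
;; operation followed by a separate compare against zero, which is what the
;; "4,8" length attribute reflects.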
(define_insn_and_split "*zero_extendqi<mode>2_dot2"
[(set (match_operand:CC 2 "cc_reg_operand" "=x,?y")
(compare:CC (zero_extend:EXTQI (match_operand:QI 1 "gpc_reg_operand" "r,r"))
(const_int 0)))
(set (match_operand:EXTQI 0 "gpc_reg_operand" "=r,r")
(zero_extend:EXTQI (match_dup 1)))]
""
"@
andi. %0,%1,0xff
#"
"&& reload_completed && cc_reg_not_cr0_operand (operands[2], CCmode)"
[(set (match_dup 0)
(zero_extend:EXTQI (match_dup 1)))
(set (match_dup 2)
(compare:CC (match_dup 0)
(const_int 0)))]
""
[(set_attr "type" "logical")
(set_attr "dot" "yes")
(set_attr "length" "4,8")])
(define_insn "zero_extendhi<mode>2"
[(set (match_operand:EXTHI 0 "gpc_reg_operand" "=r,r,^wa,^v")
(zero_extend:EXTHI (match_operand:HI 1 "reg_or_mem_operand" "m,r,Z,v")))]
""
"@
lhz%U1%X1 %0,%1
rlwinm %0,%1,0,0xffff
lxsihzx %x0,%y1
vextractuh %0,%1,6"
[(set_attr "type" "load,shift,fpload,vecperm")
(set_attr "isa" "*,*,p9v,p9v")])
(define_insn_and_split "*zero_extendhi<mode>2_dot"
[(set (match_operand:CC 2 "cc_reg_operand" "=x,?y")
(compare:CC (zero_extend:EXTHI (match_operand:HI 1 "gpc_reg_operand" "r,r"))
(const_int 0)))
(clobber (match_scratch:EXTHI 0 "=r,r"))]
""
"@
andi. %0,%1,0xffff
#"
"&& reload_completed && cc_reg_not_cr0_operand (operands[2], CCmode)"
[(set (match_dup 0)
(zero_extend:EXTHI (match_dup 1)))
(set (match_dup 2)
(compare:CC (match_dup 0)
(const_int 0)))]
""
[(set_attr "type" "logical")
(set_attr "dot" "yes")
(set_attr "length" "4,8")])
(define_insn_and_split "*zero_extendhi<mode>2_dot2"
[(set (match_operand:CC 2 "cc_reg_operand" "=x,?y")
(compare:CC (zero_extend:EXTHI (match_operand:HI 1 "gpc_reg_operand" "r,r"))
(const_int 0)))
(set (match_operand:EXTHI 0 "gpc_reg_operand" "=r,r")
(zero_extend:EXTHI (match_dup 1)))]
""
"@
andi. %0,%1,0xffff
#"
"&& reload_completed && cc_reg_not_cr0_operand (operands[2], CCmode)"
[(set (match_dup 0)
(zero_extend:EXTHI (match_dup 1)))
(set (match_dup 2)
(compare:CC (match_dup 0)
(const_int 0)))]
""
[(set_attr "type" "logical")
(set_attr "dot" "yes")
(set_attr "length" "4,8")])
(define_insn "zero_extendsi<mode>2"
[(set (match_operand:EXTSI 0 "gpc_reg_operand" "=r,r,d,wa,wa,r,wa")
(zero_extend:EXTSI (match_operand:SI 1 "reg_or_mem_operand" "m,r,Z,Z,r,wa,wa")))]
""
"@
lwz%U1%X1 %0,%1
rldicl %0,%1,0,32
lfiwzx %0,%y1
lxsiwzx %x0,%y1
mtvsrwz %x0,%1
mfvsrwz %0,%x1
xxextractuw %x0,%x1,4"
[(set_attr "type" "load,shift,fpload,fpload,mffgpr,mftgpr,vecexts")
(set_attr "isa" "*,*,p7,p8v,p8v,p8v,p9v")])
(define_insn_and_split "*zero_extendsi<mode>2_dot"
[(set (match_operand:CC 2 "cc_reg_operand" "=x,?y")
(compare:CC (zero_extend:EXTSI (match_operand:SI 1 "gpc_reg_operand" "r,r"))
(const_int 0)))
(clobber (match_scratch:EXTSI 0 "=r,r"))]
""
"@
rldicl. %0,%1,0,32
#"
"&& reload_completed && cc_reg_not_cr0_operand (operands[2], CCmode)"
[(set (match_dup 0)
(zero_extend:DI (match_dup 1)))
(set (match_dup 2)
(compare:CC (match_dup 0)
(const_int 0)))]
""
[(set_attr "type" "shift")
(set_attr "dot" "yes")
(set_attr "length" "4,8")])
(define_insn_and_split "*zero_extendsi<mode>2_dot2"
[(set (match_operand:CC 2 "cc_reg_operand" "=x,?y")
(compare:CC (zero_extend:EXTSI (match_operand:SI 1 "gpc_reg_operand" "r,r"))
(const_int 0)))
(set (match_operand:EXTSI 0 "gpc_reg_operand" "=r,r")
(zero_extend:EXTSI (match_dup 1)))]
""
"@
rldicl. %0,%1,0,32
#"
"&& reload_completed && cc_reg_not_cr0_operand (operands[2], CCmode)"
[(set (match_dup 0)
(zero_extend:EXTSI (match_dup 1)))
(set (match_dup 2)
(compare:CC (match_dup 0)
(const_int 0)))]
""
[(set_attr "type" "shift")
(set_attr "dot" "yes")
(set_attr "length" "4,8")])
(define_insn "extendqi<mode>2"
[(set (match_operand:EXTQI 0 "gpc_reg_operand" "=r,?*v")
(sign_extend:EXTQI (match_operand:QI 1 "gpc_reg_operand" "r,?*v")))]
""
"@
extsb %0,%1
vextsb2d %0,%1"
[(set_attr "type" "exts,vecperm")
(set_attr "isa" "*,p9v")])
(define_insn_and_split "*extendqi<mode>2_dot"
[(set (match_operand:CC 2 "cc_reg_operand" "=x,?y")
(compare:CC (sign_extend:EXTQI (match_operand:QI 1 "gpc_reg_operand" "r,r"))
(const_int 0)))
(clobber (match_scratch:EXTQI 0 "=r,r"))]
""
"@
extsb. %0,%1
#"
"&& reload_completed && cc_reg_not_cr0_operand (operands[2], CCmode)"
[(set (match_dup 0)
(sign_extend:EXTQI (match_dup 1)))
(set (match_dup 2)
(compare:CC (match_dup 0)
(const_int 0)))]
""
[(set_attr "type" "exts")
(set_attr "dot" "yes")
(set_attr "length" "4,8")])
(define_insn_and_split "*extendqi<mode>2_dot2"
[(set (match_operand:CC 2 "cc_reg_operand" "=x,?y")
(compare:CC (sign_extend:EXTQI (match_operand:QI 1 "gpc_reg_operand" "r,r"))
(const_int 0)))
(set (match_operand:EXTQI 0 "gpc_reg_operand" "=r,r")
(sign_extend:EXTQI (match_dup 1)))]
""
"@
extsb. %0,%1
#"
"&& reload_completed && cc_reg_not_cr0_operand (operands[2], CCmode)"
[(set (match_dup 0)
(sign_extend:EXTQI (match_dup 1)))
(set (match_dup 2)
(compare:CC (match_dup 0)
(const_int 0)))]
""
[(set_attr "type" "exts")
(set_attr "dot" "yes")
(set_attr "length" "4,8")])
(define_expand "extendhi<mode>2"
[(set (match_operand:EXTHI 0 "gpc_reg_operand")
(sign_extend:EXTHI (match_operand:HI 1 "gpc_reg_operand")))]
""
"")
(define_insn "*extendhi<mode>2"
[(set (match_operand:EXTHI 0 "gpc_reg_operand" "=r,r,?*v,?*v")
(sign_extend:EXTHI (match_operand:HI 1 "reg_or_mem_operand" "m,r,Z,v")))]
""
"@
lha%U1%X1 %0,%1
extsh %0,%1
#
vextsh2d %0,%1"
[(set_attr "type" "load,exts,fpload,vecperm")
(set_attr "sign_extend" "yes")
(set_attr "length" "*,*,8,*")
(set_attr "isa" "*,*,p9v,p9v")])
(define_split
[(set (match_operand:EXTHI 0 "altivec_register_operand")
(sign_extend:EXTHI
(match_operand:HI 1 "indexed_or_indirect_operand")))]
"TARGET_P9_VECTOR && reload_completed"
[(set (match_dup 2)
(match_dup 1))
(set (match_dup 0)
(sign_extend:EXTHI (match_dup 2)))]
{
operands[2] = gen_rtx_REG (HImode, REGNO (operands[0]));
})
(define_insn_and_split "*extendhi<mode>2_dot"
[(set (match_operand:CC 2 "cc_reg_operand" "=x,?y")
(compare:CC (sign_extend:EXTHI (match_operand:HI 1 "gpc_reg_operand" "r,r"))
(const_int 0)))
(clobber (match_scratch:EXTHI 0 "=r,r"))]
""
"@
extsh. %0,%1
#"
"&& reload_completed && cc_reg_not_cr0_operand (operands[2], CCmode)"
[(set (match_dup 0)
(sign_extend:EXTHI (match_dup 1)))
(set (match_dup 2)
(compare:CC (match_dup 0)
(const_int 0)))]
""
[(set_attr "type" "exts")
(set_attr "dot" "yes")
(set_attr "length" "4,8")])
(define_insn_and_split "*extendhi<mode>2_dot2"
[(set (match_operand:CC 2 "cc_reg_operand" "=x,?y")
(compare:CC (sign_extend:EXTHI (match_operand:HI 1 "gpc_reg_operand" "r,r"))
(const_int 0)))
(set (match_operand:EXTHI 0 "gpc_reg_operand" "=r,r")
(sign_extend:EXTHI (match_dup 1)))]
""
"@
extsh. %0,%1
#"
"&& reload_completed && cc_reg_not_cr0_operand (operands[2], CCmode)"
[(set (match_dup 0)
(sign_extend:EXTHI (match_dup 1)))
(set (match_dup 2)
(compare:CC (match_dup 0)
(const_int 0)))]
""
[(set_attr "type" "exts")
(set_attr "dot" "yes")
(set_attr "length" "4,8")])
(define_insn "extendsi<mode>2"
[(set (match_operand:EXTSI 0 "gpc_reg_operand"
"=r, r, d, wa, wa, v, v, wr")
(sign_extend:EXTSI (match_operand:SI 1 "lwa_operand"
"YZ, r, Z, Z, r, v, v, ?wa")))]
""
"@
lwa%U1%X1 %0,%1
extsw %0,%1
lfiwax %0,%y1
lxsiwax %x0,%y1
mtvsrwa %x0,%1
vextsw2d %0,%1
#
#"
[(set_attr "type" "load,exts,fpload,fpload,mffgpr,vecexts,vecperm,mftgpr")
(set_attr "sign_extend" "yes")
(set_attr "length" "*,*,*,*,*,*,8,8")
(set_attr "isa" "*,*,p6,p8v,p8v,p9v,p8v,p8v")])
(define_split
[(set (match_operand:EXTSI 0 "int_reg_operand")
(sign_extend:EXTSI (match_operand:SI 1 "vsx_register_operand")))]
"TARGET_DIRECT_MOVE_64BIT && reload_completed"
[(set (match_dup 2)
(match_dup 1))
(set (match_dup 0)
(sign_extend:DI (match_dup 2)))]
{
operands[2] = gen_rtx_REG (SImode, reg_or_subregno (operands[0]));
})
(define_split
[(set (match_operand:DI 0 "altivec_register_operand")
(sign_extend:DI (match_operand:SI 1 "altivec_register_operand")))]
"TARGET_P8_VECTOR && !TARGET_P9_VECTOR && reload_completed"
[(const_int 0)]
{
rtx dest = operands[0];
rtx src = operands[1];
int dest_regno = REGNO (dest);
int src_regno = REGNO (src);
rtx dest_v2di = gen_rtx_REG (V2DImode, dest_regno);
rtx src_v4si = gen_rtx_REG (V4SImode, src_regno);
if (BYTES_BIG_ENDIAN)
{
emit_insn (gen_altivec_vupkhsw (dest_v2di, src_v4si));
emit_insn (gen_vsx_xxspltd_v2di (dest_v2di, dest_v2di, const1_rtx));
}
else
{
emit_insn (gen_altivec_vupklsw (dest_v2di, src_v4si));
emit_insn (gen_vsx_xxspltd_v2di (dest_v2di, dest_v2di, const0_rtx));
}
DONE;
})
(define_insn_and_split "*extendsi<mode>2_dot"
[(set (match_operand:CC 2 "cc_reg_operand" "=x,?y")
(compare:CC (sign_extend:EXTSI (match_operand:SI 1 "gpc_reg_operand" "r,r"))
(const_int 0)))
(clobber (match_scratch:EXTSI 0 "=r,r"))]
""
"@
extsw. %0,%1
#"
"&& reload_completed && cc_reg_not_cr0_operand (operands[2], CCmode)"
[(set (match_dup 0)
(sign_extend:EXTSI (match_dup 1)))
(set (match_dup 2)
(compare:CC (match_dup 0)
(const_int 0)))]
""
[(set_attr "type" "exts")
(set_attr "dot" "yes")
(set_attr "length" "4,8")])
(define_insn_and_split "*extendsi<mode>2_dot2"
[(set (match_operand:CC 2 "cc_reg_operand" "=x,?y")
(compare:CC (sign_extend:EXTSI (match_operand:SI 1 "gpc_reg_operand" "r,r"))
(const_int 0)))
(set (match_operand:EXTSI 0 "gpc_reg_operand" "=r,r")
(sign_extend:EXTSI (match_dup 1)))]
""
"@
extsw. %0,%1
#"
"&& reload_completed && cc_reg_not_cr0_operand (operands[2], CCmode)"
[(set (match_dup 0)
(sign_extend:EXTSI (match_dup 1)))
(set (match_dup 2)
(compare:CC (match_dup 0)
(const_int 0)))]
""
[(set_attr "type" "exts")
(set_attr "dot" "yes")
(set_attr "length" "4,8")])
;; IBM 405, 440, 464 and 476 half-word multiplication operations.
(define_insn "*macchwc"
[(set (match_operand:CC 3 "cc_reg_operand" "=x")
(compare:CC (plus:SI (mult:SI (ashiftrt:SI
(match_operand:SI 2 "gpc_reg_operand" "r")
(const_int 16))
(sign_extend:SI
(match_operand:HI 1 "gpc_reg_operand" "r")))
(match_operand:SI 4 "gpc_reg_operand" "0"))
(const_int 0)))
(set (match_operand:SI 0 "gpc_reg_operand" "=r")
(plus:SI (mult:SI (ashiftrt:SI
(match_dup 2)
(const_int 16))
(sign_extend:SI
(match_dup 1)))
(match_dup 4)))]
"TARGET_MULHW"
"macchw. %0,%1,%2"
[(set_attr "type" "halfmul")])
(define_insn "*macchw"
[(set (match_operand:SI 0 "gpc_reg_operand" "=r")
(plus:SI (mult:SI (ashiftrt:SI
(match_operand:SI 2 "gpc_reg_operand" "r")
(const_int 16))
(sign_extend:SI
(match_operand:HI 1 "gpc_reg_operand" "r")))
(match_operand:SI 3 "gpc_reg_operand" "0")))]
"TARGET_MULHW"
"macchw %0,%1,%2"
[(set_attr "type" "halfmul")])
(define_insn "*macchwuc"
[(set (match_operand:CC 3 "cc_reg_operand" "=x")
(compare:CC (plus:SI (mult:SI (lshiftrt:SI
(match_operand:SI 2 "gpc_reg_operand" "r")
(const_int 16))
(zero_extend:SI
(match_operand:HI 1 "gpc_reg_operand" "r")))
(match_operand:SI 4 "gpc_reg_operand" "0"))
(const_int 0)))
(set (match_operand:SI 0 "gpc_reg_operand" "=r")
(plus:SI (mult:SI (lshiftrt:SI
(match_dup 2)
(const_int 16))
(zero_extend:SI
(match_dup 1)))
(match_dup 4)))]
"TARGET_MULHW"
"macchwu. %0,%1,%2"
[(set_attr "type" "halfmul")])
(define_insn "*macchwu"
[(set (match_operand:SI 0 "gpc_reg_operand" "=r")
(plus:SI (mult:SI (lshiftrt:SI
(match_operand:SI 2 "gpc_reg_operand" "r")
(const_int 16))
(zero_extend:SI
(match_operand:HI 1 "gpc_reg_operand" "r")))
(match_operand:SI 3 "gpc_reg_operand" "0")))]
"TARGET_MULHW"
"macchwu %0,%1,%2"
[(set_attr "type" "halfmul")])
(define_insn "*machhwc"
[(set (match_operand:CC 3 "cc_reg_operand" "=x")
(compare:CC (plus:SI (mult:SI (ashiftrt:SI
(match_operand:SI 1 "gpc_reg_operand" "%r")
(const_int 16))
(ashiftrt:SI
(match_operand:SI 2 "gpc_reg_operand" "r")
(const_int 16)))
(match_operand:SI 4 "gpc_reg_operand" "0"))
(const_int 0)))
(set (match_operand:SI 0 "gpc_reg_operand" "=r")
(plus:SI (mult:SI (ashiftrt:SI
(match_dup 1)
(const_int 16))
(ashiftrt:SI
(match_dup 2)
(const_int 16)))
(match_dup 4)))]
"TARGET_MULHW"
"machhw. %0,%1,%2"
[(set_attr "type" "halfmul")])
(define_insn "*machhw"
[(set (match_operand:SI 0 "gpc_reg_operand" "=r")
(plus:SI (mult:SI (ashiftrt:SI
(match_operand:SI 1 "gpc_reg_operand" "%r")
(const_int 16))
(ashiftrt:SI
(match_operand:SI 2 "gpc_reg_operand" "r")
(const_int 16)))
(match_operand:SI 3 "gpc_reg_operand" "0")))]
"TARGET_MULHW"
"machhw %0,%1,%2"
[(set_attr "type" "halfmul")])
(define_insn "*machhwuc"
[(set (match_operand:CC 3 "cc_reg_operand" "=x")
(compare:CC (plus:SI (mult:SI (lshiftrt:SI
(match_operand:SI 1 "gpc_reg_operand" "%r")
(const_int 16))
(lshiftrt:SI
(match_operand:SI 2 "gpc_reg_operand" "r")
(const_int 16)))
(match_operand:SI 4 "gpc_reg_operand" "0"))
(const_int 0)))
(set (match_operand:SI 0 "gpc_reg_operand" "=r")
(plus:SI (mult:SI (lshiftrt:SI
(match_dup 1)
(const_int 16))
(lshiftrt:SI
(match_dup 2)
(const_int 16)))
(match_dup 4)))]
"TARGET_MULHW"
"machhwu. %0,%1,%2"
[(set_attr "type" "halfmul")])
(define_insn "*machhwu"
[(set (match_operand:SI 0 "gpc_reg_operand" "=r")
(plus:SI (mult:SI (lshiftrt:SI
(match_operand:SI 1 "gpc_reg_operand" "%r")
(const_int 16))
(lshiftrt:SI
(match_operand:SI 2 "gpc_reg_operand" "r")
(const_int 16)))
(match_operand:SI 3 "gpc_reg_operand" "0")))]
"TARGET_MULHW"
"machhwu %0,%1,%2"
[(set_attr "type" "halfmul")])
(define_insn "*maclhwc"
[(set (match_operand:CC 3 "cc_reg_operand" "=x")
(compare:CC (plus:SI (mult:SI (sign_extend:SI
(match_operand:HI 1 "gpc_reg_operand" "%r"))
(sign_extend:SI
(match_operand:HI 2 "gpc_reg_operand" "r")))
(match_operand:SI 4 "gpc_reg_operand" "0"))
(const_int 0)))
(set (match_operand:SI 0 "gpc_reg_operand" "=r")
(plus:SI (mult:SI (sign_extend:SI
(match_dup 1))
(sign_extend:SI
(match_dup 2)))
(match_dup 4)))]
"TARGET_MULHW"
"maclhw. %0,%1,%2"
[(set_attr "type" "halfmul")])
(define_insn "*maclhw"
[(set (match_operand:SI 0 "gpc_reg_operand" "=r")
(plus:SI (mult:SI (sign_extend:SI
(match_operand:HI 1 "gpc_reg_operand" "%r"))
(sign_extend:SI
(match_operand:HI 2 "gpc_reg_operand" "r")))
(match_operand:SI 3 "gpc_reg_operand" "0")))]
"TARGET_MULHW"
"maclhw %0,%1,%2"
[(set_attr "type" "halfmul")])
(define_insn "*maclhwuc"
[(set (match_operand:CC 3 "cc_reg_operand" "=x")
(compare:CC (plus:SI (mult:SI (zero_extend:SI
(match_operand:HI 1 "gpc_reg_operand" "%r"))
(zero_extend:SI
(match_operand:HI 2 "gpc_reg_operand" "r")))
(match_operand:SI 4 "gpc_reg_operand" "0"))
(const_int 0)))
(set (match_operand:SI 0 "gpc_reg_operand" "=r")
(plus:SI (mult:SI (zero_extend:SI
(match_dup 1))
(zero_extend:SI
(match_dup 2)))
(match_dup 4)))]
"TARGET_MULHW"
"maclhwu. %0,%1,%2"
[(set_attr "type" "halfmul")])
(define_insn "*maclhwu"
[(set (match_operand:SI 0 "gpc_reg_operand" "=r")
(plus:SI (mult:SI (zero_extend:SI
(match_operand:HI 1 "gpc_reg_operand" "%r"))
(zero_extend:SI
(match_operand:HI 2 "gpc_reg_operand" "r")))
(match_operand:SI 3 "gpc_reg_operand" "0")))]
"TARGET_MULHW"
"maclhwu %0,%1,%2"
[(set_attr "type" "halfmul")])
(define_insn "*nmacchwc"
[(set (match_operand:CC 3 "cc_reg_operand" "=x")
(compare:CC (minus:SI (match_operand:SI 4 "gpc_reg_operand" "0")
(mult:SI (ashiftrt:SI
(match_operand:SI 2 "gpc_reg_operand" "r")
(const_int 16))
(sign_extend:SI
(match_operand:HI 1 "gpc_reg_operand" "r"))))
(const_int 0)))
(set (match_operand:SI 0 "gpc_reg_operand" "=r")
(minus:SI (match_dup 4)
(mult:SI (ashiftrt:SI
(match_dup 2)
(const_int 16))
(sign_extend:SI
(match_dup 1)))))]
"TARGET_MULHW"
"nmacchw. %0,%1,%2"
[(set_attr "type" "halfmul")])
(define_insn "*nmacchw"
[(set (match_operand:SI 0 "gpc_reg_operand" "=r")
(minus:SI (match_operand:SI 3 "gpc_reg_operand" "0")
(mult:SI (ashiftrt:SI
(match_operand:SI 2 "gpc_reg_operand" "r")
(const_int 16))
(sign_extend:SI
(match_operand:HI 1 "gpc_reg_operand" "r")))))]
"TARGET_MULHW"
"nmacchw %0,%1,%2"
[(set_attr "type" "halfmul")])
(define_insn "*nmachhwc"
[(set (match_operand:CC 3 "cc_reg_operand" "=x")
(compare:CC (minus:SI (match_operand:SI 4 "gpc_reg_operand" "0")
(mult:SI (ashiftrt:SI
(match_operand:SI 1 "gpc_reg_operand" "%r")
(const_int 16))
(ashiftrt:SI
(match_operand:SI 2 "gpc_reg_operand" "r")
(const_int 16))))
(const_int 0)))
(set (match_operand:SI 0 "gpc_reg_operand" "=r")
(minus:SI (match_dup 4)
(mult:SI (ashiftrt:SI
(match_dup 1)
(const_int 16))
(ashiftrt:SI
(match_dup 2)
(const_int 16)))))]
"TARGET_MULHW"
"nmachhw. %0,%1,%2"
[(set_attr "type" "halfmul")])
(define_insn "*nmachhw"
[(set (match_operand:SI 0 "gpc_reg_operand" "=r")
(minus:SI (match_operand:SI 3 "gpc_reg_operand" "0")
(mult:SI (ashiftrt:SI
(match_operand:SI 1 "gpc_reg_operand" "%r")
(const_int 16))
(ashiftrt:SI
(match_operand:SI 2 "gpc_reg_operand" "r")
(const_int 16)))))]
"TARGET_MULHW"
"nmachhw %0,%1,%2"
[(set_attr "type" "halfmul")])
(define_insn "*nmaclhwc"
[(set (match_operand:CC 3 "cc_reg_operand" "=x")
(compare:CC (minus:SI (match_operand:SI 4 "gpc_reg_operand" "0")
(mult:SI (sign_extend:SI
(match_operand:HI 1 "gpc_reg_operand" "%r"))
(sign_extend:SI
(match_operand:HI 2 "gpc_reg_operand" "r"))))
(const_int 0)))
(set (match_operand:SI 0 "gpc_reg_operand" "=r")
(minus:SI (match_dup 4)
(mult:SI (sign_extend:SI
(match_dup 1))
(sign_extend:SI
(match_dup 2)))))]
"TARGET_MULHW"
"nmaclhw. %0,%1,%2"
[(set_attr "type" "halfmul")])
(define_insn "*nmaclhw"
[(set (match_operand:SI 0 "gpc_reg_operand" "=r")
(minus:SI (match_operand:SI 3 "gpc_reg_operand" "0")
(mult:SI (sign_extend:SI
(match_operand:HI 1 "gpc_reg_operand" "%r"))
(sign_extend:SI
(match_operand:HI 2 "gpc_reg_operand" "r")))))]
"TARGET_MULHW"
"nmaclhw %0,%1,%2"
[(set_attr "type" "halfmul")])
(define_insn "*mulchwc"
[(set (match_operand:CC 3 "cc_reg_operand" "=x")
(compare:CC (mult:SI (ashiftrt:SI
(match_operand:SI 2 "gpc_reg_operand" "r")
(const_int 16))
(sign_extend:SI
(match_operand:HI 1 "gpc_reg_operand" "r")))
(const_int 0)))
(set (match_operand:SI 0 "gpc_reg_operand" "=r")
(mult:SI (ashiftrt:SI
(match_dup 2)
(const_int 16))
(sign_extend:SI
(match_dup 1))))]
"TARGET_MULHW"
"mulchw. %0,%1,%2"
[(set_attr "type" "halfmul")])
(define_insn "*mulchw"
[(set (match_operand:SI 0 "gpc_reg_operand" "=r")
(mult:SI (ashiftrt:SI
(match_operand:SI 2 "gpc_reg_operand" "r")
(const_int 16))
(sign_extend:SI
(match_operand:HI 1 "gpc_reg_operand" "r"))))]
"TARGET_MULHW"
"mulchw %0,%1,%2"
[(set_attr "type" "halfmul")])
(define_insn "*mulchwuc"
[(set (match_operand:CC 3 "cc_reg_operand" "=x")
(compare:CC (mult:SI (lshiftrt:SI
(match_operand:SI 2 "gpc_reg_operand" "r")
(const_int 16))
(zero_extend:SI
(match_operand:HI 1 "gpc_reg_operand" "r")))
(const_int 0)))
(set (match_operand:SI 0 "gpc_reg_operand" "=r")
(mult:SI (lshiftrt:SI
(match_dup 2)
(const_int 16))
(zero_extend:SI
(match_dup 1))))]
"TARGET_MULHW"
"mulchwu. %0,%1,%2"
[(set_attr "type" "halfmul")])
(define_insn "*mulchwu"
[(set (match_operand:SI 0 "gpc_reg_operand" "=r")
(mult:SI (lshiftrt:SI
(match_operand:SI 2 "gpc_reg_operand" "r")
(const_int 16))
(zero_extend:SI
(match_operand:HI 1 "gpc_reg_operand" "r"))))]
"TARGET_MULHW"
"mulchwu %0,%1,%2"
[(set_attr "type" "halfmul")])
(define_insn "*mulhhwc"
[(set (match_operand:CC 3 "cc_reg_operand" "=x")
(compare:CC (mult:SI (ashiftrt:SI
(match_operand:SI 1 "gpc_reg_operand" "%r")
(const_int 16))
(ashiftrt:SI
(match_operand:SI 2 "gpc_reg_operand" "r")
(const_int 16)))
(const_int 0)))
(set (match_operand:SI 0 "gpc_reg_operand" "=r")
(mult:SI (ashiftrt:SI
(match_dup 1)
(const_int 16))
(ashiftrt:SI
(match_dup 2)
(const_int 16))))]
"TARGET_MULHW"
"mulhhw. %0,%1,%2"
[(set_attr "type" "halfmul")])
(define_insn "*mulhhw"
[(set (match_operand:SI 0 "gpc_reg_operand" "=r")
(mult:SI (ashiftrt:SI
(match_operand:SI 1 "gpc_reg_operand" "%r")
(const_int 16))
(ashiftrt:SI
(match_operand:SI 2 "gpc_reg_operand" "r")
(const_int 16))))]
"TARGET_MULHW"
"mulhhw %0,%1,%2"
[(set_attr "type" "halfmul")])
(define_insn "*mulhhwuc"
[(set (match_operand:CC 3 "cc_reg_operand" "=x")
(compare:CC (mult:SI (lshiftrt:SI
(match_operand:SI 1 "gpc_reg_operand" "%r")
(const_int 16))
(lshiftrt:SI
(match_operand:SI 2 "gpc_reg_operand" "r")
(const_int 16)))
(const_int 0)))
(set (match_operand:SI 0 "gpc_reg_operand" "=r")
(mult:SI (lshiftrt:SI
(match_dup 1)
(const_int 16))
(lshiftrt:SI
(match_dup 2)
(const_int 16))))]
"TARGET_MULHW"
"mulhhwu. %0,%1,%2"
[(set_attr "type" "halfmul")])
(define_insn "*mulhhwu"
[(set (match_operand:SI 0 "gpc_reg_operand" "=r")
(mult:SI (lshiftrt:SI
(match_operand:SI 1 "gpc_reg_operand" "%r")
(const_int 16))
(lshiftrt:SI
(match_operand:SI 2 "gpc_reg_operand" "r")
(const_int 16))))]
"TARGET_MULHW"
"mulhhwu %0,%1,%2"
[(set_attr "type" "halfmul")])
(define_insn "*mullhwc"
[(set (match_operand:CC 3 "cc_reg_operand" "=x")
(compare:CC (mult:SI (sign_extend:SI
(match_operand:HI 1 "gpc_reg_operand" "%r"))
(sign_extend:SI
(match_operand:HI 2 "gpc_reg_operand" "r")))
(const_int 0)))
(set (match_operand:SI 0 "gpc_reg_operand" "=r")
(mult:SI (sign_extend:SI
(match_dup 1))
(sign_extend:SI
(match_dup 2))))]
"TARGET_MULHW"
"mullhw. %0,%1,%2"
[(set_attr "type" "halfmul")])
(define_insn "*mullhw"
[(set (match_operand:SI 0 "gpc_reg_operand" "=r")
(mult:SI (sign_extend:SI
(match_operand:HI 1 "gpc_reg_operand" "%r"))
(sign_extend:SI
(match_operand:HI 2 "gpc_reg_operand" "r"))))]
"TARGET_MULHW"
"mullhw %0,%1,%2"
[(set_attr "type" "halfmul")])
(define_insn "*mullhwuc"
[(set (match_operand:CC 3 "cc_reg_operand" "=x")
(compare:CC (mult:SI (zero_extend:SI
(match_operand:HI 1 "gpc_reg_operand" "%r"))
(zero_extend:SI
(match_operand:HI 2 "gpc_reg_operand" "r")))
(const_int 0)))
(set (match_operand:SI 0 "gpc_reg_operand" "=r")
(mult:SI (zero_extend:SI
(match_dup 1))
(zero_extend:SI
(match_dup 2))))]
"TARGET_MULHW"
"mullhwu. %0,%1,%2"
[(set_attr "type" "halfmul")])
(define_insn "*mullhwu"
[(set (match_operand:SI 0 "gpc_reg_operand" "=r")
(mult:SI (zero_extend:SI
(match_operand:HI 1 "gpc_reg_operand" "%r"))
(zero_extend:SI
(match_operand:HI 2 "gpc_reg_operand" "r"))))]
"TARGET_MULHW"
"mullhwu %0,%1,%2"
[(set_attr "type" "halfmul")])
;; IBM 405, 440, 464 and 476 string-search dlmzb instruction support.
(define_insn "dlmzb"
[(set (match_operand:CC 3 "cc_reg_operand" "=x")
(unspec:CC [(match_operand:SI 1 "gpc_reg_operand" "r")
(match_operand:SI 2 "gpc_reg_operand" "r")]
UNSPEC_DLMZB_CR))
(set (match_operand:SI 0 "gpc_reg_operand" "=r")
(unspec:SI [(match_dup 1)
(match_dup 2)]
UNSPEC_DLMZB))]
"TARGET_DLMZB"
"dlmzb. %0,%1,%2")
(define_expand "strlensi"
[(set (match_operand:SI 0 "gpc_reg_operand")
(unspec:SI [(match_operand:BLK 1 "general_operand")
(match_operand:QI 2 "const_int_operand")
(match_operand 3 "const_int_operand")]
UNSPEC_DLMZB_STRLEN))
(clobber (match_scratch:CC 4))]
"TARGET_DLMZB && WORDS_BIG_ENDIAN && !optimize_size"
{
rtx result = operands[0];
rtx src = operands[1];
rtx search_char = operands[2];
rtx align = operands[3];
rtx addr, scratch_string, word1, word2, scratch_dlmzb;
rtx loop_label, end_label, mem, cr0, cond;
if (search_char != const0_rtx
|| !CONST_INT_P (align)
|| INTVAL (align) < 8)
FAIL;
word1 = gen_reg_rtx (SImode);
word2 = gen_reg_rtx (SImode);
scratch_dlmzb = gen_reg_rtx (SImode);
scratch_string = gen_reg_rtx (Pmode);
loop_label = gen_label_rtx ();
end_label = gen_label_rtx ();
addr = force_reg (Pmode, XEXP (src, 0));
emit_move_insn (scratch_string, addr);
emit_label (loop_label);
mem = change_address (src, SImode, scratch_string);
emit_move_insn (word1, mem);
emit_move_insn (word2, adjust_address (mem, SImode, 4));
cr0 = gen_rtx_REG (CCmode, CR0_REGNO);
emit_insn (gen_dlmzb (scratch_dlmzb, word1, word2, cr0));
cond = gen_rtx_NE (VOIDmode, cr0, const0_rtx);
emit_jump_insn (gen_rtx_SET (pc_rtx,
gen_rtx_IF_THEN_ELSE (VOIDmode,
cond,
gen_rtx_LABEL_REF
(VOIDmode,
end_label),
pc_rtx)));
emit_insn (gen_addsi3 (scratch_string, scratch_string, GEN_INT (8)));
emit_jump_insn (gen_rtx_SET (pc_rtx,
gen_rtx_LABEL_REF (VOIDmode, loop_label)));
emit_barrier ();
emit_label (end_label);
emit_insn (gen_addsi3 (scratch_string, scratch_string, scratch_dlmzb));
emit_insn (gen_subsi3 (result, scratch_string, addr));
emit_insn (gen_addsi3 (result, result, constm1_rtx));
DONE;
})
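;; The loop generated above scans the string eight aligned bytes at a time:
;; each iteration loads two words and runs dlmzb., branching out of the loop
;; once CR0 reports a zero byte.  The length is then computed as
;; (scan pointer + dlmzb result) - start - 1; the trailing -1 keeps the
;; terminating zero byte out of the returned length.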
;; Fixed-point arithmetic insns.
(define_expand "add<mode>3"
[(set (match_operand:SDI 0 "gpc_reg_operand")
(plus:SDI (match_operand:SDI 1 "gpc_reg_operand")
(match_operand:SDI 2 "reg_or_add_cint_operand")))]
""
{
if (<MODE>mode == DImode && !TARGET_POWERPC64)
{
rtx lo0 = gen_lowpart (SImode, operands[0]);
rtx lo1 = gen_lowpart (SImode, operands[1]);
rtx lo2 = gen_lowpart (SImode, operands[2]);
rtx hi0 = gen_highpart (SImode, operands[0]);
rtx hi1 = gen_highpart (SImode, operands[1]);
rtx hi2 = gen_highpart_mode (SImode, DImode, operands[2]);
if (!reg_or_short_operand (lo2, SImode))
lo2 = force_reg (SImode, lo2);
if (!adde_operand (hi2, SImode))
hi2 = force_reg (SImode, hi2);
emit_insn (gen_addsi3_carry (lo0, lo1, lo2));
emit_insn (gen_addsi3_carry_in (hi0, hi1, hi2));
DONE;
}
if (CONST_INT_P (operands[2]) && !add_operand (operands[2], <MODE>mode))
{
rtx tmp = ((!can_create_pseudo_p ()
|| rtx_equal_p (operands[0], operands[1]))
? operands[0] : gen_reg_rtx (<MODE>mode));
/* Adding a constant to r0 is not a valid insn, so use a different
strategy in that case. */
if (reg_or_subregno (operands[1]) == 0 || reg_or_subregno (tmp) == 0)
{
if (operands[0] == operands[1])
FAIL;
rs6000_emit_move (operands[0], operands[2], <MODE>mode);
emit_insn (gen_add<mode>3 (operands[0], operands[1], operands[0]));
DONE;
}
HOST_WIDE_INT val = INTVAL (operands[2]);
HOST_WIDE_INT low = ((val & 0xffff) ^ 0x8000) - 0x8000;
HOST_WIDE_INT rest = trunc_int_for_mode (val - low, <MODE>mode);
if (<MODE>mode == DImode && !satisfies_constraint_L (GEN_INT (rest)))
FAIL;
/* The ordering here is important for the prolog expander.
When space is allocated from the stack, adding 'low' first may
produce a temporary deallocation (which would be bad). */
emit_insn (gen_add<mode>3 (tmp, operands[1], GEN_INT (rest)));
emit_insn (gen_add<mode>3 (operands[0], tmp, GEN_INT (low)));
DONE;
}
})
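;; A worked example of the low/rest split used above (and by the define_split
;; further down): adding 0x12345 computes low = ((0x2345 ^ 0x8000) - 0x8000)
;; = 0x2345 and rest = 0x10000, so the addition is emitted as
;; "addis tmp,%1,1" followed by "addi %0,tmp,0x2345".  When the low half is
;; 0x8000 or more, low becomes negative and rest is rounded up, e.g. 0x18000
;; splits into rest = 0x20000 and low = -0x8000.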
(define_insn "*add<mode>3"
[(set (match_operand:GPR 0 "gpc_reg_operand" "=r,r,r,r")
(plus:GPR (match_operand:GPR 1 "gpc_reg_operand" "%r,b,b,b")
(match_operand:GPR 2 "add_operand" "r,I,L,eI")))]
""
"@
add %0,%1,%2
addi %0,%1,%2
addis %0,%1,%v2
addi %0,%1,%2"
[(set_attr "type" "add")
(set_attr "isa" "*,*,*,p10")])
(define_insn "*addsi3_high"
[(set (match_operand:SI 0 "gpc_reg_operand" "=b")
(plus:SI (match_operand:SI 1 "gpc_reg_operand" "b")
(high:SI (match_operand 2 "" ""))))]
"TARGET_MACHO && !TARGET_64BIT"
"addis %0,%1,ha16(%2)"
[(set_attr "type" "add")])
(define_insn_and_split "*add<mode>3_dot"
[(set (match_operand:CC 3 "cc_reg_operand" "=x,?y")
(compare:CC (plus:GPR (match_operand:GPR 1 "gpc_reg_operand" "r,r")
(match_operand:GPR 2 "gpc_reg_operand" "r,r"))
(const_int 0)))
(clobber (match_scratch:GPR 0 "=r,r"))]
"<MODE>mode == Pmode"
"@
add. %0,%1,%2
#"
"&& reload_completed && cc_reg_not_cr0_operand (operands[3], CCmode)"
[(set (match_dup 0)
(plus:GPR (match_dup 1)
(match_dup 2)))
(set (match_dup 3)
(compare:CC (match_dup 0)
(const_int 0)))]
""
[(set_attr "type" "add")
(set_attr "dot" "yes")
(set_attr "length" "4,8")])
(define_insn_and_split "*add<mode>3_dot2"
[(set (match_operand:CC 3 "cc_reg_operand" "=x,?y")
(compare:CC (plus:GPR (match_operand:GPR 1 "gpc_reg_operand" "r,r")
(match_operand:GPR 2 "gpc_reg_operand" "r,r"))
(const_int 0)))
(set (match_operand:GPR 0 "gpc_reg_operand" "=r,r")
(plus:GPR (match_dup 1)
(match_dup 2)))]
"<MODE>mode == Pmode"
"@
add. %0,%1,%2
#"
"&& reload_completed && cc_reg_not_cr0_operand (operands[3], CCmode)"
[(set (match_dup 0)
(plus:GPR (match_dup 1)
(match_dup 2)))
(set (match_dup 3)
(compare:CC (match_dup 0)
(const_int 0)))]
""
[(set_attr "type" "add")
(set_attr "dot" "yes")
(set_attr "length" "4,8")])
(define_insn_and_split "*add<mode>3_imm_dot"
[(set (match_operand:CC 3 "cc_reg_operand" "=x,?y")
(compare:CC (plus:GPR (match_operand:GPR 1 "gpc_reg_operand" "%r,b")
(match_operand:GPR 2 "short_cint_operand" "I,I"))
(const_int 0)))
(clobber (match_scratch:GPR 0 "=r,r"))
(clobber (reg:GPR CA_REGNO))]
"<MODE>mode == Pmode"
"@
addic. %0,%1,%2
#"
"&& reload_completed && cc_reg_not_cr0_operand (operands[3], CCmode)"
[(set (match_dup 0)
(plus:GPR (match_dup 1)
(match_dup 2)))
(set (match_dup 3)
(compare:CC (match_dup 0)
(const_int 0)))]
""
[(set_attr "type" "add")
(set_attr "dot" "yes")
(set_attr "length" "4,8")])
(define_insn_and_split "*add<mode>3_imm_dot2"
[(set (match_operand:CC 3 "cc_reg_operand" "=x,?y")
(compare:CC (plus:GPR (match_operand:GPR 1 "gpc_reg_operand" "%r,b")
(match_operand:GPR 2 "short_cint_operand" "I,I"))
(const_int 0)))
(set (match_operand:GPR 0 "gpc_reg_operand" "=r,r")
(plus:GPR (match_dup 1)
(match_dup 2)))
(clobber (reg:GPR CA_REGNO))]
"<MODE>mode == Pmode"
"@
addic. %0,%1,%2
#"
"&& reload_completed && cc_reg_not_cr0_operand (operands[3], CCmode)"
[(set (match_dup 0)
(plus:GPR (match_dup 1)
(match_dup 2)))
(set (match_dup 3)
(compare:CC (match_dup 0)
(const_int 0)))]
""
[(set_attr "type" "add")
(set_attr "dot" "yes")
(set_attr "length" "4,8")])
;; Split an add that we can't do in one insn into two insns, each of which
;; does one 16-bit part. This is used by combine. Note that the low-order
;; add should be last in case the result gets used in an address.
(define_split
[(set (match_operand:GPR 0 "gpc_reg_operand")
(plus:GPR (match_operand:GPR 1 "gpc_reg_operand")
(match_operand:GPR 2 "non_add_cint_operand")))]
""
[(set (match_dup 0) (plus:GPR (match_dup 1) (match_dup 3)))
(set (match_dup 0) (plus:GPR (match_dup 0) (match_dup 4)))]
{
HOST_WIDE_INT val = INTVAL (operands[2]);
HOST_WIDE_INT low = ((val & 0xffff) ^ 0x8000) - 0x8000;
HOST_WIDE_INT rest = trunc_int_for_mode (val - low, <MODE>mode);
operands[4] = GEN_INT (low);
if (<MODE>mode == SImode || satisfies_constraint_L (GEN_INT (rest)))
operands[3] = GEN_INT (rest);
else if (can_create_pseudo_p ())
{
operands[3] = gen_reg_rtx (DImode);
emit_move_insn (operands[3], operands[2]);
emit_insn (gen_adddi3 (operands[0], operands[1], operands[3]));
DONE;
}
else
FAIL;
})
(define_insn "add<mode>3_carry"
[(set (match_operand:P 0 "gpc_reg_operand" "=r")
(plus:P (match_operand:P 1 "gpc_reg_operand" "r")
(match_operand:P 2 "reg_or_short_operand" "rI")))
(set (reg:P CA_REGNO)
(ltu:P (plus:P (match_dup 1)
(match_dup 2))
(match_dup 1)))]
""
"add%I2c %0,%1,%2"
[(set_attr "type" "add")])
(define_insn "*add<mode>3_imm_carry_pos"
[(set (match_operand:P 0 "gpc_reg_operand" "=r")
(plus:P (match_operand:P 1 "gpc_reg_operand" "r")
(match_operand:P 2 "short_cint_operand" "n")))
(set (reg:P CA_REGNO)
(geu:P (match_dup 1)
(match_operand:P 3 "const_int_operand" "n")))]
"INTVAL (operands[2]) > 0
&& INTVAL (operands[2]) + INTVAL (operands[3]) == 0"
"addic %0,%1,%2"
[(set_attr "type" "add")])
(define_insn "*add<mode>3_imm_carry_0"
[(set (match_operand:P 0 "gpc_reg_operand" "=r")
(match_operand:P 1 "gpc_reg_operand" "r"))
(set (reg:P CA_REGNO)
(const_int 0))]
""
"addic %0,%1,0"
[(set_attr "type" "add")])
(define_insn "*add<mode>3_imm_carry_m1"
[(set (match_operand:P 0 "gpc_reg_operand" "=r")
(plus:P (match_operand:P 1 "gpc_reg_operand" "r")
(const_int -1)))
(set (reg:P CA_REGNO)
(ne:P (match_dup 1)
(const_int 0)))]
""
"addic %0,%1,-1"
[(set_attr "type" "add")])
(define_insn "*add<mode>3_imm_carry_neg"
[(set (match_operand:P 0 "gpc_reg_operand" "=r")
(plus:P (match_operand:P 1 "gpc_reg_operand" "r")
(match_operand:P 2 "short_cint_operand" "n")))
(set (reg:P CA_REGNO)
(gtu:P (match_dup 1)
(match_operand:P 3 "const_int_operand" "n")))]
"INTVAL (operands[2]) < 0
&& INTVAL (operands[2]) + INTVAL (operands[3]) == -1"
"addic %0,%1,%2"
[(set_attr "type" "add")])
(define_expand "add<mode>3_carry_in"
[(parallel [
(set (match_operand:GPR 0 "gpc_reg_operand")
(plus:GPR (plus:GPR (match_operand:GPR 1 "gpc_reg_operand")
(match_operand:GPR 2 "adde_operand"))
(reg:GPR CA_REGNO)))
(clobber (reg:GPR CA_REGNO))])]
""
{
if (operands[2] == const0_rtx)
{
emit_insn (gen_add<mode>3_carry_in_0 (operands[0], operands[1]));
DONE;
}
if (operands[2] == constm1_rtx)
{
emit_insn (gen_add<mode>3_carry_in_m1 (operands[0], operands[1]));
DONE;
}
})
(define_insn "*add<mode>3_carry_in_internal"
[(set (match_operand:GPR 0 "gpc_reg_operand" "=r")
(plus:GPR (plus:GPR (match_operand:GPR 1 "gpc_reg_operand" "r")
(match_operand:GPR 2 "gpc_reg_operand" "r"))
(reg:GPR CA_REGNO)))
(clobber (reg:GPR CA_REGNO))]
""
"adde %0,%1,%2"
[(set_attr "type" "add")])
(define_insn "*add<mode>3_carry_in_internal2"
[(set (match_operand:GPR 0 "gpc_reg_operand" "=r")
(plus:GPR (plus:GPR (match_operand:GPR 1 "gpc_reg_operand" "r")
(reg:GPR CA_REGNO))
(match_operand:GPR 2 "gpc_reg_operand" "r")))
(clobber (reg:GPR CA_REGNO))]
""
"adde %0,%1,%2"
[(set_attr "type" "add")])
(define_insn "add<mode>3_carry_in_0"
[(set (match_operand:GPR 0 "gpc_reg_operand" "=r")
(plus:GPR (match_operand:GPR 1 "gpc_reg_operand" "r")
(reg:GPR CA_REGNO)))
(clobber (reg:GPR CA_REGNO))]
""
"addze %0,%1"
[(set_attr "type" "add")])
(define_insn "add<mode>3_carry_in_m1"
[(set (match_operand:GPR 0 "gpc_reg_operand" "=r")
(plus:GPR (plus:GPR (match_operand:GPR 1 "gpc_reg_operand" "r")
(reg:GPR CA_REGNO))
(const_int -1)))
(clobber (reg:GPR CA_REGNO))]
""
"addme %0,%1"
[(set_attr "type" "add")])
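;; Taken together, these carry patterns let a DImode add on a 32-bit target
;; (see the add<mode>3 expander above) be emitted as a carry-setting add of the
;; low halves (addc/addic) followed by a carry-consuming add of the high
;; halves, where adde is the general case and addze/addme handle a high half
;; of 0 or -1.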
(define_expand "one_cmpl<mode>2"
[(set (match_operand:SDI 0 "gpc_reg_operand")
(not:SDI (match_operand:SDI 1 "gpc_reg_operand")))]
""
{
if (<MODE>mode == DImode && !TARGET_POWERPC64)
{
rs6000_split_logical (operands, NOT, false, false, false);
DONE;
}
})
(define_insn "*one_cmpl<mode>2"
[(set (match_operand:GPR 0 "gpc_reg_operand" "=r")
(not:GPR (match_operand:GPR 1 "gpc_reg_operand" "r")))]
""
"not %0,%1")
(define_insn_and_split "*one_cmpl<mode>2_dot"
[(set (match_operand:CC 2 "cc_reg_operand" "=x,?y")
(compare:CC (not:GPR (match_operand:GPR 1 "gpc_reg_operand" "r,r"))
(const_int 0)))
(clobber (match_scratch:GPR 0 "=r,r"))]
"<MODE>mode == Pmode"
"@
not. %0,%1
#"
"&& reload_completed && cc_reg_not_cr0_operand (operands[2], CCmode)"
[(set (match_dup 0)
(not:GPR (match_dup 1)))
(set (match_dup 2)
(compare:CC (match_dup 0)
(const_int 0)))]
""
[(set_attr "type" "logical")
(set_attr "dot" "yes")
(set_attr "length" "4,8")])
(define_insn_and_split "*one_cmpl<mode>2_dot2"
[(set (match_operand:CC 2 "cc_reg_operand" "=x,?y")
(compare:CC (not:GPR (match_operand:GPR 1 "gpc_reg_operand" "r,r"))
(const_int 0)))
(set (match_operand:GPR 0 "gpc_reg_operand" "=r,r")
(not:GPR (match_dup 1)))]
"<MODE>mode == Pmode"
"@
not. %0,%1
#"
"&& reload_completed && cc_reg_not_cr0_operand (operands[2], CCmode)"
[(set (match_dup 0)
(not:GPR (match_dup 1)))
(set (match_dup 2)
(compare:CC (match_dup 0)
(const_int 0)))]
""
[(set_attr "type" "logical")
(set_attr "dot" "yes")
(set_attr "length" "4,8")])
(define_expand "sub<mode>3"
[(set (match_operand:SDI 0 "gpc_reg_operand")
(minus:SDI (match_operand:SDI 1 "reg_or_short_operand")
(match_operand:SDI 2 "gpc_reg_operand")))]
""
{
if (<MODE>mode == DImode && !TARGET_POWERPC64)
{
rtx lo0 = gen_lowpart (SImode, operands[0]);
rtx lo1 = gen_lowpart (SImode, operands[1]);
rtx lo2 = gen_lowpart (SImode, operands[2]);
rtx hi0 = gen_highpart (SImode, operands[0]);
rtx hi1 = gen_highpart_mode (SImode, DImode, operands[1]);
rtx hi2 = gen_highpart (SImode, operands[2]);
if (!reg_or_short_operand (lo1, SImode))
lo1 = force_reg (SImode, lo1);
if (!adde_operand (hi1, SImode))
hi1 = force_reg (SImode, hi1);
emit_insn (gen_subfsi3_carry (lo0, lo2, lo1));
emit_insn (gen_subfsi3_carry_in (hi0, hi2, hi1));
DONE;
}
if (short_cint_operand (operands[1], <MODE>mode))
{
emit_insn (gen_subf<mode>3_imm (operands[0], operands[2], operands[1]));
DONE;
}
})
(define_insn "*subf<mode>3"
[(set (match_operand:GPR 0 "gpc_reg_operand" "=r")
(minus:GPR (match_operand:GPR 2 "gpc_reg_operand" "r")
(match_operand:GPR 1 "gpc_reg_operand" "r")))]
""
"subf %0,%1,%2"
[(set_attr "type" "add")])
(define_insn_and_split "*subf<mode>3_dot"
[(set (match_operand:CC 3 "cc_reg_operand" "=x,?y")
(compare:CC (minus:GPR (match_operand:GPR 2 "gpc_reg_operand" "r,r")
(match_operand:GPR 1 "gpc_reg_operand" "r,r"))
(const_int 0)))
(clobber (match_scratch:GPR 0 "=r,r"))]
"<MODE>mode == Pmode"
"@
subf. %0,%1,%2
#"
"&& reload_completed && cc_reg_not_cr0_operand (operands[3], CCmode)"
[(set (match_dup 0)
(minus:GPR (match_dup 2)
(match_dup 1)))
(set (match_dup 3)
(compare:CC (match_dup 0)
(const_int 0)))]
""
[(set_attr "type" "add")
(set_attr "dot" "yes")
(set_attr "length" "4,8")])
(define_insn_and_split "*subf<mode>3_dot2"
[(set (match_operand:CC 3 "cc_reg_operand" "=x,?y")
(compare:CC (minus:GPR (match_operand:GPR 2 "gpc_reg_operand" "r,r")
(match_operand:GPR 1 "gpc_reg_operand" "r,r"))
(const_int 0)))
(set (match_operand:GPR 0 "gpc_reg_operand" "=r,r")
(minus:GPR (match_dup 2)
(match_dup 1)))]
"<MODE>mode == Pmode"
"@
subf. %0,%1,%2
#"
"&& reload_completed && cc_reg_not_cr0_operand (operands[3], CCmode)"
[(set (match_dup 0)
(minus:GPR (match_dup 2)
(match_dup 1)))
(set (match_dup 3)
(compare:CC (match_dup 0)
(const_int 0)))]
""
[(set_attr "type" "add")
(set_attr "dot" "yes")
(set_attr "length" "4,8")])
(define_insn "subf<mode>3_imm"
[(set (match_operand:GPR 0 "gpc_reg_operand" "=r")
(minus:GPR (match_operand:GPR 2 "short_cint_operand" "I")
(match_operand:GPR 1 "gpc_reg_operand" "r")))
(clobber (reg:GPR CA_REGNO))]
""
"subfic %0,%1,%2"
[(set_attr "type" "add")])
(define_insn_and_split "subf<mode>3_carry_dot2"
[(set (match_operand:CC 3 "cc_reg_operand" "=x,?y")
(compare:CC (minus:P (match_operand:P 2 "gpc_reg_operand" "r,r")
(match_operand:P 1 "gpc_reg_operand" "r,r"))
(const_int 0)))
(set (match_operand:P 0 "gpc_reg_operand" "=r,r")
(minus:P (match_dup 2)
(match_dup 1)))
(set (reg:P CA_REGNO)
(leu:P (match_dup 1)
(match_dup 2)))]
"<MODE>mode == Pmode"
"@
subfc. %0,%1,%2
#"
"&& reload_completed && cc_reg_not_cr0_operand (operands[3], CCmode)"
[(parallel [(set (match_dup 0)
(minus:P (match_dup 2)
(match_dup 1)))
(set (reg:P CA_REGNO)
(leu:P (match_dup 1)
(match_dup 2)))])
(set (match_dup 3)
(compare:CC (match_dup 0)
(const_int 0)))]
""
[(set_attr "type" "add")
(set_attr "dot" "yes")
(set_attr "length" "4,8")])
(define_insn "subf<mode>3_carry"
[(set (match_operand:P 0 "gpc_reg_operand" "=r")
(minus:P (match_operand:P 2 "reg_or_short_operand" "rI")
(match_operand:P 1 "gpc_reg_operand" "r")))
(set (reg:P CA_REGNO)
(leu:P (match_dup 1)
(match_dup 2)))]
""
"subf%I2c %0,%1,%2"
[(set_attr "type" "add")])
(define_insn "*subf<mode>3_imm_carry_0"
[(set (match_operand:P 0 "gpc_reg_operand" "=r")
(neg:P (match_operand:P 1 "gpc_reg_operand" "r")))
(set (reg:P CA_REGNO)
(eq:P (match_dup 1)
(const_int 0)))]
""
"subfic %0,%1,0"
[(set_attr "type" "add")])
(define_insn "*subf<mode>3_imm_carry_m1"
[(set (match_operand:P 0 "gpc_reg_operand" "=r")
(not:P (match_operand:P 1 "gpc_reg_operand" "r")))
(set (reg:P CA_REGNO)
(const_int 1))]
""
"subfic %0,%1,-1"
[(set_attr "type" "add")])
(define_expand "subf<mode>3_carry_in"
[(parallel [
(set (match_operand:GPR 0 "gpc_reg_operand")
(plus:GPR (plus:GPR (not:GPR (match_operand:GPR 1 "gpc_reg_operand"))
(reg:GPR CA_REGNO))
(match_operand:GPR 2 "adde_operand")))
(clobber (reg:GPR CA_REGNO))])]
""
{
if (operands[2] == const0_rtx)
{
emit_insn (gen_subf<mode>3_carry_in_0 (operands[0], operands[1]));
DONE;
}
if (operands[2] == constm1_rtx)
{
emit_insn (gen_subf<mode>3_carry_in_m1 (operands[0], operands[1]));
DONE;
}
})
(define_insn "*subf<mode>3_carry_in_internal"
[(set (match_operand:GPR 0 "gpc_reg_operand" "=r")
(plus:GPR (plus:GPR (not:GPR (match_operand:GPR 1 "gpc_reg_operand" "r"))
(reg:GPR CA_REGNO))
(match_operand:GPR 2 "gpc_reg_operand" "r")))
(clobber (reg:GPR CA_REGNO))]
""
"subfe %0,%1,%2"
[(set_attr "type" "add")])
(define_insn "subf<mode>3_carry_in_0"
[(set (match_operand:GPR 0 "gpc_reg_operand" "=r")
(plus:GPR (not:GPR (match_operand:GPR 1 "gpc_reg_operand" "r"))
(reg:GPR CA_REGNO)))
(clobber (reg:GPR CA_REGNO))]
""
"subfze %0,%1"
[(set_attr "type" "add")])
(define_insn "subf<mode>3_carry_in_m1"
[(set (match_operand:GPR 0 "gpc_reg_operand" "=r")
(plus:GPR (minus:GPR (reg:GPR CA_REGNO)
(match_operand:GPR 1 "gpc_reg_operand" "r"))
(const_int -2)))
(clobber (reg:GPR CA_REGNO))]
""
"subfme %0,%1"
[(set_attr "type" "add")])
(define_insn "subf<mode>3_carry_in_xx"
[(set (match_operand:GPR 0 "gpc_reg_operand" "=r")
(plus:GPR (reg:GPR CA_REGNO)
(const_int -1)))
(clobber (reg:GPR CA_REGNO))]
""
"subfe %0,%0,%0"
[(set_attr "type" "add")])
(define_insn "@neg<mode>2"
[(set (match_operand:GPR 0 "gpc_reg_operand" "=r")
(neg:GPR (match_operand:GPR 1 "gpc_reg_operand" "r")))]
""
"neg %0,%1"
[(set_attr "type" "add")])
(define_insn_and_split "*neg<mode>2_dot"
[(set (match_operand:CC 2 "cc_reg_operand" "=x,?y")
(compare:CC (neg:GPR (match_operand:GPR 1 "gpc_reg_operand" "r,r"))
(const_int 0)))
(clobber (match_scratch:GPR 0 "=r,r"))]
"<MODE>mode == Pmode"
"@
neg. %0,%1
#"
"&& reload_completed && cc_reg_not_cr0_operand (operands[2], CCmode)"
[(set (match_dup 0)
(neg:GPR (match_dup 1)))
(set (match_dup 2)
(compare:CC (match_dup 0)
(const_int 0)))]
""
[(set_attr "type" "add")
(set_attr "dot" "yes")
(set_attr "length" "4,8")])
(define_insn_and_split "*neg<mode>2_dot2"
[(set (match_operand:CC 2 "cc_reg_operand" "=x,?y")
(compare:CC (neg:GPR (match_operand:GPR 1 "gpc_reg_operand" "r,r"))
(const_int 0)))
(set (match_operand:GPR 0 "gpc_reg_operand" "=r,r")
(neg:GPR (match_dup 1)))]
"<MODE>mode == Pmode"
"@
neg. %0,%1
#"
"&& reload_completed && cc_reg_not_cr0_operand (operands[2], CCmode)"
[(set (match_dup 0)
(neg:GPR (match_dup 1)))
(set (match_dup 2)
(compare:CC (match_dup 0)
(const_int 0)))]
""
[(set_attr "type" "add")
(set_attr "dot" "yes")
(set_attr "length" "4,8")])
(define_insn "clz<mode>2"
[(set (match_operand:GPR 0 "gpc_reg_operand" "=r")
(clz:GPR (match_operand:GPR 1 "gpc_reg_operand" "r")))]
""
"cntlz<wd> %0,%1"
[(set_attr "type" "cntlz")])
(define_expand "ctz<mode>2"
[(set (match_operand:GPR 0 "gpc_reg_operand")
(ctz:GPR (match_operand:GPR 1 "gpc_reg_operand")))]
""
{
if (TARGET_CTZ)
{
emit_insn (gen_ctz<mode>2_hw (operands[0], operands[1]));
DONE;
}
rtx tmp1 = gen_reg_rtx (<MODE>mode);
rtx tmp2 = gen_reg_rtx (<MODE>mode);
rtx tmp3 = gen_reg_rtx (<MODE>mode);
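/* Illustrative sketch of the two fallback expansions below: with popcntd,
   ctz (x) == popcount ((x - 1) & ~x), since (x - 1) & ~x has exactly the
   ctz (x) low-order bits set.  Otherwise, x & -x isolates the lowest set
   bit, so for x != 0, ctz (x) == (<bits> - 1) - clz (x & -x).  */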
if (TARGET_POPCNTD)
{
emit_insn (gen_add<mode>3 (tmp1, operands[1], constm1_rtx));
emit_insn (gen_one_cmpl<mode>2 (tmp2, operands[1]));
emit_insn (gen_and<mode>3 (tmp3, tmp1, tmp2));
emit_insn (gen_popcntd<mode>2 (operands[0], tmp3));
}
else
{
emit_insn (gen_neg<mode>2 (tmp1, operands[1]));
emit_insn (gen_and<mode>3 (tmp2, operands[1], tmp1));
emit_insn (gen_clz<mode>2 (tmp3, tmp2));
emit_insn (gen_sub<mode>3 (operands[0], GEN_INT (<bits> - 1), tmp3));
}
DONE;
})
(define_insn "ctz<mode>2_hw"
[(set (match_operand:GPR 0 "gpc_reg_operand" "=r")
(ctz:GPR (match_operand:GPR 1 "gpc_reg_operand" "r")))]
"TARGET_CTZ"
"cnttz<wd> %0,%1"
[(set_attr "type" "cntlz")])
(define_expand "ffs<mode>2"
[(set (match_operand:GPR 0 "gpc_reg_operand")
(ffs:GPR (match_operand:GPR 1 "gpc_reg_operand")))]
""
{
rtx tmp1 = gen_reg_rtx (<MODE>mode);
rtx tmp2 = gen_reg_rtx (<MODE>mode);
rtx tmp3 = gen_reg_rtx (<MODE>mode);
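/* Illustrative note: x & -x isolates the lowest set bit, so the result is
   <bits> - clz (x & -x); for x == 0 this gives <bits> - <bits> == 0, which
   matches the ffs convention (cntlz of zero yields the operand width).  */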
emit_insn (gen_neg<mode>2 (tmp1, operands[1]));
emit_insn (gen_and<mode>3 (tmp2, operands[1], tmp1));
emit_insn (gen_clz<mode>2 (tmp3, tmp2));
emit_insn (gen_sub<mode>3 (operands[0], GEN_INT (<bits>), tmp3));
DONE;
})
(define_expand "popcount<mode>2"
[(set (match_operand:GPR 0 "gpc_reg_operand")
(popcount:GPR (match_operand:GPR 1 "gpc_reg_operand")))]
"TARGET_POPCNTB || TARGET_POPCNTD"
{
rs6000_emit_popcount (operands[0], operands[1]);
DONE;
})
(define_insn "popcntb<mode>2"
[(set (match_operand:GPR 0 "gpc_reg_operand" "=r")
(unspec:GPR [(match_operand:GPR 1 "gpc_reg_operand" "r")]
UNSPEC_POPCNTB))]
"TARGET_POPCNTB"
"popcntb %0,%1"
[(set_attr "type" "popcnt")])
(define_insn "popcntd<mode>2"
[(set (match_operand:GPR 0 "gpc_reg_operand" "=r")
(popcount:GPR (match_operand:GPR 1 "gpc_reg_operand" "r")))]
"TARGET_POPCNTD"
"popcnt<wd> %0,%1"
[(set_attr "type" "popcnt")])
(define_expand "parity<mode>2"
[(set (match_operand:GPR 0 "gpc_reg_operand")
(parity:GPR (match_operand:GPR 1 "gpc_reg_operand")))]
"TARGET_POPCNTB"
{
rs6000_emit_parity (operands[0], operands[1]);
DONE;
})
(define_insn "parity<mode>2_cmpb"
[(set (match_operand:GPR 0 "gpc_reg_operand" "=r")
(unspec:GPR [(match_operand:GPR 1 "gpc_reg_operand" "r")] UNSPEC_PARITY))]
"TARGET_CMPB && TARGET_POPCNTB"
"prty<wd> %0,%1"
[(set_attr "type" "popcnt")])
(define_insn "cmpb<mode>3"
[(set (match_operand:GPR 0 "gpc_reg_operand" "=r")
(unspec:GPR [(match_operand:GPR 1 "gpc_reg_operand" "r")
(match_operand:GPR 2 "gpc_reg_operand" "r")] UNSPEC_CMPB))]
"TARGET_CMPB"
"cmpb %0,%1,%2"
[(set_attr "type" "cmp")])
;; Since the hardware zeros the upper part of the register, save generating the
;; AND immediate if we are converting to unsigned.
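;; For example (an illustrative case, not an exhaustive one): on 64-bit
;; targets, (unsigned long long) __builtin_bswap32 (*p) needs only a single
;; lwbrx, because the byte-reversing load already clears the upper bits of
;; the destination register.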
(define_insn "*bswap<mode>2_extenddi"
[(set (match_operand:DI 0 "gpc_reg_operand" "=r")
(zero_extend:DI
(bswap:HSI (match_operand:HSI 1 "memory_operand" "Z"))))]
"TARGET_POWERPC64"
"l<wd>brx %0,%y1"
[(set_attr "type" "load")])
(define_insn "*bswaphi2_extendsi"
[(set (match_operand:SI 0 "gpc_reg_operand" "=r")
(zero_extend:SI
(bswap:HI (match_operand:HI 1 "memory_operand" "Z"))))]
""
"lhbrx %0,%y1"
[(set_attr "type" "load")])
;; Separate the bswap patterns into load, store, and gpr<-gpr. This prevents
;; the register allocator from converting a gpr<-gpr swap into a store and then
;; load with byte swap, which can be slower than doing it in the registers. It
;; also prevents certain failures with the RELOAD register allocator.
(define_expand "bswap<mode>2"
[(use (match_operand:HSI 0 "reg_or_mem_operand"))
(use (match_operand:HSI 1 "reg_or_mem_operand"))]
""
{
rtx dest = operands[0];
rtx src = operands[1];
if (!REG_P (dest) && !REG_P (src))
src = force_reg (<MODE>mode, src);
if (MEM_P (src))
{
src = rs6000_force_indexed_or_indirect_mem (src);
emit_insn (gen_bswap<mode>2_load (dest, src));
}
else if (MEM_P (dest))
{
dest = rs6000_force_indexed_or_indirect_mem (dest);
emit_insn (gen_bswap<mode>2_store (dest, src));
}
else
emit_insn (gen_bswap<mode>2_reg (dest, src));
DONE;
})
(define_insn "bswap<mode>2_load"
[(set (match_operand:HSI 0 "gpc_reg_operand" "=r")
(bswap:HSI (match_operand:HSI 1 "memory_operand" "Z")))]
""
"l<wd>brx %0,%y1"
[(set_attr "type" "load")])
(define_insn "bswap<mode>2_store"
[(set (match_operand:HSI 0 "memory_operand" "=Z")
(bswap:HSI (match_operand:HSI 1 "gpc_reg_operand" "r")))]
""
"st<wd>brx %1,%y0"
[(set_attr "type" "store")])
(define_insn_and_split "bswaphi2_reg"
[(set (match_operand:HI 0 "gpc_reg_operand" "=&r,wa")
(bswap:HI
(match_operand:HI 1 "gpc_reg_operand" "r,wa")))
(clobber (match_scratch:SI 2 "=&r,X"))]
""
"@
#
xxbrh %x0,%x1"
"reload_completed && int_reg_operand (operands[0], HImode)"
[(set (match_dup 3)
(and:SI (lshiftrt:SI (match_dup 4)
(const_int 8))
(const_int 255)))
(set (match_dup 2)
(and:SI (ashift:SI (match_dup 4)
(const_int 8))
(const_int 65280))) ;; 0xff00
(set (match_dup 3)
(ior:SI (match_dup 3)
(match_dup 2)))]
{
operands[3] = simplify_gen_subreg (SImode, operands[0], HImode, 0);
operands[4] = simplify_gen_subreg (SImode, operands[1], HImode, 0);
}
[(set_attr "length" "12,4")
(set_attr "type" "*,vecperm")
(set_attr "isa" "*,p9v")])
;; We are always BITS_BIG_ENDIAN, so the bit positions below in
;; zero_extract insns do not change for -mlittle.
(define_insn_and_split "bswapsi2_reg"
[(set (match_operand:SI 0 "gpc_reg_operand" "=&r,wa")
(bswap:SI
(match_operand:SI 1 "gpc_reg_operand" "r,wa")))]
""
"@
#
xxbrw %x0,%x1"
"reload_completed && int_reg_operand (operands[0], SImode)"
[(set (match_dup 0) ; DABC
(rotate:SI (match_dup 1)
(const_int 24)))
(set (match_dup 0) ; DCBC
(ior:SI (and:SI (ashift:SI (match_dup 1)
(const_int 8))
(const_int 16711680))
(and:SI (match_dup 0)
(const_int -16711681))))
(set (match_dup 0) ; DCBA
(ior:SI (and:SI (lshiftrt:SI (match_dup 1)
(const_int 24))
(const_int 255))
(and:SI (match_dup 0)
(const_int -256))))]
""
[(set_attr "length" "12,4")
(set_attr "type" "*,vecperm")
(set_attr "isa" "*,p9v")])
;; On systems with LDBRX/STDBRX generate the loads/stores directly, just like
;; we do for L{H,W}BRX and ST{H,W}BRX above. If not, we have to generate more
;; complex code.
(define_expand "bswapdi2"
[(parallel [(set (match_operand:DI 0 "reg_or_mem_operand")
(bswap:DI
(match_operand:DI 1 "reg_or_mem_operand")))
(clobber (match_scratch:DI 2))
(clobber (match_scratch:DI 3))])]
""
{
rtx dest = operands[0];
rtx src = operands[1];
if (!REG_P (dest) && !REG_P (src))
operands[1] = src = force_reg (DImode, src);
if (TARGET_POWERPC64 && TARGET_LDBRX)
{
if (MEM_P (src))
{
src = rs6000_force_indexed_or_indirect_mem (src);
emit_insn (gen_bswapdi2_load (dest, src));
}
else if (MEM_P (dest))
{
dest = rs6000_force_indexed_or_indirect_mem (dest);
emit_insn (gen_bswapdi2_store (dest, src));
}
else if (TARGET_P9_VECTOR)
emit_insn (gen_bswapdi2_xxbrd (dest, src));
else
emit_insn (gen_bswapdi2_reg (dest, src));
DONE;
}
if (!TARGET_POWERPC64)
{
/* 32-bit mode needs fewer scratch registers, but 32-bit addressing mode
that uses 64-bit registers needs the same scratch registers as 64-bit
mode. */
emit_insn (gen_bswapdi2_32bit (dest, src));
DONE;
}
})
;; Power7/cell has ldbrx/stdbrx, so use them directly.
(define_insn "bswapdi2_load"
[(set (match_operand:DI 0 "gpc_reg_operand" "=r")
(bswap:DI (match_operand:DI 1 "memory_operand" "Z")))]
"TARGET_POWERPC64 && TARGET_LDBRX"
"ldbrx %0,%y1"
[(set_attr "type" "load")])
(define_insn "bswapdi2_store"
[(set (match_operand:DI 0 "memory_operand" "=Z")
(bswap:DI (match_operand:DI 1 "gpc_reg_operand" "r")))]
"TARGET_POWERPC64 && TARGET_LDBRX"
"stdbrx %1,%y0"
[(set_attr "type" "store")])
(define_insn "bswapdi2_xxbrd"
[(set (match_operand:DI 0 "gpc_reg_operand" "=wa")
(bswap:DI (match_operand:DI 1 "gpc_reg_operand" "wa")))]
"TARGET_P9_VECTOR"
"xxbrd %x0,%x1"
[(set_attr "type" "vecperm")
(set_attr "isa" "p9v")])
(define_insn "bswapdi2_reg"
[(set (match_operand:DI 0 "gpc_reg_operand" "=&r")
(bswap:DI (match_operand:DI 1 "gpc_reg_operand" "r")))
(clobber (match_scratch:DI 2 "=&r"))
(clobber (match_scratch:DI 3 "=&r"))]
"TARGET_POWERPC64 && TARGET_LDBRX && !TARGET_P9_VECTOR"
"#"
[(set_attr "length" "36")])
;; Non-power7/cell: fall back to using lwbrx/stwbrx.
(define_insn "*bswapdi2_64bit"
[(set (match_operand:DI 0 "reg_or_mem_operand" "=r,Z,&r")
(bswap:DI (match_operand:DI 1 "reg_or_mem_operand" "Z,r,r")))
(clobber (match_scratch:DI 2 "=&b,&b,&r"))
(clobber (match_scratch:DI 3 "=&r,&r,&r"))]
"TARGET_POWERPC64 && !TARGET_LDBRX
&& (REG_P (operands[0]) || REG_P (operands[1]))
&& !(MEM_P (operands[0]) && MEM_VOLATILE_P (operands[0]))
&& !(MEM_P (operands[1]) && MEM_VOLATILE_P (operands[1]))"
"#"
[(set_attr "length" "16,12,36")])
(define_split
[(set (match_operand:DI 0 "gpc_reg_operand")
(bswap:DI (match_operand:DI 1 "indexed_or_indirect_operand")))
(clobber (match_operand:DI 2 "gpc_reg_operand"))
(clobber (match_operand:DI 3 "gpc_reg_operand"))]
"TARGET_POWERPC64 && !TARGET_LDBRX && reload_completed"
[(const_int 0)]
{
rtx dest = operands[0];
rtx src = operands[1];
rtx op2 = operands[2];
rtx op3 = operands[3];
rtx op3_32 = simplify_gen_subreg (SImode, op3, DImode,
BYTES_BIG_ENDIAN ? 4 : 0);
rtx dest_32 = simplify_gen_subreg (SImode, dest, DImode,
BYTES_BIG_ENDIAN ? 4 : 0);
rtx addr1;
rtx addr2;
rtx word1;
rtx word2;
addr1 = XEXP (src, 0);
if (GET_CODE (addr1) == PLUS)
{
emit_insn (gen_add3_insn (op2, XEXP (addr1, 0), GEN_INT (4)));
if (TARGET_AVOID_XFORM)
{
emit_insn (gen_add3_insn (op2, XEXP (addr1, 1), op2));
addr2 = op2;
}
else
addr2 = gen_rtx_PLUS (Pmode, op2, XEXP (addr1, 1));
}
else if (TARGET_AVOID_XFORM)
{
emit_insn (gen_add3_insn (op2, addr1, GEN_INT (4)));
addr2 = op2;
}
else
{
emit_move_insn (op2, GEN_INT (4));
addr2 = gen_rtx_PLUS (Pmode, op2, addr1);
}
word1 = change_address (src, SImode, addr1);
word2 = change_address (src, SImode, addr2);
if (BYTES_BIG_ENDIAN)
{
emit_insn (gen_bswapsi2 (op3_32, word2));
emit_insn (gen_bswapsi2 (dest_32, word1));
}
else
{
emit_insn (gen_bswapsi2 (op3_32, word1));
emit_insn (gen_bswapsi2 (dest_32, word2));
}
emit_insn (gen_ashldi3 (op3, op3, GEN_INT (32)));
emit_insn (gen_iordi3 (dest, dest, op3));
DONE;
})
(define_split
[(set (match_operand:DI 0 "indexed_or_indirect_operand")
(bswap:DI (match_operand:DI 1 "gpc_reg_operand")))
(clobber (match_operand:DI 2 "gpc_reg_operand"))
(clobber (match_operand:DI 3 "gpc_reg_operand"))]
"TARGET_POWERPC64 && !TARGET_LDBRX && reload_completed"
[(const_int 0)]
{
rtx dest = operands[0];
rtx src = operands[1];
rtx op2 = operands[2];
rtx op3 = operands[3];
rtx src_si = simplify_gen_subreg (SImode, src, DImode,
BYTES_BIG_ENDIAN ? 4 : 0);
rtx op3_si = simplify_gen_subreg (SImode, op3, DImode,
BYTES_BIG_ENDIAN ? 4 : 0);
rtx addr1;
rtx addr2;
rtx word1;
rtx word2;
addr1 = XEXP (dest, 0);
if (GET_CODE (addr1) == PLUS)
{
emit_insn (gen_add3_insn (op2, XEXP (addr1, 0), GEN_INT (4)));
if (TARGET_AVOID_XFORM)
{
emit_insn (gen_add3_insn (op2, XEXP (addr1, 1), op2));
addr2 = op2;
}
else
addr2 = gen_rtx_PLUS (Pmode, op2, XEXP (addr1, 1));
}
else if (TARGET_AVOID_XFORM)
{
emit_insn (gen_add3_insn (op2, addr1, GEN_INT (4)));
addr2 = op2;
}
else
{
emit_move_insn (op2, GEN_INT (4));
addr2 = gen_rtx_PLUS (Pmode, op2, addr1);
}
word1 = change_address (dest, SImode, addr1);
word2 = change_address (dest, SImode, addr2);
emit_insn (gen_lshrdi3 (op3, src, GEN_INT (32)));
if (BYTES_BIG_ENDIAN)
{
emit_insn (gen_bswapsi2 (word1, src_si));
emit_insn (gen_bswapsi2 (word2, op3_si));
}
else
{
emit_insn (gen_bswapsi2 (word2, src_si));
emit_insn (gen_bswapsi2 (word1, op3_si));
}
DONE;
})
(define_split
[(set (match_operand:DI 0 "gpc_reg_operand")
(bswap:DI (match_operand:DI 1 "gpc_reg_operand")))
(clobber (match_operand:DI 2 "gpc_reg_operand"))
(clobber (match_operand:DI 3 "gpc_reg_operand"))]
"TARGET_POWERPC64 && !TARGET_P9_VECTOR && reload_completed"
[(const_int 0)]
{
rtx dest = operands[0];
rtx src = operands[1];
rtx op2 = operands[2];
rtx op3 = operands[3];
int lo_off = BYTES_BIG_ENDIAN ? 4 : 0;
rtx dest_si = simplify_gen_subreg (SImode, dest, DImode, lo_off);
rtx src_si = simplify_gen_subreg (SImode, src, DImode, lo_off);
rtx op2_si = simplify_gen_subreg (SImode, op2, DImode, lo_off);
rtx op3_si = simplify_gen_subreg (SImode, op3, DImode, lo_off);
emit_insn (gen_lshrdi3 (op2, src, GEN_INT (32)));
emit_insn (gen_bswapsi2 (dest_si, src_si));
emit_insn (gen_bswapsi2 (op3_si, op2_si));
emit_insn (gen_ashldi3 (dest, dest, GEN_INT (32)));
emit_insn (gen_iordi3 (dest, dest, op3));
DONE;
})
(define_insn "bswapdi2_32bit"
[(set (match_operand:DI 0 "reg_or_mem_operand" "=r,Z,?&r")
(bswap:DI (match_operand:DI 1 "reg_or_mem_operand" "Z,r,r")))
(clobber (match_scratch:SI 2 "=&b,&b,X"))]
"!TARGET_POWERPC64 && (REG_P (operands[0]) || REG_P (operands[1]))"
"#"
[(set_attr "length" "16,12,36")])
(define_split
[(set (match_operand:DI 0 "gpc_reg_operand")
(bswap:DI (match_operand:DI 1 "indexed_or_indirect_operand")))
(clobber (match_operand:SI 2 "gpc_reg_operand"))]
"!TARGET_POWERPC64 && reload_completed"
[(const_int 0)]
{
rtx dest = operands[0];
rtx src = operands[1];
rtx op2 = operands[2];
rtx dest1 = simplify_gen_subreg (SImode, dest, DImode, 0);
rtx dest2 = simplify_gen_subreg (SImode, dest, DImode, 4);
rtx addr1;
rtx addr2;
rtx word1;
rtx word2;
addr1 = XEXP (src, 0);
if (GET_CODE (addr1) == PLUS)
{
emit_insn (gen_add3_insn (op2, XEXP (addr1, 0), GEN_INT (4)));
if (TARGET_AVOID_XFORM
|| REGNO (XEXP (addr1, 1)) == REGNO (dest2))
{
emit_insn (gen_add3_insn (op2, XEXP (addr1, 1), op2));
addr2 = op2;
}
else
addr2 = gen_rtx_PLUS (SImode, op2, XEXP (addr1, 1));
}
else if (TARGET_AVOID_XFORM
|| REGNO (addr1) == REGNO (dest2))
{
emit_insn (gen_add3_insn (op2, addr1, GEN_INT (4)));
addr2 = op2;
}
else
{
emit_move_insn (op2, GEN_INT (4));
addr2 = gen_rtx_PLUS (SImode, op2, addr1);
}
word1 = change_address (src, SImode, addr1);
word2 = change_address (src, SImode, addr2);
emit_insn (gen_bswapsi2 (dest2, word1));
/* The REGNO (dest2) tests above ensure that addr2 has not been trashed,
thus allowing us to omit an early clobber on the output. */
emit_insn (gen_bswapsi2 (dest1, word2));
DONE;
})
(define_split
[(set (match_operand:DI 0 "indexed_or_indirect_operand")
(bswap:DI (match_operand:DI 1 "gpc_reg_operand")))
(clobber (match_operand:SI 2 "gpc_reg_operand"))]
"!TARGET_POWERPC64 && reload_completed"
[(const_int 0)]
{
rtx dest = operands[0];
rtx src = operands[1];
rtx op2 = operands[2];
rtx src1 = simplify_gen_subreg (SImode, src, DImode, 0);
rtx src2 = simplify_gen_subreg (SImode, src, DImode, 4);
rtx addr1;
rtx addr2;
rtx word1;
rtx word2;
addr1 = XEXP (dest, 0);
if (GET_CODE (addr1) == PLUS)
{
emit_insn (gen_add3_insn (op2, XEXP (addr1, 0), GEN_INT (4)));
if (TARGET_AVOID_XFORM)
{
emit_insn (gen_add3_insn (op2, XEXP (addr1, 1), op2));
addr2 = op2;
}
else
addr2 = gen_rtx_PLUS (SImode, op2, XEXP (addr1, 1));
}
else if (TARGET_AVOID_XFORM)
{
emit_insn (gen_add3_insn (op2, addr1, GEN_INT (4)));
addr2 = op2;
}
else
{
emit_move_insn (op2, GEN_INT (4));
addr2 = gen_rtx_PLUS (SImode, op2, addr1);
}
word1 = change_address (dest, SImode, addr1);
word2 = change_address (dest, SImode, addr2);
emit_insn (gen_bswapsi2 (word2, src1));
emit_insn (gen_bswapsi2 (word1, src2));
DONE;
})
(define_split
[(set (match_operand:DI 0 "gpc_reg_operand")
(bswap:DI (match_operand:DI 1 "gpc_reg_operand")))
(clobber (match_operand:SI 2 ""))]
"!TARGET_POWERPC64 && reload_completed"
[(const_int 0)]
{
rtx dest = operands[0];
rtx src = operands[1];
rtx src1 = simplify_gen_subreg (SImode, src, DImode, 0);
rtx src2 = simplify_gen_subreg (SImode, src, DImode, 4);
rtx dest1 = simplify_gen_subreg (SImode, dest, DImode, 0);
rtx dest2 = simplify_gen_subreg (SImode, dest, DImode, 4);
emit_insn (gen_bswapsi2 (dest1, src2));
emit_insn (gen_bswapsi2 (dest2, src1));
DONE;
})
(define_insn "mul<mode>3"
[(set (match_operand:GPR 0 "gpc_reg_operand" "=r,r")
(mult:GPR (match_operand:GPR 1 "gpc_reg_operand" "%r,r")
(match_operand:GPR 2 "reg_or_short_operand" "r,I")))]
""
"@
mull<wd> %0,%1,%2
mulli %0,%1,%2"
[(set_attr "type" "mul")
(set (attr "size")
(cond [(match_operand:GPR 2 "s8bit_cint_operand")
(const_string "8")
(match_operand:GPR 2 "short_cint_operand")
(const_string "16")]
(const_string "<bits>")))])
(define_insn_and_split "*mul<mode>3_dot"
[(set (match_operand:CC 3 "cc_reg_operand" "=x,?y")
(compare:CC (mult:GPR (match_operand:GPR 1 "gpc_reg_operand" "r,r")
(match_operand:GPR 2 "gpc_reg_operand" "r,r"))
(const_int 0)))
(clobber (match_scratch:GPR 0 "=r,r"))]
"<MODE>mode == Pmode"
"@
mull<wd>. %0,%1,%2
#"
"&& reload_completed && cc_reg_not_cr0_operand (operands[3], CCmode)"
[(set (match_dup 0)
(mult:GPR (match_dup 1)
(match_dup 2)))
(set (match_dup 3)
(compare:CC (match_dup 0)
(const_int 0)))]
""
[(set_attr "type" "mul")
(set_attr "size" "<bits>")
(set_attr "dot" "yes")
(set_attr "length" "4,8")])
(define_insn_and_split "*mul<mode>3_dot2"
[(set (match_operand:CC 3 "cc_reg_operand" "=x,?y")
(compare:CC (mult:GPR (match_operand:GPR 1 "gpc_reg_operand" "r,r")
(match_operand:GPR 2 "gpc_reg_operand" "r,r"))
(const_int 0)))
(set (match_operand:GPR 0 "gpc_reg_operand" "=r,r")
(mult:GPR (match_dup 1)
(match_dup 2)))]
"<MODE>mode == Pmode"
"@
mull<wd>. %0,%1,%2
#"
"&& reload_completed && cc_reg_not_cr0_operand (operands[3], CCmode)"
[(set (match_dup 0)
(mult:GPR (match_dup 1)
(match_dup 2)))
(set (match_dup 3)
(compare:CC (match_dup 0)
(const_int 0)))]
""
[(set_attr "type" "mul")
(set_attr "size" "<bits>")
(set_attr "dot" "yes")
(set_attr "length" "4,8")])
(define_expand "<su>mul<mode>3_highpart"
[(set (match_operand:GPR 0 "gpc_reg_operand")
(subreg:GPR
(mult:<DMODE> (any_extend:<DMODE>
(match_operand:GPR 1 "gpc_reg_operand"))
(any_extend:<DMODE>
(match_operand:GPR 2 "gpc_reg_operand")))
0))]
""
{
if (<MODE>mode == SImode && TARGET_POWERPC64)
{
emit_insn (gen_<su>mulsi3_highpart_64 (operands[0], operands[1],
operands[2]));
DONE;
}
if (!WORDS_BIG_ENDIAN)
{
emit_insn (gen_<su>mul<mode>3_highpart_le (operands[0], operands[1],
operands[2]));
DONE;
}
})
(define_insn "*<su>mul<mode>3_highpart"
[(set (match_operand:GPR 0 "gpc_reg_operand" "=r")
(subreg:GPR
(mult:<DMODE> (any_extend:<DMODE>
(match_operand:GPR 1 "gpc_reg_operand" "r"))
(any_extend:<DMODE>
(match_operand:GPR 2 "gpc_reg_operand" "r")))
0))]
"WORDS_BIG_ENDIAN && !(<MODE>mode == SImode && TARGET_POWERPC64)"
"mulh<wd><u> %0,%1,%2"
[(set_attr "type" "mul")
(set_attr "size" "<bits>")])
(define_insn "<su>mulsi3_highpart_le"
[(set (match_operand:SI 0 "gpc_reg_operand" "=r")
(subreg:SI
(mult:DI (any_extend:DI
(match_operand:SI 1 "gpc_reg_operand" "r"))
(any_extend:DI
(match_operand:SI 2 "gpc_reg_operand" "r")))
4))]
"!WORDS_BIG_ENDIAN && !TARGET_POWERPC64"
"mulhw<u> %0,%1,%2"
[(set_attr "type" "mul")])
(define_insn "<su>muldi3_highpart_le"
[(set (match_operand:DI 0 "gpc_reg_operand" "=r")
(subreg:DI
(mult:TI (any_extend:TI
(match_operand:DI 1 "gpc_reg_operand" "r"))
(any_extend:TI
(match_operand:DI 2 "gpc_reg_operand" "r")))
8))]
"!WORDS_BIG_ENDIAN && TARGET_POWERPC64"
"mulhd<u> %0,%1,%2"
[(set_attr "type" "mul")
(set_attr "size" "64")])
(define_insn "<su>mulsi3_highpart_64"
[(set (match_operand:SI 0 "gpc_reg_operand" "=r")
(truncate:SI
(lshiftrt:DI
(mult:DI (any_extend:DI
(match_operand:SI 1 "gpc_reg_operand" "r"))
(any_extend:DI
(match_operand:SI 2 "gpc_reg_operand" "r")))
(const_int 32))))]
"TARGET_POWERPC64"
"mulhw<u> %0,%1,%2"
[(set_attr "type" "mul")])
(define_expand "<u>mul<mode><dmode>3"
[(set (match_operand:<DMODE> 0 "gpc_reg_operand")
(mult:<DMODE> (any_extend:<DMODE>
(match_operand:GPR 1 "gpc_reg_operand"))
(any_extend:<DMODE>
(match_operand:GPR 2 "gpc_reg_operand"))))]
"!(<MODE>mode == SImode && TARGET_POWERPC64)"
{
rtx l = gen_reg_rtx (<MODE>mode);
rtx h = gen_reg_rtx (<MODE>mode);
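/* Sketch of the expansion: compute the low half with a plain mull<wd> and
   the high half with mulh<wd><u>, then stitch the two <MODE>mode registers
   together into the double-width result.  */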
emit_insn (gen_mul<mode>3 (l, operands[1], operands[2]));
emit_insn (gen_<su>mul<mode>3_highpart (h, operands[1], operands[2]));
emit_move_insn (gen_lowpart (<MODE>mode, operands[0]), l);
emit_move_insn (gen_highpart (<MODE>mode, operands[0]), h);
DONE;
})
(define_insn "*maddld<mode>4"
[(set (match_operand:GPR 0 "gpc_reg_operand" "=r")
(plus:GPR (mult:GPR (match_operand:GPR 1 "gpc_reg_operand" "r")
(match_operand:GPR 2 "gpc_reg_operand" "r"))
(match_operand:GPR 3 "gpc_reg_operand" "r")))]
"TARGET_MADDLD"
"maddld %0,%1,%2,%3"
[(set_attr "type" "mul")])
(define_insn "udiv<mode>3"
[(set (match_operand:GPR 0 "gpc_reg_operand" "=r")
(udiv:GPR (match_operand:GPR 1 "gpc_reg_operand" "r")
(match_operand:GPR 2 "gpc_reg_operand" "r")))]
""
"div<wd>u %0,%1,%2"
[(set_attr "type" "div")
(set_attr "size" "<bits>")])
;; For powers of two we can do sra[wd]i/addze for divide and then adjust for
;; modulus. If it isn't a power of two, force the operand into a register and do
;; a normal divide.
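;; As an illustrative sketch of why sra[wd]i/addze works: sra[wd]i computes
;; floor (x / 2^n) and sets CA exactly when x is negative and nonzero bits
;; were shifted out; addze then adds CA back in, turning the floor into the
;; truncating (round-towards-zero) division C requires.  For x = -7, n = 1:
;; srawi gives -4 with CA = 1, and addze yields -3 == -7 / 2.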
(define_expand "div<mode>3"
[(set (match_operand:GPR 0 "gpc_reg_operand")
(div:GPR (match_operand:GPR 1 "gpc_reg_operand")
(match_operand:GPR 2 "reg_or_cint_operand")))]
""
{
if (CONST_INT_P (operands[2])
&& INTVAL (operands[2]) > 0
&& exact_log2 (INTVAL (operands[2])) >= 0)
{
emit_insn (gen_div<mode>3_sra (operands[0], operands[1], operands[2]));
DONE;
}
operands[2] = force_reg (<MODE>mode, operands[2]);
})
(define_insn "*div<mode>3"
[(set (match_operand:GPR 0 "gpc_reg_operand" "=r")
(div:GPR (match_operand:GPR 1 "gpc_reg_operand" "r")
(match_operand:GPR 2 "gpc_reg_operand" "r")))]
""
"div<wd> %0,%1,%2"
[(set_attr "type" "div")
(set_attr "size" "<bits>")])
(define_insn "div<mode>3_sra"
[(set (match_operand:GPR 0 "gpc_reg_operand" "=r")
(div:GPR (match_operand:GPR 1 "gpc_reg_operand" "r")
(match_operand:GPR 2 "exact_log2_cint_operand" "N")))
(clobber (reg:GPR CA_REGNO))]
""
"sra<wd>i %0,%1,%p2\;addze %0,%0"
[(set_attr "type" "two")
(set_attr "length" "8")])
(define_insn_and_split "*div<mode>3_sra_dot"
[(set (match_operand:CC 3 "cc_reg_operand" "=x,?y")
(compare:CC (div:GPR (match_operand:GPR 1 "gpc_reg_operand" "r,r")
(match_operand:GPR 2 "exact_log2_cint_operand" "N,N"))
(const_int 0)))
(clobber (match_scratch:GPR 0 "=r,r"))
(clobber (reg:GPR CA_REGNO))]
"<MODE>mode == Pmode"
"@
sra<wd>i %0,%1,%p2\;addze. %0,%0
#"
"&& reload_completed && cc_reg_not_cr0_operand (operands[3], CCmode)"
[(parallel [(set (match_dup 0)
(div:GPR (match_dup 1)
(match_dup 2)))
(clobber (reg:GPR CA_REGNO))])
(set (match_dup 3)
(compare:CC (match_dup 0)
(const_int 0)))]
""
[(set_attr "type" "two")
(set_attr "length" "8,12")
(set_attr "cell_micro" "not")])
(define_insn_and_split "*div<mode>3_sra_dot2"
[(set (match_operand:CC 3 "cc_reg_operand" "=x,?y")
(compare:CC (div:GPR (match_operand:GPR 1 "gpc_reg_operand" "r,r")
(match_operand:GPR 2 "exact_log2_cint_operand" "N,N"))
(const_int 0)))
(set (match_operand:GPR 0 "gpc_reg_operand" "=r,r")
(div:GPR (match_dup 1)
(match_dup 2)))
(clobber (reg:GPR CA_REGNO))]
"<MODE>mode == Pmode"
"@
sra<wd>i %0,%1,%p2\;addze. %0,%0
#"
"&& reload_completed && cc_reg_not_cr0_operand (operands[3], CCmode)"
[(parallel [(set (match_dup 0)
(div:GPR (match_dup 1)
(match_dup 2)))
(clobber (reg:GPR CA_REGNO))])
(set (match_dup 3)
(compare:CC (match_dup 0)
(const_int 0)))]
""
[(set_attr "type" "two")
(set_attr "length" "8,12")
(set_attr "cell_micro" "not")])
(define_expand "mod<mode>3"
[(set (match_operand:GPR 0 "gpc_reg_operand")
(mod:GPR (match_operand:GPR 1 "gpc_reg_operand")
(match_operand:GPR 2 "reg_or_cint_operand")))]
""
{
int i;
rtx temp1;
rtx temp2;
if (!CONST_INT_P (operands[2])
|| INTVAL (operands[2]) <= 0
|| (i = exact_log2 (INTVAL (operands[2]))) < 0)
{
if (!TARGET_MODULO)
FAIL;
operands[2] = force_reg (<MODE>mode, operands[2]);
}
else
{
temp1 = gen_reg_rtx (<MODE>mode);
temp2 = gen_reg_rtx (<MODE>mode);
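/* Power-of-2 modulus via the truncating division just below:
   r = x - ((x / 2^i) << i), i.e. x - trunc (x / 2^i) * 2^i.  */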
emit_insn (gen_div<mode>3 (temp1, operands[1], operands[2]));
emit_insn (gen_ashl<mode>3 (temp2, temp1, GEN_INT (i)));
emit_insn (gen_sub<mode>3 (operands[0], operands[1], temp2));
DONE;
}
})
;; In order to enable using a peephole2 for combining div/mod to eliminate the
;; mod, prefer putting the result of the mod into a different register.
(define_insn "*mod<mode>3"
[(set (match_operand:GPR 0 "gpc_reg_operand" "=&r")
(mod:GPR (match_operand:GPR 1 "gpc_reg_operand" "r")
(match_operand:GPR 2 "gpc_reg_operand" "r")))]
"TARGET_MODULO"
"mods<wd> %0,%1,%2"
[(set_attr "type" "div")
(set_attr "size" "<bits>")])
(define_insn "umod<mode>3"
[(set (match_operand:GPR 0 "gpc_reg_operand" "=&r")
(umod:GPR (match_operand:GPR 1 "gpc_reg_operand" "r")
(match_operand:GPR 2 "gpc_reg_operand" "r")))]
"TARGET_MODULO"
"modu<wd> %0,%1,%2"
[(set_attr "type" "div")
(set_attr "size" "<bits>")])
;; On machines with modulo support, do a combined div/mod the old-fashioned
;; way, since the multiply/subtract is faster than doing the mod instruction
;; after a divide.
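;; In C terms, the peepholes below roughly perform (illustrative sketch):
;;   q = a / b;       /* div<wd> or div<wd>u */
;;   r = a - q * b;   /* mull<wd> + subf, replacing the mod instruction */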
(define_peephole2
[(set (match_operand:GPR 0 "gpc_reg_operand")
(div:GPR (match_operand:GPR 1 "gpc_reg_operand")
(match_operand:GPR 2 "gpc_reg_operand")))
(set (match_operand:GPR 3 "gpc_reg_operand")
(mod:GPR (match_dup 1)
(match_dup 2)))]
"TARGET_MODULO
&& ! reg_mentioned_p (operands[0], operands[1])
&& ! reg_mentioned_p (operands[0], operands[2])
&& ! reg_mentioned_p (operands[3], operands[1])
&& ! reg_mentioned_p (operands[3], operands[2])"
[(set (match_dup 0)
(div:GPR (match_dup 1)
(match_dup 2)))
(set (match_dup 3)
(mult:GPR (match_dup 0)
(match_dup 2)))
(set (match_dup 3)
(minus:GPR (match_dup 1)
(match_dup 3)))])
(define_peephole2
[(set (match_operand:GPR 0 "gpc_reg_operand")
(udiv:GPR (match_operand:GPR 1 "gpc_reg_operand")
(match_operand:GPR 2 "gpc_reg_operand")))
(set (match_operand:GPR 3 "gpc_reg_operand")
(umod:GPR (match_dup 1)
(match_dup 2)))]
"TARGET_MODULO
&& ! reg_mentioned_p (operands[0], operands[1])
&& ! reg_mentioned_p (operands[0], operands[2])
&& ! reg_mentioned_p (operands[3], operands[1])
&& ! reg_mentioned_p (operands[3], operands[2])"
[(set (match_dup 0)
(udiv:GPR (match_dup 1)
(match_dup 2)))
(set (match_dup 3)
(mult:GPR (match_dup 0)
(match_dup 2)))
(set (match_dup 3)
(minus:GPR (match_dup 1)
(match_dup 3)))])
;; Logical instructions
;; The logical instructions are mostly combined by using match_operator,
;; but the plain AND insns are somewhat different because there is no
;; plain 'andi' (only 'andi.'), no plain 'andis', and there are all
;; those rotate-and-mask operations. Thus, the AND insns come first.
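;; For example (illustrative): on 32-bit, x & 0xff00 is a contiguous-bit mask
;; and becomes a single rlwinm, while x & 0x1234 has no mask form and must use
;; andi., which sets CR0 as a side effect (hence the CC clobbers below).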
(define_expand "and<mode>3"
[(set (match_operand:SDI 0 "gpc_reg_operand")
(and:SDI (match_operand:SDI 1 "gpc_reg_operand")
(match_operand:SDI 2 "reg_or_cint_operand")))]
""
{
if (<MODE>mode == DImode && !TARGET_POWERPC64)
{
rs6000_split_logical (operands, AND, false, false, false);
DONE;
}
if (CONST_INT_P (operands[2]))
{
if (rs6000_is_valid_and_mask (operands[2], <MODE>mode))
{
emit_insn (gen_and<mode>3_mask (operands[0], operands[1], operands[2]));
DONE;
}
if (logical_const_operand (operands[2], <MODE>mode))
{
emit_insn (gen_and<mode>3_imm (operands[0], operands[1], operands[2]));
DONE;
}
if (rs6000_is_valid_2insn_and (operands[2], <MODE>mode))
{
rs6000_emit_2insn_and (<MODE>mode, operands, true, 0);
DONE;
}
operands[2] = force_reg (<MODE>mode, operands[2]);
}
})
(define_insn "and<mode>3_imm"
[(set (match_operand:GPR 0 "gpc_reg_operand" "=r")
(and:GPR (match_operand:GPR 1 "gpc_reg_operand" "%r")
(match_operand:GPR 2 "logical_const_operand" "n")))
(clobber (match_scratch:CC 3 "=x"))]
"!rs6000_is_valid_and_mask (operands[2], <MODE>mode)"
"andi%e2. %0,%1,%u2"
[(set_attr "type" "logical")
(set_attr "dot" "yes")])
(define_insn_and_split "*and<mode>3_imm_dot"
[(set (match_operand:CC 3 "cc_reg_operand" "=x,??y")
(compare:CC (and:GPR (match_operand:GPR 1 "gpc_reg_operand" "%r,r")
(match_operand:GPR 2 "logical_const_operand" "n,n"))
(const_int 0)))
(clobber (match_scratch:GPR 0 "=r,r"))
(clobber (match_scratch:CC 4 "=X,x"))]
"(<MODE>mode == Pmode || UINTVAL (operands[2]) <= 0x7fffffff)
&& !rs6000_is_valid_and_mask (operands[2], <MODE>mode)"
"@
andi%e2. %0,%1,%u2
#"
"&& reload_completed && cc_reg_not_cr0_operand (operands[3], CCmode)"
[(parallel [(set (match_dup 0)
(and:GPR (match_dup 1)
(match_dup 2)))
(clobber (match_dup 4))])
(set (match_dup 3)
(compare:CC (match_dup 0)
(const_int 0)))]
""
[(set_attr "type" "logical")
(set_attr "dot" "yes")
(set_attr "length" "4,8")])
(define_insn_and_split "*and<mode>3_imm_dot2"
[(set (match_operand:CC 3 "cc_reg_operand" "=x,??y")
(compare:CC (and:GPR (match_operand:GPR 1 "gpc_reg_operand" "%r,r")
(match_operand:GPR 2 "logical_const_operand" "n,n"))
(const_int 0)))
(set (match_operand:GPR 0 "gpc_reg_operand" "=r,r")
(and:GPR (match_dup 1)
(match_dup 2)))
(clobber (match_scratch:CC 4 "=X,x"))]
"(<MODE>mode == Pmode || UINTVAL (operands[2]) <= 0x7fffffff)
&& !rs6000_is_valid_and_mask (operands[2], <MODE>mode)"
"@
andi%e2. %0,%1,%u2
#"
"&& reload_completed && cc_reg_not_cr0_operand (operands[3], CCmode)"
[(parallel [(set (match_dup 0)
(and:GPR (match_dup 1)
(match_dup 2)))
(clobber (match_dup 4))])
(set (match_dup 3)
(compare:CC (match_dup 0)
(const_int 0)))]
""
[(set_attr "type" "logical")
(set_attr "dot" "yes")
(set_attr "length" "4,8")])
(define_insn_and_split "*and<mode>3_imm_mask_dot"
[(set (match_operand:CC 3 "cc_reg_operand" "=x,??y")
(compare:CC (and:GPR (match_operand:GPR 1 "gpc_reg_operand" "%r,r")
(match_operand:GPR 2 "logical_const_operand" "n,n"))
(const_int 0)))
(clobber (match_scratch:GPR 0 "=r,r"))]
"(<MODE>mode == Pmode || UINTVAL (operands[2]) <= 0x7fffffff)
&& rs6000_is_valid_and_mask (operands[2], <MODE>mode)"
"@
andi%e2. %0,%1,%u2
#"
"&& reload_completed && cc_reg_not_cr0_operand (operands[3], CCmode)"
[(set (match_dup 0)
(and:GPR (match_dup 1)
(match_dup 2)))
(set (match_dup 3)
(compare:CC (match_dup 0)
(const_int 0)))]
""
[(set_attr "type" "logical")
(set_attr "dot" "yes")
(set_attr "length" "4,8")])
(define_insn_and_split "*and<mode>3_imm_mask_dot2"
[(set (match_operand:CC 3 "cc_reg_operand" "=x,??y")
(compare:CC (and:GPR (match_operand:GPR 1 "gpc_reg_operand" "%r,r")
(match_operand:GPR 2 "logical_const_operand" "n,n"))
(const_int 0)))
(set (match_operand:GPR 0 "gpc_reg_operand" "=r,r")
(and:GPR (match_dup 1)
(match_dup 2)))]
"(<MODE>mode == Pmode || UINTVAL (operands[2]) <= 0x7fffffff)
&& rs6000_is_valid_and_mask (operands[2], <MODE>mode)"
"@
andi%e2. %0,%1,%u2
#"
"&& reload_completed && cc_reg_not_cr0_operand (operands[3], CCmode)"
[(set (match_dup 0)
(and:GPR (match_dup 1)
(match_dup 2)))
(set (match_dup 3)
(compare:CC (match_dup 0)
(const_int 0)))]
""
[(set_attr "type" "logical")
(set_attr "dot" "yes")
(set_attr "length" "4,8")])
(define_insn "*and<mode>3_imm_dot_shifted"
[(set (match_operand:CC 3 "cc_reg_operand" "=x")
(compare:CC
(and:GPR
(lshiftrt:GPR (match_operand:GPR 1 "gpc_reg_operand" "%r")
(match_operand:SI 4 "const_int_operand" "n"))
(match_operand:GPR 2 "const_int_operand" "n"))
(const_int 0)))
(clobber (match_scratch:GPR 0 "=r"))]
"logical_const_operand (GEN_INT (UINTVAL (operands[2])
<< INTVAL (operands[4])),
DImode)
&& (<MODE>mode == Pmode
|| (UINTVAL (operands[2]) << INTVAL (operands[4])) <= 0x7fffffff)"
{
operands[2] = GEN_INT (UINTVAL (operands[2]) << INTVAL (operands[4]));
return "andi%e2. %0,%1,%u2";
}
[(set_attr "type" "logical")
(set_attr "dot" "yes")])
(define_insn "and<mode>3_mask"
[(set (match_operand:GPR 0 "gpc_reg_operand" "=r")
(and:GPR (match_operand:GPR 1 "gpc_reg_operand" "%r")
(match_operand:GPR 2 "const_int_operand" "n")))]
"rs6000_is_valid_and_mask (operands[2], <MODE>mode)"
{
return rs6000_insn_for_and_mask (<MODE>mode, operands, false);
}
[(set_attr "type" "shift")])
(define_insn_and_split "*and<mode>3_mask_dot"
[(set (match_operand:CC 3 "cc_reg_operand" "=x,?y")
(compare:CC (and:GPR (match_operand:GPR 1 "gpc_reg_operand" "%r,r")
(match_operand:GPR 2 "const_int_operand" "n,n"))
(const_int 0)))
(clobber (match_scratch:GPR 0 "=r,r"))]
"(<MODE>mode == Pmode || UINTVAL (operands[2]) <= 0x7fffffff)
&& !logical_const_operand (operands[2], <MODE>mode)
&& rs6000_is_valid_and_mask (operands[2], <MODE>mode)"
{
if (which_alternative == 0)
return rs6000_insn_for_and_mask (<MODE>mode, operands, true);
else
return "#";
}
"&& reload_completed && cc_reg_not_cr0_operand (operands[3], CCmode)"
[(set (match_dup 0)
(and:GPR (match_dup 1)
(match_dup 2)))
(set (match_dup 3)
(compare:CC (match_dup 0)
(const_int 0)))]
""
[(set_attr "type" "shift")
(set_attr "dot" "yes")
(set_attr "length" "4,8")])
(define_insn_and_split "*and<mode>3_mask_dot2"
[(set (match_operand:CC 3 "cc_reg_operand" "=x,?y")
(compare:CC (and:GPR (match_operand:GPR 1 "gpc_reg_operand" "%r,r")
(match_operand:GPR 2 "const_int_operand" "n,n"))
(const_int 0)))
(set (match_operand:GPR 0 "gpc_reg_operand" "=r,r")
(and:GPR (match_dup 1)
(match_dup 2)))]
"(<MODE>mode == Pmode || UINTVAL (operands[2]) <= 0x7fffffff)
&& !logical_const_operand (operands[2], <MODE>mode)
&& rs6000_is_valid_and_mask (operands[2], <MODE>mode)"
{
if (which_alternative == 0)
return rs6000_insn_for_and_mask (<MODE>mode, operands, true);
else
return "#";
}
"&& reload_completed && cc_reg_not_cr0_operand (operands[3], CCmode)"
[(set (match_dup 0)
(and:GPR (match_dup 1)
(match_dup 2)))
(set (match_dup 3)
(compare:CC (match_dup 0)
(const_int 0)))]
""
[(set_attr "type" "shift")
(set_attr "dot" "yes")
(set_attr "length" "4,8")])
(define_insn_and_split "*and<mode>3_2insn"
[(set (match_operand:GPR 0 "gpc_reg_operand" "=r")
(and:GPR (match_operand:GPR 1 "gpc_reg_operand" "%r")
(match_operand:GPR 2 "const_int_operand" "n")))]
"rs6000_is_valid_2insn_and (operands[2], <MODE>mode)
&& !(rs6000_is_valid_and_mask (operands[2], <MODE>mode)
|| logical_const_operand (operands[2], <MODE>mode))"
"#"
"&& 1"
[(pc)]
{
rs6000_emit_2insn_and (<MODE>mode, operands, false, 0);
DONE;
}
[(set_attr "type" "shift")
(set_attr "length" "8")])
(define_insn_and_split "*and<mode>3_2insn_dot"
[(set (match_operand:CC 3 "cc_reg_operand" "=x,?y")
(compare:CC (and:GPR (match_operand:GPR 1 "gpc_reg_operand" "%r,r")
(match_operand:GPR 2 "const_int_operand" "n,n"))
(const_int 0)))
(clobber (match_scratch:GPR 0 "=r,r"))]
"(<MODE>mode == Pmode || UINTVAL (operands[2]) <= 0x7fffffff)
&& rs6000_is_valid_2insn_and (operands[2], <MODE>mode)
&& !(rs6000_is_valid_and_mask (operands[2], <MODE>mode)
|| logical_const_operand (operands[2], <MODE>mode))"
"#"
"&& reload_completed"
[(pc)]
{
rs6000_emit_2insn_and (<MODE>mode, operands, false, 1);
DONE;
}
[(set_attr "type" "shift")
(set_attr "dot" "yes")
(set_attr "length" "8,12")])
(define_insn_and_split "*and<mode>3_2insn_dot2"
[(set (match_operand:CC 3 "cc_reg_operand" "=x,?y")
(compare:CC (and:GPR (match_operand:GPR 1 "gpc_reg_operand" "%r,r")
(match_operand:GPR 2 "const_int_operand" "n,n"))
(const_int 0)))
(set (match_operand:GPR 0 "gpc_reg_operand" "=r,r")
(and:GPR (match_dup 1)
(match_dup 2)))]
"(<MODE>mode == Pmode || UINTVAL (operands[2]) <= 0x7fffffff)
&& rs6000_is_valid_2insn_and (operands[2], <MODE>mode)
&& !(rs6000_is_valid_and_mask (operands[2], <MODE>mode)
|| logical_const_operand (operands[2], <MODE>mode))"
"#"
"&& reload_completed"
[(pc)]
{
rs6000_emit_2insn_and (<MODE>mode, operands, false, 2);
DONE;
}
[(set_attr "type" "shift")
(set_attr "dot" "yes")
(set_attr "length" "8,12")])
(define_expand "<code><mode>3"
[(set (match_operand:SDI 0 "gpc_reg_operand")
(iorxor:SDI (match_operand:SDI 1 "gpc_reg_operand")
(match_operand:SDI 2 "reg_or_cint_operand")))]
""
{
if (<MODE>mode == DImode && !TARGET_POWERPC64)
{
rs6000_split_logical (operands, <CODE>, false, false, false);
DONE;
}
if (non_logical_cint_operand (operands[2], <MODE>mode))
{
rtx tmp = ((!can_create_pseudo_p ()
|| rtx_equal_p (operands[0], operands[1]))
? operands[0] : gen_reg_rtx (<MODE>mode));
HOST_WIDE_INT value = INTVAL (operands[2]);
HOST_WIDE_INT lo = value & 0xffff;
HOST_WIDE_INT hi = value - lo;
emit_insn (gen_<code><mode>3 (tmp, operands[1], GEN_INT (hi)));
emit_insn (gen_<code><mode>3 (operands[0], tmp, GEN_INT (lo)));
DONE;
}
if (!reg_or_logical_cint_operand (operands[2], <MODE>mode))
operands[2] = force_reg (<MODE>mode, operands[2]);
})
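;; Worked example of the two-insn immediate split (illustrative):
;; x | 0x12345678 is not a single logical immediate, so it is split into
;; hi = 0x12340000 (oris) followed by lo = 0x5678 (ori); xor splits the
;; same way using xoris/xori.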
(define_split
[(set (match_operand:GPR 0 "gpc_reg_operand")
(iorxor:GPR (match_operand:GPR 1 "gpc_reg_operand")
(match_operand:GPR 2 "non_logical_cint_operand")))]
""
[(set (match_dup 3)
(iorxor:GPR (match_dup 1)
(match_dup 4)))
(set (match_dup 0)
(iorxor:GPR (match_dup 3)
(match_dup 5)))]
{
operands[3] = ((!can_create_pseudo_p ()
|| rtx_equal_p (operands[0], operands[1]))
? operands[0] : gen_reg_rtx (<MODE>mode));
HOST_WIDE_INT value = INTVAL (operands[2]);
HOST_WIDE_INT lo = value & 0xffff;
HOST_WIDE_INT hi = value - lo;
operands[4] = GEN_INT (hi);
operands[5] = GEN_INT (lo);
})
(define_insn "*bool<mode>3_imm"
[(set (match_operand:GPR 0 "gpc_reg_operand" "=r")
(match_operator:GPR 3 "boolean_or_operator"
[(match_operand:GPR 1 "gpc_reg_operand" "%r")
(match_operand:GPR 2 "logical_const_operand" "n")]))]
""
"%q3i%e2 %0,%1,%u2"
[(set_attr "type" "logical")])
(define_insn "*bool<mode>3"
[(set (match_operand:GPR 0 "gpc_reg_operand" "=r")
(match_operator:GPR 3 "boolean_operator"
[(match_operand:GPR 1 "gpc_reg_operand" "r")
(match_operand:GPR 2 "gpc_reg_operand" "r")]))]
""
"%q3 %0,%1,%2"
[(set_attr "type" "logical")])
(define_insn_and_split "*bool<mode>3_dot"
[(set (match_operand:CC 4 "cc_reg_operand" "=x,?y")
(compare:CC (match_operator:GPR 3 "boolean_operator"
[(match_operand:GPR 1 "gpc_reg_operand" "r,r")
(match_operand:GPR 2 "gpc_reg_operand" "r,r")])
(const_int 0)))
(clobber (match_scratch:GPR 0 "=r,r"))]
"<MODE>mode == Pmode"
"@
%q3. %0,%1,%2
#"
"&& reload_completed && cc_reg_not_cr0_operand (operands[4], CCmode)"
[(set (match_dup 0)
(match_dup 3))
(set (match_dup 4)
(compare:CC (match_dup 0)
(const_int 0)))]
""
[(set_attr "type" "logical")
(set_attr "dot" "yes")
(set_attr "length" "4,8")])
(define_insn_and_split "*bool<mode>3_dot2"
[(set (match_operand:CC 4 "cc_reg_operand" "=x,?y")
(compare:CC (match_operator:GPR 3 "boolean_operator"
[(match_operand:GPR 1 "gpc_reg_operand" "r,r")
(match_operand:GPR 2 "gpc_reg_operand" "r,r")])
(const_int 0)))
(set (match_operand:GPR 0 "gpc_reg_operand" "=r,r")
(match_dup 3))]
"<MODE>mode == Pmode"
"@
%q3. %0,%1,%2
#"
"&& reload_completed && cc_reg_not_cr0_operand (operands[4], CCmode)"
[(set (match_dup 0)
(match_dup 3))
(set (match_dup 4)
(compare:CC (match_dup 0)
(const_int 0)))]
""
[(set_attr "type" "logical")
(set_attr "dot" "yes")
(set_attr "length" "4,8")])
(define_insn "*boolc<mode>3"
[(set (match_operand:GPR 0 "gpc_reg_operand" "=r")
(match_operator:GPR 3 "boolean_operator"
[(not:GPR (match_operand:GPR 2 "gpc_reg_operand" "r"))
(match_operand:GPR 1 "gpc_reg_operand" "r")]))]
""
"%q3 %0,%1,%2"
[(set_attr "type" "logical")])
(define_insn_and_split "*boolc<mode>3_dot"
[(set (match_operand:CC 4 "cc_reg_operand" "=x,?y")
(compare:CC (match_operator:GPR 3 "boolean_operator"
[(not:GPR (match_operand:GPR 2 "gpc_reg_operand" "r,r"))
(match_operand:GPR 1 "gpc_reg_operand" "r,r")])
(const_int 0)))
(clobber (match_scratch:GPR 0 "=r,r"))]
"<MODE>mode == Pmode"
"@
%q3. %0,%1,%2
#"
"&& reload_completed && cc_reg_not_cr0_operand (operands[4], CCmode)"
[(set (match_dup 0)
(match_dup 3))
(set (match_dup 4)
(compare:CC (match_dup 0)
(const_int 0)))]
""
[(set_attr "type" "logical")
(set_attr "dot" "yes")
(set_attr "length" "4,8")])
(define_insn_and_split "*boolc<mode>3_dot2"
[(set (match_operand:CC 4 "cc_reg_operand" "=x,?y")
(compare:CC (match_operator:GPR 3 "boolean_operator"
[(not:GPR (match_operand:GPR 2 "gpc_reg_operand" "r,r"))
(match_operand:GPR 1 "gpc_reg_operand" "r,r")])
(const_int 0)))
(set (match_operand:GPR 0 "gpc_reg_operand" "=r,r")
(match_dup 3))]
"<MODE>mode == Pmode"
"@
%q3. %0,%1,%2
#"
"&& reload_completed && cc_reg_not_cr0_operand (operands[4], CCmode)"
[(set (match_dup 0)
(match_dup 3))
(set (match_dup 4)
(compare:CC (match_dup 0)
(const_int 0)))]
""
[(set_attr "type" "logical")
(set_attr "dot" "yes")
(set_attr "length" "4,8")])
(define_insn "*boolcc<mode>3"
[(set (match_operand:GPR 0 "gpc_reg_operand" "=r")
(match_operator:GPR 3 "boolean_operator"
[(not:GPR (match_operand:GPR 1 "gpc_reg_operand" "r"))
(not:GPR (match_operand:GPR 2 "gpc_reg_operand" "r"))]))]
""
"%q3 %0,%1,%2"
[(set_attr "type" "logical")])
(define_insn_and_split "*boolcc<mode>3_dot"
[(set (match_operand:CC 4 "cc_reg_operand" "=x,?y")
(compare:CC (match_operator:GPR 3 "boolean_operator"
[(not:GPR (match_operand:GPR 1 "gpc_reg_operand" "r,r"))
(not:GPR (match_operand:GPR 2 "gpc_reg_operand" "r,r"))])
(const_int 0)))
(clobber (match_scratch:GPR 0 "=r,r"))]
"<MODE>mode == Pmode"
"@
%q3. %0,%1,%2
#"
"&& reload_completed && cc_reg_not_cr0_operand (operands[4], CCmode)"
[(set (match_dup 0)
(match_dup 3))
(set (match_dup 4)
(compare:CC (match_dup 0)
(const_int 0)))]
""
[(set_attr "type" "logical")
(set_attr "dot" "yes")
(set_attr "length" "4,8")])
(define_insn_and_split "*boolcc<mode>3_dot2"
[(set (match_operand:CC 4 "cc_reg_operand" "=x,?y")
(compare:CC (match_operator:GPR 3 "boolean_operator"
[(not:GPR (match_operand:GPR 1 "gpc_reg_operand" "r,r"))
(not:GPR (match_operand:GPR 2 "gpc_reg_operand" "r,r"))])
(const_int 0)))
(set (match_operand:GPR 0 "gpc_reg_operand" "=r,r")
(match_dup 3))]
"<MODE>mode == Pmode"
"@
%q3. %0,%1,%2
#"
"&& reload_completed && cc_reg_not_cr0_operand (operands[4], CCmode)"
[(set (match_dup 0)
(match_dup 3))
(set (match_dup 4)
(compare:CC (match_dup 0)
(const_int 0)))]
""
[(set_attr "type" "logical")
(set_attr "dot" "yes")
(set_attr "length" "4,8")])
;; TODO: Should have dot forms of this as well.
(define_insn "*eqv<mode>3"
[(set (match_operand:GPR 0 "gpc_reg_operand" "=r")
(not:GPR (xor:GPR (match_operand:GPR 1 "gpc_reg_operand" "r")
(match_operand:GPR 2 "gpc_reg_operand" "r"))))]
""
"eqv %0,%1,%2"
[(set_attr "type" "logical")])
;; Rotate-and-mask and insert.
(define_insn "*rotl<mode>3_mask"
[(set (match_operand:GPR 0 "gpc_reg_operand" "=r")
(and:GPR (match_operator:GPR 4 "rotate_mask_operator"
[(match_operand:GPR 1 "gpc_reg_operand" "r")
(match_operand:SI 2 "reg_or_cint_operand" "rn")])
(match_operand:GPR 3 "const_int_operand" "n")))]
"rs6000_is_valid_shift_mask (operands[3], operands[4], <MODE>mode)"
{
return rs6000_insn_for_shift_mask (<MODE>mode, operands, false);
}
[(set_attr "type" "shift")
(set_attr "maybe_var_shift" "yes")])
(define_insn_and_split "*rotl<mode>3_mask_dot"
[(set (match_operand:CC 5 "cc_reg_operand" "=x,?y")
(compare:CC
(and:GPR (match_operator:GPR 4 "rotate_mask_operator"
[(match_operand:GPR 1 "gpc_reg_operand" "r,r")
(match_operand:SI 2 "reg_or_cint_operand" "rn,rn")])
(match_operand:GPR 3 "const_int_operand" "n,n"))
(const_int 0)))
(clobber (match_scratch:GPR 0 "=r,r"))]
"(<MODE>mode == Pmode || UINTVAL (operands[3]) <= 0x7fffffff)
&& rs6000_is_valid_shift_mask (operands[3], operands[4], <MODE>mode)"
{
if (which_alternative == 0)
return rs6000_insn_for_shift_mask (<MODE>mode, operands, true);
else
return "#";
}
"&& reload_completed && cc_reg_not_cr0_operand (operands[5], CCmode)"
[(set (match_dup 0)
(and:GPR (match_dup 4)
(match_dup 3)))
(set (match_dup 5)
(compare:CC (match_dup 0)
(const_int 0)))]
""
[(set_attr "type" "shift")
(set_attr "maybe_var_shift" "yes")
(set_attr "dot" "yes")
(set_attr "length" "4,8")])
(define_insn_and_split "*rotl<mode>3_mask_dot2"
[(set (match_operand:CC 5 "cc_reg_operand" "=x,?y")
(compare:CC
(and:GPR (match_operator:GPR 4 "rotate_mask_operator"
[(match_operand:GPR 1 "gpc_reg_operand" "r,r")
(match_operand:SI 2 "reg_or_cint_operand" "rn,rn")])
(match_operand:GPR 3 "const_int_operand" "n,n"))
(const_int 0)))
(set (match_operand:GPR 0 "gpc_reg_operand" "=r,r")
(and:GPR (match_dup 4)
(match_dup 3)))]
"(<MODE>mode == Pmode || UINTVAL (operands[3]) <= 0x7fffffff)
&& rs6000_is_valid_shift_mask (operands[3], operands[4], <MODE>mode)"
{
if (which_alternative == 0)
return rs6000_insn_for_shift_mask (<MODE>mode, operands, true);
else
return "#";
}
"&& reload_completed && cc_reg_not_cr0_operand (operands[5], CCmode)"
[(set (match_dup 0)
(and:GPR (match_dup 4)
(match_dup 3)))
(set (match_dup 5)
(compare:CC (match_dup 0)
(const_int 0)))]
""
[(set_attr "type" "shift")
(set_attr "maybe_var_shift" "yes")
(set_attr "dot" "yes")
(set_attr "length" "4,8")])
; Special case for less-than-0. We can do it with just one machine
; instruction, but the generic optimizers do not realize it is cheap.
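; In C terms (illustrative): x < 0 is just the sign bit, e.g.
; (unsigned long long) x >> 63 for 64-bit x, which srdi (or rlwinm for
; 32-bit sources) extracts in a single instruction.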
(define_insn "*lt0_<mode>di"
[(set (match_operand:GPR 0 "gpc_reg_operand" "=r")
(lt:GPR (match_operand:DI 1 "gpc_reg_operand" "r")
(const_int 0)))]
"TARGET_POWERPC64"
"srdi %0,%1,63"
[(set_attr "type" "shift")])
(define_insn "*lt0_<mode>si"
[(set (match_operand:GPR 0 "gpc_reg_operand" "=r")
(lt:GPR (match_operand:SI 1 "gpc_reg_operand" "r")
(const_int 0)))]
""
"rlwinm %0,%1,1,31,31"
[(set_attr "type" "shift")])
; Two forms for insert (the two arms of the IOR are not canonicalized,
; both are an AND so are the same precedence).
(define_insn "*rotl<mode>3_insert"
[(set (match_operand:GPR 0 "gpc_reg_operand" "=r")
(ior:GPR (and:GPR (match_operator:GPR 4 "rotate_mask_operator"
[(match_operand:GPR 1 "gpc_reg_operand" "r")
(match_operand:SI 2 "const_int_operand" "n")])
(match_operand:GPR 3 "const_int_operand" "n"))
(and:GPR (match_operand:GPR 5 "gpc_reg_operand" "0")
(match_operand:GPR 6 "const_int_operand" "n"))))]
"rs6000_is_valid_insert_mask (operands[3], operands[4], <MODE>mode)
&& UINTVAL (operands[3]) + UINTVAL (operands[6]) + 1 == 0"
{
return rs6000_insn_for_insert_mask (<MODE>mode, operands, false);
}
[(set_attr "type" "insert")])
; FIXME: this needs an attr "size", so that the scheduler can see the
; difference between rlwimi and rldimi. We also might want dot forms,
; but not for rlwimi on POWER4 and similar processors.
(define_insn "*rotl<mode>3_insert_2"
[(set (match_operand:GPR 0 "gpc_reg_operand" "=r")
(ior:GPR (and:GPR (match_operand:GPR 5 "gpc_reg_operand" "0")
(match_operand:GPR 6 "const_int_operand" "n"))
(and:GPR (match_operator:GPR 4 "rotate_mask_operator"
[(match_operand:GPR 1 "gpc_reg_operand" "r")
(match_operand:SI 2 "const_int_operand" "n")])
(match_operand:GPR 3 "const_int_operand" "n"))))]
"rs6000_is_valid_insert_mask (operands[3], operands[4], <MODE>mode)
&& UINTVAL (operands[3]) + UINTVAL (operands[6]) + 1 == 0"
{
return rs6000_insn_for_insert_mask (<MODE>mode, operands, false);
}
[(set_attr "type" "insert")])
; There are also some forms without one of the ANDs.
(define_insn "*rotl<mode>3_insert_3"
[(set (match_operand:GPR 0 "gpc_reg_operand" "=r")
(ior:GPR (and:GPR (match_operand:GPR 3 "gpc_reg_operand" "0")
(match_operand:GPR 4 "const_int_operand" "n"))
(ashift:GPR (match_operand:GPR 1 "gpc_reg_operand" "r")
(match_operand:SI 2 "const_int_operand" "n"))))]
"INTVAL (operands[2]) == exact_log2 (UINTVAL (operands[4]) + 1)"
{
if (<MODE>mode == SImode)
return "rlwimi %0,%1,%h2,0,31-%h2";
else
return "rldimi %0,%1,%H2,0";
}
[(set_attr "type" "insert")])
(define_insn "*rotl<mode>3_insert_4"
[(set (match_operand:GPR 0 "gpc_reg_operand" "=r")
(ior:GPR (and:GPR (match_operand:GPR 3 "gpc_reg_operand" "0")
(match_operand:GPR 4 "const_int_operand" "n"))
(lshiftrt:GPR (match_operand:GPR 1 "gpc_reg_operand" "r")
(match_operand:SI 2 "const_int_operand" "n"))))]
"<MODE>mode == SImode &&
GET_MODE_PRECISION (<MODE>mode)
== INTVAL (operands[2]) + exact_log2 (-UINTVAL (operands[4]))"
{
operands[2] = GEN_INT (GET_MODE_PRECISION (<MODE>mode)
- INTVAL (operands[2]));
if (<MODE>mode == SImode)
return "rlwimi %0,%1,%h2,32-%h2,31";
else
return "rldimi %0,%1,%H2,64-%H2";
}
[(set_attr "type" "insert")])
(define_insn "*rotlsi3_insert_5"
[(set (match_operand:SI 0 "gpc_reg_operand" "=r,r")
(ior:SI (and:SI (match_operand:SI 1 "gpc_reg_operand" "0,r")
(match_operand:SI 2 "const_int_operand" "n,n"))
(and:SI (match_operand:SI 3 "gpc_reg_operand" "r,0")
(match_operand:SI 4 "const_int_operand" "n,n"))))]
"rs6000_is_valid_mask (operands[2], NULL, NULL, SImode)
&& UINTVAL (operands[2]) != 0 && UINTVAL (operands[4]) != 0
&& UINTVAL (operands[2]) + UINTVAL (operands[4]) + 1 == 0"
"@
rlwimi %0,%3,0,%4
rlwimi %0,%1,0,%2"
[(set_attr "type" "insert")])
(define_insn "*rotldi3_insert_6"
[(set (match_operand:DI 0 "gpc_reg_operand" "=r")
(ior:DI (and:DI (match_operand:DI 1 "gpc_reg_operand" "0")
(match_operand:DI 2 "const_int_operand" "n"))
(and:DI (match_operand:DI 3 "gpc_reg_operand" "r")
(match_operand:DI 4 "const_int_operand" "n"))))]
"exact_log2 (-UINTVAL (operands[2])) > 0
&& UINTVAL (operands[2]) + UINTVAL (operands[4]) + 1 == 0"
{
operands[5] = GEN_INT (64 - exact_log2 (-UINTVAL (operands[2])));
return "rldimi %0,%3,0,%5";
}
[(set_attr "type" "insert")
(set_attr "size" "64")])
(define_insn "*rotldi3_insert_7"
[(set (match_operand:DI 0 "gpc_reg_operand" "=r")
(ior:DI (and:DI (match_operand:DI 3 "gpc_reg_operand" "r")
(match_operand:DI 4 "const_int_operand" "n"))
(and:DI (match_operand:DI 1 "gpc_reg_operand" "0")
(match_operand:DI 2 "const_int_operand" "n"))))]
"exact_log2 (-UINTVAL (operands[2])) > 0
&& UINTVAL (operands[2]) + UINTVAL (operands[4]) + 1 == 0"
{
operands[5] = GEN_INT (64 - exact_log2 (-UINTVAL (operands[2])));
return "rldimi %0,%3,0,%5";
}
[(set_attr "type" "insert")
(set_attr "size" "64")])
; This handles the important case of multiple-precision shifts. There is
; no canonicalization rule for ASHIFT vs. LSHIFTRT, so two patterns.
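; As an illustrative sketch, the pieces of a double-word shift combine as
;   result = (hi << n) | (lo >> (width - n));
; and masking the shifted-right part lets the IOR match the insert
; (rlwimi/rldimi) patterns above.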
(define_split
[(set (match_operand:GPR 0 "gpc_reg_operand")
(ior:GPR (ashift:GPR (match_operand:GPR 1 "gpc_reg_operand")
(match_operand:SI 3 "const_int_operand"))
(lshiftrt:GPR (match_operand:GPR 2 "gpc_reg_operand")
(match_operand:SI 4 "const_int_operand"))))]
"can_create_pseudo_p ()
&& INTVAL (operands[3]) + INTVAL (operands[4])
>= GET_MODE_PRECISION (<MODE>mode)"
[(set (match_dup 5)
(lshiftrt:GPR (match_dup 2)
(match_dup 4)))
(set (match_dup 0)
(ior:GPR (and:GPR (match_dup 5)
(match_dup 6))
(ashift:GPR (match_dup 1)
(match_dup 3))))]
{
unsigned HOST_WIDE_INT mask = 1;
mask = (mask << INTVAL (operands[3])) - 1;
operands[5] = gen_reg_rtx (<MODE>mode);
operands[6] = GEN_INT (mask);
})
(define_split
[(set (match_operand:GPR 0 "gpc_reg_operand")
(ior:GPR (lshiftrt:GPR (match_operand:GPR 2 "gpc_reg_operand")
(match_operand:SI 4 "const_int_operand"))
(ashift:GPR (match_operand:GPR 1 "gpc_reg_operand")
(match_operand:SI 3 "const_int_operand"))))]
"can_create_pseudo_p ()
&& INTVAL (operands[3]) + INTVAL (operands[4])
>= GET_MODE_PRECISION (<MODE>mode)"
[(set (match_dup 5)
(lshiftrt:GPR (match_dup 2)
(match_dup 4)))
(set (match_dup 0)
(ior:GPR (and:GPR (match_dup 5)
(match_dup 6))
(ashift:GPR (match_dup 1)
(match_dup 3))))]
{
unsigned HOST_WIDE_INT mask = 1;
mask = (mask << INTVAL (operands[3])) - 1;
operands[5] = gen_reg_rtx (<MODE>mode);
operands[6] = GEN_INT (mask);
})
; Another important case is setting some bits to 1; we can do that with
; an insert instruction, in many cases.
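; For example (illustrative only), a contiguous mask that ori/oris cannot form:
;
;   unsigned int
;   set_field (unsigned int x)
;   {
;     return x | 0x000ffff0;	/* spans the 16-bit halfword boundary */
;   }
;
; The split below loads -1 into a scratch register and then uses a single
; rlwimi/rldimi to insert the all-ones bits under the mask.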
(define_insn_and_split "*ior<mode>_mask"
[(set (match_operand:GPR 0 "gpc_reg_operand" "=r")
(ior:GPR (match_operand:GPR 1 "gpc_reg_operand" "0")
(match_operand:GPR 2 "const_int_operand" "n")))
(clobber (match_scratch:GPR 3 "=r"))]
"!logical_const_operand (operands[2], <MODE>mode)
&& rs6000_is_valid_mask (operands[2], NULL, NULL, <MODE>mode)"
"#"
"&& 1"
[(set (match_dup 3)
(const_int -1))
(set (match_dup 0)
(ior:GPR (and:GPR (rotate:GPR (match_dup 3)
(match_dup 4))
(match_dup 2))
(and:GPR (match_dup 1)
(match_dup 5))))]
{
int nb, ne;
rs6000_is_valid_mask (operands[2], &nb, &ne, <MODE>mode);
if (GET_CODE (operands[3]) == SCRATCH)
operands[3] = gen_reg_rtx (<MODE>mode);
operands[4] = GEN_INT (ne);
operands[5] = GEN_INT (~UINTVAL (operands[2]));
}
[(set_attr "type" "two")
(set_attr "length" "8")])
; Yet another case is an rldimi with the second value coming from memory.
; The zero_extend that should become part of the rldimi is merged into the
; load from memory instead. Split things properly again.
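; For example (illustrative only):
;
;   unsigned long long
;   merge_word (unsigned long long a, unsigned int *p)
;   {
;     return (a << 32) | *p;	/* *p is zero-extended by the load */
;   }
;
; The split re-creates the zero_extend in a separate DImode register so that
; the insert patterns above can recognize the result as an rldimi.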
(define_split
[(set (match_operand:DI 0 "gpc_reg_operand")
(ior:DI (ashift:DI (match_operand:DI 1 "gpc_reg_operand")
(match_operand:SI 2 "const_int_operand"))
(zero_extend:DI (match_operand:QHSI 3 "memory_operand"))))]
"INTVAL (operands[2]) == <bits>"
[(set (match_dup 4)
(zero_extend:DI (match_dup 3)))
(set (match_dup 0)
(ior:DI (and:DI (match_dup 4)
(match_dup 5))
(ashift:DI (match_dup 1)
(match_dup 2))))]
{
operands[4] = gen_reg_rtx (DImode);
operands[5] = GEN_INT ((HOST_WIDE_INT_1U << <bits>) - 1);
})
; rlwimi, too.
(define_split
[(set (match_operand:SI 0 "gpc_reg_operand")
(ior:SI (ashift:SI (match_operand:SI 1 "gpc_reg_operand")
(match_operand:SI 2 "const_int_operand"))
(zero_extend:SI (match_operand:QHI 3 "memory_operand"))))]
"INTVAL (operands[2]) == <bits>"
[(set (match_dup 4)
(zero_extend:SI (match_dup 3)))
(set (match_dup 0)
(ior:SI (and:SI (match_dup 4)
(match_dup 5))
(ashift:SI (match_dup 1)
(match_dup 2))))]
{
operands[4] = gen_reg_rtx (SImode);
operands[5] = GEN_INT ((HOST_WIDE_INT_1U << <bits>) - 1);
})
;; Now the simple shifts.
(define_insn "rotl<mode>3"
[(set (match_operand:GPR 0 "gpc_reg_operand" "=r")
(rotate:GPR (match_operand:GPR 1 "gpc_reg_operand" "r")
(match_operand:SI 2 "reg_or_cint_operand" "rn")))]
""
"rotl<wd>%I2 %0,%1,%<hH>2"
[(set_attr "type" "shift")
(set_attr "maybe_var_shift" "yes")])
(define_insn "*rotlsi3_64"
[(set (match_operand:DI 0 "gpc_reg_operand" "=r")
(zero_extend:DI
(rotate:SI (match_operand:SI 1 "gpc_reg_operand" "r")
(match_operand:SI 2 "reg_or_cint_operand" "rn"))))]
"TARGET_POWERPC64"
"rotlw%I2 %0,%1,%h2"
[(set_attr "type" "shift")
(set_attr "maybe_var_shift" "yes")])
(define_insn_and_split "*rotl<mode>3_dot"
[(set (match_operand:CC 3 "cc_reg_operand" "=x,?y")
(compare:CC (rotate:GPR (match_operand:GPR 1 "gpc_reg_operand" "r,r")
(match_operand:SI 2 "reg_or_cint_operand" "rn,rn"))
(const_int 0)))
(clobber (match_scratch:GPR 0 "=r,r"))]
"<MODE>mode == Pmode"
"@
rotl<wd>%I2. %0,%1,%<hH>2
#"
"&& reload_completed && cc_reg_not_cr0_operand (operands[3], CCmode)"
[(set (match_dup 0)
(rotate:GPR (match_dup 1)
(match_dup 2)))
(set (match_dup 3)
(compare:CC (match_dup 0)
(const_int 0)))]
""
[(set_attr "type" "shift")
(set_attr "maybe_var_shift" "yes")
(set_attr "dot" "yes")
(set_attr "length" "4,8")])
(define_insn_and_split "*rotl<mode>3_dot2"
[(set (match_operand:CC 3 "cc_reg_operand" "=x,?y")
(compare:CC (rotate:GPR (match_operand:GPR 1 "gpc_reg_operand" "r,r")
(match_operand:SI 2 "reg_or_cint_operand" "rn,rn"))
(const_int 0)))
(set (match_operand:GPR 0 "gpc_reg_operand" "=r,r")
(rotate:GPR (match_dup 1)
(match_dup 2)))]
"<MODE>mode == Pmode"
"@
rotl<wd>%I2. %0,%1,%<hH>2
#"
"&& reload_completed && cc_reg_not_cr0_operand (operands[3], CCmode)"
[(set (match_dup 0)
(rotate:GPR (match_dup 1)
(match_dup 2)))
(set (match_dup 3)
(compare:CC (match_dup 0)
(const_int 0)))]
""
[(set_attr "type" "shift")
(set_attr "maybe_var_shift" "yes")
(set_attr "dot" "yes")
(set_attr "length" "4,8")])
(define_insn "ashl<mode>3"
[(set (match_operand:GPR 0 "gpc_reg_operand" "=r")
(ashift:GPR (match_operand:GPR 1 "gpc_reg_operand" "r")
(match_operand:SI 2 "reg_or_cint_operand" "rn")))]
""
"sl<wd>%I2 %0,%1,%<hH>2"
[(set_attr "type" "shift")
(set_attr "maybe_var_shift" "yes")])
(define_insn "*ashlsi3_64"
[(set (match_operand:DI 0 "gpc_reg_operand" "=r")
(zero_extend:DI
(ashift:SI (match_operand:SI 1 "gpc_reg_operand" "r")
(match_operand:SI 2 "reg_or_cint_operand" "rn"))))]
"TARGET_POWERPC64"
"slw%I2 %0,%1,%h2"
[(set_attr "type" "shift")
(set_attr "maybe_var_shift" "yes")])
(define_insn_and_split "*ashl<mode>3_dot"
[(set (match_operand:CC 3 "cc_reg_operand" "=x,?y")
(compare:CC (ashift:GPR (match_operand:GPR 1 "gpc_reg_operand" "r,r")
(match_operand:SI 2 "reg_or_cint_operand" "rn,rn"))
(const_int 0)))
(clobber (match_scratch:GPR 0 "=r,r"))]
"<MODE>mode == Pmode"
"@
sl<wd>%I2. %0,%1,%<hH>2
#"
"&& reload_completed && cc_reg_not_cr0_operand (operands[3], CCmode)"
[(set (match_dup 0)
(ashift:GPR (match_dup 1)
(match_dup 2)))
(set (match_dup 3)
(compare:CC (match_dup 0)
(const_int 0)))]
""
[(set_attr "type" "shift")
(set_attr "maybe_var_shift" "yes")
(set_attr "dot" "yes")
(set_attr "length" "4,8")])
(define_insn_and_split "*ashl<mode>3_dot2"
[(set (match_operand:CC 3 "cc_reg_operand" "=x,?y")
(compare:CC (ashift:GPR (match_operand:GPR 1 "gpc_reg_operand" "r,r")
(match_operand:SI 2 "reg_or_cint_operand" "rn,rn"))
(const_int 0)))
(set (match_operand:GPR 0 "gpc_reg_operand" "=r,r")
(ashift:GPR (match_dup 1)
(match_dup 2)))]
"<MODE>mode == Pmode"
"@
sl<wd>%I2. %0,%1,%<hH>2
#"
"&& reload_completed && cc_reg_not_cr0_operand (operands[3], CCmode)"
[(set (match_dup 0)
(ashift:GPR (match_dup 1)
(match_dup 2)))
(set (match_dup 3)
(compare:CC (match_dup 0)
(const_int 0)))]
""
[(set_attr "type" "shift")
(set_attr "maybe_var_shift" "yes")
(set_attr "dot" "yes")
(set_attr "length" "4,8")])
;; Pretend we have a memory form of extswsli until register allocation is done
;; so that we use LWZ to load the value from memory, instead of LWA.
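;; For example (illustrative only):
;;
;;   long long
;;   scaled_load (int *p)
;;   {
;;     return (long long) *p << 3;
;;   }
;;
;; With the memory alternative kept until after reload, the split below emits a
;; plain lwz into the low half of the destination followed by extswsli, rather
;; than a sign-extending lwa followed by a separate shift.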
(define_insn_and_split "ashdi3_extswsli"
[(set (match_operand:DI 0 "gpc_reg_operand" "=r,r")
(ashift:DI
(sign_extend:DI (match_operand:SI 1 "reg_or_mem_operand" "r,m"))
(match_operand:DI 2 "u6bit_cint_operand" "n,n")))]
"TARGET_EXTSWSLI"
"@
extswsli %0,%1,%2
#"
"&& reload_completed && MEM_P (operands[1])"
[(set (match_dup 3)
(match_dup 1))
(set (match_dup 0)
(ashift:DI (sign_extend:DI (match_dup 3))
(match_dup 2)))]
{
operands[3] = gen_lowpart (SImode, operands[0]);
}
[(set_attr "type" "shift")
(set_attr "maybe_var_shift" "no")])
(define_insn_and_split "ashdi3_extswsli_dot"
[(set (match_operand:CC 3 "cc_reg_operand" "=x,?y,?x,??y")
(compare:CC
(ashift:DI
(sign_extend:DI (match_operand:SI 1 "reg_or_mem_operand" "r,r,m,m"))
(match_operand:DI 2 "u6bit_cint_operand" "n,n,n,n"))
(const_int 0)))
(clobber (match_scratch:DI 0 "=r,r,r,r"))]
"TARGET_EXTSWSLI"
"@
extswsli. %0,%1,%2
#
#
#"
"&& reload_completed
&& (cc_reg_not_cr0_operand (operands[3], CCmode)
|| memory_operand (operands[1], SImode))"
[(pc)]
{
rtx dest = operands[0];
rtx src = operands[1];
rtx shift = operands[2];
rtx cr = operands[3];
rtx src2;
if (!MEM_P (src))
src2 = src;
else
{
src2 = gen_lowpart (SImode, dest);
emit_move_insn (src2, src);
}
if (REGNO (cr) == CR0_REGNO)
{
emit_insn (gen_ashdi3_extswsli_dot2 (dest, src2, shift, cr));
DONE;
}
emit_insn (gen_ashdi3_extswsli (dest, src2, shift));
emit_insn (gen_rtx_SET (cr, gen_rtx_COMPARE (CCmode, dest, const0_rtx)));
DONE;
}
[(set_attr "type" "shift")
(set_attr "maybe_var_shift" "no")
(set_attr "dot" "yes")
(set_attr "length" "4,8,8,12")])
(define_insn_and_split "ashdi3_extswsli_dot2"
[(set (match_operand:CC 3 "cc_reg_operand" "=x,?y,?x,??y")
(compare:CC
(ashift:DI
(sign_extend:DI (match_operand:SI 1 "reg_or_mem_operand" "r,r,m,m"))
(match_operand:DI 2 "u6bit_cint_operand" "n,n,n,n"))
(const_int 0)))
(set (match_operand:DI 0 "gpc_reg_operand" "=r,r,r,r")
(ashift:DI (sign_extend:DI (match_dup 1))
(match_dup 2)))]
"TARGET_EXTSWSLI"
"@
extswsli. %0,%1,%2
#
#
#"
"&& reload_completed
&& (cc_reg_not_cr0_operand (operands[3], CCmode)
|| memory_operand (operands[1], SImode))"
[(pc)]
{
rtx dest = operands[0];
rtx src = operands[1];
rtx shift = operands[2];
rtx cr = operands[3];
rtx src2;
if (!MEM_P (src))
src2 = src;
else
{
src2 = gen_lowpart (SImode, dest);
emit_move_insn (src2, src);
}
if (REGNO (cr) == CR0_REGNO)
{
emit_insn (gen_ashdi3_extswsli_dot2 (dest, src2, shift, cr));
DONE;
}
emit_insn (gen_ashdi3_extswsli (dest, src2, shift));
emit_insn (gen_rtx_SET (cr, gen_rtx_COMPARE (CCmode, dest, const0_rtx)));
DONE;
}
[(set_attr "type" "shift")
(set_attr "maybe_var_shift" "no")
(set_attr "dot" "yes")
(set_attr "length" "4,8,8,12")])
(define_insn "lshr<mode>3"
[(set (match_operand:GPR 0 "gpc_reg_operand" "=r")
(lshiftrt:GPR (match_operand:GPR 1 "gpc_reg_operand" "r")
(match_operand:SI 2 "reg_or_cint_operand" "rn")))]
""
"sr<wd>%I2 %0,%1,%<hH>2"
[(set_attr "type" "shift")
(set_attr "maybe_var_shift" "yes")])
(define_insn "*lshrsi3_64"
[(set (match_operand:DI 0 "gpc_reg_operand" "=r")
(zero_extend:DI
(lshiftrt:SI (match_operand:SI 1 "gpc_reg_operand" "r")
(match_operand:SI 2 "reg_or_cint_operand" "rn"))))]
"TARGET_POWERPC64"
"srw%I2 %0,%1,%h2"
[(set_attr "type" "shift")
(set_attr "maybe_var_shift" "yes")])
(define_insn_and_split "*lshr<mode>3_dot"
[(set (match_operand:CC 3 "cc_reg_operand" "=x,?y")
(compare:CC (lshiftrt:GPR (match_operand:GPR 1 "gpc_reg_operand" "r,r")
(match_operand:SI 2 "reg_or_cint_operand" "rn,rn"))
(const_int 0)))
(clobber (match_scratch:GPR 0 "=r,r"))]
"<MODE>mode == Pmode"
"@
sr<wd>%I2. %0,%1,%<hH>2
#"
"&& reload_completed && cc_reg_not_cr0_operand (operands[3], CCmode)"
[(set (match_dup 0)
(lshiftrt:GPR (match_dup 1)
(match_dup 2)))
(set (match_dup 3)
(compare:CC (match_dup 0)
(const_int 0)))]
""
[(set_attr "type" "shift")
(set_attr "maybe_var_shift" "yes")
(set_attr "dot" "yes")
(set_attr "length" "4,8")])
(define_insn_and_split "*lshr<mode>3_dot2"
[(set (match_operand:CC 3 "cc_reg_operand" "=x,?y")
(compare:CC (lshiftrt:GPR (match_operand:GPR 1 "gpc_reg_operand" "r,r")
(match_operand:SI 2 "reg_or_cint_operand" "rn,rn"))
(const_int 0)))
(set (match_operand:GPR 0 "gpc_reg_operand" "=r,r")
(lshiftrt:GPR (match_dup 1)
(match_dup 2)))]
"<MODE>mode == Pmode"
"@
sr<wd>%I2. %0,%1,%<hH>2
#"
"&& reload_completed && cc_reg_not_cr0_operand (operands[3], CCmode)"
[(set (match_dup 0)
(lshiftrt:GPR (match_dup 1)
(match_dup 2)))
(set (match_dup 3)
(compare:CC (match_dup 0)
(const_int 0)))]
""
[(set_attr "type" "shift")
(set_attr "maybe_var_shift" "yes")
(set_attr "dot" "yes")
(set_attr "length" "4,8")])
(define_insn "ashr<mode>3"
[(set (match_operand:GPR 0 "gpc_reg_operand" "=r")
(ashiftrt:GPR (match_operand:GPR 1 "gpc_reg_operand" "r")
(match_operand:SI 2 "reg_or_cint_operand" "rn")))
(clobber (reg:GPR CA_REGNO))]
""
"sra<wd>%I2 %0,%1,%<hH>2"
[(set_attr "type" "shift")
(set_attr "maybe_var_shift" "yes")])
(define_insn "*ashrsi3_64"
[(set (match_operand:DI 0 "gpc_reg_operand" "=r")
(sign_extend:DI
(ashiftrt:SI (match_operand:SI 1 "gpc_reg_operand" "r")
(match_operand:SI 2 "reg_or_cint_operand" "rn"))))
(clobber (reg:SI CA_REGNO))]
"TARGET_POWERPC64"
"sraw%I2 %0,%1,%h2"
[(set_attr "type" "shift")
(set_attr "maybe_var_shift" "yes")])
(define_insn_and_split "*ashr<mode>3_dot"
[(set (match_operand:CC 3 "cc_reg_operand" "=x,?y")
(compare:CC (ashiftrt:GPR (match_operand:GPR 1 "gpc_reg_operand" "r,r")
(match_operand:SI 2 "reg_or_cint_operand" "rn,rn"))
(const_int 0)))
(clobber (match_scratch:GPR 0 "=r,r"))
(clobber (reg:GPR CA_REGNO))]
"<MODE>mode == Pmode"
"@
sra<wd>%I2. %0,%1,%<hH>2
#"
"&& reload_completed && cc_reg_not_cr0_operand (operands[3], CCmode)"
[(parallel [(set (match_dup 0)
(ashiftrt:GPR (match_dup 1)
(match_dup 2)))
(clobber (reg:GPR CA_REGNO))])
(set (match_dup 3)
(compare:CC (match_dup 0)
(const_int 0)))]
""
[(set_attr "type" "shift")
(set_attr "maybe_var_shift" "yes")
(set_attr "dot" "yes")
(set_attr "length" "4,8")])
(define_insn_and_split "*ashr<mode>3_dot2"
[(set (match_operand:CC 3 "cc_reg_operand" "=x,?y")
(compare:CC (ashiftrt:GPR (match_operand:GPR 1 "gpc_reg_operand" "r,r")
(match_operand:SI 2 "reg_or_cint_operand" "rn,rn"))
(const_int 0)))
(set (match_operand:GPR 0 "gpc_reg_operand" "=r,r")
(ashiftrt:GPR (match_dup 1)
(match_dup 2)))
(clobber (reg:GPR CA_REGNO))]
"<MODE>mode == Pmode"
"@
sra<wd>%I2. %0,%1,%<hH>2
#"
"&& reload_completed && cc_reg_not_cr0_operand (operands[3], CCmode)"
[(parallel [(set (match_dup 0)
(ashiftrt:GPR (match_dup 1)
(match_dup 2)))
(clobber (reg:GPR CA_REGNO))])
(set (match_dup 3)
(compare:CC (match_dup 0)
(const_int 0)))]
""
[(set_attr "type" "shift")
(set_attr "maybe_var_shift" "yes")
(set_attr "dot" "yes")
(set_attr "length" "4,8")])
;; Builtins to replace a division to generate FRE reciprocal estimate
;; instructions and the necessary fixup instructions
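;; Conceptually, the software divide emitted by rs6000_emit_swdiv refines a
;; hardware reciprocal estimate with Newton-Raphson steps; a minimal C sketch
;; of one refinement step (illustrative only, not the exact emitted sequence):
;;
;;   static double
;;   refine_recip (double b, double est)
;;   {
;;     /* If est ~= 1/b with relative error e, the result has error ~= e*e.  */
;;     return est * (2.0 - b * est);
;;   }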
(define_expand "recip<mode>3"
[(match_operand:RECIPF 0 "gpc_reg_operand")
(match_operand:RECIPF 1 "gpc_reg_operand")
(match_operand:RECIPF 2 "gpc_reg_operand")]
"RS6000_RECIP_HAVE_RE_P (<MODE>mode)"
{
rs6000_emit_swdiv (operands[0], operands[1], operands[2], false);
DONE;
})
;; Split to create division from FRE/FRES/etc. and fixup instead of the normal
;; hardware division. This is only done before register allocation and with
;; -ffast-math. This must appear before the divsf3/divdf3 insns.
;; We used to also check optimize_insn_for_speed_p (), but problems with guessed
;; frequencies (pr68212/pr77536) made that check unreliable, so it was removed.
(define_split
[(set (match_operand:RECIPF 0 "gpc_reg_operand")
(div:RECIPF (match_operand 1 "gpc_reg_operand")
(match_operand 2 "gpc_reg_operand")))]
"RS6000_RECIP_AUTO_RE_P (<MODE>mode)
&& can_create_pseudo_p () && flag_finite_math_only
&& !flag_trapping_math && flag_reciprocal_math"
[(const_int 0)]
{
rs6000_emit_swdiv (operands[0], operands[1], operands[2], true);
DONE;
})
;; Builtins to replace 1/sqrt(x) with instructions using RSQRTE and the
;; appropriate fixup.
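;; A minimal C sketch of one Newton-Raphson step for the reciprocal square root
;; refinement done by rs6000_emit_swsqrt (illustrative only, not the exact
;; emitted sequence):
;;
;;   static double
;;   refine_rsqrt (double x, double est)
;;   {
;;     /* est ~= 1/sqrt(x); one step roughly squares the relative error.  */
;;     return est * (1.5 - 0.5 * x * est * est);
;;   }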
(define_expand "rsqrt<mode>2"
[(match_operand:RECIPF 0 "gpc_reg_operand")
(match_operand:RECIPF 1 "gpc_reg_operand")]
"RS6000_RECIP_HAVE_RSQRTE_P (<MODE>mode)"
{
rs6000_emit_swsqrt (operands[0], operands[1], 1);
DONE;
})
;; Floating-point insns, excluding normal data motion. We combine the SF/DF
;; modes here, and also add in conditional vsx/power8-vector support to access
;; values in the traditional Altivec registers if the appropriate
;; -mupper-regs-{df,sf} option is enabled.
(define_expand "abs<mode>2"
[(set (match_operand:SFDF 0 "gpc_reg_operand")
(abs:SFDF (match_operand:SFDF 1 "gpc_reg_operand")))]
"TARGET_HARD_FLOAT"
"")
(define_insn "*abs<mode>2_fpr"
[(set (match_operand:SFDF 0 "gpc_reg_operand" "=<Ff>,<Fv>")
(abs:SFDF (match_operand:SFDF 1 "gpc_reg_operand" "<Ff>,<Fv>")))]
"TARGET_HARD_FLOAT"
"@
fabs %0,%1
xsabsdp %x0,%x1"
[(set_attr "type" "fpsimple")])
(define_insn "*nabs<mode>2_fpr"
[(set (match_operand:SFDF 0 "gpc_reg_operand" "=<Ff>,<Fv>")
(neg:SFDF
(abs:SFDF
(match_operand:SFDF 1 "gpc_reg_operand" "<Ff>,<Fv>"))))]
"TARGET_HARD_FLOAT"
"@
fnabs %0,%1
xsnabsdp %x0,%x1"
[(set_attr "type" "fpsimple")])
(define_expand "neg<mode>2"
[(set (match_operand:SFDF 0 "gpc_reg_operand")
(neg:SFDF (match_operand:SFDF 1 "gpc_reg_operand")))]
"TARGET_HARD_FLOAT"
"")
(define_insn "*neg<mode>2_fpr"
[(set (match_operand:SFDF 0 "gpc_reg_operand" "=<Ff>,<Fv>")
(neg:SFDF (match_operand:SFDF 1 "gpc_reg_operand" "<Ff>,<Fv>")))]
"TARGET_HARD_FLOAT"
"@
fneg %0,%1
xsnegdp %x0,%x1"
[(set_attr "type" "fpsimple")])
(define_expand "add<mode>3"
[(set (match_operand:SFDF 0 "gpc_reg_operand")
(plus:SFDF (match_operand:SFDF 1 "gpc_reg_operand")
(match_operand:SFDF 2 "gpc_reg_operand")))]
"TARGET_HARD_FLOAT"
"")
(define_insn "*add<mode>3_fpr"
[(set (match_operand:SFDF 0 "gpc_reg_operand" "=<Ff>,wa")
(plus:SFDF (match_operand:SFDF 1 "gpc_reg_operand" "%<Ff>,wa")
(match_operand:SFDF 2 "gpc_reg_operand" "<Ff>,wa")))]
"TARGET_HARD_FLOAT"
"@
fadd<s> %0,%1,%2
xsadd<sd>p %x0,%x1,%x2"
[(set_attr "type" "fp")
(set_attr "isa" "*,<Fisa>")])
(define_expand "sub<mode>3"
[(set (match_operand:SFDF 0 "gpc_reg_operand")
(minus:SFDF (match_operand:SFDF 1 "gpc_reg_operand")
(match_operand:SFDF 2 "gpc_reg_operand")))]
"TARGET_HARD_FLOAT"
"")
(define_insn "*sub<mode>3_fpr"
[(set (match_operand:SFDF 0 "gpc_reg_operand" "=<Ff>,wa")
(minus:SFDF (match_operand:SFDF 1 "gpc_reg_operand" "<Ff>,wa")
(match_operand:SFDF 2 "gpc_reg_operand" "<Ff>,wa")))]
"TARGET_HARD_FLOAT"
"@
fsub<s> %0,%1,%2
xssub<sd>p %x0,%x1,%x2"
[(set_attr "type" "fp")
(set_attr "isa" "*,<Fisa>")])
(define_expand "mul<mode>3"
[(set (match_operand:SFDF 0 "gpc_reg_operand")
(mult:SFDF (match_operand:SFDF 1 "gpc_reg_operand")
(match_operand:SFDF 2 "gpc_reg_operand")))]
"TARGET_HARD_FLOAT"
"")
(define_insn "*mul<mode>3_fpr"
[(set (match_operand:SFDF 0 "gpc_reg_operand" "=<Ff>,wa")
(mult:SFDF (match_operand:SFDF 1 "gpc_reg_operand" "%<Ff>,wa")
(match_operand:SFDF 2 "gpc_reg_operand" "<Ff>,wa")))]
"TARGET_HARD_FLOAT"
"@
fmul<s> %0,%1,%2
xsmul<sd>p %x0,%x1,%x2"
[(set_attr "type" "dmul")
(set_attr "isa" "*,<Fisa>")])
(define_expand "div<mode>3"
[(set (match_operand:SFDF 0 "gpc_reg_operand")
(div:SFDF (match_operand:SFDF 1 "gpc_reg_operand")
(match_operand:SFDF 2 "gpc_reg_operand")))]
"TARGET_HARD_FLOAT"
{
if (RS6000_RECIP_AUTO_RE_P (<MODE>mode)
&& can_create_pseudo_p () && flag_finite_math_only
&& !flag_trapping_math && flag_reciprocal_math)
{
rs6000_emit_swdiv (operands[0], operands[1], operands[2], true);
DONE;
}
})
(define_insn "*div<mode>3_fpr"
[(set (match_operand:SFDF 0 "gpc_reg_operand" "=<Ff>,wa")
(div:SFDF (match_operand:SFDF 1 "gpc_reg_operand" "<Ff>,wa")
(match_operand:SFDF 2 "gpc_reg_operand" "<Ff>,wa")))]
"TARGET_HARD_FLOAT"
"@
fdiv<s> %0,%1,%2
xsdiv<sd>p %x0,%x1,%x2"
[(set_attr "type" "<sd>div")
(set_attr "isa" "*,<Fisa>")])
(define_insn "*sqrt<mode>2_internal"
[(set (match_operand:SFDF 0 "gpc_reg_operand" "=<Ff>,wa")
(sqrt:SFDF (match_operand:SFDF 1 "gpc_reg_operand" "<Ff>,wa")))]
"TARGET_HARD_FLOAT && TARGET_PPC_GPOPT"
"@
fsqrt<s> %0,%1
xssqrt<sd>p %x0,%x1"
[(set_attr "type" "<sd>sqrt")
(set_attr "isa" "*,<Fisa>")])
(define_expand "sqrt<mode>2"
[(set (match_operand:SFDF 0 "gpc_reg_operand")
(sqrt:SFDF (match_operand:SFDF 1 "gpc_reg_operand")))]
"TARGET_HARD_FLOAT && TARGET_PPC_GPOPT"
{
if (<MODE>mode == SFmode
&& TARGET_RECIP_PRECISION
&& RS6000_RECIP_HAVE_RSQRTE_P (<MODE>mode)
&& !optimize_function_for_size_p (cfun)
&& flag_finite_math_only && !flag_trapping_math
&& flag_unsafe_math_optimizations)
{
rs6000_emit_swsqrt (operands[0], operands[1], 0);
DONE;
}
})
;; Floating point reciprocal approximation
(define_insn "fre<sd>"
[(set (match_operand:SFDF 0 "gpc_reg_operand" "=<Ff>,wa")
(unspec:SFDF [(match_operand:SFDF 1 "gpc_reg_operand" "<Ff>,wa")]
UNSPEC_FRES))]
"TARGET_<FFRE>"
"@
fre<s> %0,%1
xsre<sd>p %x0,%x1"
[(set_attr "type" "fp")
(set_attr "isa" "*,<Fisa>")])
(define_expand "fmod<mode>3"
[(use (match_operand:SFDF 0 "gpc_reg_operand"))
(use (match_operand:SFDF 1 "gpc_reg_operand"))
(use (match_operand:SFDF 2 "gpc_reg_operand"))]
"TARGET_HARD_FLOAT
&& TARGET_FPRND
&& flag_unsafe_math_optimizations"
{
rtx div = gen_reg_rtx (<MODE>mode);
emit_insn (gen_div<mode>3 (div, operands[1], operands[2]));
rtx friz = gen_reg_rtx (<MODE>mode);
emit_insn (gen_btrunc<mode>2 (friz, div));
emit_insn (gen_nfms<mode>4 (operands[0], operands[2], friz, operands[1]));
DONE;
})
(define_expand "remainder<mode>3"
[(use (match_operand:SFDF 0 "gpc_reg_operand"))
(use (match_operand:SFDF 1 "gpc_reg_operand"))
(use (match_operand:SFDF 2 "gpc_reg_operand"))]
"TARGET_HARD_FLOAT
&& TARGET_FPRND
&& flag_unsafe_math_optimizations"
{
rtx div = gen_reg_rtx (<MODE>mode);
emit_insn (gen_div<mode>3 (div, operands[1], operands[2]));
rtx frin = gen_reg_rtx (<MODE>mode);
emit_insn (gen_round<mode>2 (frin, div));
emit_insn (gen_nfms<mode>4 (operands[0], operands[2], frin, operands[1]));
DONE;
})
(define_insn "*rsqrt<mode>2"
[(set (match_operand:SFDF 0 "gpc_reg_operand" "=<Ff>,wa")
(unspec:SFDF [(match_operand:SFDF 1 "gpc_reg_operand" "<Ff>,wa")]
UNSPEC_RSQRT))]
"RS6000_RECIP_HAVE_RSQRTE_P (<MODE>mode)"
"@
frsqrte<s> %0,%1
xsrsqrte<sd>p %x0,%x1"
[(set_attr "type" "fp")
(set_attr "isa" "*,<Fisa>")])
;; Floating point comparisons
(define_insn "*cmp<mode>_fpr"
[(set (match_operand:CCFP 0 "cc_reg_operand" "=y,y")
(compare:CCFP (match_operand:SFDF 1 "gpc_reg_operand" "<Ff>,wa")
(match_operand:SFDF 2 "gpc_reg_operand" "<Ff>,wa")))]
"TARGET_HARD_FLOAT"
"@
fcmpu %0,%1,%2
xscmpudp %0,%x1,%x2"
[(set_attr "type" "fpcompare")
(set_attr "isa" "*,<Fisa>")])
;; Floating point conversions
(define_expand "extendsfdf2"
[(set (match_operand:DF 0 "gpc_reg_operand")
(float_extend:DF (match_operand:SF 1 "reg_or_mem_operand")))]
"TARGET_HARD_FLOAT"
{
if (HONOR_SNANS (SFmode))
operands[1] = force_reg (SFmode, operands[1]);
})
(define_insn_and_split "*extendsfdf2_fpr"
[(set (match_operand:DF 0 "gpc_reg_operand" "=d,?d,d,wa,?wa,wa,v")
(float_extend:DF (match_operand:SF 1 "reg_or_mem_operand" "0,f,m,0,wa,Z,wY")))]
"TARGET_HARD_FLOAT && !HONOR_SNANS (SFmode)"
"@
#
fmr %0,%1
lfs%U1%X1 %0,%1
#
xscpsgndp %x0,%x1,%x1
lxsspx %x0,%y1
lxssp %0,%1"
"&& reload_completed && REG_P (operands[1]) && REGNO (operands[0]) == REGNO (operands[1])"
[(const_int 0)]
{
emit_note (NOTE_INSN_DELETED);
DONE;
}
[(set_attr "type" "fp,fpsimple,fpload,fp,fpsimple,fpload,fpload")
(set_attr "isa" "*,*,*,*,p8v,p8v,p9v")])
(define_insn "*extendsfdf2_snan"
[(set (match_operand:DF 0 "gpc_reg_operand" "=d,wa")
(float_extend:DF (match_operand:SF 1 "gpc_reg_operand" "f,wa")))]
"TARGET_HARD_FLOAT && HONOR_SNANS (SFmode)"
"@
frsp %0,%1
xsrsp %x0,%x1"
[(set_attr "type" "fp")
(set_attr "isa" "*,p8v")])
(define_expand "truncdfsf2"
[(set (match_operand:SF 0 "gpc_reg_operand")
(float_truncate:SF (match_operand:DF 1 "gpc_reg_operand")))]
"TARGET_HARD_FLOAT"
"")
(define_insn "*truncdfsf2_fpr"
[(set (match_operand:SF 0 "gpc_reg_operand" "=f,wa")
(float_truncate:SF (match_operand:DF 1 "gpc_reg_operand" "d,wa")))]
"TARGET_HARD_FLOAT"
"@
frsp %0,%1
xsrsp %x0,%x1"
[(set_attr "type" "fp")
(set_attr "isa" "*,p8v")])
;; This expander is here to avoid FLOAT_WORDS_BIGENDIAN tests in
;; builtins.c and optabs.c that are not correct for IBM long double
;; when little-endian.
(define_expand "signbit<mode>2"
[(set (match_dup 2)
(float_truncate:DF (match_operand:FLOAT128 1 "gpc_reg_operand")))
(set (match_dup 3)
(subreg:DI (match_dup 2) 0))
(set (match_dup 4)
(match_dup 5))
(set (match_operand:SI 0 "gpc_reg_operand")
(match_dup 6))]
"TARGET_HARD_FLOAT
&& (!FLOAT128_IEEE_P (<MODE>mode)
|| (TARGET_POWERPC64 && TARGET_DIRECT_MOVE))"
{
if (FLOAT128_IEEE_P (<MODE>mode))
{
rtx dest = operands[0];
rtx src = operands[1];
rtx tmp = gen_reg_rtx (DImode);
rtx dest_di = gen_lowpart (DImode, dest);
emit_insn (gen_signbit2_dm (<MODE>mode, tmp, src));
emit_insn (gen_lshrdi3 (dest_di, tmp, GEN_INT (63)));
DONE;
}
operands[2] = gen_reg_rtx (DFmode);
operands[3] = gen_reg_rtx (DImode);
if (TARGET_POWERPC64)
{
operands[4] = gen_reg_rtx (DImode);
operands[5] = gen_rtx_LSHIFTRT (DImode, operands[3], GEN_INT (63));
operands[6] = gen_rtx_SUBREG (SImode, operands[4],
WORDS_BIG_ENDIAN ? 4 : 0);
}
else
{
operands[4] = gen_reg_rtx (SImode);
operands[5] = gen_rtx_SUBREG (SImode, operands[3],
WORDS_BIG_ENDIAN ? 0 : 4);
operands[6] = gen_rtx_LSHIFTRT (SImode, operands[4], GEN_INT (31));
}
})
;; Optimize IEEE 128-bit signbit on 64-bit systems with direct move to avoid
;; multiple direct moves.  If we used a SUBREG:DI of the _Float128 type, the
;; register allocator would typically move the entire _Float128 item to GPRs (2
;; instructions on ISA 3.0, 3-4 instructions on ISA 2.07).
;;
;; After register allocation, if the _Float128 had originally been in GPRs, the
;; split allows the post reload phases to eliminate the move, and do the shift
;; directly with the register that contains the signbit.
(define_insn_and_split "@signbit<mode>2_dm"
[(set (match_operand:DI 0 "gpc_reg_operand" "=r,r")
(unspec:DI [(match_operand:SIGNBIT 1 "gpc_reg_operand" "wa,r")]
UNSPEC_SIGNBIT))]
"TARGET_POWERPC64 && TARGET_DIRECT_MOVE"
"@
mfvsrd %0,%x1
#"
"&& reload_completed && int_reg_operand (operands[1], <MODE>mode)"
[(set (match_dup 0)
(match_dup 2))]
{
operands[2] = gen_highpart (DImode, operands[1]);
}
[(set_attr "type" "mftgpr,*")])
;; Optimize IEEE 128-bit signbit to avoid loading the value into a vector
;; register and then doing a direct move when the value comes from memory.  On
;; little endian, we have to load the 2nd double-word to get the sign bit.
(define_insn_and_split "*signbit<mode>2_dm_mem"
[(set (match_operand:DI 0 "gpc_reg_operand" "=b")
(unspec:DI [(match_operand:SIGNBIT 1 "memory_operand" "m")]
UNSPEC_SIGNBIT))]
"TARGET_POWERPC64 && TARGET_DIRECT_MOVE"
"#"
"&& 1"
[(set (match_dup 0)
(match_dup 2))]
{
rtx dest = operands[0];
rtx src = operands[1];
rtx addr = XEXP (src, 0);
if (WORDS_BIG_ENDIAN)
operands[2] = adjust_address (src, DImode, 0);
else if (REG_P (addr) || SUBREG_P (addr))
operands[2] = adjust_address (src, DImode, 8);
else if (GET_CODE (addr) == PLUS && REG_P (XEXP (addr, 0))
&& CONST_INT_P (XEXP (addr, 1)) && mem_operand_gpr (src, DImode))
operands[2] = adjust_address (src, DImode, 8);
else
{
rtx tmp = can_create_pseudo_p () ? gen_reg_rtx (DImode) : dest;
emit_insn (gen_rtx_SET (tmp, addr));
operands[2] = change_address (src, DImode,
gen_rtx_PLUS (DImode, tmp, GEN_INT (8)));
}
})
(define_expand "copysign<mode>3"
[(set (match_dup 3)
(abs:SFDF (match_operand:SFDF 1 "gpc_reg_operand")))
(set (match_dup 4)
(neg:SFDF (abs:SFDF (match_dup 1))))
(set (match_operand:SFDF 0 "gpc_reg_operand")
(if_then_else:SFDF (ge (match_operand:SFDF 2 "gpc_reg_operand")
(match_dup 5))
(match_dup 3)
(match_dup 4)))]
"TARGET_HARD_FLOAT
&& ((TARGET_PPC_GFXOPT
&& !HONOR_NANS (<MODE>mode)
&& !HONOR_SIGNED_ZEROS (<MODE>mode))
|| TARGET_CMPB
|| VECTOR_UNIT_VSX_P (<MODE>mode))"
{
if (TARGET_CMPB || VECTOR_UNIT_VSX_P (<MODE>mode))
{
emit_insn (gen_copysign<mode>3_fcpsgn (operands[0], operands[1],
operands[2]));
DONE;
}
operands[3] = gen_reg_rtx (<MODE>mode);
operands[4] = gen_reg_rtx (<MODE>mode);
operands[5] = CONST0_RTX (<MODE>mode);
})
;; Use an unspec rather than providing an if-then-else in RTL, to prevent the
;; compiler from optimizing -0.0.
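;; For example (illustrative only):
;;
;;   double
;;   copy_sign (double a, double b)
;;   {
;;     return __builtin_copysign (a, b);	/* magnitude of a, sign of b */
;;   }
;;
;; With TARGET_CMPB or VSX this becomes a single fcpsgn or xscpsgndp.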
(define_insn "copysign<mode>3_fcpsgn"
[(set (match_operand:SFDF 0 "gpc_reg_operand" "=<Ff>,<Fv>")
(unspec:SFDF [(match_operand:SFDF 1 "gpc_reg_operand" "<Ff>,<Fv>")
(match_operand:SFDF 2 "gpc_reg_operand" "<Ff>,<Fv>")]
UNSPEC_COPYSIGN))]
"TARGET_HARD_FLOAT && (TARGET_CMPB || VECTOR_UNIT_VSX_P (<MODE>mode))"
"@
fcpsgn %0,%2,%1
xscpsgndp %x0,%x2,%x1"
[(set_attr "type" "fpsimple")])
;; For MIN and MAX on non-VSX machines, and for conditional move all of the
;; time, we use DEFINE_EXPANDs that involve an fsel instruction and some
;; auxiliary computations.  Then we just have a single DEFINE_INSN for fsel
;; and the define_splits to make them if made by combine.  On VSX machines we
;; have the min/max instructions.
;;
;; On VSX, we only check for TARGET_VSX instead of checking for a vsx/p8 vector
;; to allow either DF/SF to use only traditional registers.
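;; For example (illustrative only), when NaNs and signed zeros do not have to
;; be honored (e.g. under -ffast-math):
;;
;;   double
;;   dmin (double a, double b)
;;   {
;;     return a < b ? a : b;
;;   }
;;
;; can become a single xsmindp (or xsmincdp on ISA 3.0), and an fsel-based
;; sequence on older non-VSX machines.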
(define_expand "s<minmax><mode>3"
[(set (match_operand:SFDF 0 "gpc_reg_operand")
(fp_minmax:SFDF (match_operand:SFDF 1 "gpc_reg_operand")
(match_operand:SFDF 2 "gpc_reg_operand")))]
"TARGET_MINMAX"
{
rs6000_emit_minmax (operands[0], <SMINMAX>, operands[1], operands[2]);
DONE;
})
(define_insn "*s<minmax><mode>3_vsx"
[(set (match_operand:SFDF 0 "vsx_register_operand" "=<Fv>")
(fp_minmax:SFDF (match_operand:SFDF 1 "vsx_register_operand" "<Fv>")
(match_operand:SFDF 2 "vsx_register_operand" "<Fv>")))]
"TARGET_VSX && TARGET_HARD_FLOAT"
{
return (TARGET_P9_MINMAX
? "xs<minmax>cdp %x0,%x1,%x2"
: "xs<minmax>dp %x0,%x1,%x2");
}
[(set_attr "type" "fp")])
;; Using the fsel instruction, the conditional move patterns let us perform
;; min and max operations even when we don't have the appropriate min/max
;; instruction.
(define_insn_and_split "*s<minmax><mode>3_fpr"
[(set (match_operand:SFDF 0 "gpc_reg_operand")
(fp_minmax:SFDF (match_operand:SFDF 1 "gpc_reg_operand")
(match_operand:SFDF 2 "gpc_reg_operand")))]
"!TARGET_VSX && TARGET_MINMAX"
"#"
"&& 1"
[(const_int 0)]
{
rs6000_emit_minmax (operands[0], <SMINMAX>, operands[1], operands[2]);
DONE;
})
(define_expand "mov<mode>cc"
[(set (match_operand:GPR 0 "gpc_reg_operand")
(if_then_else:GPR (match_operand 1 "comparison_operator")
(match_operand:GPR 2 "gpc_reg_operand")
(match_operand:GPR 3 "gpc_reg_operand")))]
"TARGET_ISEL"
{
if (rs6000_emit_cmove (operands[0], operands[1], operands[2], operands[3]))
DONE;
else
FAIL;
})
;; We use the BASE_REGS for the isel input operands because, if rA is
;; 0, the value of 0 is placed in rD upon truth. Similarly for rB
;; because we may switch the operands and rB may end up being rA.
;;
;; We need 2 patterns: an unsigned and a signed pattern. We could
;; leave out the mode in operand 4 and use one pattern, but reload can
;; change the mode underneath our feet and then gets confused trying
;; to reload the value.
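;; For example (illustrative only), on a TARGET_ISEL cpu
;;
;;   int
;;   pick (int a, int b, int c)
;;   {
;;     return a == 0 ? b : c;
;;   }
;;
;; can be compiled to a compare followed by a single isel instead of a branch.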
(define_mode_iterator CCEITHER [CC CCUNS])
(define_mode_attr un [(CC "") (CCUNS "un")])
(define_insn "isel_<un>signed_<GPR:mode>"
[(set (match_operand:GPR 0 "gpc_reg_operand" "=r,r")
(if_then_else:GPR
(match_operator 1 "scc_comparison_operator"
[(match_operand:CCEITHER 4 "cc_reg_operand" "y,y")
(const_int 0)])
(match_operand:GPR 2 "reg_or_zero_operand" "O,b")
(match_operand:GPR 3 "gpc_reg_operand" "r,r")))]
"TARGET_ISEL"
"isel %0,%2,%3,%j1"
[(set_attr "type" "isel")])
;; These patterns can be useful for combine; they let combine know that
;; isel can handle reversed comparisons so long as the operands are
;; registers.
(define_insn "*isel_reversed_<un>signed_<GPR:mode>"
[(set (match_operand:GPR 0 "gpc_reg_operand" "=r,r")
(if_then_else:GPR
(match_operator 1 "scc_rev_comparison_operator"
[(match_operand:CCEITHER 4 "cc_reg_operand" "y,y")
(const_int 0)])
(match_operand:GPR 2 "gpc_reg_operand" "r,r")
(match_operand:GPR 3 "reg_or_zero_operand" "O,b")))]
"TARGET_ISEL"
{
PUT_CODE (operands[1], reverse_condition (GET_CODE (operands[1])));
return "isel %0,%3,%2,%j1";
}
[(set_attr "type" "isel")])
;; Floating point conditional move
(define_expand "mov<mode>cc"
[(set (match_operand:SFDF 0 "gpc_reg_operand")
(if_then_else:SFDF (match_operand 1 "comparison_operator")
(match_operand:SFDF 2 "gpc_reg_operand")
(match_operand:SFDF 3 "gpc_reg_operand")))]
"TARGET_HARD_FLOAT && TARGET_PPC_GFXOPT"
{
if (rs6000_emit_cmove (operands[0], operands[1], operands[2], operands[3]))
DONE;
else
FAIL;
})
(define_insn "*fsel<SFDF:mode><SFDF2:mode>4"
[(set (match_operand:SFDF 0 "gpc_reg_operand" "=&<SFDF:rreg2>")
(if_then_else:SFDF
(ge (match_operand:SFDF2 1 "gpc_reg_operand" "<SFDF2:rreg2>")
(match_operand:SFDF2 4 "zero_fp_constant" "F"))
(match_operand:SFDF 2 "gpc_reg_operand" "<SFDF:rreg2>")
(match_operand:SFDF 3 "gpc_reg_operand" "<SFDF:rreg2>")))]
"TARGET_HARD_FLOAT && TARGET_PPC_GFXOPT"
"fsel %0,%1,%2,%3"
[(set_attr "type" "fp")])
(define_insn_and_split "*mov<SFDF:mode><SFDF2:mode>cc_p9"
[(set (match_operand:SFDF 0 "vsx_register_operand" "=&<SFDF:Fv>,<SFDF:Fv>")
(if_then_else:SFDF
(match_operator:CCFP 1 "fpmask_comparison_operator"
[(match_operand:SFDF2 2 "vsx_register_operand" "<SFDF2:Fv>,<SFDF2:Fv>")
(match_operand:SFDF2 3 "vsx_register_operand" "<SFDF2:Fv>,<SFDF2:Fv>")])
(match_operand:SFDF 4 "vsx_register_operand" "<SFDF:Fv>,<SFDF:Fv>")
(match_operand:SFDF 5 "vsx_register_operand" "<SFDF:Fv>,<SFDF:Fv>")))
(clobber (match_scratch:V2DI 6 "=0,&wa"))]
"TARGET_P9_MINMAX"
"#"
""
[(set (match_dup 6)
(if_then_else:V2DI (match_dup 1)
(match_dup 7)
(match_dup 8)))
(set (match_dup 0)
(if_then_else:SFDF (ne (match_dup 6)
(match_dup 8))
(match_dup 4)
(match_dup 5)))]
{
if (GET_CODE (operands[6]) == SCRATCH)
operands[6] = gen_reg_rtx (V2DImode);
operands[7] = CONSTM1_RTX (V2DImode);
operands[8] = CONST0_RTX (V2DImode);
}
[(set_attr "length" "8")
(set_attr "type" "vecperm")])
;; Handle inverting the fpmask comparisons.
(define_insn_and_split "*mov<SFDF:mode><SFDF2:mode>cc_invert_p9"
[(set (match_operand:SFDF 0 "vsx_register_operand" "=&<SFDF:Fv>,<SFDF:Fv>")
(if_then_else:SFDF
(match_operator:CCFP 1 "invert_fpmask_comparison_operator"
[(match_operand:SFDF2 2 "vsx_register_operand" "<SFDF2:Fv>,<SFDF2:Fv>")
(match_operand:SFDF2 3 "vsx_register_operand" "<SFDF2:Fv>,<SFDF2:Fv>")])
(match_operand:SFDF 4 "vsx_register_operand" "<SFDF:Fv>,<SFDF:Fv>")
(match_operand:SFDF 5 "vsx_register_operand" "<SFDF:Fv>,<SFDF:Fv>")))
(clobber (match_scratch:V2DI 6 "=0,&wa"))]
"TARGET_P9_MINMAX"
"#"
"&& 1"
[(set (match_dup 6)
(if_then_else:V2DI (match_dup 9)
(match_dup 7)
(match_dup 8)))
(set (match_dup 0)
(if_then_else:SFDF (ne (match_dup 6)
(match_dup 8))
(match_dup 5)
(match_dup 4)))]
{
rtx op1 = operands[1];
enum rtx_code cond = reverse_condition_maybe_unordered (GET_CODE (op1));
if (GET_CODE (operands[6]) == SCRATCH)
operands[6] = gen_reg_rtx (V2DImode);
operands[7] = CONSTM1_RTX (V2DImode);
operands[8] = CONST0_RTX (V2DImode);
operands[9] = gen_rtx_fmt_ee (cond, CCFPmode, operands[2], operands[3]);
}
[(set_attr "length" "8")
(set_attr "type" "vecperm")])
(define_insn "*fpmask<mode>"
[(set (match_operand:V2DI 0 "vsx_register_operand" "=wa")
(if_then_else:V2DI
(match_operator:CCFP 1 "fpmask_comparison_operator"
[(match_operand:SFDF 2 "vsx_register_operand" "<Fv>")
(match_operand:SFDF 3 "vsx_register_operand" "<Fv>")])
(match_operand:V2DI 4 "all_ones_constant" "")
(match_operand:V2DI 5 "zero_constant" "")))]
"TARGET_P9_MINMAX"
"xscmp%V1dp %x0,%x2,%x3"
[(set_attr "type" "fpcompare")])
(define_insn "*xxsel<mode>"
[(set (match_operand:SFDF 0 "vsx_register_operand" "=<Fv>")
(if_then_else:SFDF (ne (match_operand:V2DI 1 "vsx_register_operand" "wa")
(match_operand:V2DI 2 "zero_constant" ""))
(match_operand:SFDF 3 "vsx_register_operand" "<Fv>")
(match_operand:SFDF 4 "vsx_register_operand" "<Fv>")))]
"TARGET_P9_MINMAX"
"xxsel %x0,%x4,%x3,%x1"
[(set_attr "type" "vecmove")])
;; Conversions to and from floating-point.
; We don't define lfiwax/lfiwzx with the normal definition, because we
; don't want to support putting SImode in FPR registers.
(define_insn "lfiwax"
[(set (match_operand:DI 0 "gpc_reg_operand" "=d,wa,wa,v")
(unspec:DI [(match_operand:SI 1 "reg_or_indexed_operand" "Z,Z,r,v")]
UNSPEC_LFIWAX))]
"TARGET_HARD_FLOAT && TARGET_LFIWAX"
"@
lfiwax %0,%y1
lxsiwax %x0,%y1
mtvsrwa %x0,%1
vextsw2d %0,%1"
[(set_attr "type" "fpload,fpload,mffgpr,vecexts")
(set_attr "isa" "*,p8v,p8v,p9v")])
; This split must be run before register allocation because it allocates the
; memory slot that is needed to move values to/from the FPR.  We don't allocate
; it earlier, to allow the combiner to merge insns (where the slot might not be
; needed) and in case the insns are deleted as dead code.
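; For example (illustrative only):
;
;   double
;   int_to_double (int i)
;   {
;     return (double) i;
;   }
;
; Without direct moves, the SImode value is stored to the stack slot allocated
; here, loaded back with lfiwax, and converted with fcfid; with ISA 2.07 direct
; moves the store/load round trip is avoided.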
(define_insn_and_split "floatsi<mode>2_lfiwax"
[(set (match_operand:SFDF 0 "gpc_reg_operand" "=d,<Fv>")
(float:SFDF (match_operand:SI 1 "nonimmediate_operand" "r,r")))
(clobber (match_scratch:DI 2 "=d,wa"))]
"TARGET_HARD_FLOAT && TARGET_LFIWAX
&& <SI_CONVERT_FP> && can_create_pseudo_p ()"
"#"
""
[(pc)]
{
rtx dest = operands[0];
rtx src = operands[1];
rtx tmp;
if (!MEM_P (src) && TARGET_POWERPC64 && TARGET_DIRECT_MOVE)
tmp = convert_to_mode (DImode, src, false);
else
{
tmp = operands[2];
if (GET_CODE (tmp) == SCRATCH)
tmp = gen_reg_rtx (DImode);
if (MEM_P (src))
{
src = rs6000_force_indexed_or_indirect_mem (src);
emit_insn (gen_lfiwax (tmp, src));
}
else
{
rtx stack = rs6000_allocate_stack_temp (SImode, false, true);
emit_move_insn (stack, src);
emit_insn (gen_lfiwax (tmp, stack));
}
}
emit_insn (gen_floatdi<mode>2 (dest, tmp));
DONE;
}
[(set_attr "length" "12")
(set_attr "type" "fpload")])
(define_insn_and_split "floatsi<mode>2_lfiwax_mem"
[(set (match_operand:SFDF 0 "gpc_reg_operand" "=d,<Fv>")
(float:SFDF
(sign_extend:DI
(match_operand:SI 1 "indexed_or_indirect_operand" "Z,Z"))))
(clobber (match_scratch:DI 2 "=d,wa"))]
"TARGET_HARD_FLOAT && TARGET_LFIWAX && <SI_CONVERT_FP>"
"#"
""
[(pc)]
{
operands[1] = rs6000_force_indexed_or_indirect_mem (operands[1]);
if (GET_CODE (operands[2]) == SCRATCH)
operands[2] = gen_reg_rtx (DImode);
if (TARGET_P8_VECTOR)
emit_insn (gen_extendsidi2 (operands[2], operands[1]));
else
emit_insn (gen_lfiwax (operands[2], operands[1]));
emit_insn (gen_floatdi<mode>2 (operands[0], operands[2]));
DONE;
}
[(set_attr "length" "8")
(set_attr "type" "fpload")])
(define_insn "lfiwzx"
[(set (match_operand:DI 0 "gpc_reg_operand" "=d,wa,wa,wa")
(unspec:DI [(match_operand:SI 1 "reg_or_indexed_operand" "Z,Z,r,wa")]
UNSPEC_LFIWZX))]
"TARGET_HARD_FLOAT && TARGET_LFIWZX"
"@
lfiwzx %0,%y1
lxsiwzx %x0,%y1
mtvsrwz %x0,%1
xxextractuw %x0,%x1,4"
[(set_attr "type" "fpload,fpload,mftgpr,vecexts")
(set_attr "isa" "*,p8v,p8v,p9v")])
(define_insn_and_split "floatunssi<mode>2_lfiwzx"
[(set (match_operand:SFDF 0 "gpc_reg_operand" "=d,<Fv>")
(unsigned_float:SFDF (match_operand:SI 1 "nonimmediate_operand" "r,r")))
(clobber (match_scratch:DI 2 "=d,wa"))]
"TARGET_HARD_FLOAT && TARGET_LFIWZX && <SI_CONVERT_FP>"
"#"
""
[(pc)]
{
rtx dest = operands[0];
rtx src = operands[1];
rtx tmp;
if (!MEM_P (src) && TARGET_POWERPC64 && TARGET_DIRECT_MOVE)
tmp = convert_to_mode (DImode, src, true);
else
{
tmp = operands[2];
if (GET_CODE (tmp) == SCRATCH)
tmp = gen_reg_rtx (DImode);
if (MEM_P (src))
{
src = rs6000_force_indexed_or_indirect_mem (src);
emit_insn (gen_lfiwzx (tmp, src));
}
else
{
rtx stack = rs6000_allocate_stack_temp (SImode, false, true);
emit_move_insn (stack, src);
emit_insn (gen_lfiwzx (tmp, stack));
}
}
emit_insn (gen_floatdi<mode>2 (dest, tmp));
DONE;
}
[(set_attr "length" "12")
(set_attr "type" "fpload")])
(define_insn_and_split "floatunssi<mode>2_lfiwzx_mem"
[(set (match_operand:SFDF 0 "gpc_reg_operand" "=d,<Fv>")
(unsigned_float:SFDF
(zero_extend:DI
(match_operand:SI 1 "indexed_or_indirect_operand" "Z,Z"))))
(clobber (match_scratch:DI 2 "=d,wa"))]
"TARGET_HARD_FLOAT && TARGET_LFIWZX && <SI_CONVERT_FP>"
"#"
""
[(pc)]
{
operands[1] = rs6000_force_indexed_or_indirect_mem (operands[1]);
if (GET_CODE (operands[2]) == SCRATCH)
operands[2] = gen_reg_rtx (DImode);
if (TARGET_P8_VECTOR)
emit_insn (gen_zero_extendsidi2 (operands[2], operands[1]));
else
emit_insn (gen_lfiwzx (operands[2], operands[1]));
emit_insn (gen_floatdi<mode>2 (operands[0], operands[2]));
DONE;
}
[(set_attr "length" "8")
(set_attr "type" "fpload")])
; For each of these conversions, there is a define_expand, a define_insn
; with a '#' template, and a define_split (with C code). The idea is
; to allow constant folding with the template of the define_insn,
; then to have the insns split later (between sched1 and final).
(define_expand "floatsidf2"
[(parallel [(set (match_operand:DF 0 "gpc_reg_operand")
(float:DF (match_operand:SI 1 "nonimmediate_operand")))
(use (match_dup 2))
(use (match_dup 3))
(clobber (match_dup 4))
(clobber (match_dup 5))
(clobber (match_dup 6))])]
"TARGET_HARD_FLOAT"
{
if (TARGET_LFIWAX && TARGET_FCFID)
{
emit_insn (gen_floatsidf2_lfiwax (operands[0], operands[1]));
DONE;
}
else if (TARGET_FCFID)
{
rtx dreg = operands[1];
if (!REG_P (dreg))
dreg = force_reg (SImode, dreg);
dreg = convert_to_mode (DImode, dreg, false);
emit_insn (gen_floatdidf2 (operands[0], dreg));
DONE;
}
if (!REG_P (operands[1]))
operands[1] = force_reg (SImode, operands[1]);
operands[2] = force_reg (SImode, GEN_INT (0x43300000));
operands[3] = force_reg (DFmode, CONST_DOUBLE_ATOF (\"4503601774854144\", DFmode));
operands[4] = rs6000_allocate_stack_temp (DFmode, true, false);
operands[5] = gen_reg_rtx (DFmode);
operands[6] = gen_reg_rtx (SImode);
})
(define_insn_and_split "*floatsidf2_internal"
[(set (match_operand:DF 0 "gpc_reg_operand" "=&d")
(float:DF (match_operand:SI 1 "gpc_reg_operand" "r")))
(use (match_operand:SI 2 "gpc_reg_operand" "r"))
(use (match_operand:DF 3 "gpc_reg_operand" "d"))
(clobber (match_operand:DF 4 "offsettable_mem_operand" "=o"))
(clobber (match_operand:DF 5 "gpc_reg_operand" "=&d"))
(clobber (match_operand:SI 6 "gpc_reg_operand" "=&r"))]
"!TARGET_FCFID && TARGET_HARD_FLOAT"
"#"
""
[(pc)]
{
rtx lowword, highword;
gcc_assert (MEM_P (operands[4]));
highword = adjust_address (operands[4], SImode, 0);
lowword = adjust_address (operands[4], SImode, 4);
if (! WORDS_BIG_ENDIAN)
std::swap (lowword, highword);
emit_insn (gen_xorsi3 (operands[6], operands[1],
GEN_INT (~ (HOST_WIDE_INT) 0x7fffffff)));
emit_move_insn (lowword, operands[6]);
emit_move_insn (highword, operands[2]);
emit_move_insn (operands[5], operands[4]);
emit_insn (gen_subdf3 (operands[0], operands[5], operands[3]));
DONE;
}
[(set_attr "length" "24")
(set_attr "type" "fp")])
;; If we don't have a direct conversion to single precision, don't enable this
;; conversion for 32-bit without fast math, because we don't have the insn to
;; generate the fixup swizzle to avoid double rounding problems.
(define_expand "floatunssisf2"
[(set (match_operand:SF 0 "gpc_reg_operand")
(unsigned_float:SF (match_operand:SI 1 "nonimmediate_operand")))]
"TARGET_HARD_FLOAT
&& ((TARGET_FCFIDUS && TARGET_LFIWZX)
|| (TARGET_FCFID
&& (TARGET_POWERPC64 || flag_unsafe_math_optimizations)))"
{
if (TARGET_LFIWZX && TARGET_FCFIDUS)
{
emit_insn (gen_floatunssisf2_lfiwzx (operands[0], operands[1]));
DONE;
}
else
{
rtx dreg = operands[1];
if (!REG_P (dreg))
dreg = force_reg (SImode, dreg);
dreg = convert_to_mode (DImode, dreg, true);
emit_insn (gen_floatdisf2 (operands[0], dreg));
DONE;
}
})
(define_expand "floatunssidf2"
[(parallel [(set (match_operand:DF 0 "gpc_reg_operand")
(unsigned_float:DF (match_operand:SI 1 "nonimmediate_operand")))
(use (match_dup 2))
(use (match_dup 3))
(clobber (match_dup 4))
(clobber (match_dup 5))])]
"TARGET_HARD_FLOAT"
{
if (TARGET_LFIWZX && TARGET_FCFID)
{
emit_insn (gen_floatunssidf2_lfiwzx (operands[0], operands[1]));
DONE;
}
else if (TARGET_FCFID)
{
rtx dreg = operands[1];
if (!REG_P (dreg))
dreg = force_reg (SImode, dreg);
dreg = convert_to_mode (DImode, dreg, true);
emit_insn (gen_floatdidf2 (operands[0], dreg));
DONE;
}
if (!REG_P (operands[1]))
operands[1] = force_reg (SImode, operands[1]);
operands[2] = force_reg (SImode, GEN_INT (0x43300000));
operands[3] = force_reg (DFmode, CONST_DOUBLE_ATOF (\"4503599627370496\", DFmode));
operands[4] = rs6000_allocate_stack_temp (DFmode, true, false);
operands[5] = gen_reg_rtx (DFmode);
})
(define_insn_and_split "*floatunssidf2_internal"
[(set (match_operand:DF 0 "gpc_reg_operand" "=&d")
(unsigned_float:DF (match_operand:SI 1 "gpc_reg_operand" "r")))
(use (match_operand:SI 2 "gpc_reg_operand" "r"))
(use (match_operand:DF 3 "gpc_reg_operand" "d"))
(clobber (match_operand:DF 4 "offsettable_mem_operand" "=o"))
(clobber (match_operand:DF 5 "gpc_reg_operand" "=&d"))]
"!TARGET_FCFIDU && TARGET_HARD_FLOAT
&& !(TARGET_FCFID && TARGET_POWERPC64)"
"#"
""
[(pc)]
{
rtx lowword, highword;
gcc_assert (MEM_P (operands[4]));
highword = adjust_address (operands[4], SImode, 0);
lowword = adjust_address (operands[4], SImode, 4);
if (! WORDS_BIG_ENDIAN)
std::swap (lowword, highword);
emit_move_insn (lowword, operands[1]);
emit_move_insn (highword, operands[2]);
emit_move_insn (operands[5], operands[4]);
emit_insn (gen_subdf3 (operands[0], operands[5], operands[3]));
DONE;
}
[(set_attr "length" "20")
(set_attr "type" "fp")])
;; ISA 3.0 adds instructions lxsi[bh]zx to directly load QImode and HImode to
;; vector registers. These insns favor doing the sign/zero extension in
;; the vector registers, rather than loading up a GPR, doing a sign/zero
;; extension and then a direct move.
(define_expand "float<QHI:mode><FP_ISA3:mode>2"
[(parallel [(set (match_operand:FP_ISA3 0 "vsx_register_operand")
(float:FP_ISA3
(match_operand:QHI 1 "input_operand")))
(clobber (match_scratch:DI 2))
(clobber (match_scratch:DI 3))
(clobber (match_scratch:<QHI:MODE> 4))])]
"TARGET_P9_VECTOR && TARGET_DIRECT_MOVE && TARGET_POWERPC64"
{
if (MEM_P (operands[1]))
operands[1] = rs6000_force_indexed_or_indirect_mem (operands[1]);
})
(define_insn_and_split "*float<QHI:mode><FP_ISA3:mode>2_internal"
[(set (match_operand:FP_ISA3 0 "vsx_register_operand" "=<Fv>,<Fv>,<Fv>")
(float:FP_ISA3
(match_operand:QHI 1 "reg_or_indexed_operand" "v,r,Z")))
(clobber (match_scratch:DI 2 "=v,wa,v"))
(clobber (match_scratch:DI 3 "=X,r,X"))
(clobber (match_scratch:<QHI:MODE> 4 "=X,X,v"))]
"TARGET_P9_VECTOR && TARGET_DIRECT_MOVE && TARGET_POWERPC64"
"#"
"&& reload_completed"
[(const_int 0)]
{
rtx result = operands[0];
rtx input = operands[1];
rtx di = operands[2];
if (!MEM_P (input))
{
rtx tmp = operands[3];
if (altivec_register_operand (input, <QHI:MODE>mode))
emit_insn (gen_extend<QHI:mode>di2 (di, input));
else if (GET_CODE (tmp) == SCRATCH)
emit_insn (gen_extend<QHI:mode>di2 (di, input));
else
{
emit_insn (gen_extend<QHI:mode>di2 (tmp, input));
emit_move_insn (di, tmp);
}
}
else
{
rtx tmp = operands[4];
emit_move_insn (tmp, input);
emit_insn (gen_extend<QHI:mode>di2 (di, tmp));
}
emit_insn (gen_floatdi<FP_ISA3:mode>2 (result, di));
DONE;
}
[(set_attr "isa" "p9v,*,p9v")])
(define_expand "floatuns<QHI:mode><FP_ISA3:mode>2"
[(parallel [(set (match_operand:FP_ISA3 0 "vsx_register_operand")
(unsigned_float:FP_ISA3
(match_operand:QHI 1 "input_operand")))
(clobber (match_scratch:DI 2))
(clobber (match_scratch:DI 3))])]
"TARGET_P9_VECTOR && TARGET_DIRECT_MOVE && TARGET_POWERPC64"
{
if (MEM_P (operands[1]))
operands[1] = rs6000_force_indexed_or_indirect_mem (operands[1]);
})
(define_insn_and_split "*floatuns<QHI:mode><FP_ISA3:mode>2_internal"
[(set (match_operand:FP_ISA3 0 "vsx_register_operand" "=<Fv>,<Fv>,<Fv>")
(unsigned_float:FP_ISA3
(match_operand:QHI 1 "reg_or_indexed_operand" "v,r,Z")))
(clobber (match_scratch:DI 2 "=v,wa,wa"))
(clobber (match_scratch:DI 3 "=X,r,X"))]
"TARGET_P9_VECTOR && TARGET_DIRECT_MOVE && TARGET_POWERPC64"
"#"
"&& reload_completed"
[(const_int 0)]
{
rtx result = operands[0];
rtx input = operands[1];
rtx di = operands[2];
if (MEM_P (input) || altivec_register_operand (input, <QHI:MODE>mode))
emit_insn (gen_zero_extend<QHI:mode>di2 (di, input));
else
{
rtx tmp = operands[3];
if (GET_CODE (tmp) == SCRATCH)
emit_insn (gen_extend<QHI:mode>di2 (di, input));
else
{
emit_insn (gen_zero_extend<QHI:mode>di2 (tmp, input));
emit_move_insn (di, tmp);
}
}
emit_insn (gen_floatdi<FP_ISA3:mode>2 (result, di));
DONE;
}
[(set_attr "isa" "p9v,*,p9v")])
(define_expand "fix_trunc<mode>si2"
[(set (match_operand:SI 0 "gpc_reg_operand")
(fix:SI (match_operand:SFDF 1 "gpc_reg_operand")))]
"TARGET_HARD_FLOAT"
{
if (!(TARGET_P8_VECTOR && TARGET_DIRECT_MOVE))
{
rtx src = force_reg (<MODE>mode, operands[1]);
if (TARGET_STFIWX)
emit_insn (gen_fix_trunc<mode>si2_stfiwx (operands[0], src));
else
{
rtx tmp = gen_reg_rtx (DImode);
rtx stack = rs6000_allocate_stack_temp (DImode, true, false);
emit_insn (gen_fix_trunc<mode>si2_internal (operands[0], src,
tmp, stack));
}
DONE;
}
})
; Like the convert-to-float patterns, this insn must be split before
; register allocation so that it can allocate the memory slot if one
; is needed.
(define_insn_and_split "fix_trunc<mode>si2_stfiwx"
[(set (match_operand:SI 0 "nonimmediate_operand" "=rm")
(fix:SI (match_operand:SFDF 1 "gpc_reg_operand" "d")))
(clobber (match_scratch:DI 2 "=d"))]
"TARGET_HARD_FLOAT && TARGET_STFIWX && can_create_pseudo_p ()
&& !(TARGET_P8_VECTOR && TARGET_DIRECT_MOVE)"
"#"
""
[(pc)]
{
rtx dest = operands[0];
rtx src = operands[1];
rtx tmp = operands[2];
if (GET_CODE (tmp) == SCRATCH)
tmp = gen_reg_rtx (DImode);
emit_insn (gen_fctiwz_<mode> (tmp, src));
if (MEM_P (dest))
{
dest = rs6000_force_indexed_or_indirect_mem (dest);
emit_insn (gen_stfiwx (dest, tmp));
DONE;
}
else if (TARGET_POWERPC64 && TARGET_DIRECT_MOVE)
{
dest = gen_lowpart (DImode, dest);
emit_move_insn (dest, tmp);
DONE;
}
else
{
rtx stack = rs6000_allocate_stack_temp (SImode, false, true);
emit_insn (gen_stfiwx (stack, tmp));
emit_move_insn (dest, stack);
DONE;
}
}
[(set_attr "length" "12")
(set_attr "type" "fp")])
(define_insn_and_split "fix_trunc<mode>si2_internal"
[(set (match_operand:SI 0 "gpc_reg_operand" "=r,?r")
(fix:SI (match_operand:SFDF 1 "gpc_reg_operand" "d,<rreg>")))
(clobber (match_operand:DI 2 "gpc_reg_operand" "=1,d"))
(clobber (match_operand:DI 3 "offsettable_mem_operand" "=o,o"))]
"TARGET_HARD_FLOAT
&& !(TARGET_P8_VECTOR && TARGET_DIRECT_MOVE)"
"#"
""
[(pc)]
{
rtx lowword;
gcc_assert (MEM_P (operands[3]));
lowword = adjust_address (operands[3], SImode, WORDS_BIG_ENDIAN ? 4 : 0);
emit_insn (gen_fctiwz_<mode> (operands[2], operands[1]));
emit_move_insn (operands[3], operands[2]);
emit_move_insn (operands[0], lowword);
DONE;
}
[(set_attr "length" "16")
(set_attr "type" "fp")])
(define_expand "fix_trunc<mode>di2"
[(set (match_operand:DI 0 "gpc_reg_operand")
(fix:DI (match_operand:SFDF 1 "gpc_reg_operand")))]
"TARGET_HARD_FLOAT && TARGET_FCFID"
"")
(define_insn "*fix_trunc<mode>di2_fctidz"
[(set (match_operand:DI 0 "gpc_reg_operand" "=d,wa")
(fix:DI (match_operand:SFDF 1 "gpc_reg_operand" "<Ff>,<Fv>")))]
"TARGET_HARD_FLOAT && TARGET_FCFID"
"@
fctidz %0,%1
xscvdpsxds %x0,%x1"
[(set_attr "type" "fp")])
;; If we have ISA 3.0, QI/HImode values can go in both VSX registers and GPR
;; registers. If we have ISA 2.07, we don't allow QI/HImode values in the
;; vector registers, so we need to do direct moves to the GPRs, but SImode
;; values can go in VSX registers. Keeping the direct move part through
;; register allocation prevents the register allocator from doing a direct move
;; of the SImode value to a GPR, and then a store/load.
(define_insn_and_split "fix<uns>_trunc<SFDF:mode><QHI:mode>2"
[(set (match_operand:<QHI:MODE> 0 "gpc_reg_operand" "=d,wa,r")
(any_fix:QHI (match_operand:SFDF 1 "gpc_reg_operand" "d,wa,wa")))
(clobber (match_scratch:SI 2 "=X,X,wa"))]
"TARGET_DIRECT_MOVE"
"@
fctiw<u>z %0,%1
xscvdp<su>xws %x0,%x1
#"
"&& reload_completed && int_reg_operand (operands[0], <QHI:MODE>mode)"
[(set (match_dup 2)
(any_fix:SI (match_dup 1)))
(set (match_dup 3)
(match_dup 2))]
{
operands[3] = gen_rtx_REG (SImode, REGNO (operands[0]));
}
[(set_attr "type" "fp")
(set_attr "length" "4,4,8")
(set_attr "isa" "p9v,p9v,*")])
(define_insn "*fix<uns>_trunc<SFDF:mode>si2_p8"
[(set (match_operand:SI 0 "gpc_reg_operand" "=d,wa")
(any_fix:SI (match_operand:SFDF 1 "gpc_reg_operand" "d,wa")))]
"TARGET_DIRECT_MOVE"
"@
fctiw<u>z %0,%1
xscvdp<su>xws %x0,%x1"
[(set_attr "type" "fp")])
;; Keep the convert and store together through register allocation to prevent
;; the register allocator from getting clever and doing a direct move to a GPR
;; followed by a GPR store when the address is reg+offset.
(define_insn_and_split "*fix<uns>_trunc<SFDF:mode><QHSI:mode>2_mem"
[(set (match_operand:QHSI 0 "memory_operand" "=Z")
(any_fix:QHSI (match_operand:SFDF 1 "gpc_reg_operand" "wa")))
(clobber (match_scratch:SI 2 "=wa"))]
"(<QHSI:MODE>mode == SImode && TARGET_P8_VECTOR) || TARGET_P9_VECTOR"
"#"
"&& reload_completed"
[(set (match_dup 2)
(any_fix:SI (match_dup 1)))
(set (match_dup 0)
(match_dup 3))]
{
operands[3] = (<QHSI:MODE>mode == SImode
? operands[2]
: gen_rtx_REG (<QHSI:MODE>mode, REGNO (operands[2])));
})
(define_expand "fixuns_trunc<mode>si2"
[(set (match_operand:SI 0 "gpc_reg_operand")
(unsigned_fix:SI (match_operand:SFDF 1 "gpc_reg_operand")))]
"TARGET_HARD_FLOAT && TARGET_FCTIWUZ && TARGET_STFIWX"
{
if (!TARGET_P8_VECTOR)
{
emit_insn (gen_fixuns_trunc<mode>si2_stfiwx (operands[0], operands[1]));
DONE;
}
})
(define_insn_and_split "fixuns_trunc<mode>si2_stfiwx"
[(set (match_operand:SI 0 "nonimmediate_operand" "=rm")
(unsigned_fix:SI (match_operand:SFDF 1 "gpc_reg_operand" "d")))
(clobber (match_scratch:DI 2 "=d"))]
"TARGET_HARD_FLOAT && TARGET_FCTIWUZ
&& TARGET_STFIWX && can_create_pseudo_p ()
&& !TARGET_P8_VECTOR"
"#"
""
[(pc)]
{
rtx dest = operands[0];
rtx src = operands[1];
rtx tmp = operands[2];
if (GET_CODE (tmp) == SCRATCH)
tmp = gen_reg_rtx (DImode);
emit_insn (gen_fctiwuz_<mode> (tmp, src));
if (MEM_P (dest))
{
dest = rs6000_force_indexed_or_indirect_mem (dest);
emit_insn (gen_stfiwx (dest, tmp));
DONE;
}
else if (TARGET_POWERPC64 && TARGET_DIRECT_MOVE)
{
dest = gen_lowpart (DImode, dest);
emit_move_insn (dest, tmp);
DONE;
}
else
{
rtx stack = rs6000_allocate_stack_temp (SImode, false, true);
emit_insn (gen_stfiwx (stack, tmp));
emit_move_insn (dest, stack);
DONE;
}
}
[(set_attr "length" "12")
(set_attr "type" "fp")])
(define_insn "fixuns_trunc<mode>di2"
[(set (match_operand:DI 0 "gpc_reg_operand" "=d,wa")
(unsigned_fix:DI (match_operand:SFDF 1 "gpc_reg_operand" "<Ff>,<Fv>")))]
"TARGET_HARD_FLOAT && TARGET_FCTIDUZ"
"@
fctiduz %0,%1
xscvdpuxds %x0,%x1"
[(set_attr "type" "fp")])
(define_insn "rs6000_mtfsb0"
[(unspec_volatile [(match_operand:SI 0 "u5bit_cint_operand" "n")]
UNSPECV_MTFSB0)]
"TARGET_HARD_FLOAT"
"mtfsb0 %0"
[(set_attr "type" "fp")])
(define_insn "rs6000_mtfsb1"
[(unspec_volatile [(match_operand:SI 0 "u5bit_cint_operand" "n")]
UNSPECV_MTFSB1)]
"TARGET_HARD_FLOAT"
"mtfsb1 %0"
[(set_attr "type" "fp")])
(define_insn "rs6000_mffscrn"
[(set (match_operand:DF 0 "gpc_reg_operand" "=d")
(unspec_volatile:DF [(match_operand:DF 1 "gpc_reg_operand" "d")]
UNSPECV_MFFSCRN))]
"TARGET_P9_MISC"
"mffscrn %0,%1"
[(set_attr "type" "fp")])
(define_insn "rs6000_mffscdrn"
[(set (match_operand:DF 0 "gpc_reg_operand" "=d")
(unspec_volatile:DF [(const_int 0)] UNSPECV_MFFSCDRN))
(use (match_operand:DF 1 "gpc_reg_operand" "d"))]
"TARGET_P9_MISC"
"mffscdrn %0,%1"
[(set_attr "type" "fp")])
(define_expand "rs6000_set_fpscr_rn"
[(match_operand:DI 0 "reg_or_cint_operand")]
"TARGET_HARD_FLOAT"
{
rtx tmp_df = gen_reg_rtx (DFmode);
/* The floating point rounding control bits are FPSCR[62:63]. Put the
new rounding mode bits from operands[0][62:63] into FPSCR[62:63]. */
if (TARGET_P9_MISC)
{
rtx src_df = force_reg (DImode, operands[0]);
src_df = simplify_gen_subreg (DFmode, src_df, DImode, 0);
emit_insn (gen_rs6000_mffscrn (tmp_df, src_df));
DONE;
}
if (CONST_INT_P (operands[0]))
{
if ((INTVAL (operands[0]) & 0x1) == 0x1)
emit_insn (gen_rs6000_mtfsb1 (GEN_INT (31)));
else
emit_insn (gen_rs6000_mtfsb0 (GEN_INT (31)));
if ((INTVAL (operands[0]) & 0x2) == 0x2)
emit_insn (gen_rs6000_mtfsb1 (GEN_INT (30)));
else
emit_insn (gen_rs6000_mtfsb0 (GEN_INT (30)));
}
else
{
rtx tmp_rn = gen_reg_rtx (DImode);
rtx tmp_di = gen_reg_rtx (DImode);
/* Extract new RN mode from operand. */
emit_insn (gen_anddi3 (tmp_rn, operands[0], GEN_INT (0x3)));
/* Insert new RN mode into FPSCR. */
emit_insn (gen_rs6000_mffs (tmp_df));
tmp_di = simplify_gen_subreg (DImode, tmp_df, DFmode, 0);
emit_insn (gen_anddi3 (tmp_di, tmp_di, GEN_INT (-4)));
emit_insn (gen_iordi3 (tmp_di, tmp_di, tmp_rn));
/* We need to write FPSCR field k=15 (the RN field).  The fields are
numbered [0:15] and field k = i + 8*(1-W), so with L=0 and W=0 we
need i=7.  FLM is an 8-bit mask [0:7]; setting bit i=7 gives the
mask value 0x01.  */
tmp_df = simplify_gen_subreg (DFmode, tmp_di, DImode, 0);
emit_insn (gen_rs6000_mtfsf (GEN_INT (0x01), tmp_df));
}
DONE;
})
(define_expand "rs6000_set_fpscr_drn"
[(match_operand:DI 0 "gpc_reg_operand")]
"TARGET_HARD_FLOAT"
{
rtx tmp_df = gen_reg_rtx (DFmode);
/* The decimal floating point rounding control bits are FPSCR[29:31]. Put the
new rounding mode bits from operands[0][61:63] into FPSCR[29:31]. */
if (TARGET_P9_MISC)
{
rtx src_df = gen_reg_rtx (DFmode);
emit_insn (gen_ashldi3 (operands[0], operands[0], GEN_INT (32)));
src_df = simplify_gen_subreg (DFmode, operands[0], DImode, 0);
emit_insn (gen_rs6000_mffscdrn (tmp_df, src_df));
}
else
{
rtx tmp_rn = gen_reg_rtx (DImode);
rtx tmp_di = gen_reg_rtx (DImode);
/* Extract new DRN mode from operand. */
emit_insn (gen_anddi3 (tmp_rn, operands[0], GEN_INT (0x7)));
emit_insn (gen_ashldi3 (tmp_rn, tmp_rn, GEN_INT (32)));
/* Insert new DRN mode into FPSCR. */
emit_insn (gen_rs6000_mffs (tmp_df));
tmp_di = simplify_gen_subreg (DImode, tmp_df, DFmode, 0);
emit_insn (gen_anddi3 (tmp_di, tmp_di, GEN_INT (0xFFFFFFF8FFFFFFFFULL)));
emit_insn (gen_iordi3 (tmp_di, tmp_di, tmp_rn));
/* We need to write FPSCR field 7 (the fields are numbered [0:15]).
mtfsf updates field i + 8*(1-W) for each bit i set in the FLM mask,
so with L=0 and W=1 we need i = 7, which again is the low-order bit
of FLM, i.e. FLM = 0x01.  */
tmp_df = simplify_gen_subreg (DFmode, tmp_di, DImode, 0);
emit_insn (gen_rs6000_mtfsf_hi (GEN_INT (0x01), tmp_df));
}
DONE;
})
;; Here, we use (set (reg) (unspec:DI [(fix:SI ...)] UNSPEC_FCTIWZ))
;; rather than (set (subreg:SI (reg)) (fix:SI ...))
;; because the first makes it clear that operand 0 is not live
;; before the instruction.
(define_insn "fctiwz_<mode>"
[(set (match_operand:DI 0 "gpc_reg_operand" "=d,wa")
(unspec:DI [(fix:SI
(match_operand:SFDF 1 "gpc_reg_operand" "<Ff>,<Fv>"))]
UNSPEC_FCTIWZ))]
"TARGET_HARD_FLOAT"
"@
fctiwz %0,%1
xscvdpsxws %x0,%x1"
[(set_attr "type" "fp")])
(define_insn "fctiwuz_<mode>"
[(set (match_operand:DI 0 "gpc_reg_operand" "=d,wa")
(unspec:DI [(unsigned_fix:SI
(match_operand:SFDF 1 "gpc_reg_operand" "<Ff>,<Fv>"))]
UNSPEC_FCTIWUZ))]
"TARGET_HARD_FLOAT && TARGET_FCTIWUZ"
"@
fctiwuz %0,%1
xscvdpuxws %x0,%x1"
[(set_attr "type" "fp")])
;; Only optimize (float (fix x)) -> friz if we are in fast-math mode, since
;; the friz instruction does not truncate the value if the floating point
;; value is < LONG_MIN or > LONG_MAX.
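;; A hedged illustration (the function name is invented for the sketch):
;; under -ffast-math a round-trip such as
;;
;;   double chop_to_integer (double x)
;;   {
;;     return (double) (long long) x;   /* friz / xsrdpiz when in range */
;;   }
;;
;; is what the pattern below matches; without fast math the out-of-range
;; cases keep us from using friz directly.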
(define_insn "*friz"
[(set (match_operand:DF 0 "gpc_reg_operand" "=d,wa")
(float:DF (fix:DI (match_operand:DF 1 "gpc_reg_operand" "d,wa"))))]
"TARGET_HARD_FLOAT && TARGET_FPRND
&& flag_unsafe_math_optimizations && !flag_trapping_math && TARGET_FRIZ"
"@
friz %0,%1
xsrdpiz %x0,%x1"
[(set_attr "type" "fp")])
;; Optimize converting SF/DFmode to signed SImode and back to SF/DFmode. On
;; ISA 2.06 systems and earlier, this optimization avoids having to store the
;; value from the FPR/vector unit to the stack, load it into a GPR, sign
;; extend it, store it back on the stack from the GPR, and load it back into
;; the FP/vector unit to do the rounding. If we have direct move (ISA 2.07),
;; disable using store and load to sign/zero extend the value.
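;; A rough illustration of the kind of source this matches (the helper name
;; is made up; the exact sequence depends on the ISA level):
;;
;;   double round_via_int (double x)
;;   {
;;     return (double) (int) x;   /* fctiwz; stfiwx; lfiwax; fcfid on ISA 2.06 */
;;   }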
(define_insn_and_split "*round32<mode>2_fprs"
[(set (match_operand:SFDF 0 "gpc_reg_operand" "=d")
(float:SFDF
(fix:SI (match_operand:SFDF 1 "gpc_reg_operand" "d"))))
(clobber (match_scratch:DI 2 "=d"))
(clobber (match_scratch:DI 3 "=d"))]
"TARGET_HARD_FLOAT
&& <SI_CONVERT_FP> && TARGET_LFIWAX && TARGET_STFIWX && TARGET_FCFID
&& !TARGET_DIRECT_MOVE && can_create_pseudo_p ()"
"#"
""
[(pc)]
{
rtx dest = operands[0];
rtx src = operands[1];
rtx tmp1 = operands[2];
rtx tmp2 = operands[3];
rtx stack = rs6000_allocate_stack_temp (SImode, false, true);
if (GET_CODE (tmp1) == SCRATCH)
tmp1 = gen_reg_rtx (DImode);
if (GET_CODE (tmp2) == SCRATCH)
tmp2 = gen_reg_rtx (DImode);
emit_insn (gen_fctiwz_<mode> (tmp1, src));
emit_insn (gen_stfiwx (stack, tmp1));
emit_insn (gen_lfiwax (tmp2, stack));
emit_insn (gen_floatdi<mode>2 (dest, tmp2));
DONE;
}
[(set_attr "type" "fpload")
(set_attr "length" "16")])
(define_insn_and_split "*roundu32<mode>2_fprs"
[(set (match_operand:SFDF 0 "gpc_reg_operand" "=d")
(unsigned_float:SFDF
(unsigned_fix:SI (match_operand:SFDF 1 "gpc_reg_operand" "d"))))
(clobber (match_scratch:DI 2 "=d"))
(clobber (match_scratch:DI 3 "=d"))]
"TARGET_HARD_FLOAT
&& TARGET_LFIWZX && TARGET_STFIWX && TARGET_FCFIDU && !TARGET_DIRECT_MOVE
&& can_create_pseudo_p ()"
"#"
""
[(pc)]
{
rtx dest = operands[0];
rtx src = operands[1];
rtx tmp1 = operands[2];
rtx tmp2 = operands[3];
rtx stack = rs6000_allocate_stack_temp (SImode, false, true);
if (GET_CODE (tmp1) == SCRATCH)
tmp1 = gen_reg_rtx (DImode);
if (GET_CODE (tmp2) == SCRATCH)
tmp2 = gen_reg_rtx (DImode);
emit_insn (gen_fctiwuz_<mode> (tmp1, src));
emit_insn (gen_stfiwx (stack, tmp1));
emit_insn (gen_lfiwzx (tmp2, stack));
emit_insn (gen_floatdi<mode>2 (dest, tmp2));
DONE;
}
[(set_attr "type" "fpload")
(set_attr "length" "16")])
;; No VSX equivalent to fctid
(define_insn "lrint<mode>di2"
[(set (match_operand:DI 0 "gpc_reg_operand" "=d")
(unspec:DI [(match_operand:SFDF 1 "gpc_reg_operand" "<rreg2>")]
UNSPEC_FCTID))]
"TARGET_HARD_FLOAT && TARGET_FPRND"
"fctid %0,%1"
[(set_attr "type" "fp")])
(define_insn "btrunc<mode>2"
[(set (match_operand:SFDF 0 "gpc_reg_operand" "=<Ff>,<Fv>")
(unspec:SFDF [(match_operand:SFDF 1 "gpc_reg_operand" "<Ff>,<Fv>")]
UNSPEC_FRIZ))]
"TARGET_HARD_FLOAT && TARGET_FPRND"
"@
friz %0,%1
xsrdpiz %x0,%x1"
[(set_attr "type" "fp")])
(define_insn "ceil<mode>2"
[(set (match_operand:SFDF 0 "gpc_reg_operand" "=<Ff>,<Fv>")
(unspec:SFDF [(match_operand:SFDF 1 "gpc_reg_operand" "<Ff>,<Fv>")]
UNSPEC_FRIP))]
"TARGET_HARD_FLOAT && TARGET_FPRND"
"@
frip %0,%1
xsrdpip %x0,%x1"
[(set_attr "type" "fp")])
(define_insn "floor<mode>2"
[(set (match_operand:SFDF 0 "gpc_reg_operand" "=<Ff>,<Fv>")
(unspec:SFDF [(match_operand:SFDF 1 "gpc_reg_operand" "<Ff>,<Fv>")]
UNSPEC_FRIM))]
"TARGET_HARD_FLOAT && TARGET_FPRND"
"@
frim %0,%1
xsrdpim %x0,%x1"
[(set_attr "type" "fp")])
;; No VSX equivalent to frin
(define_insn "round<mode>2"
[(set (match_operand:SFDF 0 "gpc_reg_operand" "=<rreg2>")
(unspec:SFDF [(match_operand:SFDF 1 "gpc_reg_operand" "<rreg2>")]
UNSPEC_FRIN))]
"TARGET_HARD_FLOAT && TARGET_FPRND"
"frin %0,%1"
[(set_attr "type" "fp")])
(define_insn "*xsrdpi<mode>2"
[(set (match_operand:SFDF 0 "gpc_reg_operand" "=<Fv>")
(unspec:SFDF [(match_operand:SFDF 1 "gpc_reg_operand" "<Fv>")]
UNSPEC_XSRDPI))]
"TARGET_HARD_FLOAT && TARGET_VSX"
"xsrdpi %x0,%x1"
[(set_attr "type" "fp")])
(define_expand "lround<mode>di2"
[(set (match_dup 2)
(unspec:SFDF [(match_operand:SFDF 1 "gpc_reg_operand")]
UNSPEC_XSRDPI))
(set (match_operand:DI 0 "gpc_reg_operand")
(unspec:DI [(match_dup 2)]
UNSPEC_FCTID))]
"TARGET_HARD_FLOAT && TARGET_VSX && TARGET_FPRND"
{
operands[2] = gen_reg_rtx (<MODE>mode);
})
; An UNSPEC is used so we don't have to support SImode in FP registers.
(define_insn "stfiwx"
[(set (match_operand:SI 0 "memory_operand" "=Z,Z")
(unspec:SI [(match_operand:DI 1 "gpc_reg_operand" "d,wa")]
UNSPEC_STFIWX))]
"TARGET_PPC_GFXOPT"
"@
stfiwx %1,%y0
stxsiwx %x1,%y0"
[(set_attr "type" "fpstore")
(set_attr "isa" "*,p8v")])
;; If we don't have a direct conversion to single precision, don't enable this
;; conversion for 32-bit without fast math, because we don't have the insn to
;; generate the fixup swizzle to avoid double rounding problems.
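;; As a hedged example (not from the original sources), the simple conversion
;;
;;   float int_to_float (int i)
;;   {
;;     return (float) i;   /* lfiwax or a direct move, then fcfids, on ISA 2.06+ */
;;   }
;;
;; takes the first branch below when both fcfids and lfiwax are available.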
(define_expand "floatsisf2"
[(set (match_operand:SF 0 "gpc_reg_operand")
(float:SF (match_operand:SI 1 "nonimmediate_operand")))]
"TARGET_HARD_FLOAT
&& ((TARGET_FCFIDS && TARGET_LFIWAX)
|| (TARGET_FCFID
&& (TARGET_POWERPC64 || flag_unsafe_math_optimizations)))"
{
if (TARGET_FCFIDS && TARGET_LFIWAX)
{
emit_insn (gen_floatsisf2_lfiwax (operands[0], operands[1]));
DONE;
}
else if (TARGET_FCFID && TARGET_LFIWAX)
{
rtx dfreg = gen_reg_rtx (DFmode);
emit_insn (gen_floatsidf2_lfiwax (dfreg, operands[1]));
emit_insn (gen_truncdfsf2 (operands[0], dfreg));
DONE;
}
else
{
rtx dreg = operands[1];
if (!REG_P (dreg))
dreg = force_reg (SImode, dreg);
dreg = convert_to_mode (DImode, dreg, false);
emit_insn (gen_floatdisf2 (operands[0], dreg));
DONE;
}
})
(define_insn "floatdidf2"
[(set (match_operand:DF 0 "gpc_reg_operand" "=d,wa")
(float:DF (match_operand:DI 1 "gpc_reg_operand" "d,wa")))]
"TARGET_FCFID && TARGET_HARD_FLOAT"
"@
fcfid %0,%1
xscvsxddp %x0,%x1"
[(set_attr "type" "fp")])
; Allow the combiner to merge source memory operands into the conversion so
; that the optimizer/register allocator doesn't try to load the value too
; early into a GPR and then use store/load to move it to an FPR, suffering a
; load-hit-store stall. We split after reload to avoid the trip through the
; GPRs.
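; For illustration (the function name is invented), keeping the memory
; operand in the conversion lets code like
;
;   double load_and_convert (const long long *p)
;   {
;     return (double) *p;   /* lfd/lxsdx straight into an FPR, then fcfid */
;   }
;
; avoid loading *p into a GPR first.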
(define_insn_and_split "*floatdidf2_mem"
[(set (match_operand:DF 0 "gpc_reg_operand" "=d,wa")
(float:DF (match_operand:DI 1 "memory_operand" "m,Z")))
(clobber (match_scratch:DI 2 "=d,wa"))]
"TARGET_HARD_FLOAT && TARGET_FCFID"
"#"
"&& reload_completed"
[(set (match_dup 2) (match_dup 1))
(set (match_dup 0) (float:DF (match_dup 2)))]
""
[(set_attr "length" "8")
(set_attr "type" "fpload")])
(define_expand "floatunsdidf2"
[(set (match_operand:DF 0 "gpc_reg_operand")
(unsigned_float:DF
(match_operand:DI 1 "gpc_reg_operand")))]
"TARGET_HARD_FLOAT && TARGET_FCFIDU"
"")
(define_insn "*floatunsdidf2_fcfidu"
[(set (match_operand:DF 0 "gpc_reg_operand" "=d,wa")
(unsigned_float:DF (match_operand:DI 1 "gpc_reg_operand" "d,wa")))]
"TARGET_HARD_FLOAT && TARGET_FCFIDU"
"@
fcfidu %0,%1
xscvuxddp %x0,%x1"
[(set_attr "type" "fp")])
(define_insn_and_split "*floatunsdidf2_mem"
[(set (match_operand:DF 0 "gpc_reg_operand" "=d,wa")
(unsigned_float:DF (match_operand:DI 1 "memory_operand" "m,Z")))
(clobber (match_scratch:DI 2 "=d,wa"))]
"TARGET_HARD_FLOAT && (TARGET_FCFIDU || VECTOR_UNIT_VSX_P (DFmode))"
"#"
"&& reload_completed"
[(set (match_dup 2) (match_dup 1))
(set (match_dup 0) (unsigned_float:DF (match_dup 2)))]
""
[(set_attr "length" "8")
(set_attr "type" "fpload")])
(define_expand "floatdisf2"
[(set (match_operand:SF 0 "gpc_reg_operand")
(float:SF (match_operand:DI 1 "gpc_reg_operand")))]
"TARGET_FCFID && TARGET_HARD_FLOAT
&& (TARGET_FCFIDS || TARGET_POWERPC64 || flag_unsafe_math_optimizations)"
{
if (!TARGET_FCFIDS)
{
rtx val = operands[1];
if (!flag_unsafe_math_optimizations)
{
rtx label = gen_label_rtx ();
val = gen_reg_rtx (DImode);
emit_insn (gen_floatdisf2_internal2 (val, operands[1], label));
emit_label (label);
}
emit_insn (gen_floatdisf2_internal1 (operands[0], val));
DONE;
}
})
(define_insn "floatdisf2_fcfids"
[(set (match_operand:SF 0 "gpc_reg_operand" "=f,wa")
(float:SF (match_operand:DI 1 "gpc_reg_operand" "d,wa")))]
"TARGET_HARD_FLOAT && TARGET_FCFIDS"
"@
fcfids %0,%1
xscvsxdsp %x0,%x1"
[(set_attr "type" "fp")
(set_attr "isa" "*,p8v")])
(define_insn_and_split "*floatdisf2_mem"
[(set (match_operand:SF 0 "gpc_reg_operand" "=f,wa,wa")
(float:SF (match_operand:DI 1 "memory_operand" "m,m,Z")))
(clobber (match_scratch:DI 2 "=d,d,wa"))]
"TARGET_HARD_FLOAT && TARGET_FCFIDS"
"#"
"&& reload_completed"
[(pc)]
{
emit_move_insn (operands[2], operands[1]);
emit_insn (gen_floatdisf2_fcfids (operands[0], operands[2]));
DONE;
}
[(set_attr "length" "8")
(set_attr "isa" "*,p8v,p8v")])
;; This is not IEEE compliant if rounding mode is "round to nearest".
;; If the DI->DF conversion is inexact, then it's possible to suffer
;; from double rounding.
;; Instead of creating a new cpu type for two FP operations, just use fp.
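;; A worked example (an illustration, not from the original sources): for
;; x = (1ULL << 62) + (1ULL << 38) + (1ULL << 8), converting x directly to
;; SFmode rounds up to 2^62 + 2^39, since the part below the 24-bit SF
;; significand (2^38 + 2^8) is just over half an SF ulp.  Converting to
;; DFmode first drops the 2^8 term (below half a DF ulp of 2^10), leaving
;; 2^62 + 2^38, which is an exact SFmode tie and rounds to even, i.e. 2^62.
;; The two results differ, which is why the floatdisf2 expander above applies
;; the floatdisf2_internal2 fixup before this pattern unless unsafe math
;; optimizations allow skipping it.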
(define_insn_and_split "floatdisf2_internal1"
[(set (match_operand:SF 0 "gpc_reg_operand" "=f")
(float:SF (match_operand:DI 1 "gpc_reg_operand" "d")))
(clobber (match_scratch:DF 2 "=d"))]
"TARGET_FCFID && TARGET_HARD_FLOAT && !TARGET_FCFIDS"
"#"
"&& reload_completed"
[(set (match_dup 2)
(float:DF (match_dup 1)))
(set (match_dup 0)
(float_truncate:SF (match_dup 2)))]
""
[(set_attr "length" "8")
(set_attr "type" "fp")])
;; Twiddles bits to avoid double rounding.
;; Bits that might be truncated when converting to DFmode are replaced
;; by a bit that won't be lost at that stage, but is below the SFmode
;; rounding position.
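;; A rough C equivalent of the twiddle below (an illustrative sketch with an
;; invented helper name, not the exact RTL sequence):
;;
;;   long long fold_sticky_bit (long long x)
;;   {
;;     if ((unsigned long long) ((x >> 53) + 1) >= 2)   /* roughly |x| >= 2^53 */
;;       x = (x & ~0x7ffLL) | ((x & 0xfffLL) ? 0x800 : 0);
;;     return x;   /* x -> DF -> SF now rounds like x -> SF */
;;   }
;;
;; The low 11 bits, which are the ones DImode->DFmode rounding can discard,
;; are folded into bit 11 as a sticky bit; bit 11 survives the DFmode step
;; and still sits below the SFmode rounding position.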
(define_expand "floatdisf2_internal2"
[(parallel [(set (match_dup 3) (ashiftrt:DI (match_operand:DI 1 "")
(const_int 53)))
(clobber (reg:DI CA_REGNO))])
(set (match_operand:DI 0 "") (and:DI (match_dup 1)
(const_int 2047)))
(set (match_dup 3) (plus:DI (match_dup 3)
(const_int 1)))
(set (match_dup 0) (plus:DI (match_dup 0)
(const_int 2047)))
(set (match_dup 4) (compare:CCUNS (match_dup 3)
(const_int 2)))
(set (match_dup 0) (ior:DI (match_dup 0)
(match_dup 1)))
(set (match_dup 0) (and:DI (match_dup 0)
(const_int -2048)))
(set (pc) (if_then_else (geu (match_dup 4) (const_int 0))
(label_ref (match_operand:DI 2 ""))
(pc)))
(set (match_dup 0) (match_dup 1))]
"TARGET_POWERPC64 && TARGET_HARD_FLOAT && !TARGET_FCFIDS"
{
operands[3] = gen_reg_rtx (DImode);
operands[4] = gen_reg_rtx (CCUNSmode);
})
(define_expand "floatunsdisf2"
[(set (match_operand:SF 0 "gpc_reg_operand")
(unsigned_float:SF (match_operand:DI 1 "gpc_reg_operand")))]
"TARGET_HARD_FLOAT && TARGET_FCFIDUS"
"")
(define_insn "floatunsdisf2_fcfidus"
[(set (match_operand:SF 0 "gpc_reg_operand" "=f,wa")
(unsigned_float:SF (match_operand:DI 1 "gpc_reg_operand" "d,wa")))]
"TARGET_HARD_FLOAT && TARGET_FCFIDUS"
"@
fcfidus %0,%1
xscvuxdsp %x0,%x1"
[(set_attr "type" "fp")
(set_attr "isa" "*,p8v")])
(define_insn_and_split "*floatunsdisf2_mem"
[(set (match_operand:SF 0 "gpc_reg_operand" "=f,wa,wa")
(unsigned_float:SF (match_operand:DI 1 "memory_operand" "m,m,Z")))
(clobber (match_scratch:DI 2 "=d,d,wa"))]
"TARGET_HARD_FLOAT && TARGET_FCFIDUS"
"#"
"&& reload_completed"
[(pc)]
{
emit_move_insn (operands[2], operands[1]);
emit_insn (gen_floatunsdisf2_fcfidus (operands[0], operands[2]));
DONE;
}
[(set_attr "type" "fpload")
(set_attr "length" "8")
(set_attr "isa" "*,p8v,p8v")])
;; Define the TImode operations that can be done in a small number
;; of instructions. The & constraints are to prevent the register
;; allocator from allocating registers that overlap with the inputs
;; (for example, having an input in 7,8 and an output in 6,7). We
;; also allow for the output being the same as one of the inputs.
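;; For example (an illustration, not from the original sources), a 128-bit
;; add such as
;;
;;   unsigned __int128 add128 (unsigned __int128 a, unsigned __int128 b)
;;   {
;;     return a + b;   /* one addc/adde pair via adddi3_carry{,_in} */
;;   }
;;
;; expands through the addti3 pattern below on 64-bit targets.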
(define_expand "addti3"
[(set (match_operand:TI 0 "gpc_reg_operand")
(plus:TI (match_operand:TI 1 "gpc_reg_operand")
(match_operand:TI 2 "reg_or_short_operand")))]
"TARGET_64BIT"
{
rtx lo0 = gen_lowpart (DImode, operands[0]);
rtx lo1 = gen_lowpart (DImode, operands[1]);
rtx lo2 = gen_lowpart (DImode, operands[2]);
rtx hi0 = gen_highpart (DImode, operands[0]);
rtx hi1 = gen_highpart (DImode, operands[1]);
rtx hi2 = gen_highpart_mode (DImode, TImode, operands[2]);
if (!reg_or_short_operand (lo2, DImode))
lo2 = force_reg (DImode, lo2);
if (!adde_operand (hi2, DImode))
hi2 = force_reg (DImode, hi2);
emit_insn (gen_adddi3_carry (lo0, lo1, lo2));
emit_insn (gen_adddi3_carry_in (hi0, hi1, hi2));
DONE;
})
(define_expand "subti3"
[(set (match_operand:TI 0 "gpc_reg_operand")
(minus:TI (match_operand:TI 1 "reg_or_short_operand")
(match_operand:TI 2 "gpc_reg_operand")))]
"TARGET_64BIT"
{
rtx lo0 = gen_lowpart (DImode, operands[0]);
rtx lo1 = gen_lowpart (DImode, operands[1]);
rtx lo2 = gen_lowpart (DImode, operands[2]);
rtx hi0 = gen_highpart (DImode, operands[0]);
rtx hi1 = gen_highpart_mode (DImode, TImode, operands[1]);
rtx hi2 = gen_highpart (DImode, operands[2]);
if (!reg_or_short_operand (lo1, DImode))
lo1 = force_reg (DImode, lo1);
if (!adde_operand (hi1, DImode))
hi1 = force_reg (DImode, hi1);
emit_insn (gen_subfdi3_carry (lo0, lo2, lo1));
emit_insn (gen_subfdi3_carry_in (hi0, hi2, hi1));
DONE;
})
;; 128-bit logical operations expanders
(define_expand "and<mode>3"
[(set (match_operand:BOOL_128 0 "vlogical_operand")
(and:BOOL_128 (match_operand:BOOL_128 1 "vlogical_operand")
(match_operand:BOOL_128 2 "vlogical_operand")))]
""
"")
(define_expand "ior<mode>3"
[(set (match_operand:BOOL_128 0 "vlogical_operand")
(ior:BOOL_128 (match_operand:BOOL_128 1 "vlogical_operand")
(match_operand:BOOL_128 2 "vlogical_operand")))]
""
"")
(define_expand "xor<mode>3"
[(set (match_operand:BOOL_128 0 "vlogical_operand")
(xor:BOOL_128 (match_operand:BOOL_128 1 "vlogical_operand")
(match_operand:BOOL_128 2 "vlogical_operand")))]
""
"")
(define_expand "nor<mode>3"
[(set (match_operand:BOOL_128 0 "vlogical_operand")
(and:BOOL_128
(not:BOOL_128 (match_operand:BOOL_128 1 "vlogical_operand"))
(not:BOOL_128 (match_operand:BOOL_128 2 "vlogical_operand"))))]
""
"")
(define_expand "andc<mode>3"
[(set (match_operand:BOOL_128 0 "vlogical_operand")
(and:BOOL_128
(not:BOOL_128 (match_operand:BOOL_128 2 "vlogical_operand"))
(match_operand:BOOL_128 1 "vlogical_operand")))]
""
"")
;; Power8 vector logical instructions.
(define_expand "eqv<mode>3"
[(set (match_operand:BOOL_128 0 "vlogical_operand")
(not:BOOL_128
(xor:BOOL_128 (match_operand:BOOL_128 1 "vlogical_operand")
(match_operand:BOOL_128 2 "vlogical_operand"))))]
"<MODE>mode == TImode || <MODE>mode == PTImode || TARGET_P8_VECTOR"
"")
;; Rewrite nand into canonical form
(define_expand "nand<mode>3"
[(set (match_operand:BOOL_128 0 "vlogical_operand")
(ior:BOOL_128
(not:BOOL_128 (match_operand:BOOL_128 1 "vlogical_operand"))
(not:BOOL_128 (match_operand:BOOL_128 2 "vlogical_operand"))))]
"<MODE>mode == TImode || <MODE>mode == PTImode || TARGET_P8_VECTOR"
"")
;; The canonical form is to have the negated element first, so we need to
;; reverse arguments.
(define_expand "orc<mode>3"
[(set (match_operand:BOOL_128 0 "vlogical_operand")
(ior:BOOL_128
(not:BOOL_128 (match_operand:BOOL_128 2 "vlogical_operand"))
(match_operand:BOOL_128 1 "vlogical_operand")))]
"<MODE>mode == TImode || <MODE>mode == PTImode || TARGET_P8_VECTOR"
"")
;; 128-bit logical operations insns and split operations
(define_insn_and_split "*and<mode>3_internal"
[(set (match_operand:BOOL_128 0 "vlogical_operand" "=<BOOL_REGS_OUTPUT>")
(and:BOOL_128
(match_operand:BOOL_128 1 "vlogical_operand" "%<BOOL_REGS_OP1>")
(match_operand:BOOL_128 2 "vlogical_operand" "<BOOL_REGS_OP2>")))]
""
{
if (TARGET_VSX && vsx_register_operand (operands[0], <MODE>mode))
return "xxland %x0,%x1,%x2";
if (TARGET_ALTIVEC && altivec_register_operand (operands[0], <MODE>mode))
return "vand %0,%1,%2";
return "#";
}
"reload_completed && int_reg_operand (operands[0], <MODE>mode)"
[(const_int 0)]
{
rs6000_split_logical (operands, AND, false, false, false);
DONE;
}
[(set (attr "type")
(if_then_else
(match_test "vsx_register_operand (operands[0], <MODE>mode)")
(const_string "veclogical")
(const_string "integer")))
(set (attr "length")
(if_then_else
(match_test "vsx_register_operand (operands[0], <MODE>mode)")
(const_string "4")
(if_then_else
(match_test "TARGET_POWERPC64")
(const_string "8")
(const_string "16"))))])
;; 128-bit IOR/XOR
(define_insn_and_split "*bool<mode>3_internal"
[(set (match_operand:BOOL_128 0 "vlogical_operand" "=<BOOL_REGS_OUTPUT>")
(match_operator:BOOL_128 3 "boolean_or_operator"
[(match_operand:BOOL_128 1 "vlogical_operand" "%<BOOL_REGS_OP1>")
(match_operand:BOOL_128 2 "vlogical_operand" "<BOOL_REGS_OP2>")]))]
""
{
if (TARGET_VSX && vsx_register_operand (operands[0], <MODE>mode))
return "xxl%q3 %x0,%x1,%x2";
if (TARGET_ALTIVEC && altivec_register_operand (operands[0], <MODE>mode))
return "v%q3 %0,%1,%2";
return "#";
}
"reload_completed && int_reg_operand (operands[0], <MODE>mode)"
[(const_int 0)]
{
rs6000_split_logical (operands, GET_CODE (operands[3]), false, false, false);
DONE;
}
[(set (attr "type")
(if_then_else
(match_test "vsx_register_operand (operands[0], <MODE>mode)")
(const_string "veclogical")
(const_string "integer")))
(set (attr "length")
(if_then_else
(match_test "vsx_register_operand (operands[0], <MODE>mode)")
(const_string "4")
(if_then_else
(match_test "TARGET_POWERPC64")
(const_string "8")
(const_string "16"))))])
;; 128-bit ANDC/ORC
(define_insn_and_split "*boolc<mode>3_internal1"
[(set (match_operand:BOOL_128 0 "vlogical_operand" "=<BOOL_REGS_OUTPUT>")
(match_operator:BOOL_128 3 "boolean_operator"
[(not:BOOL_128
(match_operand:BOOL_128 2 "vlogical_operand" "<BOOL_REGS_OP2>"))
(match_operand:BOOL_128 1 "vlogical_operand" "<BOOL_REGS_OP1>")]))]
"TARGET_P8_VECTOR || (GET_CODE (operands[3]) == AND)"
{
if (TARGET_VSX && vsx_register_operand (operands[0], <MODE>mode))
return "xxl%q3 %x0,%x1,%x2";
if (TARGET_ALTIVEC && altivec_register_operand (operands[0], <MODE>mode))
return "v%q3 %0,%1,%2";
return "#";
}
"(TARGET_P8_VECTOR || (GET_CODE (operands[3]) == AND))
&& reload_completed && int_reg_operand (operands[0], <MODE>mode)"
[(const_int 0)]
{
rs6000_split_logical (operands, GET_CODE (operands[3]), false, false, true);
DONE;
}
[(set (attr "type")
(if_then_else
(match_test "vsx_register_operand (operands[0], <MODE>mode)")
(const_string "veclogical")
(const_string "integer")))
(set (attr "length")
(if_then_else
(match_test "vsx_register_operand (operands[0], <MODE>mode)")
(const_string "4")
(if_then_else
(match_test "TARGET_POWERPC64")
(const_string "8")
(const_string "16"))))])
(define_insn_and_split "*boolc<mode>3_internal2"
[(set (match_operand:TI2 0 "int_reg_operand" "=&r,r,r")
(match_operator:TI2 3 "boolean_operator"
[(not:TI2
(match_operand:TI2 2 "int_reg_operand" "r,0,r"))
(match_operand:TI2 1 "int_reg_operand" "r,r,0")]))]
"!TARGET_P8_VECTOR && (GET_CODE (operands[3]) != AND)"
"#"
"reload_completed && !TARGET_P8_VECTOR && (GET_CODE (operands[3]) != AND)"
[(const_int 0)]
{
rs6000_split_logical (operands, GET_CODE (operands[3]), false, false, true);
DONE;
}
[(set_attr "type" "integer")
(set (attr "length")
(if_then_else
(match_test "TARGET_POWERPC64")
(const_string "8")
(const_string "16")))])
;; 128-bit NAND/NOR
(define_insn_and_split "*boolcc<mode>3_internal1"
[(set (match_operand:BOOL_128 0 "vlogical_operand" "=<BOOL_REGS_OUTPUT>")
(match_operator:BOOL_128 3 "boolean_operator"
[(not:BOOL_128
(match_operand:BOOL_128 1 "vlogical_operand" "<BOOL_REGS_OP1>"))
(not:BOOL_128
(match_operand:BOOL_128 2 "vlogical_operand" "<BOOL_REGS_OP2>"))]))]
"TARGET_P8_VECTOR || (GET_CODE (operands[3]) == AND)"
{
if (TARGET_VSX && vsx_register_operand (operands[0], <MODE>mode))
return "xxl%q3 %x0,%x1,%x2";
if (TARGET_ALTIVEC && altivec_register_operand (operands[0], <MODE>mode))
return "v%q3 %0,%1,%2";
return "#";
}
"(TARGET_P8_VECTOR || (GET_CODE (operands[3]) == AND))
&& reload_completed && int_reg_operand (operands[0], <MODE>mode)"
[(const_int 0)]
{
rs6000_split_logical (operands, GET_CODE (operands[3]), false, true, true);
DONE;
}
[(set (attr "type")
(if_then_else
(match_test "vsx_register_operand (operands[0], <MODE>mode)")
(const_string "veclogical")
(const_string "integer")))
(set (attr "length")
(if_then_else
(match_test "vsx_register_operand (operands[0], <MODE>mode)")
(const_string "4")
(if_then_else
(match_test "TARGET_POWERPC64")
(const_string "8")
(const_string "16"))))])
(define_insn_and_split "*boolcc<mode>3_internal2"
[(set (match_operand:TI2 0 "int_reg_operand" "=&r,r,r")
(match_operator:TI2 3 "boolean_operator"
[(not:TI2
(match_operand:TI2 1 "int_reg_operand" "r,0,r"))
(not:TI2
(match_operand:TI2 2 "int_reg_operand" "r,r,0"))]))]
"!TARGET_P8_VECTOR && (GET_CODE (operands[3]) != AND)"
"#"
"reload_completed && !TARGET_P8_VECTOR && (GET_CODE (operands[3]) != AND)"
[(const_int 0)]
{
rs6000_split_logical (operands, GET_CODE (operands[3]), false, true, true);
DONE;
}
[(set_attr "type" "integer")
(set (attr "length")
(if_then_else
(match_test "TARGET_POWERPC64")
(const_string "8")
(const_string "16")))])
;; 128-bit EQV
(define_insn_and_split "*eqv<mode>3_internal1"
[(set (match_operand:BOOL_128 0 "vlogical_operand" "=<BOOL_REGS_OUTPUT>")
(not:BOOL_128
(xor:BOOL_128
(match_operand:BOOL_128 1 "vlogical_operand" "<BOOL_REGS_OP1>")
(match_operand:BOOL_128 2 "vlogical_operand" "<BOOL_REGS_OP2>"))))]
"TARGET_P8_VECTOR"
{
if (vsx_register_operand (operands[0], <MODE>mode))
return "xxleqv %x0,%x1,%x2";
return "#";
}
"TARGET_P8_VECTOR && reload_completed
&& int_reg_operand (operands[0], <MODE>mode)"
[(const_int 0)]
{
rs6000_split_logical (operands, XOR, true, false, false);
DONE;
}
[(set (attr "type")
(if_then_else
(match_test "vsx_register_operand (operands[0], <MODE>mode)")
(const_string "veclogical")
(const_string "integer")))
(set (attr "length")
(if_then_else
(match_test "vsx_register_operand (operands[0], <MODE>mode)")
(const_string "4")
(if_then_else
(match_test "TARGET_POWERPC64")
(const_string "8")
(const_string "16"))))])
(define_insn_and_split "*eqv<mode>3_internal2"
[(set (match_operand:TI2 0 "int_reg_operand" "=&r,r,r")
(not:TI2
(xor:TI2
(match_operand:TI2 1 "int_reg_operand" "r,0,r")
(match_operand:TI2 2 "int_reg_operand" "r,r,0"))))]
"!TARGET_P8_VECTOR"
"#"
"reload_completed && !TARGET_P8_VECTOR"
[(const_int 0)]
{
rs6000_split_logical (operands, XOR, true, false, false);
DONE;
}
[(set_attr "type" "integer")
(set (attr "length")
(if_then_else
(match_test "TARGET_POWERPC64")
(const_string "8")
(const_string "16")))])
;; 128-bit one's complement
(define_insn_and_split "one_cmpl<mode>2"
[(set (match_operand:BOOL_128 0 "vlogical_operand" "=<BOOL_REGS_OUTPUT>")
(not:BOOL_128
(match_operand:BOOL_128 1 "vlogical_operand" "<BOOL_REGS_UNARY>")))]
""
{
if (TARGET_VSX && vsx_register_operand (operands[0], <MODE>mode))
return "xxlnor %x0,%x1,%x1";
if (TARGET_ALTIVEC && altivec_register_operand (operands[0], <MODE>mode))
return "vnor %0,%1,%1";
return "#";
}
"reload_completed && int_reg_operand (operands[0], <MODE>mode)"
[(const_int 0)]
{
rs6000_split_logical (operands, NOT, false, false, false);
DONE;
}
[(set (attr "type")
(if_then_else
(match_test "vsx_register_operand (operands[0], <MODE>mode)")
(const_string "veclogical")
(const_string "integer")))
(set (attr "length")
(if_then_else
(match_test "vsx_register_operand (operands[0], <MODE>mode)")
(const_string "4")
(if_then_else
(match_test "TARGET_POWERPC64")
(const_string "8")
(const_string "16"))))])
;; Now define ways of moving data around.
;; Set up a register with a value from the GOT table
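;; For illustration only (register numbers depend on the allocator): with
;; -fpic and the small GOT model on 32-bit SVR4, a global access such as
;;
;;   extern int counter;
;;   int get_counter (void) { return counter; }
;;
;; is expected to become roughly "lwz 9,counter@got(30); lwz 3,0(9)", where
;; the first load comes from the movsi_got pattern below.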
(define_expand "movsi_got"
[(set (match_operand:SI 0 "gpc_reg_operand")
(unspec:SI [(match_operand:SI 1 "got_operand")
(match_dup 2)] UNSPEC_MOVSI_GOT))]
"DEFAULT_ABI == ABI_V4 && flag_pic == 1"
{
if (GET_CODE (operands[1]) == CONST)
{
rtx offset = const0_rtx;
HOST_WIDE_INT value;
operands[1] = eliminate_constant_term (XEXP (operands[1], 0), &offset);
value = INTVAL (offset);
if (value != 0)
{
rtx tmp = (!can_create_pseudo_p ()
? operands[0]
: gen_reg_rtx (Pmode));
emit_insn (gen_movsi_got (tmp, operands[1]));
emit_insn (gen_addsi3 (operands[0], tmp, offset));
DONE;
}
}
operands[2] = rs6000_got_register (operands[1]);
})
(define_insn "*movsi_got_internal"
[(set (match_operand:SI 0 "gpc_reg_operand" "=r")
(unspec:SI [(match_operand:SI 1 "got_no_const_operand" "")
(match_operand:SI 2 "gpc_reg_operand" "b")]
UNSPEC_MOVSI_GOT))]
"DEFAULT_ABI == ABI_V4 && flag_pic == 1"
"lwz %0,%a1@got(%2)"
[(set_attr "type" "load")])
;; Used by sched, shorten_branches and final when the GOT pseudo reg
;; didn't get allocated to a hard register.
(define_split
[(set (match_operand:SI 0 "gpc_reg_operand")
(unspec:SI [(match_operand:SI 1 "got_no_const_operand")
(match_operand:SI 2 "memory_operand")]
UNSPEC_MOVSI_GOT))]
"DEFAULT_ABI == ABI_V4
&& flag_pic == 1
&& reload_completed"
[(set (match_dup 0) (match_dup 2))
(set (match_dup 0) (unspec:SI [(match_dup 1)(match_dup 0)]
UNSPEC_MOVSI_GOT))]
"")
;; MR LA
;; LWZ LFIWZX LXSIWZX
;; STW STFIWX STXSIWX
;; LI LIS PLI #
;; XXLOR XXSPLTIB 0 XXSPLTIB -1 VSPLTISW
;; XXLXOR 0 XXLORC -1 P9 const
;; MTVSRWZ MFVSRWZ
;; MF%1 MT%0 NOP
(define_insn "*movsi_internal1"
[(set (match_operand:SI 0 "nonimmediate_operand"
"=r, r,
r, d, v,
m, Z, Z,
r, r, r, r,
wa, wa, wa, v,
wa, v, v,
wa, r,
r, *h, *h")
(match_operand:SI 1 "input_operand"
"r, U,
m, Z, Z,
r, d, v,
I, L, eI, n,
wa, O, wM, wB,
O, wM, wS,
r, wa,
*h, r, 0"))]
"gpc_reg_operand (operands[0], SImode)
|| gpc_reg_operand (operands[1], SImode)"
"@
mr %0,%1
la %0,%a1
lwz%U1%X1 %0,%1
lfiwzx %0,%y1
lxsiwzx %x0,%y1
stw%U0%X0 %1,%0
stfiwx %1,%y0
stxsiwx %x1,%y0
li %0,%1
lis %0,%v1
li %0,%1
#
xxlor %x0,%x1,%x1
xxspltib %x0,0
xxspltib %x0,255
vspltisw %0,%1
xxlxor %x0,%x0,%x0
xxlorc %x0,%x0,%x0
#
mtvsrwz %x0,%1
mfvsrwz %0,%x1
mf%1 %0
mt%0 %1
nop"
[(set_attr "type"
"*, *,
load, fpload, fpload,
store, fpstore, fpstore,
*, *, *, *,
veclogical, vecsimple, vecsimple, vecsimple,
veclogical, veclogical, vecsimple,
mffgpr, mftgpr,
*, *, *")
(set_attr "length"
"*, *,
*, *, *,
*, *, *,
*, *, *, 8,
*, *, *, *,
*, *, 8,
*, *,
*, *, *")
(set_attr "isa"
"*, *,
*, p8v, p8v,
*, p8v, p8v,
*, *, p10, *,
p8v, p9v, p9v, p8v,
p9v, p8v, p9v,
p8v, p8v,
*, *, *")])
;; Like movsi, but adjust an SF value to be used in an SI context, i.e.
;; (set (reg:SI ...) (subreg:SI (reg:SF ...) 0))
;;
;; Because SF values are actually stored as DF values within the vector
;; registers, we need to convert the value to the vector SF format when
;; we need to use the bits in a union or similar cases. We only need
;; to do this transformation when the value is in a vector register. Loads,
;; stores, and transfers within GPRs are assumed to be safe.
;;
;; This is a more general case of reload_gpr_from_vsxsf. That insn must have
;; no alternatives, because the call is created as part of secondary_reload,
;; and operand #2's register class is used to allocate the temporary register.
;; This function is called before reload, and it creates the temporary as
;; needed.
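;; A hedged illustration of the kind of source that needs this (the function
;; name is invented):
;;
;;   int bits_of_float (float x)
;;   {
;;     union { float f; int i; } u;
;;     u.f = x;
;;     return u.i;   /* xscvdpspn plus a direct move when x lives in a VSX reg */
;;   }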
;; MR LWZ LFIWZX LXSIWZX STW
;; STFS STXSSP STXSSPX VSX->GPR VSX->VSX
;; MTVSRWZ
(define_insn_and_split "movsi_from_sf"
[(set (match_operand:SI 0 "nonimmediate_operand"
"=r, r, ?*d, ?*v, m,
m, wY, Z, r, ?*wa,
wa")
(unspec:SI [(match_operand:SF 1 "input_operand"
"r, m, Z, Z, r,
f, v, wa, wa, wa,
r")]
UNSPEC_SI_FROM_SF))
(clobber (match_scratch:V4SF 2
"=X, X, X, X, X,
X, X, X, wa, X,
X"))]
"TARGET_NO_SF_SUBREG
&& (register_operand (operands[0], SImode)
|| register_operand (operands[1], SFmode))"
"@
mr %0,%1
lwz%U1%X1 %0,%1
lfiwzx %0,%y1
lxsiwzx %x0,%y1
stw%U0%X0 %1,%0
stfs%U0%X0 %1,%0
stxssp %1,%0
stxsspx %x1,%y0
#
xscvdpspn %x0,%x1
mtvsrwz %x0,%1"
"&& reload_completed
&& int_reg_operand (operands[0], SImode)
&& vsx_reg_sfsubreg_ok (operands[1], SFmode)"
[(const_int 0)]
{
rtx op0 = operands[0];
rtx op1 = operands[1];
rtx op2 = operands[2];
rtx op0_di = gen_rtx_REG (DImode, reg_or_subregno (op0));
rtx op2_si = gen_rtx_REG (SImode, reg_or_subregno (op2));
emit_insn (gen_vsx_xscvdpspn_scalar (op2, op1));
emit_insn (gen_zero_extendsidi2 (op0_di, op2_si));
DONE;
}
[(set_attr "type"
"*, load, fpload, fpload, store,
fpstore, fpstore, fpstore, mftgpr, fp,
mffgpr")
(set_attr "length"
"*, *, *, *, *,
*, *, *, 8, *,
*")
(set_attr "isa"
"*, *, p8v, p8v, *,
*, p9v, p8v, p8v, p8v,
p8v")])
;; movsi_from_sf with zero extension
;;
;; RLDICL LWZ LFIWZX LXSIWZX VSX->GPR
;; VSX->VSX MTVSRWZ
(define_insn_and_split "*movdi_from_sf_zero_ext"
[(set (match_operand:DI 0 "gpc_reg_operand"
"=r, r, ?*d, ?*v, r,
?v, wa")
(zero_extend:DI
(unspec:SI [(match_operand:SF 1 "input_operand"
"r, m, Z, Z, wa,
wa, r")]
UNSPEC_SI_FROM_SF)))
(clobber (match_scratch:V4SF 2
"=X, X, X, X, wa,
wa, X"))]
"TARGET_DIRECT_MOVE_64BIT
&& (register_operand (operands[0], DImode)
|| register_operand (operands[1], SImode))"
"@
rldicl %0,%1,0,32
lwz%U1%X1 %0,%1
lfiwzx %0,%y1
lxsiwzx %x0,%y1
#
#
mtvsrwz %x0,%1"
"&& reload_completed
&& register_operand (operands[0], DImode)
&& vsx_reg_sfsubreg_ok (operands[1], SFmode)"
[(const_int 0)]
{
rtx op0 = operands[0];
rtx op1 = operands[1];
rtx op2 = operands[2];
rtx op2_si = gen_rtx_REG (SImode, reg_or_subregno (op2));
emit_insn (gen_vsx_xscvdpspn_scalar (op2, op1));
emit_insn (gen_zero_extendsidi2 (op0, op2_si));
DONE;
}
[(set_attr "type"
"*, load, fpload, fpload, two,
two, mffgpr")
(set_attr "length"
"*, *, *, *, 8,
8, *")
(set_attr "isa"
"*, *, p8v, p8v, p8v,
p9v, p8v")])
;; Like movsi_from_sf, but combine a convert from DFmode to SFmode before
;; moving it to SImode. We cannot do an SFmode store without doing the
;; conversion explicitly, since that doesn't work in most cases if the input
;; isn't representable in SFmode. Use XSCVDPSP instead of XSCVDPSPN, since
;; the former handles cases where the input will not fit in SFmode, and the
;; latter assumes the value has already been rounded.
(define_insn "*movsi_from_df"
[(set (match_operand:SI 0 "gpc_reg_operand" "=wa")
(unspec:SI [(float_truncate:SF
(match_operand:DF 1 "gpc_reg_operand" "wa"))]
UNSPEC_SI_FROM_SF))]
"TARGET_NO_SF_SUBREG"
"xscvdpsp %x0,%x1"
[(set_attr "type" "fp")])
;; Split a load of a large constant into the appropriate two-insn
;; sequence.
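;; For example (an illustration; the function name is made up), a constant
;; that does not fit in 16 bits is built with a lis/ori style pair:
;;
;;   int big_constant (void)
;;   {
;;     return 0x12345678;   /* lis 3,0x1234; ori 3,3,0x5678 */
;;   }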
(define_split
[(set (match_operand:SI 0 "gpc_reg_operand")
(match_operand:SI 1 "const_int_operand"))]
"num_insns_constant (operands[1], SImode) > 1"
[(set (match_dup 0)
(match_dup 2))
(set (match_dup 0)
(ior:SI (match_dup 0)
(match_dup 3)))]
{
if (rs6000_emit_set_const (operands[0], operands[1]))
DONE;
else
FAIL;
})
;; Split loading -128..127 to use XXSPLTIB and VEXTSB2D
(define_split
[(set (match_operand:DI 0 "altivec_register_operand")
(match_operand:DI 1 "xxspltib_constant_split"))]
"TARGET_P9_VECTOR && reload_completed"
[(const_int 0)]
{
rtx op0 = operands[0];
rtx op1 = operands[1];
int r = REGNO (op0);
rtx op0_v16qi = gen_rtx_REG (V16QImode, r);
emit_insn (gen_xxspltib_v16qi (op0_v16qi, op1));
emit_insn (gen_vsx_sign_extend_qi_di (operands[0], op0_v16qi));
DONE;
})
(define_insn "*mov<mode>_internal2"
[(set (match_operand:CC 2 "cc_reg_operand" "=y,x,?y")
(compare:CC (match_operand:P 1 "gpc_reg_operand" "0,r,r")
(const_int 0)))
(set (match_operand:P 0 "gpc_reg_operand" "=r,r,r") (match_dup 1))]
""
"@
cmp<wd>i %2,%0,0
mr. %0,%1
#"
[(set_attr "type" "cmp,logical,cmp")
(set_attr "dot" "yes")
(set_attr "length" "4,4,8")])
(define_split
[(set (match_operand:CC 2 "cc_reg_not_cr0_operand")
(compare:CC (match_operand:P 1 "gpc_reg_operand")
(const_int 0)))
(set (match_operand:P 0 "gpc_reg_operand") (match_dup 1))]
"reload_completed"
[(set (match_dup 0) (match_dup 1))
(set (match_dup 2)
(compare:CC (match_dup 0)
(const_int 0)))]
"")
(define_expand "mov<mode>"
[(set (match_operand:INT 0 "general_operand")
(match_operand:INT 1 "any_operand"))]
""
{
rs6000_emit_move (operands[0], operands[1], <MODE>mode);
DONE;
})
;; MR LHZ/LBZ LXSI*ZX STH/STB STXSI*X LI
;; XXLOR load 0 load -1 VSPLTI* # MFVSRWZ
;; MTVSRWZ MF%1 MT%1 NOP
(define_insn "*mov<mode>_internal"
[(set (match_operand:QHI 0 "nonimmediate_operand"
"=r, r, wa, m, Z, r,
wa, wa, wa, v, ?v, r,
wa, r, *c*l, *h")
(match_operand:QHI 1 "input_operand"
"r, m, Z, r, wa, i,
wa, O, wM, wB, wS, wa,
r, *h, r, 0"))]
"gpc_reg_operand (operands[0], <MODE>mode)
|| gpc_reg_operand (operands[1], <MODE>mode)"
"@
mr %0,%1
l<wd>z%U1%X1 %0,%1
lxsi<wd>zx %x0,%y1
st<wd>%U0%X0 %1,%0
stxsi<wd>x %x1,%y0
li %0,%1
xxlor %x0,%x1,%x1
xxspltib %x0,0
xxspltib %x0,255
vspltis<wd> %0,%1
#
mfvsrwz %0,%x1
mtvsrwz %x0,%1
mf%1 %0
mt%0 %1
nop"
[(set_attr "type"
"*, load, fpload, store, fpstore, *,
vecsimple, vecperm, vecperm, vecperm, vecperm, mftgpr,
mffgpr, mfjmpr, mtjmpr, *")
(set_attr "length"
"*, *, *, *, *, *,
*, *, *, *, 8, *,
*, *, *, *")
(set_attr "isa"
"*, *, p9v, *, p9v, *,
p9v, p9v, p9v, p9v, p9v, p9v,
p9v, *, *, *")])
;; Here is how to move condition codes around. When we store CC data in
;; an integer register or memory, we store just the high-order 4 bits.
;; This lets us not shift in the most common case of CR0.
(define_expand "movcc"
[(set (match_operand:CC 0 "nonimmediate_operand")
(match_operand:CC 1 "nonimmediate_operand"))]
""
"")
(define_mode_iterator CC_any [CC CCUNS CCEQ CCFP])
(define_insn "*movcc_<mode>"
[(set (match_operand:CC_any 0 "nonimmediate_operand"
"=y,x,?y,y,r,r,r,r, r,*c*l,r,m")
(match_operand:CC_any 1 "general_operand"
" y,r, r,O,x,y,r,I,*h, r,m,r"))]
"register_operand (operands[0], <MODE>mode)
|| register_operand (operands[1], <MODE>mode)"
"@
mcrf %0,%1
mtcrf 128,%1
rlwinm %1,%1,%F0,0xffffffff\;mtcrf %R0,%1\;rlwinm %1,%1,%f0,0xffffffff
crxor %0,%0,%0
mfcr %0%Q1
mfcr %0%Q1\;rlwinm %0,%0,%f1,0xf0000000
mr %0,%1
li %0,%1
mf%1 %0
mt%0 %1
lwz%U1%X1 %0,%1
stw%U0%X0 %1,%0"
[(set_attr_alternative "type"
[(const_string "cr_logical")
(const_string "mtcr")
(const_string "mtcr")
(const_string "cr_logical")
(if_then_else (match_test "TARGET_MFCRF")
(const_string "mfcrf") (const_string "mfcr"))
(if_then_else (match_test "TARGET_MFCRF")
(const_string "mfcrf") (const_string "mfcr"))
(const_string "integer")
(const_string "integer")
(const_string "mfjmpr")
(const_string "mtjmpr")
(const_string "load")
(const_string "store")])
(set_attr "length" "*,*,12,*,*,8,*,*,*,*,*,*")])
;; For floating-point, we normally deal with the floating-point registers
;; unless -msoft-float is used. The sole exception is that parameter passing
;; can produce floating-point values in fixed-point registers. Unless the
;; value is a simple constant or already in memory, we deal with this by
;; allocating memory and copying the value explicitly via that memory location.
;; Move 32-bit binary/decimal floating point
(define_expand "mov<mode>"
[(set (match_operand:FMOVE32 0 "nonimmediate_operand")
(match_operand:FMOVE32 1 "any_operand"))]
"<fmove_ok>"
{
rs6000_emit_move (operands[0], operands[1], <MODE>mode);
DONE;
})
(define_split
[(set (match_operand:FMOVE32 0 "gpc_reg_operand")
(match_operand:FMOVE32 1 "const_double_operand"))]
"reload_completed
&& ((REG_P (operands[0]) && REGNO (operands[0]) <= 31)
|| (SUBREG_P (operands[0])
&& REG_P (SUBREG_REG (operands[0]))
&& REGNO (SUBREG_REG (operands[0])) <= 31))"
[(set (match_dup 2) (match_dup 3))]
{
long l;
<real_value_to_target> (*CONST_DOUBLE_REAL_VALUE (operands[1]), l);
if (! TARGET_POWERPC64)
operands[2] = operand_subword (operands[0], 0, 0, <MODE>mode);
else
operands[2] = gen_lowpart (SImode, operands[0]);
operands[3] = gen_int_mode (l, SImode);
})
;; Originally, we tried to keep movsf and movsd common, but the differences in
;; addressing were making it rather difficult to hide with mode attributes. In
;; particular for SFmode, on ISA 2.07 (power8) systems, having the GPR store
;; before the VSX stores meant that the register allocator would tend to do a
;; direct move to the GPR (which involves conversion from scalar to
;; vector/memory formats) to save values in the traditional Altivec registers,
;; while SDmode had problems on power6 if the GPR store was not first due to
;; the power6 not having an integer store operation.
;;
;; LWZ LFS LXSSP LXSSPX STFS STXSSP
;; STXSSPX STW XXLXOR LI FMR XSCPSGNDP
;; MR MT<x> MF<x> NOP
(define_insn "movsf_hardfloat"
[(set (match_operand:SF 0 "nonimmediate_operand"
"=!r, f, v, wa, m, wY,
Z, m, wa, !r, f, wa,
!r, *c*l, !r, *h")
(match_operand:SF 1 "input_operand"
"m, m, wY, Z, f, v,
wa, r, j, j, f, wa,
r, r, *h, 0"))]
"(register_operand (operands[0], SFmode)
|| register_operand (operands[1], SFmode))
&& TARGET_HARD_FLOAT
&& (TARGET_ALLOW_SF_SUBREG
|| valid_sf_si_move (operands[0], operands[1], SFmode))"
"@
lwz%U1%X1 %0,%1
lfs%U1%X1 %0,%1
lxssp %0,%1
lxsspx %x0,%y1
stfs%U0%X0 %1,%0
stxssp %1,%0
stxsspx %x1,%y0
stw%U0%X0 %1,%0
xxlxor %x0,%x0,%x0
li %0,0
fmr %0,%1
xscpsgndp %x0,%x1,%x1
mr %0,%1
mt%0 %1
mf%1 %0
nop"
[(set_attr "type"
"load, fpload, fpload, fpload, fpstore, fpstore,
fpstore, store, veclogical, integer, fpsimple, fpsimple,
*, mtjmpr, mfjmpr, *")
(set_attr "isa"
"*, *, p9v, p8v, *, p9v,
p8v, *, *, *, *, *,
*, *, *, *")])
;; LWZ LFIWZX STW STFIWX MTVSRWZ MFVSRWZ
;; FMR MR MT%0 MF%1 NOP
(define_insn "movsd_hardfloat"
[(set (match_operand:SD 0 "nonimmediate_operand"
"=!r, d, m, Z, ?d, ?r,
f, !r, *c*l, !r, *h")
(match_operand:SD 1 "input_operand"
"m, Z, r, wx, r, d,
f, r, r, *h, 0"))]
"(register_operand (operands[0], SDmode)
|| register_operand (operands[1], SDmode))
&& TARGET_HARD_FLOAT"
"@
lwz%U1%X1 %0,%1
lfiwzx %0,%y1
stw%U0%X0 %1,%0
stfiwx %1,%y0
mtvsrwz %x0,%1
mfvsrwz %0,%x1
fmr %0,%1
mr %0,%1
mt%0 %1
mf%1 %0
nop"
[(set_attr "type"
"load, fpload, store, fpstore, mffgpr, mftgpr,
fpsimple, *, mtjmpr, mfjmpr, *")
(set_attr "isa"
"*, p7, *, *, p8v, p8v,
*, *, *, *, *")])
;; MR MT%0 MF%0 LWZ STW LI
;; LIS G-const. F/n-const NOP
(define_insn "*mov<mode>_softfloat"
[(set (match_operand:FMOVE32 0 "nonimmediate_operand"
"=r, *c*l, r, r, m, r,
r, r, r, *h")
(match_operand:FMOVE32 1 "input_operand"
"r, r, *h, m, r, I,
L, G, Fn, 0"))]
"(gpc_reg_operand (operands[0], <MODE>mode)
|| gpc_reg_operand (operands[1], <MODE>mode))
&& TARGET_SOFT_FLOAT"
"@
mr %0,%1
mt%0 %1
mf%1 %0
lwz%U1%X1 %0,%1
stw%U0%X0 %1,%0
li %0,%1
lis %0,%v1
#
#
nop"
[(set_attr "type"
"*, mtjmpr, mfjmpr, load, store, *,
*, *, *, *")
(set_attr "length"
"*, *, *, *, *, *,
*, *, 8, *")])
;; Like movsf, but adjust an SI value to be used in an SF context, i.e.
;; (set (reg:SF ...) (subreg:SF (reg:SI ...) 0))
;;
;; Because SF values are actually stored as DF values within the vector
;; registers, we need to convert the value to the vector SF format when
;; we need to use the bits in a union or similar cases. We only need
;; to do this transformation when the value is in a vector register. Loads,
;; stores, and transfers within GPRs are assumed to be safe.
;;
;; This is a more general case of reload_vsx_from_gprsf. That insn must have
;; no alternatives, because the call is created as part of secondary_reload,
;; and operand #2's register class is used to allocate the temporary register.
;; This function is called before reload, and it creates the temporary as
;; needed.
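;; A hedged illustration, mirroring the movsi_from_sf case above (the
;; function name is invented):
;;
;;   float float_of_bits (int i)
;;   {
;;     union { int i; float f; } u;
;;     u.i = i;
;;     return u.f;   /* shift + mtvsrd + xscvspdpn when the result is in a VSX reg */
;;   }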
;; LWZ LFS LXSSP LXSSPX STW STFIWX
;; STXSIWX GPR->VSX VSX->GPR GPR->GPR
(define_insn_and_split "movsf_from_si"
[(set (match_operand:SF 0 "nonimmediate_operand"
"=!r, f, v, wa, m, Z,
Z, wa, ?r, !r")
(unspec:SF [(match_operand:SI 1 "input_operand"
"m, m, wY, Z, r, f,
wa, r, wa, r")]
UNSPEC_SF_FROM_SI))
(clobber (match_scratch:DI 2
"=X, X, X, X, X, X,
X, r, X, X"))]
"TARGET_NO_SF_SUBREG
&& (register_operand (operands[0], SFmode)
|| register_operand (operands[1], SImode))"
"@
lwz%U1%X1 %0,%1
lfs%U1%X1 %0,%1
lxssp %0,%1
lxsspx %x0,%y1
stw%U0%X0 %1,%0
stfiwx %1,%y0
stxsiwx %x1,%y0
#
mfvsrwz %0,%x1
mr %0,%1"
"&& reload_completed
&& vsx_reg_sfsubreg_ok (operands[0], SFmode)
&& int_reg_operand_not_pseudo (operands[1], SImode)"
[(const_int 0)]
{
rtx op0 = operands[0];
rtx op1 = operands[1];
rtx op2 = operands[2];
rtx op1_di = gen_rtx_REG (DImode, REGNO (op1));
/* Move SF value to upper 32-bits for xscvspdpn. */
emit_insn (gen_ashldi3 (op2, op1_di, GEN_INT (32)));
emit_insn (gen_p8_mtvsrd_sf (op0, op2));
emit_insn (gen_vsx_xscvspdpn_directmove (op0, op0));
DONE;
}
[(set_attr "length"
"*, *, *, *, *, *,
*, 12, *, *")
(set_attr "type"
"load, fpload, fpload, fpload, store, fpstore,
fpstore, vecfloat, mffgpr, *")
(set_attr "isa"
"*, *, p9v, p8v, *, *,
p8v, p8v, p8v, *")])
;; Move 64-bit binary/decimal floating point
(define_expand "mov<mode>"
[(set (match_operand:FMOVE64 0 "nonimmediate_operand")
(match_operand:FMOVE64 1 "any_operand"))]
""
{
rs6000_emit_move (operands[0], operands[1], <MODE>mode);
DONE;
})
(define_split
[(set (match_operand:FMOVE64 0 "gpc_reg_operand")
(match_operand:FMOVE64 1 "const_int_operand"))]
"! TARGET_POWERPC64 && reload_completed
&& ((REG_P (operands[0]) && REGNO (operands[0]) <= 31)
|| (SUBREG_P (operands[0])
&& REG_P (SUBREG_REG (operands[0]))
&& REGNO (SUBREG_REG (operands[0])) <= 31))"
[(set (match_dup 2) (match_dup 4))
(set (match_dup 3) (match_dup 1))]
{
int endian = (WORDS_BIG_ENDIAN == 0);
HOST_WIDE_INT value = INTVAL (operands[1]);
operands[2] = operand_subword (operands[0], endian, 0, <MODE>mode);
operands[3] = operand_subword (operands[0], 1 - endian, 0, <MODE>mode);
operands[4] = GEN_INT (value >> 32);
operands[1] = GEN_INT (((value & 0xffffffff) ^ 0x80000000) - 0x80000000);
})
(define_split
[(set (match_operand:FMOVE64 0 "gpc_reg_operand")
(match_operand:FMOVE64 1 "const_double_operand"))]
"! TARGET_POWERPC64 && reload_completed
&& ((REG_P (operands[0]) && REGNO (operands[0]) <= 31)
|| (SUBREG_P (operands[0])
&& REG_P (SUBREG_REG (operands[0]))
&& REGNO (SUBREG_REG (operands[0])) <= 31))"
[(set (match_dup 2) (match_dup 4))
(set (match_dup 3) (match_dup 5))]
{
int endian = (WORDS_BIG_ENDIAN == 0);
long l[2];
<real_value_to_target> (*CONST_DOUBLE_REAL_VALUE (operands[1]), l);
operands[2] = operand_subword (operands[0], endian, 0, <MODE>mode);
operands[3] = operand_subword (operands[0], 1 - endian, 0, <MODE>mode);
operands[4] = gen_int_mode (l[endian], SImode);
operands[5] = gen_int_mode (l[1 - endian], SImode);
})
(define_split
[(set (match_operand:FMOVE64 0 "gpc_reg_operand")
(match_operand:FMOVE64 1 "const_double_operand"))]
"TARGET_POWERPC64 && reload_completed
&& ((REG_P (operands[0]) && REGNO (operands[0]) <= 31)
|| (SUBREG_P (operands[0])
&& REG_P (SUBREG_REG (operands[0]))
&& REGNO (SUBREG_REG (operands[0])) <= 31))"
[(set (match_dup 2) (match_dup 3))]
{
int endian = (WORDS_BIG_ENDIAN == 0);
long l[2];
HOST_WIDE_INT val;
<real_value_to_target> (*CONST_DOUBLE_REAL_VALUE (operands[1]), l);
operands[2] = gen_lowpart (DImode, operands[0]);
/* HIGHPART is lower memory address when WORDS_BIG_ENDIAN. */
val = ((HOST_WIDE_INT)(unsigned long)l[endian] << 32
| ((HOST_WIDE_INT)(unsigned long)l[1 - endian]));
operands[3] = gen_int_mode (val, DImode);
})
;; Don't have reload use general registers to load a constant. It is
;; less efficient than loading the constant into an FP register, since
;; it will probably be used there.
;; The move constraints are ordered to prefer floating point registers before
;; general purpose registers to avoid doing a store and a load to get the value
;; into a floating point register when it is needed for a floating point
;; operation. Prefer traditional floating point registers over VSX registers,
;; since the D-form version of the memory instructions does not need a GPR for
;; reloading. ISA 3.0 (power9) adds D-form addressing for scalars to Altivec
;; registers.
;; If we have FPR registers, rs6000_emit_move has moved all constants to memory,
;; except for 0.0 which can be created on VSX with an xor instruction.
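;; For illustration (function names invented): 0.0 is materialized with a
;; register xor, while other constants are expected to come from memory.
;;
;;   double zero_value (void) { return 0.0; }   /* xxlxor under VSX */
;;   double pi_value (void)   { return 3.141592653589793; }   /* lfd from the constant pool */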
;; STFD LFD FMR LXSD STXSD
;; LXSD STXSD XXLOR XXLXOR GPR<-0
;; LWZ STW MR
(define_insn "*mov<mode>_hardfloat32"
[(set (match_operand:FMOVE64 0 "nonimmediate_operand"
"=m, d, d, <f64_p9>, wY,
<f64_av>, Z, <f64_vsx>, <f64_vsx>, !r,
Y, r, !r")
(match_operand:FMOVE64 1 "input_operand"
"d, m, d, wY, <f64_p9>,
Z, <f64_av>, <f64_vsx>, <zero_fp>, <zero_fp>,
r, Y, r"))]
"! TARGET_POWERPC64 && TARGET_HARD_FLOAT
&& (gpc_reg_operand (operands[0], <MODE>mode)
|| gpc_reg_operand (operands[1], <MODE>mode))"
"@
stfd%U0%X0 %1,%0
lfd%U1%X1 %0,%1
fmr %0,%1
lxsd %0,%1
stxsd %1,%0
lxsdx %x0,%y1
stxsdx %x1,%y0
xxlor %x0,%x1,%x1
xxlxor %x0,%x0,%x0
#
#
#
#"
[(set_attr "type"
"fpstore, fpload, fpsimple, fpload, fpstore,
fpload, fpstore, veclogical, veclogical, two,
store, load, two")
(set_attr "size" "64")
(set_attr "length"
"*, *, *, *, *,
*, *, *, *, 8,
8, 8, 8")
(set_attr "isa"
"*, *, *, p9v, p9v,
p7v, p7v, *, *, *,
*, *, *")])
;; STW LWZ MR G-const H-const F-const
(define_insn "*mov<mode>_softfloat32"
[(set (match_operand:FMOVE64 0 "nonimmediate_operand"
"=Y, r, r, r, r, r")
(match_operand:FMOVE64 1 "input_operand"
"r, Y, r, G, H, F"))]
"!TARGET_POWERPC64
&& (gpc_reg_operand (operands[0], <MODE>mode)
|| gpc_reg_operand (operands[1], <MODE>mode))"
"#"
[(set_attr "type"
"store, load, two, *, *, *")
(set_attr "length"
"8, 8, 8, 8, 12, 16")])
; ld/std require word-aligned displacements -> 'Y' constraint.
; List Y->r and r->Y before r->r for reload.
;; STFD LFD FMR LXSD STXSD
;; LXSDX STXSDX XXLOR XXLXOR LI 0
;; STD LD MR MT{CTR,LR} MF{CTR,LR}
;; NOP MFVSRD MTVSRD
(define_insn "*mov<mode>_hardfloat64"
[(set (match_operand:FMOVE64 0 "nonimmediate_operand"
"=m, d, d, <f64_p9>, wY,
<f64_av>, Z, <f64_vsx>, <f64_vsx>, !r,
YZ, r, !r, *c*l, !r,
*h, r, <f64_dm>")
(match_operand:FMOVE64 1 "input_operand"
"d, m, d, wY, <f64_p9>,
Z, <f64_av>, <f64_vsx>, <zero_fp>, <zero_fp>,
r, YZ, r, r, *h,
0, <f64_dm>, r"))]
"TARGET_POWERPC64 && TARGET_HARD_FLOAT
&& (gpc_reg_operand (operands[0], <MODE>mode)
|| gpc_reg_operand (operands[1], <MODE>mode))"
"@
stfd%U0%X0 %1,%0
lfd%U1%X1 %0,%1
fmr %0,%1
lxsd %0,%1
stxsd %1,%0
lxsdx %x0,%y1
stxsdx %x1,%y0
xxlor %x0,%x1,%x1
xxlxor %x0,%x0,%x0
li %0,0
std%U0%X0 %1,%0
ld%U1%X1 %0,%1
mr %0,%1
mt%0 %1
mf%1 %0
nop
mfvsrd %0,%x1
mtvsrd %x0,%1"
[(set_attr "type"
"fpstore, fpload, fpsimple, fpload, fpstore,
fpload, fpstore, veclogical, veclogical, integer,
store, load, *, mtjmpr, mfjmpr,
*, mftgpr, mffgpr")
(set_attr "size" "64")
(set_attr "isa"
"*, *, *, p9v, p9v,
p7v, p7v, *, *, *,
*, *, *, *, *,
*, p8v, p8v")])
;; STD LD MR MT<SPR> MF<SPR> G-const
;; H-const F-const Special
(define_insn "*mov<mode>_softfloat64"
[(set (match_operand:FMOVE64 0 "nonimmediate_operand"
"=Y, r, r, *c*l, r, r,
r, r, *h")
(match_operand:FMOVE64 1 "input_operand"
"r, Y, r, r, *h, G,
H, F, 0"))]
"TARGET_POWERPC64 && TARGET_SOFT_FLOAT
&& (gpc_reg_operand (operands[0], <MODE>mode)
|| gpc_reg_operand (operands[1], <MODE>mode))"
"@
std%U0%X0 %1,%0
ld%U1%X1 %0,%1
mr %0,%1
mt%0 %1
mf%1 %0
#
#
#
nop"
[(set_attr "type"
"store, load, *, mtjmpr, mfjmpr, *,
*, *, *")
(set_attr "length"
"*, *, *, *, *, 8,
12, 16, *")])
(define_expand "mov<mode>"
[(set (match_operand:FMOVE128 0 "general_operand")
(match_operand:FMOVE128 1 "any_operand"))]
""
{
rs6000_emit_move (operands[0], operands[1], <MODE>mode);
DONE;
})
;; It's important to list Y->r and r->Y before r->r because otherwise
;; reload, given m->r, will try to pick r->r and reload it, which
;; doesn't make progress.
;; We can't split little endian direct moves of TDmode, because the words are
;; not swapped like they are for TImode or TFmode. Subregs are therefore
;; problematic. Don't allow direct move for this case.
;; FPR load FPR store FPR move FPR zero GPR load
;; GPR zero GPR store GPR move MFVSRD MTVSRD
(define_insn_and_split "*mov<mode>_64bit_dm"
[(set (match_operand:FMOVE128_FPR 0 "nonimmediate_operand"
"=m, d, d, d, Y,
r, r, r, r, d")
(match_operand:FMOVE128_FPR 1 "input_operand"
"d, m, d, <zero_fp>, r,
<zero_fp>, Y, r, d, r"))]
"TARGET_HARD_FLOAT && TARGET_POWERPC64 && FLOAT128_2REG_P (<MODE>mode)
&& (<MODE>mode != TDmode || WORDS_BIG_ENDIAN)
&& (gpc_reg_operand (operands[0], <MODE>mode)
|| gpc_reg_operand (operands[1], <MODE>mode))"
"#"
"&& reload_completed"
[(pc)]
{
rs6000_split_multireg_move (operands[0], operands[1]);
DONE;
}
[(set_attr "length" "8")
(set_attr "isa" "*,*,*,*,*,*,*,*,p8v,p8v")
(set_attr "max_prefixed_insns" "2")
(set_attr "num_insns" "2")])
(define_insn_and_split "*movtd_64bit_nodm"
[(set (match_operand:TD 0 "nonimmediate_operand" "=m,d,d,Y,r,r")
(match_operand:TD 1 "input_operand" "d,m,d,r,Y,r"))]
"TARGET_HARD_FLOAT && TARGET_POWERPC64 && !WORDS_BIG_ENDIAN
&& (gpc_reg_operand (operands[0], TDmode)
|| gpc_reg_operand (operands[1], TDmode))"
"#"
"&& reload_completed"
[(pc)]
{
rs6000_split_multireg_move (operands[0], operands[1]);
DONE;
}
[(set_attr "length" "8,8,8,12,12,8")
(set_attr "max_prefixed_insns" "2")
(set_attr "num_insns" "2,2,2,3,3,2")])
(define_insn_and_split "*mov<mode>_32bit"
[(set (match_operand:FMOVE128_FPR 0 "nonimmediate_operand" "=m,d,d,d,Y,r,r")
(match_operand:FMOVE128_FPR 1 "input_operand" "d,m,d,<zero_fp>,r,<zero_fp>Y,r"))]
"TARGET_HARD_FLOAT && !TARGET_POWERPC64
&& (FLOAT128_2REG_P (<MODE>mode)
|| int_reg_operand_not_pseudo (operands[0], <MODE>mode)
|| int_reg_operand_not_pseudo (operands[1], <MODE>mode))
&& (gpc_reg_operand (operands[0], <MODE>mode)
|| gpc_reg_operand (operands[1], <MODE>mode))"
"#"
"&& reload_completed"
[(pc)]
{
rs6000_split_multireg_move (operands[0], operands[1]);
DONE;
}
[(set_attr "length" "8,8,8,8,20,20,16")])
(define_insn_and_split "*mov<mode>_softfloat"
[(set (match_operand:FMOVE128 0 "nonimmediate_operand" "=Y,r,r,r")
(match_operand:FMOVE128 1 "input_operand" "r,Y,F,r"))]
"TARGET_SOFT_FLOAT
&& (gpc_reg_operand (operands[0], <MODE>mode)
|| gpc_reg_operand (operands[1], <MODE>mode))"
"#"
"&& reload_completed"
[(pc)]
{
rs6000_split_multireg_move (operands[0], operands[1]);
DONE;
}
[(set_attr_alternative "length"
[(if_then_else (match_test "TARGET_POWERPC64")
(const_string "8")
(const_string "16"))
(if_then_else (match_test "TARGET_POWERPC64")
(const_string "8")
(const_string "16"))
(if_then_else (match_test "TARGET_POWERPC64")
(const_string "16")
(const_string "32"))
(if_then_else (match_test "TARGET_POWERPC64")
(const_string "8")
(const_string "16"))])])
(define_expand "@extenddf<mode>2"
[(set (match_operand:FLOAT128 0 "gpc_reg_operand")
(float_extend:FLOAT128 (match_operand:DF 1 "gpc_reg_operand")))]
"TARGET_HARD_FLOAT && TARGET_LONG_DOUBLE_128"
{
if (FLOAT128_IEEE_P (<MODE>mode))
rs6000_expand_float128_convert (operands[0], operands[1], false);
else if (TARGET_VSX)
emit_insn (gen_extenddf2_vsx (<MODE>mode, operands[0], operands[1]));
else
{
rtx zero = gen_reg_rtx (DFmode);
rs6000_emit_move (zero, CONST0_RTX (DFmode), DFmode);
emit_insn (gen_extenddf2_fprs (<MODE>mode,
operands[0], operands[1], zero));
}
DONE;
})
;; Allow memory operands for the source to be created by the combiner.
(define_insn_and_split "@extenddf<mode>2_fprs"
[(set (match_operand:IBM128 0 "gpc_reg_operand" "=d,d,&d")
(float_extend:IBM128
(match_operand:DF 1 "nonimmediate_operand" "d,m,d")))
(use (match_operand:DF 2 "nonimmediate_operand" "m,m,d"))]
"!TARGET_VSX && TARGET_HARD_FLOAT
&& TARGET_LONG_DOUBLE_128 && FLOAT128_IBM_P (<MODE>mode)"
"#"
"&& reload_completed"
[(set (match_dup 3) (match_dup 1))
(set (match_dup 4) (match_dup 2))]
{
const int lo_word = LONG_DOUBLE_LARGE_FIRST ? GET_MODE_SIZE (DFmode) : 0;
const int hi_word = LONG_DOUBLE_LARGE_FIRST ? 0 : GET_MODE_SIZE (DFmode);
operands[3] = simplify_gen_subreg (DFmode, operands[0], <MODE>mode, hi_word);
operands[4] = simplify_gen_subreg (DFmode, operands[0], <MODE>mode, lo_word);
})
(define_insn_and_split "@extenddf<mode>2_vsx"
[(set (match_operand:IBM128 0 "gpc_reg_operand" "=d,d")
(float_extend:IBM128
(match_operand:DF 1 "nonimmediate_operand" "wa,m")))]
"TARGET_LONG_DOUBLE_128 && TARGET_VSX && FLOAT128_IBM_P (<MODE>mode)"
"#"
"&& reload_completed"
[(set (match_dup 2) (match_dup 1))
(set (match_dup 3) (match_dup 4))]
{
const int lo_word = LONG_DOUBLE_LARGE_FIRST ? GET_MODE_SIZE (DFmode) : 0;
const int hi_word = LONG_DOUBLE_LARGE_FIRST ? 0 : GET_MODE_SIZE (DFmode);
operands[2] = simplify_gen_subreg (DFmode, operands[0], <MODE>mode, hi_word);
operands[3] = simplify_gen_subreg (DFmode, operands[0], <MODE>mode, lo_word);
operands[4] = CONST0_RTX (DFmode);
})
(define_expand "extendsf<mode>2"
[(set (match_operand:FLOAT128 0 "gpc_reg_operand")
(float_extend:FLOAT128 (match_operand:SF 1 "gpc_reg_operand")))]
"TARGET_HARD_FLOAT && TARGET_LONG_DOUBLE_128"
{
if (FLOAT128_IEEE_P (<MODE>mode))
rs6000_expand_float128_convert (operands[0], operands[1], false);
else
{
rtx tmp = gen_reg_rtx (DFmode);
emit_insn (gen_extendsfdf2 (tmp, operands[1]));
emit_insn (gen_extenddf<mode>2 (operands[0], tmp));
}
DONE;
})
(define_expand "trunc<mode>df2"
[(set (match_operand:DF 0 "gpc_reg_operand")
(float_truncate:DF (match_operand:FLOAT128 1 "gpc_reg_operand")))]
"TARGET_HARD_FLOAT && TARGET_LONG_DOUBLE_128"
{
if (FLOAT128_IEEE_P (<MODE>mode))
{
rs6000_expand_float128_convert (operands[0], operands[1], false);
DONE;
}
})
(define_insn_and_split "trunc<mode>df2_internal1"
[(set (match_operand:DF 0 "gpc_reg_operand" "=d,?d")
(float_truncate:DF
(match_operand:IBM128 1 "gpc_reg_operand" "0,d")))]
"FLOAT128_IBM_P (<MODE>mode) && !TARGET_XL_COMPAT
&& TARGET_HARD_FLOAT && TARGET_LONG_DOUBLE_128"
"@
#
fmr %0,%1"
"&& reload_completed && REGNO (operands[0]) == REGNO (operands[1])"
[(const_int 0)]
{
emit_note (NOTE_INSN_DELETED);
DONE;
}
[(set_attr "type" "fpsimple")])
(define_insn "trunc<mode>df2_internal2"
[(set (match_operand:DF 0 "gpc_reg_operand" "=d")
(float_truncate:DF (match_operand:IBM128 1 "gpc_reg_operand" "d")))]
"FLOAT128_IBM_P (<MODE>mode) && TARGET_XL_COMPAT && TARGET_HARD_FLOAT
&& TARGET_LONG_DOUBLE_128"
"fadd %0,%1,%L1"
[(set_attr "type" "fp")])
(define_expand "trunc<mode>sf2"
[(set (match_operand:SF 0 "gpc_reg_operand")
(float_truncate:SF (match_operand:FLOAT128 1 "gpc_reg_operand")))]
"TARGET_HARD_FLOAT && TARGET_LONG_DOUBLE_128"
{
if (FLOAT128_IEEE_P (<MODE>mode))
rs6000_expand_float128_convert (operands[0], operands[1], false);
else
{
rtx tmp = gen_reg_rtx (DFmode);
emit_insn (gen_trunc<mode>df2 (tmp, operands[1]));
emit_insn (gen_truncdfsf2 (operands[0], tmp));
}
DONE;
})
(define_expand "floatsi<mode>2"
[(parallel [(set (match_operand:FLOAT128 0 "gpc_reg_operand")
(float:FLOAT128 (match_operand:SI 1 "gpc_reg_operand")))
(clobber (match_scratch:DI 2))])]
"TARGET_HARD_FLOAT && TARGET_LONG_DOUBLE_128"
{
rtx op0 = operands[0];
rtx op1 = operands[1];
if (TARGET_FLOAT128_HW && FLOAT128_IEEE_P (<MODE>mode))
;
else if (FLOAT128_IEEE_P (<MODE>mode))
{
rs6000_expand_float128_convert (op0, op1, false);
DONE;
}
else
{
rtx tmp = gen_reg_rtx (DFmode);
expand_float (tmp, op1, false);
emit_insn (gen_extenddf2 (<MODE>mode, op0, tmp));
DONE;
}
})
; fadd, but rounding towards zero.
; This is probably not the optimal code sequence.
(define_insn "fix_trunc_helper<mode>"
[(set (match_operand:DF 0 "gpc_reg_operand" "=d")
(unspec:DF [(match_operand:IBM128 1 "gpc_reg_operand" "d")]
UNSPEC_FIX_TRUNC_TF))
(clobber (match_operand:DF 2 "gpc_reg_operand" "=&d"))]
"TARGET_HARD_FLOAT && FLOAT128_IBM_P (<MODE>mode)"
"mffs %2\n\tmtfsb1 31\n\tmtfsb0 30\n\tfadd %0,%1,%L1\n\tmtfsf 1,%2"
[(set_attr "type" "fp")
(set_attr "length" "20")])
(define_expand "fix_trunc<mode>si2"
[(set (match_operand:SI 0 "gpc_reg_operand")
(fix:SI (match_operand:FLOAT128 1 "gpc_reg_operand")))]
"TARGET_HARD_FLOAT && TARGET_LONG_DOUBLE_128"
{
rtx op0 = operands[0];
rtx op1 = operands[1];
if (TARGET_FLOAT128_HW && FLOAT128_IEEE_P (<MODE>mode))
;
else
{
if (FLOAT128_IEEE_P (<MODE>mode))
rs6000_expand_float128_convert (op0, op1, false);
else
emit_insn (gen_fix_truncsi2_fprs (<MODE>mode, op0, op1));
DONE;
}
})
(define_expand "@fix_trunc<mode>si2_fprs"
[(parallel [(set (match_operand:SI 0 "gpc_reg_operand")
(fix:SI (match_operand:IBM128 1 "gpc_reg_operand")))
(clobber (match_dup 2))
(clobber (match_dup 3))
(clobber (match_dup 4))
(clobber (match_dup 5))])]
"TARGET_HARD_FLOAT && TARGET_LONG_DOUBLE_128"
{
operands[2] = gen_reg_rtx (DFmode);
operands[3] = gen_reg_rtx (DFmode);
operands[4] = gen_reg_rtx (DImode);
operands[5] = assign_stack_temp (DImode, GET_MODE_SIZE (DImode));
})
(define_insn_and_split "*fix_trunc<mode>si2_internal"
[(set (match_operand:SI 0 "gpc_reg_operand" "=r")
(fix:SI (match_operand:IBM128 1 "gpc_reg_operand" "d")))
(clobber (match_operand:DF 2 "gpc_reg_operand" "=d"))
(clobber (match_operand:DF 3 "gpc_reg_operand" "=&d"))
(clobber (match_operand:DI 4 "gpc_reg_operand" "=d"))
(clobber (match_operand:DI 5 "offsettable_mem_operand" "=o"))]
"TARGET_HARD_FLOAT && TARGET_LONG_DOUBLE_128"
"#"
""
[(pc)]
{
rtx lowword;
emit_insn (gen_fix_trunc_helper<mode> (operands[2], operands[1],
operands[3]));
gcc_assert (MEM_P (operands[5]));
lowword = adjust_address (operands[5], SImode, WORDS_BIG_ENDIAN ? 4 : 0);
emit_insn (gen_fctiwz_df (operands[4], operands[2]));
emit_move_insn (operands[5], operands[4]);
emit_move_insn (operands[0], lowword);
DONE;
})
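;; The split above rounds the IBM long double toward zero (via
;; fix_trunc_helper), converts the resulting double to a 32-bit integer with
;; fctiwz, spills the 64-bit result to the stack temporary, and reloads the
;; low word (endian-adjusted) into the target GPR.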
(define_expand "fix_trunc<mode>di2"
[(set (match_operand:DI 0 "gpc_reg_operand")
(fix:DI (match_operand:IEEE128 1 "gpc_reg_operand")))]
"TARGET_FLOAT128_TYPE"
{
if (!TARGET_FLOAT128_HW)
{
rs6000_expand_float128_convert (operands[0], operands[1], false);
DONE;
}
})
(define_expand "fixuns_trunc<IEEE128:mode><SDI:mode>2"
[(set (match_operand:SDI 0 "gpc_reg_operand")
(unsigned_fix:SDI (match_operand:IEEE128 1 "gpc_reg_operand")))]
"TARGET_FLOAT128_TYPE"
{
rs6000_expand_float128_convert (operands[0], operands[1], true);
DONE;
})
(define_expand "floatdi<mode>2"
[(set (match_operand:IEEE128 0 "gpc_reg_operand")
(float:IEEE128 (match_operand:DI 1 "gpc_reg_operand")))]
"TARGET_FLOAT128_TYPE"
{
if (!TARGET_FLOAT128_HW)
{
rs6000_expand_float128_convert (operands[0], operands[1], false);
DONE;
}
})
(define_expand "floatunsdi<IEEE128:mode>2"
[(set (match_operand:IEEE128 0 "gpc_reg_operand")
(unsigned_float:IEEE128 (match_operand:DI 1 "gpc_reg_operand")))]
"TARGET_FLOAT128_TYPE"
{
if (!TARGET_FLOAT128_HW)
{
rs6000_expand_float128_convert (operands[0], operands[1], true);
DONE;
}
})
(define_expand "floatuns<IEEE128:mode>2"
[(set (match_operand:IEEE128 0 "gpc_reg_operand")
(unsigned_float:IEEE128 (match_operand:SI 1 "gpc_reg_operand")))]
"TARGET_FLOAT128_TYPE"
{
rtx op0 = operands[0];
rtx op1 = operands[1];
if (TARGET_FLOAT128_HW)
emit_insn (gen_floatuns_<IEEE128:mode>si2_hw (op0, op1));
else
rs6000_expand_float128_convert (op0, op1, true);
DONE;
})
(define_expand "neg<mode>2"
[(set (match_operand:FLOAT128 0 "gpc_reg_operand")
(neg:FLOAT128 (match_operand:FLOAT128 1 "gpc_reg_operand")))]
"FLOAT128_IEEE_P (<MODE>mode)
|| (FLOAT128_IBM_P (<MODE>mode) && TARGET_HARD_FLOAT)"
{
if (FLOAT128_IEEE_P (<MODE>mode))
{
if (TARGET_FLOAT128_HW)
emit_insn (gen_neg2_hw (<MODE>mode, operands[0], operands[1]));
else if (TARGET_FLOAT128_TYPE)
emit_insn (gen_ieee_128bit_vsx_neg2 (<MODE>mode,
operands[0], operands[1]));
else
{
rtx libfunc = optab_libfunc (neg_optab, <MODE>mode);
rtx target = emit_library_call_value (libfunc, operands[0], LCT_CONST,
<MODE>mode,
operands[1], <MODE>mode);
if (target && !rtx_equal_p (target, operands[0]))
emit_move_insn (operands[0], target);
}
DONE;
}
})
(define_insn "neg<mode>2_internal"
[(set (match_operand:IBM128 0 "gpc_reg_operand" "=d")
(neg:IBM128 (match_operand:IBM128 1 "gpc_reg_operand" "d")))]
"TARGET_HARD_FLOAT && FLOAT128_IBM_P (<MODE>mode)"
{
if (REGNO (operands[0]) == REGNO (operands[1]) + 1)
return "fneg %L0,%L1\;fneg %0,%1";
else
return "fneg %0,%1\;fneg %L0,%L1";
}
[(set_attr "type" "fpsimple")
(set_attr "length" "8")])
(define_expand "abs<mode>2"
[(set (match_operand:FLOAT128 0 "gpc_reg_operand")
(abs:FLOAT128 (match_operand:FLOAT128 1 "gpc_reg_operand")))]
"FLOAT128_IEEE_P (<MODE>mode)
|| (FLOAT128_IBM_P (<MODE>mode) && TARGET_HARD_FLOAT)"
{
rtx label;
if (FLOAT128_IEEE_P (<MODE>mode))
{
if (TARGET_FLOAT128_HW)
{
emit_insn (gen_abs2_hw (<MODE>mode, operands[0], operands[1]));
DONE;
}
else if (TARGET_FLOAT128_TYPE)
{
emit_insn (gen_ieee_128bit_vsx_abs2 (<MODE>mode,
operands[0], operands[1]));
DONE;
}
else
FAIL;
}
label = gen_label_rtx ();
emit_insn (gen_abs2_internal (<MODE>mode, operands[0], operands[1], label));
emit_label (label);
DONE;
})
(define_expand "@abs<mode>2_internal"
[(set (match_operand:IBM128 0 "gpc_reg_operand")
(match_operand:IBM128 1 "gpc_reg_operand"))
(set (match_dup 3) (match_dup 5))
(set (match_dup 5) (abs:DF (match_dup 5)))
(set (match_dup 4) (compare:CCFP (match_dup 3) (match_dup 5)))
(set (pc) (if_then_else (eq (match_dup 4) (const_int 0))
(label_ref (match_operand 2 ""))
(pc)))
(set (match_dup 6) (neg:DF (match_dup 6)))]
"TARGET_HARD_FLOAT && TARGET_LONG_DOUBLE_128"
{
const int hi_word = LONG_DOUBLE_LARGE_FIRST ? 0 : GET_MODE_SIZE (DFmode);
const int lo_word = LONG_DOUBLE_LARGE_FIRST ? GET_MODE_SIZE (DFmode) : 0;
operands[3] = gen_reg_rtx (DFmode);
operands[4] = gen_reg_rtx (CCFPmode);
operands[5] = simplify_gen_subreg (DFmode, operands[0], <MODE>mode, hi_word);
operands[6] = simplify_gen_subreg (DFmode, operands[0], <MODE>mode, lo_word);
})
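;; For IBM extended double, the expander above copies the input, replaces the
;; high-order double with its absolute value, and compares the new high part
;; with the original; if they are equal the value was already non-negative
;; and the branch to the label skips the last step, otherwise the low-order
;; double is negated as well so the pair still represents the magnitude.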
;; Generate IEEE 128-bit -0.0 (0x80000000000000000000000000000000) in a vector
;; register
(define_expand "ieee_128bit_negative_zero"
[(set (match_operand:V16QI 0 "register_operand") (match_dup 1))]
"TARGET_FLOAT128_TYPE"
{
rtvec v = rtvec_alloc (16);
int i, high;
for (i = 0; i < 16; i++)
RTVEC_ELT (v, i) = const0_rtx;
high = (BYTES_BIG_ENDIAN) ? 0 : 15;
RTVEC_ELT (v, high) = gen_int_mode (0x80, QImode);
rs6000_expand_vector_init (operands[0], gen_rtx_PARALLEL (V16QImode, v));
DONE;
})
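;; The constant built above has only the most significant bit of the vector
;; set (0x80 in the most significant byte), i.e. the sign-bit mask that the
;; xxlxor/xxlandc/xxlor patterns below use to flip, clear, or set the sign of
;; an IEEE 128-bit value held in a VSX register.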
;; IEEE 128-bit negate
;; We have 2 insns here for negate and absolute value. The first uses
;; match_scratch so that phases like combine can recognize neg/abs as generic
;; insns, and the second insn, emitted after the first split pass, loads up the bit to
;; twiddle the sign bit. Later GCSE passes can then combine multiple uses of
;; neg/abs to create the constant just once.
(define_insn_and_split "@ieee_128bit_vsx_neg<mode>2"
[(set (match_operand:IEEE128 0 "register_operand" "=wa")
(neg:IEEE128 (match_operand:IEEE128 1 "register_operand" "wa")))
(clobber (match_scratch:V16QI 2 "=v"))]
"TARGET_FLOAT128_TYPE && !TARGET_FLOAT128_HW"
"#"
"&& 1"
[(parallel [(set (match_dup 0)
(neg:IEEE128 (match_dup 1)))
(use (match_dup 2))])]
{
if (GET_CODE (operands[2]) == SCRATCH)
operands[2] = gen_reg_rtx (V16QImode);
operands[3] = gen_reg_rtx (V16QImode);
emit_insn (gen_ieee_128bit_negative_zero (operands[2]));
}
[(set_attr "length" "8")
(set_attr "type" "vecsimple")])
(define_insn "*ieee_128bit_vsx_neg<mode>2_internal"
[(set (match_operand:IEEE128 0 "register_operand" "=wa")
(neg:IEEE128 (match_operand:IEEE128 1 "register_operand" "wa")))
(use (match_operand:V16QI 2 "register_operand" "v"))]
"TARGET_FLOAT128_TYPE && !TARGET_FLOAT128_HW"
"xxlxor %x0,%x1,%x2"
[(set_attr "type" "veclogical")])
;; IEEE 128-bit absolute value
(define_insn_and_split "@ieee_128bit_vsx_abs<mode>2"
[(set (match_operand:IEEE128 0 "register_operand" "=wa")
(abs:IEEE128 (match_operand:IEEE128 1 "register_operand" "wa")))
(clobber (match_scratch:V16QI 2 "=v"))]
"TARGET_FLOAT128_TYPE && !TARGET_FLOAT128_HW && FLOAT128_IEEE_P (<MODE>mode)"
"#"
"&& 1"
[(parallel [(set (match_dup 0)
(abs:IEEE128 (match_dup 1)))
(use (match_dup 2))])]
{
if (GET_CODE (operands[2]) == SCRATCH)
operands[2] = gen_reg_rtx (V16QImode);
operands[3] = gen_reg_rtx (V16QImode);
emit_insn (gen_ieee_128bit_negative_zero (operands[2]));
}
[(set_attr "length" "8")
(set_attr "type" "vecsimple")])
(define_insn "*ieee_128bit_vsx_abs<mode>2_internal"
[(set (match_operand:IEEE128 0 "register_operand" "=wa")
(abs:IEEE128 (match_operand:IEEE128 1 "register_operand" "wa")))
(use (match_operand:V16QI 2 "register_operand" "v"))]
"TARGET_FLOAT128_TYPE && !TARGET_FLOAT128_HW"
"xxlandc %x0,%x1,%x2"
[(set_attr "type" "veclogical")])
;; IEEE 128-bit negative absolute value
(define_insn_and_split "*ieee_128bit_vsx_nabs<mode>2"
[(set (match_operand:IEEE128 0 "register_operand" "=wa")
(neg:IEEE128
(abs:IEEE128
(match_operand:IEEE128 1 "register_operand" "wa"))))
(clobber (match_scratch:V16QI 2 "=v"))]
"TARGET_FLOAT128_TYPE && !TARGET_FLOAT128_HW
&& FLOAT128_IEEE_P (<MODE>mode)"
"#"
"&& 1"
[(parallel [(set (match_dup 0)
(neg:IEEE128 (abs:IEEE128 (match_dup 1))))
(use (match_dup 2))])]
{
if (GET_CODE (operands[2]) == SCRATCH)
operands[2] = gen_reg_rtx (V16QImode);
operands[3] = gen_reg_rtx (V16QImode);
emit_insn (gen_ieee_128bit_negative_zero (operands[2]));
}
[(set_attr "length" "8")
(set_attr "type" "vecsimple")])
(define_insn "*ieee_128bit_vsx_nabs<mode>2_internal"
[(set (match_operand:IEEE128 0 "register_operand" "=wa")
(neg:IEEE128
(abs:IEEE128
(match_operand:IEEE128 1 "register_operand" "wa"))))
(use (match_operand:V16QI 2 "register_operand" "v"))]
"TARGET_FLOAT128_TYPE && !TARGET_FLOAT128_HW"
"xxlor %x0,%x1,%x2"
[(set_attr "type" "veclogical")])
;; Float128 conversion functions. These expand to library function calls.
;; We use extend to convert from IBM double double to IEEE 128-bit
;; and trunc for the opposite.
(define_expand "extendiftf2"
[(set (match_operand:TF 0 "gpc_reg_operand")
(float_extend:TF (match_operand:IF 1 "gpc_reg_operand")))]
"TARGET_FLOAT128_TYPE"
{
rs6000_expand_float128_convert (operands[0], operands[1], false);
DONE;
})
(define_expand "extendifkf2"
[(set (match_operand:KF 0 "gpc_reg_operand")
(float_extend:KF (match_operand:IF 1 "gpc_reg_operand")))]
"TARGET_FLOAT128_TYPE"
{
rs6000_expand_float128_convert (operands[0], operands[1], false);
DONE;
})
(define_expand "extendtfkf2"
[(set (match_operand:KF 0 "gpc_reg_operand")
(float_extend:KF (match_operand:TF 1 "gpc_reg_operand")))]
"TARGET_FLOAT128_TYPE"
{
rs6000_expand_float128_convert (operands[0], operands[1], false);
DONE;
})
(define_expand "extendtfif2"
[(set (match_operand:IF 0 "gpc_reg_operand")
(float_extend:IF (match_operand:TF 1 "gpc_reg_operand")))]
"TARGET_FLOAT128_TYPE"
{
rs6000_expand_float128_convert (operands[0], operands[1], false);
DONE;
})
(define_expand "trunciftf2"
[(set (match_operand:TF 0 "gpc_reg_operand")
(float_truncate:TF (match_operand:IF 1 "gpc_reg_operand")))]
"TARGET_FLOAT128_TYPE"
{
rs6000_expand_float128_convert (operands[0], operands[1], false);
DONE;
})
(define_expand "truncifkf2"
[(set (match_operand:KF 0 "gpc_reg_operand")
(float_truncate:KF (match_operand:IF 1 "gpc_reg_operand")))]
"TARGET_FLOAT128_TYPE"
{
rs6000_expand_float128_convert (operands[0], operands[1], false);
DONE;
})
(define_expand "trunckftf2"
[(set (match_operand:TF 0 "gpc_reg_operand")
(float_truncate:TF (match_operand:KF 1 "gpc_reg_operand")))]
"TARGET_FLOAT128_TYPE"
{
rs6000_expand_float128_convert (operands[0], operands[1], false);
DONE;
})
(define_expand "trunctfif2"
[(set (match_operand:IF 0 "gpc_reg_operand")
(float_truncate:IF (match_operand:TF 1 "gpc_reg_operand")))]
"TARGET_FLOAT128_TYPE"
{
rs6000_expand_float128_convert (operands[0], operands[1], false);
DONE;
})
(define_insn_and_split "*extend<mode>tf2_internal"
[(set (match_operand:TF 0 "gpc_reg_operand" "=<IFKF_reg>")
(float_extend:TF
(match_operand:IFKF 1 "gpc_reg_operand" "<IFKF_reg>")))]
"TARGET_FLOAT128_TYPE
&& FLOAT128_IBM_P (TFmode) == FLOAT128_IBM_P (<MODE>mode)"
"#"
"&& reload_completed"
[(set (match_dup 0) (match_dup 2))]
{
operands[2] = gen_rtx_REG (TFmode, REGNO (operands[1]));
})
(define_insn_and_split "*extendtf<mode>2_internal"
[(set (match_operand:IFKF 0 "gpc_reg_operand" "=<IFKF_reg>")
(float_extend:IFKF
(match_operand:TF 1 "gpc_reg_operand" "<IFKF_reg>")))]
"TARGET_FLOAT128_TYPE
&& FLOAT128_IBM_P (TFmode) == FLOAT128_IBM_P (<MODE>mode)"
"#"
"&& reload_completed"
[(set (match_dup 0) (match_dup 2))]
{
operands[2] = gen_rtx_REG (<MODE>mode, REGNO (operands[1]));
})
;; Reload helper functions used by rs6000_secondary_reload. The patterns all
;; must have 3 arguments, and the scratch register constraint must be a
;; single constraint.
;; Reload patterns to support gpr load/store with misaligned mem
;; and multiple gpr load/store at offset >= 0xfffc.
(define_expand "reload_<mode>_store"
[(parallel [(match_operand 0 "memory_operand" "=m")
(match_operand 1 "gpc_reg_operand" "r")
(match_operand:GPR 2 "register_operand" "=&b")])]
""
{
rs6000_secondary_reload_gpr (operands[1], operands[0], operands[2], true);
DONE;
})
(define_expand "reload_<mode>_load"
[(parallel [(match_operand 0 "gpc_reg_operand" "=r")
(match_operand 1 "memory_operand" "m")
(match_operand:GPR 2 "register_operand" "=b")])]
""
{
rs6000_secondary_reload_gpr (operands[0], operands[1], operands[2], false);
DONE;
})
;; Reload patterns for various types using the vector registers. We may need
;; an additional base register to convert the reg+offset addressing to reg+reg
;; for vector registers and reg+reg or (reg+reg)&(-16) addressing to just an
;; index register for gpr registers.
(define_expand "reload_<RELOAD:mode>_<P:mptrsize>_store"
[(parallel [(match_operand:RELOAD 0 "memory_operand" "m")
(match_operand:RELOAD 1 "gpc_reg_operand" "wa")
(match_operand:P 2 "register_operand" "=b")])]
"<P:tptrsize>"
{
rs6000_secondary_reload_inner (operands[1], operands[0], operands[2], true);
DONE;
})
(define_expand "reload_<RELOAD:mode>_<P:mptrsize>_load"
[(parallel [(match_operand:RELOAD 0 "gpc_reg_operand" "wa")
(match_operand:RELOAD 1 "memory_operand" "m")
(match_operand:P 2 "register_operand" "=b")])]
"<P:tptrsize>"
{
rs6000_secondary_reload_inner (operands[0], operands[1], operands[2], false);
DONE;
})
;; Reload sometimes tries to move the address to a GPR, and can generate
;; invalid RTL for addresses involving AND -16. Allow addresses involving
;; reg+reg, reg+small constant, or just reg, all wrapped in an AND -16.
(define_insn_and_split "*vec_reload_and_plus_<mptrsize>"
[(set (match_operand:P 0 "gpc_reg_operand" "=b")
(and:P (plus:P (match_operand:P 1 "gpc_reg_operand" "r")
(match_operand:P 2 "reg_or_cint_operand" "rI"))
(const_int -16)))]
"TARGET_ALTIVEC && reload_completed"
"#"
"&& reload_completed"
[(set (match_dup 0)
(plus:P (match_dup 1)
(match_dup 2)))
(set (match_dup 0)
(and:P (match_dup 0)
(const_int -16)))])
;; Power8 merge instructions to allow direct move to/from floating point
;; registers in 32-bit mode. We use TF mode to get two registers to move the
;; individual 32-bit parts across. Subreg doesn't work too well on the TF
;; value, since it is allocated in reload and not all of the flow information
;; is set up for it. We have two patterns to do the two moves between gprs and
;; fprs. There isn't a dependency between the two, but we could potentially
;; schedule other instructions between the two instructions.
(define_insn "p8_fmrgow_<mode>"
[(set (match_operand:FMOVE64X 0 "register_operand" "=d")
(unspec:FMOVE64X [
(match_operand:DF 1 "register_operand" "d")
(match_operand:DF 2 "register_operand" "d")]
UNSPEC_P8V_FMRGOW))]
"!TARGET_POWERPC64 && TARGET_DIRECT_MOVE"
"fmrgow %0,%1,%2"
[(set_attr "type" "fpsimple")])
(define_insn "p8_mtvsrwz"
[(set (match_operand:DF 0 "register_operand" "=d")
(unspec:DF [(match_operand:SI 1 "register_operand" "r")]
UNSPEC_P8V_MTVSRWZ))]
"!TARGET_POWERPC64 && TARGET_DIRECT_MOVE"
"mtvsrwz %x0,%1"
[(set_attr "type" "mftgpr")])
(define_insn_and_split "reload_fpr_from_gpr<mode>"
[(set (match_operand:FMOVE64X 0 "register_operand" "=d")
(unspec:FMOVE64X [(match_operand:FMOVE64X 1 "register_operand" "r")]
UNSPEC_P8V_RELOAD_FROM_GPR))
(clobber (match_operand:IF 2 "register_operand" "=d"))]
"!TARGET_POWERPC64 && TARGET_DIRECT_MOVE"
"#"
"&& reload_completed"
[(const_int 0)]
{
rtx dest = operands[0];
rtx src = operands[1];
rtx tmp_hi = simplify_gen_subreg (DFmode, operands[2], IFmode, 0);
rtx tmp_lo = simplify_gen_subreg (DFmode, operands[2], IFmode, 8);
rtx gpr_hi_reg = gen_highpart (SImode, src);
rtx gpr_lo_reg = gen_lowpart (SImode, src);
emit_insn (gen_p8_mtvsrwz (tmp_hi, gpr_hi_reg));
emit_insn (gen_p8_mtvsrwz (tmp_lo, gpr_lo_reg));
emit_insn (gen_p8_fmrgow_<mode> (dest, tmp_hi, tmp_lo));
DONE;
}
[(set_attr "length" "12")
(set_attr "type" "three")])
;; Move 128 bit values from GPRs to VSX registers in 64-bit mode
(define_insn "p8_mtvsrd_df"
[(set (match_operand:DF 0 "register_operand" "=wa")
(unspec:DF [(match_operand:DI 1 "register_operand" "r")]
UNSPEC_P8V_MTVSRD))]
"TARGET_POWERPC64 && TARGET_DIRECT_MOVE"
"mtvsrd %x0,%1"
[(set_attr "type" "mftgpr")])
(define_insn "p8_xxpermdi_<mode>"
[(set (match_operand:FMOVE128_GPR 0 "register_operand" "=wa")
(unspec:FMOVE128_GPR [
(match_operand:DF 1 "register_operand" "wa")
(match_operand:DF 2 "register_operand" "wa")]
UNSPEC_P8V_XXPERMDI))]
"TARGET_POWERPC64 && TARGET_DIRECT_MOVE"
"xxpermdi %x0,%x1,%x2,0"
[(set_attr "type" "vecperm")])
(define_insn_and_split "reload_vsx_from_gpr<mode>"
[(set (match_operand:FMOVE128_GPR 0 "register_operand" "=wa")
(unspec:FMOVE128_GPR
[(match_operand:FMOVE128_GPR 1 "register_operand" "r")]
UNSPEC_P8V_RELOAD_FROM_GPR))
(clobber (match_operand:IF 2 "register_operand" "=wa"))]
"TARGET_POWERPC64 && TARGET_DIRECT_MOVE"
"#"
"&& reload_completed"
[(const_int 0)]
{
rtx dest = operands[0];
rtx src = operands[1];
/* You might think that we could use op0 as one temp and a DF clobber
as op2, but you'd be wrong. Secondary reload move patterns don't
check for overlap of the clobber and the destination. */
rtx tmp_hi = simplify_gen_subreg (DFmode, operands[2], IFmode, 0);
rtx tmp_lo = simplify_gen_subreg (DFmode, operands[2], IFmode, 8);
rtx gpr_hi_reg = gen_highpart (DImode, src);
rtx gpr_lo_reg = gen_lowpart (DImode, src);
emit_insn (gen_p8_mtvsrd_df (tmp_hi, gpr_hi_reg));
emit_insn (gen_p8_mtvsrd_df (tmp_lo, gpr_lo_reg));
emit_insn (gen_p8_xxpermdi_<mode> (dest, tmp_hi, tmp_lo));
DONE;
}
[(set_attr "length" "12")
(set_attr "type" "three")])
(define_split
[(set (match_operand:FMOVE128_GPR 0 "nonimmediate_operand")
(match_operand:FMOVE128_GPR 1 "input_operand"))]
"reload_completed
&& (int_reg_operand (operands[0], <MODE>mode)
|| int_reg_operand (operands[1], <MODE>mode))
&& (!TARGET_DIRECT_MOVE_128
|| (!vsx_register_operand (operands[0], <MODE>mode)
&& !vsx_register_operand (operands[1], <MODE>mode)))"
[(pc)]
{
rs6000_split_multireg_move (operands[0], operands[1]);
DONE;
})
;; Move SFmode to a VSX register from a GPR register. Because the scalar
;; floating point type is stored internally as double precision in the VSX
;; registers, we have to convert it from the vector format.
(define_insn "p8_mtvsrd_sf"
[(set (match_operand:SF 0 "register_operand" "=wa")
(unspec:SF [(match_operand:DI 1 "register_operand" "r")]
UNSPEC_P8V_MTVSRD))]
"TARGET_POWERPC64 && TARGET_DIRECT_MOVE"
"mtvsrd %x0,%1"
[(set_attr "type" "mftgpr")])
(define_insn_and_split "reload_vsx_from_gprsf"
[(set (match_operand:SF 0 "register_operand" "=wa")
(unspec:SF [(match_operand:SF 1 "register_operand" "r")]
UNSPEC_P8V_RELOAD_FROM_GPR))
(clobber (match_operand:DI 2 "register_operand" "=r"))]
"TARGET_POWERPC64 && TARGET_DIRECT_MOVE"
"#"
"&& reload_completed"
[(const_int 0)]
{
rtx op0 = operands[0];
rtx op1 = operands[1];
rtx op2 = operands[2];
rtx op1_di = simplify_gen_subreg (DImode, op1, SFmode, 0);
/* Move SF value to upper 32-bits for xscvspdpn. */
emit_insn (gen_ashldi3 (op2, op1_di, GEN_INT (32)));
emit_insn (gen_p8_mtvsrd_sf (op0, op2));
emit_insn (gen_vsx_xscvspdpn_directmove (op0, op0));
DONE;
}
[(set_attr "length" "8")
(set_attr "type" "two")])
;; Move 128 bit values from VSX registers to GPRs in 64-bit mode by doing a
;; normal 64-bit move, followed by an xxpermdi to get the bottom 64-bit value,
;; and then doing a move of that.
(define_insn "p8_mfvsrd_3_<mode>"
[(set (match_operand:DF 0 "register_operand" "=r")
(unspec:DF [(match_operand:FMOVE128_GPR 1 "register_operand" "wa")]
UNSPEC_P8V_RELOAD_FROM_VSX))]
"TARGET_POWERPC64 && TARGET_DIRECT_MOVE"
"mfvsrd %0,%x1"
[(set_attr "type" "mftgpr")])
(define_insn_and_split "reload_gpr_from_vsx<mode>"
[(set (match_operand:FMOVE128_GPR 0 "register_operand" "=r")
(unspec:FMOVE128_GPR
[(match_operand:FMOVE128_GPR 1 "register_operand" "wa")]
UNSPEC_P8V_RELOAD_FROM_VSX))
(clobber (match_operand:FMOVE128_GPR 2 "register_operand" "=wa"))]
"TARGET_POWERPC64 && TARGET_DIRECT_MOVE"
"#"
"&& reload_completed"
[(const_int 0)]
{
rtx dest = operands[0];
rtx src = operands[1];
rtx tmp = operands[2];
rtx gpr_hi_reg = gen_highpart (DFmode, dest);
rtx gpr_lo_reg = gen_lowpart (DFmode, dest);
emit_insn (gen_p8_mfvsrd_3_<mode> (gpr_hi_reg, src));
emit_insn (gen_vsx_xxpermdi_<mode>_be (tmp, src, src, GEN_INT (3)));
emit_insn (gen_p8_mfvsrd_3_<mode> (gpr_lo_reg, tmp));
DONE;
}
[(set_attr "length" "12")
(set_attr "type" "three")])
;; Move SFmode to a GPR from a VSX register. Because the scalar floating
;; point type is stored internally as double precision, we have to convert it
;; to the vector format.
(define_insn_and_split "reload_gpr_from_vsxsf"
[(set (match_operand:SF 0 "register_operand" "=r")
(unspec:SF [(match_operand:SF 1 "register_operand" "wa")]
UNSPEC_P8V_RELOAD_FROM_VSX))
(clobber (match_operand:V4SF 2 "register_operand" "=wa"))]
"TARGET_POWERPC64 && TARGET_DIRECT_MOVE"
"#"
"&& reload_completed"
[(const_int 0)]
{
rtx op0 = operands[0];
rtx op1 = operands[1];
rtx op2 = operands[2];
rtx op0_di = gen_rtx_REG (DImode, reg_or_subregno (op0));
rtx op2_si = gen_rtx_REG (SImode, reg_or_subregno (op2));
emit_insn (gen_vsx_xscvdpspn_scalar (op2, op1));
emit_insn (gen_zero_extendsidi2 (op0_di, op2_si));
DONE;
}
[(set_attr "length" "8")
(set_attr "type" "two")
(set_attr "isa" "p8v")])
;; Next come the multi-word integer load and store and the load and store
;; multiple insns.
;; List r->r after r->Y, otherwise reload will try to reload a
;; non-offsettable address by using r->r which won't make progress.
;; Use of fprs is disparaged slightly, otherwise reload prefers to reload
;; a gpr into a fpr instead of reloading an invalid 'Y' address.
;; GPR store GPR load GPR move FPR store FPR load FPR move
;; GPR const AVX store AVX store AVX load AVX load VSX move
;; P9 0 P9 -1 AVX 0/-1 VSX 0 VSX -1 P9 const
;; AVX const
(define_insn "*movdi_internal32"
[(set (match_operand:DI 0 "nonimmediate_operand"
"=Y, r, r, m, ^d, ^d,
r, wY, Z, ^v, $v, ^wa,
wa, wa, v, wa, *i, v,
v")
(match_operand:DI 1 "input_operand"
"r, Y, r, ^d, m, ^d,
IJKnF, ^v, $v, wY, Z, ^wa,
Oj, wM, OjwM, Oj, wM, wS,
wB"))]
"! TARGET_POWERPC64
&& (gpc_reg_operand (operands[0], DImode)
|| gpc_reg_operand (operands[1], DImode))"
"@
#
#
#
stfd%U0%X0 %1,%0
lfd%U1%X1 %0,%1
fmr %0,%1
#
stxsd %1,%0
stxsdx %x1,%y0
lxsd %0,%1
lxsdx %x0,%y1
xxlor %x0,%x1,%x1
xxspltib %x0,0
xxspltib %x0,255
vspltisw %0,%1
xxlxor %x0,%x0,%x0
xxlorc %x0,%x0,%x0
#
#"
[(set_attr "type"
"store, load, *, fpstore, fpload, fpsimple,
*, fpstore, fpstore, fpload, fpload, veclogical,
vecsimple, vecsimple, vecsimple, veclogical,veclogical,vecsimple,
vecsimple")
(set_attr "size" "64")
(set_attr "length"
"8, 8, 8, *, *, *,
16, *, *, *, *, *,
*, *, *, *, *, 8,
*")
(set_attr "isa"
"*, *, *, *, *, *,
*, p9v, p7v, p9v, p7v, *,
p9v, p9v, p7v, *, *, p7v,
p7v")])
(define_split
[(set (match_operand:DI 0 "gpc_reg_operand")
(match_operand:DI 1 "const_int_operand"))]
"! TARGET_POWERPC64 && reload_completed
&& gpr_or_gpr_p (operands[0], operands[1])
&& !direct_move_p (operands[0], operands[1])"
[(set (match_dup 2) (match_dup 4))
(set (match_dup 3) (match_dup 1))]
{
HOST_WIDE_INT value = INTVAL (operands[1]);
operands[2] = operand_subword_force (operands[0], WORDS_BIG_ENDIAN == 0,
DImode);
operands[3] = operand_subword_force (operands[0], WORDS_BIG_ENDIAN != 0,
DImode);
operands[4] = GEN_INT (value >> 32);
operands[1] = GEN_INT (((value & 0xffffffff) ^ 0x80000000) - 0x80000000);
})
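;; Regardless of endianness, operands[2] above is the subword that holds the
;; high 32 bits (value >> 32) and operands[3] the one that holds the low 32
;; bits; the ((value & 0xffffffff) ^ 0x80000000) - 0x80000000 expression
;; sign-extends the low half so that it is a valid SImode constant.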
(define_split
[(set (match_operand:DIFD 0 "nonimmediate_operand")
(match_operand:DIFD 1 "input_operand"))]
"reload_completed && !TARGET_POWERPC64
&& gpr_or_gpr_p (operands[0], operands[1])
&& !direct_move_p (operands[0], operands[1])"
[(pc)]
{
rs6000_split_multireg_move (operands[0], operands[1]);
DONE;
})
;; GPR store GPR load GPR move
;; GPR li GPR lis GPR pli GPR #
;; FPR store FPR load FPR move
;; AVX store AVX store AVX load AVX load VSX move
;; P9 0 P9 -1 AVX 0/-1 VSX 0 VSX -1
;; P9 const AVX const
;; From SPR To SPR SPR<->SPR
;; VSX->GPR GPR->VSX
(define_insn "*movdi_internal64"
[(set (match_operand:DI 0 "nonimmediate_operand"
"=YZ, r, r,
r, r, r, r,
m, ^d, ^d,
wY, Z, $v, $v, ^wa,
wa, wa, v, wa, wa,
v, v,
r, *h, *h,
?r, ?wa")
(match_operand:DI 1 "input_operand"
"r, YZ, r,
I, L, eI, nF,
^d, m, ^d,
^v, $v, wY, Z, ^wa,
Oj, wM, OjwM, Oj, wM,
wS, wB,
*h, r, 0,
wa, r"))]
"TARGET_POWERPC64
&& (gpc_reg_operand (operands[0], DImode)
|| gpc_reg_operand (operands[1], DImode))"
"@
std%U0%X0 %1,%0
ld%U1%X1 %0,%1
mr %0,%1
li %0,%1
lis %0,%v1
li %0,%1
#
stfd%U0%X0 %1,%0
lfd%U1%X1 %0,%1
fmr %0,%1
stxsd %1,%0
stxsdx %x1,%y0
lxsd %0,%1
lxsdx %x0,%y1
xxlor %x0,%x1,%x1
xxspltib %x0,0
xxspltib %x0,255
#
xxlxor %x0,%x0,%x0
xxlorc %x0,%x0,%x0
#
#
mf%1 %0
mt%0 %1
nop
mfvsrd %0,%x1
mtvsrd %x0,%1"
[(set_attr "type"
"store, load, *,
*, *, *, *,
fpstore, fpload, fpsimple,
fpstore, fpstore, fpload, fpload, veclogical,
vecsimple, vecsimple, vecsimple, veclogical, veclogical,
vecsimple, vecsimple,
mfjmpr, mtjmpr, *,
mftgpr, mffgpr")
(set_attr "size" "64")
(set_attr "length"
"*, *, *,
*, *, *, 20,
*, *, *,
*, *, *, *, *,
*, *, *, *, *,
8, *,
*, *, *,
*, *")
(set_attr "isa"
"*, *, *,
*, *, p10, *,
*, *, *,
p9v, p7v, p9v, p7v, *,
p9v, p9v, p7v, *, *,
p7v, p7v,
*, *, *,
p8v, p8v")])
; Some DImode loads are best done as a load of -1 followed by a mask
; instruction.
(define_split
[(set (match_operand:DI 0 "int_reg_operand_not_pseudo")
(match_operand:DI 1 "const_int_operand"))]
"TARGET_POWERPC64
&& num_insns_constant (operands[1], DImode) > 1
&& !IN_RANGE (INTVAL (operands[1]), -0x80000000, 0xffffffff)
&& rs6000_is_valid_and_mask (operands[1], DImode)"
[(set (match_dup 0)
(const_int -1))
(set (match_dup 0)
(and:DI (match_dup 0)
(match_dup 1)))]
"")
;; Split a load of a large constant into the appropriate five-instruction
;; sequence. Handle anything in a constant number of insns.
;; When non-easy constants can go in the TOC, this should use
;; easy_fp_constant predicate.
(define_split
[(set (match_operand:DI 0 "int_reg_operand_not_pseudo")
(match_operand:DI 1 "const_int_operand"))]
"TARGET_POWERPC64 && num_insns_constant (operands[1], DImode) > 1"
[(set (match_dup 0) (match_dup 2))
(set (match_dup 0) (plus:DI (match_dup 0) (match_dup 3)))]
{
if (rs6000_emit_set_const (operands[0], operands[1]))
DONE;
else
FAIL;
})
(define_split
[(set (match_operand:DI 0 "int_reg_operand_not_pseudo")
(match_operand:DI 1 "const_scalar_int_operand"))]
"TARGET_POWERPC64 && num_insns_constant (operands[1], DImode) > 1"
[(set (match_dup 0) (match_dup 2))
(set (match_dup 0) (plus:DI (match_dup 0) (match_dup 3)))]
{
if (rs6000_emit_set_const (operands[0], operands[1]))
DONE;
else
FAIL;
})
(define_split
[(set (match_operand:DI 0 "altivec_register_operand")
(match_operand:DI 1 "s5bit_cint_operand"))]
"TARGET_VSX && reload_completed"
[(const_int 0)]
{
rtx op0 = operands[0];
rtx op1 = operands[1];
int r = REGNO (op0);
rtx op0_v4si = gen_rtx_REG (V4SImode, r);
emit_insn (gen_altivec_vspltisw (op0_v4si, op1));
if (op1 != const0_rtx && op1 != constm1_rtx)
{
rtx op0_v2di = gen_rtx_REG (V2DImode, r);
emit_insn (gen_altivec_vupkhsw (op0_v2di, op0_v4si));
}
DONE;
})
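;; vspltisw splats the 5-bit signed immediate into all four word elements.
;; For 0 and -1 the doubleword holding the DImode value is already correct,
;; so nothing more is needed; for any other value (e.g. 5, which leaves
;; 0x0000000500000005 in that doubleword) vupkhsw sign-extends the word
;; elements to 64 bits, giving the expected 0x0000000000000005.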
;; Split integer constants that can be loaded with XXSPLTIB and a
;; sign extend operation.
(define_split
[(set (match_operand:INT_ISA3 0 "altivec_register_operand")
(match_operand:INT_ISA3 1 "xxspltib_constant_split"))]
"TARGET_P9_VECTOR && reload_completed"
[(const_int 0)]
{
rtx op0 = operands[0];
rtx op1 = operands[1];
int r = REGNO (op0);
rtx op0_v16qi = gen_rtx_REG (V16QImode, r);
emit_insn (gen_xxspltib_v16qi (op0_v16qi, op1));
if (<MODE>mode == DImode)
emit_insn (gen_vsx_sign_extend_qi_di (operands[0], op0_v16qi));
else if (<MODE>mode == SImode)
emit_insn (gen_vsx_sign_extend_qi_si (operands[0], op0_v16qi));
else if (<MODE>mode == HImode)
{
rtx op0_v8hi = gen_rtx_REG (V8HImode, r);
emit_insn (gen_altivec_vupkhsb (op0_v8hi, op0_v16qi));
}
DONE;
})
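;; xxspltib splats the 8-bit immediate into all sixteen byte elements; the
;; following sign-extension (a VSX extend for SImode/DImode, vupkhsb for
;; HImode) then widens it to the scalar width so the register holds the
;; sign-extended constant.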
;; TImode/PTImode is similar, except that we usually want to compute the
;; address into a register and use lsi/stsi (the exception is during reload).
(define_insn "*mov<mode>_string"
[(set (match_operand:TI2 0 "reg_or_mem_operand" "=Q,Y,????r,????r,????r,r")
(match_operand:TI2 1 "input_operand" "r,r,Q,Y,r,n"))]
"! TARGET_POWERPC64
&& (<MODE>mode != TImode || VECTOR_MEM_NONE_P (TImode))
&& (gpc_reg_operand (operands[0], <MODE>mode)
|| gpc_reg_operand (operands[1], <MODE>mode))"
"#"
[(set_attr "type" "store,store,load,load,*,*")
(set_attr "update" "yes")
(set_attr "indexed" "yes")
(set_attr "cell_micro" "conditional")])
(define_insn "*mov<mode>_ppc64"
[(set (match_operand:TI2 0 "nonimmediate_operand" "=wQ,Y,r,r,r,r")
(match_operand:TI2 1 "input_operand" "r,r,wQ,Y,r,n"))]
"(TARGET_POWERPC64 && VECTOR_MEM_NONE_P (<MODE>mode)
&& (gpc_reg_operand (operands[0], <MODE>mode)
|| gpc_reg_operand (operands[1], <MODE>mode)))"
{
return rs6000_output_move_128bit (operands);
}
[(set_attr "type" "store,store,load,load,*,*")
(set_attr "length" "8")
(set_attr "max_prefixed_insns" "2")])
(define_split
[(set (match_operand:TI2 0 "int_reg_operand")
(match_operand:TI2 1 "const_scalar_int_operand"))]
"TARGET_POWERPC64
&& (VECTOR_MEM_NONE_P (<MODE>mode)
|| (reload_completed && INT_REGNO_P (REGNO (operands[0]))))"
[(set (match_dup 2) (match_dup 4))
(set (match_dup 3) (match_dup 5))]
{
operands[2] = operand_subword_force (operands[0], WORDS_BIG_ENDIAN == 0,
<MODE>mode);
operands[3] = operand_subword_force (operands[0], WORDS_BIG_ENDIAN != 0,
<MODE>mode);
if (CONST_WIDE_INT_P (operands[1]))
{
operands[4] = GEN_INT (CONST_WIDE_INT_ELT (operands[1], 1));
operands[5] = GEN_INT (CONST_WIDE_INT_ELT (operands[1], 0));
}
else if (CONST_INT_P (operands[1]))
{
operands[4] = GEN_INT (- (INTVAL (operands[1]) < 0));
operands[5] = operands[1];
}
else
FAIL;
})
(define_split
[(set (match_operand:TI2 0 "nonimmediate_operand")
(match_operand:TI2 1 "input_operand"))]
"reload_completed
&& gpr_or_gpr_p (operands[0], operands[1])
&& !direct_move_p (operands[0], operands[1])
&& !quad_load_store_p (operands[0], operands[1])"
[(pc)]
{
rs6000_split_multireg_move (operands[0], operands[1]);
DONE;
})
(define_expand "setmemsi"
[(parallel [(set (match_operand:BLK 0 "")
(match_operand 2 "const_int_operand"))
(use (match_operand:SI 1 ""))
(use (match_operand:SI 3 ""))])]
""
{
/* If value to set is not zero, use the library routine. */
if (operands[2] != const0_rtx)
FAIL;
if (expand_block_clear (operands))
DONE;
else
FAIL;
})
;; String compare N insn.
;; Argument 0 is the target (result)
;; Argument 1 is the destination
;; Argument 2 is the source
;; Argument 3 is the length
;; Argument 4 is the alignment
(define_expand "cmpstrnsi"
[(parallel [(set (match_operand:SI 0)
(compare:SI (match_operand:BLK 1)
(match_operand:BLK 2)))
(use (match_operand:SI 3))
(use (match_operand:SI 4))])]
"TARGET_CMPB && (BYTES_BIG_ENDIAN || TARGET_LDBRX)"
{
if (optimize_insn_for_size_p ())
FAIL;
if (expand_strn_compare (operands, 0))
DONE;
else
FAIL;
})
;; String compare insn.
;; Argument 0 is the target (result)
;; Argument 1 is the destination
;; Argument 2 is the source
;; Argument 3 is the alignment
(define_expand "cmpstrsi"
[(parallel [(set (match_operand:SI 0)
(compare:SI (match_operand:BLK 1)
(match_operand:BLK 2)))
(use (match_operand:SI 3))])]
"TARGET_CMPB && (BYTES_BIG_ENDIAN || TARGET_LDBRX)"
{
if (optimize_insn_for_size_p ())
FAIL;
if (expand_strn_compare (operands, 1))
DONE;
else
FAIL;
})
;; Block compare insn.
;; Argument 0 is the target (result)
;; Argument 1 is the destination
;; Argument 2 is the source
;; Argument 3 is the length
;; Argument 4 is the alignment
(define_expand "cmpmemsi"
[(parallel [(set (match_operand:SI 0)
(compare:SI (match_operand:BLK 1)
(match_operand:BLK 2)))
(use (match_operand:SI 3))
(use (match_operand:SI 4))])]
"TARGET_POPCNTD"
{
if (expand_block_compare (operands))
DONE;
else
FAIL;
})
;; String/block copy insn (source and destination must not overlap).
;; Argument 0 is the destination
;; Argument 1 is the source
;; Argument 2 is the length
;; Argument 3 is the alignment
(define_expand "cpymemsi"
[(parallel [(set (match_operand:BLK 0 "")
(match_operand:BLK 1 ""))
(use (match_operand:SI 2 ""))
(use (match_operand:SI 3 ""))])]
""
{
if (expand_block_move (operands, false))
DONE;
else
FAIL;
})
;; String/block move insn (source and destination may overlap).
;; Argument 0 is the destination
;; Argument 1 is the source
;; Argument 2 is the length
;; Argument 3 is the alignment
(define_expand "movmemsi"
[(parallel [(set (match_operand:BLK 0 "")
(match_operand:BLK 1 ""))
(use (match_operand:SI 2 ""))
(use (match_operand:SI 3 ""))])]
""
{
if (expand_block_move (operands, true))
DONE;
else
FAIL;
})
;; Define insns that do load or store with update. Some of these we can
;; get by using pre-decrement or pre-increment, but the hardware can also
;; do cases where the increment is not the size of the object.
;;
;; In all these cases, we use operands 0 and 1 for the register being
;; incremented because those are the operands that local-alloc will
;; tie and these are the pair most likely to be tieable (and the ones
;; that will benefit the most).
(define_insn "*movdi_update1"
[(set (match_operand:DI 3 "gpc_reg_operand" "=r,r")
(mem:DI (plus:P (match_operand:P 1 "gpc_reg_operand" "0,0")
(match_operand:P 2 "reg_or_aligned_short_operand" "r,I"))))
(set (match_operand:P 0 "gpc_reg_operand" "=b,b")
(plus:P (match_dup 1) (match_dup 2)))]
"TARGET_POWERPC64 && TARGET_UPDATE
&& (!avoiding_indexed_address_p (DImode)
|| !gpc_reg_operand (operands[2], Pmode))"
"@
ldux %3,%0,%2
ldu %3,%2(%0)"
[(set_attr "type" "load")
(set_attr "update" "yes")
(set_attr "indexed" "yes,no")])
(define_insn "movdi_<mode>_update"
[(set (mem:DI (plus:P (match_operand:P 1 "gpc_reg_operand" "0,0")
(match_operand:P 2 "reg_or_aligned_short_operand" "r,I")))
(match_operand:DI 3 "gpc_reg_operand" "r,r"))
(set (match_operand:P 0 "gpc_reg_operand" "=b,b")
(plus:P (match_dup 1) (match_dup 2)))]
"TARGET_POWERPC64 && TARGET_UPDATE
&& (!avoiding_indexed_address_p (DImode)
|| !gpc_reg_operand (operands[2], Pmode)
|| (REG_P (operands[0])
&& REGNO (operands[0]) == STACK_POINTER_REGNUM))"
"@
stdux %3,%0,%2
stdu %3,%2(%0)"
[(set_attr "type" "store")
(set_attr "update" "yes")
(set_attr "indexed" "yes,no")])
;; This pattern is only conditional on TARGET_64BIT, as it is
;; needed for stack allocation, even if the user passes -mno-update.
(define_insn "movdi_update_stack"
[(set (mem:DI (plus:DI (match_operand:DI 1 "gpc_reg_operand" "0,0")
(match_operand:DI 2 "reg_or_aligned_short_operand" "r,I")))
(match_operand:DI 3 "gpc_reg_operand" "r,r"))
(set (match_operand:DI 0 "gpc_reg_operand" "=b,b")
(plus:DI (match_dup 1) (match_dup 2)))]
"TARGET_64BIT"
"@
stdux %3,%0,%2
stdu %3,%2(%0)"
[(set_attr "type" "store")
(set_attr "update" "yes")
(set_attr "indexed" "yes,no")])
(define_insn "*movsi_update1"
[(set (match_operand:SI 3 "gpc_reg_operand" "=r,r")
(mem:SI (plus:P (match_operand:P 1 "gpc_reg_operand" "0,0")
(match_operand:P 2 "reg_or_short_operand" "r,I"))))
(set (match_operand:P 0 "gpc_reg_operand" "=b,b")
(plus:P (match_dup 1) (match_dup 2)))]
"TARGET_UPDATE
&& (!avoiding_indexed_address_p (SImode)
|| !gpc_reg_operand (operands[2], Pmode))"
"@
lwzux %3,%0,%2
lwzu %3,%2(%0)"
[(set_attr "type" "load")
(set_attr "update" "yes")
(set_attr "indexed" "yes,no")])
(define_insn "*movsi_update2"
[(set (match_operand:EXTSI 3 "gpc_reg_operand" "=r")
(sign_extend:EXTSI
(mem:SI (plus:P (match_operand:P 1 "gpc_reg_operand" "0")
(match_operand:P 2 "gpc_reg_operand" "r")))))
(set (match_operand:P 0 "gpc_reg_operand" "=b")
(plus:P (match_dup 1) (match_dup 2)))]
"TARGET_POWERPC64 && !avoiding_indexed_address_p (DImode)"
"lwaux %3,%0,%2"
[(set_attr "type" "load")
(set_attr "sign_extend" "yes")
(set_attr "update" "yes")
(set_attr "indexed" "yes")])
(define_insn "movsi_<mode>_update"
[(set (mem:SI (plus:P (match_operand:P 1 "gpc_reg_operand" "0,0")
(match_operand:P 2 "reg_or_short_operand" "r,I")))
(match_operand:SI 3 "gpc_reg_operand" "r,r"))
(set (match_operand:P 0 "gpc_reg_operand" "=b,b")
(plus:P (match_dup 1) (match_dup 2)))]
"TARGET_UPDATE
&& (!avoiding_indexed_address_p (SImode)
|| !gpc_reg_operand (operands[2], Pmode)
|| (REG_P (operands[0])
&& REGNO (operands[0]) == STACK_POINTER_REGNUM))"
"@
stwux %3,%0,%2
stwu %3,%2(%0)"
[(set_attr "type" "store")
(set_attr "update" "yes")
(set_attr "indexed" "yes,no")])
;; This is an unconditional pattern; needed for stack allocation, even
;; if the user passes -mno-update.
(define_insn "movsi_update_stack"
[(set (mem:SI (plus:SI (match_operand:SI 1 "gpc_reg_operand" "0,0")
(match_operand:SI 2 "reg_or_short_operand" "r,I")))
(match_operand:SI 3 "gpc_reg_operand" "r,r"))
(set (match_operand:SI 0 "gpc_reg_operand" "=b,b")
(plus:SI (match_dup 1) (match_dup 2)))]
"TARGET_32BIT"
"@
stwux %3,%0,%2
stwu %3,%2(%0)"
[(set_attr "type" "store")
(set_attr "update" "yes")
(set_attr "indexed" "yes,no")])
(define_insn "*movhi_update1"
[(set (match_operand:HI 3 "gpc_reg_operand" "=r,r")
(mem:HI (plus:P (match_operand:P 1 "gpc_reg_operand" "0,0")
(match_operand:P 2 "reg_or_short_operand" "r,I"))))
(set (match_operand:P 0 "gpc_reg_operand" "=b,b")
(plus:P (match_dup 1) (match_dup 2)))]
"TARGET_UPDATE
&& (!avoiding_indexed_address_p (HImode)
|| !gpc_reg_operand (operands[2], SImode))"
"@
lhzux %3,%0,%2
lhzu %3,%2(%0)"
[(set_attr "type" "load")
(set_attr "update" "yes")
(set_attr "indexed" "yes,no")])
(define_insn "*movhi_update2"
[(set (match_operand:EXTHI 3 "gpc_reg_operand" "=r,r")
(zero_extend:EXTHI
(mem:HI (plus:P (match_operand:P 1 "gpc_reg_operand" "0,0")
(match_operand:P 2 "reg_or_short_operand" "r,I")))))
(set (match_operand:P 0 "gpc_reg_operand" "=b,b")
(plus:P (match_dup 1) (match_dup 2)))]
"TARGET_UPDATE
&& (!avoiding_indexed_address_p (HImode)
|| !gpc_reg_operand (operands[2], Pmode))"
"@
lhzux %3,%0,%2
lhzu %3,%2(%0)"
[(set_attr "type" "load")
(set_attr "update" "yes")
(set_attr "indexed" "yes,no")])
(define_insn "*movhi_update3"
[(set (match_operand:EXTHI 3 "gpc_reg_operand" "=r,r")
(sign_extend:EXTHI
(mem:HI (plus:P (match_operand:P 1 "gpc_reg_operand" "0,0")
(match_operand:P 2 "reg_or_short_operand" "r,I")))))
(set (match_operand:P 0 "gpc_reg_operand" "=b,b")
(plus:P (match_dup 1) (match_dup 2)))]
"TARGET_UPDATE
&& !(avoiding_indexed_address_p (HImode)
&& gpc_reg_operand (operands[2], Pmode))"
"@
lhaux %3,%0,%2
lhau %3,%2(%0)"
[(set_attr "type" "load")
(set_attr "sign_extend" "yes")
(set_attr "update" "yes")
(set_attr "indexed" "yes,no")])
(define_insn "*movhi_update4"
[(set (mem:HI (plus:P (match_operand:P 1 "gpc_reg_operand" "0,0")
(match_operand:P 2 "reg_or_short_operand" "r,I")))
(match_operand:HI 3 "gpc_reg_operand" "r,r"))
(set (match_operand:P 0 "gpc_reg_operand" "=b,b")
(plus:P (match_dup 1) (match_dup 2)))]
"TARGET_UPDATE
&& (!avoiding_indexed_address_p (HImode)
|| !gpc_reg_operand (operands[2], Pmode))"
"@
sthux %3,%0,%2
sthu %3,%2(%0)"
[(set_attr "type" "store")
(set_attr "update" "yes")
(set_attr "indexed" "yes,no")])
(define_insn "*movqi_update1"
[(set (match_operand:QI 3 "gpc_reg_operand" "=r,r")
(mem:QI (plus:P (match_operand:P 1 "gpc_reg_operand" "0,0")
(match_operand:P 2 "reg_or_short_operand" "r,I"))))
(set (match_operand:P 0 "gpc_reg_operand" "=b,b")
(plus:P (match_dup 1) (match_dup 2)))]
"TARGET_UPDATE
&& (!avoiding_indexed_address_p (QImode)
|| !gpc_reg_operand (operands[2], Pmode))"
"@
lbzux %3,%0,%2
lbzu %3,%2(%0)"
[(set_attr "type" "load")
(set_attr "update" "yes")
(set_attr "indexed" "yes,no")])
(define_insn "*movqi_update2"
[(set (match_operand:EXTQI 3 "gpc_reg_operand" "=r,r")
(zero_extend:EXTQI
(mem:QI (plus:P (match_operand:P 1 "gpc_reg_operand" "0,0")
(match_operand:P 2 "reg_or_short_operand" "r,I")))))
(set (match_operand:P 0 "gpc_reg_operand" "=b,b")
(plus:P (match_dup 1) (match_dup 2)))]
"TARGET_UPDATE
&& (!avoiding_indexed_address_p (QImode)
|| !gpc_reg_operand (operands[2], Pmode))"
"@
lbzux %3,%0,%2
lbzu %3,%2(%0)"
[(set_attr "type" "load")
(set_attr "update" "yes")
(set_attr "indexed" "yes,no")])
(define_insn "*movqi_update3"
[(set (mem:QI (plus:P (match_operand:P 1 "gpc_reg_operand" "0,0")
(match_operand:P 2 "reg_or_short_operand" "r,I")))
(match_operand:QI 3 "gpc_reg_operand" "r,r"))
(set (match_operand:P 0 "gpc_reg_operand" "=b,b")
(plus:P (match_dup 1) (match_dup 2)))]
"TARGET_UPDATE
&& (!avoiding_indexed_address_p (QImode)
|| !gpc_reg_operand (operands[2], Pmode))"
"@
stbux %3,%0,%2
stbu %3,%2(%0)"
[(set_attr "type" "store")
(set_attr "update" "yes")
(set_attr "indexed" "yes,no")])
(define_insn "*mov<SFDF:mode>_update1"
[(set (match_operand:SFDF 3 "gpc_reg_operand" "=<SFDF:Ff>,<SFDF:Ff>")
(mem:SFDF (plus:P (match_operand:P 1 "gpc_reg_operand" "0,0")
(match_operand:P 2 "reg_or_short_operand" "r,I"))))
(set (match_operand:P 0 "gpc_reg_operand" "=b,b")
(plus:P (match_dup 1) (match_dup 2)))]
"TARGET_HARD_FLOAT && TARGET_UPDATE
&& (!avoiding_indexed_address_p (<SFDF:MODE>mode)
|| !gpc_reg_operand (operands[2], Pmode))"
"@
lf<sd>ux %3,%0,%2
lf<sd>u %3,%2(%0)"
[(set_attr "type" "fpload")
(set_attr "update" "yes")
(set_attr "indexed" "yes,no")
(set_attr "size" "<SFDF:bits>")])
(define_insn "*mov<SFDF:mode>_update2"
[(set (mem:SFDF (plus:P (match_operand:P 1 "gpc_reg_operand" "0,0")
(match_operand:P 2 "reg_or_short_operand" "r,I")))
(match_operand:SFDF 3 "gpc_reg_operand" "<SFDF:Ff>,<SFDF:Ff>"))
(set (match_operand:P 0 "gpc_reg_operand" "=b,b")
(plus:P (match_dup 1) (match_dup 2)))]
"TARGET_HARD_FLOAT && TARGET_UPDATE
&& (!avoiding_indexed_address_p (<SFDF:MODE>mode)
|| !gpc_reg_operand (operands[2], Pmode))"
"@
stf<sd>ux %3,%0,%2
stf<sd>u %3,%2(%0)"
[(set_attr "type" "fpstore")
(set_attr "update" "yes")
(set_attr "indexed" "yes,no")
(set_attr "size" "<SFDF:bits>")])
(define_insn "*movsf_update3"
[(set (match_operand:SF 3 "gpc_reg_operand" "=r,r")
(mem:SF (plus:P (match_operand:P 1 "gpc_reg_operand" "0,0")
(match_operand:P 2 "reg_or_short_operand" "r,I"))))
(set (match_operand:P 0 "gpc_reg_operand" "=b,b")
(plus:P (match_dup 1) (match_dup 2)))]
"TARGET_SOFT_FLOAT && TARGET_UPDATE
&& (!avoiding_indexed_address_p (SFmode)
|| !gpc_reg_operand (operands[2], Pmode))"
"@
lwzux %3,%0,%2
lwzu %3,%2(%0)"
[(set_attr "type" "load")
(set_attr "update" "yes")
(set_attr "indexed" "yes,no")])
(define_insn "*movsf_update4"
[(set (mem:SF (plus:P (match_operand:P 1 "gpc_reg_operand" "0,0")
(match_operand:P 2 "reg_or_short_operand" "r,I")))
(match_operand:SF 3 "gpc_reg_operand" "r,r"))
(set (match_operand:P 0 "gpc_reg_operand" "=b,b")
(plus:P (match_dup 1) (match_dup 2)))]
"TARGET_SOFT_FLOAT && TARGET_UPDATE
&& (!avoiding_indexed_address_p (SFmode)
|| !gpc_reg_operand (operands[2], Pmode))"
"@
stwux %3,%0,%2
stwu %3,%2(%0)"
[(set_attr "type" "store")
(set_attr "update" "yes")
(set_attr "indexed" "yes,no")])
;; After inserting conditional returns we can sometimes have
;; unnecessary register moves. Unfortunately we cannot have a
;; modeless peephole here, because some single SImode sets have early
;; clobber outputs. Although those sets expand to multi-ppc-insn
;; sequences, using get_attr_length here will smash the operands
;; array. Neither is there an early_clobber_p predicate.
;; Also, this optimization interferes with scalars going into
;; altivec registers (the code does reloading through the FPRs).
(define_peephole2
[(set (match_operand:DF 0 "gpc_reg_operand")
(match_operand:DF 1 "any_operand"))
(set (match_operand:DF 2 "gpc_reg_operand")
(match_dup 0))]
"!TARGET_VSX
&& peep2_reg_dead_p (2, operands[0])"
[(set (match_dup 2) (match_dup 1))])
(define_peephole2
[(set (match_operand:SF 0 "gpc_reg_operand")
(match_operand:SF 1 "any_operand"))
(set (match_operand:SF 2 "gpc_reg_operand")
(match_dup 0))]
"!TARGET_P8_VECTOR
&& peep2_reg_dead_p (2, operands[0])"
[(set (match_dup 2) (match_dup 1))])
;; TLS support.
(define_insn "*tls_gd_pcrel<bits>"
[(set (match_operand:P 0 "gpc_reg_operand" "=b")
(unspec:P [(match_operand:P 1 "rs6000_tls_symbol_ref" "")
(const_int 0)]
UNSPEC_TLSGD))]
"HAVE_AS_TLS && TARGET_ELF"
"la %0,%1@got@tlsgd@pcrel"
[(set_attr "prefixed" "yes")])
(define_insn_and_split "*tls_gd<bits>"
[(set (match_operand:P 0 "gpc_reg_operand" "=b")
(unspec:P [(match_operand:P 1 "rs6000_tls_symbol_ref" "")
(match_operand:P 2 "gpc_reg_operand" "b")]
UNSPEC_TLSGD))]
"HAVE_AS_TLS && TARGET_ELF"
"addi %0,%2,%1@got@tlsgd"
"&& TARGET_CMODEL != CMODEL_SMALL"
[(set (match_dup 3)
(high:P
(unspec:P [(match_dup 1) (match_dup 2)] UNSPEC_TLSGD)))
(set (match_dup 0)
(lo_sum:P (match_dup 3)
(unspec:P [(match_dup 1) (match_dup 2)] UNSPEC_TLSGD)))]
{
operands[3] = gen_reg_rtx (<MODE>mode);
}
[(set (attr "length")
(if_then_else (ne (symbol_ref "TARGET_CMODEL") (symbol_ref "CMODEL_SMALL"))
(const_int 8)
(const_int 4)))])
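;; With -mcmodel=medium or -mcmodel=large the 16-bit @got@tlsgd offset may
;; not reach the GOT entry, so the single addi above is split into the
;; high/lo_sum pair matched by the *tls_gd_high and *tls_gd_low patterns
;; below, e.g. (illustrative registers)
;;	addis 3,2,x@got@tlsgd@ha
;;	addi 3,3,x@got@tlsgd@l
;; which is why the length is 8 in that case.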
(define_insn "*tls_gd_high<bits>"
[(set (match_operand:P 0 "gpc_reg_operand" "=b")
(high:P
(unspec:P [(match_operand:P 1 "rs6000_tls_symbol_ref" "")
(match_operand:P 2 "gpc_reg_operand" "b")]
UNSPEC_TLSGD)))]
"HAVE_AS_TLS && TARGET_ELF && TARGET_CMODEL != CMODEL_SMALL"
"addis %0,%2,%1@got@tlsgd@ha")
(define_insn "*tls_gd_low<bits>"
[(set (match_operand:P 0 "gpc_reg_operand" "=b")
(lo_sum:P (match_operand:P 1 "gpc_reg_operand" "b")
(unspec:P [(match_operand:P 2 "rs6000_tls_symbol_ref" "")
(match_operand:P 3 "gpc_reg_operand" "b")]
UNSPEC_TLSGD)))]
"HAVE_AS_TLS && TARGET_ELF && TARGET_CMODEL != CMODEL_SMALL"
"addi %0,%1,%2@got@tlsgd@l")
(define_insn "*tls_ld_pcrel<bits>"
[(set (match_operand:P 0 "gpc_reg_operand" "=b")
(unspec:P [(const_int 0)]
UNSPEC_TLSLD))]
"HAVE_AS_TLS && TARGET_ELF"
"la %0,%&@got@tlsld@pcrel"
[(set_attr "prefixed" "yes")])
(define_insn_and_split "*tls_ld<bits>"
[(set (match_operand:P 0 "gpc_reg_operand" "=b")
(unspec:P [(match_operand:P 1 "gpc_reg_operand" "b")]
UNSPEC_TLSLD))]
"HAVE_AS_TLS && TARGET_ELF"
"addi %0,%1,%&@got@tlsld"
"&& TARGET_CMODEL != CMODEL_SMALL"
[(set (match_dup 2)
(high:P
(unspec:P [(match_dup 1)] UNSPEC_TLSLD)))
(set (match_dup 0)
(lo_sum:P (match_dup 2)
(unspec:P [(match_dup 1)] UNSPEC_TLSLD)))]
{
operands[2] = gen_reg_rtx (<MODE>mode);
}
[(set (attr "length")
(if_then_else (ne (symbol_ref "TARGET_CMODEL") (symbol_ref "CMODEL_SMALL"))
(const_int 8)
(const_int 4)))])
(define_insn "*tls_ld_high<bits>"
[(set (match_operand:P 0 "gpc_reg_operand" "=b")
(high:P
(unspec:P [(match_operand:P 1 "gpc_reg_operand" "b")]
UNSPEC_TLSLD)))]
"HAVE_AS_TLS && TARGET_ELF && TARGET_CMODEL != CMODEL_SMALL"
"addis %0,%1,%&@got@tlsld@ha")
(define_insn "*tls_ld_low<bits>"
[(set (match_operand:P 0 "gpc_reg_operand" "=b")
(lo_sum:P (match_operand:P 1 "gpc_reg_operand" "b")
(unspec:P [(match_operand:P 2 "gpc_reg_operand" "b")]
UNSPEC_TLSLD)))]
"HAVE_AS_TLS && TARGET_ELF && TARGET_CMODEL != CMODEL_SMALL"
"addi %0,%1,%&@got@tlsld@l")
(define_insn "tls_dtprel_<bits>"
[(set (match_operand:P 0 "gpc_reg_operand" "=r")
(unspec:P [(match_operand:P 1 "gpc_reg_operand" "b")
(match_operand:P 2 "rs6000_tls_symbol_ref" "")]
UNSPEC_TLSDTPREL))]
"HAVE_AS_TLS"
"addi %0,%1,%2@dtprel"
[(set (attr "prefixed")
(if_then_else (match_test "rs6000_tls_size == 16")
(const_string "no")
(const_string "yes")))])
(define_insn "tls_dtprel_ha_<bits>"
[(set (match_operand:P 0 "gpc_reg_operand" "=r")
(unspec:P [(match_operand:P 1 "gpc_reg_operand" "b")
(match_operand:P 2 "rs6000_tls_symbol_ref" "")]
UNSPEC_TLSDTPRELHA))]
"HAVE_AS_TLS"
"addis %0,%1,%2@dtprel@ha")
(define_insn "tls_dtprel_lo_<bits>"
[(set (match_operand:P 0 "gpc_reg_operand" "=r")
(unspec:P [(match_operand:P 1 "gpc_reg_operand" "b")
(match_operand:P 2 "rs6000_tls_symbol_ref" "")]
UNSPEC_TLSDTPRELLO))]
"HAVE_AS_TLS"
"addi %0,%1,%2@dtprel@l")
(define_insn_and_split "tls_got_dtprel_<bits>"
[(set (match_operand:P 0 "gpc_reg_operand" "=r")
(unspec:P [(match_operand:P 1 "gpc_reg_operand" "b")
(match_operand:P 2 "rs6000_tls_symbol_ref" "")]
UNSPEC_TLSGOTDTPREL))]
"HAVE_AS_TLS"
"<ptrload> %0,%2@got@dtprel(%1)"
"&& TARGET_CMODEL != CMODEL_SMALL"
[(set (match_dup 3)
(high:P
(unspec:P [(match_dup 1) (match_dup 2)] UNSPEC_TLSGOTDTPREL)))
(set (match_dup 0)
(lo_sum:P (match_dup 3)
(unspec:P [(match_dup 1) (match_dup 2)] UNSPEC_TLSGOTDTPREL)))]
{
operands[3] = gen_reg_rtx (TARGET_64BIT ? DImode : SImode);
}
[(set (attr "length")
(if_then_else (ne (symbol_ref "TARGET_CMODEL") (symbol_ref "CMODEL_SMALL"))
(const_int 8)
(const_int 4)))])
(define_insn "*tls_got_dtprel_high<bits>"
[(set (match_operand:P 0 "gpc_reg_operand" "=b")
(high:P
(unspec:P [(match_operand:P 1 "gpc_reg_operand" "b")
(match_operand:P 2 "rs6000_tls_symbol_ref" "")]
UNSPEC_TLSGOTDTPREL)))]
"HAVE_AS_TLS && TARGET_CMODEL != CMODEL_SMALL"
"addis %0,%1,%2@got@dtprel@ha")
(define_insn "*tls_got_dtprel_low<bits>"
[(set (match_operand:P 0 "gpc_reg_operand" "=r")
(lo_sum:P (match_operand:P 1 "gpc_reg_operand" "b")
(unspec:P [(match_operand:P 3 "gpc_reg_operand" "b")
(match_operand:P 2 "rs6000_tls_symbol_ref" "")]
UNSPEC_TLSGOTDTPREL)))]
"HAVE_AS_TLS && TARGET_CMODEL != CMODEL_SMALL"
"<ptrload> %0,%2@got@dtprel@l(%1)")
(define_insn "tls_tprel_<bits>"
[(set (match_operand:P 0 "gpc_reg_operand" "=r")
(unspec:P [(match_operand:P 1 "gpc_reg_operand" "b")
(match_operand:P 2 "rs6000_tls_symbol_ref" "")]
UNSPEC_TLSTPREL))]
"HAVE_AS_TLS"
"addi %0,%1,%2@tprel"
[(set (attr "prefixed")
(if_then_else (match_test "rs6000_tls_size == 16")
(const_string "no")
(const_string "yes")))])
(define_insn "tls_tprel_ha_<bits>"
[(set (match_operand:P 0 "gpc_reg_operand" "=r")
(unspec:P [(match_operand:P 1 "gpc_reg_operand" "b")
(match_operand:P 2 "rs6000_tls_symbol_ref" "")]
UNSPEC_TLSTPRELHA))]
"HAVE_AS_TLS"
"addis %0,%1,%2@tprel@ha")
(define_insn "tls_tprel_lo_<bits>"
[(set (match_operand:P 0 "gpc_reg_operand" "=r")
(unspec:P [(match_operand:P 1 "gpc_reg_operand" "b")
(match_operand:P 2 "rs6000_tls_symbol_ref" "")]
UNSPEC_TLSTPRELLO))]
"HAVE_AS_TLS"
"addi %0,%1,%2@tprel@l")
(define_insn "*tls_got_tprel_pcrel_<bits>"
[(set (match_operand:P 0 "gpc_reg_operand" "=b")
(unspec:P [(const_int 0)
(match_operand:P 1 "rs6000_tls_symbol_ref" "")]
UNSPEC_TLSGOTTPREL))]
"HAVE_AS_TLS"
"<ptrload> %0,%1@got@tprel@pcrel"
[(set_attr "prefixed" "yes")])
;; "b" output constraint here and on tls_tls input to support linker tls
;; optimization. The linker may edit the instructions emitted by a
;; tls_got_tprel/tls_tls pair to addis,addi.
(define_insn_and_split "tls_got_tprel_<bits>"
[(set (match_operand:P 0 "gpc_reg_operand" "=b")
(unspec:P [(match_operand:P 1 "gpc_reg_operand" "b")
(match_operand:P 2 "rs6000_tls_symbol_ref" "")]
UNSPEC_TLSGOTTPREL))]
"HAVE_AS_TLS"
"<ptrload> %0,%2@got@tprel(%1)"
"&& TARGET_CMODEL != CMODEL_SMALL"
[(set (match_dup 3)
(high:P
(unspec:P [(match_dup 1) (match_dup 2)] UNSPEC_TLSGOTTPREL)))
(set (match_dup 0)
(lo_sum:P (match_dup 3)
(unspec:P [(match_dup 1) (match_dup 2)] UNSPEC_TLSGOTTPREL)))]
{
operands[3] = gen_reg_rtx (TARGET_64BIT ? DImode : SImode);
}
[(set (attr "length")
(if_then_else (ne (symbol_ref "TARGET_CMODEL") (symbol_ref "CMODEL_SMALL"))
(const_int 8)
(const_int 4)))])
(define_insn "*tls_got_tprel_high<bits>"
[(set (match_operand:P 0 "gpc_reg_operand" "=b")
(high:P
(unspec:P [(match_operand:P 1 "gpc_reg_operand" "b")
(match_operand:P 2 "rs6000_tls_symbol_ref" "")]
UNSPEC_TLSGOTTPREL)))]
"HAVE_AS_TLS && TARGET_CMODEL != CMODEL_SMALL"
"addis %0,%1,%2@got@tprel@ha")
(define_insn "*tls_got_tprel_low<bits>"
[(set (match_operand:P 0 "gpc_reg_operand" "=r")
(lo_sum:P (match_operand:P 1 "gpc_reg_operand" "b")
(unspec:P [(match_operand:P 3 "gpc_reg_operand" "b")
(match_operand:P 2 "rs6000_tls_symbol_ref" "")]
UNSPEC_TLSGOTTPREL)))]
"HAVE_AS_TLS && TARGET_CMODEL != CMODEL_SMALL"
"<ptrload> %0,%2@got@tprel@l(%1)")
(define_insn "tls_tls_pcrel_<bits>"
[(set (match_operand:P 0 "gpc_reg_operand" "=r")
(unspec:P [(match_operand:P 1 "gpc_reg_operand" "b")
(match_operand:P 2 "rs6000_tls_symbol_ref" "")]
UNSPEC_TLSTLS_PCREL))]
"TARGET_ELF && HAVE_AS_TLS"
"add %0,%1,%2@tls@pcrel")
(define_insn "tls_tls_<bits>"
[(set (match_operand:P 0 "gpc_reg_operand" "=r")
(unspec:P [(match_operand:P 1 "gpc_reg_operand" "b")
(match_operand:P 2 "rs6000_tls_symbol_ref" "")]
UNSPEC_TLSTLS))]
"TARGET_ELF && HAVE_AS_TLS"
"add %0,%1,%2@tls")
(define_expand "tls_get_tpointer"
[(set (match_operand:SI 0 "gpc_reg_operand")
(unspec:SI [(const_int 0)] UNSPEC_TLSTLS))]
"TARGET_XCOFF && HAVE_AS_TLS"
{
emit_insn (gen_tls_get_tpointer_internal ());
emit_move_insn (operands[0], gen_rtx_REG (SImode, 3));
DONE;
})
(define_insn "tls_get_tpointer_internal"
[(set (reg:SI 3)
(unspec:SI [(const_int 0)] UNSPEC_TLSTLS))
(clobber (reg:SI LR_REGNO))]
"TARGET_XCOFF && HAVE_AS_TLS"
"bla __get_tpointer")
(define_expand "tls_get_addr<mode>"
[(set (match_operand:P 0 "gpc_reg_operand")
(unspec:P [(match_operand:P 1 "gpc_reg_operand")
(match_operand:P 2 "gpc_reg_operand")] UNSPEC_TLSTLS))]
"TARGET_XCOFF && HAVE_AS_TLS"
{
emit_move_insn (gen_rtx_REG (Pmode, 3), operands[1]);
emit_move_insn (gen_rtx_REG (Pmode, 4), operands[2]);
emit_insn (gen_tls_get_addr_internal<mode> ());
emit_move_insn (operands[0], gen_rtx_REG (Pmode, 3));
DONE;
})
(define_insn "tls_get_addr_internal<mode>"
[(set (reg:P 3)
(unspec:P [(reg:P 3) (reg:P 4)] UNSPEC_TLSTLS))
(clobber (reg:P 0))
(clobber (reg:P 4))
(clobber (reg:P 5))
(clobber (reg:P 11))
(clobber (reg:CC CR0_REGNO))
(clobber (reg:P LR_REGNO))]
"TARGET_XCOFF && HAVE_AS_TLS"
"bla __tls_get_addr")
;; Next come insns related to the calling sequence.
;;
;; First, an insn to allocate new stack space for dynamic use (e.g., alloca).
;; We move the back-chain and decrement the stack pointer.
;;
;; Operand1 is more naturally reg_or_short_operand. However, for a large
;; constant alloca, using that predicate will force the generic code to put
;; the constant size into a register before calling the expander.
;;
;; As a result the expander would not have the constant size information
;; in those cases and would have to generate less efficient code.
;;
;; Thus we allow reg_or_cint_operand instead so that the expander can see
;; the constant size. The value is forced into a register if necessary.
;;
(define_expand "allocate_stack"
[(set (match_operand 0 "gpc_reg_operand")
(minus (reg 1) (match_operand 1 "reg_or_cint_operand")))
(set (reg 1)
(minus (reg 1) (match_dup 1)))]
""
{
rtx chain = gen_reg_rtx (Pmode);
rtx stack_bot = gen_rtx_MEM (Pmode, stack_pointer_rtx);
rtx neg_op0;
rtx insn, par, set, mem;
/* By allowing reg_or_cint_operand as the predicate we can get
better code for stack-clash-protection because we do not lose
size information. But the rest of the code expects the operand
to be reg_or_short_operand. If it isn't, then force it into
a register. */
rtx orig_op1 = operands[1];
if (!reg_or_short_operand (operands[1], Pmode))
operands[1] = force_reg (Pmode, operands[1]);
emit_move_insn (chain, stack_bot);
/* Check stack bounds if necessary. */
if (crtl->limit_stack)
{
rtx available;
available = expand_binop (Pmode, sub_optab,
stack_pointer_rtx, stack_limit_rtx,
NULL_RTX, 1, OPTAB_WIDEN);
emit_insn (gen_cond_trap (LTU, available, operands[1], const0_rtx));
}
/* Allocate and probe if requested.
This may look similar to the loop we use for prologue allocations,
but it is critically different. For the former we know the loop
will iterate, but do not know that generally here. The former
uses that knowledge to rotate the loop. Combining them would be
possible with some performance cost. */
if (flag_stack_clash_protection)
{
rtx rounded_size, last_addr, residual;
HOST_WIDE_INT probe_interval;
compute_stack_clash_protection_loop_data (&rounded_size, &last_addr,
&residual, &probe_interval,
orig_op1);
/* We do occasionally get in here with constant sizes, we might
as well do a reasonable job when we obviously can. */
if (rounded_size != const0_rtx)
{
rtx loop_lab, end_loop;
bool rotated = CONST_INT_P (rounded_size);
rtx update = GEN_INT (-probe_interval);
if (probe_interval > 32768)
update = force_reg (Pmode, update);
emit_stack_clash_protection_probe_loop_start (&loop_lab, &end_loop,
last_addr, rotated);
if (TARGET_32BIT)
emit_insn (gen_movsi_update_stack (stack_pointer_rtx,
stack_pointer_rtx,
update, chain));
else
emit_insn (gen_movdi_update_stack (stack_pointer_rtx,
stack_pointer_rtx,
update, chain));
emit_stack_clash_protection_probe_loop_end (loop_lab, end_loop,
last_addr, rotated);
}
/* Now handle residuals. We just have to set operands[1] correctly
and let the rest of the expander run. */
operands[1] = residual;
}
if (!(CONST_INT_P (operands[1])
&& IN_RANGE (INTVAL (operands[1]), -32767, 32768)))
{
operands[1] = force_reg (Pmode, operands[1]);
neg_op0 = gen_reg_rtx (Pmode);
emit_insn (gen_neg2 (Pmode, neg_op0, operands[1]));
}
else
neg_op0 = GEN_INT (-INTVAL (operands[1]));
insn = emit_insn ((* ((TARGET_32BIT) ? gen_movsi_update_stack
: gen_movdi_update_stack))
(stack_pointer_rtx, stack_pointer_rtx, neg_op0,
chain));
/* Since we didn't use gen_frame_mem to generate the MEM, grab
it now and set the alias set/attributes. The above gen_*_update
calls will generate a PARALLEL with the MEM set being the first
operation. */
par = PATTERN (insn);
gcc_assert (GET_CODE (par) == PARALLEL);
set = XVECEXP (par, 0, 0);
gcc_assert (GET_CODE (set) == SET);
mem = SET_DEST (set);
gcc_assert (MEM_P (mem));
MEM_NOTRAP_P (mem) = 1;
set_mem_alias_set (mem, get_frame_alias_set ());
emit_move_insn (operands[0], virtual_stack_dynamic_rtx);
DONE;
})
;; These patterns say how to save and restore the stack pointer. We need not
;; save the stack pointer at function level since we are careful to
;; preserve the backchain. At block level, we have to restore the backchain
;; when we restore the stack pointer.
;;
;; For nonlocal gotos, we must save both the stack pointer and its
;; backchain and restore both. Note that in the nonlocal case, the
;; save area is a memory location.
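;; As a sketch (layout inferred from the expanders below), the nonlocal save
;; area holds two words:
;;   offset 0                  the saved backchain (copied from *sp)
;;   offset 4 (or 8 on 64-bit) the saved stack pointer itself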
(define_expand "save_stack_function"
[(match_operand 0 "any_operand")
(match_operand 1 "any_operand")]
""
"DONE;")
(define_expand "restore_stack_function"
[(match_operand 0 "any_operand")
(match_operand 1 "any_operand")]
""
"DONE;")
;; Adjust stack pointer (op0) to a new value (op1).
;; First copy old stack backchain to new location, and ensure that the
;; scheduler won't reorder the sp assignment before the backchain write.
(define_expand "restore_stack_block"
[(set (match_dup 2) (match_dup 3))
(set (match_dup 4) (match_dup 2))
(match_dup 5)
(set (match_operand 0 "register_operand")
(match_operand 1 "register_operand"))]
""
{
rtvec p;
operands[1] = force_reg (Pmode, operands[1]);
operands[2] = gen_reg_rtx (Pmode);
operands[3] = gen_frame_mem (Pmode, operands[0]);
operands[4] = gen_frame_mem (Pmode, operands[1]);
p = rtvec_alloc (1);
RTVEC_ELT (p, 0) = gen_rtx_SET (gen_frame_mem (BLKmode, operands[0]),
const0_rtx);
operands[5] = gen_rtx_PARALLEL (VOIDmode, p);
})
(define_expand "save_stack_nonlocal"
[(set (match_dup 3) (match_dup 4))
(set (match_operand 0 "memory_operand") (match_dup 3))
(set (match_dup 2) (match_operand 1 "register_operand"))]
""
{
int units_per_word = (TARGET_32BIT) ? 4 : 8;
/* Copy the backchain to the first word, sp to the second. */
operands[0] = adjust_address_nv (operands[0], Pmode, 0);
operands[2] = adjust_address_nv (operands[0], Pmode, units_per_word);
operands[3] = gen_reg_rtx (Pmode);
operands[4] = gen_frame_mem (Pmode, operands[1]);
})
(define_expand "restore_stack_nonlocal"
[(set (match_dup 2) (match_operand 1 "memory_operand"))
(set (match_dup 3) (match_dup 4))
(set (match_dup 5) (match_dup 2))
(match_dup 6)
(set (match_operand 0 "register_operand") (match_dup 3))]
""
{
int units_per_word = (TARGET_32BIT) ? 4 : 8;
rtvec p;
/* Restore the backchain from the first word, sp from the second. */
operands[2] = gen_reg_rtx (Pmode);
operands[3] = gen_reg_rtx (Pmode);
operands[1] = adjust_address_nv (operands[1], Pmode, 0);
operands[4] = adjust_address_nv (operands[1], Pmode, units_per_word);
operands[5] = gen_frame_mem (Pmode, operands[3]);
p = rtvec_alloc (1);
RTVEC_ELT (p, 0) = gen_rtx_SET (gen_frame_mem (BLKmode, operands[0]),
const0_rtx);
operands[6] = gen_rtx_PARALLEL (VOIDmode, p);
})
;; Load up a PC-relative address. Print_operand_address will append a @pcrel
;; to the symbol or label.
(define_insn "*pcrel_local_addr"
[(set (match_operand:DI 0 "gpc_reg_operand" "=r")
(match_operand:DI 1 "pcrel_local_address"))]
"TARGET_PCREL"
"la %0,%a1"
[(set_attr "prefixed" "yes")])
;; Load up a PC-relative address to an external symbol. If the symbol and the
;; program are both defined in the main program, the linker will optimize this
;; to a PADDI. Otherwise, it will create a GOT address that is relocated by
;; the dynamic linker and loaded up. Print_operand_address will append a
;; @got@pcrel to the symbol.
(define_insn "*pcrel_extern_addr"
[(set (match_operand:DI 0 "gpc_reg_operand" "=r")
(match_operand:DI 1 "pcrel_external_address"))]
"TARGET_PCREL"
"ld %0,%a1"
[(set_attr "prefixed" "yes")
(set_attr "type" "load")])
;; TOC register handling.
;; Code to initialize the TOC register...
(define_insn "load_toc_aix_si"
[(parallel [(set (match_operand:SI 0 "gpc_reg_operand" "=r")
(unspec:SI [(const_int 0)] UNSPEC_TOC))
(use (reg:SI 2))])]
"(DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2) && TARGET_32BIT"
{
char buf[30];
extern int need_toc_init;
need_toc_init = 1;
ASM_GENERATE_INTERNAL_LABEL (buf, "LCTOC", 1);
operands[1] = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (buf));
operands[2] = gen_rtx_REG (Pmode, 2);
return "lwz %0,%1(%2)";
}
[(set_attr "type" "load")
(set_attr "update" "no")
(set_attr "indexed" "no")])
(define_insn "load_toc_aix_di"
[(parallel [(set (match_operand:DI 0 "gpc_reg_operand" "=r")
(unspec:DI [(const_int 0)] UNSPEC_TOC))
(use (reg:DI 2))])]
"(DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2) && TARGET_64BIT"
{
char buf[30];
extern int need_toc_init;
need_toc_init = 1;
ASM_GENERATE_INTERNAL_LABEL (buf, "LCTOC",
!TARGET_ELF || !TARGET_MINIMAL_TOC);
if (TARGET_ELF)
strcat (buf, "@toc");
operands[1] = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (buf));
operands[2] = gen_rtx_REG (Pmode, 2);
return "ld %0,%1(%2)";
}
[(set_attr "type" "load")
(set_attr "update" "no")
(set_attr "indexed" "no")])
(define_insn "load_toc_v4_pic_si"
[(set (reg:SI LR_REGNO)
(unspec:SI [(const_int 0)] UNSPEC_TOC))]
"DEFAULT_ABI == ABI_V4 && flag_pic == 1 && TARGET_32BIT"
"bl _GLOBAL_OFFSET_TABLE_@local-4"
[(set_attr "type" "branch")])
(define_expand "load_toc_v4_PIC_1"
[(parallel [(set (reg:SI LR_REGNO)
(match_operand:SI 0 "immediate_operand" "s"))
(use (unspec [(match_dup 0)] UNSPEC_TOC))])]
"TARGET_ELF && DEFAULT_ABI == ABI_V4
&& (flag_pic == 2 || (flag_pic && TARGET_SECURE_PLT))"
"")
(define_insn "load_toc_v4_PIC_1_normal"
[(set (reg:SI LR_REGNO)
(match_operand:SI 0 "immediate_operand" "s"))
(use (unspec [(match_dup 0)] UNSPEC_TOC))]
"!TARGET_LINK_STACK && TARGET_ELF && DEFAULT_ABI == ABI_V4
&& (flag_pic == 2 || (flag_pic && TARGET_SECURE_PLT))"
"bcl 20,31,%0\n%0:"
[(set_attr "type" "branch")
(set_attr "cannot_copy" "yes")])
(define_insn "load_toc_v4_PIC_1_476"
[(set (reg:SI LR_REGNO)
(match_operand:SI 0 "immediate_operand" "s"))
(use (unspec [(match_dup 0)] UNSPEC_TOC))]
"TARGET_LINK_STACK && TARGET_ELF && DEFAULT_ABI == ABI_V4
&& (flag_pic == 2 || (flag_pic && TARGET_SECURE_PLT))"
{
char name[32];
static char templ[32];
get_ppc476_thunk_name (name);
sprintf (templ, "bl %s\n%%0:", name);
return templ;
}
[(set_attr "type" "branch")
(set_attr "cannot_copy" "yes")])
(define_expand "load_toc_v4_PIC_1b"
[(parallel [(set (reg:SI LR_REGNO)
(unspec:SI [(match_operand:SI 0 "immediate_operand" "s")
(label_ref (match_operand 1 ""))]
UNSPEC_TOCPTR))
(match_dup 1)])]
"TARGET_ELF && DEFAULT_ABI == ABI_V4 && flag_pic == 2"
"")
(define_insn "load_toc_v4_PIC_1b_normal"
[(set (reg:SI LR_REGNO)
(unspec:SI [(match_operand:SI 0 "immediate_operand" "s")
(label_ref (match_operand 1 "" ""))]
UNSPEC_TOCPTR))
(match_dup 1)]
"!TARGET_LINK_STACK && TARGET_ELF && DEFAULT_ABI == ABI_V4 && flag_pic == 2"
"bcl 20,31,$+8\;.long %0-$"
[(set_attr "type" "branch")
(set_attr "length" "8")])
(define_insn "load_toc_v4_PIC_1b_476"
[(set (reg:SI LR_REGNO)
(unspec:SI [(match_operand:SI 0 "immediate_operand" "s")
(label_ref (match_operand 1 "" ""))]
UNSPEC_TOCPTR))
(match_dup 1)]
"TARGET_LINK_STACK && TARGET_ELF && DEFAULT_ABI == ABI_V4 && flag_pic == 2"
{
char name[32];
static char templ[32];
get_ppc476_thunk_name (name);
sprintf (templ, "bl %s\;b $+8\;.long %%0-$", name);
return templ;
}
[(set_attr "type" "branch")
(set_attr "length" "16")])
(define_insn "load_toc_v4_PIC_2"
[(set (match_operand:SI 0 "gpc_reg_operand" "=r")
(mem:SI (plus:SI
(match_operand:SI 1 "gpc_reg_operand" "b")
(const
(minus:SI (match_operand:SI 2 "immediate_operand" "s")
(match_operand:SI 3 "immediate_operand" "s"))))))]
"TARGET_ELF && DEFAULT_ABI == ABI_V4 && flag_pic == 2"
"lwz %0,%2-%3(%1)"
[(set_attr "type" "load")])
(define_insn "load_toc_v4_PIC_3b"
[(set (match_operand:SI 0 "gpc_reg_operand" "=r")
(plus:SI
(match_operand:SI 1 "gpc_reg_operand" "b")
(high:SI
(const
(minus:SI (match_operand:SI 2 "symbol_ref_operand" "s")
(match_operand:SI 3 "symbol_ref_operand" "s"))))))]
"TARGET_ELF && TARGET_SECURE_PLT && DEFAULT_ABI == ABI_V4 && flag_pic"
"addis %0,%1,%2-%3@ha")
(define_insn "load_toc_v4_PIC_3c"
[(set (match_operand:SI 0 "gpc_reg_operand" "=r")
(lo_sum:SI
(match_operand:SI 1 "gpc_reg_operand" "b")
(const
(minus:SI (match_operand:SI 2 "symbol_ref_operand" "s")
(match_operand:SI 3 "symbol_ref_operand" "s")))))]
"TARGET_ELF && TARGET_SECURE_PLT && DEFAULT_ABI == ABI_V4 && flag_pic"
"addi %0,%1,%2-%3@l")
;; If the TOC is shared across a translation unit, as happens with all
;; the kinds of PIC that we support, we need to restore the TOC
;; pointer only when jumping between translation units.
;; On Darwin, we need to reload the picbase.
(define_expand "builtin_setjmp_receiver"
[(use (label_ref (match_operand 0 "")))]
"(DEFAULT_ABI == ABI_V4 && flag_pic == 1)
|| (TARGET_TOC && TARGET_MINIMAL_TOC)
|| (DEFAULT_ABI == ABI_DARWIN && flag_pic)"
{
#if TARGET_MACHO
if (DEFAULT_ABI == ABI_DARWIN)
{
rtx picrtx = gen_rtx_SYMBOL_REF (Pmode, MACHOPIC_FUNCTION_BASE_NAME);
rtx picreg = gen_rtx_REG (Pmode, RS6000_PIC_OFFSET_TABLE_REGNUM);
rtx tmplabrtx;
char tmplab[20];
crtl->uses_pic_offset_table = 1;
ASM_GENERATE_INTERNAL_LABEL(tmplab, "LSJR",
CODE_LABEL_NUMBER (operands[0]));
tmplabrtx = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (tmplab));
emit_insn (gen_load_macho_picbase (Pmode, tmplabrtx));
emit_move_insn (picreg, gen_rtx_REG (Pmode, LR_REGNO));
emit_insn (gen_macho_correct_pic (Pmode, picreg, picreg,
picrtx, tmplabrtx));
}
else
#endif
rs6000_emit_load_toc_table (FALSE);
DONE;
})
;; Largetoc support
(define_insn "*largetoc_high"
[(set (match_operand:DI 0 "gpc_reg_operand" "=b*r")
(high:DI
(unspec [(match_operand:DI 1 "" "")
(match_operand:DI 2 "gpc_reg_operand" "b")]
UNSPEC_TOCREL)))]
"TARGET_ELF && TARGET_CMODEL != CMODEL_SMALL"
"addis %0,%2,%1@toc@ha")
(define_insn "*largetoc_high_aix<mode>"
[(set (match_operand:P 0 "gpc_reg_operand" "=b*r")
(high:P
(unspec [(match_operand:P 1 "" "")
(match_operand:P 2 "gpc_reg_operand" "b")]
UNSPEC_TOCREL)))]
"TARGET_XCOFF && TARGET_CMODEL != CMODEL_SMALL"
"addis %0,%1@u(%2)")
(define_insn "*largetoc_high_plus"
[(set (match_operand:DI 0 "gpc_reg_operand" "=b*r")
(high:DI
(plus:DI
(unspec [(match_operand:DI 1 "" "")
(match_operand:DI 2 "gpc_reg_operand" "b")]
UNSPEC_TOCREL)
(match_operand:DI 3 "add_cint_operand" "n"))))]
"TARGET_ELF && TARGET_CMODEL != CMODEL_SMALL"
"addis %0,%2,%1+%3@toc@ha")
(define_insn "*largetoc_high_plus_aix<mode>"
[(set (match_operand:P 0 "gpc_reg_operand" "=b*r")
(high:P
(plus:P
(unspec [(match_operand:P 1 "" "")
(match_operand:P 2 "gpc_reg_operand" "b")]
UNSPEC_TOCREL)
(match_operand:P 3 "add_cint_operand" "n"))))]
"TARGET_XCOFF && TARGET_CMODEL != CMODEL_SMALL"
"addis %0,%1+%3@u(%2)")
(define_insn "*largetoc_low"
[(set (match_operand:DI 0 "gpc_reg_operand" "=r")
(lo_sum:DI (match_operand:DI 1 "gpc_reg_operand" "b")
(match_operand:DI 2 "" "")))]
"TARGET_ELF && TARGET_CMODEL != CMODEL_SMALL"
"addi %0,%1,%2@l")
(define_insn "*largetoc_low_aix<mode>"
[(set (match_operand:P 0 "gpc_reg_operand" "=r")
(lo_sum:P (match_operand:P 1 "gpc_reg_operand" "b")
(match_operand:P 2 "" "")))]
"TARGET_XCOFF && TARGET_CMODEL != CMODEL_SMALL"
"la %0,%2@l(%1)")
(define_insn_and_split "*tocref<mode>"
[(set (match_operand:P 0 "gpc_reg_operand" "=b")
(match_operand:P 1 "small_toc_ref" "R"))]
"TARGET_TOC
&& legitimate_constant_pool_address_p (operands[1], QImode, false)"
"la %0,%a1"
"&& TARGET_CMODEL != CMODEL_SMALL && reload_completed"
[(set (match_dup 0) (high:P (match_dup 1)))
(set (match_dup 0) (lo_sum:P (match_dup 0) (match_dup 1)))])
;; Elf specific ways of loading addresses for non-PIC code.
;; The output of this could be r0, but we make a very strong
;; preference for a base register because it will usually
;; be needed there.
(define_insn "elf_high"
[(set (match_operand:SI 0 "gpc_reg_operand" "=b*r")
(high:SI (match_operand 1 "" "")))]
"TARGET_ELF && !TARGET_64BIT && !flag_pic"
"lis %0,%1@ha")
(define_insn "elf_low"
[(set (match_operand:SI 0 "gpc_reg_operand" "=r")
(lo_sum:SI (match_operand:SI 1 "gpc_reg_operand" "b")
(match_operand 2 "" "")))]
"TARGET_ELF && !TARGET_64BIT && !flag_pic"
"la %0,%2@l(%1)")
(define_insn "*pltseq_tocsave_<mode>"
[(set (match_operand:P 0 "memory_operand" "=m")
(unspec:P [(match_operand:P 1 "gpc_reg_operand" "b")
(match_operand:P 2 "symbol_ref_operand" "s")
(match_operand:P 3 "" "")]
UNSPEC_PLTSEQ))]
"TARGET_PLTSEQ
&& DEFAULT_ABI == ABI_ELFv2"
{
return rs6000_pltseq_template (operands, RS6000_PLTSEQ_TOCSAVE);
})
(define_insn "*pltseq_plt16_ha_<mode>"
[(set (match_operand:P 0 "gpc_reg_operand" "=r")
(unspec:P [(match_operand:P 1 "" "")
(match_operand:P 2 "symbol_ref_operand" "s")
(match_operand:P 3 "" "")]
UNSPEC_PLT16_HA))]
"TARGET_PLTSEQ"
{
return rs6000_pltseq_template (operands, RS6000_PLTSEQ_PLT16_HA);
})
(define_insn "*pltseq_plt16_lo_<mode>"
[(set (match_operand:P 0 "gpc_reg_operand" "=r")
(unspec_volatile:P [(match_operand:P 1 "gpc_reg_operand" "b")
(match_operand:P 2 "symbol_ref_operand" "s")
(match_operand:P 3 "" "")]
UNSPECV_PLT16_LO))]
"TARGET_PLTSEQ"
{
return rs6000_pltseq_template (operands, RS6000_PLTSEQ_PLT16_LO);
}
[(set_attr "type" "load")])
(define_insn "*pltseq_mtctr_<mode>"
[(set (match_operand:P 0 "register_operand" "=c")
(unspec:P [(match_operand:P 1 "gpc_reg_operand" "r")
(match_operand:P 2 "symbol_ref_operand" "s")
(match_operand:P 3 "" "")]
UNSPEC_PLTSEQ))]
"TARGET_PLTSEQ"
{
return rs6000_pltseq_template (operands, RS6000_PLTSEQ_MTCTR);
})
(define_insn "*pltseq_plt_pcrel<mode>"
[(set (match_operand:P 0 "gpc_reg_operand" "=r")
(unspec_volatile:P [(match_operand:P 1 "" "")
(match_operand:P 2 "symbol_ref_operand" "s")
(match_operand:P 3 "" "")]
UNSPECV_PLT_PCREL))]
"HAVE_AS_PLTSEQ && TARGET_ELF
&& rs6000_pcrel_p (cfun)"
{
return rs6000_pltseq_template (operands, RS6000_PLTSEQ_PLT_PCREL34);
}
[(set_attr "type" "load")
(set_attr "length" "12")])
;; Call and call_value insns
;; For the purposes of expanding calls, Darwin is very similar to SYSV.
(define_expand "call"
[(parallel [(call (mem:SI (match_operand 0 "address_operand"))
(match_operand 1 ""))
(use (match_operand 2 ""))
(clobber (reg:SI LR_REGNO))])]
""
{
#if TARGET_MACHO
if (MACHOPIC_INDIRECT)
operands[0] = machopic_indirect_call_target (operands[0]);
#endif
gcc_assert (MEM_P (operands[0]));
operands[0] = XEXP (operands[0], 0);
if (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
rs6000_call_aix (NULL_RTX, operands[0], operands[1], operands[2]);
else if (DEFAULT_ABI == ABI_V4)
rs6000_call_sysv (NULL_RTX, operands[0], operands[1], operands[2]);
else if (DEFAULT_ABI == ABI_DARWIN)
rs6000_call_darwin (NULL_RTX, operands[0], operands[1], operands[2]);
else
gcc_unreachable ();
DONE;
})
(define_expand "call_value"
[(parallel [(set (match_operand 0 "")
(call (mem:SI (match_operand 1 "address_operand"))
(match_operand 2 "")))
(use (match_operand 3 ""))
(clobber (reg:SI LR_REGNO))])]
""
{
#if TARGET_MACHO
if (MACHOPIC_INDIRECT)
operands[1] = machopic_indirect_call_target (operands[1]);
#endif
gcc_assert (MEM_P (operands[1]));
operands[1] = XEXP (operands[1], 0);
if (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
rs6000_call_aix (operands[0], operands[1], operands[2], operands[3]);
else if (DEFAULT_ABI == ABI_V4)
rs6000_call_sysv (operands[0], operands[1], operands[2], operands[3]);
else if (DEFAULT_ABI == ABI_DARWIN)
rs6000_call_darwin (operands[0], operands[1], operands[2], operands[3]);
else
gcc_unreachable ();
DONE;
})
;; Call to function in current module. No TOC pointer reload needed.
;; Operand2 is nonzero if we are using the V.4 calling sequence and
;; either the function was not prototyped, or it was prototyped as a
;; variable argument function. It is > 0 if FP registers were passed
;; and < 0 if they were not.
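;; For illustration: the crxor/creqv sequences emitted by the templates below
;; maintain the SysV convention that CR bit 6 tells a variadic callee whether
;; any arguments were passed in floating-point registers; crxor 6,6,6 clears
;; the bit and creqv 6,6,6 sets it.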
(define_insn "*call_local<mode>"
[(call (mem:SI (match_operand:P 0 "current_file_function_operand" "s,s"))
(match_operand 1))
(use (match_operand:SI 2 "immediate_operand" "O,n"))
(clobber (reg:P LR_REGNO))]
"(INTVAL (operands[2]) & CALL_LONG) == 0"
{
if (INTVAL (operands[2]) & CALL_V4_SET_FP_ARGS)
output_asm_insn ("crxor 6,6,6", operands);
else if (INTVAL (operands[2]) & CALL_V4_CLEAR_FP_ARGS)
output_asm_insn ("creqv 6,6,6", operands);
if (rs6000_pcrel_p (cfun))
return "bl %z0@notoc";
return (DEFAULT_ABI == ABI_V4 && flag_pic) ? "bl %z0@local" : "bl %z0";
}
[(set_attr "type" "branch")
(set_attr "length" "4,8")])
(define_insn "*call_value_local<mode>"
[(set (match_operand 0 "" "")
(call (mem:SI (match_operand:P 1 "current_file_function_operand" "s,s"))
(match_operand 2)))
(use (match_operand:SI 3 "immediate_operand" "O,n"))
(clobber (reg:P LR_REGNO))]
"(INTVAL (operands[3]) & CALL_LONG) == 0"
{
if (INTVAL (operands[3]) & CALL_V4_SET_FP_ARGS)
output_asm_insn ("crxor 6,6,6", operands);
else if (INTVAL (operands[3]) & CALL_V4_CLEAR_FP_ARGS)
output_asm_insn ("creqv 6,6,6", operands);
if (rs6000_pcrel_p (cfun))
return "bl %z1@notoc";
return (DEFAULT_ABI == ABI_V4 && flag_pic) ? "bl %z1@local" : "bl %z1";
}
[(set_attr "type" "branch")
(set_attr "length" "4,8")])
;; A function pointer under System V is just a normal pointer.
;; operands[0] is the function pointer;
;; operands[1] is the TLS call arg;
;; operands[2] is the value FUNCTION_ARG returns for the VOID argument,
;; which indicates how to set cr1.
(define_insn "*call_indirect_nonlocal_sysv<mode>"
[(call (mem:SI (match_operand:P 0 "indirect_call_operand" "c,*l,X"))
(match_operand 1))
(use (match_operand:SI 2 "immediate_operand" "n,n,n"))
(clobber (reg:P LR_REGNO))]
"DEFAULT_ABI == ABI_V4
|| DEFAULT_ABI == ABI_DARWIN"
{
if (INTVAL (operands[2]) & CALL_V4_SET_FP_ARGS)
output_asm_insn ("crxor 6,6,6", operands);
else if (INTVAL (operands[2]) & CALL_V4_CLEAR_FP_ARGS)
output_asm_insn ("creqv 6,6,6", operands);
return rs6000_indirect_call_template (operands, 0);
}
[(set_attr "type" "jmpreg")
(set (attr "length")
(cond [(and (and (match_test "!rs6000_speculate_indirect_jumps")
(match_test "which_alternative != 1"))
(match_test "(INTVAL (operands[2]) & (CALL_V4_SET_FP_ARGS | CALL_V4_CLEAR_FP_ARGS))"))
(const_string "12")
(ior (and (match_test "!rs6000_speculate_indirect_jumps")
(match_test "which_alternative != 1"))
(match_test "(INTVAL (operands[2]) & (CALL_V4_SET_FP_ARGS | CALL_V4_CLEAR_FP_ARGS))"))
(const_string "8")]
(const_string "4")))])
(define_insn "*call_nonlocal_sysv<mode>"
[(call (mem:SI (match_operand:P 0 "symbol_ref_operand" "s,s"))
(match_operand 1))
(use (match_operand:SI 2 "immediate_operand" "O,n"))
(clobber (reg:P LR_REGNO))]
"(DEFAULT_ABI == ABI_DARWIN
|| (DEFAULT_ABI == ABI_V4
&& (INTVAL (operands[2]) & CALL_LONG) == 0))"
{
if (INTVAL (operands[2]) & CALL_V4_SET_FP_ARGS)
output_asm_insn ("crxor 6,6,6", operands);
else if (INTVAL (operands[2]) & CALL_V4_CLEAR_FP_ARGS)
output_asm_insn ("creqv 6,6,6", operands);
return rs6000_call_template (operands, 0);
}
[(set_attr "type" "branch,branch")
(set_attr "length" "4,8")])
(define_insn "*call_nonlocal_sysv_secure<mode>"
[(call (mem:SI (match_operand:P 0 "symbol_ref_operand" "s,s"))
(match_operand 1))
(use (match_operand:SI 2 "immediate_operand" "O,n"))
(use (match_operand:SI 3 "register_operand" "r,r"))
(clobber (reg:P LR_REGNO))]
"(DEFAULT_ABI == ABI_V4
&& TARGET_SECURE_PLT && flag_pic && !SYMBOL_REF_LOCAL_P (operands[0])
&& (INTVAL (operands[2]) & CALL_LONG) == 0)"
{
if (INTVAL (operands[2]) & CALL_V4_SET_FP_ARGS)
output_asm_insn ("crxor 6,6,6", operands);
else if (INTVAL (operands[2]) & CALL_V4_CLEAR_FP_ARGS)
output_asm_insn ("creqv 6,6,6", operands);
return rs6000_call_template (operands, 0);
}
[(set_attr "type" "branch,branch")
(set_attr "length" "4,8")])
(define_insn "*call_value_indirect_nonlocal_sysv<mode>"
[(set (match_operand 0 "" "")
(call (mem:SI (match_operand:P 1 "indirect_call_operand" "c,*l,X"))
(match_operand:P 2 "unspec_tls" "")))
(use (match_operand:SI 3 "immediate_operand" "n,n,n"))
(clobber (reg:P LR_REGNO))]
"DEFAULT_ABI == ABI_V4
|| DEFAULT_ABI == ABI_DARWIN"
{
if (INTVAL (operands[3]) & CALL_V4_SET_FP_ARGS)
output_asm_insn ("crxor 6,6,6", operands);
else if (INTVAL (operands[3]) & CALL_V4_CLEAR_FP_ARGS)
output_asm_insn ("creqv 6,6,6", operands);
return rs6000_indirect_call_template (operands, 1);
}
[(set_attr "type" "jmpreg")
(set (attr "length")
(plus
(if_then_else (match_test "IS_V4_FP_ARGS (operands[3])")
(const_int 4)
(const_int 0))
(if_then_else (and (match_test "!rs6000_speculate_indirect_jumps")
(match_test "which_alternative != 1"))
(const_int 8)
(const_int 4))))])
(define_insn "*call_value_nonlocal_sysv<mode>"
[(set (match_operand 0 "" "")
(call (mem:SI (match_operand:P 1 "symbol_ref_operand" "s"))
(match_operand:P 2 "unspec_tls" "")))
(use (match_operand:SI 3 "immediate_operand" "n"))
(clobber (reg:P LR_REGNO))]
"(DEFAULT_ABI == ABI_DARWIN
|| (DEFAULT_ABI == ABI_V4
&& (INTVAL (operands[3]) & CALL_LONG) == 0))"
{
if (INTVAL (operands[3]) & CALL_V4_SET_FP_ARGS)
output_asm_insn ("crxor 6,6,6", operands);
else if (INTVAL (operands[3]) & CALL_V4_CLEAR_FP_ARGS)
output_asm_insn ("creqv 6,6,6", operands);
return rs6000_call_template (operands, 1);
}
[(set_attr "type" "branch")
(set (attr "length")
(if_then_else (match_test "IS_V4_FP_ARGS (operands[3])")
(const_int 8)
(const_int 4)))])
(define_insn "*call_value_nonlocal_sysv_secure<mode>"
[(set (match_operand 0 "" "")
(call (mem:SI (match_operand:P 1 "symbol_ref_operand" "s"))
(match_operand:P 2 "unspec_tls" "")))
(use (match_operand:SI 3 "immediate_operand" "n"))
(use (match_operand:SI 4 "register_operand" "r"))
(clobber (reg:P LR_REGNO))]
"(DEFAULT_ABI == ABI_V4
&& TARGET_SECURE_PLT && flag_pic && !SYMBOL_REF_LOCAL_P (operands[1])
&& (INTVAL (operands[3]) & CALL_LONG) == 0)"
{
if (INTVAL (operands[3]) & CALL_V4_SET_FP_ARGS)
output_asm_insn ("crxor 6,6,6", operands);
else if (INTVAL (operands[3]) & CALL_V4_CLEAR_FP_ARGS)
output_asm_insn ("creqv 6,6,6", operands);
return rs6000_call_template (operands, 1);
}
[(set_attr "type" "branch")
(set (attr "length")
(if_then_else (match_test "IS_V4_FP_ARGS (operands[3])")
(const_int 8)
(const_int 4)))])
;; Call to AIX abi function which may be in another module.
;; Restore the TOC pointer (r2) after the call.
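;; A rough sketch of the usual direct call on these ABIs (assuming a
;; hypothetical external function "foo" and no pc-relative addressing):
;;   bl foo
;;   nop
;; where the linker can patch the nop into the load that restores r2 from its
;; stack save slot when the callee uses a different TOC; with pc-relative
;; code the nop is not needed, hence the 4/8 length attribute below.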
(define_insn "*call_nonlocal_aix<mode>"
[(call (mem:SI (match_operand:P 0 "symbol_ref_operand" "s"))
(match_operand 1))
(use (match_operand:SI 2 "immediate_operand" "n"))
(clobber (reg:P LR_REGNO))]
"(DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
&& (INTVAL (operands[2]) & CALL_LONG) == 0"
{
return rs6000_call_template (operands, 0);
}
[(set_attr "type" "branch")
(set (attr "length")
(if_then_else (match_test "rs6000_pcrel_p (cfun)")
(const_int 4)
(const_int 8)))])
(define_insn "*call_value_nonlocal_aix<mode>"
[(set (match_operand 0 "" "")
(call (mem:SI (match_operand:P 1 "symbol_ref_operand" "s"))
(match_operand:P 2 "unspec_tls" "")))
(use (match_operand:SI 3 "immediate_operand" "n"))
(clobber (reg:P LR_REGNO))]
"(DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
&& (INTVAL (operands[3]) & CALL_LONG) == 0"
{
return rs6000_call_template (operands, 1);
}
[(set_attr "type" "branch")
(set (attr "length")
(if_then_else (match_test "rs6000_pcrel_p (cfun)")
(const_int 4)
(const_int 8)))])
;; Call to indirect functions with the AIX ABI using a 3-word function descriptor.
;; Operand0 is the address of the function to call.
;; Operand3 is the location in the function descriptor to load r2 from.
;; Operand4 is the offset of the stack location holding the current TOC pointer.
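;; A very rough sketch of what rs6000_indirect_call_template produces for the
;; CTR alternative on a 64-bit target (illustration only, not verbatim):
;;   ld 2,<operand 3>       load the callee's TOC pointer from its descriptor
;;   bctrl                  call through CTR
;;   ld 2,<operand 4>(1)    reload our own TOC pointer from the stack slot
;; which matches the 12-byte length below (16 bytes when indirect-branch
;; speculation is being suppressed).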
(define_insn "*call_indirect_aix<mode>"
[(call (mem:SI (match_operand:P 0 "indirect_call_operand" "c,*l,X"))
(match_operand 1))
(use (match_operand:SI 2 "immediate_operand" "n,n,n"))
(use (match_operand:P 3 "memory_operand" "<ptrm>,<ptrm>,<ptrm>"))
(set (reg:P TOC_REGNUM) (unspec:P [(match_operand:P 4 "const_int_operand" "n,n,n")] UNSPEC_TOCSLOT))
(clobber (reg:P LR_REGNO))]
"DEFAULT_ABI == ABI_AIX"
{
return rs6000_indirect_call_template (operands, 0);
}
[(set_attr "type" "jmpreg")
(set (attr "length")
(if_then_else (and (match_test "!rs6000_speculate_indirect_jumps")
(match_test "which_alternative != 1"))
(const_string "16")
(const_string "12")))])
(define_insn "*call_value_indirect_aix<mode>"
[(set (match_operand 0 "" "")
(call (mem:SI (match_operand:P 1 "indirect_call_operand" "c,*l,X"))
(match_operand:P 2 "unspec_tls" "")))
(use (match_operand:SI 3 "immediate_operand" "n,n,n"))
(use (match_operand:P 4 "memory_operand" "<ptrm>,<ptrm>,<ptrm>"))
(set (reg:P TOC_REGNUM)
(unspec:P [(match_operand:P 5 "const_int_operand" "n,n,n")]
UNSPEC_TOCSLOT))
(clobber (reg:P LR_REGNO))]
"DEFAULT_ABI == ABI_AIX"
{
return rs6000_indirect_call_template (operands, 1);
}
[(set_attr "type" "jmpreg")
(set (attr "length")
(if_then_else (and (match_test "!rs6000_speculate_indirect_jumps")
(match_test "which_alternative != 1"))
(const_string "16")
(const_string "12")))])
;; Call to indirect functions with the ELFv2 ABI.
;; Operand0 is the address of the function to call.
;; Operand3 is the offset of the stack location holding the current TOC pointer.
(define_insn "*call_indirect_elfv2<mode>"
[(call (mem:SI (match_operand:P 0 "indirect_call_operand" "c,*l,X"))
(match_operand 1))
(use (match_operand:SI 2 "immediate_operand" "n,n,n"))
(set (reg:P TOC_REGNUM) (unspec:P [(match_operand:P 3 "const_int_operand" "n,n,n")] UNSPEC_TOCSLOT))
(clobber (reg:P LR_REGNO))]
"DEFAULT_ABI == ABI_ELFv2"
{
return rs6000_indirect_call_template (operands, 0);
}
[(set_attr "type" "jmpreg")
(set (attr "length")
(if_then_else (and (match_test "!rs6000_speculate_indirect_jumps")
(match_test "which_alternative != 1"))
(const_string "12")
(const_string "8")))])
(define_insn "*call_indirect_pcrel<mode>"
[(call (mem:SI (match_operand:P 0 "indirect_call_operand" "c,*l,X"))
(match_operand 1))
(use (match_operand:SI 2 "immediate_operand" "n,n,n"))
(clobber (reg:P LR_REGNO))]
"rs6000_pcrel_p (cfun)"
{
return rs6000_indirect_call_template (operands, 0);
}
[(set_attr "type" "jmpreg")
(set (attr "length")
(if_then_else (and (match_test "!rs6000_speculate_indirect_jumps")
(match_test "which_alternative != 1"))
(const_string "8")
(const_string "4")))])
(define_insn "*call_value_indirect_elfv2<mode>"
[(set (match_operand 0 "" "")
(call (mem:SI (match_operand:P 1 "indirect_call_operand" "c,*l,X"))
(match_operand:P 2 "unspec_tls" "")))
(use (match_operand:SI 3 "immediate_operand" "n,n,n"))
(set (reg:P TOC_REGNUM)
(unspec:P [(match_operand:P 4 "const_int_operand" "n,n,n")]
UNSPEC_TOCSLOT))
(clobber (reg:P LR_REGNO))]
"DEFAULT_ABI == ABI_ELFv2"
{
return rs6000_indirect_call_template (operands, 1);
}
[(set_attr "type" "jmpreg")
(set (attr "length")
(if_then_else (and (match_test "!rs6000_speculate_indirect_jumps")
(match_test "which_alternative != 1"))
(const_string "12")
(const_string "8")))])
(define_insn "*call_value_indirect_pcrel<mode>"
[(set (match_operand 0 "" "")
(call (mem:SI (match_operand:P 1 "indirect_call_operand" "c,*l,X"))
(match_operand:P 2 "unspec_tls" "")))
(use (match_operand:SI 3 "immediate_operand" "n,n,n"))
(clobber (reg:P LR_REGNO))]
"rs6000_pcrel_p (cfun)"
{
return rs6000_indirect_call_template (operands, 1);
}
[(set_attr "type" "jmpreg")
(set (attr "length")
(if_then_else (and (match_test "!rs6000_speculate_indirect_jumps")
(match_test "which_alternative != 1"))
(const_string "8")
(const_string "4")))])
;; Call subroutine returning any type.
(define_expand "untyped_call"
[(parallel [(call (match_operand 0 "")
(const_int 0))
(match_operand 1 "")
(match_operand 2 "")])]
""
{
int i;
emit_call_insn (gen_call (operands[0], const0_rtx, const0_rtx));
for (i = 0; i < XVECLEN (operands[2], 0); i++)
{
rtx set = XVECEXP (operands[2], 0, i);
emit_move_insn (SET_DEST (set), SET_SRC (set));
}
/* The optimizer does not know that the call sets the function value
registers we stored in the result block. We avoid problems by
claiming that all hard registers are used and clobbered at this
point. */
emit_insn (gen_blockage ());
DONE;
})
;; sibling call patterns
(define_expand "sibcall"
[(parallel [(call (mem:SI (match_operand 0 "address_operand"))
(match_operand 1 ""))
(use (match_operand 2 ""))
(simple_return)])]
""
{
#if TARGET_MACHO
if (MACHOPIC_INDIRECT)
operands[0] = machopic_indirect_call_target (operands[0]);
#endif
gcc_assert (MEM_P (operands[0]));
gcc_assert (CONST_INT_P (operands[1]));
operands[0] = XEXP (operands[0], 0);
if (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
rs6000_sibcall_aix (NULL_RTX, operands[0], operands[1], operands[2]);
else if (DEFAULT_ABI == ABI_V4)
rs6000_sibcall_sysv (NULL_RTX, operands[0], operands[1], operands[2]);
else if (DEFAULT_ABI == ABI_DARWIN)
rs6000_sibcall_darwin (NULL_RTX, operands[0], operands[1], operands[2]);
else
gcc_unreachable ();
DONE;
})
(define_expand "sibcall_value"
[(parallel [(set (match_operand 0 "register_operand")
(call (mem:SI (match_operand 1 "address_operand"))
(match_operand 2 "")))
(use (match_operand 3 ""))
(simple_return)])]
""
{
#if TARGET_MACHO
if (MACHOPIC_INDIRECT)
operands[1] = machopic_indirect_call_target (operands[1]);
#endif
gcc_assert (MEM_P (operands[1]));
gcc_assert (CONST_INT_P (operands[2]));
operands[1] = XEXP (operands[1], 0);
if (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
rs6000_sibcall_aix (operands[0], operands[1], operands[2], operands[3]);
else if (DEFAULT_ABI == ABI_V4)
rs6000_sibcall_sysv (operands[0], operands[1], operands[2], operands[3]);
else if (DEFAULT_ABI == ABI_DARWIN)
rs6000_sibcall_darwin (operands[0], operands[1], operands[2], operands[3]);
else
gcc_unreachable ();
DONE;
})
(define_insn "*sibcall_local32"
[(call (mem:SI (match_operand:SI 0 "current_file_function_operand" "s,s"))
(match_operand 1))
(use (match_operand:SI 2 "immediate_operand" "O,n"))
(simple_return)]
"(INTVAL (operands[2]) & CALL_LONG) == 0"
{
if (INTVAL (operands[2]) & CALL_V4_SET_FP_ARGS)
output_asm_insn ("crxor 6,6,6", operands);
else if (INTVAL (operands[2]) & CALL_V4_CLEAR_FP_ARGS)
output_asm_insn ("creqv 6,6,6", operands);
return (DEFAULT_ABI == ABI_V4 && flag_pic) ? "b %z0@local" : "b %z0";
}
[(set_attr "type" "branch")
(set_attr "length" "4,8")])
(define_insn "*sibcall_local64"
[(call (mem:SI (match_operand:DI 0 "current_file_function_operand" "s,s"))
(match_operand 1))
(use (match_operand:SI 2 "immediate_operand" "O,n"))
(simple_return)]
"TARGET_64BIT && (INTVAL (operands[2]) & CALL_LONG) == 0"
{
if (INTVAL (operands[2]) & CALL_V4_SET_FP_ARGS)
output_asm_insn ("crxor 6,6,6", operands);
else if (INTVAL (operands[2]) & CALL_V4_CLEAR_FP_ARGS)
output_asm_insn ("creqv 6,6,6", operands);
return (DEFAULT_ABI == ABI_V4 && flag_pic) ? "b %z0@local" : "b %z0";
}
[(set_attr "type" "branch")
(set_attr "length" "4,8")])
(define_insn "*sibcall_value_local32"
[(set (match_operand 0 "" "")
(call (mem:SI (match_operand:SI 1 "current_file_function_operand" "s,s"))
(match_operand 2)))
(use (match_operand:SI 3 "immediate_operand" "O,n"))
(simple_return)]
"(INTVAL (operands[3]) & CALL_LONG) == 0"
{
if (INTVAL (operands[3]) & CALL_V4_SET_FP_ARGS)
output_asm_insn ("crxor 6,6,6", operands);
else if (INTVAL (operands[3]) & CALL_V4_CLEAR_FP_ARGS)
output_asm_insn ("creqv 6,6,6", operands);
return (DEFAULT_ABI == ABI_V4 && flag_pic) ? "b %z1@local" : "b %z1";
}
[(set_attr "type" "branch")
(set_attr "length" "4,8")])
(define_insn "*sibcall_value_local64"
[(set (match_operand 0 "" "")
(call (mem:SI (match_operand:DI 1 "current_file_function_operand" "s,s"))
(match_operand 2)))
(use (match_operand:SI 3 "immediate_operand" "O,n"))
(simple_return)]
"TARGET_64BIT && (INTVAL (operands[3]) & CALL_LONG) == 0"
{
if (INTVAL (operands[3]) & CALL_V4_SET_FP_ARGS)
output_asm_insn ("crxor 6,6,6", operands);
else if (INTVAL (operands[3]) & CALL_V4_CLEAR_FP_ARGS)
output_asm_insn ("creqv 6,6,6", operands);
return (DEFAULT_ABI == ABI_V4 && flag_pic) ? "b %z1@local" : "b %z1";
}
[(set_attr "type" "branch")
(set_attr "length" "4,8")])
(define_insn "*sibcall_indirect_nonlocal_sysv<mode>"
[(call (mem:SI (match_operand:P 0 "indirect_call_operand" "c,*l,X"))
(match_operand 1))
(use (match_operand:SI 2 "immediate_operand" "n,n,n"))
(simple_return)]
"DEFAULT_ABI == ABI_V4
|| DEFAULT_ABI == ABI_DARWIN"
{
if (INTVAL (operands[2]) & CALL_V4_SET_FP_ARGS)
output_asm_insn ("crxor 6,6,6", operands);
else if (INTVAL (operands[2]) & CALL_V4_CLEAR_FP_ARGS)
output_asm_insn ("creqv 6,6,6", operands);
return rs6000_indirect_sibcall_template (operands, 0);
}
[(set_attr "type" "jmpreg")
(set (attr "length")
(cond [(and (and (match_test "!rs6000_speculate_indirect_jumps")
(match_test "which_alternative != 1"))
(match_test "(INTVAL (operands[2]) & (CALL_V4_SET_FP_ARGS | CALL_V4_CLEAR_FP_ARGS))"))
(const_string "12")
(ior (and (match_test "!rs6000_speculate_indirect_jumps")
(match_test "which_alternative != 1"))
(match_test "(INTVAL (operands[2]) & (CALL_V4_SET_FP_ARGS | CALL_V4_CLEAR_FP_ARGS))"))
(const_string "8")]
(const_string "4")))])
(define_insn "*sibcall_nonlocal_sysv<mode>"
[(call (mem:SI (match_operand:P 0 "symbol_ref_operand" "s,s"))
(match_operand 1))
(use (match_operand 2 "immediate_operand" "O,n"))
(simple_return)]
"(DEFAULT_ABI == ABI_DARWIN
|| DEFAULT_ABI == ABI_V4)
&& (INTVAL (operands[2]) & CALL_LONG) == 0"
{
if (INTVAL (operands[2]) & CALL_V4_SET_FP_ARGS)
output_asm_insn ("crxor 6,6,6", operands);
else if (INTVAL (operands[2]) & CALL_V4_CLEAR_FP_ARGS)
output_asm_insn ("creqv 6,6,6", operands);
return rs6000_sibcall_template (operands, 0);
}
[(set_attr "type" "branch")
(set_attr "length" "4,8")])
(define_insn "*sibcall_value_indirect_nonlocal_sysv<mode>"
[(set (match_operand 0 "" "")
(call (mem:SI (match_operand:P 1 "indirect_call_operand" "c,*l,X"))
(match_operand 2)))
(use (match_operand:SI 3 "immediate_operand" "n,n,n"))
(simple_return)]
"DEFAULT_ABI == ABI_V4
|| DEFAULT_ABI == ABI_DARWIN"
{
if (INTVAL (operands[3]) & CALL_V4_SET_FP_ARGS)
output_asm_insn ("crxor 6,6,6", operands);
else if (INTVAL (operands[3]) & CALL_V4_CLEAR_FP_ARGS)
output_asm_insn ("creqv 6,6,6", operands);
return rs6000_indirect_sibcall_template (operands, 1);
}
[(set_attr "type" "jmpreg")
(set (attr "length")
(cond [(and (and (match_test "!rs6000_speculate_indirect_jumps")
(match_test "which_alternative != 1"))
(match_test "(INTVAL (operands[3]) & (CALL_V4_SET_FP_ARGS | CALL_V4_CLEAR_FP_ARGS))"))
(const_string "12")
(ior (and (match_test "!rs6000_speculate_indirect_jumps")
(match_test "which_alternative != 1"))
(match_test "(INTVAL (operands[3]) & (CALL_V4_SET_FP_ARGS | CALL_V4_CLEAR_FP_ARGS))"))
(const_string "8")]
(const_string "4")))])
(define_insn "*sibcall_value_nonlocal_sysv<mode>"
[(set (match_operand 0 "" "")
(call (mem:SI (match_operand:P 1 "symbol_ref_operand" "s,s"))
(match_operand 2)))
(use (match_operand:SI 3 "immediate_operand" "O,n"))
(simple_return)]
"(DEFAULT_ABI == ABI_DARWIN
|| DEFAULT_ABI == ABI_V4)
&& (INTVAL (operands[3]) & CALL_LONG) == 0"
{
if (INTVAL (operands[3]) & CALL_V4_SET_FP_ARGS)
output_asm_insn ("crxor 6,6,6", operands);
else if (INTVAL (operands[3]) & CALL_V4_CLEAR_FP_ARGS)
output_asm_insn ("creqv 6,6,6", operands);
return rs6000_sibcall_template (operands, 1);
}
[(set_attr "type" "branch")
(set_attr "length" "4,8")])
;; AIX ABI sibling call patterns.
(define_insn "*sibcall_aix<mode>"
[(call (mem:SI (match_operand:P 0 "call_operand" "s,c"))
(match_operand 1))
(simple_return)]
"DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2"
{
if (which_alternative == 0)
return rs6000_sibcall_template (operands, 0);
else
return "b%T0";
}
[(set_attr "type" "branch")])
(define_insn "*sibcall_value_aix<mode>"
[(set (match_operand 0 "" "")
(call (mem:SI (match_operand:P 1 "call_operand" "s,c"))
(match_operand 2)))
(simple_return)]
"DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2"
{
if (which_alternative == 0)
return rs6000_sibcall_template (operands, 1);
else
return "b%T1";
}
[(set_attr "type" "branch")])
(define_expand "sibcall_epilogue"
[(use (const_int 0))]
""
{
if (!TARGET_SCHED_PROLOG)
emit_insn (gen_blockage ());
rs6000_emit_epilogue (EPILOGUE_TYPE_SIBCALL);
DONE;
})
;; UNSPEC_VOLATILE is considered to use and clobber all hard registers and
;; all of memory. This blocks insns from being moved across this point.
(define_insn "blockage"
[(unspec_volatile [(const_int 0)] UNSPECV_BLOCK)]
""
""
[(set_attr "length" "0")])
(define_expand "probe_stack_address"
[(use (match_operand 0 "address_operand"))]
""
{
operands[0] = gen_rtx_MEM (Pmode, operands[0]);
MEM_VOLATILE_P (operands[0]) = 1;
if (TARGET_64BIT)
emit_insn (gen_probe_stack_di (operands[0]));
else
emit_insn (gen_probe_stack_si (operands[0]));
DONE;
})
(define_insn "probe_stack_<mode>"
[(set (match_operand:P 0 "memory_operand" "=m")
(unspec:P [(const_int 0)] UNSPEC_PROBE_STACK))]
""
{
operands[1] = gen_rtx_REG (Pmode, 0);
return "st<wd>%U0%X0 %1,%0";
}
[(set_attr "type" "store")
(set (attr "update")
(if_then_else (match_operand 0 "update_address_mem")
(const_string "yes")
(const_string "no")))
(set (attr "indexed")
(if_then_else (match_operand 0 "indexed_address_mem")
(const_string "yes")
(const_string "no")))])
(define_insn "probe_stack_range<P:mode>"
[(set (match_operand:P 0 "register_operand" "=&r")
(unspec_volatile:P [(match_operand:P 1 "register_operand" "0")
(match_operand:P 2 "register_operand" "r")
(match_operand:P 3 "register_operand" "r")]
UNSPECV_PROBE_STACK_RANGE))]
""
"* return output_probe_stack_range (operands[0], operands[2], operands[3]);"
[(set_attr "type" "three")])
;; Compare insns are next. Note that the RS/6000 has two types of compares,
;; signed & unsigned, and one type of branch.
;;
;; Start with the DEFINE_EXPANDs to generate the rtl for compares, scc
;; insns, and branches.
(define_expand "cbranch<mode>4"
[(use (match_operator 0 "comparison_operator"
[(match_operand:GPR 1 "gpc_reg_operand")
(match_operand:GPR 2 "reg_or_short_operand")]))
(use (match_operand 3))]
""
{
/* Take care of the possibility that operands[2] might be negative but
this might be a logical operation. That insn doesn't exist. */
if (CONST_INT_P (operands[2])
&& INTVAL (operands[2]) < 0)
{
operands[2] = force_reg (<MODE>mode, operands[2]);
operands[0] = gen_rtx_fmt_ee (GET_CODE (operands[0]),
GET_MODE (operands[0]),
operands[1], operands[2]);
}
rs6000_emit_cbranch (<MODE>mode, operands);
DONE;
})
(define_expand "cbranch<mode>4"
[(use (match_operator 0 "comparison_operator"
[(match_operand:FP 1 "gpc_reg_operand")
(match_operand:FP 2 "gpc_reg_operand")]))
(use (match_operand 3))]
""
{
rs6000_emit_cbranch (<MODE>mode, operands);
DONE;
})
(define_expand "cstore<mode>4_signed"
[(use (match_operator 1 "signed_comparison_operator"
[(match_operand:P 2 "gpc_reg_operand")
(match_operand:P 3 "gpc_reg_operand")]))
(clobber (match_operand:P 0 "gpc_reg_operand"))]
""
{
enum rtx_code cond_code = GET_CODE (operands[1]);
rtx op0 = operands[0];
rtx op1 = operands[2];
rtx op2 = operands[3];
if (cond_code == GE || cond_code == LT)
{
cond_code = swap_condition (cond_code);
std::swap (op1, op2);
}
rtx tmp1 = gen_reg_rtx (<MODE>mode);
rtx tmp2 = gen_reg_rtx (<MODE>mode);
rtx tmp3 = gen_reg_rtx (<MODE>mode);
int sh = GET_MODE_BITSIZE (<MODE>mode) - 1;
emit_insn (gen_lshr<mode>3 (tmp1, op1, GEN_INT (sh)));
emit_insn (gen_ashr<mode>3 (tmp2, op2, GEN_INT (sh)));
emit_insn (gen_subf<mode>3_carry (tmp3, op1, op2));
if (cond_code == LE)
emit_insn (gen_add<mode>3_carry_in (op0, tmp1, tmp2));
else
{
rtx tmp4 = gen_reg_rtx (<MODE>mode);
emit_insn (gen_add<mode>3_carry_in (tmp4, tmp1, tmp2));
emit_insn (gen_xor<mode>3 (op0, tmp4, const1_rtx));
}
DONE;
})
(define_expand "cstore<mode>4_unsigned"
[(use (match_operator 1 "unsigned_comparison_operator"
[(match_operand:P 2 "gpc_reg_operand")
(match_operand:P 3 "reg_or_short_operand")]))
(clobber (match_operand:P 0 "gpc_reg_operand"))]
""
{
enum rtx_code cond_code = GET_CODE (operands[1]);
rtx op0 = operands[0];
rtx op1 = operands[2];
rtx op2 = operands[3];
if (cond_code == GEU || cond_code == LTU)
{
cond_code = swap_condition (cond_code);
std::swap (op1, op2);
}
if (!gpc_reg_operand (op1, <MODE>mode))
op1 = force_reg (<MODE>mode, op1);
if (!reg_or_short_operand (op2, <MODE>mode))
op2 = force_reg (<MODE>mode, op2);
rtx tmp = gen_reg_rtx (<MODE>mode);
rtx tmp2 = gen_reg_rtx (<MODE>mode);
emit_insn (gen_subf<mode>3_carry (tmp, op1, op2));
emit_insn (gen_subf<mode>3_carry_in_xx (tmp2));
if (cond_code == LEU)
emit_insn (gen_add<mode>3 (op0, tmp2, const1_rtx));
else
emit_insn (gen_neg<mode>2 (op0, tmp2));
DONE;
})
(define_expand "cstore_si_as_di"
[(use (match_operator 1 "unsigned_comparison_operator"
[(match_operand:SI 2 "gpc_reg_operand")
(match_operand:SI 3 "reg_or_short_operand")]))
(clobber (match_operand:SI 0 "gpc_reg_operand"))]
""
{
int uns_flag = unsigned_comparison_operator (operands[1], VOIDmode) ? 1 : 0;
enum rtx_code cond_code = signed_condition (GET_CODE (operands[1]));
operands[2] = force_reg (SImode, operands[2]);
operands[3] = force_reg (SImode, operands[3]);
rtx op1 = gen_reg_rtx (DImode);
rtx op2 = gen_reg_rtx (DImode);
convert_move (op1, operands[2], uns_flag);
convert_move (op2, operands[3], uns_flag);
if (cond_code == GT || cond_code == LE)
{
cond_code = swap_condition (cond_code);
std::swap (op1, op2);
}
rtx tmp = gen_reg_rtx (DImode);
rtx tmp2 = gen_reg_rtx (DImode);
emit_insn (gen_subdi3 (tmp, op1, op2));
emit_insn (gen_lshrdi3 (tmp2, tmp, GEN_INT (63)));
rtx tmp3;
switch (cond_code)
{
default:
gcc_unreachable ();
case LT:
tmp3 = tmp2;
break;
case GE:
tmp3 = gen_reg_rtx (DImode);
emit_insn (gen_xordi3 (tmp3, tmp2, const1_rtx));
break;
}
convert_move (operands[0], tmp3, 1);
DONE;
})
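;; A short worked example of the widening trick above (illustration only):
;; for an unsigned SImode test a < b, both values are zero-extended to
;; DImode, so a - b cannot wrap; bit 63 of the difference is then set exactly
;; when a < b, and the logical shift by 63 turns that bit into the 0/1
;; result (GE simply flips it with the final xor).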
(define_expand "cstore<mode>4_signed_imm"
[(use (match_operator 1 "signed_comparison_operator"
[(match_operand:GPR 2 "gpc_reg_operand")
(match_operand:GPR 3 "immediate_operand")]))
(clobber (match_operand:GPR 0 "gpc_reg_operand"))]
""
{
bool invert = false;
enum rtx_code cond_code = GET_CODE (operands[1]);
rtx op0 = operands[0];
rtx op1 = operands[2];
HOST_WIDE_INT val = INTVAL (operands[3]);
if (cond_code == GE || cond_code == GT)
{
cond_code = reverse_condition (cond_code);
invert = true;
}
if (cond_code == LE)
val++;
rtx tmp = gen_reg_rtx (<MODE>mode);
emit_insn (gen_add<mode>3 (tmp, op1, GEN_INT (-val)));
rtx x = gen_reg_rtx (<MODE>mode);
if (val < 0)
emit_insn (gen_and<mode>3 (x, op1, tmp));
else
emit_insn (gen_ior<mode>3 (x, op1, tmp));
if (invert)
{
rtx tmp = gen_reg_rtx (<MODE>mode);
emit_insn (gen_one_cmpl<mode>2 (tmp, x));
x = tmp;
}
int sh = GET_MODE_BITSIZE (<MODE>mode) - 1;
emit_insn (gen_lshr<mode>3 (op0, x, GEN_INT (sh)));
DONE;
})
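;; A worked example of the bit trick above (illustration only): for the
;; SImode test r < 7, the expander computes tmp = r + (-7) and x = r | tmp;
;; the sign bit of x is set exactly when r is negative or r - 7 goes
;; negative, i.e. when r < 7, and the final logical shift by 31 turns that
;; sign bit into the 0/1 result.  LE first bumps the constant by one, and
;; GE/GT are handled by computing the inverted condition and complementing.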
(define_expand "cstore<mode>4_unsigned_imm"
[(use (match_operator 1 "unsigned_comparison_operator"
[(match_operand:GPR 2 "gpc_reg_operand")
(match_operand:GPR 3 "immediate_operand")]))
(clobber (match_operand:GPR 0 "gpc_reg_operand"))]
""
{
bool invert = false;
enum rtx_code cond_code = GET_CODE (operands[1]);
rtx op0 = operands[0];
rtx op1 = operands[2];
HOST_WIDE_INT val = INTVAL (operands[3]);
if (cond_code == GEU || cond_code == GTU)
{
cond_code = reverse_condition (cond_code);
invert = true;
}
if (cond_code == LEU)
val++;
rtx tmp = gen_reg_rtx (<MODE>mode);
rtx tmp2 = gen_reg_rtx (<MODE>mode);
emit_insn (gen_add<mode>3 (tmp, op1, GEN_INT (-val)));
emit_insn (gen_one_cmpl<mode>2 (tmp2, op1));
rtx x = gen_reg_rtx (<MODE>mode);
if (val < 0)
emit_insn (gen_ior<mode>3 (x, tmp, tmp2));
else
emit_insn (gen_and<mode>3 (x, tmp, tmp2));
if (invert)
{
rtx tmp = gen_reg_rtx (<MODE>mode);
emit_insn (gen_one_cmpl<mode>2 (tmp, x));
x = tmp;
}
int sh = GET_MODE_BITSIZE (<MODE>mode) - 1;
emit_insn (gen_lshr<mode>3 (op0, x, GEN_INT (sh)));
DONE;
})
(define_expand "cstore<mode>4"
[(use (match_operator 1 "comparison_operator"
[(match_operand:GPR 2 "gpc_reg_operand")
(match_operand:GPR 3 "reg_or_short_operand")]))
(clobber (match_operand:GPR 0 "gpc_reg_operand"))]
""
{
/* Expanding EQ and NE directly to machine instructions does not help,
and hurts combine; go through the eq<mode>3/ne<mode>3 expanders instead. */
if (GET_CODE (operands[1]) == EQ)
emit_insn (gen_eq<mode>3 (operands[0], operands[2], operands[3]));
else if (<MODE>mode == Pmode
&& GET_CODE (operands[1]) == NE)
emit_insn (gen_ne<mode>3 (operands[0], operands[2], operands[3]));
else if (GET_CODE (operands[1]) == NE)
{
rtx tmp = gen_reg_rtx (<MODE>mode);
emit_insn (gen_eq<mode>3 (tmp, operands[2], operands[3]));
emit_insn (gen_xor<mode>3 (operands[0], tmp, const1_rtx));
}
/* If ISEL is fast, expand to it. */
else if (TARGET_ISEL)
rs6000_emit_int_cmove (operands[0], operands[1], const1_rtx, const0_rtx);
/* Expanding the unsigned comparisons helps a lot: all the neg_ltu
etc. combinations magically work out just right. */
else if (<MODE>mode == Pmode
&& unsigned_comparison_operator (operands[1], VOIDmode))
emit_insn (gen_cstore<mode>4_unsigned (operands[0], operands[1],
operands[2], operands[3]));
/* For comparisons smaller than Pmode we can cheaply do things in Pmode. */
else if (<MODE>mode == SImode && Pmode == DImode)
emit_insn (gen_cstore_si_as_di (operands[0], operands[1],
operands[2], operands[3]));
/* For signed comparisons against a constant, we can do some simple
bit-twiddling. */
else if (signed_comparison_operator (operands[1], VOIDmode)
&& CONST_INT_P (operands[3]))
emit_insn (gen_cstore<mode>4_signed_imm (operands[0], operands[1],
operands[2], operands[3]));
/* And similarly for unsigned comparisons. */
else if (unsigned_comparison_operator (operands[1], VOIDmode)
&& CONST_INT_P (operands[3]))
emit_insn (gen_cstore<mode>4_unsigned_imm (operands[0], operands[1],
operands[2], operands[3]));
/* We also do not want to use mfcr for signed comparisons. */
else if (<MODE>mode == Pmode
&& signed_comparison_operator (operands[1], VOIDmode))
emit_insn (gen_cstore<mode>4_signed (operands[0], operands[1],
operands[2], operands[3]));
/* Everything else, use the mfcr brute force. */
else
rs6000_emit_sCOND (<MODE>mode, operands);
DONE;
})
(define_expand "cstore<mode>4"
[(use (match_operator 1 "comparison_operator"
[(match_operand:FP 2 "gpc_reg_operand")
(match_operand:FP 3 "gpc_reg_operand")]))
(clobber (match_operand:SI 0 "gpc_reg_operand"))]
""
{
rs6000_emit_sCOND (<MODE>mode, operands);
DONE;
})
(define_expand "stack_protect_set"
[(match_operand 0 "memory_operand")
(match_operand 1 "memory_operand")]
""
{
if (rs6000_stack_protector_guard == SSP_TLS)
{
rtx reg = gen_rtx_REG (Pmode, rs6000_stack_protector_guard_reg);
rtx offset = GEN_INT (rs6000_stack_protector_guard_offset);
rtx addr = gen_rtx_PLUS (Pmode, reg, offset);
operands[1] = gen_rtx_MEM (Pmode, addr);
}
if (TARGET_64BIT)
emit_insn (gen_stack_protect_setdi (operands[0], operands[1]));
else
emit_insn (gen_stack_protect_setsi (operands[0], operands[1]));
DONE;
})
(define_insn "stack_protect_setsi"
[(set (match_operand:SI 0 "memory_operand" "=m")
(unspec:SI [(match_operand:SI 1 "memory_operand" "m")] UNSPEC_SP_SET))
(set (match_scratch:SI 2 "=&r") (const_int 0))]
"TARGET_32BIT"
"lwz%U1%X1 %2,%1\;stw%U0%X0 %2,%0\;li %2,0"
[(set_attr "type" "three")
(set_attr "length" "12")])
;; We can't use the prefixed attribute here because there are two memory
;; instructions.  We can't split the insn because this operation needs
;; to be done in one piece.
(define_insn "stack_protect_setdi"
[(set (match_operand:DI 0 "memory_operand" "=Y")
(unspec:DI [(match_operand:DI 1 "memory_operand" "Y")] UNSPEC_SP_SET))
(set (match_scratch:DI 2 "=&r") (const_int 0))]
"TARGET_64BIT"
{
if (prefixed_memory (operands[1], DImode))
output_asm_insn ("pld %2,%1", operands);
else
output_asm_insn ("ld%U1%X1 %2,%1", operands);
if (prefixed_memory (operands[0], DImode))
output_asm_insn ("pstd %2,%0", operands);
else
output_asm_insn ("std%U0%X0 %2,%0", operands);
return "li %2,0";
}
[(set_attr "type" "three")
;; Back to back prefixed memory instructions take 20 bytes (8 bytes for each
;; prefixed instruction + 4 bytes for the possible NOP). Add in 4 bytes for
;; the LI 0 at the end.
(set_attr "prefixed" "no")
(set_attr "num_insns" "3")
(set (attr "length")
(cond [(and (match_operand 0 "prefixed_memory")
(match_operand 1 "prefixed_memory"))
(const_int 24)
(ior (match_operand 0 "prefixed_memory")
(match_operand 1 "prefixed_memory"))
(const_int 20)]
(const_int 12)))])
(define_expand "stack_protect_test"
[(match_operand 0 "memory_operand")
(match_operand 1 "memory_operand")
(match_operand 2 "")]
""
{
rtx guard = operands[1];
if (rs6000_stack_protector_guard == SSP_TLS)
{
rtx reg = gen_rtx_REG (Pmode, rs6000_stack_protector_guard_reg);
rtx offset = GEN_INT (rs6000_stack_protector_guard_offset);
rtx addr = gen_rtx_PLUS (Pmode, reg, offset);
guard = gen_rtx_MEM (Pmode, addr);
}
operands[1] = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, guard), UNSPEC_SP_TEST);
rtx test = gen_rtx_EQ (VOIDmode, operands[0], operands[1]);
rtx jump = gen_cbranchsi4 (test, operands[0], operands[1], operands[2]);
emit_jump_insn (jump);
DONE;
})
(define_insn "stack_protect_testsi"
[(set (match_operand:CCEQ 0 "cc_reg_operand" "=x,?y")
(unspec:CCEQ [(match_operand:SI 1 "memory_operand" "m,m")
(match_operand:SI 2 "memory_operand" "m,m")]
UNSPEC_SP_TEST))
(set (match_scratch:SI 4 "=r,r") (const_int 0))
(clobber (match_scratch:SI 3 "=&r,&r"))]
"TARGET_32BIT"
"@
lwz%U1%X1 %3,%1\;lwz%U2%X2 %4,%2\;xor. %3,%3,%4\;li %4,0
lwz%U1%X1 %3,%1\;lwz%U2%X2 %4,%2\;cmplw %0,%3,%4\;li %3,0\;li %4,0"
[(set_attr "length" "16,20")])
;; We can't use the prefixed attribute here because there are two memory
;; instructions.  We can't split the insn because this operation needs
;; to be done in one piece.
(define_insn "stack_protect_testdi"
[(set (match_operand:CCEQ 0 "cc_reg_operand" "=x,?y")
(unspec:CCEQ [(match_operand:DI 1 "memory_operand" "Y,Y")
(match_operand:DI 2 "memory_operand" "Y,Y")]
UNSPEC_SP_TEST))
(set (match_scratch:DI 4 "=r,r") (const_int 0))
(clobber (match_scratch:DI 3 "=&r,&r"))]
"TARGET_64BIT"
{
if (prefixed_memory (operands[1], DImode))
output_asm_insn ("pld %3,%1", operands);
else
output_asm_insn ("ld%U1%X1 %3,%1", operands);
if (prefixed_memory (operands[2], DImode))
output_asm_insn ("pld %4,%2", operands);
else
output_asm_insn ("ld%U2%X2 %4,%2", operands);
if (which_alternative == 0)
output_asm_insn ("xor. %3,%3,%4", operands);
else
output_asm_insn ("cmpld %0,%3,%4\;li %3,0", operands);
return "li %4,0";
}
;; Back to back prefixed memory instructions take 20 bytes (8 bytes for each
;; prefixed instruction + 4 bytes for the possible NOP). Add in either 4 or
;; 8 bytes to do the test.
[(set_attr "prefixed" "no")
(set_attr "num_insns" "4,5")
(set (attr "length")
(cond [(and (match_operand 1 "prefixed_memory")
(match_operand 2 "prefixed_memory"))
(if_then_else (eq_attr "alternative" "0")
(const_int 28)
(const_int 32))
(ior (match_operand 1 "prefixed_memory")
(match_operand 2 "prefixed_memory"))
(if_then_else (eq_attr "alternative" "0")
(const_int 20)
(const_int 24))]
(if_then_else (eq_attr "alternative" "0")
(const_int 16)
(const_int 20))))])
;; Here are the actual compare insns.
(define_insn "*cmp<mode>_signed"
[(set (match_operand:CC 0 "cc_reg_operand" "=y")
(compare:CC (match_operand:GPR 1 "gpc_reg_operand" "r")
(match_operand:GPR 2 "reg_or_short_operand" "rI")))]
""
"cmp<wd>%I2 %0,%1,%2"
[(set_attr "type" "cmp")])
(define_insn "*cmp<mode>_unsigned"
[(set (match_operand:CCUNS 0 "cc_reg_operand" "=y")
(compare:CCUNS (match_operand:GPR 1 "gpc_reg_operand" "r")
(match_operand:GPR 2 "reg_or_u_short_operand" "rK")))]
""
"cmpl<wd>%I2 %0,%1,%2"
[(set_attr "type" "cmp")])
;; If we are comparing a register for equality with a large constant,
;; we can do this with an XOR followed by a compare. But this is profitable
;; only if the large constant is only used for the comparison (and in this
;; case we already have a register to reuse as scratch).
;;
;; For 64-bit registers, we could only do so if the constant's bit 15 is clear:
;; otherwise we'd need to XOR with FFFFFFFF????0000 which is not available.
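;; A worked example (illustration only): to branch on r == 0x12345, which is
;; too big for a cmpwi immediate, the matched sequence materialises the
;; constant and compares against it.  The peephole instead computes
;;   sextc = sign-extended low 16 bits of 0x12345 = 0x2345
;;   xorv  = 0x12345 ^ 0x2345                     = 0x10000
;; and emits r ^ 0x10000 (a single xoris into the now-dead scratch register)
;; followed by a compare against the 16-bit immediate 0x2345.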
(define_peephole2
[(set (match_operand:SI 0 "register_operand")
(match_operand:SI 1 "logical_const_operand"))
(set (match_dup 0) (match_operator:SI 3 "boolean_or_operator"
[(match_dup 0)
(match_operand:SI 2 "logical_const_operand")]))
(set (match_operand:CC 4 "cc_reg_operand")
(compare:CC (match_operand:SI 5 "gpc_reg_operand")
(match_dup 0)))
(set (pc)
(if_then_else (match_operator 6 "equality_operator"
[(match_dup 4) (const_int 0)])
(match_operand 7 "")
(match_operand 8 "")))]
"peep2_reg_dead_p (3, operands[0])
&& peep2_reg_dead_p (4, operands[4])
&& REGNO (operands[0]) != REGNO (operands[5])"
[(set (match_dup 0) (xor:SI (match_dup 5) (match_dup 9)))
(set (match_dup 4) (compare:CC (match_dup 0) (match_dup 10)))
(set (pc) (if_then_else (match_dup 6) (match_dup 7) (match_dup 8)))]
{
/* Get the constant we are comparing against, and see what it looks like
when sign-extended from 16 to 32 bits. Then see what constant we could
XOR with SEXTC to get the sign-extended value. */
rtx cnst = simplify_const_binary_operation (GET_CODE (operands[3]),
SImode,
operands[1], operands[2]);
HOST_WIDE_INT c = INTVAL (cnst);
HOST_WIDE_INT sextc = ((c & 0xffff) ^ 0x8000) - 0x8000;
HOST_WIDE_INT xorv = c ^ sextc;
operands[9] = GEN_INT (xorv);
operands[10] = GEN_INT (sextc);
})
;; Only need to compare second words if first words equal
(define_insn "*cmp<mode>_internal1"
[(set (match_operand:CCFP 0 "cc_reg_operand" "=y")
(compare:CCFP (match_operand:IBM128 1 "gpc_reg_operand" "d")
(match_operand:IBM128 2 "gpc_reg_operand" "d")))]
"!TARGET_XL_COMPAT && FLOAT128_IBM_P (<MODE>mode)
&& TARGET_HARD_FLOAT && TARGET_LONG_DOUBLE_128"
"fcmpu %0,%1,%2\;bne %0,$+8\;fcmpu %0,%L1,%L2"
[(set_attr "type" "fpcompare")
(set_attr "length" "12")])
(define_insn_and_split "*cmp<IBM128:mode>_internal2"
[(set (match_operand:CCFP 0 "cc_reg_operand" "=y")
(compare:CCFP (match_operand:IBM128 1 "gpc_reg_operand" "d")
(match_operand:IBM128 2 "gpc_reg_operand" "d")))
(clobber (match_scratch:DF 3 "=d"))
(clobber (match_scratch:DF 4 "=d"))
(clobber (match_scratch:DF 5 "=d"))
(clobber (match_scratch:DF 6 "=d"))
(clobber (match_scratch:DF 7 "=d"))
(clobber (match_scratch:DF 8 "=d"))
(clobber (match_scratch:DF 9 "=d"))
(clobber (match_scratch:DF 10 "=d"))
(clobber (match_scratch:GPR 11 "=b"))]
"TARGET_XL_COMPAT && FLOAT128_IBM_P (<IBM128:MODE>mode)
&& TARGET_HARD_FLOAT && TARGET_LONG_DOUBLE_128"
"#"
"&& reload_completed"
[(set (match_dup 3) (match_dup 14))
(set (match_dup 4) (match_dup 15))
(set (match_dup 9) (abs:DF (match_dup 5)))
(set (match_dup 0) (compare:CCFP (match_dup 9) (match_dup 3)))
(set (pc) (if_then_else (ne (match_dup 0) (const_int 0))
(label_ref (match_dup 12))
(pc)))
(set (match_dup 0) (compare:CCFP (match_dup 5) (match_dup 7)))
(set (pc) (label_ref (match_dup 13)))
(match_dup 12)
(set (match_dup 10) (minus:DF (match_dup 5) (match_dup 7)))
(set (match_dup 9) (minus:DF (match_dup 6) (match_dup 8)))
(set (match_dup 9) (plus:DF (match_dup 10) (match_dup 9)))
(set (match_dup 0) (compare:CCFP (match_dup 9) (match_dup 4)))
(match_dup 13)]
{
REAL_VALUE_TYPE rv;
const int lo_word = LONG_DOUBLE_LARGE_FIRST ? GET_MODE_SIZE (DFmode) : 0;
const int hi_word = LONG_DOUBLE_LARGE_FIRST ? 0 : GET_MODE_SIZE (DFmode);
operands[5] = simplify_gen_subreg (DFmode, operands[1],
<IBM128:MODE>mode, hi_word);
operands[6] = simplify_gen_subreg (DFmode, operands[1],
<IBM128:MODE>mode, lo_word);
operands[7] = simplify_gen_subreg (DFmode, operands[2],
<IBM128:MODE>mode, hi_word);
operands[8] = simplify_gen_subreg (DFmode, operands[2],
<IBM128:MODE>mode, lo_word);
operands[12] = gen_label_rtx ();
operands[13] = gen_label_rtx ();
real_inf (&rv);
operands[14] = force_const_mem (DFmode,
const_double_from_real_value (rv, DFmode));
operands[15] = force_const_mem (DFmode,
const_double_from_real_value (dconst0,
DFmode));
if (TARGET_TOC)
{
rtx tocref;
tocref = create_TOC_reference (XEXP (operands[14], 0), operands[11]);
operands[14] = gen_const_mem (DFmode, tocref);
tocref = create_TOC_reference (XEXP (operands[15], 0), operands[11]);
operands[15] = gen_const_mem (DFmode, tocref);
set_mem_alias_set (operands[14], get_TOC_alias_set ());
set_mem_alias_set (operands[15], get_TOC_alias_set ());
}
})
;; Now we have the scc insns. We can do some combinations because of the
;; way the machine works.
;;
;; Note that this is probably faster if we can put an insn between the
;; mfcr and rlwinm, but this is tricky. Let's leave it for now. In most
;; cases the insns below which don't use an intermediate CR field will
;; be used instead.
(define_insn "set<mode>_cc"
[(set (match_operand:GPR 0 "gpc_reg_operand" "=r")
(match_operator:GPR 1 "scc_comparison_operator"
[(match_operand 2 "cc_reg_operand" "y")
(const_int 0)]))]
""
"mfcr %0%Q2\;rlwinm %0,%0,%J1,1"
[(set (attr "type")
(cond [(match_test "TARGET_MFCRF")
(const_string "mfcrf")
]
(const_string "mfcr")))
(set_attr "length" "8")])
(define_code_iterator cmp [eq ne lt ltu gt gtu le leu ge geu])
(define_code_attr UNS [(eq "CC")
(ne "CC")
(lt "CC") (ltu "CCUNS")
(gt "CC") (gtu "CCUNS")
(le "CC") (leu "CCUNS")
(ge "CC") (geu "CCUNS")])
(define_code_attr UNSu_ [(eq "")
(ne "")
(lt "") (ltu "u_")
(gt "") (gtu "u_")
(le "") (leu "u_")
(ge "") (geu "u_")])
(define_code_attr UNSIK [(eq "I")
(ne "I")
(lt "I") (ltu "K")
(gt "I") (gtu "K")
(le "I") (leu "K")
(ge "I") (geu "K")])
(define_insn_and_split "<code><GPR:mode><GPR2:mode>2_isel"
[(set (match_operand:GPR 0 "gpc_reg_operand" "=r")
(cmp:GPR (match_operand:GPR2 1 "gpc_reg_operand" "r")
(match_operand:GPR2 2 "reg_or_<cmp:UNSu_>short_operand" "r<cmp:UNSIK>")))
(clobber (match_scratch:GPR 3 "=r"))
(clobber (match_scratch:GPR 4 "=r"))
(clobber (match_scratch:<UNS> 5 "=y"))]
"TARGET_ISEL
&& !(<CODE> == EQ && operands[2] == const0_rtx)
&& !(<CODE> == NE && operands[2] == const0_rtx
&& <GPR:MODE>mode == Pmode && <GPR2:MODE>mode == Pmode)"
"#"
"&& 1"
[(pc)]
{
rtx_code code = <CODE>;
if (CONST_INT_P (operands[2]) && code != EQ && code != NE)
{
HOST_WIDE_INT val = INTVAL (operands[2]);
if (code == LT && val != -0x8000)
{
code = LE;
val--;
}
if (code == GT && val != 0x7fff)
{
code = GE;
val++;
}
if (code == LTU && val != 0)
{
code = LEU;
val--;
}
if (code == GTU && val != 0xffff)
{
code = GEU;
val++;
}
operands[2] = GEN_INT (val);
}
if (code == NE || code == LE || code == GE || code == LEU || code == GEU)
operands[3] = const0_rtx;
else
{
if (GET_CODE (operands[3]) == SCRATCH)
operands[3] = gen_reg_rtx (<GPR:MODE>mode);
emit_move_insn (operands[3], const0_rtx);
}
if (GET_CODE (operands[4]) == SCRATCH)
operands[4] = gen_reg_rtx (<GPR:MODE>mode);
emit_move_insn (operands[4], const1_rtx);
if (GET_CODE (operands[5]) == SCRATCH)
operands[5] = gen_reg_rtx (<UNS>mode);
rtx c1 = gen_rtx_COMPARE (<UNS>mode, operands[1], operands[2]);
emit_insn (gen_rtx_SET (operands[5], c1));
rtx c2 = gen_rtx_fmt_ee (code, <GPR:MODE>mode, operands[5], const0_rtx);
rtx x = gen_rtx_IF_THEN_ELSE (<GPR:MODE>mode, c2, operands[4], operands[3]);
emit_move_insn (operands[0], x);
DONE;
}
[(set (attr "cost")
(if_then_else (match_test "(CONST_INT_P (operands[2]) && <CODE> != EQ)
|| <CODE> == NE
|| <CODE> == LE || <CODE> == GE
|| <CODE> == LEU || <CODE> == GEU")
(const_string "9")
(const_string "10")))])
(define_mode_attr scc_eq_op2 [(SI "rKLI")
(DI "rKJI")])
(define_expand "eq<mode>3"
[(parallel [
(set (match_operand:GPR 0 "gpc_reg_operand" "=r")
(eq:GPR (match_operand:GPR 1 "gpc_reg_operand" "r")
(match_operand:GPR 2 "scc_eq_operand" "<scc_eq_op2>")))
(clobber (match_scratch:GPR 3 "=r"))
(clobber (match_scratch:GPR 4 "=r"))])]
""
{
if (TARGET_ISEL && operands[2] != const0_rtx)
{
emit_insn (gen_eq<mode><mode>2_isel (operands[0], operands[1],
operands[2]));
DONE;
}
})
(define_insn_and_split "*eq<mode>3"
[(set (match_operand:GPR 0 "gpc_reg_operand" "=r")
(eq:GPR (match_operand:GPR 1 "gpc_reg_operand" "r")
(match_operand:GPR 2 "scc_eq_operand" "<scc_eq_op2>")))
(clobber (match_scratch:GPR 3 "=r"))
(clobber (match_scratch:GPR 4 "=r"))]
"!(TARGET_ISEL && operands[2] != const0_rtx)"
"#"
"&& 1"
[(set (match_dup 4)
(clz:GPR (match_dup 3)))
(set (match_dup 0)
(lshiftrt:GPR (match_dup 4)
(match_dup 5)))]
{
operands[3] = rs6000_emit_eqne (<MODE>mode,
operands[1], operands[2], operands[3]);
if (GET_CODE (operands[4]) == SCRATCH)
operands[4] = gen_reg_rtx (<MODE>mode);
operands[5] = GEN_INT (exact_log2 (GET_MODE_BITSIZE (<MODE>mode)));
}
[(set (attr "length")
(if_then_else (match_test "operands[2] == const0_rtx")
(const_string "8")
(const_string "12")))])
(define_expand "ne<mode>3"
[(parallel [
(set (match_operand:P 0 "gpc_reg_operand" "=r")
(ne:P (match_operand:P 1 "gpc_reg_operand" "r")
(match_operand:P 2 "scc_eq_operand" "<scc_eq_op2>")))
(clobber (match_scratch:P 3 "=r"))
(clobber (match_scratch:P 4 "=r"))
(clobber (reg:P CA_REGNO))])]
""
{
if (TARGET_ISEL && operands[2] != const0_rtx)
{
emit_insn (gen_ne<mode><mode>2_isel (operands[0], operands[1],
operands[2]));
DONE;
}
})
(define_insn_and_split "*ne<mode>3"
[(set (match_operand:P 0 "gpc_reg_operand" "=r")
(ne:P (match_operand:P 1 "gpc_reg_operand" "r")
(match_operand:P 2 "scc_eq_operand" "<scc_eq_op2>")))
(clobber (match_scratch:P 3 "=r"))
(clobber (match_scratch:P 4 "=r"))
(clobber (reg:P CA_REGNO))]
"!(TARGET_ISEL && operands[2] != const0_rtx)"
"#"
"&& 1"
[(parallel [(set (match_dup 4)
(plus:P (match_dup 3)
(const_int -1)))
(set (reg:P CA_REGNO)
(ne:P (match_dup 3)
(const_int 0)))])
(parallel [(set (match_dup 0)
(plus:P (plus:P (not:P (match_dup 4))
(reg:P CA_REGNO))
(match_dup 3)))
(clobber (reg:P CA_REGNO))])]
{
operands[3] = rs6000_emit_eqne (<MODE>mode,
operands[1], operands[2], operands[3]);
if (GET_CODE (operands[4]) == SCRATCH)
operands[4] = gen_reg_rtx (<MODE>mode);
}
[(set (attr "length")
(if_then_else (match_test "operands[2] == const0_rtx")
(const_string "8")
(const_string "12")))])
(define_insn_and_split "*neg_eq_<mode>"
[(set (match_operand:P 0 "gpc_reg_operand" "=r")
(neg:P (eq:P (match_operand:P 1 "gpc_reg_operand" "r")
(match_operand:P 2 "scc_eq_operand" "<scc_eq_op2>"))))
(clobber (match_scratch:P 3 "=r"))
(clobber (match_scratch:P 4 "=r"))
(clobber (reg:P CA_REGNO))]
""
"#"
""
[(parallel [(set (match_dup 4)
(plus:P (match_dup 3)
(const_int -1)))
(set (reg:P CA_REGNO)
(ne:P (match_dup 3)
(const_int 0)))])
(parallel [(set (match_dup 0)
(plus:P (reg:P CA_REGNO)
(const_int -1)))
(clobber (reg:P CA_REGNO))])]
{
operands[3] = rs6000_emit_eqne (<MODE>mode,
operands[1], operands[2], operands[3]);
if (GET_CODE (operands[4]) == SCRATCH)
operands[4] = gen_reg_rtx (<MODE>mode);
}
[(set (attr "length")
(if_then_else (match_test "operands[2] == const0_rtx")
(const_string "8")
(const_string "12")))])
(define_insn_and_split "*neg_ne_<mode>"
[(set (match_operand:P 0 "gpc_reg_operand" "=r")
(neg:P (ne:P (match_operand:P 1 "gpc_reg_operand" "r")
(match_operand:P 2 "scc_eq_operand" "<scc_eq_op2>"))))
(clobber (match_scratch:P 3 "=r"))
(clobber (match_scratch:P 4 "=r"))
(clobber (reg:P CA_REGNO))]
""
"#"
""
[(parallel [(set (match_dup 4)
(neg:P (match_dup 3)))
(set (reg:P CA_REGNO)
(eq:P (match_dup 3)
(const_int 0)))])
(parallel [(set (match_dup 0)
(plus:P (reg:P CA_REGNO)
(const_int -1)))
(clobber (reg:P CA_REGNO))])]
{
operands[3] = rs6000_emit_eqne (<MODE>mode,
operands[1], operands[2], operands[3]);
if (GET_CODE (operands[4]) == SCRATCH)
operands[4] = gen_reg_rtx (<MODE>mode);
}
[(set (attr "length")
(if_then_else (match_test "operands[2] == const0_rtx")
(const_string "8")
(const_string "12")))])
(define_insn_and_split "*plus_eq_<mode>"
[(set (match_operand:P 0 "gpc_reg_operand" "=r")
(plus:P (eq:P (match_operand:P 1 "gpc_reg_operand" "r")
(match_operand:P 2 "scc_eq_operand" "<scc_eq_op2>"))
(match_operand:P 3 "gpc_reg_operand" "r")))
(clobber (match_scratch:P 4 "=r"))
(clobber (match_scratch:P 5 "=r"))
(clobber (reg:P CA_REGNO))]
""
"#"
""
[(parallel [(set (match_dup 5)
(neg:P (match_dup 4)))
(set (reg:P CA_REGNO)
(eq:P (match_dup 4)
(const_int 0)))])
(parallel [(set (match_dup 0)
(plus:P (match_dup 3)
(reg:P CA_REGNO)))
(clobber (reg:P CA_REGNO))])]
{
operands[4] = rs6000_emit_eqne (<MODE>mode,
operands[1], operands[2], operands[4]);
if (GET_CODE (operands[5]) == SCRATCH)
operands[5] = gen_reg_rtx (<MODE>mode);
}
[(set (attr "length")
(if_then_else (match_test "operands[2] == const0_rtx")
(const_string "8")
(const_string "12")))])
(define_insn_and_split "*plus_ne_<mode>"
[(set (match_operand:P 0 "gpc_reg_operand" "=r")
(plus:P (ne:P (match_operand:P 1 "gpc_reg_operand" "r")
(match_operand:P 2 "scc_eq_operand" "<scc_eq_op2>"))
(match_operand:P 3 "gpc_reg_operand" "r")))
(clobber (match_scratch:P 4 "=r"))
(clobber (match_scratch:P 5 "=r"))
(clobber (reg:P CA_REGNO))]
""
"#"
""
[(parallel [(set (match_dup 5)
(plus:P (match_dup 4)
(const_int -1)))
(set (reg:P CA_REGNO)
(ne:P (match_dup 4)
(const_int 0)))])
(parallel [(set (match_dup 0)
(plus:P (match_dup 3)
(reg:P CA_REGNO)))
(clobber (reg:P CA_REGNO))])]
{
operands[4] = rs6000_emit_eqne (<MODE>mode,
operands[1], operands[2], operands[4]);
if (GET_CODE (operands[5]) == SCRATCH)
operands[5] = gen_reg_rtx (<MODE>mode);
}
[(set (attr "length")
(if_then_else (match_test "operands[2] == const0_rtx")
(const_string "8")
(const_string "12")))])
(define_insn_and_split "*minus_eq_<mode>"
[(set (match_operand:P 0 "gpc_reg_operand" "=r")
(minus:P (match_operand:P 3 "gpc_reg_operand" "r")
(eq:P (match_operand:P 1 "gpc_reg_operand" "r")
(match_operand:P 2 "scc_eq_operand" "<scc_eq_op2>"))))
(clobber (match_scratch:P 4 "=r"))
(clobber (match_scratch:P 5 "=r"))
(clobber (reg:P CA_REGNO))]
""
"#"
""
[(parallel [(set (match_dup 5)
(plus:P (match_dup 4)
(const_int -1)))
(set (reg:P CA_REGNO)
(ne:P (match_dup 4)
(const_int 0)))])
(parallel [(set (match_dup 0)
(plus:P (plus:P (match_dup 3)
(reg:P CA_REGNO))
(const_int -1)))
(clobber (reg:P CA_REGNO))])]
{
operands[4] = rs6000_emit_eqne (<MODE>mode,
operands[1], operands[2], operands[4]);
if (GET_CODE (operands[5]) == SCRATCH)
operands[5] = gen_reg_rtx (<MODE>mode);
}
[(set (attr "length")
(if_then_else (match_test "operands[2] == const0_rtx")
(const_string "8")
(const_string "12")))])
(define_insn_and_split "*minus_ne_<mode>"
[(set (match_operand:P 0 "gpc_reg_operand" "=r")
(minus:P (match_operand:P 3 "gpc_reg_operand" "r")
(ne:P (match_operand:P 1 "gpc_reg_operand" "r")
(match_operand:P 2 "scc_eq_operand" "<scc_eq_op2>"))))
(clobber (match_scratch:P 4 "=r"))
(clobber (match_scratch:P 5 "=r"))
(clobber (reg:P CA_REGNO))]
""
"#"
""
[(parallel [(set (match_dup 5)
(neg:P (match_dup 4)))
(set (reg:P CA_REGNO)
(eq:P (match_dup 4)
(const_int 0)))])
(parallel [(set (match_dup 0)
(plus:P (plus:P (match_dup 3)
(reg:P CA_REGNO))
(const_int -1)))
(clobber (reg:P CA_REGNO))])]
{
operands[4] = rs6000_emit_eqne (<MODE>mode,
operands[1], operands[2], operands[4]);
if (GET_CODE (operands[5]) == SCRATCH)
operands[5] = gen_reg_rtx (<MODE>mode);
}
[(set (attr "length")
(if_then_else (match_test "operands[2] == const0_rtx")
(const_string "8")
(const_string "12")))])
(define_insn_and_split "*eqsi3_ext<mode>"
[(set (match_operand:EXTSI 0 "gpc_reg_operand" "=r")
(eq:EXTSI (match_operand:SI 1 "gpc_reg_operand" "r")
(match_operand:SI 2 "scc_eq_operand" "rKLI")))
(clobber (match_scratch:SI 3 "=r"))
(clobber (match_scratch:SI 4 "=r"))]
""
"#"
""
[(set (match_dup 4)
(clz:SI (match_dup 3)))
(set (match_dup 0)
(zero_extend:EXTSI
(lshiftrt:SI (match_dup 4)
(const_int 5))))]
{
operands[3] = rs6000_emit_eqne (SImode,
operands[1], operands[2], operands[3]);
if (GET_CODE (operands[4]) == SCRATCH)
operands[4] = gen_reg_rtx (SImode);
}
[(set (attr "length")
(if_then_else (match_test "operands[2] == const0_rtx")
(const_string "8")
(const_string "12")))])
(define_insn_and_split "*nesi3_ext<mode>"
[(set (match_operand:EXTSI 0 "gpc_reg_operand" "=r")
(ne:EXTSI (match_operand:SI 1 "gpc_reg_operand" "r")
(match_operand:SI 2 "scc_eq_operand" "rKLI")))
(clobber (match_scratch:SI 3 "=r"))
(clobber (match_scratch:SI 4 "=r"))
(clobber (match_scratch:EXTSI 5 "=r"))]
"!TARGET_ISEL"
"#"
"&& 1"
[(set (match_dup 4)
(clz:SI (match_dup 3)))
(set (match_dup 5)
(zero_extend:EXTSI
(lshiftrt:SI (match_dup 4)
(const_int 5))))
(set (match_dup 0)
(xor:EXTSI (match_dup 5)
(const_int 1)))]
{
operands[3] = rs6000_emit_eqne (SImode,
operands[1], operands[2], operands[3]);
if (GET_CODE (operands[4]) == SCRATCH)
operands[4] = gen_reg_rtx (SImode);
if (GET_CODE (operands[5]) == SCRATCH)
operands[5] = gen_reg_rtx (<MODE>mode);
}
[(set (attr "length")
(if_then_else (match_test "operands[2] == const0_rtx")
(const_string "12")
(const_string "16")))])
(define_code_iterator fp_rev [ordered ne unle unge])
(define_code_iterator fp_two [ltgt le ge unlt ungt uneq])
(define_insn_and_split "*<code><mode>_cc"
[(set (match_operand:GPR 0 "gpc_reg_operand" "=r")
(fp_rev:GPR (match_operand:CCFP 1 "cc_reg_operand" "y")
(const_int 0)))]
"!flag_finite_math_only"
"#"
"&& 1"
[(pc)]
{
rtx_code revcode = reverse_condition_maybe_unordered (<CODE>);
rtx eq = gen_rtx_fmt_ee (revcode, <MODE>mode, operands[1], const0_rtx);
rtx tmp = gen_reg_rtx (<MODE>mode);
emit_move_insn (tmp, eq);
emit_insn (gen_xor<mode>3 (operands[0], tmp, const1_rtx));
DONE;
}
[(set_attr "length" "12")])
(define_insn_and_split "*<code><mode>_cc"
[(set (match_operand:GPR 0 "gpc_reg_operand" "=r")
(fp_two:GPR (match_operand:CCFP 1 "cc_reg_operand" "y")
(const_int 0)))]
"!flag_finite_math_only"
"#"
"&& 1"
[(pc)]
{
rtx cc = rs6000_emit_fp_cror (<CODE>, <MODE>mode, operands[1]);
emit_move_insn (operands[0], gen_rtx_EQ (<MODE>mode, cc, const0_rtx));
DONE;
}
[(set_attr "length" "12")])
;; Conditional branches.
;; These either are a single bc insn, or a bc around a b.
(define_insn "*cbranch"
[(set (pc)
(if_then_else (match_operator 1 "branch_comparison_operator"
[(match_operand 2 "cc_reg_operand" "y")
(const_int 0)])
(label_ref (match_operand 0))
(pc)))]
""
{
return output_cbranch (operands[1], "%l0", 0, insn);
}
[(set_attr "type" "branch")
(set (attr "length")
(if_then_else (and (ge (minus (match_dup 0) (pc))
(const_int -32768))
(lt (minus (match_dup 0) (pc))
(const_int 32764)))
(const_int 4)
(const_int 8)))])
(define_insn_and_split "*cbranch_2insn"
[(set (pc)
(if_then_else (match_operator 1 "extra_insn_branch_comparison_operator"
[(match_operand 2 "cc_reg_operand" "y")
(const_int 0)])
(label_ref (match_operand 0))
(pc)))]
"!flag_finite_math_only"
"#"
"&& 1"
[(pc)]
{
rtx cc = rs6000_emit_fp_cror (GET_CODE (operands[1]), SImode, operands[2]);
rtx note = find_reg_note (curr_insn, REG_BR_PROB, 0);
rtx loc_ref = gen_rtx_LABEL_REF (VOIDmode, operands[0]);
rtx cond = gen_rtx_EQ (CCEQmode, cc, const0_rtx);
rtx ite = gen_rtx_IF_THEN_ELSE (VOIDmode, cond, loc_ref, pc_rtx);
emit_jump_insn (gen_rtx_SET (pc_rtx, ite));
if (note)
{
profile_probability prob
= profile_probability::from_reg_br_prob_note (XINT (note, 0));
add_reg_br_prob_note (get_last_insn (), prob);
}
DONE;
}
[(set_attr "type" "branch")
(set (attr "length")
(if_then_else (and (ge (minus (match_dup 0) (pc))
(const_int -32764))
(lt (minus (match_dup 0) (pc))
(const_int 32760)))
(const_int 8)
(const_int 16)))])
;; Conditional return.
(define_insn "*creturn"
[(set (pc)
(if_then_else (match_operator 0 "branch_comparison_operator"
[(match_operand 1 "cc_reg_operand" "y")
(const_int 0)])
(any_return)
(pc)))]
"<return_pred>"
{
return output_cbranch (operands[0], NULL, 0, insn);
}
[(set_attr "type" "jmpreg")])
;; Logic on condition register values.
; This pattern matches things like
; (set (reg:CCEQ 68) (compare:CCEQ (ior:SI (gt:SI (reg:CCFP 68) (const_int 0))
; (eq:SI (reg:CCFP 68) (const_int 0)))
; (const_int 1)))
; which are generated by the branch logic.
; Prefer destructive operations where BT = BB (for crXX BT,BA,BB)
(define_insn "@cceq_ior_compare_<mode>"
[(set (match_operand:CCEQ 0 "cc_reg_operand" "=y,?y")
(compare:CCEQ (match_operator:GPR 1 "boolean_operator"
[(match_operator:GPR 2
"branch_positive_comparison_operator"
[(match_operand 3
"cc_reg_operand" "y,y")
(const_int 0)])
(match_operator:GPR 4
"branch_positive_comparison_operator"
[(match_operand 5
"cc_reg_operand" "0,y")
(const_int 0)])])
(const_int 1)))]
""
"cr%q1 %E0,%j2,%j4"
[(set_attr "type" "cr_logical")
(set_attr "cr_logical_3op" "no,yes")])
; Why is the constant -1 here, but 1 in the previous pattern?
; Because ~1 has all but the low bit set.
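; As a worked example (illustration only): each comparison result is 0 or 1 in
; SImode, so
;	cmp1 = 0  ->  ~cmp1 = 0xffffffff,  (~cmp1 | cmp2) == -1 always
;	cmp1 = 1  ->  ~cmp1 = 0xfffffffe,  (~cmp1 | cmp2) == -1 iff cmp2 == 1
; i.e. comparing the combined value against -1 tests exactly the low bit,
; which is what the corresponding cr-logical instruction computes on CR bits.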
(define_insn "cceq_ior_compare_complement"
[(set (match_operand:CCEQ 0 "cc_reg_operand" "=y,?y")
(compare:CCEQ (match_operator:SI 1 "boolean_operator"
[(not:SI (match_operator:SI 2
"branch_positive_comparison_operator"
[(match_operand 3
"cc_reg_operand" "y,y")
(const_int 0)]))
(match_operator:SI 4
"branch_positive_comparison_operator"
[(match_operand 5
"cc_reg_operand" "0,y")
(const_int 0)])])
(const_int -1)))]
""
"cr%q1 %E0,%j2,%j4"
[(set_attr "type" "cr_logical")
(set_attr "cr_logical_3op" "no,yes")])
(define_insn "@cceq_rev_compare_<mode>"
[(set (match_operand:CCEQ 0 "cc_reg_operand" "=y,?y")
(compare:CCEQ (match_operator:GPR 1
"branch_positive_comparison_operator"
[(match_operand 2
"cc_reg_operand" "0,y")
(const_int 0)])
(const_int 0)))]
""
"crnot %E0,%j1"
[(set_attr "type" "cr_logical")
(set_attr "cr_logical_3op" "no,yes")])
;; If we are comparing the result of two comparisons, this can be done
;; using creqv or crxor.
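;; For illustration (a sketch; register and CR-field choices are arbitrary):
;; comparing two comparison results such as "(a < b) == (c < d)" can become
;;	cmpw  0,3,4		; CR0 <- a compared with b
;;	cmpw  1,5,6		; CR1 <- c compared with d
;;	creqv 0,0,4		; CR0.LT <- (CR0.LT == CR1.LT)
;; instead of materializing both booleans in GPRs first.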
(define_insn_and_split ""
[(set (match_operand:CCEQ 0 "cc_reg_operand" "=y")
(compare:CCEQ (match_operator 1 "branch_comparison_operator"
[(match_operand 2 "cc_reg_operand" "y")
(const_int 0)])
(match_operator 3 "branch_comparison_operator"
[(match_operand 4 "cc_reg_operand" "y")
(const_int 0)])))]
""
"#"
""
[(set (match_dup 0) (compare:CCEQ (xor:SI (match_dup 1) (match_dup 3))
(match_dup 5)))]
{
int positive_1, positive_2;
positive_1 = branch_positive_comparison_operator (operands[1],
GET_MODE (operands[1]));
positive_2 = branch_positive_comparison_operator (operands[3],
GET_MODE (operands[3]));
if (! positive_1)
operands[1] = gen_rtx_fmt_ee (rs6000_reverse_condition (GET_MODE (operands[2]),
GET_CODE (operands[1])),
SImode,
operands[2], const0_rtx);
else if (GET_MODE (operands[1]) != SImode)
operands[1] = gen_rtx_fmt_ee (GET_CODE (operands[1]), SImode,
operands[2], const0_rtx);
if (! positive_2)
operands[3] = gen_rtx_fmt_ee (rs6000_reverse_condition (GET_MODE (operands[4]),
GET_CODE (operands[3])),
SImode,
operands[4], const0_rtx);
else if (GET_MODE (operands[3]) != SImode)
operands[3] = gen_rtx_fmt_ee (GET_CODE (operands[3]), SImode,
operands[4], const0_rtx);
if (positive_1 == positive_2)
{
operands[1] = gen_rtx_NOT (SImode, operands[1]);
operands[5] = constm1_rtx;
}
else
{
operands[5] = const1_rtx;
}
})
;; Unconditional branch and return.
(define_insn "jump"
[(set (pc)
(label_ref (match_operand 0)))]
""
"b %l0"
[(set_attr "type" "branch")])
(define_insn "<return_str>return"
[(any_return)]
"<return_pred>"
"blr"
[(set_attr "type" "jmpreg")])
(define_expand "indirect_jump"
[(set (pc) (match_operand 0 "register_operand"))]
""
{
if (!rs6000_speculate_indirect_jumps)
{
rtx ccreg = gen_reg_rtx (CCmode);
emit_jump_insn (gen_indirect_jump_nospec (Pmode, operands[0], ccreg));
DONE;
}
})
(define_insn "*indirect_jump<mode>"
[(set (pc)
(match_operand:P 0 "register_operand" "c,*l"))]
"rs6000_speculate_indirect_jumps"
"b%T0"
[(set_attr "type" "jmpreg")])
(define_insn "@indirect_jump<mode>_nospec"
[(set (pc) (match_operand:P 0 "register_operand" "c,*l"))
(clobber (match_operand:CC 1 "cc_reg_operand" "=y,y"))]
"!rs6000_speculate_indirect_jumps"
"crset %E1\;beq%T0- %1\;b $"
[(set_attr "type" "jmpreg")
(set_attr "length" "12")])
;; Table jump for switch statements:
(define_expand "tablejump"
[(use (match_operand 0))
(use (label_ref (match_operand 1)))]
""
{
if (rs6000_speculate_indirect_jumps)
{
if (TARGET_32BIT)
emit_jump_insn (gen_tablejumpsi (operands[0], operands[1]));
else
emit_jump_insn (gen_tablejumpdi (operands[0], operands[1]));
}
else
{
rtx ccreg = gen_reg_rtx (CCmode);
rtx jump;
if (TARGET_32BIT)
jump = gen_tablejumpsi_nospec (operands[0], operands[1], ccreg);
else
jump = gen_tablejumpdi_nospec (operands[0], operands[1], ccreg);
emit_jump_insn (jump);
}
DONE;
})
(define_expand "tablejumpsi"
[(set (match_dup 3)
(plus:SI (match_operand:SI 0)
(match_dup 2)))
(parallel [(set (pc)
(match_dup 3))
(use (label_ref (match_operand 1)))])]
"TARGET_32BIT && rs6000_speculate_indirect_jumps"
{
operands[0] = force_reg (SImode, operands[0]);
operands[2] = force_reg (SImode, gen_rtx_LABEL_REF (SImode, operands[1]));
operands[3] = gen_reg_rtx (SImode);
})
(define_expand "tablejumpsi_nospec"
[(set (match_dup 4)
(plus:SI (match_operand:SI 0)
(match_dup 3)))
(parallel [(set (pc)
(match_dup 4))
(use (label_ref (match_operand 1)))
(clobber (match_operand 2))])]
"TARGET_32BIT && !rs6000_speculate_indirect_jumps"
{
operands[0] = force_reg (SImode, operands[0]);
operands[3] = force_reg (SImode, gen_rtx_LABEL_REF (SImode, operands[1]));
operands[4] = gen_reg_rtx (SImode);
})
(define_expand "tablejumpdi"
[(set (match_dup 4)
(sign_extend:DI (match_operand:SI 0 "lwa_operand")))
(set (match_dup 3)
(plus:DI (match_dup 4)
(match_dup 2)))
(parallel [(set (pc)
(match_dup 3))
(use (label_ref (match_operand 1)))])]
"TARGET_64BIT && rs6000_speculate_indirect_jumps"
{
operands[2] = force_reg (DImode, gen_rtx_LABEL_REF (DImode, operands[1]));
operands[3] = gen_reg_rtx (DImode);
operands[4] = gen_reg_rtx (DImode);
})
(define_expand "tablejumpdi_nospec"
[(set (match_dup 5)
(sign_extend:DI (match_operand:SI 0 "lwa_operand")))
(set (match_dup 4)
(plus:DI (match_dup 5)
(match_dup 3)))
(parallel [(set (pc)
(match_dup 4))
(use (label_ref (match_operand 1)))
(clobber (match_operand 2))])]
"TARGET_64BIT && !rs6000_speculate_indirect_jumps"
{
operands[3] = force_reg (DImode, gen_rtx_LABEL_REF (DImode, operands[1]));
operands[4] = gen_reg_rtx (DImode);
operands[5] = gen_reg_rtx (DImode);
})
(define_insn "*tablejump<mode>_internal1"
[(set (pc)
(match_operand:P 0 "register_operand" "c,*l"))
(use (label_ref (match_operand 1)))]
"rs6000_speculate_indirect_jumps"
"b%T0"
[(set_attr "type" "jmpreg")])
(define_insn "*tablejump<mode>_internal1_nospec"
[(set (pc)
(match_operand:P 0 "register_operand" "c,*l"))
(use (label_ref (match_operand 1)))
(clobber (match_operand:CC 2 "cc_reg_operand" "=y,y"))]
"!rs6000_speculate_indirect_jumps"
"crset %E2\;beq%T0- %2\;b $"
[(set_attr "type" "jmpreg")
(set_attr "length" "12")])
(define_insn "nop"
[(unspec [(const_int 0)] UNSPEC_NOP)]
""
"nop")
(define_insn "group_ending_nop"
[(unspec [(const_int 0)] UNSPEC_GRP_END_NOP)]
""
{
operands[0] = gen_rtx_REG (Pmode,
rs6000_tune == PROCESSOR_POWER6 ? 1 : 2);
return "ori %0,%0,0";
})
(define_insn "speculation_barrier"
[(unspec_volatile:BLK [(const_int 0)] UNSPECV_SPEC_BARRIER)]
""
{
operands[0] = gen_rtx_REG (Pmode, 31);
return "ori %0,%0,0";
})
;; Define the subtract-one-and-jump insns, starting with the template
;; so loop.c knows what to generate.
(define_expand "doloop_end"
[(use (match_operand 0)) ; loop pseudo
(use (match_operand 1))] ; label
""
{
if (GET_MODE (operands[0]) != Pmode)
FAIL;
emit_jump_insn (gen_ctr (Pmode, operands[0], operands[1]));
DONE;
})
(define_expand "@ctr<mode>"
[(parallel [(set (pc)
(if_then_else (ne (match_operand:P 0 "register_operand")
(const_int 1))
(label_ref (match_operand 1))
(pc)))
(set (match_dup 0)
(plus:P (match_dup 0)
(const_int -1)))
(clobber (match_scratch:CC 2))
(clobber (match_scratch:P 3))])]
""
"")
;; We need to be able to do this for any operand, including MEM, or we
;; will cause reload to blow up since we don't allow output reloads on
;; JUMP_INSNs.
;; For the length attribute to be calculated correctly, the
;; label MUST be operand 0.
;; rs6000_legitimate_combined_insn prevents combine creating any of
;; the ctr<mode> insns.
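;; For illustration (a sketch, with the trip count assumed in r4; not verbatim
;; compiler output), a counted loop such as "for (i = 0; i < n; i++) ..."
;; ideally becomes
;;	mtctr 4			; move the trip count into CTR
;; .L1:
;;	...loop body...
;;	bdnz .L1		; decrement CTR, branch while it is still nonzero
;; The splitter further down handles the case where CTR could not be allocated.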
(define_code_iterator eqne [eq ne])
(define_code_attr bd [(eq "bdz") (ne "bdnz")])
(define_code_attr bd_neg [(eq "bdnz") (ne "bdz")])
(define_insn "<bd>_<mode>"
[(set (pc)
(if_then_else (eqne (match_operand:P 1 "register_operand" "c,*b,*b,*b")
(const_int 1))
(label_ref (match_operand 0))
(pc)))
(set (match_operand:P 2 "nonimmediate_operand" "=1,*r,m,*d*wa*c*l")
(plus:P (match_dup 1)
(const_int -1)))
(clobber (match_scratch:CC 3 "=X,&x,&x,&x"))
(clobber (match_scratch:P 4 "=X,X,&r,r"))]
""
{
if (which_alternative != 0)
return "#";
else if (get_attr_length (insn) == 4)
return "<bd> %l0";
else
return "<bd_neg> $+8\;b %l0";
}
[(set_attr "type" "branch")
(set_attr_alternative "length"
[(if_then_else (and (ge (minus (match_dup 0) (pc))
(const_int -32768))
(lt (minus (match_dup 0) (pc))
(const_int 32764)))
(const_int 4)
(const_int 8))
(const_string "16")
(const_string "20")
(const_string "20")])])
;; Now the splitter if we could not allocate the CTR register
(define_split
[(set (pc)
(if_then_else (match_operator 2 "comparison_operator"
[(match_operand:P 1 "gpc_reg_operand")
(const_int 1)])
(match_operand 5)
(match_operand 6)))
(set (match_operand:P 0 "nonimmediate_operand")
(plus:P (match_dup 1)
(const_int -1)))
(clobber (match_scratch:CC 3))
(clobber (match_scratch:P 4))]
"reload_completed"
[(set (pc)
(if_then_else (match_dup 7)
(match_dup 5)
(match_dup 6)))]
{
operands[7] = gen_rtx_fmt_ee (GET_CODE (operands[2]), VOIDmode, operands[3],
const0_rtx);
emit_insn (gen_rtx_SET (operands[3],
gen_rtx_COMPARE (CCmode, operands[1], const1_rtx)));
if (int_reg_operand (operands[0], <MODE>mode))
emit_insn (gen_add<mode>3 (operands[0], operands[1], constm1_rtx));
else
{
emit_insn (gen_add<mode>3 (operands[4], operands[1], constm1_rtx));
emit_move_insn (operands[0], operands[4]);
}
/* No DONE so branch comes from the pattern. */
})
;; patterns for bdnzt/bdnzf/bdzt/bdzf
;; Note that in the case of long branches we have to decompose this into
;; bdnz+bc. This is because bdnzt has an implied AND between the ctr condition
;; and the CR bit, which means there is no way to conveniently invert the
;; comparison as is done with plain bdnz/bdz.
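;; For illustration (sketch only): when the target is out of range, the long
;; form emitted below for what would have been a single bdnzt looks like
;;	bdz   .Lshort		; CTR condition failed, skip the long branch
;;	bge   .Lshort		; CR condition failed (reversed test), skip too
;;	b     target
;; .Lshort:
;; assuming the CR condition being tested was "lt".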
(define_insn "<bd>tf_<mode>"
[(set (pc)
(if_then_else
(and
(eqne (match_operand:P 1 "register_operand" "c,*b,*b,*b")
(const_int 1))
(match_operator 3 "branch_comparison_operator"
[(match_operand 4 "cc_reg_operand" "y,y,y,y")
(const_int 0)]))
(label_ref (match_operand 0))
(pc)))
(set (match_operand:P 2 "nonimmediate_operand" "=1,*r,m,*d*wa*c*l")
(plus:P (match_dup 1)
(const_int -1)))
(clobber (match_scratch:P 5 "=X,X,&r,r"))
(clobber (match_scratch:CC 6 "=X,&y,&y,&y"))
(clobber (match_scratch:CCEQ 7 "=X,&y,&y,&y"))]
""
{
if (which_alternative != 0)
return "#";
else if (get_attr_length (insn) == 4)
{
if (branch_positive_comparison_operator (operands[3],
GET_MODE (operands[3])))
return "<bd>t %j3,%l0";
else
return "<bd>f %j3,%l0";
}
else
{
static char seq[96];
char *bcs = output_cbranch (operands[3], ".Lshort%=", 1, insn);
sprintf(seq, "<bd_neg> .Lshort%%=\;%s\;b %%l0\;.Lshort%%=:", bcs);
return seq;
}
}
[(set_attr "type" "branch")
(set_attr_alternative "length"
[(if_then_else (and (ge (minus (match_dup 0) (pc))
(const_int -32768))
(lt (minus (match_dup 0) (pc))
(const_int 32764)))
(const_int 4)
(const_int 8))
(const_string "16")
(const_string "20")
(const_string "20")])])
;; Now the splitter if we could not allocate the CTR register
(define_split
[(set (pc)
(if_then_else
(and
(match_operator 1 "comparison_operator"
[(match_operand:P 0 "gpc_reg_operand")
(const_int 1)])
(match_operator 3 "branch_comparison_operator"
[(match_operand 2 "cc_reg_operand")
(const_int 0)]))
(match_operand 4)
(match_operand 5)))
(set (match_operand:P 6 "nonimmediate_operand")
(plus:P (match_dup 0)
(const_int -1)))
(clobber (match_scratch:P 7))
(clobber (match_scratch:CC 8))
(clobber (match_scratch:CCEQ 9))]
"reload_completed"
[(pc)]
{
rtx ctr = operands[0];
rtx ctrcmp = operands[1];
rtx ccin = operands[2];
rtx cccmp = operands[3];
rtx dst1 = operands[4];
rtx dst2 = operands[5];
rtx ctrout = operands[6];
rtx ctrtmp = operands[7];
enum rtx_code cmpcode = GET_CODE (ctrcmp);
bool ispos = branch_positive_comparison_operator (ctrcmp, GET_MODE (ctrcmp));
if (!ispos)
cmpcode = reverse_condition (cmpcode);
/* Generate crand/crandc here. */
emit_insn (gen_rtx_SET (operands[8],
gen_rtx_COMPARE (CCmode, ctr, const1_rtx)));
rtx ctrcmpcc = gen_rtx_fmt_ee (cmpcode, SImode, operands[8], const0_rtx);
rtx andexpr = gen_rtx_AND (SImode, ctrcmpcc, cccmp);
if (ispos)
emit_insn (gen_cceq_ior_compare (SImode, operands[9], andexpr, ctrcmpcc,
operands[8], cccmp, ccin));
else
emit_insn (gen_cceq_ior_compare_complement (operands[9], andexpr, ctrcmpcc,
operands[8], cccmp, ccin));
if (int_reg_operand (ctrout, <MODE>mode))
emit_insn (gen_add<mode>3 (ctrout, ctr, constm1_rtx));
else
{
emit_insn (gen_add<mode>3 (ctrtmp, ctr, constm1_rtx));
emit_move_insn (ctrout, ctrtmp);
}
rtx cmp = gen_rtx_EQ (CCEQmode, operands[9], const0_rtx);
emit_jump_insn (gen_rtx_SET (pc_rtx,
gen_rtx_IF_THEN_ELSE (VOIDmode, cmp,
dst1, dst2)));
DONE;
})
(define_insn "trap"
[(trap_if (const_int 1) (const_int 0))]
""
"trap"
[(set_attr "type" "trap")])
(define_expand "ctrap<mode>4"
[(trap_if (match_operator 0 "ordered_comparison_operator"
[(match_operand:GPR 1 "register_operand")
(match_operand:GPR 2 "reg_or_short_operand")])
(match_operand 3 "zero_constant" ""))]
""
"")
(define_insn ""
[(trap_if (match_operator 0 "ordered_comparison_operator"
[(match_operand:GPR 1 "register_operand" "r")
(match_operand:GPR 2 "reg_or_short_operand" "rI")])
(const_int 0))]
""
"t<wd>%V0%I2 %1,%2"
[(set_attr "type" "trap")])
;; Insns related to generating the function prologue and epilogue.
(define_expand "prologue"
[(use (const_int 0))]
""
{
rs6000_emit_prologue ();
if (!TARGET_SCHED_PROLOG)
emit_insn (gen_blockage ());
DONE;
})
(define_insn "*movesi_from_cr_one"
[(match_parallel 0 "mfcr_operation"
[(set (match_operand:SI 1 "gpc_reg_operand" "=r")
(unspec:SI [(match_operand:CC 2 "cc_reg_operand" "y")
(match_operand 3 "immediate_operand" "n")]
UNSPEC_MOVESI_FROM_CR))])]
"TARGET_MFCRF"
{
int mask = 0;
int i;
for (i = 0; i < XVECLEN (operands[0], 0); i++)
{
mask = INTVAL (XVECEXP (SET_SRC (XVECEXP (operands[0], 0, i)), 0, 1));
operands[4] = GEN_INT (mask);
output_asm_insn ("mfcr %1,%4", operands);
}
return "";
}
[(set_attr "type" "mfcrf")])
;; Don't include the volatile CRs, since their values are not needed for the CR
;; save in the prologue. Including them would also prevent shrink-wrapping,
;; because we could not move the prologue past an insn (such as an early exit
;; test) that defines a register used in the prologue.
(define_insn "prologue_movesi_from_cr"
[(set (match_operand:SI 0 "gpc_reg_operand" "=r")
(unspec:SI [(reg:CC CR2_REGNO) (reg:CC CR3_REGNO)
(reg:CC CR4_REGNO)]
UNSPEC_MOVESI_FROM_CR))]
""
"mfcr %0"
[(set_attr "type" "mfcr")])
(define_insn "*crsave"
[(match_parallel 0 "crsave_operation"
[(set (match_operand:SI 1 "memory_operand" "=m")
(match_operand:SI 2 "gpc_reg_operand" "r"))])]
""
"stw %2,%1"
[(set_attr "type" "store")])
(define_insn "*stmw"
[(match_parallel 0 "stmw_operation"
[(set (match_operand:SI 1 "memory_operand" "=m")
(match_operand:SI 2 "gpc_reg_operand" "r"))])]
"TARGET_MULTIPLE"
"stmw %2,%1"
[(set_attr "type" "store")
(set_attr "update" "yes")
(set_attr "indexed" "yes")])
; The following comment applies to:
; save_gpregs_*
; save_fpregs_*
; restore_gpregs*
; return_and_restore_gpregs*
; return_and_restore_fpregs*
; return_and_restore_fpregs_aix*
;
; The out-of-line save / restore functions expect one input argument.
; Since those are not standard call_insn's, we must avoid using
; MATCH_OPERAND for that argument. That way the register rename
; optimization will not try to rename this register.
; Each pattern is repeated for each possible register number used in
; various ABIs (r11, r1, and for some functions r12).
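; For illustration only (the helper names and the pointer register depend on
; the ABI and on which registers need saving): a prologue using this path may
; emit something like
;	bl _savegpr0_29		; out-of-line save of r29..r31
; with that single argument implicitly in r1, r11 or r12, which is why the
; patterns below use a bare (use (reg:P 11)) etc. rather than a MATCH_OPERAND.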
(define_insn "*save_gpregs_<mode>_r11"
[(match_parallel 0 "any_parallel_operand"
[(clobber (reg:P LR_REGNO))
(use (match_operand:P 1 "symbol_ref_operand" "s"))
(use (reg:P 11))
(set (match_operand:P 2 "memory_operand" "=m")
(match_operand:P 3 "gpc_reg_operand" "r"))])]
""
"bl %1"
[(set_attr "type" "branch")])
(define_insn "*save_gpregs_<mode>_r12"
[(match_parallel 0 "any_parallel_operand"
[(clobber (reg:P LR_REGNO))
(use (match_operand:P 1 "symbol_ref_operand" "s"))
(use (reg:P 12))
(set (match_operand:P 2 "memory_operand" "=m")
(match_operand:P 3 "gpc_reg_operand" "r"))])]
""
"bl %1"
[(set_attr "type" "branch")])
(define_insn "*save_gpregs_<mode>_r1"
[(match_parallel 0 "any_parallel_operand"
[(clobber (reg:P LR_REGNO))
(use (match_operand:P 1 "symbol_ref_operand" "s"))
(use (reg:P 1))
(set (match_operand:P 2 "memory_operand" "=m")
(match_operand:P 3 "gpc_reg_operand" "r"))])]
""
"bl %1"
[(set_attr "type" "branch")])
(define_insn "*save_fpregs_<mode>_r11"
[(match_parallel 0 "any_parallel_operand"
[(clobber (reg:P LR_REGNO))
(use (match_operand:P 1 "symbol_ref_operand" "s"))
(use (reg:P 11))
(set (match_operand:DF 2 "memory_operand" "=m")
(match_operand:DF 3 "gpc_reg_operand" "d"))])]
""
"bl %1"
[(set_attr "type" "branch")])
(define_insn "*save_fpregs_<mode>_r12"
[(match_parallel 0 "any_parallel_operand"
[(clobber (reg:P LR_REGNO))
(use (match_operand:P 1 "symbol_ref_operand" "s"))
(use (reg:P 12))
(set (match_operand:DF 2 "memory_operand" "=m")
(match_operand:DF 3 "gpc_reg_operand" "d"))])]
""
"bl %1"
[(set_attr "type" "branch")])
(define_insn "*save_fpregs_<mode>_r1"
[(match_parallel 0 "any_parallel_operand"
[(clobber (reg:P LR_REGNO))
(use (match_operand:P 1 "symbol_ref_operand" "s"))
(use (reg:P 1))
(set (match_operand:DF 2 "memory_operand" "=m")
(match_operand:DF 3 "gpc_reg_operand" "d"))])]
""
"bl %1"
[(set_attr "type" "branch")])
; This exists to make explicit that changes to the stack pointer must not
; be moved over loads from or stores to stack memory.
(define_insn "stack_tie"
[(match_parallel 0 "tie_operand"
[(set (mem:BLK (reg 1)) (const_int 0))])]
""
""
[(set_attr "length" "0")])
; Some 32-bit ABIs do not have a red zone, so the stack deallocation has to
; stay behind all restores from the stack; it cannot be reordered to before
; any of them. See PR77687. This insn is an add or mr, and a memory clobber.
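; For illustration, the two alternatives emit either
;	mr   1,11		; restore r1 from a register holding the old sp
; or
;	addi 1,1,SIZE		; pop the frame by an immediate (SIZE is a placeholder)
; while the (mem:BLK (scratch)) clobber keeps this insn after the restores.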
(define_insn "stack_restore_tie"
[(set (match_operand:SI 0 "gpc_reg_operand" "=r,r")
(plus:SI (match_operand:SI 1 "gpc_reg_operand" "r,r")
(match_operand:SI 2 "reg_or_cint_operand" "O,rI")))
(set (mem:BLK (scratch)) (const_int 0))]
"TARGET_32BIT"
"@
mr %0,%1
add%I2 %0,%1,%2"
[(set_attr "type" "*,add")])
(define_expand "epilogue"
[(use (const_int 0))]
""
{
if (!TARGET_SCHED_PROLOG)
emit_insn (gen_blockage ());
rs6000_emit_epilogue (EPILOGUE_TYPE_NORMAL);
DONE;
})
; On some processors, doing the mtcrf one CC register at a time is
; faster (like on the 604e). On others, doing them all at once is
; faster; for instance, on the 601 and 750.
(define_expand "movsi_to_cr_one"
[(set (match_operand:CC 0 "cc_reg_operand")
(unspec:CC [(match_operand:SI 1 "gpc_reg_operand")
(match_dup 2)] UNSPEC_MOVESI_TO_CR))]
""
"operands[2] = GEN_INT (1 << (7 - (REGNO (operands[0]) - CR0_REGNO)));")
(define_insn "*movsi_to_cr"
[(match_parallel 0 "mtcrf_operation"
[(set (match_operand:CC 1 "cc_reg_operand" "=y")
(unspec:CC [(match_operand:SI 2 "gpc_reg_operand" "r")
(match_operand 3 "immediate_operand" "n")]
UNSPEC_MOVESI_TO_CR))])]
""
{
int mask = 0;
int i;
for (i = 0; i < XVECLEN (operands[0], 0); i++)
mask |= INTVAL (XVECEXP (SET_SRC (XVECEXP (operands[0], 0, i)), 0, 1));
operands[4] = GEN_INT (mask);
return "mtcrf %4,%2";
}
[(set_attr "type" "mtcr")])
(define_insn "*mtcrfsi"
[(set (match_operand:CC 0 "cc_reg_operand" "=y")
(unspec:CC [(match_operand:SI 1 "gpc_reg_operand" "r")
(match_operand 2 "immediate_operand" "n")]
UNSPEC_MOVESI_TO_CR))]
"REG_P (operands[0])
&& CR_REGNO_P (REGNO (operands[0]))
&& CONST_INT_P (operands[2])
&& INTVAL (operands[2]) == 1 << (7 - (REGNO (operands[0]) - CR0_REGNO))"
"mtcrf %R0,%1"
[(set_attr "type" "mtcr")])
; The load-multiple instruction has properties similar to the store-multiple
; instruction above. Note that "load_multiple" is the standard pattern name
; known to the machine-independent code; on PowerPC it corresponds to the lmw
; (load multiple word) instruction.
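; For example (illustration only), "lmw 26,-24(1)" reloads r26 through r31 from
; six consecutive words starting at -24(r1).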
(define_insn "*lmw"
[(match_parallel 0 "lmw_operation"
[(set (match_operand:SI 1 "gpc_reg_operand" "=r")
(match_operand:SI 2 "memory_operand" "m"))])]
"TARGET_MULTIPLE"
"lmw %1,%2"
[(set_attr "type" "load")
(set_attr "update" "yes")
(set_attr "indexed" "yes")
(set_attr "cell_micro" "always")])
; FIXME: "any_parallel_operand" is a bit flexible...
; The following comment applies to:
; save_gpregs_*
; save_fpregs_*
; restore_gpregs*
; return_and_restore_gpregs*
; return_and_restore_fpregs*
; return_and_restore_fpregs_aix*
;
; The out-of-line save / restore functions expect one input argument.
; Since those are not standard call_insn's, we must avoid using
; MATCH_OPERAND for that argument. That way the register rename
; optimization will not try to rename this register.
; Each pattern is repeated for each possible register number used in
; various ABIs (r11, r1, and for some functions r12).
(define_insn "*restore_gpregs_<mode>_r11"
[(match_parallel 0 "any_parallel_operand"
[(clobber (reg:P LR_REGNO))
(use (match_operand:P 1 "symbol_ref_operand" "s"))
(use (reg:P 11))
(set (match_operand:P 2 "gpc_reg_operand" "=r")
(match_operand:P 3 "memory_operand" "m"))])]
""
"bl %1"
[(set_attr "type" "branch")])
(define_insn "*restore_gpregs_<mode>_r12"
[(match_parallel 0 "any_parallel_operand"
[(clobber (reg:P LR_REGNO))
(use (match_operand:P 1 "symbol_ref_operand" "s"))
(use (reg:P 12))
(set (match_operand:P 2 "gpc_reg_operand" "=r")
(match_operand:P 3 "memory_operand" "m"))])]
""
"bl %1"
[(set_attr "type" "branch")])
(define_insn "*restore_gpregs_<mode>_r1"
[(match_parallel 0 "any_parallel_operand"
[(clobber (reg:P LR_REGNO))
(use (match_operand:P 1 "symbol_ref_operand" "s"))
(use (reg:P 1))
(set (match_operand:P 2 "gpc_reg_operand" "=r")
(match_operand:P 3 "memory_operand" "m"))])]
""
"bl %1"
[(set_attr "type" "branch")])
(define_insn "*return_and_restore_gpregs_<mode>_r11"
[(match_parallel 0 "any_parallel_operand"
[(return)
(clobber (reg:P LR_REGNO))
(use (match_operand:P 1 "symbol_ref_operand" "s"))
(use (reg:P 11))
(set (match_operand:P 2 "gpc_reg_operand" "=r")
(match_operand:P 3 "memory_operand" "m"))])]
""
"b %1"
[(set_attr "type" "branch")])
(define_insn "*return_and_restore_gpregs_<mode>_r12"
[(match_parallel 0 "any_parallel_operand"
[(return)
(clobber (reg:P LR_REGNO))
(use (match_operand:P 1 "symbol_ref_operand" "s"))
(use (reg:P 12))
(set (match_operand:P 2 "gpc_reg_operand" "=r")
(match_operand:P 3 "memory_operand" "m"))])]
""
"b %1"
[(set_attr "type" "branch")])
(define_insn "*return_and_restore_gpregs_<mode>_r1"
[(match_parallel 0 "any_parallel_operand"
[(return)
(clobber (reg:P LR_REGNO))
(use (match_operand:P 1 "symbol_ref_operand" "s"))
(use (reg:P 1))
(set (match_operand:P 2 "gpc_reg_operand" "=r")
(match_operand:P 3 "memory_operand" "m"))])]
""
"b %1"
[(set_attr "type" "branch")])
(define_insn "*return_and_restore_fpregs_<mode>_r11"
[(match_parallel 0 "any_parallel_operand"
[(return)
(clobber (reg:P LR_REGNO))
(use (match_operand:P 1 "symbol_ref_operand" "s"))
(use (reg:P 11))
(set (match_operand:DF 2 "gpc_reg_operand" "=d")
(match_operand:DF 3 "memory_operand" "m"))])]
""
"b %1"
[(set_attr "type" "branch")])
(define_insn "*return_and_restore_fpregs_<mode>_r12"
[(match_parallel 0 "any_parallel_operand"
[(return)
(clobber (reg:P LR_REGNO))
(use (match_operand:P 1 "symbol_ref_operand" "s"))
(use (reg:P 12))
(set (match_operand:DF 2 "gpc_reg_operand" "=d")
(match_operand:DF 3 "memory_operand" "m"))])]
""
"b %1"
[(set_attr "type" "branch")])
(define_insn "*return_and_restore_fpregs_<mode>_r1"
[(match_parallel 0 "any_parallel_operand"
[(return)
(clobber (reg:P LR_REGNO))
(use (match_operand:P 1 "symbol_ref_operand" "s"))
(use (reg:P 1))
(set (match_operand:DF 2 "gpc_reg_operand" "=d")
(match_operand:DF 3 "memory_operand" "m"))])]
""
"b %1"
[(set_attr "type" "branch")])
(define_insn "*return_and_restore_fpregs_aix_<mode>_r11"
[(match_parallel 0 "any_parallel_operand"
[(return)
(use (match_operand:P 1 "symbol_ref_operand" "s"))
(use (reg:P 11))
(set (match_operand:DF 2 "gpc_reg_operand" "=d")
(match_operand:DF 3 "memory_operand" "m"))])]
""
"b %1"
[(set_attr "type" "branch")])
(define_insn "*return_and_restore_fpregs_aix_<mode>_r1"
[(match_parallel 0 "any_parallel_operand"
[(return)
(use (match_operand:P 1 "symbol_ref_operand" "s"))
(use (reg:P 1))
(set (match_operand:DF 2 "gpc_reg_operand" "=d")
(match_operand:DF 3 "memory_operand" "m"))])]
""
"b %1"
[(set_attr "type" "branch")])
; This is used in compiling the unwind routines.
(define_expand "eh_return"
[(use (match_operand 0 "general_operand"))]
""
{
emit_insn (gen_eh_set_lr (Pmode, operands[0]));
DONE;
})
; We can't expand this before we know where the link register is stored.
(define_insn_and_split "@eh_set_lr_<mode>"
[(unspec_volatile [(match_operand:P 0 "register_operand" "r")] UNSPECV_EH_RR)
(clobber (match_scratch:P 1 "=&b"))]
""
"#"
"reload_completed"
[(const_int 0)]
{
rs6000_emit_eh_reg_restore (operands[0], operands[1]);
DONE;
})
(define_insn "prefetch"
[(prefetch (match_operand 0 "indexed_or_indirect_address" "a")
(match_operand:SI 1 "const_int_operand" "n")
(match_operand:SI 2 "const_int_operand" "n"))]
""
{
/* dcbtstt, dcbtt and TH=0b10000 support starts with ISA 2.06 (Power7).
AIX does not support the dcbtstt and dcbtt extended mnemonics.
The AIX assembler does not support the three operand form of dcbt
and dcbtst on Power 7 (-mpwr7). */
int inst_select = INTVAL (operands[2]) || !TARGET_DIRECT_MOVE;
if (REG_P (operands[0]))
{
if (INTVAL (operands[1]) == 0)
return inst_select ? "dcbt 0,%0" : "dcbt 0,%0,16";
else
return inst_select ? "dcbtst 0,%0" : "dcbtst 0,%0,16";
}
else
{
if (INTVAL (operands[1]) == 0)
return inst_select ? "dcbt %a0" : "dcbt %a0,16";
else
return inst_select ? "dcbtst %a0" : "dcbtst %a0,16";
}
}
[(set_attr "type" "load")])
;; Handle -fsplit-stack.
(define_expand "split_stack_prologue"
[(const_int 0)]
""
{
rs6000_expand_split_stack_prologue ();
DONE;
})
(define_expand "load_split_stack_limit"
[(set (match_operand 0)
(unspec [(const_int 0)] UNSPEC_STACK_CHECK))]
""
{
emit_insn (gen_rtx_SET (operands[0],
gen_rtx_UNSPEC (Pmode,
gen_rtvec (1, const0_rtx),
UNSPEC_STACK_CHECK)));
DONE;
})
(define_insn "load_split_stack_limit_di"
[(set (match_operand:DI 0 "gpc_reg_operand" "=r")
(unspec:DI [(const_int 0)] UNSPEC_STACK_CHECK))]
"TARGET_64BIT"
"ld %0,-0x7040(13)"
[(set_attr "type" "load")
(set_attr "update" "no")
(set_attr "indexed" "no")])
(define_insn "load_split_stack_limit_si"
[(set (match_operand:SI 0 "gpc_reg_operand" "=r")
(unspec:SI [(const_int 0)] UNSPEC_STACK_CHECK))]
"!TARGET_64BIT"
"lwz %0,-0x7020(2)"
[(set_attr "type" "load")
(set_attr "update" "no")
(set_attr "indexed" "no")])
;; A return instruction which the middle-end doesn't see.
;; Use r0 to stop regrename twiddling with lr restore insns emitted
;; after the call to __morestack.
(define_insn "split_stack_return"
[(unspec_volatile [(reg:SI 0) (reg:SI LR_REGNO)] UNSPECV_SPLIT_STACK_RETURN)]
""
"blr"
[(set_attr "type" "jmpreg")])
;; If there are operand 0 bytes available on the stack, jump to
;; operand 1.
(define_expand "split_stack_space_check"
[(set (match_dup 2)
(unspec [(const_int 0)] UNSPEC_STACK_CHECK))
(set (match_dup 3)
(minus (reg STACK_POINTER_REGNUM)
(match_operand 0)))
(set (match_dup 4) (compare:CCUNS (match_dup 3) (match_dup 2)))
(set (pc) (if_then_else
(geu (match_dup 4) (const_int 0))
(label_ref (match_operand 1))
(pc)))]
""
{
rs6000_split_stack_space_check (operands[0], operands[1]);
DONE;
})
(define_insn "bpermd_<mode>"
[(set (match_operand:P 0 "gpc_reg_operand" "=r")
(unspec:P [(match_operand:P 1 "gpc_reg_operand" "r")
(match_operand:P 2 "gpc_reg_operand" "r")] UNSPEC_BPERM))]
"TARGET_POPCNTD"
"bpermd %0,%1,%2"
[(set_attr "type" "popcnt")])
;; Builtin fma support.
;; Note that the conditions for expansion are in the FMA_F iterator.
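;; For illustration (a sketch, assuming hard float and the usual parameter
;; registers):
;;	double f (double a, double b, double c) { return __builtin_fma (a, b, c); }
;; becomes a single
;;	fmadd 1,1,2,3		; f1 = f1*f2 + f3
;; (or one of the xsmadda/xsmaddm forms when the values live in VSX registers).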
(define_expand "fma<mode>4"
[(set (match_operand:FMA_F 0 "gpc_reg_operand")
(fma:FMA_F
(match_operand:FMA_F 1 "gpc_reg_operand")
(match_operand:FMA_F 2 "gpc_reg_operand")
(match_operand:FMA_F 3 "gpc_reg_operand")))]
""
"")
(define_insn "*fma<mode>4_fpr"
[(set (match_operand:SFDF 0 "gpc_reg_operand" "=<Ff>,wa,wa")
(fma:SFDF
(match_operand:SFDF 1 "gpc_reg_operand" "%<Ff>,wa,wa")
(match_operand:SFDF 2 "gpc_reg_operand" "<Ff>,wa,0")
(match_operand:SFDF 3 "gpc_reg_operand" "<Ff>,0,wa")))]
"TARGET_HARD_FLOAT"
"@
fmadd<s> %0,%1,%2,%3
xsmadda<sd>p %x0,%x1,%x2
xsmaddm<sd>p %x0,%x1,%x3"
[(set_attr "type" "fp")
(set_attr "isa" "*,<Fisa>,<Fisa>")])
; Altivec only has fma and nfms.
(define_expand "fms<mode>4"
[(set (match_operand:FMA_F 0 "gpc_reg_operand")
(fma:FMA_F
(match_operand:FMA_F 1 "gpc_reg_operand")
(match_operand:FMA_F 2 "gpc_reg_operand")
(neg:FMA_F (match_operand:FMA_F 3 "gpc_reg_operand"))))]
"!VECTOR_UNIT_ALTIVEC_P (<MODE>mode)"
"")
(define_insn "*fms<mode>4_fpr"
[(set (match_operand:SFDF 0 "gpc_reg_operand" "=<Ff>,wa,wa")
(fma:SFDF
(match_operand:SFDF 1 "gpc_reg_operand" "<Ff>,wa,wa")
(match_operand:SFDF 2 "gpc_reg_operand" "<Ff>,wa,0")
(neg:SFDF (match_operand:SFDF 3 "gpc_reg_operand" "<Ff>,0,wa"))))]
"TARGET_HARD_FLOAT"
"@
fmsub<s> %0,%1,%2,%3
xsmsuba<sd>p %x0,%x1,%x2
xsmsubm<sd>p %x0,%x1,%x3"
[(set_attr "type" "fp")
(set_attr "isa" "*,<Fisa>,<Fisa>")])
;; If signed zeros are ignored, -(a * b - c) = -a * b + c.
(define_expand "fnma<mode>4"
[(set (match_operand:FMA_F 0 "gpc_reg_operand")
(neg:FMA_F
(fma:FMA_F
(match_operand:FMA_F 1 "gpc_reg_operand")
(match_operand:FMA_F 2 "gpc_reg_operand")
(neg:FMA_F (match_operand:FMA_F 3 "gpc_reg_operand")))))]
"!HONOR_SIGNED_ZEROS (<MODE>mode)"
"")
;; If signed zeros are ignored, -(a * b + c) = -a * b - c.
(define_expand "fnms<mode>4"
[(set (match_operand:FMA_F 0 "gpc_reg_operand")
(neg:FMA_F
(fma:FMA_F
(match_operand:FMA_F 1 "gpc_reg_operand")
(match_operand:FMA_F 2 "gpc_reg_operand")
(match_operand:FMA_F 3 "gpc_reg_operand"))))]
"!HONOR_SIGNED_ZEROS (<MODE>mode) && !VECTOR_UNIT_ALTIVEC_P (<MODE>mode)"
"")
; Not an official optab name, but used from builtins.
(define_expand "nfma<mode>4"
[(set (match_operand:FMA_F 0 "gpc_reg_operand")
(neg:FMA_F
(fma:FMA_F
(match_operand:FMA_F 1 "gpc_reg_operand")
(match_operand:FMA_F 2 "gpc_reg_operand")
(match_operand:FMA_F 3 "gpc_reg_operand"))))]
"!VECTOR_UNIT_ALTIVEC_P (<MODE>mode)"
"")
(define_insn "*nfma<mode>4_fpr"
[(set (match_operand:SFDF 0 "gpc_reg_operand" "=<Ff>,wa,wa")
(neg:SFDF
(fma:SFDF
(match_operand:SFDF 1 "gpc_reg_operand" "<Ff>,wa,wa")
(match_operand:SFDF 2 "gpc_reg_operand" "<Ff>,wa,0")
(match_operand:SFDF 3 "gpc_reg_operand" "<Ff>,0,wa"))))]
"TARGET_HARD_FLOAT"
"@
fnmadd<s> %0,%1,%2,%3
xsnmadda<sd>p %x0,%x1,%x2
xsnmaddm<sd>p %x0,%x1,%x3"
[(set_attr "type" "fp")
(set_attr "isa" "*,<Fisa>,<Fisa>")])
; Not an official optab name, but used from builtins.
(define_expand "nfms<mode>4"
[(set (match_operand:FMA_F 0 "gpc_reg_operand")
(neg:FMA_F
(fma:FMA_F
(match_operand:FMA_F 1 "gpc_reg_operand")
(match_operand:FMA_F 2 "gpc_reg_operand")
(neg:FMA_F (match_operand:FMA_F 3 "gpc_reg_operand")))))]
""
"")
(define_insn "*nfmssf4_fpr"
[(set (match_operand:SFDF 0 "gpc_reg_operand" "=<Ff>,wa,wa")
(neg:SFDF
(fma:SFDF
(match_operand:SFDF 1 "gpc_reg_operand" "<Ff>,wa,wa")
(match_operand:SFDF 2 "gpc_reg_operand" "<Ff>,wa,0")
(neg:SFDF
(match_operand:SFDF 3 "gpc_reg_operand" "<Ff>,0,wa")))))]
"TARGET_HARD_FLOAT"
"@
fnmsub<s> %0,%1,%2,%3
xsnmsuba<sd>p %x0,%x1,%x2
xsnmsubm<sd>p %x0,%x1,%x3"
[(set_attr "type" "fp")
(set_attr "isa" "*,<Fisa>,<Fisa>")])
(define_expand "rs6000_get_timebase"
[(use (match_operand:DI 0 "gpc_reg_operand"))]
""
{
if (TARGET_POWERPC64)
emit_insn (gen_rs6000_mftb_di (operands[0]));
else
emit_insn (gen_rs6000_get_timebase_ppc32 (operands[0]));
DONE;
})
(define_insn "rs6000_get_timebase_ppc32"
[(set (match_operand:DI 0 "gpc_reg_operand" "=r")
(unspec_volatile:DI [(const_int 0)] UNSPECV_MFTB))
(clobber (match_scratch:SI 1 "=r"))
(clobber (match_scratch:CC 2 "=y"))]
"!TARGET_POWERPC64"
{
if (WORDS_BIG_ENDIAN)
if (TARGET_MFCRF)
{
return "mfspr %0,269\;"
"mfspr %L0,268\;"
"mfspr %1,269\;"
"cmpw %2,%0,%1\;"
"bne- %2,$-16";
}
else
{
return "mftbu %0\;"
"mftb %L0\;"
"mftbu %1\;"
"cmpw %2,%0,%1\;"
"bne- %2,$-16";
}
else
if (TARGET_MFCRF)
{
return "mfspr %L0,269\;"
"mfspr %0,268\;"
"mfspr %1,269\;"
"cmpw %2,%L0,%1\;"
"bne- %2,$-16";
}
else
{
return "mftbu %L0\;"
"mftb %0\;"
"mftbu %1\;"
"cmpw %2,%L0,%1\;"
"bne- %2,$-16";
}
}
[(set_attr "length" "20")])
(define_insn "rs6000_mftb_<mode>"
[(set (match_operand:GPR 0 "gpc_reg_operand" "=r")
(unspec_volatile:GPR [(const_int 0)] UNSPECV_MFTB))]
""
{
if (TARGET_MFCRF)
return "mfspr %0,268";
else
return "mftb %0";
})
;; The ISA 3.0 mffsl instruction is a lower latency instruction
;; for reading bits [29:31], [45:51] and [56:63] of the FPSCR.
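;; (As a worked note on the emulation used when mffsl is unavailable: those bit
;; ranges correspond to the DImode mask 0x70007f0ff applied below, since FPSCR
;; bits 29:31 are value bits 34:32, bits 45:51 are value bits 18:12, and bits
;; 56:63 are value bits 7:0.)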
(define_insn "rs6000_mffsl_hw"
[(set (match_operand:DF 0 "gpc_reg_operand" "=d")
(unspec_volatile:DF [(const_int 0)] UNSPECV_MFFSL))]
"TARGET_HARD_FLOAT"
"mffsl %0")
(define_expand "rs6000_mffsl"
[(set (match_operand:DF 0 "gpc_reg_operand")
(unspec_volatile:DF [(const_int 0)] UNSPECV_MFFSL))]
"TARGET_HARD_FLOAT"
{
/* If the low latency mffsl instruction (ISA 3.0) is available use it,
otherwise fall back to the older mffs instruction to emulate the mffsl
instruction. */
if (!TARGET_P9_MISC)
{
rtx tmp1 = gen_reg_rtx (DFmode);
/* The mffs instruction reads the entire FPSCR. Emulate the mffsl
instruction using the mffs instruction and masking the result. */
emit_insn (gen_rs6000_mffs (tmp1));
rtx tmp1di = simplify_gen_subreg (DImode, tmp1, DFmode, 0);
rtx tmp2 = gen_reg_rtx (DImode);
emit_insn (gen_anddi3 (tmp2, tmp1di, GEN_INT (0x70007f0ffLL)));
rtx tmp2df = simplify_gen_subreg (DFmode, tmp2, DImode, 0);
emit_move_insn (operands[0], tmp2df);
DONE;
}
emit_insn (gen_rs6000_mffsl_hw (operands[0]));
DONE;
})
(define_insn "rs6000_mffs"
[(set (match_operand:DF 0 "gpc_reg_operand" "=d")
(unspec_volatile:DF [(const_int 0)] UNSPECV_MFFS))]
"TARGET_HARD_FLOAT"
"mffs %0")
(define_insn "rs6000_mtfsf"
[(unspec_volatile [(match_operand:SI 0 "const_int_operand" "i")
(match_operand:DF 1 "gpc_reg_operand" "d")]
UNSPECV_MTFSF)]
"TARGET_HARD_FLOAT"
"mtfsf %0,%1")
(define_insn "rs6000_mtfsf_hi"
[(unspec_volatile [(match_operand:SI 0 "const_int_operand" "n")
(match_operand:DF 1 "gpc_reg_operand" "d")]
UNSPECV_MTFSF_HI)]
"TARGET_HARD_FLOAT"
"mtfsf %0,%1,0,1")
;; Power8 fusion support for fusing an addis instruction with a D-form load of
;; a GPR. The fused ops must be physically adjacent, and the addis must use the
;; same register that is being loaded. On Power8 GPR loads, we try to use the
;; register that is being loaded if we can. The peephole2 then gathers any other
;; fused possibilities that it can find after register allocation. If power9
;; fusion is selected, we also fuse floating point loads/stores.
;; Find cases where the addis that feeds into a load instruction is either used
;; once or is the same as the target register, and replace it with the fusion
;; insn.
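;; For illustration (sketch only; the relocations shown assume TOC-relative
;; addressing): the peephole turns an adjacent pair such as
;;	addis 9,2,var@toc@ha
;;	lwz   9,var@toc@l(9)
;; into the single fusion insn below, which prints both instructions back to
;; back so they stay physically adjacent for the hardware to fuse.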
(define_peephole2
[(set (match_operand:P 0 "base_reg_operand")
(match_operand:P 1 "fusion_gpr_addis"))
(set (match_operand:INT1 2 "base_reg_operand")
(match_operand:INT1 3 "fusion_gpr_mem_load"))]
"TARGET_P8_FUSION
&& fusion_gpr_load_p (operands[0], operands[1], operands[2],
operands[3])"
[(const_int 0)]
{
expand_fusion_gpr_load (operands);
DONE;
})
;; Fusion insn, created by the define_peephole2 above (and eventually by
;; reload)
(define_insn "*fusion_gpr_load_<mode>"
[(set (match_operand:INT1 0 "base_reg_operand" "=b")
(unspec:INT1 [(match_operand:INT1 1 "fusion_addis_mem_combo_load" "wF")]
UNSPEC_FUSION_GPR))]
"TARGET_P8_FUSION"
{
return emit_fusion_gpr_load (operands[0], operands[1]);
}
[(set_attr "type" "load")
(set_attr "length" "8")])
;; Optimize cases where we want to do a D-form load (register+offset) on
;; ISA 2.06/2.07 to an Altivec register, and the register allocator
;; has generated:
;; LFD 0,32(3)
;; XXLOR 32,0,0
;;
;; and we change this to:
;; LI 9,32
;; LXSDX 32,3,9
(define_peephole2
[(match_scratch:P 0 "b")
(set (match_operand:ALTIVEC_DFORM 1 "fpr_reg_operand")
(match_operand:ALTIVEC_DFORM 2 "simple_offsettable_mem_operand"))
(set (match_operand:ALTIVEC_DFORM 3 "altivec_register_operand")
(match_dup 1))]
"TARGET_VSX && !TARGET_P9_VECTOR && peep2_reg_dead_p (2, operands[1])"
[(set (match_dup 0)
(match_dup 4))
(set (match_dup 3)
(match_dup 5))]
{
rtx tmp_reg = operands[0];
rtx mem = operands[2];
rtx addr = XEXP (mem, 0);
rtx add_op0, add_op1, new_addr;
gcc_assert (GET_CODE (addr) == PLUS || GET_CODE (addr) == LO_SUM);
add_op0 = XEXP (addr, 0);
add_op1 = XEXP (addr, 1);
gcc_assert (REG_P (add_op0));
new_addr = gen_rtx_PLUS (Pmode, add_op0, tmp_reg);
operands[4] = add_op1;
operands[5] = change_address (mem, <ALTIVEC_DFORM:MODE>mode, new_addr);
})
;; Optimize cases where we want to do a D-form store on ISA 2.06/2.07 from an
;; Altivec register, and the register allocator has generated:
;; XXLOR 0,32,32
;; STFD 0,32(3)
;;
;; and we change this to:
;; LI 9,32
;; STXSDX 32,3,9
(define_peephole2
[(match_scratch:P 0 "b")
(set (match_operand:ALTIVEC_DFORM 1 "fpr_reg_operand")
(match_operand:ALTIVEC_DFORM 2 "altivec_register_operand"))
(set (match_operand:ALTIVEC_DFORM 3 "simple_offsettable_mem_operand")
(match_dup 1))]
"TARGET_VSX && !TARGET_P9_VECTOR && peep2_reg_dead_p (2, operands[1])"
[(set (match_dup 0)
(match_dup 4))
(set (match_dup 5)
(match_dup 2))]
{
rtx tmp_reg = operands[0];
rtx mem = operands[3];
rtx addr = XEXP (mem, 0);
rtx add_op0, add_op1, new_addr;
gcc_assert (GET_CODE (addr) == PLUS || GET_CODE (addr) == LO_SUM);
add_op0 = XEXP (addr, 0);
add_op1 = XEXP (addr, 1);
gcc_assert (REG_P (add_op0));
new_addr = gen_rtx_PLUS (Pmode, add_op0, tmp_reg);
operands[4] = add_op1;
operands[5] = change_address (mem, <ALTIVEC_DFORM:MODE>mode, new_addr);
})
;; Miscellaneous ISA 2.06 (power7) instructions
(define_insn "addg6s"
[(set (match_operand:SI 0 "register_operand" "=r")
(unspec:SI [(match_operand:SI 1 "register_operand" "r")
(match_operand:SI 2 "register_operand" "r")]
UNSPEC_ADDG6S))]
"TARGET_POPCNTD"
"addg6s %0,%1,%2"
[(set_attr "type" "integer")])
(define_insn "cdtbcd"
[(set (match_operand:SI 0 "register_operand" "=r")
(unspec:SI [(match_operand:SI 1 "register_operand" "r")]
UNSPEC_CDTBCD))]
"TARGET_POPCNTD"
"cdtbcd %0,%1"
[(set_attr "type" "integer")])
(define_insn "cbcdtd"
[(set (match_operand:SI 0 "register_operand" "=r")
(unspec:SI [(match_operand:SI 1 "register_operand" "r")]
UNSPEC_CBCDTD))]
"TARGET_POPCNTD"
"cbcdtd %0,%1"
[(set_attr "type" "integer")])
(define_int_iterator UNSPEC_DIV_EXTEND [UNSPEC_DIVE
UNSPEC_DIVEU])
(define_int_attr div_extend [(UNSPEC_DIVE "e")
(UNSPEC_DIVEU "eu")])
(define_insn "div<div_extend>_<mode>"
[(set (match_operand:GPR 0 "register_operand" "=r")
(unspec:GPR [(match_operand:GPR 1 "register_operand" "r")
(match_operand:GPR 2 "register_operand" "r")]
UNSPEC_DIV_EXTEND))]
"TARGET_POPCNTD"
"div<wd><div_extend> %0,%1,%2"
[(set_attr "type" "div")
(set_attr "size" "<bits>")])
;; Pack/unpack 128-bit floating point types that take 2 scalar registers
; Type of the 64-bit part when packing/unpacking 128-bit floating point types
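; For illustration (assuming the usual PowerPC builtins, with x an IBM
; long double in the 2-register format), these source-level calls map onto the
; unpack<mode> and pack<mode> patterns below:
;	double hi = __builtin_unpack_longdouble (x, 0);	/* first (high-order) double */
;	double lo = __builtin_unpack_longdouble (x, 1);	/* second (low-order) double */
;	long double y = __builtin_pack_longdouble (hi, lo);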
(define_mode_attr FP128_64 [(TF "DF")
(IF "DF")
(TD "DI")
(KF "DI")])
(define_expand "unpack<mode>"
[(set (match_operand:<FP128_64> 0 "nonimmediate_operand")
(unspec:<FP128_64>
[(match_operand:FMOVE128 1 "register_operand")
(match_operand:QI 2 "const_0_to_1_operand")]
UNSPEC_UNPACK_128BIT))]
"FLOAT128_2REG_P (<MODE>mode)"
"")
(define_insn_and_split "unpack<mode>_dm"
[(set (match_operand:<FP128_64> 0 "nonimmediate_operand" "=d,m,d,r,m")
(unspec:<FP128_64>
[(match_operand:FMOVE128 1 "register_operand" "d,d,r,d,r")
(match_operand:QI 2 "const_0_to_1_operand" "i,i,i,i,i")]
UNSPEC_UNPACK_128BIT))]
"TARGET_POWERPC64 && TARGET_DIRECT_MOVE && FLOAT128_2REG_P (<MODE>mode)"
"#"
"&& reload_completed"
[(set (match_dup 0) (match_dup 3))]
{
unsigned fp_regno = REGNO (operands[1]) + UINTVAL (operands[2]);
if (REG_P (operands[0]) && REGNO (operands[0]) == fp_regno)
{
emit_note (NOTE_INSN_DELETED);
DONE;
}
operands[3] = gen_rtx_REG (<FP128_64>mode, fp_regno);
}
[(set_attr "type" "fp,fpstore,mffgpr,mftgpr,store")])
(define_insn_and_split "unpack<mode>_nodm"
[(set (match_operand:<FP128_64> 0 "nonimmediate_operand" "=d,m")
(unspec:<FP128_64>
[(match_operand:FMOVE128 1 "register_operand" "d,d")
(match_operand:QI 2 "const_0_to_1_operand" "i,i")]
UNSPEC_UNPACK_128BIT))]
"(!TARGET_POWERPC64 || !TARGET_DIRECT_MOVE) && FLOAT128_2REG_P (<MODE>mode)"
"#"
"&& reload_completed"
[(set (match_dup 0) (match_dup 3))]
{
unsigned fp_regno = REGNO (operands[1]) + UINTVAL (operands[2]);
if (REG_P (operands[0]) && REGNO (operands[0]) == fp_regno)
{
emit_note (NOTE_INSN_DELETED);
DONE;
}
operands[3] = gen_rtx_REG (<FP128_64>mode, fp_regno);
}
[(set_attr "type" "fp,fpstore")])
(define_insn_and_split "pack<mode>"
[(set (match_operand:FMOVE128 0 "register_operand" "=&d")
(unspec:FMOVE128
[(match_operand:<FP128_64> 1 "register_operand" "d")
(match_operand:<FP128_64> 2 "register_operand" "d")]
UNSPEC_PACK_128BIT))]
"FLOAT128_2REG_P (<MODE>mode)"
"#"
"&& reload_completed"
[(set (match_dup 3) (match_dup 1))
(set (match_dup 4) (match_dup 2))]
{
unsigned dest_hi = REGNO (operands[0]);
unsigned dest_lo = dest_hi + 1;
gcc_assert (!IN_RANGE (REGNO (operands[1]), dest_hi, dest_lo));
gcc_assert (!IN_RANGE (REGNO (operands[2]), dest_hi, dest_lo));
operands[3] = gen_rtx_REG (<FP128_64>mode, dest_hi);
operands[4] = gen_rtx_REG (<FP128_64>mode, dest_lo);
}
[(set_attr "type" "fp")
(set_attr "length" "8")])
(define_insn "unpack<mode>"
[(set (match_operand:DI 0 "register_operand" "=wa,wa")
(unspec:DI [(match_operand:FMOVE128_VSX 1 "register_operand" "0,wa")
(match_operand:QI 2 "const_0_to_1_operand" "O,i")]
UNSPEC_UNPACK_128BIT))]
"VECTOR_MEM_ALTIVEC_OR_VSX_P (<MODE>mode)"
{
if (REGNO (operands[0]) == REGNO (operands[1]) && INTVAL (operands[2]) == 0)
return ASM_COMMENT_START " xxpermdi to same register";
operands[3] = GEN_INT (INTVAL (operands[2]) == 0 ? 0 : 3);
return "xxpermdi %x0,%x1,%x1,%3";
}
[(set_attr "type" "vecperm")])
(define_insn "pack<mode>"
[(set (match_operand:FMOVE128_VSX 0 "register_operand" "=wa")
(unspec:FMOVE128_VSX
[(match_operand:DI 1 "register_operand" "wa")
(match_operand:DI 2 "register_operand" "wa")]
UNSPEC_PACK_128BIT))]
"TARGET_VSX"
"xxpermdi %x0,%x1,%x2,0"
[(set_attr "type" "vecperm")])
;; ISA 3.0 IEEE 128-bit floating point support.
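;; For illustration (register numbers depend on the ABI): with hardware IEEE
;; 128-bit support enabled,
;;	__float128 f (__float128 a, __float128 b) { return a + b; }
;; compiles to a single
;;	xsaddqp 2,2,3
;; via the add<mode>3 pattern below.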
(define_insn "add<mode>3"
[(set (match_operand:IEEE128 0 "altivec_register_operand" "=v")
(plus:IEEE128
(match_operand:IEEE128 1 "altivec_register_operand" "v")
(match_operand:IEEE128 2 "altivec_register_operand" "v")))]
"TARGET_FLOAT128_HW && FLOAT128_IEEE_P (<MODE>mode)"
"xsaddqp %0,%1,%2"
[(set_attr "type" "vecfloat")
(set_attr "size" "128")])
(define_insn "sub<mode>3"
[(set (match_operand:IEEE128 0 "altivec_register_operand" "=v")
(minus:IEEE128
(match_operand:IEEE128 1 "altivec_register_operand" "v")
(match_operand:IEEE128 2 "altivec_register_operand" "v")))]
"TARGET_FLOAT128_HW && FLOAT128_IEEE_P (<MODE>mode)"
"xssubqp %0,%1,%2"
[(set_attr "type" "vecfloat")
(set_attr "size" "128")])
(define_insn "mul<mode>3"
[(set (match_operand:IEEE128 0 "altivec_register_operand" "=v")
(mult:IEEE128
(match_operand:IEEE128 1 "altivec_register_operand" "v")
(match_operand:IEEE128 2 "altivec_register_operand" "v")))]
"TARGET_FLOAT128_HW && FLOAT128_IEEE_P (<MODE>mode)"
"xsmulqp %0,%1,%2"
[(set_attr "type" "qmul")
(set_attr "size" "128")])
(define_insn "div<mode>3"
[(set (match_operand:IEEE128 0 "altivec_register_operand" "=v")
(div:IEEE128
(match_operand:IEEE128 1 "altivec_register_operand" "v")
(match_operand:IEEE128 2 "altivec_register_operand" "v")))]
"TARGET_FLOAT128_HW && FLOAT128_IEEE_P (<MODE>mode)"
"xsdivqp %0,%1,%2"
[(set_attr "type" "vecdiv")
(set_attr "size" "128")])
(define_insn "sqrt<mode>2"
[(set (match_operand:IEEE128 0 "altivec_register_operand" "=v")
(sqrt:IEEE128
(match_operand:IEEE128 1 "altivec_register_operand" "v")))]
"TARGET_FLOAT128_HW && FLOAT128_IEEE_P (<MODE>mode)"
"xssqrtqp %0,%1"
[(set_attr "type" "vecdiv")
(set_attr "size" "128")])
(define_expand "copysign<mode>3"
[(use (match_operand:IEEE128 0 "altivec_register_operand"))
(use (match_operand:IEEE128 1 "altivec_register_operand"))
(use (match_operand:IEEE128 2 "altivec_register_operand"))]
"FLOAT128_IEEE_P (<MODE>mode)"
{
if (TARGET_FLOAT128_HW)
emit_insn (gen_copysign<mode>3_hard (operands[0], operands[1],
operands[2]));
else
emit_insn (gen_copysign<mode>3_soft (operands[0], operands[1],
operands[2]));
DONE;
})
(define_insn "copysign<mode>3_hard"
[(set (match_operand:IEEE128 0 "altivec_register_operand" "=v")
(unspec:IEEE128
[(match_operand:IEEE128 1 "altivec_register_operand" "v")
(match_operand:IEEE128 2 "altivec_register_operand" "v")]
UNSPEC_COPYSIGN))]
"TARGET_FLOAT128_HW && FLOAT128_IEEE_P (<MODE>mode)"
"xscpsgnqp %0,%2,%1"
[(set_attr "type" "vecmove")
(set_attr "size" "128")])
(define_insn "copysign<mode>3_soft"
[(set (match_operand:IEEE128 0 "altivec_register_operand" "=v")
(unspec:IEEE128
[(match_operand:IEEE128 1 "altivec_register_operand" "v")
(match_operand:IEEE128 2 "altivec_register_operand" "v")]
UNSPEC_COPYSIGN))
(clobber (match_scratch:IEEE128 3 "=&v"))]
"!TARGET_FLOAT128_HW && FLOAT128_IEEE_P (<MODE>mode)"
"xscpsgndp %x3,%x2,%x1\;xxpermdi %x0,%x3,%x1,1"
[(set_attr "type" "veccomplex")
(set_attr "length" "8")])
(define_insn "@neg<mode>2_hw"
[(set (match_operand:IEEE128 0 "altivec_register_operand" "=v")
(neg:IEEE128
(match_operand:IEEE128 1 "altivec_register_operand" "v")))]
"TARGET_FLOAT128_HW && FLOAT128_IEEE_P (<MODE>mode)"
"xsnegqp %0,%1"
[(set_attr "type" "vecmove")
(set_attr "size" "128")])
(define_insn "@abs<mode>2_hw"
[(set (match_operand:IEEE128 0 "altivec_register_operand" "=v")
(abs:IEEE128
(match_operand:IEEE128 1 "altivec_register_operand" "v")))]
"TARGET_FLOAT128_HW && FLOAT128_IEEE_P (<MODE>mode)"
"xsabsqp %0,%1"
[(set_attr "type" "vecmove")
(set_attr "size" "128")])
(define_insn "*nabs<mode>2_hw"
[(set (match_operand:IEEE128 0 "altivec_register_operand" "=v")
(neg:IEEE128
(abs:IEEE128
(match_operand:IEEE128 1 "altivec_register_operand" "v"))))]
"TARGET_FLOAT128_HW && FLOAT128_IEEE_P (<MODE>mode)"
"xsnabsqp %0,%1"
[(set_attr "type" "vecmove")
(set_attr "size" "128")])
;; Initially don't worry about doing fusion
(define_insn "fma<mode>4_hw"
[(set (match_operand:IEEE128 0 "altivec_register_operand" "=v")
(fma:IEEE128
(match_operand:IEEE128 1 "altivec_register_operand" "%v")
(match_operand:IEEE128 2 "altivec_register_operand" "v")
(match_operand:IEEE128 3 "altivec_register_operand" "0")))]
"TARGET_FLOAT128_HW && FLOAT128_IEEE_P (<MODE>mode)"
"xsmaddqp %0,%1,%2"
[(set_attr "type" "qmul")
(set_attr "size" "128")])
(define_insn "*fms<mode>4_hw"
[(set (match_operand:IEEE128 0 "altivec_register_operand" "=v")
(fma:IEEE128
(match_operand:IEEE128 1 "altivec_register_operand" "%v")
(match_operand:IEEE128 2 "altivec_register_operand" "v")
(neg:IEEE128
(match_operand:IEEE128 3 "altivec_register_operand" "0"))))]
"TARGET_FLOAT128_HW && FLOAT128_IEEE_P (<MODE>mode)"
"xsmsubqp %0,%1,%2"
[(set_attr "type" "qmul")
(set_attr "size" "128")])
(define_insn "*nfma<mode>4_hw"
[(set (match_operand:IEEE128 0 "altivec_register_operand" "=v")
(neg:IEEE128
(fma:IEEE128
(match_operand:IEEE128 1 "altivec_register_operand" "%v")
(match_operand:IEEE128 2 "altivec_register_operand" "v")
(match_operand:IEEE128 3 "altivec_register_operand" "0"))))]
"TARGET_FLOAT128_HW && FLOAT128_IEEE_P (<MODE>mode)"
"xsnmaddqp %0,%1,%2"
[(set_attr "type" "qmul")
(set_attr "size" "128")])
(define_insn "*nfms<mode>4_hw"
[(set (match_operand:IEEE128 0 "altivec_register_operand" "=v")
(neg:IEEE128
(fma:IEEE128
(match_operand:IEEE128 1 "altivec_register_operand" "%v")
(match_operand:IEEE128 2 "altivec_register_operand" "v")
(neg:IEEE128
(match_operand:IEEE128 3 "altivec_register_operand" "0")))))]
"TARGET_FLOAT128_HW && FLOAT128_IEEE_P (<MODE>mode)"
"xsnmsubqp %0,%1,%2"
[(set_attr "type" "qmul")
(set_attr "size" "128")])
(define_insn "extend<SFDF:mode><IEEE128:mode>2_hw"
[(set (match_operand:IEEE128 0 "altivec_register_operand" "=v")
(float_extend:IEEE128
(match_operand:SFDF 1 "altivec_register_operand" "v")))]
"TARGET_FLOAT128_HW && FLOAT128_IEEE_P (<IEEE128:MODE>mode)"
"xscvdpqp %0,%1"
[(set_attr "type" "vecfloat")
(set_attr "size" "128")])
;; Conversion between KFmode and TFmode, when TFmode is IEEE 128-bit floating
;; point, is a simple copy.
(define_insn_and_split "extendkftf2"
[(set (match_operand:TF 0 "vsx_register_operand" "=wa,?wa")
(float_extend:TF (match_operand:KF 1 "vsx_register_operand" "0,wa")))]
"TARGET_FLOAT128_TYPE && TARGET_IEEEQUAD"
"@
#
xxlor %x0,%x1,%x1"
"&& reload_completed && REGNO (operands[0]) == REGNO (operands[1])"
[(const_int 0)]
{
emit_note (NOTE_INSN_DELETED);
DONE;
}
[(set_attr "type" "*,veclogical")
(set_attr "length" "0,4")])
(define_insn_and_split "trunctfkf2"
[(set (match_operand:KF 0 "vsx_register_operand" "=wa,?wa")
(float_truncate:KF (match_operand:TF 1 "vsx_register_operand" "0,wa")))]
"TARGET_FLOAT128_TYPE && TARGET_IEEEQUAD"
"@
#
xxlor %x0,%x1,%x1"
"&& reload_completed && REGNO (operands[0]) == REGNO (operands[1])"
[(const_int 0)]
{
emit_note (NOTE_INSN_DELETED);
DONE;
}
[(set_attr "type" "*,veclogical")
(set_attr "length" "0,4")])
(define_insn "trunc<mode>df2_hw"
[(set (match_operand:DF 0 "altivec_register_operand" "=v")
(float_truncate:DF
(match_operand:IEEE128 1 "altivec_register_operand" "v")))]
"TARGET_FLOAT128_HW && FLOAT128_IEEE_P (<MODE>mode)"
"xscvqpdp %0,%1"
[(set_attr "type" "vecfloat")
(set_attr "size" "128")])
;; There is no KFmode -> SFmode instruction. Preserve the accuracy by doing
;; the KFmode -> DFmode conversion with round to odd rather than the normal
;; rounding mode, so that the subsequent DFmode -> SFmode rounding does not
;; introduce a double-rounding error.
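;;
;; Purely as an illustration (not part of the machine description), a minimal
;; C sketch of the two-step narrowing; it assumes the PowerPC built-in
;; __builtin_truncf128_round_to_odd is available (e.g. with -mfloat128 on an
;; ISA 3.0 target):
;;
;;   float kf_to_sf (__float128 x)
;;   {
;;     /* Narrow to double with round to odd (xscvqpdpo); the sticky low bit
;;        keeps enough information that the float rounding below behaves as
;;        if x had been rounded to float directly.  */
;;     double tmp = __builtin_truncf128_round_to_odd (x);
;;     return (float) tmp;
;;   }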
(define_insn_and_split "trunc<mode>sf2_hw"
[(set (match_operand:SF 0 "vsx_register_operand" "=wa")
(float_truncate:SF
(match_operand:IEEE128 1 "altivec_register_operand" "v")))
(clobber (match_scratch:DF 2 "=v"))]
"TARGET_FLOAT128_HW && FLOAT128_IEEE_P (<MODE>mode)"
"#"
"&& 1"
[(set (match_dup 2)
(unspec:DF [(match_dup 1)]
UNSPEC_TRUNC_ROUND_TO_ODD))
(set (match_dup 0)
(float_truncate:SF (match_dup 2)))]
{
if (GET_CODE (operands[2]) == SCRATCH)
operands[2] = gen_reg_rtx (DFmode);
}
[(set_attr "type" "vecfloat")
(set_attr "length" "8")
(set_attr "isa" "p8v")])
;; Conversion between IEEE 128-bit and integer types
;; The fix function for DImode and SImode was declared earlier as a
;; define_expand. It calls into rs6000_expand_float128_convert if we don't
;; have IEEE 128-bit hardware support. QImode and HImode are not provided
;; unless we have the IEEE 128-bit hardware.
;;
;; Unlike the code for converting SFmode/DFmode to QImode/HImode, we don't have
;; to provide a GPR target that uses a direct move and does the conversion in
;; the GPR, which works around QImode/HImode not being allowed in vector
;; registers on ISA 2.07 (power8).
(define_insn "fix<uns>_<IEEE128:mode><SDI:mode>2_hw"
[(set (match_operand:SDI 0 "altivec_register_operand" "=v")
(any_fix:SDI (match_operand:IEEE128 1 "altivec_register_operand" "v")))]
"TARGET_FLOAT128_HW && FLOAT128_IEEE_P (<IEEE128:MODE>mode)"
"xscvqp<su><wd>z %0,%1"
[(set_attr "type" "vecfloat")
(set_attr "size" "128")])
(define_insn "fix<uns>_trunc<IEEE128:mode><QHI:mode>2"
[(set (match_operand:QHI 0 "altivec_register_operand" "=v")
(any_fix:QHI
(match_operand:IEEE128 1 "altivec_register_operand" "v")))]
"TARGET_FLOAT128_HW && FLOAT128_IEEE_P (<IEEE128:MODE>mode)"
"xscvqp<su>wz %0,%1"
[(set_attr "type" "vecfloat")
(set_attr "size" "128")])
;; Combiner patterns to prevent moving the result of converting an IEEE 128-bit
;; floating point value to an 8/16/32-bit integer into a GPR just to store it.
(define_insn_and_split "*fix<uns>_trunc<IEEE128:mode><QHSI:mode>2_mem"
[(set (match_operand:QHSI 0 "memory_operand" "=Z")
(any_fix:QHSI
(match_operand:IEEE128 1 "altivec_register_operand" "v")))
(clobber (match_scratch:QHSI 2 "=v"))]
"TARGET_FLOAT128_HW && FLOAT128_IEEE_P (<IEEE128:MODE>mode)"
"#"
"&& reload_completed"
[(set (match_dup 2)
(any_fix:QHSI (match_dup 1)))
(set (match_dup 0)
(match_dup 2))])
(define_insn "float_<mode>di2_hw"
[(set (match_operand:IEEE128 0 "altivec_register_operand" "=v")
(float:IEEE128 (match_operand:DI 1 "altivec_register_operand" "v")))]
"TARGET_FLOAT128_HW && FLOAT128_IEEE_P (<MODE>mode)"
"xscvsdqp %0,%1"
[(set_attr "type" "vecfloat")
(set_attr "size" "128")])
(define_insn_and_split "float_<mode>si2_hw"
[(set (match_operand:IEEE128 0 "altivec_register_operand" "=v")
(float:IEEE128 (match_operand:SI 1 "nonimmediate_operand" "vrZ")))
(clobber (match_scratch:DI 2 "=v"))]
"TARGET_FLOAT128_HW && FLOAT128_IEEE_P (<MODE>mode)"
"#"
"&& 1"
[(set (match_dup 2)
(sign_extend:DI (match_dup 1)))
(set (match_dup 0)
(float:IEEE128 (match_dup 2)))]
{
if (GET_CODE (operands[2]) == SCRATCH)
operands[2] = gen_reg_rtx (DImode);
if (MEM_P (operands[1]))
operands[1] = rs6000_force_indexed_or_indirect_mem (operands[1]);
})
(define_insn_and_split "float<QHI:mode><IEEE128:mode>2"
[(set (match_operand:IEEE128 0 "altivec_register_operand" "=v,v,v")
(float:IEEE128 (match_operand:QHI 1 "nonimmediate_operand" "v,r,Z")))
(clobber (match_scratch:DI 2 "=X,r,X"))]
"TARGET_FLOAT128_HW && FLOAT128_IEEE_P (<IEEE128:MODE>mode)"
"#"
"&& reload_completed"
[(const_int 0)]
{
rtx dest = operands[0];
rtx src = operands[1];
rtx dest_di = gen_rtx_REG (DImode, REGNO (dest));
if (altivec_register_operand (src, <QHI:MODE>mode))
emit_insn (gen_extend<QHI:mode>di2 (dest_di, src));
else if (int_reg_operand (src, <QHI:MODE>mode))
{
rtx ext_di = operands[2];
emit_insn (gen_extend<QHI:mode>di2 (ext_di, src));
emit_move_insn (dest_di, ext_di);
}
else if (MEM_P (src))
{
rtx dest_qhi = gen_rtx_REG (<QHI:MODE>mode, REGNO (dest));
emit_move_insn (dest_qhi, src);
emit_insn (gen_extend<QHI:mode>di2 (dest_di, dest_qhi));
}
else
gcc_unreachable ();
emit_insn (gen_float_<IEEE128:mode>di2_hw (dest, dest_di));
DONE;
}
[(set_attr "length" "8,12,12")
(set_attr "type" "vecfloat")
(set_attr "size" "128")])
(define_insn "floatuns_<mode>di2_hw"
[(set (match_operand:IEEE128 0 "altivec_register_operand" "=v")
(unsigned_float:IEEE128
(match_operand:DI 1 "altivec_register_operand" "v")))]
"TARGET_FLOAT128_HW && FLOAT128_IEEE_P (<MODE>mode)"
"xscvudqp %0,%1"
[(set_attr "type" "vecfloat")
(set_attr "size" "128")])
(define_insn_and_split "floatuns_<mode>si2_hw"
[(set (match_operand:IEEE128 0 "altivec_register_operand" "=v")
(unsigned_float:IEEE128
(match_operand:SI 1 "nonimmediate_operand" "vrZ")))
(clobber (match_scratch:DI 2 "=v"))]
"TARGET_FLOAT128_HW && FLOAT128_IEEE_P (<MODE>mode)"
"#"
"&& 1"
[(set (match_dup 2)
(zero_extend:DI (match_dup 1)))
(set (match_dup 0)
(float:IEEE128 (match_dup 2)))]
{
if (GET_CODE (operands[2]) == SCRATCH)
operands[2] = gen_reg_rtx (DImode);
if (MEM_P (operands[1]))
operands[1] = rs6000_force_indexed_or_indirect_mem (operands[1]);
})
(define_insn_and_split "floatuns<QHI:mode><IEEE128:mode>2"
[(set (match_operand:IEEE128 0 "altivec_register_operand" "=v,v,v")
(unsigned_float:IEEE128
(match_operand:QHI 1 "nonimmediate_operand" "v,r,Z")))
(clobber (match_scratch:DI 2 "=X,r,X"))]
"TARGET_FLOAT128_HW && FLOAT128_IEEE_P (<IEEE128:MODE>mode)"
"#"
"&& reload_completed"
[(const_int 0)]
{
rtx dest = operands[0];
rtx src = operands[1];
rtx dest_di = gen_rtx_REG (DImode, REGNO (dest));
if (altivec_register_operand (src, <QHI:MODE>mode) || MEM_P (src))
emit_insn (gen_zero_extend<QHI:mode>di2 (dest_di, src));
else if (int_reg_operand (src, <QHI:MODE>mode))
{
rtx ext_di = operands[2];
emit_insn (gen_zero_extend<QHI:mode>di2 (ext_di, src));
emit_move_insn (dest_di, ext_di);
}
else
gcc_unreachable ();
emit_insn (gen_floatuns_<IEEE128:mode>di2_hw (dest, dest_di));
DONE;
}
[(set_attr "length" "8,12,8")
(set_attr "type" "vecfloat")
(set_attr "size" "128")])
;; IEEE 128-bit round to integer built-in functions
(define_insn "floor<mode>2"
[(set (match_operand:IEEE128 0 "altivec_register_operand" "=v")
(unspec:IEEE128
[(match_operand:IEEE128 1 "altivec_register_operand" "v")]
UNSPEC_FRIM))]
"TARGET_FLOAT128_HW && FLOAT128_IEEE_P (<MODE>mode)"
"xsrqpi 1,%0,%1,3"
[(set_attr "type" "vecfloat")
(set_attr "size" "128")])
(define_insn "ceil<mode>2"
[(set (match_operand:IEEE128 0 "altivec_register_operand" "=v")
(unspec:IEEE128
[(match_operand:IEEE128 1 "altivec_register_operand" "v")]
UNSPEC_FRIP))]
"TARGET_FLOAT128_HW && FLOAT128_IEEE_P (<MODE>mode)"
"xsrqpi 1,%0,%1,2"
[(set_attr "type" "vecfloat")
(set_attr "size" "128")])
(define_insn "btrunc<mode>2"
[(set (match_operand:IEEE128 0 "altivec_register_operand" "=v")
(unspec:IEEE128
[(match_operand:IEEE128 1 "altivec_register_operand" "v")]
UNSPEC_FRIZ))]
"TARGET_FLOAT128_HW && FLOAT128_IEEE_P (<MODE>mode)"
"xsrqpi 1,%0,%1,1"
[(set_attr "type" "vecfloat")
(set_attr "size" "128")])
(define_insn "round<mode>2"
[(set (match_operand:IEEE128 0 "altivec_register_operand" "=v")
(unspec:IEEE128
[(match_operand:IEEE128 1 "altivec_register_operand" "v")]
UNSPEC_FRIN))]
"TARGET_FLOAT128_HW && FLOAT128_IEEE_P (<MODE>mode)"
"xsrqpi 0,%0,%1,0"
[(set_attr "type" "vecfloat")
(set_attr "size" "128")])
;; IEEE 128-bit instructions with round to odd semantics
(define_insn "add<mode>3_odd"
[(set (match_operand:IEEE128 0 "altivec_register_operand" "=v")
(unspec:IEEE128
[(match_operand:IEEE128 1 "altivec_register_operand" "v")
(match_operand:IEEE128 2 "altivec_register_operand" "v")]
UNSPEC_ADD_ROUND_TO_ODD))]
"TARGET_FLOAT128_HW && FLOAT128_IEEE_P (<MODE>mode)"
"xsaddqpo %0,%1,%2"
[(set_attr "type" "vecfloat")
(set_attr "size" "128")])
(define_insn "sub<mode>3_odd"
[(set (match_operand:IEEE128 0 "altivec_register_operand" "=v")
(unspec:IEEE128
[(match_operand:IEEE128 1 "altivec_register_operand" "v")
(match_operand:IEEE128 2 "altivec_register_operand" "v")]
UNSPEC_SUB_ROUND_TO_ODD))]
"TARGET_FLOAT128_HW && FLOAT128_IEEE_P (<MODE>mode)"
"xssubqpo %0,%1,%2"
[(set_attr "type" "vecfloat")
(set_attr "size" "128")])
(define_insn "mul<mode>3_odd"
[(set (match_operand:IEEE128 0 "altivec_register_operand" "=v")
(unspec:IEEE128
[(match_operand:IEEE128 1 "altivec_register_operand" "v")
(match_operand:IEEE128 2 "altivec_register_operand" "v")]
UNSPEC_MUL_ROUND_TO_ODD))]
"TARGET_FLOAT128_HW && FLOAT128_IEEE_P (<MODE>mode)"
"xsmulqpo %0,%1,%2"
[(set_attr "type" "qmul")
(set_attr "size" "128")])
(define_insn "div<mode>3_odd"
[(set (match_operand:IEEE128 0 "altivec_register_operand" "=v")
(unspec:IEEE128
[(match_operand:IEEE128 1 "altivec_register_operand" "v")
(match_operand:IEEE128 2 "altivec_register_operand" "v")]
UNSPEC_DIV_ROUND_TO_ODD))]
"TARGET_FLOAT128_HW && FLOAT128_IEEE_P (<MODE>mode)"
"xsdivqpo %0,%1,%2"
[(set_attr "type" "vecdiv")
(set_attr "size" "128")])
(define_insn "sqrt<mode>2_odd"
[(set (match_operand:IEEE128 0 "altivec_register_operand" "=v")
(unspec:IEEE128
[(match_operand:IEEE128 1 "altivec_register_operand" "v")]
UNSPEC_SQRT_ROUND_TO_ODD))]
"TARGET_FLOAT128_HW && FLOAT128_IEEE_P (<MODE>mode)"
"xssqrtqpo %0,%1"
[(set_attr "type" "vecdiv")
(set_attr "size" "128")])
(define_insn "fma<mode>4_odd"
[(set (match_operand:IEEE128 0 "altivec_register_operand" "=v")
(unspec:IEEE128
[(match_operand:IEEE128 1 "altivec_register_operand" "v")
(match_operand:IEEE128 2 "altivec_register_operand" "v")
(match_operand:IEEE128 3 "altivec_register_operand" "0")]
UNSPEC_FMA_ROUND_TO_ODD))]
"TARGET_FLOAT128_HW && FLOAT128_IEEE_P (<MODE>mode)"
"xsmaddqpo %0,%1,%2"
[(set_attr "type" "qmul")
(set_attr "size" "128")])
(define_insn "*fms<mode>4_odd"
[(set (match_operand:IEEE128 0 "altivec_register_operand" "=v")
(unspec:IEEE128
[(match_operand:IEEE128 1 "altivec_register_operand" "%v")
(match_operand:IEEE128 2 "altivec_register_operand" "v")
(neg:IEEE128
(match_operand:IEEE128 3 "altivec_register_operand" "0"))]
UNSPEC_FMA_ROUND_TO_ODD))]
"TARGET_FLOAT128_HW && FLOAT128_IEEE_P (<MODE>mode)"
"xsmsubqpo %0,%1,%2"
[(set_attr "type" "qmul")
(set_attr "size" "128")])
(define_insn "*nfma<mode>4_odd"
[(set (match_operand:IEEE128 0 "altivec_register_operand" "=v")
(neg:IEEE128
(unspec:IEEE128
[(match_operand:IEEE128 1 "altivec_register_operand" "%v")
(match_operand:IEEE128 2 "altivec_register_operand" "v")
(match_operand:IEEE128 3 "altivec_register_operand" "0")]
UNSPEC_FMA_ROUND_TO_ODD)))]
"TARGET_FLOAT128_HW && FLOAT128_IEEE_P (<MODE>mode)"
"xsnmaddqpo %0,%1,%2"
[(set_attr "type" "qmul")
(set_attr "size" "128")])
(define_insn "*nfms<mode>4_odd"
[(set (match_operand:IEEE128 0 "altivec_register_operand" "=v")
(neg:IEEE128
(unspec:IEEE128
[(match_operand:IEEE128 1 "altivec_register_operand" "%v")
(match_operand:IEEE128 2 "altivec_register_operand" "v")
(neg:IEEE128
(match_operand:IEEE128 3 "altivec_register_operand" "0"))]
UNSPEC_FMA_ROUND_TO_ODD)))]
"TARGET_FLOAT128_HW && FLOAT128_IEEE_P (<MODE>mode)"
"xsnmsubqpo %0,%1,%2"
[(set_attr "type" "qmul")
(set_attr "size" "128")])
(define_insn "trunc<mode>df2_odd"
[(set (match_operand:DF 0 "vsx_register_operand" "=v")
(unspec:DF [(match_operand:IEEE128 1 "altivec_register_operand" "v")]
UNSPEC_TRUNC_ROUND_TO_ODD))]
"TARGET_FLOAT128_HW && FLOAT128_IEEE_P (<MODE>mode)"
"xscvqpdpo %0,%1"
[(set_attr "type" "vecfloat")
(set_attr "size" "128")])
;; IEEE 128-bit comparisons
(define_insn "*cmp<mode>_hw"
[(set (match_operand:CCFP 0 "cc_reg_operand" "=y")
(compare:CCFP (match_operand:IEEE128 1 "altivec_register_operand" "v")
(match_operand:IEEE128 2 "altivec_register_operand" "v")))]
"TARGET_FLOAT128_HW && FLOAT128_IEEE_P (<MODE>mode)"
"xscmpuqp %0,%1,%2"
[(set_attr "type" "veccmp")
(set_attr "size" "128")])
;; Miscellaneous ISA 3.0 (power9) instructions
(define_insn "darn_32"
[(set (match_operand:SI 0 "register_operand" "=r")
(unspec_volatile:SI [(const_int 0)] UNSPECV_DARN_32))]
"TARGET_P9_MISC"
"darn %0,0"
[(set_attr "type" "integer")])
(define_insn "darn_raw"
[(set (match_operand:DI 0 "register_operand" "=r")
(unspec_volatile:DI [(const_int 0)] UNSPECV_DARN_RAW))]
"TARGET_P9_MISC && TARGET_64BIT"
"darn %0,2"
[(set_attr "type" "integer")])
(define_insn "darn"
[(set (match_operand:DI 0 "register_operand" "=r")
(unspec_volatile:DI [(const_int 0)] UNSPECV_DARN))]
"TARGET_P9_MISC && TARGET_64BIT"
"darn %0,1"
[(set_attr "type" "integer")])
;; Test byte within range.
;;
;; The bytes of operand 1 are organized as xx:xx:xx:vv, where xx
;; represents a byte whose value is ignored in this context and
;; vv, the least significant byte, holds the byte value that is to
;; be tested for membership within the range specified by operand 2.
;; The bytes of operand 2 are organized as xx:xx:hi:lo.
;;
;; Return in target register operand 0 a value of 1 if lo <= vv and
;; vv <= hi. Otherwise, set register operand 0 to 0.
;;
;; Though the instructions to which this expansion maps operate on
;; 64-bit registers, the current implementation only operates on
;; SI-mode operands as the high-order bits provide no information
;; that is not already available in the low-order bits. To avoid the
;; costs of data widening operations, future enhancements might allow
;; DI mode for operand 0 and/or might allow operand 1 to be QI mode.
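;;
;; Purely for illustration (not part of the machine description), a minimal C
;; model of the single-range test, following the operand packing described
;; above:
;;
;;   #include <stdint.h>
;;   int cmprb_model (uint32_t op1, uint32_t op2)
;;   {
;;     uint8_t vv = op1 & 0xff;           /* byte under test           */
;;     uint8_t lo = op2 & 0xff;           /* low bound (low byte)      */
;;     uint8_t hi = (op2 >> 8) & 0xff;    /* high bound (next byte up) */
;;     return (lo <= vv && vv <= hi) ? 1 : 0;
;;   }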
(define_expand "cmprb"
[(set (match_dup 3)
(unspec:CC [(match_operand:SI 1 "gpc_reg_operand" "r")
(match_operand:SI 2 "gpc_reg_operand" "r")]
UNSPEC_CMPRB))
(set (match_operand:SI 0 "gpc_reg_operand" "=r")
(if_then_else:SI (lt (match_dup 3)
(const_int 0))
(const_int -1)
(if_then_else (gt (match_dup 3)
(const_int 0))
(const_int 1)
(const_int 0))))]
"TARGET_P9_MISC"
{
operands[3] = gen_reg_rtx (CCmode);
})
;; The bytes of operand 1 are organized as xx:xx:xx:vv, where xx
;; represents a byte whose value is ignored in this context and
;; vv, the least significant byte, holds the byte value that is to
;; be tested for membership within the range specified by operand 2.
;; The bytes of operand 2 are organized as xx:xx:hi:lo.
;;
;; Set bit 1 (the GT bit, 0x4) of CR register operand 0 to 1 if
;; lo <= vv and vv <= hi. Otherwise, set the GT bit to 0. The other
;; 3 bits of the target CR register are all set to 0.
(define_insn "*cmprb_internal"
[(set (match_operand:CC 0 "cc_reg_operand" "=y")
(unspec:CC [(match_operand:SI 1 "gpc_reg_operand" "r")
(match_operand:SI 2 "gpc_reg_operand" "r")]
UNSPEC_CMPRB))]
"TARGET_P9_MISC"
"cmprb %0,0,%1,%2"
[(set_attr "type" "logical")])
;; Set operand 0 register to -1 if the LT bit (0x8) of condition
;; register operand 1 is on. Otherwise, set operand 0 register to 1
;; if the GT bit (0x4) of condition register operand 1 is on.
;; Otherwise, set operand 0 to 0. Note that the result stored into
;; register operand 0 is non-zero iff either the LT or GT bits are on
;; within condition register operand 1.
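;;
;; An illustrative C model of the setb result (a sketch only; lt and gt stand
;; for the LT and GT bits of the condition register field being read):
;;
;;   int setb_model (int lt, int gt)
;;   {
;;     return lt ? -1 : (gt ? 1 : 0);
;;   }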
(define_insn "setb_signed"
[(set (match_operand:SI 0 "gpc_reg_operand" "=r")
(if_then_else:SI (lt (match_operand:CC 1 "cc_reg_operand" "y")
(const_int 0))
(const_int -1)
(if_then_else (gt (match_dup 1)
(const_int 0))
(const_int 1)
(const_int 0))))]
"TARGET_P9_MISC"
"setb %0,%1"
[(set_attr "type" "logical")])
(define_insn "setb_unsigned"
[(set (match_operand:SI 0 "gpc_reg_operand" "=r")
(if_then_else:SI (ltu (match_operand:CCUNS 1 "cc_reg_operand" "y")
(const_int 0))
(const_int -1)
(if_then_else (gtu (match_dup 1)
(const_int 0))
(const_int 1)
(const_int 0))))]
"TARGET_P9_MISC"
"setb %0,%1"
[(set_attr "type" "logical")])
;; Test byte within two ranges.
;;
;; The bytes of operand 1 are organized as xx:xx:xx:vv, where xx
;; represents a byte whose value is ignored in this context and
;; vv, the least significant byte, holds the byte value that is to
;; be tested for membership within the ranges specified by operand 2.
;; The bytes of operand 2 are organized as hi_1:lo_1:hi_2:lo_2.
;;
;; Return in target register operand 0 a value of 1 if (lo_1 <= vv and
;; vv <= hi_1) or if (lo_2 <= vv and vv <= hi_2). Otherwise, set register
;; operand 0 to 0.
;;
;; Though the instructions to which this expansion maps operate on
;; 64-bit registers, the current implementation only operates on
;; SI-mode operands as the high-order bits provide no information
;; that is not already available in the low-order bits. To avoid the
;; costs of data widening operations, future enhancements might allow
;; DI mode for operand 0 and/or might allow operand 1 to be QI mode.
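;;
;; Again purely for illustration, a minimal C model of the two-range test,
;; with operand 2 laid out as hi_1:lo_1:hi_2:lo_2 from the most significant
;; byte down:
;;
;;   #include <stdint.h>
;;   int cmprb2_model (uint32_t op1, uint32_t op2)
;;   {
;;     uint8_t vv   = op1 & 0xff;
;;     uint8_t lo_2 = op2 & 0xff;
;;     uint8_t hi_2 = (op2 >> 8) & 0xff;
;;     uint8_t lo_1 = (op2 >> 16) & 0xff;
;;     uint8_t hi_1 = (op2 >> 24) & 0xff;
;;     return ((lo_1 <= vv && vv <= hi_1)
;;             || (lo_2 <= vv && vv <= hi_2)) ? 1 : 0;
;;   }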
(define_expand "cmprb2"
[(set (match_dup 3)
(unspec:CC [(match_operand:SI 1 "gpc_reg_operand" "r")
(match_operand:SI 2 "gpc_reg_operand" "r")]
UNSPEC_CMPRB2))
(set (match_operand:SI 0 "gpc_reg_operand" "=r")
(if_then_else:SI (lt (match_dup 3)
(const_int 0))
(const_int -1)
(if_then_else (gt (match_dup 3)
(const_int 0))
(const_int 1)
(const_int 0))))]
"TARGET_P9_MISC"
{
operands[3] = gen_reg_rtx (CCmode);
})
;; The bytes of operand 1 are organized as xx:xx:xx:vv, where xx
;; represents a byte whose value is ignored in this context and
;; vv, the least significant byte, holds the byte value that is to
;; be tested for membership within the ranges specified by operand 2.
;; The bytes of operand 2 are organized as hi_1:lo_1:hi_2:lo_2.
;;
;; Set bit 1 (the GT bit, 0x4) of CR register operand 0 to 1 if
;; (lo_1 <= vv and vv <= hi_1) or if (lo_2 <= vv and vv <= hi_2).
;; Otherwise, set the GT bit to 0. The other 3 bits of the target
;; CR register are all set to 0.
(define_insn "*cmprb2_internal"
[(set (match_operand:CC 0 "cc_reg_operand" "=y")
(unspec:CC [(match_operand:SI 1 "gpc_reg_operand" "r")
(match_operand:SI 2 "gpc_reg_operand" "r")]
UNSPEC_CMPRB2))]
"TARGET_P9_MISC"
"cmprb %0,1,%1,%2"
[(set_attr "type" "logical")])
;; Test byte membership within set of 8 bytes.
;;
;; The bytes of operand 1 are organized as xx:xx:xx:vv, where xx
;; represents a byte whose value is ignored in this context and
;; vv, the least significant byte, holds the byte value that is to
;; be tested for membership within the set specified by operand 2.
;; The bytes of operand 2 are organized as e0:e1:e2:e3:e4:e5:e6:e7.
;;
;; Return in target register operand 0 a value of 1 if vv equals one
;; of the values e0, e1, e2, e3, e4, e5, e6, or e7. Otherwise, set
;; register operand 0 to 0. Note that the 8 byte values held within
;; operand 2 need not be unique.
;;
;; Though the instructions to which this expansion maps operate on
;; 64-bit registers, the current implementation requires that operands
;; 0 and 1 have mode SI as the high-order bits provide no information
;; that is not already available in the low-order bits. To avoid the
;; costs of data widening operations, future enhancements might allow
;; DI mode for operand 0 and/or might allow operand 1 to be QI mode.
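;;
;; An illustrative C model of the byte-membership test (a sketch only, not
;; part of the machine description):
;;
;;   #include <stdint.h>
;;   int cmpeqb_model (uint32_t op1, uint64_t op2)
;;   {
;;     uint8_t vv = op1 & 0xff;
;;     for (int i = 0; i < 8; i++)
;;       if (((op2 >> (8 * i)) & 0xff) == vv)
;;         return 1;
;;     return 0;
;;   }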
(define_expand "cmpeqb"
[(set (match_dup 3)
(unspec:CC [(match_operand:SI 1 "gpc_reg_operand" "r")
(match_operand:DI 2 "gpc_reg_operand" "r")]
UNSPEC_CMPEQB))
(set (match_operand:SI 0 "gpc_reg_operand" "=r")
(if_then_else:SI (lt (match_dup 3)
(const_int 0))
(const_int -1)
(if_then_else (gt (match_dup 3)
(const_int 0))
(const_int 1)
(const_int 0))))]
"TARGET_P9_MISC && TARGET_64BIT"
{
operands[3] = gen_reg_rtx (CCmode);
})
;; The bytes of operand 1 are organized as xx:xx:xx:vv, where xx
;; represents a byte whose value is ignored in this context and
;; vv, the least significant byte, holds the byte value that is to
;; be tested for membership within the set specified by operand 2.
;; The bytes of operand 2 are organized as e0:e1:e2:e3:e4:e5:e6:e7.
;;
;; Set bit 1 (the GT bit, 0x4) of CR register operand 0 to 1 if vv
;; equals one of the values e0, e1, e2, e3, e4, e5, e6, or e7. Otherwise,
;; set the GT bit to zero. The other 3 bits of the target CR register
;; are all set to 0.
(define_insn "*cmpeqb_internal"
[(set (match_operand:CC 0 "cc_reg_operand" "=y")
(unspec:CC [(match_operand:SI 1 "gpc_reg_operand" "r")
(match_operand:DI 2 "gpc_reg_operand" "r")]
UNSPEC_CMPEQB))]
"TARGET_P9_MISC && TARGET_64BIT"
"cmpeqb %0,%1,%2"
[(set_attr "type" "logical")])
(include "sync.md")
(include "vector.md")
(include "vsx.md")
(include "altivec.md")
(include "mma.md")
(include "dfp.md")
(include "crypto.md")
(include "htm.md")