1 | //===--- arm_neon.td - ARM NEON compiler interface ------------------------===// |
2 | // |
3 | // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. |
4 | // See https://llvm.org/LICENSE.txt for license information. |
5 | // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception |
6 | // |
7 | //===----------------------------------------------------------------------===// |
8 | // |
9 | // This file defines the TableGen definitions from which the ARM NEON header |
10 | // file will be generated. See ARM document DUI0348B. |
11 | // |
12 | //===----------------------------------------------------------------------===// |
13 | |
14 | include "arm_neon_incl.td" |
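
// Each Op below describes the expression that NeonEmitter substitutes for an
// intrinsic's body; $p0, $p1, ... are the intrinsic's parameters. As an
// illustrative sketch (not the emitter's verbatim output), an instruction that
// uses OP_ADD over the signed 8-bit type lands in the generated header roughly
// as:
//
//   __ai int8x8_t vadd_s8(int8x8_t __p0, int8x8_t __p1) {
//     return __p0 + __p1;
//   }
//
// where __ai is the header's always_inline attribute macro.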
15 | |
16 | def OP_ADD : Op<(op "+", $p0, $p1)>; |
17 | def OP_ADDL : Op<(op "+", (call "vmovl", $p0), (call "vmovl", $p1))>; |
18 | def OP_ADDLHi : Op<(op "+", (call "vmovl_high", $p0), |
19 | (call "vmovl_high", $p1))>; |
20 | def OP_ADDW : Op<(op "+", $p0, (call "vmovl", $p1))>; |
21 | def OP_ADDWHi : Op<(op "+", $p0, (call "vmovl_high", $p1))>; |
22 | def OP_SUB : Op<(op "-", $p0, $p1)>; |
23 | def OP_SUBL : Op<(op "-", (call "vmovl", $p0), (call "vmovl", $p1))>; |
24 | def OP_SUBLHi : Op<(op "-", (call "vmovl_high", $p0), |
25 | (call "vmovl_high", $p1))>; |
26 | def OP_SUBW : Op<(op "-", $p0, (call "vmovl", $p1))>; |
27 | def OP_SUBWHi : Op<(op "-", $p0, (call "vmovl_high", $p1))>; |
28 | def OP_MUL : Op<(op "*", $p0, $p1)>; |
29 | def OP_MLA : Op<(op "+", $p0, (op "*", $p1, $p2))>; |
30 | def OP_MLAL : Op<(op "+", $p0, (call "vmull", $p1, $p2))>; |
31 | def OP_MULLHi : Op<(call "vmull", (call "vget_high", $p0), |
32 | (call "vget_high", $p1))>; |
33 | def OP_MULLHi_P64 : Op<(call "vmull", |
34 | (cast "poly64_t", (call "vget_high", $p0)), |
35 | (cast "poly64_t", (call "vget_high", $p1)))>; |
36 | def OP_MULLHi_N : Op<(call "vmull_n", (call "vget_high", $p0), $p1)>; |
37 | def OP_MLALHi : Op<(call "vmlal", $p0, (call "vget_high", $p1), |
38 | (call "vget_high", $p2))>; |
39 | def OP_MLALHi_N : Op<(call "vmlal_n", $p0, (call "vget_high", $p1), $p2)>; |
40 | def OP_MLS : Op<(op "-", $p0, (op "*", $p1, $p2))>; |
41 | def OP_FMLS : Op<(call "vfma", $p0, (op "-", $p1), $p2)>; |
42 | def OP_MLSL : Op<(op "-", $p0, (call "vmull", $p1, $p2))>; |
43 | def OP_MLSLHi : Op<(call "vmlsl", $p0, (call "vget_high", $p1), |
44 | (call "vget_high", $p2))>; |
45 | def OP_MLSLHi_N : Op<(call "vmlsl_n", $p0, (call "vget_high", $p1), $p2)>; |
46 | def OP_MUL_N : Op<(op "*", $p0, (dup $p1))>; |
47 | def OP_MULX_N : Op<(call "vmulx", $p0, (dup $p1))>; |
48 | def OP_MLA_N : Op<(op "+", $p0, (op "*", $p1, (dup $p2)))>; |
49 | def OP_MLS_N : Op<(op "-", $p0, (op "*", $p1, (dup $p2)))>; |
50 | def OP_FMLA_N : Op<(call "vfma", $p0, $p1, (dup $p2))>; |
51 | def OP_FMLS_N : Op<(call "vfma", $p0, (op "-", $p1), (dup $p2))>; |
52 | def OP_MLAL_N : Op<(op "+", $p0, (call "vmull", $p1, (dup $p2)))>; |
53 | def OP_MLSL_N : Op<(op "-", $p0, (call "vmull", $p1, (dup $p2)))>; |
54 | def OP_MUL_LN : Op<(op "*", $p0, (splat $p1, $p2))>; |
55 | def OP_MULX_LN : Op<(call "vmulx", $p0, (splat $p1, $p2))>; |
56 | def OP_MULL_LN : Op<(call "vmull", $p0, (splat $p1, $p2))>; |
57 | def OP_MULLHi_LN: Op<(call "vmull", (call "vget_high", $p0), (splat $p1, $p2))>; |
58 | def OP_MLA_LN : Op<(op "+", $p0, (op "*", $p1, (splat $p2, $p3)))>; |
59 | def OP_MLS_LN : Op<(op "-", $p0, (op "*", $p1, (splat $p2, $p3)))>; |
60 | def OP_MLAL_LN : Op<(op "+", $p0, (call "vmull", $p1, (splat $p2, $p3)))>; |
61 | def OP_MLALHi_LN: Op<(op "+", $p0, (call "vmull", (call "vget_high", $p1), |
62 | (splat $p2, $p3)))>; |
63 | def OP_MLSL_LN : Op<(op "-", $p0, (call "vmull", $p1, (splat $p2, $p3)))>; |
64 | def OP_MLSLHi_LN : Op<(op "-", $p0, (call "vmull", (call "vget_high", $p1), |
65 | (splat $p2, $p3)))>; |
66 | def OP_QDMULL_LN : Op<(call "vqdmull", $p0, (splat $p1, $p2))>; |
67 | def OP_QDMULLHi_LN : Op<(call "vqdmull", (call "vget_high", $p0), |
68 | (splat $p1, $p2))>; |
69 | def OP_QDMLAL_LN : Op<(call "vqdmlal", $p0, $p1, (splat $p2, $p3))>; |
70 | def OP_QDMLALHi_LN : Op<(call "vqdmlal", $p0, (call "vget_high", $p1), |
71 | (splat $p2, $p3))>; |
72 | def OP_QDMLSL_LN : Op<(call "vqdmlsl", $p0, $p1, (splat $p2, $p3))>; |
73 | def OP_QDMLSLHi_LN : Op<(call "vqdmlsl", $p0, (call "vget_high", $p1), |
74 | (splat $p2, $p3))>; |
75 | def OP_QDMULH_LN : Op<(call "vqdmulh", $p0, (splat $p1, $p2))>; |
76 | def OP_QRDMULH_LN : Op<(call "vqrdmulh", $p0, (splat $p1, $p2))>; |
77 | def OP_QRDMLAH : Op<(call "vqadd", $p0, (call "vqrdmulh", $p1, $p2))>; |
78 | def OP_QRDMLSH : Op<(call "vqsub", $p0, (call "vqrdmulh", $p1, $p2))>; |
79 | def OP_QRDMLAH_LN : Op<(call "vqadd", $p0, (call "vqrdmulh", $p1, (splat $p2, $p3)))>; |
80 | def OP_QRDMLSH_LN : Op<(call "vqsub", $p0, (call "vqrdmulh", $p1, (splat $p2, $p3)))>; |
81 | def OP_FMS_LN : Op<(call "vfma_lane", $p0, (op "-", $p1), $p2, $p3)>; |
82 | def OP_FMS_LNQ : Op<(call "vfma_laneq", $p0, (op "-", $p1), $p2, $p3)>; |
83 | def OP_TRN1 : Op<(shuffle $p0, $p1, (interleave (decimate mask0, 2), |
84 | (decimate mask1, 2)))>; |
85 | def OP_ZIP1 : Op<(shuffle $p0, $p1, (lowhalf (interleave mask0, mask1)))>; |
86 | def OP_UZP1 : Op<(shuffle $p0, $p1, (add (decimate mask0, 2), |
87 | (decimate mask1, 2)))>; |
88 | def OP_TRN2 : Op<(shuffle $p0, $p1, (interleave |
89 | (decimate (rotl mask0, 1), 2), |
90 | (decimate (rotl mask1, 1), 2)))>; |
91 | def OP_ZIP2 : Op<(shuffle $p0, $p1, (highhalf (interleave mask0, mask1)))>; |
92 | def OP_UZP2 : Op<(shuffle $p0, $p1, (add (decimate (rotl mask0, 1), 2), |
93 | (decimate (rotl mask1, 1), 2)))>; |
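// For illustration: with 4-element inputs, mask0 = {0,1,2,3} and mask1 =
// {4,5,6,7} index into the concatenation of $p0 and $p1, so the defs above
// expand roughly to these shuffle index vectors:
//   OP_ZIP1 -> {0,4,1,5}   OP_ZIP2 -> {2,6,3,7}
//   OP_TRN1 -> {0,4,2,6}   OP_TRN2 -> {1,5,3,7}
//   OP_UZP1 -> {0,2,4,6}   OP_UZP2 -> {1,3,5,7}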
94 | def OP_EQ : Op<(cast "R", (op "==", $p0, $p1))>; |
95 | def OP_GE : Op<(cast "R", (op ">=", $p0, $p1))>; |
96 | def OP_LE : Op<(cast "R", (op "<=", $p0, $p1))>; |
97 | def OP_GT : Op<(cast "R", (op ">", $p0, $p1))>; |
98 | def OP_LT : Op<(cast "R", (op "<", $p0, $p1))>; |
99 | def OP_NEG : Op<(op "-", $p0)>; |
100 | def OP_NOT : Op<(op "~", $p0)>; |
101 | def OP_AND : Op<(op "&", $p0, $p1)>; |
102 | def OP_OR : Op<(op "|", $p0, $p1)>; |
103 | def OP_XOR : Op<(op "^", $p0, $p1)>; |
104 | def OP_ANDN : Op<(op "&", $p0, (op "~", $p1))>; |
105 | def OP_ORN : Op<(op "|", $p0, (op "~", $p1))>; |
106 | def OP_CAST : Op<(cast "R", $p0)>; |
107 | def OP_HI : Op<(shuffle $p0, $p0, (highhalf mask0))>; |
108 | def OP_LO : Op<(shuffle $p0, $p0, (lowhalf mask0))>; |
109 | def OP_CONC : Op<(shuffle $p0, $p1, (add mask0, mask1))>; |
110 | def OP_DUP : Op<(dup $p0)>; |
111 | def OP_DUP_LN : Op<(splat $p0, $p1)>; |
112 | def OP_SEL : Op<(cast "R", (op "|", |
113 | (op "&", $p0, (cast $p0, $p1)), |
114 | (op "&", (op "~", $p0), (cast $p0, $p2))))>; |
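// OP_SEL is a bitwise select: with mask $p0 and inputs $p1/$p2 it emits,
// roughly, (R)(($p0 & (mask-typed)$p1) | (~$p0 & (mask-typed)$p2)), taking
// each bit from $p1 where the mask bit is set and from $p2 otherwise.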
115 | def OP_REV16 : Op<(shuffle $p0, $p0, (rev 16, mask0))>; |
116 | def OP_REV32 : Op<(shuffle $p0, $p0, (rev 32, mask0))>; |
117 | def OP_REV64 : Op<(shuffle $p0, $p0, (rev 64, mask0))>; |
118 | def OP_XTN : Op<(call "vcombine", $p0, (call "vmovn", $p1))>; |
119 | def OP_SQXTUN : Op<(call "vcombine", (cast $p0, "U", $p0), |
120 | (call "vqmovun", $p1))>; |
121 | def OP_QXTN : Op<(call "vcombine", $p0, (call "vqmovn", $p1))>; |
122 | def OP_VCVT_NA_HI_F16 : Op<(call "vcombine", $p0, (call "vcvt_f16_f32", $p1))>; |
123 | def OP_VCVT_NA_HI_F32 : Op<(call "vcombine", $p0, (call "vcvt_f32_f64", $p1))>; |
124 | def OP_VCVT_EX_HI_F32 : Op<(call "vcvt_f32_f16", (call "vget_high", $p0))>; |
125 | def OP_VCVT_EX_HI_F64 : Op<(call "vcvt_f64_f32", (call "vget_high", $p0))>; |
126 | def OP_VCVTX_HI : Op<(call "vcombine", $p0, (call "vcvtx_f32", $p1))>; |
127 | def OP_REINT : Op<(cast "R", $p0)>; |
128 | def OP_ADDHNHi : Op<(call "vcombine", $p0, (call "vaddhn", $p1, $p2))>; |
129 | def OP_RADDHNHi : Op<(call "vcombine", $p0, (call "vraddhn", $p1, $p2))>; |
130 | def OP_SUBHNHi : Op<(call "vcombine", $p0, (call "vsubhn", $p1, $p2))>; |
131 | def OP_RSUBHNHi : Op<(call "vcombine", $p0, (call "vrsubhn", $p1, $p2))>; |
132 | def OP_ABDL : Op<(cast "R", (call "vmovl", (cast $p0, "U", |
133 | (call "vabd", $p0, $p1))))>; |
134 | def OP_ABDLHi : Op<(call "vabdl", (call "vget_high", $p0), |
135 | (call "vget_high", $p1))>; |
136 | def OP_ABA : Op<(op "+", $p0, (call "vabd", $p1, $p2))>; |
137 | def OP_ABAL : Op<(op "+", $p0, (call "vabdl", $p1, $p2))>; |
138 | def OP_ABALHi : Op<(call "vabal", $p0, (call "vget_high", $p1), |
139 | (call "vget_high", $p2))>; |
140 | def OP_QDMULLHi : Op<(call "vqdmull", (call "vget_high", $p0), |
141 | (call "vget_high", $p1))>; |
142 | def OP_QDMULLHi_N : Op<(call "vqdmull_n", (call "vget_high", $p0), $p1)>; |
143 | def OP_QDMLALHi : Op<(call "vqdmlal", $p0, (call "vget_high", $p1), |
144 | (call "vget_high", $p2))>; |
145 | def OP_QDMLALHi_N : Op<(call "vqdmlal_n", $p0, (call "vget_high", $p1), $p2)>; |
146 | def OP_QDMLSLHi : Op<(call "vqdmlsl", $p0, (call "vget_high", $p1), |
147 | (call "vget_high", $p2))>; |
148 | def OP_QDMLSLHi_N : Op<(call "vqdmlsl_n", $p0, (call "vget_high", $p1), $p2)>; |
149 | def OP_DIV : Op<(op "/", $p0, $p1)>; |
150 | def OP_LONG_HI : Op<(cast "R", (call (name_replace "_high_", "_"), |
151 | (call "vget_high", $p0), $p1))>; |
152 | def OP_NARROW_HI : Op<(cast "R", (call "vcombine", |
153 | (cast "R", "H", $p0), |
154 | (cast "R", "H", |
155 | (call (name_replace "_high_", "_"), |
156 | $p1, $p2))))>; |
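// OP_LONG_HI and OP_NARROW_HI derive the callee by dropping "_high" from the
// current intrinsic's name. Hedged example: vshrn_high_n_s16 (defined further
// below) behaves roughly like
//
//   int8x16_t vshrn_high_n_s16(int8x8_t __p0, int16x8_t __p1, const int __p2) {
//     return vcombine_s8(__p0, vshrn_n_s16(__p1, __p2));
//   }
//
// (the real header implements the immediate forms as macros).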
157 | def OP_MOVL_HI : LOp<[(save_temp $a1, (call "vget_high", $p0)), |
158 | (cast "R", |
159 | (call "vshll_n", $a1, (literal "int32_t", "0")))]>; |
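// LOp bodies are emitted as a statement sequence and save_temp introduces a
// local. Sketch: vmovl_high_s8 (defined below) behaves roughly like
//
//   int16x8_t vmovl_high_s8(int8x16_t __p0) {
//     int8x8_t __a1 = vget_high_s8(__p0);
//     return (int16x8_t)vshll_n_s8(__a1, 0);
//   }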
160 | def OP_COPY_LN : Op<(call "vset_lane", (call "vget_lane", $p2, $p3), $p0, $p1)>; |
161 | def OP_SCALAR_MUL_LN : Op<(op "*", $p0, (call "vget_lane", $p1, $p2))>; |
162 | def OP_SCALAR_MULX_LN : Op<(call "vmulx", $p0, (call "vget_lane", $p1, $p2))>; |
163 | def OP_SCALAR_VMULX_LN : LOp<[(save_temp $x, (call "vget_lane", $p0, |
164 | (literal "int32_t", "0"))), |
165 | (save_temp $y, (call "vget_lane", $p1, $p2)), |
166 | (save_temp $z, (call "vmulx", $x, $y)), |
167 | (call "vset_lane", $z, $p0, $p2)]>; |
168 | def OP_SCALAR_VMULX_LNQ : LOp<[(save_temp $x, (call "vget_lane", $p0, |
169 | (literal "int32_t", "0"))), |
170 | (save_temp $y, (call "vget_lane", $p1, $p2)), |
171 | (save_temp $z, (call "vmulx", $x, $y)), |
172 | (call "vset_lane", $z, $p0, (literal "int32_t", |
173 | "0"))]>; |
174 | class ScalarMulOp<string opname> : |
175 | Op<(call opname, $p0, (call "vget_lane", $p1, $p2))>; |
176 | |
177 | def OP_SCALAR_QDMULL_LN : ScalarMulOp<"vqdmull">; |
178 | def OP_SCALAR_QDMULH_LN : ScalarMulOp<"vqdmulh">; |
179 | def OP_SCALAR_QRDMULH_LN : ScalarMulOp<"vqrdmulh">; |
180 | |
181 | def OP_SCALAR_QRDMLAH_LN : Op<(call "vqadd", $p0, (call "vqrdmulh", $p1, |
182 | (call "vget_lane", $p2, $p3)))>; |
183 | def OP_SCALAR_QRDMLSH_LN : Op<(call "vqsub", $p0, (call "vqrdmulh", $p1, |
184 | (call "vget_lane", $p2, $p3)))>; |
185 | |
186 | def OP_SCALAR_HALF_GET_LN : Op<(bitcast "float16_t", |
187 | (call "vget_lane", |
188 | (bitcast "int16x4_t", $p0), $p1))>; |
189 | def OP_SCALAR_HALF_GET_LNQ : Op<(bitcast "float16_t", |
190 | (call "vget_lane", |
191 | (bitcast "int16x8_t", $p0), $p1))>; |
192 | def OP_SCALAR_HALF_SET_LN : Op<(bitcast "float16x4_t", |
193 | (call "vset_lane", |
194 | (bitcast "int16_t", $p0), |
195 | (bitcast "int16x4_t", $p1), $p2))>; |
196 | def OP_SCALAR_HALF_SET_LNQ : Op<(bitcast "float16x8_t", |
197 | (call "vset_lane", |
198 | (bitcast "int16_t", $p0), |
199 | (bitcast "int16x8_t", $p1), $p2))>; |
200 | |
201 | def OP_DOT_LN |
202 | : Op<(call "vdot", $p0, $p1, |
203 | (bitcast $p1, (splat(bitcast "uint32x2_t", $p2), $p3)))>; |
204 | def OP_DOT_LNQ |
205 | : Op<(call "vdot", $p0, $p1, |
206 | (bitcast $p1, (splat(bitcast "uint32x4_t", $p2), $p3)))>; |
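// OP_DOT_LN/OP_DOT_LNQ view $p2 as a vector of 32-bit elements, broadcast the
// selected lane (a group of four bytes) and feed it to vdot. Sketch for the
// unsigned 64-bit form (shown for illustration only):
//
//   vdot_lane_u32(r, a, b, lane) ~
//       vdot_u32(r, a, (uint8x8_t)vdup_lane_u32((uint32x2_t)b, lane))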
207 | |
208 | def OP_FMLAL_LN : Op<(call "vfmlal_low", $p0, $p1, |
209 | (dup_typed $p1, (call "vget_lane", $p2, $p3)))>; |
210 | def OP_FMLSL_LN : Op<(call "vfmlsl_low", $p0, $p1, |
211 | (dup_typed $p1, (call "vget_lane", $p2, $p3)))>; |
212 | def OP_FMLAL_LN_Hi : Op<(call "vfmlal_high", $p0, $p1, |
213 | (dup_typed $p1, (call "vget_lane", $p2, $p3)))>; |
214 | def OP_FMLSL_LN_Hi : Op<(call "vfmlsl_high", $p0, $p1, |
215 | (dup_typed $p1, (call "vget_lane", $p2, $p3)))>; |
216 | |
217 | //===----------------------------------------------------------------------===// |
218 | // Instructions |
219 | //===----------------------------------------------------------------------===// |
220 | |
221 | //////////////////////////////////////////////////////////////////////////////// |
222 | // E.3.1 Addition |
223 | def VADD : IOpInst<"vadd", "ddd", |
224 | "csilfUcUsUiUlQcQsQiQlQfQUcQUsQUiQUl", OP_ADD>; |
225 | def VADDL : SOpInst<"vaddl", "wdd", "csiUcUsUi", OP_ADDL>; |
226 | def VADDW : SOpInst<"vaddw", "wwd", "csiUcUsUi", OP_ADDW>; |
227 | def VHADD : SInst<"vhadd", "ddd", "csiUcUsUiQcQsQiQUcQUsQUi">; |
228 | def VRHADD : SInst<"vrhadd", "ddd", "csiUcUsUiQcQsQiQUcQUsQUi">; |
229 | def VQADD : SInst<"vqadd", "ddd", "csilUcUsUiUlQcQsQiQlQUcQUsQUiQUl">; |
230 | def VADDHN : IInst<"vaddhn", "hkk", "silUsUiUl">; |
231 | def VRADDHN : IInst<"vraddhn", "hkk", "silUsUiUl">; |
232 | |
233 | //////////////////////////////////////////////////////////////////////////////// |
234 | // E.3.2 Multiplication |
235 | def VMUL : IOpInst<"vmul", "ddd", "csifUcUsUiQcQsQiQfQUcQUsQUi", OP_MUL>; |
236 | def VMULP : SInst<"vmul", "ddd", "PcQPc">; |
237 | def VMLA : IOpInst<"vmla", "dddd", "csifUcUsUiQcQsQiQfQUcQUsQUi", OP_MLA>; |
238 | def VMLAL : SOpInst<"vmlal", "wwdd", "csiUcUsUi", OP_MLAL>; |
239 | def VMLS : IOpInst<"vmls", "dddd", "csifUcUsUiQcQsQiQfQUcQUsQUi", OP_MLS>; |
240 | def VMLSL : SOpInst<"vmlsl", "wwdd", "csiUcUsUi", OP_MLSL>; |
241 | def VQDMULH : SInst<"vqdmulh", "ddd", "siQsQi">; |
242 | def VQRDMULH : SInst<"vqrdmulh", "ddd", "siQsQi">; |
243 | |
244 | let ArchGuard = "defined(__ARM_FEATURE_QRDMX)" in { |
245 | def VQRDMLAH : SOpInst<"vqrdmlah", "dddd", "siQsQi", OP_QRDMLAH>; |
246 | def VQRDMLSH : SOpInst<"vqrdmlsh", "dddd", "siQsQi", OP_QRDMLSH>; |
247 | } |
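// ArchGuard becomes a preprocessor condition around the generated intrinsics,
// so (as a sketch) the definitions above are only visible when the header is
// included with __ARM_FEATURE_QRDMX defined:
//
//   #if defined(__ARM_FEATURE_QRDMX)
//   ... vqrdmlah_s16, vqrdmlahq_s16, vqrdmlsh_s16, ... definitions ...
//   #endif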
248 | |
249 | def VQDMLAL : SInst<"vqdmlal", "wwdd", "si">; |
250 | def VQDMLSL : SInst<"vqdmlsl", "wwdd", "si">; |
251 | def VMULL : SInst<"vmull", "wdd", "csiUcUsUiPc">; |
252 | def VQDMULL : SInst<"vqdmull", "wdd", "si">; |
253 | |
254 | //////////////////////////////////////////////////////////////////////////////// |
255 | // E.3.3 Subtraction |
256 | def VSUB : IOpInst<"vsub", "ddd", |
257 | "csilfUcUsUiUlQcQsQiQlQfQUcQUsQUiQUl", OP_SUB>; |
258 | def VSUBL : SOpInst<"vsubl", "wdd", "csiUcUsUi", OP_SUBL>; |
259 | def VSUBW : SOpInst<"vsubw", "wwd", "csiUcUsUi", OP_SUBW>; |
260 | def VQSUB : SInst<"vqsub", "ddd", "csilUcUsUiUlQcQsQiQlQUcQUsQUiQUl">; |
261 | def VHSUB : SInst<"vhsub", "ddd", "csiUcUsUiQcQsQiQUcQUsQUi">; |
262 | def VSUBHN : IInst<"vsubhn", "hkk", "silUsUiUl">; |
263 | def VRSUBHN : IInst<"vrsubhn", "hkk", "silUsUiUl">; |
264 | |
265 | //////////////////////////////////////////////////////////////////////////////// |
266 | // E.3.4 Comparison |
267 | def VCEQ : IOpInst<"vceq", "udd", "csifUcUsUiPcQcQsQiQfQUcQUsQUiQPc", OP_EQ>; |
268 | def VCGE : SOpInst<"vcge", "udd", "csifUcUsUiQcQsQiQfQUcQUsQUi", OP_GE>; |
269 | let InstName = "vcge" in |
270 | def VCLE : SOpInst<"vcle", "udd", "csifUcUsUiQcQsQiQfQUcQUsQUi", OP_LE>; |
271 | def VCGT : SOpInst<"vcgt", "udd", "csifUcUsUiQcQsQiQfQUcQUsQUi", OP_GT>; |
272 | let InstName = "vcgt" in |
273 | def VCLT : SOpInst<"vclt", "udd", "csifUcUsUiQcQsQiQfQUcQUsQUi", OP_LT>; |
274 | let InstName = "vacge" in { |
275 | def VCAGE : IInst<"vcage", "udd", "fQf">; |
276 | def VCALE : IInst<"vcale", "udd", "fQf">; |
277 | } |
278 | let InstName = "vacgt" in { |
279 | def VCAGT : IInst<"vcagt", "udd", "fQf">; |
280 | def VCALT : IInst<"vcalt", "udd", "fQf">; |
281 | } |
282 | def VTST : WInst<"vtst", "udd", "csiUcUsUiPcPsQcQsQiQUcQUsQUiQPcQPs">; |
283 | |
284 | //////////////////////////////////////////////////////////////////////////////// |
285 | // E.3.5 Absolute Difference |
286 | def VABD : SInst<"vabd", "ddd", "csiUcUsUifQcQsQiQUcQUsQUiQf">; |
287 | def VABDL : SOpInst<"vabdl", "wdd", "csiUcUsUi", OP_ABDL>; |
288 | def VABA : SOpInst<"vaba", "dddd", "csiUcUsUiQcQsQiQUcQUsQUi", OP_ABA>; |
289 | def VABAL : SOpInst<"vabal", "wwdd", "csiUcUsUi", OP_ABAL>; |
290 | |
291 | //////////////////////////////////////////////////////////////////////////////// |
292 | // E.3.6 Max/Min |
293 | def VMAX : SInst<"vmax", "ddd", "csiUcUsUifQcQsQiQUcQUsQUiQf">; |
294 | def VMIN : SInst<"vmin", "ddd", "csiUcUsUifQcQsQiQUcQUsQUiQf">; |
295 | |
296 | //////////////////////////////////////////////////////////////////////////////// |
297 | // E.3.7 Pairwise Addition |
298 | def VPADD : IInst<"vpadd", "ddd", "csiUcUsUif">; |
299 | def VPADDL : SInst<"vpaddl", "nd", "csiUcUsUiQcQsQiQUcQUsQUi">; |
300 | def VPADAL : SInst<"vpadal", "nnd", "csiUcUsUiQcQsQiQUcQUsQUi">; |
301 | |
302 | //////////////////////////////////////////////////////////////////////////////// |
303 | // E.3.8-9 Folding Max/Min |
304 | def VPMAX : SInst<"vpmax", "ddd", "csiUcUsUif">; |
305 | def VPMIN : SInst<"vpmin", "ddd", "csiUcUsUif">; |
306 | |
307 | //////////////////////////////////////////////////////////////////////////////// |
308 | // E.3.10 Reciprocal/Sqrt |
309 | def VRECPS : IInst<"vrecps", "ddd", "fQf">; |
310 | def VRSQRTS : IInst<"vrsqrts", "ddd", "fQf">; |
311 | |
312 | //////////////////////////////////////////////////////////////////////////////// |
313 | // E.3.11 Shifts by signed variable |
314 | def VSHL : SInst<"vshl", "ddx", "csilUcUsUiUlQcQsQiQlQUcQUsQUiQUl">; |
315 | def VQSHL : SInst<"vqshl", "ddx", "csilUcUsUiUlQcQsQiQlQUcQUsQUiQUl">; |
316 | def VRSHL : SInst<"vrshl", "ddx", "csilUcUsUiUlQcQsQiQlQUcQUsQUiQUl">; |
317 | def VQRSHL : SInst<"vqrshl", "ddx", "csilUcUsUiUlQcQsQiQlQUcQUsQUiQUl">; |
318 | |
319 | //////////////////////////////////////////////////////////////////////////////// |
320 | // E.3.12 Shifts by constant |
321 | let isShift = 1 in { |
322 | def VSHR_N : SInst<"vshr_n", "ddi", "csilUcUsUiUlQcQsQiQlQUcQUsQUiQUl">; |
323 | def VSHL_N : IInst<"vshl_n", "ddi", "csilUcUsUiUlQcQsQiQlQUcQUsQUiQUl">; |
324 | def VRSHR_N : SInst<"vrshr_n", "ddi", "csilUcUsUiUlQcQsQiQlQUcQUsQUiQUl">; |
325 | def VSRA_N : SInst<"vsra_n", "dddi", "csilUcUsUiUlQcQsQiQlQUcQUsQUiQUl">; |
326 | def VRSRA_N : SInst<"vrsra_n", "dddi", "csilUcUsUiUlQcQsQiQlQUcQUsQUiQUl">; |
327 | def VQSHL_N : SInst<"vqshl_n", "ddi", "csilUcUsUiUlQcQsQiQlQUcQUsQUiQUl">; |
328 | def VQSHLU_N : SInst<"vqshlu_n", "udi", "csilQcQsQiQl">; |
329 | def VSHRN_N : IInst<"vshrn_n", "hki", "silUsUiUl">; |
330 | def VQSHRUN_N : SInst<"vqshrun_n", "eki", "sil">; |
331 | def VQRSHRUN_N : SInst<"vqrshrun_n", "eki", "sil">; |
332 | def VQSHRN_N : SInst<"vqshrn_n", "hki", "silUsUiUl">; |
333 | def VRSHRN_N : IInst<"vrshrn_n", "hki", "silUsUiUl">; |
334 | def VQRSHRN_N : SInst<"vqrshrn_n", "hki", "silUsUiUl">; |
335 | def VSHLL_N : SInst<"vshll_n", "wdi", "csiUcUsUi">; |
336 | |
337 | //////////////////////////////////////////////////////////////////////////////// |
338 | // E.3.13 Shifts with insert |
339 | def VSRI_N : WInst<"vsri_n", "dddi", |
340 | "csilUcUsUiUlPcPsQcQsQiQlQUcQUsQUiQUlQPcQPs">; |
341 | def VSLI_N : WInst<"vsli_n", "dddi", |
342 | "csilUcUsUiUlPcPsQcQsQiQlQUcQUsQUiQUlQPcQPs">; |
343 | } |
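// The 'i' operands above are immediates: callers must pass a compile-time
// constant, which Sema range-checks against the element width. Illustrative
// use (assuming <arm_neon.h>):
//
//   int8x8_t r = vshr_n_s8(v, 3);   // per-lane arithmetic shift right by 3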
344 | |
345 | //////////////////////////////////////////////////////////////////////////////// |
346 | // E.3.14 Loads and stores of a single vector |
347 | def VLD1 : WInst<"vld1", "dc", |
348 | "QUcQUsQUiQUlQcQsQiQlQfQPcQPsUcUsUiUlcsilfPcPs">; |
349 | def VLD1_X2 : WInst<"vld1_x2", "2c", |
350 | "cfilsUcUiUlUsQcQfQiQlQsQUcQUiQUlQUsPcPsQPcQPs">; |
351 | def VLD1_X3 : WInst<"vld1_x3", "3c", |
352 | "cfilsUcUiUlUsQcQfQiQlQsQUcQUiQUlQUsPcPsQPcQPs">; |
353 | def VLD1_X4 : WInst<"vld1_x4", "4c", |
354 | "cfilsUcUiUlUsQcQfQiQlQsQUcQUiQUlQUsPcPsQPcQPs">; |
355 | def VLD1_LANE : WInst<"vld1_lane", "dcdi", |
356 | "QUcQUsQUiQUlQcQsQiQlQfQPcQPsUcUsUiUlcsilfPcPs">; |
357 | def VLD1_DUP : WInst<"vld1_dup", "dc", |
358 | "QUcQUsQUiQUlQcQsQiQlQfQPcQPsUcUsUiUlcsilfPcPs">; |
359 | def VST1 : WInst<"vst1", "vpd", |
360 | "QUcQUsQUiQUlQcQsQiQlQfQPcQPsUcUsUiUlcsilfPcPs">; |
361 | def VST1_X2 : WInst<"vst1_x2", "vp2", |
362 | "cfilsUcUiUlUsQcQfQiQlQsQUcQUiQUlQUsPcPsQPcQPs">; |
363 | def VST1_X3 : WInst<"vst1_x3", "vp3", |
364 | "cfilsUcUiUlUsQcQfQiQlQsQUcQUiQUlQUsPcPsQPcQPs">; |
365 | def VST1_X4 : WInst<"vst1_x4", "vp4", |
366 | "cfilsUcUiUlUsQcQfQiQlQsQUcQUiQUlQUsPcPsQPcQPs">; |
367 | def VST1_LANE : WInst<"vst1_lane", "vpdi", |
368 | "QUcQUsQUiQUlQcQsQiQlQfQPcQPsUcUsUiUlcsilfPcPs">; |
369 | let ArchGuard = "(__ARM_FP & 2)" in { |
370 | def VLD1_F16 : WInst<"vld1", "dc", "hQh">; |
371 | def VLD1_X2_F16 : WInst<"vld1_x2", "2c", "hQh">; |
372 | def VLD1_X3_F16 : WInst<"vld1_x3", "3c", "hQh">; |
373 | def VLD1_X4_F16 : WInst<"vld1_x4", "4c", "hQh">; |
374 | def VLD1_LANE_F16 : WInst<"vld1_lane", "dcdi", "hQh">; |
375 | def VLD1_DUP_F16 : WInst<"vld1_dup", "dc", "hQh">; |
376 | def VST1_F16 : WInst<"vst1", "vpd", "hQh">; |
377 | def VST1_X2_F16 : WInst<"vst1_x2", "vp2", "hQh">; |
378 | def VST1_X3_F16 : WInst<"vst1_x3", "vp3", "hQh">; |
379 | def VST1_X4_F16 : WInst<"vst1_x4", "vp4", "hQh">; |
380 | def VST1_LANE_F16 : WInst<"vst1_lane", "vpdi", "hQh">; |
381 | } |
382 | |
383 | //////////////////////////////////////////////////////////////////////////////// |
384 | // E.3.15 Loads and stores of an N-element structure |
385 | def VLD2 : WInst<"vld2", "2c", "QUcQUsQUiQcQsQiQfQPcQPsUcUsUiUlcsilfPcPs">; |
386 | def VLD3 : WInst<"vld3", "3c", "QUcQUsQUiQcQsQiQfQPcQPsUcUsUiUlcsilfPcPs">; |
387 | def VLD4 : WInst<"vld4", "4c", "QUcQUsQUiQcQsQiQfQPcQPsUcUsUiUlcsilfPcPs">; |
388 | def VLD2_DUP : WInst<"vld2_dup", "2c", |
389 | "UcUsUiUlcsilfPcPsQcQfQiQlQsQPcQPsQUcQUiQUlQUs">; |
390 | def VLD3_DUP : WInst<"vld3_dup", "3c", |
391 | "UcUsUiUlcsilfPcPsQcQfQiQlQsQPcQPsQUcQUiQUlQUs">; |
392 | def VLD4_DUP : WInst<"vld4_dup", "4c", |
393 | "UcUsUiUlcsilfPcPsQcQfQiQlQsQPcQPsQUcQUiQUlQUs">; |
394 | def VLD2_LANE : WInst<"vld2_lane", "2c2i", "QUsQUiQsQiQfQPsUcUsUicsifPcPs">; |
395 | def VLD3_LANE : WInst<"vld3_lane", "3c3i", "QUsQUiQsQiQfQPsUcUsUicsifPcPs">; |
396 | def VLD4_LANE : WInst<"vld4_lane", "4c4i", "QUsQUiQsQiQfQPsUcUsUicsifPcPs">; |
397 | def VST2 : WInst<"vst2", "vp2", "QUcQUsQUiQcQsQiQfQPcQPsUcUsUiUlcsilfPcPs">; |
398 | def VST3 : WInst<"vst3", "vp3", "QUcQUsQUiQcQsQiQfQPcQPsUcUsUiUlcsilfPcPs">; |
399 | def VST4 : WInst<"vst4", "vp4", "QUcQUsQUiQcQsQiQfQPcQPsUcUsUiUlcsilfPcPs">; |
400 | def VST2_LANE : WInst<"vst2_lane", "vp2i", "QUsQUiQsQiQfQPsUcUsUicsifPcPs">; |
401 | def VST3_LANE : WInst<"vst3_lane", "vp3i", "QUsQUiQsQiQfQPsUcUsUicsifPcPs">; |
402 | def VST4_LANE : WInst<"vst4_lane", "vp4i", "QUsQUiQsQiQfQPsUcUsUicsifPcPs">; |
403 | let ArchGuard = "(__ARM_FP & 2)" in { |
404 | def VLD2_F16 : WInst<"vld2", "2c", "hQh">; |
405 | def VLD3_F16 : WInst<"vld3", "3c", "hQh">; |
406 | def VLD4_F16 : WInst<"vld4", "4c", "hQh">; |
407 | def VLD2_DUP_F16 : WInst<"vld2_dup", "2c", "hQh">; |
408 | def VLD3_DUP_F16 : WInst<"vld3_dup", "3c", "hQh">; |
409 | def VLD4_DUP_F16 : WInst<"vld4_dup", "4c", "hQh">; |
410 | def VLD2_LANE_F16 : WInst<"vld2_lane", "2c2i", "hQh">; |
411 | def VLD3_LANE_F16 : WInst<"vld3_lane", "3c3i", "hQh">; |
412 | def VLD4_LANE_F16 : WInst<"vld4_lane", "4c4i", "hQh">; |
413 | def VST2_F16 : WInst<"vst2", "vp2", "hQh">; |
414 | def VST3_F16 : WInst<"vst3", "vp3", "hQh">; |
415 | def VST4_F16 : WInst<"vst4", "vp4", "hQh">; |
416 | def VST2_LANE_F16 : WInst<"vst2_lane", "vp2i", "hQh">; |
417 | def VST3_LANE_F16 : WInst<"vst3_lane", "vp3i", "hQh">; |
418 | def VST4_LANE_F16 : WInst<"vst4_lane", "vp4i", "hQh">; |
419 | } |
420 | |
421 | //////////////////////////////////////////////////////////////////////////////// |
422 | // E.3.16 Extract lanes from a vector |
423 | let InstName = "vmov" in |
424 | def VGET_LANE : IInst<"vget_lane", "sdi", |
425 | "UcUsUicsiPcPsfQUcQUsQUiQcQsQiQPcQPsQflUlQlQUl">; |
426 | |
427 | //////////////////////////////////////////////////////////////////////////////// |
428 | // E.3.17 Set lanes within a vector |
429 | let InstName = "vmov" in |
430 | def VSET_LANE : IInst<"vset_lane", "dsdi", |
431 | "UcUsUicsiPcPsfQUcQUsQUiQcQsQiQPcQPsQflUlQlQUl">; |
432 | |
433 | //////////////////////////////////////////////////////////////////////////////// |
434 | // E.3.18 Initialize a vector from bit pattern |
435 | def VCREATE : NoTestOpInst<"vcreate", "dl", "csihfUcUsUiUlPcPsl", OP_CAST> { |
436 | let BigEndianSafe = 1; |
437 | } |
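// Illustrative use: vcreate builds a 64-bit vector from a 64-bit bit pattern,
// e.g. (assuming <arm_neon.h>):
//
//   uint8x8_t v = vcreate_u8(0x0807060504030201ULL);  // eight lanes from one u64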
438 | |
439 | //////////////////////////////////////////////////////////////////////////////// |
440 | // E.3.19 Set all lanes to same value |
441 | let InstName = "vmov" in { |
442 | def VDUP_N : WOpInst<"vdup_n", "ds", |
443 | "UcUsUicsiPcPshfQUcQUsQUiQcQsQiQPcQPsQhQflUlQlQUl", |
444 | OP_DUP>; |
445 | def VMOV_N : WOpInst<"vmov_n", "ds", |
446 | "UcUsUicsiPcPshfQUcQUsQUiQcQsQiQPcQPsQhQflUlQlQUl", |
447 | OP_DUP>; |
448 | } |
449 | let InstName = "" in |
450 | def VDUP_LANE: WOpInst<"vdup_lane", "dgi", |
451 | "UcUsUicsiPcPsfQUcQUsQUiQcQsQiQPcQPsQflUlQlQUl", |
452 | OP_DUP_LN>; |
453 | |
454 | //////////////////////////////////////////////////////////////////////////////// |
455 | // E.3.20 Combining vectors |
456 | def VCOMBINE : NoTestOpInst<"vcombine", "kdd", "csilhfUcUsUiUlPcPs", OP_CONC>; |
457 | |
458 | //////////////////////////////////////////////////////////////////////////////// |
459 | // E.3.21 Splitting vectors |
// Note that the ARM NEON Reference 2.0 mistakenly documents the
// vget_high_f16() and vget_low_f16() intrinsics as AArch64-only. We (and GCC)
// support all versions of these intrinsics on both AArch32 and AArch64. See
// D45668 for more details.
464 | let InstName = "vmov" in { |
465 | def VGET_HIGH : NoTestOpInst<"vget_high", "dk", "csilhfUcUsUiUlPcPs", OP_HI>; |
466 | def VGET_LOW : NoTestOpInst<"vget_low", "dk", "csilhfUcUsUiUlPcPs", OP_LO>; |
467 | } |
468 | |
469 | //////////////////////////////////////////////////////////////////////////////// |
470 | // E.3.22 Converting vectors |
471 | |
472 | let ArchGuard = "(__ARM_FP & 2)" in { |
473 | def VCVT_F16_F32 : SInst<"vcvt_f16_f32", "md", "Hf">; |
474 | def VCVT_F32_F16 : SInst<"vcvt_f32_f16", "wd", "h">; |
475 | } |
476 | |
477 | def VCVT_S32 : SInst<"vcvt_s32", "xd", "fQf">; |
478 | def VCVT_U32 : SInst<"vcvt_u32", "ud", "fQf">; |
479 | def VCVT_F32 : SInst<"vcvt_f32", "fd", "iUiQiQUi">; |
480 | let isVCVT_N = 1 in { |
481 | def VCVT_N_S32 : SInst<"vcvt_n_s32", "xdi", "fQf">; |
482 | def VCVT_N_U32 : SInst<"vcvt_n_u32", "udi", "fQf">; |
483 | def VCVT_N_F32 : SInst<"vcvt_n_f32", "fdi", "iUiQiQUi">; |
484 | } |
485 | |
486 | def VMOVN : IInst<"vmovn", "hk", "silUsUiUl">; |
487 | def VMOVL : SInst<"vmovl", "wd", "csiUcUsUi">; |
488 | def VQMOVN : SInst<"vqmovn", "hk", "silUsUiUl">; |
489 | def VQMOVUN : SInst<"vqmovun", "ek", "sil">; |
490 | |
491 | //////////////////////////////////////////////////////////////////////////////// |
492 | // E.3.23-24 Table lookup, Extended table lookup |
493 | let InstName = "vtbl" in { |
494 | def VTBL1 : WInst<"vtbl1", "ddt", "UccPc">; |
495 | def VTBL2 : WInst<"vtbl2", "d2t", "UccPc">; |
496 | def VTBL3 : WInst<"vtbl3", "d3t", "UccPc">; |
497 | def VTBL4 : WInst<"vtbl4", "d4t", "UccPc">; |
498 | } |
499 | let InstName = "vtbx" in { |
500 | def VTBX1 : WInst<"vtbx1", "dddt", "UccPc">; |
501 | def VTBX2 : WInst<"vtbx2", "dd2t", "UccPc">; |
502 | def VTBX3 : WInst<"vtbx3", "dd3t", "UccPc">; |
503 | def VTBX4 : WInst<"vtbx4", "dd4t", "UccPc">; |
504 | } |
505 | |
506 | //////////////////////////////////////////////////////////////////////////////// |
507 | // E.3.25 Operations with a scalar value |
508 | def VMLA_LANE : IOpInst<"vmla_lane", "dddgi", |
509 | "siUsUifQsQiQUsQUiQf", OP_MLA_LN>; |
510 | def VMLAL_LANE : SOpInst<"vmlal_lane", "wwddi", "siUsUi", OP_MLAL_LN>; |
511 | def VQDMLAL_LANE : SOpInst<"vqdmlal_lane", "wwddi", "si", OP_QDMLAL_LN>; |
512 | def VMLS_LANE : IOpInst<"vmls_lane", "dddgi", |
513 | "siUsUifQsQiQUsQUiQf", OP_MLS_LN>; |
514 | def VMLSL_LANE : SOpInst<"vmlsl_lane", "wwddi", "siUsUi", OP_MLSL_LN>; |
515 | def VQDMLSL_LANE : SOpInst<"vqdmlsl_lane", "wwddi", "si", OP_QDMLSL_LN>; |
516 | def VMUL_N : IOpInst<"vmul_n", "dds", "sifUsUiQsQiQfQUsQUi", OP_MUL_N>; |
517 | def VMUL_LANE : IOpInst<"vmul_lane", "ddgi", |
518 | "sifUsUiQsQiQfQUsQUi", OP_MUL_LN>; |
519 | def VMULL_N : SInst<"vmull_n", "wda", "siUsUi">; |
520 | def VMULL_LANE : SOpInst<"vmull_lane", "wddi", "siUsUi", OP_MULL_LN>; |
521 | def VQDMULL_N : SInst<"vqdmull_n", "wda", "si">; |
522 | def VQDMULL_LANE : SOpInst<"vqdmull_lane", "wddi", "si", OP_QDMULL_LN>; |
523 | def VQDMULH_N : SInst<"vqdmulh_n", "dda", "siQsQi">; |
524 | def VQDMULH_LANE : SOpInst<"vqdmulh_lane", "ddgi", "siQsQi", OP_QDMULH_LN>; |
525 | def VQRDMULH_N : SInst<"vqrdmulh_n", "dda", "siQsQi">; |
526 | def VQRDMULH_LANE : SOpInst<"vqrdmulh_lane", "ddgi", "siQsQi", OP_QRDMULH_LN>; |
527 | |
528 | let ArchGuard = "defined(__ARM_FEATURE_QRDMX)" in { |
529 | def VQRDMLAH_LANE : SOpInst<"vqrdmlah_lane", "dddgi", "siQsQi", OP_QRDMLAH_LN>; |
530 | def VQRDMLSH_LANE : SOpInst<"vqrdmlsh_lane", "dddgi", "siQsQi", OP_QRDMLSH_LN>; |
531 | } |
532 | |
533 | def VMLA_N : IOpInst<"vmla_n", "ddda", "siUsUifQsQiQUsQUiQf", OP_MLA_N>; |
534 | def VMLAL_N : SOpInst<"vmlal_n", "wwda", "siUsUi", OP_MLAL_N>; |
535 | def VQDMLAL_N : SInst<"vqdmlal_n", "wwda", "si">; |
536 | def VMLS_N : IOpInst<"vmls_n", "ddds", "siUsUifQsQiQUsQUiQf", OP_MLS_N>; |
537 | def VMLSL_N : SOpInst<"vmlsl_n", "wwda", "siUsUi", OP_MLSL_N>; |
538 | def VQDMLSL_N : SInst<"vqdmlsl_n", "wwda", "si">; |
539 | |
540 | //////////////////////////////////////////////////////////////////////////////// |
541 | // E.3.26 Vector Extract |
542 | def VEXT : WInst<"vext", "dddi", |
543 | "cUcPcsUsPsiUilUlfQcQUcQPcQsQUsQPsQiQUiQlQUlQf">; |
544 | |
545 | //////////////////////////////////////////////////////////////////////////////// |
546 | // E.3.27 Reverse vector elements |
547 | def VREV64 : WOpInst<"vrev64", "dd", "csiUcUsUiPcPsfQcQsQiQUcQUsQUiQPcQPsQf", |
548 | OP_REV64>; |
549 | def VREV32 : WOpInst<"vrev32", "dd", "csUcUsPcPsQcQsQUcQUsQPcQPs", OP_REV32>; |
550 | def VREV16 : WOpInst<"vrev16", "dd", "cUcPcQcQUcQPc", OP_REV16>; |
551 | |
552 | //////////////////////////////////////////////////////////////////////////////// |
553 | // E.3.28 Other single operand arithmetic |
554 | def VABS : SInst<"vabs", "dd", "csifQcQsQiQf">; |
555 | def VQABS : SInst<"vqabs", "dd", "csiQcQsQi">; |
556 | def VNEG : SOpInst<"vneg", "dd", "csifQcQsQiQf", OP_NEG>; |
557 | def VQNEG : SInst<"vqneg", "dd", "csiQcQsQi">; |
558 | def VCLS : SInst<"vcls", "dd", "csiQcQsQi">; |
559 | def VCLZ : IInst<"vclz", "dd", "csiUcUsUiQcQsQiQUcQUsQUi">; |
560 | def VCNT : WInst<"vcnt", "dd", "UccPcQUcQcQPc">; |
561 | def VRECPE : SInst<"vrecpe", "dd", "fUiQfQUi">; |
562 | def VRSQRTE : SInst<"vrsqrte", "dd", "fUiQfQUi">; |
563 | |
564 | //////////////////////////////////////////////////////////////////////////////// |
565 | // E.3.29 Logical operations |
566 | def VMVN : LOpInst<"vmvn", "dd", "csiUcUsUiPcQcQsQiQUcQUsQUiQPc", OP_NOT>; |
567 | def VAND : LOpInst<"vand", "ddd", "csilUcUsUiUlQcQsQiQlQUcQUsQUiQUl", OP_AND>; |
568 | def VORR : LOpInst<"vorr", "ddd", "csilUcUsUiUlQcQsQiQlQUcQUsQUiQUl", OP_OR>; |
569 | def VEOR : LOpInst<"veor", "ddd", "csilUcUsUiUlQcQsQiQlQUcQUsQUiQUl", OP_XOR>; |
570 | def VBIC : LOpInst<"vbic", "ddd", "csilUcUsUiUlQcQsQiQlQUcQUsQUiQUl", OP_ANDN>; |
571 | def VORN : LOpInst<"vorn", "ddd", "csilUcUsUiUlQcQsQiQlQUcQUsQUiQUl", OP_ORN>; |
572 | let isHiddenLInst = 1 in |
573 | def VBSL : SInst<"vbsl", "dudd", |
574 | "csilUcUsUiUlfPcPsQcQsQiQlQUcQUsQUiQUlQfQPcQPs">; |
575 | |
576 | //////////////////////////////////////////////////////////////////////////////// |
577 | // E.3.30 Transposition operations |
578 | def VTRN : WInst<"vtrn", "2dd", "csiUcUsUifPcPsQcQsQiQUcQUsQUiQfQPcQPs">; |
579 | def VZIP : WInst<"vzip", "2dd", "csiUcUsUifPcPsQcQsQiQUcQUsQUiQfQPcQPs">; |
580 | def VUZP : WInst<"vuzp", "2dd", "csiUcUsUifPcPsQcQsQiQUcQUsQUiQfQPcQPs">; |
581 | |
582 | //////////////////////////////////////////////////////////////////////////////// |
583 | // E.3.31 Vector reinterpret cast operations |
584 | def VREINTERPRET |
585 | : NoTestOpInst<"vreinterpret", "dd", |
586 | "csilUcUsUiUlhfPcPsQcQsQiQlQUcQUsQUiQUlQhQfQPcQPs", OP_REINT> { |
587 | let CartesianProductOfTypes = 1; |
588 | let ArchGuard = "!defined(__aarch64__)"; |
589 | let BigEndianSafe = 1; |
590 | } |
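// The cartesian product over the type string yields one intrinsic per
// (destination, source) pair of equal overall width; a sketch of the generated
// AArch32 names:
//
//   int8x8_t   vreinterpret_s8_f32(float32x2_t);
//   uint16x8_t vreinterpretq_u16_p8(poly8x16_t);
//   ...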
591 | |
592 | //////////////////////////////////////////////////////////////////////////////// |
593 | // Vector fused multiply-add operations |
594 | |
595 | let ArchGuard = "defined(__ARM_FEATURE_FMA)" in { |
596 | def VFMA : SInst<"vfma", "dddd", "fQf">; |
597 | def VFMS : SOpInst<"vfms", "dddd", "fQf", OP_FMLS>; |
598 | def FMLA_N_F32 : SOpInst<"vfma_n", "ddds", "fQf", OP_FMLA_N>; |
599 | } |
600 | |
601 | //////////////////////////////////////////////////////////////////////////////// |
602 | // fp16 vector operations |
603 | def SCALAR_HALF_GET_LANE : IOpInst<"vget_lane", "sdi", "h", OP_SCALAR_HALF_GET_LN>; |
604 | def SCALAR_HALF_SET_LANE : IOpInst<"vset_lane", "dsdi", "h", OP_SCALAR_HALF_SET_LN>; |
605 | def SCALAR_HALF_GET_LANEQ : IOpInst<"vget_lane", "sdi", "Qh", OP_SCALAR_HALF_GET_LNQ>; |
606 | def SCALAR_HALF_SET_LANEQ : IOpInst<"vset_lane", "dsdi", "Qh", OP_SCALAR_HALF_SET_LNQ>; |
607 | |
608 | //////////////////////////////////////////////////////////////////////////////// |
609 | // AArch64 Intrinsics |
610 | |
611 | let ArchGuard = "defined(__aarch64__)" in { |
612 | |
613 | //////////////////////////////////////////////////////////////////////////////// |
614 | // Load/Store |
615 | def LD1 : WInst<"vld1", "dc", "dQdPlQPl">; |
616 | def LD2 : WInst<"vld2", "2c", "QUlQldQdPlQPl">; |
617 | def LD3 : WInst<"vld3", "3c", "QUlQldQdPlQPl">; |
618 | def LD4 : WInst<"vld4", "4c", "QUlQldQdPlQPl">; |
619 | def ST1 : WInst<"vst1", "vpd", "dQdPlQPl">; |
620 | def ST2 : WInst<"vst2", "vp2", "QUlQldQdPlQPl">; |
621 | def ST3 : WInst<"vst3", "vp3", "QUlQldQdPlQPl">; |
622 | def ST4 : WInst<"vst4", "vp4", "QUlQldQdPlQPl">; |
623 | |
624 | def LD1_X2 : WInst<"vld1_x2", "2c", |
625 | "dQdPlQPl">; |
626 | def LD1_X3 : WInst<"vld1_x3", "3c", |
627 | "dQdPlQPl">; |
628 | def LD1_X4 : WInst<"vld1_x4", "4c", |
629 | "dQdPlQPl">; |
630 | |
631 | def ST1_X2 : WInst<"vst1_x2", "vp2", "dQdPlQPl">; |
632 | def ST1_X3 : WInst<"vst1_x3", "vp3", "dQdPlQPl">; |
633 | def ST1_X4 : WInst<"vst1_x4", "vp4", "dQdPlQPl">; |
634 | |
635 | def LD1_LANE : WInst<"vld1_lane", "dcdi", "dQdPlQPl">; |
636 | def LD2_LANE : WInst<"vld2_lane", "2c2i", "lUlQcQUcQPcQlQUldQdPlQPl">; |
637 | def LD3_LANE : WInst<"vld3_lane", "3c3i", "lUlQcQUcQPcQlQUldQdPlQPl">; |
638 | def LD4_LANE : WInst<"vld4_lane", "4c4i", "lUlQcQUcQPcQlQUldQdPlQPl">; |
639 | def ST1_LANE : WInst<"vst1_lane", "vpdi", "dQdPlQPl">; |
640 | def ST2_LANE : WInst<"vst2_lane", "vp2i", "lUlQcQUcQPcQlQUldQdPlQPl">; |
641 | def ST3_LANE : WInst<"vst3_lane", "vp3i", "lUlQcQUcQPcQlQUldQdPlQPl">; |
642 | def ST4_LANE : WInst<"vst4_lane", "vp4i", "lUlQcQUcQPcQlQUldQdPlQPl">; |
643 | |
644 | def LD1_DUP : WInst<"vld1_dup", "dc", "dQdPlQPl">; |
645 | def LD2_DUP : WInst<"vld2_dup", "2c", "dQdPlQPl">; |
646 | def LD3_DUP : WInst<"vld3_dup", "3c", "dQdPlQPl">; |
647 | def LD4_DUP : WInst<"vld4_dup", "4c", "dQdPlQPl">; |
648 | |
649 | def VLDRQ : WInst<"vldrq", "sc", "Pk">; |
650 | def VSTRQ : WInst<"vstrq", "vps", "Pk">; |
651 | |
652 | //////////////////////////////////////////////////////////////////////////////// |
653 | // Addition |
654 | def ADD : IOpInst<"vadd", "ddd", "dQd", OP_ADD>; |
655 | |
656 | //////////////////////////////////////////////////////////////////////////////// |
657 | // Subtraction |
658 | def SUB : IOpInst<"vsub", "ddd", "dQd", OP_SUB>; |
659 | |
660 | //////////////////////////////////////////////////////////////////////////////// |
661 | // Multiplication |
662 | def MUL : IOpInst<"vmul", "ddd", "dQd", OP_MUL>; |
663 | def MLA : IOpInst<"vmla", "dddd", "dQd", OP_MLA>; |
664 | def MLS : IOpInst<"vmls", "dddd", "dQd", OP_MLS>; |
665 | |
666 | //////////////////////////////////////////////////////////////////////////////// |
667 | // Multiplication Extended |
668 | def MULX : SInst<"vmulx", "ddd", "fdQfQd">; |
669 | |
670 | //////////////////////////////////////////////////////////////////////////////// |
671 | // Division |
672 | def FDIV : IOpInst<"vdiv", "ddd", "fdQfQd", OP_DIV>; |
673 | |
674 | //////////////////////////////////////////////////////////////////////////////// |
675 | // Vector fused multiply-add operations |
676 | def FMLA : SInst<"vfma", "dddd", "dQd">; |
677 | def FMLS : SOpInst<"vfms", "dddd", "dQd", OP_FMLS>; |
678 | |
679 | //////////////////////////////////////////////////////////////////////////////// |
680 | // MUL, MLA, MLS, FMA, FMS definitions with scalar argument |
681 | def VMUL_N_A64 : IOpInst<"vmul_n", "dds", "Qd", OP_MUL_N>; |
682 | |
683 | def FMLA_N : SOpInst<"vfma_n", "ddds", "dQd", OP_FMLA_N>; |
684 | def FMLS_N : SOpInst<"vfms_n", "ddds", "fdQfQd", OP_FMLS_N>; |
685 | |
686 | def MLA_N : SOpInst<"vmla_n", "ddds", "Qd", OP_MLA_N>; |
687 | def MLS_N : SOpInst<"vmls_n", "ddds", "Qd", OP_MLS_N>; |
688 | |
689 | //////////////////////////////////////////////////////////////////////////////// |
690 | // Logical operations |
691 | def BSL : SInst<"vbsl", "dudd", "dPlQdQPl">; |
692 | |
693 | //////////////////////////////////////////////////////////////////////////////// |
694 | // Absolute Difference |
695 | def ABD : SInst<"vabd", "ddd", "dQd">; |
696 | |
697 | //////////////////////////////////////////////////////////////////////////////// |
// Saturating Absolute/Negate
699 | def ABS : SInst<"vabs", "dd", "dQdlQl">; |
700 | def QABS : SInst<"vqabs", "dd", "lQl">; |
701 | def NEG : SOpInst<"vneg", "dd", "dlQdQl", OP_NEG>; |
702 | def QNEG : SInst<"vqneg", "dd", "lQl">; |
703 | |
704 | //////////////////////////////////////////////////////////////////////////////// |
// Signed Saturating Accumulate of Unsigned Value
706 | def SUQADD : SInst<"vuqadd", "ddd", "csilQcQsQiQl">; |
707 | |
708 | //////////////////////////////////////////////////////////////////////////////// |
// Unsigned Saturating Accumulate of Signed Value
710 | def USQADD : SInst<"vsqadd", "ddd", "UcUsUiUlQUcQUsQUiQUl">; |
711 | |
712 | //////////////////////////////////////////////////////////////////////////////// |
713 | // Reciprocal/Sqrt |
714 | def FRECPS : IInst<"vrecps", "ddd", "dQd">; |
715 | def FRSQRTS : IInst<"vrsqrts", "ddd", "dQd">; |
716 | def FRECPE : SInst<"vrecpe", "dd", "dQd">; |
717 | def FRSQRTE : SInst<"vrsqrte", "dd", "dQd">; |
718 | def FSQRT : SInst<"vsqrt", "dd", "fdQfQd">; |
719 | |
720 | //////////////////////////////////////////////////////////////////////////////// |
// Bitwise Reverse
722 | def RBIT : IInst<"vrbit", "dd", "cUcPcQcQUcQPc">; |
723 | |
724 | //////////////////////////////////////////////////////////////////////////////// |
725 | // Integer extract and narrow to high |
726 | def XTN2 : SOpInst<"vmovn_high", "qhk", "silUsUiUl", OP_XTN>; |
727 | |
728 | //////////////////////////////////////////////////////////////////////////////// |
729 | // Signed integer saturating extract and unsigned narrow to high |
730 | def SQXTUN2 : SOpInst<"vqmovun_high", "emd", "HsHiHl", OP_SQXTUN>; |
731 | |
732 | //////////////////////////////////////////////////////////////////////////////// |
733 | // Integer saturating extract and narrow to high |
734 | def QXTN2 : SOpInst<"vqmovn_high", "qhk", "silUsUiUl", OP_QXTN>; |
735 | |
736 | //////////////////////////////////////////////////////////////////////////////// |
737 | // Converting vectors |
738 | |
739 | def VCVT_F32_F64 : SInst<"vcvt_f32_f64", "md", "Qd">; |
740 | def VCVT_F64_F32 : SInst<"vcvt_f64_f32", "wd", "f">; |
741 | |
742 | def VCVT_S64 : SInst<"vcvt_s64", "xd", "dQd">; |
743 | def VCVT_U64 : SInst<"vcvt_u64", "ud", "dQd">; |
744 | def VCVT_F64 : SInst<"vcvt_f64", "Fd", "lUlQlQUl">; |
745 | |
746 | def VCVT_HIGH_F16_F32 : SOpInst<"vcvt_high_f16", "hmj", "Hf", OP_VCVT_NA_HI_F16>; |
747 | def VCVT_HIGH_F32_F16 : SOpInst<"vcvt_high_f32", "wk", "h", OP_VCVT_EX_HI_F32>; |
748 | def VCVT_HIGH_F32_F64 : SOpInst<"vcvt_high_f32", "qfj", "d", OP_VCVT_NA_HI_F32>; |
749 | def VCVT_HIGH_F64_F32 : SOpInst<"vcvt_high_f64", "wj", "f", OP_VCVT_EX_HI_F64>; |
750 | |
751 | def VCVTX_F32_F64 : SInst<"vcvtx_f32", "fj", "d">; |
752 | def VCVTX_HIGH_F32_F64 : SOpInst<"vcvtx_high_f32", "qfj", "d", OP_VCVTX_HI>; |
753 | |
754 | //////////////////////////////////////////////////////////////////////////////// |
755 | // Comparison |
756 | def FCAGE : IInst<"vcage", "udd", "dQd">; |
757 | def FCAGT : IInst<"vcagt", "udd", "dQd">; |
758 | def FCALE : IInst<"vcale", "udd", "dQd">; |
759 | def FCALT : IInst<"vcalt", "udd", "dQd">; |
760 | def CMTST : WInst<"vtst", "udd", "lUlPlQlQUlQPl">; |
761 | def CFMEQ : SOpInst<"vceq", "udd", "lUldQdQlQUlPlQPl", OP_EQ>; |
762 | def CFMGE : SOpInst<"vcge", "udd", "lUldQdQlQUl", OP_GE>; |
763 | def CFMLE : SOpInst<"vcle", "udd", "lUldQdQlQUl", OP_LE>; |
764 | def CFMGT : SOpInst<"vcgt", "udd", "lUldQdQlQUl", OP_GT>; |
765 | def CFMLT : SOpInst<"vclt", "udd", "lUldQdQlQUl", OP_LT>; |
766 | |
767 | def CMEQ : SInst<"vceqz", "ud", |
768 | "csilfUcUsUiUlPcPsPlQcQsQiQlQfQUcQUsQUiQUlQPcQPsdQdQPl">; |
769 | def CMGE : SInst<"vcgez", "ud", "csilfdQcQsQiQlQfQd">; |
770 | def CMLE : SInst<"vclez", "ud", "csilfdQcQsQiQlQfQd">; |
771 | def CMGT : SInst<"vcgtz", "ud", "csilfdQcQsQiQlQfQd">; |
772 | def CMLT : SInst<"vcltz", "ud", "csilfdQcQsQiQlQfQd">; |
773 | |
774 | //////////////////////////////////////////////////////////////////////////////// |
775 | // Max/Min Integer |
776 | def MAX : SInst<"vmax", "ddd", "dQd">; |
777 | def MIN : SInst<"vmin", "ddd", "dQd">; |
778 | |
779 | //////////////////////////////////////////////////////////////////////////////// |
780 | // Pairwise Max/Min |
781 | def MAXP : SInst<"vpmax", "ddd", "QcQsQiQUcQUsQUiQfQd">; |
782 | def MINP : SInst<"vpmin", "ddd", "QcQsQiQUcQUsQUiQfQd">; |
783 | |
784 | //////////////////////////////////////////////////////////////////////////////// |
785 | // Pairwise MaxNum/MinNum Floating Point |
786 | def FMAXNMP : SInst<"vpmaxnm", "ddd", "fQfQd">; |
787 | def FMINNMP : SInst<"vpminnm", "ddd", "fQfQd">; |
788 | |
789 | //////////////////////////////////////////////////////////////////////////////// |
790 | // Pairwise Addition |
791 | def ADDP : IInst<"vpadd", "ddd", "QcQsQiQlQUcQUsQUiQUlQfQd">; |
792 | |
793 | //////////////////////////////////////////////////////////////////////////////// |
794 | // Shifts by constant |
795 | let isShift = 1 in { |
796 | // Left shift long high |
797 | def SHLL_HIGH_N : SOpInst<"vshll_high_n", "ndi", "HcHsHiHUcHUsHUi", |
798 | OP_LONG_HI>; |
799 | |
800 | //////////////////////////////////////////////////////////////////////////////// |
801 | def SRI_N : WInst<"vsri_n", "dddi", "PlQPl">; |
802 | def SLI_N : WInst<"vsli_n", "dddi", "PlQPl">; |
803 | |
804 | // Right shift narrow high |
805 | def SHRN_HIGH_N : IOpInst<"vshrn_high_n", "hmdi", |
806 | "HsHiHlHUsHUiHUl", OP_NARROW_HI>; |
807 | def QSHRUN_HIGH_N : SOpInst<"vqshrun_high_n", "hmdi", |
808 | "HsHiHl", OP_NARROW_HI>; |
809 | def RSHRN_HIGH_N : IOpInst<"vrshrn_high_n", "hmdi", |
810 | "HsHiHlHUsHUiHUl", OP_NARROW_HI>; |
811 | def QRSHRUN_HIGH_N : SOpInst<"vqrshrun_high_n", "hmdi", |
812 | "HsHiHl", OP_NARROW_HI>; |
813 | def QSHRN_HIGH_N : SOpInst<"vqshrn_high_n", "hmdi", |
814 | "HsHiHlHUsHUiHUl", OP_NARROW_HI>; |
815 | def QRSHRN_HIGH_N : SOpInst<"vqrshrn_high_n", "hmdi", |
816 | "HsHiHlHUsHUiHUl", OP_NARROW_HI>; |
817 | } |
818 | |
819 | //////////////////////////////////////////////////////////////////////////////// |
820 | // Converting vectors |
821 | def VMOVL_HIGH : SOpInst<"vmovl_high", "nd", "HcHsHiHUcHUsHUi", OP_MOVL_HI>; |
822 | |
823 | let isVCVT_N = 1 in { |
824 | def CVTF_N_F64 : SInst<"vcvt_n_f64", "Fdi", "lUlQlQUl">; |
825 | def FCVTZS_N_S64 : SInst<"vcvt_n_s64", "xdi", "dQd">; |
826 | def FCVTZS_N_U64 : SInst<"vcvt_n_u64", "udi", "dQd">; |
827 | } |
828 | |
829 | //////////////////////////////////////////////////////////////////////////////// |
830 | // 3VDiff class using high 64-bit in operands |
831 | def VADDL_HIGH : SOpInst<"vaddl_high", "wkk", "csiUcUsUi", OP_ADDLHi>; |
832 | def VADDW_HIGH : SOpInst<"vaddw_high", "wwk", "csiUcUsUi", OP_ADDWHi>; |
833 | def VSUBL_HIGH : SOpInst<"vsubl_high", "wkk", "csiUcUsUi", OP_SUBLHi>; |
834 | def VSUBW_HIGH : SOpInst<"vsubw_high", "wwk", "csiUcUsUi", OP_SUBWHi>; |
835 | |
836 | def VABDL_HIGH : SOpInst<"vabdl_high", "wkk", "csiUcUsUi", OP_ABDLHi>; |
837 | def VABAL_HIGH : SOpInst<"vabal_high", "wwkk", "csiUcUsUi", OP_ABALHi>; |
838 | |
839 | def VMULL_HIGH : SOpInst<"vmull_high", "wkk", "csiUcUsUiPc", OP_MULLHi>; |
840 | def VMULL_HIGH_N : SOpInst<"vmull_high_n", "wks", "siUsUi", OP_MULLHi_N>; |
841 | def VMLAL_HIGH : SOpInst<"vmlal_high", "wwkk", "csiUcUsUi", OP_MLALHi>; |
842 | def VMLAL_HIGH_N : SOpInst<"vmlal_high_n", "wwks", "siUsUi", OP_MLALHi_N>; |
843 | def VMLSL_HIGH : SOpInst<"vmlsl_high", "wwkk", "csiUcUsUi", OP_MLSLHi>; |
844 | def VMLSL_HIGH_N : SOpInst<"vmlsl_high_n", "wwks", "siUsUi", OP_MLSLHi_N>; |
845 | |
846 | def VADDHN_HIGH : SOpInst<"vaddhn_high", "qhkk", "silUsUiUl", OP_ADDHNHi>; |
847 | def VRADDHN_HIGH : SOpInst<"vraddhn_high", "qhkk", "silUsUiUl", OP_RADDHNHi>; |
848 | def VSUBHN_HIGH : SOpInst<"vsubhn_high", "qhkk", "silUsUiUl", OP_SUBHNHi>; |
849 | def VRSUBHN_HIGH : SOpInst<"vrsubhn_high", "qhkk", "silUsUiUl", OP_RSUBHNHi>; |
850 | |
851 | def VQDMULL_HIGH : SOpInst<"vqdmull_high", "wkk", "si", OP_QDMULLHi>; |
852 | def VQDMULL_HIGH_N : SOpInst<"vqdmull_high_n", "wks", "si", OP_QDMULLHi_N>; |
853 | def VQDMLAL_HIGH : SOpInst<"vqdmlal_high", "wwkk", "si", OP_QDMLALHi>; |
854 | def VQDMLAL_HIGH_N : SOpInst<"vqdmlal_high_n", "wwks", "si", OP_QDMLALHi_N>; |
855 | def VQDMLSL_HIGH : SOpInst<"vqdmlsl_high", "wwkk", "si", OP_QDMLSLHi>; |
856 | def VQDMLSL_HIGH_N : SOpInst<"vqdmlsl_high_n", "wwks", "si", OP_QDMLSLHi_N>; |
857 | def VMULL_P64 : SInst<"vmull", "rss", "Pl">; |
858 | def VMULL_HIGH_P64 : SOpInst<"vmull_high", "rdd", "HPl", OP_MULLHi_P64>; |
859 | |
860 | |
861 | //////////////////////////////////////////////////////////////////////////////// |
862 | // Extract or insert element from vector |
863 | def GET_LANE : IInst<"vget_lane", "sdi", "dQdPlQPl">; |
864 | def SET_LANE : IInst<"vset_lane", "dsdi", "dQdPlQPl">; |
865 | def COPY_LANE : IOpInst<"vcopy_lane", "ddidi", |
866 | "csilUcUsUiUlPcPsPlfd", OP_COPY_LN>; |
867 | def COPYQ_LANE : IOpInst<"vcopy_lane", "ddigi", |
868 | "QcQsQiQlQUcQUsQUiQUlQPcQPsQfQdQPl", OP_COPY_LN>; |
869 | def COPY_LANEQ : IOpInst<"vcopy_laneq", "ddiki", |
870 | "csilPcPsPlUcUsUiUlfd", OP_COPY_LN>; |
871 | def COPYQ_LANEQ : IOpInst<"vcopy_laneq", "ddidi", |
872 | "QcQsQiQlQUcQUsQUiQUlQPcQPsQfQdQPl", OP_COPY_LN>; |
873 | |
874 | //////////////////////////////////////////////////////////////////////////////// |
875 | // Set all lanes to same value |
876 | def VDUP_LANE1: WOpInst<"vdup_lane", "dgi", "hdQhQdPlQPl", OP_DUP_LN>; |
877 | def VDUP_LANE2: WOpInst<"vdup_laneq", "dji", |
878 | "csilUcUsUiUlPcPshfdQcQsQiQlQPcQPsQUcQUsQUiQUlQhQfQdPlQPl", |
879 | OP_DUP_LN>; |
880 | def DUP_N : WOpInst<"vdup_n", "ds", "dQdPlQPl", OP_DUP>; |
881 | def MOV_N : WOpInst<"vmov_n", "ds", "dQdPlQPl", OP_DUP>; |
882 | |
883 | //////////////////////////////////////////////////////////////////////////////// |
884 | def COMBINE : NoTestOpInst<"vcombine", "kdd", "dPl", OP_CONC>; |
885 | |
886 | //////////////////////////////////////////////////////////////////////////////// |
// Initialize a vector from bit pattern
888 | def CREATE : NoTestOpInst<"vcreate", "dl", "dPl", OP_CAST> { |
889 | let BigEndianSafe = 1; |
890 | } |
891 | |
892 | //////////////////////////////////////////////////////////////////////////////// |
893 | |
894 | def VMLA_LANEQ : IOpInst<"vmla_laneq", "dddji", |
895 | "siUsUifQsQiQUsQUiQf", OP_MLA_LN>; |
896 | def VMLS_LANEQ : IOpInst<"vmls_laneq", "dddji", |
897 | "siUsUifQsQiQUsQUiQf", OP_MLS_LN>; |
898 | |
899 | def VFMA_LANE : IInst<"vfma_lane", "dddgi", "fdQfQd">; |
900 | def VFMA_LANEQ : IInst<"vfma_laneq", "dddji", "fdQfQd"> { |
901 | let isLaneQ = 1; |
902 | } |
903 | def VFMS_LANE : IOpInst<"vfms_lane", "dddgi", "fdQfQd", OP_FMS_LN>; |
904 | def VFMS_LANEQ : IOpInst<"vfms_laneq", "dddji", "fdQfQd", OP_FMS_LNQ>; |
905 | |
906 | def VMLAL_LANEQ : SOpInst<"vmlal_laneq", "wwdki", "siUsUi", OP_MLAL_LN>; |
907 | def VMLAL_HIGH_LANE : SOpInst<"vmlal_high_lane", "wwkdi", "siUsUi", |
908 | OP_MLALHi_LN>; |
909 | def VMLAL_HIGH_LANEQ : SOpInst<"vmlal_high_laneq", "wwkki", "siUsUi", |
910 | OP_MLALHi_LN>; |
911 | def VMLSL_LANEQ : SOpInst<"vmlsl_laneq", "wwdki", "siUsUi", OP_MLSL_LN>; |
912 | def VMLSL_HIGH_LANE : SOpInst<"vmlsl_high_lane", "wwkdi", "siUsUi", |
913 | OP_MLSLHi_LN>; |
914 | def VMLSL_HIGH_LANEQ : SOpInst<"vmlsl_high_laneq", "wwkki", "siUsUi", |
915 | OP_MLSLHi_LN>; |
916 | |
917 | def VQDMLAL_LANEQ : SOpInst<"vqdmlal_laneq", "wwdki", "si", OP_QDMLAL_LN>; |
918 | def VQDMLAL_HIGH_LANE : SOpInst<"vqdmlal_high_lane", "wwkdi", "si", |
919 | OP_QDMLALHi_LN>; |
920 | def VQDMLAL_HIGH_LANEQ : SOpInst<"vqdmlal_high_laneq", "wwkki", "si", |
921 | OP_QDMLALHi_LN>; |
922 | def VQDMLSL_LANEQ : SOpInst<"vqdmlsl_laneq", "wwdki", "si", OP_QDMLSL_LN>; |
923 | def VQDMLSL_HIGH_LANE : SOpInst<"vqdmlsl_high_lane", "wwkdi", "si", |
924 | OP_QDMLSLHi_LN>; |
925 | def VQDMLSL_HIGH_LANEQ : SOpInst<"vqdmlsl_high_laneq", "wwkki", "si", |
926 | OP_QDMLSLHi_LN>; |
927 | |
// Newly added double-precision (f64) variant of vmul_lane for AArch64
929 | // Note: d type is handled by SCALAR_VMUL_LANE |
930 | def VMUL_LANE_A64 : IOpInst<"vmul_lane", "ddgi", "Qd", OP_MUL_LN>; |
931 | |
932 | // Note: d type is handled by SCALAR_VMUL_LANEQ |
933 | def VMUL_LANEQ : IOpInst<"vmul_laneq", "ddji", |
934 | "sifUsUiQsQiQUsQUiQfQd", OP_MUL_LN>; |
935 | def VMULL_LANEQ : SOpInst<"vmull_laneq", "wdki", "siUsUi", OP_MULL_LN>; |
936 | def VMULL_HIGH_LANE : SOpInst<"vmull_high_lane", "wkdi", "siUsUi", |
937 | OP_MULLHi_LN>; |
938 | def VMULL_HIGH_LANEQ : SOpInst<"vmull_high_laneq", "wkki", "siUsUi", |
939 | OP_MULLHi_LN>; |
940 | |
941 | def VQDMULL_LANEQ : SOpInst<"vqdmull_laneq", "wdki", "si", OP_QDMULL_LN>; |
942 | def VQDMULL_HIGH_LANE : SOpInst<"vqdmull_high_lane", "wkdi", "si", |
943 | OP_QDMULLHi_LN>; |
944 | def VQDMULL_HIGH_LANEQ : SOpInst<"vqdmull_high_laneq", "wkki", "si", |
945 | OP_QDMULLHi_LN>; |
946 | |
947 | def VQDMULH_LANEQ : SOpInst<"vqdmulh_laneq", "ddji", "siQsQi", OP_QDMULH_LN>; |
948 | def VQRDMULH_LANEQ : SOpInst<"vqrdmulh_laneq", "ddji", "siQsQi", OP_QRDMULH_LN>; |
949 | |
950 | let ArchGuard = "defined(__ARM_FEATURE_QRDMX) && defined(__aarch64__)" in { |
951 | def VQRDMLAH_LANEQ : SOpInst<"vqrdmlah_laneq", "dddji", "siQsQi", OP_QRDMLAH_LN>; |
952 | def VQRDMLSH_LANEQ : SOpInst<"vqrdmlsh_laneq", "dddji", "siQsQi", OP_QRDMLSH_LN>; |
953 | } |
954 | |
// Note: d type is implemented by SCALAR_VMULX_LANE
956 | def VMULX_LANE : IOpInst<"vmulx_lane", "ddgi", "fQfQd", OP_MULX_LN>; |
957 | // Note: d type is implemented by SCALAR_VMULX_LANEQ |
958 | def VMULX_LANEQ : IOpInst<"vmulx_laneq", "ddji", "fQfQd", OP_MULX_LN>; |
959 | |
960 | //////////////////////////////////////////////////////////////////////////////// |
961 | // Across vectors class |
962 | def VADDLV : SInst<"vaddlv", "rd", "csiUcUsUiQcQsQiQUcQUsQUi">; |
963 | def VMAXV : SInst<"vmaxv", "sd", "csifUcUsUiQcQsQiQUcQUsQUiQfQd">; |
964 | def VMINV : SInst<"vminv", "sd", "csifUcUsUiQcQsQiQUcQUsQUiQfQd">; |
965 | def VADDV : SInst<"vaddv", "sd", "csifUcUsUiQcQsQiQUcQUsQUiQfQdQlQUl">; |
966 | def FMAXNMV : SInst<"vmaxnmv", "sd", "fQfQd">; |
967 | def FMINNMV : SInst<"vminnmv", "sd", "fQfQd">; |
968 | |
969 | //////////////////////////////////////////////////////////////////////////////// |
970 | // Newly added Vector Extract for f64 |
971 | def VEXT_A64 : WInst<"vext", "dddi", "dQdPlQPl">; |
972 | |
973 | //////////////////////////////////////////////////////////////////////////////// |
974 | // Crypto |
975 | let ArchGuard = "__ARM_ARCH >= 8 && defined(__ARM_FEATURE_CRYPTO)" in { |
976 | def AESE : SInst<"vaese", "ddd", "QUc">; |
977 | def AESD : SInst<"vaesd", "ddd", "QUc">; |
978 | def AESMC : SInst<"vaesmc", "dd", "QUc">; |
979 | def AESIMC : SInst<"vaesimc", "dd", "QUc">; |
980 | |
981 | def SHA1H : SInst<"vsha1h", "ss", "Ui">; |
982 | def SHA1SU1 : SInst<"vsha1su1", "ddd", "QUi">; |
983 | def SHA256SU0 : SInst<"vsha256su0", "ddd", "QUi">; |
984 | |
985 | def SHA1C : SInst<"vsha1c", "ddsd", "QUi">; |
986 | def SHA1P : SInst<"vsha1p", "ddsd", "QUi">; |
987 | def SHA1M : SInst<"vsha1m", "ddsd", "QUi">; |
988 | def SHA1SU0 : SInst<"vsha1su0", "dddd", "QUi">; |
989 | def SHA256H : SInst<"vsha256h", "dddd", "QUi">; |
990 | def SHA256H2 : SInst<"vsha256h2", "dddd", "QUi">; |
991 | def SHA256SU1 : SInst<"vsha256su1", "dddd", "QUi">; |
992 | } |
993 | |
994 | //////////////////////////////////////////////////////////////////////////////// |
995 | // Float -> Int conversions with explicit rounding mode |
996 | |
997 | let ArchGuard = "__ARM_ARCH >= 8" in { |
998 | def FCVTNS_S32 : SInst<"vcvtn_s32", "xd", "fQf">; |
999 | def FCVTNU_S32 : SInst<"vcvtn_u32", "ud", "fQf">; |
1000 | def FCVTPS_S32 : SInst<"vcvtp_s32", "xd", "fQf">; |
1001 | def FCVTPU_S32 : SInst<"vcvtp_u32", "ud", "fQf">; |
1002 | def FCVTMS_S32 : SInst<"vcvtm_s32", "xd", "fQf">; |
1003 | def FCVTMU_S32 : SInst<"vcvtm_u32", "ud", "fQf">; |
1004 | def FCVTAS_S32 : SInst<"vcvta_s32", "xd", "fQf">; |
1005 | def FCVTAU_S32 : SInst<"vcvta_u32", "ud", "fQf">; |
1006 | } |
1007 | |
1008 | let ArchGuard = "__ARM_ARCH >= 8 && defined(__aarch64__)" in { |
1009 | def FCVTNS_S64 : SInst<"vcvtn_s64", "xd", "dQd">; |
1010 | def FCVTNU_S64 : SInst<"vcvtn_u64", "ud", "dQd">; |
1011 | def FCVTPS_S64 : SInst<"vcvtp_s64", "xd", "dQd">; |
1012 | def FCVTPU_S64 : SInst<"vcvtp_u64", "ud", "dQd">; |
1013 | def FCVTMS_S64 : SInst<"vcvtm_s64", "xd", "dQd">; |
1014 | def FCVTMU_S64 : SInst<"vcvtm_u64", "ud", "dQd">; |
1015 | def FCVTAS_S64 : SInst<"vcvta_s64", "xd", "dQd">; |
1016 | def FCVTAU_S64 : SInst<"vcvta_u64", "ud", "dQd">; |
1017 | } |
1018 | |
1019 | //////////////////////////////////////////////////////////////////////////////// |
1020 | // Round to Integral |
1021 | |
1022 | let ArchGuard = "__ARM_ARCH >= 8 && defined(__ARM_FEATURE_DIRECTED_ROUNDING)" in { |
1023 | def FRINTN_S32 : SInst<"vrndn", "dd", "fQf">; |
1024 | def FRINTA_S32 : SInst<"vrnda", "dd", "fQf">; |
1025 | def FRINTP_S32 : SInst<"vrndp", "dd", "fQf">; |
1026 | def FRINTM_S32 : SInst<"vrndm", "dd", "fQf">; |
1027 | def FRINTX_S32 : SInst<"vrndx", "dd", "fQf">; |
1028 | def FRINTZ_S32 : SInst<"vrnd", "dd", "fQf">; |
1029 | def FRINTI_S32 : SInst<"vrndi", "dd", "fQf">; |
1030 | } |
1031 | |
1032 | let ArchGuard = "__ARM_ARCH >= 8 && defined(__aarch64__) && defined(__ARM_FEATURE_DIRECTED_ROUNDING)" in { |
1033 | def FRINTN_S64 : SInst<"vrndn", "dd", "dQd">; |
1034 | def FRINTA_S64 : SInst<"vrnda", "dd", "dQd">; |
1035 | def FRINTP_S64 : SInst<"vrndp", "dd", "dQd">; |
1036 | def FRINTM_S64 : SInst<"vrndm", "dd", "dQd">; |
1037 | def FRINTX_S64 : SInst<"vrndx", "dd", "dQd">; |
1038 | def FRINTZ_S64 : SInst<"vrnd", "dd", "dQd">; |
1039 | def FRINTI_S64 : SInst<"vrndi", "dd", "dQd">; |
1040 | } |
1041 | |
1042 | //////////////////////////////////////////////////////////////////////////////// |
1043 | // MaxNum/MinNum Floating Point |
1044 | |
1045 | let ArchGuard = "__ARM_ARCH >= 8 && defined(__ARM_FEATURE_NUMERIC_MAXMIN)" in { |
1046 | def FMAXNM_S32 : SInst<"vmaxnm", "ddd", "fQf">; |
1047 | def FMINNM_S32 : SInst<"vminnm", "ddd", "fQf">; |
1048 | } |
1049 | |
1050 | let ArchGuard = "__ARM_ARCH >= 8 && defined(__aarch64__) && defined(__ARM_FEATURE_NUMERIC_MAXMIN)" in { |
1051 | def FMAXNM_S64 : SInst<"vmaxnm", "ddd", "dQd">; |
1052 | def FMINNM_S64 : SInst<"vminnm", "ddd", "dQd">; |
1053 | } |
1054 | |
1055 | //////////////////////////////////////////////////////////////////////////////// |
1056 | // Permutation |
1057 | def VTRN1 : SOpInst<"vtrn1", "ddd", |
1058 | "csiUcUsUifPcPsQcQsQiQlQUcQUsQUiQUlQfQdQPcQPsQPl", OP_TRN1>; |
1059 | def VZIP1 : SOpInst<"vzip1", "ddd", |
1060 | "csiUcUsUifPcPsQcQsQiQlQUcQUsQUiQUlQfQdQPcQPsQPl", OP_ZIP1>; |
1061 | def VUZP1 : SOpInst<"vuzp1", "ddd", |
1062 | "csiUcUsUifPcPsQcQsQiQlQUcQUsQUiQUlQfQdQPcQPsQPl", OP_UZP1>; |
1063 | def VTRN2 : SOpInst<"vtrn2", "ddd", |
1064 | "csiUcUsUifPcPsQcQsQiQlQUcQUsQUiQUlQfQdQPcQPsQPl", OP_TRN2>; |
1065 | def VZIP2 : SOpInst<"vzip2", "ddd", |
1066 | "csiUcUsUifPcPsQcQsQiQlQUcQUsQUiQUlQfQdQPcQPsQPl", OP_ZIP2>; |
1067 | def VUZP2 : SOpInst<"vuzp2", "ddd", |
1068 | "csiUcUsUifPcPsQcQsQiQlQUcQUsQUiQUlQfQdQPcQPsQPl", OP_UZP2>; |
1069 | |
1070 | //////////////////////////////////////////////////////////////////////////////// |
1071 | // Table lookup |
1072 | let InstName = "vtbl" in { |
1073 | def VQTBL1_A64 : WInst<"vqtbl1", "djt", "UccPcQUcQcQPc">; |
1074 | def VQTBL2_A64 : WInst<"vqtbl2", "dBt", "UccPcQUcQcQPc">; |
1075 | def VQTBL3_A64 : WInst<"vqtbl3", "dCt", "UccPcQUcQcQPc">; |
1076 | def VQTBL4_A64 : WInst<"vqtbl4", "dDt", "UccPcQUcQcQPc">; |
1077 | } |
1078 | let InstName = "vtbx" in { |
1079 | def VQTBX1_A64 : WInst<"vqtbx1", "ddjt", "UccPcQUcQcQPc">; |
1080 | def VQTBX2_A64 : WInst<"vqtbx2", "ddBt", "UccPcQUcQcQPc">; |
1081 | def VQTBX3_A64 : WInst<"vqtbx3", "ddCt", "UccPcQUcQcQPc">; |
1082 | def VQTBX4_A64 : WInst<"vqtbx4", "ddDt", "UccPcQUcQcQPc">; |
1083 | } |
1084 | |
1085 | //////////////////////////////////////////////////////////////////////////////// |
1086 | // Vector reinterpret cast operations |
1087 | |
// NeonEmitter implicitly takes the cartesian product of the type string with
// itself during generation, so unlike all other intrinsics this one should
// include *all* types, not just the additional ones.
1091 | def VVREINTERPRET |
1092 | : NoTestOpInst<"vreinterpret", "dd", |
1093 | "csilUcUsUiUlhfdPcPsPlQcQsQiQlQUcQUsQUiQUlQhQfQdQPcQPsQPlQPk", OP_REINT> { |
1094 | let CartesianProductOfTypes = 1; |
1095 | let BigEndianSafe = 1; |
1096 | let ArchGuard = "__ARM_ARCH >= 8 && defined(__aarch64__)"; |
1097 | } |
1098 | |
1099 | //////////////////////////////////////////////////////////////////////////////// |
1100 | // Scalar Intrinsics |
1101 | // Scalar Arithmetic |
1102 | |
1103 | // Scalar Addition |
1104 | def SCALAR_ADD : SInst<"vadd", "sss", "SlSUl">; |
1105 | // Scalar Saturating Add |
1106 | def SCALAR_QADD : SInst<"vqadd", "sss", "ScSsSiSlSUcSUsSUiSUl">; |
1107 | |
1108 | // Scalar Subtraction |
1109 | def SCALAR_SUB : SInst<"vsub", "sss", "SlSUl">; |
1110 | // Scalar Saturating Sub |
1111 | def SCALAR_QSUB : SInst<"vqsub", "sss", "ScSsSiSlSUcSUsSUiSUl">; |
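// For illustration only (assumed generated declarations):
//   int64_t vaddd_s64(int64_t a, int64_t b);
//   int8_t  vqaddb_s8(int8_t a, int8_t b);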
1112 | |
1113 | let InstName = "vmov" in { |
1114 | def VGET_HIGH_A64 : NoTestOpInst<"vget_high", "dk", "dPl", OP_HI>; |
1115 | def VGET_LOW_A64 : NoTestOpInst<"vget_low", "dk", "dPl", OP_LO>; |
1116 | } |
1117 | |
1118 | //////////////////////////////////////////////////////////////////////////////// |
1119 | // Scalar Shift |
1120 | // Scalar Shift Left |
1121 | def SCALAR_SHL: SInst<"vshl", "sss", "SlSUl">; |
1122 | // Scalar Saturating Shift Left |
1123 | def SCALAR_QSHL: SInst<"vqshl", "sss", "ScSsSiSlSUcSUsSUiSUl">; |
1124 | // Scalar Saturating Rounding Shift Left |
1125 | def SCALAR_QRSHL: SInst<"vqrshl", "sss", "ScSsSiSlSUcSUsSUiSUl">; |
// Scalar Rounding Shift Left
1127 | def SCALAR_RSHL: SInst<"vrshl", "sss", "SlSUl">; |
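// For illustration only (assumed generated declarations):
//   int64_t vshld_s64(int64_t a, int64_t b);
//   int8_t  vqshlb_s8(int8_t a, int8_t b);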
1128 | |
1129 | //////////////////////////////////////////////////////////////////////////////// |
1130 | // Scalar Shift (Immediate) |
1131 | let isScalarShift = 1 in { |
1132 | // Signed/Unsigned Shift Right (Immediate) |
1133 | def SCALAR_SSHR_N: SInst<"vshr_n", "ssi", "SlSUl">; |
1134 | // Signed/Unsigned Rounding Shift Right (Immediate) |
1135 | def SCALAR_SRSHR_N: SInst<"vrshr_n", "ssi", "SlSUl">; |
1136 | |
1137 | // Signed/Unsigned Shift Right and Accumulate (Immediate) |
1138 | def SCALAR_SSRA_N: SInst<"vsra_n", "sssi", "SlSUl">; |
1139 | // Signed/Unsigned Rounding Shift Right and Accumulate (Immediate) |
1140 | def SCALAR_SRSRA_N: SInst<"vrsra_n", "sssi", "SlSUl">; |
1141 | |
1142 | // Shift Left (Immediate) |
1143 | def SCALAR_SHL_N: SInst<"vshl_n", "ssi", "SlSUl">; |
1144 | // Signed/Unsigned Saturating Shift Left (Immediate) |
1145 | def SCALAR_SQSHL_N: SInst<"vqshl_n", "ssi", "ScSsSiSlSUcSUsSUiSUl">; |
1146 | // Signed Saturating Shift Left Unsigned (Immediate) |
1147 | def SCALAR_SQSHLU_N: SInst<"vqshlu_n", "ssi", "ScSsSiSl">; |
1148 | |
1149 | // Shift Right And Insert (Immediate) |
1150 | def SCALAR_SRI_N: SInst<"vsri_n", "sssi", "SlSUl">; |
1151 | // Shift Left And Insert (Immediate) |
1152 | def SCALAR_SLI_N: SInst<"vsli_n", "sssi", "SlSUl">; |
1153 | |
1154 | let isScalarNarrowShift = 1 in { |
1155 | // Signed/Unsigned Saturating Shift Right Narrow (Immediate) |
1156 | def SCALAR_SQSHRN_N: SInst<"vqshrn_n", "zsi", "SsSiSlSUsSUiSUl">; |
1157 | // Signed/Unsigned Saturating Rounded Shift Right Narrow (Immediate) |
1158 | def SCALAR_SQRSHRN_N: SInst<"vqrshrn_n", "zsi", "SsSiSlSUsSUiSUl">; |
1159 | // Signed Saturating Shift Right Unsigned Narrow (Immediate) |
1160 | def SCALAR_SQSHRUN_N: SInst<"vqshrun_n", "zsi", "SsSiSl">; |
1161 | // Signed Saturating Rounded Shift Right Unsigned Narrow (Immediate) |
1162 | def SCALAR_SQRSHRUN_N: SInst<"vqrshrun_n", "zsi", "SsSiSl">; |
1163 | } |
1164 | |
1165 | //////////////////////////////////////////////////////////////////////////////// |
1166 | // Scalar Signed/Unsigned Fixed-point Convert To Floating-Point (Immediate) |
1167 | def SCALAR_SCVTF_N_F32: SInst<"vcvt_n_f32", "ysi", "SiSUi">; |
1168 | def SCALAR_SCVTF_N_F64: SInst<"vcvt_n_f64", "osi", "SlSUl">; |
1169 | |
1170 | //////////////////////////////////////////////////////////////////////////////// |
1171 | // Scalar Floating-point Convert To Signed/Unsigned Fixed-point (Immediate) |
1172 | def SCALAR_FCVTZS_N_S32 : SInst<"vcvt_n_s32", "$si", "Sf">; |
1173 | def SCALAR_FCVTZU_N_U32 : SInst<"vcvt_n_u32", "bsi", "Sf">; |
1174 | def SCALAR_FCVTZS_N_S64 : SInst<"vcvt_n_s64", "$si", "Sd">; |
1175 | def SCALAR_FCVTZU_N_U64 : SInst<"vcvt_n_u64", "bsi", "Sd">; |
1176 | } |
1177 | |
1178 | //////////////////////////////////////////////////////////////////////////////// |
1179 | // Scalar Floating-point Round to Integral |
1180 | let ArchGuard = "__ARM_ARCH >= 8 && defined(__ARM_FEATURE_DIRECTED_ROUNDING)" in { |
1181 | def SCALAR_FRINTN_S32 : SInst<"vrndn", "ss", "Sf">; |
1182 | } |
1183 | |
1184 | //////////////////////////////////////////////////////////////////////////////// |
1185 | // Scalar Reduce Pairwise Addition (Scalar and Floating Point) |
1186 | def SCALAR_ADDP : SInst<"vpadd", "sd", "SfSHlSHdSHUl">; |
1187 | |
1188 | //////////////////////////////////////////////////////////////////////////////// |
1189 | // Scalar Reduce Floating Point Pairwise Max/Min |
1190 | def SCALAR_FMAXP : SInst<"vpmax", "sd", "SfSQd">; |
1191 | |
1192 | def SCALAR_FMINP : SInst<"vpmin", "sd", "SfSQd">; |
1193 | |
1194 | //////////////////////////////////////////////////////////////////////////////// |
1195 | // Scalar Reduce Floating Point Pairwise maxNum/minNum |
1196 | def SCALAR_FMAXNMP : SInst<"vpmaxnm", "sd", "SfSQd">; |
1197 | def SCALAR_FMINNMP : SInst<"vpminnm", "sd", "SfSQd">; |
1198 | |
1199 | //////////////////////////////////////////////////////////////////////////////// |
// Scalar Integer Saturating Doubling Multiply Returning High Half
1201 | def SCALAR_SQDMULH : SInst<"vqdmulh", "sss", "SsSi">; |
1202 | |
1203 | //////////////////////////////////////////////////////////////////////////////// |
// Scalar Integer Saturating Rounding Doubling Multiply Returning High Half
1205 | def SCALAR_SQRDMULH : SInst<"vqrdmulh", "sss", "SsSi">; |
1206 | |
1207 | let ArchGuard = "defined(__ARM_FEATURE_QRDMX) && defined(__aarch64__)" in { |
1208 | //////////////////////////////////////////////////////////////////////////////// |
1209 | // Signed Saturating Rounding Doubling Multiply Accumulate Returning High Half |
1210 | def SCALAR_SQRDMLAH : SOpInst<"vqrdmlah", "ssss", "SsSi", OP_QRDMLAH>; |
1211 | |
1212 | //////////////////////////////////////////////////////////////////////////////// |
1213 | // Signed Saturating Rounding Doubling Multiply Subtract Returning High Half |
1214 | def SCALAR_SQRDMLSH : SOpInst<"vqrdmlsh", "ssss", "SsSi", OP_QRDMLSH>; |
1215 | } |
1216 | |
1217 | //////////////////////////////////////////////////////////////////////////////// |
1218 | // Scalar Floating-point Multiply Extended |
1219 | def SCALAR_FMULX : IInst<"vmulx", "sss", "SfSd">; |
1220 | |
1221 | //////////////////////////////////////////////////////////////////////////////// |
1222 | // Scalar Floating-point Reciprocal Step |
1223 | def SCALAR_FRECPS : IInst<"vrecps", "sss", "SfSd">; |
1224 | |
1225 | //////////////////////////////////////////////////////////////////////////////// |
1226 | // Scalar Floating-point Reciprocal Square Root Step |
1227 | def SCALAR_FRSQRTS : IInst<"vrsqrts", "sss", "SfSd">; |
1228 | |
1229 | //////////////////////////////////////////////////////////////////////////////// |
1230 | // Scalar Signed Integer Convert To Floating-point |
1231 | def SCALAR_SCVTFS : SInst<"vcvt_f32", "ys", "Si">; |
1232 | def SCALAR_SCVTFD : SInst<"vcvt_f64", "os", "Sl">; |
1233 | |
1234 | //////////////////////////////////////////////////////////////////////////////// |
1235 | // Scalar Unsigned Integer Convert To Floating-point |
1236 | def SCALAR_UCVTFS : SInst<"vcvt_f32", "ys", "SUi">; |
1237 | def SCALAR_UCVTFD : SInst<"vcvt_f64", "os", "SUl">; |
1238 | |
1239 | //////////////////////////////////////////////////////////////////////////////// |
1240 | // Scalar Floating-point Converts |
1241 | def SCALAR_FCVTXN : IInst<"vcvtx_f32", "ys", "Sd">; |
1242 | def SCALAR_FCVTNSS : SInst<"vcvtn_s32", "$s", "Sf">; |
1243 | def SCALAR_FCVTNUS : SInst<"vcvtn_u32", "bs", "Sf">; |
1244 | def SCALAR_FCVTNSD : SInst<"vcvtn_s64", "$s", "Sd">; |
1245 | def SCALAR_FCVTNUD : SInst<"vcvtn_u64", "bs", "Sd">; |
1246 | def SCALAR_FCVTMSS : SInst<"vcvtm_s32", "$s", "Sf">; |
1247 | def SCALAR_FCVTMUS : SInst<"vcvtm_u32", "bs", "Sf">; |
1248 | def SCALAR_FCVTMSD : SInst<"vcvtm_s64", "$s", "Sd">; |
1249 | def SCALAR_FCVTMUD : SInst<"vcvtm_u64", "bs", "Sd">; |
1250 | def SCALAR_FCVTASS : SInst<"vcvta_s32", "$s", "Sf">; |
1251 | def SCALAR_FCVTAUS : SInst<"vcvta_u32", "bs", "Sf">; |
1252 | def SCALAR_FCVTASD : SInst<"vcvta_s64", "$s", "Sd">; |
1253 | def SCALAR_FCVTAUD : SInst<"vcvta_u64", "bs", "Sd">; |
1254 | def SCALAR_FCVTPSS : SInst<"vcvtp_s32", "$s", "Sf">; |
1255 | def SCALAR_FCVTPUS : SInst<"vcvtp_u32", "bs", "Sf">; |
1256 | def SCALAR_FCVTPSD : SInst<"vcvtp_s64", "$s", "Sd">; |
1257 | def SCALAR_FCVTPUD : SInst<"vcvtp_u64", "bs", "Sd">; |
1258 | def SCALAR_FCVTZSS : SInst<"vcvt_s32", "$s", "Sf">; |
1259 | def SCALAR_FCVTZUS : SInst<"vcvt_u32", "bs", "Sf">; |
1260 | def SCALAR_FCVTZSD : SInst<"vcvt_s64", "$s", "Sd">; |
1261 | def SCALAR_FCVTZUD : SInst<"vcvt_u64", "bs", "Sd">; |
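// For illustration only (assumed generated declarations):
//   int32_t  vcvtns_s32_f32(float32_t a);
//   uint64_t vcvtad_u64_f64(float64_t a);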
1262 | |
1263 | //////////////////////////////////////////////////////////////////////////////// |
1264 | // Scalar Floating-point Reciprocal Estimate |
1265 | def SCALAR_FRECPE : IInst<"vrecpe", "ss", "SfSd">; |
1266 | |
1267 | //////////////////////////////////////////////////////////////////////////////// |
1268 | // Scalar Floating-point Reciprocal Exponent |
1269 | def SCALAR_FRECPX : IInst<"vrecpx", "ss", "SfSd">; |
1270 | |
1271 | //////////////////////////////////////////////////////////////////////////////// |
1272 | // Scalar Floating-point Reciprocal Square Root Estimate |
1273 | def SCALAR_FRSQRTE : IInst<"vrsqrte", "ss", "SfSd">; |
1274 | |
1275 | //////////////////////////////////////////////////////////////////////////////// |
1276 | // Scalar Integer Comparison |
1277 | def SCALAR_CMEQ : SInst<"vceq", "sss", "SlSUl">; |
1278 | def SCALAR_CMEQZ : SInst<"vceqz", "ss", "SlSUl">; |
1279 | def SCALAR_CMGE : SInst<"vcge", "sss", "Sl">; |
1280 | def SCALAR_CMGEZ : SInst<"vcgez", "ss", "Sl">; |
1281 | def SCALAR_CMHS : SInst<"vcge", "sss", "SUl">; |
1282 | def SCALAR_CMLE : SInst<"vcle", "sss", "SlSUl">; |
1283 | def SCALAR_CMLEZ : SInst<"vclez", "ss", "Sl">; |
1284 | def SCALAR_CMLT : SInst<"vclt", "sss", "SlSUl">; |
1285 | def SCALAR_CMLTZ : SInst<"vcltz", "ss", "Sl">; |
1286 | def SCALAR_CMGT : SInst<"vcgt", "sss", "Sl">; |
1287 | def SCALAR_CMGTZ : SInst<"vcgtz", "ss", "Sl">; |
1288 | def SCALAR_CMHI : SInst<"vcgt", "sss", "SUl">; |
1289 | def SCALAR_CMTST : SInst<"vtst", "sss", "SlSUl">; |
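// For illustration only (assumed generated declarations): the comparisons
// return an all-ones/all-zeros mask of the same width, e.g.
//   uint64_t vceqd_s64(int64_t a, int64_t b);
//   uint64_t vtstd_s64(int64_t a, int64_t b);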
1290 | |
1291 | //////////////////////////////////////////////////////////////////////////////// |
1292 | // Scalar Floating-point Comparison |
1293 | def SCALAR_FCMEQ : IInst<"vceq", "bss", "SfSd">; |
1294 | def SCALAR_FCMEQZ : IInst<"vceqz", "bs", "SfSd">; |
1295 | def SCALAR_FCMGE : IInst<"vcge", "bss", "SfSd">; |
1296 | def SCALAR_FCMGEZ : IInst<"vcgez", "bs", "SfSd">; |
1297 | def SCALAR_FCMGT : IInst<"vcgt", "bss", "SfSd">; |
1298 | def SCALAR_FCMGTZ : IInst<"vcgtz", "bs", "SfSd">; |
1299 | def SCALAR_FCMLE : IInst<"vcle", "bss", "SfSd">; |
1300 | def SCALAR_FCMLEZ : IInst<"vclez", "bs", "SfSd">; |
1301 | def SCALAR_FCMLT : IInst<"vclt", "bss", "SfSd">; |
1302 | def SCALAR_FCMLTZ : IInst<"vcltz", "bs", "SfSd">; |
1303 | |
1304 | //////////////////////////////////////////////////////////////////////////////// |
1305 | // Scalar Floating-point Absolute Compare Mask Greater Than Or Equal |
1306 | def SCALAR_FACGE : IInst<"vcage", "bss", "SfSd">; |
1307 | def SCALAR_FACLE : IInst<"vcale", "bss", "SfSd">; |
1308 | |
1309 | //////////////////////////////////////////////////////////////////////////////// |
1310 | // Scalar Floating-point Absolute Compare Mask Greater Than |
1311 | def SCALAR_FACGT : IInst<"vcagt", "bss", "SfSd">; |
1312 | def SCALAR_FACLT : IInst<"vcalt", "bss", "SfSd">; |
1313 | |
1314 | //////////////////////////////////////////////////////////////////////////////// |
1315 | // Scalar Absolute Value |
1316 | def SCALAR_ABS : SInst<"vabs", "ss", "Sl">; |
1317 | |
1318 | //////////////////////////////////////////////////////////////////////////////// |
1319 | // Scalar Absolute Difference |
1320 | def SCALAR_ABD : IInst<"vabd", "sss", "SfSd">; |
1321 | |
1322 | //////////////////////////////////////////////////////////////////////////////// |
1323 | // Scalar Signed Saturating Absolute Value |
1324 | def SCALAR_SQABS : SInst<"vqabs", "ss", "ScSsSiSl">; |
1325 | |
1326 | //////////////////////////////////////////////////////////////////////////////// |
1327 | // Scalar Negate |
1328 | def SCALAR_NEG : SInst<"vneg", "ss", "Sl">; |
1329 | |
1330 | //////////////////////////////////////////////////////////////////////////////// |
1331 | // Scalar Signed Saturating Negate |
1332 | def SCALAR_SQNEG : SInst<"vqneg", "ss", "ScSsSiSl">; |
1333 | |
1334 | //////////////////////////////////////////////////////////////////////////////// |
// Scalar Signed Saturating Accumulate of Unsigned Value
1336 | def SCALAR_SUQADD : SInst<"vuqadd", "sss", "ScSsSiSl">; |
1337 | |
1338 | //////////////////////////////////////////////////////////////////////////////// |
// Scalar Unsigned Saturating Accumulate of Signed Value
1340 | def SCALAR_USQADD : SInst<"vsqadd", "sss", "SUcSUsSUiSUl">; |
1341 | |
1342 | //////////////////////////////////////////////////////////////////////////////// |
1343 | // Signed Saturating Doubling Multiply-Add Long |
1344 | def SCALAR_SQDMLAL : SInst<"vqdmlal", "rrss", "SsSi">; |
1345 | |
1346 | //////////////////////////////////////////////////////////////////////////////// |
1347 | // Signed Saturating Doubling Multiply-Subtract Long |
1348 | def SCALAR_SQDMLSL : SInst<"vqdmlsl", "rrss", "SsSi">; |
1349 | |
1350 | //////////////////////////////////////////////////////////////////////////////// |
1351 | // Signed Saturating Doubling Multiply Long |
1352 | def SCALAR_SQDMULL : SInst<"vqdmull", "rss", "SsSi">; |
1353 | |
1354 | //////////////////////////////////////////////////////////////////////////////// |
1355 | // Scalar Signed Saturating Extract Unsigned Narrow |
1356 | def SCALAR_SQXTUN : SInst<"vqmovun", "zs", "SsSiSl">; |
1357 | |
1358 | //////////////////////////////////////////////////////////////////////////////// |
1359 | // Scalar Signed Saturating Extract Narrow |
1360 | def SCALAR_SQXTN : SInst<"vqmovn", "zs", "SsSiSl">; |
1361 | |
1362 | //////////////////////////////////////////////////////////////////////////////// |
1363 | // Scalar Unsigned Saturating Extract Narrow |
1364 | def SCALAR_UQXTN : SInst<"vqmovn", "zs", "SUsSUiSUl">; |
1365 | |
1366 | // Scalar Floating Point multiply (scalar, by element) |
1367 | def SCALAR_FMUL_LANE : IOpInst<"vmul_lane", "ssdi", "SfSd", OP_SCALAR_MUL_LN>; |
1368 | def SCALAR_FMUL_LANEQ : IOpInst<"vmul_laneq", "ssji", "SfSd", OP_SCALAR_MUL_LN>; |
1369 | |
1370 | // Scalar Floating Point multiply extended (scalar, by element) |
1371 | def SCALAR_FMULX_LANE : IOpInst<"vmulx_lane", "ssdi", "SfSd", OP_SCALAR_MULX_LN>; |
1372 | def SCALAR_FMULX_LANEQ : IOpInst<"vmulx_laneq", "ssji", "SfSd", OP_SCALAR_MULX_LN>; |
1373 | |
1374 | def SCALAR_VMUL_N : IInst<"vmul_n", "dds", "d">; |
1375 | |
// VMUL_LANE_A64: the f64 ('d' type) variant is implemented via the scalar mul-lane form.
1377 | def SCALAR_VMUL_LANE : IInst<"vmul_lane", "ddgi", "d">; |
1378 | |
// VMUL_LANEQ: the f64 ('d' type) variant is implemented via the scalar mul-lane form.
1380 | def SCALAR_VMUL_LANEQ : IInst<"vmul_laneq", "ddji", "d"> { |
1381 | let isLaneQ = 1; |
1382 | } |
1383 | |
// VMULX_LANE: the f64 ('d' type) variant is implemented via scalar vmulx_lane.
1385 | def SCALAR_VMULX_LANE : IOpInst<"vmulx_lane", "ddgi", "d", OP_SCALAR_VMULX_LN>; |
1386 | |
// VMULX_LANEQ: the f64 ('d' type) variant is implemented via scalar vmulx_laneq.
1388 | def SCALAR_VMULX_LANEQ : IOpInst<"vmulx_laneq", "ddji", "d", OP_SCALAR_VMULX_LNQ>; |
1389 | |
1390 | // Scalar Floating Point fused multiply-add (scalar, by element) |
1391 | def SCALAR_FMLA_LANE : IInst<"vfma_lane", "sssdi", "SfSd">; |
1392 | def SCALAR_FMLA_LANEQ : IInst<"vfma_laneq", "sssji", "SfSd">; |
1393 | |
1394 | // Scalar Floating Point fused multiply-subtract (scalar, by element) |
1395 | def SCALAR_FMLS_LANE : IOpInst<"vfms_lane", "sssdi", "SfSd", OP_FMS_LN>; |
1396 | def SCALAR_FMLS_LANEQ : IOpInst<"vfms_laneq", "sssji", "SfSd", OP_FMS_LNQ>; |
1397 | |
1398 | // Signed Saturating Doubling Multiply Long (scalar by element) |
1399 | def SCALAR_SQDMULL_LANE : SOpInst<"vqdmull_lane", "rsdi", "SsSi", OP_SCALAR_QDMULL_LN>; |
1400 | def SCALAR_SQDMULL_LANEQ : SOpInst<"vqdmull_laneq", "rsji", "SsSi", OP_SCALAR_QDMULL_LN>; |
1401 | |
1402 | // Signed Saturating Doubling Multiply-Add Long (scalar by element) |
1403 | def SCALAR_SQDMLAL_LANE : SInst<"vqdmlal_lane", "rrsdi", "SsSi">; |
1404 | def SCALAR_SQDMLAL_LANEQ : SInst<"vqdmlal_laneq", "rrsji", "SsSi">; |
1405 | |
1406 | // Signed Saturating Doubling Multiply-Subtract Long (scalar by element) |
1407 | def SCALAR_SQDMLS_LANE : SInst<"vqdmlsl_lane", "rrsdi", "SsSi">; |
1408 | def SCALAR_SQDMLS_LANEQ : SInst<"vqdmlsl_laneq", "rrsji", "SsSi">; |
1409 | |
// Scalar Integer Saturating Doubling Multiply Returning High Half (scalar by element)
1411 | def SCALAR_SQDMULH_LANE : SOpInst<"vqdmulh_lane", "ssdi", "SsSi", OP_SCALAR_QDMULH_LN>; |
1412 | def SCALAR_SQDMULH_LANEQ : SOpInst<"vqdmulh_laneq", "ssji", "SsSi", OP_SCALAR_QDMULH_LN>; |
1413 | |
// Scalar Integer Saturating Rounding Doubling Multiply Returning High Half (scalar by element)
1415 | def SCALAR_SQRDMULH_LANE : SOpInst<"vqrdmulh_lane", "ssdi", "SsSi", OP_SCALAR_QRDMULH_LN>; |
1416 | def SCALAR_SQRDMULH_LANEQ : SOpInst<"vqrdmulh_laneq", "ssji", "SsSi", OP_SCALAR_QRDMULH_LN>; |
1417 | |
1418 | let ArchGuard = "defined(__ARM_FEATURE_QRDMX) && defined(__aarch64__)" in { |
1419 | // Signed Saturating Rounding Doubling Multiply Accumulate Returning High Half |
1420 | def SCALAR_SQRDMLAH_LANE : SOpInst<"vqrdmlah_lane", "sssdi", "SsSi", OP_SCALAR_QRDMLAH_LN>; |
1421 | def SCALAR_SQRDMLAH_LANEQ : SOpInst<"vqrdmlah_laneq", "sssji", "SsSi", OP_SCALAR_QRDMLAH_LN>; |
1422 | |
1423 | // Signed Saturating Rounding Doubling Multiply Subtract Returning High Half |
1424 | def SCALAR_SQRDMLSH_LANE : SOpInst<"vqrdmlsh_lane", "sssdi", "SsSi", OP_SCALAR_QRDMLSH_LN>; |
1425 | def SCALAR_SQRDMLSH_LANEQ : SOpInst<"vqrdmlsh_laneq", "sssji", "SsSi", OP_SCALAR_QRDMLSH_LN>; |
1426 | } |
1427 | |
1428 | def SCALAR_VDUP_LANE : IInst<"vdup_lane", "sdi", "ScSsSiSlSfSdSUcSUsSUiSUlSPcSPs">; |
1429 | def SCALAR_VDUP_LANEQ : IInst<"vdup_laneq", "sji", "ScSsSiSlSfSdSUcSUsSUiSUlSPcSPs">; |
1430 | } |
1431 | |
1432 | // ARMv8.2-A FP16 vector intrinsics for A32/A64. |
1433 | let ArchGuard = "defined(__ARM_FEATURE_FP16_VECTOR_ARITHMETIC)" in { |
1434 | |
1435 | // ARMv8.2-A FP16 one-operand vector intrinsics. |
1436 | |
1437 | // Comparison |
1438 | def CMEQH : SInst<"vceqz", "ud", "hQh">; |
1439 | def CMGEH : SInst<"vcgez", "ud", "hQh">; |
1440 | def CMGTH : SInst<"vcgtz", "ud", "hQh">; |
1441 | def CMLEH : SInst<"vclez", "ud", "hQh">; |
1442 | def CMLTH : SInst<"vcltz", "ud", "hQh">; |
1443 | |
1444 | // Vector conversion |
1445 | def VCVT_F16 : SInst<"vcvt_f16", "Hd", "sUsQsQUs">; |
1446 | def VCVT_S16 : SInst<"vcvt_s16", "xd", "hQh">; |
1447 | def VCVT_U16 : SInst<"vcvt_u16", "ud", "hQh">; |
1448 | def VCVTA_S16 : SInst<"vcvta_s16", "xd", "hQh">; |
1449 | def VCVTA_U16 : SInst<"vcvta_u16", "ud", "hQh">; |
1450 | def VCVTM_S16 : SInst<"vcvtm_s16", "xd", "hQh">; |
1451 | def VCVTM_U16 : SInst<"vcvtm_u16", "ud", "hQh">; |
1452 | def VCVTN_S16 : SInst<"vcvtn_s16", "xd", "hQh">; |
1453 | def VCVTN_U16 : SInst<"vcvtn_u16", "ud", "hQh">; |
1454 | def VCVTP_S16 : SInst<"vcvtp_s16", "xd", "hQh">; |
1455 | def VCVTP_U16 : SInst<"vcvtp_u16", "ud", "hQh">; |
1456 | |
1457 | // Vector rounding |
1458 | let ArchGuard = "__ARM_ARCH >= 8 && defined(__ARM_FEATURE_DIRECTED_ROUNDING) && defined(__ARM_FEATURE_FP16_VECTOR_ARITHMETIC)" in { |
1459 | def FRINTZH : SInst<"vrnd", "dd", "hQh">; |
1460 | def FRINTNH : SInst<"vrndn", "dd", "hQh">; |
1461 | def FRINTAH : SInst<"vrnda", "dd", "hQh">; |
1462 | def FRINTPH : SInst<"vrndp", "dd", "hQh">; |
1463 | def FRINTMH : SInst<"vrndm", "dd", "hQh">; |
1464 | def FRINTXH : SInst<"vrndx", "dd", "hQh">; |
1465 | } |
1466 | |
1467 | // Misc. |
1468 | def VABSH : SInst<"vabs", "dd", "hQh">; |
1469 | def VNEGH : SOpInst<"vneg", "dd", "hQh", OP_NEG>; |
1470 | def VRECPEH : SInst<"vrecpe", "dd", "hQh">; |
1471 | def FRSQRTEH : SInst<"vrsqrte", "dd", "hQh">; |
1472 | |
// ARMv8.2-A FP16 two-operand vector intrinsics.
1474 | |
1475 | // Misc. |
1476 | def VADDH : SOpInst<"vadd", "ddd", "hQh", OP_ADD>; |
1477 | def VABDH : SInst<"vabd", "ddd", "hQh">; |
1478 | def VSUBH : SOpInst<"vsub", "ddd", "hQh", OP_SUB>; |
1479 | |
1480 | // Comparison |
1481 | let InstName = "vacge" in { |
1482 | def VCAGEH : SInst<"vcage", "udd", "hQh">; |
1483 | def VCALEH : SInst<"vcale", "udd", "hQh">; |
1484 | } |
1485 | let InstName = "vacgt" in { |
1486 | def VCAGTH : SInst<"vcagt", "udd", "hQh">; |
1487 | def VCALTH : SInst<"vcalt", "udd", "hQh">; |
1488 | } |
1489 | def VCEQH : SOpInst<"vceq", "udd", "hQh", OP_EQ>; |
1490 | def VCGEH : SOpInst<"vcge", "udd", "hQh", OP_GE>; |
1491 | def VCGTH : SOpInst<"vcgt", "udd", "hQh", OP_GT>; |
1492 | let InstName = "vcge" in |
1493 | def VCLEH : SOpInst<"vcle", "udd", "hQh", OP_LE>; |
1494 | let InstName = "vcgt" in |
1495 | def VCLTH : SOpInst<"vclt", "udd", "hQh", OP_LT>; |
1496 | |
1497 | // Vector conversion |
1498 | let isVCVT_N = 1 in { |
1499 | def VCVT_N_F16 : SInst<"vcvt_n_f16", "Hdi", "sUsQsQUs">; |
1500 | def VCVT_N_S16 : SInst<"vcvt_n_s16", "xdi", "hQh">; |
1501 | def VCVT_N_U16 : SInst<"vcvt_n_u16", "udi", "hQh">; |
1502 | } |
1503 | |
1504 | // Max/Min |
1505 | def VMAXH : SInst<"vmax", "ddd", "hQh">; |
1506 | def VMINH : SInst<"vmin", "ddd", "hQh">; |
1507 | let ArchGuard = "__ARM_ARCH >= 8 && defined(__ARM_FEATURE_NUMERIC_MAXMIN) && defined(__ARM_FEATURE_FP16_VECTOR_ARITHMETIC)" in { |
1508 | def FMAXNMH : SInst<"vmaxnm", "ddd", "hQh">; |
1509 | def FMINNMH : SInst<"vminnm", "ddd", "hQh">; |
1510 | } |
1511 | |
1512 | // Multiplication/Division |
1513 | def VMULH : SOpInst<"vmul", "ddd", "hQh", OP_MUL>; |
1514 | |
1515 | // Pairwise addition |
1516 | def VPADDH : SInst<"vpadd", "ddd", "h">; |
1517 | |
1518 | // Pairwise Max/Min |
1519 | def VPMAXH : SInst<"vpmax", "ddd", "h">; |
1520 | def VPMINH : SInst<"vpmin", "ddd", "h">; |
1521 | |
1522 | // Reciprocal/Sqrt |
1523 | def VRECPSH : SInst<"vrecps", "ddd", "hQh">; |
1524 | def VRSQRTSH : SInst<"vrsqrts", "ddd", "hQh">; |
1525 | |
// ARMv8.2-A FP16 three-operand vector intrinsics.
1527 | |
1528 | // Vector fused multiply-add operations |
1529 | def VFMAH : SInst<"vfma", "dddd", "hQh">; |
1530 | def VFMSH : SOpInst<"vfms", "dddd", "hQh", OP_FMLS>; |
1531 | |
1532 | // ARMv8.2-A FP16 lane vector intrinsics. |
1533 | |
1534 | // Mul lane |
1535 | def VMUL_LANEH : IOpInst<"vmul_lane", "ddgi", "hQh", OP_MUL_LN>; |
1536 | def VMUL_NH : IOpInst<"vmul_n", "dds", "hQh", OP_MUL_N>; |
1537 | |
1538 | // Data processing intrinsics - section 5 |
1539 | |
1540 | // Logical operations |
1541 | let isHiddenLInst = 1 in |
1542 | def VBSLH : SInst<"vbsl", "dudd", "hQh">; |
1543 | |
1544 | // Transposition operations |
1545 | def VZIPH : WInst<"vzip", "2dd", "hQh">; |
1546 | def VUZPH : WInst<"vuzp", "2dd", "hQh">; |
1547 | def VTRNH : WInst<"vtrn", "2dd", "hQh">; |
1548 | |
1549 | |
1550 | let ArchGuard = "!defined(__aarch64__)" in { |
1551 | // Set all lanes to same value. |
1552 | // Already implemented prior to ARMv8.2-A. |
1553 | def VMOV_NH : WOpInst<"vmov_n", "ds", "hQh", OP_DUP>; |
1554 | def VDUP_NH : WOpInst<"vdup_n", "ds", "hQh", OP_DUP>; |
1555 | def VDUP_LANE1H : WOpInst<"vdup_lane", "dgi", "hQh", OP_DUP_LN>; |
1556 | } |
1557 | |
1558 | // Vector Extract |
1559 | def VEXTH : WInst<"vext", "dddi", "hQh">; |
1560 | |
1561 | // Reverse vector elements |
1562 | def VREV64H : WOpInst<"vrev64", "dd", "hQh", OP_REV64>; |
1563 | } |
1564 | |
1565 | // ARMv8.2-A FP16 vector intrinsics for A64 only. |
1566 | let ArchGuard = "defined(__ARM_FEATURE_FP16_VECTOR_ARITHMETIC) && defined(__aarch64__)" in { |
1567 | |
1568 | // Vector rounding |
1569 | def FRINTIH : SInst<"vrndi", "dd", "hQh">; |
1570 | |
1571 | // Misc. |
1572 | def FSQRTH : SInst<"vsqrt", "dd", "hQh">; |
1573 | |
1574 | // Multiplication/Division |
1575 | def MULXH : SInst<"vmulx", "ddd", "hQh">; |
1576 | def FDIVH : IOpInst<"vdiv", "ddd", "hQh", OP_DIV>; |
1577 | |
1578 | // Pairwise addition |
1579 | def VPADDH1 : SInst<"vpadd", "ddd", "Qh">; |
1580 | |
1581 | // Pairwise Max/Min |
1582 | def VPMAXH1 : SInst<"vpmax", "ddd", "Qh">; |
1583 | def VPMINH1 : SInst<"vpmin", "ddd", "Qh">; |
1584 | |
1585 | // Pairwise MaxNum/MinNum |
1586 | def FMAXNMPH : SInst<"vpmaxnm", "ddd", "hQh">; |
1587 | def FMINNMPH : SInst<"vpminnm", "ddd", "hQh">; |
1588 | |
1589 | // ARMv8.2-A FP16 lane vector intrinsics. |
1590 | |
1591 | // FMA lane |
1592 | def VFMA_LANEH : IInst<"vfma_lane", "dddgi", "hQh">; |
1593 | def VFMA_LANEQH : IInst<"vfma_laneq", "dddji", "hQh">; |
1594 | |
1595 | // FMA lane with scalar argument |
1596 | def FMLA_NH : SOpInst<"vfma_n", "ddds", "hQh", OP_FMLA_N>; |
1597 | // Scalar floating point fused multiply-add (scalar, by element) |
1598 | def SCALAR_FMLA_LANEH : IInst<"vfma_lane", "sssdi", "Sh">; |
1599 | def SCALAR_FMLA_LANEQH : IInst<"vfma_laneq", "sssji", "Sh">; |
1600 | |
1601 | // FMS lane |
1602 | def VFMS_LANEH : IOpInst<"vfms_lane", "dddgi", "hQh", OP_FMS_LN>; |
1603 | def VFMS_LANEQH : IOpInst<"vfms_laneq", "dddji", "hQh", OP_FMS_LNQ>; |
1604 | // FMS lane with scalar argument |
1605 | def FMLS_NH : SOpInst<"vfms_n", "ddds", "hQh", OP_FMLS_N>; |
// Scalar floating point fused multiply-subtract (scalar, by element)
1607 | def SCALAR_FMLS_LANEH : IOpInst<"vfms_lane", "sssdi", "Sh", OP_FMS_LN>; |
1608 | def SCALAR_FMLS_LANEQH : IOpInst<"vfms_laneq", "sssji", "Sh", OP_FMS_LNQ>; |
1609 | |
1610 | // Mul lane |
1611 | def VMUL_LANEQH : IOpInst<"vmul_laneq", "ddji", "hQh", OP_MUL_LN>; |
1612 | // Scalar floating point multiply (scalar, by element) |
1613 | def SCALAR_FMUL_LANEH : IOpInst<"vmul_lane", "ssdi", "Sh", OP_SCALAR_MUL_LN>; |
1614 | def SCALAR_FMUL_LANEQH : IOpInst<"vmul_laneq", "ssji", "Sh", OP_SCALAR_MUL_LN>; |
1615 | |
1616 | // Mulx lane |
1617 | def VMULX_LANEH : IOpInst<"vmulx_lane", "ddgi", "hQh", OP_MULX_LN>; |
1618 | def VMULX_LANEQH : IOpInst<"vmulx_laneq", "ddji", "hQh", OP_MULX_LN>; |
1619 | def VMULX_NH : IOpInst<"vmulx_n", "dds", "hQh", OP_MULX_N>; |
1620 | // Scalar floating point mulx (scalar, by element) |
1621 | def SCALAR_FMULX_LANEH : IInst<"vmulx_lane", "ssdi", "Sh">; |
1622 | def SCALAR_FMULX_LANEQH : IInst<"vmulx_laneq", "ssji", "Sh">; |
1623 | |
1624 | // ARMv8.2-A FP16 reduction vector intrinsics. |
1625 | def VMAXVH : SInst<"vmaxv", "sd", "hQh">; |
1626 | def VMINVH : SInst<"vminv", "sd", "hQh">; |
1627 | def FMAXNMVH : SInst<"vmaxnmv", "sd", "hQh">; |
1628 | def FMINNMVH : SInst<"vminnmv", "sd", "hQh">; |
1629 | |
1630 | // Permutation |
1631 | def VTRN1H : SOpInst<"vtrn1", "ddd", "hQh", OP_TRN1>; |
1632 | def VZIP1H : SOpInst<"vzip1", "ddd", "hQh", OP_ZIP1>; |
1633 | def VUZP1H : SOpInst<"vuzp1", "ddd", "hQh", OP_UZP1>; |
1634 | def VTRN2H : SOpInst<"vtrn2", "ddd", "hQh", OP_TRN2>; |
1635 | def VZIP2H : SOpInst<"vzip2", "ddd", "hQh", OP_ZIP2>; |
1636 | def VUZP2H : SOpInst<"vuzp2", "ddd", "hQh", OP_UZP2>; |
1637 | |
1638 | def SCALAR_VDUP_LANEH : IInst<"vdup_lane", "sdi", "Sh">; |
1639 | def SCALAR_VDUP_LANEQH : IInst<"vdup_laneq", "sji", "Sh">; |
1640 | } |
1641 | |
1642 | // v8.2-A dot product instructions. |
1643 | let ArchGuard = "defined(__ARM_FEATURE_DOTPROD)" in { |
1644 | def DOT : SInst<"vdot", "dd88", "iQiUiQUi">; |
1645 | def DOT_LANE : SOpInst<"vdot_lane", "dd87i", "iUiQiQUi", OP_DOT_LN>; |
1646 | } |
1647 | let ArchGuard = "defined(__ARM_FEATURE_DOTPROD) && defined(__aarch64__)" in { |
1648 | // Variants indexing into a 128-bit vector are A64 only. |
1649 | def UDOT_LANEQ : SOpInst<"vdot_laneq", "dd89i", "iUiQiQUi", OP_DOT_LNQ>; |
1650 | } |
1651 | |
1652 | // v8.2-A FP16 fused multiply-add long instructions. |
1653 | let ArchGuard = "defined(__ARM_FEATURE_FP16FML) && defined(__aarch64__)" in { |
1654 | def VFMLAL_LOW : SInst<"vfmlal_low", "ffHH", "hQh">; |
1655 | def VFMLSL_LOW : SInst<"vfmlsl_low", "ffHH", "hQh">; |
1656 | def VFMLAL_HIGH : SInst<"vfmlal_high", "ffHH", "hQh">; |
1657 | def VFMLSL_HIGH : SInst<"vfmlsl_high", "ffHH", "hQh">; |
1658 | |
1659 | def VFMLAL_LANE_LOW : SOpInst<"vfmlal_lane_low", "ffH0i", "hQh", OP_FMLAL_LN>; |
1660 | def VFMLSL_LANE_LOW : SOpInst<"vfmlsl_lane_low", "ffH0i", "hQh", OP_FMLSL_LN>; |
1661 | def VFMLAL_LANE_HIGH : SOpInst<"vfmlal_lane_high", "ffH0i", "hQh", OP_FMLAL_LN_Hi>; |
1662 | def VFMLSL_LANE_HIGH : SOpInst<"vfmlsl_lane_high", "ffH0i", "hQh", OP_FMLSL_LN_Hi>; |
1663 | |
1664 | def VFMLAL_LANEQ_LOW : SOpInst<"vfmlal_laneq_low", "ffH1i", "hQh", OP_FMLAL_LN>; |
1665 | def VFMLSL_LANEQ_LOW : SOpInst<"vfmlsl_laneq_low", "ffH1i", "hQh", OP_FMLSL_LN>; |
1666 | def VFMLAL_LANEQ_HIGH : SOpInst<"vfmlal_laneq_high", "ffH1i", "hQh", OP_FMLAL_LN_Hi>; |
1667 | def VFMLSL_LANEQ_HIGH : SOpInst<"vfmlsl_laneq_high", "ffH1i", "hQh", OP_FMLSL_LN_Hi>; |
1668 | } |
1669 | |