//===-- X86TargetTransformInfo.cpp - X86 specific TTI pass ----------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
/// \file
/// This file implements a TargetTransformInfo analysis pass specific to the
/// X86 target machine. It uses the target's detailed information to provide
/// more precise answers to certain TTI queries, while letting the target
/// independent and default TTI implementations handle the rest.
///
//===----------------------------------------------------------------------===//
/// About the cost model numbers used below: they correspond to some "generic"
/// X86 CPU rather than to a concrete CPU model. Usually the numbers correspond
/// to the CPU where the feature first appeared. For example, if we do
/// Subtarget.hasSSE42() in the lookups below, the cost is based on Nehalem as
/// that was the first CPU to support that feature level and thus most likely
/// has the worst case cost.
/// Some examples of other technologies/CPUs:
///   SSE 3 - Pentium4 / Athlon64
///   AVX - Sandy Bridge
///   AVX-512 - Xeon Phi / Skylake
/// And some examples of instruction target dependent costs (latency):
///                     divss     sqrtss    rsqrtss
///   Piledriver        9-24      13-15     5
///   Pentium II,III    18        30        2
///   Nehalem           7-14      7-18      3
///   Haswell           10-13     11        5
/// TODO: Develop and implement the target dependent cost model and
/// specialize cost numbers for different Cost Model Targets such as throughput,
/// code size, latency and uop count.
//===----------------------------------------------------------------------===//
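//
// For illustration (a hedged sketch, not code from this file): a transform
// pass reaches the numbers below through the generic TTI interface, e.g.
//
//   // Hypothetical client code; 'F' is some llvm::Function, 'VecTy' a type.
//   TargetTransformInfo &TTI =
//       getAnalysis<TargetTransformInfoWrapperPass>().getTTI(F);
//   int Cost = TTI.getArithmeticInstrCost(Instruction::FDiv, VecTy);
//
// With a target CPU at the SSE4.2 feature level, the FDIV entries guarded by
// hasSSE42() below would supply that answer.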

#include "X86TargetTransformInfo.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/CodeGen/BasicTTIImpl.h"
#include "llvm/CodeGen/CostTable.h"
#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/Support/Debug.h"

using namespace llvm;

#define DEBUG_TYPE "x86tti"
//===----------------------------------------------------------------------===//
//
// X86 cost model.
//
//===----------------------------------------------------------------------===//
TargetTransformInfo::PopcntSupportKind
X86TTIImpl::getPopcntSupport(unsigned TyWidth) {
  assert(isPowerOf2_32(TyWidth) && "Ty width must be power of 2");
  // TODO: Currently the __builtin_popcount() implementation using SSE3
  //   instructions is inefficient. Once the problem is fixed, we should
  //   call ST->hasSSE3() instead of ST->hasPOPCNT().
  return ST->hasPOPCNT() ? TTI::PSK_FastHardware : TTI::PSK_Software;
}
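// For example (illustrative IR, not from this file): with +popcnt a scalar
//   %c = call i32 @llvm.ctpop.i32(i32 %x)
// lowers to a single POPCNT instruction, which is why PSK_FastHardware is
// reported; otherwise the bit-twiddling software expansion is assumed.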

llvm::Optional<unsigned> X86TTIImpl::getCacheSize(
    TargetTransformInfo::CacheLevel Level) const {
  switch (Level) {
  case TargetTransformInfo::CacheLevel::L1D:
    return 32 * 1024; // 32 KByte
  case TargetTransformInfo::CacheLevel::L2D:
    return 256 * 1024; // 256 KByte
  }
  llvm_unreachable("Unknown TargetTransformInfo::CacheLevel");
}

llvm::Optional<unsigned> X86TTIImpl::getCacheAssociativity(
    TargetTransformInfo::CacheLevel Level) const {
  switch (Level) {
  case TargetTransformInfo::CacheLevel::L1D:
    return 8;
  case TargetTransformInfo::CacheLevel::L2D:
    return 8;
  }
  llvm_unreachable("Unknown TargetTransformInfo::CacheLevel");
}

unsigned X86TTIImpl::getNumberOfRegisters(bool Vector) {
  if (Vector && !ST->hasSSE1())
    return 0;

  if (ST->is64Bit()) {
    if (Vector && ST->hasAVX512())
      return 32;
    return 16;
  }
  return 8;
}

unsigned X86TTIImpl::getRegisterBitWidth(bool Vector) const {
  unsigned PreferVectorWidth = ST->getPreferVectorWidth();
  if (Vector) {
    if (ST->hasAVX512() && PreferVectorWidth >= 512)
      return 512;
    if (ST->hasAVX() && PreferVectorWidth >= 256)
      return 256;
    if (ST->hasSSE1() && PreferVectorWidth >= 128)
      return 128;
    return 0;
  }

  if (ST->is64Bit())
    return 64;

  return 32;
}

unsigned X86TTIImpl::getLoadStoreVecRegBitWidth(unsigned) const {
  return getRegisterBitWidth(true);
}

unsigned X86TTIImpl::getMaxInterleaveFactor(unsigned VF) {
  // If the loop will not be vectorized, don't interleave the loop.
  // Let the regular unroller unroll the loop, which saves the overflow
  // check and memory check cost.
  if (VF == 1)
    return 1;

  if (ST->isAtom())
    return 1;

  // Sandybridge and Haswell have multiple execution ports and pipelined
  // vector units.
  if (ST->hasAVX())
    return 4;

  return 2;
}
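// For example, a loop the vectorizer handles at VF=8 on an AVX machine may be
// interleaved up to 4x, so each vector iteration covers 32 scalar iterations.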

int X86TTIImpl::getArithmeticInstrCost(
    unsigned Opcode, Type *Ty,
    TTI::OperandValueKind Op1Info, TTI::OperandValueKind Op2Info,
    TTI::OperandValueProperties Opd1PropInfo,
    TTI::OperandValueProperties Opd2PropInfo,
    ArrayRef<const Value *> Args) {
  // Legalize the type.
  std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, Ty);

  int ISD = TLI->InstructionOpcodeToISD(Opcode);
  assert(ISD && "Invalid opcode");

  static const CostTblEntry GLMCostTable[] = {
    { ISD::FDIV, MVT::f32,   18 }, // divss
    { ISD::FDIV, MVT::v4f32, 35 }, // divps
    { ISD::FDIV, MVT::f64,   33 }, // divsd
    { ISD::FDIV, MVT::v2f64, 65 }, // divpd
  };

  if (ST->isGLM())
    if (const auto *Entry = CostTableLookup(GLMCostTable, ISD,
                                            LT.second))
      return LT.first * Entry->Cost;

  static const CostTblEntry SLMCostTable[] = {
    { ISD::MUL,  MVT::v4i32, 11 }, // pmulld
    { ISD::MUL,  MVT::v8i16, 2  }, // pmullw
    { ISD::MUL,  MVT::v16i8, 14 }, // extend/pmullw/trunc sequence.
    { ISD::FMUL, MVT::f64,   2  }, // mulsd
    { ISD::FMUL, MVT::v2f64, 4  }, // mulpd
    { ISD::FMUL, MVT::v4f32, 2  }, // mulps
    { ISD::FDIV, MVT::f32,   17 }, // divss
    { ISD::FDIV, MVT::v4f32, 39 }, // divps
    { ISD::FDIV, MVT::f64,   32 }, // divsd
    { ISD::FDIV, MVT::v2f64, 69 }, // divpd
    { ISD::FADD, MVT::v2f64, 2  }, // addpd
    { ISD::FSUB, MVT::v2f64, 2  }, // subpd
    // v2i64/v4i64 mul is custom lowered as a series of long:
    // multiplies(3), shifts(3) and adds(2)
    // slm muldq version throughput is 2 and addq throughput 4
    // thus: 3X2 (muldq throughput) + 3X1 (shift throughput) +
    // 2X4 (addq throughput) = 17
    { ISD::MUL,  MVT::v2i64, 17 },
    // slm addq\subq throughput is 4
    { ISD::ADD,  MVT::v2i64, 4  },
    { ISD::SUB,  MVT::v2i64, 4  },
  };

  if (ST->isSLM()) {
    if (Args.size() == 2 && ISD == ISD::MUL && LT.second == MVT::v4i32) {
      // Check if the operands can be shrunk into a smaller datatype.
      bool Op1Signed = false;
      unsigned Op1MinSize = BaseT::minRequiredElementSize(Args[0], Op1Signed);
      bool Op2Signed = false;
      unsigned Op2MinSize = BaseT::minRequiredElementSize(Args[1], Op2Signed);

      bool signedMode = Op1Signed | Op2Signed;
      unsigned OpMinSize = std::max(Op1MinSize, Op2MinSize);

      if (OpMinSize <= 7)
        return LT.first * 3; // pmullw/sext
      if (!signedMode && OpMinSize <= 8)
        return LT.first * 3; // pmullw/zext
      if (OpMinSize <= 15)
        return LT.first * 5; // pmullw/pmulhw/pshuf
      if (!signedMode && OpMinSize <= 16)
        return LT.first * 5; // pmullw/pmulhw/pshuf
    }

    if (const auto *Entry = CostTableLookup(SLMCostTable, ISD,
                                            LT.second))
      return LT.first * Entry->Cost;
  }
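  // For instance (an illustrative case, not code from this file): a v4i32
  // multiply whose operands are both zero-extended from i8 reports
  // OpMinSize == 8 with !signedMode, so SLM costs it as LT.first * 3,
  // modelling a pmullw plus zext/trunc fixups instead of a slow pmulld.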

  if ((ISD == ISD::SDIV || ISD == ISD::SREM || ISD == ISD::UDIV ||
       ISD == ISD::UREM) &&
      (Op2Info == TargetTransformInfo::OK_UniformConstantValue ||
       Op2Info == TargetTransformInfo::OK_NonUniformConstantValue) &&
      Opd2PropInfo == TargetTransformInfo::OP_PowerOf2) {
    if (ISD == ISD::SDIV || ISD == ISD::SREM) {
      // On X86, vector signed division by a power-of-two constant is
      // normally expanded to the sequence SRA + SRL + ADD + SRA.
      // The OperandValue properties may not be the same as that of the previous
      // operation; conservatively assume OP_None.
      int Cost =
          2 * getArithmeticInstrCost(Instruction::AShr, Ty, Op1Info, Op2Info,
                                     TargetTransformInfo::OP_None,
                                     TargetTransformInfo::OP_None);
      Cost += getArithmeticInstrCost(Instruction::LShr, Ty, Op1Info, Op2Info,
                                     TargetTransformInfo::OP_None,
                                     TargetTransformInfo::OP_None);
      Cost += getArithmeticInstrCost(Instruction::Add, Ty, Op1Info, Op2Info,
                                     TargetTransformInfo::OP_None,
                                     TargetTransformInfo::OP_None);

      if (ISD == ISD::SREM) {
        // For SREM: (X % C) is the equivalent of (X - (X/C)*C)
        Cost += getArithmeticInstrCost(Instruction::Mul, Ty, Op1Info, Op2Info);
        Cost += getArithmeticInstrCost(Instruction::Sub, Ty, Op1Info, Op2Info);
      }

      return Cost;
    }
    // Vector unsigned division/remainder will be simplified to shifts/masks.
    if (ISD == ISD::UDIV)
      return getArithmeticInstrCost(Instruction::LShr, Ty, Op1Info, Op2Info,
                                    TargetTransformInfo::OP_None,
                                    TargetTransformInfo::OP_None);

    if (ISD == ISD::UREM)
      return getArithmeticInstrCost(Instruction::And, Ty, Op1Info, Op2Info,
                                    TargetTransformInfo::OP_None,
                                    TargetTransformInfo::OP_None);
  }
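  // A concrete illustration (not code from this file), for a divisor of 8:
  //   x sdiv 8  ->  t = sra x,31; t = srl t,29; t = add x,t; sra t,3
  //   x udiv 8  ->  lshr x, 3
  //   x urem 8  ->  and  x, 7
  // which is exactly what the cost computations above model.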

  static const CostTblEntry AVX512BWUniformConstCostTable[] = {
    { ISD::SHL, MVT::v64i8, 2 }, // psllw + pand.
    { ISD::SRL, MVT::v64i8, 2 }, // psrlw + pand.
    { ISD::SRA, MVT::v64i8, 4 }, // psrlw, pand, pxor, psubb.
  };

  if (Op2Info == TargetTransformInfo::OK_UniformConstantValue &&
      ST->hasBWI()) {
    if (const auto *Entry = CostTableLookup(AVX512BWUniformConstCostTable, ISD,
                                            LT.second))
      return LT.first * Entry->Cost;
  }

  static const CostTblEntry AVX512UniformConstCostTable[] = {
    { ISD::SRA, MVT::v2i64, 1 },
    { ISD::SRA, MVT::v4i64, 1 },
    { ISD::SRA, MVT::v8i64, 1 },
  };

  if (Op2Info == TargetTransformInfo::OK_UniformConstantValue &&
      ST->hasAVX512()) {
    if (const auto *Entry = CostTableLookup(AVX512UniformConstCostTable, ISD,
                                            LT.second))
      return LT.first * Entry->Cost;
  }

  static const CostTblEntry AVX2UniformConstCostTable[] = {
    { ISD::SHL, MVT::v32i8, 2 }, // psllw + pand.
    { ISD::SRL, MVT::v32i8, 2 }, // psrlw + pand.
    { ISD::SRA, MVT::v32i8, 4 }, // psrlw, pand, pxor, psubb.

    { ISD::SRA, MVT::v4i64, 4 }, // 2 x psrad + shuffle.
  };

  if (Op2Info == TargetTransformInfo::OK_UniformConstantValue &&
      ST->hasAVX2()) {
    if (const auto *Entry = CostTableLookup(AVX2UniformConstCostTable, ISD,
                                            LT.second))
      return LT.first * Entry->Cost;
  }

  static const CostTblEntry SSE2UniformConstCostTable[] = {
    { ISD::SHL, MVT::v16i8,   2 }, // psllw + pand.
    { ISD::SRL, MVT::v16i8,   2 }, // psrlw + pand.
    { ISD::SRA, MVT::v16i8,   4 }, // psrlw, pand, pxor, psubb.

    { ISD::SHL, MVT::v32i8, 4+2 }, // 2*(psllw + pand) + split.
    { ISD::SRL, MVT::v32i8, 4+2 }, // 2*(psrlw + pand) + split.
    { ISD::SRA, MVT::v32i8, 8+2 }, // 2*(psrlw, pand, pxor, psubb) + split.
  };

  // XOP has faster vXi8 shifts.
  if (Op2Info == TargetTransformInfo::OK_UniformConstantValue &&
      ST->hasSSE2() && !ST->hasXOP()) {
    if (const auto *Entry =
            CostTableLookup(SSE2UniformConstCostTable, ISD, LT.second))
      return LT.first * Entry->Cost;
  }

  static const CostTblEntry AVX512BWConstCostTable[] = {
    { ISD::SDIV, MVT::v64i8,  14 }, // 2*ext+2*pmulhw sequence
    { ISD::SREM, MVT::v64i8,  16 }, // 2*ext+2*pmulhw+mul+sub sequence
    { ISD::UDIV, MVT::v64i8,  14 }, // 2*ext+2*pmulhw sequence
    { ISD::UREM, MVT::v64i8,  16 }, // 2*ext+2*pmulhw+mul+sub sequence
    { ISD::SDIV, MVT::v32i16,  6 }, // vpmulhw sequence
    { ISD::SREM, MVT::v32i16,  8 }, // vpmulhw+mul+sub sequence
    { ISD::UDIV, MVT::v32i16,  6 }, // vpmulhuw sequence
    { ISD::UREM, MVT::v32i16,  8 }, // vpmulhuw+mul+sub sequence
  };

  if ((Op2Info == TargetTransformInfo::OK_UniformConstantValue ||
       Op2Info == TargetTransformInfo::OK_NonUniformConstantValue) &&
      ST->hasBWI()) {
    if (const auto *Entry =
            CostTableLookup(AVX512BWConstCostTable, ISD, LT.second))
      return LT.first * Entry->Cost;
  }

  static const CostTblEntry AVX512ConstCostTable[] = {
    { ISD::SDIV, MVT::v16i32, 15 }, // vpmuldq sequence
    { ISD::SREM, MVT::v16i32, 17 }, // vpmuldq+mul+sub sequence
    { ISD::UDIV, MVT::v16i32, 15 }, // vpmuludq sequence
    { ISD::UREM, MVT::v16i32, 17 }, // vpmuludq+mul+sub sequence
  };

  if ((Op2Info == TargetTransformInfo::OK_UniformConstantValue ||
       Op2Info == TargetTransformInfo::OK_NonUniformConstantValue) &&
      ST->hasAVX512()) {
    if (const auto *Entry =
            CostTableLookup(AVX512ConstCostTable, ISD, LT.second))
      return LT.first * Entry->Cost;
  }

  static const CostTblEntry AVX2ConstCostTable[] = {
    { ISD::SDIV, MVT::v32i8,  14 }, // 2*ext+2*pmulhw sequence
    { ISD::SREM, MVT::v32i8,  16 }, // 2*ext+2*pmulhw+mul+sub sequence
    { ISD::UDIV, MVT::v32i8,  14 }, // 2*ext+2*pmulhw sequence
    { ISD::UREM, MVT::v32i8,  16 }, // 2*ext+2*pmulhw+mul+sub sequence
    { ISD::SDIV, MVT::v16i16,  6 }, // vpmulhw sequence
    { ISD::SREM, MVT::v16i16,  8 }, // vpmulhw+mul+sub sequence
    { ISD::UDIV, MVT::v16i16,  6 }, // vpmulhuw sequence
    { ISD::UREM, MVT::v16i16,  8 }, // vpmulhuw+mul+sub sequence
    { ISD::SDIV, MVT::v8i32,  15 }, // vpmuldq sequence
    { ISD::SREM, MVT::v8i32,  19 }, // vpmuldq+mul+sub sequence
    { ISD::UDIV, MVT::v8i32,  15 }, // vpmuludq sequence
    { ISD::UREM, MVT::v8i32,  19 }, // vpmuludq+mul+sub sequence
  };

  if ((Op2Info == TargetTransformInfo::OK_UniformConstantValue ||
       Op2Info == TargetTransformInfo::OK_NonUniformConstantValue) &&
      ST->hasAVX2()) {
    if (const auto *Entry = CostTableLookup(AVX2ConstCostTable, ISD, LT.second))
      return LT.first * Entry->Cost;
  }

  static const CostTblEntry SSE2ConstCostTable[] = {
    { ISD::SDIV, MVT::v32i8,  28+2 }, // 4*ext+4*pmulhw sequence + split.
    { ISD::SREM, MVT::v32i8,  32+2 }, // 4*ext+4*pmulhw+mul+sub sequence + split.
    { ISD::SDIV, MVT::v16i8,    14 }, // 2*ext+2*pmulhw sequence
    { ISD::SREM, MVT::v16i8,    16 }, // 2*ext+2*pmulhw+mul+sub sequence
    { ISD::UDIV, MVT::v32i8,  28+2 }, // 4*ext+4*pmulhw sequence + split.
    { ISD::UREM, MVT::v32i8,  32+2 }, // 4*ext+4*pmulhw+mul+sub sequence + split.
    { ISD::UDIV, MVT::v16i8,    14 }, // 2*ext+2*pmulhw sequence
    { ISD::UREM, MVT::v16i8,    16 }, // 2*ext+2*pmulhw+mul+sub sequence
    { ISD::SDIV, MVT::v16i16, 12+2 }, // 2*pmulhw sequence + split.
    { ISD::SREM, MVT::v16i16, 16+2 }, // 2*pmulhw+mul+sub sequence + split.
    { ISD::SDIV, MVT::v8i16,     6 }, // pmulhw sequence
    { ISD::SREM, MVT::v8i16,     8 }, // pmulhw+mul+sub sequence
    { ISD::UDIV, MVT::v16i16, 12+2 }, // 2*pmulhuw sequence + split.
    { ISD::UREM, MVT::v16i16, 16+2 }, // 2*pmulhuw+mul+sub sequence + split.
    { ISD::UDIV, MVT::v8i16,     6 }, // pmulhuw sequence
    { ISD::UREM, MVT::v8i16,     8 }, // pmulhuw+mul+sub sequence
    { ISD::SDIV, MVT::v8i32,  38+2 }, // 2*pmuludq sequence + split.
    { ISD::SREM, MVT::v8i32,  48+2 }, // 2*pmuludq+mul+sub sequence + split.
    { ISD::SDIV, MVT::v4i32,    19 }, // pmuludq sequence
    { ISD::SREM, MVT::v4i32,    24 }, // pmuludq+mul+sub sequence
    { ISD::UDIV, MVT::v8i32,  30+2 }, // 2*pmuludq sequence + split.
    { ISD::UREM, MVT::v8i32,  40+2 }, // 2*pmuludq+mul+sub sequence + split.
    { ISD::UDIV, MVT::v4i32,    15 }, // pmuludq sequence
    { ISD::UREM, MVT::v4i32,    20 }, // pmuludq+mul+sub sequence
  };

  if ((Op2Info == TargetTransformInfo::OK_UniformConstantValue ||
       Op2Info == TargetTransformInfo::OK_NonUniformConstantValue) &&
      ST->hasSSE2()) {
    if (ISD == ISD::SDIV && LT.second == MVT::v8i32 && ST->hasAVX())
      return LT.first * 32;
    if (ISD == ISD::SREM && LT.second == MVT::v8i32 && ST->hasAVX())
      return LT.first * 38;
    if (ISD == ISD::SDIV && LT.second == MVT::v4i32 && ST->hasSSE41())
      return LT.first * 15;
    if (ISD == ISD::SREM && LT.second == MVT::v4i32 && ST->hasSSE41())
      return LT.first * 20;

    if (const auto *Entry = CostTableLookup(SSE2ConstCostTable, ISD, LT.second))
      return LT.first * Entry->Cost;
  }
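  // Worked example (illustrative): on an AVX1-only target, v16i16 is a legal
  // register type, so LT.second == v16i16 and the 12+2 entries above model
  // running the pmulhw sequence on both 128-bit halves plus the split. On
  // plain SSE2 the same IR type legalizes to LT.first == 2 x v8i16 instead,
  // picking up the cost-6 v8i16 entry twice.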

  static const CostTblEntry AVX2UniformCostTable[] = {
    // Uniform splats are cheaper for the following instructions.
    { ISD::SHL, MVT::v16i16, 1 }, // psllw.
    { ISD::SRL, MVT::v16i16, 1 }, // psrlw.
    { ISD::SRA, MVT::v16i16, 1 }, // psraw.
  };

  if (ST->hasAVX2() &&
      ((Op2Info == TargetTransformInfo::OK_UniformConstantValue) ||
       (Op2Info == TargetTransformInfo::OK_UniformValue))) {
    if (const auto *Entry =
            CostTableLookup(AVX2UniformCostTable, ISD, LT.second))
      return LT.first * Entry->Cost;
  }

  static const CostTblEntry SSE2UniformCostTable[] = {
    // Uniform splats are cheaper for the following instructions.
    { ISD::SHL, MVT::v8i16, 1 }, // psllw.
    { ISD::SHL, MVT::v4i32, 1 }, // pslld.
    { ISD::SHL, MVT::v2i64, 1 }, // psllq.

    { ISD::SRL, MVT::v8i16, 1 }, // psrlw.
    { ISD::SRL, MVT::v4i32, 1 }, // psrld.
    { ISD::SRL, MVT::v2i64, 1 }, // psrlq.

    { ISD::SRA, MVT::v8i16, 1 }, // psraw.
    { ISD::SRA, MVT::v4i32, 1 }, // psrad.
  };

  if (ST->hasSSE2() &&
      ((Op2Info == TargetTransformInfo::OK_UniformConstantValue) ||
       (Op2Info == TargetTransformInfo::OK_UniformValue))) {
    if (const auto *Entry =
            CostTableLookup(SSE2UniformCostTable, ISD, LT.second))
      return LT.first * Entry->Cost;
  }

  static const CostTblEntry AVX512DQCostTable[] = {
    { ISD::MUL, MVT::v2i64, 1 },
    { ISD::MUL, MVT::v4i64, 1 },
    { ISD::MUL, MVT::v8i64, 1 }
  };

  // Look for AVX512DQ lowering tricks for custom cases.
  if (ST->hasDQI())
    if (const auto *Entry = CostTableLookup(AVX512DQCostTable, ISD, LT.second))
      return LT.first * Entry->Cost;

  static const CostTblEntry AVX512BWCostTable[] = {
    { ISD::SHL, MVT::v8i16,   1 }, // vpsllvw
    { ISD::SRL, MVT::v8i16,   1 }, // vpsrlvw
    { ISD::SRA, MVT::v8i16,   1 }, // vpsravw

    { ISD::SHL, MVT::v16i16,  1 }, // vpsllvw
    { ISD::SRL, MVT::v16i16,  1 }, // vpsrlvw
    { ISD::SRA, MVT::v16i16,  1 }, // vpsravw

    { ISD::SHL, MVT::v32i16,  1 }, // vpsllvw
    { ISD::SRL, MVT::v32i16,  1 }, // vpsrlvw
    { ISD::SRA, MVT::v32i16,  1 }, // vpsravw

    { ISD::SHL, MVT::v64i8,  11 }, // vpblendvb sequence.
    { ISD::SRL, MVT::v64i8,  11 }, // vpblendvb sequence.
    { ISD::SRA, MVT::v64i8,  24 }, // vpblendvb sequence.

    { ISD::MUL, MVT::v64i8,  11 }, // extend/pmullw/trunc sequence.
    { ISD::MUL, MVT::v32i8,   4 }, // extend/pmullw/trunc sequence.
    { ISD::MUL, MVT::v16i8,   4 }, // extend/pmullw/trunc sequence.
  };

  // Look for AVX512BW lowering tricks for custom cases.
  if (ST->hasBWI())
    if (const auto *Entry = CostTableLookup(AVX512BWCostTable, ISD, LT.second))
      return LT.first * Entry->Cost;

  static const CostTblEntry AVX512CostTable[] = {
    { ISD::SHL,  MVT::v16i32,  1 },
    { ISD::SRL,  MVT::v16i32,  1 },
    { ISD::SRA,  MVT::v16i32,  1 },

    { ISD::SHL,  MVT::v8i64,   1 },
    { ISD::SRL,  MVT::v8i64,   1 },

    { ISD::SRA,  MVT::v2i64,   1 },
    { ISD::SRA,  MVT::v4i64,   1 },
    { ISD::SRA,  MVT::v8i64,   1 },

    { ISD::MUL,  MVT::v32i8,  13 }, // extend/pmullw/trunc sequence.
    { ISD::MUL,  MVT::v16i8,   5 }, // extend/pmullw/trunc sequence.
    { ISD::MUL,  MVT::v16i32,  1 }, // pmulld (Skylake from agner.org)
    { ISD::MUL,  MVT::v8i32,   1 }, // pmulld (Skylake from agner.org)
    { ISD::MUL,  MVT::v4i32,   1 }, // pmulld (Skylake from agner.org)
    { ISD::MUL,  MVT::v8i64,   8 }, // 3*pmuludq/3*shift/2*add

    { ISD::FADD, MVT::v8f64,   1 }, // Skylake from http://www.agner.org/
    { ISD::FSUB, MVT::v8f64,   1 }, // Skylake from http://www.agner.org/
    { ISD::FMUL, MVT::v8f64,   1 }, // Skylake from http://www.agner.org/

    { ISD::FADD, MVT::v16f32,  1 }, // Skylake from http://www.agner.org/
    { ISD::FSUB, MVT::v16f32,  1 }, // Skylake from http://www.agner.org/
    { ISD::FMUL, MVT::v16f32,  1 }, // Skylake from http://www.agner.org/
  };

  if (ST->hasAVX512())
    if (const auto *Entry = CostTableLookup(AVX512CostTable, ISD, LT.second))
      return LT.first * Entry->Cost;

  static const CostTblEntry AVX2ShiftCostTable[] = {
    // Shifts on v4i64/v8i32 on AVX2 are legal even though we declare them
    // custom, so that we can detect the cases where the shift amount is a
    // scalar.
    { ISD::SHL, MVT::v4i32, 1 },
    { ISD::SRL, MVT::v4i32, 1 },
    { ISD::SRA, MVT::v4i32, 1 },
    { ISD::SHL, MVT::v8i32, 1 },
    { ISD::SRL, MVT::v8i32, 1 },
    { ISD::SRA, MVT::v8i32, 1 },
    { ISD::SHL, MVT::v2i64, 1 },
    { ISD::SRL, MVT::v2i64, 1 },
    { ISD::SHL, MVT::v4i64, 1 },
    { ISD::SRL, MVT::v4i64, 1 },
  };

  // Look for AVX2 lowering tricks.
  if (ST->hasAVX2()) {
    if (ISD == ISD::SHL && LT.second == MVT::v16i16 &&
        (Op2Info == TargetTransformInfo::OK_UniformConstantValue ||
         Op2Info == TargetTransformInfo::OK_NonUniformConstantValue))
      // On AVX2, a packed v16i16 shift left by a constant build_vector
      // is lowered into a vector multiply (vpmullw).
      return getArithmeticInstrCost(Instruction::Mul, Ty, Op1Info, Op2Info,
                                    TargetTransformInfo::OP_None,
                                    TargetTransformInfo::OP_None);

    if (const auto *Entry = CostTableLookup(AVX2ShiftCostTable, ISD, LT.second))
      return LT.first * Entry->Cost;
  }

  static const CostTblEntry XOPShiftCostTable[] = {
    // 128bit shifts take 1cy, but right shifts require negation beforehand.
    { ISD::SHL, MVT::v16i8,    1 },
    { ISD::SRL, MVT::v16i8,    2 },
    { ISD::SRA, MVT::v16i8,    2 },
    { ISD::SHL, MVT::v8i16,    1 },
    { ISD::SRL, MVT::v8i16,    2 },
    { ISD::SRA, MVT::v8i16,    2 },
    { ISD::SHL, MVT::v4i32,    1 },
    { ISD::SRL, MVT::v4i32,    2 },
    { ISD::SRA, MVT::v4i32,    2 },
    { ISD::SHL, MVT::v2i64,    1 },
    { ISD::SRL, MVT::v2i64,    2 },
    { ISD::SRA, MVT::v2i64,    2 },
    // 256bit shifts require splitting if AVX2 didn't catch them above.
    { ISD::SHL, MVT::v32i8,  2+2 },
    { ISD::SRL, MVT::v32i8,  4+2 },
    { ISD::SRA, MVT::v32i8,  4+2 },
    { ISD::SHL, MVT::v16i16, 2+2 },
    { ISD::SRL, MVT::v16i16, 4+2 },
    { ISD::SRA, MVT::v16i16, 4+2 },
    { ISD::SHL, MVT::v8i32,  2+2 },
    { ISD::SRL, MVT::v8i32,  4+2 },
    { ISD::SRA, MVT::v8i32,  4+2 },
    { ISD::SHL, MVT::v4i64,  2+2 },
    { ISD::SRL, MVT::v4i64,  4+2 },
    { ISD::SRA, MVT::v4i64,  4+2 },
  };

  // Look for XOP lowering tricks.
  if (ST->hasXOP()) {
    // If the right shift is constant then we'll fold the negation so
    // it's as cheap as a left shift.
    int ShiftISD = ISD;
    if ((ShiftISD == ISD::SRL || ShiftISD == ISD::SRA) &&
        (Op2Info == TargetTransformInfo::OK_UniformConstantValue ||
         Op2Info == TargetTransformInfo::OK_NonUniformConstantValue))
      ShiftISD = ISD::SHL;
    if (const auto *Entry =
            CostTableLookup(XOPShiftCostTable, ShiftISD, LT.second))
      return LT.first * Entry->Cost;
  }

  static const CostTblEntry SSE2UniformShiftCostTable[] = {
    // Uniform splats are cheaper for the following instructions.
    { ISD::SHL, MVT::v16i16, 2+2 }, // 2*psllw + split.
    { ISD::SHL, MVT::v8i32,  2+2 }, // 2*pslld + split.
    { ISD::SHL, MVT::v4i64,  2+2 }, // 2*psllq + split.

    { ISD::SRL, MVT::v16i16, 2+2 }, // 2*psrlw + split.
    { ISD::SRL, MVT::v8i32,  2+2 }, // 2*psrld + split.
    { ISD::SRL, MVT::v4i64,  2+2 }, // 2*psrlq + split.

    { ISD::SRA, MVT::v16i16, 2+2 }, // 2*psraw + split.
    { ISD::SRA, MVT::v8i32,  2+2 }, // 2*psrad + split.
    { ISD::SRA, MVT::v2i64,    4 }, // 2*psrad + shuffle.
    { ISD::SRA, MVT::v4i64,  8+2 }, // 2*(2*psrad + shuffle) + split.
  };

  if (ST->hasSSE2() &&
      ((Op2Info == TargetTransformInfo::OK_UniformConstantValue) ||
       (Op2Info == TargetTransformInfo::OK_UniformValue))) {

    // Handle AVX2 uniform v4i64 ISD::SRA, it's not worth a table.
    if (ISD == ISD::SRA && LT.second == MVT::v4i64 && ST->hasAVX2())
      return LT.first * 4; // 2*psrad + shuffle.

    if (const auto *Entry =
            CostTableLookup(SSE2UniformShiftCostTable, ISD, LT.second))
      return LT.first * Entry->Cost;
  }

  if (ISD == ISD::SHL &&
      Op2Info == TargetTransformInfo::OK_NonUniformConstantValue) {
    MVT VT = LT.second;
    // Vector shift left by non uniform constant can be lowered
    // into vector multiply.
    if (((VT == MVT::v8i16 || VT == MVT::v4i32) && ST->hasSSE2()) ||
        ((VT == MVT::v16i16 || VT == MVT::v8i32) && ST->hasAVX()))
      ISD = ISD::MUL;
  }
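  // For example (illustrative): "shl <4 x i32> %x, <1, 2, 3, 4>" can be
  // lowered as a vector multiply by <2, 4, 8, 16>, so from here on such
  // shifts are costed as ISD::MUL rather than as variable vector shifts.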

  static const CostTblEntry AVX2CostTable[] = {
    { ISD::SHL,  MVT::v32i8,  11 }, // vpblendvb sequence.
    { ISD::SHL,  MVT::v16i16, 10 }, // extend/vpsrlvd/pack sequence.

    { ISD::SRL,  MVT::v32i8,  11 }, // vpblendvb sequence.
    { ISD::SRL,  MVT::v16i16, 10 }, // extend/vpsrlvd/pack sequence.

    { ISD::SRA,  MVT::v32i8,  24 }, // vpblendvb sequence.
    { ISD::SRA,  MVT::v16i16, 10 }, // extend/vpsravd/pack sequence.
    { ISD::SRA,  MVT::v2i64,   4 }, // srl/xor/sub sequence.
    { ISD::SRA,  MVT::v4i64,   4 }, // srl/xor/sub sequence.

    { ISD::SUB,  MVT::v32i8,   1 }, // psubb
    { ISD::ADD,  MVT::v32i8,   1 }, // paddb
    { ISD::SUB,  MVT::v16i16,  1 }, // psubw
    { ISD::ADD,  MVT::v16i16,  1 }, // paddw
    { ISD::SUB,  MVT::v8i32,   1 }, // psubd
    { ISD::ADD,  MVT::v8i32,   1 }, // paddd
    { ISD::SUB,  MVT::v4i64,   1 }, // psubq
    { ISD::ADD,  MVT::v4i64,   1 }, // paddq

    { ISD::MUL,  MVT::v32i8,  17 }, // extend/pmullw/trunc sequence.
    { ISD::MUL,  MVT::v16i8,   7 }, // extend/pmullw/trunc sequence.
    { ISD::MUL,  MVT::v16i16,  1 }, // pmullw
    { ISD::MUL,  MVT::v8i32,   2 }, // pmulld (Haswell from agner.org)
    { ISD::MUL,  MVT::v4i64,   8 }, // 3*pmuludq/3*shift/2*add

    { ISD::FADD, MVT::v4f64,   1 }, // Haswell from http://www.agner.org/
    { ISD::FADD, MVT::v8f32,   1 }, // Haswell from http://www.agner.org/
    { ISD::FSUB, MVT::v4f64,   1 }, // Haswell from http://www.agner.org/
    { ISD::FSUB, MVT::v8f32,   1 }, // Haswell from http://www.agner.org/
    { ISD::FMUL, MVT::v4f64,   1 }, // Haswell from http://www.agner.org/
    { ISD::FMUL, MVT::v8f32,   1 }, // Haswell from http://www.agner.org/

    { ISD::FDIV, MVT::f32,     7 }, // Haswell from http://www.agner.org/
    { ISD::FDIV, MVT::v4f32,   7 }, // Haswell from http://www.agner.org/
    { ISD::FDIV, MVT::v8f32,  14 }, // Haswell from http://www.agner.org/
    { ISD::FDIV, MVT::f64,    14 }, // Haswell from http://www.agner.org/
    { ISD::FDIV, MVT::v2f64,  14 }, // Haswell from http://www.agner.org/
    { ISD::FDIV, MVT::v4f64,  28 }, // Haswell from http://www.agner.org/
  };

  // Look for AVX2 lowering tricks for custom cases.
  if (ST->hasAVX2())
    if (const auto *Entry = CostTableLookup(AVX2CostTable, ISD, LT.second))
      return LT.first * Entry->Cost;

  static const CostTblEntry AVX1CostTable[] = {
    // We don't have to scalarize unsupported ops. We can issue two half-sized
    // operations and we only need to extract the upper YMM half.
    // Two ops + 1 extract + 1 insert = 4.
    { ISD::MUL,  MVT::v16i16,  4 },
    { ISD::MUL,  MVT::v8i32,   4 },
    { ISD::SUB,  MVT::v32i8,   4 },
    { ISD::ADD,  MVT::v32i8,   4 },
    { ISD::SUB,  MVT::v16i16,  4 },
    { ISD::ADD,  MVT::v16i16,  4 },
    { ISD::SUB,  MVT::v8i32,   4 },
    { ISD::ADD,  MVT::v8i32,   4 },
    { ISD::SUB,  MVT::v4i64,   4 },
    { ISD::ADD,  MVT::v4i64,   4 },

    // A v4i64 multiply is custom lowered as two split v2i64 vectors that then
    // are lowered as a series of long multiplies(3), shifts(3) and adds(2)
    // Because we believe v4i64 to be a legal type, we must also include the
    // extract+insert in the cost table. Therefore, the cost here is 18
    // instead of 8.
    { ISD::MUL,  MVT::v4i64,  18 },

    { ISD::MUL,  MVT::v32i8,  26 }, // extend/pmullw/trunc sequence.

    { ISD::FDIV, MVT::f32,    14 }, // SNB from http://www.agner.org/
    { ISD::FDIV, MVT::v4f32,  14 }, // SNB from http://www.agner.org/
    { ISD::FDIV, MVT::v8f32,  28 }, // SNB from http://www.agner.org/
    { ISD::FDIV, MVT::f64,    22 }, // SNB from http://www.agner.org/
    { ISD::FDIV, MVT::v2f64,  22 }, // SNB from http://www.agner.org/
    { ISD::FDIV, MVT::v4f64,  44 }, // SNB from http://www.agner.org/
  };

  if (ST->hasAVX())
    if (const auto *Entry = CostTableLookup(AVX1CostTable, ISD, LT.second))
      return LT.first * Entry->Cost;

  static const CostTblEntry SSE42CostTable[] = {
    { ISD::FADD, MVT::f64,    1 }, // Nehalem from http://www.agner.org/
    { ISD::FADD, MVT::f32,    1 }, // Nehalem from http://www.agner.org/
    { ISD::FADD, MVT::v2f64,  1 }, // Nehalem from http://www.agner.org/
    { ISD::FADD, MVT::v4f32,  1 }, // Nehalem from http://www.agner.org/

    { ISD::FSUB, MVT::f64,    1 }, // Nehalem from http://www.agner.org/
    { ISD::FSUB, MVT::f32,    1 }, // Nehalem from http://www.agner.org/
    { ISD::FSUB, MVT::v2f64,  1 }, // Nehalem from http://www.agner.org/
    { ISD::FSUB, MVT::v4f32,  1 }, // Nehalem from http://www.agner.org/

    { ISD::FMUL, MVT::f64,    1 }, // Nehalem from http://www.agner.org/
    { ISD::FMUL, MVT::f32,    1 }, // Nehalem from http://www.agner.org/
    { ISD::FMUL, MVT::v2f64,  1 }, // Nehalem from http://www.agner.org/
    { ISD::FMUL, MVT::v4f32,  1 }, // Nehalem from http://www.agner.org/

    { ISD::FDIV, MVT::f32,   14 }, // Nehalem from http://www.agner.org/
    { ISD::FDIV, MVT::v4f32, 14 }, // Nehalem from http://www.agner.org/
    { ISD::FDIV, MVT::f64,   22 }, // Nehalem from http://www.agner.org/
    { ISD::FDIV, MVT::v2f64, 22 }, // Nehalem from http://www.agner.org/
  };

  if (ST->hasSSE42())
    if (const auto *Entry = CostTableLookup(SSE42CostTable, ISD, LT.second))
      return LT.first * Entry->Cost;

  static const CostTblEntry SSE41CostTable[] = {
    { ISD::SHL, MVT::v16i8,     11 }, // pblendvb sequence.
    { ISD::SHL, MVT::v32i8,  2*11+2 }, // pblendvb sequence + split.
    { ISD::SHL, MVT::v8i16,     14 }, // pblendvb sequence.
    { ISD::SHL, MVT::v16i16, 2*14+2 }, // pblendvb sequence + split.
    { ISD::SHL, MVT::v4i32,      4 }, // pslld/paddd/cvttps2dq/pmulld
    { ISD::SHL, MVT::v8i32,   2*4+2 }, // pslld/paddd/cvttps2dq/pmulld + split

    { ISD::SRL, MVT::v16i8,     12 }, // pblendvb sequence.
    { ISD::SRL, MVT::v32i8,  2*12+2 }, // pblendvb sequence + split.
    { ISD::SRL, MVT::v8i16,     14 }, // pblendvb sequence.
    { ISD::SRL, MVT::v16i16, 2*14+2 }, // pblendvb sequence + split.
    { ISD::SRL, MVT::v4i32,     11 }, // Shift each lane + blend.
    { ISD::SRL, MVT::v8i32,  2*11+2 }, // Shift each lane + blend + split.

    { ISD::SRA, MVT::v16i8,     24 }, // pblendvb sequence.
    { ISD::SRA, MVT::v32i8,  2*24+2 }, // pblendvb sequence + split.
    { ISD::SRA, MVT::v8i16,     14 }, // pblendvb sequence.
    { ISD::SRA, MVT::v16i16, 2*14+2 }, // pblendvb sequence + split.
    { ISD::SRA, MVT::v4i32,     12 }, // Shift each lane + blend.
    { ISD::SRA, MVT::v8i32,  2*12+2 }, // Shift each lane + blend + split.

    { ISD::MUL, MVT::v4i32,      2 }  // pmulld (Nehalem from agner.org)
  };

  if (ST->hasSSE41())
    if (const auto *Entry = CostTableLookup(SSE41CostTable, ISD, LT.second))
      return LT.first * Entry->Cost;

  static const CostTblEntry SSE2CostTable[] = {
    // We don't correctly identify costs of casts because they are marked as
    // custom.
    { ISD::SHL,  MVT::v16i8,    26 }, // cmpgtb sequence.
    { ISD::SHL,  MVT::v8i16,    32 }, // cmpgtb sequence.
    { ISD::SHL,  MVT::v4i32,   2*5 }, // We optimized this using mul.
    { ISD::SHL,  MVT::v2i64,     4 }, // splat+shuffle sequence.
    { ISD::SHL,  MVT::v4i64, 2*4+2 }, // splat+shuffle sequence + split.

    { ISD::SRL,  MVT::v16i8,    26 }, // cmpgtb sequence.
    { ISD::SRL,  MVT::v8i16,    32 }, // cmpgtb sequence.
    { ISD::SRL,  MVT::v4i32,    16 }, // Shift each lane + blend.
    { ISD::SRL,  MVT::v2i64,     4 }, // splat+shuffle sequence.
    { ISD::SRL,  MVT::v4i64, 2*4+2 }, // splat+shuffle sequence + split.

    { ISD::SRA,  MVT::v16i8,    54 }, // unpacked cmpgtb sequence.
    { ISD::SRA,  MVT::v8i16,    32 }, // cmpgtb sequence.
    { ISD::SRA,  MVT::v4i32,    16 }, // Shift each lane + blend.
    { ISD::SRA,  MVT::v2i64,    12 }, // srl/xor/sub sequence.
    { ISD::SRA,  MVT::v4i64, 2*12+2 }, // srl/xor/sub sequence+split.

    { ISD::MUL,  MVT::v16i8,    12 }, // extend/pmullw/trunc sequence.
    { ISD::MUL,  MVT::v8i16,     1 }, // pmullw
    { ISD::MUL,  MVT::v4i32,     6 }, // 3*pmuludq/4*shuffle
    { ISD::MUL,  MVT::v2i64,     8 }, // 3*pmuludq/3*shift/2*add

    { ISD::FDIV, MVT::f32,      23 }, // Pentium IV from http://www.agner.org/
    { ISD::FDIV, MVT::v4f32,    39 }, // Pentium IV from http://www.agner.org/
    { ISD::FDIV, MVT::f64,      38 }, // Pentium IV from http://www.agner.org/
    { ISD::FDIV, MVT::v2f64,    69 }, // Pentium IV from http://www.agner.org/

    { ISD::FADD, MVT::f32,       2 }, // Pentium IV from http://www.agner.org/
    { ISD::FADD, MVT::f64,       2 }, // Pentium IV from http://www.agner.org/

    { ISD::FSUB, MVT::f32,       2 }, // Pentium IV from http://www.agner.org/
    { ISD::FSUB, MVT::f64,       2 }, // Pentium IV from http://www.agner.org/
  };

  if (ST->hasSSE2())
    if (const auto *Entry = CostTableLookup(SSE2CostTable, ISD, LT.second))
      return LT.first * Entry->Cost;

  static const CostTblEntry SSE1CostTable[] = {
    { ISD::FDIV, MVT::f32,   17 }, // Pentium III from http://www.agner.org/
    { ISD::FDIV, MVT::v4f32, 34 }, // Pentium III from http://www.agner.org/

    { ISD::FADD, MVT::f32,    1 }, // Pentium III from http://www.agner.org/
    { ISD::FADD, MVT::v4f32,  2 }, // Pentium III from http://www.agner.org/

    { ISD::FSUB, MVT::f32,    1 }, // Pentium III from http://www.agner.org/
    { ISD::FSUB, MVT::v4f32,  2 }, // Pentium III from http://www.agner.org/

    { ISD::ADD,  MVT::i8,     1 }, // Pentium III from http://www.agner.org/
    { ISD::ADD,  MVT::i16,    1 }, // Pentium III from http://www.agner.org/
    { ISD::ADD,  MVT::i32,    1 }, // Pentium III from http://www.agner.org/

    { ISD::SUB,  MVT::i8,     1 }, // Pentium III from http://www.agner.org/
    { ISD::SUB,  MVT::i16,    1 }, // Pentium III from http://www.agner.org/
    { ISD::SUB,  MVT::i32,    1 }, // Pentium III from http://www.agner.org/
  };

  if (ST->hasSSE1())
    if (const auto *Entry = CostTableLookup(SSE1CostTable, ISD, LT.second))
      return LT.first * Entry->Cost;

  // It is not a good idea to vectorize division. We have to scalarize it and
  // in the process we will often end up having to spill regular registers.
  // The overhead of division is going to dominate most kernels anyway, so try
  // hard to prevent vectorization of division - it is generally a bad idea.
  // Assume somewhat arbitrarily that we have to be able to hide "20 cycles"
  // for each lane.
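  // E.g. (illustrative numbers): for a <4 x i32> sdiv with LT.first == 1 and
  // a scalar sdiv cost of 1, the estimate below is 20 * 1 * 4 * 1 = 80, which
  // reliably steers the vectorizers away from widening the division.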
  if (LT.second.isVector() && (ISD == ISD::SDIV || ISD == ISD::SREM ||
                               ISD == ISD::UDIV || ISD == ISD::UREM)) {
    int ScalarCost = getArithmeticInstrCost(
        Opcode, Ty->getScalarType(), Op1Info, Op2Info,
        TargetTransformInfo::OP_None, TargetTransformInfo::OP_None);
    return 20 * LT.first * LT.second.getVectorNumElements() * ScalarCost;
  }

  // Fallback to the default implementation.
  return BaseT::getArithmeticInstrCost(Opcode, Ty, Op1Info, Op2Info);
}

int X86TTIImpl::getShuffleCost(TTI::ShuffleKind Kind, Type *Tp, int Index,
                               Type *SubTp) {
  // 64-bit packed float vectors (v2f32) are widened to type v4f32.
  // 64-bit packed integer vectors (v2i32) are promoted to type v2i64.
  std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, Tp);

  // Treat Transpose as 2-op shuffles - there's no difference in lowering.
  if (Kind == TTI::SK_Transpose)
    Kind = TTI::SK_PermuteTwoSrc;

  // For Broadcasts we are splatting the first element from the first input
  // register, so only need to reference that input and all the output
  // registers are the same.
  if (Kind == TTI::SK_Broadcast)
    LT.first = 1;

  // Subvector extractions are free if they start at the beginning of a
  // vector and cheap if the subvectors are aligned.
  if (Kind == TTI::SK_ExtractSubvector && LT.second.isVector()) {
    int NumElts = LT.second.getVectorNumElements();
    if ((Index % NumElts) == 0)
      return 0;
    std::pair<int, MVT> SubLT = TLI->getTypeLegalizationCost(DL, SubTp);
    if (SubLT.second.isVector()) {
      int NumSubElts = SubLT.second.getVectorNumElements();
      if ((Index % NumSubElts) == 0 && (NumElts % NumSubElts) == 0)
        return SubLT.first;
    }
  }
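  // For instance (illustrative): extracting the high <4 x i32> half of a
  // legal <8 x i32> at Index 4 gives NumSubElts == 4, so both modulo checks
  // pass and the extraction costs SubLT.first; extracting at Index 0 is free.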

  // We are going to permute multiple sources and the result will be in multiple
  // destinations. We provide an accurate cost only for splits where the
  // element type remains the same.
  if (Kind == TTI::SK_PermuteSingleSrc && LT.first != 1) {
    MVT LegalVT = LT.second;
    if (LegalVT.isVector() &&
        LegalVT.getVectorElementType().getSizeInBits() ==
            Tp->getVectorElementType()->getPrimitiveSizeInBits() &&
        LegalVT.getVectorNumElements() < Tp->getVectorNumElements()) {

      unsigned VecTySize = DL.getTypeStoreSize(Tp);
      unsigned LegalVTSize = LegalVT.getStoreSize();
      // Number of source vectors after legalization:
      unsigned NumOfSrcs = (VecTySize + LegalVTSize - 1) / LegalVTSize;
      // Number of destination vectors after legalization:
      unsigned NumOfDests = LT.first;

      Type *SingleOpTy = VectorType::get(Tp->getVectorElementType(),
                                         LegalVT.getVectorNumElements());

      unsigned NumOfShuffles = (NumOfSrcs - 1) * NumOfDests;
      return NumOfShuffles *
             getShuffleCost(TTI::SK_PermuteTwoSrc, SingleOpTy, 0, nullptr);
    }

    return BaseT::getShuffleCost(Kind, Tp, Index, SubTp);
  }

  // For 2-input shuffles, we must account for splitting the 2 inputs into many.
  if (Kind == TTI::SK_PermuteTwoSrc && LT.first != 1) {
    // We assume that source and destination have the same vector type.
    int NumOfDests = LT.first;
    int NumOfShufflesPerDest = LT.first * 2 - 1;
    LT.first = NumOfDests * NumOfShufflesPerDest;
  }
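  // Worked example (illustrative): a two-source shuffle of <32 x i16> on an
  // AVX2 target legalizes to LT.first == 2 registers, so each of the 2
  // destinations may draw from up to 2*2-1 = 3 sources, giving 2 * 3 = 6
  // legal-width shuffles before the per-entry costs below are applied.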

  static const CostTblEntry AVX512VBMIShuffleTbl[] = {
      {TTI::SK_Reverse, MVT::v64i8, 1}, // vpermb
      {TTI::SK_Reverse, MVT::v32i8, 1}, // vpermb

      {TTI::SK_PermuteSingleSrc, MVT::v64i8, 1}, // vpermb
      {TTI::SK_PermuteSingleSrc, MVT::v32i8, 1}, // vpermb

      {TTI::SK_PermuteTwoSrc, MVT::v64i8, 1}, // vpermt2b
      {TTI::SK_PermuteTwoSrc, MVT::v32i8, 1}, // vpermt2b
      {TTI::SK_PermuteTwoSrc, MVT::v16i8, 1}  // vpermt2b
  };

  if (ST->hasVBMI())
    if (const auto *Entry =
            CostTableLookup(AVX512VBMIShuffleTbl, Kind, LT.second))
      return LT.first * Entry->Cost;

  static const CostTblEntry AVX512BWShuffleTbl[] = {
      {TTI::SK_Broadcast, MVT::v32i16, 1}, // vpbroadcastw
      {TTI::SK_Broadcast, MVT::v64i8, 1},  // vpbroadcastb

      {TTI::SK_Reverse, MVT::v32i16, 1}, // vpermw
      {TTI::SK_Reverse, MVT::v16i16, 1}, // vpermw
      {TTI::SK_Reverse, MVT::v64i8, 2},  // pshufb + vshufi64x2

      {TTI::SK_PermuteSingleSrc, MVT::v32i16, 1}, // vpermw
      {TTI::SK_PermuteSingleSrc, MVT::v16i16, 1}, // vpermw
      {TTI::SK_PermuteSingleSrc, MVT::v8i16, 1},  // vpermw
      {TTI::SK_PermuteSingleSrc, MVT::v64i8, 8},  // extend to v32i16
      {TTI::SK_PermuteSingleSrc, MVT::v32i8, 3},  // vpermw + zext/trunc

      {TTI::SK_PermuteTwoSrc, MVT::v32i16, 1}, // vpermt2w
      {TTI::SK_PermuteTwoSrc, MVT::v16i16, 1}, // vpermt2w
      {TTI::SK_PermuteTwoSrc, MVT::v8i16, 1},  // vpermt2w
      {TTI::SK_PermuteTwoSrc, MVT::v32i8, 3},  // zext + vpermt2w + trunc
      {TTI::SK_PermuteTwoSrc, MVT::v64i8, 19}, // 6 * v32i8 + 1
      {TTI::SK_PermuteTwoSrc, MVT::v16i8, 3}   // zext + vpermt2w + trunc
  };

  if (ST->hasBWI())
    if (const auto *Entry =
            CostTableLookup(AVX512BWShuffleTbl, Kind, LT.second))
      return LT.first * Entry->Cost;

  static const CostTblEntry AVX512ShuffleTbl[] = {
      {TTI::SK_Broadcast, MVT::v8f64, 1},  // vbroadcastpd
      {TTI::SK_Broadcast, MVT::v16f32, 1}, // vbroadcastps
      {TTI::SK_Broadcast, MVT::v8i64, 1},  // vpbroadcastq
      {TTI::SK_Broadcast, MVT::v16i32, 1}, // vpbroadcastd

      {TTI::SK_Reverse, MVT::v8f64, 1},  // vpermpd
      {TTI::SK_Reverse, MVT::v16f32, 1}, // vpermps
      {TTI::SK_Reverse, MVT::v8i64, 1},  // vpermq
      {TTI::SK_Reverse, MVT::v16i32, 1}, // vpermd

      {TTI::SK_PermuteSingleSrc, MVT::v8f64, 1},  // vpermpd
      {TTI::SK_PermuteSingleSrc, MVT::v4f64, 1},  // vpermpd
      {TTI::SK_PermuteSingleSrc, MVT::v2f64, 1},  // vpermpd
      {TTI::SK_PermuteSingleSrc, MVT::v16f32, 1}, // vpermps
      {TTI::SK_PermuteSingleSrc, MVT::v8f32, 1},  // vpermps
      {TTI::SK_PermuteSingleSrc, MVT::v4f32, 1},  // vpermps
      {TTI::SK_PermuteSingleSrc, MVT::v8i64, 1},  // vpermq
      {TTI::SK_PermuteSingleSrc, MVT::v4i64, 1},  // vpermq
      {TTI::SK_PermuteSingleSrc, MVT::v2i64, 1},  // vpermq
      {TTI::SK_PermuteSingleSrc, MVT::v16i32, 1}, // vpermd
      {TTI::SK_PermuteSingleSrc, MVT::v8i32, 1},  // vpermd
      {TTI::SK_PermuteSingleSrc, MVT::v4i32, 1},  // vpermd
      {TTI::SK_PermuteSingleSrc, MVT::v16i8, 1},  // pshufb

      {TTI::SK_PermuteTwoSrc, MVT::v8f64, 1},  // vpermt2pd
      {TTI::SK_PermuteTwoSrc, MVT::v16f32, 1}, // vpermt2ps
      {TTI::SK_PermuteTwoSrc, MVT::v8i64, 1},  // vpermt2q
      {TTI::SK_PermuteTwoSrc, MVT::v16i32, 1}, // vpermt2d
      {TTI::SK_PermuteTwoSrc, MVT::v4f64, 1},  // vpermt2pd
      {TTI::SK_PermuteTwoSrc, MVT::v8f32, 1},  // vpermt2ps
      {TTI::SK_PermuteTwoSrc, MVT::v4i64, 1},  // vpermt2q
      {TTI::SK_PermuteTwoSrc, MVT::v8i32, 1},  // vpermt2d
      {TTI::SK_PermuteTwoSrc, MVT::v2f64, 1},  // vpermt2pd
      {TTI::SK_PermuteTwoSrc, MVT::v4f32, 1},  // vpermt2ps
      {TTI::SK_PermuteTwoSrc, MVT::v2i64, 1},  // vpermt2q
      {TTI::SK_PermuteTwoSrc, MVT::v4i32, 1}   // vpermt2d
  };

  if (ST->hasAVX512())
    if (const auto *Entry = CostTableLookup(AVX512ShuffleTbl, Kind, LT.second))
      return LT.first * Entry->Cost;

  static const CostTblEntry AVX2ShuffleTbl[] = {
      {TTI::SK_Broadcast, MVT::v4f64, 1},  // vbroadcastpd
      {TTI::SK_Broadcast, MVT::v8f32, 1},  // vbroadcastps
      {TTI::SK_Broadcast, MVT::v4i64, 1},  // vpbroadcastq
      {TTI::SK_Broadcast, MVT::v8i32, 1},  // vpbroadcastd
      {TTI::SK_Broadcast, MVT::v16i16, 1}, // vpbroadcastw
      {TTI::SK_Broadcast, MVT::v32i8, 1},  // vpbroadcastb

      {TTI::SK_Reverse, MVT::v4f64, 1},  // vpermpd
      {TTI::SK_Reverse, MVT::v8f32, 1},  // vpermps
      {TTI::SK_Reverse, MVT::v4i64, 1},  // vpermq
      {TTI::SK_Reverse, MVT::v8i32, 1},  // vpermd
      {TTI::SK_Reverse, MVT::v16i16, 2}, // vperm2i128 + pshufb
      {TTI::SK_Reverse, MVT::v32i8, 2},  // vperm2i128 + pshufb

      {TTI::SK_Select, MVT::v16i16, 1}, // vpblendvb
      {TTI::SK_Select, MVT::v32i8, 1},  // vpblendvb

      {TTI::SK_PermuteSingleSrc, MVT::v4f64, 1},  // vpermpd
      {TTI::SK_PermuteSingleSrc, MVT::v8f32, 1},  // vpermps
      {TTI::SK_PermuteSingleSrc, MVT::v4i64, 1},  // vpermq
      {TTI::SK_PermuteSingleSrc, MVT::v8i32, 1},  // vpermd
      {TTI::SK_PermuteSingleSrc, MVT::v16i16, 4}, // vperm2i128 + 2*vpshufb
      {TTI::SK_PermuteSingleSrc, MVT::v32i8, 4},  // vperm2i128 + 2*vpshufb

      {TTI::SK_PermuteTwoSrc, MVT::v4f64, 3},  // 2*vpermpd + vblendpd
      {TTI::SK_PermuteTwoSrc, MVT::v8f32, 3},  // 2*vpermps + vblendps
      {TTI::SK_PermuteTwoSrc, MVT::v4i64, 3},  // 2*vpermq + vpblendd
      {TTI::SK_PermuteTwoSrc, MVT::v8i32, 3},  // 2*vpermd + vpblendd
      {TTI::SK_PermuteTwoSrc, MVT::v16i16, 7}, // 2*vperm2i128 + 4*vpshufb
      {TTI::SK_PermuteTwoSrc, MVT::v32i8, 7},  // 2*vperm2i128 + 4*vpshufb
  };

  if (ST->hasAVX2())
    if (const auto *Entry = CostTableLookup(AVX2ShuffleTbl, Kind, LT.second))
      return LT.first * Entry->Cost;

  static const CostTblEntry XOPShuffleTbl[] = {
      {TTI::SK_PermuteSingleSrc, MVT::v4f64, 2},  // vperm2f128 + vpermil2pd
      {TTI::SK_PermuteSingleSrc, MVT::v8f32, 2},  // vperm2f128 + vpermil2ps
      {TTI::SK_PermuteSingleSrc, MVT::v4i64, 2},  // vperm2f128 + vpermil2pd
      {TTI::SK_PermuteSingleSrc, MVT::v8i32, 2},  // vperm2f128 + vpermil2ps
      {TTI::SK_PermuteSingleSrc, MVT::v16i16, 4}, // vextractf128 + 2*vpperm
      {TTI::SK_PermuteSingleSrc, MVT::v32i8, 4},  // vextractf128 + 2*vpperm

      {TTI::SK_PermuteTwoSrc, MVT::v16i16, 9}, // 2*vextractf128 + 6*vpperm
      {TTI::SK_PermuteTwoSrc, MVT::v8i16, 1},  // vpperm
      {TTI::SK_PermuteTwoSrc, MVT::v32i8, 9},  // 2*vextractf128 + 6*vpperm
      {TTI::SK_PermuteTwoSrc, MVT::v16i8, 1},  // vpperm
  };

  if (ST->hasXOP())
    if (const auto *Entry = CostTableLookup(XOPShuffleTbl, Kind, LT.second))
      return LT.first * Entry->Cost;

  static const CostTblEntry AVX1ShuffleTbl[] = {
      {TTI::SK_Broadcast, MVT::v4f64, 2},  // vperm2f128 + vpermilpd
      {TTI::SK_Broadcast, MVT::v8f32, 2},  // vperm2f128 + vpermilps
      {TTI::SK_Broadcast, MVT::v4i64, 2},  // vperm2f128 + vpermilpd
      {TTI::SK_Broadcast, MVT::v8i32, 2},  // vperm2f128 + vpermilps
      {TTI::SK_Broadcast, MVT::v16i16, 3}, // vpshuflw + vpshufd + vinsertf128
      {TTI::SK_Broadcast, MVT::v32i8, 2},  // vpshufb + vinsertf128

      {TTI::SK_Reverse, MVT::v4f64, 2},  // vperm2f128 + vpermilpd
      {TTI::SK_Reverse, MVT::v8f32, 2},  // vperm2f128 + vpermilps
      {TTI::SK_Reverse, MVT::v4i64, 2},  // vperm2f128 + vpermilpd
      {TTI::SK_Reverse, MVT::v8i32, 2},  // vperm2f128 + vpermilps
      {TTI::SK_Reverse, MVT::v16i16, 4}, // vextractf128 + 2*pshufb
      {TTI::SK_Reverse, MVT::v32i8, 4},  // vextractf128 + 2*pshufb

      {TTI::SK_Select, MVT::v4i64, 1},  // vblendpd
      {TTI::SK_Select, MVT::v4f64, 1},  // vblendpd
      {TTI::SK_Select, MVT::v8i32, 1},  // vblendps
      {TTI::SK_Select, MVT::v8f32, 1},  // vblendps
      {TTI::SK_Select, MVT::v16i16, 3}, // vpand + vpandn + vpor
      {TTI::SK_Select, MVT::v32i8, 3},  // vpand + vpandn + vpor

      {TTI::SK_PermuteSingleSrc, MVT::v4f64, 2},  // vperm2f128 + vshufpd
      {TTI::SK_PermuteSingleSrc, MVT::v4i64, 2},  // vperm2f128 + vshufpd
      {TTI::SK_PermuteSingleSrc, MVT::v8f32, 4},  // 2*vperm2f128 + 2*vshufps
      {TTI::SK_PermuteSingleSrc, MVT::v8i32, 4},  // 2*vperm2f128 + 2*vshufps
      {TTI::SK_PermuteSingleSrc, MVT::v16i16, 8}, // vextractf128 + 4*pshufb
                                                  // + 2*por + vinsertf128
      {TTI::SK_PermuteSingleSrc, MVT::v32i8, 8},  // vextractf128 + 4*pshufb
                                                  // + 2*por + vinsertf128

      {TTI::SK_PermuteTwoSrc, MVT::v4f64, 3},   // 2*vperm2f128 + vshufpd
      {TTI::SK_PermuteTwoSrc, MVT::v4i64, 3},   // 2*vperm2f128 + vshufpd
      {TTI::SK_PermuteTwoSrc, MVT::v8f32, 4},   // 2*vperm2f128 + 2*vshufps
      {TTI::SK_PermuteTwoSrc, MVT::v8i32, 4},   // 2*vperm2f128 + 2*vshufps
      {TTI::SK_PermuteTwoSrc, MVT::v16i16, 15}, // 2*vextractf128 + 8*pshufb
                                                // + 4*por + vinsertf128
      {TTI::SK_PermuteTwoSrc, MVT::v32i8, 15},  // 2*vextractf128 + 8*pshufb
                                                // + 4*por + vinsertf128
  };

  if (ST->hasAVX())
    if (const auto *Entry = CostTableLookup(AVX1ShuffleTbl, Kind, LT.second))
      return LT.first * Entry->Cost;

  static const CostTblEntry SSE41ShuffleTbl[] = {
      {TTI::SK_Select, MVT::v2i64, 1}, // pblendw
      {TTI::SK_Select, MVT::v2f64, 1}, // movsd
      {TTI::SK_Select, MVT::v4i32, 1}, // pblendw
      {TTI::SK_Select, MVT::v4f32, 1}, // blendps
      {TTI::SK_Select, MVT::v8i16, 1}, // pblendw
      {TTI::SK_Select, MVT::v16i8, 1}  // pblendvb
  };

  if (ST->hasSSE41())
    if (const auto *Entry = CostTableLookup(SSE41ShuffleTbl, Kind, LT.second))
      return LT.first * Entry->Cost;

  static const CostTblEntry SSSE3ShuffleTbl[] = {
      {TTI::SK_Broadcast, MVT::v8i16, 1}, // pshufb
      {TTI::SK_Broadcast, MVT::v16i8, 1}, // pshufb

      {TTI::SK_Reverse, MVT::v8i16, 1}, // pshufb
      {TTI::SK_Reverse, MVT::v16i8, 1}, // pshufb

      {TTI::SK_Select, MVT::v8i16, 3}, // 2*pshufb + por
      {TTI::SK_Select, MVT::v16i8, 3}, // 2*pshufb + por

      {TTI::SK_PermuteSingleSrc, MVT::v8i16, 1}, // pshufb
      {TTI::SK_PermuteSingleSrc, MVT::v16i8, 1}, // pshufb

      {TTI::SK_PermuteTwoSrc, MVT::v8i16, 3}, // 2*pshufb + por
      {TTI::SK_PermuteTwoSrc, MVT::v16i8, 3}, // 2*pshufb + por
  };

  if (ST->hasSSSE3())
    if (const auto *Entry = CostTableLookup(SSSE3ShuffleTbl, Kind, LT.second))
      return LT.first * Entry->Cost;

  static const CostTblEntry SSE2ShuffleTbl[] = {
      {TTI::SK_Broadcast, MVT::v2f64, 1}, // shufpd
      {TTI::SK_Broadcast, MVT::v2i64, 1}, // pshufd
      {TTI::SK_Broadcast, MVT::v4i32, 1}, // pshufd
      {TTI::SK_Broadcast, MVT::v8i16, 2}, // pshuflw + pshufd
      {TTI::SK_Broadcast, MVT::v16i8, 3}, // unpck + pshuflw + pshufd

      {TTI::SK_Reverse, MVT::v2f64, 1}, // shufpd
      {TTI::SK_Reverse, MVT::v2i64, 1}, // pshufd
      {TTI::SK_Reverse, MVT::v4i32, 1}, // pshufd
      {TTI::SK_Reverse, MVT::v8i16, 3}, // pshuflw + pshufhw + pshufd
      {TTI::SK_Reverse, MVT::v16i8, 9}, // 2*pshuflw + 2*pshufhw
                                        // + 2*pshufd + 2*unpck + packus

      {TTI::SK_Select, MVT::v2i64, 1}, // movsd
      {TTI::SK_Select, MVT::v2f64, 1}, // movsd
      {TTI::SK_Select, MVT::v4i32, 2}, // 2*shufps
      {TTI::SK_Select, MVT::v8i16, 3}, // pand + pandn + por
      {TTI::SK_Select, MVT::v16i8, 3}, // pand + pandn + por

      {TTI::SK_PermuteSingleSrc, MVT::v2f64, 1},  // shufpd
      {TTI::SK_PermuteSingleSrc, MVT::v2i64, 1},  // pshufd
      {TTI::SK_PermuteSingleSrc, MVT::v4i32, 1},  // pshufd
      {TTI::SK_PermuteSingleSrc, MVT::v8i16, 5},  // 2*pshuflw + 2*pshufhw
      {TTI::SK_PermuteSingleSrc, MVT::v16i8, 10}, // 2*pshuflw + 2*pshufhw
                                                  // + 2*pshufd + 2*unpck + 2*packus

      {TTI::SK_PermuteTwoSrc, MVT::v2f64, 1},  // shufpd
      {TTI::SK_PermuteTwoSrc, MVT::v2i64, 1},  // shufpd
      {TTI::SK_PermuteTwoSrc, MVT::v4i32, 2},  // 2*{unpck,movsd,pshufd}
      {TTI::SK_PermuteTwoSrc, MVT::v8i16, 8},  // blend+permute
      {TTI::SK_PermuteTwoSrc, MVT::v16i8, 13}, // blend+permute
  };

  if (ST->hasSSE2())
    if (const auto *Entry = CostTableLookup(SSE2ShuffleTbl, Kind, LT.second))
      return LT.first * Entry->Cost;

  static const CostTblEntry SSE1ShuffleTbl[] = {
      {TTI::SK_Broadcast, MVT::v4f32, 1},        // shufps
      {TTI::SK_Reverse, MVT::v4f32, 1},          // shufps
      {TTI::SK_Select, MVT::v4f32, 2},           // 2*shufps
      {TTI::SK_PermuteSingleSrc, MVT::v4f32, 1}, // shufps
      {TTI::SK_PermuteTwoSrc, MVT::v4f32, 2},    // 2*shufps
  };

  if (ST->hasSSE1())
    if (const auto *Entry = CostTableLookup(SSE1ShuffleTbl, Kind, LT.second))
      return LT.first * Entry->Cost;

  return BaseT::getShuffleCost(Kind, Tp, Index, SubTp);
}

int X86TTIImpl::getCastInstrCost(unsigned Opcode, Type *Dst, Type *Src,
                                 const Instruction *I) {
  int ISD = TLI->InstructionOpcodeToISD(Opcode);
  assert(ISD && "Invalid opcode");

  // FIXME: Need a better design of the cost table to handle non-simple types of
  // potential massive combinations (elem_num x src_type x dst_type).

  static const TypeConversionCostTblEntry AVX512BWConversionTbl[] {
    { ISD::SIGN_EXTEND, MVT::v32i16, MVT::v32i8, 1 },
    { ISD::ZERO_EXTEND, MVT::v32i16, MVT::v32i8, 1 },

    // Mask sign extend has an instruction.
    { ISD::SIGN_EXTEND, MVT::v8i16,  MVT::v8i1,  1 },
    { ISD::SIGN_EXTEND, MVT::v16i8,  MVT::v16i1, 1 },
    { ISD::SIGN_EXTEND, MVT::v16i16, MVT::v16i1, 1 },
    { ISD::SIGN_EXTEND, MVT::v32i8,  MVT::v32i1, 1 },
    { ISD::SIGN_EXTEND, MVT::v32i16, MVT::v32i1, 1 },
    { ISD::SIGN_EXTEND, MVT::v64i8,  MVT::v64i1, 1 },

    // Mask zero extend is a load + broadcast.
    { ISD::ZERO_EXTEND, MVT::v8i16,  MVT::v8i1,  2 },
    { ISD::ZERO_EXTEND, MVT::v16i8,  MVT::v16i1, 2 },
    { ISD::ZERO_EXTEND, MVT::v16i16, MVT::v16i1, 2 },
    { ISD::ZERO_EXTEND, MVT::v32i8,  MVT::v32i1, 2 },
    { ISD::ZERO_EXTEND, MVT::v32i16, MVT::v32i1, 2 },
    { ISD::ZERO_EXTEND, MVT::v64i8,  MVT::v64i1, 2 },
  };

  static const TypeConversionCostTblEntry AVX512DQConversionTbl[] = {
    { ISD::SINT_TO_FP, MVT::v2f32, MVT::v2i64, 1 },
    { ISD::SINT_TO_FP, MVT::v2f64, MVT::v2i64, 1 },
    { ISD::SINT_TO_FP, MVT::v4f32, MVT::v4i64, 1 },
    { ISD::SINT_TO_FP, MVT::v4f64, MVT::v4i64, 1 },
    { ISD::SINT_TO_FP, MVT::v8f32, MVT::v8i64, 1 },
    { ISD::SINT_TO_FP, MVT::v8f64, MVT::v8i64, 1 },

    { ISD::UINT_TO_FP, MVT::v2f32, MVT::v2i64, 1 },
    { ISD::UINT_TO_FP, MVT::v2f64, MVT::v2i64, 1 },
    { ISD::UINT_TO_FP, MVT::v4f32, MVT::v4i64, 1 },
    { ISD::UINT_TO_FP, MVT::v4f64, MVT::v4i64, 1 },
    { ISD::UINT_TO_FP, MVT::v8f32, MVT::v8i64, 1 },
    { ISD::UINT_TO_FP, MVT::v8f64, MVT::v8i64, 1 },

    { ISD::FP_TO_SINT, MVT::v2i64, MVT::v2f32, 1 },
    { ISD::FP_TO_SINT, MVT::v4i64, MVT::v4f32, 1 },
    { ISD::FP_TO_SINT, MVT::v8i64, MVT::v8f32, 1 },
    { ISD::FP_TO_SINT, MVT::v2i64, MVT::v2f64, 1 },
    { ISD::FP_TO_SINT, MVT::v4i64, MVT::v4f64, 1 },
    { ISD::FP_TO_SINT, MVT::v8i64, MVT::v8f64, 1 },

    { ISD::FP_TO_UINT, MVT::v2i64, MVT::v2f32, 1 },
    { ISD::FP_TO_UINT, MVT::v4i64, MVT::v4f32, 1 },
    { ISD::FP_TO_UINT, MVT::v8i64, MVT::v8f32, 1 },
    { ISD::FP_TO_UINT, MVT::v2i64, MVT::v2f64, 1 },
    { ISD::FP_TO_UINT, MVT::v4i64, MVT::v4f64, 1 },
    { ISD::FP_TO_UINT, MVT::v8i64, MVT::v8f64, 1 },
  };
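  // For example (illustrative IR): with AVX512DQ,
  //   %f = sitofp <8 x i64> %v to <8 x double>
  // is a single vcvtqq2pd, which is why the entries above all cost 1.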

  // TODO: For AVX512DQ + AVX512VL, we also have cheap casts for 128-bit and
  // 256-bit wide vectors.

  static const TypeConversionCostTblEntry AVX512FConversionTbl[] = {
    { ISD::FP_EXTEND, MVT::v8f64,  MVT::v8f32,  1 },
    { ISD::FP_EXTEND, MVT::v8f64,  MVT::v16f32, 3 },
    { ISD::FP_ROUND,  MVT::v8f32,  MVT::v8f64,  1 },

    { ISD::TRUNCATE,  MVT::v16i8,  MVT::v16i32, 1 },
    { ISD::TRUNCATE,  MVT::v16i16, MVT::v16i32, 1 },
    { ISD::TRUNCATE,  MVT::v8i16,  MVT::v8i64,  1 },
    { ISD::TRUNCATE,  MVT::v8i32,  MVT::v8i64,  1 },

    // v16i1 -> v16i32 - load + broadcast
    { ISD::SIGN_EXTEND, MVT::v16i32, MVT::v16i1,  2 },
    { ISD::ZERO_EXTEND, MVT::v16i32, MVT::v16i1,  2 },
    { ISD::SIGN_EXTEND, MVT::v16i32, MVT::v16i8,  1 },
    { ISD::ZERO_EXTEND, MVT::v16i32, MVT::v16i8,  1 },
    { ISD::SIGN_EXTEND, MVT::v16i32, MVT::v16i16, 1 },
    { ISD::ZERO_EXTEND, MVT::v16i32, MVT::v16i16, 1 },
    { ISD::ZERO_EXTEND, MVT::v8i64,  MVT::v8i16,  1 },
    { ISD::SIGN_EXTEND, MVT::v8i64,  MVT::v8i16,  1 },
    { ISD::SIGN_EXTEND, MVT::v8i64,  MVT::v8i32,  1 },
    { ISD::ZERO_EXTEND, MVT::v8i64,  MVT::v8i32,  1 },

    { ISD::SINT_TO_FP, MVT::v8f64,  MVT::v8i1,   4 },
    { ISD::SINT_TO_FP, MVT::v16f32, MVT::v16i1,  3 },
    { ISD::SINT_TO_FP, MVT::v8f64,  MVT::v8i8,   2 },
    { ISD::SINT_TO_FP, MVT::v16f32, MVT::v16i8,  2 },
    { ISD::SINT_TO_FP, MVT::v8f64,  MVT::v8i16,  2 },
    { ISD::SINT_TO_FP, MVT::v16f32, MVT::v16i16, 2 },
    { ISD::SINT_TO_FP, MVT::v16f32, MVT::v16i32, 1 },
    { ISD::SINT_TO_FP, MVT::v8f64,  MVT::v8i32,  1 },

    { ISD::UINT_TO_FP, MVT::v8f64,  MVT::v8i1,   4 },
    { ISD::UINT_TO_FP, MVT::v16f32, MVT::v16i1,  3 },
    { ISD::UINT_TO_FP, MVT::v2f64,  MVT::v2i8,   2 },
    { ISD::UINT_TO_FP, MVT::v4f64,  MVT::v4i8,   2 },
    { ISD::UINT_TO_FP, MVT::v8f32,  MVT::v8i8,   2 },
    { ISD::UINT_TO_FP, MVT::v8f64,  MVT::v8i8,   2 },
    { ISD::UINT_TO_FP, MVT::v16f32, MVT::v16i8,  2 },
    { ISD::UINT_TO_FP, MVT::v2f64,  MVT::v2i16,  5 },
    { ISD::UINT_TO_FP, MVT::v4f64,  MVT::v4i16,  2 },
    { ISD::UINT_TO_FP, MVT::v8f32,  MVT::v8i16,  2 },
    { ISD::UINT_TO_FP, MVT::v8f64,  MVT::v8i16,  2 },
    { ISD::UINT_TO_FP, MVT::v16f32, MVT::v16i16, 2 },
    { ISD::UINT_TO_FP, MVT::v2f32,  MVT::v2i32,  2 },
    { ISD::UINT_TO_FP, MVT::v2f64,  MVT::v2i32,  1 },
    { ISD::UINT_TO_FP, MVT::v4f32,  MVT::v4i32,  1 },
    { ISD::UINT_TO_FP, MVT::v4f64,  MVT::v4i32,  1 },
    { ISD::UINT_TO_FP, MVT::v8f32,  MVT::v8i32,  1 },
    { ISD::UINT_TO_FP, MVT::v8f64,  MVT::v8i32,  1 },
    { ISD::UINT_TO_FP, MVT::v16f32, MVT::v16i32, 1 },
    { ISD::UINT_TO_FP, MVT::v2f32,  MVT::v2i64,  5 },
    { ISD::UINT_TO_FP, MVT::v8f32,  MVT::v8i64, 26 },
    { ISD::UINT_TO_FP, MVT::v2f64,  MVT::v2i64,  5 },
    { ISD::UINT_TO_FP, MVT::v4f64,  MVT::v4i64,  5 },
    { ISD::UINT_TO_FP, MVT::v8f64,  MVT::v8i64,  5 },

    { ISD::UINT_TO_FP, MVT::f64,    MVT::i64,    1 },

    { ISD::FP_TO_UINT, MVT::v2i32,  MVT::v2f32,  1 },
    { ISD::FP_TO_UINT, MVT::v4i32,  MVT::v4f32,  1 },
    { ISD::FP_TO_UINT, MVT::v4i32,  MVT::v4f64,  1 },
    { ISD::FP_TO_UINT, MVT::v8i32,  MVT::v8f32,  1 },
    { ISD::FP_TO_UINT, MVT::v8i16,  MVT::v8f64,  2 },
    { ISD::FP_TO_UINT, MVT::v8i8,   MVT::v8f64,  2 },
    { ISD::FP_TO_UINT, MVT::v16i32, MVT::v16f32, 1 },
    { ISD::FP_TO_UINT, MVT::v16i16, MVT::v16f32, 2 },
    { ISD::FP_TO_UINT, MVT::v16i8,  MVT::v16f32, 2 },
  };

  static const TypeConversionCostTblEntry AVX2ConversionTbl[] = {
    { ISD::SIGN_EXTEND, MVT::v4i64,  MVT::v4i1,  3 },
    { ISD::ZERO_EXTEND, MVT::v4i64,  MVT::v4i1,  3 },
    { ISD::SIGN_EXTEND, MVT::v8i32,  MVT::v8i1,  3 },
    { ISD::ZERO_EXTEND, MVT::v8i32,  MVT::v8i1,  3 },
    { ISD::SIGN_EXTEND, MVT::v4i64,  MVT::v4i8,  3 },
    { ISD::ZERO_EXTEND, MVT::v4i64,  MVT::v4i8,  3 },
    { ISD::SIGN_EXTEND, MVT::v8i32,  MVT::v8i8,  3 },
    { ISD::ZERO_EXTEND, MVT::v8i32,  MVT::v8i8,  3 },
    { ISD::SIGN_EXTEND, MVT::v16i16, MVT::v16i8, 1 },
    { ISD::ZERO_EXTEND, MVT::v16i16, MVT::v16i8, 1 },
    { ISD::SIGN_EXTEND, MVT::v4i64,  MVT::v4i16, 3 },
    { ISD::ZERO_EXTEND, MVT::v4i64,  MVT::v4i16, 3 },
    { ISD::SIGN_EXTEND, MVT::v8i32,  MVT::v8i16, 1 },
    { ISD::ZERO_EXTEND, MVT::v8i32,  MVT::v8i16, 1 },
    { ISD::SIGN_EXTEND, MVT::v4i64,  MVT::v4i32, 1 },
    { ISD::ZERO_EXTEND, MVT::v4i64,  MVT::v4i32, 1 },

    { ISD::TRUNCATE,    MVT::v4i8,  MVT::v4i64, 2 },
    { ISD::TRUNCATE,    MVT::v4i16, MVT::v4i64, 2 },
    { ISD::TRUNCATE,    MVT::v4i32, MVT::v4i64, 2 },
    { ISD::TRUNCATE,    MVT::v8i8,  MVT::v8i32, 2 },
    { ISD::TRUNCATE,    MVT::v8i16, MVT::v8i32, 2 },
    { ISD::TRUNCATE,    MVT::v8i32, MVT::v8i64, 4 },

    { ISD::FP_EXTEND,   MVT::v8f64, MVT::v8f32, 3 },
    { ISD::FP_ROUND,    MVT::v8f32, MVT::v8f64, 3 },

    { ISD::UINT_TO_FP,  MVT::v8f32, MVT::v8i32, 8 },
  };
1400 static const TypeConversionCostTblEntry AVXConversionTbl[] = {
1401 { ISD::SIGN_EXTEND, MVT::v4i64, MVT::v4i1, 6 },
1402 { ISD::ZERO_EXTEND, MVT::v4i64, MVT::v4i1, 4 },
1403 { ISD::SIGN_EXTEND, MVT::v8i32, MVT::v8i1, 7 },
1404 { ISD::ZERO_EXTEND, MVT::v8i32, MVT::v8i1, 4 },
1405 { ISD::SIGN_EXTEND, MVT::v4i64, MVT::v4i8, 6 },
1406 { ISD::ZERO_EXTEND, MVT::v4i64, MVT::v4i8, 4 },
1407 { ISD::SIGN_EXTEND, MVT::v8i32, MVT::v8i8, 7 },
1408 { ISD::ZERO_EXTEND, MVT::v8i32, MVT::v8i8, 4 },
1409 { ISD::SIGN_EXTEND, MVT::v16i16, MVT::v16i8, 4 },
1410 { ISD::ZERO_EXTEND, MVT::v16i16, MVT::v16i8, 4 },
1411 { ISD::SIGN_EXTEND, MVT::v4i64, MVT::v4i16, 6 },
1412 { ISD::ZERO_EXTEND, MVT::v4i64, MVT::v4i16, 3 },
1413 { ISD::SIGN_EXTEND, MVT::v8i32, MVT::v8i16, 4 },
1414 { ISD::ZERO_EXTEND, MVT::v8i32, MVT::v8i16, 4 },
1415 { ISD::SIGN_EXTEND, MVT::v4i64, MVT::v4i32, 4 },
1416 { ISD::ZERO_EXTEND, MVT::v4i64, MVT::v4i32, 4 },
1418 { ISD::TRUNCATE, MVT::v16i8, MVT::v16i16, 4 },
1419 { ISD::TRUNCATE, MVT::v8i8, MVT::v8i32, 4 },
1420 { ISD::TRUNCATE, MVT::v8i16, MVT::v8i32, 5 },
1421 { ISD::TRUNCATE, MVT::v4i8, MVT::v4i64, 4 },
1422 { ISD::TRUNCATE, MVT::v4i16, MVT::v4i64, 4 },
1423 { ISD::TRUNCATE, MVT::v4i32, MVT::v4i64, 4 },
1424 { ISD::TRUNCATE, MVT::v8i32, MVT::v8i64, 9 },
1426 { ISD::SINT_TO_FP, MVT::v4f32, MVT::v4i1, 3 },
1427 { ISD::SINT_TO_FP, MVT::v4f64, MVT::v4i1, 3 },
1428 { ISD::SINT_TO_FP, MVT::v8f32, MVT::v8i1, 8 },
1429 { ISD::SINT_TO_FP, MVT::v4f32, MVT::v4i8, 3 },
1430 { ISD::SINT_TO_FP, MVT::v4f64, MVT::v4i8, 3 },
1431 { ISD::SINT_TO_FP, MVT::v8f32, MVT::v8i8, 8 },
1432 { ISD::SINT_TO_FP, MVT::v4f32, MVT::v4i16, 3 },
1433 { ISD::SINT_TO_FP, MVT::v4f64, MVT::v4i16, 3 },
1434 { ISD::SINT_TO_FP, MVT::v8f32, MVT::v8i16, 5 },
1435 { ISD::SINT_TO_FP, MVT::v4f32, MVT::v4i32, 1 },
1436 { ISD::SINT_TO_FP, MVT::v4f64, MVT::v4i32, 1 },
1437 { ISD::SINT_TO_FP, MVT::v8f32, MVT::v8i32, 1 },
1439 { ISD::UINT_TO_FP, MVT::v4f32, MVT::v4i1, 7 },
1440 { ISD::UINT_TO_FP, MVT::v4f64, MVT::v4i1, 7 },
1441 { ISD::UINT_TO_FP, MVT::v8f32, MVT::v8i1, 6 },
1442 { ISD::UINT_TO_FP, MVT::v4f32, MVT::v4i8, 2 },
1443 { ISD::UINT_TO_FP, MVT::v4f64, MVT::v4i8, 2 },
1444 { ISD::UINT_TO_FP, MVT::v8f32, MVT::v8i8, 5 },
1445 { ISD::UINT_TO_FP, MVT::v4f32, MVT::v4i16, 2 },
1446 { ISD::UINT_TO_FP, MVT::v4f64, MVT::v4i16, 2 },
1447 { ISD::UINT_TO_FP, MVT::v8f32, MVT::v8i16, 5 },
1448 { ISD::UINT_TO_FP, MVT::v2f64, MVT::v2i32, 6 },
1449 { ISD::UINT_TO_FP, MVT::v4f32, MVT::v4i32, 6 },
1450 { ISD::UINT_TO_FP, MVT::v4f64, MVT::v4i32, 6 },
1451 { ISD::UINT_TO_FP, MVT::v8f32, MVT::v8i32, 9 },
1452 { ISD::UINT_TO_FP, MVT::v2f64, MVT::v2i64, 5 },
1453 { ISD::UINT_TO_FP, MVT::v4f64, MVT::v4i64, 6 },
// The generic code to compute the scalar overhead is currently broken.
// Work around this limitation by estimating the scalarization overhead
// here. We have roughly 10 instructions per scalar element.
// Multiply that by the vector width.
// FIXME: remove this when PR19268 is fixed.
{ ISD::SINT_TO_FP, MVT::v4f64, MVT::v4i64, 13 },
1462 { ISD::FP_TO_SINT, MVT::v4i8, MVT::v4f32, 1 },
1463 { ISD::FP_TO_SINT, MVT::v8i8, MVT::v8f32, 7 },
// This node is expanded into scalarized operations but BasicTTI is overly
// optimistic when estimating its cost. It computes 3 per element (one
// vector-extract, one scalar conversion and one vector-insert). The
// problem is that the inserts form a read-modify-write chain so latency
// should be factored in too. Inflate the cost per element by 1.
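// Reading the entries below: 8 elements * (3 + 1) = 8*4 for v8i32 <- v8f32,
// and 4 elements * (3 + 1) = 4*4 for v4i32 <- v4f64.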
1469 { ISD::FP_TO_UINT, MVT::v8i32, MVT::v8f32, 8*4 },
1470 { ISD::FP_TO_UINT, MVT::v4i32, MVT::v4f64, 4*4 },
1472 { ISD::FP_EXTEND, MVT::v4f64, MVT::v4f32, 1 },
1473 { ISD::FP_ROUND, MVT::v4f32, MVT::v4f64, 1 },
1476 static const TypeConversionCostTblEntry SSE41ConversionTbl[] = {
1477 { ISD::ZERO_EXTEND, MVT::v4i64, MVT::v4i8, 2 },
1478 { ISD::SIGN_EXTEND, MVT::v4i64, MVT::v4i8, 2 },
1479 { ISD::ZERO_EXTEND, MVT::v4i64, MVT::v4i16, 2 },
1480 { ISD::SIGN_EXTEND, MVT::v4i64, MVT::v4i16, 2 },
1481 { ISD::ZERO_EXTEND, MVT::v4i64, MVT::v4i32, 2 },
1482 { ISD::SIGN_EXTEND, MVT::v4i64, MVT::v4i32, 2 },
1484 { ISD::ZERO_EXTEND, MVT::v4i16, MVT::v4i8, 1 },
1485 { ISD::SIGN_EXTEND, MVT::v4i16, MVT::v4i8, 2 },
1486 { ISD::ZERO_EXTEND, MVT::v4i32, MVT::v4i8, 1 },
1487 { ISD::SIGN_EXTEND, MVT::v4i32, MVT::v4i8, 1 },
1488 { ISD::ZERO_EXTEND, MVT::v8i16, MVT::v8i8, 1 },
1489 { ISD::SIGN_EXTEND, MVT::v8i16, MVT::v8i8, 1 },
1490 { ISD::ZERO_EXTEND, MVT::v8i32, MVT::v8i8, 2 },
1491 { ISD::SIGN_EXTEND, MVT::v8i32, MVT::v8i8, 2 },
1492 { ISD::ZERO_EXTEND, MVT::v16i16, MVT::v16i8, 2 },
1493 { ISD::SIGN_EXTEND, MVT::v16i16, MVT::v16i8, 2 },
1494 { ISD::ZERO_EXTEND, MVT::v16i32, MVT::v16i8, 4 },
1495 { ISD::SIGN_EXTEND, MVT::v16i32, MVT::v16i8, 4 },
1496 { ISD::ZERO_EXTEND, MVT::v4i32, MVT::v4i16, 1 },
1497 { ISD::SIGN_EXTEND, MVT::v4i32, MVT::v4i16, 1 },
1498 { ISD::ZERO_EXTEND, MVT::v8i32, MVT::v8i16, 2 },
1499 { ISD::SIGN_EXTEND, MVT::v8i32, MVT::v8i16, 2 },
1500 { ISD::ZERO_EXTEND, MVT::v16i32, MVT::v16i16, 4 },
1501 { ISD::SIGN_EXTEND, MVT::v16i32, MVT::v16i16, 4 },
1503 { ISD::TRUNCATE, MVT::v4i8, MVT::v4i16, 2 },
1504 { ISD::TRUNCATE, MVT::v8i8, MVT::v8i16, 1 },
1505 { ISD::TRUNCATE, MVT::v4i8, MVT::v4i32, 1 },
1506 { ISD::TRUNCATE, MVT::v4i16, MVT::v4i32, 1 },
1507 { ISD::TRUNCATE, MVT::v8i8, MVT::v8i32, 3 },
1508 { ISD::TRUNCATE, MVT::v8i16, MVT::v8i32, 3 },
1509 { ISD::TRUNCATE, MVT::v16i16, MVT::v16i32, 6 },
1511 { ISD::UINT_TO_FP, MVT::f64, MVT::i64, 4 },
1514 static const TypeConversionCostTblEntry SSE2ConversionTbl[] = {
// These are somewhat magic numbers justified by looking at the output of
// Intel's IACA, running some kernels and making sure that, when we take
// legalization into account, the throughput will be overestimated.
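// The N*10 entries below appear to follow the same estimate as the AVX table:
// roughly 10 instructions per scalar element, multiplied by the number of
// elements being converted.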
1518 { ISD::SINT_TO_FP, MVT::v4f32, MVT::v16i8, 8 },
1519 { ISD::SINT_TO_FP, MVT::v2f64, MVT::v16i8, 16*10 },
1520 { ISD::SINT_TO_FP, MVT::v4f32, MVT::v8i16, 15 },
1521 { ISD::SINT_TO_FP, MVT::v2f64, MVT::v8i16, 8*10 },
1522 { ISD::SINT_TO_FP, MVT::v4f32, MVT::v4i32, 5 },
1523 { ISD::SINT_TO_FP, MVT::v2f64, MVT::v4i32, 4*10 },
1524 { ISD::SINT_TO_FP, MVT::v4f32, MVT::v2i64, 15 },
1525 { ISD::SINT_TO_FP, MVT::v2f64, MVT::v2i64, 2*10 },
1527 { ISD::UINT_TO_FP, MVT::v2f64, MVT::v16i8, 16*10 },
1528 { ISD::UINT_TO_FP, MVT::v4f32, MVT::v16i8, 8 },
1529 { ISD::UINT_TO_FP, MVT::v4f32, MVT::v8i16, 15 },
1530 { ISD::UINT_TO_FP, MVT::v2f64, MVT::v8i16, 8*10 },
1531 { ISD::UINT_TO_FP, MVT::v2f64, MVT::v4i32, 4*10 },
1532 { ISD::UINT_TO_FP, MVT::v4f32, MVT::v4i32, 8 },
1533 { ISD::UINT_TO_FP, MVT::v2f64, MVT::v2i64, 6 },
1534 { ISD::UINT_TO_FP, MVT::v4f32, MVT::v2i64, 15 },
1536 { ISD::FP_TO_SINT, MVT::v2i32, MVT::v2f64, 3 },
1538 { ISD::UINT_TO_FP, MVT::f64, MVT::i64, 6 },
1540 { ISD::ZERO_EXTEND, MVT::v4i16, MVT::v4i8, 1 },
1541 { ISD::SIGN_EXTEND, MVT::v4i16, MVT::v4i8, 6 },
1542 { ISD::ZERO_EXTEND, MVT::v4i32, MVT::v4i8, 2 },
1543 { ISD::SIGN_EXTEND, MVT::v4i32, MVT::v4i8, 3 },
1544 { ISD::ZERO_EXTEND, MVT::v4i64, MVT::v4i8, 4 },
1545 { ISD::SIGN_EXTEND, MVT::v4i64, MVT::v4i8, 8 },
1546 { ISD::ZERO_EXTEND, MVT::v8i16, MVT::v8i8, 1 },
1547 { ISD::SIGN_EXTEND, MVT::v8i16, MVT::v8i8, 2 },
1548 { ISD::ZERO_EXTEND, MVT::v8i32, MVT::v8i8, 6 },
1549 { ISD::SIGN_EXTEND, MVT::v8i32, MVT::v8i8, 6 },
1550 { ISD::ZERO_EXTEND, MVT::v16i16, MVT::v16i8, 3 },
1551 { ISD::SIGN_EXTEND, MVT::v16i16, MVT::v16i8, 4 },
1552 { ISD::ZERO_EXTEND, MVT::v16i32, MVT::v16i8, 9 },
1553 { ISD::SIGN_EXTEND, MVT::v16i32, MVT::v16i8, 12 },
1554 { ISD::ZERO_EXTEND, MVT::v4i32, MVT::v4i16, 1 },
1555 { ISD::SIGN_EXTEND, MVT::v4i32, MVT::v4i16, 2 },
1556 { ISD::ZERO_EXTEND, MVT::v4i64, MVT::v4i16, 3 },
1557 { ISD::SIGN_EXTEND, MVT::v4i64, MVT::v4i16, 10 },
1558 { ISD::ZERO_EXTEND, MVT::v8i32, MVT::v8i16, 3 },
1559 { ISD::SIGN_EXTEND, MVT::v8i32, MVT::v8i16, 4 },
1560 { ISD::ZERO_EXTEND, MVT::v16i32, MVT::v16i16, 6 },
1561 { ISD::SIGN_EXTEND, MVT::v16i32, MVT::v16i16, 8 },
1562 { ISD::ZERO_EXTEND, MVT::v4i64, MVT::v4i32, 3 },
1563 { ISD::SIGN_EXTEND, MVT::v4i64, MVT::v4i32, 5 },
1565 { ISD::TRUNCATE, MVT::v4i8, MVT::v4i16, 4 },
1566 { ISD::TRUNCATE, MVT::v8i8, MVT::v8i16, 2 },
1567 { ISD::TRUNCATE, MVT::v16i8, MVT::v16i16, 3 },
1568 { ISD::TRUNCATE, MVT::v4i8, MVT::v4i32, 3 },
1569 { ISD::TRUNCATE, MVT::v4i16, MVT::v4i32, 3 },
1570 { ISD::TRUNCATE, MVT::v8i8, MVT::v8i32, 4 },
1571 { ISD::TRUNCATE, MVT::v16i8, MVT::v16i32, 7 },
1572 { ISD::TRUNCATE, MVT::v8i16, MVT::v8i32, 5 },
1573 { ISD::TRUNCATE, MVT::v16i16, MVT::v16i32, 10 },
1576 std::pair<int, MVT> LTSrc = TLI->getTypeLegalizationCost(DL, Src);
1577 std::pair<int, MVT> LTDest = TLI->getTypeLegalizationCost(DL, Dst);
1579 if (ST->hasSSE2() && !ST->hasAVX()) {
1580 if (const auto *Entry = ConvertCostTableLookup(SSE2ConversionTbl, ISD,
1581 LTDest.second, LTSrc.second))
1582 return LTSrc.first * Entry->Cost;
1585 EVT SrcTy = TLI->getValueType(DL, Src);
1586 EVT DstTy = TLI->getValueType(DL, Dst);
1588 // The function getSimpleVT only handles simple value types.
1589 if (!SrcTy.isSimple() || !DstTy.isSimple())
1590 return BaseT::getCastInstrCost(Opcode, Dst, Src);
1592 MVT SimpleSrcTy = SrcTy.getSimpleVT();
1593 MVT SimpleDstTy = DstTy.getSimpleVT();
// Make sure that neither type is going to be split before using the
// AVX512 tables. This handles -mprefer-vector-width=256
// with -min-legal-vector-width<=256.
1598 if (TLI->getTypeAction(SimpleSrcTy) != TargetLowering::TypeSplitVector &&
1599 TLI->getTypeAction(SimpleDstTy) != TargetLowering::TypeSplitVector) {
1601 if (const auto *Entry = ConvertCostTableLookup(AVX512BWConversionTbl, ISD,
1602 SimpleDstTy, SimpleSrcTy))
1606 if (const auto *Entry = ConvertCostTableLookup(AVX512DQConversionTbl, ISD,
1607 SimpleDstTy, SimpleSrcTy))
1610 if (ST->hasAVX512())
1611 if (const auto *Entry = ConvertCostTableLookup(AVX512FConversionTbl, ISD,
1612 SimpleDstTy, SimpleSrcTy))
1616 if (ST->hasAVX2()) {
1617 if (const auto *Entry = ConvertCostTableLookup(AVX2ConversionTbl, ISD,
1618 SimpleDstTy, SimpleSrcTy))
1623 if (const auto *Entry = ConvertCostTableLookup(AVXConversionTbl, ISD,
1624 SimpleDstTy, SimpleSrcTy))
1628 if (ST->hasSSE41()) {
1629 if (const auto *Entry = ConvertCostTableLookup(SSE41ConversionTbl, ISD,
1630 SimpleDstTy, SimpleSrcTy))
1634 if (ST->hasSSE2()) {
1635 if (const auto *Entry = ConvertCostTableLookup(SSE2ConversionTbl, ISD,
1636 SimpleDstTy, SimpleSrcTy))
1640 return BaseT::getCastInstrCost(Opcode, Dst, Src, I);
1643 int X86TTIImpl::getCmpSelInstrCost(unsigned Opcode, Type *ValTy, Type *CondTy,
1644 const Instruction *I) {
1645 // Legalize the type.
1646 std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, ValTy);
1648 MVT MTy = LT.second;
1650 int ISD = TLI->InstructionOpcodeToISD(Opcode);
1651 assert(ISD && "Invalid opcode");
1653 unsigned ExtraCost = 0;
1654 if (I && (Opcode == Instruction::ICmp || Opcode == Instruction::FCmp)) {
1655 // Some vector comparison predicates cost extra instructions.
1656 if (MTy.isVector() &&
1657 !((ST->hasXOP() && (!ST->hasAVX2() || MTy.is128BitVector())) ||
1658 (ST->hasAVX512() && 32 <= MTy.getScalarSizeInBits()) ||
1660 switch (cast<CmpInst>(I)->getPredicate()) {
1661 case CmpInst::Predicate::ICMP_NE:
1662 // xor(cmpeq(x,y),-1)
1665 case CmpInst::Predicate::ICMP_SGE:
1666 case CmpInst::Predicate::ICMP_SLE:
1667 // xor(cmpgt(x,y),-1)
1670 case CmpInst::Predicate::ICMP_ULT:
1671 case CmpInst::Predicate::ICMP_UGT:
1672 // cmpgt(xor(x,signbit),xor(y,signbit))
1673 // xor(cmpeq(pmaxu(x,y),x),-1)
1676 case CmpInst::Predicate::ICMP_ULE:
1677 case CmpInst::Predicate::ICMP_UGE:
1678 if ((ST->hasSSE41() && MTy.getScalarSizeInBits() == 32) ||
1679 (ST->hasSSE2() && MTy.getScalarSizeInBits() < 32)) {
1680 // cmpeq(psubus(x,y),0)
1681 // cmpeq(pminu(x,y),x)
1684 // xor(cmpgt(xor(x,signbit),xor(y,signbit)),-1)
1694 static const CostTblEntry AVX512BWCostTbl[] = {
1695 { ISD::SETCC, MVT::v32i16, 1 },
1696 { ISD::SETCC, MVT::v64i8, 1 },
1698 { ISD::SELECT, MVT::v32i16, 1 },
1699 { ISD::SELECT, MVT::v64i8, 1 },
1702 static const CostTblEntry AVX512CostTbl[] = {
1703 { ISD::SETCC, MVT::v8i64, 1 },
1704 { ISD::SETCC, MVT::v16i32, 1 },
1705 { ISD::SETCC, MVT::v8f64, 1 },
1706 { ISD::SETCC, MVT::v16f32, 1 },
1708 { ISD::SELECT, MVT::v8i64, 1 },
1709 { ISD::SELECT, MVT::v16i32, 1 },
1710 { ISD::SELECT, MVT::v8f64, 1 },
1711 { ISD::SELECT, MVT::v16f32, 1 },
1714 static const CostTblEntry AVX2CostTbl[] = {
1715 { ISD::SETCC, MVT::v4i64, 1 },
1716 { ISD::SETCC, MVT::v8i32, 1 },
1717 { ISD::SETCC, MVT::v16i16, 1 },
1718 { ISD::SETCC, MVT::v32i8, 1 },
1720 { ISD::SELECT, MVT::v4i64, 1 }, // pblendvb
1721 { ISD::SELECT, MVT::v8i32, 1 }, // pblendvb
1722 { ISD::SELECT, MVT::v16i16, 1 }, // pblendvb
1723 { ISD::SELECT, MVT::v32i8, 1 }, // pblendvb
1726 static const CostTblEntry AVX1CostTbl[] = {
1727 { ISD::SETCC, MVT::v4f64, 1 },
1728 { ISD::SETCC, MVT::v8f32, 1 },
1729 // AVX1 does not support 8-wide integer compare.
1730 { ISD::SETCC, MVT::v4i64, 4 },
1731 { ISD::SETCC, MVT::v8i32, 4 },
1732 { ISD::SETCC, MVT::v16i16, 4 },
1733 { ISD::SETCC, MVT::v32i8, 4 },
1735 { ISD::SELECT, MVT::v4f64, 1 }, // vblendvpd
1736 { ISD::SELECT, MVT::v8f32, 1 }, // vblendvps
1737 { ISD::SELECT, MVT::v4i64, 1 }, // vblendvpd
1738 { ISD::SELECT, MVT::v8i32, 1 }, // vblendvps
1739 { ISD::SELECT, MVT::v16i16, 3 }, // vandps + vandnps + vorps
1740 { ISD::SELECT, MVT::v32i8, 3 }, // vandps + vandnps + vorps
1743 static const CostTblEntry SSE42CostTbl[] = {
1744 { ISD::SETCC, MVT::v2f64, 1 },
1745 { ISD::SETCC, MVT::v4f32, 1 },
1746 { ISD::SETCC, MVT::v2i64, 1 },
1749 static const CostTblEntry SSE41CostTbl[] = {
1750 { ISD::SELECT, MVT::v2f64, 1 }, // blendvpd
1751 { ISD::SELECT, MVT::v4f32, 1 }, // blendvps
1752 { ISD::SELECT, MVT::v2i64, 1 }, // pblendvb
1753 { ISD::SELECT, MVT::v4i32, 1 }, // pblendvb
1754 { ISD::SELECT, MVT::v8i16, 1 }, // pblendvb
1755 { ISD::SELECT, MVT::v16i8, 1 }, // pblendvb
1758 static const CostTblEntry SSE2CostTbl[] = {
1759 { ISD::SETCC, MVT::v2f64, 2 },
1760 { ISD::SETCC, MVT::f64, 1 },
1761 { ISD::SETCC, MVT::v2i64, 8 },
1762 { ISD::SETCC, MVT::v4i32, 1 },
1763 { ISD::SETCC, MVT::v8i16, 1 },
1764 { ISD::SETCC, MVT::v16i8, 1 },
1766 { ISD::SELECT, MVT::v2f64, 3 }, // andpd + andnpd + orpd
1767 { ISD::SELECT, MVT::v2i64, 3 }, // pand + pandn + por
1768 { ISD::SELECT, MVT::v4i32, 3 }, // pand + pandn + por
1769 { ISD::SELECT, MVT::v8i16, 3 }, // pand + pandn + por
1770 { ISD::SELECT, MVT::v16i8, 3 }, // pand + pandn + por
1773 static const CostTblEntry SSE1CostTbl[] = {
1774 { ISD::SETCC, MVT::v4f32, 2 },
1775 { ISD::SETCC, MVT::f32, 1 },
1777 { ISD::SELECT, MVT::v4f32, 3 }, // andps + andnps + orps
1781 if (const auto *Entry = CostTableLookup(AVX512BWCostTbl, ISD, MTy))
1782 return LT.first * (ExtraCost + Entry->Cost);
1784 if (ST->hasAVX512())
1785 if (const auto *Entry = CostTableLookup(AVX512CostTbl, ISD, MTy))
1786 return LT.first * (ExtraCost + Entry->Cost);
1789 if (const auto *Entry = CostTableLookup(AVX2CostTbl, ISD, MTy))
1790 return LT.first * (ExtraCost + Entry->Cost);
1793 if (const auto *Entry = CostTableLookup(AVX1CostTbl, ISD, MTy))
1794 return LT.first * (ExtraCost + Entry->Cost);
1797 if (const auto *Entry = CostTableLookup(SSE42CostTbl, ISD, MTy))
1798 return LT.first * (ExtraCost + Entry->Cost);
1801 if (const auto *Entry = CostTableLookup(SSE41CostTbl, ISD, MTy))
1802 return LT.first * (ExtraCost + Entry->Cost);
1805 if (const auto *Entry = CostTableLookup(SSE2CostTbl, ISD, MTy))
1806 return LT.first * (ExtraCost + Entry->Cost);
1809 if (const auto *Entry = CostTableLookup(SSE1CostTbl, ISD, MTy))
1810 return LT.first * (ExtraCost + Entry->Cost);
1812 return BaseT::getCmpSelInstrCost(Opcode, ValTy, CondTy, I);
1815 unsigned X86TTIImpl::getAtomicMemIntrinsicMaxElementSize() const { return 16; }
1817 int X86TTIImpl::getIntrinsicInstrCost(Intrinsic::ID IID, Type *RetTy,
1818 ArrayRef<Type *> Tys, FastMathFlags FMF,
1819 unsigned ScalarizationCostPassed) {
1820 // Costs should match the codegen from:
1821 // BITREVERSE: llvm\test\CodeGen\X86\vector-bitreverse.ll
1822 // BSWAP: llvm\test\CodeGen\X86\bswap-vector.ll
1823 // CTLZ: llvm\test\CodeGen\X86\vector-lzcnt-*.ll
1824 // CTPOP: llvm\test\CodeGen\X86\vector-popcnt-*.ll
1825 // CTTZ: llvm\test\CodeGen\X86\vector-tzcnt-*.ll
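// For example, the AVX2 numbers can be cross-checked by running llc on one of
// these tests: llc -mtriple=x86_64-- -mattr=+avx2 vector-popcnt-256.ll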
1826 static const CostTblEntry AVX512CDCostTbl[] = {
1827 { ISD::CTLZ, MVT::v8i64, 1 },
1828 { ISD::CTLZ, MVT::v16i32, 1 },
1829 { ISD::CTLZ, MVT::v32i16, 8 },
1830 { ISD::CTLZ, MVT::v64i8, 20 },
1831 { ISD::CTLZ, MVT::v4i64, 1 },
1832 { ISD::CTLZ, MVT::v8i32, 1 },
1833 { ISD::CTLZ, MVT::v16i16, 4 },
1834 { ISD::CTLZ, MVT::v32i8, 10 },
1835 { ISD::CTLZ, MVT::v2i64, 1 },
1836 { ISD::CTLZ, MVT::v4i32, 1 },
1837 { ISD::CTLZ, MVT::v8i16, 4 },
1838 { ISD::CTLZ, MVT::v16i8, 4 },
1840 static const CostTblEntry AVX512BWCostTbl[] = {
1841 { ISD::BITREVERSE, MVT::v8i64, 5 },
1842 { ISD::BITREVERSE, MVT::v16i32, 5 },
1843 { ISD::BITREVERSE, MVT::v32i16, 5 },
1844 { ISD::BITREVERSE, MVT::v64i8, 5 },
1845 { ISD::CTLZ, MVT::v8i64, 23 },
1846 { ISD::CTLZ, MVT::v16i32, 22 },
1847 { ISD::CTLZ, MVT::v32i16, 18 },
1848 { ISD::CTLZ, MVT::v64i8, 17 },
1849 { ISD::CTPOP, MVT::v8i64, 7 },
1850 { ISD::CTPOP, MVT::v16i32, 11 },
1851 { ISD::CTPOP, MVT::v32i16, 9 },
1852 { ISD::CTPOP, MVT::v64i8, 6 },
1853 { ISD::CTTZ, MVT::v8i64, 10 },
1854 { ISD::CTTZ, MVT::v16i32, 14 },
1855 { ISD::CTTZ, MVT::v32i16, 12 },
1856 { ISD::CTTZ, MVT::v64i8, 9 },
1857 { ISD::SADDSAT, MVT::v32i16, 1 },
1858 { ISD::SADDSAT, MVT::v64i8, 1 },
1859 { ISD::SSUBSAT, MVT::v32i16, 1 },
1860 { ISD::SSUBSAT, MVT::v64i8, 1 },
1861 { ISD::UADDSAT, MVT::v32i16, 1 },
1862 { ISD::UADDSAT, MVT::v64i8, 1 },
1863 { ISD::USUBSAT, MVT::v32i16, 1 },
1864 { ISD::USUBSAT, MVT::v64i8, 1 },
1866 static const CostTblEntry AVX512CostTbl[] = {
1867 { ISD::BITREVERSE, MVT::v8i64, 36 },
1868 { ISD::BITREVERSE, MVT::v16i32, 24 },
1869 { ISD::CTLZ, MVT::v8i64, 29 },
1870 { ISD::CTLZ, MVT::v16i32, 35 },
1871 { ISD::CTPOP, MVT::v8i64, 16 },
1872 { ISD::CTPOP, MVT::v16i32, 24 },
1873 { ISD::CTTZ, MVT::v8i64, 20 },
1874 { ISD::CTTZ, MVT::v16i32, 28 },
1875 { ISD::USUBSAT, MVT::v16i32, 2 }, // pmaxud + psubd
1876 { ISD::USUBSAT, MVT::v2i64, 2 }, // pmaxuq + psubq
1877 { ISD::USUBSAT, MVT::v4i64, 2 }, // pmaxuq + psubq
1878 { ISD::USUBSAT, MVT::v8i64, 2 }, // pmaxuq + psubq
1879 { ISD::UADDSAT, MVT::v16i32, 3 }, // not + pminud + paddd
1880 { ISD::UADDSAT, MVT::v2i64, 3 }, // not + pminuq + paddq
1881 { ISD::UADDSAT, MVT::v4i64, 3 }, // not + pminuq + paddq
1882 { ISD::UADDSAT, MVT::v8i64, 3 }, // not + pminuq + paddq
1884 static const CostTblEntry XOPCostTbl[] = {
1885 { ISD::BITREVERSE, MVT::v4i64, 4 },
1886 { ISD::BITREVERSE, MVT::v8i32, 4 },
1887 { ISD::BITREVERSE, MVT::v16i16, 4 },
1888 { ISD::BITREVERSE, MVT::v32i8, 4 },
1889 { ISD::BITREVERSE, MVT::v2i64, 1 },
1890 { ISD::BITREVERSE, MVT::v4i32, 1 },
1891 { ISD::BITREVERSE, MVT::v8i16, 1 },
1892 { ISD::BITREVERSE, MVT::v16i8, 1 },
1893 { ISD::BITREVERSE, MVT::i64, 3 },
1894 { ISD::BITREVERSE, MVT::i32, 3 },
1895 { ISD::BITREVERSE, MVT::i16, 3 },
1896 { ISD::BITREVERSE, MVT::i8, 3 }
1898 static const CostTblEntry AVX2CostTbl[] = {
1899 { ISD::BITREVERSE, MVT::v4i64, 5 },
1900 { ISD::BITREVERSE, MVT::v8i32, 5 },
1901 { ISD::BITREVERSE, MVT::v16i16, 5 },
1902 { ISD::BITREVERSE, MVT::v32i8, 5 },
1903 { ISD::BSWAP, MVT::v4i64, 1 },
1904 { ISD::BSWAP, MVT::v8i32, 1 },
1905 { ISD::BSWAP, MVT::v16i16, 1 },
1906 { ISD::CTLZ, MVT::v4i64, 23 },
1907 { ISD::CTLZ, MVT::v8i32, 18 },
1908 { ISD::CTLZ, MVT::v16i16, 14 },
1909 { ISD::CTLZ, MVT::v32i8, 9 },
1910 { ISD::CTPOP, MVT::v4i64, 7 },
1911 { ISD::CTPOP, MVT::v8i32, 11 },
1912 { ISD::CTPOP, MVT::v16i16, 9 },
1913 { ISD::CTPOP, MVT::v32i8, 6 },
1914 { ISD::CTTZ, MVT::v4i64, 10 },
1915 { ISD::CTTZ, MVT::v8i32, 14 },
1916 { ISD::CTTZ, MVT::v16i16, 12 },
1917 { ISD::CTTZ, MVT::v32i8, 9 },
1918 { ISD::SADDSAT, MVT::v16i16, 1 },
1919 { ISD::SADDSAT, MVT::v32i8, 1 },
1920 { ISD::SSUBSAT, MVT::v16i16, 1 },
1921 { ISD::SSUBSAT, MVT::v32i8, 1 },
1922 { ISD::UADDSAT, MVT::v16i16, 1 },
1923 { ISD::UADDSAT, MVT::v32i8, 1 },
1924 { ISD::UADDSAT, MVT::v8i32, 3 }, // not + pminud + paddd
1925 { ISD::USUBSAT, MVT::v16i16, 1 },
1926 { ISD::USUBSAT, MVT::v32i8, 1 },
1927 { ISD::USUBSAT, MVT::v8i32, 2 }, // pmaxud + psubd
1928 { ISD::FSQRT, MVT::f32, 7 }, // Haswell from http://www.agner.org/
1929 { ISD::FSQRT, MVT::v4f32, 7 }, // Haswell from http://www.agner.org/
1930 { ISD::FSQRT, MVT::v8f32, 14 }, // Haswell from http://www.agner.org/
1931 { ISD::FSQRT, MVT::f64, 14 }, // Haswell from http://www.agner.org/
1932 { ISD::FSQRT, MVT::v2f64, 14 }, // Haswell from http://www.agner.org/
1933 { ISD::FSQRT, MVT::v4f64, 28 }, // Haswell from http://www.agner.org/
1935 static const CostTblEntry AVX1CostTbl[] = {
1936 { ISD::BITREVERSE, MVT::v4i64, 12 }, // 2 x 128-bit Op + extract/insert
1937 { ISD::BITREVERSE, MVT::v8i32, 12 }, // 2 x 128-bit Op + extract/insert
1938 { ISD::BITREVERSE, MVT::v16i16, 12 }, // 2 x 128-bit Op + extract/insert
1939 { ISD::BITREVERSE, MVT::v32i8, 12 }, // 2 x 128-bit Op + extract/insert
1940 { ISD::BSWAP, MVT::v4i64, 4 },
1941 { ISD::BSWAP, MVT::v8i32, 4 },
1942 { ISD::BSWAP, MVT::v16i16, 4 },
1943 { ISD::CTLZ, MVT::v4i64, 48 }, // 2 x 128-bit Op + extract/insert
1944 { ISD::CTLZ, MVT::v8i32, 38 }, // 2 x 128-bit Op + extract/insert
1945 { ISD::CTLZ, MVT::v16i16, 30 }, // 2 x 128-bit Op + extract/insert
1946 { ISD::CTLZ, MVT::v32i8, 20 }, // 2 x 128-bit Op + extract/insert
1947 { ISD::CTPOP, MVT::v4i64, 16 }, // 2 x 128-bit Op + extract/insert
1948 { ISD::CTPOP, MVT::v8i32, 24 }, // 2 x 128-bit Op + extract/insert
1949 { ISD::CTPOP, MVT::v16i16, 20 }, // 2 x 128-bit Op + extract/insert
1950 { ISD::CTPOP, MVT::v32i8, 14 }, // 2 x 128-bit Op + extract/insert
1951 { ISD::CTTZ, MVT::v4i64, 22 }, // 2 x 128-bit Op + extract/insert
1952 { ISD::CTTZ, MVT::v8i32, 30 }, // 2 x 128-bit Op + extract/insert
1953 { ISD::CTTZ, MVT::v16i16, 26 }, // 2 x 128-bit Op + extract/insert
1954 { ISD::CTTZ, MVT::v32i8, 20 }, // 2 x 128-bit Op + extract/insert
1955 { ISD::SADDSAT, MVT::v16i16, 4 }, // 2 x 128-bit Op + extract/insert
1956 { ISD::SADDSAT, MVT::v32i8, 4 }, // 2 x 128-bit Op + extract/insert
1957 { ISD::SSUBSAT, MVT::v16i16, 4 }, // 2 x 128-bit Op + extract/insert
1958 { ISD::SSUBSAT, MVT::v32i8, 4 }, // 2 x 128-bit Op + extract/insert
1959 { ISD::UADDSAT, MVT::v16i16, 4 }, // 2 x 128-bit Op + extract/insert
1960 { ISD::UADDSAT, MVT::v32i8, 4 }, // 2 x 128-bit Op + extract/insert
1961 { ISD::UADDSAT, MVT::v8i32, 8 }, // 2 x 128-bit Op + extract/insert
1962 { ISD::USUBSAT, MVT::v16i16, 4 }, // 2 x 128-bit Op + extract/insert
1963 { ISD::USUBSAT, MVT::v32i8, 4 }, // 2 x 128-bit Op + extract/insert
1964 { ISD::USUBSAT, MVT::v8i32, 6 }, // 2 x 128-bit Op + extract/insert
1965 { ISD::FSQRT, MVT::f32, 14 }, // SNB from http://www.agner.org/
1966 { ISD::FSQRT, MVT::v4f32, 14 }, // SNB from http://www.agner.org/
1967 { ISD::FSQRT, MVT::v8f32, 28 }, // SNB from http://www.agner.org/
1968 { ISD::FSQRT, MVT::f64, 21 }, // SNB from http://www.agner.org/
1969 { ISD::FSQRT, MVT::v2f64, 21 }, // SNB from http://www.agner.org/
1970 { ISD::FSQRT, MVT::v4f64, 43 }, // SNB from http://www.agner.org/
1972 static const CostTblEntry GLMCostTbl[] = {
1973 { ISD::FSQRT, MVT::f32, 19 }, // sqrtss
1974 { ISD::FSQRT, MVT::v4f32, 37 }, // sqrtps
1975 { ISD::FSQRT, MVT::f64, 34 }, // sqrtsd
1976 { ISD::FSQRT, MVT::v2f64, 67 }, // sqrtpd
1978 static const CostTblEntry SLMCostTbl[] = {
1979 { ISD::FSQRT, MVT::f32, 20 }, // sqrtss
1980 { ISD::FSQRT, MVT::v4f32, 40 }, // sqrtps
1981 { ISD::FSQRT, MVT::f64, 35 }, // sqrtsd
1982 { ISD::FSQRT, MVT::v2f64, 70 }, // sqrtpd
1984 static const CostTblEntry SSE42CostTbl[] = {
1985 { ISD::USUBSAT, MVT::v4i32, 2 }, // pmaxud + psubd
1986 { ISD::UADDSAT, MVT::v4i32, 3 }, // not + pminud + paddd
1987 { ISD::FSQRT, MVT::f32, 18 }, // Nehalem from http://www.agner.org/
1988 { ISD::FSQRT, MVT::v4f32, 18 }, // Nehalem from http://www.agner.org/
1990 static const CostTblEntry SSSE3CostTbl[] = {
1991 { ISD::BITREVERSE, MVT::v2i64, 5 },
1992 { ISD::BITREVERSE, MVT::v4i32, 5 },
1993 { ISD::BITREVERSE, MVT::v8i16, 5 },
1994 { ISD::BITREVERSE, MVT::v16i8, 5 },
1995 { ISD::BSWAP, MVT::v2i64, 1 },
1996 { ISD::BSWAP, MVT::v4i32, 1 },
1997 { ISD::BSWAP, MVT::v8i16, 1 },
1998 { ISD::CTLZ, MVT::v2i64, 23 },
1999 { ISD::CTLZ, MVT::v4i32, 18 },
2000 { ISD::CTLZ, MVT::v8i16, 14 },
2001 { ISD::CTLZ, MVT::v16i8, 9 },
2002 { ISD::CTPOP, MVT::v2i64, 7 },
2003 { ISD::CTPOP, MVT::v4i32, 11 },
2004 { ISD::CTPOP, MVT::v8i16, 9 },
2005 { ISD::CTPOP, MVT::v16i8, 6 },
2006 { ISD::CTTZ, MVT::v2i64, 10 },
2007 { ISD::CTTZ, MVT::v4i32, 14 },
2008 { ISD::CTTZ, MVT::v8i16, 12 },
2009 { ISD::CTTZ, MVT::v16i8, 9 }
2011 static const CostTblEntry SSE2CostTbl[] = {
2012 { ISD::BITREVERSE, MVT::v2i64, 29 },
2013 { ISD::BITREVERSE, MVT::v4i32, 27 },
2014 { ISD::BITREVERSE, MVT::v8i16, 27 },
2015 { ISD::BITREVERSE, MVT::v16i8, 20 },
2016 { ISD::BSWAP, MVT::v2i64, 7 },
2017 { ISD::BSWAP, MVT::v4i32, 7 },
2018 { ISD::BSWAP, MVT::v8i16, 7 },
2019 { ISD::CTLZ, MVT::v2i64, 25 },
2020 { ISD::CTLZ, MVT::v4i32, 26 },
2021 { ISD::CTLZ, MVT::v8i16, 20 },
2022 { ISD::CTLZ, MVT::v16i8, 17 },
2023 { ISD::CTPOP, MVT::v2i64, 12 },
2024 { ISD::CTPOP, MVT::v4i32, 15 },
2025 { ISD::CTPOP, MVT::v8i16, 13 },
2026 { ISD::CTPOP, MVT::v16i8, 10 },
2027 { ISD::CTTZ, MVT::v2i64, 14 },
2028 { ISD::CTTZ, MVT::v4i32, 18 },
2029 { ISD::CTTZ, MVT::v8i16, 16 },
2030 { ISD::CTTZ, MVT::v16i8, 13 },
2031 { ISD::SADDSAT, MVT::v8i16, 1 },
2032 { ISD::SADDSAT, MVT::v16i8, 1 },
2033 { ISD::SSUBSAT, MVT::v8i16, 1 },
2034 { ISD::SSUBSAT, MVT::v16i8, 1 },
2035 { ISD::UADDSAT, MVT::v8i16, 1 },
2036 { ISD::UADDSAT, MVT::v16i8, 1 },
2037 { ISD::USUBSAT, MVT::v8i16, 1 },
2038 { ISD::USUBSAT, MVT::v16i8, 1 },
2039 { ISD::FSQRT, MVT::f64, 32 }, // Nehalem from http://www.agner.org/
2040 { ISD::FSQRT, MVT::v2f64, 32 }, // Nehalem from http://www.agner.org/
2042 static const CostTblEntry SSE1CostTbl[] = {
2043 { ISD::FSQRT, MVT::f32, 28 }, // Pentium III from http://www.agner.org/
2044 { ISD::FSQRT, MVT::v4f32, 56 }, // Pentium III from http://www.agner.org/
2046 static const CostTblEntry X64CostTbl[] = { // 64-bit targets
2047 { ISD::BITREVERSE, MVT::i64, 14 },
2048 { ISD::SADDO, MVT::i64, 1 },
2049 { ISD::UADDO, MVT::i64, 1 },
2051 static const CostTblEntry X86CostTbl[] = { // 32 or 64-bit targets
2052 { ISD::BITREVERSE, MVT::i32, 14 },
2053 { ISD::BITREVERSE, MVT::i16, 14 },
2054 { ISD::BITREVERSE, MVT::i8, 11 },
2055 { ISD::SADDO, MVT::i32, 1 },
2056 { ISD::SADDO, MVT::i16, 1 },
2057 { ISD::SADDO, MVT::i8, 1 },
2058 { ISD::UADDO, MVT::i32, 1 },
2059 { ISD::UADDO, MVT::i16, 1 },
2060 { ISD::UADDO, MVT::i8, 1 },
2064 unsigned ISD = ISD::DELETED_NODE;
2068 case Intrinsic::bitreverse:
2069 ISD = ISD::BITREVERSE;
2071 case Intrinsic::bswap:
2074 case Intrinsic::ctlz:
2077 case Intrinsic::ctpop:
2080 case Intrinsic::cttz:
2083 case Intrinsic::sadd_sat:
2086 case Intrinsic::ssub_sat:
2089 case Intrinsic::uadd_sat:
2092 case Intrinsic::usub_sat:
2095 case Intrinsic::sqrt:
2098 case Intrinsic::sadd_with_overflow:
2099 case Intrinsic::ssub_with_overflow:
// SSUBO has the same cost, so don't duplicate.
2102 OpTy = RetTy->getContainedType(0);
2104 case Intrinsic::uadd_with_overflow:
2105 case Intrinsic::usub_with_overflow:
// USUBO has the same cost, so don't duplicate.
2108 OpTy = RetTy->getContainedType(0);
2112 if (ISD != ISD::DELETED_NODE) {
2113 // Legalize the type.
2114 std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, OpTy);
2115 MVT MTy = LT.second;
2117 // Attempt to lookup cost.
2119 if (const auto *Entry = CostTableLookup(GLMCostTbl, ISD, MTy))
2120 return LT.first * Entry->Cost;
2123 if (const auto *Entry = CostTableLookup(SLMCostTbl, ISD, MTy))
2124 return LT.first * Entry->Cost;
2127 if (const auto *Entry = CostTableLookup(AVX512CDCostTbl, ISD, MTy))
2128 return LT.first * Entry->Cost;
2131 if (const auto *Entry = CostTableLookup(AVX512BWCostTbl, ISD, MTy))
2132 return LT.first * Entry->Cost;
2134 if (ST->hasAVX512())
2135 if (const auto *Entry = CostTableLookup(AVX512CostTbl, ISD, MTy))
2136 return LT.first * Entry->Cost;
2139 if (const auto *Entry = CostTableLookup(XOPCostTbl, ISD, MTy))
2140 return LT.first * Entry->Cost;
2143 if (const auto *Entry = CostTableLookup(AVX2CostTbl, ISD, MTy))
2144 return LT.first * Entry->Cost;
2147 if (const auto *Entry = CostTableLookup(AVX1CostTbl, ISD, MTy))
2148 return LT.first * Entry->Cost;
2151 if (const auto *Entry = CostTableLookup(SSE42CostTbl, ISD, MTy))
2152 return LT.first * Entry->Cost;
2155 if (const auto *Entry = CostTableLookup(SSSE3CostTbl, ISD, MTy))
2156 return LT.first * Entry->Cost;
2159 if (const auto *Entry = CostTableLookup(SSE2CostTbl, ISD, MTy))
2160 return LT.first * Entry->Cost;
2163 if (const auto *Entry = CostTableLookup(SSE1CostTbl, ISD, MTy))
2164 return LT.first * Entry->Cost;
2167 if (const auto *Entry = CostTableLookup(X64CostTbl, ISD, MTy))
2168 return LT.first * Entry->Cost;
2170 if (const auto *Entry = CostTableLookup(X86CostTbl, ISD, MTy))
2171 return LT.first * Entry->Cost;
2174 return BaseT::getIntrinsicInstrCost(IID, RetTy, Tys, FMF, ScalarizationCostPassed);
2177 int X86TTIImpl::getIntrinsicInstrCost(Intrinsic::ID IID, Type *RetTy,
2178 ArrayRef<Value *> Args, FastMathFlags FMF,
2180 static const CostTblEntry AVX512CostTbl[] = {
2181 { ISD::ROTL, MVT::v8i64, 1 },
2182 { ISD::ROTL, MVT::v4i64, 1 },
2183 { ISD::ROTL, MVT::v2i64, 1 },
2184 { ISD::ROTL, MVT::v16i32, 1 },
2185 { ISD::ROTL, MVT::v8i32, 1 },
2186 { ISD::ROTL, MVT::v4i32, 1 },
2187 { ISD::ROTR, MVT::v8i64, 1 },
2188 { ISD::ROTR, MVT::v4i64, 1 },
2189 { ISD::ROTR, MVT::v2i64, 1 },
2190 { ISD::ROTR, MVT::v16i32, 1 },
2191 { ISD::ROTR, MVT::v8i32, 1 },
2192 { ISD::ROTR, MVT::v4i32, 1 }
2194 // XOP: ROTL = VPROT(X,Y), ROTR = VPROT(X,SUB(0,Y))
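// The extra SUB is why each ROTR entry below costs more than the matching
// ROTL entry: one additional op per 128-bit half.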
2195 static const CostTblEntry XOPCostTbl[] = {
2196 { ISD::ROTL, MVT::v4i64, 4 },
2197 { ISD::ROTL, MVT::v8i32, 4 },
2198 { ISD::ROTL, MVT::v16i16, 4 },
2199 { ISD::ROTL, MVT::v32i8, 4 },
2200 { ISD::ROTL, MVT::v2i64, 1 },
2201 { ISD::ROTL, MVT::v4i32, 1 },
2202 { ISD::ROTL, MVT::v8i16, 1 },
2203 { ISD::ROTL, MVT::v16i8, 1 },
2204 { ISD::ROTR, MVT::v4i64, 6 },
2205 { ISD::ROTR, MVT::v8i32, 6 },
2206 { ISD::ROTR, MVT::v16i16, 6 },
2207 { ISD::ROTR, MVT::v32i8, 6 },
2208 { ISD::ROTR, MVT::v2i64, 2 },
2209 { ISD::ROTR, MVT::v4i32, 2 },
2210 { ISD::ROTR, MVT::v8i16, 2 },
2211 { ISD::ROTR, MVT::v16i8, 2 }
2213 static const CostTblEntry X64CostTbl[] = { // 64-bit targets
2214 { ISD::ROTL, MVT::i64, 1 },
2215 { ISD::ROTR, MVT::i64, 1 },
2216 { ISD::FSHL, MVT::i64, 4 }
2218 static const CostTblEntry X86CostTbl[] = { // 32 or 64-bit targets
2219 { ISD::ROTL, MVT::i32, 1 },
2220 { ISD::ROTL, MVT::i16, 1 },
2221 { ISD::ROTL, MVT::i8, 1 },
2222 { ISD::ROTR, MVT::i32, 1 },
2223 { ISD::ROTR, MVT::i16, 1 },
2224 { ISD::ROTR, MVT::i8, 1 },
2225 { ISD::FSHL, MVT::i32, 4 },
2226 { ISD::FSHL, MVT::i16, 4 },
2227 { ISD::FSHL, MVT::i8, 4 }
2230 unsigned ISD = ISD::DELETED_NODE;
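// A funnel shift with identical operands is a rotate: fshl(x, x, z) is
// rotl(x, z) and fshr(x, x, z) is rotr(x, z), which lets us use the rotate
// cost tables above.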
2234 case Intrinsic::fshl:
2236 if (Args[0] == Args[1])
2239 case Intrinsic::fshr:
// FSHR has the same cost, so don't duplicate.
2242 if (Args[0] == Args[1])
2247 if (ISD != ISD::DELETED_NODE) {
2248 // Legalize the type.
2249 std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, RetTy);
2250 MVT MTy = LT.second;
2252 // Attempt to lookup cost.
2253 if (ST->hasAVX512())
2254 if (const auto *Entry = CostTableLookup(AVX512CostTbl, ISD, MTy))
2255 return LT.first * Entry->Cost;
2258 if (const auto *Entry = CostTableLookup(XOPCostTbl, ISD, MTy))
2259 return LT.first * Entry->Cost;
2262 if (const auto *Entry = CostTableLookup(X64CostTbl, ISD, MTy))
2263 return LT.first * Entry->Cost;
2265 if (const auto *Entry = CostTableLookup(X86CostTbl, ISD, MTy))
2266 return LT.first * Entry->Cost;
2269 return BaseT::getIntrinsicInstrCost(IID, RetTy, Args, FMF, VF);
2272 int X86TTIImpl::getVectorInstrCost(unsigned Opcode, Type *Val, unsigned Index) {
2273 assert(Val->isVectorTy() && "This must be a vector type");
2275 Type *ScalarType = Val->getScalarType();
2278 // Legalize the type.
2279 std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, Val);
2281 // This type is legalized to a scalar type.
2282 if (!LT.second.isVector())
2285 // The type may be split. Normalize the index to the new type.
2286 unsigned Width = LT.second.getVectorNumElements();
2287 Index = Index % Width;
2289 // Floating point scalars are already located in index #0.
2290 if (ScalarType->isFloatingPointTy() && Index == 0)
2294 // Add to the base cost if we know that the extracted element of a vector is
2295 // destined to be moved to and used in the integer register file.
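// For example, extracting an element of <2 x i8*> for a pointer compare moves
// the value from an xmm register to a GPR (e.g. via movq), costing one extra
// cross-register-file move.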
2296 int RegisterFileMoveCost = 0;
2297 if (Opcode == Instruction::ExtractElement && ScalarType->isPointerTy())
2298 RegisterFileMoveCost = 1;
2300 return BaseT::getVectorInstrCost(Opcode, Val, Index) + RegisterFileMoveCost;
2303 int X86TTIImpl::getMemoryOpCost(unsigned Opcode, Type *Src, unsigned Alignment,
2304 unsigned AddressSpace, const Instruction *I) {
2305 // Handle non-power-of-two vectors such as <3 x float>
2306 if (VectorType *VTy = dyn_cast<VectorType>(Src)) {
2307 unsigned NumElem = VTy->getVectorNumElements();
2309 // Handle a few common cases:
2311 if (NumElem == 3 && VTy->getScalarSizeInBits() == 32)
2312 // Cost = 64 bit store + extract + 32 bit store.
2316 if (NumElem == 3 && VTy->getScalarSizeInBits() == 64)
2317 // Cost = 128 bit store + unpack + 64 bit store.
2320 // Assume that all other non-power-of-two numbers are scalarized.
2321 if (!isPowerOf2_32(NumElem)) {
2322 int Cost = BaseT::getMemoryOpCost(Opcode, VTy->getScalarType(), Alignment,
2324 int SplitCost = getScalarizationOverhead(Src, Opcode == Instruction::Load,
2325 Opcode == Instruction::Store);
2326 return NumElem * Cost + SplitCost;
2330 // Legalize the type.
2331 std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, Src);
2332 assert((Opcode == Instruction::Load || Opcode == Instruction::Store) &&
2335 // Each load/store unit costs 1.
2336 int Cost = LT.first * 1;
// This isn't exactly right. We're using slow unaligned 32-byte accesses as a
// proxy for a double-pumped AVX memory interface such as on Sandy Bridge.
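// "Double-pumped" means each 32-byte access is issued as two 16-byte halves,
// so such an access effectively costs twice as much as an aligned one.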
2340 if (LT.second.getStoreSize() == 32 && ST->isUnalignedMem32Slow())
2346 int X86TTIImpl::getMaskedMemoryOpCost(unsigned Opcode, Type *SrcTy,
2348 unsigned AddressSpace) {
2349 VectorType *SrcVTy = dyn_cast<VectorType>(SrcTy);
// To calculate the scalar cost, take the regular cost without the mask.
2352 return getMemoryOpCost(Opcode, SrcTy, Alignment, AddressSpace);
2354 unsigned NumElem = SrcVTy->getVectorNumElements();
2355 VectorType *MaskTy =
2356 VectorType::get(Type::getInt8Ty(SrcVTy->getContext()), NumElem);
2357 if ((Opcode == Instruction::Load && !isLegalMaskedLoad(SrcVTy)) ||
2358 (Opcode == Instruction::Store && !isLegalMaskedStore(SrcVTy)) ||
2359 !isPowerOf2_32(NumElem)) {
2361 int MaskSplitCost = getScalarizationOverhead(MaskTy, false, true);
2362 int ScalarCompareCost = getCmpSelInstrCost(
2363 Instruction::ICmp, Type::getInt8Ty(SrcVTy->getContext()), nullptr);
2364 int BranchCost = getCFInstrCost(Instruction::Br);
2365 int MaskCmpCost = NumElem * (BranchCost + ScalarCompareCost);
2367 int ValueSplitCost = getScalarizationOverhead(
2368 SrcVTy, Opcode == Instruction::Load, Opcode == Instruction::Store);
2370 NumElem * BaseT::getMemoryOpCost(Opcode, SrcVTy->getScalarType(),
2371 Alignment, AddressSpace);
2372 return MemopCost + ValueSplitCost + MaskSplitCost + MaskCmpCost;
2375 // Legalize the type.
2376 std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, SrcVTy);
2377 auto VT = TLI->getValueType(DL, SrcVTy);
2379 if (VT.isSimple() && LT.second != VT.getSimpleVT() &&
2380 LT.second.getVectorNumElements() == NumElem)
// Promotion requires an expand/truncate for the data and a shuffle for the mask.
2382 Cost += getShuffleCost(TTI::SK_PermuteTwoSrc, SrcVTy, 0, nullptr) +
2383 getShuffleCost(TTI::SK_PermuteTwoSrc, MaskTy, 0, nullptr);
2385 else if (LT.second.getVectorNumElements() > NumElem) {
2386 VectorType *NewMaskTy = VectorType::get(MaskTy->getVectorElementType(),
2387 LT.second.getVectorNumElements());
// Expanding requires filling the mask with zeroes.
2389 Cost += getShuffleCost(TTI::SK_InsertSubvector, NewMaskTy, 0, MaskTy);
2391 if (!ST->hasAVX512())
return Cost + LT.first * 4; // Each maskmov costs 4.

// AVX-512 masked load/store is cheaper.
return Cost + LT.first;
2398 int X86TTIImpl::getAddressComputationCost(Type *Ty, ScalarEvolution *SE,
2400 // Address computations in vectorized code with non-consecutive addresses will
2401 // likely result in more instructions compared to scalar code where the
2402 // computation can more often be merged into the index mode. The resulting
2403 // extra micro-ops can significantly decrease throughput.
2404 const unsigned NumVectorInstToHideOverhead = 10;
// Cost modeling of Strided Access Computation is hidden by the indexing
// modes of X86 regardless of the stride value. We don't believe that there
// is a difference between constant strided access in general and a constant
// stride value which is less than or equal to 64.
// Even in the case of (loop invariant) stride whose value is not known at
// compile time, the address computation will not incur more than one extra
// instruction.
2413 if (Ty->isVectorTy() && SE) {
2414 if (!BaseT::isStridedAccess(Ptr))
2415 return NumVectorInstToHideOverhead;
2416 if (!BaseT::getConstantStrideStep(SE, Ptr))
2420 return BaseT::getAddressComputationCost(Ty, SE, Ptr);
2423 int X86TTIImpl::getArithmeticReductionCost(unsigned Opcode, Type *ValTy,
2426 std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, ValTy);
2428 MVT MTy = LT.second;
2430 int ISD = TLI->InstructionOpcodeToISD(Opcode);
2431 assert(ISD && "Invalid opcode");
// We use the Intel Architecture Code Analyzer (IACA) to measure the
// throughput and use that as the cost.
2436 static const CostTblEntry SSE42CostTblPairWise[] = {
2437 { ISD::FADD, MVT::v2f64, 2 },
2438 { ISD::FADD, MVT::v4f32, 4 },
2439 { ISD::ADD, MVT::v2i64, 2 }, // The data reported by the IACA tool is "1.6".
2440 { ISD::ADD, MVT::v4i32, 3 }, // The data reported by the IACA tool is "3.5".
2441 { ISD::ADD, MVT::v8i16, 5 },
2444 static const CostTblEntry AVX1CostTblPairWise[] = {
2445 { ISD::FADD, MVT::v4f32, 4 },
2446 { ISD::FADD, MVT::v4f64, 5 },
2447 { ISD::FADD, MVT::v8f32, 7 },
2448 { ISD::ADD, MVT::v2i64, 1 }, // The data reported by the IACA tool is "1.5".
2449 { ISD::ADD, MVT::v4i32, 3 }, // The data reported by the IACA tool is "3.5".
2450 { ISD::ADD, MVT::v4i64, 5 }, // The data reported by the IACA tool is "4.8".
2451 { ISD::ADD, MVT::v8i16, 5 },
2452 { ISD::ADD, MVT::v8i32, 5 },
2455 static const CostTblEntry SSE42CostTblNoPairWise[] = {
2456 { ISD::FADD, MVT::v2f64, 2 },
2457 { ISD::FADD, MVT::v4f32, 4 },
2458 { ISD::ADD, MVT::v2i64, 2 }, // The data reported by the IACA tool is "1.6".
2459 { ISD::ADD, MVT::v4i32, 3 }, // The data reported by the IACA tool is "3.3".
2460 { ISD::ADD, MVT::v8i16, 4 }, // The data reported by the IACA tool is "4.3".
2463 static const CostTblEntry AVX1CostTblNoPairWise[] = {
2464 { ISD::FADD, MVT::v4f32, 3 },
2465 { ISD::FADD, MVT::v4f64, 3 },
2466 { ISD::FADD, MVT::v8f32, 4 },
2467 { ISD::ADD, MVT::v2i64, 1 }, // The data reported by the IACA tool is "1.5".
2468 { ISD::ADD, MVT::v4i32, 3 }, // The data reported by the IACA tool is "2.8".
2469 { ISD::ADD, MVT::v4i64, 3 },
2470 { ISD::ADD, MVT::v8i16, 4 },
2471 { ISD::ADD, MVT::v8i32, 5 },
2476 if (const auto *Entry = CostTableLookup(AVX1CostTblPairWise, ISD, MTy))
2477 return LT.first * Entry->Cost;
2480 if (const auto *Entry = CostTableLookup(SSE42CostTblPairWise, ISD, MTy))
2481 return LT.first * Entry->Cost;
2484 if (const auto *Entry = CostTableLookup(AVX1CostTblNoPairWise, ISD, MTy))
2485 return LT.first * Entry->Cost;
2488 if (const auto *Entry = CostTableLookup(SSE42CostTblNoPairWise, ISD, MTy))
2489 return LT.first * Entry->Cost;
2492 static const CostTblEntry AVX2BoolReduction[] = {
2493 { ISD::AND, MVT::v16i16, 2 }, // vpmovmskb + cmp
2494 { ISD::AND, MVT::v32i8, 2 }, // vpmovmskb + cmp
2495 { ISD::OR, MVT::v16i16, 2 }, // vpmovmskb + cmp
2496 { ISD::OR, MVT::v32i8, 2 }, // vpmovmskb + cmp
2499 static const CostTblEntry AVX1BoolReduction[] = {
2500 { ISD::AND, MVT::v4i64, 2 }, // vmovmskpd + cmp
2501 { ISD::AND, MVT::v8i32, 2 }, // vmovmskps + cmp
2502 { ISD::AND, MVT::v16i16, 4 }, // vextractf128 + vpand + vpmovmskb + cmp
2503 { ISD::AND, MVT::v32i8, 4 }, // vextractf128 + vpand + vpmovmskb + cmp
2504 { ISD::OR, MVT::v4i64, 2 }, // vmovmskpd + cmp
2505 { ISD::OR, MVT::v8i32, 2 }, // vmovmskps + cmp
2506 { ISD::OR, MVT::v16i16, 4 }, // vextractf128 + vpor + vpmovmskb + cmp
2507 { ISD::OR, MVT::v32i8, 4 }, // vextractf128 + vpor + vpmovmskb + cmp
2510 static const CostTblEntry SSE2BoolReduction[] = {
2511 { ISD::AND, MVT::v2i64, 2 }, // movmskpd + cmp
2512 { ISD::AND, MVT::v4i32, 2 }, // movmskps + cmp
2513 { ISD::AND, MVT::v8i16, 2 }, // pmovmskb + cmp
2514 { ISD::AND, MVT::v16i8, 2 }, // pmovmskb + cmp
2515 { ISD::OR, MVT::v2i64, 2 }, // movmskpd + cmp
2516 { ISD::OR, MVT::v4i32, 2 }, // movmskps + cmp
2517 { ISD::OR, MVT::v8i16, 2 }, // pmovmskb + cmp
2518 { ISD::OR, MVT::v16i8, 2 }, // pmovmskb + cmp
2521 // Handle bool allof/anyof patterns.
2522 if (ValTy->getVectorElementType()->isIntegerTy(1)) {
2524 if (const auto *Entry = CostTableLookup(AVX2BoolReduction, ISD, MTy))
2525 return LT.first * Entry->Cost;
2527 if (const auto *Entry = CostTableLookup(AVX1BoolReduction, ISD, MTy))
2528 return LT.first * Entry->Cost;
2530 if (const auto *Entry = CostTableLookup(SSE2BoolReduction, ISD, MTy))
2531 return LT.first * Entry->Cost;
2534 return BaseT::getArithmeticReductionCost(Opcode, ValTy, IsPairwise);
2537 int X86TTIImpl::getMinMaxReductionCost(Type *ValTy, Type *CondTy,
2538 bool IsPairwise, bool IsUnsigned) {
2539 std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, ValTy);
2541 MVT MTy = LT.second;
2544 if (ValTy->isIntOrIntVectorTy()) {
2545 ISD = IsUnsigned ? ISD::UMIN : ISD::SMIN;
assert(ValTy->isFPOrFPVectorTy() &&
       "Expected floating point or integer vector type.");
// We use the Intel Architecture Code Analyzer (IACA) to measure the
// throughput and use that as the cost.
2555 static const CostTblEntry SSE42CostTblPairWise[] = {
2556 {ISD::FMINNUM, MVT::v2f64, 3},
2557 {ISD::FMINNUM, MVT::v4f32, 2},
2558 {ISD::SMIN, MVT::v2i64, 7}, // The data reported by the IACA is "6.8"
2559 {ISD::UMIN, MVT::v2i64, 8}, // The data reported by the IACA is "8.6"
2560 {ISD::SMIN, MVT::v4i32, 1}, // The data reported by the IACA is "1.5"
2561 {ISD::UMIN, MVT::v4i32, 2}, // The data reported by the IACA is "1.8"
2562 {ISD::SMIN, MVT::v8i16, 2},
2563 {ISD::UMIN, MVT::v8i16, 2},
2566 static const CostTblEntry AVX1CostTblPairWise[] = {
2567 {ISD::FMINNUM, MVT::v4f32, 1},
2568 {ISD::FMINNUM, MVT::v4f64, 1},
2569 {ISD::FMINNUM, MVT::v8f32, 2},
2570 {ISD::SMIN, MVT::v2i64, 3},
2571 {ISD::UMIN, MVT::v2i64, 3},
2572 {ISD::SMIN, MVT::v4i32, 1},
2573 {ISD::UMIN, MVT::v4i32, 1},
2574 {ISD::SMIN, MVT::v8i16, 1},
2575 {ISD::UMIN, MVT::v8i16, 1},
2576 {ISD::SMIN, MVT::v8i32, 3},
2577 {ISD::UMIN, MVT::v8i32, 3},
2580 static const CostTblEntry AVX2CostTblPairWise[] = {
2581 {ISD::SMIN, MVT::v4i64, 2},
2582 {ISD::UMIN, MVT::v4i64, 2},
2583 {ISD::SMIN, MVT::v8i32, 1},
2584 {ISD::UMIN, MVT::v8i32, 1},
2585 {ISD::SMIN, MVT::v16i16, 1},
2586 {ISD::UMIN, MVT::v16i16, 1},
2587 {ISD::SMIN, MVT::v32i8, 2},
2588 {ISD::UMIN, MVT::v32i8, 2},
2591 static const CostTblEntry AVX512CostTblPairWise[] = {
2592 {ISD::FMINNUM, MVT::v8f64, 1},
2593 {ISD::FMINNUM, MVT::v16f32, 2},
2594 {ISD::SMIN, MVT::v8i64, 2},
2595 {ISD::UMIN, MVT::v8i64, 2},
2596 {ISD::SMIN, MVT::v16i32, 1},
2597 {ISD::UMIN, MVT::v16i32, 1},
2600 static const CostTblEntry SSE42CostTblNoPairWise[] = {
2601 {ISD::FMINNUM, MVT::v2f64, 3},
2602 {ISD::FMINNUM, MVT::v4f32, 3},
2603 {ISD::SMIN, MVT::v2i64, 7}, // The data reported by the IACA is "6.8"
2604 {ISD::UMIN, MVT::v2i64, 9}, // The data reported by the IACA is "8.6"
2605 {ISD::SMIN, MVT::v4i32, 1}, // The data reported by the IACA is "1.5"
2606 {ISD::UMIN, MVT::v4i32, 2}, // The data reported by the IACA is "1.8"
2607 {ISD::SMIN, MVT::v8i16, 1}, // The data reported by the IACA is "1.5"
2608 {ISD::UMIN, MVT::v8i16, 2}, // The data reported by the IACA is "1.8"
2611 static const CostTblEntry AVX1CostTblNoPairWise[] = {
2612 {ISD::FMINNUM, MVT::v4f32, 1},
2613 {ISD::FMINNUM, MVT::v4f64, 1},
2614 {ISD::FMINNUM, MVT::v8f32, 1},
2615 {ISD::SMIN, MVT::v2i64, 3},
2616 {ISD::UMIN, MVT::v2i64, 3},
2617 {ISD::SMIN, MVT::v4i32, 1},
2618 {ISD::UMIN, MVT::v4i32, 1},
2619 {ISD::SMIN, MVT::v8i16, 1},
2620 {ISD::UMIN, MVT::v8i16, 1},
2621 {ISD::SMIN, MVT::v8i32, 2},
2622 {ISD::UMIN, MVT::v8i32, 2},
2625 static const CostTblEntry AVX2CostTblNoPairWise[] = {
2626 {ISD::SMIN, MVT::v4i64, 1},
2627 {ISD::UMIN, MVT::v4i64, 1},
2628 {ISD::SMIN, MVT::v8i32, 1},
2629 {ISD::UMIN, MVT::v8i32, 1},
2630 {ISD::SMIN, MVT::v16i16, 1},
2631 {ISD::UMIN, MVT::v16i16, 1},
2632 {ISD::SMIN, MVT::v32i8, 1},
2633 {ISD::UMIN, MVT::v32i8, 1},
2636 static const CostTblEntry AVX512CostTblNoPairWise[] = {
2637 {ISD::FMINNUM, MVT::v8f64, 1},
2638 {ISD::FMINNUM, MVT::v16f32, 2},
2639 {ISD::SMIN, MVT::v8i64, 1},
2640 {ISD::UMIN, MVT::v8i64, 1},
2641 {ISD::SMIN, MVT::v16i32, 1},
2642 {ISD::UMIN, MVT::v16i32, 1},
2646 if (ST->hasAVX512())
2647 if (const auto *Entry = CostTableLookup(AVX512CostTblPairWise, ISD, MTy))
2648 return LT.first * Entry->Cost;
2651 if (const auto *Entry = CostTableLookup(AVX2CostTblPairWise, ISD, MTy))
2652 return LT.first * Entry->Cost;
2655 if (const auto *Entry = CostTableLookup(AVX1CostTblPairWise, ISD, MTy))
2656 return LT.first * Entry->Cost;
2659 if (const auto *Entry = CostTableLookup(SSE42CostTblPairWise, ISD, MTy))
2660 return LT.first * Entry->Cost;
2662 if (ST->hasAVX512())
2663 if (const auto *Entry =
2664 CostTableLookup(AVX512CostTblNoPairWise, ISD, MTy))
2665 return LT.first * Entry->Cost;
2668 if (const auto *Entry = CostTableLookup(AVX2CostTblNoPairWise, ISD, MTy))
2669 return LT.first * Entry->Cost;
2672 if (const auto *Entry = CostTableLookup(AVX1CostTblNoPairWise, ISD, MTy))
2673 return LT.first * Entry->Cost;
2676 if (const auto *Entry = CostTableLookup(SSE42CostTblNoPairWise, ISD, MTy))
2677 return LT.first * Entry->Cost;
2680 return BaseT::getMinMaxReductionCost(ValTy, CondTy, IsPairwise, IsUnsigned);
/// Calculate the cost of materializing a 64-bit value. This helper
/// method might only calculate a fraction of a larger immediate. Therefore,
/// it is valid to return a cost of ZERO.
2686 int X86TTIImpl::getIntImmCost(int64_t Val) {
2688 return TTI::TCC_Free;
2691 return TTI::TCC_Basic;
2693 return 2 * TTI::TCC_Basic;
2696 int X86TTIImpl::getIntImmCost(const APInt &Imm, Type *Ty) {
2697 assert(Ty->isIntegerTy());
2699 unsigned BitSize = Ty->getPrimitiveSizeInBits();
// Never hoist constants larger than 128 bits, because this might lead to
// incorrect code generation or assertions in codegen.
// FIXME: Create a cost model for types larger than i128 once the codegen
// issues have been fixed.
2708 return TTI::TCC_Free;
2711 return TTI::TCC_Free;
// Sign-extend all constants to a multiple of 64 bits.
2715 if (BitSize % 64 != 0)
2716 ImmVal = Imm.sext(alignTo(BitSize, 64));
// Split the constant into 64-bit chunks and calculate the cost of each chunk.
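// For example, an i128 immediate is priced as two 64-bit chunks: each chunk
// goes through getIntImmCost(int64_t) above and the per-chunk costs are
// summed by the loop below.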
2721 for (unsigned ShiftVal = 0; ShiftVal < BitSize; ShiftVal += 64) {
2722 APInt Tmp = ImmVal.ashr(ShiftVal).sextOrTrunc(64);
2723 int64_t Val = Tmp.getSExtValue();
2724 Cost += getIntImmCost(Val);
2726 // We need at least one instruction to materialize the constant.
2727 return std::max(1, Cost);
2730 int X86TTIImpl::getIntImmCost(unsigned Opcode, unsigned Idx, const APInt &Imm,
2732 assert(Ty->isIntegerTy());
2734 unsigned BitSize = Ty->getPrimitiveSizeInBits();
2735 // There is no cost model for constants with a bit size of 0. Return TCC_Free
2736 // here, so that constant hoisting will ignore this constant.
2738 return TTI::TCC_Free;
2740 unsigned ImmIdx = ~0U;
2743 return TTI::TCC_Free;
2744 case Instruction::GetElementPtr:
2745 // Always hoist the base address of a GetElementPtr. This prevents the
2746 // creation of new constants for every base constant that gets constant
2747 // folded with the offset.
2749 return 2 * TTI::TCC_Basic;
2750 return TTI::TCC_Free;
2751 case Instruction::Store:
2754 case Instruction::ICmp:
// This is an imperfect hack to prevent constant hoisting of
// compares that might be trying to check if a 64-bit value fits in
// 32 bits. The backend can optimize these cases using a right shift by 32.
// Ideally we would check the compare predicate here. There are also other
// similar immediates the backend can use shifts for.
2760 if (Idx == 1 && Imm.getBitWidth() == 64) {
2761 uint64_t ImmVal = Imm.getZExtValue();
2762 if (ImmVal == 0x100000000ULL || ImmVal == 0xffffffff)
2763 return TTI::TCC_Free;
2767 case Instruction::And:
// We support 64-bit ANDs with immediates that have 32 bits of leading zeroes
// by using a 32-bit operation with implicit zero extension. Detect such
// immediates here as the normal path expects bit 31 to be sign extended.
2771 if (Idx == 1 && Imm.getBitWidth() == 64 && isUInt<32>(Imm.getZExtValue()))
2772 return TTI::TCC_Free;
2775 case Instruction::Add:
2776 case Instruction::Sub:
2777 // For add/sub, we can use the opposite instruction for INT32_MIN.
2778 if (Idx == 1 && Imm.getBitWidth() == 64 && Imm.getZExtValue() == 0x80000000)
2779 return TTI::TCC_Free;
2782 case Instruction::UDiv:
2783 case Instruction::SDiv:
2784 case Instruction::URem:
2785 case Instruction::SRem:
2786 // Division by constant is typically expanded later into a different
2787 // instruction sequence. This completely changes the constants.
2788 // Report them as "free" to stop ConstantHoist from marking them as opaque.
2789 return TTI::TCC_Free;
2790 case Instruction::Mul:
2791 case Instruction::Or:
2792 case Instruction::Xor:
2795 // Always return TCC_Free for the shift value of a shift instruction.
2796 case Instruction::Shl:
2797 case Instruction::LShr:
2798 case Instruction::AShr:
2800 return TTI::TCC_Free;
2802 case Instruction::Trunc:
2803 case Instruction::ZExt:
2804 case Instruction::SExt:
2805 case Instruction::IntToPtr:
2806 case Instruction::PtrToInt:
2807 case Instruction::BitCast:
2808 case Instruction::PHI:
2809 case Instruction::Call:
2810 case Instruction::Select:
2811 case Instruction::Ret:
2812 case Instruction::Load:
2816 if (Idx == ImmIdx) {
2817 int NumConstants = divideCeil(BitSize, 64);
2818 int Cost = X86TTIImpl::getIntImmCost(Imm, Ty);
2819 return (Cost <= NumConstants * TTI::TCC_Basic)
2820 ? static_cast<int>(TTI::TCC_Free)
2824 return X86TTIImpl::getIntImmCost(Imm, Ty);
2827 int X86TTIImpl::getIntImmCost(Intrinsic::ID IID, unsigned Idx, const APInt &Imm,
2829 assert(Ty->isIntegerTy());
2831 unsigned BitSize = Ty->getPrimitiveSizeInBits();
2832 // There is no cost model for constants with a bit size of 0. Return TCC_Free
2833 // here, so that constant hoisting will ignore this constant.
2835 return TTI::TCC_Free;
2839 return TTI::TCC_Free;
2840 case Intrinsic::sadd_with_overflow:
2841 case Intrinsic::uadd_with_overflow:
2842 case Intrinsic::ssub_with_overflow:
2843 case Intrinsic::usub_with_overflow:
2844 case Intrinsic::smul_with_overflow:
2845 case Intrinsic::umul_with_overflow:
2846 if ((Idx == 1) && Imm.getBitWidth() <= 64 && isInt<32>(Imm.getSExtValue()))
2847 return TTI::TCC_Free;
2849 case Intrinsic::experimental_stackmap:
2850 if ((Idx < 2) || (Imm.getBitWidth() <= 64 && isInt<64>(Imm.getSExtValue())))
2851 return TTI::TCC_Free;
2853 case Intrinsic::experimental_patchpoint_void:
2854 case Intrinsic::experimental_patchpoint_i64:
2855 if ((Idx < 4) || (Imm.getBitWidth() <= 64 && isInt<64>(Imm.getSExtValue())))
2856 return TTI::TCC_Free;
2859 return X86TTIImpl::getIntImmCost(Imm, Ty);
2862 unsigned X86TTIImpl::getUserCost(const User *U,
2863 ArrayRef<const Value *> Operands) {
2864 if (isa<StoreInst>(U)) {
2865 Value *Ptr = U->getOperand(1);
// A store instruction with index and scale addressing costs 2 uops.
// Check the preceding GEP to identify non-const indices.
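// For example (AT&T syntax, registers illustrative): such a store lowers to
// something like mov %eax, (%rdi,%rcx,4), which this heuristic counts as
// 2 uops.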
2868 if (auto GEP = dyn_cast<GetElementPtrInst>(Ptr)) {
2869 if (!all_of(GEP->indices(), [](Value *V) { return isa<Constant>(V); }))
2870 return TTI::TCC_Basic * 2;
2872 return TTI::TCC_Basic;
2874 return BaseT::getUserCost(U, Operands);
// Return the average cost of a Gather / Scatter instruction; this may be improved later.
2878 int X86TTIImpl::getGSVectorCost(unsigned Opcode, Type *SrcVTy, Value *Ptr,
2879 unsigned Alignment, unsigned AddressSpace) {
2881 assert(isa<VectorType>(SrcVTy) && "Unexpected type in getGSVectorCost");
2882 unsigned VF = SrcVTy->getVectorNumElements();
// Try to reduce the index size from 64 bits (the default for a GEP) to 32
// bits. This is essential for VF 16: if the index can't be reduced to 32
// bits, the operation will use 16 x 64-bit indices, which do not fit in a
// zmm register and force a split. Also check that the base pointer is the
// same for all lanes, and that there's at most one variable index.
2889 auto getIndexSizeInBits = [](Value *Ptr, const DataLayout& DL) {
2890 unsigned IndexSize = DL.getPointerSizeInBits();
2891 GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(Ptr);
2892 if (IndexSize < 64 || !GEP)
2895 unsigned NumOfVarIndices = 0;
2896 Value *Ptrs = GEP->getPointerOperand();
2897 if (Ptrs->getType()->isVectorTy() && !getSplatValue(Ptrs))
2899 for (unsigned i = 1; i < GEP->getNumOperands(); ++i) {
2900 if (isa<Constant>(GEP->getOperand(i)))
2902 Type *IndxTy = GEP->getOperand(i)->getType();
2903 if (IndxTy->isVectorTy())
2904 IndxTy = IndxTy->getVectorElementType();
2905 if ((IndxTy->getPrimitiveSizeInBits() == 64 &&
2906 !isa<SExtInst>(GEP->getOperand(i))) ||
2907 ++NumOfVarIndices > 1)
2908 return IndexSize; // 64
2910 return (unsigned)32;
// Try to reduce IndexSize to 32 bits for VF 16.
// By default the IndexSize is equal to the pointer size.
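// For example, a VF-16 gather with 64-bit indices needs two v8i64 index
// vectors and must be split, while 32-bit indices fit in one v16i32 register.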
2916 unsigned IndexSize = (ST->hasAVX512() && VF >= 16)
2917 ? getIndexSizeInBits(Ptr, DL)
2918 : DL.getPointerSizeInBits();
2920 Type *IndexVTy = VectorType::get(IntegerType::get(SrcVTy->getContext(),
2922 std::pair<int, MVT> IdxsLT = TLI->getTypeLegalizationCost(DL, IndexVTy);
2923 std::pair<int, MVT> SrcLT = TLI->getTypeLegalizationCost(DL, SrcVTy);
2924 int SplitFactor = std::max(IdxsLT.first, SrcLT.first);
2925 if (SplitFactor > 1) {
// Handle splitting of a vector of pointers.
2927 Type *SplitSrcTy = VectorType::get(SrcVTy->getScalarType(), VF / SplitFactor);
2928 return SplitFactor * getGSVectorCost(Opcode, SplitSrcTy, Ptr, Alignment,
// The gather / scatter cost is given by Intel architects. It is a rough
// number since we are looking at one instruction at a time.
2934 const int GSOverhead = (Opcode == Instruction::Load)
2935 ? ST->getGatherOverhead()
2936 : ST->getScatterOverhead();
2937 return GSOverhead + VF * getMemoryOpCost(Opcode, SrcVTy->getScalarType(),
2938 Alignment, AddressSpace);
/// Return the cost of full scalarization of a gather / scatter operation.
///
/// Opcode - Load or Store instruction.
/// SrcVTy - The type of the data vector that should be gathered or scattered.
/// VariableMask - The mask is non-constant at compile time.
/// Alignment - Alignment for one element.
/// AddressSpace - The address space of the pointer(s).
2949 int X86TTIImpl::getGSScalarCost(unsigned Opcode, Type *SrcVTy,
2950 bool VariableMask, unsigned Alignment,
2951 unsigned AddressSpace) {
2952 unsigned VF = SrcVTy->getVectorNumElements();
2954 int MaskUnpackCost = 0;
2956 VectorType *MaskTy =
2957 VectorType::get(Type::getInt1Ty(SrcVTy->getContext()), VF);
2958 MaskUnpackCost = getScalarizationOverhead(MaskTy, false, true);
2959 int ScalarCompareCost =
2960 getCmpSelInstrCost(Instruction::ICmp, Type::getInt1Ty(SrcVTy->getContext()),
2962 int BranchCost = getCFInstrCost(Instruction::Br);
2963 MaskUnpackCost += VF * (BranchCost + ScalarCompareCost);
2966 // The cost of the scalar loads/stores.
2967 int MemoryOpCost = VF * getMemoryOpCost(Opcode, SrcVTy->getScalarType(),
2968 Alignment, AddressSpace);
2970 int InsertExtractCost = 0;
2971 if (Opcode == Instruction::Load)
2972 for (unsigned i = 0; i < VF; ++i)
2973 // Add the cost of inserting each scalar load into the vector
2974 InsertExtractCost +=
2975 getVectorInstrCost(Instruction::InsertElement, SrcVTy, i);
2977 for (unsigned i = 0; i < VF; ++i)
2978 // Add the cost of extracting each element out of the data vector
2979 InsertExtractCost +=
2980 getVectorInstrCost(Instruction::ExtractElement, SrcVTy, i);
2982 return MemoryOpCost + MaskUnpackCost + InsertExtractCost;
/// Calculate the cost of a Gather / Scatter operation.
2986 int X86TTIImpl::getGatherScatterOpCost(unsigned Opcode, Type *SrcVTy,
2987 Value *Ptr, bool VariableMask,
2988 unsigned Alignment) {
2989 assert(SrcVTy->isVectorTy() && "Unexpected data type for Gather/Scatter");
2990 unsigned VF = SrcVTy->getVectorNumElements();
2991 PointerType *PtrTy = dyn_cast<PointerType>(Ptr->getType());
2992 if (!PtrTy && Ptr->getType()->isVectorTy())
2993 PtrTy = dyn_cast<PointerType>(Ptr->getType()->getVectorElementType());
2994 assert(PtrTy && "Unexpected type for Ptr argument");
2995 unsigned AddressSpace = PtrTy->getAddressSpace();
2997 bool Scalarize = false;
2998 if ((Opcode == Instruction::Load && !isLegalMaskedGather(SrcVTy)) ||
2999 (Opcode == Instruction::Store && !isLegalMaskedScatter(SrcVTy)))
// Gather / Scatter for vector 2 is not profitable on KNL / SKX.
// The vector-4 variant of the gather/scatter instruction does not exist on
// KNL. We can extend it to 8 elements, but zeroing the upper bits of
// the mask vector will add more instructions. Right now we give the scalar
// cost of vector-4 for KNL. TODO: Check whether the gather/scatter
// instruction is better in the VariableMask case.
3007 if (ST->hasAVX512() && (VF == 2 || (VF == 4 && !ST->hasVLX())))
3011 return getGSScalarCost(Opcode, SrcVTy, VariableMask, Alignment,
3014 return getGSVectorCost(Opcode, SrcVTy, Ptr, Alignment, AddressSpace);
bool X86TTIImpl::isLSRCostLess(TargetTransformInfo::LSRCost &C1,
                               TargetTransformInfo::LSRCost &C2) {
  // X86-specific here: the instruction count gets first priority.
  return std::tie(C1.Insns, C1.NumRegs, C1.AddRecCost,
                  C1.NumIVMuls, C1.NumBaseAdds,
                  C1.ScaleCost, C1.ImmCost, C1.SetupCost) <
         std::tie(C2.Insns, C2.NumRegs, C2.AddRecCost,
                  C2.NumIVMuls, C2.NumBaseAdds,
                  C2.ScaleCost, C2.ImmCost, C2.SetupCost);
}
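// Note that std::tie compares lexicographically, so Insns dominates the
// comparison and each later field only breaks ties among the earlier ones.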
bool X86TTIImpl::canMacroFuseCmp() {
  return ST->hasMacroFusion() || ST->hasBranchFusion();
}
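// For example, on cores with macro-fusion a CMP or TEST immediately followed
// by a conditional branch can decode into a single fused uop, making the
// compare effectively free when it feeds the branch.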
bool X86TTIImpl::isLegalMaskedLoad(Type *DataTy) {
  if (!ST->hasAVX())
    return false;

  // The backend can't handle a single element vector.
  if (isa<VectorType>(DataTy) && DataTy->getVectorNumElements() == 1)
    return false;

  Type *ScalarTy = DataTy->getScalarType();

  if (ScalarTy->isPointerTy())
    return true;

  if (ScalarTy->isFloatTy() || ScalarTy->isDoubleTy())
    return true;

  if (!ScalarTy->isIntegerTy())
    return false;

  unsigned IntWidth = ScalarTy->getIntegerBitWidth();
  return IntWidth == 32 || IntWidth == 64 ||
         ((IntWidth == 8 || IntWidth == 16) && ST->hasBWI());
}
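// Illustrative outcomes of the predicate above: <8 x i32> and <4 x double>
// qualify on any AVX target, while <32 x i8> and <16 x i16> additionally
// require AVX-512 BWI.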
bool X86TTIImpl::isLegalMaskedStore(Type *DataType) {
  return isLegalMaskedLoad(DataType);
}
bool X86TTIImpl::isLegalMaskedExpandLoad(Type *DataTy) {
  if (!isa<VectorType>(DataTy))
    return false;

  if (!ST->hasAVX512())
    return false;

  // The backend can't handle a single element vector.
  if (DataTy->getVectorNumElements() == 1)
    return false;

  Type *ScalarTy = DataTy->getVectorElementType();

  if (ScalarTy->isFloatTy() || ScalarTy->isDoubleTy())
    return true;

  if (!ScalarTy->isIntegerTy())
    return false;

  unsigned IntWidth = ScalarTy->getIntegerBitWidth();
  return IntWidth == 32 || IntWidth == 64 ||
         ((IntWidth == 8 || IntWidth == 16) && ST->hasVBMI2());
}
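// Illustrative outcomes: <8 x i32> and <4 x double> expand-loads are legal on
// any AVX-512 target, while the 8- and 16-bit element cases additionally
// require VBMI2.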
bool X86TTIImpl::isLegalMaskedCompressStore(Type *DataTy) {
  return isLegalMaskedExpandLoad(DataTy);
}
bool X86TTIImpl::isLegalMaskedGather(Type *DataTy) {
  // Some CPUs have better gather performance than others.
  // TODO: Remove the explicit ST->hasAVX512()? That would mean we would only
  // enable gather with a -march.
  if (!(ST->hasAVX512() || (ST->hasFastGather() && ST->hasAVX2())))
    return false;

  // This function is called now in two cases: from the Loop Vectorizer
  // and from the Scalarizer.
  // When the Loop Vectorizer asks about legality of the feature,
  // the vectorization factor is not calculated yet. The Loop Vectorizer
  // sends a scalar type and the decision is based on the width of the
  // element.
  // Later on, the cost model will estimate usage of this intrinsic based on
  // the vector type.
  // The Scalarizer asks again about legality. It sends a vector type.
  // In this case we can reject non-power-of-2 vectors.
  // We also reject single element vectors as the type legalizer can't
  // scalarize them.
  if (isa<VectorType>(DataTy)) {
    unsigned NumElts = DataTy->getVectorNumElements();
    if (NumElts == 1 || !isPowerOf2_32(NumElts))
      return false;
  }
  Type *ScalarTy = DataTy->getScalarType();
  if (ScalarTy->isPointerTy())
    return true;

  if (ScalarTy->isFloatTy() || ScalarTy->isDoubleTy())
    return true;

  if (!ScalarTy->isIntegerTy())
    return false;

  unsigned IntWidth = ScalarTy->getIntegerBitWidth();
  return IntWidth == 32 || IntWidth == 64;
}
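// Illustrative outcomes: a <16 x float> or <8 x i64> gather passes this
// predicate, while <3 x i32> (non-power-of-2), <1 x i64> (single element) and
// any i8/i16 element type are rejected.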
bool X86TTIImpl::isLegalMaskedScatter(Type *DataType) {
  // AVX2 doesn't support scatter.
  if (!ST->hasAVX512())
    return false;
  return isLegalMaskedGather(DataType);
}
bool X86TTIImpl::hasDivRemOp(Type *DataType, bool IsSigned) {
  EVT VT = TLI->getValueType(DL, DataType);
  return TLI->isOperationLegal(IsSigned ? ISD::SDIVREM : ISD::UDIVREM, VT);
}
bool X86TTIImpl::isFCmpOrdCheaperThanFCmpZero(Type *Ty) {
  return true;
}
bool X86TTIImpl::areInlineCompatible(const Function *Caller,
                                     const Function *Callee) const {
  const TargetMachine &TM = getTLI()->getTargetMachine();

  // Treat this as a subset check on the subtarget features.
  const FeatureBitset &CallerBits =
      TM.getSubtargetImpl(*Caller)->getFeatureBits();
  const FeatureBitset &CalleeBits =
      TM.getSubtargetImpl(*Callee)->getFeatureBits();

  FeatureBitset RealCallerBits = CallerBits & ~InlineFeatureIgnoreList;
  FeatureBitset RealCalleeBits = CalleeBits & ~InlineFeatureIgnoreList;
  return (RealCallerBits & RealCalleeBits) == RealCalleeBits;
}
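// For example, a caller compiled with +avx2,+sse4.2 may inline a callee that
// only requires +sse4.2, but a caller without AVX-512 may not inline a callee
// whose code was compiled to rely on it.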
bool X86TTIImpl::areFunctionArgsABICompatible(
    const Function *Caller, const Function *Callee,
    SmallPtrSetImpl<Argument *> &Args) const {
  if (!BaseT::areFunctionArgsABICompatible(Caller, Callee, Args))
    return false;

  // If we get here, we know the target features match. If one function
  // considers 512-bit vectors legal and the other does not, consider them
  // incompatible.
  // FIXME: Look at the arguments and only consider 512 bit or larger vectors?
  const TargetMachine &TM = getTLI()->getTargetMachine();

  return TM.getSubtarget<X86Subtarget>(*Caller).useAVX512Regs() ==
         TM.getSubtarget<X86Subtarget>(*Callee).useAVX512Regs();
}
const X86TTIImpl::TTI::MemCmpExpansionOptions *
X86TTIImpl::enableMemCmpExpansion(bool IsZeroCmp) const {
  // Only enable vector loads for equality comparison.
  // Right now the vector version is not as fast, see #33329.
  static const auto ThreeWayOptions = [this]() {
    TTI::MemCmpExpansionOptions Options;
    if (ST->is64Bit()) {
      Options.LoadSizes.push_back(8);
    }
    Options.LoadSizes.push_back(4);
    Options.LoadSizes.push_back(2);
    Options.LoadSizes.push_back(1);
    return Options;
  }();
  static const auto EqZeroOptions = [this]() {
    TTI::MemCmpExpansionOptions Options;
    // TODO: Enable AVX512 when the DAG is ready.
    // if (ST->hasAVX512()) Options.LoadSizes.push_back(64);
    if (ST->hasAVX2()) Options.LoadSizes.push_back(32);
    if (ST->hasSSE2()) Options.LoadSizes.push_back(16);
    if (ST->is64Bit()) {
      Options.LoadSizes.push_back(8);
    }
    Options.LoadSizes.push_back(4);
    Options.LoadSizes.push_back(2);
    Options.LoadSizes.push_back(1);
    // All GPR and vector loads can be unaligned. SIMD compare requires integer
    // vectors (SSE2/AVX2).
    Options.AllowOverlappingLoads = true;
    return Options;
  }();
  return IsZeroCmp ? &EqZeroOptions : &ThreeWayOptions;
}
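// For example, with SSE2 a 16-byte memcmp(a, b, 16) == 0 can expand to a
// single 16-byte vector compare (pcmpeqb + pmovmskb), whereas the three-way
// (less/equal/greater) form is expanded with GPR loads of 8/4/2/1 bytes only.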
bool X86TTIImpl::enableInterleavedAccessVectorization() {
  // TODO: We expect this to be beneficial regardless of arch,
  // but there are currently some unexplained performance artifacts on Atom.
  // As a temporary solution, disable on Atom.
  return !(ST->isAtom());
}
// Get estimation for interleaved load/store operations for AVX2.
// \p Factor is the interleaved-access factor (stride) - number of
// (interleaved) elements in the group.
// \p Indices contains the indices for a strided load: when the
// interleaved load has gaps they indicate which elements are used.
// If Indices is empty (or if the number of indices is equal to the size
// of the interleaved-access as given in \p Factor) the access has no gaps.
//
// As opposed to AVX-512, AVX2 does not have generic shuffles that allow
// computing the cost using a generic formula as a function of generic
// shuffles. We therefore use a lookup table instead, filled according to
// the instruction sequences that codegen currently generates.
int X86TTIImpl::getInterleavedMemoryOpCostAVX2(unsigned Opcode, Type *VecTy,
                                               unsigned Factor,
                                               ArrayRef<unsigned> Indices,
                                               unsigned Alignment,
                                               unsigned AddressSpace,
                                               bool UseMaskForCond,
                                               bool UseMaskForGaps) {

  if (UseMaskForCond || UseMaskForGaps)
    return BaseT::getInterleavedMemoryOpCost(Opcode, VecTy, Factor, Indices,
                                             Alignment, AddressSpace,
                                             UseMaskForCond, UseMaskForGaps);

  // We currently support only fully-interleaved groups, with no gaps.
  // TODO: Support also strided loads (interleaved groups with gaps).
  if (Indices.size() && Indices.size() != Factor)
    return BaseT::getInterleavedMemoryOpCost(Opcode, VecTy, Factor, Indices,
                                             Alignment, AddressSpace);

  // VecTy for an interleaved memop is <VF*Factor x Elt>.
  // So, for VF=4, Interleave Factor = 3, Element type = i32 we have
  // VecTy = <12 x i32>.
  MVT LegalVT = getTLI()->getTypeLegalizationCost(DL, VecTy).second;

  // This function can be called with VecTy=<6xi128>, Factor=3, in which case
  // the VF=2, while v2i128 is an unsupported MVT vector type
  // (see MachineValueType.h::getVectorVT()).
  if (!LegalVT.isVector())
    return BaseT::getInterleavedMemoryOpCost(Opcode, VecTy, Factor, Indices,
                                             Alignment, AddressSpace);

  unsigned VF = VecTy->getVectorNumElements() / Factor;
  Type *ScalarTy = VecTy->getVectorElementType();

  // Calculate the number of memory operations (NumOfMemOps) required
  // to load/store the VecTy.
  unsigned VecTySize = DL.getTypeStoreSize(VecTy);
  unsigned LegalVTSize = LegalVT.getStoreSize();
  unsigned NumOfMemOps = (VecTySize + LegalVTSize - 1) / LegalVTSize;
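  // For example, VecTy = <12 x i32> legalizes to v8i32 on AVX2, so
  // VecTySize = 48 bytes, LegalVTSize = 32 bytes and
  // NumOfMemOps = ceil(48 / 32) = 2.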
  // Get the cost of one memory operation.
  Type *SingleMemOpTy = VectorType::get(VecTy->getVectorElementType(),
                                        LegalVT.getVectorNumElements());
  unsigned MemOpCost =
      getMemoryOpCost(Opcode, SingleMemOpTy, Alignment, AddressSpace);

  VectorType *VT = VectorType::get(ScalarTy, VF);
  EVT ETy = TLI->getValueType(DL, VT);
  if (!ETy.isSimple())
    return BaseT::getInterleavedMemoryOpCost(Opcode, VecTy, Factor, Indices,
                                             Alignment, AddressSpace);
  // TODO: Complete for other data-types and strides.
  // Each combination of Stride, ElementTy and VF results in a different
  // sequence; the cost tables are therefore accessed with:
  // Factor (stride) and VectorType=VFxElemType.
  // The Cost accounts only for the shuffle sequence;
  // the cost of the loads/stores is accounted for separately.
  static const CostTblEntry AVX2InterleavedLoadTbl[] = {
    { 2, MVT::v4i64, 6 },  //(load 8i64 and)  deinterleave into 2 x 4i64
    { 2, MVT::v4f64, 6 },  //(load 8f64 and)  deinterleave into 2 x 4f64

    { 3, MVT::v2i8,  10 }, //(load 6i8 and)   deinterleave into 3 x 2i8
    { 3, MVT::v4i8,  4 },  //(load 12i8 and)  deinterleave into 3 x 4i8
    { 3, MVT::v8i8,  9 },  //(load 24i8 and)  deinterleave into 3 x 8i8
    { 3, MVT::v16i8, 11 }, //(load 48i8 and)  deinterleave into 3 x 16i8
    { 3, MVT::v32i8, 13 }, //(load 96i8 and)  deinterleave into 3 x 32i8
    { 3, MVT::v8f32, 17 }, //(load 24f32 and) deinterleave into 3 x 8f32

    { 4, MVT::v2i8,  12 }, //(load 8i8 and)   deinterleave into 4 x 2i8
    { 4, MVT::v4i8,  4 },  //(load 16i8 and)  deinterleave into 4 x 4i8
    { 4, MVT::v8i8,  20 }, //(load 32i8 and)  deinterleave into 4 x 8i8
    { 4, MVT::v16i8, 39 }, //(load 64i8 and)  deinterleave into 4 x 16i8
    { 4, MVT::v32i8, 80 }, //(load 128i8 and) deinterleave into 4 x 32i8

    { 8, MVT::v8f32, 40 }  //(load 64f32 and) deinterleave into 8 x 8f32
  };
  static const CostTblEntry AVX2InterleavedStoreTbl[] = {
    { 2, MVT::v4i64, 6 },  //interleave 2 x 4i64 into 8i64 (and store)
    { 2, MVT::v4f64, 6 },  //interleave 2 x 4f64 into 8f64 (and store)

    { 3, MVT::v2i8,  7 },  //interleave 3 x 2i8 into 6i8 (and store)
    { 3, MVT::v4i8,  8 },  //interleave 3 x 4i8 into 12i8 (and store)
    { 3, MVT::v8i8,  11 }, //interleave 3 x 8i8 into 24i8 (and store)
    { 3, MVT::v16i8, 11 }, //interleave 3 x 16i8 into 48i8 (and store)
    { 3, MVT::v32i8, 13 }, //interleave 3 x 32i8 into 96i8 (and store)

    { 4, MVT::v2i8,  12 }, //interleave 4 x 2i8 into 8i8 (and store)
    { 4, MVT::v4i8,  9 },  //interleave 4 x 4i8 into 16i8 (and store)
    { 4, MVT::v8i8,  10 }, //interleave 4 x 8i8 into 32i8 (and store)
    { 4, MVT::v16i8, 10 }, //interleave 4 x 16i8 into 64i8 (and store)
    { 4, MVT::v32i8, 12 }  //interleave 4 x 32i8 into 128i8 (and store)
  };
  if (Opcode == Instruction::Load) {
    if (const auto *Entry =
            CostTableLookup(AVX2InterleavedLoadTbl, Factor, ETy.getSimpleVT()))
      return NumOfMemOps * MemOpCost + Entry->Cost;
  } else {
    assert(Opcode == Instruction::Store &&
           "Expected Store Instruction at this point");
    if (const auto *Entry =
            CostTableLookup(AVX2InterleavedStoreTbl, Factor, ETy.getSimpleVT()))
      return NumOfMemOps * MemOpCost + Entry->Cost;
  }

  return BaseT::getInterleavedMemoryOpCost(Opcode, VecTy, Factor, Indices,
                                           Alignment, AddressSpace);
}
// Get estimation for interleaved load/store operations and strided loads.
// \p Indices contains indices for a strided load.
// \p Factor - the factor of interleaving.
// AVX-512 provides 3-src shuffles that significantly reduce the cost.
int X86TTIImpl::getInterleavedMemoryOpCostAVX512(unsigned Opcode, Type *VecTy,
                                                 unsigned Factor,
                                                 ArrayRef<unsigned> Indices,
                                                 unsigned Alignment,
                                                 unsigned AddressSpace,
                                                 bool UseMaskForCond,
                                                 bool UseMaskForGaps) {

  if (UseMaskForCond || UseMaskForGaps)
    return BaseT::getInterleavedMemoryOpCost(Opcode, VecTy, Factor, Indices,
                                             Alignment, AddressSpace,
                                             UseMaskForCond, UseMaskForGaps);

  // VecTy for an interleaved memop is <VF*Factor x Elt>.
  // So, for VF=4, Interleave Factor = 3, Element type = i32 we have
  // VecTy = <12 x i32>.

  // Calculate the number of memory operations (NumOfMemOps) required
  // to load/store the VecTy.
  MVT LegalVT = getTLI()->getTypeLegalizationCost(DL, VecTy).second;
  unsigned VecTySize = DL.getTypeStoreSize(VecTy);
  unsigned LegalVTSize = LegalVT.getStoreSize();
  unsigned NumOfMemOps = (VecTySize + LegalVTSize - 1) / LegalVTSize;

  // Get the cost of one memory operation.
  Type *SingleMemOpTy = VectorType::get(VecTy->getVectorElementType(),
                                        LegalVT.getVectorNumElements());
  unsigned MemOpCost =
      getMemoryOpCost(Opcode, SingleMemOpTy, Alignment, AddressSpace);

  unsigned VF = VecTy->getVectorNumElements() / Factor;
  MVT VT = MVT::getVectorVT(MVT::getVT(VecTy->getScalarType()), VF);
  if (Opcode == Instruction::Load) {
    // The tables (AVX512InterleavedLoadTbl and AVX512InterleavedStoreTbl)
    // contain the cost of the optimized shuffle sequence that the
    // X86InterleavedAccess pass will generate.
    // The cost of loads and stores is computed separately from the table.

    // X86InterleavedAccess supports only the following interleaved-access
    // groups.
    static const CostTblEntry AVX512InterleavedLoadTbl[] = {
        {3, MVT::v16i8, 12}, //(load 48i8 and)  deinterleave into 3 x 16i8
        {3, MVT::v32i8, 14}, //(load 96i8 and)  deinterleave into 3 x 32i8
        {3, MVT::v64i8, 22}, //(load 192i8 and) deinterleave into 3 x 64i8
    };

    if (const auto *Entry =
            CostTableLookup(AVX512InterleavedLoadTbl, Factor, VT))
      return NumOfMemOps * MemOpCost + Entry->Cost;
    // If an entry does not exist, fall back to the default implementation.

    // The kind of shuffle depends on the number of loaded values.
    // If we load the entire data in one register, we can use a 1-src shuffle.
    // Otherwise, we'll merge 2 sources in each operation.
    TTI::ShuffleKind ShuffleKind =
        (NumOfMemOps > 1) ? TTI::SK_PermuteTwoSrc : TTI::SK_PermuteSingleSrc;

    unsigned ShuffleCost =
        getShuffleCost(ShuffleKind, SingleMemOpTy, 0, nullptr);

    unsigned NumOfLoadsInInterleaveGrp =
        Indices.size() ? Indices.size() : Factor;
    Type *ResultTy = VectorType::get(VecTy->getVectorElementType(),
                                     VecTy->getVectorNumElements() / Factor);
    unsigned NumOfResults =
        getTLI()->getTypeLegalizationCost(DL, ResultTy).first *
        NumOfLoadsInInterleaveGrp;

    // About half of the loads may be folded into shuffles when we have only
    // one result. If we have more than one result, we do not fold loads at
    // all.
    unsigned NumOfUnfoldedLoads =
        NumOfResults > 1 ? NumOfMemOps : NumOfMemOps / 2;

    // Get the number of shuffle operations per result.
    unsigned NumOfShufflesPerResult =
        std::max((unsigned)1, (unsigned)(NumOfMemOps - 1));

    // The SK_PermuteTwoSrc shuffle clobbers one of its source operands.
    // When we have more than one destination, we need additional instructions
    // to keep the sources intact.
    unsigned NumOfMoves = 0;
    if (NumOfResults > 1 && ShuffleKind == TTI::SK_PermuteTwoSrc)
      NumOfMoves = NumOfResults * NumOfShufflesPerResult / 2;
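    // Worked example (illustrative): a fully-interleaved <32 x i64> load with
    // Factor = 4 legalizes to 4 x v8i64, so NumOfMemOps = 4, NumOfResults = 4,
    // NumOfShufflesPerResult = 3, NumOfUnfoldedLoads = 4 and
    // NumOfMoves = 4 * 3 / 2 = 6.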
    int Cost = NumOfResults * NumOfShufflesPerResult * ShuffleCost +
               NumOfUnfoldedLoads * MemOpCost + NumOfMoves;

    return Cost;
  }

  // Store.
  assert(Opcode == Instruction::Store &&
         "Expected Store Instruction at this point");
  // X86InterleavedAccess supports only the following interleaved-access
  // groups.
  static const CostTblEntry AVX512InterleavedStoreTbl[] = {
      {3, MVT::v16i8, 12}, // interleave 3 x 16i8 into 48i8 (and store)
      {3, MVT::v32i8, 14}, // interleave 3 x 32i8 into 96i8 (and store)
      {3, MVT::v64i8, 26}, // interleave 3 x 64i8 into 192i8 (and store)

      {4, MVT::v8i8,  10}, // interleave 4 x 8i8 into 32i8 (and store)
      {4, MVT::v16i8, 11}, // interleave 4 x 16i8 into 64i8 (and store)
      {4, MVT::v32i8, 14}, // interleave 4 x 32i8 into 128i8 (and store)
      {4, MVT::v64i8, 24}  // interleave 4 x 64i8 into 256i8 (and store)
  };
  if (const auto *Entry =
          CostTableLookup(AVX512InterleavedStoreTbl, Factor, VT))
    return NumOfMemOps * MemOpCost + Entry->Cost;
  // If an entry does not exist, fall back to the default implementation.

  // There are no strided stores at the moment, and a store can't be folded
  // into the shuffle.
  unsigned NumOfSources = Factor; // The number of values to be merged.
  unsigned ShuffleCost =
      getShuffleCost(TTI::SK_PermuteTwoSrc, SingleMemOpTy, 0, nullptr);
  unsigned NumOfShufflesPerStore = NumOfSources - 1;

  // The SK_PermuteTwoSrc shuffle clobbers one of its source operands.
  // We need additional instructions to keep the sources intact.
  unsigned NumOfMoves = NumOfMemOps * NumOfShufflesPerStore / 2;
  int Cost = NumOfMemOps * (MemOpCost + NumOfShufflesPerStore * ShuffleCost) +
             NumOfMoves;
  return Cost;
}
int X86TTIImpl::getInterleavedMemoryOpCost(unsigned Opcode, Type *VecTy,
                                           unsigned Factor,
                                           ArrayRef<unsigned> Indices,
                                           unsigned Alignment,
                                           unsigned AddressSpace,
                                           bool UseMaskForCond,
                                           bool UseMaskForGaps) {
  auto isSupportedOnAVX512 = [](Type *VecTy, bool HasBW) {
    Type *EltTy = VecTy->getVectorElementType();
    if (EltTy->isFloatTy() || EltTy->isDoubleTy() || EltTy->isIntegerTy(64) ||
        EltTy->isIntegerTy(32) || EltTy->isPointerTy())
      return true;
    if (EltTy->isIntegerTy(16) || EltTy->isIntegerTy(8))
      return HasBW;
    return false;
  };
  if (ST->hasAVX512() && isSupportedOnAVX512(VecTy, ST->hasBWI()))
    return getInterleavedMemoryOpCostAVX512(Opcode, VecTy, Factor, Indices,
                                            Alignment, AddressSpace,
                                            UseMaskForCond, UseMaskForGaps);
  if (ST->hasAVX2())
    return getInterleavedMemoryOpCostAVX2(Opcode, VecTy, Factor, Indices,
                                          Alignment, AddressSpace,
                                          UseMaskForCond, UseMaskForGaps);

  return BaseT::getInterleavedMemoryOpCost(Opcode, VecTy, Factor, Indices,
                                           Alignment, AddressSpace,
                                           UseMaskForCond, UseMaskForGaps);
}