// Copyright 2016 The SwiftShader Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//    http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
15 #include "Reactor.hpp"
20 #include "ExecutableMemory.hpp"
21 #include "MutexLock.hpp"
26 #if REACTOR_LLVM_VERSION < 7
27 #include "llvm/Analysis/LoopPass.h"
28 #include "llvm/Constants.h"
29 #include "llvm/Function.h"
30 #include "llvm/GlobalVariable.h"
31 #include "llvm/Intrinsics.h"
32 #include "llvm/LLVMContext.h"
33 #include "llvm/Module.h"
34 #include "llvm/PassManager.h"
35 #include "llvm/Support/IRBuilder.h"
36 #include "llvm/Support/TargetSelect.h"
37 #include "llvm/Target/TargetData.h"
38 #include "llvm/Target/TargetOptions.h"
39 #include "llvm/Transforms/Scalar.h"
40 #include "../lib/ExecutionEngine/JIT/JIT.h"
42 #include "LLVMRoutine.hpp"
43 #include "LLVMRoutineManager.hpp"
45 #define ARGS(...) __VA_ARGS__
47 #include "llvm/Analysis/LoopPass.h"
48 #include "llvm/ExecutionEngine/ExecutionEngine.h"
49 #include "llvm/ExecutionEngine/JITSymbol.h"
50 #include "llvm/ExecutionEngine/Orc/CompileUtils.h"
51 #include "llvm/ExecutionEngine/Orc/IRCompileLayer.h"
52 #include "llvm/ExecutionEngine/Orc/LambdaResolver.h"
53 #include "llvm/ExecutionEngine/Orc/RTDyldObjectLinkingLayer.h"
54 #include "llvm/ExecutionEngine/RTDyldMemoryManager.h"
55 #include "llvm/ExecutionEngine/SectionMemoryManager.h"
56 #include "llvm/IR/Constants.h"
57 #include "llvm/IR/DataLayout.h"
58 #include "llvm/IR/Function.h"
59 #include "llvm/IR/GlobalVariable.h"
60 #include "llvm/IR/IRBuilder.h"
61 #include "llvm/IR/Intrinsics.h"
62 #include "llvm/IR/LLVMContext.h"
63 #include "llvm/IR/LegacyPassManager.h"
64 #include "llvm/IR/Mangler.h"
65 #include "llvm/IR/Module.h"
66 #include "llvm/Support/Error.h"
67 #include "llvm/Support/TargetSelect.h"
68 #include "llvm/Target/TargetOptions.h"
69 #include "llvm/Transforms/InstCombine/InstCombine.h"
70 #include "llvm/Transforms/Scalar.h"
71 #include "llvm/Transforms/Scalar/GVN.h"
73 #include "LLVMRoutine.hpp"
75 #define ARGS(...) {__VA_ARGS__}
76 #define CreateCall2 CreateCall
77 #define CreateCall3 CreateCall
79 #include <unordered_map>
86 #if defined(__i386__) || defined(__x86_64__)
87 #include <xmmintrin.h>
#if defined(__x86_64__) && defined(_WIN32)
// The legacy x86-64 JIT references this callback for lazy compilation.
// Reactor compiles routines eagerly, so it must never be invoked.
extern "C" void X86CompilationCallback()
{
	assert(false);   // UNIMPLEMENTED
}
#endif
99 #if REACTOR_LLVM_VERSION < 7
102 extern bool JITEmitDebugInfo;
108 class LLVMReactorJIT;
113 rr::LLVMReactorJIT *reactorJIT = nullptr;
114 llvm::IRBuilder<> *builder = nullptr;
115 llvm::LLVMContext *context = nullptr;
116 llvm::Module *module = nullptr;
117 llvm::Function *function = nullptr;
119 rr::MutexLock codegenMutex;
121 #ifdef ENABLE_RR_PRINT
// Returns 'str' with every occurrence of 'substr' replaced by 'replacement'.
// Scans left to right; the scan resumes after each inserted replacement, so
// replacements themselves are never re-matched (no infinite loop when
// 'replacement' contains 'substr').
std::string replace(std::string str, const std::string& substr, const std::string& replacement)
{
	size_t pos = 0;
	while((pos = str.find(substr, pos)) != std::string::npos) {
		str.replace(pos, substr.length(), replacement);
		pos += replacement.length();
	}
	return str;
}
131 #endif // ENABLE_RR_PRINT
133 #if REACTOR_LLVM_VERSION >= 7
134 llvm::Value *lowerPAVG(llvm::Value *x, llvm::Value *y)
136 llvm::VectorType *ty = llvm::cast<llvm::VectorType>(x->getType());
138 llvm::VectorType *extTy =
139 llvm::VectorType::getExtendedElementVectorType(ty);
140 x = ::builder->CreateZExt(x, extTy);
141 y = ::builder->CreateZExt(y, extTy);
144 llvm::Constant *one = llvm::ConstantInt::get(extTy, 1);
145 llvm::Value *res = ::builder->CreateAdd(x, y);
146 res = ::builder->CreateAdd(res, one);
147 res = ::builder->CreateLShr(res, one);
148 return ::builder->CreateTrunc(res, ty);
151 llvm::Value *lowerPMINMAX(llvm::Value *x, llvm::Value *y,
152 llvm::ICmpInst::Predicate pred)
154 return ::builder->CreateSelect(::builder->CreateICmp(pred, x, y), x, y);
157 llvm::Value *lowerPCMP(llvm::ICmpInst::Predicate pred, llvm::Value *x,
158 llvm::Value *y, llvm::Type *dstTy)
160 return ::builder->CreateSExt(::builder->CreateICmp(pred, x, y), dstTy, "");
163 #if defined(__i386__) || defined(__x86_64__)
164 llvm::Value *lowerPMOV(llvm::Value *op, llvm::Type *dstType, bool sext)
166 llvm::VectorType *srcTy = llvm::cast<llvm::VectorType>(op->getType());
167 llvm::VectorType *dstTy = llvm::cast<llvm::VectorType>(dstType);
169 llvm::Value *undef = llvm::UndefValue::get(srcTy);
170 llvm::SmallVector<uint32_t, 16> mask(dstTy->getNumElements());
171 std::iota(mask.begin(), mask.end(), 0);
172 llvm::Value *v = ::builder->CreateShuffleVector(op, undef, mask);
174 return sext ? ::builder->CreateSExt(v, dstTy)
175 : ::builder->CreateZExt(v, dstTy);
178 llvm::Value *lowerPABS(llvm::Value *v)
180 llvm::Value *zero = llvm::Constant::getNullValue(v->getType());
181 llvm::Value *cmp = ::builder->CreateICmp(llvm::ICmpInst::ICMP_SGT, v, zero);
182 llvm::Value *neg = ::builder->CreateNeg(v);
183 return ::builder->CreateSelect(cmp, v, neg);
185 #endif // defined(__i386__) || defined(__x86_64__)
187 #if !defined(__i386__) && !defined(__x86_64__)
188 llvm::Value *lowerPFMINMAX(llvm::Value *x, llvm::Value *y,
189 llvm::FCmpInst::Predicate pred)
191 return ::builder->CreateSelect(::builder->CreateFCmp(pred, x, y), x, y);
194 llvm::Value *lowerRound(llvm::Value *x)
196 llvm::Function *nearbyint = llvm::Intrinsic::getDeclaration(
197 ::module, llvm::Intrinsic::nearbyint, {x->getType()});
198 return ::builder->CreateCall(nearbyint, ARGS(x));
201 llvm::Value *lowerRoundInt(llvm::Value *x, llvm::Type *ty)
203 return ::builder->CreateFPToSI(lowerRound(x), ty);
206 llvm::Value *lowerFloor(llvm::Value *x)
208 llvm::Function *floor = llvm::Intrinsic::getDeclaration(
209 ::module, llvm::Intrinsic::floor, {x->getType()});
210 return ::builder->CreateCall(floor, ARGS(x));
213 llvm::Value *lowerTrunc(llvm::Value *x)
215 llvm::Function *trunc = llvm::Intrinsic::getDeclaration(
216 ::module, llvm::Intrinsic::trunc, {x->getType()});
217 return ::builder->CreateCall(trunc, ARGS(x));
220 // Packed add/sub saturatation
221 llvm::Value *lowerPSAT(llvm::Value *x, llvm::Value *y, bool isAdd, bool isSigned)
223 llvm::VectorType *ty = llvm::cast<llvm::VectorType>(x->getType());
224 llvm::VectorType *extTy = llvm::VectorType::getExtendedElementVectorType(ty);
226 unsigned numBits = ty->getScalarSizeInBits();
228 llvm::Value *max, *min, *extX, *extY;
231 max = llvm::ConstantInt::get(extTy, (1LL << (numBits - 1)) - 1, true);
232 min = llvm::ConstantInt::get(extTy, (-1LL << (numBits - 1)), true);
233 extX = ::builder->CreateSExt(x, extTy);
234 extY = ::builder->CreateSExt(y, extTy);
238 assert(numBits <= 64);
239 uint64_t maxVal = (numBits == 64) ? ~0ULL : (1ULL << numBits) - 1;
240 max = llvm::ConstantInt::get(extTy, maxVal, false);
241 min = llvm::ConstantInt::get(extTy, 0, false);
242 extX = ::builder->CreateZExt(x, extTy);
243 extY = ::builder->CreateZExt(y, extTy);
246 llvm::Value *res = isAdd ? ::builder->CreateAdd(extX, extY)
247 : ::builder->CreateSub(extX, extY);
249 res = lowerPMINMAX(res, min, llvm::ICmpInst::ICMP_SGT);
250 res = lowerPMINMAX(res, max, llvm::ICmpInst::ICMP_SLT);
252 return ::builder->CreateTrunc(res, ty);
255 llvm::Value *lowerPUADDSAT(llvm::Value *x, llvm::Value *y)
257 return lowerPSAT(x, y, true, false);
260 llvm::Value *lowerPSADDSAT(llvm::Value *x, llvm::Value *y)
262 return lowerPSAT(x, y, true, true);
265 llvm::Value *lowerPUSUBSAT(llvm::Value *x, llvm::Value *y)
267 return lowerPSAT(x, y, false, false);
270 llvm::Value *lowerPSSUBSAT(llvm::Value *x, llvm::Value *y)
272 return lowerPSAT(x, y, false, true);
275 llvm::Value *lowerSQRT(llvm::Value *x)
277 llvm::Function *sqrt = llvm::Intrinsic::getDeclaration(
278 ::module, llvm::Intrinsic::sqrt, {x->getType()});
279 return ::builder->CreateCall(sqrt, ARGS(x));
282 llvm::Value *lowerRCP(llvm::Value *x)
284 llvm::Type *ty = x->getType();
286 if (llvm::VectorType *vectorTy = llvm::dyn_cast<llvm::VectorType>(ty))
288 one = llvm::ConstantVector::getSplat(
289 vectorTy->getNumElements(),
290 llvm::ConstantFP::get(vectorTy->getElementType(), 1));
294 one = llvm::ConstantFP::get(ty, 1);
296 return ::builder->CreateFDiv(one, x);
299 llvm::Value *lowerRSQRT(llvm::Value *x)
301 return lowerRCP(lowerSQRT(x));
304 llvm::Value *lowerVectorShl(llvm::Value *x, uint64_t scalarY)
306 llvm::VectorType *ty = llvm::cast<llvm::VectorType>(x->getType());
307 llvm::Value *y = llvm::ConstantVector::getSplat(
308 ty->getNumElements(),
309 llvm::ConstantInt::get(ty->getElementType(), scalarY));
310 return ::builder->CreateShl(x, y);
313 llvm::Value *lowerVectorAShr(llvm::Value *x, uint64_t scalarY)
315 llvm::VectorType *ty = llvm::cast<llvm::VectorType>(x->getType());
316 llvm::Value *y = llvm::ConstantVector::getSplat(
317 ty->getNumElements(),
318 llvm::ConstantInt::get(ty->getElementType(), scalarY));
319 return ::builder->CreateAShr(x, y);
322 llvm::Value *lowerVectorLShr(llvm::Value *x, uint64_t scalarY)
324 llvm::VectorType *ty = llvm::cast<llvm::VectorType>(x->getType());
325 llvm::Value *y = llvm::ConstantVector::getSplat(
326 ty->getNumElements(),
327 llvm::ConstantInt::get(ty->getElementType(), scalarY));
328 return ::builder->CreateLShr(x, y);
331 llvm::Value *lowerMulAdd(llvm::Value *x, llvm::Value *y)
333 llvm::VectorType *ty = llvm::cast<llvm::VectorType>(x->getType());
334 llvm::VectorType *extTy = llvm::VectorType::getExtendedElementVectorType(ty);
336 llvm::Value *extX = ::builder->CreateSExt(x, extTy);
337 llvm::Value *extY = ::builder->CreateSExt(y, extTy);
338 llvm::Value *mult = ::builder->CreateMul(extX, extY);
340 llvm::Value *undef = llvm::UndefValue::get(extTy);
342 llvm::SmallVector<uint32_t, 16> evenIdx;
343 llvm::SmallVector<uint32_t, 16> oddIdx;
344 for (uint64_t i = 0, n = ty->getNumElements(); i < n; i += 2)
346 evenIdx.push_back(i);
347 oddIdx.push_back(i + 1);
350 llvm::Value *lhs = ::builder->CreateShuffleVector(mult, undef, evenIdx);
351 llvm::Value *rhs = ::builder->CreateShuffleVector(mult, undef, oddIdx);
352 return ::builder->CreateAdd(lhs, rhs);
355 llvm::Value *lowerPack(llvm::Value *x, llvm::Value *y, bool isSigned)
357 llvm::VectorType *srcTy = llvm::cast<llvm::VectorType>(x->getType());
358 llvm::VectorType *dstTy = llvm::VectorType::getTruncatedElementVectorType(srcTy);
360 llvm::IntegerType *dstElemTy =
361 llvm::cast<llvm::IntegerType>(dstTy->getElementType());
363 uint64_t truncNumBits = dstElemTy->getIntegerBitWidth();
364 assert(truncNumBits < 64 && "shift 64 must be handled separately");
365 llvm::Constant *max, *min;
368 max = llvm::ConstantInt::get(srcTy, (1LL << (truncNumBits - 1)) - 1, true);
369 min = llvm::ConstantInt::get(srcTy, (-1LL << (truncNumBits - 1)), true);
373 max = llvm::ConstantInt::get(srcTy, (1ULL << truncNumBits) - 1, false);
374 min = llvm::ConstantInt::get(srcTy, 0, false);
377 x = lowerPMINMAX(x, min, llvm::ICmpInst::ICMP_SGT);
378 x = lowerPMINMAX(x, max, llvm::ICmpInst::ICMP_SLT);
379 y = lowerPMINMAX(y, min, llvm::ICmpInst::ICMP_SGT);
380 y = lowerPMINMAX(y, max, llvm::ICmpInst::ICMP_SLT);
382 x = ::builder->CreateTrunc(x, dstTy);
383 y = ::builder->CreateTrunc(y, dstTy);
385 llvm::SmallVector<uint32_t, 16> index(srcTy->getNumElements() * 2);
386 std::iota(index.begin(), index.end(), 0);
388 return ::builder->CreateShuffleVector(x, y, index);
391 llvm::Value *lowerSignMask(llvm::Value *x, llvm::Type *retTy)
393 llvm::VectorType *ty = llvm::cast<llvm::VectorType>(x->getType());
394 llvm::Constant *zero = llvm::ConstantInt::get(ty, 0);
395 llvm::Value *cmp = ::builder->CreateICmpSLT(x, zero);
397 llvm::Value *ret = ::builder->CreateZExt(
398 ::builder->CreateExtractElement(cmp, static_cast<uint64_t>(0)), retTy);
399 for (uint64_t i = 1, n = ty->getNumElements(); i < n; ++i)
401 llvm::Value *elem = ::builder->CreateZExt(
402 ::builder->CreateExtractElement(cmp, i), retTy);
403 ret = ::builder->CreateOr(ret, ::builder->CreateShl(elem, i));
408 llvm::Value *lowerFPSignMask(llvm::Value *x, llvm::Type *retTy)
410 llvm::VectorType *ty = llvm::cast<llvm::VectorType>(x->getType());
411 llvm::Constant *zero = llvm::ConstantFP::get(ty, 0);
412 llvm::Value *cmp = ::builder->CreateFCmpULT(x, zero);
414 llvm::Value *ret = ::builder->CreateZExt(
415 ::builder->CreateExtractElement(cmp, static_cast<uint64_t>(0)), retTy);
416 for (uint64_t i = 1, n = ty->getNumElements(); i < n; ++i)
418 llvm::Value *elem = ::builder->CreateZExt(
419 ::builder->CreateExtractElement(cmp, i), retTy);
420 ret = ::builder->CreateOr(ret, ::builder->CreateShl(elem, i));
424 #endif // !defined(__i386__) && !defined(__x86_64__)
425 #endif // REACTOR_LLVM_VERSION >= 7
427 llvm::Value *lowerMulHigh(llvm::Value *x, llvm::Value *y, bool sext)
429 llvm::VectorType *ty = llvm::cast<llvm::VectorType>(x->getType());
430 llvm::VectorType *extTy = llvm::VectorType::getExtendedElementVectorType(ty);
432 llvm::Value *extX, *extY;
435 extX = ::builder->CreateSExt(x, extTy);
436 extY = ::builder->CreateSExt(y, extTy);
440 extX = ::builder->CreateZExt(x, extTy);
441 extY = ::builder->CreateZExt(y, extTy);
444 llvm::Value *mult = ::builder->CreateMul(extX, extY);
446 llvm::IntegerType *intTy = llvm::cast<llvm::IntegerType>(ty->getElementType());
447 llvm::Value *mulh = ::builder->CreateAShr(mult, intTy->getBitWidth());
448 return ::builder->CreateTrunc(mulh, ty);
454 #if REACTOR_LLVM_VERSION < 7
// Legacy (pre-LLVM-7) JIT wrapper driving the old llvm::JIT engine through
// an LLVMRoutineManager. NOTE(review): this listing is missing lines (the
// class header, braces, and some statements are not visible); code is left
// untouched here, comments only.
459 llvm::SmallVector<std::string, 16> mattrs;
460 llvm::ExecutionEngine *executionEngine;
461 LLVMRoutineManager *routineManager;
// Constructor: records the target architecture and feature attributes; the
// execution engine and routine manager are created later (see below).
464 LLVMReactorJIT(const std::string &arch_,
465 const llvm::SmallVectorImpl<std::string> &mattrs_) :
467 mattrs(mattrs_.begin(), mattrs_.end()),
468 executionEngine(nullptr),
469 routineManager(nullptr)
// Session start (enclosing method signature not visible): creates the
// global ::module, the routine manager, a target machine for 'arch' with
// the recorded attributes, and the legacy JIT execution engine.
477 ::module = new llvm::Module("", *::context);
479 routineManager = new LLVMRoutineManager();
481 llvm::TargetMachine *targetMachine =
482 llvm::EngineBuilder::selectTarget(
483 ::module, arch, "", mattrs, llvm::Reloc::Default,
484 llvm::CodeModel::JITDefault, &error);
486 executionEngine = llvm::JIT::createJIT(
487 ::module, &error, routineManager, llvm::CodeGenOpt::Aggressive,
488 true, targetMachine);
// Session end: tears down the engine and clears the globals. The routine
// manager pointer is only nulled here — presumably owned/freed by the
// engine; TODO(review) confirm against the legacy JIT's ownership rules.
493 delete executionEngine;
494 executionEngine = nullptr;
495 routineManager = nullptr;
497 ::function = nullptr;
// acquireRoutine: JIT-compiles the current ::function and wraps its entry
// point in an LLVMRoutine via the routine manager.
501 LLVMRoutine *acquireRoutine(llvm::Function *func)
503 void *entry = executionEngine->getPointerToFunction(::function);
504 return routineManager->acquireRoutine(entry);
// optimize: lazily builds a PassManager (once, cached in a function-local
// static) from the global 'optimization' pass list, then runs it on ::module.
507 void optimize(llvm::Module *module)
509 static llvm::PassManager *passManager = nullptr;
513 passManager = new llvm::PassManager();
515 passManager->add(new llvm::TargetData(*executionEngine->getTargetData()));
516 passManager->add(llvm::createScalarReplAggregatesPass());
// Up to 10 passes; the list is terminated by the first 'Disabled' entry.
518 for(int pass = 0; pass < 10 && optimization[pass] != Disabled; pass++)
520 switch(optimization[pass])
522 case Disabled: break;
523 case CFGSimplification: passManager->add(llvm::createCFGSimplificationPass()); break;
524 case LICM: passManager->add(llvm::createLICMPass()); break;
525 case AggressiveDCE: passManager->add(llvm::createAggressiveDCEPass()); break;
526 case GVN: passManager->add(llvm::createGVNPass()); break;
527 case InstructionCombining: passManager->add(llvm::createInstructionCombiningPass()); break;
528 case Reassociate: passManager->add(llvm::createReassociatePass()); break;
529 case DeadStoreElimination: passManager->add(llvm::createDeadStoreEliminationPass()); break;
530 case SCCP: passManager->add(llvm::createSCCPPass()); break;
531 case ScalarReplAggregates: passManager->add(llvm::createScalarReplAggregatesPass()); break;
// Note: runs over the global ::module, not the 'module' parameter.
538 passManager->run(*::module);
// Maps names of external C runtime functions referenced by generated code
// to their addresses in the current process, for ORC symbol resolution.
// NOTE(review): braces/access specifiers and the member 'func_' declaration
// are missing from this listing; code left untouched.
542 class ExternalFunctionSymbolResolver
545 using FunctionMap = std::unordered_map<std::string, void *>;
// Registers every libc/libm function that Reactor-generated code may call.
549 ExternalFunctionSymbolResolver()
551 func_.emplace("floorf", reinterpret_cast<void*>(floorf));
552 func_.emplace("nearbyintf", reinterpret_cast<void*>(nearbyintf));
553 func_.emplace("truncf", reinterpret_cast<void*>(truncf));
554 func_.emplace("printf", reinterpret_cast<void*>(printf));
555 func_.emplace("puts", reinterpret_cast<void*>(puts));
556 func_.emplace("fmodf", reinterpret_cast<void*>(fmodf));
// Looks up a symbol by name; asserts (debug builds) when it is unknown.
559 void *findSymbol(const std::string &name) const
561 // Trim off any underscores from the start of the symbol. LLVM likes
562 // to append these on macOS.
563 const char* trimmed = name.c_str();
564 while (trimmed[0] == '_') { trimmed++; }
566 FunctionMap::const_iterator it = func_.find(trimmed);
567 assert(it != func_.end()); // Missing functions will likely make the module fail in exciting non-obvious ways.
// ORC-based JIT wrapper for LLVM >= 7: object-linking layer + IR compile
// layer, one VModule per emitted routine so modules can be freed when their
// routine is released. NOTE(review): this listing is missing lines (class
// header, braces, parts of the constructor init-list); code left untouched.
575 using ObjLayer = llvm::orc::RTDyldObjectLinkingLayer;
576 using CompileLayer = llvm::orc::IRCompileLayer<ObjLayer, llvm::orc::SimpleCompiler>;
578 llvm::orc::ExecutionSession session;
579 ExternalFunctionSymbolResolver externalSymbolResolver;
580 std::shared_ptr<llvm::orc::SymbolResolver> resolver;
581 std::unique_ptr<llvm::TargetMachine> targetMachine;
582 const llvm::DataLayout dataLayout;
584 CompileLayer compileLayer;
// Counter used to give each emitted function a unique name ("f0", "f1", ...).
585 size_t emittedFunctionsNum;
// Constructor: builds a legacy lookup resolver that first consults the
// known external C functions, falling back to the object layer.
588 LLVMReactorJIT(const char *arch, const llvm::SmallVectorImpl<std::string>& mattrs,
589 const llvm::TargetOptions &targetOpts):
590 resolver(createLegacyLookupResolver(
592 [this](const std::string &name) {
593 void *func = externalSymbolResolver.findSymbol(name);
596 return llvm::JITSymbol(
597 reinterpret_cast<uintptr_t>(func), llvm::JITSymbolFlags::Absolute);
600 return objLayer.findSymbol(name, true);
602 [](llvm::Error err) {
605 // TODO: Log the symbol resolution errors.
// Target machine built from 'arch'/'mattrs'/'targetOpts' via EngineBuilder
// (some init-list lines are not visible here).
609 targetMachine(llvm::EngineBuilder()
612 .setTargetOptions(targetOpts)
614 dataLayout(targetMachine->createDataLayout()),
617 [this](llvm::orc::VModuleKey) {
618 return ObjLayer::Resources{
619 std::make_shared<llvm::SectionMemoryManager>(),
622 compileLayer(objLayer, llvm::orc::SimpleCompiler(*targetMachine)),
623 emittedFunctionsNum(0)
// Session start/end: create a fresh global ::module / clear the globals.
629 ::module = new llvm::Module("", *::context);
634 ::function = nullptr;
// acquireRoutine: names and externalizes the function, hands the module to
// the compile layer, resolves the (mangled) symbol, and wraps the address
// in an LLVMRoutine that releases the module when destroyed.
638 LLVMRoutine *acquireRoutine(llvm::Function *func)
640 std::string name = "f" + llvm::Twine(emittedFunctionsNum++).str();
642 func->setLinkage(llvm::GlobalValue::ExternalLinkage);
643 func->setDoesNotThrow();
645 std::unique_ptr<llvm::Module> mod(::module);
647 mod->setDataLayout(dataLayout);
649 auto moduleKey = session.allocateVModule();
650 llvm::cantFail(compileLayer.addModule(moduleKey, std::move(mod)));
652 std::string mangledName;
654 llvm::raw_string_ostream mangledNameStream(mangledName);
655 llvm::Mangler::getNameWithPrefix(mangledNameStream, name, dataLayout);
658 llvm::JITSymbol symbol = compileLayer.findSymbolIn(moduleKey, mangledName, false);
660 llvm::Expected<llvm::JITTargetAddress> expectAddr = symbol.getAddress();
666 void *addr = reinterpret_cast<void *>(static_cast<intptr_t>(expectAddr.get()));
667 return new LLVMRoutine(addr, releaseRoutineCallback, this, moduleKey);
// optimize: builds a fresh legacy PassManager per call (unlike the pre-7
// path, which caches one) from the global 'optimization' list.
670 void optimize(llvm::Module *module)
672 std::unique_ptr<llvm::legacy::PassManager> passManager(
673 new llvm::legacy::PassManager());
675 passManager->add(llvm::createSROAPass());
677 for(int pass = 0; pass < 10 && optimization[pass] != Disabled; pass++)
679 switch(optimization[pass])
681 case Disabled: break;
682 case CFGSimplification: passManager->add(llvm::createCFGSimplificationPass()); break;
683 case LICM: passManager->add(llvm::createLICMPass()); break;
684 case AggressiveDCE: passManager->add(llvm::createAggressiveDCEPass()); break;
685 case GVN: passManager->add(llvm::createGVNPass()); break;
686 case InstructionCombining: passManager->add(llvm::createInstructionCombiningPass()); break;
687 case Reassociate: passManager->add(llvm::createReassociatePass()); break;
688 case DeadStoreElimination: passManager->add(llvm::createDeadStoreEliminationPass()); break;
689 case SCCP: passManager->add(llvm::createSCCPPass()); break;
690 case ScalarReplAggregates: passManager->add(llvm::createSROAPass()); break;
// Note: runs over the global ::module, not the 'module' parameter.
696 passManager->run(*::module);
// Removes a routine's module from the compile layer once the routine dies.
700 void releaseRoutineModule(llvm::orc::VModuleKey moduleKey)
702 llvm::cantFail(compileLayer.removeModule(moduleKey));
// C-style callback handed to LLVMRoutine (see acquireRoutine above).
705 static void releaseRoutineCallback(LLVMReactorJIT *jit, uint64_t moduleKey)
707 jit->releaseRoutineModule(moduleKey);
712 Optimization optimization[10] = {InstructionCombining, Disabled};
// The abstract Type* types are implemented as LLVM types, except that
// 64-bit vectors are emulated using 128-bit ones to avoid use of MMX in x86
// and VFP in ARM, and eliminate the overhead of converting them to explicit
// 128-bit ones. LLVM types are pointers, so we can represent emulated types
// as abstract pointers with small enum values.
enum InternalType : uintptr_t
{
	// Emulated types:
	Type_v2i32,
	Type_v4i16,
	Type_v2i16,
	Type_v8i8,
	Type_v4i8,
	Type_v2f32,
	EmulatedTypeCount,

	// Returned by asInternalType() to indicate that the abstract Type*
	// should be interpreted as LLVM type pointer:
	Type_LLVM
};
734 inline InternalType asInternalType(Type *type)
736 InternalType t = static_cast<InternalType>(reinterpret_cast<uintptr_t>(type));
737 return (t < EmulatedTypeCount) ? t : Type_LLVM;
740 llvm::Type *T(Type *t)
742 // Use 128-bit vectors to implement logically shorter ones.
743 switch(asInternalType(t))
745 case Type_v2i32: return T(Int4::getType());
746 case Type_v4i16: return T(Short8::getType());
747 case Type_v2i16: return T(Short8::getType());
748 case Type_v8i8: return T(Byte16::getType());
749 case Type_v4i8: return T(Byte16::getType());
750 case Type_v2f32: return T(Float4::getType());
751 case Type_LLVM: return reinterpret_cast<llvm::Type*>(t);
752 default: assert(false); return nullptr;
756 inline Type *T(llvm::Type *t)
758 return reinterpret_cast<Type*>(t);
761 Type *T(InternalType t)
763 return reinterpret_cast<Type*>(t);
766 inline llvm::Value *V(Value *t)
768 return reinterpret_cast<llvm::Value*>(t);
771 inline Value *V(llvm::Value *t)
773 return reinterpret_cast<Value*>(t);
776 inline std::vector<llvm::Type*> &T(std::vector<Type*> &t)
778 return reinterpret_cast<std::vector<llvm::Type*>&>(t);
781 inline llvm::BasicBlock *B(BasicBlock *t)
783 return reinterpret_cast<llvm::BasicBlock*>(t);
786 inline BasicBlock *B(llvm::BasicBlock *t)
788 return reinterpret_cast<BasicBlock*>(t);
791 static size_t typeSize(Type *type)
793 switch(asInternalType(type))
795 case Type_v2i32: return 8;
796 case Type_v4i16: return 8;
797 case Type_v2i16: return 4;
798 case Type_v8i8: return 8;
799 case Type_v4i8: return 4;
800 case Type_v2f32: return 8;
803 llvm::Type *t = T(type);
807 return sizeof(void*);
810 // At this point we should only have LLVM 'primitive' types.
811 unsigned int bits = t->getPrimitiveSizeInBits();
814 // TODO(capn): Booleans are 1 bit integers in LLVM's SSA type system,
815 // but are typically stored as one byte. The DataLayout structure should
816 // be used here and many other places if this assumption fails.
817 return (bits + 7) / 8;
826 static unsigned int elementCount(Type *type)
828 switch(asInternalType(type))
830 case Type_v2i32: return 2;
831 case Type_v4i16: return 4;
832 case Type_v2i16: return 2;
833 case Type_v8i8: return 8;
834 case Type_v4i8: return 4;
835 case Type_v2f32: return 2;
836 case Type_LLVM: return llvm::cast<llvm::VectorType>(T(type))->getNumElements();
837 default: assert(false); return 0;
841 static llvm::AtomicOrdering atomicOrdering(bool atomic, std::memory_order memoryOrder)
843 #if REACTOR_LLVM_VERSION < 7
844 return llvm::AtomicOrdering::NotAtomic;
849 return llvm::AtomicOrdering::NotAtomic;
854 case std::memory_order_relaxed: return llvm::AtomicOrdering::Monotonic; // https://llvm.org/docs/Atomics.html#monotonic
855 case std::memory_order_consume: return llvm::AtomicOrdering::Acquire; // https://llvm.org/docs/Atomics.html#acquire: "It should also be used for C++11/C11 memory_order_consume."
856 case std::memory_order_acquire: return llvm::AtomicOrdering::Acquire;
857 case std::memory_order_release: return llvm::AtomicOrdering::Release;
858 case std::memory_order_acq_rel: return llvm::AtomicOrdering::AcquireRelease;
859 case std::memory_order_seq_cst: return llvm::AtomicOrdering::SequentiallyConsistent;
860 default: assert(false); return llvm::AtomicOrdering::AcquireRelease;
// Nucleus constructor body (the 'Nucleus::Nucleus()' signature line is not
// visible in this listing). Serializes all codegen behind a global mutex,
// initializes LLVM targets, selects the architecture and CPU features, and
// creates the per-session JIT, context and IR builder.
866 ::codegenMutex.lock(); // Reactor and LLVM are currently not thread safe
868 llvm::InitializeNativeTarget();
870 #if REACTOR_LLVM_VERSION >= 7
871 llvm::InitializeNativeTargetAsmPrinter();
872 llvm::InitializeNativeTargetAsmParser();
877 ::context = new llvm::LLVMContext();
// Compile-time selection of the LLVM target architecture string.
880 #if defined(__x86_64__)
881 static const char arch[] = "x86-64";
882 #elif defined(__i386__)
883 static const char arch[] = "x86";
884 #elif defined(__aarch64__)
885 static const char arch[] = "arm64";
886 #elif defined(__arm__)
887 static const char arch[] = "arm";
888 #elif defined(__mips__)
889 #if defined(__mips64)
890 static const char arch[] = "mips64el";
892 static const char arch[] = "mipsel";
895 #error "unknown architecture"
// CPU feature attributes, probed at runtime on x86 via CPUID.
898 llvm::SmallVector<std::string, 1> mattrs;
899 #if defined(__i386__) || defined(__x86_64__)
900 mattrs.push_back(CPUID::supportsMMX() ? "+mmx" : "-mmx");
901 mattrs.push_back(CPUID::supportsCMOV() ? "+cmov" : "-cmov");
902 mattrs.push_back(CPUID::supportsSSE() ? "+sse" : "-sse");
903 mattrs.push_back(CPUID::supportsSSE2() ? "+sse2" : "-sse2");
904 mattrs.push_back(CPUID::supportsSSE3() ? "+sse3" : "-sse3");
905 mattrs.push_back(CPUID::supportsSSSE3() ? "+ssse3" : "-ssse3");
// The SSE4.1 feature flag was renamed between these LLVM versions.
906 #if REACTOR_LLVM_VERSION < 7
907 mattrs.push_back(CPUID::supportsSSE4_1() ? "+sse41" : "-sse41");
909 mattrs.push_back(CPUID::supportsSSE4_1() ? "+sse4.1" : "-sse4.1");
911 #elif defined(__arm__)
913 mattrs.push_back("+armv8-a");
915 // armv7-a requires compiler-rt routines; otherwise, compiled kernel
916 // might fail to link.
// Fast-math configuration (global flags pre-7, TargetOptions from 7 on).
920 #if REACTOR_LLVM_VERSION < 7
921 llvm::JITEmitDebugInfo = false;
922 llvm::UnsafeFPMath = true;
923 // llvm::NoInfsFPMath = true;
924 // llvm::NoNaNsFPMath = true;
926 llvm::TargetOptions targetOpts;
927 targetOpts.UnsafeFPMath = false;
928 // targetOpts.NoInfsFPMath = true;
929 // targetOpts.NoNaNsFPMath = true;
// Create the version-appropriate JIT wrapper and open a session.
934 #if REACTOR_LLVM_VERSION < 7
935 ::reactorJIT = new LLVMReactorJIT(arch, mattrs);
937 ::reactorJIT = new LLVMReactorJIT(arch, mattrs, targetOpts);
941 ::reactorJIT->startSession();
945 ::builder = new llvm::IRBuilder<>(*::context);
// Nucleus destructor body (signature line not visible): closes the JIT
// session and releases the codegen mutex taken by the constructor.
951 ::reactorJIT->endSession();
953 ::codegenMutex.unlock();
// Finalizes the current function and JIT-compiles it into a Routine.
// NOTE(review): this listing is missing lines (braces, the optimization
// call, and the guards around the IR-dump sections); code left untouched.
956 Routine *Nucleus::acquireRoutine(const char *name, bool runOptimizations)
// If the current block has no terminator, synthesize a return so the IR is
// well-formed: the returned value is undef for non-void functions.
958 if(::builder->GetInsertBlock()->empty() || !::builder->GetInsertBlock()->back().isTerminator())
960 llvm::Type *type = ::function->getReturnType();
968 createRet(V(llvm::UndefValue::get(type)));
// Debug dump of the unoptimized module to "<name>-llvm-dump-unopt.txt"
// (presumably behind a disabled-by-default guard not visible here).
974 #if REACTOR_LLVM_VERSION < 7
976 llvm::raw_fd_ostream file((std::string(name) + "-llvm-dump-unopt.txt").c_str(), error);
978 std::error_code error;
979 llvm::raw_fd_ostream file(std::string(name) + "-llvm-dump-unopt.txt", error);
982 ::module->print(file, 0);
// Debug dump of the optimized module to "<name>-llvm-dump-opt.txt".
992 #if REACTOR_LLVM_VERSION < 7
994 llvm::raw_fd_ostream file((std::string(name) + "-llvm-dump-opt.txt").c_str(), error);
996 std::error_code error;
997 llvm::raw_fd_ostream file(std::string(name) + "-llvm-dump-opt.txt", error);
1000 ::module->print(file, 0);
// Hand the finished function to the JIT and wrap the entry point.
1003 LLVMRoutine *routine = ::reactorJIT->acquireRoutine(::function);
1008 void Nucleus::optimize()
1010 ::reactorJIT->optimize(::module);
1013 Value *Nucleus::allocateStackVariable(Type *type, int arraySize)
1015 // Need to allocate it in the entry block for mem2reg to work
1016 llvm::BasicBlock &entryBlock = ::function->getEntryBlock();
1018 llvm::Instruction *declaration;
1022 #if REACTOR_LLVM_VERSION < 7
1023 declaration = new llvm::AllocaInst(T(type), V(Nucleus::createConstantInt(arraySize)));
1025 declaration = new llvm::AllocaInst(T(type), 0, V(Nucleus::createConstantInt(arraySize)));
1030 #if REACTOR_LLVM_VERSION < 7
1031 declaration = new llvm::AllocaInst(T(type), (llvm::Value*)nullptr);
1033 declaration = new llvm::AllocaInst(T(type), 0, (llvm::Value*)nullptr);
1037 entryBlock.getInstList().push_front(declaration);
1039 return V(declaration);
1042 BasicBlock *Nucleus::createBasicBlock()
1044 return B(llvm::BasicBlock::Create(*::context, "", ::function));
1047 BasicBlock *Nucleus::getInsertBlock()
1049 return B(::builder->GetInsertBlock());
1052 void Nucleus::setInsertBlock(BasicBlock *basicBlock)
1054 // assert(::builder->GetInsertBlock()->back().isTerminator());
1055 ::builder->SetInsertPoint(B(basicBlock));
1058 void Nucleus::createFunction(Type *ReturnType, std::vector<Type*> &Params)
1060 llvm::FunctionType *functionType = llvm::FunctionType::get(T(ReturnType), T(Params), false);
1061 ::function = llvm::Function::Create(functionType, llvm::GlobalValue::InternalLinkage, "", ::module);
1062 ::function->setCallingConv(llvm::CallingConv::C);
1064 #if defined(_WIN32) && REACTOR_LLVM_VERSION >= 7
1066 // On Windows, stack memory is committed in increments of 4 kB pages, with the last page
1067 // having a trap which allows the OS to grow the stack. For functions with a stack frame
1068 // larger than 4 kB this can cause an issue when a variable is accessed beyond the guard
1069 // page. Therefore the compiler emits a call to __chkstk in the function prolog to probe
1070 // the stack and ensure all pages have been committed. This is currently broken in LLVM
1071 // JIT, but we can prevent emitting the stack probe call:
1072 ::function->addFnAttr("stack-probe-size", "1048576");
1075 ::builder->SetInsertPoint(llvm::BasicBlock::Create(*::context, "", ::function));
1078 Value *Nucleus::getArgument(unsigned int index)
1080 llvm::Function::arg_iterator args = ::function->arg_begin();
1091 void Nucleus::createRetVoid()
1093 ::builder->CreateRetVoid();
1096 void Nucleus::createRet(Value *v)
1098 ::builder->CreateRet(V(v));
1101 void Nucleus::createBr(BasicBlock *dest)
1103 ::builder->CreateBr(B(dest));
1106 void Nucleus::createCondBr(Value *cond, BasicBlock *ifTrue, BasicBlock *ifFalse)
1108 ::builder->CreateCondBr(V(cond), B(ifTrue), B(ifFalse));
1111 Value *Nucleus::createAdd(Value *lhs, Value *rhs)
1113 return V(::builder->CreateAdd(V(lhs), V(rhs)));
1116 Value *Nucleus::createSub(Value *lhs, Value *rhs)
1118 return V(::builder->CreateSub(V(lhs), V(rhs)));
1121 Value *Nucleus::createMul(Value *lhs, Value *rhs)
1123 return V(::builder->CreateMul(V(lhs), V(rhs)));
1126 Value *Nucleus::createUDiv(Value *lhs, Value *rhs)
1128 return V(::builder->CreateUDiv(V(lhs), V(rhs)));
1131 Value *Nucleus::createSDiv(Value *lhs, Value *rhs)
1133 return V(::builder->CreateSDiv(V(lhs), V(rhs)));
1136 Value *Nucleus::createFAdd(Value *lhs, Value *rhs)
1138 return V(::builder->CreateFAdd(V(lhs), V(rhs)));
1141 Value *Nucleus::createFSub(Value *lhs, Value *rhs)
1143 return V(::builder->CreateFSub(V(lhs), V(rhs)));
1146 Value *Nucleus::createFMul(Value *lhs, Value *rhs)
1148 return V(::builder->CreateFMul(V(lhs), V(rhs)));
1151 Value *Nucleus::createFDiv(Value *lhs, Value *rhs)
1153 return V(::builder->CreateFDiv(V(lhs), V(rhs)));
1156 Value *Nucleus::createURem(Value *lhs, Value *rhs)
1158 return V(::builder->CreateURem(V(lhs), V(rhs)));
1161 Value *Nucleus::createSRem(Value *lhs, Value *rhs)
1163 return V(::builder->CreateSRem(V(lhs), V(rhs)));
1166 Value *Nucleus::createFRem(Value *lhs, Value *rhs)
1168 return V(::builder->CreateFRem(V(lhs), V(rhs)));
1171 Value *Nucleus::createShl(Value *lhs, Value *rhs)
1173 return V(::builder->CreateShl(V(lhs), V(rhs)));
1176 Value *Nucleus::createLShr(Value *lhs, Value *rhs)
1178 return V(::builder->CreateLShr(V(lhs), V(rhs)));
1181 Value *Nucleus::createAShr(Value *lhs, Value *rhs)
1183 return V(::builder->CreateAShr(V(lhs), V(rhs)));
1186 Value *Nucleus::createAnd(Value *lhs, Value *rhs)
1188 return V(::builder->CreateAnd(V(lhs), V(rhs)));
1191 Value *Nucleus::createOr(Value *lhs, Value *rhs)
1193 return V(::builder->CreateOr(V(lhs), V(rhs)));
1196 Value *Nucleus::createXor(Value *lhs, Value *rhs)
1198 return V(::builder->CreateXor(V(lhs), V(rhs)));
1201 Value *Nucleus::createNeg(Value *v)
1203 return V(::builder->CreateNeg(V(v)));
1206 Value *Nucleus::createFNeg(Value *v)
1208 return V(::builder->CreateFNeg(V(v)));
1211 Value *Nucleus::createNot(Value *v)
1213 return V(::builder->CreateNot(V(v)));
1216 Value *Nucleus::createLoad(Value *ptr, Type *type, bool isVolatile, unsigned int alignment, bool atomic, std::memory_order memoryOrder)
1218 switch(asInternalType(type))
1224 return createBitCast(
1225 createInsertElement(
1226 V(llvm::UndefValue::get(llvm::VectorType::get(T(Long::getType()), 2))),
1227 createLoad(createBitCast(ptr, Pointer<Long>::getType()), Long::getType(), isVolatile, alignment, atomic, memoryOrder),
1232 if(alignment != 0) // Not a local variable (all vectors are 128-bit).
1234 Value *u = V(llvm::UndefValue::get(llvm::VectorType::get(T(Long::getType()), 2)));
1235 Value *i = createLoad(createBitCast(ptr, Pointer<Int>::getType()), Int::getType(), isVolatile, alignment, atomic, memoryOrder);
1236 i = createZExt(i, Long::getType());
1237 Value *v = createInsertElement(u, i, 0);
1238 return createBitCast(v, type);
1240 // Fallthrough to non-emulated case.
1243 assert(V(ptr)->getType()->getContainedType(0) == T(type));
1244 auto load = new llvm::LoadInst(V(ptr), "", isVolatile, alignment);
1245 load->setAtomic(atomicOrdering(atomic, memoryOrder));
1247 return V(::builder->Insert(load));
1250 assert(false); return nullptr;
1254 Value *Nucleus::createStore(Value *value, Value *ptr, Type *type, bool isVolatile, unsigned int alignment, bool atomic, std::memory_order memoryOrder)
1256 switch(asInternalType(type))
1263 createExtractElement(
1264 createBitCast(value, T(llvm::VectorType::get(T(Long::getType()), 2))), Long::getType(), 0),
1265 createBitCast(ptr, Pointer<Long>::getType()),
1266 Long::getType(), isVolatile, alignment, atomic, memoryOrder);
1270 if(alignment != 0) // Not a local variable (all vectors are 128-bit).
1273 createExtractElement(createBitCast(value, Int4::getType()), Int::getType(), 0),
1274 createBitCast(ptr, Pointer<Int>::getType()),
1275 Int::getType(), isVolatile, alignment, atomic, memoryOrder);
1278 // Fallthrough to non-emulated case.
1281 assert(V(ptr)->getType()->getContainedType(0) == T(type));
1282 auto store = ::builder->Insert(new llvm::StoreInst(V(value), V(ptr), isVolatile, alignment));
1283 store->setAtomic(atomicOrdering(atomic, memoryOrder));
1288 assert(false); return nullptr;
1292 Value *Nucleus::createGEP(Value *ptr, Type *type, Value *index, bool unsignedIndex)
1294 assert(V(ptr)->getType()->getContainedType(0) == T(type));
1296 if(sizeof(void*) == 8)
1298 // LLVM manual: "When indexing into an array, pointer or vector,
1299 // integers of any width are allowed, and they are not required to
1300 // be constant. These integers are treated as signed values where
1303 // Thus if we want indexes to be treated as unsigned we have to
1304 // zero-extend them ourselves.
1306 // Note that this is not because we want to address anywhere near
1307 // 4 GB of data. Instead this is important for performance because
1308 // x86 supports automatic zero-extending of 32-bit registers to
1309 // 64-bit. Thus when indexing into an array using a uint32 is
1310 // actually faster than an int32.
1311 index = unsignedIndex ?
1312 createZExt(index, Long::getType()) :
1313 createSExt(index, Long::getType());
1316 // For non-emulated types we can rely on LLVM's GEP to calculate the
1317 // effective address correctly.
1318 if(asInternalType(type) == Type_LLVM)
1320 return V(::builder->CreateGEP(V(ptr), V(index)));
1323 // For emulated types we have to multiply the index by the intended
1324 // type size ourselves to obain the byte offset.
1325 index = (sizeof(void*) == 8) ?
1326 createMul(index, createConstantLong((int64_t)typeSize(type))) :
1327 createMul(index, createConstantInt((int)typeSize(type)));
1329 // Cast to a byte pointer, apply the byte offset, and cast back to the
1330 // original pointer type.
1331 return createBitCast(
1332 V(::builder->CreateGEP(V(createBitCast(ptr, T(llvm::PointerType::get(T(Byte::getType()), 0)))), V(index))),
1333 T(llvm::PointerType::get(T(type), 0)));
1336 Value *Nucleus::createAtomicAdd(Value *ptr, Value *value)
1338 return V(::builder->CreateAtomicRMW(llvm::AtomicRMWInst::Add, V(ptr), V(value), llvm::AtomicOrdering::SequentiallyConsistent));
1341 Value *Nucleus::createTrunc(Value *v, Type *destType)
1343 return V(::builder->CreateTrunc(V(v), T(destType)));
1346 Value *Nucleus::createZExt(Value *v, Type *destType)
1348 return V(::builder->CreateZExt(V(v), T(destType)));
1351 Value *Nucleus::createSExt(Value *v, Type *destType)
1353 return V(::builder->CreateSExt(V(v), T(destType)));
1356 Value *Nucleus::createFPToSI(Value *v, Type *destType)
1358 return V(::builder->CreateFPToSI(V(v), T(destType)));
1361 Value *Nucleus::createSIToFP(Value *v, Type *destType)
1363 return V(::builder->CreateSIToFP(V(v), T(destType)));
1366 Value *Nucleus::createFPTrunc(Value *v, Type *destType)
1368 return V(::builder->CreateFPTrunc(V(v), T(destType)));
1371 Value *Nucleus::createFPExt(Value *v, Type *destType)
1373 return V(::builder->CreateFPExt(V(v), T(destType)));
1376 Value *Nucleus::createBitCast(Value *v, Type *destType)
1378 // Bitcasts must be between types of the same logical size. But with emulated narrow vectors we need
1379 // support for casting between scalars and wide vectors. Emulate them by writing to the stack and
1380 // reading back as the destination type.
1381 if(!V(v)->getType()->isVectorTy() && T(destType)->isVectorTy())
1383 Value *readAddress = allocateStackVariable(destType);
1384 Value *writeAddress = createBitCast(readAddress, T(llvm::PointerType::get(V(v)->getType(), 0)));
1385 createStore(v, writeAddress, T(V(v)->getType()));
1386 return createLoad(readAddress, destType);
1388 else if(V(v)->getType()->isVectorTy() && !T(destType)->isVectorTy())
1390 Value *writeAddress = allocateStackVariable(T(V(v)->getType()));
1391 createStore(v, writeAddress, T(V(v)->getType()));
1392 Value *readAddress = createBitCast(writeAddress, T(llvm::PointerType::get(T(destType), 0)));
1393 return createLoad(readAddress, destType);
1396 return V(::builder->CreateBitCast(V(v), T(destType)));
1399 Value *Nucleus::createICmpEQ(Value *lhs, Value *rhs)
1401 return V(::builder->CreateICmpEQ(V(lhs), V(rhs)));
1404 Value *Nucleus::createICmpNE(Value *lhs, Value *rhs)
1406 return V(::builder->CreateICmpNE(V(lhs), V(rhs)));
1409 Value *Nucleus::createICmpUGT(Value *lhs, Value *rhs)
1411 return V(::builder->CreateICmpUGT(V(lhs), V(rhs)));
1414 Value *Nucleus::createICmpUGE(Value *lhs, Value *rhs)
1416 return V(::builder->CreateICmpUGE(V(lhs), V(rhs)));
1419 Value *Nucleus::createICmpULT(Value *lhs, Value *rhs)
1421 return V(::builder->CreateICmpULT(V(lhs), V(rhs)));
1424 Value *Nucleus::createICmpULE(Value *lhs, Value *rhs)
1426 return V(::builder->CreateICmpULE(V(lhs), V(rhs)));
1429 Value *Nucleus::createICmpSGT(Value *lhs, Value *rhs)
1431 return V(::builder->CreateICmpSGT(V(lhs), V(rhs)));
1434 Value *Nucleus::createICmpSGE(Value *lhs, Value *rhs)
1436 return V(::builder->CreateICmpSGE(V(lhs), V(rhs)));
1439 Value *Nucleus::createICmpSLT(Value *lhs, Value *rhs)
1441 return V(::builder->CreateICmpSLT(V(lhs), V(rhs)));
1444 Value *Nucleus::createICmpSLE(Value *lhs, Value *rhs)
1446 return V(::builder->CreateICmpSLE(V(lhs), V(rhs)));
1449 Value *Nucleus::createFCmpOEQ(Value *lhs, Value *rhs)
1451 return V(::builder->CreateFCmpOEQ(V(lhs), V(rhs)));
1454 Value *Nucleus::createFCmpOGT(Value *lhs, Value *rhs)
1456 return V(::builder->CreateFCmpOGT(V(lhs), V(rhs)));
1459 Value *Nucleus::createFCmpOGE(Value *lhs, Value *rhs)
1461 return V(::builder->CreateFCmpOGE(V(lhs), V(rhs)));
1464 Value *Nucleus::createFCmpOLT(Value *lhs, Value *rhs)
1466 return V(::builder->CreateFCmpOLT(V(lhs), V(rhs)));
1469 Value *Nucleus::createFCmpOLE(Value *lhs, Value *rhs)
1471 return V(::builder->CreateFCmpOLE(V(lhs), V(rhs)));
1474 Value *Nucleus::createFCmpONE(Value *lhs, Value *rhs)
1476 return V(::builder->CreateFCmpONE(V(lhs), V(rhs)));
1479 Value *Nucleus::createFCmpORD(Value *lhs, Value *rhs)
1481 return V(::builder->CreateFCmpORD(V(lhs), V(rhs)));
1484 Value *Nucleus::createFCmpUNO(Value *lhs, Value *rhs)
1486 return V(::builder->CreateFCmpUNO(V(lhs), V(rhs)));
1489 Value *Nucleus::createFCmpUEQ(Value *lhs, Value *rhs)
1491 return V(::builder->CreateFCmpUEQ(V(lhs), V(rhs)));
1494 Value *Nucleus::createFCmpUGT(Value *lhs, Value *rhs)
1496 return V(::builder->CreateFCmpUGT(V(lhs), V(rhs)));
1499 Value *Nucleus::createFCmpUGE(Value *lhs, Value *rhs)
1501 return V(::builder->CreateFCmpUGE(V(lhs), V(rhs)));
1504 Value *Nucleus::createFCmpULT(Value *lhs, Value *rhs)
1506 return V(::builder->CreateFCmpULT(V(lhs), V(rhs)));
1509 Value *Nucleus::createFCmpULE(Value *lhs, Value *rhs)
1511 return V(::builder->CreateFCmpULE(V(lhs), V(rhs)));
1514 Value *Nucleus::createFCmpUNE(Value *lhs, Value *rhs)
1516 return V(::builder->CreateFCmpUNE(V(lhs), V(rhs)));
1519 Value *Nucleus::createExtractElement(Value *vector, Type *type, int index)
1521 assert(V(vector)->getType()->getContainedType(0) == T(type));
1522 return V(::builder->CreateExtractElement(V(vector), V(createConstantInt(index))));
1525 Value *Nucleus::createInsertElement(Value *vector, Value *element, int index)
1527 return V(::builder->CreateInsertElement(V(vector), V(element), V(createConstantInt(index))));
1530 Value *Nucleus::createShuffleVector(Value *v1, Value *v2, const int *select)
1532 int size = llvm::cast<llvm::VectorType>(V(v1)->getType())->getNumElements();
1533 const int maxSize = 16;
1534 llvm::Constant *swizzle[maxSize];
1535 assert(size <= maxSize);
1537 for(int i = 0; i < size; i++)
1539 swizzle[i] = llvm::ConstantInt::get(llvm::Type::getInt32Ty(*::context), select[i]);
1542 llvm::Value *shuffle = llvm::ConstantVector::get(llvm::ArrayRef<llvm::Constant*>(swizzle, size));
1544 return V(::builder->CreateShuffleVector(V(v1), V(v2), shuffle));
1547 Value *Nucleus::createSelect(Value *c, Value *ifTrue, Value *ifFalse)
1549 return V(::builder->CreateSelect(V(c), V(ifTrue), V(ifFalse)));
1552 SwitchCases *Nucleus::createSwitch(Value *control, BasicBlock *defaultBranch, unsigned numCases)
1554 return reinterpret_cast<SwitchCases*>(::builder->CreateSwitch(V(control), B(defaultBranch), numCases));
1557 void Nucleus::addSwitchCase(SwitchCases *switchCases, int label, BasicBlock *branch)
1559 llvm::SwitchInst *sw = reinterpret_cast<llvm::SwitchInst *>(switchCases);
1560 sw->addCase(llvm::ConstantInt::get(llvm::Type::getInt32Ty(*::context), label, true), B(branch));
1563 void Nucleus::createUnreachable()
1565 ::builder->CreateUnreachable();
1568 Type *Nucleus::getPointerType(Type *ElementType)
1570 return T(llvm::PointerType::get(T(ElementType), 0));
1573 Value *Nucleus::createNullValue(Type *Ty)
1575 return V(llvm::Constant::getNullValue(T(Ty)));
1578 Value *Nucleus::createConstantLong(int64_t i)
1580 return V(llvm::ConstantInt::get(llvm::Type::getInt64Ty(*::context), i, true));
1583 Value *Nucleus::createConstantInt(int i)
1585 return V(llvm::ConstantInt::get(llvm::Type::getInt32Ty(*::context), i, true));
1588 Value *Nucleus::createConstantInt(unsigned int i)
1590 return V(llvm::ConstantInt::get(llvm::Type::getInt32Ty(*::context), i, false));
1593 Value *Nucleus::createConstantBool(bool b)
1595 return V(llvm::ConstantInt::get(llvm::Type::getInt1Ty(*::context), b));
1598 Value *Nucleus::createConstantByte(signed char i)
1600 return V(llvm::ConstantInt::get(llvm::Type::getInt8Ty(*::context), i, true));
1603 Value *Nucleus::createConstantByte(unsigned char i)
1605 return V(llvm::ConstantInt::get(llvm::Type::getInt8Ty(*::context), i, false));
1608 Value *Nucleus::createConstantShort(short i)
1610 return V(llvm::ConstantInt::get(llvm::Type::getInt16Ty(*::context), i, true));
1613 Value *Nucleus::createConstantShort(unsigned short i)
1615 return V(llvm::ConstantInt::get(llvm::Type::getInt16Ty(*::context), i, false));
1618 Value *Nucleus::createConstantFloat(float x)
1620 return V(llvm::ConstantFP::get(T(Float::getType()), x));
1623 Value *Nucleus::createNullPointer(Type *Ty)
1625 return V(llvm::ConstantPointerNull::get(llvm::PointerType::get(T(Ty), 0)));
1628 Value *Nucleus::createConstantVector(const int64_t *constants, Type *type)
1630 assert(llvm::isa<llvm::VectorType>(T(type)));
1631 const int numConstants = elementCount(type); // Number of provided constants for the (emulated) type.
1632 const int numElements = llvm::cast<llvm::VectorType>(T(type))->getNumElements(); // Number of elements of the underlying vector type.
1633 assert(numElements <= 16 && numConstants <= numElements);
1634 llvm::Constant *constantVector[16];
1636 for(int i = 0; i < numElements; i++)
1638 constantVector[i] = llvm::ConstantInt::get(T(type)->getContainedType(0), constants[i % numConstants]);
1641 return V(llvm::ConstantVector::get(llvm::ArrayRef<llvm::Constant*>(constantVector, numElements)));
1644 Value *Nucleus::createConstantVector(const double *constants, Type *type)
1646 assert(llvm::isa<llvm::VectorType>(T(type)));
1647 const int numConstants = elementCount(type); // Number of provided constants for the (emulated) type.
1648 const int numElements = llvm::cast<llvm::VectorType>(T(type))->getNumElements(); // Number of elements of the underlying vector type.
1649 assert(numElements <= 8 && numConstants <= numElements);
1650 llvm::Constant *constantVector[8];
1652 for(int i = 0; i < numElements; i++)
1654 constantVector[i] = llvm::ConstantFP::get(T(type)->getContainedType(0), constants[i % numConstants]);
1657 return V(llvm::ConstantVector::get(llvm::ArrayRef<llvm::Constant*>(constantVector, numElements)));
1660 Type *Void::getType()
1662 return T(llvm::Type::getVoidTy(*::context));
1665 Type *Bool::getType()
1667 return T(llvm::Type::getInt1Ty(*::context));
1670 Type *Byte::getType()
1672 return T(llvm::Type::getInt8Ty(*::context));
1675 Type *SByte::getType()
1677 return T(llvm::Type::getInt8Ty(*::context));
1680 Type *Short::getType()
1682 return T(llvm::Type::getInt16Ty(*::context));
1685 Type *UShort::getType()
1687 return T(llvm::Type::getInt16Ty(*::context));
1690 Type *Byte4::getType()
1692 return T(Type_v4i8);
1695 Type *SByte4::getType()
1697 return T(Type_v4i8);
1700 RValue<Byte8> AddSat(RValue<Byte8> x, RValue<Byte8> y)
1702 #if defined(__i386__) || defined(__x86_64__)
1703 return x86::paddusb(x, y);
1705 return As<Byte8>(V(lowerPUADDSAT(V(x.value), V(y.value))));
1709 RValue<Byte8> SubSat(RValue<Byte8> x, RValue<Byte8> y)
1711 #if defined(__i386__) || defined(__x86_64__)
1712 return x86::psubusb(x, y);
1714 return As<Byte8>(V(lowerPUSUBSAT(V(x.value), V(y.value))));
1718 RValue<Int> SignMask(RValue<Byte8> x)
1720 #if defined(__i386__) || defined(__x86_64__)
1721 return x86::pmovmskb(x);
1723 return As<Int>(V(lowerSignMask(V(x.value), T(Int::getType()))));
1727 // RValue<Byte8> CmpGT(RValue<Byte8> x, RValue<Byte8> y)
1729 //#if defined(__i386__) || defined(__x86_64__)
1730 // return x86::pcmpgtb(x, y); // FIXME: Signedness
1732 // return As<Byte8>(V(lowerPCMP(llvm::ICmpInst::ICMP_SGT, V(x.value), V(y.value), T(Byte8::getType()))));
1736 RValue<Byte8> CmpEQ(RValue<Byte8> x, RValue<Byte8> y)
1738 #if defined(__i386__) || defined(__x86_64__)
1739 return x86::pcmpeqb(x, y);
1741 return As<Byte8>(V(lowerPCMP(llvm::ICmpInst::ICMP_EQ, V(x.value), V(y.value), T(Byte8::getType()))));
1745 Type *Byte8::getType()
1747 return T(Type_v8i8);
1750 RValue<SByte8> AddSat(RValue<SByte8> x, RValue<SByte8> y)
1752 #if defined(__i386__) || defined(__x86_64__)
1753 return x86::paddsb(x, y);
1755 return As<SByte8>(V(lowerPSADDSAT(V(x.value), V(y.value))));
1759 RValue<SByte8> SubSat(RValue<SByte8> x, RValue<SByte8> y)
1761 #if defined(__i386__) || defined(__x86_64__)
1762 return x86::psubsb(x, y);
1764 return As<SByte8>(V(lowerPSSUBSAT(V(x.value), V(y.value))));
1768 RValue<Int> SignMask(RValue<SByte8> x)
1770 #if defined(__i386__) || defined(__x86_64__)
1771 return x86::pmovmskb(As<Byte8>(x));
1773 return As<Int>(V(lowerSignMask(V(x.value), T(Int::getType()))));
1777 RValue<Byte8> CmpGT(RValue<SByte8> x, RValue<SByte8> y)
1779 #if defined(__i386__) || defined(__x86_64__)
1780 return x86::pcmpgtb(x, y);
1782 return As<Byte8>(V(lowerPCMP(llvm::ICmpInst::ICMP_SGT, V(x.value), V(y.value), T(Byte8::getType()))));
1786 RValue<Byte8> CmpEQ(RValue<SByte8> x, RValue<SByte8> y)
1788 #if defined(__i386__) || defined(__x86_64__)
1789 return x86::pcmpeqb(As<Byte8>(x), As<Byte8>(y));
1791 return As<Byte8>(V(lowerPCMP(llvm::ICmpInst::ICMP_EQ, V(x.value), V(y.value), T(Byte8::getType()))));
1795 Type *SByte8::getType()
1797 return T(Type_v8i8);
1800 Type *Byte16::getType()
1802 return T(llvm::VectorType::get(T(Byte::getType()), 16));
1805 Type *SByte16::getType()
1807 return T(llvm::VectorType::get(T(SByte::getType()), 16));
1810 Type *Short2::getType()
1812 return T(Type_v2i16);
1815 Type *UShort2::getType()
1817 return T(Type_v2i16);
1820 Short4::Short4(RValue<Int4> cast)
1822 int select[8] = {0, 2, 4, 6, 0, 2, 4, 6};
1823 Value *short8 = Nucleus::createBitCast(cast.value, Short8::getType());
1825 Value *packed = Nucleus::createShuffleVector(short8, short8, select);
1826 Value *short4 = As<Short4>(Int2(As<Int4>(packed))).value;
1831 // Short4::Short4(RValue<Float> cast)
1835 Short4::Short4(RValue<Float4> cast)
1837 Int4 v4i32 = Int4(cast);
1838 #if defined(__i386__) || defined(__x86_64__)
1839 v4i32 = As<Int4>(x86::packssdw(v4i32, v4i32));
1841 Value *v = v4i32.loadValue();
1842 v4i32 = As<Int4>(V(lowerPack(V(v), V(v), true)));
1845 storeValue(As<Short4>(Int2(v4i32)).value);
1848 RValue<Short4> operator<<(RValue<Short4> lhs, unsigned char rhs)
1850 #if defined(__i386__) || defined(__x86_64__)
1851 // return RValue<Short4>(Nucleus::createShl(lhs.value, rhs.value));
1853 return x86::psllw(lhs, rhs);
1855 return As<Short4>(V(lowerVectorShl(V(lhs.value), rhs)));
1859 RValue<Short4> operator>>(RValue<Short4> lhs, unsigned char rhs)
1861 #if defined(__i386__) || defined(__x86_64__)
1862 return x86::psraw(lhs, rhs);
1864 return As<Short4>(V(lowerVectorAShr(V(lhs.value), rhs)));
1868 RValue<Short4> Max(RValue<Short4> x, RValue<Short4> y)
1870 #if defined(__i386__) || defined(__x86_64__)
1871 return x86::pmaxsw(x, y);
1873 return RValue<Short4>(V(lowerPMINMAX(V(x.value), V(y.value), llvm::ICmpInst::ICMP_SGT)));
1877 RValue<Short4> Min(RValue<Short4> x, RValue<Short4> y)
1879 #if defined(__i386__) || defined(__x86_64__)
1880 return x86::pminsw(x, y);
1882 return RValue<Short4>(V(lowerPMINMAX(V(x.value), V(y.value), llvm::ICmpInst::ICMP_SLT)));
1886 RValue<Short4> AddSat(RValue<Short4> x, RValue<Short4> y)
1888 #if defined(__i386__) || defined(__x86_64__)
1889 return x86::paddsw(x, y);
1891 return As<Short4>(V(lowerPSADDSAT(V(x.value), V(y.value))));
1895 RValue<Short4> SubSat(RValue<Short4> x, RValue<Short4> y)
1897 #if defined(__i386__) || defined(__x86_64__)
1898 return x86::psubsw(x, y);
1900 return As<Short4>(V(lowerPSSUBSAT(V(x.value), V(y.value))));
1904 RValue<Short4> MulHigh(RValue<Short4> x, RValue<Short4> y)
1906 #if defined(__i386__) || defined(__x86_64__)
1907 return x86::pmulhw(x, y);
1909 return As<Short4>(V(lowerMulHigh(V(x.value), V(y.value), true)));
1913 RValue<Int2> MulAdd(RValue<Short4> x, RValue<Short4> y)
1915 #if defined(__i386__) || defined(__x86_64__)
1916 return x86::pmaddwd(x, y);
1918 return As<Int2>(V(lowerMulAdd(V(x.value), V(y.value))));
1922 RValue<SByte8> PackSigned(RValue<Short4> x, RValue<Short4> y)
1924 #if defined(__i386__) || defined(__x86_64__)
1925 auto result = x86::packsswb(x, y);
1927 auto result = V(lowerPack(V(x.value), V(y.value), true));
1929 return As<SByte8>(Swizzle(As<Int4>(result), 0x88));
1932 RValue<Byte8> PackUnsigned(RValue<Short4> x, RValue<Short4> y)
1934 #if defined(__i386__) || defined(__x86_64__)
1935 auto result = x86::packuswb(x, y);
1937 auto result = V(lowerPack(V(x.value), V(y.value), false));
1939 return As<Byte8>(Swizzle(As<Int4>(result), 0x88));
1942 RValue<Short4> CmpGT(RValue<Short4> x, RValue<Short4> y)
1944 #if defined(__i386__) || defined(__x86_64__)
1945 return x86::pcmpgtw(x, y);
1947 return As<Short4>(V(lowerPCMP(llvm::ICmpInst::ICMP_SGT, V(x.value), V(y.value), T(Short4::getType()))));
1951 RValue<Short4> CmpEQ(RValue<Short4> x, RValue<Short4> y)
1953 #if defined(__i386__) || defined(__x86_64__)
1954 return x86::pcmpeqw(x, y);
1956 return As<Short4>(V(lowerPCMP(llvm::ICmpInst::ICMP_EQ, V(x.value), V(y.value), T(Short4::getType()))));
1960 Type *Short4::getType()
1962 return T(Type_v4i16);
1965 UShort4::UShort4(RValue<Float4> cast, bool saturate)
1969 #if defined(__i386__) || defined(__x86_64__)
1970 if(CPUID::supportsSSE4_1())
1972 Int4 int4(Min(cast, Float4(0xFFFF))); // packusdw takes care of 0x0000 saturation
1973 *this = As<Short4>(PackUnsigned(int4, int4));
1978 *this = Short4(Int4(Max(Min(cast, Float4(0xFFFF)), Float4(0x0000))));
1983 *this = Short4(Int4(cast));
1987 RValue<UShort4> operator<<(RValue<UShort4> lhs, unsigned char rhs)
1989 #if defined(__i386__) || defined(__x86_64__)
1990 // return RValue<Short4>(Nucleus::createShl(lhs.value, rhs.value));
1992 return As<UShort4>(x86::psllw(As<Short4>(lhs), rhs));
1994 return As<UShort4>(V(lowerVectorShl(V(lhs.value), rhs)));
1998 RValue<UShort4> operator>>(RValue<UShort4> lhs, unsigned char rhs)
2000 #if defined(__i386__) || defined(__x86_64__)
2001 // return RValue<Short4>(Nucleus::createLShr(lhs.value, rhs.value));
2003 return x86::psrlw(lhs, rhs);
2005 return As<UShort4>(V(lowerVectorLShr(V(lhs.value), rhs)));
2009 RValue<UShort4> Max(RValue<UShort4> x, RValue<UShort4> y)
2011 return RValue<UShort4>(Max(As<Short4>(x) - Short4(0x8000u, 0x8000u, 0x8000u, 0x8000u), As<Short4>(y) - Short4(0x8000u, 0x8000u, 0x8000u, 0x8000u)) + Short4(0x8000u, 0x8000u, 0x8000u, 0x8000u));
2014 RValue<UShort4> Min(RValue<UShort4> x, RValue<UShort4> y)
2016 return RValue<UShort4>(Min(As<Short4>(x) - Short4(0x8000u, 0x8000u, 0x8000u, 0x8000u), As<Short4>(y) - Short4(0x8000u, 0x8000u, 0x8000u, 0x8000u)) + Short4(0x8000u, 0x8000u, 0x8000u, 0x8000u));
2019 RValue<UShort4> AddSat(RValue<UShort4> x, RValue<UShort4> y)
2021 #if defined(__i386__) || defined(__x86_64__)
2022 return x86::paddusw(x, y);
2024 return As<UShort4>(V(lowerPUADDSAT(V(x.value), V(y.value))));
2028 RValue<UShort4> SubSat(RValue<UShort4> x, RValue<UShort4> y)
2030 #if defined(__i386__) || defined(__x86_64__)
2031 return x86::psubusw(x, y);
2033 return As<UShort4>(V(lowerPUSUBSAT(V(x.value), V(y.value))));
2037 RValue<UShort4> MulHigh(RValue<UShort4> x, RValue<UShort4> y)
2039 #if defined(__i386__) || defined(__x86_64__)
2040 return x86::pmulhuw(x, y);
2042 return As<UShort4>(V(lowerMulHigh(V(x.value), V(y.value), false)));
2046 RValue<UShort4> Average(RValue<UShort4> x, RValue<UShort4> y)
2048 #if defined(__i386__) || defined(__x86_64__)
2049 return x86::pavgw(x, y);
2051 return As<UShort4>(V(lowerPAVG(V(x.value), V(y.value))));
2055 Type *UShort4::getType()
2057 return T(Type_v4i16);
2060 RValue<Short8> operator<<(RValue<Short8> lhs, unsigned char rhs)
2062 #if defined(__i386__) || defined(__x86_64__)
2063 return x86::psllw(lhs, rhs);
2065 return As<Short8>(V(lowerVectorShl(V(lhs.value), rhs)));
2069 RValue<Short8> operator>>(RValue<Short8> lhs, unsigned char rhs)
2071 #if defined(__i386__) || defined(__x86_64__)
2072 return x86::psraw(lhs, rhs);
2074 return As<Short8>(V(lowerVectorAShr(V(lhs.value), rhs)));
2078 RValue<Int4> MulAdd(RValue<Short8> x, RValue<Short8> y)
2080 #if defined(__i386__) || defined(__x86_64__)
2081 return x86::pmaddwd(x, y);
2083 return As<Int4>(V(lowerMulAdd(V(x.value), V(y.value))));
2087 RValue<Short8> MulHigh(RValue<Short8> x, RValue<Short8> y)
2089 #if defined(__i386__) || defined(__x86_64__)
2090 return x86::pmulhw(x, y);
2092 return As<Short8>(V(lowerMulHigh(V(x.value), V(y.value), true)));
2096 Type *Short8::getType()
2098 return T(llvm::VectorType::get(T(Short::getType()), 8));
2101 RValue<UShort8> operator<<(RValue<UShort8> lhs, unsigned char rhs)
2103 #if defined(__i386__) || defined(__x86_64__)
2104 return As<UShort8>(x86::psllw(As<Short8>(lhs), rhs));
2106 return As<UShort8>(V(lowerVectorShl(V(lhs.value), rhs)));
2110 RValue<UShort8> operator>>(RValue<UShort8> lhs, unsigned char rhs)
2112 #if defined(__i386__) || defined(__x86_64__)
2113 return x86::psrlw(lhs, rhs); // FIXME: Fallback required
2115 return As<UShort8>(V(lowerVectorLShr(V(lhs.value), rhs)));
// Rearranges the eight 16-bit lanes of 'x' per select0..select7 by performing
// a byte-level shuffle through a Byte16 view.
// NOTE(review): the declaration/initializer of the 'pshufb' byte-index table
// (built from the select arguments) was lost in extraction — restore it from
// upstream before this compiles.
2119 RValue<UShort8> Swizzle(RValue<UShort8> x, char select0, char select1, char select2, char select3, char select4, char select5, char select6, char select7)
// View the vector as 16 bytes so each 16-bit lane is a byte pair.
2141 Value *byte16 = Nucleus::createBitCast(x.value, Byte16::getType());
// Apply the byte-index table to move both bytes of each selected lane.
2142 Value *shuffle = Nucleus::createShuffleVector(byte16, byte16, pshufb);
// Reinterpret the shuffled bytes back as eight unsigned shorts.
2143 Value *short8 = Nucleus::createBitCast(shuffle, UShort8::getType());
2145 return RValue<UShort8>(short8);
2148 RValue<UShort8> MulHigh(RValue<UShort8> x, RValue<UShort8> y)
2150 #if defined(__i386__) || defined(__x86_64__)
2151 return x86::pmulhuw(x, y);
2153 return As<UShort8>(V(lowerMulHigh(V(x.value), V(y.value), false)));
2157 Type *UShort8::getType()
2159 return T(llvm::VectorType::get(T(UShort::getType()), 8));
2162 RValue<Int> operator++(Int &val, int) // Post-increment
2164 RValue<Int> res = val;
2166 Value *inc = Nucleus::createAdd(res.value, Nucleus::createConstantInt(1));
2167 val.storeValue(inc);
2172 const Int &operator++(Int &val) // Pre-increment
2174 Value *inc = Nucleus::createAdd(val.loadValue(), Nucleus::createConstantInt(1));
2175 val.storeValue(inc);
2180 RValue<Int> operator--(Int &val, int) // Post-decrement
2182 RValue<Int> res = val;
2184 Value *inc = Nucleus::createSub(res.value, Nucleus::createConstantInt(1));
2185 val.storeValue(inc);
2190 const Int &operator--(Int &val) // Pre-decrement
2192 Value *inc = Nucleus::createSub(val.loadValue(), Nucleus::createConstantInt(1));
2193 val.storeValue(inc);
2198 RValue<Int> RoundInt(RValue<Float> cast)
2200 #if defined(__i386__) || defined(__x86_64__)
2201 return x86::cvtss2si(cast);
2203 return RValue<Int>(V(lowerRoundInt(V(cast.value), T(Int::getType()))));
2207 Type *Int::getType()
2209 return T(llvm::Type::getInt32Ty(*::context));
2212 Type *Long::getType()
2214 return T(llvm::Type::getInt64Ty(*::context));
2217 UInt::UInt(RValue<Float> cast)
2219 // Note: createFPToUI is broken, must perform conversion using createFPtoSI
2220 // Value *integer = Nucleus::createFPToUI(cast.value, UInt::getType());
2222 // Smallest positive value representable in UInt, but not in Int
2223 const unsigned int ustart = 0x80000000u;
2224 const float ustartf = float(ustart);
2226 // If the value is negative, store 0, otherwise store the result of the conversion
2227 storeValue((~(As<Int>(cast) >> 31) &
2228 // Check if the value can be represented as an Int
2229 IfThenElse(cast >= ustartf,
2230 // If the value is too large, subtract ustart and re-add it after conversion.
2231 As<Int>(As<UInt>(Int(cast - Float(ustartf))) + UInt(ustart)),
2232 // Otherwise, just convert normally
2236 RValue<UInt> operator++(UInt &val, int) // Post-increment
2238 RValue<UInt> res = val;
2240 Value *inc = Nucleus::createAdd(res.value, Nucleus::createConstantInt(1));
2241 val.storeValue(inc);
2246 const UInt &operator++(UInt &val) // Pre-increment
2248 Value *inc = Nucleus::createAdd(val.loadValue(), Nucleus::createConstantInt(1));
2249 val.storeValue(inc);
2254 RValue<UInt> operator--(UInt &val, int) // Post-decrement
2256 RValue<UInt> res = val;
2258 Value *inc = Nucleus::createSub(res.value, Nucleus::createConstantInt(1));
2259 val.storeValue(inc);
2264 const UInt &operator--(UInt &val) // Pre-decrement
2266 Value *inc = Nucleus::createSub(val.loadValue(), Nucleus::createConstantInt(1));
2267 val.storeValue(inc);
2272 // RValue<UInt> RoundUInt(RValue<Float> cast)
2274 //#if defined(__i386__) || defined(__x86_64__)
2275 // return x86::cvtss2si(val); // FIXME: Unsigned
2277 // return IfThenElse(cast > 0.0f, Int(cast + 0.5f), Int(cast - 0.5f));
2281 Type *UInt::getType()
2283 return T(llvm::Type::getInt32Ty(*::context));
2286 // Int2::Int2(RValue<Int> cast)
2288 // Value *extend = Nucleus::createZExt(cast.value, Long::getType());
2289 // Value *vector = Nucleus::createBitCast(extend, Int2::getType());
2291 // int shuffle[2] = {0, 0};
2292 // Value *replicate = Nucleus::createShuffleVector(vector, vector, shuffle);
2294 // storeValue(replicate);
2297 RValue<Int2> operator<<(RValue<Int2> lhs, unsigned char rhs)
2299 #if defined(__i386__) || defined(__x86_64__)
2300 // return RValue<Int2>(Nucleus::createShl(lhs.value, rhs.value));
2302 return x86::pslld(lhs, rhs);
2304 return As<Int2>(V(lowerVectorShl(V(lhs.value), rhs)));
2308 RValue<Int2> operator>>(RValue<Int2> lhs, unsigned char rhs)
2310 #if defined(__i386__) || defined(__x86_64__)
2311 // return RValue<Int2>(Nucleus::createAShr(lhs.value, rhs.value));
2313 return x86::psrad(lhs, rhs);
2315 return As<Int2>(V(lowerVectorAShr(V(lhs.value), rhs)));
2319 Type *Int2::getType()
2321 return T(Type_v2i32);
2324 RValue<UInt2> operator<<(RValue<UInt2> lhs, unsigned char rhs)
2326 #if defined(__i386__) || defined(__x86_64__)
2327 // return RValue<UInt2>(Nucleus::createShl(lhs.value, rhs.value));
2329 return As<UInt2>(x86::pslld(As<Int2>(lhs), rhs));
2331 return As<UInt2>(V(lowerVectorShl(V(lhs.value), rhs)));
2335 RValue<UInt2> operator>>(RValue<UInt2> lhs, unsigned char rhs)
2337 #if defined(__i386__) || defined(__x86_64__)
2338 // return RValue<UInt2>(Nucleus::createLShr(lhs.value, rhs.value));
2340 return x86::psrld(lhs, rhs);
2342 return As<UInt2>(V(lowerVectorLShr(V(lhs.value), rhs)));
2346 Type *UInt2::getType()
2348 return T(Type_v2i32);
2351 Int4::Int4(RValue<Byte4> cast) : XYZW(this)
2353 #if defined(__i386__) || defined(__x86_64__)
2354 if(CPUID::supportsSSE4_1())
2356 *this = x86::pmovzxbd(As<Byte16>(cast));
2361 int swizzle[16] = {0, 16, 1, 17, 2, 18, 3, 19, 4, 20, 5, 21, 6, 22, 7, 23};
2362 Value *a = Nucleus::createBitCast(cast.value, Byte16::getType());
2363 Value *b = Nucleus::createShuffleVector(a, Nucleus::createNullValue(Byte16::getType()), swizzle);
2365 int swizzle2[8] = {0, 8, 1, 9, 2, 10, 3, 11};
2366 Value *c = Nucleus::createBitCast(b, Short8::getType());
2367 Value *d = Nucleus::createShuffleVector(c, Nucleus::createNullValue(Short8::getType()), swizzle2);
2369 *this = As<Int4>(d);
2373 Int4::Int4(RValue<SByte4> cast) : XYZW(this)
2375 #if defined(__i386__) || defined(__x86_64__)
2376 if(CPUID::supportsSSE4_1())
2378 *this = x86::pmovsxbd(As<SByte16>(cast));
2383 int swizzle[16] = {0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7};
2384 Value *a = Nucleus::createBitCast(cast.value, Byte16::getType());
2385 Value *b = Nucleus::createShuffleVector(a, a, swizzle);
2387 int swizzle2[8] = {0, 0, 1, 1, 2, 2, 3, 3};
2388 Value *c = Nucleus::createBitCast(b, Short8::getType());
2389 Value *d = Nucleus::createShuffleVector(c, c, swizzle2);
2391 *this = As<Int4>(d) >> 24;
2395 Int4::Int4(RValue<Short4> cast) : XYZW(this)
2397 #if defined(__i386__) || defined(__x86_64__)
2398 if(CPUID::supportsSSE4_1())
2400 *this = x86::pmovsxwd(As<Short8>(cast));
2405 int swizzle[8] = {0, 0, 1, 1, 2, 2, 3, 3};
2406 Value *c = Nucleus::createShuffleVector(cast.value, cast.value, swizzle);
2407 *this = As<Int4>(c) >> 16;
2411 Int4::Int4(RValue<UShort4> cast) : XYZW(this)
2413 #if defined(__i386__) || defined(__x86_64__)
2414 if(CPUID::supportsSSE4_1())
2416 *this = x86::pmovzxwd(As<UShort8>(cast));
2421 int swizzle[8] = {0, 8, 1, 9, 2, 10, 3, 11};
2422 Value *c = Nucleus::createShuffleVector(cast.value, Short8(0, 0, 0, 0, 0, 0, 0, 0).loadValue(), swizzle);
2423 *this = As<Int4>(c);
2427 Int4::Int4(RValue<Int> rhs) : XYZW(this)
2429 Value *vector = loadValue();
2430 Value *insert = Nucleus::createInsertElement(vector, rhs.value, 0);
2432 int swizzle[4] = {0, 0, 0, 0};
2433 Value *replicate = Nucleus::createShuffleVector(insert, insert, swizzle);
2435 storeValue(replicate);
// Per-lane logical left shift by an immediate count.
2438 RValue<Int4> operator<<(RValue<Int4> lhs, unsigned char rhs)
2440 #if defined(__i386__) || defined(__x86_64__)
// x86: pslld immediate-shift intrinsic.
2441 return x86::pslld(lhs, rhs);
// Other targets: generic lowering helper.
2443 return As<Int4>(V(lowerVectorShl(V(lhs.value), rhs)));
// Per-lane arithmetic right shift by an immediate count (sign-preserving).
2447 RValue<Int4> operator>>(RValue<Int4> lhs, unsigned char rhs)
2449 #if defined(__i386__) || defined(__x86_64__)
2450 return x86::psrad(lhs, rhs);
2452 return As<Int4>(V(lowerVectorAShr(V(lhs.value), rhs)));
// Signed Int4 comparisons returning a per-lane mask (~0 for true, 0 for false).
// All of these work around an LLVM bug where SExt(ICmpCC()) yields 0/1 instead
// of 0/~0: they evaluate the INVERSE predicate and XOR with all-ones to get
// the intended mask.
2456 RValue<Int4> CmpEQ(RValue<Int4> x, RValue<Int4> y)
2458 // FIXME: An LLVM bug causes SExt(ICmpCC()) to produce 0 or 1 instead of 0 or ~0
2459 // Restore the following line when LLVM is updated to a version where this issue is fixed.
2460 // return RValue<Int4>(Nucleus::createSExt(Nucleus::createICmpEQ(x.value, y.value), Int4::getType()));
2461 return RValue<Int4>(Nucleus::createSExt(Nucleus::createICmpNE(x.value, y.value), Int4::getType())) ^ Int4(0xFFFFFFFF);
2464 RValue<Int4> CmpLT(RValue<Int4> x, RValue<Int4> y)
2466 // FIXME: An LLVM bug causes SExt(ICmpCC()) to produce 0 or 1 instead of 0 or ~0
2467 // Restore the following line when LLVM is updated to a version where this issue is fixed.
2468 // return RValue<Int4>(Nucleus::createSExt(Nucleus::createICmpSLT(x.value, y.value), Int4::getType()));
2469 return RValue<Int4>(Nucleus::createSExt(Nucleus::createICmpSGE(x.value, y.value), Int4::getType())) ^ Int4(0xFFFFFFFF);
2472 RValue<Int4> CmpLE(RValue<Int4> x, RValue<Int4> y)
2474 // FIXME: An LLVM bug causes SExt(ICmpCC()) to produce 0 or 1 instead of 0 or ~0
2475 // Restore the following line when LLVM is updated to a version where this issue is fixed.
2476 // return RValue<Int4>(Nucleus::createSExt(Nucleus::createICmpSLE(x.value, y.value), Int4::getType()));
2477 return RValue<Int4>(Nucleus::createSExt(Nucleus::createICmpSGT(x.value, y.value), Int4::getType())) ^ Int4(0xFFFFFFFF);
2480 RValue<Int4> CmpNEQ(RValue<Int4> x, RValue<Int4> y)
2482 // FIXME: An LLVM bug causes SExt(ICmpCC()) to produce 0 or 1 instead of 0 or ~0
2483 // Restore the following line when LLVM is updated to a version where this issue is fixed.
2484 // return RValue<Int4>(Nucleus::createSExt(Nucleus::createICmpNE(x.value, y.value), Int4::getType()));
2485 return RValue<Int4>(Nucleus::createSExt(Nucleus::createICmpEQ(x.value, y.value), Int4::getType())) ^ Int4(0xFFFFFFFF);
2488 RValue<Int4> CmpNLT(RValue<Int4> x, RValue<Int4> y)
2490 // FIXME: An LLVM bug causes SExt(ICmpCC()) to produce 0 or 1 instead of 0 or ~0
2491 // Restore the following line when LLVM is updated to a version where this issue is fixed.
2492 // return RValue<Int4>(Nucleus::createSExt(Nucleus::createICmpSGE(x.value, y.value), Int4::getType()));
2493 return RValue<Int4>(Nucleus::createSExt(Nucleus::createICmpSLT(x.value, y.value), Int4::getType())) ^ Int4(0xFFFFFFFF);
2496 RValue<Int4> CmpNLE(RValue<Int4> x, RValue<Int4> y)
2498 // FIXME: An LLVM bug causes SExt(ICmpCC()) to produce 0 or 1 instead of 0 or ~0
2499 // Restore the following line when LLVM is updated to a version where this issue is fixed.
2500 // return RValue<Int4>(Nucleus::createSExt(Nucleus::createICmpSGT(x.value, y.value), Int4::getType()));
2501 return RValue<Int4>(Nucleus::createSExt(Nucleus::createICmpSLE(x.value, y.value), Int4::getType())) ^ Int4(0xFFFFFFFF);
// Per-lane signed maximum of two Int4 vectors.
2504 RValue<Int4> Max(RValue<Int4> x, RValue<Int4> y)
2506 #if defined(__i386__) || defined(__x86_64__)
// SSE4.1 provides pmaxsd directly.
2507 if(CPUID::supportsSSE4_1())
2509 return x86::pmaxsd(x, y);
// Fallback: blend x and y through the comparison mask (select without branch).
2514 RValue<Int4> greater = CmpNLE(x, y);
2515 return (x & greater) | (y & ~greater);
// Per-lane signed minimum of two Int4 vectors.
2519 RValue<Int4> Min(RValue<Int4> x, RValue<Int4> y)
2521 #if defined(__i386__) || defined(__x86_64__)
2522 if(CPUID::supportsSSE4_1())
2524 return x86::pminsd(x, y);
// Fallback: mask-based select mirroring Max above.
2529 RValue<Int4> less = CmpLT(x, y);
2530 return (x & less) | (y & ~less);
// Rounds each float lane to the nearest integer (cvtps2dq semantics on x86).
2534 RValue<Int4> RoundInt(RValue<Float4> cast)
2536 #if defined(__i386__) || defined(__x86_64__)
2537 return x86::cvtps2dq(cast);
2539 return As<Int4>(V(lowerRoundInt(V(cast.value), T(Int4::getType()))));
// High 32 bits of the per-lane signed 32x32->64 multiply.
2543 RValue<Int4> MulHigh(RValue<Int4> x, RValue<Int4> y)
2545 // TODO: For x86, build an intrinsics version of this which uses shuffles + pmuludq.
2546 return As<Int4>(V(lowerMulHigh(V(x.value), V(y.value), true)));
// High 32 bits of the per-lane unsigned 32x32->64 multiply.
2549 RValue<UInt4> MulHigh(RValue<UInt4> x, RValue<UInt4> y)
2551 // TODO: For x86, build an intrinsics version of this which uses shuffles + pmuludq.
2552 return As<UInt4>(V(lowerMulHigh(V(x.value), V(y.value), false)));
// Packs two Int4 vectors into one Short8 with signed saturation (packssdw).
2555 RValue<Short8> PackSigned(RValue<Int4> x, RValue<Int4> y)
2557 #if defined(__i386__) || defined(__x86_64__)
2558 return x86::packssdw(x, y);
2560 return As<Short8>(V(lowerPack(V(x.value), V(y.value), true)));
// Packs two Int4 vectors into one UShort8 with unsigned saturation (packusdw).
2564 RValue<UShort8> PackUnsigned(RValue<Int4> x, RValue<Int4> y)
2566 #if defined(__i386__) || defined(__x86_64__)
2567 return x86::packusdw(x, y);
2569 return As<UShort8>(V(lowerPack(V(x.value), V(y.value), false)));
// Collects the sign bit of each lane into the low 4 bits of an Int.
2573 RValue<Int> SignMask(RValue<Int4> x)
2575 #if defined(__i386__) || defined(__x86_64__)
// movmskps reads the top bit of each 32-bit lane; the bitcast to Float4 is free.
2576 return x86::movmskps(As<Float4>(x));
2578 return As<Int>(V(lowerSignMask(V(x.value), T(Int::getType()))));
// LLVM type of Int4: a 4-element vector of 32-bit ints.
2582 Type *Int4::getType()
2584 return T(llvm::VectorType::get(T(Int::getType()), 4));
// Converts four floats to four unsigned 32-bit ints, clamping negatives to 0.
// Works around a broken createFPToUI by routing everything through signed
// conversion: values >= 2^31 are biased down by 2^31, converted, then re-biased.
2587 UInt4::UInt4(RValue<Float4> cast) : XYZW(this)
2589 // Note: createFPToUI is broken, must perform conversion using createFPtoSI
2590 // Value *xyzw = Nucleus::createFPToUI(cast.value, UInt4::getType());
2592 // Smallest positive value representable in UInt, but not in Int
2593 const unsigned int ustart = 0x80000000u;
2594 const float ustartf = float(ustart);
2596 // Check if the value can be represented as an Int
// uiValue is ~0 per lane where the input is >= 2^31 (needs the biased path).
2597 Int4 uiValue = CmpNLT(cast, Float4(ustartf));
2598 // If the value is too large, subtract ustart and re-add it after conversion.
2599 uiValue = (uiValue & As<Int4>(As<UInt4>(Int4(cast - Float4(ustartf))) + UInt4(ustart))) |
2600 // Otherwise, just convert normally
2601 (~uiValue & Int4(cast));
2602 // If the value is negative, store 0, otherwise store the result of the conversion
// (As<Int4>(cast) >> 31) is ~0 for negative floats; its complement zeroes them.
2603 storeValue((~(As<Int4>(cast) >> 31) & uiValue).value);
// Per-lane logical left shift by an immediate count. Left shift is identical
// for signed/unsigned, so the signed pslld path is reused.
2606 RValue<UInt4> operator<<(RValue<UInt4> lhs, unsigned char rhs)
2608 #if defined(__i386__) || defined(__x86_64__)
2609 return As<UInt4>(x86::pslld(As<Int4>(lhs), rhs));
2611 return As<UInt4>(V(lowerVectorShl(V(lhs.value), rhs)));
// Per-lane logical (zero-filling) right shift by an immediate count.
2615 RValue<UInt4> operator>>(RValue<UInt4> lhs, unsigned char rhs)
2617 #if defined(__i386__) || defined(__x86_64__)
2618 return x86::psrld(lhs, rhs);
2620 return As<UInt4>(V(lowerVectorLShr(V(lhs.value), rhs)));
// Unsigned UInt4 comparisons returning a per-lane mask (~0 true, 0 false).
// Where the direct predicate is affected by the SExt(ICmpCC()) LLVM bug, the
// inverse predicate is used and the result XORed with all-ones.
2624 RValue<UInt4> CmpEQ(RValue<UInt4> x, RValue<UInt4> y)
2626 // FIXME: An LLVM bug causes SExt(ICmpCC()) to produce 0 or 1 instead of 0 or ~0
2627 // Restore the following line when LLVM is updated to a version where this issue is fixed.
2628 // return RValue<UInt4>(Nucleus::createSExt(Nucleus::createICmpEQ(x.value, y.value), Int4::getType()));
2629 return RValue<UInt4>(Nucleus::createSExt(Nucleus::createICmpNE(x.value, y.value), Int4::getType())) ^ UInt4(0xFFFFFFFF);
// ULT is unaffected by the bug and is emitted directly.
2632 RValue<UInt4> CmpLT(RValue<UInt4> x, RValue<UInt4> y)
2634 return RValue<UInt4>(Nucleus::createSExt(Nucleus::createICmpULT(x.value, y.value), Int4::getType()));
2637 RValue<UInt4> CmpLE(RValue<UInt4> x, RValue<UInt4> y)
2639 // FIXME: An LLVM bug causes SExt(ICmpCC()) to produce 0 or 1 instead of 0 or ~0
2640 // Restore the following line when LLVM is updated to a version where this issue is fixed.
2641 // return RValue<UInt4>(Nucleus::createSExt(Nucleus::createICmpULE(x.value, y.value), Int4::getType()));
2642 return RValue<UInt4>(Nucleus::createSExt(Nucleus::createICmpUGT(x.value, y.value), Int4::getType())) ^ UInt4(0xFFFFFFFF);
// NE is unaffected and emitted directly.
2645 RValue<UInt4> CmpNEQ(RValue<UInt4> x, RValue<UInt4> y)
2647 return RValue<UInt4>(Nucleus::createSExt(Nucleus::createICmpNE(x.value, y.value), Int4::getType()));
2650 RValue<UInt4> CmpNLT(RValue<UInt4> x, RValue<UInt4> y)
2652 // FIXME: An LLVM bug causes SExt(ICmpCC()) to produce 0 or 1 instead of 0 or ~0
2653 // Restore the following line when LLVM is updated to a version where this issue is fixed.
2654 // return RValue<UInt4>(Nucleus::createSExt(Nucleus::createICmpUGE(x.value, y.value), Int4::getType()));
2655 return RValue<UInt4>(Nucleus::createSExt(Nucleus::createICmpULT(x.value, y.value), Int4::getType())) ^ UInt4(0xFFFFFFFF);
// UGT is unaffected and emitted directly.
2658 RValue<UInt4> CmpNLE(RValue<UInt4> x, RValue<UInt4> y)
2660 return RValue<UInt4>(Nucleus::createSExt(Nucleus::createICmpUGT(x.value, y.value), Int4::getType()));
// Per-lane unsigned maximum of two UInt4 vectors.
2663 RValue<UInt4> Max(RValue<UInt4> x, RValue<UInt4> y)
2665 #if defined(__i386__) || defined(__x86_64__)
// SSE4.1 provides pmaxud directly.
2666 if(CPUID::supportsSSE4_1())
2668 return x86::pmaxud(x, y);
// Fallback: branch-free select via the comparison mask.
2673 RValue<UInt4> greater = CmpNLE(x, y);
2674 return (x & greater) | (y & ~greater);
// Per-lane unsigned minimum of two UInt4 vectors.
2678 RValue<UInt4> Min(RValue<UInt4> x, RValue<UInt4> y)
2680 #if defined(__i386__) || defined(__x86_64__)
2681 if(CPUID::supportsSSE4_1())
2683 return x86::pminud(x, y);
2688 RValue<UInt4> less = CmpLT(x, y);
2689 return (x & less) | (y & ~less);
// LLVM type of UInt4: a 4-element vector of 32-bit ints (signedness is per-op).
2693 Type *UInt4::getType()
2695 return T(llvm::VectorType::get(T(UInt::getType()), 4));
// Half is stored as a raw 16-bit integer; conversions happen elsewhere.
2698 Type *Half::getType()
2700 return T(llvm::Type::getInt16Ty(*::context));
// Fast (partial-precision) scalar reciprocal approximation.
// When exactAtPow2 is requested, the rcpss estimate is rescaled so that
// powers of two invert exactly.
2703 RValue<Float> Rcp_pp(RValue<Float> x, bool exactAtPow2)
2705 #if defined(__i386__) || defined(__x86_64__)
2708 // rcpss uses a piecewise-linear approximation which minimizes the relative error
2709 // but is not exact at power-of-two values. Rectify by multiplying by the inverse.
// The correction factor is computed at compile time from the host's rcpss(1.0f).
2710 return x86::rcpss(x) * Float(1.0f / _mm_cvtss_f32(_mm_rcp_ss(_mm_set_ps1(1.0f))));
2712 return x86::rcpss(x);
2714 return As<Float>(V(lowerRCP(V(x.value))));
// Fast (partial-precision) scalar reciprocal square root approximation.
2718 RValue<Float> RcpSqrt_pp(RValue<Float> x)
2720 #if defined(__i386__) || defined(__x86_64__)
2721 return x86::rsqrtss(x);
2723 return As<Float>(V(lowerRSQRT(V(x.value))));
// Full-precision scalar square root.
2727 RValue<Float> Sqrt(RValue<Float> x)
2729 #if defined(__i386__) || defined(__x86_64__)
2730 return x86::sqrtss(x);
2732 return As<Float>(V(lowerSQRT(V(x.value))));
// Rounds a scalar float to the nearest integer value (as a float).
2736 RValue<Float> Round(RValue<Float> x)
2738 #if defined(__i386__) || defined(__x86_64__)
// SSE4.1 roundss with mode 0 = round-to-nearest.
2739 if(CPUID::supportsSSE4_1())
2741 return x86::roundss(x, 0);
// Pre-SSE4.1 x86 fallback: reuse the vector Round and extract lane x.
2745 return Float4(Round(Float4(x))).x;
2748 return RValue<Float>(V(lowerRound(V(x.value))));
// Truncates a scalar float toward zero (as a float).
2752 RValue<Float> Trunc(RValue<Float> x)
2754 #if defined(__i386__) || defined(__x86_64__)
// SSE4.1 roundss with mode 3 = truncate.
2755 if(CPUID::supportsSSE4_1())
2757 return x86::roundss(x, 3);
2761 return Float(Int(x)); // Rounded toward zero
2764 return RValue<Float>(V(lowerTrunc(V(x.value))));
// Fractional part of a scalar float: x - floor(x), result in [0, 1).
2768 RValue<Float> Frac(RValue<Float> x)
2770 #if defined(__i386__) || defined(__x86_64__)
2771 if(CPUID::supportsSSE4_1())
2773 return x - x86::floorss(x);
// Pre-SSE4.1 x86 fallback: vector Frac, extract lane x.
2777 return Float4(Frac(Float4(x))).x;
2780 // x - floor(x) can be 1.0 for very small negative x.
2781 // Clamp against the value just below 1.0.
// 0x3F7FFFFF is the largest float strictly below 1.0f.
2782 return Min(x - Floor(x), As<Float>(Int(0x3F7FFFFF)));
// Largest integer value (as a float) not greater than x.
2786 RValue<Float> Floor(RValue<Float> x)
2788 #if defined(__i386__) || defined(__x86_64__)
2789 if(CPUID::supportsSSE4_1())
2791 return x86::floorss(x);
2795 return Float4(Floor(Float4(x))).x;
2798 return RValue<Float>(V(lowerFloor(V(x.value))));
// Smallest integer value (as a float) not less than x.
2802 RValue<Float> Ceil(RValue<Float> x)
2804 #if defined(__i386__) || defined(__x86_64__)
2805 if(CPUID::supportsSSE4_1())
2807 return x86::ceilss(x);
// Non-SSE4.1 fallback: vector Ceil, extract lane x.
2812 return Float4(Ceil(Float4(x))).x;
// LLVM type of Float: a scalar 32-bit float.
2816 Type *Float::getType()
2818 return T(llvm::Type::getFloatTy(*::context));
// LLVM type of Float2: the cached <2 x float> type.
2821 Type *Float2::getType()
2823 return T(Type_v2f32);
// Broadcast constructor: replicates a scalar Float into all four lanes.
2826 Float4::Float4(RValue<Float> rhs) : XYZW(this)
2828 Value *vector = loadValue();
// Insert into lane 0, then splat with a {0,0,0,0} shuffle.
2829 Value *insert = Nucleus::createInsertElement(vector, rhs.value, 0);
2831 int swizzle[4] = {0, 0, 0, 0};
2832 Value *replicate = Nucleus::createShuffleVector(insert, insert, swizzle);
2834 storeValue(replicate);
// Per-lane maximum of two Float4 vectors (maxps semantics on x86).
2837 RValue<Float4> Max(RValue<Float4> x, RValue<Float4> y)
2839 #if defined(__i386__) || defined(__x86_64__)
2840 return x86::maxps(x, y);
2842 return As<Float4>(V(lowerPFMINMAX(V(x.value), V(y.value), llvm::FCmpInst::FCMP_OGT)));
// Per-lane minimum of two Float4 vectors (minps semantics on x86).
2846 RValue<Float4> Min(RValue<Float4> x, RValue<Float4> y)
2848 #if defined(__i386__) || defined(__x86_64__)
2849 return x86::minps(x, y);
2851 return As<Float4>(V(lowerPFMINMAX(V(x.value), V(y.value), llvm::FCmpInst::FCMP_OLT)));
// Fast (partial-precision) per-lane reciprocal; vector analogue of the scalar
// Rcp_pp above, with the same power-of-two rectification option.
2855 RValue<Float4> Rcp_pp(RValue<Float4> x, bool exactAtPow2)
2857 #if defined(__i386__) || defined(__x86_64__)
2860 // rcpps uses a piecewise-linear approximation which minimizes the relative error
2861 // but is not exact at power-of-two values. Rectify by multiplying by the inverse.
2862 return x86::rcpps(x) * Float4(1.0f / _mm_cvtss_f32(_mm_rcp_ss(_mm_set_ps1(1.0f))));
2864 return x86::rcpps(x);
2866 return As<Float4>(V(lowerRCP(V(x.value))));
// Fast (partial-precision) per-lane reciprocal square root.
2870 RValue<Float4> RcpSqrt_pp(RValue<Float4> x)
2872 #if defined(__i386__) || defined(__x86_64__)
2873 return x86::rsqrtps(x);
2875 return As<Float4>(V(lowerRSQRT(V(x.value))));
// Full-precision per-lane square root.
2879 RValue<Float4> Sqrt(RValue<Float4> x)
2881 #if defined(__i386__) || defined(__x86_64__)
2882 return x86::sqrtps(x);
2884 return As<Float4>(V(lowerSQRT(V(x.value))));
// Collects the sign bit of each float lane into the low 4 bits of an Int.
2888 RValue<Int> SignMask(RValue<Float4> x)
2890 #if defined(__i386__) || defined(__x86_64__)
2891 return x86::movmskps(x)
2893 return As<Int>(V(lowerFPSignMask(V(x.value), T(Int::getType()))));
// Float4 comparisons returning a per-lane Int4 mask (~0 true, 0 false).
// The Cmp* family uses ordered predicates (false if either operand is NaN);
// the CmpU* family uses unordered predicates (true if either operand is NaN).
2897 RValue<Int4> CmpEQ(RValue<Float4> x, RValue<Float4> y)
2899 // return As<Int4>(x86::cmpeqps(x, y));
2900 return RValue<Int4>(Nucleus::createSExt(Nucleus::createFCmpOEQ(x.value, y.value), Int4::getType()));
2903 RValue<Int4> CmpLT(RValue<Float4> x, RValue<Float4> y)
2905 // return As<Int4>(x86::cmpltps(x, y));
2906 return RValue<Int4>(Nucleus::createSExt(Nucleus::createFCmpOLT(x.value, y.value), Int4::getType()));
2909 RValue<Int4> CmpLE(RValue<Float4> x, RValue<Float4> y)
2911 // return As<Int4>(x86::cmpleps(x, y));
2912 return RValue<Int4>(Nucleus::createSExt(Nucleus::createFCmpOLE(x.value, y.value), Int4::getType()));
2915 RValue<Int4> CmpNEQ(RValue<Float4> x, RValue<Float4> y)
2917 // return As<Int4>(x86::cmpneqps(x, y));
2918 return RValue<Int4>(Nucleus::createSExt(Nucleus::createFCmpONE(x.value, y.value), Int4::getType()));
// CmpNLT ("not less than") maps to the ordered >= predicate.
2921 RValue<Int4> CmpNLT(RValue<Float4> x, RValue<Float4> y)
2923 // return As<Int4>(x86::cmpnltps(x, y));
2924 return RValue<Int4>(Nucleus::createSExt(Nucleus::createFCmpOGE(x.value, y.value), Int4::getType()));
// CmpNLE ("not less than or equal") maps to the ordered > predicate.
2927 RValue<Int4> CmpNLE(RValue<Float4> x, RValue<Float4> y)
2929 // return As<Int4>(x86::cmpnleps(x, y));
2930 return RValue<Int4>(Nucleus::createSExt(Nucleus::createFCmpOGT(x.value, y.value), Int4::getType()));
2933 RValue<Int4> CmpUEQ(RValue<Float4> x, RValue<Float4> y)
2935 return RValue<Int4>(Nucleus::createSExt(Nucleus::createFCmpUEQ(x.value, y.value), Int4::getType()));
2938 RValue<Int4> CmpULT(RValue<Float4> x, RValue<Float4> y)
2940 return RValue<Int4>(Nucleus::createSExt(Nucleus::createFCmpULT(x.value, y.value), Int4::getType()));
2943 RValue<Int4> CmpULE(RValue<Float4> x, RValue<Float4> y)
2945 return RValue<Int4>(Nucleus::createSExt(Nucleus::createFCmpULE(x.value, y.value), Int4::getType()));
2948 RValue<Int4> CmpUNEQ(RValue<Float4> x, RValue<Float4> y)
2950 return RValue<Int4>(Nucleus::createSExt(Nucleus::createFCmpUNE(x.value, y.value), Int4::getType()));
2953 RValue<Int4> CmpUNLT(RValue<Float4> x, RValue<Float4> y)
2955 return RValue<Int4>(Nucleus::createSExt(Nucleus::createFCmpUGE(x.value, y.value), Int4::getType()));
2958 RValue<Int4> CmpUNLE(RValue<Float4> x, RValue<Float4> y)
2960 return RValue<Int4>(Nucleus::createSExt(Nucleus::createFCmpUGT(x.value, y.value), Int4::getType()));
// Per-lane round-to-nearest of a Float4 (result stays a float vector).
2963 RValue<Float4> Round(RValue<Float4> x)
2965 #if defined(__i386__) || defined(__x86_64__)
// SSE4.1 roundps, mode 0 = round-to-nearest.
2966 if(CPUID::supportsSSE4_1())
2968 return x86::roundps(x, 0);
// Pre-SSE4.1 fallback: convert to Int4 (rounding) and back.
2972 return Float4(RoundInt(x));
2975 return RValue<Float4>(V(lowerRound(V(x.value))));
// Per-lane truncation toward zero.
2979 RValue<Float4> Trunc(RValue<Float4> x)
2981 #if defined(__i386__) || defined(__x86_64__)
// SSE4.1 roundps, mode 3 = truncate.
2982 if(CPUID::supportsSSE4_1())
2984 return x86::roundps(x, 3);
2988 return Float4(Int4(x));
2991 return RValue<Float4>(V(lowerTrunc(V(x.value))));
// Per-lane fractional part, result in [0, 1).
// NOTE(review): the declaration of `frc` and some branch structure fall on
// lines elided from this extraction.
2995 RValue<Float4> Frac(RValue<Float4> x)
2999 #if defined(__i386__) || defined(__x86_64__)
3000 if(CPUID::supportsSSE4_1())
3006 frc = x - Float4(Int4(x)); // Signed fractional part.
3008 frc += As<Float4>(As<Int4>(CmpNLE(Float4(0.0f), frc)) & As<Int4>(Float4(1.0f))); // Add 1.0 if negative.
3014 // x - floor(x) can be 1.0 for very small negative x.
3015 // Clamp against the value just below 1.0.
// 0x3F7FFFFF is the largest float strictly below 1.0f.
3016 return Min(frc, As<Float4>(Int4(0x3F7FFFFF)));
// Per-lane floor of a Float4.
3019 RValue<Float4> Floor(RValue<Float4> x)
3021 #if defined(__i386__) || defined(__x86_64__)
3022 if(CPUID::supportsSSE4_1())
3024 return x86::floorps(x);
// Non-x86 path: generic lowering helper. (The pre-SSE4.1 x86 fallback falls
// on lines elided from this extraction.)
3031 return RValue<Float4>(V(lowerFloor(V(x.value))));
// Per-lane ceiling of a Float4. The non-SSE4.1 fallback is elided here.
3035 RValue<Float4> Ceil(RValue<Float4> x)
3037 #if defined(__i386__) || defined(__x86_64__)
3038 if(CPUID::supportsSSE4_1())
3040 return x86::ceilps(x);
// LLVM type of Float4: a 4-element vector of 32-bit floats.
3049 Type *Float4::getType()
3051 return T(llvm::VectorType::get(T(Float::getType()), 4));
// Reads the CPU cycle counter via the llvm.readcyclecounter intrinsic
// (rdtsc on x86).
3054 RValue<Long> Ticks()
3056 llvm::Function *rdtsc = llvm::Intrinsic::getDeclaration(::module, llvm::Intrinsic::readcyclecounter);
3058 return RValue<Long>(V(::builder->CreateCall(rdtsc)));
3064 #if defined(__i386__) || defined(__x86_64__)
// cvtss2si: converts the low float lane to a 32-bit int with rounding.
// NOTE(review): the line building `vector` from `val` is elided in this
// extraction; the call consumes a Float4 whose lane 0 holds val.
3067 RValue<Int> cvtss2si(RValue<Float> val)
3069 llvm::Function *cvtss2si = llvm::Intrinsic::getDeclaration(::module, llvm::Intrinsic::x86_sse_cvtss2si);
3074 return RValue<Int>(V(::builder->CreateCall(cvtss2si, ARGS(V(RValue<Float4>(vector).value)))));
// cvtps2dq: converts four float lanes to four ints with rounding.
3077 RValue<Int4> cvtps2dq(RValue<Float4> val)
3079 llvm::Function *cvtps2dq = llvm::Intrinsic::getDeclaration(::module, llvm::Intrinsic::x86_sse2_cvtps2dq);
3081 return RValue<Int4>(V(::builder->CreateCall(cvtps2dq, ARGS(V(val.value)))));
// rcpss: scalar reciprocal estimate. The scalar is placed in lane 0 of an
// undef vector, the intrinsic is called, and lane 0 is extracted back.
3084 RValue<Float> rcpss(RValue<Float> val)
3086 llvm::Function *rcpss = llvm::Intrinsic::getDeclaration(::module, llvm::Intrinsic::x86_sse_rcp_ss);
3088 Value *vector = Nucleus::createInsertElement(V(llvm::UndefValue::get(T(Float4::getType()))), val.value, 0);
3090 return RValue<Float>(Nucleus::createExtractElement(V(::builder->CreateCall(rcpss, ARGS(V(vector)))), Float::getType(), 0));
// sqrtss: scalar square root. Older LLVM uses the x86-specific intrinsic
// (vector insert/extract dance); LLVM 7+ uses the generic llvm.sqrt.
3093 RValue<Float> sqrtss(RValue<Float> val)
3095 #if REACTOR_LLVM_VERSION < 7
3096 llvm::Function *sqrtss = llvm::Intrinsic::getDeclaration(::module, llvm::Intrinsic::x86_sse_sqrt_ss);
3097 Value *vector = Nucleus::createInsertElement(V(llvm::UndefValue::get(T(Float4::getType()))), val.value, 0);
3099 return RValue<Float>(Nucleus::createExtractElement(V(::builder->CreateCall(sqrtss, ARGS(V(vector)))), Float::getType(), 0));
// LLVM 7+: generic sqrt intrinsic specialized on the scalar float type.
3101 llvm::Function *sqrt = llvm::Intrinsic::getDeclaration(::module, llvm::Intrinsic::sqrt, {V(val.value)->getType()});
3102 return RValue<Float>(V(::builder->CreateCall(sqrt, ARGS(V(val.value)))));
// rsqrtss: scalar reciprocal square root estimate via lane-0 insert/extract.
3106 RValue<Float> rsqrtss(RValue<Float> val)
3108 llvm::Function *rsqrtss = llvm::Intrinsic::getDeclaration(::module, llvm::Intrinsic::x86_sse_rsqrt_ss);
3110 Value *vector = Nucleus::createInsertElement(V(llvm::UndefValue::get(T(Float4::getType()))), val.value, 0);
3112 return RValue<Float>(Nucleus::createExtractElement(V(::builder->CreateCall(rsqrtss, ARGS(V(vector)))), Float::getType(), 0));
// rcpps: packed reciprocal estimate for four float lanes.
3115 RValue<Float4> rcpps(RValue<Float4> val)
3117 llvm::Function *rcpps = llvm::Intrinsic::getDeclaration(::module, llvm::Intrinsic::x86_sse_rcp_ps);
3119 return RValue<Float4>(V(::builder->CreateCall(rcpps, ARGS(V(val.value)))));
// sqrtps: packed square root. x86-specific intrinsic pre-LLVM 7, generic
// llvm.sqrt on the vector type afterwards.
3122 RValue<Float4> sqrtps(RValue<Float4> val)
3124 #if REACTOR_LLVM_VERSION < 7
3125 llvm::Function *sqrtps = llvm::Intrinsic::getDeclaration(::module, llvm::Intrinsic::x86_sse_sqrt_ps);
3127 llvm::Function *sqrtps = llvm::Intrinsic::getDeclaration(::module, llvm::Intrinsic::sqrt, {V(val.value)->getType()});
3130 return RValue<Float4>(V(::builder->CreateCall(sqrtps, ARGS(V(val.value)))));
// rsqrtps: packed reciprocal square root estimate.
3133 RValue<Float4> rsqrtps(RValue<Float4> val)
3135 llvm::Function *rsqrtps = llvm::Intrinsic::getDeclaration(::module, llvm::Intrinsic::x86_sse_rsqrt_ps);
3137 return RValue<Float4>(V(::builder->CreateCall(rsqrtps, ARGS(V(val.value)))));
// maxps: packed per-lane float maximum.
3140 RValue<Float4> maxps(RValue<Float4> x, RValue<Float4> y)
3142 llvm::Function *maxps = llvm::Intrinsic::getDeclaration(::module, llvm::Intrinsic::x86_sse_max_ps);
3144 return RValue<Float4>(V(::builder->CreateCall2(maxps, ARGS(V(x.value), V(y.value)))));
// minps: packed per-lane float minimum.
3147 RValue<Float4> minps(RValue<Float4> x, RValue<Float4> y)
3149 llvm::Function *minps = llvm::Intrinsic::getDeclaration(::module, llvm::Intrinsic::x86_sse_min_ps);
3151 return RValue<Float4>(V(::builder->CreateCall2(minps, ARGS(V(x.value), V(y.value)))));
// roundss: scalar SSE4.1 rounding with an immediate mode
// (0 = nearest, 1 = floor, 2 = ceil, 3 = truncate).
3154 RValue<Float> roundss(RValue<Float> val, unsigned char imm)
3156 llvm::Function *roundss = llvm::Intrinsic::getDeclaration(::module, llvm::Intrinsic::x86_sse41_round_ss);
3158 Value *undef = V(llvm::UndefValue::get(T(Float4::getType())));
3159 Value *vector = Nucleus::createInsertElement(undef, val.value, 0);
// roundss takes (pass-through, source, imm); the result is lane 0.
3161 return RValue<Float>(Nucleus::createExtractElement(V(::builder->CreateCall3(roundss, ARGS(V(undef), V(vector), V(Nucleus::createConstantInt(imm))))), Float::getType(), 0));
// floorss: roundss with mode 1.
3164 RValue<Float> floorss(RValue<Float> val)
3166 return roundss(val, 1);
// ceilss: roundss with mode 2.
3169 RValue<Float> ceilss(RValue<Float> val)
3171 return roundss(val, 2);
// roundps: packed SSE4.1 rounding with the same immediate modes.
3174 RValue<Float4> roundps(RValue<Float4> val, unsigned char imm)
3176 llvm::Function *roundps = llvm::Intrinsic::getDeclaration(::module, llvm::Intrinsic::x86_sse41_round_ps);
3178 return RValue<Float4>(V(::builder->CreateCall2(roundps, ARGS(V(val.value), V(Nucleus::createConstantInt(imm))))));
// floorps: roundps with mode 1.
3181 RValue<Float4> floorps(RValue<Float4> val)
3183 return roundps(val, 1);
// ceilps: roundps with mode 2.
3186 RValue<Float4> ceilps(RValue<Float4> val)
3188 return roundps(val, 2);
// pabsd: per-lane absolute value of four signed 32-bit ints.
// Pre-LLVM 7 uses the SSSE3 intrinsic; newer LLVM uses a generic lowering.
3191 RValue<Int4> pabsd(RValue<Int4> x)
3193 #if REACTOR_LLVM_VERSION < 7
3194 llvm::Function *pabsd = llvm::Intrinsic::getDeclaration(::module, llvm::Intrinsic::x86_ssse3_pabs_d_128);
3196 return RValue<Int4>(V(::builder->CreateCall(pabsd, ARGS(V(x.value)))));
3198 return RValue<Int4>(V(lowerPABS(V(x.value))));
// Saturated packed add/subtract wrappers over the SSE2 intrinsics:
// padds*/psubs* saturate signed, paddus*/psubus* saturate unsigned.
3202 RValue<Short4> paddsw(RValue<Short4> x, RValue<Short4> y)
3204 llvm::Function *paddsw = llvm::Intrinsic::getDeclaration(::module, llvm::Intrinsic::x86_sse2_padds_w);
3206 return As<Short4>(V(::builder->CreateCall2(paddsw, ARGS(V(x.value), V(y.value)))));
3209 RValue<Short4> psubsw(RValue<Short4> x, RValue<Short4> y)
3211 llvm::Function *psubsw = llvm::Intrinsic::getDeclaration(::module, llvm::Intrinsic::x86_sse2_psubs_w);
3213 return As<Short4>(V(::builder->CreateCall2(psubsw, ARGS(V(x.value), V(y.value)))));
3216 RValue<UShort4> paddusw(RValue<UShort4> x, RValue<UShort4> y)
3218 llvm::Function *paddusw = llvm::Intrinsic::getDeclaration(::module, llvm::Intrinsic::x86_sse2_paddus_w);
3220 return As<UShort4>(V(::builder->CreateCall2(paddusw, ARGS(V(x.value), V(y.value)))));
3223 RValue<UShort4> psubusw(RValue<UShort4> x, RValue<UShort4> y)
3225 llvm::Function *psubusw = llvm::Intrinsic::getDeclaration(::module, llvm::Intrinsic::x86_sse2_psubus_w);
3227 return As<UShort4>(V(::builder->CreateCall2(psubusw, ARGS(V(x.value), V(y.value)))));
3230 RValue<SByte8> paddsb(RValue<SByte8> x, RValue<SByte8> y)
3232 llvm::Function *paddsb = llvm::Intrinsic::getDeclaration(::module, llvm::Intrinsic::x86_sse2_padds_b);
3234 return As<SByte8>(V(::builder->CreateCall2(paddsb, ARGS(V(x.value), V(y.value)))));
3237 RValue<SByte8> psubsb(RValue<SByte8> x, RValue<SByte8> y)
3239 llvm::Function *psubsb = llvm::Intrinsic::getDeclaration(::module, llvm::Intrinsic::x86_sse2_psubs_b);
3241 return As<SByte8>(V(::builder->CreateCall2(psubsb, ARGS(V(x.value), V(y.value)))));
3244 RValue<Byte8> paddusb(RValue<Byte8> x, RValue<Byte8> y)
3246 llvm::Function *paddusb = llvm::Intrinsic::getDeclaration(::module, llvm::Intrinsic::x86_sse2_paddus_b);
3248 return As<Byte8>(V(::builder->CreateCall2(paddusb, ARGS(V(x.value), V(y.value)))));
3251 RValue<Byte8> psubusb(RValue<Byte8> x, RValue<Byte8> y)
3253 llvm::Function *psubusb = llvm::Intrinsic::getDeclaration(::module, llvm::Intrinsic::x86_sse2_psubus_b);
3255 return As<Byte8>(V(::builder->CreateCall2(psubusb, ARGS(V(x.value), V(y.value)))));
// pavgw: per-lane unsigned 16-bit rounding average. Intrinsic pre-LLVM 7,
// generic lowering afterwards.
3258 RValue<UShort4> pavgw(RValue<UShort4> x, RValue<UShort4> y)
3260 #if REACTOR_LLVM_VERSION < 7
3261 llvm::Function *pavgw = llvm::Intrinsic::getDeclaration(::module, llvm::Intrinsic::x86_sse2_pavg_w);
3263 return As<UShort4>(V(::builder->CreateCall2(pavgw, ARGS(V(x.value), V(y.value)))));
3265 return As<UShort4>(V(lowerPAVG(V(x.value), V(y.value))));
// pmaxsw: per-lane signed 16-bit maximum; intrinsic pre-LLVM 7, otherwise a
// generic compare/select lowering.
3269 RValue<Short4> pmaxsw(RValue<Short4> x, RValue<Short4> y)
3271 #if REACTOR_LLVM_VERSION < 7
3272 llvm::Function *pmaxsw = llvm::Intrinsic::getDeclaration(::module, llvm::Intrinsic::x86_sse2_pmaxs_w);
3274 return As<Short4>(V(::builder->CreateCall2(pmaxsw, ARGS(V(x.value), V(y.value)))));
3276 return As<Short4>(V(lowerPMINMAX(V(x.value), V(y.value), llvm::ICmpInst::ICMP_SGT)));
// pminsw: per-lane signed 16-bit minimum.
3280 RValue<Short4> pminsw(RValue<Short4> x, RValue<Short4> y)
3282 #if REACTOR_LLVM_VERSION < 7
3283 llvm::Function *pminsw = llvm::Intrinsic::getDeclaration(::module, llvm::Intrinsic::x86_sse2_pmins_w);
3285 return As<Short4>(V(::builder->CreateCall2(pminsw, ARGS(V(x.value), V(y.value)))));
3287 return As<Short4>(V(lowerPMINMAX(V(x.value), V(y.value), llvm::ICmpInst::ICMP_SLT)));
// Packed compare wrappers producing per-lane all-ones/all-zeros masks.
// Pre-LLVM 7 calls the SSE2 intrinsics; newer LLVM lowers via lowerPCMP.
3291 RValue<Short4> pcmpgtw(RValue<Short4> x, RValue<Short4> y)
3293 #if REACTOR_LLVM_VERSION < 7
3294 llvm::Function *pcmpgtw = llvm::Intrinsic::getDeclaration(::module, llvm::Intrinsic::x86_sse2_pcmpgt_w);
3296 return As<Short4>(V(::builder->CreateCall2(pcmpgtw, ARGS(V(x.value), V(y.value)))));
3298 return As<Short4>(V(lowerPCMP(llvm::ICmpInst::ICMP_SGT, V(x.value), V(y.value), T(Short4::getType()))));
3302 RValue<Short4> pcmpeqw(RValue<Short4> x, RValue<Short4> y)
3304 #if REACTOR_LLVM_VERSION < 7
3305 llvm::Function *pcmpeqw = llvm::Intrinsic::getDeclaration(::module, llvm::Intrinsic::x86_sse2_pcmpeq_w);
3307 return As<Short4>(V(::builder->CreateCall2(pcmpeqw, ARGS(V(x.value), V(y.value)))));
3309 return As<Short4>(V(lowerPCMP(llvm::ICmpInst::ICMP_EQ, V(x.value), V(y.value), T(Short4::getType()))));
// Byte-wide signed greater-than compare; note result is returned as Byte8.
3313 RValue<Byte8> pcmpgtb(RValue<SByte8> x, RValue<SByte8> y)
3315 #if REACTOR_LLVM_VERSION < 7
3316 llvm::Function *pcmpgtb = llvm::Intrinsic::getDeclaration(::module, llvm::Intrinsic::x86_sse2_pcmpgt_b);
3318 return As<Byte8>(V(::builder->CreateCall2(pcmpgtb, ARGS(V(x.value), V(y.value)))));
3320 return As<Byte8>(V(lowerPCMP(llvm::ICmpInst::ICMP_SGT, V(x.value), V(y.value), T(Byte8::getType()))));
3324 RValue<Byte8> pcmpeqb(RValue<Byte8> x, RValue<Byte8> y)
3326 #if REACTOR_LLVM_VERSION < 7
3327 llvm::Function *pcmpeqb = llvm::Intrinsic::getDeclaration(::module, llvm::Intrinsic::x86_sse2_pcmpeq_b);
3329 return As<Byte8>(V(::builder->CreateCall2(pcmpeqb, ARGS(V(x.value), V(y.value)))));
3331 return As<Byte8>(V(lowerPCMP(llvm::ICmpInst::ICMP_EQ, V(x.value), V(y.value), T(Byte8::getType()))));
// Saturating pack wrappers over the SSE2 128-bit pack intrinsics.
// packssdw: int32 -> int16 with signed saturation (Int2 overload packs halves).
3335 RValue<Short4> packssdw(RValue<Int2> x, RValue<Int2> y)
3337 llvm::Function *packssdw = llvm::Intrinsic::getDeclaration(::module, llvm::Intrinsic::x86_sse2_packssdw_128);
3339 return As<Short4>(V(::builder->CreateCall2(packssdw, ARGS(V(x.value), V(y.value)))));
3342 RValue<Short8> packssdw(RValue<Int4> x, RValue<Int4> y)
3344 llvm::Function *packssdw = llvm::Intrinsic::getDeclaration(::module, llvm::Intrinsic::x86_sse2_packssdw_128);
3346 return RValue<Short8>(V(::builder->CreateCall2(packssdw, ARGS(V(x.value), V(y.value)))));
// packsswb: int16 -> int8 with signed saturation.
3349 RValue<SByte8> packsswb(RValue<Short4> x, RValue<Short4> y)
3351 llvm::Function *packsswb = llvm::Intrinsic::getDeclaration(::module, llvm::Intrinsic::x86_sse2_packsswb_128);
3353 return As<SByte8>(V(::builder->CreateCall2(packsswb, ARGS(V(x.value), V(y.value)))));
// packuswb: int16 -> uint8 with unsigned saturation.
3356 RValue<Byte8> packuswb(RValue<Short4> x, RValue<Short4> y)
3358 llvm::Function *packuswb = llvm::Intrinsic::getDeclaration(::module, llvm::Intrinsic::x86_sse2_packuswb_128);
3360 return As<Byte8>(V(::builder->CreateCall2(packuswb, ARGS(V(x.value), V(y.value)))));
// packusdw: int32 -> uint16 with unsigned saturation. Uses the SSE4.1
// intrinsic when available; otherwise emulates it with packssdw by clamping
// negatives to zero, biasing by -0x8000 into signed range, packing, and
// re-biasing by +0x8000.
3363 RValue<UShort8> packusdw(RValue<Int4> x, RValue<Int4> y)
3365 if(CPUID::supportsSSE4_1())
3367 llvm::Function *packusdw = llvm::Intrinsic::getDeclaration(::module, llvm::Intrinsic::x86_sse41_packusdw);
3369 return RValue<UShort8>(V(::builder->CreateCall2(packusdw, ARGS(V(x.value), V(y.value)))));
// ~(x >> 31) zeroes negative lanes before the signed-range bias.
3373 RValue<Int4> bx = (x & ~(x >> 31)) - Int4(0x8000);
3374 RValue<Int4> by = (y & ~(y >> 31)) - Int4(0x8000);
3376 return As<UShort8>(packssdw(bx, by) + Short8(0x8000u));
// Immediate-count packed shift wrappers (psrl* logical right, psra*
// arithmetic right, psll* left). Each has a 64-bit (As<...>) and a 128-bit
// (RValue<...>) overload calling the same SSE2 intrinsic.
3380 RValue<UShort4> psrlw(RValue<UShort4> x, unsigned char y)
3382 llvm::Function *psrlw = llvm::Intrinsic::getDeclaration(::module, llvm::Intrinsic::x86_sse2_psrli_w);
3384 return As<UShort4>(V(::builder->CreateCall2(psrlw, ARGS(V(x.value), V(Nucleus::createConstantInt(y))))));
3387 RValue<UShort8> psrlw(RValue<UShort8> x, unsigned char y)
3389 llvm::Function *psrlw = llvm::Intrinsic::getDeclaration(::module, llvm::Intrinsic::x86_sse2_psrli_w);
3391 return RValue<UShort8>(V(::builder->CreateCall2(psrlw, ARGS(V(x.value), V(Nucleus::createConstantInt(y))))));
3394 RValue<Short4> psraw(RValue<Short4> x, unsigned char y)
3396 llvm::Function *psraw = llvm::Intrinsic::getDeclaration(::module, llvm::Intrinsic::x86_sse2_psrai_w);
3398 return As<Short4>(V(::builder->CreateCall2(psraw, ARGS(V(x.value), V(Nucleus::createConstantInt(y))))));
3401 RValue<Short8> psraw(RValue<Short8> x, unsigned char y)
3403 llvm::Function *psraw = llvm::Intrinsic::getDeclaration(::module, llvm::Intrinsic::x86_sse2_psrai_w);
3405 return RValue<Short8>(V(::builder->CreateCall2(psraw, ARGS(V(x.value), V(Nucleus::createConstantInt(y))))));
3408 RValue<Short4> psllw(RValue<Short4> x, unsigned char y)
3410 llvm::Function *psllw = llvm::Intrinsic::getDeclaration(::module, llvm::Intrinsic::x86_sse2_pslli_w);
3412 return As<Short4>(V(::builder->CreateCall2(psllw, ARGS(V(x.value), V(Nucleus::createConstantInt(y))))));
3415 RValue<Short8> psllw(RValue<Short8> x, unsigned char y)
3417 llvm::Function *psllw = llvm::Intrinsic::getDeclaration(::module, llvm::Intrinsic::x86_sse2_pslli_w);
3419 return RValue<Short8>(V(::builder->CreateCall2(psllw, ARGS(V(x.value), V(Nucleus::createConstantInt(y))))));
3422 RValue<Int2> pslld(RValue<Int2> x, unsigned char y)
3424 llvm::Function *pslld = llvm::Intrinsic::getDeclaration(::module, llvm::Intrinsic::x86_sse2_pslli_d);
3426 return As<Int2>(V(::builder->CreateCall2(pslld, ARGS(V(x.value), V(Nucleus::createConstantInt(y))))));
3429 RValue<Int4> pslld(RValue<Int4> x, unsigned char y)
3431 llvm::Function *pslld = llvm::Intrinsic::getDeclaration(::module, llvm::Intrinsic::x86_sse2_pslli_d);
3433 return RValue<Int4>(V(::builder->CreateCall2(pslld, ARGS(V(x.value), V(Nucleus::createConstantInt(y))))));
3436 RValue<Int2> psrad(RValue<Int2> x, unsigned char y)
3438 llvm::Function *psrad = llvm::Intrinsic::getDeclaration(::module, llvm::Intrinsic::x86_sse2_psrai_d);
3440 return As<Int2>(V(::builder->CreateCall2(psrad, ARGS(V(x.value), V(Nucleus::createConstantInt(y))))));
3443 RValue<Int4> psrad(RValue<Int4> x, unsigned char y)
3445 llvm::Function *psrad = llvm::Intrinsic::getDeclaration(::module, llvm::Intrinsic::x86_sse2_psrai_d);
3447 return RValue<Int4>(V(::builder->CreateCall2(psrad, ARGS(V(x.value), V(Nucleus::createConstantInt(y))))));
3450 RValue<UInt2> psrld(RValue<UInt2> x, unsigned char y)
3452 llvm::Function *psrld = llvm::Intrinsic::getDeclaration(::module, llvm::Intrinsic::x86_sse2_psrli_d);
3454 return As<UInt2>(V(::builder->CreateCall2(psrld, ARGS(V(x.value), V(Nucleus::createConstantInt(y))))));
3457 RValue<UInt4> psrld(RValue<UInt4> x, unsigned char y)
3459 llvm::Function *psrld = llvm::Intrinsic::getDeclaration(::module, llvm::Intrinsic::x86_sse2_psrli_d);
3461 return RValue<UInt4>(V(::builder->CreateCall2(psrld, ARGS(V(x.value), V(Nucleus::createConstantInt(y))))));
// SSE4.1 32-bit packed min/max wrappers: intrinsic pre-LLVM 7, generic
// compare/select lowering (lowerPMINMAX) afterwards.
3464 RValue<Int4> pmaxsd(RValue<Int4> x, RValue<Int4> y)
3466 #if REACTOR_LLVM_VERSION < 7
3467 llvm::Function *pmaxsd = llvm::Intrinsic::getDeclaration(::module, llvm::Intrinsic::x86_sse41_pmaxsd);
3469 return RValue<Int4>(V(::builder->CreateCall2(pmaxsd, ARGS(V(x.value), V(y.value)))));
3471 return RValue<Int4>(V(lowerPMINMAX(V(x.value), V(y.value), llvm::ICmpInst::ICMP_SGT)));
3475 RValue<Int4> pminsd(RValue<Int4> x, RValue<Int4> y)
3477 #if REACTOR_LLVM_VERSION < 7
3478 llvm::Function *pminsd = llvm::Intrinsic::getDeclaration(::module, llvm::Intrinsic::x86_sse41_pminsd);
3480 return RValue<Int4>(V(::builder->CreateCall2(pminsd, ARGS(V(x.value), V(y.value)))));
3482 return RValue<Int4>(V(lowerPMINMAX(V(x.value), V(y.value), llvm::ICmpInst::ICMP_SLT)));
3486 RValue<UInt4> pmaxud(RValue<UInt4> x, RValue<UInt4> y)
3488 #if REACTOR_LLVM_VERSION < 7
3489 llvm::Function *pmaxud = llvm::Intrinsic::getDeclaration(::module, llvm::Intrinsic::x86_sse41_pmaxud);
3491 return RValue<UInt4>(V(::builder->CreateCall2(pmaxud, ARGS(V(x.value), V(y.value)))));
3493 return RValue<UInt4>(V(lowerPMINMAX(V(x.value), V(y.value), llvm::ICmpInst::ICMP_UGT)));
3497 RValue<UInt4> pminud(RValue<UInt4> x, RValue<UInt4> y)
3499 #if REACTOR_LLVM_VERSION < 7
3500 llvm::Function *pminud = llvm::Intrinsic::getDeclaration(::module, llvm::Intrinsic::x86_sse41_pminud);
3502 return RValue<UInt4>(V(::builder->CreateCall2(pminud, ARGS(V(x.value), V(y.value)))));
3504 return RValue<UInt4>(V(lowerPMINMAX(V(x.value), V(y.value), llvm::ICmpInst::ICMP_ULT)));
3508 RValue<Short4> pmulhw(RValue<Short4> x, RValue<Short4> y)
3510 llvm::Function *pmulhw = llvm::Intrinsic::getDeclaration(::module, llvm::Intrinsic::x86_sse2_pmulh_w);
3512 return As<Short4>(V(::builder->CreateCall2(pmulhw, ARGS(V(x.value), V(y.value)))));
3515 RValue<UShort4> pmulhuw(RValue<UShort4> x, RValue<UShort4> y)
3517 llvm::Function *pmulhuw = llvm::Intrinsic::getDeclaration(::module, llvm::Intrinsic::x86_sse2_pmulhu_w);
3519 return As<UShort4>(V(::builder->CreateCall2(pmulhuw, ARGS(V(x.value), V(y.value)))));
3522 RValue<Int2> pmaddwd(RValue<Short4> x, RValue<Short4> y)
3524 llvm::Function *pmaddwd = llvm::Intrinsic::getDeclaration(::module, llvm::Intrinsic::x86_sse2_pmadd_wd);
3526 return As<Int2>(V(::builder->CreateCall2(pmaddwd, ARGS(V(x.value), V(y.value)))));
3529 RValue<Short8> pmulhw(RValue<Short8> x, RValue<Short8> y)
3531 llvm::Function *pmulhw = llvm::Intrinsic::getDeclaration(::module, llvm::Intrinsic::x86_sse2_pmulh_w);
3533 return RValue<Short8>(V(::builder->CreateCall2(pmulhw, ARGS(V(x.value), V(y.value)))));
3536 RValue<UShort8> pmulhuw(RValue<UShort8> x, RValue<UShort8> y)
3538 llvm::Function *pmulhuw = llvm::Intrinsic::getDeclaration(::module, llvm::Intrinsic::x86_sse2_pmulhu_w);
3540 return RValue<UShort8>(V(::builder->CreateCall2(pmulhuw, ARGS(V(x.value), V(y.value)))));
3543 RValue<Int4> pmaddwd(RValue<Short8> x, RValue<Short8> y)
3545 llvm::Function *pmaddwd = llvm::Intrinsic::getDeclaration(::module, llvm::Intrinsic::x86_sse2_pmadd_wd);
3547 return RValue<Int4>(V(::builder->CreateCall2(pmaddwd, ARGS(V(x.value), V(y.value)))));
3550 RValue<Int> movmskps(RValue<Float4> x)
3552 llvm::Function *movmskps = llvm::Intrinsic::getDeclaration(::module, llvm::Intrinsic::x86_sse_movmsk_ps);
3554 return RValue<Int>(V(::builder->CreateCall(movmskps, ARGS(V(x.value)))));
3557 RValue<Int> pmovmskb(RValue<Byte8> x)
3559 llvm::Function *pmovmskb = llvm::Intrinsic::getDeclaration(::module, llvm::Intrinsic::x86_sse2_pmovmskb_128);
3561 return RValue<Int>(V(::builder->CreateCall(pmovmskb, ARGS(V(x.value))))) & 0xFF;
3564 RValue<Int4> pmovzxbd(RValue<Byte16> x)
3566 #if REACTOR_LLVM_VERSION < 7
3567 llvm::Function *pmovzxbd = llvm::Intrinsic::getDeclaration(::module, llvm::Intrinsic::x86_sse41_pmovzxbd);
3569 return RValue<Int4>(V(::builder->CreateCall(pmovzxbd, ARGS(V(x.value)))));
3571 return RValue<Int4>(V(lowerPMOV(V(x.value), T(Int4::getType()), false)));
3575 RValue<Int4> pmovsxbd(RValue<SByte16> x)
3577 #if REACTOR_LLVM_VERSION < 7
3578 llvm::Function *pmovsxbd = llvm::Intrinsic::getDeclaration(::module, llvm::Intrinsic::x86_sse41_pmovsxbd);
3580 return RValue<Int4>(V(::builder->CreateCall(pmovsxbd, ARGS(V(x.value)))));
3582 return RValue<Int4>(V(lowerPMOV(V(x.value), T(Int4::getType()), true)));
3586 RValue<Int4> pmovzxwd(RValue<UShort8> x)
3588 #if REACTOR_LLVM_VERSION < 7
3589 llvm::Function *pmovzxwd = llvm::Intrinsic::getDeclaration(::module, llvm::Intrinsic::x86_sse41_pmovzxwd);
3591 return RValue<Int4>(V(::builder->CreateCall(pmovzxwd, ARGS(V(x.value)))));
3593 return RValue<Int4>(V(lowerPMOV(V(x.value), T(Int4::getType()), false)));
3597 RValue<Int4> pmovsxwd(RValue<Short8> x)
3599 #if REACTOR_LLVM_VERSION < 7
3600 llvm::Function *pmovsxwd = llvm::Intrinsic::getDeclaration(::module, llvm::Intrinsic::x86_sse41_pmovsxwd);
3602 return RValue<Int4>(V(::builder->CreateCall(pmovsxwd, ARGS(V(x.value)))));
3604 return RValue<Int4>(V(lowerPMOV(V(x.value), T(Int4::getType()), true)));
3608 #endif // defined(__i386__) || defined(__x86_64__)
3610 #ifdef ENABLE_RR_PRINT
3611 // extractAll returns a vector containing the extracted n scalar value of
3613 static std::vector<Value*> extractAll(Value* vec, int n)
3615 std::vector<Value*> elements;
3616 elements.reserve(n);
3617 for (int i = 0; i < n; i++)
3619 auto el = V(::builder->CreateExtractElement(V(vec), i));
3620 elements.push_back(el);
3625 // toDouble returns all the float values in vals extended to doubles.
3626 static std::vector<Value*> toDouble(const std::vector<Value*>& vals)
3628 auto doubleTy = ::llvm::Type::getDoubleTy(*::context);
3629 std::vector<Value*> elements;
3630 elements.reserve(vals.size());
3633 elements.push_back(V(::builder->CreateFPExt(V(v), doubleTy)));
3638 std::vector<Value*> PrintValue::Ty<Byte4>::val(const RValue<Byte4>& v) { return extractAll(v.value, 4); }
3639 std::vector<Value*> PrintValue::Ty<Int4>::val(const RValue<Int4>& v) { return extractAll(v.value, 4); }
3640 std::vector<Value*> PrintValue::Ty<UInt4>::val(const RValue<UInt4>& v) { return extractAll(v.value, 4); }
3641 std::vector<Value*> PrintValue::Ty<Short4>::val(const RValue<Short4>& v) { return extractAll(v.value, 4); }
3642 std::vector<Value*> PrintValue::Ty<UShort4>::val(const RValue<UShort4>& v) { return extractAll(v.value, 4); }
3643 std::vector<Value*> PrintValue::Ty<Float>::val(const RValue<Float>& v) { return toDouble({v.value}); }
3644 std::vector<Value*> PrintValue::Ty<Float4>::val(const RValue<Float4>& v) { return toDouble(extractAll(v.value, 4)); }
3646 void Printv(const char* function, const char* file, int line, const char* fmt, std::initializer_list<PrintValue> args)
3648 // LLVM types used below.
3649 auto i32Ty = ::llvm::Type::getInt32Ty(*::context);
3650 auto intTy = ::llvm::Type::getInt64Ty(*::context); // TODO: Natural int width.
3651 auto i8PtrTy = ::llvm::Type::getInt8PtrTy(*::context);
3652 auto funcTy = ::llvm::FunctionType::get(i32Ty, {i8PtrTy}, true);
3654 auto func = ::module->getOrInsertFunction("printf", funcTy);
3656 // Build the printf format message string.
3658 if (file != nullptr) { str += (line > 0) ? "%s:%d " : "%s "; }
3659 if (function != nullptr) { str += "%s "; }
3662 // Perform subsitution on all '{n}' bracketed indices in the format
3665 for (const PrintValue& arg : args)
3667 str = replace(str, "{" + std::to_string(i++) + "}", arg.format);
3670 ::llvm::SmallVector<::llvm::Value*, 8> vals;
3672 // The format message is always the first argument.
3673 vals.push_back(::builder->CreateGlobalStringPtr(str));
3675 // Add optional file, line and function info if provided.
3676 if (file != nullptr)
3678 vals.push_back(::builder->CreateGlobalStringPtr(file));
3681 vals.push_back(::llvm::ConstantInt::get(intTy, line));
3684 if (function != nullptr)
3686 vals.push_back(::builder->CreateGlobalStringPtr(function));
3689 // Add all format arguments.
3690 for (const PrintValue& arg : args)
3692 for (auto val : arg.values)
3694 vals.push_back(V(val));
3698 ::builder->CreateCall(func, vals);
3700 #endif // ENABLE_RR_PRINT