diff options
| author | 2019-01-27 16:42:12 +0000 | |
|---|---|---|
| committer | 2019-01-27 16:42:12 +0000 | |
| commit | b773203fb58f3ef282fb69c832d8710cab5bc82d (patch) | |
| tree | e75913f147570fbd75169647b144df85b88a038c /gnu/llvm/lib/Transforms/Instrumentation/MemorySanitizer.cpp | |
| parent | tweak errno in previous (diff) | |
| download | wireguard-openbsd-b773203fb58f3ef282fb69c832d8710cab5bc82d.tar.xz wireguard-openbsd-b773203fb58f3ef282fb69c832d8710cab5bc82d.zip | |
Import LLVM 7.0.1 release including clang, lld and lldb.
Diffstat (limited to 'gnu/llvm/lib/Transforms/Instrumentation/MemorySanitizer.cpp')
| -rw-r--r-- | gnu/llvm/lib/Transforms/Instrumentation/MemorySanitizer.cpp | 851 |
1 file changed, 524 insertions(+), 327 deletions(-)
diff --git a/gnu/llvm/lib/Transforms/Instrumentation/MemorySanitizer.cpp b/gnu/llvm/lib/Transforms/Instrumentation/MemorySanitizer.cpp index b3c39b5b166..4bcef697278 100644 --- a/gnu/llvm/lib/Transforms/Instrumentation/MemorySanitizer.cpp +++ b/gnu/llvm/lib/Transforms/Instrumentation/MemorySanitizer.cpp @@ -101,6 +101,7 @@ #include "llvm/ADT/StringRef.h" #include "llvm/ADT/Triple.h" #include "llvm/Analysis/TargetLibraryInfo.h" +#include "llvm/Transforms/Utils/Local.h" #include "llvm/IR/Argument.h" #include "llvm/IR/Attributes.h" #include "llvm/IR/BasicBlock.h" @@ -138,7 +139,6 @@ #include "llvm/Support/raw_ostream.h" #include "llvm/Transforms/Instrumentation.h" #include "llvm/Transforms/Utils/BasicBlockUtils.h" -#include "llvm/Transforms/Utils/Local.h" #include "llvm/Transforms/Utils/ModuleUtils.h" #include <algorithm> #include <cassert> @@ -163,7 +163,7 @@ static const unsigned kRetvalTLSSize = 800; // Accesses sizes are powers of two: 1, 2, 4, 8. static const size_t kNumberOfAccessSizes = 4; -/// \brief Track origins of uninitialized values. +/// Track origins of uninitialized values. /// /// Adds a section to MemorySanitizer report that points to the allocation /// (stack or heap) the uninitialized bits came from originally. @@ -199,6 +199,18 @@ static cl::opt<bool> ClHandleICmpExact("msan-handle-icmp-exact", cl::desc("exact handling of relational integer ICmp"), cl::Hidden, cl::init(false)); +// When compiling the Linux kernel, we sometimes see false positives related to +// MSan being unable to understand that inline assembly calls may initialize +// local variables. +// This flag makes the compiler conservatively unpoison every memory location +// passed into an assembly call. Note that this may cause false positives. +// Because it's impossible to figure out the array sizes, we can only unpoison +// the first sizeof(type) bytes for each type* pointer. 
+static cl::opt<bool> ClHandleAsmConservative( + "msan-handle-asm-conservative", + cl::desc("conservative handling of inline assembly"), cl::Hidden, + cl::init(false)); + // This flag controls whether we check the shadow of the address // operand of load or store. Such bugs are very rare, since load from // a garbage address typically results in SEGV, but still happen @@ -234,6 +246,24 @@ static cl::opt<bool> ClWithComdat("msan-with-comdat", cl::desc("Place MSan constructors in comdat sections"), cl::Hidden, cl::init(false)); +// These options allow to specify custom memory map parameters +// See MemoryMapParams for details. +static cl::opt<unsigned long long> ClAndMask("msan-and-mask", + cl::desc("Define custom MSan AndMask"), + cl::Hidden, cl::init(0)); + +static cl::opt<unsigned long long> ClXorMask("msan-xor-mask", + cl::desc("Define custom MSan XorMask"), + cl::Hidden, cl::init(0)); + +static cl::opt<unsigned long long> ClShadowBase("msan-shadow-base", + cl::desc("Define custom MSan ShadowBase"), + cl::Hidden, cl::init(0)); + +static cl::opt<unsigned long long> ClOriginBase("msan-origin-base", + cl::desc("Define custom MSan OriginBase"), + cl::Hidden, cl::init(0)); + static const char *const kMsanModuleCtorName = "msan.module_ctor"; static const char *const kMsanInitName = "__msan_init"; @@ -360,7 +390,7 @@ static const PlatformMemoryMapParams NetBSD_X86_MemoryMapParams = { namespace { -/// \brief An instrumentation pass implementing detection of uninitialized +/// An instrumentation pass implementing detection of uninitialized /// reads. /// /// MemorySanitizer: instrument the code in module to find @@ -368,7 +398,7 @@ namespace { class MemorySanitizer : public FunctionPass { public: // Pass identification, replacement for typeid. 
- static char ID; + static char ID; MemorySanitizer(int TrackOrigins = 0, bool Recover = false) : FunctionPass(ID), @@ -392,8 +422,9 @@ private: friend struct VarArgPowerPC64Helper; void initializeCallbacks(Module &M); + void createUserspaceApi(Module &M); - /// \brief Track origins (allocation points) of uninitialized values. + /// Track origins (allocation points) of uninitialized values. int TrackOrigins; bool Recover; @@ -401,60 +432,67 @@ private: Type *IntptrTy; Type *OriginTy; - /// \brief Thread-local shadow storage for function parameters. + /// Thread-local shadow storage for function parameters. GlobalVariable *ParamTLS; - /// \brief Thread-local origin storage for function parameters. + /// Thread-local origin storage for function parameters. GlobalVariable *ParamOriginTLS; - /// \brief Thread-local shadow storage for function return value. + /// Thread-local shadow storage for function return value. GlobalVariable *RetvalTLS; - /// \brief Thread-local origin storage for function return value. + /// Thread-local origin storage for function return value. GlobalVariable *RetvalOriginTLS; - /// \brief Thread-local shadow storage for in-register va_arg function + /// Thread-local shadow storage for in-register va_arg function /// parameters (x86_64-specific). GlobalVariable *VAArgTLS; - /// \brief Thread-local shadow storage for va_arg overflow area + /// Thread-local shadow storage for va_arg overflow area /// (x86_64-specific). GlobalVariable *VAArgOverflowSizeTLS; - /// \brief Thread-local space used to pass origin value to the UMR reporting + /// Thread-local space used to pass origin value to the UMR reporting /// function. GlobalVariable *OriginTLS; - /// \brief The run-time callback to print a warning. - Value *WarningFn = nullptr; + /// Are the instrumentation callbacks set up? + bool CallbacksInitialized = false; + + /// The run-time callback to print a warning. + Value *WarningFn; // These arrays are indexed by log2(AccessSize). 
Value *MaybeWarningFn[kNumberOfAccessSizes]; Value *MaybeStoreOriginFn[kNumberOfAccessSizes]; - /// \brief Run-time helper that generates a new origin value for a stack + /// Run-time helper that generates a new origin value for a stack /// allocation. Value *MsanSetAllocaOrigin4Fn; - /// \brief Run-time helper that poisons stack on function entry. + /// Run-time helper that poisons stack on function entry. Value *MsanPoisonStackFn; - /// \brief Run-time helper that records a store (or any event) of an + /// Run-time helper that records a store (or any event) of an /// uninitialized value and returns an updated origin id encoding this info. Value *MsanChainOriginFn; - /// \brief MSan runtime replacements for memmove, memcpy and memset. + /// MSan runtime replacements for memmove, memcpy and memset. Value *MemmoveFn, *MemcpyFn, *MemsetFn; - /// \brief Memory map parameters used in application-to-shadow calculation. + /// Memory map parameters used in application-to-shadow calculation. const MemoryMapParams *MapParams; + /// Custom memory map parameters used when -msan-shadow-base or + // -msan-origin-base is provided. + MemoryMapParams CustomMapParams; + MDNode *ColdCallWeights; - /// \brief Branch weights for origin store. + /// Branch weights for origin store. MDNode *OriginStoreWeights; - /// \brief An empty volatile inline asm that prevents callback merge. + /// An empty volatile inline asm that prevents callback merge. InlineAsm *EmptyAsm; Function *MsanCtorFunction; @@ -476,7 +514,7 @@ FunctionPass *llvm::createMemorySanitizerPass(int TrackOrigins, bool Recover) { return new MemorySanitizer(TrackOrigins, Recover); } -/// \brief Create a non-const global initialized with the given string. +/// Create a non-const global initialized with the given string. /// /// Creates a writable global for Str so that we can pass it to the /// run-time lib. 
Runtime uses first 4 bytes of the string to store the @@ -488,12 +526,8 @@ static GlobalVariable *createPrivateNonConstGlobalForString(Module &M, GlobalValue::PrivateLinkage, StrConst, ""); } -/// \brief Insert extern declaration of runtime-provided functions and globals. -void MemorySanitizer::initializeCallbacks(Module &M) { - // Only do this once. - if (WarningFn) - return; - +/// Insert declarations for userspace-specific functions and globals. +void MemorySanitizer::createUserspaceApi(Module &M) { IRBuilder<> IRB(*C); // Create the callback. // FIXME: this function should have "Cold" calling conv, @@ -502,6 +536,38 @@ void MemorySanitizer::initializeCallbacks(Module &M) { : "__msan_warning_noreturn"; WarningFn = M.getOrInsertFunction(WarningFnName, IRB.getVoidTy()); + // Create the global TLS variables. + RetvalTLS = new GlobalVariable( + M, ArrayType::get(IRB.getInt64Ty(), kRetvalTLSSize / 8), false, + GlobalVariable::ExternalLinkage, nullptr, "__msan_retval_tls", nullptr, + GlobalVariable::InitialExecTLSModel); + + RetvalOriginTLS = new GlobalVariable( + M, OriginTy, false, GlobalVariable::ExternalLinkage, nullptr, + "__msan_retval_origin_tls", nullptr, GlobalVariable::InitialExecTLSModel); + + ParamTLS = new GlobalVariable( + M, ArrayType::get(IRB.getInt64Ty(), kParamTLSSize / 8), false, + GlobalVariable::ExternalLinkage, nullptr, "__msan_param_tls", nullptr, + GlobalVariable::InitialExecTLSModel); + + ParamOriginTLS = new GlobalVariable( + M, ArrayType::get(OriginTy, kParamTLSSize / 4), false, + GlobalVariable::ExternalLinkage, nullptr, "__msan_param_origin_tls", + nullptr, GlobalVariable::InitialExecTLSModel); + + VAArgTLS = new GlobalVariable( + M, ArrayType::get(IRB.getInt64Ty(), kParamTLSSize / 8), false, + GlobalVariable::ExternalLinkage, nullptr, "__msan_va_arg_tls", nullptr, + GlobalVariable::InitialExecTLSModel); + VAArgOverflowSizeTLS = new GlobalVariable( + M, IRB.getInt64Ty(), false, GlobalVariable::ExternalLinkage, nullptr, + 
"__msan_va_arg_overflow_size_tls", nullptr, + GlobalVariable::InitialExecTLSModel); + OriginTLS = new GlobalVariable( + M, IRB.getInt32Ty(), false, GlobalVariable::ExternalLinkage, nullptr, + "__msan_origin_tls", nullptr, GlobalVariable::InitialExecTLSModel); + for (size_t AccessSizeIndex = 0; AccessSizeIndex < kNumberOfAccessSizes; AccessSizeIndex++) { unsigned AccessSize = 1 << AccessSizeIndex; @@ -522,6 +588,17 @@ void MemorySanitizer::initializeCallbacks(Module &M) { MsanPoisonStackFn = M.getOrInsertFunction("__msan_poison_stack", IRB.getVoidTy(), IRB.getInt8PtrTy(), IntptrTy); +} + +/// Insert extern declaration of runtime-provided functions and globals. +void MemorySanitizer::initializeCallbacks(Module &M) { + // Only do this once. + if (CallbacksInitialized) + return; + + IRBuilder<> IRB(*C); + // Initialize callbacks that are common for kernel and userspace + // instrumentation. MsanChainOriginFn = M.getOrInsertFunction( "__msan_chain_origin", IRB.getInt32Ty(), IRB.getInt32Ty()); MemmoveFn = M.getOrInsertFunction( @@ -533,98 +610,81 @@ void MemorySanitizer::initializeCallbacks(Module &M) { MemsetFn = M.getOrInsertFunction( "__msan_memset", IRB.getInt8PtrTy(), IRB.getInt8PtrTy(), IRB.getInt32Ty(), IntptrTy); - - // Create globals. 
- RetvalTLS = new GlobalVariable( - M, ArrayType::get(IRB.getInt64Ty(), kRetvalTLSSize / 8), false, - GlobalVariable::ExternalLinkage, nullptr, "__msan_retval_tls", nullptr, - GlobalVariable::InitialExecTLSModel); - RetvalOriginTLS = new GlobalVariable( - M, OriginTy, false, GlobalVariable::ExternalLinkage, nullptr, - "__msan_retval_origin_tls", nullptr, GlobalVariable::InitialExecTLSModel); - - ParamTLS = new GlobalVariable( - M, ArrayType::get(IRB.getInt64Ty(), kParamTLSSize / 8), false, - GlobalVariable::ExternalLinkage, nullptr, "__msan_param_tls", nullptr, - GlobalVariable::InitialExecTLSModel); - ParamOriginTLS = new GlobalVariable( - M, ArrayType::get(OriginTy, kParamTLSSize / 4), false, - GlobalVariable::ExternalLinkage, nullptr, "__msan_param_origin_tls", - nullptr, GlobalVariable::InitialExecTLSModel); - - VAArgTLS = new GlobalVariable( - M, ArrayType::get(IRB.getInt64Ty(), kParamTLSSize / 8), false, - GlobalVariable::ExternalLinkage, nullptr, "__msan_va_arg_tls", nullptr, - GlobalVariable::InitialExecTLSModel); - VAArgOverflowSizeTLS = new GlobalVariable( - M, IRB.getInt64Ty(), false, GlobalVariable::ExternalLinkage, nullptr, - "__msan_va_arg_overflow_size_tls", nullptr, - GlobalVariable::InitialExecTLSModel); - OriginTLS = new GlobalVariable( - M, IRB.getInt32Ty(), false, GlobalVariable::ExternalLinkage, nullptr, - "__msan_origin_tls", nullptr, GlobalVariable::InitialExecTLSModel); - // We insert an empty inline asm after __msan_report* to avoid callback merge. EmptyAsm = InlineAsm::get(FunctionType::get(IRB.getVoidTy(), false), StringRef(""), StringRef(""), /*hasSideEffects=*/true); + + createUserspaceApi(M); + CallbacksInitialized = true; } -/// \brief Module-level initialization. +/// Module-level initialization. /// /// inserts a call to __msan_init to the module's constructor list. 
bool MemorySanitizer::doInitialization(Module &M) { auto &DL = M.getDataLayout(); - Triple TargetTriple(M.getTargetTriple()); - switch (TargetTriple.getOS()) { - case Triple::FreeBSD: - switch (TargetTriple.getArch()) { - case Triple::x86_64: - MapParams = FreeBSD_X86_MemoryMapParams.bits64; - break; - case Triple::x86: - MapParams = FreeBSD_X86_MemoryMapParams.bits32; - break; - default: - report_fatal_error("unsupported architecture"); - } - break; - case Triple::NetBSD: - switch (TargetTriple.getArch()) { - case Triple::x86_64: - MapParams = NetBSD_X86_MemoryMapParams.bits64; - break; - default: - report_fatal_error("unsupported architecture"); - } - break; - case Triple::Linux: - switch (TargetTriple.getArch()) { - case Triple::x86_64: - MapParams = Linux_X86_MemoryMapParams.bits64; - break; - case Triple::x86: - MapParams = Linux_X86_MemoryMapParams.bits32; - break; - case Triple::mips64: - case Triple::mips64el: - MapParams = Linux_MIPS_MemoryMapParams.bits64; - break; - case Triple::ppc64: - case Triple::ppc64le: - MapParams = Linux_PowerPC_MemoryMapParams.bits64; - break; - case Triple::aarch64: - case Triple::aarch64_be: - MapParams = Linux_ARM_MemoryMapParams.bits64; - break; - default: - report_fatal_error("unsupported architecture"); - } - break; - default: - report_fatal_error("unsupported operating system"); + bool ShadowPassed = ClShadowBase.getNumOccurrences() > 0; + bool OriginPassed = ClOriginBase.getNumOccurrences() > 0; + // Check the overrides first + if (ShadowPassed || OriginPassed) { + CustomMapParams.AndMask = ClAndMask; + CustomMapParams.XorMask = ClXorMask; + CustomMapParams.ShadowBase = ClShadowBase; + CustomMapParams.OriginBase = ClOriginBase; + MapParams = &CustomMapParams; + } else { + Triple TargetTriple(M.getTargetTriple()); + switch (TargetTriple.getOS()) { + case Triple::FreeBSD: + switch (TargetTriple.getArch()) { + case Triple::x86_64: + MapParams = FreeBSD_X86_MemoryMapParams.bits64; + break; + case Triple::x86: + MapParams = 
FreeBSD_X86_MemoryMapParams.bits32; + break; + default: + report_fatal_error("unsupported architecture"); + } + break; + case Triple::NetBSD: + switch (TargetTriple.getArch()) { + case Triple::x86_64: + MapParams = NetBSD_X86_MemoryMapParams.bits64; + break; + default: + report_fatal_error("unsupported architecture"); + } + break; + case Triple::Linux: + switch (TargetTriple.getArch()) { + case Triple::x86_64: + MapParams = Linux_X86_MemoryMapParams.bits64; + break; + case Triple::x86: + MapParams = Linux_X86_MemoryMapParams.bits32; + break; + case Triple::mips64: + case Triple::mips64el: + MapParams = Linux_MIPS_MemoryMapParams.bits64; + break; + case Triple::ppc64: + case Triple::ppc64le: + MapParams = Linux_PowerPC_MemoryMapParams.bits64; + break; + case Triple::aarch64: + case Triple::aarch64_be: + MapParams = Linux_ARM_MemoryMapParams.bits64; + break; + default: + report_fatal_error("unsupported architecture"); + } + break; + default: + report_fatal_error("unsupported operating system"); + } } C = &(M.getContext()); @@ -661,7 +721,7 @@ bool MemorySanitizer::doInitialization(Module &M) { namespace { -/// \brief A helper class that handles instrumentation of VarArg +/// A helper class that handles instrumentation of VarArg /// functions on a particular platform. /// /// Implementations are expected to insert the instrumentation @@ -672,16 +732,16 @@ namespace { struct VarArgHelper { virtual ~VarArgHelper() = default; - /// \brief Visit a CallSite. + /// Visit a CallSite. virtual void visitCallSite(CallSite &CS, IRBuilder<> &IRB) = 0; - /// \brief Visit a va_start call. + /// Visit a va_start call. virtual void visitVAStartInst(VAStartInst &I) = 0; - /// \brief Visit a va_copy call. + /// Visit a va_copy call. virtual void visitVACopyInst(VACopyInst &I) = 0; - /// \brief Finalize function instrumentation. + /// Finalize function instrumentation. /// /// This method is called after visiting all interesting (see above) /// instructions in a function. 
@@ -715,6 +775,7 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> { ValueMap<Value*, Value*> ShadowMap, OriginMap; std::unique_ptr<VarArgHelper> VAHelper; const TargetLibraryInfo *TLI; + BasicBlock *ActualFnStart; // The following flags disable parts of MSan instrumentation based on // blacklist contents and command-line options. @@ -747,9 +808,12 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> { CheckReturnValue = SanitizeFunction && (F.getName() == "main"); TLI = &MS.getAnalysis<TargetLibraryInfoWrapperPass>().getTLI(); - DEBUG(if (!InsertChecks) - dbgs() << "MemorySanitizer is not inserting checks into '" - << F.getName() << "'\n"); + MS.initializeCallbacks(*F.getParent()); + ActualFnStart = &F.getEntryBlock(); + + LLVM_DEBUG(if (!InsertChecks) dbgs() + << "MemorySanitizer is not inserting checks into '" + << F.getName() << "'\n"); } Value *updateOrigin(Value *V, IRBuilder<> &IRB) { @@ -766,7 +830,7 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> { return IRB.CreateOr(Origin, IRB.CreateShl(Origin, kOriginSize * 8)); } - /// \brief Fill memory range with the given origin value. + /// Fill memory range with the given origin value. 
void paintOrigin(IRBuilder<> &IRB, Value *Origin, Value *OriginPtr, unsigned Size, unsigned Alignment) { const DataLayout &DL = F.getParent()->getDataLayout(); @@ -849,13 +913,11 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> { unsigned Alignment = SI->getAlignment(); unsigned OriginAlignment = std::max(kMinOriginAlignment, Alignment); std::tie(ShadowPtr, OriginPtr) = - getShadowOriginPtr(Addr, IRB, ShadowTy, Alignment); + getShadowOriginPtr(Addr, IRB, ShadowTy, Alignment, /*isStore*/ true); StoreInst *NewSI = IRB.CreateAlignedStore(Shadow, ShadowPtr, Alignment); - DEBUG(dbgs() << " STORE: " << *NewSI << "\n"); - - if (ClCheckAccessAddress) - insertShadowCheck(Addr, NewSI); + LLVM_DEBUG(dbgs() << " STORE: " << *NewSI << "\n"); + (void)NewSI; if (SI->isAtomic()) SI->setOrdering(addReleaseOrdering(SI->getOrdering())); @@ -866,25 +928,31 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> { } } + /// Helper function to insert a warning at IRB's current insert point. + void insertWarningFn(IRBuilder<> &IRB, Value *Origin) { + if (!Origin) + Origin = (Value *)IRB.getInt32(0); + if (MS.TrackOrigins) { + IRB.CreateStore(Origin, MS.OriginTLS); + } + IRB.CreateCall(MS.WarningFn, {}); + IRB.CreateCall(MS.EmptyAsm, {}); + // FIXME: Insert UnreachableInst if !MS.Recover? + // This may invalidate some of the following checks and needs to be done + // at the very end. 
+ } + void materializeOneCheck(Instruction *OrigIns, Value *Shadow, Value *Origin, bool AsCall) { IRBuilder<> IRB(OrigIns); - DEBUG(dbgs() << " SHAD0 : " << *Shadow << "\n"); + LLVM_DEBUG(dbgs() << " SHAD0 : " << *Shadow << "\n"); Value *ConvertedShadow = convertToShadowTyNoVec(Shadow, IRB); - DEBUG(dbgs() << " SHAD1 : " << *ConvertedShadow << "\n"); + LLVM_DEBUG(dbgs() << " SHAD1 : " << *ConvertedShadow << "\n"); Constant *ConstantShadow = dyn_cast_or_null<Constant>(ConvertedShadow); if (ConstantShadow) { if (ClCheckConstantShadow && !ConstantShadow->isZeroValue()) { - if (MS.TrackOrigins) { - IRB.CreateStore(Origin ? (Value *)Origin : (Value *)IRB.getInt32(0), - MS.OriginTLS); - } - IRB.CreateCall(MS.WarningFn, {}); - IRB.CreateCall(MS.EmptyAsm, {}); - // FIXME: Insert UnreachableInst if !MS.Recover? - // This may invalidate some of the following checks and needs to be done - // at the very end. + insertWarningFn(IRB, Origin); } return; } @@ -908,13 +976,8 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> { /* Unreachable */ !MS.Recover, MS.ColdCallWeights); IRB.SetInsertPoint(CheckTerm); - if (MS.TrackOrigins) { - IRB.CreateStore(Origin ? (Value *)Origin : (Value *)IRB.getInt32(0), - MS.OriginTLS); - } - IRB.CreateCall(MS.WarningFn, {}); - IRB.CreateCall(MS.EmptyAsm, {}); - DEBUG(dbgs() << " CHECK: " << *Cmp << "\n"); + insertWarningFn(IRB, Origin); + LLVM_DEBUG(dbgs() << " CHECK: " << *Cmp << "\n"); } } @@ -925,13 +988,11 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> { Value *Origin = ShadowData.Origin; materializeOneCheck(OrigIns, Shadow, Origin, InstrumentWithCalls); } - DEBUG(dbgs() << "DONE:\n" << F); + LLVM_DEBUG(dbgs() << "DONE:\n" << F); } - /// \brief Add MemorySanitizer instrumentation to a function. + /// Add MemorySanitizer instrumentation to a function. 
bool runOnFunction() { - MS.initializeCallbacks(*F.getParent()); - // In the presence of unreachable blocks, we may see Phi nodes with // incoming nodes from such blocks. Since InstVisitor skips unreachable // blocks, such nodes will not have any shadow value associated with them. @@ -941,7 +1002,7 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> { // Iterate all BBs in depth-first order and create shadow instructions // for all instructions (where applicable). // For PHI nodes we create dummy shadow PHIs which will be finalized later. - for (BasicBlock *BB : depth_first(&F.getEntryBlock())) + for (BasicBlock *BB : depth_first(ActualFnStart)) visit(*BB); // Finalize PHI nodes. @@ -961,22 +1022,22 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> { InstrumentationList.size() + StoreList.size() > (unsigned)ClInstrumentationWithCallThreshold; - // Delayed instrumentation of StoreInst. - // This may add new checks to be inserted later. - materializeStores(InstrumentWithCalls); - // Insert shadow value checks. materializeChecks(InstrumentWithCalls); + // Delayed instrumentation of StoreInst. + // This may not add new address checks. + materializeStores(InstrumentWithCalls); + return true; } - /// \brief Compute the shadow type that corresponds to a given Value. + /// Compute the shadow type that corresponds to a given Value. Type *getShadowTy(Value *V) { return getShadowTy(V->getType()); } - /// \brief Compute the shadow type that corresponds to a given Type. + /// Compute the shadow type that corresponds to a given Type. 
Type *getShadowTy(Type *OrigTy) { if (!OrigTy->isSized()) { return nullptr; @@ -1000,21 +1061,21 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> { for (unsigned i = 0, n = ST->getNumElements(); i < n; i++) Elements.push_back(getShadowTy(ST->getElementType(i))); StructType *Res = StructType::get(*MS.C, Elements, ST->isPacked()); - DEBUG(dbgs() << "getShadowTy: " << *ST << " ===> " << *Res << "\n"); + LLVM_DEBUG(dbgs() << "getShadowTy: " << *ST << " ===> " << *Res << "\n"); return Res; } uint32_t TypeSize = DL.getTypeSizeInBits(OrigTy); return IntegerType::get(*MS.C, TypeSize); } - /// \brief Flatten a vector type. + /// Flatten a vector type. Type *getShadowTyNoVec(Type *ty) { if (VectorType *vt = dyn_cast<VectorType>(ty)) return IntegerType::get(*MS.C, vt->getBitWidth()); return ty; } - /// \brief Convert a shadow value to it's flattened variant. + /// Convert a shadow value to it's flattened variant. Value *convertToShadowTyNoVec(Value *V, IRBuilder<> &IRB) { Type *Ty = V->getType(); Type *NoVecTy = getShadowTyNoVec(Ty); @@ -1022,7 +1083,7 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> { return IRB.CreateBitCast(V, NoVecTy); } - /// \brief Compute the integer shadow offset that corresponds to a given + /// Compute the integer shadow offset that corresponds to a given /// application address. /// /// Offset = (Addr & ~AndMask) ^ XorMask @@ -1041,18 +1102,18 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> { return OffsetLong; } - /// \brief Compute the shadow and origin addresses corresponding to a given + /// Compute the shadow and origin addresses corresponding to a given /// application address. 
/// /// Shadow = ShadowBase + Offset /// Origin = (OriginBase + Offset) & ~3ULL - std::pair<Value *, Value *> getShadowOriginPtrUserspace( - Value *Addr, IRBuilder<> &IRB, Type *ShadowTy, unsigned Alignment, - Instruction **FirstInsn) { + std::pair<Value *, Value *> getShadowOriginPtrUserspace(Value *Addr, + IRBuilder<> &IRB, + Type *ShadowTy, + unsigned Alignment) { Value *ShadowOffset = getShadowPtrOffset(Addr, IRB); Value *ShadowLong = ShadowOffset; uint64_t ShadowBase = MS.MapParams->ShadowBase; - *FirstInsn = dyn_cast<Instruction>(ShadowLong); if (ShadowBase != 0) { ShadowLong = IRB.CreateAdd(ShadowLong, @@ -1080,58 +1141,60 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> { std::pair<Value *, Value *> getShadowOriginPtr(Value *Addr, IRBuilder<> &IRB, Type *ShadowTy, - unsigned Alignment) { - Instruction *FirstInsn = nullptr; + unsigned Alignment, + bool isStore) { std::pair<Value *, Value *> ret = - getShadowOriginPtrUserspace(Addr, IRB, ShadowTy, Alignment, &FirstInsn); + getShadowOriginPtrUserspace(Addr, IRB, ShadowTy, Alignment); return ret; } - /// \brief Compute the shadow address for a given function argument. + /// Compute the shadow address for a given function argument. /// /// Shadow = ParamTLS+ArgOffset. Value *getShadowPtrForArgument(Value *A, IRBuilder<> &IRB, int ArgOffset) { Value *Base = IRB.CreatePointerCast(MS.ParamTLS, MS.IntptrTy); - Base = IRB.CreateAdd(Base, ConstantInt::get(MS.IntptrTy, ArgOffset)); + if (ArgOffset) + Base = IRB.CreateAdd(Base, ConstantInt::get(MS.IntptrTy, ArgOffset)); return IRB.CreateIntToPtr(Base, PointerType::get(getShadowTy(A), 0), "_msarg"); } - /// \brief Compute the origin address for a given function argument. + /// Compute the origin address for a given function argument. 
Value *getOriginPtrForArgument(Value *A, IRBuilder<> &IRB, int ArgOffset) { if (!MS.TrackOrigins) return nullptr; Value *Base = IRB.CreatePointerCast(MS.ParamOriginTLS, MS.IntptrTy); - Base = IRB.CreateAdd(Base, ConstantInt::get(MS.IntptrTy, ArgOffset)); + if (ArgOffset) + Base = IRB.CreateAdd(Base, ConstantInt::get(MS.IntptrTy, ArgOffset)); return IRB.CreateIntToPtr(Base, PointerType::get(MS.OriginTy, 0), "_msarg_o"); } - /// \brief Compute the shadow address for a retval. + /// Compute the shadow address for a retval. Value *getShadowPtrForRetval(Value *A, IRBuilder<> &IRB) { return IRB.CreatePointerCast(MS.RetvalTLS, PointerType::get(getShadowTy(A), 0), "_msret"); } - /// \brief Compute the origin address for a retval. + /// Compute the origin address for a retval. Value *getOriginPtrForRetval(IRBuilder<> &IRB) { // We keep a single origin for the entire retval. Might be too optimistic. return MS.RetvalOriginTLS; } - /// \brief Set SV to be the shadow value for V. + /// Set SV to be the shadow value for V. void setShadow(Value *V, Value *SV) { assert(!ShadowMap.count(V) && "Values may only have one shadow"); ShadowMap[V] = PropagateShadow ? SV : getCleanShadow(V); } - /// \brief Set Origin to be the origin value for V. + /// Set Origin to be the origin value for V. void setOrigin(Value *V, Value *Origin) { if (!MS.TrackOrigins) return; assert(!OriginMap.count(V) && "Values may only have one origin"); - DEBUG(dbgs() << "ORIGIN: " << *V << " ==> " << *Origin << "\n"); + LLVM_DEBUG(dbgs() << "ORIGIN: " << *V << " ==> " << *Origin << "\n"); OriginMap[V] = Origin; } @@ -1142,7 +1205,7 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> { return Constant::getNullValue(ShadowTy); } - /// \brief Create a clean shadow value for a given value. + /// Create a clean shadow value for a given value. /// /// Clean shadow (all zeroes) means all bits of the value are defined /// (initialized). 
@@ -1150,7 +1213,7 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> { return getCleanShadow(V->getType()); } - /// \brief Create a dirty shadow of a given shadow type. + /// Create a dirty shadow of a given shadow type. Constant *getPoisonedShadow(Type *ShadowTy) { assert(ShadowTy); if (isa<IntegerType>(ShadowTy) || isa<VectorType>(ShadowTy)) @@ -1169,7 +1232,7 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> { llvm_unreachable("Unexpected shadow type"); } - /// \brief Create a dirty shadow for a given value. + /// Create a dirty shadow for a given value. Constant *getPoisonedShadow(Value *V) { Type *ShadowTy = getShadowTy(V); if (!ShadowTy) @@ -1177,12 +1240,12 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> { return getPoisonedShadow(ShadowTy); } - /// \brief Create a clean (zero) origin. + /// Create a clean (zero) origin. Value *getCleanOrigin() { return Constant::getNullValue(MS.OriginTy); } - /// \brief Get the shadow value for a given Value. + /// Get the shadow value for a given Value. /// /// This function either returns the value set earlier with setShadow, /// or extracts if from ParamTLS (for function arguments). @@ -1194,7 +1257,7 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> { // For instructions the shadow is already stored in the map. Value *Shadow = ShadowMap[V]; if (!Shadow) { - DEBUG(dbgs() << "No shadow: " << *V << "\n" << *(I->getParent())); + LLVM_DEBUG(dbgs() << "No shadow: " << *V << "\n" << *(I->getParent())); (void)I; assert(Shadow && "No shadow for a value"); } @@ -1202,7 +1265,7 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> { } if (UndefValue *U = dyn_cast<UndefValue>(V)) { Value *AllOnes = PoisonUndef ? 
getPoisonedShadow(V) : getCleanShadow(V); - DEBUG(dbgs() << "Undef: " << *U << " ==> " << *AllOnes << "\n"); + LLVM_DEBUG(dbgs() << "Undef: " << *U << " ==> " << *AllOnes << "\n"); (void)U; return AllOnes; } @@ -1212,12 +1275,12 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> { if (*ShadowPtr) return *ShadowPtr; Function *F = A->getParent(); - IRBuilder<> EntryIRB(F->getEntryBlock().getFirstNonPHI()); + IRBuilder<> EntryIRB(ActualFnStart->getFirstNonPHI()); unsigned ArgOffset = 0; const DataLayout &DL = F->getParent()->getDataLayout(); for (auto &FArg : F->args()) { if (!FArg.getType()->isSized()) { - DEBUG(dbgs() << "Arg is not sized\n"); + LLVM_DEBUG(dbgs() << "Arg is not sized\n"); continue; } unsigned Size = @@ -1237,7 +1300,8 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> { ArgAlign = DL.getABITypeAlignment(EltType); } Value *CpShadowPtr = - getShadowOriginPtr(V, EntryIRB, EntryIRB.getInt8Ty(), ArgAlign) + getShadowOriginPtr(V, EntryIRB, EntryIRB.getInt8Ty(), ArgAlign, + /*isStore*/ true) .first; if (Overflow) { // ParamTLS overflow. 
@@ -1246,9 +1310,9 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> { Size, ArgAlign); } else { unsigned CopyAlign = std::min(ArgAlign, kShadowTLSAlignment); - Value *Cpy = - EntryIRB.CreateMemCpy(CpShadowPtr, Base, Size, CopyAlign); - DEBUG(dbgs() << " ByValCpy: " << *Cpy << "\n"); + Value *Cpy = EntryIRB.CreateMemCpy(CpShadowPtr, CopyAlign, Base, + CopyAlign, Size); + LLVM_DEBUG(dbgs() << " ByValCpy: " << *Cpy << "\n"); (void)Cpy; } *ShadowPtr = getCleanShadow(V); @@ -1261,8 +1325,8 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> { EntryIRB.CreateAlignedLoad(Base, kShadowTLSAlignment); } } - DEBUG(dbgs() << " ARG: " << FArg << " ==> " << - **ShadowPtr << "\n"); + LLVM_DEBUG(dbgs() + << " ARG: " << FArg << " ==> " << **ShadowPtr << "\n"); if (MS.TrackOrigins && !Overflow) { Value *OriginPtr = getOriginPtrForArgument(&FArg, EntryIRB, ArgOffset); @@ -1280,12 +1344,12 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> { return getCleanShadow(V); } - /// \brief Get the shadow for i-th argument of the instruction I. + /// Get the shadow for i-th argument of the instruction I. Value *getShadow(Instruction *I, int i) { return getShadow(I->getOperand(i)); } - /// \brief Get the origin for a value. + /// Get the origin for a value. Value *getOrigin(Value *V) { if (!MS.TrackOrigins) return nullptr; if (!PropagateShadow) return getCleanOrigin(); @@ -1301,12 +1365,12 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> { return Origin; } - /// \brief Get the origin for i-th argument of the instruction I. + /// Get the origin for i-th argument of the instruction I. Value *getOrigin(Instruction *I, int i) { return getOrigin(I->getOperand(i)); } - /// \brief Remember the place where a shadow check should be inserted. + /// Remember the place where a shadow check should be inserted. 
/// /// This location will be later instrumented with a check that will print a /// UMR warning in runtime if the shadow value is not 0. @@ -1322,7 +1386,7 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> { ShadowOriginAndInsertPoint(Shadow, Origin, OrigIns)); } - /// \brief Remember the place where a shadow check should be inserted. + /// Remember the place where a shadow check should be inserted. /// /// This location will be later instrumented with a check that will print a /// UMR warning in runtime if the value is not fully defined. @@ -1382,7 +1446,7 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> { InstVisitor<MemorySanitizerVisitor>::visit(I); } - /// \brief Instrument LoadInst + /// Instrument LoadInst /// /// Loads the corresponding shadow and (optionally) origin. /// Optionally, checks that the load address is fully defined. @@ -1396,7 +1460,7 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> { unsigned Alignment = I.getAlignment(); if (PropagateShadow) { std::tie(ShadowPtr, OriginPtr) = - getShadowOriginPtr(Addr, IRB, ShadowTy, Alignment); + getShadowOriginPtr(Addr, IRB, ShadowTy, Alignment, /*isStore*/ false); setShadow(&I, IRB.CreateAlignedLoad(ShadowPtr, Alignment, "_msld")); } else { setShadow(&I, getCleanShadow(&I)); @@ -1418,12 +1482,14 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> { } } - /// \brief Instrument StoreInst + /// Instrument StoreInst /// /// Stores the corresponding shadow and (optionally) origin. /// Optionally, checks that the store address is fully defined. 
void visitStoreInst(StoreInst &I) { StoreList.push_back(&I); + if (ClCheckAccessAddress) + insertShadowCheck(I.getPointerOperand(), &I); } void handleCASOrRMW(Instruction &I) { @@ -1431,8 +1497,9 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> { IRBuilder<> IRB(&I); Value *Addr = I.getOperand(0); - Value *ShadowPtr = - getShadowOriginPtr(Addr, IRB, I.getType(), /*Alignment*/ 1).first; + Value *ShadowPtr = getShadowOriginPtr(Addr, IRB, I.getType(), + /*Alignment*/ 1, /*isStore*/ true) + .first; if (ClCheckAccessAddress) insertShadowCheck(Addr, &I); @@ -1536,7 +1603,7 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> { void visitFPExtInst(CastInst& I) { handleShadowOr(I); } void visitFPTruncInst(CastInst& I) { handleShadowOr(I); } - /// \brief Propagate shadow for bitwise AND. + /// Propagate shadow for bitwise AND. /// /// This code is exact, i.e. if, for example, a bit in the left argument /// is defined and 0, then neither the value not definedness of the @@ -1585,7 +1652,7 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> { setOriginForNaryOp(I); } - /// \brief Default propagation of shadow and/or origin. + /// Default propagation of shadow and/or origin. /// /// This class implements the general case of shadow propagation, used in all /// cases where we don't know and/or don't care about what the operation @@ -1611,7 +1678,7 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> { Combiner(MemorySanitizerVisitor *MSV, IRBuilder<> &IRB) : IRB(IRB), MSV(MSV) {} - /// \brief Add a pair of shadow and origin values to the mix. + /// Add a pair of shadow and origin values to the mix. Combiner &Add(Value *OpShadow, Value *OpOrigin) { if (CombineShadow) { assert(OpShadow); @@ -1641,14 +1708,14 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> { return *this; } - /// \brief Add an application value to the mix. 
+ /// Add an application value to the mix. Combiner &Add(Value *V) { Value *OpShadow = MSV->getShadow(V); Value *OpOrigin = MSV->MS.TrackOrigins ? MSV->getOrigin(V) : nullptr; return Add(OpShadow, OpOrigin); } - /// \brief Set the current combined values as the given instruction's shadow + /// Set the current combined values as the given instruction's shadow /// and origin. void Done(Instruction *I) { if (CombineShadow) { @@ -1666,7 +1733,7 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> { using ShadowAndOriginCombiner = Combiner<true>; using OriginCombiner = Combiner<false>; - /// \brief Propagate origin for arbitrary operation. + /// Propagate origin for arbitrary operation. void setOriginForNaryOp(Instruction &I) { if (!MS.TrackOrigins) return; IRBuilder<> IRB(&I); @@ -1684,7 +1751,7 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> { Ty->getPrimitiveSizeInBits(); } - /// \brief Cast between two shadow types, extending or truncating as + /// Cast between two shadow types, extending or truncating as /// necessary. Value *CreateShadowCast(IRBuilder<> &IRB, Value *V, Type *dstTy, bool Signed = false) { @@ -1706,7 +1773,7 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> { // TODO: handle struct types. } - /// \brief Cast an application value to the type of its own shadow. + /// Cast an application value to the type of its own shadow. Value *CreateAppToShadowCast(IRBuilder<> &IRB, Value *V) { Type *ShadowTy = getShadowTy(V); if (V->getType() == ShadowTy) @@ -1717,7 +1784,7 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> { return IRB.CreateBitCast(V, ShadowTy); } - /// \brief Propagate shadow for arbitrary operation. + /// Propagate shadow for arbitrary operation. 
void handleShadowOr(Instruction &I) { IRBuilder<> IRB(&I); ShadowAndOriginCombiner SC(this, IRB); @@ -1726,7 +1793,7 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> { SC.Done(&I); } - // \brief Handle multiplication by constant. + // Handle multiplication by constant. // // Handle a special case of multiplication by constant that may have one or // more zeros in the lower bits. This makes corresponding number of lower bits @@ -1788,7 +1855,7 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> { void visitSub(BinaryOperator &I) { handleShadowOr(I); } void visitXor(BinaryOperator &I) { handleShadowOr(I); } - void handleDiv(Instruction &I) { + void handleIntegerDiv(Instruction &I) { IRBuilder<> IRB(&I); // Strict on the second argument. insertShadowCheck(I.getOperand(1), &I); @@ -1796,14 +1863,17 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> { setOrigin(&I, getOrigin(&I, 0)); } - void visitUDiv(BinaryOperator &I) { handleDiv(I); } - void visitSDiv(BinaryOperator &I) { handleDiv(I); } - void visitFDiv(BinaryOperator &I) { handleDiv(I); } - void visitURem(BinaryOperator &I) { handleDiv(I); } - void visitSRem(BinaryOperator &I) { handleDiv(I); } - void visitFRem(BinaryOperator &I) { handleDiv(I); } + void visitUDiv(BinaryOperator &I) { handleIntegerDiv(I); } + void visitSDiv(BinaryOperator &I) { handleIntegerDiv(I); } + void visitURem(BinaryOperator &I) { handleIntegerDiv(I); } + void visitSRem(BinaryOperator &I) { handleIntegerDiv(I); } + + // Floating point division is side-effect free. We can not require that the + // divisor is fully initialized and must propagate shadow. See PR37523. + void visitFDiv(BinaryOperator &I) { handleShadowOr(I); } + void visitFRem(BinaryOperator &I) { handleShadowOr(I); } - /// \brief Instrument == and != comparisons. + /// Instrument == and != comparisons. 
/// /// Sometimes the comparison result is known even if some of the bits of the /// arguments are not. @@ -1841,7 +1911,7 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> { setOriginForNaryOp(I); } - /// \brief Build the lowest possible value of V, taking into account V's + /// Build the lowest possible value of V, taking into account V's /// uninitialized bits. Value *getLowestPossibleValue(IRBuilder<> &IRB, Value *A, Value *Sa, bool isSigned) { @@ -1858,7 +1928,7 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> { } } - /// \brief Build the highest possible value of V, taking into account V's + /// Build the highest possible value of V, taking into account V's /// uninitialized bits. Value *getHighestPossibleValue(IRBuilder<> &IRB, Value *A, Value *Sa, bool isSigned) { @@ -1875,7 +1945,7 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> { } } - /// \brief Instrument relational comparisons. + /// Instrument relational comparisons. /// /// This function does exact shadow propagation for all relational /// comparisons of integers, pointers and vectors of those. @@ -1908,7 +1978,7 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> { setOriginForNaryOp(I); } - /// \brief Instrument signed relational comparisons. + /// Instrument signed relational comparisons. /// /// Handle sign bit tests: x<0, x>=0, x<=-1, x>-1 by propagating the highest /// bit of the shadow. Everything else is delegated to handleShadowOr(). @@ -1992,7 +2062,7 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> { void visitAShr(BinaryOperator &I) { handleShift(I); } void visitLShr(BinaryOperator &I) { handleShift(I); } - /// \brief Instrument llvm.memmove + /// Instrument llvm.memmove /// /// At this point we don't know if llvm.memmove will be inlined or not. 
/// If we don't instrument it and it gets inlined, @@ -2045,7 +2115,7 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> { VAHelper->visitVACopyInst(I); } - /// \brief Handle vector store-like intrinsics. + /// Handle vector store-like intrinsics. /// /// Instrument intrinsics that look like a simple SIMD store: writes memory, /// has 1 pointer argument and 1 vector argument, returns void. @@ -2057,8 +2127,8 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> { // We don't know the pointer alignment (could be unaligned SSE store!). // Have to assume to worst case. - std::tie(ShadowPtr, OriginPtr) = - getShadowOriginPtr(Addr, IRB, Shadow->getType(), /*Alignment*/ 1); + std::tie(ShadowPtr, OriginPtr) = getShadowOriginPtr( + Addr, IRB, Shadow->getType(), /*Alignment*/ 1, /*isStore*/ true); IRB.CreateAlignedStore(Shadow, ShadowPtr, 1); if (ClCheckAccessAddress) @@ -2069,7 +2139,7 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> { return true; } - /// \brief Handle vector load-like intrinsics. + /// Handle vector load-like intrinsics. /// /// Instrument intrinsics that look like a simple SIMD load: reads memory, /// has 1 pointer argument, returns a vector. @@ -2084,7 +2154,7 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> { // Have to assume to worst case. unsigned Alignment = 1; std::tie(ShadowPtr, OriginPtr) = - getShadowOriginPtr(Addr, IRB, ShadowTy, Alignment); + getShadowOriginPtr(Addr, IRB, ShadowTy, Alignment, /*isStore*/ false); setShadow(&I, IRB.CreateAlignedLoad(ShadowPtr, Alignment, "_msld")); } else { setShadow(&I, getCleanShadow(&I)); @@ -2102,7 +2172,7 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> { return true; } - /// \brief Handle (SIMD arithmetic)-like intrinsics. + /// Handle (SIMD arithmetic)-like intrinsics. 
/// /// Instrument intrinsics with any number of arguments of the same type, /// equal to the return type. The type should be simple (no aggregates or @@ -2132,7 +2202,7 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> { return true; } - /// \brief Heuristically instrument unknown intrinsics. + /// Heuristically instrument unknown intrinsics. /// /// The main purpose of this code is to do something reasonable with all /// random intrinsics we might encounter, most importantly - SIMD intrinsics. @@ -2182,7 +2252,7 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> { setOrigin(&I, getOrigin(Op)); } - // \brief Instrument vector convert instrinsic. + // Instrument vector convert instrinsic. // // This function instruments intrinsics like cvtsi2ss: // %Out = int_xxx_cvtyyy(%ConvertOp) @@ -2285,7 +2355,7 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> { return IRB.CreateSExt(S2, T); } - // \brief Instrument vector shift instrinsic. + // Instrument vector shift instrinsic. // // This function instruments intrinsics like int_x86_avx2_psll_w. // Intrinsic shifts %In by %ShiftSize bits. @@ -2310,14 +2380,14 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> { setOriginForNaryOp(I); } - // \brief Get an X86_MMX-sized vector type. + // Get an X86_MMX-sized vector type. Type *getMMXVectorTy(unsigned EltSizeInBits) { const unsigned X86_MMXSizeInBits = 64; return VectorType::get(IntegerType::get(*MS.C, EltSizeInBits), X86_MMXSizeInBits / EltSizeInBits); } - // \brief Returns a signed counterpart for an (un)signed-saturate-and-pack + // Returns a signed counterpart for an (un)signed-saturate-and-pack // intrinsic. Intrinsic::ID getSignedPackIntrinsic(Intrinsic::ID id) { switch (id) { @@ -2348,7 +2418,7 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> { } } - // \brief Instrument vector pack instrinsic. 
+ // Instrument vector pack instrinsic. // // This function instruments intrinsics like x86_mmx_packsswb, that // packs elements of 2 input vectors into half as many bits with saturation. @@ -2391,7 +2461,7 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> { setOriginForNaryOp(I); } - // \brief Instrument sum-of-absolute-differencies intrinsic. + // Instrument sum-of-absolute-differencies intrinsic. void handleVectorSadIntrinsic(IntrinsicInst &I) { const unsigned SignificantBitsPerResultElement = 16; bool isX86_MMX = I.getOperand(0)->getType()->isX86_MMXTy(); @@ -2410,7 +2480,7 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> { setOriginForNaryOp(I); } - // \brief Instrument multiply-add intrinsic. + // Instrument multiply-add intrinsic. void handleVectorPmaddIntrinsic(IntrinsicInst &I, unsigned EltSizeInBits = 0) { bool isX86_MMX = I.getOperand(0)->getType()->isX86_MMXTy(); @@ -2425,7 +2495,7 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> { setOriginForNaryOp(I); } - // \brief Instrument compare-packed intrinsic. + // Instrument compare-packed intrinsic. // Basically, an or followed by sext(icmp ne 0) to end up with all-zeros or // all-ones shadow. void handleVectorComparePackedIntrinsic(IntrinsicInst &I) { @@ -2438,7 +2508,7 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> { setOriginForNaryOp(I); } - // \brief Instrument compare-scalar intrinsic. + // Instrument compare-scalar intrinsic. // This handles both cmp* intrinsics which return the result in the first // element of a vector, and comi* which return the result as i32. 
void handleVectorCompareScalarIntrinsic(IntrinsicInst &I) { @@ -2453,7 +2523,9 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> { IRBuilder<> IRB(&I); Value* Addr = I.getArgOperand(0); Type *Ty = IRB.getInt32Ty(); - Value *ShadowPtr = getShadowOriginPtr(Addr, IRB, Ty, /*Alignment*/ 1).first; + Value *ShadowPtr = + getShadowOriginPtr(Addr, IRB, Ty, /*Alignment*/ 1, /*isStore*/ true) + .first; IRB.CreateStore(getCleanShadow(Ty), IRB.CreatePointerCast(ShadowPtr, Ty->getPointerTo())); @@ -2471,7 +2543,7 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> { unsigned Alignment = 1; Value *ShadowPtr, *OriginPtr; std::tie(ShadowPtr, OriginPtr) = - getShadowOriginPtr(Addr, IRB, Ty, Alignment); + getShadowOriginPtr(Addr, IRB, Ty, Alignment, /*isStore*/ false); if (ClCheckAccessAddress) insertShadowCheck(Addr, &I); @@ -2482,11 +2554,98 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> { insertShadowCheck(Shadow, Origin, &I); } + void handleMaskedStore(IntrinsicInst &I) { + IRBuilder<> IRB(&I); + Value *V = I.getArgOperand(0); + Value *Addr = I.getArgOperand(1); + unsigned Align = cast<ConstantInt>(I.getArgOperand(2))->getZExtValue(); + Value *Mask = I.getArgOperand(3); + Value *Shadow = getShadow(V); + + Value *ShadowPtr; + Value *OriginPtr; + std::tie(ShadowPtr, OriginPtr) = getShadowOriginPtr( + Addr, IRB, Shadow->getType(), Align, /*isStore*/ true); + + if (ClCheckAccessAddress) { + insertShadowCheck(Addr, &I); + // Uninitialized mask is kind of like uninitialized address, but not as + // scary. 
+ insertShadowCheck(Mask, &I); + } + + IRB.CreateMaskedStore(Shadow, ShadowPtr, Align, Mask); + + if (MS.TrackOrigins) { + auto &DL = F.getParent()->getDataLayout(); + paintOrigin(IRB, getOrigin(V), OriginPtr, + DL.getTypeStoreSize(Shadow->getType()), + std::max(Align, kMinOriginAlignment)); + } + } + + bool handleMaskedLoad(IntrinsicInst &I) { + IRBuilder<> IRB(&I); + Value *Addr = I.getArgOperand(0); + unsigned Align = cast<ConstantInt>(I.getArgOperand(1))->getZExtValue(); + Value *Mask = I.getArgOperand(2); + Value *PassThru = I.getArgOperand(3); + + Type *ShadowTy = getShadowTy(&I); + Value *ShadowPtr, *OriginPtr; + if (PropagateShadow) { + std::tie(ShadowPtr, OriginPtr) = + getShadowOriginPtr(Addr, IRB, ShadowTy, Align, /*isStore*/ false); + setShadow(&I, IRB.CreateMaskedLoad(ShadowPtr, Align, Mask, + getShadow(PassThru), "_msmaskedld")); + } else { + setShadow(&I, getCleanShadow(&I)); + } + + if (ClCheckAccessAddress) { + insertShadowCheck(Addr, &I); + insertShadowCheck(Mask, &I); + } + + if (MS.TrackOrigins) { + if (PropagateShadow) { + // Choose between PassThru's and the loaded value's origins. 
+ Value *MaskedPassThruShadow = IRB.CreateAnd( + getShadow(PassThru), IRB.CreateSExt(IRB.CreateNeg(Mask), ShadowTy)); + + Value *Acc = IRB.CreateExtractElement( + MaskedPassThruShadow, ConstantInt::get(IRB.getInt32Ty(), 0)); + for (int i = 1, N = PassThru->getType()->getVectorNumElements(); i < N; + ++i) { + Value *More = IRB.CreateExtractElement( + MaskedPassThruShadow, ConstantInt::get(IRB.getInt32Ty(), i)); + Acc = IRB.CreateOr(Acc, More); + } + + Value *Origin = IRB.CreateSelect( + IRB.CreateICmpNE(Acc, Constant::getNullValue(Acc->getType())), + getOrigin(PassThru), IRB.CreateLoad(OriginPtr)); + + setOrigin(&I, Origin); + } else { + setOrigin(&I, getCleanOrigin()); + } + } + return true; + } + + void visitIntrinsicInst(IntrinsicInst &I) { switch (I.getIntrinsicID()) { case Intrinsic::bswap: handleBswap(I); break; + case Intrinsic::masked_store: + handleMaskedStore(I); + break; + case Intrinsic::masked_load: + handleMaskedLoad(I); + break; case Intrinsic::x86_sse_stmxcsr: handleStmxcsr(I); break; @@ -2501,20 +2660,14 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> { case Intrinsic::x86_avx512_cvttss2usi: case Intrinsic::x86_avx512_cvttsd2usi64: case Intrinsic::x86_avx512_cvttsd2usi: - case Intrinsic::x86_avx512_cvtusi2sd: case Intrinsic::x86_avx512_cvtusi2ss: case Intrinsic::x86_avx512_cvtusi642sd: case Intrinsic::x86_avx512_cvtusi642ss: case Intrinsic::x86_sse2_cvtsd2si64: case Intrinsic::x86_sse2_cvtsd2si: case Intrinsic::x86_sse2_cvtsd2ss: - case Intrinsic::x86_sse2_cvtsi2sd: - case Intrinsic::x86_sse2_cvtsi642sd: - case Intrinsic::x86_sse2_cvtss2sd: case Intrinsic::x86_sse2_cvttsd2si64: case Intrinsic::x86_sse2_cvttsd2si: - case Intrinsic::x86_sse_cvtsi2ss: - case Intrinsic::x86_sse_cvtsi642ss: case Intrinsic::x86_sse_cvtss2si64: case Intrinsic::x86_sse_cvtss2si: case Intrinsic::x86_sse_cvttss2si64: @@ -2715,7 +2868,10 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> { // outputs as clean. 
Note that any side effects of the inline asm that are // not immediately visible in its constraints are not handled. if (Call->isInlineAsm()) { - visitInstruction(I); + if (ClHandleAsmConservative) + visitAsmInstruction(I); + else + visitInstruction(I); return; } @@ -2738,13 +2894,13 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> { IRBuilder<> IRB(&I); unsigned ArgOffset = 0; - DEBUG(dbgs() << " CallSite: " << I << "\n"); + LLVM_DEBUG(dbgs() << " CallSite: " << I << "\n"); for (CallSite::arg_iterator ArgIt = CS.arg_begin(), End = CS.arg_end(); ArgIt != End; ++ArgIt) { Value *A = *ArgIt; unsigned i = ArgIt - CS.arg_begin(); if (!A->getType()->isSized()) { - DEBUG(dbgs() << "Arg " << i << " is not sized: " << I << "\n"); + LLVM_DEBUG(dbgs() << "Arg " << i << " is not sized: " << I << "\n"); continue; } unsigned Size = 0; @@ -2754,8 +2910,8 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> { // __msan_param_tls. Value *ArgShadow = getShadow(A); Value *ArgShadowBase = getShadowPtrForArgument(A, IRB, ArgOffset); - DEBUG(dbgs() << " Arg#" << i << ": " << *A << - " Shadow: " << *ArgShadow << "\n"); + LLVM_DEBUG(dbgs() << " Arg#" << i << ": " << *A + << " Shadow: " << *ArgShadow << "\n"); bool ArgIsInitialized = false; const DataLayout &DL = F.getParent()->getDataLayout(); if (CS.paramHasAttr(i, Attribute::ByVal)) { @@ -2765,10 +2921,12 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> { if (ArgOffset + Size > kParamTLSSize) break; unsigned ParamAlignment = CS.getParamAlignment(i); unsigned Alignment = std::min(ParamAlignment, kShadowTLSAlignment); - Value *AShadowPtr = - getShadowOriginPtr(A, IRB, IRB.getInt8Ty(), Alignment).first; + Value *AShadowPtr = getShadowOriginPtr(A, IRB, IRB.getInt8Ty(), + Alignment, /*isStore*/ false) + .first; - Store = IRB.CreateMemCpy(ArgShadowBase, AShadowPtr, Size, Alignment); + Store = IRB.CreateMemCpy(ArgShadowBase, Alignment, AShadowPtr, + Alignment, 
Size); } else { Size = DL.getTypeAllocSize(A->getType()); if (ArgOffset + Size > kParamTLSSize) break; @@ -2782,10 +2940,10 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> { getOriginPtrForArgument(A, IRB, ArgOffset)); (void)Store; assert(Size != 0 && Store != nullptr); - DEBUG(dbgs() << " Param:" << *Store << "\n"); + LLVM_DEBUG(dbgs() << " Param:" << *Store << "\n"); ArgOffset += alignTo(Size, 8); } - DEBUG(dbgs() << " done with call args\n"); + LLVM_DEBUG(dbgs() << " done with call args\n"); FunctionType *FT = cast<FunctionType>(CS.getCalledValue()->getType()->getContainedType(0)); @@ -2888,8 +3046,9 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> { IRB.CreateCall(MS.MsanPoisonStackFn, {IRB.CreatePointerCast(&I, IRB.getInt8PtrTy()), Len}); } else { - Value *ShadowBase = - getShadowOriginPtr(&I, IRB, IRB.getInt8Ty(), I.getAlignment()).first; + Value *ShadowBase = getShadowOriginPtr(&I, IRB, IRB.getInt8Ty(), + I.getAlignment(), /*isStore*/ true) + .first; Value *PoisonValue = IRB.getInt8(PoisonStack ? 
ClPoisonStackPattern : 0); IRB.CreateMemSet(ShadowBase, PoisonValue, Len, I.getAlignment()); @@ -2991,24 +3150,24 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> { void visitExtractValueInst(ExtractValueInst &I) { IRBuilder<> IRB(&I); Value *Agg = I.getAggregateOperand(); - DEBUG(dbgs() << "ExtractValue: " << I << "\n"); + LLVM_DEBUG(dbgs() << "ExtractValue: " << I << "\n"); Value *AggShadow = getShadow(Agg); - DEBUG(dbgs() << " AggShadow: " << *AggShadow << "\n"); + LLVM_DEBUG(dbgs() << " AggShadow: " << *AggShadow << "\n"); Value *ResShadow = IRB.CreateExtractValue(AggShadow, I.getIndices()); - DEBUG(dbgs() << " ResShadow: " << *ResShadow << "\n"); + LLVM_DEBUG(dbgs() << " ResShadow: " << *ResShadow << "\n"); setShadow(&I, ResShadow); setOriginForNaryOp(I); } void visitInsertValueInst(InsertValueInst &I) { IRBuilder<> IRB(&I); - DEBUG(dbgs() << "InsertValue: " << I << "\n"); + LLVM_DEBUG(dbgs() << "InsertValue: " << I << "\n"); Value *AggShadow = getShadow(I.getAggregateOperand()); Value *InsShadow = getShadow(I.getInsertedValueOperand()); - DEBUG(dbgs() << " AggShadow: " << *AggShadow << "\n"); - DEBUG(dbgs() << " InsShadow: " << *InsShadow << "\n"); + LLVM_DEBUG(dbgs() << " AggShadow: " << *AggShadow << "\n"); + LLVM_DEBUG(dbgs() << " InsShadow: " << *InsShadow << "\n"); Value *Res = IRB.CreateInsertValue(AggShadow, InsShadow, I.getIndices()); - DEBUG(dbgs() << " Res: " << *Res << "\n"); + LLVM_DEBUG(dbgs() << " Res: " << *Res << "\n"); setShadow(&I, Res); setOriginForNaryOp(I); } @@ -3023,25 +3182,58 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> { } void visitResumeInst(ResumeInst &I) { - DEBUG(dbgs() << "Resume: " << I << "\n"); + LLVM_DEBUG(dbgs() << "Resume: " << I << "\n"); // Nothing to do here. } void visitCleanupReturnInst(CleanupReturnInst &CRI) { - DEBUG(dbgs() << "CleanupReturn: " << CRI << "\n"); + LLVM_DEBUG(dbgs() << "CleanupReturn: " << CRI << "\n"); // Nothing to do here. 
} void visitCatchReturnInst(CatchReturnInst &CRI) { - DEBUG(dbgs() << "CatchReturn: " << CRI << "\n"); + LLVM_DEBUG(dbgs() << "CatchReturn: " << CRI << "\n"); // Nothing to do here. } + void visitAsmInstruction(Instruction &I) { + // Conservative inline assembly handling: check for poisoned shadow of + // asm() arguments, then unpoison the result and all the memory locations + // pointed to by those arguments. + CallInst *CI = dyn_cast<CallInst>(&I); + + for (size_t i = 0, n = CI->getNumOperands(); i < n; i++) { + Value *Operand = CI->getOperand(i); + if (Operand->getType()->isSized()) + insertShadowCheck(Operand, &I); + } + setShadow(&I, getCleanShadow(&I)); + setOrigin(&I, getCleanOrigin()); + IRBuilder<> IRB(&I); + IRB.SetInsertPoint(I.getNextNode()); + for (size_t i = 0, n = CI->getNumOperands(); i < n; i++) { + Value *Operand = CI->getOperand(i); + Type *OpType = Operand->getType(); + if (!OpType->isPointerTy()) + continue; + Type *ElType = OpType->getPointerElementType(); + if (!ElType->isSized()) + continue; + Value *ShadowPtr, *OriginPtr; + std::tie(ShadowPtr, OriginPtr) = getShadowOriginPtr( + Operand, IRB, ElType, /*Alignment*/ 1, /*isStore*/ true); + Value *CShadow = getCleanShadow(ElType); + IRB.CreateStore( + CShadow, + IRB.CreatePointerCast(ShadowPtr, CShadow->getType()->getPointerTo())); + } + } + void visitInstruction(Instruction &I) { // Everything else: stop propagating and check for poisoned shadow. if (ClDumpStrictInstructions) dumpInst(I); - DEBUG(dbgs() << "DEFAULT: " << I << "\n"); + LLVM_DEBUG(dbgs() << "DEFAULT: " << I << "\n"); for (size_t i = 0, n = I.getNumOperands(); i < n; i++) { Value *Operand = I.getOperand(i); if (Operand->getType()->isSized()) @@ -3052,7 +3244,7 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> { } }; -/// \brief AMD64-specific implementation of VarArgHelper. +/// AMD64-specific implementation of VarArgHelper. 
struct VarArgAMD64Helper : public VarArgHelper { // An unfortunate workaround for asymmetric lowering of va_arg stuff. // See a comment in visitCallSite for more details. @@ -3116,10 +3308,12 @@ struct VarArgAMD64Helper : public VarArgHelper { getShadowPtrForVAArgument(RealTy, IRB, OverflowOffset); OverflowOffset += alignTo(ArgSize, 8); Value *ShadowPtr, *OriginPtr; - std::tie(ShadowPtr, OriginPtr) = MSV.getShadowOriginPtr( - A, IRB, IRB.getInt8Ty(), kShadowTLSAlignment); + std::tie(ShadowPtr, OriginPtr) = + MSV.getShadowOriginPtr(A, IRB, IRB.getInt8Ty(), kShadowTLSAlignment, + /*isStore*/ false); - IRB.CreateMemCpy(ShadowBase, ShadowPtr, ArgSize, kShadowTLSAlignment); + IRB.CreateMemCpy(ShadowBase, kShadowTLSAlignment, ShadowPtr, + kShadowTLSAlignment, ArgSize); } else { ArgKind AK = classifyArgument(A); if (AK == AK_GeneralPurpose && GpOffset >= AMD64GpEndOffset) @@ -3157,7 +3351,7 @@ struct VarArgAMD64Helper : public VarArgHelper { IRB.CreateStore(OverflowSize, MS.VAArgOverflowSizeTLS); } - /// \brief Compute the shadow address for a given va_arg. + /// Compute the shadow address for a given va_arg. Value *getShadowPtrForVAArgument(Type *Ty, IRBuilder<> &IRB, int ArgOffset) { Value *Base = IRB.CreatePointerCast(MS.VAArgTLS, MS.IntptrTy); @@ -3172,7 +3366,8 @@ struct VarArgAMD64Helper : public VarArgHelper { Value *ShadowPtr, *OriginPtr; unsigned Alignment = 8; std::tie(ShadowPtr, OriginPtr) = - MSV.getShadowOriginPtr(VAListTag, IRB, IRB.getInt8Ty(), Alignment); + MSV.getShadowOriginPtr(VAListTag, IRB, IRB.getInt8Ty(), Alignment, + /*isStore*/ true); // Unpoison the whole __va_list_tag. // FIXME: magic ABI constants. @@ -3200,13 +3395,13 @@ struct VarArgAMD64Helper : public VarArgHelper { if (!VAStartInstrumentationList.empty()) { // If there is a va_start in this function, make a backup copy of // va_arg_tls somewhere in the function entry block. 
- IRBuilder<> IRB(F.getEntryBlock().getFirstNonPHI()); + IRBuilder<> IRB(MSV.ActualFnStart->getFirstNonPHI()); VAArgOverflowSize = IRB.CreateLoad(MS.VAArgOverflowSizeTLS); Value *CopySize = IRB.CreateAdd(ConstantInt::get(MS.IntptrTy, AMD64FpEndOffset), VAArgOverflowSize); VAArgTLSCopy = IRB.CreateAlloca(Type::getInt8Ty(*MS.C), CopySize); - IRB.CreateMemCpy(VAArgTLSCopy, MS.VAArgTLS, CopySize, 8); + IRB.CreateMemCpy(VAArgTLSCopy, 8, MS.VAArgTLS, 8, CopySize); } // Instrument va_start. @@ -3219,33 +3414,33 @@ struct VarArgAMD64Helper : public VarArgHelper { Value *RegSaveAreaPtrPtr = IRB.CreateIntToPtr( IRB.CreateAdd(IRB.CreatePtrToInt(VAListTag, MS.IntptrTy), ConstantInt::get(MS.IntptrTy, 16)), - Type::getInt64PtrTy(*MS.C)); + PointerType::get(Type::getInt64PtrTy(*MS.C), 0)); Value *RegSaveAreaPtr = IRB.CreateLoad(RegSaveAreaPtrPtr); Value *RegSaveAreaShadowPtr, *RegSaveAreaOriginPtr; unsigned Alignment = 16; std::tie(RegSaveAreaShadowPtr, RegSaveAreaOriginPtr) = MSV.getShadowOriginPtr(RegSaveAreaPtr, IRB, IRB.getInt8Ty(), - Alignment); - IRB.CreateMemCpy(RegSaveAreaShadowPtr, VAArgTLSCopy, AMD64FpEndOffset, - Alignment); + Alignment, /*isStore*/ true); + IRB.CreateMemCpy(RegSaveAreaShadowPtr, Alignment, VAArgTLSCopy, Alignment, + AMD64FpEndOffset); Value *OverflowArgAreaPtrPtr = IRB.CreateIntToPtr( IRB.CreateAdd(IRB.CreatePtrToInt(VAListTag, MS.IntptrTy), ConstantInt::get(MS.IntptrTy, 8)), - Type::getInt64PtrTy(*MS.C)); + PointerType::get(Type::getInt64PtrTy(*MS.C), 0)); Value *OverflowArgAreaPtr = IRB.CreateLoad(OverflowArgAreaPtrPtr); Value *OverflowArgAreaShadowPtr, *OverflowArgAreaOriginPtr; std::tie(OverflowArgAreaShadowPtr, OverflowArgAreaOriginPtr) = MSV.getShadowOriginPtr(OverflowArgAreaPtr, IRB, IRB.getInt8Ty(), - Alignment); + Alignment, /*isStore*/ true); Value *SrcPtr = IRB.CreateConstGEP1_32(IRB.getInt8Ty(), VAArgTLSCopy, AMD64FpEndOffset); - IRB.CreateMemCpy(OverflowArgAreaShadowPtr, SrcPtr, VAArgOverflowSize, - Alignment); + 
IRB.CreateMemCpy(OverflowArgAreaShadowPtr, Alignment, SrcPtr, Alignment, + VAArgOverflowSize); } } }; -/// \brief MIPS64-specific implementation of VarArgHelper. +/// MIPS64-specific implementation of VarArgHelper. struct VarArgMIPS64Helper : public VarArgHelper { Function &F; MemorySanitizer &MS; @@ -3286,7 +3481,7 @@ struct VarArgMIPS64Helper : public VarArgHelper { IRB.CreateStore(TotalVAArgSize, MS.VAArgOverflowSizeTLS); } - /// \brief Compute the shadow address for a given va_arg. + /// Compute the shadow address for a given va_arg. Value *getShadowPtrForVAArgument(Type *Ty, IRBuilder<> &IRB, int ArgOffset) { Value *Base = IRB.CreatePointerCast(MS.VAArgTLS, MS.IntptrTy); @@ -3301,8 +3496,8 @@ struct VarArgMIPS64Helper : public VarArgHelper { Value *VAListTag = I.getArgOperand(0); Value *ShadowPtr, *OriginPtr; unsigned Alignment = 8; - std::tie(ShadowPtr, OriginPtr) = - MSV.getShadowOriginPtr(VAListTag, IRB, IRB.getInt8Ty(), Alignment); + std::tie(ShadowPtr, OriginPtr) = MSV.getShadowOriginPtr( + VAListTag, IRB, IRB.getInt8Ty(), Alignment, /*isStore*/ true); IRB.CreateMemSet(ShadowPtr, Constant::getNullValue(IRB.getInt8Ty()), /* size */ 8, Alignment, false); } @@ -3313,8 +3508,8 @@ struct VarArgMIPS64Helper : public VarArgHelper { Value *VAListTag = I.getArgOperand(0); Value *ShadowPtr, *OriginPtr; unsigned Alignment = 8; - std::tie(ShadowPtr, OriginPtr) = - MSV.getShadowOriginPtr(VAListTag, IRB, IRB.getInt8Ty(), Alignment); + std::tie(ShadowPtr, OriginPtr) = MSV.getShadowOriginPtr( + VAListTag, IRB, IRB.getInt8Ty(), Alignment, /*isStore*/ true); IRB.CreateMemSet(ShadowPtr, Constant::getNullValue(IRB.getInt8Ty()), /* size */ 8, Alignment, false); } @@ -3322,7 +3517,7 @@ struct VarArgMIPS64Helper : public VarArgHelper { void finalizeInstrumentation() override { assert(!VAArgSize && !VAArgTLSCopy && "finalizeInstrumentation called twice"); - IRBuilder<> IRB(F.getEntryBlock().getFirstNonPHI()); + IRBuilder<> IRB(MSV.ActualFnStart->getFirstNonPHI()); VAArgSize = 
IRB.CreateLoad(MS.VAArgOverflowSizeTLS); Value *CopySize = IRB.CreateAdd(ConstantInt::get(MS.IntptrTy, 0), VAArgSize); @@ -3331,7 +3526,7 @@ struct VarArgMIPS64Helper : public VarArgHelper { // If there is a va_start in this function, make a backup copy of // va_arg_tls somewhere in the function entry block. VAArgTLSCopy = IRB.CreateAlloca(Type::getInt8Ty(*MS.C), CopySize); - IRB.CreateMemCpy(VAArgTLSCopy, MS.VAArgTLS, CopySize, 8); + IRB.CreateMemCpy(VAArgTLSCopy, 8, MS.VAArgTLS, 8, CopySize); } // Instrument va_start. @@ -3341,20 +3536,21 @@ struct VarArgMIPS64Helper : public VarArgHelper { IRBuilder<> IRB(OrigInst->getNextNode()); Value *VAListTag = OrigInst->getArgOperand(0); Value *RegSaveAreaPtrPtr = - IRB.CreateIntToPtr(IRB.CreatePtrToInt(VAListTag, MS.IntptrTy), - Type::getInt64PtrTy(*MS.C)); + IRB.CreateIntToPtr(IRB.CreatePtrToInt(VAListTag, MS.IntptrTy), + PointerType::get(Type::getInt64PtrTy(*MS.C), 0)); Value *RegSaveAreaPtr = IRB.CreateLoad(RegSaveAreaPtrPtr); Value *RegSaveAreaShadowPtr, *RegSaveAreaOriginPtr; unsigned Alignment = 8; std::tie(RegSaveAreaShadowPtr, RegSaveAreaOriginPtr) = MSV.getShadowOriginPtr(RegSaveAreaPtr, IRB, IRB.getInt8Ty(), - Alignment); - IRB.CreateMemCpy(RegSaveAreaShadowPtr, VAArgTLSCopy, CopySize, Alignment); + Alignment, /*isStore*/ true); + IRB.CreateMemCpy(RegSaveAreaShadowPtr, Alignment, VAArgTLSCopy, Alignment, + CopySize); } } }; -/// \brief AArch64-specific implementation of VarArgHelper. +/// AArch64-specific implementation of VarArgHelper. 
struct VarArgAArch64Helper : public VarArgHelper { static const unsigned kAArch64GrArgSize = 64; static const unsigned kAArch64VrArgSize = 128; @@ -3461,8 +3657,8 @@ struct VarArgAArch64Helper : public VarArgHelper { Value *VAListTag = I.getArgOperand(0); Value *ShadowPtr, *OriginPtr; unsigned Alignment = 8; - std::tie(ShadowPtr, OriginPtr) = - MSV.getShadowOriginPtr(VAListTag, IRB, IRB.getInt8Ty(), Alignment); + std::tie(ShadowPtr, OriginPtr) = MSV.getShadowOriginPtr( + VAListTag, IRB, IRB.getInt8Ty(), Alignment, /*isStore*/ true); IRB.CreateMemSet(ShadowPtr, Constant::getNullValue(IRB.getInt8Ty()), /* size */ 32, Alignment, false); } @@ -3473,8 +3669,8 @@ struct VarArgAArch64Helper : public VarArgHelper { Value *VAListTag = I.getArgOperand(0); Value *ShadowPtr, *OriginPtr; unsigned Alignment = 8; - std::tie(ShadowPtr, OriginPtr) = - MSV.getShadowOriginPtr(VAListTag, IRB, IRB.getInt8Ty(), Alignment); + std::tie(ShadowPtr, OriginPtr) = MSV.getShadowOriginPtr( + VAListTag, IRB, IRB.getInt8Ty(), Alignment, /*isStore*/ true); IRB.CreateMemSet(ShadowPtr, Constant::getNullValue(IRB.getInt8Ty()), /* size */ 32, Alignment, false); } @@ -3506,13 +3702,13 @@ struct VarArgAArch64Helper : public VarArgHelper { if (!VAStartInstrumentationList.empty()) { // If there is a va_start in this function, make a backup copy of // va_arg_tls somewhere in the function entry block. 
- IRBuilder<> IRB(F.getEntryBlock().getFirstNonPHI()); + IRBuilder<> IRB(MSV.ActualFnStart->getFirstNonPHI()); VAArgOverflowSize = IRB.CreateLoad(MS.VAArgOverflowSizeTLS); Value *CopySize = IRB.CreateAdd(ConstantInt::get(MS.IntptrTy, AArch64VAEndOffset), VAArgOverflowSize); VAArgTLSCopy = IRB.CreateAlloca(Type::getInt8Ty(*MS.C), CopySize); - IRB.CreateMemCpy(VAArgTLSCopy, MS.VAArgTLS, CopySize, 8); + IRB.CreateMemCpy(VAArgTLSCopy, 8, MS.VAArgTLS, 8, CopySize); } Value *GrArgSize = ConstantInt::get(MS.IntptrTy, kAArch64GrArgSize); @@ -3563,14 +3759,14 @@ struct VarArgAArch64Helper : public VarArgHelper { Value *GrRegSaveAreaShadowPtr = MSV.getShadowOriginPtr(GrRegSaveAreaPtr, IRB, IRB.getInt8Ty(), - /*Alignment*/ 8) + /*Alignment*/ 8, /*isStore*/ true) .first; Value *GrSrcPtr = IRB.CreateInBoundsGEP(IRB.getInt8Ty(), VAArgTLSCopy, GrRegSaveAreaShadowPtrOff); Value *GrCopySize = IRB.CreateSub(GrArgSize, GrRegSaveAreaShadowPtrOff); - IRB.CreateMemCpy(GrRegSaveAreaShadowPtr, GrSrcPtr, GrCopySize, 8); + IRB.CreateMemCpy(GrRegSaveAreaShadowPtr, 8, GrSrcPtr, 8, GrCopySize); // Again, but for FP/SIMD values. Value *VrRegSaveAreaShadowPtrOff = @@ -3578,7 +3774,7 @@ struct VarArgAArch64Helper : public VarArgHelper { Value *VrRegSaveAreaShadowPtr = MSV.getShadowOriginPtr(VrRegSaveAreaPtr, IRB, IRB.getInt8Ty(), - /*Alignment*/ 8) + /*Alignment*/ 8, /*isStore*/ true) .first; Value *VrSrcPtr = IRB.CreateInBoundsGEP( @@ -3588,25 +3784,25 @@ struct VarArgAArch64Helper : public VarArgHelper { VrRegSaveAreaShadowPtrOff); Value *VrCopySize = IRB.CreateSub(VrArgSize, VrRegSaveAreaShadowPtrOff); - IRB.CreateMemCpy(VrRegSaveAreaShadowPtr, VrSrcPtr, VrCopySize, 8); + IRB.CreateMemCpy(VrRegSaveAreaShadowPtr, 8, VrSrcPtr, 8, VrCopySize); // And finally for remaining arguments. 
Value *StackSaveAreaShadowPtr = MSV.getShadowOriginPtr(StackSaveAreaPtr, IRB, IRB.getInt8Ty(), - /*Alignment*/ 16) + /*Alignment*/ 16, /*isStore*/ true) .first; Value *StackSrcPtr = IRB.CreateInBoundsGEP(IRB.getInt8Ty(), VAArgTLSCopy, IRB.getInt32(AArch64VAEndOffset)); - IRB.CreateMemCpy(StackSaveAreaShadowPtr, StackSrcPtr, - VAArgOverflowSize, 16); + IRB.CreateMemCpy(StackSaveAreaShadowPtr, 16, StackSrcPtr, 16, + VAArgOverflowSize); } } }; -/// \brief PowerPC64-specific implementation of VarArgHelper. +/// PowerPC64-specific implementation of VarArgHelper. struct VarArgPowerPC64Helper : public VarArgHelper { Function &F; MemorySanitizer &MS; @@ -3657,9 +3853,10 @@ struct VarArgPowerPC64Helper : public VarArgHelper { VAArgOffset - VAArgBase); Value *AShadowPtr, *AOriginPtr; std::tie(AShadowPtr, AOriginPtr) = MSV.getShadowOriginPtr( - A, IRB, IRB.getInt8Ty(), kShadowTLSAlignment); + A, IRB, IRB.getInt8Ty(), kShadowTLSAlignment, /*isStore*/ false); - IRB.CreateMemCpy(Base, AShadowPtr, ArgSize, kShadowTLSAlignment); + IRB.CreateMemCpy(Base, kShadowTLSAlignment, AShadowPtr, + kShadowTLSAlignment, ArgSize); } VAArgOffset += alignTo(ArgSize, 8); } else { @@ -3704,7 +3901,7 @@ struct VarArgPowerPC64Helper : public VarArgHelper { IRB.CreateStore(TotalVAArgSize, MS.VAArgOverflowSizeTLS); } - /// \brief Compute the shadow address for a given va_arg. + /// Compute the shadow address for a given va_arg. 
Value *getShadowPtrForVAArgument(Type *Ty, IRBuilder<> &IRB, int ArgOffset) { Value *Base = IRB.CreatePointerCast(MS.VAArgTLS, MS.IntptrTy); @@ -3719,8 +3916,8 @@ struct VarArgPowerPC64Helper : public VarArgHelper { Value *VAListTag = I.getArgOperand(0); Value *ShadowPtr, *OriginPtr; unsigned Alignment = 8; - std::tie(ShadowPtr, OriginPtr) = - MSV.getShadowOriginPtr(VAListTag, IRB, IRB.getInt8Ty(), Alignment); + std::tie(ShadowPtr, OriginPtr) = MSV.getShadowOriginPtr( + VAListTag, IRB, IRB.getInt8Ty(), Alignment, /*isStore*/ true); IRB.CreateMemSet(ShadowPtr, Constant::getNullValue(IRB.getInt8Ty()), /* size */ 8, Alignment, false); } @@ -3730,8 +3927,8 @@ struct VarArgPowerPC64Helper : public VarArgHelper { Value *VAListTag = I.getArgOperand(0); Value *ShadowPtr, *OriginPtr; unsigned Alignment = 8; - std::tie(ShadowPtr, OriginPtr) = - MSV.getShadowOriginPtr(VAListTag, IRB, IRB.getInt8Ty(), Alignment); + std::tie(ShadowPtr, OriginPtr) = MSV.getShadowOriginPtr( + VAListTag, IRB, IRB.getInt8Ty(), Alignment, /*isStore*/ true); // Unpoison the whole __va_list_tag. // FIXME: magic ABI constants. IRB.CreateMemSet(ShadowPtr, Constant::getNullValue(IRB.getInt8Ty()), @@ -3741,7 +3938,7 @@ struct VarArgPowerPC64Helper : public VarArgHelper { void finalizeInstrumentation() override { assert(!VAArgSize && !VAArgTLSCopy && "finalizeInstrumentation called twice"); - IRBuilder<> IRB(F.getEntryBlock().getFirstNonPHI()); + IRBuilder<> IRB(MSV.ActualFnStart->getFirstNonPHI()); VAArgSize = IRB.CreateLoad(MS.VAArgOverflowSizeTLS); Value *CopySize = IRB.CreateAdd(ConstantInt::get(MS.IntptrTy, 0), VAArgSize); @@ -3750,7 +3947,7 @@ struct VarArgPowerPC64Helper : public VarArgHelper { // If there is a va_start in this function, make a backup copy of // va_arg_tls somewhere in the function entry block. 
VAArgTLSCopy = IRB.CreateAlloca(Type::getInt8Ty(*MS.C), CopySize); - IRB.CreateMemCpy(VAArgTLSCopy, MS.VAArgTLS, CopySize, 8); + IRB.CreateMemCpy(VAArgTLSCopy, 8, MS.VAArgTLS, 8, CopySize); } // Instrument va_start. @@ -3760,20 +3957,21 @@ struct VarArgPowerPC64Helper : public VarArgHelper { IRBuilder<> IRB(OrigInst->getNextNode()); Value *VAListTag = OrigInst->getArgOperand(0); Value *RegSaveAreaPtrPtr = - IRB.CreateIntToPtr(IRB.CreatePtrToInt(VAListTag, MS.IntptrTy), - Type::getInt64PtrTy(*MS.C)); + IRB.CreateIntToPtr(IRB.CreatePtrToInt(VAListTag, MS.IntptrTy), + PointerType::get(Type::getInt64PtrTy(*MS.C), 0)); Value *RegSaveAreaPtr = IRB.CreateLoad(RegSaveAreaPtrPtr); Value *RegSaveAreaShadowPtr, *RegSaveAreaOriginPtr; unsigned Alignment = 8; std::tie(RegSaveAreaShadowPtr, RegSaveAreaOriginPtr) = MSV.getShadowOriginPtr(RegSaveAreaPtr, IRB, IRB.getInt8Ty(), - Alignment); - IRB.CreateMemCpy(RegSaveAreaShadowPtr, VAArgTLSCopy, CopySize, Alignment); + Alignment, /*isStore*/ true); + IRB.CreateMemCpy(RegSaveAreaShadowPtr, Alignment, VAArgTLSCopy, Alignment, + CopySize); } } }; -/// \brief A no-op implementation of VarArgHelper. +/// A no-op implementation of VarArgHelper. struct VarArgNoOpHelper : public VarArgHelper { VarArgNoOpHelper(Function &F, MemorySanitizer &MS, MemorySanitizerVisitor &MSV) {} @@ -3796,8 +3994,7 @@ static VarArgHelper *CreateVarArgHelper(Function &Func, MemorySanitizer &Msan, Triple TargetTriple(Func.getParent()->getTargetTriple()); if (TargetTriple.getArch() == Triple::x86_64) return new VarArgAMD64Helper(Func, Msan, Visitor); - else if (TargetTriple.getArch() == Triple::mips64 || - TargetTriple.getArch() == Triple::mips64el) + else if (TargetTriple.isMIPS64()) return new VarArgMIPS64Helper(Func, Msan, Visitor); else if (TargetTriple.getArch() == Triple::aarch64) return new VarArgAArch64Helper(Func, Msan, Visitor); |
