summaryrefslogtreecommitdiffstats
path: root/gnu/llvm/include/llvm/Transforms
diff options
context:
space:
mode:
Diffstat (limited to 'gnu/llvm/include/llvm/Transforms')
-rw-r--r--gnu/llvm/include/llvm/Transforms/IPO.h237
-rw-r--r--gnu/llvm/include/llvm/Transforms/IPO/ForceFunctionAttrs.h35
-rw-r--r--gnu/llvm/include/llvm/Transforms/IPO/FunctionImport.h43
-rw-r--r--gnu/llvm/include/llvm/Transforms/IPO/InferFunctionAttrs.h38
-rw-r--r--gnu/llvm/include/llvm/Transforms/IPO/InlinerPass.h94
-rw-r--r--gnu/llvm/include/llvm/Transforms/IPO/LowerBitSets.h201
-rw-r--r--gnu/llvm/include/llvm/Transforms/IPO/PassManagerBuilder.h179
-rw-r--r--gnu/llvm/include/llvm/Transforms/IPO/StripDeadPrototypes.h34
-rw-r--r--gnu/llvm/include/llvm/Transforms/InstCombine/InstCombine.h46
-rw-r--r--gnu/llvm/include/llvm/Transforms/InstCombine/InstCombineWorklist.h116
-rw-r--r--gnu/llvm/include/llvm/Transforms/Instrumentation.h177
-rw-r--r--gnu/llvm/include/llvm/Transforms/ObjCARC.h48
-rw-r--r--gnu/llvm/include/llvm/Transforms/Scalar.h491
-rw-r--r--gnu/llvm/include/llvm/Transforms/Scalar/ADCE.h38
-rw-r--r--gnu/llvm/include/llvm/Transforms/Scalar/EarlyCSE.h39
-rw-r--r--gnu/llvm/include/llvm/Transforms/Scalar/LowerExpectIntrinsic.h40
-rw-r--r--gnu/llvm/include/llvm/Transforms/Scalar/SROA.h129
-rw-r--r--gnu/llvm/include/llvm/Transforms/Scalar/SimplifyCFG.h46
-rw-r--r--gnu/llvm/include/llvm/Transforms/Utils/ASanStackFrameLayout.h64
-rw-r--r--gnu/llvm/include/llvm/Transforms/Utils/BasicBlockUtils.h297
-rw-r--r--gnu/llvm/include/llvm/Transforms/Utils/BuildLibCalls.h116
-rw-r--r--gnu/llvm/include/llvm/Transforms/Utils/BypassSlowDivision.h36
-rw-r--r--gnu/llvm/include/llvm/Transforms/Utils/Cloning.h236
-rw-r--r--gnu/llvm/include/llvm/Transforms/Utils/CmpInstAnalysis.h65
-rw-r--r--gnu/llvm/include/llvm/Transforms/Utils/CodeExtractor.h126
-rw-r--r--gnu/llvm/include/llvm/Transforms/Utils/CtorUtils.h32
-rw-r--r--gnu/llvm/include/llvm/Transforms/Utils/GlobalStatus.h82
-rw-r--r--gnu/llvm/include/llvm/Transforms/Utils/IntegerDivision.h73
-rw-r--r--gnu/llvm/include/llvm/Transforms/Utils/Local.h355
-rw-r--r--gnu/llvm/include/llvm/Transforms/Utils/LoopUtils.h382
-rw-r--r--gnu/llvm/include/llvm/Transforms/Utils/LoopVersioning.h114
-rw-r--r--gnu/llvm/include/llvm/Transforms/Utils/ModuleUtils.h64
-rw-r--r--gnu/llvm/include/llvm/Transforms/Utils/PromoteMemToReg.h50
-rw-r--r--gnu/llvm/include/llvm/Transforms/Utils/SSAUpdater.h178
-rw-r--r--gnu/llvm/include/llvm/Transforms/Utils/SSAUpdaterImpl.h460
-rw-r--r--gnu/llvm/include/llvm/Transforms/Utils/SimplifyIndVar.h71
-rw-r--r--gnu/llvm/include/llvm/Transforms/Utils/SimplifyLibCalls.h172
-rw-r--r--gnu/llvm/include/llvm/Transforms/Utils/SplitModule.h43
-rw-r--r--gnu/llvm/include/llvm/Transforms/Utils/SymbolRewriter.h152
-rw-r--r--gnu/llvm/include/llvm/Transforms/Utils/UnifyFunctionExitNodes.h52
-rw-r--r--gnu/llvm/include/llvm/Transforms/Utils/UnrollLoop.h45
-rw-r--r--gnu/llvm/include/llvm/Transforms/Utils/ValueMapper.h135
-rw-r--r--gnu/llvm/include/llvm/Transforms/Vectorize.h144
43 files changed, 5575 insertions, 0 deletions
diff --git a/gnu/llvm/include/llvm/Transforms/IPO.h b/gnu/llvm/include/llvm/Transforms/IPO.h
new file mode 100644
index 00000000000..78d2fadc519
--- /dev/null
+++ b/gnu/llvm/include/llvm/Transforms/IPO.h
@@ -0,0 +1,237 @@
+//===- llvm/Transforms/IPO.h - Interprocedural Transformations --*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This header file defines prototypes for accessor functions that expose passes
+// in the IPO transformations library.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TRANSFORMS_IPO_H
+#define LLVM_TRANSFORMS_IPO_H
+
+#include "llvm/ADT/ArrayRef.h"
+#include "llvm/ADT/StringRef.h"
+
+namespace llvm {
+
+class FunctionInfoIndex;
+class ModulePass;
+class Pass;
+class Function;
+class BasicBlock;
+class GlobalValue;
+
+//===----------------------------------------------------------------------===//
+//
+// This function creates a pass that removes symbols from functions and
+// modules. If OnlyDebugInfo is true, only debugging information is removed.
+//
+ModulePass *createStripSymbolsPass(bool OnlyDebugInfo = false);
+
+//===----------------------------------------------------------------------===//
+//
+// This function creates a pass that strips symbols from functions and
+// modules, leaving only debugging information intact.
+//
+ModulePass *createStripNonDebugSymbolsPass();
+
+//===----------------------------------------------------------------------===//
+//
+// This pass removes llvm.dbg.declare intrinsics.
+ModulePass *createStripDebugDeclarePass();
+
+//===----------------------------------------------------------------------===//
+//
+// This pass removes debug info for unused symbols.
+ModulePass *createStripDeadDebugInfoPass();
+
+//===----------------------------------------------------------------------===//
+/// createConstantMergePass - This function returns a new pass that merges
+/// duplicate global constants together into a single constant that is shared.
+/// This is useful because some passes (ie TraceValues) insert a lot of string
+/// constants into the program, regardless of whether or not they duplicate an
+/// existing string.
+///
+ModulePass *createConstantMergePass();
+
+//===----------------------------------------------------------------------===//
+/// createGlobalOptimizerPass - This function returns a new pass that optimizes
+/// non-address taken internal globals.
+///
+ModulePass *createGlobalOptimizerPass();
+
+//===----------------------------------------------------------------------===//
+/// createGlobalDCEPass - This transform is designed to eliminate unreachable
+/// internal globals (functions or global variables)
+///
+ModulePass *createGlobalDCEPass();
+
+//===----------------------------------------------------------------------===//
+/// This transform is designed to eliminate available external globals
+/// (functions or global variables)
+///
+ModulePass *createEliminateAvailableExternallyPass();
+
+//===----------------------------------------------------------------------===//
+/// createGVExtractionPass - If deleteFn is true, this pass deletes
+/// the specified global values. Otherwise, it deletes as much of the module as
+/// possible, except for the global values specified.
+///
+ModulePass *createGVExtractionPass(std::vector<GlobalValue*>& GVs, bool
+ deleteFn = false);
+
+//===----------------------------------------------------------------------===//
+/// This pass performs iterative function importing from other modules.
+Pass *createFunctionImportPass(const FunctionInfoIndex *Index = nullptr);
+
+//===----------------------------------------------------------------------===//
+/// createFunctionInliningPass - Return a new pass object that uses a heuristic
+/// to inline direct function calls to small functions.
+///
+/// The Threshold can be passed directly, or asked to be computed from the
+/// given optimization and size optimization arguments.
+///
+/// The -inline-threshold command line option takes precedence over the
+/// threshold given here.
+Pass *createFunctionInliningPass();
+Pass *createFunctionInliningPass(int Threshold);
+Pass *createFunctionInliningPass(unsigned OptLevel, unsigned SizeOptLevel);
+
+//===----------------------------------------------------------------------===//
+/// createAlwaysInlinerPass - Return a new pass object that inlines only
+/// functions that are marked as "always_inline".
+Pass *createAlwaysInlinerPass();
+Pass *createAlwaysInlinerPass(bool InsertLifetime);
+
+//===----------------------------------------------------------------------===//
+/// createPruneEHPass - Return a new pass object which transforms invoke
+/// instructions into calls, if the callee can _not_ unwind the stack.
+///
+Pass *createPruneEHPass();
+
+//===----------------------------------------------------------------------===//
+/// createInternalizePass - This pass loops over all of the functions in the
+/// input module, internalizing all globals (functions and variables) it can.
+///
+/// The symbols in \p ExportList are never internalized.
+///
+/// The symbols in DSOList are internalized if it is safe to drop them from
+/// the symbol table.
+///
+/// Note that commandline options that are used with the above function are not
+/// used now!
+ModulePass *createInternalizePass(ArrayRef<const char *> ExportList);
+/// createInternalizePass - Same as above, but with an empty exportList.
+ModulePass *createInternalizePass();
+
+//===----------------------------------------------------------------------===//
+/// createDeadArgEliminationPass - This pass removes arguments from functions
+/// which are not used by the body of the function.
+///
+ModulePass *createDeadArgEliminationPass();
+
+/// DeadArgHacking pass - Same as DAE, but delete arguments of external
+/// functions as well. This is definitely not safe, and should only be used by
+/// bugpoint.
+ModulePass *createDeadArgHackingPass();
+
+//===----------------------------------------------------------------------===//
+/// createArgumentPromotionPass - This pass promotes "by reference" arguments to
+/// be passed by value if the number of elements passed is smaller or
+/// equal to maxElements (maxElements == 0 means always promote).
+///
+Pass *createArgumentPromotionPass(unsigned maxElements = 3);
+
+//===----------------------------------------------------------------------===//
+/// createIPConstantPropagationPass - This pass propagates constants from call
+/// sites into the bodies of functions.
+///
+ModulePass *createIPConstantPropagationPass();
+
+//===----------------------------------------------------------------------===//
+/// createIPSCCPPass - This pass propagates constants from call sites into the
+/// bodies of functions, and keeps track of whether basic blocks are executable
+/// in the process.
+///
+ModulePass *createIPSCCPPass();
+
+//===----------------------------------------------------------------------===//
+//
+/// createLoopExtractorPass - This pass extracts all natural loops from the
+/// program into a function if it can.
+///
+Pass *createLoopExtractorPass();
+
+/// createSingleLoopExtractorPass - This pass extracts one natural loop from the
+/// program into a function if it can. This is used by bugpoint.
+///
+Pass *createSingleLoopExtractorPass();
+
+/// createBlockExtractorPass - This pass extracts all blocks (except those
+/// specified in the argument list) from the functions in the module.
+///
+ModulePass *createBlockExtractorPass();
+
+/// createStripDeadPrototypesPass - This pass removes any function declarations
+/// (prototypes) that are not used.
+ModulePass *createStripDeadPrototypesPass();
+
+//===----------------------------------------------------------------------===//
+/// createPostOrderFunctionAttrsPass - This pass walks SCCs of the call graph
+/// in post-order to deduce and propagate function attributes. It can discover
+/// functions that do not access memory, or only read memory, and give them the
+/// readnone/readonly attribute. It also discovers function arguments that are
+/// not captured by the function and marks them with the nocapture attribute.
+///
+Pass *createPostOrderFunctionAttrsPass();
+
+//===----------------------------------------------------------------------===//
+/// createReversePostOrderFunctionAttrsPass - This pass walks SCCs of the call
+/// graph in RPO to deduce and propagate function attributes. Currently it
+/// only handles synthesizing norecurse attributes.
+///
+Pass *createReversePostOrderFunctionAttrsPass();
+
+//===----------------------------------------------------------------------===//
+/// createMergeFunctionsPass - This pass discovers identical functions and
+/// collapses them.
+///
+ModulePass *createMergeFunctionsPass();
+
+//===----------------------------------------------------------------------===//
+/// createPartialInliningPass - This pass inlines parts of functions.
+///
+ModulePass *createPartialInliningPass();
+
+//===----------------------------------------------------------------------===//
+// createMetaRenamerPass - Rename everything with metasyntactic names.
+//
+ModulePass *createMetaRenamerPass();
+
+//===----------------------------------------------------------------------===//
+/// createBarrierNoopPass - This pass is purely a module pass barrier in a pass
+/// manager.
+ModulePass *createBarrierNoopPass();
+
+/// \brief This pass lowers bitset metadata and the llvm.bitset.test intrinsic
+/// to bitsets.
+ModulePass *createLowerBitSetsPass();
+
+/// \brief This pass exports CFI checks for use by external modules.
+ModulePass *createCrossDSOCFIPass();
+
+//===----------------------------------------------------------------------===//
+// SampleProfilePass - Loads sample profile data from disk and generates
+// IR metadata to reflect the profile.
+ModulePass *createSampleProfileLoaderPass();
+ModulePass *createSampleProfileLoaderPass(StringRef Name);
+
+} // End llvm namespace
+
+#endif
diff --git a/gnu/llvm/include/llvm/Transforms/IPO/ForceFunctionAttrs.h b/gnu/llvm/include/llvm/Transforms/IPO/ForceFunctionAttrs.h
new file mode 100644
index 00000000000..0ff4afe79b0
--- /dev/null
+++ b/gnu/llvm/include/llvm/Transforms/IPO/ForceFunctionAttrs.h
@@ -0,0 +1,35 @@
+//===-- ForceFunctionAttrs.h - Force function attrs for debugging ---------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+/// \file
+/// Super simple passes to force specific function attrs from the commandline
+/// into the IR for debugging purposes.
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TRANSFORMS_IPO_FORCEFUNCTIONATTRS_H
+#define LLVM_TRANSFORMS_IPO_FORCEFUNCTIONATTRS_H
+
+#include "llvm/IR/Module.h"
+#include "llvm/IR/PassManager.h"
+
+namespace llvm {
+
+/// Pass which forces specific function attributes into the IR, primarily as
+/// a debugging tool.
+class ForceFunctionAttrsPass {
+public:
+ static StringRef name() { return "ForceFunctionAttrsPass"; }
+ PreservedAnalyses run(Module &M);
+};
+
+/// Create a legacy pass manager instance of a pass to force function attrs.
+Pass *createForceFunctionAttrsLegacyPass();
+
+}
+
+#endif // LLVM_TRANSFORMS_IPO_FORCEFUNCTIONATTRS_H
diff --git a/gnu/llvm/include/llvm/Transforms/IPO/FunctionImport.h b/gnu/llvm/include/llvm/Transforms/IPO/FunctionImport.h
new file mode 100644
index 00000000000..d7707790a01
--- /dev/null
+++ b/gnu/llvm/include/llvm/Transforms/IPO/FunctionImport.h
@@ -0,0 +1,43 @@
+//===- llvm/Transforms/IPO/FunctionImport.h - ThinLTO importing -*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_FUNCTIONIMPORT_H
+#define LLVM_FUNCTIONIMPORT_H
+
+#include "llvm/ADT/StringMap.h"
+#include <functional>
+
+namespace llvm {
+class LLVMContext;
+class Module;
+class FunctionInfoIndex;
+
+/// The function importer automatically imports functions from other modules
+/// based on the provided summary information.
+class FunctionImporter {
+
+ /// The summaries index used to trigger importing.
+ const FunctionInfoIndex &Index;
+
+ /// Factory function to load a Module for a given identifier
+ std::function<std::unique_ptr<Module>(StringRef Identifier)> ModuleLoader;
+
+public:
+ /// Create a Function Importer.
+ FunctionImporter(
+ const FunctionInfoIndex &Index,
+ std::function<std::unique_ptr<Module>(StringRef Identifier)> ModuleLoader)
+ : Index(Index), ModuleLoader(ModuleLoader) {}
+
+  /// Import functions into Module \p M based on the summary information.
+ bool importFunctions(Module &M);
+};
+}
+
+#endif // LLVM_FUNCTIONIMPORT_H
diff --git a/gnu/llvm/include/llvm/Transforms/IPO/InferFunctionAttrs.h b/gnu/llvm/include/llvm/Transforms/IPO/InferFunctionAttrs.h
new file mode 100644
index 00000000000..80afc02c62a
--- /dev/null
+++ b/gnu/llvm/include/llvm/Transforms/IPO/InferFunctionAttrs.h
@@ -0,0 +1,38 @@
+//===-- InferFunctionAttrs.h - Infer implicit function attributes ---------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+///
+/// \file
+/// Interfaces for passes which infer implicit function attributes from the
+/// name and signature of function declarations.
+///
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TRANSFORMS_IPO_INFERFUNCTIONATTRS_H
+#define LLVM_TRANSFORMS_IPO_INFERFUNCTIONATTRS_H
+
+#include "llvm/IR/Module.h"
+#include "llvm/IR/PassManager.h"
+
+namespace llvm {
+
+/// A pass which infers function attributes from the names and signatures of
+/// function declarations in a module.
+class InferFunctionAttrsPass {
+public:
+ static StringRef name() { return "InferFunctionAttrsPass"; }
+ PreservedAnalyses run(Module &M, AnalysisManager<Module> *AM);
+};
+
+/// Create a legacy pass manager instance of a pass to infer function
+/// attributes.
+Pass *createInferFunctionAttrsLegacyPass();
+
+}
+
+#endif // LLVM_TRANSFORMS_IPO_INFERFUNCTIONATTRS_H
diff --git a/gnu/llvm/include/llvm/Transforms/IPO/InlinerPass.h b/gnu/llvm/include/llvm/Transforms/IPO/InlinerPass.h
new file mode 100644
index 00000000000..58ef0cbbfb5
--- /dev/null
+++ b/gnu/llvm/include/llvm/Transforms/IPO/InlinerPass.h
@@ -0,0 +1,94 @@
+//===- InlinerPass.h - Code common to all inliners --------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines a simple policy-based bottom-up inliner. This file
+// implements all of the boring mechanics of the bottom-up inlining, while the
+// subclass determines WHAT to inline, which is the much more interesting
+// component.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TRANSFORMS_IPO_INLINERPASS_H
+#define LLVM_TRANSFORMS_IPO_INLINERPASS_H
+
+#include "llvm/Analysis/CallGraphSCCPass.h"
+
+namespace llvm {
+class AssumptionCacheTracker;
+class CallSite;
+class DataLayout;
+class InlineCost;
+template <class PtrType, unsigned SmallSize> class SmallPtrSet;
+
+/// Inliner - This class contains all of the helper code which is used to
+/// perform the inlining operations that do not depend on the policy.
+///
+struct Inliner : public CallGraphSCCPass {
+ explicit Inliner(char &ID);
+ explicit Inliner(char &ID, int Threshold, bool InsertLifetime);
+
+ /// getAnalysisUsage - For this class, we declare that we require and preserve
+ /// the call graph. If the derived class implements this method, it should
+ /// always explicitly call the implementation here.
+ void getAnalysisUsage(AnalysisUsage &Info) const override;
+
+ // Main run interface method, this implements the interface required by the
+ // Pass class.
+ bool runOnSCC(CallGraphSCC &SCC) override;
+
+ using llvm::Pass::doFinalization;
+ // doFinalization - Remove now-dead linkonce functions at the end of
+ // processing to avoid breaking the SCC traversal.
+ bool doFinalization(CallGraph &CG) override;
+
+ /// This method returns the value specified by the -inline-threshold value,
+ /// specified on the command line. This is typically not directly needed.
+ ///
+ unsigned getInlineThreshold() const { return InlineThreshold; }
+
+ /// Calculate the inline threshold for given Caller. This threshold is lower
+ /// if the caller is marked with OptimizeForSize and -inline-threshold is not
+  /// given on the command line. It is higher if the callee is marked with the
+ /// inlinehint attribute.
+ ///
+ unsigned getInlineThreshold(CallSite CS) const;
+
+ /// getInlineCost - This method must be implemented by the subclass to
+ /// determine the cost of inlining the specified call site. If the cost
+ /// returned is greater than the current inline threshold, the call site is
+ /// not inlined.
+ ///
+ virtual InlineCost getInlineCost(CallSite CS) = 0;
+
+ /// removeDeadFunctions - Remove dead functions.
+ ///
+ /// This also includes a hack in the form of the 'AlwaysInlineOnly' flag
+ /// which restricts it to deleting functions with an 'AlwaysInline'
+ /// attribute. This is useful for the InlineAlways pass that only wants to
+ /// deal with that subset of the functions.
+ bool removeDeadFunctions(CallGraph &CG, bool AlwaysInlineOnly = false);
+
+private:
+ // InlineThreshold - Cache the value here for easy access.
+ unsigned InlineThreshold;
+
+ // InsertLifetime - Insert @llvm.lifetime intrinsics.
+ bool InsertLifetime;
+
+ /// shouldInline - Return true if the inliner should attempt to
+ /// inline at the given CallSite.
+ bool shouldInline(CallSite CS);
+
+protected:
+ AssumptionCacheTracker *ACT;
+};
+
+} // End llvm namespace
+
+#endif
diff --git a/gnu/llvm/include/llvm/Transforms/IPO/LowerBitSets.h b/gnu/llvm/include/llvm/Transforms/IPO/LowerBitSets.h
new file mode 100644
index 00000000000..e5fb7b98fcb
--- /dev/null
+++ b/gnu/llvm/include/llvm/Transforms/IPO/LowerBitSets.h
@@ -0,0 +1,201 @@
+//===- LowerBitSets.h - Bitset lowering pass --------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines parts of the bitset lowering pass implementation that may
+// be usefully unit tested.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TRANSFORMS_IPO_LOWERBITSETS_H
+#define LLVM_TRANSFORMS_IPO_LOWERBITSETS_H
+
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/SmallVector.h"
+
+#include <stdint.h>
+#include <limits>
+#include <set>
+#include <vector>
+
+namespace llvm {
+
+class DataLayout;
+class GlobalObject;
+class Value;
+class raw_ostream;
+
+struct BitSetInfo {
+ // The indices of the set bits in the bitset.
+ std::set<uint64_t> Bits;
+
+ // The byte offset into the combined global represented by the bitset.
+ uint64_t ByteOffset;
+
+ // The size of the bitset in bits.
+ uint64_t BitSize;
+
+ // Log2 alignment of the bit set relative to the combined global.
+ // For example, a log2 alignment of 3 means that bits in the bitset
+ // represent addresses 8 bytes apart.
+ unsigned AlignLog2;
+
+ bool isSingleOffset() const {
+ return Bits.size() == 1;
+ }
+
+ bool isAllOnes() const {
+ return Bits.size() == BitSize;
+ }
+
+ bool containsGlobalOffset(uint64_t Offset) const;
+
+ bool containsValue(const DataLayout &DL,
+ const DenseMap<GlobalObject *, uint64_t> &GlobalLayout,
+ Value *V, uint64_t COffset = 0) const;
+
+ void print(raw_ostream &OS) const;
+};
+
+struct BitSetBuilder {
+ SmallVector<uint64_t, 16> Offsets;
+ uint64_t Min, Max;
+
+ BitSetBuilder() : Min(std::numeric_limits<uint64_t>::max()), Max(0) {}
+
+ void addOffset(uint64_t Offset) {
+ if (Min > Offset)
+ Min = Offset;
+ if (Max < Offset)
+ Max = Offset;
+
+ Offsets.push_back(Offset);
+ }
+
+ BitSetInfo build();
+};
+
+/// This class implements a layout algorithm for globals referenced by bit sets
+/// that tries to keep members of small bit sets together. This can
+/// significantly reduce bit set sizes in many cases.
+///
+/// It works by assembling fragments of layout from sets of referenced globals.
+/// Each set of referenced globals causes the algorithm to create a new
+/// fragment, which is assembled by appending each referenced global in the set
+/// into the fragment. If a referenced global has already been referenced by a
+/// fragment created earlier, we instead delete that fragment and append its
+/// contents into the fragment we are assembling.
+///
+/// By starting with the smallest fragments, we minimize the size of the
+/// fragments that are copied into larger fragments. This is most intuitively
+/// thought about when considering the case where the globals are virtual tables
+/// and the bit sets represent their derived classes: in a single inheritance
+/// hierarchy, the optimum layout would involve a depth-first search of the
+/// class hierarchy (and in fact the computed layout ends up looking a lot like
+/// a DFS), but a naive DFS would not work well in the presence of multiple
+/// inheritance. This aspect of the algorithm ends up fitting smaller
+/// hierarchies inside larger ones where that would be beneficial.
+///
+/// For example, consider this class hierarchy:
+///
+/// A B
+/// \ / | \
+/// C D E
+///
+/// We have five bit sets: bsA (A, C), bsB (B, C, D, E), bsC (C), bsD (D) and
+/// bsE (E). If we laid out our objects by DFS traversing B followed by A, our
+/// layout would be {B, C, D, E, A}. This is optimal for bsB as it needs to
+/// cover the only 4 objects in its hierarchy, but not for bsA as it needs to
+/// cover 5 objects, i.e. the entire layout. Our algorithm proceeds as follows:
+///
+/// Add bsC, fragments {{C}}
+/// Add bsD, fragments {{C}, {D}}
+/// Add bsE, fragments {{C}, {D}, {E}}
+/// Add bsA, fragments {{A, C}, {D}, {E}}
+/// Add bsB, fragments {{B, A, C, D, E}}
+///
+/// This layout is optimal for bsA, as it now only needs to cover two (i.e. 3
+/// fewer) objects, at the cost of bsB needing to cover 1 more object.
+///
+/// The bit set lowering pass assigns an object index to each object that needs
+/// to be laid out, and calls addFragment for each bit set passing the object
+/// indices of its referenced globals. It then assembles a layout from the
+/// computed layout in the Fragments field.
+struct GlobalLayoutBuilder {
+ /// The computed layout. Each element of this vector contains a fragment of
+ /// layout (which may be empty) consisting of object indices.
+ std::vector<std::vector<uint64_t>> Fragments;
+
+ /// Mapping from object index to fragment index.
+ std::vector<uint64_t> FragmentMap;
+
+ GlobalLayoutBuilder(uint64_t NumObjects)
+ : Fragments(1), FragmentMap(NumObjects) {}
+
+ /// Add F to the layout while trying to keep its indices contiguous.
+ /// If a previously seen fragment uses any of F's indices, that
+ /// fragment will be laid out inside F.
+ void addFragment(const std::set<uint64_t> &F);
+};
+
+/// This class is used to build a byte array containing overlapping bit sets. By
+/// loading from indexed offsets into the byte array and applying a mask, a
+/// program can test bits from the bit set with a relatively short instruction
+/// sequence. For example, suppose we have 15 bit sets to lay out:
+///
+/// A (16 bits), B (15 bits), C (14 bits), D (13 bits), E (12 bits),
+/// F (11 bits), G (10 bits), H (9 bits), I (7 bits), J (6 bits), K (5 bits),
+/// L (4 bits), M (3 bits), N (2 bits), O (1 bit)
+///
+/// These bits can be laid out in a 16-byte array like this:
+///
+/// Byte Offset
+/// 0123456789ABCDEF
+/// Bit
+/// 7 HHHHHHHHHIIIIIII
+/// 6 GGGGGGGGGGJJJJJJ
+/// 5 FFFFFFFFFFFKKKKK
+/// 4 EEEEEEEEEEEELLLL
+/// 3 DDDDDDDDDDDDDMMM
+/// 2 CCCCCCCCCCCCCCNN
+/// 1 BBBBBBBBBBBBBBBO
+/// 0 AAAAAAAAAAAAAAAA
+///
+/// For example, to test bit X of A, we evaluate ((bits[X] & 1) != 0), or to
+/// test bit X of I, we evaluate ((bits[9 + X] & 0x80) != 0). This can be done
+/// in 1-2 machine instructions on x86, or 4-6 instructions on ARM.
+///
+/// This is a byte array, rather than (say) a 2-byte array or a 4-byte array,
+/// because for one thing it gives us better packing (the more bins there are,
+/// the less evenly they will be filled), and for another, the instruction
+/// sequences can be slightly shorter, both on x86 and ARM.
+struct ByteArrayBuilder {
+ /// The byte array built so far.
+ std::vector<uint8_t> Bytes;
+
+ enum { BitsPerByte = 8 };
+
+ /// The number of bytes allocated so far for each of the bits.
+ uint64_t BitAllocs[BitsPerByte];
+
+ ByteArrayBuilder() {
+ memset(BitAllocs, 0, sizeof(BitAllocs));
+ }
+
+ /// Allocate BitSize bits in the byte array where Bits contains the bits to
+ /// set. AllocByteOffset is set to the offset within the byte array and
+ /// AllocMask is set to the bitmask for those bits. This uses the LPT (Longest
+ /// Processing Time) multiprocessor scheduling algorithm to lay out the bits
+ /// efficiently; the pass allocates bit sets in decreasing size order.
+ void allocate(const std::set<uint64_t> &Bits, uint64_t BitSize,
+ uint64_t &AllocByteOffset, uint8_t &AllocMask);
+};
+
+} // namespace llvm
+
+#endif
diff --git a/gnu/llvm/include/llvm/Transforms/IPO/PassManagerBuilder.h b/gnu/llvm/include/llvm/Transforms/IPO/PassManagerBuilder.h
new file mode 100644
index 00000000000..a4e7bce8ef4
--- /dev/null
+++ b/gnu/llvm/include/llvm/Transforms/IPO/PassManagerBuilder.h
@@ -0,0 +1,179 @@
+// llvm/Transforms/IPO/PassManagerBuilder.h - Build Standard Pass -*- C++ -*-=//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the PassManagerBuilder class, which is used to set up a
+// "standard" optimization sequence suitable for languages like C and C++.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TRANSFORMS_IPO_PASSMANAGERBUILDER_H
+#define LLVM_TRANSFORMS_IPO_PASSMANAGERBUILDER_H
+
+#include <memory>
+#include <vector>
+
+namespace llvm {
+class FunctionInfoIndex;
+class Pass;
+class TargetLibraryInfoImpl;
+class TargetMachine;
+
+// The old pass manager infrastructure is hidden in a legacy namespace now.
+namespace legacy {
+class FunctionPassManager;
+class PassManagerBase;
+}
+
+/// PassManagerBuilder - This class is used to set up a standard optimization
+/// sequence for languages like C and C++, allowing some APIs to customize the
+/// pass sequence in various ways. A simple example of using it would be:
+///
+/// PassManagerBuilder Builder;
+/// Builder.OptLevel = 2;
+/// Builder.populateFunctionPassManager(FPM);
+/// Builder.populateModulePassManager(MPM);
+///
+/// In addition to setting up the basic passes, PassManagerBuilder allows
+/// frontends to vend a plugin API, where plugins are allowed to add extensions
+/// to the default pass manager. They do this by specifying where in the pass
+/// pipeline they want to be added, along with a callback function that adds
+/// the pass(es). For example, a plugin that wanted to add a loop optimization
+/// could do something like this:
+///
+/// static void addMyLoopPass(const PMBuilder &Builder, PassManagerBase &PM) {
+/// if (Builder.getOptLevel() > 2 && Builder.getOptSizeLevel() == 0)
+/// PM.add(createMyAwesomePass());
+/// }
+/// ...
+/// Builder.addExtension(PassManagerBuilder::EP_LoopOptimizerEnd,
+/// addMyLoopPass);
+/// ...
+class PassManagerBuilder {
+public:
+ /// Extensions are passed the builder itself (so they can see how it is
+ /// configured) as well as the pass manager to add stuff to.
+ typedef void (*ExtensionFn)(const PassManagerBuilder &Builder,
+ legacy::PassManagerBase &PM);
+ enum ExtensionPointTy {
+ /// EP_EarlyAsPossible - This extension point allows adding passes before
+ /// any other transformations, allowing them to see the code as it is coming
+ /// out of the frontend.
+ EP_EarlyAsPossible,
+
+ /// EP_ModuleOptimizerEarly - This extension point allows adding passes
+ /// just before the main module-level optimization passes.
+ EP_ModuleOptimizerEarly,
+
+ /// EP_LoopOptimizerEnd - This extension point allows adding loop passes to
+ /// the end of the loop optimizer.
+ EP_LoopOptimizerEnd,
+
+ /// EP_ScalarOptimizerLate - This extension point allows adding optimization
+ /// passes after most of the main optimizations, but before the last
+ /// cleanup-ish optimizations.
+ EP_ScalarOptimizerLate,
+
+ /// EP_OptimizerLast -- This extension point allows adding passes that
+ /// run after everything else.
+ EP_OptimizerLast,
+
+ /// EP_VectorizerStart - This extension point allows adding optimization
+ /// passes before the vectorizer and other highly target specific
+ /// optimization passes are executed.
+ EP_VectorizerStart,
+
+ /// EP_EnabledOnOptLevel0 - This extension point allows adding passes that
+ /// should not be disabled by O0 optimization level. The passes will be
+ /// inserted after the inlining pass.
+ EP_EnabledOnOptLevel0,
+
+ /// EP_Peephole - This extension point allows adding passes that perform
+ /// peephole optimizations similar to the instruction combiner. These passes
+ /// will be inserted after each instance of the instruction combiner pass.
+ EP_Peephole,
+ };
+
+ /// The Optimization Level - Specify the basic optimization level.
+ /// 0 = -O0, 1 = -O1, 2 = -O2, 3 = -O3
+ unsigned OptLevel;
+
+ /// SizeLevel - How much we're optimizing for size.
+ /// 0 = none, 1 = -Os, 2 = -Oz
+ unsigned SizeLevel;
+
+ /// LibraryInfo - Specifies information about the runtime library for the
+ /// optimizer. If this is non-null, it is added to both the function and
+ /// per-module pass pipeline.
+ TargetLibraryInfoImpl *LibraryInfo;
+
+ /// Inliner - Specifies the inliner to use. If this is non-null, it is
+ /// added to the per-module passes.
+ Pass *Inliner;
+
+ /// The function summary index to use for function importing.
+ const FunctionInfoIndex *FunctionIndex;
+
+ bool DisableTailCalls;
+ bool DisableUnitAtATime;
+ bool DisableUnrollLoops;
+ bool BBVectorize;
+ bool SLPVectorize;
+ bool LoopVectorize;
+ bool RerollLoops;
+ bool LoadCombine;
+ bool DisableGVNLoadPRE;
+ bool VerifyInput;
+ bool VerifyOutput;
+ bool MergeFunctions;
+ bool PrepareForLTO;
+
+private:
+ /// ExtensionList - This is list of all of the extensions that are registered.
+ std::vector<std::pair<ExtensionPointTy, ExtensionFn> > Extensions;
+
+public:
+ PassManagerBuilder();
+ ~PassManagerBuilder();
+ /// Adds an extension that will be used by all PassManagerBuilder instances.
+ /// This is intended to be used by plugins, to register a set of
+  /// optimizations to run automatically.
+ static void addGlobalExtension(ExtensionPointTy Ty, ExtensionFn Fn);
+ void addExtension(ExtensionPointTy Ty, ExtensionFn Fn);
+
+private:
+ void addExtensionsToPM(ExtensionPointTy ETy,
+ legacy::PassManagerBase &PM) const;
+ void addInitialAliasAnalysisPasses(legacy::PassManagerBase &PM) const;
+ void addLTOOptimizationPasses(legacy::PassManagerBase &PM);
+ void addLateLTOOptimizationPasses(legacy::PassManagerBase &PM);
+
+public:
+ /// populateFunctionPassManager - This fills in the function pass manager,
+ /// which is expected to be run on each function immediately as it is
+ /// generated. The idea is to reduce the size of the IR in memory.
+ void populateFunctionPassManager(legacy::FunctionPassManager &FPM);
+
+ /// populateModulePassManager - This sets up the primary pass manager.
+ void populateModulePassManager(legacy::PassManagerBase &MPM);
+ void populateLTOPassManager(legacy::PassManagerBase &PM);
+};
+
+/// Registers a function for adding a standard set of passes. This should be
+/// used by optimizer plugins to allow all front ends to transparently use
+/// them. Create a static instance of this class in your plugin, providing a
+/// private function that the PassManagerBuilder can use to add your passes.
+struct RegisterStandardPasses {
+ RegisterStandardPasses(PassManagerBuilder::ExtensionPointTy Ty,
+ PassManagerBuilder::ExtensionFn Fn) {
+ PassManagerBuilder::addGlobalExtension(Ty, Fn);
+ }
+};
+
+} // end namespace llvm
+#endif
diff --git a/gnu/llvm/include/llvm/Transforms/IPO/StripDeadPrototypes.h b/gnu/llvm/include/llvm/Transforms/IPO/StripDeadPrototypes.h
new file mode 100644
index 00000000000..9dddd12871c
--- /dev/null
+++ b/gnu/llvm/include/llvm/Transforms/IPO/StripDeadPrototypes.h
@@ -0,0 +1,34 @@
+//===-- StripDeadPrototypes.h - Remove unused function declarations -------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This pass loops over all of the functions in the input module, looking for
+// dead declarations and removes them. Dead declarations are declarations of
+// functions for which no implementation is available (i.e., declarations for
+// unused library functions).
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TRANSFORMS_IPO_STRIPDEADPROTOTYPES_H
+#define LLVM_TRANSFORMS_IPO_STRIPDEADPROTOTYPES_H
+
+#include "llvm/IR/Module.h"
+#include "llvm/IR/PassManager.h"
+
+namespace llvm {
+
+/// Pass to remove unused function declarations.
+class StripDeadPrototypesPass {
+public:
+ static StringRef name() { return "StripDeadPrototypesPass"; }
+ PreservedAnalyses run(Module &M);
+};
+
+}
+
+#endif // LLVM_TRANSFORMS_IPO_STRIPDEADPROTOTYPES_H
diff --git a/gnu/llvm/include/llvm/Transforms/InstCombine/InstCombine.h b/gnu/llvm/include/llvm/Transforms/InstCombine/InstCombine.h
new file mode 100644
index 00000000000..f48ec13107b
--- /dev/null
+++ b/gnu/llvm/include/llvm/Transforms/InstCombine/InstCombine.h
@@ -0,0 +1,46 @@
+//===- InstCombine.h - InstCombine pass -------------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+/// \file
+///
+/// This file provides the primary interface to the instcombine pass. This pass
+/// is suitable for use in the new pass manager. For a pass that works with the
+/// legacy pass manager, please look for \c createInstructionCombiningPass() in
+/// Scalar.h.
+///
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TRANSFORMS_INSTCOMBINE_INSTCOMBINE_H
+#define LLVM_TRANSFORMS_INSTCOMBINE_INSTCOMBINE_H
+
+#include "llvm/IR/Function.h"
+#include "llvm/IR/PassManager.h"
+#include "llvm/Transforms/InstCombine/InstCombineWorklist.h"
+
+namespace llvm {
+
+class InstCombinePass {
+ InstCombineWorklist Worklist;
+
+public:
+ static StringRef name() { return "InstCombinePass"; }
+
+ // Explicitly define constructors for MSVC.
+ InstCombinePass() {}
+ InstCombinePass(InstCombinePass &&Arg) : Worklist(std::move(Arg.Worklist)) {}
+ InstCombinePass &operator=(InstCombinePass &&RHS) {
+ Worklist = std::move(RHS.Worklist);
+ return *this;
+ }
+
+ PreservedAnalyses run(Function &F, AnalysisManager<Function> *AM);
+};
+
+}
+
+#endif
diff --git a/gnu/llvm/include/llvm/Transforms/InstCombine/InstCombineWorklist.h b/gnu/llvm/include/llvm/Transforms/InstCombine/InstCombineWorklist.h
new file mode 100644
index 00000000000..5d2b2d00000
--- /dev/null
+++ b/gnu/llvm/include/llvm/Transforms/InstCombine/InstCombineWorklist.h
@@ -0,0 +1,116 @@
+//===- InstCombineWorklist.h - Worklist for InstCombine pass ----*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TRANSFORMS_INSTCOMBINE_INSTCOMBINEWORKLIST_H
+#define LLVM_TRANSFORMS_INSTCOMBINE_INSTCOMBINEWORKLIST_H
+
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/IR/Instruction.h"
+#include "llvm/Support/Compiler.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/Support/raw_ostream.h"
+
+#define DEBUG_TYPE "instcombine"
+
+namespace llvm {
+
+/// InstCombineWorklist - This is the worklist management logic for
+/// InstCombine.
+class InstCombineWorklist {
+ SmallVector<Instruction*, 256> Worklist;
+ DenseMap<Instruction*, unsigned> WorklistMap;
+
+ void operator=(const InstCombineWorklist&RHS) = delete;
+ InstCombineWorklist(const InstCombineWorklist&) = delete;
+public:
+ InstCombineWorklist() {}
+
+ InstCombineWorklist(InstCombineWorklist &&Arg)
+ : Worklist(std::move(Arg.Worklist)),
+ WorklistMap(std::move(Arg.WorklistMap)) {}
+ InstCombineWorklist &operator=(InstCombineWorklist &&RHS) {
+ Worklist = std::move(RHS.Worklist);
+ WorklistMap = std::move(RHS.WorklistMap);
+ return *this;
+ }
+
+ bool isEmpty() const { return Worklist.empty(); }
+
+ /// Add - Add the specified instruction to the worklist if it isn't already
+ /// in it.
+ void Add(Instruction *I) {
+ if (WorklistMap.insert(std::make_pair(I, Worklist.size())).second) {
+ DEBUG(dbgs() << "IC: ADD: " << *I << '\n');
+ Worklist.push_back(I);
+ }
+ }
+
+ void AddValue(Value *V) {
+ if (Instruction *I = dyn_cast<Instruction>(V))
+ Add(I);
+ }
+
+  /// AddInitialGroup - Add the specified batch of stuff in reverse order,
+ /// which should only be done when the worklist is empty and when the group
+ /// has no duplicates.
+ void AddInitialGroup(ArrayRef<Instruction *> List) {
+ assert(Worklist.empty() && "Worklist must be empty to add initial group");
+ Worklist.reserve(List.size()+16);
+ WorklistMap.resize(List.size());
+ DEBUG(dbgs() << "IC: ADDING: " << List.size() << " instrs to worklist\n");
+ unsigned Idx = 0;
+ for (Instruction *I : reverse(List)) {
+ WorklistMap.insert(std::make_pair(I, Idx++));
+ Worklist.push_back(I);
+ }
+ }
+
+ // Remove - remove I from the worklist if it exists.
+ void Remove(Instruction *I) {
+ DenseMap<Instruction*, unsigned>::iterator It = WorklistMap.find(I);
+ if (It == WorklistMap.end()) return; // Not in worklist.
+
+ // Don't bother moving everything down, just null out the slot.
+ Worklist[It->second] = nullptr;
+
+ WorklistMap.erase(It);
+ }
+
+ Instruction *RemoveOne() {
+ Instruction *I = Worklist.pop_back_val();
+ WorklistMap.erase(I);
+ return I;
+ }
+
+ /// AddUsersToWorkList - When an instruction is simplified, add all users of
+ /// the instruction to the work lists because they might get more simplified
+ /// now.
+ ///
+ void AddUsersToWorkList(Instruction &I) {
+ for (User *U : I.users())
+ Add(cast<Instruction>(U));
+ }
+
+
+ /// Zap - check that the worklist is empty and nuke the backing store for
+ /// the map if it is large.
+ void Zap() {
+ assert(WorklistMap.empty() && "Worklist empty, but map not?");
+
+ // Do an explicit clear, this shrinks the map if needed.
+ WorklistMap.clear();
+ }
+};
+
+} // end namespace llvm.
+
+#undef DEBUG_TYPE
+
+#endif
diff --git a/gnu/llvm/include/llvm/Transforms/Instrumentation.h b/gnu/llvm/include/llvm/Transforms/Instrumentation.h
new file mode 100644
index 00000000000..38dfeb04ace
--- /dev/null
+++ b/gnu/llvm/include/llvm/Transforms/Instrumentation.h
@@ -0,0 +1,177 @@
+//===- Transforms/Instrumentation.h - Instrumentation passes ----*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines constructor functions for instrumentation passes.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TRANSFORMS_INSTRUMENTATION_H
+#define LLVM_TRANSFORMS_INSTRUMENTATION_H
+
+#include "llvm/ADT/StringRef.h"
+#include "llvm/IR/BasicBlock.h"
+#include <vector>
+
+#if defined(__GNUC__) && defined(__linux__) && !defined(ANDROID)
+inline void *getDFSanArgTLSPtrForJIT() {
+ extern __thread __attribute__((tls_model("initial-exec")))
+ void *__dfsan_arg_tls;
+ return (void *)&__dfsan_arg_tls;
+}
+
+inline void *getDFSanRetValTLSPtrForJIT() {
+ extern __thread __attribute__((tls_model("initial-exec")))
+ void *__dfsan_retval_tls;
+ return (void *)&__dfsan_retval_tls;
+}
+#endif
+
+namespace llvm {
+
+class TargetMachine;
+
+/// Instrumentation passes often insert conditional checks into entry blocks.
+/// Call this function before splitting the entry block to move instructions
+/// that must remain in the entry block up before the split point. Static
+/// allocas and llvm.localescape calls, for example, must remain in the entry
+/// block.
+BasicBlock::iterator PrepareToSplitEntryBlock(BasicBlock &BB,
+ BasicBlock::iterator IP);
+
+class ModulePass;
+class FunctionPass;
+
+// Insert GCOV profiling instrumentation
+struct GCOVOptions {
+ static GCOVOptions getDefault();
+
+ // Specify whether to emit .gcno files.
+ bool EmitNotes;
+
+ // Specify whether to modify the program to emit .gcda files when run.
+ bool EmitData;
+
+ // A four-byte version string. The meaning of a version string is described in
+ // gcc's gcov-io.h
+ char Version[4];
+
+ // Emit a "cfg checksum" that follows the "line number checksum" of a
+ // function. This affects both .gcno and .gcda files.
+ bool UseCfgChecksum;
+
+ // Add the 'noredzone' attribute to added runtime library calls.
+ bool NoRedZone;
+
+ // Emit the name of the function in the .gcda files. This is redundant, as
+ // the function identifier can be used to find the name from the .gcno file.
+ bool FunctionNamesInData;
+
+ // Emit the exit block immediately after the start block, rather than after
+ // all of the function body's blocks.
+ bool ExitBlockBeforeBody;
+};
+ModulePass *createGCOVProfilerPass(const GCOVOptions &Options =
+ GCOVOptions::getDefault());
+
+// PGO Instrumentation
+ModulePass *createPGOInstrumentationGenPass();
+ModulePass *
+createPGOInstrumentationUsePass(StringRef Filename = StringRef(""));
+
+/// Options for the frontend instrumentation based profiling pass.
+struct InstrProfOptions {
+ InstrProfOptions() : NoRedZone(false) {}
+
+ // Add the 'noredzone' attribute to added runtime library calls.
+ bool NoRedZone;
+
+ // Name of the profile file to use as output
+ std::string InstrProfileOutput;
+};
+
+/// Insert frontend instrumentation based profiling.
+ModulePass *createInstrProfilingPass(
+ const InstrProfOptions &Options = InstrProfOptions());
+
+// Insert AddressSanitizer (address sanity checking) instrumentation
+FunctionPass *createAddressSanitizerFunctionPass(bool CompileKernel = false,
+ bool Recover = false);
+ModulePass *createAddressSanitizerModulePass(bool CompileKernel = false,
+ bool Recover = false);
+
+// Insert MemorySanitizer instrumentation (detection of uninitialized reads)
+FunctionPass *createMemorySanitizerPass(int TrackOrigins = 0);
+
+// Insert ThreadSanitizer (race detection) instrumentation
+FunctionPass *createThreadSanitizerPass();
+
+// Insert DataFlowSanitizer (dynamic data flow analysis) instrumentation
+ModulePass *createDataFlowSanitizerPass(
+ const std::vector<std::string> &ABIListFiles = std::vector<std::string>(),
+ void *(*getArgTLS)() = nullptr, void *(*getRetValTLS)() = nullptr);
+
+// Options for sanitizer coverage instrumentation.
+struct SanitizerCoverageOptions {
+ SanitizerCoverageOptions()
+ : CoverageType(SCK_None), IndirectCalls(false), TraceBB(false),
+ TraceCmp(false), Use8bitCounters(false) {}
+
+ enum Type {
+ SCK_None = 0,
+ SCK_Function,
+ SCK_BB,
+ SCK_Edge
+ } CoverageType;
+ bool IndirectCalls;
+ bool TraceBB;
+ bool TraceCmp;
+ bool Use8bitCounters;
+};
+
+// Insert SanitizerCoverage instrumentation.
+ModulePass *createSanitizerCoverageModulePass(
+ const SanitizerCoverageOptions &Options = SanitizerCoverageOptions());
+
+#if defined(__GNUC__) && defined(__linux__) && !defined(ANDROID)
+inline ModulePass *createDataFlowSanitizerPassForJIT(
+ const std::vector<std::string> &ABIListFiles = std::vector<std::string>()) {
+ return createDataFlowSanitizerPass(ABIListFiles, getDFSanArgTLSPtrForJIT,
+ getDFSanRetValTLSPtrForJIT);
+}
+#endif
+
+// BoundsChecking - This pass instruments the code to perform run-time bounds
+// checking on loads, stores, and other memory intrinsics.
+FunctionPass *createBoundsCheckingPass();
+
+/// \brief This pass splits the stack into a safe stack and an unsafe stack to
+/// protect against stack-based overflow vulnerabilities.
+FunctionPass *createSafeStackPass(const TargetMachine *TM = nullptr);
+
+/// \brief Calculate what to divide by to scale counts.
+///
+/// Given the maximum count, calculate a divisor that will scale all the
+/// weights to strictly less than UINT32_MAX.
+static inline uint64_t calculateCountScale(uint64_t MaxCount) {
+ return MaxCount < UINT32_MAX ? 1 : MaxCount / UINT32_MAX + 1;
+}
+
+/// \brief Scale an individual branch count.
+///
+/// Scale a 64-bit weight down to 32-bits using \c Scale.
+///
+static inline uint32_t scaleBranchCount(uint64_t Count, uint64_t Scale) {
+ uint64_t Scaled = Count / Scale;
+ assert(Scaled <= UINT32_MAX && "overflow 32-bits");
+ return Scaled;
+}
+
+} // End llvm namespace
+
+#endif
diff --git a/gnu/llvm/include/llvm/Transforms/ObjCARC.h b/gnu/llvm/include/llvm/Transforms/ObjCARC.h
new file mode 100644
index 00000000000..1897adc2ffb
--- /dev/null
+++ b/gnu/llvm/include/llvm/Transforms/ObjCARC.h
@@ -0,0 +1,48 @@
+//===-- ObjCARC.h - ObjCARC Scalar Transformations --------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This header file defines prototypes for accessor functions that expose passes
+// in the ObjCARC Scalar Transformations library.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TRANSFORMS_OBJCARC_H
+#define LLVM_TRANSFORMS_OBJCARC_H
+
+namespace llvm {
+
+class Pass;
+
+//===----------------------------------------------------------------------===//
+//
+// ObjCARCAPElim - ObjC ARC autorelease pool elimination.
+//
+Pass *createObjCARCAPElimPass();
+
+//===----------------------------------------------------------------------===//
+//
+// ObjCARCExpand - ObjC ARC preliminary simplifications.
+//
+Pass *createObjCARCExpandPass();
+
+//===----------------------------------------------------------------------===//
+//
+// ObjCARCContract - Late ObjC ARC cleanups.
+//
+Pass *createObjCARCContractPass();
+
+//===----------------------------------------------------------------------===//
+//
+// ObjCARCOpt - ObjC ARC optimization.
+//
+Pass *createObjCARCOptPass();
+
+} // End llvm namespace
+
+#endif
diff --git a/gnu/llvm/include/llvm/Transforms/Scalar.h b/gnu/llvm/include/llvm/Transforms/Scalar.h
new file mode 100644
index 00000000000..9173de1112f
--- /dev/null
+++ b/gnu/llvm/include/llvm/Transforms/Scalar.h
@@ -0,0 +1,491 @@
+//===-- Scalar.h - Scalar Transformations -----------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This header file defines prototypes for accessor functions that expose passes
+// in the Scalar transformations library.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TRANSFORMS_SCALAR_H
+#define LLVM_TRANSFORMS_SCALAR_H
+
+#include "llvm/ADT/StringRef.h"
+#include <functional>
+
+namespace llvm {
+
+class BasicBlockPass;
+class Function;
+class FunctionPass;
+class ModulePass;
+class Pass;
+class GetElementPtrInst;
+class PassInfo;
+class TerminatorInst;
+class TargetLowering;
+class TargetMachine;
+
+//===----------------------------------------------------------------------===//
+//
+// ConstantPropagation - A worklist driven constant propagation pass
+//
+FunctionPass *createConstantPropagationPass();
+
+//===----------------------------------------------------------------------===//
+//
+// AlignmentFromAssumptions - Use assume intrinsics to set load/store
+// alignments.
+//
+FunctionPass *createAlignmentFromAssumptionsPass();
+
+//===----------------------------------------------------------------------===//
+//
+// SCCP - Sparse conditional constant propagation.
+//
+FunctionPass *createSCCPPass();
+
+//===----------------------------------------------------------------------===//
+//
+// DeadInstElimination - This pass quickly removes trivially dead instructions
+// without modifying the CFG of the function. It is a BasicBlockPass, so it
+// runs efficiently when queued next to other BasicBlockPass's.
+//
+Pass *createDeadInstEliminationPass();
+
+//===----------------------------------------------------------------------===//
+//
+// DeadCodeElimination - This pass is more powerful than DeadInstElimination,
+// because it is worklist driven that can potentially revisit instructions when
+// their other instructions become dead, to eliminate chains of dead
+// computations.
+//
+FunctionPass *createDeadCodeEliminationPass();
+
+//===----------------------------------------------------------------------===//
+//
+// DeadStoreElimination - This pass deletes stores that are post-dominated by
+// must-aliased stores and are not loaded or used between the stores.
+//
+FunctionPass *createDeadStoreEliminationPass();
+
+//===----------------------------------------------------------------------===//
+//
+// AggressiveDCE - This pass uses the SSA based Aggressive DCE algorithm. This
+// algorithm assumes instructions are dead until proven otherwise, which makes
+// it more successful at removing non-obviously dead instructions.
+//
+FunctionPass *createAggressiveDCEPass();
+
+//===----------------------------------------------------------------------===//
+//
+// BitTrackingDCE - This pass uses a bit-tracking DCE algorithm in order to
+// remove computations of dead bits.
+//
+FunctionPass *createBitTrackingDCEPass();
+
+//===----------------------------------------------------------------------===//
+//
+// SROA - Replace aggregates or pieces of aggregates with scalar SSA values.
+//
+FunctionPass *createSROAPass();
+
+//===----------------------------------------------------------------------===//
+//
+// ScalarReplAggregates - Break up alloca's of aggregates into multiple allocas
+// if possible.
+//
+FunctionPass *createScalarReplAggregatesPass(signed Threshold = -1,
+ bool UseDomTree = true,
+ signed StructMemberThreshold = -1,
+ signed ArrayElementThreshold = -1,
+ signed ScalarLoadThreshold = -1);
+
+//===----------------------------------------------------------------------===//
+//
+// InductiveRangeCheckElimination - Transform loops to elide range checks on
+// linear functions of the induction variable.
+//
+Pass *createInductiveRangeCheckEliminationPass();
+
+//===----------------------------------------------------------------------===//
+//
+// InductionVariableSimplify - Transform induction variables in a program to all
+// use a single canonical induction variable per loop.
+//
+Pass *createIndVarSimplifyPass();
+
+//===----------------------------------------------------------------------===//
+//
+// InstructionCombining - Combine instructions to form fewer, simple
+// instructions. This pass does not modify the CFG, and has a tendency to make
+// instructions dead, so a subsequent DCE pass is useful.
+//
+// This pass combines things like:
+// %Y = add int 1, %X
+// %Z = add int 1, %Y
+// into:
+// %Z = add int 2, %X
+//
+FunctionPass *createInstructionCombiningPass();
+
+//===----------------------------------------------------------------------===//
+//
+// LICM - This pass is a loop invariant code motion and memory promotion pass.
+//
+Pass *createLICMPass();
+
+//===----------------------------------------------------------------------===//
+//
+// LoopInterchange - This pass interchanges loops to provide more
+// cache-friendly memory access patterns.
+//
+Pass *createLoopInterchangePass();
+
+//===----------------------------------------------------------------------===//
+//
+// LoopStrengthReduce - This pass strength-reduces GEP instructions that use
+// a loop's canonical induction variable as one of their indices.
+//
+Pass *createLoopStrengthReducePass();
+
+//===----------------------------------------------------------------------===//
+//
+// GlobalMerge - This pass merges internal (by default) globals into structs
+// to enable reuse of a base pointer by indexed addressing modes.
+// It can also be configured to focus on size optimizations only.
+//
+Pass *createGlobalMergePass(const TargetMachine *TM, unsigned MaximalOffset,
+ bool OnlyOptimizeForSize = false,
+ bool MergeExternalByDefault = false);
+
+//===----------------------------------------------------------------------===//
+//
+// LoopUnswitch - This pass is a simple loop unswitching pass.
+//
+Pass *createLoopUnswitchPass(bool OptimizeForSize = false);
+
+//===----------------------------------------------------------------------===//
+//
+// LoopInstSimplify - This pass simplifies instructions in a loop's body.
+//
+Pass *createLoopInstSimplifyPass();
+
+//===----------------------------------------------------------------------===//
+//
+// LoopUnroll - This pass is a simple loop unrolling pass.
+//
+Pass *createLoopUnrollPass(int Threshold = -1, int Count = -1,
+ int AllowPartial = -1, int Runtime = -1);
+// Create an unrolling pass for full unrolling only.
+Pass *createSimpleLoopUnrollPass();
+
+//===----------------------------------------------------------------------===//
+//
+// LoopReroll - This pass is a simple loop rerolling pass.
+//
+Pass *createLoopRerollPass();
+
+//===----------------------------------------------------------------------===//
+//
+// LoopRotate - This pass is a simple loop rotating pass.
+//
+Pass *createLoopRotatePass(int MaxHeaderSize = -1);
+
+//===----------------------------------------------------------------------===//
+//
+// LoopIdiom - This pass recognizes and replaces idioms in loops.
+//
+Pass *createLoopIdiomPass();
+
+//===----------------------------------------------------------------------===//
+//
+// PromoteMemoryToRegister - This pass is used to promote memory references to
+// be register references. A simple example of the transformation performed by
+// this pass is:
+//
+// FROM CODE TO CODE
+// %X = alloca i32, i32 1 ret i32 42
+// store i32 42, i32 *%X
+// %Y = load i32* %X
+// ret i32 %Y
+//
+FunctionPass *createPromoteMemoryToRegisterPass();
+
+//===----------------------------------------------------------------------===//
+//
+// DemoteRegisterToMemoryPass - This pass is used to demote registers to memory
+// references. It basically undoes the PromoteMemoryToRegister pass to make cfg
+// hacking easier.
+//
+FunctionPass *createDemoteRegisterToMemoryPass();
+extern char &DemoteRegisterToMemoryID;
+
+//===----------------------------------------------------------------------===//
+//
+// Reassociate - This pass reassociates commutative expressions in an order that
+// is designed to promote better constant propagation, GCSE, LICM, PRE...
+//
+// For example: 4 + (x + 5) -> x + (4 + 5)
+//
+FunctionPass *createReassociatePass();
+
+//===----------------------------------------------------------------------===//
+//
+// JumpThreading - Thread control through multi-pred/multi-succ blocks where some
+// preds always go to some succ. Thresholds other than minus one override the
+// internal BB duplication default threshold.
+//
+FunctionPass *createJumpThreadingPass(int Threshold = -1);
+
+//===----------------------------------------------------------------------===//
+//
+// CFGSimplification - Merge basic blocks, eliminate unreachable blocks,
+// simplify terminator instructions, etc...
+//
+FunctionPass *createCFGSimplificationPass(
+ int Threshold = -1, std::function<bool(const Function &)> Ftor = nullptr);
+
+//===----------------------------------------------------------------------===//
+//
+// FlattenCFG - flatten CFG, reduce number of conditional branches by using
+// parallel-and and parallel-or mode, etc...
+//
+FunctionPass *createFlattenCFGPass();
+
+//===----------------------------------------------------------------------===//
+//
+// CFG Structurization - Remove irreducible control flow
+//
+Pass *createStructurizeCFGPass();
+
+//===----------------------------------------------------------------------===//
+//
+// BreakCriticalEdges - Break all of the critical edges in the CFG by inserting
+// a dummy basic block. This pass may be "required" by passes that cannot deal
+// with critical edges. For this usage, a pass must call:
+//
+// AU.addRequiredID(BreakCriticalEdgesID);
+//
+// This pass obviously invalidates the CFG, but can update forward dominator
+// (set, immediate dominators, tree, and frontier) information.
+//
+FunctionPass *createBreakCriticalEdgesPass();
+extern char &BreakCriticalEdgesID;
+
+//===----------------------------------------------------------------------===//
+//
+// LoopSimplify - Insert Pre-header blocks into the CFG for every function in
+// the module. This pass updates dominator information, loop information, and
+// does not add critical edges to the CFG.
+//
+// AU.addRequiredID(LoopSimplifyID);
+//
+Pass *createLoopSimplifyPass();
+extern char &LoopSimplifyID;
+
+//===----------------------------------------------------------------------===//
+//
+// TailCallElimination - This pass eliminates call instructions to the current
+// function which occur immediately before return instructions.
+//
+FunctionPass *createTailCallEliminationPass();
+
+//===----------------------------------------------------------------------===//
+//
+// LowerSwitch - This pass converts SwitchInst instructions into a sequence of
+// chained binary branch instructions.
+//
+FunctionPass *createLowerSwitchPass();
+extern char &LowerSwitchID;
+
+//===----------------------------------------------------------------------===//
+//
+// LowerInvoke - This pass removes invoke instructions, converting them to call
+// instructions.
+//
+FunctionPass *createLowerInvokePass();
+extern char &LowerInvokePassID;
+
+//===----------------------------------------------------------------------===//
+//
+// LCSSA - This pass inserts phi nodes at loop boundaries to simplify other loop
+// optimizations.
+//
+Pass *createLCSSAPass();
+extern char &LCSSAID;
+
+//===----------------------------------------------------------------------===//
+//
+// EarlyCSE - This pass performs a simple and fast CSE pass over the dominator
+// tree.
+//
+FunctionPass *createEarlyCSEPass();
+
+//===----------------------------------------------------------------------===//
+//
+// MergedLoadStoreMotion - This pass merges loads and stores in diamonds. Loads
+// are hoisted into the header, while stores sink into the footer.
+//
+FunctionPass *createMergedLoadStoreMotionPass();
+
+//===----------------------------------------------------------------------===//
+//
+// GVN - This pass performs global value numbering and redundant load
+// elimination cotemporaneously.
+//
+FunctionPass *createGVNPass(bool NoLoads = false);
+
+//===----------------------------------------------------------------------===//
+//
+// MemCpyOpt - This pass performs optimizations related to eliminating memcpy
+// calls and/or combining multiple stores into memset's.
+//
+FunctionPass *createMemCpyOptPass();
+
+//===----------------------------------------------------------------------===//
+//
+// LoopDeletion - This pass performs DCE of non-infinite loops that it
+// can prove are dead.
+//
+Pass *createLoopDeletionPass();
+
+//===----------------------------------------------------------------------===//
+//
+// ConstantHoisting - This pass prepares a function for expensive constants.
+//
+FunctionPass *createConstantHoistingPass();
+
+//===----------------------------------------------------------------------===//
+//
+// InstructionNamer - Give any unnamed non-void instructions "tmp" names.
+//
+FunctionPass *createInstructionNamerPass();
+extern char &InstructionNamerID;
+
+//===----------------------------------------------------------------------===//
+//
+// Sink - Code Sinking
+//
+FunctionPass *createSinkingPass();
+
+//===----------------------------------------------------------------------===//
+//
+// LowerAtomic - Lower atomic intrinsics to non-atomic form
+//
+Pass *createLowerAtomicPass();
+
+//===----------------------------------------------------------------------===//
+//
+// ValuePropagation - Propagate CFG-derived value information
+//
+Pass *createCorrelatedValuePropagationPass();
+
+//===----------------------------------------------------------------------===//
+//
+// InstructionSimplifier - Remove redundant instructions.
+//
+FunctionPass *createInstructionSimplifierPass();
+extern char &InstructionSimplifierID;
+
+//===----------------------------------------------------------------------===//
+//
+// LowerExpectIntrinsics - Removes llvm.expect intrinsics and creates
+// "block_weights" metadata.
+FunctionPass *createLowerExpectIntrinsicPass();
+
+//===----------------------------------------------------------------------===//
+//
+// PartiallyInlineLibCalls - Tries to inline the fast path of library
+// calls such as sqrt.
+//
+FunctionPass *createPartiallyInlineLibCallsPass();
+
+//===----------------------------------------------------------------------===//
+//
+// ScalarizerPass - Converts vector operations into scalar operations
+//
+FunctionPass *createScalarizerPass();
+
+//===----------------------------------------------------------------------===//
+//
+// AddDiscriminators - Add DWARF path discriminators to the IR.
+FunctionPass *createAddDiscriminatorsPass();
+
+//===----------------------------------------------------------------------===//
+//
+// SeparateConstOffsetFromGEP - Split GEPs for better CSE
+//
+FunctionPass *
+createSeparateConstOffsetFromGEPPass(const TargetMachine *TM = nullptr,
+ bool LowerGEP = false);
+
+//===----------------------------------------------------------------------===//
+//
+// SpeculativeExecution - Aggressively hoist instructions to enable
+// speculative execution on targets where branches are expensive.
+//
+FunctionPass *createSpeculativeExecutionPass();
+
+//===----------------------------------------------------------------------===//
+//
+// LoadCombine - Combine loads into bigger loads.
+//
+BasicBlockPass *createLoadCombinePass();
+
+//===----------------------------------------------------------------------===//
+//
+// StraightLineStrengthReduce - This pass strength-reduces some certain
+// instruction patterns in straight-line code.
+//
+FunctionPass *createStraightLineStrengthReducePass();
+
+//===----------------------------------------------------------------------===//
+//
+// PlaceSafepoints - Rewrite any IR calls to gc.statepoints and insert any
+// safepoint polls (method entry, backedge) that might be required. This pass
+// does not generate explicit relocation sequences - that's handled by
+// RewriteStatepointsForGC which can be run at an arbitrary point in the pass
+// order following this pass.
+//
+FunctionPass *createPlaceSafepointsPass();
+
+//===----------------------------------------------------------------------===//
+//
+// RewriteStatepointsForGC - Rewrite any gc.statepoints which do not yet have
+// explicit relocations to include explicit relocations.
+//
+ModulePass *createRewriteStatepointsForGCPass();
+
+//===----------------------------------------------------------------------===//
+//
+// Float2Int - Demote floats to ints where possible.
+//
+FunctionPass *createFloat2IntPass();
+
+//===----------------------------------------------------------------------===//
+//
+// NaryReassociate - Simplify n-ary operations by reassociation.
+//
+FunctionPass *createNaryReassociatePass();
+
+//===----------------------------------------------------------------------===//
+//
+// LoopDistribute - Distribute loops.
+//
+FunctionPass *createLoopDistributePass();
+
+//===----------------------------------------------------------------------===//
+//
+// LoopLoadElimination - Perform loop-aware load elimination.
+//
+FunctionPass *createLoopLoadEliminationPass();
+
+} // End llvm namespace
+
+#endif
diff --git a/gnu/llvm/include/llvm/Transforms/Scalar/ADCE.h b/gnu/llvm/include/llvm/Transforms/Scalar/ADCE.h
new file mode 100644
index 00000000000..f9bc7b77c14
--- /dev/null
+++ b/gnu/llvm/include/llvm/Transforms/Scalar/ADCE.h
@@ -0,0 +1,38 @@
+//===- ADCE.h - Aggressive dead code elimination --------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file provides the interface for the Aggressive Dead Code Elimination
+// pass. This pass optimistically assumes that all instructions are dead until
+// proven otherwise, allowing it to eliminate dead computations that other DCE
+// passes do not catch, particularly involving loop computations.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TRANSFORMS_SCALAR_ADCE_H
+#define LLVM_TRANSFORMS_SCALAR_ADCE_H
+
+#include "llvm/IR/Function.h"
+#include "llvm/IR/PassManager.h"
+
+namespace llvm {
+
+/// A DCE pass that assumes instructions are dead until proven otherwise.
+///
+/// This pass eliminates dead code by optimistically assuming that all
+/// instructions are dead until proven otherwise. This allows it to eliminate
+/// dead computations that other DCE passes do not catch, particularly involving
+/// loop computations.
+class ADCEPass {
+public:
+ static StringRef name() { return "ADCEPass"; }
+ PreservedAnalyses run(Function &F);
+};
+}
+
+#endif // LLVM_TRANSFORMS_SCALAR_ADCE_H
diff --git a/gnu/llvm/include/llvm/Transforms/Scalar/EarlyCSE.h b/gnu/llvm/include/llvm/Transforms/Scalar/EarlyCSE.h
new file mode 100644
index 00000000000..e3dd3c050df
--- /dev/null
+++ b/gnu/llvm/include/llvm/Transforms/Scalar/EarlyCSE.h
@@ -0,0 +1,39 @@
+//===- EarlyCSE.h - Simple and fast CSE pass --------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+/// \file
+/// This file provides the interface for a simple, fast CSE pass.
+///
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TRANSFORMS_SCALAR_EARLYCSE_H
+#define LLVM_TRANSFORMS_SCALAR_EARLYCSE_H
+
+#include "llvm/IR/Function.h"
+#include "llvm/IR/PassManager.h"
+
+namespace llvm {
+
+/// \brief A simple and fast domtree-based CSE pass.
+///
+/// This pass does a simple depth-first walk over the dominator tree,
+/// eliminating trivially redundant instructions and using instsimplify to
+/// canonicalize things as it goes. It is intended to be fast and catch obvious
+/// cases so that instcombine and other passes are more effective. It is
+/// expected that a later pass of GVN will catch the interesting/hard cases.
+class EarlyCSEPass {
+public:
+ static StringRef name() { return "EarlyCSEPass"; }
+
+ /// \brief Run the pass over the function.
+ PreservedAnalyses run(Function &F, AnalysisManager<Function> *AM);
+};
+
+}
+
+#endif
diff --git a/gnu/llvm/include/llvm/Transforms/Scalar/LowerExpectIntrinsic.h b/gnu/llvm/include/llvm/Transforms/Scalar/LowerExpectIntrinsic.h
new file mode 100644
index 00000000000..40283203f3a
--- /dev/null
+++ b/gnu/llvm/include/llvm/Transforms/Scalar/LowerExpectIntrinsic.h
@@ -0,0 +1,40 @@
+//===- LowerExpectIntrinsic.h - LowerExpectIntrinsic pass -------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+/// \file
+///
+/// The header file for the LowerExpectIntrinsic pass as used by the new pass
+/// manager.
+///
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TRANSFORMS_SCALAR_LOWEREXPECTINTRINSIC_H
+#define LLVM_TRANSFORMS_SCALAR_LOWEREXPECTINTRINSIC_H
+
+#include "llvm/IR/Function.h"
+#include "llvm/IR/PassManager.h"
+
+namespace llvm {
+
+class LowerExpectIntrinsicPass {
+public:
+ static StringRef name() { return "LowerExpectIntrinsicPass"; }
+
+ /// \brief Run the pass over the function.
+ ///
+ /// This will lower all of the expect intrinsic calls in this function into
+ /// branch weight metadata. That metadata will subsequently feed the analysis
+ /// of the probabilities and frequencies of the CFG. After running this pass,
+ /// no more expect intrinsics remain, allowing the rest of the optimizer to
+ /// ignore them.
+ PreservedAnalyses run(Function &F);
+};
+
+}
+
+#endif
diff --git a/gnu/llvm/include/llvm/Transforms/Scalar/SROA.h b/gnu/llvm/include/llvm/Transforms/Scalar/SROA.h
new file mode 100644
index 00000000000..f90cc7b686b
--- /dev/null
+++ b/gnu/llvm/include/llvm/Transforms/Scalar/SROA.h
@@ -0,0 +1,129 @@
+//===- SROA.h - Scalar Replacement Of Aggregates ----------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+/// \file
+/// This file provides the interface for LLVM's Scalar Replacement of
+/// Aggregates pass. This pass provides both aggregate splitting and the
+/// primary SSA formation used in the compiler.
+///
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TRANSFORMS_SCALAR_SROA_H
+#define LLVM_TRANSFORMS_SCALAR_SROA_H
+
+#include "llvm/ADT/SetVector.h"
+#include "llvm/Analysis/AssumptionCache.h"
+#include "llvm/IR/Dominators.h"
+#include "llvm/IR/Function.h"
+#include "llvm/IR/PassManager.h"
+
+namespace llvm {
+
+/// A private "module" namespace for types and utilities used by SROA. These
+/// are implementation details and should not be used by clients.
+namespace sroa {
+class AllocaSliceRewriter;
+class AllocaSlices;
+class Partition;
+class SROALegacyPass;
+}
+
+/// \brief An optimization pass providing Scalar Replacement of Aggregates.
+///
+/// This pass takes allocations which can be completely analyzed (that is, they
+/// don't escape) and tries to turn them into scalar SSA values. There are
+/// a few steps to this process.
+///
+/// 1) It takes allocations of aggregates and analyzes the ways in which they
+/// are used to try to split them into smaller allocations, ideally of
+/// a single scalar data type. It will split up memcpy and memset accesses
+/// as necessary and try to isolate individual scalar accesses.
+/// 2) It will transform accesses into forms which are suitable for SSA value
+/// promotion. This can be replacing a memset with a scalar store of an
+/// integer value, or it can involve speculating operations on a PHI or
+/// select to be a PHI or select of the results.
+/// 3) Finally, this will try to detect a pattern of accesses which map cleanly
+/// onto insert and extract operations on a vector value, and convert them to
+/// this form. By doing so, it will enable promotion of vector aggregates to
+/// SSA vector values.
+class SROA {
+ LLVMContext *C;
+ DominatorTree *DT;
+ AssumptionCache *AC;
+
+ /// \brief Worklist of alloca instructions to simplify.
+ ///
+ /// Each alloca in the function is added to this. Each new alloca formed gets
+ /// added to it as well to recursively simplify unless that alloca can be
+ /// directly promoted. Finally, each time we rewrite a use of an alloca other
+ /// the one being actively rewritten, we add it back onto the list if not
+ /// already present to ensure it is re-visited.
+ SetVector<AllocaInst *, SmallVector<AllocaInst *, 16>> Worklist;
+
+ /// \brief A collection of instructions to delete.
+ /// We try to batch deletions to simplify code and make things a bit more
+ /// efficient.
+ SetVector<Instruction *, SmallVector<Instruction *, 8>> DeadInsts;
+
+ /// \brief Post-promotion worklist.
+ ///
+ /// Sometimes we discover an alloca which has a high probability of becoming
+ /// viable for SROA after a round of promotion takes place. In those cases,
+ /// the alloca is enqueued here for re-processing.
+ ///
+ /// Note that we have to be very careful to clear allocas out of this list in
+ /// the event they are deleted.
+ SetVector<AllocaInst *, SmallVector<AllocaInst *, 16>> PostPromotionWorklist;
+
+ /// \brief A collection of alloca instructions we can directly promote.
+ std::vector<AllocaInst *> PromotableAllocas;
+
+ /// \brief A worklist of PHIs to speculate prior to promoting allocas.
+ ///
+ /// All of these PHIs have been checked for the safety of speculation and by
+ /// being speculated will allow promoting allocas currently in the promotable
+ /// queue.
+ SetVector<PHINode *, SmallVector<PHINode *, 2>> SpeculatablePHIs;
+
+ /// \brief A worklist of select instructions to speculate prior to promoting
+ /// allocas.
+ ///
+ /// All of these select instructions have been checked for the safety of
+ /// speculation and by being speculated will allow promoting allocas
+ /// currently in the promotable queue.
+ SetVector<SelectInst *, SmallVector<SelectInst *, 2>> SpeculatableSelects;
+
+public:
+ SROA() : C(nullptr), DT(nullptr), AC(nullptr) {}
+
+ static StringRef name() { return "SROA"; }
+
+ /// \brief Run the pass over the function.
+ PreservedAnalyses run(Function &F, AnalysisManager<Function> *AM);
+
+private:
+ friend class sroa::AllocaSliceRewriter;
+ friend class sroa::SROALegacyPass;
+
+ /// Helper used by both the public run method and by the legacy pass.
+ PreservedAnalyses runImpl(Function &F, DominatorTree &RunDT,
+ AssumptionCache &RunAC);
+
+ bool presplitLoadsAndStores(AllocaInst &AI, sroa::AllocaSlices &AS);
+ AllocaInst *rewritePartition(AllocaInst &AI, sroa::AllocaSlices &AS,
+ sroa::Partition &P);
+ bool splitAlloca(AllocaInst &AI, sroa::AllocaSlices &AS);
+ bool runOnAlloca(AllocaInst &AI);
+ void clobberUse(Use &U);
+ void deleteDeadInstructions(SmallPtrSetImpl<AllocaInst *> &DeletedAllocas);
+ bool promoteAllocas(Function &F);
+};
+
+}
+
+#endif
diff --git a/gnu/llvm/include/llvm/Transforms/Scalar/SimplifyCFG.h b/gnu/llvm/include/llvm/Transforms/Scalar/SimplifyCFG.h
new file mode 100644
index 00000000000..ef28e0f78a4
--- /dev/null
+++ b/gnu/llvm/include/llvm/Transforms/Scalar/SimplifyCFG.h
@@ -0,0 +1,46 @@
+//===- SimplifyCFG.h - Simplify and canonicalize the CFG --------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+/// \file
+/// This file provides the interface for the pass responsible for both
+/// simplifying and canonicalizing the CFG.
+///
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TRANSFORMS_SCALAR_SIMPLIFYCFG_H
+#define LLVM_TRANSFORMS_SCALAR_SIMPLIFYCFG_H
+
+#include "llvm/IR/Function.h"
+#include "llvm/IR/PassManager.h"
+
+namespace llvm {
+
+/// \brief A pass to simplify and canonicalize the CFG of a function.
+///
+/// This pass iteratively simplifies the entire CFG of a function, removing
+/// unnecessary control flows and bringing it into the canonical form expected
+/// by the rest of the mid-level optimizer.
+class SimplifyCFGPass {
+ int BonusInstThreshold;
+
+public:
+ static StringRef name() { return "SimplifyCFGPass"; }
+
+ /// \brief Construct a pass with the default thresholds.
+ SimplifyCFGPass();
+
+ /// \brief Construct a pass with a specific bonus threshold.
+ SimplifyCFGPass(int BonusInstThreshold);
+
+ /// \brief Run the pass over the function.
+ PreservedAnalyses run(Function &F, AnalysisManager<Function> *AM);
+};
+
+}
+
+#endif
diff --git a/gnu/llvm/include/llvm/Transforms/Utils/ASanStackFrameLayout.h b/gnu/llvm/include/llvm/Transforms/Utils/ASanStackFrameLayout.h
new file mode 100644
index 00000000000..4e4f02c84ec
--- /dev/null
+++ b/gnu/llvm/include/llvm/Transforms/Utils/ASanStackFrameLayout.h
@@ -0,0 +1,64 @@
+//===- ASanStackFrameLayout.h - ComputeASanStackFrameLayout -----*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This header defines ComputeASanStackFrameLayout and auxiliary data structs.
+//
+//===----------------------------------------------------------------------===//
+#ifndef LLVM_TRANSFORMS_UTILS_ASANSTACKFRAMELAYOUT_H
+#define LLVM_TRANSFORMS_UTILS_ASANSTACKFRAMELAYOUT_H
+#include "llvm/ADT/SmallString.h"
+#include "llvm/ADT/SmallVector.h"
+
+namespace llvm {
+
+class AllocaInst;
+
+// These magic constants should be the same as the ones
+// in asan_internal.h from ASan runtime in compiler-rt.
+static const int kAsanStackLeftRedzoneMagic = 0xf1;
+static const int kAsanStackMidRedzoneMagic = 0xf2;
+static const int kAsanStackRightRedzoneMagic = 0xf3;
+
+// Input/output data struct for ComputeASanStackFrameLayout.
+struct ASanStackVariableDescription {
+ const char *Name; // Name of the variable that will be displayed by asan
+ // if a stack-related bug is reported.
+ uint64_t Size; // Size of the variable in bytes.
+ size_t Alignment; // Alignment of the variable (power of 2).
+ AllocaInst *AI; // The actual AllocaInst.
+ size_t Offset; // Offset from the beginning of the frame;
+ // set by ComputeASanStackFrameLayout.
+};
+
+// Output data struct for ComputeASanStackFrameLayout.
+struct ASanStackFrameLayout {
+ // Frame description, see DescribeAddressIfStack in ASan runtime.
+ SmallString<64> DescriptionString;
+ // The contents of the shadow memory for the stack frame that we need
+ // to set at function entry.
+ SmallVector<uint8_t, 64> ShadowBytes;
+ size_t FrameAlignment; // Alignment for the entire frame.
+ size_t FrameSize; // Size of the frame in bytes.
+};
+
+void ComputeASanStackFrameLayout(
+ // The array of stack variables. The elements may get reordered and changed.
+ SmallVectorImpl<ASanStackVariableDescription> &Vars,
+ // AddressSanitizer's shadow granularity. Usually 8, may also be 16, 32, 64.
+ size_t Granularity,
+ // The minimal size of the left-most redzone (header).
+ // At least 4 pointer sizes, power of 2, and >= Granularity.
+ // The resulting FrameSize should be multiple of MinHeaderSize.
+ size_t MinHeaderSize,
+ // The result is put here.
+ ASanStackFrameLayout *Layout);
+
+} // llvm namespace
+
+#endif // LLVM_TRANSFORMS_UTILS_ASANSTACKFRAMELAYOUT_H
diff --git a/gnu/llvm/include/llvm/Transforms/Utils/BasicBlockUtils.h b/gnu/llvm/include/llvm/Transforms/Utils/BasicBlockUtils.h
new file mode 100644
index 00000000000..13c856dfdc9
--- /dev/null
+++ b/gnu/llvm/include/llvm/Transforms/Utils/BasicBlockUtils.h
@@ -0,0 +1,297 @@
+//===-- Transform/Utils/BasicBlockUtils.h - BasicBlock Utils ----*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This family of functions perform manipulations on basic blocks, and
+// instructions contained within basic blocks.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TRANSFORMS_UTILS_BASICBLOCKUTILS_H
+#define LLVM_TRANSFORMS_UTILS_BASICBLOCKUTILS_H
+
+// FIXME: Move to this file: BasicBlock::removePredecessor, BB::splitBasicBlock
+
+#include "llvm/IR/BasicBlock.h"
+#include "llvm/IR/CFG.h"
+
+namespace llvm {
+
+class MemoryDependenceAnalysis;
+class DominatorTree;
+class LoopInfo;
+class Instruction;
+class MDNode;
+class ReturnInst;
+class TargetLibraryInfo;
+class TerminatorInst;
+
+/// DeleteDeadBlock - Delete the specified block, which must have no
+/// predecessors.
+void DeleteDeadBlock(BasicBlock *BB);
+
+/// FoldSingleEntryPHINodes - We know that BB has one predecessor. If there are
+/// any single-entry PHI nodes in it, fold them away. This handles the case
+/// when all entries to the PHI nodes in a block are guaranteed equal, such as
+/// when the block has exactly one predecessor.
+void FoldSingleEntryPHINodes(BasicBlock *BB,
+ MemoryDependenceAnalysis *MemDep = nullptr);
+
+/// DeleteDeadPHIs - Examine each PHI in the given block and delete it if it
+/// is dead. Also recursively delete any operands that become dead as
+/// a result. This includes tracing the def-use list from the PHI to see if
+/// it is ultimately unused or if it reaches an unused cycle. Return true
+/// if any PHIs were deleted.
+bool DeleteDeadPHIs(BasicBlock *BB, const TargetLibraryInfo *TLI = nullptr);
+
+/// MergeBlockIntoPredecessor - Attempts to merge a block into its predecessor,
+/// if possible. The return value indicates success or failure.
+bool MergeBlockIntoPredecessor(BasicBlock *BB, DominatorTree *DT = nullptr,
+ LoopInfo *LI = nullptr,
+ MemoryDependenceAnalysis *MemDep = nullptr);
+
+// ReplaceInstWithValue - Replace all uses of an instruction (specified by BI)
+// with a value, then remove and delete the original instruction.
+//
+void ReplaceInstWithValue(BasicBlock::InstListType &BIL,
+ BasicBlock::iterator &BI, Value *V);
+
+// ReplaceInstWithInst - Replace the instruction specified by BI with the
+// instruction specified by I. Copies DebugLoc from BI to I, if I doesn't
+// already have a DebugLoc. The original instruction is deleted and BI is
+// updated to point to the new instruction.
+//
+void ReplaceInstWithInst(BasicBlock::InstListType &BIL,
+ BasicBlock::iterator &BI, Instruction *I);
+
+// ReplaceInstWithInst - Replace the instruction specified by From with the
+// instruction specified by To. Copies DebugLoc from From to To, if To doesn't
+// already have a DebugLoc.
+//
+void ReplaceInstWithInst(Instruction *From, Instruction *To);
+
+/// \brief Option class for critical edge splitting.
+///
+/// This provides a builder interface for overriding the default options used
+/// during critical edge splitting.
+struct CriticalEdgeSplittingOptions {
+ DominatorTree *DT;
+ LoopInfo *LI;
+ bool MergeIdenticalEdges;
+ bool DontDeleteUselessPHIs;
+ bool PreserveLCSSA;
+
+ CriticalEdgeSplittingOptions(DominatorTree *DT = nullptr,
+ LoopInfo *LI = nullptr)
+ : DT(DT), LI(LI), MergeIdenticalEdges(false),
+ DontDeleteUselessPHIs(false), PreserveLCSSA(false) {}
+
+ CriticalEdgeSplittingOptions &setMergeIdenticalEdges() {
+ MergeIdenticalEdges = true;
+ return *this;
+ }
+
+ CriticalEdgeSplittingOptions &setDontDeleteUselessPHIs() {
+ DontDeleteUselessPHIs = true;
+ return *this;
+ }
+
+ CriticalEdgeSplittingOptions &setPreserveLCSSA() {
+ PreserveLCSSA = true;
+ return *this;
+ }
+};
+
+/// SplitCriticalEdge - If this edge is a critical edge, insert a new node to
+/// split the critical edge. This will update the analyses passed in through
+/// the option struct. This returns the new block if the edge was split, null
+/// otherwise.
+///
+/// If MergeIdenticalEdges in the options struct is true (not the default),
+/// *all* edges from TI to the specified successor will be merged into the same
+/// critical edge block. This is most commonly interesting with switch
+/// instructions, which may have many edges to any one destination. This
+/// ensures that all edges to that dest go to one block instead of each going
+/// to a different block, but isn't the standard definition of a "critical
+/// edge".
+///
+/// It is invalid to call this function on a critical edge that starts at an
+/// IndirectBrInst. Splitting these edges will almost always create an invalid
+/// program because the address of the new block won't be the one that is jumped
+/// to.
+///
+BasicBlock *SplitCriticalEdge(TerminatorInst *TI, unsigned SuccNum,
+ const CriticalEdgeSplittingOptions &Options =
+ CriticalEdgeSplittingOptions());
+
+inline BasicBlock *
+SplitCriticalEdge(BasicBlock *BB, succ_iterator SI,
+ const CriticalEdgeSplittingOptions &Options =
+ CriticalEdgeSplittingOptions()) {
+ return SplitCriticalEdge(BB->getTerminator(), SI.getSuccessorIndex(),
+ Options);
+}
+
+/// SplitCriticalEdge - If the edge from *PI to BB is not critical, return
+/// false. Otherwise, split all edges between the two blocks and return true.
+/// This updates all of the same analyses as the other SplitCriticalEdge
+/// function. The analyses to update are described by the
+/// options struct.
+inline bool SplitCriticalEdge(BasicBlock *Succ, pred_iterator PI,
+ const CriticalEdgeSplittingOptions &Options =
+ CriticalEdgeSplittingOptions()) {
+ bool MadeChange = false;
+ TerminatorInst *TI = (*PI)->getTerminator();
+ for (unsigned i = 0, e = TI->getNumSuccessors(); i != e; ++i)
+ if (TI->getSuccessor(i) == Succ)
+ MadeChange |= !!SplitCriticalEdge(TI, i, Options);
+ return MadeChange;
+}
+
+/// SplitCriticalEdge - If an edge from Src to Dst is critical, split the edge
+/// and return the new block, otherwise return null. This method requires that
+/// there be an edge between the two blocks. It updates the analyses
+/// passed in the options struct.
+inline BasicBlock *
+SplitCriticalEdge(BasicBlock *Src, BasicBlock *Dst,
+ const CriticalEdgeSplittingOptions &Options =
+ CriticalEdgeSplittingOptions()) {
+ TerminatorInst *TI = Src->getTerminator();
+ unsigned i = 0;
+ while (1) {
+ assert(i != TI->getNumSuccessors() && "Edge doesn't exist!");
+ if (TI->getSuccessor(i) == Dst)
+ return SplitCriticalEdge(TI, i, Options);
+ ++i;
+ }
+}
+
+// SplitAllCriticalEdges - Loop over all of the edges in the CFG,
+// breaking critical edges as they are found.
+// Returns the number of broken edges.
+unsigned SplitAllCriticalEdges(Function &F,
+ const CriticalEdgeSplittingOptions &Options =
+ CriticalEdgeSplittingOptions());
+
+/// SplitEdge - Split the edge connecting the specified blocks.
+BasicBlock *SplitEdge(BasicBlock *From, BasicBlock *To,
+ DominatorTree *DT = nullptr, LoopInfo *LI = nullptr);
+
+/// SplitBlock - Split the specified block at the specified instruction -
+/// everything before SplitPt stays in Old and everything starting with SplitPt
+/// moves to a new block. The two blocks are joined by an unconditional branch
+/// and the loop info is updated.
+///
+BasicBlock *SplitBlock(BasicBlock *Old, Instruction *SplitPt,
+ DominatorTree *DT = nullptr, LoopInfo *LI = nullptr);
+
+/// SplitBlockPredecessors - This method introduces at least one new basic block
+/// into the function and moves some of the predecessors of BB to be
+/// predecessors of the new block. The new predecessors are indicated by the
+/// Preds array. The new block is given a suffix of 'Suffix'. Returns new basic
+/// block to which predecessors from Preds are now pointing.
+///
+/// If BB is a landingpad block then additional basicblock might be introduced.
+/// It will have Suffix+".split_lp". See SplitLandingPadPredecessors for more
+/// details on this case.
+///
+/// This currently updates the LLVM IR, DominatorTree, LoopInfo, and LCSSA but
+/// no other analyses. In particular, it does not preserve LoopSimplify
+/// (because it's complicated to handle the case where one of the edges being
+/// split is an exit of a loop with other exits).
+///
+BasicBlock *SplitBlockPredecessors(BasicBlock *BB, ArrayRef<BasicBlock *> Preds,
+ const char *Suffix,
+ DominatorTree *DT = nullptr,
+ LoopInfo *LI = nullptr,
+ bool PreserveLCSSA = false);
+
+/// SplitLandingPadPredecessors - This method transforms the landing pad,
+/// OrigBB, by introducing two new basic blocks into the function. One of those
+/// new basic blocks gets the predecessors listed in Preds. The other basic
+/// block gets the remaining predecessors of OrigBB. The landingpad instruction
+/// OrigBB is cloned into both of the new basic blocks. The new blocks are given
+/// the suffixes 'Suffix1' and 'Suffix2', and are returned in the NewBBs vector.
+///
+/// This currently updates the LLVM IR, DominatorTree, LoopInfo, and LCSSA but
+/// no other analyses. In particular, it does not preserve LoopSimplify
+/// (because it's complicated to handle the case where one of the edges being
+/// split is an exit of a loop with other exits).
+///
+void SplitLandingPadPredecessors(BasicBlock *OrigBB,
+ ArrayRef<BasicBlock *> Preds,
+ const char *Suffix, const char *Suffix2,
+ SmallVectorImpl<BasicBlock *> &NewBBs,
+ DominatorTree *DT = nullptr,
+ LoopInfo *LI = nullptr,
+ bool PreserveLCSSA = false);
+
+/// FoldReturnIntoUncondBranch - This method duplicates the specified return
+/// instruction into a predecessor which ends in an unconditional branch. If
+/// the return instruction returns a value defined by a PHI, propagate the
+/// right value into the return. It returns the new return instruction in the
+/// predecessor.
+ReturnInst *FoldReturnIntoUncondBranch(ReturnInst *RI, BasicBlock *BB,
+ BasicBlock *Pred);
+
+/// SplitBlockAndInsertIfThen - Split the containing block at the
+/// specified instruction - everything before and including SplitBefore stays
+/// in the old basic block, and everything after SplitBefore is moved to a
+/// new block. The two blocks are connected by a conditional branch
+/// (with value of Cmp being the condition).
+/// Before:
+/// Head
+/// SplitBefore
+/// Tail
+/// After:
+/// Head
+/// if (Cond)
+/// ThenBlock
+/// SplitBefore
+/// Tail
+///
+/// If Unreachable is true, then ThenBlock ends with
+/// UnreachableInst, otherwise it branches to Tail.
+/// Returns the NewBasicBlock's terminator.
+///
+/// Updates DT if given.
+TerminatorInst *SplitBlockAndInsertIfThen(Value *Cond, Instruction *SplitBefore,
+ bool Unreachable,
+ MDNode *BranchWeights = nullptr,
+ DominatorTree *DT = nullptr);
+
+/// SplitBlockAndInsertIfThenElse is similar to SplitBlockAndInsertIfThen,
+/// but also creates the ElseBlock.
+/// Before:
+/// Head
+/// SplitBefore
+/// Tail
+/// After:
+/// Head
+/// if (Cond)
+/// ThenBlock
+/// else
+/// ElseBlock
+/// SplitBefore
+/// Tail
+void SplitBlockAndInsertIfThenElse(Value *Cond, Instruction *SplitBefore,
+ TerminatorInst **ThenTerm,
+ TerminatorInst **ElseTerm,
+ MDNode *BranchWeights = nullptr);
+
+///
+/// GetIfCondition - Check whether BB is the merge point of an if-region.
+/// If so, return the boolean condition that determines which entry into
+/// BB will be taken. Also, return by references the block that will be
+/// entered from if the condition is true, and the block that will be
+/// entered if the condition is false.
+Value *GetIfCondition(BasicBlock *BB, BasicBlock *&IfTrue,
+ BasicBlock *&IfFalse);
+} // End llvm namespace
+
+#endif
diff --git a/gnu/llvm/include/llvm/Transforms/Utils/BuildLibCalls.h b/gnu/llvm/include/llvm/Transforms/Utils/BuildLibCalls.h
new file mode 100644
index 00000000000..879f295caf0
--- /dev/null
+++ b/gnu/llvm/include/llvm/Transforms/Utils/BuildLibCalls.h
@@ -0,0 +1,116 @@
+//===- BuildLibCalls.h - Utility builder for libcalls -----------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file exposes an interface to build some C language libcalls for
+// optimization passes that need to call the various functions.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TRANSFORMS_UTILS_BUILDLIBCALLS_H
+#define LLVM_TRANSFORMS_UTILS_BUILDLIBCALLS_H
+
+#include "llvm/IR/IRBuilder.h"
+
+namespace llvm {
+ class Value;
+ class DataLayout;
+ class TargetLibraryInfo;
+
+ /// CastToCStr - Return V if it is an i8*, otherwise cast it to i8*.
+ Value *CastToCStr(Value *V, IRBuilder<> &B);
+
+ /// EmitStrLen - Emit a call to the strlen function to the builder, for the
+ /// specified pointer. Ptr is required to be some pointer type, and the
+ /// return value has 'intptr_t' type.
+ Value *EmitStrLen(Value *Ptr, IRBuilder<> &B, const DataLayout &DL,
+ const TargetLibraryInfo *TLI);
+
+ /// EmitStrNLen - Emit a call to the strnlen function to the builder, for the
+ /// specified pointer. Ptr is required to be some pointer type, MaxLen must
+ /// be of size_t type, and the return value has 'intptr_t' type.
+ Value *EmitStrNLen(Value *Ptr, Value *MaxLen, IRBuilder<> &B,
+ const DataLayout &DL, const TargetLibraryInfo *TLI);
+
+ /// EmitStrChr - Emit a call to the strchr function to the builder, for the
+ /// specified pointer and character. Ptr is required to be some pointer type,
+ /// and the return value has 'i8*' type.
+ Value *EmitStrChr(Value *Ptr, char C, IRBuilder<> &B,
+ const TargetLibraryInfo *TLI);
+
+ /// EmitStrNCmp - Emit a call to the strncmp function to the builder.
+ Value *EmitStrNCmp(Value *Ptr1, Value *Ptr2, Value *Len, IRBuilder<> &B,
+ const DataLayout &DL, const TargetLibraryInfo *TLI);
+
+ /// EmitStrCpy - Emit a call to the strcpy function to the builder, for the
+ /// specified pointer arguments.
+ Value *EmitStrCpy(Value *Dst, Value *Src, IRBuilder<> &B,
+ const TargetLibraryInfo *TLI, StringRef Name = "strcpy");
+
+ /// EmitStrNCpy - Emit a call to the strncpy function to the builder, for the
+ /// specified pointer arguments and length.
+ Value *EmitStrNCpy(Value *Dst, Value *Src, Value *Len, IRBuilder<> &B,
+ const TargetLibraryInfo *TLI, StringRef Name = "strncpy");
+
+ /// EmitMemCpyChk - Emit a call to the __memcpy_chk function to the builder.
+ /// This expects that the Len and ObjSize have type 'intptr_t' and Dst/Src
+ /// are pointers.
+ Value *EmitMemCpyChk(Value *Dst, Value *Src, Value *Len, Value *ObjSize,
+ IRBuilder<> &B, const DataLayout &DL,
+ const TargetLibraryInfo *TLI);
+
+ /// EmitMemChr - Emit a call to the memchr function. This assumes that Ptr is
+ /// a pointer, Val is an i32 value, and Len is an 'intptr_t' value.
+ Value *EmitMemChr(Value *Ptr, Value *Val, Value *Len, IRBuilder<> &B,
+ const DataLayout &DL, const TargetLibraryInfo *TLI);
+
+ /// EmitMemCmp - Emit a call to the memcmp function.
+ Value *EmitMemCmp(Value *Ptr1, Value *Ptr2, Value *Len, IRBuilder<> &B,
+ const DataLayout &DL, const TargetLibraryInfo *TLI);
+
+  /// EmitUnaryFloatFnCall - Emit a call to the unary function named 'Name'
+  /// (e.g. 'floor'). This function is known to take a single argument of type
+  /// matching 'Op' and return one value with the same type. If 'Op' is a long
+  /// double, 'l' is added as the suffix of the name; if 'Op' is a float, an
+  /// 'f' suffix is added.
+ Value *EmitUnaryFloatFnCall(Value *Op, StringRef Name, IRBuilder<> &B,
+ const AttributeSet &Attrs);
+
+  /// EmitBinaryFloatFnCall - Emit a call to the binary function named 'Name'
+ /// (e.g. 'fmin'). This function is known to take type matching 'Op1' and
+ /// 'Op2' and return one value with the same type. If 'Op1/Op2' are long
+ /// double, 'l' is added as the suffix of name, if 'Op1/Op2' are float, we
+ /// add a 'f' suffix.
+ Value *EmitBinaryFloatFnCall(Value *Op1, Value *Op2, StringRef Name,
+ IRBuilder<> &B, const AttributeSet &Attrs);
+
+ /// EmitPutChar - Emit a call to the putchar function. This assumes that Char
+ /// is an integer.
+ Value *EmitPutChar(Value *Char, IRBuilder<> &B, const TargetLibraryInfo *TLI);
+
+ /// EmitPutS - Emit a call to the puts function. This assumes that Str is
+ /// some pointer.
+ Value *EmitPutS(Value *Str, IRBuilder<> &B, const TargetLibraryInfo *TLI);
+
+ /// EmitFPutC - Emit a call to the fputc function. This assumes that Char is
+ /// an i32, and File is a pointer to FILE.
+ Value *EmitFPutC(Value *Char, Value *File, IRBuilder<> &B,
+ const TargetLibraryInfo *TLI);
+
+  /// EmitFPutS - Emit a call to the fputs function. Str is required to be a
+ /// pointer and File is a pointer to FILE.
+ Value *EmitFPutS(Value *Str, Value *File, IRBuilder<> &B,
+ const TargetLibraryInfo *TLI);
+
+ /// EmitFWrite - Emit a call to the fwrite function. This assumes that Ptr is
+ /// a pointer, Size is an 'intptr_t', and File is a pointer to FILE.
+ Value *EmitFWrite(Value *Ptr, Value *Size, Value *File, IRBuilder<> &B,
+ const DataLayout &DL, const TargetLibraryInfo *TLI);
+}
+
+#endif
diff --git a/gnu/llvm/include/llvm/Transforms/Utils/BypassSlowDivision.h b/gnu/llvm/include/llvm/Transforms/Utils/BypassSlowDivision.h
new file mode 100644
index 00000000000..af0d60b2625
--- /dev/null
+++ b/gnu/llvm/include/llvm/Transforms/Utils/BypassSlowDivision.h
@@ -0,0 +1,36 @@
+//===- llvm/Transforms/Utils/BypassSlowDivision.h --------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file contains an optimization for div and rem on architectures that
+// execute short instructions significantly faster than longer instructions.
+// For example, on Intel Atom 32-bit divides are slow enough that during
+// runtime it is profitable to check the value of the operands, and if they are
+// positive and less than 256 use an unsigned 8-bit divide.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TRANSFORMS_UTILS_BYPASSSLOWDIVISION_H
+#define LLVM_TRANSFORMS_UTILS_BYPASSSLOWDIVISION_H
+
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/IR/Function.h"
+
+namespace llvm {
+
+/// This optimization identifies DIV instructions in a BB that can be
+/// profitably bypassed and carried out with a shorter, faster divide.
+///
+/// This optimization may add basic blocks immediately after BB; for obvious
+/// reasons, you shouldn't pass those blocks to bypassSlowDivision.
+bool bypassSlowDivision(
+ BasicBlock *BB, const DenseMap<unsigned int, unsigned int> &BypassWidth);
+
+} // End llvm namespace
+
+#endif
diff --git a/gnu/llvm/include/llvm/Transforms/Utils/Cloning.h b/gnu/llvm/include/llvm/Transforms/Utils/Cloning.h
new file mode 100644
index 00000000000..4f006f2adee
--- /dev/null
+++ b/gnu/llvm/include/llvm/Transforms/Utils/Cloning.h
@@ -0,0 +1,236 @@
+//===- Cloning.h - Clone various parts of LLVM programs ---------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines various functions that are used to clone chunks of LLVM
+// code for various purposes. This varies from copying whole modules into new
+// modules, to cloning functions with different arguments, to inlining
+// functions, to copying basic blocks to support loop unrolling or superblock
+// formation, etc.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TRANSFORMS_UTILS_CLONING_H
+#define LLVM_TRANSFORMS_UTILS_CLONING_H
+
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/ADT/Twine.h"
+#include "llvm/Analysis/AliasAnalysis.h"
+#include "llvm/IR/ValueHandle.h"
+#include "llvm/IR/ValueMap.h"
+#include "llvm/Transforms/Utils/ValueMapper.h"
+#include <functional>
+
+namespace llvm {
+
+class Module;
+class Function;
+class Instruction;
+class Pass;
+class LPPassManager;
+class BasicBlock;
+class Value;
+class CallInst;
+class InvokeInst;
+class ReturnInst;
+class CallSite;
+class Trace;
+class CallGraph;
+class DataLayout;
+class Loop;
+class LoopInfo;
+class AllocaInst;
+class AssumptionCacheTracker;
+class DominatorTree;
+
+/// Return an exact copy of the specified module
+///
+std::unique_ptr<Module> CloneModule(const Module *M);
+std::unique_ptr<Module> CloneModule(const Module *M, ValueToValueMapTy &VMap);
+
+/// Return a copy of the specified module. The ShouldCloneDefinition function
+/// controls whether a specific GlobalValue's definition is cloned. If the
+/// function returns false, the module copy will contain an external reference
+/// in place of the global definition.
+std::unique_ptr<Module>
+CloneModule(const Module *M, ValueToValueMapTy &VMap,
+ std::function<bool(const GlobalValue *)> ShouldCloneDefinition);
+
+/// ClonedCodeInfo - This struct can be used to capture information about code
+/// being cloned, while it is being cloned.
+struct ClonedCodeInfo {
+ /// ContainsCalls - This is set to true if the cloned code contains a normal
+ /// call instruction.
+ bool ContainsCalls;
+
+ /// ContainsDynamicAllocas - This is set to true if the cloned code contains
+ /// a 'dynamic' alloca. Dynamic allocas are allocas that are either not in
+ /// the entry block or they are in the entry block but are not a constant
+ /// size.
+ bool ContainsDynamicAllocas;
+
+ /// All cloned call sites that have operand bundles attached are appended to
+ /// this vector. This vector may contain nulls or undefs if some of the
+ /// originally inserted callsites were DCE'ed after they were cloned.
+ std::vector<WeakVH> OperandBundleCallSites;
+
+ ClonedCodeInfo() : ContainsCalls(false), ContainsDynamicAllocas(false) {}
+};
+
+/// CloneBasicBlock - Return a copy of the specified basic block, but without
+/// embedding the block into a particular function. The block returned is an
+/// exact copy of the specified basic block, without any remapping having been
+/// performed. Because of this, this is only suitable for applications where
+/// the basic block will be inserted into the same function that it was cloned
+/// from (loop unrolling would use this, for example).
+///
+/// Also, note that this function makes a direct copy of the basic block, and
+/// can thus produce illegal LLVM code. In particular, it will copy any PHI
+/// nodes from the original block, even though there are no predecessors for the
+/// newly cloned block (thus, phi nodes will have to be updated). Also, this
+/// block will branch to the old successors of the original block: these
+/// successors will have to have any PHI nodes updated to account for the new
+/// incoming edges.
+///
+/// The correlation between instructions in the source and result basic blocks
+/// is recorded in the VMap map.
+///
+/// If you have a particular suffix you'd like to use to add to any cloned
+/// names, specify it as the optional third parameter.
+///
+/// If you would like the basic block to be auto-inserted into the end of a
+/// function, you can specify it as the optional fourth parameter.
+///
+/// If you would like to collect additional information about the cloned
+/// function, you can specify a ClonedCodeInfo object with the optional fifth
+/// parameter.
+///
+BasicBlock *CloneBasicBlock(const BasicBlock *BB, ValueToValueMapTy &VMap,
+ const Twine &NameSuffix = "", Function *F = nullptr,
+ ClonedCodeInfo *CodeInfo = nullptr);
+
+/// CloneFunction - Return a copy of the specified function, but without
+/// embedding the function into another module. Also, any references specified
+/// in the VMap are changed to refer to their mapped value instead of the
+/// original one. If any of the arguments to the function are in the VMap,
+/// the arguments are deleted from the resultant function. The VMap is
+/// updated to include mappings from all of the instructions and basicblocks in
+/// the function from their old to new values. The final argument captures
+/// information about the cloned code if non-null.
+///
+/// If ModuleLevelChanges is false, VMap contains no non-identity GlobalValue
+/// mappings, and debug info metadata will not be cloned.
+///
+Function *CloneFunction(const Function *F, ValueToValueMapTy &VMap,
+ bool ModuleLevelChanges,
+ ClonedCodeInfo *CodeInfo = nullptr);
+
+/// Clone OldFunc into NewFunc, transforming the old arguments into references
+/// to VMap values. Note that if NewFunc already has basic blocks, the ones
+/// cloned into it will be added to the end of the function. This function
+/// fills in a list of return instructions, and can optionally remap types
+/// and/or append the specified suffix to all values cloned.
+///
+/// If ModuleLevelChanges is false, VMap contains no non-identity GlobalValue
+/// mappings.
+///
+void CloneFunctionInto(Function *NewFunc, const Function *OldFunc,
+ ValueToValueMapTy &VMap, bool ModuleLevelChanges,
+ SmallVectorImpl<ReturnInst*> &Returns,
+ const char *NameSuffix = "",
+ ClonedCodeInfo *CodeInfo = nullptr,
+ ValueMapTypeRemapper *TypeMapper = nullptr,
+ ValueMaterializer *Materializer = nullptr);
+
+void CloneAndPruneIntoFromInst(Function *NewFunc, const Function *OldFunc,
+ const Instruction *StartingInst,
+ ValueToValueMapTy &VMap, bool ModuleLevelChanges,
+ SmallVectorImpl<ReturnInst *> &Returns,
+ const char *NameSuffix = "",
+ ClonedCodeInfo *CodeInfo = nullptr);
+
+/// CloneAndPruneFunctionInto - This works exactly like CloneFunctionInto,
+/// except that it does some simple constant prop and DCE on the fly. The
+/// effect of this is to copy significantly less code in cases where (for
+/// example) a function call with constant arguments is inlined, and those
+/// constant arguments cause a significant amount of code in the callee to be
+/// dead. Since this doesn't produce an exact copy of the input, it can't be
+/// used for things like CloneFunction or CloneModule.
+///
+/// If ModuleLevelChanges is false, VMap contains no non-identity GlobalValue
+/// mappings.
+///
+void CloneAndPruneFunctionInto(Function *NewFunc, const Function *OldFunc,
+ ValueToValueMapTy &VMap, bool ModuleLevelChanges,
+ SmallVectorImpl<ReturnInst*> &Returns,
+ const char *NameSuffix = "",
+ ClonedCodeInfo *CodeInfo = nullptr,
+ Instruction *TheCall = nullptr);
+
+/// InlineFunctionInfo - This class captures the data input to the
+/// InlineFunction call, and records the auxiliary results produced by it.
+class InlineFunctionInfo {
+public:
+ explicit InlineFunctionInfo(CallGraph *cg = nullptr,
+ AssumptionCacheTracker *ACT = nullptr)
+ : CG(cg), ACT(ACT) {}
+
+ /// CG - If non-null, InlineFunction will update the callgraph to reflect the
+ /// changes it makes.
+ CallGraph *CG;
+ AssumptionCacheTracker *ACT;
+
+ /// StaticAllocas - InlineFunction fills this in with all static allocas that
+ /// get copied into the caller.
+ SmallVector<AllocaInst *, 4> StaticAllocas;
+
+ /// InlinedCalls - InlineFunction fills this in with callsites that were
+ /// inlined from the callee. This is only filled in if CG is non-null.
+ SmallVector<WeakVH, 8> InlinedCalls;
+
+ void reset() {
+ StaticAllocas.clear();
+ InlinedCalls.clear();
+ }
+};
+
+/// InlineFunction - This function inlines the called function into the basic
+/// block of the caller. This returns false if it is not possible to inline
+/// this call. The program is still in a well defined state if this occurs
+/// though.
+///
+/// Note that this only does one level of inlining. For example, if the
+/// instruction 'call B' is inlined, and 'B' calls 'C', then the call to 'C' now
+/// exists in the instruction stream. Similarly this will inline a recursive
+/// function by one level.
+///
+bool InlineFunction(CallInst *C, InlineFunctionInfo &IFI,
+ AAResults *CalleeAAR = nullptr, bool InsertLifetime = true);
+bool InlineFunction(InvokeInst *II, InlineFunctionInfo &IFI,
+ AAResults *CalleeAAR = nullptr, bool InsertLifetime = true);
+bool InlineFunction(CallSite CS, InlineFunctionInfo &IFI,
+ AAResults *CalleeAAR = nullptr, bool InsertLifetime = true);
+
+/// \brief Clones a loop \p OrigLoop. Returns the loop and the blocks in \p
+/// Blocks.
+///
+/// Updates LoopInfo and DominatorTree assuming the loop is dominated by block
+/// \p LoopDomBB. Insert the new blocks before block specified in \p Before.
+Loop *cloneLoopWithPreheader(BasicBlock *Before, BasicBlock *LoopDomBB,
+ Loop *OrigLoop, ValueToValueMapTy &VMap,
+ const Twine &NameSuffix, LoopInfo *LI,
+ DominatorTree *DT,
+ SmallVectorImpl<BasicBlock *> &Blocks);
+
+/// \brief Remaps instructions in \p Blocks using the mapping in \p VMap.
+void remapInstructionsInBlocks(const SmallVectorImpl<BasicBlock *> &Blocks,
+ ValueToValueMapTy &VMap);
+
+} // End llvm namespace
+
+#endif
diff --git a/gnu/llvm/include/llvm/Transforms/Utils/CmpInstAnalysis.h b/gnu/llvm/include/llvm/Transforms/Utils/CmpInstAnalysis.h
new file mode 100644
index 00000000000..73c15e42c35
--- /dev/null
+++ b/gnu/llvm/include/llvm/Transforms/Utils/CmpInstAnalysis.h
@@ -0,0 +1,65 @@
+//===-- CmpInstAnalysis.h - Utils to help fold compare insts ----*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file holds routines to help analyse compare instructions
+// and fold them into constants or other compare instructions
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TRANSFORMS_UTILS_CMPINSTANALYSIS_H
+#define LLVM_TRANSFORMS_UTILS_CMPINSTANALYSIS_H
+
+#include "llvm/IR/InstrTypes.h"
+
+namespace llvm {
+ class ICmpInst;
+ class Value;
+
+  /// getICmpCode - Encode an icmp predicate into a three bit mask. These bits
+ /// are carefully arranged to allow folding of expressions such as:
+ ///
+ /// (A < B) | (A > B) --> (A != B)
+ ///
+ /// Note that this is only valid if the first and second predicates have the
+  /// same sign. It is illegal to do: (A u< B) | (A s> B)
+ ///
+ /// Three bits are used to represent the condition, as follows:
+ /// 0 A > B
+ /// 1 A == B
+ /// 2 A < B
+ ///
+ /// <=> Value Definition
+ /// 000 0 Always false
+ /// 001 1 A > B
+ /// 010 2 A == B
+ /// 011 3 A >= B
+ /// 100 4 A < B
+ /// 101 5 A != B
+ /// 110 6 A <= B
+ /// 111 7 Always true
+ ///
+ unsigned getICmpCode(const ICmpInst *ICI, bool InvertPred = false);
+
+ /// getICmpValue - This is the complement of getICmpCode, which turns an
+ /// opcode and two operands into either a constant true or false, or the
+ /// predicate for a new ICmp instruction. The sign is passed in to determine
+ /// which kind of predicate to use in the new icmp instruction.
+ /// Non-NULL return value will be a true or false constant.
+ /// NULL return means a new ICmp is needed. The predicate for which is
+ /// output in NewICmpPred.
+ Value *getICmpValue(bool Sign, unsigned Code, Value *LHS, Value *RHS,
+ CmpInst::Predicate &NewICmpPred);
+
+ /// PredicatesFoldable - Return true if both predicates match sign or if at
+ /// least one of them is an equality comparison (which is signless).
+ bool PredicatesFoldable(CmpInst::Predicate p1, CmpInst::Predicate p2);
+
+} // end namespace llvm
+
+#endif
diff --git a/gnu/llvm/include/llvm/Transforms/Utils/CodeExtractor.h b/gnu/llvm/include/llvm/Transforms/Utils/CodeExtractor.h
new file mode 100644
index 00000000000..3a96d955cac
--- /dev/null
+++ b/gnu/llvm/include/llvm/Transforms/Utils/CodeExtractor.h
@@ -0,0 +1,126 @@
+//===-- Transform/Utils/CodeExtractor.h - Code extraction util --*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// A utility to support extracting code from one function into its own
+// stand-alone function.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TRANSFORMS_UTILS_CODEEXTRACTOR_H
+#define LLVM_TRANSFORMS_UTILS_CODEEXTRACTOR_H
+
+#include "llvm/ADT/ArrayRef.h"
+#include "llvm/ADT/SetVector.h"
+
+namespace llvm {
+ class BasicBlock;
+ class DominatorTree;
+ class Function;
+ class Loop;
+ class Module;
+ class RegionNode;
+ class Type;
+ class Value;
+
+ /// \brief Utility class for extracting code into a new function.
+ ///
+ /// This utility provides a simple interface for extracting some sequence of
+ /// code into its own function, replacing it with a call to that function. It
+ /// also provides various methods to query about the nature and result of
+ /// such a transformation.
+ ///
+ /// The rough algorithm used is:
+ /// 1) Find both the inputs and outputs for the extracted region.
+ /// 2) Pass the inputs as arguments, remapping them within the extracted
+ /// function to arguments.
+ /// 3) Add allocas for any scalar outputs, adding all of the outputs' allocas
+ /// as arguments, and inserting stores to the arguments for any scalars.
+ class CodeExtractor {
+ typedef SetVector<Value *> ValueSet;
+
+ // Various bits of state computed on construction.
+ DominatorTree *const DT;
+ const bool AggregateArgs;
+
+ // Bits of intermediate state computed at various phases of extraction.
+ SetVector<BasicBlock *> Blocks;
+ unsigned NumExitBlocks;
+ Type *RetTy;
+
+ public:
+ /// \brief Create a code extractor for a single basic block.
+ ///
+ /// In this formation, we don't require a dominator tree. The given basic
+ /// block is set up for extraction.
+ CodeExtractor(BasicBlock *BB, bool AggregateArgs = false);
+
+ /// \brief Create a code extractor for a sequence of blocks.
+ ///
+ /// Given a sequence of basic blocks where the first block in the sequence
+ /// dominates the rest, prepare a code extractor object for pulling this
+ /// sequence out into its new function. When a DominatorTree is also given,
+ /// extra checking and transformations are enabled.
+ CodeExtractor(ArrayRef<BasicBlock *> BBs, DominatorTree *DT = nullptr,
+ bool AggregateArgs = false);
+
+ /// \brief Create a code extractor for a loop body.
+ ///
+ /// Behaves just like the generic code sequence constructor, but uses the
+ /// block sequence of the loop.
+ CodeExtractor(DominatorTree &DT, Loop &L, bool AggregateArgs = false);
+
+ /// \brief Create a code extractor for a region node.
+ ///
+ /// Behaves just like the generic code sequence constructor, but uses the
+ /// block sequence of the region node passed in.
+ CodeExtractor(DominatorTree &DT, const RegionNode &RN,
+ bool AggregateArgs = false);
+
+ /// \brief Perform the extraction, returning the new function.
+ ///
+ /// Returns zero when called on a CodeExtractor instance where isEligible
+ /// returns false.
+ Function *extractCodeRegion();
+
+ /// \brief Test whether this code extractor is eligible.
+ ///
+ /// Based on the blocks used when constructing the code extractor,
+ /// determine whether it is eligible for extraction.
+ bool isEligible() const { return !Blocks.empty(); }
+
+ /// \brief Compute the set of input values and output values for the code.
+ ///
+ /// These can be used either when performing the extraction or to evaluate
+ /// the expected size of a call to the extracted function. Note that this
+ /// work cannot be cached between the two as once we decide to extract
+ /// a code sequence, that sequence is modified, including changing these
+ /// sets, before extraction occurs. These modifications won't have any
+ /// significant impact on the cost however.
+ void findInputsOutputs(ValueSet &Inputs, ValueSet &Outputs) const;
+
+ private:
+ void severSplitPHINodes(BasicBlock *&Header);
+ void splitReturnBlocks();
+
+ Function *constructFunction(const ValueSet &inputs,
+ const ValueSet &outputs,
+ BasicBlock *header,
+ BasicBlock *newRootNode, BasicBlock *newHeader,
+ Function *oldFunction, Module *M);
+
+ void moveCodeToFunction(Function *newFunction);
+
+ void emitCallAndSwitchStatement(Function *newFunction,
+ BasicBlock *newHeader,
+ ValueSet &inputs,
+ ValueSet &outputs);
+ };
+}
+
+#endif
diff --git a/gnu/llvm/include/llvm/Transforms/Utils/CtorUtils.h b/gnu/llvm/include/llvm/Transforms/Utils/CtorUtils.h
new file mode 100644
index 00000000000..63e564dcb87
--- /dev/null
+++ b/gnu/llvm/include/llvm/Transforms/Utils/CtorUtils.h
@@ -0,0 +1,32 @@
+//===- CtorUtils.h - Helpers for working with global_ctors ------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines functions that are used to process llvm.global_ctors.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TRANSFORMS_UTILS_CTORUTILS_H
+#define LLVM_TRANSFORMS_UTILS_CTORUTILS_H
+
+#include "llvm/ADT/STLExtras.h"
+
+namespace llvm {
+
+class GlobalVariable;
+class Function;
+class Module;
+
+/// Call "ShouldRemove" for every entry in M's global_ctor list and remove the
+/// entries for which it returns true. Return true if anything changed.
+bool optimizeGlobalCtorsList(Module &M,
+ function_ref<bool(Function *)> ShouldRemove);
+
+} // End llvm namespace
+
+#endif
diff --git a/gnu/llvm/include/llvm/Transforms/Utils/GlobalStatus.h b/gnu/llvm/include/llvm/Transforms/Utils/GlobalStatus.h
new file mode 100644
index 00000000000..c3660950880
--- /dev/null
+++ b/gnu/llvm/include/llvm/Transforms/Utils/GlobalStatus.h
@@ -0,0 +1,82 @@
+//===- GlobalStatus.h - Compute status info for globals ---------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TRANSFORMS_UTILS_GLOBALSTATUS_H
+#define LLVM_TRANSFORMS_UTILS_GLOBALSTATUS_H
+
+#include "llvm/IR/Instructions.h"
+
+namespace llvm {
+class Value;
+class Function;
+
+/// It is safe to destroy a constant iff it is only used by constants itself.
+/// Note that constants cannot be cyclic, so this test is pretty easy to
+/// implement recursively.
+///
+bool isSafeToDestroyConstant(const Constant *C);
+
+/// As we analyze each global, keep track of some information about it. If we
+/// find out that the address of the global is taken, none of this info will be
+/// accurate.
+struct GlobalStatus {
+ /// True if the global's address is used in a comparison.
+ bool IsCompared;
+
+ /// True if the global is ever loaded. If the global isn't ever loaded it
+ /// can be deleted.
+ bool IsLoaded;
+
+ /// Keep track of what stores to the global look like.
+ enum StoredType {
+ /// There is no store to this global. It can thus be marked constant.
+ NotStored,
+
+ /// This global is stored to, but the only thing stored is the constant it
+ /// was initialized with. This is only tracked for scalar globals.
+ InitializerStored,
+
+ /// This global is stored to, but only its initializer and one other value
+ /// is ever stored to it. If this global isStoredOnce, we track the value
+ /// stored to it in StoredOnceValue below. This is only tracked for scalar
+ /// globals.
+ StoredOnce,
+
+ /// This global is stored to by multiple values or something else that we
+ /// cannot track.
+ Stored
+ } StoredType;
+
+ /// If only one value (besides the initializer constant) is ever stored to
+ /// this global, keep track of what value it is.
+ Value *StoredOnceValue;
+
+ /// These start out null/false. When the first accessing function is noticed,
+ /// it is recorded. When a second different accessing function is noticed,
+ /// HasMultipleAccessingFunctions is set to true.
+ const Function *AccessingFunction;
+ bool HasMultipleAccessingFunctions;
+
+ /// Set to true if this global has a user that is not an instruction (e.g. a
+ /// constant expr or GV initializer).
+ bool HasNonInstructionUser;
+
+ /// Set to the strongest atomic ordering requirement.
+ AtomicOrdering Ordering;
+
+ /// Look at all uses of the global and fill in the GlobalStatus structure. If
+ /// the global has its address taken, return true to indicate we can't do
+ /// anything with it.
+ static bool analyzeGlobal(const Value *V, GlobalStatus &GS);
+
+ GlobalStatus();
+};
+}
+
+#endif
diff --git a/gnu/llvm/include/llvm/Transforms/Utils/IntegerDivision.h b/gnu/llvm/include/llvm/Transforms/Utils/IntegerDivision.h
new file mode 100644
index 00000000000..0ec3321b9cf
--- /dev/null
+++ b/gnu/llvm/include/llvm/Transforms/Utils/IntegerDivision.h
@@ -0,0 +1,73 @@
+//===- llvm/Transforms/Utils/IntegerDivision.h ------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file contains an implementation of 32bit and 64bit scalar integer
+// division for targets that don't have native support. It's largely derived
+// from compiler-rt's implementations of __udivsi3 and __udivmoddi4,
+// but hand-tuned for targets that prefer less control flow.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TRANSFORMS_UTILS_INTEGERDIVISION_H
+#define LLVM_TRANSFORMS_UTILS_INTEGERDIVISION_H
+
+namespace llvm {
+ class BinaryOperator;
+}
+
+namespace llvm {
+
+ /// Generate code to calculate the remainder of two integers, replacing Rem
+ /// with the generated code. This currently generates code using the udiv
+ /// expansion, but future work includes generating more specialized code,
+ /// e.g. when more information about the operands are known. Implements both
+ /// 32bit and 64bit scalar division.
+ ///
+ /// @brief Replace Rem with generated code.
+ bool expandRemainder(BinaryOperator *Rem);
+
+ /// Generate code to divide two integers, replacing Div with the generated
+ /// code. This currently generates code similarly to compiler-rt's
+ /// implementations, but future work includes generating more specialized code
+ /// when more information about the operands are known. Implements both
+ /// 32bit and 64bit scalar division.
+ ///
+ /// @brief Replace Div with generated code.
+ bool expandDivision(BinaryOperator* Div);
+
+ /// Generate code to calculate the remainder of two integers, replacing Rem
+  /// with the generated code. Uses expandRemainder with a 32bit Rem which
+ /// makes it useful for targets with little or no support for less than
+ /// 32 bit arithmetic.
+ ///
+ /// @brief Replace Rem with generated code.
+ bool expandRemainderUpTo32Bits(BinaryOperator *Rem);
+
+ /// Generate code to calculate the remainder of two integers, replacing Rem
+  /// with the generated code. Uses expandRemainder with a 64bit Rem.
+ ///
+ /// @brief Replace Rem with generated code.
+ bool expandRemainderUpTo64Bits(BinaryOperator *Rem);
+
+ /// Generate code to divide two integers, replacing Div with the generated
+  /// code. Uses expandDivision with a 32bit Div which makes it useful for
+  /// targets with little or no support for less than 32 bit arithmetic.
+  ///
+  /// @brief Replace Div with generated code.
+ bool expandDivisionUpTo32Bits(BinaryOperator *Div);
+
+ /// Generate code to divide two integers, replacing Div with the generated
+  /// code. Uses expandDivision with a 64bit Div.
+  ///
+  /// @brief Replace Div with generated code.
+ bool expandDivisionUpTo64Bits(BinaryOperator *Div);
+
+} // End llvm namespace
+
+#endif
diff --git a/gnu/llvm/include/llvm/Transforms/Utils/Local.h b/gnu/llvm/include/llvm/Transforms/Utils/Local.h
new file mode 100644
index 00000000000..3ae01657a2e
--- /dev/null
+++ b/gnu/llvm/include/llvm/Transforms/Utils/Local.h
@@ -0,0 +1,355 @@
+//===-- Local.h - Functions to perform local transformations ----*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This family of functions perform various local transformations to the
+// program.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TRANSFORMS_UTILS_LOCAL_H
+#define LLVM_TRANSFORMS_UTILS_LOCAL_H
+
+#include "llvm/Analysis/AliasAnalysis.h"
+#include "llvm/IR/DataLayout.h"
+#include "llvm/IR/Dominators.h"
+#include "llvm/IR/GetElementPtrTypeIterator.h"
+#include "llvm/IR/IRBuilder.h"
+#include "llvm/IR/Operator.h"
+
+namespace llvm {
+
+class User;
+class BasicBlock;
+class Function;
+class BranchInst;
+class Instruction;
+class DbgDeclareInst;
+class StoreInst;
+class LoadInst;
+class Value;
+class PHINode;
+class AllocaInst;
+class AssumptionCache;
+class ConstantExpr;
+class DataLayout;
+class TargetLibraryInfo;
+class TargetTransformInfo;
+class DIBuilder;
+class DominatorTree;
+class LazyValueInfo;
+
+template<typename T> class SmallVectorImpl;
+
+//===----------------------------------------------------------------------===//
+// Local constant propagation.
+//
+
+/// ConstantFoldTerminator - If a terminator instruction is predicated on a
+/// constant value, convert it into an unconditional branch to the constant
+/// destination. This is a nontrivial operation because the successors of this
+/// basic block must have their PHI nodes updated.
+/// Also calls RecursivelyDeleteTriviallyDeadInstructions() on any branch/switch
+/// conditions and indirectbr addresses this might make dead if
+/// DeleteDeadConditions is true.
+bool ConstantFoldTerminator(BasicBlock *BB, bool DeleteDeadConditions = false,
+ const TargetLibraryInfo *TLI = nullptr);
+
+//===----------------------------------------------------------------------===//
+// Local dead code elimination.
+//
+
+/// isInstructionTriviallyDead - Return true if the result produced by the
+/// instruction is not used, and the instruction has no side effects.
+///
+bool isInstructionTriviallyDead(Instruction *I,
+ const TargetLibraryInfo *TLI = nullptr);
+
+/// RecursivelyDeleteTriviallyDeadInstructions - If the specified value is a
+/// trivially dead instruction, delete it. If that makes any of its operands
+/// trivially dead, delete them too, recursively. Return true if any
+/// instructions were deleted.
+bool RecursivelyDeleteTriviallyDeadInstructions(Value *V,
+ const TargetLibraryInfo *TLI = nullptr);
+
+/// RecursivelyDeleteDeadPHINode - If the specified value is an effectively
+/// dead PHI node, due to being a def-use chain of single-use nodes that
+/// either forms a cycle or is terminated by a trivially dead instruction,
+/// delete it. If that makes any of its operands trivially dead, delete them
+/// too, recursively. Return true if a change was made.
+bool RecursivelyDeleteDeadPHINode(PHINode *PN,
+ const TargetLibraryInfo *TLI = nullptr);
+
+/// SimplifyInstructionsInBlock - Scan the specified basic block and try to
+/// simplify any instructions in it and recursively delete dead instructions.
+///
+/// This returns true if it changed the code, note that it can delete
+/// instructions in other blocks as well in this block.
+bool SimplifyInstructionsInBlock(BasicBlock *BB,
+ const TargetLibraryInfo *TLI = nullptr);
+
+//===----------------------------------------------------------------------===//
+// Control Flow Graph Restructuring.
+//
+
+/// RemovePredecessorAndSimplify - Like BasicBlock::removePredecessor, this
+/// method is called when we're about to delete Pred as a predecessor of BB. If
+/// BB contains any PHI nodes, this drops the entries in the PHI nodes for Pred.
+///
+/// Unlike the removePredecessor method, this attempts to simplify uses of PHI
+/// nodes that collapse into identity values. For example, if we have:
+/// x = phi(1, 0, 0, 0)
+/// y = and x, z
+///
+/// .. and delete the predecessor corresponding to the '1', this will attempt to
+/// recursively fold the 'and' to 0.
+void RemovePredecessorAndSimplify(BasicBlock *BB, BasicBlock *Pred);
+
+/// MergeBasicBlockIntoOnlyPred - BB is a block with one predecessor and its
+/// predecessor is known to have one successor (BB!). Eliminate the edge
+/// between them, moving the instructions in the predecessor into BB. This
+/// deletes the predecessor block.
+///
+void MergeBasicBlockIntoOnlyPred(BasicBlock *BB, DominatorTree *DT = nullptr);
+
+/// TryToSimplifyUncondBranchFromEmptyBlock - BB is known to contain an
+/// unconditional branch, and contains no instructions other than PHI nodes,
+/// potential debug intrinsics and the branch. If possible, eliminate BB by
+/// rewriting all the predecessors to branch to the successor block and return
+/// true. If we can't transform, return false.
+bool TryToSimplifyUncondBranchFromEmptyBlock(BasicBlock *BB);
+
+/// EliminateDuplicatePHINodes - Check for and eliminate duplicate PHI
+/// nodes in this block. This doesn't try to be clever about PHI nodes
+/// which differ only in the order of the incoming values, but instcombine
+/// orders them so it usually won't matter.
+///
+bool EliminateDuplicatePHINodes(BasicBlock *BB);
+
+/// SimplifyCFG - This function is used to do simplification of a CFG. For
+/// example, it adjusts branches to branches to eliminate the extra hop, it
+/// eliminates unreachable basic blocks, and does other "peephole" optimization
+/// of the CFG. It returns true if a modification was made, possibly deleting
+/// the basic block that was pointed to.
+///
+bool SimplifyCFG(BasicBlock *BB, const TargetTransformInfo &TTI,
+ unsigned BonusInstThreshold, AssumptionCache *AC = nullptr);
+
+ /// FlattenCFG - This function is used to flatten a CFG. For
+ /// example, it uses parallel-and and parallel-or mode to collapse
+ /// if-conditions and merge if-regions with identical statements.
+///
+bool FlattenCFG(BasicBlock *BB, AliasAnalysis *AA = nullptr);
+
+/// FoldBranchToCommonDest - If this basic block is ONLY a setcc and a branch,
+/// and if a predecessor branches to us and one of our successors, fold the
+/// setcc into the predecessor and use logical operations to pick the right
+/// destination.
+bool FoldBranchToCommonDest(BranchInst *BI, unsigned BonusInstThreshold = 1);
+
+/// DemoteRegToStack - This function takes a virtual register computed by an
+/// Instruction and replaces it with a slot in the stack frame, allocated via
+/// alloca. This allows the CFG to be changed around without fear of
+/// invalidating the SSA information for the value. It returns the pointer to
+/// the alloca inserted to create a stack slot for X.
+///
+AllocaInst *DemoteRegToStack(Instruction &X,
+ bool VolatileLoads = false,
+ Instruction *AllocaPoint = nullptr);
+
+/// DemotePHIToStack - This function takes a virtual register computed by a phi
+/// node and replaces it with a slot in the stack frame, allocated via alloca.
+/// The phi node is deleted and it returns the pointer to the alloca inserted.
+AllocaInst *DemotePHIToStack(PHINode *P, Instruction *AllocaPoint = nullptr);
+
+/// getOrEnforceKnownAlignment - If the specified pointer has an alignment that
+/// we can determine, return it, otherwise return 0. If PrefAlign is specified,
+/// and it is more than the alignment of the ultimate object, see if we can
+/// increase the alignment of the ultimate object, making this check succeed.
+unsigned getOrEnforceKnownAlignment(Value *V, unsigned PrefAlign,
+ const DataLayout &DL,
+ const Instruction *CxtI = nullptr,
+ AssumptionCache *AC = nullptr,
+ const DominatorTree *DT = nullptr);
+
+/// getKnownAlignment - Try to infer an alignment for the specified pointer.
+static inline unsigned getKnownAlignment(Value *V, const DataLayout &DL,
+ const Instruction *CxtI = nullptr,
+ AssumptionCache *AC = nullptr,
+ const DominatorTree *DT = nullptr) {
+ return getOrEnforceKnownAlignment(V, 0, DL, CxtI, AC, DT);
+}
+
+/// EmitGEPOffset - Given a getelementptr instruction/constantexpr, emit the
+/// code necessary to compute the offset from the base pointer (without adding
+/// in the base pointer). Return the result as a signed integer of intptr size.
+ /// When NoAssumptions is true, no assumptions are made about index
+ /// computations not overflowing.
+template <typename IRBuilderTy>
+Value *EmitGEPOffset(IRBuilderTy *Builder, const DataLayout &DL, User *GEP,
+ bool NoAssumptions = false) {
+ GEPOperator *GEPOp = cast<GEPOperator>(GEP);
+ Type *IntPtrTy = DL.getIntPtrType(GEP->getType());
+ Value *Result = Constant::getNullValue(IntPtrTy);
+
+ // If the GEP is inbounds, we know that none of the addressing operations will
+ // overflow in an unsigned sense.
+ bool isInBounds = GEPOp->isInBounds() && !NoAssumptions;
+
+ // Build a mask for high order bits.
+ unsigned IntPtrWidth = IntPtrTy->getScalarType()->getIntegerBitWidth();
+ uint64_t PtrSizeMask = ~0ULL >> (64 - IntPtrWidth);
+
+ gep_type_iterator GTI = gep_type_begin(GEP);
+ for (User::op_iterator i = GEP->op_begin() + 1, e = GEP->op_end(); i != e;
+ ++i, ++GTI) {
+ Value *Op = *i;
+ uint64_t Size = DL.getTypeAllocSize(GTI.getIndexedType()) & PtrSizeMask;
+ if (Constant *OpC = dyn_cast<Constant>(Op)) {
+ if (OpC->isZeroValue())
+ continue;
+
+ // Handle a struct index, which adds its field offset to the pointer.
+ if (StructType *STy = dyn_cast<StructType>(*GTI)) {
+ if (OpC->getType()->isVectorTy())
+ OpC = OpC->getSplatValue();
+
+ uint64_t OpValue = cast<ConstantInt>(OpC)->getZExtValue();
+ Size = DL.getStructLayout(STy)->getElementOffset(OpValue);
+
+ if (Size)
+ Result = Builder->CreateAdd(Result, ConstantInt::get(IntPtrTy, Size),
+ GEP->getName()+".offs");
+ continue;
+ }
+
+ Constant *Scale = ConstantInt::get(IntPtrTy, Size);
+ Constant *OC = ConstantExpr::getIntegerCast(OpC, IntPtrTy, true /*SExt*/);
+ Scale = ConstantExpr::getMul(OC, Scale, isInBounds/*NUW*/);
+ // Emit an add instruction.
+ Result = Builder->CreateAdd(Result, Scale, GEP->getName()+".offs");
+ continue;
+ }
+ // Convert to correct type.
+ if (Op->getType() != IntPtrTy)
+ Op = Builder->CreateIntCast(Op, IntPtrTy, true, Op->getName()+".c");
+ if (Size != 1) {
+ // We'll let instcombine(mul) convert this to a shl if possible.
+ Op = Builder->CreateMul(Op, ConstantInt::get(IntPtrTy, Size),
+ GEP->getName()+".idx", isInBounds /*NUW*/);
+ }
+
+ // Emit an add instruction.
+ Result = Builder->CreateAdd(Op, Result, GEP->getName()+".offs");
+ }
+ return Result;
+}
+
+///===---------------------------------------------------------------------===//
+/// Dbg Intrinsic utilities
+///
+
+/// Inserts a llvm.dbg.value intrinsic before a store to an alloca'd value
+/// that has an associated llvm.dbg.decl intrinsic.
+bool ConvertDebugDeclareToDebugValue(DbgDeclareInst *DDI,
+ StoreInst *SI, DIBuilder &Builder);
+
+/// Inserts a llvm.dbg.value intrinsic before a load of an alloca'd value
+/// that has an associated llvm.dbg.decl intrinsic.
+bool ConvertDebugDeclareToDebugValue(DbgDeclareInst *DDI,
+ LoadInst *LI, DIBuilder &Builder);
+
+/// LowerDbgDeclare - Lowers llvm.dbg.declare intrinsics into appropriate set
+/// of llvm.dbg.value intrinsics.
+bool LowerDbgDeclare(Function &F);
+
+/// FindAllocaDbgDeclare - Finds the llvm.dbg.declare intrinsic corresponding to
+/// an alloca, if any.
+DbgDeclareInst *FindAllocaDbgDeclare(Value *V);
+
+/// \brief Replaces llvm.dbg.declare instruction when the address it describes
+/// is replaced with a new value. If Deref is true, an additional DW_OP_deref is
+/// prepended to the expression. If Offset is non-zero, a constant displacement
+/// is added to the expression (after the optional Deref). Offset can be
+/// negative.
+bool replaceDbgDeclare(Value *Address, Value *NewAddress,
+ Instruction *InsertBefore, DIBuilder &Builder,
+ bool Deref, int Offset);
+
+/// \brief Replaces llvm.dbg.declare instruction when the alloca it describes
+/// is replaced with a new value. If Deref is true, an additional DW_OP_deref is
+/// prepended to the expression. If Offset is non-zero, a constant displacement
+/// is added to the expression (after the optional Deref). Offset can be
+/// negative. New llvm.dbg.declare is inserted immediately before AI.
+bool replaceDbgDeclareForAlloca(AllocaInst *AI, Value *NewAllocaAddress,
+ DIBuilder &Builder, bool Deref, int Offset = 0);
+
+/// \brief Insert an unreachable instruction before the specified
+/// instruction, making it and the rest of the code in the block dead.
+void changeToUnreachable(Instruction *I, bool UseLLVMTrap);
+
+/// Replace 'BB's terminator with one that does not have an unwind successor
+/// block. Rewrites `invoke` to `call`, etc. Updates any PHIs in unwind
+/// successor.
+///
+/// \param BB Block whose terminator will be replaced. Its terminator must
+/// have an unwind successor.
+void removeUnwindEdge(BasicBlock *BB);
+
+/// \brief Remove all blocks that can not be reached from the function's entry.
+///
+/// Returns true if any basic block was removed.
+bool removeUnreachableBlocks(Function &F, LazyValueInfo *LVI = nullptr);
+
+/// \brief Combine the metadata of two instructions so that K can replace J
+///
+/// Metadata not listed as known via KnownIDs is removed
+void combineMetadata(Instruction *K, const Instruction *J, ArrayRef<unsigned> KnownIDs);
+
+/// \brief Replace each use of 'From' with 'To' if that use is dominated by
+/// the given edge. Returns the number of replacements made.
+unsigned replaceDominatedUsesWith(Value *From, Value *To, DominatorTree &DT,
+ const BasicBlockEdge &Edge);
+/// \brief Replace each use of 'From' with 'To' if that use is dominated by
+/// the given BasicBlock. Returns the number of replacements made.
+unsigned replaceDominatedUsesWith(Value *From, Value *To, DominatorTree &DT,
+ const BasicBlock *BB);
+
+
+/// \brief Return true if the CallSite CS calls a gc leaf function.
+///
+/// A leaf function is a function that does not safepoint the thread during its
+/// execution. During a call or invoke to such a function, the callers stack
+/// does not have to be made parseable.
+///
+/// Most passes can and should ignore this information, and it is only used
+/// during lowering by the GC infrastructure.
+bool callsGCLeafFunction(ImmutableCallSite CS);
+
+//===----------------------------------------------------------------------===//
+// Intrinsic pattern matching
+//
+
+/// Try and match a bitreverse or bswap idiom.
+///
+/// If an idiom is matched, an intrinsic call is inserted before \c I. Any added
+/// instructions are returned in \c InsertedInsts. They will all have been added
+/// to a basic block.
+///
+/// A bitreverse idiom normally requires around 2*BW nodes to be searched (where
+/// BW is the bitwidth of the integer type). A bswap idiom requires anywhere up
+/// to BW / 4 nodes to be searched, so is significantly faster.
+///
+/// This function returns true on a successful match or false otherwise.
+bool recognizeBitReverseOrBSwapIdiom(
+ Instruction *I, bool MatchBSwaps, bool MatchBitReversals,
+ SmallVectorImpl<Instruction *> &InsertedInsts);
+
+} // End llvm namespace
+
+#endif
diff --git a/gnu/llvm/include/llvm/Transforms/Utils/LoopUtils.h b/gnu/llvm/include/llvm/Transforms/Utils/LoopUtils.h
new file mode 100644
index 00000000000..2cfacb650ff
--- /dev/null
+++ b/gnu/llvm/include/llvm/Transforms/Utils/LoopUtils.h
@@ -0,0 +1,382 @@
+//===- llvm/Transforms/Utils/LoopUtils.h - Loop utilities -*- C++ -*-=========//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines some loop transformation utilities.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TRANSFORMS_UTILS_LOOPUTILS_H
+#define LLVM_TRANSFORMS_UTILS_LOOPUTILS_H
+
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/Analysis/AliasAnalysis.h"
+#include "llvm/Analysis/EHPersonalities.h"
+#include "llvm/IR/Dominators.h"
+#include "llvm/IR/IRBuilder.h"
+
+namespace llvm {
+class AliasSet;
+class AliasSetTracker;
+class AssumptionCache;
+class BasicBlock;
+class DataLayout;
+class DominatorTree;
+class Loop;
+class LoopInfo;
+class Pass;
+class PredIteratorCache;
+class ScalarEvolution;
+class TargetLibraryInfo;
+
+/// \brief Captures loop safety information.
+ /// It keeps information on whether the loop or its header may throw an exception.
+struct LICMSafetyInfo {
+ bool MayThrow; // The current loop contains an instruction which
+ // may throw.
+ bool HeaderMayThrow; // Same as previous, but specific to loop header
+ // Used to update funclet bundle operands.
+ DenseMap<BasicBlock *, ColorVector> BlockColors;
+ LICMSafetyInfo() : MayThrow(false), HeaderMayThrow(false)
+ {}
+};
+
+/// The RecurrenceDescriptor is used to identify recurrences variables in a
+/// loop. Reduction is a special case of recurrence that has uses of the
+/// recurrence variable outside the loop. The method isReductionPHI identifies
+/// reductions that are basic recurrences.
+///
+/// Basic recurrences are defined as the summation, product, OR, AND, XOR, min,
+/// or max of a set of terms. For example: for(i=0; i<n; i++) { total +=
+/// array[i]; } is a summation of array elements. Basic recurrences are a
+/// special case of chains of recurrences (CR). See ScalarEvolution for CR
+/// references.
+
+/// This struct holds information about recurrence variables.
+class RecurrenceDescriptor {
+
+public:
+ /// This enum represents the kinds of recurrences that we support.
+ enum RecurrenceKind {
+ RK_NoRecurrence, ///< Not a recurrence.
+ RK_IntegerAdd, ///< Sum of integers.
+ RK_IntegerMult, ///< Product of integers.
+ RK_IntegerOr, ///< Bitwise or logical OR of numbers.
+ RK_IntegerAnd, ///< Bitwise or logical AND of numbers.
+ RK_IntegerXor, ///< Bitwise or logical XOR of numbers.
+ RK_IntegerMinMax, ///< Min/max implemented in terms of select(cmp()).
+ RK_FloatAdd, ///< Sum of floats.
+ RK_FloatMult, ///< Product of floats.
+ RK_FloatMinMax ///< Min/max implemented in terms of select(cmp()).
+ };
+
+ // This enum represents the kind of minmax recurrence.
+ enum MinMaxRecurrenceKind {
+ MRK_Invalid,
+ MRK_UIntMin,
+ MRK_UIntMax,
+ MRK_SIntMin,
+ MRK_SIntMax,
+ MRK_FloatMin,
+ MRK_FloatMax
+ };
+
+ RecurrenceDescriptor()
+ : StartValue(nullptr), LoopExitInstr(nullptr), Kind(RK_NoRecurrence),
+ MinMaxKind(MRK_Invalid), UnsafeAlgebraInst(nullptr),
+ RecurrenceType(nullptr), IsSigned(false) {}
+
+ RecurrenceDescriptor(Value *Start, Instruction *Exit, RecurrenceKind K,
+ MinMaxRecurrenceKind MK, Instruction *UAI, Type *RT,
+ bool Signed, SmallPtrSetImpl<Instruction *> &CI)
+ : StartValue(Start), LoopExitInstr(Exit), Kind(K), MinMaxKind(MK),
+ UnsafeAlgebraInst(UAI), RecurrenceType(RT), IsSigned(Signed) {
+ CastInsts.insert(CI.begin(), CI.end());
+ }
+
+ /// This POD struct holds information about a potential recurrence operation.
+ class InstDesc {
+
+ public:
+ InstDesc(bool IsRecur, Instruction *I, Instruction *UAI = nullptr)
+ : IsRecurrence(IsRecur), PatternLastInst(I), MinMaxKind(MRK_Invalid),
+ UnsafeAlgebraInst(UAI) {}
+
+ InstDesc(Instruction *I, MinMaxRecurrenceKind K, Instruction *UAI = nullptr)
+ : IsRecurrence(true), PatternLastInst(I), MinMaxKind(K),
+ UnsafeAlgebraInst(UAI) {}
+
+ bool isRecurrence() { return IsRecurrence; }
+
+ bool hasUnsafeAlgebra() { return UnsafeAlgebraInst != nullptr; }
+
+ Instruction *getUnsafeAlgebraInst() { return UnsafeAlgebraInst; }
+
+ MinMaxRecurrenceKind getMinMaxKind() { return MinMaxKind; }
+
+ Instruction *getPatternInst() { return PatternLastInst; }
+
+ private:
+ // Is this instruction a recurrence candidate.
+ bool IsRecurrence;
+ // The last instruction in a min/max pattern (select of the select(icmp())
+ // pattern), or the current recurrence instruction otherwise.
+ Instruction *PatternLastInst;
+ // If this is a min/max pattern the comparison predicate.
+ MinMaxRecurrenceKind MinMaxKind;
+ // Recurrence has unsafe algebra.
+ Instruction *UnsafeAlgebraInst;
+ };
+
+ /// Returns a struct describing if the instruction 'I' can be a recurrence
+ /// variable of type 'Kind'. If the recurrence is a min/max pattern of
+ /// select(icmp()) this function advances the instruction pointer 'I' from the
+ /// compare instruction to the select instruction and stores this pointer in
+ /// 'PatternLastInst' member of the returned struct.
+ static InstDesc isRecurrenceInstr(Instruction *I, RecurrenceKind Kind,
+ InstDesc &Prev, bool HasFunNoNaNAttr);
+
+ /// Returns true if instruction I has multiple uses in Insts
+ static bool hasMultipleUsesOf(Instruction *I,
+ SmallPtrSetImpl<Instruction *> &Insts);
+
+ /// Returns true if all uses of the instruction I is within the Set.
+ static bool areAllUsesIn(Instruction *I, SmallPtrSetImpl<Instruction *> &Set);
+
+ /// Returns a struct describing whether the instruction is a
+ /// Select(ICmp(X, Y), X, Y) instruction pattern corresponding to a min(X, Y)
+ /// or max(X, Y).
+ static InstDesc isMinMaxSelectCmpPattern(Instruction *I, InstDesc &Prev);
+
+ /// Returns identity corresponding to the RecurrenceKind.
+ static Constant *getRecurrenceIdentity(RecurrenceKind K, Type *Tp);
+
+ /// Returns the opcode of binary operation corresponding to the
+ /// RecurrenceKind.
+ static unsigned getRecurrenceBinOp(RecurrenceKind Kind);
+
+ /// Returns a Min/Max operation corresponding to MinMaxRecurrenceKind.
+ static Value *createMinMaxOp(IRBuilder<> &Builder, MinMaxRecurrenceKind RK,
+ Value *Left, Value *Right);
+
+ /// Returns true if Phi is a reduction of type Kind and adds it to the
+ /// RecurrenceDescriptor.
+ static bool AddReductionVar(PHINode *Phi, RecurrenceKind Kind, Loop *TheLoop,
+ bool HasFunNoNaNAttr,
+ RecurrenceDescriptor &RedDes);
+
+ /// Returns true if Phi is a reduction in TheLoop. The RecurrenceDescriptor is
+ /// returned in RedDes.
+ static bool isReductionPHI(PHINode *Phi, Loop *TheLoop,
+ RecurrenceDescriptor &RedDes);
+
+ RecurrenceKind getRecurrenceKind() { return Kind; }
+
+ MinMaxRecurrenceKind getMinMaxRecurrenceKind() { return MinMaxKind; }
+
+ TrackingVH<Value> getRecurrenceStartValue() { return StartValue; }
+
+ Instruction *getLoopExitInstr() { return LoopExitInstr; }
+
+ /// Returns true if the recurrence has unsafe algebra which requires a relaxed
+ /// floating-point model.
+ bool hasUnsafeAlgebra() { return UnsafeAlgebraInst != nullptr; }
+
+ /// Returns first unsafe algebra instruction in the PHI node's use-chain.
+ Instruction *getUnsafeAlgebraInst() { return UnsafeAlgebraInst; }
+
+ /// Returns true if the recurrence kind is an integer kind.
+ static bool isIntegerRecurrenceKind(RecurrenceKind Kind);
+
+ /// Returns true if the recurrence kind is a floating point kind.
+ static bool isFloatingPointRecurrenceKind(RecurrenceKind Kind);
+
+ /// Returns true if the recurrence kind is an arithmetic kind.
+ static bool isArithmeticRecurrenceKind(RecurrenceKind Kind);
+
+ /// Determines if Phi may have been type-promoted. If Phi has a single user
+ /// that ANDs the Phi with a type mask, return the user. RT is updated to
+ /// account for the narrower bit width represented by the mask, and the AND
+ /// instruction is added to CI.
+ static Instruction *lookThroughAnd(PHINode *Phi, Type *&RT,
+ SmallPtrSetImpl<Instruction *> &Visited,
+ SmallPtrSetImpl<Instruction *> &CI);
+
+ /// Returns true if all the source operands of a recurrence are either
+ /// SExtInsts or ZExtInsts. This function is intended to be used with
+ /// lookThroughAnd to determine if the recurrence has been type-promoted. The
+ /// source operands are added to CI, and IsSigned is updated to indicate if
+ /// all source operands are SExtInsts.
+ static bool getSourceExtensionKind(Instruction *Start, Instruction *Exit,
+ Type *RT, bool &IsSigned,
+ SmallPtrSetImpl<Instruction *> &Visited,
+ SmallPtrSetImpl<Instruction *> &CI);
+
+ /// Returns the type of the recurrence. This type can be narrower than the
+ /// actual type of the Phi if the recurrence has been type-promoted.
+ Type *getRecurrenceType() { return RecurrenceType; }
+
+ /// Returns a reference to the instructions used for type-promoting the
+ /// recurrence.
+ SmallPtrSet<Instruction *, 8> &getCastInsts() { return CastInsts; }
+
+ /// Returns true if all source operands of the recurrence are SExtInsts.
+ bool isSigned() { return IsSigned; }
+
+private:
+ // The starting value of the recurrence.
+ // It does not have to be zero!
+ TrackingVH<Value> StartValue;
+ // The instruction whose value is used outside the loop.
+ Instruction *LoopExitInstr;
+ // The kind of the recurrence.
+ RecurrenceKind Kind;
+ // If this a min/max recurrence the kind of recurrence.
+ MinMaxRecurrenceKind MinMaxKind;
+ // First occurrence of unsafe algebra in the PHI's use-chain.
+ Instruction *UnsafeAlgebraInst;
+ // The type of the recurrence.
+ Type *RecurrenceType;
+ // True if all source operands of the recurrence are SExtInsts.
+ bool IsSigned;
+ // Instructions used for type-promoting the recurrence.
+ SmallPtrSet<Instruction *, 8> CastInsts;
+};
+
+/// A struct for saving information about induction variables.
+class InductionDescriptor {
+public:
+ /// This enum represents the kinds of inductions that we support.
+ enum InductionKind {
+ IK_NoInduction, ///< Not an induction variable.
+ IK_IntInduction, ///< Integer induction variable. Step = C.
+ IK_PtrInduction ///< Pointer induction var. Step = C / sizeof(elem).
+ };
+
+public:
+ /// Default constructor - creates an invalid induction.
+ InductionDescriptor()
+ : StartValue(nullptr), IK(IK_NoInduction), StepValue(nullptr) {}
+
+ /// Get the consecutive direction. Returns:
+ /// 0 - unknown or non-consecutive.
+ /// 1 - consecutive and increasing.
+ /// -1 - consecutive and decreasing.
+ int getConsecutiveDirection() const;
+
+ /// Compute the transformed value of Index at offset StartValue using step
+ /// StepValue.
+ /// For integer induction, returns StartValue + Index * StepValue.
+ /// For pointer induction, returns StartValue[Index * StepValue].
+ /// FIXME: The newly created binary instructions should contain nsw/nuw
+ /// flags, which can be found from the original scalar operations.
+ Value *transform(IRBuilder<> &B, Value *Index) const;
+
+ Value *getStartValue() const { return StartValue; }
+ InductionKind getKind() const { return IK; }
+ ConstantInt *getStepValue() const { return StepValue; }
+
+ static bool isInductionPHI(PHINode *Phi, ScalarEvolution *SE,
+ InductionDescriptor &D);
+
+private:
+ /// Private constructor - used by \c isInductionPHI.
+ InductionDescriptor(Value *Start, InductionKind K, ConstantInt *Step);
+
+ /// Start value.
+ TrackingVH<Value> StartValue;
+ /// Induction kind.
+ InductionKind IK;
+ /// Step value.
+ ConstantInt *StepValue;
+};
+
+BasicBlock *InsertPreheaderForLoop(Loop *L, DominatorTree *DT, LoopInfo *LI,
+ bool PreserveLCSSA);
+
+/// \brief Simplify each loop in a loop nest recursively.
+///
+/// This takes a potentially un-simplified loop L (and its children) and turns
+/// it into a simplified loop nest with preheaders and single backedges. It will
+/// update \c AliasAnalysis and \c ScalarEvolution analyses if they're non-null.
+bool simplifyLoop(Loop *L, DominatorTree *DT, LoopInfo *LI, ScalarEvolution *SE,
+ AssumptionCache *AC, bool PreserveLCSSA);
+
+/// \brief Put loop into LCSSA form.
+///
+/// Looks at all instructions in the loop which have uses outside of the
+/// current loop. For each, an LCSSA PHI node is inserted and the uses outside
+/// the loop are rewritten to use this node.
+///
+/// LoopInfo and DominatorTree are required and preserved.
+///
+/// If ScalarEvolution is passed in, it will be preserved.
+///
+/// Returns true if any modifications are made to the loop.
+bool formLCSSA(Loop &L, DominatorTree &DT, LoopInfo *LI,
+ ScalarEvolution *SE);
+
+/// \brief Put a loop nest into LCSSA form.
+///
+/// This recursively forms LCSSA for a loop nest.
+///
+/// LoopInfo and DominatorTree are required and preserved.
+///
+/// If ScalarEvolution is passed in, it will be preserved.
+///
+/// Returns true if any modifications are made to the loop.
+bool formLCSSARecursively(Loop &L, DominatorTree &DT, LoopInfo *LI,
+ ScalarEvolution *SE);
+
+/// \brief Walk the specified region of the CFG (defined by all blocks
+/// dominated by the specified block, and that are in the current loop) in
+/// reverse depth first order w.r.t the DominatorTree. This allows us to visit
+/// uses before definitions, allowing us to sink a loop body in one pass without
+/// iteration. Takes DomTreeNode, AliasAnalysis, LoopInfo, DominatorTree,
+/// DataLayout, TargetLibraryInfo, Loop, AliasSet information for all
+/// instructions of the loop and loop safety information as arguments.
+/// It returns changed status.
+bool sinkRegion(DomTreeNode *, AliasAnalysis *, LoopInfo *, DominatorTree *,
+ TargetLibraryInfo *, Loop *, AliasSetTracker *,
+ LICMSafetyInfo *);
+
+/// \brief Walk the specified region of the CFG (defined by all blocks
+/// dominated by the specified block, and that are in the current loop) in depth
+/// first order w.r.t the DominatorTree. This allows us to visit definitions
+/// before uses, allowing us to hoist a loop body in one pass without iteration.
+/// Takes DomTreeNode, AliasAnalysis, LoopInfo, DominatorTree, DataLayout,
+/// TargetLibraryInfo, Loop, AliasSet information for all instructions of the
+/// loop and loop safety information as arguments. It returns changed status.
+bool hoistRegion(DomTreeNode *, AliasAnalysis *, LoopInfo *, DominatorTree *,
+ TargetLibraryInfo *, Loop *, AliasSetTracker *,
+ LICMSafetyInfo *);
+
+/// \brief Try to promote memory values to scalars by sinking stores out of
+/// the loop and moving loads to before the loop. We do this by looping over
+/// the stores in the loop, looking for stores to Must pointers which are
+/// loop invariant. It takes AliasSet, Loop exit blocks vector, loop exit blocks
+/// insertion point vector, PredIteratorCache, LoopInfo, DominatorTree, Loop,
+/// AliasSet information for all instructions of the loop and loop safety
+/// information as arguments. It returns changed status.
+bool promoteLoopAccessesToScalars(AliasSet &, SmallVectorImpl<BasicBlock*> &,
+ SmallVectorImpl<Instruction*> &,
+ PredIteratorCache &, LoopInfo *,
+ DominatorTree *, Loop *, AliasSetTracker *,
+ LICMSafetyInfo *);
+
+/// \brief Computes safety information for a loop
+/// checks loop body & header for the possibility of may throw
+/// exception, it takes LICMSafetyInfo and loop as argument.
+/// Updates safety information in LICMSafetyInfo argument.
+void computeLICMSafetyInfo(LICMSafetyInfo *, Loop *);
+
+/// \brief Returns the instructions that use values defined in the loop.
+SmallVector<Instruction *, 8> findDefsUsedOutsideOfLoop(Loop *L);
+}
+
+#endif
diff --git a/gnu/llvm/include/llvm/Transforms/Utils/LoopVersioning.h b/gnu/llvm/include/llvm/Transforms/Utils/LoopVersioning.h
new file mode 100644
index 00000000000..3b70594e0b6
--- /dev/null
+++ b/gnu/llvm/include/llvm/Transforms/Utils/LoopVersioning.h
@@ -0,0 +1,114 @@
+//===- LoopVersioning.h - Utility to version a loop -------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines a utility class to perform loop versioning. The versioned
+// loop speculates that otherwise may-aliasing memory accesses don't overlap and
+// emits checks to prove this.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TRANSFORMS_UTILS_LOOPVERSIONING_H
+#define LLVM_TRANSFORMS_UTILS_LOOPVERSIONING_H
+
+#include "llvm/Analysis/LoopAccessAnalysis.h"
+#include "llvm/Analysis/ScalarEvolution.h"
+#include "llvm/Transforms/Utils/ValueMapper.h"
+#include "llvm/Transforms/Utils/LoopUtils.h"
+
+namespace llvm {
+
+class Loop;
+class LoopAccessInfo;
+class LoopInfo;
+class ScalarEvolution;
+
+/// \brief This class emits a version of the loop where run-time checks ensure
+/// that may-alias pointers can't overlap.
+///
+/// It currently only supports single-exit loops and assumes that the loop
+/// already has a preheader.
+class LoopVersioning {
+public:
+ /// \brief Expects LoopAccessInfo, Loop, LoopInfo, DominatorTree as input.
+ /// It uses runtime check provided by the user. If \p UseLAIChecks is true,
+ /// we will retain the default checks made by LAI. Otherwise, construct an
+ /// object having no checks and we expect the user to add them.
+ LoopVersioning(const LoopAccessInfo &LAI, Loop *L, LoopInfo *LI,
+ DominatorTree *DT, ScalarEvolution *SE,
+ bool UseLAIChecks = true);
+
+ /// \brief Performs the CFG manipulation part of versioning the loop including
+ /// the DominatorTree and LoopInfo updates.
+ ///
+ /// The loop that was used to construct the class will be the "versioned" loop
+ /// i.e. the loop that will receive control if all the memchecks pass.
+ ///
+ /// This allows the loop transform pass to operate on the same loop regardless
+ /// of whether versioning was necessary or not:
+ ///
+ /// for each loop L:
+ /// analyze L
+ /// if versioning is necessary version L
+ /// transform L
+ void versionLoop() { versionLoop(findDefsUsedOutsideOfLoop(VersionedLoop)); }
+
+  /// \brief Same but if the client has already precomputed the set of values
+  /// used outside the loop, this API allows passing that.
+ void versionLoop(const SmallVectorImpl<Instruction *> &DefsUsedOutside);
+
+ /// \brief Returns the versioned loop. Control flows here if pointers in the
+ /// loop don't alias (i.e. all memchecks passed). (This loop is actually the
+ /// same as the original loop that we got constructed with.)
+ Loop *getVersionedLoop() { return VersionedLoop; }
+
+ /// \brief Returns the fall-back loop. Control flows here if pointers in the
+ /// loop may alias (i.e. one of the memchecks failed).
+ Loop *getNonVersionedLoop() { return NonVersionedLoop; }
+
+ /// \brief Sets the runtime alias checks for versioning the loop.
+ void setAliasChecks(
+ const SmallVector<RuntimePointerChecking::PointerCheck, 4> Checks);
+
+ /// \brief Sets the runtime SCEV checks for versioning the loop.
+ void setSCEVChecks(SCEVUnionPredicate Check);
+
+private:
+ /// \brief Adds the necessary PHI nodes for the versioned loops based on the
+ /// loop-defined values used outside of the loop.
+ ///
+ /// This needs to be called after versionLoop if there are defs in the loop
+ /// that are used outside the loop.
+ void addPHINodes(const SmallVectorImpl<Instruction *> &DefsUsedOutside);
+
+ /// \brief The original loop. This becomes the "versioned" one. I.e.,
+ /// control flows here if pointers in the loop don't alias.
+ Loop *VersionedLoop;
+ /// \brief The fall-back loop. I.e. control flows here if pointers in the
+ /// loop may alias (memchecks failed).
+ Loop *NonVersionedLoop;
+
+ /// \brief This maps the instructions from VersionedLoop to their counterpart
+ /// in NonVersionedLoop.
+ ValueToValueMapTy VMap;
+
+ /// \brief The set of alias checks that we are versioning for.
+ SmallVector<RuntimePointerChecking::PointerCheck, 4> AliasChecks;
+
+ /// \brief The set of SCEV checks that we are versioning for.
+ SCEVUnionPredicate Preds;
+
+ /// \brief Analyses used.
+ const LoopAccessInfo &LAI;
+ LoopInfo *LI;
+ DominatorTree *DT;
+ ScalarEvolution *SE;
+};
+}
+
+#endif
diff --git a/gnu/llvm/include/llvm/Transforms/Utils/ModuleUtils.h b/gnu/llvm/include/llvm/Transforms/Utils/ModuleUtils.h
new file mode 100644
index 00000000000..0f23d34de5d
--- /dev/null
+++ b/gnu/llvm/include/llvm/Transforms/Utils/ModuleUtils.h
@@ -0,0 +1,64 @@
+//===-- ModuleUtils.h - Functions to manipulate Modules ---------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This family of functions perform manipulations on Modules.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TRANSFORMS_UTILS_MODULEUTILS_H
+#define LLVM_TRANSFORMS_UTILS_MODULEUTILS_H
+
+#include "llvm/ADT/ArrayRef.h"
+#include "llvm/ADT/StringRef.h"
+#include <utility> // for std::pair
+
+namespace llvm {
+
+class Module;
+class Function;
+class GlobalValue;
+class GlobalVariable;
+class Constant;
+class StringRef;
+class Value;
+class Type;
+template <class PtrType> class SmallPtrSetImpl;
+
+/// Append F to the list of global ctors of module M with the given Priority.
+/// This wraps the function in the appropriate structure and stores it along
+/// side other global constructors. For details see
+/// http://llvm.org/docs/LangRef.html#intg_global_ctors
+void appendToGlobalCtors(Module &M, Function *F, int Priority);
+
+/// Same as appendToGlobalCtors(), but for global dtors.
+void appendToGlobalDtors(Module &M, Function *F, int Priority);
+
+/// \brief Given "llvm.used" or "llvm.compiler.used" as a global name, collect
+/// the initializer elements of that global in Set and return the global itself.
+GlobalVariable *collectUsedGlobalVariables(Module &M,
+ SmallPtrSetImpl<GlobalValue *> &Set,
+ bool CompilerUsed);
+
+// Validate the result of Module::getOrInsertFunction called for an interface
+// function of given sanitizer. If the instrumented module defines a function
+// with the same name, their prototypes must match, otherwise
+// getOrInsertFunction returns a bitcast.
+Function *checkSanitizerInterfaceFunction(Constant *FuncOrBitcast);
+
+/// \brief Creates sanitizer constructor function, and calls sanitizer's init
+/// function from it.
+/// \return Returns pair of pointers to constructor, and init functions
+/// respectively.
+std::pair<Function *, Function *> createSanitizerCtorAndInitFunctions(
+ Module &M, StringRef CtorName, StringRef InitName,
+ ArrayRef<Type *> InitArgTypes, ArrayRef<Value *> InitArgs,
+ StringRef VersionCheckName = StringRef());
+} // End llvm namespace
+
+#endif // LLVM_TRANSFORMS_UTILS_MODULEUTILS_H
diff --git a/gnu/llvm/include/llvm/Transforms/Utils/PromoteMemToReg.h b/gnu/llvm/include/llvm/Transforms/Utils/PromoteMemToReg.h
new file mode 100644
index 00000000000..d0602bf47c9
--- /dev/null
+++ b/gnu/llvm/include/llvm/Transforms/Utils/PromoteMemToReg.h
@@ -0,0 +1,50 @@
+//===- PromoteMemToReg.h - Promote Allocas to Scalars -----------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file exposes an interface to promote alloca instructions to SSA
+// registers, by using the SSA construction algorithm.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TRANSFORMS_UTILS_PROMOTEMEMTOREG_H
+#define LLVM_TRANSFORMS_UTILS_PROMOTEMEMTOREG_H
+
+#include "llvm/ADT/ArrayRef.h"
+
+namespace llvm {
+
+class AllocaInst;
+class DominatorTree;
+class AliasSetTracker;
+class AssumptionCache;
+
+/// \brief Return true if this alloca is legal for promotion.
+///
+/// This is true if there are only loads, stores, and lifetime markers
+/// (transitively) using this alloca. This also enforces that there is only
+/// ever one layer of bitcasts or GEPs between the alloca and the lifetime
+/// markers.
+bool isAllocaPromotable(const AllocaInst *AI);
+
+/// \brief Promote the specified list of alloca instructions into scalar
+/// registers, inserting PHI nodes as appropriate.
+///
+/// This function makes use of DominanceFrontier information. This function
+/// does not modify the CFG of the function at all. All allocas must be from
+/// the same function.
+///
+/// If AST is specified, the specified tracker is updated to reflect changes
+/// made to the IR.
+void PromoteMemToReg(ArrayRef<AllocaInst *> Allocas, DominatorTree &DT,
+ AliasSetTracker *AST = nullptr,
+ AssumptionCache *AC = nullptr);
+
+} // End llvm namespace
+
+#endif
diff --git a/gnu/llvm/include/llvm/Transforms/Utils/SSAUpdater.h b/gnu/llvm/include/llvm/Transforms/Utils/SSAUpdater.h
new file mode 100644
index 00000000000..1c7b2c587a3
--- /dev/null
+++ b/gnu/llvm/include/llvm/Transforms/Utils/SSAUpdater.h
@@ -0,0 +1,178 @@
+//===-- SSAUpdater.h - Unstructured SSA Update Tool -------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file declares the SSAUpdater class.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TRANSFORMS_UTILS_SSAUPDATER_H
+#define LLVM_TRANSFORMS_UTILS_SSAUPDATER_H
+
+#include "llvm/ADT/ArrayRef.h"
+#include "llvm/ADT/StringRef.h"
+#include "llvm/Support/Compiler.h"
+
+namespace llvm {
+ class BasicBlock;
+ class Instruction;
+ class LoadInst;
+ template<typename T> class SmallVectorImpl;
+ template<typename T> class SSAUpdaterTraits;
+ class PHINode;
+ class Type;
+ class Use;
+ class Value;
+
+/// \brief Helper class for SSA formation on a set of values defined in
+/// multiple blocks.
+///
+/// This is used when code duplication or another unstructured
+/// transformation wants to rewrite a set of uses of one value with uses of a
+/// set of values.
+class SSAUpdater {
+ friend class SSAUpdaterTraits<SSAUpdater>;
+
+private:
+ /// This keeps track of which value to use on a per-block basis. When we
+ /// insert PHI nodes, we keep track of them here.
+ //typedef DenseMap<BasicBlock*, Value*> AvailableValsTy;
+ void *AV;
+
+ /// ProtoType holds the type of the values being rewritten.
+ Type *ProtoType;
+
+ /// PHI nodes are given a name based on ProtoName.
+ std::string ProtoName;
+
+ /// If this is non-null, the SSAUpdater adds all PHI nodes that it creates to
+ /// the vector.
+ SmallVectorImpl<PHINode*> *InsertedPHIs;
+
+public:
+ /// If InsertedPHIs is specified, it will be filled
+ /// in with all PHI Nodes created by rewriting.
+ explicit SSAUpdater(SmallVectorImpl<PHINode*> *InsertedPHIs = nullptr);
+ ~SSAUpdater();
+
+ /// \brief Reset this object to get ready for a new set of SSA updates with
+ /// type 'Ty'.
+ ///
+ /// PHI nodes get a name based on 'Name'.
+ void Initialize(Type *Ty, StringRef Name);
+
+ /// \brief Indicate that a rewritten value is available in the specified block
+ /// with the specified value.
+ void AddAvailableValue(BasicBlock *BB, Value *V);
+
+ /// \brief Return true if the SSAUpdater already has a value for the specified
+ /// block.
+ bool HasValueForBlock(BasicBlock *BB) const;
+
+ /// \brief Construct SSA form, materializing a value that is live at the end
+ /// of the specified block.
+ Value *GetValueAtEndOfBlock(BasicBlock *BB);
+
+ /// \brief Construct SSA form, materializing a value that is live in the
+ /// middle of the specified block.
+ ///
+ /// \c GetValueInMiddleOfBlock is the same as \c GetValueAtEndOfBlock except
+ /// in one important case: if there is a definition of the rewritten value
+ /// after the 'use' in BB. Consider code like this:
+ ///
+ /// \code
+ /// X1 = ...
+ /// SomeBB:
+ /// use(X)
+ /// X2 = ...
+ /// br Cond, SomeBB, OutBB
+ /// \endcode
+ ///
+ /// In this case, there are two values (X1 and X2) added to the AvailableVals
+ /// set by the client of the rewriter, and those values are both live out of
+ /// their respective blocks. However, the use of X happens in the *middle* of
+ /// a block. Because of this, we need to insert a new PHI node in SomeBB to
+ /// merge the appropriate values, and this value isn't live out of the block.
+ Value *GetValueInMiddleOfBlock(BasicBlock *BB);
+
+ /// \brief Rewrite a use of the symbolic value.
+ ///
+ /// This handles PHI nodes, which use their value in the corresponding
+ /// predecessor. Note that this will not work if the use is supposed to be
+ /// rewritten to a value defined in the same block as the use, but above it.
+ /// Any 'AddAvailableValue's added for the use's block will be considered to
+ /// be below it.
+ void RewriteUse(Use &U);
+
+ /// \brief Rewrite a use like \c RewriteUse but handling in-block definitions.
+ ///
+ /// This version of the method can rewrite uses in the same block as
+ /// a definition, because it assumes that all uses of a value are below any
+ /// inserted values.
+ void RewriteUseAfterInsertions(Use &U);
+
+private:
+ Value *GetValueAtEndOfBlockInternal(BasicBlock *BB);
+
+ void operator=(const SSAUpdater&) = delete;
+ SSAUpdater(const SSAUpdater&) = delete;
+};
+
+/// \brief Helper class for promoting a collection of loads and stores into SSA
+/// Form using the SSAUpdater.
+///
+/// This handles complexities that SSAUpdater doesn't, such as multiple loads
+/// and stores in one block.
+///
+/// Clients of this class are expected to subclass this and implement the
+/// virtual methods.
+class LoadAndStorePromoter {
+protected:
+ SSAUpdater &SSA;
+
+public:
+ LoadAndStorePromoter(ArrayRef<const Instruction*> Insts,
+ SSAUpdater &S, StringRef Name = StringRef());
+ virtual ~LoadAndStorePromoter() {}
+
+ /// \brief This does the promotion.
+ ///
+ /// Insts is a list of loads and stores to promote, and Name is the basename
+ /// for the PHIs to insert. After this is complete, the loads and stores are
+ /// removed from the code.
+ void run(const SmallVectorImpl<Instruction*> &Insts) const;
+
+ /// \brief Return true if the specified instruction is in the Inst list.
+ ///
+ /// The Insts list is the one passed into the constructor. Clients should
+ /// implement this with a more efficient version if possible.
+ virtual bool isInstInList(Instruction *I,
+ const SmallVectorImpl<Instruction*> &Insts) const;
+
+ /// \brief This hook is invoked after all the stores are found and inserted as
+ /// available values.
+ virtual void doExtraRewritesBeforeFinalDeletion() const {
+ }
+
+  /// \brief Clients can choose to implement this to get notified right before
+  /// a load is RAUW'd with another value.
+ virtual void replaceLoadWithValue(LoadInst *LI, Value *V) const {
+ }
+
+ /// \brief Called before each instruction is deleted.
+ virtual void instructionDeleted(Instruction *I) const {
+ }
+
+ /// \brief Called to update debug info associated with the instruction.
+ virtual void updateDebugInfo(Instruction *I) const {
+ }
+};
+
+} // End llvm namespace
+
+#endif
diff --git a/gnu/llvm/include/llvm/Transforms/Utils/SSAUpdaterImpl.h b/gnu/llvm/include/llvm/Transforms/Utils/SSAUpdaterImpl.h
new file mode 100644
index 00000000000..425ecd3cfb5
--- /dev/null
+++ b/gnu/llvm/include/llvm/Transforms/Utils/SSAUpdaterImpl.h
@@ -0,0 +1,460 @@
+//===-- SSAUpdaterImpl.h - SSA Updater Implementation -----------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file provides a template that implements the core algorithm for the
+// SSAUpdater and MachineSSAUpdater.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TRANSFORMS_UTILS_SSAUPDATERIMPL_H
+#define LLVM_TRANSFORMS_UTILS_SSAUPDATERIMPL_H
+
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/IR/ValueHandle.h"
+#include "llvm/Support/Allocator.h"
+#include "llvm/Support/Debug.h"
+
+namespace llvm {
+
+#define DEBUG_TYPE "ssaupdater"
+
+class CastInst;
+class PHINode;
+template<typename T> class SSAUpdaterTraits;
+
+template<typename UpdaterT>
+class SSAUpdaterImpl {
+private:
+ UpdaterT *Updater;
+
+ typedef SSAUpdaterTraits<UpdaterT> Traits;
+ typedef typename Traits::BlkT BlkT;
+ typedef typename Traits::ValT ValT;
+ typedef typename Traits::PhiT PhiT;
+
+ /// BBInfo - Per-basic block information used internally by SSAUpdaterImpl.
+ /// The predecessors of each block are cached here since pred_iterator is
+ /// slow and we need to iterate over the blocks at least a few times.
+ class BBInfo {
+ public:
+ BlkT *BB; // Back-pointer to the corresponding block.
+ ValT AvailableVal; // Value to use in this block.
+ BBInfo *DefBB; // Block that defines the available value.
+ int BlkNum; // Postorder number.
+ BBInfo *IDom; // Immediate dominator.
+ unsigned NumPreds; // Number of predecessor blocks.
+ BBInfo **Preds; // Array[NumPreds] of predecessor blocks.
+ PhiT *PHITag; // Marker for existing PHIs that match.
+
+ BBInfo(BlkT *ThisBB, ValT V)
+ : BB(ThisBB), AvailableVal(V), DefBB(V ? this : nullptr), BlkNum(0),
+ IDom(nullptr), NumPreds(0), Preds(nullptr), PHITag(nullptr) {}
+ };
+
+ typedef DenseMap<BlkT*, ValT> AvailableValsTy;
+ AvailableValsTy *AvailableVals;
+
+ SmallVectorImpl<PhiT*> *InsertedPHIs;
+
+ typedef SmallVectorImpl<BBInfo*> BlockListTy;
+ typedef DenseMap<BlkT*, BBInfo*> BBMapTy;
+ BBMapTy BBMap;
+ BumpPtrAllocator Allocator;
+
+public:
+ explicit SSAUpdaterImpl(UpdaterT *U, AvailableValsTy *A,
+ SmallVectorImpl<PhiT*> *Ins) :
+ Updater(U), AvailableVals(A), InsertedPHIs(Ins) { }
+
+ /// GetValue - Check to see if AvailableVals has an entry for the specified
+ /// BB and if so, return it. If not, construct SSA form by first
+ /// calculating the required placement of PHIs and then inserting new PHIs
+ /// where needed.
+ ValT GetValue(BlkT *BB) {
+ SmallVector<BBInfo*, 100> BlockList;
+ BBInfo *PseudoEntry = BuildBlockList(BB, &BlockList);
+
+ // Special case: bail out if BB is unreachable.
+ if (BlockList.size() == 0) {
+ ValT V = Traits::GetUndefVal(BB, Updater);
+ (*AvailableVals)[BB] = V;
+ return V;
+ }
+
+ FindDominators(&BlockList, PseudoEntry);
+ FindPHIPlacement(&BlockList);
+ FindAvailableVals(&BlockList);
+
+ return BBMap[BB]->DefBB->AvailableVal;
+ }
+
+ /// BuildBlockList - Starting from the specified basic block, traverse back
+ /// through its predecessors until reaching blocks with known values.
+ /// Create BBInfo structures for the blocks and append them to the block
+ /// list.
+ BBInfo *BuildBlockList(BlkT *BB, BlockListTy *BlockList) {
+ SmallVector<BBInfo*, 10> RootList;
+ SmallVector<BBInfo*, 64> WorkList;
+
+ BBInfo *Info = new (Allocator) BBInfo(BB, 0);
+ BBMap[BB] = Info;
+ WorkList.push_back(Info);
+
+ // Search backward from BB, creating BBInfos along the way and stopping
+ // when reaching blocks that define the value. Record those defining
+ // blocks on the RootList.
+ SmallVector<BlkT*, 10> Preds;
+ while (!WorkList.empty()) {
+ Info = WorkList.pop_back_val();
+ Preds.clear();
+ Traits::FindPredecessorBlocks(Info->BB, &Preds);
+ Info->NumPreds = Preds.size();
+ if (Info->NumPreds == 0)
+ Info->Preds = nullptr;
+ else
+ Info->Preds = static_cast<BBInfo**>
+ (Allocator.Allocate(Info->NumPreds * sizeof(BBInfo*),
+ AlignOf<BBInfo*>::Alignment));
+
+ for (unsigned p = 0; p != Info->NumPreds; ++p) {
+ BlkT *Pred = Preds[p];
+ // Check if BBMap already has a BBInfo for the predecessor block.
+ typename BBMapTy::value_type &BBMapBucket =
+ BBMap.FindAndConstruct(Pred);
+ if (BBMapBucket.second) {
+ Info->Preds[p] = BBMapBucket.second;
+ continue;
+ }
+
+ // Create a new BBInfo for the predecessor.
+ ValT PredVal = AvailableVals->lookup(Pred);
+ BBInfo *PredInfo = new (Allocator) BBInfo(Pred, PredVal);
+ BBMapBucket.second = PredInfo;
+ Info->Preds[p] = PredInfo;
+
+ if (PredInfo->AvailableVal) {
+ RootList.push_back(PredInfo);
+ continue;
+ }
+ WorkList.push_back(PredInfo);
+ }
+ }
+
+ // Now that we know what blocks are backwards-reachable from the starting
+ // block, do a forward depth-first traversal to assign postorder numbers
+ // to those blocks.
+ BBInfo *PseudoEntry = new (Allocator) BBInfo(nullptr, 0);
+ unsigned BlkNum = 1;
+
+ // Initialize the worklist with the roots from the backward traversal.
+ while (!RootList.empty()) {
+ Info = RootList.pop_back_val();
+ Info->IDom = PseudoEntry;
+ Info->BlkNum = -1;
+ WorkList.push_back(Info);
+ }
+
+ while (!WorkList.empty()) {
+ Info = WorkList.back();
+
+ if (Info->BlkNum == -2) {
+ // All the successors have been handled; assign the postorder number.
+ Info->BlkNum = BlkNum++;
+ // If not a root, put it on the BlockList.
+ if (!Info->AvailableVal)
+ BlockList->push_back(Info);
+ WorkList.pop_back();
+ continue;
+ }
+
+      // Leave this entry on the worklist, but set its BlkNum to mark that its
+      // successors have been put on the worklist. When it returns to the top of
+      // the list, after handling its successors, it will be assigned a
+ // number.
+ Info->BlkNum = -2;
+
+ // Add unvisited successors to the work list.
+ for (typename Traits::BlkSucc_iterator SI =
+ Traits::BlkSucc_begin(Info->BB),
+ E = Traits::BlkSucc_end(Info->BB); SI != E; ++SI) {
+ BBInfo *SuccInfo = BBMap[*SI];
+ if (!SuccInfo || SuccInfo->BlkNum)
+ continue;
+ SuccInfo->BlkNum = -1;
+ WorkList.push_back(SuccInfo);
+ }
+ }
+ PseudoEntry->BlkNum = BlkNum;
+ return PseudoEntry;
+ }
+
+ /// IntersectDominators - This is the dataflow lattice "meet" operation for
+ /// finding dominators. Given two basic blocks, it walks up the dominator
+ /// tree until it finds a common dominator of both. It uses the postorder
+ /// number of the blocks to determine how to do that.
+ BBInfo *IntersectDominators(BBInfo *Blk1, BBInfo *Blk2) {
+ while (Blk1 != Blk2) {
+ while (Blk1->BlkNum < Blk2->BlkNum) {
+ Blk1 = Blk1->IDom;
+ if (!Blk1)
+ return Blk2;
+ }
+ while (Blk2->BlkNum < Blk1->BlkNum) {
+ Blk2 = Blk2->IDom;
+ if (!Blk2)
+ return Blk1;
+ }
+ }
+ return Blk1;
+ }
+
+ /// FindDominators - Calculate the dominator tree for the subset of the CFG
+ /// corresponding to the basic blocks on the BlockList. This uses the
+ /// algorithm from: "A Simple, Fast Dominance Algorithm" by Cooper, Harvey
+ /// and Kennedy, published in Software--Practice and Experience, 2001,
+ /// 4:1-10. Because the CFG subset does not include any edges leading into
+ /// blocks that define the value, the results are not the usual dominator
+ /// tree. The CFG subset has a single pseudo-entry node with edges to a set
+ /// of root nodes for blocks that define the value. The dominators for this
+ /// subset CFG are not the standard dominators but they are adequate for
+ /// placing PHIs within the subset CFG.
+ void FindDominators(BlockListTy *BlockList, BBInfo *PseudoEntry) {
+ bool Changed;
+ do {
+ Changed = false;
+ // Iterate over the list in reverse order, i.e., forward on CFG edges.
+ for (typename BlockListTy::reverse_iterator I = BlockList->rbegin(),
+ E = BlockList->rend(); I != E; ++I) {
+ BBInfo *Info = *I;
+ BBInfo *NewIDom = nullptr;
+
+ // Iterate through the block's predecessors.
+ for (unsigned p = 0; p != Info->NumPreds; ++p) {
+ BBInfo *Pred = Info->Preds[p];
+
+ // Treat an unreachable predecessor as a definition with 'undef'.
+ if (Pred->BlkNum == 0) {
+ Pred->AvailableVal = Traits::GetUndefVal(Pred->BB, Updater);
+ (*AvailableVals)[Pred->BB] = Pred->AvailableVal;
+ Pred->DefBB = Pred;
+ Pred->BlkNum = PseudoEntry->BlkNum;
+ PseudoEntry->BlkNum++;
+ }
+
+ if (!NewIDom)
+ NewIDom = Pred;
+ else
+ NewIDom = IntersectDominators(NewIDom, Pred);
+ }
+
+ // Check if the IDom value has changed.
+ if (NewIDom && NewIDom != Info->IDom) {
+ Info->IDom = NewIDom;
+ Changed = true;
+ }
+ }
+ } while (Changed);
+ }
+
+ /// IsDefInDomFrontier - Search up the dominator tree from Pred to IDom for
+ /// any blocks containing definitions of the value. If one is found, then
+ /// the successor of Pred is in the dominance frontier for the definition,
+ /// and this function returns true.
+ bool IsDefInDomFrontier(const BBInfo *Pred, const BBInfo *IDom) {
+ for (; Pred != IDom; Pred = Pred->IDom) {
+ if (Pred->DefBB == Pred)
+ return true;
+ }
+ return false;
+ }
+
+ /// FindPHIPlacement - PHIs are needed in the iterated dominance frontiers
+ /// of the known definitions. Iteratively add PHIs in the dom frontiers
+ /// until nothing changes. Along the way, keep track of the nearest
+ /// dominating definitions for non-PHI blocks.
+ void FindPHIPlacement(BlockListTy *BlockList) {
+ bool Changed;
+ do {
+ Changed = false;
+ // Iterate over the list in reverse order, i.e., forward on CFG edges.
+ for (typename BlockListTy::reverse_iterator I = BlockList->rbegin(),
+ E = BlockList->rend(); I != E; ++I) {
+ BBInfo *Info = *I;
+
+ // If this block already needs a PHI, there is nothing to do here.
+ if (Info->DefBB == Info)
+ continue;
+
+ // Default to use the same def as the immediate dominator.
+ BBInfo *NewDefBB = Info->IDom->DefBB;
+ for (unsigned p = 0; p != Info->NumPreds; ++p) {
+ if (IsDefInDomFrontier(Info->Preds[p], Info->IDom)) {
+ // Need a PHI here.
+ NewDefBB = Info;
+ break;
+ }
+ }
+
+ // Check if anything changed.
+ if (NewDefBB != Info->DefBB) {
+ Info->DefBB = NewDefBB;
+ Changed = true;
+ }
+ }
+ } while (Changed);
+ }
+
+ /// FindAvailableVal - If this block requires a PHI, first check if an
+ /// existing PHI matches the PHI placement and reaching definitions computed
+ /// earlier, and if not, create a new PHI. Visit all the block's
+ /// predecessors to calculate the available value for each one and fill in
+ /// the incoming values for a new PHI.
+ void FindAvailableVals(BlockListTy *BlockList) {
+ // Go through the worklist in forward order (i.e., backward through the CFG)
+ // and check if existing PHIs can be used. If not, create empty PHIs where
+ // they are needed.
+ for (typename BlockListTy::iterator I = BlockList->begin(),
+ E = BlockList->end(); I != E; ++I) {
+ BBInfo *Info = *I;
+ // Check if there needs to be a PHI in BB.
+ if (Info->DefBB != Info)
+ continue;
+
+ // Look for an existing PHI.
+ FindExistingPHI(Info->BB, BlockList);
+ if (Info->AvailableVal)
+ continue;
+
+ ValT PHI = Traits::CreateEmptyPHI(Info->BB, Info->NumPreds, Updater);
+ Info->AvailableVal = PHI;
+ (*AvailableVals)[Info->BB] = PHI;
+ }
+
+ // Now go back through the worklist in reverse order to fill in the
+ // arguments for any new PHIs added in the forward traversal.
+ for (typename BlockListTy::reverse_iterator I = BlockList->rbegin(),
+ E = BlockList->rend(); I != E; ++I) {
+ BBInfo *Info = *I;
+
+ if (Info->DefBB != Info) {
+ // Record the available value at join nodes to speed up subsequent
+ // uses of this SSAUpdater for the same value.
+ if (Info->NumPreds > 1)
+ (*AvailableVals)[Info->BB] = Info->DefBB->AvailableVal;
+ continue;
+ }
+
+ // Check if this block contains a newly added PHI.
+ PhiT *PHI = Traits::ValueIsNewPHI(Info->AvailableVal, Updater);
+ if (!PHI)
+ continue;
+
+ // Iterate through the block's predecessors.
+ for (unsigned p = 0; p != Info->NumPreds; ++p) {
+ BBInfo *PredInfo = Info->Preds[p];
+ BlkT *Pred = PredInfo->BB;
+ // Skip to the nearest preceding definition.
+ if (PredInfo->DefBB != PredInfo)
+ PredInfo = PredInfo->DefBB;
+ Traits::AddPHIOperand(PHI, PredInfo->AvailableVal, Pred);
+ }
+
+ DEBUG(dbgs() << " Inserted PHI: " << *PHI << "\n");
+
+ // If the client wants to know about all new instructions, tell it.
+ if (InsertedPHIs) InsertedPHIs->push_back(PHI);
+ }
+ }
+
+ /// FindExistingPHI - Look through the PHI nodes in a block to see if any of
+ /// them match what is needed.
+ void FindExistingPHI(BlkT *BB, BlockListTy *BlockList) {
+ for (typename BlkT::iterator BBI = BB->begin(), BBE = BB->end();
+ BBI != BBE; ++BBI) {
+ PhiT *SomePHI = Traits::InstrIsPHI(&*BBI);
+ if (!SomePHI)
+ break;
+ if (CheckIfPHIMatches(SomePHI)) {
+ RecordMatchingPHIs(BlockList);
+ break;
+ }
+ // Match failed: clear all the PHITag values.
+ for (typename BlockListTy::iterator I = BlockList->begin(),
+ E = BlockList->end(); I != E; ++I)
+ (*I)->PHITag = nullptr;
+ }
+ }
+
+ /// CheckIfPHIMatches - Check if a PHI node matches the placement and values
+ /// in the BBMap.
+ bool CheckIfPHIMatches(PhiT *PHI) {
+ SmallVector<PhiT*, 20> WorkList;
+ WorkList.push_back(PHI);
+
+ // Mark that the block containing this PHI has been visited.
+ BBMap[PHI->getParent()]->PHITag = PHI;
+
+ while (!WorkList.empty()) {
+ PHI = WorkList.pop_back_val();
+
+ // Iterate through the PHI's incoming values.
+ for (typename Traits::PHI_iterator I = Traits::PHI_begin(PHI),
+ E = Traits::PHI_end(PHI); I != E; ++I) {
+ ValT IncomingVal = I.getIncomingValue();
+ BBInfo *PredInfo = BBMap[I.getIncomingBlock()];
+ // Skip to the nearest preceding definition.
+ if (PredInfo->DefBB != PredInfo)
+ PredInfo = PredInfo->DefBB;
+
+ // Check if it matches the expected value.
+ if (PredInfo->AvailableVal) {
+ if (IncomingVal == PredInfo->AvailableVal)
+ continue;
+ return false;
+ }
+
+ // Check if the value is a PHI in the correct block.
+ PhiT *IncomingPHIVal = Traits::ValueIsPHI(IncomingVal, Updater);
+ if (!IncomingPHIVal || IncomingPHIVal->getParent() != PredInfo->BB)
+ return false;
+
+ // If this block has already been visited, check if this PHI matches.
+ if (PredInfo->PHITag) {
+ if (IncomingPHIVal == PredInfo->PHITag)
+ continue;
+ return false;
+ }
+ PredInfo->PHITag = IncomingPHIVal;
+
+ WorkList.push_back(IncomingPHIVal);
+ }
+ }
+ return true;
+ }
+
+ /// RecordMatchingPHIs - For each PHI node that matches, record it in both
+ /// the BBMap and the AvailableVals mapping.
+ void RecordMatchingPHIs(BlockListTy *BlockList) {
+ for (typename BlockListTy::iterator I = BlockList->begin(),
+ E = BlockList->end(); I != E; ++I)
+ if (PhiT *PHI = (*I)->PHITag) {
+ BlkT *BB = PHI->getParent();
+ ValT PHIVal = Traits::GetPHIValue(PHI);
+ (*AvailableVals)[BB] = PHIVal;
+ BBMap[BB]->AvailableVal = PHIVal;
+ }
+ }
+};
+
+#undef DEBUG_TYPE // "ssaupdater"
+
+} // End llvm namespace
+
+#endif
diff --git a/gnu/llvm/include/llvm/Transforms/Utils/SimplifyIndVar.h b/gnu/llvm/include/llvm/Transforms/Utils/SimplifyIndVar.h
new file mode 100644
index 00000000000..3c55e64537c
--- /dev/null
+++ b/gnu/llvm/include/llvm/Transforms/Utils/SimplifyIndVar.h
@@ -0,0 +1,71 @@
+//===-- llvm/Transforms/Utils/SimplifyIndVar.h - Indvar Utils ---*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines an interface for induction variable simplification. It does
+// not define any actual pass or policy, but provides a single function to
+// simplify a loop's induction variables based on ScalarEvolution.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TRANSFORMS_UTILS_SIMPLIFYINDVAR_H
+#define LLVM_TRANSFORMS_UTILS_SIMPLIFYINDVAR_H
+
+#include "llvm/IR/ValueHandle.h"
+#include "llvm/Support/CommandLine.h"
+
+namespace llvm {
+
+class CastInst;
+class DominatorTree;
+class IVUsers;
+class Loop;
+class LoopInfo;
+class PHINode;
+class ScalarEvolution;
+
+/// Interface for visiting interesting IV users that are recognized but not
+/// simplified by this utility.
+class IVVisitor {
+protected:
+ const DominatorTree *DT;
+ bool ShouldSplitOverflowIntrinsics;
+
+ virtual void anchor();
+
+public:
+ IVVisitor(): DT(nullptr), ShouldSplitOverflowIntrinsics(false) {}
+ virtual ~IVVisitor() {}
+
+ const DominatorTree *getDomTree() const { return DT; }
+
+ bool shouldSplitOverflowInstrinsics() const {
+ return ShouldSplitOverflowIntrinsics;
+ }
+ void setSplitOverflowIntrinsics() {
+ ShouldSplitOverflowIntrinsics = true;
+ assert(DT && "Splitting overflow intrinsics requires a DomTree.");
+ }
+
+ virtual void visitCast(CastInst *Cast) = 0;
+};
+
+/// simplifyUsersOfIV - Simplify instructions that use this induction variable
+/// by using ScalarEvolution to analyze the IV's recurrence.
+bool simplifyUsersOfIV(PHINode *CurrIV, ScalarEvolution *SE, DominatorTree *DT,
+ LoopInfo *LI, SmallVectorImpl<WeakVH> &Dead,
+ IVVisitor *V = nullptr);
+
+/// simplifyLoopIVs - Simplify users of induction variables within this
+/// loop. This does not actually change or add IVs.
+bool simplifyLoopIVs(Loop *L, ScalarEvolution *SE, DominatorTree *DT,
+ LoopInfo *LI, SmallVectorImpl<WeakVH> &Dead);
+
+} // namespace llvm
+
+#endif
diff --git a/gnu/llvm/include/llvm/Transforms/Utils/SimplifyLibCalls.h b/gnu/llvm/include/llvm/Transforms/Utils/SimplifyLibCalls.h
new file mode 100644
index 00000000000..fc34f49a125
--- /dev/null
+++ b/gnu/llvm/include/llvm/Transforms/Utils/SimplifyLibCalls.h
@@ -0,0 +1,172 @@
+//===- SimplifyLibCalls.h - Library call simplifier -------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file exposes an interface to build some C language libcalls for
+// optimization passes that need to call the various functions.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TRANSFORMS_UTILS_SIMPLIFYLIBCALLS_H
+#define LLVM_TRANSFORMS_UTILS_SIMPLIFYLIBCALLS_H
+
+#include "llvm/ADT/STLExtras.h"
+#include "llvm/ADT/StringRef.h"
+#include "llvm/Analysis/TargetLibraryInfo.h"
+#include "llvm/IR/IRBuilder.h"
+
+namespace llvm {
+class Value;
+class CallInst;
+class DataLayout;
+class Instruction;
+class TargetLibraryInfo;
+class BasicBlock;
+class Function;
+
+/// \brief This class implements simplifications for calls to fortified library
+/// functions (__st*cpy_chk, __memcpy_chk, __memmove_chk, __memset_chk), to,
+/// when possible, replace them with their non-checking counterparts.
+/// Other optimizations can also be done, but it's possible to disable them and
+/// only simplify needless use of the checking versions (when the object size
+/// is unknown) by passing true for OnlyLowerUnknownSize.
+class FortifiedLibCallSimplifier {
+private:
+ const TargetLibraryInfo *TLI;
+ bool OnlyLowerUnknownSize;
+
+public:
+ FortifiedLibCallSimplifier(const TargetLibraryInfo *TLI,
+ bool OnlyLowerUnknownSize = false);
+
+ /// \brief Take the given call instruction and return a more
+ /// optimal value to replace the instruction with or 0 if a more
+ /// optimal form can't be found.
+ /// The call must not be an indirect call.
+ Value *optimizeCall(CallInst *CI);
+
+private:
+ Value *optimizeMemCpyChk(CallInst *CI, IRBuilder<> &B);
+ Value *optimizeMemMoveChk(CallInst *CI, IRBuilder<> &B);
+ Value *optimizeMemSetChk(CallInst *CI, IRBuilder<> &B);
+
+ // Str/Stp cpy are similar enough to be handled in the same functions.
+ Value *optimizeStrpCpyChk(CallInst *CI, IRBuilder<> &B, LibFunc::Func Func);
+ Value *optimizeStrpNCpyChk(CallInst *CI, IRBuilder<> &B, LibFunc::Func Func);
+
+ /// \brief Checks whether the call \p CI to a fortified libcall is foldable
+ /// to the non-fortified version.
+ bool isFortifiedCallFoldable(CallInst *CI, unsigned ObjSizeOp,
+ unsigned SizeOp, bool isString);
+};
+
+/// LibCallSimplifier - This class implements a collection of optimizations
+/// that replace well formed calls to library functions with a more optimal
+/// form. For example, replacing 'printf("Hello!")' with 'puts("Hello!")'.
+class LibCallSimplifier {
+private:
+ FortifiedLibCallSimplifier FortifiedSimplifier;
+ const DataLayout &DL;
+ const TargetLibraryInfo *TLI;
+ bool UnsafeFPShrink;
+ function_ref<void(Instruction *, Value *)> Replacer;
+
+ /// \brief Internal wrapper for RAUW that is the default implementation.
+ ///
+ /// Other users may provide an alternate function with this signature instead
+ /// of this one.
+ static void replaceAllUsesWithDefault(Instruction *I, Value *With);
+
+ /// \brief Replace an instruction's uses with a value using our replacer.
+ void replaceAllUsesWith(Instruction *I, Value *With);
+
+public:
+ LibCallSimplifier(const DataLayout &DL, const TargetLibraryInfo *TLI,
+ function_ref<void(Instruction *, Value *)> Replacer =
+ &replaceAllUsesWithDefault);
+
+ /// optimizeCall - Take the given call instruction and return a more
+ /// optimal value to replace the instruction with or 0 if a more
+ /// optimal form can't be found. Note that the returned value may
+ /// be equal to the instruction being optimized. In this case all
+ /// other instructions that use the given instruction were modified
+ /// and the given instruction is dead.
+ /// The call must not be an indirect call.
+ Value *optimizeCall(CallInst *CI);
+
+private:
+ // String and Memory Library Call Optimizations
+ Value *optimizeStrCat(CallInst *CI, IRBuilder<> &B);
+ Value *optimizeStrNCat(CallInst *CI, IRBuilder<> &B);
+ Value *optimizeStrChr(CallInst *CI, IRBuilder<> &B);
+ Value *optimizeStrRChr(CallInst *CI, IRBuilder<> &B);
+ Value *optimizeStrCmp(CallInst *CI, IRBuilder<> &B);
+ Value *optimizeStrNCmp(CallInst *CI, IRBuilder<> &B);
+ Value *optimizeStrCpy(CallInst *CI, IRBuilder<> &B);
+ Value *optimizeStpCpy(CallInst *CI, IRBuilder<> &B);
+ Value *optimizeStrNCpy(CallInst *CI, IRBuilder<> &B);
+ Value *optimizeStrLen(CallInst *CI, IRBuilder<> &B);
+ Value *optimizeStrPBrk(CallInst *CI, IRBuilder<> &B);
+ Value *optimizeStrTo(CallInst *CI, IRBuilder<> &B);
+ Value *optimizeStrSpn(CallInst *CI, IRBuilder<> &B);
+ Value *optimizeStrCSpn(CallInst *CI, IRBuilder<> &B);
+ Value *optimizeStrStr(CallInst *CI, IRBuilder<> &B);
+ Value *optimizeMemChr(CallInst *CI, IRBuilder<> &B);
+ Value *optimizeMemCmp(CallInst *CI, IRBuilder<> &B);
+ Value *optimizeMemCpy(CallInst *CI, IRBuilder<> &B);
+ Value *optimizeMemMove(CallInst *CI, IRBuilder<> &B);
+ Value *optimizeMemSet(CallInst *CI, IRBuilder<> &B);
+ // Wrapper for all String/Memory Library Call Optimizations
+ Value *optimizeStringMemoryLibCall(CallInst *CI, IRBuilder<> &B);
+
+ // Math Library Optimizations
+ Value *optimizeCos(CallInst *CI, IRBuilder<> &B);
+ Value *optimizePow(CallInst *CI, IRBuilder<> &B);
+ Value *optimizeExp2(CallInst *CI, IRBuilder<> &B);
+ Value *optimizeFabs(CallInst *CI, IRBuilder<> &B);
+ Value *optimizeFMinFMax(CallInst *CI, IRBuilder<> &B);
+ Value *optimizeLog(CallInst *CI, IRBuilder<> &B);
+ Value *optimizeSqrt(CallInst *CI, IRBuilder<> &B);
+ Value *optimizeSinCosPi(CallInst *CI, IRBuilder<> &B);
+ Value *optimizeTan(CallInst *CI, IRBuilder<> &B);
+
+ // Integer Library Call Optimizations
+ Value *optimizeFFS(CallInst *CI, IRBuilder<> &B);
+ Value *optimizeAbs(CallInst *CI, IRBuilder<> &B);
+ Value *optimizeIsDigit(CallInst *CI, IRBuilder<> &B);
+ Value *optimizeIsAscii(CallInst *CI, IRBuilder<> &B);
+ Value *optimizeToAscii(CallInst *CI, IRBuilder<> &B);
+
+ // Formatting and IO Library Call Optimizations
+ Value *optimizeErrorReporting(CallInst *CI, IRBuilder<> &B,
+ int StreamArg = -1);
+ Value *optimizePrintF(CallInst *CI, IRBuilder<> &B);
+ Value *optimizeSPrintF(CallInst *CI, IRBuilder<> &B);
+ Value *optimizeFPrintF(CallInst *CI, IRBuilder<> &B);
+ Value *optimizeFWrite(CallInst *CI, IRBuilder<> &B);
+ Value *optimizeFPuts(CallInst *CI, IRBuilder<> &B);
+ Value *optimizePuts(CallInst *CI, IRBuilder<> &B);
+
+ // Helper methods
+ Value *emitStrLenMemCpy(Value *Src, Value *Dst, uint64_t Len, IRBuilder<> &B);
+ void classifyArgUse(Value *Val, BasicBlock *BB, bool IsFloat,
+ SmallVectorImpl<CallInst *> &SinCalls,
+ SmallVectorImpl<CallInst *> &CosCalls,
+ SmallVectorImpl<CallInst *> &SinCosCalls);
+ void replaceTrigInsts(SmallVectorImpl<CallInst *> &Calls, Value *Res);
+ Value *optimizePrintFString(CallInst *CI, IRBuilder<> &B);
+ Value *optimizeSPrintFString(CallInst *CI, IRBuilder<> &B);
+ Value *optimizeFPrintFString(CallInst *CI, IRBuilder<> &B);
+
+ /// hasFloatVersion - Checks if there is a float version of the specified
+ /// function by checking for an existing function with name FuncName + f
+ bool hasFloatVersion(StringRef FuncName);
+};
+} // End llvm namespace
+
+#endif
diff --git a/gnu/llvm/include/llvm/Transforms/Utils/SplitModule.h b/gnu/llvm/include/llvm/Transforms/Utils/SplitModule.h
new file mode 100644
index 00000000000..7d896d1993d
--- /dev/null
+++ b/gnu/llvm/include/llvm/Transforms/Utils/SplitModule.h
@@ -0,0 +1,43 @@
+//===- SplitModule.h - Split a module into partitions -----------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the function llvm::SplitModule, which splits a module
+// into multiple linkable partitions. It can be used to implement parallel code
+// generation for link-time optimization.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TRANSFORMS_UTILS_SPLITMODULE_H
+#define LLVM_TRANSFORMS_UTILS_SPLITMODULE_H
+
+#include <functional>
+#include <memory>
+
+namespace llvm {
+
+class Module;
+class StringRef;
+
+/// Splits the module M into N linkable partitions. The function ModuleCallback
+/// is called N times passing each individual partition as the MPart argument.
+///
+/// FIXME: This function does not deal with the somewhat subtle symbol
+/// visibility issues around module splitting, including (but not limited to):
+///
+/// - Internal symbols should not collide with symbols defined outside the
+/// module.
+/// - Internal symbols defined in module-level inline asm should be visible to
+/// each partition.
+void SplitModule(
+ std::unique_ptr<Module> M, unsigned N,
+ std::function<void(std::unique_ptr<Module> MPart)> ModuleCallback);
+
+} // End llvm namespace
+
+#endif
diff --git a/gnu/llvm/include/llvm/Transforms/Utils/SymbolRewriter.h b/gnu/llvm/include/llvm/Transforms/Utils/SymbolRewriter.h
new file mode 100644
index 00000000000..5ccee98f97e
--- /dev/null
+++ b/gnu/llvm/include/llvm/Transforms/Utils/SymbolRewriter.h
@@ -0,0 +1,152 @@
+//===-- SymbolRewriter.h - Symbol Rewriting Pass ----------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file provides the prototypes and definitions related to the Symbol
+// Rewriter pass.
+//
+// The Symbol Rewriter pass takes a set of rewrite descriptors which define
+// transformations for symbol names. These can be either a single name-to-name
+// transformation or broader regular-expression-based transformations.
+//
+// All the functions are re-written at the IR level. The Symbol Rewriter itself
+// is exposed as a module level pass. All symbols at the module level are
+// iterated. For any matching symbol, the requested transformation is applied,
+// updating references to it as well (a la RAUW). The resulting binary will
+// only contain the rewritten symbols.
+//
+// By performing this operation in the compiler, we are able to catch symbols
+// that would otherwise not be possible to catch (e.g. inlined symbols).
+//
+// This makes it possible to cleanly transform symbols without resorting to
+// overly-complex macro tricks and the pre-processor. An example of where this
+// is useful is the sanitizers where we would like to intercept a well-defined
+// set of functions across the module.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TRANSFORMS_UTILS_SYMBOL_REWRITER_H
+#define LLVM_TRANSFORMS_UTILS_SYMBOL_REWRITER_H
+
+#include "llvm/ADT/ilist.h"
+#include "llvm/ADT/ilist_node.h"
+#include "llvm/IR/Module.h"
+
+namespace llvm {
+class MemoryBuffer;
+
+namespace yaml {
+class KeyValueNode;
+class MappingNode;
+class ScalarNode;
+class Stream;
+}
+
+namespace SymbolRewriter {
+/// The basic entity representing a rewrite operation. It serves as the base
+/// class for any rewrite descriptor. It has a certain set of specializations
+/// which describe a particular rewrite.
+///
+/// The RewriteMapParser can be used to parse a mapping file that provides the
+/// mapping for rewriting the symbols. The descriptors individually describe
+/// whether to rewrite a function, global variable, or global alias. Each of
+/// these can be selected either by explicitly providing a name for the ones to
+/// be rewritten or providing a (posix compatible) regular expression that will
+/// select the symbols to rewrite. This descriptor list is passed to the
+/// SymbolRewriter pass.
+class RewriteDescriptor : public ilist_node<RewriteDescriptor> {
+ RewriteDescriptor(const RewriteDescriptor &) = delete;
+
+ const RewriteDescriptor &
+ operator=(const RewriteDescriptor &) = delete;
+
+public:
+ enum class Type {
+ Invalid, /// invalid
+ Function, /// function - descriptor rewrites a function
+ GlobalVariable, /// global variable - descriptor rewrites a global variable
+ NamedAlias, /// named alias - descriptor rewrites a global alias
+ };
+
+ virtual ~RewriteDescriptor() {}
+
+ Type getType() const { return Kind; }
+
+ virtual bool performOnModule(Module &M) = 0;
+
+protected:
+ explicit RewriteDescriptor(Type T) : Kind(T) {}
+
+private:
+ const Type Kind;
+};
+
+typedef iplist<RewriteDescriptor> RewriteDescriptorList;
+
+class RewriteMapParser {
+public:
+ bool parse(const std::string &MapFile, RewriteDescriptorList *Descriptors);
+
+private:
+ bool parse(std::unique_ptr<MemoryBuffer> &MapFile, RewriteDescriptorList *DL);
+ bool parseEntry(yaml::Stream &Stream, yaml::KeyValueNode &Entry,
+ RewriteDescriptorList *DL);
+ bool parseRewriteFunctionDescriptor(yaml::Stream &Stream,
+ yaml::ScalarNode *Key,
+ yaml::MappingNode *Value,
+ RewriteDescriptorList *DL);
+ bool parseRewriteGlobalVariableDescriptor(yaml::Stream &Stream,
+ yaml::ScalarNode *Key,
+ yaml::MappingNode *Value,
+ RewriteDescriptorList *DL);
+ bool parseRewriteGlobalAliasDescriptor(yaml::Stream &YS, yaml::ScalarNode *K,
+ yaml::MappingNode *V,
+ RewriteDescriptorList *DL);
+};
+}
+
+template <>
+struct ilist_traits<SymbolRewriter::RewriteDescriptor>
+ : public ilist_default_traits<SymbolRewriter::RewriteDescriptor> {
+ mutable ilist_half_node<SymbolRewriter::RewriteDescriptor> Sentinel;
+
+public:
+ // createSentinel is used to get a reference to a node marking the end of
+ // the list. Because the sentinel is relative to this instance, use a
+ // non-static method.
+ SymbolRewriter::RewriteDescriptor *createSentinel() const {
+ // since i[p] lists always publicly derive from the corresponding
+ // traits, placing a data member in this class will augment the
+ // i[p]list. Since the NodeTy is expected to publicly derive from
+ // ilist_node<NodeTy>, there is a legal viable downcast from it to
+ // NodeTy. We use this trick to superpose i[p]list with a "ghostly"
+ // NodeTy, which becomes the sentinel. Dereferencing the sentinel is
+ // forbidden (save the ilist_node<NodeTy>) so no one will ever notice
+ // the superposition.
+ return static_cast<SymbolRewriter::RewriteDescriptor *>(&Sentinel);
+ }
+ void destroySentinel(SymbolRewriter::RewriteDescriptor *) {}
+
+ SymbolRewriter::RewriteDescriptor *provideInitialHead() const {
+ return createSentinel();
+ }
+
+ SymbolRewriter::RewriteDescriptor *
+ ensureHead(SymbolRewriter::RewriteDescriptor *&) const {
+ return createSentinel();
+ }
+
+ static void noteHead(SymbolRewriter::RewriteDescriptor *,
+ SymbolRewriter::RewriteDescriptor *) {}
+};
+
+ModulePass *createRewriteSymbolsPass();
+ModulePass *createRewriteSymbolsPass(SymbolRewriter::RewriteDescriptorList &);
+}
+
+#endif
diff --git a/gnu/llvm/include/llvm/Transforms/Utils/UnifyFunctionExitNodes.h b/gnu/llvm/include/llvm/Transforms/Utils/UnifyFunctionExitNodes.h
new file mode 100644
index 00000000000..550292f6b7a
--- /dev/null
+++ b/gnu/llvm/include/llvm/Transforms/Utils/UnifyFunctionExitNodes.h
@@ -0,0 +1,52 @@
+//===-- UnifyFunctionExitNodes.h - Ensure fn's have one return --*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This pass is used to ensure that functions have at most one return and one
+// unwind instruction in them. Additionally, it keeps track of which node is
+// the new exit node of the CFG. If there are no return or unwind instructions
+// in the function, the getReturnBlock/getUnwindBlock methods will return a null
+// pointer.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TRANSFORMS_UTILS_UNIFYFUNCTIONEXITNODES_H
+#define LLVM_TRANSFORMS_UTILS_UNIFYFUNCTIONEXITNODES_H
+
+#include "llvm/Pass.h"
+
+namespace llvm {
+
+struct UnifyFunctionExitNodes : public FunctionPass {
+ BasicBlock *ReturnBlock, *UnwindBlock, *UnreachableBlock;
+
+public:
+ static char ID; // Pass identification, replacement for typeid
+ UnifyFunctionExitNodes() : FunctionPass(ID),
+ ReturnBlock(nullptr), UnwindBlock(nullptr) {
+ initializeUnifyFunctionExitNodesPass(*PassRegistry::getPassRegistry());
+ }
+
+ // We can preserve non-critical-edgeness when we unify function exit nodes
+ void getAnalysisUsage(AnalysisUsage &AU) const override;
+
+ // getReturn|Unwind|UnreachableBlock - Return the new single (or nonexistent)
+ // return, unwind, or unreachable basic blocks in the CFG.
+ //
+ BasicBlock *getReturnBlock() const { return ReturnBlock; }
+ BasicBlock *getUnwindBlock() const { return UnwindBlock; }
+ BasicBlock *getUnreachableBlock() const { return UnreachableBlock; }
+
+ bool runOnFunction(Function &F) override;
+};
+
+Pass *createUnifyFunctionExitNodesPass();
+
+} // End llvm namespace
+
+#endif
diff --git a/gnu/llvm/include/llvm/Transforms/Utils/UnrollLoop.h b/gnu/llvm/include/llvm/Transforms/Utils/UnrollLoop.h
new file mode 100644
index 00000000000..710817cddf6
--- /dev/null
+++ b/gnu/llvm/include/llvm/Transforms/Utils/UnrollLoop.h
@@ -0,0 +1,45 @@
+//===- llvm/Transforms/Utils/UnrollLoop.h - Unrolling utilities -*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines some loop unrolling utilities. It does not define any
+// actual pass or policy, but provides a single function to perform loop
+// unrolling.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TRANSFORMS_UTILS_UNROLLLOOP_H
+#define LLVM_TRANSFORMS_UTILS_UNROLLLOOP_H
+
+#include "llvm/ADT/StringRef.h"
+
+namespace llvm {
+
+class AssumptionCache;
+class DominatorTree;
+class Loop;
+class LoopInfo;
+class LPPassManager;
+class MDNode;
+class Pass;
+class ScalarEvolution;
+
+bool UnrollLoop(Loop *L, unsigned Count, unsigned TripCount, bool AllowRuntime,
+ bool AllowExpensiveTripCount, unsigned TripMultiple,
+ LoopInfo *LI, ScalarEvolution *SE, DominatorTree *DT,
+ AssumptionCache *AC, bool PreserveLCSSA);
+
+bool UnrollRuntimeLoopProlog(Loop *L, unsigned Count,
+ bool AllowExpensiveTripCount, LoopInfo *LI,
+ ScalarEvolution *SE, DominatorTree *DT,
+ bool PreserveLCSSA);
+
+MDNode *GetUnrollMetadata(MDNode *LoopID, StringRef Name);
+}
+
+#endif
diff --git a/gnu/llvm/include/llvm/Transforms/Utils/ValueMapper.h b/gnu/llvm/include/llvm/Transforms/Utils/ValueMapper.h
new file mode 100644
index 00000000000..469022f34c5
--- /dev/null
+++ b/gnu/llvm/include/llvm/Transforms/Utils/ValueMapper.h
@@ -0,0 +1,135 @@
+//===- ValueMapper.h - Remapping for constants and metadata -----*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the MapValue interface which is used by various parts of
+// the Transforms/Utils library to implement cloning and linking facilities.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TRANSFORMS_UTILS_VALUEMAPPER_H
+#define LLVM_TRANSFORMS_UTILS_VALUEMAPPER_H
+
+#include "llvm/IR/ValueMap.h"
+
+namespace llvm {
+ class Value;
+ class Instruction;
+ typedef ValueMap<const Value *, WeakVH> ValueToValueMapTy;
+
+ /// ValueMapTypeRemapper - This is a class that can be implemented by clients
+ /// to remap types when cloning constants and instructions.
+ class ValueMapTypeRemapper {
+ virtual void anchor(); // Out of line method.
+ public:
+ virtual ~ValueMapTypeRemapper() {}
+
+ /// remapType - The client should implement this method if they want to
+ /// remap types while mapping values.
+ virtual Type *remapType(Type *SrcTy) = 0;
+ };
+
+ /// ValueMaterializer - This is a class that can be implemented by clients
+ /// to materialize Values on demand.
+ class ValueMaterializer {
+ virtual void anchor(); // Out of line method.
+
+ protected:
+ ~ValueMaterializer() = default;
+ ValueMaterializer() = default;
+ ValueMaterializer(const ValueMaterializer&) = default;
+ ValueMaterializer &operator=(const ValueMaterializer&) = default;
+
+ public:
+ /// The client should implement this method if they want to generate a
+ /// mapped Value on demand. For example, if linking lazily.
+ virtual Value *materializeDeclFor(Value *V) = 0;
+
+ /// If the data being mapped is recursive, the above function can map
+ /// just the declaration and this is called to compute the initializer.
+ /// It is called after the mapping is recorded, so it doesn't need to worry
+ /// about recursion.
+ virtual void materializeInitFor(GlobalValue *New, GlobalValue *Old);
+
+ /// If the client needs to handle temporary metadata it must implement
+ /// these methods.
+ virtual Metadata *mapTemporaryMetadata(Metadata *MD) { return nullptr; }
+ virtual void replaceTemporaryMetadata(const Metadata *OrigMD,
+ Metadata *NewMD) {}
+
+ /// The client should implement this method if some metadata need
+ /// not be mapped, for example DISubprogram metadata for functions not
+ /// linked into the destination module.
+ virtual bool isMetadataNeeded(Metadata *MD) { return true; }
+ };
+
+ /// RemapFlags - These are flags that the value mapping APIs allow.
+ enum RemapFlags {
+ RF_None = 0,
+
+ /// RF_NoModuleLevelChanges - If this flag is set, the remapper knows that
+ /// only local values within a function (such as an instruction or argument)
+ /// are mapped, not global values like functions and global metadata.
+ RF_NoModuleLevelChanges = 1,
+
+ /// RF_IgnoreMissingEntries - If this flag is set, the remapper ignores
+ /// entries that are not in the value map. If it is unset, it aborts if an
+ /// operand is asked to be remapped which doesn't exist in the mapping.
+ RF_IgnoreMissingEntries = 2,
+
+ /// Instruct the remapper to move distinct metadata instead of duplicating
+ /// it when there are module-level changes.
+ RF_MoveDistinctMDs = 4,
+
+ /// Any global values not in value map are mapped to null instead of
+ /// mapping to self. Illegal if RF_IgnoreMissingEntries is also set.
+ RF_NullMapMissingGlobalValues = 8,
+
+ /// Set when there is still temporary metadata that must be handled,
+ /// such as when we are doing function importing and will materialize
+ /// and link metadata as a postpass.
+ RF_HaveUnmaterializedMetadata = 16,
+ };
+
+ static inline RemapFlags operator|(RemapFlags LHS, RemapFlags RHS) {
+ return RemapFlags(unsigned(LHS)|unsigned(RHS));
+ }
+
+ Value *MapValue(const Value *V, ValueToValueMapTy &VM,
+ RemapFlags Flags = RF_None,
+ ValueMapTypeRemapper *TypeMapper = nullptr,
+ ValueMaterializer *Materializer = nullptr);
+
+ Metadata *MapMetadata(const Metadata *MD, ValueToValueMapTy &VM,
+ RemapFlags Flags = RF_None,
+ ValueMapTypeRemapper *TypeMapper = nullptr,
+ ValueMaterializer *Materializer = nullptr);
+
+ /// MapMetadata - provide versions that preserve type safety for MDNodes.
+ MDNode *MapMetadata(const MDNode *MD, ValueToValueMapTy &VM,
+ RemapFlags Flags = RF_None,
+ ValueMapTypeRemapper *TypeMapper = nullptr,
+ ValueMaterializer *Materializer = nullptr);
+
+ void RemapInstruction(Instruction *I, ValueToValueMapTy &VM,
+ RemapFlags Flags = RF_None,
+ ValueMapTypeRemapper *TypeMapper = nullptr,
+ ValueMaterializer *Materializer = nullptr);
+
+ /// MapValue - provide versions that preserve type safety for Constants.
+ inline Constant *MapValue(const Constant *V, ValueToValueMapTy &VM,
+ RemapFlags Flags = RF_None,
+ ValueMapTypeRemapper *TypeMapper = nullptr,
+ ValueMaterializer *Materializer = nullptr) {
+ return cast<Constant>(MapValue((const Value*)V, VM, Flags, TypeMapper,
+ Materializer));
+ }
+
+} // End llvm namespace
+
+#endif
diff --git a/gnu/llvm/include/llvm/Transforms/Vectorize.h b/gnu/llvm/include/llvm/Transforms/Vectorize.h
new file mode 100644
index 00000000000..aec3993d68f
--- /dev/null
+++ b/gnu/llvm/include/llvm/Transforms/Vectorize.h
@@ -0,0 +1,144 @@
+//===-- Vectorize.h - Vectorization Transformations -------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This header file defines prototypes for accessor functions that expose passes
+// in the Vectorize transformations library.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TRANSFORMS_VECTORIZE_H
+#define LLVM_TRANSFORMS_VECTORIZE_H
+
+namespace llvm {
+class BasicBlock;
+class BasicBlockPass;
+class Pass;
+
+//===----------------------------------------------------------------------===//
+/// @brief Vectorize configuration.
+struct VectorizeConfig {
+ //===--------------------------------------------------------------------===//
+ // Target architecture related parameters
+
+ /// @brief The size of the native vector registers.
+ unsigned VectorBits;
+
+ /// @brief Vectorize boolean values.
+ bool VectorizeBools;
+
+ /// @brief Vectorize integer values.
+ bool VectorizeInts;
+
+ /// @brief Vectorize floating-point values.
+ bool VectorizeFloats;
+
+ /// @brief Vectorize pointer values.
+ bool VectorizePointers;
+
+ /// @brief Vectorize casting (conversion) operations.
+ bool VectorizeCasts;
+
+ /// @brief Vectorize floating-point math intrinsics.
+ bool VectorizeMath;
+
+ /// @brief Vectorize bit intrinsics.
+ bool VectorizeBitManipulations;
+
+ /// @brief Vectorize the fused-multiply-add intrinsic.
+ bool VectorizeFMA;
+
+ /// @brief Vectorize select instructions.
+ bool VectorizeSelect;
+
+ /// @brief Vectorize comparison instructions.
+ bool VectorizeCmp;
+
+ /// @brief Vectorize getelementptr instructions.
+ bool VectorizeGEP;
+
+ /// @brief Vectorize loads and stores.
+ bool VectorizeMemOps;
+
+ /// @brief Only generate aligned loads and stores.
+ bool AlignedOnly;
+
+ //===--------------------------------------------------------------------===//
+ // Misc parameters
+
+ /// @brief The required chain depth for vectorization.
+ unsigned ReqChainDepth;
+
+ /// @brief The maximum search distance for instruction pairs.
+ unsigned SearchLimit;
+
+ /// @brief The maximum number of candidate pairs with which to use a full
+ /// cycle check.
+ unsigned MaxCandPairsForCycleCheck;
+
+ /// @brief Replicating one element to a pair breaks the chain.
+ bool SplatBreaksChain;
+
+ /// @brief The maximum number of pairable instructions per group.
+ unsigned MaxInsts;
+
+ /// @brief The maximum number of candidate instruction pairs per group.
+ unsigned MaxPairs;
+
+ /// @brief The maximum number of pairing iterations.
+ unsigned MaxIter;
+
+ /// @brief Don't try to form odd-length vectors.
+ bool Pow2LenOnly;
+
+ /// @brief Don't boost the chain-depth contribution of loads and stores.
+ bool NoMemOpBoost;
+
+ /// @brief Use a fast instruction dependency analysis.
+ bool FastDep;
+
+ /// @brief Initialize the VectorizeConfig from command line options.
+ VectorizeConfig();
+};
+
+//===----------------------------------------------------------------------===//
+//
+// BBVectorize - A basic-block vectorization pass.
+//
+BasicBlockPass *
+createBBVectorizePass(const VectorizeConfig &C = VectorizeConfig());
+
+//===----------------------------------------------------------------------===//
+//
+// LoopVectorize - Create a loop vectorization pass.
+//
+Pass *createLoopVectorizePass(bool NoUnrolling = false,
+ bool AlwaysVectorize = true);
+
+//===----------------------------------------------------------------------===//
+//
+// SLPVectorizer - Create a bottom-up SLP vectorizer pass.
+//
+Pass *createSLPVectorizerPass();
+
+//===----------------------------------------------------------------------===//
+/// @brief Vectorize the BasicBlock.
+///
+/// @param BB The BasicBlock to be vectorized
+/// @param P The current running pass, should require AliasAnalysis and
+/// ScalarEvolution. After the vectorization, AliasAnalysis,
+/// ScalarEvolution and CFG are preserved.
+///
+/// @return True if the BB is changed, false otherwise.
+///
+bool vectorizeBasicBlock(Pass *P, BasicBlock &BB,
+ const VectorizeConfig &C = VectorizeConfig());
+
+} // End llvm namespace
+
+#endif