author     patrick <patrick@openbsd.org>    2017-10-04 20:51:20 +0000
committer  patrick <patrick@openbsd.org>    2017-10-04 20:51:20 +0000
commit     e48371bb3a3f1b4e66fe2cf2ebc4ff8ab9856711
tree       f0864d17eb1c963fd8162989e54acce4644ed106
parent     Import LLVM 5.0.0 release including clang, lld and lldb.
Merge LLVM 5.0.0 release.
-rw-r--r--  gnu/llvm/include/llvm/CodeGen/AsmPrinter.h | 104
-rw-r--r--  gnu/llvm/include/llvm/MC/MCAsmInfoELF.h | 2
-rw-r--r--  gnu/llvm/lib/CodeGen/AsmPrinter/AsmPrinter.cpp | 367
-rw-r--r--  gnu/llvm/lib/CodeGen/StackProtector.cpp | 77
-rw-r--r--  gnu/llvm/lib/CodeGen/TargetLoweringBase.cpp | 169
-rw-r--r--  gnu/llvm/lib/MC/MCAsmInfoELF.cpp | 8
-rw-r--r--  gnu/llvm/lib/MC/MCELFStreamer.cpp | 90
-rw-r--r--  gnu/llvm/lib/MC/MCParser/AsmParser.cpp | 229
-rw-r--r--  gnu/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp | 1186
-rw-r--r--  gnu/llvm/lib/Target/AArch64/AArch64Subtarget.h | 66
-rw-r--r--  gnu/llvm/lib/Target/X86/MCTargetDesc/X86AsmBackend.cpp | 12
-rw-r--r--  gnu/llvm/lib/Target/X86/X86AsmPrinter.h | 5
-rw-r--r--  gnu/llvm/lib/Target/X86/X86MCInstLower.cpp | 385
-rw-r--r--  gnu/llvm/lib/Transforms/Scalar/LoopIdiomRecognize.cpp | 483
-rw-r--r--  gnu/llvm/tools/clang/include/clang/Basic/Builtins.def | 42
-rw-r--r--  gnu/llvm/tools/clang/include/clang/Basic/DiagnosticGroups.td | 46
-rw-r--r--  gnu/llvm/tools/clang/include/clang/Basic/DiagnosticSemaKinds.td | 536
-rw-r--r--  gnu/llvm/tools/clang/include/clang/Driver/Options.td | 534
-rw-r--r--  gnu/llvm/tools/clang/include/clang/Frontend/CompilerInvocation.h | 5
-rw-r--r--  gnu/llvm/tools/clang/include/clang/Sema/Sema.h | 665
-rw-r--r--  gnu/llvm/tools/clang/lib/Basic/Targets.cpp | 1434
-rw-r--r--  gnu/llvm/tools/clang/lib/Driver/ToolChains/Arch/AArch64.cpp | 8
-rw-r--r--  gnu/llvm/tools/clang/lib/Driver/ToolChains/Arch/AArch64.h | 3
-rw-r--r--  gnu/llvm/tools/clang/lib/Driver/ToolChains/Clang.cpp | 24
-rw-r--r--  gnu/llvm/tools/clang/lib/Driver/ToolChains/OpenBSD.cpp | 73
-rw-r--r--  gnu/llvm/tools/clang/lib/Driver/ToolChains/OpenBSD.h | 7
-rw-r--r--  gnu/llvm/tools/clang/lib/Frontend/InitHeaderSearch.cpp | 2
-rw-r--r--  gnu/llvm/tools/clang/lib/Sema/SemaChecking.cpp | 506
-rw-r--r--  gnu/llvm/tools/clang/lib/Sema/SemaDeclAttr.cpp | 874
-rw-r--r--  gnu/llvm/tools/lld/ELF/Config.h | 103
-rw-r--r--  gnu/llvm/tools/lld/ELF/Driver.cpp | 673
-rw-r--r--  gnu/llvm/tools/lld/ELF/DriverUtils.cpp | 27
-rw-r--r--  gnu/llvm/tools/lld/ELF/Options.td | 90
-rw-r--r--  gnu/llvm/tools/lld/ELF/OutputSections.cpp | 715
-rw-r--r--  gnu/llvm/tools/lld/ELF/SymbolTable.cpp | 304
-rw-r--r--  gnu/llvm/tools/lld/ELF/Symbols.cpp | 290
-rw-r--r--  gnu/llvm/tools/lld/ELF/SyntheticSections.cpp | 1937
-rw-r--r--  gnu/llvm/tools/lld/tools/lld/lld.cpp | 8
38 files changed, 8250 insertions(+), 3839 deletions(-)
diff --git a/gnu/llvm/include/llvm/CodeGen/AsmPrinter.h b/gnu/llvm/include/llvm/CodeGen/AsmPrinter.h
index 0e094c26d21..24fcb5230bb 100644
--- a/gnu/llvm/include/llvm/CodeGen/AsmPrinter.h
+++ b/gnu/llvm/include/llvm/CodeGen/AsmPrinter.h
@@ -1,4 +1,4 @@
-//===-- llvm/CodeGen/AsmPrinter.h - AsmPrinter Framework --------*- C++ -*-===//
+//===- llvm/CodeGen/AsmPrinter.h - AsmPrinter Framework ---------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
@@ -17,35 +17,45 @@
#define LLVM_CODEGEN_ASMPRINTER_H
#include "llvm/ADT/MapVector.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/Twine.h"
-#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/DwarfStringPoolEntry.h"
+#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/IR/InlineAsm.h"
-#include "llvm/Support/DataTypes.h"
+#include "llvm/IR/LLVMContext.h"
#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/SourceMgr.h"
+#include <cstdint>
+#include <memory>
+#include <utility>
+#include <vector>
namespace llvm {
+
class AsmPrinterHandler;
+class BasicBlock;
class BlockAddress;
-class ByteStreamer;
-class GCStrategy;
class Constant;
class ConstantArray;
+class DataLayout;
class DIE;
class DIEAbbrev;
+class DwarfDebug;
class GCMetadataPrinter;
class GlobalIndirectSymbol;
+class GlobalObject;
class GlobalValue;
class GlobalVariable;
+class GCStrategy;
class MachineBasicBlock;
+class MachineConstantPoolValue;
class MachineFunction;
class MachineInstr;
-class MachineLocation;
-class MachineLoopInfo;
-class MachineLoop;
-class MachineConstantPoolValue;
class MachineJumpTableInfo;
+class MachineLoopInfo;
class MachineModuleInfo;
+class MachineOptimizationRemarkEmitter;
class MCAsmInfo;
class MCCFIInstruction;
class MCContext;
@@ -57,10 +67,9 @@ class MCSubtargetInfo;
class MCSymbol;
class MCTargetOptions;
class MDNode;
-class DwarfDebug;
-class Mangler;
+class Module;
+class raw_ostream;
class TargetLoweringObjectFile;
-class DataLayout;
class TargetMachine;
/// This class is intended to be used as a driving class for all asm writers.
@@ -84,33 +93,39 @@ public:
std::unique_ptr<MCStreamer> OutStreamer;
/// The current machine function.
- const MachineFunction *MF;
+ const MachineFunction *MF = nullptr;
/// This is a pointer to the current MachineModuleInfo.
- MachineModuleInfo *MMI;
+ MachineModuleInfo *MMI = nullptr;
+
+ /// Optimization remark emitter.
+ MachineOptimizationRemarkEmitter *ORE;
/// The symbol for the current function. This is recalculated at the beginning
/// of each call to runOnMachineFunction().
///
- MCSymbol *CurrentFnSym;
+ MCSymbol *CurrentFnSym = nullptr;
/// The symbol used to represent the start of the current function for the
/// purpose of calculating its size (e.g. using the .size directive). By
/// default, this is equal to CurrentFnSym.
- MCSymbol *CurrentFnSymForSize;
+ MCSymbol *CurrentFnSymForSize = nullptr;
/// Map global GOT equivalent MCSymbols to GlobalVariables and keep track of
/// its number of uses by other globals.
- typedef std::pair<const GlobalVariable *, unsigned> GOTEquivUsePair;
+ using GOTEquivUsePair = std::pair<const GlobalVariable *, unsigned>;
MapVector<const MCSymbol *, GOTEquivUsePair> GlobalGOTEquivs;
+ /// Enable print [latency:throughput] in output
+ bool EnablePrintSchedInfo = false;
+
private:
- MCSymbol *CurrentFnBegin;
- MCSymbol *CurrentFnEnd;
- MCSymbol *CurExceptionSym;
+ MCSymbol *CurrentFnBegin = nullptr;
+ MCSymbol *CurrentFnEnd = nullptr;
+ MCSymbol *CurExceptionSym = nullptr;
// The garbage collection metadata printer table.
- void *GCMetadataPrinters; // Really a DenseMap.
+ void *GCMetadataPrinters = nullptr; // Really a DenseMap.
/// Emit comments in assembly output if this is true.
///
@@ -118,7 +133,7 @@ private:
static char ID;
/// If VerboseAsm is set, a pointer to the loop info for this function.
- MachineLoopInfo *LI;
+ MachineLoopInfo *LI = nullptr;
struct HandlerInfo {
AsmPrinterHandler *Handler;
@@ -126,6 +141,7 @@ private:
const char *TimerDescription;
const char *TimerGroupName;
const char *TimerGroupDescription;
+
HandlerInfo(AsmPrinterHandler *Handler, const char *TimerName,
const char *TimerDescription, const char *TimerGroupName,
const char *TimerGroupDescription)
@@ -137,11 +153,24 @@ private:
/// maintains ownership of the emitters.
SmallVector<HandlerInfo, 1> Handlers;
+public:
+ struct SrcMgrDiagInfo {
+ SourceMgr SrcMgr;
+ std::vector<const MDNode *> LocInfos;
+ LLVMContext::InlineAsmDiagHandlerTy DiagHandler;
+ void *DiagContext;
+ };
+
+private:
+ /// Structure for generating diagnostics for inline assembly. Only initialised
+ /// when necessary.
+ mutable std::unique_ptr<SrcMgrDiagInfo> DiagInfo;
+
/// If the target supports dwarf debug info, this pointer is non-null.
- DwarfDebug *DD;
+ DwarfDebug *DD = nullptr;
/// If the current module uses dwarf CFI annotations strictly for debugging.
- bool isCFIMoveForDebugging;
+ bool isCFIMoveForDebugging = false;
protected:
explicit AsmPrinter(TargetMachine &TM, std::unique_ptr<MCStreamer> Streamer);
@@ -200,6 +229,8 @@ public:
FUNCTION_ENTER = 0,
FUNCTION_EXIT = 1,
TAIL_CALL = 2,
+ LOG_ARGS_ENTER = 3,
+ CUSTOM_EVENT = 4,
};
// The table will contain these structs that point to the sled, the function
@@ -216,7 +247,7 @@ public:
};
// All the sleds to be emitted.
- std::vector<XRayFunctionEntry> Sleds;
+ SmallVector<XRayFunctionEntry, 4> Sleds;
// Helper function to record a given XRay sled.
void recordSled(MCSymbol *Sled, const MachineInstr &MI, SledKind Kind);
@@ -391,7 +422,7 @@ public:
//===------------------------------------------------------------------===//
// Symbol Lowering Routines.
//===------------------------------------------------------------------===//
-public:
+
MCSymbol *createTempSymbol(const Twine &Name) const;
/// Return the MCSymbol for a private symbol with global value name as its
@@ -417,7 +448,7 @@ public:
//===------------------------------------------------------------------===//
// Emission Helper Routines.
//===------------------------------------------------------------------===//
-public:
+
/// This is just convenient handler for printing offsets.
void printOffset(int64_t Offset, raw_ostream &OS) const;
@@ -494,7 +525,7 @@ public:
///
/// \p Value - The value to emit.
/// \p Size - The size of the integer (in bytes) to emit.
- virtual void EmitDebugValue(const MCExpr *Value, unsigned Size) const;
+ virtual void EmitDebugThreadLocal(const MCExpr *Value, unsigned Size) const;
//===------------------------------------------------------------------===//
// Dwarf Lowering Routines
@@ -521,7 +552,7 @@ public:
//===------------------------------------------------------------------===//
// Inline Asm Support
//===------------------------------------------------------------------===//
-public:
+
// These are hooks that targets can override to implement inline asm
// support. These should probably be moved out of AsmPrinter someday.
@@ -565,9 +596,9 @@ public:
private:
/// Private state for PrintSpecial()
// Assign a unique ID to this machine instruction.
- mutable const MachineInstr *LastMI;
- mutable unsigned LastFn;
- mutable unsigned Counter;
+ mutable const MachineInstr *LastMI = nullptr;
+ mutable unsigned LastFn = 0;
+ mutable unsigned Counter = ~0U;
/// This method emits the header for the current function.
virtual void EmitFunctionHeader();
@@ -587,8 +618,8 @@ private:
// Internal Implementation Details
//===------------------------------------------------------------------===//
- /// This emits visibility information about symbol, if this is suported by the
- /// target.
+ /// This emits visibility information about symbol, if this is supported by
+ /// the target.
void EmitVisibility(MCSymbol *Sym, unsigned Visibility,
bool IsDefinition = true) const;
@@ -606,6 +637,7 @@ private:
void emitGlobalIndirectSymbol(Module &M,
const GlobalIndirectSymbol& GIS);
};
-}
-#endif
+} // end namespace llvm
+
+#endif // LLVM_CODEGEN_ASMPRINTER_H
diff --git a/gnu/llvm/include/llvm/MC/MCAsmInfoELF.h b/gnu/llvm/include/llvm/MC/MCAsmInfoELF.h
index f113afc9885..f65018ceeda 100644
--- a/gnu/llvm/include/llvm/MC/MCAsmInfoELF.h
+++ b/gnu/llvm/include/llvm/MC/MCAsmInfoELF.h
@@ -21,7 +21,7 @@ class MCAsmInfoELF : public MCAsmInfo {
protected:
/// Targets which have non-executable stacks by default can set this to false
/// to disable the special section which requests a non-executable stack.
- bool UsesNonexecutableStackSection = true;
+ bool UsesNonexecutableStackSection = false;
MCAsmInfoELF();
};
diff --git a/gnu/llvm/lib/CodeGen/AsmPrinter/AsmPrinter.cpp b/gnu/llvm/lib/CodeGen/AsmPrinter/AsmPrinter.cpp
index 88f2b1126d9..9f615f06ebc 100644
--- a/gnu/llvm/lib/CodeGen/AsmPrinter/AsmPrinter.cpp
+++ b/gnu/llvm/lib/CodeGen/AsmPrinter/AsmPrinter.cpp
@@ -12,47 +12,101 @@
//===----------------------------------------------------------------------===//
#include "llvm/CodeGen/AsmPrinter.h"
+#include "AsmPrinterHandler.h"
#include "CodeViewDebug.h"
#include "DwarfDebug.h"
#include "DwarfException.h"
#include "WinException.h"
+#include "llvm/ADT/APFloat.h"
+#include "llvm/ADT/APInt.h"
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/STLExtras.h"
+#include "llvm/ADT/SmallPtrSet.h"
+#include "llvm/ADT/SmallString.h"
+#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
+#include "llvm/ADT/StringRef.h"
+#include "llvm/ADT/Triple.h"
+#include "llvm/ADT/Twine.h"
#include "llvm/Analysis/ConstantFolding.h"
+#include "llvm/Analysis/ObjectUtils.h"
+#include "llvm/BinaryFormat/Dwarf.h"
+#include "llvm/BinaryFormat/ELF.h"
#include "llvm/CodeGen/Analysis.h"
+#include "llvm/CodeGen/GCMetadata.h"
#include "llvm/CodeGen/GCMetadataPrinter.h"
+#include "llvm/CodeGen/GCStrategy.h"
+#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineConstantPool.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
+#include "llvm/CodeGen/MachineFunctionPass.h"
+#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBundle.h"
#include "llvm/CodeGen/MachineJumpTableInfo.h"
#include "llvm/CodeGen/MachineLoopInfo.h"
+#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineModuleInfoImpls.h"
+#include "llvm/CodeGen/MachineOperand.h"
+#include "llvm/CodeGen/MachineOptimizationRemarkEmitter.h"
+#include "llvm/IR/BasicBlock.h"
+#include "llvm/IR/Constant.h"
+#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
-#include "llvm/IR/DebugInfo.h"
+#include "llvm/IR/DebugInfoMetadata.h"
+#include "llvm/IR/DerivedTypes.h"
+#include "llvm/IR/Function.h"
+#include "llvm/IR/GlobalAlias.h"
+#include "llvm/IR/GlobalIFunc.h"
+#include "llvm/IR/GlobalIndirectSymbol.h"
+#include "llvm/IR/GlobalObject.h"
+#include "llvm/IR/GlobalValue.h"
+#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/Mangler.h"
+#include "llvm/IR/Metadata.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Operator.h"
+#include "llvm/IR/Value.h"
#include "llvm/MC/MCAsmInfo.h"
#include "llvm/MC/MCContext.h"
+#include "llvm/MC/MCDirectives.h"
#include "llvm/MC/MCExpr.h"
#include "llvm/MC/MCInst.h"
#include "llvm/MC/MCSection.h"
#include "llvm/MC/MCSectionELF.h"
#include "llvm/MC/MCSectionMachO.h"
#include "llvm/MC/MCStreamer.h"
-#include "llvm/MC/MCSymbolELF.h"
+#include "llvm/MC/MCSubtargetInfo.h"
+#include "llvm/MC/MCSymbol.h"
+#include "llvm/MC/MCTargetOptions.h"
#include "llvm/MC/MCValue.h"
+#include "llvm/MC/SectionKind.h"
+#include "llvm/Pass.h"
+#include "llvm/Support/Casting.h"
+#include "llvm/Support/Compiler.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/Format.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/TargetRegistry.h"
#include "llvm/Support/Timer.h"
+#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetFrameLowering.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetLowering.h"
#include "llvm/Target/TargetLoweringObjectFile.h"
+#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetRegisterInfo.h"
#include "llvm/Target/TargetSubtargetInfo.h"
+#include <algorithm>
+#include <cassert>
+#include <cinttypes>
+#include <cstdint>
+#include <limits>
+#include <memory>
+#include <string>
+#include <utility>
+#include <vector>
+
using namespace llvm;
#define DEBUG_TYPE "asm-printer"
@@ -69,6 +123,10 @@ static const char *const CodeViewLineTablesGroupDescription =
STATISTIC(EmittedInsts, "Number of machine instrs printed");
+static cl::opt<bool>
+ PrintSchedule("print-schedule", cl::Hidden, cl::init(false),
+ cl::desc("Print 'sched: [latency:throughput]' in .s output"));
+
char AsmPrinter::ID = 0;
typedef DenseMap<GCStrategy*, std::unique_ptr<GCMetadataPrinter>> gcp_map_type;
@@ -78,7 +136,6 @@ static gcp_map_type &getGCMap(void *&P) {
return *(gcp_map_type*)P;
}
-
/// getGVAlignmentLog2 - Return the alignment to use for the specified global
/// value in log2 form. This rounds up to the preferred alignment if possible
/// and legal.
@@ -107,16 +164,7 @@ static unsigned getGVAlignmentLog2(const GlobalValue *GV, const DataLayout &DL,
AsmPrinter::AsmPrinter(TargetMachine &tm, std::unique_ptr<MCStreamer> Streamer)
: MachineFunctionPass(ID), TM(tm), MAI(tm.getMCAsmInfo()),
- OutContext(Streamer->getContext()), OutStreamer(std::move(Streamer)),
- isCFIMoveForDebugging(false), LastMI(nullptr), LastFn(0), Counter(~0U) {
- DD = nullptr;
- MMI = nullptr;
- LI = nullptr;
- MF = nullptr;
- CurExceptionSym = CurrentFnSym = CurrentFnSymForSize = nullptr;
- CurrentFnBegin = nullptr;
- CurrentFnEnd = nullptr;
- GCMetadataPrinters = nullptr;
+ OutContext(Streamer->getContext()), OutStreamer(std::move(Streamer)) {
VerboseAsm = OutStreamer->isVerboseAsm();
}
@@ -171,6 +219,7 @@ void AsmPrinter::getAnalysisUsage(AnalysisUsage &AU) const {
AU.setPreservesAll();
MachineFunctionPass::getAnalysisUsage(AU);
AU.addRequired<MachineModuleInfo>();
+ AU.addRequired<MachineOptimizationRemarkEmitterPass>();
AU.addRequired<GCModuleInfo>();
if (isVerbose())
AU.addRequired<MachineLoopInfo>();
@@ -223,7 +272,7 @@ bool AsmPrinter::doInitialization(Module &M) {
// don't, this at least helps the user find where a global came from.
if (MAI->hasSingleParameterDotFile()) {
// .file "foo.c"
- OutStreamer->EmitFileDirective(M.getModuleIdentifier());
+ OutStreamer->EmitFileDirective(M.getSourceFileName());
}
GCModuleInfo *MI = getAnalysisIfAvailable<GCModuleInfo>();
@@ -571,7 +620,7 @@ void AsmPrinter::EmitGlobalVariable(const GlobalVariable *GV) {
///
/// \p Value - The value to emit.
/// \p Size - The size of the integer (in bytes) to emit.
-void AsmPrinter::EmitDebugValue(const MCExpr *Value,
+void AsmPrinter::EmitDebugThreadLocal(const MCExpr *Value,
unsigned Size) const {
OutStreamer->EmitValue(Value, Size);
}
@@ -579,12 +628,17 @@ void AsmPrinter::EmitDebugValue(const MCExpr *Value,
/// EmitFunctionHeader - This method emits the header for the current
/// function.
void AsmPrinter::EmitFunctionHeader() {
+ const Function *F = MF->getFunction();
+
+ if (isVerbose())
+ OutStreamer->GetCommentOS()
+ << "-- Begin function "
+ << GlobalValue::dropLLVMManglingEscape(F->getName()) << '\n';
+
// Print out constants referenced by the function
EmitConstantPool();
// Print the 'header' of function.
- const Function *F = MF->getFunction();
-
OutStreamer->SwitchSection(getObjFileLowering().SectionForGlobal(F, TM));
EmitVisibility(CurrentFnSym, F->getVisibility());
@@ -602,8 +656,23 @@ void AsmPrinter::EmitFunctionHeader() {
}
// Emit the prefix data.
- if (F->hasPrefixData())
- EmitGlobalConstant(F->getParent()->getDataLayout(), F->getPrefixData());
+ if (F->hasPrefixData()) {
+ if (MAI->hasSubsectionsViaSymbols()) {
+ // Preserving prefix data on platforms which use subsections-via-symbols
+ // is a bit tricky. Here we introduce a symbol for the prefix data
+ // and use the .alt_entry attribute to mark the function's real entry point
+ // as an alternative entry point to the prefix-data symbol.
+ MCSymbol *PrefixSym = OutContext.createLinkerPrivateTempSymbol();
+ OutStreamer->EmitLabel(PrefixSym);
+
+ EmitGlobalConstant(F->getParent()->getDataLayout(), F->getPrefixData());
+
+ // Emit an .alt_entry directive for the actual function symbol.
+ OutStreamer->EmitSymbolAttribute(CurrentFnSym, MCSA_AltEntry);
+ } else {
+ EmitGlobalConstant(F->getParent()->getDataLayout(), F->getPrefixData());
+ }
+ }
// Emit the CurrentFnSym. This is a virtual function to allow targets to
// do their wild and crazy things as required.
@@ -660,7 +729,8 @@ void AsmPrinter::EmitFunctionEntryLabel() {
}
/// emitComments - Pretty-print comments for instructions.
-static void emitComments(const MachineInstr &MI, raw_ostream &CommentOS) {
+static void emitComments(const MachineInstr &MI, raw_ostream &CommentOS,
+ AsmPrinter *AP) {
const MachineFunction *MF = MI.getParent()->getParent();
const TargetInstrInfo *TII = MF->getSubtarget().getInstrInfo();
@@ -668,6 +738,7 @@ static void emitComments(const MachineInstr &MI, raw_ostream &CommentOS) {
int FI;
const MachineFrameInfo &MFI = MF->getFrameInfo();
+ bool Commented = false;
// We assume a single instruction only has a spill or reload, not
// both.
@@ -675,24 +746,39 @@ static void emitComments(const MachineInstr &MI, raw_ostream &CommentOS) {
if (TII->isLoadFromStackSlotPostFE(MI, FI)) {
if (MFI.isSpillSlotObjectIndex(FI)) {
MMO = *MI.memoperands_begin();
- CommentOS << MMO->getSize() << "-byte Reload\n";
+ CommentOS << MMO->getSize() << "-byte Reload";
+ Commented = true;
}
} else if (TII->hasLoadFromStackSlot(MI, MMO, FI)) {
- if (MFI.isSpillSlotObjectIndex(FI))
- CommentOS << MMO->getSize() << "-byte Folded Reload\n";
+ if (MFI.isSpillSlotObjectIndex(FI)) {
+ CommentOS << MMO->getSize() << "-byte Folded Reload";
+ Commented = true;
+ }
} else if (TII->isStoreToStackSlotPostFE(MI, FI)) {
if (MFI.isSpillSlotObjectIndex(FI)) {
MMO = *MI.memoperands_begin();
- CommentOS << MMO->getSize() << "-byte Spill\n";
+ CommentOS << MMO->getSize() << "-byte Spill";
+ Commented = true;
}
} else if (TII->hasStoreToStackSlot(MI, MMO, FI)) {
- if (MFI.isSpillSlotObjectIndex(FI))
- CommentOS << MMO->getSize() << "-byte Folded Spill\n";
+ if (MFI.isSpillSlotObjectIndex(FI)) {
+ CommentOS << MMO->getSize() << "-byte Folded Spill";
+ Commented = true;
+ }
}
// Check for spill-induced copies
- if (MI.getAsmPrinterFlag(MachineInstr::ReloadReuse))
- CommentOS << " Reload Reuse\n";
+ if (MI.getAsmPrinterFlag(MachineInstr::ReloadReuse)) {
+ Commented = true;
+ CommentOS << " Reload Reuse";
+ }
+
+ if (Commented && AP->EnablePrintSchedInfo)
+ // If any comment was added above and we need sched info comment then
+ // add this new comment just after the above comment w/o "\n" between them.
+ CommentOS << " " << MF->getSubtarget().getSchedInfoStr(MI) << "\n";
+ else if (Commented)
+ CommentOS << "\n";
}
/// emitImplicitDef - This method emits the specified machine instruction
@@ -739,46 +825,30 @@ static bool emitDebugValueComment(const MachineInstr *MI, AsmPrinter &AP) {
const DILocalVariable *V = MI->getDebugVariable();
if (auto *SP = dyn_cast<DISubprogram>(V->getScope())) {
- StringRef Name = SP->getDisplayName();
+ StringRef Name = SP->getName();
if (!Name.empty())
OS << Name << ":";
}
OS << V->getName();
-
- const DIExpression *Expr = MI->getDebugExpression();
- auto Fragment = Expr->getFragmentInfo();
- if (Fragment)
- OS << " [fragment offset=" << Fragment->OffsetInBits
- << " size=" << Fragment->SizeInBits << "]";
OS << " <- ";
// The second operand is only an offset if it's an immediate.
- bool Deref = MI->getOperand(0).isReg() && MI->getOperand(1).isImm();
- int64_t Offset = Deref ? MI->getOperand(1).getImm() : 0;
-
- for (unsigned i = 0; i < Expr->getNumElements(); ++i) {
- uint64_t Op = Expr->getElement(i);
- if (Op == dwarf::DW_OP_LLVM_fragment) {
- // There can't be any operands after this in a valid expression
- break;
- } else if (Deref) {
- // We currently don't support extra Offsets or derefs after the first
- // one. Bail out early instead of emitting an incorrect comment
- OS << " [complex expression]";
- AP.OutStreamer->emitRawComment(OS.str());
- return true;
- } else if (Op == dwarf::DW_OP_deref) {
- Deref = true;
- continue;
- }
-
- uint64_t ExtraOffset = Expr->getElement(i++);
- if (Op == dwarf::DW_OP_plus)
- Offset += ExtraOffset;
- else {
- assert(Op == dwarf::DW_OP_minus);
- Offset -= ExtraOffset;
+ bool MemLoc = MI->getOperand(0).isReg() && MI->getOperand(1).isImm();
+ int64_t Offset = MemLoc ? MI->getOperand(1).getImm() : 0;
+ const DIExpression *Expr = MI->getDebugExpression();
+ if (Expr->getNumElements()) {
+ OS << '[';
+ bool NeedSep = false;
+ for (auto Op : Expr->expr_ops()) {
+ if (NeedSep)
+ OS << ", ";
+ else
+ NeedSep = true;
+ OS << dwarf::OperationEncodingString(Op.getOp());
+ for (unsigned I = 0; I < Op.getNumArgs(); ++I)
+ OS << ' ' << Op.getArg(I);
}
+ OS << "] ";
}
// Register or immediate value. Register 0 means undef.
@@ -809,7 +879,7 @@ static bool emitDebugValueComment(const MachineInstr *MI, AsmPrinter &AP) {
const TargetFrameLowering *TFI = AP.MF->getSubtarget().getFrameLowering();
Offset += TFI->getFrameIndexReference(*AP.MF,
MI->getOperand(0).getIndex(), Reg);
- Deref = true;
+ MemLoc = true;
}
if (Reg == 0) {
// Suppress offset, it is not meaningful here.
@@ -818,12 +888,12 @@ static bool emitDebugValueComment(const MachineInstr *MI, AsmPrinter &AP) {
AP.OutStreamer->emitRawComment(OS.str());
return true;
}
- if (Deref)
+ if (MemLoc)
OS << '[';
OS << PrintReg(Reg, AP.MF->getSubtarget().getRegisterInfo());
}
- if (Deref)
+ if (MemLoc)
OS << '+' << Offset << ']';
// NOTE: Want this comment at start of line, don't emit with AddComment.
@@ -855,6 +925,16 @@ void AsmPrinter::emitCFIInstruction(const MachineInstr &MI) {
if (needsCFIMoves() == CFI_M_None)
return;
+ // If there is no "real" instruction following this CFI instruction, skip
+ // emitting it; it would be beyond the end of the function's FDE range.
+ auto *MBB = MI.getParent();
+ auto I = std::next(MI.getIterator());
+ while (I != MBB->end() && I->isTransient())
+ ++I;
+ if (I == MBB->instr_end() &&
+ MBB->getReverseIterator() == MBB->getParent()->rbegin())
+ return;
+
const std::vector<MCCFIInstruction> &Instrs = MF->getFrameInstructions();
unsigned CFIIndex = MI.getOperand(0).getCFIIndex();
const MCCFIInstruction &CFI = Instrs[CFIIndex];
@@ -871,6 +951,19 @@ void AsmPrinter::emitFrameAlloc(const MachineInstr &MI) {
MCConstantExpr::create(FrameOffset, OutContext));
}
+static bool needFuncLabelsForEHOrDebugInfo(const MachineFunction &MF,
+ MachineModuleInfo *MMI) {
+ if (!MF.getLandingPads().empty() || MF.hasEHFunclets() || MMI->hasDebugInfo())
+ return true;
+
+ // We might emit an EH table that uses function begin and end labels even if
+ // we don't have any landingpads.
+ if (!MF.getFunction()->hasPersonalityFn())
+ return false;
+ return !isNoOpWithoutInvoke(
+ classifyEHPersonality(MF.getFunction()->getPersonalityFn()));
+}
+
/// EmitFunctionBody - This method emits the body and trailer for a
/// function.
void AsmPrinter::EmitFunctionBody() {
@@ -883,6 +976,7 @@ void AsmPrinter::EmitFunctionBody() {
// Print out code for the function.
bool HasAnyRealCode = false;
+ int NumInstsInFunction = 0;
for (auto &MBB : *MF) {
// Print a label for the basic block.
EmitBasicBlockStart(MBB);
@@ -892,7 +986,7 @@ void AsmPrinter::EmitFunctionBody() {
if (!MI.isPosition() && !MI.isImplicitDef() && !MI.isKill() &&
!MI.isDebugValue()) {
HasAnyRealCode = true;
- ++EmittedInsts;
+ ++NumInstsInFunction;
}
if (ShouldPrintDebugScopes) {
@@ -905,7 +999,7 @@ void AsmPrinter::EmitFunctionBody() {
}
if (isVerbose())
- emitComments(MI, OutStreamer->GetCommentOS());
+ emitComments(MI, OutStreamer->GetCommentOS(), this);
switch (MI.getOpcode()) {
case TargetOpcode::CFI_INSTRUCTION:
@@ -953,18 +1047,34 @@ void AsmPrinter::EmitFunctionBody() {
EmitBasicBlockEnd(MBB);
}
+ EmittedInsts += NumInstsInFunction;
+ MachineOptimizationRemarkAnalysis R(DEBUG_TYPE, "InstructionCount",
+ MF->getFunction()->getSubprogram(),
+ &MF->front());
+ R << ore::NV("NumInstructions", NumInstsInFunction)
+ << " instructions in function";
+ ORE->emit(R);
+
// If the function is empty and the object file uses .subsections_via_symbols,
// then we need to emit *something* to the function body to prevent the
// labels from collapsing together. Just emit a noop.
- if ((MAI->hasSubsectionsViaSymbols() && !HasAnyRealCode)) {
+ // Similarly, don't emit empty functions on Windows either. It can lead to
+ // duplicate entries (two functions with the same RVA) in the Guard CF Table
+ // after linking, causing the kernel not to load the binary:
+ // https://developercommunity.visualstudio.com/content/problem/45366/vc-linker-creates-invalid-dll-with-clang-cl.html
+ // FIXME: Hide this behind some API in e.g. MCAsmInfo or MCTargetStreamer.
+ const Triple &TT = TM.getTargetTriple();
+ if (!HasAnyRealCode && (MAI->hasSubsectionsViaSymbols() ||
+ (TT.isOSWindows() && TT.isOSBinFormatCOFF()))) {
MCInst Noop;
- MF->getSubtarget().getInstrInfo()->getNoopForMachoTarget(Noop);
- OutStreamer->AddComment("avoids zero-length function");
+ MF->getSubtarget().getInstrInfo()->getNoop(Noop);
// Targets can opt-out of emitting the noop here by leaving the opcode
// unspecified.
- if (Noop.getOpcode())
+ if (Noop.getOpcode()) {
+ OutStreamer->AddComment("avoids zero-length function");
OutStreamer->EmitInstruction(Noop, getSubtargetInfo());
+ }
}
const Function *F = MF->getFunction();
@@ -981,8 +1091,8 @@ void AsmPrinter::EmitFunctionBody() {
// Emit target-specific gunk after the function body.
EmitFunctionBodyEnd();
- if (!MF->getLandingPads().empty() || MMI->hasDebugInfo() ||
- MF->hasEHFunclets() || MAI->hasDotTypeDotSizeDirective()) {
+ if (needFuncLabelsForEHOrDebugInfo(*MF, MMI) ||
+ MAI->hasDotTypeDotSizeDirective()) {
// Create a symbol for the end of function.
CurrentFnEnd = createTempSymbol("func_end");
OutStreamer->EmitLabel(CurrentFnEnd);
@@ -1015,6 +1125,9 @@ void AsmPrinter::EmitFunctionBody() {
HI.Handler->endFunction(MF);
}
+ if (isVerbose())
+ OutStreamer->GetCommentOS() << "-- End function\n";
+
OutStreamer->AddBlankLine();
}
@@ -1175,11 +1288,7 @@ bool AsmPrinter::doFinalization(Module &M) {
const TargetLoweringObjectFile &TLOF = getObjFileLowering();
- // Emit module flags.
- SmallVector<Module::ModuleFlagEntry, 8> ModuleFlags;
- M.getModuleFlagsMetadata(ModuleFlags);
- if (!ModuleFlags.empty())
- TLOF.emitModuleFlags(*OutStreamer, ModuleFlags, TM);
+ TLOF.emitModuleMetadata(*OutStreamer, M, TM);
if (TM.getTargetTriple().isOSBinFormatELF()) {
MachineModuleInfoELF &MMIELF = MMI->getObjFileInfo<MachineModuleInfoELF>();
@@ -1238,7 +1347,7 @@ bool AsmPrinter::doFinalization(Module &M) {
break;
AliasStack.push_back(Cur);
}
- for (const GlobalAlias *AncestorAlias : reverse(AliasStack))
+ for (const GlobalAlias *AncestorAlias : llvm::reverse(AliasStack))
emitGlobalIndirectSymbol(M, *AncestorAlias);
AliasStack.clear();
}
@@ -1266,7 +1375,7 @@ bool AsmPrinter::doFinalization(Module &M) {
OutContext.getOrCreateSymbol(StringRef("__morestack_addr"));
OutStreamer->EmitLabel(AddrSymbol);
- unsigned PtrSize = M.getDataLayout().getPointerSize(0);
+ unsigned PtrSize = MAI->getCodePointerSize();
OutStreamer->EmitSymbolValue(GetExternalSymbolSymbol("__morestack"),
PtrSize);
}
@@ -1304,26 +1413,34 @@ void AsmPrinter::SetupMachineFunction(MachineFunction &MF) {
CurrentFnBegin = nullptr;
CurExceptionSym = nullptr;
bool NeedsLocalForSize = MAI->needsLocalForSize();
- if (!MF.getLandingPads().empty() || MMI->hasDebugInfo() ||
- MF.hasEHFunclets() || NeedsLocalForSize) {
+ if (needFuncLabelsForEHOrDebugInfo(MF, MMI) || NeedsLocalForSize) {
CurrentFnBegin = createTempSymbol("func_begin");
if (NeedsLocalForSize)
CurrentFnSymForSize = CurrentFnBegin;
}
+ ORE = &getAnalysis<MachineOptimizationRemarkEmitterPass>().getORE();
if (isVerbose())
LI = &getAnalysis<MachineLoopInfo>();
+
+ const TargetSubtargetInfo &STI = MF.getSubtarget();
+ EnablePrintSchedInfo = PrintSchedule.getNumOccurrences()
+ ? PrintSchedule
+ : STI.supportPrintSchedInfo();
}
namespace {
+
// Keep track the alignment, constpool entries per Section.
struct SectionCPs {
MCSection *S;
unsigned Alignment;
SmallVector<unsigned, 4> CPEs;
+
SectionCPs(MCSection *s, unsigned a) : S(s), Alignment(a) {}
};
-}
+
+} // end anonymous namespace
/// EmitConstantPool - Print to the current output stream assembly
/// representations of the constants in the constant pool MCP. This is
@@ -1547,7 +1664,6 @@ void AsmPrinter::EmitJumpTableEntry(const MachineJumpTableInfo *MJTI,
OutStreamer->EmitValue(Value, EntrySize);
}
-
/// EmitSpecialLLVMGlobal - Check to see if the specified global is a
/// special global used by LLVM. If so, emit it and return true, otherwise
/// do nothing and return false.
@@ -1598,13 +1714,16 @@ void AsmPrinter::EmitLLVMUsedList(const ConstantArray *InitList) {
}
namespace {
+
struct Structor {
- Structor() : Priority(0), Func(nullptr), ComdatKey(nullptr) {}
- int Priority;
- llvm::Constant *Func;
- llvm::GlobalValue *ComdatKey;
+ int Priority = 0;
+ Constant *Func = nullptr;
+ GlobalValue *ComdatKey = nullptr;
+
+ Structor() = default;
};
-} // end namespace
+
+} // end anonymous namespace
/// EmitXXStructorList - Emit the ctor or dtor list taking into account the init
/// priority.
@@ -1653,8 +1772,11 @@ void AsmPrinter::EmitXXStructorList(const DataLayout &DL, const Constant *List,
const TargetLoweringObjectFile &Obj = getObjFileLowering();
const MCSymbol *KeySym = nullptr;
if (GlobalValue *GV = S.ComdatKey) {
- if (GV->hasAvailableExternallyLinkage())
- // If the associated variable is available_externally, some other TU
+ if (GV->isDeclarationForLinker())
+ // If the associated variable is not defined in this module
+ // (it might be available_externally, or have been an
+ // available_externally definition that was dropped by the
+ // EliminateAvailableExternally pass), some other TU
// will provide its dynamic initializer.
continue;
@@ -1958,7 +2080,6 @@ static int isRepeatedByteSequence(const ConstantDataSequential *V) {
return static_cast<uint8_t>(C); // Ensure 255 is not returned as -1.
}
-
/// isRepeatedByteSequence - Determine whether the given value is
/// composed of a repeated sequence of identical bytes and return the
/// byte value. If it is not a repeated sequence, return -1.
@@ -1999,7 +2120,6 @@ static int isRepeatedByteSequence(const Value *V, const DataLayout &DL) {
static void emitGlobalConstantDataSequential(const DataLayout &DL,
const ConstantDataSequential *CDS,
AsmPrinter &AP) {
-
// See if we can aggregate this into a .fill, if so, emit it as such.
int Value = isRepeatedByteSequence(CDS, DL);
if (Value != -1) {
@@ -2033,7 +2153,6 @@ static void emitGlobalConstantDataSequential(const DataLayout &DL,
CDS->getNumElements();
if (unsigned Padding = Size - EmittedSize)
AP.OutStreamer->EmitZeros(Padding);
-
}
static void emitGlobalConstantArray(const DataLayout &DL,
@@ -2172,7 +2291,7 @@ static void emitGlobalConstantLargeInt(const ConstantInt *CI, AsmPrinter &AP) {
// chu[nk1 chu][nk2 chu] ... [nkN-1 chunkN]
ExtraBits = Realigned.getRawData()[0] &
(((uint64_t)-1) >> (64 - ExtraBitsSize));
- Realigned = Realigned.lshr(ExtraBitsSize);
+ Realigned.lshrInPlace(ExtraBitsSize);
} else
ExtraBits = Realigned.getRawData()[BitWidth / 64];
}
@@ -2447,8 +2566,6 @@ MCSymbol *AsmPrinter::GetExternalSymbolSymbol(StringRef Sym) const {
return OutContext.getOrCreateSymbol(NameStr);
}
-
-
/// PrintParentLoopComment - Print comments about parent loops of this one.
static void PrintParentLoopComment(raw_ostream &OS, const MachineLoop *Loop,
unsigned FunctionNumber) {
@@ -2513,7 +2630,6 @@ static void emitBasicBlockLoopComments(const MachineBasicBlock &MBB,
PrintChildLoopComment(OS, Loop, AP.getFunctionNumber());
}
-
/// EmitBasicBlockStart - This method prints the label for the specified
/// MachineBasicBlock, an alignment (if present) and a comment describing
/// it if appropriate.
@@ -2634,8 +2750,6 @@ isBlockOnlyReachableByFallthrough(const MachineBasicBlock *MBB) const {
return true;
}
-
-
GCMetadataPrinter *AsmPrinter::GetOrCreateGCPrinter(GCStrategy &S) {
if (!S.usesMetadata())
return nullptr;
@@ -2666,7 +2780,7 @@ GCMetadataPrinter *AsmPrinter::GetOrCreateGCPrinter(GCStrategy &S) {
}
/// Pin vtable to this file.
-AsmPrinterHandler::~AsmPrinterHandler() {}
+AsmPrinterHandler::~AsmPrinterHandler() = default;
void AsmPrinterHandler::markFunctionEnd() {}
@@ -2690,37 +2804,61 @@ void AsmPrinter::emitXRayTable() {
auto PrevSection = OutStreamer->getCurrentSectionOnly();
auto Fn = MF->getFunction();
- MCSection *Section = nullptr;
+ MCSection *InstMap = nullptr;
+ MCSection *FnSledIndex = nullptr;
if (MF->getSubtarget().getTargetTriple().isOSBinFormatELF()) {
if (Fn->hasComdat()) {
- Section = OutContext.getELFSection("xray_instr_map", ELF::SHT_PROGBITS,
+ InstMap = OutContext.getELFSection("xray_instr_map", ELF::SHT_PROGBITS,
ELF::SHF_ALLOC | ELF::SHF_GROUP, 0,
Fn->getComdat()->getName());
+ FnSledIndex = OutContext.getELFSection("xray_fn_idx", ELF::SHT_PROGBITS,
+ ELF::SHF_ALLOC | ELF::SHF_GROUP, 0,
+ Fn->getComdat()->getName());
} else {
- Section = OutContext.getELFSection("xray_instr_map", ELF::SHT_PROGBITS,
+ InstMap = OutContext.getELFSection("xray_instr_map", ELF::SHT_PROGBITS,
ELF::SHF_ALLOC);
+ FnSledIndex = OutContext.getELFSection("xray_fn_idx", ELF::SHT_PROGBITS,
+ ELF::SHF_ALLOC);
}
} else if (MF->getSubtarget().getTargetTriple().isOSBinFormatMachO()) {
- Section = OutContext.getMachOSection("__DATA", "xray_instr_map", 0,
+ InstMap = OutContext.getMachOSection("__DATA", "xray_instr_map", 0,
SectionKind::getReadOnlyWithRel());
+ FnSledIndex = OutContext.getMachOSection("__DATA", "xray_fn_idx", 0,
+ SectionKind::getReadOnlyWithRel());
} else {
llvm_unreachable("Unsupported target");
}
// Before we switch over, we force a reference to a label inside the
- // xray_instr_map section. Since this function is always called just
- // before the function's end, we assume that this is happening after
- // the last return instruction.
-
- auto WordSizeBytes = TM.getPointerSize();
- MCSymbol *Tmp = OutContext.createTempSymbol("xray_synthetic_", true);
+ // xray_fn_idx sections. This makes sure that the xray_fn_idx section is kept
+ // live by the linker if the function is not garbage-collected. Since this
+ // function is always called just before the function's end, we assume that
+ // this is happening after the last return instruction.
+ auto WordSizeBytes = MAI->getCodePointerSize();
+ MCSymbol *IdxRef = OutContext.createTempSymbol("xray_fn_idx_synth_", true);
OutStreamer->EmitCodeAlignment(16);
- OutStreamer->EmitSymbolValue(Tmp, WordSizeBytes, false);
- OutStreamer->SwitchSection(Section);
- OutStreamer->EmitLabel(Tmp);
+ OutStreamer->EmitSymbolValue(IdxRef, WordSizeBytes, false);
+
+ // Now we switch to the instrumentation map section. Because this is done
+ // per-function, we are able to create an index entry that will represent the
+ // range of sleds associated with a function.
+ MCSymbol *SledsStart = OutContext.createTempSymbol("xray_sleds_start", true);
+ OutStreamer->SwitchSection(InstMap);
+ OutStreamer->EmitLabel(SledsStart);
for (const auto &Sled : Sleds)
Sled.emit(WordSizeBytes, OutStreamer.get(), CurrentFnSym);
-
+ MCSymbol *SledsEnd = OutContext.createTempSymbol("xray_sleds_end", true);
+ OutStreamer->EmitLabel(SledsEnd);
+
+ // We then emit a single entry in the index per function. We use the symbols
+ // that bound the instrumentation map as the range for a specific function.
+ // Each entry here will be 2 * word size aligned, as we're writing down two
+ // pointers. This should work for both 32-bit and 64-bit platforms.
+ OutStreamer->SwitchSection(FnSledIndex);
+ OutStreamer->EmitCodeAlignment(2 * WordSizeBytes);
+ OutStreamer->EmitLabel(IdxRef);
+ OutStreamer->EmitSymbolValue(SledsStart, WordSizeBytes);
+ OutStreamer->EmitSymbolValue(SledsEnd, WordSizeBytes);
OutStreamer->SwitchSection(PrevSection);
Sleds.clear();
}
@@ -2729,8 +2867,11 @@ void AsmPrinter::recordSled(MCSymbol *Sled, const MachineInstr &MI,
SledKind Kind) {
auto Fn = MI.getParent()->getParent()->getFunction();
auto Attr = Fn->getFnAttribute("function-instrument");
+ bool LogArgs = Fn->hasFnAttribute("xray-log-args");
bool AlwaysInstrument =
Attr.isStringAttribute() && Attr.getValueAsString() == "xray-always";
+ if (Kind == SledKind::FUNCTION_ENTER && LogArgs)
+ Kind = SledKind::LOG_ARGS_ENTER;
Sleds.emplace_back(
XRayFunctionEntry{ Sled, CurrentFnSym, Kind, AlwaysInstrument, Fn });
}
diff --git a/gnu/llvm/lib/CodeGen/StackProtector.cpp b/gnu/llvm/lib/CodeGen/StackProtector.cpp
index c2c010a29d4..d8e7840a257 100644
--- a/gnu/llvm/lib/CodeGen/StackProtector.cpp
+++ b/gnu/llvm/lib/CodeGen/StackProtector.cpp
@@ -1,4 +1,4 @@
-//===-- StackProtector.cpp - Stack Protector Insertion --------------------===//
+//===- StackProtector.cpp - Stack Protector Insertion ---------------------===//
//
// The LLVM Compiler Infrastructure
//
@@ -14,30 +14,40 @@
//
//===----------------------------------------------------------------------===//
-#include "llvm/CodeGen/StackProtector.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/BranchProbabilityInfo.h"
#include "llvm/Analysis/EHPersonalities.h"
-#include "llvm/Analysis/ValueTracking.h"
+#include "llvm/Analysis/OptimizationDiagnosticInfo.h"
#include "llvm/CodeGen/Passes.h"
+#include "llvm/CodeGen/StackProtector.h"
+#include "llvm/CodeGen/TargetPassConfig.h"
#include "llvm/IR/Attributes.h"
+#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DebugInfo.h"
+#include "llvm/IR/DebugLoc.h"
#include "llvm/IR/DerivedTypes.h"
+#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
-#include "llvm/IR/GlobalValue.h"
-#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/IRBuilder.h"
+#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
-#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/MDBuilder.h"
#include "llvm/IR/Module.h"
+#include "llvm/IR/Type.h"
+#include "llvm/IR/User.h"
+#include "llvm/Pass.h"
+#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
+#include "llvm/Target/TargetLowering.h"
+#include "llvm/Target/TargetMachine.h"
+#include "llvm/Target/TargetOptions.h"
#include "llvm/Target/TargetSubtargetInfo.h"
-#include <cstdlib>
+#include <utility>
+
using namespace llvm;
#define DEBUG_TYPE "stack-protector"
@@ -50,12 +60,14 @@ static cl::opt<bool> EnableSelectionDAGSP("enable-selectiondag-sp",
cl::init(true), cl::Hidden);
char StackProtector::ID = 0;
-INITIALIZE_TM_PASS(StackProtector, "stack-protector", "Insert stack protectors",
- false, true)
-FunctionPass *llvm::createStackProtectorPass(const TargetMachine *TM) {
- return new StackProtector(TM);
-}
+INITIALIZE_PASS_BEGIN(StackProtector, DEBUG_TYPE,
+ "Insert stack protectors", false, true)
+INITIALIZE_PASS_DEPENDENCY(TargetPassConfig)
+INITIALIZE_PASS_END(StackProtector, DEBUG_TYPE,
+ "Insert stack protectors", false, true)
+
+FunctionPass *llvm::createStackProtectorPass() { return new StackProtector(); }
StackProtector::SSPLayoutKind
StackProtector::getSSPLayout(const AllocaInst *AI) const {
@@ -83,12 +95,19 @@ void StackProtector::adjustForColoring(const AllocaInst *From,
}
}
+void StackProtector::getAnalysisUsage(AnalysisUsage &AU) const {
+ AU.addRequired<TargetPassConfig>();
+ AU.addPreserved<DominatorTreeWrapperPass>();
+}
+
bool StackProtector::runOnFunction(Function &Fn) {
F = &Fn;
M = F->getParent();
DominatorTreeWrapperPass *DTWP =
getAnalysisIfAvailable<DominatorTreeWrapperPass>();
DT = DTWP ? &DTWP->getDomTree() : nullptr;
+ TM = &getAnalysis<TargetPassConfig>().getTM<TargetMachine>();
+ Trip = TM->getTargetTriple();
TLI = TM->getSubtargetImpl(Fn)->getTargetLowering();
HasPrologue = false;
HasIRCheck = false;
@@ -222,7 +241,16 @@ bool StackProtector::RequiresStackProtector() {
if (F->hasFnAttribute(Attribute::SafeStack))
return false;
+ // We are constructing the OptimizationRemarkEmitter on the fly rather than
+ // using the analysis pass to avoid building DominatorTree and LoopInfo which
+ // are not available this late in the IR pipeline.
+ OptimizationRemarkEmitter ORE(F);
+
if (F->hasFnAttribute(Attribute::StackProtectReq)) {
+ ORE.emit(OptimizationRemark(DEBUG_TYPE, "StackProtectorRequested", F)
+ << "Stack protection applied to function "
+ << ore::NV("Function", F)
+ << " due to a function attribute or command-line switch");
NeedsProtector = true;
Strong = true; // Use the same heuristic as strong to determine SSPLayout
} else if (F->hasFnAttribute(Attribute::StackProtectStrong))
@@ -236,20 +264,29 @@ bool StackProtector::RequiresStackProtector() {
for (const Instruction &I : BB) {
if (const AllocaInst *AI = dyn_cast<AllocaInst>(&I)) {
if (AI->isArrayAllocation()) {
+ OptimizationRemark Remark(DEBUG_TYPE, "StackProtectorAllocaOrArray",
+ &I);
+ Remark
+ << "Stack protection applied to function "
+ << ore::NV("Function", F)
+ << " due to a call to alloca or use of a variable length array";
if (const auto *CI = dyn_cast<ConstantInt>(AI->getArraySize())) {
if (CI->getLimitedValue(SSPBufferSize) >= SSPBufferSize) {
// A call to alloca with size >= SSPBufferSize requires
// stack protectors.
Layout.insert(std::make_pair(AI, SSPLK_LargeArray));
+ ORE.emit(Remark);
NeedsProtector = true;
} else if (Strong) {
// Require protectors for all alloca calls in strong mode.
Layout.insert(std::make_pair(AI, SSPLK_SmallArray));
+ ORE.emit(Remark);
NeedsProtector = true;
}
} else {
// A call to alloca with a variable size requires protectors.
Layout.insert(std::make_pair(AI, SSPLK_LargeArray));
+ ORE.emit(Remark);
NeedsProtector = true;
}
continue;
@@ -259,6 +296,11 @@ bool StackProtector::RequiresStackProtector() {
if (ContainsProtectableArray(AI->getAllocatedType(), IsLarge, Strong)) {
Layout.insert(std::make_pair(AI, IsLarge ? SSPLK_LargeArray
: SSPLK_SmallArray));
+ ORE.emit(OptimizationRemark(DEBUG_TYPE, "StackProtectorBuffer", &I)
+ << "Stack protection applied to function "
+ << ore::NV("Function", F)
+ << " due to a stack allocated buffer or struct containing a "
+ "buffer");
NeedsProtector = true;
continue;
}
@@ -266,6 +308,11 @@ bool StackProtector::RequiresStackProtector() {
if (Strong && HasAddressTaken(AI)) {
++NumAddrTaken;
Layout.insert(std::make_pair(AI, SSPLK_AddrOf));
+ ORE.emit(
+ OptimizationRemark(DEBUG_TYPE, "StackProtectorAddressTaken", &I)
+ << "Stack protection applied to function "
+ << ore::NV("Function", F)
+ << " due to the address of a local variable being taken");
NeedsProtector = true;
}
}
@@ -448,13 +495,13 @@ BasicBlock *StackProtector::CreateFailBB() {
Constant *StackChkFail =
M->getOrInsertFunction("__stack_smash_handler",
Type::getVoidTy(Context),
- Type::getInt8PtrTy(Context), nullptr);
+ Type::getInt8PtrTy(Context));
B.CreateCall(StackChkFail, B.CreateGlobalStringPtr(F->getName(), "SSH"));
} else {
Constant *StackChkFail =
- M->getOrInsertFunction("__stack_chk_fail", Type::getVoidTy(Context),
- nullptr);
+ M->getOrInsertFunction("__stack_chk_fail", Type::getVoidTy(Context));
+
B.CreateCall(StackChkFail, {});
}
B.CreateUnreachable();
diff --git a/gnu/llvm/lib/CodeGen/TargetLoweringBase.cpp b/gnu/llvm/lib/CodeGen/TargetLoweringBase.cpp
index 2af126aa807..6e66245e3dc 100644
--- a/gnu/llvm/lib/CodeGen/TargetLoweringBase.cpp
+++ b/gnu/llvm/lib/CodeGen/TargetLoweringBase.cpp
@@ -11,7 +11,6 @@
//
//===----------------------------------------------------------------------===//
-#include "llvm/Target/TargetLowering.h"
#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/StringExtras.h"
@@ -21,6 +20,7 @@
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineJumpTableInfo.h"
+#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/StackMaps.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
@@ -33,6 +33,7 @@
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
+#include "llvm/Target/TargetLowering.h"
#include "llvm/Target/TargetLoweringObjectFile.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetRegisterInfo.h"
@@ -53,6 +54,18 @@ static cl::opt<unsigned> MaximumJumpTableSize
("max-jump-table-size", cl::init(0), cl::Hidden,
cl::desc("Set maximum size of jump tables; zero for no limit."));
+/// Minimum jump table density for normal functions.
+static cl::opt<unsigned>
+ JumpTableDensity("jump-table-density", cl::init(10), cl::Hidden,
+ cl::desc("Minimum density for building a jump table in "
+ "a normal function"));
+
+/// Minimum jump table density for -Os or -Oz functions.
+static cl::opt<unsigned> OptsizeJumpTableDensity(
+ "optsize-jump-table-density", cl::init(40), cl::Hidden,
+ cl::desc("Minimum density for building a jump table in "
+ "an optsize function"));
+
// Although this default value is arbitrary, it is not random. It is assumed
// that a condition that evaluates the same way by a higher percentage than this
// is best represented as control flow. Therefore, the default value N should be
@@ -361,11 +374,36 @@ static void InitLibcallNames(const char **Names, const Triple &TT) {
Names[RTLIB::MEMCPY] = "memcpy";
Names[RTLIB::MEMMOVE] = "memmove";
Names[RTLIB::MEMSET] = "memset";
- Names[RTLIB::MEMCPY_ELEMENT_ATOMIC_1] = "__llvm_memcpy_element_atomic_1";
- Names[RTLIB::MEMCPY_ELEMENT_ATOMIC_2] = "__llvm_memcpy_element_atomic_2";
- Names[RTLIB::MEMCPY_ELEMENT_ATOMIC_4] = "__llvm_memcpy_element_atomic_4";
- Names[RTLIB::MEMCPY_ELEMENT_ATOMIC_8] = "__llvm_memcpy_element_atomic_8";
- Names[RTLIB::MEMCPY_ELEMENT_ATOMIC_16] = "__llvm_memcpy_element_atomic_16";
+ Names[RTLIB::MEMCPY_ELEMENT_UNORDERED_ATOMIC_1] =
+ "__llvm_memcpy_element_unordered_atomic_1";
+ Names[RTLIB::MEMCPY_ELEMENT_UNORDERED_ATOMIC_2] =
+ "__llvm_memcpy_element_unordered_atomic_2";
+ Names[RTLIB::MEMCPY_ELEMENT_UNORDERED_ATOMIC_4] =
+ "__llvm_memcpy_element_unordered_atomic_4";
+ Names[RTLIB::MEMCPY_ELEMENT_UNORDERED_ATOMIC_8] =
+ "__llvm_memcpy_element_unordered_atomic_8";
+ Names[RTLIB::MEMCPY_ELEMENT_UNORDERED_ATOMIC_16] =
+ "__llvm_memcpy_element_unordered_atomic_16";
+ Names[RTLIB::MEMMOVE_ELEMENT_UNORDERED_ATOMIC_1] =
+ "__llvm_memmove_element_unordered_atomic_1";
+ Names[RTLIB::MEMMOVE_ELEMENT_UNORDERED_ATOMIC_2] =
+ "__llvm_memmove_element_unordered_atomic_2";
+ Names[RTLIB::MEMMOVE_ELEMENT_UNORDERED_ATOMIC_4] =
+ "__llvm_memmove_element_unordered_atomic_4";
+ Names[RTLIB::MEMMOVE_ELEMENT_UNORDERED_ATOMIC_8] =
+ "__llvm_memmove_element_unordered_atomic_8";
+ Names[RTLIB::MEMMOVE_ELEMENT_UNORDERED_ATOMIC_16] =
+ "__llvm_memmove_element_unordered_atomic_16";
+ Names[RTLIB::MEMSET_ELEMENT_UNORDERED_ATOMIC_1] =
+ "__llvm_memset_element_unordered_atomic_1";
+ Names[RTLIB::MEMSET_ELEMENT_UNORDERED_ATOMIC_2] =
+ "__llvm_memset_element_unordered_atomic_2";
+ Names[RTLIB::MEMSET_ELEMENT_UNORDERED_ATOMIC_4] =
+ "__llvm_memset_element_unordered_atomic_4";
+ Names[RTLIB::MEMSET_ELEMENT_UNORDERED_ATOMIC_8] =
+ "__llvm_memset_element_unordered_atomic_8";
+ Names[RTLIB::MEMSET_ELEMENT_UNORDERED_ATOMIC_16] =
+ "__llvm_memset_element_unordered_atomic_16";
Names[RTLIB::UNWIND_RESUME] = "_Unwind_Resume";
Names[RTLIB::SYNC_VAL_COMPARE_AND_SWAP_1] = "__sync_val_compare_and_swap_1";
Names[RTLIB::SYNC_VAL_COMPARE_AND_SWAP_2] = "__sync_val_compare_and_swap_2";
@@ -768,22 +806,55 @@ RTLIB::Libcall RTLIB::getSYNC(unsigned Opc, MVT VT) {
return UNKNOWN_LIBCALL;
}
-RTLIB::Libcall RTLIB::getMEMCPY_ELEMENT_ATOMIC(uint64_t ElementSize) {
+RTLIB::Libcall RTLIB::getMEMCPY_ELEMENT_UNORDERED_ATOMIC(uint64_t ElementSize) {
+ switch (ElementSize) {
+ case 1:
+ return MEMCPY_ELEMENT_UNORDERED_ATOMIC_1;
+ case 2:
+ return MEMCPY_ELEMENT_UNORDERED_ATOMIC_2;
+ case 4:
+ return MEMCPY_ELEMENT_UNORDERED_ATOMIC_4;
+ case 8:
+ return MEMCPY_ELEMENT_UNORDERED_ATOMIC_8;
+ case 16:
+ return MEMCPY_ELEMENT_UNORDERED_ATOMIC_16;
+ default:
+ return UNKNOWN_LIBCALL;
+ }
+}
+
+RTLIB::Libcall RTLIB::getMEMMOVE_ELEMENT_UNORDERED_ATOMIC(uint64_t ElementSize) {
switch (ElementSize) {
case 1:
- return MEMCPY_ELEMENT_ATOMIC_1;
+ return MEMMOVE_ELEMENT_UNORDERED_ATOMIC_1;
case 2:
- return MEMCPY_ELEMENT_ATOMIC_2;
+ return MEMMOVE_ELEMENT_UNORDERED_ATOMIC_2;
case 4:
- return MEMCPY_ELEMENT_ATOMIC_4;
+ return MEMMOVE_ELEMENT_UNORDERED_ATOMIC_4;
case 8:
- return MEMCPY_ELEMENT_ATOMIC_8;
+ return MEMMOVE_ELEMENT_UNORDERED_ATOMIC_8;
case 16:
- return MEMCPY_ELEMENT_ATOMIC_16;
+ return MEMMOVE_ELEMENT_UNORDERED_ATOMIC_16;
default:
return UNKNOWN_LIBCALL;
}
+}
+RTLIB::Libcall RTLIB::getMEMSET_ELEMENT_UNORDERED_ATOMIC(uint64_t ElementSize) {
+ switch (ElementSize) {
+ case 1:
+ return MEMSET_ELEMENT_UNORDERED_ATOMIC_1;
+ case 2:
+ return MEMSET_ELEMENT_UNORDERED_ATOMIC_2;
+ case 4:
+ return MEMSET_ELEMENT_UNORDERED_ATOMIC_4;
+ case 8:
+ return MEMSET_ELEMENT_UNORDERED_ATOMIC_8;
+ case 16:
+ return MEMSET_ELEMENT_UNORDERED_ATOMIC_16;
+ default:
+ return UNKNOWN_LIBCALL;
+ }
}
/// InitCmpLibcallCCs - Set default comparison libcall CC.
@@ -829,16 +900,16 @@ TargetLoweringBase::TargetLoweringBase(const TargetMachine &tm) : TM(tm) {
initActions();
// Perform these initializations only once.
- MaxStoresPerMemset = MaxStoresPerMemcpy = MaxStoresPerMemmove = 8;
- MaxStoresPerMemsetOptSize = MaxStoresPerMemcpyOptSize
- = MaxStoresPerMemmoveOptSize = 4;
+ MaxStoresPerMemset = MaxStoresPerMemcpy = MaxStoresPerMemmove =
+ MaxLoadsPerMemcmp = 8;
+ MaxStoresPerMemsetOptSize = MaxStoresPerMemcpyOptSize =
+ MaxStoresPerMemmoveOptSize = MaxLoadsPerMemcmpOptSize = 4;
UseUnderscoreSetJmp = false;
UseUnderscoreLongJmp = false;
HasMultipleConditionRegisters = false;
HasExtractBitsInsn = false;
JumpIsExpensive = JumpIsExpensiveOverride;
PredictableSelectIsExpensive = false;
- MaskAndBranchFoldingIsLegal = false;
EnableExtLdPromotion = false;
HasFloatingPointExceptions = true;
StackPointerRegisterToSaveRestore = 0;
@@ -851,7 +922,7 @@ TargetLoweringBase::TargetLoweringBase(const TargetMachine &tm) : TM(tm) {
MinFunctionAlignment = 0;
PrefFunctionAlignment = 0;
PrefLoopAlignment = 0;
- GatherAllAliasesMaxDepth = 6;
+ GatherAllAliasesMaxDepth = 18;
MinStackArgumentAlignment = 1;
// TODO: the default will be switched to 0 in the next commit, along
// with the Target-specific changes necessary.
@@ -901,6 +972,7 @@ void TargetLoweringBase::initActions() {
setOperationAction(ISD::SMAX, VT, Expand);
setOperationAction(ISD::UMIN, VT, Expand);
setOperationAction(ISD::UMAX, VT, Expand);
+ setOperationAction(ISD::ABS, VT, Expand);
// Overflow operations default to expand
setOperationAction(ISD::SADDO, VT, Expand);
@@ -910,6 +982,11 @@ void TargetLoweringBase::initActions() {
setOperationAction(ISD::SMULO, VT, Expand);
setOperationAction(ISD::UMULO, VT, Expand);
+ // ADDCARRY operations default to expand
+ setOperationAction(ISD::ADDCARRY, VT, Expand);
+ setOperationAction(ISD::SUBCARRY, VT, Expand);
+ setOperationAction(ISD::SETCCCARRY, VT, Expand);
+
// These default to Expand so they will be expanded to CTLZ/CTTZ by default.
setOperationAction(ISD::CTLZ_ZERO_UNDEF, VT, Expand);
setOperationAction(ISD::CTTZ_ZERO_UNDEF, VT, Expand);
@@ -918,6 +995,7 @@ void TargetLoweringBase::initActions() {
// These library functions default to expand.
setOperationAction(ISD::FROUND, VT, Expand);
+ setOperationAction(ISD::FPOWI, VT, Expand);
// These operations default to expand for vector types.
if (VT.isVector()) {
@@ -1184,12 +1262,11 @@ static unsigned getVectorTypeBreakdownMVT(MVT VT, MVT &IntermediateVT,
/// isLegalRC - Return true if the value types that can be represented by the
/// specified register class are all legal.
-bool TargetLoweringBase::isLegalRC(const TargetRegisterClass *RC) const {
- for (TargetRegisterClass::vt_iterator I = RC->vt_begin(), E = RC->vt_end();
- I != E; ++I) {
+bool TargetLoweringBase::isLegalRC(const TargetRegisterInfo &TRI,
+ const TargetRegisterClass &RC) const {
+ for (auto I = TRI.legalclasstypes_begin(RC); *I != MVT::Other; ++I)
if (isTypeLegal(*I))
return true;
- }
return false;
}
@@ -1227,7 +1304,7 @@ TargetLoweringBase::emitPatchPoint(MachineInstr &InitialMI,
// Copy operands before the frame-index.
for (unsigned i = 0; i < OperIdx; ++i)
- MIB.addOperand(MI->getOperand(i));
+ MIB.add(MI->getOperand(i));
// Add frame index operands recognized by stackmaps.cpp
if (MFI.isStatepointSpillSlotObjectIndex(FI)) {
// indirect-mem-ref tag, size, #FI, offset.
@@ -1237,18 +1314,18 @@ TargetLoweringBase::emitPatchPoint(MachineInstr &InitialMI,
assert(MI->getOpcode() == TargetOpcode::STATEPOINT && "sanity");
MIB.addImm(StackMaps::IndirectMemRefOp);
MIB.addImm(MFI.getObjectSize(FI));
- MIB.addOperand(MI->getOperand(OperIdx));
+ MIB.add(MI->getOperand(OperIdx));
MIB.addImm(0);
} else {
// direct-mem-ref tag, #FI, offset.
// Used by patchpoint, and direct alloca arguments to statepoints
MIB.addImm(StackMaps::DirectMemRefOp);
- MIB.addOperand(MI->getOperand(OperIdx));
+ MIB.add(MI->getOperand(OperIdx));
MIB.addImm(0);
}
// Copy the operands after the frame index.
for (unsigned i = OperIdx + 1; i != MI->getNumOperands(); ++i)
- MIB.addOperand(MI->getOperand(i));
+ MIB.add(MI->getOperand(i));
// Inherit previous memory operands.
MIB->setMemRefs(MI->memoperands_begin(), MI->memoperands_end());
@@ -1296,12 +1373,12 @@ TargetLoweringBase::findRepresentativeClass(const TargetRegisterInfo *TRI,
// Find the first legal register class with the largest spill size.
const TargetRegisterClass *BestRC = RC;
- for (int i = SuperRegRC.find_first(); i >= 0; i = SuperRegRC.find_next(i)) {
+ for (unsigned i : SuperRegRC.set_bits()) {
const TargetRegisterClass *SuperRC = TRI->getRegClass(i);
// We want the largest possible spill size.
- if (SuperRC->getSize() <= BestRC->getSize())
+ if (TRI->getSpillSize(*SuperRC) <= TRI->getSpillSize(*BestRC))
continue;
- if (!isLegalRC(SuperRC))
+ if (!isLegalRC(*TRI, *SuperRC))
continue;
BestRC = SuperRC;
}
@@ -1437,6 +1514,7 @@ void TargetLoweringBase::computeRegisterProperties(
}
if (IsLegalWiderType)
break;
+ LLVM_FALLTHROUGH;
}
case TypeWidenVector: {
// Try to widen the vector.
@@ -1454,6 +1532,7 @@ void TargetLoweringBase::computeRegisterProperties(
}
if (IsLegalWiderType)
break;
+ LLVM_FALLTHROUGH;
}
case TypeSplitVector:
case TypeScalarizeVector: {
@@ -1589,7 +1668,7 @@ unsigned TargetLoweringBase::getVectorTypeBreakdown(LLVMContext &Context, EVT VT
/// type of the given function. This does not require a DAG or a return value,
/// and is suitable for use before any DAGs for the function are constructed.
/// TODO: Move this out of TargetLowering.cpp.
-void llvm::GetReturnInfo(Type *ReturnType, AttributeSet attr,
+void llvm::GetReturnInfo(Type *ReturnType, AttributeList attr,
SmallVectorImpl<ISD::OutputArg> &Outs,
const TargetLowering &TLI, const DataLayout &DL) {
SmallVector<EVT, 4> ValueVTs;
@@ -1601,9 +1680,9 @@ void llvm::GetReturnInfo(Type *ReturnType, AttributeSet attr,
EVT VT = ValueVTs[j];
ISD::NodeType ExtendKind = ISD::ANY_EXTEND;
- if (attr.hasAttribute(AttributeSet::ReturnIndex, Attribute::SExt))
+ if (attr.hasAttribute(AttributeList::ReturnIndex, Attribute::SExt))
ExtendKind = ISD::SIGN_EXTEND;
- else if (attr.hasAttribute(AttributeSet::ReturnIndex, Attribute::ZExt))
+ else if (attr.hasAttribute(AttributeList::ReturnIndex, Attribute::ZExt))
ExtendKind = ISD::ZERO_EXTEND;
// FIXME: C calling convention requires the return type to be promoted to
@@ -1616,18 +1695,20 @@ void llvm::GetReturnInfo(Type *ReturnType, AttributeSet attr,
VT = MinVT;
}
- unsigned NumParts = TLI.getNumRegisters(ReturnType->getContext(), VT);
- MVT PartVT = TLI.getRegisterType(ReturnType->getContext(), VT);
+ unsigned NumParts =
+ TLI.getNumRegistersForCallingConv(ReturnType->getContext(), VT);
+ MVT PartVT =
+ TLI.getRegisterTypeForCallingConv(ReturnType->getContext(), VT);
// 'inreg' on function refers to return value
ISD::ArgFlagsTy Flags = ISD::ArgFlagsTy();
- if (attr.hasAttribute(AttributeSet::ReturnIndex, Attribute::InReg))
+ if (attr.hasAttribute(AttributeList::ReturnIndex, Attribute::InReg))
Flags.setInReg();
// Propagate extension type if any
- if (attr.hasAttribute(AttributeSet::ReturnIndex, Attribute::SExt))
+ if (attr.hasAttribute(AttributeList::ReturnIndex, Attribute::SExt))
Flags.setSExt();
- else if (attr.hasAttribute(AttributeSet::ReturnIndex, Attribute::ZExt))
+ else if (attr.hasAttribute(AttributeList::ReturnIndex, Attribute::ZExt))
Flags.setZExt();
for (unsigned i = 0; i < NumParts; ++i)
@@ -1818,7 +1899,7 @@ Value *TargetLoweringBase::getSafeStackPointerLocation(IRBuilder<> &IRB) const {
Module *M = IRB.GetInsertBlock()->getParent()->getParent();
Type *StackPtrTy = Type::getInt8PtrTy(M->getContext());
Value *Fn = M->getOrInsertFunction("__safestack_pointer_address",
- StackPtrTy->getPointerTo(0), nullptr);
+ StackPtrTy->getPointerTo(0));
return IRB.CreateCall(Fn);
}
@@ -1905,6 +1986,10 @@ void TargetLoweringBase::setMinimumJumpTableEntries(unsigned Val) {
MinimumJumpTableEntries = Val;
}
+unsigned TargetLoweringBase::getMinimumJumpTableDensity(bool OptForSize) const {
+ return OptForSize ? OptsizeJumpTableDensity : JumpTableDensity;
+}
+
unsigned TargetLoweringBase::getMaximumJumpTableSize() const {
return MaximumJumpTableSize;
}
@@ -1921,11 +2006,7 @@ void TargetLoweringBase::setMaximumJumpTableSize(unsigned Val) {
/// override the target defaults.
static StringRef getRecipEstimateForFunc(MachineFunction &MF) {
const Function *F = MF.getFunction();
- StringRef RecipAttrName = "reciprocal-estimates";
- if (!F->hasFnAttribute(RecipAttrName))
- return StringRef();
-
- return F->getFnAttribute(RecipAttrName).getValueAsString();
+ return F->getFnAttribute("reciprocal-estimates").getValueAsString();
}
/// Construct a string for the given reciprocal operation of the given type.
@@ -2100,3 +2181,7 @@ int TargetLoweringBase::getDivRefinementSteps(EVT VT,
MachineFunction &MF) const {
return getOpRefinementSteps(false, VT, getRecipEstimateForFunc(MF));
}
+
+void TargetLoweringBase::finalizeLowering(MachineFunction &MF) const {
+ MF.getRegInfo().freezeReservedRegs(MF);
+}
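
The two LLVM_FALLTHROUGH annotations added to computeRegisterProperties above mark deliberate fall-through: each case only breaks out once a legal wider type has been found and otherwise continues into the next legalization strategy. A minimal, self-contained sketch of the pattern the macro documents (plain C++17, not LLVM code; the names are illustrative, and LLVM_FALLTHROUGH itself typically expands to [[fallthrough]] or [[clang::fallthrough]] where the compiler supports it, and to nothing otherwise):

#include <cstdio>

enum LegalizeAction { TryWidenVector, SplitVector };

static const char *legalize(LegalizeAction A, bool WiderTypeIsLegal) {
  switch (A) {
  case TryWidenVector:
    if (WiderTypeIsLegal)
      return "widened";
    [[fallthrough]]; // no legal wider type: deliberately handle it as a split
  case SplitVector:
    return "split";
  }
  return "unreachable";
}

int main() {
  std::printf("%s %s\n", legalize(TryWidenVector, true),
              legalize(TryWidenVector, false)); // prints: widened split
}
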
diff --git a/gnu/llvm/lib/MC/MCAsmInfoELF.cpp b/gnu/llvm/lib/MC/MCAsmInfoELF.cpp
index 4394e17f995..5839ca70890 100644
--- a/gnu/llvm/lib/MC/MCAsmInfoELF.cpp
+++ b/gnu/llvm/lib/MC/MCAsmInfoELF.cpp
@@ -1,4 +1,4 @@
-//===-- MCAsmInfoELF.cpp - ELF asm properties -------------------*- C++ -*-===//
+//===- MCAsmInfoELF.cpp - ELF asm properties ------------------------------===//
//
// The LLVM Compiler Infrastructure
//
@@ -13,12 +13,13 @@
//===----------------------------------------------------------------------===//
#include "llvm/MC/MCAsmInfoELF.h"
+#include "llvm/BinaryFormat/ELF.h"
#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCSectionELF.h"
-#include "llvm/Support/ELF.h"
+
using namespace llvm;
-void MCAsmInfoELF::anchor() { }
+void MCAsmInfoELF::anchor() {}
MCSection *MCAsmInfoELF::getNonexecutableStackSection(MCContext &Ctx) const {
if (!UsesNonexecutableStackSection)
@@ -31,5 +32,4 @@ MCAsmInfoELF::MCAsmInfoELF() {
WeakRefDirective = "\t.weak\t";
PrivateGlobalPrefix = ".L";
PrivateLabelPrefix = ".L";
- UsesNonexecutableStackSection = false;
}
diff --git a/gnu/llvm/lib/MC/MCELFStreamer.cpp b/gnu/llvm/lib/MC/MCELFStreamer.cpp
index fd0e3452071..91d6751bfe6 100644
--- a/gnu/llvm/lib/MC/MCELFStreamer.cpp
+++ b/gnu/llvm/lib/MC/MCELFStreamer.cpp
@@ -12,29 +12,30 @@
//===----------------------------------------------------------------------===//
#include "llvm/MC/MCELFStreamer.h"
-#include "llvm/ADT/STLExtras.h"
-#include "llvm/ADT/SmallPtrSet.h"
+#include "llvm/ADT/SmallString.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/BinaryFormat/ELF.h"
#include "llvm/MC/MCAsmBackend.h"
-#include "llvm/MC/MCAsmLayout.h"
#include "llvm/MC/MCAsmInfo.h"
#include "llvm/MC/MCAssembler.h"
#include "llvm/MC/MCCodeEmitter.h"
#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCExpr.h"
-#include "llvm/MC/MCInst.h"
+#include "llvm/MC/MCFixup.h"
+#include "llvm/MC/MCFragment.h"
#include "llvm/MC/MCObjectFileInfo.h"
-#include "llvm/MC/MCObjectStreamer.h"
#include "llvm/MC/MCObjectWriter.h"
#include "llvm/MC/MCSection.h"
#include "llvm/MC/MCSectionELF.h"
-#include "llvm/MC/MCSymbolELF.h"
+#include "llvm/MC/MCStreamer.h"
#include "llvm/MC/MCSymbol.h"
-#include "llvm/MC/MCValue.h"
-#include "llvm/Support/Debug.h"
-#include "llvm/Support/ELF.h"
+#include "llvm/MC/MCSymbolELF.h"
+#include "llvm/Support/Casting.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/TargetRegistry.h"
#include "llvm/Support/raw_ostream.h"
+#include <cassert>
+#include <cstdint>
using namespace llvm;
@@ -42,9 +43,6 @@ bool MCELFStreamer::isBundleLocked() const {
return getCurrentSectionOnly()->isBundleLocked();
}
-MCELFStreamer::~MCELFStreamer() {
-}
-
void MCELFStreamer::mergeFragment(MCDataFragment *DF,
MCDataFragment *EF) {
MCAssembler &Assembler = getAssembler();
@@ -98,11 +96,19 @@ void MCELFStreamer::InitSections(bool NoExecStack) {
}
}
-void MCELFStreamer::EmitLabel(MCSymbol *S) {
+void MCELFStreamer::EmitLabel(MCSymbol *S, SMLoc Loc) {
auto *Symbol = cast<MCSymbolELF>(S);
- assert(Symbol->isUndefined() && "Cannot define a symbol twice!");
+ MCObjectStreamer::EmitLabel(Symbol, Loc);
+
+ const MCSectionELF &Section =
+ static_cast<const MCSectionELF &>(*getCurrentSectionOnly());
+ if (Section.getFlags() & ELF::SHF_TLS)
+ Symbol->setType(ELF::STT_TLS);
+}
- MCObjectStreamer::EmitLabel(Symbol);
+void MCELFStreamer::EmitLabel(MCSymbol *S, SMLoc Loc, MCFragment *F) {
+ auto *Symbol = cast<MCSymbolELF>(S);
+ MCObjectStreamer::EmitLabel(Symbol, Loc, F);
const MCSectionELF &Section =
static_cast<const MCSectionELF &>(*getCurrentSectionOnly());
@@ -150,17 +156,8 @@ void MCELFStreamer::ChangeSection(MCSection *Section,
if (Grp)
Asm.registerSymbol(*Grp);
- this->MCObjectStreamer::ChangeSection(Section, Subsection);
- MCContext &Ctx = getContext();
- auto *Begin = cast_or_null<MCSymbolELF>(Section->getBeginSymbol());
- if (!Begin) {
- Begin = Ctx.getOrCreateSectionSymbol(*SectionELF);
- Section->setBeginSymbol(Begin);
- }
- if (Begin->isUndefined()) {
- Asm.registerSymbol(*Begin);
- Begin->setType(ELF::STT_SECTION);
- }
+ changeSectionImpl(Section, Subsection);
+ Asm.registerSymbol(*Section->getBeginSymbol());
}
void MCELFStreamer::EmitWeakReference(MCSymbol *Alias, const MCSymbol *Symbol) {
@@ -364,13 +361,6 @@ void MCELFStreamer::EmitValueToAlignment(unsigned ByteAlignment,
ValueSize, MaxBytesToEmit);
}
-// Add a symbol for the file name of this module. They start after the
-// null symbol and don't count as normal symbol, i.e. a non-STT_FILE symbol
-// with the same name may appear.
-void MCELFStreamer::EmitFileDirective(StringRef Filename) {
- getAssembler().addFileName(Filename);
-}
-
void MCELFStreamer::EmitIdent(StringRef IdentString) {
MCSection *Comment = getAssembler().getContext().getELFSection(
".comment", ELF::SHT_PROGBITS, ELF::SHF_MERGE | ELF::SHF_STRINGS, 1, "");
@@ -633,15 +623,6 @@ void MCELFStreamer::FinishImpl() {
this->MCObjectStreamer::FinishImpl();
}
-MCStreamer *llvm::createELFStreamer(MCContext &Context, MCAsmBackend &MAB,
- raw_pwrite_stream &OS, MCCodeEmitter *CE,
- bool RelaxAll) {
- MCELFStreamer *S = new MCELFStreamer(Context, MAB, OS, CE);
- if (RelaxAll)
- S->getAssembler().setRelaxAll(true);
- return S;
-}
-
void MCELFStreamer::EmitThumbFunc(MCSymbol *Func) {
llvm_unreachable("Generic ELF doesn't support this directive");
}
@@ -650,22 +631,6 @@ void MCELFStreamer::EmitSymbolDesc(MCSymbol *Symbol, unsigned DescValue) {
llvm_unreachable("ELF doesn't support this directive");
}
-void MCELFStreamer::BeginCOFFSymbolDef(const MCSymbol *Symbol) {
- llvm_unreachable("ELF doesn't support this directive");
-}
-
-void MCELFStreamer::EmitCOFFSymbolStorageClass(int StorageClass) {
- llvm_unreachable("ELF doesn't support this directive");
-}
-
-void MCELFStreamer::EmitCOFFSymbolType(int Type) {
- llvm_unreachable("ELF doesn't support this directive");
-}
-
-void MCELFStreamer::EndCOFFSymbolDef() {
- llvm_unreachable("ELF doesn't support this directive");
-}
-
void MCELFStreamer::EmitZerofill(MCSection *Section, MCSymbol *Symbol,
uint64_t Size, unsigned ByteAlignment) {
llvm_unreachable("ELF doesn't support this directive");
@@ -675,3 +640,12 @@ void MCELFStreamer::EmitTBSSSymbol(MCSection *Section, MCSymbol *Symbol,
uint64_t Size, unsigned ByteAlignment) {
llvm_unreachable("ELF doesn't support this directive");
}
+
+MCStreamer *llvm::createELFStreamer(MCContext &Context, MCAsmBackend &MAB,
+ raw_pwrite_stream &OS, MCCodeEmitter *CE,
+ bool RelaxAll) {
+ MCELFStreamer *S = new MCELFStreamer(Context, MAB, OS, CE);
+ if (RelaxAll)
+ S->getAssembler().setRelaxAll(true);
+ return S;
+}
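
One behavioural change that is easy to miss in the EmitLabel hunks above: both overloads now inspect the flags of the current section and mark any label defined inside an SHF_TLS section as STT_TLS, so thread-local symbols get the correct ELF type without an explicit .type directive. A minimal sketch of that rule in plain C++ (the struct and function names are illustrative, not the MC API; only the two ELF constants are real):

#include <cstdio>

namespace ELF {
constexpr unsigned SHF_TLS = 0x400; // section holds thread-local data
constexpr unsigned STT_TLS = 6;     // symbol is a thread-local entity
} // namespace ELF

struct Section { unsigned Flags = 0; };
struct Symbol  { unsigned Type = 0; };

static void emitLabel(Symbol &Sym, const Section &Current) {
  if (Current.Flags & ELF::SHF_TLS)
    Sym.Type = ELF::STT_TLS;
}

int main() {
  Section TData{ELF::SHF_TLS};
  Symbol S;
  emitLabel(S, TData);
  std::printf("type = %u\n", S.Type); // prints 6, i.e. STT_TLS
}
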
diff --git a/gnu/llvm/lib/MC/MCParser/AsmParser.cpp b/gnu/llvm/lib/MC/MCParser/AsmParser.cpp
index 602162c351b..6bacdedeedc 100644
--- a/gnu/llvm/lib/MC/MCParser/AsmParser.cpp
+++ b/gnu/llvm/lib/MC/MCParser/AsmParser.cpp
@@ -15,12 +15,13 @@
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/None.h"
+#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/SmallVector.h"
-#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/StringMap.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/Twine.h"
+#include "llvm/BinaryFormat/Dwarf.h"
#include "llvm/MC/MCAsmInfo.h"
#include "llvm/MC/MCCodeView.h"
#include "llvm/MC/MCContext.h"
@@ -35,6 +36,7 @@
#include "llvm/MC/MCParser/AsmLexer.h"
#include "llvm/MC/MCParser/MCAsmLexer.h"
#include "llvm/MC/MCParser/MCAsmParser.h"
+#include "llvm/MC/MCParser/MCAsmParserExtension.h"
#include "llvm/MC/MCParser/MCAsmParserUtils.h"
#include "llvm/MC/MCParser/MCParsedAsmOperand.h"
#include "llvm/MC/MCParser/MCTargetAsmParser.h"
@@ -42,10 +44,10 @@
#include "llvm/MC/MCSection.h"
#include "llvm/MC/MCStreamer.h"
#include "llvm/MC/MCSymbol.h"
+#include "llvm/MC/MCTargetOptions.h"
#include "llvm/MC/MCValue.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
-#include "llvm/Support/Dwarf.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/MemoryBuffer.h"
@@ -55,6 +57,7 @@
#include <algorithm>
#include <cassert>
#include <cctype>
+#include <climits>
#include <cstddef>
#include <cstdint>
#include <deque>
@@ -67,7 +70,7 @@
using namespace llvm;
-MCAsmParserSemaCallback::~MCAsmParserSemaCallback() {}
+MCAsmParserSemaCallback::~MCAsmParserSemaCallback() = default;
static cl::opt<unsigned> AsmMacroMaxNestingDepth(
"asm-macro-max-nesting-depth", cl::init(20), cl::Hidden,
@@ -82,10 +85,10 @@ typedef std::vector<MCAsmMacroArgument> MCAsmMacroArguments;
struct MCAsmMacroParameter {
StringRef Name;
MCAsmMacroArgument Value;
- bool Required;
- bool Vararg;
+ bool Required = false;
+ bool Vararg = false;
- MCAsmMacroParameter() : Required(false), Vararg(false) {}
+ MCAsmMacroParameter() = default;
};
typedef std::vector<MCAsmMacroParameter> MCAsmMacroParameters;
@@ -124,23 +127,20 @@ struct ParseStatementInfo {
SmallVector<std::unique_ptr<MCParsedAsmOperand>, 8> ParsedOperands;
/// \brief The opcode from the last parsed instruction.
- unsigned Opcode;
+ unsigned Opcode = ~0U;
/// \brief Was there an error parsing the inline assembly?
- bool ParseError;
+ bool ParseError = false;
- SmallVectorImpl<AsmRewrite> *AsmRewrites;
+ SmallVectorImpl<AsmRewrite> *AsmRewrites = nullptr;
- ParseStatementInfo() : Opcode(~0U), ParseError(false), AsmRewrites(nullptr) {}
+ ParseStatementInfo() = delete;
ParseStatementInfo(SmallVectorImpl<AsmRewrite> *rewrites)
- : Opcode(~0), ParseError(false), AsmRewrites(rewrites) {}
+ : AsmRewrites(rewrites) {}
};
/// \brief The concrete assembly parser instance.
class AsmParser : public MCAsmParser {
- AsmParser(const AsmParser &) = delete;
- void operator=(const AsmParser &) = delete;
-
private:
AsmLexer Lexer;
MCContext &Ctx;
@@ -199,17 +199,19 @@ private:
unsigned LastQueryLine;
/// AssemblerDialect. ~OU means unset value and use value provided by MAI.
- unsigned AssemblerDialect;
+ unsigned AssemblerDialect = ~0U;
/// \brief is Darwin compatibility enabled?
- bool IsDarwin;
+ bool IsDarwin = false;
/// \brief Are we parsing ms-style inline assembly?
- bool ParsingInlineAsm;
+ bool ParsingInlineAsm = false;
public:
AsmParser(SourceMgr &SM, MCContext &Ctx, MCStreamer &Out,
- const MCAsmInfo &MAI);
+ const MCAsmInfo &MAI, unsigned CB);
+ AsmParser(const AsmParser &) = delete;
+ AsmParser &operator=(const AsmParser &) = delete;
~AsmParser() override;
bool Run(bool NoInitialTextSection, bool NoFinalize = false) override;
@@ -223,7 +225,6 @@ public:
DirectiveKindMap[Directive] = DirectiveKindMap[Alias];
}
-public:
/// @name MCAsmParser Interface
/// {
@@ -258,7 +259,7 @@ public:
bool parseMSInlineAsm(void *AsmLoc, std::string &AsmString,
unsigned &NumOutputs, unsigned &NumInputs,
- SmallVectorImpl<std::pair<void *,bool> > &OpDecls,
+ SmallVectorImpl<std::pair<void *,bool>> &OpDecls,
SmallVectorImpl<std::string> &Constraints,
SmallVectorImpl<std::string> &Clobbers,
const MCInstrInfo *MII, const MCInstPrinter *IP,
@@ -286,6 +287,8 @@ public:
/// }
private:
+ bool isAltmacroString(SMLoc &StrLoc, SMLoc &EndLoc);
+ void altMacroString(StringRef AltMacroStr, std::string &Res);
bool parseStatement(ParseStatementInfo &Info,
MCAsmParserSemaCallback *SI);
bool parseCurlyBlockScope(SmallVectorImpl<AsmRewrite>& AsmStrRewrites);
@@ -411,7 +414,7 @@ private:
DK_CFI_REMEMBER_STATE, DK_CFI_RESTORE_STATE, DK_CFI_SAME_VALUE,
DK_CFI_RESTORE, DK_CFI_ESCAPE, DK_CFI_SIGNAL_FRAME, DK_CFI_UNDEFINED,
DK_CFI_REGISTER, DK_CFI_WINDOW_SAVE,
- DK_MACROS_ON, DK_MACROS_OFF,
+ DK_MACROS_ON, DK_MACROS_OFF, DK_ALTMACRO, DK_NOALTMACRO,
DK_MACRO, DK_EXITM, DK_ENDM, DK_ENDMACRO, DK_PURGEM,
DK_SLEB128, DK_ULEB128,
DK_ERR, DK_ERROR, DK_WARNING,
@@ -483,7 +486,8 @@ private:
bool parseDirectiveEndMacro(StringRef Directive);
bool parseDirectiveMacro(SMLoc DirectiveLoc);
bool parseDirectiveMacrosOnOff(StringRef Directive);
-
+ // alternate macro mode directives
+ bool parseDirectiveAltmacro(StringRef Directive);
// ".bundle_align_mode"
bool parseDirectiveBundleAlignMode();
// ".bundle_lock"
@@ -572,11 +576,9 @@ extern MCAsmParserExtension *createCOFFAsmParser();
enum { DEFAULT_ADDRSPACE = 0 };
AsmParser::AsmParser(SourceMgr &SM, MCContext &Ctx, MCStreamer &Out,
- const MCAsmInfo &MAI)
+ const MCAsmInfo &MAI, unsigned CB = 0)
: Lexer(MAI), Ctx(Ctx), Out(Out), MAI(MAI), SrcMgr(SM),
- PlatformParser(nullptr), CurBuffer(SM.getMainFileID()),
- MacrosEnabledFlag(true), CppHashInfo(), AssemblerDialect(~0U),
- IsDarwin(false), ParsingInlineAsm(false) {
+ CurBuffer(CB ? CB : SM.getMainFileID()), MacrosEnabledFlag(true) {
HadError = false;
// Save the old handler.
SavedDiagHandler = SrcMgr.getDiagHandler();
@@ -597,6 +599,9 @@ AsmParser::AsmParser(SourceMgr &SM, MCContext &Ctx, MCStreamer &Out,
case MCObjectFileInfo::IsELF:
PlatformParser.reset(createELFAsmParser());
break;
+ case MCObjectFileInfo::IsWasm:
+ llvm_unreachable("Wasm parsing not supported yet");
+ break;
}
PlatformParser->Initialize(*this);
@@ -608,6 +613,10 @@ AsmParser::AsmParser(SourceMgr &SM, MCContext &Ctx, MCStreamer &Out,
AsmParser::~AsmParser() {
assert((HadError || ActiveMacros.empty()) &&
"Unexpected active macro instantiation!");
+
+ // Restore the saved diagnostics handler and context for use during
+ // finalization.
+ SrcMgr.setDiagHandler(SavedDiagHandler, SavedDiagContext);
}
void AsmParser::printMacroInstantiations() {
@@ -694,7 +703,7 @@ const AsmToken &AsmParser::Lex() {
// if it's a end of statement with a comment in it
if (getTok().is(AsmToken::EndOfStatement)) {
// if this is a line comment output it.
- if (getTok().getString().front() != '\n' &&
+ if (!getTok().getString().empty() && getTok().getString().front() != '\n' &&
getTok().getString().front() != '\r' && MAI.preserveAsmComments())
Out.addExplicitComment(Twine(getTok().getString()));
}
@@ -731,6 +740,7 @@ bool AsmParser::Run(bool NoInitialTextSection, bool NoFinalize) {
HadError = false;
AsmCond StartingCondState = TheCondState;
+ SmallVector<AsmRewrite, 4> AsmStrRewrites;
StringRef Filename = getContext().getMainFileName();
@@ -755,7 +765,7 @@ bool AsmParser::Run(bool NoInitialTextSection, bool NoFinalize) {
// While we have input, parse each statement.
while (Lexer.isNot(AsmToken::Eof)) {
- ParseStatementInfo Info;
+ ParseStatementInfo Info(&AsmStrRewrites);
if (!parseStatement(Info, nullptr))
continue;
@@ -923,7 +933,7 @@ bool AsmParser::parsePrimaryExpr(const MCExpr *&Res, SMLoc &EndLoc) {
Lex(); // Eat the operator.
if (parsePrimaryExpr(Res, EndLoc))
return true;
- Res = MCUnaryExpr::createLNot(Res, getContext());
+ Res = MCUnaryExpr::createLNot(Res, getContext(), FirstTokenLoc);
return false;
case AsmToken::Dollar:
case AsmToken::At:
@@ -984,7 +994,7 @@ bool AsmParser::parsePrimaryExpr(const MCExpr *&Res, SMLoc &EndLoc) {
MCSymbolRefExpr::VariantKind Variant = MCSymbolRefExpr::VK_None;
// Lookup the symbol variant if used.
- if (Split.second.size()) {
+ if (!Split.second.empty()) {
Variant = MCSymbolRefExpr::getVariantKindForName(Split.second);
if (Variant != MCSymbolRefExpr::VK_Invalid) {
SymbolName = Split.first;
@@ -1010,7 +1020,7 @@ bool AsmParser::parsePrimaryExpr(const MCExpr *&Res, SMLoc &EndLoc) {
}
// Otherwise create a symbol ref.
- Res = MCSymbolRefExpr::create(Sym, Variant, getContext());
+ Res = MCSymbolRefExpr::create(Sym, Variant, getContext(), FirstTokenLoc);
return false;
}
case AsmToken::BigNum:
@@ -1076,19 +1086,19 @@ bool AsmParser::parsePrimaryExpr(const MCExpr *&Res, SMLoc &EndLoc) {
Lex(); // Eat the operator.
if (parsePrimaryExpr(Res, EndLoc))
return true;
- Res = MCUnaryExpr::createMinus(Res, getContext());
+ Res = MCUnaryExpr::createMinus(Res, getContext(), FirstTokenLoc);
return false;
case AsmToken::Plus:
Lex(); // Eat the operator.
if (parsePrimaryExpr(Res, EndLoc))
return true;
- Res = MCUnaryExpr::createPlus(Res, getContext());
+ Res = MCUnaryExpr::createPlus(Res, getContext(), FirstTokenLoc);
return false;
case AsmToken::Tilde:
Lex(); // Eat the operator.
if (parsePrimaryExpr(Res, EndLoc))
return true;
- Res = MCUnaryExpr::createNot(Res, getContext());
+ Res = MCUnaryExpr::createNot(Res, getContext(), FirstTokenLoc);
return false;
// MIPS unary expression operators. The lexer won't generate these tokens if
// MCAsmInfo::HasMipsExpressions is false for the target.
@@ -1189,6 +1199,42 @@ AsmParser::applyModifierToExpr(const MCExpr *E,
llvm_unreachable("Invalid expression kind!");
}
+/// This function checks whether the next token is a <string> type or arithmetic.
+/// A string that begins with the character '<' must end with the character '>';
+/// otherwise it is arithmetic.
+/// If the function returns true, the EndLoc argument is filled with the
+/// location of the last character pointed to, i.e. the terminating '>'
+/// character.
+
+/// There is a gap between the AltMacro documentation and the single-quote
+/// implementation. GCC does not fully support this feature, so we do not support it either.
+/// TODO: add single quotes as strings.
+bool AsmParser::isAltmacroString(SMLoc &StrLoc, SMLoc &EndLoc) {
+ assert((StrLoc.getPointer() != NULL) &&
+ "Argument to the function cannot be a NULL value");
+ const char *CharPtr = StrLoc.getPointer();
+ while ((*CharPtr != '>') && (*CharPtr != '\n') &&
+ (*CharPtr != '\r') && (*CharPtr != '\0')){
+ if(*CharPtr == '!')
+ CharPtr++;
+ CharPtr++;
+ }
+ if (*CharPtr == '>') {
+ EndLoc = StrLoc.getFromPointer(CharPtr + 1);
+ return true;
+ }
+ return false;
+}
+
+/// \brief Create a string with the '!' escape characters removed.
+void AsmParser::altMacroString(StringRef AltMacroStr,std::string &Res) {
+ for (size_t Pos = 0; Pos < AltMacroStr.size(); Pos++) {
+ if (AltMacroStr[Pos] == '!')
+ Pos++;
+ Res += AltMacroStr[Pos];
+ }
+}
+
/// \brief Parse an expression and return it.
///
/// expr ::= expr &&,|| expr -> lowest.
@@ -1441,6 +1487,7 @@ unsigned AsmParser::getBinOpPrecedence(AsmToken::TokenKind K,
/// Res contains the LHS of the expression on input.
bool AsmParser::parseBinOpRHS(unsigned Precedence, const MCExpr *&Res,
SMLoc &EndLoc) {
+ SMLoc StartLoc = Lexer.getLoc();
while (true) {
MCBinaryExpr::Opcode Kind = MCBinaryExpr::Add;
unsigned TokPrec = getBinOpPrecedence(Lexer.getKind(), Kind);
@@ -1465,7 +1512,7 @@ bool AsmParser::parseBinOpRHS(unsigned Precedence, const MCExpr *&Res,
return true;
// Merge LHS and RHS according to operator.
- Res = MCBinaryExpr::create(Kind, Res, RHS, getContext());
+ Res = MCBinaryExpr::create(Kind, Res, RHS, getContext(), StartLoc);
}
}
@@ -1481,7 +1528,7 @@ bool AsmParser::parseStatement(ParseStatementInfo &Info,
Lex();
if (Lexer.is(AsmToken::EndOfStatement)) {
// if this is a line comment we can drop it safely
- if (getTok().getString().front() == '\r' ||
+ if (getTok().getString().empty() || getTok().getString().front() == '\r' ||
getTok().getString().front() == '\n')
Out.AddBlankLine();
Lex();
@@ -1622,7 +1669,7 @@ bool AsmParser::parseStatement(ParseStatementInfo &Info,
if (ParsingInlineAsm && SI) {
StringRef RewrittenLabel =
SI->LookupInlineAsmLabel(IDVal, getSourceManager(), IDLoc, true);
- assert(RewrittenLabel.size() &&
+ assert(!RewrittenLabel.empty() &&
"We should have an internal name here.");
Info.AsmRewrites->emplace_back(AOK_Label, IDLoc, IDVal.size(),
RewrittenLabel);
@@ -1631,12 +1678,6 @@ bool AsmParser::parseStatement(ParseStatementInfo &Info,
Sym = getContext().getOrCreateSymbol(IDVal);
} else
Sym = Ctx.createDirectionalLocalSymbol(LocalLabelVal);
-
- Sym->redefineIfPossible();
-
- if (!Sym->isUndefined() || Sym->isVariable())
- return Error(IDLoc, "invalid symbol redefinition");
-
// End of Labels should be treated as end of line for lexing
// purposes but that information is not available to the Lexer who
// does not understand Labels. This may cause us to see a Hash
@@ -1654,8 +1695,8 @@ bool AsmParser::parseStatement(ParseStatementInfo &Info,
}
// Emit the label.
- if (!ParsingInlineAsm)
- Out.EmitLabel(Sym);
+ if (!getTargetParser().isParsingInlineAsm())
+ Out.EmitLabel(Sym, IDLoc);
// If we are generating dwarf for assembly source files then gather the
// info to make a dwarf label entry for this label if needed.
@@ -1759,8 +1800,8 @@ bool AsmParser::parseStatement(ParseStatementInfo &Info,
case DK_8BYTE:
return parseDirectiveValue(IDVal, 8);
case DK_DC_A:
- return parseDirectiveValue(IDVal,
- getContext().getAsmInfo()->getPointerSize());
+ return parseDirectiveValue(
+ IDVal, getContext().getAsmInfo()->getCodePointerSize());
case DK_OCTA:
return parseDirectiveOctaValue(IDVal);
case DK_SINGLE:
@@ -1925,6 +1966,9 @@ bool AsmParser::parseStatement(ParseStatementInfo &Info,
return parseDirectiveMacrosOnOff(IDVal);
case DK_MACRO:
return parseDirectiveMacro(IDLoc);
+ case DK_ALTMACRO:
+ case DK_NOALTMACRO:
+ return parseDirectiveAltmacro(IDVal);
case DK_EXITM:
return parseDirectiveExitMacro(IDVal);
case DK_ENDM:
@@ -1984,7 +2028,7 @@ bool AsmParser::parseStatement(ParseStatementInfo &Info,
if (ParsingInlineAsm && (IDVal == "align" || IDVal == "ALIGN"))
return parseDirectiveMSAlign(IDLoc, Info);
- if (ParsingInlineAsm && (IDVal == "even"))
+ if (ParsingInlineAsm && (IDVal == "even" || IDVal == "EVEN"))
Info.AsmRewrites->emplace_back(AOK_EVEN, IDLoc, 4);
if (checkForValidSection())
return true;
@@ -2030,7 +2074,7 @@ bool AsmParser::parseStatement(ParseStatementInfo &Info,
// If we previously parsed a cpp hash file line comment then make sure the
// current Dwarf File is for the CppHashFilename if not then emit the
// Dwarf File table for it and adjust the line number for the .loc.
- if (CppHashInfo.Filename.size()) {
+ if (!CppHashInfo.Filename.empty()) {
unsigned FileNumber = getStreamer().EmitDwarfFileDirective(
0, StringRef(), CppHashInfo.Filename);
getContext().setGenDwarfFileNumber(FileNumber);
@@ -2061,9 +2105,9 @@ bool AsmParser::parseStatement(ParseStatementInfo &Info,
// If parsing succeeded, match the instruction.
if (!ParseHadError) {
uint64_t ErrorInfo;
- if (getTargetParser().MatchAndEmitInstruction(IDLoc, Info.Opcode,
- Info.ParsedOperands, Out,
- ErrorInfo, ParsingInlineAsm))
+ if (getTargetParser().MatchAndEmitInstruction(
+ IDLoc, Info.Opcode, Info.ParsedOperands, Out, ErrorInfo,
+ getTargetParser().isParsingInlineAsm()))
return true;
}
return false;
@@ -2273,9 +2317,27 @@ bool AsmParser::expandMacro(raw_svector_ostream &OS, StringRef Body,
} else {
bool VarargParameter = HasVararg && Index == (NParameters - 1);
for (const AsmToken &Token : A[Index])
+ // In altmacro mode, an argument can be written as '%expr'.
+ // The prefix '%' evaluates the expression 'expr' and uses the result
+ // as a string (e.g. %(1+2) is replaced with the string "3").
+ // Here, we identify the integer token produced by that absolute expression
+ // evaluation and emit its string representation.
+ if ((Lexer.IsaAltMacroMode()) &&
+ (*(Token.getString().begin()) == '%') && Token.is(AsmToken::Integer))
+ // Emit an integer value to the buffer.
+ OS << Token.getIntVal();
+ // Only a token that was validated as a string and begins with '<'
+ // is treated as an altmacro string.
+ else if ((Lexer.IsaAltMacroMode()) &&
+ (*(Token.getString().begin()) == '<') &&
+ Token.is(AsmToken::String)) {
+ std::string Res;
+ altMacroString(Token.getStringContents(), Res);
+ OS << Res;
+ }
// We expect no quotes around the string's contents when
// parsing for varargs.
- if (Token.getKind() != AsmToken::String || VarargParameter)
+ else if (Token.isNot(AsmToken::String) || VarargParameter)
OS << Token.getString();
else
OS << Token.getStringContents();
@@ -2446,13 +2508,37 @@ bool AsmParser::parseMacroArguments(const MCAsmMacro *M,
NamedParametersFound = true;
}
+ bool Vararg = HasVararg && Parameter == (NParameters - 1);
if (NamedParametersFound && FA.Name.empty())
return Error(IDLoc, "cannot mix positional and keyword arguments");
- bool Vararg = HasVararg && Parameter == (NParameters - 1);
- if (parseMacroArgument(FA.Value, Vararg))
- return true;
+ SMLoc StrLoc = Lexer.getLoc();
+ SMLoc EndLoc;
+ if (Lexer.IsaAltMacroMode() && Lexer.is(AsmToken::Percent)) {
+ const MCExpr *AbsoluteExp;
+ int64_t Value;
+ /// Eat '%'
+ Lex();
+ if (parseExpression(AbsoluteExp, EndLoc))
+ return false;
+ if (!AbsoluteExp->evaluateAsAbsolute(Value))
+ return Error(StrLoc, "expected absolute expression");
+ const char *StrChar = StrLoc.getPointer();
+ const char *EndChar = EndLoc.getPointer();
+ AsmToken newToken(AsmToken::Integer, StringRef(StrChar , EndChar - StrChar), Value);
+ FA.Value.push_back(newToken);
+ } else if (Lexer.IsaAltMacroMode() && Lexer.is(AsmToken::Less) &&
+ isAltmacroString(StrLoc, EndLoc)) {
+ const char *StrChar = StrLoc.getPointer();
+ const char *EndChar = EndLoc.getPointer();
+ jumpToLoc(EndLoc, CurBuffer);
+ /// Eat from '<' to '>'
+ Lex();
+ AsmToken newToken(AsmToken::String, StringRef(StrChar, EndChar - StrChar));
+ FA.Value.push_back(newToken);
+ } else if(parseMacroArgument(FA.Value, Vararg))
+ return true;
unsigned PI = Parameter;
if (!FA.Name.empty()) {
@@ -3844,6 +3930,19 @@ bool AsmParser::parseDirectiveCFIUndefined(SMLoc DirectiveLoc) {
return false;
}
+/// parseDirectiveAltmacro
+/// ::= .altmacro
+/// ::= .noaltmacro
+bool AsmParser::parseDirectiveAltmacro(StringRef Directive) {
+ if (getLexer().isNot(AsmToken::EndOfStatement))
+ return TokError("unexpected token in '" + Directive + "' directive");
+ if (Directive == ".altmacro")
+ getLexer().SetAltMacroMode(true);
+ else
+ getLexer().SetAltMacroMode(false);
+ return false;
+}
+
/// parseDirectiveMacrosOnOff
/// ::= .macros_on
/// ::= .macros_off
@@ -3878,6 +3977,12 @@ bool AsmParser::parseDirectiveMacro(SMLoc DirectiveLoc) {
if (parseIdentifier(Parameter.Name))
return TokError("expected identifier in '.macro' directive");
+ // Emit an error if two (or more) named parameters share the same name
+ for (const MCAsmMacroParameter& CurrParam : Parameters)
+ if (CurrParam.Name.equals(Parameter.Name))
+ return TokError("macro '" + Name + "' has multiple parameters"
+ " named '" + Parameter.Name + "'");
+
if (Lexer.is(AsmToken::Colon)) {
Lex(); // consume ':'
@@ -4196,7 +4301,6 @@ bool AsmParser::parseDirectiveBundleUnlock() {
/// parseDirectiveSpace
/// ::= (.skip | .space) expression [ , expression ]
bool AsmParser::parseDirectiveSpace(StringRef IDVal) {
-
SMLoc NumBytesLoc = Lexer.getLoc();
const MCExpr *NumBytes;
if (checkForValidSection() || parseExpression(NumBytes))
@@ -4292,7 +4396,6 @@ bool AsmParser::parseDirectiveRealDCB(StringRef IDVal, const fltSemantics &Seman
/// parseDirectiveDS
/// ::= .ds.{b, d, l, p, s, w, x} expression
bool AsmParser::parseDirectiveDS(StringRef IDVal, unsigned Size) {
-
SMLoc NumValuesLoc = Lexer.getLoc();
int64_t NumValues;
if (checkForValidSection() || parseAbsoluteExpression(NumValues))
@@ -4421,6 +4524,7 @@ bool AsmParser::parseDirectiveComm(bool IsLocal) {
return Error(Pow2AlignmentLoc, "invalid '.comm' or '.lcomm' directive "
"alignment, can't be less than zero");
+ Sym->redefineIfPossible();
if (!Sym->isUndefined())
return Error(IDLoc, "invalid symbol redefinition");
@@ -4936,6 +5040,8 @@ void AsmParser::initializeDirectiveKindMap() {
DirectiveKindMap[".err"] = DK_ERR;
DirectiveKindMap[".error"] = DK_ERROR;
DirectiveKindMap[".warning"] = DK_WARNING;
+ DirectiveKindMap[".altmacro"] = DK_ALTMACRO;
+ DirectiveKindMap[".noaltmacro"] = DK_NOALTMACRO;
DirectiveKindMap[".reloc"] = DK_RELOC;
DirectiveKindMap[".dc"] = DK_DC;
DirectiveKindMap[".dc.a"] = DK_DC_A;
@@ -5213,7 +5319,7 @@ static int rewritesSort(const AsmRewrite *AsmRewriteA,
bool AsmParser::parseMSInlineAsm(
void *AsmLoc, std::string &AsmString, unsigned &NumOutputs,
- unsigned &NumInputs, SmallVectorImpl<std::pair<void *, bool> > &OpDecls,
+ unsigned &NumInputs, SmallVectorImpl<std::pair<void *, bool>> &OpDecls,
SmallVectorImpl<std::string> &Constraints,
SmallVectorImpl<std::string> &Clobbers, const MCInstrInfo *MII,
const MCInstPrinter *IP, MCAsmParserSemaCallback &SI) {
@@ -5523,6 +5629,7 @@ bool parseAssignmentExpression(StringRef Name, bool allow_redef,
/// \brief Create an MCAsmParser instance.
MCAsmParser *llvm::createMCAsmParser(SourceMgr &SM, MCContext &C,
- MCStreamer &Out, const MCAsmInfo &MAI) {
- return new AsmParser(SM, C, Out, MAI);
+ MCStreamer &Out, const MCAsmInfo &MAI,
+ unsigned CB) {
+ return new AsmParser(SM, C, Out, MAI, CB);
}
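
The new .altmacro support above adds two argument forms: '%expr' passes the value of an absolute expression as its decimal string (so %(1+2) expands to "3"), and '<...>' passes a string in which '!' escapes the following character (the surrounding '<' and '>' are dropped by getStringContents()). A minimal sketch of the '!' unescaping in plain C++, mirroring AsmParser::altMacroString above but not part of the parser:

#include <iostream>
#include <string>

// Remove the '!' escapes from the contents of an altmacro <...> argument.
static std::string unescapeAltMacroString(const std::string &S) {
  std::string Res;
  for (size_t Pos = 0; Pos < S.size(); ++Pos) {
    if (S[Pos] == '!')
      ++Pos;                 // '!' escapes the next character
    if (Pos < S.size())
      Res += S[Pos];
  }
  return Res;
}

int main() {
  // An argument written as <a!>b> arrives here as "a!>b" and expands to "a>b".
  std::cout << unescapeAltMacroString("a!>b") << "\n";
}
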
diff --git a/gnu/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp b/gnu/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
index d95f80c93c1..8e14150318c 100644
--- a/gnu/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
+++ b/gnu/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
@@ -11,9 +11,9 @@
//
//===----------------------------------------------------------------------===//
+#include "AArch64ISelLowering.h"
#include "AArch64CallingConvention.h"
#include "AArch64MachineFunctionInfo.h"
-#include "AArch64ISelLowering.h"
#include "AArch64PerfectShuffle.h"
#include "AArch64RegisterInfo.h"
#include "AArch64Subtarget.h"
@@ -22,13 +22,14 @@
#include "llvm/ADT/APFloat.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/ArrayRef.h"
+#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
-#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/StringSwitch.h"
#include "llvm/ADT/Triple.h"
#include "llvm/ADT/Twine.h"
+#include "llvm/Analysis/VectorUtils.h"
#include "llvm/CodeGen/CallingConvLower.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
@@ -50,10 +51,10 @@
#include "llvm/IR/Function.h"
#include "llvm/IR/GetElementPtrTypeIterator.h"
#include "llvm/IR/GlobalValue.h"
+#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/Intrinsics.h"
-#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/OperandTraits.h"
#include "llvm/IR/Type.h"
@@ -66,6 +67,7 @@
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/KnownBits.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetCallingConv.h"
@@ -90,6 +92,7 @@ using namespace llvm;
STATISTIC(NumTailCalls, "Number of tail calls");
STATISTIC(NumShiftInserts, "Number of vector shift inserts");
+STATISTIC(NumOptimizedImms, "Number of times immediates were optimized");
static cl::opt<bool>
EnableAArch64SlrGeneration("aarch64-shift-insert-generation", cl::Hidden,
@@ -104,6 +107,12 @@ cl::opt<bool> EnableAArch64ELFLocalDynamicTLSGeneration(
cl::desc("Allow AArch64 Local Dynamic TLS code generation"),
cl::init(false));
+static cl::opt<bool>
+EnableOptimizeLogicalImm("aarch64-enable-logical-imm", cl::Hidden,
+ cl::desc("Enable AArch64 logical imm instruction "
+ "optimization"),
+ cl::init(true));
+
/// Value type used for condition codes.
static const MVT MVT_CC = MVT::i32;
@@ -372,7 +381,6 @@ AArch64TargetLowering::AArch64TargetLowering(const TargetMachine &TM,
setOperationAction(ISD::FNEARBYINT, MVT::v4f16, Expand);
setOperationAction(ISD::FNEG, MVT::v4f16, Expand);
setOperationAction(ISD::FPOW, MVT::v4f16, Expand);
- setOperationAction(ISD::FPOWI, MVT::v4f16, Expand);
setOperationAction(ISD::FREM, MVT::v4f16, Expand);
setOperationAction(ISD::FROUND, MVT::v4f16, Expand);
setOperationAction(ISD::FRINT, MVT::v4f16, Expand);
@@ -404,7 +412,6 @@ AArch64TargetLowering::AArch64TargetLowering(const TargetMachine &TM,
setOperationAction(ISD::FNEARBYINT, MVT::v8f16, Expand);
setOperationAction(ISD::FNEG, MVT::v8f16, Expand);
setOperationAction(ISD::FPOW, MVT::v8f16, Expand);
- setOperationAction(ISD::FPOWI, MVT::v8f16, Expand);
setOperationAction(ISD::FREM, MVT::v8f16, Expand);
setOperationAction(ISD::FROUND, MVT::v8f16, Expand);
setOperationAction(ISD::FRINT, MVT::v8f16, Expand);
@@ -544,7 +551,6 @@ AArch64TargetLowering::AArch64TargetLowering(const TargetMachine &TM,
setTargetDAGCombine(ISD::INTRINSIC_VOID);
setTargetDAGCombine(ISD::INTRINSIC_W_CHAIN);
setTargetDAGCombine(ISD::INSERT_VECTOR_ELT);
- setTargetDAGCombine(ISD::EXTRACT_VECTOR_ELT);
MaxStoresPerMemset = MaxStoresPerMemsetOptSize = 8;
MaxStoresPerMemcpy = MaxStoresPerMemcpyOptSize = 4;
@@ -554,8 +560,6 @@ AArch64TargetLowering::AArch64TargetLowering(const TargetMachine &TM,
setSchedulingPreference(Sched::Hybrid);
- // Enable TBZ/TBNZ
- MaskAndBranchFoldingIsLegal = true;
EnableExtLdPromotion = true;
// Set required alignment.
@@ -652,6 +656,19 @@ AArch64TargetLowering::AArch64TargetLowering(const TargetMachine &TM,
setOperationAction(ISD::MUL, MVT::v4i32, Custom);
setOperationAction(ISD::MUL, MVT::v2i64, Custom);
+ // Vector reductions
+ for (MVT VT : MVT::integer_valuetypes()) {
+ setOperationAction(ISD::VECREDUCE_ADD, VT, Custom);
+ setOperationAction(ISD::VECREDUCE_SMAX, VT, Custom);
+ setOperationAction(ISD::VECREDUCE_SMIN, VT, Custom);
+ setOperationAction(ISD::VECREDUCE_UMAX, VT, Custom);
+ setOperationAction(ISD::VECREDUCE_UMIN, VT, Custom);
+ }
+ for (MVT VT : MVT::fp_valuetypes()) {
+ setOperationAction(ISD::VECREDUCE_FMAX, VT, Custom);
+ setOperationAction(ISD::VECREDUCE_FMIN, VT, Custom);
+ }
+
setOperationAction(ISD::ANY_EXTEND, MVT::v4i32, Legal);
setTruncStoreAction(MVT::v2i32, MVT::v2i16, Expand);
// Likewise, narrowing and extending vector loads/stores aren't handled
@@ -707,7 +724,6 @@ void AArch64TargetLowering::addTypeForNEON(MVT VT, MVT PromotedBitwiseVT) {
if (VT == MVT::v2f32 || VT == MVT::v4f32 || VT == MVT::v2f64) {
setOperationAction(ISD::FSIN, VT, Expand);
setOperationAction(ISD::FCOS, VT, Expand);
- setOperationAction(ISD::FPOWI, VT, Expand);
setOperationAction(ISD::FPOW, VT, Expand);
setOperationAction(ISD::FLOG, VT, Expand);
setOperationAction(ISD::FLOG2, VT, Expand);
@@ -751,6 +767,9 @@ void AArch64TargetLowering::addTypeForNEON(MVT VT, MVT PromotedBitwiseVT) {
setOperationAction(ISD::FP_TO_SINT, VT, Custom);
setOperationAction(ISD::FP_TO_UINT, VT, Custom);
+ if (!VT.isFloatingPoint())
+ setOperationAction(ISD::ABS, VT, Legal);
+
// [SU][MIN|MAX] are available for all NEON types apart from i64.
if (!VT.isFloatingPoint() && VT != MVT::v2i64 && VT != MVT::v1i64)
for (unsigned Opcode : {ISD::SMIN, ISD::SMAX, ISD::UMIN, ISD::UMAX})
@@ -788,21 +807,157 @@ EVT AArch64TargetLowering::getSetCCResultType(const DataLayout &, LLVMContext &,
return VT.changeVectorElementTypeToInteger();
}
+static bool optimizeLogicalImm(SDValue Op, unsigned Size, uint64_t Imm,
+ const APInt &Demanded,
+ TargetLowering::TargetLoweringOpt &TLO,
+ unsigned NewOpc) {
+ uint64_t OldImm = Imm, NewImm, Enc;
+ uint64_t Mask = ((uint64_t)(-1LL) >> (64 - Size)), OrigMask = Mask;
+
+ // Return if the immediate is already all zeros, all ones, a bimm32 or a
+ // bimm64.
+ if (Imm == 0 || Imm == Mask ||
+ AArch64_AM::isLogicalImmediate(Imm & Mask, Size))
+ return false;
+
+ unsigned EltSize = Size;
+ uint64_t DemandedBits = Demanded.getZExtValue();
+
+ // Clear bits that are not demanded.
+ Imm &= DemandedBits;
+
+ while (true) {
+ // The goal here is to set the non-demanded bits in a way that minimizes
+ // the number of transitions between 0 and 1. In order to achieve this goal,
+ // we set the non-demanded bits to the value of the preceding demanded bits.
+ // For example, if we have an immediate 0bx10xx0x1 ('x' indicates a
+ // non-demanded bit), we copy bit0 (1) to the least significant 'x',
+ // bit2 (0) to 'xx', and bit6 (1) to the most significant 'x'.
+ // The final result is 0b11000011.
+ uint64_t NonDemandedBits = ~DemandedBits;
+ uint64_t InvertedImm = ~Imm & DemandedBits;
+ uint64_t RotatedImm =
+ ((InvertedImm << 1) | (InvertedImm >> (EltSize - 1) & 1)) &
+ NonDemandedBits;
+ uint64_t Sum = RotatedImm + NonDemandedBits;
+ bool Carry = NonDemandedBits & ~Sum & (1ULL << (EltSize - 1));
+ uint64_t Ones = (Sum + Carry) & NonDemandedBits;
+ NewImm = (Imm | Ones) & Mask;
+
+ // If NewImm or its bitwise NOT is a shifted mask, it is a bitmask immediate
+ // or all-ones or all-zeros, in which case we can stop searching. Otherwise,
+ // we halve the element size and continue the search.
+ if (isShiftedMask_64(NewImm) || isShiftedMask_64(~(NewImm | ~Mask)))
+ break;
+
+ // We cannot shrink the element size any further if it is 2-bits.
+ if (EltSize == 2)
+ return false;
+
+ EltSize /= 2;
+ Mask >>= EltSize;
+ uint64_t Hi = Imm >> EltSize, DemandedBitsHi = DemandedBits >> EltSize;
+
+ // Return if there is mismatch in any of the demanded bits of Imm and Hi.
+ if (((Imm ^ Hi) & (DemandedBits & DemandedBitsHi) & Mask) != 0)
+ return false;
+
+ // Merge the upper and lower halves of Imm and DemandedBits.
+ Imm |= Hi;
+ DemandedBits |= DemandedBitsHi;
+ }
+
+ ++NumOptimizedImms;
+
+ // Replicate the element across the register width.
+ while (EltSize < Size) {
+ NewImm |= NewImm << EltSize;
+ EltSize *= 2;
+ }
+
+ (void)OldImm;
+ assert(((OldImm ^ NewImm) & Demanded.getZExtValue()) == 0 &&
+ "demanded bits should never be altered");
+ assert(OldImm != NewImm && "the new imm shouldn't be equal to the old imm");
+
+ // Create the new constant immediate node.
+ EVT VT = Op.getValueType();
+ SDLoc DL(Op);
+ SDValue New;
+
+ // If the new constant immediate is all-zeros or all-ones, let the target
+ // independent DAG combine optimize this node.
+ if (NewImm == 0 || NewImm == OrigMask) {
+ New = TLO.DAG.getNode(Op.getOpcode(), DL, VT, Op.getOperand(0),
+ TLO.DAG.getConstant(NewImm, DL, VT));
+ // Otherwise, create a machine node so that target independent DAG combine
+ // doesn't undo this optimization.
+ } else {
+ Enc = AArch64_AM::encodeLogicalImmediate(NewImm, Size);
+ SDValue EncConst = TLO.DAG.getTargetConstant(Enc, DL, VT);
+ New = SDValue(
+ TLO.DAG.getMachineNode(NewOpc, DL, VT, Op.getOperand(0), EncConst), 0);
+ }
+
+ return TLO.CombineTo(Op, New);
+}
+
+bool AArch64TargetLowering::targetShrinkDemandedConstant(
+ SDValue Op, const APInt &Demanded, TargetLoweringOpt &TLO) const {
+ // Delay this optimization to as late as possible.
+ if (!TLO.LegalOps)
+ return false;
+
+ if (!EnableOptimizeLogicalImm)
+ return false;
+
+ EVT VT = Op.getValueType();
+ if (VT.isVector())
+ return false;
+
+ unsigned Size = VT.getSizeInBits();
+ assert((Size == 32 || Size == 64) &&
+ "i32 or i64 is expected after legalization.");
+
+ // Exit early if we demand all bits.
+ if (Demanded.countPopulation() == Size)
+ return false;
+
+ unsigned NewOpc;
+ switch (Op.getOpcode()) {
+ default:
+ return false;
+ case ISD::AND:
+ NewOpc = Size == 32 ? AArch64::ANDWri : AArch64::ANDXri;
+ break;
+ case ISD::OR:
+ NewOpc = Size == 32 ? AArch64::ORRWri : AArch64::ORRXri;
+ break;
+ case ISD::XOR:
+ NewOpc = Size == 32 ? AArch64::EORWri : AArch64::EORXri;
+ break;
+ }
+ ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op.getOperand(1));
+ if (!C)
+ return false;
+ uint64_t Imm = C->getZExtValue();
+ return optimizeLogicalImm(Op, Size, Imm, Demanded, TLO, NewOpc);
+}
+
/// computeKnownBitsForTargetNode - Determine which of the bits specified in
-/// Mask are known to be either zero or one and return them in the
-/// KnownZero/KnownOne bitsets.
+/// Mask are known to be either zero or one and return them Known.
void AArch64TargetLowering::computeKnownBitsForTargetNode(
- const SDValue Op, APInt &KnownZero, APInt &KnownOne,
- const SelectionDAG &DAG, unsigned Depth) const {
+ const SDValue Op, KnownBits &Known,
+ const APInt &DemandedElts, const SelectionDAG &DAG, unsigned Depth) const {
switch (Op.getOpcode()) {
default:
break;
case AArch64ISD::CSEL: {
- APInt KnownZero2, KnownOne2;
- DAG.computeKnownBits(Op->getOperand(0), KnownZero, KnownOne, Depth + 1);
- DAG.computeKnownBits(Op->getOperand(1), KnownZero2, KnownOne2, Depth + 1);
- KnownZero &= KnownZero2;
- KnownOne &= KnownOne2;
+ KnownBits Known2;
+ DAG.computeKnownBits(Op->getOperand(0), Known, Depth + 1);
+ DAG.computeKnownBits(Op->getOperand(1), Known2, Depth + 1);
+ Known.Zero &= Known2.Zero;
+ Known.One &= Known2.One;
break;
}
case ISD::INTRINSIC_W_CHAIN: {
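
The comment inside optimizeLogicalImm above describes how the non-demanded bits are filled: each run of non-demanded bits is set to the value of the demanded bit just below it, turning 0bx10xx0x1 into 0b11000011. A self-contained worked example of that single step, re-deriving the same bit manipulation in plain C++ on an 8-bit element (the real code operates on 32/64-bit values and then keeps halving the element size):

#include <cstdint>
#include <cstdio>

static uint64_t fillNonDemandedBits(uint64_t Imm, uint64_t DemandedBits,
                                    unsigned EltSize) {
  uint64_t Mask = (~0ULL) >> (64 - EltSize);
  Imm &= DemandedBits;                          // clear non-demanded bits
  uint64_t NonDemandedBits = ~DemandedBits & Mask;
  uint64_t InvertedImm = ~Imm & DemandedBits & Mask;
  // Rotate the inverted demanded bits up by one so each non-demanded bit
  // can copy the demanded bit directly below it.
  uint64_t RotatedImm =
      ((InvertedImm << 1) | ((InvertedImm >> (EltSize - 1)) & 1)) &
      NonDemandedBits;
  uint64_t Sum = RotatedImm + NonDemandedBits;
  bool Carry = (NonDemandedBits & ~Sum & (1ULL << (EltSize - 1))) != 0;
  uint64_t Ones = (Sum + Carry) & NonDemandedBits;
  return (Imm | Ones) & Mask;
}

int main() {
  // 0bx10xx0x1 from the comment above: bits 6,5,2,0 are demanded (1,0,0,1).
  uint64_t Demanded = 0x65; // 0b01100101
  uint64_t Imm      = 0x41; // 0b01000001
  std::printf("0x%llx\n",
              (unsigned long long)fillNonDemandedBits(Imm, Demanded, 8));
  // prints 0xc3, i.e. 0b11000011 -- the result the comment predicts
}
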
@@ -812,10 +967,10 @@ void AArch64TargetLowering::computeKnownBitsForTargetNode(
default: return;
case Intrinsic::aarch64_ldaxr:
case Intrinsic::aarch64_ldxr: {
- unsigned BitWidth = KnownOne.getBitWidth();
+ unsigned BitWidth = Known.getBitWidth();
EVT VT = cast<MemIntrinsicSDNode>(Op)->getMemoryVT();
unsigned MemBits = VT.getScalarSizeInBits();
- KnownZero |= APInt::getHighBitsSet(BitWidth, BitWidth - MemBits);
+ Known.Zero |= APInt::getHighBitsSet(BitWidth, BitWidth - MemBits);
return;
}
}
@@ -834,15 +989,15 @@ void AArch64TargetLowering::computeKnownBitsForTargetNode(
// bits larger than the element datatype. 32-bit or larget doesn't need
// this as those are legal types and will be handled by isel directly.
MVT VT = Op.getOperand(1).getValueType().getSimpleVT();
- unsigned BitWidth = KnownZero.getBitWidth();
+ unsigned BitWidth = Known.getBitWidth();
if (VT == MVT::v8i8 || VT == MVT::v16i8) {
assert(BitWidth >= 8 && "Unexpected width!");
APInt Mask = APInt::getHighBitsSet(BitWidth, BitWidth - 8);
- KnownZero |= Mask;
+ Known.Zero |= Mask;
} else if (VT == MVT::v4i16 || VT == MVT::v8i16) {
assert(BitWidth >= 16 && "Unexpected width!");
APInt Mask = APInt::getHighBitsSet(BitWidth, BitWidth - 16);
- KnownZero |= Mask;
+ Known.Zero |= Mask;
}
break;
} break;
@@ -2113,8 +2268,8 @@ SDValue AArch64TargetLowering::LowerFSINCOS(SDValue Op,
Entry.Node = Arg;
Entry.Ty = ArgTy;
- Entry.isSExt = false;
- Entry.isZExt = false;
+ Entry.IsSExt = false;
+ Entry.IsZExt = false;
Args.push_back(Entry);
const char *LibcallName =
@@ -2122,10 +2277,11 @@ SDValue AArch64TargetLowering::LowerFSINCOS(SDValue Op,
SDValue Callee =
DAG.getExternalSymbol(LibcallName, getPointerTy(DAG.getDataLayout()));
- StructType *RetTy = StructType::get(ArgTy, ArgTy, nullptr);
+ StructType *RetTy = StructType::get(ArgTy, ArgTy);
TargetLowering::CallLoweringInfo CLI(DAG);
- CLI.setDebugLoc(dl).setChain(DAG.getEntryNode())
- .setCallee(CallingConv::Fast, RetTy, Callee, std::move(Args));
+ CLI.setDebugLoc(dl)
+ .setChain(DAG.getEntryNode())
+ .setLibCallee(CallingConv::Fast, RetTy, Callee, std::move(Args));
std::pair<SDValue, SDValue> CallResult = LowerCallTo(CLI);
return CallResult.first;
@@ -2231,19 +2387,13 @@ static SDValue skipExtensionForVectorMULL(SDNode *N, SelectionDAG &DAG) {
}
static bool isSignExtended(SDNode *N, SelectionDAG &DAG) {
- if (N->getOpcode() == ISD::SIGN_EXTEND)
- return true;
- if (isExtendedBUILD_VECTOR(N, DAG, true))
- return true;
- return false;
+ return N->getOpcode() == ISD::SIGN_EXTEND ||
+ isExtendedBUILD_VECTOR(N, DAG, true);
}
static bool isZeroExtended(SDNode *N, SelectionDAG &DAG) {
- if (N->getOpcode() == ISD::ZERO_EXTEND)
- return true;
- if (isExtendedBUILD_VECTOR(N, DAG, false))
- return true;
- return false;
+ return N->getOpcode() == ISD::ZERO_EXTEND ||
+ isExtendedBUILD_VECTOR(N, DAG, false);
}
static bool isAddSubSExt(SDNode *N, SelectionDAG &DAG) {
@@ -2347,6 +2497,9 @@ SDValue AArch64TargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op,
EVT PtrVT = getPointerTy(DAG.getDataLayout());
return DAG.getNode(AArch64ISD::THREAD_POINTER, dl, PtrVT);
}
+ case Intrinsic::aarch64_neon_abs:
+ return DAG.getNode(ISD::ABS, dl, Op.getValueType(),
+ Op.getOperand(1));
case Intrinsic::aarch64_neon_smax:
return DAG.getNode(ISD::SMAX, dl, Op.getValueType(),
Op.getOperand(1), Op.getOperand(2));
@@ -2465,6 +2618,14 @@ SDValue AArch64TargetLowering::LowerOperation(SDValue Op,
return LowerMUL(Op, DAG);
case ISD::INTRINSIC_WO_CHAIN:
return LowerINTRINSIC_WO_CHAIN(Op, DAG);
+ case ISD::VECREDUCE_ADD:
+ case ISD::VECREDUCE_SMAX:
+ case ISD::VECREDUCE_SMIN:
+ case ISD::VECREDUCE_UMAX:
+ case ISD::VECREDUCE_UMIN:
+ case ISD::VECREDUCE_FMAX:
+ case ISD::VECREDUCE_FMIN:
+ return LowerVECREDUCE(Op, DAG);
}
}
@@ -2489,9 +2650,13 @@ CCAssignFn *AArch64TargetLowering::CCAssignFnForCall(CallingConv::ID CC,
case CallingConv::PreserveMost:
case CallingConv::CXX_FAST_TLS:
case CallingConv::Swift:
+ if (Subtarget->isTargetWindows() && IsVarArg)
+ return CC_AArch64_Win64_VarArg;
if (!Subtarget->isTargetDarwin())
return CC_AArch64_AAPCS;
return IsVarArg ? CC_AArch64_DarwinPCS_VarArg : CC_AArch64_DarwinPCS;
+ case CallingConv::Win64:
+ return IsVarArg ? CC_AArch64_Win64_VarArg : CC_AArch64_AAPCS;
}
}
@@ -2507,6 +2672,7 @@ SDValue AArch64TargetLowering::LowerFormalArguments(
SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
MachineFunction &MF = DAG.getMachineFunction();
MachineFrameInfo &MFI = MF.getFrameInfo();
+ bool IsWin64 = Subtarget->isCallingConvWin64(MF.getFunction()->getCallingConv());
// Assign locations to all of the incoming arguments.
SmallVector<CCValAssign, 16> ArgLocs;
@@ -2663,10 +2829,12 @@ SDValue AArch64TargetLowering::LowerFormalArguments(
// varargs
AArch64FunctionInfo *FuncInfo = MF.getInfo<AArch64FunctionInfo>();
if (isVarArg) {
- if (!Subtarget->isTargetDarwin()) {
+ if (!Subtarget->isTargetDarwin() || IsWin64) {
// The AAPCS variadic function ABI is identical to the non-variadic
// one. As a result there may be more arguments in registers and we should
// save them for future reference.
+ // Win64 variadic functions also pass arguments in registers, but all float
+ // arguments are passed in integer registers.
saveVarArgRegisters(CCInfo, DAG, DL, Chain);
}
@@ -2708,6 +2876,7 @@ void AArch64TargetLowering::saveVarArgRegisters(CCState &CCInfo,
MachineFrameInfo &MFI = MF.getFrameInfo();
AArch64FunctionInfo *FuncInfo = MF.getInfo<AArch64FunctionInfo>();
auto PtrVT = getPointerTy(DAG.getDataLayout());
+ bool IsWin64 = Subtarget->isCallingConvWin64(MF.getFunction()->getCallingConv());
SmallVector<SDValue, 8> MemOps;
@@ -2720,7 +2889,13 @@ void AArch64TargetLowering::saveVarArgRegisters(CCState &CCInfo,
unsigned GPRSaveSize = 8 * (NumGPRArgRegs - FirstVariadicGPR);
int GPRIdx = 0;
if (GPRSaveSize != 0) {
- GPRIdx = MFI.CreateStackObject(GPRSaveSize, 8, false);
+ if (IsWin64) {
+ GPRIdx = MFI.CreateFixedObject(GPRSaveSize, -(int)GPRSaveSize, false);
+ if (GPRSaveSize & 15)
+ // The extra size here, if triggered, will always be 8.
+ MFI.CreateFixedObject(16 - (GPRSaveSize & 15), -(int)alignTo(GPRSaveSize, 16), false);
+ } else
+ GPRIdx = MFI.CreateStackObject(GPRSaveSize, 8, false);
SDValue FIN = DAG.getFrameIndex(GPRIdx, PtrVT);
@@ -2729,7 +2904,11 @@ void AArch64TargetLowering::saveVarArgRegisters(CCState &CCInfo,
SDValue Val = DAG.getCopyFromReg(Chain, DL, VReg, MVT::i64);
SDValue Store = DAG.getStore(
Val.getValue(1), DL, Val, FIN,
- MachinePointerInfo::getStack(DAG.getMachineFunction(), i * 8));
+ IsWin64
+ ? MachinePointerInfo::getFixedStack(DAG.getMachineFunction(),
+ GPRIdx,
+ (i - FirstVariadicGPR) * 8)
+ : MachinePointerInfo::getStack(DAG.getMachineFunction(), i * 8));
MemOps.push_back(Store);
FIN =
DAG.getNode(ISD::ADD, DL, PtrVT, FIN, DAG.getConstant(8, DL, PtrVT));
@@ -2738,7 +2917,7 @@ void AArch64TargetLowering::saveVarArgRegisters(CCState &CCInfo,
FuncInfo->setVarArgsGPRIndex(GPRIdx);
FuncInfo->setVarArgsGPRSize(GPRSaveSize);
- if (Subtarget->hasFPARMv8()) {
+ if (Subtarget->hasFPARMv8() && !IsWin64) {
static const MCPhysReg FPRArgRegs[] = {
AArch64::Q0, AArch64::Q1, AArch64::Q2, AArch64::Q3,
AArch64::Q4, AArch64::Q5, AArch64::Q6, AArch64::Q7};
@@ -3108,9 +3287,7 @@ AArch64TargetLowering::LowerCall(CallLoweringInfo &CLI,
// Adjust the stack pointer for the new arguments...
// These operations are automatically eliminated by the prolog/epilog pass
if (!IsSibCall)
- Chain = DAG.getCALLSEQ_START(Chain, DAG.getIntPtrConstant(NumBytes, DL,
- true),
- DL);
+ Chain = DAG.getCALLSEQ_START(Chain, NumBytes, 0, DL);
SDValue StackPtr = DAG.getCopyFromReg(Chain, DL, AArch64::SP,
getPointerTy(DAG.getDataLayout()));
@@ -3245,30 +3422,26 @@ AArch64TargetLowering::LowerCall(CallLoweringInfo &CLI,
// If the callee is a GlobalAddress/ExternalSymbol node (quite common, every
// direct call is) turn it into a TargetGlobalAddress/TargetExternalSymbol
// node so that legalize doesn't hack it.
- if (getTargetMachine().getCodeModel() == CodeModel::Large &&
- Subtarget->isTargetMachO()) {
- if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) {
+ if (auto *G = dyn_cast<GlobalAddressSDNode>(Callee)) {
+ auto GV = G->getGlobal();
+ if (Subtarget->classifyGlobalFunctionReference(GV, getTargetMachine()) ==
+ AArch64II::MO_GOT) {
+ Callee = DAG.getTargetGlobalAddress(GV, DL, PtrVT, 0, AArch64II::MO_GOT);
+ Callee = DAG.getNode(AArch64ISD::LOADgot, DL, PtrVT, Callee);
+ } else {
const GlobalValue *GV = G->getGlobal();
- bool InternalLinkage = GV->hasInternalLinkage();
- if (InternalLinkage)
- Callee = DAG.getTargetGlobalAddress(GV, DL, PtrVT, 0, 0);
- else {
- Callee =
- DAG.getTargetGlobalAddress(GV, DL, PtrVT, 0, AArch64II::MO_GOT);
- Callee = DAG.getNode(AArch64ISD::LOADgot, DL, PtrVT, Callee);
- }
- } else if (ExternalSymbolSDNode *S =
- dyn_cast<ExternalSymbolSDNode>(Callee)) {
+ Callee = DAG.getTargetGlobalAddress(GV, DL, PtrVT, 0, 0);
+ }
+ } else if (auto *S = dyn_cast<ExternalSymbolSDNode>(Callee)) {
+ if (getTargetMachine().getCodeModel() == CodeModel::Large &&
+ Subtarget->isTargetMachO()) {
const char *Sym = S->getSymbol();
Callee = DAG.getTargetExternalSymbol(Sym, PtrVT, AArch64II::MO_GOT);
Callee = DAG.getNode(AArch64ISD::LOADgot, DL, PtrVT, Callee);
+ } else {
+ const char *Sym = S->getSymbol();
+ Callee = DAG.getTargetExternalSymbol(Sym, PtrVT, 0);
}
- } else if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) {
- const GlobalValue *GV = G->getGlobal();
- Callee = DAG.getTargetGlobalAddress(GV, DL, PtrVT, 0, 0);
- } else if (ExternalSymbolSDNode *S = dyn_cast<ExternalSymbolSDNode>(Callee)) {
- const char *Sym = S->getSymbol();
- Callee = DAG.getTargetExternalSymbol(Sym, PtrVT, 0);
}
// We don't usually want to end the call-sequence here because we would tidy
@@ -3428,11 +3601,75 @@ AArch64TargetLowering::LowerReturn(SDValue Chain, CallingConv::ID CallConv,
// Other Lowering Code
//===----------------------------------------------------------------------===//
+SDValue AArch64TargetLowering::getTargetNode(GlobalAddressSDNode *N, EVT Ty,
+ SelectionDAG &DAG,
+ unsigned Flag) const {
+ return DAG.getTargetGlobalAddress(N->getGlobal(), SDLoc(N), Ty, 0, Flag);
+}
+
+SDValue AArch64TargetLowering::getTargetNode(JumpTableSDNode *N, EVT Ty,
+ SelectionDAG &DAG,
+ unsigned Flag) const {
+ return DAG.getTargetJumpTable(N->getIndex(), Ty, Flag);
+}
+
+SDValue AArch64TargetLowering::getTargetNode(ConstantPoolSDNode *N, EVT Ty,
+ SelectionDAG &DAG,
+ unsigned Flag) const {
+ return DAG.getTargetConstantPool(N->getConstVal(), Ty, N->getAlignment(),
+ N->getOffset(), Flag);
+}
+
+SDValue AArch64TargetLowering::getTargetNode(BlockAddressSDNode* N, EVT Ty,
+ SelectionDAG &DAG,
+ unsigned Flag) const {
+ return DAG.getTargetBlockAddress(N->getBlockAddress(), Ty, 0, Flag);
+}
+
+// (loadGOT sym)
+template <class NodeTy>
+SDValue AArch64TargetLowering::getGOT(NodeTy *N, SelectionDAG &DAG) const {
+ DEBUG(dbgs() << "AArch64TargetLowering::getGOT\n");
+ SDLoc DL(N);
+ EVT Ty = getPointerTy(DAG.getDataLayout());
+ SDValue GotAddr = getTargetNode(N, Ty, DAG, AArch64II::MO_GOT);
+ // FIXME: Once remat is capable of dealing with instructions with register
+ // operands, expand this into two nodes instead of using a wrapper node.
+ return DAG.getNode(AArch64ISD::LOADgot, DL, Ty, GotAddr);
+}
+
+// (wrapper %highest(sym), %higher(sym), %hi(sym), %lo(sym))
+template <class NodeTy>
+SDValue AArch64TargetLowering::getAddrLarge(NodeTy *N, SelectionDAG &DAG)
+ const {
+ DEBUG(dbgs() << "AArch64TargetLowering::getAddrLarge\n");
+ SDLoc DL(N);
+ EVT Ty = getPointerTy(DAG.getDataLayout());
+ const unsigned char MO_NC = AArch64II::MO_NC;
+ return DAG.getNode(
+ AArch64ISD::WrapperLarge, DL, Ty,
+ getTargetNode(N, Ty, DAG, AArch64II::MO_G3),
+ getTargetNode(N, Ty, DAG, AArch64II::MO_G2 | MO_NC),
+ getTargetNode(N, Ty, DAG, AArch64II::MO_G1 | MO_NC),
+ getTargetNode(N, Ty, DAG, AArch64II::MO_G0 | MO_NC));
+}
+
+// (addlow (adrp %hi(sym)) %lo(sym))
+template <class NodeTy>
+SDValue AArch64TargetLowering::getAddr(NodeTy *N, SelectionDAG &DAG) const {
+ DEBUG(dbgs() << "AArch64TargetLowering::getAddr\n");
+ SDLoc DL(N);
+ EVT Ty = getPointerTy(DAG.getDataLayout());
+ SDValue Hi = getTargetNode(N, Ty, DAG, AArch64II::MO_PAGE);
+ SDValue Lo = getTargetNode(N, Ty, DAG,
+ AArch64II::MO_PAGEOFF | AArch64II::MO_NC);
+ SDValue ADRP = DAG.getNode(AArch64ISD::ADRP, DL, Ty, Hi);
+ return DAG.getNode(AArch64ISD::ADDlow, DL, Ty, ADRP, Lo);
+}
+
SDValue AArch64TargetLowering::LowerGlobalAddress(SDValue Op,
SelectionDAG &DAG) const {
- EVT PtrVT = getPointerTy(DAG.getDataLayout());
- SDLoc DL(Op);
- const GlobalAddressSDNode *GN = cast<GlobalAddressSDNode>(Op);
+ GlobalAddressSDNode *GN = cast<GlobalAddressSDNode>(Op);
const GlobalValue *GV = GN->getGlobal();
unsigned char OpFlags =
Subtarget->ClassifyGlobalReference(GV, getTargetMachine());
@@ -3440,32 +3677,15 @@ SDValue AArch64TargetLowering::LowerGlobalAddress(SDValue Op,
assert(cast<GlobalAddressSDNode>(Op)->getOffset() == 0 &&
"unexpected offset in global node");
- // This also catched the large code model case for Darwin.
+ // This also catches the large code model case for Darwin.
if ((OpFlags & AArch64II::MO_GOT) != 0) {
- SDValue GotAddr = DAG.getTargetGlobalAddress(GV, DL, PtrVT, 0, OpFlags);
- // FIXME: Once remat is capable of dealing with instructions with register
- // operands, expand this into two nodes instead of using a wrapper node.
- return DAG.getNode(AArch64ISD::LOADgot, DL, PtrVT, GotAddr);
+ return getGOT(GN, DAG);
}
if (getTargetMachine().getCodeModel() == CodeModel::Large) {
- const unsigned char MO_NC = AArch64II::MO_NC;
- return DAG.getNode(
- AArch64ISD::WrapperLarge, DL, PtrVT,
- DAG.getTargetGlobalAddress(GV, DL, PtrVT, 0, AArch64II::MO_G3),
- DAG.getTargetGlobalAddress(GV, DL, PtrVT, 0, AArch64II::MO_G2 | MO_NC),
- DAG.getTargetGlobalAddress(GV, DL, PtrVT, 0, AArch64II::MO_G1 | MO_NC),
- DAG.getTargetGlobalAddress(GV, DL, PtrVT, 0, AArch64II::MO_G0 | MO_NC));
+ return getAddrLarge(GN, DAG);
} else {
- // Use ADRP/ADD or ADRP/LDR for everything else: the small model on ELF and
- // the only correct model on Darwin.
- SDValue Hi = DAG.getTargetGlobalAddress(GV, DL, PtrVT, 0,
- OpFlags | AArch64II::MO_PAGE);
- unsigned char LoFlags = OpFlags | AArch64II::MO_PAGEOFF | AArch64II::MO_NC;
- SDValue Lo = DAG.getTargetGlobalAddress(GV, DL, PtrVT, 0, LoFlags);
-
- SDValue ADRP = DAG.getNode(AArch64ISD::ADRP, DL, PtrVT, Hi);
- return DAG.getNode(AArch64ISD::ADDlow, DL, PtrVT, ADRP, Lo);
+ return getAddr(GN, DAG);
}
}
@@ -3578,7 +3798,7 @@ SDValue
AArch64TargetLowering::LowerELFGlobalTLSAddress(SDValue Op,
SelectionDAG &DAG) const {
assert(Subtarget->isTargetELF() && "This function expects an ELF target");
- assert(getTargetMachine().getCodeModel() == CodeModel::Small &&
+ assert(Subtarget->useSmallAddressing() &&
"ELF TLS only supported in small memory model");
// Different choices can be made for the maximum size of the TLS area for a
// module. For the small address model, the default TLS size is 16MiB and the
@@ -3679,7 +3899,7 @@ SDValue AArch64TargetLowering::LowerGlobalTLSAddress(SDValue Op,
SelectionDAG &DAG) const {
if (Subtarget->isTargetDarwin())
return LowerDarwinGlobalTLSAddress(Op, DAG);
- else if (Subtarget->isTargetELF())
+ if (Subtarget->isTargetELF())
return LowerELFGlobalTLSAddress(Op, DAG);
llvm_unreachable("Unexpected platform trying to use TLS");
@@ -4242,90 +4462,37 @@ SDValue AArch64TargetLowering::LowerJumpTable(SDValue Op,
// Jump table entries as PC relative offsets. No additional tweaking
// is necessary here. Just get the address of the jump table.
JumpTableSDNode *JT = cast<JumpTableSDNode>(Op);
- EVT PtrVT = getPointerTy(DAG.getDataLayout());
- SDLoc DL(Op);
if (getTargetMachine().getCodeModel() == CodeModel::Large &&
!Subtarget->isTargetMachO()) {
- const unsigned char MO_NC = AArch64II::MO_NC;
- return DAG.getNode(
- AArch64ISD::WrapperLarge, DL, PtrVT,
- DAG.getTargetJumpTable(JT->getIndex(), PtrVT, AArch64II::MO_G3),
- DAG.getTargetJumpTable(JT->getIndex(), PtrVT, AArch64II::MO_G2 | MO_NC),
- DAG.getTargetJumpTable(JT->getIndex(), PtrVT, AArch64II::MO_G1 | MO_NC),
- DAG.getTargetJumpTable(JT->getIndex(), PtrVT,
- AArch64II::MO_G0 | MO_NC));
+ return getAddrLarge(JT, DAG);
}
-
- SDValue Hi =
- DAG.getTargetJumpTable(JT->getIndex(), PtrVT, AArch64II::MO_PAGE);
- SDValue Lo = DAG.getTargetJumpTable(JT->getIndex(), PtrVT,
- AArch64II::MO_PAGEOFF | AArch64II::MO_NC);
- SDValue ADRP = DAG.getNode(AArch64ISD::ADRP, DL, PtrVT, Hi);
- return DAG.getNode(AArch64ISD::ADDlow, DL, PtrVT, ADRP, Lo);
+ return getAddr(JT, DAG);
}
SDValue AArch64TargetLowering::LowerConstantPool(SDValue Op,
SelectionDAG &DAG) const {
ConstantPoolSDNode *CP = cast<ConstantPoolSDNode>(Op);
- EVT PtrVT = getPointerTy(DAG.getDataLayout());
- SDLoc DL(Op);
if (getTargetMachine().getCodeModel() == CodeModel::Large) {
// Use the GOT for the large code model on iOS.
if (Subtarget->isTargetMachO()) {
- SDValue GotAddr = DAG.getTargetConstantPool(
- CP->getConstVal(), PtrVT, CP->getAlignment(), CP->getOffset(),
- AArch64II::MO_GOT);
- return DAG.getNode(AArch64ISD::LOADgot, DL, PtrVT, GotAddr);
+ return getGOT(CP, DAG);
}
-
- const unsigned char MO_NC = AArch64II::MO_NC;
- return DAG.getNode(
- AArch64ISD::WrapperLarge, DL, PtrVT,
- DAG.getTargetConstantPool(CP->getConstVal(), PtrVT, CP->getAlignment(),
- CP->getOffset(), AArch64II::MO_G3),
- DAG.getTargetConstantPool(CP->getConstVal(), PtrVT, CP->getAlignment(),
- CP->getOffset(), AArch64II::MO_G2 | MO_NC),
- DAG.getTargetConstantPool(CP->getConstVal(), PtrVT, CP->getAlignment(),
- CP->getOffset(), AArch64II::MO_G1 | MO_NC),
- DAG.getTargetConstantPool(CP->getConstVal(), PtrVT, CP->getAlignment(),
- CP->getOffset(), AArch64II::MO_G0 | MO_NC));
+ return getAddrLarge(CP, DAG);
} else {
- // Use ADRP/ADD or ADRP/LDR for everything else: the small memory model on
- // ELF, the only valid one on Darwin.
- SDValue Hi =
- DAG.getTargetConstantPool(CP->getConstVal(), PtrVT, CP->getAlignment(),
- CP->getOffset(), AArch64II::MO_PAGE);
- SDValue Lo = DAG.getTargetConstantPool(
- CP->getConstVal(), PtrVT, CP->getAlignment(), CP->getOffset(),
- AArch64II::MO_PAGEOFF | AArch64II::MO_NC);
-
- SDValue ADRP = DAG.getNode(AArch64ISD::ADRP, DL, PtrVT, Hi);
- return DAG.getNode(AArch64ISD::ADDlow, DL, PtrVT, ADRP, Lo);
+ return getAddr(CP, DAG);
}
}
SDValue AArch64TargetLowering::LowerBlockAddress(SDValue Op,
SelectionDAG &DAG) const {
- const BlockAddress *BA = cast<BlockAddressSDNode>(Op)->getBlockAddress();
- EVT PtrVT = getPointerTy(DAG.getDataLayout());
- SDLoc DL(Op);
+ BlockAddressSDNode *BA = cast<BlockAddressSDNode>(Op);
if (getTargetMachine().getCodeModel() == CodeModel::Large &&
!Subtarget->isTargetMachO()) {
- const unsigned char MO_NC = AArch64II::MO_NC;
- return DAG.getNode(
- AArch64ISD::WrapperLarge, DL, PtrVT,
- DAG.getTargetBlockAddress(BA, PtrVT, 0, AArch64II::MO_G3),
- DAG.getTargetBlockAddress(BA, PtrVT, 0, AArch64II::MO_G2 | MO_NC),
- DAG.getTargetBlockAddress(BA, PtrVT, 0, AArch64II::MO_G1 | MO_NC),
- DAG.getTargetBlockAddress(BA, PtrVT, 0, AArch64II::MO_G0 | MO_NC));
+ return getAddrLarge(BA, DAG);
} else {
- SDValue Hi = DAG.getTargetBlockAddress(BA, PtrVT, 0, AArch64II::MO_PAGE);
- SDValue Lo = DAG.getTargetBlockAddress(BA, PtrVT, 0, AArch64II::MO_PAGEOFF |
- AArch64II::MO_NC);
- SDValue ADRP = DAG.getNode(AArch64ISD::ADRP, DL, PtrVT, Hi);
- return DAG.getNode(AArch64ISD::ADDlow, DL, PtrVT, ADRP, Lo);
+ return getAddr(BA, DAG);
}
}
@@ -4342,6 +4509,21 @@ SDValue AArch64TargetLowering::LowerDarwin_VASTART(SDValue Op,
MachinePointerInfo(SV));
}
+SDValue AArch64TargetLowering::LowerWin64_VASTART(SDValue Op,
+ SelectionDAG &DAG) const {
+ AArch64FunctionInfo *FuncInfo =
+ DAG.getMachineFunction().getInfo<AArch64FunctionInfo>();
+
+ SDLoc DL(Op);
+ SDValue FR = DAG.getFrameIndex(FuncInfo->getVarArgsGPRSize() > 0
+ ? FuncInfo->getVarArgsGPRIndex()
+ : FuncInfo->getVarArgsStackIndex(),
+ getPointerTy(DAG.getDataLayout()));
+ const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();
+ return DAG.getStore(Op.getOperand(0), DL, FR, Op.getOperand(1),
+ MachinePointerInfo(SV));
+}
+
SDValue AArch64TargetLowering::LowerAAPCS_VASTART(SDValue Op,
SelectionDAG &DAG) const {
// The layout of the va_list struct is specified in the AArch64 Procedure Call
@@ -4413,8 +4595,14 @@ SDValue AArch64TargetLowering::LowerAAPCS_VASTART(SDValue Op,
SDValue AArch64TargetLowering::LowerVASTART(SDValue Op,
SelectionDAG &DAG) const {
- return Subtarget->isTargetDarwin() ? LowerDarwin_VASTART(Op, DAG)
- : LowerAAPCS_VASTART(Op, DAG);
+ MachineFunction &MF = DAG.getMachineFunction();
+
+ if (Subtarget->isCallingConvWin64(MF.getFunction()->getCallingConv()))
+ return LowerWin64_VASTART(Op, DAG);
+ else if (Subtarget->isTargetDarwin())
+ return LowerDarwin_VASTART(Op, DAG);
+ else
+ return LowerAAPCS_VASTART(Op, DAG);
}
SDValue AArch64TargetLowering::LowerVACOPY(SDValue Op,
@@ -4422,7 +4610,8 @@ SDValue AArch64TargetLowering::LowerVACOPY(SDValue Op,
// AAPCS has three pointers and two ints (= 32 bytes), Darwin has single
// pointer.
SDLoc DL(Op);
- unsigned VaListSize = Subtarget->isTargetDarwin() ? 8 : 32;
+ unsigned VaListSize =
+ Subtarget->isTargetDarwin() || Subtarget->isTargetWindows() ? 8 : 32;
const Value *DestSV = cast<SrcValueSDNode>(Op.getOperand(3))->getValue();
const Value *SrcSV = cast<SrcValueSDNode>(Op.getOperand(4))->getValue();
@@ -4516,7 +4705,12 @@ unsigned AArch64TargetLowering::getRegisterByName(const char* RegName, EVT VT,
SelectionDAG &DAG) const {
unsigned Reg = StringSwitch<unsigned>(RegName)
.Case("sp", AArch64::SP)
+ .Case("x18", AArch64::X18)
+ .Case("w18", AArch64::W18)
.Default(0);
+ if ((Reg == AArch64::X18 || Reg == AArch64::W18) &&
+ !Subtarget->isX18Reserved())
+ Reg = 0;
if (Reg)
return Reg;
report_fatal_error(Twine("Invalid register name \""
@@ -4717,9 +4911,9 @@ SDValue AArch64TargetLowering::getSqrtEstimate(SDValue Operand,
// AArch64 reciprocal square root iteration instruction: 0.5 * (3 - M * N)
for (int i = ExtraSteps; i > 0; --i) {
SDValue Step = DAG.getNode(ISD::FMUL, DL, VT, Estimate, Estimate,
- &Flags);
- Step = DAG.getNode(AArch64ISD::FRSQRTS, DL, VT, Operand, Step, &Flags);
- Estimate = DAG.getNode(ISD::FMUL, DL, VT, Estimate, Step, &Flags);
+ Flags);
+ Step = DAG.getNode(AArch64ISD::FRSQRTS, DL, VT, Operand, Step, Flags);
+ Estimate = DAG.getNode(ISD::FMUL, DL, VT, Estimate, Step, Flags);
}
if (!Reciprocal) {
@@ -4728,7 +4922,7 @@ SDValue AArch64TargetLowering::getSqrtEstimate(SDValue Operand,
SDValue FPZero = DAG.getConstantFP(0.0, DL, VT);
SDValue Eq = DAG.getSetCC(DL, CCVT, Operand, FPZero, ISD::SETEQ);
- Estimate = DAG.getNode(ISD::FMUL, DL, VT, Operand, Estimate, &Flags);
+ Estimate = DAG.getNode(ISD::FMUL, DL, VT, Operand, Estimate, Flags);
// Correct the result if the operand is 0.0.
Estimate = DAG.getNode(VT.isVector() ? ISD::VSELECT : ISD::SELECT, DL,
VT, Eq, Operand, Estimate);
@@ -4757,8 +4951,8 @@ SDValue AArch64TargetLowering::getRecipEstimate(SDValue Operand,
// AArch64 reciprocal iteration instruction: (2 - M * N)
for (int i = ExtraSteps; i > 0; --i) {
SDValue Step = DAG.getNode(AArch64ISD::FRECPS, DL, VT, Operand,
- Estimate, &Flags);
- Estimate = DAG.getNode(ISD::FMUL, DL, VT, Estimate, Step, &Flags);
+ Estimate, Flags);
+ Estimate = DAG.getNode(ISD::FMUL, DL, VT, Estimate, Step, Flags);
}
ExtraSteps = 0;
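Both estimate refinements above are ordinary Newton-Raphson steps: 0.5 * (3 - M * N) for the reciprocal square root and (2 - M * N) for the reciprocal. A scalar sketch of how the ExtraSteps loop converges, using plain floats instead of DAG nodes, could look like:

#include <cmath>
#include <cstdio>

// One Newton-Raphson step for 1/sqrt(X): E' = E * 0.5 * (3 - X * E * E).
static float rsqrtStep(float X, float E) { return E * 0.5f * (3.0f - X * E * E); }

// One Newton-Raphson step for 1/X: E' = E * (2 - X * E).
static float recipStep(float X, float E) { return E * (2.0f - X * E); }

int main() {
  float X = 2.0f;
  float E = 0.7f;               // crude initial estimate of 1/sqrt(2)
  for (int i = 0; i < 3; ++i)   // the ExtraSteps iterations
    E = rsqrtStep(X, E);
  std::printf("%f vs %f\n", E, 1.0f / std::sqrt(X));

  float R = 0.4f;               // crude initial estimate of 1/2
  for (int i = 0; i < 3; ++i)
    R = recipStep(X, R);
  std::printf("%f vs %f\n", R, 1.0f / X);
  return 0;
}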
@@ -6591,21 +6785,20 @@ FailedModImm:
if (!isConstant && !usesOnlyOneValue) {
SDValue Vec = DAG.getUNDEF(VT);
SDValue Op0 = Op.getOperand(0);
- unsigned ElemSize = VT.getScalarSizeInBits();
unsigned i = 0;
- // For 32 and 64 bit types, use INSERT_SUBREG for lane zero to
+
+ // Use SCALAR_TO_VECTOR for lane zero to
// a) Avoid a RMW dependency on the full vector register, and
// b) Allow the register coalescer to fold away the copy if the
- // value is already in an S or D register.
- // Do not do this for UNDEF/LOAD nodes because we have better patterns
- // for those avoiding the SCALAR_TO_VECTOR/BUILD_VECTOR.
- if (!Op0.isUndef() && Op0.getOpcode() != ISD::LOAD &&
- (ElemSize == 32 || ElemSize == 64)) {
- unsigned SubIdx = ElemSize == 32 ? AArch64::ssub : AArch64::dsub;
- MachineSDNode *N =
- DAG.getMachineNode(TargetOpcode::INSERT_SUBREG, dl, VT, Vec, Op0,
- DAG.getTargetConstant(SubIdx, dl, MVT::i32));
- Vec = SDValue(N, 0);
+ // value is already in an S or D register, and we're forced to emit an
+ // INSERT_SUBREG that we can't fold anywhere.
+ //
+ // We also allow types like i8 and i16 which are illegal scalar but legal
+ // vector element types. After type-legalization the inserted value is
+ // extended (i32) and it is safe to cast them to the vector type by ignoring
+ // the upper bits of the lowest lane (e.g. v8i8, v4i16).
+ if (!Op0.isUndef()) {
+ Vec = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Op0);
++i;
}
for (; i < NumElts; ++i) {
@@ -6995,6 +7188,47 @@ SDValue AArch64TargetLowering::LowerVSETCC(SDValue Op,
return Cmp;
}
+static SDValue getReductionSDNode(unsigned Op, SDLoc DL, SDValue ScalarOp,
+ SelectionDAG &DAG) {
+ SDValue VecOp = ScalarOp.getOperand(0);
+ auto Rdx = DAG.getNode(Op, DL, VecOp.getSimpleValueType(), VecOp);
+ return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, ScalarOp.getValueType(), Rdx,
+ DAG.getConstant(0, DL, MVT::i64));
+}
+
+SDValue AArch64TargetLowering::LowerVECREDUCE(SDValue Op,
+ SelectionDAG &DAG) const {
+ SDLoc dl(Op);
+ switch (Op.getOpcode()) {
+ case ISD::VECREDUCE_ADD:
+ return getReductionSDNode(AArch64ISD::UADDV, dl, Op, DAG);
+ case ISD::VECREDUCE_SMAX:
+ return getReductionSDNode(AArch64ISD::SMAXV, dl, Op, DAG);
+ case ISD::VECREDUCE_SMIN:
+ return getReductionSDNode(AArch64ISD::SMINV, dl, Op, DAG);
+ case ISD::VECREDUCE_UMAX:
+ return getReductionSDNode(AArch64ISD::UMAXV, dl, Op, DAG);
+ case ISD::VECREDUCE_UMIN:
+ return getReductionSDNode(AArch64ISD::UMINV, dl, Op, DAG);
+ case ISD::VECREDUCE_FMAX: {
+ assert(Op->getFlags().hasNoNaNs() && "fmax vector reduction needs NoNaN flag");
+ return DAG.getNode(
+ ISD::INTRINSIC_WO_CHAIN, dl, Op.getValueType(),
+ DAG.getConstant(Intrinsic::aarch64_neon_fmaxnmv, dl, MVT::i32),
+ Op.getOperand(0));
+ }
+ case ISD::VECREDUCE_FMIN: {
+ assert(Op->getFlags().hasNoNaNs() && "fmin vector reduction needs NoNaN flag");
+ return DAG.getNode(
+ ISD::INTRINSIC_WO_CHAIN, dl, Op.getValueType(),
+ DAG.getConstant(Intrinsic::aarch64_neon_fminnmv, dl, MVT::i32),
+ Op.getOperand(0));
+ }
+ default:
+ llvm_unreachable("Unhandled reduction");
+ }
+}
+
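LowerVECREDUCE maps each generic VECREDUCE_* node onto the matching across-lane AArch64 operation (UADDV, SMAXV, and so on) and then reads lane 0 of the result. As a scalar reference for what those reductions compute, a rough sketch with standard-library algorithms:

#include <algorithm>
#include <cassert>
#include <cstdint>
#include <numeric>
#include <vector>

// Scalar equivalents of two across-lane reductions; the real lowering emits a
// single UADDV/SMAXV and extracts the result from lane 0. Lanes is assumed
// non-empty.
static int64_t reduceAdd(const std::vector<int64_t> &Lanes) {
  return std::accumulate(Lanes.begin(), Lanes.end(), int64_t{0});
}
static int64_t reduceSMax(const std::vector<int64_t> &Lanes) {
  return *std::max_element(Lanes.begin(), Lanes.end());
}

int main() {
  std::vector<int64_t> Lanes = {3, -1, 7, 2};
  assert(reduceAdd(Lanes) == 11);
  assert(reduceSMax(Lanes) == 7);
  return 0;
}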
/// getTgtMemIntrinsic - Represent NEON load and store intrinsics as
/// MemIntrinsicNodes. The associated MachineMemOperands record the alignment
/// specified in the intrinsic calls.
@@ -7132,7 +7366,7 @@ bool AArch64TargetLowering::isProfitableToHoist(Instruction *I) const {
if (I->getOpcode() != Instruction::FMul)
return true;
- if (I->getNumUses() != 1)
+ if (!I->hasOneUse())
return true;
Instruction *User = I->user_back();
@@ -7249,6 +7483,41 @@ bool AArch64TargetLowering::hasPairedLoad(EVT LoadedType,
return NumBits == 32 || NumBits == 64;
}
+/// A helper function for determining the number of interleaved accesses we
+/// will generate when lowering accesses of the given type.
+unsigned
+AArch64TargetLowering::getNumInterleavedAccesses(VectorType *VecTy,
+ const DataLayout &DL) const {
+ return (DL.getTypeSizeInBits(VecTy) + 127) / 128;
+}
+
+MachineMemOperand::Flags
+AArch64TargetLowering::getMMOFlags(const Instruction &I) const {
+ if (Subtarget->getProcFamily() == AArch64Subtarget::Falkor &&
+ I.getMetadata(FALKOR_STRIDED_ACCESS_MD) != nullptr)
+ return MOStridedAccess;
+ return MachineMemOperand::MONone;
+}
+
+bool AArch64TargetLowering::isLegalInterleavedAccessType(
+ VectorType *VecTy, const DataLayout &DL) const {
+
+ unsigned VecSize = DL.getTypeSizeInBits(VecTy);
+ unsigned ElSize = DL.getTypeSizeInBits(VecTy->getElementType());
+
+ // Ensure the number of vector elements is greater than 1.
+ if (VecTy->getNumElements() < 2)
+ return false;
+
+ // Ensure the element type is legal.
+ if (ElSize != 8 && ElSize != 16 && ElSize != 32 && ElSize != 64)
+ return false;
+
+ // Ensure the total vector size is 64 or a multiple of 128. Types larger than
+ // 128 will be split into multiple interleaved accesses.
+ return VecSize == 64 || VecSize % 128 == 0;
+}
+
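The two helpers above decide whether an interleaved access is legal and how many 128-bit ldN/stN operations it will be split into. A freestanding sketch of the same arithmetic, with sizes passed in directly instead of queried from a DataLayout, might be:

#include <cassert>

// Number of 128-bit NEON accesses needed for a vector of VecSizeInBits bits,
// rounding up (mirrors getNumInterleavedAccesses).
static unsigned numInterleavedAccesses(unsigned VecSizeInBits) {
  return (VecSizeInBits + 127) / 128;
}

// Mirrors isLegalInterleavedAccessType: more than one element, a legal element
// size, and a total size of 64 bits or a multiple of 128 bits.
static bool isLegalInterleavedAccess(unsigned NumElts, unsigned EltSizeInBits) {
  if (NumElts < 2)
    return false;
  if (EltSizeInBits != 8 && EltSizeInBits != 16 && EltSizeInBits != 32 &&
      EltSizeInBits != 64)
    return false;
  unsigned VecSize = NumElts * EltSizeInBits;
  return VecSize == 64 || VecSize % 128 == 0;
}

int main() {
  assert(isLegalInterleavedAccess(16, 16));     // a 256-bit vector is legal
  assert(numInterleavedAccesses(16 * 16) == 2); // and is split into two accesses
  return 0;
}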
/// \brief Lower an interleaved load into a ldN intrinsic.
///
/// E.g. Lower an interleaved load (Factor = 2):
@@ -7272,12 +7541,15 @@ bool AArch64TargetLowering::lowerInterleavedLoad(
const DataLayout &DL = LI->getModule()->getDataLayout();
VectorType *VecTy = Shuffles[0]->getType();
- unsigned VecSize = DL.getTypeSizeInBits(VecTy);
- // Skip if we do not have NEON and skip illegal vector types.
- if (!Subtarget->hasNEON() || (VecSize != 64 && VecSize != 128))
+ // Skip if we do not have NEON and skip illegal vector types. We can
+ // "legalize" wide vector types into multiple interleaved accesses as long as
+ // the vector types are divisible by 128.
+ if (!Subtarget->hasNEON() || !isLegalInterleavedAccessType(VecTy, DL))
return false;
+ unsigned NumLoads = getNumInterleavedAccesses(VecTy, DL);
+
// A pointer vector can not be the return type of the ldN intrinsics. Need to
// load integer vectors first and then convert to pointer vectors.
Type *EltTy = VecTy->getVectorElementType();
@@ -7285,6 +7557,25 @@ bool AArch64TargetLowering::lowerInterleavedLoad(
VecTy =
VectorType::get(DL.getIntPtrType(EltTy), VecTy->getVectorNumElements());
+ IRBuilder<> Builder(LI);
+
+ // The base address of the load.
+ Value *BaseAddr = LI->getPointerOperand();
+
+ if (NumLoads > 1) {
+ // If we're going to generate more than one load, reset the sub-vector type
+ // to something legal.
+ VecTy = VectorType::get(VecTy->getVectorElementType(),
+ VecTy->getVectorNumElements() / NumLoads);
+
+ // We will compute the pointer operand of each load from the original base
+ // address using GEPs. Cast the base address to a pointer to the scalar
+ // element type.
+ BaseAddr = Builder.CreateBitCast(
+ BaseAddr, VecTy->getVectorElementType()->getPointerTo(
+ LI->getPointerAddressSpace()));
+ }
+
Type *PtrTy = VecTy->getPointerTo(LI->getPointerAddressSpace());
Type *Tys[2] = {VecTy, PtrTy};
static const Intrinsic::ID LoadInts[3] = {Intrinsic::aarch64_neon_ld2,
@@ -7293,39 +7584,50 @@ bool AArch64TargetLowering::lowerInterleavedLoad(
Function *LdNFunc =
Intrinsic::getDeclaration(LI->getModule(), LoadInts[Factor - 2], Tys);
- IRBuilder<> Builder(LI);
- Value *Ptr = Builder.CreateBitCast(LI->getPointerOperand(), PtrTy);
+ // Holds sub-vectors extracted from the load intrinsic return values. The
+ // sub-vectors are associated with the shufflevector instructions they will
+ // replace.
+ DenseMap<ShuffleVectorInst *, SmallVector<Value *, 4>> SubVecs;
- CallInst *LdN = Builder.CreateCall(LdNFunc, Ptr, "ldN");
+ for (unsigned LoadCount = 0; LoadCount < NumLoads; ++LoadCount) {
- // Replace uses of each shufflevector with the corresponding vector loaded
- // by ldN.
- for (unsigned i = 0; i < Shuffles.size(); i++) {
- ShuffleVectorInst *SVI = Shuffles[i];
- unsigned Index = Indices[i];
+ // If we're generating more than one load, compute the base address of
+ // subsequent loads as an offset from the previous.
+ if (LoadCount > 0)
+ BaseAddr = Builder.CreateConstGEP1_32(
+ BaseAddr, VecTy->getVectorNumElements() * Factor);
- Value *SubVec = Builder.CreateExtractValue(LdN, Index);
+ CallInst *LdN = Builder.CreateCall(
+ LdNFunc, Builder.CreateBitCast(BaseAddr, PtrTy), "ldN");
- // Convert the integer vector to pointer vector if the element is pointer.
- if (EltTy->isPointerTy())
- SubVec = Builder.CreateIntToPtr(SubVec, SVI->getType());
+ // Extract and store the sub-vectors returned by the load intrinsic.
+ for (unsigned i = 0; i < Shuffles.size(); i++) {
+ ShuffleVectorInst *SVI = Shuffles[i];
+ unsigned Index = Indices[i];
- SVI->replaceAllUsesWith(SubVec);
- }
+ Value *SubVec = Builder.CreateExtractValue(LdN, Index);
- return true;
-}
+ // Convert the integer vector to pointer vector if the element is pointer.
+ if (EltTy->isPointerTy())
+ SubVec = Builder.CreateIntToPtr(
+ SubVec, VectorType::get(SVI->getType()->getVectorElementType(),
+ VecTy->getVectorNumElements()));
+ SubVecs[SVI].push_back(SubVec);
+ }
+ }
-/// \brief Get a mask consisting of sequential integers starting from \p Start.
-///
-/// I.e. <Start, Start + 1, ..., Start + NumElts - 1>
-static Constant *getSequentialMask(IRBuilder<> &Builder, unsigned Start,
- unsigned NumElts) {
- SmallVector<Constant *, 16> Mask;
- for (unsigned i = 0; i < NumElts; i++)
- Mask.push_back(Builder.getInt32(Start + i));
+ // Replace uses of the shufflevector instructions with the sub-vectors
+ // returned by the load intrinsic. If a shufflevector instruction is
+ // associated with more than one sub-vector, those sub-vectors will be
+ // concatenated into a single wide vector.
+ for (ShuffleVectorInst *SVI : Shuffles) {
+ auto &SubVec = SubVecs[SVI];
+ auto *WideVec =
+ SubVec.size() > 1 ? concatenateVectors(Builder, SubVec) : SubVec[0];
+ SVI->replaceAllUsesWith(WideVec);
+ }
- return ConstantVector::get(Mask);
+ return true;
}
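The rewritten load path walks NumLoads chunks, issues one ldN per chunk, collects sub-vectors per shufflevector, and concatenates them at the end. The memory layout it undoes can be pictured with a plain scalar deinterleave; this sketch mirrors only the loop structure and uses no LLVM types:

#include <vector>

// Deinterleave Factor streams out of an interleaved buffer, processing it in
// fixed-size chunks the way the lowering processes one ldN per legal piece.
// Data.size() is assumed to be NumChunks * LaneLen * Factor.
static std::vector<std::vector<int>>
deinterleave(const std::vector<int> &Data, unsigned Factor, unsigned LaneLen) {
  std::vector<std::vector<int>> Streams(Factor);
  unsigned NumChunks = (Data.size() / Factor) / LaneLen;
  for (unsigned Chunk = 0; Chunk < NumChunks; ++Chunk)
    for (unsigned Lane = 0; Lane < LaneLen; ++Lane)
      for (unsigned F = 0; F < Factor; ++F)
        Streams[F].push_back(Data[(Chunk * LaneLen + Lane) * Factor + F]);
  return Streams; // Streams[F] is the concatenation of the per-chunk sub-vectors.
}

int main() {
  std::vector<int> Data = {0, 10, 1, 11, 2, 12, 3, 13}; // Factor = 2
  auto S = deinterleave(Data, 2, 2);                    // LaneLen = 2, two chunks
  return (S[0][3] == 3 && S[1][3] == 13) ? 0 : 1;
}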
/// \brief Lower an interleaved store into a stN intrinsic.
@@ -7369,12 +7671,15 @@ bool AArch64TargetLowering::lowerInterleavedStore(StoreInst *SI,
VectorType *SubVecTy = VectorType::get(EltTy, LaneLen);
const DataLayout &DL = SI->getModule()->getDataLayout();
- unsigned SubVecSize = DL.getTypeSizeInBits(SubVecTy);
- // Skip if we do not have NEON and skip illegal vector types.
- if (!Subtarget->hasNEON() || (SubVecSize != 64 && SubVecSize != 128))
+ // Skip if we do not have NEON and skip illegal vector types. We can
+ // "legalize" wide vector types into multiple interleaved accesses as long as
+ // the vector types are divisible by 128.
+ if (!Subtarget->hasNEON() || !isLegalInterleavedAccessType(SubVecTy, DL))
return false;
+ unsigned NumStores = getNumInterleavedAccesses(SubVecTy, DL);
+
Value *Op0 = SVI->getOperand(0);
Value *Op1 = SVI->getOperand(1);
IRBuilder<> Builder(SI);
@@ -7394,6 +7699,25 @@ bool AArch64TargetLowering::lowerInterleavedStore(StoreInst *SI,
SubVecTy = VectorType::get(IntTy, LaneLen);
}
+ // The base address of the store.
+ Value *BaseAddr = SI->getPointerOperand();
+
+ if (NumStores > 1) {
+ // If we're going to generate more than one store, reset the lane length
+ // and sub-vector type to something legal.
+ LaneLen /= NumStores;
+ SubVecTy = VectorType::get(SubVecTy->getVectorElementType(), LaneLen);
+
+ // We will compute the pointer operand of each store from the original base
+ // address using GEPs. Cast the base address to a pointer to the scalar
+ // element type.
+ BaseAddr = Builder.CreateBitCast(
+ BaseAddr, SubVecTy->getVectorElementType()->getPointerTo(
+ SI->getPointerAddressSpace()));
+ }
+
+ auto Mask = SVI->getShuffleMask();
+
Type *PtrTy = SubVecTy->getPointerTo(SI->getPointerAddressSpace());
Type *Tys[2] = {SubVecTy, PtrTy};
static const Intrinsic::ID StoreInts[3] = {Intrinsic::aarch64_neon_st2,
@@ -7402,34 +7726,43 @@ bool AArch64TargetLowering::lowerInterleavedStore(StoreInst *SI,
Function *StNFunc =
Intrinsic::getDeclaration(SI->getModule(), StoreInts[Factor - 2], Tys);
- SmallVector<Value *, 5> Ops;
+ for (unsigned StoreCount = 0; StoreCount < NumStores; ++StoreCount) {
- // Split the shufflevector operands into sub vectors for the new stN call.
- auto Mask = SVI->getShuffleMask();
- for (unsigned i = 0; i < Factor; i++) {
- if (Mask[i] >= 0) {
- Ops.push_back(Builder.CreateShuffleVector(
- Op0, Op1, getSequentialMask(Builder, Mask[i], LaneLen)));
- } else {
- unsigned StartMask = 0;
- for (unsigned j = 1; j < LaneLen; j++) {
- if (Mask[j*Factor + i] >= 0) {
- StartMask = Mask[j*Factor + i] - j;
- break;
+ SmallVector<Value *, 5> Ops;
+
+ // Split the shufflevector operands into sub vectors for the new stN call.
+ for (unsigned i = 0; i < Factor; i++) {
+ unsigned IdxI = StoreCount * LaneLen * Factor + i;
+ if (Mask[IdxI] >= 0) {
+ Ops.push_back(Builder.CreateShuffleVector(
+ Op0, Op1, createSequentialMask(Builder, Mask[IdxI], LaneLen, 0)));
+ } else {
+ unsigned StartMask = 0;
+ for (unsigned j = 1; j < LaneLen; j++) {
+ unsigned IdxJ = StoreCount * LaneLen * Factor + j;
+ if (Mask[IdxJ * Factor + IdxI] >= 0) {
+ StartMask = Mask[IdxJ * Factor + IdxI] - IdxJ;
+ break;
+ }
}
+ // Note: Filling undef gaps with random elements is ok, since
+ // those elements were being written anyway (with undefs).
+ // In the case of all undefs we're defaulting to using elems from 0
+ // Note: StartMask cannot be negative, it's checked in
+ // isReInterleaveMask
+ Ops.push_back(Builder.CreateShuffleVector(
+ Op0, Op1, createSequentialMask(Builder, StartMask, LaneLen, 0)));
}
- // Note: If all elements in a chunk are undefs, StartMask=0!
- // Note: Filling undef gaps with random elements is ok, since
- // those elements were being written anyway (with undefs).
- // In the case of all undefs we're defaulting to using elems from 0
- // Note: StartMask cannot be negative, it's checked in isReInterleaveMask
- Ops.push_back(Builder.CreateShuffleVector(
- Op0, Op1, getSequentialMask(Builder, StartMask, LaneLen)));
}
- }
- Ops.push_back(Builder.CreateBitCast(SI->getPointerOperand(), PtrTy));
- Builder.CreateCall(StNFunc, Ops);
+ // If we're generating more than one store, compute the base address of
+ // subsequent stores as an offset from the previous.
+ if (StoreCount > 0)
+ BaseAddr = Builder.CreateConstGEP1_32(BaseAddr, LaneLen * Factor);
+
+ Ops.push_back(Builder.CreateBitCast(BaseAddr, PtrTy));
+ Builder.CreateCall(StNFunc, Ops);
+ }
return true;
}
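Each stN operand above is carved out of the wide Op0/Op1 shuffle operands with createSequentialMask, that is, a run of LaneLen consecutive lane indices starting at the position the original shuffle mask points to. A standalone sketch of that mask shape (ignoring the trailing-undef parameter):

#include <vector>

// Build the mask <Start, Start + 1, ..., Start + NumElts - 1>, the shape
// produced by createSequentialMask with no trailing undef elements.
static std::vector<int> sequentialMask(unsigned Start, unsigned NumElts) {
  std::vector<int> Mask;
  Mask.reserve(NumElts);
  for (unsigned i = 0; i < NumElts; ++i)
    Mask.push_back(static_cast<int>(Start + i));
  return Mask;
}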
@@ -7690,7 +8023,7 @@ SDValue
AArch64TargetLowering::BuildSDIVPow2(SDNode *N, const APInt &Divisor,
SelectionDAG &DAG,
std::vector<SDNode *> *Created) const {
- AttributeSet Attr = DAG.getMachineFunction().getFunction()->getAttributes();
+ AttributeList Attr = DAG.getMachineFunction().getFunction()->getAttributes();
if (isIntDivCheap(N->getValueType(0), Attr))
return SDValue(N,0); // Lower SDIV as SDIV
@@ -8079,9 +8412,9 @@ static bool findEXTRHalf(SDValue N, SDValue &Src, uint32_t &ShiftAmount,
/// EXTR instruction extracts a contiguous chunk of bits from two existing
/// registers viewed as a high/low pair. This function looks for the pattern:
-/// (or (shl VAL1, #N), (srl VAL2, #RegWidth-N)) and replaces it with an
-/// EXTR. Can't quite be done in TableGen because the two immediates aren't
-/// independent.
+/// <tt>(or (shl VAL1, \#N), (srl VAL2, \#RegWidth-N))</tt> and replaces it
+/// with an EXTR. Can't quite be done in TableGen because the two immediates
+/// aren't independent.
static SDValue tryCombineToEXTR(SDNode *N,
TargetLowering::DAGCombinerInfo &DCI) {
SelectionDAG &DAG = DCI.DAG;
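tryCombineToEXTR matches (or (shl VAL1, #N), (srl VAL2, #RegWidth-N)), which is what EXTR extracts from a register pair viewed as high/low halves. A quick scalar check of that identity for a 64-bit register width, as a sanity sketch:

#include <cassert>
#include <cstdint>

// The pattern matched above: (VAL1 << N) | (VAL2 >> (64 - N)) for 0 < N < 64.
static uint64_t extrPattern(uint64_t Val1, uint64_t Val2, unsigned N) {
  return (Val1 << N) | (Val2 >> (64 - N));
}

int main() {
  // Top nibble of Val2 lands below the shifted Val1 bits.
  assert(extrPattern(0x1, 0x8000000000000000ULL, 4) == 0x18ULL);
  return 0;
}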
@@ -8935,16 +9268,26 @@ static SDValue splitStoreSplat(SelectionDAG &DAG, StoreSDNode &St,
// instructions (stp).
SDLoc DL(&St);
SDValue BasePtr = St.getBasePtr();
+ uint64_t BaseOffset = 0;
+
const MachinePointerInfo &PtrInfo = St.getPointerInfo();
SDValue NewST1 =
DAG.getStore(St.getChain(), DL, SplatVal, BasePtr, PtrInfo,
OrigAlignment, St.getMemOperand()->getFlags());
+ // As this is in ISel, we will not merge this add, which may degrade results.
+ if (BasePtr->getOpcode() == ISD::ADD &&
+ isa<ConstantSDNode>(BasePtr->getOperand(1))) {
+ BaseOffset = cast<ConstantSDNode>(BasePtr->getOperand(1))->getSExtValue();
+ BasePtr = BasePtr->getOperand(0);
+ }
+
unsigned Offset = EltOffset;
while (--NumVecElts) {
unsigned Alignment = MinAlign(OrigAlignment, Offset);
- SDValue OffsetPtr = DAG.getNode(ISD::ADD, DL, MVT::i64, BasePtr,
- DAG.getConstant(Offset, DL, MVT::i64));
+ SDValue OffsetPtr =
+ DAG.getNode(ISD::ADD, DL, MVT::i64, BasePtr,
+ DAG.getConstant(BaseOffset + Offset, DL, MVT::i64));
NewST1 = DAG.getStore(NewST1.getValue(0), DL, SplatVal, OffsetPtr,
PtrInfo.getWithOffset(Offset), Alignment,
St.getMemOperand()->getFlags());
@@ -9072,7 +9415,7 @@ static SDValue splitStores(SDNode *N, TargetLowering::DAGCombinerInfo &DCI,
return SDValue();
StoreSDNode *S = cast<StoreSDNode>(N);
- if (S->isVolatile())
+ if (S->isVolatile() || S->isIndexed())
return SDValue();
SDValue StVal = S->getValue();
@@ -9236,17 +9579,17 @@ static SDValue performPostLD1Combine(SDNode *N,
return SDValue();
}
-/// Simplify \Addr given that the top byte of it is ignored by HW during
+/// Simplify ``Addr`` given that the top byte of it is ignored by HW during
/// address translation.
static bool performTBISimplification(SDValue Addr,
TargetLowering::DAGCombinerInfo &DCI,
SelectionDAG &DAG) {
APInt DemandedMask = APInt::getLowBitsSet(64, 56);
- APInt KnownZero, KnownOne;
- TargetLowering::TargetLoweringOpt TLO(DAG, DCI.isBeforeLegalize(),
- DCI.isBeforeLegalizeOps());
+ KnownBits Known;
+ TargetLowering::TargetLoweringOpt TLO(DAG, !DCI.isBeforeLegalize(),
+ !DCI.isBeforeLegalizeOps());
const TargetLowering &TLI = DAG.getTargetLoweringInfo();
- if (TLI.SimplifyDemandedBits(Addr, DemandedMask, KnownZero, KnownOne, TLO)) {
+ if (TLI.SimplifyDemandedBits(Addr, DemandedMask, Known, TLO)) {
DCI.CommitTargetLoweringOpt(TLO);
return true;
}
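With Top Byte Ignore the hardware discards bits 63..56 during address translation, so the combine above only demands the low 56 bits of the pointer. The mask it builds reduces to a simple constant, sketched here:

#include <cassert>
#include <cstdint>

// Low 56 bits demanded, top byte ignored (APInt::getLowBitsSet(64, 56)).
static constexpr uint64_t TBIDemandedMask = (UINT64_C(1) << 56) - 1;

// With TBI enabled, these two pointers translate to the same address.
static bool sameTBIAddress(uint64_t A, uint64_t B) {
  return (A & TBIDemandedMask) == (B & TBIDemandedMask);
}

int main() {
  assert(sameTBIAddress(0xAB00123456789ABCULL, 0x0000123456789ABCULL));
  return 0;
}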
@@ -9267,266 +9610,6 @@ static SDValue performSTORECombine(SDNode *N,
return SDValue();
}
- /// This function handles the log2-shuffle pattern produced by the
-/// LoopVectorizer for the across vector reduction. It consists of
-/// log2(NumVectorElements) steps and, in each step, 2^(s) elements
-/// are reduced, where s is an induction variable from 0 to
-/// log2(NumVectorElements).
-static SDValue tryMatchAcrossLaneShuffleForReduction(SDNode *N, SDValue OpV,
- unsigned Op,
- SelectionDAG &DAG) {
- EVT VTy = OpV->getOperand(0).getValueType();
- if (!VTy.isVector())
- return SDValue();
-
- int NumVecElts = VTy.getVectorNumElements();
- if (Op == ISD::FMAXNUM || Op == ISD::FMINNUM) {
- if (NumVecElts != 4)
- return SDValue();
- } else {
- if (NumVecElts != 4 && NumVecElts != 8 && NumVecElts != 16)
- return SDValue();
- }
-
- int NumExpectedSteps = APInt(8, NumVecElts).logBase2();
- SDValue PreOp = OpV;
- // Iterate over each step of the across vector reduction.
- for (int CurStep = 0; CurStep != NumExpectedSteps; ++CurStep) {
- SDValue CurOp = PreOp.getOperand(0);
- SDValue Shuffle = PreOp.getOperand(1);
- if (Shuffle.getOpcode() != ISD::VECTOR_SHUFFLE) {
- // Try to swap the 1st and 2nd operand as add and min/max instructions
- // are commutative.
- CurOp = PreOp.getOperand(1);
- Shuffle = PreOp.getOperand(0);
- if (Shuffle.getOpcode() != ISD::VECTOR_SHUFFLE)
- return SDValue();
- }
-
- // Check if the input vector is fed by the operator we want to handle,
- // except the last step; the very first input vector is not necessarily
- // the same operator we are handling.
- if (CurOp.getOpcode() != Op && (CurStep != (NumExpectedSteps - 1)))
- return SDValue();
-
- // Check if it forms one step of the across vector reduction.
- // E.g.,
- // %cur = add %1, %0
- // %shuffle = vector_shuffle %cur, <2, 3, u, u>
- // %pre = add %cur, %shuffle
- if (Shuffle.getOperand(0) != CurOp)
- return SDValue();
-
- int NumMaskElts = 1 << CurStep;
- ArrayRef<int> Mask = cast<ShuffleVectorSDNode>(Shuffle)->getMask();
- // Check mask values in each step.
- // We expect the shuffle mask in each step follows a specific pattern
- // denoted here by the <M, U> form, where M is a sequence of integers
- // starting from NumMaskElts, increasing by 1, and the number integers
- // in M should be NumMaskElts. U is a sequence of UNDEFs and the number
- // of undef in U should be NumVecElts - NumMaskElts.
- // E.g., for <8 x i16>, mask values in each step should be :
- // step 0 : <1,u,u,u,u,u,u,u>
- // step 1 : <2,3,u,u,u,u,u,u>
- // step 2 : <4,5,6,7,u,u,u,u>
- for (int i = 0; i < NumVecElts; ++i)
- if ((i < NumMaskElts && Mask[i] != (NumMaskElts + i)) ||
- (i >= NumMaskElts && !(Mask[i] < 0)))
- return SDValue();
-
- PreOp = CurOp;
- }
- unsigned Opcode;
- bool IsIntrinsic = false;
-
- switch (Op) {
- default:
- llvm_unreachable("Unexpected operator for across vector reduction");
- case ISD::ADD:
- Opcode = AArch64ISD::UADDV;
- break;
- case ISD::SMAX:
- Opcode = AArch64ISD::SMAXV;
- break;
- case ISD::UMAX:
- Opcode = AArch64ISD::UMAXV;
- break;
- case ISD::SMIN:
- Opcode = AArch64ISD::SMINV;
- break;
- case ISD::UMIN:
- Opcode = AArch64ISD::UMINV;
- break;
- case ISD::FMAXNUM:
- Opcode = Intrinsic::aarch64_neon_fmaxnmv;
- IsIntrinsic = true;
- break;
- case ISD::FMINNUM:
- Opcode = Intrinsic::aarch64_neon_fminnmv;
- IsIntrinsic = true;
- break;
- }
- SDLoc DL(N);
-
- return IsIntrinsic
- ? DAG.getNode(ISD::INTRINSIC_WO_CHAIN, DL, N->getValueType(0),
- DAG.getConstant(Opcode, DL, MVT::i32), PreOp)
- : DAG.getNode(
- ISD::EXTRACT_VECTOR_ELT, DL, N->getValueType(0),
- DAG.getNode(Opcode, DL, PreOp.getSimpleValueType(), PreOp),
- DAG.getConstant(0, DL, MVT::i64));
-}
-
-/// Target-specific DAG combine for the across vector min/max reductions.
-/// This function specifically handles the final clean-up step of the vector
-/// min/max reductions produced by the LoopVectorizer. It is the log2-shuffle
-/// pattern, which narrows down and finds the final min/max value from all
-/// elements of the vector.
-/// For example, for a <16 x i8> vector :
-/// svn0 = vector_shuffle %0, undef<8,9,10,11,12,13,14,15,u,u,u,u,u,u,u,u>
-/// %smax0 = smax %arr, svn0
-/// %svn1 = vector_shuffle %smax0, undef<4,5,6,7,u,u,u,u,u,u,u,u,u,u,u,u>
-/// %smax1 = smax %smax0, %svn1
-/// %svn2 = vector_shuffle %smax1, undef<2,3,u,u,u,u,u,u,u,u,u,u,u,u,u,u>
-/// %smax2 = smax %smax1, svn2
-/// %svn3 = vector_shuffle %smax2, undef<1,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u>
-/// %sc = setcc %smax2, %svn3, gt
-/// %n0 = extract_vector_elt %sc, #0
-/// %n1 = extract_vector_elt %smax2, #0
-/// %n2 = extract_vector_elt $smax2, #1
-/// %result = select %n0, %n1, n2
-/// becomes :
-/// %1 = smaxv %0
-/// %result = extract_vector_elt %1, 0
-static SDValue
-performAcrossLaneMinMaxReductionCombine(SDNode *N, SelectionDAG &DAG,
- const AArch64Subtarget *Subtarget) {
- if (!Subtarget->hasNEON())
- return SDValue();
-
- SDValue N0 = N->getOperand(0);
- SDValue IfTrue = N->getOperand(1);
- SDValue IfFalse = N->getOperand(2);
-
- // Check if the SELECT merges up the final result of the min/max
- // from a vector.
- if (N0.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
- IfTrue.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
- IfFalse.getOpcode() != ISD::EXTRACT_VECTOR_ELT)
- return SDValue();
-
- // Expect N0 is fed by SETCC.
- SDValue SetCC = N0.getOperand(0);
- EVT SetCCVT = SetCC.getValueType();
- if (SetCC.getOpcode() != ISD::SETCC || !SetCCVT.isVector() ||
- SetCCVT.getVectorElementType() != MVT::i1)
- return SDValue();
-
- SDValue VectorOp = SetCC.getOperand(0);
- unsigned Op = VectorOp->getOpcode();
- // Check if the input vector is fed by the operator we want to handle.
- if (Op != ISD::SMAX && Op != ISD::UMAX && Op != ISD::SMIN &&
- Op != ISD::UMIN && Op != ISD::FMAXNUM && Op != ISD::FMINNUM)
- return SDValue();
-
- EVT VTy = VectorOp.getValueType();
- if (!VTy.isVector())
- return SDValue();
-
- if (VTy.getSizeInBits() < 64)
- return SDValue();
-
- EVT EltTy = VTy.getVectorElementType();
- if (Op == ISD::FMAXNUM || Op == ISD::FMINNUM) {
- if (EltTy != MVT::f32)
- return SDValue();
- } else {
- if (EltTy != MVT::i32 && EltTy != MVT::i16 && EltTy != MVT::i8)
- return SDValue();
- }
-
- // Check if extracting from the same vector.
- // For example,
- // %sc = setcc %vector, %svn1, gt
- // %n0 = extract_vector_elt %sc, #0
- // %n1 = extract_vector_elt %vector, #0
- // %n2 = extract_vector_elt $vector, #1
- if (!(VectorOp == IfTrue->getOperand(0) &&
- VectorOp == IfFalse->getOperand(0)))
- return SDValue();
-
- // Check if the condition code is matched with the operator type.
- ISD::CondCode CC = cast<CondCodeSDNode>(SetCC->getOperand(2))->get();
- if ((Op == ISD::SMAX && CC != ISD::SETGT && CC != ISD::SETGE) ||
- (Op == ISD::UMAX && CC != ISD::SETUGT && CC != ISD::SETUGE) ||
- (Op == ISD::SMIN && CC != ISD::SETLT && CC != ISD::SETLE) ||
- (Op == ISD::UMIN && CC != ISD::SETULT && CC != ISD::SETULE) ||
- (Op == ISD::FMAXNUM && CC != ISD::SETOGT && CC != ISD::SETOGE &&
- CC != ISD::SETUGT && CC != ISD::SETUGE && CC != ISD::SETGT &&
- CC != ISD::SETGE) ||
- (Op == ISD::FMINNUM && CC != ISD::SETOLT && CC != ISD::SETOLE &&
- CC != ISD::SETULT && CC != ISD::SETULE && CC != ISD::SETLT &&
- CC != ISD::SETLE))
- return SDValue();
-
- // Expect to check only lane 0 from the vector SETCC.
- if (!isNullConstant(N0.getOperand(1)))
- return SDValue();
-
- // Expect to extract the true value from lane 0.
- if (!isNullConstant(IfTrue.getOperand(1)))
- return SDValue();
-
- // Expect to extract the false value from lane 1.
- if (!isOneConstant(IfFalse.getOperand(1)))
- return SDValue();
-
- return tryMatchAcrossLaneShuffleForReduction(N, SetCC, Op, DAG);
-}
-
-/// Target-specific DAG combine for the across vector add reduction.
-/// This function specifically handles the final clean-up step of the vector
-/// add reduction produced by the LoopVectorizer. It is the log2-shuffle
-/// pattern, which adds all elements of a vector together.
-/// For example, for a <4 x i32> vector :
-/// %1 = vector_shuffle %0, <2,3,u,u>
-/// %2 = add %0, %1
-/// %3 = vector_shuffle %2, <1,u,u,u>
-/// %4 = add %2, %3
-/// %result = extract_vector_elt %4, 0
-/// becomes :
-/// %0 = uaddv %0
-/// %result = extract_vector_elt %0, 0
-static SDValue
-performAcrossLaneAddReductionCombine(SDNode *N, SelectionDAG &DAG,
- const AArch64Subtarget *Subtarget) {
- if (!Subtarget->hasNEON())
- return SDValue();
- SDValue N0 = N->getOperand(0);
- SDValue N1 = N->getOperand(1);
-
- // Check if the input vector is fed by the ADD.
- if (N0->getOpcode() != ISD::ADD)
- return SDValue();
-
- // The vector extract idx must constant zero because we only expect the final
- // result of the reduction is placed in lane 0.
- if (!isNullConstant(N1))
- return SDValue();
-
- EVT VTy = N0.getValueType();
- if (!VTy.isVector())
- return SDValue();
-
- EVT EltTy = VTy.getVectorElementType();
- if (EltTy != MVT::i32 && EltTy != MVT::i16 && EltTy != MVT::i8)
- return SDValue();
-
- if (VTy.getSizeInBits() < 64)
- return SDValue();
-
- return tryMatchAcrossLaneShuffleForReduction(N, N0, ISD::ADD, DAG);
-}
/// Target-specific DAG combine function for NEON load/store intrinsics
/// to merge base address updates.
@@ -10205,12 +10288,8 @@ SDValue AArch64TargetLowering::PerformDAGCombine(SDNode *N,
return performBitcastCombine(N, DCI, DAG);
case ISD::CONCAT_VECTORS:
return performConcatVectorsCombine(N, DCI, DAG);
- case ISD::SELECT: {
- SDValue RV = performSelectCombine(N, DCI);
- if (!RV.getNode())
- RV = performAcrossLaneMinMaxReductionCombine(N, DAG, Subtarget);
- return RV;
- }
+ case ISD::SELECT:
+ return performSelectCombine(N, DCI);
case ISD::VSELECT:
return performVSelectCombine(N, DCI.DAG);
case ISD::LOAD:
@@ -10232,8 +10311,6 @@ SDValue AArch64TargetLowering::PerformDAGCombine(SDNode *N,
return performNVCASTCombine(N);
case ISD::INSERT_VECTOR_ELT:
return performPostLD1Combine(N, DCI, true);
- case ISD::EXTRACT_VECTOR_ELT:
- return performAcrossLaneAddReductionCombine(N, DAG, Subtarget);
case ISD::INTRINSIC_VOID:
case ISD::INTRINSIC_W_CHAIN:
switch (cast<ConstantSDNode>(N->getOperand(1))->getZExtValue()) {
@@ -10307,7 +10384,7 @@ bool AArch64TargetLowering::isUsedByReturnOnly(SDNode *N,
// call. This will cause the optimizers to attempt to move, or duplicate,
// return instructions to help enable tail call optimizations for this
// instruction.
-bool AArch64TargetLowering::mayBeEmittedAsTailCall(CallInst *CI) const {
+bool AArch64TargetLowering::mayBeEmittedAsTailCall(const CallInst *CI) const {
return CI->isTailCall();
}
@@ -10453,6 +10530,14 @@ void AArch64TargetLowering::ReplaceNodeResults(
case ISD::BITCAST:
ReplaceBITCASTResults(N, Results, DAG);
return;
+ case ISD::VECREDUCE_ADD:
+ case ISD::VECREDUCE_SMAX:
+ case ISD::VECREDUCE_SMIN:
+ case ISD::VECREDUCE_UMAX:
+ case ISD::VECREDUCE_UMIN:
+ Results.push_back(LowerVECREDUCE(SDValue(N, 0), DAG));
+ return;
+
case AArch64ISD::SADDV:
ReplaceReductionResults(N, Results, DAG, ISD::ADD, AArch64ISD::SADDV);
return;
@@ -10483,9 +10568,10 @@ void AArch64TargetLowering::ReplaceNodeResults(
}
bool AArch64TargetLowering::useLoadStackGuardNode() const {
- if (!Subtarget->isTargetAndroid() && !Subtarget->isTargetOpenBSD())
- return true;
- return TargetLowering::useLoadStackGuardNode();
+ if (Subtarget->isTargetAndroid() || Subtarget->isTargetFuchsia() ||
+ Subtarget->isTargetOpenBSD())
+ return TargetLowering::useLoadStackGuardNode();
+ return true;
}
unsigned AArch64TargetLowering::combineRepeatedFPDivisors() const {
@@ -10527,11 +10613,17 @@ AArch64TargetLowering::shouldExpandAtomicLoadInIR(LoadInst *LI) const {
TargetLowering::AtomicExpansionKind
AArch64TargetLowering::shouldExpandAtomicRMWInIR(AtomicRMWInst *AI) const {
unsigned Size = AI->getType()->getPrimitiveSizeInBits();
- return Size <= 128 ? AtomicExpansionKind::LLSC : AtomicExpansionKind::None;
+ if (Size > 128) return AtomicExpansionKind::None;
+ // Nand not supported in LSE.
+ if (AI->getOperation() == AtomicRMWInst::Nand) return AtomicExpansionKind::LLSC;
+ // Leave 128 bits to LLSC.
+ return (Subtarget->hasLSE() && Size < 128) ? AtomicExpansionKind::None : AtomicExpansionKind::LLSC;
}
bool AArch64TargetLowering::shouldExpandAtomicCmpXchgInIR(
AtomicCmpXchgInst *AI) const {
+ // If subtarget has LSE, leave cmpxchg intact for codegen.
+ if (Subtarget->hasLSE()) return false;
// At -O0, fast-regalloc cannot cope with the live vregs necessary to
// implement cmpxchg without spilling. If the address being exchanged is also
// on the stack and close enough to the spill slot, this can lead to a
@@ -10623,36 +10715,56 @@ bool AArch64TargetLowering::shouldNormalizeToSelectSequence(LLVMContext &,
return false;
}
-Value *AArch64TargetLowering::getIRStackGuard(IRBuilder<> &IRB) const {
- if (!Subtarget->isTargetAndroid())
- return TargetLowering::getIRStackGuard(IRB);
-
- // Android provides a fixed TLS slot for the stack cookie. See the definition
- // of TLS_SLOT_STACK_GUARD in
- // https://android.googlesource.com/platform/bionic/+/master/libc/private/bionic_tls.h
- const unsigned TlsOffset = 0x28;
+static Value *UseTlsOffset(IRBuilder<> &IRB, unsigned Offset) {
Module *M = IRB.GetInsertBlock()->getParent()->getParent();
Function *ThreadPointerFunc =
Intrinsic::getDeclaration(M, Intrinsic::thread_pointer);
return IRB.CreatePointerCast(
- IRB.CreateConstGEP1_32(IRB.CreateCall(ThreadPointerFunc), TlsOffset),
+ IRB.CreateConstGEP1_32(IRB.CreateCall(ThreadPointerFunc), Offset),
Type::getInt8PtrTy(IRB.getContext())->getPointerTo(0));
}
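UseTlsOffset emits IR that reads the thread pointer and indexes a fixed signed byte offset from it (0x28/0x48 on Android, -0x10/-0x8 on Fuchsia). Roughly, at the C++ level, the access looks like the sketch below; threadPointer() is a hypothetical stand-in for the llvm.thread.pointer intrinsic (TPIDR_EL0 on AArch64), not a real API:

#include <cstddef>

// Hypothetical stand-in for the thread-pointer read; illustrative only.
extern "C" char *threadPointer();

// Equivalent of the GEP plus pointer cast emitted by UseTlsOffset: the stack
// guard or unsafe-stack pointer lives at a fixed byte offset from the TP.
static void **tlsSlot(std::ptrdiff_t Offset) {
  return reinterpret_cast<void **>(threadPointer() + Offset);
}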
-Value *AArch64TargetLowering::getSafeStackPointerLocation(IRBuilder<> &IRB) const {
- if (!Subtarget->isTargetAndroid())
- return TargetLowering::getSafeStackPointerLocation(IRB);
+Value *AArch64TargetLowering::getIRStackGuard(IRBuilder<> &IRB) const {
+ // Android provides a fixed TLS slot for the stack cookie. See the definition
+ // of TLS_SLOT_STACK_GUARD in
+ // https://android.googlesource.com/platform/bionic/+/master/libc/private/bionic_tls.h
+ if (Subtarget->isTargetAndroid())
+ return UseTlsOffset(IRB, 0x28);
+ // Fuchsia is similar.
+ // <magenta/tls.h> defines MX_TLS_STACK_GUARD_OFFSET with this value.
+ if (Subtarget->isTargetFuchsia())
+ return UseTlsOffset(IRB, -0x10);
+
+ return TargetLowering::getIRStackGuard(IRB);
+}
+
+Value *AArch64TargetLowering::getSafeStackPointerLocation(IRBuilder<> &IRB) const {
// Android provides a fixed TLS slot for the SafeStack pointer. See the
// definition of TLS_SLOT_SAFESTACK in
// https://android.googlesource.com/platform/bionic/+/master/libc/private/bionic_tls.h
- const unsigned TlsOffset = 0x48;
- Module *M = IRB.GetInsertBlock()->getParent()->getParent();
- Function *ThreadPointerFunc =
- Intrinsic::getDeclaration(M, Intrinsic::thread_pointer);
- return IRB.CreatePointerCast(
- IRB.CreateConstGEP1_32(IRB.CreateCall(ThreadPointerFunc), TlsOffset),
- Type::getInt8PtrTy(IRB.getContext())->getPointerTo(0));
+ if (Subtarget->isTargetAndroid())
+ return UseTlsOffset(IRB, 0x48);
+
+ // Fuchsia is similar.
+ // <magenta/tls.h> defines MX_TLS_UNSAFE_SP_OFFSET with this value.
+ if (Subtarget->isTargetFuchsia())
+ return UseTlsOffset(IRB, -0x8);
+
+ return TargetLowering::getSafeStackPointerLocation(IRB);
+}
+
+bool AArch64TargetLowering::isMaskAndCmp0FoldingBeneficial(
+ const Instruction &AndI) const {
+ // Only sink 'and' mask to cmp use block if it is masking a single bit, since
+ // this is likely to fold the and/cmp/br into a single tbz instruction. It
+ // may be beneficial to sink in other cases, but we would have to check that
+ // the cmp would not get folded into the br to form a cbz for these to be
+ // beneficial.
+ ConstantInt* Mask = dyn_cast<ConstantInt>(AndI.getOperand(1));
+ if (!Mask)
+ return false;
+ return Mask->getUniqueInteger().isPowerOf2();
}
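isMaskAndCmp0FoldingBeneficial only sinks the 'and' when the mask has a single bit set, the case TBZ/TBNZ can consume directly. The power-of-two test it relies on comes down to the usual bit trick, sketched here:

#include <cassert>
#include <cstdint>

// A non-zero value is a power of two iff it has exactly one bit set.
static bool isSingleBitMask(uint64_t Mask) {
  return Mask != 0 && (Mask & (Mask - 1)) == 0;
}

int main() {
  assert(isSingleBitMask(0x40));   // bit 6 only: a tbz/tbnz candidate
  assert(!isSingleBitMask(0x30));  // two bits: stays as and + cmp + br
  return 0;
}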
void AArch64TargetLowering::initializeSplitCSR(MachineBasicBlock *Entry) const {
@@ -10702,7 +10814,7 @@ void AArch64TargetLowering::insertCopiesSplitCSR(
}
}
-bool AArch64TargetLowering::isIntDivCheap(EVT VT, AttributeSet Attr) const {
+bool AArch64TargetLowering::isIntDivCheap(EVT VT, AttributeList Attr) const {
// Integer division on AArch64 is expensive. However, when aggressively
// optimizing for code size, we prefer to use a div instruction, as it is
// usually smaller than the alternative sequence.
@@ -10711,6 +10823,14 @@ bool AArch64TargetLowering::isIntDivCheap(EVT VT, AttributeSet Attr) const {
// size, because it will have to be scalarized, while the alternative code
// sequence can be performed in vector form.
bool OptSize =
- Attr.hasAttribute(AttributeSet::FunctionIndex, Attribute::MinSize);
+ Attr.hasAttribute(AttributeList::FunctionIndex, Attribute::MinSize);
return OptSize && !VT.isVector();
}
+
+unsigned
+AArch64TargetLowering::getVaListSizeInBits(const DataLayout &DL) const {
+ if (Subtarget->isTargetDarwin() || Subtarget->isTargetWindows())
+ return getPointerTy(DL).getSizeInBits();
+
+ return 3 * getPointerTy(DL).getSizeInBits() + 2 * 32;
+}
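getVaListSizeInBits returns a single pointer for Darwin and Windows and the full AAPCS64 va_list (three pointers plus two 32-bit offsets) otherwise, matching the 8-byte versus 32-byte choice made in LowerVACOPY earlier. Written out as plain arithmetic in a small sketch:

#include <cassert>

// AAPCS64 va_list: __stack, __gr_top, __vr_top (pointers) plus __gr_offs and
// __vr_offs (32-bit ints). Darwin and Windows use a plain char * instead.
static unsigned vaListSizeInBits(bool SinglePointer, unsigned PointerBits = 64) {
  if (SinglePointer)
    return PointerBits;
  return 3 * PointerBits + 2 * 32;
}

int main() {
  assert(vaListSizeInBits(true) == 64);    // 8 bytes
  assert(vaListSizeInBits(false) == 256);  // 32 bytes
  return 0;
}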
diff --git a/gnu/llvm/lib/Target/AArch64/AArch64Subtarget.h b/gnu/llvm/lib/Target/AArch64/AArch64Subtarget.h
index 5354788cd35..03c95cf9fc6 100644
--- a/gnu/llvm/lib/Target/AArch64/AArch64Subtarget.h
+++ b/gnu/llvm/lib/Target/AArch64/AArch64Subtarget.h
@@ -45,7 +45,11 @@ public:
ExynosM1,
Falkor,
Kryo,
- Vulcan
+ ThunderX2T99,
+ ThunderX,
+ ThunderXT81,
+ ThunderXT83,
+ ThunderXT88
};
protected:
@@ -61,9 +65,12 @@ protected:
bool HasCRC = false;
bool HasLSE = false;
bool HasRAS = false;
+ bool HasRDM = false;
bool HasPerfMon = false;
bool HasFullFP16 = false;
bool HasSPE = false;
+ bool HasLSLFast = false;
+ bool HasSVE = false;
// HasZeroCycleRegMove - Has zero-cycle register mov instructions.
bool HasZeroCycleRegMove = false;
@@ -73,6 +80,13 @@ protected:
// StrictAlign - Disallow unaligned memory accesses.
bool StrictAlign = false;
+
+ // NegativeImmediates - transform instructions with negative immediates
+ bool NegativeImmediates = true;
+
+ // Enable 64-bit vectorization in SLP.
+ unsigned MinVectorRegisterBitWidth = 64;
+
bool UseAA = false;
bool PredictableSelectIsExpensive = false;
bool BalanceFPOps = false;
@@ -83,6 +97,8 @@ protected:
bool UseAlternateSExtLoadCVTF32Pattern = false;
bool HasArithmeticBccFusion = false;
bool HasArithmeticCbzFusion = false;
+ bool HasFuseAES = false;
+ bool HasFuseLiterals = false;
bool DisableLatencySchedHeuristic = false;
bool UseRSqrt = false;
uint8_t MaxInterleaveFactor = 2;
@@ -94,6 +110,7 @@ protected:
unsigned PrefFunctionAlignment = 0;
unsigned PrefLoopAlignment = 0;
unsigned MaxJumpTableSize = 0;
+ unsigned WideningBaseCost = 0;
// ReserveX18 - X18 is not available as a general purpose register.
bool ReserveX18;
@@ -176,6 +193,10 @@ public:
bool isXRaySupported() const override { return true; }
+ unsigned getMinVectorRegisterBitWidth() const {
+ return MinVectorRegisterBitWidth;
+ }
+
bool isX18Reserved() const { return ReserveX18; }
bool hasFPARMv8() const { return HasFPARMv8; }
bool hasNEON() const { return HasNEON; }
@@ -183,6 +204,7 @@ public:
bool hasCRC() const { return HasCRC; }
bool hasLSE() const { return HasLSE; }
bool hasRAS() const { return HasRAS; }
+ bool hasRDM() const { return HasRDM; }
bool balanceFPOps() const { return BalanceFPOps; }
bool predictableSelectIsExpensive() const {
return PredictableSelectIsExpensive;
@@ -195,6 +217,15 @@ public:
}
bool hasArithmeticBccFusion() const { return HasArithmeticBccFusion; }
bool hasArithmeticCbzFusion() const { return HasArithmeticCbzFusion; }
+ bool hasFuseAES() const { return HasFuseAES; }
+ bool hasFuseLiterals() const { return HasFuseLiterals; }
+
+ /// \brief Return true if the CPU supports any kind of instruction fusion.
+ bool hasFusion() const {
+ return hasArithmeticBccFusion() || hasArithmeticCbzFusion() ||
+ hasFuseAES() || hasFuseLiterals();
+ }
+
bool useRSqrt() const { return UseRSqrt; }
unsigned getMaxInterleaveFactor() const { return MaxInterleaveFactor; }
unsigned getVectorInsertExtractBaseCost() const {
@@ -211,6 +242,8 @@ public:
unsigned getMaximumJumpTableSize() const { return MaxJumpTableSize; }
+ unsigned getWideningBaseCost() const { return WideningBaseCost; }
+
/// CPU has TBI (top byte of addresses is ignored during HW address
/// translation) and OS enables it.
bool supportsAddressTopByteIgnored() const;
@@ -218,6 +251,8 @@ public:
bool hasPerfMon() const { return HasPerfMon; }
bool hasFullFP16() const { return HasFullFP16; }
bool hasSPE() const { return HasSPE; }
+ bool hasLSLFast() const { return HasLSLFast; }
+ bool hasSVE() const { return HasSVE; }
bool isLittleEndian() const { return IsLittle; }
@@ -227,6 +262,7 @@ public:
bool isTargetOpenBSD() const { return TargetTriple.isOSOpenBSD(); }
bool isTargetWindows() const { return TargetTriple.isOSWindows(); }
bool isTargetAndroid() const { return TargetTriple.isAndroid(); }
+ bool isTargetFuchsia() const { return TargetTriple.isOSFuchsia(); }
bool isTargetCOFF() const { return TargetTriple.isOSBinFormatCOFF(); }
bool isTargetELF() const { return TargetTriple.isOSBinFormatELF(); }
@@ -234,9 +270,17 @@ public:
bool useAA() const override { return UseAA; }
- /// getMaxInlineSizeThreshold - Returns the maximum memset / memcpy size
- /// that still makes it profitable to inline the call.
- unsigned getMaxInlineSizeThreshold() const { return 64; }
+ bool useSmallAddressing() const {
+ switch (TLInfo.getTargetMachine().getCodeModel()) {
+ case CodeModel::Kernel:
+ // Kernel is currently allowed only for Fuchsia targets,
+ // where it is the same as Small for almost all purposes.
+ case CodeModel::Small:
+ return true;
+ default:
+ return false;
+ }
+ }
/// ParseSubtargetFeatures - Parses features string setting specified
/// subtarget options. Definition of function is auto generated by tblgen.
@@ -247,6 +291,9 @@ public:
unsigned char ClassifyGlobalReference(const GlobalValue *GV,
const TargetMachine &TM) const;
+ unsigned char classifyGlobalFunctionReference(const GlobalValue *GV,
+ const TargetMachine &TM) const;
+
/// This function returns the name of a function which has an interface
/// like the non-standard bzero function, if such a function exists on
/// the current subtarget and it is considered preferable over
@@ -260,6 +307,17 @@ public:
bool enableEarlyIfConversion() const override;
std::unique_ptr<PBQPRAConstraint> getCustomPBQPConstraints() const override;
+
+ bool isCallingConvWin64(CallingConv::ID CC) const {
+ switch (CC) {
+ case CallingConv::C:
+ return isTargetWindows();
+ case CallingConv::Win64:
+ return true;
+ default:
+ return false;
+ }
+ }
};
} // End llvm namespace
diff --git a/gnu/llvm/lib/Target/X86/MCTargetDesc/X86AsmBackend.cpp b/gnu/llvm/lib/Target/X86/MCTargetDesc/X86AsmBackend.cpp
index d18fc252f8b..1ca6b38c69b 100644
--- a/gnu/llvm/lib/Target/X86/MCTargetDesc/X86AsmBackend.cpp
+++ b/gnu/llvm/lib/Target/X86/MCTargetDesc/X86AsmBackend.cpp
@@ -10,6 +10,8 @@
#include "MCTargetDesc/X86BaseInfo.h"
#include "MCTargetDesc/X86FixupKinds.h"
#include "llvm/ADT/StringSwitch.h"
+#include "llvm/BinaryFormat/ELF.h"
+#include "llvm/BinaryFormat/MachO.h"
#include "llvm/MC/MCAsmBackend.h"
#include "llvm/MC/MCELFObjectWriter.h"
#include "llvm/MC/MCExpr.h"
@@ -22,9 +24,7 @@
#include "llvm/MC/MCSectionELF.h"
#include "llvm/MC/MCSectionMachO.h"
#include "llvm/MC/MCSubtargetInfo.h"
-#include "llvm/Support/ELF.h"
#include "llvm/Support/ErrorHandling.h"
-#include "llvm/Support/MachO.h"
#include "llvm/Support/TargetRegistry.h"
#include "llvm/Support/raw_ostream.h"
using namespace llvm;
@@ -108,12 +108,12 @@ public:
return Infos[Kind - FirstTargetFixupKind];
}
- void applyFixup(const MCFixup &Fixup, char *Data, unsigned DataSize,
- uint64_t Value, bool IsPCRel) const override {
+ void applyFixup(const MCAssembler &Asm, const MCFixup &Fixup,
+ const MCValue &Target, MutableArrayRef<char> Data,
+ uint64_t Value, bool IsResolved) const override {
unsigned Size = 1 << getFixupKindLog2Size(Fixup.getKind());
- assert(Fixup.getOffset() + Size <= DataSize &&
- "Invalid fixup offset!");
+ assert(Fixup.getOffset() + Size <= Data.size() && "Invalid fixup offset!");
// Check that upper bits are either all zeros or all ones.
// Specifically ignore overflow/underflow as long as the leakage is
diff --git a/gnu/llvm/lib/Target/X86/X86AsmPrinter.h b/gnu/llvm/lib/Target/X86/X86AsmPrinter.h
index af5b8098eb2..3d260884577 100644
--- a/gnu/llvm/lib/Target/X86/X86AsmPrinter.h
+++ b/gnu/llvm/lib/Target/X86/X86AsmPrinter.h
@@ -81,7 +81,7 @@ class LLVM_LIBRARY_VISIBILITY X86AsmPrinter : public AsmPrinter {
void LowerSTACKMAP(const MachineInstr &MI);
void LowerPATCHPOINT(const MachineInstr &MI, X86MCInstLower &MCIL);
void LowerSTATEPOINT(const MachineInstr &MI, X86MCInstLower &MCIL);
- void LowerFAULTING_LOAD_OP(const MachineInstr &MI, X86MCInstLower &MCIL);
+ void LowerFAULTING_OP(const MachineInstr &MI, X86MCInstLower &MCIL);
void LowerPATCHABLE_OP(const MachineInstr &MI, X86MCInstLower &MCIL);
void LowerTlsAddr(X86MCInstLower &MCInstLowering, const MachineInstr &MI);
@@ -91,6 +91,9 @@ class LLVM_LIBRARY_VISIBILITY X86AsmPrinter : public AsmPrinter {
X86MCInstLower &MCIL);
void LowerPATCHABLE_RET(const MachineInstr &MI, X86MCInstLower &MCIL);
void LowerPATCHABLE_TAIL_CALL(const MachineInstr &MI, X86MCInstLower &MCIL);
+ void LowerPATCHABLE_EVENT_CALL(const MachineInstr &MI, X86MCInstLower &MCIL);
+
+ void LowerFENTRY_CALL(const MachineInstr &MI, X86MCInstLower &MCIL);
// Helper function that emits the XRay sleds we've collected for a particular
// function.
diff --git a/gnu/llvm/lib/Target/X86/X86MCInstLower.cpp b/gnu/llvm/lib/Target/X86/X86MCInstLower.cpp
index b1db061482b..a9aa2ff48e5 100644
--- a/gnu/llvm/lib/Target/X86/X86MCInstLower.cpp
+++ b/gnu/llvm/lib/Target/X86/X86MCInstLower.cpp
@@ -12,20 +12,21 @@
//
//===----------------------------------------------------------------------===//
-#include "X86AsmPrinter.h"
-#include "X86RegisterInfo.h"
-#include "X86ShuffleDecodeConstantPool.h"
#include "InstPrinter/X86ATTInstPrinter.h"
#include "InstPrinter/X86InstComments.h"
#include "MCTargetDesc/X86BaseInfo.h"
#include "Utils/X86ShuffleDecode.h"
+#include "X86AsmPrinter.h"
+#include "X86RegisterInfo.h"
+#include "X86ShuffleDecodeConstantPool.h"
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/iterator_range.h"
-#include "llvm/CodeGen/MachineFunction.h"
+#include "llvm/BinaryFormat/ELF.h"
#include "llvm/CodeGen/MachineConstantPool.h"
-#include "llvm/CodeGen/MachineOperand.h"
+#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineModuleInfoImpls.h"
+#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/StackMaps.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/GlobalValue.h"
@@ -38,13 +39,12 @@
#include "llvm/MC/MCInst.h"
#include "llvm/MC/MCInstBuilder.h"
#include "llvm/MC/MCSection.h"
+#include "llvm/MC/MCSectionELF.h"
+#include "llvm/MC/MCSectionMachO.h"
#include "llvm/MC/MCStreamer.h"
#include "llvm/MC/MCSymbol.h"
#include "llvm/MC/MCSymbolELF.h"
-#include "llvm/MC/MCSectionELF.h"
-#include "llvm/MC/MCSectionMachO.h"
#include "llvm/Support/TargetRegistry.h"
-#include "llvm/Support/ELF.h"
#include "llvm/Target/TargetLoweringObjectFile.h"
using namespace llvm;
@@ -102,7 +102,7 @@ void X86AsmPrinter::StackMapShadowTracker::emitShadowPadding(
}
void X86AsmPrinter::EmitAndCountInstruction(MCInst &Inst) {
- OutStreamer->EmitInstruction(Inst, getSubtargetInfo());
+ OutStreamer->EmitInstruction(Inst, getSubtargetInfo(), EnablePrintSchedInfo);
SMShadowTracker.count(Inst, getSubtargetInfo(), CodeEmitter.get());
}
@@ -215,6 +215,7 @@ MCOperand X86MCInstLower::LowerSymbolOperand(const MachineOperand &MO,
case X86II::MO_GOT: RefKind = MCSymbolRefExpr::VK_GOT; break;
case X86II::MO_GOTOFF: RefKind = MCSymbolRefExpr::VK_GOTOFF; break;
case X86II::MO_PLT: RefKind = MCSymbolRefExpr::VK_PLT; break;
+ case X86II::MO_ABS8: RefKind = MCSymbolRefExpr::VK_X86_ABS8; break;
case X86II::MO_PIC_BASE_OFFSET:
case X86II::MO_DARWIN_NONLAZY_PIC_BASE:
Expr = MCSymbolRefExpr::create(Sym, Ctx);
@@ -357,7 +358,7 @@ X86MCInstLower::LowerMachineOperand(const MachineInstr *MI,
const MachineOperand &MO) const {
switch (MO.getType()) {
default:
- MI->dump();
+ MI->print(errs());
llvm_unreachable("unknown operand type");
case MachineOperand::MO_Register:
// Ignore all implicit register operands.
@@ -498,11 +499,16 @@ ReSimplify:
break;
}
- // TAILJMPd, TAILJMPd64 - Lower to the correct jump instruction.
+ // TAILJMPd, TAILJMPd64, TailJMPd_cc - Lower to the correct jump instruction.
{ unsigned Opcode;
case X86::TAILJMPr: Opcode = X86::JMP32r; goto SetTailJmpOpcode;
case X86::TAILJMPd:
case X86::TAILJMPd64: Opcode = X86::JMP_1; goto SetTailJmpOpcode;
+ case X86::TAILJMPd_CC:
+ case X86::TAILJMPd64_CC:
+ Opcode = X86::GetCondBranchFromCond(
+ static_cast<X86::CondCode>(MI->getOperand(1).getImm()));
+ goto SetTailJmpOpcode;
SetTailJmpOpcode:
MCOperand Saved = OutMI.getOperand(0);
@@ -888,30 +894,47 @@ void X86AsmPrinter::LowerSTATEPOINT(const MachineInstr &MI,
SM.recordStatepoint(MI);
}
-void X86AsmPrinter::LowerFAULTING_LOAD_OP(const MachineInstr &MI,
- X86MCInstLower &MCIL) {
- // FAULTING_LOAD_OP <def>, <MBB handler>, <load opcode>, <load operands>
+void X86AsmPrinter::LowerFAULTING_OP(const MachineInstr &FaultingMI,
+ X86MCInstLower &MCIL) {
+ // FAULTING_OP <def>, <faulting kind>, <MBB handler>,
+ //             <opcode>, <operands>
- unsigned LoadDefRegister = MI.getOperand(0).getReg();
- MCSymbol *HandlerLabel = MI.getOperand(1).getMBB()->getSymbol();
- unsigned LoadOpcode = MI.getOperand(2).getImm();
- unsigned LoadOperandsBeginIdx = 3;
+ unsigned DefRegister = FaultingMI.getOperand(0).getReg();
+ FaultMaps::FaultKind FK =
+ static_cast<FaultMaps::FaultKind>(FaultingMI.getOperand(1).getImm());
+ MCSymbol *HandlerLabel = FaultingMI.getOperand(2).getMBB()->getSymbol();
+ unsigned Opcode = FaultingMI.getOperand(3).getImm();
+ unsigned OperandsBeginIdx = 4;
- FM.recordFaultingOp(FaultMaps::FaultingLoad, HandlerLabel);
+ assert(FK < FaultMaps::FaultKindMax && "Invalid Faulting Kind!");
+ FM.recordFaultingOp(FK, HandlerLabel);
- MCInst LoadMI;
- LoadMI.setOpcode(LoadOpcode);
+ MCInst MI;
+ MI.setOpcode(Opcode);
- if (LoadDefRegister != X86::NoRegister)
- LoadMI.addOperand(MCOperand::createReg(LoadDefRegister));
+ if (DefRegister != X86::NoRegister)
+ MI.addOperand(MCOperand::createReg(DefRegister));
- for (auto I = MI.operands_begin() + LoadOperandsBeginIdx,
- E = MI.operands_end();
+ for (auto I = FaultingMI.operands_begin() + OperandsBeginIdx,
+ E = FaultingMI.operands_end();
I != E; ++I)
- if (auto MaybeOperand = MCIL.LowerMachineOperand(&MI, *I))
- LoadMI.addOperand(MaybeOperand.getValue());
+ if (auto MaybeOperand = MCIL.LowerMachineOperand(&FaultingMI, *I))
+ MI.addOperand(MaybeOperand.getValue());
+
+ OutStreamer->EmitInstruction(MI, getSubtargetInfo());
+}
- OutStreamer->EmitInstruction(LoadMI, getSubtargetInfo());
+void X86AsmPrinter::LowerFENTRY_CALL(const MachineInstr &MI,
+ X86MCInstLower &MCIL) {
+ bool Is64Bits = Subtarget->is64Bit();
+ MCContext &Ctx = OutStreamer->getContext();
+ MCSymbol *fentry = Ctx.getOrCreateSymbol("__fentry__");
+ const MCSymbolRefExpr *Op =
+ MCSymbolRefExpr::create(fentry, MCSymbolRefExpr::VK_None, Ctx);
+
+ EmitAndCountInstruction(
+ MCInstBuilder(Is64Bits ? X86::CALL64pcrel32 : X86::CALLpcrel32)
+ .addExpr(Op));
}
void X86AsmPrinter::LowerPATCHABLE_OP(const MachineInstr &MI,
@@ -1017,6 +1040,83 @@ void X86AsmPrinter::LowerPATCHPOINT(const MachineInstr &MI,
getSubtargetInfo());
}
+void X86AsmPrinter::LowerPATCHABLE_EVENT_CALL(const MachineInstr &MI,
+ X86MCInstLower &MCIL) {
+ assert(Subtarget->is64Bit() && "XRay custom events only supported on X86-64");
+
+ // We want to emit the following pattern, which follows the x86 calling
+ // convention to prepare for the trampoline call to be patched in.
+ //
+ // <args placement according to the SysV64 calling convention>
+ // .p2align 1, ...
+ // .Lxray_event_sled_N:
+ // jmp +N // jump across the call instruction
+ // callq __xray_CustomEvent // force relocation to symbol
+ // <args cleanup, jump to here>
+ //
+ // The relative jump needs to jump forward 24 bytes:
+ // 10 (args) + 5 (nops) + 9 (cleanup)
+ //
+ // After patching, it would look something like:
+ //
+ // nopw (2-byte nop)
+ // callq __xray_CustomEvent // already lowered
+ //
+ // ---
+ // First we emit the label and the jump.
+ auto CurSled = OutContext.createTempSymbol("xray_event_sled_", true);
+ OutStreamer->AddComment("# XRay Custom Event Log");
+ OutStreamer->EmitCodeAlignment(2);
+ OutStreamer->EmitLabel(CurSled);
+
+ // Use a two-byte `jmp`. This version of JMP takes an 8-bit relative offset as
+ // an operand (computed as an offset from the jmp instruction).
+ // FIXME: Find another less hacky way to force the relative jump.
+ OutStreamer->EmitBytes("\xeb\x14");
+
+ // The default C calling convention will place two arguments into %rdi and
+ // %rsi -- so we only work with those.
+ unsigned UsedRegs[] = {X86::RDI, X86::RSI, X86::RAX};
+
+ // Because we will use %rax, we preserve that across the call.
+ EmitAndCountInstruction(MCInstBuilder(X86::PUSH64r).addReg(X86::RAX));
+
+ // Then we put the operands in the %rdi and %rsi registers.
+ for (unsigned I = 0; I < MI.getNumOperands(); ++I)
+ if (auto Op = MCIL.LowerMachineOperand(&MI, MI.getOperand(I))) {
+ if (Op->isImm())
+ EmitAndCountInstruction(MCInstBuilder(X86::MOV64ri)
+ .addReg(UsedRegs[I])
+ .addImm(Op->getImm()));
+ else if (Op->isReg()) {
+ if (Op->getReg() != UsedRegs[I])
+ EmitAndCountInstruction(MCInstBuilder(X86::MOV64rr)
+ .addReg(UsedRegs[I])
+ .addReg(Op->getReg()));
+ else
+ EmitNops(*OutStreamer, 3, Subtarget->is64Bit(), getSubtargetInfo());
+ }
+ }
+
+ // We emit a hard dependency on the __xray_CustomEvent symbol, which is the
+ // name of the trampoline to be implemented by the XRay runtime. We put this
+ // explicitly in the %rax register.
+ auto TSym = OutContext.getOrCreateSymbol("__xray_CustomEvent");
+ MachineOperand TOp = MachineOperand::CreateMCSymbol(TSym);
+ EmitAndCountInstruction(MCInstBuilder(X86::MOV64ri)
+ .addReg(X86::RAX)
+ .addOperand(MCIL.LowerSymbolOperand(TOp, TSym)));
+
+ // Emit the call instruction.
+ EmitAndCountInstruction(MCInstBuilder(X86::CALL64r).addReg(X86::RAX));
+
+ // Restore caller-saved and used registers.
+ OutStreamer->AddComment("xray custom event end.");
+ EmitAndCountInstruction(MCInstBuilder(X86::POP64r).addReg(X86::RAX));
+
+ recordSled(CurSled, MI, SledKind::CUSTOM_EVENT);
+}
+
void X86AsmPrinter::LowerPATCHABLE_FUNCTION_ENTER(const MachineInstr &MI,
X86MCInstLower &MCIL) {
// We want to emit the following pattern:
@@ -1232,6 +1332,32 @@ static std::string getShuffleComment(const MachineInstr *MI,
return Comment;
}
+static void printConstant(const Constant *COp, raw_ostream &CS) {
+ if (isa<UndefValue>(COp)) {
+ CS << "u";
+ } else if (auto *CI = dyn_cast<ConstantInt>(COp)) {
+ if (CI->getBitWidth() <= 64) {
+ CS << CI->getZExtValue();
+ } else {
+ // print multi-word constant as (w0,w1)
+ const auto &Val = CI->getValue();
+ CS << "(";
+ for (int i = 0, N = Val.getNumWords(); i < N; ++i) {
+ if (i > 0)
+ CS << ",";
+ CS << Val.getRawData()[i];
+ }
+ CS << ")";
+ }
+ } else if (auto *CF = dyn_cast<ConstantFP>(COp)) {
+ SmallString<32> Str;
+ CF->getValueAPF().toString(Str);
+ CS << Str;
+ } else {
+ CS << "?";
+ }
+}
+
void X86AsmPrinter::EmitInstruction(const MachineInstr *MI) {
X86MCInstLower MCInstLowering(*MF, *this);
const X86RegisterInfo *RI = MF->getSubtarget<X86Subtarget>().getRegisterInfo();
@@ -1276,9 +1402,11 @@ void X86AsmPrinter::EmitInstruction(const MachineInstr *MI) {
case X86::TAILJMPr:
case X86::TAILJMPm:
case X86::TAILJMPd:
+ case X86::TAILJMPd_CC:
case X86::TAILJMPr64:
case X86::TAILJMPm64:
case X86::TAILJMPd64:
+ case X86::TAILJMPd64_CC:
case X86::TAILJMPr64_REX:
case X86::TAILJMPm64_REX:
// Lower these as normal, but add some comments.
@@ -1367,8 +1495,11 @@ void X86AsmPrinter::EmitInstruction(const MachineInstr *MI) {
case TargetOpcode::STATEPOINT:
return LowerSTATEPOINT(*MI, MCInstLowering);
- case TargetOpcode::FAULTING_LOAD_OP:
- return LowerFAULTING_LOAD_OP(*MI, MCInstLowering);
+ case TargetOpcode::FAULTING_OP:
+ return LowerFAULTING_OP(*MI, MCInstLowering);
+
+ case TargetOpcode::FENTRY_CALL:
+ return LowerFENTRY_CALL(*MI, MCInstLowering);
case TargetOpcode::PATCHABLE_OP:
return LowerPATCHABLE_OP(*MI, MCInstLowering);
@@ -1387,6 +1518,9 @@ void X86AsmPrinter::EmitInstruction(const MachineInstr *MI) {
case TargetOpcode::PATCHABLE_TAIL_CALL:
return LowerPATCHABLE_TAIL_CALL(*MI, MCInstLowering);
+
+ case TargetOpcode::PATCHABLE_EVENT_CALL:
+ return LowerPATCHABLE_EVENT_CALL(*MI, MCInstLowering);
case X86::MORESTACK_RET:
EmitAndCountInstruction(MCInstBuilder(getRetOpcode(*Subtarget)));
@@ -1501,7 +1635,8 @@ void X86AsmPrinter::EmitInstruction(const MachineInstr *MI) {
SmallVector<int, 64> Mask;
DecodePSHUFBMask(C, Mask);
if (!Mask.empty())
- OutStreamer->AddComment(getShuffleComment(MI, SrcIdx, SrcIdx, Mask));
+ OutStreamer->AddComment(getShuffleComment(MI, SrcIdx, SrcIdx, Mask),
+ !EnablePrintSchedInfo);
}
break;
}
@@ -1572,15 +1707,16 @@ void X86AsmPrinter::EmitInstruction(const MachineInstr *MI) {
SmallVector<int, 16> Mask;
DecodeVPERMILPMask(C, ElSize, Mask);
if (!Mask.empty())
- OutStreamer->AddComment(getShuffleComment(MI, SrcIdx, SrcIdx, Mask));
+ OutStreamer->AddComment(getShuffleComment(MI, SrcIdx, SrcIdx, Mask),
+ !EnablePrintSchedInfo);
}
break;
}
case X86::VPERMIL2PDrm:
case X86::VPERMIL2PSrm:
- case X86::VPERMIL2PDrmY:
- case X86::VPERMIL2PSrmY: {
+ case X86::VPERMIL2PDYrm:
+ case X86::VPERMIL2PSYrm: {
if (!OutStreamer->isVerboseAsm())
break;
assert(MI->getNumOperands() >= 8 &&
@@ -1593,8 +1729,8 @@ void X86AsmPrinter::EmitInstruction(const MachineInstr *MI) {
unsigned ElSize;
switch (MI->getOpcode()) {
default: llvm_unreachable("Invalid opcode");
- case X86::VPERMIL2PSrm: case X86::VPERMIL2PSrmY: ElSize = 32; break;
- case X86::VPERMIL2PDrm: case X86::VPERMIL2PDrmY: ElSize = 64; break;
+ case X86::VPERMIL2PSrm: case X86::VPERMIL2PSYrm: ElSize = 32; break;
+ case X86::VPERMIL2PDrm: case X86::VPERMIL2PDYrm: ElSize = 64; break;
}
const MachineOperand &MaskOp = MI->getOperand(6);
@@ -1602,7 +1738,8 @@ void X86AsmPrinter::EmitInstruction(const MachineInstr *MI) {
SmallVector<int, 16> Mask;
DecodeVPERMIL2PMask(C, (unsigned)CtrlOp.getImm(), ElSize, Mask);
if (!Mask.empty())
- OutStreamer->AddComment(getShuffleComment(MI, 1, 2, Mask));
+ OutStreamer->AddComment(getShuffleComment(MI, 1, 2, Mask),
+ !EnablePrintSchedInfo);
}
break;
}
@@ -1618,7 +1755,8 @@ void X86AsmPrinter::EmitInstruction(const MachineInstr *MI) {
SmallVector<int, 16> Mask;
DecodeVPPERMMask(C, Mask);
if (!Mask.empty())
- OutStreamer->AddComment(getShuffleComment(MI, 1, 2, Mask));
+ OutStreamer->AddComment(getShuffleComment(MI, 1, 2, Mask),
+ !EnablePrintSchedInfo);
}
break;
}
@@ -1654,66 +1792,159 @@ void X86AsmPrinter::EmitInstruction(const MachineInstr *MI) {
// For loads from a constant pool to a vector register, print the constant
// loaded.
CASE_ALL_MOV_RM()
+ case X86::VBROADCASTF128:
+ case X86::VBROADCASTI128:
+ case X86::VBROADCASTF32X4Z256rm:
+ case X86::VBROADCASTF32X4rm:
+ case X86::VBROADCASTF32X8rm:
+ case X86::VBROADCASTF64X2Z128rm:
+ case X86::VBROADCASTF64X2rm:
+ case X86::VBROADCASTF64X4rm:
+ case X86::VBROADCASTI32X4Z256rm:
+ case X86::VBROADCASTI32X4rm:
+ case X86::VBROADCASTI32X8rm:
+ case X86::VBROADCASTI64X2Z128rm:
+ case X86::VBROADCASTI64X2rm:
+ case X86::VBROADCASTI64X4rm:
if (!OutStreamer->isVerboseAsm())
break;
if (MI->getNumOperands() <= 4)
break;
if (auto *C = getConstantFromPool(*MI, MI->getOperand(4))) {
+ int NumLanes = 1;
+ // Override NumLanes for the broadcast instructions.
+ switch (MI->getOpcode()) {
+ case X86::VBROADCASTF128: NumLanes = 2; break;
+ case X86::VBROADCASTI128: NumLanes = 2; break;
+ case X86::VBROADCASTF32X4Z256rm: NumLanes = 2; break;
+ case X86::VBROADCASTF32X4rm: NumLanes = 4; break;
+ case X86::VBROADCASTF32X8rm: NumLanes = 2; break;
+ case X86::VBROADCASTF64X2Z128rm: NumLanes = 2; break;
+ case X86::VBROADCASTF64X2rm: NumLanes = 4; break;
+ case X86::VBROADCASTF64X4rm: NumLanes = 2; break;
+ case X86::VBROADCASTI32X4Z256rm: NumLanes = 2; break;
+ case X86::VBROADCASTI32X4rm: NumLanes = 4; break;
+ case X86::VBROADCASTI32X8rm: NumLanes = 2; break;
+ case X86::VBROADCASTI64X2Z128rm: NumLanes = 2; break;
+ case X86::VBROADCASTI64X2rm: NumLanes = 4; break;
+ case X86::VBROADCASTI64X4rm: NumLanes = 2; break;
+ }
+
std::string Comment;
raw_string_ostream CS(Comment);
const MachineOperand &DstOp = MI->getOperand(0);
CS << X86ATTInstPrinter::getRegisterName(DstOp.getReg()) << " = ";
if (auto *CDS = dyn_cast<ConstantDataSequential>(C)) {
CS << "[";
- for (int i = 0, NumElements = CDS->getNumElements(); i < NumElements; ++i) {
- if (i != 0)
- CS << ",";
- if (CDS->getElementType()->isIntegerTy())
- CS << CDS->getElementAsInteger(i);
- else if (CDS->getElementType()->isFloatTy())
- CS << CDS->getElementAsFloat(i);
- else if (CDS->getElementType()->isDoubleTy())
- CS << CDS->getElementAsDouble(i);
- else
- CS << "?";
+ for (int l = 0; l != NumLanes; ++l) {
+ for (int i = 0, NumElements = CDS->getNumElements(); i < NumElements; ++i) {
+ if (i != 0 || l != 0)
+ CS << ",";
+ if (CDS->getElementType()->isIntegerTy())
+ CS << CDS->getElementAsInteger(i);
+ else if (CDS->getElementType()->isFloatTy())
+ CS << CDS->getElementAsFloat(i);
+ else if (CDS->getElementType()->isDoubleTy())
+ CS << CDS->getElementAsDouble(i);
+ else
+ CS << "?";
+ }
}
CS << "]";
- OutStreamer->AddComment(CS.str());
+ OutStreamer->AddComment(CS.str(), !EnablePrintSchedInfo);
} else if (auto *CV = dyn_cast<ConstantVector>(C)) {
CS << "<";
- for (int i = 0, NumOperands = CV->getNumOperands(); i < NumOperands; ++i) {
- if (i != 0)
- CS << ",";
- Constant *COp = CV->getOperand(i);
- if (isa<UndefValue>(COp)) {
- CS << "u";
- } else if (auto *CI = dyn_cast<ConstantInt>(COp)) {
- if (CI->getBitWidth() <= 64) {
- CS << CI->getZExtValue();
- } else {
- // print multi-word constant as (w0,w1)
- const auto &Val = CI->getValue();
- CS << "(";
- for (int i = 0, N = Val.getNumWords(); i < N; ++i) {
- if (i > 0)
- CS << ",";
- CS << Val.getRawData()[i];
- }
- CS << ")";
- }
- } else if (auto *CF = dyn_cast<ConstantFP>(COp)) {
- SmallString<32> Str;
- CF->getValueAPF().toString(Str);
- CS << Str;
- } else {
- CS << "?";
+ for (int l = 0; l != NumLanes; ++l) {
+ for (int i = 0, NumOperands = CV->getNumOperands(); i < NumOperands; ++i) {
+ if (i != 0 || l != 0)
+ CS << ",";
+ printConstant(CV->getOperand(i), CS);
}
}
CS << ">";
- OutStreamer->AddComment(CS.str());
+ OutStreamer->AddComment(CS.str(), !EnablePrintSchedInfo);
}
}
break;
+ case X86::VBROADCASTSSrm:
+ case X86::VBROADCASTSSYrm:
+ case X86::VBROADCASTSSZ128m:
+ case X86::VBROADCASTSSZ256m:
+ case X86::VBROADCASTSSZm:
+ case X86::VBROADCASTSDYrm:
+ case X86::VBROADCASTSDZ256m:
+ case X86::VBROADCASTSDZm:
+ case X86::VPBROADCASTBrm:
+ case X86::VPBROADCASTBYrm:
+ case X86::VPBROADCASTBZ128m:
+ case X86::VPBROADCASTBZ256m:
+ case X86::VPBROADCASTBZm:
+ case X86::VPBROADCASTDrm:
+ case X86::VPBROADCASTDYrm:
+ case X86::VPBROADCASTDZ128m:
+ case X86::VPBROADCASTDZ256m:
+ case X86::VPBROADCASTDZm:
+ case X86::VPBROADCASTQrm:
+ case X86::VPBROADCASTQYrm:
+ case X86::VPBROADCASTQZ128m:
+ case X86::VPBROADCASTQZ256m:
+ case X86::VPBROADCASTQZm:
+ case X86::VPBROADCASTWrm:
+ case X86::VPBROADCASTWYrm:
+ case X86::VPBROADCASTWZ128m:
+ case X86::VPBROADCASTWZ256m:
+ case X86::VPBROADCASTWZm:
+ if (!OutStreamer->isVerboseAsm())
+ break;
+ if (MI->getNumOperands() <= 4)
+ break;
+ if (auto *C = getConstantFromPool(*MI, MI->getOperand(4))) {
+ int NumElts;
+ switch (MI->getOpcode()) {
+ default: llvm_unreachable("Invalid opcode");
+ case X86::VBROADCASTSSrm: NumElts = 4; break;
+ case X86::VBROADCASTSSYrm: NumElts = 8; break;
+ case X86::VBROADCASTSSZ128m: NumElts = 4; break;
+ case X86::VBROADCASTSSZ256m: NumElts = 8; break;
+ case X86::VBROADCASTSSZm: NumElts = 16; break;
+ case X86::VBROADCASTSDYrm: NumElts = 4; break;
+ case X86::VBROADCASTSDZ256m: NumElts = 4; break;
+ case X86::VBROADCASTSDZm: NumElts = 8; break;
+ case X86::VPBROADCASTBrm: NumElts = 16; break;
+ case X86::VPBROADCASTBYrm: NumElts = 32; break;
+ case X86::VPBROADCASTBZ128m: NumElts = 16; break;
+ case X86::VPBROADCASTBZ256m: NumElts = 32; break;
+ case X86::VPBROADCASTBZm: NumElts = 64; break;
+ case X86::VPBROADCASTDrm: NumElts = 4; break;
+ case X86::VPBROADCASTDYrm: NumElts = 8; break;
+ case X86::VPBROADCASTDZ128m: NumElts = 4; break;
+ case X86::VPBROADCASTDZ256m: NumElts = 8; break;
+ case X86::VPBROADCASTDZm: NumElts = 16; break;
+ case X86::VPBROADCASTQrm: NumElts = 2; break;
+ case X86::VPBROADCASTQYrm: NumElts = 4; break;
+ case X86::VPBROADCASTQZ128m: NumElts = 2; break;
+ case X86::VPBROADCASTQZ256m: NumElts = 4; break;
+ case X86::VPBROADCASTQZm: NumElts = 8; break;
+ case X86::VPBROADCASTWrm: NumElts = 8; break;
+ case X86::VPBROADCASTWYrm: NumElts = 16; break;
+ case X86::VPBROADCASTWZ128m: NumElts = 8; break;
+ case X86::VPBROADCASTWZ256m: NumElts = 16; break;
+ case X86::VPBROADCASTWZm: NumElts = 32; break;
+ }
+
+ std::string Comment;
+ raw_string_ostream CS(Comment);
+ const MachineOperand &DstOp = MI->getOperand(0);
+ CS << X86ATTInstPrinter::getRegisterName(DstOp.getReg()) << " = ";
+ CS << "[";
+ for (int i = 0; i != NumElts; ++i) {
+ if (i != 0)
+ CS << ",";
+ printConstant(C, CS);
+ }
+ CS << "]";
+ OutStreamer->AddComment(CS.str(), !EnablePrintSchedInfo);
+ }
}
MCInst TmpInst;
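
The XRay custom event sled emitted by LowerPATCHABLE_EVENT_CALL above is reached
from source code through the __xray_customevent builtin that this same merge adds
to Builtins.def. A minimal usage sketch, assuming a clang built from this tree and
a program compiled with -fxray-instrument (the function name and payload are
illustrative, not part of the patch):

  // handle_request() is a made-up example; __xray_customevent(buf, len) is the
  // builtin with signature "vcC*z", i.e. void(const char *, size_t).
  [[clang::xray_always_instrument]] void handle_request() {
    static const char msg[] = "request-start";
    // Lowered through TargetOpcode::PATCHABLE_EVENT_CALL into the sled shown
    // above; it stays a near-no-op until the XRay runtime patches the sled.
    __xray_customevent(msg, sizeof(msg) - 1);
  }
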
diff --git a/gnu/llvm/lib/Transforms/Scalar/LoopIdiomRecognize.cpp b/gnu/llvm/lib/Transforms/Scalar/LoopIdiomRecognize.cpp
index 991a8f9115e..f04fe80df17 100644
--- a/gnu/llvm/lib/Transforms/Scalar/LoopIdiomRecognize.cpp
+++ b/gnu/llvm/lib/Transforms/Scalar/LoopIdiomRecognize.cpp
@@ -110,6 +110,16 @@ private:
bool HasMemset;
bool HasMemsetPattern;
bool HasMemcpy;
+ /// Return code for isLegalStore()
+ enum LegalStoreKind {
+ None = 0,
+ Memset,
+ MemsetPattern,
+ Memcpy,
+ UnorderedAtomicMemcpy,
+ DontUse // Dummy retval never to be used. Allows catching errors in retval
+ // handling.
+ };
/// \name Countable Loop Idiom Handling
/// @{
@@ -119,8 +129,7 @@ private:
SmallVectorImpl<BasicBlock *> &ExitBlocks);
void collectStores(BasicBlock *BB);
- bool isLegalStore(StoreInst *SI, bool &ForMemset, bool &ForMemsetPattern,
- bool &ForMemcpy);
+ LegalStoreKind isLegalStore(StoreInst *SI);
bool processLoopStores(SmallVectorImpl<StoreInst *> &SL, const SCEV *BECount,
bool ForMemset);
bool processLoopMemSet(MemSetInst *MSI, const SCEV *BECount);
@@ -144,6 +153,10 @@ private:
bool recognizePopcount();
void transformLoopToPopcount(BasicBlock *PreCondBB, Instruction *CntInst,
PHINode *CntPhi, Value *Var);
+ bool recognizeAndInsertCTLZ();
+ void transformLoopToCountable(BasicBlock *PreCondBB, Instruction *CntInst,
+ PHINode *CntPhi, Value *Var, const DebugLoc DL,
+ bool ZeroCheck, bool IsCntPhiUsedOutsideLoop);
/// @}
};
@@ -238,9 +251,9 @@ bool LoopIdiomRecognize::runOnLoop(Loop *L) {
ApplyCodeSizeHeuristics =
L->getHeader()->getParent()->optForSize() && UseLIRCodeSizeHeurs;
- HasMemset = TLI->has(LibFunc::memset);
- HasMemsetPattern = TLI->has(LibFunc::memset_pattern16);
- HasMemcpy = TLI->has(LibFunc::memcpy);
+ HasMemset = TLI->has(LibFunc_memset);
+ HasMemsetPattern = TLI->has(LibFunc_memset_pattern16);
+ HasMemcpy = TLI->has(LibFunc_memcpy);
if (HasMemset || HasMemsetPattern || HasMemcpy)
if (SE->hasLoopInvariantBackedgeTakenCount(L))
@@ -341,15 +354,24 @@ static Constant *getMemSetPatternValue(Value *V, const DataLayout *DL) {
return ConstantArray::get(AT, std::vector<Constant *>(ArraySize, C));
}
-bool LoopIdiomRecognize::isLegalStore(StoreInst *SI, bool &ForMemset,
- bool &ForMemsetPattern, bool &ForMemcpy) {
+LoopIdiomRecognize::LegalStoreKind
+LoopIdiomRecognize::isLegalStore(StoreInst *SI) {
+
// Don't touch volatile stores.
- if (!SI->isSimple())
- return false;
+ if (SI->isVolatile())
+ return LegalStoreKind::None;
+ // We only want simple or unordered-atomic stores.
+ if (!SI->isUnordered())
+ return LegalStoreKind::None;
+
+ // Don't convert stores of non-integral pointer types to memsets (which store
+ // integers).
+ if (DL->isNonIntegralPointerType(SI->getValueOperand()->getType()))
+ return LegalStoreKind::None;
// Avoid merging nontemporal stores.
if (SI->getMetadata(LLVMContext::MD_nontemporal))
- return false;
+ return LegalStoreKind::None;
Value *StoredVal = SI->getValueOperand();
Value *StorePtr = SI->getPointerOperand();
@@ -357,7 +379,7 @@ bool LoopIdiomRecognize::isLegalStore(StoreInst *SI, bool &ForMemset,
// Reject stores that are so large that they overflow an unsigned.
uint64_t SizeInBits = DL->getTypeSizeInBits(StoredVal->getType());
if ((SizeInBits & 7) || (SizeInBits >> 32) != 0)
- return false;
+ return LegalStoreKind::None;
// See if the pointer expression is an AddRec like {base,+,1} on the current
// loop, which indicates a strided store. If we have something else, it's a
@@ -365,11 +387,11 @@ bool LoopIdiomRecognize::isLegalStore(StoreInst *SI, bool &ForMemset,
const SCEVAddRecExpr *StoreEv =
dyn_cast<SCEVAddRecExpr>(SE->getSCEV(StorePtr));
if (!StoreEv || StoreEv->getLoop() != CurLoop || !StoreEv->isAffine())
- return false;
+ return LegalStoreKind::None;
// Check to see if we have a constant stride.
if (!isa<SCEVConstant>(StoreEv->getOperand(1)))
- return false;
+ return LegalStoreKind::None;
// See if the store can be turned into a memset.
@@ -380,22 +402,23 @@ bool LoopIdiomRecognize::isLegalStore(StoreInst *SI, bool &ForMemset,
Value *SplatValue = isBytewiseValue(StoredVal);
Constant *PatternValue = nullptr;
+ // Note: memset and memset_pattern on unordered-atomic stores are not yet supported
+ bool UnorderedAtomic = SI->isUnordered() && !SI->isSimple();
+
// If we're allowed to form a memset, and the stored value would be
// acceptable for memset, use it.
- if (HasMemset && SplatValue &&
+ if (!UnorderedAtomic && HasMemset && SplatValue &&
// Verify that the stored value is loop invariant. If not, we can't
// promote the memset.
CurLoop->isLoopInvariant(SplatValue)) {
// It looks like we can use SplatValue.
- ForMemset = true;
- return true;
- } else if (HasMemsetPattern &&
+ return LegalStoreKind::Memset;
+ } else if (!UnorderedAtomic && HasMemsetPattern &&
// Don't create memset_pattern16s with address spaces.
StorePtr->getType()->getPointerAddressSpace() == 0 &&
(PatternValue = getMemSetPatternValue(StoredVal, DL))) {
// It looks like we can use PatternValue!
- ForMemsetPattern = true;
- return true;
+ return LegalStoreKind::MemsetPattern;
}
// Otherwise, see if the store can be turned into a memcpy.
@@ -405,12 +428,17 @@ bool LoopIdiomRecognize::isLegalStore(StoreInst *SI, bool &ForMemset,
APInt Stride = getStoreStride(StoreEv);
unsigned StoreSize = getStoreSizeInBytes(SI, DL);
if (StoreSize != Stride && StoreSize != -Stride)
- return false;
+ return LegalStoreKind::None;
// The store must be feeding a non-volatile load.
LoadInst *LI = dyn_cast<LoadInst>(SI->getValueOperand());
- if (!LI || !LI->isSimple())
- return false;
+
+ // Only allow non-volatile loads
+ if (!LI || LI->isVolatile())
+ return LegalStoreKind::None;
+ // Only allow simple or unordered-atomic loads
+ if (!LI->isUnordered())
+ return LegalStoreKind::None;
// See if the pointer expression is an AddRec like {base,+,1} on the current
// loop, which indicates a strided load. If we have something else, it's a
@@ -418,18 +446,19 @@ bool LoopIdiomRecognize::isLegalStore(StoreInst *SI, bool &ForMemset,
const SCEVAddRecExpr *LoadEv =
dyn_cast<SCEVAddRecExpr>(SE->getSCEV(LI->getPointerOperand()));
if (!LoadEv || LoadEv->getLoop() != CurLoop || !LoadEv->isAffine())
- return false;
+ return LegalStoreKind::None;
// The store and load must share the same stride.
if (StoreEv->getOperand(1) != LoadEv->getOperand(1))
- return false;
+ return LegalStoreKind::None;
// Success. This store can be converted into a memcpy.
- ForMemcpy = true;
- return true;
+ UnorderedAtomic = UnorderedAtomic || LI->isAtomic();
+ return UnorderedAtomic ? LegalStoreKind::UnorderedAtomicMemcpy
+ : LegalStoreKind::Memcpy;
}
// This store can't be transformed into a memset/memcpy.
- return false;
+ return LegalStoreKind::None;
}
void LoopIdiomRecognize::collectStores(BasicBlock *BB) {
@@ -441,24 +470,29 @@ void LoopIdiomRecognize::collectStores(BasicBlock *BB) {
if (!SI)
continue;
- bool ForMemset = false;
- bool ForMemsetPattern = false;
- bool ForMemcpy = false;
// Make sure this is a strided store with a constant stride.
- if (!isLegalStore(SI, ForMemset, ForMemsetPattern, ForMemcpy))
- continue;
-
- // Save the store locations.
- if (ForMemset) {
+ switch (isLegalStore(SI)) {
+ case LegalStoreKind::None:
+ // Nothing to do
+ break;
+ case LegalStoreKind::Memset: {
// Find the base pointer.
Value *Ptr = GetUnderlyingObject(SI->getPointerOperand(), *DL);
StoreRefsForMemset[Ptr].push_back(SI);
- } else if (ForMemsetPattern) {
+ } break;
+ case LegalStoreKind::MemsetPattern: {
// Find the base pointer.
Value *Ptr = GetUnderlyingObject(SI->getPointerOperand(), *DL);
StoreRefsForMemsetPattern[Ptr].push_back(SI);
- } else if (ForMemcpy)
+ } break;
+ case LegalStoreKind::Memcpy:
+ case LegalStoreKind::UnorderedAtomicMemcpy:
StoreRefsForMemcpy.push_back(SI);
+ break;
+ default:
+ assert(false && "unhandled return value");
+ break;
+ }
}
}
@@ -496,7 +530,7 @@ bool LoopIdiomRecognize::runOnLoopBlock(
Instruction *Inst = &*I++;
// Look for memset instructions, which may be optimized to a larger memset.
if (MemSetInst *MSI = dyn_cast<MemSetInst>(Inst)) {
- WeakVH InstPtr(&*I);
+ WeakTrackingVH InstPtr(&*I);
if (!processLoopMemSet(MSI, BECount))
continue;
MadeChange = true;
@@ -780,6 +814,11 @@ bool LoopIdiomRecognize::processLoopStridedStore(
if (NegStride)
Start = getStartForNegStride(Start, BECount, IntPtr, StoreSize, SE);
+ // TODO: ideally we should still be able to generate memset if SCEV expander
+ // is taught to generate the dependencies at the latest point.
+ if (!isSafeToExpand(Start, *SE))
+ return false;
+
// Okay, we have a strided store "p[i]" of a splattable value. We can turn
// this into a memset in the loop preheader now if we want. However, this
// would be unsafe to do if there is anything else in the loop that may read
@@ -811,6 +850,11 @@ bool LoopIdiomRecognize::processLoopStridedStore(
SCEV::FlagNUW);
}
+ // TODO: ideally we should still be able to generate memset if SCEV expander
+ // is taught to generate the dependencies at the latest point.
+ if (!isSafeToExpand(NumBytesS, *SE))
+ return false;
+
Value *NumBytes =
Expander.expandCodeFor(NumBytesS, IntPtr, Preheader->getTerminator());
@@ -825,7 +869,7 @@ bool LoopIdiomRecognize::processLoopStridedStore(
Module *M = TheStore->getModule();
Value *MSP =
M->getOrInsertFunction("memset_pattern16", Builder.getVoidTy(),
- Int8PtrTy, Int8PtrTy, IntPtr, (void *)nullptr);
+ Int8PtrTy, Int8PtrTy, IntPtr);
inferLibFuncAttributes(*M->getFunction("memset_pattern16"), *TLI);
// Otherwise we should form a memset_pattern16. PatternValue is known to be
@@ -853,10 +897,10 @@ bool LoopIdiomRecognize::processLoopStridedStore(
/// If the stored value is a strided load in the same loop with the same stride
/// this may be transformable into a memcpy. This kicks in for stuff like
-/// for (i) A[i] = B[i];
+/// for (i) A[i] = B[i];
bool LoopIdiomRecognize::processLoopStoreOfLoopLoad(StoreInst *SI,
const SCEV *BECount) {
- assert(SI->isSimple() && "Expected only non-volatile stores.");
+ assert(SI->isUnordered() && "Expected only non-volatile non-ordered stores.");
Value *StorePtr = SI->getPointerOperand();
const SCEVAddRecExpr *StoreEv = cast<SCEVAddRecExpr>(SE->getSCEV(StorePtr));
@@ -866,7 +910,7 @@ bool LoopIdiomRecognize::processLoopStoreOfLoopLoad(StoreInst *SI,
// The store must be feeding a non-volatile load.
LoadInst *LI = cast<LoadInst>(SI->getValueOperand());
- assert(LI->isSimple() && "Expected only non-volatile stores.");
+ assert(LI->isUnordered() && "Expected only non-volatile non-ordered loads.");
// See if the pointer expression is an AddRec like {base,+,1} on the current
// loop, which indicates a strided load. If we have something else, it's a
@@ -940,6 +984,7 @@ bool LoopIdiomRecognize::processLoopStoreOfLoopLoad(StoreInst *SI,
const SCEV *NumBytesS =
SE->getAddExpr(BECount, SE->getOne(IntPtrTy), SCEV::FlagNUW);
+
if (StoreSize != 1)
NumBytesS = SE->getMulExpr(NumBytesS, SE->getConstant(IntPtrTy, StoreSize),
SCEV::FlagNUW);
@@ -947,9 +992,37 @@ bool LoopIdiomRecognize::processLoopStoreOfLoopLoad(StoreInst *SI,
Value *NumBytes =
Expander.expandCodeFor(NumBytesS, IntPtrTy, Preheader->getTerminator());
- CallInst *NewCall =
- Builder.CreateMemCpy(StoreBasePtr, LoadBasePtr, NumBytes,
- std::min(SI->getAlignment(), LI->getAlignment()));
+ unsigned Align = std::min(SI->getAlignment(), LI->getAlignment());
+ CallInst *NewCall = nullptr;
+ // Check whether to generate an unordered atomic memcpy:
+ // If the load or store is atomic, then it must necessarily be unordered
+ // by previous checks.
+ if (!SI->isAtomic() && !LI->isAtomic())
+ NewCall = Builder.CreateMemCpy(StoreBasePtr, LoadBasePtr, NumBytes, Align);
+ else {
+ // We cannot allow unaligned ops for unordered load/store, so reject
+ // anything where the alignment isn't at least the element size.
+ if (Align < StoreSize)
+ return false;
+
+ // If the element.atomic memcpy is not lowered into explicit
+ // loads/stores later, then it will be lowered into an element-size
+ // specific lib call. If the lib call doesn't exist for our store size, then
+ // we shouldn't generate the memcpy.
+ if (StoreSize > TTI->getAtomicMemIntrinsicMaxElementSize())
+ return false;
+
+ NewCall = Builder.CreateElementUnorderedAtomicMemCpy(
+ StoreBasePtr, LoadBasePtr, NumBytes, StoreSize);
+
+ // Propagate alignment info onto the pointer args. Note that unordered
+ // atomic loads/stores are *required* by the spec to have an alignment
+ // but non-atomic loads/stores may not.
+ NewCall->addParamAttr(0, Attribute::getWithAlignment(NewCall->getContext(),
+ SI->getAlignment()));
+ NewCall->addParamAttr(1, Attribute::getWithAlignment(NewCall->getContext(),
+ LI->getAlignment()));
+ }
NewCall->setDebugLoc(SI->getDebugLoc());
DEBUG(dbgs() << " Formed memcpy: " << *NewCall << "\n"
@@ -981,7 +1054,7 @@ bool LoopIdiomRecognize::avoidLIRForMultiBlockLoop(bool IsMemset,
}
bool LoopIdiomRecognize::runOnNoncountableLoop() {
- return recognizePopcount();
+ return recognizePopcount() || recognizeAndInsertCTLZ();
}
/// Check if the given conditional branch is based on the comparison between
@@ -1009,6 +1082,17 @@ static Value *matchCondition(BranchInst *BI, BasicBlock *LoopEntry) {
return nullptr;
}
+// Check if the recurrence variable `VarX` is in the right form to create
+// the idiom. Returns the value coerced to a PHINode if so.
+static PHINode *getRecurrenceVar(Value *VarX, Instruction *DefX,
+ BasicBlock *LoopEntry) {
+ auto *PhiX = dyn_cast<PHINode>(VarX);
+ if (PhiX && PhiX->getParent() == LoopEntry &&
+ (PhiX->getOperand(0) == DefX || PhiX->getOperand(1) == DefX))
+ return PhiX;
+ return nullptr;
+}
+
/// Return true iff the idiom is detected in the loop.
///
/// Additionally:
@@ -1078,19 +1162,15 @@ static bool detectPopcountIdiom(Loop *CurLoop, BasicBlock *PreCondBB,
if (!Dec ||
!((SubInst->getOpcode() == Instruction::Sub && Dec->isOne()) ||
(SubInst->getOpcode() == Instruction::Add &&
- Dec->isAllOnesValue()))) {
+ Dec->isMinusOne()))) {
return false;
}
}
// step 3: Check the recurrence of variable X
- {
- PhiX = dyn_cast<PHINode>(VarX1);
- if (!PhiX ||
- (PhiX->getOperand(0) != DefX2 && PhiX->getOperand(1) != DefX2)) {
- return false;
- }
- }
+ PhiX = getRecurrenceVar(VarX1, DefX2, LoopEntry);
+ if (!PhiX)
+ return false;
// step 4: Find the instruction which count the population: cnt2 = cnt1 + 1
{
@@ -1106,8 +1186,8 @@ static bool detectPopcountIdiom(Loop *CurLoop, BasicBlock *PreCondBB,
if (!Inc || !Inc->isOne())
continue;
- PHINode *Phi = dyn_cast<PHINode>(Inst->getOperand(0));
- if (!Phi || Phi->getParent() != LoopEntry)
+ PHINode *Phi = getRecurrenceVar(Inst->getOperand(0), Inst, LoopEntry);
+ if (!Phi)
continue;
// Check if the result of the instruction is live of the loop.
@@ -1146,6 +1226,169 @@ static bool detectPopcountIdiom(Loop *CurLoop, BasicBlock *PreCondBB,
return true;
}
+/// Return true if the idiom is detected in the loop.
+///
+/// Additionally:
+/// 1) \p CntInst is set to the instruction Counting Leading Zeros (CTLZ)
+/// or nullptr if there is no such instruction.
+/// 2) \p CntPhi is set to the corresponding phi node
+/// or nullptr if there is no such node.
+/// 3) \p Var is set to the value whose CTLZ could be used.
+/// 4) \p DefX is set to the instruction calculating Loop exit condition.
+///
+/// The core idiom we are trying to detect is:
+/// \code
+/// if (x0 == 0)
+/// goto loop-exit // the precondition of the loop
+/// cnt0 = init-val;
+/// do {
+/// x = phi (x0, x.next); //PhiX
+/// cnt = phi(cnt0, cnt.next);
+///
+/// cnt.next = cnt + 1;
+/// ...
+/// x.next = x >> 1; // DefX
+/// ...
+/// } while(x.next != 0);
+///
+/// loop-exit:
+/// \endcode
+static bool detectCTLZIdiom(Loop *CurLoop, PHINode *&PhiX,
+ Instruction *&CntInst, PHINode *&CntPhi,
+ Instruction *&DefX) {
+ BasicBlock *LoopEntry;
+ Value *VarX = nullptr;
+
+ DefX = nullptr;
+ PhiX = nullptr;
+ CntInst = nullptr;
+ CntPhi = nullptr;
+ LoopEntry = *(CurLoop->block_begin());
+
+ // step 1: Check if the loop-back branch is in desirable form.
+ if (Value *T = matchCondition(
+ dyn_cast<BranchInst>(LoopEntry->getTerminator()), LoopEntry))
+ DefX = dyn_cast<Instruction>(T);
+ else
+ return false;
+
+ // step 2: detect instructions corresponding to "x.next = x >> 1"
+ if (!DefX || DefX->getOpcode() != Instruction::AShr)
+ return false;
+ if (ConstantInt *Shft = dyn_cast<ConstantInt>(DefX->getOperand(1)))
+ if (!Shft || !Shft->isOne())
+ return false;
+ VarX = DefX->getOperand(0);
+
+ // step 3: Check the recurrence of variable X
+ PhiX = getRecurrenceVar(VarX, DefX, LoopEntry);
+ if (!PhiX)
+ return false;
+
+ // step 4: Find the instruction which counts the CTLZ: cnt.next = cnt + 1
+ // TODO: We can skip this step. If the loop trip count is known (CTLZ),
+ // then all uses of "cnt.next" could be optimized to the trip count
+ // plus "cnt0". Currently it is not optimized.
+ // This step could be used to detect POPCNT instruction:
+ // cnt.next = cnt + (x.next & 1)
+ for (BasicBlock::iterator Iter = LoopEntry->getFirstNonPHI()->getIterator(),
+ IterE = LoopEntry->end();
+ Iter != IterE; Iter++) {
+ Instruction *Inst = &*Iter;
+ if (Inst->getOpcode() != Instruction::Add)
+ continue;
+
+ ConstantInt *Inc = dyn_cast<ConstantInt>(Inst->getOperand(1));
+ if (!Inc || !Inc->isOne())
+ continue;
+
+ PHINode *Phi = getRecurrenceVar(Inst->getOperand(0), Inst, LoopEntry);
+ if (!Phi)
+ continue;
+
+ CntInst = Inst;
+ CntPhi = Phi;
+ break;
+ }
+ if (!CntInst)
+ return false;
+
+ return true;
+}
+
+/// Recognize CTLZ idiom in a non-countable loop and convert the loop
+/// to countable (with CTLZ trip count).
+/// Returns true if CTLZ was inserted as a new trip count; otherwise, returns false.
+bool LoopIdiomRecognize::recognizeAndInsertCTLZ() {
+ // Give up if the loop has multiple blocks or multiple backedges.
+ if (CurLoop->getNumBackEdges() != 1 || CurLoop->getNumBlocks() != 1)
+ return false;
+
+ Instruction *CntInst, *DefX;
+ PHINode *CntPhi, *PhiX;
+ if (!detectCTLZIdiom(CurLoop, PhiX, CntInst, CntPhi, DefX))
+ return false;
+
+ bool IsCntPhiUsedOutsideLoop = false;
+ for (User *U : CntPhi->users())
+ if (!CurLoop->contains(dyn_cast<Instruction>(U))) {
+ IsCntPhiUsedOutsideLoop = true;
+ break;
+ }
+ bool IsCntInstUsedOutsideLoop = false;
+ for (User *U : CntInst->users())
+ if (!CurLoop->contains(dyn_cast<Instruction>(U))) {
+ IsCntInstUsedOutsideLoop = true;
+ break;
+ }
+ // If both CntInst and CntPhi are used outside the loop the profitability
+ // is questionable.
+ if (IsCntInstUsedOutsideLoop && IsCntPhiUsedOutsideLoop)
+ return false;
+
+ // For some CPUs the result of the CTLZ(X) intrinsic is undefined
+ // when X is 0. If we cannot guarantee X != 0, we need to check this
+ // when expanding.
+ bool ZeroCheck = false;
+ // It is safe to assume the preheader exists, as it was checked in
+ // the parent function runOnLoop.
+ BasicBlock *PH = CurLoop->getLoopPreheader();
+ Value *InitX = PhiX->getIncomingValueForBlock(PH);
+ // If we check X != 0 before entering the loop we don't need a zero
+ // check in the CTLZ intrinsic, but only if CntPhi is not used outside the
+ // loop (if it is used we count CTLZ(X >> 1)).
+ if (!IsCntPhiUsedOutsideLoop)
+ if (BasicBlock *PreCondBB = PH->getSinglePredecessor())
+ if (BranchInst *PreCondBr =
+ dyn_cast<BranchInst>(PreCondBB->getTerminator())) {
+ if (matchCondition(PreCondBr, PH) == InitX)
+ ZeroCheck = true;
+ }
+
+ // Check if CTLZ intrinsic is profitable. Assume it is always profitable
+ // if we delete the loop (the loop has only 6 instructions):
+ // %n.addr.0 = phi [ %n, %entry ], [ %shr, %while.cond ]
+ // %i.0 = phi [ %i0, %entry ], [ %inc, %while.cond ]
+ // %shr = ashr %n.addr.0, 1
+ // %tobool = icmp eq %shr, 0
+ // %inc = add nsw %i.0, 1
+ // br i1 %tobool
+
+ IRBuilder<> Builder(PH->getTerminator());
+ SmallVector<const Value *, 2> Ops =
+ {InitX, ZeroCheck ? Builder.getTrue() : Builder.getFalse()};
+ ArrayRef<const Value *> Args(Ops);
+ if (CurLoop->getHeader()->size() != 6 &&
+ TTI->getIntrinsicCost(Intrinsic::ctlz, InitX->getType(), Args) >
+ TargetTransformInfo::TCC_Basic)
+ return false;
+
+ const DebugLoc DL = DefX->getDebugLoc();
+ transformLoopToCountable(PH, CntInst, CntPhi, InitX, DL, ZeroCheck,
+ IsCntPhiUsedOutsideLoop);
+ return true;
+}
+
/// Recognizes a population count idiom in a non-countable loop.
///
/// If detected, transforms the relevant code to issue the popcount intrinsic
@@ -1209,6 +1452,134 @@ static CallInst *createPopcntIntrinsic(IRBuilder<> &IRBuilder, Value *Val,
return CI;
}
+static CallInst *createCTLZIntrinsic(IRBuilder<> &IRBuilder, Value *Val,
+ const DebugLoc &DL, bool ZeroCheck) {
+ Value *Ops[] = {Val, ZeroCheck ? IRBuilder.getTrue() : IRBuilder.getFalse()};
+ Type *Tys[] = {Val->getType()};
+
+ Module *M = IRBuilder.GetInsertBlock()->getParent()->getParent();
+ Value *Func = Intrinsic::getDeclaration(M, Intrinsic::ctlz, Tys);
+ CallInst *CI = IRBuilder.CreateCall(Func, Ops);
+ CI->setDebugLoc(DL);
+
+ return CI;
+}
+
+/// Transform the following loop:
+/// loop:
+/// CntPhi = PHI [Cnt0, CntInst]
+/// PhiX = PHI [InitX, DefX]
+/// CntInst = CntPhi + 1
+/// DefX = PhiX >> 1
+/// LOOP_BODY
+/// Br: loop if (DefX != 0)
+/// Use(CntPhi) or Use(CntInst)
+///
+/// Into:
+/// If CntPhi used outside the loop:
+/// CountPrev = BitWidth(InitX) - CTLZ(InitX >> 1)
+/// Count = CountPrev + 1
+/// else
+/// Count = BitWidth(InitX) - CTLZ(InitX)
+/// loop:
+/// CntPhi = PHI [Cnt0, CntInst]
+/// PhiX = PHI [InitX, DefX]
+/// PhiCount = PHI [Count, Dec]
+/// CntInst = CntPhi + 1
+/// DefX = PhiX >> 1
+/// Dec = PhiCount - 1
+/// LOOP_BODY
+/// Br: loop if (Dec != 0)
+/// Use(CountPrev + Cnt0) // Use(CntPhi)
+/// or
+/// Use(Count + Cnt0) // Use(CntInst)
+///
+/// If LOOP_BODY is empty the loop will be deleted.
+/// If CntInst and DefX are not used in LOOP_BODY they will be removed.
+void LoopIdiomRecognize::transformLoopToCountable(
+ BasicBlock *Preheader, Instruction *CntInst, PHINode *CntPhi, Value *InitX,
+ const DebugLoc DL, bool ZeroCheck, bool IsCntPhiUsedOutsideLoop) {
+ BranchInst *PreheaderBr = dyn_cast<BranchInst>(Preheader->getTerminator());
+
+ // Step 1: Insert the CTLZ instruction at the end of the preheader block
+ // Count = BitWidth - CTLZ(InitX);
+ // If there are uses of CntPhi create:
+ // CountPrev = BitWidth - CTLZ(InitX >> 1);
+ IRBuilder<> Builder(PreheaderBr);
+ Builder.SetCurrentDebugLocation(DL);
+ Value *CTLZ, *Count, *CountPrev, *NewCount, *InitXNext;
+
+ if (IsCntPhiUsedOutsideLoop)
+ InitXNext = Builder.CreateAShr(InitX,
+ ConstantInt::get(InitX->getType(), 1));
+ else
+ InitXNext = InitX;
+ CTLZ = createCTLZIntrinsic(Builder, InitXNext, DL, ZeroCheck);
+ Count = Builder.CreateSub(
+ ConstantInt::get(CTLZ->getType(),
+ CTLZ->getType()->getIntegerBitWidth()),
+ CTLZ);
+ if (IsCntPhiUsedOutsideLoop) {
+ CountPrev = Count;
+ Count = Builder.CreateAdd(
+ CountPrev,
+ ConstantInt::get(CountPrev->getType(), 1));
+ }
+ if (IsCntPhiUsedOutsideLoop)
+ NewCount = Builder.CreateZExtOrTrunc(CountPrev,
+ cast<IntegerType>(CntInst->getType()));
+ else
+ NewCount = Builder.CreateZExtOrTrunc(Count,
+ cast<IntegerType>(CntInst->getType()));
+
+ // If the CTLZ counter's initial value is not zero, insert Add Inst.
+ Value *CntInitVal = CntPhi->getIncomingValueForBlock(Preheader);
+ ConstantInt *InitConst = dyn_cast<ConstantInt>(CntInitVal);
+ if (!InitConst || !InitConst->isZero())
+ NewCount = Builder.CreateAdd(NewCount, CntInitVal);
+
+ // Step 2: Insert new IV and loop condition:
+ // loop:
+ // ...
+ // PhiCount = PHI [Count, Dec]
+ // ...
+ // Dec = PhiCount - 1
+ // ...
+ // Br: loop if (Dec != 0)
+ BasicBlock *Body = *(CurLoop->block_begin());
+ auto *LbBr = dyn_cast<BranchInst>(Body->getTerminator());
+ ICmpInst *LbCond = cast<ICmpInst>(LbBr->getCondition());
+ Type *Ty = Count->getType();
+
+ PHINode *TcPhi = PHINode::Create(Ty, 2, "tcphi", &Body->front());
+
+ Builder.SetInsertPoint(LbCond);
+ Instruction *TcDec = cast<Instruction>(
+ Builder.CreateSub(TcPhi, ConstantInt::get(Ty, 1),
+ "tcdec", false, true));
+
+ TcPhi->addIncoming(Count, Preheader);
+ TcPhi->addIncoming(TcDec, Body);
+
+ CmpInst::Predicate Pred =
+ (LbBr->getSuccessor(0) == Body) ? CmpInst::ICMP_NE : CmpInst::ICMP_EQ;
+ LbCond->setPredicate(Pred);
+ LbCond->setOperand(0, TcDec);
+ LbCond->setOperand(1, ConstantInt::get(Ty, 0));
+
+ // Step 3: All the references to the original counter outside
+ // the loop are replaced with the NewCount -- the value returned from
+ // __builtin_ctlz(x).
+ if (IsCntPhiUsedOutsideLoop)
+ CntPhi->replaceUsesOutsideBlock(NewCount, Body);
+ else
+ CntInst->replaceUsesOutsideBlock(NewCount, Body);
+
+ // Step 4: Forget the "non-computable" trip-count SCEV associated with the
+ // loop. The loop would otherwise not be deleted even if it becomes empty.
+ SE->forgetLoop(CurLoop);
+}
+
void LoopIdiomRecognize::transformLoopToPopcount(BasicBlock *PreCondBB,
Instruction *CntInst,
PHINode *CntPhi, Value *Var) {
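
For the CTLZ recognizer added above, the shape of the source loop matters more
than its body. A hedged C++ sketch of the kind of loop detectCTLZIdiom() is aimed
at (the function name is illustrative; the pass itself works on IR, not source):

  // Counts how many right shifts it takes for x to reach zero. Once the idiom
  // is recognized, the trip count becomes BitWidth(x) - ctlz(x), and the loop
  // can be deleted when nothing else in its body is live.
  int shifts_until_zero(int x) {  // assumes x >= 0 so the ashr loop terminates
    int cnt = 0;
    while (x != 0) {
      x >>= 1;   // DefX: x.next = x >> 1 (arithmetic shift on signed int)
      ++cnt;     // CntInst: cnt.next = cnt + 1
    }
    return cnt;
  }
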
diff --git a/gnu/llvm/tools/clang/include/clang/Basic/Builtins.def b/gnu/llvm/tools/clang/include/clang/Basic/Builtins.def
index 0472a662a7e..1ddb9beaf91 100644
--- a/gnu/llvm/tools/clang/include/clang/Basic/Builtins.def
+++ b/gnu/llvm/tools/clang/include/clang/Basic/Builtins.def
@@ -52,6 +52,7 @@
// LL -> long long
// LLL -> __int128_t (e.g. LLLi)
// W -> int64_t
+// N -> 'int' size if target is LP64, 'L' otherwise.
// S -> signed
// U -> unsigned
// I -> Required to constant fold to an integer constant expression.
@@ -718,11 +719,11 @@ BUILTIN(__builtin_rindex, "c*cC*i", "Fn")
LANGBUILTIN(_alloca, "v*z", "n", ALL_MS_LANGUAGES)
LANGBUILTIN(__assume, "vb", "n", ALL_MS_LANGUAGES)
LIBBUILTIN(_byteswap_ushort, "UsUs", "fnc", "stdlib.h", ALL_MS_LANGUAGES)
-LIBBUILTIN(_byteswap_ulong, "ULiULi", "fnc", "stdlib.h", ALL_MS_LANGUAGES)
+LIBBUILTIN(_byteswap_ulong, "UNiUNi", "fnc", "stdlib.h", ALL_MS_LANGUAGES)
LIBBUILTIN(_byteswap_uint64, "ULLiULLi", "fnc", "stdlib.h", ALL_MS_LANGUAGES)
LANGBUILTIN(__debugbreak, "v", "n", ALL_MS_LANGUAGES)
-LANGBUILTIN(__exception_code, "ULi", "n", ALL_MS_LANGUAGES)
-LANGBUILTIN(_exception_code, "ULi", "n", ALL_MS_LANGUAGES)
+LANGBUILTIN(__exception_code, "UNi", "n", ALL_MS_LANGUAGES)
+LANGBUILTIN(_exception_code, "UNi", "n", ALL_MS_LANGUAGES)
LANGBUILTIN(__exception_info, "v*", "n", ALL_MS_LANGUAGES)
LANGBUILTIN(_exception_info, "v*", "n", ALL_MS_LANGUAGES)
LANGBUILTIN(__abnormal_termination, "i", "n", ALL_MS_LANGUAGES)
@@ -730,49 +731,50 @@ LANGBUILTIN(_abnormal_termination, "i", "n", ALL_MS_LANGUAGES)
LANGBUILTIN(__GetExceptionInfo, "v*.", "ntu", ALL_MS_LANGUAGES)
LANGBUILTIN(_InterlockedAnd8, "ccD*c", "n", ALL_MS_LANGUAGES)
LANGBUILTIN(_InterlockedAnd16, "ssD*s", "n", ALL_MS_LANGUAGES)
-LANGBUILTIN(_InterlockedAnd, "LiLiD*Li", "n", ALL_MS_LANGUAGES)
+LANGBUILTIN(_InterlockedAnd, "NiNiD*Ni", "n", ALL_MS_LANGUAGES)
LANGBUILTIN(_InterlockedCompareExchange8, "ccD*cc", "n", ALL_MS_LANGUAGES)
LANGBUILTIN(_InterlockedCompareExchange16, "ssD*ss", "n", ALL_MS_LANGUAGES)
-LANGBUILTIN(_InterlockedCompareExchange, "LiLiD*LiLi", "n", ALL_MS_LANGUAGES)
+LANGBUILTIN(_InterlockedCompareExchange, "NiNiD*NiNi", "n", ALL_MS_LANGUAGES)
LANGBUILTIN(_InterlockedCompareExchange64, "LLiLLiD*LLiLLi", "n", ALL_MS_LANGUAGES)
LANGBUILTIN(_InterlockedCompareExchangePointer, "v*v*D*v*v*", "n", ALL_MS_LANGUAGES)
LANGBUILTIN(_InterlockedDecrement16, "ssD*", "n", ALL_MS_LANGUAGES)
-LANGBUILTIN(_InterlockedDecrement, "LiLiD*", "n", ALL_MS_LANGUAGES)
-LANGBUILTIN(_InterlockedExchange, "LiLiD*Li", "n", ALL_MS_LANGUAGES)
+LANGBUILTIN(_InterlockedDecrement, "NiNiD*", "n", ALL_MS_LANGUAGES)
+LANGBUILTIN(_InterlockedExchange, "NiNiD*Ni", "n", ALL_MS_LANGUAGES)
LANGBUILTIN(_InterlockedExchange8, "ccD*c", "n", ALL_MS_LANGUAGES)
LANGBUILTIN(_InterlockedExchange16, "ssD*s", "n", ALL_MS_LANGUAGES)
LANGBUILTIN(_InterlockedExchangeAdd8, "ccD*c", "n", ALL_MS_LANGUAGES)
LANGBUILTIN(_InterlockedExchangeAdd16, "ssD*s", "n", ALL_MS_LANGUAGES)
-LANGBUILTIN(_InterlockedExchangeAdd, "LiLiD*Li", "n", ALL_MS_LANGUAGES)
+LANGBUILTIN(_InterlockedExchangeAdd, "NiNiD*Ni", "n", ALL_MS_LANGUAGES)
LANGBUILTIN(_InterlockedExchangePointer, "v*v*D*v*", "n", ALL_MS_LANGUAGES)
LANGBUILTIN(_InterlockedExchangeSub8, "ccD*c", "n", ALL_MS_LANGUAGES)
LANGBUILTIN(_InterlockedExchangeSub16, "ssD*s", "n", ALL_MS_LANGUAGES)
-LANGBUILTIN(_InterlockedExchangeSub, "LiLiD*Li", "n", ALL_MS_LANGUAGES)
+LANGBUILTIN(_InterlockedExchangeSub, "NiNiD*Ni", "n", ALL_MS_LANGUAGES)
LANGBUILTIN(_InterlockedIncrement16, "ssD*", "n", ALL_MS_LANGUAGES)
-LANGBUILTIN(_InterlockedIncrement, "LiLiD*", "n", ALL_MS_LANGUAGES)
+LANGBUILTIN(_InterlockedIncrement, "NiNiD*", "n", ALL_MS_LANGUAGES)
LANGBUILTIN(_InterlockedOr8, "ccD*c", "n", ALL_MS_LANGUAGES)
LANGBUILTIN(_InterlockedOr16, "ssD*s", "n", ALL_MS_LANGUAGES)
-LANGBUILTIN(_InterlockedOr, "LiLiD*Li", "n", ALL_MS_LANGUAGES)
+LANGBUILTIN(_InterlockedOr, "NiNiD*Ni", "n", ALL_MS_LANGUAGES)
LANGBUILTIN(_InterlockedXor8, "ccD*c", "n", ALL_MS_LANGUAGES)
LANGBUILTIN(_InterlockedXor16, "ssD*s", "n", ALL_MS_LANGUAGES)
-LANGBUILTIN(_InterlockedXor, "LiLiD*Li", "n", ALL_MS_LANGUAGES)
+LANGBUILTIN(_InterlockedXor, "NiNiD*Ni", "n", ALL_MS_LANGUAGES)
+LANGBUILTIN(_interlockedbittestandset, "UcNiD*Ni", "n", ALL_MS_LANGUAGES)
LANGBUILTIN(__noop, "i.", "n", ALL_MS_LANGUAGES)
LANGBUILTIN(__popcnt16, "UsUs", "nc", ALL_MS_LANGUAGES)
LANGBUILTIN(__popcnt, "UiUi", "nc", ALL_MS_LANGUAGES)
LANGBUILTIN(__popcnt64, "ULLiULLi", "nc", ALL_MS_LANGUAGES)
-LANGBUILTIN(__readfsdword, "ULiULi", "n", ALL_MS_LANGUAGES)
LANGBUILTIN(_ReturnAddress, "v*", "n", ALL_MS_LANGUAGES)
LANGBUILTIN(_rotl8, "UcUcUc", "n", ALL_MS_LANGUAGES)
LANGBUILTIN(_rotl16, "UsUsUc", "n", ALL_MS_LANGUAGES)
LANGBUILTIN(_rotl, "UiUii", "n", ALL_MS_LANGUAGES)
-LANGBUILTIN(_lrotl, "ULiULii", "n", ALL_MS_LANGUAGES)
+LANGBUILTIN(_lrotl, "UNiUNii", "n", ALL_MS_LANGUAGES)
LANGBUILTIN(_rotl64, "ULLiULLii", "n", ALL_MS_LANGUAGES)
LANGBUILTIN(_rotr8, "UcUcUc", "n", ALL_MS_LANGUAGES)
LANGBUILTIN(_rotr16, "UsUsUc", "n", ALL_MS_LANGUAGES)
LANGBUILTIN(_rotr, "UiUii", "n", ALL_MS_LANGUAGES)
-LANGBUILTIN(_lrotr, "ULiULii", "n", ALL_MS_LANGUAGES)
+LANGBUILTIN(_lrotr, "UNiUNii", "n", ALL_MS_LANGUAGES)
LANGBUILTIN(_rotr64, "ULLiULLii", "n", ALL_MS_LANGUAGES)
LANGBUILTIN(__va_start, "vc**.", "nt", ALL_MS_LANGUAGES)
+LANGBUILTIN(__fastfail, "vUi", "nr", ALL_MS_LANGUAGES)
// Microsoft library builtins.
LIBBUILTIN(_setjmpex, "iJ", "fj", "setjmpex.h", ALL_MS_LANGUAGES)
@@ -1364,7 +1366,7 @@ BUILTIN(__builtin_coro_free, "v*v*", "n")
BUILTIN(__builtin_coro_id, "v*Iiv*v*v*", "n")
BUILTIN(__builtin_coro_alloc, "b", "n")
BUILTIN(__builtin_coro_begin, "v*v*", "n")
-BUILTIN(__builtin_coro_end, "vv*Ib", "n")
+BUILTIN(__builtin_coro_end, "bv*Ib", "n")
BUILTIN(__builtin_coro_suspend, "cIb", "n")
BUILTIN(__builtin_coro_param, "bv*v*", "n")
// OpenCL v2.0 s6.13.16, s9.17.3.5 - Pipe functions.
@@ -1408,6 +1410,14 @@ LANGBUILTIN(to_private, "v*v*", "tn", OCLC20_LANG)
BUILTIN(__builtin_os_log_format_buffer_size, "zcC*.", "p:0:nut")
BUILTIN(__builtin_os_log_format, "v*v*cC*.", "p:0:nt")
+// Builtins for XRay
+BUILTIN(__xray_customevent, "vcC*z", "")
+
+// Win64-compatible va_list functions
+BUILTIN(__builtin_ms_va_start, "vc*&.", "nt")
+BUILTIN(__builtin_ms_va_end, "vc*&", "n")
+BUILTIN(__builtin_ms_va_copy, "vc*&c*&", "n")
+
#undef BUILTIN
#undef LIBBUILTIN
#undef LANGBUILTIN
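
The new 'N' length modifier above exists so the Microsoft Interlocked builtins
keep their 32-bit width on LP64 targets, where plain 'long' would be 64 bits. An
illustrative decode of the "NiNiD*Ni" signature used by _InterlockedAnd
(parameter names invented for readability; these declarations are not part of
the patch):

  #ifdef _MSC_VER   // LLP64 Windows: long is 32 bits, so 'N' decodes as 'L'
  long _InterlockedAnd(long volatile *Value, long Mask);
  #else             // LP64 (e.g. OpenBSD/amd64): 'N' decodes as plain int
  int _InterlockedAnd(int volatile *Value, int Mask);
  #endif
  // Either way the operation stays 32 bits wide, matching MSVC's <intrin.h>.
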
diff --git a/gnu/llvm/tools/clang/include/clang/Basic/DiagnosticGroups.td b/gnu/llvm/tools/clang/include/clang/Basic/DiagnosticGroups.td
index 812d7c9db9d..23e4d4633ae 100644
--- a/gnu/llvm/tools/clang/include/clang/Basic/DiagnosticGroups.td
+++ b/gnu/llvm/tools/clang/include/clang/Basic/DiagnosticGroups.td
@@ -34,7 +34,11 @@ def CXX14BinaryLiteral : DiagGroup<"c++14-binary-literal">;
def GNUBinaryLiteral : DiagGroup<"gnu-binary-literal">;
def GNUCompoundLiteralInitializer : DiagGroup<"gnu-compound-literal-initializer">;
def BitFieldConstantConversion : DiagGroup<"bitfield-constant-conversion">;
+def BitFieldEnumConversion : DiagGroup<"bitfield-enum-conversion">;
def BitFieldWidth : DiagGroup<"bitfield-width">;
+def CoroutineMissingUnhandledException :
+ DiagGroup<"coroutine-missing-unhandled-exception">;
+def Coroutine : DiagGroup<"coroutine", [CoroutineMissingUnhandledException]>;
def ConstantConversion :
DiagGroup<"constant-conversion", [ BitFieldConstantConversion ] >;
def LiteralConversion : DiagGroup<"literal-conversion">;
@@ -94,7 +98,9 @@ def CXX11CompatDeprecatedWritableStr :
def DeprecatedAttributes : DiagGroup<"deprecated-attributes">;
def DeprecatedDeclarations : DiagGroup<"deprecated-declarations">;
def UnavailableDeclarations : DiagGroup<"unavailable-declarations">;
-def UnguardedAvailability : DiagGroup<"unguarded-availability">;
+def UnguardedAvailabilityNew : DiagGroup<"unguarded-availability-new">;
+def UnguardedAvailability : DiagGroup<"unguarded-availability",
+ [UnguardedAvailabilityNew]>;
// partial-availability is an alias of unguarded-availability.
def : DiagGroup<"partial-availability", [UnguardedAvailability]>;
def DeprecatedDynamicExceptionSpec
@@ -146,6 +152,13 @@ def GNUFoldingConstant : DiagGroup<"gnu-folding-constant">;
def FormatExtraArgs : DiagGroup<"format-extra-args">;
def FormatZeroLength : DiagGroup<"format-zero-length">;
+def InvalidIOSDeploymentTarget : DiagGroup<"invalid-ios-deployment-target">;
+
+def CXX17CompatMangling : DiagGroup<"c++17-compat-mangling">;
+def : DiagGroup<"c++1z-compat-mangling", [CXX17CompatMangling]>;
+// Name of this warning in GCC.
+def NoexceptType : DiagGroup<"noexcept-type", [CXX17CompatMangling]>;
+
// Warnings for C++1y code which is not compatible with prior C++ standards.
def CXXPre14Compat : DiagGroup<"c++98-c++11-compat">;
def CXXPre14CompatPedantic : DiagGroup<"c++98-c++11-compat-pedantic",
@@ -175,6 +188,8 @@ def CXX98CompatPedantic : DiagGroup<"c++98-compat-pedantic",
def CXX11Narrowing : DiagGroup<"c++11-narrowing">;
+def CXX11WarnOverrideDestructor :
+ DiagGroup<"inconsistent-missing-destructor-override">;
def CXX11WarnOverrideMethod : DiagGroup<"inconsistent-missing-override">;
// Original name of this warning in Clang
@@ -204,8 +219,10 @@ def CXX14Compat : DiagGroup<"c++14-compat", [CXXPre1zCompat]>;
def CXX14CompatPedantic : DiagGroup<"c++14-compat-pedantic",
[CXXPre1zCompatPedantic]>;
-def CXX1zCompat : DiagGroup<"c++1z-compat", [DeprecatedRegister,
- DeprecatedIncrementBool]>;
+def CXX17Compat : DiagGroup<"c++17-compat", [DeprecatedRegister,
+ DeprecatedIncrementBool,
+ CXX17CompatMangling]>;
+def : DiagGroup<"c++1z-compat", [CXX17Compat]>;
def ExitTimeDestructors : DiagGroup<"exit-time-destructors">;
def FlexibleArrayExtensions : DiagGroup<"flexible-array-extensions">;
@@ -301,6 +318,7 @@ def : DiagGroup<"nonportable-cfstrings">;
def NonVirtualDtor : DiagGroup<"non-virtual-dtor">;
def : DiagGroup<"effc++", [NonVirtualDtor]>;
def OveralignedType : DiagGroup<"over-aligned">;
+def AlignedAllocationUnavailable : DiagGroup<"aligned-allocation-unavailable">;
def OldStyleCast : DiagGroup<"old-style-cast">;
def : DiagGroup<"old-style-definition">;
def OutOfLineDeclaration : DiagGroup<"out-of-line-declaration">;
@@ -355,6 +373,7 @@ def SemiBeforeMethodBody : DiagGroup<"semicolon-before-method-body">;
def Sentinel : DiagGroup<"sentinel">;
def MissingMethodReturnType : DiagGroup<"missing-method-return-type">;
+def ShadowField : DiagGroup<"shadow-field">;
def ShadowFieldInConstructorModified : DiagGroup<"shadow-field-in-constructor-modified">;
def ShadowFieldInConstructor : DiagGroup<"shadow-field-in-constructor",
[ShadowFieldInConstructorModified]>;
@@ -366,7 +385,7 @@ def ShadowUncapturedLocal : DiagGroup<"shadow-uncaptured-local">;
def Shadow : DiagGroup<"shadow", [ShadowFieldInConstructorModified,
ShadowIvar]>;
def ShadowAll : DiagGroup<"shadow-all", [Shadow, ShadowFieldInConstructor,
- ShadowUncapturedLocal]>;
+ ShadowUncapturedLocal, ShadowField]>;
def Shorten64To32 : DiagGroup<"shorten-64-to-32">;
def : DiagGroup<"sign-promo">;
@@ -454,7 +473,9 @@ def Uninitialized : DiagGroup<"uninitialized", [UninitializedSometimes,
def IgnoredPragmaIntrinsic : DiagGroup<"ignored-pragma-intrinsic">;
def UnknownPragmas : DiagGroup<"unknown-pragmas">;
def IgnoredPragmas : DiagGroup<"ignored-pragmas", [IgnoredPragmaIntrinsic]>;
-def Pragmas : DiagGroup<"pragmas", [UnknownPragmas, IgnoredPragmas]>;
+def PragmaClangAttribute : DiagGroup<"pragma-clang-attribute">;
+def Pragmas : DiagGroup<"pragmas", [UnknownPragmas, IgnoredPragmas,
+ PragmaClangAttribute]>;
def UnknownWarningOption : DiagGroup<"unknown-warning-option">;
def NSobjectAttribute : DiagGroup<"NSObject-attribute">;
def IndependentClassAttribute : DiagGroup<"IndependentClass-attribute">;
@@ -477,9 +498,11 @@ def UnneededInternalDecl : DiagGroup<"unneeded-internal-declaration">;
def UnneededMemberFunction : DiagGroup<"unneeded-member-function">;
def UnusedPrivateField : DiagGroup<"unused-private-field">;
def UnusedFunction : DiagGroup<"unused-function", [UnneededInternalDecl]>;
+def UnusedTemplate : DiagGroup<"unused-template", [UnneededInternalDecl]>;
def UnusedMemberFunction : DiagGroup<"unused-member-function",
[UnneededMemberFunction]>;
def UnusedLabel : DiagGroup<"unused-label">;
+def UnusedLambdaCapture : DiagGroup<"unused-lambda-capture">;
def UnusedParameter : DiagGroup<"unused-parameter">;
def UnusedResult : DiagGroup<"unused-result">;
def PotentiallyEvaluatedExpression : DiagGroup<"potentially-evaluated-expression">;
@@ -602,6 +625,7 @@ def Conversion : DiagGroup<"conversion",
[BoolConversion,
ConstantConversion,
EnumConversion,
+ BitFieldEnumConversion,
FloatConversion,
Shorten64To32,
IntConversion,
@@ -616,9 +640,11 @@ def Conversion : DiagGroup<"conversion",
def Unused : DiagGroup<"unused",
[UnusedArgument, UnusedFunction, UnusedLabel,
// UnusedParameter, (matches GCC's behavior)
+ // UnusedTemplate, (clean-up libc++ before enabling)
// UnusedMemberFunction, (clean-up llvm before enabling)
- UnusedPrivateField, UnusedLocalTypedef,
- UnusedValue, UnusedVariable, UnusedPropertyIvar]>,
+ UnusedPrivateField, UnusedLambdaCapture,
+ UnusedLocalTypedef, UnusedValue, UnusedVariable,
+ UnusedPropertyIvar]>,
DiagCategory<"Unused Entity Issue">;
// Format settings.
@@ -748,10 +774,11 @@ def CXX14 : DiagGroup<"c++14-extensions", [CXX14BinaryLiteral]>;
// A warning group for warnings about using C++1z features as extensions in
// earlier C++ versions.
-def CXX1z : DiagGroup<"c++1z-extensions">;
+def CXX17 : DiagGroup<"c++17-extensions">;
def : DiagGroup<"c++0x-extensions", [CXX11]>;
def : DiagGroup<"c++1y-extensions", [CXX14]>;
+def : DiagGroup<"c++1z-extensions", [CXX17]>;
def DelegatingCtorCycles :
DiagGroup<"delegating-ctor-cycles">;
@@ -879,10 +906,11 @@ def BackendOptimizationRemarkAnalysis : DiagGroup<"pass-analysis">;
def BackendOptimizationFailure : DiagGroup<"pass-failed">;
// Instrumentation based profiling warnings.
+def ProfileInstrMissing : DiagGroup<"profile-instr-missing">;
def ProfileInstrOutOfDate : DiagGroup<"profile-instr-out-of-date">;
def ProfileInstrUnprofiled : DiagGroup<"profile-instr-unprofiled">;
-// AddressSanitizer frontent instrumentation remarks.
+// AddressSanitizer frontend instrumentation remarks.
def SanitizeAddressRemarks : DiagGroup<"sanitize-address">;
// Issues with serialized diagnostics.
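
Two of the diagnostic groups introduced above, -Wunused-lambda-capture and
-Wshadow-field, are easiest to see on a small example. A hedged sketch (exact
diagnostic wording may differ from what this clang prints):

  struct Base { int field; };
  struct Derived : Base {
    int field;                      // -Wshadow-field: shadows Base::field
  };
  int demo() {
    int x = 0, y = 1;
    auto f = [x, y] { return x; };  // -Wunused-lambda-capture: 'y' is unused
    return f();
  }
  // e.g. clang++ -std=c++14 -Wshadow-field -Wunused-lambda-capture demo.cpp
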
diff --git a/gnu/llvm/tools/clang/include/clang/Basic/DiagnosticSemaKinds.td b/gnu/llvm/tools/clang/include/clang/Basic/DiagnosticSemaKinds.td
index 05fe50f468a..e7c3c89b676 100644
--- a/gnu/llvm/tools/clang/include/clang/Basic/DiagnosticSemaKinds.td
+++ b/gnu/llvm/tools/clang/include/clang/Basic/DiagnosticSemaKinds.td
@@ -211,9 +211,9 @@ def warn_auto_storage_class : Warning<
def warn_deprecated_register : Warning<
"'register' storage class specifier is deprecated "
- "and incompatible with C++1z">, InGroup<DeprecatedRegister>;
+ "and incompatible with C++17">, InGroup<DeprecatedRegister>;
def ext_register_storage_class : ExtWarn<
- "ISO C++1z does not allow 'register' storage class specifier">,
+ "ISO C++17 does not allow 'register' storage class specifier">,
DefaultError, InGroup<Register>;
def err_invalid_decl_spec_combination : Error<
@@ -259,6 +259,9 @@ def err_anyx86_interrupt_attribute : Error<
"a pointer as the first parameter|a %2 type as the second parameter}1">;
def err_anyx86_interrupt_called : Error<
"interrupt service routine cannot be called directly">;
+def warn_arm_interrupt_calling_convention : Warning<
+ "call to function without interrupt attribute could clobber interruptee's VFP registers">,
+ InGroup<Extra>;
def warn_mips_interrupt_attribute : Warning<
"MIPS 'interrupt' attribute only applies to functions that have "
"%select{no parameters|a 'void' return type}0">,
@@ -300,6 +303,8 @@ def note_empty_parens_zero_initialize : Note<
"replace parentheses with an initializer to declare a variable">;
def warn_unused_function : Warning<"unused function %0">,
InGroup<UnusedFunction>, DefaultIgnore;
+def warn_unused_template : Warning<"unused %select{function|variable}0 template %1">,
+ InGroup<UnusedTemplate>, DefaultIgnore;
def warn_unused_member_function : Warning<"unused member function %0">,
InGroup<UnusedMemberFunction>, DefaultIgnore;
def warn_used_but_marked_unused: Warning<"%0 was marked unused but was used">,
@@ -316,6 +321,9 @@ def warn_unneeded_member_function : Warning<
InGroup<UnneededMemberFunction>, DefaultIgnore;
def warn_unused_private_field: Warning<"private field %0 is not used">,
InGroup<UnusedPrivateField>, DefaultIgnore;
+def warn_unused_lambda_capture: Warning<"lambda capture %0 is not "
+ "%select{used|required to be captured for this use}1">,
+ InGroup<UnusedLambdaCapture>, DefaultIgnore;
def warn_parameter_size: Warning<
"%0 is a large (%1 bytes) pass-by-value argument; "
@@ -363,7 +371,9 @@ def warn_decl_shadow :
"local variable|"
"variable in %2|"
"static data member of %2|"
- "field of %2}1">,
+ "field of %2|"
+ "typedef in %2|"
+ "type alias in %2}1">,
InGroup<Shadow>, DefaultIgnore;
def warn_decl_shadow_uncaptured_local :
Warning<warn_decl_shadow.Text>,
@@ -381,9 +391,9 @@ def err_decomp_decl_context : Error<
"decomposition declaration not permitted in this context">;
def warn_cxx14_compat_decomp_decl : Warning<
"decomposition declarations are incompatible with "
- "C++ standards before C++1z">, DefaultIgnore, InGroup<CXXPre1zCompat>;
+ "C++ standards before C++17">, DefaultIgnore, InGroup<CXXPre1zCompat>;
def ext_decomp_decl : ExtWarn<
- "decomposition declarations are a C++1z extension">, InGroup<CXX1z>;
+ "decomposition declarations are a C++17 extension">, InGroup<CXX17>;
def err_decomp_decl_spec : Error<
"decomposition declaration cannot be declared "
"%plural{1:'%1'|:with '%1' specifiers}0">;
@@ -484,7 +494,7 @@ def err_access_decl : Error<
"ISO C++11 does not allow access declarations; "
"use using declarations instead">;
def ext_dynamic_exception_spec : ExtWarn<
- "ISO C++1z does not allow dynamic exception specifications">,
+ "ISO C++17 does not allow dynamic exception specifications">,
InGroup<DynamicExceptionSpec>, DefaultError;
def warn_exception_spec_deprecated : Warning<
"dynamic exception specifications are deprecated">,
@@ -497,7 +507,7 @@ def warn_deprecated_copy_operation : Warning<
InGroup<Deprecated>, DefaultIgnore;
def warn_cxx1z_compat_exception_spec_in_signature : Warning<
"mangled name of %0 will change in C++17 due to non-throwing exception "
- "specification in function signature">, InGroup<CXX1zCompat>;
+ "specification in function signature">, InGroup<CXX17CompatMangling>;
def warn_global_constructor : Warning<
"declaration requires a global constructor">,
@@ -527,10 +537,10 @@ def err_maybe_falloff_nonvoid_block : Error<
def err_falloff_nonvoid_block : Error<
"control reaches end of non-void block">;
def warn_maybe_falloff_nonvoid_coroutine : Warning<
- "control may reach end of non-void coroutine">,
+ "control may reach end of coroutine; which is undefined behavior because the promise type %0 does not declare 'return_void()'">,
InGroup<ReturnType>;
def warn_falloff_nonvoid_coroutine : Warning<
- "control reaches end of non-void coroutine">,
+ "control reaches end of coroutine; which is undefined behavior because the promise type %0 does not declare 'return_void()'">,
InGroup<ReturnType>;
def warn_suggest_noreturn_function : Warning<
"%select{function|method}0 %1 could be declared with attribute 'noreturn'">,
@@ -722,6 +732,9 @@ def err_super_in_lambda_unsupported : Error<
def warn_pragma_unused_undeclared_var : Warning<
"undeclared variable %0 used as an argument for '#pragma unused'">,
InGroup<IgnoredPragmas>;
+def warn_atl_uuid_deprecated : Warning<
+ "specifying 'uuid' as an ATL attribute is deprecated; use __declspec instead">,
+ InGroup<DeprecatedDeclarations>;
def warn_pragma_unused_expected_var_arg : Warning<
"only variables can be arguments to '#pragma unused'">,
InGroup<IgnoredPragmas>;
@@ -742,6 +755,25 @@ def err_pragma_loop_compatibility : Error<
def err_pragma_loop_precedes_nonloop : Error<
"expected a for, while, or do-while loop to follow '%0'">;
+def err_pragma_attribute_matcher_subrule_contradicts_rule : Error<
+ "redundant attribute subject matcher sub-rule '%0'; '%1' already matches "
+ "those declarations">;
+def err_pragma_attribute_matcher_negated_subrule_contradicts_subrule : Error<
+ "negated attribute subject matcher sub-rule '%0' contradicts sub-rule '%1'">;
+def err_pragma_attribute_invalid_matchers : Error<
+ "attribute %0 can't be applied to %1">;
+def err_pragma_attribute_stack_mismatch : Error<
+ "'#pragma clang attribute pop' with no matching '#pragma clang attribute push'">;
+def warn_pragma_attribute_unused : Warning<
+ "unused attribute %0 in '#pragma clang attribute push' region">,
+ InGroup<PragmaClangAttribute>;
+def note_pragma_attribute_region_ends_here : Note<
+ "'#pragma clang attribute push' regions ends here">;
+def err_pragma_attribute_no_pop_eof : Error<"unterminated "
+ "'#pragma clang attribute push' at end of file">;
+def note_pragma_attribute_applied_decl_here : Note<
+ "when applied to this declaration">;
+
/// Objective-C parser diagnostics
def err_duplicate_class_def : Error<
"duplicate interface definition for class %0">;
@@ -776,8 +808,10 @@ def warn_property_types_are_incompatible : Warning<
"property type %0 is incompatible with type %1 inherited from %2">,
InGroup<DiagGroup<"incompatible-property-type">>;
def warn_protocol_property_mismatch : Warning<
- "property of type %0 was selected for synthesis">,
+ "property %select{of type %1|with attribute '%1'|without attribute '%1'|with "
+ "getter %1|with setter %1}0 was selected for synthesis">,
InGroup<DiagGroup<"protocol-property-synthesis-ambiguity">>;
+def err_protocol_property_mismatch: Error<warn_protocol_property_mismatch.Text>;
def err_undef_interface : Error<"cannot find interface declaration for %0">;
def err_category_forward_interface : Error<
"cannot define %select{category|class extension}0 for undefined class %1">;
@@ -997,6 +1031,8 @@ def warn_auto_synthesizing_protocol_property :Warning<
"auto property synthesis will not synthesize property %0"
" declared in protocol %1">,
InGroup<DiagGroup<"objc-protocol-property-synthesis">>;
+def note_add_synthesize_directive : Note<
+ "add a '@synthesize' directive">;
def warn_no_autosynthesis_shared_ivar_property : Warning <
"auto property synthesis will not synthesize property "
"%0 because it cannot share an ivar with another synthesized property">,
@@ -1054,7 +1090,9 @@ def err_category_property : Error<
def note_property_declare : Note<
"property declared here">;
def note_protocol_property_declare : Note<
- "it could also be property of type %0 declared here">;
+ "it could also be property "
+ "%select{of type %1|without attribute '%1'|with attribute '%1'|with getter "
+ "%1|with setter %1}0 declared here">;
def note_property_synthesize : Note<
"property synthesized here">;
def err_synthesize_category_decl : Error<
@@ -1153,20 +1191,24 @@ def err_objc_kindof_nonobject : Error<
def err_objc_kindof_wrong_position : Error<
"'__kindof' type specifier must precede the declarator">;
+def err_objc_method_unsupported_param_ret_type : Error<
+ "%0 %select{parameter|return}1 type is unsupported; "
+ "support for vector types for this target is introduced in %2">;
+
// C++ declarations
def err_static_assert_expression_is_not_constant : Error<
"static_assert expression is not an integral constant expression">;
def err_static_assert_failed : Error<"static_assert failed%select{ %1|}0">;
def ext_static_assert_no_message : ExtWarn<
- "static_assert with no message is a C++1z extension">, InGroup<CXX1z>;
+ "static_assert with no message is a C++17 extension">, InGroup<CXX17>;
def warn_cxx14_compat_static_assert_no_message : Warning<
- "static_assert with no message is incompatible with C++ standards before C++1z">,
+ "static_assert with no message is incompatible with C++ standards before C++17">,
DefaultIgnore, InGroup<CXXPre1zCompat>;
def ext_inline_variable : ExtWarn<
- "inline variables are a C++1z extension">, InGroup<CXX1z>;
+ "inline variables are a C++17 extension">, InGroup<CXX17>;
def warn_cxx14_compat_inline_variable : Warning<
- "inline variables are incompatible with C++ standards before C++1z">,
+ "inline variables are incompatible with C++ standards before C++17">,
DefaultIgnore, InGroup<CXXPre1zCompat>;
def warn_inline_namespace_reopened_noninline : Warning<
@@ -1201,8 +1243,9 @@ def warn_cxx98_compat_unelaborated_friend_type : Warning<
def err_qualified_friend_not_found : Error<
"no function named %0 with type %1 was found in the specified scope">;
def err_introducing_special_friend : Error<
- "must use a qualified name when declaring a %select{constructor|"
- "destructor|conversion operator}0 as a friend">;
+ "%plural{[0,2]:must use a qualified name when declaring|3:cannot declare}0"
+ " a %select{constructor|destructor|conversion operator|deduction guide}0 "
+ "as a friend">;
def err_tagless_friend_type_template : Error<
"friend type templates must use an elaborated type">;
def err_no_matching_local_friend : Error<
@@ -1447,6 +1490,15 @@ def err_nested_name_spec_is_not_class : Error<
def ext_nested_name_spec_is_enum : ExtWarn<
"use of enumeration in a nested name specifier is a C++11 extension">,
InGroup<CXX11>;
+def err_out_of_line_qualified_id_type_names_constructor : Error<
+ "qualified reference to %0 is a constructor name rather than a "
+ "%select{template name|type}1 in this context">;
+def ext_out_of_line_qualified_id_type_names_constructor : ExtWarn<
+ "ISO C++ specifies that "
+ "qualified reference to %0 is a constructor name rather than a "
+ "%select{template name|type}1 in this context, despite preceding "
+ "%select{'typename'|'template'}2 keyword">, SFINAEFailure,
+ InGroup<DiagGroup<"injected-class-name">>;
// C++ class members
def err_storageclass_invalid_for_member : Error<
@@ -1508,11 +1560,9 @@ def note_ivar_decl : Note<"instance variable is declared here">;
def note_bitfield_decl : Note<"bit-field is declared here">;
def note_implicit_param_decl : Note<"%0 is an implicit parameter">;
def note_member_synthesized_at : Note<
- "implicit %select{default constructor|copy constructor|move constructor|copy "
- "assignment operator|move assignment operator|destructor}0 for %1 first "
- "required here">;
-def note_inhctor_synthesized_at : Note<
- "inherited constructor for %0 first required here">;
+ "in implicit %select{default constructor|copy constructor|move constructor|"
+ "copy assignment operator|move assignment operator|destructor}0 for %1 "
+ "first required here">;
def err_missing_default_ctor : Error<
"%select{constructor for %1 must explicitly initialize the|"
"implicit default constructor for %1 must explicitly initialize the|"
@@ -1608,7 +1658,14 @@ def err_covariant_return_type_class_type_more_qualified : Error<
"return type of virtual function %0 is not covariant with the return type of "
"the function it overrides (class type %1 is more qualified than class "
"type %2">;
-
+
+// C++ implicit special member functions
+def note_in_declaration_of_implicit_special_member : Note<
+ "while declaring the implicit "
+ "%select{default constructor|copy constructor|move constructor|"
+ "copy assignment operator|move assignment operator|destructor}1"
+ " for %0">;
+
// C++ constructors
def err_constructor_cannot_be : Error<"constructor cannot be declared '%0'">;
def err_invalid_qualified_constructor : Error<
@@ -1664,8 +1721,8 @@ def err_init_conversion_failed : Error<
"cannot initialize %select{a variable|a parameter|return object|an "
"exception object|a member subobject|an array element|a new value|a value|a "
"base class|a constructor delegation|a vector element|a block element|a "
- "complex element|a lambda capture|a compound literal initializer|a "
- "related result|a parameter of CF audited function}0 "
+ "block element|a complex element|a lambda capture|a compound literal "
+ "initializer|a related result|a parameter of CF audited function}0 "
"%diff{of type $ with an %select{rvalue|lvalue}2 of type $|"
"with an %select{rvalue|lvalue}2 of incompatible type}1,3"
"%select{|: different classes%diff{ ($ vs $)|}5,6"
@@ -1791,8 +1848,9 @@ def note_uninit_fixit_remove_cond : Note<
"remove the %select{'%1' if its condition|condition if it}0 "
"is always %select{false|true}2">;
def err_init_incomplete_type : Error<"initialization of incomplete type %0">;
-def err_list_init_in_parens : Error<"list-initializer for non-class type %0 "
- "must not be parenthesized">;
+def err_list_init_in_parens : Error<
+ "cannot initialize %select{non-class|reference}0 type %1 with a "
+ "parenthesized initializer list">;
def warn_unsequenced_mod_mod : Warning<
"multiple unsequenced modifications to %0">, InGroup<Unsequenced>;
@@ -1833,8 +1891,8 @@ def warn_cxx98_compat_temp_copy : Warning<
InGroup<CXX98CompatBindToTemporaryCopy>, DefaultIgnore;
def err_selected_explicit_constructor : Error<
"chosen constructor is explicit in copy-initialization">;
-def note_constructor_declared_here : Note<
- "constructor declared here">;
+def note_explicit_ctor_deduction_guide_here : Note<
+ "explicit %select{constructor|deduction guide}0 declared here">;
// C++11 decltype
def err_decltype_in_declarator : Error<
@@ -1845,8 +1903,8 @@ def warn_cxx98_compat_auto_type_specifier : Warning<
"'auto' type specifier is incompatible with C++98">,
InGroup<CXX98Compat>, DefaultIgnore;
def err_auto_variable_cannot_appear_in_own_initializer : Error<
- "variable %0 declared with %select{'auto'|'decltype(auto)'|'__auto_type'}1 "
- "type cannot appear in its own initializer">;
+ "variable %0 declared with deduced type %1 "
+ "cannot appear in its own initializer">;
def err_binding_cannot_appear_in_own_initializer : Error<
"binding %0 cannot appear in the initializer of its own "
"decomposition declaration">;
@@ -1855,20 +1913,29 @@ def err_illegal_decl_array_of_auto : Error<
def err_new_array_of_auto : Error<
"cannot allocate array of 'auto'">;
def err_auto_not_allowed : Error<
- "%select{'auto'|'decltype(auto)'|'__auto_type'}0 not allowed "
+ "%select{'auto'|'decltype(auto)'|'__auto_type'|"
+ "use of "
+ "%select{class template|function template|variable template|alias template|"
+ "template template parameter|template}2 %3 requires template arguments; "
+ "argument deduction}0 not allowed "
"%select{in function prototype"
"|in non-static struct member|in struct member"
"|in non-static union member|in union member"
"|in non-static class member|in interface member"
- "|in exception declaration|in template parameter|in block literal"
+ "|in exception declaration|in template parameter until C++17|in block literal"
"|in template argument|in typedef|in type alias|in function return type"
"|in conversion function type|here|in lambda parameter"
- "|in type allocated by 'new'|in K&R-style function parameter}1"
- "%select{|||||||| until C++1z||||||||||}1">;
+ "|in type allocated by 'new'|in K&R-style function parameter"
+ "|in template parameter|in friend declaration}1">;
+def err_dependent_deduced_tst : Error<
+ "typename specifier refers to "
+ "%select{class template|function template|variable template|alias template|"
+ "template template parameter|template}0 member in %1; "
+ "argument deduction not allowed here">;
def err_auto_not_allowed_var_inst : Error<
"'auto' variable template instantiation is not allowed">;
def err_auto_var_requires_init : Error<
- "declaration of variable %0 with type %1 requires an initializer">;
+ "declaration of variable %0 with deduced type %1 requires an initializer">;
def err_auto_new_requires_ctor_arg : Error<
"new expression for type %0 requires a constructor argument">;
def err_auto_new_list_init : Error<
@@ -1898,8 +1965,13 @@ def err_auto_var_deduction_failure_from_init_list : Error<
def err_auto_new_deduction_failure : Error<
"new expression for type %0 has incompatible constructor argument of type %1">;
def err_auto_different_deductions : Error<
- "'%select{auto|decltype(auto)|__auto_type}0' deduced as %1 in declaration "
- "of %2 and deduced as %3 in declaration of %4">;
+ "%select{'auto'|'decltype(auto)'|'__auto_type'|template arguments}0 "
+ "deduced as %1 in declaration of %2 and "
+ "deduced as %3 in declaration of %4">;
+def err_auto_non_deduced_not_alone : Error<
+ "%select{function with deduced return type|"
+ "declaration with trailing return type}0 "
+ "must be the only declaration in its group">;
def err_implied_std_initializer_list_not_found : Error<
"cannot deduce type of initializer list because std::initializer_list was "
"not found; include <initializer_list>">;
@@ -1915,6 +1987,8 @@ def err_auto_bitfield : Error<
"cannot pass bit-field as __auto_type initializer in C">;
// C++1y decltype(auto) type
+def err_decltype_auto_invalid : Error<
+ "'decltype(auto)' not allowed here">;
def err_decltype_auto_cannot_be_combined : Error<
"'decltype(auto)' cannot be combined with other type specifiers">;
def err_decltype_auto_function_declarator_not_declaration : Error<
@@ -1925,6 +1999,56 @@ def err_decltype_auto_compound_type : Error<
def err_decltype_auto_initializer_list : Error<
"cannot deduce 'decltype(auto)' from initializer list">;
+// C++1z deduced class template specialization types
+def err_deduced_class_template_compound_type : Error<
+ "cannot %select{form pointer to|form reference to|form array of|"
+ "form function returning|use parentheses when declaring variable with}0 "
+ "deduced class template specialization type">;
+def err_deduced_non_class_template_specialization_type : Error<
+ "%select{<error>|function template|variable template|alias template|"
+ "template template parameter|template}0 %1 requires template arguments; "
+ "argument deduction only allowed for class templates">;
+def err_deduced_class_template_ctor_ambiguous : Error<
+ "ambiguous deduction for template arguments of %0">;
+def err_deduced_class_template_ctor_no_viable : Error<
+ "no viable constructor or deduction guide for deduction of "
+ "template arguments of %0">;
+def err_deduced_class_template_incomplete : Error<
+ "template %0 has no definition and no %select{|viable }1deduction guides "
+ "for deduction of template arguments">;
+def err_deduced_class_template_deleted : Error<
+ "class template argument deduction for %0 selected a deleted constructor">;
+def err_deduced_class_template_explicit : Error<
+ "class template argument deduction for %0 selected an explicit "
+ "%select{constructor|deduction guide}1 for copy-list-initialization">;
+def err_deduction_guide_no_trailing_return_type : Error<
+ "deduction guide declaration without trailing return type">;
+def err_deduction_guide_bad_trailing_return_type : Error<
+ "deduced type %1 of deduction guide is not %select{|written as }2"
+ "a specialization of template %0">;
+def err_deduction_guide_with_complex_decl : Error<
+ "cannot specify any part of a return type in the "
+ "declaration of a deduction guide">;
+def err_deduction_guide_invalid_specifier : Error<
+ "deduction guide cannot be declared '%0'">;
+def err_deduction_guide_name_not_class_template : Error<
+ "cannot specify deduction guide for "
+ "%select{<error>|function template|variable template|alias template|"
+ "template template parameter|dependent template name}0 %1">;
+def err_deduction_guide_wrong_scope : Error<
+ "deduction guide must be declared in the same scope as template %q0">;
+def err_deduction_guide_defines_function : Error<
+ "deduction guide cannot have a function definition">;
+def err_deduction_guide_explicit_mismatch : Error<
+ "deduction guide is %select{not |}0declared 'explicit' but "
+ "previous declaration was%select{ not|}0">;
+def err_deduction_guide_specialized : Error<"deduction guide cannot be "
+ "%select{explicitly instantiated|explicitly specialized}0">;
+def err_deduction_guide_template_not_deducible : Error<
+ "deduction guide template contains "
+ "%select{a template parameter|template parameters}0 that cannot be "
+ "deduced">;
+
// C++1y deduced return types
def err_auto_fn_deduction_failure : Error<
"cannot deduce return type %0 from returned value of type %1">;
@@ -1950,6 +2074,9 @@ def override_keyword_hides_virtual_member_function : Error<
"%select{function|functions}1">;
def err_function_marked_override_not_overriding : Error<
"%0 marked 'override' but does not override any member functions">;
+def warn_destructor_marked_not_override_overriding : Warning <
+ "%0 overrides a destructor but is not marked 'override'">,
+ InGroup<CXX11WarnOverrideDestructor>, DefaultIgnore;
def warn_function_marked_not_override_overriding : Warning <
"%0 overrides a member function but is not marked 'override'">,
InGroup<CXX11WarnOverrideMethod>;
@@ -1970,7 +2097,7 @@ def err_enum_invalid_underlying : Error<
"non-integral type %0 is an invalid underlying type">;
def err_enumerator_too_large : Error<
"enumerator value is not representable in the underlying type %0">;
-def ext_enumerator_too_large : ExtWarn<
+def ext_enumerator_too_large : Extension<
"enumerator value is not representable in the underlying type %0">,
InGroup<MicrosoftEnumValue>;
def err_enumerator_wrapped : Error<
@@ -2020,11 +2147,11 @@ def err_for_range_iter_deduction_failure : Error<
def err_for_range_member_begin_end_mismatch : Error<
"range type %0 has '%select{begin|end}1' member but no '%select{end|begin}1' member">;
def ext_for_range_begin_end_types_differ : ExtWarn<
- "'begin' and 'end' returning different types (%0 and %1) is a C++1z extension">,
- InGroup<CXX1z>;
+ "'begin' and 'end' returning different types (%0 and %1) is a C++17 extension">,
+ InGroup<CXX17>;
def warn_for_range_begin_end_types_differ : Warning<
"'begin' and 'end' returning different types (%0 and %1) is incompatible "
- "with C++ standards before C++1z">, InGroup<CXXPre1zCompat>, DefaultIgnore;
+ "with C++ standards before C++17">, InGroup<CXXPre1zCompat>, DefaultIgnore;
def note_in_for_range: Note<
"when looking up '%select{begin|end}0' function for range expression "
"of type %1">;
@@ -2215,6 +2342,9 @@ def err_concept_specialized : Error<
"%select{function|variable}0 concept cannot be "
"%select{explicitly instantiated|explicitly specialized|partially specialized}1">;
+def err_template_different_associated_constraints : Error<
+ "associated constraints differ in template redeclaration">;
+
// C++11 char16_t/char32_t
def warn_cxx98_compat_unicode_type : Warning<
"'%0' type specifier is incompatible with C++98">,
@@ -2261,7 +2391,7 @@ def warn_unsupported_target_attribute
InGroup<IgnoredAttributes>;
def err_attribute_unsupported
: Error<"%0 attribute is not supported for this target">;
-// The err_*_attribute_argument_not_int are seperate because they're used by
+// The err_*_attribute_argument_not_int are separate because they're used by
// VerifyIntegerConstantExpression.
def err_aligned_attribute_argument_not_int : Error<
"'aligned' attribute requires integer constant">;
@@ -2339,6 +2469,9 @@ def err_attribute_invalid_size : Error<
"vector size not an integral multiple of component size">;
def err_attribute_zero_size : Error<"zero vector size">;
def err_attribute_size_too_large : Error<"vector size too large">;
+def err_typecheck_vector_not_convertable_implict_truncation : Error<
+ "cannot convert between %select{scalar|vector}0 type %1 and vector type"
+ " %2 as implicit conversion would cause truncation">;
def err_typecheck_vector_not_convertable : Error<
"cannot convert between vector values of different size (%0 and %1)">;
def err_typecheck_vector_not_convertable_non_scalar : Error<
@@ -2361,7 +2494,7 @@ def err_attribute_address_multiple_qualifiers : Error<
def err_attribute_address_function_type : Error<
"function type may not be qualified with an address space">;
def err_as_qualified_auto_decl : Error<
- "automatic variable qualified with an address space">;
+ "automatic variable qualified with an%select{| invalid}0 address space">;
def err_arg_with_address_space : Error<
"parameter may not be qualified with an address space">;
def err_field_with_address_space : Error<
@@ -2640,6 +2773,7 @@ def warn_attribute_wrong_decl_type : Warning<
"|types and namespaces"
"|Objective-C interfaces"
"|methods and properties"
+ "|functions, methods, and properties"
"|struct or union"
"|struct, union or class"
"|types"
@@ -2659,7 +2793,8 @@ def warn_attribute_wrong_decl_type : Warning<
"|functions, methods, enums, and classes"
"|structs, classes, variables, functions, and inline namespaces"
"|variables, functions, methods, types, enumerations, enumerators, labels, and non-static data members"
- "|classes and enumerations}1">,
+ "|classes and enumerations"
+ "|named declarations}1">,
InGroup<IgnoredAttributes>;
def err_attribute_wrong_decl_type : Error<warn_attribute_wrong_decl_type.Text>;
def warn_type_attribute_wrong_type : Warning<
@@ -2698,9 +2833,9 @@ def warn_cconv_structors : Warning<
def err_regparm_mismatch : Error<"function declared with regparm(%0) "
"attribute was previously declared "
"%plural{0:without the regparm|:with the regparm(%1)}1 attribute">;
-def err_returns_retained_mismatch : Error<
- "function declared with the ns_returns_retained attribute "
- "was previously declared without the ns_returns_retained attribute">;
+def err_function_attribute_mismatch : Error<
+ "function declared with %0 attribute "
+ "was previously declared without the %0 attribute">;
def err_objc_precise_lifetime_bad_type : Error<
"objc_precise_lifetime only applies to retainable types; type here is %0">;
def warn_objc_precise_lifetime_meaningless : Error<
@@ -2741,17 +2876,32 @@ def note_protocol_method : Note<
def warn_unguarded_availability :
Warning<"%0 is only available on %1 %2 or newer">,
InGroup<UnguardedAvailability>, DefaultIgnore;
+def warn_unguarded_availability_new :
+ Warning<warn_unguarded_availability.Text>,
+ InGroup<UnguardedAvailabilityNew>;
def warn_partial_availability : Warning<"%0 is only available conditionally">,
InGroup<UnguardedAvailability>, DefaultIgnore;
+def warn_partial_availability_new : Warning<warn_partial_availability.Text>,
+ InGroup<UnguardedAvailabilityNew>;
def note_partial_availability_silence : Note<
- "explicitly redeclare %0 to silence this warning">;
+ "annotate %select{%1|anonymous %1}0 with an availability attribute to silence">;
def note_unguarded_available_silence : Note<
- "enclose %0 in an @available check to silence this warning">;
+ "enclose %0 in %select{an @available|a __builtin_available}1 check to silence"
+ " this warning">;
def warn_partial_message : Warning<"%0 is partial: %1">,
InGroup<UnguardedAvailability>, DefaultIgnore;
+def warn_partial_message_new : Warning<warn_partial_message.Text>,
+ InGroup<UnguardedAvailabilityNew>;
def warn_partial_fwdclass_message : Warning<
"%0 may be partial because the receiver type is unknown">,
InGroup<UnguardedAvailability>, DefaultIgnore;
+def warn_partial_fwdclass_message_new :
+ Warning<warn_partial_fwdclass_message.Text>,
+ InGroup<UnguardedAvailabilityNew>;
+def warn_at_available_unchecked_use : Warning<
+ "%select{@available|__builtin_available}0 does not guard availability here; "
+ "use if (%select{@available|__builtin_available}0) instead">,
+ InGroup<DiagGroup<"unsupported-availability-guard">>;
// Thread Safety Attributes
def warn_invalid_capability_name : Warning<
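The new UnguardedAvailabilityNew variants and warn_at_available_unchecked_use concern availability checking. A hedged sketch, assuming an Apple target; versions and names are illustrative:

    // avail.cpp: clang++ --target=x86_64-apple-macosx10.11 -c avail.cpp
    void shinyNewAPI() __attribute__((availability(macos, introduced=10.13)));

    void caller() {
      if (__builtin_available(macOS 10.13, *)) {
        shinyNewAPI();           // guarded: no unguarded-availability warning
      }
      // Calling shinyNewAPI() outside the check would trip the new
      // warn_unguarded_availability_new, which (unlike the old group) is on by default.
    }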
@@ -3132,7 +3282,8 @@ def err_attribute_regparm_invalid_number : Error<
"'regparm' parameter must be between 0 and %0 inclusive">;
def err_attribute_not_supported_in_lang : Error<
"%0 attribute is not supported in %select{C|C++|Objective-C}1">;
-
+def err_attribute_not_supported_on_arch
+ : Error<"%0 attribute is not supported on '%1'">;
// Clang-Specific Attributes
def warn_attribute_iboutlet : Warning<
@@ -3149,13 +3300,15 @@ def warn_iboutletcollection_property_assign : Warning<
"IBOutletCollection properties should be copy/strong and not assign">,
InGroup<ObjCInvalidIBOutletProperty>;
-def err_attribute_overloadable_missing : Error<
- "%select{overloaded function|redeclaration of}0 %1 must have the "
- "'overloadable' attribute">;
+def err_attribute_overloadable_mismatch : Error<
+ "redeclaration of %0 must %select{not |}1have the 'overloadable' attribute">;
def note_attribute_overloadable_prev_overload : Note<
- "previous overload of function is here">;
+ "previous %select{unmarked |}0overload of function is here">;
def err_attribute_overloadable_no_prototype : Error<
"'overloadable' function %0 must have a prototype">;
+def err_attribute_overloadable_multiple_unmarked_overloads : Error<
+ "at most one overload for a given name may lack the 'overloadable' "
+ "attribute">;
def warn_ns_attribute_wrong_return_type : Warning<
"%0 attribute only applies to %select{functions|methods|properties}1 that "
"return %select{an Objective-C object|a pointer|a non-retainable pointer}2">,
@@ -3369,6 +3522,8 @@ def note_ovl_candidate_substitution_failure : Note<
"candidate template ignored: substitution failure%0%1">;
def note_ovl_candidate_disabled_by_enable_if : Note<
"candidate template ignored: disabled by %0%1">;
+def note_ovl_candidate_disabled_by_requirement : Note<
+ "candidate template ignored: requirement '%0' was not satisfied%1">;
def note_ovl_candidate_has_pass_object_size_params: Note<
"candidate address cannot be taken because parameter %0 has "
"pass_object_size attribute">;
@@ -3380,7 +3535,7 @@ def note_ovl_candidate_disabled_by_function_cond_attr : Note<
def note_ovl_candidate_disabled_by_extension : Note<
"candidate disabled due to OpenCL extension">;
def err_addrof_function_disabled_by_enable_if_attr : Error<
- "cannot take address of function %0 becuase it has one or more "
+ "cannot take address of function %0 because it has one or more "
"non-tautological enable_if conditions">;
def note_addrof_ovl_candidate_disabled_by_enable_if_attr : Note<
"candidate function made ineligible by enable_if">;
@@ -3750,7 +3905,7 @@ def err_template_nontype_parm_bad_type : Error<
"a non-type template parameter cannot have type %0">;
def warn_cxx14_compat_template_nontype_parm_auto_type : Warning<
"non-type template parameters declared with %0 are incompatible with C++ "
- "standards before C++1z">,
+ "standards before C++17">,
DefaultIgnore, InGroup<CXXPre1zCompat>;
def err_template_param_default_arg_redefinition : Error<
"template parameter redefines default argument">;
@@ -3789,11 +3944,13 @@ def err_template_decl_ref : Error<
// C++ Template Argument Lists
def err_template_missing_args : Error<
- "use of class template %0 requires template arguments">;
+ "use of "
+ "%select{class template|function template|variable template|alias template|"
+ "template template parameter|template}0 %1 requires template arguments">;
def err_template_arg_list_different_arity : Error<
"%select{too few|too many}0 template arguments for "
- "%select{class template|function template|template template parameter"
- "|template}1 %2">;
+ "%select{class template|function template|variable template|alias template|"
+ "template template parameter|template}1 %2">;
def note_template_decl_here : Note<"template is declared here">;
def err_template_arg_must_be_type : Error<
"template argument for template type parameter must be a type">;
@@ -4063,7 +4220,7 @@ def ext_partial_specs_not_deducible : ExtWarn<
"%select{a template parameter|template parameters}1 that cannot be "
"deduced; this partial specialization will never be used">,
DefaultError, InGroup<DiagGroup<"unusable-partial-specialization">>;
-def note_partial_spec_unused_parameter : Note<
+def note_non_deducible_parameter : Note<
"non-deducible template parameter %0">;
def err_partial_spec_ordering_ambiguous : Error<
"ambiguous partial specializations of %0">;
@@ -4280,12 +4437,17 @@ def err_typename_nested_not_found : Error<"no type named %0 in %1">;
def err_typename_nested_not_found_enable_if : Error<
"no type named 'type' in %0; 'enable_if' cannot be used to disable "
"this declaration">;
+def err_typename_nested_not_found_requirement : Error<
+ "failed requirement '%0'; 'enable_if' cannot be used to disable this "
+ "declaration">;
def err_typename_nested_not_type : Error<
"typename specifier refers to non-type member %0 in %1">;
def note_typename_refers_here : Note<
"referenced member %0 is declared here">;
def err_typename_missing : Error<
"missing 'typename' prior to dependent type name '%0%1'">;
+def err_typename_missing_template : Error<
+ "missing 'typename' prior to dependent type template name '%0%1'">;
def ext_typename_missing : ExtWarn<
"missing 'typename' prior to dependent type name '%0%1'">,
InGroup<DiagGroup<"typename-missing">>;
@@ -4304,7 +4466,7 @@ def err_template_kw_refers_to_non_template : Error<
"%0 following the 'template' keyword does not refer to a template">;
def err_template_kw_refers_to_class_template : Error<
"'%0%1' instantiated to a class template, not a function template">;
-def note_referenced_class_template : Error<
+def note_referenced_class_template : Note<
"class template declared here">;
def err_template_kw_missing : Error<
"missing 'template' keyword prior to dependent template name '%0%1'">;
@@ -4417,8 +4579,11 @@ def warn_deprecated_fwdclass_message : Warning<
"%0 may be deprecated because the receiver type is unknown">,
InGroup<DeprecatedDeclarations>;
def warn_deprecated_def : Warning<
- "Implementing deprecated %select{method|class|category}0">,
- InGroup<DeprecatedImplementations>, DefaultIgnore;
+ "implementing deprecated %select{method|class|category}0">,
+ InGroup<DeprecatedImplementations>, DefaultIgnore;
+def warn_unavailable_def : Warning<
+ "implementing unavailable method">,
+ InGroup<DeprecatedImplementations>, DefaultIgnore;
def err_unavailable : Error<"%0 is unavailable">;
def err_property_method_unavailable :
Error<"property access is using %0 method which is unavailable">;
@@ -4445,7 +4610,7 @@ def warn_missing_prototype : Warning<
def note_declaration_not_a_prototype : Note<
"this declaration is not a prototype; add 'void' to make it a prototype for a zero-parameter function">;
def warn_strict_prototypes : Warning<
- "this %select{function declaration is not|"
+ "this %select{function declaration is not|block declaration is not|"
"old-style function definition is not preceded by}0 a prototype">,
InGroup<DiagGroup<"strict-prototypes">>, DefaultIgnore;
def warn_missing_variable_declarations : Warning<
@@ -4476,6 +4641,8 @@ def err_abi_tag_on_redeclaration : Error<
"cannot add 'abi_tag' attribute in a redeclaration">;
def err_new_abi_tag_on_redeclaration : Error<
"'abi_tag' %0 missing in original declaration">;
+def note_use_ifdef_guards : Note<
+ "unguarded header; consider using #ifdef guards or #pragma once">;
def note_deleted_dtor_no_operator_delete : Note<
"virtual destructor requires an unambiguous, accessible 'operator delete'">;
@@ -4632,7 +4799,7 @@ def ext_forward_ref_enum : Extension<
"ISO C forbids forward references to 'enum' types">;
def err_forward_ref_enum : Error<
"ISO C++ forbids forward references to 'enum' types">;
-def ext_ms_forward_ref_enum : Extension<
+def ext_ms_forward_ref_enum : ExtWarn<
"forward references to 'enum' types are a Microsoft extension">,
InGroup<MicrosoftEnumForwardReference>;
def ext_forward_ref_enum_def : Extension<
@@ -4808,6 +4975,21 @@ def warn_bitfield_width_exceeds_type_width: Warning<
def warn_anon_bitfield_width_exceeds_type_width : Warning<
"width of anonymous bit-field (%0 bits) exceeds width of its type; value "
"will be truncated to %1 bit%s1">, InGroup<BitFieldWidth>;
+def warn_bitfield_too_small_for_enum : Warning<
+ "bit-field %0 is not wide enough to store all enumerators of %1">,
+ InGroup<BitFieldEnumConversion>, DefaultIgnore;
+def note_widen_bitfield : Note<
+ "widen this field to %0 bits to store all values of %1">;
+def warn_unsigned_bitfield_assigned_signed_enum : Warning<
+ "assigning value of signed enum type %1 to unsigned bit-field %0; "
+ "negative enumerators of enum %1 will be converted to positive values">,
+ InGroup<BitFieldEnumConversion>, DefaultIgnore;
+def warn_signed_bitfield_enum_conversion : Warning<
+ "signed bit-field %0 needs an extra bit to represent the largest positive "
+ "enumerators of %1">,
+ InGroup<BitFieldEnumConversion>, DefaultIgnore;
+def note_change_bitfield_sign : Note<
+ "consider making the bitfield type %select{unsigned|signed}0">;
def warn_missing_braces : Warning<
"suggest braces around initialization of subobject">,
@@ -4862,6 +5044,8 @@ def note_protected_by_if_available : Note<
"jump enters controlled statement of if available">;
def note_protected_by_vla : Note<
"jump bypasses initialization of variable length array">;
+def note_protected_by_objc_fast_enumeration : Note<
+ "jump enters Objective-C fast enumeration loop">;
def note_protected_by_objc_try : Note<
"jump bypasses initialization of @try block">;
def note_protected_by_objc_catch : Note<
@@ -5164,7 +5348,7 @@ def err_arc_inconsistent_property_ownership : Error<
def warn_block_capture_autoreleasing : Warning<
"block captures an autoreleasing out-parameter, which may result in "
"use-after-free bugs">,
- InGroup<BlockCaptureAutoReleasing>, DefaultIgnore;
+ InGroup<BlockCaptureAutoReleasing>;
def note_declare_parameter_autoreleasing : Note<
"declare the parameter __autoreleasing explicitly to suppress this warning">;
def note_declare_parameter_strong : Note<
@@ -5455,6 +5639,11 @@ def err_enumerator_does_not_exist : Error<
def note_enum_specialized_here : Note<
"enum %0 was explicitly specialized here">;
+def err_specialization_not_primary_template : Error<
+ "cannot reference member of primary template because deduced class "
+ "template specialization %0 is %select{instantiated from a partial|"
+ "an explicit}1 specialization">;
+
def err_member_redeclared : Error<"class member cannot be redeclared">;
def ext_member_redeclared : ExtWarn<"class member cannot be redeclared">,
InGroup<RedeclaredClassMember>;
@@ -5627,6 +5816,9 @@ def err_objc_object_assignment : Error<
"cannot assign to class object (%0 invalid)">;
def err_typecheck_invalid_operands : Error<
"invalid operands to binary expression (%0 and %1)">;
+def err_typecheck_logical_vector_expr_gnu_cpp_restrict : Error<
+ "logical expression with vector %select{type %1 and non-vector type %2|types"
+ " %1 and %2}0 is only supported in C++">;
def err_typecheck_sub_ptr_compatible : Error<
"%diff{$ and $ are not pointers to compatible types|"
"pointers to incompatible types}0,1">;
@@ -5725,8 +5917,8 @@ def err_this_static_member_func : Error<
def err_invalid_member_use_in_static_method : Error<
"invalid use of member %0 in static member function">;
def err_invalid_qualified_function_type : Error<
- "%select{static |non-}0member function %select{of type %2 |}1"
- "cannot have '%3' qualifier">;
+ "%select{non-member function|static member function|deduction guide}0 "
+ "%select{of type %2 |}1cannot have '%3' qualifier">;
def err_compound_qualified_function_type : Error<
"%select{block pointer|pointer|reference}0 to function type %select{%2 |}1"
"cannot have '%3' qualifier">;
@@ -5750,8 +5942,8 @@ def err_builtin_func_cast_more_than_one_arg : Error<
"function-style cast to a builtin type can only take one argument">;
def err_value_init_for_array_type : Error<
"array types cannot be value-initialized">;
-def err_value_init_for_function_type : Error<
- "function types cannot be value-initialized">;
+def err_init_for_function_type : Error<
+ "cannot create object of function type %0">;
def warn_format_nonliteral_noargs : Warning<
"format string is not a string literal (potentially insecure)">,
InGroup<FormatSecurity>;
@@ -5916,6 +6108,12 @@ def warn_objc_circular_container : Warning<
"adding '%0' to '%1' might cause circular dependency in container">,
InGroup<DiagGroup<"objc-circular-container">>;
def note_objc_circular_container_declared_here : Note<"'%0' declared here">;
+def warn_objc_unsafe_perform_selector : Warning<
+ "%0 is incompatible with selectors that return a "
+ "%select{struct|union|vector}1 type">,
+ InGroup<DiagGroup<"objc-unsafe-perform-selector">>;
+def note_objc_unsafe_perform_selector_method_declared_here : Note<
+ "method %0 that returns %1 declared here">;
def warn_setter_getter_impl_required : Warning<
"property %0 requires method %1 to be defined - "
@@ -6134,12 +6332,14 @@ def warn_ambiguous_suitable_delete_function_found : Warning<
InGroup<DiagGroup<"ambiguous-delete">>;
def note_member_declared_here : Note<
"member %0 declared here">;
+def note_member_first_declared_here : Note<
+ "member %0 first declared here">;
def err_decrement_bool : Error<"cannot decrement expression of type bool">;
def warn_increment_bool : Warning<
"incrementing expression of type bool is deprecated and "
- "incompatible with C++1z">, InGroup<DeprecatedIncrementBool>;
+ "incompatible with C++17">, InGroup<DeprecatedIncrementBool>;
def ext_increment_bool : ExtWarn<
- "ISO C++1z does not allow incrementing expression of type bool">,
+ "ISO C++17 does not allow incrementing expression of type bool">,
DefaultError, InGroup<IncrementBool>;
def err_increment_decrement_enum : Error<
"cannot %select{decrement|increment}0 expression of enum type %1">;
@@ -6167,6 +6367,13 @@ def err_exceptions_disabled : Error<
"cannot use '%0' with exceptions disabled">;
def err_objc_exceptions_disabled : Error<
"cannot use '%0' with Objective-C exceptions disabled">;
+def warn_throw_in_noexcept_func : Warning<
+ "%0 has a non-throwing exception specification but can still throw">,
+ InGroup<Exceptions>;
+def note_throw_in_dtor : Note<
+ "%select{destructor|deallocator}0 has a %select{non-throwing|implicit "
+ "non-throwing}1 exception specification">;
+def note_throw_in_function : Note<"function declared non-throwing here">;
def err_seh_try_outside_functions : Error<
"cannot use SEH '__try' in blocks, captured regions, or Obj-C method decls">;
def err_mixing_cxx_try_seh_try : Error<
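A minimal example of the new -Wexceptions diagnostic warn_throw_in_noexcept_func; the file name is illustrative:

    // noexcept_throw.cpp: clang++ -std=c++14 -c noexcept_throw.cpp
    void risky() noexcept {
      throw 42;   // warning: 'risky' has a non-throwing exception specification but can still throw
    }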
@@ -6212,6 +6419,12 @@ def warn_overaligned_type : Warning<
"type %0 requires %1 bytes of alignment and the default allocator only "
"guarantees %2 bytes">,
InGroup<OveralignedType>, DefaultIgnore;
+def warn_aligned_allocation_unavailable : Warning<
+ "aligned %select{allocation|deallocation}0 function of type '%1' possibly "
+ "unavailable on %2">, InGroup<AlignedAllocationUnavailable>, DefaultError;
+def note_silence_unligned_allocation_unavailable : Note<
+ "if you supply your own aligned allocation functions, use "
+ "-Wno-aligned-allocation-unavailable to silence this diagnostic">;
def err_conditional_void_nonvoid : Error<
"%select{left|right}1 operand to ? is void, but %select{right|left}1 operand "
@@ -6315,10 +6528,10 @@ let CategoryName = "Lambda Issue" in {
// C++1z '*this' captures.
def warn_cxx14_compat_star_this_lambda_capture : Warning<
- "by value capture of '*this' is incompatible with C++ standards before C++1z">,
+ "by value capture of '*this' is incompatible with C++ standards before C++17">,
InGroup<CXXPre1zCompat>, DefaultIgnore;
def ext_star_this_lambda_capture_cxx1z : ExtWarn<
- "capture of '*this' by copy is a C++1z extension">, InGroup<CXX1z>;
+ "capture of '*this' by copy is a C++17 extension">, InGroup<CXX17>;
}
def err_return_in_captured_stmt : Error<
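The lambda-capture wording above now refers to C++17; the construct is by-value capture of '*this'. A sketch (invocation illustrative):

    // star_this.cpp: clang++ -std=c++14 -c star_this.cpp
    struct Widget {
      int id = 0;
      auto snapshot() const {
        return [*this] { return id; };   // warning: capture of '*this' by copy is a C++17 extension
      }
    };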
@@ -6987,7 +7200,7 @@ def warn_unused_volatile : Warning<
def ext_cxx14_attr : Extension<
"use of the %0 attribute is a C++14 extension">, InGroup<CXX14>;
def ext_cxx1z_attr : Extension<
- "use of the %0 attribute is a C++1z extension">, InGroup<CXX1z>;
+ "use of the %0 attribute is a C++17 extension">, InGroup<CXX17>;
def warn_unused_comparison : Warning<
"%select{%select{|in}1equality|relational}0 comparison result unused">,
@@ -7001,7 +7214,7 @@ def err_incomplete_type_used_in_type_trait_expr : Error<
def err_require_constant_init_failed : Error<
"variable does not have a constant initializer">;
def note_declared_required_constant_init_here : Note<
- "required by 'require_constant_initializer' attribute here">;
+ "required by 'require_constant_initialization' attribute here">;
def err_dimension_expr_not_constant_integer : Error<
"dimension expression does not evaluate to a constant unsigned int">;
@@ -7099,7 +7312,7 @@ def err_invalid_conversion_between_vector_and_integer : Error<
"invalid conversion between vector type %0 and integer type %1 "
"of different size">;
-def err_opencl_function_pointer_variable : Error<
+def err_opencl_function_pointer : Error<
"pointers to functions are not allowed">;
def err_opencl_taking_function_address : Error<
@@ -7770,7 +7983,11 @@ def warn_empty_switch_body : Warning<
def note_empty_body_on_separate_line : Note<
"put the semicolon on a separate line to silence this warning">;
-def err_va_start_used_in_non_variadic_function : Error<
+def err_va_start_captured_stmt : Error<
+ "'va_start' cannot be used in a captured statement">;
+def err_va_start_outside_function : Error<
+ "'va_start' cannot be used outside a function">;
+def err_va_start_fixed_function : Error<
"'va_start' used in function with fixed args">;
def err_va_start_used_in_wrong_abi_function : Error<
"'va_start' used in %select{System V|Win64}0 ABI function">;
@@ -7841,10 +8058,13 @@ def err_block_on_nonlocal : Error<
def err_block_on_vm : Error<
"__block attribute not allowed on declaration with a variably modified type">;
-def err_shufflevector_non_vector : Error<
- "first two arguments to __builtin_shufflevector must be vectors">;
-def err_shufflevector_incompatible_vector : Error<
- "first two arguments to __builtin_shufflevector must have the same type">;
+def err_vec_builtin_non_vector : Error<
+ "first two arguments to %0 must be vectors">;
+def err_vec_builtin_incompatible_vector : Error<
+ "first two arguments to %0 must have the same type">;
+def err_vsx_builtin_nonconstant_argument : Error<
+ "argument %0 to %1 must be a 2-bit unsigned literal (i.e. 0, 1, 2 or 3)">;
+
def err_shufflevector_nonconstant_argument : Error<
"index for __builtin_shufflevector must be a constant integer">;
def err_shufflevector_argument_too_large : Error<
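The shufflevector-specific errors are generalized to err_vec_builtin_* so the same wording can serve other vector builtins. A sketch of a well-formed call, using the ext_vector_type extension (names illustrative):

    // shuffle.cpp: clang++ -std=c++14 -c shuffle.cpp
    typedef float float4 __attribute__((ext_vector_type(4)));

    float4 reverse(float4 v) {
      // First two arguments must be vectors of the same type; indices must be constant integers.
      return __builtin_shufflevector(v, v, 3, 2, 1, 0);
    }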
@@ -7893,12 +8113,14 @@ def err_systemz_invalid_tabort_code : Error<
"invalid transaction abort code">;
def err_64_bit_builtin_32_bit_tgt : Error<
"this builtin is only available on 64-bit targets">;
+def err_builtin_x64_aarch64_only : Error<
+ "this builtin is only available on x86-64 and aarch64 targets">;
def err_ppc_builtin_only_on_pwr7 : Error<
"this builtin is only valid on POWER7 or later CPUs">;
-def err_x86_builtin_32_bit_tgt : Error<
- "this builtin is only available on x86-64 targets">;
def err_x86_builtin_invalid_rounding : Error<
"invalid rounding argument">;
+def err_x86_builtin_invalid_scale : Error<
+ "scale argument must be 1, 2, 4, or 8">;
def err_builtin_longjmp_unsupported : Error<
"__builtin_longjmp is not supported for the current target">;
@@ -8026,9 +8248,20 @@ def err_undeclared_use_suggest : Error<
"use of undeclared %0; did you mean %1?">;
def err_undeclared_var_use_suggest : Error<
"use of undeclared identifier %0; did you mean %1?">;
+def err_no_template : Error<"no template named %0">;
def err_no_template_suggest : Error<"no template named %0; did you mean %1?">;
+def err_no_member_template : Error<"no template named %0 in %1">;
def err_no_member_template_suggest : Error<
"no template named %0 in %1; did you mean %select{|simply }2%3?">;
+def err_non_template_in_template_id : Error<
+ "%0 does not name a template but is followed by template arguments">;
+def err_non_template_in_template_id_suggest : Error<
+ "%0 does not name a template but is followed by template arguments; "
+ "did you mean %1?">;
+def err_non_template_in_member_template_id_suggest : Error<
+ "member %0 of %1 is not a template; did you mean %select{|simply }2%3?">;
+def note_non_template_in_template_id_found : Note<
+ "non-template declaration found by name lookup">;
def err_mem_init_not_member_or_class_suggest : Error<
"initializer %0 does not name a non-static data member or base "
"class; did you mean the %select{base class|member}1 %2?">;
@@ -8113,16 +8346,26 @@ def err_opencl_ptrptr_kernel_param : Error<
def err_kernel_arg_address_space : Error<
"pointer arguments to kernel functions must reside in '__global', "
"'__constant' or '__local' address space">;
+def err_opencl_ext_vector_component_invalid_length : Error<
+ "vector component access has invalid length %0. Supported: 1,2,3,4,8,16.">;
def err_opencl_function_variable : Error<
"%select{non-kernel function|function scope}0 variable cannot be declared in %1 address space">;
+def err_opencl_addrspace_scope : Error<
+ "variables in the %0 address space can only be declared in the outermost "
+ "scope of a kernel function">;
def err_static_function_scope : Error<
"variables in function scope cannot be declared static">;
def err_opencl_bitfields : Error<
"bit-fields are not supported in OpenCL">;
def err_opencl_vla : Error<
"variable length arrays are not supported in OpenCL">;
+def err_opencl_scalar_type_rank_greater_than_vector_type : Error<
+ "scalar operand type has greater rank than the type of the vector "
+ "element. (%0 and %1)">;
def err_bad_kernel_param_type : Error<
"%0 cannot be used as the type of a kernel parameter">;
+def err_opencl_implicit_function_decl : Error<
+ "implicit declaration of function %0 is invalid in OpenCL">;
def err_record_with_pointers_kernel_param : Error<
"%select{struct|union}0 kernel parameters may not contain pointers">;
def note_within_field_of_type : Note<
@@ -8143,6 +8386,8 @@ def err_sampler_argument_required : Error<
"sampler_t variable required - got %0">;
def err_wrong_sampler_addressspace: Error<
"sampler type cannot be used with the __local and __global address space qualifiers">;
+def err_opencl_nonconst_global_sampler : Error<
+ "global sampler requires a const or constant address space qualifier">;
def err_opencl_cast_non_zero_to_event_t : Error<
"cannot cast non-zero value '%0' to 'event_t'">;
def err_opencl_global_invalid_addr_space : Error<
@@ -8158,9 +8403,9 @@ def err_opencl_return_value_with_address_space : Error<
"return value cannot be qualified with address space">;
def err_opencl_constant_no_init : Error<
"variable in constant address space must be initialized">;
-def err_atomic_init_constant : Error<
- "atomic variable can only be assigned to a compile time constant"
- " in the declaration statement in the program scope">;
+def err_opencl_atomic_init: Error<
+ "atomic variable can be %select{assigned|initialized}0 to a variable only "
+ "in global address space">;
def err_opencl_implicit_vector_conversion : Error<
"implicit conversions between vector types (%0 and %1) are not permitted">;
def err_opencl_invalid_type_array : Error<
@@ -8177,7 +8422,7 @@ def warn_opencl_attr_deprecated_ignored : Warning <
def err_opencl_variadic_function : Error<
"invalid prototype, variadic arguments are not allowed in OpenCL">;
def err_opencl_requires_extension : Error<
- "use of %select{type |declaration}0%1 requires %2 extension to be enabled">;
+ "use of %select{type|declaration}0 %1 requires %2 extension to be enabled">;
// OpenCL v2.0 s6.13.6 -- Builtin Pipe Functions
def err_opencl_builtin_pipe_first_arg : Error<
@@ -8210,6 +8455,8 @@ def err_opencl_invalid_block_declaration : Error<
"invalid block variable declaration - must be %select{const qualified|initialized}0">;
def err_opencl_extern_block_declaration : Error<
"invalid block variable declaration - using 'extern' storage class is disallowed">;
+def err_opencl_block_ref_block : Error<
+ "cannot refer to a block inside block">;
// OpenCL v2.0 s6.13.9 - Address space qualifier functions.
def err_opencl_builtin_to_addr_arg_num : Error<
@@ -8220,16 +8467,17 @@ def err_opencl_builtin_to_addr_invalid_arg : Error<
// OpenCL v2.0 s6.13.17 Enqueue kernel restrictions.
def err_opencl_enqueue_kernel_incorrect_args : Error<
"illegal call to enqueue_kernel, incorrect argument types">;
-def err_opencl_enqueue_kernel_expected_type : Error<
- "illegal call to enqueue_kernel, expected %0 argument type">;
def err_opencl_enqueue_kernel_local_size_args : Error<
"mismatch in number of block parameters and local size arguments passed">;
def err_opencl_enqueue_kernel_invalid_local_size_type : Error<
"illegal call to enqueue_kernel, parameter needs to be specified as integer type">;
def err_opencl_enqueue_kernel_blocks_non_local_void_args : Error<
- "blocks used in device side enqueue are expected to have parameters of type 'local void*'">;
+ "blocks used in enqueue_kernel call are expected to have parameters of type 'local void*'">;
def err_opencl_enqueue_kernel_blocks_no_args : Error<
- "blocks in this form of device side enqueue call are expected to have have no parameters">;
+ "blocks with parameters are not accepted in this prototype of enqueue_kernel call">;
+
+def err_opencl_builtin_expected_type : Error<
+ "illegal call to %0, expected %1 argument type">;
// OpenCL v2.2 s2.1.2.3 - Vector Component Access
def ext_opencl_ext_vector_type_rgba_selector: ExtWarn<
@@ -8407,11 +8655,11 @@ def err_omp_unknown_reduction_identifier : Error<
def err_omp_not_resolved_reduction_identifier : Error<
"unable to resolve declare reduction construct for type %0">;
def err_omp_reduction_ref_type_arg : Error<
- "argument of OpenMP clause 'reduction' must reference the same object in all threads">;
+ "argument of OpenMP clause '%0' must reference the same object in all threads">;
def err_omp_clause_not_arithmetic_type_arg : Error<
- "arguments of OpenMP clause 'reduction' for 'min' or 'max' must be of %select{scalar|arithmetic}0 type">;
+ "arguments of OpenMP clause '%0' for 'min' or 'max' must be of %select{scalar|arithmetic}1 type">;
def err_omp_clause_floating_type_arg : Error<
- "arguments of OpenMP clause 'reduction' with bitwise operators cannot be of floating type">;
+ "arguments of OpenMP clause '%0' with bitwise operators cannot be of floating type">;
def err_omp_once_referenced : Error<
"variable can appear only once in OpenMP '%0' clause">;
def err_omp_once_referenced_in_target_update : Error<
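The reduction-related errors above are reworded to take the clause name as %0, presumably so the same text can serve the newer reduction-style clauses as well. For orientation, a plain reduction clause that satisfies these constraints (invocation illustrative):

    // omp_sum.cpp: clang++ -fopenmp -c omp_sum.cpp
    int sum(const int *v, int n) {
      int total = 0;
    #pragma omp parallel for reduction(+ : total)
      for (int i = 0; i < n; ++i)
        total += v[i];               // 'total' names the same object in every thread and has arithmetic type
      return total;
    }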
@@ -8549,8 +8797,8 @@ def err_omp_not_mappable_type : Error<
"type %0 is not mappable to target">;
def err_omp_invalid_map_type_for_directive : Error<
"%select{map type '%1' is not allowed|map type must be specified}0 for '#pragma omp %2'">;
-def err_omp_no_map_for_directive : Error<
- "expected at least one map clause for '#pragma omp %0'">;
+def err_omp_no_clause_for_directive : Error<
+ "expected at least one %0 clause for '#pragma omp %1'">;
def note_omp_polymorphic_in_target : Note<
"mappable type cannot be polymorphic">;
def note_omp_static_member_in_target : Note<
@@ -8619,6 +8867,10 @@ def warn_omp_nesting_simd : Warning<
def err_omp_orphaned_device_directive : Error<
"orphaned 'omp %0' directives are prohibited"
"; perhaps you forget to enclose the directive into a %select{|||target |teams }1region?">;
+def err_omp_reduction_non_addressable_expression : Error<
+ "expected addressable reduction item for the task-based directives">;
+def err_omp_reduction_with_nogroup : Error<
+ "'reduction' clause cannot be used with 'nogroup' clause">;
} // end of OpenMP category
let CategoryName = "Related Result Type Issue" in {
@@ -8649,9 +8901,11 @@ def err_invalid_type_for_program_scope_var : Error<
}
let CategoryName = "Modules Issue" in {
+def err_module_decl_in_module_map_module : Error<
+ "'module' declaration found while building module from module map">;
def err_module_interface_implementation_mismatch : Error<
- "%select{'module'|'module partition'|'module implementation'}0 declaration "
- "found while %select{not |not |}0building module interface">;
+ "missing 'export' specifier in module declaration while "
+ "building module interface">;
def err_current_module_name_mismatch : Error<
"module name '%0' specified on command line does not match name of module">;
def err_module_redefinition : Error<
@@ -8694,22 +8948,31 @@ def err_module_self_import : Error<
"import of module '%0' appears within same top-level module '%1'">;
def err_module_import_in_implementation : Error<
"@import of module '%0' in implementation of '%1'; use #import">;
+
+// C++ Modules TS
def err_export_within_export : Error<
"export declaration appears within another export declaration">;
+def err_export_not_in_module_interface : Error<
+ "export declaration can only be used within a module interface unit after "
+ "the module declaration">;
def ext_equivalent_internal_linkage_decl_in_modules : ExtWarn<
"ambiguous use of internal linkage declaration %0 defined in multiple modules">,
InGroup<DiagGroup<"modules-ambiguous-internal-linkage">>;
def note_equivalent_internal_linkage_decl : Note<
"declared here%select{ in module '%1'|}0">;
+
+def note_redefinition_modules_same_file : Note<
+ "'%0' included multiple times, additional include site in header from module '%1'">;
+def note_redefinition_include_same_file : Note<
+ "'%0' included multiple times, additional include site here">;
}
let CategoryName = "Coroutines Issue" in {
def err_return_in_coroutine : Error<
"return statement not allowed in coroutine; did you mean 'co_return'?">;
def note_declared_coroutine_here : Note<
- "function is a coroutine due to use of "
- "'%select{co_await|co_yield|co_return}0' here">;
+ "function is a coroutine due to use of '%0' here">;
def err_coroutine_objc_method : Error<
"Objective-C methods as coroutines are not yet supported">;
def err_coroutine_unevaluated_context : Error<
@@ -8721,24 +8984,58 @@ def err_coroutine_invalid_func_context : Error<
"|a copy assignment operator|a move assignment operator|the 'main' function"
"|a constexpr function|a function with a deduced return type"
"|a varargs function}0">;
-def err_implied_std_coroutine_traits_not_found : Error<
- "you need to include <experimental/coroutine> before defining a coroutine">;
+def err_implied_coroutine_type_not_found : Error<
+ "%0 type was not found; include <experimental/coroutine> before defining "
+ "a coroutine">;
+def err_implicit_coroutine_std_nothrow_type_not_found : Error<
+ "std::nothrow was not found; include <new> before defining a coroutine which "
+ "uses get_return_object_on_allocation_failure()">;
+def err_malformed_std_nothrow : Error<
+ "std::nothrow must be a valid variable declaration">;
+def err_malformed_std_coroutine_handle : Error<
+ "std::experimental::coroutine_handle must be a class template">;
+def err_coroutine_handle_missing_member : Error<
+ "std::experimental::coroutine_handle missing a member named '%0'">;
def err_malformed_std_coroutine_traits : Error<
"'std::experimental::coroutine_traits' must be a class template">;
def err_implied_std_coroutine_traits_promise_type_not_found : Error<
"this function cannot be a coroutine: %q0 has no member named 'promise_type'">;
def err_implied_std_coroutine_traits_promise_type_not_class : Error<
"this function cannot be a coroutine: %0 is not a class">;
-def err_coroutine_traits_missing_specialization : Error<
+def err_coroutine_promise_type_incomplete : Error<
+ "this function cannot be a coroutine: %0 is an incomplete type">;
+def err_coroutine_type_missing_specialization : Error<
"this function cannot be a coroutine: missing definition of "
"specialization %q0">;
-def err_implied_std_current_exception_not_found : Error<
- "you need to include <exception> before defining a coroutine that implicitly "
- "uses 'set_exception'">;
-def err_malformed_std_current_exception : Error<
- "'std::current_exception' must be a function">;
-def err_coroutine_promise_return_ill_formed : Error<
- "%0 declares both 'return_value' and 'return_void'">;
+def err_coroutine_promise_incompatible_return_functions : Error<
+ "the coroutine promise type %0 declares both 'return_value' and 'return_void'">;
+def err_coroutine_promise_requires_return_function : Error<
+ "the coroutine promise type %0 must declare either 'return_value' or 'return_void'">;
+def note_coroutine_promise_implicit_await_transform_required_here : Note<
+ "call to 'await_transform' implicitly required by 'co_await' here">;
+def note_coroutine_promise_suspend_implicitly_required : Note<
+ "call to '%select{initial_suspend|final_suspend}0' implicitly "
+ "required by the %select{initial suspend point|final suspend point}0">;
+def err_coroutine_promise_unhandled_exception_required : Error<
+ "%0 is required to declare the member 'unhandled_exception()'">;
+def warn_coroutine_promise_unhandled_exception_required_with_exceptions : Warning<
+ "%0 is required to declare the member 'unhandled_exception()' when exceptions are enabled">,
+ InGroup<CoroutineMissingUnhandledException>;
+def err_coroutine_promise_get_return_object_on_allocation_failure : Error<
+ "%0: 'get_return_object_on_allocation_failure()' must be a static member function">;
+def err_seh_in_a_coroutine_with_cxx_exceptions : Error<
+ "cannot use SEH '__try' in a coroutine when C++ exceptions are enabled">;
+def err_coroutine_promise_new_requires_nothrow : Error<
+ "%0 is required to have a non-throwing noexcept specification when the promise "
+ "type declares 'get_return_object_on_allocation_failure()'">;
+def note_coroutine_promise_call_implicitly_required : Note<
+ "call to %0 implicitly required by coroutine function here">;
+def err_await_suspend_invalid_return_type : Error<
+ "return type of 'await_suspend' is required to be 'void' or 'bool' (have %0)"
+>;
+def note_await_ready_no_bool_conversion : Note<
+ "return type of 'await_ready' is required to be contextually convertible to 'bool'"
+>;
}
let CategoryName = "Documentation Issue" in {
@@ -8749,8 +9046,13 @@ def warn_not_a_doxygen_trailing_member_comment : Warning<
let CategoryName = "Instrumentation Issue" in {
def warn_profile_data_out_of_date : Warning<
"profile data may be out of date: of %0 function%s0, %1 %plural{1:has|:have}1"
- " no data and %2 %plural{1:has|:have}2 mismatched data that will be ignored">,
+ " mismatched data that will be ignored">,
InGroup<ProfileInstrOutOfDate>;
+def warn_profile_data_missing : Warning<
+ "profile data may be incomplete: of %0 function%s0, %1 %plural{1:has|:have}1"
+ " no data">,
+ InGroup<ProfileInstrMissing>,
+ DefaultIgnore;
def warn_profile_data_unprofiled : Warning<
"no profile data available for file \"%0\"">,
InGroup<ProfileInstrUnprofiled>;
@@ -8780,6 +9082,9 @@ def warn_nullability_lost : Warning<
"implicit conversion from nullable pointer %0 to non-nullable pointer "
"type %1">,
InGroup<NullableToNonNullConversion>, DefaultIgnore;
+def warn_zero_as_null_pointer_constant : Warning<
+ "zero as null pointer constant">,
+ InGroup<DiagGroup<"zero-as-null-pointer-constant">>, DefaultIgnore;
def err_nullability_cs_multilevel : Error<
"nullability keyword %0 cannot be applied to multi-level pointer type %1">;
@@ -8905,4 +9210,9 @@ def ext_warn_gnu_final : ExtWarn<
"__final is a GNU extension, consider using C++11 final">,
InGroup<GccCompat>;
+def warn_shadow_field :
+ Warning<"non-static data member '%0' of '%1' shadows member inherited from type '%2'">,
+ InGroup<ShadowField>, DefaultIgnore;
+def note_shadow_field : Note<"declared here">;
+
} // end of sema component.
diff --git a/gnu/llvm/tools/clang/include/clang/Driver/Options.td b/gnu/llvm/tools/clang/include/clang/Driver/Options.td
index db6aeb56b03..a9eb04b180e 100644
--- a/gnu/llvm/tools/clang/include/clang/Driver/Options.td
+++ b/gnu/llvm/tools/clang/include/clang/Driver/Options.td
@@ -33,6 +33,9 @@ def NoArgumentUnused : OptionFlag;
// lines that use it.
def Unsupported : OptionFlag;
+// Ignored - The option is unsupported, and the driver will silently ignore it.
+def Ignored : OptionFlag;
+
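As a rough sketch of how the new Ignored flag is meant to be consumed (the option name below is hypothetical and not part of this patch), a legacy option can be routed into one of the clang_ignored groups defined further down, which this patch marks with Flags<[Ignored]>, so the driver accepts it without acting on it:

// Hypothetical legacy option: accepted by the driver, silently ignored
// because its group carries the Ignored flag.
def fexample_legacy : Flag<["-"], "fexample-legacy">,
  Group<clang_ignored_f_Group>;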
// CoreOption - This is considered a "core" Clang option, available in both
// clang and clang-cl modes.
def CoreOption : OptionFlag;
@@ -50,72 +53,160 @@ def CC1AsOption : OptionFlag;
// NoDriverOption - This option should not be accepted by the driver.
def NoDriverOption : OptionFlag;
+// A short name to show in documentation. The name will be interpreted as rST.
+class DocName<string name> { string DocName = name; }
+
+// A brief description to show in documentation, interpreted as rST.
+class DocBrief<code descr> { code DocBrief = descr; }
+
+// Indicates that this group should be flattened into its parent when generating
+// documentation.
+class DocFlatten { bit DocFlatten = 1; }
+
+// Indicates that this warning is ignored, but accepted with a warning for
+// GCC compatibility.
+class IgnoredGCCCompat : Flags<[HelpHidden]> {}
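For illustration only (the group names below are hypothetical and not added by this patch), these helper classes attach documentation metadata to an option group, while DocFlatten folds a group into its parent in the generated reference:

// Hypothetical group: DocName supplies the section title and DocBrief the
// rST description used in the generated command-line reference.
def Example_Group : OptionGroup<"<example group>">,
  DocName<"Example flags">, DocBrief<[{
Hypothetical flags shown only to illustrate the documentation classes.}]>;
// Hypothetical subgroup: documented as part of Example_Group rather than
// as its own section.
def Example_sub_Group : OptionGroup<"<example sub group>">,
  Group<Example_Group>, DocFlatten;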
+
/////////
// Groups
+def Action_Group : OptionGroup<"<action group>">, DocName<"Actions">,
+ DocBrief<[{The action to perform on the input.}]>;
+
// Meta-group for options which are only used for compilation,
// and not linking etc.
-def CompileOnly_Group : OptionGroup<"<CompileOnly group>">;
-
-def Action_Group : OptionGroup<"<action group>">;
-
-def I_Group : OptionGroup<"<I group>">, Group<CompileOnly_Group>;
-def M_Group : OptionGroup<"<M group>">, Group<CompileOnly_Group>;
-def T_Group : OptionGroup<"<T group>">;
-def O_Group : OptionGroup<"<O group>">, Group<CompileOnly_Group>;
-def R_Group : OptionGroup<"<R group>">, Group<CompileOnly_Group>;
-def R_value_Group : OptionGroup<"<R (with value) group>">, Group<R_Group>;
-def W_Group : OptionGroup<"<W group>">, Group<CompileOnly_Group>;
-def W_value_Group : OptionGroup<"<W (with value) group>">, Group<W_Group>;
-def d_Group : OptionGroup<"<d group>">;
-def f_Group : OptionGroup<"<f group>">, Group<CompileOnly_Group>;
-def f_clang_Group : OptionGroup<"<f (clang-only) group>">, Group<CompileOnly_Group>;
-def g_Group : OptionGroup<"<g group>">;
-def gN_Group : OptionGroup<"<gN group>">, Group<g_Group>;
-def ggdbN_Group : OptionGroup<"<ggdbN group>">, Group<gN_Group>;
-def gTune_Group : OptionGroup<"<gTune group>">, Group<g_Group>;
-def g_flags_Group : OptionGroup<"<g flags group>">;
-def i_Group : OptionGroup<"<i group>">, Group<CompileOnly_Group>;
-def clang_i_Group : OptionGroup<"<clang i group>">, Group<i_Group>;
-def m_Group : OptionGroup<"<m group>">, Group<CompileOnly_Group>;
-def opencl_Group : OptionGroup<"<opencl group>">, Group<CompileOnly_Group>;
+def CompileOnly_Group : OptionGroup<"<CompileOnly group>">,
+ DocName<"Compilation flags">, DocBrief<[{
+Flags controlling the behavior of Clang during compilation. These flags have
+no effect during actions that do not perform compilation.}]>;
+
+def Preprocessor_Group : OptionGroup<"<Preprocessor group>">,
+ Group<CompileOnly_Group>,
+ DocName<"Preprocessor flags">, DocBrief<[{
+Flags controlling the behavior of the Clang preprocessor.}]>;
+
+def IncludePath_Group : OptionGroup<"<I/i group>">, Group<Preprocessor_Group>,
+ DocName<"Include path management">,
+ DocBrief<[{
+Flags controlling how ``#include``\s are resolved to files.}]>;
+
+def I_Group : OptionGroup<"<I group>">, Group<IncludePath_Group>, DocFlatten;
+def i_Group : OptionGroup<"<i group>">, Group<IncludePath_Group>, DocFlatten;
+def clang_i_Group : OptionGroup<"<clang i group>">, Group<i_Group>, DocFlatten;
+
+def M_Group : OptionGroup<"<M group>">, Group<Preprocessor_Group>,
+ DocName<"Dependency file generation">, DocBrief<[{
+Flags controlling generation of a dependency file for ``make``-like build
+systems.}]>;
+
+def d_Group : OptionGroup<"<d group>">, Group<Preprocessor_Group>,
+ DocName<"Dumping preprocessor state">, DocBrief<[{
+Flags allowing the state of the preprocessor to be dumped in various ways.}]>;
+
+def Diag_Group : OptionGroup<"<W/R group>">, Group<CompileOnly_Group>,
+ DocName<"Diagnostic flags">, DocBrief<[{
+Flags controlling which warnings, errors, and remarks Clang will generate.
+See the :doc:`full list of warning and remark flags <DiagnosticsReference>`.}]>;
+
+def R_Group : OptionGroup<"<R group>">, Group<Diag_Group>, DocFlatten;
+def R_value_Group : OptionGroup<"<R (with value) group>">, Group<R_Group>,
+ DocFlatten;
+def W_Group : OptionGroup<"<W group>">, Group<Diag_Group>, DocFlatten;
+def W_value_Group : OptionGroup<"<W (with value) group>">, Group<W_Group>,
+ DocFlatten;
+
+def f_Group : OptionGroup<"<f group>">, Group<CompileOnly_Group>,
+ DocName<"Target-independent compilation options">;
+
+def f_clang_Group : OptionGroup<"<f (clang-only) group>">,
+ Group<CompileOnly_Group>, DocFlatten;
+def pedantic_Group : OptionGroup<"<pedantic group>">, Group<f_Group>,
+ DocFlatten;
+def opencl_Group : OptionGroup<"<opencl group>">, Group<f_Group>,
+ DocName<"OpenCL flags">;
+
+def m_Group : OptionGroup<"<m group>">, Group<CompileOnly_Group>,
+ DocName<"Target-dependent compilation options">;
// Feature groups - these take command line options that correspond directly to
// target specific features and can be translated directly from command line
// options.
-def m_x86_Features_Group : OptionGroup<"<x86 features group>">,
- Group<m_Group>,
- Flags<[CoreOption]>;
-def m_hexagon_Features_Group : OptionGroup<"<hexagon features group>">,
- Group<m_Group>;
-def m_arm_Features_Group : OptionGroup<"<arm features group>">,
- Group<m_Group>;
def m_aarch64_Features_Group : OptionGroup<"<aarch64 features group>">,
- Group<m_Group>;
+ Group<m_Group>, DocName<"AARCH64">;
+def m_amdgpu_Features_Group : OptionGroup<"<amdgpu features group>">,
+ Group<m_Group>, DocName<"AMDGPU">;
+def m_arm_Features_Group : OptionGroup<"<arm features group>">,
+ Group<m_Group>, DocName<"ARM">;
+def m_hexagon_Features_Group : OptionGroup<"<hexagon features group>">,
+ Group<m_Group>, DocName<"Hexagon">;
def m_ppc_Features_Group : OptionGroup<"<ppc features group>">,
- Group<m_Group>;
+ Group<m_Group>, DocName<"PowerPC">;
def m_wasm_Features_Group : OptionGroup<"<wasm features group>">,
- Group<m_Group>;
-def m_amdgpu_Features_Group : OptionGroup<"<amdgpu features group>">,
- Group<m_Group>;
+ Group<m_Group>, DocName<"WebAssembly">;
+def m_x86_Features_Group : OptionGroup<"<x86 features group>">,
+ Group<m_Group>, Flags<[CoreOption]>, DocName<"X86">;
+
+def m_libc_Group : OptionGroup<"<m libc group>">, Group<m_Group>,
+ Flags<[HelpHidden]>;
+
+def O_Group : OptionGroup<"<O group>">, Group<CompileOnly_Group>,
+ DocName<"Optimization level">, DocBrief<[{
+Flags controlling how much optimization should be performed.}]>;
+
+def DebugInfo_Group : OptionGroup<"<g group>">, Group<CompileOnly_Group>,
+ DocName<"Debug information generation">, DocBrief<[{
+Flags controlling how much and what kind of debug information should be
+generated.}]>;
+
+def g_Group : OptionGroup<"<g group>">, Group<DebugInfo_Group>,
+ DocName<"Kind and level of debug information">;
+def gN_Group : OptionGroup<"<gN group>">, Group<g_Group>,
+ DocName<"Debug level">;
+def ggdbN_Group : OptionGroup<"<ggdbN group>">, Group<gN_Group>, DocFlatten;
+def gTune_Group : OptionGroup<"<gTune group>">, Group<g_Group>,
+ DocName<"Debugger to tune debug information for">;
+def g_flags_Group : OptionGroup<"<g flags group>">, Group<DebugInfo_Group>,
+ DocName<"Debug information flags">;
+
+def StaticAnalyzer_Group : OptionGroup<"<Static analyzer group>">,
+ DocName<"Static analyzer flags">, DocBrief<[{
+Flags controlling the behavior of the Clang Static Analyzer.}]>;
+
+// gfortran options that we recognize in the driver and pass along when
+// invoking GCC to compile Fortran code.
+def gfortran_Group : OptionGroup<"<gfortran group>">,
+ DocName<"Fortran compilation flags">, DocBrief<[{
+Flags that will be passed on to the ``gfortran`` compiler when Clang is given
+a Fortran input.}]>;
-def m_libc_Group : OptionGroup<"<m libc group>">, Group<m_Group>;
-def u_Group : OptionGroup<"<u group>">;
+def Link_Group : OptionGroup<"<T/e/s/t/u group>">, DocName<"Linker flags">,
+ DocBrief<[{Flags that are passed on to the linker}]>;
+def T_Group : OptionGroup<"<T group>">, Group<Link_Group>, DocFlatten;
+def u_Group : OptionGroup<"<u group>">, Group<Link_Group>, DocFlatten;
-def pedantic_Group : OptionGroup<"<pedantic group>">,
- Group<CompileOnly_Group>;
-def reserved_lib_Group : OptionGroup<"<reserved libs group>">;
+def reserved_lib_Group : OptionGroup<"<reserved libs group>">,
+ Flags<[Unsupported]>;
// Temporary groups for clang options which we know we don't support,
// but don't want to verbosely warn the user about.
def clang_ignored_f_Group : OptionGroup<"<clang ignored f group>">,
- Group<f_Group>;
+ Group<f_Group>, Flags<[Ignored]>;
def clang_ignored_m_Group : OptionGroup<"<clang ignored m group>">,
- Group<m_Group>;
+ Group<m_Group>, Flags<[Ignored]>;
+
+// Group for clang options in the process of deprecation.
+// Please include the version that deprecated the flag as comment to allow
+// easier garbage collection.
+def clang_ignored_legacy_options_Group : OptionGroup<"<clang legacy flags>">,
+ Group<f_Group>, Flags<[Ignored]>;
+
+// Retired with clang-5.0
+def : Flag<["-"], "fslp-vectorize-aggressive">, Group<clang_ignored_legacy_options_Group>;
+def : Flag<["-"], "fno-slp-vectorize-aggressive">, Group<clang_ignored_legacy_options_Group>;
// Group that ignores all gcc optimizations that won't be implemented
def clang_ignored_gcc_optimization_f_Group : OptionGroup<
- "<clang_ignored_gcc_optimization_f_Group>">, Group<f_Group>;
+ "<clang_ignored_gcc_optimization_f_Group>">, Group<f_Group>, Flags<[Ignored]>;
/////////
// Options
@@ -141,7 +232,7 @@ def clang_ignored_gcc_optimization_f_Group : OptionGroup<
// Developer Driver Options
-def internal_Group : OptionGroup<"<clang internal options>">;
+def internal_Group : OptionGroup<"<clang internal options>">, Flags<[HelpHidden]>;
def internal_driver_Group : OptionGroup<"<clang driver internal options>">,
Group<internal_Group>, HelpText<"DRIVER OPTIONS">;
def internal_debug_Group :
@@ -184,6 +275,8 @@ def arcmt_migrate_report_output : Separate<["-"], "arcmt-migrate-report-output">
def arcmt_migrate_emit_arc_errors : Flag<["-"], "arcmt-migrate-emit-errors">,
HelpText<"Emit ARC errors even if the migrator can fix them">,
Flags<[CC1Option]>;
+def gen_reproducer: Flag<["-"], "gen-reproducer">, InternalDebugOpt,
+ HelpText<"Auto-generates preprocessed source files and a reproduction script">;
def _migrate : Flag<["--"], "migrate">, Flags<[DriverOption]>,
HelpText<"Run the migrator">;
@@ -236,23 +329,34 @@ def _HASH_HASH_HASH : Flag<["-"], "###">, Flags<[DriverOption, CoreOption]>,
HelpText<"Print (but do not run) the commands to run for this compilation">;
def _DASH_DASH : Option<["--"], "", KIND_REMAINING_ARGS>,
Flags<[DriverOption, CoreOption]>;
-def A : JoinedOrSeparate<["-"], "A">, Flags<[RenderJoined]>;
-def B : JoinedOrSeparate<["-"], "B">;
-def CC : Flag<["-"], "CC">, Flags<[CC1Option]>;
-def C : Flag<["-"], "C">, Flags<[CC1Option]>;
-def D : JoinedOrSeparate<["-"], "D">, Group<CompileOnly_Group>, Flags<[CC1Option]>;
+def A : JoinedOrSeparate<["-"], "A">, Flags<[RenderJoined]>, Group<gfortran_Group>;
+def B : JoinedOrSeparate<["-"], "B">, MetaVarName<"<dir>">,
+ HelpText<"Add <dir> to search path for binaries and object files used implicitly">;
+def CC : Flag<["-"], "CC">, Flags<[CC1Option]>, Group<Preprocessor_Group>,
+ HelpText<"Include comments from within macros in preprocessed output">;
+def C : Flag<["-"], "C">, Flags<[CC1Option]>, Group<Preprocessor_Group>,
+ HelpText<"Include comments in preprocessed output">;
+def D : JoinedOrSeparate<["-"], "D">, Group<Preprocessor_Group>,
+ Flags<[CC1Option]>, MetaVarName<"<macro>=<value>">,
+ HelpText<"Define <macro> to <value> (or 1 if <value> omitted)">;
def E : Flag<["-"], "E">, Flags<[DriverOption,CC1Option]>, Group<Action_Group>,
- HelpText<"Only run the preprocessor">;
+ HelpText<"Only run the preprocessor">;
def F : JoinedOrSeparate<["-"], "F">, Flags<[RenderJoined,CC1Option]>,
HelpText<"Add directory to framework include search path">;
-def G : JoinedOrSeparate<["-"], "G">, Flags<[DriverOption]>;
-def G_EQ : Joined<["-"], "G=">, Flags<[DriverOption]>;
-def H : Flag<["-"], "H">, Flags<[CC1Option]>,
+def G : JoinedOrSeparate<["-"], "G">, Flags<[DriverOption]>, Group<m_Group>,
+ MetaVarName<"<size>">, HelpText<"Put objects of at most <size> bytes "
+ "into small data section (MIPS / Hexagon)">;
+def G_EQ : Joined<["-"], "G=">, Flags<[DriverOption]>, Group<m_Group>, Alias<G>;
+def H : Flag<["-"], "H">, Flags<[CC1Option]>, Group<Preprocessor_Group>,
HelpText<"Show header includes and nesting depth">;
-def I_ : Flag<["-"], "I-">, Group<I_Group>;
-def I : JoinedOrSeparate<["-"], "I">, Group<I_Group>, Flags<[CC1Option,CC1AsOption]>,
+def I_ : Flag<["-"], "I-">, Group<I_Group>,
+ HelpText<"Restrict all prior -I flags to double-quoted inclusion and "
+ "remove current directory from include path">;
+def I : JoinedOrSeparate<["-"], "I">, Group<I_Group>,
+ Flags<[CC1Option,CC1AsOption]>, MetaVarName<"<dir>">,
HelpText<"Add directory to include search path">;
-def L : JoinedOrSeparate<["-"], "L">, Flags<[RenderJoined]>;
+def L : JoinedOrSeparate<["-"], "L">, Flags<[RenderJoined]>, Group<Link_Group>,
+ MetaVarName<"<dir>">, HelpText<"Add directory to library search path">;
def MD : Flag<["-"], "MD">, Group<M_Group>,
HelpText<"Write a depfile containing user and system headers">;
def MMD : Flag<["-"], "MMD">, Group<M_Group>,
@@ -276,9 +380,9 @@ def MT : JoinedOrSeparate<["-"], "MT">, Group<M_Group>, Flags<[CC1Option]>,
HelpText<"Specify name of main file output in depfile">;
def MV : Flag<["-"], "MV">, Group<M_Group>, Flags<[CC1Option]>,
HelpText<"Use NMake/Jom format for the depfile">;
-def Mach : Flag<["-"], "Mach">;
-def O0 : Flag<["-"], "O0">, Group<O_Group>, Flags<[CC1Option]>;
-def O4 : Flag<["-"], "O4">, Group<O_Group>, Flags<[CC1Option]>;
+def Mach : Flag<["-"], "Mach">, Group<Link_Group>;
+def O0 : Flag<["-"], "O0">, Group<O_Group>, Flags<[CC1Option, HelpHidden]>;
+def O4 : Flag<["-"], "O4">, Group<O_Group>, Flags<[CC1Option, HelpHidden]>;
def ObjCXX : Flag<["-"], "ObjC++">, Flags<[DriverOption]>,
HelpText<"Treat source input files as Objective-C++ inputs">;
def ObjC : Flag<["-"], "ObjC">, Flags<[DriverOption]>,
@@ -286,12 +390,12 @@ def ObjC : Flag<["-"], "ObjC">, Flags<[DriverOption]>,
def O : Joined<["-"], "O">, Group<O_Group>, Flags<[CC1Option]>;
def O_flag : Flag<["-"], "O">, Flags<[CC1Option]>, Alias<O>, AliasArgs<["2"]>;
def Ofast : Joined<["-"], "Ofast">, Group<O_Group>, Flags<[CC1Option]>;
-def P : Flag<["-"], "P">, Flags<[CC1Option]>,
+def P : Flag<["-"], "P">, Flags<[CC1Option]>, Group<Preprocessor_Group>,
HelpText<"Disable linemarker output in -E mode">;
-def Qn : Flag<["-"], "Qn">;
+def Qn : Flag<["-"], "Qn">, IgnoredGCCCompat;
def Qunused_arguments : Flag<["-"], "Qunused-arguments">, Flags<[DriverOption, CoreOption]>,
HelpText<"Don't emit warning for unused driver arguments">;
-def Q : Flag<["-"], "Q">;
+def Q : Flag<["-"], "Q">, IgnoredGCCCompat;
def Rpass_EQ : Joined<["-"], "Rpass=">, Group<R_value_Group>, Flags<[CC1Option]>,
HelpText<"Report transformations performed by optimization passes whose "
"name matches the given POSIX regular expression">;
@@ -307,23 +411,28 @@ def R_Joined : Joined<["-"], "R">, Group<R_Group>, Flags<[CC1Option, CoreOption]
MetaVarName<"<remark>">, HelpText<"Enable the specified remark">;
def S : Flag<["-"], "S">, Flags<[DriverOption,CC1Option]>, Group<Action_Group>,
HelpText<"Only run preprocess and compilation steps">;
-def Tbss : JoinedOrSeparate<["-"], "Tbss">, Group<T_Group>;
-def Tdata : JoinedOrSeparate<["-"], "Tdata">, Group<T_Group>;
-def Ttext : JoinedOrSeparate<["-"], "Ttext">, Group<T_Group>;
-def T : JoinedOrSeparate<["-"], "T">, Group<T_Group>;
-def U : JoinedOrSeparate<["-"], "U">, Group<CompileOnly_Group>, Flags<[CC1Option]>;
+def Tbss : JoinedOrSeparate<["-"], "Tbss">, Group<T_Group>,
+ MetaVarName<"<addr>">, HelpText<"Set starting address of BSS to <addr>">;
+def Tdata : JoinedOrSeparate<["-"], "Tdata">, Group<T_Group>,
+  MetaVarName<"<addr>">, HelpText<"Set starting address of DATA to <addr>">;
+def Ttext : JoinedOrSeparate<["-"], "Ttext">, Group<T_Group>,
+  MetaVarName<"<addr>">, HelpText<"Set starting address of TEXT to <addr>">;
+def T : JoinedOrSeparate<["-"], "T">, Group<T_Group>,
+ MetaVarName<"<script>">, HelpText<"Specify <script> as linker script">;
+def U : JoinedOrSeparate<["-"], "U">, Group<Preprocessor_Group>,
+ Flags<[CC1Option]>, MetaVarName<"<macro>">, HelpText<"Undefine macro <macro>">;
def V : JoinedOrSeparate<["-"], "V">, Flags<[DriverOption, Unsupported]>;
def Wa_COMMA : CommaJoined<["-"], "Wa,">,
HelpText<"Pass the comma separated arguments in <arg> to the assembler">,
MetaVarName<"<arg>">;
-def Wall : Flag<["-"], "Wall">, Group<W_Group>, Flags<[CC1Option]>;
-def WCL4 : Flag<["-"], "WCL4">, Group<W_Group>, Flags<[CC1Option]>;
-def Wdeprecated : Flag<["-"], "Wdeprecated">, Group<W_Group>, Flags<[CC1Option]>;
+def Wall : Flag<["-"], "Wall">, Group<W_Group>, Flags<[CC1Option, HelpHidden]>;
+def WCL4 : Flag<["-"], "WCL4">, Group<W_Group>, Flags<[CC1Option, HelpHidden]>;
+def Wdeprecated : Flag<["-"], "Wdeprecated">, Group<W_Group>, Flags<[CC1Option]>,
+ HelpText<"Enable warnings for deprecated constructs and define __DEPRECATED">;
def Wno_deprecated : Flag<["-"], "Wno-deprecated">, Group<W_Group>, Flags<[CC1Option]>;
-def Wextra : Flag<["-"], "Wextra">, Group<W_Group>, Flags<[CC1Option]>;
def Wl_COMMA : CommaJoined<["-"], "Wl,">, Flags<[LinkerInput, RenderAsInput]>,
HelpText<"Pass the comma separated arguments in <arg> to the linker">,
- MetaVarName<"<arg>">;
+ MetaVarName<"<arg>">, Group<Link_Group>;
// FIXME: This is broken; these should not be Joined arguments.
def Wno_nonportable_cfstrings : Joined<["-"], "Wno-nonportable-cfstrings">, Group<W_Group>,
Flags<[CC1Option]>;
@@ -331,16 +440,18 @@ def Wnonportable_cfstrings : Joined<["-"], "Wnonportable-cfstrings">, Group<W_Gr
Flags<[CC1Option]>;
def Wp_COMMA : CommaJoined<["-"], "Wp,">,
HelpText<"Pass the comma separated arguments in <arg> to the preprocessor">,
- MetaVarName<"<arg>">;
-def Wwrite_strings : Flag<["-"], "Wwrite-strings">, Group<W_Group>, Flags<[CC1Option]>;
-def Wno_write_strings : Flag<["-"], "Wno-write-strings">, Group<W_Group>, Flags<[CC1Option]>;
+ MetaVarName<"<arg>">, Group<Preprocessor_Group>;
+def Wwrite_strings : Flag<["-"], "Wwrite-strings">, Group<W_Group>, Flags<[CC1Option, HelpHidden]>;
+def Wno_write_strings : Flag<["-"], "Wno-write-strings">, Group<W_Group>, Flags<[CC1Option, HelpHidden]>;
def W_Joined : Joined<["-"], "W">, Group<W_Group>, Flags<[CC1Option, CoreOption]>,
MetaVarName<"<warning>">, HelpText<"Enable the specified warning">;
def Xanalyzer : Separate<["-"], "Xanalyzer">,
- HelpText<"Pass <arg> to the static analyzer">, MetaVarName<"<arg>">;
+ HelpText<"Pass <arg> to the static analyzer">, MetaVarName<"<arg>">,
+ Group<StaticAnalyzer_Group>;
def Xarch__ : JoinedAndSeparate<["-"], "Xarch_">, Flags<[DriverOption]>;
def Xassembler : Separate<["-"], "Xassembler">,
- HelpText<"Pass <arg> to the assembler">, MetaVarName<"<arg>">;
+ HelpText<"Pass <arg> to the assembler">, MetaVarName<"<arg>">,
+ Group<CompileOnly_Group>;
def Xclang : Separate<["-"], "Xclang">,
HelpText<"Pass <arg> to the clang compiler">, MetaVarName<"<arg>">,
Flags<[DriverOption, CoreOption]>, Group<CompileOnly_Group>;
@@ -349,14 +460,17 @@ def Xcuda_fatbinary : Separate<["-"], "Xcuda-fatbinary">,
def Xcuda_ptxas : Separate<["-"], "Xcuda-ptxas">,
HelpText<"Pass <arg> to the ptxas assembler">, MetaVarName<"<arg>">;
def z : Separate<["-"], "z">, Flags<[LinkerInput, RenderAsInput]>,
- HelpText<"Pass -z <arg> to the linker">, MetaVarName<"<arg>">;
+ HelpText<"Pass -z <arg> to the linker">, MetaVarName<"<arg>">,
+ Group<Link_Group>;
def Xlinker : Separate<["-"], "Xlinker">, Flags<[LinkerInput, RenderAsInput]>,
- HelpText<"Pass <arg> to the linker">, MetaVarName<"<arg>">;
-def Xpreprocessor : Separate<["-"], "Xpreprocessor">,
+ HelpText<"Pass <arg> to the linker">, MetaVarName<"<arg>">,
+ Group<Link_Group>;
+def Xpreprocessor : Separate<["-"], "Xpreprocessor">, Group<Preprocessor_Group>,
HelpText<"Pass <arg> to the preprocessor">, MetaVarName<"<arg>">;
-def X_Flag : Flag<["-"], "X">;
-def X_Joined : Joined<["-"], "X">;
-def Z_Flag : Flag<["-"], "Z">;
+def X_Flag : Flag<["-"], "X">, Group<Link_Group>;
+def X_Joined : Joined<["-"], "X">, IgnoredGCCCompat;
+def Z_Flag : Flag<["-"], "Z">, Group<Link_Group>;
+// FIXME: All we do with this is reject it. Remove.
def Z_Joined : Joined<["-"], "Z">;
def all__load : Flag<["-"], "all_load">;
def allowable__client : Separate<["-"], "allowable_client">;
@@ -365,6 +479,7 @@ def arch__errors__fatal : Flag<["-"], "arch_errors_fatal">;
def arch : Separate<["-"], "arch">, Flags<[DriverOption]>;
def arch__only : Separate<["-"], "arch_only">;
def a : Joined<["-"], "a">;
+def autocomplete : Joined<["--"], "autocomplete=">;
def bind__at__load : Flag<["-"], "bind_at_load">;
def bundle__loader : Separate<["-"], "bundle_loader">;
def bundle : Flag<["-"], "bundle">;
@@ -388,7 +503,7 @@ def cl_mad_enable : Flag<["-"], "cl-mad-enable">, Group<opencl_Group>, Flags<[CC
def cl_no_signed_zeros : Flag<["-"], "cl-no-signed-zeros">, Group<opencl_Group>, Flags<[CC1Option]>,
HelpText<"OpenCL only. Allow use of less precise no signed zeros computations in the generated binary.">;
def cl_std_EQ : Joined<["-"], "cl-std=">, Group<opencl_Group>, Flags<[CC1Option]>,
- HelpText<"OpenCL language standard to compile for.">;
+ HelpText<"OpenCL language standard to compile for.">, Values<"cl,CL,cl1.1,CL1.1,cl1.2,CL1.2,cl2.0,CL2.0">;
def cl_denorms_are_zero : Flag<["-"], "cl-denorms-are-zero">, Group<opencl_Group>, Flags<[CC1Option]>,
HelpText<"OpenCL only. Allow denormals to be flushed to zero.">;
def cl_fp32_correctly_rounded_divide_sqrt : Flag<["-"], "cl-fp32-correctly-rounded-divide-sqrt">, Group<opencl_Group>, Flags<[CC1Option]>,
@@ -402,7 +517,7 @@ def current__version : JoinedOrSeparate<["-"], "current_version">;
def cxx_isystem : JoinedOrSeparate<["-"], "cxx-isystem">, Group<clang_i_Group>,
HelpText<"Add directory to the C++ SYSTEM include search path">, Flags<[CC1Option]>,
MetaVarName<"<directory>">;
-def c : Flag<["-"], "c">, Flags<[DriverOption]>,
+def c : Flag<["-"], "c">, Flags<[DriverOption]>, Group<Action_Group>,
HelpText<"Only run preprocess, compile, and assemble steps">;
def cuda_device_only : Flag<["--"], "cuda-device-only">,
HelpText<"Compile CUDA code for device only">;
@@ -462,7 +577,7 @@ def emit_ast : Flag<["-"], "emit-ast">,
def emit_llvm : Flag<["-"], "emit-llvm">, Flags<[CC1Option]>, Group<Action_Group>,
HelpText<"Use the LLVM representation for assembler and object files">;
def exported__symbols__list : Separate<["-"], "exported_symbols_list">;
-def e : JoinedOrSeparate<["-"], "e">;
+def e : JoinedOrSeparate<["-"], "e">, Group<Link_Group>;
def fPIC : Flag<["-"], "fPIC">, Group<f_Group>;
def fno_PIC : Flag<["-"], "fno-PIC">, Group<f_Group>;
def fPIE : Flag<["-"], "fPIE">, Group<f_Group>;
@@ -511,14 +626,28 @@ def fno_gnu_inline_asm : Flag<["-"], "fno-gnu-inline-asm">, Group<f_Group>,
Flags<[DriverOption, CC1Option]>,
HelpText<"Disable GNU style inline asm">;
+def fprofile_sample_use : Flag<["-"], "fprofile-sample-use">, Group<f_Group>,
+ Flags<[CoreOption]>;
+def fno_profile_sample_use : Flag<["-"], "fno-profile-sample-use">, Group<f_Group>,
+ Flags<[CoreOption]>;
def fprofile_sample_use_EQ : Joined<["-"], "fprofile-sample-use=">,
Group<f_Group>, Flags<[DriverOption, CC1Option]>,
HelpText<"Enable sample-based profile guided optimizations">;
+def fauto_profile : Flag<["-"], "fauto-profile">, Group<f_Group>,
+ Alias<fprofile_sample_use>;
+def fno_auto_profile : Flag<["-"], "fno-auto-profile">, Group<f_Group>,
+ Alias<fno_profile_sample_use>;
def fauto_profile_EQ : Joined<["-"], "fauto-profile=">,
Alias<fprofile_sample_use_EQ>;
+def fdebug_info_for_profiling : Flag<["-"], "fdebug-info-for-profiling">, Group<f_Group>,
+ Flags<[CC1Option]>,
+ HelpText<"Emit extra debug info to make sample profile more accurate.">;
+def fno_debug_info_for_profiling : Flag<["-"], "fno-debug-info-for-profiling">, Group<f_Group>,
+ Flags<[DriverOption]>,
+ HelpText<"Do not emit extra debug info for sample profiler.">;
def fprofile_instr_generate : Flag<["-"], "fprofile-instr-generate">,
Group<f_Group>, Flags<[CoreOption]>,
- HelpText<"Generate instrumented code to collect execution counts into default.profraw file (overriden by '=' form of option or LLVM_PROFILE_FILE env var)">;
+ HelpText<"Generate instrumented code to collect execution counts into default.profraw file (overridden by '=' form of option or LLVM_PROFILE_FILE env var)">;
def fprofile_instr_generate_EQ : Joined<["-"], "fprofile-instr-generate=">,
Group<f_Group>, Flags<[CoreOption]>, MetaVarName<"<file>">,
HelpText<"Generate instrumented code to collect execution counts into <file> (overridden by LLVM_PROFILE_FILE env var)">;
@@ -565,6 +694,9 @@ def fbuiltin : Flag<["-"], "fbuiltin">, Group<f_Group>;
def fbuiltin_module_map : Flag <["-"], "fbuiltin-module-map">, Group<f_Group>,
Flags<[DriverOption]>, HelpText<"Load the clang builtins module map file.">;
def fcaret_diagnostics : Flag<["-"], "fcaret-diagnostics">, Group<f_Group>;
+def fclang_abi_compat_EQ : Joined<["-"], "fclang-abi-compat=">, Group<f_clang_Group>,
+ Flags<[CC1Option]>, MetaVarName<"<version>">, Values<"<major>.<minor>,latest">,
+ HelpText<"Attempt to match the ABI of Clang <version>">;
def fclasspath_EQ : Joined<["-"], "fclasspath=">, Group<f_Group>;
def fcolor_diagnostics : Flag<["-"], "fcolor-diagnostics">, Group<f_Group>,
Flags<[CoreOption, CC1Option]>, HelpText<"Use colors in diagnostics">;
@@ -585,7 +717,8 @@ def fconstexpr_depth_EQ : Joined<["-"], "fconstexpr-depth=">, Group<f_Group>;
def fconstexpr_steps_EQ : Joined<["-"], "fconstexpr-steps=">, Group<f_Group>;
def fconstexpr_backtrace_limit_EQ : Joined<["-"], "fconstexpr-backtrace-limit=">,
Group<f_Group>;
-def fno_crash_diagnostics : Flag<["-"], "fno-crash-diagnostics">, Group<f_clang_Group>, Flags<[NoArgumentUnused]>;
+def fno_crash_diagnostics : Flag<["-"], "fno-crash-diagnostics">, Group<f_clang_Group>, Flags<[NoArgumentUnused]>,
+ HelpText<"Disable auto-generation of preprocessed source files and a script for reproduction during a clang crash">;
def fcreate_profile : Flag<["-"], "fcreate-profile">, Group<f_Group>;
def fcxx_exceptions: Flag<["-"], "fcxx-exceptions">, Group<f_Group>,
HelpText<"Enable C++ exceptions">, Flags<[CC1Option]>;
@@ -603,6 +736,9 @@ def fdiagnostics_print_source_range_info : Flag<["-"], "fdiagnostics-print-sourc
HelpText<"Print source range spans in numeric form">;
def fdiagnostics_show_hotness : Flag<["-"], "fdiagnostics-show-hotness">, Group<f_Group>,
Flags<[CC1Option]>, HelpText<"Enable profile hotness information in diagnostic line">;
+def fdiagnostics_hotness_threshold_EQ : Joined<["-"], "fdiagnostics-hotness-threshold=">,
+ Group<f_Group>, Flags<[CC1Option]>, MetaVarName<"<number>">,
+ HelpText<"Prevent optimization remarks from being output if they do not have at least this profile count">;
def fdiagnostics_show_option : Flag<["-"], "fdiagnostics-show-option">, Group<f_Group>,
Flags<[CC1Option]>, HelpText<"Print option name with mappable diagnostics">;
def fdiagnostics_show_note_include_stack : Flag<["-"], "fdiagnostics-show-note-include-stack">,
@@ -659,65 +795,74 @@ def fno_signaling_math : Flag<["-"], "fno-signaling-math">, Group<f_Group>;
def fjump_tables : Flag<["-"], "fjump-tables">, Group<f_Group>;
def fno_jump_tables : Flag<["-"], "fno-jump-tables">, Group<f_Group>, Flags<[CC1Option]>,
HelpText<"Do not use jump tables for lowering switches">;
+
+// Begin sanitizer flags. These should all be core options exposed in all driver
+// modes.
+let Flags = [CC1Option, CoreOption] in {
+
def fsanitize_EQ : CommaJoined<["-"], "fsanitize=">, Group<f_clang_Group>,
- Flags<[CC1Option, CoreOption]>, MetaVarName<"<check>">,
+ MetaVarName<"<check>">,
HelpText<"Turn on runtime checks for various forms of undefined "
"or suspicious behavior. See user manual for available checks">;
def fno_sanitize_EQ : CommaJoined<["-"], "fno-sanitize=">, Group<f_clang_Group>,
- Flags<[CoreOption]>;
+ Flags<[CoreOption, DriverOption]>;
def fsanitize_blacklist : Joined<["-"], "fsanitize-blacklist=">,
- Group<f_clang_Group>, Flags<[CC1Option, CoreOption]>,
+ Group<f_clang_Group>,
HelpText<"Path to blacklist file for sanitizers">;
def fno_sanitize_blacklist : Flag<["-"], "fno-sanitize-blacklist">,
Group<f_clang_Group>,
HelpText<"Don't use blacklist file for sanitizers">;
def fsanitize_coverage
: CommaJoined<["-"], "fsanitize-coverage=">,
- Group<f_clang_Group>, Flags<[CoreOption]>,
+ Group<f_clang_Group>,
HelpText<"Specify the type of coverage instrumentation for Sanitizers">;
def fno_sanitize_coverage
: CommaJoined<["-"], "fno-sanitize-coverage=">,
- Group<f_clang_Group>, Flags<[CoreOption]>,
+ Group<f_clang_Group>, Flags<[CoreOption, DriverOption]>,
HelpText<"Disable specified features of coverage instrumentation for "
- "Sanitizers">;
+ "Sanitizers">, Values<"func,bb,edge,indirect-calls,trace-bb,trace-cmp,trace-div,trace-gep,8bit-counters,trace-pc,trace-pc-guard,no-prune,inline-8bit-counters">;
def fsanitize_memory_track_origins_EQ : Joined<["-"], "fsanitize-memory-track-origins=">,
- Group<f_clang_Group>, Flags<[CC1Option]>,
+ Group<f_clang_Group>,
HelpText<"Enable origins tracking in MemorySanitizer">;
def fsanitize_memory_track_origins : Flag<["-"], "fsanitize-memory-track-origins">,
- Group<f_clang_Group>, Flags<[CC1Option]>,
+ Group<f_clang_Group>,
HelpText<"Enable origins tracking in MemorySanitizer">;
def fno_sanitize_memory_track_origins : Flag<["-"], "fno-sanitize-memory-track-origins">,
- Group<f_clang_Group>, Flags<[CC1Option]>,
+ Group<f_clang_Group>,
+ Flags<[CoreOption, DriverOption]>,
HelpText<"Disable origins tracking in MemorySanitizer">;
def fsanitize_memory_use_after_dtor : Flag<["-"], "fsanitize-memory-use-after-dtor">,
- Group<f_clang_Group>, Flags<[CC1Option]>,
+ Group<f_clang_Group>,
HelpText<"Enable use-after-destroy detection in MemorySanitizer">;
def fsanitize_address_field_padding : Joined<["-"], "fsanitize-address-field-padding=">,
- Group<f_clang_Group>, Flags<[CC1Option]>,
+ Group<f_clang_Group>,
HelpText<"Level of field padding for AddressSanitizer">;
def fsanitize_address_use_after_scope : Flag<["-"], "fsanitize-address-use-after-scope">,
- Group<f_clang_Group>, Flags<[CC1Option]>,
+ Group<f_clang_Group>,
HelpText<"Enable use-after-scope detection in AddressSanitizer">;
def fno_sanitize_address_use_after_scope : Flag<["-"], "fno-sanitize-address-use-after-scope">,
- Group<f_clang_Group>, Flags<[CC1Option]>,
+ Group<f_clang_Group>,
+ Flags<[CoreOption, DriverOption]>,
HelpText<"Disable use-after-scope detection in AddressSanitizer">;
-def fsanitize_recover : Flag<["-"], "fsanitize-recover">, Group<f_clang_Group>,
- Flags<[CoreOption]>;
+def fsanitize_address_globals_dead_stripping : Flag<["-"], "fsanitize-address-globals-dead-stripping">,
+ Group<f_clang_Group>,
+ HelpText<"Enable linker dead stripping of globals in AddressSanitizer">;
+def fsanitize_recover : Flag<["-"], "fsanitize-recover">, Group<f_clang_Group>;
def fno_sanitize_recover : Flag<["-"], "fno-sanitize-recover">,
- Group<f_clang_Group>, Flags<[CoreOption]>;
+ Flags<[CoreOption, DriverOption]>,
+ Group<f_clang_Group>;
def fsanitize_recover_EQ : CommaJoined<["-"], "fsanitize-recover=">,
Group<f_clang_Group>,
- Flags<[CC1Option, CoreOption]>,
HelpText<"Enable recovery for specified sanitizers">;
def fno_sanitize_recover_EQ
: CommaJoined<["-"], "fno-sanitize-recover=">,
- Group<f_clang_Group>, Flags<[CoreOption]>,
+ Group<f_clang_Group>,
+ Flags<[CoreOption, DriverOption]>,
HelpText<"Disable recovery for specified sanitizers">;
def fsanitize_trap_EQ : CommaJoined<["-"], "fsanitize-trap=">, Group<f_clang_Group>,
- Flags<[CC1Option, CoreOption]>,
HelpText<"Enable trapping for specified sanitizers">;
def fno_sanitize_trap_EQ : CommaJoined<["-"], "fno-sanitize-trap=">, Group<f_clang_Group>,
- Flags<[CoreOption]>,
+ Flags<[CoreOption, DriverOption]>,
HelpText<"Disable trapping for specified sanitizers">;
def fsanitize_undefined_trap_on_error : Flag<["-"], "fsanitize-undefined-trap-on-error">,
Group<f_clang_Group>;
@@ -726,39 +871,47 @@ def fno_sanitize_undefined_trap_on_error : Flag<["-"], "fno-sanitize-undefined-t
def fsanitize_link_cxx_runtime : Flag<["-"], "fsanitize-link-c++-runtime">,
Group<f_clang_Group>;
def fsanitize_cfi_cross_dso : Flag<["-"], "fsanitize-cfi-cross-dso">,
- Group<f_clang_Group>, Flags<[CC1Option]>,
+ Group<f_clang_Group>,
HelpText<"Enable control flow integrity (CFI) checks for cross-DSO calls.">;
def fno_sanitize_cfi_cross_dso : Flag<["-"], "fno-sanitize-cfi-cross-dso">,
- Group<f_clang_Group>, Flags<[CC1Option]>,
+ Flags<[CoreOption, DriverOption]>,
+ Group<f_clang_Group>,
HelpText<"Disable control flow integrity (CFI) checks for cross-DSO calls.">;
def fsanitize_stats : Flag<["-"], "fsanitize-stats">,
- Group<f_clang_Group>, Flags<[CC1Option]>,
+ Group<f_clang_Group>,
HelpText<"Enable sanitizer statistics gathering.">;
def fno_sanitize_stats : Flag<["-"], "fno-sanitize-stats">,
- Group<f_clang_Group>, Flags<[CC1Option]>,
+ Group<f_clang_Group>,
+ Flags<[CoreOption, DriverOption]>,
HelpText<"Disable sanitizer statistics gathering.">;
def fsanitize_thread_memory_access : Flag<["-"], "fsanitize-thread-memory-access">,
Group<f_clang_Group>,
HelpText<"Enable memory access instrumentation in ThreadSanitizer (default)">;
def fno_sanitize_thread_memory_access : Flag<["-"], "fno-sanitize-thread-memory-access">,
Group<f_clang_Group>,
+ Flags<[CoreOption, DriverOption]>,
HelpText<"Disable memory access instrumentation in ThreadSanitizer">;
def fsanitize_thread_func_entry_exit : Flag<["-"], "fsanitize-thread-func-entry-exit">,
Group<f_clang_Group>,
HelpText<"Enable function entry/exit instrumentation in ThreadSanitizer (default)">;
def fno_sanitize_thread_func_entry_exit : Flag<["-"], "fno-sanitize-thread-func-entry-exit">,
Group<f_clang_Group>,
+ Flags<[CoreOption, DriverOption]>,
HelpText<"Disable function entry/exit instrumentation in ThreadSanitizer">;
def fsanitize_thread_atomics : Flag<["-"], "fsanitize-thread-atomics">,
Group<f_clang_Group>,
HelpText<"Enable atomic operations instrumentation in ThreadSanitizer (default)">;
def fno_sanitize_thread_atomics : Flag<["-"], "fno-sanitize-thread-atomics">,
Group<f_clang_Group>,
+ Flags<[CoreOption, DriverOption]>,
HelpText<"Disable atomic operations instrumentation in ThreadSanitizer">;
def fsanitize_undefined_strip_path_components_EQ : Joined<["-"], "fsanitize-undefined-strip-path-components=">,
- Group<f_clang_Group>, Flags<[CC1Option]>, MetaVarName<"<number>">,
+ Group<f_clang_Group>, MetaVarName<"<number>">,
HelpText<"Strip (or keep only, if negative) a given number of path components "
"when emitting check metadata.">;
+
+} // end -f[no-]sanitize* flags
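The sanitizer block above relies on a top-level TableGen 'let', which applies the listed field values to every definition in its scope; a minimal sketch of the same pattern, using a hypothetical option that is not part of this patch, looks like this:

let Flags = [CC1Option, CoreOption] in {
// Hypothetical sanitizer-style option; it picks up CC1Option and CoreOption
// from the enclosing let without repeating them on the def itself.
def fexample_check : Flag<["-"], "fexample-check">, Group<f_clang_Group>,
  HelpText<"Hypothetical option shown only to illustrate the shared flags">;
} // end example let block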
+
def funsafe_math_optimizations : Flag<["-"], "funsafe-math-optimizations">,
Group<f_Group>;
def fno_unsafe_math_optimizations : Flag<["-"], "fno-unsafe-math-optimizations">,
@@ -786,7 +939,7 @@ def ftrapping_math : Flag<["-"], "ftrapping-math">, Group<f_Group>, Flags<[CC1Op
def fno_trapping_math : Flag<["-"], "fno-trapping-math">, Group<f_Group>, Flags<[CC1Option]>;
def ffp_contract : Joined<["-"], "ffp-contract=">, Group<f_Group>,
Flags<[CC1Option]>, HelpText<"Form fused FP ops (e.g. FMAs): fast (everywhere)"
- " | on (according to FP_CONTRACT pragma, default) | off (never fuse)">;
+ " | on (according to FP_CONTRACT pragma, default) | off (never fuse)">, Values<"fast,on,off">;
def ffor_scope : Flag<["-"], "ffor-scope">, Group<f_Group>;
def fno_for_scope : Flag<["-"], "fno-for-scope">, Group<f_Group>;
@@ -795,6 +948,10 @@ def frewrite_includes : Flag<["-"], "frewrite-includes">, Group<f_Group>,
Flags<[CC1Option]>;
def fno_rewrite_includes : Flag<["-"], "fno-rewrite-includes">, Group<f_Group>;
+def frewrite_imports : Flag<["-"], "frewrite-imports">, Group<f_Group>,
+ Flags<[CC1Option]>;
+def fno_rewrite_imports : Flag<["-"], "fno-rewrite-imports">, Group<f_Group>;
+
def frewrite_map_file : Separate<["-"], "frewrite-map-file">,
Group<f_Group>,
Flags<[ DriverOption, CC1Option ]>;
@@ -816,12 +973,13 @@ def fno_gnu89_inline : Flag<["-"], "fno-gnu89-inline">, Group<f_Group>;
def fgnu_runtime : Flag<["-"], "fgnu-runtime">, Group<f_Group>,
HelpText<"Generate output compatible with the standard GNU Objective-C runtime">;
def fheinous_gnu_extensions : Flag<["-"], "fheinous-gnu-extensions">, Flags<[CC1Option]>;
-def filelist : Separate<["-"], "filelist">, Flags<[LinkerInput]>;
+def filelist : Separate<["-"], "filelist">, Flags<[LinkerInput]>,
+ Group<Link_Group>;
def : Flag<["-"], "findirect-virtual-calls">, Alias<fapple_kext>;
def finline_functions : Flag<["-"], "finline-functions">, Group<f_clang_Group>, Flags<[CC1Option]>,
HelpText<"Inline suitable functions">;
def finline_hint_functions: Flag<["-"], "finline-hint-functions">, Group<f_clang_Group>, Flags<[CC1Option]>,
- HelpText<"Inline functions wich are (explicitly or implicitly) marked inline">;
+ HelpText<"Inline functions which are (explicitly or implicitly) marked inline">;
def finline : Flag<["-"], "finline">, Group<clang_ignored_f_Group>;
def fexperimental_new_pass_manager : Flag<["-"], "fexperimental-new-pass-manager">,
Group<f_clang_Group>, Flags<[CC1Option]>,
@@ -845,11 +1003,20 @@ def fxray_instruction_threshold_ :
JoinedOrSeparate<["-"], "fxray-instruction-threshold">,
Group<f_Group>, Flags<[CC1Option]>;
+def fxray_always_instrument :
+ JoinedOrSeparate<["-"], "fxray-always-instrument=">,
+ Group<f_Group>, Flags<[CC1Option]>,
+ HelpText<"Filename defining the whitelist for imbuing the 'always instrument' XRay attribute.">;
+def fxray_never_instrument :
+ JoinedOrSeparate<["-"], "fxray-never-instrument=">,
+ Group<f_Group>, Flags<[CC1Option]>,
+ HelpText<"Filename defining the whitelist for imbuing the 'never instrument' XRay attribute.">;
+
def flat__namespace : Flag<["-"], "flat_namespace">;
def flax_vector_conversions : Flag<["-"], "flax-vector-conversions">, Group<f_Group>;
def flimited_precision_EQ : Joined<["-"], "flimited-precision=">, Group<f_Group>;
-def flto_EQ : Joined<["-"], "flto=">, Flags<[CC1Option]>, Group<f_Group>,
- HelpText<"Set LTO mode to either 'full' or 'thin'">;
+def flto_EQ : Joined<["-"], "flto=">, Flags<[CoreOption, CC1Option]>, Group<f_Group>,
+ HelpText<"Set LTO mode to either 'full' or 'thin'">, Values<"thin,full">;
def flto : Flag<["-"], "flto">, Flags<[CoreOption, CC1Option]>, Group<f_Group>,
HelpText<"Enable LTO in 'full' mode">;
def fno_lto : Flag<["-"], "fno-lto">, Group<f_Group>,
@@ -1007,7 +1174,7 @@ def fno_experimental_new_pass_manager : Flag<["-"], "fno-experimental-new-pass-m
Group<f_clang_Group>, Flags<[CC1Option]>,
HelpText<"Disables an experimental new pass manager in LLVM.">;
def fveclib : Joined<["-"], "fveclib=">, Group<f_Group>, Flags<[CC1Option]>,
- HelpText<"Use the given vector functions library">;
+ HelpText<"Use the given vector functions library">, Values<"Accelerate,SVML,none">;
def fno_lax_vector_conversions : Flag<["-"], "fno-lax-vector-conversions">, Group<f_Group>,
HelpText<"Disallow implicit conversions between vectors with a different number of elements or different element types">, Flags<[CC1Option]>;
def fno_merge_all_constants : Flag<["-"], "fno-merge-all-constants">, Group<f_Group>,
@@ -1191,7 +1358,7 @@ def fno_short_wchar : Flag<["-"], "fno-short-wchar">, Group<f_Group>, Flags<[CC1
HelpText<"Force wchar_t to be an unsigned int">;
def fshow_overloads_EQ : Joined<["-"], "fshow-overloads=">, Group<f_Group>, Flags<[CC1Option]>,
HelpText<"Which overload candidates to show when overload resolution fails: "
- "best|all; defaults to all">;
+ "best|all; defaults to all">, Values<"best,all">;
def fshow_column : Flag<["-"], "fshow-column">, Group<f_Group>, Flags<[CC1Option]>;
def fshow_source_location : Flag<["-"], "fshow-source-location">, Group<f_Group>;
def fspell_checking : Flag<["-"], "fspell-checking">, Group<f_Group>;
@@ -1213,6 +1380,10 @@ def fno_standalone_debug : Flag<["-"], "fno-standalone-debug">, Group<f_Group>,
HelpText<"Limit debug information produced to reduce size of debug binary">;
def flimit_debug_info : Flag<["-"], "flimit-debug-info">, Flags<[CoreOption]>, Alias<fno_standalone_debug>;
def fno_limit_debug_info : Flag<["-"], "fno-limit-debug-info">, Flags<[CoreOption]>, Alias<fstandalone_debug>;
+def fdebug_macro : Flag<["-"], "fdebug-macro">, Group<f_Group>, Flags<[CoreOption]>,
+ HelpText<"Emit macro debug information">;
+def fno_debug_macro : Flag<["-"], "fno-debug-macro">, Group<f_Group>, Flags<[CoreOption]>,
+ HelpText<"Do not emit macro debug information">;
def fstrict_aliasing : Flag<["-"], "fstrict-aliasing">, Group<f_Group>,
Flags<[DriverOption, CoreOption]>;
def fstrict_enums : Flag<["-"], "fstrict-enums">, Group<f_Group>, Flags<[CC1Option]>,
@@ -1250,9 +1421,6 @@ def : Flag<["-"], "fno-tree-vectorize">, Alias<fno_vectorize>;
def fslp_vectorize : Flag<["-"], "fslp-vectorize">, Group<f_Group>,
HelpText<"Enable the superword-level parallelism vectorization passes">;
def fno_slp_vectorize : Flag<["-"], "fno-slp-vectorize">, Group<f_Group>;
-def fslp_vectorize_aggressive : Flag<["-"], "fslp-vectorize-aggressive">, Group<f_Group>,
- HelpText<"Enable the BB vectorization passes">;
-def fno_slp_vectorize_aggressive : Flag<["-"], "fno-slp-vectorize-aggressive">, Group<f_Group>;
def : Flag<["-"], "ftree-slp-vectorize">, Alias<fslp_vectorize>;
def : Flag<["-"], "fno-tree-slp-vectorize">, Alias<fno_slp_vectorize>;
def Wlarge_by_value_copy_def : Flag<["-"], "Wlarge-by-value-copy">,
@@ -1301,7 +1469,7 @@ def fuse_init_array : Flag<["-"], "fuse-init-array">, Group<f_Group>, Flags<[CC1
def fno_var_tracking : Flag<["-"], "fno-var-tracking">, Group<clang_ignored_f_Group>;
def fverbose_asm : Flag<["-"], "fverbose-asm">, Group<f_Group>;
def fvisibility_EQ : Joined<["-"], "fvisibility=">, Group<f_Group>,
- HelpText<"Set the default symbol visibility for all global declarations">;
+ HelpText<"Set the default symbol visibility for all global declarations">, Values<"hidden,default">;
def fvisibility_inlines_hidden : Flag<["-"], "fvisibility-inlines-hidden">, Group<f_Group>,
HelpText<"Give inline C++ member functions default visibility by default">,
Flags<[CC1Option]>;
@@ -1335,11 +1503,17 @@ def fno_unique_section_names : Flag <["-"], "fno-unique-section-names">,
def fstrict_return : Flag<["-"], "fstrict-return">, Group<f_Group>,
Flags<[CC1Option]>,
- HelpText<"Always treat control flow paths that fall off the end of a non-void"
- "function as unreachable">;
+ HelpText<"Always treat control flow paths that fall off the end of a "
+ "non-void function as unreachable">;
def fno_strict_return : Flag<["-"], "fno-strict-return">, Group<f_Group>,
Flags<[CC1Option]>;
+def fallow_editor_placeholders : Flag<["-"], "fallow-editor-placeholders">,
+ Group<f_Group>, Flags<[CC1Option]>,
+ HelpText<"Treat editor placeholders as valid source code">;
+def fno_allow_editor_placeholders : Flag<["-"],
+ "fno-allow-editor-placeholders">, Group<f_Group>;
+
def fdebug_types_section: Flag <["-"], "fdebug-types-section">, Group<f_Group>,
Flags<[CC1Option]>, HelpText<"Place debug types in their own section (ELF Only)">;
def fno_debug_types_section: Flag<["-"], "fno-debug-types-section">, Group<f_Group>,
@@ -1402,6 +1576,10 @@ def gdwarf_aranges : Flag<["-"], "gdwarf-aranges">, Group<g_flags_Group>;
def gmodules : Flag <["-"], "gmodules">, Group<gN_Group>,
HelpText<"Generate debug info with external references to clang modules"
" or precompiled headers">;
+def gz : Flag<["-"], "gz">, Group<g_flags_Group>,
+ HelpText<"DWARF debug sections compression type">;
+def gz_EQ : Joined<["-"], "gz=">, Group<g_flags_Group>,
+ HelpText<"DWARF debug sections compression type">;
def headerpad__max__install__names : Joined<["-"], "headerpad_max_install_names">;
def help : Flag<["-", "--"], "help">, Flags<[CC1Option,CC1AsOption]>,
HelpText<"Display available options">;
@@ -1411,6 +1589,11 @@ def idirafter : JoinedOrSeparate<["-"], "idirafter">, Group<clang_i_Group>, Flag
HelpText<"Add directory to AFTER include search path">;
def iframework : JoinedOrSeparate<["-"], "iframework">, Group<clang_i_Group>, Flags<[CC1Option]>,
HelpText<"Add directory to SYSTEM framework search path">;
+def iframeworkwithsysroot : JoinedOrSeparate<["-"], "iframeworkwithsysroot">,
+ Group<clang_i_Group>,
+ HelpText<"Add directory to SYSTEM framework search path, "
+ "absolute paths are relative to -isysroot">,
+ MetaVarName<"<directory>">, Flags<[CC1Option]>;
def imacros : JoinedOrSeparate<["-", "--"], "imacros">, Group<clang_i_Group>, Flags<[CC1Option]>,
HelpText<"Include macros from file before parsing">, MetaVarName<"<file>">;
def image__base : Separate<["-"], "image_base">;
@@ -1449,7 +1632,8 @@ def ivfsoverlay : JoinedOrSeparate<["-"], "ivfsoverlay">, Group<clang_i_Group>,
HelpText<"Overlay the virtual filesystem described by file over the real file system">;
def i : Joined<["-"], "i">, Group<i_Group>;
def keep__private__externs : Flag<["-"], "keep_private_externs">;
-def l : JoinedOrSeparate<["-"], "l">, Flags<[LinkerInput, RenderJoined]>;
+def l : JoinedOrSeparate<["-"], "l">, Flags<[LinkerInput, RenderJoined]>,
+ Group<Link_Group>;
def lazy__framework : Separate<["-"], "lazy_framework">, Flags<[LinkerInput]>;
def lazy__library : Separate<["-"], "lazy_library">, Flags<[LinkerInput]>;
def mlittle_endian : Flag<["-"], "mlittle-endian">, Flags<[DriverOption]>;
@@ -1484,15 +1668,17 @@ def mpure_code : Flag<["-"], "mpure-code">, Alias<mexecute_only>; // Alias for G
def mno_pure_code : Flag<["-"], "mno-pure-code">, Alias<mno_execute_only>;
def mtvos_version_min_EQ : Joined<["-"], "mtvos-version-min=">, Group<m_Group>;
def mappletvos_version_min_EQ : Joined<["-"], "mappletvos-version-min=">, Alias<mtvos_version_min_EQ>;
-def mtvos_simulator_version_min_EQ : Joined<["-"], "mtvos-simulator-version-min=">, Alias<mtvos_version_min_EQ>;
-def mappletvsimulator_version_min_EQ : Joined<["-"], "mappletvsimulator-version-min=">, Alias<mtvos_version_min_EQ>;
+def mtvos_simulator_version_min_EQ : Joined<["-"], "mtvos-simulator-version-min=">;
+def mappletvsimulator_version_min_EQ : Joined<["-"], "mappletvsimulator-version-min=">, Alias<mtvos_simulator_version_min_EQ>;
def mwatchos_version_min_EQ : Joined<["-"], "mwatchos-version-min=">, Group<m_Group>;
-def mwatchos_simulator_version_min_EQ : Joined<["-"], "mwatchos-simulator-version-min=">, Alias<mwatchos_version_min_EQ>;
-def mwatchsimulator_version_min_EQ : Joined<["-"], "mwatchsimulator-version-min=">, Alias<mwatchos_version_min_EQ>;
+def mwatchos_simulator_version_min_EQ : Joined<["-"], "mwatchos-simulator-version-min=">;
+def mwatchsimulator_version_min_EQ : Joined<["-"], "mwatchsimulator-version-min=">, Alias<mwatchos_simulator_version_min_EQ>;
def march_EQ : Joined<["-"], "march=">, Group<m_Group>;
def masm_EQ : Joined<["-"], "masm=">, Group<m_Group>, Flags<[DriverOption]>;
def mcmodel_EQ : Joined<["-"], "mcmodel=">, Group<m_Group>;
def mimplicit_it_EQ : Joined<["-"], "mimplicit-it=">, Group<m_Group>;
+def mdefault_build_attributes : Joined<["-"], "mdefault-build-attributes">, Group<m_Group>;
+def mno_default_build_attributes : Joined<["-"], "mno-default-build-attributes">, Group<m_Group>;
def mconstant_cfstrings : Flag<["-"], "mconstant-cfstrings">, Group<clang_ignored_m_Group>;
def mconsole : Joined<["-"], "mconsole">, Group<m_Group>, Flags<[DriverOption]>;
def mwindows : Joined<["-"], "mwindows">, Group<m_Group>, Flags<[DriverOption]>;
@@ -1500,6 +1686,7 @@ def mdll : Joined<["-"], "mdll">, Group<m_Group>, Flags<[DriverOption]>;
def municode : Joined<["-"], "municode">, Group<m_Group>, Flags<[DriverOption]>;
def mthreads : Joined<["-"], "mthreads">, Group<m_Group>, Flags<[DriverOption]>;
def mcpu_EQ : Joined<["-"], "mcpu=">, Group<m_Group>;
+def mmcu_EQ : Joined<["-"], "mmcu=">, Group<m_Group>;
def mdynamic_no_pic : Joined<["-"], "mdynamic-no-pic">, Group<m_Group>;
def mfix_and_continue : Flag<["-"], "mfix-and-continue">, Group<clang_ignored_m_Group>;
def mieee_fp : Flag<["-"], "mieee-fp">, Group<clang_ignored_m_Group>;
@@ -1507,7 +1694,7 @@ def minline_all_stringops : Flag<["-"], "minline-all-stringops">, Group<clang_ig
def mno_inline_all_stringops : Flag<["-"], "mno-inline-all-stringops">, Group<clang_ignored_m_Group>;
def malign_double : Flag<["-"], "malign-double">, Group<m_Group>, Flags<[CC1Option]>,
HelpText<"Align doubles to two words in structs (x86 only)">;
-def mfloat_abi_EQ : Joined<["-"], "mfloat-abi=">, Group<m_Group>;
+def mfloat_abi_EQ : Joined<["-"], "mfloat-abi=">, Group<m_Group>, Values<"soft,softfp,hard">;
def mfpmath_EQ : Joined<["-"], "mfpmath=">, Group<m_Group>;
def mfpu_EQ : Joined<["-"], "mfpu=">, Group<m_Group>;
def mhwdiv_EQ : Joined<["-"], "mhwdiv=">, Group<m_Group>;
@@ -1517,8 +1704,8 @@ def mhard_float : Flag<["-"], "mhard-float">, Group<m_Group>;
def miphoneos_version_min_EQ : Joined<["-"], "miphoneos-version-min=">, Group<m_Group>;
def mios_version_min_EQ : Joined<["-"], "mios-version-min=">,
Alias<miphoneos_version_min_EQ>, HelpText<"Set iOS deployment target">;
-def mios_simulator_version_min_EQ : Joined<["-"], "mios-simulator-version-min=">, Alias<miphoneos_version_min_EQ>;
-def miphonesimulator_version_min_EQ : Joined<["-"], "miphonesimulator-version-min=">, Alias<miphoneos_version_min_EQ>;
+def mios_simulator_version_min_EQ : Joined<["-"], "mios-simulator-version-min=">;
+def miphonesimulator_version_min_EQ : Joined<["-"], "miphonesimulator-version-min=">, Alias<mios_simulator_version_min_EQ>;
def mkernel : Flag<["-"], "mkernel">, Group<m_Group>;
def mlinker_version_EQ : Joined<["-"], "mlinker-version=">,
Flags<[DriverOption]>;
@@ -1526,6 +1713,8 @@ def mllvm : Separate<["-"], "mllvm">, Flags<[CC1Option,CC1AsOption,CoreOption]>,
HelpText<"Additional arguments to forward to LLVM's option processing">;
def mmacosx_version_min_EQ : Joined<["-"], "mmacosx-version-min=">,
Group<m_Group>, HelpText<"Set Mac OS X deployment target">;
+def mmacos_version_min_EQ : Joined<["-"], "mmacos-version-min=">,
+ Group<m_Group>, Alias<mmacosx_version_min_EQ>;
def mms_bitfields : Flag<["-"], "mms-bitfields">, Group<m_Group>, Flags<[CC1Option]>,
HelpText<"Set the default structure layout to be compatible with the Microsoft compiler standard">;
def mno_ms_bitfields : Flag<["-"], "mno-ms-bitfields">, Group<m_Group>,
@@ -1537,9 +1726,9 @@ def mstack_alignment : Joined<["-"], "mstack-alignment=">, Group<m_Group>, Flags
def mstack_probe_size : Joined<["-"], "mstack-probe-size=">, Group<m_Group>, Flags<[CC1Option]>,
HelpText<"Set the stack probe size">;
def mthread_model : Separate<["-"], "mthread-model">, Group<m_Group>, Flags<[CC1Option]>,
- HelpText<"The thread model to use, e.g. posix, single (posix by default)">;
+ HelpText<"The thread model to use, e.g. posix, single (posix by default)">, Values<"posix,single">;
def meabi : Separate<["-"], "meabi">, Group<m_Group>, Flags<[CC1Option]>,
- HelpText<"Set EABI type, e.g. 4, 5 or gnu (default depends on triple)">;
+ HelpText<"Set EABI type, e.g. 4, 5 or gnu (default depends on triple)">, Values<"default,4,5,gnu">;
def mmmx : Flag<["-"], "mmmx">, Group<m_x86_Features_Group>;
def mno_3dnowa : Flag<["-"], "mno-3dnowa">, Group<m_x86_Features_Group>;
@@ -1573,6 +1762,7 @@ def mno_avx : Flag<["-"], "mno-avx">, Group<m_x86_Features_Group>;
def mno_avx2 : Flag<["-"], "mno-avx2">, Group<m_x86_Features_Group>;
def mno_avx512f : Flag<["-"], "mno-avx512f">, Group<m_x86_Features_Group>;
def mno_avx512cd : Flag<["-"], "mno-avx512cd">, Group<m_x86_Features_Group>;
+def mno_avx512vpopcntdq : Flag<["-"], "mno-avx512vpopcntdq">, Group<m_x86_Features_Group>;
def mno_avx512er : Flag<["-"], "mno-avx512er">, Group<m_x86_Features_Group>;
def mno_avx512pf : Flag<["-"], "mno-avx512pf">, Group<m_x86_Features_Group>;
def mno_avx512dq : Flag<["-"], "mno-avx512dq">, Group<m_x86_Features_Group>;
@@ -1588,6 +1778,7 @@ def mno_bmi : Flag<["-"], "mno-bmi">, Group<m_x86_Features_Group>;
def mno_bmi2 : Flag<["-"], "mno-bmi2">, Group<m_x86_Features_Group>;
def mno_popcnt : Flag<["-"], "mno-popcnt">, Group<m_x86_Features_Group>;
def mno_tbm : Flag<["-"], "mno-tbm">, Group<m_x86_Features_Group>;
+def mno_lwp : Flag<["-"], "mno-lwp">, Group<m_x86_Features_Group>;
def mno_fma4 : Flag<["-"], "mno-fma4">, Group<m_x86_Features_Group>;
def mno_fma : Flag<["-"], "mno-fma">, Group<m_x86_Features_Group>;
def mno_xop : Flag<["-"], "mno-xop">, Group<m_x86_Features_Group>;
@@ -1604,7 +1795,14 @@ def mno_xsaveopt : Flag<["-"], "mno-xsaveopt">, Group<m_x86_Features_Group>;
def mno_xsavec : Flag<["-"], "mno-xsavec">, Group<m_x86_Features_Group>;
def mno_xsaves : Flag<["-"], "mno-xsaves">, Group<m_x86_Features_Group>;
def mno_mwaitx : Flag<["-"], "mno-mwaitx">, Group<m_x86_Features_Group>;
+def mno_clzero : Flag<["-"], "mno-clzero">, Group<m_x86_Features_Group>;
def mno_pku : Flag<["-"], "mno-pku">, Group<m_x86_Features_Group>;
+def mno_clflushopt : Flag<["-"], "mno-clflushopt">, Group<m_x86_Features_Group>;
+def mno_clwb : Flag<["-"], "mno-clwb">, Group<m_x86_Features_Group>;
+def mno_movbe : Flag<["-"], "mno-movbe">, Group<m_x86_Features_Group>;
+def mno_mpx : Flag<["-"], "mno-mpx">, Group<m_x86_Features_Group>;
+def mno_sgx : Flag<["-"], "mno-sgx">, Group<m_x86_Features_Group>;
+def mno_prefetchwt1 : Flag<["-"], "mno-prefetchwt1">, Group<m_x86_Features_Group>;
def munaligned_access : Flag<["-"], "munaligned-access">, Group<m_arm_Features_Group>,
HelpText<"Allow memory accesses to be unaligned (AArch32/AArch64 only)">;
@@ -1626,6 +1824,8 @@ def mcrc : Flag<["-"], "mcrc">, Group<m_arm_Features_Group>,
HelpText<"Allow use of CRC instructions (ARM only)">;
def mnocrc : Flag<["-"], "mnocrc">, Group<m_arm_Features_Group>,
HelpText<"Disallow use of CRC instructions (ARM only)">;
+def mno_neg_immediates: Flag<["-"], "mno-neg-immediates">, Group<m_arm_Features_Group>,
+ HelpText<"Disallow converting instructions with negative immediates to their negation or inversion.">;
def mgeneral_regs_only : Flag<["-"], "mgeneral-regs-only">, Group<m_aarch64_Features_Group>,
HelpText<"Generate code which only uses the general purpose registers (AArch64 only)">;
@@ -1648,6 +1848,10 @@ def mamdgpu_debugger_abi : Joined<["-"], "mamdgpu-debugger-abi=">,
HelpText<"Generate additional code for specified <version> of debugger ABI (AMDGPU only)">,
MetaVarName<"<version>">;
+def faltivec : Flag<["-"], "faltivec">, Group<f_Group>, Flags<[DriverOption]>;
+def fno_altivec : Flag<["-"], "fno-altivec">, Group<f_Group>, Flags<[DriverOption]>;
+def maltivec : Flag<["-"], "maltivec">, Group<m_ppc_Features_Group>;
+def mno_altivec : Flag<["-"], "mno-altivec">, Group<m_ppc_Features_Group>;
def mvsx : Flag<["-"], "mvsx">, Group<m_ppc_Features_Group>;
def mno_vsx : Flag<["-"], "mno-vsx">, Group<m_ppc_Features_Group>;
def mpower8_vector : Flag<["-"], "mpower8-vector">,
@@ -1698,12 +1902,6 @@ def mlongcall: Flag<["-"], "mlongcall">,
def mno_longcall : Flag<["-"], "mno-longcall">,
Group<m_ppc_Features_Group>;
-def faltivec : Flag<["-"], "faltivec">, Group<f_Group>, Flags<[CC1Option]>,
- HelpText<"Enable AltiVec vector initializer syntax">;
-def fno_altivec : Flag<["-"], "fno-altivec">, Group<f_Group>, Flags<[CC1Option]>;
-def maltivec : Flag<["-"], "maltivec">, Alias<faltivec>;
-def mno_altivec : Flag<["-"], "mno-altivec">, Alias<fno_altivec>;
-
def mvx : Flag<["-"], "mvx">, Group<m_Group>;
def mno_vx : Flag<["-"], "mno-vx">, Group<m_Group>;
@@ -1735,7 +1933,8 @@ def mno_incremental_linker_compatible : Flag<["-"], "mno-incremental-linker-comp
HelpText<"(integrated-as) Emit an object file which cannot be used with an incremental linker">;
def mrtd : Flag<["-"], "mrtd">, Group<m_Group>, Flags<[CC1Option]>,
HelpText<"Make StdCall calling convention the default">;
-def msmall_data_threshold_EQ : Joined <["-"], "msmall-data-threshold=">, Group<m_Group>;
+def msmall_data_threshold_EQ : Joined <["-"], "msmall-data-threshold=">,
+ Group<m_Group>, Alias<G>;
def msoft_float : Flag<["-"], "msoft-float">, Group<m_Group>, Flags<[CC1Option]>,
HelpText<"Use software floating point">;
def mno_implicit_float : Flag<["-"], "mno-implicit-float">, Group<m_Group>,
@@ -1747,6 +1946,8 @@ def mpie_copy_relocations : Flag<["-"], "mpie-copy-relocations">, Group<m_Group>
Flags<[CC1Option]>,
HelpText<"Use copy relocations support for PIE builds">;
def mno_pie_copy_relocations : Flag<["-"], "mno-pie-copy-relocations">, Group<m_Group>;
+def mfentry : Flag<["-"], "mfentry">, HelpText<"Insert calls to fentry at function entry (x86 only)">,
+ Flags<[CC1Option]>, Group<m_Group>;
def mx87 : Flag<["-"], "mx87">, Group<m_x86_Features_Group>;
def m80387 : Flag<["-"], "m80387">, Alias<mx87>;
def msse2 : Flag<["-"], "msse2">, Group<m_x86_Features_Group>;
@@ -1762,6 +1963,7 @@ def mavx : Flag<["-"], "mavx">, Group<m_x86_Features_Group>;
def mavx2 : Flag<["-"], "mavx2">, Group<m_x86_Features_Group>;
def mavx512f : Flag<["-"], "mavx512f">, Group<m_x86_Features_Group>;
def mavx512cd : Flag<["-"], "mavx512cd">, Group<m_x86_Features_Group>;
+def mavx512vpopcntdq : Flag<["-"], "mavx512vpopcntdq">, Group<m_x86_Features_Group>;
def mavx512er : Flag<["-"], "mavx512er">, Group<m_x86_Features_Group>;
def mavx512pf : Flag<["-"], "mavx512pf">, Group<m_x86_Features_Group>;
def mavx512dq : Flag<["-"], "mavx512dq">, Group<m_x86_Features_Group>;
@@ -1777,6 +1979,7 @@ def mbmi : Flag<["-"], "mbmi">, Group<m_x86_Features_Group>;
def mbmi2 : Flag<["-"], "mbmi2">, Group<m_x86_Features_Group>;
def mpopcnt : Flag<["-"], "mpopcnt">, Group<m_x86_Features_Group>;
def mtbm : Flag<["-"], "mtbm">, Group<m_x86_Features_Group>;
+def mlwp : Flag<["-"], "mlwp">, Group<m_x86_Features_Group>;
def mfma4 : Flag<["-"], "mfma4">, Group<m_x86_Features_Group>;
def mfma : Flag<["-"], "mfma">, Group<m_x86_Features_Group>;
def mxop : Flag<["-"], "mxop">, Group<m_x86_Features_Group>;
@@ -1794,6 +1997,13 @@ def mxsaveopt : Flag<["-"], "mxsaveopt">, Group<m_x86_Features_Group>;
def mxsavec : Flag<["-"], "mxsavec">, Group<m_x86_Features_Group>;
def mxsaves : Flag<["-"], "mxsaves">, Group<m_x86_Features_Group>;
def mmwaitx : Flag<["-"], "mmwaitx">, Group<m_x86_Features_Group>;
+def mclzero : Flag<["-"], "mclzero">, Group<m_x86_Features_Group>;
+def mclflushopt : Flag<["-"], "mclflushopt">, Group<m_x86_Features_Group>;
+def mclwb : Flag<["-"], "mclwb">, Group<m_x86_Features_Group>;
+def mmovbe : Flag<["-"], "mmovbe">, Group<m_x86_Features_Group>;
+def mmpx : Flag<["-"], "mmpx">, Group<m_x86_Features_Group>;
+def msgx : Flag<["-"], "msgx">, Group<m_x86_Features_Group>;
+def mprefetchwt1 : Flag<["-"], "mprefetchwt1">, Group<m_x86_Features_Group>;
def mips16 : Flag<["-"], "mips16">, Group<m_Group>;
def mno_mips16 : Flag<["-"], "mno-mips16">, Group<m_Group>;
def mmicromips : Flag<["-"], "mmicromips">, Group<m_Group>;
@@ -1812,10 +2022,18 @@ def mdspr2 : Flag<["-"], "mdspr2">, Group<m_Group>;
def mno_dspr2 : Flag<["-"], "mno-dspr2">, Group<m_Group>;
def msingle_float : Flag<["-"], "msingle-float">, Group<m_Group>;
def mdouble_float : Flag<["-"], "mdouble-float">, Group<m_Group>;
+def mmadd4 : Flag<["-"], "mmadd4">, Group<m_Group>,
+ HelpText<"Enable the generation of 4-operand madd.s, madd.d and related instructions.">;
+def mno_madd4 : Flag<["-"], "mno-madd4">, Group<m_Group>,
+ HelpText<"Disable the generation of 4-operand madd.s, madd.d and related instructions.">;
def mmsa : Flag<["-"], "mmsa">, Group<m_Group>,
HelpText<"Enable MSA ASE (MIPS only)">;
def mno_msa : Flag<["-"], "mno-msa">, Group<m_Group>,
HelpText<"Disable MSA ASE (MIPS only)">;
+def mmt : Flag<["-"], "mmt">, Group<m_Group>,
+ HelpText<"Enable MT ASE (MIPS only)">;
+def mno_mt : Flag<["-"], "mno-mt">, Group<m_Group>,
+ HelpText<"Disable MT ASE (MIPS only)">;
def mfp64 : Flag<["-"], "mfp64">, Group<m_Group>,
HelpText<"Use 64-bit floating point registers (MIPS only)">;
def mfp32 : Flag<["-"], "mfp32">, Group<m_Group>,
@@ -1895,7 +2113,7 @@ def no_cpp_precomp : Flag<["-"], "no-cpp-precomp">, Group<clang_ignored_f_Group>
def no_integrated_cpp : Flag<["-", "--"], "no-integrated-cpp">, Flags<[DriverOption]>;
def no_pedantic : Flag<["-", "--"], "no-pedantic">, Group<pedantic_Group>;
def no__dead__strip__inits__and__terms : Flag<["-"], "no_dead_strip_inits_and_terms">;
-def nobuiltininc : Flag<["-"], "nobuiltininc">, Flags<[CC1Option]>,
+def nobuiltininc : Flag<["-"], "nobuiltininc">, Flags<[CC1Option, CoreOption]>,
HelpText<"Disable builtin #include directories">;
def nocudainc : Flag<["-"], "nocudainc">;
def nocudalib : Flag<["-"], "nocudalib">;
@@ -1904,10 +2122,11 @@ def nofixprebinding : Flag<["-"], "nofixprebinding">;
def nolibc : Flag<["-"], "nolibc">;
def nomultidefs : Flag<["-"], "nomultidefs">;
def nopie : Flag<["-"], "nopie">;
+def no_pie : Flag<["-"], "no-pie">, Alias<nopie>;
def noprebind : Flag<["-"], "noprebind">;
def noseglinkedit : Flag<["-"], "noseglinkedit">;
def nostartfiles : Flag<["-"], "nostartfiles">;
-def nostdinc : Flag<["-"], "nostdinc">;
+def nostdinc : Flag<["-"], "nostdinc">, Flags<[CoreOption]>;
def nostdlibinc : Flag<["-"], "nostdlibinc">;
def nostdincxx : Flag<["-"], "nostdinc++">, Flags<[CC1Option]>,
HelpText<"Disable standard #include directories for the C++ standard library">;
@@ -1938,6 +2157,8 @@ def print_multi_os_directory : Flag<["-", "--"], "print-multi-os-directory">,
Flags<[Unsupported]>;
def print_prog_name_EQ : Joined<["-", "--"], "print-prog-name=">,
HelpText<"Print the full program path of <name>">, MetaVarName<"<name>">;
+def print_resource_dir : Flag<["-", "--"], "print-resource-dir">,
+ HelpText<"Print the resource directory pathname">;
def print_search_dirs : Flag<["-", "--"], "print-search-dirs">,
HelpText<"Print the paths used for finding libraries and programs">;
def private__bundle : Flag<["-"], "private_bundle">;
@@ -1959,10 +2180,15 @@ def resource_dir : Separate<["-"], "resource-dir">,
HelpText<"The directory which holds the compiler resource files">;
def resource_dir_EQ : Joined<["-"], "resource-dir=">, Flags<[DriverOption, CoreOption]>,
Alias<resource_dir>;
-def rpath : Separate<["-"], "rpath">, Flags<[LinkerInput]>;
+def rpath : Separate<["-"], "rpath">, Flags<[LinkerInput]>, Group<Link_Group>;
def rtlib_EQ : Joined<["-", "--"], "rtlib=">,
HelpText<"Compiler runtime library to use">;
-def r : Flag<["-"], "r">, Flags<[LinkerInput,NoArgumentUnused]>;
+def frtlib_add_rpath: Flag<["-"], "frtlib-add-rpath">, Flags<[NoArgumentUnused]>,
+ HelpText<"Add -rpath with architecture-specific resource directory to the linker flags">;
+def fno_rtlib_add_rpath: Flag<["-"], "fno-rtlib-add-rpath">, Flags<[NoArgumentUnused]>,
+ HelpText<"Do not add -rpath with architecture-specific resource directory to the linker flags">;
+def r : Flag<["-"], "r">, Flags<[LinkerInput,NoArgumentUnused]>,
+ Group<Link_Group>;
def save_temps_EQ : Joined<["-", "--"], "save-temps=">, Flags<[DriverOption]>,
HelpText<"Save intermediate compilation results.">;
def save_temps : Flag<["-", "--"], "save-temps">, Flags<[DriverOption]>,
@@ -2001,7 +2227,7 @@ def std_default_EQ : Joined<["-"], "std-default=">;
def std_EQ : Joined<["-", "--"], "std=">, Flags<[CC1Option]>,
Group<CompileOnly_Group>, HelpText<"Language standard to compile for">;
def stdlib_EQ : Joined<["-", "--"], "stdlib=">, Flags<[CC1Option]>,
- HelpText<"C++ standard library to use">;
+ HelpText<"C++ standard library to use">, Values<"libc++,libstdc++,platform">;
def sub__library : JoinedOrSeparate<["-"], "sub_library">;
def sub__umbrella : JoinedOrSeparate<["-"], "sub_umbrella">;
def system_header_prefix : Joined<["--"], "system-header-prefix=">,
@@ -2014,7 +2240,7 @@ def no_system_header_prefix : Joined<["--"], "no-system-header-prefix=">,
HelpText<"Treat all #include paths starting with <prefix> as not including a "
"system header.">;
def : Separate<["--"], "no-system-header-prefix">, Alias<no_system_header_prefix>;
-def s : Flag<["-"], "s">;
+def s : Flag<["-"], "s">, Group<Link_Group>;
def target : Joined<["--"], "target=">, Flags<[DriverOption, CoreOption]>,
HelpText<"Generate code for the given target">;
def gcc_toolchain : Joined<["--"], "gcc-toolchain=">, Flags<[DriverOption]>,
@@ -2028,7 +2254,7 @@ def trigraphs : Flag<["-", "--"], "trigraphs">, Alias<ftrigraphs>,
HelpText<"Process trigraph sequences">;
def twolevel__namespace__hints : Flag<["-"], "twolevel_namespace_hints">;
def twolevel__namespace : Flag<["-"], "twolevel_namespace">;
-def t : Flag<["-"], "t">;
+def t : Flag<["-"], "t">, Group<Link_Group>;
def umbrella : Separate<["-"], "umbrella">;
def undefined : JoinedOrSeparate<["-"], "undefined">, Group<u_Group>;
def undef : Flag<["-"], "undef">, Group<u_Group>, Flags<[CC1Option]>,
@@ -2185,6 +2411,8 @@ def mv55 : Flag<["-"], "mv55">, Group<m_hexagon_Features_Group>,
Alias<mcpu_EQ>, AliasArgs<["hexagonv55"]>;
def mv60 : Flag<["-"], "mv60">, Group<m_hexagon_Features_Group>,
Alias<mcpu_EQ>, AliasArgs<["hexagonv60"]>;
+def mv62 : Flag<["-"], "mv62">, Group<m_hexagon_Features_Group>,
+ Alias<mcpu_EQ>, AliasArgs<["hexagonv62"]>;
def mhexagon_hvx : Flag<["-"], "mhvx">, Group<m_hexagon_Features_Group>,
Flags<[CC1Option]>, HelpText<"Enable Hexagon Vector eXtensions">;
def mno_hexagon_hvx : Flag<["-"], "mno-hvx">, Group<m_hexagon_Features_Group>,
@@ -2322,10 +2550,6 @@ defm devirtualize : BooleanFFlag<"devirtualize">, Group<clang_ignored_gcc_optimi
defm devirtualize_speculatively : BooleanFFlag<"devirtualize-speculatively">,
Group<clang_ignored_gcc_optimization_f_Group>;
-// gfortran options that we recognize in the driver and pass along when
-// invoking GCC to compile Fortran code.
-def gfortran_Group : OptionGroup<"gfortran Group">;
-
// Generic gfortran options.
def A_DASH : Joined<["-"], "A-">, Group<gfortran_Group>;
def J : JoinedOrSeparate<["-"], "J">, Flags<[RenderJoined]>, Group<gfortran_Group>;
diff --git a/gnu/llvm/tools/clang/include/clang/Frontend/CompilerInvocation.h b/gnu/llvm/tools/clang/include/clang/Frontend/CompilerInvocation.h
index b8294bf0376..069ca4b8f84 100644
--- a/gnu/llvm/tools/clang/include/clang/Frontend/CompilerInvocation.h
+++ b/gnu/llvm/tools/clang/include/clang/Frontend/CompilerInvocation.h
@@ -225,6 +225,11 @@ IntrusiveRefCntPtr<vfs::FileSystem>
createVFSFromCompilerInvocation(const CompilerInvocation &CI,
DiagnosticsEngine &Diags);
+IntrusiveRefCntPtr<vfs::FileSystem>
+createVFSFromCompilerInvocation(const CompilerInvocation &CI,
+ DiagnosticsEngine &Diags,
+ IntrusiveRefCntPtr<vfs::FileSystem> BaseFS);
+
} // end namespace clang
#endif
diff --git a/gnu/llvm/tools/clang/include/clang/Sema/Sema.h b/gnu/llvm/tools/clang/include/clang/Sema/Sema.h
index 1ddeb443735..2fd73e61ff7 100644
--- a/gnu/llvm/tools/clang/include/clang/Sema/Sema.h
+++ b/gnu/llvm/tools/clang/include/clang/Sema/Sema.h
@@ -26,6 +26,7 @@
#include "clang/AST/MangleNumberingContext.h"
#include "clang/AST/NSAPI.h"
#include "clang/AST/PrettyPrinter.h"
+#include "clang/AST/StmtCXX.h"
#include "clang/AST/TypeLoc.h"
#include "clang/AST/TypeOrdering.h"
#include "clang/Basic/ExpressionTraits.h"
@@ -101,6 +102,7 @@ namespace clang {
class CodeCompletionAllocator;
class CodeCompletionTUInfo;
class CodeCompletionResult;
+ class CoroutineBodyStmt;
class Decl;
class DeclAccessPair;
class DeclContext;
@@ -334,6 +336,35 @@ public:
/// \brief Source location for newly created implicit MSInheritanceAttrs
SourceLocation ImplicitMSInheritanceAttrLoc;
+ /// \brief pragma clang section kind
+ enum PragmaClangSectionKind {
+ PCSK_Invalid = 0,
+ PCSK_BSS = 1,
+ PCSK_Data = 2,
+ PCSK_Rodata = 3,
+ PCSK_Text = 4
+ };
+
+ enum PragmaClangSectionAction {
+ PCSA_Set = 0,
+ PCSA_Clear = 1
+ };
+
+ struct PragmaClangSection {
+ std::string SectionName;
+ bool Valid = false;
+ SourceLocation PragmaLocation;
+
+ void Act(SourceLocation PragmaLocation,
+ PragmaClangSectionAction Action,
+ StringLiteral* Name);
+ };
+
+ PragmaClangSection PragmaClangBSSSection;
+ PragmaClangSection PragmaClangDataSection;
+ PragmaClangSection PragmaClangRodataSection;
+ PragmaClangSection PragmaClangTextSection;
+
enum PragmaMsStackAction {
PSK_Reset = 0x0, // #pragma ()
PSK_Set = 0x1, // #pragma (value)
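
For illustration only (not part of this patch): the PragmaClangSection bookkeeping added above backs the user-level "#pragma clang section" feature, which redirects where subsequently declared globals are emitted. A minimal sketch, with made-up section names:

#pragma clang section bss=".my_bss" data=".my_data" rodata=".my_rodata"
int zero_init;                    // placed in .my_bss
int with_init = 42;               // placed in .my_data
const int table[3] = {1, 2, 3};   // placed in .my_rodata
#pragma clang section bss="" data="" rodata=""   // empty names: back to defaults
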
@@ -435,6 +466,20 @@ public:
/// VisContext - Manages the stack for \#pragma GCC visibility.
void *VisContext; // Really a "PragmaVisStack*"
+ /// \brief This represents the stack of attributes that were pushed by
+ /// \#pragma clang attribute.
+ struct PragmaAttributeEntry {
+ SourceLocation Loc;
+ AttributeList *Attribute;
+ SmallVector<attr::SubjectMatchRule, 4> MatchRules;
+ bool IsUsed;
+ };
+ SmallVector<PragmaAttributeEntry, 2> PragmaAttributeStack;
+
+ /// \brief The declaration that is currently receiving an attribute from the
+ /// #pragma attribute stack.
+ const Decl *PragmaAttributeCurrentTargetDecl;
+
/// \brief This represents the last location of a "#pragma clang optimize off"
/// directive if such a directive has not been closed by an "on" yet. If
/// optimizations are currently "on", this is set to an invalid location.
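
Again for illustration only: the PragmaAttributeStack above models "#pragma clang attribute", which pushes an attribute that is applied to every matching declaration until the corresponding pop. A minimal sketch, with a made-up annotation string:

#pragma clang attribute push (__attribute__((annotate("hot_path"))), apply_to = function)
void decode(const char *p);   // receives annotate("hot_path")
int checksum(int x);          // so does every function declared before the pop
#pragma clang attribute pop
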
@@ -673,16 +718,37 @@ public:
class SynthesizedFunctionScope {
Sema &S;
Sema::ContextRAII SavedContext;
+ bool PushedCodeSynthesisContext = false;
public:
SynthesizedFunctionScope(Sema &S, DeclContext *DC)
- : S(S), SavedContext(S, DC)
- {
+ : S(S), SavedContext(S, DC) {
S.PushFunctionScope();
- S.PushExpressionEvaluationContext(Sema::PotentiallyEvaluated);
+ S.PushExpressionEvaluationContext(
+ Sema::ExpressionEvaluationContext::PotentiallyEvaluated);
+ if (auto *FD = dyn_cast<FunctionDecl>(DC))
+ FD->setWillHaveBody(true);
+ else
+ assert(isa<ObjCMethodDecl>(DC));
+ }
+
+ void addContextNote(SourceLocation UseLoc) {
+ assert(!PushedCodeSynthesisContext);
+
+ Sema::CodeSynthesisContext Ctx;
+ Ctx.Kind = Sema::CodeSynthesisContext::DefiningSynthesizedFunction;
+ Ctx.PointOfInstantiation = UseLoc;
+ Ctx.Entity = cast<Decl>(S.CurContext);
+ S.pushCodeSynthesisContext(Ctx);
+
+ PushedCodeSynthesisContext = true;
}
~SynthesizedFunctionScope() {
+ if (PushedCodeSynthesisContext)
+ S.popCodeSynthesisContext();
+ if (auto *FD = dyn_cast<FunctionDecl>(S.CurContext))
+ FD->setWillHaveBody(false);
S.PopExpressionEvaluationContext();
S.PopFunctionScopeInfo();
}
@@ -800,7 +866,7 @@ public:
/// \brief Describes how the expressions currently being parsed are
/// evaluated at run-time, if at all.
- enum ExpressionEvaluationContext {
+ enum class ExpressionEvaluationContext {
/// \brief The current expression and its subexpressions occur within an
/// unevaluated operand (C++11 [expr]p7), such as the subexpression of
/// \c sizeof, where the type of the expression may be significant but
@@ -906,8 +972,12 @@ public:
MangleNumberingContext &getMangleNumberingContext(ASTContext &Ctx);
bool isUnevaluated() const {
- return Context == Unevaluated || Context == UnevaluatedAbstract ||
- Context == UnevaluatedList;
+ return Context == ExpressionEvaluationContext::Unevaluated ||
+ Context == ExpressionEvaluationContext::UnevaluatedAbstract ||
+ Context == ExpressionEvaluationContext::UnevaluatedList;
+ }
+ bool isConstantEvaluated() const {
+ return Context == ExpressionEvaluationContext::ConstantEvaluated;
}
};
@@ -931,7 +1001,7 @@ public:
///
/// This is basically a wrapper around PointerIntPair. The lowest bits of the
/// integer are used to determine whether overload resolution succeeded.
- class SpecialMemberOverloadResult : public llvm::FastFoldingSetNode {
+ class SpecialMemberOverloadResult {
public:
enum Kind {
NoMemberOrDeleted,
@@ -943,9 +1013,9 @@ public:
llvm::PointerIntPair<CXXMethodDecl*, 2> Pair;
public:
- SpecialMemberOverloadResult(const llvm::FoldingSetNodeID &ID)
- : FastFoldingSetNode(ID)
- {}
+ SpecialMemberOverloadResult() : Pair() {}
+ SpecialMemberOverloadResult(CXXMethodDecl *MD)
+ : Pair(MD, MD->isDeleted() ? NoMemberOrDeleted : Success) {}
CXXMethodDecl *getMethod() const { return Pair.getPointer(); }
void setMethod(CXXMethodDecl *MD) { Pair.setPointer(MD); }
@@ -954,9 +1024,18 @@ public:
void setKind(Kind K) { Pair.setInt(K); }
};
+ class SpecialMemberOverloadResultEntry
+ : public llvm::FastFoldingSetNode,
+ public SpecialMemberOverloadResult {
+ public:
+ SpecialMemberOverloadResultEntry(const llvm::FoldingSetNodeID &ID)
+ : FastFoldingSetNode(ID)
+ {}
+ };
+
/// \brief A cache of special member function overload resolution results
/// for C++ records.
- llvm::FoldingSet<SpecialMemberOverloadResult> SpecialMemberCache;
+ llvm::FoldingSet<SpecialMemberOverloadResultEntry> SpecialMemberCache;
/// \brief A cache of the flags available in enumerations with the flag_bits
/// attribute.
@@ -1038,6 +1117,16 @@ public:
/// same special member, we should act as if it is not yet declared.
llvm::SmallSet<SpecialMemberDecl, 4> SpecialMembersBeingDeclared;
+ /// The function definitions which were renamed as part of typo-correction
+ /// to match their respective declarations. We want to keep track of them
+ /// to ensure that we don't emit a "redefinition" error if we encounter a
+ /// correctly named definition after the renamed definition.
+ llvm::SmallPtrSet<const NamedDecl *, 4> TypoCorrectedFunctionDefinitions;
+
+ /// Stack of types that correspond to the parameter entities that are
+ /// currently being copy-initialized. Can be empty.
+ llvm::SmallVector<QualType, 4> CurrentParameterCopyTypes;
+
void ReadMethodPool(Selector Sel);
void updateOutOfDateSelector(Selector Sel);
@@ -1054,14 +1143,12 @@ public:
/// statements.
class FPContractStateRAII {
public:
- FPContractStateRAII(Sema& S)
- : S(S), OldFPContractState(S.FPFeatures.fp_contract) {}
- ~FPContractStateRAII() {
- S.FPFeatures.fp_contract = OldFPContractState;
- }
+ FPContractStateRAII(Sema &S) : S(S), OldFPFeaturesState(S.FPFeatures) {}
+ ~FPContractStateRAII() { S.FPFeatures = OldFPFeaturesState; }
+
private:
Sema& S;
- bool OldFPContractState : 1;
+ FPOptions OldFPFeaturesState;
};
void addImplicitTypedef(StringRef Name, QualType T);
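
Illustrative sketch, under the assumption that the wider FPOptions snapshot above exists to restore floating-point pragmas scoped to a compound statement (such as "#pragma clang fp contract") on exit, not just the FP_CONTRACT bit:

double fma_like(double a, double b, double c) {
#pragma clang fp contract(fast)
  return a * b + c;   // the multiply and add may be contracted into one fma
}
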
@@ -1179,6 +1266,7 @@ public:
void emitAndClearUnusedLocalTypedefWarnings();
+ void ActOnStartOfTranslationUnit();
void ActOnEndOfTranslationUnit();
void CheckDelegatingCtorCycles();
@@ -1235,9 +1323,11 @@ public:
sema::BlockScopeInfo *getCurBlock();
/// Retrieve the current lambda scope info, if any.
- /// \param IgnoreCapturedRegions true if should find the top-most lambda scope
- /// info ignoring all inner captured regions scope infos.
- sema::LambdaScopeInfo *getCurLambda(bool IgnoreCapturedRegions = false);
+ /// \param IgnoreNonLambdaCapturingScope true if should find the top-most
+ /// lambda scope info ignoring all inner capturing scopes that are not
+ /// lambda scopes.
+ sema::LambdaScopeInfo *
+ getCurLambda(bool IgnoreNonLambdaCapturingScope = false);
/// \brief Retrieve the current generic lambda info, if any.
sema::LambdaScopeInfo *getCurGenericLambda();
@@ -1420,17 +1510,20 @@ private:
/// The modules we're currently parsing.
llvm::SmallVector<ModuleScope, 16> ModuleScopes;
- VisibleModuleSet VisibleModules;
+ /// Get the module whose scope we are currently within.
+ Module *getCurrentModule() const {
+ return ModuleScopes.empty() ? nullptr : ModuleScopes.back().Module;
+ }
- Module *CachedFakeTopLevelModule;
+ VisibleModuleSet VisibleModules;
public:
/// \brief Get the module owning an entity.
- Module *getOwningModule(Decl *Entity);
+ Module *getOwningModule(Decl *Entity) { return Entity->getOwningModule(); }
/// \brief Make a merged definition of an existing hidden definition \p ND
/// visible at the specified location.
- void makeMergedDefinitionVisible(NamedDecl *ND, SourceLocation Loc);
+ void makeMergedDefinitionVisible(NamedDecl *ND);
bool isModuleVisible(Module *M) { return VisibleModules.isVisible(M); }
@@ -1449,6 +1542,11 @@ public:
llvm::SmallVectorImpl<Module *> *Modules);
bool hasVisibleMergedDefinition(NamedDecl *Def);
+ bool hasMergedDefinitionInCurrentModule(NamedDecl *Def);
+
+ /// Determine if \p D and \p Suggested have a structurally compatible
+ /// layout as described in C11 6.2.7/1.
+ bool hasStructuralCompatLayout(Decl *D, Decl *Suggested);
/// Determine if \p D has a visible definition. If not, suggest a declaration
/// that should be made visible to expose the definition.
@@ -1464,6 +1562,12 @@ public:
hasVisibleDefaultArgument(const NamedDecl *D,
llvm::SmallVectorImpl<Module *> *Modules = nullptr);
+ /// Determine if there is a visible declaration of \p D that is an explicit
+ /// specialization declaration for a specialization of a template. (For a
+ /// member specialization, use hasVisibleMemberSpecialization.)
+ bool hasVisibleExplicitSpecialization(
+ const NamedDecl *D, llvm::SmallVectorImpl<Module *> *Modules = nullptr);
+
/// Determine if there is a visible declaration of \p D that is a member
/// specialization declaration (as opposed to an instantiated declaration).
bool hasVisibleMemberSpecialization(
@@ -1531,9 +1635,13 @@ public:
//
struct SkipBodyInfo {
- SkipBodyInfo() : ShouldSkip(false), Previous(nullptr) {}
+ SkipBodyInfo()
+ : ShouldSkip(false), CheckSameAsPrevious(false), Previous(nullptr),
+ New(nullptr) {}
bool ShouldSkip;
+ bool CheckSameAsPrevious;
NamedDecl *Previous;
+ NamedDecl *New;
};
DeclGroupPtrTy ConvertDeclToDeclGroup(Decl *Ptr, Decl *OwnedType = nullptr);
@@ -1548,6 +1656,7 @@ public:
ParsedType ObjectType = nullptr,
bool IsCtorOrDtorName = false,
bool WantNontrivialTypeSourceInfo = false,
+ bool IsClassTemplateDeductionContext = true,
IdentifierInfo **CorrectedII = nullptr);
TypeSpecifierType isTagName(IdentifierInfo &II, Scope *S);
bool isMicrosoftMissingTypename(const CXXScopeSpec *SS, Scope *S);
@@ -1556,7 +1665,7 @@ public:
Scope *S,
CXXScopeSpec *SS,
ParsedType &SuggestedType,
- bool AllowClassTemplates = false);
+ bool IsTemplateName = false);
/// Attempt to behave like MSVC in situations where lookup of an unqualified
/// type name has failed in a dependent context. In these situations, we
@@ -1689,6 +1798,35 @@ public:
bool IsAddressOfOperand,
std::unique_ptr<CorrectionCandidateCallback> CCC = nullptr);
+ /// Describes the detailed kind of a template name. Used in diagnostics.
+ enum class TemplateNameKindForDiagnostics {
+ ClassTemplate,
+ FunctionTemplate,
+ VarTemplate,
+ AliasTemplate,
+ TemplateTemplateParam,
+ DependentTemplate
+ };
+ TemplateNameKindForDiagnostics
+ getTemplateNameKindForDiagnostics(TemplateName Name);
+
+ /// Determine whether it's plausible that E was intended to be a
+ /// template-name.
+ bool mightBeIntendedToBeTemplateName(ExprResult E) {
+ if (!getLangOpts().CPlusPlus || E.isInvalid())
+ return false;
+ if (auto *DRE = dyn_cast<DeclRefExpr>(E.get()))
+ return !DRE->hasExplicitTemplateArgs();
+ if (auto *ME = dyn_cast<MemberExpr>(E.get()))
+ return !ME->hasExplicitTemplateArgs();
+ // Any additional cases recognized here should also be handled by
+ // diagnoseExprIntendedAsTemplateName.
+ return false;
+ }
+ void diagnoseExprIntendedAsTemplateName(Scope *S, ExprResult TemplateName,
+ SourceLocation Less,
+ SourceLocation Greater);
+
Decl *ActOnDeclarator(Scope *S, Declarator &D);
NamedDecl *HandleDeclarator(Scope *S, Declarator &D,
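
The new hooks above target code where a non-template name is followed by what looks like a template argument list. An intentionally ill-formed sketch of that situation (illustration only):

int max = 0;               // an object, not a template

int pick(int a, int b) {
  return max<int>(a, b);   // ill-formed: 'max' is not a template, so '<' is
                           // parsed as a comparison; this is the situation the
                           // new diagnostic is meant to explain
}
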
@@ -1709,8 +1847,11 @@ public:
static bool adjustContextForLocalExternDecl(DeclContext *&DC);
void DiagnoseFunctionSpecifiers(const DeclSpec &DS);
+ NamedDecl *getShadowedDeclaration(const TypedefNameDecl *D,
+ const LookupResult &R);
NamedDecl *getShadowedDeclaration(const VarDecl *D, const LookupResult &R);
- void CheckShadow(VarDecl *D, NamedDecl *ShadowedDecl, const LookupResult &R);
+ void CheckShadow(NamedDecl *D, NamedDecl *ShadowedDecl,
+ const LookupResult &R);
void CheckShadow(Scope *S, VarDecl *D);
/// Warn if 'E', which is an expression that is about to be modified, refers
@@ -1747,6 +1888,8 @@ public:
// Returns true if the variable declaration is a redeclaration
bool CheckVariableDeclaration(VarDecl *NewVD, LookupResult &Previous);
void CheckVariableDeclarationType(VarDecl *NewVD);
+ bool DeduceVariableDeclarationType(VarDecl *VDecl, bool DirectInit,
+ Expr *Init);
void CheckCompleteVariableDeclaration(VarDecl *VD);
void CheckCompleteDecompositionDeclaration(DecompositionDecl *DD);
void MaybeSuggestAddingStaticToDecl(const FunctionDecl *D);
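
For context, DeduceVariableDeclarationType covers the familiar deduction of a variable's type from its initializer; a minimal illustration:

auto n = 42;       // deduced as int
auto *p = &n;      // deduced as int *
auto &r = n;       // deduced as int &
// auto bad;       // ill-formed: a deduced-type variable requires an initializer
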
@@ -1769,7 +1912,7 @@ public:
// Returns true if the function declaration is a redeclaration
bool CheckFunctionDeclaration(Scope *S,
FunctionDecl *NewFD, LookupResult &Previous,
- bool IsExplicitSpecialization);
+ bool IsMemberSpecialization);
bool shouldLinkDependentDeclWithPrevious(Decl *D, Decl *OldDecl);
void CheckMain(FunctionDecl *FD, const DeclSpec &D);
void CheckMSVCRTEntryPoint(FunctionDecl *FD);
@@ -1794,7 +1937,6 @@ public:
void AddInitializerToDecl(Decl *dcl, Expr *init, bool DirectInit);
void ActOnUninitializedDecl(Decl *dcl);
void ActOnInitializerError(Decl *Dcl);
- bool canInitializeWithParenthesizedList(QualType TargetType);
void ActOnPureSpecifier(Decl *D, SourceLocation PureSpecLoc);
void ActOnCXXForRangeDecl(Decl *D);
@@ -1887,7 +2029,8 @@ public:
/// The parser has processed a module-declaration that begins the definition
/// of a module interface or implementation.
- DeclGroupPtrTy ActOnModuleDecl(SourceLocation ModuleLoc, ModuleDeclKind MDK,
+ DeclGroupPtrTy ActOnModuleDecl(SourceLocation StartLoc,
+ SourceLocation ModuleLoc, ModuleDeclKind MDK,
ModuleIdPath Path);
/// \brief The parser has processed a module import declaration.
@@ -2012,15 +2155,14 @@ public:
};
Decl *ActOnTag(Scope *S, unsigned TagSpec, TagUseKind TUK,
- SourceLocation KWLoc, CXXScopeSpec &SS,
- IdentifierInfo *Name, SourceLocation NameLoc,
- AttributeList *Attr, AccessSpecifier AS,
- SourceLocation ModulePrivateLoc,
- MultiTemplateParamsArg TemplateParameterLists,
- bool &OwnedDecl, bool &IsDependent,
- SourceLocation ScopedEnumKWLoc,
+ SourceLocation KWLoc, CXXScopeSpec &SS, IdentifierInfo *Name,
+ SourceLocation NameLoc, AttributeList *Attr,
+ AccessSpecifier AS, SourceLocation ModulePrivateLoc,
+ MultiTemplateParamsArg TemplateParameterLists, bool &OwnedDecl,
+ bool &IsDependent, SourceLocation ScopedEnumKWLoc,
bool ScopedEnumUsesClassTag, TypeResult UnderlyingType,
- bool IsTypeSpecifier, SkipBodyInfo *SkipBody = nullptr);
+ bool IsTypeSpecifier, bool IsTemplateParamOrArg,
+ SkipBodyInfo *SkipBody = nullptr);
Decl *ActOnTemplatedFriendTag(Scope *S, SourceLocation FriendLoc,
unsigned TagSpec, SourceLocation TagLoc,
@@ -2085,6 +2227,12 @@ public:
/// struct, or union).
void ActOnTagStartDefinition(Scope *S, Decl *TagDecl);
+ /// Perform ODR-like check for C/ObjC when merging tag types from modules.
+ /// Differently from C++, actually parse the body and reject / error out
+ /// in case of a structural mismatch.
+ bool ActOnDuplicateDefinition(DeclSpec &DS, Decl *Prev,
+ SkipBodyInfo &SkipBody);
+
typedef void *SkippedDefinitionContext;
/// \brief Invoked when we enter a tag definition that we're skipping.
@@ -2138,8 +2286,8 @@ public:
Decl *ActOnEnumConstant(Scope *S, Decl *EnumDecl, Decl *LastEnumConstant,
SourceLocation IdLoc, IdentifierInfo *Id,
- AttributeList *Attrs,
- SourceLocation EqualLoc, Expr *Val);
+ AttributeList *Attrs, SourceLocation EqualLoc,
+ Expr *Val);
void ActOnEnumBody(SourceLocation EnumLoc, SourceRange BraceRange,
Decl *EnumDecl,
ArrayRef<Decl *> Elements,
@@ -2282,6 +2430,7 @@ public:
void MergeVarDeclTypes(VarDecl *New, VarDecl *Old, bool MergeTypeWithOld);
void MergeVarDeclExceptionSpecs(VarDecl *New, VarDecl *Old);
bool checkVarDeclRedefinition(VarDecl *OldDefn, VarDecl *NewDefn);
+ void notePreviousDefinition(const NamedDecl *Old, SourceLocation New);
bool MergeCXXFunctionDecl(FunctionDecl *New, FunctionDecl *Old, Scope *S);
// AssignmentAction - This is used by all the assignment diagnostic functions
@@ -2593,8 +2742,7 @@ public:
SourceLocation OpLoc, ArrayRef<Expr *> Args,
OverloadCandidateSet& CandidateSet,
SourceRange OpRange = SourceRange());
- void AddBuiltinCandidate(QualType ResultTy, QualType *ParamTys,
- ArrayRef<Expr *> Args,
+ void AddBuiltinCandidate(QualType *ParamTys, ArrayRef<Expr *> Args,
OverloadCandidateSet& CandidateSet,
bool IsAssignmentOperator = false,
unsigned NumContextualBoolArguments = 0);
@@ -2642,7 +2790,7 @@ public:
/// of a function.
///
/// Returns true if any errors were emitted.
- bool diagnoseArgIndependentDiagnoseIfAttrs(const FunctionDecl *Function,
+ bool diagnoseArgIndependentDiagnoseIfAttrs(const NamedDecl *ND,
SourceLocation Loc);
/// Returns whether the given function's address can be taken or not,
@@ -2672,7 +2820,8 @@ public:
resolveAddressOfOnlyViableOverloadCandidate(Expr *E,
DeclAccessPair &FoundResult);
- bool resolveAndFixAddressOfOnlyViableOverloadCandidate(ExprResult &SrcExpr);
+ bool resolveAndFixAddressOfOnlyViableOverloadCandidate(
+ ExprResult &SrcExpr, bool DoFunctionPointerConversion = false);
FunctionDecl *
ResolveSingleFunctionTemplateSpecialization(OverloadExpr *ovl,
@@ -2876,13 +3025,13 @@ public:
LOLR_StringTemplate
};
- SpecialMemberOverloadResult *LookupSpecialMember(CXXRecordDecl *D,
- CXXSpecialMember SM,
- bool ConstArg,
- bool VolatileArg,
- bool RValueThis,
- bool ConstThis,
- bool VolatileThis);
+ SpecialMemberOverloadResult LookupSpecialMember(CXXRecordDecl *D,
+ CXXSpecialMember SM,
+ bool ConstArg,
+ bool VolatileArg,
+ bool RValueThis,
+ bool ConstThis,
+ bool VolatileThis);
typedef std::function<void(const TypoCorrection &)> TypoDiagnosticGenerator;
typedef std::function<ExprResult(Sema &, TypoExpr *, TypoCorrection)>
@@ -2963,9 +3112,6 @@ public:
void LookupOverloadedOperatorName(OverloadedOperatorKind Op, Scope *S,
QualType T1, QualType T2,
UnresolvedSetImpl &Functions);
- void addOverloadedOperatorToUnresolvedSet(UnresolvedSetImpl &Functions,
- DeclAccessPair Operator,
- QualType T1, QualType T2);
LabelDecl *LookupOrCreateLabel(IdentifierInfo *II, SourceLocation IdentLoc,
SourceLocation GnuLabelLoc = SourceLocation());
@@ -2998,7 +3144,8 @@ public:
bool IncludeGlobalScope = true);
void LookupVisibleDecls(DeclContext *Ctx, LookupNameKind Kind,
VisibleDeclConsumer &Consumer,
- bool IncludeGlobalScope = true);
+ bool IncludeGlobalScope = true,
+ bool IncludeDependentBases = false);
enum CorrectTypoKind {
CTK_NonError, // CorrectTypo used in a non error recovery situation.
@@ -3072,6 +3219,8 @@ public:
const PartialDiagnostic &PrevNote,
bool ErrorRecovery = true);
+ void MarkTypoCorrectedFunctionDefinition(const NamedDecl *F);
+
void FindAssociatedClassesAndNamespaces(SourceLocation InstantiationLoc,
ArrayRef<Expr *> Args,
AssociatedNamespaceSet &AssociatedNamespaces,
@@ -3098,6 +3247,8 @@ public:
void ProcessPragmaWeak(Scope *S, Decl *D);
// Decl attributes - this routine is the top level dispatcher.
void ProcessDeclAttributes(Scope *S, Decl *D, const Declarator &PD);
+ // Helper for delayed processing of attributes.
+ void ProcessDeclAttributeDelayed(Decl *D, const AttributeList *AttrList);
void ProcessDeclAttributeList(Scope *S, Decl *D, const AttributeList *AL,
bool IncludeCXX11Attributes = true);
bool ProcessAccessDeclAttributeList(AccessSpecDecl *ASDecl,
@@ -3115,6 +3266,7 @@ public:
bool CheckCallingConvAttr(const AttributeList &attr, CallingConv &CC,
const FunctionDecl *FD = nullptr);
bool CheckNoReturnAttr(const AttributeList &attr);
+ bool CheckNoCallerSavedRegsAttr(const AttributeList &attr);
bool checkStringLiteralArgumentAttr(const AttributeList &Attr,
unsigned ArgNum, StringRef &Str,
SourceLocation *ArgLocation = nullptr);
@@ -3184,7 +3336,6 @@ public:
bool IsProtocolMethodDecl);
typedef llvm::SmallPtrSet<Selector, 8> SelectorSet;
- typedef llvm::DenseMap<Selector, ObjCMethodDecl*> ProtocolsMethodsMap;
/// CheckImplementationIvars - This routine checks if the instance variables
  /// listed in the implementation match those listed in the interface.
@@ -3209,9 +3360,10 @@ public:
/// DefaultSynthesizeProperties - This routine default synthesizes all
/// properties which must be synthesized in the class's \@implementation.
- void DefaultSynthesizeProperties (Scope *S, ObjCImplDecl* IMPDecl,
- ObjCInterfaceDecl *IDecl);
- void DefaultSynthesizeProperties(Scope *S, Decl *D);
+ void DefaultSynthesizeProperties(Scope *S, ObjCImplDecl *IMPDecl,
+ ObjCInterfaceDecl *IDecl,
+ SourceLocation AtEnd);
+ void DefaultSynthesizeProperties(Scope *S, Decl *D, SourceLocation AtEnd);
/// IvarBacksCurrentMethodAccessor - This routine returns 'true' if 'IV' is
/// an ivar synthesized for 'Method' and 'Method' is a property accessor
@@ -3237,7 +3389,9 @@ public:
SourceLocation LParenLoc,
FieldDeclarator &FD,
Selector GetterSel,
+ SourceLocation GetterNameLoc,
Selector SetterSel,
+ SourceLocation SetterNameLoc,
const bool isReadWrite,
unsigned &Attributes,
const unsigned AttributesAsWritten,
@@ -3253,7 +3407,9 @@ public:
SourceLocation LParenLoc,
FieldDeclarator &FD,
Selector GetterSel,
+ SourceLocation GetterNameLoc,
Selector SetterSel,
+ SourceLocation SetterNameLoc,
const bool isReadWrite,
const unsigned Attributes,
const unsigned AttributesAsWritten,
@@ -3707,6 +3863,9 @@ public:
void diagnoseNullableToNonnullConversion(QualType DstType, QualType SrcType,
SourceLocation Loc);
+ /// Warn when implicitly casting 0 to nullptr.
+ void diagnoseZeroToNullptrConversion(CastKind Kind, const Expr *E);
+
ParsingDeclState PushParsingDeclaration(sema::DelayedDiagnosticPool &pool) {
return DelayedDiagnostics.push(pool);
}
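
Illustration only: diagnoseZeroToNullptrConversion concerns implicit conversions of the literal 0 to a null pointer value, for example:

void take(int *p);

void call() {
  take(0);         // literal 0 used where a null pointer is expected
  take(nullptr);   // preferred spelling in C++11 and later
}
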
@@ -3722,11 +3881,10 @@ public:
void redelayDiagnostics(sema::DelayedDiagnosticPool &pool);
- void EmitAvailabilityWarning(AvailabilityResult AR, NamedDecl *D,
- StringRef Message, SourceLocation Loc,
- const ObjCInterfaceDecl *UnknownObjCClass,
- const ObjCPropertyDecl *ObjCProperty,
- bool ObjCPropertyAccess);
+ void DiagnoseAvailabilityOfDecl(NamedDecl *D, SourceLocation Loc,
+ const ObjCInterfaceDecl *UnknownObjCClass,
+ bool ObjCPropertyAccess,
+ bool AvoidPartialAvailabilityChecks = false);
bool makeUnavailableInSystemHeader(SourceLocation loc,
UnavailableAttr::ImplicitReason reason);
@@ -3739,8 +3897,9 @@ public:
bool CanUseDecl(NamedDecl *D, bool TreatUnavailableAsInvalid);
bool DiagnoseUseOfDecl(NamedDecl *D, SourceLocation Loc,
- const ObjCInterfaceDecl *UnknownObjCClass=nullptr,
- bool ObjCPropertyAccess=false);
+ const ObjCInterfaceDecl *UnknownObjCClass = nullptr,
+ bool ObjCPropertyAccess = false,
+ bool AvoidPartialAvailabilityChecks = false);
void NoteDeletedFunction(FunctionDecl *FD);
void NoteDeletedInheritingConstructor(CXXConstructorDecl *CD);
std::string getDeletedOrUnavailableSuffix(const FunctionDecl *FD);
@@ -3782,7 +3941,7 @@ public:
void MarkFunctionReferenced(SourceLocation Loc, FunctionDecl *Func,
bool MightBeOdrUse = true);
void MarkVariableReferenced(SourceLocation Loc, VarDecl *Var);
- void MarkDeclRefReferenced(DeclRefExpr *E);
+ void MarkDeclRefReferenced(DeclRefExpr *E, const Expr *Base = nullptr);
void MarkMemberReferenced(MemberExpr *E);
void UpdateMarkingForLValueToRValue(Expr *E);
@@ -4328,7 +4487,7 @@ public:
/// \brief Determine whether Ctor is an initializer-list constructor, as
/// defined in [dcl.init.list]p2.
- bool isInitListConstructor(const CXXConstructorDecl *Ctor);
+ bool isInitListConstructor(const FunctionDecl *Ctor);
Decl *ActOnUsingDirective(Scope *CurScope,
SourceLocation UsingLoc,
@@ -4737,7 +4896,8 @@ public:
ParsedType ObjectType,
bool EnteringContext);
- ParsedType getDestructorType(const DeclSpec& DS, ParsedType ObjectType);
+ ParsedType getDestructorTypeForDecltype(const DeclSpec &DS,
+ ParsedType ObjectType);
// Checks that reinterpret casts don't have undefined behavior.
void CheckCompatibleReinterpretCast(QualType SrcType, QualType DestType,
@@ -4953,7 +5113,7 @@ public:
ArrayRef<TypeSourceInfo *> Args,
SourceLocation RParenLoc);
- /// ActOnArrayTypeTrait - Parsed one of the bianry type trait support
+ /// ActOnArrayTypeTrait - Parsed one of the binary type trait support
/// pseudo-functions.
ExprResult ActOnArrayTypeTrait(ArrayTypeTrait ATT,
SourceLocation KWLoc,
@@ -5106,7 +5266,8 @@ public:
CXXScopeSpec &SS,
NamedDecl *ScopeLookupResult,
bool ErrorRecoveryLookup,
- bool *IsCorrectedToColon = nullptr);
+ bool *IsCorrectedToColon = nullptr,
+ bool OnlyNamespace = false);
/// \brief The parser has parsed a nested-name-specifier 'identifier::'.
///
@@ -5130,13 +5291,16 @@ public:
/// are allowed. The bool value pointed by this parameter is set to 'true'
/// if the identifier is treated as if it was followed by ':', not '::'.
///
+ /// \param OnlyNamespace If true, only considers namespaces in lookup.
+ ///
/// \returns true if an error occurred, false otherwise.
bool ActOnCXXNestedNameSpecifier(Scope *S,
NestedNameSpecInfo &IdInfo,
bool EnteringContext,
CXXScopeSpec &SS,
bool ErrorRecoveryLookup = false,
- bool *IsCorrectedToColon = nullptr);
+ bool *IsCorrectedToColon = nullptr,
+ bool OnlyNamespace = false);
ExprResult ActOnDecltypeExpression(Expr *E);
@@ -5312,6 +5476,12 @@ public:
ExprResult ActOnLambdaExpr(SourceLocation StartLoc, Stmt *Body,
Scope *CurScope);
+ /// \brief Does copying/destroying the captured variable have side effects?
+ bool CaptureHasSideEffects(const sema::LambdaScopeInfo::Capture &From);
+
+ /// \brief Diagnose if an explicit lambda capture is unused.
+ void DiagnoseUnusedLambdaCapture(const sema::LambdaScopeInfo::Capture &From);
+
/// \brief Complete a lambda-expression having processed and attached the
/// lambda body.
ExprResult BuildLambdaExpr(SourceLocation StartLoc, SourceLocation EndLoc,
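
A minimal sketch of what the two new lambda-capture helpers above look at: an explicit capture that the body never uses, and whose copy and destruction have no side effects, can now be diagnosed. Illustration only:

int compute(int x) {
  int unused = 42;
  auto add = [x, unused] { return x + 1; };   // 'unused' is captured but never
                                              // referenced in the lambda body
  return add();
}
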
@@ -5604,6 +5774,9 @@ public:
void CheckConversionDeclarator(Declarator &D, QualType &R,
StorageClass& SC);
Decl *ActOnConversionDeclarator(CXXConversionDecl *Conversion);
+ void CheckDeductionGuideDeclarator(Declarator &D, QualType &R,
+ StorageClass &SC);
+ void CheckDeductionGuideTemplate(FunctionTemplateDecl *TD);
void CheckExplicitlyDefaultedSpecialMember(CXXMethodDecl *MD);
void CheckExplicitlyDefaultedMemberExceptionSpec(CXXMethodDecl *MD,
@@ -5807,6 +5980,12 @@ public:
TemplateTy &Template,
bool &MemberOfUnknownSpecialization);
+ /// Determine whether a particular identifier might be the name in a C++1z
+ /// deduction-guide declaration.
+ bool isDeductionGuideName(Scope *S, const IdentifierInfo &Name,
+ SourceLocation NameLoc,
+ ParsedTemplateTy *Template = nullptr);
+
bool DiagnoseUnknownTemplateName(const IdentifierInfo &II,
SourceLocation IILoc,
Scope *S,
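
The deduction-guide entry points above correspond to the C++17 deduction-guide syntax. A self-contained sketch (illustration only):

#include <iterator>
#include <vector>

template <typename T>
struct Bag {
  template <typename Iter>
  Bag(Iter, Iter) {}   // the element type is not deducible from this constructor
  Bag(T) {}
};

// User-written deduction guide: deduce the element type from the iterators.
template <typename Iter>
Bag(Iter, Iter) -> Bag<typename std::iterator_traits<Iter>::value_type>;

std::vector<double> values{1.0, 2.0};
Bag b(values.begin(), values.end());   // deduced as Bag<double> via the guide
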
@@ -5882,7 +6061,7 @@ public:
SourceLocation DeclStartLoc, SourceLocation DeclLoc,
const CXXScopeSpec &SS, TemplateIdAnnotation *TemplateId,
ArrayRef<TemplateParameterList *> ParamLists,
- bool IsFriend, bool &IsExplicitSpecialization, bool &Invalid);
+ bool IsFriend, bool &IsMemberSpecialization, bool &Invalid);
DeclResult CheckClassTemplate(Scope *S, unsigned TagSpec, TagUseKind TUK,
SourceLocation KWLoc, CXXScopeSpec &SS,
@@ -5911,11 +6090,13 @@ public:
TypeResult
ActOnTemplateIdType(CXXScopeSpec &SS, SourceLocation TemplateKWLoc,
- TemplateTy Template, SourceLocation TemplateLoc,
+ TemplateTy Template, IdentifierInfo *TemplateII,
+ SourceLocation TemplateIILoc,
SourceLocation LAngleLoc,
ASTTemplateArgsPtr TemplateArgs,
SourceLocation RAngleLoc,
- bool IsCtorOrDtorName = false);
+ bool IsCtorOrDtorName = false,
+ bool IsClassName = false);
/// \brief Parsed an elaborated-type-specifier that refers to a template-id,
/// such as \c class T::template apply<U>.
@@ -5957,13 +6138,10 @@ public:
const DeclarationNameInfo &NameInfo,
const TemplateArgumentListInfo *TemplateArgs);
- TemplateNameKind ActOnDependentTemplateName(Scope *S,
- CXXScopeSpec &SS,
- SourceLocation TemplateKWLoc,
- UnqualifiedId &Name,
- ParsedType ObjectType,
- bool EnteringContext,
- TemplateTy &Template);
+ TemplateNameKind ActOnDependentTemplateName(
+ Scope *S, CXXScopeSpec &SS, SourceLocation TemplateKWLoc,
+ UnqualifiedId &Name, ParsedType ObjectType, bool EnteringContext,
+ TemplateTy &Template, bool AllowInjectedClassName = false);
DeclResult
ActOnClassTemplateSpecialization(Scope *S, unsigned TagSpec, TagUseKind TUK,
@@ -6003,6 +6181,7 @@ public:
TemplateArgumentListInfo *ExplicitTemplateArgs,
LookupResult &Previous);
bool CheckMemberSpecialization(NamedDecl *Member, LookupResult &Previous);
+ void CompleteMemberSpecialization(NamedDecl *Member, LookupResult &Previous);
DeclResult
ActOnExplicitInstantiation(Scope *S,
@@ -6088,12 +6267,17 @@ public:
/// \param Converted Will receive the converted, canonicalized template
/// arguments.
///
+ /// \param UpdateArgsWithConversions If \c true, update \p TemplateArgs to
+ /// contain the converted forms of the template arguments as written.
+ /// Otherwise, \p TemplateArgs will not be modified.
+ ///
/// \returns true if an error occurred, false otherwise.
bool CheckTemplateArgumentList(TemplateDecl *Template,
SourceLocation TemplateLoc,
TemplateArgumentListInfo &TemplateArgs,
bool PartialTemplateArgs,
- SmallVectorImpl<TemplateArgument> &Converted);
+ SmallVectorImpl<TemplateArgument> &Converted,
+ bool UpdateArgsWithConversions = true);
bool CheckTemplateTypeArgument(TemplateTypeParmDecl *Param,
TemplateArgumentLoc &Arg,
@@ -6182,7 +6366,8 @@ public:
/// \param SS the nested-name-specifier following the typename (e.g., 'T::').
/// \param TemplateLoc the location of the 'template' keyword, if any.
/// \param TemplateName The template name.
- /// \param TemplateNameLoc The location of the template name.
+ /// \param TemplateII The identifier used to name the template.
+ /// \param TemplateIILoc The location of the template name.
/// \param LAngleLoc The location of the opening angle bracket ('<').
/// \param TemplateArgs The template arguments.
/// \param RAngleLoc The location of the closing angle bracket ('>').
@@ -6191,7 +6376,8 @@ public:
const CXXScopeSpec &SS,
SourceLocation TemplateLoc,
TemplateTy TemplateName,
- SourceLocation TemplateNameLoc,
+ IdentifierInfo *TemplateII,
+ SourceLocation TemplateIILoc,
SourceLocation LAngleLoc,
ASTTemplateArgsPtr TemplateArgs,
SourceLocation RAngleLoc);
@@ -6706,6 +6892,9 @@ public:
/// \brief Substitute Replacement for auto in TypeWithAuto
TypeSourceInfo* SubstAutoTypeSourceInfo(TypeSourceInfo *TypeWithAuto,
QualType Replacement);
+ /// \brief Completely replace the \c auto in \p TypeWithAuto by
+ /// \p Replacement. This does not retain any \c auto type sugar.
+ QualType ReplaceAutoType(QualType TypeWithAuto, QualType Replacement);
/// \brief Result type of DeduceAutoType.
enum DeduceAutoResult {
@@ -6724,6 +6913,15 @@ public:
bool DeduceReturnType(FunctionDecl *FD, SourceLocation Loc,
bool Diagnose = true);
+ /// \brief Declare implicit deduction guides for a class template if we've
+ /// not already done so.
+ void DeclareImplicitDeductionGuides(TemplateDecl *Template,
+ SourceLocation Loc);
+
+ QualType DeduceTemplateSpecializationFromInitializer(
+ TypeSourceInfo *TInfo, const InitializedEntity &Entity,
+ const InitializationKind &Kind, MultiExprArg Init);
+
QualType deduceVarTypeFromInitializer(VarDecl *VDecl, DeclarationName Name,
QualType Type, TypeSourceInfo *TSI,
SourceRange Range, bool DirectInit,
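
Illustration only: DeduceTemplateSpecializationFromInitializer is the Sema entry point for C++17 class template argument deduction, where a variable names a class template without arguments and the specialization is deduced from the initializer:

#include <utility>
#include <vector>

std::pair p(1, 2.5);          // deduced as std::pair<int, double>
std::vector v = {1, 2, 3};    // deduced as std::vector<int>
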
@@ -6792,10 +6990,12 @@ public:
bool RelativeToPrimary = false,
const FunctionDecl *Pattern = nullptr);
- /// \brief A template instantiation that is currently in progress.
- struct ActiveTemplateInstantiation {
+ /// A context in which code is being synthesized (where a source location
+ /// alone is not sufficient to identify the context). This covers template
+ /// instantiation and various forms of implicitly-generated functions.
+ struct CodeSynthesisContext {
/// \brief The kind of template instantiation we are performing
- enum InstantiationKind {
+ enum SynthesisKind {
/// We are instantiating a template declaration. The entity is
/// the declaration we're instantiating (e.g., a CXXRecordDecl).
TemplateInstantiation,
@@ -6834,28 +7034,46 @@ public:
/// We are instantiating the exception specification for a function
/// template which was deferred until it was needed.
- ExceptionSpecInstantiation
+ ExceptionSpecInstantiation,
+
+ /// We are declaring an implicit special member function.
+ DeclaringSpecialMember,
+
+ /// We are defining a synthesized function (such as a defaulted special
+ /// member).
+ DefiningSynthesizedFunction,
} Kind;
- /// \brief The point of instantiation within the source code.
+ /// \brief Was the enclosing context a non-instantiation SFINAE context?
+ bool SavedInNonInstantiationSFINAEContext;
+
+ /// \brief The point of instantiation or synthesis within the source code.
SourceLocation PointOfInstantiation;
+ /// \brief The entity that is being synthesized.
+ Decl *Entity;
+
/// \brief The template (or partial specialization) in which we are
/// performing the instantiation, for substitutions of prior template
/// arguments.
NamedDecl *Template;
- /// \brief The entity that is being instantiated.
- Decl *Entity;
-
/// \brief The list of template arguments we are substituting, if they
/// are not part of the entity.
const TemplateArgument *TemplateArgs;
- /// \brief The number of template arguments in TemplateArgs.
- unsigned NumTemplateArgs;
+ // FIXME: Wrap this union around more members, or perhaps store the
+ // kind-specific members in the RAII object owning the context.
+ union {
+ /// \brief The number of template arguments in TemplateArgs.
+ unsigned NumTemplateArgs;
+
+ /// \brief The special member being declared or defined.
+ CXXSpecialMember SpecialMember;
+ };
ArrayRef<TemplateArgument> template_arguments() const {
+ assert(Kind != DeclaringSpecialMember);
return {TemplateArgs, NumTemplateArgs};
}
@@ -6868,56 +7086,20 @@ public:
/// template instantiation.
SourceRange InstantiationRange;
- ActiveTemplateInstantiation()
- : Kind(TemplateInstantiation), Template(nullptr), Entity(nullptr),
+ CodeSynthesisContext()
+ : Kind(TemplateInstantiation), Entity(nullptr), Template(nullptr),
TemplateArgs(nullptr), NumTemplateArgs(0), DeductionInfo(nullptr) {}
/// \brief Determines whether this template is an actual instantiation
/// that should be counted toward the maximum instantiation depth.
bool isInstantiationRecord() const;
-
- friend bool operator==(const ActiveTemplateInstantiation &X,
- const ActiveTemplateInstantiation &Y) {
- if (X.Kind != Y.Kind)
- return false;
-
- if (X.Entity != Y.Entity)
- return false;
-
- switch (X.Kind) {
- case TemplateInstantiation:
- case ExceptionSpecInstantiation:
- return true;
-
- case PriorTemplateArgumentSubstitution:
- case DefaultTemplateArgumentChecking:
- return X.Template == Y.Template && X.TemplateArgs == Y.TemplateArgs;
-
- case DefaultTemplateArgumentInstantiation:
- case ExplicitTemplateArgumentSubstitution:
- case DeducedTemplateArgumentSubstitution:
- case DefaultFunctionArgumentInstantiation:
- return X.TemplateArgs == Y.TemplateArgs;
-
- }
-
- llvm_unreachable("Invalid InstantiationKind!");
- }
-
- friend bool operator!=(const ActiveTemplateInstantiation &X,
- const ActiveTemplateInstantiation &Y) {
- return !(X == Y);
- }
};
- /// \brief List of active template instantiations.
+ /// \brief List of active code synthesis contexts.
///
- /// This vector is treated as a stack. As one template instantiation
- /// requires another template instantiation, additional
- /// instantiations are pushed onto the stack up to a
- /// user-configurable limit LangOptions::InstantiationDepth.
- SmallVector<ActiveTemplateInstantiation, 16>
- ActiveTemplateInstantiations;
+ /// This vector is treated as a stack. As synthesis of one entity requires
+ /// synthesis of another, additional contexts are pushed onto the stack.
+ SmallVector<CodeSynthesisContext, 16> CodeSynthesisContexts;
/// Specializations whose definitions are currently being instantiated.
llvm::DenseSet<std::pair<Decl *, unsigned>> InstantiatingSpecializations;
@@ -6928,7 +7110,7 @@ public:
/// \brief Extra modules inspected when performing a lookup during a template
/// instantiation. Computed lazily.
- SmallVector<Module*, 16> ActiveTemplateInstantiationLookupModules;
+ SmallVector<Module*, 16> CodeSynthesisContextLookupModules;
/// \brief Cache of additional modules that should be used for name lookup
/// within the current template instantiation. Computed lazily; use
@@ -6951,19 +7133,22 @@ public:
/// of a template instantiation or template argument deduction.
bool InNonInstantiationSFINAEContext;
- /// \brief The number of ActiveTemplateInstantiation entries in
- /// \c ActiveTemplateInstantiations that are not actual instantiations and,
- /// therefore, should not be counted as part of the instantiation depth.
+ /// \brief The number of \p CodeSynthesisContexts that are not template
+ /// instantiations and, therefore, should not be counted as part of the
+ /// instantiation depth.
+ ///
+ /// When the instantiation depth reaches the user-configurable limit
+ /// \p LangOptions::InstantiationDepth we will abort instantiation.
+ // FIXME: Should we have a similar limit for other forms of synthesis?
unsigned NonInstantiationEntries;
- /// \brief The last template from which a template instantiation
+ /// \brief The depth of the context stack at the point when the most recent
/// error or warning was produced.
///
- /// This value is used to suppress printing of redundant template
- /// instantiation backtraces when there are multiple errors in the
- /// same instantiation. FIXME: Does this belong in Sema? It's tough
- /// to implement it anywhere else.
- ActiveTemplateInstantiation LastTemplateInstantiationErrorContext;
+ /// This value is used to suppress printing of redundant context stacks
+ /// when there are multiple errors or warnings in the same instantiation.
+ // FIXME: Does this belong in Sema? It's tough to implement it anywhere else.
+ unsigned LastEmittedCodeSynthesisContextDepth = 0;
/// \brief The current index into pack expansion arguments that will be
/// used for substitution of parameter packs.
@@ -7041,7 +7226,7 @@ public:
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
FunctionTemplateDecl *FunctionTemplate,
ArrayRef<TemplateArgument> TemplateArgs,
- ActiveTemplateInstantiation::InstantiationKind Kind,
+ CodeSynthesisContext::SynthesisKind Kind,
sema::TemplateDeductionInfo &DeductionInfo,
SourceRange InstantiationRange = SourceRange());
@@ -7120,12 +7305,11 @@ public:
Sema &SemaRef;
bool Invalid;
bool AlreadyInstantiating;
- bool SavedInNonInstantiationSFINAEContext;
bool CheckInstantiationDepth(SourceLocation PointOfInstantiation,
SourceRange InstantiationRange);
InstantiatingTemplate(
- Sema &SemaRef, ActiveTemplateInstantiation::InstantiationKind Kind,
+ Sema &SemaRef, CodeSynthesisContext::SynthesisKind Kind,
SourceLocation PointOfInstantiation, SourceRange InstantiationRange,
Decl *Entity, NamedDecl *Template = nullptr,
ArrayRef<TemplateArgument> TemplateArgs = None,
@@ -7137,8 +7321,27 @@ public:
operator=(const InstantiatingTemplate&) = delete;
};
+ void pushCodeSynthesisContext(CodeSynthesisContext Ctx);
+ void popCodeSynthesisContext();
+
+ /// Determine whether we are currently performing template instantiation.
+ bool inTemplateInstantiation() const {
+ return CodeSynthesisContexts.size() > NonInstantiationEntries;
+ }
+
+ void PrintContextStack() {
+ if (!CodeSynthesisContexts.empty() &&
+ CodeSynthesisContexts.size() != LastEmittedCodeSynthesisContextDepth) {
+ PrintInstantiationStack();
+ LastEmittedCodeSynthesisContextDepth = CodeSynthesisContexts.size();
+ }
+ if (PragmaAttributeCurrentTargetDecl)
+ PrintPragmaAttributeInstantiationPoint();
+ }
void PrintInstantiationStack();
+ void PrintPragmaAttributeInstantiationPoint();
+
/// \brief Determines whether we are currently in a context where
/// template argument substitution failures are not considered
/// errors.
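(Editor's note, not part of the patch: the depth bookkeeping behind PrintContextStack above can be shown with a small standalone sketch; DiagPrinter and emit are made-up names used only for illustration.)

    #include <cstdio>
    #include <vector>

    // Re-print an expensive backtrace only when the context depth has
    // changed since the last diagnostic, mirroring the
    // LastEmittedCodeSynthesisContextDepth bookkeeping above.
    struct DiagPrinter {
      std::vector<int> Contexts;       // stand-in for CodeSynthesisContexts
      unsigned LastEmittedDepth = 0;

      void printBacktrace() {
        std::printf("backtrace, depth=%zu\n", Contexts.size());
      }

      void emit(const char *Msg) {
        if (!Contexts.empty() && Contexts.size() != LastEmittedDepth) {
          printBacktrace();
          LastEmittedDepth = (unsigned)Contexts.size();
        }
        std::printf("error: %s\n", Msg);
      }
    };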
@@ -7248,9 +7451,9 @@ public:
/// but have not yet been performed.
std::deque<PendingImplicitInstantiation> PendingInstantiations;
- class SavePendingInstantiationsAndVTableUsesRAII {
+ class GlobalEagerInstantiationScope {
public:
- SavePendingInstantiationsAndVTableUsesRAII(Sema &S, bool Enabled)
+ GlobalEagerInstantiationScope(Sema &S, bool Enabled)
: S(S), Enabled(Enabled) {
if (!Enabled) return;
@@ -7258,7 +7461,14 @@ public:
SavedVTableUses.swap(S.VTableUses);
}
- ~SavePendingInstantiationsAndVTableUsesRAII() {
+ void perform() {
+ if (Enabled) {
+ S.DefineUsedVTables();
+ S.PerformPendingInstantiations();
+ }
+ }
+
+ ~GlobalEagerInstantiationScope() {
if (!Enabled) return;
// Restore the set of pending vtables.
@@ -7288,14 +7498,16 @@ public:
/// types, static variables, enumerators, etc.
std::deque<PendingImplicitInstantiation> PendingLocalImplicitInstantiations;
- class SavePendingLocalImplicitInstantiationsRAII {
+ class LocalEagerInstantiationScope {
public:
- SavePendingLocalImplicitInstantiationsRAII(Sema &S): S(S) {
+ LocalEagerInstantiationScope(Sema &S) : S(S) {
SavedPendingLocalImplicitInstantiations.swap(
S.PendingLocalImplicitInstantiations);
}
- ~SavePendingLocalImplicitInstantiationsRAII() {
+ void perform() { S.PerformPendingInstantiations(/*LocalOnly=*/true); }
+
+ ~LocalEagerInstantiationScope() {
assert(S.PendingLocalImplicitInstantiations.empty() &&
"there shouldn't be any pending local implicit instantiations");
SavedPendingLocalImplicitInstantiations.swap(
@@ -7305,7 +7517,7 @@ public:
private:
Sema &S;
std::deque<PendingImplicitInstantiation>
- SavedPendingLocalImplicitInstantiations;
+ SavedPendingLocalImplicitInstantiations;
};
/// A helper class for building up ExtParameterInfos.
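(Editor's note: the renamed GlobalEagerInstantiationScope / LocalEagerInstantiationScope above follow a save/swap RAII shape with an explicit perform() step. A generic sketch of that pattern, not the Sema types themselves:)

    #include <deque>

    // Swap the pending work list out on entry, optionally drain whatever
    // accumulated via perform(), and swap the saved list back on destruction.
    template <typename T>
    class PendingWorkScope {
      std::deque<T> &Live;
      std::deque<T> Saved;
    public:
      explicit PendingWorkScope(std::deque<T> &Live) : Live(Live) {
        Saved.swap(Live);
      }
      void perform() {
        while (!Live.empty()) {
          // ... process Live.front() here ...
          Live.pop_front();
        }
      }
      ~PendingWorkScope() { Saved.swap(Live); }
    };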
@@ -7339,7 +7551,8 @@ public:
TypeSourceInfo *SubstType(TypeSourceInfo *T,
const MultiLevelTemplateArgumentList &TemplateArgs,
- SourceLocation Loc, DeclarationName Entity);
+ SourceLocation Loc, DeclarationName Entity,
+ bool AllowDeducedTST = false);
QualType SubstType(QualType T,
const MultiLevelTemplateArgumentList &TemplateArgs,
@@ -7357,6 +7570,10 @@ public:
unsigned ThisTypeQuals);
void SubstExceptionSpec(FunctionDecl *New, const FunctionProtoType *Proto,
const MultiLevelTemplateArgumentList &Args);
+ bool SubstExceptionSpec(SourceLocation Loc,
+ FunctionProtoType::ExceptionSpecInfo &ESI,
+ SmallVectorImpl<QualType> &ExceptionStorage,
+ const MultiLevelTemplateArgumentList &Args);
ParmVarDecl *SubstParmVarDecl(ParmVarDecl *D,
const MultiLevelTemplateArgumentList &TemplateArgs,
int indexAdjustment,
@@ -7436,6 +7653,15 @@ public:
LateInstantiatedAttrVec *LateAttrs = nullptr,
LocalInstantiationScope *OuterMostScope = nullptr);
+ void
+ InstantiateAttrsForDecl(const MultiLevelTemplateArgumentList &TemplateArgs,
+ const Decl *Pattern, Decl *Inst,
+ LateInstantiatedAttrVec *LateAttrs = nullptr,
+ LocalInstantiationScope *OuterMostScope = nullptr);
+
+ bool usesPartialOrExplicitSpecialization(
+ SourceLocation Loc, ClassTemplateSpecializationDecl *ClassTemplateSpec);
+
bool
InstantiateClassTemplateSpecialization(SourceLocation PointOfInstantiation,
ClassTemplateSpecializationDecl *ClassTemplateSpec,
@@ -7510,7 +7736,8 @@ public:
const MultiLevelTemplateArgumentList &TemplateArgs);
NamedDecl *FindInstantiatedDecl(SourceLocation Loc, NamedDecl *D,
- const MultiLevelTemplateArgumentList &TemplateArgs);
+ const MultiLevelTemplateArgumentList &TemplateArgs,
+ bool FindingInstantiatedContext = false);
DeclContext *FindInstantiatedContext(SourceLocation Loc, DeclContext *DC,
const MultiLevelTemplateArgumentList &TemplateArgs);
@@ -7597,7 +7824,8 @@ public:
Decl * const *ProtoRefs,
unsigned NumProtoRefs,
const SourceLocation *ProtoLocs,
- SourceLocation EndProtoLoc);
+ SourceLocation EndProtoLoc,
+ AttributeList *AttrList);
Decl *ActOnStartClassImplementation(
SourceLocation AtClassImplLoc,
@@ -7941,6 +8169,11 @@ public:
POAK_Reset // #pragma options align=reset
};
+ /// ActOnPragmaClangSection - Called on well formed \#pragma clang section
+ void ActOnPragmaClangSection(SourceLocation PragmaLoc,
+ PragmaClangSectionAction Action,
+ PragmaClangSectionKind SecKind, StringRef SecName);
+
/// ActOnPragmaOptionsAlign - Called on well formed \#pragma options align.
void ActOnPragmaOptionsAlign(PragmaOptionsAlignKind Kind,
SourceLocation PragmaLoc);
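(Editor's note: the new ActOnPragmaClangSection callback above corresponds to source pragmas of roughly this shape; the section names are made-up placeholders.)

    #pragma clang section bss=".fast_bss" data=".fast_data" rodata=".fast_ro" text=".fast_text"
    int counter;                    // placed in .fast_bss
    int table[4] = {1, 2, 3, 4};    // placed in .fast_data
    #pragma clang section bss="" data="" rodata="" text=""   // back to defaults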
@@ -8039,8 +8272,9 @@ public:
SourceLocation AliasNameLoc);
/// ActOnPragmaFPContract - Called on well formed
- /// \#pragma {STDC,OPENCL} FP_CONTRACT
- void ActOnPragmaFPContract(tok::OnOffSwitch OOS);
+ /// \#pragma {STDC,OPENCL} FP_CONTRACT and
+ /// \#pragma clang fp contract
+ void ActOnPragmaFPContract(LangOptions::FPContractModeKind FPC);
/// AddAlignmentAttributesForRecord - Adds any needed alignment attributes to
/// the record decl, to handle '\#pragma pack' and '\#pragma options align'.
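(Editor's note: the widened ActOnPragmaFPContract signature above now covers the Clang spelling as well as the standard pragma. A sketch of both forms in user code:)

    float fma_ok(float a, float b, float c) {
    #pragma clang fp contract(fast)      // allow a*b+c to fuse into an FMA
      return a * b + c;
    }

    float no_fuse(float a, float b, float c) {
    #pragma STDC FP_CONTRACT OFF         // the pre-existing standard pragma
      return a * b + c;
    }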
@@ -8073,6 +8307,20 @@ public:
/// the appropriate attribute.
void AddCFAuditedAttribute(Decl *D);
+ /// \brief Called on well-formed '\#pragma clang attribute push'.
+ void ActOnPragmaAttributePush(AttributeList &Attribute,
+ SourceLocation PragmaLoc,
+ attr::ParsedSubjectMatchRuleSet Rules);
+
+ /// \brief Called on well-formed '\#pragma clang attribute pop'.
+ void ActOnPragmaAttributePop(SourceLocation PragmaLoc);
+
+ /// \brief Adds the attributes that have been specified using the
+ /// '\#pragma clang attribute push' directives to the given declaration.
+ void AddPragmaAttributes(Scope *S, Decl *D);
+
+ void DiagnoseUnterminatedPragmaAttribute();
+
/// \brief Called on well formed \#pragma clang optimize.
void ActOnPragmaOptimize(bool On, SourceLocation PragmaLoc);
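(Editor's note: ActOnPragmaAttributePush/Pop above handle the new '#pragma clang attribute' region. A hedged example of the construct; the annotate payload is arbitrary.)

    #pragma clang attribute push (__attribute__((annotate("engine_api"))), apply_to = function)
    void start();     // receives annotate("engine_api")
    void stop();      // receives annotate("engine_api")
    #pragma clang attribute pop

    void internal();  // outside the push/pop region, no attribute added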
@@ -8103,6 +8351,11 @@ public:
void AddAssumeAlignedAttr(SourceRange AttrRange, Decl *D, Expr *E, Expr *OE,
unsigned SpellingListIndex);
+ /// AddAllocAlignAttr - Adds an alloc_align attribute to a particular
+ /// declaration.
+ void AddAllocAlignAttr(SourceRange AttrRange, Decl *D, Expr *ParamExpr,
+ unsigned SpellingListIndex);
+
/// AddAlignValueAttr - Adds an align_value attribute to a particular
/// declaration.
void AddAlignValueAttr(SourceRange AttrRange, Decl *D, Expr *E,
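(Editor's note: AddAllocAlignAttr above handles GCC's alloc_align attribute. A small usage sketch; the allocator itself is hypothetical.)

    #include <cstddef>

    // Tells the compiler the returned pointer is aligned to the value of the
    // second parameter (1-based index), enabling better code generation.
    void *aligned_pool_alloc(std::size_t size, std::size_t align)
        __attribute__((alloc_align(2)));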
@@ -8124,17 +8377,26 @@ public:
unsigned SpellingListIndex, bool isNSConsumed,
bool isTemplateInstantiation);
+ bool checkNSReturnsRetainedReturnType(SourceLocation loc, QualType type);
+
//===--------------------------------------------------------------------===//
// C++ Coroutines TS
//
+ bool ActOnCoroutineBodyStart(Scope *S, SourceLocation KwLoc,
+ StringRef Keyword);
ExprResult ActOnCoawaitExpr(Scope *S, SourceLocation KwLoc, Expr *E);
ExprResult ActOnCoyieldExpr(Scope *S, SourceLocation KwLoc, Expr *E);
- StmtResult ActOnCoreturnStmt(SourceLocation KwLoc, Expr *E);
+ StmtResult ActOnCoreturnStmt(Scope *S, SourceLocation KwLoc, Expr *E);
- ExprResult BuildCoawaitExpr(SourceLocation KwLoc, Expr *E);
+ ExprResult BuildResolvedCoawaitExpr(SourceLocation KwLoc, Expr *E,
+ bool IsImplicit = false);
+ ExprResult BuildUnresolvedCoawaitExpr(SourceLocation KwLoc, Expr *E,
+ UnresolvedLookupExpr* Lookup);
ExprResult BuildCoyieldExpr(SourceLocation KwLoc, Expr *E);
- StmtResult BuildCoreturnStmt(SourceLocation KwLoc, Expr *E);
-
+ StmtResult BuildCoreturnStmt(SourceLocation KwLoc, Expr *E,
+ bool IsImplicit = false);
+ StmtResult BuildCoroutineBodyStmt(CoroutineBodyStmt::CtorArgs);
+ VarDecl *buildCoroutinePromise(SourceLocation Loc);
void CheckCompletedCoroutineBody(FunctionDecl *FD, Stmt *&Body);
//===--------------------------------------------------------------------===//
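(Editor's note: a minimal Coroutines TS sketch exercising the co_return path handled by ActOnCoreturnStmt / BuildCoreturnStmt above; assumes -fcoroutines-ts and libc++'s <experimental/coroutine>, not taken from this patch.)

    #include <experimental/coroutine>

    struct Task {
      struct promise_type {
        Task get_return_object() { return {}; }
        std::experimental::suspend_never initial_suspend() { return {}; }
        std::experimental::suspend_never final_suspend() { return {}; }
        void return_void() {}
        void unhandled_exception() {}
      };
    };

    Task example() {
      co_return;   // implicit promise object built by buildCoroutinePromise
    }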
@@ -8187,7 +8449,7 @@ public:
/// is disabled due to required OpenCL extensions being disabled. If so,
/// emit diagnostics.
/// \return true if type is disabled.
- bool checkOpenCLDisabledDecl(const Decl &D, const Expr &E);
+ bool checkOpenCLDisabledDecl(const NamedDecl &D, const Expr &E);
//===--------------------------------------------------------------------===//
// OpenMP directives and clauses.
@@ -8205,6 +8467,12 @@ private:
/// Returns OpenMP nesting level for current directive.
unsigned getOpenMPNestingLevel() const;
+ /// Push new OpenMP function region for non-capturing function.
+ void pushOpenMPFunctionRegion();
+
+ /// Pop OpenMP function region for non-capturing function.
+ void popOpenMPFunctionRegion(const sema::FunctionScopeInfo *OldFSI);
+
/// Checks if a type or a declaration is disabled due to the owning extension
/// being disabled, and emits diagnostic messages if it is disabled.
/// \param D type or declaration to be checked.
@@ -8314,6 +8582,9 @@ public:
return IsInOpenMPDeclareTargetContext;
}
+ /// Return the number of captured regions created for an OpenMP directive.
+ static int getOpenMPCaptureLevels(OpenMPDirectiveKind Kind);
+
/// \brief Initialization of captured region for OpenMP region.
void ActOnOpenMPRegionStart(OpenMPDirectiveKind DKind, Scope *CurScope);
/// \brief End of OpenMP region.
@@ -8408,7 +8679,8 @@ public:
StmtResult ActOnOpenMPTaskwaitDirective(SourceLocation StartLoc,
SourceLocation EndLoc);
/// \brief Called on well-formed '\#pragma omp taskgroup'.
- StmtResult ActOnOpenMPTaskgroupDirective(Stmt *AStmt, SourceLocation StartLoc,
+ StmtResult ActOnOpenMPTaskgroupDirective(ArrayRef<OMPClause *> Clauses,
+ Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// \brief Called on well-formed '\#pragma omp flush'.
StmtResult ActOnOpenMPFlushDirective(ArrayRef<OMPClause *> Clauses,
@@ -8750,6 +9022,13 @@ public:
CXXScopeSpec &ReductionIdScopeSpec,
const DeclarationNameInfo &ReductionId,
ArrayRef<Expr *> UnresolvedReductions = llvm::None);
+ /// Called on well-formed 'task_reduction' clause.
+ OMPClause *ActOnOpenMPTaskReductionClause(
+ ArrayRef<Expr *> VarList, SourceLocation StartLoc,
+ SourceLocation LParenLoc, SourceLocation ColonLoc, SourceLocation EndLoc,
+ CXXScopeSpec &ReductionIdScopeSpec,
+ const DeclarationNameInfo &ReductionId,
+ ArrayRef<Expr *> UnresolvedReductions = llvm::None);
/// \brief Called on well-formed 'linear' clause.
OMPClause *
ActOnOpenMPLinearClause(ArrayRef<Expr *> VarList, Expr *Step,
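(Editor's note: a hedged sketch of the OpenMP source syntax the new task_reduction clause corresponds to — taskgroup reductions with participating tasks opting in via in_reduction; not taken from this patch.)

    int sum = 0;
    #pragma omp parallel
    #pragma omp single
    #pragma omp taskgroup task_reduction(+: sum)
    {
      for (int i = 0; i < 100; ++i) {
        #pragma omp task in_reduction(+: sum)
        sum += i;
      }
    }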
@@ -9116,6 +9395,8 @@ public:
/// type checking binary operators (subroutines of CreateBuiltinBinOp).
QualType InvalidOperands(SourceLocation Loc, ExprResult &LHS,
ExprResult &RHS);
+ QualType InvalidLogicalVectorOperands(SourceLocation Loc, ExprResult &LHS,
+ ExprResult &RHS);
QualType CheckPointerToMemberOperands( // C++ 5.5
ExprResult &LHS, ExprResult &RHS, ExprValueKind &VK,
SourceLocation OpLoc, bool isIndirect);
@@ -9257,7 +9538,7 @@ public:
ExprResult CheckExtVectorCast(SourceRange R, QualType DestTy, Expr *CastExpr,
CastKind &Kind);
- ExprResult BuildCXXFunctionalCastExpr(TypeSourceInfo *TInfo,
+ ExprResult BuildCXXFunctionalCastExpr(TypeSourceInfo *TInfo, QualType Type,
SourceLocation LParenLoc,
Expr *CastExpr,
SourceLocation RParenLoc);
@@ -9265,14 +9546,14 @@ public:
enum ARCConversionResult { ACR_okay, ACR_unbridged, ACR_error };
/// \brief Checks for invalid conversions and casts between
- /// retainable pointers and other pointer kinds.
- ARCConversionResult CheckObjCARCConversion(SourceRange castRange,
- QualType castType, Expr *&op,
- CheckedConversionKind CCK,
- bool Diagnose = true,
- bool DiagnoseCFAudited = false,
- BinaryOperatorKind Opc = BO_PtrMemD
- );
+ /// retainable pointers and other pointer kinds for ARC and Weak.
+ ARCConversionResult CheckObjCConversion(SourceRange castRange,
+ QualType castType, Expr *&op,
+ CheckedConversionKind CCK,
+ bool Diagnose = true,
+ bool DiagnoseCFAudited = false,
+ BinaryOperatorKind Opc = BO_PtrMemD
+ );
Expr *stripARCUnbridgedCast(Expr *e);
void diagnoseARCUnbridgedCast(Expr *e);
@@ -9777,6 +10058,8 @@ public:
void CodeCompletePostfixExpression(Scope *S, ExprResult LHS);
void CodeCompleteTag(Scope *S, unsigned TagSpec);
void CodeCompleteTypeQualifiers(DeclSpec &DS);
+ void CodeCompleteFunctionQualifiers(DeclSpec &DS, Declarator &D,
+ const VirtSpecifiers *VS = nullptr);
void CodeCompleteBracketDeclarator(Scope *S);
void CodeCompleteCase(Scope *S);
void CodeCompleteCall(Scope *S, Expr *Fn, ArrayRef<Expr *> Args);
@@ -9863,6 +10146,7 @@ public:
MacroInfo *MacroInfo,
unsigned Argument);
void CodeCompleteNaturalLanguage();
+ void CodeCompleteAvailabilityPlatformName();
void GatherGlobalCodeCompletions(CodeCompletionAllocator &Allocator,
CodeCompletionTUInfo &CCTUInfo,
SmallVectorImpl<CodeCompletionResult> &Results);
@@ -9922,15 +10206,15 @@ private:
bool CheckMipsBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
bool CheckSystemZBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
bool CheckX86BuiltinRoundingOrSAE(unsigned BuiltinID, CallExpr *TheCall);
+ bool CheckX86BuiltinGatherScatterScale(unsigned BuiltinID, CallExpr *TheCall);
bool CheckX86BuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
bool CheckPPCBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
- bool SemaBuiltinVAStartImpl(CallExpr *TheCall);
- bool SemaBuiltinVAStart(CallExpr *TheCall);
- bool SemaBuiltinMSVAStart(CallExpr *TheCall);
+ bool SemaBuiltinVAStart(unsigned BuiltinID, CallExpr *TheCall);
bool SemaBuiltinVAStartARM(CallExpr *Call);
bool SemaBuiltinUnorderedCompare(CallExpr *TheCall);
bool SemaBuiltinFPClassification(CallExpr *TheCall, unsigned NumArgs);
+ bool SemaBuiltinVSX(CallExpr *TheCall);
bool SemaBuiltinOSLogFormat(CallExpr *TheCall);
public:
@@ -10029,6 +10313,11 @@ private:
void CheckBitFieldInitialization(SourceLocation InitLoc, FieldDecl *Field,
Expr *Init);
+ /// Check if there is a field shadowing.
+ void CheckShadowInheritedFields(const SourceLocation &Loc,
+ DeclarationName FieldName,
+ const CXXRecordDecl *RD);
+
/// \brief Check if the given expression contains 'break' or 'continue'
/// statement that produces control flow different from GCC.
void CheckBreakContinueBinding(Expr *E);
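(Editor's note: CheckShadowInheritedFields above backs a warning about members hiding fields of a base class. The kind of code it flags, as a sketch:)

    struct Base {
      int count;
    };

    struct Derived : Base {
      int count;   // shadows Base::count; unqualified 'count' now refers here
    };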
@@ -10135,17 +10424,6 @@ public:
return OriginalLexicalContext ? OriginalLexicalContext : CurContext;
}
- /// \brief The diagnostic we should emit for \c D, or \c AR_Available.
- ///
- /// \param D The declaration to check. Note that this may be altered to point
- /// to another declaration that \c D gets it's availability from. i.e., we
- /// walk the list of typedefs to find an availability attribute.
- ///
- /// \param Message If non-null, this will be populated with the message from
- /// the availability attribute that is selected.
- AvailabilityResult ShouldDiagnoseAvailabilityOfDecl(NamedDecl *&D,
- std::string *Message);
-
const DeclContext *getCurObjCLexicalContext() const {
const DeclContext *DC = getCurLexicalContext();
// A category implicitly has the attribute of the interface.
@@ -10222,6 +10500,7 @@ class EnterExpressionEvaluationContext {
bool Entered = true;
public:
+
EnterExpressionEvaluationContext(Sema &Actions,
Sema::ExpressionEvaluationContext NewContext,
Decl *LambdaContextDecl = nullptr,
@@ -10252,8 +10531,8 @@ public:
// a context.
if (ShouldEnter && Actions.isUnevaluatedContext() &&
Actions.getLangOpts().CPlusPlus11) {
- Actions.PushExpressionEvaluationContext(Sema::UnevaluatedList, nullptr,
- false);
+ Actions.PushExpressionEvaluationContext(
+ Sema::ExpressionEvaluationContext::UnevaluatedList, nullptr, false);
Entered = true;
}
}
diff --git a/gnu/llvm/tools/clang/lib/Basic/Targets.cpp b/gnu/llvm/tools/clang/lib/Basic/Targets.cpp
index 50e6a751e88..5d75aa5a752 100644
--- a/gnu/llvm/tools/clang/lib/Basic/Targets.cpp
+++ b/gnu/llvm/tools/clang/lib/Basic/Targets.cpp
@@ -111,12 +111,28 @@ public:
: OSTargetInfo<Target>(Triple, Opts) {}
};
+// Ananas target
+template<typename Target>
+class AnanasTargetInfo : public OSTargetInfo<Target> {
+protected:
+ void getOSDefines(const LangOptions &Opts, const llvm::Triple &Triple,
+ MacroBuilder &Builder) const override {
+ // Ananas defines
+ Builder.defineMacro("__Ananas__");
+ Builder.defineMacro("__ELF__");
+ }
+public:
+ AnanasTargetInfo(const llvm::Triple &Triple, const TargetOptions &Opts)
+ : OSTargetInfo<Target>(Triple, Opts) {}
+};
+
static void getDarwinDefines(MacroBuilder &Builder, const LangOptions &Opts,
const llvm::Triple &Triple,
StringRef &PlatformName,
VersionTuple &PlatformMinVersion) {
Builder.defineMacro("__APPLE_CC__", "6000");
Builder.defineMacro("__APPLE__");
+ Builder.defineMacro("__STDC_NO_THREADS__");
Builder.defineMacro("OBJC_NEW_PROPERTIES");
// AddressSanitizer doesn't play well with source fortification, which is on
// by default on Darwin.
@@ -483,6 +499,10 @@ public:
switch (Triple.getArch()) {
default:
break;
+ case llvm::Triple::mips:
+ case llvm::Triple::mipsel:
+ case llvm::Triple::mips64:
+ case llvm::Triple::mips64el:
case llvm::Triple::ppc:
case llvm::Triple::ppc64:
case llvm::Triple::ppc64le:
@@ -545,16 +565,18 @@ protected:
Builder.defineMacro("__ELF__");
if (Opts.POSIXThreads)
Builder.defineMacro("_REENTRANT");
+ if (this->HasFloat128)
+ Builder.defineMacro("__FLOAT128__");
}
public:
OpenBSDTargetInfo(const llvm::Triple &Triple, const TargetOptions &Opts)
: OSTargetInfo<Target>(Triple, Opts) {
switch (Triple.getArch()) {
- default:
case llvm::Triple::x86:
case llvm::Triple::x86_64:
- case llvm::Triple::arm:
- case llvm::Triple::sparc:
+ this->HasFloat128 = true;
+ // FALLTHROUGH
+ default:
this->MCountName = "__mcount";
break;
case llvm::Triple::mips64:
@@ -884,6 +906,7 @@ class PPCTargetInfo : public TargetInfo {
std::string CPU;
// Target cpu features.
+ bool HasAltivec;
bool HasVSX;
bool HasP8Vector;
bool HasP8Crypto;
@@ -899,9 +922,10 @@ protected:
public:
PPCTargetInfo(const llvm::Triple &Triple, const TargetOptions &)
- : TargetInfo(Triple), HasVSX(false), HasP8Vector(false),
+ : TargetInfo(Triple), HasAltivec(false), HasVSX(false), HasP8Vector(false),
HasP8Crypto(false), HasDirectMove(false), HasQPX(false), HasHTM(false),
HasBPERMD(false), HasExtDiv(false), HasP9Vector(false) {
+ SuitableAlign = 128;
SimdDefaultAlign = 128;
LongDoubleWidth = LongDoubleAlign = 128;
LongDoubleFormat = &llvm::APFloat::PPCDoubleDouble();
@@ -928,6 +952,13 @@ public:
ArchDefineA2q = 1 << 15
} ArchDefineTypes;
+ // Set the language option for altivec based on our value.
+ void adjust(LangOptions &Opts) override {
+ if (HasAltivec)
+ Opts.AltiVec = 1;
+ TargetInfo::adjust(Opts);
+ }
+
// Note: GCC recognizes the following additional cpus:
// 401, 403, 405, 405fp, 440fp, 464, 464fp, 476, 476fp, 505, 740, 801,
// 821, 823, 8540, 8548, e300c2, e300c3, e500mc64, e6500, 860, cell,
@@ -1162,7 +1193,9 @@ const Builtin::Info PPCTargetInfo::BuiltinInfo[] = {
bool PPCTargetInfo::handleTargetFeatures(std::vector<std::string> &Features,
DiagnosticsEngine &Diags) {
for (const auto &Feature : Features) {
- if (Feature == "+vsx") {
+ if (Feature == "+altivec") {
+ HasAltivec = true;
+ } else if (Feature == "+vsx") {
HasVSX = true;
} else if (Feature == "+bpermd") {
HasBPERMD = true;
@@ -1222,82 +1255,100 @@ void PPCTargetInfo::getTargetDefines(const LangOptions &Opts,
if (ABI == "elfv2")
Builder.defineMacro("_CALL_ELF", "2");
+ // This typically is only for a new enough linker (bfd >= 2.16.2 or gold), but
+  // our support post-dates this and it should work on all 64-bit ppc linux
+ // platforms. It is guaranteed to work on all elfv2 platforms.
+ if (getTriple().getOS() == llvm::Triple::Linux && PointerWidth == 64)
+ Builder.defineMacro("_CALL_LINUX", "1");
+
// Subtarget options.
Builder.defineMacro("__NATURAL_ALIGNMENT__");
Builder.defineMacro("__REGISTER_PREFIX__", "");
// FIXME: Should be controlled by command line option.
- if (LongDoubleWidth == 128)
+ if (LongDoubleWidth == 128) {
Builder.defineMacro("__LONG_DOUBLE_128__");
-
- if (Opts.AltiVec) {
- Builder.defineMacro("__VEC__", "10206");
- Builder.defineMacro("__ALTIVEC__");
+ Builder.defineMacro("__LONGDOUBLE128");
}
+ // Define this for elfv2 (64-bit only) or 64-bit darwin.
+ if (ABI == "elfv2" ||
+ (getTriple().getOS() == llvm::Triple::Darwin && PointerWidth == 64))
+ Builder.defineMacro("__STRUCT_PARM_ALIGN__", "16");
+
// CPU identification.
- ArchDefineTypes defs = (ArchDefineTypes)llvm::StringSwitch<int>(CPU)
- .Case("440", ArchDefineName)
- .Case("450", ArchDefineName | ArchDefine440)
- .Case("601", ArchDefineName)
- .Case("602", ArchDefineName | ArchDefinePpcgr)
- .Case("603", ArchDefineName | ArchDefinePpcgr)
- .Case("603e", ArchDefineName | ArchDefine603 | ArchDefinePpcgr)
- .Case("603ev", ArchDefineName | ArchDefine603 | ArchDefinePpcgr)
- .Case("604", ArchDefineName | ArchDefinePpcgr)
- .Case("604e", ArchDefineName | ArchDefine604 | ArchDefinePpcgr)
- .Case("620", ArchDefineName | ArchDefinePpcgr)
- .Case("630", ArchDefineName | ArchDefinePpcgr)
- .Case("7400", ArchDefineName | ArchDefinePpcgr)
- .Case("7450", ArchDefineName | ArchDefinePpcgr)
- .Case("750", ArchDefineName | ArchDefinePpcgr)
- .Case("970", ArchDefineName | ArchDefinePwr4 | ArchDefinePpcgr
- | ArchDefinePpcsq)
- .Case("a2", ArchDefineA2)
- .Case("a2q", ArchDefineName | ArchDefineA2 | ArchDefineA2q)
- .Case("pwr3", ArchDefinePpcgr)
- .Case("pwr4", ArchDefineName | ArchDefinePpcgr | ArchDefinePpcsq)
- .Case("pwr5", ArchDefineName | ArchDefinePwr4 | ArchDefinePpcgr
- | ArchDefinePpcsq)
- .Case("pwr5x", ArchDefineName | ArchDefinePwr5 | ArchDefinePwr4
- | ArchDefinePpcgr | ArchDefinePpcsq)
- .Case("pwr6", ArchDefineName | ArchDefinePwr5x | ArchDefinePwr5
- | ArchDefinePwr4 | ArchDefinePpcgr | ArchDefinePpcsq)
- .Case("pwr6x", ArchDefineName | ArchDefinePwr6 | ArchDefinePwr5x
- | ArchDefinePwr5 | ArchDefinePwr4 | ArchDefinePpcgr
- | ArchDefinePpcsq)
- .Case("pwr7", ArchDefineName | ArchDefinePwr6x | ArchDefinePwr6
- | ArchDefinePwr5x | ArchDefinePwr5 | ArchDefinePwr4
- | ArchDefinePpcgr | ArchDefinePpcsq)
- .Case("pwr8", ArchDefineName | ArchDefinePwr7 | ArchDefinePwr6x
- | ArchDefinePwr6 | ArchDefinePwr5x | ArchDefinePwr5
- | ArchDefinePwr4 | ArchDefinePpcgr | ArchDefinePpcsq)
- .Case("pwr9", ArchDefineName | ArchDefinePwr8 | ArchDefinePwr7
- | ArchDefinePwr6x | ArchDefinePwr6 | ArchDefinePwr5x
- | ArchDefinePwr5 | ArchDefinePwr4 | ArchDefinePpcgr
- | ArchDefinePpcsq)
- .Case("power3", ArchDefinePpcgr)
- .Case("power4", ArchDefinePwr4 | ArchDefinePpcgr | ArchDefinePpcsq)
- .Case("power5", ArchDefinePwr5 | ArchDefinePwr4 | ArchDefinePpcgr
- | ArchDefinePpcsq)
- .Case("power5x", ArchDefinePwr5x | ArchDefinePwr5 | ArchDefinePwr4
- | ArchDefinePpcgr | ArchDefinePpcsq)
- .Case("power6", ArchDefinePwr6 | ArchDefinePwr5x | ArchDefinePwr5
- | ArchDefinePwr4 | ArchDefinePpcgr | ArchDefinePpcsq)
- .Case("power6x", ArchDefinePwr6x | ArchDefinePwr6 | ArchDefinePwr5x
- | ArchDefinePwr5 | ArchDefinePwr4 | ArchDefinePpcgr
- | ArchDefinePpcsq)
- .Case("power7", ArchDefinePwr7 | ArchDefinePwr6x | ArchDefinePwr6
- | ArchDefinePwr5x | ArchDefinePwr5 | ArchDefinePwr4
- | ArchDefinePpcgr | ArchDefinePpcsq)
- .Case("power8", ArchDefinePwr8 | ArchDefinePwr7 | ArchDefinePwr6x
- | ArchDefinePwr6 | ArchDefinePwr5x | ArchDefinePwr5
- | ArchDefinePwr4 | ArchDefinePpcgr | ArchDefinePpcsq)
- .Case("power9", ArchDefinePwr9 | ArchDefinePwr8 | ArchDefinePwr7
- | ArchDefinePwr6x | ArchDefinePwr6 | ArchDefinePwr5x
- | ArchDefinePwr5 | ArchDefinePwr4 | ArchDefinePpcgr
- | ArchDefinePpcsq)
- .Default(ArchDefineNone);
+ ArchDefineTypes defs =
+ (ArchDefineTypes)llvm::StringSwitch<int>(CPU)
+ .Case("440", ArchDefineName)
+ .Case("450", ArchDefineName | ArchDefine440)
+ .Case("601", ArchDefineName)
+ .Case("602", ArchDefineName | ArchDefinePpcgr)
+ .Case("603", ArchDefineName | ArchDefinePpcgr)
+ .Case("603e", ArchDefineName | ArchDefine603 | ArchDefinePpcgr)
+ .Case("603ev", ArchDefineName | ArchDefine603 | ArchDefinePpcgr)
+ .Case("604", ArchDefineName | ArchDefinePpcgr)
+ .Case("604e", ArchDefineName | ArchDefine604 | ArchDefinePpcgr)
+ .Case("620", ArchDefineName | ArchDefinePpcgr)
+ .Case("630", ArchDefineName | ArchDefinePpcgr)
+ .Case("7400", ArchDefineName | ArchDefinePpcgr)
+ .Case("7450", ArchDefineName | ArchDefinePpcgr)
+ .Case("750", ArchDefineName | ArchDefinePpcgr)
+ .Case("970", ArchDefineName | ArchDefinePwr4 | ArchDefinePpcgr |
+ ArchDefinePpcsq)
+ .Case("a2", ArchDefineA2)
+ .Case("a2q", ArchDefineName | ArchDefineA2 | ArchDefineA2q)
+ .Case("pwr3", ArchDefinePpcgr)
+ .Case("pwr4", ArchDefineName | ArchDefinePpcgr | ArchDefinePpcsq)
+ .Case("pwr5", ArchDefineName | ArchDefinePwr4 | ArchDefinePpcgr |
+ ArchDefinePpcsq)
+ .Case("pwr5x", ArchDefineName | ArchDefinePwr5 | ArchDefinePwr4 |
+ ArchDefinePpcgr | ArchDefinePpcsq)
+ .Case("pwr6", ArchDefineName | ArchDefinePwr5x | ArchDefinePwr5 |
+ ArchDefinePwr4 | ArchDefinePpcgr | ArchDefinePpcsq)
+ .Case("pwr6x", ArchDefineName | ArchDefinePwr6 | ArchDefinePwr5x |
+ ArchDefinePwr5 | ArchDefinePwr4 | ArchDefinePpcgr |
+ ArchDefinePpcsq)
+ .Case("pwr7", ArchDefineName | ArchDefinePwr6x | ArchDefinePwr6 |
+ ArchDefinePwr5x | ArchDefinePwr5 | ArchDefinePwr4 |
+ ArchDefinePpcgr | ArchDefinePpcsq)
+ .Case("pwr8", ArchDefineName | ArchDefinePwr7 | ArchDefinePwr6x |
+ ArchDefinePwr6 | ArchDefinePwr5x | ArchDefinePwr5 |
+ ArchDefinePwr4 | ArchDefinePpcgr | ArchDefinePpcsq)
+ .Case("pwr9", ArchDefineName | ArchDefinePwr8 | ArchDefinePwr7 |
+ ArchDefinePwr6x | ArchDefinePwr6 | ArchDefinePwr5x |
+ ArchDefinePwr5 | ArchDefinePwr4 | ArchDefinePpcgr |
+ ArchDefinePpcsq)
+ .Case("power3", ArchDefinePpcgr)
+ .Case("power4", ArchDefinePwr4 | ArchDefinePpcgr | ArchDefinePpcsq)
+ .Case("power5", ArchDefinePwr5 | ArchDefinePwr4 | ArchDefinePpcgr |
+ ArchDefinePpcsq)
+ .Case("power5x", ArchDefinePwr5x | ArchDefinePwr5 | ArchDefinePwr4 |
+ ArchDefinePpcgr | ArchDefinePpcsq)
+ .Case("power6", ArchDefinePwr6 | ArchDefinePwr5x | ArchDefinePwr5 |
+ ArchDefinePwr4 | ArchDefinePpcgr |
+ ArchDefinePpcsq)
+ .Case("power6x", ArchDefinePwr6x | ArchDefinePwr6 | ArchDefinePwr5x |
+ ArchDefinePwr5 | ArchDefinePwr4 |
+ ArchDefinePpcgr | ArchDefinePpcsq)
+ .Case("power7", ArchDefinePwr7 | ArchDefinePwr6x | ArchDefinePwr6 |
+ ArchDefinePwr5x | ArchDefinePwr5 |
+ ArchDefinePwr4 | ArchDefinePpcgr |
+ ArchDefinePpcsq)
+ .Case("power8", ArchDefinePwr8 | ArchDefinePwr7 | ArchDefinePwr6x |
+ ArchDefinePwr6 | ArchDefinePwr5x |
+ ArchDefinePwr5 | ArchDefinePwr4 |
+ ArchDefinePpcgr | ArchDefinePpcsq)
+ .Case("power9", ArchDefinePwr9 | ArchDefinePwr8 | ArchDefinePwr7 |
+ ArchDefinePwr6x | ArchDefinePwr6 |
+ ArchDefinePwr5x | ArchDefinePwr5 |
+ ArchDefinePwr4 | ArchDefinePpcgr |
+ ArchDefinePpcsq)
+ // powerpc64le automatically defaults to at least power8.
+ .Case("ppc64le", ArchDefinePwr8 | ArchDefinePwr7 | ArchDefinePwr6x |
+ ArchDefinePwr6 | ArchDefinePwr5x |
+ ArchDefinePwr5 | ArchDefinePwr4 |
+ ArchDefinePpcgr | ArchDefinePpcsq)
+ .Default(ArchDefineNone);
if (defs & ArchDefineName)
Builder.defineMacro(Twine("_ARCH_", StringRef(CPU).upper()));
@@ -1341,6 +1392,10 @@ void PPCTargetInfo::getTargetDefines(const LangOptions &Opts,
Builder.defineMacro("__TOS_BGQ__");
}
+ if (HasAltivec) {
+ Builder.defineMacro("__VEC__", "10206");
+ Builder.defineMacro("__ALTIVEC__");
+ }
if (HasVSX)
Builder.defineMacro("__VSX__");
if (HasP8Vector)
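(Editor's note: with __ALTIVEC__/__VEC__ now keyed off the +altivec target feature above, user code guarded on the macros is unaffected; a typical guard looks like this sketch.)

    #if defined(__ALTIVEC__)
    #include <altivec.h>
    typedef __vector float vf4;     // AltiVec code path
    #else
    typedef float vf4[4];           // portable fallback
    #endif

    #if defined(__VSX__)
    // additionally use VSX-only operations here
    #endif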
@@ -1360,6 +1415,9 @@ void PPCTargetInfo::getTargetDefines(const LangOptions &Opts,
if (PointerWidth == 64)
Builder.defineMacro("__GCC_HAVE_SYNC_COMPARE_AND_SWAP_8");
+ // We have support for the bswap intrinsics so we can define this.
+ Builder.defineMacro("__HAVE_BSWAP__", "1");
+
// FIXME: The following are not yet generated here by Clang, but are
// generated by GCC:
//
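(Editor's note: __HAVE_BSWAP__ above advertises efficient byte-swap support; guarded usage might look like this sketch.)

    #include <stdint.h>

    static inline uint32_t byteswap32(uint32_t v) {
    #if defined(__HAVE_BSWAP__)
      return __builtin_bswap32(v);                 // maps to a byte-reversed load/store pattern
    #else
      return (v >> 24) | ((v >> 8) & 0xff00u) |
             ((v << 8) & 0xff0000u) | (v << 24);   // portable fallback
    #endif
    }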
@@ -1372,8 +1430,6 @@ void PPCTargetInfo::getTargetDefines(const LangOptions &Opts,
// __RSQRTEF__
// _SOFT_DOUBLE_
// __NO_LWSYNC__
- // __HAVE_BSWAP__
- // __LONGDOUBLE128
// __CMODEL_MEDIUM__
// __CMODEL_LARGE__
// _CALL_SYSV
@@ -1479,6 +1535,11 @@ bool PPCTargetInfo::initFeatureMap(
.Case("pwr8", true)
.Case("pwr7", true)
.Default(false);
+ Features["htm"] = llvm::StringSwitch<bool>(CPU)
+ .Case("ppc64le", true)
+ .Case("pwr9", true)
+ .Case("pwr8", true)
+ .Default(false);
if (!ppcUserFeaturesCheck(Diags, FeaturesVec))
return false;
@@ -1488,44 +1549,47 @@ bool PPCTargetInfo::initFeatureMap(
bool PPCTargetInfo::hasFeature(StringRef Feature) const {
return llvm::StringSwitch<bool>(Feature)
- .Case("powerpc", true)
- .Case("vsx", HasVSX)
- .Case("power8-vector", HasP8Vector)
- .Case("crypto", HasP8Crypto)
- .Case("direct-move", HasDirectMove)
- .Case("qpx", HasQPX)
- .Case("htm", HasHTM)
- .Case("bpermd", HasBPERMD)
- .Case("extdiv", HasExtDiv)
- .Case("float128", HasFloat128)
- .Case("power9-vector", HasP9Vector)
- .Default(false);
+ .Case("powerpc", true)
+ .Case("altivec", HasAltivec)
+ .Case("vsx", HasVSX)
+ .Case("power8-vector", HasP8Vector)
+ .Case("crypto", HasP8Crypto)
+ .Case("direct-move", HasDirectMove)
+ .Case("qpx", HasQPX)
+ .Case("htm", HasHTM)
+ .Case("bpermd", HasBPERMD)
+ .Case("extdiv", HasExtDiv)
+ .Case("float128", HasFloat128)
+ .Case("power9-vector", HasP9Vector)
+ .Default(false);
}
void PPCTargetInfo::setFeatureEnabled(llvm::StringMap<bool> &Features,
StringRef Name, bool Enabled) const {
- // If we're enabling direct-move or power8-vector go ahead and enable vsx
- // as well. Do the inverse if we're disabling vsx. We'll diagnose any user
- // incompatible options.
if (Enabled) {
- if (Name == "direct-move" ||
- Name == "power8-vector" ||
- Name == "float128" ||
- Name == "power9-vector") {
- // power9-vector is really a superset of power8-vector so encode that.
- Features[Name] = Features["vsx"] = true;
- if (Name == "power9-vector")
- Features["power8-vector"] = true;
- } else {
- Features[Name] = true;
- }
+ // If we're enabling any of the vsx based features then enable vsx and
+ // altivec. We'll diagnose any problems later.
+ bool FeatureHasVSX = llvm::StringSwitch<bool>(Name)
+ .Case("vsx", true)
+ .Case("direct-move", true)
+ .Case("power8-vector", true)
+ .Case("power9-vector", true)
+ .Case("float128", true)
+ .Default(false);
+ if (FeatureHasVSX)
+ Features["vsx"] = Features["altivec"] = true;
+ if (Name == "power9-vector")
+ Features["power8-vector"] = true;
+ Features[Name] = true;
} else {
- if (Name == "vsx") {
- Features[Name] = Features["direct-move"] = Features["power8-vector"] =
+ // If we're disabling altivec or vsx go ahead and disable all of the vsx
+ // features.
+ if ((Name == "altivec") || (Name == "vsx"))
+ Features["vsx"] = Features["direct-move"] = Features["power8-vector"] =
Features["float128"] = Features["power9-vector"] = false;
- } else {
- Features[Name] = false;
- }
+ if (Name == "power8-vector")
+ Features["power9-vector"] = false;
+ Features[Name] = false;
}
}
@@ -1716,7 +1780,6 @@ public:
BoolWidth = BoolAlign = 32; //XXX support -mone-byte-bool?
PtrDiffType = SignedInt; // for http://llvm.org/bugs/show_bug.cgi?id=15726
LongLongAlign = 32;
- SuitableAlign = 128;
resetDataLayout("E-m:o-p:32:32-f64:32:64-n32");
}
BuiltinVaListKind getBuiltinVaListKind() const override {
@@ -1729,12 +1792,12 @@ public:
DarwinPPC64TargetInfo(const llvm::Triple &Triple, const TargetOptions &Opts)
: DarwinTargetInfo<PPC64TargetInfo>(Triple, Opts) {
HasAlignMac68kSupport = true;
- SuitableAlign = 128;
resetDataLayout("E-m:o-i64:64-n32:64");
}
};
static const unsigned NVPTXAddrSpaceMap[] = {
+ 0, // Default
1, // opencl_global
3, // opencl_local
4, // opencl_constant
@@ -1988,14 +2051,45 @@ ArrayRef<const char *> NVPTXTargetInfo::getGCCRegNames() const {
return llvm::makeArrayRef(GCCRegNames);
}
-static const unsigned AMDGPUAddrSpaceMap[] = {
- 1, // opencl_global
- 3, // opencl_local
- 2, // opencl_constant
- 4, // opencl_generic
- 1, // cuda_device
- 2, // cuda_constant
- 3 // cuda_shared
+static const LangAS::Map AMDGPUPrivIsZeroDefIsGenMap = {
+ 4, // Default
+ 1, // opencl_global
+ 3, // opencl_local
+ 2, // opencl_constant
+ 4, // opencl_generic
+ 1, // cuda_device
+ 2, // cuda_constant
+ 3 // cuda_shared
+};
+static const LangAS::Map AMDGPUGenIsZeroDefIsGenMap = {
+ 0, // Default
+ 1, // opencl_global
+ 3, // opencl_local
+ 2, // opencl_constant
+ 0, // opencl_generic
+ 1, // cuda_device
+ 2, // cuda_constant
+ 3 // cuda_shared
+};
+static const LangAS::Map AMDGPUPrivIsZeroDefIsPrivMap = {
+ 0, // Default
+ 1, // opencl_global
+ 3, // opencl_local
+ 2, // opencl_constant
+ 4, // opencl_generic
+ 1, // cuda_device
+ 2, // cuda_constant
+ 3 // cuda_shared
+};
+static const LangAS::Map AMDGPUGenIsZeroDefIsPrivMap = {
+ 5, // Default
+ 1, // opencl_global
+ 3, // opencl_local
+ 2, // opencl_constant
+ 0, // opencl_generic
+ 1, // cuda_device
+ 2, // cuda_constant
+ 3 // cuda_shared
};
// If you edit the description strings, make sure you update
@@ -2005,15 +2099,39 @@ static const char *const DataLayoutStringR600 =
"e-p:32:32-i64:64-v16:16-v24:32-v32:32-v48:64-v96:128"
"-v192:256-v256:256-v512:512-v1024:1024-v2048:2048-n32:64";
-static const char *const DataLayoutStringSI =
+static const char *const DataLayoutStringSIPrivateIsZero =
"e-p:32:32-p1:64:64-p2:64:64-p3:32:32-p4:64:64-p5:32:32"
"-i64:64-v16:16-v24:32-v32:32-v48:64-v96:128"
"-v192:256-v256:256-v512:512-v1024:1024-v2048:2048-n32:64";
+static const char *const DataLayoutStringSIGenericIsZero =
+ "e-p:64:64-p1:64:64-p2:64:64-p3:32:32-p4:32:32-p5:32:32"
+ "-i64:64-v16:16-v24:32-v32:32-v48:64-v96:128"
+ "-v192:256-v256:256-v512:512-v1024:1024-v2048:2048-n32:64-A5";
+
class AMDGPUTargetInfo final : public TargetInfo {
static const Builtin::Info BuiltinInfo[];
static const char * const GCCRegNames[];
+ struct AddrSpace {
+ unsigned Generic, Global, Local, Constant, Private;
+ AddrSpace(bool IsGenericZero_ = false){
+ if (IsGenericZero_) {
+ Generic = 0;
+ Global = 1;
+ Local = 3;
+ Constant = 2;
+ Private = 5;
+ } else {
+ Generic = 4;
+ Global = 1;
+ Local = 3;
+ Constant = 2;
+ Private = 0;
+ }
+ }
+ };
+
/// \brief The GPU profiles supported by the AMDGPU target.
enum GPUKind {
GK_NONE,
@@ -2027,18 +2145,27 @@ class AMDGPUTargetInfo final : public TargetInfo {
GK_CAYMAN,
GK_GFX6,
GK_GFX7,
- GK_GFX8
+ GK_GFX8,
+ GK_GFX9
} GPU;
bool hasFP64:1;
bool hasFMAF:1;
bool hasLDEXPF:1;
- bool hasFullSpeedFP32Denorms:1;
+ const AddrSpace AS;
+
+ static bool hasFullSpeedFMAF32(StringRef GPUName) {
+ return parseAMDGCNName(GPUName) >= GK_GFX9;
+ }
static bool isAMDGCN(const llvm::Triple &TT) {
return TT.getArch() == llvm::Triple::amdgcn;
}
+ static bool isGenericZero(const llvm::Triple &TT) {
+ return TT.getEnvironmentName() == "amdgiz" ||
+ TT.getEnvironmentName() == "amdgizcl";
+ }
public:
AMDGPUTargetInfo(const llvm::Triple &Triple, const TargetOptions &Opts)
: TargetInfo(Triple) ,
@@ -2046,32 +2173,62 @@ public:
hasFP64(false),
hasFMAF(false),
hasLDEXPF(false),
- hasFullSpeedFP32Denorms(false){
+ AS(isGenericZero(Triple)){
if (getTriple().getArch() == llvm::Triple::amdgcn) {
hasFP64 = true;
hasFMAF = true;
hasLDEXPF = true;
}
-
+ auto IsGenericZero = isGenericZero(Triple);
resetDataLayout(getTriple().getArch() == llvm::Triple::amdgcn ?
- DataLayoutStringSI : DataLayoutStringR600);
-
- AddrSpaceMap = &AMDGPUAddrSpaceMap;
+ (IsGenericZero ? DataLayoutStringSIGenericIsZero :
+ DataLayoutStringSIPrivateIsZero)
+ : DataLayoutStringR600);
+ assert(DataLayout->getAllocaAddrSpace() == AS.Private);
+
+ setAddressSpaceMap(Triple.getOS() == llvm::Triple::Mesa3D ||
+ Triple.getEnvironment() == llvm::Triple::OpenCL ||
+ Triple.getEnvironmentName() == "amdgizcl" ||
+ !isAMDGCN(Triple));
UseAddrSpaceMapMangling = true;
+
+ // Set pointer width and alignment for target address space 0.
+ PointerWidth = PointerAlign = DataLayout->getPointerSizeInBits();
+ if (getMaxPointerWidth() == 64) {
+ LongWidth = LongAlign = 64;
+ SizeType = UnsignedLong;
+ PtrDiffType = SignedLong;
+ IntPtrType = SignedLong;
+ }
+ }
+
+ void setAddressSpaceMap(bool DefaultIsPrivate) {
+ if (isGenericZero(getTriple())) {
+ AddrSpaceMap = DefaultIsPrivate ? &AMDGPUGenIsZeroDefIsPrivMap
+ : &AMDGPUGenIsZeroDefIsGenMap;
+ } else {
+ AddrSpaceMap = DefaultIsPrivate ? &AMDGPUPrivIsZeroDefIsPrivMap
+ : &AMDGPUPrivIsZeroDefIsGenMap;
+ }
+ }
+
+ void adjust(LangOptions &Opts) override {
+ TargetInfo::adjust(Opts);
+ setAddressSpaceMap(Opts.OpenCL || !isAMDGCN(getTriple()));
}
uint64_t getPointerWidthV(unsigned AddrSpace) const override {
if (GPU <= GK_CAYMAN)
return 32;
- switch(AddrSpace) {
- default:
- return 64;
- case 0:
- case 3:
- case 5:
- return 32;
+ if (AddrSpace == AS.Private || AddrSpace == AS.Local) {
+ return 32;
}
+ return 64;
+ }
+
+ uint64_t getPointerAlignV(unsigned AddrSpace) const override {
+ return getPointerWidthV(AddrSpace);
}
uint64_t getMaxPointerWidth() const override {
@@ -2111,15 +2268,16 @@ public:
for (auto &I : TargetOpts.FeaturesAsWritten) {
if (I == "+fp32-denormals" || I == "-fp32-denormals")
hasFP32Denormals = true;
- if (I == "+fp64-denormals" || I == "-fp64-denormals")
+ if (I == "+fp64-fp16-denormals" || I == "-fp64-fp16-denormals")
hasFP64Denormals = true;
}
if (!hasFP32Denormals)
- TargetOpts.Features.push_back((Twine(hasFullSpeedFP32Denorms &&
+ TargetOpts.Features.push_back(
+ (Twine(hasFullSpeedFMAF32(TargetOpts.CPU) &&
!CGOpts.FlushDenorm ? '+' : '-') + Twine("fp32-denormals")).str());
- // Always do not flush fp64 denorms.
+  // Never flush fp64 or fp16 denorms.
if (!hasFP64Denormals && hasFP64)
- TargetOpts.Features.push_back("+fp64-denormals");
+ TargetOpts.Features.push_back("+fp64-fp16-denormals");
}
ArrayRef<Builtin::Info> getTargetBuiltins() const override {
@@ -2204,6 +2362,8 @@ public:
.Case("gfx803", GK_GFX8)
.Case("gfx804", GK_GFX8)
.Case("gfx810", GK_GFX8)
+ .Case("gfx900", GK_GFX9)
+ .Case("gfx901", GK_GFX9)
.Default(GK_NONE);
}
@@ -2246,6 +2406,32 @@ public:
return LangAS::opencl_constant;
}
+ llvm::Optional<unsigned> getConstantAddressSpace() const override {
+ return LangAS::FirstTargetAddressSpace + AS.Constant;
+ }
+
+ /// \returns Target specific vtbl ptr address space.
+ unsigned getVtblPtrAddressSpace() const override { return AS.Constant; }
+
+ /// \returns If a target requires an address within a target specific address
+ /// space \p AddressSpace to be converted in order to be used, then return the
+ /// corresponding target specific DWARF address space.
+ ///
+ /// \returns Otherwise return None and no conversion will be emitted in the
+ /// DWARF.
+ Optional<unsigned> getDWARFAddressSpace(
+ unsigned AddressSpace) const override {
+ const unsigned DWARF_Private = 1;
+ const unsigned DWARF_Local = 2;
+ if (AddressSpace == AS.Private) {
+ return DWARF_Private;
+ } else if (AddressSpace == AS.Local) {
+ return DWARF_Local;
+ } else {
+ return None;
+ }
+ }
+
CallingConvCheckResult checkCallingConvention(CallingConv CC) const override {
switch (CC) {
default:
@@ -2260,7 +2446,7 @@ public:
// address space has value 0 but in private and local address space has
// value ~0.
uint64_t getNullPointerValue(unsigned AS) const override {
- return AS != LangAS::opencl_local && AS != 0 ? 0 : ~0;
+ return AS == LangAS::opencl_local ? ~0 : 0;
}
};
@@ -2343,9 +2529,13 @@ bool AMDGPUTargetInfo::initFeatureMap(
case GK_GFX7:
break;
+ case GK_GFX9:
+ Features["gfx9-insts"] = true;
+ LLVM_FALLTHROUGH;
case GK_GFX8:
Features["s-memrealtime"] = true;
Features["16-bit-insts"] = true;
+ Features["dpp"] = true;
break;
case GK_NONE:
@@ -2467,9 +2657,11 @@ class X86TargetInfo : public TargetInfo {
bool HasRDSEED = false;
bool HasADX = false;
bool HasTBM = false;
+ bool HasLWP = false;
bool HasFMA = false;
bool HasF16C = false;
bool HasAVX512CD = false;
+ bool HasAVX512VPOPCNTDQ = false;
bool HasAVX512ER = false;
bool HasAVX512PF = false;
bool HasAVX512DQ = false;
@@ -2487,11 +2679,10 @@ class X86TargetInfo : public TargetInfo {
bool HasXSAVEC = false;
bool HasXSAVES = false;
bool HasMWAITX = false;
+ bool HasCLZERO = false;
bool HasPKU = false;
bool HasCLFLUSHOPT = false;
- bool HasPCOMMIT = false;
bool HasCLWB = false;
- bool HasUMIP = false;
bool HasMOVBE = false;
bool HasPREFETCHWT1 = false;
@@ -2537,7 +2728,7 @@ class X86TargetInfo : public TargetInfo {
CK_C3_2,
/// This enumerator is a bit odd, as GCC no longer accepts -march=yonah.
- /// Clang however has some logic to suport this.
+ /// Clang however has some logic to support this.
// FIXME: Warn, deprecate, and potentially remove this.
CK_Yonah,
//@}
@@ -2568,6 +2759,7 @@ class X86TargetInfo : public TargetInfo {
//@{
CK_Bonnell,
CK_Silvermont,
+ CK_Goldmont,
//@}
/// \name Nehalem
@@ -2709,6 +2901,7 @@ class X86TargetInfo : public TargetInfo {
.Case("atom", CK_Bonnell) // Legacy name.
.Case("silvermont", CK_Silvermont)
.Case("slm", CK_Silvermont) // Legacy name.
+ .Case("goldmont", CK_Goldmont)
.Case("nehalem", CK_Nehalem)
.Case("corei7", CK_Nehalem) // Legacy name.
.Case("westmere", CK_Westmere)
@@ -2924,6 +3117,7 @@ public:
case CK_Penryn:
case CK_Bonnell:
case CK_Silvermont:
+ case CK_Goldmont:
case CK_Nehalem:
case CK_Westmere:
case CK_SandyBridge:
@@ -2969,6 +3163,7 @@ public:
case CC_Swift:
case CC_X86Pascal:
case CC_IntelOclBicc:
+ case CC_OpenCLKernel:
return CCCR_OK;
default:
return CCCR_Warning;
@@ -3051,7 +3246,6 @@ bool X86TargetInfo::initFeatureMap(
setFeatureEnabledImpl(Features, "cx16", true);
break;
case CK_Core2:
- case CK_Bonnell:
setFeatureEnabledImpl(Features, "ssse3", true);
setFeatureEnabledImpl(Features, "fxsr", true);
setFeatureEnabledImpl(Features, "cx16", true);
@@ -3065,8 +3259,7 @@ bool X86TargetInfo::initFeatureMap(
setFeatureEnabledImpl(Features, "avx512ifma", true);
setFeatureEnabledImpl(Features, "avx512vbmi", true);
setFeatureEnabledImpl(Features, "sha", true);
- setFeatureEnabledImpl(Features, "umip", true);
- // FALLTHROUGH
+ LLVM_FALLTHROUGH;
case CK_SkylakeServer:
setFeatureEnabledImpl(Features, "avx512f", true);
setFeatureEnabledImpl(Features, "avx512cd", true);
@@ -3074,49 +3267,69 @@ bool X86TargetInfo::initFeatureMap(
setFeatureEnabledImpl(Features, "avx512bw", true);
setFeatureEnabledImpl(Features, "avx512vl", true);
setFeatureEnabledImpl(Features, "pku", true);
- setFeatureEnabledImpl(Features, "pcommit", true);
setFeatureEnabledImpl(Features, "clwb", true);
- // FALLTHROUGH
+ LLVM_FALLTHROUGH;
case CK_SkylakeClient:
setFeatureEnabledImpl(Features, "xsavec", true);
setFeatureEnabledImpl(Features, "xsaves", true);
setFeatureEnabledImpl(Features, "mpx", true);
setFeatureEnabledImpl(Features, "sgx", true);
setFeatureEnabledImpl(Features, "clflushopt", true);
- // FALLTHROUGH
+ setFeatureEnabledImpl(Features, "rtm", true);
+ LLVM_FALLTHROUGH;
case CK_Broadwell:
setFeatureEnabledImpl(Features, "rdseed", true);
setFeatureEnabledImpl(Features, "adx", true);
- // FALLTHROUGH
+ LLVM_FALLTHROUGH;
case CK_Haswell:
setFeatureEnabledImpl(Features, "avx2", true);
setFeatureEnabledImpl(Features, "lzcnt", true);
setFeatureEnabledImpl(Features, "bmi", true);
setFeatureEnabledImpl(Features, "bmi2", true);
- setFeatureEnabledImpl(Features, "rtm", true);
setFeatureEnabledImpl(Features, "fma", true);
setFeatureEnabledImpl(Features, "movbe", true);
- // FALLTHROUGH
+ LLVM_FALLTHROUGH;
case CK_IvyBridge:
setFeatureEnabledImpl(Features, "rdrnd", true);
setFeatureEnabledImpl(Features, "f16c", true);
setFeatureEnabledImpl(Features, "fsgsbase", true);
- // FALLTHROUGH
+ LLVM_FALLTHROUGH;
case CK_SandyBridge:
setFeatureEnabledImpl(Features, "avx", true);
setFeatureEnabledImpl(Features, "xsave", true);
setFeatureEnabledImpl(Features, "xsaveopt", true);
- // FALLTHROUGH
+ LLVM_FALLTHROUGH;
case CK_Westmere:
- case CK_Silvermont:
setFeatureEnabledImpl(Features, "aes", true);
setFeatureEnabledImpl(Features, "pclmul", true);
- // FALLTHROUGH
+ LLVM_FALLTHROUGH;
case CK_Nehalem:
setFeatureEnabledImpl(Features, "sse4.2", true);
setFeatureEnabledImpl(Features, "fxsr", true);
setFeatureEnabledImpl(Features, "cx16", true);
break;
+ case CK_Goldmont:
+ setFeatureEnabledImpl(Features, "sha", true);
+ setFeatureEnabledImpl(Features, "rdrnd", true);
+ setFeatureEnabledImpl(Features, "rdseed", true);
+ setFeatureEnabledImpl(Features, "xsave", true);
+ setFeatureEnabledImpl(Features, "xsaveopt", true);
+ setFeatureEnabledImpl(Features, "xsavec", true);
+ setFeatureEnabledImpl(Features, "xsaves", true);
+ setFeatureEnabledImpl(Features, "clflushopt", true);
+ setFeatureEnabledImpl(Features, "mpx", true);
+ LLVM_FALLTHROUGH;
+ case CK_Silvermont:
+ setFeatureEnabledImpl(Features, "aes", true);
+ setFeatureEnabledImpl(Features, "pclmul", true);
+ setFeatureEnabledImpl(Features, "sse4.2", true);
+ LLVM_FALLTHROUGH;
+ case CK_Bonnell:
+ setFeatureEnabledImpl(Features, "movbe", true);
+ setFeatureEnabledImpl(Features, "ssse3", true);
+ setFeatureEnabledImpl(Features, "fxsr", true);
+ setFeatureEnabledImpl(Features, "cx16", true);
+ break;
case CK_KNL:
setFeatureEnabledImpl(Features, "avx512f", true);
setFeatureEnabledImpl(Features, "avx512cd", true);
@@ -3171,7 +3384,7 @@ bool X86TargetInfo::initFeatureMap(
setFeatureEnabledImpl(Features, "sse4a", true);
setFeatureEnabledImpl(Features, "lzcnt", true);
setFeatureEnabledImpl(Features, "popcnt", true);
- // FALLTHROUGH
+ LLVM_FALLTHROUGH;
case CK_K8SSE3:
case CK_OpteronSSE3:
case CK_Athlon64SSE3:
@@ -3186,7 +3399,8 @@ bool X86TargetInfo::initFeatureMap(
setFeatureEnabledImpl(Features, "bmi", true);
setFeatureEnabledImpl(Features, "f16c", true);
setFeatureEnabledImpl(Features, "xsaveopt", true);
- // FALLTHROUGH
+ setFeatureEnabledImpl(Features, "movbe", true);
+ LLVM_FALLTHROUGH;
case CK_BTVER1:
setFeatureEnabledImpl(Features, "ssse3", true);
setFeatureEnabledImpl(Features, "sse4a", true);
@@ -3203,6 +3417,7 @@ bool X86TargetInfo::initFeatureMap(
setFeatureEnabledImpl(Features, "bmi", true);
setFeatureEnabledImpl(Features, "bmi2", true);
setFeatureEnabledImpl(Features, "clflushopt", true);
+ setFeatureEnabledImpl(Features, "clzero", true);
setFeatureEnabledImpl(Features, "cx16", true);
setFeatureEnabledImpl(Features, "f16c", true);
setFeatureEnabledImpl(Features, "fma", true);
@@ -3227,20 +3442,21 @@ bool X86TargetInfo::initFeatureMap(
setFeatureEnabledImpl(Features, "avx2", true);
setFeatureEnabledImpl(Features, "bmi2", true);
setFeatureEnabledImpl(Features, "mwaitx", true);
- // FALLTHROUGH
+ LLVM_FALLTHROUGH;
case CK_BDVER3:
setFeatureEnabledImpl(Features, "fsgsbase", true);
setFeatureEnabledImpl(Features, "xsaveopt", true);
- // FALLTHROUGH
+ LLVM_FALLTHROUGH;
case CK_BDVER2:
setFeatureEnabledImpl(Features, "bmi", true);
setFeatureEnabledImpl(Features, "fma", true);
setFeatureEnabledImpl(Features, "f16c", true);
setFeatureEnabledImpl(Features, "tbm", true);
- // FALLTHROUGH
+ LLVM_FALLTHROUGH;
case CK_BDVER1:
// xop implies avx, sse4a and fma4.
setFeatureEnabledImpl(Features, "xop", true);
+ setFeatureEnabledImpl(Features, "lwp", true);
setFeatureEnabledImpl(Features, "lzcnt", true);
setFeatureEnabledImpl(Features, "aes", true);
setFeatureEnabledImpl(Features, "pclmul", true);
@@ -3287,23 +3503,32 @@ void X86TargetInfo::setSSELevel(llvm::StringMap<bool> &Features,
switch (Level) {
case AVX512F:
Features["avx512f"] = true;
+ LLVM_FALLTHROUGH;
case AVX2:
Features["avx2"] = true;
+ LLVM_FALLTHROUGH;
case AVX:
Features["avx"] = true;
Features["xsave"] = true;
+ LLVM_FALLTHROUGH;
case SSE42:
Features["sse4.2"] = true;
+ LLVM_FALLTHROUGH;
case SSE41:
Features["sse4.1"] = true;
+ LLVM_FALLTHROUGH;
case SSSE3:
Features["ssse3"] = true;
+ LLVM_FALLTHROUGH;
case SSE3:
Features["sse3"] = true;
+ LLVM_FALLTHROUGH;
case SSE2:
Features["sse2"] = true;
+ LLVM_FALLTHROUGH;
case SSE1:
Features["sse"] = true;
+ LLVM_FALLTHROUGH;
case NoSSE:
break;
}
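(Editor's note: the enable path above relies on deliberate switch fallthrough so that requesting a high SSE level implies every lower one. The idiom in isolation, as a sketch rather than the Clang code:)

    #include <map>
    #include <string>

    enum SSELevel { NoSSE, SSE1, SSE2, AVX, AVX2 };

    // Each case deliberately falls through to the next lower level, so
    // asking for AVX2 also enables avx, sse2 and sse.
    inline void enableUpTo(std::map<std::string, bool> &F, SSELevel L) {
      switch (L) {
      case AVX2:  F["avx2"] = true;  // fall through
      case AVX:   F["avx"]  = true;  // fall through
      case SSE2:  F["sse2"] = true;  // fall through
      case SSE1:  F["sse"]  = true;  // fall through
      case NoSSE: break;
      }
    }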
@@ -3314,29 +3539,38 @@ void X86TargetInfo::setSSELevel(llvm::StringMap<bool> &Features,
case NoSSE:
case SSE1:
Features["sse"] = false;
+ LLVM_FALLTHROUGH;
case SSE2:
Features["sse2"] = Features["pclmul"] = Features["aes"] =
Features["sha"] = false;
+ LLVM_FALLTHROUGH;
case SSE3:
Features["sse3"] = false;
setXOPLevel(Features, NoXOP, false);
+ LLVM_FALLTHROUGH;
case SSSE3:
Features["ssse3"] = false;
+ LLVM_FALLTHROUGH;
case SSE41:
Features["sse4.1"] = false;
+ LLVM_FALLTHROUGH;
case SSE42:
Features["sse4.2"] = false;
+ LLVM_FALLTHROUGH;
case AVX:
Features["fma"] = Features["avx"] = Features["f16c"] = Features["xsave"] =
Features["xsaveopt"] = false;
setXOPLevel(Features, FMA4, false);
+ LLVM_FALLTHROUGH;
case AVX2:
Features["avx2"] = false;
+ LLVM_FALLTHROUGH;
case AVX512F:
Features["avx512f"] = Features["avx512cd"] = Features["avx512er"] =
- Features["avx512pf"] = Features["avx512dq"] = Features["avx512bw"] =
- Features["avx512vl"] = Features["avx512vbmi"] =
- Features["avx512ifma"] = false;
+ Features["avx512pf"] = Features["avx512dq"] = Features["avx512bw"] =
+ Features["avx512vl"] = Features["avx512vbmi"] =
+ Features["avx512ifma"] = Features["avx512vpopcntdq"] = false;
+ break;
}
}
@@ -3346,10 +3580,13 @@ void X86TargetInfo::setMMXLevel(llvm::StringMap<bool> &Features,
switch (Level) {
case AMD3DNowAthlon:
Features["3dnowa"] = true;
+ LLVM_FALLTHROUGH;
case AMD3DNow:
Features["3dnow"] = true;
+ LLVM_FALLTHROUGH;
case MMX:
Features["mmx"] = true;
+ LLVM_FALLTHROUGH;
case NoMMX3DNow:
break;
}
@@ -3360,10 +3597,13 @@ void X86TargetInfo::setMMXLevel(llvm::StringMap<bool> &Features,
case NoMMX3DNow:
case MMX:
Features["mmx"] = false;
+ LLVM_FALLTHROUGH;
case AMD3DNow:
Features["3dnow"] = false;
+ LLVM_FALLTHROUGH;
case AMD3DNowAthlon:
Features["3dnowa"] = false;
+ break;
}
}
@@ -3373,12 +3613,15 @@ void X86TargetInfo::setXOPLevel(llvm::StringMap<bool> &Features, XOPEnum Level,
switch (Level) {
case XOP:
Features["xop"] = true;
+ LLVM_FALLTHROUGH;
case FMA4:
Features["fma4"] = true;
setSSELevel(Features, AVX, true);
+ LLVM_FALLTHROUGH;
case SSE4A:
Features["sse4a"] = true;
setSSELevel(Features, SSE3, true);
+ LLVM_FALLTHROUGH;
case NoXOP:
break;
}
@@ -3389,10 +3632,13 @@ void X86TargetInfo::setXOPLevel(llvm::StringMap<bool> &Features, XOPEnum Level,
case NoXOP:
case SSE4A:
Features["sse4a"] = false;
+ LLVM_FALLTHROUGH;
case FMA4:
Features["fma4"] = false;
+ LLVM_FALLTHROUGH;
case XOP:
Features["xop"] = false;
+ break;
}
}
@@ -3436,7 +3682,8 @@ void X86TargetInfo::setFeatureEnabledImpl(llvm::StringMap<bool> &Features,
setSSELevel(Features, AVX512F, Enabled);
} else if (Name == "avx512cd" || Name == "avx512er" || Name == "avx512pf" ||
Name == "avx512dq" || Name == "avx512bw" || Name == "avx512vl" ||
- Name == "avx512vbmi" || Name == "avx512ifma") {
+ Name == "avx512vbmi" || Name == "avx512ifma" ||
+ Name == "avx512vpopcntdq") {
if (Enabled)
setSSELevel(Features, AVX512F, Enabled);
// Enable BWI instruction if VBMI is being enabled.
@@ -3512,12 +3759,16 @@ bool X86TargetInfo::handleTargetFeatures(std::vector<std::string> &Features,
HasADX = true;
} else if (Feature == "+tbm") {
HasTBM = true;
+ } else if (Feature == "+lwp") {
+ HasLWP = true;
} else if (Feature == "+fma") {
HasFMA = true;
} else if (Feature == "+f16c") {
HasF16C = true;
} else if (Feature == "+avx512cd") {
HasAVX512CD = true;
+ } else if (Feature == "+avx512vpopcntdq") {
+ HasAVX512VPOPCNTDQ = true;
} else if (Feature == "+avx512er") {
HasAVX512ER = true;
} else if (Feature == "+avx512pf") {
@@ -3558,14 +3809,12 @@ bool X86TargetInfo::handleTargetFeatures(std::vector<std::string> &Features,
HasPKU = true;
} else if (Feature == "+clflushopt") {
HasCLFLUSHOPT = true;
- } else if (Feature == "+pcommit") {
- HasPCOMMIT = true;
} else if (Feature == "+clwb") {
HasCLWB = true;
- } else if (Feature == "+umip") {
- HasUMIP = true;
} else if (Feature == "+prefetchwt1") {
HasPREFETCHWT1 = true;
+ } else if (Feature == "+clzero") {
+ HasCLZERO = true;
}
X86SSEEnum Level = llvm::StringSwitch<X86SSEEnum>(Feature)
@@ -3648,7 +3897,7 @@ void X86TargetInfo::getTargetDefines(const LangOptions &Opts,
case CK_PentiumMMX:
Builder.defineMacro("__pentium_mmx__");
Builder.defineMacro("__tune_pentium_mmx__");
- // Fallthrough
+ LLVM_FALLTHROUGH;
case CK_i586:
case CK_Pentium:
defineCPUMacros(Builder, "i586");
@@ -3658,15 +3907,15 @@ void X86TargetInfo::getTargetDefines(const LangOptions &Opts,
case CK_Pentium3M:
case CK_PentiumM:
Builder.defineMacro("__tune_pentium3__");
- // Fallthrough
+ LLVM_FALLTHROUGH;
case CK_Pentium2:
case CK_C3_2:
Builder.defineMacro("__tune_pentium2__");
- // Fallthrough
+ LLVM_FALLTHROUGH;
case CK_PentiumPro:
Builder.defineMacro("__tune_i686__");
Builder.defineMacro("__tune_pentiumpro__");
- // Fallthrough
+ LLVM_FALLTHROUGH;
case CK_i686:
Builder.defineMacro("__i686");
Builder.defineMacro("__i686__");
@@ -3693,6 +3942,9 @@ void X86TargetInfo::getTargetDefines(const LangOptions &Opts,
case CK_Silvermont:
defineCPUMacros(Builder, "slm");
break;
+ case CK_Goldmont:
+ defineCPUMacros(Builder, "goldmont");
+ break;
case CK_Nehalem:
case CK_Westmere:
case CK_SandyBridge:
@@ -3719,7 +3971,7 @@ void X86TargetInfo::getTargetDefines(const LangOptions &Opts,
case CK_K6_2:
Builder.defineMacro("__k6_2__");
Builder.defineMacro("__tune_k6_2__");
- // Fallthrough
+ LLVM_FALLTHROUGH;
case CK_K6_3:
if (CPU != CK_K6_2) { // In case of fallthrough
// FIXME: GCC may be enabling these in cases where some other k6
@@ -3728,7 +3980,7 @@ void X86TargetInfo::getTargetDefines(const LangOptions &Opts,
Builder.defineMacro("__k6_3__");
Builder.defineMacro("__tune_k6_3__");
}
- // Fallthrough
+ LLVM_FALLTHROUGH;
case CK_K6:
defineCPUMacros(Builder, "k6");
break;
@@ -3829,16 +4081,22 @@ void X86TargetInfo::getTargetDefines(const LangOptions &Opts,
if (HasTBM)
Builder.defineMacro("__TBM__");
+ if (HasLWP)
+ Builder.defineMacro("__LWP__");
+
if (HasMWAITX)
Builder.defineMacro("__MWAITX__");
switch (XOPLevel) {
case XOP:
Builder.defineMacro("__XOP__");
+ LLVM_FALLTHROUGH;
case FMA4:
Builder.defineMacro("__FMA4__");
+ LLVM_FALLTHROUGH;
case SSE4A:
Builder.defineMacro("__SSE4A__");
+ LLVM_FALLTHROUGH;
case NoXOP:
break;
}
@@ -3851,6 +4109,8 @@ void X86TargetInfo::getTargetDefines(const LangOptions &Opts,
if (HasAVX512CD)
Builder.defineMacro("__AVX512CD__");
+ if (HasAVX512VPOPCNTDQ)
+ Builder.defineMacro("__AVX512VPOPCNTDQ__");
if (HasAVX512ER)
Builder.defineMacro("__AVX512ER__");
if (HasAVX512PF)
@@ -3883,29 +4143,50 @@ void X86TargetInfo::getTargetDefines(const LangOptions &Opts,
Builder.defineMacro("__PKU__");
if (HasCX16)
Builder.defineMacro("__GCC_HAVE_SYNC_COMPARE_AND_SWAP_16");
+ if (HasCLFLUSHOPT)
+ Builder.defineMacro("__CLFLUSHOPT__");
+ if (HasCLWB)
+ Builder.defineMacro("__CLWB__");
+ if (HasMPX)
+ Builder.defineMacro("__MPX__");
+ if (HasSGX)
+ Builder.defineMacro("__SGX__");
+ if (HasPREFETCHWT1)
+ Builder.defineMacro("__PREFETCHWT1__");
+ if (HasCLZERO)
+ Builder.defineMacro("__CLZERO__");
// Each case falls through to the previous one here.
switch (SSELevel) {
case AVX512F:
Builder.defineMacro("__AVX512F__");
+ LLVM_FALLTHROUGH;
case AVX2:
Builder.defineMacro("__AVX2__");
+ LLVM_FALLTHROUGH;
case AVX:
Builder.defineMacro("__AVX__");
+ LLVM_FALLTHROUGH;
case SSE42:
Builder.defineMacro("__SSE4_2__");
+ LLVM_FALLTHROUGH;
case SSE41:
Builder.defineMacro("__SSE4_1__");
+ LLVM_FALLTHROUGH;
case SSSE3:
Builder.defineMacro("__SSSE3__");
+ LLVM_FALLTHROUGH;
case SSE3:
Builder.defineMacro("__SSE3__");
+ LLVM_FALLTHROUGH;
case SSE2:
Builder.defineMacro("__SSE2__");
Builder.defineMacro("__SSE2_MATH__"); // -mfp-math=sse always implied.
+ LLVM_FALLTHROUGH;
case SSE1:
Builder.defineMacro("__SSE__");
Builder.defineMacro("__SSE_MATH__"); // -mfp-math=sse always implied.
+ LLVM_FALLTHROUGH;
case NoSSE:
break;
}
@@ -3927,6 +4208,7 @@ void X86TargetInfo::getTargetDefines(const LangOptions &Opts,
break;
default:
Builder.defineMacro("_M_IX86_FP", Twine(0));
+ break;
}
}
@@ -3934,10 +4216,13 @@ void X86TargetInfo::getTargetDefines(const LangOptions &Opts,
switch (MMX3DNowLevel) {
case AMD3DNowAthlon:
Builder.defineMacro("__3dNOW_A__");
+ LLVM_FALLTHROUGH;
case AMD3DNow:
Builder.defineMacro("__3dNOW__");
+ LLVM_FALLTHROUGH;
case MMX:
Builder.defineMacro("__MMX__");
+ LLVM_FALLTHROUGH;
case NoMMX3DNow:
break;
}
@@ -3949,6 +4234,9 @@ void X86TargetInfo::getTargetDefines(const LangOptions &Opts,
}
if (CPU >= CK_i586)
Builder.defineMacro("__GCC_HAVE_SYNC_COMPARE_AND_SWAP_8");
+
+ if (HasFloat128)
+ Builder.defineMacro("__SIZEOF_FLOAT128__", "16");
}
bool X86TargetInfo::hasFeature(StringRef Feature) const {
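(Editor's note: HasFloat128 above gates both the __float128 type and the __SIZEOF_FLOAT128__ macro, so portable code can probe for it; a sketch.)

    #if defined(__SIZEOF_FLOAT128__)
    typedef __float128 max_float;     // 128-bit IEEE quad where supported
    #else
    typedef long double max_float;    // fall back to the widest native type
    #endif

    static_assert(sizeof(max_float) >= sizeof(double), "sanity check");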
@@ -3958,6 +4246,7 @@ bool X86TargetInfo::hasFeature(StringRef Feature) const {
.Case("avx2", SSELevel >= AVX2)
.Case("avx512f", SSELevel >= AVX512F)
.Case("avx512cd", HasAVX512CD)
+ .Case("avx512vpopcntdq", HasAVX512VPOPCNTDQ)
.Case("avx512er", HasAVX512ER)
.Case("avx512pf", HasAVX512PF)
.Case("avx512dq", HasAVX512DQ)
@@ -3969,6 +4258,7 @@ bool X86TargetInfo::hasFeature(StringRef Feature) const {
.Case("bmi2", HasBMI2)
.Case("clflushopt", HasCLFLUSHOPT)
.Case("clwb", HasCLWB)
+ .Case("clzero", HasCLZERO)
.Case("cx16", HasCX16)
.Case("f16c", HasF16C)
.Case("fma", HasFMA)
@@ -3982,7 +4272,6 @@ bool X86TargetInfo::hasFeature(StringRef Feature) const {
.Case("movbe", HasMOVBE)
.Case("mpx", HasMPX)
.Case("pclmul", HasPCLMUL)
- .Case("pcommit", HasPCOMMIT)
.Case("pku", HasPKU)
.Case("popcnt", HasPOPCNT)
.Case("prefetchwt1", HasPREFETCHWT1)
@@ -4000,7 +4289,7 @@ bool X86TargetInfo::hasFeature(StringRef Feature) const {
.Case("sse4.2", SSELevel >= SSE42)
.Case("sse4a", XOPLevel >= SSE4A)
.Case("tbm", HasTBM)
- .Case("umip", HasUMIP)
+ .Case("lwp", HasLWP)
.Case("x86", true)
.Case("x86_32", getTriple().getArch() == llvm::Triple::x86)
.Case("x86_64", getTriple().getArch() == llvm::Triple::x86_64)
@@ -4043,6 +4332,7 @@ bool X86TargetInfo::validateCpuSupports(StringRef FeatureStr) const {
.Case("avx512bw", true)
.Case("avx512dq", true)
.Case("avx512cd", true)
+ .Case("avx512vpopcntdq", true)
.Case("avx512er", true)
.Case("avx512pf", true)
.Case("avx512vbmi", true)
@@ -4426,7 +4716,9 @@ static void addMinGWDefines(const LangOptions &Opts, MacroBuilder &Builder) {
class MinGWX86_32TargetInfo : public WindowsX86_32TargetInfo {
public:
MinGWX86_32TargetInfo(const llvm::Triple &Triple, const TargetOptions &Opts)
- : WindowsX86_32TargetInfo(Triple, Opts) {}
+ : WindowsX86_32TargetInfo(Triple, Opts) {
+ HasFloat128 = true;
+ }
void getTargetDefines(const LangOptions &Opts,
MacroBuilder &Builder) const override {
WindowsX86_32TargetInfo::getTargetDefines(Opts, Builder);
@@ -4607,10 +4899,11 @@ public:
case CC_Swift:
case CC_X86VectorCall:
case CC_IntelOclBicc:
- case CC_X86_64Win64:
+ case CC_Win64:
case CC_PreserveMost:
case CC_PreserveAll:
case CC_X86RegCall:
+ case CC_OpenCLKernel:
return CCCR_OK;
default:
return CCCR_Warning;
@@ -4684,6 +4977,7 @@ public:
case CC_X86_64SysV:
case CC_Swift:
case CC_X86RegCall:
+ case CC_OpenCLKernel:
return CCCR_OK;
default:
return CCCR_Warning;
@@ -4718,6 +5012,7 @@ public:
// with x86 FP ops. Weird.
LongDoubleWidth = LongDoubleAlign = 128;
LongDoubleFormat = &llvm::APFloat::x87DoubleExtended();
+ HasFloat128 = true;
}
void getTargetDefines(const LangOptions &Opts,
@@ -5054,6 +5349,8 @@ class ARMTargetInfo : public TargetInfo {
return "7M";
case llvm::ARM::AK_ARMV7EM:
return "7EM";
+ case llvm::ARM::AK_ARMV7VE:
+ return "7VE";
case llvm::ARM::AK_ARMV8A:
return "8A";
case llvm::ARM::AK_ARMV8_1A:
@@ -5142,6 +5439,8 @@ public:
default:
if (Triple.getOS() == llvm::Triple::NetBSD)
setABI("apcs-gnu");
+ else if (Triple.getOS() == llvm::Triple::OpenBSD)
+ setABI("aapcs-linux");
else
setABI("aapcs");
break;
@@ -5154,6 +5453,10 @@ public:
// ARM has atomics up to 8 bytes
setAtomic();
+ // Maximum alignment for ARM NEON data types should be 64-bits (AAPCS)
+ if (IsAAPCS && (Triple.getEnvironment() != llvm::Triple::Android))
+ MaxVectorAlign = 64;
+
// Do force alignment of members that follow zero length bitfields. If
// the alignment of the zero-length bitfield is greater than the member
// that follows it, `bar', `bar' will be aligned as the type of the
@@ -5163,7 +5466,7 @@ public:
if (Triple.getOS() == llvm::Triple::Linux ||
Triple.getOS() == llvm::Triple::UnknownOS)
this->MCountName =
- Opts.EABIVersion == "gnu" ? "\01__gnu_mcount_nc" : "\01mcount";
+ Opts.EABIVersion == llvm::EABI::GNU ? "\01__gnu_mcount_nc" : "\01mcount";
}
StringRef getABI() const override { return ABI; }
@@ -5207,7 +5510,24 @@ public:
if (Feature[0] == '+')
Features[Feature.drop_front(1)] = true;
- return TargetInfo::initFeatureMap(Features, Diags, CPU, FeaturesVec);
+ // Enable or disable thumb-mode explicitly per function to enable mixed
+ // ARM and Thumb code generation.
+ if (isThumb())
+ Features["thumb-mode"] = true;
+ else
+ Features["thumb-mode"] = false;
+
+ // Convert user-provided arm and thumb GNU target attributes to
+ // [-|+]thumb-mode target features respectively.
+ std::vector<std::string> UpdatedFeaturesVec(FeaturesVec);
+ for (auto &Feature : UpdatedFeaturesVec) {
+ if (Feature.compare("+arm") == 0)
+ Feature = "-thumb-mode";
+ else if (Feature.compare("+thumb") == 0)
+ Feature = "+thumb-mode";
+ }
+
+ return TargetInfo::initFeatureMap(Features, Diags, CPU, UpdatedFeaturesVec);
}
bool handleTargetFeatures(std::vector<std::string> &Features,
@@ -5308,6 +5628,7 @@ public:
.Case("softfloat", SoftFloat)
.Case("thumb", isThumb())
.Case("neon", (FPU & NeonFPU) && !SoftFloat)
+ .Case("vfp", FPU && !SoftFloat)
.Case("hwdiv", HWDiv & HWDivThumb)
.Case("hwdiv-arm", HWDiv & HWDivARM)
.Default(false);
@@ -5326,6 +5647,17 @@ public:
bool setFPMath(StringRef Name) override;
+ void getTargetDefinesARMV81A(const LangOptions &Opts,
+ MacroBuilder &Builder) const {
+ Builder.defineMacro("__ARM_FEATURE_QRDMX", "1");
+ }
+
+ void getTargetDefinesARMV82A(const LangOptions &Opts,
+ MacroBuilder &Builder) const {
+ // Also include the ARMv8.1-A defines
+ getTargetDefinesARMV81A(Opts, Builder);
+ }
+
void getTargetDefines(const LangOptions &Opts,
MacroBuilder &Builder) const override {
// Target identification.
@@ -5333,9 +5665,11 @@ public:
Builder.defineMacro("__arm__");
// For bare-metal none-eabi.
if (getTriple().getOS() == llvm::Triple::UnknownOS &&
- getTriple().getEnvironment() == llvm::Triple::EABI)
+ (getTriple().getEnvironment() == llvm::Triple::EABI ||
+ getTriple().getEnvironment() == llvm::Triple::EABIHF))
Builder.defineMacro("__ELF__");
+
// Target properties.
Builder.defineMacro("__REGISTER_PREFIX__", "");
@@ -5522,8 +5856,15 @@ public:
if (Opts.UnsafeFPMath)
Builder.defineMacro("__ARM_FP_FAST", "1");
- if (ArchKind == llvm::ARM::AK_ARMV8_1A)
- Builder.defineMacro("__ARM_FEATURE_QRDMX", "1");
+ switch(ArchKind) {
+ default: break;
+ case llvm::ARM::AK_ARMV8_1A:
+ getTargetDefinesARMV81A(Opts, Builder);
+ break;
+ case llvm::ARM::AK_ARMV8_2A:
+ getTargetDefinesARMV82A(Opts, Builder);
+ break;
+ }
}
ArrayRef<Builtin::Info> getTargetBuiltins() const override {
@@ -5629,6 +5970,7 @@ public:
case CC_AAPCS:
case CC_AAPCS_VFP:
case CC_Swift:
+ case CC_OpenCLKernel:
return CCCR_OK;
default:
return CCCR_Warning;
@@ -5788,6 +6130,7 @@ public:
case CC_X86VectorCall:
return CCCR_Ignore;
case CC_C:
+ case CC_OpenCLKernel:
return CCCR_OK;
default:
return CCCR_Warning;
@@ -5907,14 +6250,16 @@ class AArch64TargetInfo : public TargetInfo {
enum FPUModeEnum {
FPUMode,
- NeonMode
+ NeonMode = (1 << 0),
+ SveMode = (1 << 1)
};
unsigned FPU;
unsigned CRC;
unsigned Crypto;
unsigned Unaligned;
- unsigned V8_1A;
+ unsigned HasFullFP16;
+ llvm::AArch64::ArchKind ArchKind;
static const Builtin::Info BuiltinInfo[];
@@ -5945,6 +6290,9 @@ public:
LongDoubleWidth = LongDoubleAlign = SuitableAlign = 128;
LongDoubleFormat = &llvm::APFloat::IEEEquad();
+ // Make __builtin_ms_va_list available.
+ HasBuiltinMSVaList = true;
+
// {} in inline assembly are neon specifiers, not assembly variant
// specifiers.
NoAsmVariants = true;
@@ -5959,9 +6307,10 @@ public:
// AArch64 targets default to using the ARM C++ ABI.
TheCXXABI.set(TargetCXXABI::GenericAArch64);
- if (Triple.getOS() == llvm::Triple::Linux ||
- Triple.getOS() == llvm::Triple::UnknownOS)
- this->MCountName = Opts.EABIVersion == "gnu" ? "\01_mcount" : "mcount";
+ if (Triple.getOS() == llvm::Triple::Linux)
+ this->MCountName = "\01_mcount";
+ else if (Triple.getOS() == llvm::Triple::UnknownOS)
+ this->MCountName = Opts.EABIVersion == llvm::EABI::GNU ? "\01_mcount" : "mcount";
}
StringRef getABI() const override { return ABI; }
@@ -5979,10 +6328,26 @@ public:
static_cast<unsigned>(llvm::AArch64::ArchKind::AK_INVALID);
}
+ void getTargetDefinesARMV81A(const LangOptions &Opts,
+ MacroBuilder &Builder) const {
+ Builder.defineMacro("__ARM_FEATURE_QRDMX", "1");
+ }
+
+ void getTargetDefinesARMV82A(const LangOptions &Opts,
+ MacroBuilder &Builder) const {
+ // Also include the ARMv8.1 defines
+ getTargetDefinesARMV81A(Opts, Builder);
+ }
+
void getTargetDefines(const LangOptions &Opts,
MacroBuilder &Builder) const override {
// Target identification.
Builder.defineMacro("__aarch64__");
+ // For bare-metal none-eabi.
+ if (getTriple().getOS() == llvm::Triple::UnknownOS &&
+ (getTriple().getEnvironment() == llvm::Triple::EABI ||
+ getTriple().getEnvironment() == llvm::Triple::EABIHF))
+ Builder.defineMacro("__ELF__");
// Target properties.
Builder.defineMacro("_LP64");
@@ -6023,12 +6388,15 @@ public:
Builder.defineMacro("__ARM_SIZEOF_MINIMAL_ENUM",
Opts.ShortEnums ? "1" : "4");
- if (FPU == NeonMode) {
+ if (FPU & NeonMode) {
Builder.defineMacro("__ARM_NEON", "1");
// 64-bit NEON supports half, single and double precision operations.
Builder.defineMacro("__ARM_NEON_FP", "0xE");
}
+ if (FPU & SveMode)
+ Builder.defineMacro("__ARM_FEATURE_SVE", "1");
+
if (CRC)
Builder.defineMacro("__ARM_FEATURE_CRC32", "1");
@@ -6038,8 +6406,15 @@ public:
if (Unaligned)
Builder.defineMacro("__ARM_FEATURE_UNALIGNED", "1");
- if (V8_1A)
- Builder.defineMacro("__ARM_FEATURE_QRDMX", "1");
+ switch(ArchKind) {
+ default: break;
+ case llvm::AArch64::ArchKind::AK_ARMV8_1A:
+ getTargetDefinesARMV81A(Opts, Builder);
+ break;
+ case llvm::AArch64::ArchKind::AK_ARMV8_2A:
+ getTargetDefinesARMV82A(Opts, Builder);
+ break;
+ }
// All of the __sync_(bool|val)_compare_and_swap_(1|2|4|8) builtins work.
Builder.defineMacro("__GCC_HAVE_SYNC_COMPARE_AND_SWAP_1");
@@ -6057,7 +6432,8 @@ public:
return Feature == "aarch64" ||
Feature == "arm64" ||
Feature == "arm" ||
- (Feature == "neon" && FPU == NeonMode);
+ (Feature == "neon" && (FPU & NeonMode)) ||
+ (Feature == "sve" && (FPU & SveMode));
}
bool handleTargetFeatures(std::vector<std::string> &Features,
@@ -6066,11 +6442,14 @@ public:
CRC = 0;
Crypto = 0;
Unaligned = 1;
- V8_1A = 0;
+ HasFullFP16 = 0;
+ ArchKind = llvm::AArch64::ArchKind::AK_ARMV8A;
for (const auto &Feature : Features) {
if (Feature == "+neon")
- FPU = NeonMode;
+ FPU |= NeonMode;
+ if (Feature == "+sve")
+ FPU |= SveMode;
if (Feature == "+crc")
CRC = 1;
if (Feature == "+crypto")
@@ -6078,7 +6457,11 @@ public:
if (Feature == "+strict-align")
Unaligned = 0;
if (Feature == "+v8.1a")
- V8_1A = 1;
+ ArchKind = llvm::AArch64::ArchKind::AK_ARMV8_1A;
+ if (Feature == "+v8.2a")
+ ArchKind = llvm::AArch64::ArchKind::AK_ARMV8_2A;
+ if (Feature == "+fullfp16")
+ HasFullFP16 = 1;
}
setDataLayout();
@@ -6092,6 +6475,8 @@ public:
case CC_Swift:
case CC_PreserveMost:
case CC_PreserveAll:
+ case CC_OpenCLKernel:
+ case CC_Win64:
return CCCR_OK;
default:
return CCCR_Warning;
@@ -6261,6 +6646,56 @@ public:
}
};
+class MicrosoftARM64TargetInfo
+ : public WindowsTargetInfo<AArch64leTargetInfo> {
+ const llvm::Triple Triple;
+
+public:
+ MicrosoftARM64TargetInfo(const llvm::Triple &Triple,
+ const TargetOptions &Opts)
+ : WindowsTargetInfo<AArch64leTargetInfo>(Triple, Opts), Triple(Triple) {
+
+ // This is an LLP64 platform.
+ // int:4, long:4, long long:8, long double:8.
+ WCharType = UnsignedShort;
+ IntWidth = IntAlign = 32;
+ LongWidth = LongAlign = 32;
+ DoubleAlign = LongLongAlign = 64;
+ LongDoubleWidth = LongDoubleAlign = 64;
+ LongDoubleFormat = &llvm::APFloat::IEEEdouble();
+ IntMaxType = SignedLongLong;
+ Int64Type = SignedLongLong;
+ SizeType = UnsignedLongLong;
+ PtrDiffType = SignedLongLong;
+ IntPtrType = SignedLongLong;
+
+ TheCXXABI.set(TargetCXXABI::Microsoft);
+ }
+
+ void setDataLayout() override {
+ resetDataLayout("e-m:w-p:64:64-i32:32-i64:64-i128:128-n32:64-S128");
+ }
+
+ void getVisualStudioDefines(const LangOptions &Opts,
+ MacroBuilder &Builder) const {
+ WindowsTargetInfo<AArch64leTargetInfo>::getVisualStudioDefines(Opts,
+ Builder);
+ Builder.defineMacro("_WIN32", "1");
+ Builder.defineMacro("_WIN64", "1");
+ Builder.defineMacro("_M_ARM64", "1");
+ }
+
+ void getTargetDefines(const LangOptions &Opts,
+ MacroBuilder &Builder) const override {
+ WindowsTargetInfo::getTargetDefines(Opts, Builder);
+ getVisualStudioDefines(Opts, Builder);
+ }
+
+ BuiltinVaListKind getBuiltinVaListKind() const override {
+ return TargetInfo::CharPtrBuiltinVaList;
+ }
+};
+
class AArch64beTargetInfo : public AArch64TargetInfo {
void setDataLayout() override {
assert(!getTriple().isOSBinFormatMachO());
@@ -6407,6 +6842,7 @@ public:
.Case("hexagonv5", "5")
.Case("hexagonv55", "55")
.Case("hexagonv60", "60")
+ .Case("hexagonv62", "62")
.Default(nullptr);
}
@@ -6451,6 +6887,9 @@ void HexagonTargetInfo::getTargetDefines(const LangOptions &Opts,
Builder.defineMacro("__HEXAGON_ARCH__", "60");
Builder.defineMacro("__QDSP6_V60__");
Builder.defineMacro("__QDSP6_ARCH__", "60");
+ } else if (CPU == "hexagonv62") {
+ Builder.defineMacro("__HEXAGON_V62__");
+ Builder.defineMacro("__HEXAGON_ARCH__", "62");
}
if (hasFeature("hvx")) {
@@ -6706,6 +7145,11 @@ public:
case 'N': // Same as 'K' but zext (required for SIMode)
case 'O': // The constant 4096
return true;
+
+ case 'f':
+ case 'e':
+ info.setAllowsRegister();
+ return true;
}
return false;
}
@@ -7008,13 +7452,14 @@ class SystemZTargetInfo : public TargetInfo {
static const Builtin::Info BuiltinInfo[];
static const char *const GCCRegNames[];
std::string CPU;
+ int ISARevision;
bool HasTransactionalExecution;
bool HasVector;
public:
SystemZTargetInfo(const llvm::Triple &Triple, const TargetOptions &)
- : TargetInfo(Triple), CPU("z10"), HasTransactionalExecution(false),
- HasVector(false) {
+ : TargetInfo(Triple), CPU("z10"), ISARevision(8),
+ HasTransactionalExecution(false), HasVector(false) {
IntMaxType = SignedLong;
Int64Type = SignedLong;
TLSSupported = true;
@@ -7036,6 +7481,8 @@ public:
Builder.defineMacro("__zarch__");
Builder.defineMacro("__LONG_DOUBLE_128__");
+ Builder.defineMacro("__ARCH__", Twine(ISARevision));
+
Builder.defineMacro("__GCC_HAVE_SYNC_COMPARE_AND_SWAP_1");
Builder.defineMacro("__GCC_HAVE_SYNC_COMPARE_AND_SWAP_2");
Builder.defineMacro("__GCC_HAVE_SYNC_COMPARE_AND_SWAP_4");
@@ -7043,8 +7490,10 @@ public:
if (HasTransactionalExecution)
Builder.defineMacro("__HTM__");
+ if (HasVector)
+ Builder.defineMacro("__VX__");
if (Opts.ZVector)
- Builder.defineMacro("__VEC__", "10301");
+ Builder.defineMacro("__VEC__", "10302");
}
ArrayRef<Builtin::Info> getTargetBuiltins() const override {
return llvm::makeArrayRef(BuiltinInfo,
@@ -7065,37 +7514,38 @@ public:
BuiltinVaListKind getBuiltinVaListKind() const override {
return TargetInfo::SystemZBuiltinVaList;
}
+ int getISARevision(const StringRef &Name) const {
+ return llvm::StringSwitch<int>(Name)
+ .Cases("arch8", "z10", 8)
+ .Cases("arch9", "z196", 9)
+ .Cases("arch10", "zEC12", 10)
+ .Cases("arch11", "z13", 11)
+ .Cases("arch12", "z14", 12)
+ .Default(-1);
+ }
bool setCPU(const std::string &Name) override {
CPU = Name;
- bool CPUKnown = llvm::StringSwitch<bool>(Name)
- .Case("z10", true)
- .Case("arch8", true)
- .Case("z196", true)
- .Case("arch9", true)
- .Case("zEC12", true)
- .Case("arch10", true)
- .Case("z13", true)
- .Case("arch11", true)
- .Default(false);
-
- return CPUKnown;
+ ISARevision = getISARevision(CPU);
+ return ISARevision != -1;
}
bool
initFeatureMap(llvm::StringMap<bool> &Features, DiagnosticsEngine &Diags,
StringRef CPU,
const std::vector<std::string> &FeaturesVec) const override {
- if (CPU == "zEC12" || CPU == "arch10")
- Features["transactional-execution"] = true;
- if (CPU == "z13" || CPU == "arch11") {
+ int ISARevision = getISARevision(CPU);
+ if (ISARevision >= 10)
Features["transactional-execution"] = true;
+ if (ISARevision >= 11)
Features["vector"] = true;
- }
+ if (ISARevision >= 12)
+ Features["vector-enhancements-1"] = true;
return TargetInfo::initFeatureMap(Features, Diags, CPU, FeaturesVec);
}
bool handleTargetFeatures(std::vector<std::string> &Features,
DiagnosticsEngine &Diags) override {
HasTransactionalExecution = false;
+ HasVector = false;
for (const auto &Feature : Features) {
if (Feature == "+transactional-execution")
HasTransactionalExecution = true;
@@ -7114,6 +7564,11 @@ public:
bool hasFeature(StringRef Feature) const override {
return llvm::StringSwitch<bool>(Feature)
.Case("systemz", true)
+ .Case("arch8", ISARevision >= 8)
+ .Case("arch9", ISARevision >= 9)
+ .Case("arch10", ISARevision >= 10)
+ .Case("arch11", ISARevision >= 11)
+ .Case("arch12", ISARevision >= 12)
.Case("htm", HasTransactionalExecution)
.Case("vx", HasVector)
.Default(false);
@@ -7123,6 +7578,7 @@ public:
switch (CC) {
case CC_C:
case CC_Swift:
+ case CC_OpenCLKernel:
return CCCR_OK;
default:
return CCCR_Warning;
@@ -7208,7 +7664,7 @@ public:
IntPtrType = SignedInt;
PtrDiffType = SignedInt;
SigAtomicType = SignedLong;
- resetDataLayout("e-m:e-p:16:16-i32:16:32-a:16-n8:16");
+ resetDataLayout("e-m:e-p:16:16-i32:16-i64:16-f32:16-f64:16-a:8-n8:16-S16");
}
void getTargetDefines(const LangOptions &Opts,
MacroBuilder &Builder) const override {
@@ -7267,6 +7723,7 @@ ArrayRef<const char *> MSP430TargetInfo::getGCCRegNames() const {
// publicly available in http://tce.cs.tut.fi
static const unsigned TCEOpenCLAddrSpaceMap[] = {
+ 0, // Default
3, // opencl_global
4, // opencl_local
5, // opencl_constant
@@ -7405,6 +7862,157 @@ public:
ArrayRef<TargetInfo::GCCRegAlias> getGCCRegAliases() const override {
return None;
}
+ CallingConvCheckResult checkCallingConvention(CallingConv CC) const override {
+ switch (CC) {
+ default:
+ return CCCR_Warning;
+ case CC_C:
+ case CC_OpenCLKernel:
+ return CCCR_OK;
+ }
+ }
+};
+
+class Nios2TargetInfo : public TargetInfo {
+ void setDataLayout() {
+ if (BigEndian)
+ resetDataLayout("E-p:32:32:32-i8:8:32-i16:16:32-n32");
+ else
+ resetDataLayout("e-p:32:32:32-i8:8:32-i16:16:32-n32");
+ }
+
+ static const Builtin::Info BuiltinInfo[];
+ std::string CPU;
+ std::string ABI;
+
+public:
+ Nios2TargetInfo(const llvm::Triple &triple, const TargetOptions &opts)
+ : TargetInfo(triple), CPU(opts.CPU), ABI(opts.ABI) {
+ SizeType = UnsignedInt;
+ PtrDiffType = SignedInt;
+ MaxAtomicPromoteWidth = MaxAtomicInlineWidth = 32;
+ setDataLayout();
+ }
+
+ StringRef getABI() const override { return ABI; }
+ bool setABI(const std::string &Name) override {
+ if (Name == "o32" || Name == "eabi") {
+ ABI = Name;
+ return true;
+ }
+ return false;
+ }
+
+ bool setCPU(const std::string &Name) override {
+ if (Name == "nios2r1" || Name == "nios2r2") {
+ CPU = Name;
+ return true;
+ }
+ return false;
+ }
+
+ void getTargetDefines(const LangOptions &Opts,
+ MacroBuilder &Builder) const override {
+ DefineStd(Builder, "nios2", Opts);
+ DefineStd(Builder, "NIOS2", Opts);
+
+ Builder.defineMacro("__nios2");
+ Builder.defineMacro("__NIOS2");
+ Builder.defineMacro("__nios2__");
+ Builder.defineMacro("__NIOS2__");
+ }
+
+ ArrayRef<Builtin::Info> getTargetBuiltins() const override {
+ return llvm::makeArrayRef(BuiltinInfo, clang::Nios2::LastTSBuiltin -
+ Builtin::FirstTSBuiltin);
+ }
+
+ bool isFeatureSupportedByCPU(StringRef Feature, StringRef CPU) const {
+ const bool isR2 = CPU == "nios2r2";
+ return llvm::StringSwitch<bool>(Feature)
+ .Case("nios2r2mandatory", isR2)
+ .Case("nios2r2bmx", isR2)
+ .Case("nios2r2mpx", isR2)
+ .Case("nios2r2cdx", isR2)
+ .Default(false);
+ }
+
+ bool initFeatureMap(llvm::StringMap<bool> &Features,
+ DiagnosticsEngine &Diags, StringRef CPU,
+ const std::vector<std::string> &FeatureVec) const override {
+ static const char *allFeatures[] = {
+ "nios2r2mandatory", "nios2r2bmx", "nios2r2mpx", "nios2r2cdx"
+ };
+ for (const char *feature : allFeatures) {
+ Features[feature] = isFeatureSupportedByCPU(feature, CPU);
+ }
+ return true;
+ }
+
+ bool hasFeature(StringRef Feature) const override {
+ return isFeatureSupportedByCPU(Feature, CPU);
+ }
+
+ BuiltinVaListKind getBuiltinVaListKind() const override {
+ return TargetInfo::VoidPtrBuiltinVaList;
+ }
+
+ ArrayRef<const char *> getGCCRegNames() const override {
+ static const char *const GCCRegNames[] = {
+ // CPU register names
+ // Must match second column of GCCRegAliases
+ "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7", "r8", "r9", "r10",
+ "r11", "r12", "r13", "r14", "r15", "r16", "r17", "r18", "r19", "r20",
+ "r21", "r22", "r23", "r24", "r25", "r26", "r27", "r28", "r29", "r30",
+ "r31",
+ // Floating point register names
+ "ctl0", "ctl1", "ctl2", "ctl3", "ctl4", "ctl5", "ctl6", "ctl7", "ctl8",
+ "ctl9", "ctl10", "ctl11", "ctl12", "ctl13", "ctl14", "ctl15"
+ };
+ return llvm::makeArrayRef(GCCRegNames);
+ }
+
+ bool validateAsmConstraint(const char *&Name,
+ TargetInfo::ConstraintInfo &Info) const override {
+ switch (*Name) {
+ default:
+ return false;
+
+ case 'r': // CPU registers.
+ case 'd': // Equivalent to "r" unless generating MIPS16 code.
+ case 'y': // Equivalent to "r", backwards compatibility only.
+ case 'f': // floating-point registers.
+ case 'c': // $25 for indirect jumps
+ case 'l': // lo register
+ case 'x': // hilo register pair
+ Info.setAllowsRegister();
+ return true;
+ }
+ }
+
+ const char *getClobbers() const override { return ""; }
+
+ ArrayRef<TargetInfo::GCCRegAlias> getGCCRegAliases() const override {
+ static const TargetInfo::GCCRegAlias aliases[] = {
+ {{"zero"}, "r0"}, {{"at"}, "r1"}, {{"et"}, "r24"},
+ {{"bt"}, "r25"}, {{"gp"}, "r26"}, {{"sp"}, "r27"},
+ {{"fp"}, "r28"}, {{"ea"}, "r29"}, {{"ba"}, "r30"},
+ {{"ra"}, "r31"}, {{"status"}, "ctl0"}, {{"estatus"}, "ctl1"},
+ {{"bstatus"}, "ctl2"}, {{"ienable"}, "ctl3"}, {{"ipending"}, "ctl4"},
+ {{"cpuid"}, "ctl5"}, {{"exception"}, "ctl7"}, {{"pteaddr"}, "ctl8"},
+ {{"tlbacc"}, "ctl9"}, {{"tlbmisc"}, "ctl10"}, {{"badaddr"}, "ctl12"},
+ {{"config"}, "ctl13"}, {{"mpubase"}, "ctl14"}, {{"mpuacc"}, "ctl15"},
+ };
+ return llvm::makeArrayRef(aliases);
+ }
+};
+
+const Builtin::Info Nios2TargetInfo::BuiltinInfo[] = {
+#define BUILTIN(ID, TYPE, ATTRS) \
+ {#ID, TYPE, ATTRS, nullptr, ALL_LANGUAGES, nullptr},
+#define TARGET_BUILTIN(ID, TYPE, ATTRS, FEATURE) \
+ {#ID, TYPE, ATTRS, nullptr, ALL_LANGUAGES, FEATURE},
+#include "clang/Basic/BuiltinsNios2.def"
};
class MipsTargetInfo : public TargetInfo {
@@ -7433,6 +8041,8 @@ class MipsTargetInfo : public TargetInfo {
bool IsMicromips;
bool IsNan2008;
bool IsSingleFloat;
+ bool IsNoABICalls;
+ bool CanUseBSDABICalls;
enum MipsFloatABI {
HardFloat, SoftFloat
} FloatABI;
@@ -7440,6 +8050,7 @@ class MipsTargetInfo : public TargetInfo {
NoDSP, DSP1, DSP2
} DspRev;
bool HasMSA;
+ bool DisableMadd4;
protected:
bool HasFP64;
@@ -7448,8 +8059,9 @@ protected:
public:
MipsTargetInfo(const llvm::Triple &Triple, const TargetOptions &)
: TargetInfo(Triple), IsMips16(false), IsMicromips(false),
- IsNan2008(false), IsSingleFloat(false), FloatABI(HardFloat),
- DspRev(NoDSP), HasMSA(false), HasFP64(false) {
+ IsNan2008(false), IsSingleFloat(false), IsNoABICalls(false),
+ CanUseBSDABICalls(false), FloatABI(HardFloat), DspRev(NoDSP),
+ HasMSA(false), DisableMadd4(false), HasFP64(false) {
TheCXXABI.set(TargetCXXABI::GenericMIPS);
setABI((getTriple().getArch() == llvm::Triple::mips ||
@@ -7458,6 +8070,9 @@ public:
: "n64");
CPU = ABI == "o32" ? "mips32r2" : "mips64r2";
+
+ CanUseBSDABICalls = Triple.getOS() == llvm::Triple::FreeBSD ||
+ Triple.getOS() == llvm::Triple::OpenBSD;
}
bool isNaN2008Default() const {
@@ -7534,7 +8149,11 @@ public:
void setN64ABITypes() {
setN32N64ABITypes();
- Int64Type = SignedLong;
+ if (getTriple().getOS() == llvm::Triple::OpenBSD) {
+ Int64Type = SignedLongLong;
+ } else {
+ Int64Type = SignedLong;
+ }
IntMaxType = Int64Type;
LongWidth = LongAlign = 64;
PointerWidth = PointerAlign = 64;
@@ -7638,6 +8257,12 @@ public:
} else
llvm_unreachable("Invalid ABI.");
+ if (!IsNoABICalls) {
+ Builder.defineMacro("__mips_abicalls");
+ if (CanUseBSDABICalls)
+ Builder.defineMacro("__ABICALLS__");
+ }
+
Builder.defineMacro("__REGISTER_PREFIX__", "");
switch (FloatABI) {
@@ -7682,6 +8307,9 @@ public:
if (HasMSA)
Builder.defineMacro("__mips_msa", Twine(1));
+ if (DisableMadd4)
+ Builder.defineMacro("__mips_no_madd4", Twine(1));
+
Builder.defineMacro("_MIPS_SZPTR", Twine(getPointerWidth(0)));
Builder.defineMacro("_MIPS_SZINT", Twine(getIntWidth()));
Builder.defineMacro("_MIPS_SZLONG", Twine(getLongWidth()));
@@ -7844,6 +8472,8 @@ public:
DspRev = std::max(DspRev, DSP2);
else if (Feature == "+msa")
HasMSA = true;
+ else if (Feature == "+nomadd4")
+ DisableMadd4 = true;
else if (Feature == "+fp64")
HasFP64 = true;
else if (Feature == "-fp64")
@@ -7852,6 +8482,8 @@ public:
IsNan2008 = true;
else if (Feature == "-nan2008")
IsNan2008 = false;
+ else if (Feature == "+noabicalls")
+ IsNoABICalls = true;
}
setDataLayout();
@@ -8174,7 +8806,7 @@ public:
explicit WebAssembly32TargetInfo(const llvm::Triple &T,
const TargetOptions &Opts)
: WebAssemblyTargetInfo(T, Opts) {
- MaxAtomicPromoteWidth = MaxAtomicInlineWidth = 32;
+ MaxAtomicPromoteWidth = MaxAtomicInlineWidth = 64;
resetDataLayout("e-m:e-p:32:32-i64:64-n32:64-S128");
}
@@ -8215,6 +8847,7 @@ const Builtin::Info Le64TargetInfo::BuiltinInfo[] = {
};
static const unsigned SPIRAddrSpaceMap[] = {
+ 0, // Default
1, // opencl_global
3, // opencl_local
2, // opencl_constant
@@ -8434,6 +9067,254 @@ public:
}
};
+/// Information about a specific microcontroller.
+struct MCUInfo {
+ const char *Name;
+ const char *DefineName;
+};
+
+// This list should be kept up-to-date with AVRDevices.td in LLVM.
+static ArrayRef<MCUInfo> AVRMcus = {
+ { "at90s1200", "__AVR_AT90S1200__" },
+ { "attiny11", "__AVR_ATtiny11__" },
+ { "attiny12", "__AVR_ATtiny12__" },
+ { "attiny15", "__AVR_ATtiny15__" },
+ { "attiny28", "__AVR_ATtiny28__" },
+ { "at90s2313", "__AVR_AT90S2313__" },
+ { "at90s2323", "__AVR_AT90S2323__" },
+ { "at90s2333", "__AVR_AT90S2333__" },
+ { "at90s2343", "__AVR_AT90S2343__" },
+ { "attiny22", "__AVR_ATtiny22__" },
+ { "attiny26", "__AVR_ATtiny26__" },
+ { "at86rf401", "__AVR_AT86RF401__" },
+ { "at90s4414", "__AVR_AT90S4414__" },
+ { "at90s4433", "__AVR_AT90S4433__" },
+ { "at90s4434", "__AVR_AT90S4434__" },
+ { "at90s8515", "__AVR_AT90S8515__" },
+ { "at90c8534", "__AVR_AT90c8534__" },
+ { "at90s8535", "__AVR_AT90S8535__" },
+ { "ata5272", "__AVR_ATA5272__" },
+ { "attiny13", "__AVR_ATtiny13__" },
+ { "attiny13a", "__AVR_ATtiny13A__" },
+ { "attiny2313", "__AVR_ATtiny2313__" },
+ { "attiny2313a", "__AVR_ATtiny2313A__" },
+ { "attiny24", "__AVR_ATtiny24__" },
+ { "attiny24a", "__AVR_ATtiny24A__" },
+ { "attiny4313", "__AVR_ATtiny4313__" },
+ { "attiny44", "__AVR_ATtiny44__" },
+ { "attiny44a", "__AVR_ATtiny44A__" },
+ { "attiny84", "__AVR_ATtiny84__" },
+ { "attiny84a", "__AVR_ATtiny84A__" },
+ { "attiny25", "__AVR_ATtiny25__" },
+ { "attiny45", "__AVR_ATtiny45__" },
+ { "attiny85", "__AVR_ATtiny85__" },
+ { "attiny261", "__AVR_ATtiny261__" },
+ { "attiny261a", "__AVR_ATtiny261A__" },
+ { "attiny461", "__AVR_ATtiny461__" },
+ { "attiny461a", "__AVR_ATtiny461A__" },
+ { "attiny861", "__AVR_ATtiny861__" },
+ { "attiny861a", "__AVR_ATtiny861A__" },
+ { "attiny87", "__AVR_ATtiny87__" },
+ { "attiny43u", "__AVR_ATtiny43U__" },
+ { "attiny48", "__AVR_ATtiny48__" },
+ { "attiny88", "__AVR_ATtiny88__" },
+ { "attiny828", "__AVR_ATtiny828__" },
+ { "at43usb355", "__AVR_AT43USB355__" },
+ { "at76c711", "__AVR_AT76C711__" },
+ { "atmega103", "__AVR_ATmega103__" },
+ { "at43usb320", "__AVR_AT43USB320__" },
+ { "attiny167", "__AVR_ATtiny167__" },
+ { "at90usb82", "__AVR_AT90USB82__" },
+ { "at90usb162", "__AVR_AT90USB162__" },
+ { "ata5505", "__AVR_ATA5505__" },
+ { "atmega8u2", "__AVR_ATmega8U2__" },
+ { "atmega16u2", "__AVR_ATmega16U2__" },
+ { "atmega32u2", "__AVR_ATmega32U2__" },
+ { "attiny1634", "__AVR_ATtiny1634__" },
+ { "atmega8", "__AVR_ATmega8__" },
+ { "ata6289", "__AVR_ATA6289__" },
+ { "atmega8a", "__AVR_ATmega8A__" },
+ { "ata6285", "__AVR_ATA6285__" },
+ { "ata6286", "__AVR_ATA6286__" },
+ { "atmega48", "__AVR_ATmega48__" },
+ { "atmega48a", "__AVR_ATmega48A__" },
+ { "atmega48pa", "__AVR_ATmega48PA__" },
+ { "atmega48p", "__AVR_ATmega48P__" },
+ { "atmega88", "__AVR_ATmega88__" },
+ { "atmega88a", "__AVR_ATmega88A__" },
+ { "atmega88p", "__AVR_ATmega88P__" },
+ { "atmega88pa", "__AVR_ATmega88PA__" },
+ { "atmega8515", "__AVR_ATmega8515__" },
+ { "atmega8535", "__AVR_ATmega8535__" },
+ { "atmega8hva", "__AVR_ATmega8HVA__" },
+ { "at90pwm1", "__AVR_AT90PWM1__" },
+ { "at90pwm2", "__AVR_AT90PWM2__" },
+ { "at90pwm2b", "__AVR_AT90PWM2B__" },
+ { "at90pwm3", "__AVR_AT90PWM3__" },
+ { "at90pwm3b", "__AVR_AT90PWM3B__" },
+ { "at90pwm81", "__AVR_AT90PWM81__" },
+ { "ata5790", "__AVR_ATA5790__" },
+ { "ata5795", "__AVR_ATA5795__" },
+ { "atmega16", "__AVR_ATmega16__" },
+ { "atmega16a", "__AVR_ATmega16A__" },
+ { "atmega161", "__AVR_ATmega161__" },
+ { "atmega162", "__AVR_ATmega162__" },
+ { "atmega163", "__AVR_ATmega163__" },
+ { "atmega164a", "__AVR_ATmega164A__" },
+ { "atmega164p", "__AVR_ATmega164P__" },
+ { "atmega164pa", "__AVR_ATmega164PA__" },
+ { "atmega165", "__AVR_ATmega165__" },
+ { "atmega165a", "__AVR_ATmega165A__" },
+ { "atmega165p", "__AVR_ATmega165P__" },
+ { "atmega165pa", "__AVR_ATmega165PA__" },
+ { "atmega168", "__AVR_ATmega168__" },
+ { "atmega168a", "__AVR_ATmega168A__" },
+ { "atmega168p", "__AVR_ATmega168P__" },
+ { "atmega168pa", "__AVR_ATmega168PA__" },
+ { "atmega169", "__AVR_ATmega169__" },
+ { "atmega169a", "__AVR_ATmega169A__" },
+ { "atmega169p", "__AVR_ATmega169P__" },
+ { "atmega169pa", "__AVR_ATmega169PA__" },
+ { "atmega32", "__AVR_ATmega32__" },
+ { "atmega32a", "__AVR_ATmega32A__" },
+ { "atmega323", "__AVR_ATmega323__" },
+ { "atmega324a", "__AVR_ATmega324A__" },
+ { "atmega324p", "__AVR_ATmega324P__" },
+ { "atmega324pa", "__AVR_ATmega324PA__" },
+ { "atmega325", "__AVR_ATmega325__" },
+ { "atmega325a", "__AVR_ATmega325A__" },
+ { "atmega325p", "__AVR_ATmega325P__" },
+ { "atmega325pa", "__AVR_ATmega325PA__" },
+ { "atmega3250", "__AVR_ATmega3250__" },
+ { "atmega3250a", "__AVR_ATmega3250A__" },
+ { "atmega3250p", "__AVR_ATmega3250P__" },
+ { "atmega3250pa", "__AVR_ATmega3250PA__" },
+ { "atmega328", "__AVR_ATmega328__" },
+ { "atmega328p", "__AVR_ATmega328P__" },
+ { "atmega329", "__AVR_ATmega329__" },
+ { "atmega329a", "__AVR_ATmega329A__" },
+ { "atmega329p", "__AVR_ATmega329P__" },
+ { "atmega329pa", "__AVR_ATmega329PA__" },
+ { "atmega3290", "__AVR_ATmega3290__" },
+ { "atmega3290a", "__AVR_ATmega3290A__" },
+ { "atmega3290p", "__AVR_ATmega3290P__" },
+ { "atmega3290pa", "__AVR_ATmega3290PA__" },
+ { "atmega406", "__AVR_ATmega406__" },
+ { "atmega64", "__AVR_ATmega64__" },
+ { "atmega64a", "__AVR_ATmega64A__" },
+ { "atmega640", "__AVR_ATmega640__" },
+ { "atmega644", "__AVR_ATmega644__" },
+ { "atmega644a", "__AVR_ATmega644A__" },
+ { "atmega644p", "__AVR_ATmega644P__" },
+ { "atmega644pa", "__AVR_ATmega644PA__" },
+ { "atmega645", "__AVR_ATmega645__" },
+ { "atmega645a", "__AVR_ATmega645A__" },
+ { "atmega645p", "__AVR_ATmega645P__" },
+ { "atmega649", "__AVR_ATmega649__" },
+ { "atmega649a", "__AVR_ATmega649A__" },
+ { "atmega649p", "__AVR_ATmega649P__" },
+ { "atmega6450", "__AVR_ATmega6450__" },
+ { "atmega6450a", "__AVR_ATmega6450A__" },
+ { "atmega6450p", "__AVR_ATmega6450P__" },
+ { "atmega6490", "__AVR_ATmega6490__" },
+ { "atmega6490a", "__AVR_ATmega6490A__" },
+ { "atmega6490p", "__AVR_ATmega6490P__" },
+ { "atmega64rfr2", "__AVR_ATmega64RFR2__" },
+ { "atmega644rfr2", "__AVR_ATmega644RFR2__" },
+ { "atmega16hva", "__AVR_ATmega16HVA__" },
+ { "atmega16hva2", "__AVR_ATmega16HVA2__" },
+ { "atmega16hvb", "__AVR_ATmega16HVB__" },
+ { "atmega16hvbrevb", "__AVR_ATmega16HVBREVB__" },
+ { "atmega32hvb", "__AVR_ATmega32HVB__" },
+ { "atmega32hvbrevb", "__AVR_ATmega32HVBREVB__" },
+ { "atmega64hve", "__AVR_ATmega64HVE__" },
+ { "at90can32", "__AVR_AT90CAN32__" },
+ { "at90can64", "__AVR_AT90CAN64__" },
+ { "at90pwm161", "__AVR_AT90PWM161__" },
+ { "at90pwm216", "__AVR_AT90PWM216__" },
+ { "at90pwm316", "__AVR_AT90PWM316__" },
+ { "atmega32c1", "__AVR_ATmega32C1__" },
+ { "atmega64c1", "__AVR_ATmega64C1__" },
+ { "atmega16m1", "__AVR_ATmega16M1__" },
+ { "atmega32m1", "__AVR_ATmega32M1__" },
+ { "atmega64m1", "__AVR_ATmega64M1__" },
+ { "atmega16u4", "__AVR_ATmega16U4__" },
+ { "atmega32u4", "__AVR_ATmega32U4__" },
+ { "atmega32u6", "__AVR_ATmega32U6__" },
+ { "at90usb646", "__AVR_AT90USB646__" },
+ { "at90usb647", "__AVR_AT90USB647__" },
+ { "at90scr100", "__AVR_AT90SCR100__" },
+ { "at94k", "__AVR_AT94K__" },
+ { "m3000", "__AVR_AT000__" },
+ { "atmega128", "__AVR_ATmega128__" },
+ { "atmega128a", "__AVR_ATmega128A__" },
+ { "atmega1280", "__AVR_ATmega1280__" },
+ { "atmega1281", "__AVR_ATmega1281__" },
+ { "atmega1284", "__AVR_ATmega1284__" },
+ { "atmega1284p", "__AVR_ATmega1284P__" },
+ { "atmega128rfa1", "__AVR_ATmega128RFA1__" },
+ { "atmega128rfr2", "__AVR_ATmega128RFR2__" },
+ { "atmega1284rfr2", "__AVR_ATmega1284RFR2__" },
+ { "at90can128", "__AVR_AT90CAN128__" },
+ { "at90usb1286", "__AVR_AT90USB1286__" },
+ { "at90usb1287", "__AVR_AT90USB1287__" },
+ { "atmega2560", "__AVR_ATmega2560__" },
+ { "atmega2561", "__AVR_ATmega2561__" },
+ { "atmega256rfr2", "__AVR_ATmega256RFR2__" },
+ { "atmega2564rfr2", "__AVR_ATmega2564RFR2__" },
+ { "atxmega16a4", "__AVR_ATxmega16A4__" },
+ { "atxmega16a4u", "__AVR_ATxmega16a4U__" },
+ { "atxmega16c4", "__AVR_ATxmega16C4__" },
+ { "atxmega16d4", "__AVR_ATxmega16D4__" },
+ { "atxmega32a4", "__AVR_ATxmega32A4__" },
+ { "atxmega32a4u", "__AVR_ATxmega32A4U__" },
+ { "atxmega32c4", "__AVR_ATxmega32C4__" },
+ { "atxmega32d4", "__AVR_ATxmega32D4__" },
+ { "atxmega32e5", "__AVR_ATxmega32E5__" },
+ { "atxmega16e5", "__AVR_ATxmega16E5__" },
+ { "atxmega8e5", "__AVR_ATxmega8E5__" },
+ { "atxmega32x1", "__AVR_ATxmega32X1__" },
+ { "atxmega64a3", "__AVR_ATxmega64A3__" },
+ { "atxmega64a3u", "__AVR_ATxmega64A3U__" },
+ { "atxmega64a4u", "__AVR_ATxmega64A4U__" },
+ { "atxmega64b1", "__AVR_ATxmega64B1__" },
+ { "atxmega64b3", "__AVR_ATxmega64B3__" },
+ { "atxmega64c3", "__AVR_ATxmega64C3__" },
+ { "atxmega64d3", "__AVR_ATxmega64D3__" },
+ { "atxmega64d4", "__AVR_ATxmega64D4__" },
+ { "atxmega64a1", "__AVR_ATxmega64A1__" },
+ { "atxmega64a1u", "__AVR_ATxmega64A1U__" },
+ { "atxmega128a3", "__AVR_ATxmega128A3__" },
+ { "atxmega128a3u", "__AVR_ATxmega128A3U__" },
+ { "atxmega128b1", "__AVR_ATxmega128B1__" },
+ { "atxmega128b3", "__AVR_ATxmega128B3__" },
+ { "atxmega128c3", "__AVR_ATxmega128C3__" },
+ { "atxmega128d3", "__AVR_ATxmega128D3__" },
+ { "atxmega128d4", "__AVR_ATxmega128D4__" },
+ { "atxmega192a3", "__AVR_ATxmega192A3__" },
+ { "atxmega192a3u", "__AVR_ATxmega192A3U__" },
+ { "atxmega192c3", "__AVR_ATxmega192C3__" },
+ { "atxmega192d3", "__AVR_ATxmega192D3__" },
+ { "atxmega256a3", "__AVR_ATxmega256A3__" },
+ { "atxmega256a3u", "__AVR_ATxmega256A3U__" },
+ { "atxmega256a3b", "__AVR_ATxmega256A3B__" },
+ { "atxmega256a3bu", "__AVR_ATxmega256A3BU__" },
+ { "atxmega256c3", "__AVR_ATxmega256C3__" },
+ { "atxmega256d3", "__AVR_ATxmega256D3__" },
+ { "atxmega384c3", "__AVR_ATxmega384C3__" },
+ { "atxmega384d3", "__AVR_ATxmega384D3__" },
+ { "atxmega128a1", "__AVR_ATxmega128A1__" },
+ { "atxmega128a1u", "__AVR_ATxmega128A1U__" },
+ { "atxmega128a4u", "__AVR_ATxmega128a4U__" },
+ { "attiny4", "__AVR_ATtiny4__" },
+ { "attiny5", "__AVR_ATtiny5__" },
+ { "attiny9", "__AVR_ATtiny9__" },
+ { "attiny10", "__AVR_ATtiny10__" },
+ { "attiny20", "__AVR_ATtiny20__" },
+ { "attiny40", "__AVR_ATtiny40__" },
+ { "attiny102", "__AVR_ATtiny102__" },
+ { "attiny104", "__AVR_ATtiny104__" },
+};
// AVR Target
class AVRTargetInfo : public TargetInfo {
@@ -8475,7 +9356,17 @@ public:
void getTargetDefines(const LangOptions &Opts,
MacroBuilder &Builder) const override {
+ Builder.defineMacro("AVR");
+ Builder.defineMacro("__AVR");
Builder.defineMacro("__AVR__");
+
+ if (!this->CPU.empty()) {
+ auto It = std::find_if(AVRMcus.begin(), AVRMcus.end(),
+ [&](const MCUInfo &Info) { return Info.Name == this->CPU; });
+
+ if (It != AVRMcus.end())
+ Builder.defineMacro(It->DefineName);
+ }
}
ArrayRef<Builtin::Info> getTargetBuiltins() const override {
@@ -8516,6 +9407,57 @@ public:
bool validateAsmConstraint(const char *&Name,
TargetInfo::ConstraintInfo &Info) const override {
+ // There aren't any multi-character AVR specific constraints.
+ if (StringRef(Name).size() > 1) return false;
+
+ switch (*Name) {
+ default: return false;
+ case 'a': // Simple upper registers
+ case 'b': // Base pointer registers pairs
+ case 'd': // Upper register
+ case 'l': // Lower registers
+ case 'e': // Pointer register pairs
+ case 'q': // Stack pointer register
+ case 'r': // Any register
+ case 'w': // Special upper register pairs
+ case 't': // Temporary register
+ case 'x': case 'X': // Pointer register pair X
+ case 'y': case 'Y': // Pointer register pair Y
+ case 'z': case 'Z': // Pointer register pair Z
+ Info.setAllowsRegister();
+ return true;
+ case 'I': // 6-bit positive integer constant
+ Info.setRequiresImmediate(0, 63);
+ return true;
+ case 'J': // 6-bit negative integer constant
+ Info.setRequiresImmediate(-63, 0);
+ return true;
+ case 'K': // Integer constant (Range: 2)
+ Info.setRequiresImmediate(2);
+ return true;
+ case 'L': // Integer constant (Range: 0)
+ Info.setRequiresImmediate(0);
+ return true;
+ case 'M': // 8-bit integer constant
+ Info.setRequiresImmediate(0, 0xff);
+ return true;
+ case 'N': // Integer constant (Range: -1)
+ Info.setRequiresImmediate(-1);
+ return true;
+ case 'O': // Integer constant (Range: 8, 16, 24)
+ Info.setRequiresImmediate({8, 16, 24});
+ return true;
+ case 'P': // Integer constant (Range: 1)
+ Info.setRequiresImmediate(1);
+ return true;
+ case 'R': // Integer constant (Range: -6 to 5)
+ Info.setRequiresImmediate(-6, 5);
+ return true;
+ case 'G': // Floating point constant
+ case 'Q': // A memory address based on Y or Z pointer with displacement.
+ return true;
+ }
+
return false;
}
@@ -8533,6 +9475,41 @@ public:
? (IsSigned ? SignedInt : UnsignedInt)
: TargetInfo::getLeastIntTypeByWidth(BitWidth, IsSigned);
}
+
+ bool setCPU(const std::string &Name) override {
+ bool IsFamily = llvm::StringSwitch<bool>(Name)
+ .Case("avr1", true)
+ .Case("avr2", true)
+ .Case("avr25", true)
+ .Case("avr3", true)
+ .Case("avr31", true)
+ .Case("avr35", true)
+ .Case("avr4", true)
+ .Case("avr5", true)
+ .Case("avr51", true)
+ .Case("avr6", true)
+ .Case("avrxmega1", true)
+ .Case("avrxmega2", true)
+ .Case("avrxmega3", true)
+ .Case("avrxmega4", true)
+ .Case("avrxmega5", true)
+ .Case("avrxmega6", true)
+ .Case("avrxmega7", true)
+ .Case("avrtiny", true)
+ .Default(false);
+
+ if (IsFamily) this->CPU = Name;
+
+ bool IsMCU = std::find_if(AVRMcus.begin(), AVRMcus.end(),
+ [&](const MCUInfo &Info) { return Info.Name == Name; }) != AVRMcus.end();
+
+ if (IsMCU) this->CPU = Name;
+
+ return IsFamily || IsMCU;
+ }
+
+protected:
+ std::string CPU;
};
} // end anonymous namespace
@@ -8575,6 +9552,8 @@ static TargetInfo *AllocateTarget(const llvm::Triple &Triple,
return new NetBSDTargetInfo<AArch64leTargetInfo>(Triple, Opts);
case llvm::Triple::OpenBSD:
return new OpenBSDTargetInfo<AArch64leTargetInfo>(Triple, Opts);
+ case llvm::Triple::Win32:
+ return new MicrosoftARM64TargetInfo(Triple, Opts);
default:
return new AArch64leTargetInfo(Triple, Opts);
}
@@ -8605,8 +9584,6 @@ static TargetInfo *AllocateTarget(const llvm::Triple &Triple,
return new LinuxTargetInfo<ARMleTargetInfo>(Triple, Opts);
case llvm::Triple::FreeBSD:
return new FreeBSDTargetInfo<ARMleTargetInfo>(Triple, Opts);
- case llvm::Triple::Fuchsia:
- return new FuchsiaTargetInfo<ARMleTargetInfo>(Triple, Opts);
case llvm::Triple::NetBSD:
return new NetBSDTargetInfo<ARMleTargetInfo>(Triple, Opts);
case llvm::Triple::OpenBSD:
@@ -8643,8 +9620,6 @@ static TargetInfo *AllocateTarget(const llvm::Triple &Triple,
return new LinuxTargetInfo<ARMbeTargetInfo>(Triple, Opts);
case llvm::Triple::FreeBSD:
return new FreeBSDTargetInfo<ARMbeTargetInfo>(Triple, Opts);
- case llvm::Triple::Fuchsia:
- return new FuchsiaTargetInfo<ARMbeTargetInfo>(Triple, Opts);
case llvm::Triple::NetBSD:
return new NetBSDTargetInfo<ARMbeTargetInfo>(Triple, Opts);
case llvm::Triple::OpenBSD:
@@ -8668,6 +9643,9 @@ static TargetInfo *AllocateTarget(const llvm::Triple &Triple,
case llvm::Triple::msp430:
return new MSP430TargetInfo(Triple, Opts);
+ case llvm::Triple::nios2:
+ return new LinuxTargetInfo<Nios2TargetInfo>(Triple, Opts);
+
case llvm::Triple::mips:
switch (os) {
case llvm::Triple::Linux:
@@ -8860,6 +9838,8 @@ static TargetInfo *AllocateTarget(const llvm::Triple &Triple,
return new DarwinI386TargetInfo(Triple, Opts);
switch (os) {
+ case llvm::Triple::Ananas:
+ return new AnanasTargetInfo<X86_32TargetInfo>(Triple, Opts);
case llvm::Triple::CloudABI:
return new CloudABITargetInfo<X86_32TargetInfo>(Triple, Opts);
case llvm::Triple::Linux: {
@@ -8880,8 +9860,6 @@ static TargetInfo *AllocateTarget(const llvm::Triple &Triple,
return new BitrigI386TargetInfo(Triple, Opts);
case llvm::Triple::FreeBSD:
return new FreeBSDTargetInfo<X86_32TargetInfo>(Triple, Opts);
- case llvm::Triple::Fuchsia:
- return new FuchsiaTargetInfo<X86_32TargetInfo>(Triple, Opts);
case llvm::Triple::KFreeBSD:
return new KFreeBSDTargetInfo<X86_32TargetInfo>(Triple, Opts);
case llvm::Triple::Minix:
@@ -8917,6 +9895,8 @@ static TargetInfo *AllocateTarget(const llvm::Triple &Triple,
return new DarwinX86_64TargetInfo(Triple, Opts);
switch (os) {
+ case llvm::Triple::Ananas:
+ return new AnanasTargetInfo<X86_64TargetInfo>(Triple, Opts);
case llvm::Triple::CloudABI:
return new CloudABITargetInfo<X86_64TargetInfo>(Triple, Opts);
case llvm::Triple::Linux: {
@@ -8977,11 +9957,19 @@ static TargetInfo *AllocateTarget(const llvm::Triple &Triple,
return new SPIR64TargetInfo(Triple, Opts);
}
case llvm::Triple::wasm32:
- if (!(Triple == llvm::Triple("wasm32-unknown-unknown")))
+ if (Triple.getSubArch() != llvm::Triple::NoSubArch ||
+ Triple.getVendor() != llvm::Triple::UnknownVendor ||
+ Triple.getOS() != llvm::Triple::UnknownOS ||
+ Triple.getEnvironment() != llvm::Triple::UnknownEnvironment ||
+ !(Triple.isOSBinFormatELF() || Triple.isOSBinFormatWasm()))
return nullptr;
return new WebAssemblyOSTargetInfo<WebAssembly32TargetInfo>(Triple, Opts);
case llvm::Triple::wasm64:
- if (!(Triple == llvm::Triple("wasm64-unknown-unknown")))
+ if (Triple.getSubArch() != llvm::Triple::NoSubArch ||
+ Triple.getVendor() != llvm::Triple::UnknownVendor ||
+ Triple.getOS() != llvm::Triple::UnknownOS ||
+ Triple.getEnvironment() != llvm::Triple::UnknownEnvironment ||
+ !(Triple.isOSBinFormatELF() || Triple.isOSBinFormatWasm()))
return nullptr;
return new WebAssemblyOSTargetInfo<WebAssembly64TargetInfo>(Triple, Opts);
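
A minimal standalone sketch (not part of the patch) of the cumulative-define pattern the X86TargetInfo::getTargetDefines hunks above rely on: every case intentionally falls through, so selecting a higher SSE level also defines the macros of all lower levels, and the added LLVM_FALLTHROUGH markers merely document that intent for -Wimplicit-fallthrough. The enum, function name and use of plain [[fallthrough]] below are hypothetical stand-ins, not LLVM code.

#include <iostream>
#include <string>
#include <vector>

enum SSELevel { NoSSE, SSE1, SSE2, SSE3, SSSE3, SSE41, SSE42, AVX, AVX2, AVX512F };

// Collect the macros a given SSE level implies, mirroring the switch above.
static std::vector<std::string> cumulativeDefines(SSELevel Level) {
  std::vector<std::string> Macros;
  switch (Level) {
  case AVX512F: Macros.push_back("__AVX512F__"); [[fallthrough]];
  case AVX2:    Macros.push_back("__AVX2__");    [[fallthrough]];
  case AVX:     Macros.push_back("__AVX__");     [[fallthrough]];
  case SSE42:   Macros.push_back("__SSE4_2__");  [[fallthrough]];
  case SSE41:   Macros.push_back("__SSE4_1__");  [[fallthrough]];
  case SSSE3:   Macros.push_back("__SSSE3__");   [[fallthrough]];
  case SSE3:    Macros.push_back("__SSE3__");    [[fallthrough]];
  case SSE2:    Macros.push_back("__SSE2__");    [[fallthrough]];
  case SSE1:    Macros.push_back("__SSE__");     [[fallthrough]];
  case NoSSE:   break;
  }
  return Macros;
}

int main() {
  for (const std::string &M : cumulativeDefines(AVX2))
    std::cout << M << '\n'; // __AVX2__, __AVX__, __SSE4_2__, ..., __SSE__
}
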
diff --git a/gnu/llvm/tools/clang/lib/Driver/ToolChains/Arch/AArch64.cpp b/gnu/llvm/tools/clang/lib/Driver/ToolChains/Arch/AArch64.cpp
index 554d051fb15..582b31f7359 100644
--- a/gnu/llvm/tools/clang/lib/Driver/ToolChains/Arch/AArch64.cpp
+++ b/gnu/llvm/tools/clang/lib/Driver/ToolChains/Arch/AArch64.cpp
@@ -145,7 +145,9 @@ getAArch64MicroArchFeaturesFromMcpu(const Driver &D, StringRef Mcpu,
return getAArch64MicroArchFeaturesFromMtune(D, CPU, Args, Features);
}
-void aarch64::getAArch64TargetFeatures(const Driver &D, const ArgList &Args,
+void aarch64::getAArch64TargetFeatures(const Driver &D,
+ const llvm::Triple &Triple,
+ const ArgList &Args,
std::vector<StringRef> &Features) {
Arg *A;
bool success = true;
@@ -187,9 +189,11 @@ void aarch64::getAArch64TargetFeatures(const Driver &D, const ArgList &Args,
}
if (Arg *A = Args.getLastArg(options::OPT_mno_unaligned_access,
- options::OPT_munaligned_access))
+ options::OPT_munaligned_access)) {
if (A->getOption().matches(options::OPT_mno_unaligned_access))
Features.push_back("+strict-align");
+ } else if (Triple.isOSOpenBSD())
+ Features.push_back("+strict-align");
if (Args.hasArg(options::OPT_ffixed_x18))
Features.push_back("+reserve-x18");
diff --git a/gnu/llvm/tools/clang/lib/Driver/ToolChains/Arch/AArch64.h b/gnu/llvm/tools/clang/lib/Driver/ToolChains/Arch/AArch64.h
index 62e419cc19f..e3187c9af0e 100644
--- a/gnu/llvm/tools/clang/lib/Driver/ToolChains/Arch/AArch64.h
+++ b/gnu/llvm/tools/clang/lib/Driver/ToolChains/Arch/AArch64.h
@@ -21,7 +21,8 @@ namespace driver {
namespace tools {
namespace aarch64 {
-void getAArch64TargetFeatures(const Driver &D, const llvm::opt::ArgList &Args,
+void getAArch64TargetFeatures(const Driver &D, const llvm::Triple &Triple,
+ const llvm::opt::ArgList &Args,
std::vector<llvm::StringRef> &Features);
std::string getAArch64TargetCPU(const llvm::opt::ArgList &Args,
diff --git a/gnu/llvm/tools/clang/lib/Driver/ToolChains/Clang.cpp b/gnu/llvm/tools/clang/lib/Driver/ToolChains/Clang.cpp
index 6a6b90f8682..83185d5b959 100644
--- a/gnu/llvm/tools/clang/lib/Driver/ToolChains/Clang.cpp
+++ b/gnu/llvm/tools/clang/lib/Driver/ToolChains/Clang.cpp
@@ -342,7 +342,7 @@ static void getTargetFeatures(const ToolChain &TC, const llvm::Triple &Triple,
break;
case llvm::Triple::aarch64:
case llvm::Triple::aarch64_be:
- aarch64::getAArch64TargetFeatures(D, Args, Features);
+ aarch64::getAArch64TargetFeatures(D, Triple, Args, Features);
break;
case llvm::Triple::x86:
case llvm::Triple::x86_64:
@@ -2304,9 +2304,12 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
OFastEnabled ? options::OPT_Ofast : options::OPT_fstrict_aliasing;
// We turn strict aliasing off by default if we're in CL mode, since MSVC
// doesn't do any TBAA.
- bool TBAAOnByDefault = !getToolChain().getDriver().IsCLMode();
+ bool StrictAliasingDefault = !getToolChain().getDriver().IsCLMode();
+ // We also turn off strict aliasing on OpenBSD.
+ if (getToolChain().getTriple().isOSOpenBSD())
+ StrictAliasingDefault = false;
if (!Args.hasFlag(options::OPT_fstrict_aliasing, StrictAliasingAliasOption,
- options::OPT_fno_strict_aliasing, TBAAOnByDefault))
+ options::OPT_fno_strict_aliasing, StrictAliasingDefault))
CmdArgs.push_back("-relaxed-aliasing");
if (!Args.hasFlag(options::OPT_fstruct_path_tbaa,
options::OPT_fno_struct_path_tbaa))
@@ -3292,7 +3295,8 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
options::OPT_fno_strict_overflow)) {
if (A->getOption().matches(options::OPT_fno_strict_overflow))
CmdArgs.push_back("-fwrapv");
- }
+ } else if (getToolChain().getTriple().isOSOpenBSD())
+ CmdArgs.push_back("-fwrapv");
if (Arg *A = Args.getLastArg(options::OPT_freroll_loops,
options::OPT_fno_reroll_loops))
@@ -4234,6 +4238,18 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
if (RewriteImports)
CmdArgs.push_back("-frewrite-imports");
+ // Disable some builtins on OpenBSD because they are just not
+ // right...
+ if (getToolChain().getTriple().isOSOpenBSD()) {
+ CmdArgs.push_back("-fno-builtin-malloc");
+ CmdArgs.push_back("-fno-builtin-calloc");
+ CmdArgs.push_back("-fno-builtin-realloc");
+ CmdArgs.push_back("-fno-builtin-valloc");
+ CmdArgs.push_back("-fno-builtin-free");
+ CmdArgs.push_back("-fno-builtin-strdup");
+ CmdArgs.push_back("-fno-builtin-strndup");
+ }
+
// Enable rewrite includes if the user's asked for it or if we're generating
// diagnostics.
// TODO: Once -module-dependency-dir works with -frewrite-includes it'd be
diff --git a/gnu/llvm/tools/clang/lib/Driver/ToolChains/OpenBSD.cpp b/gnu/llvm/tools/clang/lib/Driver/ToolChains/OpenBSD.cpp
index 1d54a1e9cbb..7a5e4e76dd7 100644
--- a/gnu/llvm/tools/clang/lib/Driver/ToolChains/OpenBSD.cpp
+++ b/gnu/llvm/tools/clang/lib/Driver/ToolChains/OpenBSD.cpp
@@ -12,6 +12,8 @@
#include "Arch/Sparc.h"
#include "CommonArgs.h"
#include "clang/Driver/Compilation.h"
+#include "clang/Driver/Driver.h"
+#include "clang/Driver/DriverDiagnostic.h"
#include "clang/Driver/Options.h"
#include "llvm/Option/ArgList.h"
@@ -165,14 +167,10 @@ void openbsd::Linker::ConstructJob(Compilation &C, const JobAction &JA,
}
}
- std::string Triple = getToolChain().getTripleString();
- if (Triple.substr(0, 6) == "x86_64")
- Triple.replace(0, 6, "amd64");
- CmdArgs.push_back(
- Args.MakeArgString("-L/usr/lib/gcc-lib/" + Triple + "/4.2.1"));
-
- Args.AddAllArgs(CmdArgs, {options::OPT_L, options::OPT_T_Group,
- options::OPT_e, options::OPT_s, options::OPT_t,
+ Args.AddAllArgs(CmdArgs, options::OPT_L);
+ getToolChain().AddFilePathLibArgs(Args, CmdArgs);
+ Args.AddAllArgs(CmdArgs, {options::OPT_T_Group, options::OPT_e,
+ options::OPT_s, options::OPT_t,
options::OPT_Z_Flag, options::OPT_r});
AddLinkerInputs(getToolChain(), Inputs, Args, CmdArgs, JA);
@@ -188,7 +186,7 @@ void openbsd::Linker::ConstructJob(Compilation &C, const JobAction &JA,
// FIXME: For some reason GCC passes -lgcc before adding
// the default system libraries. Just mimic this for now.
- CmdArgs.push_back("-lgcc");
+ CmdArgs.push_back("-lcompiler_rt");
if (Args.hasArg(options::OPT_pthread)) {
if (!Args.hasArg(options::OPT_shared) && Args.hasArg(options::OPT_pg))
@@ -204,7 +202,7 @@ void openbsd::Linker::ConstructJob(Compilation &C, const JobAction &JA,
CmdArgs.push_back("-lc");
}
- CmdArgs.push_back("-lgcc");
+ CmdArgs.push_back("-lcompiler_rt");
}
if (!Args.hasArg(options::OPT_nostdlib, options::OPT_nostartfiles)) {
@@ -226,7 +224,7 @@ OpenBSD::OpenBSD(const Driver &D, const llvm::Triple &Triple,
const ArgList &Args)
: Generic_ELF(D, Triple, Args) {
getFilePaths().push_back(getDriver().Dir + "/../lib");
- getFilePaths().push_back("/usr/lib");
+ getFilePaths().push_back(getDriver().SysRoot + "/usr/lib");
}
Tool *OpenBSD::buildAssembler() const {
@@ -234,3 +232,56 @@ Tool *OpenBSD::buildAssembler() const {
}
Tool *OpenBSD::buildLinker() const { return new tools::openbsd::Linker(*this); }
+
+ToolChain::CXXStdlibType OpenBSD::GetCXXStdlibType(const ArgList &Args) const {
+ if (Arg *A = Args.getLastArg(options::OPT_stdlib_EQ)) {
+ StringRef Value = A->getValue();
+ if (Value == "libstdc++")
+ return ToolChain::CST_Libstdcxx;
+ if (Value == "libc++")
+ return ToolChain::CST_Libcxx;
+
+ getDriver().Diag(clang::diag::err_drv_invalid_stdlib_name)
+ << A->getAsString(Args);
+ }
+ return ToolChain::CST_Libcxx;
+}
+
+void OpenBSD::AddClangCXXStdlibIncludeArgs(const ArgList &DriverArgs,
+ ArgStringList &CC1Args) const {
+ if (DriverArgs.hasArg(options::OPT_nostdlibinc) ||
+ DriverArgs.hasArg(options::OPT_nostdincxx))
+ return;
+
+ switch (GetCXXStdlibType(DriverArgs)) {
+ case ToolChain::CST_Libcxx:
+ addSystemInclude(DriverArgs, CC1Args,
+ getDriver().SysRoot + "/usr/include/c++/v1");
+ break;
+ case ToolChain::CST_Libstdcxx:
+ std::string Triple = getTriple().str();
+ if (Triple.substr(0, 6) == "x86_64")
+ Triple.replace(0, 6, "amd64");
+ addSystemInclude(DriverArgs, CC1Args,
+ getDriver().SysRoot + "/usr/include/g++");
+ addSystemInclude(DriverArgs, CC1Args,
+ getDriver().SysRoot + "/usr/include/g++/" + Triple);
+ addSystemInclude(DriverArgs, CC1Args,
+ getDriver().SysRoot + "/usr/include/g++/backward");
+ break;
+ }
+}
+
+void OpenBSD::AddCXXStdlibLibArgs(const ArgList &Args,
+ ArgStringList &CmdArgs) const {
+ switch (GetCXXStdlibType(Args)) {
+ case ToolChain::CST_Libcxx:
+ CmdArgs.push_back("-lc++");
+ CmdArgs.push_back("-lc++abi");
+ CmdArgs.push_back("-lpthread");
+ break;
+ case ToolChain::CST_Libstdcxx:
+ CmdArgs.push_back("-lstdc++");
+ break;
+ }
+}
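
An illustrative sketch (not part of the patch) of what the new OpenBSD toolchain hooks above amount to at link time: GetCXXStdlibType() defaults to libc++ unless -stdlib=libstdc++ is given, and AddCXXStdlibLibArgs() then appends the matching libraries to the link line. The enum and helper name below are hypothetical simplifications of that logic.

#include <iostream>
#include <string>
#include <vector>

enum class CXXStdlib { Libcxx, Libstdcxx };

// Hypothetical helper mirroring OpenBSD::AddCXXStdlibLibArgs above.
static std::vector<std::string> stdlibLinkArgs(CXXStdlib Kind) {
  if (Kind == CXXStdlib::Libcxx)
    return {"-lc++", "-lc++abi", "-lpthread"}; // the new OpenBSD default
  return {"-lstdc++"};                         // selected via -stdlib=libstdc++
}

int main() {
  for (const std::string &A : stdlibLinkArgs(CXXStdlib::Libcxx))
    std::cout << A << ' ';
  std::cout << '\n';
}
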
diff --git a/gnu/llvm/tools/clang/lib/Driver/ToolChains/OpenBSD.h b/gnu/llvm/tools/clang/lib/Driver/ToolChains/OpenBSD.h
index 1cc0ca71984..750c87b58d8 100644
--- a/gnu/llvm/tools/clang/lib/Driver/ToolChains/OpenBSD.h
+++ b/gnu/llvm/tools/clang/lib/Driver/ToolChains/OpenBSD.h
@@ -59,6 +59,13 @@ public:
bool IsObjCNonFragileABIDefault() const override { return true; }
bool isPIEDefault() const override { return true; }
+ CXXStdlibType GetCXXStdlibType(const llvm::opt::ArgList &Args) const override;
+ void AddClangCXXStdlibIncludeArgs(
+ const llvm::opt::ArgList &DriverArgs,
+ llvm::opt::ArgStringList &CC1Args) const override;
+ void AddCXXStdlibLibArgs(const llvm::opt::ArgList &Args,
+ llvm::opt::ArgStringList &CmdArgs) const override;
+
unsigned GetDefaultStackProtectorLevel(bool KernelOrKext) const override {
return 2;
}
diff --git a/gnu/llvm/tools/clang/lib/Frontend/InitHeaderSearch.cpp b/gnu/llvm/tools/clang/lib/Frontend/InitHeaderSearch.cpp
index 66098f45bd0..2649ff8c39f 100644
--- a/gnu/llvm/tools/clang/lib/Frontend/InitHeaderSearch.cpp
+++ b/gnu/llvm/tools/clang/lib/Frontend/InitHeaderSearch.cpp
@@ -221,6 +221,7 @@ void InitHeaderSearch::AddDefaultCIncludePaths(const llvm::Triple &triple,
case llvm::Triple::Win32:
if (triple.getEnvironment() != llvm::Triple::Cygnus)
break;
+ LLVM_FALLTHROUGH;
default:
// FIXME: temporary hack: hard-coded paths.
AddPath("/usr/local/include", System, false);
@@ -343,6 +344,7 @@ void InitHeaderSearch::AddDefaultCIncludePaths(const llvm::Triple &triple,
AddPath(BaseSDKPath + "/target/include", System, false);
if (triple.isPS4CPU())
AddPath(BaseSDKPath + "/target/include_common", System, false);
+ LLVM_FALLTHROUGH;
}
default:
AddPath("/usr/include", ExternCSystem, false);
diff --git a/gnu/llvm/tools/clang/lib/Sema/SemaChecking.cpp b/gnu/llvm/tools/clang/lib/Sema/SemaChecking.cpp
index a94f93bbd42..06e6ecf5516 100644
--- a/gnu/llvm/tools/clang/lib/Sema/SemaChecking.cpp
+++ b/gnu/llvm/tools/clang/lib/Sema/SemaChecking.cpp
@@ -244,7 +244,7 @@ static bool SemaBuiltinSEHScopeCheck(Sema &SemaRef, CallExpr *TheCall,
// Scopes aren't available during instantiation. Fortunately, builtin
// functions cannot be template args so they cannot be formed through template
// instantiation. Therefore checking once during the parse is sufficient.
- if (!SemaRef.ActiveTemplateInstantiations.empty())
+ if (SemaRef.inTemplateInstantiation())
return false;
Scope *S = SemaRef.getCurScope();
@@ -309,13 +309,14 @@ static bool SemaOpenCLBuiltinKernelWorkGroupSize(Sema &S, CallExpr *TheCall) {
Expr *BlockArg = TheCall->getArg(0);
if (!isBlockPointer(BlockArg)) {
S.Diag(BlockArg->getLocStart(),
- diag::err_opencl_enqueue_kernel_expected_type) << "block";
+ diag::err_opencl_builtin_expected_type)
+ << TheCall->getDirectCallee() << "block";
return true;
}
return checkOpenCLBlockArgs(S, BlockArg);
}
-/// Diagnose integer type and any valid implicit convertion to it.
+/// Diagnose integer type and any valid implicit conversion to it.
static bool checkOpenCLEnqueueIntType(Sema &S, Expr *E,
const QualType &IntType);
@@ -394,24 +395,24 @@ static bool SemaOpenCLBuiltinEnqueueKernel(Sema &S, CallExpr *TheCall) {
// First argument always needs to be a queue_t type.
if (!Arg0->getType()->isQueueT()) {
S.Diag(TheCall->getArg(0)->getLocStart(),
- diag::err_opencl_enqueue_kernel_expected_type)
- << S.Context.OCLQueueTy;
+ diag::err_opencl_builtin_expected_type)
+ << TheCall->getDirectCallee() << S.Context.OCLQueueTy;
return true;
}
// Second argument always needs to be a kernel_enqueue_flags_t enum value.
if (!Arg1->getType()->isIntegerType()) {
S.Diag(TheCall->getArg(1)->getLocStart(),
- diag::err_opencl_enqueue_kernel_expected_type)
- << "'kernel_enqueue_flags_t' (i.e. uint)";
+ diag::err_opencl_builtin_expected_type)
+ << TheCall->getDirectCallee() << "'kernel_enqueue_flags_t' (i.e. uint)";
return true;
}
// Third argument is always an ndrange_t type.
- if (!Arg2->getType()->isNDRangeT()) {
+ if (Arg2->getType().getUnqualifiedType().getAsString() != "ndrange_t") {
S.Diag(TheCall->getArg(2)->getLocStart(),
- diag::err_opencl_enqueue_kernel_expected_type)
- << S.Context.OCLNDRangeTy;
+ diag::err_opencl_builtin_expected_type)
+ << TheCall->getDirectCallee() << "'ndrange_t'";
return true;
}
@@ -420,8 +421,8 @@ static bool SemaOpenCLBuiltinEnqueueKernel(Sema &S, CallExpr *TheCall) {
if (NumArgs == 4) {
// check that the last argument is the right block type.
if (!isBlockPointer(Arg3)) {
- S.Diag(Arg3->getLocStart(), diag::err_opencl_enqueue_kernel_expected_type)
- << "block";
+ S.Diag(Arg3->getLocStart(), diag::err_opencl_builtin_expected_type)
+ << TheCall->getDirectCallee() << "block";
return true;
}
// we have a block type, check the prototype
@@ -443,8 +444,8 @@ static bool SemaOpenCLBuiltinEnqueueKernel(Sema &S, CallExpr *TheCall) {
// check common block argument.
Expr *Arg6 = TheCall->getArg(6);
if (!isBlockPointer(Arg6)) {
- S.Diag(Arg6->getLocStart(), diag::err_opencl_enqueue_kernel_expected_type)
- << "block";
+ S.Diag(Arg6->getLocStart(), diag::err_opencl_builtin_expected_type)
+ << TheCall->getDirectCallee() << "block";
return true;
}
if (checkOpenCLBlockArgs(S, Arg6))
@@ -453,8 +454,8 @@ static bool SemaOpenCLBuiltinEnqueueKernel(Sema &S, CallExpr *TheCall) {
// Forth argument has to be any integer type.
if (!Arg3->getType()->isIntegerType()) {
S.Diag(TheCall->getArg(3)->getLocStart(),
- diag::err_opencl_enqueue_kernel_expected_type)
- << "integer";
+ diag::err_opencl_builtin_expected_type)
+ << TheCall->getDirectCallee() << "integer";
return true;
}
// check remaining common arguments.
@@ -466,7 +467,8 @@ static bool SemaOpenCLBuiltinEnqueueKernel(Sema &S, CallExpr *TheCall) {
Expr::NPC_ValueDependentIsNotNull) &&
!Arg4->getType()->getPointeeOrArrayElementType()->isClkEventT()) {
S.Diag(TheCall->getArg(4)->getLocStart(),
- diag::err_opencl_enqueue_kernel_expected_type)
+ diag::err_opencl_builtin_expected_type)
+ << TheCall->getDirectCallee()
<< S.Context.getPointerType(S.Context.OCLClkEventTy);
return true;
}
@@ -477,7 +479,8 @@ static bool SemaOpenCLBuiltinEnqueueKernel(Sema &S, CallExpr *TheCall) {
!(Arg5->getType()->isPointerType() &&
Arg5->getType()->getPointeeType()->isClkEventT())) {
S.Diag(TheCall->getArg(5)->getLocStart(),
- diag::err_opencl_enqueue_kernel_expected_type)
+ diag::err_opencl_builtin_expected_type)
+ << TheCall->getDirectCallee()
<< S.Context.getPointerType(S.Context.OCLClkEventTy);
return true;
}
@@ -757,9 +760,10 @@ Sema::CheckBuiltinFunctionCall(FunctionDecl *FDecl, unsigned BuiltinID,
if (CheckObjCString(TheCall->getArg(0)))
return ExprError();
break;
+ case Builtin::BI__builtin_ms_va_start:
case Builtin::BI__builtin_stdarg_start:
case Builtin::BI__builtin_va_start:
- if (SemaBuiltinVAStart(TheCall))
+ if (SemaBuiltinVAStart(BuiltinID, TheCall))
return ExprError();
break;
case Builtin::BI__va_start: {
@@ -770,7 +774,7 @@ Sema::CheckBuiltinFunctionCall(FunctionDecl *FDecl, unsigned BuiltinID,
return ExprError();
break;
default:
- if (SemaBuiltinVAStart(TheCall))
+ if (SemaBuiltinVAStart(BuiltinID, TheCall))
return ExprError();
break;
}
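
The dispatch above now funnels __builtin_ms_va_start through the same SemaBuiltinVAStart entry point, passing the BuiltinID so the ABI check can tell the two flavors apart. A minimal sketch of the user-level difference being enforced (hypothetical function names, C with Clang's ms_abi extension; on a SysV x86-64 target the second function is only accepted because of the ms_abi attribute):

    #include <stdarg.h>

    int sum(int count, ...) {              /* native-ABI varargs */
      va_list ap;
      va_start(ap, count);                 /* expands to __builtin_va_start */
      int total = 0;
      for (int i = 0; i < count; ++i)
        total += va_arg(ap, int);
      va_end(ap);
      return total;
    }

    __attribute__((ms_abi)) int msum(int count, ...) {
      __builtin_ms_va_list ap;
      __builtin_ms_va_start(ap, count);    /* without ms_abi this now draws
                                              err_ms_va_start_used_in_sysv_function */
      int total = 0;
      for (int i = 0; i < count; ++i)
        total += __builtin_va_arg(ap, int);
      __builtin_ms_va_end(ap);
      return total;
    }
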
@@ -1391,8 +1395,6 @@ bool Sema::CheckARMBuiltinExclusiveCall(unsigned BuiltinID, CallExpr *TheCall,
}
bool Sema::CheckARMBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall) {
- llvm::APSInt Result;
-
if (BuiltinID == ARM::BI__builtin_arm_ldrex ||
BuiltinID == ARM::BI__builtin_arm_ldaex ||
BuiltinID == ARM::BI__builtin_arm_strex ||
@@ -1439,8 +1441,6 @@ bool Sema::CheckARMBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall) {
bool Sema::CheckAArch64BuiltinFunctionCall(unsigned BuiltinID,
CallExpr *TheCall) {
- llvm::APSInt Result;
-
if (BuiltinID == AArch64::BI__builtin_arm_ldrex ||
BuiltinID == AArch64::BI__builtin_arm_ldaex ||
BuiltinID == AArch64::BI__builtin_arm_strex ||
@@ -1619,32 +1619,28 @@ bool Sema::CheckMipsBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall) {
case Mips::BI__builtin_msa_copy_u_b:
case Mips::BI__builtin_msa_insve_b:
case Mips::BI__builtin_msa_splati_b: i = 1; l = 0; u = 15; break;
- case Mips::BI__builtin_msa_sld_b:
case Mips::BI__builtin_msa_sldi_b: i = 2; l = 0; u = 15; break;
// These intrinsics take an unsigned 3 bit immediate.
case Mips::BI__builtin_msa_copy_s_h:
case Mips::BI__builtin_msa_copy_u_h:
case Mips::BI__builtin_msa_insve_h:
case Mips::BI__builtin_msa_splati_h: i = 1; l = 0; u = 7; break;
- case Mips::BI__builtin_msa_sld_h:
case Mips::BI__builtin_msa_sldi_h: i = 2; l = 0; u = 7; break;
// These intrinsics take an unsigned 2 bit immediate.
case Mips::BI__builtin_msa_copy_s_w:
case Mips::BI__builtin_msa_copy_u_w:
case Mips::BI__builtin_msa_insve_w:
case Mips::BI__builtin_msa_splati_w: i = 1; l = 0; u = 3; break;
- case Mips::BI__builtin_msa_sld_w:
case Mips::BI__builtin_msa_sldi_w: i = 2; l = 0; u = 3; break;
// These intrinsics take an unsigned 1 bit immediate.
case Mips::BI__builtin_msa_copy_s_d:
case Mips::BI__builtin_msa_copy_u_d:
case Mips::BI__builtin_msa_insve_d:
case Mips::BI__builtin_msa_splati_d: i = 1; l = 0; u = 1; break;
- case Mips::BI__builtin_msa_sld_d:
case Mips::BI__builtin_msa_sldi_d: i = 2; l = 0; u = 1; break;
// Memory offsets and immediate loads.
// These intrinsics take a signed 10 bit immediate.
- case Mips::BI__builtin_msa_ldi_b: i = 0; l = -128; u = 127; break;
+ case Mips::BI__builtin_msa_ldi_b: i = 0; l = -128; u = 255; break;
case Mips::BI__builtin_msa_ldi_h:
case Mips::BI__builtin_msa_ldi_w:
case Mips::BI__builtin_msa_ldi_d: i = 0; l = -512; u = 511; break;
@@ -1704,6 +1700,9 @@ bool Sema::CheckPPCBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall) {
case PPC::BI__builtin_tabortdci:
return SemaBuiltinConstantArgRange(TheCall, 0, 0, 31) ||
SemaBuiltinConstantArgRange(TheCall, 2, 0, 31);
+ case PPC::BI__builtin_vsx_xxpermdi:
+ case PPC::BI__builtin_vsx_xxsldwi:
+ return SemaBuiltinVSX(TheCall);
}
return SemaBuiltinConstantArgRange(TheCall, i, l, u);
}
@@ -1741,9 +1740,11 @@ bool Sema::CheckSystemZBuiltinFunctionCall(unsigned BuiltinID,
case SystemZ::BI__builtin_s390_vfaezbs:
case SystemZ::BI__builtin_s390_vfaezhs:
case SystemZ::BI__builtin_s390_vfaezfs: i = 2; l = 0; u = 15; break;
+ case SystemZ::BI__builtin_s390_vfisb:
case SystemZ::BI__builtin_s390_vfidb:
return SemaBuiltinConstantArgRange(TheCall, 1, 0, 15) ||
SemaBuiltinConstantArgRange(TheCall, 2, 0, 15);
+ case SystemZ::BI__builtin_s390_vftcisb:
case SystemZ::BI__builtin_s390_vftcidb: i = 1; l = 0; u = 4095; break;
case SystemZ::BI__builtin_s390_vlbb: i = 1; l = 0; u = 15; break;
case SystemZ::BI__builtin_s390_vpdi: i = 2; l = 0; u = 15; break;
@@ -1760,6 +1761,11 @@ bool Sema::CheckSystemZBuiltinFunctionCall(unsigned BuiltinID,
case SystemZ::BI__builtin_s390_vstrczbs:
case SystemZ::BI__builtin_s390_vstrczhs:
case SystemZ::BI__builtin_s390_vstrczfs: i = 3; l = 0; u = 15; break;
+ case SystemZ::BI__builtin_s390_vmslg: i = 3; l = 0; u = 15; break;
+ case SystemZ::BI__builtin_s390_vfminsb:
+ case SystemZ::BI__builtin_s390_vfmaxsb:
+ case SystemZ::BI__builtin_s390_vfmindb:
+ case SystemZ::BI__builtin_s390_vfmaxdb: i = 2; l = 0; u = 15; break;
}
return SemaBuiltinConstantArgRange(TheCall, i, l, u);
}
@@ -1990,17 +1996,121 @@ bool Sema::CheckX86BuiltinRoundingOrSAE(unsigned BuiltinID, CallExpr *TheCall) {
<< Arg->getSourceRange();
}
+// Check if the gather/scatter scale is legal.
+bool Sema::CheckX86BuiltinGatherScatterScale(unsigned BuiltinID,
+ CallExpr *TheCall) {
+ unsigned ArgNum = 0;
+ switch (BuiltinID) {
+ default:
+ return false;
+ case X86::BI__builtin_ia32_gatherpfdpd:
+ case X86::BI__builtin_ia32_gatherpfdps:
+ case X86::BI__builtin_ia32_gatherpfqpd:
+ case X86::BI__builtin_ia32_gatherpfqps:
+ case X86::BI__builtin_ia32_scatterpfdpd:
+ case X86::BI__builtin_ia32_scatterpfdps:
+ case X86::BI__builtin_ia32_scatterpfqpd:
+ case X86::BI__builtin_ia32_scatterpfqps:
+ ArgNum = 3;
+ break;
+ case X86::BI__builtin_ia32_gatherd_pd:
+ case X86::BI__builtin_ia32_gatherd_pd256:
+ case X86::BI__builtin_ia32_gatherq_pd:
+ case X86::BI__builtin_ia32_gatherq_pd256:
+ case X86::BI__builtin_ia32_gatherd_ps:
+ case X86::BI__builtin_ia32_gatherd_ps256:
+ case X86::BI__builtin_ia32_gatherq_ps:
+ case X86::BI__builtin_ia32_gatherq_ps256:
+ case X86::BI__builtin_ia32_gatherd_q:
+ case X86::BI__builtin_ia32_gatherd_q256:
+ case X86::BI__builtin_ia32_gatherq_q:
+ case X86::BI__builtin_ia32_gatherq_q256:
+ case X86::BI__builtin_ia32_gatherd_d:
+ case X86::BI__builtin_ia32_gatherd_d256:
+ case X86::BI__builtin_ia32_gatherq_d:
+ case X86::BI__builtin_ia32_gatherq_d256:
+ case X86::BI__builtin_ia32_gather3div2df:
+ case X86::BI__builtin_ia32_gather3div2di:
+ case X86::BI__builtin_ia32_gather3div4df:
+ case X86::BI__builtin_ia32_gather3div4di:
+ case X86::BI__builtin_ia32_gather3div4sf:
+ case X86::BI__builtin_ia32_gather3div4si:
+ case X86::BI__builtin_ia32_gather3div8sf:
+ case X86::BI__builtin_ia32_gather3div8si:
+ case X86::BI__builtin_ia32_gather3siv2df:
+ case X86::BI__builtin_ia32_gather3siv2di:
+ case X86::BI__builtin_ia32_gather3siv4df:
+ case X86::BI__builtin_ia32_gather3siv4di:
+ case X86::BI__builtin_ia32_gather3siv4sf:
+ case X86::BI__builtin_ia32_gather3siv4si:
+ case X86::BI__builtin_ia32_gather3siv8sf:
+ case X86::BI__builtin_ia32_gather3siv8si:
+ case X86::BI__builtin_ia32_gathersiv8df:
+ case X86::BI__builtin_ia32_gathersiv16sf:
+ case X86::BI__builtin_ia32_gatherdiv8df:
+ case X86::BI__builtin_ia32_gatherdiv16sf:
+ case X86::BI__builtin_ia32_gathersiv8di:
+ case X86::BI__builtin_ia32_gathersiv16si:
+ case X86::BI__builtin_ia32_gatherdiv8di:
+ case X86::BI__builtin_ia32_gatherdiv16si:
+ case X86::BI__builtin_ia32_scatterdiv2df:
+ case X86::BI__builtin_ia32_scatterdiv2di:
+ case X86::BI__builtin_ia32_scatterdiv4df:
+ case X86::BI__builtin_ia32_scatterdiv4di:
+ case X86::BI__builtin_ia32_scatterdiv4sf:
+ case X86::BI__builtin_ia32_scatterdiv4si:
+ case X86::BI__builtin_ia32_scatterdiv8sf:
+ case X86::BI__builtin_ia32_scatterdiv8si:
+ case X86::BI__builtin_ia32_scattersiv2df:
+ case X86::BI__builtin_ia32_scattersiv2di:
+ case X86::BI__builtin_ia32_scattersiv4df:
+ case X86::BI__builtin_ia32_scattersiv4di:
+ case X86::BI__builtin_ia32_scattersiv4sf:
+ case X86::BI__builtin_ia32_scattersiv4si:
+ case X86::BI__builtin_ia32_scattersiv8sf:
+ case X86::BI__builtin_ia32_scattersiv8si:
+ case X86::BI__builtin_ia32_scattersiv8df:
+ case X86::BI__builtin_ia32_scattersiv16sf:
+ case X86::BI__builtin_ia32_scatterdiv8df:
+ case X86::BI__builtin_ia32_scatterdiv16sf:
+ case X86::BI__builtin_ia32_scattersiv8di:
+ case X86::BI__builtin_ia32_scattersiv16si:
+ case X86::BI__builtin_ia32_scatterdiv8di:
+ case X86::BI__builtin_ia32_scatterdiv16si:
+ ArgNum = 4;
+ break;
+ }
+
+ llvm::APSInt Result;
+
+ // We can't check the value of a dependent argument.
+ Expr *Arg = TheCall->getArg(ArgNum);
+ if (Arg->isTypeDependent() || Arg->isValueDependent())
+ return false;
+
+ // Check constant-ness first.
+ if (SemaBuiltinConstantArg(TheCall, ArgNum, Result))
+ return true;
+
+ if (Result == 1 || Result == 2 || Result == 4 || Result == 8)
+ return false;
+
+ return Diag(TheCall->getLocStart(), diag::err_x86_builtin_invalid_scale)
+ << Arg->getSourceRange();
+}
+
bool Sema::CheckX86BuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall) {
if (BuiltinID == X86::BI__builtin_cpu_supports)
return SemaBuiltinCpuSupports(*this, TheCall);
- if (BuiltinID == X86::BI__builtin_ms_va_start)
- return SemaBuiltinMSVAStart(TheCall);
-
  // If the intrinsic has rounding or SAE, make sure it's valid.
if (CheckX86BuiltinRoundingOrSAE(BuiltinID, TheCall))
return true;
+ // If the intrinsic has a gather/scatter scale immediate, make sure it's valid.
+ if (CheckX86BuiltinGatherScatterScale(BuiltinID, TheCall))
+ return true;
+
// For intrinsics which take an immediate value as part of the instruction,
// range check them here.
int i = 0, l = 0, u = 0;
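
CheckX86BuiltinGatherScatterScale above only accepts a constant scale of 1, 2, 4 or 8. A small sketch of what that means at the source level, assuming the usual immintrin.h prototype for the AVX2 gather (which lowers to __builtin_ia32_gatherd_ps256 from the list above):

    #include <immintrin.h>

    __m256 gather_ok(const float *base, __m256i idx) {
      return _mm256_i32gather_ps(base, idx, 4);   /* scale 4: accepted */
    }

    __m256 gather_bad(const float *base, __m256i idx) {
      return _mm256_i32gather_ps(base, idx, 3);   /* scale 3: rejected with
                                                     err_x86_builtin_invalid_scale */
    }
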
@@ -2197,6 +2307,16 @@ bool Sema::CheckX86BuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall) {
case X86::BI__builtin_ia32_pternlogq256_maskz:
i = 3; l = 0; u = 255;
break;
+ case X86::BI__builtin_ia32_gatherpfdpd:
+ case X86::BI__builtin_ia32_gatherpfdps:
+ case X86::BI__builtin_ia32_gatherpfqpd:
+ case X86::BI__builtin_ia32_gatherpfqps:
+ case X86::BI__builtin_ia32_scatterpfdpd:
+ case X86::BI__builtin_ia32_scatterpfdps:
+ case X86::BI__builtin_ia32_scatterpfqpd:
+ case X86::BI__builtin_ia32_scatterpfqps:
+ i = 4; l = 2; u = 3;
+ break;
case X86::BI__builtin_ia32_pcmpestrm128:
case X86::BI__builtin_ia32_pcmpestri128:
case X86::BI__builtin_ia32_pcmpestria128:
@@ -3502,11 +3622,89 @@ ExprResult Sema::CheckOSLogFormatStringArg(Expr *Arg) {
return Result;
}
+/// Check that the user is calling the appropriate va_start builtin for the
+/// target and calling convention.
+static bool checkVAStartABI(Sema &S, unsigned BuiltinID, Expr *Fn) {
+ const llvm::Triple &TT = S.Context.getTargetInfo().getTriple();
+ bool IsX64 = TT.getArch() == llvm::Triple::x86_64;
+ bool IsAArch64 = TT.getArch() == llvm::Triple::aarch64;
+ bool IsWindows = TT.isOSWindows();
+ bool IsMSVAStart = BuiltinID == Builtin::BI__builtin_ms_va_start;
+ if (IsX64 || IsAArch64) {
+ clang::CallingConv CC = CC_C;
+ if (const FunctionDecl *FD = S.getCurFunctionDecl())
+ CC = FD->getType()->getAs<FunctionType>()->getCallConv();
+ if (IsMSVAStart) {
+ // Don't allow this in System V ABI functions.
+ if (CC == CC_X86_64SysV || (!IsWindows && CC != CC_Win64))
+ return S.Diag(Fn->getLocStart(),
+ diag::err_ms_va_start_used_in_sysv_function);
+ } else {
+ // On x86-64/AArch64 Unix, don't allow this in Win64 ABI functions.
+ // On x64 Windows, don't allow this in System V ABI functions.
+ // (Yes, that means there's no corresponding way to support variadic
+ // System V ABI functions on Windows.)
+ if ((IsWindows && CC == CC_X86_64SysV) ||
+ (!IsWindows && CC == CC_Win64))
+ return S.Diag(Fn->getLocStart(),
+ diag::err_va_start_used_in_wrong_abi_function)
+ << !IsWindows;
+ }
+ return false;
+ }
+
+ if (IsMSVAStart)
+ return S.Diag(Fn->getLocStart(), diag::err_builtin_x64_aarch64_only);
+ return false;
+}
+
+static bool checkVAStartIsInVariadicFunction(Sema &S, Expr *Fn,
+ ParmVarDecl **LastParam = nullptr) {
+ // Determine whether the current function, block, or obj-c method is variadic
+ // and get its parameter list.
+ bool IsVariadic = false;
+ ArrayRef<ParmVarDecl *> Params;
+ DeclContext *Caller = S.CurContext;
+ if (auto *Block = dyn_cast<BlockDecl>(Caller)) {
+ IsVariadic = Block->isVariadic();
+ Params = Block->parameters();
+ } else if (auto *FD = dyn_cast<FunctionDecl>(Caller)) {
+ IsVariadic = FD->isVariadic();
+ Params = FD->parameters();
+ } else if (auto *MD = dyn_cast<ObjCMethodDecl>(Caller)) {
+ IsVariadic = MD->isVariadic();
+ // FIXME: This isn't correct for methods (results in bogus warning).
+ Params = MD->parameters();
+ } else if (isa<CapturedDecl>(Caller)) {
+ // We don't support va_start in a CapturedDecl.
+ S.Diag(Fn->getLocStart(), diag::err_va_start_captured_stmt);
+ return true;
+ } else {
+ // This must be some other declcontext that parses exprs.
+ S.Diag(Fn->getLocStart(), diag::err_va_start_outside_function);
+ return true;
+ }
+
+ if (!IsVariadic) {
+ S.Diag(Fn->getLocStart(), diag::err_va_start_fixed_function);
+ return true;
+ }
+
+ if (LastParam)
+ *LastParam = Params.empty() ? nullptr : Params.back();
+
+ return false;
+}
+
/// Check the arguments to '__builtin_va_start' or '__builtin_ms_va_start'
/// for validity. Emit an error and return true on failure; return false
/// on success.
-bool Sema::SemaBuiltinVAStartImpl(CallExpr *TheCall) {
+bool Sema::SemaBuiltinVAStart(unsigned BuiltinID, CallExpr *TheCall) {
Expr *Fn = TheCall->getCallee();
+
+ if (checkVAStartABI(*this, BuiltinID, Fn))
+ return true;
+
if (TheCall->getNumArgs() > 2) {
Diag(TheCall->getArg(2)->getLocStart(),
diag::err_typecheck_call_too_many_args)
@@ -3527,20 +3725,10 @@ bool Sema::SemaBuiltinVAStartImpl(CallExpr *TheCall) {
if (checkBuiltinArgument(*this, TheCall, 0))
return true;
- // Determine whether the current function is variadic or not.
- BlockScopeInfo *CurBlock = getCurBlock();
- bool isVariadic;
- if (CurBlock)
- isVariadic = CurBlock->TheDecl->isVariadic();
- else if (FunctionDecl *FD = getCurFunctionDecl())
- isVariadic = FD->isVariadic();
- else
- isVariadic = getCurMethodDecl()->isVariadic();
-
- if (!isVariadic) {
- Diag(Fn->getLocStart(), diag::err_va_start_used_in_non_variadic_function);
+ // Check that the current function is variadic, and get its last parameter.
+ ParmVarDecl *LastParam;
+ if (checkVAStartIsInVariadicFunction(*this, Fn, &LastParam))
return true;
- }
// Verify that the second argument to the builtin is the last argument of the
// current function or method.
@@ -3555,16 +3743,7 @@ bool Sema::SemaBuiltinVAStartImpl(CallExpr *TheCall) {
if (const DeclRefExpr *DR = dyn_cast<DeclRefExpr>(Arg)) {
if (const ParmVarDecl *PV = dyn_cast<ParmVarDecl>(DR->getDecl())) {
- // FIXME: This isn't correct for methods (results in bogus warning).
- // Get the last formal in the current function.
- const ParmVarDecl *LastArg;
- if (CurBlock)
- LastArg = CurBlock->TheDecl->parameters().back();
- else if (FunctionDecl *FD = getCurFunctionDecl())
- LastArg = FD->parameters().back();
- else
- LastArg = getCurMethodDecl()->parameters().back();
- SecondArgIsLastNamedArgument = PV == LastArg;
+ SecondArgIsLastNamedArgument = PV == LastParam;
Type = PV->getType();
ParamLoc = PV->getLocation();
@@ -3599,48 +3778,6 @@ bool Sema::SemaBuiltinVAStartImpl(CallExpr *TheCall) {
return false;
}
-/// Check the arguments to '__builtin_va_start' for validity, and that
-/// it was called from a function of the native ABI.
-/// Emit an error and return true on failure; return false on success.
-bool Sema::SemaBuiltinVAStart(CallExpr *TheCall) {
- // On x86-64 Unix, don't allow this in Win64 ABI functions.
- // On x64 Windows, don't allow this in System V ABI functions.
- // (Yes, that means there's no corresponding way to support variadic
- // System V ABI functions on Windows.)
- if (Context.getTargetInfo().getTriple().getArch() == llvm::Triple::x86_64) {
- unsigned OS = Context.getTargetInfo().getTriple().getOS();
- clang::CallingConv CC = CC_C;
- if (const FunctionDecl *FD = getCurFunctionDecl())
- CC = FD->getType()->getAs<FunctionType>()->getCallConv();
- if ((OS == llvm::Triple::Win32 && CC == CC_X86_64SysV) ||
- (OS != llvm::Triple::Win32 && CC == CC_X86_64Win64))
- return Diag(TheCall->getCallee()->getLocStart(),
- diag::err_va_start_used_in_wrong_abi_function)
- << (OS != llvm::Triple::Win32);
- }
- return SemaBuiltinVAStartImpl(TheCall);
-}
-
-/// Check the arguments to '__builtin_ms_va_start' for validity, and that
-/// it was called from a Win64 ABI function.
-/// Emit an error and return true on failure; return false on success.
-bool Sema::SemaBuiltinMSVAStart(CallExpr *TheCall) {
- // This only makes sense for x86-64.
- const llvm::Triple &TT = Context.getTargetInfo().getTriple();
- Expr *Callee = TheCall->getCallee();
- if (TT.getArch() != llvm::Triple::x86_64)
- return Diag(Callee->getLocStart(), diag::err_x86_builtin_32_bit_tgt);
- // Don't allow this in System V ABI functions.
- clang::CallingConv CC = CC_C;
- if (const FunctionDecl *FD = getCurFunctionDecl())
- CC = FD->getType()->getAs<FunctionType>()->getCallConv();
- if (CC == CC_X86_64SysV ||
- (TT.getOS() != llvm::Triple::Win32 && CC != CC_X86_64Win64))
- return Diag(Callee->getLocStart(),
- diag::err_ms_va_start_used_in_sysv_function);
- return SemaBuiltinVAStartImpl(TheCall);
-}
-
bool Sema::SemaBuiltinVAStartARM(CallExpr *Call) {
// void __va_start(va_list *ap, const char *named_addr, size_t slot_size,
// const char *named_addr);
@@ -3652,26 +3789,14 @@ bool Sema::SemaBuiltinVAStartARM(CallExpr *Call) {
diag::err_typecheck_call_too_few_args_at_least)
<< 0 /*function call*/ << 3 << Call->getNumArgs();
- // Determine whether the current function is variadic or not.
- bool IsVariadic;
- if (BlockScopeInfo *CurBlock = getCurBlock())
- IsVariadic = CurBlock->TheDecl->isVariadic();
- else if (FunctionDecl *FD = getCurFunctionDecl())
- IsVariadic = FD->isVariadic();
- else if (ObjCMethodDecl *MD = getCurMethodDecl())
- IsVariadic = MD->isVariadic();
- else
- llvm_unreachable("unexpected statement type");
-
- if (!IsVariadic) {
- Diag(Func->getLocStart(), diag::err_va_start_used_in_non_variadic_function);
- return true;
- }
-
// Type-check the first argument normally.
if (checkBuiltinArgument(*this, Call, 0))
return true;
+ // Check that the current function is variadic.
+ if (checkVAStartIsInVariadicFunction(*this, Func))
+ return true;
+
const struct {
unsigned ArgNo;
QualType Type;
@@ -3779,6 +3904,65 @@ bool Sema::SemaBuiltinFPClassification(CallExpr *TheCall, unsigned NumArgs) {
return false;
}
+// Customized Sema checking for VSX builtins that have the following signature:
+// vector [...] builtinName(vector [...], vector [...], const int);
+// which takes the same vector type (any legal vector type) for the first
+// two arguments and a compile-time constant for the third argument.
+// Example builtins are:
+// vector double vec_xxpermdi(vector double, vector double, int);
+// vector short vec_xxsldwi(vector short, vector short, int);
+bool Sema::SemaBuiltinVSX(CallExpr *TheCall) {
+ unsigned ExpectedNumArgs = 3;
+ if (TheCall->getNumArgs() < ExpectedNumArgs)
+ return Diag(TheCall->getLocEnd(),
+ diag::err_typecheck_call_too_few_args_at_least)
+ << 0 /*function call*/ << ExpectedNumArgs << TheCall->getNumArgs()
+ << TheCall->getSourceRange();
+
+ if (TheCall->getNumArgs() > ExpectedNumArgs)
+ return Diag(TheCall->getLocEnd(),
+ diag::err_typecheck_call_too_many_args_at_most)
+ << 0 /*function call*/ << ExpectedNumArgs << TheCall->getNumArgs()
+ << TheCall->getSourceRange();
+
+ // Check that the third argument is a compile-time constant.
+ llvm::APSInt Value;
+ if(!TheCall->getArg(2)->isIntegerConstantExpr(Value, Context))
+ return Diag(TheCall->getLocStart(),
+ diag::err_vsx_builtin_nonconstant_argument)
+ << 3 /* argument index */ << TheCall->getDirectCallee()
+ << SourceRange(TheCall->getArg(2)->getLocStart(),
+ TheCall->getArg(2)->getLocEnd());
+
+ QualType Arg1Ty = TheCall->getArg(0)->getType();
+ QualType Arg2Ty = TheCall->getArg(1)->getType();
+
+ // Check that the types of arguments 1 and 2 are vectors.
+ SourceLocation BuiltinLoc = TheCall->getLocStart();
+ if ((!Arg1Ty->isVectorType() && !Arg1Ty->isDependentType()) ||
+ (!Arg2Ty->isVectorType() && !Arg2Ty->isDependentType())) {
+ return Diag(BuiltinLoc, diag::err_vec_builtin_non_vector)
+ << TheCall->getDirectCallee()
+ << SourceRange(TheCall->getArg(0)->getLocStart(),
+ TheCall->getArg(1)->getLocEnd());
+ }
+
+ // Check that the first two arguments have the same type.
+ if (!Context.hasSameUnqualifiedType(Arg1Ty, Arg2Ty)) {
+ return Diag(BuiltinLoc, diag::err_vec_builtin_incompatible_vector)
+ << TheCall->getDirectCallee()
+ << SourceRange(TheCall->getArg(0)->getLocStart(),
+ TheCall->getArg(1)->getLocEnd());
+ }
+
+ // When default clang type checking is turned off and customized type
+ // checking is used, the return type of the function must be set
+ // explicitly. Otherwise it is _Bool by default.
+ TheCall->setType(Arg1Ty);
+
+ return false;
+}
+
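
SemaBuiltinVSX above checks the two-vector-plus-immediate shape shared by the xxpermdi/xxsldwi builtins. An illustrative use, assuming a target compiled with -mvsx and the builtin names already listed in the PPC dispatch above:

    __vector double interleave(__vector double a, __vector double b) {
      return __builtin_vsx_xxpermdi(a, b, 0);   /* constant third operand: OK */
    }

    __vector double broken(__vector double a, __vector double b, int n) {
      return __builtin_vsx_xxpermdi(a, b, n);   /* non-constant: rejected with
                                                   err_vsx_builtin_nonconstant_argument */
    }
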
/// SemaBuiltinShuffleVector - Handle __builtin_shufflevector.
// This is declared to take (...), so we have to check everything.
ExprResult Sema::SemaBuiltinShuffleVector(CallExpr *TheCall) {
@@ -3801,7 +3985,8 @@ ExprResult Sema::SemaBuiltinShuffleVector(CallExpr *TheCall) {
if (!LHSType->isVectorType() || !RHSType->isVectorType())
return ExprError(Diag(TheCall->getLocStart(),
- diag::err_shufflevector_non_vector)
+ diag::err_vec_builtin_non_vector)
+ << TheCall->getDirectCallee()
<< SourceRange(TheCall->getArg(0)->getLocStart(),
TheCall->getArg(1)->getLocEnd()));
@@ -3815,12 +4000,14 @@ ExprResult Sema::SemaBuiltinShuffleVector(CallExpr *TheCall) {
if (!RHSType->hasIntegerRepresentation() ||
RHSType->getAs<VectorType>()->getNumElements() != numElements)
return ExprError(Diag(TheCall->getLocStart(),
- diag::err_shufflevector_incompatible_vector)
+ diag::err_vec_builtin_incompatible_vector)
+ << TheCall->getDirectCallee()
<< SourceRange(TheCall->getArg(1)->getLocStart(),
TheCall->getArg(1)->getLocEnd()));
} else if (!Context.hasSameUnqualifiedType(LHSType, RHSType)) {
return ExprError(Diag(TheCall->getLocStart(),
- diag::err_shufflevector_incompatible_vector)
+ diag::err_vec_builtin_incompatible_vector)
+ << TheCall->getDirectCallee()
<< SourceRange(TheCall->getArg(0)->getLocStart(),
TheCall->getArg(1)->getLocEnd()));
} else if (numElements != numResElements) {
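
The hunk above folds the shufflevector-specific diagnostics into the shared err_vec_builtin_* pair so SemaBuiltinVSX can reuse them. For reference, the kind of code they fire on (plain Clang vector extensions):

    typedef float v4f __attribute__((vector_size(16)));

    v4f reverse(v4f a) {
      return __builtin_shufflevector(a, a, 3, 2, 1, 0);   /* OK */
    }

    v4f broken(v4f a, float b) {
      return __builtin_shufflevector(a, b, 0, 1, 2, 3);   /* second operand is not
                                                              a vector: err_vec_builtin_non_vector */
    }
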
@@ -5823,6 +6010,7 @@ shouldNotPrintDirectly(const ASTContext &Context,
while (const TypedefType *UserTy = TyTy->getAs<TypedefType>()) {
StringRef Name = UserTy->getDecl()->getName();
QualType CastTy = llvm::StringSwitch<QualType>(Name)
+ .Case("CFIndex", Context.LongTy)
.Case("NSInteger", Context.LongTy)
.Case("NSUInteger", Context.UnsignedLongTy)
.Case("SInt32", Context.IntTy)
@@ -6784,7 +6972,7 @@ void Sema::CheckMaxUnsignedZero(const CallExpr *Call,
if (!Call || !FDecl) return;
// Ignore template specializations and macros.
- if (!ActiveTemplateInstantiations.empty()) return;
+ if (inTemplateInstantiation()) return;
if (Call->getExprLoc().isMacroID()) return;
// Only care about the one template argument, two function parameter std::max
@@ -7342,7 +7530,7 @@ CheckReturnStackAddr(Sema &S, Expr *RetValExp, QualType lhsType,
if (!stackE)
return; // Nothing suspicious was found.
- // Parameters are initalized in the calling scope, so taking the address
+ // Parameters are initialized in the calling scope, so taking the address
// of a parameter reference doesn't need a warning.
for (auto *DRE : refVars)
if (isa<ParmVarDecl>(DRE->getDecl()))
@@ -8237,7 +8425,7 @@ bool HasEnumType(Expr *E) {
void CheckTrivialUnsignedComparison(Sema &S, BinaryOperator *E) {
// Disable warning in template instantiations.
- if (!S.ActiveTemplateInstantiations.empty())
+ if (S.inTemplateInstantiation())
return;
BinaryOperatorKind op = E->getOpcode();
@@ -8267,7 +8455,7 @@ void DiagnoseOutOfRangeComparison(Sema &S, BinaryOperator *E, Expr *Constant,
Expr *Other, const llvm::APSInt &Value,
bool RhsConstant) {
// Disable warning in template instantiations.
- if (!S.ActiveTemplateInstantiations.empty())
+ if (S.inTemplateInstantiation())
return;
// TODO: Investigate using GetExprRange() to get tighter bounds
@@ -8618,13 +8806,66 @@ bool AnalyzeBitFieldAssignment(Sema &S, FieldDecl *Bitfield, Expr *Init,
return false;
Expr *OriginalInit = Init->IgnoreParenImpCasts();
+ unsigned FieldWidth = Bitfield->getBitWidthValue(S.Context);
llvm::APSInt Value;
- if (!OriginalInit->EvaluateAsInt(Value, S.Context, Expr::SE_AllowSideEffects))
+ if (!OriginalInit->EvaluateAsInt(Value, S.Context,
+ Expr::SE_AllowSideEffects)) {
+ // The RHS is not constant. If the RHS has an enum type, make sure the
+ // bitfield is wide enough to hold all the values of the enum without
+ // truncation.
+ if (const auto *EnumTy = OriginalInit->getType()->getAs<EnumType>()) {
+ EnumDecl *ED = EnumTy->getDecl();
+ bool SignedBitfield = BitfieldType->isSignedIntegerType();
+
+ // Enum types are implicitly signed on Windows, so check if there are any
+ // negative enumerators to see if the enum was intended to be signed or
+ // not.
+ bool SignedEnum = ED->getNumNegativeBits() > 0;
+
+ // Check for surprising sign changes when assigning enum values to a
+ // bitfield of different signedness. If the bitfield is signed and we
+ // have exactly the right number of bits to store this unsigned enum,
+ // suggest changing the enum to an unsigned type. This typically happens
+ // on Windows where unfixed enums always use an underlying type of 'int'.
+ unsigned DiagID = 0;
+ if (SignedEnum && !SignedBitfield) {
+ DiagID = diag::warn_unsigned_bitfield_assigned_signed_enum;
+ } else if (SignedBitfield && !SignedEnum &&
+ ED->getNumPositiveBits() == FieldWidth) {
+ DiagID = diag::warn_signed_bitfield_enum_conversion;
+ }
+
+ if (DiagID) {
+ S.Diag(InitLoc, DiagID) << Bitfield << ED;
+ TypeSourceInfo *TSI = Bitfield->getTypeSourceInfo();
+ SourceRange TypeRange =
+ TSI ? TSI->getTypeLoc().getSourceRange() : SourceRange();
+ S.Diag(Bitfield->getTypeSpecStartLoc(), diag::note_change_bitfield_sign)
+ << SignedEnum << TypeRange;
+ }
+
+ // Compute the required bitwidth. If the enum has negative values, we need
+ // one more bit than the normal number of positive bits to represent the
+ // sign bit.
+ unsigned BitsNeeded = SignedEnum ? std::max(ED->getNumPositiveBits() + 1,
+ ED->getNumNegativeBits())
+ : ED->getNumPositiveBits();
+
+ // Check the bitwidth.
+ if (BitsNeeded > FieldWidth) {
+ Expr *WidthExpr = Bitfield->getBitWidth();
+ S.Diag(InitLoc, diag::warn_bitfield_too_small_for_enum)
+ << Bitfield << ED;
+ S.Diag(WidthExpr->getExprLoc(), diag::note_widen_bitfield)
+ << BitsNeeded << ED << WidthExpr->getSourceRange();
+ }
+ }
+
return false;
+ }
unsigned OriginalWidth = Value.getBitWidth();
- unsigned FieldWidth = Bitfield->getBitWidthValue(S.Context);
if (!Value.isSigned() || Value.isNegative())
if (UnaryOperator *UO = dyn_cast<UnaryOperator>(OriginalInit))
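
The new branch above runs when the right-hand side is not a constant and has an enum type, warning when the bitfield cannot hold every enumerator or when the signedness differs in a surprising way. A small C example of what each new diagnostic catches:

    enum Flavor { Vanilla, Chocolate, Strawberry, Mint };  /* needs 2 value bits */

    struct Cone {
      unsigned flavor : 1;   /* too narrow: warn_bitfield_too_small_for_enum */
      int      kind   : 2;   /* signed, exactly 2 value bits:
                                warn_signed_bitfield_enum_conversion */
    };

    void fill(struct Cone *c, enum Flavor f) {
      c->flavor = f;         /* non-constant RHS, so the enum-width check runs */
      c->kind   = f;
    }
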
@@ -8705,7 +8946,7 @@ void DiagnoseFloatingImpCast(Sema &S, Expr *E, QualType T,
SourceLocation CContext) {
const bool IsBool = T->isSpecificBuiltinType(BuiltinType::Bool);
- const bool PruneWarnings = !S.ActiveTemplateInstantiations.empty();
+ const bool PruneWarnings = S.inTemplateInstantiation();
Expr *InnerE = E->IgnoreParenImpCasts();
// We also want to warn on, e.g., "int i = -1.234"
@@ -9722,6 +9963,9 @@ void Sema::CheckForIntOverflow (Expr *E) {
if (auto InitList = dyn_cast<InitListExpr>(E))
Exprs.append(InitList->inits().begin(), InitList->inits().end());
+
+ if (isa<ObjCBoxedExpr>(E))
+ E->IgnoreParenCasts()->EvaluateForOverflow(Context);
} while (!Exprs.empty());
}
@@ -10611,6 +10855,12 @@ void Sema::CheckArrayAccess(const Expr *expr) {
CheckArrayAccess(rhs);
return;
}
+ case Stmt::CXXOperatorCallExprClass: {
+ const auto *OCE = cast<CXXOperatorCallExpr>(expr);
+ for (const auto *Arg : OCE->arguments())
+ CheckArrayAccess(Arg);
+ return;
+ }
default:
return;
}
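
The added CXXOperatorCallExprClass case recurses into the operands of overloaded operator calls, so out-of-bounds subscripts used as such operands now get the usual array-bounds check. A minimal C++ illustration:

    struct Sink { Sink &operator<<(int); };

    void log_last(Sink &out) {
      int a[4] = {1, 2, 3, 4};
      out << a[4];   /* operand of an overloaded operator: the index past the
                        end is now flagged by CheckArrayAccess */
    }
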
@@ -11316,7 +11566,7 @@ void Sema::DiagnoseSelfMove(const Expr *LHSExpr, const Expr *RHSExpr,
if (Diags.isIgnored(diag::warn_sizeof_pointer_expr_memaccess, OpLoc))
return;
- if (!ActiveTemplateInstantiations.empty())
+ if (inTemplateInstantiation())
return;
// Strip parens and casts away.
@@ -11841,6 +12091,10 @@ void Sema::RefersToMemberWithReducedAlignment(
if (!ME)
return;
+ // No need to check expressions with an __unaligned-qualified type.
+ if (E->getType().getQualifiers().hasUnaligned())
+ return;
+
// For a chain of MemberExpr like "a.b.c.d" this list
// will keep FieldDecl's like [d, c, b].
SmallVector<FieldDecl *, 4> ReverseMemberChain;
@@ -11851,6 +12105,8 @@ void Sema::RefersToMemberWithReducedAlignment(
if (ME->isArrow())
BaseType = BaseType->getPointeeType();
RecordDecl *RD = BaseType->getAs<RecordType>()->getDecl();
+ if (RD->isInvalidDecl())
+ return;
ValueDecl *MD = ME->getMemberDecl();
auto *FD = dyn_cast<FieldDecl>(MD);
diff --git a/gnu/llvm/tools/clang/lib/Sema/SemaDeclAttr.cpp b/gnu/llvm/tools/clang/lib/Sema/SemaDeclAttr.cpp
index 968e7fd4262..199f4b6ca89 100644
--- a/gnu/llvm/tools/clang/lib/Sema/SemaDeclAttr.cpp
+++ b/gnu/llvm/tools/clang/lib/Sema/SemaDeclAttr.cpp
@@ -218,21 +218,45 @@ static bool checkAttributeAtMostNumArgs(Sema &S, const AttributeList &Attr,
std::greater<unsigned>());
}
+/// \brief A helper function to provide Attribute Location for the Attr types
+/// AND the AttributeList.
+template <typename AttrInfo>
+static typename std::enable_if<std::is_base_of<clang::Attr, AttrInfo>::value,
+ SourceLocation>::type
+getAttrLoc(const AttrInfo &Attr) {
+ return Attr.getLocation();
+}
+static SourceLocation getAttrLoc(const clang::AttributeList &Attr) {
+ return Attr.getLoc();
+}
+
+/// \brief A helper function to provide Attribute Name for the Attr types
+/// AND the AttributeList.
+template <typename AttrInfo>
+static typename std::enable_if<std::is_base_of<clang::Attr, AttrInfo>::value,
+ const AttrInfo *>::type
+getAttrName(const AttrInfo &Attr) {
+ return &Attr;
+}
+static const IdentifierInfo *getAttrName(const clang::AttributeList &Attr) {
+ return Attr.getName();
+}
+
/// \brief If Expr is a valid integer constant, get the value of the integer
/// expression and return success or failure. May output an error.
-static bool checkUInt32Argument(Sema &S, const AttributeList &Attr,
- const Expr *Expr, uint32_t &Val,
- unsigned Idx = UINT_MAX) {
+template<typename AttrInfo>
+static bool checkUInt32Argument(Sema &S, const AttrInfo& Attr, const Expr *Expr,
+ uint32_t &Val, unsigned Idx = UINT_MAX) {
llvm::APSInt I(32);
if (Expr->isTypeDependent() || Expr->isValueDependent() ||
!Expr->isIntegerConstantExpr(I, S.Context)) {
if (Idx != UINT_MAX)
- S.Diag(Attr.getLoc(), diag::err_attribute_argument_n_type)
- << Attr.getName() << Idx << AANT_ArgumentIntegerConstant
+ S.Diag(getAttrLoc(Attr), diag::err_attribute_argument_n_type)
+ << getAttrName(Attr) << Idx << AANT_ArgumentIntegerConstant
<< Expr->getSourceRange();
else
- S.Diag(Attr.getLoc(), diag::err_attribute_argument_type)
- << Attr.getName() << AANT_ArgumentIntegerConstant
+ S.Diag(getAttrLoc(Attr), diag::err_attribute_argument_type)
+ << getAttrName(Attr) << AANT_ArgumentIntegerConstant
<< Expr->getSourceRange();
return false;
}
@@ -250,9 +274,9 @@ static bool checkUInt32Argument(Sema &S, const AttributeList &Attr,
/// \brief Wrapper around checkUInt32Argument, with an extra check to be sure
/// that the result will fit into a regular (signed) int. All args have the same
/// purpose as they do in checkUInt32Argument.
-static bool checkPositiveIntArgument(Sema &S, const AttributeList &Attr,
- const Expr *Expr, int &Val,
- unsigned Idx = UINT_MAX) {
+template<typename AttrInfo>
+static bool checkPositiveIntArgument(Sema &S, const AttrInfo& Attr, const Expr *Expr,
+ int &Val, unsigned Idx = UINT_MAX) {
uint32_t UVal;
if (!checkUInt32Argument(S, Attr, Expr, UVal, Idx))
return false;
@@ -287,11 +311,10 @@ static bool checkAttrMutualExclusion(Sema &S, Decl *D, SourceRange Range,
/// instance method D. May output an error.
///
/// \returns true if IdxExpr is a valid index.
-static bool checkFunctionOrMethodParameterIndex(Sema &S, const Decl *D,
- const AttributeList &Attr,
- unsigned AttrArgNum,
- const Expr *IdxExpr,
- uint64_t &Idx) {
+template <typename AttrInfo>
+static bool checkFunctionOrMethodParameterIndex(
+ Sema &S, const Decl *D, const AttrInfo &Attr, unsigned AttrArgNum,
+ const Expr *IdxExpr, uint64_t &Idx, bool AllowImplicitThis = false) {
assert(isFunctionOrMethodOrBlock(D));
// In C++ the implicit 'this' function parameter also counts.
@@ -305,24 +328,24 @@ static bool checkFunctionOrMethodParameterIndex(Sema &S, const Decl *D,
llvm::APSInt IdxInt;
if (IdxExpr->isTypeDependent() || IdxExpr->isValueDependent() ||
!IdxExpr->isIntegerConstantExpr(IdxInt, S.Context)) {
- S.Diag(Attr.getLoc(), diag::err_attribute_argument_n_type)
- << Attr.getName() << AttrArgNum << AANT_ArgumentIntegerConstant
+ S.Diag(getAttrLoc(Attr), diag::err_attribute_argument_n_type)
+ << getAttrName(Attr) << AttrArgNum << AANT_ArgumentIntegerConstant
<< IdxExpr->getSourceRange();
return false;
}
Idx = IdxInt.getLimitedValue();
if (Idx < 1 || (!IV && Idx > NumParams)) {
- S.Diag(Attr.getLoc(), diag::err_attribute_argument_out_of_bounds)
- << Attr.getName() << AttrArgNum << IdxExpr->getSourceRange();
+ S.Diag(getAttrLoc(Attr), diag::err_attribute_argument_out_of_bounds)
+ << getAttrName(Attr) << AttrArgNum << IdxExpr->getSourceRange();
return false;
}
Idx--; // Convert to zero-based.
- if (HasImplicitThisParam) {
+ if (HasImplicitThisParam && !AllowImplicitThis) {
if (Idx == 0) {
- S.Diag(Attr.getLoc(),
+ S.Diag(getAttrLoc(Attr),
diag::err_attribute_invalid_implicit_this_argument)
- << Attr.getName() << IdxExpr->getSourceRange();
+ << getAttrName(Attr) << IdxExpr->getSourceRange();
return false;
}
--Idx;
@@ -753,31 +776,48 @@ static void handleAssertExclusiveLockAttr(Sema &S, Decl *D,
Attr.getAttributeSpellingListIndex()));
}
-/// \brief Checks to be sure that the given parameter number is inbounds, and is
-/// an some integral type. Will emit appropriate diagnostics if this returns
+/// \brief Checks to be sure that the given parameter number is in bounds, and is
+/// an integral type. Will emit appropriate diagnostics if this returns
/// false.
///
/// FuncParamNo is expected to be from the user, so is base-1. AttrArgNo is used
/// to actually retrieve the argument, so it's base-0.
+template <typename AttrInfo>
static bool checkParamIsIntegerType(Sema &S, const FunctionDecl *FD,
- const AttributeList &Attr,
- unsigned FuncParamNo, unsigned AttrArgNo) {
- assert(Attr.isArgExpr(AttrArgNo) && "Expected expression argument");
+ const AttrInfo &Attr, Expr *AttrArg,
+ unsigned FuncParamNo, unsigned AttrArgNo,
+ bool AllowDependentType = false) {
uint64_t Idx;
- if (!checkFunctionOrMethodParameterIndex(S, FD, Attr, FuncParamNo,
- Attr.getArgAsExpr(AttrArgNo), Idx))
+ if (!checkFunctionOrMethodParameterIndex(S, FD, Attr, FuncParamNo, AttrArg,
+ Idx))
return false;
const ParmVarDecl *Param = FD->getParamDecl(Idx);
+ if (AllowDependentType && Param->getType()->isDependentType())
+ return true;
if (!Param->getType()->isIntegerType() && !Param->getType()->isCharType()) {
- SourceLocation SrcLoc = Attr.getArgAsExpr(AttrArgNo)->getLocStart();
+ SourceLocation SrcLoc = AttrArg->getLocStart();
S.Diag(SrcLoc, diag::err_attribute_integers_only)
- << Attr.getName() << Param->getSourceRange();
+ << getAttrName(Attr) << Param->getSourceRange();
return false;
}
return true;
}
+/// \brief Checks to be sure that the given parameter number is in bounds, and is
+/// an integral type. Will emit appropriate diagnostics if this returns false.
+///
+/// FuncParamNo is expected to be from the user, so is base-1. AttrArgNo is used
+/// to actually retrieve the argument, so it's base-0.
+static bool checkParamIsIntegerType(Sema &S, const FunctionDecl *FD,
+ const AttributeList &Attr,
+ unsigned FuncParamNo, unsigned AttrArgNo,
+ bool AllowDependentType = false) {
+ assert(Attr.isArgExpr(AttrArgNo) && "Expected expression argument");
+ return checkParamIsIntegerType(S, FD, Attr, Attr.getArgAsExpr(AttrArgNo),
+ FuncParamNo, AttrArgNo, AllowDependentType);
+}
+
static void handleAllocSizeAttr(Sema &S, Decl *D, const AttributeList &Attr) {
if (!checkAttributeAtLeastNumArgs(S, Attr, 1) ||
!checkAttributeAtMostNumArgs(S, Attr, 2))
@@ -792,7 +832,7 @@ static void handleAllocSizeAttr(Sema &S, Decl *D, const AttributeList &Attr) {
const Expr *SizeExpr = Attr.getArgAsExpr(0);
int SizeArgNo;
- // Paramater indices are 1-indexed, hence Index=1
+ // Parameter indices are 1-indexed, hence Index=1
if (!checkPositiveIntArgument(S, Attr, SizeExpr, SizeArgNo, /*Index=*/1))
return;
@@ -803,7 +843,7 @@ static void handleAllocSizeAttr(Sema &S, Decl *D, const AttributeList &Attr) {
int NumberArgNo = 0;
if (Attr.getNumArgs() == 2) {
const Expr *NumberExpr = Attr.getArgAsExpr(1);
- // Paramater indices are 1-based, hence Index=2
+ // Parameter indices are 1-based, hence Index=2
if (!checkPositiveIntArgument(S, Attr, NumberExpr, NumberArgNo,
/*Index=*/2))
return;
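
For context on the 1-based indices the fixed comments refer to, this is the usual shape of an alloc_size declaration (hypothetical allocator names):

    void *my_malloc(unsigned n) __attribute__((alloc_size(1)));
    void *my_calloc(unsigned n, unsigned size) __attribute__((alloc_size(1, 2)));
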
@@ -909,7 +949,7 @@ static bool checkFunctionConditionAttr(Sema &S, Decl *D,
Msg = "<no message provided>";
SmallVector<PartialDiagnosticAt, 8> Diags;
- if (!Cond->isValueDependent() &&
+ if (isa<FunctionDecl>(D) && !Cond->isValueDependent() &&
!Expr::isPotentialConstantExprUnevaluated(Cond, cast<FunctionDecl>(D),
Diags)) {
S.Diag(Attr.getLoc(), diag::err_attr_cond_never_constant_expr)
@@ -997,10 +1037,11 @@ static void handleDiagnoseIfAttr(Sema &S, Decl *D, const AttributeList &Attr) {
return;
}
- auto *FD = cast<FunctionDecl>(D);
- bool ArgDependent = ArgumentDependenceChecker(FD).referencesArgs(Cond);
+ bool ArgDependent = false;
+ if (const auto *FD = dyn_cast<FunctionDecl>(D))
+ ArgDependent = ArgumentDependenceChecker(FD).referencesArgs(Cond);
D->addAttr(::new (S.Context) DiagnoseIfAttr(
- Attr.getRange(), S.Context, Cond, Msg, DiagType, ArgDependent, FD,
+ Attr.getRange(), S.Context, Cond, Msg, DiagType, ArgDependent, cast<NamedDecl>(D),
Attr.getAttributeSpellingListIndex()));
}
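
The change above computes ArgDependent only when the declaration is a FunctionDecl, so diagnose_if can now be attached to other named declarations as well. The familiar function form, for reference:

    int checked_div(int a, int b)
        __attribute__((diagnose_if(b == 0, "division by zero", "error")));
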
@@ -1420,7 +1461,7 @@ static void handleNonNullAttr(Sema &S, Decl *D, const AttributeList &Attr) {
// check if the attribute came from a macro expansion or a template
// instantiation.
if (NonNullArgs.empty() && Attr.getLoc().isFileID() &&
- S.ActiveTemplateInstantiations.empty()) {
+ !S.inTemplateInstantiation()) {
bool AnyPointers = isFunctionOrMethodVariadic(D);
for (unsigned I = 0, E = getFunctionOrMethodNumParams(D);
I != E && !AnyPointers; ++I) {
@@ -1484,6 +1525,12 @@ static void handleAssumeAlignedAttr(Sema &S, Decl *D,
Attr.getAttributeSpellingListIndex());
}
+static void handleAllocAlignAttr(Sema &S, Decl *D,
+ const AttributeList &Attr) {
+ S.AddAllocAlignAttr(Attr.getRange(), D, Attr.getArgAsExpr(0),
+ Attr.getAttributeSpellingListIndex());
+}
+
void Sema::AddAssumeAlignedAttr(SourceRange AttrRange, Decl *D, Expr *E,
Expr *OE, unsigned SpellingListIndex) {
QualType ResultType = getFunctionOrMethodResultType(D);
@@ -1535,6 +1582,44 @@ void Sema::AddAssumeAlignedAttr(SourceRange AttrRange, Decl *D, Expr *E,
AssumeAlignedAttr(AttrRange, Context, E, OE, SpellingListIndex));
}
+void Sema::AddAllocAlignAttr(SourceRange AttrRange, Decl *D, Expr *ParamExpr,
+ unsigned SpellingListIndex) {
+ QualType ResultType = getFunctionOrMethodResultType(D);
+
+ AllocAlignAttr TmpAttr(AttrRange, Context, 0, SpellingListIndex);
+ SourceLocation AttrLoc = AttrRange.getBegin();
+
+ if (!ResultType->isDependentType() &&
+ !isValidPointerAttrType(ResultType, /* RefOkay */ true)) {
+ Diag(AttrLoc, diag::warn_attribute_return_pointers_refs_only)
+ << &TmpAttr << AttrRange << getFunctionOrMethodResultSourceRange(D);
+ return;
+ }
+
+ uint64_t IndexVal;
+ const auto *FuncDecl = cast<FunctionDecl>(D);
+ if (!checkFunctionOrMethodParameterIndex(*this, FuncDecl, TmpAttr,
+ /*AttrArgNo=*/1, ParamExpr,
+ IndexVal))
+ return;
+
+ QualType Ty = getFunctionOrMethodParamType(D, IndexVal);
+ if (!Ty->isDependentType() && !Ty->isIntegralType(Context)) {
+ Diag(ParamExpr->getLocStart(), diag::err_attribute_integers_only)
+ << &TmpAttr << FuncDecl->getParamDecl(IndexVal)->getSourceRange();
+ return;
+ }
+
+ // We cannot use the Idx returned from checkFunctionOrMethodParameterIndex
+ // because that has corrected for the implicit this parameter, and is zero-
+ // based. The attribute expects what the user wrote explicitly.
+ llvm::APSInt Val;
+ ParamExpr->EvaluateAsInt(Val, Context);
+
+ D->addAttr(::new (Context) AllocAlignAttr(
+ AttrRange, Context, Val.getZExtValue(), SpellingListIndex));
+}
+
/// Normalize the attribute, __foo__ becomes foo.
/// Returns true if normalization was applied.
static bool normalizeName(StringRef &AttrName) {
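
AddAllocAlignAttr above requires a pointer (or reference) return type and a 1-based index naming an integer parameter. A sketch of a conforming declaration (hypothetical function name):

    void *aligned_alloc_wrapper(unsigned long alignment, unsigned long size)
        __attribute__((alloc_align(1)));
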
@@ -1839,6 +1924,17 @@ static void handleNakedAttr(Sema &S, Decl *D, const AttributeList &Attr) {
Attr.getName()))
return;
+ if (Attr.isDeclspecAttribute()) {
+ const auto &Triple = S.getASTContext().getTargetInfo().getTriple();
+ const auto &Arch = Triple.getArch();
+ if (Arch != llvm::Triple::x86 &&
+ (Arch != llvm::Triple::arm && Arch != llvm::Triple::thumb)) {
+ S.Diag(Attr.getLoc(), diag::err_attribute_not_supported_on_arch)
+ << Attr.getName() << Triple.getArchName();
+ return;
+ }
+ }
+
D->addAttr(::new (S.Context) NakedAttr(Attr.getRange(), S.Context,
Attr.getAttributeSpellingListIndex()));
}
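
With the check above, the __declspec spelling of naked is limited to x86, ARM and Thumb; other targets get err_attribute_not_supported_on_arch, while the GNU __attribute__ spelling is unaffected. For example (requires -fms-extensions or -fdeclspec):

    __declspec(naked) void asm_stub(void);        /* OK on x86/ARM, error elsewhere */
    __attribute__((naked)) void asm_stub2(void);  /* not affected by this check */
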
@@ -1846,17 +1942,26 @@ static void handleNakedAttr(Sema &S, Decl *D, const AttributeList &Attr) {
static void handleNoReturnAttr(Sema &S, Decl *D, const AttributeList &attr) {
if (hasDeclarator(D)) return;
- if (S.CheckNoReturnAttr(attr)) return;
+ if (S.CheckNoReturnAttr(attr))
+ return;
if (!isa<ObjCMethodDecl>(D)) {
S.Diag(attr.getLoc(), diag::warn_attribute_wrong_decl_type)
- << attr.getName() << ExpectedFunctionOrMethod;
+ << attr.getName() << ExpectedFunctionOrMethod;
return;
}
- D->addAttr(::new (S.Context)
- NoReturnAttr(attr.getRange(), S.Context,
- attr.getAttributeSpellingListIndex()));
+ D->addAttr(::new (S.Context) NoReturnAttr(
+ attr.getRange(), S.Context, attr.getAttributeSpellingListIndex()));
+}
+
+static void handleNoCallerSavedRegsAttr(Sema &S, Decl *D,
+ const AttributeList &Attr) {
+ if (S.CheckNoCallerSavedRegsAttr(Attr))
+ return;
+
+ D->addAttr(::new (S.Context) AnyX86NoCallerSavedRegistersAttr(
+ Attr.getRange(), S.Context, Attr.getAttributeSpellingListIndex()));
}
bool Sema::CheckNoReturnAttr(const AttributeList &attr) {
@@ -1868,6 +1973,22 @@ bool Sema::CheckNoReturnAttr(const AttributeList &attr) {
return false;
}
+bool Sema::CheckNoCallerSavedRegsAttr(const AttributeList &Attr) {
+ // Check whether the attribute is valid on the current target.
+ if (!Attr.existsInTarget(Context.getTargetInfo())) {
+ Diag(Attr.getLoc(), diag::warn_unknown_attribute_ignored) << Attr.getName();
+ Attr.setInvalid();
+ return true;
+ }
+
+ if (!checkAttributeNumArgs(*this, Attr, 0)) {
+ Attr.setInvalid();
+ return true;
+ }
+
+ return false;
+}
+
static void handleAnalyzerNoReturnAttr(Sema &S, Decl *D,
const AttributeList &Attr) {
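
CheckNoCallerSavedRegsAttr only verifies that the attribute exists for the target and takes no arguments; typical usage on x86 looks like this:

    __attribute__((no_caller_saved_registers))
    void interrupt_body(void);
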
@@ -2303,10 +2424,8 @@ static void handleAvailabilityAttr(Sema &S, Decl *D,
<< Platform->Ident;
NamedDecl *ND = dyn_cast<NamedDecl>(D);
- if (!ND) {
- S.Diag(Attr.getLoc(), diag::warn_attribute_ignored) << Attr.getName();
+ if (!ND) // We warned about this already, so just return.
return;
- }
AvailabilityChange Introduced = Attr.getAvailabilityIntroduced();
AvailabilityChange Deprecated = Attr.getAvailabilityDeprecated();
@@ -2409,6 +2528,26 @@ static void handleAvailabilityAttr(Sema &S, Decl *D,
}
}
+static void handleExternalSourceSymbolAttr(Sema &S, Decl *D,
+ const AttributeList &Attr) {
+ if (!checkAttributeAtLeastNumArgs(S, Attr, 1))
+ return;
+ assert(checkAttributeAtMostNumArgs(S, Attr, 3) &&
+ "Invalid number of arguments in an external_source_symbol attribute");
+
+ StringRef Language;
+ if (const auto *SE = dyn_cast_or_null<StringLiteral>(Attr.getArgAsExpr(0)))
+ Language = SE->getString();
+ StringRef DefinedIn;
+ if (const auto *SE = dyn_cast_or_null<StringLiteral>(Attr.getArgAsExpr(1)))
+ DefinedIn = SE->getString();
+ bool IsGeneratedDeclaration = Attr.getArgAsIdent(2) != nullptr;
+
+ D->addAttr(::new (S.Context) ExternalSourceSymbolAttr(
+ Attr.getRange(), S.Context, Language, DefinedIn, IsGeneratedDeclaration,
+ Attr.getAttributeSpellingListIndex()));
+}
+
template <class T>
static T *mergeVisibilityAttr(Sema &S, Decl *D, SourceRange range,
typename T::VisibilityType value,
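
The handler reads the external_source_symbol arguments positionally: a language string, a defined_in string, and an optional generated_declaration token. In source they are usually written with the named form:

    __attribute__((external_source_symbol(language="Swift", defined_in="MyModule",
                                          generated_declaration)))
    void imported_from_swift(void);
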
@@ -2753,6 +2892,28 @@ static void handleWorkGroupSize(Sema &S, Decl *D,
Attr.getAttributeSpellingListIndex()));
}
+// Handles intel_reqd_sub_group_size.
+static void handleSubGroupSize(Sema &S, Decl *D, const AttributeList &Attr) {
+ uint32_t SGSize;
+ const Expr *E = Attr.getArgAsExpr(0);
+ if (!checkUInt32Argument(S, Attr, E, SGSize))
+ return;
+ if (SGSize == 0) {
+ S.Diag(Attr.getLoc(), diag::err_attribute_argument_is_zero)
+ << Attr.getName() << E->getSourceRange();
+ return;
+ }
+
+ OpenCLIntelReqdSubGroupSizeAttr *Existing =
+ D->getAttr<OpenCLIntelReqdSubGroupSizeAttr>();
+ if (Existing && Existing->getSubGroupSize() != SGSize)
+ S.Diag(Attr.getLoc(), diag::warn_duplicate_attribute) << Attr.getName();
+
+ D->addAttr(::new (S.Context) OpenCLIntelReqdSubGroupSizeAttr(
+ Attr.getRange(), S.Context, SGSize,
+ Attr.getAttributeSpellingListIndex()));
+}
+
static void handleVecTypeHint(Sema &S, Decl *D, const AttributeList &Attr) {
if (!Attr.hasParsedType()) {
S.Diag(Attr.getLoc(), diag::err_attribute_wrong_number_arguments)
@@ -2917,6 +3078,28 @@ static void handleCleanupAttr(Sema &S, Decl *D, const AttributeList &Attr) {
Attr.getAttributeSpellingListIndex()));
}
+static void handleEnumExtensibilityAttr(Sema &S, Decl *D,
+ const AttributeList &Attr) {
+ if (!Attr.isArgIdent(0)) {
+ S.Diag(Attr.getLoc(), diag::err_attribute_argument_n_type)
+ << Attr.getName() << 0 << AANT_ArgumentIdentifier;
+ return;
+ }
+
+ EnumExtensibilityAttr::Kind ExtensibilityKind;
+ IdentifierInfo *II = Attr.getArgAsIdent(0)->Ident;
+ if (!EnumExtensibilityAttr::ConvertStrToKind(II->getName(),
+ ExtensibilityKind)) {
+ S.Diag(Attr.getLoc(), diag::warn_attribute_type_not_supported)
+ << Attr.getName() << II;
+ return;
+ }
+
+ D->addAttr(::new (S.Context) EnumExtensibilityAttr(
+ Attr.getRange(), S.Context, ExtensibilityKind,
+ Attr.getAttributeSpellingListIndex()));
+}
+
/// Handle __attribute__((format_arg((idx)))) attribute based on
/// http://gcc.gnu.org/onlinedocs/gcc/Function-Attributes.html
static void handleFormatArgAttr(Sema &S, Decl *D, const AttributeList &Attr) {
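
handleEnumExtensibilityAttr accepts an identifier argument that must convert to a known kind; anything it does not recognize draws warn_attribute_type_not_supported. For example:

    enum __attribute__((enum_extensibility(closed))) TrafficLight { Red, Yellow, Green };
    enum __attribute__((enum_extensibility(open)))   Options      { OptA, OptB };
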
@@ -3194,8 +3377,9 @@ static void handleTransparentUnionAttr(Sema &S, Decl *D,
}
if (!RD->isCompleteDefinition()) {
- S.Diag(Attr.getLoc(),
- diag::warn_transparent_union_attribute_not_definition);
+ if (!RD->isBeingDefined())
+ S.Diag(Attr.getLoc(),
+ diag::warn_transparent_union_attribute_not_definition);
return;
}
@@ -4049,6 +4233,26 @@ static void handleCallConvAttr(Sema &S, Decl *D, const AttributeList &Attr) {
}
}
+static void handleSuppressAttr(Sema &S, Decl *D, const AttributeList &Attr) {
+ if (!checkAttributeAtLeastNumArgs(S, Attr, 1))
+ return;
+
+ std::vector<StringRef> DiagnosticIdentifiers;
+ for (unsigned I = 0, E = Attr.getNumArgs(); I != E; ++I) {
+ StringRef RuleName;
+
+ if (!S.checkStringLiteralArgumentAttr(Attr, I, RuleName, nullptr))
+ return;
+
+ // FIXME: Warn if the rule name is unknown. This is tricky because only
+ // clang-tidy knows about available rules.
+ DiagnosticIdentifiers.push_back(RuleName);
+ }
+ D->addAttr(::new (S.Context) SuppressAttr(
+ Attr.getRange(), S.Context, DiagnosticIdentifiers.data(),
+ DiagnosticIdentifiers.size(), Attr.getAttributeSpellingListIndex()));
+}
+
bool Sema::CheckCallingConvAttr(const AttributeList &attr, CallingConv &CC,
const FunctionDecl *FD) {
if (attr.isInvalid())
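
handleSuppressAttr collects one string literal per argument and stores the rule names verbatim (they are not validated, per the FIXME). With the C++ spelling this looks like:

    [[gsl::suppress("type.1", "bounds.4")]]
    void unchecked_cast(void *p);
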
@@ -4077,7 +4281,7 @@ bool Sema::CheckCallingConvAttr(const AttributeList &attr, CallingConv &CC,
case AttributeList::AT_RegCall: CC = CC_X86RegCall; break;
case AttributeList::AT_MSABI:
CC = Context.getTargetInfo().getTriple().isOSWindows() ? CC_C :
- CC_X86_64Win64;
+ CC_Win64;
break;
case AttributeList::AT_SysVABI:
CC = Context.getTargetInfo().getTriple().isOSWindows() ? CC_X86_64SysV :
@@ -4398,6 +4602,21 @@ static void handleTypeTagForDatatypeAttr(Sema &S, Decl *D,
Attr.getAttributeSpellingListIndex()));
}
+static void handleXRayLogArgsAttr(Sema &S, Decl *D,
+ const AttributeList &Attr) {
+ uint64_t ArgCount;
+
+ if (!checkFunctionOrMethodParameterIndex(S, D, Attr, 1, Attr.getArgAsExpr(0),
+ ArgCount,
+ true /* AllowImplicitThis*/))
+ return;
+
+ // ArgCount isn't a parameter index [0;n), it's a count [1;n] - hence + 1.
+ D->addAttr(::new (S.Context)
+ XRayLogArgsAttr(Attr.getRange(), S.Context, ++ArgCount,
+ Attr.getAttributeSpellingListIndex()));
+}
+
//===----------------------------------------------------------------------===//
// Checker-specific attribute handlers.
//===----------------------------------------------------------------------===//
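
handleXRayLogArgsAttr stores a 1-based count of leading arguments to log, and passes AllowImplicitThis so member functions can count 'this'. Using the GNU spellings:

    __attribute__((xray_always_instrument, xray_log_args(1)))
    void handle_request(int request_id);
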
@@ -4461,6 +4680,16 @@ void Sema::AddNSConsumedAttr(SourceRange attrRange, Decl *D,
CFConsumedAttr(attrRange, Context, spellingIndex));
}
+bool Sema::checkNSReturnsRetainedReturnType(SourceLocation loc,
+ QualType type) {
+ if (isValidSubjectOfNSReturnsRetainedAttribute(type))
+ return false;
+
+ Diag(loc, diag::warn_ns_attribute_wrong_return_type)
+ << "'ns_returns_retained'" << 0 << 0;
+ return true;
+}
+
static void handleNSReturnsRetainedAttr(Sema &S, Decl *D,
const AttributeList &Attr) {
QualType returnType;
@@ -4482,6 +4711,8 @@ static void handleNSReturnsRetainedAttr(Sema &S, Decl *D,
<< Attr.getRange();
return;
}
+ } else if (Attr.isUsedAsTypeAttr()) {
+ return;
} else {
AttributeDeclKind ExpectedDeclKind;
switch (Attr.getKind()) {
@@ -4525,6 +4756,9 @@ static void handleNSReturnsRetainedAttr(Sema &S, Decl *D,
}
if (!typeOK) {
+ if (Attr.isUsedAsTypeAttr())
+ return;
+
if (isa<ParmVarDecl>(D)) {
S.Diag(D->getLocStart(), diag::warn_ns_attribute_wrong_parameter_type)
<< Attr.getName() << /*pointer-to-CF*/2
@@ -4864,6 +5098,15 @@ static void handleUuidAttr(Sema &S, Decl *D, const AttributeList &Attr) {
}
}
+ // FIXME: It'd be nice to also emit a fixit removing uuid(...) (and, if it's
+ // the only thing in the [] list, the [] too), and add an insertion of
+ // __declspec(uuid(...)). But sadly, neither the SourceLocs of the commas
+ // separating attributes nor of the [ and the ] are in the AST.
+ // Cf "SourceLocations of attribute list delimiters - [[ ... , ... ]] etc"
+ // on cfe-dev.
+ if (Attr.isMicrosoftAttribute()) // Check for [uuid(...)] spelling.
+ S.Diag(Attr.getLoc(), diag::warn_atl_uuid_deprecated);
+
UuidAttr *UA = S.mergeUuidAttr(D, Attr.getRange(),
Attr.getAttributeSpellingListIndex(), StrRef);
if (UA)
@@ -5127,6 +5370,32 @@ static void handleAnyX86InterruptAttr(Sema &S, Decl *D,
D->addAttr(UsedAttr::CreateImplicit(S.Context));
}
+static void handleAVRInterruptAttr(Sema &S, Decl *D, const AttributeList &Attr) {
+ if (!isFunctionOrMethod(D)) {
+ S.Diag(D->getLocation(), diag::warn_attribute_wrong_decl_type)
+ << "'interrupt'" << ExpectedFunction;
+ return;
+ }
+
+ if (!checkAttributeNumArgs(S, Attr, 0))
+ return;
+
+ handleSimpleAttribute<AVRInterruptAttr>(S, D, Attr);
+}
+
+static void handleAVRSignalAttr(Sema &S, Decl *D, const AttributeList &Attr) {
+ if (!isFunctionOrMethod(D)) {
+ S.Diag(D->getLocation(), diag::warn_attribute_wrong_decl_type)
+ << "'signal'" << ExpectedFunction;
+ return;
+ }
+
+ if (!checkAttributeNumArgs(S, Attr, 0))
+ return;
+
+ handleSimpleAttribute<AVRSignalAttr>(S, D, Attr);
+}
+
static void handleInterruptAttr(Sema &S, Decl *D, const AttributeList &Attr) {
// Dispatch the interrupt attribute based on the current target.
switch (S.Context.getTargetInfo().getTriple().getArch()) {
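
On AVR, both new attributes apply only to functions and take no arguments; the interrupt spelling is routed through handleInterruptAttr below. Typical declarations:

    __attribute__((interrupt)) void timer0_overflow(void);
    __attribute__((signal))    void adc_complete(void);
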
@@ -5141,6 +5410,9 @@ static void handleInterruptAttr(Sema &S, Decl *D, const AttributeList &Attr) {
case llvm::Triple::x86_64:
handleAnyX86InterruptAttr(S, D, Attr);
break;
+ case llvm::Triple::avr:
+ handleAVRInterruptAttr(S, D, Attr);
+ break;
default:
handleARMInterruptAttr(S, D, Attr);
break;
@@ -5560,18 +5832,21 @@ static void handleOpenCLNoSVMAttr(Sema &S, Decl *D, const AttributeList &Attr) {
static bool handleCommonAttributeFeatures(Sema &S, Scope *scope, Decl *D,
const AttributeList &Attr) {
// Several attributes carry different semantics than the parsing requires, so
- // those are opted out of the common handling.
+ // those are opted out of the common argument checks.
//
// We also bail on unknown and ignored attributes because those are handled
// as part of the target-specific handling logic.
- if (Attr.hasCustomParsing() ||
- Attr.getKind() == AttributeList::UnknownAttribute)
+ if (Attr.getKind() == AttributeList::UnknownAttribute)
return false;
-
// Check whether the attribute requires specific language extensions to be
// enabled.
if (!Attr.diagnoseLangOpts(S))
return true;
+ // Check whether the attribute appertains to the given subject.
+ if (!Attr.diagnoseAppertainsTo(S, D))
+ return true;
+ if (Attr.hasCustomParsing())
+ return false;
if (Attr.getMinArgs() == Attr.getMaxArgs()) {
// If there are no optional arguments, then checking for the argument count
@@ -5588,10 +5863,6 @@ static bool handleCommonAttributeFeatures(Sema &S, Scope *scope, Decl *D,
return true;
}
- // Check whether the attribute appertains to the given subject.
- if (!Attr.diagnoseAppertainsTo(S, D))
- return true;
-
return false;
}
@@ -5683,12 +5954,18 @@ static void ProcessDeclAttribute(Sema &S, Scope *scope, Decl *D,
handleDLLAttr(S, D, Attr);
break;
case AttributeList::AT_Mips16:
- handleSimpleAttributeWithExclusions<Mips16Attr, MipsInterruptAttr>(S, D,
- Attr);
+ handleSimpleAttributeWithExclusions<Mips16Attr, MicroMipsAttr,
+ MipsInterruptAttr>(S, D, Attr);
break;
case AttributeList::AT_NoMips16:
handleSimpleAttribute<NoMips16Attr>(S, D, Attr);
break;
+ case AttributeList::AT_MicroMips:
+ handleSimpleAttributeWithExclusions<MicroMipsAttr, Mips16Attr>(S, D, Attr);
+ break;
+ case AttributeList::AT_NoMicroMips:
+ handleSimpleAttribute<NoMicroMipsAttr>(S, D, Attr);
+ break;
case AttributeList::AT_AMDGPUFlatWorkGroupSize:
handleAMDGPUFlatWorkGroupSizeAttr(S, D, Attr);
break;
@@ -5701,6 +5978,9 @@ static void ProcessDeclAttribute(Sema &S, Scope *scope, Decl *D,
case AttributeList::AT_AMDGPUNumVGPR:
handleAMDGPUNumVGPRAttr(S, D, Attr);
break;
+ case AttributeList::AT_AVRSignal:
+ handleAVRSignalAttr(S, D, Attr);
+ break;
case AttributeList::AT_IBAction:
handleSimpleAttribute<IBActionAttr>(S, D, Attr);
break;
@@ -5773,6 +6053,9 @@ static void ProcessDeclAttribute(Sema &S, Scope *scope, Decl *D,
case AttributeList::AT_ExtVectorType:
handleExtVectorTypeAttr(S, scope, D, Attr);
break;
+ case AttributeList::AT_ExternalSourceSymbol:
+ handleExternalSourceSymbolAttr(S, D, Attr);
+ break;
case AttributeList::AT_MinSize:
handleMinSizeAttr(S, D, Attr);
break;
@@ -5782,6 +6065,9 @@ static void ProcessDeclAttribute(Sema &S, Scope *scope, Decl *D,
case AttributeList::AT_FlagEnum:
handleSimpleAttribute<FlagEnumAttr>(S, D, Attr);
break;
+ case AttributeList::AT_EnumExtensibility:
+ handleEnumExtensibilityAttr(S, D, Attr);
+ break;
case AttributeList::AT_Flatten:
handleSimpleAttribute<FlattenAttr>(S, D, Attr);
break;
@@ -5838,6 +6124,9 @@ static void ProcessDeclAttribute(Sema &S, Scope *scope, Decl *D,
case AttributeList::AT_AssumeAligned:
handleAssumeAlignedAttr(S, D, Attr);
break;
+ case AttributeList::AT_AllocAlign:
+ handleAllocAlignAttr(S, D, Attr);
+ break;
case AttributeList::AT_Overloadable:
handleSimpleAttribute<OverloadableAttr>(S, D, Attr);
break;
@@ -5924,6 +6213,9 @@ static void ProcessDeclAttribute(Sema &S, Scope *scope, Decl *D,
case AttributeList::AT_ReqdWorkGroupSize:
handleWorkGroupSize<ReqdWorkGroupSizeAttr>(S, D, Attr);
break;
+ case AttributeList::AT_OpenCLIntelReqdSubGroupSize:
+ handleSubGroupSize(S, D, Attr);
+ break;
case AttributeList::AT_VecTypeHint:
handleVecTypeHint(S, D, Attr);
break;
@@ -6057,6 +6349,9 @@ static void ProcessDeclAttribute(Sema &S, Scope *scope, Decl *D,
case AttributeList::AT_PreserveAll:
handleCallConvAttr(S, D, Attr);
break;
+ case AttributeList::AT_Suppress:
+ handleSuppressAttr(S, D, Attr);
+ break;
case AttributeList::AT_OpenCLKernel:
handleSimpleAttribute<OpenCLKernelAttr>(S, D, Attr);
break;
@@ -6217,6 +6512,9 @@ static void ProcessDeclAttribute(Sema &S, Scope *scope, Decl *D,
case AttributeList::AT_TypeTagForDatatype:
handleTypeTagForDatatypeAttr(S, D, Attr);
break;
+ case AttributeList::AT_AnyX86NoCallerSavedRegisters:
+ handleNoCallerSavedRegsAttr(S, D, Attr);
+ break;
case AttributeList::AT_RenderScriptKernel:
handleSimpleAttribute<RenderScriptKernelAttr>(S, D, Attr);
break;
@@ -6224,6 +6522,9 @@ static void ProcessDeclAttribute(Sema &S, Scope *scope, Decl *D,
case AttributeList::AT_XRayInstrument:
handleSimpleAttribute<XRayInstrumentAttr>(S, D, Attr);
break;
+ case AttributeList::AT_XRayLogArgs:
+ handleXRayLogArgsAttr(S, D, Attr);
+ break;
}
}
@@ -6279,10 +6580,22 @@ void Sema::ProcessDeclAttributeList(Scope *S, Decl *D,
Diag(D->getLocation(), diag::err_attribute_wrong_decl_type)
<< A << ExpectedKernelFunction;
D->setInvalidDecl();
+ } else if (Attr *A = D->getAttr<OpenCLIntelReqdSubGroupSizeAttr>()) {
+ Diag(D->getLocation(), diag::err_opencl_kernel_attr) << A;
+ D->setInvalidDecl();
}
}
}
+// Helper for delayed processing of the TransparentUnion attribute.
+void Sema::ProcessDeclAttributeDelayed(Decl *D, const AttributeList *AttrList) {
+ for (const AttributeList *Attr = AttrList; Attr; Attr = Attr->getNext())
+ if (Attr->getKind() == AttributeList::AT_TransparentUnion) {
+ handleTransparentUnionAttr(*this, D, *Attr);
+ break;
+ }
+}
+
// Annotation attributes are the only attributes allowed after an access
// specifier.
bool Sema::ProcessAccessDeclAttributeList(AccessSpecDecl *ASDecl,
@@ -6444,6 +6757,9 @@ void Sema::ProcessDeclAttributes(Scope *S, Decl *D, const Declarator &PD) {
// Finally, apply any attributes on the decl itself.
if (const AttributeList *Attrs = PD.getAttributes())
ProcessDeclAttributeList(S, D, Attrs);
+
+ // Apply additional attributes specified by '#pragma clang attribute'.
+ AddPragmaAttributes(S, D);
}
/// Is the given declaration allowed to use a forbidden type?
@@ -6538,6 +6854,50 @@ static const AvailabilityAttr *getAttrForPlatform(ASTContext &Context,
return nullptr;
}
+/// The diagnostic we should emit for \c D, and the declaration that
+/// originated it, or \c AR_Available.
+///
+/// \param D The declaration to check.
+/// \param Message If non-null, this will be populated with the message from
+/// the availability attribute that is selected.
+static std::pair<AvailabilityResult, const NamedDecl *>
+ShouldDiagnoseAvailabilityOfDecl(const NamedDecl *D, std::string *Message) {
+ AvailabilityResult Result = D->getAvailability(Message);
+
+ // For typedefs, if the typedef declaration appears available, look
+ // at the underlying type to see if it is more restrictive.
+ while (const TypedefNameDecl *TD = dyn_cast<TypedefNameDecl>(D)) {
+ if (Result == AR_Available) {
+ if (const TagType *TT = TD->getUnderlyingType()->getAs<TagType>()) {
+ D = TT->getDecl();
+ Result = D->getAvailability(Message);
+ continue;
+ }
+ }
+ break;
+ }
+
+ // Forward class declarations get their attributes from their definition.
+ if (const ObjCInterfaceDecl *IDecl = dyn_cast<ObjCInterfaceDecl>(D)) {
+ if (IDecl->getDefinition()) {
+ D = IDecl->getDefinition();
+ Result = D->getAvailability(Message);
+ }
+ }
+
+ if (const auto *ECD = dyn_cast<EnumConstantDecl>(D))
+ if (Result == AR_Available) {
+ const DeclContext *DC = ECD->getDeclContext();
+ if (const auto *TheEnumDecl = dyn_cast<EnumDecl>(DC)) {
+ Result = TheEnumDecl->getAvailability(Message);
+ D = TheEnumDecl;
+ }
+ }
+
+ return {Result, D};
+}
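
A rough, hypothetical sketch of the typedef walk above: the typedef itself carries no availability attribute, so the loop falls through to the underlying tag.

    struct __attribute__((deprecated("use NewImpl"))) OldImpl { int x; };
    typedef struct OldImpl OldAlias;  /* looks available on its own */

    OldAlias a;  /* should be diagnosed against OldImpl, not OldAlias */
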
+
+/// \brief Whether we should emit a diagnostic for \c K and \c DeclVersion in
/// the context of \c Ctx. For example, we should emit an unavailable diagnostic
/// in a deprecated context, but not the other way around.
@@ -6603,8 +6963,60 @@ static bool ShouldDiagnoseAvailabilityInContext(Sema &S, AvailabilityResult K,
return true;
}
+static bool
+shouldDiagnoseAvailabilityByDefault(const ASTContext &Context,
+ const VersionTuple &DeploymentVersion,
+ const VersionTuple &DeclVersion) {
+ const auto &Triple = Context.getTargetInfo().getTriple();
+ VersionTuple ForceAvailabilityFromVersion;
+ switch (Triple.getOS()) {
+ case llvm::Triple::IOS:
+ case llvm::Triple::TvOS:
+ ForceAvailabilityFromVersion = VersionTuple(/*Major=*/11);
+ break;
+ case llvm::Triple::WatchOS:
+ ForceAvailabilityFromVersion = VersionTuple(/*Major=*/4);
+ break;
+ case llvm::Triple::Darwin:
+ case llvm::Triple::MacOSX:
+ ForceAvailabilityFromVersion = VersionTuple(/*Major=*/10, /*Minor=*/13);
+ break;
+ default:
+ // New Apple targets should always warn about availability.
+ return Triple.getVendor() == llvm::Triple::Apple;
+ }
+ return DeploymentVersion >= ForceAvailabilityFromVersion ||
+ DeclVersion >= ForceAvailabilityFromVersion;
+}
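
As a concrete reading of the thresholds above: with a macOS deployment target of 10.12 and a declaration introduced in 10.13, the second comparison (DeclVersion >= 10.13) holds, so the function returns true and the '_new' variants of the diagnostics (warn_partial_availability_new and friends) are selected; with a 10.11 target and a declaration introduced in 10.12, both comparisons fail and the warning stays opt-in behind -Wunguarded-availability.
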
+
+static NamedDecl *findEnclosingDeclToAnnotate(Decl *OrigCtx) {
+ for (Decl *Ctx = OrigCtx; Ctx;
+ Ctx = cast_or_null<Decl>(Ctx->getDeclContext())) {
+ if (isa<TagDecl>(Ctx) || isa<FunctionDecl>(Ctx) || isa<ObjCMethodDecl>(Ctx))
+ return cast<NamedDecl>(Ctx);
+ if (auto *CD = dyn_cast<ObjCContainerDecl>(Ctx)) {
+ if (auto *Imp = dyn_cast<ObjCImplDecl>(Ctx))
+ return Imp->getClassInterface();
+ return CD;
+ }
+ }
+
+ return dyn_cast<NamedDecl>(OrigCtx);
+}
+
+/// Actually emit an availability diagnostic for a reference to an unavailable
+/// decl.
+///
+/// \param Ctx The context that the reference occurred in
+/// \param ReferringDecl The exact declaration that was referenced.
+/// \param OffendingDecl A related decl to \c ReferringDecl that has an
+/// availability attribute corresponding to \c K attached to it. Note that this
+/// may not be the same as ReferringDecl, i.e. if an EnumDecl is annotated and
+/// we refer to a member EnumConstantDecl, ReferringDecl is the EnumConstantDecl
+/// and OffendingDecl is the EnumDecl.
static void DoEmitAvailabilityWarning(Sema &S, AvailabilityResult K,
- Decl *Ctx, const NamedDecl *D,
+ Decl *Ctx, const NamedDecl *ReferringDecl,
+ const NamedDecl *OffendingDecl,
StringRef Message, SourceLocation Loc,
const ObjCInterfaceDecl *UnknownObjCClass,
const ObjCPropertyDecl *ObjCProperty,
@@ -6612,6 +7024,7 @@ static void DoEmitAvailabilityWarning(Sema &S, AvailabilityResult K,
// Diagnostics for deprecated or unavailable.
unsigned diag, diag_message, diag_fwdclass_message;
unsigned diag_available_here = diag::note_availability_specified_here;
+ SourceLocation NoteLocation = OffendingDecl->getLocation();
// Matches 'diag::note_property_attribute' options.
unsigned property_note_select;
@@ -6620,7 +7033,7 @@ static void DoEmitAvailabilityWarning(Sema &S, AvailabilityResult K,
unsigned available_here_select_kind;
VersionTuple DeclVersion;
- if (const AvailabilityAttr *AA = getAttrForPlatform(S.Context, D))
+ if (const AvailabilityAttr *AA = getAttrForPlatform(S.Context, OffendingDecl))
DeclVersion = AA->getIntroduced();
if (!ShouldDiagnoseAvailabilityInContext(S, K, DeclVersion, Ctx))
@@ -6634,6 +7047,8 @@ static void DoEmitAvailabilityWarning(Sema &S, AvailabilityResult K,
diag_fwdclass_message = diag::warn_deprecated_fwdclass_message;
property_note_select = /* deprecated */ 0;
available_here_select_kind = /* deprecated */ 2;
+ if (const auto *attr = OffendingDecl->getAttr<DeprecatedAttr>())
+ NoteLocation = attr->getLocation();
break;
case AR_Unavailable:
@@ -6644,13 +7059,14 @@ static void DoEmitAvailabilityWarning(Sema &S, AvailabilityResult K,
property_note_select = /* unavailable */ 1;
available_here_select_kind = /* unavailable */ 0;
- if (auto attr = D->getAttr<UnavailableAttr>()) {
+ if (auto attr = OffendingDecl->getAttr<UnavailableAttr>()) {
if (attr->isImplicit() && attr->getImplicitReason()) {
// Most of these failures are due to extra restrictions in ARC;
// reflect that in the primary diagnostic when applicable.
auto flagARCError = [&] {
if (S.getLangOpts().ObjCAutoRefCount &&
- S.getSourceManager().isInSystemHeader(D->getLocation()))
+ S.getSourceManager().isInSystemHeader(
+ OffendingDecl->getLocation()))
diag = diag::err_unavailable_in_arc;
};
@@ -6688,13 +7104,27 @@ static void DoEmitAvailabilityWarning(Sema &S, AvailabilityResult K,
}
break;
- case AR_NotYetIntroduced:
- diag = diag::warn_partial_availability;
- diag_message = diag::warn_partial_message;
- diag_fwdclass_message = diag::warn_partial_fwdclass_message;
+ case AR_NotYetIntroduced: {
+ // We would like to emit the diagnostic even if -Wunguarded-availability is
+ // not specified, for deployment targets of iOS 11 (or equivalent) and later,
+ // or for declarations that were introduced in iOS 11 (macOS 10.13, ...) or
+ // later.
+ const AvailabilityAttr *AA =
+ getAttrForPlatform(S.getASTContext(), OffendingDecl);
+ VersionTuple Introduced = AA->getIntroduced();
+ bool NewWarning = shouldDiagnoseAvailabilityByDefault(
+ S.Context, S.Context.getTargetInfo().getPlatformMinVersion(),
+ Introduced);
+ diag = NewWarning ? diag::warn_partial_availability_new
+ : diag::warn_partial_availability;
+ diag_message = NewWarning ? diag::warn_partial_message_new
+ : diag::warn_partial_message;
+ diag_fwdclass_message = NewWarning ? diag::warn_partial_fwdclass_message_new
+ : diag::warn_partial_fwdclass_message;
property_note_select = /* partial */ 2;
available_here_select_kind = /* partial */ 3;
break;
+ }
case AR_Available:
llvm_unreachable("Warning for availability of available declaration?");
@@ -6703,9 +7133,9 @@ static void DoEmitAvailabilityWarning(Sema &S, AvailabilityResult K,
CharSourceRange UseRange;
StringRef Replacement;
if (K == AR_Deprecated) {
- if (auto attr = D->getAttr<DeprecatedAttr>())
+ if (auto attr = OffendingDecl->getAttr<DeprecatedAttr>())
Replacement = attr->getReplacement();
- if (auto attr = getAttrForPlatform(S.Context, D))
+ if (auto attr = getAttrForPlatform(S.Context, OffendingDecl))
Replacement = attr->getReplacement();
if (!Replacement.empty())
@@ -6714,21 +7144,21 @@ static void DoEmitAvailabilityWarning(Sema &S, AvailabilityResult K,
}
if (!Message.empty()) {
- S.Diag(Loc, diag_message) << D << Message
+ S.Diag(Loc, diag_message) << ReferringDecl << Message
<< (UseRange.isValid() ?
FixItHint::CreateReplacement(UseRange, Replacement) : FixItHint());
if (ObjCProperty)
S.Diag(ObjCProperty->getLocation(), diag::note_property_attribute)
<< ObjCProperty->getDeclName() << property_note_select;
} else if (!UnknownObjCClass) {
- S.Diag(Loc, diag) << D
+ S.Diag(Loc, diag) << ReferringDecl
<< (UseRange.isValid() ?
FixItHint::CreateReplacement(UseRange, Replacement) : FixItHint());
if (ObjCProperty)
S.Diag(ObjCProperty->getLocation(), diag::note_property_attribute)
<< ObjCProperty->getDeclName() << property_note_select;
} else {
- S.Diag(Loc, diag_fwdclass_message) << D
+ S.Diag(Loc, diag_fwdclass_message) << ReferringDecl
<< (UseRange.isValid() ?
FixItHint::CreateReplacement(UseRange, Replacement) : FixItHint());
S.Diag(UnknownObjCClass->getLocation(), diag::note_forward_class);
@@ -6736,27 +7166,36 @@ static void DoEmitAvailabilityWarning(Sema &S, AvailabilityResult K,
// The declaration can have multiple availability attributes, we are looking
// at one of them.
- const AvailabilityAttr *A = getAttrForPlatform(S.Context, D);
+ const AvailabilityAttr *A = getAttrForPlatform(S.Context, OffendingDecl);
if (A && A->isInherited()) {
- for (const Decl *Redecl = D->getMostRecentDecl(); Redecl;
+ for (const Decl *Redecl = OffendingDecl->getMostRecentDecl(); Redecl;
Redecl = Redecl->getPreviousDecl()) {
const AvailabilityAttr *AForRedecl = getAttrForPlatform(S.Context,
Redecl);
if (AForRedecl && !AForRedecl->isInherited()) {
// If D is a declaration with inherited attributes, the note should
// point to the declaration with actual attributes.
- S.Diag(Redecl->getLocation(), diag_available_here) << D
+ S.Diag(Redecl->getLocation(), diag_available_here) << OffendingDecl
<< available_here_select_kind;
break;
}
}
}
else
- S.Diag(D->getLocation(), diag_available_here)
- << D << available_here_select_kind;
+ S.Diag(NoteLocation, diag_available_here)
+ << OffendingDecl << available_here_select_kind;
if (K == AR_NotYetIntroduced)
- S.Diag(Loc, diag::note_partial_availability_silence) << D;
+ if (const auto *Enclosing = findEnclosingDeclToAnnotate(Ctx)) {
+ if (auto *TD = dyn_cast<TagDecl>(Enclosing))
+ if (TD->getDeclName().isEmpty()) {
+ S.Diag(TD->getLocation(), diag::note_partial_availability_silence)
+ << /*Anonymous*/1 << TD->getKindName();
+ return;
+ }
+ S.Diag(Enclosing->getLocation(), diag::note_partial_availability_silence)
+ << /*Named*/0 << Enclosing;
+ }
}
static void handleDelayedAvailabilityCheck(Sema &S, DelayedDiagnostic &DD,
@@ -6766,9 +7205,9 @@ static void handleDelayedAvailabilityCheck(Sema &S, DelayedDiagnostic &DD,
DD.Triggered = true;
DoEmitAvailabilityWarning(
- S, DD.getAvailabilityResult(), Ctx, DD.getAvailabilityDecl(),
- DD.getAvailabilityMessage(), DD.Loc, DD.getUnknownObjCClass(),
- DD.getObjCProperty(), false);
+ S, DD.getAvailabilityResult(), Ctx, DD.getAvailabilityReferringDecl(),
+ DD.getAvailabilityOffendingDecl(), DD.getAvailabilityMessage(), DD.Loc,
+ DD.getUnknownObjCClass(), DD.getObjCProperty(), false);
}
void Sema::PopParsingDeclaration(ParsingDeclState state, Decl *decl) {
@@ -6826,27 +7265,93 @@ void Sema::redelayDiagnostics(DelayedDiagnosticPool &pool) {
curPool->steal(pool);
}
-void Sema::EmitAvailabilityWarning(AvailabilityResult AR,
- NamedDecl *D, StringRef Message,
- SourceLocation Loc,
- const ObjCInterfaceDecl *UnknownObjCClass,
- const ObjCPropertyDecl *ObjCProperty,
- bool ObjCPropertyAccess) {
+static void EmitAvailabilityWarning(Sema &S, AvailabilityResult AR,
+ const NamedDecl *ReferringDecl,
+ const NamedDecl *OffendingDecl,
+ StringRef Message, SourceLocation Loc,
+ const ObjCInterfaceDecl *UnknownObjCClass,
+ const ObjCPropertyDecl *ObjCProperty,
+ bool ObjCPropertyAccess) {
// Delay if we're currently parsing a declaration.
- if (DelayedDiagnostics.shouldDelayDiagnostics()) {
- DelayedDiagnostics.add(DelayedDiagnostic::makeAvailability(
- AR, Loc, D, UnknownObjCClass, ObjCProperty, Message,
- ObjCPropertyAccess));
+ if (S.DelayedDiagnostics.shouldDelayDiagnostics()) {
+ S.DelayedDiagnostics.add(
+ DelayedDiagnostic::makeAvailability(
+ AR, Loc, ReferringDecl, OffendingDecl, UnknownObjCClass,
+ ObjCProperty, Message, ObjCPropertyAccess));
return;
}
- Decl *Ctx = cast<Decl>(getCurLexicalContext());
- DoEmitAvailabilityWarning(*this, AR, Ctx, D, Message, Loc, UnknownObjCClass,
- ObjCProperty, ObjCPropertyAccess);
+ Decl *Ctx = cast<Decl>(S.getCurLexicalContext());
+ DoEmitAvailabilityWarning(S, AR, Ctx, ReferringDecl, OffendingDecl,
+ Message, Loc, UnknownObjCClass, ObjCProperty,
+ ObjCPropertyAccess);
}
namespace {
+/// Returns true if the given statement can be a body-like child of \p Parent.
+bool isBodyLikeChildStmt(const Stmt *S, const Stmt *Parent) {
+ switch (Parent->getStmtClass()) {
+ case Stmt::IfStmtClass:
+ return cast<IfStmt>(Parent)->getThen() == S ||
+ cast<IfStmt>(Parent)->getElse() == S;
+ case Stmt::WhileStmtClass:
+ return cast<WhileStmt>(Parent)->getBody() == S;
+ case Stmt::DoStmtClass:
+ return cast<DoStmt>(Parent)->getBody() == S;
+ case Stmt::ForStmtClass:
+ return cast<ForStmt>(Parent)->getBody() == S;
+ case Stmt::CXXForRangeStmtClass:
+ return cast<CXXForRangeStmt>(Parent)->getBody() == S;
+ case Stmt::ObjCForCollectionStmtClass:
+ return cast<ObjCForCollectionStmt>(Parent)->getBody() == S;
+ case Stmt::CaseStmtClass:
+ case Stmt::DefaultStmtClass:
+ return cast<SwitchCase>(Parent)->getSubStmt() == S;
+ default:
+ return false;
+ }
+}
+
+class StmtUSEFinder : public RecursiveASTVisitor<StmtUSEFinder> {
+ const Stmt *Target;
+
+public:
+ bool VisitStmt(Stmt *S) { return S != Target; }
+
+ /// Returns true if the given statement is present in the given declaration.
+ static bool isContained(const Stmt *Target, const Decl *D) {
+ StmtUSEFinder Visitor;
+ Visitor.Target = Target;
+ return !Visitor.TraverseDecl(const_cast<Decl *>(D));
+ }
+};
+
+/// Traverses the AST and finds the last statement that used a given
+/// declaration.
+class LastDeclUSEFinder : public RecursiveASTVisitor<LastDeclUSEFinder> {
+ const Decl *D;
+
+public:
+ bool VisitDeclRefExpr(DeclRefExpr *DRE) {
+ if (DRE->getDecl() == D)
+ return false;
+ return true;
+ }
+
+ static const Stmt *findLastStmtThatUsesDecl(const Decl *D,
+ const CompoundStmt *Scope) {
+ LastDeclUSEFinder Visitor;
+ Visitor.D = D;
+ for (auto I = Scope->body_rbegin(), E = Scope->body_rend(); I != E; ++I) {
+ const Stmt *S = *I;
+ if (!Visitor.TraverseStmt(const_cast<Stmt *>(S)))
+ return S;
+ }
+ return nullptr;
+ }
+};
+
/// \brief This class implements -Wunguarded-availability.
///
/// This is done with a traversal of the AST of a function that makes reference
@@ -6862,6 +7367,7 @@ class DiagnoseUnguardedAvailability
/// Stack of potentially nested 'if (@available(...))'s.
SmallVector<VersionTuple, 8> AvailabilityStack;
+ SmallVector<const Stmt *, 16> StmtStack;
void DiagnoseDeclAvailability(NamedDecl *D, SourceRange Range);
@@ -6872,10 +7378,34 @@ public:
SemaRef.Context.getTargetInfo().getPlatformMinVersion());
}
+ bool TraverseDecl(Decl *D) {
+ // Avoid visiting nested functions to prevent duplicate warnings.
+ if (!D || isa<FunctionDecl>(D))
+ return true;
+ return Base::TraverseDecl(D);
+ }
+
+ bool TraverseStmt(Stmt *S) {
+ if (!S)
+ return true;
+ StmtStack.push_back(S);
+ bool Result = Base::TraverseStmt(S);
+ StmtStack.pop_back();
+ return Result;
+ }
+
void IssueDiagnostics(Stmt *S) { TraverseStmt(S); }
bool TraverseIfStmt(IfStmt *If);
+ bool TraverseLambdaExpr(LambdaExpr *E) { return true; }
+
+ bool VisitObjCPropertyRefExpr(ObjCPropertyRefExpr *PRE) {
+ if (PRE->isClassReceiver())
+ DiagnoseDeclAvailability(PRE->getClassReceiver(), PRE->getReceiverLocation());
+ return true;
+ }
+
bool VisitObjCMessageExpr(ObjCMessageExpr *Msg) {
if (ObjCMethodDecl *D = Msg->getMethodDecl())
DiagnoseDeclAvailability(
@@ -6895,24 +7425,32 @@ public:
return true;
}
+ bool VisitObjCAvailabilityCheckExpr(ObjCAvailabilityCheckExpr *E) {
+ SemaRef.Diag(E->getLocStart(), diag::warn_at_available_unchecked_use)
+ << (!SemaRef.getLangOpts().ObjC1);
+ return true;
+ }
+
bool VisitTypeLoc(TypeLoc Ty);
};
void DiagnoseUnguardedAvailability::DiagnoseDeclAvailability(
NamedDecl *D, SourceRange Range) {
-
- VersionTuple ContextVersion = AvailabilityStack.back();
- if (AvailabilityResult Result =
- SemaRef.ShouldDiagnoseAvailabilityOfDecl(D, nullptr)) {
+ AvailabilityResult Result;
+ const NamedDecl *OffendingDecl;
+ std::tie(Result, OffendingDecl) =
+ ShouldDiagnoseAvailabilityOfDecl(D, nullptr);
+ if (Result != AR_Available) {
// All other diagnostic kinds have already been handled in
// DiagnoseAvailabilityOfDecl.
if (Result != AR_NotYetIntroduced)
return;
- const AvailabilityAttr *AA = getAttrForPlatform(SemaRef.getASTContext(), D);
+ const AvailabilityAttr *AA =
+ getAttrForPlatform(SemaRef.getASTContext(), OffendingDecl);
VersionTuple Introduced = AA->getIntroduced();
- if (ContextVersion >= Introduced)
+ if (AvailabilityStack.back() >= Introduced)
return;
// If the context of this function is less available than D, we should not
@@ -6920,18 +7458,96 @@ void DiagnoseUnguardedAvailability::DiagnoseDeclAvailability(
if (!ShouldDiagnoseAvailabilityInContext(SemaRef, Result, Introduced, Ctx))
return;
- SemaRef.Diag(Range.getBegin(), diag::warn_unguarded_availability)
+ // We would like to emit the diagnostic even if -Wunguarded-availability is
+ // not specified, for deployment targets of iOS 11 (or equivalent) and later,
+ // or for declarations that were introduced in iOS 11 (macOS 10.13, ...) or
+ // later.
+ unsigned DiagKind =
+ shouldDiagnoseAvailabilityByDefault(
+ SemaRef.Context,
+ SemaRef.Context.getTargetInfo().getPlatformMinVersion(), Introduced)
+ ? diag::warn_unguarded_availability_new
+ : diag::warn_unguarded_availability;
+
+ SemaRef.Diag(Range.getBegin(), DiagKind)
<< Range << D
<< AvailabilityAttr::getPrettyPlatformName(
SemaRef.getASTContext().getTargetInfo().getPlatformName())
<< Introduced.getAsString();
- SemaRef.Diag(D->getLocation(), diag::note_availability_specified_here)
- << D << /* partial */ 3;
+ SemaRef.Diag(OffendingDecl->getLocation(),
+ diag::note_availability_specified_here)
+ << OffendingDecl << /* partial */ 3;
+
+ auto FixitDiag =
+ SemaRef.Diag(Range.getBegin(), diag::note_unguarded_available_silence)
+ << Range << D
+ << (SemaRef.getLangOpts().ObjC1 ? /*@available*/ 0
+ : /*__builtin_available*/ 1);
+
+ // Find the statement which should be enclosed in the if @available check.
+ if (StmtStack.empty())
+ return;
+ const Stmt *StmtOfUse = StmtStack.back();
+ const CompoundStmt *Scope = nullptr;
+ for (const Stmt *S : llvm::reverse(StmtStack)) {
+ if (const auto *CS = dyn_cast<CompoundStmt>(S)) {
+ Scope = CS;
+ break;
+ }
+ if (isBodyLikeChildStmt(StmtOfUse, S)) {
+ // The declaration won't be seen outside of the statement, so we don't
+ // have to wrap the uses of any declared variables in if (@available).
+ // Therefore we can avoid setting Scope here.
+ break;
+ }
+ StmtOfUse = S;
+ }
+ const Stmt *LastStmtOfUse = nullptr;
+ if (isa<DeclStmt>(StmtOfUse) && Scope) {
+ for (const Decl *D : cast<DeclStmt>(StmtOfUse)->decls()) {
+ if (StmtUSEFinder::isContained(StmtStack.back(), D)) {
+ LastStmtOfUse = LastDeclUSEFinder::findLastStmtThatUsesDecl(D, Scope);
+ break;
+ }
+ }
+ }
+
+ const SourceManager &SM = SemaRef.getSourceManager();
+ SourceLocation IfInsertionLoc =
+ SM.getExpansionLoc(StmtOfUse->getLocStart());
+ SourceLocation StmtEndLoc =
+ SM.getExpansionRange(
+ (LastStmtOfUse ? LastStmtOfUse : StmtOfUse)->getLocEnd())
+ .second;
+ if (SM.getFileID(IfInsertionLoc) != SM.getFileID(StmtEndLoc))
+ return;
- // FIXME: Replace this with a fixit diagnostic.
- SemaRef.Diag(Range.getBegin(), diag::note_unguarded_available_silence)
- << Range << D;
+ StringRef Indentation = Lexer::getIndentationForLine(IfInsertionLoc, SM);
+ const char *ExtraIndentation = " ";
+ std::string FixItString;
+ llvm::raw_string_ostream FixItOS(FixItString);
+ FixItOS << "if (" << (SemaRef.getLangOpts().ObjC1 ? "@available"
+ : "__builtin_available")
+ << "("
+ << AvailabilityAttr::getPlatformNameSourceSpelling(
+ SemaRef.getASTContext().getTargetInfo().getPlatformName())
+ << " " << Introduced.getAsString() << ", *)) {\n"
+ << Indentation << ExtraIndentation;
+ FixitDiag << FixItHint::CreateInsertion(IfInsertionLoc, FixItOS.str());
+ SourceLocation ElseInsertionLoc = Lexer::findLocationAfterToken(
+ StmtEndLoc, tok::semi, SM, SemaRef.getLangOpts(),
+ /*SkipTrailingWhitespaceAndNewLine=*/false);
+ if (ElseInsertionLoc.isInvalid())
+ ElseInsertionLoc =
+ Lexer::getLocForEndOfToken(StmtEndLoc, 0, SM, SemaRef.getLangOpts());
+ FixItOS.str().clear();
+ FixItOS << "\n"
+ << Indentation << "} else {\n"
+ << Indentation << ExtraIndentation
+ << "// Fallback on earlier versions\n"
+ << Indentation << "}";
+ FixitDiag << FixItHint::CreateInsertion(ElseInsertionLoc, FixItOS.str());
}
}
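
To make the constructed fixit concrete, here is a sketch of the inserted text around a guarded call in C (the function name and the macOS 10.13 introduction are hypothetical):

    // before:
    newAPI();

    // after applying both insertions built above:
    if (__builtin_available(macOS 10.13, *)) {
        newAPI();
    } else {
        // Fallback on earlier versions
    }
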
@@ -6939,6 +7555,9 @@ bool DiagnoseUnguardedAvailability::VisitTypeLoc(TypeLoc Ty) {
const Type *TyPtr = Ty.getTypePtr();
SourceRange Range{Ty.getBeginLoc(), Ty.getEndLoc()};
+ if (Range.isInvalid())
+ return true;
+
if (const TagType *TT = dyn_cast<TagType>(TyPtr)) {
TagDecl *TD = TT->getDecl();
DiagnoseDeclAvailability(TD, Range);
@@ -6991,8 +7610,51 @@ void Sema::DiagnoseUnguardedAvailabilityViolations(Decl *D) {
Body = FD->getBody();
} else if (auto *MD = dyn_cast<ObjCMethodDecl>(D))
Body = MD->getBody();
+ else if (auto *BD = dyn_cast<BlockDecl>(D))
+ Body = BD->getBody();
assert(Body && "Need a body here!");
DiagnoseUnguardedAvailability(*this, D).IssueDiagnostics(Body);
}
+
+void Sema::DiagnoseAvailabilityOfDecl(NamedDecl *D, SourceLocation Loc,
+ const ObjCInterfaceDecl *UnknownObjCClass,
+ bool ObjCPropertyAccess,
+ bool AvoidPartialAvailabilityChecks) {
+ std::string Message;
+ AvailabilityResult Result;
+ const NamedDecl* OffendingDecl;
+ // See if this declaration is unavailable, deprecated, or partial.
+ std::tie(Result, OffendingDecl) = ShouldDiagnoseAvailabilityOfDecl(D, &Message);
+ if (Result == AR_Available)
+ return;
+
+ if (Result == AR_NotYetIntroduced) {
+ if (AvoidPartialAvailabilityChecks)
+ return;
+
+ // We need to know the @available context in the current function to
+ // diagnose this use, let DiagnoseUnguardedAvailabilityViolations do that
+ // when we're done parsing the current function.
+ if (getCurFunctionOrMethodDecl()) {
+ getEnclosingFunction()->HasPotentialAvailabilityViolations = true;
+ return;
+ } else if (getCurBlock() || getCurLambda()) {
+ getCurFunction()->HasPotentialAvailabilityViolations = true;
+ return;
+ }
+ }
+
+ const ObjCPropertyDecl *ObjCPDecl = nullptr;
+ if (const ObjCMethodDecl *MD = dyn_cast<ObjCMethodDecl>(D)) {
+ if (const ObjCPropertyDecl *PD = MD->findPropertyDecl()) {
+ AvailabilityResult PDeclResult = PD->getAvailability(nullptr);
+ if (PDeclResult == Result)
+ ObjCPDecl = PD;
+ }
+ }
+
+ EmitAvailabilityWarning(*this, Result, D, OffendingDecl, Message, Loc,
+ UnknownObjCClass, ObjCPDecl, ObjCPropertyAccess);
+}
diff --git a/gnu/llvm/tools/lld/ELF/Config.h b/gnu/llvm/tools/lld/ELF/Config.h
index f58e3e5715b..23627dd812d 100644
--- a/gnu/llvm/tools/lld/ELF/Config.h
+++ b/gnu/llvm/tools/lld/ELF/Config.h
@@ -13,7 +13,10 @@
#include "llvm/ADT/MapVector.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/StringSet.h"
-#include "llvm/Support/ELF.h"
+#include "llvm/BinaryFormat/ELF.h"
+#include "llvm/Support/CachePruning.h"
+#include "llvm/Support/CodeGen.h"
+#include "llvm/Support/Endian.h"
#include <vector>
@@ -34,14 +37,14 @@ enum ELFKind {
// For --build-id.
enum class BuildIdKind { None, Fast, Md5, Sha1, Hexstring, Uuid };
-// For --discard-{all,locals,none} and --retain-symbols-file.
-enum class DiscardPolicy { Default, All, Locals, RetainFile, None };
+// For --discard-{all,locals,none}.
+enum class DiscardPolicy { Default, All, Locals, None };
// For --strip-{all,debug}.
enum class StripPolicy { None, All, Debug };
// For --unresolved-symbols.
-enum class UnresolvedPolicy { NoUndef, ReportError, Warn, Ignore };
+enum class UnresolvedPolicy { ReportError, Warn, WarnAll, Ignore, IgnoreAll };
// For --sort-section and linkerscript sorting rules.
enum class SortSectionPolicy { Default, None, Alignment, Name, Priority };
@@ -58,11 +61,16 @@ struct SymbolVersion {
// This struct contains symbols version definition that
// can be found in version script if it is used for link.
struct VersionDefinition {
- VersionDefinition(llvm::StringRef Name, uint16_t Id) : Name(Name), Id(Id) {}
llvm::StringRef Name;
- uint16_t Id;
+ uint16_t Id = 0;
std::vector<SymbolVersion> Globals;
- size_t NameOff; // Offset in string table.
+ size_t NameOff = 0; // Offset in the string table
+};
+
+// Structure for mapping renamed symbols
+struct RenamedSymbol {
+ Symbol *Target;
+ uint8_t OriginalBinding;
};
// This struct contains the global configuration for the linker.
@@ -72,6 +80,7 @@ struct VersionDefinition {
struct Configuration {
InputFile *FirstElf = nullptr;
uint8_t OSABI = 0;
+ llvm::CachePruningPolicy ThinLTOCachePolicy;
llvm::StringMap<uint64_t> SectionStartMap;
llvm::StringRef DynamicLinker;
llvm::StringRef Entry;
@@ -80,52 +89,57 @@ struct Configuration {
llvm::StringRef Init;
llvm::StringRef LTOAAPipeline;
llvm::StringRef LTONewPmPasses;
+ llvm::StringRef MapFile;
llvm::StringRef OutputFile;
+ llvm::StringRef OptRemarksFilename;
llvm::StringRef SoName;
llvm::StringRef Sysroot;
- llvm::StringSet<> RetainSymbolsFile;
- std::string RPath;
+ llvm::StringRef ThinLTOCacheDir;
+ std::string Rpath;
std::vector<VersionDefinition> VersionDefinitions;
+ std::vector<llvm::StringRef> Argv;
std::vector<llvm::StringRef> AuxiliaryList;
+ std::vector<llvm::StringRef> FilterList;
std::vector<llvm::StringRef> SearchPaths;
std::vector<llvm::StringRef> SymbolOrderingFile;
std::vector<llvm::StringRef> Undefined;
std::vector<SymbolVersion> VersionScriptGlobals;
std::vector<SymbolVersion> VersionScriptLocals;
std::vector<uint8_t> BuildIdVector;
+ llvm::MapVector<Symbol *, RenamedSymbol> RenamedSymbols;
bool AllowMultipleDefinition;
bool AsNeeded = false;
bool Bsymbolic;
bool BsymbolicFunctions;
bool ColorDiagnostics = false;
+ bool CompressDebugSections;
bool DefineCommon;
bool Demangle = true;
bool DisableVerify;
bool EhFrameHdr;
+ bool EmitRelocs;
bool EnableNewDtags;
bool ExportDynamic;
bool FatalWarnings;
bool GcSections;
bool GdbIndex;
- bool GnuHash = false;
+ bool GnuHash;
bool ICF;
- bool Mips64EL = false;
bool MipsN32Abi = false;
bool NoGnuUnique;
bool NoUndefinedVersion;
bool Nostdlib;
bool OFormatBinary;
- bool OMagic;
- bool Pic;
+ bool Omagic;
+ bool OptRemarksWithHotness;
bool Pie;
bool PrintGcSections;
- bool Rela;
bool Relocatable;
bool SaveTemps;
bool SingleRoRx;
bool Shared;
bool Static = false;
- bool SysvHash = true;
+ bool SysvHash;
bool Target1Rel;
bool Threads;
bool Trace;
@@ -134,18 +148,21 @@ struct Configuration {
bool WarnMissingEntry;
bool ZCombreloc;
bool ZExecstack;
+ bool ZNocopyreloc;
bool ZNodelete;
bool ZNodlopen;
bool ZNow;
bool ZOrigin;
bool ZRelro;
+ bool ZRodynamic;
+ bool ZText;
bool ExitEarly;
bool ZWxneeded;
DiscardPolicy Discard;
SortSectionPolicy SortSection;
- StripPolicy Strip = StripPolicy::None;
+ StripPolicy Strip;
UnresolvedPolicy UnresolvedSymbols;
- Target2Policy Target2 = Target2Policy::GotRel;
+ Target2Policy Target2;
BuildIdKind BuildId = BuildIdKind::None;
ELFKind EKind = ELFNoneKind;
uint16_t DefaultSymbolVersion = llvm::ELF::VER_NDX_GLOBAL;
@@ -158,6 +175,58 @@ struct Configuration {
unsigned LTOO;
unsigned Optimize;
unsigned ThinLTOJobs;
+
+ // The following config options do not directly correspond to any
+ // particular command line options.
+
+ // True if we need to pass through relocations in input files to the
+ // output file. Usually false because we consume relocations.
+ bool CopyRelocs;
+
+ // True if the target is ELF64. False if ELF32.
+ bool Is64;
+
+ // True if the target is little-endian. False if big-endian.
+ bool IsLE;
+
+ // endianness::little if IsLE is true. endianness::big otherwise.
+ llvm::support::endianness Endianness;
+
+ // True if the target is the little-endian MIPS64.
+ //
+ // The reason we have this variable only for MIPS is that we need it often.
+ // Some ELF headers for MIPS64EL are in a mixed-endian format (which is
+ // horrible, and I'd call it a serious spec bug), and in various places we
+ // need to know whether we are reading MIPS ELF files or not.
+ //
+ // (Note that MIPS64EL is not a typo for MIPS64LE. That is the official
+ // name, whatever that means. A fun hypothesis is that "EL" is short for
+ // little-endian written in little-endian order, but I don't know if
+ // that's true.)
+ bool IsMips64EL;
+
+ // The ELF spec defines two types of relocation table entries, RELA and
+ // REL. RELA is a triplet of (offset, info, addend) while REL is a pair
+ // of (offset, info). Addends for REL are implicit and read from the
+ // location where the relocations are applied, so REL is more compact
+ // than RELA but requires a bit more work to process. (A minimal sketch
+ // of the two entry layouts follows this struct.)
+ //
+ // (From the linker writer's point of view, this distinction is not
+ // necessary. If the ELF spec had picked one format and stuck with it,
+ // writing code to process relocations would have been easier, but it's
+ // too late to change the spec.)
+ //
+ // Each ABI defines its relocation type. IsRela is true if the target
+ // uses RELA. As far as we know, all 64-bit ABIs use RELA, and a few
+ // 32-bit ABIs use RELA too.
+ bool IsRela;
+
+ // True if we are creating position-independent code.
+ bool Pic;
+
+ // 4 for ELF32, 8 for ELF64.
+ int Wordsize;
};
// The only instance of Configuration struct.
diff --git a/gnu/llvm/tools/lld/ELF/Driver.cpp b/gnu/llvm/tools/lld/ELF/Driver.cpp
index 99253313c35..a069bd637e7 100644
--- a/gnu/llvm/tools/lld/ELF/Driver.cpp
+++ b/gnu/llvm/tools/lld/ELF/Driver.cpp
@@ -6,17 +6,37 @@
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
+//
+// The driver drives the entire linking process. It is responsible for
+// parsing command line options and doing whatever it is instructed to do.
+//
+// One notable difference between LLD's driver and other linkers' drivers is
+// that LLD's driver is agnostic about the host operating system.
+// Other linkers usually have implicit default values (such as a dynamic
+// linker path or library paths) for each host OS.
+//
+// I don't think implicit default values are useful because they are
+// usually explicitly specified by the compiler driver. They can even
+// be harmful when you are doing cross-linking. Therefore, in LLD, we
+// simply trust the compiler driver to pass all required options and
+// don't try to make an effort on our side.
+//
+//===----------------------------------------------------------------------===//
#include "Driver.h"
#include "Config.h"
#include "Error.h"
+#include "Filesystem.h"
#include "ICF.h"
#include "InputFiles.h"
#include "InputSection.h"
#include "LinkerScript.h"
#include "Memory.h"
+#include "OutputSections.h"
+#include "ScriptParser.h"
#include "Strings.h"
#include "SymbolTable.h"
+#include "SyntheticSections.h"
#include "Target.h"
#include "Threads.h"
#include "Writer.h"
@@ -24,8 +44,8 @@
#include "lld/Driver/Driver.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/ADT/StringSwitch.h"
-#include "llvm/Object/Decompressor.h"
#include "llvm/Support/CommandLine.h"
+#include "llvm/Support/Compression.h"
#include "llvm/Support/Path.h"
#include "llvm/Support/TarWriter.h"
#include "llvm/Support/TargetSelect.h"
@@ -48,16 +68,19 @@ BumpPtrAllocator elf::BAlloc;
StringSaver elf::Saver{BAlloc};
std::vector<SpecificAllocBase *> elf::SpecificAllocBase::Instances;
+static void setConfigs();
+
bool elf::link(ArrayRef<const char *> Args, bool CanExitEarly,
raw_ostream &Error) {
ErrorCount = 0;
ErrorOS = &Error;
- Argv0 = Args[0];
+ InputSections.clear();
Tar = nullptr;
Config = make<Configuration>();
Driver = make<LinkerDriver>();
- ScriptConfig = make<ScriptConfiguration>();
+ Script = make<LinkerScript>();
+ Config->Argv = {Args.begin(), Args.end()};
Driver->main(Args, CanExitEarly);
freeArena();
@@ -76,12 +99,10 @@ static std::tuple<ELFKind, uint16_t, uint8_t> parseEmulation(StringRef Emul) {
std::pair<ELFKind, uint16_t> Ret =
StringSwitch<std::pair<ELFKind, uint16_t>>(S)
.Cases("aarch64elf", "aarch64linux", {ELF64LEKind, EM_AARCH64})
- .Case("armelf_linux_eabi", {ELF32LEKind, EM_ARM})
+ .Cases("armelf", "armelf_linux_eabi", {ELF32LEKind, EM_ARM})
.Case("elf32_x86_64", {ELF32LEKind, EM_X86_64})
- .Case("elf32btsmip", {ELF32BEKind, EM_MIPS})
- .Case("elf32ltsmip", {ELF32LEKind, EM_MIPS})
- .Case("elf32btsmipn32", {ELF32BEKind, EM_MIPS})
- .Case("elf32ltsmipn32", {ELF32LEKind, EM_MIPS})
+ .Cases("elf32btsmip", "elf32btsmipn32", {ELF32BEKind, EM_MIPS})
+ .Cases("elf32ltsmip", "elf32ltsmipn32", {ELF32LEKind, EM_MIPS})
.Case("elf32ppc", {ELF32BEKind, EM_PPC})
.Case("elf64btsmip", {ELF64BEKind, EM_MIPS})
.Case("elf64ltsmip", {ELF64LEKind, EM_MIPS})
@@ -102,13 +123,13 @@ static std::tuple<ELFKind, uint16_t, uint8_t> parseEmulation(StringRef Emul) {
// Returns slices of MB by parsing MB as an archive file.
// Each slice consists of a member file in the archive.
-std::vector<MemoryBufferRef>
-LinkerDriver::getArchiveMembers(MemoryBufferRef MB) {
+std::vector<std::pair<MemoryBufferRef, uint64_t>> static getArchiveMembers(
+ MemoryBufferRef MB) {
std::unique_ptr<Archive> File =
check(Archive::create(MB),
MB.getBufferIdentifier() + ": failed to parse archive");
- std::vector<MemoryBufferRef> V;
+ std::vector<std::pair<MemoryBufferRef, uint64_t>> V;
Error Err = Error::success();
for (const ErrorOr<Archive::Child> &COrErr : File->children(Err)) {
Archive::Child C =
@@ -118,7 +139,7 @@ LinkerDriver::getArchiveMembers(MemoryBufferRef MB) {
check(C.getMemoryBufferRef(),
MB.getBufferIdentifier() +
": could not get the buffer for a child of the archive");
- V.push_back(MBRef);
+ V.push_back(std::make_pair(MBRef, C.getChildOffset()));
}
if (Err)
fatal(MB.getBufferIdentifier() + ": Archive::children failed: " +
@@ -131,9 +152,8 @@ LinkerDriver::getArchiveMembers(MemoryBufferRef MB) {
return V;
}
-// Opens and parses a file. Path has to be resolved already.
-// Newly created memory buffers are owned by this driver.
-void LinkerDriver::addFile(StringRef Path) {
+// Opens a file and creates a file object. Path has to be resolved already.
+void LinkerDriver::addFile(StringRef Path, bool WithLOption) {
using namespace sys::fs;
Optional<MemoryBufferRef> Buffer = readFile(Path);
@@ -150,24 +170,53 @@ void LinkerDriver::addFile(StringRef Path) {
case file_magic::unknown:
readLinkerScript(MBRef);
return;
- case file_magic::archive:
+ case file_magic::archive: {
+ // Handle -whole-archive.
if (InWholeArchive) {
- for (MemoryBufferRef MB : getArchiveMembers(MBRef))
- Files.push_back(createObjectFile(MB, Path));
+ for (const auto &P : getArchiveMembers(MBRef))
+ Files.push_back(createObjectFile(P.first, Path, P.second));
return;
}
- Files.push_back(make<ArchiveFile>(MBRef));
+
+ std::unique_ptr<Archive> File =
+ check(Archive::create(MBRef), Path + ": failed to parse archive");
+
+ // If an archive file has no symbol table, it is likely that a user
+ // is attempting LTO and using a default ar command that doesn't
+ // understand LLVM bitcode files. It is a pretty common error, so
+ // we'll handle it as if it had a symbol table.
+ if (!File->isEmpty() && !File->hasSymbolTable()) {
+ for (const auto &P : getArchiveMembers(MBRef))
+ Files.push_back(make<LazyObjectFile>(P.first, Path, P.second));
+ return;
+ }
+
+ // Handle the regular case.
+ Files.push_back(make<ArchiveFile>(std::move(File)));
return;
+ }
case file_magic::elf_shared_object:
if (Config->Relocatable) {
error("attempted static link of dynamic object " + Path);
return;
}
- Files.push_back(createSharedFile(MBRef));
+
+ // DSOs usually have DT_SONAME tags in their ELF headers, and the
+ // sonames are used to identify DSOs. But if they are missing,
+ // they are identified by filenames. We don't know whether the new
+ // file has a DT_SONAME or not because we haven't parsed it yet.
+ // Here, we set the default soname for the file because we might
+ // need it later.
+ //
+ // If a file was specified by -lfoo, the directory part is not
+ // significant, since the user did not specify it. This behavior is
+ // compatible with GNU ld.
+ Files.push_back(
+ createSharedFile(MBRef, WithLOption ? path::filename(Path) : Path));
return;
default:
if (InLib)
- Files.push_back(make<LazyObjectFile>(MBRef));
+ Files.push_back(make<LazyObjectFile>(MBRef, "", 0));
else
Files.push_back(createObjectFile(MBRef));
}
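
The common way to end up in the no-symbol-table branch above, per its comment, is an LTO build in which bitcode objects were archived with an ar that does not understand bitcode (for example a stock system ar rather than llvm-ar); each member is then wrapped in a LazyObjectFile and loaded on demand, as if the archive had a symbol table.
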
@@ -176,7 +225,7 @@ void LinkerDriver::addFile(StringRef Path) {
// Add a given library by searching it from input search paths.
void LinkerDriver::addLibrary(StringRef Name) {
if (Optional<std::string> Path = searchLibrary(Name))
- addFile(*Path);
+ addFile(*Path, /*WithLOption=*/true);
else
error("unable to find library -l" + Name);
}
@@ -210,6 +259,12 @@ static void checkOptions(opt::InputArgList &Args) {
if (Config->Pie && Config->Shared)
error("-shared and -pie may not be used together");
+ if (!Config->Shared && !Config->FilterList.empty())
+ error("-F may not be used without -shared");
+
+ if (!Config->Shared && !Config->AuxiliaryList.empty())
+ error("-f may not be used without -shared");
+
if (Config->Relocatable) {
if (Config->Shared)
error("-r and -shared may not be used together");
@@ -222,18 +277,11 @@ static void checkOptions(opt::InputArgList &Args) {
}
}
-static StringRef getString(opt::InputArgList &Args, unsigned Key,
- StringRef Default = "") {
- if (auto *Arg = Args.getLastArg(Key))
- return Arg->getValue();
- return Default;
-}
-
static int getInteger(opt::InputArgList &Args, unsigned Key, int Default) {
int V = Default;
if (auto *Arg = Args.getLastArg(Key)) {
StringRef S = Arg->getValue();
- if (S.getAsInteger(10, V))
+ if (!to_integer(S, V, 10))
error(Arg->getSpelling() + ": number expected, but got " + S);
}
return V;
@@ -255,13 +303,11 @@ static bool hasZOption(opt::InputArgList &Args, StringRef Key) {
static uint64_t getZOptionValue(opt::InputArgList &Args, StringRef Key,
uint64_t Default) {
for (auto *Arg : Args.filtered(OPT_z)) {
- StringRef Value = Arg->getValue();
- size_t Pos = Value.find("=");
- if (Pos != StringRef::npos && Key == Value.substr(0, Pos)) {
- Value = Value.substr(Pos + 1);
- uint64_t Result;
- if (Value.getAsInteger(0, Result))
- error("invalid " + Key + ": " + Value);
+ std::pair<StringRef, StringRef> KV = StringRef(Arg->getValue()).split('=');
+ if (KV.first == Key) {
+ uint64_t Result = Default;
+ if (!to_integer(KV.second, Result))
+ error("invalid " + Key + ": " + KV.second);
return Result;
}
}
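
As a usage note, this helper backs options of the form -z key=value; for example, -z stack-size=8388608 makes getZOptionValue(Args, "stack-size", 0) return 8388608, while a malformed value such as -z stack-size=8m is reported as "invalid stack-size: 8m".
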
@@ -296,8 +342,8 @@ void LinkerDriver::main(ArrayRef<const char *> ArgsArr, bool CanExitEarly) {
// lot of "configure" scripts out there that are generated by old version
// of Libtool. We cannot convince every software developer to migrate to
// the latest version and re-generate scripts. So we have this hack.
- if (Args.hasArg(OPT_version) || Args.hasArg(OPT_v))
- outs() << getLLDVersion() << " (compatible with GNU linkers)\n";
+ if (Args.hasArg(OPT_v) || Args.hasArg(OPT_version))
+ message(getLLDVersion() + " (compatible with GNU linkers)");
// ld.bfd always exits after printing out the version string.
// ld.gold proceeds if a given option is -v. Because gold's behavior
@@ -327,6 +373,7 @@ void LinkerDriver::main(ArrayRef<const char *> ArgsArr, bool CanExitEarly) {
initLLVM(Args);
createFiles(Args);
inferMachineType();
+ setConfigs();
checkOptions(Args);
if (ErrorCount)
return;
@@ -349,36 +396,76 @@ void LinkerDriver::main(ArrayRef<const char *> ArgsArr, bool CanExitEarly) {
}
}
-static UnresolvedPolicy getUnresolvedSymbolOption(opt::InputArgList &Args) {
+static bool getArg(opt::InputArgList &Args, unsigned K1, unsigned K2,
+ bool Default) {
+ if (auto *Arg = Args.getLastArg(K1, K2))
+ return Arg->getOption().getID() == K1;
+ return Default;
+}
+
+static std::vector<StringRef> getArgs(opt::InputArgList &Args, int Id) {
+ std::vector<StringRef> V;
+ for (auto *Arg : Args.filtered(Id))
+ V.push_back(Arg->getValue());
+ return V;
+}
+
+static std::string getRpath(opt::InputArgList &Args) {
+ std::vector<StringRef> V = getArgs(Args, OPT_rpath);
+ return llvm::join(V.begin(), V.end(), ":");
+}
+
+// Determines what we should do if there are remaining unresolved
+// symbols after the name resolution.
+static UnresolvedPolicy getUnresolvedSymbolPolicy(opt::InputArgList &Args) {
+ // -noinhibit-exec or -r imply some default values.
if (Args.hasArg(OPT_noinhibit_exec))
- return UnresolvedPolicy::Warn;
- if (Args.hasArg(OPT_no_undefined) || hasZOption(Args, "defs"))
- return UnresolvedPolicy::NoUndef;
- if (Config->Relocatable)
- return UnresolvedPolicy::Ignore;
+ return UnresolvedPolicy::WarnAll;
+ if (Args.hasArg(OPT_relocatable))
+ return UnresolvedPolicy::IgnoreAll;
- if (auto *Arg = Args.getLastArg(OPT_unresolved_symbols)) {
- StringRef S = Arg->getValue();
- if (S == "ignore-all" || S == "ignore-in-object-files")
- return UnresolvedPolicy::Ignore;
- if (S == "ignore-in-shared-libs" || S == "report-all")
- return UnresolvedPolicy::ReportError;
- error("unknown --unresolved-symbols value: " + S);
+ UnresolvedPolicy ErrorOrWarn = getArg(Args, OPT_error_unresolved_symbols,
+ OPT_warn_unresolved_symbols, true)
+ ? UnresolvedPolicy::ReportError
+ : UnresolvedPolicy::Warn;
+
+ // Process the last of -unresolved-symbols, -no-undefined or -z defs.
+ for (auto *Arg : llvm::reverse(Args)) {
+ switch (Arg->getOption().getID()) {
+ case OPT_unresolved_symbols: {
+ StringRef S = Arg->getValue();
+ if (S == "ignore-all" || S == "ignore-in-object-files")
+ return UnresolvedPolicy::Ignore;
+ if (S == "ignore-in-shared-libs" || S == "report-all")
+ return ErrorOrWarn;
+ error("unknown --unresolved-symbols value: " + S);
+ continue;
+ }
+ case OPT_no_undefined:
+ return ErrorOrWarn;
+ case OPT_z:
+ if (StringRef(Arg->getValue()) == "defs")
+ return ErrorOrWarn;
+ continue;
+ }
}
- return UnresolvedPolicy::ReportError;
+
+ // -shared implies -unresolved-symbols=ignore-all because missing
+ // symbols are likely to be resolved at runtime using other DSOs.
+ if (Config->Shared)
+ return UnresolvedPolicy::Ignore;
+ return ErrorOrWarn;
}
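
For example, with this logic --warn-unresolved-symbols -z defs downgrades the usual -z defs error to a warning: the error/warning choice is computed first from --error-unresolved-symbols / --warn-unresolved-symbols, and the reverse scan then returns that choice when it reaches -z defs. A plain -shared link with none of these options falls through to Ignore, since missing symbols are expected to be resolved at runtime by other DSOs.
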
-static Target2Policy getTarget2Option(opt::InputArgList &Args) {
- if (auto *Arg = Args.getLastArg(OPT_target2)) {
- StringRef S = Arg->getValue();
- if (S == "rel")
- return Target2Policy::Rel;
- if (S == "abs")
- return Target2Policy::Abs;
- if (S == "got-rel")
- return Target2Policy::GotRel;
- error("unknown --target2 option: " + S);
- }
+static Target2Policy getTarget2(opt::InputArgList &Args) {
+ StringRef S = Args.getLastArgValue(OPT_target2, "got-rel");
+ if (S == "rel")
+ return Target2Policy::Rel;
+ if (S == "abs")
+ return Target2Policy::Abs;
+ if (S == "got-rel")
+ return Target2Policy::GotRel;
+ error("unknown --target2 option: " + S);
return Target2Policy::GotRel;
}
@@ -392,16 +479,10 @@ static bool isOutputFormatBinary(opt::InputArgList &Args) {
return false;
}
-static bool getArg(opt::InputArgList &Args, unsigned K1, unsigned K2,
- bool Default) {
- if (auto *Arg = Args.getLastArg(K1, K2))
- return Arg->getOption().getID() == K1;
- return Default;
-}
-
-static DiscardPolicy getDiscardOption(opt::InputArgList &Args) {
- if (Config->Relocatable)
+static DiscardPolicy getDiscard(opt::InputArgList &Args) {
+ if (Args.hasArg(OPT_relocatable))
return DiscardPolicy::None;
+
auto *Arg =
Args.getLastArg(OPT_discard_all, OPT_discard_locals, OPT_discard_none);
if (!Arg)
@@ -413,20 +494,30 @@ static DiscardPolicy getDiscardOption(opt::InputArgList &Args) {
return DiscardPolicy::None;
}
-static StripPolicy getStripOption(opt::InputArgList &Args) {
- if (auto *Arg = Args.getLastArg(OPT_strip_all, OPT_strip_debug)) {
- if (Arg->getOption().getID() == OPT_strip_all)
- return StripPolicy::All;
- return StripPolicy::Debug;
- }
- return StripPolicy::None;
+static StringRef getDynamicLinker(opt::InputArgList &Args) {
+ auto *Arg = Args.getLastArg(OPT_dynamic_linker, OPT_no_dynamic_linker);
+ if (!Arg || Arg->getOption().getID() == OPT_no_dynamic_linker)
+ return "";
+ return Arg->getValue();
+}
+
+static StripPolicy getStrip(opt::InputArgList &Args) {
+ if (Args.hasArg(OPT_relocatable))
+ return StripPolicy::None;
+
+ auto *Arg = Args.getLastArg(OPT_strip_all, OPT_strip_debug);
+ if (!Arg)
+ return StripPolicy::None;
+ if (Arg->getOption().getID() == OPT_strip_all)
+ return StripPolicy::All;
+ return StripPolicy::Debug;
}
static uint64_t parseSectionAddress(StringRef S, opt::Arg *Arg) {
uint64_t VA = 0;
if (S.startswith("0x"))
S = S.drop_front(2);
- if (S.getAsInteger(16, VA))
+ if (!to_integer(S, VA, 16))
error("invalid argument: " + toString(Arg));
return VA;
}
@@ -449,8 +540,8 @@ static StringMap<uint64_t> getSectionStartMap(opt::InputArgList &Args) {
return Ret;
}
-static SortSectionPolicy getSortKind(opt::InputArgList &Args) {
- StringRef S = getString(Args, OPT_sort_section);
+static SortSectionPolicy getSortSection(opt::InputArgList &Args) {
+ StringRef S = Args.getLastArgValue(OPT_sort_section);
if (S == "alignment")
return SortSectionPolicy::Alignment;
if (S == "name")
@@ -460,6 +551,44 @@ static SortSectionPolicy getSortKind(opt::InputArgList &Args) {
return SortSectionPolicy::Default;
}
+static std::pair<bool, bool> getHashStyle(opt::InputArgList &Args) {
+ StringRef S = Args.getLastArgValue(OPT_hash_style, "sysv");
+ if (S == "sysv")
+ return {true, false};
+ if (S == "gnu")
+ return {false, true};
+ if (S != "both")
+ error("unknown -hash-style: " + S);
+ return {true, true};
+}
+
+// Parse --build-id or --build-id=<style>. We handle "tree" as a
+// synonym for "sha1" because all of our hash functions, including the one
+// used for --build-id=sha1, are actually tree hashes for performance reasons.
+static std::pair<BuildIdKind, std::vector<uint8_t>>
+getBuildId(opt::InputArgList &Args) {
+ auto *Arg = Args.getLastArg(OPT_build_id, OPT_build_id_eq);
+ if (!Arg)
+ return {BuildIdKind::None, {}};
+
+ if (Arg->getOption().getID() == OPT_build_id)
+ return {BuildIdKind::Fast, {}};
+
+ StringRef S = Arg->getValue();
+ if (S == "md5")
+ return {BuildIdKind::Md5, {}};
+ if (S == "sha1" || S == "tree")
+ return {BuildIdKind::Sha1, {}};
+ if (S == "uuid")
+ return {BuildIdKind::Uuid, {}};
+ if (S.startswith("0x"))
+ return {BuildIdKind::Hexstring, parseHex(S.substr(2))};
+
+ if (S != "none")
+ error("unknown --build-id style: " + S);
+ return {BuildIdKind::None, {}};
+}
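
For instance, a bare --build-id selects the Fast kind, --build-id=tree maps to Sha1 per the synonym noted above, and --build-id=0xcafe yields a Hexstring build id with the literal bytes ca fe.
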
+
static std::vector<StringRef> getLines(MemoryBufferRef MB) {
SmallVector<StringRef, 0> Arr;
MB.getBuffer().split(Arr, '\n');
@@ -473,42 +602,58 @@ static std::vector<StringRef> getLines(MemoryBufferRef MB) {
return Ret;
}
+static bool getCompressDebugSections(opt::InputArgList &Args) {
+ StringRef S = Args.getLastArgValue(OPT_compress_debug_sections, "none");
+ if (S == "none")
+ return false;
+ if (S != "zlib")
+ error("unknown --compress-debug-sections value: " + S);
+ if (!zlib::isAvailable())
+ error("--compress-debug-sections: zlib is not available");
+ return true;
+}
+
// Initializes Config members by the command line options.
void LinkerDriver::readConfigs(opt::InputArgList &Args) {
- for (auto *Arg : Args.filtered(OPT_L))
- Config->SearchPaths.push_back(Arg->getValue());
-
- std::vector<StringRef> RPaths;
- for (auto *Arg : Args.filtered(OPT_rpath))
- RPaths.push_back(Arg->getValue());
- if (!RPaths.empty())
- Config->RPath = llvm::join(RPaths.begin(), RPaths.end(), ":");
-
- if (auto *Arg = Args.getLastArg(OPT_m)) {
- // Parse ELF{32,64}{LE,BE} and CPU type.
- StringRef S = Arg->getValue();
- std::tie(Config->EKind, Config->EMachine, Config->OSABI) =
- parseEmulation(S);
- Config->MipsN32Abi = (S == "elf32btsmipn32" || S == "elf32ltsmipn32");
- Config->Emulation = S;
- }
-
Config->AllowMultipleDefinition = Args.hasArg(OPT_allow_multiple_definition);
+ Config->AuxiliaryList = getArgs(Args, OPT_auxiliary);
Config->Bsymbolic = Args.hasArg(OPT_Bsymbolic);
Config->BsymbolicFunctions = Args.hasArg(OPT_Bsymbolic_functions);
+ Config->CompressDebugSections = getCompressDebugSections(Args);
+ Config->DefineCommon = getArg(Args, OPT_define_common, OPT_no_define_common,
+ !Args.hasArg(OPT_relocatable));
Config->Demangle = getArg(Args, OPT_demangle, OPT_no_demangle, true);
Config->DisableVerify = Args.hasArg(OPT_disable_verify);
+ Config->Discard = getDiscard(Args);
+ Config->DynamicLinker = getDynamicLinker(Args);
Config->EhFrameHdr = Args.hasArg(OPT_eh_frame_hdr);
+ Config->EmitRelocs = Args.hasArg(OPT_emit_relocs);
Config->EnableNewDtags = !Args.hasArg(OPT_disable_new_dtags);
- Config->ExportDynamic = Args.hasArg(OPT_export_dynamic);
- Config->FatalWarnings = Args.hasArg(OPT_fatal_warnings);
+ Config->Entry = Args.getLastArgValue(OPT_entry);
+ Config->ExportDynamic =
+ getArg(Args, OPT_export_dynamic, OPT_no_export_dynamic, false);
+ Config->FatalWarnings =
+ getArg(Args, OPT_fatal_warnings, OPT_no_fatal_warnings, false);
+ Config->FilterList = getArgs(Args, OPT_filter);
+ Config->Fini = Args.getLastArgValue(OPT_fini, "_fini");
Config->GcSections = getArg(Args, OPT_gc_sections, OPT_no_gc_sections, false);
Config->GdbIndex = Args.hasArg(OPT_gdb_index);
- Config->ICF = Args.hasArg(OPT_icf);
+ Config->ICF = getArg(Args, OPT_icf_all, OPT_icf_none, false);
+ Config->Init = Args.getLastArgValue(OPT_init, "_init");
+ Config->LTOAAPipeline = Args.getLastArgValue(OPT_lto_aa_pipeline);
+ Config->LTONewPmPasses = Args.getLastArgValue(OPT_lto_newpm_passes);
+ Config->LTOO = getInteger(Args, OPT_lto_O, 2);
+ Config->LTOPartitions = getInteger(Args, OPT_lto_partitions, 1);
+ Config->MapFile = Args.getLastArgValue(OPT_Map);
Config->NoGnuUnique = Args.hasArg(OPT_no_gnu_unique);
Config->NoUndefinedVersion = Args.hasArg(OPT_no_undefined_version);
Config->Nostdlib = Args.hasArg(OPT_nostdlib);
- Config->OMagic = Args.hasArg(OPT_omagic);
+ Config->OFormatBinary = isOutputFormatBinary(Args);
+ Config->Omagic = Args.hasArg(OPT_omagic);
+ Config->OptRemarksFilename = Args.getLastArgValue(OPT_opt_remarks_filename);
+ Config->OptRemarksWithHotness = Args.hasArg(OPT_opt_remarks_with_hotness);
+ Config->Optimize = getInteger(Args, OPT_O, 1);
+ Config->OutputFile = Args.getLastArgValue(OPT_o);
#ifdef __OpenBSD__
Config->Pie = getArg(Args, OPT_pie, OPT_nopie,
!Args.hasArg(OPT_shared) && !Args.hasArg(OPT_relocatable));
@@ -516,137 +661,111 @@ void LinkerDriver::readConfigs(opt::InputArgList &Args) {
Config->Pie = getArg(Args, OPT_pie, OPT_nopie, false);
#endif
Config->PrintGcSections = Args.hasArg(OPT_print_gc_sections);
+ Config->Rpath = getRpath(Args);
Config->Relocatable = Args.hasArg(OPT_relocatable);
- Config->DefineCommon = getArg(Args, OPT_define_common, OPT_no_define_common,
- !Config->Relocatable);
- Config->Discard = getDiscardOption(Args);
Config->SaveTemps = Args.hasArg(OPT_save_temps);
- Config->SingleRoRx = Args.hasArg(OPT_no_rosegment);
+ Config->SearchPaths = getArgs(Args, OPT_L);
+ Config->SectionStartMap = getSectionStartMap(Args);
Config->Shared = Args.hasArg(OPT_shared);
+ Config->SingleRoRx = Args.hasArg(OPT_no_rosegment);
+ Config->SoName = Args.getLastArgValue(OPT_soname);
+ Config->SortSection = getSortSection(Args);
+ Config->Strip = getStrip(Args);
+ Config->Sysroot = Args.getLastArgValue(OPT_sysroot);
Config->Target1Rel = getArg(Args, OPT_target1_rel, OPT_target1_abs, false);
+ Config->Target2 = getTarget2(Args);
+ Config->ThinLTOCacheDir = Args.getLastArgValue(OPT_thinlto_cache_dir);
+ Config->ThinLTOCachePolicy = check(
+ parseCachePruningPolicy(Args.getLastArgValue(OPT_thinlto_cache_policy)),
+ "--thinlto-cache-policy: invalid cache policy");
+ Config->ThinLTOJobs = getInteger(Args, OPT_thinlto_jobs, -1u);
Config->Threads = getArg(Args, OPT_threads, OPT_no_threads, true);
Config->Trace = Args.hasArg(OPT_trace);
+ Config->Undefined = getArgs(Args, OPT_undefined);
+ Config->UnresolvedSymbols = getUnresolvedSymbolPolicy(Args);
Config->Verbose = Args.hasArg(OPT_verbose);
Config->WarnCommon = Args.hasArg(OPT_warn_common);
-
- Config->DynamicLinker = getString(Args, OPT_dynamic_linker);
- Config->Entry = getString(Args, OPT_entry);
- Config->Fini = getString(Args, OPT_fini, "_fini");
- Config->Init = getString(Args, OPT_init, "_init");
- Config->LTOAAPipeline = getString(Args, OPT_lto_aa_pipeline);
- Config->LTONewPmPasses = getString(Args, OPT_lto_newpm_passes);
- Config->OutputFile = getString(Args, OPT_o);
- Config->SoName = getString(Args, OPT_soname);
- Config->Sysroot = getString(Args, OPT_sysroot);
-
- Config->Optimize = getInteger(Args, OPT_O, 1);
- Config->LTOO = getInteger(Args, OPT_lto_O, 2);
- if (Config->LTOO > 3)
- error("invalid optimization level for LTO: " + getString(Args, OPT_lto_O));
- Config->LTOPartitions = getInteger(Args, OPT_lto_partitions, 1);
- if (Config->LTOPartitions == 0)
- error("--lto-partitions: number of threads must be > 0");
- Config->ThinLTOJobs = getInteger(Args, OPT_thinlto_jobs, -1u);
- if (Config->ThinLTOJobs == 0)
- error("--thinlto-jobs: number of threads must be > 0");
-
Config->ZCombreloc = !hasZOption(Args, "nocombreloc");
Config->ZExecstack = hasZOption(Args, "execstack");
+ Config->ZNocopyreloc = hasZOption(Args, "nocopyreloc");
Config->ZNodelete = hasZOption(Args, "nodelete");
Config->ZNodlopen = hasZOption(Args, "nodlopen");
Config->ZNow = hasZOption(Args, "now");
Config->ZOrigin = hasZOption(Args, "origin");
Config->ZRelro = !hasZOption(Args, "norelro");
- Config->ZStackSize = getZOptionValue(Args, "stack-size", -1);
+ Config->ZRodynamic = hasZOption(Args, "rodynamic");
+ Config->ZStackSize = getZOptionValue(Args, "stack-size", 0);
+ Config->ZText = !hasZOption(Args, "notext");
Config->ZWxneeded = hasZOption(Args, "wxneeded");
- Config->OFormatBinary = isOutputFormatBinary(Args);
- Config->SectionStartMap = getSectionStartMap(Args);
- Config->SortSection = getSortKind(Args);
- Config->Target2 = getTarget2Option(Args);
- Config->UnresolvedSymbols = getUnresolvedSymbolOption(Args);
+ if (Config->LTOO > 3)
+ error("invalid optimization level for LTO: " +
+ Args.getLastArgValue(OPT_lto_O));
+ if (Config->LTOPartitions == 0)
+ error("--lto-partitions: number of threads must be > 0");
+ if (Config->ThinLTOJobs == 0)
+ error("--thinlto-jobs: number of threads must be > 0");
+
+ if (auto *Arg = Args.getLastArg(OPT_m)) {
+ // Parse ELF{32,64}{LE,BE} and CPU type.
+ StringRef S = Arg->getValue();
+ std::tie(Config->EKind, Config->EMachine, Config->OSABI) =
+ parseEmulation(S);
+ Config->MipsN32Abi = (S == "elf32btsmipn32" || S == "elf32ltsmipn32");
+ Config->Emulation = S;
+ }
+
+ if (Args.hasArg(OPT_print_map))
+ Config->MapFile = "-";
// --omagic is an option to create old-fashioned executables in which
// .text segments are writable. Today, the option is still in use to
// create special-purpose programs such as boot loaders. It doesn't
// make sense to create PT_GNU_RELRO for such executables.
- if (Config->OMagic)
+ if (Config->Omagic)
Config->ZRelro = false;
- if (!Config->Relocatable)
- Config->Strip = getStripOption(Args);
-
- // Config->Pic is true if we are generating position-independent code.
- Config->Pic = Config->Pie || Config->Shared;
-
- if (auto *Arg = Args.getLastArg(OPT_hash_style)) {
- StringRef S = Arg->getValue();
- if (S == "gnu") {
- Config->GnuHash = true;
- Config->SysvHash = false;
- } else if (S == "both") {
- Config->GnuHash = true;
- } else if (S != "sysv")
- error("unknown hash style: " + S);
- }
-
- // Parse --build-id or --build-id=<style>.
- if (Args.hasArg(OPT_build_id))
- Config->BuildId = BuildIdKind::Fast;
- if (auto *Arg = Args.getLastArg(OPT_build_id_eq)) {
- StringRef S = Arg->getValue();
- if (S == "md5") {
- Config->BuildId = BuildIdKind::Md5;
- } else if (S == "sha1" || S == "tree") {
- Config->BuildId = BuildIdKind::Sha1;
- } else if (S == "uuid") {
- Config->BuildId = BuildIdKind::Uuid;
- } else if (S == "none") {
- Config->BuildId = BuildIdKind::None;
- } else if (S.startswith("0x")) {
- Config->BuildId = BuildIdKind::Hexstring;
- Config->BuildIdVector = parseHex(S.substr(2));
- } else {
- error("unknown --build-id style: " + S);
- }
- }
-
- for (auto *Arg : Args.filtered(OPT_auxiliary))
- Config->AuxiliaryList.push_back(Arg->getValue());
- if (!Config->Shared && !Config->AuxiliaryList.empty())
- error("-f may not be used without -shared");
-
- for (auto *Arg : Args.filtered(OPT_undefined))
- Config->Undefined.push_back(Arg->getValue());
-
- if (auto *Arg = Args.getLastArg(OPT_dynamic_list))
- if (Optional<MemoryBufferRef> Buffer = readFile(Arg->getValue()))
- readDynamicList(*Buffer);
+ std::tie(Config->SysvHash, Config->GnuHash) = getHashStyle(Args);
+ std::tie(Config->BuildId, Config->BuildIdVector) = getBuildId(Args);
if (auto *Arg = Args.getLastArg(OPT_symbol_ordering_file))
if (Optional<MemoryBufferRef> Buffer = readFile(Arg->getValue()))
Config->SymbolOrderingFile = getLines(*Buffer);
- // If --retain-symbol-file is used, we'll retail only the symbols listed in
+  // If --retain-symbols-file is used, we'll keep only the symbols listed in
// the file and discard all others.
if (auto *Arg = Args.getLastArg(OPT_retain_symbols_file)) {
- Config->Discard = DiscardPolicy::RetainFile;
+ Config->DefaultSymbolVersion = VER_NDX_LOCAL;
if (Optional<MemoryBufferRef> Buffer = readFile(Arg->getValue()))
for (StringRef S : getLines(*Buffer))
- Config->RetainSymbolsFile.insert(S);
+ Config->VersionScriptGlobals.push_back(
+ {S, /*IsExternCpp*/ false, /*HasWildcard*/ false});
}
- for (auto *Arg : Args.filtered(OPT_export_dynamic_symbol))
- Config->VersionScriptGlobals.push_back(
- {Arg->getValue(), /*IsExternCpp*/ false, /*HasWildcard*/ false});
-
- // Dynamic lists are a simplified linker script that doesn't need the
- // "global:" and implicitly ends with a "local:*". Set the variables needed to
- // simulate that.
- if (Args.hasArg(OPT_dynamic_list) || Args.hasArg(OPT_export_dynamic_symbol)) {
- Config->ExportDynamic = true;
- if (!Config->Shared)
- Config->DefaultSymbolVersion = VER_NDX_LOCAL;
+ bool HasExportDynamic =
+ getArg(Args, OPT_export_dynamic, OPT_no_export_dynamic, false);
+
+ // Parses -dynamic-list and -export-dynamic-symbol. They make some
+ // symbols private. Note that -export-dynamic takes precedence over them
+ // as it says all symbols should be exported.
+ if (!HasExportDynamic) {
+ for (auto *Arg : Args.filtered(OPT_dynamic_list))
+ if (Optional<MemoryBufferRef> Buffer = readFile(Arg->getValue()))
+ readDynamicList(*Buffer);
+
+ for (auto *Arg : Args.filtered(OPT_export_dynamic_symbol))
+ Config->VersionScriptGlobals.push_back(
+ {Arg->getValue(), /*IsExternCpp*/ false, /*HasWildcard*/ false});
+
+ // Dynamic lists are a simplified linker script that doesn't need the
+ // "global:" and implicitly ends with a "local:*". Set the variables
+ // needed to simulate that.
+ if (Args.hasArg(OPT_dynamic_list) ||
+ Args.hasArg(OPT_export_dynamic_symbol)) {
+ Config->ExportDynamic = true;
+ if (!Config->Shared)
+ Config->DefaultSymbolVersion = VER_NDX_LOCAL;
+ }
}
if (auto *Arg = Args.getLastArg(OPT_version_script))
@@ -654,6 +773,29 @@ void LinkerDriver::readConfigs(opt::InputArgList &Args) {
readVersionScript(*Buffer);
}
+// Some Config members do not directly correspond to any particular
+// command line option, but are computed from other Config values.
+// This function initializes such members. See Config.h for the details
+// of these values.
+static void setConfigs() {
+ ELFKind Kind = Config->EKind;
+ uint16_t Machine = Config->EMachine;
+
+ // There is an ILP32 ABI for x86-64, although it's not very popular.
+ // It is called the x32 ABI.
+ bool IsX32 = (Kind == ELF32LEKind && Machine == EM_X86_64);
+
+ Config->CopyRelocs = (Config->Relocatable || Config->EmitRelocs);
+ Config->Is64 = (Kind == ELF64LEKind || Kind == ELF64BEKind);
+ Config->IsLE = (Kind == ELF32LEKind || Kind == ELF64LEKind);
+ Config->Endianness =
+ Config->IsLE ? support::endianness::little : support::endianness::big;
+ Config->IsMips64EL = (Kind == ELF64LEKind && Machine == EM_MIPS);
+ Config->IsRela = Config->Is64 || IsX32 || Config->MipsN32Abi;
+ Config->Pic = Config->Pie || Config->Shared;
+ Config->Wordsize = Config->Is64 ? 8 : 4;
+}
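
For illustration, the derived values above follow mechanically from the ELF kind and machine. A minimal standalone sketch of the same derivation (plain C++, independent of lld's Config object, covering only the x32 case and ignoring MIPS N32):

    #include <cstdint>
    #include <cstdio>

    enum class ElfKind { Elf32LE, Elf32BE, Elf64LE, Elf64BE };
    constexpr uint16_t EM_X86_64 = 62; // ELF e_machine value for x86-64

    struct Derived { bool Is64, IsLE, IsRela; int Wordsize; };

    // Mirrors a subset of setConfigs(): 64-bit targets and x32 use RELA;
    // word size and endianness follow directly from the ELF kind.
    static Derived derive(ElfKind K, uint16_t Machine) {
      Derived D;
      D.Is64 = (K == ElfKind::Elf64LE || K == ElfKind::Elf64BE);
      D.IsLE = (K == ElfKind::Elf32LE || K == ElfKind::Elf64LE);
      bool IsX32 = (K == ElfKind::Elf32LE && Machine == EM_X86_64);
      D.IsRela = D.Is64 || IsX32;
      D.Wordsize = D.Is64 ? 8 : 4;
      return D;
    }

    int main() {
      // x32: a 32-bit little-endian ABI that still uses RELA relocations.
      Derived D = derive(ElfKind::Elf32LE, EM_X86_64);
      std::printf("is64=%d isLE=%d isRela=%d wordsize=%d\n",
                  D.Is64, D.IsLE, D.IsRela, D.Wordsize);
    }
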
+
 // Returns the value of the "-format" option.
static bool getBinaryOption(StringRef S) {
if (S == "binary")
@@ -672,7 +814,7 @@ void LinkerDriver::createFiles(opt::InputArgList &Args) {
addLibrary(Arg->getValue());
break;
case OPT_INPUT:
- addFile(Arg->getValue());
+ addFile(Arg->getValue(), /*WithLOption=*/false);
break;
case OPT_alias_script_T:
case OPT_script:
@@ -751,7 +893,7 @@ static uint64_t getImageBase(opt::InputArgList &Args) {
StringRef S = Arg->getValue();
uint64_t V;
- if (S.getAsInteger(0, V)) {
+ if (!to_integer(S, V)) {
error("-image-base: number expected, but got " + S);
return 0;
}
@@ -760,18 +902,63 @@ static uint64_t getImageBase(opt::InputArgList &Args) {
return V;
}
+// Parses the --defsym=<alias>=<symbol> option.
+static std::vector<std::pair<StringRef, StringRef>>
+getDefsym(opt::InputArgList &Args) {
+ std::vector<std::pair<StringRef, StringRef>> Ret;
+ for (auto *Arg : Args.filtered(OPT_defsym)) {
+ StringRef From;
+ StringRef To;
+ std::tie(From, To) = StringRef(Arg->getValue()).split('=');
+ if (!isValidCIdentifier(To))
+ error("--defsym: symbol name expected, but got " + To);
+ Ret.push_back({From, To});
+ }
+ return Ret;
+}
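
A standalone approximation of the split above, using std::string in place of llvm::StringRef (the function and symbol names here are illustrative only):

    #include <cstdio>
    #include <string>
    #include <utility>

    // Splits a --defsym value of the form "alias=symbol" at the first '='.
    // If there is no '=', the second element is empty, which matches
    // StringRef::split's behaviour and would trip the error path above.
    static std::pair<std::string, std::string> splitDefsym(const std::string &V) {
      size_t Pos = V.find('=');
      if (Pos == std::string::npos)
        return {V, ""};
      return {V.substr(0, Pos), V.substr(Pos + 1)};
    }

    int main() {
      auto P = splitDefsym("new_main=real_main");
      std::printf("alias=%s symbol=%s\n", P.first.c_str(), P.second.c_str());
    }
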
+
+// Parses `--exclude-libs=lib,lib,...`.
+// The library names may be delimited by commas or colons.
+static DenseSet<StringRef> getExcludeLibs(opt::InputArgList &Args) {
+ DenseSet<StringRef> Ret;
+ for (auto *Arg : Args.filtered(OPT_exclude_libs)) {
+ StringRef S = Arg->getValue();
+ for (;;) {
+ size_t Pos = S.find_first_of(",:");
+ if (Pos == StringRef::npos)
+ break;
+ Ret.insert(S.substr(0, Pos));
+ S = S.substr(Pos + 1);
+ }
+ Ret.insert(S);
+ }
+ return Ret;
+}
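
The comma/colon splitting can be reproduced on its own like this (a sketch with std::set in place of DenseSet):

    #include <cstdio>
    #include <set>
    #include <string>

    // Splits "liba,libb:libc" on ',' and ':' and collects the pieces, the
    // same way getExcludeLibs() walks its argument.
    static std::set<std::string> splitLibs(std::string S) {
      std::set<std::string> Ret;
      size_t Pos;
      while ((Pos = S.find_first_of(",:")) != std::string::npos) {
        Ret.insert(S.substr(0, Pos));
        S.erase(0, Pos + 1);
      }
      Ret.insert(S); // the last (or only) name
      return Ret;
    }

    int main() {
      for (const std::string &L : splitLibs("libc.a,libm.a:ALL"))
        std::puts(L.c_str());
    }
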
+
+// Handles the -exclude-libs option. If a static library file is specified
+// by the -exclude-libs option, all public symbols from the archive become
+// private unless otherwise specified by a version script.
+// A special library name "ALL" means all archive files.
+//
+// This is not a popular option, but some programs such as bionic libc use it.
+static void excludeLibs(opt::InputArgList &Args, ArrayRef<InputFile *> Files) {
+ DenseSet<StringRef> Libs = getExcludeLibs(Args);
+ bool All = Libs.count("ALL");
+
+ for (InputFile *File : Files)
+ if (auto *F = dyn_cast<ArchiveFile>(File))
+ if (All || Libs.count(path::filename(F->getName())))
+ for (Symbol *Sym : F->getSymbols())
+ Sym->VersionId = VER_NDX_LOCAL;
+}
+
// Do actual linking. Note that when this function is called,
// all linker scripts have already been parsed.
template <class ELFT> void LinkerDriver::link(opt::InputArgList &Args) {
SymbolTable<ELFT> Symtab;
elf::Symtab<ELFT>::X = &Symtab;
- Target = createTarget();
- ScriptBase = Script<ELFT>::X = make<LinkerScript<ELFT>>();
+ Target = getTarget();
- Config->Rela =
- ELFT::Is64Bits || Config->EMachine == EM_X86_64 || Config->MipsN32Abi;
- Config->Mips64EL =
- (Config->EMachine == EM_MIPS && Config->EKind == ELF64LEKind);
Config->MaxPageSize = getMaxPageSize(Args);
Config->ImageBase = getImageBase(Args);
@@ -779,6 +966,16 @@ template <class ELFT> void LinkerDriver::link(opt::InputArgList &Args) {
if (Config->OutputFile.empty())
Config->OutputFile = "a.out";
+  // Fail early if the output file or map file is not writable. If a user has
+  // a long link (e.g. a large LTO link), they do not want to run it only to
+  // find at the end that there was a mistake in their command line.
+ if (auto E = tryCreateFile(Config->OutputFile))
+ error("cannot open output file " + Config->OutputFile + ": " + E.message());
+ if (auto E = tryCreateFile(Config->MapFile))
+ error("cannot open map file " + Config->MapFile + ": " + E.message());
+ if (ErrorCount)
+ return;
+
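
The early-failure check is not lld-specific; a minimal sketch of the same idea in portable C++ (tryCreateFile itself is an lld helper, the function below is an invented stand-in):

    #include <cstdio>
    #include <string>

    // Returns true if Path can be opened for writing. Opening in append mode
    // creates the file if it is missing but does not truncate an existing one.
    static bool isWritable(const std::string &Path) {
      if (Path.empty())
        return true; // nothing to check (e.g. no map file was requested)
      if (std::FILE *F = std::fopen(Path.c_str(), "ab")) {
        std::fclose(F);
        return true;
      }
      return false;
    }

    int main() {
      if (!isWritable("/nonexistent-dir/a.out"))
        std::fprintf(stderr, "cannot open output file\n");
    }
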
// Use default entry point name if no name was given via the command
// line nor linker scripts. For some reason, MIPS entry point name is
// different from others.
@@ -806,46 +1003,64 @@ template <class ELFT> void LinkerDriver::link(opt::InputArgList &Args) {
if (ErrorCount)
return;
+ // Handle the `--undefined <sym>` options.
Symtab.scanUndefinedFlags();
+
+ // Handle undefined symbols in DSOs.
Symtab.scanShlibUndefined();
+
+ // Handle the -exclude-libs option.
+ if (Args.hasArg(OPT_exclude_libs))
+ excludeLibs(Args, Files);
+
+ // Apply version scripts.
Symtab.scanVersionScript();
+ // Create wrapped symbols for -wrap option.
+ for (auto *Arg : Args.filtered(OPT_wrap))
+ Symtab.addSymbolWrap(Arg->getValue());
+
+ // Create alias symbols for -defsym option.
+ for (std::pair<StringRef, StringRef> &Def : getDefsym(Args))
+ Symtab.addSymbolAlias(Def.first, Def.second);
+
Symtab.addCombinedLTOObject();
if (ErrorCount)
return;
- for (auto *Arg : Args.filtered(OPT_wrap))
- Symtab.wrap(Arg->getValue());
+ // Some symbols (such as __ehdr_start) are defined lazily only when there
+ // are undefined symbols for them, so we add these to trigger that logic.
+ for (StringRef Sym : Script->Opt.ReferencedSymbols)
+ Symtab.addUndefined(Sym);
+
+ // Apply symbol renames for -wrap and -defsym
+ Symtab.applySymbolRenames();
   // Now that we have a complete list of input files, no new files will be
   // added beyond this point.
// Aggregate all input sections into one place.
for (elf::ObjectFile<ELFT> *F : Symtab.getObjectFiles())
- for (InputSectionBase<ELFT> *S : F->getSections())
- if (S && S != &InputSection<ELFT>::Discarded)
- Symtab.Sections.push_back(S);
+ for (InputSectionBase *S : F->getSections())
+ if (S && S != &InputSection::Discarded)
+ InputSections.push_back(S);
for (BinaryFile *F : Symtab.getBinaryFiles())
- for (InputSectionData *S : F->getSections())
- Symtab.Sections.push_back(cast<InputSection<ELFT>>(S));
+ for (InputSectionBase *S : F->getSections())
+ InputSections.push_back(cast<InputSection>(S));
- // Do size optimizations: garbage collection and identical code folding.
+ // This adds a .comment section containing a version string. We have to add it
+ // before decompressAndMergeSections because the .comment section is a
+ // mergeable section.
+ if (!Config->Relocatable)
+ InputSections.push_back(createCommentSection<ELFT>());
+
+ // Do size optimizations: garbage collection, merging of SHF_MERGE sections
+ // and identical code folding.
if (Config->GcSections)
markLive<ELFT>();
+ decompressAndMergeSections();
if (Config->ICF)
doIcf<ELFT>();
- // MergeInputSection::splitIntoPieces needs to be called before
- // any call of MergeInputSection::getOffset. Do that.
- forEach(Symtab.Sections.begin(), Symtab.Sections.end(),
- [](InputSectionBase<ELFT> *S) {
- if (!S->Live)
- return;
- if (Decompressor::isCompressedELFSection(S->Flags, S->Name))
- S->uncompress();
- if (auto *MS = dyn_cast<MergeInputSection<ELFT>>(S))
- MS->splitIntoPieces();
- });
-
// Write the result to the file.
writeResult<ELFT>();
}
diff --git a/gnu/llvm/tools/lld/ELF/DriverUtils.cpp b/gnu/llvm/tools/lld/ELF/DriverUtils.cpp
index 36b4052d72a..2f7d16b2505 100644
--- a/gnu/llvm/tools/lld/ELF/DriverUtils.cpp
+++ b/gnu/llvm/tools/lld/ELF/DriverUtils.cpp
@@ -16,7 +16,6 @@
#include "Driver.h"
#include "Error.h"
#include "Memory.h"
-#include "ScriptParser.h"
#include "lld/Config/Version.h"
#include "lld/Core/Reproduce.h"
#include "llvm/ADT/Optional.h"
@@ -43,9 +42,9 @@ using namespace lld::elf;
// Create table mapping all options defined in Options.td
static const opt::OptTable::Info OptInfo[] = {
-#define OPTION(X1, X2, ID, KIND, GROUP, ALIAS, X6, X7, X8, X9, X10) \
- {X1, X2, X9, X10, OPT_##ID, opt::Option::KIND##Class, \
- X8, X7, OPT_##GROUP, OPT_##ALIAS, X6},
+#define OPTION(X1, X2, ID, KIND, GROUP, ALIAS, X7, X8, X9, X10, X11, X12) \
+ {X1, X2, X10, X11, OPT_##ID, opt::Option::KIND##Class, \
+ X9, X8, OPT_##GROUP, OPT_##ALIAS, X7, X12},
#include "Options.inc"
#undef OPTION
};
@@ -54,8 +53,6 @@ ELFOptTable::ELFOptTable() : OptTable(OptInfo) {}
// Parse -color-diagnostics={auto,always,never} or -no-color-diagnostics.
static bool getColorDiagnostics(opt::InputArgList &Args) {
- bool Default = (ErrorOS == &errs() && Process::StandardErrHasColors());
-
auto *Arg = Args.getLastArg(OPT_color_diagnostics, OPT_color_diagnostics_eq,
OPT_no_color_diagnostics);
if (!Arg)
@@ -67,7 +64,7 @@ static bool getColorDiagnostics(opt::InputArgList &Args) {
StringRef S = Arg->getValue();
if (S == "auto")
- return Default;
+ return ErrorOS->has_colors();
if (S == "always")
return true;
if (S != "never")
@@ -129,10 +126,11 @@ void elf::printHelp(const char *Argv0) {
// shared libraries. Therefore, we need to print out at least "elf".
// Here, we print out all the targets that we support.
outs() << Argv0 << ": supported targets: "
- << "elf32-i386 elf32-iamcu elf32-littlearm elf32-powerpc "
- << "elf32-tradbigmips elf32-tradlittlemips elf32-x86-64 "
- << "elf64-amdgpu elf64-littleaarch64 elf64-powerpc "
- << "elf64-tradbigmips elf64-tradlittlemips elf64-x86-64\n";
+ << "elf32-i386 elf32-iamcu elf32-littlearm elf32-ntradbigmips "
+ << "elf32-ntradlittlemips elf32-powerpc elf32-tradbigmips "
+ << "elf32-tradlittlemips elf32-x86-64 "
+ << "elf64-amdgpu elf64-littleaarch64 elf64-powerpc elf64-tradbigmips "
+ << "elf64-tradlittlemips elf64-x86-64\n";
}
 // Reconstructs command line arguments so that you can re-run
@@ -149,6 +147,13 @@ std::string elf::createResponseFile(const opt::InputArgList &Args) {
case OPT_INPUT:
OS << quote(rewritePath(Arg->getValue())) << "\n";
break;
+ case OPT_o:
+ // If -o path contains directories, "lld @response.txt" will likely
+ // fail because the archive we are creating doesn't contain empty
+ // directories for the output path (-o doesn't create directories).
+ // Strip directories to prevent the issue.
+ OS << "-o " << quote(sys::path::filename(Arg->getValue())) << "\n";
+ break;
case OPT_L:
case OPT_dynamic_list:
case OPT_rpath:
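
The directory stripping applied to -o above can be shown outside lld with std::filesystem; llvm::sys::path::filename plays the same role in the code itself:

    #include <cstdio>
    #include <filesystem>

    int main() {
      // Inside a --reproduce archive there are no directories for the output
      // path, so "lld @response.txt" keeps only the file name given to -o.
      std::filesystem::path Out = "build/obj/a.out";
      std::printf("-o %s\n", Out.filename().string().c_str()); // prints "-o a.out"
    }
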
diff --git a/gnu/llvm/tools/lld/ELF/Options.td b/gnu/llvm/tools/lld/ELF/Options.td
index 77ed4c7e466..1400a206bdf 100644
--- a/gnu/llvm/tools/lld/ELF/Options.td
+++ b/gnu/llvm/tools/lld/ELF/Options.td
@@ -22,6 +22,11 @@ def build_id: F<"build-id">, HelpText<"Generate build ID note">;
def build_id_eq: J<"build-id=">, HelpText<"Generate build ID note">;
+def compress_debug_sections : J<"compress-debug-sections=">,
+ HelpText<"Compress DWARF debug sections">;
+
+def defsym: J<"defsym=">, HelpText<"Define a symbol alias">;
+
def L: JoinedOrSeparate<["-"], "L">, MetaVarName<"<dir>">,
HelpText<"Add a directory to the library search path">;
@@ -48,6 +53,8 @@ def color_diagnostics_eq: J<"color-diagnostics=">,
def define_common: F<"define-common">,
HelpText<"Assign space to common symbols">;
+def demangle: F<"demangle">, HelpText<"Demangle symbol names">;
+
def disable_new_dtags: F<"disable-new-dtags">,
HelpText<"Disable new dynamic tags">;
@@ -68,6 +75,8 @@ def dynamic_list: S<"dynamic-list">,
def eh_frame_hdr: F<"eh-frame-hdr">,
HelpText<"Request creation of .eh_frame_hdr section and PT_GNU_EH_FRAME segment header">;
+def emit_relocs: F<"emit-relocs">, HelpText<"Generate relocations in output">;
+
def enable_new_dtags: F<"enable-new-dtags">,
HelpText<"Enable new dynamic tags">;
@@ -80,6 +89,12 @@ def entry: S<"entry">, MetaVarName<"<entry>">,
def error_limit: S<"error-limit">,
HelpText<"Maximum number of errors to emit before stopping (0 = no limit)">;
+def error_unresolved_symbols: F<"error-unresolved-symbols">,
+ HelpText<"Report unresolved symbols as errors">;
+
+def exclude_libs: S<"exclude-libs">,
+ HelpText<"Exclude static libraries from automatic export">;
+
def export_dynamic: F<"export-dynamic">,
HelpText<"Put symbols in the dynamic symbol table">;
@@ -89,6 +104,8 @@ def export_dynamic_symbol: S<"export-dynamic-symbol">,
def fatal_warnings: F<"fatal-warnings">,
HelpText<"Treat warnings as errors">;
+def filter: J<"filter=">, HelpText<"Set DT_FILTER field to the specified name">;
+
def fini: S<"fini">, MetaVarName<"<symbol>">,
HelpText<"Specify a finalizer function">;
@@ -109,7 +126,9 @@ def hash_style: S<"hash-style">,
def help: F<"help">, HelpText<"Print option help">;
-def icf: F<"icf=all">, HelpText<"Enable identical code folding">;
+def icf_all: F<"icf=all">, HelpText<"Enable identical code folding">;
+
+def icf_none: F<"icf=none">, HelpText<"Disable identical code folding">;
def image_base : J<"image-base=">, HelpText<"Set the base address">;
@@ -124,6 +143,8 @@ def lto_O: J<"lto-O">, MetaVarName<"<opt-level>">,
def m: JoinedOrSeparate<["-"], "m">, HelpText<"Set target emulation">;
+def Map: JS<"Map">, HelpText<"Print a link map to the specified file">;
+
def nostdlib: F<"nostdlib">,
HelpText<"Only search directories specified on the command line">;
@@ -139,6 +160,12 @@ def no_define_common: F<"no-define-common">,
def no_demangle: F<"no-demangle">,
HelpText<"Do not demangle symbol names">;
+def no_dynamic_linker: F<"no-dynamic-linker">,
+ HelpText<"Inhibit output of .interp section">;
+
+def no_export_dynamic: F<"no-export-dynamic">;
+def no_fatal_warnings: F<"no-fatal-warnings">;
+
def no_gc_sections: F<"no-gc-sections">,
HelpText<"Disable garbage collection of unused sections">;
@@ -170,7 +197,7 @@ def o: JoinedOrSeparate<["-"], "o">, MetaVarName<"<path>">,
def oformat: Separate<["--"], "oformat">, MetaVarName<"<format>">,
HelpText<"Specify the binary format for the output object file">;
-def omagic: F<"omagic">, MetaVarName<"<magic>">,
+def omagic: Flag<["--"], "omagic">, MetaVarName<"<magic>">,
HelpText<"Set the text and data sections to be readable and writable">;
def pie: F<"pie">, HelpText<"Create a position independent executable">;
@@ -178,6 +205,9 @@ def pie: F<"pie">, HelpText<"Create a position independent executable">;
def print_gc_sections: F<"print-gc-sections">,
HelpText<"List removed unused sections">;
+def print_map: F<"print-map">,
+ HelpText<"Print a link map to the standard output">;
+
def reproduce: S<"reproduce">,
HelpText<"Dump linker invocation and input files for debugging">;
@@ -221,7 +251,7 @@ def threads: F<"threads">, HelpText<"Run the linker multi-threaded">;
def trace: F<"trace">, HelpText<"Print the names of the input files">;
-def trace_symbol : J<"trace-symbol=">, HelpText<"Trace references to symbols">;
+def trace_symbol : S<"trace-symbol">, HelpText<"Trace references to symbols">;
def undefined: S<"undefined">,
HelpText<"Force undefined symbol during linking">;
@@ -244,6 +274,9 @@ def version_script: S<"version-script">,
def warn_common: F<"warn-common">,
HelpText<"Warn about duplicate common symbols">;
+def warn_unresolved_symbols: F<"warn-unresolved-symbols">,
+ HelpText<"Report unresolved symbols as warnings">;
+
def whole_archive: F<"whole-archive">,
HelpText<"Force load of all members in a static library">;
@@ -264,25 +297,32 @@ def alias_L__library_path: J<"library-path=">, Alias<L>;
def alias_define_common_d: Flag<["-"], "d">, Alias<define_common>;
def alias_define_common_dc: F<"dc">, Alias<define_common>;
def alias_define_common_dp: F<"dp">, Alias<define_common>;
+def alias_defsym: S<"defsym">, Alias<defsym>;
def alias_discard_all_x: Flag<["-"], "x">, Alias<discard_all>;
def alias_discard_locals_X: Flag<["-"], "X">, Alias<discard_locals>;
def alias_dynamic_list: J<"dynamic-list=">, Alias<dynamic_list>;
+def alias_emit_relocs: Flag<["-"], "q">, Alias<emit_relocs>;
def alias_entry_e: JoinedOrSeparate<["-"], "e">, Alias<entry>;
def alias_entry_entry: J<"entry=">, Alias<entry>;
def alias_error_limit: J<"error-limit=">, Alias<error_limit>;
+def alias_exclude_libs: J<"exclude-libs=">, Alias<exclude_libs>;
def alias_export_dynamic_E: Flag<["-"], "E">, Alias<export_dynamic>;
def alias_export_dynamic_symbol: J<"export-dynamic-symbol=">,
Alias<export_dynamic_symbol>;
+def alias_filter: Separate<["-"], "F">, Alias<filter>;
def alias_fini_fini: J<"fini=">, Alias<fini>;
def alias_format_b: S<"b">, Alias<format>;
def alias_hash_style_hash_style: J<"hash-style=">, Alias<hash_style>;
def alias_init_init: J<"init=">, Alias<init>;
def alias_l__library: J<"library=">, Alias<l>;
+def alias_Map_eq: J<"Map=">, Alias<Map>;
def alias_omagic: Flag<["-"], "N">, Alias<omagic>;
def alias_o_output: Joined<["--"], "output=">, Alias<o>;
def alias_o_output2 : Separate<["--"], "output">, Alias<o>;
def alias_pie_pic_executable: F<"pic-executable">, Alias<pie>;
+def alias_print_map_M: Flag<["-"], "M">, Alias<print_map>;
def alias_relocatable_r: Flag<["-"], "r">, Alias<relocatable>;
+def alias_reproduce_eq: J<"reproduce=">, Alias<reproduce>;
def alias_retain_symbols_file: S<"retain-symbols-file">, Alias<retain_symbols_file>;
def alias_rpath_R: JoinedOrSeparate<["-"], "R">, Alias<rpath>;
def alias_rpath_rpath: J<"rpath=">, Alias<rpath>;
@@ -297,12 +337,14 @@ def alias_strip_debug_S: Flag<["-"], "S">, Alias<strip_debug>;
def alias_Tbss: J<"Tbss=">, Alias<Tbss>;
def alias_Tdata: J<"Tdata=">, Alias<Tdata>;
def alias_trace: Flag<["-"], "t">, Alias<trace>;
+def trace_trace_symbol_eq : J<"trace-symbol=">, Alias<trace_symbol>;
def alias_trace_symbol_y : JoinedOrSeparate<["-"], "y">, Alias<trace_symbol>;
def alias_Ttext: J<"Ttext=">, Alias<Ttext>;
def alias_Ttext_segment: S<"Ttext-segment">, Alias<Ttext>;
def alias_Ttext_segment_eq: J<"Ttext-segment=">, Alias<Ttext>;
def alias_undefined_eq: J<"undefined=">, Alias<undefined>;
def alias_undefined_u: JoinedOrSeparate<["-"], "u">, Alias<undefined>;
+def alias_version_script_eq: J<"version-script=">, Alias<version_script>;
def alias_version_V: Flag<["-"], "V">, Alias<version>;
def alias_wrap_wrap: J<"wrap=">, Alias<wrap>;
@@ -314,6 +356,26 @@ def end_group_paren: Flag<["-"], ")">;
def start_group: F<"start-group">;
def start_group_paren: Flag<["-"], "(">;
+// LTO-related options.
+def lto_aa_pipeline: J<"lto-aa-pipeline=">,
+ HelpText<"AA pipeline to run during LTO. Used in conjunction with -lto-newpm-passes">;
+def lto_newpm_passes: J<"lto-newpm-passes=">,
+ HelpText<"Passes to run during LTO">;
+def lto_partitions: J<"lto-partitions=">,
+ HelpText<"Number of LTO codegen partitions">;
+def disable_verify: F<"disable-verify">;
+def mllvm: S<"mllvm">;
+def opt_remarks_filename: Separate<["--"], "opt-remarks-filename">,
+ HelpText<"YAML output file for optimization remarks">;
+def opt_remarks_with_hotness: Flag<["--"], "opt-remarks-with-hotness">,
+  HelpText<"Include hotness information in the optimization remarks file">;
+def save_temps: F<"save-temps">;
+def thinlto_cache_dir: J<"thinlto-cache-dir=">,
+ HelpText<"Path to ThinLTO cached object file directory">;
+def thinlto_cache_policy: S<"thinlto-cache-policy">,
+ HelpText<"Pruning policy for the ThinLTO cache">;
+def thinlto_jobs: J<"thinlto-jobs=">, HelpText<"Number of ThinLTO jobs">;
+
// Ignore LTO plugin-related options.
// clang -flto passes -plugin and -plugin-opt to the linker. This is required
// for ld.gold and ld.bfd to get LTO working. But it's not for lld which doesn't
@@ -329,17 +391,13 @@ def plugin_opt_eq: J<"plugin-opt=">;
// Options listed below are silently ignored for now for compatibility.
def allow_shlib_undefined: F<"allow-shlib-undefined">;
def cref: Flag<["--"], "cref">;
-def demangle: F<"demangle">;
def detect_odr_violations: F<"detect-odr-violations">;
def g: Flag<["-"], "g">;
-def M: Flag<["-"], "M">;
-def Map: JS<"Map">;
def no_add_needed: F<"no-add-needed">;
def no_allow_shlib_undefined: F<"no-allow-shlib-undefined">;
def no_copy_dt_needed_entries: F<"no-copy-dt-needed-entries">,
Alias<no_add_needed>;
-def no_dynamic_linker: F<"no-dynamic-linker">;
-def no_fatal_warnings: F<"no-fatal-warnings">;
+def no_keep_memory: F<"no-keep-memory">;
def no_mmap_output_file: F<"no-mmap-output-file">;
def no_warn_common: F<"no-warn-common">;
def no_warn_mismatch: F<"no-warn-mismatch">;
@@ -354,19 +412,3 @@ def EL : F<"EL">;
def G: JoinedOrSeparate<["-"], "G">;
def Qy : F<"Qy">;
-// Aliases for ignored options
-def alias_Map_eq: J<"Map=">, Alias<Map>;
-def alias_version_script_version_script: J<"version-script=">,
- Alias<version_script>;
-
-// LTO-related options.
-def lto_aa_pipeline: J<"lto-aa-pipeline=">,
- HelpText<"AA pipeline to run during LTO. Used in conjunction with -lto-newpm-passes">;
-def lto_newpm_passes: J<"lto-newpm-passes=">,
- HelpText<"Passes to run during LTO">;
-def lto_partitions: J<"lto-partitions=">,
- HelpText<"Number of LTO codegen partitions">;
-def disable_verify: F<"disable-verify">;
-def mllvm: S<"mllvm">;
-def save_temps: F<"save-temps">;
-def thinlto_jobs: J<"thinlto-jobs=">, HelpText<"Number of ThinLTO jobs">;
diff --git a/gnu/llvm/tools/lld/ELF/OutputSections.cpp b/gnu/llvm/tools/lld/ELF/OutputSections.cpp
index edcc0f186b3..abe54816586 100644
--- a/gnu/llvm/tools/lld/ELF/OutputSections.cpp
+++ b/gnu/llvm/tools/lld/ELF/OutputSections.cpp
@@ -9,7 +9,6 @@
#include "OutputSections.h"
#include "Config.h"
-#include "EhFrame.h"
#include "LinkerScript.h"
#include "Memory.h"
#include "Strings.h"
@@ -17,7 +16,7 @@
#include "SyntheticSections.h"
#include "Target.h"
#include "Threads.h"
-#include "llvm/Support/Dwarf.h"
+#include "llvm/BinaryFormat/Dwarf.h"
#include "llvm/Support/MD5.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/SHA1.h"
@@ -31,15 +30,21 @@ using namespace llvm::ELF;
using namespace lld;
using namespace lld::elf;
-OutputSectionBase::OutputSectionBase(StringRef Name, uint32_t Type,
- uint64_t Flags)
- : Name(Name) {
- this->Type = Type;
- this->Flags = Flags;
- this->Addralign = 1;
-}
-
-uint32_t OutputSectionBase::getPhdrFlags() const {
+uint8_t Out::First;
+OutputSection *Out::Opd;
+uint8_t *Out::OpdBuf;
+PhdrEntry *Out::TlsPhdr;
+OutputSection *Out::DebugInfo;
+OutputSection *Out::ElfHeader;
+OutputSection *Out::ProgramHeaders;
+OutputSection *Out::PreinitArray;
+OutputSection *Out::InitArray;
+OutputSection *Out::FiniArray;
+
+std::vector<OutputSection *> elf::OutputSections;
+std::vector<OutputSectionCommand *> elf::OutputSectionCommands;
+
+uint32_t OutputSection::getPhdrFlags() const {
uint32_t Ret = PF_R;
if (Flags & SHF_WRITE)
Ret |= PF_W;
@@ -49,9 +54,9 @@ uint32_t OutputSectionBase::getPhdrFlags() const {
}
template <class ELFT>
-void OutputSectionBase::writeHeaderTo(typename ELFT::Shdr *Shdr) {
+void OutputSection::writeHeaderTo(typename ELFT::Shdr *Shdr) {
Shdr->sh_entsize = Entsize;
- Shdr->sh_addralign = Addralign;
+ Shdr->sh_addralign = Alignment;
Shdr->sh_type = Type;
Shdr->sh_offset = Offset;
Shdr->sh_flags = Flags;
@@ -62,490 +67,41 @@ void OutputSectionBase::writeHeaderTo(typename ELFT::Shdr *Shdr) {
Shdr->sh_name = ShName;
}
-template <class ELFT> static uint64_t getEntsize(uint32_t Type) {
- switch (Type) {
- case SHT_RELA:
- return sizeof(typename ELFT::Rela);
- case SHT_REL:
- return sizeof(typename ELFT::Rel);
- case SHT_MIPS_REGINFO:
- return sizeof(Elf_Mips_RegInfo<ELFT>);
- case SHT_MIPS_OPTIONS:
- return sizeof(Elf_Mips_Options<ELFT>) + sizeof(Elf_Mips_RegInfo<ELFT>);
- case SHT_MIPS_ABIFLAGS:
- return sizeof(Elf_Mips_ABIFlags<ELFT>);
- default:
- return 0;
- }
-}
+OutputSection::OutputSection(StringRef Name, uint32_t Type, uint64_t Flags)
+ : SectionBase(Output, Name, Flags, /*Entsize*/ 0, /*Alignment*/ 1, Type,
+ /*Info*/ 0,
+ /*Link*/ 0),
+ SectionIndex(INT_MAX) {}
-template <class ELFT>
-OutputSection<ELFT>::OutputSection(StringRef Name, uint32_t Type, uintX_t Flags)
- : OutputSectionBase(Name, Type, Flags) {
- this->Entsize = getEntsize<ELFT>(Type);
-}
-
-template <typename ELFT>
-static bool compareByFilePosition(InputSection<ELFT> *A,
- InputSection<ELFT> *B) {
- // Synthetic doesn't have link order dependecy, stable_sort will keep it last
- if (A->kind() == InputSectionData::Synthetic ||
- B->kind() == InputSectionData::Synthetic)
- return false;
- auto *LA = cast<InputSection<ELFT>>(A->getLinkOrderDep());
- auto *LB = cast<InputSection<ELFT>>(B->getLinkOrderDep());
- OutputSectionBase *AOut = LA->OutSec;
- OutputSectionBase *BOut = LB->OutSec;
- if (AOut != BOut)
- return AOut->SectionIndex < BOut->SectionIndex;
- return LA->OutSecOff < LB->OutSecOff;
+static uint64_t updateOffset(uint64_t Off, InputSection *S) {
+ Off = alignTo(Off, S->Alignment);
+ S->OutSecOff = Off;
+ return Off + S->getSize();
}
-template <class ELFT> void OutputSection<ELFT>::finalize() {
- if ((this->Flags & SHF_LINK_ORDER) && !this->Sections.empty()) {
- std::sort(Sections.begin(), Sections.end(), compareByFilePosition<ELFT>);
- Size = 0;
- assignOffsets();
-
- // We must preserve the link order dependency of sections with the
- // SHF_LINK_ORDER flag. The dependency is indicated by the sh_link field. We
- // need to translate the InputSection sh_link to the OutputSection sh_link,
- // all InputSections in the OutputSection have the same dependency.
- if (auto *D = this->Sections.front()->getLinkOrderDep())
- this->Link = D->OutSec->SectionIndex;
- }
-
- uint32_t Type = this->Type;
- if (!Config->Relocatable || (Type != SHT_RELA && Type != SHT_REL))
- return;
-
- this->Link = In<ELFT>::SymTab->OutSec->SectionIndex;
- // sh_info for SHT_REL[A] sections should contain the section header index of
- // the section to which the relocation applies.
- InputSectionBase<ELFT> *S = Sections[0]->getRelocatedSection();
- this->Info = S->OutSec->SectionIndex;
-}
-
-template <class ELFT>
-void OutputSection<ELFT>::addSection(InputSectionData *C) {
- assert(C->Live);
- auto *S = cast<InputSection<ELFT>>(C);
+void OutputSection::addSection(InputSection *S) {
+ assert(S->Live);
Sections.push_back(S);
- S->OutSec = this;
+ S->Parent = this;
this->updateAlignment(S->Alignment);
- // Keep sh_entsize value of the input section to be able to perform merging
- // later during a final linking using the generated relocatable object.
- if (Config->Relocatable && (S->Flags & SHF_MERGE))
- this->Entsize = S->Entsize;
-}
-
-// This function is called after we sort input sections
-// and scan relocations to setup sections' offsets.
-template <class ELFT> void OutputSection<ELFT>::assignOffsets() {
- uintX_t Off = this->Size;
- for (InputSection<ELFT> *S : Sections) {
- Off = alignTo(Off, S->Alignment);
- S->OutSecOff = Off;
- Off += S->getSize();
- }
- this->Size = Off;
-}
-
-template <class ELFT>
-void OutputSection<ELFT>::sort(
- std::function<int(InputSection<ELFT> *S)> Order) {
- typedef std::pair<unsigned, InputSection<ELFT> *> Pair;
- auto Comp = [](const Pair &A, const Pair &B) { return A.first < B.first; };
-
- std::vector<Pair> V;
- for (InputSection<ELFT> *S : Sections)
- V.push_back({Order(S), S});
- std::stable_sort(V.begin(), V.end(), Comp);
- Sections.clear();
- for (Pair &P : V)
- Sections.push_back(P.second);
-}
-
-// Sorts input sections by section name suffixes, so that .foo.N comes
-// before .foo.M if N < M. Used to sort .{init,fini}_array.N sections.
-// We want to keep the original order if the priorities are the same
-// because the compiler keeps the original initialization order in a
-// translation unit and we need to respect that.
-// For more detail, read the section of the GCC's manual about init_priority.
-template <class ELFT> void OutputSection<ELFT>::sortInitFini() {
- // Sort sections by priority.
- sort([](InputSection<ELFT> *S) { return getPriority(S->Name); });
-}
-
-// Returns true if S matches /Filename.?\.o$/.
-static bool isCrtBeginEnd(StringRef S, StringRef Filename) {
- if (!S.endswith(".o"))
- return false;
- S = S.drop_back(2);
- if (S.endswith(Filename))
- return true;
- return !S.empty() && S.drop_back().endswith(Filename);
-}
-
-static bool isCrtbegin(StringRef S) { return isCrtBeginEnd(S, "crtbegin"); }
-static bool isCrtend(StringRef S) { return isCrtBeginEnd(S, "crtend"); }
-
-// .ctors and .dtors are sorted by this priority from highest to lowest.
-//
-// 1. The section was contained in crtbegin (crtbegin contains
-// some sentinel value in its .ctors and .dtors so that the runtime
-// can find the beginning of the sections.)
-//
-// 2. The section has an optional priority value in the form of ".ctors.N"
-// or ".dtors.N" where N is a number. Unlike .{init,fini}_array,
-// they are compared as string rather than number.
-//
-// 3. The section is just ".ctors" or ".dtors".
-//
-// 4. The section was contained in crtend, which contains an end marker.
-//
-// In an ideal world, we don't need this function because .init_array and
-// .ctors are duplicate features (and .init_array is newer.) However, there
-// are too many real-world use cases of .ctors, so we had no choice to
-// support that with this rather ad-hoc semantics.
-template <class ELFT>
-static bool compCtors(const InputSection<ELFT> *A,
- const InputSection<ELFT> *B) {
- bool BeginA = isCrtbegin(A->getFile()->getName());
- bool BeginB = isCrtbegin(B->getFile()->getName());
- if (BeginA != BeginB)
- return BeginA;
- bool EndA = isCrtend(A->getFile()->getName());
- bool EndB = isCrtend(B->getFile()->getName());
- if (EndA != EndB)
- return EndB;
- StringRef X = A->Name;
- StringRef Y = B->Name;
- assert(X.startswith(".ctors") || X.startswith(".dtors"));
- assert(Y.startswith(".ctors") || Y.startswith(".dtors"));
- X = X.substr(6);
- Y = Y.substr(6);
- if (X.empty() && Y.empty())
- return false;
- return X < Y;
-}
-
-// Sorts input sections by the special rules for .ctors and .dtors.
-// Unfortunately, the rules are different from the one for .{init,fini}_array.
-// Read the comment above.
-template <class ELFT> void OutputSection<ELFT>::sortCtorsDtors() {
- std::stable_sort(Sections.begin(), Sections.end(), compCtors<ELFT>);
-}
-
-// Fill [Buf, Buf + Size) with Filler. Filler is written in big
-// endian order. This is used for linker script "=fillexp" command.
-void fill(uint8_t *Buf, size_t Size, uint32_t Filler) {
- uint8_t V[4];
- write32be(V, Filler);
- size_t I = 0;
- for (; I + 4 < Size; I += 4)
- memcpy(Buf + I, V, 4);
- memcpy(Buf + I, V, Size - I);
-}
-
-template <class ELFT> void OutputSection<ELFT>::writeTo(uint8_t *Buf) {
- Loc = Buf;
- if (uint32_t Filler = Script<ELFT>::X->getFiller(this->Name))
- fill(Buf, this->Size, Filler);
-
- auto Fn = [=](InputSection<ELFT> *IS) { IS->writeTo(Buf); };
- forEach(Sections.begin(), Sections.end(), Fn);
-
- // Linker scripts may have BYTE()-family commands with which you
- // can write arbitrary bytes to the output. Process them if any.
- Script<ELFT>::X->writeDataBytes(this->Name, Buf);
-}
-
-template <class ELFT>
-EhOutputSection<ELFT>::EhOutputSection()
- : OutputSectionBase(".eh_frame", SHT_PROGBITS, SHF_ALLOC) {}
-
-// Search for an existing CIE record or create a new one.
-// CIE records from input object files are uniquified by their contents
-// and where their relocations point to.
-template <class ELFT>
-template <class RelTy>
-CieRecord *EhOutputSection<ELFT>::addCie(EhSectionPiece &Piece,
- ArrayRef<RelTy> Rels) {
- auto *Sec = cast<EhInputSection<ELFT>>(Piece.ID);
- const endianness E = ELFT::TargetEndianness;
- if (read32<E>(Piece.data().data() + 4) != 0)
- fatal(toString(Sec) + ": CIE expected at beginning of .eh_frame");
-
- SymbolBody *Personality = nullptr;
- unsigned FirstRelI = Piece.FirstRelocation;
- if (FirstRelI != (unsigned)-1)
- Personality = &Sec->getFile()->getRelocTargetSym(Rels[FirstRelI]);
-
- // Search for an existing CIE by CIE contents/relocation target pair.
- CieRecord *Cie = &CieMap[{Piece.data(), Personality}];
-
- // If not found, create a new one.
- if (Cie->Piece == nullptr) {
- Cie->Piece = &Piece;
- Cies.push_back(Cie);
- }
- return Cie;
-}
-
-// There is one FDE per function. Returns true if a given FDE
-// points to a live function.
-template <class ELFT>
-template <class RelTy>
-bool EhOutputSection<ELFT>::isFdeLive(EhSectionPiece &Piece,
- ArrayRef<RelTy> Rels) {
- auto *Sec = cast<EhInputSection<ELFT>>(Piece.ID);
- unsigned FirstRelI = Piece.FirstRelocation;
- if (FirstRelI == (unsigned)-1)
- fatal(toString(Sec) + ": FDE doesn't reference another section");
- const RelTy &Rel = Rels[FirstRelI];
- SymbolBody &B = Sec->getFile()->getRelocTargetSym(Rel);
- auto *D = dyn_cast<DefinedRegular<ELFT>>(&B);
- if (!D || !D->Section)
- return false;
- InputSectionBase<ELFT> *Target = D->Section->Repl;
- return Target && Target->Live;
-}
-
-// .eh_frame is a sequence of CIE or FDE records. In general, there
-// is one CIE record per input object file which is followed by
-// a list of FDEs. This function searches an existing CIE or create a new
-// one and associates FDEs to the CIE.
-template <class ELFT>
-template <class RelTy>
-void EhOutputSection<ELFT>::addSectionAux(EhInputSection<ELFT> *Sec,
- ArrayRef<RelTy> Rels) {
- const endianness E = ELFT::TargetEndianness;
-
- DenseMap<size_t, CieRecord *> OffsetToCie;
- for (EhSectionPiece &Piece : Sec->Pieces) {
- // The empty record is the end marker.
- if (Piece.size() == 4)
- return;
-
- size_t Offset = Piece.InputOff;
- uint32_t ID = read32<E>(Piece.data().data() + 4);
- if (ID == 0) {
- OffsetToCie[Offset] = addCie(Piece, Rels);
- continue;
- }
-
- uint32_t CieOffset = Offset + 4 - ID;
- CieRecord *Cie = OffsetToCie[CieOffset];
- if (!Cie)
- fatal(toString(Sec) + ": invalid CIE reference");
-
- if (!isFdeLive(Piece, Rels))
- continue;
- Cie->FdePieces.push_back(&Piece);
- NumFdes++;
- }
-}
-
-template <class ELFT>
-void EhOutputSection<ELFT>::addSection(InputSectionData *C) {
- auto *Sec = cast<EhInputSection<ELFT>>(C);
- Sec->OutSec = this;
- this->updateAlignment(Sec->Alignment);
- Sections.push_back(Sec);
-
- // .eh_frame is a sequence of CIE or FDE records. This function
- // splits it into pieces so that we can call
- // SplitInputSection::getSectionPiece on the section.
- Sec->split();
- if (Sec->Pieces.empty())
- return;
-
- if (Sec->NumRelocations) {
- if (Sec->AreRelocsRela)
- addSectionAux(Sec, Sec->relas());
- else
- addSectionAux(Sec, Sec->rels());
- return;
- }
- addSectionAux(Sec, makeArrayRef<Elf_Rela>(nullptr, nullptr));
-}
-
-template <class ELFT>
-static void writeCieFde(uint8_t *Buf, ArrayRef<uint8_t> D) {
- memcpy(Buf, D.data(), D.size());
- // Fix the size field. -4 since size does not include the size field itself.
- const endianness E = ELFT::TargetEndianness;
- write32<E>(Buf, alignTo(D.size(), sizeof(typename ELFT::uint)) - 4);
-}
-
-template <class ELFT> void EhOutputSection<ELFT>::finalize() {
- if (this->Size)
- return; // Already finalized.
-
- size_t Off = 0;
- for (CieRecord *Cie : Cies) {
- Cie->Piece->OutputOff = Off;
- Off += alignTo(Cie->Piece->size(), sizeof(uintX_t));
-
- for (EhSectionPiece *Fde : Cie->FdePieces) {
- Fde->OutputOff = Off;
- Off += alignTo(Fde->size(), sizeof(uintX_t));
- }
- }
- this->Size = Off + 4;
-}
-
-template <class ELFT> static uint64_t readFdeAddr(uint8_t *Buf, int Size) {
- const endianness E = ELFT::TargetEndianness;
- switch (Size) {
- case DW_EH_PE_udata2:
- return read16<E>(Buf);
- case DW_EH_PE_udata4:
- return read32<E>(Buf);
- case DW_EH_PE_udata8:
- return read64<E>(Buf);
- case DW_EH_PE_absptr:
- if (ELFT::Is64Bits)
- return read64<E>(Buf);
- return read32<E>(Buf);
- }
- fatal("unknown FDE size encoding");
-}
-
-// Returns the VA to which a given FDE (on a mmap'ed buffer) is applied to.
-// We need it to create .eh_frame_hdr section.
-template <class ELFT>
-typename ELFT::uint EhOutputSection<ELFT>::getFdePc(uint8_t *Buf, size_t FdeOff,
- uint8_t Enc) {
- // The starting address to which this FDE applies is
- // stored at FDE + 8 byte.
- size_t Off = FdeOff + 8;
- uint64_t Addr = readFdeAddr<ELFT>(Buf + Off, Enc & 0x7);
- if ((Enc & 0x70) == DW_EH_PE_absptr)
- return Addr;
- if ((Enc & 0x70) == DW_EH_PE_pcrel)
- return Addr + this->Addr + Off;
- fatal("unknown FDE size relative encoding");
-}
-
-template <class ELFT> void EhOutputSection<ELFT>::writeTo(uint8_t *Buf) {
- const endianness E = ELFT::TargetEndianness;
- for (CieRecord *Cie : Cies) {
- size_t CieOffset = Cie->Piece->OutputOff;
- writeCieFde<ELFT>(Buf + CieOffset, Cie->Piece->data());
-
- for (EhSectionPiece *Fde : Cie->FdePieces) {
- size_t Off = Fde->OutputOff;
- writeCieFde<ELFT>(Buf + Off, Fde->data());
-
- // FDE's second word should have the offset to an associated CIE.
- // Write it.
- write32<E>(Buf + Off + 4, Off + 4 - CieOffset);
- }
- }
-
- for (EhInputSection<ELFT> *S : Sections)
- S->relocate(Buf, nullptr);
-
- // Construct .eh_frame_hdr. .eh_frame_hdr is a binary search table
- // to get a FDE from an address to which FDE is applied. So here
- // we obtain two addresses and pass them to EhFrameHdr object.
- if (In<ELFT>::EhFrameHdr) {
- for (CieRecord *Cie : Cies) {
- uint8_t Enc = getFdeEncoding<ELFT>(Cie->Piece);
- for (SectionPiece *Fde : Cie->FdePieces) {
- uintX_t Pc = getFdePc(Buf, Fde->OutputOff, Enc);
- uintX_t FdeVA = this->Addr + Fde->OutputOff;
- In<ELFT>::EhFrameHdr->addFde(Pc, FdeVA);
- }
- }
- }
-}
-
-template <class ELFT>
-MergeOutputSection<ELFT>::MergeOutputSection(StringRef Name, uint32_t Type,
- uintX_t Flags, uintX_t Alignment)
- : OutputSectionBase(Name, Type, Flags),
- Builder(StringTableBuilder::RAW, Alignment) {}
-
-template <class ELFT> void MergeOutputSection<ELFT>::writeTo(uint8_t *Buf) {
- Builder.write(Buf);
-}
-
-template <class ELFT>
-void MergeOutputSection<ELFT>::addSection(InputSectionData *C) {
- auto *Sec = cast<MergeInputSection<ELFT>>(C);
- Sec->OutSec = this;
- this->updateAlignment(Sec->Alignment);
- this->Entsize = Sec->Entsize;
- Sections.push_back(Sec);
-}
-
-template <class ELFT> bool MergeOutputSection<ELFT>::shouldTailMerge() const {
- return (this->Flags & SHF_STRINGS) && Config->Optimize >= 2;
-}
-
-template <class ELFT> void MergeOutputSection<ELFT>::finalizeTailMerge() {
- // Add all string pieces to the string table builder to create section
- // contents.
- for (MergeInputSection<ELFT> *Sec : Sections)
- for (size_t I = 0, E = Sec->Pieces.size(); I != E; ++I)
- if (Sec->Pieces[I].Live)
- Builder.add(Sec->getData(I));
-
- // Fix the string table content. After this, the contents will never change.
- Builder.finalize();
- this->Size = Builder.getSize();
-
- // finalize() fixed tail-optimized strings, so we can now get
- // offsets of strings. Get an offset for each string and save it
- // to a corresponding StringPiece for easy access.
- for (MergeInputSection<ELFT> *Sec : Sections)
- for (size_t I = 0, E = Sec->Pieces.size(); I != E; ++I)
- if (Sec->Pieces[I].Live)
- Sec->Pieces[I].OutputOff = Builder.getOffset(Sec->getData(I));
-}
-
-template <class ELFT> void MergeOutputSection<ELFT>::finalizeNoTailMerge() {
- // Add all string pieces to the string table builder to create section
- // contents. Because we are not tail-optimizing, offsets of strings are
- // fixed when they are added to the builder (string table builder contains
- // a hash table from strings to offsets).
- for (MergeInputSection<ELFT> *Sec : Sections)
- for (size_t I = 0, E = Sec->Pieces.size(); I != E; ++I)
- if (Sec->Pieces[I].Live)
- Sec->Pieces[I].OutputOff = Builder.add(Sec->getData(I));
-
- Builder.finalizeInOrder();
- this->Size = Builder.getSize();
-}
+  // The actual offsets will be computed by assignAddresses. For now, use
+  // a crude approximation so that it is at least easy for other code to know
+  // the section order. It is also used to calculate the output section size
+  // early for compressed debug sections.
+ this->Size = updateOffset(Size, S);
-template <class ELFT> void MergeOutputSection<ELFT>::finalize() {
- if (shouldTailMerge())
- finalizeTailMerge();
- else
- finalizeNoTailMerge();
+  // If this section contains a table of fixed-size entries, sh_entsize
+  // holds the element size. Consequently, if this contains two or more
+  // input sections, all of them must have the same sh_entsize. However,
+  // you can put different types of input sections into one output
+  // section by using linker scripts. I don't know what to do here.
+  // Probably we should handle that as an error. But for now we just
+  // pick the largest sh_entsize.
+ this->Entsize = std::max(this->Entsize, S->Entsize);
}
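
The offset bookkeeping done by updateOffset() is just align-then-advance; a standalone worked example (the section sizes and alignments below are made up):

    #include <cstdint>
    #include <cstdio>
    #include <vector>

    // Rounds Off up to the next multiple of Align (Align must be a power of two).
    static uint64_t alignTo(uint64_t Off, uint64_t Align) {
      return (Off + Align - 1) & ~(Align - 1);
    }

    struct Sec { uint64_t Size, Alignment, OutSecOff; };

    int main() {
      std::vector<Sec> Sections = {{10, 4, 0}, {3, 1, 0}, {8, 16, 0}};
      uint64_t Off = 0;
      for (Sec &S : Sections) {
        Off = alignTo(Off, S.Alignment); // place the section at its alignment
        S.OutSecOff = Off;
        Off += S.Size;                   // advance past it
      }
      // Offsets come out as 0, 10 and 16; the output section grows to 24 bytes.
      for (const Sec &S : Sections)
        std::printf("off=%llu size=%llu\n", (unsigned long long)S.OutSecOff,
                    (unsigned long long)S.Size);
      std::printf("output section size=%llu\n", (unsigned long long)Off);
    }
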
-template <class ELFT>
-static typename ELFT::uint getOutFlags(InputSectionBase<ELFT> *S) {
- return S->Flags & ~SHF_GROUP & ~SHF_COMPRESSED;
-}
-
-namespace llvm {
-template <> struct DenseMapInfo<lld::elf::SectionKey> {
- static lld::elf::SectionKey getEmptyKey();
- static lld::elf::SectionKey getTombstoneKey();
- static unsigned getHashValue(const lld::elf::SectionKey &Val);
- static bool isEqual(const lld::elf::SectionKey &LHS,
- const lld::elf::SectionKey &RHS);
-};
-}
-
-template <class ELFT>
-static SectionKey createKey(InputSectionBase<ELFT> *C, StringRef OutsecName) {
+static SectionKey createKey(InputSectionBase *C, StringRef OutsecName) {
// The ELF spec just says
// ----------------------------------------------------------------
// In the first phase, input sections that match in name, type and
@@ -588,81 +144,109 @@ static SectionKey createKey(InputSectionBase<ELFT> *C, StringRef OutsecName) {
//
// Given the above issues, we instead merge sections by name and error on
// incompatible types and flags.
- //
- // The exception being SHF_MERGE, where we create different output sections
- // for each alignment. This makes each output section simple. In case of
- // relocatable object generation we do not try to perform merging and treat
- // SHF_MERGE sections as regular ones, but also create different output
- // sections for them to allow merging at final linking stage.
- //
- // Fortunately, creating symbols in the middle of a merge section is not
- // supported by bfd or gold, so the SHF_MERGE exception should not cause
- // problems with most linker scripts.
- typedef typename ELFT::uint uintX_t;
- uintX_t Flags = C->Flags & (SHF_MERGE | SHF_STRINGS);
-
- uintX_t Alignment = 0;
- if (isa<MergeInputSection<ELFT>>(C) ||
- (Config->Relocatable && (C->Flags & SHF_MERGE)))
- Alignment = std::max<uintX_t>(C->Alignment, C->Entsize);
+ uint32_t Alignment = 0;
+ uint64_t Flags = 0;
+ if (Config->Relocatable && (C->Flags & SHF_MERGE)) {
+ Alignment = std::max<uint64_t>(C->Alignment, C->Entsize);
+ Flags = C->Flags & (SHF_MERGE | SHF_STRINGS);
+ }
return SectionKey{OutsecName, Flags, Alignment};
}
-template <class ELFT> OutputSectionFactory<ELFT>::OutputSectionFactory() {}
+OutputSectionFactory::OutputSectionFactory() {}
-template <class ELFT> OutputSectionFactory<ELFT>::~OutputSectionFactory() {}
+static uint64_t getIncompatibleFlags(uint64_t Flags) {
+ return Flags & (SHF_ALLOC | SHF_TLS);
+}
-template <class ELFT>
-std::pair<OutputSectionBase *, bool>
-OutputSectionFactory<ELFT>::create(InputSectionBase<ELFT> *C,
- StringRef OutsecName) {
- SectionKey Key = createKey(C, OutsecName);
- return create(Key, C);
+// We allow sections of the types listed below to be merged into a
+// single progbits section. This is typically done by linker
+// scripts. Merging nobits and progbits will force disk space
+// to be allocated for nobits sections. Other ones don't require
+// any special treatment on top of progbits, so there doesn't
+// seem to be any harm in merging them.
+static bool canMergeToProgbits(unsigned Type) {
+ return Type == SHT_NOBITS || Type == SHT_PROGBITS || Type == SHT_INIT_ARRAY ||
+ Type == SHT_PREINIT_ARRAY || Type == SHT_FINI_ARRAY ||
+ Type == SHT_NOTE;
}
-static uint64_t getIncompatibleFlags(uint64_t Flags) {
- return Flags & (SHF_ALLOC | SHF_TLS);
+void elf::reportDiscarded(InputSectionBase *IS) {
+ if (!Config->PrintGcSections)
+ return;
+ message("removing unused section from '" + IS->Name + "' in file '" +
+ IS->File->getName() + "'");
+}
+
+void OutputSectionFactory::addInputSec(InputSectionBase *IS,
+ StringRef OutsecName) {
+  // Sections with the SHT_GROUP attribute reach here only when the -r option
+  // is given. Such sections define "section groups", and InputFiles.cpp has
+  // dedup'ed section groups by their signatures. For -r, we want to pass
+  // through all SHT_GROUP sections without merging them because merging them
+  // creates broken section contents.
+ if (IS->Type == SHT_GROUP) {
+ OutputSection *Out = nullptr;
+ addInputSec(IS, OutsecName, Out);
+ return;
+ }
+
+  // Imagine a ".zed : { *(.foo) *(.bar) }" script. Both .foo and .bar may
+  // have relocation sections, .rela.foo and .rela.bar for example. Most tools
+  // do not allow multiple REL[A] sections for one output section, so we
+  // should combine these relocation sections into a single output section.
+  // We skip synthetic sections because they can be .rela.dyn/.rela.plt or
+  // other REL[A] sections created by the linker itself.
+ if (!isa<SyntheticSection>(IS) &&
+ (IS->Type == SHT_REL || IS->Type == SHT_RELA)) {
+ auto *Sec = cast<InputSection>(IS);
+ OutputSection *Out = Sec->getRelocatedSection()->getOutputSection();
+ addInputSec(IS, OutsecName, Out->RelocationSection);
+ return;
+ }
+
+ SectionKey Key = createKey(IS, OutsecName);
+ OutputSection *&Sec = Map[Key];
+ addInputSec(IS, OutsecName, Sec);
}
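
At its core the factory is a map from a section key (in the common case just the output section name) to an output section that accumulates members. A rough standalone sketch of that grouping, with std::map in place of the DenseMap and toy stand-in structs:

    #include <cstdio>
    #include <map>
    #include <string>
    #include <vector>

    struct In { std::string OutName; };               // stand-in for InputSectionBase
    struct Out { std::vector<const In *> Members; };  // stand-in for OutputSection

    int main() {
      // Input sections whose resolved output name is the same are appended to
      // one output section, which is created on first use, the same pattern
      // as the Map[Key] lookup in addInputSec().
      std::vector<In> Inputs = {{".text"}, {".text"}, {".data"}, {".text"}};
      std::map<std::string, Out> Map;
      for (const In &S : Inputs)
        Map[S.OutName].Members.push_back(&S);
      for (const auto &KV : Map)
        std::printf("%s: %zu input sections\n", KV.first.c_str(),
                    KV.second.Members.size());
    }
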
-template <class ELFT>
-std::pair<OutputSectionBase *, bool>
-OutputSectionFactory<ELFT>::create(const SectionKey &Key,
- InputSectionBase<ELFT> *C) {
- uintX_t Flags = getOutFlags(C);
- OutputSectionBase *&Sec = Map[Key];
- if (Sec) {
- if (getIncompatibleFlags(Sec->Flags) != getIncompatibleFlags(C->Flags))
- error("Section has flags incompatible with others with the same name " +
- toString(C));
- // Convert notbits to progbits if they are mixed. This happens is some
- // linker scripts.
- if (Sec->Type == SHT_NOBITS && C->Type == SHT_PROGBITS)
- Sec->Type = SHT_PROGBITS;
- if (Sec->Type != C->Type &&
- !(Sec->Type == SHT_PROGBITS && C->Type == SHT_NOBITS))
- error("Section has different type from others with the same name " +
- toString(C));
- Sec->Flags |= Flags;
- return {Sec, false};
+void OutputSectionFactory::addInputSec(InputSectionBase *IS,
+ StringRef OutsecName,
+ OutputSection *&Sec) {
+ if (!IS->Live) {
+ reportDiscarded(IS);
+ return;
}
- uint32_t Type = C->Type;
- switch (C->kind()) {
- case InputSectionBase<ELFT>::Regular:
- case InputSectionBase<ELFT>::Synthetic:
- Sec = make<OutputSection<ELFT>>(Key.Name, Type, Flags);
- break;
- case InputSectionBase<ELFT>::EHFrame:
- return {Out<ELFT>::EhFrame, false};
- case InputSectionBase<ELFT>::Merge:
- Sec = make<MergeOutputSection<ELFT>>(Key.Name, Type, Flags, Key.Alignment);
- break;
+ if (Sec) {
+ if (getIncompatibleFlags(Sec->Flags) != getIncompatibleFlags(IS->Flags))
+ error("incompatible section flags for " + Sec->Name + "\n>>> " +
+ toString(IS) + ": 0x" + utohexstr(IS->Flags) +
+ "\n>>> output section " + Sec->Name + ": 0x" +
+ utohexstr(Sec->Flags));
+ if (Sec->Type != IS->Type) {
+ if (canMergeToProgbits(Sec->Type) && canMergeToProgbits(IS->Type))
+ Sec->Type = SHT_PROGBITS;
+ else
+ error("section type mismatch for " + IS->Name + "\n>>> " +
+ toString(IS) + ": " +
+ getELFSectionTypeName(Config->EMachine, IS->Type) +
+ "\n>>> output section " + Sec->Name + ": " +
+ getELFSectionTypeName(Config->EMachine, Sec->Type));
+ }
+ Sec->Flags |= IS->Flags;
+ } else {
+ Sec = make<OutputSection>(OutsecName, IS->Type, IS->Flags);
+ OutputSections.push_back(Sec);
}
- return {Sec, true};
+
+ Sec->addSection(cast<InputSection>(IS));
}
+OutputSectionFactory::~OutputSectionFactory() {}
+
SectionKey DenseMapInfo<SectionKey>::getEmptyKey() {
return SectionKey{DenseMapInfo<StringRef>::getEmptyKey(), 0, 0};
}
@@ -681,32 +265,13 @@ bool DenseMapInfo<SectionKey>::isEqual(const SectionKey &LHS,
LHS.Flags == RHS.Flags && LHS.Alignment == RHS.Alignment;
}
-namespace lld {
-namespace elf {
-
-template void OutputSectionBase::writeHeaderTo<ELF32LE>(ELF32LE::Shdr *Shdr);
-template void OutputSectionBase::writeHeaderTo<ELF32BE>(ELF32BE::Shdr *Shdr);
-template void OutputSectionBase::writeHeaderTo<ELF64LE>(ELF64LE::Shdr *Shdr);
-template void OutputSectionBase::writeHeaderTo<ELF64BE>(ELF64BE::Shdr *Shdr);
-
-template class OutputSection<ELF32LE>;
-template class OutputSection<ELF32BE>;
-template class OutputSection<ELF64LE>;
-template class OutputSection<ELF64BE>;
-
-template class EhOutputSection<ELF32LE>;
-template class EhOutputSection<ELF32BE>;
-template class EhOutputSection<ELF64LE>;
-template class EhOutputSection<ELF64BE>;
-
-template class MergeOutputSection<ELF32LE>;
-template class MergeOutputSection<ELF32BE>;
-template class MergeOutputSection<ELF64LE>;
-template class MergeOutputSection<ELF64BE>;
-
-template class OutputSectionFactory<ELF32LE>;
-template class OutputSectionFactory<ELF32BE>;
-template class OutputSectionFactory<ELF64LE>;
-template class OutputSectionFactory<ELF64BE>;
-}
+uint64_t elf::getHeaderSize() {
+ if (Config->OFormatBinary)
+ return 0;
+ return Out::ElfHeader->Size + Out::ProgramHeaders->Size;
}
+
+template void OutputSection::writeHeaderTo<ELF32LE>(ELF32LE::Shdr *Shdr);
+template void OutputSection::writeHeaderTo<ELF32BE>(ELF32BE::Shdr *Shdr);
+template void OutputSection::writeHeaderTo<ELF64LE>(ELF64LE::Shdr *Shdr);
+template void OutputSection::writeHeaderTo<ELF64BE>(ELF64BE::Shdr *Shdr);
diff --git a/gnu/llvm/tools/lld/ELF/SymbolTable.cpp b/gnu/llvm/tools/lld/ELF/SymbolTable.cpp
index deecd8d74f4..b9817ba9d2d 100644
--- a/gnu/llvm/tools/lld/ELF/SymbolTable.cpp
+++ b/gnu/llvm/tools/lld/ELF/SymbolTable.cpp
@@ -52,6 +52,9 @@ template <class ELFT> static bool isCompatible(InputFile *F) {
// Add symbols in File to the symbol table.
template <class ELFT> void SymbolTable<ELFT>::addFile(InputFile *File) {
+ if (!Config->FirstElf && isa<ELFFileBase<ELFT>>(File))
+ Config->FirstElf = File;
+
if (!isCompatible<ELFT>(File))
return;
@@ -75,13 +78,13 @@ template <class ELFT> void SymbolTable<ELFT>::addFile(InputFile *File) {
}
if (Config->Trace)
- outs() << toString(File) << "\n";
+ message(toString(File));
// .so file
if (auto *F = dyn_cast<SharedFile<ELFT>>(File)) {
// DSOs are uniquified not by filename but by soname.
F->parseSoName();
- if (ErrorCount || !SoNames.insert(F->getSoName()).second)
+ if (ErrorCount || !SoNames.insert(F->SoName).second)
return;
SharedFiles.push_back(F);
F->parseRest();
@@ -115,7 +118,7 @@ template <class ELFT> void SymbolTable<ELFT>::addCombinedLTOObject() {
// Compile bitcode files and replace bitcode symbols.
LTO.reset(new BitcodeCompiler);
for (BitcodeFile *F : BitcodeFiles)
- LTO->add<ELFT>(*F);
+ LTO->add(*F);
for (InputFile *File : LTO->compile()) {
ObjectFile<ELFT> *Obj = cast<ObjectFile<ELFT>>(File);
@@ -126,19 +129,19 @@ template <class ELFT> void SymbolTable<ELFT>::addCombinedLTOObject() {
}
template <class ELFT>
-DefinedRegular<ELFT> *SymbolTable<ELFT>::addAbsolute(StringRef Name,
- uint8_t Visibility,
- uint8_t Binding) {
+DefinedRegular *SymbolTable<ELFT>::addAbsolute(StringRef Name,
+ uint8_t Visibility,
+ uint8_t Binding) {
Symbol *Sym =
addRegular(Name, Visibility, STT_NOTYPE, 0, 0, Binding, nullptr, nullptr);
- return cast<DefinedRegular<ELFT>>(Sym->body());
+ return cast<DefinedRegular>(Sym->body());
}
// Add Name as an "ignored" symbol. An ignored symbol is a regular
// linker-synthesized defined symbol, but is only defined if needed.
template <class ELFT>
-DefinedRegular<ELFT> *SymbolTable<ELFT>::addIgnored(StringRef Name,
- uint8_t Visibility) {
+DefinedRegular *SymbolTable<ELFT>::addIgnored(StringRef Name,
+ uint8_t Visibility) {
SymbolBody *S = find(Name);
if (!S || S->isInCurrentDSO())
return nullptr;
@@ -159,7 +162,7 @@ template <class ELFT> void SymbolTable<ELFT>::trace(StringRef Name) {
// Rename SYM as __wrap_SYM. The original symbol is preserved as __real_SYM.
// Used to implement --wrap.
-template <class ELFT> void SymbolTable<ELFT>::wrap(StringRef Name) {
+template <class ELFT> void SymbolTable<ELFT>::addSymbolWrap(StringRef Name) {
SymbolBody *B = find(Name);
if (!B)
return;
@@ -167,11 +170,40 @@ template <class ELFT> void SymbolTable<ELFT>::wrap(StringRef Name) {
Symbol *Real = addUndefined(Saver.save("__real_" + Name));
Symbol *Wrap = addUndefined(Saver.save("__wrap_" + Name));
- // We rename symbols by replacing the old symbol's SymbolBody with the new
- // symbol's SymbolBody. This causes all SymbolBody pointers referring to the
- // old symbol to instead refer to the new symbol.
- memcpy(Real->Body.buffer, Sym->Body.buffer, sizeof(Sym->Body));
- memcpy(Sym->Body.buffer, Wrap->Body.buffer, sizeof(Wrap->Body));
+ // Tell LTO not to eliminate this symbol
+ Wrap->IsUsedInRegularObj = true;
+
+ Config->RenamedSymbols[Real] = {Sym, Real->Binding};
+ Config->RenamedSymbols[Sym] = {Wrap, Sym->Binding};
+}
+
+// Creates an alias for a symbol. Used to implement --defsym=ALIAS=SYM.
+template <class ELFT>
+void SymbolTable<ELFT>::addSymbolAlias(StringRef Alias, StringRef Name) {
+ SymbolBody *B = find(Name);
+ if (!B) {
+ error("-defsym: undefined symbol: " + Name);
+ return;
+ }
+ Symbol *Sym = B->symbol();
+ Symbol *AliasSym = addUndefined(Alias);
+
+ // Tell LTO not to eliminate this symbol
+ Sym->IsUsedInRegularObj = true;
+ Config->RenamedSymbols[AliasSym] = {Sym, AliasSym->Binding};
+}
+
+// Apply symbol renames created by -wrap and -defsym. The renames are created
+// before LTO in addSymbolWrap() and addSymbolAlias() to have a chance to inform
+// LTO (if LTO is running) not to include these symbols in IPO. Now that the
+// symbols are finalized, we can perform the replacement.
+template <class ELFT> void SymbolTable<ELFT>::applySymbolRenames() {
+ for (auto &KV : Config->RenamedSymbols) {
+ Symbol *Dst = KV.first;
+ Symbol *Src = KV.second.Target;
+ Dst->body()->copy(Src->body());
+ Dst->Binding = KV.second.OriginalBinding;
+ }
}
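
A deliberately simplified toy model of the two-phase rename: it snapshots the replacement bodies up front, so it sidesteps the ordering and LTO interactions the real code has to care about, and all names are illustrative.

    #include <cstdio>
    #include <map>
    #include <string>

    int main() {
      // Symbol name -> body, as the symbol table might look once all inputs
      // and the LTO output have been added.
      std::map<std::string, std::string> Body = {
          {"foo", "original foo"},
          {"__wrap_foo", "wrapper for foo"},
          {"__real_foo", "<undefined>"},
      };

      // --wrap foo: record that references to foo should get __wrap_foo's
      // body and that __real_foo should get the original foo.
      std::map<std::string, std::string> Renames = {
          {"foo", Body["__wrap_foo"]},
          {"__real_foo", Body["foo"]},
      };

      // The analogue of applySymbolRenames(): perform the replacement once
      // the table is final.
      for (const auto &KV : Renames)
        Body[KV.first] = KV.second;

      for (const auto &KV : Body)
        std::printf("%-12s -> %s\n", KV.first.c_str(), KV.second.c_str());
    }
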
static uint8_t getMinVisibility(uint8_t VA, uint8_t VB) {
@@ -185,6 +217,12 @@ static uint8_t getMinVisibility(uint8_t VA, uint8_t VB) {
// Find an existing symbol or create and insert a new one.
template <class ELFT>
std::pair<Symbol *, bool> SymbolTable<ELFT>::insert(StringRef Name) {
+ // <name>@@<version> means the symbol is the default version. In that
+ // case <name>@@<version> will be used to resolve references to <name>.
+ size_t Pos = Name.find("@@");
+ if (Pos != StringRef::npos)
+ Name = Name.take_front(Pos);
+
auto P = Symtab.insert(
{CachedHashStringRef(Name), SymIndex((int)SymVector.size(), false)});
SymIndex &V = P.first->second;
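
The name-trimming above makes foo@@VER answer lookups for plain foo. A small illustration, with std::string standing in for StringRef:

#include <string>

// Illustrative only: strip the "@@<version>" suffix so the default-versioned
// symbol is found under its bare name when other files reference <name>.
std::string lookupKey(const std::string &Name) {
  size_t Pos = Name.find("@@");
  return Pos == std::string::npos ? Name : Name.substr(0, Pos);
}
// lookupKey("foo@@VER1") == "foo"; a single '@' (non-default version) is kept.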
@@ -197,7 +235,7 @@ std::pair<Symbol *, bool> SymbolTable<ELFT>::insert(StringRef Name) {
Symbol *Sym;
if (IsNew) {
- Sym = new (BAlloc) Symbol;
+ Sym = make<Symbol>();
Sym->InVersionScript = false;
Sym->Binding = STB_WEAK;
Sym->Visibility = STV_DEFAULT;
@@ -212,13 +250,6 @@ std::pair<Symbol *, bool> SymbolTable<ELFT>::insert(StringRef Name) {
return {Sym, IsNew};
}
-// Construct a string in the form of "Sym in File1 and File2".
-// Used to construct an error message.
-static std::string conflictMsg(SymbolBody *Existing, InputFile *NewFile) {
- return "'" + toString(*Existing) + "' in " + toString(Existing->File) +
- " and " + toString(NewFile);
-}
-
// Find an existing symbol or create and insert a new one, then apply the given
// attributes.
template <class ELFT>
@@ -232,13 +263,19 @@ SymbolTable<ELFT>::insert(StringRef Name, uint8_t Type, uint8_t Visibility,
// Merge in the new symbol's visibility.
S->Visibility = getMinVisibility(S->Visibility, Visibility);
+
if (!CanOmitFromDynSym && (Config->Shared || Config->ExportDynamic))
S->ExportDynamic = true;
+
if (IsUsedInRegularObj)
S->IsUsedInRegularObj = true;
+
if (!WasInserted && S->body()->Type != SymbolBody::UnknownType &&
- ((Type == STT_TLS) != S->body()->isTls()))
- error("TLS attribute mismatch for symbol " + conflictMsg(S->body(), File));
+ ((Type == STT_TLS) != S->body()->isTls())) {
+ error("TLS attribute mismatch: " + toString(*S->body()) +
+ "\n>>> defined in " + toString(S->body()->File) +
+ "\n>>> defined in " + toString(File));
+ }
return {S, WasInserted};
}
@@ -258,18 +295,23 @@ Symbol *SymbolTable<ELFT>::addUndefined(StringRef Name, bool IsLocal,
InputFile *File) {
Symbol *S;
bool WasInserted;
+ uint8_t Visibility = getVisibility(StOther);
std::tie(S, WasInserted) =
- insert(Name, Type, getVisibility(StOther), CanOmitFromDynSym, File);
- if (WasInserted) {
+ insert(Name, Type, Visibility, CanOmitFromDynSym, File);
+ // An undefined symbol with non-default visibility must be satisfied
+ // in the same DSO.
+ if (WasInserted ||
+ (isa<SharedSymbol>(S->body()) && Visibility != STV_DEFAULT)) {
S->Binding = Binding;
- replaceBody<Undefined<ELFT>>(S, Name, IsLocal, StOther, Type, File);
+ replaceBody<Undefined>(S, Name, IsLocal, StOther, Type, File);
return S;
}
if (Binding != STB_WEAK) {
- if (S->body()->isShared() || S->body()->isLazy())
+ SymbolBody *B = S->body();
+ if (B->isShared() || B->isLazy() || B->isUndefined())
S->Binding = Binding;
- if (auto *SS = dyn_cast<SharedSymbol<ELFT>>(S->body()))
- SS->file()->IsUsed = true;
+ if (auto *SS = dyn_cast<SharedSymbol>(B))
+ cast<SharedFile<ELFT>>(SS->File)->IsUsed = true;
}
if (auto *L = dyn_cast<Lazy>(S->body())) {
// An undefined weak will not fetch archive members, but we have to remember
@@ -282,15 +324,36 @@ Symbol *SymbolTable<ELFT>::addUndefined(StringRef Name, bool IsLocal,
return S;
}
+// Using .symver foo,foo@@VER unfortunately creates two symbols: foo and
+// foo@@VER. We want to effectively ignore foo, so give precedence to
+// foo@@VER.
+// FIXME: If users can transition to using
+// .symver foo,foo@@@VER
+// we can delete this hack.
+static int compareVersion(Symbol *S, StringRef Name) {
+ if (Name.find("@@") != StringRef::npos &&
+ S->body()->getName().find("@@") == StringRef::npos)
+ return 1;
+ if (Name.find("@@") == StringRef::npos &&
+ S->body()->getName().find("@@") != StringRef::npos)
+ return -1;
+ return 0;
+}
+
// We have a new defined symbol with the specified binding. Return 1 if the new
// symbol should win, -1 if the new symbol should lose, or 0 if both symbols are
// strong defined symbols.
-static int compareDefined(Symbol *S, bool WasInserted, uint8_t Binding) {
+static int compareDefined(Symbol *S, bool WasInserted, uint8_t Binding,
+ StringRef Name) {
if (WasInserted)
return 1;
SymbolBody *Body = S->body();
- if (Body->isLazy() || !Body->isInCurrentDSO())
+ if (!Body->isInCurrentDSO())
return 1;
+
+ if (int R = compareVersion(S, Name))
+ return R;
+
if (Binding == STB_WEAK)
return -1;
if (S->isWeak())
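
The precedence rules above can be summarized in one small decision function. This is an illustrative model only, not lld's actual interface:

// +1 = the new symbol wins, -1 = the existing one wins, 0 = duplicate error.
int resolveDefined(bool WasInserted, bool OldInCurrentDSO, bool NewHasAtAt,
                   bool OldHasAtAt, bool NewIsWeak, bool OldIsWeak) {
  if (WasInserted || !OldInCurrentDSO)
    return 1;                   // nothing defined locally yet
  if (NewHasAtAt != OldHasAtAt)
    return NewHasAtAt ? 1 : -1; // foo@@VER takes precedence over plain foo
  if (NewIsWeak)
    return -1;                  // a weak definition never overrides
  if (OldIsWeak)
    return 1;
  return 0;                     // two strong definitions
}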
@@ -303,8 +366,9 @@ static int compareDefined(Symbol *S, bool WasInserted, uint8_t Binding) {
// is a conflict. If the new symbol wins, also update the binding.
template <typename ELFT>
static int compareDefinedNonCommon(Symbol *S, bool WasInserted, uint8_t Binding,
- bool IsAbsolute, typename ELFT::uint Value) {
- if (int Cmp = compareDefined(S, WasInserted, Binding)) {
+ bool IsAbsolute, typename ELFT::uint Value,
+ StringRef Name) {
+ if (int Cmp = compareDefined(S, WasInserted, Binding, Name)) {
if (Cmp > 0)
S->Binding = Binding;
return Cmp;
@@ -315,7 +379,7 @@ static int compareDefinedNonCommon(Symbol *S, bool WasInserted, uint8_t Binding,
if (Config->WarnCommon)
warn("common " + S->body()->getName() + " is overridden");
return 1;
- } else if (auto *R = dyn_cast<DefinedRegular<ELFT>>(B)) {
+ } else if (auto *R = dyn_cast<DefinedRegular>(B)) {
if (R->Section == nullptr && Binding == STB_GLOBAL && IsAbsolute &&
R->Value == Value)
return -1;
@@ -325,14 +389,14 @@ static int compareDefinedNonCommon(Symbol *S, bool WasInserted, uint8_t Binding,
template <class ELFT>
Symbol *SymbolTable<ELFT>::addCommon(StringRef N, uint64_t Size,
- uint64_t Alignment, uint8_t Binding,
+ uint32_t Alignment, uint8_t Binding,
uint8_t StOther, uint8_t Type,
InputFile *File) {
Symbol *S;
bool WasInserted;
std::tie(S, WasInserted) = insert(N, Type, getVisibility(StOther),
/*CanOmitFromDynSym*/ false, File);
- int Cmp = compareDefined(S, WasInserted, Binding);
+ int Cmp = compareDefined(S, WasInserted, Binding, N);
if (Cmp > 0) {
S->Binding = Binding;
replaceBody<DefinedCommon>(S, N, Size, Alignment, StOther, Type, File);
@@ -355,73 +419,72 @@ Symbol *SymbolTable<ELFT>::addCommon(StringRef N, uint64_t Size,
return S;
}
-static void print(const Twine &Msg) {
+static void warnOrError(const Twine &Msg) {
if (Config->AllowMultipleDefinition)
warn(Msg);
else
error(Msg);
}
-static void reportDuplicate(SymbolBody *Existing, InputFile *NewFile) {
- print("duplicate symbol " + conflictMsg(Existing, NewFile));
+static void reportDuplicate(SymbolBody *Sym, InputFile *NewFile) {
+ warnOrError("duplicate symbol: " + toString(*Sym) + "\n>>> defined in " +
+ toString(Sym->File) + "\n>>> defined in " + toString(NewFile));
}
template <class ELFT>
-static void reportDuplicate(SymbolBody *Existing,
- InputSectionBase<ELFT> *ErrSec,
+static void reportDuplicate(SymbolBody *Sym, InputSectionBase *ErrSec,
typename ELFT::uint ErrOffset) {
- DefinedRegular<ELFT> *D = dyn_cast<DefinedRegular<ELFT>>(Existing);
+ DefinedRegular *D = dyn_cast<DefinedRegular>(Sym);
if (!D || !D->Section || !ErrSec) {
- reportDuplicate(Existing, ErrSec ? ErrSec->getFile() : nullptr);
+ reportDuplicate(Sym, ErrSec ? ErrSec->getFile<ELFT>() : nullptr);
return;
}
- std::string OldLoc = D->Section->getLocation(D->Value);
- std::string NewLoc = ErrSec->getLocation(ErrOffset);
-
- print(NewLoc + ": duplicate symbol '" + toString(*Existing) + "'");
- print(OldLoc + ": previous definition was here");
+ // Construct and print an error message in the form of:
+ //
+ // ld.lld: error: duplicate symbol: foo
+ // >>> defined at bar.c:30
+ // >>> bar.o (/home/alice/src/bar.o)
+ // >>> defined at baz.c:563
+ // >>> baz.o in archive libbaz.a
+ auto *Sec1 = cast<InputSectionBase>(D->Section);
+ std::string Src1 = Sec1->getSrcMsg<ELFT>(D->Value);
+ std::string Obj1 = Sec1->getObjMsg<ELFT>(D->Value);
+ std::string Src2 = ErrSec->getSrcMsg<ELFT>(ErrOffset);
+ std::string Obj2 = ErrSec->getObjMsg<ELFT>(ErrOffset);
+
+ std::string Msg = "duplicate symbol: " + toString(*Sym) + "\n>>> defined at ";
+ if (!Src1.empty())
+ Msg += Src1 + "\n>>> ";
+ Msg += Obj1 + "\n>>> defined at ";
+ if (!Src2.empty())
+ Msg += Src2 + "\n>>> ";
+ Msg += Obj2;
+ warnOrError(Msg);
}
template <typename ELFT>
Symbol *SymbolTable<ELFT>::addRegular(StringRef Name, uint8_t StOther,
- uint8_t Type, uintX_t Value, uintX_t Size,
- uint8_t Binding,
- InputSectionBase<ELFT> *Section,
- InputFile *File) {
+ uint8_t Type, uint64_t Value,
+ uint64_t Size, uint8_t Binding,
+ SectionBase *Section, InputFile *File) {
Symbol *S;
bool WasInserted;
std::tie(S, WasInserted) = insert(Name, Type, getVisibility(StOther),
/*CanOmitFromDynSym*/ false, File);
int Cmp = compareDefinedNonCommon<ELFT>(S, WasInserted, Binding,
- Section == nullptr, Value);
+ Section == nullptr, Value, Name);
if (Cmp > 0)
- replaceBody<DefinedRegular<ELFT>>(S, Name, /*IsLocal=*/false, StOther, Type,
- Value, Size, Section, File);
+ replaceBody<DefinedRegular>(S, Name, /*IsLocal=*/false, StOther, Type,
+ Value, Size, Section, File);
else if (Cmp == 0)
- reportDuplicate(S->body(), Section, Value);
+ reportDuplicate<ELFT>(S->body(),
+ dyn_cast_or_null<InputSectionBase>(Section), Value);
return S;
}
template <typename ELFT>
-Symbol *SymbolTable<ELFT>::addSynthetic(StringRef N,
- const OutputSectionBase *Section,
- uintX_t Value, uint8_t StOther) {
- Symbol *S;
- bool WasInserted;
- std::tie(S, WasInserted) = insert(N, STT_NOTYPE, getVisibility(StOther),
- /*CanOmitFromDynSym*/ false, nullptr);
- int Cmp = compareDefinedNonCommon<ELFT>(S, WasInserted, STB_GLOBAL,
- /*IsAbsolute*/ false, /*Value*/ 0);
- if (Cmp > 0)
- replaceBody<DefinedSynthetic>(S, N, Value, Section);
- else if (Cmp == 0)
- reportDuplicate(S->body(), nullptr);
- return S;
-}
-
-template <typename ELFT>
-void SymbolTable<ELFT>::addShared(SharedFile<ELFT> *F, StringRef Name,
+void SymbolTable<ELFT>::addShared(SharedFile<ELFT> *File, StringRef Name,
const Elf_Sym &Sym,
const typename ELFT::Verdef *Verdef) {
// DSO symbols do not affect visibility in the output, so we pass STV_DEFAULT
@@ -429,15 +492,21 @@ void SymbolTable<ELFT>::addShared(SharedFile<ELFT> *F, StringRef Name,
// unchanged.
Symbol *S;
bool WasInserted;
- std::tie(S, WasInserted) =
- insert(Name, Sym.getType(), STV_DEFAULT, /*CanOmitFromDynSym*/ true, F);
+ std::tie(S, WasInserted) = insert(Name, Sym.getType(), STV_DEFAULT,
+ /*CanOmitFromDynSym*/ true, File);
// Make sure we preempt DSO symbols with default visibility.
if (Sym.getVisibility() == STV_DEFAULT)
S->ExportDynamic = true;
- if (WasInserted || isa<Undefined<ELFT>>(S->body())) {
- replaceBody<SharedSymbol<ELFT>>(S, F, Name, Sym, Verdef);
+
+ SymbolBody *Body = S->body();
+ // An undefined symbol with non-default visibility must be satisfied
+ // in the same DSO.
+ if (WasInserted ||
+ (isa<Undefined>(Body) && Body->getVisibility() == STV_DEFAULT)) {
+ replaceBody<SharedSymbol>(S, File, Name, Sym.st_other, Sym.getType(), &Sym,
+ Verdef);
if (!S->isWeak())
- F->IsUsed = true;
+ File->IsUsed = true;
}
}
@@ -450,10 +519,10 @@ Symbol *SymbolTable<ELFT>::addBitcode(StringRef Name, uint8_t Binding,
std::tie(S, WasInserted) =
insert(Name, Type, getVisibility(StOther), CanOmitFromDynSym, F);
int Cmp = compareDefinedNonCommon<ELFT>(S, WasInserted, Binding,
- /*IsAbs*/ false, /*Value*/ 0);
+ /*IsAbs*/ false, /*Value*/ 0, Name);
if (Cmp > 0)
- replaceBody<DefinedRegular<ELFT>>(S, Name, /*IsLocal=*/false, StOther, Type,
- 0, 0, nullptr, F);
+ replaceBody<DefinedRegular>(S, Name, /*IsLocal=*/false, StOther, Type, 0, 0,
+ nullptr, F);
else if (Cmp == 0)
reportDuplicate(S->body(), F);
return S;
@@ -478,18 +547,18 @@ SymbolBody *SymbolTable<ELFT>::findInCurrentDSO(StringRef Name) {
}
template <class ELFT>
-void SymbolTable<ELFT>::addLazyArchive(ArchiveFile *F,
- const object::Archive::Symbol Sym) {
+Symbol *SymbolTable<ELFT>::addLazyArchive(ArchiveFile *F,
+ const object::Archive::Symbol Sym) {
Symbol *S;
bool WasInserted;
StringRef Name = Sym.getName();
std::tie(S, WasInserted) = insert(Name);
if (WasInserted) {
replaceBody<LazyArchive>(S, *F, Sym, SymbolBody::UnknownType);
- return;
+ return S;
}
if (!S->body()->isUndefined())
- return;
+ return S;
// Weak undefined symbols should not fetch members from archives. If we were
// to keep the old symbol we would not know that an archive member was available
@@ -500,11 +569,12 @@ void SymbolTable<ELFT>::addLazyArchive(ArchiveFile *F,
// to preserve its type. FIXME: Move the Type field to Symbol.
if (S->isWeak()) {
replaceBody<LazyArchive>(S, *F, Sym, S->body()->Type);
- return;
+ return S;
}
std::pair<MemoryBufferRef, uint64_t> MBInfo = F->getMember(&Sym);
if (!MBInfo.first.getBuffer().empty())
addFile(createObjectFile(MBInfo.first, F->getName(), MBInfo.second));
+ return S;
}
template <class ELFT>
@@ -520,13 +590,10 @@ void SymbolTable<ELFT>::addLazyObject(StringRef Name, LazyObjectFile &Obj) {
return;
// See comment for addLazyArchive above.
- if (S->isWeak()) {
+ if (S->isWeak())
replaceBody<LazyObject>(S, Name, Obj, S->body()->Type);
- } else {
- MemoryBufferRef MBRef = Obj.getBuffer();
- if (!MBRef.getBuffer().empty())
- addFile(createObjectFile(MBRef));
- }
+ else if (InputFile *F = Obj.fetch())
+ addFile(F);
}
// Process undefined (-u) flags by loading lazy symbols named by those flags.
@@ -545,11 +612,20 @@ template <class ELFT> void SymbolTable<ELFT>::scanUndefinedFlags() {
// shared libraries can find them.
// Other than that, we ignore undefined symbols in DSOs.
template <class ELFT> void SymbolTable<ELFT>::scanShlibUndefined() {
- for (SharedFile<ELFT> *File : SharedFiles)
- for (StringRef U : File->getUndefinedSymbols())
- if (SymbolBody *Sym = find(U))
- if (Sym->isDefined())
- Sym->symbol()->ExportDynamic = true;
+ for (SharedFile<ELFT> *File : SharedFiles) {
+ for (StringRef U : File->getUndefinedSymbols()) {
+ SymbolBody *Sym = find(U);
+ if (!Sym || !Sym->isDefined())
+ continue;
+ Sym->symbol()->ExportDynamic = true;
+
+ // If -dynamic-list is given, the default version is set to
+ // VER_NDX_LOCAL, which prevents a symbol from being exported via .dynsym.
+ // Set to VER_NDX_GLOBAL so the symbol will be handled as if it were
+ // specified by -dynamic-list.
+ Sym->symbol()->VersionId = VER_NDX_GLOBAL;
+ }
+ }
}
// Initialize DemangledSyms with a map from demangled symbols to symbol
@@ -630,7 +706,8 @@ template <class ELFT> void SymbolTable<ELFT>::handleAnonymousVersion() {
// Assign versions to symbols. This function handles patterns
// containing no wildcard characters.
template <class ELFT>
-void SymbolTable<ELFT>::assignExactVersion(SymbolVersion Ver, uint16_t VersionId,
+void SymbolTable<ELFT>::assignExactVersion(SymbolVersion Ver,
+ uint16_t VersionId,
StringRef VersionName) {
if (Ver.HasWildcard)
return;
@@ -646,6 +723,12 @@ void SymbolTable<ELFT>::assignExactVersion(SymbolVersion Ver, uint16_t VersionId
// Assign the version.
for (SymbolBody *B : Syms) {
+ // Skip symbols containing version info because symbol versions
+ // specified by symbol names take precedence over version scripts.
+ // See parseSymbolVersion().
+ if (B->getName().find('@') != StringRef::npos)
+ continue;
+
Symbol *Sym = B->symbol();
if (Sym->InVersionScript)
warn("duplicate symbol '" + Ver.Name + "' in version script");
@@ -659,12 +742,11 @@ void SymbolTable<ELFT>::assignWildcardVersion(SymbolVersion Ver,
uint16_t VersionId) {
if (!Ver.HasWildcard)
return;
- std::vector<SymbolBody *> Syms = findAllByVersion(Ver);
// Exact matching takes precedence over fuzzy matching,
// so we set a version to a symbol only if no version has been assigned
// to the symbol. This behavior is compatible with GNU.
- for (SymbolBody *B : Syms)
+ for (SymbolBody *B : findAllByVersion(Ver))
if (B->symbol()->VersionId == Config->DefaultSymbolVersion)
B->symbol()->VersionId = VersionId;
}
@@ -672,19 +754,9 @@ void SymbolTable<ELFT>::assignWildcardVersion(SymbolVersion Ver,
// This function processes version scripts by updating VersionId
// member of symbols.
template <class ELFT> void SymbolTable<ELFT>::scanVersionScript() {
- // Symbol themselves might know their versions because symbols
- // can contain versions in the form of <name>@<version>.
- // Let them parse their names.
- if (!Config->VersionDefinitions.empty())
- for (Symbol *Sym : SymVector)
- Sym->body()->parseSymbolVersion();
-
// Handle edge cases first.
handleAnonymousVersion();
- if (Config->VersionDefinitions.empty())
- return;
-
// Now we have version definitions, so we need to set version ids to symbols.
// Each version definition has a glob pattern, and all symbols that match
// with the pattern get that version.
@@ -702,6 +774,12 @@ template <class ELFT> void SymbolTable<ELFT>::scanVersionScript() {
for (VersionDefinition &V : llvm::reverse(Config->VersionDefinitions))
for (SymbolVersion &Ver : V.Globals)
assignWildcardVersion(Ver, V.Id);
+
+ // Symbols themselves might know their versions because symbol names
+ // can contain versions in the form of <name>@<version>.
+ // Let them parse and update their names to exclude the version suffix.
+ for (Symbol *Sym : SymVector)
+ Sym->body()->parseSymbolVersion();
}
template class elf::SymbolTable<ELF32LE>;
diff --git a/gnu/llvm/tools/lld/ELF/Symbols.cpp b/gnu/llvm/tools/lld/ELF/Symbols.cpp
index 43af44ec4b8..c69007e781a 100644
--- a/gnu/llvm/tools/lld/ELF/Symbols.cpp
+++ b/gnu/llvm/tools/lld/ELF/Symbols.cpp
@@ -28,62 +28,90 @@ using namespace llvm::ELF;
using namespace lld;
using namespace lld::elf;
-template <class ELFT>
-static typename ELFT::uint getSymVA(const SymbolBody &Body,
- typename ELFT::uint &Addend) {
- typedef typename ELFT::uint uintX_t;
-
+DefinedRegular *ElfSym::Bss;
+DefinedRegular *ElfSym::Etext1;
+DefinedRegular *ElfSym::Etext2;
+DefinedRegular *ElfSym::Edata1;
+DefinedRegular *ElfSym::Edata2;
+DefinedRegular *ElfSym::End1;
+DefinedRegular *ElfSym::End2;
+DefinedRegular *ElfSym::GlobalOffsetTable;
+DefinedRegular *ElfSym::MipsGp;
+DefinedRegular *ElfSym::MipsGpDisp;
+DefinedRegular *ElfSym::MipsLocalGp;
+
+static uint64_t getSymVA(const SymbolBody &Body, int64_t &Addend) {
switch (Body.kind()) {
- case SymbolBody::DefinedSyntheticKind: {
- auto &D = cast<DefinedSynthetic>(Body);
- const OutputSectionBase *Sec = D.Section;
- if (!Sec)
- return D.Value;
- if (D.Value == uintX_t(-1))
- return Sec->Addr + Sec->Size;
- return Sec->Addr + D.Value;
- }
case SymbolBody::DefinedRegularKind: {
- auto &D = cast<DefinedRegular<ELFT>>(Body);
- InputSectionBase<ELFT> *IS = D.Section;
+ auto &D = cast<DefinedRegular>(Body);
+ SectionBase *IS = D.Section;
+ if (auto *ISB = dyn_cast_or_null<InputSectionBase>(IS))
+ IS = ISB->Repl;
// According to the ELF spec, references to a local symbol from outside
// the group are not allowed. Unfortunately .eh_frame breaks that rule
// and must be treated specially. For now we just replace the symbol with
// 0.
- if (IS == &InputSection<ELFT>::Discarded)
+ if (IS == &InputSection::Discarded)
return 0;
// This is an absolute symbol.
if (!IS)
return D.Value;
- uintX_t Offset = D.Value;
+ uint64_t Offset = D.Value;
+
+ // An object in an SHF_MERGE section might be referenced via a
+ // section symbol (as a hack for reducing the number of local
+ // symbols).
+ // Depending on the addend, the reference via a section symbol
+ // refers to a different object in the merge section.
+ // Since the objects in the merge section are not necessarily
+ // contiguous in the output, the addend can thus affect the final
+ // VA in a non-linear way.
+ // To make this work, we incorporate the addend into the section
+ // offset (and zero out the addend for later processing) so that
+ // we find the right object in the section.
if (D.isSection()) {
Offset += Addend;
Addend = 0;
}
- uintX_t VA = (IS->OutSec ? IS->OutSec->Addr : 0) + IS->getOffset(Offset);
+
+ const OutputSection *OutSec = IS->getOutputSection();
+
+ // In the typical case, this is actually very simple and boils
+ // down to adding together 3 numbers:
+ // 1. The address of the output section.
+ // 2. The offset of the input section within the output section.
+ // 3. The offset within the input section (this addition happens
+ // inside InputSection::getOffset).
+ //
+ // If you understand the data structures involved with this next
+ // line (and how they get built), then you have a pretty good
+ // understanding of the linker.
+ uint64_t VA = (OutSec ? OutSec->Addr : 0) + IS->getOffset(Offset);
+
if (D.isTls() && !Config->Relocatable) {
- if (!Out<ELFT>::TlsPhdr)
+ if (!Out::TlsPhdr)
fatal(toString(D.File) +
- " has a STT_TLS symbol but doesn't have a PT_TLS section");
- return VA - Out<ELFT>::TlsPhdr->p_vaddr;
+ " has an STT_TLS symbol but doesn't have an SHF_TLS section");
+ return VA - Out::TlsPhdr->p_vaddr;
}
return VA;
}
case SymbolBody::DefinedCommonKind:
if (!Config->DefineCommon)
return 0;
- return In<ELFT>::Common->OutSec->Addr + In<ELFT>::Common->OutSecOff +
+ return InX::Common->getParent()->Addr + InX::Common->OutSecOff +
cast<DefinedCommon>(Body).Offset;
case SymbolBody::SharedKind: {
- auto &SS = cast<SharedSymbol<ELFT>>(Body);
- if (!SS.NeedsCopyOrPltAddr)
- return 0;
- if (SS.isFunc())
- return Body.getPltVA<ELFT>();
- return SS.getBssSectionForCopy()->Addr + SS.CopyOffset;
+ auto &SS = cast<SharedSymbol>(Body);
+ if (SS.NeedsCopy)
+ return SS.CopyRelSec->getParent()->Addr + SS.CopyRelSec->OutSecOff +
+ SS.CopyRelSecOff;
+ if (SS.NeedsPltAddr)
+ return Body.getPltVA();
+ return 0;
}
case SymbolBody::UndefinedKind:
return 0;
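
The DefinedRegular case above really is the three-term sum described in the comment; a trivial sketch, with the SHF_MERGE addend already folded into the symbol's in-section offset:

#include <cstdint>

// Sketch of the three-term sum: for a section symbol into an SHF_MERGE
// section, the relocation addend has already been folded into
// SymOffInInputSec and zeroed out.
uint64_t symbolVA(uint64_t OutSecAddr, uint64_t InputSecOffInOutSec,
                  uint64_t SymOffInInputSec) {
  return OutSecAddr + InputSecOffInOutSec + SymOffInInputSec;
}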
@@ -97,10 +125,9 @@ static typename ELFT::uint getSymVA(const SymbolBody &Body,
SymbolBody::SymbolBody(Kind K, StringRefZ Name, bool IsLocal, uint8_t StOther,
uint8_t Type)
- : SymbolKind(K), NeedsCopyOrPltAddr(false), IsLocal(IsLocal),
+ : SymbolKind(K), NeedsCopy(false), NeedsPltAddr(false), IsLocal(IsLocal),
IsInGlobalMipsGot(false), Is32BitMipsGot(false), IsInIplt(false),
- IsInIgot(false), CopyIsInBssRelRo(false), Type(Type), StOther(StOther),
- Name(Name) {}
+ IsInIgot(false), Type(Type), StOther(StOther), Name(Name) {}
// Returns true if a symbol can be replaced at load-time by a symbol
// with the same name defined in other ELF executable or DSO.
@@ -112,7 +139,7 @@ bool SymbolBody::isPreemptible() const {
// symbols with copy relocations (which resolve to .bss) or preempt plt
// entries (which resolve to that plt entry).
if (isShared())
- return !NeedsCopyOrPltAddr;
+ return !NeedsCopy && !NeedsPltAddr;
// That's all that can be preempted in a non-DSO.
if (!Config->Shared)
@@ -132,65 +159,76 @@ bool SymbolBody::isPreemptible() const {
return true;
}
-template <class ELFT> bool SymbolBody::hasThunk() const {
- if (auto *DR = dyn_cast<DefinedRegular<ELFT>>(this))
- return DR->ThunkData != nullptr;
- if (auto *S = dyn_cast<SharedSymbol<ELFT>>(this))
- return S->ThunkData != nullptr;
- return false;
+// Overwrites all attributes with Other's so that this symbol becomes
+// an alias to Other. This is useful for handling some options such as
+// --wrap.
+void SymbolBody::copy(SymbolBody *Other) {
+ memcpy(symbol()->Body.buffer, Other->symbol()->Body.buffer,
+ sizeof(Symbol::Body));
}
-template <class ELFT>
-typename ELFT::uint SymbolBody::getVA(typename ELFT::uint Addend) const {
- typename ELFT::uint OutVA = getSymVA<ELFT>(*this, Addend);
+uint64_t SymbolBody::getVA(int64_t Addend) const {
+ uint64_t OutVA = getSymVA(*this, Addend);
return OutVA + Addend;
}
-template <class ELFT> typename ELFT::uint SymbolBody::getGotVA() const {
- return In<ELFT>::Got->getVA() + getGotOffset<ELFT>();
+uint64_t SymbolBody::getGotVA() const {
+ return InX::Got->getVA() + getGotOffset();
}
-template <class ELFT> typename ELFT::uint SymbolBody::getGotOffset() const {
+uint64_t SymbolBody::getGotOffset() const {
return GotIndex * Target->GotEntrySize;
}
-template <class ELFT> typename ELFT::uint SymbolBody::getGotPltVA() const {
+uint64_t SymbolBody::getGotPltVA() const {
if (this->IsInIgot)
- return In<ELFT>::IgotPlt->getVA() + getGotPltOffset<ELFT>();
- return In<ELFT>::GotPlt->getVA() + getGotPltOffset<ELFT>();
+ return InX::IgotPlt->getVA() + getGotPltOffset();
+ return InX::GotPlt->getVA() + getGotPltOffset();
}
-template <class ELFT> typename ELFT::uint SymbolBody::getGotPltOffset() const {
+uint64_t SymbolBody::getGotPltOffset() const {
return GotPltIndex * Target->GotPltEntrySize;
}
-template <class ELFT> typename ELFT::uint SymbolBody::getPltVA() const {
+uint64_t SymbolBody::getPltVA() const {
if (this->IsInIplt)
- return In<ELFT>::Iplt->getVA() + PltIndex * Target->PltEntrySize;
- return In<ELFT>::Plt->getVA() + Target->PltHeaderSize +
+ return InX::Iplt->getVA() + PltIndex * Target->PltEntrySize;
+ return InX::Plt->getVA() + Target->PltHeaderSize +
PltIndex * Target->PltEntrySize;
}
-template <class ELFT> typename ELFT::uint SymbolBody::getThunkVA() const {
- if (const auto *DR = dyn_cast<DefinedRegular<ELFT>>(this))
- return DR->ThunkData->getVA();
- if (const auto *S = dyn_cast<SharedSymbol<ELFT>>(this))
- return S->ThunkData->getVA();
- if (const auto *S = dyn_cast<Undefined<ELFT>>(this))
- return S->ThunkData->getVA();
- fatal("getThunkVA() not supported for Symbol class\n");
-}
-
template <class ELFT> typename ELFT::uint SymbolBody::getSize() const {
if (const auto *C = dyn_cast<DefinedCommon>(this))
return C->Size;
- if (const auto *DR = dyn_cast<DefinedRegular<ELFT>>(this))
+ if (const auto *DR = dyn_cast<DefinedRegular>(this))
return DR->Size;
- if (const auto *S = dyn_cast<SharedSymbol<ELFT>>(this))
- return S->Sym.st_size;
+ if (const auto *S = dyn_cast<SharedSymbol>(this))
+ return S->getSize<ELFT>();
return 0;
}
+OutputSection *SymbolBody::getOutputSection() const {
+ if (auto *S = dyn_cast<DefinedRegular>(this)) {
+ if (S->Section)
+ return S->Section->getOutputSection();
+ return nullptr;
+ }
+
+ if (auto *S = dyn_cast<SharedSymbol>(this)) {
+ if (S->NeedsCopy)
+ return S->CopyRelSec->getParent();
+ return nullptr;
+ }
+
+ if (isa<DefinedCommon>(this)) {
+ if (Config->DefineCommon)
+ return InX::Common->getParent();
+ return nullptr;
+ }
+
+ return nullptr;
+}
+
// If a symbol name contains '@', the characters after it are
// a symbol version name. This function parses that.
void SymbolBody::parseSymbolVersion() {
@@ -227,34 +265,36 @@ void SymbolBody::parseSymbolVersion() {
}
// It is an error if the specified version is not defined.
- error(toString(File) + ": symbol " + S + " has undefined version " + Verstr);
+ // Usually a version script is not provided when linking an executable,
+ // but we may still want to override a versioned symbol from a DSO,
+ // so we do not report an error in this case.
+ if (Config->Shared)
+ error(toString(File) + ": symbol " + S + " has undefined version " +
+ Verstr);
}
Defined::Defined(Kind K, StringRefZ Name, bool IsLocal, uint8_t StOther,
uint8_t Type)
: SymbolBody(K, Name, IsLocal, StOther, Type) {}
-template <class ELFT> bool DefinedRegular<ELFT>::isMipsPIC() const {
+template <class ELFT> bool DefinedRegular::isMipsPIC() const {
+ typedef typename ELFT::Ehdr Elf_Ehdr;
if (!Section || !isFunc())
return false;
+
+ auto *Sec = cast<InputSectionBase>(Section);
+ const Elf_Ehdr *Hdr = Sec->template getFile<ELFT>()->getObj().getHeader();
return (this->StOther & STO_MIPS_MIPS16) == STO_MIPS_PIC ||
- (Section->getFile()->getObj().getHeader()->e_flags & EF_MIPS_PIC);
+ (Hdr->e_flags & EF_MIPS_PIC);
}
-template <typename ELFT>
-Undefined<ELFT>::Undefined(StringRefZ Name, bool IsLocal, uint8_t StOther,
- uint8_t Type, InputFile *File)
+Undefined::Undefined(StringRefZ Name, bool IsLocal, uint8_t StOther,
+ uint8_t Type, InputFile *File)
: SymbolBody(SymbolBody::UndefinedKind, Name, IsLocal, StOther, Type) {
this->File = File;
}
-template <typename ELFT>
-OutputSection<ELFT> *SharedSymbol<ELFT>::getBssSectionForCopy() const {
- assert(needsCopy());
- return CopyIsInBssRelRo ? Out<ELFT>::BssRelRo : Out<ELFT>::Bss;
-}
-
-DefinedCommon::DefinedCommon(StringRef Name, uint64_t Size, uint64_t Alignment,
+DefinedCommon::DefinedCommon(StringRef Name, uint64_t Size, uint32_t Alignment,
uint8_t StOther, uint8_t Type, InputFile *File)
: Defined(SymbolBody::DefinedCommonKind, Name, /*IsLocal=*/false, StOther,
Type),
@@ -262,6 +302,17 @@ DefinedCommon::DefinedCommon(StringRef Name, uint64_t Size, uint64_t Alignment,
this->File = File;
}
+// If a shared symbol is referenced via a copy relocation, its alignment
+// becomes part of the ABI. This function returns the symbol's alignment.
+// Because symbols don't have alignment attributes, we need to infer it.
+template <class ELFT> uint32_t SharedSymbol::getAlignment() const {
+ auto *File = cast<SharedFile<ELFT>>(this->File);
+ uint32_t SecAlign = File->getSection(getSym<ELFT>())->sh_addralign;
+ uint64_t SymValue = getSym<ELFT>().st_value;
+ uint32_t SymAlign = uint32_t(1) << countTrailingZeros(SymValue);
+ return std::min(SecAlign, SymAlign);
+}
+
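
The alignment inference above combines two upper bounds: the section alignment and the largest power of two dividing the symbol value. A standalone sketch (the GCC/Clang __builtin_ctzll builtin is assumed here for brevity):

#include <algorithm>
#include <cstdint>

// A symbol cannot be more aligned than its section, and the low zero bits of
// its value give another upper bound on its alignment.
uint32_t inferCopyAlignment(uint32_t SecAlign, uint64_t SymValue) {
  uint64_t SymAlign =
      SymValue == 0 ? SecAlign : (uint64_t(1) << __builtin_ctzll(SymValue));
  return uint32_t(std::min<uint64_t>(SecAlign, SymAlign));
}
// e.g. SecAlign = 16, SymValue = 0x2008 -> three trailing zero bits -> 8.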
InputFile *Lazy::fetch() {
if (auto *S = dyn_cast<LazyArchive>(this))
return S->fetch();
@@ -289,20 +340,14 @@ InputFile *LazyArchive::fetch() {
return createObjectFile(MBInfo.first, file()->getName(), MBInfo.second);
}
-InputFile *LazyObject::fetch() {
- MemoryBufferRef MBRef = file()->getBuffer();
- if (MBRef.getBuffer().empty())
- return nullptr;
- return createObjectFile(MBRef);
-}
+InputFile *LazyObject::fetch() { return file()->fetch(); }
uint8_t Symbol::computeBinding() const {
if (Config->Relocatable)
return Binding;
if (Visibility != STV_DEFAULT && Visibility != STV_PROTECTED)
return STB_LOCAL;
- const SymbolBody *Body = body();
- if (VersionId == VER_NDX_LOCAL && Body->isInCurrentDSO())
+ if (VersionId == VER_NDX_LOCAL && body()->isInCurrentDSO())
return STB_LOCAL;
if (Config->NoGnuUnique && Binding == STB_GNU_UNIQUE)
return STB_GLOBAL;
@@ -319,15 +364,15 @@ bool Symbol::includeInDynsym() const {
// Print out a log message for --trace-symbol.
void elf::printTraceSymbol(Symbol *Sym) {
SymbolBody *B = Sym->body();
- outs() << toString(B->File);
-
+ std::string S;
if (B->isUndefined())
- outs() << ": reference to ";
+ S = ": reference to ";
else if (B->isCommon())
- outs() << ": common definition of ";
+ S = ": common definition of ";
else
- outs() << ": definition of ";
- outs() << B->getName() << "\n";
+ S = ": definition of ";
+
+ message(toString(B->File) + S + B->getName());
}
// Returns a symbol for an error message.
@@ -338,62 +383,17 @@ std::string lld::toString(const SymbolBody &B) {
return B.getName();
}
-template bool SymbolBody::hasThunk<ELF32LE>() const;
-template bool SymbolBody::hasThunk<ELF32BE>() const;
-template bool SymbolBody::hasThunk<ELF64LE>() const;
-template bool SymbolBody::hasThunk<ELF64BE>() const;
-
-template uint32_t SymbolBody::template getVA<ELF32LE>(uint32_t) const;
-template uint32_t SymbolBody::template getVA<ELF32BE>(uint32_t) const;
-template uint64_t SymbolBody::template getVA<ELF64LE>(uint64_t) const;
-template uint64_t SymbolBody::template getVA<ELF64BE>(uint64_t) const;
-
-template uint32_t SymbolBody::template getGotVA<ELF32LE>() const;
-template uint32_t SymbolBody::template getGotVA<ELF32BE>() const;
-template uint64_t SymbolBody::template getGotVA<ELF64LE>() const;
-template uint64_t SymbolBody::template getGotVA<ELF64BE>() const;
-
-template uint32_t SymbolBody::template getGotOffset<ELF32LE>() const;
-template uint32_t SymbolBody::template getGotOffset<ELF32BE>() const;
-template uint64_t SymbolBody::template getGotOffset<ELF64LE>() const;
-template uint64_t SymbolBody::template getGotOffset<ELF64BE>() const;
-
-template uint32_t SymbolBody::template getGotPltVA<ELF32LE>() const;
-template uint32_t SymbolBody::template getGotPltVA<ELF32BE>() const;
-template uint64_t SymbolBody::template getGotPltVA<ELF64LE>() const;
-template uint64_t SymbolBody::template getGotPltVA<ELF64BE>() const;
-
-template uint32_t SymbolBody::template getThunkVA<ELF32LE>() const;
-template uint32_t SymbolBody::template getThunkVA<ELF32BE>() const;
-template uint64_t SymbolBody::template getThunkVA<ELF64LE>() const;
-template uint64_t SymbolBody::template getThunkVA<ELF64BE>() const;
-
-template uint32_t SymbolBody::template getGotPltOffset<ELF32LE>() const;
-template uint32_t SymbolBody::template getGotPltOffset<ELF32BE>() const;
-template uint64_t SymbolBody::template getGotPltOffset<ELF64LE>() const;
-template uint64_t SymbolBody::template getGotPltOffset<ELF64BE>() const;
-
-template uint32_t SymbolBody::template getPltVA<ELF32LE>() const;
-template uint32_t SymbolBody::template getPltVA<ELF32BE>() const;
-template uint64_t SymbolBody::template getPltVA<ELF64LE>() const;
-template uint64_t SymbolBody::template getPltVA<ELF64BE>() const;
-
template uint32_t SymbolBody::template getSize<ELF32LE>() const;
template uint32_t SymbolBody::template getSize<ELF32BE>() const;
template uint64_t SymbolBody::template getSize<ELF64LE>() const;
template uint64_t SymbolBody::template getSize<ELF64BE>() const;
-template class elf::Undefined<ELF32LE>;
-template class elf::Undefined<ELF32BE>;
-template class elf::Undefined<ELF64LE>;
-template class elf::Undefined<ELF64BE>;
-
-template class elf::SharedSymbol<ELF32LE>;
-template class elf::SharedSymbol<ELF32BE>;
-template class elf::SharedSymbol<ELF64LE>;
-template class elf::SharedSymbol<ELF64BE>;
+template bool DefinedRegular::template isMipsPIC<ELF32LE>() const;
+template bool DefinedRegular::template isMipsPIC<ELF32BE>() const;
+template bool DefinedRegular::template isMipsPIC<ELF64LE>() const;
+template bool DefinedRegular::template isMipsPIC<ELF64BE>() const;
-template class elf::DefinedRegular<ELF32LE>;
-template class elf::DefinedRegular<ELF32BE>;
-template class elf::DefinedRegular<ELF64LE>;
-template class elf::DefinedRegular<ELF64BE>;
+template uint32_t SharedSymbol::template getAlignment<ELF32LE>() const;
+template uint32_t SharedSymbol::template getAlignment<ELF32BE>() const;
+template uint32_t SharedSymbol::template getAlignment<ELF64LE>() const;
+template uint32_t SharedSymbol::template getAlignment<ELF64BE>() const;
diff --git a/gnu/llvm/tools/lld/ELF/SyntheticSections.cpp b/gnu/llvm/tools/lld/ELF/SyntheticSections.cpp
index d1b6755ab2c..4bbec4ab34b 100644
--- a/gnu/llvm/tools/lld/ELF/SyntheticSections.cpp
+++ b/gnu/llvm/tools/lld/ELF/SyntheticSections.cpp
@@ -27,7 +27,10 @@
#include "Threads.h"
#include "Writer.h"
#include "lld/Config/Version.h"
-#include "llvm/Support/Dwarf.h"
+#include "llvm/BinaryFormat/Dwarf.h"
+#include "llvm/DebugInfo/DWARF/DWARFDebugPubTable.h"
+#include "llvm/Object/Decompressor.h"
+#include "llvm/Object/ELFObjectFile.h"
#include "llvm/Support/Endian.h"
#include "llvm/Support/MD5.h"
#include "llvm/Support/RandomNumberGenerator.h"
@@ -45,6 +48,12 @@ using namespace llvm::support::endian;
using namespace lld;
using namespace lld::elf;
+uint64_t SyntheticSection::getVA() const {
+ if (OutputSection *Sec = getParent())
+ return Sec->Addr + OutSecOff;
+ return 0;
+}
+
template <class ELFT> static std::vector<DefinedCommon *> getCommonSymbols() {
std::vector<DefinedCommon *> V;
for (Symbol *S : Symtab<ELFT>::X->getSymbols())
@@ -54,35 +63,24 @@ template <class ELFT> static std::vector<DefinedCommon *> getCommonSymbols() {
}
// Find all common symbols and allocate space for them.
-template <class ELFT> InputSection<ELFT> *elf::createCommonSection() {
- auto *Ret = make<InputSection<ELFT>>(SHF_ALLOC | SHF_WRITE, SHT_NOBITS, 1,
- ArrayRef<uint8_t>(), "COMMON");
- Ret->Live = true;
-
+template <class ELFT> InputSection *elf::createCommonSection() {
if (!Config->DefineCommon)
- return Ret;
+ return nullptr;
// Sort the common symbols by alignment as a heuristic to pack them better.
std::vector<DefinedCommon *> Syms = getCommonSymbols<ELFT>();
+ if (Syms.empty())
+ return nullptr;
+
std::stable_sort(Syms.begin(), Syms.end(),
[](const DefinedCommon *A, const DefinedCommon *B) {
return A->Alignment > B->Alignment;
});
- // Assign offsets to symbols.
- size_t Size = 0;
- size_t Alignment = 1;
- for (DefinedCommon *Sym : Syms) {
- Alignment = std::max<size_t>(Alignment, Sym->Alignment);
- Size = alignTo(Size, Sym->Alignment);
-
- // Compute symbol offset relative to beginning of input section.
- Sym->Offset = Size;
- Size += Sym->Size;
- }
- Ret->Alignment = Alignment;
- Ret->Data = makeArrayRef<uint8_t>(nullptr, Size);
- return Ret;
+ BssSection *Sec = make<BssSection>("COMMON");
+ for (DefinedCommon *Sym : Syms)
+ Sym->Offset = Sec->reserveSpace(Sym->Size, Sym->Alignment);
+ return Sec;
}
// Returns an LLD version string.
@@ -100,16 +98,17 @@ static ArrayRef<uint8_t> getVersion() {
// Creates a .comment section containing LLD version info.
// With this feature, you can identify LLD-generated binaries easily
-// by "objdump -s -j .comment <file>".
+// by "readelf --string-dump .comment <file>".
// The returned object is a mergeable string section.
-template <class ELFT> MergeInputSection<ELFT> *elf::createCommentSection() {
+template <class ELFT> MergeInputSection *elf::createCommentSection() {
typename ELFT::Shdr Hdr = {};
Hdr.sh_flags = SHF_MERGE | SHF_STRINGS;
Hdr.sh_type = SHT_PROGBITS;
Hdr.sh_entsize = 1;
Hdr.sh_addralign = 1;
- auto *Ret = make<MergeInputSection<ELFT>>(/*file=*/nullptr, &Hdr, ".comment");
+ auto *Ret =
+ make<MergeInputSection>((ObjectFile<ELFT> *)nullptr, &Hdr, ".comment");
Ret->Data = getVersion();
Ret->splitIntoPieces();
return Ret;
@@ -118,8 +117,10 @@ template <class ELFT> MergeInputSection<ELFT> *elf::createCommentSection() {
// .MIPS.abiflags section.
template <class ELFT>
MipsAbiFlagsSection<ELFT>::MipsAbiFlagsSection(Elf_Mips_ABIFlags Flags)
- : SyntheticSection<ELFT>(SHF_ALLOC, SHT_MIPS_ABIFLAGS, 8, ".MIPS.abiflags"),
- Flags(Flags) {}
+ : SyntheticSection(SHF_ALLOC, SHT_MIPS_ABIFLAGS, 8, ".MIPS.abiflags"),
+ Flags(Flags) {
+ this->Entsize = sizeof(Elf_Mips_ABIFlags);
+}
template <class ELFT> void MipsAbiFlagsSection<ELFT>::writeTo(uint8_t *Buf) {
memcpy(Buf, &Flags, sizeof(Flags));
@@ -130,13 +131,13 @@ MipsAbiFlagsSection<ELFT> *MipsAbiFlagsSection<ELFT>::create() {
Elf_Mips_ABIFlags Flags = {};
bool Create = false;
- for (InputSectionBase<ELFT> *Sec : Symtab<ELFT>::X->Sections) {
- if (!Sec->Live || Sec->Type != SHT_MIPS_ABIFLAGS)
+ for (InputSectionBase *Sec : InputSections) {
+ if (Sec->Type != SHT_MIPS_ABIFLAGS)
continue;
Sec->Live = false;
Create = true;
- std::string Filename = toString(Sec->getFile());
+ std::string Filename = toString(Sec->getFile<ELFT>());
const size_t Size = Sec->Data.size();
// Older version of BFD (such as the default FreeBSD linker) concatenate
// .MIPS.abiflags instead of merging. To allow for this case (or potential
@@ -175,8 +176,10 @@ MipsAbiFlagsSection<ELFT> *MipsAbiFlagsSection<ELFT>::create() {
// .MIPS.options section.
template <class ELFT>
MipsOptionsSection<ELFT>::MipsOptionsSection(Elf_Mips_RegInfo Reginfo)
- : SyntheticSection<ELFT>(SHF_ALLOC, SHT_MIPS_OPTIONS, 8, ".MIPS.options"),
- Reginfo(Reginfo) {}
+ : SyntheticSection(SHF_ALLOC, SHT_MIPS_OPTIONS, 8, ".MIPS.options"),
+ Reginfo(Reginfo) {
+ this->Entsize = sizeof(Elf_Mips_Options) + sizeof(Elf_Mips_RegInfo);
+}
template <class ELFT> void MipsOptionsSection<ELFT>::writeTo(uint8_t *Buf) {
auto *Options = reinterpret_cast<Elf_Mips_Options *>(Buf);
@@ -184,7 +187,7 @@ template <class ELFT> void MipsOptionsSection<ELFT>::writeTo(uint8_t *Buf) {
Options->size = getSize();
if (!Config->Relocatable)
- Reginfo.ri_gp_value = In<ELFT>::MipsGot->getGp();
+ Reginfo.ri_gp_value = InX::MipsGot->getGp();
memcpy(Buf + sizeof(Elf_Mips_Options), &Reginfo, sizeof(Reginfo));
}
@@ -197,13 +200,13 @@ MipsOptionsSection<ELFT> *MipsOptionsSection<ELFT>::create() {
Elf_Mips_RegInfo Reginfo = {};
bool Create = false;
- for (InputSectionBase<ELFT> *Sec : Symtab<ELFT>::X->Sections) {
- if (!Sec->Live || Sec->Type != SHT_MIPS_OPTIONS)
+ for (InputSectionBase *Sec : InputSections) {
+ if (Sec->Type != SHT_MIPS_OPTIONS)
continue;
Sec->Live = false;
Create = true;
- std::string Filename = toString(Sec->getFile());
+ std::string Filename = toString(Sec->getFile<ELFT>());
ArrayRef<uint8_t> D = Sec->Data;
while (!D.empty()) {
@@ -217,7 +220,7 @@ MipsOptionsSection<ELFT> *MipsOptionsSection<ELFT>::create() {
if (Config->Relocatable && Opt->getRegInfo().ri_gp_value)
error(Filename + ": unsupported non-zero ri_gp_value");
Reginfo.ri_gprmask |= Opt->getRegInfo().ri_gprmask;
- Sec->getFile()->MipsGp0 = Opt->getRegInfo().ri_gp_value;
+ Sec->getFile<ELFT>()->MipsGp0 = Opt->getRegInfo().ri_gp_value;
break;
}
@@ -235,12 +238,14 @@ MipsOptionsSection<ELFT> *MipsOptionsSection<ELFT>::create() {
// MIPS .reginfo section.
template <class ELFT>
MipsReginfoSection<ELFT>::MipsReginfoSection(Elf_Mips_RegInfo Reginfo)
- : SyntheticSection<ELFT>(SHF_ALLOC, SHT_MIPS_REGINFO, 4, ".reginfo"),
- Reginfo(Reginfo) {}
+ : SyntheticSection(SHF_ALLOC, SHT_MIPS_REGINFO, 4, ".reginfo"),
+ Reginfo(Reginfo) {
+ this->Entsize = sizeof(Elf_Mips_RegInfo);
+}
template <class ELFT> void MipsReginfoSection<ELFT>::writeTo(uint8_t *Buf) {
if (!Config->Relocatable)
- Reginfo.ri_gp_value = In<ELFT>::MipsGot->getGp();
+ Reginfo.ri_gp_value = InX::MipsGot->getGp();
memcpy(Buf, &Reginfo, sizeof(Reginfo));
}
@@ -253,22 +258,24 @@ MipsReginfoSection<ELFT> *MipsReginfoSection<ELFT>::create() {
Elf_Mips_RegInfo Reginfo = {};
bool Create = false;
- for (InputSectionBase<ELFT> *Sec : Symtab<ELFT>::X->Sections) {
- if (!Sec->Live || Sec->Type != SHT_MIPS_REGINFO)
+ for (InputSectionBase *Sec : InputSections) {
+ if (Sec->Type != SHT_MIPS_REGINFO)
continue;
Sec->Live = false;
Create = true;
if (Sec->Data.size() != sizeof(Elf_Mips_RegInfo)) {
- error(toString(Sec->getFile()) + ": invalid size of .reginfo section");
+ error(toString(Sec->getFile<ELFT>()) +
+ ": invalid size of .reginfo section");
return nullptr;
}
auto *R = reinterpret_cast<const Elf_Mips_RegInfo *>(Sec->Data.data());
if (Config->Relocatable && R->ri_gp_value)
- error(toString(Sec->getFile()) + ": unsupported non-zero ri_gp_value");
+ error(toString(Sec->getFile<ELFT>()) +
+ ": unsupported non-zero ri_gp_value");
Reginfo.ri_gprmask |= R->ri_gprmask;
- Sec->getFile()->MipsGp0 = R->ri_gp_value;
+ Sec->getFile<ELFT>()->MipsGp0 = R->ri_gp_value;
};
if (Create)
@@ -276,15 +283,24 @@ MipsReginfoSection<ELFT> *MipsReginfoSection<ELFT>::create() {
return nullptr;
}
-template <class ELFT> InputSection<ELFT> *elf::createInterpSection() {
- auto *Ret = make<InputSection<ELFT>>(SHF_ALLOC, SHT_PROGBITS, 1,
- ArrayRef<uint8_t>(), ".interp");
- Ret->Live = true;
-
+InputSection *elf::createInterpSection() {
// StringSaver guarantees that the returned string ends with '\0'.
StringRef S = Saver.save(Config->DynamicLinker);
- Ret->Data = {(const uint8_t *)S.data(), S.size() + 1};
- return Ret;
+ ArrayRef<uint8_t> Contents = {(const uint8_t *)S.data(), S.size() + 1};
+
+ auto *Sec =
+ make<InputSection>(SHF_ALLOC, SHT_PROGBITS, 1, Contents, ".interp");
+ Sec->Live = true;
+ return Sec;
+}
+
+SymbolBody *elf::addSyntheticLocal(StringRef Name, uint8_t Type, uint64_t Value,
+ uint64_t Size, InputSectionBase *Section) {
+ auto *S = make<DefinedRegular>(Name, /*IsLocal*/ true, STV_DEFAULT, Type,
+ Value, Size, Section, nullptr);
+ if (InX::SymTab)
+ InX::SymTab->addSymbol(S);
+ return S;
}
static size_t getHashSize() {
@@ -303,16 +319,15 @@ static size_t getHashSize() {
}
}
-template <class ELFT>
-BuildIdSection<ELFT>::BuildIdSection()
- : SyntheticSection<ELFT>(SHF_ALLOC, SHT_NOTE, 1, ".note.gnu.build-id"),
+BuildIdSection::BuildIdSection()
+ : SyntheticSection(SHF_ALLOC, SHT_NOTE, 1, ".note.gnu.build-id"),
HashSize(getHashSize()) {}
-template <class ELFT> void BuildIdSection<ELFT>::writeTo(uint8_t *Buf) {
- const endianness E = ELFT::TargetEndianness;
- write32<E>(Buf, 4); // Name size
- write32<E>(Buf + 4, HashSize); // Content size
- write32<E>(Buf + 8, NT_GNU_BUILD_ID); // Type
+void BuildIdSection::writeTo(uint8_t *Buf) {
+ endianness E = Config->Endianness;
+ write32(Buf, 4, E); // Name size
+ write32(Buf + 4, HashSize, E); // Content size
+ write32(Buf + 8, NT_GNU_BUILD_ID, E); // Type
memcpy(Buf + 12, "GNU", 4); // Name string
HashBuf = Buf + 16;
}
@@ -334,23 +349,33 @@ static std::vector<ArrayRef<uint8_t>> split(ArrayRef<uint8_t> Arr,
// In order to utilize multiple cores, we first split data into 1MB
// chunks, compute a hash for each chunk, and then compute a hash value
// of the hash values.
-template <class ELFT>
-void BuildIdSection<ELFT>::computeHash(
+void BuildIdSection::computeHash(
llvm::ArrayRef<uint8_t> Data,
std::function<void(uint8_t *Dest, ArrayRef<uint8_t> Arr)> HashFn) {
std::vector<ArrayRef<uint8_t>> Chunks = split(Data, 1024 * 1024);
std::vector<uint8_t> Hashes(Chunks.size() * HashSize);
// Compute hash values.
- forLoop(0, Chunks.size(),
- [&](size_t I) { HashFn(Hashes.data() + I * HashSize, Chunks[I]); });
+ parallelForEachN(0, Chunks.size(), [&](size_t I) {
+ HashFn(Hashes.data() + I * HashSize, Chunks[I]);
+ });
// Write to the final output buffer.
HashFn(HashBuf, Hashes);
}
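
The hash-of-hashes scheme above can be shown sequentially; the real code hashes the 1 MiB chunks in parallel. The digest function is abstracted here as a callback:

#include <algorithm>
#include <cstddef>
#include <cstdint>
#include <vector>

// Sequential sketch of the build-id layout: hash each chunk, then hash the
// concatenated chunk digests. The digest (MD5, SHA1, xxHash, ...) is a
// callback that writes HashSize bytes to Dest.
using Digest = void (*)(uint8_t *Dest, const uint8_t *Data, size_t Len);

void hashOfHashes(const uint8_t *Data, size_t Len, size_t ChunkSize,
                  size_t HashSize, Digest Fn, uint8_t *Out) {
  std::vector<uint8_t> Hashes;
  for (size_t Off = 0; Off < Len; Off += ChunkSize) {
    size_t N = std::min(ChunkSize, Len - Off);
    Hashes.resize(Hashes.size() + HashSize);
    Fn(Hashes.data() + Hashes.size() - HashSize, Data + Off, N);
  }
  Fn(Out, Hashes.data(), Hashes.size()); // final digest over the chunk digests
}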
-template <class ELFT>
-void BuildIdSection<ELFT>::writeBuildId(ArrayRef<uint8_t> Buf) {
+BssSection::BssSection(StringRef Name)
+ : SyntheticSection(SHF_ALLOC | SHF_WRITE, SHT_NOBITS, 0, Name) {}
+
+size_t BssSection::reserveSpace(uint64_t Size, uint32_t Alignment) {
+ if (OutputSection *Sec = getParent())
+ Sec->updateAlignment(Alignment);
+ this->Size = alignTo(this->Size, Alignment) + Size;
+ this->Alignment = std::max(this->Alignment, Alignment);
+ return this->Size - Size;
+}
+
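
reserveSpace above is a simple bump allocator over the section size. A sketch of the same arithmetic, assuming a power-of-two alignment:

#include <cstdint>

// Align the running size, append the new object, return the offset it was
// placed at.
uint64_t reserve(uint64_t &SectionSize, uint64_t ObjSize, uint64_t Alignment) {
  uint64_t Off = (SectionSize + Alignment - 1) & ~(Alignment - 1);
  SectionSize = Off + ObjSize;
  return Off;
}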
+void BuildIdSection::writeBuildId(ArrayRef<uint8_t> Buf) {
switch (Config->BuildId) {
case BuildIdKind::Fast:
computeHash(Buf, [](uint8_t *Dest, ArrayRef<uint8_t> Arr) {
@@ -380,16 +405,229 @@ void BuildIdSection<ELFT>::writeBuildId(ArrayRef<uint8_t> Buf) {
}
template <class ELFT>
-GotSection<ELFT>::GotSection()
- : SyntheticSection<ELFT>(SHF_ALLOC | SHF_WRITE, SHT_PROGBITS,
- Target->GotEntrySize, ".got") {}
+EhFrameSection<ELFT>::EhFrameSection()
+ : SyntheticSection(SHF_ALLOC, SHT_PROGBITS, 1, ".eh_frame") {}
+
+// Search for an existing CIE record or create a new one.
+// CIE records from input object files are uniquified by their contents
+// and where their relocations point to.
+template <class ELFT>
+template <class RelTy>
+CieRecord *EhFrameSection<ELFT>::addCie(EhSectionPiece &Piece,
+ ArrayRef<RelTy> Rels) {
+ auto *Sec = cast<EhInputSection>(Piece.ID);
+ const endianness E = ELFT::TargetEndianness;
+ if (read32<E>(Piece.data().data() + 4) != 0)
+ fatal(toString(Sec) + ": CIE expected at beginning of .eh_frame");
+
+ SymbolBody *Personality = nullptr;
+ unsigned FirstRelI = Piece.FirstRelocation;
+ if (FirstRelI != (unsigned)-1)
+ Personality =
+ &Sec->template getFile<ELFT>()->getRelocTargetSym(Rels[FirstRelI]);
+
+ // Search for an existing CIE by CIE contents/relocation target pair.
+ CieRecord *Cie = &CieMap[{Piece.data(), Personality}];
+
+ // If not found, create a new one.
+ if (Cie->Piece == nullptr) {
+ Cie->Piece = &Piece;
+ Cies.push_back(Cie);
+ }
+ return Cie;
+}
+
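
CIE uniquification above keys a map by the CIE bytes plus the personality symbol the first relocation targets. An illustrative model with simplified stand-in types, not lld's classes:

#include <map>
#include <string>
#include <utility>

// CIEs are deduplicated by (contents, personality); the map default-constructs
// a record the first time a pair is seen.
struct CieRec {
  int OutputOff = -1;
};

CieRec *getOrCreateCie(
    std::map<std::pair<std::string, const void *>, CieRec> &Map,
    const std::string &Contents, const void *Personality) {
  return &Map[{Contents, Personality}];
}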
+// There is one FDE per function. Returns true if a given FDE
+// points to a live function.
+template <class ELFT>
+template <class RelTy>
+bool EhFrameSection<ELFT>::isFdeLive(EhSectionPiece &Piece,
+ ArrayRef<RelTy> Rels) {
+ auto *Sec = cast<EhInputSection>(Piece.ID);
+ unsigned FirstRelI = Piece.FirstRelocation;
+ if (FirstRelI == (unsigned)-1)
+ return false;
+ const RelTy &Rel = Rels[FirstRelI];
+ SymbolBody &B = Sec->template getFile<ELFT>()->getRelocTargetSym(Rel);
+ auto *D = dyn_cast<DefinedRegular>(&B);
+ if (!D || !D->Section)
+ return false;
+ auto *Target =
+ cast<InputSectionBase>(cast<InputSectionBase>(D->Section)->Repl);
+ return Target && Target->Live;
+}
-template <class ELFT> void GotSection<ELFT>::addEntry(SymbolBody &Sym) {
+// .eh_frame is a sequence of CIE or FDE records. In general, there
+// is one CIE record per input object file which is followed by
+// a list of FDEs. This function searches for an existing CIE or creates a new
+// one and associates FDEs with the CIE.
+template <class ELFT>
+template <class RelTy>
+void EhFrameSection<ELFT>::addSectionAux(EhInputSection *Sec,
+ ArrayRef<RelTy> Rels) {
+ const endianness E = ELFT::TargetEndianness;
+
+ DenseMap<size_t, CieRecord *> OffsetToCie;
+ for (EhSectionPiece &Piece : Sec->Pieces) {
+ // The empty record is the end marker.
+ if (Piece.size() == 4)
+ return;
+
+ size_t Offset = Piece.InputOff;
+ uint32_t ID = read32<E>(Piece.data().data() + 4);
+ if (ID == 0) {
+ OffsetToCie[Offset] = addCie(Piece, Rels);
+ continue;
+ }
+
+ uint32_t CieOffset = Offset + 4 - ID;
+ CieRecord *Cie = OffsetToCie[CieOffset];
+ if (!Cie)
+ fatal(toString(Sec) + ": invalid CIE reference");
+
+ if (!isFdeLive(Piece, Rels))
+ continue;
+ Cie->FdePieces.push_back(&Piece);
+ NumFdes++;
+ }
+}
+
+template <class ELFT>
+void EhFrameSection<ELFT>::addSection(InputSectionBase *C) {
+ auto *Sec = cast<EhInputSection>(C);
+ Sec->Parent = this;
+ updateAlignment(Sec->Alignment);
+ Sections.push_back(Sec);
+ for (auto *DS : Sec->DependentSections)
+ DependentSections.push_back(DS);
+
+ // .eh_frame is a sequence of CIE or FDE records. This function
+ // splits it into pieces so that we can call
+ // SplitInputSection::getSectionPiece on the section.
+ Sec->split<ELFT>();
+ if (Sec->Pieces.empty())
+ return;
+
+ if (Sec->NumRelocations) {
+ if (Sec->AreRelocsRela)
+ addSectionAux(Sec, Sec->template relas<ELFT>());
+ else
+ addSectionAux(Sec, Sec->template rels<ELFT>());
+ return;
+ }
+ addSectionAux(Sec, makeArrayRef<Elf_Rela>(nullptr, nullptr));
+}
+
+template <class ELFT>
+static void writeCieFde(uint8_t *Buf, ArrayRef<uint8_t> D) {
+ memcpy(Buf, D.data(), D.size());
+
+ // Fix the size field. -4 since size does not include the size field itself.
+ const endianness E = ELFT::TargetEndianness;
+ write32<E>(Buf, alignTo(D.size(), sizeof(typename ELFT::uint)) - 4);
+}
+
+template <class ELFT> void EhFrameSection<ELFT>::finalizeContents() {
+ if (this->Size)
+ return; // Already finalized.
+
+ size_t Off = 0;
+ for (CieRecord *Cie : Cies) {
+ Cie->Piece->OutputOff = Off;
+ Off += alignTo(Cie->Piece->size(), Config->Wordsize);
+
+ for (EhSectionPiece *Fde : Cie->FdePieces) {
+ Fde->OutputOff = Off;
+ Off += alignTo(Fde->size(), Config->Wordsize);
+ }
+ }
+
+ // The LSB standard does not allow a .eh_frame section with zero
+ // Call Frame Information records. Therefore, add a CIE record of length
+ // 0 as a terminator if this .eh_frame section is empty.
+ if (Off == 0)
+ Off = 4;
+
+ this->Size = Off;
+}
+
+template <class ELFT> static uint64_t readFdeAddr(uint8_t *Buf, int Size) {
+ const endianness E = ELFT::TargetEndianness;
+ switch (Size) {
+ case DW_EH_PE_udata2:
+ return read16<E>(Buf);
+ case DW_EH_PE_udata4:
+ return read32<E>(Buf);
+ case DW_EH_PE_udata8:
+ return read64<E>(Buf);
+ case DW_EH_PE_absptr:
+ if (ELFT::Is64Bits)
+ return read64<E>(Buf);
+ return read32<E>(Buf);
+ }
+ fatal("unknown FDE size encoding");
+}
+
+// Returns the VA to which a given FDE (on an mmap'ed buffer) applies.
+// We need it to create the .eh_frame_hdr section.
+template <class ELFT>
+uint64_t EhFrameSection<ELFT>::getFdePc(uint8_t *Buf, size_t FdeOff,
+ uint8_t Enc) {
+ // The starting address to which this FDE applies is
+ // stored at FDE + 8 byte.
+ size_t Off = FdeOff + 8;
+ uint64_t Addr = readFdeAddr<ELFT>(Buf + Off, Enc & 0x7);
+ if ((Enc & 0x70) == DW_EH_PE_absptr)
+ return Addr;
+ if ((Enc & 0x70) == DW_EH_PE_pcrel)
+ return Addr + getParent()->Addr + Off;
+ fatal("unknown FDE size relative encoding");
+}
+
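
getFdePc above handles only the absolute and PC-relative application forms. A self-contained sketch of that decoding step, after the size nibble has already been used to read the raw field:

#include <cstdint>

// RawAddr is the value read from the FDE's initial-location field; FieldOff is
// the offset of that field within .eh_frame; EhFrameVA is the section address.
uint64_t fdePc(uint64_t RawAddr, uint8_t Enc, uint64_t EhFrameVA,
               uint64_t FieldOff) {
  const uint8_t DW_EH_PE_absptr = 0x00, DW_EH_PE_pcrel = 0x10;
  if ((Enc & 0x70) == DW_EH_PE_absptr)
    return RawAddr;
  if ((Enc & 0x70) == DW_EH_PE_pcrel)
    return RawAddr + EhFrameVA + FieldOff; // relative to the field's address
  return UINT64_MAX; // other application forms are rejected by the real code
}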
+template <class ELFT> void EhFrameSection<ELFT>::writeTo(uint8_t *Buf) {
+ const endianness E = ELFT::TargetEndianness;
+ for (CieRecord *Cie : Cies) {
+ size_t CieOffset = Cie->Piece->OutputOff;
+ writeCieFde<ELFT>(Buf + CieOffset, Cie->Piece->data());
+
+ for (EhSectionPiece *Fde : Cie->FdePieces) {
+ size_t Off = Fde->OutputOff;
+ writeCieFde<ELFT>(Buf + Off, Fde->data());
+
+ // The FDE's second word should hold the offset to its associated CIE.
+ // Write it.
+ write32<E>(Buf + Off + 4, Off + 4 - CieOffset);
+ }
+ }
+
+ for (EhInputSection *S : Sections)
+ S->relocateAlloc(Buf, nullptr);
+
+ // Construct .eh_frame_hdr. .eh_frame_hdr is a binary search table
+ // used to look up the FDE that applies to a given address. So here
+ // we obtain the two addresses and pass them to the EhFrameHdr object.
+ if (In<ELFT>::EhFrameHdr) {
+ for (CieRecord *Cie : Cies) {
+ uint8_t Enc = getFdeEncoding<ELFT>(Cie->Piece);
+ for (SectionPiece *Fde : Cie->FdePieces) {
+ uint64_t Pc = getFdePc(Buf, Fde->OutputOff, Enc);
+ uint64_t FdeVA = getParent()->Addr + Fde->OutputOff;
+ In<ELFT>::EhFrameHdr->addFde(Pc, FdeVA);
+ }
+ }
+ }
+}
+
+GotSection::GotSection()
+ : SyntheticSection(SHF_ALLOC | SHF_WRITE, SHT_PROGBITS,
+ Target->GotEntrySize, ".got") {}
+
+void GotSection::addEntry(SymbolBody &Sym) {
Sym.GotIndex = NumEntries;
++NumEntries;
}
-template <class ELFT> bool GotSection<ELFT>::addDynTlsEntry(SymbolBody &Sym) {
+bool GotSection::addDynTlsEntry(SymbolBody &Sym) {
if (Sym.GlobalDynIndex != -1U)
return false;
Sym.GlobalDynIndex = NumEntries;
@@ -400,48 +638,42 @@ template <class ELFT> bool GotSection<ELFT>::addDynTlsEntry(SymbolBody &Sym) {
// Reserves TLS entries for a TLS module ID and a TLS block offset.
// In total it takes two GOT slots.
-template <class ELFT> bool GotSection<ELFT>::addTlsIndex() {
+bool GotSection::addTlsIndex() {
if (TlsIndexOff != uint32_t(-1))
return false;
- TlsIndexOff = NumEntries * sizeof(uintX_t);
+ TlsIndexOff = NumEntries * Config->Wordsize;
NumEntries += 2;
return true;
}
-template <class ELFT>
-typename GotSection<ELFT>::uintX_t
-GotSection<ELFT>::getGlobalDynAddr(const SymbolBody &B) const {
- return this->getVA() + B.GlobalDynIndex * sizeof(uintX_t);
+uint64_t GotSection::getGlobalDynAddr(const SymbolBody &B) const {
+ return this->getVA() + B.GlobalDynIndex * Config->Wordsize;
}
-template <class ELFT>
-typename GotSection<ELFT>::uintX_t
-GotSection<ELFT>::getGlobalDynOffset(const SymbolBody &B) const {
- return B.GlobalDynIndex * sizeof(uintX_t);
+uint64_t GotSection::getGlobalDynOffset(const SymbolBody &B) const {
+ return B.GlobalDynIndex * Config->Wordsize;
}
-template <class ELFT> void GotSection<ELFT>::finalize() {
- Size = NumEntries * sizeof(uintX_t);
-}
+void GotSection::finalizeContents() { Size = NumEntries * Config->Wordsize; }
-template <class ELFT> bool GotSection<ELFT>::empty() const {
+bool GotSection::empty() const {
// If we have a relocation that is relative to GOT (such as GOTOFFREL),
// we need to emit a GOT even if it's empty.
return NumEntries == 0 && !HasGotOffRel;
}
-template <class ELFT> void GotSection<ELFT>::writeTo(uint8_t *Buf) {
- this->relocate(Buf, Buf + Size);
+void GotSection::writeTo(uint8_t *Buf) {
+ // Buf points to the start of this section's buffer,
+ // whereas InputSectionBase::relocateAlloc() expects its argument
+ // to point to the start of the output section.
+ relocateAlloc(Buf - OutSecOff, Buf - OutSecOff + Size);
}
-template <class ELFT>
-MipsGotSection<ELFT>::MipsGotSection()
- : SyntheticSection<ELFT>(SHF_ALLOC | SHF_WRITE | SHF_MIPS_GPREL,
- SHT_PROGBITS, 16, ".got") {}
+MipsGotSection::MipsGotSection()
+ : SyntheticSection(SHF_ALLOC | SHF_WRITE | SHF_MIPS_GPREL, SHT_PROGBITS, 16,
+ ".got") {}
-template <class ELFT>
-void MipsGotSection<ELFT>::addEntry(SymbolBody &Sym, uintX_t Addend,
- RelExpr Expr) {
+void MipsGotSection::addEntry(SymbolBody &Sym, int64_t Addend, RelExpr Expr) {
// For "true" local symbols which can be referenced from the same module
// only compiler creates two instructions for address loading:
//
@@ -472,7 +704,7 @@ void MipsGotSection<ELFT>::addEntry(SymbolBody &Sym, uintX_t Addend,
// sections referenced by GOT relocations. Then later, in the `finalize`
// method, calculate the number of "pages" required to cover all saved output
// sections and allocate the appropriate number of GOT entries.
- PageIndexMap.insert({cast<DefinedRegular<ELFT>>(&Sym)->Section->OutSec, 0});
+ PageIndexMap.insert({Sym.getOutputSection(), 0});
return;
}
if (Sym.isTls()) {
@@ -483,7 +715,7 @@ void MipsGotSection<ELFT>::addEntry(SymbolBody &Sym, uintX_t Addend,
TlsEntries.push_back(&Sym);
return;
}
- auto AddEntry = [&](SymbolBody &S, uintX_t A, GotEntries &Items) {
+ auto AddEntry = [&](SymbolBody &S, uint64_t A, GotEntries &Items) {
if (S.isInGot() && !A)
return;
size_t NewIndex = Items.size();
@@ -508,8 +740,7 @@ void MipsGotSection<ELFT>::addEntry(SymbolBody &Sym, uintX_t Addend,
}
}
-template <class ELFT>
-bool MipsGotSection<ELFT>::addDynTlsEntry(SymbolBody &Sym) {
+bool MipsGotSection::addDynTlsEntry(SymbolBody &Sym) {
if (Sym.GlobalDynIndex != -1U)
return false;
Sym.GlobalDynIndex = TlsEntries.size();
@@ -521,10 +752,10 @@ bool MipsGotSection<ELFT>::addDynTlsEntry(SymbolBody &Sym) {
// Reserves TLS entries for a TLS module ID and a TLS block offset.
// In total it takes two GOT slots.
-template <class ELFT> bool MipsGotSection<ELFT>::addTlsIndex() {
+bool MipsGotSection::addTlsIndex() {
if (TlsIndexOff != uint32_t(-1))
return false;
- TlsIndexOff = TlsEntries.size() * sizeof(uintX_t);
+ TlsIndexOff = TlsEntries.size() * Config->Wordsize;
TlsEntries.push_back(nullptr);
TlsEntries.push_back(nullptr);
return true;
@@ -538,25 +769,20 @@ static uint64_t getMipsPageCount(uint64_t Size) {
return (Size + 0xfffe) / 0xffff + 1;
}
-template <class ELFT>
-typename MipsGotSection<ELFT>::uintX_t
-MipsGotSection<ELFT>::getPageEntryOffset(const SymbolBody &B,
- uintX_t Addend) const {
- const OutputSectionBase *OutSec =
- cast<DefinedRegular<ELFT>>(&B)->Section->OutSec;
- uintX_t SecAddr = getMipsPageAddr(OutSec->Addr);
- uintX_t SymAddr = getMipsPageAddr(B.getVA<ELFT>(Addend));
- uintX_t Index = PageIndexMap.lookup(OutSec) + (SymAddr - SecAddr) / 0xffff;
+uint64_t MipsGotSection::getPageEntryOffset(const SymbolBody &B,
+ int64_t Addend) const {
+ const OutputSection *OutSec = B.getOutputSection();
+ uint64_t SecAddr = getMipsPageAddr(OutSec->Addr);
+ uint64_t SymAddr = getMipsPageAddr(B.getVA(Addend));
+ uint64_t Index = PageIndexMap.lookup(OutSec) + (SymAddr - SecAddr) / 0xffff;
assert(Index < PageEntriesNum);
- return (HeaderEntriesNum + Index) * sizeof(uintX_t);
+ return (HeaderEntriesNum + Index) * Config->Wordsize;
}
-template <class ELFT>
-typename MipsGotSection<ELFT>::uintX_t
-MipsGotSection<ELFT>::getBodyEntryOffset(const SymbolBody &B,
- uintX_t Addend) const {
+uint64_t MipsGotSection::getBodyEntryOffset(const SymbolBody &B,
+ int64_t Addend) const {
// Calculate offset of the GOT entries block: TLS, global, local.
- uintX_t Index = HeaderEntriesNum + PageEntriesNum;
+ uint64_t Index = HeaderEntriesNum + PageEntriesNum;
if (B.isTls())
Index += LocalEntries.size() + LocalEntries32.size() + GlobalEntries.size();
else if (B.IsInGlobalMipsGot)
@@ -571,35 +797,31 @@ MipsGotSection<ELFT>::getBodyEntryOffset(const SymbolBody &B,
assert(It != EntryIndexMap.end());
Index += It->second;
}
- return Index * sizeof(uintX_t);
+ return Index * Config->Wordsize;
}
-template <class ELFT>
-typename MipsGotSection<ELFT>::uintX_t
-MipsGotSection<ELFT>::getTlsOffset() const {
- return (getLocalEntriesNum() + GlobalEntries.size()) * sizeof(uintX_t);
+uint64_t MipsGotSection::getTlsOffset() const {
+ return (getLocalEntriesNum() + GlobalEntries.size()) * Config->Wordsize;
}
-template <class ELFT>
-typename MipsGotSection<ELFT>::uintX_t
-MipsGotSection<ELFT>::getGlobalDynOffset(const SymbolBody &B) const {
- return B.GlobalDynIndex * sizeof(uintX_t);
+uint64_t MipsGotSection::getGlobalDynOffset(const SymbolBody &B) const {
+ return B.GlobalDynIndex * Config->Wordsize;
}
-template <class ELFT>
-const SymbolBody *MipsGotSection<ELFT>::getFirstGlobalEntry() const {
+const SymbolBody *MipsGotSection::getFirstGlobalEntry() const {
return GlobalEntries.empty() ? nullptr : GlobalEntries.front().first;
}
-template <class ELFT>
-unsigned MipsGotSection<ELFT>::getLocalEntriesNum() const {
+unsigned MipsGotSection::getLocalEntriesNum() const {
return HeaderEntriesNum + PageEntriesNum + LocalEntries.size() +
LocalEntries32.size();
}
-template <class ELFT> void MipsGotSection<ELFT>::finalize() {
+void MipsGotSection::finalizeContents() { updateAllocSize(); }
+
+void MipsGotSection::updateAllocSize() {
PageEntriesNum = 0;
- for (std::pair<const OutputSectionBase *, size_t> &P : PageIndexMap) {
+ for (std::pair<const OutputSection *, size_t> &P : PageIndexMap) {
// For each output section referenced by GOT page relocations calculate
// and save into PageIndexMap an upper bound of MIPS GOT entries required
// to store page addresses of local symbols. We assume the worst case -
@@ -610,27 +832,31 @@ template <class ELFT> void MipsGotSection<ELFT>::finalize() {
PageEntriesNum += getMipsPageCount(P.first->Size);
}
Size = (getLocalEntriesNum() + GlobalEntries.size() + TlsEntries.size()) *
- sizeof(uintX_t);
+ Config->Wordsize;
}
-template <class ELFT> bool MipsGotSection<ELFT>::empty() const {
+bool MipsGotSection::empty() const {
// We add the .got section to the result for dynamic MIPS target because
// its address and properties are mentioned in the .dynamic section.
return Config->Relocatable;
}
-template <class ELFT>
-typename MipsGotSection<ELFT>::uintX_t MipsGotSection<ELFT>::getGp() const {
- return ElfSym<ELFT>::MipsGp->template getVA<ELFT>(0);
+uint64_t MipsGotSection::getGp() const { return ElfSym::MipsGp->getVA(0); }
+
+static uint64_t readUint(uint8_t *Buf) {
+ if (Config->Is64)
+ return read64(Buf, Config->Endianness);
+ return read32(Buf, Config->Endianness);
}
-template <class ELFT>
-static void writeUint(uint8_t *Buf, typename ELFT::uint Val) {
- typedef typename ELFT::uint uintX_t;
- write<uintX_t, ELFT::TargetEndianness, sizeof(uintX_t)>(Buf, Val);
+static void writeUint(uint8_t *Buf, uint64_t Val) {
+ if (Config->Is64)
+ write64(Buf, Val, Config->Endianness);
+ else
+ write32(Buf, Val, Config->Endianness);
}
-template <class ELFT> void MipsGotSection<ELFT>::writeTo(uint8_t *Buf) {
+void MipsGotSection::writeTo(uint8_t *Buf) {
// Set the MSB of the second GOT slot. This is not required by any
// MIPS ABI documentation, though.
//
@@ -645,25 +871,24 @@ template <class ELFT> void MipsGotSection<ELFT>::writeTo(uint8_t *Buf) {
// we've been doing this for years, it is probably a safe bet to
// keep doing this for now. We really need to revisit this to see
// if we had to do this.
- auto *P = reinterpret_cast<typename ELFT::Off *>(Buf);
- P[1] = uintX_t(1) << (ELFT::Is64Bits ? 63 : 31);
- Buf += HeaderEntriesNum * sizeof(uintX_t);
+ writeUint(Buf + Config->Wordsize, (uint64_t)1 << (Config->Wordsize * 8 - 1));
+ Buf += HeaderEntriesNum * Config->Wordsize;
// Write 'page address' entries to the local part of the GOT.
- for (std::pair<const OutputSectionBase *, size_t> &L : PageIndexMap) {
+ for (std::pair<const OutputSection *, size_t> &L : PageIndexMap) {
size_t PageCount = getMipsPageCount(L.first->Size);
- uintX_t FirstPageAddr = getMipsPageAddr(L.first->Addr);
+ uint64_t FirstPageAddr = getMipsPageAddr(L.first->Addr);
for (size_t PI = 0; PI < PageCount; ++PI) {
- uint8_t *Entry = Buf + (L.second + PI) * sizeof(uintX_t);
- writeUint<ELFT>(Entry, FirstPageAddr + PI * 0x10000);
+ uint8_t *Entry = Buf + (L.second + PI) * Config->Wordsize;
+ writeUint(Entry, FirstPageAddr + PI * 0x10000);
}
}
- Buf += PageEntriesNum * sizeof(uintX_t);
+ Buf += PageEntriesNum * Config->Wordsize;
auto AddEntry = [&](const GotEntry &SA) {
uint8_t *Entry = Buf;
- Buf += sizeof(uintX_t);
+ Buf += Config->Wordsize;
const SymbolBody *Body = SA.first;
- uintX_t VA = Body->template getVA<ELFT>(SA.second);
- writeUint<ELFT>(Entry, VA);
+ uint64_t VA = Body->getVA(SA.second);
+ writeUint(Entry, VA);
};
std::for_each(std::begin(LocalEntries), std::end(LocalEntries), AddEntry);
std::for_each(std::begin(LocalEntries32), std::end(LocalEntries32), AddEntry);
@@ -674,86 +899,83 @@ template <class ELFT> void MipsGotSection<ELFT>::writeTo(uint8_t *Buf) {
// for thread-local storage.
// https://www.linux-mips.org/wiki/NPTL
if (TlsIndexOff != -1U && !Config->Pic)
- writeUint<ELFT>(Buf + TlsIndexOff, 1);
+ writeUint(Buf + TlsIndexOff, 1);
for (const SymbolBody *B : TlsEntries) {
if (!B || B->isPreemptible())
continue;
- uintX_t VA = B->getVA<ELFT>();
+ uint64_t VA = B->getVA();
if (B->GotIndex != -1U) {
- uint8_t *Entry = Buf + B->GotIndex * sizeof(uintX_t);
- writeUint<ELFT>(Entry, VA - 0x7000);
+ uint8_t *Entry = Buf + B->GotIndex * Config->Wordsize;
+ writeUint(Entry, VA - 0x7000);
}
if (B->GlobalDynIndex != -1U) {
- uint8_t *Entry = Buf + B->GlobalDynIndex * sizeof(uintX_t);
- writeUint<ELFT>(Entry, 1);
- Entry += sizeof(uintX_t);
- writeUint<ELFT>(Entry, VA - 0x8000);
+ uint8_t *Entry = Buf + B->GlobalDynIndex * Config->Wordsize;
+ writeUint(Entry, 1);
+ Entry += Config->Wordsize;
+ writeUint(Entry, VA - 0x8000);
}
}
}
-template <class ELFT>
-GotPltSection<ELFT>::GotPltSection()
- : SyntheticSection<ELFT>(SHF_ALLOC | SHF_WRITE, SHT_PROGBITS,
- Target->GotPltEntrySize, ".got.plt") {}
+GotPltSection::GotPltSection()
+ : SyntheticSection(SHF_ALLOC | SHF_WRITE, SHT_PROGBITS,
+ Target->GotPltEntrySize, ".got.plt") {}
-template <class ELFT> void GotPltSection<ELFT>::addEntry(SymbolBody &Sym) {
+void GotPltSection::addEntry(SymbolBody &Sym) {
Sym.GotPltIndex = Target->GotPltHeaderEntriesNum + Entries.size();
Entries.push_back(&Sym);
}
-template <class ELFT> size_t GotPltSection<ELFT>::getSize() const {
+size_t GotPltSection::getSize() const {
return (Target->GotPltHeaderEntriesNum + Entries.size()) *
Target->GotPltEntrySize;
}
-template <class ELFT> void GotPltSection<ELFT>::writeTo(uint8_t *Buf) {
+void GotPltSection::writeTo(uint8_t *Buf) {
Target->writeGotPltHeader(Buf);
Buf += Target->GotPltHeaderEntriesNum * Target->GotPltEntrySize;
for (const SymbolBody *B : Entries) {
Target->writeGotPlt(Buf, *B);
- Buf += sizeof(uintX_t);
+ Buf += Config->Wordsize;
}
}
// On ARM the IgotPltSection is part of the GotSection, on other Targets it is
// part of the .got.plt
-template <class ELFT>
-IgotPltSection<ELFT>::IgotPltSection()
- : SyntheticSection<ELFT>(SHF_ALLOC | SHF_WRITE, SHT_PROGBITS,
- Target->GotPltEntrySize,
- Config->EMachine == EM_ARM ? ".got" : ".got.plt") {
-}
+IgotPltSection::IgotPltSection()
+ : SyntheticSection(SHF_ALLOC | SHF_WRITE, SHT_PROGBITS,
+ Target->GotPltEntrySize,
+ Config->EMachine == EM_ARM ? ".got" : ".got.plt") {}
-template <class ELFT> void IgotPltSection<ELFT>::addEntry(SymbolBody &Sym) {
+void IgotPltSection::addEntry(SymbolBody &Sym) {
Sym.IsInIgot = true;
Sym.GotPltIndex = Entries.size();
Entries.push_back(&Sym);
}
-template <class ELFT> size_t IgotPltSection<ELFT>::getSize() const {
+size_t IgotPltSection::getSize() const {
return Entries.size() * Target->GotPltEntrySize;
}
-template <class ELFT> void IgotPltSection<ELFT>::writeTo(uint8_t *Buf) {
+void IgotPltSection::writeTo(uint8_t *Buf) {
for (const SymbolBody *B : Entries) {
Target->writeIgotPlt(Buf, *B);
- Buf += sizeof(uintX_t);
+ Buf += Config->Wordsize;
}
}
-template <class ELFT>
-StringTableSection<ELFT>::StringTableSection(StringRef Name, bool Dynamic)
- : SyntheticSection<ELFT>(Dynamic ? (uintX_t)SHF_ALLOC : 0, SHT_STRTAB, 1,
- Name),
- Dynamic(Dynamic) {}
+StringTableSection::StringTableSection(StringRef Name, bool Dynamic)
+ : SyntheticSection(Dynamic ? (uint64_t)SHF_ALLOC : 0, SHT_STRTAB, 1, Name),
+ Dynamic(Dynamic) {
+ // ELF string tables start with a NUL byte.
+ addString("");
+}
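// For illustration (hypothetical contents): after the implicit "" entry added
// above, adding "foo" and then "bar" lays the table out as the bytes
//   \0 f o o \0 b a r \0
// and addString() returns offsets 0, 1 and 5, which callers store in
// st_name-like fields.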
// Adds a string to the string table. If HashIt is true we hash and check for
// duplicates. It is optional because the name of global symbols are already
// uniqued and hashing them again has a big cost for a small value: uniquing
// them with some other string that happens to be the same.
-template <class ELFT>
-unsigned StringTableSection<ELFT>::addString(StringRef S, bool HashIt) {
+unsigned StringTableSection::addString(StringRef S, bool HashIt) {
if (HashIt) {
auto R = StringMap.insert(std::make_pair(S, this->Size));
if (!R.second)
@@ -765,9 +987,7 @@ unsigned StringTableSection<ELFT>::addString(StringRef S, bool HashIt) {
return Ret;
}
-template <class ELFT> void StringTableSection<ELFT>::writeTo(uint8_t *Buf) {
- // ELF string tables start with NUL byte, so advance the pointer by one.
- ++Buf;
+void StringTableSection::writeTo(uint8_t *Buf) {
for (StringRef S : Strings) {
memcpy(Buf, S.data(), S.size());
Buf += S.size() + 1;
@@ -781,13 +1001,15 @@ static unsigned getVerDefNum() { return Config->VersionDefinitions.size() + 1; }
template <class ELFT>
DynamicSection<ELFT>::DynamicSection()
- : SyntheticSection<ELFT>(SHF_ALLOC | SHF_WRITE, SHT_DYNAMIC,
- sizeof(uintX_t), ".dynamic") {
+ : SyntheticSection(SHF_ALLOC | SHF_WRITE, SHT_DYNAMIC, Config->Wordsize,
+ ".dynamic") {
this->Entsize = ELFT::Is64Bits ? 16 : 8;
- // .dynamic section is not writable on MIPS.
+
+ // .dynamic section is not writable on MIPS and on Fuchsia OS
+ // which passes -z rodynamic.
// See "Special Section" in Chapter 4 in the following document:
// ftp://www.linux-mips.org/pub/linux/mips/doc/ABI/mipsabi.pdf
- if (Config->EMachine == EM_MIPS)
+ if (Config->EMachine == EM_MIPS || Config->ZRodynamic)
this->Flags = SHF_ALLOC;
addEntries();
@@ -798,16 +1020,18 @@ DynamicSection<ELFT>::DynamicSection()
template <class ELFT> void DynamicSection<ELFT>::addEntries() {
// Add strings to .dynstr early so that .dynstr's size will be
// fixed early.
+ for (StringRef S : Config->FilterList)
+ add({DT_FILTER, InX::DynStrTab->addString(S)});
for (StringRef S : Config->AuxiliaryList)
- add({DT_AUXILIARY, In<ELFT>::DynStrTab->addString(S)});
- if (!Config->RPath.empty())
+ add({DT_AUXILIARY, InX::DynStrTab->addString(S)});
+ if (!Config->Rpath.empty())
add({Config->EnableNewDtags ? DT_RUNPATH : DT_RPATH,
- In<ELFT>::DynStrTab->addString(Config->RPath)});
+ InX::DynStrTab->addString(Config->Rpath)});
for (SharedFile<ELFT> *F : Symtab<ELFT>::X->getSharedFiles())
if (F->isNeeded())
- add({DT_NEEDED, In<ELFT>::DynStrTab->addString(F->getSoName())});
+ add({DT_NEEDED, InX::DynStrTab->addString(F->SoName)});
if (!Config->SoName.empty())
- add({DT_SONAME, In<ELFT>::DynStrTab->addString(Config->SoName)});
+ add({DT_SONAME, InX::DynStrTab->addString(Config->SoName)});
// Set DT_FLAGS and DT_FLAGS_1.
uint32_t DtFlags = 0;
@@ -832,22 +1056,31 @@ template <class ELFT> void DynamicSection<ELFT>::addEntries() {
if (DtFlags1)
add({DT_FLAGS_1, DtFlags1});
- if (!Config->Shared && !Config->Relocatable)
+  // DT_DEBUG is a pointer to debug information used by debuggers at runtime. We
+ // need it for each process, so we don't write it for DSOs. The loader writes
+ // the pointer into this entry.
+ //
+ // DT_DEBUG is the only .dynamic entry that needs to be written to. Some
+ // systems (currently only Fuchsia OS) provide other means to give the
+  // debugger this information. Such systems may choose to make .dynamic
+  // read-only. If the target is such a system (i.e. -z rodynamic was given),
+  // don't write DT_DEBUG.
+ if (!Config->Shared && !Config->Relocatable && !Config->ZRodynamic)
add({DT_DEBUG, (uint64_t)0});
}
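// For illustration only, a hedged sketch of why DT_DEBUG must stay writable:
// the loader overwrites d_un.d_ptr at startup, and a debugging aid can read
// it back roughly like this (assuming a glibc-style <link.h> that declares
// _DYNAMIC and struct r_debug):
//
//   #include <link.h>
//   static r_debug *findRDebug() {
//     for (ElfW(Dyn) *D = _DYNAMIC; D->d_tag != DT_NULL; ++D)
//       if (D->d_tag == DT_DEBUG)
//         return reinterpret_cast<r_debug *>(D->d_un.d_ptr);
//     return nullptr;
//   }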
// Add remaining entries to complete .dynamic contents.
-template <class ELFT> void DynamicSection<ELFT>::finalize() {
+template <class ELFT> void DynamicSection<ELFT>::finalizeContents() {
if (this->Size)
return; // Already finalized.
- this->Link = In<ELFT>::DynStrTab->OutSec->SectionIndex;
- if (In<ELFT>::RelaDyn->OutSec->Size > 0) {
- bool IsRela = Config->Rela;
+ this->Link = InX::DynStrTab->getParent()->SectionIndex;
+ if (In<ELFT>::RelaDyn->getParent() && !In<ELFT>::RelaDyn->empty()) {
+ bool IsRela = Config->IsRela;
add({IsRela ? DT_RELA : DT_REL, In<ELFT>::RelaDyn});
- add({IsRela ? DT_RELASZ : DT_RELSZ, In<ELFT>::RelaDyn->OutSec->Size});
+ add({IsRela ? DT_RELASZ : DT_RELSZ, In<ELFT>::RelaDyn->getParent(),
+ Entry::SecSize});
add({IsRela ? DT_RELAENT : DT_RELENT,
- uintX_t(IsRela ? sizeof(Elf_Rela) : sizeof(Elf_Rel))});
+ uint64_t(IsRela ? sizeof(Elf_Rela) : sizeof(Elf_Rel))});
// MIPS dynamic loader does not support RELCOUNT tag.
// The problem is in the tight relation between dynamic
@@ -858,34 +1091,45 @@ template <class ELFT> void DynamicSection<ELFT>::finalize() {
add({IsRela ? DT_RELACOUNT : DT_RELCOUNT, NumRelativeRels});
}
}
- if (In<ELFT>::RelaPlt->OutSec->Size > 0) {
+ if (In<ELFT>::RelaPlt->getParent() && !In<ELFT>::RelaPlt->empty()) {
add({DT_JMPREL, In<ELFT>::RelaPlt});
- add({DT_PLTRELSZ, In<ELFT>::RelaPlt->OutSec->Size});
- add({Config->EMachine == EM_MIPS ? DT_MIPS_PLTGOT : DT_PLTGOT,
- In<ELFT>::GotPlt});
- add({DT_PLTREL, uint64_t(Config->Rela ? DT_RELA : DT_REL)});
+ add({DT_PLTRELSZ, In<ELFT>::RelaPlt->getParent(), Entry::SecSize});
+ switch (Config->EMachine) {
+ case EM_MIPS:
+ add({DT_MIPS_PLTGOT, In<ELFT>::GotPlt});
+ break;
+ case EM_SPARCV9:
+ add({DT_PLTGOT, In<ELFT>::Plt});
+ break;
+ default:
+ add({DT_PLTGOT, In<ELFT>::GotPlt});
+ break;
+ }
+ add({DT_PLTREL, uint64_t(Config->IsRela ? DT_RELA : DT_REL)});
}
- add({DT_SYMTAB, In<ELFT>::DynSymTab});
+ add({DT_SYMTAB, InX::DynSymTab});
add({DT_SYMENT, sizeof(Elf_Sym)});
- add({DT_STRTAB, In<ELFT>::DynStrTab});
- add({DT_STRSZ, In<ELFT>::DynStrTab->getSize()});
- if (In<ELFT>::GnuHashTab)
- add({DT_GNU_HASH, In<ELFT>::GnuHashTab});
+ add({DT_STRTAB, InX::DynStrTab});
+ add({DT_STRSZ, InX::DynStrTab->getSize()});
+ if (!Config->ZText)
+ add({DT_TEXTREL, (uint64_t)0});
+ if (InX::GnuHashTab)
+ add({DT_GNU_HASH, InX::GnuHashTab});
if (In<ELFT>::HashTab)
add({DT_HASH, In<ELFT>::HashTab});
- if (Out<ELFT>::PreinitArray) {
- add({DT_PREINIT_ARRAY, Out<ELFT>::PreinitArray});
- add({DT_PREINIT_ARRAYSZ, Out<ELFT>::PreinitArray, Entry::SecSize});
+ if (Out::PreinitArray) {
+ add({DT_PREINIT_ARRAY, Out::PreinitArray});
+ add({DT_PREINIT_ARRAYSZ, Out::PreinitArray, Entry::SecSize});
}
- if (Out<ELFT>::InitArray) {
- add({DT_INIT_ARRAY, Out<ELFT>::InitArray});
- add({DT_INIT_ARRAYSZ, Out<ELFT>::InitArray, Entry::SecSize});
+ if (Out::InitArray) {
+ add({DT_INIT_ARRAY, Out::InitArray});
+ add({DT_INIT_ARRAYSZ, Out::InitArray, Entry::SecSize});
}
- if (Out<ELFT>::FiniArray) {
- add({DT_FINI_ARRAY, Out<ELFT>::FiniArray});
- add({DT_FINI_ARRAYSZ, Out<ELFT>::FiniArray, Entry::SecSize});
+ if (Out::FiniArray) {
+ add({DT_FINI_ARRAY, Out::FiniArray});
+ add({DT_FINI_ARRAYSZ, Out::FiniArray, Entry::SecSize});
}
if (SymbolBody *B = Symtab<ELFT>::X->findInCurrentDSO(Config->Init))
@@ -909,19 +1153,18 @@ template <class ELFT> void DynamicSection<ELFT>::finalize() {
add({DT_MIPS_RLD_VERSION, 1});
add({DT_MIPS_FLAGS, RHF_NOTPOT});
add({DT_MIPS_BASE_ADDRESS, Config->ImageBase});
- add({DT_MIPS_SYMTABNO, In<ELFT>::DynSymTab->getNumSymbols()});
- add({DT_MIPS_LOCAL_GOTNO, In<ELFT>::MipsGot->getLocalEntriesNum()});
- if (const SymbolBody *B = In<ELFT>::MipsGot->getFirstGlobalEntry())
+ add({DT_MIPS_SYMTABNO, InX::DynSymTab->getNumSymbols()});
+ add({DT_MIPS_LOCAL_GOTNO, InX::MipsGot->getLocalEntriesNum()});
+ if (const SymbolBody *B = InX::MipsGot->getFirstGlobalEntry())
add({DT_MIPS_GOTSYM, B->DynsymIndex});
else
- add({DT_MIPS_GOTSYM, In<ELFT>::DynSymTab->getNumSymbols()});
- add({DT_PLTGOT, In<ELFT>::MipsGot});
- if (In<ELFT>::MipsRldMap)
- add({DT_MIPS_RLD_MAP, In<ELFT>::MipsRldMap});
+ add({DT_MIPS_GOTSYM, InX::DynSymTab->getNumSymbols()});
+ add({DT_PLTGOT, InX::MipsGot});
+ if (InX::MipsRldMap)
+ add({DT_MIPS_RLD_MAP, InX::MipsRldMap});
}
- this->OutSec->Entsize = this->Entsize;
- this->OutSec->Link = this->Link;
+ getParent()->Link = this->Link;
// +1 for DT_NULL
this->Size = (Entries.size() + 1) * this->Entsize;
@@ -937,13 +1180,13 @@ template <class ELFT> void DynamicSection<ELFT>::writeTo(uint8_t *Buf) {
P->d_un.d_ptr = E.OutSec->Addr;
break;
case Entry::InSecAddr:
- P->d_un.d_ptr = E.InSec->OutSec->Addr + E.InSec->OutSecOff;
+ P->d_un.d_ptr = E.InSec->getParent()->Addr + E.InSec->OutSecOff;
break;
case Entry::SecSize:
P->d_un.d_val = E.OutSec->Size;
break;
case Entry::SymAddr:
- P->d_un.d_ptr = E.Sym->template getVA<ELFT>();
+ P->d_un.d_ptr = E.Sym->getVA();
break;
case Entry::PlainInt:
P->d_un.d_val = E.Val;
@@ -953,21 +1196,17 @@ template <class ELFT> void DynamicSection<ELFT>::writeTo(uint8_t *Buf) {
}
}
-template <class ELFT>
-typename ELFT::uint DynamicReloc<ELFT>::getOffset() const {
- if (OutputSec)
- return OutputSec->Addr + OffsetInSec;
- return InputSec->OutSec->Addr + InputSec->getOffset(OffsetInSec);
+uint64_t DynamicReloc::getOffset() const {
+ return InputSec->getOutputSection()->Addr + InputSec->getOffset(OffsetInSec);
}
-template <class ELFT>
-typename ELFT::uint DynamicReloc<ELFT>::getAddend() const {
+int64_t DynamicReloc::getAddend() const {
if (UseSymVA)
- return Sym->getVA<ELFT>(Addend);
+ return Sym->getVA(Addend);
return Addend;
}
-template <class ELFT> uint32_t DynamicReloc<ELFT>::getSymIndex() const {
+uint32_t DynamicReloc::getSymIndex() const {
if (Sym && !UseSymVA)
return Sym->DynsymIndex;
return 0;
@@ -975,14 +1214,14 @@ template <class ELFT> uint32_t DynamicReloc<ELFT>::getSymIndex() const {
template <class ELFT>
RelocationSection<ELFT>::RelocationSection(StringRef Name, bool Sort)
- : SyntheticSection<ELFT>(SHF_ALLOC, Config->Rela ? SHT_RELA : SHT_REL,
- sizeof(uintX_t), Name),
+ : SyntheticSection(SHF_ALLOC, Config->IsRela ? SHT_RELA : SHT_REL,
+ Config->Wordsize, Name),
Sort(Sort) {
- this->Entsize = Config->Rela ? sizeof(Elf_Rela) : sizeof(Elf_Rel);
+ this->Entsize = Config->IsRela ? sizeof(Elf_Rela) : sizeof(Elf_Rel);
}
template <class ELFT>
-void RelocationSection<ELFT>::addReloc(const DynamicReloc<ELFT> &Reloc) {
+void RelocationSection<ELFT>::addReloc(const DynamicReloc &Reloc) {
if (Reloc.Type == Target->RelativeRel)
++NumRelativeRelocs;
Relocs.push_back(Reloc);
@@ -990,33 +1229,33 @@ void RelocationSection<ELFT>::addReloc(const DynamicReloc<ELFT> &Reloc) {
template <class ELFT, class RelTy>
static bool compRelocations(const RelTy &A, const RelTy &B) {
- bool AIsRel = A.getType(Config->Mips64EL) == Target->RelativeRel;
- bool BIsRel = B.getType(Config->Mips64EL) == Target->RelativeRel;
+ bool AIsRel = A.getType(Config->IsMips64EL) == Target->RelativeRel;
+ bool BIsRel = B.getType(Config->IsMips64EL) == Target->RelativeRel;
if (AIsRel != BIsRel)
return AIsRel;
- return A.getSymbol(Config->Mips64EL) < B.getSymbol(Config->Mips64EL);
+ return A.getSymbol(Config->IsMips64EL) < B.getSymbol(Config->IsMips64EL);
}
template <class ELFT> void RelocationSection<ELFT>::writeTo(uint8_t *Buf) {
uint8_t *BufBegin = Buf;
- for (const DynamicReloc<ELFT> &Rel : Relocs) {
+ for (const DynamicReloc &Rel : Relocs) {
auto *P = reinterpret_cast<Elf_Rela *>(Buf);
- Buf += Config->Rela ? sizeof(Elf_Rela) : sizeof(Elf_Rel);
+ Buf += Config->IsRela ? sizeof(Elf_Rela) : sizeof(Elf_Rel);
- if (Config->Rela)
+ if (Config->IsRela)
P->r_addend = Rel.getAddend();
P->r_offset = Rel.getOffset();
- if (Config->EMachine == EM_MIPS && Rel.getInputSec() == In<ELFT>::MipsGot)
+ if (Config->EMachine == EM_MIPS && Rel.getInputSec() == InX::MipsGot)
      // Dynamic relocations against the MIPS GOT section refer to TLS entries,
      // which are allocated at the end of the GOT. We need to adjust the offset
      // to take into account the 'local' and 'global' GOT entries that precede
      // them.
- P->r_offset += In<ELFT>::MipsGot->getTlsOffset();
- P->setSymbolAndType(Rel.getSymIndex(), Rel.Type, Config->Mips64EL);
+ P->r_offset += InX::MipsGot->getTlsOffset();
+ P->setSymbolAndType(Rel.getSymIndex(), Rel.Type, Config->IsMips64EL);
}
if (Sort) {
- if (Config->Rela)
+ if (Config->IsRela)
std::stable_sort((Elf_Rela *)BufBegin,
(Elf_Rela *)BufBegin + Relocs.size(),
compRelocations<ELFT, Elf_Rela>);
@@ -1030,314 +1269,284 @@ template <class ELFT> unsigned RelocationSection<ELFT>::getRelocOffset() {
return this->Entsize * Relocs.size();
}
-template <class ELFT> void RelocationSection<ELFT>::finalize() {
- this->Link = In<ELFT>::DynSymTab ? In<ELFT>::DynSymTab->OutSec->SectionIndex
- : In<ELFT>::SymTab->OutSec->SectionIndex;
+template <class ELFT> void RelocationSection<ELFT>::finalizeContents() {
+ this->Link = InX::DynSymTab ? InX::DynSymTab->getParent()->SectionIndex
+ : InX::SymTab->getParent()->SectionIndex;
// Set required output section properties.
- this->OutSec->Link = this->Link;
- this->OutSec->Entsize = this->Entsize;
+ getParent()->Link = this->Link;
}
-template <class ELFT>
-SymbolTableSection<ELFT>::SymbolTableSection(
- StringTableSection<ELFT> &StrTabSec)
- : SyntheticSection<ELFT>(StrTabSec.isDynamic() ? (uintX_t)SHF_ALLOC : 0,
- StrTabSec.isDynamic() ? SHT_DYNSYM : SHT_SYMTAB,
- sizeof(uintX_t),
- StrTabSec.isDynamic() ? ".dynsym" : ".symtab"),
- StrTabSec(StrTabSec) {
- this->Entsize = sizeof(Elf_Sym);
-}
+SymbolTableBaseSection::SymbolTableBaseSection(StringTableSection &StrTabSec)
+ : SyntheticSection(StrTabSec.isDynamic() ? (uint64_t)SHF_ALLOC : 0,
+ StrTabSec.isDynamic() ? SHT_DYNSYM : SHT_SYMTAB,
+ Config->Wordsize,
+ StrTabSec.isDynamic() ? ".dynsym" : ".symtab"),
+ StrTabSec(StrTabSec) {}
// Orders symbols according to their positions in the GOT,
// in compliance with MIPS ABI rules.
// See "Global Offset Table" in Chapter 5 in the following document
// for detailed description:
// ftp://www.linux-mips.org/pub/linux/mips/doc/ABI/mipsabi.pdf
-static bool sortMipsSymbols(const SymbolBody *L, const SymbolBody *R) {
+static bool sortMipsSymbols(const SymbolTableEntry &L,
+ const SymbolTableEntry &R) {
// Sort entries related to non-local preemptible symbols by GOT indexes.
// All other entries go to the first part of GOT in arbitrary order.
- bool LIsInLocalGot = !L->IsInGlobalMipsGot;
- bool RIsInLocalGot = !R->IsInGlobalMipsGot;
+ bool LIsInLocalGot = !L.Symbol->IsInGlobalMipsGot;
+ bool RIsInLocalGot = !R.Symbol->IsInGlobalMipsGot;
if (LIsInLocalGot || RIsInLocalGot)
return !RIsInLocalGot;
- return L->GotIndex < R->GotIndex;
-}
-
-template <class ELFT> void SymbolTableSection<ELFT>::finalize() {
- this->OutSec->Link = this->Link = StrTabSec.OutSec->SectionIndex;
- this->OutSec->Info = this->Info = NumLocals + 1;
- this->OutSec->Entsize = this->Entsize;
-
- if (Config->Relocatable)
- return;
+ return L.Symbol->GotIndex < R.Symbol->GotIndex;
+}
+
+// Finalize a symbol table. The ELF spec requires that all local
+// symbols precede global symbols, so we sort symbol entries in this
+// function. (For .dynsym, we don't do that because symbols for
+// dynamic linking are inherently all globals.)
+void SymbolTableBaseSection::finalizeContents() {
+ getParent()->Link = StrTabSec.getParent()->SectionIndex;
+
+ // If it is a .dynsym, there should be no local symbols, but we need
+ // to do a few things for the dynamic linker.
+ if (this->Type == SHT_DYNSYM) {
+ // Section's Info field has the index of the first non-local symbol.
+ // Because the first symbol entry is a null entry, 1 is the first.
+ getParent()->Info = 1;
+
+ if (InX::GnuHashTab) {
+ // NB: It also sorts Symbols to meet the GNU hash table requirements.
+ InX::GnuHashTab->addSymbols(Symbols);
+ } else if (Config->EMachine == EM_MIPS) {
+ std::stable_sort(Symbols.begin(), Symbols.end(), sortMipsSymbols);
+ }
- if (!StrTabSec.isDynamic()) {
- auto GlobBegin = Symbols.begin() + NumLocals;
- auto It = std::stable_partition(
- GlobBegin, Symbols.end(), [](const SymbolTableEntry &S) {
- return S.Symbol->symbol()->computeBinding() == STB_LOCAL;
- });
- // update sh_info with number of Global symbols output with computed
- // binding of STB_LOCAL
- this->OutSec->Info = this->Info = 1 + It - Symbols.begin();
+ size_t I = 0;
+ for (const SymbolTableEntry &S : Symbols)
+ S.Symbol->DynsymIndex = ++I;
return;
}
-
- if (In<ELFT>::GnuHashTab)
- // NB: It also sorts Symbols to meet the GNU hash table requirements.
- In<ELFT>::GnuHashTab->addSymbols(Symbols);
- else if (Config->EMachine == EM_MIPS)
- std::stable_sort(Symbols.begin(), Symbols.end(),
- [](const SymbolTableEntry &L, const SymbolTableEntry &R) {
- return sortMipsSymbols(L.Symbol, R.Symbol);
- });
- size_t I = 0;
- for (const SymbolTableEntry &S : Symbols)
- S.Symbol->DynsymIndex = ++I;
}
-template <class ELFT> void SymbolTableSection<ELFT>::addGlobal(SymbolBody *B) {
- Symbols.push_back({B, StrTabSec.addString(B->getName(), false)});
+void SymbolTableBaseSection::postThunkContents() {
+ if (this->Type == SHT_DYNSYM)
+ return;
+  // Move all local symbols before global symbols.
+ auto It = std::stable_partition(
+ Symbols.begin(), Symbols.end(), [](const SymbolTableEntry &S) {
+ return S.Symbol->isLocal() ||
+ S.Symbol->symbol()->computeBinding() == STB_LOCAL;
+ });
+ size_t NumLocals = It - Symbols.begin();
+ getParent()->Info = NumLocals + 1;
}
-template <class ELFT> void SymbolTableSection<ELFT>::addLocal(SymbolBody *B) {
- assert(!StrTabSec.isDynamic());
- ++NumLocals;
- Symbols.push_back({B, StrTabSec.addString(B->getName())});
+void SymbolTableBaseSection::addSymbol(SymbolBody *B) {
+ // Adding a local symbol to a .dynsym is a bug.
+ assert(this->Type != SHT_DYNSYM || !B->isLocal());
+
+ bool HashIt = B->isLocal();
+ Symbols.push_back({B, StrTabSec.addString(B->getName(), HashIt)});
}
-template <class ELFT>
-size_t SymbolTableSection<ELFT>::getSymbolIndex(SymbolBody *Body) {
- auto I = llvm::find_if(
- Symbols, [&](const SymbolTableEntry &E) { return E.Symbol == Body; });
+size_t SymbolTableBaseSection::getSymbolIndex(SymbolBody *Body) {
+ auto I = llvm::find_if(Symbols, [&](const SymbolTableEntry &E) {
+ if (E.Symbol == Body)
+ return true;
+ // This is used for -r, so we have to handle multiple section
+ // symbols being combined.
+ if (Body->Type == STT_SECTION && E.Symbol->Type == STT_SECTION)
+ return Body->getOutputSection() == E.Symbol->getOutputSection();
+ return false;
+ });
if (I == Symbols.end())
return 0;
return I - Symbols.begin() + 1;
}
+template <class ELFT>
+SymbolTableSection<ELFT>::SymbolTableSection(StringTableSection &StrTabSec)
+ : SymbolTableBaseSection(StrTabSec) {
+ this->Entsize = sizeof(Elf_Sym);
+}
+
+// Write the internal symbol table contents to the output symbol table.
template <class ELFT> void SymbolTableSection<ELFT>::writeTo(uint8_t *Buf) {
+ // The first entry is a null entry as per the ELF spec.
Buf += sizeof(Elf_Sym);
- // All symbols with STB_LOCAL binding precede the weak and global symbols.
- // .dynsym only contains global symbols.
- if (Config->Discard != DiscardPolicy::All && !StrTabSec.isDynamic())
- writeLocalSymbols(Buf);
-
- writeGlobalSymbols(Buf);
-}
-
-template <class ELFT>
-void SymbolTableSection<ELFT>::writeLocalSymbols(uint8_t *&Buf) {
- // Iterate over all input object files to copy their local symbols
- // to the output symbol table pointed by Buf.
+ auto *ESym = reinterpret_cast<Elf_Sym *>(Buf);
- for (auto I = Symbols.begin(); I != Symbols.begin() + NumLocals; ++I) {
- const DefinedRegular<ELFT> &Body = *cast<DefinedRegular<ELFT>>(I->Symbol);
- InputSectionBase<ELFT> *Section = Body.Section;
- auto *ESym = reinterpret_cast<Elf_Sym *>(Buf);
+ for (SymbolTableEntry &Ent : Symbols) {
+ SymbolBody *Body = Ent.Symbol;
- if (!Section) {
- ESym->st_shndx = SHN_ABS;
- ESym->st_value = Body.Value;
+ // Set st_info and st_other.
+ if (Body->isLocal()) {
+ ESym->setBindingAndType(STB_LOCAL, Body->Type);
} else {
- const OutputSectionBase *OutSec = Section->OutSec;
- ESym->st_shndx = OutSec->SectionIndex;
- ESym->st_value = OutSec->Addr + Section->getOffset(Body);
+ ESym->setBindingAndType(Body->symbol()->computeBinding(), Body->Type);
+ ESym->setVisibility(Body->symbol()->Visibility);
}
- ESym->st_name = I->StrTabOffset;
- ESym->st_size = Body.template getSize<ELFT>();
- ESym->setBindingAndType(STB_LOCAL, Body.Type);
- Buf += sizeof(*ESym);
- }
-}
-
-template <class ELFT>
-void SymbolTableSection<ELFT>::writeGlobalSymbols(uint8_t *Buf) {
- // Write the internal symbol table contents to the output symbol table
- // pointed by Buf.
- auto *ESym = reinterpret_cast<Elf_Sym *>(Buf);
-
- for (auto I = Symbols.begin() + NumLocals; I != Symbols.end(); ++I) {
- const SymbolTableEntry &S = *I;
- SymbolBody *Body = S.Symbol;
- size_t StrOff = S.StrTabOffset;
- uint8_t Type = Body->Type;
- uintX_t Size = Body->getSize<ELFT>();
+ ESym->st_name = Ent.StrTabOffset;
- ESym->setBindingAndType(Body->symbol()->computeBinding(), Type);
- ESym->st_size = Size;
- ESym->st_name = StrOff;
- ESym->setVisibility(Body->symbol()->Visibility);
- ESym->st_value = Body->getVA<ELFT>();
-
- if (const OutputSectionBase *OutSec = getOutputSection(Body)) {
+ // Set a section index.
+ if (const OutputSection *OutSec = Body->getOutputSection())
ESym->st_shndx = OutSec->SectionIndex;
- } else if (isa<DefinedRegular<ELFT>>(Body)) {
+ else if (isa<DefinedRegular>(Body))
ESym->st_shndx = SHN_ABS;
- } else if (isa<DefinedCommon>(Body)) {
+ else if (isa<DefinedCommon>(Body))
ESym->st_shndx = SHN_COMMON;
+
+ // Copy symbol size if it is a defined symbol. st_size is not significant
+ // for undefined symbols, so whether copying it or not is up to us if that's
+ // the case. We'll leave it as zero because by not setting a value, we can
+ // get the exact same outputs for two sets of input files that differ only
+ // in undefined symbol size in DSOs.
+ if (ESym->st_shndx != SHN_UNDEF)
+ ESym->st_size = Body->getSize<ELFT>();
+
+ // st_value is usually an address of a symbol, but that has a
+  // special meaning for uninstantiated common symbols (this can
+ // occur if -r is given).
+ if (!Config->DefineCommon && isa<DefinedCommon>(Body))
ESym->st_value = cast<DefinedCommon>(Body)->Alignment;
- }
+ else
+ ESym->st_value = Body->getVA();
- if (Config->EMachine == EM_MIPS) {
- // On MIPS we need to mark symbol which has a PLT entry and requires
- // pointer equality by STO_MIPS_PLT flag. That is necessary to help
- // dynamic linker distinguish such symbols and MIPS lazy-binding stubs.
- // https://sourceware.org/ml/binutils/2008-07/txt00000.txt
- if (Body->isInPlt() && Body->NeedsCopyOrPltAddr)
- ESym->st_other |= STO_MIPS_PLT;
- if (Config->Relocatable) {
- auto *D = dyn_cast<DefinedRegular<ELFT>>(Body);
- if (D && D->isMipsPIC())
- ESym->st_other |= STO_MIPS_PIC;
- }
- }
++ESym;
}
-}
-template <class ELFT>
-const OutputSectionBase *
-SymbolTableSection<ELFT>::getOutputSection(SymbolBody *Sym) {
- switch (Sym->kind()) {
- case SymbolBody::DefinedSyntheticKind:
- return cast<DefinedSynthetic>(Sym)->Section;
- case SymbolBody::DefinedRegularKind: {
- auto &D = cast<DefinedRegular<ELFT>>(*Sym);
- if (D.Section)
- return D.Section->OutSec;
- break;
- }
- case SymbolBody::DefinedCommonKind:
- if (!Config->DefineCommon)
- return nullptr;
- return In<ELFT>::Common->OutSec;
- case SymbolBody::SharedKind: {
- auto &SS = cast<SharedSymbol<ELFT>>(*Sym);
- if (SS.needsCopy())
- return SS.getBssSectionForCopy();
- break;
- }
- case SymbolBody::UndefinedKind:
- case SymbolBody::LazyArchiveKind:
- case SymbolBody::LazyObjectKind:
- break;
- }
- return nullptr;
-}
-
-template <class ELFT>
-GnuHashTableSection<ELFT>::GnuHashTableSection()
- : SyntheticSection<ELFT>(SHF_ALLOC, SHT_GNU_HASH, sizeof(uintX_t),
- ".gnu.hash") {
- this->Entsize = ELFT::Is64Bits ? 0 : 4;
-}
-
-template <class ELFT>
-unsigned GnuHashTableSection<ELFT>::calcNBuckets(unsigned NumHashed) {
- if (!NumHashed)
- return 0;
-
- // These values are prime numbers which are not greater than 2^(N-1) + 1.
- // In result, for any particular NumHashed we return a prime number
- // which is not greater than NumHashed.
- static const unsigned Primes[] = {
- 1, 1, 3, 3, 7, 13, 31, 61, 127, 251,
- 509, 1021, 2039, 4093, 8191, 16381, 32749, 65521, 131071};
-
- return Primes[std::min<unsigned>(Log2_32_Ceil(NumHashed),
- array_lengthof(Primes) - 1)];
-}
-
-// Bloom filter estimation: at least 8 bits for each hashed symbol.
-// GNU Hash table requirement: it should be a power of 2,
-// the minimum value is 1, even for an empty table.
-// Expected results for a 32-bit target:
-// calcMaskWords(0..4) = 1
-// calcMaskWords(5..8) = 2
-// calcMaskWords(9..16) = 4
-// For a 64-bit target:
-// calcMaskWords(0..8) = 1
-// calcMaskWords(9..16) = 2
-// calcMaskWords(17..32) = 4
-template <class ELFT>
-unsigned GnuHashTableSection<ELFT>::calcMaskWords(unsigned NumHashed) {
- if (!NumHashed)
- return 1;
- return NextPowerOf2((NumHashed - 1) / sizeof(Elf_Off));
-}
+  // On MIPS we need to mark symbols which have a PLT entry and require
+  // pointer equality by the STO_MIPS_PLT flag. That is necessary to help the
+  // dynamic linker distinguish such symbols from MIPS lazy-binding stubs.
+ // https://sourceware.org/ml/binutils/2008-07/txt00000.txt
+ if (Config->EMachine == EM_MIPS) {
+ auto *ESym = reinterpret_cast<Elf_Sym *>(Buf);
-template <class ELFT> void GnuHashTableSection<ELFT>::finalize() {
- unsigned NumHashed = Symbols.size();
- NBuckets = calcNBuckets(NumHashed);
- MaskWords = calcMaskWords(NumHashed);
- // Second hash shift estimation: just predefined values.
- Shift2 = ELFT::Is64Bits ? 6 : 5;
+ for (SymbolTableEntry &Ent : Symbols) {
+ SymbolBody *Body = Ent.Symbol;
+ if (Body->isInPlt() && Body->NeedsPltAddr)
+ ESym->st_other |= STO_MIPS_PLT;
- this->OutSec->Entsize = this->Entsize;
- this->OutSec->Link = this->Link = In<ELFT>::DynSymTab->OutSec->SectionIndex;
- this->Size = sizeof(Elf_Word) * 4 // Header
- + sizeof(Elf_Off) * MaskWords // Bloom Filter
- + sizeof(Elf_Word) * NBuckets // Hash Buckets
- + sizeof(Elf_Word) * NumHashed; // Hash Values
+ if (Config->Relocatable)
+ if (auto *D = dyn_cast<DefinedRegular>(Body))
+ if (D->isMipsPIC<ELFT>())
+ ESym->st_other |= STO_MIPS_PIC;
+ ++ESym;
+ }
+ }
}
-template <class ELFT> void GnuHashTableSection<ELFT>::writeTo(uint8_t *Buf) {
- writeHeader(Buf);
+// .hash and .gnu.hash sections contain on-disk hash tables that map
+// symbol names to their dynamic symbol table indices. Their purpose
+// is to help the dynamic linker resolve symbols quickly. If ELF files
+// don't have them, the dynamic linker has to do linear search on all
+// dynamic symbols, which makes programs slower. Therefore, a .hash
+// section is added to a DSO by default. A .gnu.hash is added if you
+// give the -hash-style=gnu or -hash-style=both option.
+//
+// The Unix semantics of resolving dynamic symbols is somewhat expensive.
+// Each ELF file has a list of DSOs that the ELF file depends on and a
+// list of dynamic symbols that need to be resolved from any of the
+// DSOs. That means resolving all dynamic symbols takes O(m)*O(n)
+// where m is the number of DSOs and n is the number of dynamic
+// symbols. For modern large programs, both m and n are large. So
+// making each step faster by using hash tables substantially
+// improves time to load programs.
+//
+// (Note that this is not the only way to design the shared library.
+// For instance, the Windows DLL takes a different approach. On
+// Windows, each dynamic symbol has the name of the DLL from which the symbol
+// has to be resolved. That makes the cost of symbol resolution O(n).
+// This disables some hacky techniques you can use on Unix such as
+// LD_PRELOAD, but this is arguably better semantics than the Unix ones.)
+//
+// Due to historical reasons, we have two different hash tables, .hash
+// and .gnu.hash. They are for the same purpose, and .gnu.hash is a new
+// and better version of .hash. .hash is just an on-disk hash table, but
+// .gnu.hash has a bloom filter in addition to a hash table to skip
+// DSOs very quickly. If you are sure that your dynamic linker knows
+// about .gnu.hash, you want to specify -hash-style=gnu. Otherwise, a
+// safe bet is to specify -hash-style=both for backward compatibility.
+GnuHashTableSection::GnuHashTableSection()
+ : SyntheticSection(SHF_ALLOC, SHT_GNU_HASH, Config->Wordsize, ".gnu.hash") {
+}
+
+void GnuHashTableSection::finalizeContents() {
+ getParent()->Link = InX::DynSymTab->getParent()->SectionIndex;
+
+  // Compute the bloom filter size in words. We want to allocate 8
+  // bits for each symbol. The size must be a power of two.
if (Symbols.empty())
- return;
+ MaskWords = 1;
+ else
+ MaskWords = NextPowerOf2((Symbols.size() - 1) / Config->Wordsize);
+
+ Size = 16; // Header
+ Size += Config->Wordsize * MaskWords; // Bloom filter
+ Size += NBuckets * 4; // Hash buckets
+ Size += Symbols.size() * 4; // Hash values
+}
+
+void GnuHashTableSection::writeTo(uint8_t *Buf) {
+ // Write a header.
+ write32(Buf, NBuckets, Config->Endianness);
+ write32(Buf + 4, InX::DynSymTab->getNumSymbols() - Symbols.size(),
+ Config->Endianness);
+ write32(Buf + 8, MaskWords, Config->Endianness);
+ write32(Buf + 12, getShift2(), Config->Endianness);
+ Buf += 16;
+
+ // Write a bloom filter and a hash table.
writeBloomFilter(Buf);
+ Buf += Config->Wordsize * MaskWords;
writeHashTable(Buf);
}
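// For reference, the on-disk layout produced by finalizeContents()/writeTo():
//   u32 nbuckets | u32 symndx | u32 maskwords | u32 shift2
//   | maskwords filter words (Config->Wordsize bytes each)
//   | nbuckets u32 bucket entries | one u32 hash value per hashed symbol
// where symndx is the dynsym index of the first hashed symbol, i.e.
// InX::DynSymTab->getNumSymbols() - Symbols.size().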
-template <class ELFT>
-void GnuHashTableSection<ELFT>::writeHeader(uint8_t *&Buf) {
- auto *P = reinterpret_cast<Elf_Word *>(Buf);
- *P++ = NBuckets;
- *P++ = In<ELFT>::DynSymTab->getNumSymbols() - Symbols.size();
- *P++ = MaskWords;
- *P++ = Shift2;
- Buf = reinterpret_cast<uint8_t *>(P);
-}
-
-template <class ELFT>
-void GnuHashTableSection<ELFT>::writeBloomFilter(uint8_t *&Buf) {
- unsigned C = sizeof(Elf_Off) * 8;
-
- auto *Masks = reinterpret_cast<Elf_Off *>(Buf);
- for (const SymbolData &Sym : Symbols) {
- size_t Pos = (Sym.Hash / C) & (MaskWords - 1);
- uintX_t V = (uintX_t(1) << (Sym.Hash % C)) |
- (uintX_t(1) << ((Sym.Hash >> Shift2) % C));
- Masks[Pos] |= V;
+// This function writes a 2-bit bloom filter. This bloom filter alone
+// usually filters out 80% or more of all symbol lookups [1].
+// The dynamic linker uses the hash table only when a symbol is not
+// filtered out by a bloom filter.
+//
+// [1] Ulrich Drepper (2011), "How To Write Shared Libraries" (Ver. 4.1.2),
+// p.9, https://www.akkadia.org/drepper/dsohowto.pdf
+void GnuHashTableSection::writeBloomFilter(uint8_t *Buf) {
+ const unsigned C = Config->Wordsize * 8;
+ for (const Entry &Sym : Symbols) {
+ size_t I = (Sym.Hash / C) & (MaskWords - 1);
+ uint64_t Val = readUint(Buf + I * Config->Wordsize);
+ Val |= uint64_t(1) << (Sym.Hash % C);
+ Val |= uint64_t(1) << ((Sym.Hash >> getShift2()) % C);
+ writeUint(Buf + I * Config->Wordsize, Val);
}
- Buf += sizeof(Elf_Off) * MaskWords;
}
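// A minimal sketch of the bit selection above for one symbol (the removed
// code computed the shift as 6 on 64-bit and 5 on 32-bit targets, which is
// what getShift2() is assumed to return):
//   size_t C = Config->Wordsize * 8;            // bits per filter word
//   size_t Word = (Hash / C) & (MaskWords - 1); // which filter word
//   uint64_t Bits = (uint64_t(1) << (Hash % C)) |
//                   (uint64_t(1) << ((Hash >> getShift2()) % C));
// The dynamic linker recomputes the same two bits; if either is clear, the
// symbol is definitely not in this file's dynamic symbol table and the hash
// table lookup is skipped.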
-template <class ELFT>
-void GnuHashTableSection<ELFT>::writeHashTable(uint8_t *Buf) {
- Elf_Word *Buckets = reinterpret_cast<Elf_Word *>(Buf);
- Elf_Word *Values = Buckets + NBuckets;
-
- int PrevBucket = -1;
- int I = 0;
- for (const SymbolData &Sym : Symbols) {
- int Bucket = Sym.Hash % NBuckets;
- assert(PrevBucket <= Bucket);
- if (Bucket != PrevBucket) {
- Buckets[Bucket] = Sym.Body->DynsymIndex;
- PrevBucket = Bucket;
- if (I > 0)
- Values[I - 1] |= 1;
- }
- Values[I] = Sym.Hash & ~1;
- ++I;
+void GnuHashTableSection::writeHashTable(uint8_t *Buf) {
+ // Group symbols by hash value.
+ std::vector<std::vector<Entry>> Syms(NBuckets);
+ for (const Entry &Ent : Symbols)
+ Syms[Ent.Hash % NBuckets].push_back(Ent);
+
+ // Write hash buckets. Hash buckets contain indices in the following
+ // hash value table.
+ uint32_t *Buckets = reinterpret_cast<uint32_t *>(Buf);
+ for (size_t I = 0; I < NBuckets; ++I)
+ if (!Syms[I].empty())
+ write32(Buckets + I, Syms[I][0].Body->DynsymIndex, Config->Endianness);
+
+ // Write a hash value table. It represents a sequence of chains that
+ // share the same hash modulo value. The last element of each chain
+ // is terminated by LSB 1.
+ uint32_t *Values = Buckets + NBuckets;
+ size_t I = 0;
+ for (std::vector<Entry> &Vec : Syms) {
+ if (Vec.empty())
+ continue;
+ for (const Entry &Ent : makeArrayRef(Vec).drop_back())
+ write32(Values + I++, Ent.Hash & ~1, Config->Endianness);
+ write32(Values + I++, Vec.back().Hash | 1, Config->Endianness);
}
- if (I > 0)
- Values[I - 1] |= 1;
}
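// Worked example (hypothetical numbers): three hashed symbols with
// DynsymIndex/Hash pairs (3, 12), (4, 24) and (5, 19) and NBuckets == 3 fall
// into buckets 0, 0 and 1, so the tables come out as
//   Buckets = [3, 5, 0]    // dynsym index of each bucket's first symbol
//   Values  = [12, 25, 19] // 24|1 and 19|1 terminate their chains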
static uint32_t hashGnu(StringRef Name) {
@@ -1347,58 +1556,76 @@ static uint32_t hashGnu(StringRef Name) {
return H;
}
+// Returns the number of hash buckets to accommodate a given number of elements.
+// We want to choose a moderate number that is not too small (which
+// causes too many hash collisions) and not too large (which wastes
+// disk space).
+//
+// We return a prime number because it (is believed to) achieve good
+// hash distribution.
+static size_t getBucketSize(size_t NumSymbols) {
+ // List of largest prime numbers that are not greater than 2^n + 1.
+ for (size_t N : {131071, 65521, 32749, 16381, 8191, 4093, 2039, 1021, 509,
+ 251, 127, 61, 31, 13, 7, 3, 1})
+ if (N <= NumSymbols)
+ return N;
+ return 0;
+}
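// For example, getBucketSize(0) == 0 (empty table), getBucketSize(2) == 1,
// getBucketSize(100) == 61 and getBucketSize(70000) == 65521.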
+
// Add symbols to this symbol hash table. Note that this function
// destructively sort a given vector -- which is needed because
// GNU-style hash table places some sorting requirements.
-template <class ELFT>
-void GnuHashTableSection<ELFT>::addSymbols(std::vector<SymbolTableEntry> &V) {
- // Ideally this will just be 'auto' but GCC 6.1 is not able
- // to deduce it correctly.
+void GnuHashTableSection::addSymbols(std::vector<SymbolTableEntry> &V) {
+ // We cannot use 'auto' for Mid because GCC 6.1 cannot deduce
+ // its type correctly.
std::vector<SymbolTableEntry>::iterator Mid =
std::stable_partition(V.begin(), V.end(), [](const SymbolTableEntry &S) {
return S.Symbol->isUndefined();
});
if (Mid == V.end())
return;
- for (auto I = Mid, E = V.end(); I != E; ++I) {
- SymbolBody *B = I->Symbol;
- size_t StrOff = I->StrTabOffset;
- Symbols.push_back({B, StrOff, hashGnu(B->getName())});
+
+ for (SymbolTableEntry &Ent : llvm::make_range(Mid, V.end())) {
+ SymbolBody *B = Ent.Symbol;
+ Symbols.push_back({B, Ent.StrTabOffset, hashGnu(B->getName())});
}
- unsigned NBuckets = calcNBuckets(Symbols.size());
+ NBuckets = getBucketSize(Symbols.size());
std::stable_sort(Symbols.begin(), Symbols.end(),
- [&](const SymbolData &L, const SymbolData &R) {
+ [&](const Entry &L, const Entry &R) {
return L.Hash % NBuckets < R.Hash % NBuckets;
});
V.erase(Mid, V.end());
- for (const SymbolData &Sym : Symbols)
- V.push_back({Sym.Body, Sym.STName});
+ for (const Entry &Ent : Symbols)
+ V.push_back({Ent.Body, Ent.StrTabOffset});
}
template <class ELFT>
HashTableSection<ELFT>::HashTableSection()
- : SyntheticSection<ELFT>(SHF_ALLOC, SHT_HASH, sizeof(Elf_Word), ".hash") {
- this->Entsize = sizeof(Elf_Word);
+ : SyntheticSection(SHF_ALLOC, SHT_HASH, 4, ".hash") {
+ this->Entsize = 4;
}
-template <class ELFT> void HashTableSection<ELFT>::finalize() {
- this->OutSec->Link = this->Link = In<ELFT>::DynSymTab->OutSec->SectionIndex;
- this->OutSec->Entsize = this->Entsize;
+template <class ELFT> void HashTableSection<ELFT>::finalizeContents() {
+ getParent()->Link = InX::DynSymTab->getParent()->SectionIndex;
- unsigned NumEntries = 2; // nbucket and nchain.
- NumEntries += In<ELFT>::DynSymTab->getNumSymbols(); // The chain entries.
+ unsigned NumEntries = 2; // nbucket and nchain.
+ NumEntries += InX::DynSymTab->getNumSymbols(); // The chain entries.
// Create as many buckets as there are symbols.
// FIXME: This is simplistic. We can try to optimize it, but implementing
// support for SHT_GNU_HASH is probably even more profitable.
- NumEntries += In<ELFT>::DynSymTab->getNumSymbols();
- this->Size = NumEntries * sizeof(Elf_Word);
+ NumEntries += InX::DynSymTab->getNumSymbols();
+ this->Size = NumEntries * 4;
}
template <class ELFT> void HashTableSection<ELFT>::writeTo(uint8_t *Buf) {
- unsigned NumSymbols = In<ELFT>::DynSymTab->getNumSymbols();
+ // A 32-bit integer type in the target endianness.
+ typedef typename ELFT::Word Elf_Word;
+
+ unsigned NumSymbols = InX::DynSymTab->getNumSymbols();
+
auto *P = reinterpret_cast<Elf_Word *>(Buf);
*P++ = NumSymbols; // nbucket
*P++ = NumSymbols; // nchain
@@ -1406,7 +1633,7 @@ template <class ELFT> void HashTableSection<ELFT>::writeTo(uint8_t *Buf) {
Elf_Word *Buckets = P;
Elf_Word *Chains = P + NumSymbols;
- for (const SymbolTableEntry &S : In<ELFT>::DynSymTab->getSymbols()) {
+ for (const SymbolTableEntry &S : InX::DynSymTab->getSymbols()) {
SymbolBody *Body = S.Symbol;
StringRef Name = Body->getName();
unsigned I = Body->DynsymIndex;
@@ -1416,78 +1643,69 @@ template <class ELFT> void HashTableSection<ELFT>::writeTo(uint8_t *Buf) {
}
}
-template <class ELFT>
-PltSection<ELFT>::PltSection()
- : SyntheticSection<ELFT>(SHF_ALLOC | SHF_EXECINSTR, SHT_PROGBITS, 16,
- ".plt") {}
+PltSection::PltSection(size_t S)
+ : SyntheticSection(SHF_ALLOC | SHF_EXECINSTR, SHT_PROGBITS, 16, ".plt"),
+ HeaderSize(S) {
+ // The PLT needs to be writable on SPARC as the dynamic linker will
+ // modify the instructions in the PLT entries.
+ if (Config->EMachine == EM_SPARCV9)
+ this->Flags |= SHF_WRITE;
+}
-template <class ELFT> void PltSection<ELFT>::writeTo(uint8_t *Buf) {
- // At beginning of PLT, we have code to call the dynamic linker
- // to resolve dynsyms at runtime. Write such code.
- Target->writePltHeader(Buf);
- size_t Off = Target->PltHeaderSize;
+void PltSection::writeTo(uint8_t *Buf) {
+  // At the beginning of the PLT, but not the IPLT, we have code to call the
+  // dynamic linker to resolve dynsyms at runtime. Write such code.
+ if (HeaderSize != 0)
+ Target->writePltHeader(Buf);
+ size_t Off = HeaderSize;
+  // The IPlt is immediately after the Plt; account for this in RelOff.
+ unsigned PltOff = getPltRelocOff();
for (auto &I : Entries) {
const SymbolBody *B = I.first;
- unsigned RelOff = I.second;
- uint64_t Got = B->getGotPltVA<ELFT>();
+ unsigned RelOff = I.second + PltOff;
+ uint64_t Got = B->getGotPltVA();
uint64_t Plt = this->getVA() + Off;
Target->writePlt(Buf + Off, Got, Plt, B->PltIndex, RelOff);
Off += Target->PltEntrySize;
}
}
-template <class ELFT> void PltSection<ELFT>::addEntry(SymbolBody &Sym) {
+template <class ELFT> void PltSection::addEntry(SymbolBody &Sym) {
Sym.PltIndex = Entries.size();
- unsigned RelOff = In<ELFT>::RelaPlt->getRelocOffset();
+ RelocationSection<ELFT> *PltRelocSection = In<ELFT>::RelaPlt;
+ if (HeaderSize == 0) {
+ PltRelocSection = In<ELFT>::RelaIplt;
+ Sym.IsInIplt = true;
+ }
+ unsigned RelOff = PltRelocSection->getRelocOffset();
Entries.push_back(std::make_pair(&Sym, RelOff));
}
-template <class ELFT> size_t PltSection<ELFT>::getSize() const {
- return Target->PltHeaderSize + Entries.size() * Target->PltEntrySize;
+size_t PltSection::getSize() const {
+ return HeaderSize + Entries.size() * Target->PltEntrySize;
}
-template <class ELFT>
-IpltSection<ELFT>::IpltSection()
- : SyntheticSection<ELFT>(SHF_ALLOC | SHF_EXECINSTR, SHT_PROGBITS, 16,
- ".plt") {}
-
-template <class ELFT> void IpltSection<ELFT>::writeTo(uint8_t *Buf) {
- // The IRelative relocations do not support lazy binding so no header is
- // needed
- size_t Off = 0;
- for (auto &I : Entries) {
- const SymbolBody *B = I.first;
- unsigned RelOff = I.second + In<ELFT>::Plt->getSize();
- uint64_t Got = B->getGotPltVA<ELFT>();
- uint64_t Plt = this->getVA() + Off;
- Target->writePlt(Buf + Off, Got, Plt, B->PltIndex, RelOff);
+// Some architectures place additional symbols in the PLT section. For
+// example, ARM uses mapping symbols to aid disassembly.
+void PltSection::addSymbols() {
+  // The PLT may have symbols defined for the header; the IPLT has no header.
+ if (HeaderSize != 0)
+ Target->addPltHeaderSymbols(this);
+ size_t Off = HeaderSize;
+ for (size_t I = 0; I < Entries.size(); ++I) {
+ Target->addPltSymbols(this, Off);
Off += Target->PltEntrySize;
}
}
-template <class ELFT> void IpltSection<ELFT>::addEntry(SymbolBody &Sym) {
- Sym.PltIndex = Entries.size();
- Sym.IsInIplt = true;
- unsigned RelOff = In<ELFT>::RelaIplt->getRelocOffset();
- Entries.push_back(std::make_pair(&Sym, RelOff));
+unsigned PltSection::getPltRelocOff() const {
+ return (HeaderSize == 0) ? InX::Plt->getSize() : 0;
}
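// The resulting .plt output is a regular PLT followed by a header-less IPLT
// (a second PltSection constructed with HeaderSize == 0):
//   [ PLT header | PLT entries | IPLT entries ]
// For the IPLT instance getPltRelocOff() returns InX::Plt->getSize(), which
// writeTo() adds to each entry's RelOff so the offsets account for the
// regular PLT that precedes it.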
-template <class ELFT> size_t IpltSection<ELFT>::getSize() const {
- return Entries.size() * Target->PltEntrySize;
-}
-
-template <class ELFT>
-GdbIndexSection<ELFT>::GdbIndexSection()
- : SyntheticSection<ELFT>(0, SHT_PROGBITS, 1, ".gdb_index"),
- StringPool(llvm::StringTableBuilder::ELF) {}
-
-template <class ELFT> void GdbIndexSection<ELFT>::parseDebugSections() {
- for (InputSectionBase<ELFT> *S : Symtab<ELFT>::X->Sections)
- if (InputSection<ELFT> *IS = dyn_cast<InputSection<ELFT>>(S))
- if (IS->OutSec && IS->Name == ".debug_info")
- readDwarf(IS);
-}
+GdbIndexSection::GdbIndexSection(std::vector<GdbIndexChunk> &&Chunks)
+ : SyntheticSection(0, SHT_PROGBITS, 1, ".gdb_index"),
+ StringPool(llvm::StringTableBuilder::ELF), Chunks(std::move(Chunks)) {}
// Iterative hash function for symbol's name is described in .gdb_index format
// specification. Note that we use one for version 5 to 7 here, it is different
@@ -1499,57 +1717,149 @@ static uint32_t hash(StringRef Str) {
return R;
}
-template <class ELFT>
-void GdbIndexSection<ELFT>::readDwarf(InputSection<ELFT> *I) {
- GdbIndexBuilder<ELFT> Builder(I);
- if (ErrorCount)
- return;
+static std::vector<CompilationUnitEntry> readCuList(DWARFContext &Dwarf) {
+ std::vector<CompilationUnitEntry> Ret;
+ for (std::unique_ptr<DWARFCompileUnit> &CU : Dwarf.compile_units())
+ Ret.push_back({CU->getOffset(), CU->getLength() + 4});
+ return Ret;
+}
+
+static std::vector<AddressEntry> readAddressArea(DWARFContext &Dwarf,
+ InputSection *Sec) {
+ std::vector<AddressEntry> Ret;
+
+ uint32_t CurrentCu = 0;
+ for (std::unique_ptr<DWARFCompileUnit> &CU : Dwarf.compile_units()) {
+ DWARFAddressRangesVector Ranges;
+ CU->collectAddressRanges(Ranges);
+
+ ArrayRef<InputSectionBase *> Sections = Sec->File->getSections();
+ for (DWARFAddressRange &R : Ranges) {
+ InputSectionBase *S = Sections[R.SectionIndex];
+ if (!S || S == &InputSection::Discarded || !S->Live)
+ continue;
+ // Range list with zero size has no effect.
+ if (R.LowPC == R.HighPC)
+ continue;
+ Ret.push_back({cast<InputSection>(S), R.LowPC, R.HighPC, CurrentCu});
+ }
+ ++CurrentCu;
+ }
+ return Ret;
+}
+
+static std::vector<NameTypeEntry> readPubNamesAndTypes(DWARFContext &Dwarf,
+ bool IsLE) {
+ StringRef Data[] = {Dwarf.getGnuPubNamesSection(),
+ Dwarf.getGnuPubTypesSection()};
- size_t CuId = CompilationUnits.size();
- std::vector<std::pair<uintX_t, uintX_t>> CuList = Builder.readCUList();
- CompilationUnits.insert(CompilationUnits.end(), CuList.begin(), CuList.end());
+ std::vector<NameTypeEntry> Ret;
+ for (StringRef D : Data) {
+ DWARFDebugPubTable PubTable(D, IsLE, true);
+ for (const DWARFDebugPubTable::Set &Set : PubTable.getData())
+ for (const DWARFDebugPubTable::Entry &Ent : Set.Entries)
+ Ret.push_back({Ent.Name, Ent.Descriptor.toBits()});
+ }
+ return Ret;
+}
- std::vector<AddressEntry<ELFT>> AddrArea = Builder.readAddressArea(CuId);
- AddressArea.insert(AddressArea.end(), AddrArea.begin(), AddrArea.end());
+static std::vector<InputSection *> getDebugInfoSections() {
+ std::vector<InputSection *> Ret;
+ for (InputSectionBase *S : InputSections)
+ if (InputSection *IS = dyn_cast<InputSection>(S))
+ if (IS->Name == ".debug_info")
+ Ret.push_back(IS);
+ return Ret;
+}
- std::vector<std::pair<StringRef, uint8_t>> NamesAndTypes =
- Builder.readPubNamesAndTypes();
+void GdbIndexSection::buildIndex() {
+ if (Chunks.empty())
+ return;
- for (std::pair<StringRef, uint8_t> &Pair : NamesAndTypes) {
- uint32_t Hash = hash(Pair.first);
- size_t Offset = StringPool.add(Pair.first);
+ uint32_t CuId = 0;
+ for (GdbIndexChunk &D : Chunks) {
+ for (AddressEntry &E : D.AddressArea)
+ E.CuIndex += CuId;
+
+ // Populate constant pool area.
+ for (NameTypeEntry &NameType : D.NamesAndTypes) {
+ uint32_t Hash = hash(NameType.Name);
+ size_t Offset = StringPool.add(NameType.Name);
+
+ bool IsNew;
+ GdbSymbol *Sym;
+ std::tie(IsNew, Sym) = SymbolTable.add(Hash, Offset);
+ if (IsNew) {
+ Sym->CuVectorIndex = CuVectors.size();
+ CuVectors.resize(CuVectors.size() + 1);
+ }
- bool IsNew;
- GdbSymbol *Sym;
- std::tie(IsNew, Sym) = SymbolTable.add(Hash, Offset);
- if (IsNew) {
- Sym->CuVectorIndex = CuVectors.size();
- CuVectors.push_back({{CuId, Pair.second}});
- continue;
+ CuVectors[Sym->CuVectorIndex].insert(CuId | (NameType.Type << 24));
}
- std::vector<std::pair<uint32_t, uint8_t>> &CuVec =
- CuVectors[Sym->CuVectorIndex];
- CuVec.push_back({CuId, Pair.second});
+ CuId += D.CompilationUnits.size();
+ }
+}
+
+static GdbIndexChunk readDwarf(DWARFContextInMemory &Dwarf, InputSection *Sec) {
+ GdbIndexChunk Ret;
+ Ret.DebugInfoSec = Sec;
+ Ret.CompilationUnits = readCuList(Dwarf);
+ Ret.AddressArea = readAddressArea(Dwarf, Sec);
+ Ret.NamesAndTypes = readPubNamesAndTypes(Dwarf, Config->IsLE);
+ return Ret;
+}
+
+template <class ELFT> GdbIndexSection *elf::createGdbIndex() {
+ std::vector<GdbIndexChunk> Chunks;
+ for (InputSection *Sec : getDebugInfoSections()) {
+ InputFile *F = Sec->File;
+ std::error_code EC;
+ ELFObjectFile<ELFT> Obj(F->MB, EC);
+ if (EC)
+ fatal(EC.message());
+ DWARFContextInMemory Dwarf(Obj, nullptr, [&](Error E) {
+ error(toString(F) + ": error parsing DWARF data:\n>>> " +
+ toString(std::move(E)));
+ return ErrorPolicy::Continue;
+ });
+ Chunks.push_back(readDwarf(Dwarf, Sec));
}
+ return make<GdbIndexSection>(std::move(Chunks));
+}
+
+static size_t getCuSize(std::vector<GdbIndexChunk> &C) {
+ size_t Ret = 0;
+ for (GdbIndexChunk &D : C)
+ Ret += D.CompilationUnits.size();
+ return Ret;
}
-template <class ELFT> void GdbIndexSection<ELFT>::finalize() {
+static size_t getAddressAreaSize(std::vector<GdbIndexChunk> &C) {
+ size_t Ret = 0;
+ for (GdbIndexChunk &D : C)
+ Ret += D.AddressArea.size();
+ return Ret;
+}
+
+void GdbIndexSection::finalizeContents() {
if (Finalized)
return;
Finalized = true;
- parseDebugSections();
+ buildIndex();
+
+ SymbolTable.finalizeContents();
  // The GdbIndex header consists of version fields
// and 5 more fields with different kinds of offsets.
- CuTypesOffset = CuListOffset + CompilationUnits.size() * CompilationUnitSize;
- SymTabOffset = CuTypesOffset + AddressArea.size() * AddressEntrySize;
+ CuTypesOffset = CuListOffset + getCuSize(Chunks) * CompilationUnitSize;
+ SymTabOffset = CuTypesOffset + getAddressAreaSize(Chunks) * AddressEntrySize;
ConstantPoolOffset =
SymTabOffset + SymbolTable.getCapacity() * SymTabEntrySize;
- for (std::vector<std::pair<uint32_t, uint8_t>> &CuVec : CuVectors) {
+ for (std::set<uint32_t> &CuVec : CuVectors) {
CuVectorsOffset.push_back(CuVectorsSize);
CuVectorsSize += OffsetTypeSize * (CuVec.size() + 1);
}
@@ -1558,12 +1868,12 @@ template <class ELFT> void GdbIndexSection<ELFT>::finalize() {
StringPool.finalizeInOrder();
}
-template <class ELFT> size_t GdbIndexSection<ELFT>::getSize() const {
- const_cast<GdbIndexSection<ELFT> *>(this)->finalize();
+size_t GdbIndexSection::getSize() const {
+ const_cast<GdbIndexSection *>(this)->finalizeContents();
return StringPoolOffset + StringPool.getSize();
}
-template <class ELFT> void GdbIndexSection<ELFT>::writeTo(uint8_t *Buf) {
+void GdbIndexSection::writeTo(uint8_t *Buf) {
write32le(Buf, 7); // Write version.
write32le(Buf + 4, CuListOffset); // CU list offset.
write32le(Buf + 8, CuTypesOffset); // Types CU list offset.
@@ -1573,19 +1883,24 @@ template <class ELFT> void GdbIndexSection<ELFT>::writeTo(uint8_t *Buf) {
Buf += 24;
// Write the CU list.
- for (std::pair<uintX_t, uintX_t> CU : CompilationUnits) {
- write64le(Buf, CU.first);
- write64le(Buf + 8, CU.second);
- Buf += 16;
+ for (GdbIndexChunk &D : Chunks) {
+ for (CompilationUnitEntry &Cu : D.CompilationUnits) {
+ write64le(Buf, D.DebugInfoSec->OutSecOff + Cu.CuOffset);
+ write64le(Buf + 8, Cu.CuLength);
+ Buf += 16;
+ }
}
// Write the address area.
- for (AddressEntry<ELFT> &E : AddressArea) {
- uintX_t BaseAddr = E.Section->OutSec->Addr + E.Section->getOffset(0);
- write64le(Buf, BaseAddr + E.LowAddress);
- write64le(Buf + 8, BaseAddr + E.HighAddress);
- write32le(Buf + 16, E.CuIndex);
- Buf += 20;
+ for (GdbIndexChunk &D : Chunks) {
+ for (AddressEntry &E : D.AddressArea) {
+ uint64_t BaseAddr =
+ E.Section->getParent()->Addr + E.Section->getOffset(0);
+ write64le(Buf, BaseAddr + E.LowAddress);
+ write64le(Buf + 8, BaseAddr + E.HighAddress);
+ write32le(Buf + 16, E.CuIndex);
+ Buf += 20;
+ }
}
// Write the symbol table.
@@ -1602,14 +1917,11 @@ template <class ELFT> void GdbIndexSection<ELFT>::writeTo(uint8_t *Buf) {
}
// Write the CU vectors into the constant pool.
- for (std::vector<std::pair<uint32_t, uint8_t>> &CuVec : CuVectors) {
+ for (std::set<uint32_t> &CuVec : CuVectors) {
write32le(Buf, CuVec.size());
Buf += 4;
- for (std::pair<uint32_t, uint8_t> &P : CuVec) {
- uint32_t Index = P.first;
- uint8_t Flags = P.second;
- Index |= Flags << 24;
- write32le(Buf, Index);
+ for (uint32_t Val : CuVec) {
+ write32le(Buf, Val);
Buf += 4;
}
}
@@ -1617,13 +1929,11 @@ template <class ELFT> void GdbIndexSection<ELFT>::writeTo(uint8_t *Buf) {
StringPool.write(Buf);
}
-template <class ELFT> bool GdbIndexSection<ELFT>::empty() const {
- return !Out<ELFT>::DebugInfo;
-}
+bool GdbIndexSection::empty() const { return !Out::DebugInfo; }
template <class ELFT>
EhFrameHeader<ELFT>::EhFrameHeader()
- : SyntheticSection<ELFT>(SHF_ALLOC, SHT_PROGBITS, 1, ".eh_frame_hdr") {}
+ : SyntheticSection(SHF_ALLOC, SHT_PROGBITS, 1, ".eh_frame_hdr") {}
// .eh_frame_hdr contains a binary search table of pointers to FDEs.
// Each entry of the search table consists of two values,
@@ -1644,11 +1954,11 @@ template <class ELFT> void EhFrameHeader<ELFT>::writeTo(uint8_t *Buf) {
Buf[1] = DW_EH_PE_pcrel | DW_EH_PE_sdata4;
Buf[2] = DW_EH_PE_udata4;
Buf[3] = DW_EH_PE_datarel | DW_EH_PE_sdata4;
- write32<E>(Buf + 4, Out<ELFT>::EhFrame->Addr - this->getVA() - 4);
+ write32<E>(Buf + 4, In<ELFT>::EhFrame->getParent()->Addr - this->getVA() - 4);
write32<E>(Buf + 8, Fdes.size());
Buf += 12;
- uintX_t VA = this->getVA();
+ uint64_t VA = this->getVA();
for (FdeData &Fde : Fdes) {
write32<E>(Buf, Fde.Pc - VA);
write32<E>(Buf + 4, Fde.FdeVA - VA);
@@ -1658,7 +1968,7 @@ template <class ELFT> void EhFrameHeader<ELFT>::writeTo(uint8_t *Buf) {
template <class ELFT> size_t EhFrameHeader<ELFT>::getSize() const {
// .eh_frame_hdr has a 12-byte header followed by an array of FDEs.
- return 12 + Out<ELFT>::EhFrame->NumFdes * 8;
+ return 12 + In<ELFT>::EhFrame->NumFdes * 8;
}
template <class ELFT>
@@ -1667,13 +1977,13 @@ void EhFrameHeader<ELFT>::addFde(uint32_t Pc, uint32_t FdeVA) {
}
template <class ELFT> bool EhFrameHeader<ELFT>::empty() const {
- return Out<ELFT>::EhFrame->empty();
+ return In<ELFT>::EhFrame->empty();
}
template <class ELFT>
VersionDefinitionSection<ELFT>::VersionDefinitionSection()
- : SyntheticSection<ELFT>(SHF_ALLOC, SHT_GNU_verdef, sizeof(uint32_t),
- ".gnu.version_d") {}
+ : SyntheticSection(SHF_ALLOC, SHT_GNU_verdef, sizeof(uint32_t),
+ ".gnu.version_d") {}
static StringRef getFileDefName() {
if (!Config->SoName.empty())
@@ -1681,17 +1991,17 @@ static StringRef getFileDefName() {
return Config->OutputFile;
}
-template <class ELFT> void VersionDefinitionSection<ELFT>::finalize() {
- FileDefNameOff = In<ELFT>::DynStrTab->addString(getFileDefName());
+template <class ELFT> void VersionDefinitionSection<ELFT>::finalizeContents() {
+ FileDefNameOff = InX::DynStrTab->addString(getFileDefName());
for (VersionDefinition &V : Config->VersionDefinitions)
- V.NameOff = In<ELFT>::DynStrTab->addString(V.Name);
+ V.NameOff = InX::DynStrTab->addString(V.Name);
- this->OutSec->Link = this->Link = In<ELFT>::DynStrTab->OutSec->SectionIndex;
+ getParent()->Link = InX::DynStrTab->getParent()->SectionIndex;
// sh_info should be set to the number of definitions. This fact is missing
// from the documentation, but was confirmed by the binutils community:
// https://sourceware.org/ml/binutils/2014-11/msg00355.html
- this->OutSec->Info = this->Info = getVerDefNum();
+ getParent()->Info = getVerDefNum();
}
template <class ELFT>
@@ -1731,23 +2041,24 @@ template <class ELFT> size_t VersionDefinitionSection<ELFT>::getSize() const {
template <class ELFT>
VersionTableSection<ELFT>::VersionTableSection()
- : SyntheticSection<ELFT>(SHF_ALLOC, SHT_GNU_versym, sizeof(uint16_t),
- ".gnu.version") {}
+ : SyntheticSection(SHF_ALLOC, SHT_GNU_versym, sizeof(uint16_t),
+ ".gnu.version") {
+ this->Entsize = sizeof(Elf_Versym);
+}
-template <class ELFT> void VersionTableSection<ELFT>::finalize() {
- this->OutSec->Entsize = this->Entsize = sizeof(Elf_Versym);
+template <class ELFT> void VersionTableSection<ELFT>::finalizeContents() {
// As of June 2016, the GNU docs do not mention that the sh_link field
// should be set, but the Sun docs do. readelf also relies on this field.
- this->OutSec->Link = this->Link = In<ELFT>::DynSymTab->OutSec->SectionIndex;
+ getParent()->Link = InX::DynSymTab->getParent()->SectionIndex;
}
template <class ELFT> size_t VersionTableSection<ELFT>::getSize() const {
- return sizeof(Elf_Versym) * (In<ELFT>::DynSymTab->getSymbols().size() + 1);
+ return sizeof(Elf_Versym) * (InX::DynSymTab->getSymbols().size() + 1);
}
template <class ELFT> void VersionTableSection<ELFT>::writeTo(uint8_t *Buf) {
auto *OutVersym = reinterpret_cast<Elf_Versym *>(Buf) + 1;
- for (const SymbolTableEntry &S : In<ELFT>::DynSymTab->getSymbols()) {
+ for (const SymbolTableEntry &S : InX::DynSymTab->getSymbols()) {
OutVersym->vs_index = S.Symbol->symbol()->VersionId;
++OutVersym;
}
@@ -1759,8 +2070,8 @@ template <class ELFT> bool VersionTableSection<ELFT>::empty() const {
template <class ELFT>
VersionNeedSection<ELFT>::VersionNeedSection()
- : SyntheticSection<ELFT>(SHF_ALLOC, SHT_GNU_verneed, sizeof(uint32_t),
- ".gnu.version_r") {
+ : SyntheticSection(SHF_ALLOC, SHT_GNU_verneed, sizeof(uint32_t),
+ ".gnu.version_r") {
// Identifiers in verneed section start at 2 because 0 and 1 are reserved
// for VER_NDX_LOCAL and VER_NDX_GLOBAL.
// The first identifiers are reserved by the verdef section if it exists.
@@ -1768,24 +2079,27 @@ VersionNeedSection<ELFT>::VersionNeedSection()
}
template <class ELFT>
-void VersionNeedSection<ELFT>::addSymbol(SharedSymbol<ELFT> *SS) {
- if (!SS->Verdef) {
+void VersionNeedSection<ELFT>::addSymbol(SharedSymbol *SS) {
+ auto *Ver = reinterpret_cast<const typename ELFT::Verdef *>(SS->Verdef);
+ if (!Ver) {
SS->symbol()->VersionId = VER_NDX_GLOBAL;
return;
}
- SharedFile<ELFT> *F = SS->file();
+
+ auto *File = cast<SharedFile<ELFT>>(SS->File);
+
// If we don't already know that we need an Elf_Verneed for this DSO, prepare
// to create one by adding it to our needed list and creating a dynstr entry
// for the soname.
- if (F->VerdefMap.empty())
- Needed.push_back({F, In<ELFT>::DynStrTab->addString(F->getSoName())});
- typename SharedFile<ELFT>::NeededVer &NV = F->VerdefMap[SS->Verdef];
+ if (File->VerdefMap.empty())
+ Needed.push_back({File, InX::DynStrTab->addString(File->SoName)});
+ typename SharedFile<ELFT>::NeededVer &NV = File->VerdefMap[Ver];
// If we don't already know that we need an Elf_Vernaux for this Elf_Verdef,
// prepare to create one by allocating a version identifier and creating a
// dynstr entry for the version name.
if (NV.Index == 0) {
- NV.StrTab = In<ELFT>::DynStrTab->addString(
- SS->file()->getStringTable().data() + SS->Verdef->getAux()->vda_name);
+ NV.StrTab = InX::DynStrTab->addString(File->getStringTable().data() +
+ Ver->getAux()->vda_name);
NV.Index = NextIndex++;
}
SS->symbol()->VersionId = NV.Index;
@@ -1826,9 +2140,9 @@ template <class ELFT> void VersionNeedSection<ELFT>::writeTo(uint8_t *Buf) {
Verneed[-1].vn_next = 0;
}
-template <class ELFT> void VersionNeedSection<ELFT>::finalize() {
- this->OutSec->Link = this->Link = In<ELFT>::DynStrTab->OutSec->SectionIndex;
- this->OutSec->Info = this->Info = Needed.size();
+template <class ELFT> void VersionNeedSection<ELFT>::finalizeContents() {
+ getParent()->Link = InX::DynStrTab->getParent()->SectionIndex;
+ getParent()->Info = Needed.size();
}
template <class ELFT> size_t VersionNeedSection<ELFT>::getSize() const {
@@ -1842,53 +2156,219 @@ template <class ELFT> bool VersionNeedSection<ELFT>::empty() const {
return getNeedNum() == 0;
}
-template <class ELFT>
-MipsRldMapSection<ELFT>::MipsRldMapSection()
- : SyntheticSection<ELFT>(SHF_ALLOC | SHF_WRITE, SHT_PROGBITS,
- sizeof(typename ELFT::uint), ".rld_map") {}
+MergeSyntheticSection::MergeSyntheticSection(StringRef Name, uint32_t Type,
+ uint64_t Flags, uint32_t Alignment)
+ : SyntheticSection(Flags, Type, Alignment, Name),
+ Builder(StringTableBuilder::RAW, Alignment) {}
+
+void MergeSyntheticSection::addSection(MergeInputSection *MS) {
+ MS->Parent = this;
+ Sections.push_back(MS);
+}
+
+void MergeSyntheticSection::writeTo(uint8_t *Buf) { Builder.write(Buf); }
+
+bool MergeSyntheticSection::shouldTailMerge() const {
+ return (this->Flags & SHF_STRINGS) && Config->Optimize >= 2;
+}
+
+void MergeSyntheticSection::finalizeTailMerge() {
+ // Add all string pieces to the string table builder to create section
+ // contents.
+ for (MergeInputSection *Sec : Sections)
+ for (size_t I = 0, E = Sec->Pieces.size(); I != E; ++I)
+ if (Sec->Pieces[I].Live)
+ Builder.add(Sec->getData(I));
+
+ // Fix the string table content. After this, the contents will never change.
+ Builder.finalize();
+
+ // finalize() fixed tail-optimized strings, so we can now get
+ // offsets of strings. Get an offset for each string and save it
+ // to a corresponding StringPiece for easy access.
+ for (MergeInputSection *Sec : Sections)
+ for (size_t I = 0, E = Sec->Pieces.size(); I != E; ++I)
+ if (Sec->Pieces[I].Live)
+ Sec->Pieces[I].OutputOff = Builder.getOffset(Sec->getData(I));
+}
+
+void MergeSyntheticSection::finalizeNoTailMerge() {
+ // Add all string pieces to the string table builder to create section
+ // contents. Because we are not tail-optimizing, offsets of strings are
+ // fixed when they are added to the builder (string table builder contains
+ // a hash table from strings to offsets).
+ for (MergeInputSection *Sec : Sections)
+ for (size_t I = 0, E = Sec->Pieces.size(); I != E; ++I)
+ if (Sec->Pieces[I].Live)
+ Sec->Pieces[I].OutputOff = Builder.add(Sec->getData(I));
+
+ Builder.finalizeInOrder();
+}
+
+void MergeSyntheticSection::finalizeContents() {
+ if (shouldTailMerge())
+ finalizeTailMerge();
+ else
+ finalizeNoTailMerge();
+}
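// Illustrative example (not from the patch) of what the two finalizers do:
// given the live pieces "foobar" and "bar" from SHF_STRINGS sections at -O2,
// finalizeTailMerge() may store "foobar" once and hand out
// getOffset("bar") == getOffset("foobar") + 3, so the shorter string lives in
// the tail of the longer one; finalizeNoTailMerge() simply appends each
// distinct string as add() is called, so every string keeps its own copy and
// its offset is known immediately.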
+
+size_t MergeSyntheticSection::getSize() const { return Builder.getSize(); }
+
+// This function decompresses compressed sections and scans over the input
+// sections to create mergeable synthetic sections. It removes
+// MergeInputSections from the input section array and adds new synthetic
+// sections at the location of the first input section that it replaces. It then
+// finalizes each synthetic section in order to compute an output offset for
+// each piece of each input section.
+void elf::decompressAndMergeSections() {
+ // splitIntoPieces needs to be called on each MergeInputSection before calling
+ // finalizeContents(). Do that first.
+ parallelForEach(InputSections.begin(), InputSections.end(),
+ [](InputSectionBase *S) {
+ if (!S->Live)
+ return;
+ if (Decompressor::isCompressedELFSection(S->Flags, S->Name))
+ S->uncompress();
+ if (auto *MS = dyn_cast<MergeInputSection>(S))
+ MS->splitIntoPieces();
+ });
+
+ std::vector<MergeSyntheticSection *> MergeSections;
+ for (InputSectionBase *&S : InputSections) {
+ MergeInputSection *MS = dyn_cast<MergeInputSection>(S);
+ if (!MS)
+ continue;
+
+ // We do not want to handle sections that are not alive, so just remove
+ // them instead of trying to merge.
+ if (!MS->Live)
+ continue;
+
+ StringRef OutsecName = getOutputSectionName(MS->Name);
+ uint64_t Flags = MS->Flags & ~(uint64_t)SHF_GROUP;
+ uint32_t Alignment = std::max<uint32_t>(MS->Alignment, MS->Entsize);
+
+ auto I = llvm::find_if(MergeSections, [=](MergeSyntheticSection *Sec) {
+ return Sec->Name == OutsecName && Sec->Flags == Flags &&
+ Sec->Alignment == Alignment;
+ });
+ if (I == MergeSections.end()) {
+ MergeSyntheticSection *Syn =
+ make<MergeSyntheticSection>(OutsecName, MS->Type, Flags, Alignment);
+ MergeSections.push_back(Syn);
+ I = std::prev(MergeSections.end());
+ S = Syn;
+ } else {
+ S = nullptr;
+ }
+ (*I)->addSection(MS);
+ }
+ for (auto *MS : MergeSections)
+ MS->finalizeContents();
-template <class ELFT> void MipsRldMapSection<ELFT>::writeTo(uint8_t *Buf) {
- // Apply filler from linker script.
- uint64_t Filler = Script<ELFT>::X->getFiller(this->Name);
- Filler = (Filler << 32) | Filler;
- memcpy(Buf, &Filler, getSize());
+ std::vector<InputSectionBase *> &V = InputSections;
+ V.erase(std::remove(V.begin(), V.end(), nullptr), V.end());
}
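// Example of the grouping above (illustrative; the section names are
// assumptions): two objects that each contribute a live ".rodata.str1.1"
// MergeInputSection with identical flags and alignment are attached to one
// MergeSyntheticSection, typically named ".rodata" by getOutputSectionName().
// The first input section's slot in InputSections is replaced by that
// synthetic section; the second slot is set to nullptr and dropped by the
// std::remove pass at the end.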
-template <class ELFT>
-ARMExidxSentinelSection<ELFT>::ARMExidxSentinelSection()
- : SyntheticSection<ELFT>(SHF_ALLOC | SHF_LINK_ORDER, SHT_ARM_EXIDX,
- sizeof(typename ELFT::uint), ".ARM.exidx") {}
+MipsRldMapSection::MipsRldMapSection()
+ : SyntheticSection(SHF_ALLOC | SHF_WRITE, SHT_PROGBITS, Config->Wordsize,
+ ".rld_map") {}
+
+ARMExidxSentinelSection::ARMExidxSentinelSection()
+ : SyntheticSection(SHF_ALLOC | SHF_LINK_ORDER, SHT_ARM_EXIDX,
+ Config->Wordsize, ".ARM.exidx") {}
// Write a terminating sentinel entry to the end of the .ARM.exidx table.
// This section will have been sorted last in the .ARM.exidx table.
// This table entry will have the form:
// | PREL31 upper bound of code that has exception tables | EXIDX_CANTUNWIND |
-template <class ELFT>
-void ARMExidxSentinelSection<ELFT>::writeTo(uint8_t *Buf) {
- // Get the InputSection before us, we are by definition last
- auto RI = cast<OutputSection<ELFT>>(this->OutSec)->Sections.rbegin();
- InputSection<ELFT> *LE = *(++RI);
- InputSection<ELFT> *LC = cast<InputSection<ELFT>>(LE->getLinkOrderDep());
- uint64_t S = LC->OutSec->Addr + LC->getOffset(LC->getSize());
- uint64_t P = this->getVA();
+// The sentinel must have the PREL31 value of an address higher than any
+// address described by any other table entry.
+void ARMExidxSentinelSection::writeTo(uint8_t *Buf) {
+ // The Sections are sorted in order of ascending PREL31 address with the
+ // sentinel last. We need to find the InputSection that precedes the
+ // sentinel. By construction the sentinel is in the same (last)
+ // InputSectionDescription as the InputSection that precedes it.
+ OutputSectionCommand *C = Script->getCmd(getParent());
+ auto ISD = std::find_if(C->Commands.rbegin(), C->Commands.rend(),
+ [](const BaseCommand *Base) {
+ return isa<InputSectionDescription>(Base);
+ });
+ auto L = cast<InputSectionDescription>(*ISD);
+ InputSection *Highest = L->Sections[L->Sections.size() - 2];
+ InputSection *LS = Highest->getLinkOrderDep();
+ uint64_t S = LS->getParent()->Addr + LS->getOffset(LS->getSize());
+ uint64_t P = getVA();
Target->relocateOne(Buf, R_ARM_PREL31, S - P);
write32le(Buf + 4, 0x1);
}
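// Worked example with illustrative addresses: if the last piece of code
// covered by the table ends at S = 0x11234 and the sentinel entry itself is
// placed at P = 0x20000, the first word receives the PREL31 encoding of
// S - P = -0xEDCC and the second word is 0x1 (EXIDX_CANTUNWIND), telling the
// unwinder that addresses at or above S have no unwind information.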
-template InputSection<ELF32LE> *elf::createCommonSection();
-template InputSection<ELF32BE> *elf::createCommonSection();
-template InputSection<ELF64LE> *elf::createCommonSection();
-template InputSection<ELF64BE> *elf::createCommonSection();
-
-template InputSection<ELF32LE> *elf::createInterpSection();
-template InputSection<ELF32BE> *elf::createInterpSection();
-template InputSection<ELF64LE> *elf::createInterpSection();
-template InputSection<ELF64BE> *elf::createInterpSection();
-
-template MergeInputSection<ELF32LE> *elf::createCommentSection();
-template MergeInputSection<ELF32BE> *elf::createCommentSection();
-template MergeInputSection<ELF64LE> *elf::createCommentSection();
-template MergeInputSection<ELF64BE> *elf::createCommentSection();
+ThunkSection::ThunkSection(OutputSection *OS, uint64_t Off)
+ : SyntheticSection(SHF_ALLOC | SHF_EXECINSTR, SHT_PROGBITS,
+ Config->Wordsize, ".text.thunk") {
+ this->Parent = OS;
+ this->OutSecOff = Off;
+}
+
+void ThunkSection::addThunk(Thunk *T) {
+ uint64_t Off = alignTo(Size, T->Alignment);
+ T->Offset = Off;
+ Thunks.push_back(T);
+ T->addSymbols(*this);
+ Size = Off + T->size();
+}
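// Example of the offset arithmetic above (illustrative numbers): with
// Size == 10 and a thunk whose Alignment is 8 and size() is 12,
// alignTo(10, 8) == 16, so the thunk is placed at offset 16 and the section
// Size becomes 28.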
+
+void ThunkSection::writeTo(uint8_t *Buf) {
+ for (const Thunk *T : Thunks)
+ T->writeTo(Buf + T->Offset, *this);
+}
+
+InputSection *ThunkSection::getTargetInputSection() const {
+ const Thunk *T = Thunks.front();
+ return T->getTargetInputSection();
+}
+
+InputSection *InX::ARMAttributes;
+BssSection *InX::Bss;
+BssSection *InX::BssRelRo;
+BuildIdSection *InX::BuildId;
+InputSection *InX::Common;
+SyntheticSection *InX::Dynamic;
+StringTableSection *InX::DynStrTab;
+SymbolTableBaseSection *InX::DynSymTab;
+InputSection *InX::Interp;
+GdbIndexSection *InX::GdbIndex;
+GotSection *InX::Got;
+GotPltSection *InX::GotPlt;
+GnuHashTableSection *InX::GnuHashTab;
+IgotPltSection *InX::IgotPlt;
+MipsGotSection *InX::MipsGot;
+MipsRldMapSection *InX::MipsRldMap;
+PltSection *InX::Plt;
+PltSection *InX::Iplt;
+StringTableSection *InX::ShStrTab;
+StringTableSection *InX::StrTab;
+SymbolTableBaseSection *InX::SymTab;
+
+template GdbIndexSection *elf::createGdbIndex<ELF32LE>();
+template GdbIndexSection *elf::createGdbIndex<ELF32BE>();
+template GdbIndexSection *elf::createGdbIndex<ELF64LE>();
+template GdbIndexSection *elf::createGdbIndex<ELF64BE>();
+
+template void PltSection::addEntry<ELF32LE>(SymbolBody &Sym);
+template void PltSection::addEntry<ELF32BE>(SymbolBody &Sym);
+template void PltSection::addEntry<ELF64LE>(SymbolBody &Sym);
+template void PltSection::addEntry<ELF64BE>(SymbolBody &Sym);
+
+template InputSection *elf::createCommonSection<ELF32LE>();
+template InputSection *elf::createCommonSection<ELF32BE>();
+template InputSection *elf::createCommonSection<ELF64LE>();
+template InputSection *elf::createCommonSection<ELF64BE>();
+
+template MergeInputSection *elf::createCommentSection<ELF32LE>();
+template MergeInputSection *elf::createCommentSection<ELF32BE>();
+template MergeInputSection *elf::createCommentSection<ELF64LE>();
+template MergeInputSection *elf::createCommentSection<ELF64BE>();
template class elf::MipsAbiFlagsSection<ELF32LE>;
template class elf::MipsAbiFlagsSection<ELF32BE>;
@@ -1905,36 +2385,6 @@ template class elf::MipsReginfoSection<ELF32BE>;
template class elf::MipsReginfoSection<ELF64LE>;
template class elf::MipsReginfoSection<ELF64BE>;
-template class elf::BuildIdSection<ELF32LE>;
-template class elf::BuildIdSection<ELF32BE>;
-template class elf::BuildIdSection<ELF64LE>;
-template class elf::BuildIdSection<ELF64BE>;
-
-template class elf::GotSection<ELF32LE>;
-template class elf::GotSection<ELF32BE>;
-template class elf::GotSection<ELF64LE>;
-template class elf::GotSection<ELF64BE>;
-
-template class elf::MipsGotSection<ELF32LE>;
-template class elf::MipsGotSection<ELF32BE>;
-template class elf::MipsGotSection<ELF64LE>;
-template class elf::MipsGotSection<ELF64BE>;
-
-template class elf::GotPltSection<ELF32LE>;
-template class elf::GotPltSection<ELF32BE>;
-template class elf::GotPltSection<ELF64LE>;
-template class elf::GotPltSection<ELF64BE>;
-
-template class elf::IgotPltSection<ELF32LE>;
-template class elf::IgotPltSection<ELF32BE>;
-template class elf::IgotPltSection<ELF64LE>;
-template class elf::IgotPltSection<ELF64BE>;
-
-template class elf::StringTableSection<ELF32LE>;
-template class elf::StringTableSection<ELF32BE>;
-template class elf::StringTableSection<ELF64LE>;
-template class elf::StringTableSection<ELF64BE>;
-
template class elf::DynamicSection<ELF32LE>;
template class elf::DynamicSection<ELF32BE>;
template class elf::DynamicSection<ELF64LE>;
@@ -1950,31 +2400,11 @@ template class elf::SymbolTableSection<ELF32BE>;
template class elf::SymbolTableSection<ELF64LE>;
template class elf::SymbolTableSection<ELF64BE>;
-template class elf::GnuHashTableSection<ELF32LE>;
-template class elf::GnuHashTableSection<ELF32BE>;
-template class elf::GnuHashTableSection<ELF64LE>;
-template class elf::GnuHashTableSection<ELF64BE>;
-
template class elf::HashTableSection<ELF32LE>;
template class elf::HashTableSection<ELF32BE>;
template class elf::HashTableSection<ELF64LE>;
template class elf::HashTableSection<ELF64BE>;
-template class elf::PltSection<ELF32LE>;
-template class elf::PltSection<ELF32BE>;
-template class elf::PltSection<ELF64LE>;
-template class elf::PltSection<ELF64BE>;
-
-template class elf::IpltSection<ELF32LE>;
-template class elf::IpltSection<ELF32BE>;
-template class elf::IpltSection<ELF64LE>;
-template class elf::IpltSection<ELF64BE>;
-
-template class elf::GdbIndexSection<ELF32LE>;
-template class elf::GdbIndexSection<ELF32BE>;
-template class elf::GdbIndexSection<ELF64LE>;
-template class elf::GdbIndexSection<ELF64BE>;
-
template class elf::EhFrameHeader<ELF32LE>;
template class elf::EhFrameHeader<ELF32BE>;
template class elf::EhFrameHeader<ELF64LE>;
@@ -1995,12 +2425,7 @@ template class elf::VersionDefinitionSection<ELF32BE>;
template class elf::VersionDefinitionSection<ELF64LE>;
template class elf::VersionDefinitionSection<ELF64BE>;
-template class elf::MipsRldMapSection<ELF32LE>;
-template class elf::MipsRldMapSection<ELF32BE>;
-template class elf::MipsRldMapSection<ELF64LE>;
-template class elf::MipsRldMapSection<ELF64BE>;
-
-template class elf::ARMExidxSentinelSection<ELF32LE>;
-template class elf::ARMExidxSentinelSection<ELF32BE>;
-template class elf::ARMExidxSentinelSection<ELF64LE>;
-template class elf::ARMExidxSentinelSection<ELF64BE>;
+template class elf::EhFrameSection<ELF32LE>;
+template class elf::EhFrameSection<ELF32BE>;
+template class elf::EhFrameSection<ELF64LE>;
+template class elf::EhFrameSection<ELF64BE>;
diff --git a/gnu/llvm/tools/lld/tools/lld/lld.cpp b/gnu/llvm/tools/lld/tools/lld/lld.cpp
index 037f43c8cb6..4a68ed7cab9 100644
--- a/gnu/llvm/tools/lld/tools/lld/lld.cpp
+++ b/gnu/llvm/tools/lld/tools/lld/lld.cpp
@@ -43,9 +43,9 @@ LLVM_ATTRIBUTE_NORETURN static void die(const Twine &S) {
static Flavor getFlavor(StringRef S) {
return StringSwitch<Flavor>(S)
- .Cases("ld", "ld.lld", "gnu", Gnu)
- .Case("link", WinLink)
- .Case("darwin", Darwin)
+ .CasesLower("ld", "ld.lld", "gnu", Gnu)
+ .CaseLower("link", WinLink)
+ .CaseLower("darwin", Darwin)
.Default(Invalid);
}
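// Effect of the change above (illustrative inputs): the flavor lookup is now
// case-insensitive, so for example
//   getFlavor("ld.lld") -> Gnu
//   getFlavor("LD.LLD") -> Gnu      (previously Invalid)
//   getFlavor("Link")   -> WinLink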
@@ -110,6 +110,6 @@ int main(int Argc, const char **Argv) {
#endif
default:
die("lld is a generic driver.\n"
- "Invoke ld.lld (Unix), ld (Mac) or lld-link (Windows) instead.");
+ "Invoke ld.lld (Unix), ld (macOS) or lld-link (Windows) instead.");
}
}