summaryrefslogtreecommitdiffstats
path: root/gnu/llvm/lib/Target/X86/X86InstructionSelector.cpp
diff options
context:
space:
mode:
Diffstat (limited to 'gnu/llvm/lib/Target/X86/X86InstructionSelector.cpp')
-rw-r--r--gnu/llvm/lib/Target/X86/X86InstructionSelector.cpp461
1 files changed, 344 insertions, 117 deletions
diff --git a/gnu/llvm/lib/Target/X86/X86InstructionSelector.cpp b/gnu/llvm/lib/Target/X86/X86InstructionSelector.cpp
index 859d3288db8..44bbc3f1b3f 100644
--- a/gnu/llvm/lib/Target/X86/X86InstructionSelector.cpp
+++ b/gnu/llvm/lib/Target/X86/X86InstructionSelector.cpp
@@ -1,4 +1,4 @@
-//===- X86InstructionSelector.cpp ----------------------------*- C++ -*-==//
+//===- X86InstructionSelector.cpp -----------------------------------------===//
//
// The LLVM Compiler Infrastructure
//
@@ -12,6 +12,7 @@
/// \todo This should be generated by TableGen.
//===----------------------------------------------------------------------===//
+#include "MCTargetDesc/X86BaseInfo.h"
#include "X86InstrBuilder.h"
#include "X86InstrInfo.h"
#include "X86RegisterBankInfo.h"
@@ -19,27 +20,36 @@
#include "X86Subtarget.h"
#include "X86TargetMachine.h"
#include "llvm/CodeGen/GlobalISel/InstructionSelector.h"
+#include "llvm/CodeGen/GlobalISel/InstructionSelectorImpl.h"
+#include "llvm/CodeGen/GlobalISel/RegisterBank.h"
#include "llvm/CodeGen/GlobalISel/Utils.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
+#include "llvm/CodeGen/MachineConstantPool.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
+#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
-#include "llvm/IR/Type.h"
+#include "llvm/CodeGen/TargetOpcodes.h"
+#include "llvm/CodeGen/TargetRegisterInfo.h"
+#include "llvm/IR/DataLayout.h"
+#include "llvm/IR/InstrTypes.h"
+#include "llvm/Support/AtomicOrdering.h"
+#include "llvm/Support/CodeGen.h"
#include "llvm/Support/Debug.h"
+#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/LowLevelTypeImpl.h"
+#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
+#include <cassert>
+#include <cstdint>
+#include <tuple>
#define DEBUG_TYPE "X86-isel"
-#include "llvm/CodeGen/GlobalISel/InstructionSelectorImpl.h"
-
using namespace llvm;
-#ifndef LLVM_BUILD_GLOBAL_ISEL
-#error "You shouldn't build this"
-#endif
-
namespace {
#define GET_GLOBALISEL_PREDICATE_BITSET
@@ -51,15 +61,16 @@ public:
X86InstructionSelector(const X86TargetMachine &TM, const X86Subtarget &STI,
const X86RegisterBankInfo &RBI);
- bool select(MachineInstr &I) const override;
+ bool select(MachineInstr &I, CodeGenCoverage &CoverageInfo) const override;
+ static const char *getName() { return DEBUG_TYPE; }
private:
/// tblgen-erated 'select' implementation, used as the initial selector for
/// the patterns that don't require complex C++.
- bool selectImpl(MachineInstr &I) const;
+ bool selectImpl(MachineInstr &I, CodeGenCoverage &CoverageInfo) const;
// TODO: remove after supported by Tablegen-erated instruction selection.
- unsigned getLoadStoreOp(LLT &Ty, const RegisterBank &RB, unsigned Opc,
+ unsigned getLoadStoreOp(const LLT &Ty, const RegisterBank &RB, unsigned Opc,
uint64_t Alignment) const;
bool selectLoadStoreOp(MachineInstr &I, MachineRegisterInfo &MRI,
@@ -74,19 +85,28 @@ private:
MachineFunction &MF) const;
bool selectZext(MachineInstr &I, MachineRegisterInfo &MRI,
MachineFunction &MF) const;
+ bool selectAnyext(MachineInstr &I, MachineRegisterInfo &MRI,
+ MachineFunction &MF) const;
bool selectCmp(MachineInstr &I, MachineRegisterInfo &MRI,
MachineFunction &MF) const;
bool selectUadde(MachineInstr &I, MachineRegisterInfo &MRI,
MachineFunction &MF) const;
bool selectCopy(MachineInstr &I, MachineRegisterInfo &MRI) const;
bool selectUnmergeValues(MachineInstr &I, MachineRegisterInfo &MRI,
- MachineFunction &MF) const;
+ MachineFunction &MF,
+ CodeGenCoverage &CoverageInfo) const;
bool selectMergeValues(MachineInstr &I, MachineRegisterInfo &MRI,
- MachineFunction &MF) const;
+ MachineFunction &MF,
+ CodeGenCoverage &CoverageInfo) const;
bool selectInsert(MachineInstr &I, MachineRegisterInfo &MRI,
MachineFunction &MF) const;
bool selectExtract(MachineInstr &I, MachineRegisterInfo &MRI,
MachineFunction &MF) const;
+ bool selectCondBranch(MachineInstr &I, MachineRegisterInfo &MRI,
+ MachineFunction &MF) const;
+ bool materializeFP(MachineInstr &I, MachineRegisterInfo &MRI,
+ MachineFunction &MF) const;
+ bool selectImplicitDefOrPHI(MachineInstr &I, MachineRegisterInfo &MRI) const;
// emit insert subreg instruction and insert it before MachineInstr &I
bool emitInsertSubreg(unsigned DstReg, unsigned SrcReg, MachineInstr &I,
@@ -171,21 +191,71 @@ X86InstructionSelector::getRegClass(LLT Ty, unsigned Reg,
return getRegClass(Ty, RegBank);
}
+static unsigned getSubRegIndex(const TargetRegisterClass *RC) {
+ unsigned SubIdx = X86::NoSubRegister;
+ if (RC == &X86::GR32RegClass) {
+ SubIdx = X86::sub_32bit;
+ } else if (RC == &X86::GR16RegClass) {
+ SubIdx = X86::sub_16bit;
+ } else if (RC == &X86::GR8RegClass) {
+ SubIdx = X86::sub_8bit;
+ }
+
+ return SubIdx;
+}
+
+static const TargetRegisterClass *getRegClassFromGRPhysReg(unsigned Reg) {
+ assert(TargetRegisterInfo::isPhysicalRegister(Reg));
+ if (X86::GR64RegClass.contains(Reg))
+ return &X86::GR64RegClass;
+ if (X86::GR32RegClass.contains(Reg))
+ return &X86::GR32RegClass;
+ if (X86::GR16RegClass.contains(Reg))
+ return &X86::GR16RegClass;
+ if (X86::GR8RegClass.contains(Reg))
+ return &X86::GR8RegClass;
+
+ llvm_unreachable("Unknown RegClass for PhysReg!");
+}
+
// Set X86 Opcode and constrain DestReg.
bool X86InstructionSelector::selectCopy(MachineInstr &I,
MachineRegisterInfo &MRI) const {
-
unsigned DstReg = I.getOperand(0).getReg();
+ const unsigned DstSize = RBI.getSizeInBits(DstReg, MRI, TRI);
+ const RegisterBank &DstRegBank = *RBI.getRegBank(DstReg, MRI, TRI);
+
+ unsigned SrcReg = I.getOperand(1).getReg();
+ const unsigned SrcSize = RBI.getSizeInBits(SrcReg, MRI, TRI);
+ const RegisterBank &SrcRegBank = *RBI.getRegBank(SrcReg, MRI, TRI);
+
if (TargetRegisterInfo::isPhysicalRegister(DstReg)) {
assert(I.isCopy() && "Generic operators do not allow physical registers");
+
+ if (DstSize > SrcSize && SrcRegBank.getID() == X86::GPRRegBankID &&
+ DstRegBank.getID() == X86::GPRRegBankID) {
+
+ const TargetRegisterClass *SrcRC =
+ getRegClass(MRI.getType(SrcReg), SrcRegBank);
+ const TargetRegisterClass *DstRC = getRegClassFromGRPhysReg(DstReg);
+
+ if (SrcRC != DstRC) {
+        // This case can be generated by ABI lowering; perform an anyext.
+ unsigned ExtSrc = MRI.createVirtualRegister(DstRC);
+ BuildMI(*I.getParent(), I, I.getDebugLoc(),
+ TII.get(TargetOpcode::SUBREG_TO_REG))
+ .addDef(ExtSrc)
+ .addImm(0)
+ .addReg(SrcReg)
+ .addImm(getSubRegIndex(SrcRC));
+
+ I.getOperand(1).setReg(ExtSrc);
+ }
+ }
+
return true;
}
- const RegisterBank &RegBank = *RBI.getRegBank(DstReg, MRI, TRI);
- const unsigned DstSize = MRI.getType(DstReg).getSizeInBits();
- unsigned SrcReg = I.getOperand(1).getReg();
- const unsigned SrcSize = RBI.getSizeInBits(SrcReg, MRI, TRI);
-
assert((!TargetRegisterInfo::isPhysicalRegister(SrcReg) || I.isCopy()) &&
"No phys reg on generic operators");
assert((DstSize == SrcSize ||
@@ -195,38 +265,28 @@ bool X86InstructionSelector::selectCopy(MachineInstr &I,
DstSize <= RBI.getSizeInBits(SrcReg, MRI, TRI))) &&
"Copy with different width?!");
- const TargetRegisterClass *RC = nullptr;
+ const TargetRegisterClass *DstRC =
+ getRegClass(MRI.getType(DstReg), DstRegBank);
- switch (RegBank.getID()) {
- case X86::GPRRegBankID:
- assert((DstSize <= 64) && "GPRs cannot get more than 64-bit width values.");
- RC = getRegClass(MRI.getType(DstReg), RegBank);
+ if (SrcRegBank.getID() == X86::GPRRegBankID &&
+ DstRegBank.getID() == X86::GPRRegBankID && SrcSize > DstSize &&
+ TargetRegisterInfo::isPhysicalRegister(SrcReg)) {
+    // Change the physical register to perform the truncate.
- // Change the physical register
- if (SrcSize > DstSize && TargetRegisterInfo::isPhysicalRegister(SrcReg)) {
- if (RC == &X86::GR32RegClass)
- I.getOperand(1).setSubReg(X86::sub_32bit);
- else if (RC == &X86::GR16RegClass)
- I.getOperand(1).setSubReg(X86::sub_16bit);
- else if (RC == &X86::GR8RegClass)
- I.getOperand(1).setSubReg(X86::sub_8bit);
+ const TargetRegisterClass *SrcRC = getRegClassFromGRPhysReg(SrcReg);
+ if (DstRC != SrcRC) {
+ I.getOperand(1).setSubReg(getSubRegIndex(DstRC));
I.getOperand(1).substPhysReg(SrcReg, TRI);
}
- break;
- case X86::VECRRegBankID:
- RC = getRegClass(MRI.getType(DstReg), RegBank);
- break;
- default:
- llvm_unreachable("Unknown RegBank!");
}
// No need to constrain SrcReg. It will get constrained when
// we hit another of its use or its defs.
// Copies do not have constraints.
const TargetRegisterClass *OldRC = MRI.getRegClassOrNull(DstReg);
- if (!OldRC || !RC->hasSubClassEq(OldRC)) {
- if (!RBI.constrainGenericRegister(DstReg, *RC, MRI)) {
+ if (!OldRC || !DstRC->hasSubClassEq(OldRC)) {
+ if (!RBI.constrainGenericRegister(DstReg, *DstRC, MRI)) {
DEBUG(dbgs() << "Failed to constrain " << TII.getName(I.getOpcode())
<< " operand\n");
return false;
@@ -236,7 +296,8 @@ bool X86InstructionSelector::selectCopy(MachineInstr &I,
return true;
}
-bool X86InstructionSelector::select(MachineInstr &I) const {
+bool X86InstructionSelector::select(MachineInstr &I,
+ CodeGenCoverage &CoverageInfo) const {
assert(I.getParent() && "Instruction should be in a basic block!");
assert(I.getParent()->getParent() && "Instruction should be in a function!");
@@ -248,51 +309,69 @@ bool X86InstructionSelector::select(MachineInstr &I) const {
if (!isPreISelGenericOpcode(Opcode)) {
// Certain non-generic instructions also need some special handling.
+ if (Opcode == TargetOpcode::LOAD_STACK_GUARD)
+ return false;
+
if (I.isCopy())
return selectCopy(I, MRI);
- // TODO: handle more cases - LOAD_STACK_GUARD, PHI
return true;
}
assert(I.getNumOperands() == I.getNumExplicitOperands() &&
"Generic instruction has unexpected implicit operands\n");
- if (selectImpl(I))
+ if (selectImpl(I, CoverageInfo))
return true;
DEBUG(dbgs() << " C++ instruction selection: "; I.print(dbgs()));
// TODO: This should be implemented by tblgen.
- if (selectLoadStoreOp(I, MRI, MF))
- return true;
- if (selectFrameIndexOrGep(I, MRI, MF))
- return true;
- if (selectGlobalValue(I, MRI, MF))
- return true;
- if (selectConstant(I, MRI, MF))
- return true;
- if (selectTrunc(I, MRI, MF))
- return true;
- if (selectZext(I, MRI, MF))
- return true;
- if (selectCmp(I, MRI, MF))
- return true;
- if (selectUadde(I, MRI, MF))
- return true;
- if (selectUnmergeValues(I, MRI, MF))
- return true;
- if (selectMergeValues(I, MRI, MF))
- return true;
- if (selectExtract(I, MRI, MF))
- return true;
- if (selectInsert(I, MRI, MF))
- return true;
+ switch (I.getOpcode()) {
+ default:
+ return false;
+ case TargetOpcode::G_STORE:
+ case TargetOpcode::G_LOAD:
+ return selectLoadStoreOp(I, MRI, MF);
+ case TargetOpcode::G_GEP:
+ case TargetOpcode::G_FRAME_INDEX:
+ return selectFrameIndexOrGep(I, MRI, MF);
+ case TargetOpcode::G_GLOBAL_VALUE:
+ return selectGlobalValue(I, MRI, MF);
+ case TargetOpcode::G_CONSTANT:
+ return selectConstant(I, MRI, MF);
+ case TargetOpcode::G_FCONSTANT:
+ return materializeFP(I, MRI, MF);
+ case TargetOpcode::G_TRUNC:
+ return selectTrunc(I, MRI, MF);
+ case TargetOpcode::G_ZEXT:
+ return selectZext(I, MRI, MF);
+ case TargetOpcode::G_ANYEXT:
+ return selectAnyext(I, MRI, MF);
+ case TargetOpcode::G_ICMP:
+ return selectCmp(I, MRI, MF);
+ case TargetOpcode::G_UADDE:
+ return selectUadde(I, MRI, MF);
+ case TargetOpcode::G_UNMERGE_VALUES:
+ return selectUnmergeValues(I, MRI, MF, CoverageInfo);
+ case TargetOpcode::G_MERGE_VALUES:
+ return selectMergeValues(I, MRI, MF, CoverageInfo);
+ case TargetOpcode::G_EXTRACT:
+ return selectExtract(I, MRI, MF);
+ case TargetOpcode::G_INSERT:
+ return selectInsert(I, MRI, MF);
+ case TargetOpcode::G_BRCOND:
+ return selectCondBranch(I, MRI, MF);
+ case TargetOpcode::G_IMPLICIT_DEF:
+ case TargetOpcode::G_PHI:
+ return selectImplicitDefOrPHI(I, MRI);
+ }
return false;
}
-unsigned X86InstructionSelector::getLoadStoreOp(LLT &Ty, const RegisterBank &RB,
+unsigned X86InstructionSelector::getLoadStoreOp(const LLT &Ty,
+ const RegisterBank &RB,
unsigned Opc,
uint64_t Alignment) const {
bool Isload = (Opc == TargetOpcode::G_LOAD);
@@ -366,9 +445,9 @@ unsigned X86InstructionSelector::getLoadStoreOp(LLT &Ty, const RegisterBank &RB,
}
// Fill in an address from the given instruction.
-void X86SelectAddress(const MachineInstr &I, const MachineRegisterInfo &MRI,
- X86AddressMode &AM) {
-
+static void X86SelectAddress(const MachineInstr &I,
+ const MachineRegisterInfo &MRI,
+ X86AddressMode &AM) {
assert(I.getOperand(0).isReg() && "unsupported opperand.");
assert(MRI.getType(I.getOperand(0).getReg()).isPointer() &&
"unsupported type.");
@@ -390,17 +469,15 @@ void X86SelectAddress(const MachineInstr &I, const MachineRegisterInfo &MRI,
// Default behavior.
AM.Base.Reg = I.getOperand(0).getReg();
- return;
}
bool X86InstructionSelector::selectLoadStoreOp(MachineInstr &I,
MachineRegisterInfo &MRI,
MachineFunction &MF) const {
-
unsigned Opc = I.getOpcode();
- if (Opc != TargetOpcode::G_STORE && Opc != TargetOpcode::G_LOAD)
- return false;
+ assert((Opc == TargetOpcode::G_STORE || Opc == TargetOpcode::G_LOAD) &&
+ "unexpected instruction");
const unsigned DefReg = I.getOperand(0).getReg();
LLT Ty = MRI.getType(DefReg);
@@ -447,8 +524,8 @@ bool X86InstructionSelector::selectFrameIndexOrGep(MachineInstr &I,
MachineFunction &MF) const {
unsigned Opc = I.getOpcode();
- if (Opc != TargetOpcode::G_FRAME_INDEX && Opc != TargetOpcode::G_GEP)
- return false;
+ assert((Opc == TargetOpcode::G_FRAME_INDEX || Opc == TargetOpcode::G_GEP) &&
+ "unexpected instruction");
const unsigned DefReg = I.getOperand(0).getReg();
LLT Ty = MRI.getType(DefReg);
@@ -473,10 +550,8 @@ bool X86InstructionSelector::selectFrameIndexOrGep(MachineInstr &I,
bool X86InstructionSelector::selectGlobalValue(MachineInstr &I,
MachineRegisterInfo &MRI,
MachineFunction &MF) const {
- unsigned Opc = I.getOpcode();
-
- if (Opc != TargetOpcode::G_GLOBAL_VALUE)
- return false;
+ assert((I.getOpcode() == TargetOpcode::G_GLOBAL_VALUE) &&
+ "unexpected instruction");
auto GV = I.getOperand(1).getGlobal();
if (GV->isThreadLocal()) {
@@ -485,7 +560,7 @@ bool X86InstructionSelector::selectGlobalValue(MachineInstr &I,
// Can't handle alternate code models yet.
if (TM.getCodeModel() != CodeModel::Small)
- return 0;
+ return false;
X86AddressMode AM;
AM.GV = GV;
@@ -521,8 +596,8 @@ bool X86InstructionSelector::selectGlobalValue(MachineInstr &I,
bool X86InstructionSelector::selectConstant(MachineInstr &I,
MachineRegisterInfo &MRI,
MachineFunction &MF) const {
- if (I.getOpcode() != TargetOpcode::G_CONSTANT)
- return false;
+ assert((I.getOpcode() == TargetOpcode::G_CONSTANT) &&
+ "unexpected instruction");
const unsigned DefReg = I.getOperand(0).getReg();
LLT Ty = MRI.getType(DefReg);
@@ -550,14 +625,13 @@ bool X86InstructionSelector::selectConstant(MachineInstr &I,
case 32:
NewOpc = X86::MOV32ri;
break;
- case 64: {
+ case 64:
// TODO: in case isUInt<32>(Val), X86::MOV32ri can be used
if (isInt<32>(Val))
NewOpc = X86::MOV64ri32;
else
NewOpc = X86::MOV64ri;
break;
- }
default:
llvm_unreachable("Can't select G_CONSTANT, unsupported type.");
}
@@ -569,8 +643,7 @@ bool X86InstructionSelector::selectConstant(MachineInstr &I,
bool X86InstructionSelector::selectTrunc(MachineInstr &I,
MachineRegisterInfo &MRI,
MachineFunction &MF) const {
- if (I.getOpcode() != TargetOpcode::G_TRUNC)
- return false;
+ assert((I.getOpcode() == TargetOpcode::G_TRUNC) && "unexpected instruction");
const unsigned DstReg = I.getOperand(0).getReg();
const unsigned SrcReg = I.getOperand(1).getReg();
@@ -628,8 +701,7 @@ bool X86InstructionSelector::selectTrunc(MachineInstr &I,
bool X86InstructionSelector::selectZext(MachineInstr &I,
MachineRegisterInfo &MRI,
MachineFunction &MF) const {
- if (I.getOpcode() != TargetOpcode::G_ZEXT)
- return false;
+ assert((I.getOpcode() == TargetOpcode::G_ZEXT) && "unexpected instruction");
const unsigned DstReg = I.getOperand(0).getReg();
const unsigned SrcReg = I.getOperand(1).getReg();
@@ -673,11 +745,59 @@ bool X86InstructionSelector::selectZext(MachineInstr &I,
return true;
}
+bool X86InstructionSelector::selectAnyext(MachineInstr &I,
+ MachineRegisterInfo &MRI,
+ MachineFunction &MF) const {
+ assert((I.getOpcode() == TargetOpcode::G_ANYEXT) && "unexpected instruction");
+
+ const unsigned DstReg = I.getOperand(0).getReg();
+ const unsigned SrcReg = I.getOperand(1).getReg();
+
+ const LLT DstTy = MRI.getType(DstReg);
+ const LLT SrcTy = MRI.getType(SrcReg);
+
+ const RegisterBank &DstRB = *RBI.getRegBank(DstReg, MRI, TRI);
+ const RegisterBank &SrcRB = *RBI.getRegBank(SrcReg, MRI, TRI);
+
+ assert(DstRB.getID() == SrcRB.getID() &&
+ "G_ANYEXT input/output on different banks\n");
+
+ assert(DstTy.getSizeInBits() > SrcTy.getSizeInBits() &&
+ "G_ANYEXT incorrect operand size");
+
+ if (DstRB.getID() != X86::GPRRegBankID)
+ return false;
+
+ const TargetRegisterClass *DstRC = getRegClass(DstTy, DstRB);
+ const TargetRegisterClass *SrcRC = getRegClass(SrcTy, SrcRB);
+
+ if (!RBI.constrainGenericRegister(SrcReg, *SrcRC, MRI) ||
+ !RBI.constrainGenericRegister(DstReg, *DstRC, MRI)) {
+ DEBUG(dbgs() << "Failed to constrain " << TII.getName(I.getOpcode())
+ << " operand\n");
+ return false;
+ }
+
+ if (SrcRC == DstRC) {
+ I.setDesc(TII.get(X86::COPY));
+ return true;
+ }
+
+ BuildMI(*I.getParent(), I, I.getDebugLoc(),
+ TII.get(TargetOpcode::SUBREG_TO_REG))
+ .addDef(DstReg)
+ .addImm(0)
+ .addReg(SrcReg)
+ .addImm(getSubRegIndex(SrcRC));
+
+ I.eraseFromParent();
+ return true;
+}
+
bool X86InstructionSelector::selectCmp(MachineInstr &I,
MachineRegisterInfo &MRI,
MachineFunction &MF) const {
- if (I.getOpcode() != TargetOpcode::G_ICMP)
- return false;
+ assert((I.getOpcode() == TargetOpcode::G_ICMP) && "unexpected instruction");
X86::CondCode CC;
bool SwapArgs;
@@ -729,8 +849,7 @@ bool X86InstructionSelector::selectCmp(MachineInstr &I,
bool X86InstructionSelector::selectUadde(MachineInstr &I,
MachineRegisterInfo &MRI,
MachineFunction &MF) const {
- if (I.getOpcode() != TargetOpcode::G_UADDE)
- return false;
+ assert((I.getOpcode() == TargetOpcode::G_UADDE) && "unexpected instruction");
const unsigned DstReg = I.getOperand(0).getReg();
const unsigned CarryOutReg = I.getOperand(1).getReg();
@@ -789,9 +908,8 @@ bool X86InstructionSelector::selectUadde(MachineInstr &I,
bool X86InstructionSelector::selectExtract(MachineInstr &I,
MachineRegisterInfo &MRI,
MachineFunction &MF) const {
-
- if (I.getOpcode() != TargetOpcode::G_EXTRACT)
- return false;
+ assert((I.getOpcode() == TargetOpcode::G_EXTRACT) &&
+ "unexpected instruction");
const unsigned DstReg = I.getOperand(0).getReg();
const unsigned SrcReg = I.getOperand(1).getReg();
@@ -848,7 +966,6 @@ bool X86InstructionSelector::emitExtractSubreg(unsigned DstReg, unsigned SrcReg,
MachineInstr &I,
MachineRegisterInfo &MRI,
MachineFunction &MF) const {
-
const LLT DstTy = MRI.getType(DstReg);
const LLT SrcTy = MRI.getType(SrcReg);
unsigned SubIdx = X86::NoSubRegister;
@@ -887,7 +1004,6 @@ bool X86InstructionSelector::emitInsertSubreg(unsigned DstReg, unsigned SrcReg,
MachineInstr &I,
MachineRegisterInfo &MRI,
MachineFunction &MF) const {
-
const LLT DstTy = MRI.getType(DstReg);
const LLT SrcTy = MRI.getType(SrcReg);
unsigned SubIdx = X86::NoSubRegister;
@@ -925,9 +1041,7 @@ bool X86InstructionSelector::emitInsertSubreg(unsigned DstReg, unsigned SrcReg,
bool X86InstructionSelector::selectInsert(MachineInstr &I,
MachineRegisterInfo &MRI,
MachineFunction &MF) const {
-
- if (I.getOpcode() != TargetOpcode::G_INSERT)
- return false;
+ assert((I.getOpcode() == TargetOpcode::G_INSERT) && "unexpected instruction");
const unsigned DstReg = I.getOperand(0).getReg();
const unsigned SrcReg = I.getOperand(1).getReg();
@@ -982,11 +1096,11 @@ bool X86InstructionSelector::selectInsert(MachineInstr &I,
return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
}
-bool X86InstructionSelector::selectUnmergeValues(MachineInstr &I,
- MachineRegisterInfo &MRI,
- MachineFunction &MF) const {
- if (I.getOpcode() != TargetOpcode::G_UNMERGE_VALUES)
- return false;
+bool X86InstructionSelector::selectUnmergeValues(
+ MachineInstr &I, MachineRegisterInfo &MRI, MachineFunction &MF,
+ CodeGenCoverage &CoverageInfo) const {
+ assert((I.getOpcode() == TargetOpcode::G_UNMERGE_VALUES) &&
+ "unexpected instruction");
// Split to extracts.
unsigned NumDefs = I.getNumOperands() - 1;
@@ -994,14 +1108,13 @@ bool X86InstructionSelector::selectUnmergeValues(MachineInstr &I,
unsigned DefSize = MRI.getType(I.getOperand(0).getReg()).getSizeInBits();
for (unsigned Idx = 0; Idx < NumDefs; ++Idx) {
-
MachineInstr &ExtrInst =
*BuildMI(*I.getParent(), I, I.getDebugLoc(),
TII.get(TargetOpcode::G_EXTRACT), I.getOperand(Idx).getReg())
.addReg(SrcReg)
.addImm(Idx * DefSize);
- if (!select(ExtrInst))
+ if (!select(ExtrInst, CoverageInfo))
return false;
}
@@ -1009,11 +1122,11 @@ bool X86InstructionSelector::selectUnmergeValues(MachineInstr &I,
return true;
}
-bool X86InstructionSelector::selectMergeValues(MachineInstr &I,
- MachineRegisterInfo &MRI,
- MachineFunction &MF) const {
- if (I.getOpcode() != TargetOpcode::G_MERGE_VALUES)
- return false;
+bool X86InstructionSelector::selectMergeValues(
+ MachineInstr &I, MachineRegisterInfo &MRI, MachineFunction &MF,
+ CodeGenCoverage &CoverageInfo) const {
+ assert((I.getOpcode() == TargetOpcode::G_MERGE_VALUES) &&
+ "unexpected instruction");
// Split to inserts.
unsigned DstReg = I.getOperand(0).getReg();
@@ -1032,7 +1145,6 @@ bool X86InstructionSelector::selectMergeValues(MachineInstr &I,
return false;
for (unsigned Idx = 2; Idx < I.getNumOperands(); ++Idx) {
-
unsigned Tmp = MRI.createGenericVirtualRegister(DstTy);
MRI.setRegBank(Tmp, RegBank);
@@ -1044,7 +1156,7 @@ bool X86InstructionSelector::selectMergeValues(MachineInstr &I,
DefReg = Tmp;
- if (!select(InsertInst))
+ if (!select(InsertInst, CoverageInfo))
return false;
}
@@ -1052,12 +1164,127 @@ bool X86InstructionSelector::selectMergeValues(MachineInstr &I,
TII.get(TargetOpcode::COPY), DstReg)
.addReg(DefReg);
- if (!select(CopyInst))
+ if (!select(CopyInst, CoverageInfo))
return false;
I.eraseFromParent();
return true;
}
+
+bool X86InstructionSelector::selectCondBranch(MachineInstr &I,
+ MachineRegisterInfo &MRI,
+ MachineFunction &MF) const {
+ assert((I.getOpcode() == TargetOpcode::G_BRCOND) && "unexpected instruction");
+
+ const unsigned CondReg = I.getOperand(0).getReg();
+ MachineBasicBlock *DestMBB = I.getOperand(1).getMBB();
+
+ MachineInstr &TestInst =
+ *BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(X86::TEST8ri))
+ .addReg(CondReg)
+ .addImm(1);
+ BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(X86::JNE_1))
+ .addMBB(DestMBB);
+
+ constrainSelectedInstRegOperands(TestInst, TII, TRI, RBI);
+
+ I.eraseFromParent();
+ return true;
+}
+
+bool X86InstructionSelector::materializeFP(MachineInstr &I,
+ MachineRegisterInfo &MRI,
+ MachineFunction &MF) const {
+ assert((I.getOpcode() == TargetOpcode::G_FCONSTANT) &&
+ "unexpected instruction");
+
+ // Can't handle alternate code models yet.
+ CodeModel::Model CM = TM.getCodeModel();
+ if (CM != CodeModel::Small && CM != CodeModel::Large)
+ return false;
+
+ const unsigned DstReg = I.getOperand(0).getReg();
+ const LLT DstTy = MRI.getType(DstReg);
+ const RegisterBank &RegBank = *RBI.getRegBank(DstReg, MRI, TRI);
+ unsigned Align = DstTy.getSizeInBits();
+ const DebugLoc &DbgLoc = I.getDebugLoc();
+
+ unsigned Opc = getLoadStoreOp(DstTy, RegBank, TargetOpcode::G_LOAD, Align);
+
+ // Create the load from the constant pool.
+ const ConstantFP *CFP = I.getOperand(1).getFPImm();
+ unsigned CPI = MF.getConstantPool()->getConstantPoolIndex(CFP, Align);
+ MachineInstr *LoadInst = nullptr;
+ unsigned char OpFlag = STI.classifyLocalReference(nullptr);
+
+ if (CM == CodeModel::Large && STI.is64Bit()) {
+ // Under X86-64 non-small code model, GV (and friends) are 64-bits, so
+ // they cannot be folded into immediate fields.
+
+ unsigned AddrReg = MRI.createVirtualRegister(&X86::GR64RegClass);
+ BuildMI(*I.getParent(), I, DbgLoc, TII.get(X86::MOV64ri), AddrReg)
+ .addConstantPoolIndex(CPI, 0, OpFlag);
+
+ MachineMemOperand *MMO = MF.getMachineMemOperand(
+ MachinePointerInfo::getConstantPool(MF), MachineMemOperand::MOLoad,
+ MF.getDataLayout().getPointerSize(), Align);
+
+ LoadInst =
+ addDirectMem(BuildMI(*I.getParent(), I, DbgLoc, TII.get(Opc), DstReg),
+ AddrReg)
+ .addMemOperand(MMO);
+
+ } else if (CM == CodeModel::Small || !STI.is64Bit()) {
+ // Handle the case when globals fit in our immediate field.
+ // This is true for X86-32 always and X86-64 when in -mcmodel=small mode.
+
+ // x86-32 PIC requires a PIC base register for constant pools.
+ unsigned PICBase = 0;
+ if (OpFlag == X86II::MO_PIC_BASE_OFFSET || OpFlag == X86II::MO_GOTOFF) {
+ // PICBase can be allocated by TII.getGlobalBaseReg(&MF).
+      // In DAGISel, the code that initializes it is generated by the CGBR pass.
+ return false; // TODO support the mode.
+ } else if (STI.is64Bit() && TM.getCodeModel() == CodeModel::Small)
+ PICBase = X86::RIP;
+
+ LoadInst = addConstantPoolReference(
+ BuildMI(*I.getParent(), I, DbgLoc, TII.get(Opc), DstReg), CPI, PICBase,
+ OpFlag);
+ } else
+ return false;
+
+ constrainSelectedInstRegOperands(*LoadInst, TII, TRI, RBI);
+ I.eraseFromParent();
+ return true;
+}
+
+bool X86InstructionSelector::selectImplicitDefOrPHI(
+ MachineInstr &I, MachineRegisterInfo &MRI) const {
+ assert((I.getOpcode() == TargetOpcode::G_IMPLICIT_DEF ||
+ I.getOpcode() == TargetOpcode::G_PHI) &&
+ "unexpected instruction");
+
+ unsigned DstReg = I.getOperand(0).getReg();
+
+ if (!MRI.getRegClassOrNull(DstReg)) {
+ const LLT DstTy = MRI.getType(DstReg);
+ const TargetRegisterClass *RC = getRegClass(DstTy, DstReg, MRI);
+
+ if (!RBI.constrainGenericRegister(DstReg, *RC, MRI)) {
+ DEBUG(dbgs() << "Failed to constrain " << TII.getName(I.getOpcode())
+ << " operand\n");
+ return false;
+ }
+ }
+
+ if (I.getOpcode() == TargetOpcode::G_IMPLICIT_DEF)
+ I.setDesc(TII.get(X86::IMPLICIT_DEF));
+ else
+ I.setDesc(TII.get(X86::PHI));
+
+ return true;
+}
+
InstructionSelector *
llvm::createX86InstructionSelector(const X86TargetMachine &TM,
X86Subtarget &Subtarget,