author     2019-01-27 16:42:12 +0000
committer  2019-01-27 16:42:12 +0000
commit     b773203fb58f3ef282fb69c832d8710cab5bc82d
tree       e75913f147570fbd75169647b144df85b88a038c /gnu/llvm/tools/clang/lib/CodeGen/CGRecordLayoutBuilder.cpp
parent     tweak errno in previous
Import LLVM 7.0.1 release including clang, lld and lldb.
Diffstat (limited to 'gnu/llvm/tools/clang/lib/CodeGen/CGRecordLayoutBuilder.cpp')
-rw-r--r--  gnu/llvm/tools/clang/lib/CodeGen/CGRecordLayoutBuilder.cpp | 104
1 file changed, 54 insertions(+), 50 deletions(-)
diff --git a/gnu/llvm/tools/clang/lib/CodeGen/CGRecordLayoutBuilder.cpp b/gnu/llvm/tools/clang/lib/CodeGen/CGRecordLayoutBuilder.cpp
index 1644ab4c072..58aaae69255 100644
--- a/gnu/llvm/tools/clang/lib/CodeGen/CGRecordLayoutBuilder.cpp
+++ b/gnu/llvm/tools/clang/lib/CodeGen/CGRecordLayoutBuilder.cpp
@@ -62,7 +62,7 @@ namespace {
 /// because LLVM reads from the complete type it can generate incorrect code
 /// if we do not clip the tail padding off of the bitfield in the complete
 /// layout. This introduces a somewhat awkward extra unnecessary clip stage.
-/// The location of the clip is stored internally as a sentinal of type
+/// The location of the clip is stored internally as a sentinel of type
 /// SCISSOR. If LLVM were updated to read base types (which it probably
 /// should because locations of things such as VBases are bogus in the llvm
 /// type anyway) then we could eliminate the SCISSOR.
@@ -74,7 +74,7 @@ namespace {
 struct CGRecordLowering {
   // MemberInfo is a helper structure that contains information about a record
   // member. In additional to the standard member types, there exists a
-  // sentinal member type that ensures correct rounding.
+  // sentinel member type that ensures correct rounding.
   struct MemberInfo {
     CharUnits Offset;
     enum InfoKind { VFPtr, VBPtr, Field, Base, VBase, Scissor } Kind;
@@ -95,7 +95,7 @@ struct CGRecordLowering {
   // The constructor.
   CGRecordLowering(CodeGenTypes &Types, const RecordDecl *D, bool Packed);
   // Short helper routines.
-  /// \brief Constructs a MemberInfo instance from an offset and llvm::Type *.
+  /// Constructs a MemberInfo instance from an offset and llvm::Type *.
   MemberInfo StorageInfo(CharUnits Offset, llvm::Type *Data) {
     return MemberInfo(Offset, MemberInfo::Field, Data);
   }
@@ -118,19 +118,19 @@ struct CGRecordLowering {
     return !Context.getTargetInfo().getCXXABI().isMicrosoft();
   }
-  /// \brief Wraps llvm::Type::getIntNTy with some implicit arguments.
+  /// Wraps llvm::Type::getIntNTy with some implicit arguments.
   llvm::Type *getIntNType(uint64_t NumBits) {
     return llvm::Type::getIntNTy(Types.getLLVMContext(),
                                  (unsigned)llvm::alignTo(NumBits, 8));
   }
-  /// \brief Gets an llvm type of size NumBytes and alignment 1.
+  /// Gets an llvm type of size NumBytes and alignment 1.
   llvm::Type *getByteArrayType(CharUnits NumBytes) {
     assert(!NumBytes.isZero() && "Empty byte arrays aren't allowed.");
     llvm::Type *Type = llvm::Type::getInt8Ty(Types.getLLVMContext());
     return NumBytes == CharUnits::One() ? Type :
         (llvm::Type *)llvm::ArrayType::get(Type, NumBytes.getQuantity());
   }
-  /// \brief Gets the storage type for a field decl and handles storage
+  /// Gets the storage type for a field decl and handles storage
   /// for itanium bitfields that are smaller than their declared type.
   llvm::Type *getStorageType(const FieldDecl *FD) {
     llvm::Type *Type = Types.ConvertTypeForMem(FD->getType());
@@ -139,7 +139,7 @@ struct CGRecordLowering {
     return getIntNType(std::min(FD->getBitWidthValue(Context),
                                 (unsigned)Context.toBits(getSize(Type))));
   }
-  /// \brief Gets the llvm Basesubobject type from a CXXRecordDecl.
+  /// Gets the llvm Basesubobject type from a CXXRecordDecl.
   llvm::Type *getStorageType(const CXXRecordDecl *RD) {
     return Types.getCGRecordLayout(RD).getBaseSubobjectLLVMType();
   }
@@ -166,9 +166,9 @@ struct CGRecordLowering {
     return Layout.getFieldOffset(FD->getFieldIndex());
   }
   // Layout routines.
-  void setBitFieldInfo(const FieldDecl *FD, CharUnits StartOffset, 
+  void setBitFieldInfo(const FieldDecl *FD, CharUnits StartOffset,
                        llvm::Type *StorageType);
-  /// \brief Lowers an ASTRecordLayout to a llvm type.
+  /// Lowers an ASTRecordLayout to a llvm type.
   void lower(bool NonVirtualBaseType);
   void lowerUnion();
   void accumulateFields();
@@ -177,18 +177,18 @@ struct CGRecordLowering {
   void accumulateBases();
   void accumulateVPtrs();
   void accumulateVBases();
-  /// \brief Recursively searches all of the bases to find out if a vbase is
+  /// Recursively searches all of the bases to find out if a vbase is
   /// not the primary vbase of some base class.
   bool hasOwnStorage(const CXXRecordDecl *Decl, const CXXRecordDecl *Query);
   void calculateZeroInit();
-  /// \brief Lowers bitfield storage types to I8 arrays for bitfields with tail
+  /// Lowers bitfield storage types to I8 arrays for bitfields with tail
   /// padding that is or can potentially be used.
   void clipTailPadding();
-  /// \brief Determines if we need a packed llvm struct.
+  /// Determines if we need a packed llvm struct.
   void determinePacked(bool NVBaseType);
-  /// \brief Inserts padding everwhere it's needed.
+  /// Inserts padding everywhere it's needed.
   void insertPadding();
-  /// \brief Fills out the structures that are ultimately consumed.
+  /// Fills out the structures that are ultimately consumed.
   void fillOutputFields();
   // Input memoization fields.
   CodeGenTypes &Types;
@@ -214,12 +214,13 @@ private:
 };
 } // namespace {
 
-CGRecordLowering::CGRecordLowering(CodeGenTypes &Types, const RecordDecl *D, bool Packed)
-  : Types(Types), Context(Types.getContext()), D(D),
-    RD(dyn_cast<CXXRecordDecl>(D)),
-    Layout(Types.getContext().getASTRecordLayout(D)),
-    DataLayout(Types.getDataLayout()), IsZeroInitializable(true),
-    IsZeroInitializableAsBase(true), Packed(Packed) {}
+CGRecordLowering::CGRecordLowering(CodeGenTypes &Types, const RecordDecl *D,
+                                   bool Packed)
+    : Types(Types), Context(Types.getContext()), D(D),
+      RD(dyn_cast<CXXRecordDecl>(D)),
+      Layout(Types.getContext().getASTRecordLayout(D)),
+      DataLayout(Types.getDataLayout()), IsZeroInitializable(true),
+      IsZeroInitializableAsBase(true), Packed(Packed) {}
 
 void CGRecordLowering::setBitFieldInfo(
     const FieldDecl *FD, CharUnits StartOffset, llvm::Type *StorageType) {
@@ -245,7 +246,7 @@ void CGRecordLowering::lower(bool NVBaseType) {
   //   1) Store all members (fields and bases) in a list and sort them by offset.
   //   2) Add a 1-byte capstone member at the Size of the structure.
   //   3) Clip bitfield storages members if their tail padding is or might be
-  //      used by another field or base.  The clipping process uses the capstone 
+  //      used by another field or base.  The clipping process uses the capstone
   //      by treating it as another object that occurs after the record.
   //   4) Determine if the llvm-struct requires packing.  It's important that this
   //      phase occur after clipping, because clipping changes the llvm type.
@@ -294,8 +295,7 @@ void CGRecordLowering::lowerUnion() {
   //   been doing and cause lit tests to change.
   for (const auto *Field : D->fields()) {
     if (Field->isBitField()) {
-      // Skip 0 sized bitfields.
-      if (Field->getBitWidthValue(Context) == 0)
+      if (Field->isZeroLengthBitField(Context))
         continue;
       llvm::Type *FieldType = getStorageType(Field);
       if (LayoutSize < getSize(FieldType))
@@ -313,9 +313,8 @@ void CGRecordLowering::lowerUnion() {
     if (!SeenNamedMember) {
       SeenNamedMember = Field->getIdentifier();
       if (!SeenNamedMember)
-        if (const auto *FieldRD =
-              dyn_cast_or_null<RecordDecl>(Field->getType()->getAsTagDecl()))
-          SeenNamedMember = FieldRD->findFirstNamedDataMember();
+        if (const auto *FieldRD = Field->getType()->getAsRecordDecl())
+          SeenNamedMember = FieldRD->findFirstNamedDataMember();
       if (SeenNamedMember && !isZeroInitializable(Field)) {
         IsZeroInitializable = IsZeroInitializableAsBase = false;
         StorageType = FieldType;
@@ -380,7 +379,7 @@ CGRecordLowering::accumulateBitFields(RecordDecl::field_iterator Field,
     for (; Field != FieldEnd; ++Field) {
       uint64_t BitOffset = getFieldBitOffset(*Field);
       // Zero-width bitfields end runs.
-      if (Field->getBitWidthValue(Context) == 0) {
+      if (Field->isZeroLengthBitField(Context)) {
         Run = FieldEnd;
         continue;
       }
@@ -404,19 +403,20 @@ CGRecordLowering::accumulateBitFields(RecordDecl::field_iterator Field,
     return;
   }
 
-  // Check if current Field is better as a single field run. When current field
+  // Check if OffsetInRecord is better as a single field run. When OffsetInRecord
   // has legal integer width, and its bitfield offset is naturally aligned, it
   // is better to make the bitfield a separate storage component so as it can be
   // accessed directly with lower cost.
-  auto IsBetterAsSingleFieldRun = [&](RecordDecl::field_iterator Field) {
+  auto IsBetterAsSingleFieldRun = [&](uint64_t OffsetInRecord,
+                                      uint64_t StartBitOffset) {
     if (!Types.getCodeGenOpts().FineGrainedBitfieldAccesses)
       return false;
-    unsigned Width = Field->getBitWidthValue(Context);
-    if (!DataLayout.isLegalInteger(Width))
+    if (!DataLayout.isLegalInteger(OffsetInRecord))
      return false;
-    // Make sure Field is natually aligned if it is treated as an IType integer.
-    if (getFieldBitOffset(*Field) %
-        Context.toBits(getAlignment(getIntNType(Width))) !=
+    // Make sure StartBitOffset is natually aligned if it is treated as an
+    // IType integer.
+    if (StartBitOffset %
+            Context.toBits(getAlignment(getIntNType(OffsetInRecord))) !=
         0)
       return false;
     return true;
@@ -431,26 +431,31 @@ CGRecordLowering::accumulateBitFields(RecordDecl::field_iterator Field,
       if (Field == FieldEnd)
         break;
       // Any non-zero-length bitfield can start a new run.
-      if (Field->getBitWidthValue(Context) != 0) {
+      if (!Field->isZeroLengthBitField(Context)) {
         Run = Field;
         StartBitOffset = getFieldBitOffset(*Field);
         Tail = StartBitOffset + Field->getBitWidthValue(Context);
-        StartFieldAsSingleRun = IsBetterAsSingleFieldRun(Run);
+        StartFieldAsSingleRun = IsBetterAsSingleFieldRun(Tail - StartBitOffset,
+                                                         StartBitOffset);
       }
       ++Field;
       continue;
     }
 
     // If the start field of a new run is better as a single run, or
-    // if current field is better as a single run, or
-    // if current field has zero width bitfield, or
+    // if current field (or consecutive fields) is better as a single run, or
+    // if current field has zero width bitfield and either
+    // UseZeroLengthBitfieldAlignment or UseBitFieldTypeAlignment is set to
+    // true, or
    // if the offset of current field is inconsistent with the offset of
    // previous field plus its offset,
    // skip the block below and go ahead to emit the storage.
    // Otherwise, try to add bitfields to the run.
     if (!StartFieldAsSingleRun && Field != FieldEnd &&
-        !IsBetterAsSingleFieldRun(Field) &&
-        Field->getBitWidthValue(Context) != 0 &&
+        !IsBetterAsSingleFieldRun(Tail - StartBitOffset, StartBitOffset) &&
+        (!Field->isZeroLengthBitField(Context) ||
+         (!Context.getTargetInfo().useZeroLengthBitfieldAlignment() &&
+          !Context.getTargetInfo().useBitFieldTypeAlignment())) &&
        Tail == getFieldBitOffset(*Field)) {
       Tail += Field->getBitWidthValue(Context);
       ++Field;
@@ -626,7 +631,7 @@ void CGRecordLowering::determinePacked(bool NVBaseType) {
   // non-virtual sub-object and an unpacked complete object or vise versa.
   if (NVSize % NVAlignment)
     Packed = true;
-  // Update the alignment of the sentinal.
+  // Update the alignment of the sentinel.
   if (!Packed)
     Members.back().Data = getIntNType(Context.toBits(Alignment));
 }
@@ -681,7 +686,7 @@ CGBitFieldInfo CGBitFieldInfo::MakeInfo(CodeGenTypes &Types,
                                         uint64_t Offset, uint64_t Size,
                                         uint64_t StorageSize,
                                         CharUnits StorageOffset) {
-  // This function is vestigial from CGRecordLayoutBuilder days but is still 
+  // This function is vestigial from CGRecordLayoutBuilder days but is still
   // used in GCObjCRuntime.cpp.  That usage has a "fixme" attached to it that
   // when addressed will allow for the removal of this function.
   llvm::Type *Ty = Types.ConvertTypeForMem(FD->getType());
@@ -776,17 +781,16 @@ CGRecordLayout *CodeGenTypes::ComputeRecordLayout(const RecordDecl *D,
   if (BaseTy) {
     CharUnits NonVirtualSize = Layout.getNonVirtualSize();
-    uint64_t AlignedNonVirtualTypeSizeInBits = 
+    uint64_t AlignedNonVirtualTypeSizeInBits =
       getContext().toBits(NonVirtualSize);
-    assert(AlignedNonVirtualTypeSizeInBits == 
+    assert(AlignedNonVirtualTypeSizeInBits ==
            getDataLayout().getTypeAllocSizeInBits(BaseTy) &&
            "Type size mismatch!");
   }
-  
+
   // Verify that the LLVM and AST field offsets agree.
-  llvm::StructType *ST =
-    dyn_cast<llvm::StructType>(RL->getLLVMType());
+  llvm::StructType *ST = RL->getLLVMType();
   const llvm::StructLayout *SL = getDataLayout().getStructLayout(ST);
   const ASTRecordLayout &AST_RL = getContext().getASTRecordLayout(D);
@@ -802,13 +806,13 @@ CGRecordLayout *CodeGenTypes::ComputeRecordLayout(const RecordDecl *D,
              "Invalid field offset!");
       continue;
     }
-    
+
     // Ignore unnamed bit-fields.
     if (!FD->getDeclName())
       continue;
 
     // Don't inspect zero-length bitfields.
-    if (FD->getBitWidthValue(getContext()) == 0)
+    if (FD->isZeroLengthBitField(getContext()))
       continue;
 
     const CGBitFieldInfo &Info = RL->getBitFieldInfo(FD);
@@ -849,7 +853,7 @@ void CGRecordLayout::print(raw_ostream &OS) const {
   OS << "<CGRecordLayout\n";
   OS << "  LLVMType:" << *CompleteObjectType << "\n";
   if (BaseSubobjectType)
-    OS << "  NonVirtualBaseLLVMType:" << *BaseSubobjectType << "\n"; 
+    OS << "  NonVirtualBaseLLVMType:" << *BaseSubobjectType << "\n";
  OS << "  IsZeroInitializable:" << IsZeroInitializable << "\n";
  OS << "  BitFields:[\n";
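Note on the zero-length bit-field changes above: the diff replaces manual
getBitWidthValue(Context) == 0 checks with FieldDecl::isZeroLengthBitField()
and makes run accumulation consult useZeroLengthBitfieldAlignment() and
useBitFieldTypeAlignment(). As a minimal standalone sketch (not part of this
diff; struct names A and B are hypothetical), this is the source-level
behavior the layout code has to honor: on typical Itanium-ABI targets a ": 0"
member closes the current storage unit and aligns the next bit-field to its
declared type, which is why zero-length bit-fields end bit-field runs here.

    #include <cstdio>

    // A: both 4-bit fields pack into a single 32-bit storage unit.
    struct A { unsigned x : 4; unsigned y : 4; };

    // B: the zero-width bit-field ends the run, so y starts a fresh
    // storage unit aligned to unsigned; B is usually larger than A.
    struct B { unsigned x : 4; unsigned : 0; unsigned y : 4; };

    int main() {
      // On x86-64 this commonly prints sizeof(A)=4 sizeof(B)=8.
      std::printf("sizeof(A)=%zu sizeof(B)=%zu\n", sizeof(A), sizeof(B));
      return 0;
    }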
