1
0
mirror of https://git.FreeBSD.org/src.git synced 2024-10-18 02:19:39 +00:00

Merge llvm-project main llvmorg-18-init-16864-g3b3ee1f53424

This updates llvm, clang, compiler-rt, libc++, libunwind, lld, lldb and
openmp to llvm-project main llvmorg-18-init-16864-g3b3ee1f53424.

PR:		276104
MFC after:	1 month
This commit is contained in:
Dimitry Andric 2024-01-11 19:29:01 +01:00
commit 297eecfb02
247 changed files with 5238 additions and 2347 deletions

View File

@ -19,6 +19,7 @@
#include "clang/AST/SelectorLocationsKind.h"
#include "clang/Basic/IdentifierTable.h"
#include "clang/Basic/LLVM.h"
#include "clang/Basic/LangOptions.h"
#include "clang/Basic/SourceLocation.h"
#include "clang/Basic/Specifiers.h"
#include "llvm/ADT/ArrayRef.h"
@ -488,6 +489,15 @@ class alignas(8) Decl {
// Return true if this is a FileContext Decl.
bool isFileContextDecl() const;
/// Whether it resembles a flexible array member. This is a static member
/// because we want to be able to call it with a nullptr. That allows us to
/// perform non-Decl specific checks based on the object's type and strict
/// flex array level.
static bool isFlexibleArrayMemberLike(
ASTContext &Context, const Decl *D, QualType Ty,
LangOptions::StrictFlexArraysLevelKind StrictFlexArraysLevel,
bool IgnoreTemplateOrMacroSubstitution);
ASTContext &getASTContext() const LLVM_READONLY;
/// Helper to get the language options from the ASTContext.

View File

@ -1425,6 +1425,9 @@ class CXXRecordDecl : public RecordDecl {
/// (C++11 [class]p6).
bool isTriviallyCopyable() const;
/// Determine whether this class is considered trivially copyable per
bool isTriviallyCopyConstructible() const;
/// Determine whether this class is considered trivial.
///
/// C++11 [class]p6:

View File

@ -1631,8 +1631,10 @@ class CompoundStmt final
SourceLocation RB);
// Build an empty compound statement with a location.
explicit CompoundStmt(SourceLocation Loc)
: Stmt(CompoundStmtClass), LBraceLoc(Loc), RBraceLoc(Loc) {
explicit CompoundStmt(SourceLocation Loc) : CompoundStmt(Loc, Loc) {}
CompoundStmt(SourceLocation Loc, SourceLocation EndLoc)
: Stmt(CompoundStmtClass), LBraceLoc(Loc), RBraceLoc(EndLoc) {
CompoundStmtBits.NumStmts = 0;
CompoundStmtBits.HasFPFeatures = 0;
}

View File

@ -917,6 +917,9 @@ class QualType {
/// Return true if this is a trivially copyable type (C++0x [basic.types]p9)
bool isTriviallyCopyableType(const ASTContext &Context) const;
/// Return true if this is a trivially copyable type
bool isTriviallyCopyConstructibleType(const ASTContext &Context) const;
/// Return true if this is a trivially relocatable type.
bool isTriviallyRelocatableType(const ASTContext &Context) const;

View File

@ -4372,3 +4372,21 @@ def CodeAlign: StmtAttr {
static constexpr int MaximumAlignment = 4096;
}];
}
def CountedBy : InheritableAttr {
let Spellings = [Clang<"counted_by">];
let Subjects = SubjectList<[Field]>;
let Args = [IdentifierArgument<"CountedByField">];
let Documentation = [CountedByDocs];
let LangOpts = [COnly];
// FIXME: This is ugly. Let using a DeclArgument would be nice, but a Decl
// isn't yet available due to the fact that we're still parsing the
// structure. Maybe that code could be changed sometime in the future.
code AdditionalMembers = [{
private:
SourceRange CountedByFieldLoc;
public:
SourceRange getCountedByFieldLoc() const { return CountedByFieldLoc; }
void setCountedByFieldLoc(SourceRange Loc) { CountedByFieldLoc = Loc; }
}];
}

View File

@ -7749,3 +7749,81 @@ but do not pass them to the underlying coroutine or pass them by value.
.. _`CRT`: https://clang.llvm.org/docs/AttributeReference.html#coro-return-type
}];
}
def CountedByDocs : Documentation {
let Category = DocCatField;
let Content = [{
Clang supports the ``counted_by`` attribute on the flexible array member of a
structure in C. The argument for the attribute is the name of a field member
holding the count of elements in the flexible array. This information can be
used to improve the results of the array bound sanitizer and the
``__builtin_dynamic_object_size`` builtin. The ``count`` field member must be
within the same non-anonymous, enclosing struct as the flexible array member.
This example specifies that the flexible array member ``array`` has the number
of elements allocated for it in ``count``:
.. code-block:: c
struct bar;
struct foo {
size_t count;
char other;
struct bar *array[] __attribute__((counted_by(count)));
};
This establishes a relationship between ``array`` and ``count``. Specifically,
``array`` must have at least ``count`` number of elements available. It's the
user's responsibility to ensure that this relationship is maintained through
changes to the structure.
In the following example, the allocated array erroneously has fewer elements
than what's specified by ``p->count``. This would result in an out-of-bounds
access not being detected.
.. code-block:: c
#define SIZE_INCR 42
struct foo *p;
void foo_alloc(size_t count) {
p = malloc(MAX(sizeof(struct foo),
offsetof(struct foo, array[0]) + count * sizeof(struct bar *)));
p->count = count + SIZE_INCR;
}
The next example updates ``p->count``, but breaks the relationship requirement
that ``p->array`` must have at least ``p->count`` number of elements available:
.. code-block:: c
#define SIZE_INCR 42
struct foo *p;
void foo_alloc(size_t count) {
p = malloc(MAX(sizeof(struct foo),
offsetof(struct foo, array[0]) + count * sizeof(struct bar *)));
p->count = count;
}
void use_foo(int index, int val) {
p->count += SIZE_INCR + 1; /* 'count' is now larger than the number of elements of 'array'. */
p->array[index] = val; /* The sanitizer can't properly check this access. */
}
In this example, an update to ``p->count`` maintains the relationship
requirement:
.. code-block:: c
void use_foo(int index, int val) {
if (p->count == 0)
return;
--p->count;
p->array[index] = val;
}
}];
}

View File

@ -167,7 +167,7 @@ def err_verify_no_such_marker : Error<
def err_verify_missing_start : Error<
"cannot find start ('{{') of expected %0">;
def err_verify_missing_end : Error<
"cannot find end ('}}') of expected %0">;
"cannot find end ('%1') of expected %0">;
def err_verify_invalid_content : Error<
"invalid expected %0: %1">;
def err_verify_missing_regex : Error<

View File

@ -2253,6 +2253,8 @@ def warn_cxx17_compat_aggregate_init_paren_list : Warning<
def err_reference_bind_to_bitfield : Error<
"%select{non-const|volatile}0 reference cannot bind to "
"bit-field%select{| %1}2">;
def err_reference_bind_to_bitfield_in_cce : Error<
"reference cannot bind to bit-field in converted constant expression">;
def err_reference_bind_to_vector_element : Error<
"%select{non-const|volatile}0 reference cannot bind to vector element">;
def err_reference_bind_to_matrix_element : Error<
@ -6439,6 +6441,19 @@ def warn_superclass_variable_sized_type_not_at_end : Warning<
"field %0 can overwrite instance variable %1 with variable sized type %2"
" in superclass %3">, InGroup<ObjCFlexibleArray>;
def err_flexible_array_count_not_in_same_struct : Error<
"'counted_by' field %0 isn't within the same struct as the flexible array">;
def err_counted_by_attr_not_on_flexible_array_member : Error<
"'counted_by' only applies to C99 flexible array members">;
def err_counted_by_attr_refers_to_flexible_array : Error<
"'counted_by' cannot refer to the flexible array %0">;
def err_counted_by_must_be_in_structure : Error<
"field %0 in 'counted_by' not inside structure">;
def err_flexible_array_counted_by_attr_field_not_integer : Error<
"field %0 in 'counted_by' must be a non-boolean integer type">;
def note_flexible_array_counted_by_attr_field : Note<
"field %0 declared here">;
let CategoryName = "ARC Semantic Issue" in {
// ARC-mode diagnostics.

View File

@ -457,6 +457,7 @@ ENUM_LANGOPT(SignReturnAddressKey, SignReturnAddressKeyKind, 1, SignReturnAddres
"Key used for return address signing")
LANGOPT(BranchTargetEnforcement, 1, 0, "Branch-target enforcement enabled")
LANGOPT(BranchProtectionPAuthLR, 1, 0, "Use PC as a diversifier using PAuthLR NOP instructions.")
LANGOPT(GuardedControlStack, 1, 0, "Guarded control stack enabled")
LANGOPT(SpeculativeLoadHardening, 1, 0, "Speculative load hardening enabled")

View File

@ -1373,6 +1373,7 @@ class TargetInfo : public TransferrableTargetInfo,
LangOptions::SignReturnAddressKeyKind::AKey;
bool BranchTargetEnforcement = false;
bool BranchProtectionPAuthLR = false;
bool GuardedControlStack = false;
};
/// Determine if the Architecture in this TargetInfo supports branch

View File

@ -454,11 +454,11 @@ let TargetGuard = "sve,bf16" in {
let TargetGuard = "sve2p1" in {
// Contiguous truncating store from quadword (single vector).
def SVST1UWQ : MInst<"svst1uwq[_{d}]", "vPcd", "iUif", [IsStore], MemEltTyInt32, "aarch64_sve_st1uwq">;
def SVST1UWQ_VNUM : MInst<"svst1uwq_vnum[_{d}]", "vPcld", "iUif", [IsStore], MemEltTyInt32, "aarch64_sve_st1uwq">;
def SVST1UWQ : MInst<"svst1wq[_{d}]", "vPcd", "iUif", [IsStore], MemEltTyInt32, "aarch64_sve_st1wq">;
def SVST1UWQ_VNUM : MInst<"svst1wq_vnum[_{d}]", "vPcld", "iUif", [IsStore], MemEltTyInt32, "aarch64_sve_st1wq">;
def SVST1UDQ : MInst<"svst1udq[_{d}]", "vPcd", "lUld", [IsStore], MemEltTyInt64, "aarch64_sve_st1udq">;
def SVST1UDQ_VNUM : MInst<"svst1udq_vnum[_{d}]", "vPcld", "lUld", [IsStore], MemEltTyInt64, "aarch64_sve_st1udq">;
def SVST1UDQ : MInst<"svst1dq[_{d}]", "vPcd", "lUld", [IsStore], MemEltTyInt64, "aarch64_sve_st1dq">;
def SVST1UDQ_VNUM : MInst<"svst1dq_vnum[_{d}]", "vPcld", "lUld", [IsStore], MemEltTyInt64, "aarch64_sve_st1dq">;
// Store one vector (vector base + scalar offset)
def SVST1Q_SCATTER_U64BASE_OFFSET : MInst<"svst1q_scatter[_{2}base]_offset[_{d}]", "vPgld", "cUcsUsiUilUlfhdb", [IsScatterStore, IsByteIndexed], MemEltTyDefault, "aarch64_sve_st1q_scatter_scalar_offset">;
@ -2040,12 +2040,12 @@ let TargetGuard = "sve2p1|sme2" in {
}
let TargetGuard = "sve2p1" in {
def SVDOT_X2_S : SInst<"svdot[_{d}_{2}_{3}]", "ddhh", "i", MergeNone, "aarch64_sve_sdot_x2", [], []>;
def SVDOT_X2_U : SInst<"svdot[_{d}_{2}_{3}]", "ddhh", "Ui", MergeNone, "aarch64_sve_udot_x2", [], []>;
def SVDOT_X2_F : SInst<"svdot[_{d}_{2}_{3}]", "ddhh", "f", MergeNone, "aarch64_sve_fdot_x2", [], []>;
def SVDOT_LANE_X2_S : SInst<"svdot_lane[_{d}_{2}_{3}]", "ddhhi", "i", MergeNone, "aarch64_sve_sdot_lane_x2", [], [ImmCheck<3, ImmCheck0_3>]>;
def SVDOT_LANE_X2_U : SInst<"svdot_lane[_{d}_{2}_{3}]", "ddhhi", "Ui", MergeNone, "aarch64_sve_udot_lane_x2", [], [ImmCheck<3, ImmCheck0_3>]>;
def SVDOT_LANE_X2_F : SInst<"svdot_lane[_{d}_{2}_{3}]", "ddhhi", "f", MergeNone, "aarch64_sve_fdot_lane_x2", [], [ImmCheck<3, ImmCheck0_3>]>;
def SVDOT_X2_S : SInst<"svdot[_{d}_{2}]", "ddhh", "i", MergeNone, "aarch64_sve_sdot_x2", [], []>;
def SVDOT_X2_U : SInst<"svdot[_{d}_{2}]", "ddhh", "Ui", MergeNone, "aarch64_sve_udot_x2", [], []>;
def SVDOT_X2_F : SInst<"svdot[_{d}_{2}]", "ddhh", "f", MergeNone, "aarch64_sve_fdot_x2", [], []>;
def SVDOT_LANE_X2_S : SInst<"svdot_lane[_{d}_{2}]", "ddhhi", "i", MergeNone, "aarch64_sve_sdot_lane_x2", [], [ImmCheck<3, ImmCheck0_3>]>;
def SVDOT_LANE_X2_U : SInst<"svdot_lane[_{d}_{2}]", "ddhhi", "Ui", MergeNone, "aarch64_sve_udot_lane_x2", [], [ImmCheck<3, ImmCheck0_3>]>;
def SVDOT_LANE_X2_F : SInst<"svdot_lane[_{d}_{2}]", "ddhhi", "f", MergeNone, "aarch64_sve_fdot_lane_x2", [], [ImmCheck<3, ImmCheck0_3>]>;
}
let TargetGuard = "sve2p1|sme2" in {
@ -2208,7 +2208,7 @@ let TargetGuard = "sve2p1" in {
def SVTBLQ : SInst<"svtblq[_{d}]", "ddu", "cUcsUsiUilUlbhfd", MergeNone, "aarch64_sve_tblq">;
def SVTBXQ : SInst<"svtbxq[_{d}]", "dddu", "cUcsUsiUilUlbhfd", MergeNone, "aarch64_sve_tbxq">;
// EXTQ
def EXTQ : SInst<"svextq_lane[_{d}]", "dddk", "cUcsUsiUilUlbhfd", MergeNone, "aarch64_sve_extq_lane", [], [ImmCheck<2, ImmCheck0_15>]>;
def EXTQ : SInst<"svextq[_{d}]", "dddk", "cUcsUsiUilUlbhfd", MergeNone, "aarch64_sve_extq", [], [ImmCheck<2, ImmCheck0_15>]>;
// PMOV
// Move to Pred
multiclass PMOV_TO_PRED<string name, string types, string intrinsic, list<FlagType> flags=[], ImmCheckType immCh > {

View File

@ -4267,7 +4267,7 @@ def iquote : JoinedOrSeparate<["-"], "iquote">, Group<clang_i_Group>,
Visibility<[ClangOption, CC1Option]>,
HelpText<"Add directory to QUOTE include search path">, MetaVarName<"<directory>">;
def isysroot : JoinedOrSeparate<["-"], "isysroot">, Group<clang_i_Group>,
Visibility<[ClangOption, CC1Option]>,
Visibility<[ClangOption, CC1Option, FlangOption]>,
HelpText<"Set the system root directory (usually /)">, MetaVarName<"<dir>">,
MarshallingInfoString<HeaderSearchOpts<"Sysroot">, [{"/"}]>;
def isystem : JoinedOrSeparate<["-"], "isystem">, Group<clang_i_Group>,
@ -4585,11 +4585,13 @@ let Flags = [TargetSpecific] in {
def menable_experimental_extensions : Flag<["-"], "menable-experimental-extensions">, Group<m_Group>,
HelpText<"Enable use of experimental RISC-V extensions.">;
def mrvv_vector_bits_EQ : Joined<["-"], "mrvv-vector-bits=">, Group<m_Group>,
HelpText<"Specify the size in bits of an RVV vector register. Defaults to "
"the vector length agnostic value of \"scalable\". Accepts power of "
"2 values between 64 and 65536. Also accepts \"zvl\" "
"to use the value implied by -march/-mcpu. Value will be reflected "
"in __riscv_v_fixed_vlen preprocessor define (RISC-V only)">;
Visibility<[ClangOption, FlangOption]>,
HelpText<"Specify the size in bits of an RVV vector register">,
DocBrief<"Defaults to the vector length agnostic value of \"scalable\". "
"Accepts power of 2 values between 64 and 65536. Also accepts "
"\"zvl\" to use the value implied by -march/-mcpu. On Clang, value "
"will be reflected in __riscv_v_fixed_vlen preprocessor define "
"(RISC-V only)">;
def munaligned_access : Flag<["-"], "munaligned-access">, Group<m_Group>,
HelpText<"Allow memory accesses to be unaligned (AArch32/AArch64/LoongArch/RISC-V only)">;
@ -5197,7 +5199,7 @@ def nohipwrapperinc : Flag<["-"], "nohipwrapperinc">, Group<IncludePath_Group>,
HelpText<"Do not include the default HIP wrapper headers and include paths">;
def : Flag<["-"], "nocudainc">, Alias<nogpuinc>;
def nogpulib : Flag<["-"], "nogpulib">, MarshallingInfoFlag<LangOpts<"NoGPULib">>,
Visibility<[ClangOption, CC1Option]>,
Visibility<[ClangOption, CC1Option, FlangOption, FC1Option]>,
HelpText<"Do not link device library for CUDA/HIP device compilation">;
def : Flag<["-"], "nocudalib">, Alias<nogpulib>;
def gpulibc : Flag<["-"], "gpulibc">, Visibility<[ClangOption, CC1Option, FlangOption, FC1Option]>,
@ -7010,6 +7012,8 @@ def mbranch_target_enforce : Flag<["-"], "mbranch-target-enforce">,
MarshallingInfoFlag<LangOpts<"BranchTargetEnforcement">>;
def mbranch_protection_pauth_lr : Flag<["-"], "mbranch-protection-pauth-lr">,
MarshallingInfoFlag<LangOpts<"BranchProtectionPAuthLR">>;
def mguarded_control_stack : Flag<["-"], "mguarded-control-stack">,
MarshallingInfoFlag<LangOpts<"GuardedControlStack">>;
def fno_dllexport_inlines : Flag<["-"], "fno-dllexport-inlines">,
MarshallingInfoNegativeFlag<LangOpts<"DllExportInlines">>;
def cfguard_no_checks : Flag<["-"], "cfguard-no-checks">,

View File

@ -225,6 +225,22 @@ struct FormatStyle {
/// bbb = 2;
/// \endcode
bool AlignCompound;
/// Only for ``AlignConsecutiveDeclarations``. Whether function pointers are
/// aligned.
/// \code
/// true:
/// unsigned i;
/// int &r;
/// int *p;
/// int (*f)();
///
/// false:
/// unsigned i;
/// int &r;
/// int *p;
/// int (*f)();
/// \endcode
bool AlignFunctionPointers;
/// Only for ``AlignConsecutiveAssignments``. Whether short assignment
/// operators are left-padded to the same length as long ones in order to
/// put all assignment operators to the right of the left hand side.
@ -247,7 +263,9 @@ struct FormatStyle {
bool operator==(const AlignConsecutiveStyle &R) const {
return Enabled == R.Enabled && AcrossEmptyLines == R.AcrossEmptyLines &&
AcrossComments == R.AcrossComments &&
AlignCompound == R.AlignCompound && PadOperators == R.PadOperators;
AlignCompound == R.AlignCompound &&
AlignFunctionPointers == R.AlignFunctionPointers &&
PadOperators == R.PadOperators;
}
bool operator!=(const AlignConsecutiveStyle &R) const {
return !(*this == R);

View File

@ -234,6 +234,26 @@ class Parser : public CodeCompletionHandler {
/// Parsing OpenACC directive mode.
bool OpenACCDirectiveParsing = false;
/// Currently parsing a situation where an OpenACC array section could be
/// legal, such as a 'var-list'.
bool AllowOpenACCArraySections = false;
/// RAII object to set reset OpenACC parsing a context where Array Sections
/// are allowed.
class OpenACCArraySectionRAII {
Parser &P;
public:
OpenACCArraySectionRAII(Parser &P) : P(P) {
assert(!P.AllowOpenACCArraySections);
P.AllowOpenACCArraySections = true;
}
~OpenACCArraySectionRAII() {
assert(P.AllowOpenACCArraySections);
P.AllowOpenACCArraySections = false;
}
};
/// When true, we are directly inside an Objective-C message
/// send expression.
///
@ -3546,8 +3566,8 @@ class Parser : public CodeCompletionHandler {
ExprResult ParseOpenACCIDExpression();
/// Parses the variable list for the `cache` construct.
void ParseOpenACCCacheVarList();
/// Parses a single variable in a variable list for the 'cache' construct.
bool ParseOpenACCCacheVar();
/// Parses a single variable in a variable list for OpenACC.
bool ParseOpenACCVar();
bool ParseOpenACCWaitArgument();
private:

View File

@ -4799,6 +4799,8 @@ class Sema final {
bool CheckAlwaysInlineAttr(const Stmt *OrigSt, const Stmt *CurSt,
const AttributeCommonInfo &A);
bool CheckCountedByAttr(Scope *Scope, const FieldDecl *FD);
/// Adjust the calling convention of a method to be the ABI default if it
/// wasn't specified explicitly. This handles method types formed from
/// function type typedefs and typename template arguments.
@ -5642,6 +5644,7 @@ class Sema final {
CorrectionCandidateCallback &CCC,
TemplateArgumentListInfo *ExplicitTemplateArgs = nullptr,
ArrayRef<Expr *> Args = std::nullopt,
DeclContext *LookupCtx = nullptr,
TypoExpr **Out = nullptr);
DeclResult LookupIvarInObjCMethod(LookupResult &Lookup, Scope *S,

View File

@ -282,7 +282,7 @@ class CorrectionCandidateCallback {
public:
static const unsigned InvalidDistance = TypoCorrection::InvalidDistance;
explicit CorrectionCandidateCallback(IdentifierInfo *Typo = nullptr,
explicit CorrectionCandidateCallback(const IdentifierInfo *Typo = nullptr,
NestedNameSpecifier *TypoNNS = nullptr)
: Typo(Typo), TypoNNS(TypoNNS) {}
@ -319,7 +319,7 @@ class CorrectionCandidateCallback {
/// this method.
virtual std::unique_ptr<CorrectionCandidateCallback> clone() = 0;
void setTypoName(IdentifierInfo *II) { Typo = II; }
void setTypoName(const IdentifierInfo *II) { Typo = II; }
void setTypoNNS(NestedNameSpecifier *NNS) { TypoNNS = NNS; }
// Flags for context-dependent keywords. WantFunctionLikeCasts is only
@ -345,13 +345,13 @@ class CorrectionCandidateCallback {
candidate.getCorrectionSpecifier() == TypoNNS;
}
IdentifierInfo *Typo;
const IdentifierInfo *Typo;
NestedNameSpecifier *TypoNNS;
};
class DefaultFilterCCC final : public CorrectionCandidateCallback {
public:
explicit DefaultFilterCCC(IdentifierInfo *Typo = nullptr,
explicit DefaultFilterCCC(const IdentifierInfo *Typo = nullptr,
NestedNameSpecifier *TypoNNS = nullptr)
: CorrectionCandidateCallback(Typo, TypoNNS) {}
@ -365,6 +365,10 @@ class DefaultFilterCCC final : public CorrectionCandidateCallback {
template <class C>
class DeclFilterCCC final : public CorrectionCandidateCallback {
public:
explicit DeclFilterCCC(const IdentifierInfo *Typo = nullptr,
NestedNameSpecifier *TypoNNS = nullptr)
: CorrectionCandidateCallback(Typo, TypoNNS) {}
bool ValidateCandidate(const TypoCorrection &candidate) override {
return candidate.getCorrectionDeclAs<C>();
}

View File

@ -1318,6 +1318,13 @@ void ASTContext::InitBuiltinTypes(const TargetInfo &Target,
InitBuiltinType(OMPArrayShapingTy, BuiltinType::OMPArrayShaping);
InitBuiltinType(OMPIteratorTy, BuiltinType::OMPIterator);
}
// Placeholder type for OpenACC array sections.
if (LangOpts.OpenACC) {
// FIXME: Once we implement OpenACC array sections in Sema, this will either
// be combined with the OpenMP type, or given its own type. In the meantime,
// just use the OpenMP type so that parsing can work.
InitBuiltinType(OMPArraySectionTy, BuiltinType::OMPArraySection);
}
if (LangOpts.MatrixTypes)
InitBuiltinType(IncompleteMatrixIdxTy, BuiltinType::IncompleteMatrixIdx);

View File

@ -5929,15 +5929,22 @@ ExpectedDecl ASTNodeImporter::VisitClassTemplateDecl(ClassTemplateDecl *D) {
if (ToD)
return ToD;
bool IsFriendTemplate = D->getFriendObjectKind() != Decl::FOK_None;
bool IsDependentContext = DC != LexicalDC ? LexicalDC->isDependentContext()
: DC->isDependentContext();
bool DependentFriend = IsFriendTemplate && IsDependentContext;
// Should check if a declaration is friend in a dependent context.
// Such templates are not linked together in a declaration chain.
// The ASTImporter strategy is to map existing forward declarations to
// imported ones only if strictly necessary, otherwise import these as new
// forward declarations. In case of the "dependent friend" declarations, new
// declarations are created, but not linked in a declaration chain.
auto IsDependentFriend = [](ClassTemplateDecl *TD) {
return TD->getFriendObjectKind() != Decl::FOK_None &&
TD->getLexicalDeclContext()->isDependentContext();
};
bool DependentFriend = IsDependentFriend(D);
ClassTemplateDecl *FoundByLookup = nullptr;
// We may already have a template of the same name; try to find and match it.
if (!DependentFriend && !DC->isFunctionOrMethod()) {
if (!DC->isFunctionOrMethod()) {
SmallVector<NamedDecl *, 4> ConflictingDecls;
auto FoundDecls = Importer.findDeclsInToCtx(DC, Name);
for (auto *FoundDecl : FoundDecls) {
@ -5953,10 +5960,13 @@ ExpectedDecl ASTNodeImporter::VisitClassTemplateDecl(ClassTemplateDecl *D) {
// FIXME: sufficient conditon for 'IgnoreTemplateParmDepth'?
bool IgnoreTemplateParmDepth =
FoundTemplate->getFriendObjectKind() != Decl::FOK_None &&
!D->specializations().empty();
(FoundTemplate->getFriendObjectKind() != Decl::FOK_None) !=
(D->getFriendObjectKind() != Decl::FOK_None);
if (IsStructuralMatch(D, FoundTemplate, /*Complain=*/true,
IgnoreTemplateParmDepth)) {
if (DependentFriend || IsDependentFriend(FoundTemplate))
continue;
ClassTemplateDecl *TemplateWithDef =
getTemplateDefinition(FoundTemplate);
if (D->isThisDeclarationADefinition() && TemplateWithDef)
@ -9030,6 +9040,10 @@ class AttrImporter {
public:
AttrImporter(ASTImporter &I) : Importer(I), NImporter(I) {}
// Useful for accessing the imported attribute.
template <typename T> T *castAttrAs() { return cast<T>(ToAttr); }
template <typename T> const T *castAttrAs() const { return cast<T>(ToAttr); }
// Create an "importer" for an attribute parameter.
// Result of the 'value()' of that object is to be passed to the function
// 'importAttr', in the order that is expected by the attribute class.
@ -9243,6 +9257,15 @@ Expected<Attr *> ASTImporter::Import(const Attr *FromAttr) {
From->args_size());
break;
}
case attr::CountedBy: {
AI.cloneAttr(FromAttr);
const auto *CBA = cast<CountedByAttr>(FromAttr);
Expected<SourceRange> SR = Import(CBA->getCountedByFieldLoc()).get();
if (!SR)
return SR.takeError();
AI.castAttrAs<CountedByAttr>()->setCountedByFieldLoc(SR.get());
break;
}
default: {
// The default branch works for attributes that have no arguments to import.

View File

@ -29,7 +29,6 @@
#include "clang/AST/Type.h"
#include "clang/Basic/IdentifierTable.h"
#include "clang/Basic/LLVM.h"
#include "clang/Basic/LangOptions.h"
#include "clang/Basic/Module.h"
#include "clang/Basic/ObjCRuntime.h"
#include "clang/Basic/PartialDiagnostic.h"
@ -411,6 +410,79 @@ bool Decl::isFileContextDecl() const {
return DC && DC->isFileContext();
}
bool Decl::isFlexibleArrayMemberLike(
ASTContext &Ctx, const Decl *D, QualType Ty,
LangOptions::StrictFlexArraysLevelKind StrictFlexArraysLevel,
bool IgnoreTemplateOrMacroSubstitution) {
// For compatibility with existing code, we treat arrays of length 0 or
// 1 as flexible array members.
const auto *CAT = Ctx.getAsConstantArrayType(Ty);
if (CAT) {
using FAMKind = LangOptions::StrictFlexArraysLevelKind;
llvm::APInt Size = CAT->getSize();
if (StrictFlexArraysLevel == FAMKind::IncompleteOnly)
return false;
// GCC extension, only allowed to represent a FAM.
if (Size.isZero())
return true;
if (StrictFlexArraysLevel == FAMKind::ZeroOrIncomplete && Size.uge(1))
return false;
if (StrictFlexArraysLevel == FAMKind::OneZeroOrIncomplete && Size.uge(2))
return false;
} else if (!Ctx.getAsIncompleteArrayType(Ty)) {
return false;
}
if (const auto *OID = dyn_cast_if_present<ObjCIvarDecl>(D))
return OID->getNextIvar() == nullptr;
const auto *FD = dyn_cast_if_present<FieldDecl>(D);
if (!FD)
return false;
if (CAT) {
// GCC treats an array memeber of a union as an FAM if the size is one or
// zero.
llvm::APInt Size = CAT->getSize();
if (FD->getParent()->isUnion() && (Size.isZero() || Size.isOne()))
return true;
}
// Don't consider sizes resulting from macro expansions or template argument
// substitution to form C89 tail-padded arrays.
if (IgnoreTemplateOrMacroSubstitution) {
TypeSourceInfo *TInfo = FD->getTypeSourceInfo();
while (TInfo) {
TypeLoc TL = TInfo->getTypeLoc();
// Look through typedefs.
if (TypedefTypeLoc TTL = TL.getAsAdjusted<TypedefTypeLoc>()) {
const TypedefNameDecl *TDL = TTL.getTypedefNameDecl();
TInfo = TDL->getTypeSourceInfo();
continue;
}
if (auto CTL = TL.getAs<ConstantArrayTypeLoc>()) {
if (const Expr *SizeExpr =
dyn_cast_if_present<IntegerLiteral>(CTL.getSizeExpr());
!SizeExpr || SizeExpr->getExprLoc().isMacroID())
return false;
}
break;
}
}
// Test that the field is the last in the structure.
RecordDecl::field_iterator FI(
DeclContext::decl_iterator(const_cast<FieldDecl *>(FD)));
return ++FI == FD->getParent()->field_end();
}
TranslationUnitDecl *Decl::getTranslationUnitDecl() {
if (auto *TUD = dyn_cast<TranslationUnitDecl>(this))
return TUD;

View File

@ -587,6 +587,19 @@ bool CXXRecordDecl::isTriviallyCopyable() const {
return true;
}
bool CXXRecordDecl::isTriviallyCopyConstructible() const {
// A trivially copy constructible class is a class that:
// -- has no non-trivial copy constructors,
if (hasNonTrivialCopyConstructor())
return false;
// -- has a trivial destructor.
if (!hasTrivialDestructor())
return false;
return true;
}
void CXXRecordDecl::markedVirtualFunctionPure() {
// C++ [class.abstract]p2:
// A class is abstract if it has at least one pure virtual function.

View File

@ -205,85 +205,22 @@ bool Expr::isKnownToHaveBooleanValue(bool Semantic) const {
}
bool Expr::isFlexibleArrayMemberLike(
ASTContext &Context,
ASTContext &Ctx,
LangOptions::StrictFlexArraysLevelKind StrictFlexArraysLevel,
bool IgnoreTemplateOrMacroSubstitution) const {
// For compatibility with existing code, we treat arrays of length 0 or
// 1 as flexible array members.
const auto *CAT = Context.getAsConstantArrayType(getType());
if (CAT) {
llvm::APInt Size = CAT->getSize();
using FAMKind = LangOptions::StrictFlexArraysLevelKind;
if (StrictFlexArraysLevel == FAMKind::IncompleteOnly)
return false;
// GCC extension, only allowed to represent a FAM.
if (Size == 0)
return true;
if (StrictFlexArraysLevel == FAMKind::ZeroOrIncomplete && Size.uge(1))
return false;
if (StrictFlexArraysLevel == FAMKind::OneZeroOrIncomplete && Size.uge(2))
return false;
} else if (!Context.getAsIncompleteArrayType(getType()))
return false;
const Expr *E = IgnoreParens();
const Decl *D = nullptr;
const NamedDecl *ND = nullptr;
if (const auto *DRE = dyn_cast<DeclRefExpr>(E))
ND = DRE->getDecl();
else if (const auto *ME = dyn_cast<MemberExpr>(E))
ND = ME->getMemberDecl();
if (const auto *ME = dyn_cast<MemberExpr>(E))
D = ME->getMemberDecl();
else if (const auto *DRE = dyn_cast<DeclRefExpr>(E))
D = DRE->getDecl();
else if (const auto *IRE = dyn_cast<ObjCIvarRefExpr>(E))
return IRE->getDecl()->getNextIvar() == nullptr;
D = IRE->getDecl();
if (!ND)
return false;
// A flexible array member must be the last member in the class.
// FIXME: If the base type of the member expr is not FD->getParent(),
// this should not be treated as a flexible array member access.
if (const auto *FD = dyn_cast<FieldDecl>(ND)) {
// GCC treats an array memeber of a union as an FAM if the size is one or
// zero.
if (CAT) {
llvm::APInt Size = CAT->getSize();
if (FD->getParent()->isUnion() && (Size.isZero() || Size.isOne()))
return true;
}
// Don't consider sizes resulting from macro expansions or template argument
// substitution to form C89 tail-padded arrays.
if (IgnoreTemplateOrMacroSubstitution) {
TypeSourceInfo *TInfo = FD->getTypeSourceInfo();
while (TInfo) {
TypeLoc TL = TInfo->getTypeLoc();
// Look through typedefs.
if (TypedefTypeLoc TTL = TL.getAsAdjusted<TypedefTypeLoc>()) {
const TypedefNameDecl *TDL = TTL.getTypedefNameDecl();
TInfo = TDL->getTypeSourceInfo();
continue;
}
if (ConstantArrayTypeLoc CTL = TL.getAs<ConstantArrayTypeLoc>()) {
const Expr *SizeExpr = dyn_cast<IntegerLiteral>(CTL.getSizeExpr());
if (!SizeExpr || SizeExpr->getExprLoc().isMacroID())
return false;
}
break;
}
}
RecordDecl::field_iterator FI(
DeclContext::decl_iterator(const_cast<FieldDecl *>(FD)));
return ++FI == FD->getParent()->field_end();
}
return false;
return Decl::isFlexibleArrayMemberLike(Ctx, D, E->getType(),
StrictFlexArraysLevel,
IgnoreTemplateOrMacroSubstitution);
}
const ValueDecl *

View File

@ -114,6 +114,8 @@ bool ByteCodeExprGen<Emitter>::VisitCastExpr(const CastExpr *CE) {
}
case CK_FloatingCast: {
if (DiscardResult)
return this->discard(SubExpr);
if (!this->visit(SubExpr))
return false;
const auto *TargetSemantics = &Ctx.getFloatSemantics(CE->getType());
@ -121,6 +123,8 @@ bool ByteCodeExprGen<Emitter>::VisitCastExpr(const CastExpr *CE) {
}
case CK_IntegralToFloating: {
if (DiscardResult)
return this->discard(SubExpr);
std::optional<PrimType> FromT = classify(SubExpr->getType());
if (!FromT)
return false;
@ -135,6 +139,9 @@ bool ByteCodeExprGen<Emitter>::VisitCastExpr(const CastExpr *CE) {
case CK_FloatingToBoolean:
case CK_FloatingToIntegral: {
if (DiscardResult)
return this->discard(SubExpr);
std::optional<PrimType> ToT = classify(CE->getType());
if (!ToT)

View File

@ -275,8 +275,8 @@ Descriptor::Descriptor(const DeclTy &D, const Descriptor *Elem, MetadataSize MD,
}
/// Unknown-size arrays of composite elements.
Descriptor::Descriptor(const DeclTy &D, Descriptor *Elem, bool IsTemporary,
UnknownSize)
Descriptor::Descriptor(const DeclTy &D, const Descriptor *Elem,
bool IsTemporary, UnknownSize)
: Source(D), ElemSize(Elem->getAllocSize() + sizeof(InlineDescriptor)),
Size(UnknownSizeMark), MDSize(0),
AllocSize(alignof(void *) + sizeof(InitMapPtr)), ElemDesc(Elem),
@ -286,7 +286,7 @@ Descriptor::Descriptor(const DeclTy &D, Descriptor *Elem, bool IsTemporary,
}
/// Composite records.
Descriptor::Descriptor(const DeclTy &D, Record *R, MetadataSize MD,
Descriptor::Descriptor(const DeclTy &D, const Record *R, MetadataSize MD,
bool IsConst, bool IsTemporary, bool IsMutable)
: Source(D), ElemSize(std::max<size_t>(alignof(void *), R->getFullSize())),
Size(ElemSize), MDSize(MD.value_or(0)), AllocSize(Size + MDSize),

View File

@ -100,7 +100,7 @@ struct Descriptor final {
static constexpr MetadataSize InlineDescMD = sizeof(InlineDescriptor);
/// Pointer to the record, if block contains records.
Record *const ElemRecord = nullptr;
const Record *const ElemRecord = nullptr;
/// Descriptor of the array element.
const Descriptor *const ElemDesc = nullptr;
/// Flag indicating if the block is mutable.
@ -135,10 +135,11 @@ struct Descriptor final {
unsigned NumElems, bool IsConst, bool IsTemporary, bool IsMutable);
/// Allocates a descriptor for an array of composites of unknown size.
Descriptor(const DeclTy &D, Descriptor *Elem, bool IsTemporary, UnknownSize);
Descriptor(const DeclTy &D, const Descriptor *Elem, bool IsTemporary,
UnknownSize);
/// Allocates a descriptor for a record.
Descriptor(const DeclTy &D, Record *R, MetadataSize MD, bool IsConst,
Descriptor(const DeclTy &D, const Record *R, MetadataSize MD, bool IsConst,
bool IsTemporary, bool IsMutable);
Descriptor(const DeclTy &D, MetadataSize MD);

View File

@ -134,6 +134,18 @@ void cleanupAfterFunctionCall(InterpState &S, CodePtr OpPC) {
if (CurFunc->isUnevaluatedBuiltin())
return;
// Some builtin functions require us to only look at the call site, since
// the classified parameter types do not match.
if (CurFunc->isBuiltin()) {
const auto *CE =
cast<CallExpr>(S.Current->Caller->getExpr(S.Current->getRetPC()));
for (int32_t I = CE->getNumArgs() - 1; I >= 0; --I) {
const Expr *A = CE->getArg(I);
popArg(S, A);
}
return;
}
if (S.Current->Caller && CurFunc->isVariadic()) {
// CallExpr we're look for is at the return PC of the current function, i.e.
// in the caller.

View File

@ -164,6 +164,8 @@ static bool retPrimValue(InterpState &S, CodePtr OpPC, APValue &Result,
case X: \
return Ret<X>(S, OpPC, Result);
switch (*T) {
RET_CASE(PT_Ptr);
RET_CASE(PT_FnPtr);
RET_CASE(PT_Float);
RET_CASE(PT_Bool);
RET_CASE(PT_Sint8);
@ -613,15 +615,34 @@ static bool interp__builtin_ffs(InterpState &S, CodePtr OpPC,
return true;
}
static bool interp__builtin_addressof(InterpState &S, CodePtr OpPC,
const InterpFrame *Frame,
const Function *Func,
const CallExpr *Call) {
PrimType PtrT =
S.getContext().classify(Call->getArg(0)->getType()).value_or(PT_Ptr);
if (PtrT == PT_FnPtr) {
const FunctionPointer &Arg = S.Stk.peek<FunctionPointer>();
S.Stk.push<FunctionPointer>(Arg);
} else if (PtrT == PT_Ptr) {
const Pointer &Arg = S.Stk.peek<Pointer>();
S.Stk.push<Pointer>(Arg);
} else {
assert(false && "Unsupported pointer type passed to __builtin_addressof()");
}
return true;
}
bool InterpretBuiltin(InterpState &S, CodePtr OpPC, const Function *F,
const CallExpr *Call) {
InterpFrame *Frame = S.Current;
APValue Dummy;
QualType ReturnType = Call->getCallReturnType(S.getCtx());
std::optional<PrimType> ReturnT = S.getContext().classify(ReturnType);
std::optional<PrimType> ReturnT = S.getContext().classify(Call->getType());
// If classify failed, we assume void.
assert(ReturnT || ReturnType->isVoidType());
assert(ReturnT || Call->getType()->isVoidType());
switch (F->getBuiltinID()) {
case Builtin::BI__builtin_is_constant_evaluated:
@ -820,6 +841,12 @@ bool InterpretBuiltin(InterpState &S, CodePtr OpPC, const Function *F,
if (!interp__builtin_ffs(S, OpPC, Frame, F, Call))
return false;
break;
case Builtin::BIaddressof:
case Builtin::BI__addressof:
case Builtin::BI__builtin_addressof:
if (!interp__builtin_addressof(S, OpPC, Frame, F, Call))
return false;
break;
default:
return false;

View File

@ -315,14 +315,14 @@ Descriptor *Program::createDescriptor(const DeclTy &D, const Type *Ty,
bool IsConst, bool IsTemporary,
bool IsMutable, const Expr *Init) {
// Classes and structures.
if (auto *RT = Ty->getAs<RecordType>()) {
if (auto *Record = getOrCreateRecord(RT->getDecl()))
if (const auto *RT = Ty->getAs<RecordType>()) {
if (const auto *Record = getOrCreateRecord(RT->getDecl()))
return allocateDescriptor(D, Record, MDSize, IsConst, IsTemporary,
IsMutable);
}
// Arrays.
if (auto ArrayType = Ty->getAsArrayTypeUnsafe()) {
if (const auto ArrayType = Ty->getAsArrayTypeUnsafe()) {
QualType ElemTy = ArrayType->getElementType();
// Array of well-known bounds.
if (auto CAT = dyn_cast<ConstantArrayType>(ArrayType)) {
@ -338,7 +338,7 @@ Descriptor *Program::createDescriptor(const DeclTy &D, const Type *Ty,
} else {
// Arrays of composites. In this case, the array is a list of pointers,
// followed by the actual elements.
Descriptor *ElemDesc = createDescriptor(
const Descriptor *ElemDesc = createDescriptor(
D, ElemTy.getTypePtr(), std::nullopt, IsConst, IsTemporary);
if (!ElemDesc)
return nullptr;
@ -358,8 +358,8 @@ Descriptor *Program::createDescriptor(const DeclTy &D, const Type *Ty,
return allocateDescriptor(D, *T, IsTemporary,
Descriptor::UnknownSize{});
} else {
Descriptor *Desc = createDescriptor(D, ElemTy.getTypePtr(), MDSize,
IsConst, IsTemporary);
const Descriptor *Desc = createDescriptor(D, ElemTy.getTypePtr(),
MDSize, IsConst, IsTemporary);
if (!Desc)
return nullptr;
return allocateDescriptor(D, Desc, IsTemporary,
@ -369,14 +369,14 @@ Descriptor *Program::createDescriptor(const DeclTy &D, const Type *Ty,
}
// Atomic types.
if (auto *AT = Ty->getAs<AtomicType>()) {
if (const auto *AT = Ty->getAs<AtomicType>()) {
const Type *InnerTy = AT->getValueType().getTypePtr();
return createDescriptor(D, InnerTy, MDSize, IsConst, IsTemporary,
IsMutable);
}
// Complex types - represented as arrays of elements.
if (auto *CT = Ty->getAs<ComplexType>()) {
if (const auto *CT = Ty->getAs<ComplexType>()) {
PrimType ElemTy = *Ctx.classify(CT->getElementType());
return allocateDescriptor(D, ElemTy, MDSize, 2, IsConst, IsTemporary,
IsMutable);

View File

@ -2604,19 +2604,22 @@ bool QualType::isTrivialType(const ASTContext &Context) const {
return false;
}
bool QualType::isTriviallyCopyableType(const ASTContext &Context) const {
if ((*this)->isArrayType())
return Context.getBaseElementType(*this).isTriviallyCopyableType(Context);
static bool isTriviallyCopyableTypeImpl(const QualType &type,
const ASTContext &Context,
bool IsCopyConstructible) {
if (type->isArrayType())
return isTriviallyCopyableTypeImpl(Context.getBaseElementType(type),
Context, IsCopyConstructible);
if (hasNonTrivialObjCLifetime())
if (type.hasNonTrivialObjCLifetime())
return false;
// C++11 [basic.types]p9 - See Core 2094
// Scalar types, trivially copyable class types, arrays of such types, and
// cv-qualified versions of these types are collectively
// called trivially copyable types.
// called trivially copy constructible types.
QualType CanonicalType = getCanonicalType();
QualType CanonicalType = type.getCanonicalType();
if (CanonicalType->isDependentType())
return false;
@ -2634,16 +2637,29 @@ bool QualType::isTriviallyCopyableType(const ASTContext &Context) const {
if (const auto *RT = CanonicalType->getAs<RecordType>()) {
if (const auto *ClassDecl = dyn_cast<CXXRecordDecl>(RT->getDecl())) {
if (!ClassDecl->isTriviallyCopyable()) return false;
if (IsCopyConstructible) {
return ClassDecl->isTriviallyCopyConstructible();
} else {
return ClassDecl->isTriviallyCopyable();
}
}
return true;
}
// No other types can match.
return false;
}
bool QualType::isTriviallyCopyableType(const ASTContext &Context) const {
return isTriviallyCopyableTypeImpl(*this, Context,
/*IsCopyConstructible=*/false);
}
bool QualType::isTriviallyCopyConstructibleType(
const ASTContext &Context) const {
return isTriviallyCopyableTypeImpl(*this, Context,
/*IsCopyConstructible=*/true);
}
bool QualType::isTriviallyRelocatableType(const ASTContext &Context) const {
QualType BaseElementType = Context.getBaseElementType(*this);

View File

@ -15,6 +15,81 @@
namespace clang {
using namespace ast_matchers;
// Check if result of Source expression could be a Target expression.
// Checks:
// - Implicit Casts
// - Binary Operators
// - ConditionalOperator
// - BinaryConditionalOperator
static bool canExprResolveTo(const Expr *Source, const Expr *Target) {
const auto IgnoreDerivedToBase = [](const Expr *E, auto Matcher) {
if (Matcher(E))
return true;
if (const auto *Cast = dyn_cast<ImplicitCastExpr>(E)) {
if ((Cast->getCastKind() == CK_DerivedToBase ||
Cast->getCastKind() == CK_UncheckedDerivedToBase) &&
Matcher(Cast->getSubExpr()))
return true;
}
return false;
};
const auto EvalCommaExpr = [](const Expr *E, auto Matcher) {
const Expr *Result = E;
while (const auto *BOComma =
dyn_cast_or_null<BinaryOperator>(Result->IgnoreParens())) {
if (!BOComma->isCommaOp())
break;
Result = BOComma->getRHS();
}
return Result != E && Matcher(Result);
};
// The 'ConditionalOperatorM' matches on `<anything> ? <expr> : <expr>`.
// This matching must be recursive because `<expr>` can be anything resolving
// to the `InnerMatcher`, for example another conditional operator.
// The edge-case `BaseClass &b = <cond> ? DerivedVar1 : DerivedVar2;`
// is handled, too. The implicit cast happens outside of the conditional.
// This is matched by `IgnoreDerivedToBase(canResolveToExpr(InnerMatcher))`
// below.
const auto ConditionalOperatorM = [Target](const Expr *E) {
if (const auto *OP = dyn_cast<ConditionalOperator>(E)) {
if (const auto *TE = OP->getTrueExpr()->IgnoreParens())
if (canExprResolveTo(TE, Target))
return true;
if (const auto *FE = OP->getFalseExpr()->IgnoreParens())
if (canExprResolveTo(FE, Target))
return true;
}
return false;
};
const auto ElvisOperator = [Target](const Expr *E) {
if (const auto *OP = dyn_cast<BinaryConditionalOperator>(E)) {
if (const auto *TE = OP->getTrueExpr()->IgnoreParens())
if (canExprResolveTo(TE, Target))
return true;
if (const auto *FE = OP->getFalseExpr()->IgnoreParens())
if (canExprResolveTo(FE, Target))
return true;
}
return false;
};
const Expr *SourceExprP = Source->IgnoreParens();
return IgnoreDerivedToBase(SourceExprP,
[&](const Expr *E) {
return E == Target || ConditionalOperatorM(E) ||
ElvisOperator(E);
}) ||
EvalCommaExpr(SourceExprP, [&](const Expr *E) {
return IgnoreDerivedToBase(
E->IgnoreParens(), [&](const Expr *EE) { return EE == Target; });
});
}
namespace {
AST_MATCHER_P(LambdaExpr, hasCaptureInit, const Expr *, E) {
@ -27,56 +102,14 @@ AST_MATCHER_P(CXXForRangeStmt, hasRangeStmt,
return InnerMatcher.matches(*Range, Finder, Builder);
}
AST_MATCHER_P(Expr, maybeEvalCommaExpr, ast_matchers::internal::Matcher<Expr>,
InnerMatcher) {
const Expr *Result = &Node;
while (const auto *BOComma =
dyn_cast_or_null<BinaryOperator>(Result->IgnoreParens())) {
if (!BOComma->isCommaOp())
break;
Result = BOComma->getRHS();
}
return InnerMatcher.matches(*Result, Finder, Builder);
}
AST_MATCHER_P(Stmt, canResolveToExpr, ast_matchers::internal::Matcher<Stmt>,
InnerMatcher) {
AST_MATCHER_P(Stmt, canResolveToExpr, const Stmt *, Inner) {
auto *Exp = dyn_cast<Expr>(&Node);
if (!Exp) {
return stmt().matches(Node, Finder, Builder);
}
auto DerivedToBase = [](const ast_matchers::internal::Matcher<Expr> &Inner) {
return implicitCastExpr(anyOf(hasCastKind(CK_DerivedToBase),
hasCastKind(CK_UncheckedDerivedToBase)),
hasSourceExpression(Inner));
};
auto IgnoreDerivedToBase =
[&DerivedToBase](const ast_matchers::internal::Matcher<Expr> &Inner) {
return ignoringParens(expr(anyOf(Inner, DerivedToBase(Inner))));
};
// The 'ConditionalOperator' matches on `<anything> ? <expr> : <expr>`.
// This matching must be recursive because `<expr>` can be anything resolving
// to the `InnerMatcher`, for example another conditional operator.
// The edge-case `BaseClass &b = <cond> ? DerivedVar1 : DerivedVar2;`
// is handled, too. The implicit cast happens outside of the conditional.
// This is matched by `IgnoreDerivedToBase(canResolveToExpr(InnerMatcher))`
// below.
auto const ConditionalOperator = conditionalOperator(anyOf(
hasTrueExpression(ignoringParens(canResolveToExpr(InnerMatcher))),
hasFalseExpression(ignoringParens(canResolveToExpr(InnerMatcher)))));
auto const ElvisOperator = binaryConditionalOperator(anyOf(
hasTrueExpression(ignoringParens(canResolveToExpr(InnerMatcher))),
hasFalseExpression(ignoringParens(canResolveToExpr(InnerMatcher)))));
auto const ComplexMatcher = ignoringParens(
expr(anyOf(IgnoreDerivedToBase(InnerMatcher),
maybeEvalCommaExpr(IgnoreDerivedToBase(InnerMatcher)),
IgnoreDerivedToBase(ConditionalOperator),
IgnoreDerivedToBase(ElvisOperator))));
return ComplexMatcher.matches(*Exp, Finder, Builder);
if (!Exp)
return true;
auto *Target = dyn_cast<Expr>(Inner);
if (!Target)
return false;
return canExprResolveTo(Exp, Target);
}
// Similar to 'hasAnyArgument', but does not work because 'InitListExpr' does
@ -121,6 +154,12 @@ AST_MATCHER_P(GenericSelectionExpr, hasControllingExpr,
return InnerMatcher.matches(*Node.getControllingExpr(), Finder, Builder);
}
template <typename T>
ast_matchers::internal::Matcher<T>
findFirst(const ast_matchers::internal::Matcher<T> &Matcher) {
return anyOf(Matcher, hasDescendant(Matcher));
}
const auto nonConstReferenceType = [] {
return hasUnqualifiedDesugaredType(
referenceType(pointee(unless(isConstQualified()))));
@ -220,8 +259,8 @@ bool ExprMutationAnalyzer::isUnevaluated(const Stmt *Exp, const Stmt &Stm,
return selectFirst<Stmt>(
NodeID<Expr>::value,
match(
findAll(
stmt(canResolveToExpr(equalsNode(Exp)),
findFirst(
stmt(canResolveToExpr(Exp),
anyOf(
// `Exp` is part of the underlying expression of
// decltype/typeof if it has an ancestor of
@ -275,44 +314,41 @@ const Stmt *ExprMutationAnalyzer::findDeclPointeeMutation(
const Stmt *ExprMutationAnalyzer::findDirectMutation(const Expr *Exp) {
// LHS of any assignment operators.
const auto AsAssignmentLhs = binaryOperator(
isAssignmentOperator(), hasLHS(canResolveToExpr(equalsNode(Exp))));
const auto AsAssignmentLhs =
binaryOperator(isAssignmentOperator(), hasLHS(canResolveToExpr(Exp)));
// Operand of increment/decrement operators.
const auto AsIncDecOperand =
unaryOperator(anyOf(hasOperatorName("++"), hasOperatorName("--")),
hasUnaryOperand(canResolveToExpr(equalsNode(Exp))));
hasUnaryOperand(canResolveToExpr(Exp)));
// Invoking non-const member function.
// A member function is assumed to be non-const when it is unresolved.
const auto NonConstMethod = cxxMethodDecl(unless(isConst()));
const auto AsNonConstThis = expr(anyOf(
cxxMemberCallExpr(on(canResolveToExpr(equalsNode(Exp))),
unless(isConstCallee())),
cxxMemberCallExpr(on(canResolveToExpr(Exp)), unless(isConstCallee())),
cxxOperatorCallExpr(callee(NonConstMethod),
hasArgument(0, canResolveToExpr(equalsNode(Exp)))),
hasArgument(0, canResolveToExpr(Exp))),
// In case of a templated type, calling overloaded operators is not
// resolved and modelled as `binaryOperator` on a dependent type.
// Such instances are considered a modification, because they can modify
// in different instantiations of the template.
binaryOperator(
hasEitherOperand(ignoringImpCasts(canResolveToExpr(equalsNode(Exp)))),
isTypeDependent()),
binaryOperator(isTypeDependent(),
hasEitherOperand(ignoringImpCasts(canResolveToExpr(Exp)))),
// Within class templates and member functions the member expression might
// not be resolved. In that case, the `callExpr` is considered to be a
// modification.
callExpr(
callee(expr(anyOf(unresolvedMemberExpr(hasObjectExpression(
canResolveToExpr(equalsNode(Exp)))),
cxxDependentScopeMemberExpr(hasObjectExpression(
canResolveToExpr(equalsNode(Exp)))))))),
callExpr(callee(expr(anyOf(
unresolvedMemberExpr(hasObjectExpression(canResolveToExpr(Exp))),
cxxDependentScopeMemberExpr(
hasObjectExpression(canResolveToExpr(Exp))))))),
// Match on a call to a known method, but the call itself is type
// dependent (e.g. `vector<T> v; v.push(T{});` in a templated function).
callExpr(allOf(isTypeDependent(),
callee(memberExpr(hasDeclaration(NonConstMethod),
hasObjectExpression(canResolveToExpr(
equalsNode(Exp)))))))));
callExpr(allOf(
isTypeDependent(),
callee(memberExpr(hasDeclaration(NonConstMethod),
hasObjectExpression(canResolveToExpr(Exp))))))));
// Taking address of 'Exp'.
// We're assuming 'Exp' is mutated as soon as its address is taken, though in
@ -322,11 +358,10 @@ const Stmt *ExprMutationAnalyzer::findDirectMutation(const Expr *Exp) {
unaryOperator(hasOperatorName("&"),
// A NoOp implicit cast is adding const.
unless(hasParent(implicitCastExpr(hasCastKind(CK_NoOp)))),
hasUnaryOperand(canResolveToExpr(equalsNode(Exp))));
const auto AsPointerFromArrayDecay =
castExpr(hasCastKind(CK_ArrayToPointerDecay),
unless(hasParent(arraySubscriptExpr())),
has(canResolveToExpr(equalsNode(Exp))));
hasUnaryOperand(canResolveToExpr(Exp)));
const auto AsPointerFromArrayDecay = castExpr(
hasCastKind(CK_ArrayToPointerDecay),
unless(hasParent(arraySubscriptExpr())), has(canResolveToExpr(Exp)));
// Treat calling `operator->()` of move-only classes as taking address.
// These are typically smart pointers with unique ownership so we treat
// mutation of pointee as mutation of the smart pointer itself.
@ -334,7 +369,7 @@ const Stmt *ExprMutationAnalyzer::findDirectMutation(const Expr *Exp) {
hasOverloadedOperatorName("->"),
callee(
cxxMethodDecl(ofClass(isMoveOnly()), returns(nonConstPointerType()))),
argumentCountIs(1), hasArgument(0, canResolveToExpr(equalsNode(Exp))));
argumentCountIs(1), hasArgument(0, canResolveToExpr(Exp)));
// Used as non-const-ref argument when calling a function.
// An argument is assumed to be non-const-ref when the function is unresolved.
@ -342,8 +377,8 @@ const Stmt *ExprMutationAnalyzer::findDirectMutation(const Expr *Exp) {
// findFunctionArgMutation which has additional smarts for handling forwarding
// references.
const auto NonConstRefParam = forEachArgumentWithParamType(
anyOf(canResolveToExpr(equalsNode(Exp)),
memberExpr(hasObjectExpression(canResolveToExpr(equalsNode(Exp))))),
anyOf(canResolveToExpr(Exp),
memberExpr(hasObjectExpression(canResolveToExpr(Exp)))),
nonConstReferenceType());
const auto NotInstantiated = unless(hasDeclaration(isInstantiated()));
const auto TypeDependentCallee =
@ -354,19 +389,17 @@ const Stmt *ExprMutationAnalyzer::findDirectMutation(const Expr *Exp) {
const auto AsNonConstRefArg = anyOf(
callExpr(NonConstRefParam, NotInstantiated),
cxxConstructExpr(NonConstRefParam, NotInstantiated),
callExpr(TypeDependentCallee,
hasAnyArgument(canResolveToExpr(equalsNode(Exp)))),
cxxUnresolvedConstructExpr(
hasAnyArgument(canResolveToExpr(equalsNode(Exp)))),
callExpr(TypeDependentCallee, hasAnyArgument(canResolveToExpr(Exp))),
cxxUnresolvedConstructExpr(hasAnyArgument(canResolveToExpr(Exp))),
// Previous False Positive in the following Code:
// `template <typename T> void f() { int i = 42; new Type<T>(i); }`
// Where the constructor of `Type` takes its argument as reference.
// The AST does not resolve in a `cxxConstructExpr` because it is
// type-dependent.
parenListExpr(hasDescendant(expr(canResolveToExpr(equalsNode(Exp))))),
parenListExpr(hasDescendant(expr(canResolveToExpr(Exp)))),
// If the initializer is for a reference type, there is no cast for
// the variable. Values are cast to RValue first.
initListExpr(hasAnyInit(expr(canResolveToExpr(equalsNode(Exp))))));
initListExpr(hasAnyInit(expr(canResolveToExpr(Exp)))));
// Captured by a lambda by reference.
// If we're initializing a capture with 'Exp' directly then we're initializing
@ -380,76 +413,72 @@ const Stmt *ExprMutationAnalyzer::findDirectMutation(const Expr *Exp) {
// For returning by const-ref there will be an ImplicitCastExpr <NoOp> (for
// adding const.)
const auto AsNonConstRefReturn =
returnStmt(hasReturnValue(canResolveToExpr(equalsNode(Exp))));
returnStmt(hasReturnValue(canResolveToExpr(Exp)));
// It is used as a non-const-reference for initalizing a range-for loop.
const auto AsNonConstRefRangeInit = cxxForRangeStmt(
hasRangeInit(declRefExpr(allOf(canResolveToExpr(equalsNode(Exp)),
hasType(nonConstReferenceType())))));
const auto AsNonConstRefRangeInit = cxxForRangeStmt(hasRangeInit(declRefExpr(
allOf(canResolveToExpr(Exp), hasType(nonConstReferenceType())))));
const auto Matches = match(
traverse(TK_AsIs,
findAll(stmt(anyOf(AsAssignmentLhs, AsIncDecOperand,
AsNonConstThis, AsAmpersandOperand,
AsPointerFromArrayDecay, AsOperatorArrowThis,
AsNonConstRefArg, AsLambdaRefCaptureInit,
AsNonConstRefReturn, AsNonConstRefRangeInit))
.bind("stmt"))),
traverse(
TK_AsIs,
findFirst(stmt(anyOf(AsAssignmentLhs, AsIncDecOperand, AsNonConstThis,
AsAmpersandOperand, AsPointerFromArrayDecay,
AsOperatorArrowThis, AsNonConstRefArg,
AsLambdaRefCaptureInit, AsNonConstRefReturn,
AsNonConstRefRangeInit))
.bind("stmt"))),
Stm, Context);
return selectFirst<Stmt>("stmt", Matches);
}
const Stmt *ExprMutationAnalyzer::findMemberMutation(const Expr *Exp) {
// Check whether any member of 'Exp' is mutated.
const auto MemberExprs =
match(findAll(expr(anyOf(memberExpr(hasObjectExpression(
canResolveToExpr(equalsNode(Exp)))),
cxxDependentScopeMemberExpr(hasObjectExpression(
canResolveToExpr(equalsNode(Exp)))),
binaryOperator(hasOperatorName(".*"),
hasLHS(equalsNode(Exp)))))
.bind(NodeID<Expr>::value)),
Stm, Context);
const auto MemberExprs = match(
findAll(expr(anyOf(memberExpr(hasObjectExpression(canResolveToExpr(Exp))),
cxxDependentScopeMemberExpr(
hasObjectExpression(canResolveToExpr(Exp))),
binaryOperator(hasOperatorName(".*"),
hasLHS(equalsNode(Exp)))))
.bind(NodeID<Expr>::value)),
Stm, Context);
return findExprMutation(MemberExprs);
}
const Stmt *ExprMutationAnalyzer::findArrayElementMutation(const Expr *Exp) {
// Check whether any element of an array is mutated.
const auto SubscriptExprs =
match(findAll(arraySubscriptExpr(
anyOf(hasBase(canResolveToExpr(equalsNode(Exp))),
hasBase(implicitCastExpr(
allOf(hasCastKind(CK_ArrayToPointerDecay),
hasSourceExpression(canResolveToExpr(
equalsNode(Exp))))))))
.bind(NodeID<Expr>::value)),
Stm, Context);
const auto SubscriptExprs = match(
findAll(arraySubscriptExpr(
anyOf(hasBase(canResolveToExpr(Exp)),
hasBase(implicitCastExpr(allOf(
hasCastKind(CK_ArrayToPointerDecay),
hasSourceExpression(canResolveToExpr(Exp)))))))
.bind(NodeID<Expr>::value)),
Stm, Context);
return findExprMutation(SubscriptExprs);
}
const Stmt *ExprMutationAnalyzer::findCastMutation(const Expr *Exp) {
// If the 'Exp' is explicitly casted to a non-const reference type the
// 'Exp' is considered to be modified.
const auto ExplicitCast = match(
findAll(
stmt(castExpr(hasSourceExpression(canResolveToExpr(equalsNode(Exp))),
explicitCastExpr(
hasDestinationType(nonConstReferenceType()))))
.bind("stmt")),
Stm, Context);
const auto ExplicitCast =
match(findFirst(stmt(castExpr(hasSourceExpression(canResolveToExpr(Exp)),
explicitCastExpr(hasDestinationType(
nonConstReferenceType()))))
.bind("stmt")),
Stm, Context);
if (const auto *CastStmt = selectFirst<Stmt>("stmt", ExplicitCast))
return CastStmt;
// If 'Exp' is casted to any non-const reference type, check the castExpr.
const auto Casts = match(
findAll(
expr(castExpr(hasSourceExpression(canResolveToExpr(equalsNode(Exp))),
anyOf(explicitCastExpr(
hasDestinationType(nonConstReferenceType())),
implicitCastExpr(hasImplicitDestinationType(
nonConstReferenceType())))))
.bind(NodeID<Expr>::value)),
findAll(expr(castExpr(hasSourceExpression(canResolveToExpr(Exp)),
anyOf(explicitCastExpr(hasDestinationType(
nonConstReferenceType())),
implicitCastExpr(hasImplicitDestinationType(
nonConstReferenceType())))))
.bind(NodeID<Expr>::value)),
Stm, Context);
if (const Stmt *S = findExprMutation(Casts))
@ -458,7 +487,7 @@ const Stmt *ExprMutationAnalyzer::findCastMutation(const Expr *Exp) {
const auto Calls =
match(findAll(callExpr(callee(namedDecl(
hasAnyName("::std::move", "::std::forward"))),
hasArgument(0, canResolveToExpr(equalsNode(Exp))))
hasArgument(0, canResolveToExpr(Exp)))
.bind("expr")),
Stm, Context);
return findExprMutation(Calls);
@ -473,16 +502,16 @@ const Stmt *ExprMutationAnalyzer::findRangeLoopMutation(const Expr *Exp) {
// array is considered modified if the loop-variable is a non-const reference.
const auto DeclStmtToNonRefToArray = declStmt(hasSingleDecl(varDecl(hasType(
hasUnqualifiedDesugaredType(referenceType(pointee(arrayType())))))));
const auto RefToArrayRefToElements =
match(findAll(stmt(cxxForRangeStmt(
hasLoopVariable(
varDecl(anyOf(hasType(nonConstReferenceType()),
hasType(nonConstPointerType())))
.bind(NodeID<Decl>::value)),
hasRangeStmt(DeclStmtToNonRefToArray),
hasRangeInit(canResolveToExpr(equalsNode(Exp)))))
.bind("stmt")),
Stm, Context);
const auto RefToArrayRefToElements = match(
findFirst(stmt(cxxForRangeStmt(
hasLoopVariable(
varDecl(anyOf(hasType(nonConstReferenceType()),
hasType(nonConstPointerType())))
.bind(NodeID<Decl>::value)),
hasRangeStmt(DeclStmtToNonRefToArray),
hasRangeInit(canResolveToExpr(Exp))))
.bind("stmt")),
Stm, Context);
if (const auto *BadRangeInitFromArray =
selectFirst<Stmt>("stmt", RefToArrayRefToElements))
@ -505,12 +534,12 @@ const Stmt *ExprMutationAnalyzer::findRangeLoopMutation(const Expr *Exp) {
hasSingleDecl(varDecl(hasType(hasUnqualifiedDesugaredType(referenceType(
pointee(hasDeclaration(cxxRecordDecl(HasAnyNonConstIterator)))))))));
const auto RefToContainerBadIterators =
match(findAll(stmt(cxxForRangeStmt(allOf(
hasRangeStmt(DeclStmtToNonConstIteratorContainer),
hasRangeInit(canResolveToExpr(equalsNode(Exp))))))
.bind("stmt")),
Stm, Context);
const auto RefToContainerBadIterators = match(
findFirst(stmt(cxxForRangeStmt(allOf(
hasRangeStmt(DeclStmtToNonConstIteratorContainer),
hasRangeInit(canResolveToExpr(Exp)))))
.bind("stmt")),
Stm, Context);
if (const auto *BadIteratorsContainer =
selectFirst<Stmt>("stmt", RefToContainerBadIterators))
@ -522,7 +551,7 @@ const Stmt *ExprMutationAnalyzer::findRangeLoopMutation(const Expr *Exp) {
match(findAll(cxxForRangeStmt(
hasLoopVariable(varDecl(hasType(nonConstReferenceType()))
.bind(NodeID<Decl>::value)),
hasRangeInit(canResolveToExpr(equalsNode(Exp))))),
hasRangeInit(canResolveToExpr(Exp)))),
Stm, Context);
return findDeclMutation(LoopVars);
}
@ -531,31 +560,29 @@ const Stmt *ExprMutationAnalyzer::findReferenceMutation(const Expr *Exp) {
// Follow non-const reference returned by `operator*()` of move-only classes.
// These are typically smart pointers with unique ownership so we treat
// mutation of pointee as mutation of the smart pointer itself.
const auto Ref =
match(findAll(cxxOperatorCallExpr(
hasOverloadedOperatorName("*"),
callee(cxxMethodDecl(ofClass(isMoveOnly()),
returns(nonConstReferenceType()))),
argumentCountIs(1),
hasArgument(0, canResolveToExpr(equalsNode(Exp))))
.bind(NodeID<Expr>::value)),
Stm, Context);
const auto Ref = match(
findAll(cxxOperatorCallExpr(
hasOverloadedOperatorName("*"),
callee(cxxMethodDecl(ofClass(isMoveOnly()),
returns(nonConstReferenceType()))),
argumentCountIs(1), hasArgument(0, canResolveToExpr(Exp)))
.bind(NodeID<Expr>::value)),
Stm, Context);
if (const Stmt *S = findExprMutation(Ref))
return S;
// If 'Exp' is bound to a non-const reference, check all declRefExpr to that.
const auto Refs = match(
stmt(forEachDescendant(
varDecl(
hasType(nonConstReferenceType()),
hasInitializer(anyOf(canResolveToExpr(equalsNode(Exp)),
memberExpr(hasObjectExpression(
canResolveToExpr(equalsNode(Exp)))))),
hasParent(declStmt().bind("stmt")),
// Don't follow the reference in range statement, we've
// handled that separately.
unless(hasParent(declStmt(hasParent(
cxxForRangeStmt(hasRangeStmt(equalsBoundNode("stmt"))))))))
varDecl(hasType(nonConstReferenceType()),
hasInitializer(anyOf(
canResolveToExpr(Exp),
memberExpr(hasObjectExpression(canResolveToExpr(Exp))))),
hasParent(declStmt().bind("stmt")),
// Don't follow the reference in range statement, we've
// handled that separately.
unless(hasParent(declStmt(hasParent(cxxForRangeStmt(
hasRangeStmt(equalsBoundNode("stmt"))))))))
.bind(NodeID<Decl>::value))),
Stm, Context);
return findDeclMutation(Refs);
@ -563,7 +590,7 @@ const Stmt *ExprMutationAnalyzer::findReferenceMutation(const Expr *Exp) {
const Stmt *ExprMutationAnalyzer::findFunctionArgMutation(const Expr *Exp) {
const auto NonConstRefParam = forEachArgumentWithParam(
canResolveToExpr(equalsNode(Exp)),
canResolveToExpr(Exp),
parmVarDecl(hasType(nonConstReferenceType())).bind("parm"));
const auto IsInstantiated = hasDeclaration(isInstantiated());
const auto FuncDecl = hasDeclaration(functionDecl().bind("func"));

View File

@ -226,6 +226,7 @@ bool AArch64TargetInfo::validateBranchProtection(StringRef Spec, StringRef,
BPI.BranchTargetEnforcement = PBP.BranchTargetEnforcement;
BPI.BranchProtectionPAuthLR = PBP.BranchProtectionPAuthLR;
BPI.GuardedControlStack = PBP.GuardedControlStack;
return true;
}
@ -532,6 +533,9 @@ void AArch64TargetInfo::getTargetDefines(const LangOptions &Opts,
if (Opts.BranchTargetEnforcement)
Builder.defineMacro("__ARM_FEATURE_BTI_DEFAULT", "1");
if (Opts.GuardedControlStack)
Builder.defineMacro("__ARM_FEATURE_GCS_DEFAULT", "1");
if (HasLS64)
Builder.defineMacro("__ARM_FEATURE_LS64", "1");
@ -544,6 +548,9 @@ void AArch64TargetInfo::getTargetDefines(const LangOptions &Opts,
if (HasD128)
Builder.defineMacro("__ARM_FEATURE_SYSREG128", "1");
if (HasGCS)
Builder.defineMacro("__ARM_FEATURE_GCS", "1");
if (*ArchInfo == llvm::AArch64::ARMV8_1A)
getTargetDefinesARMV81A(Opts, Builder);
else if (*ArchInfo == llvm::AArch64::ARMV8_2A)

View File

@ -163,9 +163,8 @@ void RISCVTargetInfo::getTargetDefines(const LangOptions &Opts,
auto ExtName = Extension.first;
auto ExtInfo = Extension.second;
Builder.defineMacro(
Twine("__riscv_", ExtName),
Twine(getVersionValue(ExtInfo.MajorVersion, ExtInfo.MinorVersion)));
Builder.defineMacro(Twine("__riscv_", ExtName),
Twine(getVersionValue(ExtInfo.Major, ExtInfo.Minor)));
}
if (ISAInfo->hasExtension("m") || ISAInfo->hasExtension("zmmul"))

View File

@ -25,6 +25,7 @@
#include "clang/AST/Attr.h"
#include "clang/AST/Decl.h"
#include "clang/AST/OSLog.h"
#include "clang/AST/OperationKinds.h"
#include "clang/Basic/TargetBuiltins.h"
#include "clang/Basic/TargetInfo.h"
#include "clang/Basic/TargetOptions.h"
@ -818,6 +819,238 @@ CodeGenFunction::evaluateOrEmitBuiltinObjectSize(const Expr *E, unsigned Type,
return ConstantInt::get(ResType, ObjectSize, /*isSigned=*/true);
}
const FieldDecl *CodeGenFunction::FindFlexibleArrayMemberField(
ASTContext &Ctx, const RecordDecl *RD, StringRef Name, uint64_t &Offset) {
const LangOptions::StrictFlexArraysLevelKind StrictFlexArraysLevel =
getLangOpts().getStrictFlexArraysLevel();
unsigned FieldNo = 0;
bool IsUnion = RD->isUnion();
for (const Decl *D : RD->decls()) {
if (const auto *Field = dyn_cast<FieldDecl>(D);
Field && (Name.empty() || Field->getNameAsString() == Name) &&
Decl::isFlexibleArrayMemberLike(
Ctx, Field, Field->getType(), StrictFlexArraysLevel,
/*IgnoreTemplateOrMacroSubstitution=*/true)) {
const ASTRecordLayout &Layout = Ctx.getASTRecordLayout(RD);
Offset += Layout.getFieldOffset(FieldNo);
return Field;
}
if (const auto *Record = dyn_cast<RecordDecl>(D))
if (const FieldDecl *Field =
FindFlexibleArrayMemberField(Ctx, Record, Name, Offset)) {
const ASTRecordLayout &Layout = Ctx.getASTRecordLayout(RD);
Offset += Layout.getFieldOffset(FieldNo);
return Field;
}
if (!IsUnion && isa<FieldDecl>(D))
++FieldNo;
}
return nullptr;
}
static unsigned CountCountedByAttrs(const RecordDecl *RD) {
unsigned Num = 0;
for (const Decl *D : RD->decls()) {
if (const auto *FD = dyn_cast<FieldDecl>(D);
FD && FD->hasAttr<CountedByAttr>()) {
return ++Num;
}
if (const auto *Rec = dyn_cast<RecordDecl>(D))
Num += CountCountedByAttrs(Rec);
}
return Num;
}
/// Emit the size computation for a struct whose flexible array member carries
/// a "counted_by" attribute, on behalf of __builtin_dynamic_object_size.
///
/// \param E the __bdos pointer argument (the FAM itself, a pointer into the
///          FAM, or a pointer to the whole struct).
/// \param Type the __bdos "type" flag passed by the caller.
/// \param ResType the integer type the builtin must produce.
/// \returns the computed size, the default __bdos result when the computation
///          cannot be performed safely (e.g. a negative constant index or a
///          side-effecting index expression), or \p nullptr when this
///          special-case handling does not apply.
llvm::Value *
CodeGenFunction::emitFlexibleArrayMemberSize(const Expr *E, unsigned Type,
                                             llvm::IntegerType *ResType) {
  // The code generated here calculates the size of a struct with a flexible
  // array member that uses the counted_by attribute. There are three instances
  // we handle:
  //
  //       struct s {
  //         unsigned long flags;
  //         int count;
  //         int array[] __attribute__((counted_by(count)));
  //       }
  //
  //   1) bdos of the flexible array itself:
  //
  //       __builtin_dynamic_object_size(p->array, 1) ==
  //           p->count * sizeof(*p->array)
  //
  //   2) bdos of a pointer into the flexible array:
  //
  //       __builtin_dynamic_object_size(&p->array[42], 1) ==
  //           (p->count - 42) * sizeof(*p->array)
  //
  //   3) bdos of the whole struct, including the flexible array:
  //
  //       __builtin_dynamic_object_size(p, 1) ==
  //          max(sizeof(struct s),
  //              offsetof(struct s, array) + p->count * sizeof(*p->array))
  //
  ASTContext &Ctx = getContext();
  const Expr *Base = E->IgnoreParenImpCasts();
  const Expr *Idx = nullptr;

  // Strip a leading "&array[idx]" down to the array expression plus the index
  // (case 2 above); "&array[0]" is equivalent to "array" itself.
  if (const auto *UO = dyn_cast<UnaryOperator>(Base);
      UO && UO->getOpcode() == UO_AddrOf) {
    Expr *SubExpr = UO->getSubExpr()->IgnoreParenImpCasts();
    if (const auto *ASE = dyn_cast<ArraySubscriptExpr>(SubExpr)) {
      Base = ASE->getBase()->IgnoreParenImpCasts();
      Idx = ASE->getIdx()->IgnoreParenImpCasts();

      if (const auto *IL = dyn_cast<IntegerLiteral>(Idx)) {
        int64_t Val = IL->getValue().getSExtValue();
        if (Val < 0)
          // A constant negative index is always out of bounds of the FAM.
          return getDefaultBuiltinObjectSizeResult(Type, ResType);

        if (Val == 0)
          // The index is 0, so we don't need to take it into account.
          Idx = nullptr;
      }
    } else {
      // Potential pointer to another element in the struct.
      Base = SubExpr;
    }
  }

  // Get the flexible array member Decl.
  const RecordDecl *OuterRD = nullptr;
  std::string FAMName;
  if (const auto *ME = dyn_cast<MemberExpr>(Base)) {
    // Check if \p Base is referencing the FAM itself (cases 1 and 2).
    const ValueDecl *VD = ME->getMemberDecl();
    OuterRD = VD->getDeclContext()->getOuterLexicalRecordContext();
    FAMName = VD->getNameAsString();
  } else if (const auto *DRE = dyn_cast<DeclRefExpr>(Base)) {
    // Check if we're pointing to the whole struct (case 3).
    QualType Ty = DRE->getDecl()->getType();
    if (Ty->isPointerType())
      Ty = Ty->getPointeeType();
    OuterRD = Ty->getAsRecordDecl();

    // If we have a situation like this:
    //
    //     struct union_of_fams {
    //         int flags;
    //         union {
    //             signed char normal_field;
    //             struct {
    //                 int count1;
    //                 int arr1[] __counted_by(count1);
    //             };
    //             struct {
    //                 signed char count2;
    //                 int arr2[] __counted_by(count2);
    //             };
    //         };
    //    };
    //
    // We don't know which 'count' to use in this scenario:
    //
    //     size_t get_size(struct union_of_fams *p) {
    //         return __builtin_dynamic_object_size(p, 1);
    //     }
    //
    // Instead of calculating a wrong number, we give up.
    if (OuterRD && CountCountedByAttrs(OuterRD) > 1)
      return nullptr;
  }

  if (!OuterRD)
    return nullptr;

  uint64_t Offset = 0;
  const FieldDecl *FAMDecl =
      FindFlexibleArrayMemberField(Ctx, OuterRD, FAMName, Offset);
  Offset = Ctx.toCharUnitsFromBits(Offset).getQuantity();

  if (!FAMDecl || !FAMDecl->hasAttr<CountedByAttr>())
    // No flexible array member found or it doesn't have the "counted_by"
    // attribute.
    return nullptr;

  const FieldDecl *CountedByFD = FindCountedByField(FAMDecl);
  if (!CountedByFD)
    // Can't find the field referenced by the "counted_by" attribute.
    return nullptr;

  // Build a load of the counted_by field.
  bool IsSigned = CountedByFD->getType()->isSignedIntegerType();
  Value *CountedByInst = EmitCountedByFieldExpr(Base, FAMDecl, CountedByFD);
  if (!CountedByInst)
    return getDefaultBuiltinObjectSizeResult(Type, ResType);

  CountedByInst = Builder.CreateIntCast(CountedByInst, ResType, IsSigned);

  // Build a load of the index and subtract it from the count.
  Value *IdxInst = nullptr;
  if (Idx) {
    if (Idx->HasSideEffects(getContext()))
      // We can't have side-effects.
      return getDefaultBuiltinObjectSizeResult(Type, ResType);

    bool IdxSigned = Idx->getType()->isSignedIntegerType();
    IdxInst = EmitAnyExprToTemp(Idx).getScalarVal();
    IdxInst = Builder.CreateIntCast(IdxInst, ResType, IdxSigned);

    // We go ahead with the calculation here. If the index turns out to be
    // negative, we'll catch it at the end.
    CountedByInst =
        Builder.CreateSub(CountedByInst, IdxInst, "", !IsSigned, IsSigned);
  }

  // Calculate how large the flexible array member is in bytes.
  const ArrayType *ArrayTy = Ctx.getAsArrayType(FAMDecl->getType());
  CharUnits Size = Ctx.getTypeSizeInChars(ArrayTy->getElementType());
  llvm::Constant *ElemSize =
      llvm::ConstantInt::get(ResType, Size.getQuantity(), IsSigned);
  Value *FAMSize =
      Builder.CreateMul(CountedByInst, ElemSize, "", !IsSigned, IsSigned);
  FAMSize = Builder.CreateIntCast(FAMSize, ResType, IsSigned);
  Value *Res = FAMSize;

  if (const auto *DRE = dyn_cast<DeclRefExpr>(Base)) {
    // The whole struct is specified in the __bdos (case 3).
    const ASTRecordLayout &Layout = Ctx.getASTRecordLayout(OuterRD);

    // Get the offset of the FAM.
    llvm::Constant *FAMOffset = ConstantInt::get(ResType, Offset, IsSigned);
    Value *OffsetAndFAMSize =
        Builder.CreateAdd(FAMOffset, Res, "", !IsSigned, IsSigned);

    // Get the full size of the struct.
    llvm::Constant *SizeofStruct =
        ConstantInt::get(ResType, Layout.getSize().getQuantity(), IsSigned);

    // max(sizeof(struct s),
    //     offsetof(struct s, array) + p->count * sizeof(*p->array))
    Res = IsSigned
              ? Builder.CreateBinaryIntrinsic(llvm::Intrinsic::smax,
                                              OffsetAndFAMSize, SizeofStruct)
              : Builder.CreateBinaryIntrinsic(llvm::Intrinsic::umax,
                                              OffsetAndFAMSize, SizeofStruct);
  }

  // A negative \p IdxInst or \p CountedByInst means that the index lands
  // outside of the flexible array member. If that's the case, we want to
  // return 0.
  Value *Cmp = Builder.CreateIsNotNeg(CountedByInst);
  if (IdxInst)
    Cmp = Builder.CreateAnd(Builder.CreateIsNotNeg(IdxInst), Cmp);

  return Builder.CreateSelect(Cmp, Res, ConstantInt::get(ResType, 0, IsSigned));
}
/// Returns a Value corresponding to the size of the given expression.
/// This Value may be either of the following:
/// - A llvm::Argument (if E is a param with the pass_object_size attribute on
@ -850,6 +1083,13 @@ CodeGenFunction::emitBuiltinObjectSize(const Expr *E, unsigned Type,
}
}
if (IsDynamic) {
// Emit special code for a flexible array member with the "counted_by"
// attribute.
if (Value *V = emitFlexibleArrayMemberSize(E, Type, ResType))
return V;
}
// LLVM can't handle Type=3 appropriately, and __builtin_object_size shouldn't
// evaluate E for side-effects. In either case, we shouldn't lower to
// @llvm.objectsize.
@ -9681,8 +9921,8 @@ Value *CodeGenFunction::EmitSVEMaskedStore(const CallExpr *E,
bool IsQuadStore = false;
switch (IntrinsicID) {
case Intrinsic::aarch64_sve_st1uwq:
case Intrinsic::aarch64_sve_st1udq:
case Intrinsic::aarch64_sve_st1wq:
case Intrinsic::aarch64_sve_st1dq:
AddrMemoryTy = llvm::ScalableVectorType::get(MemEltTy, 1);
PredTy =
llvm::ScalableVectorType::get(IntegerType::get(getLLVMContext(), 1), 1);

View File

@ -2612,6 +2612,8 @@ void CodeGenModule::ConstructAttributeList(StringRef Name,
if (IRFunctionArgs.hasSRetArg()) {
llvm::AttrBuilder SRETAttrs(getLLVMContext());
SRETAttrs.addStructRetAttr(getTypes().ConvertTypeForMem(RetTy));
SRETAttrs.addAttribute(llvm::Attribute::Writable);
SRETAttrs.addAttribute(llvm::Attribute::DeadOnUnwind);
hasUsedSRet = true;
if (RetAI.getInReg())
SRETAttrs.addAttribute(llvm::Attribute::InReg);

View File

@ -156,7 +156,9 @@ static const EHPersonality &getObjCPersonality(const TargetInfo &Target,
case ObjCRuntime::WatchOS:
return EHPersonality::NeXT_ObjC;
case ObjCRuntime::GNUstep:
if (L.ObjCRuntime.getVersion() >= VersionTuple(1, 7))
if (T.isOSCygMing())
return EHPersonality::GNU_CPlusPlus_SEH;
else if (L.ObjCRuntime.getVersion() >= VersionTuple(1, 7))
return EHPersonality::GNUstep_ObjC;
[[fallthrough]];
case ObjCRuntime::GCC:
@ -210,7 +212,8 @@ static const EHPersonality &getObjCXXPersonality(const TargetInfo &Target,
return getObjCPersonality(Target, L);
case ObjCRuntime::GNUstep:
return EHPersonality::GNU_ObjCXX;
return Target.getTriple().isOSCygMing() ? EHPersonality::GNU_CPlusPlus_SEH
: EHPersonality::GNU_ObjCXX;
// The GCC runtime's personality function inherently doesn't support
// mixed EH. Use the ObjC personality just to avoid returning null.

View File

@ -26,10 +26,12 @@
#include "clang/AST/Attr.h"
#include "clang/AST/DeclObjC.h"
#include "clang/AST/NSAPI.h"
#include "clang/AST/StmtVisitor.h"
#include "clang/Basic/Builtins.h"
#include "clang/Basic/CodeGenOptions.h"
#include "clang/Basic/SourceManager.h"
#include "llvm/ADT/Hashing.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Intrinsics.h"
@ -925,16 +927,21 @@ static llvm::Value *getArrayIndexingBound(CodeGenFunction &CGF,
if (CE->getCastKind() == CK_ArrayToPointerDecay &&
!CE->getSubExpr()->isFlexibleArrayMemberLike(CGF.getContext(),
StrictFlexArraysLevel)) {
CodeGenFunction::SanitizerScope SanScope(&CGF);
IndexedType = CE->getSubExpr()->getType();
const ArrayType *AT = IndexedType->castAsArrayTypeUnsafe();
if (const auto *CAT = dyn_cast<ConstantArrayType>(AT))
return CGF.Builder.getInt(CAT->getSize());
else if (const auto *VAT = dyn_cast<VariableArrayType>(AT))
if (const auto *VAT = dyn_cast<VariableArrayType>(AT))
return CGF.getVLASize(VAT).NumElts;
// Ignore pass_object_size here. It's not applicable on decayed pointers.
}
}
CodeGenFunction::SanitizerScope SanScope(&CGF);
QualType EltTy{Base->getType()->getPointeeOrArrayElementType(), 0};
if (llvm::Value *POS = CGF.LoadPassedObjectSize(Base, EltTy)) {
IndexedType = Base->getType();
@ -944,22 +951,248 @@ static llvm::Value *getArrayIndexingBound(CodeGenFunction &CGF,
return nullptr;
}
namespace {

/// Walks an expression to find the "base" of a field access: either the
/// \p DeclRefExpr naming the struct variable/pointer (the \p p in
/// \p p->a.b.c), or the innermost \p MemberExpr whose type is the record
/// we're looking for:
///
///   struct s {
///     struct s *ptr;
///     int count;
///     char array[] __attribute__((counted_by(count)));
///   };
///
/// Given an expression like \p p->ptr->array[index], we want the
/// \p MemberExpr for \p p->ptr instead of the \p DeclRefExpr for \p p.
class StructAccessBase
    : public ConstStmtVisitor<StructAccessBase, const Expr *> {
  /// The record whose access base we're searching for.
  const RecordDecl *TargetRD;

  /// Returns true when \p E's type (looking through one level of pointer)
  /// is exactly the record we're searching for.
  bool HasTargetType(const Expr *E) const {
    QualType Ty = E->getType();
    if (Ty->isPointerType())
      Ty = Ty->getPointeeType();
    return Ty->getAsRecordDecl() == TargetRD;
  }

public:
  StructAccessBase(const RecordDecl *ExpectedRD) : TargetRD(ExpectedRD) {}

  //===--------------------------------------------------------------------===//
  //                            Visitor Methods
  //===--------------------------------------------------------------------===//

  // NOTE: C++ support for counted_by would additionally have to cope with
  // pointer-to-data-member horrors like this:
  //
  //   struct S {
  //     int x, y;
  //     int blah[] __attribute__((counted_by(x)));
  //   } s;
  //
  //   int foo(int index, int val) {
  //     int (S::*IHatePMDs)[] = &S::blah;
  //     (s.*IHatePMDs)[index] = val;
  //   }

  const Expr *Visit(const Expr *E) {
    return ConstStmtVisitor<StructAccessBase, const Expr *>::Visit(E);
  }

  /// Any statement kind we don't handle explicitly ends the search.
  const Expr *VisitStmt(const Stmt *S) { return nullptr; }

  // The expressions we return (in order of most to least likely):
  //
  //   1. DeclRefExpr - The base of the structure; exactly what's needed to
  //      build an access to the \p counted_by field.
  //   2. MemberExpr - An access whose record matches the flexible array
  //      member's lexically enclosing record; catches "p->p->array".
  //   3. CompoundLiteralExpr - For heretics writing things like
  //      (struct foo){ 1, 2 }.blah[idx] (struct foo has a FAM).

  const Expr *VisitDeclRefExpr(const DeclRefExpr *E) {
    return HasTargetType(E) ? E : nullptr;
  }

  const Expr *VisitMemberExpr(const MemberExpr *E) {
    // Prefer the innermost arrow access of the target record type; otherwise
    // take whatever the base yields, and fall back to this expression itself.
    if (HasTargetType(E) && E->isArrow())
      return E;
    if (const Expr *Inner = Visit(E->getBase()))
      return Inner;
    return HasTargetType(E) ? E : nullptr;
  }

  const Expr *VisitCompoundLiteralExpr(const CompoundLiteralExpr *E) {
    return HasTargetType(E) ? E : nullptr;
  }

  const Expr *VisitCallExpr(const CallExpr *E) {
    return HasTargetType(E) ? E : nullptr;
  }

  const Expr *VisitArraySubscriptExpr(const ArraySubscriptExpr *E) {
    return HasTargetType(E) ? E : Visit(E->getBase());
  }

  // Transparent wrappers: look through them.
  const Expr *VisitCastExpr(const CastExpr *E) {
    return Visit(E->getSubExpr());
  }

  const Expr *VisitParenExpr(const ParenExpr *E) {
    return Visit(E->getSubExpr());
  }

  const Expr *VisitUnaryAddrOf(const UnaryOperator *E) {
    return Visit(E->getSubExpr());
  }

  const Expr *VisitUnaryDeref(const UnaryOperator *E) {
    return Visit(E->getSubExpr());
  }
};

} // end anonymous namespace
// A path of (record, LLVM field index) pairs from an outer record down to a
// specific field. Built innermost-first and consumed in reverse.
using RecIndicesTy =
    SmallVector<std::pair<const RecordDecl *, llvm::Value *>, 8>;

/// Collect the struct-GEP indices needed to address \p FD starting from
/// \p RD, descending through any nested (e.g. anonymous) records.
///
/// \returns true if \p FD was found; \p Indices then holds the GEP path in
/// innermost-to-outermost order.
static bool getGEPIndicesToField(CodeGenFunction &CGF, const RecordDecl *RD,
                                 const FieldDecl *FD, RecIndicesTy &Indices) {
  const CGRecordLayout &Layout = CGF.CGM.getTypes().getCGRecordLayout(RD);
  int64_t FieldNo = -1;
  for (const Decl *D : RD->decls()) {
    if (const auto *Field = dyn_cast<FieldDecl>(D)) {
      FieldNo = Layout.getLLVMFieldNo(Field);
      if (FD == Field) {
        Indices.emplace_back(std::make_pair(RD, CGF.Builder.getInt32(FieldNo)));
        return true;
      }
    }

    // Recurse into nested records in case \p FD lives inside one of them.
    if (const auto *Record = dyn_cast<RecordDecl>(D)) {
      ++FieldNo;
      if (getGEPIndicesToField(CGF, Record, FD, Indices)) {
        // A union's members all live at LLVM field index 0.
        if (RD->isUnion())
          FieldNo = 0;
        Indices.emplace_back(std::make_pair(RD, CGF.Builder.getInt32(FieldNo)));
        return true;
      }
    }
  }

  return false;
}
/// Build a load of the "counted_by" field for a flexible array member.
///
/// This method is typically called in contexts where we can't generate
/// side-effects, like in __builtin_dynamic_object_size. When finding
/// expressions, only choose those that have either already been emitted or can
/// be loaded without side-effects.
///
/// - \p Base: the expression the builtin was applied to; used to locate the
///   base struct expression.
/// - \p FAMDecl: the \p Decl for the flexible array member. It may not be
///   within the top-level struct.
/// - \p CountDecl: must be within the same non-anonymous struct as \p FAMDecl.
///
/// \returns a load of the count field, or \p nullptr when a side-effect-free
/// access to the enclosing struct cannot be formed.
llvm::Value *CodeGenFunction::EmitCountedByFieldExpr(
    const Expr *Base, const FieldDecl *FAMDecl, const FieldDecl *CountDecl) {
  const RecordDecl *RD = CountDecl->getParent()->getOuterLexicalRecordContext();

  // Find the base struct expr (i.e. p in p->a.b.c.d).
  const Expr *StructBase = StructAccessBase(RD).Visit(Base);
  if (!StructBase || StructBase->HasSideEffects(getContext()))
    return nullptr;

  llvm::Value *Res = nullptr;
  if (const auto *DRE = dyn_cast<DeclRefExpr>(StructBase)) {
    // The base names a declaration: load the struct pointer it holds.
    Res = EmitDeclRefLValue(DRE).getPointer(*this);
    Res = Builder.CreateAlignedLoad(ConvertType(DRE->getType()), Res,
                                    getPointerAlign(), "dre.load");
  } else if (const MemberExpr *ME = dyn_cast<MemberExpr>(StructBase)) {
    // The base is a member access (e.g. p->ptr): take its address.
    LValue LV = EmitMemberExpr(ME);
    Address Addr = LV.getAddress(*this);
    Res = Addr.getPointer();
  } else if (StructBase->getType()->isPointerType()) {
    // Any other pointer-typed expression: emit it to get the struct address.
    LValueBaseInfo BaseInfo;
    TBAAAccessInfo TBAAInfo;
    Address Addr = EmitPointerWithAlignment(StructBase, &BaseInfo, &TBAAInfo);
    Res = Addr.getPointer();
  } else {
    return nullptr;
  }

  llvm::Value *Zero = Builder.getInt32(0);
  RecIndicesTy Indices;

  getGEPIndicesToField(*this, RD, CountDecl, Indices);

  // Indices were collected innermost-first; walk them in reverse to GEP from
  // the outer struct down to the count field.
  for (auto I = Indices.rbegin(), E = Indices.rend(); I != E; ++I)
    Res = Builder.CreateInBoundsGEP(
        ConvertType(QualType(I->first->getTypeForDecl(), 0)), Res,
        {Zero, I->second}, "..counted_by.gep");

  return Builder.CreateAlignedLoad(ConvertType(CountDecl->getType()), Res,
                                   getIntAlign(), "..counted_by.load");
}
/// Find the FieldDecl named by \p FD's "counted_by" attribute, or \p nullptr
/// if the attribute or the named field doesn't exist. Lookup is performed in
/// the nearest non-anonymous record enclosing \p FD.
const FieldDecl *CodeGenFunction::FindCountedByField(const FieldDecl *FD) {
  if (!FD)
    return nullptr;

  // getAttr returns null when the attribute is absent, subsuming hasAttr.
  const auto *CBA = FD->getAttr<CountedByAttr>();
  if (!CBA)
    return nullptr;

  // Climb out of any anonymous structs/unions: name lookup for the count
  // field happens in the enclosing named record.
  const RecordDecl *EnclosingRD = FD->getParent();
  while (EnclosingRD && EnclosingRD->isAnonymousStructOrUnion())
    EnclosingRD = dyn_cast<RecordDecl>(EnclosingRD->getDeclContext());

  if (!EnclosingRD)
    return nullptr;

  DeclContext::lookup_result Lookup =
      EnclosingRD->lookup(DeclarationName(CBA->getCountedByField()));
  if (Lookup.empty())
    return nullptr;

  // A count field inside an anonymous member is found as an IndirectFieldDecl;
  // resolve it to the underlying field.
  const NamedDecl *ND = Lookup.front();
  if (const auto *IFD = dyn_cast<IndirectFieldDecl>(ND))
    ND = IFD->getAnonField();

  return dyn_cast<FieldDecl>(ND);
}
/// Emit an array-bounds sanitizer check for indexing \p Base with \p Index.
///
/// \param E the full subscript expression, for diagnostics.
/// \param Base the array or pointer expression being indexed.
/// \param Index the emitted index value; \p IndexType its source type.
/// \param Accessed whether the element is actually dereferenced, as opposed
///        to merely having its address taken (e.g. "&Arr[Idx]").
void CodeGenFunction::EmitBoundsCheck(const Expr *E, const Expr *Base,
                                      llvm::Value *Index, QualType IndexType,
                                      bool Accessed) {
  assert(SanOpts.has(SanitizerKind::ArrayBounds) &&
         "should not be called unless adding bounds checks");
  // NOTE(review): a merge artifact left a duplicated, discarded-value call to
  // getStrictFlexArraysLevel() and a redundant SanitizerScope here; both
  // getArrayIndexingBound and EmitBoundsCheckImpl establish their own scopes.
  const LangOptions::StrictFlexArraysLevelKind StrictFlexArraysLevel =
      getLangOpts().getStrictFlexArraysLevel();
  QualType IndexedType;
  llvm::Value *Bound =
      getArrayIndexingBound(*this, Base, IndexedType, StrictFlexArraysLevel);

  EmitBoundsCheckImpl(E, Bound, Index, IndexType, IndexedType, Accessed);
}
void CodeGenFunction::EmitBoundsCheckImpl(const Expr *E, llvm::Value *Bound,
llvm::Value *Index,
QualType IndexType,
QualType IndexedType, bool Accessed) {
if (!Bound)
return;
SanitizerScope SanScope(this);
bool IndexSigned = IndexType->isSignedIntegerOrEnumerationType();
llvm::Value *IndexVal = Builder.CreateIntCast(Index, SizeTy, IndexSigned);
llvm::Value *BoundVal = Builder.CreateIntCast(Bound, SizeTy, false);
@ -975,7 +1208,6 @@ void CodeGenFunction::EmitBoundsCheck(const Expr *E, const Expr *Base,
SanitizerHandler::OutOfBounds, StaticData, Index);
}
CodeGenFunction::ComplexPairTy CodeGenFunction::
EmitComplexPrePostIncDec(const UnaryOperator *E, LValue LV,
bool isInc, bool isPre) {
@ -3823,6 +4055,61 @@ static Address emitArraySubscriptGEP(CodeGenFunction &CGF, Address addr,
return Address(eltPtr, CGF.ConvertTypeForMem(eltType), eltAlign);
}
/// Accumulate into \p Offset the offset in bits of \p FD from the beginning
/// of the record \p RD, recursing through nested (possibly anonymous)
/// records.
///
/// \returns true once \p FD has been located; \p Offset is only meaningful in
/// that case.
static bool getFieldOffsetInBits(CodeGenFunction &CGF, const RecordDecl *RD,
                                 const FieldDecl *FD, int64_t &Offset) {
  ASTContext &Ctx = CGF.getContext();
  const ASTRecordLayout &Layout = Ctx.getASTRecordLayout(RD);
  unsigned FieldNo = 0;

  for (const Decl *D : RD->decls()) {
    if (const auto *Record = dyn_cast<RecordDecl>(D))
      if (getFieldOffsetInBits(CGF, Record, FD, Offset)) {
        // Found inside the nested record; add that record's own offset.
        // (At this point \p FieldNo indexes the field slot the nested record
        // occupies — presumably its implicit FieldDecl follows it in the decl
        // list; confirm against RecordDecl::decls() ordering.)
        Offset += Layout.getFieldOffset(FieldNo);
        return true;
      }

    if (const auto *Field = dyn_cast<FieldDecl>(D))
      if (FD == Field) {
        Offset += Layout.getFieldOffset(FieldNo);
        return true;
      }

    // Only FieldDecls consume field numbers in the record layout.
    if (isa<FieldDecl>(D))
      ++FieldNo;
  }

  return false;
}
/// Returns the relative offset difference between \p FD1 and \p FD2, in bits:
/// \code
///   offsetof(struct foo, FD1) - offsetof(struct foo, FD2)
/// \endcode
/// Yields \p std::nullopt when the fields don't share the same outermost
/// record, or when either offset cannot be computed.
static std::optional<int64_t> getOffsetDifferenceInBits(CodeGenFunction &CGF,
                                                        const FieldDecl *FD1,
                                                        const FieldDecl *FD2) {
  const RecordDecl *Outer1 = FD1->getParent()->getOuterLexicalRecordContext();
  const RecordDecl *Outer2 = FD2->getParent()->getOuterLexicalRecordContext();

  // Offsets are only comparable within a single RecordDecl.
  if (Outer1 != Outer2)
    return std::nullopt;

  int64_t Offset1 = 0;
  int64_t Offset2 = 0;
  if (!getFieldOffsetInBits(CGF, Outer1, FD1, Offset1) ||
      !getFieldOffsetInBits(CGF, Outer2, FD2, Offset2))
    return std::nullopt;

  return Offset1 - Offset2;
}
LValue CodeGenFunction::EmitArraySubscriptExpr(const ArraySubscriptExpr *E,
bool Accessed) {
// The index must always be an integer, which is not an aggregate. Emit it
@ -3950,6 +4237,47 @@ LValue CodeGenFunction::EmitArraySubscriptExpr(const ArraySubscriptExpr *E,
ArrayLV = EmitLValue(Array);
auto *Idx = EmitIdxAfterBase(/*Promote*/true);
if (SanOpts.has(SanitizerKind::ArrayBounds)) {
// If the array being accessed has a "counted_by" attribute, generate
// bounds checking code. The "count" field is at the top level of the
// struct or in an anonymous struct, that's also at the top level. Future
// expansions may allow the "count" to reside at any place in the struct,
// but the value of "counted_by" will be a "simple" path to the count,
// i.e. "a.b.count", so we shouldn't need the full force of EmitLValue or
// similar to emit the correct GEP.
const LangOptions::StrictFlexArraysLevelKind StrictFlexArraysLevel =
getLangOpts().getStrictFlexArraysLevel();
if (const auto *ME = dyn_cast<MemberExpr>(Array);
ME &&
ME->isFlexibleArrayMemberLike(getContext(), StrictFlexArraysLevel) &&
ME->getMemberDecl()->hasAttr<CountedByAttr>()) {
const FieldDecl *FAMDecl = dyn_cast<FieldDecl>(ME->getMemberDecl());
if (const FieldDecl *CountFD = FindCountedByField(FAMDecl)) {
if (std::optional<int64_t> Diff =
getOffsetDifferenceInBits(*this, CountFD, FAMDecl)) {
CharUnits OffsetDiff = CGM.getContext().toCharUnitsFromBits(*Diff);
// Create a GEP with a byte offset between the FAM and count and
// use that to load the count value.
Addr = Builder.CreatePointerBitCastOrAddrSpaceCast(
ArrayLV.getAddress(*this), Int8PtrTy, Int8Ty);
llvm::Type *CountTy = ConvertType(CountFD->getType());
llvm::Value *Res = Builder.CreateInBoundsGEP(
Int8Ty, Addr.getPointer(),
Builder.getInt32(OffsetDiff.getQuantity()), ".counted_by.gep");
Res = Builder.CreateAlignedLoad(CountTy, Res, getIntAlign(),
".counted_by.load");
// Now emit the bounds checking.
EmitBoundsCheckImpl(E, Res, Idx, E->getIdx()->getType(),
Array->getType(), Accessed);
}
}
}
}
// Propagate the alignment from the array itself to the result.
QualType arrayType = Array->getType();
Addr = emitArraySubscriptGEP(

View File

@ -168,6 +168,8 @@ class CGObjCGNU : public CGObjCRuntime {
/// Does the current target use SEH-based exceptions? False implies
/// Itanium-style DWARF unwinding.
bool usesSEHExceptions;
/// Does the current target uses C++-based exceptions?
bool usesCxxExceptions;
/// Helper to check if we are targeting a specific runtime version or later.
bool isRuntime(ObjCRuntime::Kind kind, unsigned major, unsigned minor=0) {
@ -819,12 +821,18 @@ class CGObjCGNUstep : public CGObjCGNU {
SlotLookupSuperFn.init(&CGM, "objc_slot_lookup_super", SlotTy,
PtrToObjCSuperTy, SelectorTy);
// If we're in ObjC++ mode, then we want to make
if (usesSEHExceptions) {
llvm::Type *VoidTy = llvm::Type::getVoidTy(VMContext);
// void objc_exception_rethrow(void)
ExceptionReThrowFn.init(&CGM, "objc_exception_rethrow", VoidTy);
llvm::Type *VoidTy = llvm::Type::getVoidTy(VMContext);
if (usesCxxExceptions) {
// void *__cxa_begin_catch(void *e)
EnterCatchFn.init(&CGM, "__cxa_begin_catch", PtrTy, PtrTy);
// void __cxa_end_catch(void)
ExitCatchFn.init(&CGM, "__cxa_end_catch", VoidTy);
// void objc_exception_rethrow(void*)
ExceptionReThrowFn.init(&CGM, "__cxa_rethrow", PtrTy);
} else if (usesSEHExceptions) {
// void objc_exception_rethrow(void)
ExceptionReThrowFn.init(&CGM, "objc_exception_rethrow", VoidTy);
} else if (CGM.getLangOpts().CPlusPlus) {
llvm::Type *VoidTy = llvm::Type::getVoidTy(VMContext);
// void *__cxa_begin_catch(void *e)
EnterCatchFn.init(&CGM, "__cxa_begin_catch", PtrTy, PtrTy);
// void __cxa_end_catch(void)
@ -833,7 +841,6 @@ class CGObjCGNUstep : public CGObjCGNU {
ExceptionReThrowFn.init(&CGM, "_Unwind_Resume_or_Rethrow", VoidTy,
PtrTy);
} else if (R.getVersion() >= VersionTuple(1, 7)) {
llvm::Type *VoidTy = llvm::Type::getVoidTy(VMContext);
// id objc_begin_catch(void *e)
EnterCatchFn.init(&CGM, "objc_begin_catch", IdTy, PtrTy);
// void objc_end_catch(void)
@ -841,7 +848,6 @@ class CGObjCGNUstep : public CGObjCGNU {
// void _Unwind_Resume_or_Rethrow(void*)
ExceptionReThrowFn.init(&CGM, "objc_exception_rethrow", VoidTy, PtrTy);
}
llvm::Type *VoidTy = llvm::Type::getVoidTy(VMContext);
SetPropertyAtomic.init(&CGM, "objc_setProperty_atomic", VoidTy, IdTy,
SelectorTy, IdTy, PtrDiffTy);
SetPropertyAtomicCopy.init(&CGM, "objc_setProperty_atomic_copy", VoidTy,
@ -2126,6 +2132,9 @@ CGObjCGNU::CGObjCGNU(CodeGenModule &cgm, unsigned runtimeABIVersion,
msgSendMDKind = VMContext.getMDKindID("GNUObjCMessageSend");
usesSEHExceptions =
cgm.getContext().getTargetInfo().getTriple().isWindowsMSVCEnvironment();
usesCxxExceptions =
cgm.getContext().getTargetInfo().getTriple().isOSCygMing() &&
isRuntime(ObjCRuntime::GNUstep, 2);
CodeGenTypes &Types = CGM.getTypes();
IntTy = cast<llvm::IntegerType>(
@ -2212,7 +2221,10 @@ CGObjCGNU::CGObjCGNU(CodeGenModule &cgm, unsigned runtimeABIVersion,
// void objc_exception_throw(id);
ExceptionThrowFn.init(&CGM, "objc_exception_throw", VoidTy, IdTy);
ExceptionReThrowFn.init(&CGM, "objc_exception_throw", VoidTy, IdTy);
ExceptionReThrowFn.init(&CGM,
usesCxxExceptions ? "objc_exception_rethrow"
: "objc_exception_throw",
VoidTy, IdTy);
// int objc_sync_enter(id);
SyncEnterFn.init(&CGM, "objc_sync_enter", IntTy, IdTy);
// int objc_sync_exit(id);
@ -2389,7 +2401,7 @@ llvm::Constant *CGObjCGNUstep::GetEHType(QualType T) {
if (usesSEHExceptions)
return CGM.getCXXABI().getAddrOfRTTIDescriptor(T);
if (!CGM.getLangOpts().CPlusPlus)
if (!CGM.getLangOpts().CPlusPlus && !usesCxxExceptions)
return CGObjCGNU::GetEHType(T);
// For Objective-C++, we want to provide the ability to catch both C++ and
@ -3995,7 +4007,7 @@ void CGObjCGNU::EmitThrowStmt(CodeGenFunction &CGF,
ExceptionAsObject = CGF.ObjCEHValueStack.back();
isRethrow = true;
}
if (isRethrow && usesSEHExceptions) {
if (isRethrow && (usesSEHExceptions || usesCxxExceptions)) {
// For SEH, ExceptionAsObject may be undef, because the catch handler is
// not passed it for catchalls and so it is not visible to the catch
// funclet. The real thrown object will still be live on the stack at this
@ -4005,8 +4017,7 @@ void CGObjCGNU::EmitThrowStmt(CodeGenFunction &CGF,
// argument.
llvm::CallBase *Throw = CGF.EmitRuntimeCallOrInvoke(ExceptionReThrowFn);
Throw->setDoesNotReturn();
}
else {
} else {
ExceptionAsObject = CGF.Builder.CreateBitCast(ExceptionAsObject, IdTy);
llvm::CallBase *Throw =
CGF.EmitRuntimeCallOrInvoke(ExceptionThrowFn, ExceptionAsObject);

View File

@ -3073,6 +3073,25 @@ class CodeGenFunction : public CodeGenTypeCache {
/// this expression is used as an lvalue, for instance in "&Arr[Idx]".
void EmitBoundsCheck(const Expr *E, const Expr *Base, llvm::Value *Index,
QualType IndexType, bool Accessed);
void EmitBoundsCheckImpl(const Expr *E, llvm::Value *Bound,
llvm::Value *Index, QualType IndexType,
QualType IndexedType, bool Accessed);
// Find a struct's flexible array member. It may be embedded inside multiple
// sub-structs, but must still be the last field.
const FieldDecl *FindFlexibleArrayMemberField(ASTContext &Ctx,
const RecordDecl *RD,
StringRef Name,
uint64_t &Offset);
/// Find the FieldDecl specified in a FAM's "counted_by" attribute. Returns
/// \p nullptr if either the attribute or the field doesn't exist.
const FieldDecl *FindCountedByField(const FieldDecl *FD);
/// Build an expression accessing the "counted_by" field.
llvm::Value *EmitCountedByFieldExpr(const Expr *Base,
const FieldDecl *FAMDecl,
const FieldDecl *CountDecl);
llvm::Value *EmitScalarPrePostIncDec(const UnaryOperator *E, LValue LV,
bool isInc, bool isPre);
@ -4873,6 +4892,9 @@ class CodeGenFunction : public CodeGenTypeCache {
llvm::Value *EmittedE,
bool IsDynamic);
llvm::Value *emitFlexibleArrayMemberSize(const Expr *E, unsigned Type,
llvm::IntegerType *ResType);
void emitZeroOrPatternForAutoVarInit(QualType type, const VarDecl &D,
Address Loc);

View File

@ -1109,6 +1109,8 @@ void CodeGenModule::Release() {
if (LangOpts.BranchProtectionPAuthLR)
getModule().addModuleFlag(llvm::Module::Min, "branch-protection-pauth-lr",
1);
if (LangOpts.GuardedControlStack)
getModule().addModuleFlag(llvm::Module::Min, "guarded-control-stack", 1);
if (LangOpts.hasSignReturnAddress())
getModule().addModuleFlag(llvm::Module::Min, "sign-return-address", 1);
if (LangOpts.isSignReturnAddressScopeAll())

View File

@ -1712,7 +1712,11 @@ struct CounterCoverageMappingBuilder
extendRegion(S->getCond());
Counter ParentCount = getRegion().getCounter();
Counter ThenCount = getRegionCounter(S);
// If this is "if !consteval" the then-branch will never be taken, we don't
// need to change counter
Counter ThenCount =
S->isNegatedConsteval() ? ParentCount : getRegionCounter(S);
if (!S->isConsteval()) {
// Emitting a counter for the condition makes it easier to interpret the
@ -1729,7 +1733,12 @@ struct CounterCoverageMappingBuilder
extendRegion(S->getThen());
Counter OutCount = propagateCounts(ThenCount, S->getThen());
Counter ElseCount = subtractCounters(ParentCount, ThenCount);
// If this is "if consteval" the else-branch will never be taken, we don't
// need to change counter
Counter ElseCount = S->isNonNegatedConsteval()
? ParentCount
: subtractCounters(ParentCount, ThenCount);
if (const Stmt *Else = S->getElse()) {
bool ThenHasTerminateStmt = HasTerminateStmt;
HasTerminateStmt = false;

View File

@ -138,6 +138,8 @@ class AArch64TargetCodeGenInfo : public TargetCodeGenInfo {
BPI.BranchTargetEnforcement ? "true" : "false");
Fn->addFnAttr("branch-protection-pauth-lr",
BPI.BranchProtectionPAuthLR ? "true" : "false");
Fn->addFnAttr("guarded-control-stack",
BPI.GuardedControlStack ? "true" : "false");
}
bool isScalarizableAsmOperand(CodeGen::CodeGenFunction &CGF,

View File

@ -1508,7 +1508,7 @@ static void CollectARMPACBTIOptions(const ToolChain &TC, const ArgList &Args,
<< Triple.getArchName();
StringRef Scope, Key;
bool IndirectBranches, BranchProtectionPAuthLR;
bool IndirectBranches, BranchProtectionPAuthLR, GuardedControlStack;
if (A->getOption().matches(options::OPT_msign_return_address_EQ)) {
Scope = A->getValue();
@ -1518,6 +1518,7 @@ static void CollectARMPACBTIOptions(const ToolChain &TC, const ArgList &Args,
Key = "a_key";
IndirectBranches = false;
BranchProtectionPAuthLR = false;
GuardedControlStack = false;
} else {
StringRef DiagMsg;
llvm::ARM::ParsedBranchProtection PBP;
@ -1531,6 +1532,7 @@ static void CollectARMPACBTIOptions(const ToolChain &TC, const ArgList &Args,
Key = PBP.Key;
BranchProtectionPAuthLR = PBP.BranchProtectionPAuthLR;
IndirectBranches = PBP.BranchTargetEnforcement;
GuardedControlStack = PBP.GuardedControlStack;
}
CmdArgs.push_back(
@ -1543,6 +1545,8 @@ static void CollectARMPACBTIOptions(const ToolChain &TC, const ArgList &Args,
Args.MakeArgString(Twine("-mbranch-protection-pauth-lr")));
if (IndirectBranches)
CmdArgs.push_back("-mbranch-target-enforce");
if (GuardedControlStack)
CmdArgs.push_back("-mguarded-control-stack");
}
void Clang::AddARMTargetArgs(const llvm::Triple &Triple, const ArgList &Args,

View File

@ -7,6 +7,7 @@
//===----------------------------------------------------------------------===//
#include "Flang.h"
#include "Arch/RISCV.h"
#include "CommonArgs.h"
#include "clang/Basic/CodeGenOptions.h"
@ -14,6 +15,8 @@
#include "llvm/Frontend/Debug/Options.h"
#include "llvm/Support/FileSystem.h"
#include "llvm/Support/Path.h"
#include "llvm/Support/RISCVISAInfo.h"
#include "llvm/TargetParser/RISCVTargetParser.h"
#include <cassert>
@ -203,6 +206,51 @@ void Flang::AddAArch64TargetArgs(const ArgList &Args,
}
}
/// Add RISC-V-specific arguments to the flang frontend invocation; currently
/// this translates -mrvv-vector-bits=<bits> into -mvscale-min/-mvscale-max.
void Flang::AddRISCVTargetArgs(const ArgList &Args,
                               ArgStringList &CmdArgs) const {
  const llvm::Triple &Triple = getToolChain().getTriple();
  // Handle -mrvv-vector-bits=<bits>
  if (Arg *A = Args.getLastArg(options::OPT_mrvv_vector_bits_EQ)) {
    StringRef Val = A->getValue();
    const Driver &D = getToolChain().getDriver();

    // Get minimum VLen from march.
    unsigned MinVLen = 0;
    StringRef Arch = riscv::getRISCVArch(Args, Triple);
    auto ISAInfo = llvm::RISCVISAInfo::parseArchString(
        Arch, /*EnableExperimentalExtensions*/ true);
    // Ignore parsing error.
    if (!errorToBool(ISAInfo.takeError()))
      MinVLen = (*ISAInfo)->getMinVLen();

    // If the value is "zvl", use MinVLen from march. Otherwise, try to parse
    // it as an integer, as long as we have a MinVLen to validate against.
    unsigned Bits = 0;
    if (Val.equals("zvl") && MinVLen >= llvm::RISCV::RVVBitsPerBlock) {
      Bits = MinVLen;
    } else if (!Val.getAsInteger(10, Bits)) {
      // Only accept power-of-2 values between RVVBitsPerBlock and 65536 that
      // are at least MinVLen.
      if (Bits < MinVLen || Bits < llvm::RISCV::RVVBitsPerBlock ||
          Bits > 65536 || !llvm::isPowerOf2_32(Bits))
        Bits = 0;
    }

    // If we got a valid value try to use it.
    if (Bits != 0) {
      unsigned VScaleMin = Bits / llvm::RISCV::RVVBitsPerBlock;
      CmdArgs.push_back(
          Args.MakeArgString("-mvscale-max=" + llvm::Twine(VScaleMin)));
      CmdArgs.push_back(
          Args.MakeArgString("-mvscale-min=" + llvm::Twine(VScaleMin)));
    } else if (!Val.equals("scalable")) {
      // Handle the unsupported values passed to mrvv-vector-bits.
      D.Diag(diag::err_drv_unsupported_option_argument)
          << A->getSpelling() << Val;
    }
  }
}
static void addVSDefines(const ToolChain &TC, const ArgList &Args,
ArgStringList &CmdArgs) {
@ -321,6 +369,9 @@ void Flang::addTargetOptions(const ArgList &Args,
AddAMDGPUTargetArgs(Args, CmdArgs);
break;
case llvm::Triple::riscv64:
getTargetFeatures(D, Triple, Args, CmdArgs, /*ForAs*/ false);
AddRISCVTargetArgs(Args, CmdArgs);
break;
case llvm::Triple::x86_64:
getTargetFeatures(D, Triple, Args, CmdArgs, /*ForAs*/ false);
break;
@ -352,12 +403,10 @@ void Flang::addTargetOptions(const ArgList &Args,
if (A->getValue() == StringRef{"Accelerate"}) {
CmdArgs.push_back("-framework");
CmdArgs.push_back("Accelerate");
A->render(Args, CmdArgs);
}
}
} else {
A->render(Args, CmdArgs);
}
A->render(Args, CmdArgs);
}
if (Triple.isKnownWindowsMSVCEnvironment()) {
@ -428,6 +477,8 @@ void Flang::addOffloadOptions(Compilation &C, const InputInfoList &Inputs,
CmdArgs.push_back("-fopenmp-assume-no-thread-state");
if (Args.hasArg(options::OPT_fopenmp_assume_no_nested_parallelism))
CmdArgs.push_back("-fopenmp-assume-no-nested-parallelism");
if (Args.hasArg(options::OPT_nogpulib))
CmdArgs.push_back("-nogpulib");
}
}

View File

@ -70,6 +70,13 @@ class LLVM_LIBRARY_VISIBILITY Flang : public Tool {
void AddAMDGPUTargetArgs(const llvm::opt::ArgList &Args,
llvm::opt::ArgStringList &CmdArgs) const;
/// Add specific options for RISC-V target.
///
/// \param [in] Args The list of input driver arguments
/// \param [out] CmdArgs The list of output command arguments
void AddRISCVTargetArgs(const llvm::opt::ArgList &Args,
llvm::opt::ArgStringList &CmdArgs) const;
/// Extract offload options from the driver arguments and add them to
/// the command arguments.
/// \param [in] C The current compilation for the driver invocation

View File

@ -2668,7 +2668,9 @@ void Generic_GCC::GCCInstallationDetector::AddDefaultGCCPrefixes(
case llvm::Triple::arm:
case llvm::Triple::thumb:
LibDirs.append(begin(ARMLibDirs), end(ARMLibDirs));
if (TargetTriple.getEnvironment() == llvm::Triple::GNUEABIHF) {
if (TargetTriple.getEnvironment() == llvm::Triple::GNUEABIHF ||
TargetTriple.getEnvironment() == llvm::Triple::MuslEABIHF ||
TargetTriple.getEnvironment() == llvm::Triple::EABIHF) {
TripleAliases.append(begin(ARMHFTriples), end(ARMHFTriples));
} else {
TripleAliases.append(begin(ARMTriples), end(ARMTriples));
@ -2677,7 +2679,9 @@ void Generic_GCC::GCCInstallationDetector::AddDefaultGCCPrefixes(
case llvm::Triple::armeb:
case llvm::Triple::thumbeb:
LibDirs.append(begin(ARMebLibDirs), end(ARMebLibDirs));
if (TargetTriple.getEnvironment() == llvm::Triple::GNUEABIHF) {
if (TargetTriple.getEnvironment() == llvm::Triple::GNUEABIHF ||
TargetTriple.getEnvironment() == llvm::Triple::MuslEABIHF ||
TargetTriple.getEnvironment() == llvm::Triple::EABIHF) {
TripleAliases.append(begin(ARMebHFTriples), end(ARMebHFTriples));
} else {
TripleAliases.append(begin(ARMebTriples), end(ARMebTriples));

View File

@ -61,12 +61,16 @@ std::string Linux::getMultiarchTriple(const Driver &D,
case llvm::Triple::thumb:
if (IsAndroid)
return "arm-linux-androideabi";
if (TargetEnvironment == llvm::Triple::GNUEABIHF)
if (TargetEnvironment == llvm::Triple::GNUEABIHF ||
TargetEnvironment == llvm::Triple::MuslEABIHF ||
TargetEnvironment == llvm::Triple::EABIHF)
return "arm-linux-gnueabihf";
return "arm-linux-gnueabi";
case llvm::Triple::armeb:
case llvm::Triple::thumbeb:
if (TargetEnvironment == llvm::Triple::GNUEABIHF)
if (TargetEnvironment == llvm::Triple::GNUEABIHF ||
TargetEnvironment == llvm::Triple::MuslEABIHF ||
TargetEnvironment == llvm::Triple::EABIHF)
return "armeb-linux-gnueabihf";
return "armeb-linux-gnueabi";
case llvm::Triple::x86:

View File

@ -76,41 +76,39 @@ template <> struct MappingTraits<FormatStyle::AlignConsecutiveStyle> {
FormatStyle::AlignConsecutiveStyle(
{/*Enabled=*/false, /*AcrossEmptyLines=*/false,
/*AcrossComments=*/false, /*AlignCompound=*/false,
/*PadOperators=*/true}));
/*AlignFunctionPointers=*/false, /*PadOperators=*/true}));
IO.enumCase(Value, "Consecutive",
FormatStyle::AlignConsecutiveStyle(
{/*Enabled=*/true, /*AcrossEmptyLines=*/false,
/*AcrossComments=*/false, /*AlignCompound=*/false,
/*PadOperators=*/true}));
/*AlignFunctionPointers=*/false, /*PadOperators=*/true}));
IO.enumCase(Value, "AcrossEmptyLines",
FormatStyle::AlignConsecutiveStyle(
{/*Enabled=*/true, /*AcrossEmptyLines=*/true,
/*AcrossComments=*/false, /*AlignCompound=*/false,
/*PadOperators=*/true}));
/*AlignFunctionPointers=*/false, /*PadOperators=*/true}));
IO.enumCase(Value, "AcrossComments",
FormatStyle::AlignConsecutiveStyle({/*Enabled=*/true,
/*AcrossEmptyLines=*/false,
/*AcrossComments=*/true,
/*AlignCompound=*/false,
/*PadOperators=*/true}));
FormatStyle::AlignConsecutiveStyle(
{/*Enabled=*/true, /*AcrossEmptyLines=*/false,
/*AcrossComments=*/true, /*AlignCompound=*/false,
/*AlignFunctionPointers=*/false, /*PadOperators=*/true}));
IO.enumCase(Value, "AcrossEmptyLinesAndComments",
FormatStyle::AlignConsecutiveStyle({/*Enabled=*/true,
/*AcrossEmptyLines=*/true,
/*AcrossComments=*/true,
/*AlignCompound=*/false,
/*PadOperators=*/true}));
FormatStyle::AlignConsecutiveStyle(
{/*Enabled=*/true, /*AcrossEmptyLines=*/true,
/*AcrossComments=*/true, /*AlignCompound=*/false,
/*AlignFunctionPointers=*/false, /*PadOperators=*/true}));
// For backward compatibility.
IO.enumCase(Value, "true",
FormatStyle::AlignConsecutiveStyle(
{/*Enabled=*/true, /*AcrossEmptyLines=*/false,
/*AcrossComments=*/false, /*AlignCompound=*/false,
/*PadOperators=*/true}));
/*AlignFunctionPointers=*/false, /*PadOperators=*/true}));
IO.enumCase(Value, "false",
FormatStyle::AlignConsecutiveStyle(
{/*Enabled=*/false, /*AcrossEmptyLines=*/false,
/*AcrossComments=*/false, /*AlignCompound=*/false,
/*PadOperators=*/true}));
/*AlignFunctionPointers=*/false, /*PadOperators=*/true}));
}
static void mapping(IO &IO, FormatStyle::AlignConsecutiveStyle &Value) {
@ -118,6 +116,7 @@ template <> struct MappingTraits<FormatStyle::AlignConsecutiveStyle> {
IO.mapOptional("AcrossEmptyLines", Value.AcrossEmptyLines);
IO.mapOptional("AcrossComments", Value.AcrossComments);
IO.mapOptional("AlignCompound", Value.AlignCompound);
IO.mapOptional("AlignFunctionPointers", Value.AlignFunctionPointers);
IO.mapOptional("PadOperators", Value.PadOperators);
}
};
@ -1432,6 +1431,7 @@ FormatStyle getLLVMStyle(FormatStyle::LanguageKind Language) {
LLVMStyle.AlignConsecutiveAssignments.AcrossEmptyLines = false;
LLVMStyle.AlignConsecutiveAssignments.AcrossComments = false;
LLVMStyle.AlignConsecutiveAssignments.AlignCompound = false;
LLVMStyle.AlignConsecutiveAssignments.AlignFunctionPointers = false;
LLVMStyle.AlignConsecutiveAssignments.PadOperators = true;
LLVMStyle.AlignConsecutiveBitFields = {};
LLVMStyle.AlignConsecutiveDeclarations = {};

View File

@ -113,8 +113,8 @@ unsigned CommaSeparatedList::formatAfterToken(LineState &State,
if (!State.NextToken || !State.NextToken->Previous)
return 0;
if (Formats.size() == 1)
return 0; // Handled by formatFromToken
if (Formats.size() <= 1)
return 0; // Handled by formatFromToken (1) or avoid severe penalty (0).
// Ensure that we start on the opening brace.
const FormatToken *LBrace =

View File

@ -275,14 +275,15 @@ class AnnotatedLine;
struct FormatToken {
FormatToken()
: HasUnescapedNewline(false), IsMultiline(false), IsFirst(false),
MustBreakBefore(false), IsUnterminatedLiteral(false),
CanBreakBefore(false), ClosesTemplateDeclaration(false),
StartsBinaryExpression(false), EndsBinaryExpression(false),
PartOfMultiVariableDeclStmt(false), ContinuesLineCommentSection(false),
Finalized(false), ClosesRequiresClause(false),
EndsCppAttributeGroup(false), BlockKind(BK_Unknown),
Decision(FD_Unformatted), PackingKind(PPK_Inconclusive),
TypeIsFinalized(false), Type(TT_Unknown) {}
MustBreakBefore(false), MustBreakBeforeFinalized(false),
IsUnterminatedLiteral(false), CanBreakBefore(false),
ClosesTemplateDeclaration(false), StartsBinaryExpression(false),
EndsBinaryExpression(false), PartOfMultiVariableDeclStmt(false),
ContinuesLineCommentSection(false), Finalized(false),
ClosesRequiresClause(false), EndsCppAttributeGroup(false),
BlockKind(BK_Unknown), Decision(FD_Unformatted),
PackingKind(PPK_Inconclusive), TypeIsFinalized(false),
Type(TT_Unknown) {}
/// The \c Token.
Token Tok;
@ -318,6 +319,10 @@ struct FormatToken {
/// before the token.
unsigned MustBreakBefore : 1;
/// Whether MustBreakBefore is finalized during parsing and must not
/// be reset between runs.
unsigned MustBreakBeforeFinalized : 1;
/// Set to \c true if this token is an unterminated literal.
unsigned IsUnterminatedLiteral : 1;
@ -416,10 +421,14 @@ struct FormatToken {
/// to another one please use overwriteFixedType, or even better remove the
/// need to reassign the type.
void setFinalizedType(TokenType T) {
if (MacroCtx && MacroCtx->Role == MR_UnexpandedArg)
return;
Type = T;
TypeIsFinalized = true;
}
void overwriteFixedType(TokenType T) {
if (MacroCtx && MacroCtx->Role == MR_UnexpandedArg)
return;
TypeIsFinalized = false;
setType(T);
}

View File

@ -2769,13 +2769,6 @@ class ExpressionParser {
// Consume operators with higher precedence.
parse(Precedence + 1);
// Do not assign fake parenthesis to tokens that are part of an
// unexpanded macro call. The line within the macro call contains
// the parenthesis and commas, and we will not find operators within
// that structure.
if (Current && Current->MacroParent)
break;
int CurrentPrecedence = getCurrentPrecedence();
if (Precedence == CurrentPrecedence && Current &&
@ -2919,6 +2912,13 @@ class ExpressionParser {
void addFakeParenthesis(FormatToken *Start, prec::Level Precedence,
FormatToken *End = nullptr) {
// Do not assign fake parenthesis to tokens that are part of an
// unexpanded macro call. The line within the macro call contains
// the parenthesis and commas, and we will not find operators within
// that structure.
if (Start->MacroParent)
return;
Start->FakeLParens.push_back(Precedence);
if (Precedence > prec::Unknown)
Start->StartsBinaryExpression = true;

View File

@ -954,13 +954,15 @@ static void markFinalized(FormatToken *Tok) {
// will be modified as unexpanded arguments (as part of the macro call
// formatting) in the next pass.
Tok->MacroCtx->Role = MR_UnexpandedArg;
// Reset whether spaces are required before this token, as that is context
// dependent, and that context may change when formatting the macro call.
// For example, given M(x) -> 2 * x, and the macro call M(var),
// the token 'var' will have SpacesRequiredBefore = 1 after being
// Reset whether spaces or a line break are required before this token, as
// that is context dependent, and that context may change when formatting
// the macro call. For example, given M(x) -> 2 * x, and the macro call
// M(var), the token 'var' will have SpacesRequiredBefore = 1 after being
// formatted as part of the expanded macro, but SpacesRequiredBefore = 0
// for its position within the macro call.
Tok->SpacesRequiredBefore = 0;
if (!Tok->MustBreakBeforeFinalized)
Tok->MustBreakBefore = 0;
} else {
Tok->Finalized = true;
}

View File

@ -2308,7 +2308,7 @@ bool UnwrappedLineParser::tryToParseLambdaIntroducer() {
LeftSquare->isCppStructuredBinding(Style)) {
return false;
}
if (FormatTok->is(tok::l_square))
if (FormatTok->is(tok::l_square) || tok::isLiteral(FormatTok->Tok.getKind()))
return false;
if (FormatTok->is(tok::r_square)) {
const FormatToken *Next = Tokens->peekNextToken(/*SkipComment=*/true);
@ -4675,6 +4675,7 @@ void UnwrappedLineParser::readToken(int LevelDifference) {
conditionalCompilationEnd();
FormatTok = Tokens->getNextToken();
FormatTok->MustBreakBefore = true;
FormatTok->MustBreakBeforeFinalized = true;
}
auto IsFirstNonCommentOnLine = [](bool FirstNonCommentOnLine,
@ -4891,6 +4892,7 @@ void UnwrappedLineParser::pushToken(FormatToken *Tok) {
Line->Tokens.push_back(UnwrappedLineNode(Tok));
if (MustBreakBeforeNextToken) {
Line->Tokens.back().Tok->MustBreakBefore = true;
Line->Tokens.back().Tok->MustBreakBeforeFinalized = true;
MustBreakBeforeNextToken = false;
}
}

View File

@ -978,7 +978,14 @@ void WhitespaceManager::alignConsecutiveDeclarations() {
AlignTokens(
Style,
[](Change const &C) {
[&](Change const &C) {
if (Style.AlignConsecutiveDeclarations.AlignFunctionPointers) {
for (const auto *Prev = C.Tok->Previous; Prev; Prev = Prev->Previous)
if (Prev->is(tok::equal))
return false;
if (C.Tok->is(TT_FunctionTypeLParen))
return true;
}
if (C.Tok->is(TT_FunctionDeclarationName))
return true;
if (C.Tok->isNot(TT_StartOfName))

View File

@ -282,6 +282,7 @@ class WhitespaceManager {
for (auto PrevIter = Start; PrevIter != End; ++PrevIter) {
// If we broke the line the initial spaces are already
// accounted for.
assert(PrevIter->Index < Changes.size());
if (Changes[PrevIter->Index].NewlinesBefore > 0)
NetWidth = 0;
NetWidth +=

View File

@ -611,12 +611,19 @@ static bool ParseDirective(StringRef S, ExpectedData *ED, SourceManager &SM,
diag::err_verify_missing_start) << KindStr;
continue;
}
llvm::SmallString<8> CloseBrace("}}");
const char *const DelimBegin = PH.C;
PH.Advance();
// Count the number of opening braces for `string` kinds
for (; !D.RegexKind && PH.Next("{"); PH.Advance())
CloseBrace += '}';
const char* const ContentBegin = PH.C; // mark content begin
// Search for token: }}
if (!PH.SearchClosingBrace("{{", "}}")) {
Diags.Report(Pos.getLocWithOffset(PH.C-PH.Begin),
diag::err_verify_missing_end) << KindStr;
// Search for closing brace
StringRef OpenBrace(DelimBegin, ContentBegin - DelimBegin);
if (!PH.SearchClosingBrace(OpenBrace, CloseBrace)) {
Diags.Report(Pos.getLocWithOffset(PH.C - PH.Begin),
diag::err_verify_missing_end)
<< KindStr << CloseBrace;
continue;
}
const char* const ContentEnd = PH.P; // mark content end

View File

@ -26,51 +26,48 @@
#define __DEFAULT_FN_ATTRS_CONSTEXPR __DEFAULT_FN_ATTRS
#endif
/** Find the first set bit starting from the lsb. Result is undefined if
* input is 0.
*
* \headerfile <x86intrin.h>
*
* This intrinsic corresponds to the <c> BSF </c> instruction or the
* <c> TZCNT </c> instruction.
*
* \param __A
* A 32-bit integer operand.
* \returns A 32-bit integer containing the bit number.
*/
/// Find the first set bit starting from the lsb. Result is undefined if
/// input is 0.
///
/// \headerfile <x86intrin.h>
///
/// This intrinsic corresponds to the \c BSF instruction or the
/// \c TZCNT instruction.
///
/// \param __A
/// A 32-bit integer operand.
/// \returns A 32-bit integer containing the bit number.
static __inline__ int __DEFAULT_FN_ATTRS_CONSTEXPR
__bsfd(int __A) {
return __builtin_ctz((unsigned int)__A);
}
/** Find the first set bit starting from the msb. Result is undefined if
* input is 0.
*
* \headerfile <x86intrin.h>
*
* This intrinsic corresponds to the <c> BSR </c> instruction or the
* <c> LZCNT </c> instruction and an <c> XOR </c>.
*
* \param __A
* A 32-bit integer operand.
* \returns A 32-bit integer containing the bit number.
*/
/// Find the first set bit starting from the msb. Result is undefined if
/// input is 0.
///
/// \headerfile <x86intrin.h>
///
/// This intrinsic corresponds to the \c BSR instruction or the
/// \c LZCNT instruction and an \c XOR.
///
/// \param __A
/// A 32-bit integer operand.
/// \returns A 32-bit integer containing the bit number.
static __inline__ int __DEFAULT_FN_ATTRS_CONSTEXPR
__bsrd(int __A) {
return 31 - __builtin_clz((unsigned int)__A);
}
/** Swaps the bytes in the input. Converting little endian to big endian or
* vice versa.
*
* \headerfile <x86intrin.h>
*
* This intrinsic corresponds to the <c> BSWAP </c> instruction.
*
* \param __A
* A 32-bit integer operand.
* \returns A 32-bit integer containing the swapped bytes.
*/
/// Swaps the bytes in the input. Converting little endian to big endian or
/// vice versa.
///
/// \headerfile <x86intrin.h>
///
/// This intrinsic corresponds to the \c BSWAP instruction.
///
/// \param __A
/// A 32-bit integer operand.
/// \returns A 32-bit integer containing the swapped bytes.
static __inline__ int __DEFAULT_FN_ATTRS_CONSTEXPR
__bswapd(int __A) {
return (int)__builtin_bswap32((unsigned int)__A);
@ -85,51 +82,48 @@ _bswap(int __A) {
#define _bit_scan_reverse(A) __bsrd((A))
#ifdef __x86_64__
/** Find the first set bit starting from the lsb. Result is undefined if
* input is 0.
*
* \headerfile <x86intrin.h>
*
* This intrinsic corresponds to the <c> BSF </c> instruction or the
* <c> TZCNT </c> instruction.
*
* \param __A
* A 64-bit integer operand.
* \returns A 32-bit integer containing the bit number.
*/
/// Find the first set bit starting from the lsb. Result is undefined if
/// input is 0.
///
/// \headerfile <x86intrin.h>
///
/// This intrinsic corresponds to the \c BSF instruction or the
/// \c TZCNT instruction.
///
/// \param __A
/// A 64-bit integer operand.
/// \returns A 32-bit integer containing the bit number.
static __inline__ int __DEFAULT_FN_ATTRS_CONSTEXPR
__bsfq(long long __A) {
return (long long)__builtin_ctzll((unsigned long long)__A);
}
/** Find the first set bit starting from the msb. Result is undefined if
* input is 0.
*
* \headerfile <x86intrin.h>
*
* This intrinsic corresponds to the <c> BSR </c> instruction or the
* <c> LZCNT </c> instruction and an <c> XOR </c>.
*
* \param __A
* A 64-bit integer operand.
* \returns A 32-bit integer containing the bit number.
*/
/// Find the first set bit starting from the msb. Result is undefined if
/// input is 0.
///
/// \headerfile <x86intrin.h>
///
/// This intrinsic corresponds to the \c BSR instruction or the
/// \c LZCNT instruction and an \c XOR.
///
/// \param __A
/// A 64-bit integer operand.
/// \returns A 32-bit integer containing the bit number.
static __inline__ int __DEFAULT_FN_ATTRS_CONSTEXPR
__bsrq(long long __A) {
return 63 - __builtin_clzll((unsigned long long)__A);
}
/** Swaps the bytes in the input. Converting little endian to big endian or
* vice versa.
*
* \headerfile <x86intrin.h>
*
* This intrinsic corresponds to the <c> BSWAP </c> instruction.
*
* \param __A
* A 64-bit integer operand.
* \returns A 64-bit integer containing the swapped bytes.
*/
/// Swaps the bytes in the input. Converting little endian to big endian or
/// vice versa.
///
/// \headerfile <x86intrin.h>
///
/// This intrinsic corresponds to the \c BSWAP instruction.
///
/// \param __A
/// A 64-bit integer operand.
/// \returns A 64-bit integer containing the swapped bytes.
static __inline__ long long __DEFAULT_FN_ATTRS_CONSTEXPR
__bswapq(long long __A) {
return (long long)__builtin_bswap64((unsigned long long)__A);
@ -138,18 +132,17 @@ __bswapq(long long __A) {
#define _bswap64(A) __bswapq((A))
#endif
/** Counts the number of bits in the source operand having a value of 1.
*
* \headerfile <x86intrin.h>
*
* This intrinsic corresponds to the <c> POPCNT </c> instruction or a
* a sequence of arithmetic and logic ops to calculate it.
*
* \param __A
* An unsigned 32-bit integer operand.
* \returns A 32-bit integer containing the number of bits with value 1 in the
* source operand.
*/
/// Counts the number of bits in the source operand having a value of 1.
///
/// \headerfile <x86intrin.h>
///
/// This intrinsic corresponds to the \c POPCNT instruction or a
/// a sequence of arithmetic and logic ops to calculate it.
///
/// \param __A
/// An unsigned 32-bit integer operand.
/// \returns A 32-bit integer containing the number of bits with value 1 in the
/// source operand.
static __inline__ int __DEFAULT_FN_ATTRS_CONSTEXPR
__popcntd(unsigned int __A)
{
@ -159,18 +152,17 @@ __popcntd(unsigned int __A)
#define _popcnt32(A) __popcntd((A))
#ifdef __x86_64__
/** Counts the number of bits in the source operand having a value of 1.
*
* \headerfile <x86intrin.h>
*
* This intrinsic corresponds to the <c> POPCNT </c> instruction or a
* a sequence of arithmetic and logic ops to calculate it.
*
* \param __A
* An unsigned 64-bit integer operand.
* \returns A 64-bit integer containing the number of bits with value 1 in the
* source operand.
*/
/// Counts the number of bits in the source operand having a value of 1.
///
/// \headerfile <x86intrin.h>
///
/// This intrinsic corresponds to the \c POPCNT instruction or a
/// a sequence of arithmetic and logic ops to calculate it.
///
/// \param __A
/// An unsigned 64-bit integer operand.
/// \returns A 64-bit integer containing the number of bits with value 1 in the
/// source operand.
static __inline__ long long __DEFAULT_FN_ATTRS_CONSTEXPR
__popcntq(unsigned long long __A)
{
@ -207,123 +199,120 @@ __writeeflags(unsigned int __f)
}
#endif /* !__x86_64__ */
/** Cast a 32-bit float value to a 32-bit unsigned integer value
*
* \headerfile <x86intrin.h>
* This intrinsic corresponds to the <c> VMOVD / MOVD </c> instruction in x86_64,
* and corresponds to the <c> VMOVL / MOVL </c> instruction in ia32.
*
* \param __A
* A 32-bit float value.
* \returns a 32-bit unsigned integer containing the converted value.
*/
/// Cast a 32-bit float value to a 32-bit unsigned integer value.
///
/// \headerfile <x86intrin.h>
///
/// This intrinsic corresponds to the \c VMOVD / \c MOVD instruction in x86_64,
/// and corresponds to the \c VMOVL / \c MOVL instruction in ia32.
///
/// \param __A
/// A 32-bit float value.
/// \returns a 32-bit unsigned integer containing the converted value.
static __inline__ unsigned int __DEFAULT_FN_ATTRS_CAST
_castf32_u32(float __A) {
return __builtin_bit_cast(unsigned int, __A);
}
/** Cast a 64-bit float value to a 64-bit unsigned integer value
*
* \headerfile <x86intrin.h>
* This intrinsic corresponds to the <c> VMOVQ / MOVQ </c> instruction in x86_64,
* and corresponds to the <c> VMOVL / MOVL </c> instruction in ia32.
*
* \param __A
* A 64-bit float value.
* \returns a 64-bit unsigned integer containing the converted value.
*/
/// Cast a 64-bit float value to a 64-bit unsigned integer value.
///
/// \headerfile <x86intrin.h>
///
/// This intrinsic corresponds to the \c VMOVQ / \c MOVQ instruction in x86_64,
/// and corresponds to the \c VMOVL / \c MOVL instruction in ia32.
///
/// \param __A
/// A 64-bit float value.
/// \returns a 64-bit unsigned integer containing the converted value.
static __inline__ unsigned long long __DEFAULT_FN_ATTRS_CAST
_castf64_u64(double __A) {
return __builtin_bit_cast(unsigned long long, __A);
}
/** Cast a 32-bit unsigned integer value to a 32-bit float value
*
* \headerfile <x86intrin.h>
* This intrinsic corresponds to the <c> VMOVQ / MOVQ </c> instruction in x86_64,
* and corresponds to the <c> FLDS </c> instruction in ia32.
*
* \param __A
* A 32-bit unsigned integer value.
* \returns a 32-bit float value containing the converted value.
*/
/// Cast a 32-bit unsigned integer value to a 32-bit float value.
///
/// \headerfile <x86intrin.h>
///
/// This intrinsic corresponds to the \c VMOVQ / \c MOVQ instruction in x86_64,
/// and corresponds to the \c FLDS instruction in ia32.
///
/// \param __A
/// A 32-bit unsigned integer value.
/// \returns a 32-bit float value containing the converted value.
static __inline__ float __DEFAULT_FN_ATTRS_CAST
_castu32_f32(unsigned int __A) {
return __builtin_bit_cast(float, __A);
}
/** Cast a 64-bit unsigned integer value to a 64-bit float value
*
* \headerfile <x86intrin.h>
* This intrinsic corresponds to the <c> VMOVQ / MOVQ </c> instruction in x86_64,
* and corresponds to the <c> FLDL </c> instruction in ia32.
*
* \param __A
* A 64-bit unsigned integer value.
* \returns a 64-bit float value containing the converted value.
*/
/// Cast a 64-bit unsigned integer value to a 64-bit float value.
///
/// \headerfile <x86intrin.h>
///
/// This intrinsic corresponds to the \c VMOVQ / \c MOVQ instruction in x86_64,
/// and corresponds to the \c FLDL instruction in ia32.
///
/// \param __A
/// A 64-bit unsigned integer value.
/// \returns a 64-bit float value containing the converted value.
static __inline__ double __DEFAULT_FN_ATTRS_CAST
_castu64_f64(unsigned long long __A) {
return __builtin_bit_cast(double, __A);
}
/** Adds the unsigned integer operand to the CRC-32C checksum of the
* unsigned char operand.
*
* \headerfile <x86intrin.h>
*
* This intrinsic corresponds to the <c> CRC32B </c> instruction.
*
* \param __C
* An unsigned integer operand to add to the CRC-32C checksum of operand
* \a __D.
* \param __D
* An unsigned 8-bit integer operand used to compute the CRC-32C checksum.
* \returns The result of adding operand \a __C to the CRC-32C checksum of
* operand \a __D.
*/
/// Adds the unsigned integer operand to the CRC-32C checksum of the
/// unsigned char operand.
///
/// \headerfile <x86intrin.h>
///
/// This intrinsic corresponds to the \c CRC32B instruction.
///
/// \param __C
/// An unsigned integer operand to add to the CRC-32C checksum of operand
/// \a __D.
/// \param __D
/// An unsigned 8-bit integer operand used to compute the CRC-32C checksum.
/// \returns The result of adding operand \a __C to the CRC-32C checksum of
/// operand \a __D.
static __inline__ unsigned int __DEFAULT_FN_ATTRS_CRC32
__crc32b(unsigned int __C, unsigned char __D)
{
return __builtin_ia32_crc32qi(__C, __D);
}
/** Adds the unsigned integer operand to the CRC-32C checksum of the
* unsigned short operand.
*
* \headerfile <x86intrin.h>
*
* This intrinsic corresponds to the <c> CRC32W </c> instruction.
*
* \param __C
* An unsigned integer operand to add to the CRC-32C checksum of operand
* \a __D.
* \param __D
* An unsigned 16-bit integer operand used to compute the CRC-32C checksum.
* \returns The result of adding operand \a __C to the CRC-32C checksum of
* operand \a __D.
*/
/// Adds the unsigned integer operand to the CRC-32C checksum of the
/// unsigned short operand.
///
/// \headerfile <x86intrin.h>
///
/// This intrinsic corresponds to the \c CRC32W instruction.
///
/// \param __C
/// An unsigned integer operand to add to the CRC-32C checksum of operand
/// \a __D.
/// \param __D
/// An unsigned 16-bit integer operand used to compute the CRC-32C checksum.
/// \returns The result of adding operand \a __C to the CRC-32C checksum of
/// operand \a __D.
static __inline__ unsigned int __DEFAULT_FN_ATTRS_CRC32
__crc32w(unsigned int __C, unsigned short __D)
{
return __builtin_ia32_crc32hi(__C, __D);
}
/** Adds the unsigned integer operand to the CRC-32C checksum of the
* second unsigned integer operand.
*
* \headerfile <x86intrin.h>
*
* This intrinsic corresponds to the <c> CRC32D </c> instruction.
*
* \param __C
* An unsigned integer operand to add to the CRC-32C checksum of operand
* \a __D.
* \param __D
* An unsigned 32-bit integer operand used to compute the CRC-32C checksum.
* \returns The result of adding operand \a __C to the CRC-32C checksum of
* operand \a __D.
*/
/// Adds the unsigned integer operand to the CRC-32C checksum of the
/// second unsigned integer operand.
///
/// \headerfile <x86intrin.h>
///
/// This intrinsic corresponds to the \c CRC32D instruction.
///
/// \param __C
/// An unsigned integer operand to add to the CRC-32C checksum of operand
/// \a __D.
/// \param __D
/// An unsigned 32-bit integer operand used to compute the CRC-32C checksum.
/// \returns The result of adding operand \a __C to the CRC-32C checksum of
/// operand \a __D.
static __inline__ unsigned int __DEFAULT_FN_ATTRS_CRC32
__crc32d(unsigned int __C, unsigned int __D)
{
@ -331,21 +320,20 @@ __crc32d(unsigned int __C, unsigned int __D)
}
#ifdef __x86_64__
/** Adds the unsigned integer operand to the CRC-32C checksum of the
* unsigned 64-bit integer operand.
*
* \headerfile <x86intrin.h>
*
* This intrinsic corresponds to the <c> CRC32Q </c> instruction.
*
* \param __C
* An unsigned integer operand to add to the CRC-32C checksum of operand
* \a __D.
* \param __D
* An unsigned 64-bit integer operand used to compute the CRC-32C checksum.
* \returns The result of adding operand \a __C to the CRC-32C checksum of
* operand \a __D.
*/
/// Adds the unsigned integer operand to the CRC-32C checksum of the
/// unsigned 64-bit integer operand.
///
/// \headerfile <x86intrin.h>
///
/// This intrinsic corresponds to the \c CRC32Q instruction.
///
/// \param __C
/// An unsigned integer operand to add to the CRC-32C checksum of operand
/// \a __D.
/// \param __D
/// An unsigned 64-bit integer operand used to compute the CRC-32C checksum.
/// \returns The result of adding operand \a __C to the CRC-32C checksum of
/// operand \a __D.
static __inline__ unsigned long long __DEFAULT_FN_ATTRS_CRC32
__crc32q(unsigned long long __C, unsigned long long __D)
{

View File

@ -148,6 +148,7 @@ IncrementalCompilerBuilder::create(std::vector<const char *> &ClangArgv) {
// We do C++ by default; append right after argv[0] if no "-x" given
ClangArgv.insert(ClangArgv.end(), "-Xclang");
ClangArgv.insert(ClangArgv.end(), "-fincremental-extensions");
ClangArgv.insert(ClangArgv.end(), "-mcpu=native");
ClangArgv.insert(ClangArgv.end(), "-c");
// Put a dummy C++ file on to ensure there's at least one compile job for the

View File

@ -2661,7 +2661,12 @@ Decl *Parser::ParseDeclarationAfterDeclaratorAndAttributes(
// ProduceConstructorSignatureHelp only on VarDecls.
ExpressionStarts = SetPreferredType;
}
if (ParseExpressionList(Exprs, ExpressionStarts)) {
bool SawError = ParseExpressionList(Exprs, ExpressionStarts);
InitScope.pop();
if (SawError) {
if (ThisVarDecl && PP.isCodeCompletionReached() && !CalledSignatureHelp) {
Actions.ProduceConstructorSignatureHelp(
ThisVarDecl->getType()->getCanonicalTypeInternal(),
@ -2674,7 +2679,6 @@ Decl *Parser::ParseDeclarationAfterDeclaratorAndAttributes(
} else {
// Match the ')'.
T.consumeClose();
InitScope.pop();
ExprResult Initializer = Actions.ActOnParenListExpr(T.getOpenLocation(),
T.getCloseLocation(),

View File

@ -1974,10 +1974,11 @@ Parser::ParsePostfixExpressionSuffix(ExprResult LHS) {
PreferredType.enterSubscript(Actions, Tok.getLocation(), LHS.get());
// We try to parse a list of indexes in all language mode first
// and, in we find 0 or one index, we try to parse an OpenMP array
// and, in we find 0 or one index, we try to parse an OpenMP/OpenACC array
// section. This allow us to support C++23 multi dimensional subscript and
// OpenMp sections in the same language mode.
if (!getLangOpts().OpenMP || Tok.isNot(tok::colon)) {
// OpenMP/OpenACC sections in the same language mode.
if ((!getLangOpts().OpenMP && !AllowOpenACCArraySections) ||
Tok.isNot(tok::colon)) {
if (!getLangOpts().CPlusPlus23) {
ExprResult Idx;
if (getLangOpts().CPlusPlus11 && Tok.is(tok::l_brace)) {
@ -2001,7 +2002,18 @@ Parser::ParsePostfixExpressionSuffix(ExprResult LHS) {
}
}
if (ArgExprs.size() <= 1 && getLangOpts().OpenMP) {
// Handle OpenACC first, since 'AllowOpenACCArraySections' is only enabled
// when actively parsing a 'var' in a 'var-list' during clause/'cache'
// parsing, so it is the most specific, and best allows us to handle
// OpenACC and OpenMP at the same time.
if (ArgExprs.size() <= 1 && AllowOpenACCArraySections) {
ColonProtectionRAIIObject RAII(*this);
if (Tok.is(tok::colon)) {
// Consume ':'
ColonLocFirst = ConsumeToken();
Length = Actions.CorrectDelayedTyposInExpr(ParseExpression());
}
} else if (ArgExprs.size() <= 1 && getLangOpts().OpenMP) {
ColonProtectionRAIIObject RAII(*this);
if (Tok.is(tok::colon)) {
// Consume ':'
@ -2031,6 +2043,12 @@ Parser::ParsePostfixExpressionSuffix(ExprResult LHS) {
if (!LHS.isInvalid() && !HasError && !Length.isInvalid() &&
!Stride.isInvalid() && Tok.is(tok::r_square)) {
if (ColonLocFirst.isValid() || ColonLocSecond.isValid()) {
// FIXME: OpenACC hasn't implemented Sema/Array section handling at a
// semantic level yet. For now, just reuse the OpenMP implementation
// as it gets the parsing/type management mostly right, and we can
// replace this call to ActOnOpenACCArraySectionExpr in the future.
// Eventually we'll genericize the OPenMPArraySectionExpr type as
// well.
LHS = Actions.ActOnOMPArraySectionExpr(
LHS.get(), Loc, ArgExprs.empty() ? nullptr : ArgExprs[0],
ColonLocFirst, ColonLocSecond, Length.get(), Stride.get(), RLoc);

View File

@ -554,49 +554,17 @@ ExprResult Parser::ParseOpenACCIDExpression() {
return getActions().CorrectDelayedTyposInExpr(Res);
}
/// OpenACC 3.3, section 2.10:
/// A 'var' in a cache directive must be a single array element or a simple
/// subarray. In C and C++, a simple subarray is an array name followed by an
/// extended array range specification in brackets, with a start and length such
/// as:
///
/// arr[lower:length]
///
bool Parser::ParseOpenACCCacheVar() {
ExprResult ArrayName = ParseOpenACCIDExpression();
if (ArrayName.isInvalid())
return true;
// If the expression is invalid, just continue parsing the brackets, there
// is likely other useful diagnostics we can emit inside of those.
BalancedDelimiterTracker SquareBrackets(*this, tok::l_square,
tok::annot_pragma_openacc_end);
// Square brackets are required, so error here, and try to recover by moving
// until the next comma, or the close paren/end of pragma.
if (SquareBrackets.expectAndConsume()) {
SkipUntil(tok::comma, tok::r_paren, tok::annot_pragma_openacc_end,
Parser::StopBeforeMatch);
return true;
}
ExprResult Lower = getActions().CorrectDelayedTyposInExpr(ParseExpression());
if (Lower.isInvalid())
return true;
// The 'length' expression is optional, as this could be a single array
// element. If there is no colon, we can treat it as that.
if (getCurToken().is(tok::colon)) {
ConsumeToken();
ExprResult Length =
getActions().CorrectDelayedTyposInExpr(ParseExpression());
if (Length.isInvalid())
return true;
}
// Diagnose the square bracket being in the wrong place and continue.
return SquareBrackets.consumeClose();
/// OpenACC 3.3, section 1.6:
/// In this spec, a 'var' (in italics) is one of the following:
/// - a variable name (a scalar, array, or compisite variable name)
/// - a subarray specification with subscript ranges
/// - an array element
/// - a member of a composite variable
/// - a common block name between slashes (fortran only)
bool Parser::ParseOpenACCVar() {
OpenACCArraySectionRAII ArraySections(*this);
ExprResult Res = ParseAssignmentExpression();
return Res.isInvalid();
}
/// OpenACC 3.3, section 2.10:
@ -627,7 +595,16 @@ void Parser::ParseOpenACCCacheVarList() {
if (!FirstArray)
ExpectAndConsume(tok::comma);
FirstArray = false;
if (ParseOpenACCCacheVar())
// OpenACC 3.3, section 2.10:
// A 'var' in a cache directive must be a single array element or a simple
// subarray. In C and C++, a simple subarray is an array name followed by
// an extended array range specification in brackets, with a start and
// length such as:
//
// arr[lower:length]
//
if (ParseOpenACCVar())
SkipUntil(tok::r_paren, tok::annot_pragma_openacc_end, tok::comma,
StopBeforeMatch);
}

View File

@ -2315,6 +2315,12 @@ void Sema::ActOnPopScope(SourceLocation Loc, Scope *S) {
}
ShadowingDecls.erase(ShadowI);
}
if (!getLangOpts().CPlusPlus && S->isClassScope()) {
if (auto *FD = dyn_cast<FieldDecl>(TmpD);
FD && FD->hasAttr<CountedByAttr>())
CheckCountedByAttr(S, FD);
}
}
llvm::sort(DeclDiags,

View File

@ -8460,6 +8460,135 @@ static void handleZeroCallUsedRegsAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
D->addAttr(ZeroCallUsedRegsAttr::Create(S.Context, Kind, AL));
}
static void handleCountedByAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
if (!AL.isArgIdent(0)) {
S.Diag(AL.getLoc(), diag::err_attribute_argument_type)
<< AL << AANT_ArgumentIdentifier;
return;
}
IdentifierLoc *IL = AL.getArgAsIdent(0);
CountedByAttr *CBA =
::new (S.Context) CountedByAttr(S.Context, AL, IL->Ident);
CBA->setCountedByFieldLoc(IL->Loc);
D->addAttr(CBA);
}
static const FieldDecl *
FindFieldInTopLevelOrAnonymousStruct(const RecordDecl *RD,
const IdentifierInfo *FieldName) {
for (const Decl *D : RD->decls()) {
if (const auto *FD = dyn_cast<FieldDecl>(D))
if (FD->getName() == FieldName->getName())
return FD;
if (const auto *R = dyn_cast<RecordDecl>(D))
if (const FieldDecl *FD =
FindFieldInTopLevelOrAnonymousStruct(R, FieldName))
return FD;
}
return nullptr;
}
bool Sema::CheckCountedByAttr(Scope *S, const FieldDecl *FD) {
LangOptions::StrictFlexArraysLevelKind StrictFlexArraysLevel =
LangOptions::StrictFlexArraysLevelKind::IncompleteOnly;
if (!Decl::isFlexibleArrayMemberLike(Context, FD, FD->getType(),
StrictFlexArraysLevel, true)) {
// The "counted_by" attribute must be on a flexible array member.
SourceRange SR = FD->getLocation();
Diag(SR.getBegin(), diag::err_counted_by_attr_not_on_flexible_array_member)
<< SR;
return true;
}
const auto *CBA = FD->getAttr<CountedByAttr>();
const IdentifierInfo *FieldName = CBA->getCountedByField();
auto GetNonAnonStructOrUnion = [](const RecordDecl *RD) {
while (RD && !RD->getDeclName())
if (const auto *R = dyn_cast<RecordDecl>(RD->getDeclContext()))
RD = R;
else
break;
return RD;
};
const RecordDecl *EnclosingRD = GetNonAnonStructOrUnion(FD->getParent());
const FieldDecl *CountFD =
FindFieldInTopLevelOrAnonymousStruct(EnclosingRD, FieldName);
if (!CountFD) {
DeclarationNameInfo NameInfo(FieldName,
CBA->getCountedByFieldLoc().getBegin());
LookupResult MemResult(*this, NameInfo, Sema::LookupMemberName);
LookupName(MemResult, S);
if (!MemResult.empty()) {
SourceRange SR = CBA->getCountedByFieldLoc();
Diag(SR.getBegin(), diag::err_flexible_array_count_not_in_same_struct)
<< CBA->getCountedByField() << SR;
if (auto *ND = MemResult.getAsSingle<NamedDecl>()) {
SR = ND->getLocation();
Diag(SR.getBegin(), diag::note_flexible_array_counted_by_attr_field)
<< ND << SR;
}
return true;
} else {
// The "counted_by" field needs to exist in the struct.
LookupResult OrdResult(*this, NameInfo, Sema::LookupOrdinaryName);
LookupName(OrdResult, S);
if (!OrdResult.empty()) {
SourceRange SR = FD->getLocation();
Diag(SR.getBegin(), diag::err_counted_by_must_be_in_structure)
<< FieldName << SR;
if (auto *ND = OrdResult.getAsSingle<NamedDecl>()) {
SR = ND->getLocation();
Diag(SR.getBegin(), diag::note_flexible_array_counted_by_attr_field)
<< ND << SR;
}
return true;
}
}
CXXScopeSpec SS;
DeclFilterCCC<FieldDecl> Filter(FieldName);
return DiagnoseEmptyLookup(S, SS, MemResult, Filter, nullptr, std::nullopt,
const_cast<DeclContext *>(FD->getDeclContext()));
}
if (CountFD->hasAttr<CountedByAttr>()) {
// The "counted_by" field can't point to the flexible array member.
SourceRange SR = CBA->getCountedByFieldLoc();
Diag(SR.getBegin(), diag::err_counted_by_attr_refers_to_flexible_array)
<< CBA->getCountedByField() << SR;
return true;
}
if (!CountFD->getType()->isIntegerType() ||
CountFD->getType()->isBooleanType()) {
// The "counted_by" field must have an integer type.
SourceRange SR = CBA->getCountedByFieldLoc();
Diag(SR.getBegin(),
diag::err_flexible_array_counted_by_attr_field_not_integer)
<< CBA->getCountedByField() << SR;
SR = CountFD->getLocation();
Diag(SR.getBegin(), diag::note_flexible_array_counted_by_attr_field)
<< CountFD << SR;
return true;
}
return false;
}
static void handleFunctionReturnThunksAttr(Sema &S, Decl *D,
const ParsedAttr &AL) {
StringRef KindStr;
@ -9420,6 +9549,10 @@ ProcessDeclAttribute(Sema &S, Scope *scope, Decl *D, const ParsedAttr &AL,
handleAvailableOnlyInDefaultEvalMethod(S, D, AL);
break;
case ParsedAttr::AT_CountedBy:
handleCountedByAttr(S, D, AL);
break;
// Microsoft attributes:
case ParsedAttr::AT_LayoutVersion:
handleLayoutVersion(S, D, AL);

View File

@ -2469,7 +2469,8 @@ bool Sema::DiagnoseDependentMemberLookup(const LookupResult &R) {
bool Sema::DiagnoseEmptyLookup(Scope *S, CXXScopeSpec &SS, LookupResult &R,
CorrectionCandidateCallback &CCC,
TemplateArgumentListInfo *ExplicitTemplateArgs,
ArrayRef<Expr *> Args, TypoExpr **Out) {
ArrayRef<Expr *> Args, DeclContext *LookupCtx,
TypoExpr **Out) {
DeclarationName Name = R.getLookupName();
unsigned diagnostic = diag::err_undeclared_var_use;
@ -2485,7 +2486,8 @@ bool Sema::DiagnoseEmptyLookup(Scope *S, CXXScopeSpec &SS, LookupResult &R,
// unqualified lookup. This is useful when (for example) the
// original lookup would not have found something because it was a
// dependent name.
DeclContext *DC = SS.isEmpty() ? CurContext : nullptr;
DeclContext *DC =
LookupCtx ? LookupCtx : (SS.isEmpty() ? CurContext : nullptr);
while (DC) {
if (isa<CXXRecordDecl>(DC)) {
LookupQualifiedName(R, DC);
@ -2528,12 +2530,12 @@ bool Sema::DiagnoseEmptyLookup(Scope *S, CXXScopeSpec &SS, LookupResult &R,
emitEmptyLookupTypoDiagnostic(TC, *this, SS, Name, TypoLoc, Args,
diagnostic, diagnostic_suggest);
},
nullptr, CTK_ErrorRecovery);
nullptr, CTK_ErrorRecovery, LookupCtx);
if (*Out)
return true;
} else if (S &&
(Corrected = CorrectTypo(R.getLookupNameInfo(), R.getLookupKind(),
S, &SS, CCC, CTK_ErrorRecovery))) {
} else if (S && (Corrected =
CorrectTypo(R.getLookupNameInfo(), R.getLookupKind(), S,
&SS, CCC, CTK_ErrorRecovery, LookupCtx))) {
std::string CorrectedStr(Corrected.getAsString(getLangOpts()));
bool DroppedSpecifier =
Corrected.WillReplaceSpecifier() && Name.getAsString() == CorrectedStr;
@ -2823,7 +2825,7 @@ Sema::ActOnIdExpression(Scope *S, CXXScopeSpec &SS,
// a template name, but we happen to have always already looked up the name
// before we get here if it must be a template name.
if (DiagnoseEmptyLookup(S, SS, R, CCC ? *CCC : DefaultValidator, nullptr,
std::nullopt, &TE)) {
std::nullopt, nullptr, &TE)) {
if (TE && KeywordReplacement) {
auto &State = getTypoExprState(TE);
auto BestTC = State.Consumer->getNextCorrection();

View File

@ -782,7 +782,8 @@ Sema::BuildMemberReferenceExpr(Expr *Base, QualType BaseType,
const Scope *S,
ActOnMemberAccessExtraArgs *ExtraArgs) {
if (BaseType->isDependentType() ||
(SS.isSet() && isDependentScopeSpecifier(SS)))
(SS.isSet() && isDependentScopeSpecifier(SS)) ||
NameInfo.getName().isDependentName())
return ActOnDependentMemberExpr(Base, BaseType,
IsArrow, OpLoc,
SS, TemplateKWLoc, FirstQualifierInScope,

View File

@ -6056,6 +6056,16 @@ static ExprResult BuildConvertedConstantExpression(Sema &S, Expr *From,
diag::err_typecheck_converted_constant_expression_indirect)
<< From->getType() << From->getSourceRange() << T;
}
// 'TryCopyInitialization' returns incorrect info for attempts to bind
// a reference to a bit-field due to C++ [over.ics.ref]p4. Namely,
// 'SCS->DirectBinding' occurs to be set to 'true' despite it is not
// the direct binding according to C++ [dcl.init.ref]p5. Hence, check this
// case explicitly.
if (From->refersToBitField() && T.getTypePtr()->isReferenceType()) {
return S.Diag(From->getBeginLoc(),
diag::err_reference_bind_to_bitfield_in_cce)
<< From->getSourceRange();
}
// Usually we can simply apply the ImplicitConversionSequence we formed
// earlier, but that's not guaranteed to work when initializing an object of

View File

@ -3200,7 +3200,7 @@ static void DiagnoseForRangeConstVariableCopies(Sema &SemaRef,
// (The function `getTypeSize` returns the size in bits.)
ASTContext &Ctx = SemaRef.Context;
if (Ctx.getTypeSize(VariableType) <= 64 * 8 &&
(VariableType.isTriviallyCopyableType(Ctx) ||
(VariableType.isTriviallyCopyConstructibleType(Ctx) ||
hasTrivialABIAttr(VariableType)))
return;

View File

@ -6192,6 +6192,13 @@ bool TreeTransform<Derived>::TransformExceptionSpec(
// Instantiate a dynamic noexcept expression, if any.
if (isComputedNoexcept(ESI.Type)) {
// Update this scrope because ContextDecl in Sema will be used in
// TransformExpr.
auto *Method = dyn_cast_if_present<CXXMethodDecl>(ESI.SourceTemplate);
Sema::CXXThisScopeRAII ThisScope(
SemaRef, Method ? Method->getParent() : nullptr,
Method ? Method->getMethodQualifiers() : Qualifiers{},
Method != nullptr);
EnterExpressionEvaluationContext Unevaluated(
getSema(), Sema::ExpressionEvaluationContext::ConstantEvaluated);
ExprResult NoexceptExpr = getDerived().TransformExpr(ESI.NoexceptExpr);
@ -7732,7 +7739,11 @@ TreeTransform<Derived>::TransformIfStmt(IfStmt *S) {
if (Then.isInvalid())
return StmtError();
} else {
Then = new (getSema().Context) NullStmt(S->getThen()->getBeginLoc());
// Discarded branch is replaced with empty CompoundStmt so we can keep
// proper source location for start and end of original branch, so
// subsequent transformations like CoverageMapping work properly
Then = new (getSema().Context)
CompoundStmt(S->getThen()->getBeginLoc(), S->getThen()->getEndLoc());
}
// Transform the "else" branch.
@ -7741,6 +7752,13 @@ TreeTransform<Derived>::TransformIfStmt(IfStmt *S) {
Else = getDerived().TransformStmt(S->getElse());
if (Else.isInvalid())
return StmtError();
} else if (S->getElse() && ConstexprConditionValue &&
*ConstexprConditionValue) {
// Same thing here as with <then> branch, we are discarding it, we can't
// replace it with NULL nor NullStmt as we need to keep for source location
// range, for CoverageMapping
Else = new (getSema().Context)
CompoundStmt(S->getElse()->getBeginLoc(), S->getElse()->getEndLoc());
}
if (!getDerived().AlwaysRebuild() &&

View File

@ -2201,6 +2201,25 @@ void StdLibraryFunctionsChecker::initFunctionSummaries(
ErrnoNEZeroIrrelevant, GenericFailureMsg)
.ArgConstraint(NotNull(ArgNo(0))));
// int ungetc(int c, FILE *stream);
addToFunctionSummaryMap(
"ungetc", Signature(ArgTypes{IntTy, FilePtrTy}, RetType{IntTy}),
Summary(NoEvalCall)
.Case({ReturnValueCondition(BO_EQ, ArgNo(0)),
ArgumentCondition(0, WithinRange, {{0, UCharRangeMax}})},
ErrnoMustNotBeChecked, GenericSuccessMsg)
.Case({ReturnValueCondition(WithinRange, SingleValue(EOFv)),
ArgumentCondition(0, WithinRange, {{EOFv, EOFv}})},
ErrnoNEZeroIrrelevant,
"Assuming that 'ungetc' fails because EOF was passed as "
"character")
.Case({ReturnValueCondition(WithinRange, SingleValue(EOFv)),
ArgumentCondition(0, WithinRange, {{0, UCharRangeMax}})},
ErrnoNEZeroIrrelevant, GenericFailureMsg)
.ArgConstraint(ArgumentCondition(
0, WithinRange, {{EOFv, EOFv}, {0, UCharRangeMax}}))
.ArgConstraint(NotNull(ArgNo(1))));
// int fseek(FILE *stream, long offset, int whence);
// FIXME: It can be possible to get the 'SEEK_' values (like EOFv) and use
// these for condition of arg 2.
@ -2255,7 +2274,7 @@ void StdLibraryFunctionsChecker::initFunctionSummaries(
addToFunctionSummaryMap(
"ftell", Signature(ArgTypes{FilePtrTy}, RetType{LongTy}),
Summary(NoEvalCall)
.Case({ReturnValueCondition(WithinRange, Range(1, LongMax))},
.Case({ReturnValueCondition(WithinRange, Range(0, LongMax))},
ErrnoUnchanged, GenericSuccessMsg)
.Case(ReturnsMinusOne, ErrnoNEZeroIrrelevant, GenericFailureMsg)
.ArgConstraint(NotNull(ArgNo(0))));

View File

@ -263,10 +263,17 @@ class StreamChecker : public Checker<check::PreCall, eval::Call,
{{{"fputs"}, 2},
{std::bind(&StreamChecker::preReadWrite, _1, _2, _3, _4, false),
std::bind(&StreamChecker::evalFputx, _1, _2, _3, _4, false), 1}},
{{{"ungetc"}, 2},
{std::bind(&StreamChecker::preReadWrite, _1, _2, _3, _4, false),
std::bind(&StreamChecker::evalUngetc, _1, _2, _3, _4), 1}},
{{{"fseek"}, 3},
{&StreamChecker::preFseek, &StreamChecker::evalFseek, 0}},
{{{"fseeko"}, 3},
{&StreamChecker::preFseek, &StreamChecker::evalFseek, 0}},
{{{"ftell"}, 1},
{&StreamChecker::preDefault, &StreamChecker::evalFtell, 0}},
{{{"ftello"}, 1},
{&StreamChecker::preDefault, &StreamChecker::evalFtell, 0}},
{{{"fflush"}, 1},
{&StreamChecker::preFflush, &StreamChecker::evalFflush, 0}},
{{{"rewind"}, 1},
@ -332,6 +339,9 @@ class StreamChecker : public Checker<check::PreCall, eval::Call,
void evalFputx(const FnDescription *Desc, const CallEvent &Call,
CheckerContext &C, bool IsSingleChar) const;
void evalUngetc(const FnDescription *Desc, const CallEvent &Call,
CheckerContext &C) const;
void preFseek(const FnDescription *Desc, const CallEvent &Call,
CheckerContext &C) const;
void evalFseek(const FnDescription *Desc, const CallEvent &Call,
@ -916,6 +926,45 @@ void StreamChecker::evalFputx(const FnDescription *Desc, const CallEvent &Call,
C.addTransition(StateFailed);
}
void StreamChecker::evalUngetc(const FnDescription *Desc, const CallEvent &Call,
CheckerContext &C) const {
ProgramStateRef State = C.getState();
SymbolRef StreamSym = getStreamArg(Desc, Call).getAsSymbol();
if (!StreamSym)
return;
const CallExpr *CE = dyn_cast_or_null<CallExpr>(Call.getOriginExpr());
if (!CE)
return;
const StreamState *OldSS = State->get<StreamMap>(StreamSym);
if (!OldSS)
return;
assertStreamStateOpened(OldSS);
// Generate a transition for the success state.
std::optional<NonLoc> PutVal = Call.getArgSVal(0).getAs<NonLoc>();
if (!PutVal)
return;
ProgramStateRef StateNotFailed =
State->BindExpr(CE, C.getLocationContext(), *PutVal);
StateNotFailed =
StateNotFailed->set<StreamMap>(StreamSym, StreamState::getOpened(Desc));
C.addTransition(StateNotFailed);
// Add transition for the failed state.
// Failure of 'ungetc' does not result in feof or ferror state.
// If the PutVal has value of EofVal the function should "fail", but this is
// the same transition as the success state.
// In this case only one state transition is added by the analyzer (the two
// new states may be similar).
ProgramStateRef StateFailed = bindInt(*EofVal, State, C, CE);
StateFailed =
StateFailed->set<StreamMap>(StreamSym, StreamState::getOpened(Desc));
C.addTransition(StateFailed);
}
void StreamChecker::preFseek(const FnDescription *Desc, const CallEvent &Call,
CheckerContext &C) const {
ProgramStateRef State = C.getState();
@ -1068,10 +1117,10 @@ void StreamChecker::evalFtell(const FnDescription *Desc, const CallEvent &Call,
NonLoc RetVal = makeRetVal(C, CE).castAs<NonLoc>();
ProgramStateRef StateNotFailed =
State->BindExpr(CE, C.getLocationContext(), RetVal);
auto Cond = SVB.evalBinOp(State, BO_GE, RetVal,
SVB.makeZeroVal(C.getASTContext().LongTy),
SVB.getConditionType())
.getAs<DefinedOrUnknownSVal>();
auto Cond =
SVB.evalBinOp(State, BO_GE, RetVal, SVB.makeZeroVal(Call.getResultType()),
SVB.getConditionType())
.getAs<DefinedOrUnknownSVal>();
if (!Cond)
return;
StateNotFailed = StateNotFailed->assume(*Cond, true);
@ -1079,7 +1128,7 @@ void StreamChecker::evalFtell(const FnDescription *Desc, const CallEvent &Call,
return;
ProgramStateRef StateFailed = State->BindExpr(
CE, C.getLocationContext(), SVB.makeIntVal(-1, C.getASTContext().LongTy));
CE, C.getLocationContext(), SVB.makeIntVal(-1, Call.getResultType()));
// This function does not affect the stream state.
// Still we add success and failure state with the appropriate return value.

View File

@ -571,6 +571,11 @@ static int dumpConfig(bool IsSTDIN) {
return 0;
}
using String = SmallString<128>;
static String IgnoreDir; // Directory of .clang-format-ignore file.
static String PrevDir; // Directory of previous `FilePath`.
static SmallVector<String> Patterns; // Patterns in .clang-format-ignore file.
// Check whether `FilePath` is ignored according to the nearest
// .clang-format-ignore file based on the rules below:
// - A blank line is skipped.
@ -586,33 +591,50 @@ static bool isIgnored(StringRef FilePath) {
if (!is_regular_file(FilePath))
return false;
using namespace llvm::sys::path;
SmallString<128> Path, AbsPath{FilePath};
String Path;
String AbsPath{FilePath};
using namespace llvm::sys::path;
make_absolute(AbsPath);
remove_dots(AbsPath, /*remove_dot_dot=*/true);
StringRef IgnoreDir{AbsPath};
do {
IgnoreDir = parent_path(IgnoreDir);
if (IgnoreDir.empty())
if (StringRef Dir{parent_path(AbsPath)}; PrevDir != Dir) {
PrevDir = Dir;
for (;;) {
Path = Dir;
append(Path, ".clang-format-ignore");
if (is_regular_file(Path))
break;
Dir = parent_path(Dir);
if (Dir.empty())
return false;
}
IgnoreDir = convert_to_slash(Dir);
std::ifstream IgnoreFile{Path.c_str()};
if (!IgnoreFile.good())
return false;
Path = IgnoreDir;
append(Path, ".clang-format-ignore");
} while (!is_regular_file(Path));
Patterns.clear();
std::ifstream IgnoreFile{Path.c_str()};
if (!IgnoreFile.good())
for (std::string Line; std::getline(IgnoreFile, Line);) {
if (const auto Pattern{StringRef{Line}.trim()};
// Skip empty and comment lines.
!Pattern.empty() && Pattern[0] != '#') {
Patterns.push_back(Pattern);
}
}
}
if (IgnoreDir.empty())
return false;
const auto Pathname = convert_to_slash(AbsPath);
for (std::string Line; std::getline(IgnoreFile, Line);) {
auto Pattern = StringRef(Line).trim();
if (Pattern.empty() || Pattern[0] == '#')
continue;
const bool IsNegated = Pattern[0] == '!';
const auto Pathname{convert_to_slash(AbsPath)};
for (const auto &Pat : Patterns) {
const bool IsNegated = Pat[0] == '!';
StringRef Pattern{Pat};
if (IsNegated)
Pattern = Pattern.drop_front();
@ -620,11 +642,14 @@ static bool isIgnored(StringRef FilePath) {
continue;
Pattern = Pattern.ltrim();
// `Pattern` is relative to `IgnoreDir` unless it starts with a slash.
// This doesn't support patterns containing drive names (e.g. `C:`).
if (Pattern[0] != '/') {
Path = convert_to_slash(IgnoreDir);
Path = IgnoreDir;
append(Path, Style::posix, Pattern);
remove_dots(Path, /*remove_dot_dot=*/true, Style::posix);
Pattern = Path.str();
Pattern = Path;
}
if (clang::format::matchFilePath(Pattern, Pathname) == !IsNegated)

View File

@ -123,6 +123,8 @@ INSTR_PROF_VALUE_NODE(PtrToNodeT, llvm::PointerType::getUnqual(Ctx), Next, \
/* INSTR_PROF_RAW_HEADER start */
/* Definition of member fields of the raw profile header data structure. */
/* Please update llvm/docs/InstrProfileFormat.rst as appropriate when updating
raw profile format. */
#ifndef INSTR_PROF_RAW_HEADER
#define INSTR_PROF_RAW_HEADER(Type, Name, Initializer)
#else

View File

@ -20,15 +20,6 @@
#include "InstrProfiling.h"
#include "InstrProfilingInternal.h"
#if defined(__FreeBSD__) && !defined(ElfW)
/*
* FreeBSD's elf.h and link.h headers do not define the ElfW(type) macro yet.
* If this is added to all supported FreeBSD versions in the future, this
* compatibility macro can be removed.
*/
#define ElfW(type) __ElfN(type)
#endif
#define PROF_DATA_START INSTR_PROF_SECT_START(INSTR_PROF_DATA_COMMON)
#define PROF_DATA_STOP INSTR_PROF_SECT_STOP(INSTR_PROF_DATA_COMMON)
#define PROF_NAME_START INSTR_PROF_SECT_START(INSTR_PROF_NAME_COMMON)

View File

@ -279,3 +279,6 @@ COMMON_FLAG(bool, test_only_replace_dlopen_main_program, false,
COMMON_FLAG(bool, enable_symbolizer_markup, SANITIZER_FUCHSIA,
"Use sanitizer symbolizer markup, available on Linux "
"and always set true for Fuchsia.")
COMMON_FLAG(bool, detect_invalid_join, true,
"If set, check invalid joins of threads.")

View File

@ -95,17 +95,33 @@ void ReportErrorSummary(const char *error_type, const StackTrace *stack,
#if !SANITIZER_GO
if (!common_flags()->print_summary)
return;
if (stack->size == 0) {
ReportErrorSummary(error_type);
return;
// Find first non-internal stack frame.
for (uptr i = 0; i < stack->size; ++i) {
uptr pc = StackTrace::GetPreviousInstructionPc(stack->trace[i]);
SymbolizedStackHolder symbolized_stack(
Symbolizer::GetOrInit()->SymbolizePC(pc));
if (const SymbolizedStack *frame = symbolized_stack.get()) {
if (const SymbolizedStack *summary_frame = SkipInternalFrames(frame)) {
ReportErrorSummary(error_type, summary_frame->info, alt_tool_name);
return;
}
}
}
// Currently, we include the first stack frame into the report summary.
// Maybe sometimes we need to choose another frame (e.g. skip memcpy/etc).
uptr pc = StackTrace::GetPreviousInstructionPc(stack->trace[0]);
SymbolizedStackHolder symbolized_stack(
Symbolizer::GetOrInit()->SymbolizePC(pc));
const SymbolizedStack *frame = symbolized_stack.get();
ReportErrorSummary(error_type, frame->info, alt_tool_name);
// Fallback to the top one.
if (stack->size) {
uptr pc = StackTrace::GetPreviousInstructionPc(stack->trace[0]);
SymbolizedStackHolder symbolized_stack(
Symbolizer::GetOrInit()->SymbolizePC(pc));
if (const SymbolizedStack *frame = symbolized_stack.get()) {
ReportErrorSummary(error_type, frame->info, alt_tool_name);
return;
}
}
// Fallback to a summary without location.
ReportErrorSummary(error_type);
#endif
}

View File

@ -23,6 +23,9 @@ void ThreadArgRetval::CreateLocked(uptr thread, bool detached,
Data& t = data_[thread];
t = {};
t.gen = gen_++;
static_assert(sizeof(gen_) == sizeof(u32) && kInvalidGen == UINT32_MAX);
if (gen_ == kInvalidGen)
gen_ = 0;
t.detached = detached;
t.args = args;
}
@ -53,16 +56,28 @@ void ThreadArgRetval::Finish(uptr thread, void* retval) {
u32 ThreadArgRetval::BeforeJoin(uptr thread) const {
__sanitizer::Lock lock(&mtx_);
auto t = data_.find(thread);
CHECK(t);
CHECK(!t->second.detached);
return t->second.gen;
if (t && !t->second.detached) {
return t->second.gen;
}
if (!common_flags()->detect_invalid_join)
return kInvalidGen;
const char* reason = "unknown";
if (!t) {
reason = "already joined";
} else if (t->second.detached) {
reason = "detached";
}
Report("ERROR: %s: Joining %s thread, aborting.\n", SanitizerToolName,
reason);
Die();
}
void ThreadArgRetval::AfterJoin(uptr thread, u32 gen) {
__sanitizer::Lock lock(&mtx_);
auto t = data_.find(thread);
if (!t || gen != t->second.gen) {
// Thread was reused and erased by any other event.
// Thread was reused and erased by any other event, or we had an invalid
// join.
return;
}
CHECK(!t->second.detached);

View File

@ -93,6 +93,7 @@ class SANITIZER_MUTEX ThreadArgRetval {
// will keep pointers alive forever, missing leaks caused by cancelation.
private:
static const u32 kInvalidGen = UINT32_MAX;
struct Data {
Args args;
u32 gen; // Avoid collision if thread id re-used.

View File

@ -51,7 +51,7 @@ struct ConditionVariableState {
template <typename Config>
struct ConditionVariableState<Config, decltype(Config::UseConditionVariable)> {
static constexpr bool enabled() { return true; }
static constexpr bool enabled() { return Config::UseConditionVariable; }
using ConditionVariableT = typename Config::ConditionVariableT;
};

View File

@ -1140,11 +1140,6 @@ __sanitizer_verify_double_ended_contiguous_container(const void*, const void*, c
# define _LIBCPP_HAS_TRIVIAL_CONDVAR_DESTRUCTION
# endif
// Some systems do not provide gets() in their C library, for security reasons.
# if defined(_LIBCPP_MSVCRT) || (defined(__FreeBSD_version) && __FreeBSD_version >= 1300043) || defined(__OpenBSD__)
# define _LIBCPP_C_HAS_NO_GETS
# endif
# if defined(__BIONIC__) || defined(__NuttX__) || defined(__Fuchsia__) || defined(__wasi__) || \
defined(_LIBCPP_HAS_MUSL_LIBC) || defined(__OpenBSD__)
# define _LIBCPP_PROVIDES_DEFAULT_RUNE_TABLE

View File

@ -1166,12 +1166,12 @@ inline _LIBCPP_HIDE_FROM_ABI bool operator!=(nullptr_t, const shared_ptr<_Tp>& _
template <class _Tp>
inline _LIBCPP_HIDE_FROM_ABI bool operator<(const shared_ptr<_Tp>& __x, nullptr_t) _NOEXCEPT {
return less<_Tp*>()(__x.get(), nullptr);
return less<typename shared_ptr<_Tp>::element_type*>()(__x.get(), nullptr);
}
template <class _Tp>
inline _LIBCPP_HIDE_FROM_ABI bool operator<(nullptr_t, const shared_ptr<_Tp>& __x) _NOEXCEPT {
return less<_Tp*>()(nullptr, __x.get());
return less<typename shared_ptr<_Tp>::element_type*>()(nullptr, __x.get());
}
template <class _Tp>

View File

@ -159,7 +159,7 @@ using ::tmpfile _LIBCPP_USING_IF_EXISTS;
using ::tmpnam _LIBCPP_USING_IF_EXISTS;
using ::getchar _LIBCPP_USING_IF_EXISTS;
#if _LIBCPP_STD_VER <= 11 && !defined(_LIBCPP_C_HAS_NO_GETS)
#if _LIBCPP_STD_VER <= 11
using ::gets _LIBCPP_USING_IF_EXISTS;
#endif
using ::scanf _LIBCPP_USING_IF_EXISTS;

View File

@ -1889,6 +1889,9 @@ void __r_anchor_multiline<_CharT>::__exec(__state& __s) const {
if (__s.__current_ == __s.__last_ && !(__s.__flags_ & regex_constants::match_not_eol)) {
__s.__do_ = __state::__accept_but_not_consume;
__s.__node_ = this->first();
} else if (__s.__current_ == __s.__first_ && !(__s.__flags_ & regex_constants::match_not_eol)) {
__s.__do_ = __state::__accept_but_not_consume;
__s.__node_ = this->first();
} else if (__multiline_ && std::__is_eol(*__s.__current_)) {
__s.__do_ = __state::__accept_but_not_consume;
__s.__node_ = this->first();

View File

@ -141,7 +141,7 @@ template <class... Tuples> tuple<CTypes...> tuple_cat(Tuples&&... tpls); // cons
// [tuple.apply], calling a function with a tuple of arguments:
template <class F, class Tuple>
constexpr decltype(auto) apply(F&& f, Tuple&& t); // C++17
constexpr decltype(auto) apply(F&& f, Tuple&& t) noexcept(see below); // C++17 noexcept since C++23
template <class T, class Tuple>
constexpr T make_from_tuple(Tuple&& t); // C++17

View File

@ -165,6 +165,8 @@ RelExpr AArch64::getRelExpr(RelType type, const Symbol &s,
case R_AARCH64_ADR_GOT_PAGE:
case R_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
return R_AARCH64_GOT_PAGE_PC;
case R_AARCH64_GOTPCREL32:
return R_GOT_PC;
case R_AARCH64_NONE:
return R_NONE;
default:
@ -374,6 +376,7 @@ void AArch64::relocate(uint8_t *loc, const Relocation &rel,
write32(loc, val);
break;
case R_AARCH64_PLT32:
case R_AARCH64_GOTPCREL32:
checkInt(loc, val, 32, rel);
write32(loc, val);
break;

View File

@ -82,89 +82,33 @@ static uint64_t getLoongArchPage(uint64_t p) {
static uint32_t lo12(uint32_t val) { return val & 0xfff; }
// Calculate the adjusted page delta between dest and PC.
uint64_t elf::getLoongArchPageDelta(uint64_t dest, uint64_t pc) {
// Consider the large code model access pattern, of which the smaller code
// models' access patterns are a subset:
//
// pcalau12i U, %foo_hi20(sym) ; b in [-0x80000, 0x7ffff]
// addi.d T, zero, %foo_lo12(sym) ; a in [-0x800, 0x7ff]
// lu32i.d T, %foo64_lo20(sym) ; c in [-0x80000, 0x7ffff]
// lu52i.d T, T, %foo64_hi12(sym) ; d in [-0x800, 0x7ff]
// {ldx,stx,add}.* dest, U, T
//
// Let page(pc) = 0xRRR'QQQQQ'PPPPP'000 and dest = 0xZZZ'YYYYY'XXXXX'AAA,
// with RQ, P, ZY, X and A representing the respective bitfields as unsigned
// integers. We have:
//
// page(dest) = 0xZZZ'YYYYY'XXXXX'000
// - page(pc) = 0xRRR'QQQQQ'PPPPP'000
// ----------------------------------
// 0xddd'ccccc'bbbbb'000
//
// Now consider the above pattern's actual effects:
//
// page(pc) 0xRRR'QQQQQ'PPPPP'000
// pcalau12i + 0xiii'iiiii'bbbbb'000
// addi + 0xjjj'jjjjj'kkkkk'AAA
// lu32i.d & lu52i.d + 0xddd'ccccc'00000'000
// --------------------------------------------------
// dest = U + T
// = ((RQ<<32) + (P<<12) + i + (b<<12)) + (j + k + A + (cd<<32))
// = (((RQ+cd)<<32) + i + j) + (((P+b)<<12) + k) + A
// = (ZY<<32) + (X<<12) + A
//
// ZY<<32 = (RQ<<32)+(cd<<32)+i+j, X<<12 = (P<<12)+(b<<12)+k
// cd<<32 = (ZY<<32)-(RQ<<32)-i-j, b<<12 = (X<<12)-(P<<12)-k
//
// where i and k are terms representing the effect of b's and A's sign
// extension respectively.
//
// i = signed b < 0 ? -0x10000'0000 : 0
// k = signed A < 0 ? -0x1000 : 0
//
// The j term is a bit complex: it represents the higher half of
// sign-extended bits from A that are effectively lost if i == 0 but k != 0,
// due to overwriting by lu32i.d & lu52i.d.
//
// j = signed A < 0 && signed b >= 0 ? 0x10000'0000 : 0
//
// The actual effect of the instruction sequence before the final addition,
// i.e. our desired result value, is thus:
//
// result = (cd<<32) + (b<<12)
// = (ZY<<32)-(RQ<<32)-i-j + (X<<12)-(P<<12)-k
// = ((ZY<<32)+(X<<12)) - ((RQ<<32)+(P<<12)) - i - j - k
// = page(dest) - page(pc) - i - j - k
//
// when signed A >= 0 && signed b >= 0:
//
// i = j = k = 0
// result = page(dest) - page(pc)
//
// when signed A >= 0 && signed b < 0:
//
// i = -0x10000'0000, j = k = 0
// result = page(dest) - page(pc) + 0x10000'0000
//
// when signed A < 0 && signed b >= 0:
//
// i = 0, j = 0x10000'0000, k = -0x1000
// result = page(dest) - page(pc) - 0x10000'0000 + 0x1000
//
// when signed A < 0 && signed b < 0:
//
// i = -0x10000'0000, j = 0, k = -0x1000
// result = page(dest) - page(pc) + 0x1000
uint64_t result = getLoongArchPage(dest) - getLoongArchPage(pc);
bool negativeA = lo12(dest) > 0x7ff;
bool negativeB = (result & 0x8000'0000) != 0;
if (negativeA)
result += 0x1000;
if (negativeA && !negativeB)
result -= 0x10000'0000;
else if (!negativeA && negativeB)
result += 0x10000'0000;
uint64_t elf::getLoongArchPageDelta(uint64_t dest, uint64_t pc, RelType type) {
// Note that if the sequence being relocated is `pcalau12i + addi.d + lu32i.d
// + lu52i.d`, they must be adjancent so that we can infer the PC of
// `pcalau12i` when calculating the page delta for the other two instructions
// (lu32i.d and lu52i.d). Compensate all the sign-extensions is a bit
// complicated. Just use psABI recommended algorithm.
uint64_t pcalau12i_pc;
switch (type) {
case R_LARCH_PCALA64_LO20:
case R_LARCH_GOT64_PC_LO20:
case R_LARCH_TLS_IE64_PC_LO20:
pcalau12i_pc = pc - 8;
break;
case R_LARCH_PCALA64_HI12:
case R_LARCH_GOT64_PC_HI12:
case R_LARCH_TLS_IE64_PC_HI12:
pcalau12i_pc = pc - 12;
break;
default:
pcalau12i_pc = pc;
break;
}
uint64_t result = getLoongArchPage(dest) - getLoongArchPage(pcalau12i_pc);
if (dest & 0x800)
result += 0x1000 - 0x1'0000'0000;
if (result & 0x8000'0000)
result += 0x1'0000'0000;
return result;
}

View File

@ -290,6 +290,7 @@ RelExpr RISCV::getRelExpr(const RelType type, const Symbol &s,
case R_RISCV_PLT32:
return R_PLT_PC;
case R_RISCV_GOT_HI20:
case R_RISCV_GOT32_PCREL:
return R_GOT_PC;
case R_RISCV_PCREL_LO12_I:
case R_RISCV_PCREL_LO12_S:
@ -499,6 +500,8 @@ void RISCV::relocate(uint8_t *loc, const Relocation &rel, uint64_t val) const {
case R_RISCV_SET32:
case R_RISCV_32_PCREL:
case R_RISCV_PLT32:
case R_RISCV_GOT32_PCREL:
checkInt(loc, val, 32, rel);
write32le(loc, val);
return;
@ -954,8 +957,8 @@ static void mergeArch(RISCVISAInfo::OrderedExtensionMap &mergedExts,
} else {
for (const auto &ext : info.getExtensions()) {
if (auto it = mergedExts.find(ext.first); it != mergedExts.end()) {
if (std::tie(it->second.MajorVersion, it->second.MinorVersion) >=
std::tie(ext.second.MajorVersion, ext.second.MinorVersion))
if (std::tie(it->second.Major, it->second.Minor) >=
std::tie(ext.second.Major, ext.second.Minor))
continue;
}
mergedExts[ext.first] = ext.second;

View File

@ -716,8 +716,8 @@ uint64_t InputSectionBase::getRelocTargetVA(const InputFile *file, RelType type,
return sym.getGotVA() + a - p;
case R_LOONGARCH_GOT_PAGE_PC:
if (sym.hasFlag(NEEDS_TLSGD))
return getLoongArchPageDelta(in.got->getGlobalDynAddr(sym) + a, p);
return getLoongArchPageDelta(sym.getGotVA() + a, p);
return getLoongArchPageDelta(in.got->getGlobalDynAddr(sym) + a, p, type);
return getLoongArchPageDelta(sym.getGotVA() + a, p, type);
case R_MIPS_GOTREL:
return sym.getVA(a) - in.mipsGot->getGp(file);
case R_MIPS_GOT_GP:
@ -767,7 +767,7 @@ uint64_t InputSectionBase::getRelocTargetVA(const InputFile *file, RelType type,
return 0;
}
case R_LOONGARCH_PAGE_PC:
return getLoongArchPageDelta(sym.getVA(a), p);
return getLoongArchPageDelta(sym.getVA(a), p, type);
case R_PC:
case R_ARM_PCA: {
uint64_t dest;
@ -802,7 +802,7 @@ uint64_t InputSectionBase::getRelocTargetVA(const InputFile *file, RelType type,
case R_PPC64_CALL_PLT:
return sym.getPltVA() + a - p;
case R_LOONGARCH_PLT_PAGE_PC:
return getLoongArchPageDelta(sym.getPltVA() + a, p);
return getLoongArchPageDelta(sym.getPltVA() + a, p, type);
case R_PLT_GOTPLT:
return sym.getPltVA() + a - in.gotPlt->getVA();
case R_PPC32_PLTREL:
@ -864,7 +864,7 @@ uint64_t InputSectionBase::getRelocTargetVA(const InputFile *file, RelType type,
case R_TLSGD_PC:
return in.got->getGlobalDynAddr(sym) + a - p;
case R_LOONGARCH_TLSGD_PAGE_PC:
return getLoongArchPageDelta(in.got->getGlobalDynAddr(sym) + a, p);
return getLoongArchPageDelta(in.got->getGlobalDynAddr(sym) + a, p, type);
case R_TLSLD_GOTPLT:
return in.got->getVA() + in.got->getTlsIndexOff() + a - in.gotPlt->getVA();
case R_TLSLD_GOT:

View File

@ -228,7 +228,7 @@ void addPPC64SaveRestore();
uint64_t getPPC64TocBase();
uint64_t getAArch64Page(uint64_t expr);
template <typename ELFT> void writeARMCmseImportLib();
uint64_t getLoongArchPageDelta(uint64_t dest, uint64_t pc);
uint64_t getLoongArchPageDelta(uint64_t dest, uint64_t pc, RelType type);
void riscvFinalizeRelax(int passes);
void mergeRISCVAttributesSections();
void addArmInputSectionMappingSymbols();

View File

@ -337,6 +337,12 @@ class Module : public std::enable_shared_from_this<Module>,
const ModuleFunctionSearchOptions &options,
SymbolContextList &sc_list);
/// Find functions by compiler context.
void FindFunctions(llvm::ArrayRef<CompilerContext> compiler_ctx,
lldb::FunctionNameType name_type_mask,
const ModuleFunctionSearchOptions &options,
SymbolContextList &sc_list);
/// Find functions by name.
///
/// If the function is an inlined function, it will have a block,

View File

@ -247,6 +247,10 @@ class TypeQuery {
/// match.
void AddLanguage(lldb::LanguageType language);
/// Set the list of languages that should produce a match to only the ones
/// specified in \ref languages.
void SetLanguages(LanguageSet languages);
/// Check if the language matches any languages that have been added to this
/// match object.
///

View File

@ -22,7 +22,7 @@ namespace lldb_private {
class StreamString : public Stream {
public:
StreamString();
StreamString(bool colors = false);
StreamString(uint32_t flags, uint32_t addr_size, lldb::ByteOrder byte_order);

View File

@ -855,6 +855,23 @@ void Module::FindFunctions(ConstString name,
}
}
void Module::FindFunctions(llvm::ArrayRef<CompilerContext> compiler_ctx,
                           FunctionNameType name_type_mask,
                           const ModuleFunctionSearchOptions &options,
                           SymbolContextList &sc_list) {
  // The lookup only makes sense when the innermost context entry names a
  // function.
  if (compiler_ctx.empty())
    return;
  const CompilerContext &leaf = compiler_ctx.back();
  if (leaf.kind != CompilerContextKind::Function)
    return;

  // First search by bare name, then keep only the candidates whose full
  // compiler context matches the requested one.
  SymbolContextList candidates;
  FindFunctions(leaf.name, CompilerDeclContext(), name_type_mask, options,
                candidates);
  for (auto &sc : candidates) {
    if (!sc.function)
      continue;
    if (compiler_ctx.equals(sc.function->GetCompilerContext()))
      sc_list.Append(sc);
  }
}
void Module::FindFunctions(const RegularExpression &regex,
const ModuleFunctionSearchOptions &options,
SymbolContextList &sc_list) {

View File

@ -943,44 +943,41 @@ void ClangASTImporter::ASTImporterDelegate::ImportDefinitionTo(
// the class was originally sourced from symbols.
if (ObjCInterfaceDecl *to_objc_interface = dyn_cast<ObjCInterfaceDecl>(to)) {
do {
ObjCInterfaceDecl *to_superclass = to_objc_interface->getSuperClass();
ObjCInterfaceDecl *to_superclass = to_objc_interface->getSuperClass();
if (to_superclass)
break; // we're not going to override it if it's set
if (to_superclass)
return; // we're not going to override it if it's set
ObjCInterfaceDecl *from_objc_interface =
dyn_cast<ObjCInterfaceDecl>(from);
ObjCInterfaceDecl *from_objc_interface = dyn_cast<ObjCInterfaceDecl>(from);
if (!from_objc_interface)
break;
if (!from_objc_interface)
return;
ObjCInterfaceDecl *from_superclass = from_objc_interface->getSuperClass();
ObjCInterfaceDecl *from_superclass = from_objc_interface->getSuperClass();
if (!from_superclass)
break;
if (!from_superclass)
return;
llvm::Expected<Decl *> imported_from_superclass_decl =
Import(from_superclass);
llvm::Expected<Decl *> imported_from_superclass_decl =
Import(from_superclass);
if (!imported_from_superclass_decl) {
LLDB_LOG_ERROR(log, imported_from_superclass_decl.takeError(),
"Couldn't import decl: {0}");
break;
}
if (!imported_from_superclass_decl) {
LLDB_LOG_ERROR(log, imported_from_superclass_decl.takeError(),
"Couldn't import decl: {0}");
return;
}
ObjCInterfaceDecl *imported_from_superclass =
dyn_cast<ObjCInterfaceDecl>(*imported_from_superclass_decl);
ObjCInterfaceDecl *imported_from_superclass =
dyn_cast<ObjCInterfaceDecl>(*imported_from_superclass_decl);
if (!imported_from_superclass)
break;
if (!imported_from_superclass)
return;
if (!to_objc_interface->hasDefinition())
to_objc_interface->startDefinition();
if (!to_objc_interface->hasDefinition())
to_objc_interface->startDefinition();
to_objc_interface->setSuperClass(m_source_ctx->getTrivialTypeSourceInfo(
m_source_ctx->getObjCInterfaceType(imported_from_superclass)));
} while (false);
to_objc_interface->setSuperClass(m_source_ctx->getTrivialTypeSourceInfo(
m_source_ctx->getObjCInterfaceType(imported_from_superclass)));
}
}

View File

@ -1031,6 +1031,41 @@ static void LoadLibCxxFormatters(lldb::TypeCategoryImplSP cpp_category_sp) {
"^std::__[[:alnum:]]+::chrono::seconds", eFormatterMatchRegex,
TypeSummaryImplSP(new StringSummaryFormat(
eTypeOptionHideChildren | eTypeOptionHideValue, "${var.__rep_} s")));
// Chrono calendar types
cpp_category_sp->AddTypeSummary(
"^std::__[[:alnum:]]+::chrono::day$", eFormatterMatchRegex,
TypeSummaryImplSP(new StringSummaryFormat(eTypeOptionHideChildren |
eTypeOptionHideValue,
"day=${var.__d_%u}")));
AddCXXSummary(cpp_category_sp,
lldb_private::formatters::LibcxxChronoMonthSummaryProvider,
"libc++ std::chrono::month summary provider",
"^std::__[[:alnum:]]+::chrono::month$",
eTypeOptionHideChildren | eTypeOptionHideValue, true);
cpp_category_sp->AddTypeSummary(
"^std::__[[:alnum:]]+::chrono::year$", eFormatterMatchRegex,
TypeSummaryImplSP(new StringSummaryFormat(
eTypeOptionHideChildren | eTypeOptionHideValue, "year=${var.__y_}")));
cpp_category_sp->AddTypeSummary(
"^std::__[[:alnum:]]+::chrono::month_day$", eFormatterMatchRegex,
TypeSummaryImplSP(new StringSummaryFormat(eTypeOptionHideChildren |
eTypeOptionHideValue,
"${var.__m_} ${var.__d_}")));
cpp_category_sp->AddTypeSummary(
"^std::__[[:alnum:]]+::chrono::month_day_last$", eFormatterMatchRegex,
TypeSummaryImplSP(new StringSummaryFormat(eTypeOptionHideChildren |
eTypeOptionHideValue,
"${var.__m_} day=last")));
AddCXXSummary(
cpp_category_sp,
lldb_private::formatters::LibcxxChronoYearMonthDaySummaryProvider,
"libc++ std::chrono::year_month_day summary provider",
"^std::__[[:alnum:]]+::chrono::year_month_day$",
eTypeOptionHideChildren | eTypeOptionHideValue, true);
}
static void LoadLibStdcppFormatters(lldb::TypeCategoryImplSP cpp_category_sp) {

View File

@ -1084,3 +1084,60 @@ bool lldb_private::formatters::LibcxxWStringViewSummaryProvider(
return ::LibcxxWStringSummaryProvider(valobj, stream, summary_options,
dataobj, size);
}
/// Summary provider for libc++ std::chrono::month: prints the English month
/// name for values 1-12, or the raw number for out-of-range values.
bool lldb_private::formatters::LibcxxChronoMonthSummaryProvider(
    ValueObject &valobj, Stream &stream, const TypeSummaryOptions &options) {
  // FIXME: These are the names used in the C++20 ostream operator. Since LLVM
  // uses C++17 it's not possible to use the ostream operator directly.
  // `static constexpr` instead of `static const`: the table is a literal type
  // with constant initializers, so this avoids the thread-safe runtime
  // initialization guard a dynamically-initialized function-local static
  // would carry.
  static constexpr std::array<std::string_view, 12> months = {
      "January", "February", "March",     "April",   "May",      "June",
      "July",    "August",   "September", "October", "November", "December"};

  // std::chrono::month keeps its value in the member __m_.
  ValueObjectSP ptr_sp = valobj.GetChildMemberWithName("__m_");
  if (!ptr_sp)
    return false;

  // Valid months are 1-12; anything else is printed numerically so malformed
  // values stay visible rather than being masked.
  const unsigned month = ptr_sp->GetValueAsUnsigned(0);
  if (month >= 1 && month <= 12)
    stream << "month=" << months[month - 1];
  else
    stream.Printf("month=%u", month);

  return true;
}
/// Summary provider for libc++ std::chrono::year_month_day: prints the date
/// as "date=YYYY-MM-DD" (with a leading '-' for negative years).
bool lldb_private::formatters::LibcxxChronoYearMonthDaySummaryProvider(
    ValueObject &valobj, Stream &stream, const TypeSummaryOptions &options) {
  // Each calendar field wraps its payload in a member of the same name as the
  // outer member, so the actual value sits two levels deep.
  auto unwrap = [&valobj](const char *name) -> ValueObjectSP {
    ValueObjectSP outer_sp = valobj.GetChildMemberWithName(name);
    return outer_sp ? outer_sp->GetChildMemberWithName(name) : ValueObjectSP();
  };

  ValueObjectSP year_sp = unwrap("__y_");
  if (!year_sp)
    return false;
  int year = year_sp->GetValueAsSigned(0);

  ValueObjectSP month_sp = unwrap("__m_");
  if (!month_sp)
    return false;
  const unsigned month = month_sp->GetValueAsUnsigned(0);

  ValueObjectSP day_sp = unwrap("__d_");
  if (!day_sp)
    return false;
  const unsigned day = day_sp->GetValueAsUnsigned(0);

  stream << "date=";
  if (year < 0) {
    stream << '-';
    year = -year;
  }
  stream.Printf("%04d-%02u-%02u", year, month, day);
  return true;
}

View File

@ -261,6 +261,14 @@ SyntheticChildrenFrontEnd *
LibcxxStdRangesRefViewSyntheticFrontEndCreator(CXXSyntheticChildren *,
lldb::ValueObjectSP);
bool LibcxxChronoMonthSummaryProvider(
ValueObject &valobj, Stream &stream,
const TypeSummaryOptions &options); // libc++ std::chrono::month
bool LibcxxChronoYearMonthDaySummaryProvider(
ValueObject &valobj, Stream &stream,
const TypeSummaryOptions &options); // libc++ std::chrono::year_month_day
} // namespace formatters
} // namespace lldb_private

View File

@ -373,44 +373,51 @@ std::vector<DWARFDIE> DWARFDIE::GetDeclContextDIEs() const {
return result;
}
std::vector<lldb_private::CompilerContext> DWARFDIE::GetDeclContext() const {
static std::vector<lldb_private::CompilerContext>
GetDeclContextImpl(llvm::SmallSet<lldb::user_id_t, 4> &seen, DWARFDIE die) {
std::vector<lldb_private::CompilerContext> context;
const dw_tag_t tag = Tag();
if (tag == DW_TAG_compile_unit || tag == DW_TAG_partial_unit)
// Stop if we hit a cycle.
if (!die || !seen.insert(die.GetID()).second)
return context;
DWARFDIE parent = GetParent();
if (parent)
context = parent.GetDeclContext();
// Handle outline member function DIEs by following the specification.
if (DWARFDIE spec = die.GetReferencedDIE(DW_AT_specification))
return GetDeclContextImpl(seen, spec);
// Get the parent context chain.
context = GetDeclContextImpl(seen, die.GetParent());
// Add this DIE's contribution at the end of the chain.
auto push_ctx = [&](CompilerContextKind kind, llvm::StringRef name) {
context.push_back({kind, ConstString(name)});
};
switch (tag) {
switch (die.Tag()) {
case DW_TAG_module:
push_ctx(CompilerContextKind::Module, GetName());
push_ctx(CompilerContextKind::Module, die.GetName());
break;
case DW_TAG_namespace:
push_ctx(CompilerContextKind::Namespace, GetName());
push_ctx(CompilerContextKind::Namespace, die.GetName());
break;
case DW_TAG_structure_type:
push_ctx(CompilerContextKind::Struct, GetName());
push_ctx(CompilerContextKind::Struct, die.GetName());
break;
case DW_TAG_union_type:
push_ctx(CompilerContextKind::Union, GetName());
push_ctx(CompilerContextKind::Union, die.GetName());
break;
case DW_TAG_class_type:
push_ctx(CompilerContextKind::Class, GetName());
push_ctx(CompilerContextKind::Class, die.GetName());
break;
case DW_TAG_enumeration_type:
push_ctx(CompilerContextKind::Enum, GetName());
push_ctx(CompilerContextKind::Enum, die.GetName());
break;
case DW_TAG_subprogram:
push_ctx(CompilerContextKind::Function, GetPubname());
push_ctx(CompilerContextKind::Function, die.GetName());
break;
case DW_TAG_variable:
push_ctx(CompilerContextKind::Variable, GetPubname());
push_ctx(CompilerContextKind::Variable, die.GetPubname());
break;
case DW_TAG_typedef:
push_ctx(CompilerContextKind::Typedef, GetName());
push_ctx(CompilerContextKind::Typedef, die.GetName());
break;
default:
break;
@ -418,6 +425,11 @@ std::vector<lldb_private::CompilerContext> DWARFDIE::GetDeclContext() const {
return context;
}
std::vector<lldb_private::CompilerContext> DWARFDIE::GetDeclContext() const {
llvm::SmallSet<lldb::user_id_t, 4> seen;
return GetDeclContextImpl(seen, *this);
}
std::vector<lldb_private::CompilerContext>
DWARFDIE::GetTypeLookupContext() const {
std::vector<lldb_private::CompilerContext> context;

View File

@ -2574,11 +2574,12 @@ void SymbolFileDWARF::FindFunctions(const Module::LookupInfo &lookup_info,
Module::LookupInfo no_tp_lookup_info(lookup_info);
no_tp_lookup_info.SetLookupName(ConstString(name_no_template_params));
m_index->GetFunctions(no_tp_lookup_info, *this, parent_decl_ctx, [&](DWARFDIE die) {
if (resolved_dies.insert(die.GetDIE()).second)
ResolveFunction(die, include_inlines, sc_list);
return true;
});
m_index->GetFunctions(no_tp_lookup_info, *this, parent_decl_ctx,
[&](DWARFDIE die) {
if (resolved_dies.insert(die.GetDIE()).second)
ResolveFunction(die, include_inlines, sc_list);
return true;
});
}
}

View File

@ -158,7 +158,7 @@ CommandObjectProcessTraceStartIntelPT::CommandOptions::GetDefinitions() {
return llvm::ArrayRef(g_process_trace_start_intel_pt_options);
}
bool CommandObjectProcessTraceStartIntelPT::DoExecute(
void CommandObjectProcessTraceStartIntelPT::DoExecute(
Args &command, CommandReturnObject &result) {
if (Error err = m_trace.Start(
m_options.m_ipt_trace_size, m_options.m_process_buffer_size_limit,
@ -167,8 +167,6 @@ bool CommandObjectProcessTraceStartIntelPT::DoExecute(
result.SetError(Status(std::move(err)));
else
result.SetStatus(eReturnStatusSuccessFinishResult);
return result.Succeeded();
}
std::optional<uint64_t>

View File

@ -85,11 +85,11 @@ double DecodedThread::NanosecondsRange::GetInterpolatedTime(
return interpolate(next_range->nanos);
}
uint64_t DecodedThread::GetItemsCount() const { return m_item_kinds.size(); }
uint64_t DecodedThread::GetItemsCount() const { return m_item_data.size(); }
lldb::addr_t
DecodedThread::GetInstructionLoadAddress(uint64_t item_index) const {
return m_item_data[item_index].load_address;
return std::get<lldb::addr_t>(m_item_data[item_index]);
}
lldb::addr_t
@ -99,14 +99,16 @@ DecodedThread::GetSyncPointOffsetByIndex(uint64_t item_index) const {
ThreadSP DecodedThread::GetThread() { return m_thread_sp; }
template <typename Data>
DecodedThread::TraceItemStorage &
DecodedThread::CreateNewTraceItem(lldb::TraceItemKind kind) {
m_item_kinds.push_back(kind);
m_item_data.emplace_back();
DecodedThread::CreateNewTraceItem(lldb::TraceItemKind kind, Data &&data) {
m_item_data.emplace_back(data);
if (m_last_tsc)
(*m_last_tsc)->second.items_count++;
if (m_last_nanoseconds)
(*m_last_nanoseconds)->second.items_count++;
return m_item_data.back();
}
@ -176,27 +178,27 @@ uint64_t DecodedThread::GetTotalInstructionCount() const {
}
void DecodedThread::AppendEvent(lldb::TraceEvent event) {
CreateNewTraceItem(lldb::eTraceItemKindEvent).event = event;
CreateNewTraceItem(lldb::eTraceItemKindEvent, event);
m_events_stats.RecordEvent(event);
}
void DecodedThread::AppendInstruction(const pt_insn &insn) {
CreateNewTraceItem(lldb::eTraceItemKindInstruction).load_address = insn.ip;
CreateNewTraceItem(lldb::eTraceItemKindInstruction, insn.ip);
m_insn_count++;
}
void DecodedThread::AppendError(const IntelPTError &error) {
CreateNewTraceItem(lldb::eTraceItemKindError).error = error.message();
CreateNewTraceItem(lldb::eTraceItemKindError, error.message());
m_error_stats.RecordError(/*fatal=*/false);
}
void DecodedThread::AppendCustomError(StringRef err, bool fatal) {
CreateNewTraceItem(lldb::eTraceItemKindError).error = err.str();
CreateNewTraceItem(lldb::eTraceItemKindError, err.str());
m_error_stats.RecordError(fatal);
}
lldb::TraceEvent DecodedThread::GetEventByIndex(int item_index) const {
return m_item_data[item_index].event;
return std::get<lldb::TraceEvent>(m_item_data[item_index]);
}
const DecodedThread::EventsStats &DecodedThread::GetEventsStats() const {
@ -233,13 +235,18 @@ const DecodedThread::ErrorStats &DecodedThread::GetErrorStats() const {
lldb::TraceItemKind
DecodedThread::GetItemKindByIndex(uint64_t item_index) const {
return static_cast<lldb::TraceItemKind>(m_item_kinds[item_index]);
return std::visit(
llvm::makeVisitor(
[](const std::string &) { return lldb::eTraceItemKindError; },
[](lldb::TraceEvent) { return lldb::eTraceItemKindEvent; },
[](lldb::addr_t) { return lldb::eTraceItemKindInstruction; }),
m_item_data[item_index]);
}
llvm::StringRef DecodedThread::GetErrorByIndex(uint64_t item_index) const {
if (item_index >= m_item_data.size())
return llvm::StringRef();
return m_item_data[item_index].error;
return std::get<std::string>(m_item_data[item_index]);
}
DecodedThread::DecodedThread(
@ -249,7 +256,6 @@ DecodedThread::DecodedThread(
size_t DecodedThread::CalculateApproximateMemoryUsage() const {
return sizeof(TraceItemStorage) * m_item_data.size() +
sizeof(uint8_t) * m_item_kinds.size() +
(sizeof(uint64_t) + sizeof(TSC)) * m_tscs.size() +
(sizeof(uint64_t) + sizeof(uint64_t)) * m_nanoseconds.size() +
(sizeof(uint64_t) + sizeof(lldb::cpu_id_t)) * m_cpus.size();

View File

@ -14,9 +14,10 @@
#include "lldb/Utility/TraceIntelPTGDBRemotePackets.h"
#include "llvm/Support/Errc.h"
#include "llvm/Support/Error.h"
#include <deque>
#include <optional>
#include <utility>
#include <vector>
#include <variant>
namespace lldb_private {
namespace trace_intel_pt {
@ -265,30 +266,19 @@ class DecodedThread : public std::enable_shared_from_this<DecodedThread> {
/// to update \a CalculateApproximateMemoryUsage() accordingly.
lldb::ThreadSP m_thread_sp;
/// We use a union to optimize the memory usage for the different kinds of
/// trace items.
union TraceItemStorage {
/// The load addresses of this item if it's an instruction.
uint64_t load_address;
/// The event kind of this item if it's an event
lldb::TraceEvent event;
/// The string message of this item if it's an error
std::string error;
};
using TraceItemStorage =
std::variant<std::string, lldb::TraceEvent, lldb::addr_t>;
/// Create a new trace item.
///
/// \return
/// The index of the new item.
DecodedThread::TraceItemStorage &CreateNewTraceItem(lldb::TraceItemKind kind);
template <typename Data>
DecodedThread::TraceItemStorage &CreateNewTraceItem(lldb::TraceItemKind kind,
Data &&data);
/// Most of the trace data is stored here.
std::vector<TraceItemStorage> m_item_data;
/// The TraceItemKind for each trace item encoded as uint8_t. We don't include
/// it in TraceItemStorage to avoid padding.
std::vector<uint8_t> m_item_kinds;
std::deque<TraceItemStorage> m_item_data;
/// This map contains the TSCs of the decoded trace items. It maps
/// `item index -> TSC`, where `item index` is the first index

View File

@ -572,7 +572,7 @@ Error lldb_private::trace_intel_pt::DecodeSingleTraceForThread(
Expected<PSBBlockDecoder> decoder = PSBBlockDecoder::Create(
trace_intel_pt, block, buffer.slice(block.psb_offset, block.size),
*decoded_thread.GetThread()->GetProcess(),
i + 1 < blocks->size() ? blocks->at(i + 1).starting_ip : None,
i + 1 < blocks->size() ? blocks->at(i + 1).starting_ip : std::nullopt,
decoded_thread, std::nullopt);
if (!decoder)
return decoder.takeError();
@ -640,7 +640,7 @@ Error lldb_private::trace_intel_pt::DecodeSystemWideTraceForThread(
*decoded_thread.GetThread()->GetProcess(),
j + 1 < execution.psb_blocks.size()
? execution.psb_blocks[j + 1].starting_ip
: None,
: std::nullopt,
decoded_thread, execution.thread_execution.GetEndTSC());
if (!decoder)
return decoder.takeError();

Some files were not shown because too many files have changed in this diff Show More